blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4603650e776cec0d6f359160f4e59c05d3690c37 | 13e5bb662191dea441b93247a5112f6cefd83d0c | /rf_tuning.R | 7eebefee49c852966a95f7ce001d3d4769d86d2c | [] | no_license | nmagas/classification-comp | f8e0e9448c34af0a0dc0380f4aafaf8d0c4ba1d3 | 61877f28d43b562a1d19c5d50aa08fcf822dfc7b | refs/heads/master | 2023-05-02T19:00:23.569110 | 2021-05-28T23:25:29 | 2021-05-28T23:25:29 | 370,411,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 871 | r | rf_tuning.R | # Random Forest Tuning ----
# Load package(s) ----
library(tidyverse)
library(tidymodels)
set.seed(42)
# load required objects ----
load(file = "data/loan_folds.rda")
load(file = "data/loan_recipe.rda")
# Define model ----
rf_model <- rand_forest(mtry = tune(), min_n = tune()) %>%
set_mode("classification") %>%
set_engine("ranger")
# set-up tuning grid ----
# checking parameters
rf_params <- parameters(rf_model) %>%
update(mtry = mtry(range = c(1, 10)))
# define tuning grid
rf_grid <- grid_regular(rf_params, levels = 5)
# workflow ----
rf_workflow <- workflow() %>%
add_model(rf_model) %>%
add_recipe(loan_recipe)
# Tuning/fitting ----
# Place tuning code in here
rf_tune <- rf_workflow %>%
tune_grid(resamples = loan_folds, grid = rf_grid)
# Write out results & workflow
save(rf_tune, rf_workflow, file = "data/rf_tune.rda")
|
e29b38603bf8a7ba950e09b62c6b77fa30479387 | 535921a60d15998a64bf2a61043b5527e966b09e | /src/get_fdr_nfeat_associated_univar_igx_mods.R | 8908eb26ca6626ac236491effb64f8021dfd839a | [] | no_license | SimonCouv/ip-igx-public | 6209923499e3f64acb2a128e385abb6f8305b171 | fe56017e15a1e5c3f104403112c2f51b47251349 | refs/heads/master | 2020-06-11T21:19:50.162833 | 2019-06-28T21:06:19 | 2019-06-28T21:06:19 | 194,088,945 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 905 | r | get_fdr_nfeat_associated_univar_igx_mods.R | get_fdr_nfeat_associated_univar_igx_mods <- function(fdr_thresholds, data, p_adj="pv_adj_global"){
n_associated <- list()
for (fdr_threshold in fdr_thresholds){
n_associated[[as.character(fdr_threshold)]] <- data %>%
.[.[[p_adj]] < fdr_threshold, ] %>%
summarise(n_modules = n_distinct(module_tag),
n_IP = n_distinct(subset_name),
# prop_igx = n_igx/unique(total_igx),
# prop_ip = n_ip/unique(total_ip),
n_IgG = unique(module_tag) %>% str_detect(., "G") %>% sum(.),
n_IgA = n_modules - n_IgG)
}
n_associated %>%
dplyr::bind_rows(n_associated, .id = "fdr_threshold")%>%
tidyr::gather(key, n, -fdr_threshold) %>%
tidyr::separate(key, into = c("type", "omic")) %>%
dplyr::group_by(omic) %>%
dplyr::mutate(prop=n/max(n),
fdr_threshold = as.numeric(fdr_threshold))
} |
b6ea84d8efec050636d668ab9165dbb895deb481 | 5344417f4d7f08603ccd0faa2e59f0eb62ba36f3 | /man/slm_log.Rd | 2e7712fd3166f7d28623737ae30c9e8be305e721 | [] | no_license | yunrougong/r_package_sha | 455bb23daccd413d025ef4b99bb4dab31cca8f70 | afb1e9e757de1db0c5f142e1f56beebc50b25651 | refs/heads/master | 2021-08-30T18:13:52.266212 | 2017-12-19T00:00:51 | 2017-12-19T00:00:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 742 | rd | slm_log.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slm_log.R
\name{slm_log}
\alias{slm_log}
\title{Fit a Simple Linear Regression Model (Log transformed)}
\usage{
slm_log(data, x, y)
}
\arguments{
\item{data}{a dataframe}
\item{x}{the independent variable}
\item{y}{the dependent varible}
}
\value{
summary of the model
}
\description{
This is a function to fit a simple linear regression model on two continuous variables.
Since the dependent variable may violate the normality assumption, in this function the dependent
variable is log transformed so that it follows a normal distribution.
}
\examples{
slm_log(sleepdata, "Tempreture", "SleepDuration")
}
\seealso{
\code{\link{slm_s}}
}
\author{
Yunrou Gong
}
|
8aa88f9347caa7cf453930e10701d2f5fb701c9a | f16c66f69be8127eca953d6472141f3fd125fb5e | /Map Count Visualization/Data Preprocessing/get_bgtoccs.R | 834481e3e914f99d063690f630fe0f43589be431 | [] | no_license | IIT-IBM-BurningGlass/Job_Analytics_and_Visualizations | 6b3c1a7ae93804e555e2b7a8c4aa06aea6c626d8 | 64bfe17abe21d66e8cc5df2fed8829e1e3802256 | refs/heads/master | 2020-07-11T15:23:24.712465 | 2016-11-17T09:12:29 | 2016-11-17T09:12:29 | 74,000,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 756 | r | get_bgtoccs.R | ###############################################
#Original author: Dan Liu
#Last modified by: Dan Liu
#Last modified time: 11/16/2016
#get the occupation's bgtoccs data
###############################################
library("rjson")
library("XML")
# Collect the "bgtOcc" field from every occupation JSON file in the data
# directory and write the (id, bgtoccs) pairs out as a CSV.
# NOTE(review): setwd() to an absolute user path (and again on every loop
# iteration below) is fragile -- consider file.path() / full paths instead.
setwd("/Users/Dan/IBM project/data/new/occupations")
occupations_files = list.files(pattern="*.json")
id = c()
bgtoccs = c()
for (i in 1:length(occupations_files)){
  # Re-setting the working directory each iteration is redundant here.
  setwd("/Users/Dan/IBM project/data/new/occupations")
  json_file<-occupations_files[i]
  json_data<-fromJSON(file=json_file)
  # File id = file name with the trailing ".json" (5 chars) removed.
  id = c(id, substring(json_file, 1, nchar(json_file)-5))
  bgtoccs = c(bgtoccs, json_data$result$data$bgtOcc)
}
bgtoccs_data = cbind(id, bgtoccs)
write.csv(bgtoccs_data, file= "bgtoccs_data.csv", row.names=FALSE)
849bffec9fa5fcb7d4585db3e097d11d9b454170 | 0327f08a07b06cdc0bd96a73cb3ec75f57929f19 | /plot3.R | eef24774696364ddb66d9ea7c165637ae7a34312 | [] | no_license | jabutler66/ExData_Plotting1 | 75c4990edbb52dbb6b7d6ded9ee72f550634ed10 | 6ef4076690f674bf658673d63a84a6b5ed04e40a | refs/heads/master | 2021-01-21T06:24:44.112668 | 2015-10-09T22:40:42 | 2015-10-09T22:40:42 | 43,920,000 | 0 | 0 | null | 2015-10-08T22:39:09 | 2015-10-08T22:39:09 | null | UTF-8 | R | false | false | 1,259 | r | plot3.R | # Course Project 1 for Exploratory Data Class
# create graphs for Electric Power Consumption Dataset
# Plot 3
library(dplyr)
# Read in data set
txtfile <-"exdata-data-household_power_consumption/household_power_consumption.txt"
tempfile <- read.table(txtfile, header = TRUE, sep = ";", na.strings = "?")
# re-format dates
tempfile$Date <- strptime(tempfile$Date, "%d/%m/%Y")
tempfile$Date <- as.Date(tempfile$Date, "%Y-%m-%d")
# subset the dates for the project
powerset <- filter(tempfile, (Date == "2007-02-01" | Date == "2007-02-02"))
class(powerset$Date)
# reformat time
powerset2 <- mutate(powerset, datetime = as.POSIXct(paste(Date, Time)))
# create plot 3 and output to a png file
png(file = "plot3.png", width = 480, height = 480)
with(powerset2, plot(datetime, Sub_metering_1, col = "black", type = "l", pch = 46, xlab = "", ylab = "Energy sub metering"))
with(powerset2, lines(datetime, Sub_metering_2, col = "red", type = "l", pch = 46, xlab = "", ylab = "Energy sub metering"))
with(powerset2, lines(datetime, Sub_metering_3, col = "blue", type = "l", pch = 46, xlab = "", ylab = "Energy sub metering"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
160ab05ec616bc0a3a8a7067dc1bd5d8c9532ccd | a903ed44358270d8261b0f94da35f20eeda5f3b4 | /R/post.R | c26a573015e84552eaa75979c64703fe3df9f5d8 | [] | no_license | JohnNay/rtweet | a168db17ca7e353d02ab4bf025aae217aa032df1 | 6f2fe8b17c7f8db3d350072b217ed3e5ae1c2b1d | refs/heads/master | 2021-01-22T21:32:37.719375 | 2017-03-19T00:22:45 | 2017-03-19T00:22:45 | 85,440,656 | 1 | 0 | null | 2017-03-19T00:21:01 | 2017-03-19T00:21:01 | null | UTF-8 | R | false | false | 7,387 | r | post.R | #' post_tweet
#'
#' @description Posts status update to user's Twitter account
#'
#' @param status Character, tweet status. Must be 140
#' characters or less.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#'
#' @examples
#' \dontrun{
#' post_tweet("my first rtweet #rstats")
#' }
#' @family post
#' @export
post_tweet <- function(status = "my first rtweet #rstats",
                       token = NULL) {
  # Post a status update (tweet) to the authenticated user's account.
  #
  # status: single character string, at most 140 characters (statuses
  #   containing "http" are exempt from the length check below, since
  #   t.co link shortening changes the effective length).
  # token: OAuth token; NULL lets check_token() fetch one.
  #
  # Returns the HTTP response from TWIT(), invisibly -- consistent with
  # post_favorite() and post_friendship().
  query <- "statuses/update"
  stopifnot(is.character(status))
  # A single stopifnot() covers the length check; the original had a second,
  # unreachable `if (length(status) > 1)` branch after it.
  stopifnot(length(status) == 1)
  if (all(nchar(status) > 140, !grepl("http", status))) {
    stop("cannot exceed 140 characters.", call. = FALSE)
  }
  token <- check_token(token, query)
  params <- list(status = status)
  url <- make_url(query = query, param = params)
  # get = FALSE requests a POST rather than a GET.
  r <- TWIT(get = FALSE, url, token)
  if (r$status_code != 200) {
    message(paste0(
      "something didn't work. are you using a token associated ",
      "with *your* Twitter account? if so you may need to set read/write ",
      "permissions or reset your token at apps.twitter.com."))
  } else {
    # Bug fix: previously this success message was printed even when the
    # request above had failed.
    message("your tweet has been posted!")
  }
  invisible(r)
}
#' post_follow
#'
#' @description Follows target twitter user.
#'
#' @param user Screen name or user id of target user.
#' @param destroy Logical indicating whether to post (add) or
#' remove (delete) target tweet as favorite.
#' @param mute Logical indicating whether to mute the intended
#' friend (you must already be following this account prior
#' to muting them)
#' @param notify Logical indicating whether to enable notifications
#' for target user. Defaults to false.
#' @param retweets Logical indicating whether to enable retweets
#' for target user. Defaults to true.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#' @aliases follow_user
#' @examples
#' \dontrun{
#' post_follow("BarackObama")
#' }
#' @family post
#' @export
post_follow <- function(user,
                        destroy = FALSE,
                        mute = FALSE,
                        notify = FALSE,
                        retweets = TRUE,
                        token = NULL) {
  # Follow, unfollow, mute, or update notification/retweet settings for
  # `user`, choosing the Twitter endpoint from the flag combination below.
  stopifnot(is.atomic(user), is.logical(notify))
  token <- check_token(token)
  # Branch precedence: disabling retweets (without destroy) wins, then
  # mute, then destroy, then plain follow.
  # NOTE(review): if both destroy and retweets = FALSE, or both destroy
  # and mute, only the earlier branch's endpoint is hit -- confirm intended.
  if (all(!destroy, !retweets)) {
    query <- "friendships/update"
    params <- list(
      user_type = user,
      notify = notify,
      retweets = retweets)
  } else if (mute) {
    query <- "mutes/users/create"
    params <- list(
      user_type = user)
  } else if (destroy) {
    query <- "friendships/destroy"
    params <- list(
      user_type = user,
      notify = notify)
  } else {
    query <- "friendships/create"
    params <- list(
      user_type = user,
      notify = notify)
  }
  # Replace the "user_type" placeholder name with whatever .id_type()
  # returns for this input (presumably screen_name vs user_id -- confirm).
  names(params)[1] <- .id_type(user)
  url <- make_url(query = query, param = params)
  r <- TWIT(get = FALSE, url, token)
  if (r$status_code != 200) {
    message(paste0(
      "something didn't work. are you using a token associated ",
      "with *your* Twitter account? if so you may need to set read/write ",
      "permissions or reset your token at apps.twitter.com."))
  }
  # NOTE(review): returns the response visibly, unlike post_favorite()
  # and post_friendship() which return invisible(r).
  r
}
#' post_unfollow
#'
#' Remove, or unfollow, current twitter friend. Wrapper function
#' for destroy version of follow_user.
#'
#' @param user Screen name or user id of target user.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#' @aliases unfollow_user
#' @family post
#' @export
post_unfollow_user <- function(user, token = NULL) {
  # Unfollowing is simply the destroy variant of post_follow().
  post_follow(user, token = token, destroy = TRUE)
}
#' post_mute
#'
#' Mute, or hide all content coming from, current twitter friend.
#' Wrapper function for mute version of follow_user.
#'
#' @param user Screen name or user id of target user.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#' @aliases mute_user
#' @family post
#' @export
post_mute <- function(user, token = NULL) {
  # Muting is delegated to post_follow() with its mute switch enabled.
  post_follow(user, token = token, mute = TRUE)
}
#' post_favorite
#'
#' @description Favorites target status id.
#'
#' @param status_id Status id of target tweet.
#' @param destroy Logical indicating whether to post (add) or
#' remove (delete) target tweet as favorite.
#' @param include_entities Logical indicating whether to
#' include entities object in return.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#' @aliases post_favourite favorite_tweet
#' @examples
#' \dontrun{
#' rt <- search_tweets("rstats")
#' r <- lapply(rt$user_id, post_favorite)
#' }
#' @family post
#' @export
post_favorite <- function(status_id,
                          destroy = FALSE,
                          include_entities = FALSE,
                          token = NULL) {
  # Favorite (or, with destroy = TRUE, un-favorite) the tweet identified
  # by `status_id` on behalf of the authenticated user.
  stopifnot(is.atomic(status_id))
  token <- check_token(token)
  if (destroy) {
    query <- "favorites/destroy"
  } else {
    query <- "favorites/create"
  }
  params <- list(
    id = status_id)
  # NOTE(review): `include_entities` is accepted but never added to
  # `params`, so it currently has no effect on the request -- confirm.
  url <- make_url(query = query, param = params)
  # get = FALSE requests a POST rather than a GET.
  r <- TWIT(get = FALSE, url, token)
  if (r$status_code != 200) {
    message(paste0(
      "something didn't work. are you using a token associated ",
      "with *your* Twitter account? if so you may need to set read/write ",
      "permissions or reset your token at apps.twitter.com."))
  }
  # Return the HTTP response invisibly.
  invisible(r)
}
#' post_friendship
#'
#' Updates friendship notifications and retweet abilities.
#'
#' @param user Screen name or user id of target user.
#' @param device Logical indicating whether to enable or disable
#' device notifications from target user behaviors. Defaults
#' to false.
#' @param retweets Logical indicating whether to enable or disable
#' retweets from target user behaviors. Defaults to false.
#' @param token OAuth token. By default \code{token = NULL}
#' fetches a non-exhausted token from an environment
#' variable tokens.
#' @aliases friendship_update
#' @family post
#' @export
post_friendship <- function(user,
                            device = FALSE,
                            retweets = FALSE,
                            token = NULL) {
  # Update friendship settings for `user`: device notifications and
  # whether their retweets appear in the authenticated user's timeline.
  stopifnot(is.atomic(user), is.logical(device),
            is.logical(retweets))
  token <- check_token(token)
  query <- "friendships/update"
  params <- list(
    user_type = user,
    device = device,
    retweets = retweets)
  # Replace the "user_type" placeholder name with whatever .id_type()
  # returns for this input (presumably screen_name vs user_id -- confirm).
  names(params)[1] <- .id_type(user)
  url <- make_url(query = query, param = params)
  # get = FALSE requests a POST rather than a GET.
  r <- TWIT(get = FALSE, url, token)
  if (r$status_code != 200) {
    message(paste0(
      "something didn't work. are you using a token associated ",
      "with *your* Twitter account? if so you may need to set read/write ",
      "permissions or reset your token at apps.twitter.com."))
  }
  # Return the HTTP response invisibly.
  invisible(r)
}
|
8ad0b091a53df258760020fa95f96e65d325616f | 04c3a615d3b456608cc5dce5c754c0ae99ee4299 | /cachematrix.R | ae6c44eacd4bb2ec7e1555e2211a0cf4f88bd62c | [] | no_license | Flippinglakes/ProgrammingAssignment2 | b854514f06dd518db834443c2f681e6d64bba69e | 85341f0efaa676fbaf0035739dc48c27d555779f | refs/heads/master | 2021-01-17T04:29:06.878885 | 2017-02-23T19:36:33 | 2017-02-23T19:36:33 | 82,961,099 | 0 | 0 | null | 2017-02-23T19:03:44 | 2017-02-23T19:03:44 | null | UTF-8 | R | false | false | 852 | r | cachematrix.R | ## The functions makeCacheMatrix and cachesolve work together to calculate and cache, or retrieve, the inverse of a matrix
## Makes a matrix object which can be used to cache the inverse of a matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache matrix" object: a list of closures over `x` and a
  # cached inverse, with accessors set/get/setsolve/getsolve.
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setsolve = function(solve) cached_inverse <<- solve,
    getsolve = function() cached_inverse
  )
}
## Calculates the inverse of a matrix and stores it in the makeCacheMatrix environment.
## If the inverse has already been cached in makeCacheMatrix cachesolve will retrieve it.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix held by the makeCacheMatrix object
  # `x`, computing and caching it on first use.
  #
  # x:   object created by makeCacheMatrix()
  # ...: further arguments forwarded to solve()
  m <- x$getsolve()
  if (!is.null(m)) {
    # Inverse already cached: reuse it.
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Bug fix: the original call was `solve(data,... = )`, which is a parse
  # error (a named argument with no value); forward `...` properly.
  m <- solve(data, ...)
  x$setsolve(m)
  m  # Return a matrix that is the inverse of 'x'
}
|
81e34ed018ff43a82356b44f50b785bb121b146b | b9a00c239f767f41b54fbedef26e21342682822d | /scripts/script_day2.R | 439126c404c060a4b697269ba10f8af8ed734867 | [] | no_license | mark-andrews/rsdwr | 2cb29b511d8d7d4fd4b04172a942fd284e0016b6 | 6d2e8aed167ba37f93a021267f80a7b286ad146c | refs/heads/master | 2022-12-18T16:35:36.096883 | 2020-09-21T07:50:27 | 2020-09-21T07:50:27 | 294,670,868 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,811 | r | script_day2.R | library(tidyverse)
recall_df <- read_csv('data/repeated_measured_a.csv')
recall_df_long <- pivot_longer(recall_df,
cols = -Subject,
names_to = 'condition',
values_to = 'score')
pivot_wider(recall_df_long,
names_from = 'condition',
values_from = 'score')
recall_b_df <- read_csv('data/repeated_measured_b.csv')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = 'condition',
values_to = 'score') %>%
print(n = Inf)
recall_b_df_long <- pivot_longer(recall_b_df,
cols = -Subject,
names_to = 'condition',
values_to = 'score')
separate(recall_b_df_long, col = condition, into = c("cue", "emotion"), sep = '_')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = 'condition',
values_to = 'score') %>%
separate(col = condition,
into = c("cue", "emotion"),
sep = '_')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = c("cue", 'emotion'),
names_pattern = "(.*)_(.*)",
values_to = 'score')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = c("cue", 'emotion'),
names_sep = '_',
values_to = 'score')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = c("cue", 'emotion'),
names_sep = '[_\\-,\\;]',
values_to = 'score')
pivot_longer(recall_b_df,
cols = -Subject,
names_to = c("cue", 'emotion'),
names_pattern = "(Free|Cued)_(Neg|Neu|Pos)",
values_to = 'score')
|
b3eb0a367babfb241c662640471d9d14a24784f8 | 2b91f630f568179c6e3d95b8675ad09f4c0d65a9 | /other scripts/db.R | 255a3d6976b0bf1a687451d6da54a1f7829b0ae0 | [] | no_license | agualtieri/JMMI_partners_tracker | f4b18504660f6013adb8de593d6255318b838c1c | 509edf20e0f8326a1605999cbec2da424c9ab5c8 | refs/heads/master | 2022-07-26T04:59:56.421674 | 2020-05-24T08:32:31 | 2020-05-24T08:32:31 | 252,101,652 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,930 | r | db.R | ### Partners Tracker - Online Dashboard Code
## Yemen Team Data Unit
## 08-03-200
## V1
###########
#Build Dashboard
###########
ui <- fluidPage(
#theme = ".small-box.bg-yellow { background-color: #FFFF00 !important; color: #000000 !important; }",
titlePanel("JMMI Partner Coverage Sheet"),
#includeCSS("AdminLTE.css"),
#includeCSS("shinydashboard.css"),
# Create a new Row in the UI for selectInputs
# Create a new row for the table.
mainPanel(
selectInput("Geographic Area",
"Geographic Area:",
c("All",
sort(unique(as.character(dt_gov$geo))))),
#https://stackoverflow.com/questions/42532879/r-shiny-layout-side-by-side-chart-and-or-table-within-a-tab
id = 'data',
tabPanel(h4("Governorate"),
column(12,
h3("North Governorate"),
DT::dataTableOutput("North_Table")
),
column(12,
h3("South Governorate"),
DT::dataTableOutput("South_Table")
),
column(12,
h3("Contested Governorate"),
DT::dataTableOutput("Contested_Table")
)
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
  # Shiny server for the JMMI partner coverage dashboard: renders one
  # summary table plus North/South/Contested governorate tables.
  # NOTE(review): the ui defines selectInput(inputId = "Geographic Area"),
  # but this server reads input$Governorate, which that ui never creates --
  # the filtering conditions below will not react as intended; confirm.
  # `partner_summary`, `dt_gov`, `dt_gov_south`, `dt_gov_cnt` are assumed
  # to exist in the enclosing environment (built elsewhere in the script).
  output$partner_sum <-DT::renderDataTable(DT::datatable({
    data<-partner_summary
    data
  }))
  output$North_Table <- DT::renderDataTable(DT::datatable({
    data <- dt_gov
    # NOTE(review): dt_gov$geo is (presumably) a vector, so this `if`
    # condition is not a single TRUE/FALSE -- verify intent.
    if (dt_gov$geo == "North") {
      data <- data %>%
        group_by(Date) %>%
        select(-c("governorate_ID", "governorate_name", "geo")) %>%
        dplyr::summarise(total_obs=dplyr::n())
    }
    data
  }))
  output$South_Table <- DT::renderDataTable(DT::datatable({
    data <- dt_gov_south
    # Count observations per Date x governorate x org, then widen so each
    # governorate area becomes a column.
    if (input$Governorate == "South") {
      data <- data[data$gov_name == input$Governorate,]
      data<-data%>%
        dplyr::mutate(area = gov_name)%>%
        group_by(Date,area, org)%>%
        dplyr::summarise(total_obs=dplyr::n())%>%
        dplyr::select(-org)%>%
        spread(area,total_obs)
    }
    data
  }))
  output$Contested_Table <- DT::renderDataTable(DT::datatable({
    data <- dt_gov_cnt
    # Same reshaping as South_Table, applied to contested governorates.
    if (input$Governorate == "Contested") {
      data <- data[data$gov_name == input$Governorate,]
      data<-data%>%
        dplyr::mutate(area = gov_name)%>%
        group_by(Date,area, org)%>%
        dplyr::summarise(total_obs=dplyr::n())%>%
        dplyr::select(-org)%>%
        spread(area,total_obs)
    }
    data
  }))
}
# Run the application
shinyApp(ui = ui, server = server)
#############
#Export
#############
#setwd("C:/Users/REACH_AO_YEMEN/Dropbox/REACH/YEM/YEM Assessment/YEM Cash and Markets/02_JMMI/Partner Tracker Output")
#title_Final <- paste0("./Partner_Tracker_Update-",Sys.Date(),".xlsx")
#write_xlsx(counts_by_org_per_JMMI,title_Final)
|
f86b3ad539b4e9bde58d00ad6dc034c86ef0800c | bd8349fea7d6c7302d1dbb544cab639ec57846e6 | /man/bmi.Rd | 0f8de0a6e90421fbc4bb3fe70ed99b79995a560b | [] | no_license | hlennon/LCTMtools | 47ca70975739f134d432f01643cae9f50d2b0b63 | ba62f239773618bd22afae0ca030ed8a282b2fb2 | refs/heads/master | 2022-12-05T05:14:51.441761 | 2022-12-01T10:52:00 | 2022-12-01T10:52:00 | 125,241,020 | 21 | 13 | null | 2019-06-28T08:59:12 | 2018-03-14T16:20:23 | R | UTF-8 | R | false | true | 667 | rd | bmi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bmi.R
\docType{data}
\name{bmi}
\alias{bmi}
\title{Body Mass Index (BMI) repeated measures of 10,000 individuals in wide format.}
\format{A wide format data frame of simulated BMI values of 10,000 individuals.
\describe{
\item{id}{Individual ID}
\item{bmi}{Body mass index of the individual at times T1,T2, T3 and T4, in kg/m^2}
\item{T}{Time of BMI measure, in years}
\item{true_class}{Tag to identify the class the individual BMI data was simulated from}
}}
\usage{
bmi
}
\description{
A wide format data frame of simulated BMI values of 10,000 individuals.
}
\keyword{datasets}
|
81b3252dec87be763f0ba043794249e935ea46c8 | 389e4267342c395fd480e78d732f1caa0f7a9435 | /plot3.R | a93de5d886c45b8fdd4ecba8fadef63f61610fc5 | [] | no_license | BlauBeereB/ExData_Plotting1 | 619fe00b1a0afb98ad37f2b8579b42ace162c5bf | 7ce1429318691002b161ef48d15e229e34445a22 | refs/heads/master | 2020-12-03T05:18:46.143443 | 2015-07-12T11:33:06 | 2015-07-12T11:33:06 | 38,804,101 | 0 | 0 | null | 2015-07-09T07:13:14 | 2015-07-09T07:13:13 | null | UTF-8 | R | false | false | 3,284 | r | plot3.R | # Note: The data file 'called household_power_consumption.txt'
# needs to be in a subfolder called 'exdata'.
# Clear any old variables:
rm(list = ls())
# Set name for directory with data files:
dataDir <- "exdata"
# Set names for original and new data file:
origFileName <- 'household_power_consumption.txt'
editFileName <- 'household_power_consumption.r'
# Load previously saved data set or
# create edited data set with relevant data:
if (file.exists(editFileName)) {
# Load previously saved data set:
load(editFileName)
print('Edited data set has been loaded from file.')
} else {
# Read in the data set:
origDataPath <- paste0(dataDir, '/', origFileName)
origData <- read.table(origDataPath,
header = TRUE,
sep = ';',
na.strings = '?',
stringsAsFactors = FALSE,
colClasses = c('character',
'character',
'numeric',
'numeric',
'numeric',
'numeric',
'numeric',
'numeric',
'numeric'))
# Inspect the original data set:
#print(head(origData))
#print(str(origData))
# Only keep data from 1/2/2007 and 2/2/2007:
editData <- subset(origData, Date == '1/2/2007' |
Date == '2/2/2007')
# Delete variables that are not needed anymore:
remove(origDataPath)
remove(origData)
# Save edited data set:
save(editData, file=editFileName)
print('Edited data set has been loaded from file.')
}
# Delete variables that are not needed anymore:
remove(origFileName)
remove(editFileName)
# Inspect the edited data set:
#print(head(editData))
#print(str(editData))
# Combine date and time into a new temporary variable:
DateTimeTmp <- paste(editData$Date, editData$Time)
#print(head(DateTimeTmp))
# Convert combined date and time format:
DateTimeTmp <- strptime(DateTimeTmp, format='%e/%m/%Y %H:%M:%S')
#print(head(DateTimeTmp))
# Add combined date and time as column to data frame:
editData$DTCombo <- DateTimeTmp
# Remove temporary variable:
remove(DateTimeTmp)
# Change local settings for time to English:
Sys.setlocale('LC_TIME', 'English')
# Specify png graphics device:
png(filename='plot3.png',
width = 480,
height = 480)
# Make plot of first data set:
plot(editData$DTCombo,
editData$Sub_metering_1,
type='l',
xlab='',
ylab='Energy sub metering',
main='')
# Add two more data sets:
lines(editData$DTCombo,
editData$Sub_metering_2,
col='red')
lines(editData$DTCombo,
editData$Sub_metering_3,
col='blue')
# Add legend:
legend('topright',
lty=1,
col=c('black', 'red', 'blue'),
legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
# Finish plot:
dev.off()
print('Plot has been made and saved.')
# Change local settings for time back to default:
Sys.setlocale('LC_TIME', '')
|
f64847c0a4da753cc967cde7ae4d30435a720f90 | 29585dff702209dd446c0ab52ceea046c58e384e | /simmer/demo/E-ctmc.R | 97ce7503dd12709f0065e4b7c8682c3be4104dbe | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,957 | r | E-ctmc.R | # ---- setup
library(simmer)
library(ggplot2)
library(dplyr)
library(tidyr)
set.seed(1234)
# ---- example1-part1
# Arrival rate
lambda <- 3/20
# Service rate (cars, motorcycles)
mu <- c(1/8, 1/3)
# Probability of car
p <- 0.75
# Theoretical resolution
A <- matrix(c(1, mu[1], 0,
1, -lambda, (1-p)*lambda,
1, mu[2], -mu[2]), byrow=T, ncol=3)
B <- c(1, 0, 0)
P <- solve(t(A), B)
N_average_theor <- sum(P * c(1, 0, 1)) ; N_average_theor
# ---- example1-part2
option.1 <- function(t) {
car <- create_trajectory() %>%
seize("pump", amount=1) %>%
timeout(function() rexp(1, mu[1])) %>%
release("pump", amount=1)
mcycle <- create_trajectory() %>%
seize("pump", amount=1) %>%
timeout(function() rexp(1, mu[2])) %>%
release("pump", amount=1)
simmer() %>%
add_resource("pump", capacity=1, queue_size=0) %>%
add_generator("car", car, function() rexp(1, p*lambda)) %>%
add_generator("mcycle", mcycle, function() rexp(1, (1-p)*lambda)) %>%
run(until=t)
}
# ---- example1-part3
option.2 <- function(t) {
vehicle <- create_trajectory() %>%
seize("pump", amount=1) %>%
branch(function() sample(c(1, 2), 1, prob=c(p, 1-p)), c(T, T),
create_trajectory("car") %>%
timeout(function() rexp(1, mu[1])),
create_trajectory("mcycle") %>%
timeout(function() rexp(1, mu[2]))) %>%
release("pump", amount=1)
simmer() %>%
add_resource("pump", capacity=1, queue_size=0) %>%
add_generator("vehicle", vehicle, function() rexp(1, lambda)) %>%
run(until=t)
}
# ---- example1-part4
option.3 <- function(t) {
  # Gas station M/M/1/1 variant: a single generator of "vehicle" arrivals;
  # the vehicle type (car vs motorcycle) is decided implicitly by sampling
  # the service-time distribution inside timeout().
  # Relies on globals: p (car probability), mu (service rates), lambda
  # (arrival rate). Runs the simmer simulation until time `t`.
  vehicle <- create_trajectory() %>%
    seize("pump", amount=1) %>%
    timeout(function() {
      if (runif(1) < p) rexp(1, mu[1]) # car
      else rexp(1, mu[2]) # mcycle
    }) %>%
    release("pump", amount=1)
  simmer() %>%
    add_resource("pump", capacity=1, queue_size=0) %>%
    add_generator("vehicle", vehicle, function() rexp(1, lambda)) %>%
    run(until=t)
}
# ---- example1-part5
gas.station <- option.3(5000)
# Evolution + theoretical value
graph <- plot_resource_usage(gas.station, "pump", items="system")
graph + geom_hline(yintercept=N_average_theor)
# ---- example1-part6
library(microbenchmark)
t <- 1000/lambda
tm <- microbenchmark(option.1(t),
option.2(t),
option.3(t))
graph <- autoplot(tm)
graph + scale_y_log10(breaks=function(limits) pretty(limits, 5)) +
ylab("Time [milliseconds]")
# ---- example2-part1
# Theoretical resolution
A <- matrix(c(1, 0, 0, mu[1], 0,
1, -(1-p)*lambda-mu[1], mu[1], 0, 0,
1, p*lambda, -lambda, (1-p)*lambda, 0,
1, 0, mu[2], -(1-p)*lambda-mu[2], (1-p)*lambda,
1, 0, 0, mu[2], -mu[2]),
byrow=T, ncol=5)
B <- c(1, 0, 0, 0, 0)
P <- solve(t(A), B)
N_average_theor <- sum(P * c(2, 1, 0, 1, 2)) ; N_average_theor
# ---- example2-part2
option.1 <- function(t) {
car <- create_trajectory() %>%
seize("pump", amount=function() {
if (env %>% get_server_count("pump")) 2 # rejection
else 1 # serve
}) %>%
timeout(function() rexp(1, mu[1])) %>%
release("pump", amount=1)
mcycle <- create_trajectory() %>%
seize("pump", amount=1) %>%
timeout(function() rexp(1, mu[2])) %>%
release("pump", amount=1)
env <- simmer() %>%
add_resource("pump", capacity=1, queue_size=1) %>%
add_generator("car", car, function() rexp(1, p*lambda)) %>%
add_generator("mcycle", mcycle, function() rexp(1, (1-p)*lambda))
env %>% run(until=t)
}
# ---- example2-part3
option.2 <- function(t) {
vehicle <- create_trajectory() %>%
branch(function() sample(c(1, 2), 1, prob=c(p, 1-p)), c(F, F),
create_trajectory("car") %>%
seize("pump", amount=function() {
if (env %>% get_server_count("pump")) 2 # rejection
else 1 # serve
}) %>%
timeout(function() rexp(1, mu[1])) %>%
release("pump", amount=1), # always 1
create_trajectory("mcycle") %>%
seize("pump", amount=1) %>%
timeout(function() rexp(1, mu[2])) %>%
release("pump", amount=1))
env <- simmer() %>%
add_resource("pump", capacity=1, queue_size=1) %>%
add_generator("vehicle", vehicle, function() rexp(1, lambda))
env %>% run(until=t)
}
# ---- example2-part4
option.3 <- function(t) {
vehicle <- create_trajectory("car") %>%
set_attribute("vehicle", function() sample(c(1, 2), 1, prob=c(p, 1-p))) %>%
seize("pump", amount=function(attrs) {
if (attrs["vehicle"] == 1 &&
env %>% get_server_count("pump")) 2 # car rejection
else 1 # serve
}) %>%
timeout(function(attrs) rexp(1, mu[attrs["vehicle"]])) %>%
release("pump", amount=1) # always 1
env <- simmer() %>%
add_resource("pump", capacity=1, queue_size=1) %>%
add_generator("vehicle", vehicle, function() rexp(1, lambda))
env %>% run(until=t)
}
# ---- example2-part5
option.4 <- function(t) {
vehicle <- create_trajectory() %>%
branch(function() sample(c(1, 2), 1, prob=c(p, 1-p)), c(F, F),
create_trajectory("car") %>%
seize("pump", amount=3) %>%
timeout(function() rexp(1, mu[1])) %>%
release("pump", amount=3),
create_trajectory("mcycle") %>%
seize("pump", amount=2) %>%
timeout(function() rexp(1, mu[2])) %>%
release("pump", amount=2))
simmer() %>%
add_resource("pump", capacity=3, queue_size=2) %>%
add_generator("vehicle", vehicle, function() rexp(1, lambda)) %>%
run(until=t)
}
# ---- example2-part6
option.5 <- function(t) {
car <- create_trajectory() %>%
seize("pump", amount=3) %>%
timeout(function() rexp(1, mu[1])) %>%
release("pump", amount=3)
mcycle <- create_trajectory() %>%
seize("pump", amount=2) %>%
timeout(function() rexp(1, mu[2])) %>%
release("pump", amount=2)
simmer() %>%
add_resource("pump", capacity=3, queue_size=2) %>%
add_generator("car", car, function() rexp(1, p*lambda)) %>%
add_generator("mcycle", mcycle, function() rexp(1, (1-p)*lambda)) %>%
run(until=t)
}
# ---- example2-part7
gas.station <- option.1(5000)
# Evolution + theoretical value
graph <- plot_resource_usage(gas.station, "pump", items="system")
graph + geom_hline(yintercept=N_average_theor)
# ---- example2-part8
gas.station <- option.5(5000)
limits <- data.frame(item = c("system"), value = c(2))
graph <- gas.station %>% get_mon_resources() %>%
gather(item, value, server, queue, system) %>%
mutate(value = round(value * 2/5), # rescaling here <------
item = factor(item)) %>%
filter(item %in% "system") %>%
group_by(resource, replication, item) %>%
mutate(mean = c(0, cumsum(head(value, -1) * diff(time))) / time) %>%
ungroup() %>%
ggplot() + aes(x=time, color=item) +
geom_line(aes(y=mean, group=interaction(replication, item))) +
ggtitle("Resource usage: pump") +
ylab("in use") + xlab("time") + expand_limits(y=0) +
geom_hline(aes(yintercept=value, color=item), limits, lty=2)
graph + geom_hline(yintercept=N_average_theor)
# ---- example2-part9
library(microbenchmark)
t <- 1000/lambda
tm <- microbenchmark(option.1(t),
option.2(t),
option.3(t),
option.4(t),
option.5(t))
graph <- autoplot(tm)
graph + scale_y_log10(breaks=function(limits) pretty(limits, 5)) +
ylab("Time [milliseconds]")
|
9bd2d9fd877f6374ec10bb377bf3493d1aeefb50 | 43b3606a3fac8fceb8363e91e37e6c7b9ec3fc18 | /house_pred.R | 73ee1785152d8ea9c5eb157b71d7772818f693ce | [] | no_license | uthreshh/DMP_Project | 30bab2935ab762bf7f724d14202c8a76135228fa | e58ecc475f47f623dd643778a20efe4e6676501e | refs/heads/main | 2023-04-09T01:38:20.246253 | 2021-04-25T21:40:20 | 2021-04-25T21:40:20 | 359,941,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,148 | r | house_pred.R | library(readr)
library(dplyr)
library(ggplot2)
library(glmnet)
library(corrplot)
library(caret)
library(MASS)
library(olsrr)
library(modelr)
library(glasso)
# Importing data
data_train <- read_csv('D:/MS DS/Sem 1/DMP/Project/Dataset/train.csv')
data_test <- read_csv('D:/MS DS/Sem 1/DMP/Project/Dataset/test.csv')
sample <- read_csv('D:/MS DS/Sem 1/DMP/Project/Dataset/sample_submission.csv')
# Count NA
countNA <- function(data, byrow = FALSE) {
  # Count missing values per column (default) or per row of `data`.
  #
  # Args:
  #   data:  a data.frame (or matrix-like object accepted by is.na()).
  #   byrow: if TRUE, count NAs per row; otherwise per column.
  #
  # Returns:
  #   A data.frame with columns `NAs` (number of missing values) and
  #   `cols` (the column name, or row name when byrow = TRUE), one row
  #   per column/row of `data`, in original order.
  #
  # Vectorised replacement for the original element-wise loop, which grew
  # the result with rbind() on every iteration (quadratic behaviour) and
  # duplicated the same logic in both branches.
  if (byrow) {
    na_counts <- rowSums(is.na(data))
    labels <- rownames(data)
  } else {
    na_counts <- colSums(is.na(data))
    labels <- colnames(data)
  }
  data.frame(NAs = na_counts, cols = labels,
             row.names = NULL, stringsAsFactors = FALSE)
}
train_na <- countNA(data_train)
train_na <- train_na %>%
filter(NAs > 0)
test_na <- countNA(data_test)
test_na <- test_na %>%
filter(NAs > 0)
train_na[,"NAs"] <- sort(train_na[,"NAs"], decreasing = TRUE)
test_na[,"NAs"] <- sort(test_na[,"NAs"], decreasing = TRUE)
ggplot(train_na, aes(x = NAs, y = cols ))+
geom_bar(stat = "identity", fill = "red")+
theme_minimal()+
labs(title = "NA values in train file",
x = "NA Values",
y = "Column Names")+
theme(plot.title = element_text(hjust = 0.5))+
geom_text(aes(label = NAs), vjust = 0.5,hjust = -0.2, size = 3.5)
ggplot(test_na, aes(x = NAs, y = cols))+
geom_bar(stat = "identity", fill = "blue")+
theme_minimal()+
labs(title = "NA values in test file",
x = "NA Values",
y = "Column Names")+
theme(plot.title = element_text(hjust = 0.5))+
geom_text(aes(label = NAs), vjust = 0.5,hjust = -0.2, size = 3.5)
# Imputations for data_train
lst <- c('PoolQC', 'MiscFeature', 'Alley', 'Fence','FireplaceQu',
'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond',
'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2',
'MasVnrType')
for(lst_name in lst){
data_train[,lst_name][is.na(data_train[,lst_name])] <- "None"
}
data_train[,'GarageYrBlt'][is.na(data_train[,'GarageYrBlt'])] <- 0
data_train[,'MasVnrArea'][is.na(data_train[,'MasVnrArea'])] <- 0
Mode <- function(x) {
  # Most frequent value of x (NAs excluded), returned as its table label
  # (a character string).
  counts <- table(x, useNA = "no")
  names(counts)[which.max(counts)]
}
data_train[is.na(data_train[,'Electrical']),'Electrical'] <-
Mode(data_train[,'Electrical'])
data_train <- subset(data_train, select = -c(GrLivArea, TotalBsmtSF, Id,
LotFrontage))
#Imputations for data_test
# Mirror of the training-set imputation: NA in these columns means the
# feature is absent, so fill with "None".
lst <- c('PoolQC', 'MiscFeature', 'Alley', 'Fence','FireplaceQu',
         'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond',
         'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2',
         'MasVnrType')
for(lst_name in lst){
  data_test[,lst_name][is.na(data_test[,lst_name])] <- "None"
}
# Numeric features of absent structures default to 0.
data_test[,'GarageYrBlt'][is.na(data_test[,'GarageYrBlt'])] <- 0
data_test[,'MasVnrArea'][is.na(data_test[,'MasVnrArea'])] <- 0
data_test[,'BsmtFinSF1'][is.na(data_test[,'BsmtFinSF1'])] <- 0
data_test[,'BsmtFinSF2'][is.na(data_test[,'BsmtFinSF2'])] <- 0
data_test[,'BsmtUnfSF'][is.na(data_test[,'BsmtUnfSF'])] <- 0
data_test[,'TotalBsmtSF'][is.na(data_test[,'TotalBsmtSF'])] <- 0
data_test[,'BsmtFullBath'][is.na(data_test[,'BsmtFullBath'])] <- 0
data_test[,'BsmtHalfBath'][is.na(data_test[,'BsmtHalfBath'])] <- 0
data_test[,'GarageArea'][is.na(data_test[,'GarageArea'])] <- 0
# Data-description default: "Typ" (typical functionality).
data_test[,'Functional'][is.na(data_test[,'Functional'])] <- "Typ"
# Impute missing GarageCars with the median among detached garages.
# NOTE(review): the second filter(GarageType == "Detchd") is redundant --
# temp was already restricted to detached garages above.
temp <- data_test %>%
  filter(GarageType == "Detchd")
temp <- subset(temp, select = c(GarageCars, GarageType))
temp <- temp %>%
  filter(GarageType == "Detchd")
data_test$GarageCars[is.na(data_test$GarageCars)] <-
  median(temp$GarageCars,na.rm=TRUE)
# Re-definition of Mode (identical to the one above); most frequent level.
Mode <- function(x){
  names(which.max(table(x,useNA="no")))
}
# Remaining categorical gaps are filled with the modal level.
data_test[is.na(data_test[,'Electrical']),'Electrical'] <-
  Mode(data_test[,'Electrical'])
data_test[is.na(data_test[,'MSZoning']),'MSZoning'] <-
  Mode(data_test[,'MSZoning'])
data_test[is.na(data_test[,'Utilities']),'Utilities'] <-
  Mode(data_test[,'Utilities'])
data_test[is.na(data_test[,'Exterior1st']),'Exterior1st'] <-
  Mode(data_test[,'Exterior1st'])
data_test[is.na(data_test[,'Exterior2nd']),'Exterior2nd'] <-
  Mode(data_test[,'Exterior2nd'])
data_test[is.na(data_test[,'KitchenQual']),'KitchenQual'] <-
  Mode(data_test[,'KitchenQual'])
data_test[is.na(data_test[,'SaleType']),'SaleType'] <-
  Mode(data_test[,'SaleType'])
# EDA
# NOTE(review): GrLivArea and TotalBsmtSF were removed from data_train by
# the subset() call in the imputation section above; the plots and
# filter() below will fail unless that drop happens after this section --
# verify the intended script ordering.
#Sales count
ggplot(data_train, aes(x = SalePrice))+
  geom_histogram(bins = 30, fill = "pink", color = "black")+
  labs(title = "Sales Price Count",
       x = "Sales Price",
       y = "Count")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
# Same distribution on a log10 scale (used as the model response later).
ggplot(data_train, aes(x = log10(SalePrice)))+
  geom_histogram(bins = 30, fill = "pink", color = "black")+
  labs(title = "Sale Price Count",
       x = "Sale Price",
       y = "Count")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
#Ground living area
ggplot(data_train, aes(x= GrLivArea ,y= SalePrice))+
  geom_point(color = "blue")
# Drop extreme living-area outliers before modeling.
data_train <- data_train %>%
  filter(GrLivArea < 4000)
ggplot(data_train, aes(x= GrLivArea ,y= log10(SalePrice)))+
  geom_point(color = "blue")+
  labs(title = "Total sq feet of Ground Living Area and Sale Price comparison",
       x = "Ground Living Area (sqft)",
       y = "Sale Price")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
# Total basement surface
ggplot(data_train, aes(x=TotalBsmtSF, y=log10(SalePrice)))+
  geom_point(color ="Dark green")+
  labs(title = "Total sq feet of Basement area and Sale Price comparison",
       x = "Total Basement Area (sqft)",
       y = "Sale Price")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
# year built
ggplot(data_train,aes(x = as.factor(YearBuilt), y = log10(SalePrice),
                      fill = as.factor(YearBuilt)))+
  geom_boxplot()+
  labs(title = "Sale Price vs Year Built",
       x = "Year Built",
       y = "Sale Price")+
  theme_minimal()+
  theme(legend.position = "none")+
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))+
  theme(plot.title = element_text(hjust = 0.5))
# Overall Quality
ggplot(data_train,aes(x = as.factor(OverallQual),y = log10(SalePrice),
                      fill = as.factor(OverallQual))) +
  geom_boxplot()+
  labs(title = "Overall Quality of house and Sale Price comparison ",
       x = "Overall Quality",
       y = "Sale Price")+
  theme_minimal()+
  scale_fill_discrete(name = "Overall Quality",
                      labels = c("Very Poor","Poor","Fair","Below Average",
                                 "Average","Above Average","Good","Very Good",
                                 "Excellent","Very Excellent"))+
  theme(plot.title = element_text(hjust = 0.5))
# Garage Cars
ggplot(data_train,aes(x = as.factor(GarageCars),y = log10(SalePrice))) +
  geom_boxplot(fill = "purple") +
  labs(title = "Size of garage in car capacity and Sale Price comparison",
       x = "Garage cars",
       y="Sale Price")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
# Plot Full bath
ggplot(data_train,aes(x=as.factor(FullBath),y=log10(SalePrice))) +
  geom_boxplot(fill = "gray") +
  labs(title = "Full Bathrooms and Sale Price comparison",
       x = "Full Bath",
       y = "Sale Price")+
  theme_minimal()+
  theme(plot.title = element_text(hjust = 0.5))
# Converting categorical data to numeric
# Split data_train into numeric columns and character/factor columns,
# integer-encode the latter, then recombine.
# NOTE(review): train and test are encoded independently with
# as.numeric(as.factor(x)), so the same level may map to different codes
# in the two sets if the observed levels differ -- verify before using the
# test-set predictions.
data_train_numeric <- Filter(is.numeric, data_train)
data_train_cat <- data_train[, sapply(data_train, class) %in%
                               c('character', 'factor')]
data_train_cat <- as.data.frame(sapply(data_train_cat,
                                       function(x) as.numeric(as.factor(x))))
# cat_train: encoded categoricals plus the response (for the corrplot only).
cat_train <- cbind(data_train_cat,SalePrice = data_train$SalePrice)
data_train <- cbind(data_train_cat,data_train_numeric)
data_test_numeric <- Filter(is.numeric, data_test)
data_test_cat <- data_test[, sapply(data_test, class) %in%
                             c('character', 'factor')]
data_test_cat <- as.data.frame(sapply(data_test_cat,
                                      function(x) as.numeric(as.factor(x))))
data_test <- cbind(data_test_cat,data_test_numeric)
# Correlation Plot
corrplot(cor(data_train), order="hclust",
         tl.col="gray", tl.srt=90, type = "lower")
corrplot(cor(cat_train), order="hclust",
         tl.col="red", tl.srt=90, type = "lower")
corrplot(cor(data_train_numeric), order="hclust",
         tl.col="red", tl.srt=90, type = "lower")
# Linear Model
# 75/25 train/validation split, full linear model on log10(SalePrice).
set.seed(100)
index <- sample(1:nrow(data_train), 0.75*nrow(data_train))
df_train <- data_train[index,]
df_test <- data_train[-index,]
fit <- lm(log10(SalePrice) ~ . , data = df_train)
summary(fit)
pred <- predict(fit, df_test)
# Back-transform predictions (10^) to the original price scale.
result_lm <- data.frame(cbind(Actual_Values = df_test$SalePrice,
                              Predicted_Values = 10^(pred)))
rmse = sqrt(mean(fit$residuals^2))
aic = AIC(fit)
r2 <- summary(fit)$r.squared
error = data.frame('RMSE' = rmse, 'AIC' = aic,
                   'R-Squared' = summary(fit)$r.squared)
# Keep the names of predictors significant at the 5% level (dropping the
# intercept) for a reduced model.
t <- summary(fit)$coefficients[,4] < 0.05
name <- names(which(t == 'TRUE'))
name <- name[-1]
# Hand-curated list of significant variables used to build the reduced
# formula (backticks protect names starting with digits).
var <- c("MSZoning","Street","ExterCond","BsmtExposure","BsmtFinType1",
         "HeatingQC","CentralAir","KitchenQual","Functional","PavedDrive",
         "PoolQC","SaleCondition","LotArea","OverallQual","OverallCond",
         "YearBuilt","BsmtFinSF1","BsmtFinSF2","`1stFlrSF`","`2ndFlrSF`",
         "BsmtFullBath","FullBath","HalfBath","KitchenAbvGr","TotRmsAbvGrd",
         "Fireplaces","GarageCars","WoodDeckSF","ScreenPorch","PoolArea",
         "YrSold")
formula <- as.formula(paste("log10(SalePrice)",
                            paste(var, collapse = "+"),
                            sep = "~"))
# Refit using only the predictors significant in the full model.
fit_name <- lm(formula, data = df_train)
pred_name <- predict(fit_name, df_test)
rmse_p = sqrt(mean(fit_name$residuals^2))
aic = AIC(fit_name)
# BUG FIX: the reduced model's error table previously reported 'rmse'
# (the full model's RMSE); it should report 'rmse_p' computed just above.
error_name = data.frame('RMSE' = rmse_p, 'AIC' = aic,
                        'R-Squared' = summary(fit_name)$r.squared)
result_name <- data.frame(cbind("Actual_Values" = df_test$SalePrice,
                                "Predicted_Values" = 10^(pred_name)))
aic_p <- AIC(fit_name)
r2_p <- summary(fit_name)$r.squared
# Actual vs predicted diagnostics for the full and reduced models.
# NOTE(review): this first plot mixes scales (x is log10 price, y is the
# back-transformed 10^pred price) -- confirm whether both axes should be
# on the same scale.
ggplot(df_test,aes(x = log10(SalePrice), y = 10^(pred)))+
  geom_point()+
  geom_smooth()+
  theme_minimal()+
  labs(title = "Actual Vales vs Predicted Values",
       x = "Sale Price",
       y = "Predicted Sale Price")+
  theme(plot.title = element_text(hjust = 0.5, vjust = 0.5))
ggplot(df_test,aes(x = SalePrice, y = pred))+
  geom_point()+
  geom_smooth()+
  theme_minimal()+
  labs(title = "Actual Vales vs Predicted Values",
       x = "Sale Price",
       y = "Predicted Sale Price")+
  theme(plot.title = element_text(hjust = 0.5, vjust = 0.5))
ggplot(df_test,aes(x = log10(SalePrice), y = 10^(pred_name)))+
  geom_point()+
  geom_smooth()+
  theme_minimal()+
  labs(title = "Actual Vales vs Predicted Values",
       x = "Sale Price",
       y = "Predicted Sale Price")+
  theme(plot.title = element_text(hjust = 0.5, vjust = 0.5))
# Residual scatter for both fits.
plot(residuals(fit))
plot(residuals(fit_name))
# QQ plots of residuals (add_residuals is from modelr).
df_train %>%
  add_residuals(fit, "resid") %>%
  ggplot(aes(sample=resid)) +
  geom_qq() +
  theme_minimal()
df_train %>%
  add_residuals(fit_name, "resid") %>%
  ggplot(aes(sample=resid)) +
  geom_qq() +
  theme_minimal()
# Using CV
# 10-fold cross-validated linear model via caret::train.
set.seed(150)
train_control <- trainControl(method="cv", number=10)
model1 <- train(log10(SalePrice)~., data=data_train,
                trControl=train_control, method="lm")
summary(model1)
t <- summary(model1)$coefficients[,4] < 0.05
names(which(t == 'TRUE'))
pred1 <- predict(model1, df_test)
resul1 <- data.frame(cbind(Actual = df_test$SalePrice,
                           Predicted = pred1))
rmse = RMSE(obs=df_test$SalePrice, pred=10^(pred1))
mae = MAE(obs=df_test$SalePrice, pred=10^(pred1))
error_cv <- data.frame('RMSE' = rmse, 'MAE' = mae,
                       'R-Squared' = summary(model1)$r.squared)
# BUG FIX: 'model' is not defined until the next section; the residual
# plot here belongs to the cross-validated fit 'model1'.
plot(residuals(model1))
# Predictions on test.csv using lm()
train <- data_train
test <- data_test
model <- lm(log10(SalePrice) ~ . , data = train)
summary(model)
prediction <- predict(model, test)
# NOTE(review): 'sample' here is assumed to be a submission/sample data
# frame loaded earlier in the script; base::sample has no SalePrice
# column -- verify it is loaded before this point.
res <- data.frame(cbind(Actual_Values = sample$SalePrice,
                        Predicted_Values = 10^(prediction)))
# BUG FIX: 'rmse' was previously assigned summary(model)$r.squared, i.e.
# the R-squared, not an RMSE (and it feeds the RMSE column of the final
# comparison table).  Compute the actual root mean squared error of the
# fitted model's residuals.
rmse = sqrt(mean(model$residuals^2))
aic = AIC(model)
ggplot(res,aes(x = Actual_Values, y = Predicted_Values))+
  geom_point()+
  geom_smooth()+
  theme_minimal()+
  labs(title = "Actual Vales vs Predicted Values",
       x = "Sale Price",
       y = "Predicted Sale Price")+
  theme(plot.title = element_text(hjust = 0.5, vjust = 0.5))
# Lasso
# Lasso regression (glmnet, alpha = 1) with lambda chosen by CV.
set.seed(100)
index = sample(1:nrow(data_train), 0.75*nrow(data_train))
df_train = data_train[index,]
df_test = data_train[-index,]
x <- model.matrix(SalePrice~., df_train)[,-1]
y <- log10(df_train$SalePrice)
set.seed(2021)
cv_model <- cv.glmnet(x, y, alpha = 1)
lambdaOptimal <- cv_model$lambda.min
lambdaOptimal
fit1 <- glmnet(x, y, alpha = 1, lambda = lambdaOptimal)
coef(fit1)
y_predicted <- predict(fit1, s = lambdaOptimal, newx = x)
# Training R-squared of the lasso fit.
sst <- sum((y - mean(y))^2)
sse <- sum((y_predicted - y)^2)
rsq <- 1 - sse/sst
rsq
# CV RMSE at the optimal lambda (cvm stores mean cross-validated error).
rmse_lasso = sqrt(cv_model$cvm[cv_model$lambda == cv_model$lambda.min])
rmse
# BUG FIX: the original ended this section with a dangling 'aic_lasso =',
# which made R silently consume the next top-level expression (the
# stepwise lm() fit) as its value.  AIC is not defined for glmnet
# objects, so record NA explicitly.
aic_lasso <- NA
# Stepwise selection
# Backward and forward AIC-based selection on the raw SalePrice response
# (results are printed, not stored).
starting.model <- lm(SalePrice ~ ., data = data_train)
simple.model <- lm(SalePrice ~ 1, data = data_train)
stepAIC(starting.model, scope = list(upper = starting.model,lower = simple.model),
        direction = "backward")
stepAIC(starting.model, scope = list(upper = starting.model,lower = simple.model),
        direction = "forward")
# olsrr backward-AIC selection on the log10 response; its AIC and first
# R-squared feed the final comparison table.
model_aic <- lm(log10(SalePrice) ~ ., data = data_train)
#ols_step_both_aic(model_aic)
step_ols <- ols_step_backward_aic(model_aic, details = TRUE)
aic_step <- AIC(step_ols$model)
r2_step <- step_ols$rsq[1]
# Full explicit-formula model (backticks protect names starting with
# digits).
model <- lm(formula = SalePrice ~ MSZoning + Street + Alley + LotShape +
              LandContour + Utilities + LotConfig + LandSlope + Neighborhood +
              Condition1 + Condition2 + BldgType + HouseStyle + RoofStyle +
              RoofMatl + Exterior1st + Exterior2nd + MasVnrType + ExterQual +
              ExterCond + Foundation + BsmtQual + BsmtCond + BsmtExposure +
              BsmtFinType1 + BsmtFinType2 + Heating + HeatingQC + CentralAir +
              Electrical + KitchenQual + Functional + FireplaceQu + GarageType +
              GarageFinish + GarageQual + GarageCond + PavedDrive + PoolQC +
              Fence + MiscFeature + SaleType + SaleCondition + Id + MSSubClass +
              LotFrontage + LotArea + OverallQual + OverallCond + YearBuilt +
              YearRemodAdd + MasVnrArea + BsmtFinSF1 + BsmtFinSF2 + BsmtUnfSF +
              TotalBsmtSF + `1stFlrSF` + `2ndFlrSF` + LowQualFinSF + GrLivArea +
              BsmtFullBath + BsmtHalfBath + FullBath + HalfBath + BedroomAbvGr +
              KitchenAbvGr + TotRmsAbvGrd + Fireplaces + GarageYrBlt +
              GarageCars + GarageArea + WoodDeckSF + OpenPorchSF + EnclosedPorch +
              `3SsnPorch` + ScreenPorch + PoolArea + MiscVal + MoSold +
              YrSold, data = data_train)
summary(model)
# NOTE(review): this loop indexes the character vector 'name' by its own
# values, which just appends NA-backed named entries -- it looks like a
# leftover and has no effect on the analysis; confirm it can be removed.
for(i in name){
  name[i] <- as.name(name[i])
}
#final table
# Comparison table across the four modeling approaches.
# NOTE(review): the lasso/stepwise entries mix computed values with
# hard-coded strings ("0.060", "54.91184"), and 'aic_final' is built but
# never included in result_final -- confirm whether AIC should be
# reported in the table.
method = c("LINEAR","P-VALUE","LASSO","STEPWISE_AIC")
rmse_final <- c(rmse, rmse_p, rmse_lasso, "0.060")
aic_final <- c(aic, aic_p, "54.91184",aic_step)
r2_final <- c(r2, r2_p,rsq,r2_step)
result_final <- data.frame('METHOD' = method,'RMSE'=rmse_final,
                           'R-SQUARED' = r2_final)
|
2cd75aee344d6069094845953e469679993564f3 | 55101b998da352c6a87bf45a7aac567ba5401d14 | /R/state_names.R | 6a8f0b62368ad26badabf6c1e5ee03b07ee2b2a0 | [] | no_license | cran/seqHMM | 94bc81490712340cc081746d23e44c7d1e7e50e3 | 6e6f92383f1a74f770c69ac25b1d37f39160c79c | refs/heads/master | 2023-07-25T09:27:57.558114 | 2023-07-05T23:10:29 | 2023-07-05T23:10:29 | 48,318,279 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,370 | r | state_names.R | #' Get state names from hmm or mhmm object
#'
#' @param object An object of class `hmm` or `mhmm`.
#' @return A character vector containing the state names, or a list of such
#' vectors in `mhmm` case.
#' @export
# S3 generic: dispatch on the class of `object` (hmm or mhmm).
state_names <- function(object) UseMethod("state_names")
#' @export
state_names.hmm <- function(object) {
  # Return the character vector of state names stored on the hmm object.
  object[["state_names"]]
}
#' @export
state_names.mhmm <- function(object) {
  # Return the per-cluster list of state name vectors stored on the
  # mhmm object.
  object[["state_names"]]
}
#' Set state names for hmm or mhmm object
#'
#' @param object An object of class `hmm` or `mhmm`.
#' @param value A character vector containing the new state names, or a list of
#' such vectors in `mhmm` case.
#' @return The modified object with updated state names.
#' @export
# S3 replacement generic: dispatch on the class of `object`.
`state_names<-` <- function(object, value) UseMethod("state_names<-")
#' @export
`state_names<-.hmm` <- function(object, value) {
  # Replacement method for single-cluster `hmm` objects: store the new
  # names and propagate them to every component indexed by state.
  if (length(value) != object$n_states) {
    stop("Number of state names does not match with the number of states.")
  }
  object$state_names <- value
  names(object$initial_probs) <- value
  # Transition matrix is state x state, so rename both dimensions.
  dimnames(object$transition_probs) <- list(from = value, to = value)
  if (object$n_channels > 1) {
    # Multichannel model: one emission matrix per channel, each with a
    # dimnames component named "state_names".
    for (i in 1:object$n_channels) {
      dimnames(object$emission_probs[[i]])$state_names <- value
    }
  } else {
    dimnames(object$emission_probs)$state_names <- value
  }
  object
}
#' @export
`state_names<-.mhmm` <- function(object, value) {
  # Replacement method for mixture (`mhmm`) objects: `value` must be a
  # list with one name vector per cluster; each vector must match that
  # cluster's number of hidden states.
  if (length(value) != object$n_clusters) {
    stop(
      paste0(
        "New state names should be a list with length of ",
        object$n_clusters, "."
      )
    )
  }
  for (i in 1:object$n_clusters) {
    if (length(value[[i]]) != object$n_states[i]) {
      stop(
        paste0(
          "Number of new state names for cluster ", i,
          " is not equal to the number of hidden states."
        )
      )
    } else {
      # Propagate the cluster's new names to every state-indexed
      # component, mirroring the single-model hmm method.
      object$state_names[[i]] <- value[[i]]
      names(object$initial_probs[[i]]) <- value[[i]]
      dimnames(object$transition_probs[[i]]) <- list(from = value[[i]], to = value[[i]])
      if (object$n_channels > 1) {
        for (j in 1:object$n_channels) {
          dimnames(object$emission_probs[[i]][[j]])$state_names <- value[[i]]
        }
      } else {
        dimnames(object$emission_probs[[i]])$state_names <- value[[i]]
      }
    }
  }
  object
}
|
3f5ed94fb70ca8bc34231136c73fb198db4aff0e | c5824359870ca766c2684c7ff3abe956de472377 | /Profile/abundance_based/Fit_ALR_as_Response/Fit_ALR_as_Response.r | f11067f4290d2b4d003ab7a5957e1b9e80b5e22d | [] | no_license | MedicineAndTheMicrobiome/AnalysisTools | ecb8d6fd4926b75744f515b84a070e31f953b375 | 8176ca29cb4c5cba9abfa0a0250378e1000b4630 | refs/heads/master | 2023-09-01T21:10:39.942961 | 2023-08-31T22:43:03 | 2023-08-31T22:43:03 | 64,432,395 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 50,407 | r | Fit_ALR_as_Response.r | #!/usr/bin/env Rscript
###############################################################################
library(MASS);
library(vegan);
library('getopt');
library(car);
options(useFancyQuotes=F);
RM_NA_TRIALS=10000*64;
NUM_TOP_CATEGORIES=30;
params=c(
"summary_file", "s", 1, "character",
"factors", "f", 1, "character",
"num_variables", "p", 2, "numeric",
"additional_variables_fname", "a", 2, "character",
"outputroot", "o", 2, "character",
"reference_levels", "r", 2, "character",
"model", "m", 2, "character",
"model_variables_file", "M", 2, "character",
"contains_remaining", "R", 2, "logical",
"shorten_category_names", "x", 2, "character",
"test_run", "T", 2, "logical",
"rm_na_trials", "N", 2, "numeric",
"required_var", "q", 2, "character",
"tag_name", "t", 2, "character"
);
opt=getopt(spec=matrix(params, ncol=4, byrow=TRUE), debug=FALSE);
script_name=unlist(strsplit(commandArgs(FALSE)[4],"=")[1])[2];
script_path=paste(head(strsplit(script_name, "/")[[1]], -1), collapse="/");
source(paste(script_path, "/../../../Metadata/RemoveNAs/Remove_NAs.r", sep=""));
# Command-line usage text, printed when required arguments are missing.
usage = paste(
	"\nUsage:\n", script_name, "\n",
	"	-s <summary file table>\n",
	"	-f <factors>\n",
	"	[-p <number of variables, in top abundances, default=", NUM_TOP_CATEGORIES, ">]\n",
	"	[-a <additional categories of interest filename>\n",
	"	[-o <output filename root>]\n",
	"\n",
	"	Model building options:\n",
	"	[-r <reference levels file>]\n",
	"	[-m <model formula string>]\n",
	"	[-R (pay attention to 'remaining' category)]\n",
	"	[-M <model variables file>]\n",
	"\n",
	"	NA removal options:\n",
	"	[-N <remove NA trials, trials=", RM_NA_TRIALS, "\n",
	"	[-q <required variables>]\n",
	"\n",
	"	[-x <shorten category names, with separator in double quotes (default=\"\")>]\n",
	"	[-T (test run flag)]\n",
	"\n",
	"	[-t <tag name>]\n",
	"\n",
	"This script will read in the summary file table, then perform\n",
	"a multivariate logistic regression on the the top categories\n",
	"using the factors/predictors in the factor file.\n",
	"\n",
	"For testing purposes, you can append the factor name with 'IGNORE.' and\n",
	"the factor will be ignored.\n",
	"\n",
	"The -m option will allow you to specify a more sophisticated regression\n",
	"model.\n",
	"	For example: \n",
	"		-m \"F1 + F2 + F1*F2\"\n",
	"\n",
	"	Will Fit: \n",
	"		ALR = b0 + b1*F1 + b2*F2 + b3*F1*F2\n",
	"\n",
	"Without the -m option, all factors in the factors file will be fit as main effects.\n",
	"\n",
	"If the -R flag is set, a 'remaining' category will be be included in the denominator\n",
	"	independent of how large it is.  I.e., do not use it as one of the response variables.\n",
	"\n", sep="");
# Summary file and factor file are mandatory.
if(!length(opt$summary_file) || !length(opt$factors)){
	cat(usage);
	q(status=-1);
}
# Default output root: the summary file name with its extension stripped.
if(!length(opt$outputroot)){
	OutputRoot=gsub("\\.summary_table\\.xls$", "", opt$summary_file);
	OutputRoot=gsub("\\.summary_table\\.tsv$", "", OutputRoot);
}else{
	OutputRoot=opt$outputroot;
}
if(!length(opt$num_variables)){
	NumVariables=NUM_TOP_CATEGORIES;
}else{
	NumVariables=opt$num_variables;
}
if(!length(opt$additional_variables_fname)){
	AdditionalVariablesFname="";
}else{
	AdditionalVariablesFname=opt$additional_variables_fname;
}
if(!length(opt$reference_levels)){
	ReferenceLevelsFile="";
}else{
	ReferenceLevelsFile=opt$reference_levels;
}
# "All Factors" is a sentinel meaning: build the model from every factor
# column in the factor file.
if(length(opt$model)){
	Model=opt$model;
}else{
	Model="All Factors";
}
if(length(opt$model_variables_file)){
	ModelVariablesFile=opt$model_variables_file;
}else{
	ModelVariablesFile="";
}
if(length(opt$contains_remaining)){
	UseRemaining=T;
}else{
	UseRemaining=F;
}
if(length(opt$shorten_category_names)){
	ShortenCategoryNames=opt$shorten_category_names;
}else{
	ShortenCategoryNames="";
}
if(length(opt$test_run)){
	TestRun=T;
}else{
	TestRun=F;
}
RequiredFile="";
if(length(opt$required_var)){
	RequiredFile=opt$required_var;
}
Num_Remove_NA_Trials=RM_NA_TRIALS;
if(length(opt$rm_na_trials)){
	Num_Remove_NA_Trials=opt$rm_na_trials;
}
# Optional tag name: install a plot.new hook that stamps "[tag]" in the
# bottom-right outer margin of every new page.
if(length(opt$tag_name)){
	TagName=opt$tag_name;
	cat("Setting TagName Hook: ", TagName, "\n");
	setHook("plot.new",
		function(){
			#cat("Hook called.\n");
			if(par()$page==T){
				# Temporarily widen the bottom outer margin for the tag.
				oma_orig=par()$oma;
				exp_oma=oma_orig;
				exp_oma[1]=max(exp_oma[1], 1);
				par(oma=exp_oma);
				mtext(paste("[", TagName, "]", sep=""), side=1, line=exp_oma[1]-1,
					outer=T, col="steelblue4", font=2, cex=.8, adj=.97);
				par(oma=oma_orig);
			}
		},
		"append");
}else{
	TagName="";
}
SummaryFile=opt$summary_file;
FactorsFile=opt$factors;
# Echo the effective configuration for the log.
cat("\n");
cat("Summary File : ", SummaryFile, "\n", sep="");
cat("Factors File: ", FactorsFile, "\n", sep="");
cat("Number of Response Variables: ", NumVariables, "\n", sep="");
cat("Reference Levels File: ", ReferenceLevelsFile, "\n", sep="");
cat("Output File: ", OutputRoot, "\n", sep="");
cat("Model: ", Model, "\n", sep="");
cat("Use Remaining? ", UseRemaining, "\n");
cat("Shorten Category Names: '", ShortenCategoryNames, "'\n", sep="");
cat("\n");
# Guard: -x needs a delimiter argument; a bare flag parses as TRUE.
if(ShortenCategoryNames==TRUE){
	cat("Error: You need to specify a delimitor to split the category names.\n");
	cat("  i.e., this -x option is not a flag, it requires a parameter.\n");
	quit(status=-1);
}
# Test runs get a random suffix so output files don't clobber real runs.
if(TestRun){
	cat("***********************************************\n");
	cat("*  Test Run                                   *\n");
	cat("***********************************************\n");
	rnd=paste(".", sprintf("%i",sample(1000, 1)), sep="");
}else{
	rnd="";
}
options(width=120);
cat("Text Line Width: ", options()$width, "\n", sep="");
##############################################################################
load_factors=function(fname){
	# Read a tab-separated factor/metadata table (sample IDs in column 1,
	# strings kept as factors).  Any column whose name starts with
	# "IGNORE." is dropped so variables can be parked in the file without
	# entering the model.
	raw=read.table(fname, sep="\t", header=TRUE, row.names=1, check.names=FALSE,
		comment.char="", stringsAsFactors=TRUE);
	factors=data.frame(raw);
	keep=!grepl("^IGNORE\\.", colnames(factors));
	if(all(keep)){
		return(factors);
	}
	return(factors[keep]);
}
load_summary_file=function(fname){
	# Read a summary table into a matrix: row names are sample IDs, the
	# first data column (a total) is dropped, and the remaining columns
	# are per-category counts.
	raw=as.matrix(read.table(fname, sep="\t", header=TRUE, check.names=FALSE,
		comment.char="", quote="", row.names=1));
	counts_mat=raw[, 2:ncol(raw)];
	# Sanitize category names a bit: dashes become underscores and square
	# brackets are stripped.
	clean=gsub("\\]", "",
		gsub("\\[", "",
		gsub("-", "_", colnames(counts_mat))));
	colnames(counts_mat)=clean;
	cat("Num Categories in Summary Table: ", ncol(counts_mat), "\n", sep="");
	return(counts_mat);
}
load_reference_levels_file=function(fname){
	# Read a two-column, tab-separated file mapping factor names (row
	# names) to the level that should be used as the regression reference.
	# Lines starting with '#' are treated as comments.  The matrix is
	# echoed for the log, and the script aborts if the file does not have
	# exactly one value column.
	inmat=as.matrix(read.table(fname, sep="\t", header=F, check.names=FALSE, comment.char="#", row.names=1))
	colnames(inmat)=c("ReferenceLevel");
	print(inmat);
	cat("\n");
	if(ncol(inmat)!=1){
		cat("Error reading in reference level file: ", fname, "\n");
		quit(status=-1);
	}
	return(inmat);
}
relevel_factors=function(factors, ref_lev_mat){
	# Re-level factor columns of `factors` according to `ref_lev_mat`
	# (from load_reference_levels_file): for each named factor, make the
	# given level the reference level.  Missing factors or levels only
	# produce warnings; the data is returned otherwise unchanged.
	num_factors_to_relevel=nrow(ref_lev_mat);
	relevel_names=rownames(ref_lev_mat);
	factor_names=colnames(factors);
	for(i in 1:num_factors_to_relevel){
		relevel_target=relevel_names[i];
		# Only proceed if the target factor actually exists in the table.
		if(length(intersect(relevel_target, factor_names))){
			target_level=ref_lev_mat[i, 1];
			tmp=factors[,relevel_target];
			# ... and only if the requested level is observed in the data.
			if(length(intersect(target_level, tmp))){
				tmp=relevel(tmp, target_level);
				factors[,relevel_target]=tmp;
			}else{
				cat("WARNING: Target level '", target_level,
					"' not found in '", relevel_target, "'!!!\n", sep="");
			}
		}else{
			cat("WARNING: Relevel Target Not Found: '", relevel_target, "'!!!\n", sep="");
		}
	}
	return(factors);
}
normalize=function(counts){
	# Convert a samples x categories count matrix into row-wise
	# proportions: each row is divided by its total so rows sum to 1.
	#
	# IMPROVEMENT: replaces the per-sample loop with a single vectorized
	# sweep() over rows; dimnames are preserved (and re-asserted for
	# safety, matching the original behavior).
	totals=apply(counts, 1, sum);
	normalized=sweep(counts, 1, totals, FUN="/");
	colnames(normalized)=colnames(counts);
	rownames(normalized)=rownames(counts);
	return(normalized);
}
plot_text=function(strings, size_mult=1){
	# Render a character vector as a page of monospaced text on the
	# current graphics device (used to embed logs/summaries in the PDF).
	# Text shrinks as the number of lines grows beyond 52; tabs are
	# stripped before drawing.
	par(mfrow=c(1,1));
	par(family="Courier");
	par(oma=rep(.5,4));
	par(mar=rep(0,4));
	num_lines=length(strings);
	top=max(as.integer(num_lines), 52);
	# Empty plot whose y-range holds one line of text per unit.
	plot(0,0, xlim=c(0,top), ylim=c(0,top), type="n", xaxt="n", yaxt="n",
		xlab="", ylab="", bty="n", oma=c(1,1,1,1), mar=c(0,0,0,0)
		);
	# Scale text down 0.003 per line past 52, clamped to [.01, .8].
	text_size=max(.01, min(.8, .8 - .003*(num_lines-52)));
	#print(text_size);
	for(i in 1:num_lines){
		#cat(strings[i], "\n", sep="");
		strings[i]=gsub("\t", "", strings[i]);
		text(0, top-i, strings[i], pos=4, cex=text_size*size_mult);
	}
}
sig_char=function(val){
	# Map a p-value to a short significance marker string.
	# NULL/NaN/NA inputs fall straight through to the blank marker.
	if(is.null(val) || is.nan(val) || is.na(val)){
		return("   ");
	}
	if(val <= .0001){ return("***");}
	if(val <= .001 ){ return("** ");}
	if(val <= .01  ){ return("* ");}
	if(val <= .05  ){ return(": ");}
	if(val <= .1   ){ return(". ");}
	return("   ");
}
##############################################################################
plot_correl_heatmap=function(mat, title="", noPrintZeros=F, guideLines=F){
	# Draw `mat` as a heatmap with each cell's value printed on top.
	#   mat:          numeric matrix (e.g. correlations or p-values).
	#   title:        outer title for the page.
	#   noPrintZeros: if TRUE, suppress printing of exact-zero cells.
	#   guideLines:   if TRUE, overlay dashed grid lines every few cells.
	# NOTE(review): bare `return;` evaluates (and returns) the `return`
	# primitive itself rather than NULL -- harmless here since callers
	# ignore the value, but worth confirming.
	if(is.null(dim(mat))){
		cat(title, " Matrix is NULL.  No heatmap generated.\n");
		return;
	}
	cat("Plotting: ", title, "\n");
	# Generate colors from red to blue
	colors=(rainbow(2^16, start=0, end=0.65));
	# Pad strings
	cnames=paste(colnames(mat), " ", sep="");
	rnames=paste(rownames(mat), " ", sep="");
	# Get longest length of each column or row label
	cname_max_len=max(nchar(cnames));
	rname_max_len=max(nchar(rnames));
	# Get the number of rows and columns
	ncols=ncol(mat);
	nrows=nrow(mat);
	# Scale label text so long names / many cells still fit the page.
	cscale=min(c(45/cname_max_len, 55/ncols));
	rscale=min(c(45/rname_max_len, 55/nrows));
	cscale=min(1, cscale);
	rscale=min(1, rscale);
	# Cell text size shrinks with the widest formatted number.
	max_width=max(nchar(sprintf("%.2f",mat)));
	#cell_cex=sqrt(min(c(cscale, rscale))^2);
	cell_cex=2*(1/max_width)*sqrt(cscale^2 + rscale^2);
	par(family="Courier");
	par(oma=c(.5, .5, 1.5, .5));
	override_length=10;
	par(mar=c(min(rname_max_len/2, override_length), min(cname_max_len/2, override_length), .5, .5));
	# Remember that rows and columsn are reversed in the image
	image(1:nrow(mat),1:ncol(mat), mat,
		xaxt="n", yaxt="n",
		xlab="", ylab="",
		col=colors
	);
	# Print each cell's value (leading "0." trimmed), rotated 45 degrees.
	for(i in 1:nrow(mat)){
		for(j in 1:ncol(mat)){
			if(!is.na(mat[i,j]) && (noPrintZeros && mat[i,j]==0)){
				# Skip
			}else{
				str=sprintf("%.2f",mat[i,j]);
				str=gsub("^0\\.",".", str);
				text(i,j,labels=str, cex=cell_cex, srt=45);
			}
		}
	}
	# Plot guidelines
	# Pick the divisor in 2..5 that most evenly splits the axis.
	if(guideLines){
		splits=c(2,3,4,5);
		h_remainder=ncols %% splits;
		best_h_split=splits[max(which(h_remainder==min(h_remainder)))];
		if(ncols>best_h_split){
			h_line_pos=seq(best_h_split, ncols, best_h_split)+.5;
			abline(h=h_line_pos, col="black", lty="dashed");
			abline(h=h_line_pos, col="white", lty="dotted");
		}
		v_remainder=nrows %% splits;
		best_v_split=splits[max(which(v_remainder==min(v_remainder)))];
		if(is.na(best_v_split)){
			best_v_split=max(splits);
		}
		if(nrows>best_v_split){
			v_line_pos=seq(best_v_split, nrows, best_v_split)+.5;
			abline(v=v_line_pos, col="black", lty="dashed");
			abline(v=v_line_pos, col="white", lty="dotted");
		}
	}
	# Plot the labels
	mtext(cnames, at=1:ncols, side=2, las=2, cex=cscale);
	mtext(rnames, at=1:nrows, side=1, las=2, cex=rscale);
	# Plot the title
	mtext(title, line=0, outer=T, side=3, font=2);
	cat("Done plotting: ", title, "\n");
}
write_top_categorical_effects_by_factor=function(output_fn, coeff_mat, pval_mat, top_n=10){
	# For each factor (row of coeff_mat/pval_mat), write to `output_fn`
	# (CSV) up to `top_n` categories with the largest positive ALR
	# coefficients and up to `top_n` with the most negative ones, each
	# with its p-value and a significance marker.  Both matrices must
	# share the same factor (row) and category (column) names.
	cat("Writing top category effects by factor: ", output_fn, "\n");
	top_n=min(top_n, ncol(pval_mat));
	pval_cat=colnames(pval_mat);
	pval_fac=rownames(pval_mat);
	coeff_cat=colnames(coeff_mat);
	coeff_fac=rownames(coeff_mat);
	if(!all(pval_fac==coeff_fac) && !all(pval_cat==coeff_cat)){
		cat("Error: categories or factors don't match up.\n");
		quit(status=-1);
	}else{
		factors=pval_fac;
		categories=pval_cat;
	}
	# Reusable per-factor buffer: one row per category.
	mat_buf=matrix(0, ncol=2, nrow=length(categories));
	colnames(mat_buf)=c("ALR", "p-value");
	rownames(mat_buf)=categories;
	# Local p-value -> star-string mapping (blank above 0.1).
	sig_fun_str=function(x){
		if(!is.null(x) && !is.nan(x) && !is.na(x)){
			if(x <= 0.0001){
				return("****");
			}else if(x <= 0.001){
				return("***");
			}else if(x <= 0.01){
				return("**");
			}else if(x <= 0.05){
				return("*");
			}else if(x <= 0.1){
				return(".")
			}else{
				return("");
			}
		}
	}
	fh=file(output_fn, "w");
	for(cur_factor in factors){
		cat("Working on: ", cur_factor, "\n");
		mat_buf[categories,"ALR"]=coeff_mat[cur_factor,categories];
		mat_buf[categories,"p-value"]=pval_mat[cur_factor,categories];
		# Sort
		sort_ix=order(mat_buf[,"ALR"], decreasing=T);
		mat_buf=mat_buf[sort_ix,];
		signif=sapply(mat_buf[,2], sig_fun_str);
		sort_cat=rownames(mat_buf);
		cat(file=fh, cur_factor, ":,Category,ALR,p-value,Signif\n");
		# Output Top N
		# Only while coefficients remain positive.
		cat("Writing Top ", top_n, "\n");
		ix=1;
		while(ix<=top_n && mat_buf[ix,"ALR"]>0){
			vals=c(paste(ix,"+", sep=""), sort_cat[ix], mat_buf[ix,"ALR"], mat_buf[ix, "p-value"], signif[ix]);
			cat(file=fh, paste(vals, collapse=","), "\n");
			ix=ix+1;
		}
		# Separator
		cat(file=fh, "...\n");
		# Output Bottom N
		# Walk from the most negative end while coefficients remain
		# negative.
		num_cats=nrow(mat_buf);
		cat("Writing Bottom ", top_n, "\n");
		ix=0;
		while((ix<top_n) && mat_buf[num_cats-ix,"ALR"]<0 ){
			vals=c(paste(ix+1, "-", sep=""), sort_cat[num_cats-ix], mat_buf[num_cats-ix,"ALR"],
				mat_buf[num_cats-ix, "p-value"], signif[num_cats-ix]);
			cat(file=fh, paste(vals, collapse=","), "\n");
			ix=ix+1;
		}
		cat(file=fh, "\n\n");
	}
	close(fh);
}
load_list=function(filename){
	# Read a whitespace-delimited list of strings from a file,
	# skipping '#' comments.
	scan(filename, what=character(), comment.char="#");
}
##############################################################################
signf_as_table=function(coef_mat, pval_mat){
	# Flatten paired coefficient/p-value matrices into a character table
	# of entries with p < 0.10, sorted by ascending p-value.  Columns:
	# Row, Column, Coefficient, P-value, Formatted.  If nothing is
	# significant, the untrimmed all-NA matrix is returned unchanged.
	rnames=rownames(coef_mat);
	cnames=colnames(coef_mat);
	nr=nrow(coef_mat);
	nc=ncol(coef_mat);
	headers=c("Row", "Column", "Coefficient", "P-value", "Formatted");
	out=matrix(NA, nrow=nr*nc, ncol=length(headers));
	colnames(out)=headers;
	kept_pvals=numeric(nr*nc);
	n_kept=0;
	for(ri in 1:nr){
		for(ci in 1:nc){
			p=pval_mat[ri,ci];
			# NA/NaN entries and anything at or above 0.10 are skipped.
			if(is.na(p) || is.nan(p) || p >= 0.10){
				next;
			}
			n_kept=n_kept+1;
			out[n_kept,]=c(rnames[ri], cnames[ci],
				sprintf("%.5g", coef_mat[ri,ci]),
				sprintf("%.5g", p),
				sprintf("(coef = %.4f, p-val = %.3g)", coef_mat[ri,ci], p));
			kept_pvals[n_kept]=p;
		}
	}
	if(n_kept>=1){
		# Trim to the kept rows and order by p-value.
		out=out[1:n_kept,,drop=F];
		ord=order(kept_pvals[1:n_kept]);
		out=out[ord,,drop=F];
		rownames(out)=1:n_kept;
	}
	return(out);
}
##############################################################################
# Open PDF output
pdf(paste(OutputRoot, rnd, ".alr_as_resp.pdf", sep=""), height=11, width=8.5);
# Load summary file table counts
counts=load_summary_file(SummaryFile);
# Remove zero count samples
# Samples with no reads at all cannot be normalized and are dropped.
tot=apply(counts, 1, sum);
nonzero=tot>0;
if(!(all(nonzero))){
	cat("WARNING: Zero count samples found:\n");
	samp_names=rownames(counts);
	print(samp_names[!nonzero]);
	cat("\n");
	counts=counts[nonzero,,drop=F];
}
num_taxa=ncol(counts);
num_samples=nrow(counts);
#print(counts);
# Load factors
factors=load_factors(FactorsFile);
factor_names=colnames(factors);
num_factors=ncol(factors);
factor_sample_names=rownames(factors);
num_factor_samples=length(factor_sample_names);
cat("\n");
cat(num_factors, " Factor(s) Loaded:\n", sep="");
print(factor_names);
cat("\n");
# Input summary, later rendered into the PDF via plot_text().
input_info_text=c(
	paste("Summary File: ", SummaryFile, sep=""),
	paste("  Num Samples: ", nrow(counts), sep=""),
	paste("  Num Categories: ", ncol(counts), sep=""),
	"",
	paste("Factor File: ", FactorsFile, sep=""),
	paste("  Num Samples: ", nrow(factors), sep=""),
	paste("  Num Factors: ", ncol(factors), sep=""),
	"",
	paste("Output File Root: ", OutputRoot, sep="")
);
##############################################################################
# Building Model
# A model variables file overrides -m: its variables are joined into a
# main-effects formula string.
if(ModelVariablesFile!=""){
	model_variables_file_list=load_list(ModelVariablesFile);
	Model=paste(model_variables_file_list, collapse=" + ");
}
# "All Factors" sentinel: use every factor column as a main effect.
if(Model!="All Factors"){
	model_pred_str=Model;
}else{
	model_pred_str=paste(factor_names, collapse=" + ");
}
cat("Model: ", model_pred_str, "\n");
# Remove factors from table that aren't specified in model
# (get_var_from_modelstring comes from the sourced Remove_NAs.r helpers.)
model_var_arr=get_var_from_modelstring(model_pred_str);
if(length(model_var_arr)){
	factors=factors[,model_var_arr, drop=F];
	num_factors=ncol(factors);
}
# Load variables to require after NA removal
required_arr=NULL;
if(""!=RequiredFile){
	required_arr=load_list(RequiredFile);
	cat("Required Variables:\n");
	print(required_arr);
	cat("\n");
	missing_var=setdiff(required_arr, factor_names);
	if(length(missing_var)>0){
		cat("Error: Missing required variables from factor file:\n");
		print(missing_var);
	}
}else{
	cat("No Required Variables specified...\n");
}
# Read in additional categories
# Extra ALR categories to include regardless of abundance rank.
if(AdditionalVariablesFname!=""){
	cat("Loading Additional ALR Categories...\n");
	additional_categories=load_list(AdditionalVariablesFname);
}else{
	additional_categories=c();
}
# First page of the PDF: run configuration summary.
plot_text(c(
	"Multivariate ALR Response Regression:",
	"",
	input_info_text,
	"",
	paste("Original Model: ", Model, sep=""),
	"",
	"Required Variables:",
	paste("  File: ", RequiredFile, sep=""),
	paste("  Variable(s): "),
	capture.output(print(required_arr)),
	"",
	"Additional MALR Categories:",
	capture.output(print(additional_categories))
));
##############################################################################
# Remove NAs samples/factors
# Search (in parallel) for the sample/factor subset that minimizes data
# loss while removing all NAs; helpers come from the sourced Remove_NAs.r.
if(any(is.na(factors))){
	rm_na_res=remove_sample_or_factors_wNA_parallel(factors, required=required_arr,
		num_trials=Num_Remove_NA_Trials, num_cores=64, outfile=OutputRoot);
	# Update factors, summary table, and model
	factors=rm_na_res$factors;
	num_factors=ncol(factors);
	counts=remove_samples_from_st(rm_na_res, counts);
	model_pred_str=rem_missing_var_from_modelstring(model_pred_str, colnames(factors));
	plot_text(c(
		rm_na_res$summary_text,
		"",
		paste("New Model: ", model_pred_str, sep="")
	));
}
# Shorten cateogry names
# Keep only the last token after the -x delimiter and abbreviate common
# taxonomic suffixes.
if(ShortenCategoryNames!=""){
	full_names=colnames(counts);
	splits=strsplit(full_names, ShortenCategoryNames);
	short_names=character();
	for(i in 1:length(full_names)){
		short_names[i]=tail(splits[[i]], 1);
		short_names[i]=gsub("_unclassified$", "_uncl", short_names[i]);
		short_names[i]=gsub("_group", "_grp", short_names[i]);
		short_names[i]=gsub("\\[", "", short_names[i]);
		short_names[i]=gsub("\\]", "", short_names[i]);
		short_names[i]=gsub("\\(", "", short_names[i]);
		short_names[i]=gsub("\\)", "", short_names[i]);
	}
	colnames(counts)=short_names;
	cat("Names have been shortened.\n");
}
# Normalize
# Add a 0.5 pseudocount so log-ratios are defined for zero counts.
counts=counts+.5;
normalized=normalize(counts);
# With -R, pull the Remainder/Remaining column aside so it is never one
# of the top response categories (it is re-appended last below).
if(UseRemaining){
	category_names=colnames(counts);
	uc_cat_names=toupper(category_names);
	remaining_ix=which(uc_cat_names=="REMAINDER" | uc_cat_names=="REMAINING");
	if(length(remaining_ix)!=1){
		cat("*******************************************************\n");
		cat("*  WARNING:  Could not identify remaining column.     *\n");
		cat("*******************************************************\n");
		UseRemaining=F;
	}else{
		cat("Remaining original column: ", remaining_ix, "\n");
		normalized_remaining_col_dat=normalized[,remaining_ix, drop=F];
		normalized=normalized[,-remaining_ix];
	}
}
# Reorder by abundance
mean_abund=apply(normalized, 2, mean);
ix=order(mean_abund, decreasing=TRUE);
normalized=normalized[,ix];
mean_abund=mean_abund[ix];
if(UseRemaining){
	normalized=cbind(normalized, normalized_remaining_col_dat);
	mean_abund=c(mean_abund, mean(normalized_remaining_col_dat));
}
# Report the top categories and how much abundance they account for.
sorted_taxa_names=colnames(normalized);
num_top_taxa=NumVariables;
num_top_taxa=min(c(num_top_taxa, num_taxa));
prop_abundance_represented=sum(mean_abund[1:num_top_taxa]);
cat("\nThe top ", num_top_taxa, " taxa are:\n", sep="");
for(i in 1:num_top_taxa){
	cat("\t", sorted_taxa_names[i], "\t[", mean_abund[i], "]\n", sep="");
}
cat("\n");
cat("Accounting for ", prop_abundance_represented, " of taxa.\n", sep="");
cat("\n");
##############################################################################
##############################################################################
# Identify which factors can be treated as continuous: numeric columns
# (no levels) directly, and two-level factors after integer conversion.
num_factors=ncol(factors);
continuous_factors=factors;
is_continous_factor=logical(num_factors);
for(f in 1:num_factors){
	level_info=levels(factors[,f]);
	is_continous_factor[f]=is.null(level_info);
	if(is_continous_factor[f]){
		# do nothing
	}else if(length(level_info)==2){
		# Convert two level factors to numeric
		is_continous_factor[f]=TRUE;
		continuous_factors[,f]=as.integer(continuous_factors[,f]);
	}else{
		is_continous_factor[f]=FALSE;
	}
}
# Log each factor's level ordering (first level = reference).
cat("* Reference levels: *\n");
for(f in 1:num_factors){
	cat("* ", factor_names[f], ":\n* ", sep="");
	print(levels(factors[,f]));
}
cat("**************************************************************\n");
continuous_factors=continuous_factors[,is_continous_factor, drop=F];
print(continuous_factors);
factor_correlations=cor(continuous_factors);
#pdf(paste(OutputRoot, ".factor_cor.pdf", sep=""), height=11, width=8.5);
#plot_correl_heatmap(factor_correlations, title="Factor Correlations");
#dev.off();
##############################################################################
# Reconcile factors with samples
# Keep only the sample IDs present in BOTH the abundance table and the
# metadata table, report what was dropped from each side, and put both
# tables in the same (sorted) row order so rows line up by sample.
factor_sample_ids=rownames(factors);
counts_sample_ids=rownames(counts);
#print(factor_sample_id);
#print(counts_sample_id);
shared_sample_ids=intersect(factor_sample_ids, counts_sample_ids);
num_shared_sample_ids=length(shared_sample_ids);
num_factor_sample_ids=length(factor_sample_ids);
num_counts_sample_ids=length(counts_sample_ids);
cat("Num counts sample IDs: ", num_counts_sample_ids, "\n");
cat("Num factor sample IDs: ", num_factor_sample_ids, "\n");
cat("Num shared sample IDs: ", num_shared_sample_ids, "\n");
cat("\n");
cat("Samples missing from count information:\n");
print(setdiff(factor_sample_ids, counts_sample_ids));
cat("\n");
cat("Samples missing from factor information:\n");
print(setdiff(counts_sample_ids, factor_sample_ids));
cat("\n");
cat("Total samples shared: ", num_shared_sample_ids, "\n");
shared_sample_ids=sort(shared_sample_ids);
# Reorder data by sample id
normalized=normalized[shared_sample_ids,];
num_samples=nrow(normalized);
factors=factors[shared_sample_ids,,drop=F];
# Drop factors that have no variation after subsetting, and remove them
# from the model formula string as well (helpers defined elsewhere in file).
factors=remove_no_variation_factors(factors);
model_pred_str=rem_missing_var_from_modelstring(model_pred_str, colnames(factors));
num_factors=ncol(factors);
# Relevel factor levels
# Optionally re-base factor levels from a user-supplied reference file.
if(ReferenceLevelsFile!=""){
	ref_lev_mat=load_reference_levels_file(ReferenceLevelsFile)
	factors=relevel_factors(factors, ref_lev_mat);
}else{
	cat("* No Reference Levels File specified. *\n");
}
##############################################################################
extract_top_categories=function(ordered_normalized, top, additional_cat=c()){
	# Extract the 'top' most abundant categories (columns) from a
	# sample x category abundance matrix that is already sorted by
	# decreasing mean abundance, plus any explicitly requested extra
	# categories.  A final "Remaining" column absorbs whatever
	# abundance was not extracted, so each row keeps its original sum.
	#
	# ordered_normalized: numeric matrix, samples in rows, categories
	#	in columns (columns ordered by decreasing abundance).
	# top: number of leading columns to extract; capped at ncol-1 so
	#	the "Remaining" column is never degenerate.
	# additional_cat: character vector of extra column names to carry
	#	along; the script exits if any of them cannot be found.
	# Returns: matrix with columns <top cats>, <extra cats>, "Remaining".

	num_samples=nrow(ordered_normalized);
	num_categories=ncol(ordered_normalized);

	cat("Samples: ", num_samples, "\n");
	cat("Categories: ", num_categories, "\n");

	num_top_to_extract=min(num_categories-1, top);

	cat("Top Requested to Extract: ", top, "\n");
	cat("Columns to Extract: ", num_top_to_extract, "\n");

	# Extract top categories requested.
	# drop=F keeps the matrix structure (and column names) even when a
	# single column is extracted; without it colnames(top_cat) would be
	# NULL and the output header would be corrupted for top==1.
	top_cat=ordered_normalized[,1:num_top_to_extract, drop=F];

	if(length(additional_cat)){
		cat("Additional Categories to Include:\n");
		print(additional_cat);
	}else{
		cat("No Additional Categories to Extract.\n");
	}

	# Extract additional categories
	# :: Make sure we can find the categories
	available_cat=colnames(ordered_normalized);
	missing_cat=setdiff(additional_cat, available_cat);
	if(length(missing_cat)){
		cat("Error: Could not find categories: \n");
		print(missing_cat);
		# Hard exit: a misspelled category name is a user input error.
		quit(status=-1);
	}

	# :: Remove categories we have already extracted in the top N
	already_extracted_cat=colnames(top_cat);
	extra_cat=setdiff(additional_cat, already_extracted_cat);

	num_extra_to_extract=length(extra_cat);
	cat("Num Extra Categories to Extract: ", num_extra_to_extract, "\n");

	# Allocate/Prepare output matrix
	num_out_mat_cols=num_top_to_extract+num_extra_to_extract+1;
	out_mat=matrix(0, nrow=num_samples, ncol=num_out_mat_cols);
	rownames(out_mat)=rownames(ordered_normalized);
	colnames(out_mat)=c(already_extracted_cat, extra_cat, "Remaining");

	# Copy over top and additional categories, and compute remaining.
	all_cat_names=c(already_extracted_cat, extra_cat);
	out_mat[,all_cat_names]=ordered_normalized[,all_cat_names];

	# Remaining = row total minus everything extracted.  The "Remaining"
	# column itself is still zero at this point, so rowSums(out_mat) is
	# exactly the sum of the extracted categories.
	normalized_sums=apply(ordered_normalized, 1, sum);
	out_mat[,"Remaining"]=normalized_sums-rowSums(out_mat);

	return(out_mat);
}
additive_log_rato=function(ordered_matrix){
	# Additive log-ratio (ALR) transform.  The LAST column of the input
	# matrix is used as the common denominator; every other column is
	# returned as log(column / denominator), row by row.
	#
	# ordered_matrix: samples x categories numeric matrix.
	# Returns a list with:
	#	transformed  - samples x (categories-1) matrix of ALR values
	#	denominator  - the denominator column as a vector
	n_cols=ncol(ordered_matrix);
	denom=ordered_matrix[,n_cols];

	# Element-wise division recycles denom down each column (column-major),
	# so every row is divided by its own denominator value; this replaces
	# the equivalent per-row loop.
	ratio_mat=ordered_matrix[,-n_cols, drop=F]/denom;
	transformed_mat=log(ratio_mat);

	rownames(transformed_mat)=rownames(ordered_matrix);
	colnames(transformed_mat)=head(colnames(ordered_matrix), n_cols-1);

	result=list();
	result[["transformed"]]=transformed_mat;
	result[["denominator"]]=denom;
	return(result);
}
plot_overlapping_density=function(mat, title=""){
	# Draw the kernel density of every column of 'mat' on a single set of
	# axes, color-coded with a rainbow palette, and label each curve at
	# its mode.  Label x-positions are iteratively nudged apart so that
	# labels of nearby modes do not overlap.
	#
	# mat: numeric matrix (samples x categories), e.g. ALR-transformed
	#	abundances.
	# title: subtitle drawn under the main plot title.
	# Side effects only (draws onto the active graphics device).

	cat("Plotting overlapping densities...\n");

	num_cat=ncol(mat);
	range=range(mat);
	cat("Ranges: ", range[1], " - ", range[2], "\n", sep="");

	# Compute the density for each category
	density_list=list();
	max_density=0;
	for(i in 1:num_cat){
		density_list[[i]]=density(mat[,i], n=64);
		max_density=max(max_density, density_list[[i]]$y);
	}
	cat("Max Density: ", max_density, "\n");

	# Open a blank plot
	# Extra x margin (30% each side) and 3x y headroom leave space for
	# the rotated labels drawn above the curves.
	par(mar=c(5,5,5,1));
	range_span=diff(range);
	plot(0,0, type="n", xlim=c(range[1]-range_span*.3, range[2]+range_span*.3), ylim=c(0, max_density*3),
		xlab="ALR Value", ylab="Density", main="ALR Density for Extracted Categories (Mode Labelled)");
	title(main=title, outer=F, line=.5, cex.main=.85);

	cat_names=colnames(mat);
	colors=rainbow(num_cat, start=0, end=0.65);

	# Plot Densities
	# Each curve is drawn twice: a thick colored line with a thin black
	# outline on top.  The mode of each curve is recorded for labelling.
	label_pos=numeric(num_cat);
	for(i in 1:num_cat){
		xs=density_list[[i]]$x;
		ys=density_list[[i]]$y;
		max_y=max(ys);
		max_y_ix=max(which(ys==max_y));
		x_at_max_y=xs[max_y_ix];
		label_pos[i]=x_at_max_y;
		points(xs,ys, type="l", col=colors[i], lwd=3);
		points(xs,ys, type="l", col="black", lwd=.5);
		#text(x_at_max_y, max_y, cat_names[i], col=colors[i]);
		points(x_at_max_y, max_y, cex=1, pch=16, col=colors[i]);
	}

	# Tweak label positions so they don't overlap
	# Greedy relaxation: repeatedly push any pair of labels closer than
	# 'tol' apart by up to 'max_tweak', forward then backward, until no
	# adjustment is needed.
	sort_ix=order(label_pos);
	label_pos=label_pos[sort_ix];	# Original position
	cat_names=cat_names[sort_ix];
	colors=colors[sort_ix];
	char_size=par()$cxy[1];
	modified=label_pos;		# Tweaked position
	tweaked=T;
	tol=.5;
	while(tweaked){
		tweaked=F;
		max_tweak=max(min(diff(modified)), 0);
		if(max_tweak==0){
			max_tweak=tol/10;
		}
		max_tweak=min(tol/2, max_tweak);
		# Forward adjust
		for(i in 1:(num_cat-1)){
			if(abs(modified[i]-modified[i+1])<tol){
				modified[i+1]=modified[i+1]+max_tweak;
				tweaked=T;
			}
		}
		# Backward adjust
		for(i in num_cat:2){
			if(abs(modified[i]-modified[i-1])<tol){
				modified[i-1]=modified[i-1]-max_tweak;
				tweaked=T;
			}
		}
	}

	# Plot ticks, labels, and connectors
	# A short tick marks the true mode position; a slanted connector
	# leads to the (possibly shifted) rotated label above it.
	for(i in 1:num_cat){
		# vertical tick
		points(c(label_pos[i], label_pos[i]), c(max_density*1.05, max_density*1.10), type="l", col=colors[i]);
		# tick to label
		points(c(label_pos[i], modified[i]), c(max_density*1.1, max_density*1.2), type="l", col=colors[i]);
		text(modified[i]-char_size/2, max_density*1.25, cat_names[i], srt=90, pos=4, xpd=T, col=colors[i]);
	}
}
##############################################################################
# Assign 0's to values smaller than smallest abundance across entire dataset
# (Zero-replacement is currently disabled; kept for reference.)
#min_assay=min(normalized[normalized!=0]);
#cat("Lowest non-zero value: ", min_assay, "\n\n", sep="");
#zero_replacment=min_assay/10;
#normalized[normalized==0]=zero_replacment;
##############################################################################
# Ensure at least one category is left over as the ALR denominator.
if(num_top_taxa>= num_taxa){
	num_top_taxa = (num_taxa-1);
	cat("Number of taxa to work on was changed to: ", num_top_taxa, "\n");
}
##############################################################################
# Output the factor correlations
# plot_correl_heatmap / plot_text are helpers defined earlier in this file.
if(ncol(factor_correlations)>0){
	plot_correl_heatmap(factor_correlations, title="Factor Correlations");
}else{
	plot_text("Number of ordinal factors in model is zero, so no factor correlation heatmap was generated.");
}
##############################################################################
# Output reference factor levels
# Build a text page listing the reference (first) level of each factor,
# the sample count, and a summary() of the metadata, then render it into
# the output PDF via plot_text().
text=character();
text[1]="Reference factor levels:";
text[2]="";
factor_names=colnames(factors);
for(i in 1:num_factors){
	fact_levels=levels(factors[,i]);
	if(!is.null(fact_levels)){
		fact_info=paste(factor_names[i], ": ", fact_levels[1], sep="");
	}else{
		fact_info=paste(factor_names[i], ": None (ordered factor)", sep="");
	}
	text=c(text, fact_info);
}
text=c(text, "");
text=c(text, paste("Number of Samples: ", num_samples, sep=""));
text=c(text, "");
text=c(text, "Description of Factor Levels and Samples:");
# Temporarily narrow the console width so the captured summary() output
# fits on the plotted page, then restore the previous setting.
width_orig=options()$width;
options(width=80);
text=c(text, capture.output(summary(factors)));
options(width=width_orig);
plot_text(text);
##############################################################################
cat("Extracting: ", num_top_taxa, " + 1 (remaining) categories and additional categories.\n", sep="");
# Perform ALR transform
# Pull out the top categories (plus any user-requested extras) and apply
# the additive log-ratio transform with "Remaining" as the denominator.
responses=extract_top_categories(normalized, num_top_taxa, additional_cat=additional_categories);
resp_alr_struct=additive_log_rato(responses);
transformed=resp_alr_struct$transformed;
num_cat_to_analyze=ncol(transformed);
sorted_taxa_names=colnames(transformed);
cat("Num ALR Categories to Analyze: ", num_cat_to_analyze, "\n", sep="");
# Density plots: all categories at once, then split into top/bottom halves
# by average abundance so crowded plots remain readable.
plot_overlapping_density(transformed, title="All");
bottom_half=ceiling(num_cat_to_analyze/2) : num_cat_to_analyze;
top_half=1:floor(num_cat_to_analyze/2);
plot_overlapping_density(transformed[,top_half], title="Top Half by Avg Abundance");
plot_overlapping_density(transformed[,bottom_half], title="Bottom Half by Avg Abundance");
##############################################################################
# Try to perform MANOVA
# Fit a multivariate linear model (all ALR columns at once as the response)
# and attempt anova() on it.  Both steps are wrapped in tryCatch because
# random-effect-style formulas or degenerate designs make them fail.
model_string= paste("transformed ~", model_pred_str);
cat("\nFitting this multivariate model: ", model_string, "\n");
text=character();
mv_fit=tryCatch({
	mv_fit=lm(as.formula(model_string), data=factors);
}, error = function(e){
	# On failure, report to the PDF and continue; mv_fit will be NULL.
	print(e);
	text[1]="Could not perform multivariate anova on your data because the formula";
	text[2]="did not appear to be a fixed effect only model.";
	plot_text(text);
	cat("(There will be no mv_fit data structure to compute on.)\n");
});
manova_trial=tryCatch({
	manova_res=anova(mv_fit);
	res=list();
	res[["manova"]]=manova_res;
	res[["error"]]=NULL;
	res;
}, error = function(e){
	# Keep the error object so it can be reported on the summary page.
	res=list();
	res[["manova"]]=NULL;
	res[["error"]]=e;
	res;
});
manova_success=ifelse(is.null(manova_trial[["manova"]]), F, T);
if(manova_success){
	manova_res=manova_trial[["manova"]];
	print(manova_res);
	cat("\n");
	manova_txt=capture.output(manova_res);
}else{
	manova_txt=paste("Error performing MANOVA: ", manova_trial[["error"]], sep="");
}
# Compose the MANOVA summary page; the perl lookbehind regex wraps the
# model string into 80-character chunks so long formulas fit the page.
text[1]=paste("Multivariate Regression with ", num_cat_to_analyze, " taxa", sep="");
text[2]=paste("Proportion of top overall mean abundance represented: ", prop_abundance_represented, sep="");
text[3]="";
text=c(text, strsplit(model_string, "(?<=.{80})", perl=T)[[1]]);
text=c(text, "");
text=c(text, manova_txt);
plot_text(text);
###############################################################################
# Compute taxonomic correlations and pvalues
# Compute and Plot Taxonomic correlations
cor_mat=cor(transformed);
print(cor_mat);
plot_correl_heatmap(cor_mat, title="Category Correlations");
# Compute pvalues for correlations, Null Hypothesis is cor=0
# Only the lower triangle is tested (correlation is symmetric); each
# p-value is mirrored into both halves of the matrix, and also collected
# into a flat vector for multiple-testing adjustment below.
cat("Computing P-values for Category Correlations...\n");
pval_matrix=matrix(NA, ncol=num_cat_to_analyze, nrow=num_cat_to_analyze);
colnames(pval_matrix)=colnames(transformed);
rownames(pval_matrix)=colnames(transformed);
pval_vect=numeric(num_cat_to_analyze*(num_cat_to_analyze-1)/2);
num_corr_to_test=0;
for(i in 2:num_cat_to_analyze){
	for(j in 1:(i-1)){
		pval=cor.test(transformed[,i],transformed[,j])$p.value;
		pval_matrix[i,j]=pval;
		pval_matrix[j,i]=pval;
		num_corr_to_test=num_corr_to_test+1;
		pval_vect[num_corr_to_test]=pval;
	}
}
# Plot pvalues and log10(pvalues);
plot_correl_heatmap(pval_matrix, title="Unadjusted Correlation P-values");
plot_correl_heatmap(log10(pval_matrix), title="Unadjusted Correlation Log10(P-values)");
# FDR adjust pvalues
# (Despite the variable names, the adjustment method used is Holm.)
cat("Adjusting P-values for Multiple Testing using Holm.\n");
adjust_pval_vect=p.adjust(pval_vect, method="holm");
# Unpack the adjusted vector back into a symmetric matrix, walking the
# lower triangle in the same order it was filled above.
fdr_pval_matrix=matrix(NA, ncol=num_cat_to_analyze, nrow=num_cat_to_analyze);
colnames(fdr_pval_matrix)=colnames(transformed);
rownames(fdr_pval_matrix)=colnames(transformed);
num_corr_to_test=0;
for(i in 2:num_cat_to_analyze){
	for(j in 1:(i-1)){
		num_corr_to_test=num_corr_to_test+1;
		fdr_pval_matrix[i,j]=adjust_pval_vect[num_corr_to_test];
		fdr_pval_matrix[j,i]=fdr_pval_matrix[i,j];
	}
}
# Plot Adjust p-values
plot_correl_heatmap(fdr_pval_matrix, title="Holm Adjusted Correlation P-values");
plot_correl_heatmap((fdr_pval_matrix<0.05)*cor_mat, title="Significant (<0.05) Correlation Holm Adjusted P-values",
	noPrintZeros=T, guideLines=T);
##############################################################################
# Fit one univariate linear model per ALR category (category as response,
# the shared predictor string as the model), collecting coefficients,
# coefficient p-values, ANOVA p-values, model-fit p-values, and R^2.
uv_fit=list();
#model_string=paste("transformed[,1] ~", paste(factor_names, collapse=" + "));
#model_matrix=model.matrix(as.formula(model_string), data=factors);
#model_variables=attributes(model_matrix)$dimnames[[2]];
#num_model_variables=length(model_variables);
#print(model_variables);
#print(num_model_variables);
# Determine many many ANOVA coefficients were analyzed, and store p-values in matrix
# NOTE(review): model_string here still holds the multivariate formula from
# the MANOVA section above; its model matrix is used only to size and name
# the coefficient result matrices.
model_matrix=(model.matrix(as.formula(model_string), data=factors));
num_coeff=ncol(model_matrix);
cat("Number of Coefficients Expected: ", num_coeff, "\n");
coeff_names=colnames(model_matrix);
uv_pval_mat=matrix(NA, nrow=num_coeff, ncol=num_cat_to_analyze,
	dimnames=list(coeff_names, sorted_taxa_names[1:num_cat_to_analyze]));
uv_coeff_mat=matrix(NA, nrow=num_coeff, ncol=num_cat_to_analyze,
	dimnames=list(coeff_names, sorted_taxa_names[1:num_cat_to_analyze]));
# Fit one throwaway univariate model to learn which terms anova() reports.
tmp_model_string= paste("transformed[,1] ~", model_pred_str);
test_uv_fit=lm(as.formula(tmp_model_string), data=factors);
anova_factor_names=setdiff(rownames(anova(test_uv_fit)), c("Residuals", "(Intercept)"));
print(anova_factor_names);
uv_anova_pval_mat=matrix(NA, nrow=length(anova_factor_names), ncol=num_cat_to_analyze,
	dimnames=list(anova_factor_names, sorted_taxa_names[1:num_cat_to_analyze]));
uv_model_fit_pval_mat=matrix(NA, ncol=num_cat_to_analyze, nrow=1,
	dimnames=list("p-value", sorted_taxa_names[1:num_cat_to_analyze]));
# Per-category intercept-only statistics (mean and its standard error).
alr_mean=numeric(num_cat_to_analyze);
alr_stderr=numeric(num_cat_to_analyze);
# Store R^2 for each taxa
rsqrd=numeric(num_cat_to_analyze);
adj_rsqrd=numeric(num_cat_to_analyze);
for(var_ix in 1:num_cat_to_analyze){
	summary_txt=character();
	cat("\n");
	cat("##########################################################################\n");
	cat("# #\n");
	cat("# ", sorted_taxa_names[var_ix], "\n");
	cat("# #\n");
	cat("##########################################################################\n");
	ALR_Abundance=transformed[,var_ix];
	model_string= paste("ALR_Abundance ~", model_pred_str);
	cat("Fitting: ", model_string, "\n");
	alr_mean[var_ix]=mean(ALR_Abundance);
	alr_stderr[var_ix]=sd(ALR_Abundance)/sqrt(length(ALR_Abundance));
	# Compute Univariate fit
	uv_fit[[var_ix]]=lm(as.formula(model_string), data=factors);
	#print(uv_fit[[var_ix]]);
	# Analyze fit
	uv_summ=summary(uv_fit[[var_ix]]);
	# Save overall fit
	rsqrd[var_ix]=uv_summ$r.squared;
	adj_rsqrd[var_ix]=uv_summ$adj.r.squared;
	# Perform univariate ANOVA
	uv_anova=anova(uv_fit[[var_ix]]);
	avail_coef_names=setdiff(rownames(uv_anova), "Residuals");
	uv_anova_pval_mat[avail_coef_names, var_ix]=uv_anova[avail_coef_names, "Pr(>F)"];
	# Identify which coefficients could be used
	# Indexing by name means inestimable (NA) coefficients simply stay NA
	# in the preallocated result matrices.
	regress_table=uv_summ$coefficients;
	avail_coef_names=rownames(regress_table);
	uv_coeff_mat[avail_coef_names ,var_ix]=regress_table[avail_coef_names, "Estimate"];
	uv_pval_mat[avail_coef_names ,var_ix]=regress_table[avail_coef_names, "Pr(>|t|)"];
	# Model p-values
	# Overall model F-test p-value derived from the summary's F statistic.
	print(uv_summ$fstatistic);
	uv_model_fit_pval_mat[1, var_ix]=
		1-pf(uv_summ$fstatistic["value"], uv_summ$fstatistic["numdf"], uv_summ$fstatistic["dendf"]);
}
# Echo all collected univariate results to the console log.
cat("\nUnivariate Regression Coefficients:\n");
print(uv_coeff_mat)
cat("\nUnivariate Regression P-values:\n");
print(uv_pval_mat)
cat("\nUnivariate ANOVA P-values:\n");
print(uv_anova_pval_mat);
cat("\nUnivariate Model P-values:\n");
print(uv_model_fit_pval_mat);
# Plot univariate ANOVA F tests
anova_factors=rownames(uv_anova_pval_mat);
plot_correl_heatmap(uv_anova_pval_mat, title="Univariate F-Tests Pr(>F)", guideLines=T);
# Remove NAs
# Drop coefficient rows that were inestimable for ANY category; their
# names are kept so they can be reported on a separate page later.
not_estimable=apply(uv_coeff_mat, 1, function(x){ any(is.na(x))});
coef_names_not_estimable=rownames(uv_coeff_mat)[not_estimable];
uv_coeff_mat=uv_coeff_mat[!not_estimable,, drop=F];
uv_pval_mat=uv_pval_mat[!not_estimable,, drop=F];
# remove intercept
uv_coeff_mat=uv_coeff_mat[-1,, drop=F];
uv_pval_mat=uv_pval_mat[-1,, drop=F]
# Plot pvalues
plot_correl_heatmap(uv_pval_mat, title="Univariate Coefficients Pr(>|t|)");
# Plot log(pvalues, 10)
log_uv_pval_mat=log(uv_pval_mat,10);
plot_correl_heatmap(log_uv_pval_mat, title="Univariate Coefficients Log10[Pr(>|t|)]");
# Plot Heatmap
# stats::heatmap needs at least a 2x2 matrix with no NaNs; otherwise skip.
if(ncol(log_uv_pval_mat)>=2 && nrow(log_uv_pval_mat)>=2 && all(!is.nan(log_uv_pval_mat))){
	# Get current graphic settings so we can restore them.
	par_oma_before=par()$oma;
	par_mar_before=par()$mar;
	# Size the heatmap margins from the longest row/column names, capped.
	cname_max_len=max(nchar(colnames(log_uv_pval_mat)));
	rname_max_len=max(nchar(rownames(log_uv_pval_mat)));
	par(oma=c(1,1,1,1));
	num_hm_colors=20;
	cl_hm_colors=(rainbow(num_hm_colors, start=0, end=0.65));
	override_length=10;
	heatmap(log_uv_pval_mat, col=cl_hm_colors, margins=c(min(rname_max_len/3, override_length), min(cname_max_len/2, override_length)));
	# Plot Legend
	# Two stacked color bars: one labelled in log10(p) units, one in p units.
	par(mar=c(10,1,10,1), oma=c(0,0,0,0), mfrow=c(2,1));
	log_uv_pval_range=range(log_uv_pval_mat);
	color_spans=seq(log_uv_pval_range[1], log_uv_pval_range[2], length.out=num_hm_colors);
	barplot(rep(1, num_hm_colors), col=cl_hm_colors, yaxt="n",
		space=0, names.arg=sprintf("%3.4f", color_spans), las=2, main="HeatMap Legend (Log10[p-values])");
	barplot(rep(1, num_hm_colors), col=cl_hm_colors, yaxt="n",
		space=0, names.arg=sprintf("%3.4f", 10^color_spans), las=2, main="HeatMap Legend (p-values)");
	# Restore prior graphics settings
	par(oma=par_oma_before, mar=par_mar_before, mfrow=c(1,1));
}else{
	cat("No heatmap generated because p-value matrix is not multi-dimensional or all Nan.\n");
}
# Plot log pvalues sorted by most signficiant predictor and taxa
pred_ix=order(apply(log_uv_pval_mat, 1, mean));
taxa_ix=order(apply(log_uv_pval_mat, 2, mean));
plot_correl_heatmap(log_uv_pval_mat[pred_ix, taxa_ix, drop=F],
	title="Sorted Univariate Coefficients Log10[Pr(>|t|)]", guideLines=T);
# Plot R^2
rsqrd_mat=rbind(rsqrd, adj_rsqrd);
rownames(rsqrd_mat)=c("R^2", "Adjusted R^2");
colnames(rsqrd_mat)=sorted_taxa_names[1:num_cat_to_analyze];
plot_correl_heatmap(rsqrd_mat, title="Univariate R Squared");
# Plot Univariate Model F-stat
plot_correl_heatmap(uv_model_fit_pval_mat, title="Univariate Model Fit F-stat P-values");
# Plot univariate coefficients
not_na_coeff=apply(uv_coeff_mat, 1, function(x){!any(is.na(x))});
plot_correl_heatmap(uv_coeff_mat[not_na_coeff,, drop=F], title="Univariate Coefficients", guideLines=T);
mask_matrix=function(val_mat, mask_mat, mask_thres, mask_val){
	# Censor entries of val_mat whose parallel entry in mask_mat exceeds
	# mask_thres, replacing them with mask_val.  Typical use: blank out
	# regression coefficients whose p-values are above a significance
	# cutoff before plotting them.
	above_threshold=mask_mat>mask_thres;
	return(replace(val_mat, above_threshold, mask_val));
}
# Plot significant coefficients at various pvalue cutoffs
# Coefficients failing each cutoff are zeroed via mask_matrix so the
# heatmap only shows significant effects.
signf_coef=mask_matrix(uv_coeff_mat, uv_pval_mat, .1, 0);
plot_correl_heatmap(signf_coef, "Significant Coefficients at p-value < 0.10", noPrintZeros=T, guideLines=T);
signf_coef=mask_matrix(uv_coeff_mat, uv_pval_mat, .05, 0);
plot_correl_heatmap(signf_coef, "Significant Coefficients at p-value < 0.05", noPrintZeros=T, guideLines=T);
signf_coef=mask_matrix(uv_coeff_mat, uv_pval_mat, .01, 0);
plot_correl_heatmap(signf_coef, "Significant Coefficients at p-value < 0.01", noPrintZeros=T, guideLines=T);
# Plot table of significant associations
# signf_as_table is a helper defined elsewhere in this file.
stab=signf_as_table(uv_coeff_mat, uv_pval_mat);
options(width=1000);
plot_text(capture.output(print(stab, quote=F)), .8);
# Significant Univariate Coefficients
# FDR-adjust all coefficient p-values jointly, then zero out coefficients
# that do not survive the adjusted 0.05 cutoff.
uv_pval_vect=as.vector(uv_pval_mat);
adj_uv_pval_vect=p.adjust(uv_pval_vect, method="fdr");
adj_uv_pval_mat=matrix(adj_uv_pval_vect, ncol=ncol(uv_pval_mat));
sig_coeff=(adj_uv_pval_mat<0.05)*uv_coeff_mat[not_na_coeff,, drop=F];
plot_correl_heatmap(sig_coeff, "Significant (FDR) Coefficients", noPrintZeros=T, guideLines=T);
if(length(coef_names_not_estimable)){
	# Output not estimable coefficients
	out_lines=character();
	out_lines[1]="Coefficients not calculatable:";
	out_lines[2]="";
	out_lines=c(out_lines, coef_names_not_estimable);
	plot_text(out_lines);
}
# Write Top categories that have changed to file
write_top_categorical_effects_by_factor(paste(OutputRoot,".top_effects.csv", sep=""), uv_coeff_mat, uv_pval_mat, top_n=20);
# Write coefficient p-values to file
# Matrices are transposed so rows are categories and columns are predictors.
write.table(t(uv_pval_mat), file=paste(OutputRoot, ".alr_as_resp.pvals.tsv", sep=""),
	sep="\t", quote=F, col.names=NA, row.names=T);
write.table(t(uv_coeff_mat), file=paste(OutputRoot, ".alr_as_resp.coefs.tsv", sep=""),
	sep="\t", quote=F, col.names=NA, row.names=T);
##############################################################################
reg_coef_power=function(uv_reg_fit, factor=10, alpha=.05, power=.8){
	# Post-hoc power/sample-size calculation for each regression
	# coefficient of a fitted lm.  For every non-intercept coefficient it
	# estimates the sample size N needed to detect an effect of the given
	# fold-change 'factor' at the requested alpha and power, iterating
	# because the critical t value itself depends on N.
	#
	# uv_reg_fit: an lm fit object.
	# factor: target effect size expressed as a fold change (its log,
	#	scaled by the predictor's range, is used as the coefficient b).
	# alpha, power: conventional test size and desired power.
	# Results are only printed (cat); nothing is returned.
	cat("---------------------------------------------\n");
	cat("Factor (effect size): ", factor, "\n");
	cat("alpha = ", alpha, " / power = ", power, "\n");
	summ_fit=summary(uv_reg_fit);
	coef_names=setdiff(rownames(summ_fit$coefficients), "(Intercept)");
	num_coeff=length(coef_names);
	model_matrix=uv_reg_fit$model;
	n=nrow(model_matrix);
	sigma_reg=summ_fit$sigma;	# Stderr of Residuals
	SSx=numeric(num_coeff);
	sigma_x=numeric(num_coeff);
	sigma_b=numeric(num_coeff);
	needed_sample_size=numeric(num_coeff);
	for(i in 1:num_coeff){
		cur_coef=coef_names[i];
		#cat("Current Coefficient: ", cur_coef, "\n");
		# Interaction terms ("a:b") are reconstructed by multiplying the
		# component columns of the model frame.
		components=strsplit(cur_coef, ":")[[1]];
		x_values=apply(model_matrix[,components, drop=F], 1, prod);
		range_x=range(x_values);
		mean_x=mean(x_values);
		# Sum of squares and derived spread/precision of the predictor.
		SSx[i]=sum((x_values-mean_x)^2);
		sigma_x[i]=sqrt(SSx[i]/n);
		sigma_b[i]=sigma_reg/sqrt(SSx[i]);
		b=log(factor)*diff(range_x);
		# Iterate over N, since the critical t value depends on N
		# Fixed-point iteration starting from N=200, capped at 10 rounds.
		curN=0;
		N=200;
		iterations=0;
		while(curN!=N && iterations<10){
			curN=N;
			df=(curN-(num_coeff+1));
			df=ifelse(df>0, df, 1);
			t_alpha_2=qt(1-(alpha/2), df);
			#t_beta=(abs(b)/sigma_b[i]) - t_alpha_2;
			t_beta=qt(power, df);
			N=ceiling(((t_alpha_2 + t_beta)^2) * ((sigma_reg/(b*sigma_x[i]))^2));
			if(0){
				cat("df: ", df, "\n");
				cat("t_alpha_2: ", t_alpha_2, "\n");
				cat("t_beta: ", t_beta, "\n");
				cat("sigma_reg: ", sigma_reg, "\n");
				cat("sigma_x: ", sigma_x[i], "\n");
				cat("b: ", b, "\n");
				cat("N[", iterations, "]:", N, "\n\n");
			}
			iterations=iterations+1;
		}
		needed_sample_size[i]=N;
		cat("\tsigma_x: ", sigma_x[i], "\n");
		cat("\tsigma_b: ", sigma_b[i], "\n");
		cat("\tEffect: ", b, "\n");
		cat("\t", cur_coef, " N: ", needed_sample_size[i], "\n");
	}
	cat("---------------------------------------------\n");
}
##############################################################################
# Plot univariate analyses
# For every ALR category, emit a set of diagnostic pages: the ANOVA table,
# the coefficient table with significance stars, marginal model plots, and
# a sideways histogram of the ALR values.  A "plot.new" hook stamps the
# current taxon name at the top of every page.
par(oma=c(0,0,3,0));
setHook("plot.new", function(){mtext(sorted_taxa_names[var_ix], outer=T, line=-.5);}, "prepend");
hooks=getHook("plot.new");
for(var_ix in 1:num_cat_to_analyze){
	# Re-install the header hook if something (e.g. mmps below) cleared it.
	if(length(getHook("plot.new"))==0){
		for(hix in 1:length(hooks)){
			setHook("plot.new", hooks[[hix]], "prepend");
		}
	}
	# Output univariate ANOVA results
	summary_txt=c();
	summary_txt[1]="Univariate Regression:";
	summary_txt[2]="";
	summary_txt[3]=paste(var_ix, ".) ", sorted_taxa_names[var_ix], sep="");
	summary_txt[4]="";
	summary_txt[5]=paste("Mean abundance: ", sprintf("%3.1f%%",mean_abund[var_ix]*100), sep="");
	summary_txt[6]="";
	summary_txt[7]=paste("R^2: ", sprintf("%3.4f", rsqrd_mat[1,var_ix]), sep="");
	summary_txt[8]=paste("Adjusted R^2: ", sprintf("%3.4f", rsqrd_mat[2, var_ix]), sep="");
	summary_txt[9]="";
	summary_txt=c(summary_txt, capture.output(anova(uv_fit[[var_ix]])));
	plot_text(summary_txt);
	# Regenerate summary table after removing NAs
	uv_summary=summary(uv_fit[[var_ix]]);
	tmp_dtfr=as.data.frame(uv_summary$coefficients);
	not_estimable=is.na(tmp_dtfr[,"Estimate"]);
	tmp_dtfr=tmp_dtfr[!not_estimable,];
	# Mark the p-values that are significant
	# Assignments cascade so each p-value ends up with the stars of the
	# smallest threshold it satisfies.
	sig_char=function(x){
		s=character(length(x));
		s[x <= .1] = ". ";
		s[x <= .05] = "* ";
		s[x <= .01] = "** ";
		s[x <= .001] = "*** ";
		s[x <= .0001]= "****";
		return(s);
	}
	signif=sig_char(tmp_dtfr[,4]);
	tmp_dtfr=cbind(tmp_dtfr, signif);
	coeff_txt=capture.output(tmp_dtfr);
	# Sigma
	rse_txt=sprintf(
		"Residual standard error: %5.4f on %i degrees of freedom",
		uv_summary$sigma,
		uv_summary$df[2]
	);
	# F-statistics
	fstat_txt=sprintf(
		"F-statistic: %5.4f on %i and %i DF,  p-value: %5.4f",
		uv_summary$fstatistic[1],
		uv_summary$fstatistic[2],
		uv_summary$fstatistic[3],
		1-pf(uv_summary$fstatistic[1], uv_summary$fstatistic[2], uv_summary$fstatistic[3])
	);
	# Optional (disabled) sample-size/power report per coefficient.
	PowerCalc=F;
	if(PowerCalc){
		print( sorted_taxa_names[var_ix]);
		reg_coef_power(uv_fit[[var_ix]]);
	}
	# Build page contents
	summary_txt=c(
		"Intercept-only Fit:",
		paste("  ALR Mean: ", round(alr_mean[var_ix], 4)),
		paste("  ALR Standard Error: ", round(alr_stderr[var_ix], 4)),
		"",
		"Univariate Regression Coefficients for: ",
		paste("   ", sorted_taxa_names[var_ix], sep=""),
		"",
		coeff_txt,
		"",
		rse_txt,
		fstat_txt
	)
	plot_text(summary_txt);
	# Generate MMPs
	# Marginal model plots; mmps is presumably from the car package loaded
	# earlier in the file -- TODO confirm.
	mmps(uv_fit[[var_ix]], main="")
	# Generate sideways histogram
	par(mar=c(5.1,6.1,1,1));
	h=hist(resp_alr_struct$transformed[,var_ix], breaks=20, plot=F);
	barplot(h$counts, horiz=T, names.arg=signif(h$mids, 3), space=0, las=2, cex.names=.75,
		ylab="ALR Transformed Abundance", xlab="Counts", main="");
	#, main=paste(var_ix, ".) ", sorted_taxa_names[var_ix],
	#	sprintf(" [%3.1f%%]",mean_abund[var_ix]*100), sep=""));
	setHook("plot.new", NULL, "replace");
}
#############################################################################
# Close the PDF device that has been receiving all plots above.
dev.off();
##############################################################################
# Output factor information and MANOVA table
# NOTE(review): summary(factors) inside sink() is not printed (its value is
# discarded), so only the MANOVA text actually lands in the log file.
sink(paste(OutputRoot, ".mlr.log.txt", sep=""));
cat("\nFactor information:\n\n");
summary(factors);
cat("\n");
cat(manova_txt, "\n");
sink();
##############################################################################
# Output pvalues
# One row per run: factor names as header, MANOVA Pr(>F) values as data.
if(manova_success){
	manova_col=ncol(manova_res);
	manova_row=nrow(manova_res);
	# Skip the first row (intercept) and the last (residuals).
	factor_names=attributes(manova_res)$row.names[2:(manova_row-1)];
	pvalues=manova_res[factor_names, "Pr(>F)"];
	pval_fname=paste(OutputRoot, ".pval.tsv", sep="");
	fh=file(pval_fname, "w");
	cat(file=fh, "# Filename", "\t", paste(factor_names, collapse="\t"), "\n", sep="");
	cat(file=fh, OutputRoot, "\t", paste(pvalues, collapse="\t"), "\n", sep="");
	close(fh);
}
##############################################################################
# Output univariate regression coefficient
uv_coeff_fname=paste(OutputRoot, ".uv_coeff.tsv", sep="");
write.table(t(uv_coeff_mat), file=uv_coeff_fname, quote=F, sep="\t", col.names=NA, row.names=T);
##############################################################################
# Write MANOVA pvalues to file
# Summary TSV with one row per model variable: name, Pr(>F), significance
# stars (sig_char was defined in the per-taxon loop above and is still in
# scope because R for-loops do not create a new environment).
if(manova_success){
	num_variables=nrow(manova_res)-1;
	outmat=matrix("", nrow=num_variables, ncol=3);
	colnames(outmat)=c(TagName, "Pr(>F)", "Signf");
	varnames=unlist(rownames(manova_res));
	pvals=unlist(manova_res["Pr(>F)"]);
	outmat[,TagName]=varnames[1:num_variables];
	outmat[,"Pr(>F)"]=sprintf("%4.4f", pvals[1:num_variables]);
	outmat[,"Signf"]=sapply(pvals[1:num_variables], sig_char);
}else{
	# Placeholder row when the MANOVA could not be computed.
	outmat=matrix("-", nrow=1, ncol=2);
	colnames(outmat)=c(TagName, "Pr(>F)");
}
write.table(outmat, file=paste(OutputRoot, ".alr_as_resp.anova.summary.tsv", sep=""), sep="\t", quote=F, col.names=T, row.names=F);
##############################################################################
cat("Done.\n");
#dev.off();
print(warnings());
q(status=0);
|
9b808ee5de9913e7cff609c79123b255f707f783 | 92befee27f82e6637c7ed377890162c9c2070ca9 | /R/ccov_np_regression.R | 097741458b0a0ce3ed4e4fec735e159cc32ffcf9 | [] | no_license | alexanderrobitzsch/sirt | 38e72ec47c1d93fe60af0587db582e5c4932dafb | deaa69695c8425450fff48f0914224392c15850f | refs/heads/master | 2023-08-31T14:50:52.255747 | 2023-08-29T09:30:54 | 2023-08-29T09:30:54 | 95,306,116 | 23 | 11 | null | 2021-04-22T10:23:19 | 2017-06-24T15:29:20 | R | UTF-8 | R | false | false | 429 | r | ccov_np_regression.R | ## File Name: ccov_np_regression.R
## File Version: 0.141
ccov_np_regression <- function(x, y, xgrid, bwscale=1.1, smooth=TRUE, score=NULL)
{
    # Conditional mean of y: either a kernel smoother evaluated on xgrid
    # (smooth=TRUE) or plain per-score group means (smooth=FALSE).
    if (!smooth) {
        # One mean of y per unique score value, ordered by score.
        group_means <- stats::aggregate(y, list(score), mean, na.rm=TRUE)
        return(group_means[, 2])
    }
    # Normal-kernel smoother; bandwidth shrinks with sample size at the
    # usual n^(-1/5) rate, scaled by bwscale.
    n_obs <- length(x)
    fit <- stats::ksmooth(x=x, y=y, kernel='normal',
        bandwidth=bwscale*n_obs^(-1/5), x.points=xgrid)
    return(fit$y)
}
|
432a1526fc5182983f4df48ece13fbb8c951859c | dcb3bdd567fb31ff3851bd9a419a6dc4a7a7fcdc | /app.R | 0474c70fd23643decb06846e5e7b0a7edf491a72 | [] | no_license | gonzalezben81/statistics-app | a2fdd2ba6e264d656138227ba0b2676ee587eaff | 9f57ab5916f003ac1015dbc5ae8ba569a98942f2 | refs/heads/master | 2020-04-05T16:39:01.788592 | 2019-03-07T21:41:30 | 2019-03-07T21:41:30 | 157,022,754 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,340 | r | app.R | ## app.R ##
library(shinydashboard)
library(shiny)
library(corrgram)
library(datasets)
library(xlsx)
library(randomForest)
library(readr)
library(dplyr)
library(ggvis)
library(tree)
library(BH)
library(tigerstats)
# Dashboard layout: a sidebar menu of analysis tabs paired with tabItems in
# the body (tabName strings must match between menuItem and tabItem).
# NOTE(review): choices = names(df) refers to a global `df` that is likely
# empty at startup; the server populates every picker via updateSelectInput
# once a file is uploaded — confirm no global `df` is expected.
# NOTE(review): the "##Tab Item N" numbering comments drift between the
# sidebar and the body; trust tabName, not the numbers.
ui <- dashboardPage(skin = "blue",
  # skin = "green",
  dashboardHeader(title = "Data Analysis"),
  dashboardSidebar(
    sidebarMenu(
      ##Tab Item One
      menuItem("File Upload",tabName = "file",icon = icon("file-excel-o")),
      ##Tab Item Two
      menuItem("Plot", tabName = "plot1", icon = icon("line-chart")),
      ##Tab Item Three
      menuItem("Table", tabName = "table", icon = icon("table")),
      ##Tab Item Four
      menuItem("Cross Tabulation",tabName = "crosstab", icon = icon("table")),
      ##Tab Item Five
      menuItem("Summary",tabName = "summary",icon = icon("list-alt")),
      ##Tab Item Six
      menuItem("Correlation",tabName = "correlation",icon = icon("gears")),
      ##Tab Item Seven
      menuItem("Correlation Matrix",tabName = "correlationm",icon = icon("th")),
      ##Tab Item Eight
      menuItem("Correlogram",tabName = "correlogram",icon = icon("picture-o")),
      ##Tab Item Nine
      menuItem("Simple Linear Regression",tabName = "linearregression",icon = icon("line-chart")),
      ##Tab Item Ten
      menuItem("Multiple Linear Regression",tabName = "linearregressionm",icon = icon("line-chart")),
      ##Tab Item Eleven
      menuItem("Histogram", tabName = "histogram",icon = icon("bar-chart")),
      ##Tab Item Twelve
      menuItem("Interactive Plot",tabName = "ggvis",icon = icon("bar-chart")),
      ##Tab Item Thirteen
      menuItem("Random Forest:Under Construction",tabName = "randomforest",icon = icon("line-chart"))
    )),
  dashboardBody(
    # tags$head(includeScript("www/google-analytics.js")),
    tabItems(
      ## File upload tab: CSV options (separator/quote/header) + download
      tabItem(tabName = "file",
        fileInput("file", label = h3("File input: CSV/Text",multiple = FALSE,accept = NULL,width=NULL),
                  accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv','.xlsx')),
        radioButtons('sep', 'Separator',c(Comma=',',Semicolon=';',Tab='\t'),','),
        radioButtons('quote', 'Quote',c(None='','Double Quote'='"','Single Quote'="'"),'"'),
        # radioButtons(inputId = 'fileext',label = 'File Extension',choices = c('read.csv','read.xlsx')),
        checkboxInput("header", "Header", TRUE),
        # NOTE(review): 'downloadData' has no matching downloadHandler in server
        downloadButton('downloadData', 'Download')),
      ## Scatter plot tab
      tabItem(
        tabName = "plot1",solidHeader = TRUE,
        fluidRow(box(title = "Plot Controls",
          selectInput(inputId = "download1",label = "Choose Format",choices = list("png","pdf","bmp","jpeg")),
          selectInput("xcol", "X Variable",choices = names(df)),
          selectInput("ycol", "Y Variable", choices = names(df)),
          # "point" drives the pch plotting symbol (0-25)
          sliderInput("point","Point Type",min=0, max=25,value = 19),
          numericInput("size","Point size",3,min=0.5,max = 10),
          selectInput(inputId = "line",label = "Line Type",choices = list("Line"="l","Points"="p","Stairs"="s"),selected = "p"),
          selectInput(inputId = 'color',label = 'Color', choices = list("Blue","Red","Yellow","Green","Black","Orange","Pink","Brown","LightGreen","LightBlue","LightGrey"),
                      selected = "Black"),collapsible = TRUE),
          downloadButton('download', 'Download Plot')),
        # downloadButton('download', 'Download Plot'),
        # box(title = "Download",radioButtons(inputId = "download1",label = "Choose Format",choices = list("png","pdf")),
        #     downloadButton('download', 'Download Plot'),collapsible = TRUE,collapsed = TRUE),
        box(title = "Data Plot",
            plotOutput("plot1"),width = "auto",height = "auto",collapsible = TRUE,collapsed = TRUE)
      ),
      ## Raw data table tab
      tabItem(tabName = "table",
        fluidRow(
          tableOutput("table"))),
      ## Column summaries tab
      tabItem(tabName = "summary",
        fluidRow(
          box(title = "Data Summary",solidHeader = TRUE,background = "light-blue",
              verbatimTextOutput("summary"),width = "auto",height = "auto")),
        # radioButtons(inputId = "download2",label = "Choose Format",choices = list("txt","csv")),
        downloadButton('downloadtwo', 'Download Summary')),
      ## Pairwise correlation tab
      tabItem(tabName = "correlation",
        box(
          fluidRow(box(title = "Correlation: Select Variables Below",
            selectInput("xcol7", "X Variable",choices = names(df)),
            selectInput("ycol7", "Y Variable", choices = names(df)))),
          box(title = "Correlation: Pearson",
              verbatimTextOutput("correlation")),width = "auto",height = "auto")),
      ## Full correlation matrix tab
      tabItem(tabName = "correlationm",
        box(title = "Correlation Matrix",solidHeader = TRUE,background = "light-blue",
            verbatimTextOutput("correlationm"),width = "auto",height = "auto")),
      ## Correlogram tab (panel choices map to corrgram panel functions)
      tabItem(tabName = "correlogram",
        selectInput(inputId = "panel",label = "Correlogram Type",choices = list("Bar"="panel.bar","Conf"="panel.conf","panel.corr","Density"="panel.density","Ellipse"="panel.ellipse","MinMax"="panel.minmax","Pie"="panel.pie","Points"="panel.pts","Shade"="panel.shade","Text"="panel.text"),selected = "panel.shade"),
        box(title = "Correlogram",solidHeader = TRUE,
            plotOutput("corr"),width = 400,height = 600,collapsible = TRUE,collapsed = TRUE)),
      ## Simple linear regression tab
      tabItem(tabName = "linearregression",
        selectInput("xcol1", "X Variable",choices = names(df)),
        selectInput("ycol2", "Y Variable", choices = names(df)),
        box(title = "Simple Linear Regression Output:",solidHeader = TRUE,background = "light-blue",
            verbatimTextOutput("summarylm"),width = 300)),
      ## Multiple linear regression tab (exactly four predictors)
      tabItem(tabName = "linearregressionm",
        selectInput("xcol3", "Predictor One",choices = names(df)),
        selectInput("xcol4", "Predictor Two",choices = names(df)),
        selectInput("xcol5", "Predictor Three",choices = names(df)),
        selectInput("xcol6", "Predictor Four",choices = names(df)),
        selectInput("ycol3", "Dependent", choices = names(df)),
        box(title = "Multiple Linear Regression Output:",solidHeader = TRUE,background = "light-blue",
            verbatimTextOutput("summarylmmulti"),width = 300)),
      ## Histogram tab
      tabItem(tabName = "histogram",
        fluidPage(
          box(solidHeader = TRUE,background = "light-blue",
            selectInput("xcol8", "Histogram Variable",choices = names(df)),
            selectInput(inputId = 'colortwo',label = 'Color', choices = list("Blue","Red","Yellow","Green","Black","Orange","Pink","Brown","LightGreen","LightBlue","LightGrey"),
                        selected = "lightblue"),
            numericInput("bins","Number of Bins",10,min=1,max = 50),
            selectInput(inputId = "download3",label = "Choose Format",choices = list("png","pdf","bmp","jpeg")),
            downloadButton('downloadthree', 'Download Histogram')),
          box(title = "Histogram",solidHeader = TRUE,background = "light-blue",
              plotOutput("hist"),collapsible = TRUE)
        )),
      ## ggvis interactive plot tab ('x'/'y' placeholders replaced server-side)
      tabItem(tabName = "ggvis",
        fluidPage(
          selectInput('x', 'x:' ,'x'),
          selectInput('y', 'y:', 'y'),
          sliderInput("size1","Point size",100,min=100,max = 400),
          selectInput(inputId = 'color1',label = 'Color', choices = list("Blue","Red","Yellow","Green","Black","Orange","Pink","Brown","LightGreen","LightBlue","LightGrey"),
                      selected = "lightblue"),
          uiOutput("plot_ui"),
          box(
            ggvisOutput("plot"),width = 400))),
      ## Random forest tab (server-side implementation is commented out)
      tabItem(tabName = "randomforest",
        fluidPage(
          selectInput("randomcol", "Random Forest Predictor: Under Construction...Please Check Back Soon",choices = names(df)),
          box(plotOutput("randomforest"))
        )),
      ## Cross tabulation tab
      tabItem(tabName = "crosstab",
        fluidPage(
          selectInput(inputId = "tab1",label = "Columns",choices = names(df)),
          selectInput(inputId = "tab2",label = "Rows",choices = names(df)),
          box(title = "Cross Tabulation",verbatimTextOutput("crosstab"),
              downloadButton('downloadcrosstab', 'Download Cross Tabulation')),
          box(title = "Column Percentages:",verbatimTextOutput("crosstabcolperc"),
              collapsible = TRUE,collapsed = TRUE),
          box(title = "Row Percentages:",verbatimTextOutput("crosstabrowperc"),
              collapsible = TRUE,collapsed = TRUE)
        ))
    )))
server <- function(input, output,session) {
  # Server logic for the data-analysis dashboard.  The uploaded CSV is the
  # single data source; several near-identical reactives re-read it so that
  # each tab can also refresh its own selectInput choices as a side effect.
  #
  # Fixes vs. the previous version:
  #  * every re-read of the upload now honours input$sep / input$quote
  #    (previously only data() did, so tabs disagreed on non-comma files);
  #  * the plot download used the non-existent input$type for pch (the
  #    slider id is "point"), so downloads never matched the screen;
  #  * the histogram download labelled axes with input$xcol instead of the
  #    histogram's own input$xcol8.

  options(shiny.maxRequestSize = 25*1024^2)  # allow uploads up to 25 MB

  ## Main upload reader; also (re)populates the Plot tab variable pickers.
  data <- reactive({
    req(input$file) ## ?req # require that the input is available
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'xcol', label = 'X Variable',
                      choices = names(df), selected = names(df))
    updateSelectInput(session, inputId = 'ycol', label = 'Y Variable',
                      choices = names(df), selected = names(df))
    return(df)
  })

  ## Second reactive: populates the Simple Linear Regression pickers ####
  dataset <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'xcol1', label = 'X Variable',
                      choices = names(df))
    updateSelectInput(session, inputId = 'ycol2', label = 'Y Variable',
                      choices = names(df))
    return(df)
  })

  ## Third reactive: populates the Multiple Linear Regression pickers ####
  datamultiple <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'xcol3', label = 'Predictor One',
                      choices = names(df))
    updateSelectInput(session, inputId = 'xcol4', label = 'Predictor Two',
                      choices = names(df))
    updateSelectInput(session, inputId = 'xcol5', label = 'Predictor Three',
                      choices = names(df))
    updateSelectInput(session, inputId = 'xcol6', label = 'Predictor Four',
                      choices = names(df))
    updateSelectInput(session, inputId = 'ycol3', label = 'Dependent',
                      choices = names(df))
    return(df)
  })

  ## Fourth reactive: populates the Correlation pickers
  datasetcor <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'xcol7', label = 'X Variable',
                      choices = names(df))
    updateSelectInput(session, inputId = 'ycol7', label = 'Y Variable',
                      choices = names(df))
    return(df)
  })

  ## Histogram reactive: populates the Histogram variable picker
  datasethist <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'xcol8', label = 'Histogram Variable',
                      choices = names(df))
    return(df)
  })

  ## Random Forest file reader (tab is under construction; this reactive is
  ## currently never triggered because no output references it) ##############
  datarandom <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    updateSelectInput(session, inputId = 'randomcol', label = 'Random Forest Predictor',
                      choices = names(df))
    return(df)
  })

  # NOTE(review): summarydata is not referenced by any output; the
  # summary(df) value is discarded.  Kept for backward compatibility.
  summarydata <- reactive({
    req(input$file)
    inFile <- input$file
    df <- read.csv(inFile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    summary(df)
    return(df)
  })

  ## Scatter plot of the two selected columns
  output$plot1 <- renderPlot({
    par(mar = c(5.1, 4.1, 0, 1))
    x <- data()[, c(input$xcol, input$ycol)]
    plot(x,col=input$color,pch = input$point,type = input$line,cex=input$size)
  })

  ## Raw data table (now read with the same sep/quote options as data())
  output$table <- renderTable({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
  })

  ## Text summary of every column
  output$summary <- renderPrint({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    yowsa<- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
    summary(yowsa)
  })

  ## Keep the cross-tabulation pickers in sync with the uploaded columns
  observe({
    updateSelectInput(session,inputId = "tab1",label = "Columns", choices = sort(as.character(colnames(data()))))
    updateSelectInput(session, inputId = "tab2",label = "Rows", choices = sort(as.character(colnames(data()))))
  })

  output$crosstab <- renderPrint({
    # validate() holds the output until both pickers have a value
    validate(need(input$tab2,''),
             need(input$tab1,''))
    with(data(), table(get(input$tab2),get(input$tab1)))
  })

  output$crosstabcolperc <- renderPrint({
    validate(need(input$tab2,''),
             need(input$tab1,''))
    colPerc(with(data(), table(get(input$tab2),get(input$tab1))))
  })

  output$crosstabrowperc <- renderPrint({
    validate(need(input$tab2,''),
             need(input$tab1,''))
    rowPerc(with(data(), table(get(input$tab2),get(input$tab1))))
  })

  ## Summary as a data frame, used only by the summary download
  summarytwo <- reactive({
    inFile <- input$file
    yowsa<- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
    as.data.frame.matrix(summary(yowsa))
  })

  ## Cross tabulation for the download button; uses xtabs on the same two
  ## columns that the on-screen with()/table() output displays.
  crossout <- reactive({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    xtabs(as.formula(paste0("~",input$tab2,"+",input$tab1)), data())
  })

  ## Download buttons ################################################
  output$download <- downloadHandler(
    filename = function() { paste("Plot ",input$xcol," by ",input$ycol,input$download1,sep = ".") },
    content = function(file) {
      if(input$download1=="png")
        png(file)
      else if (input$download1=="jpeg")
        jpeg(file)
      else if (input$download1=="bmp")
        bmp(file)
      else if (input$download1=="pdf")
        pdf(file)
      # pch comes from the "point" slider, matching the on-screen plot
      # (the previous input$type did not exist and yielded pch = NULL)
      plot(data()[, c(input$xcol, input$ycol)],col=input$color,pch = input$point,type = input$line,cex=input$size,main = paste(input$xcol," by ",input$ycol))
      dev.off()
    })

  output$downloadthree <- downloadHandler(
    filename = function() { paste("Histogram",input$xcol8,input$download3,sep = ".") },
    content = function(file) {
      if(input$download3=="png")
        png(file)
      else if (input$download3=="jpeg")
        jpeg(file)
      else if (input$download3=="bmp")
        bmp(file)
      else if (input$download3=="pdf")
        pdf(file)
      # labels now use the histogram's own variable picker (xcol8),
      # matching the on-screen histogram below
      hist(as.numeric(datasethist()[,input$xcol8]), col=input$colortwo,breaks = input$bins,xlab = input$xcol8,main = input$xcol8,border = input$bordercolor,
           freq = TRUE)
      dev.off()
    })

  # NOTE(review): no ui button is wired to "down", and the ui's
  # 'downloadData' button has no handler here — confirm which is intended.
  output$down <- downloadHandler(
    filename = function() { paste(input$file, sep='') },
    content = function(file) {
      write.csv(data(), file)
    })

  output$downloadtwo <- downloadHandler(
    filename = function() { paste("Summary",input$file, sep='.') },
    content = function(file) {
      write.csv(summarytwo(), file)
    })

  output$downloadcrosstab <- downloadHandler(
    filename = function() { paste("Crosstab",input$tab1,"BY",input$tab2,input$file, sep='.') },
    content = function(file) {
      write.csv(crossout(),file,col.names = TRUE,row.names = TRUE)
    })

  ## Statistics outputs ##############################################
  ## Pairwise correlation of the two selected columns
  output$correlation <- renderPrint({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    correlation<- datasetcor()[,c(input$xcol7,input$ycol7)]
    cor(correlation)
  })

  ## Full correlation matrix
  # NOTE(review): cor() will error if the upload has non-numeric columns.
  output$correlationm <- renderPrint({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    correlation<- data()
    cor(correlation)
  })

  ## Simple linear regression: ycol2 ~ xcol1
  output$summarylm <- renderPrint({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    linearmodel<- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
    # touching dataset() populates the xcol1/ycol2 pickers (side effect)
    x<-dataset()[,input$xcol1]
    y<-dataset()[,input$ycol2]
    lmone<-lm(as.formula(paste0(input$ycol2,"~",input$xcol1)),data = linearmodel)
    print(paste0("Formula:",input$ycol2," ~ ",input$xcol1," data: "))
    summary(lmone)
  })

  ### Correlogram
  output$corr <- renderPlot({
    par(mar = c(20, 20, 0, 1))
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    corrfile<- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
    corrgram(corrfile,panel = input$panel,order = TRUE)
  })

  ## Multiple linear regression: ycol3 ~ xcol3 + xcol4 + xcol5 + xcol6
  output$summarylmmulti <- renderPrint({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    linearmodelmulti<- read.csv(inFile$datapath, header = input$header, sep = input$sep, quote = input$quote)
    # touching datamultiple() populates the predictor pickers (side effect)
    p1<-datamultiple()[,input$xcol3]
    p2<-datamultiple()[,input$xcol4]
    p3<-datamultiple()[,input$xcol5]
    p4<-datamultiple()[,input$xcol6]
    y<-datamultiple()[,input$ycol3]
    summary(lm(as.formula(paste0(input$ycol3,"~",input$xcol3,"+",input$xcol4,"+",input$xcol5,"+",input$xcol6)),data=linearmodelmulti))
  })

  ## Histogram of the selected column
  output$hist <- renderPlot({
    inFile <- input$file
    if (is.null(inFile))
      return("Please Upload File")
    x<-as.numeric(datasethist()[,input$xcol8])
    # input$bordercolor has no ui control, so border stays NULL (default)
    hist(x, col=input$colortwo,breaks = input$bins,xlab = input$xcol8,main = input$xcol8,border = input$bordercolor,
         freq = TRUE)
  })

  ## ggvis interactive plot ------------------------------------------
  # Load the data when the user inputs a file
  theData <- reactive({
    infile <- input$file
    if(is.null(infile))
      return(NULL)
    d <- read.csv(infile$datapath, header = input$header,sep = input$sep,quote = input$quote)
    d
  })

  # Dynamic variable names for the ggvis pickers
  observe({
    data<-theData()
    updateSelectInput(session, 'x', label = 'x:',choices = names(data))
    updateSelectInput(session, 'y', label = 'y:',choices = names(data))
  }) # end observe

  # Selected y variable name, used for the plot axis legend
  yVarName<-reactive({
    input$y
  })
  # Selected x variable name, used for the plot axis legend
  xVarName<-reactive({
    input$x
  })

  ## Two-column frame renamed to x/y for ggvis; before any real selection
  ## is made ('x'/'y' placeholders), fall back to a dummy point at origin.
  filteredData<-reactive({
    data<-isolate(theData())
    if(input$x=="x" && input$y=="y"){
      if(is.null(data)){
        data<-data.frame(x=0,y=0)
      }
    }else{
      data<-theData()[,c(input$x,input$y)]
      names(data)<-c("x","y")
    }
    data
  })

  input_size <- reactive(input$size1)
  input_color <- reactive(input$color1)

  ## ggvis plot built reactively so it tracks filteredData
  vis<-reactive({
    plotData<-filteredData()
    # fill is mapped to ~y and then overridden by the fixed colour picker;
    # kept as-is to preserve the existing appearance
    plotData %>%
      ggvis(~x, ~y,fill=~y,size := input_size,fill := input_color) %>%
      layer_points() %>%
      add_axis("y", title = yVarName()) %>%
      add_axis("x", title = xVarName())
  })
  vis%>%bind_shiny("plot", "plot_ui")

  # (a superseded commented-out draft of the ggvis wiring was removed here;
  #  see VCS history if needed)

  ## Random Forest Approach ############################################
  ## Draft implementation for the "Under Construction" tab, kept as the
  ## author's work in progress:
  # output$randomforest <- renderPlot({
  #
  #   library(tree)
  #   datarandom <- datarandom()
  #   names(datarandom)
  #   library(randomForest)
  #   # attach(datarandom())
  #
  #   G3 <- paste0(input$randomcol)
  #   lstMSEs=numeric()
  #   set.seed(1)
  #   maxnumpreds=ncol(datarandom)-1
  #   maxnumtrees=10
  #
  #   for(numpreds in 1:maxnumpreds){
  #     for(numtrees in 1:maxnumtrees){
  #
  #       nrow(datarandom)
  #       train=sample(1:nrow(datarandom),nrow(datarandom)/2)
  #
  #       model.bagged=randomForest(G3~.,data = datarandom,subset = train,mtry=numpreds,ntree=numtrees,importance=TRUE)
  #
  #       subtract <- paste0(datarandom$G3)
  #
  #       pred.vals.bagged=predict(model.bagged,newdata = datarandom[-train])
  #       testvals=datarandom[-train]
  #       mse=mean((pred.vals.bagged - testvals)^2)
  #       lstMSEs=rbind(lstMSEs,mse)
  #       print(paste("  Processed Trees:",numtrees))
  #     }
  #     print(paste(" Processed Predictors:",numpreds))
  #   }
  #
  #   matMSEs=matrix(lstMSEs,nrow = maxnumpreds,ncol=maxnumtrees)
  #
  #   min(lstMSEs)
  #   min(matMSEs)
  #
  #   loc=which(matMSEs==min(matMSEs),arr.ind=TRUE)
  #   print(paste("The optimal configuration is",loc[1],"predictors and",loc[2], "trees"))
  #   matMSEs[loc[1],loc[2]]
  #
  #   which(matMSEs==min(matMSEs),arr.ind = TRUE)
  #   importance(model.bagged)
  #   tree.student1=tree(G3~.,data = datarandom)
  #   plot(model.bagged)
  #   plot(tree.student1)
  #   text(tree.student1,pretty = 0)
  #   varImpPlot(model.bagged)
  #   model.bagged
  #   min(lstMSEs)
  # })
}
# Assemble and launch the app from the ui/server objects defined above.
shinyApp(ui, server)
# ?validColors
|
d6974d833c619cb2c3b33529dcfd9949dd00655d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GCalignR/examples/simple_chroma.Rd.R | 43e3dd8e19efba75a28b2ac0a9c77e7de2392370 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 319 | r | simple_chroma.Rd.R | library(GCalignR)
### Name: simple_chroma
### Title: Simulate simple chromatograms
### Aliases: simple_chroma
### ** Examples
## create a chromatogram
x <- simple_chroma(peaks = c(5,10,15), N = 1, min = 0, max = 30, Names = "MyChroma")
## plot chromatogram
with(x, plot(x,y, xlab = "time", ylab = "intensity"))
|
09939bd270ef42a79aa26182bed695bc336d7a3f | 756a832543f44dff0264cf6efd4ed8b9a27136d7 | /Kombucha_anova.R | bc8303e9dd50da490f3679d7f381328af8539284 | [] | no_license | pebblehut/data650a2 | 1af427f1ce407736b08ba3ad1e15a58637f241c0 | 07e68bb9a31cb8031e0893814a7996994a8f0246 | refs/heads/master | 2021-01-11T00:27:38.659589 | 2016-10-17T03:14:47 | 2016-10-17T03:14:47 | 70,530,602 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,669 | r | Kombucha_anova.R | # Have to run LoadKombucha.R first
# install.packages("multcomp")
library(multcomp)
# install.packages("gplots")
require(gplots)
summary(tweets.df$MESSAGE_RETWEET_COUNT)
summary(tweets.df$USER_FRIENDS_COUNT)
summary(tweets.df$USER_FOLLOWERS_COUNT)
summary(tweets.df$USER_LISTED_COUNT)
summary(tweets.df$USER_STATUSES_COUNT)
attach(tweets.df)
plotmeans(USER_FOLLOWERS_COUNT ~ USER_GENDER)
boxplot(USER_FOLLOWERS_COUNT ~ USER_GENDER)
detach(tweets.df)
tweets.subset.df <- subset(tweets.df,
(tweets.df$USER_FOLLOWERS_COUNT < 300000))
summary(tweets.subset.df$USER_FOLLOWERS_COUNT)
attach(tweets.subset.df)
plotmeans(USER_FOLLOWERS_COUNT ~ USER_GENDER)
boxplot(USER_FOLLOWERS_COUNT ~ USER_GENDER)
boxplot(USER_FOLLOWERS_COUNT ~ USER_GENDER, log = "y")
friend.fit <- aov(USER_FOLLOWERS_COUNT ~ USER_GENDER)
summary(friend.fit)
TukeyHSD(friend.fit)
par(mar=c(5,4,6,2))
tuk <- glht(friend.fit, linfct=mcp(USER_GENDER="Tukey"))
plot(cld(tuk, level=.05),col="lightgrey")
detach(tweets.subset.df)
attach(tweets.df)
plotmeans(USER_FRIENDS_COUNT ~ USER_GENDER)
boxplot(USER_FRIENDS_COUNT ~ USER_GENDER)
detach(tweets.df)
tweets.subset.df <- subset(tweets.df,
(tweets.df$USER_FRIENDS_COUNT < 50000))
summary(tweets.subset.df$USER_FRIENDS_COUNT)
attach(tweets.subset.df)
plotmeans(USER_FRIENDS_COUNT ~ USER_GENDER)
boxplot(USER_FRIENDS_COUNT ~ USER_GENDER)
detach(tweets.subset.df)
friend.fit <- aov(USER_FRIENDS_COUNT ~ USER_GENDER)
summary(friend.fit)
TukeyHSD(friend.fit)
par(mar=c(5,4,6,2))
tuk <- glht(friend.fit, linfct=mcp(USER_GENDER="Tukey"))
plot(cld(tuk, level=.05),col="lightgrey")
detach(tweets.subset.df)
|
9c471466ac67073a17753869736d4271abfeb056 | ed51d150f95a81a17414d45eee082d9bc6edf97c | /tests/testthat/test-existing_intervals.R | 8a1a8adf696895502fe188f8c456308165fe8efe | [] | no_license | ComunidadBioInfo/regutools | 86c058ba68a0aaf21bd39109900759bc9e3804a7 | 03e958710cb4970359a3c9f8ec5635b73f096813 | refs/heads/devel | 2023-06-08T08:07:55.575921 | 2023-03-23T02:03:04 | 2023-03-23T02:03:04 | 174,432,172 | 4 | 8 | null | 2023-06-05T18:01:42 | 2019-03-07T22:49:17 | HTML | UTF-8 | R | false | false | 2,161 | r | test-existing_intervals.R | context("build_condition")
test_that("existing_intervals returns an expected value", {
## Connect to the RegulonDB database if necessary
if (!exists("regulondb_conn")) {
regulondb_conn <- connect_database()
}
## Build a regulondb object
regdb <-
regulondb(
database_conn = regulondb_conn,
organism = "prueba",
genome_version = "prueba",
database_version = "prueba"
)
existing_intervals <- build_condition(
regdb,
dataset = "GENE",
filters = list(
posright = c("2000", "40000"),
posleft = c("2000", "40000")
),
operator = NULL,
interval = c("posright", "posleft"),
partialmatch = NULL
)
expect_match(existing_intervals, ">=")
# Having partial match
existing_intervals_and_pm <-
build_condition(
regdb,
dataset = "GENE",
filters = list(
name = c("ara"),
strand = c("forward"),
posright = c("2000", "40000")
),
operator = "AND",
interval = "posright",
partialmatch = "name"
)
# Type character
expect_type(existing_intervals_and_pm, "character")
# Length 1
expect_length(existing_intervals_and_pm, 1)
# Having more that 2 values for intervales
expect_warning(
build_condition(
regdb,
dataset = "GENE",
filters = list(
posright = c("2000", "40000", "50000"),
name = c("ara")
),
operator = NULL,
interval = "posright",
partialmatch = NULL
),
"Only the first two values of interval will be considered."
)
# Having only one value
expect_error(
build_condition(
regdb,
dataset = "GENE",
filters = list(posright = c("2000")),
operator = NULL,
interval = "posright",
partialmatch = NULL
),
"Two values in the interval filter are required. "
)
})
|
7e2472f59aacb55f5e65bc9758f324472c53041a | c03754bd79a6f9baf6bd55e74483bed1a2dbe8a7 | /R/guessBonds.R | e53ef18d004340d2b51273b3acbd9988175abd9b | [] | no_license | julienide/Atoms | 675ee51c5803bef9868838b1a229dc9d037bce9b | d805bd893a152bd4edc350a2fb18de0ed2d19a3c | refs/heads/master | 2021-04-27T06:57:27.989087 | 2018-06-19T09:09:53 | 2018-06-19T09:09:53 | 122,623,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,035 | r | guessBonds.R | #' #' Guess Bonds
#' #'
#' #' @name guessBonds
#' #' @export
#' guessBonds <- function(x, ...)
#' UseMethod("guessBonds")
#'
#' #' #' @rdname guessBonds
#' #' #' @export
#' #' guessBonds.Atoms <- function(x, radius = NULL, safety = 1.2){
#' #' if(!is.numeric(safety) || length(safety) != 1L)
#' #' stop("'safety' must be a numeric vector of length 1")
#' #'
#' #' if(is.null(radius)){
#' #' if(is.null(x$radius)){
#' #' radius <- rcov(x)
#' #' } else {
#' #' radius <- x$radius
#' #' }
#' #' } else {
#' #' if(!is.numeric(radius))
#' #' stop("'radius' must be a numeric vector")
#' #' if(length(radius) == 1L)
#' #' radius <- rep(radius, natom(x))
#' #' if(length(radius) != natom(x))
#' #' stop("'radius' must be of length ", natom(x))
#' #' }
#' #'
#' #' guessBondsLocal <- function(x, cell, cellInv, pbc){
#' #' if(nrow(x) > 1L){
#' #' nat <- nrow(x)
#' #' dx <- outer(x$x, x$x, "-")
#' #' dy <- outer(x$y, x$y, "-")
#' #' dz <- outer(x$z, x$z, "-")
#' #'
#' #' h <- matrix(0L, nrow = nrow(x), ncol = ncol(x))
#' #' k <- matrix(0L, nrow = nrow(x), ncol = ncol(x))
#' #' l <- matrix(0L, nrow = nrow(x), ncol = ncol(x))
#' #'
#' #' # Not possible to find bond going throught the cell with this approach
#' #' # as bond are computed within small boxes only and not inbetween them
#' #' if(any(pbc)){
#' #' da <- dx*cellInv[1L, 1L] + dy*cellInv[1L, 2L] + dz*cellInv[1L, 3L]
#' #' db <- dx*cellInv[2L, 1L] + dy*cellInv[2L, 2L] + dz*cellInv[2L, 3L]
#' #' dc <- dx*cellInv[3L, 1L] + dy*cellInv[3L, 2L] + dz*cellInv[3L, 3L]
#' #' # print(da[da > 0.5 | da < -0.5])
#' #' if(pbc[1L]){
#' #' h <- floor(da + 0.5)
#' #' da <- da - h
#' #' }
#' #' if(pbc[2L]){
#' #' k <- floor(db + 0.5)
#' #' db <- db - k
#' #' }
#' #' if(pbc[3L]){
#' #' l <- floor(dc + 0.5)
#' #' dc <- dc - l
#' #' }
#' #' dx <- da*cell[1L, 1L] + db*cell[1L, 2L] + dc*cell[1L, 3L]
#' #' dy <- da*cell[2L, 1L] + db*cell[2L, 2L] + dc*cell[2L, 3L]
#' #' dz <- da*cell[3L, 1L] + db*cell[3L, 2L] + dc*cell[3L, 3L]
#' #' }
#' #'
#' #' ds <- dx*dx + dy*dy + dz*dz
#' #' dbond <- outer(x$r, x$r, "+")
#' #' dbond <- dbond*dbond
#' #' M <- lower.tri(ds) & (0.1 < ds) & (ds < dbond)
#' #'
#' #' inds <- matrix(x$atmnumb, nrow = nat, ncol = nat)
#' #' data.frame(
#' #' atm1 = t(inds)[M], atm2 = inds[M],
#' #' h = h[M], k = k[M], l = l[M])
#' #' } else {
#' #' data.frame(
#' #' atm1 = integer(0), atm2 = integer(0),
#' #' h = integer(0), k = integer(0), l = integer(0))
#' #' }
#' #' }
#' #'
#' #' guessBonds <- function(shift, step, x, cell, cellInv, pbc){
#' #' xrange <- range(x$x, na.rm = TRUE) + shift[1L]
#' #' yrange <- range(x$y, na.rm = TRUE) + shift[2L]
#' #' zrange <- range(x$z, na.rm = TRUE) + shift[3L]
#' #'
#' #' xcut <- cut(x$x, seq(xrange[1L], xrange[2L] + step, step), include.lowest = TRUE)
#' #' ycut <- cut(x$y, seq(yrange[1L], yrange[2L] + step, step), include.lowest = TRUE)
#' #' zcut <- cut(x$z, seq(zrange[1L], zrange[2L] + step, step), include.lowest = TRUE)
#' #'
#' #' seq(xrange[1L])
#' #'
#' #' do.call(
#' #' rbind,
#' #' by(x, list(xcut, ycut, zcut),
#' #' guessBondsLocal, cell, cellInv, pbc, simplify = FALSE) )
#' #' }
#' #'
#' #' cell <- cell(x)
#' #' cellInv <- solve(cell)
#' #' pbc <- pbc(x)
#' #' x <- data.frame(x = x$x, y = x$y, z = x$z,
#' #' r = radius*safety, atmnumb = 1:natom(x))
#' #'
#' #' step <- 10.0
#' #' shift <- expand.grid(0:1,0:1,0:1)*step/2L
#' #'
#' #' B <- do.call(
#' #' rbind,
#' #' apply(shift, 1L, guessBonds, step, x, cell, cellInv, pbc) )
#' #'
#' #' B <- unique(B)
#' #' B[order(B$atm1, B$atm2), ]
#' #' } |
70b71506c208fecf7c29ea7d061264320b095b8f | 29585dff702209dd446c0ab52ceea046c58e384e | /ChemoSpec/R/rowDist.R | bb782b0caf050c38384a408dd0670351ac253d9c | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,107 | r | rowDist.R | rowDist <- function(x, method) {
# Function to access a variety of distance methods which turn out to be in different packages,
# except for cosine which is handled here.
# Some code suggested by Roberto Canteri, and used with extremely minor modifications
# Part of ChemoSpec, October 2011, Bryan Hanson, DePauw University
#
# x      : data whose rows are the objects being compared (coerced with
#          as.matrix() for the cosine branch; passed to dist()/amap::Dist()
#          otherwise, which operate on rows)
# method : one of the measures listed in match.arg() below; partial matching
#          is allowed via match.arg()
# value  : an object of class "dist" with the pairwise row measures
method <- match.arg(method, c("pearson", "correlation", "spearman", "kendall",
"euclidean", "maximum", "manhattan", "canberra","binary", "minkowski",
"cosine"))
# Correlation-type measures are provided by the suggested package amap.
if (method %in% c("pearson", "correlation", "spearman", "kendall")) {
if (!requireNamespace("amap", quietly = TRUE)) {
stop("You need to install package amap to use this function/option")
}
distance <- amap::Dist(x, method = method)
}
# The geometric measures are handled by stats::dist().
if (method %in% c("euclidean", "maximum", "manhattan", "canberra","binary", "minkowski")) {
distance <- dist(x, method = method)
}
if ( method == "cosine") { # code by Claudia Beleites/unmixR w/small modifications
# tcrossprod(x) gives the matrix of pairwise row dot products; it is then
# rescaled by the sqrt(outer(...)) term built from rowSums of its squares.
x <- as.matrix(x)
x <- tcrossprod(x)
l <- rowSums(x^2)
l <- sqrt(outer(l, l))
# NOTE(review): this wraps a cosine *similarity*-style quantity in a dist
# object rather than 1 - similarity, and `l` is derived from the Gram
# matrix rows, not diag() -- confirm downstream code expects exactly this.
distance <- as.dist(x/l)
}
return(distance)
}
|
9c2a0422a0ce1757d95822e2a70a46a10069af2e | c686d321ea7e6689eda2596670ea708a29aa64f3 | /run_analysis.R | 983639860c287739146e608d76fe2d99b77406a7 | [] | no_license | grahamsw/GettingAndCleaningDataProject | 2180e35cb712bc8df0f854e7234af8b7b8e18408 | c624602fbf42a00586379b8b501441a562ba6b84 | refs/heads/master | 2021-01-10T10:51:53.096585 | 2015-11-22T19:18:54 | 2015-11-22T19:18:54 | 46,536,732 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,987 | r | run_analysis.R |
# set working dir to Data dir - so that the
# rest of the script finds the data files.
# NOTE(review): sys.frame(1)$ofile is only populated when this file is run
# via source(); it is not available under Rscript or when pasting the lines
# interactively -- confirm the intended invocation.
dir <- paste(dirname(sys.frame(1)$ofile), '/Data', sep='')
setwd(dir)
# import headers
# features.txt: feature index + feature name; the names are used below as
# the column names of the X measurement files.
features <- read.table('features.txt', colClasses = c("numeric", "character"));
# activity_labels.txt: activity id -> descriptive activity name lookup.
activityType <- read.table('activity_labels.txt');
# this is complicated enough - and has gone through enough iterations - to warrant its own
# function, rather than repeating it for training & test
prepareData <- function(subjectFile, xFile, yFile, featureData = features, activities = activityType){
  # Build one labelled, mean()/std()-only data frame from a raw
  # subject/X/y file triple.  Used identically for the training and the
  # test partitions of the dataset.
  #
  # subjectFile : file with one subject id per row
  # xFile       : file with the measurement matrix, one feature per column
  # yFile       : file with one activity id per row
  # featureData : two columns (index, feature name); names the X columns
  # activities  : two columns (activity id, descriptive activity label)
  #
  # Returns: data.frame(SubjectId, ActivityName, <mean()/std() features>)

  # Read the three raw files for this partition.
  subjects <- read.table(subjectFile)
  measures <- read.table(xFile)
  labels <- read.table(yFile)

  # Map each activity id onto its descriptive label by direct indexing,
  # which preserves the original row order (merge() would reorder records).
  activityNames <- activities[[2]][labels[[1]]]

  # Attach descriptive column names.
  names(subjects) <- 'SubjectId'
  names(measures) <- featureData[[2]]

  # Keep only the mean()/std() features.  meanFreq() columns are excluded
  # automatically because "meanFreq()" never contains the literal "mean()".
  wanted <- grepl("mean\\()", names(measures)) | grepl("std\\()", names(measures))

  # Single table: subject id, activity label, then the selected measurements.
  cbind(subjects, "ActivityName" = activityNames, measures[, wanted])
}
# Build the labelled mean/std data sets for both partitions of the raw data.
dataTrain <- prepareData('train/subject_train.txt', 'train/x_train.txt', 'train/y_train.txt', features, activityType)
dataTest <- prepareData('test/subject_test.txt', 'test/x_test.txt', 'test/y_test.txt', features, activityType)
# Step 1: Merging the two sets
# ----------------------------
allData <- rbind(dataTrain, dataTest)
# Step 5: From the data set in step 4, create a second independent tidy data set with the
# average of each variable for each activity and each subject.
# ---------------------------------------------------------------------------------------
library(reshape2)
# the "raw" data is in long format:
# one row per (subject, activity, variable, value)
tidyRaw <- melt(allData, id=c("SubjectId", "ActivityName"))
# the averages data is in wide format - this is still tidy:
# one row per (subject, activity) pair, one column per averaged variable.
# FIX: use <- for top-level assignment instead of = (R idiom; consistent
# with every other assignment in this script).
tidyAverages <- dcast(tidyRaw, SubjectId + ActivityName ~ variable, mean)
write.table(tidyAverages, file='../tidyAverages.txt', row.names=FALSE)
|
cac630caad301a50baa911e8c34921181d71812b | aeed4b3d95e2bdcb6386c659230368b3b8cd1b74 | /man/special_xss.Rd | 64bda283e7fc3e46d1b608145024ac18dfe94165 | [] | no_license | ntyndall/detectR | b6d71e8322ad53fcc2dba3093eda4cd1a6d4e559 | af6d0a5f4893588d128bdda7ea75fc7246618295 | refs/heads/master | 2020-03-26T14:11:12.506467 | 2018-10-19T09:05:17 | 2018-10-19T09:05:17 | 144,976,300 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 183 | rd | special_xss.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/special_xss.R
\name{special_xss}
\alias{special_xss}
\title{Special XSS}
\usage{
special_xss(argument)
}
|
d0a31bb919260b9b2d5c8cad1debb9099f914871 | 5f22ca20b8d7d7846d4a54c138e823d00a260f99 | /man/Ste12.Rd | 37df24db8a5a4884f9c780dceed76c60cc5c54a7 | [] | no_license | pbiecek/bgmm | f3a00273fbe99e0ddcb0bb2ead48d793bf21c795 | 1c7dac30d993c3fba68a36a077830f44f9712ff4 | refs/heads/master | 2021-10-26T03:27:18.285028 | 2021-10-10T16:20:02 | 2021-10-10T16:20:02 | 12,442,683 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,381 | rd | Ste12.Rd | \name{Ste12}
\alias{Ste12Data}
\alias{Ste12}
\alias{Ste12Beliefs}
\alias{Ste12Binding}
\docType{data}
\title{Ste12 knockout data under pheromone treatment versus wild type;
Examples of Ste12 targets; Binding p-values of Ste12 to those targets.}
\description{
Ste12 knockout expression data (Roberts et al., 2002) and knowledge from a Ste12 binding experiment (Harbison et al., 2004) used for identifying Ste12 target genes under pheromone treatment.
}
\usage{data(Ste12)}
\format{
Ste12Data vector: 601
Ste12Beliefs matrix of example certainty: 42 x 2
Ste12Binding vector: 42
}
\details{
\code{Ste12Data} Log2 expression ratios of Ste12 knockout versus
wild type, both under 50nM alpha-factor treatment for 30min. This
data is for 601 genes that had more than 1.5 fold change in expression
after pheromone treatment versus wild type.
\code{Ste12Beliefs}: Gives the certainty (belief/plausibility) for
each out of 42 example Ste12 targets to belong to their cluster.
\code{Ste12Beliefs}: Gives the certainty (belief/plausibility) for
each out of 42 example Ste12 targets to belong to their cluster. The
42 examples were chosen to meet two criteria: (1) Had a binding
p-value <0.0001 (see \code{Ste12Binding}), and (2) Had a 2-fold change in response to pheromone treatment (versus wild-type)
  \code{Ste12Binding}: Gives the binding p-value for each example Ste12 target (see \code{Ste12Beliefs}).
}
\references{
Roberts, C. J., Nelson, B., Marton, M. J., Stoughton, R., Meyer, M. R., Bennett, H. A.,
He, Y. D., Dai, H., Walker, W. L., Hughes, T. R., Tyers, M., Boone, C., and Friend,
S. H. (2000). Signaling and Circuitry of Multiple MAPK Pathways Revealed by a
Matrix of Global Gene Expression Profiles. Science, 287(5454), 873--880.
Harbison, C. T., Gordon, D. B., Lee, T. I., Rinaldi, N. J., Macisaac, K. D., Danford,
T. W., Hannett, N. M., Tagne, J.-B., Reynolds, D. B., Yoo, J., Jennings, E. G., Zeitlinger,
J., Pokholok, D. K., Kellis, M., Rolfe, P. A., Takusagawa, K. T., Lander, E. S.,
Gifford, D. K., Fraenkel, E., and Young, R. A. (2004). Transcriptional regulatory
code of a eukaryotic genome. Nature, 431(7004), 99--104.
}
\author{
Ewa Szczurek
}
\seealso{\code{\link{miRNA}},\code{\link{CellCycle}}}
\examples{
data("Ste12")
print(Ste12Data)
print(Ste12Beliefs)
print(Ste12Binding)
}
\keyword{datasets}
|
8d3e1ad25d8078c063154dab913e2e972b290fc9 | e2bda4f7df8fc6bc76e3c4e97d069a7d859f3373 | /dff.create.R | e7a4c7dadcdce1218ab931df7b58edaa85615133 | [
"MIT"
] | permissive | embruna/Ant-Plant-Habitat | 53cc65d48302980a1160c5953b801f86b26b1bdc | 7928d3f03f4ef5d13fb0de7e4e573190934fa642 | refs/heads/master | 2021-01-17T07:24:57.328774 | 2016-05-25T21:45:27 | 2016-05-25T21:45:27 | 31,282,520 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,171 | r | dff.create.R | #' IPMpack requires a dataframe called 'dff'. The function "dff.create" creates that dataframe
#'
dff.create <- function(x) {
  # Convert the wide census data frame `x` (one row per plant, one set of
  # columns per census 1..7) into the long "dff" data frame required by
  # IPMpack: one row per plant-by-transition with columns size, sizeNext,
  # surv, covariate, covariateNext, fec0 and fec1.
  #
  # Begin by selecting data for each census-to-census transition
  dff.12 <- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                 domatia.1,domatia.2, surv12, ant.1, ant.2))
  dff.23<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                domatia.2,domatia.3, surv23, ant.2, ant.3))
  dff.34<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                domatia.3,domatia.4, surv34, ant.3, ant.4))
  dff.45<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                domatia.4,domatia.5, surv45, ant.4, ant.5))
  dff.56<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                domatia.5,domatia.6, surv56, ant.5, ant.6))
  dff.67<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                                domatia.6,domatia.7, surv67, ant.6, ant.7))
  dff.7<- subset(x, select = c(unique.plant.id, plant.species,canopy.cover, topography,
                               domatia.7, domatia.7, surv67, ant.7, ant.1))
  #it was easiest to create frames that were matching sizes. Because census 7 is the last one, there is no 7-8 transition to record
  #As placeholders, I included a second domatia.7, surv67, and ant.1. All of these need to be replaced with NA
  dff.7[,6:7]<-NA
  dff.7[,"ant.1"]<-NA
  #Adds a column noting the different transition intervals. Note that because 7 is the last census, that gets "NA" placed into it.
  dff.12[,"interval"]<-1
  dff.23[,"interval"]<-2
  dff.34[,"interval"]<-3
  dff.45[,"interval"]<-4
  dff.56[,"interval"]<-5
  dff.67[,"interval"]<-6
  dff.7[,"interval"]<-NA
  #this adds the most "coarse" level data on reproduction - did a plant flower or fruit: yes (1) or no (0)?
  #Recall we only have these data for surveys 4-6
  dff.45[,"rep"]<-x[,"rep4"]
  dff.56[,"rep"]<-x[,"rep5"]
  dff.67[,"rep"]<-x[,"rep6"]
  #this adds slightly more detail = how many fruit/flowers did a plant have, if it reproduced?
  dff.45[,"rep2"]<-x[,"fruitsflowers.4"]
  dff.56[,"rep2"]<-x[,"fruitsflowers.5"]
  dff.67[,"rep2"]<-x[,"fruitsflowers.6"]
  #These frames are the same size. Bind them up!
  names(dff.12)<-names(dff.23)<-names(dff.34)<-names(dff.7)
  bound<-rbind(dff.12,dff.23,dff.34, dff.7)
  #These frames are the same size, so bind them up too!
  names(dff.45)<-names(dff.56)<-names(dff.67)
  bound2<-rbind(dff.45, dff.56,dff.67)
  # add columns with NA for rep and rep2 to intervals 12, 23, 34, and 7 - recall we didn't survey reproduction in those years
  bound[,"rep"]<-NA
  bound[,"rep2"]<-NA
  #now the different dataframes are the same size and can bind them all up.
  names(bound)<-names(bound2)
  dff<-rbind(bound,bound2)
  #rename your columns in your new bound dataframe
  names(dff)[names(dff)=="domatia.6"] <- "size"
  names(dff)[names(dff)=="domatia.7"] <- "sizeNext"
  names(dff)[names(dff)=="surv67"] <- "surv"
  names(dff)[names(dff)=="rep"] <- "fec0"
  names(dff)[names(dff)=="ant.6"] <- "ant"
  names(dff)[names(dff)=="ant.7"] <- "antNext"
  # Drop rows with NA in column 8; given the selection order above, column 8
  # is the "ant" column.  NOTE(review): position-based indexing -- verify.
  names(dff)[names(dff)=="rep2"] <- "fec1"
  dff<-dff[!is.na(dff[,8]),]
  #Choose your covariates and rename 2 columns - "ant" and "antNext" columns
  #to match what is needed by IPMpack
  dff <- subset(dff, select = c(size, sizeNext,surv, ant, antNext,fec0, fec1))
  names(dff)[names(dff)=="ant"] <- "covariate"
  names(dff)[names(dff)=="antNext"] <- "covariateNext"
  #remove all rows that don't have a measurement in size or sizeNext.
  #This is simply housekeeping resulting from conversion from wide to long
  # NOTE(review): complete.cases(size | sizeNext) keeps rows where the
  # elementwise OR is non-NA (e.g. NA|positive is kept, NA|0 is dropped) --
  # confirm this matches the intent stated above.
  dff<-dff[complete.cases(dff[,1] | dff[,2]),]
  #log-transform "size" and "sizeNext" to ensure more normal residuals
  #Suggestion: Rob Salguero-Gomez
  dff$size=log(dff$size+1)
  dff$sizeNext=log(dff$sizeNext+1)
  #the final dff as needed by IPM pack
  dff
}
|
c6ba2ebc858da3703fa71126ac92726768a1aed0 | 65f1e5fc4d53160a93f3b34cc7428afb2f14317a | /R/CALF-package.R | 8dcf5083f2a567e458b30d959f80f9e1e542576d | [] | no_license | cran/CALF | 0bb6da3d0fca5c5bc7134e817e7debfdde47cdd1 | ba4c449e38c76205c3e18cbd8cda035222d530bd | refs/heads/master | 2022-05-05T03:19:56.596820 | 2022-03-07T17:10:05 | 2022-03-07T17:10:05 | 48,077,419 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,329 | r | CALF-package.R | #' @name CALF-package
#' @aliases CALF-package
#' @title Coarse Approximation Linear Function
#' @description Forward selection linear regression greedy algorithm.
#' @encoding UTF-8
#' @author { Stephanie Lane [aut, cre],\cr
#' John Ford [aut],\cr
#' Clark Jeffries [aut],\cr
#' Diana Perkins [aut]
#' }
#' Maintainer: John Ford \email{JoRuFo@@gmail.com}
#' @importFrom stats t.test cor
#' @importFrom utils write.table
#' @import ggplot2
#' @keywords calf
#' @details The Coarse Approximation Linear Function (CALF) algorithm is a type of forward selection
#' linear regression greedy algorithm. Nonzero weights are restricted to the values +1 and -1 and
#' their number limited by an input parameter. CALF operates similarly on two different types of samples,
#' binary and nonbinary, with some notable distinctions between the two.
#' All sample data is provided to CALF as a data matrix. A binary sample must contain a distinguished first
#' column with at least one 0 entry (e.g. controls) and at least one 1 entry (e.g. cases); at least one
#' other column contains predictor values of some type. A nonbinary sample is similar but must contain a
#' first column with real dependent (target) values. Columns containing values other than 0 or 1 must be
#' normalized, e.g. as z-scores.
#' As its score of differentiation, CALF uses either the Welch t-statistic p-value or AUC for binary samples
#' and the Pearson correlation for non-binary samples, selected by input parameter. When initiated CALF
#' selects from all predictors (markers) (first in the case of a tie) the one that yields the best score.
#' CALF then checks if the number of selected markers is equal to the limit provided and terminates if so.
#' Otherwise, CALF seeks a second marker, if any, that best improves the score of the sum function generated
#' by adding the newly selected marker to the previous markers with weight +1 or weight -1.
#' The process continues until the limit is reached or until no additional marker can be included in the sum
#' to improve the score.
#' By default, for binary samples, CALF assumes control data is designated with a 0 and case data with a 1.
#' It is allowable to use the opposite convention, however the weights in the final sum may need to be reversed.
NULL
|
f2c9c108ce5a572ddeb1f421f54cb9099b11c10b | a5fa47f69cd41f84cc37e9a2fcdb6ccef266daa3 | /plot_clusters.R | d71b21cd369c12266b4391a05c1fcc0586883acf | [] | no_license | AJangleZero/Kiehls_market_analysis | 8c4cec3fba3bf4e854346024de9a2c24ce8358e2 | 45de84e8c840d0cece09ebab6a6d77e0b5227d11 | refs/heads/master | 2020-05-26T15:21:28.135373 | 2019-05-23T18:05:17 | 2019-05-23T18:05:17 | 188,283,182 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,943 | r | plot_clusters.R | ## Plotting the clusters
## Draw an LDA-based decision-region map with the k-means cluster
## assignments overlaid and labelled, and write it to clusters.png.
## Expects `PCA` (data frame with PC1/PC2 columns) and `Kmeans` (a k-means
## fit with a $cluster component) to exist in the calling environment.
library(MASS)
library(ggplot2)
require(scales)

# Attach the k-means cluster label to each observation.
PCA$cluster <- Kmeans$cluster

# LDA fit used to project the observations into discriminant space (LD1/LD2).
fit <- lda(cluster ~ PC1+PC2, data = PCA)
datPred <- data.frame(cluster=predict(fit)$class,predict(fit)$x)

# Second LDA fit, used only to classify the dense background grid.
fit2 <- lda(cluster ~ PC1 + PC2, data=PCA)

# Axis limits padded by 5% around the projected data.
ld1lim <- expand_range(c(min(datPred$LD1),max(datPred$LD1)),mul=0.05)
ld2lim <- expand_range(c(min(datPred$LD2),max(datPred$LD2)),mul=0.05)

# 300 x 300 grid spanning the plotting region.
# BUG FIX: the upper bound of ld2 previously used ld1lim[[2]], so the grid
# never spanned the vertical (LD2) axis range.
ld1 <- seq(ld1lim[[1]], ld1lim[[2]], length.out=300)
ld2 <- seq(ld2lim[[1]], ld2lim[[2]], length.out=300)
# NOTE(review): the grid coordinates are LD1/LD2 ranges but are passed to
# predict() under the predictor names PC1/PC2 of fit2 -- confirm intentional.
newdat <- expand.grid(list(PC1=ld1,PC2=ld2))
preds <- predict(fit2, newdata=newdat)
predclass <- preds$class
postprob <- preds$posterior

# Background frame: one row per grid point with its predicted class.
df <- data.frame(x=newdat$PC1, y=newdat$PC2, class=predclass)
df$classnum <- as.numeric(df$class)
df <- cbind(df,postprob)

# Reproduce the default ggplot2 hue palette (l = luminance, c = chroma).
# (length.out spelled in full; `length=` relied on partial argument matching.)
colorfun <- function(n, l=65, c=100) {
  hues <- seq(15, 375, length.out = n+1)
  hcl(h=hues, l=l, c=c)[1:n]
}
colors <- colorfun(4)
colorslight <- colorfun(4,l=90,c=50)

g_clusters <- ggplot(datPred, aes(x=LD1, y=LD2) ) +
  geom_raster(data=df, aes(x=x, y=y, fill = factor(class)),alpha=0.7,show.legend=FALSE) +
  #geom_contour(data=df, aes(x=x, y=y, z=classnum), colour="red2", alpha=0.5, breaks=c(1.5,2.5)) +
  geom_point(data = datPred, size = 3, aes(colour=cluster)) +
  scale_x_continuous(limits = ld1lim, expand=c(0,0)) +
  scale_y_continuous(limits = ld2lim, expand=c(0,0)) +
  scale_fill_manual(values=colorslight, guide=FALSE) +
  annotate("text", x = -2.5, y = 2.5, label = "Rookies", color=colorfun(4,l=40,c=100)[1], size=10) +
  annotate("text", x = -5, y = -2, label = "Passivists", color=colorfun(4,l=40,c=100)[2], size=10) +
  annotate("text", x = 1, y = -4, label = "Veterans", color=colorfun(4,l=30,c=100)[3], size=10) +
  annotate("text", x = 5, y = -1, label = "Addicts", color=colorfun(4,l=40,c=100)[4], size=10) +
  labs(x = "PC1", y = "PC2")

# Write the figure; print() is required for ggplot objects when this script
# is run via source().
png("clusters.png", width = 800, height = 500)
print(g_clusters)
dev.off()
|
72085a39ab92b713d1b02d6a935c36daac3850f6 | 2e26bccfa670b18368e66a4f6fa9fddbb2918993 | /section3/3-1-4_murder_case_study.R | 749c5ec684e70bf309edf0471b9e5831aecb1ae1 | [] | no_license | A-Harders/PH125.6x_Wrangling | 0c3145738d4fa39c5c42f1b3454d4903e03418bd | 866be0fe04c75e3bb04f7d24626a21b6df0db503 | refs/heads/master | 2020-09-05T14:25:56.074145 | 2019-12-03T10:08:04 | 2019-12-03T10:08:04 | 220,132,104 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | 3-1-4_murder_case_study.R | #config
# Packages: rvest for scraping, stringr for string helpers, tidyverse for
# the dplyr/readr verbs used below.
library(rvest)
library(stringr)
library(tidyverse)

# scraping the web for the case study
url <- "https://en.wikipedia.org/wiki/Murder_in_the_United_States_by_state"
h <- read_html(url)
# The murders table is the second <table> node on the page.
murders_raw <- h %>% html_nodes("table")
murders_raw <- murders_raw[[2]]
murders_raw <- murders_raw %>% html_table
murders_raw <- murders_raw %>% setNames(c("state", "population", "total", "murders", "gun_murders", "gun_ownership",
                                          "total_rate", "murder_rate", "gun_murder_rate"))
head(murders_raw)

# we need to find the columns with commas by using str_detect()
commas <- function(x) any(str_detect(x, ","))
# FIX: funs() is defunct in current dplyr; pass the function directly.
murders_raw %>% summarize_all(commas)

# we then use str_replace_all() to remove them
test_1 <- str_replace_all(murders_raw$population, ",", "")
test_1 <- as.numeric(test_1)

# we then use mutate_at() to apply this operation to columns 2 and 3
# (the original comment said mutate_all, but the code converts only 2:3).
# alternatively parse_number(), included in readr, removes non-numeric
# characters before parsing
murders_new <- murders_raw %>% mutate_at(2:3, parse_number)
murders_new %>% head
|
05c8ce13af7132616915ad219058018b02751d0b | 4ae32cabb0fd268f313c4c2a54cecf9daffc9797 | /man/get_redbluff_data.Rd | d71bda6f74bea8b83ecb80aff9994c6faf2af794 | [
"Apache-2.0"
] | permissive | InteragencyEcologicalProgram/smonitr | 40d9201f5284100aa34015014eeb94229e722fc2 | 6bb40251473dac065587dc1b40f4f747963d1b73 | refs/heads/master | 2021-02-13T12:33:04.458773 | 2020-10-28T17:10:49 | 2020-10-28T17:10:49 | 244,696,632 | 2 | 2 | NOASSERTION | 2020-10-01T20:54:30 | 2020-03-03T17:16:42 | R | UTF-8 | R | false | true | 1,112 | rd | get_redbluff_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tools.R
\name{get_redbluff_data}
\alias{get_redbluff_data}
\title{Download Redbluff Data}
\usage{
get_redbluff_data(
report_year,
start_year = 2004,
parse_fun,
...,
verbose = TRUE
)
}
\arguments{
\item{report_year}{The report year.}
\item{start_year}{The initial year to retrieve data for.
Default is \code{2004}.}
\item{parse_fun}{A function to parse datasets. Default assumes that
all files in fnames can be parsed using \code{\link[readr:read_delim]{readr::read_csv()}}.}
\item{...}{Additional arguments to pass to \code{parse_fun}.}
\item{verbose}{If \code{TRUE}, display descriptive messages.}
}
\value{
a list of dataframes, each element corresponds to a year
in the sequence \code{start_year:report_year}. The list also
includes an attribute \code{"Notes"} of same length and order
containing the notes section extracted each report file.
}
\description{
Download Redbluff data from
\href{https://cbr.washington.edu}{cbr.washington.edu}.
}
\examples{
\dontrun{
get_redbluff_data(2018, 2016, na = "--")
}
}
|
ce131da5369918195c3a2bc93b49130ebb2b0574 | f7bf342521645074de6691af0a5b7497eadbde93 | /tests/testthat.R | b5776cd08294090aca7bafdd18f7651b5d7f5e50 | [
"BSD-2-Clause"
] | permissive | markmfredrickson/optmatchExperimental | 82512bb2c8f54f67e006054e3b2b44252f82c47e | ff95a8eaaff35d93d7d3abfc1bc3ba5aa6c798cb | refs/heads/master | 2021-06-07T05:08:59.625217 | 2021-04-29T23:23:50 | 2021-04-29T23:23:50 | 37,880,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 84 | r | testthat.R | library(testthat)
# Load the package under test, then run its full testthat suite;
# test_check() discovers and executes the tests under tests/testthat/.
library(optmatchExperimental)
test_check("optmatchExperimental")
|
fc24fcf1afd7c2d3f5c6fcb6266f149dabe6b639 | 241aad96241eb140e4405048411aae2276e66d3d | /ui.R | 42cb1ef83b2ad7de2dc8cc907ed371935c9f2bb5 | [] | no_license | dannhek/CourseraDatSci_Capstone | 07df00027d584ef14d77acf8d5b6c621d2cf7bba | 273096886d123b79a71624553cc859d23647d11d | refs/heads/master | 2016-08-11T13:35:06.068132 | 2016-04-11T14:50:09 | 2016-04-11T14:50:09 | 55,932,792 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,778 | r | ui.R | library(shiny)
#setwd("~/Coursera/Data Science Specialization/Data Science Capstone/data product")
# Define UI for application that reads in words and
shinyUI(fluidPage(
# Application title
titlePanel("What's the Next Word?"),
sidebarLayout(
sidebarPanel(
h3("N-Gram Weights"),
textInput("gw2","2-Gram: ",.25),
textInput("gw3","3-Gram: ",1),
textInput("gw4","4-Gram: ",3)
# ,verbatimTextOutput('test')
),
mainPanel(
tabsetPanel(
tabPanel(p("Display"),
h3("Word Prediction"),
textInput("str","Enter a Phrase:"),
submitButton("Predict Next Word"),
h4("Likely next word:"),
textOutput("pred1"),
conditionalPanel("output.multi == 'true'",
h4("Less likely options:"),
textOutput("predNext")
)
# tableOutput("predtbl"),
# p(verbatimTextOutput("tokens"))
# verbatimTextOutput("multi"),
),
tabPanel(p("About"),
h3("About this Application"),
p("This page was created as part of the",
a(href="https://www.Coursera.org","Coursera"),
a(href="https://www.coursera.org/learn/data-science-project/home/welcome","Data Science Specialization Capstone Project"),
"."),
h4("How to use"),
p("To use this application, simply enter a phrase and click the \"Predict Next Word\" button. You can also adjust the weights given to 2, 3, and 4 gram predictions to favor longer phrases (higher 4-gram weight) or shorter phrases (higher 2-gram weight). "),
h4("For more information, see:"),
p("Github: ",a(href="https://github.com/dannhek/CourseraDatSci_Capstone","github.com/dannhek")),
p("RPubs Presentation:", a(href="","http://rpubs.com/tattooed_economist/CourseraDSCapstone")),
p("Github ReadMe: ",a(href="https://github.com/dannhek/CourseraDatSci_Capstone/blob/master/README.md","README.MD"))
)
)
)
)))
|
f1b436c6804c62864efe770cd421a4d74cdf2e62 | 240bf1139f6098ec9c8c2fc96555780fc58cda30 | /lib/R/packages/lilbambu/man/harvestVariables.Rd | 7ba8bc0ed5375f0dfb7a11d576ef1784e0be34aa | [] | no_license | cerobpm/lilbambu | b9cf0d341f7f3cca493757e393cdb860331bdcdd | af48399a3198bbd2d3b3846a06a662851f705ea8 | refs/heads/master | 2020-03-22T06:22:35.764750 | 2018-09-11T19:42:15 | 2018-09-11T19:42:15 | 139,629,159 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 647 | rd | harvestVariables.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/harvestVariables.R
\name{harvestVariables}
\alias{harvestVariables}
\title{harvestVariables}
\usage{
harvestVariables(url, update = FALSE)
}
\arguments{
\item{url}{WOFWML WS end point}
\item{update}{boolean comportamiento ante VariableID duplicado (si TRUE, actualiza)}
}
\value{
"1 row inserted or updated. VariableID:..." or "nothing inserted/updated"
}
\description{
Esta funcion descarga Variables de un WOFWML WS e inserta en ODM usando "addVariableToLocal"
}
\examples{
harvestVariables("http://brasilia.essi-lab.eu/gi-axe/services/cuahsi_1_1.asmx?WSDL",TRUE)
}
|
bb437c80e02a4b64e3a01cff05df2b6a54facbb6 | 109734b597c2d760725a1a050174a5d11b3c1a9b | /man/as.ppp.Rd | a9c1793c3abf0f92e09500d819c5b493cba80da8 | [] | no_license | rubak/spatstat | c293e16b17cfeba3e1a24cd971b313c47ad89906 | 93e54a8fd8276c9a17123466638c271a8690d12c | refs/heads/master | 2020-12-07T00:54:32.178710 | 2020-11-06T22:51:20 | 2020-11-06T22:51:20 | 44,497,738 | 2 | 0 | null | 2020-11-06T22:51:21 | 2015-10-18T21:40:26 | R | UTF-8 | R | false | false | 4,884 | rd | as.ppp.Rd | \name{as.ppp}
\alias{as.ppp}
\alias{as.ppp.ppp}
\alias{as.ppp.psp}
\alias{as.ppp.quad}
\alias{as.ppp.matrix}
\alias{as.ppp.data.frame}
\alias{as.ppp.influence.ppm}
\alias{as.ppp.default}
\title{Convert Data To Class ppp}
\description{
Tries to coerce any reasonable kind of data to a spatial point pattern
(an object of class \code{"ppp"})
for use by the \pkg{spatstat} package).
}
\usage{
as.ppp(X, \dots, fatal=TRUE)
\method{as.ppp}{ppp}(X, \dots, fatal=TRUE)
\method{as.ppp}{psp}(X, \dots, fatal=TRUE)
\method{as.ppp}{quad}(X, \dots, fatal=TRUE)
\method{as.ppp}{matrix}(X, W=NULL, \dots, fatal=TRUE)
\method{as.ppp}{data.frame}(X, W=NULL, \dots, fatal=TRUE)
\method{as.ppp}{influence.ppm}(X, \dots)
\method{as.ppp}{default}(X, W=NULL, \dots, fatal=TRUE)
}
\arguments{
\item{X}{Data which will be converted into a point pattern}
\item{W}{
Data which define a window for the pattern,
when \code{X} does not contain a window.
(Ignored if \code{X} contains window information.)
}
\item{\dots}{Ignored.}
\item{fatal}{
Logical value specifying what to do if the
data cannot be converted.
See Details.
}
}
\value{
An object of class \code{"ppp"} (see \code{\link{ppp.object}})
describing the point pattern and its window of observation.
The value \code{NULL} may also be returned; see Details.
}
\details{
Converts the dataset \code{X} to a point pattern
(an object of class \code{"ppp"}; see \code{\link{ppp.object}} for
an overview).
This function is normally used to convert an existing point pattern
dataset, stored in another format, to the \code{"ppp"} format.
To create a new point pattern from raw data such as \eqn{x,y}
coordinates, it is normally easier to use the creator function
\code{\link{ppp}}.
The function \code{as.ppp} is generic, with methods for the
classes \code{"ppp"}, \code{"psp"}, \code{"quad"}, \code{"matrix"},
\code{"data.frame"} and a default method.
The dataset \code{X} may be:
\itemize{
\item
an object of class \code{"ppp"}
\item
an object of class \code{"psp"}
\item
a point pattern object created by the \pkg{spatial} library
\item
an object of class \code{"quad"} representing a quadrature scheme
(see \code{\link{quad.object}})
\item
a matrix or data frame with at least two columns
\item
a structure with entries \code{x}, \code{y} which are numeric vectors
of equal length
\item
a numeric vector of length 2, interpreted as the coordinates
of a single point.
}
In the last three cases, we need the second argument \code{W}
which is converted to a window object
by the function \code{\link{as.owin}}.
In the first four cases, \code{W} will be ignored.
If \code{X} is a line segment pattern (an object of class \code{psp})
the point pattern returned consists of the endpoints of the segments.
If \code{X} is marked then the point pattern returned will also be
marked, the mark associated with a point being the mark of the segment
of which that point was an endpoint.
If \code{X} is a matrix or data frame, the first and second columns will
be interpreted as the \eqn{x} and \eqn{y} coordinates respectively.
Any additional columns will be interpreted as marks.
The argument \code{fatal} indicates what to do when
\code{W} is missing and \code{X} contains no
information about the window. If \code{fatal=TRUE}, a fatal error
will be generated; if \code{fatal=FALSE}, the
value \code{NULL} is returned.
In the \pkg{spatial} library, a point pattern is represented
in either of the following formats:
\itemize{
\item
(in \pkg{spatial} versions 1 to 6)
a structure with entries \code{x}, \code{y}
\code{xl}, \code{xu}, \code{yl}, \code{yu}
\item
(in \pkg{spatial} version 7)
a structure with entries
\code{x}, \code{y} and \code{area},
where \code{area} is a structure with entries
\code{xl}, \code{xu}, \code{yl}, \code{yu}
}
where \code{x} and \code{y} are vectors of equal length
giving the point coordinates, and \code{xl}, \code{xu}, \code{yl},
\code{yu} are numbers giving the dimensions of a rectangular window.
Point pattern datasets can also be created by the function
\code{\link{ppp}}.
}
\seealso{
\code{\link{ppp}}, \code{\link{ppp.object}}, \code{\link{as.owin}},
\code{\link{owin.object}}
}
\examples{
xy <- matrix(runif(40), ncol=2)
pp <- as.ppp(xy, c(0,1,0,1))
# Venables-Ripley format
# check for 'spatial' package
spatialpath <- system.file(package="spatial")
if(nchar(spatialpath) > 0) {
require(spatial)
towns <- ppinit("towns.dat")
pp <- as.ppp(towns) # converted to our format
detach(package:spatial)
}
xyzt <- matrix(runif(40), ncol=4)
Z <- as.ppp(xyzt, square(1))
}
\author{
\spatstatAuthors
}
\keyword{spatial}
\keyword{manip}
|
e1404c60f3db2598be5c032c360271f173b212a4 | 0502f29348e35bb27cf1922fce0627477e831bb8 | /plot3.R | ad3f8a87b42b2b5fdddc8f479c7fcfadd23d4f06 | [] | no_license | curnutt/ExData_Plotting1 | 0d6b97f7d3e43835631f683a919b0057c10febb5 | a6074a359ed200f03fdc1ef4992b59b7b9631078 | refs/heads/master | 2020-04-30T14:47:35.467969 | 2014-09-07T22:08:14 | 2014-09-07T22:08:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | r | plot3.R | # skip reading the data file if it has already been read by a previous scipt invocation
# Read and cache the data once per session; sourcing any of the sibling
# plotting scripts afterwards reuses `dat` instead of re-reading the file.
if (!exists("curnutt_data_read_in")) {
    # read the (unzipped) data file
    mydatatable <- read.table("household_power_consumption.txt",sep=";",na.strings="?",header=TRUE)
    # extract data for 2007-02-01 and 2007-02-02
    dat <- mydatatable[grep("^(1|2)/2/2007",mydatatable$Date),]
    # convert date and time from string
    dat$Time <- strptime(paste(dat$Date, dat$Time), format="%d/%m/%Y %H:%M:%S")
    # FIX: the year is four digits, so the format must use %Y.
    # "%d/%m/%y" consumed only two year digits ("20"), yielding wrong dates.
    dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
    # let the next sourced script know that we have already read dat
    curnutt_data_read_in <- TRUE
}
# plot 3: the three energy sub-metering series over time
png(file="plot3.png")
par(mfrow=c(1,1)) # reset the panels if we have run plot4.R
plot(dat$Time, dat$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(dat$Time, dat$Sub_metering_2, type="l", col=2)
lines(dat$Time, dat$Sub_metering_3, type="l", col=4)
# names(dat)[7:9] are the three sub-metering column names
legend("topright", col=c(1,2,4), legend=names(dat)[7:9], lty=1)
dev.off()
|
d3e5b26c950de1052389270592f746bf4c45fe8c | cd5e49d42b9ecd04bda186a2759a5cb28e5d17db | /hsqbrank/logit.R | 1783642c0c6ed4ed3568f5c2a34b74e071437fb4 | [] | no_license | Zerowaltz/qbmodel | d149171bdebb94acf9bc48665e54909122eb9c5a | 42164d28cf7fdb0a609eb4aa48d30649dcb50161 | refs/heads/master | 2022-01-05T22:53:29.201842 | 2018-11-28T22:59:05 | 2018-11-28T22:59:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | logit.R | logitdata <- read.csv('logit.csv')
# Fit a logistic regression of match outcome (win) on score difference.
l <- glm(win ~ diff, data = logitdata, family = 'binomial')

# Plot the fitted win-probability curve over a range of score differences.
# FIX: use exp() rather than the truncated constant 2.71828 for e.
# NOTE(review): the curve uses only the slope coefficient; the fitted model
# also has an intercept, which this plot omits -- confirm that is intended.
x <- seq(-10,80,1)
plot(x,1/(1+exp(-(x*l[['coefficients']][['diff']]))),
     main='Win Probability vs. aPPB Difference',
     xlab='Score Difference',
     xlim=c(0,10),
     ylab='Win Probability',
     ylim=c(0,1),
     type='l',
     col='blue',
     lwd=2)
# Overlay the observed outcomes; point alpha scales inversely with the
# number of rows so dense data stays readable.
points(logitdata[['diff']],logitdata[['win']],
       pch=19,
       cex=1,
       col=rgb(red = 1, green = 0, blue = 0, alpha = 50/dim(logitdata)[1]))
summary(l)
|
26917c42aacbafeb88aa2fec639a0b41ec4226b6 | 14ca164ca16243197aeea379097495ed032227f4 | /man/isStable.Rd | 4321de187713c7221ba570c55a7afac4ec345a88 | [] | no_license | yangkedc1984/mixAR | 8f63cafdfb7d550d4afff19f5e116ddc63d9bf9e | 3a35d1661675c9760deae1fbb95e36681cfeddc0 | refs/heads/master | 2023-02-15T16:50:06.696550 | 2021-01-04T10:20:02 | 2021-01-04T10:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,066 | rd | isStable.Rd | \name{isStable}
\alias{isStable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Check if a MixAR model is stable}
\description{Checks if a MixAR model is stable. This is also the second
order stationarity condition.}
\usage{
isStable(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{the model}
}
\details{
If each component of a MixAR model corresponds to a stable
autoregression model, then the MixAR model is also stable. However,
the MixAR model may be stable also when some of its components
correspond to integrated or explosive AR models, see the references.
}
\value{
True if the model is stable (second order stationary), FALSE otherwise.
}
\references{
\insertRef{Boshnakov2011on1st2nd}{mixAR}
\insertRef{WongLi2000}{mixAR}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
isStable(exampleModels$WL_I)
isStable(exampleModels$WL_II)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{MixAR}
|
ad539e4005516821b6e3ee5b9bf3c1b68b0a287f | 7cbe5a44f3e568f49c5a6077fef5b0bb45e18c04 | /plot3.R | 519d90f455654749313a39632a961a2a7397e6c1 | [] | no_license | rbegg/ExData_Plotting1 | bf3f02a136d06fa8ae1ed18a00fccb2b158805b7 | 7611eedea104558a61639e2180c586e852e24f65 | refs/heads/master | 2021-01-17T20:03:54.191320 | 2016-03-20T18:33:34 | 2016-03-20T18:33:34 | 54,276,705 | 0 | 0 | null | 2016-03-19T16:53:07 | 2016-03-19T16:53:06 | null | UTF-8 | R | false | false | 1,384 | r | plot3.R | ## Coursera Exploratory Data Analysis
## Week 1 Assignment
##
## Author: Robert Begg Mar 19, 2016
##
## Function: Plot3
##
## Description: Generate plot3
library(dplyr)
source("./readEnergy.R")
## Generate plot3.png: the three energy sub-metering series against time.
##
## Side effects: reads the data set via readEnergy() on first call and
## caches it in the global variable `energy`; writes "plot3.png" to the
## working directory.  Called for its side effects only.
plot3 <- function() {
    ## Check if cached energy data is already loaded; read once and cache
    ## globally (<<-) so the sibling plot scripts can reuse it.
    if (!exists("energy")) {
        ## call readEnergy() utility function to read and clean data
        energy <<- readEnergy()
    }
    ## Open the png graphics device.  FIX: close it via on.exit() so the
    ## device is not leaked if any plotting call below fails.
    png(filename = "plot3.png",
        width = 480,
        height = 480)
    on.exit(dev.off(), add = TRUE)
    ## First series, drawn with plot() to set up axes and labels.
    plot(Sub_metering_1 ~ DateTime,
         data = energy,
         type = "l",
         xlab = "",
         ylab = "Energy sub metering")
    ## Overlay the second series in red (xlab is not a lines() argument and
    ## has been dropped).
    lines(Sub_metering_2 ~ DateTime,
          data = energy,
          type = "l",
          col = "red")
    ## Overlay the third series in blue.
    lines(Sub_metering_3 ~ DateTime,
          data = energy,
          type = "l",
          col = "blue")
    ## Legend matching the series colours (black, red, blue).
    legend("topright",
           c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
           lty=1,
           col=1:3)
    invisible(NULL)
}
fabcf5f72c89d6716ca2500583c199355e576ee7 | c24a3829e4de3b81586c3ed090ed6f69d07c41bd | /cachematrix.R | 2153a9ff590ab4c98f8862a5606241859db43c0f | [] | no_license | AmelieRu/ProgrammingAssignment2 | 58c70470349450a49a84de49edb5834e547c2dda | c929895e85bdeae75353b447c3f7c33c3e02deb2 | refs/heads/master | 2021-01-01T20:01:52.606737 | 2017-07-29T20:51:15 | 2017-07-29T20:51:15 | 98,743,210 | 0 | 0 | null | 2017-07-29T20:51:15 | 2017-07-29T15:58:27 | R | UTF-8 | R | false | false | 2,158 | r | cachematrix.R | ## This script provides functions to
## 1. Create a matrix, calculate its inverse and cache the result.
## 2. Compute the inverse of a matrix or retrieve it from cache if already computed.
## This function creates a special "matrix" object that can cache its inverse.
## Create a caching wrapper around a matrix.
##
## Returns a list of four accessor closures that share the wrapped matrix
## `x` and its lazily-supplied inverse through their enclosing environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
    # Cache slot for the inverse; NULL means "not computed yet".
    cached_inverse <- NULL
    list(
        set = function(y) {
            # Replace the matrix in the enclosing environment and drop any
            # stale cached inverse, which no longer matches the new matrix.
            x <<- y
            cached_inverse <<- NULL
        },
        get = function() x,
        setinverse = function(solve) cached_inverse <<- solve,
        getinverse = function() cached_inverse
    )
}
## This function computes/retrieve and return the inverse of the special "matrix" returned
## by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## When a cached inverse exists it is returned immediately (with a message);
## otherwise the inverse is computed with solve(), stored back into the
## cache, and returned.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
    cached <- x$getinverse()
    if (is.null(cached)) {
        # Cache miss: compute the inverse from the stored matrix, memoise it
        # on the wrapper object, then return it.
        fresh <- solve(x$get(), ...)
        x$setinverse(fresh)
        fresh
    } else {
        message("getting cached data")
        cached
    }
}
|
d86dcf3ca9959dee1b11c9fc7d00c953f0b834ac | 7273ba5590ad927faca009c589e590db2b56cd4b | /geoid/geoid.R | 5ba8817f9ec5ecaf5388e82bb893dfd21d6f25e5 | [
"MIT"
] | permissive | Bakaniko/rspatial | 7ae144dc99bf2a50909c2a7f131b3bf454979345 | 65ce4fe9247ac53459877a349de4579c18ae686a | refs/heads/main | 2023-06-21T23:56:54.742090 | 2021-07-21T21:36:02 | 2021-07-21T21:36:02 | 388,364,855 | 0 | 0 | MIT | 2021-07-22T07:14:15 | 2021-07-22T07:14:14 | null | UTF-8 | R | false | false | 3,401 | r | geoid.R | library("raster")
library("geometry")
library("rgl")
library("sf")
library("geosphere")
library("smoothr")
library("magick")
# -------------
# IMPORTS & COMPUTATION
# -------------
# Geoid height raster (30-arc-min grid, judging by the file name -- verify).
r <- raster("files/geoid30.tif")
# Land polygons, densified so the sampled outlines follow the sphere smoothly.
ctr <- densify(st_read("shp/land.shp"), n = 3)
# 20-degree graticule lines.
grat <- st_read("shp/ne_50m_graticules_20.shp")
# Coastline vertices as lon/lat, with the geoid height sampled at each vertex.
m <- st_coordinates(ctr)
m <- m[,c("X","Y")]
m <- cbind(m, extract(r, m))
# Same treatment for the graticule vertices.
m2 <- st_coordinates(grat)
m2 <- m2[,c("X","Y")]
m2 <- cbind(m2, extract(r, m2))
# 100k random points on the globe; their sampled geoid heights drive the
# triangulated surface rendered later.
llh <- data.frame(randomCoordinates(100000))
llh$h <- extract(r, llh[,1:2])
## just spherical
## Convert lon/lat/height triples to Cartesian XYZ on a sphere.
##
## lonlatheight: matrix-like with columns [lon (deg), lat (deg), height]
## rad:          sphere radius
## exag:         vertical exaggeration factor applied to the height column
##
## Heights are simply added to the z coordinate (not along the surface
## normal), matching the original "just spherical" approximation.
## Returns a matrix with columns x, y, z.
llh2xyz <- function(lonlatheight, rad = 500, exag = 1) {
  deg2rad <- pi / 180.0
  lon_rad <- lonlatheight[, 1] * deg2rad
  lat_rad <- lonlatheight[, 2] * deg2rad
  x <- rad * cos(lat_rad) * cos(lon_rad)
  y <- rad * cos(lat_rad) * sin(lon_rad)
  z <- lonlatheight[, 3] * exag + rad * sin(lat_rad)
  cbind(x, y, z)
}
## triangulate first in lonlat
# Delaunay triangulation of the random sample points in lon/lat space;
# tri.ind flattens the triangle rows into the vertex-index sequence that
# rgl.triangles() consumes later (three consecutive indices per triangle).
tbr <- delaunayn(llh[,1:2])
tri.ind <- as.vector(t(tbr))
# colour mapping scale
## Linearly map the non-NA values of x onto the colour-index range [1, nn]:
## min(x) maps to 1 and max(x) maps to nn.  NA entries are dropped, so the
## result can be shorter than the input.
scl <- function(x, nn = 50) {
  vals <- x[!is.na(x)]
  lo <- min(x, na.rm = TRUE)
  span <- diff(range(x, na.rm = TRUE))
  1 + (nn - 1) * (vals - lo) / span
}
# Number of discrete colour steps used for the surface.
n <- 150
## those colours, closer to original
# Classic "jet"-style ramp: dark blue -> cyan -> yellow -> dark red.
jet.colors <-
  colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan",
                     "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000"))
# NOTE(review): this call just returns one colour and discards it; it looks
# like a leftover interactive sanity check.
jet.colors(1)
## Render the geoid surface plus coastline/graticule point overlays into the
## current rgl scene.
##
## myexag: vertical exaggeration factor forwarded to llh2xyz().
##
## Relies on globals built earlier in this script: llh (random sample points
## with geoid heights), tri.ind (triangulation index), m / m2 (coastline and
## graticule coordinates with heights), n and jet.colors (colour scale).
plotmap <- function(myexag = 20000){
  # NOTE(review): the original assigned unused locals theta/phi here; removed.
  r3dDefaults$windowRect <- c(335, 142,1096,1008)
  clear3d()
  # Equatorial Earth radius in metres.
  myrad <- 6378137
  xyz <- llh2xyz(llh, rad = myrad, exag = myexag)
  # Overlays are lifted slightly above the surface so they are not swallowed
  # by the triangulated mesh.
  mxyz <- llh2xyz(m, rad = myrad + 10000, exag = myexag)
  mxyz2 <- llh2xyz(m2, rad = myrad + 5000, exag = myexag)
  # Colour each triangle vertex by its geoid height via the jet colour ramp.
  rgl.triangles(xyz[tri.ind, 1], xyz[tri.ind, 2], xyz[tri.ind, 3],
                col = jet.colors(n)[scl(llh[tri.ind, 3], nn = n)])
  # Graticule points (white) first, then coastline points (dark grey) on top.
  pch3d(mxyz2[, 1], mxyz2[, 2], mxyz2[, 3], pch = 20, color = "white", cex = 0.018)
  pch3d(mxyz[, 1], mxyz[, 2], mxyz[, 3], pch = 20, color = "#403f3e", cex = 0.015)
}
# -------------
# BUILD MAPS
# -------------
# Open one 800x800 rgl window with a white background; every frame below is
# rendered into this single scene.
r3dDefaults$windowRect <- c(0, 0,800,800)
open3d()
bg3d("white")
## Render the geoid and write one JPEG frame per camera angle for a full
## 360-degree rotation (frames land in img/<exag>/geoid_NNN.jpg).
##
## nb:   number of angles in the full rotation; nb - 1 rotated frames are
##       written after the initial viewpoint is set.
## exag: vertical exaggeration, forwarded to plotmap() and shown as a label.
## zoom: rgl camera zoom factor.
drawgeoid <- function(nb = 180, exag = 10000, zoom = 0.62){
  # Base orientation: tilt the globe so the camera views it obliquely.
  uM0 <- rotationMatrix(-pi/2, 1, 0, 0) %>%
    transform3d(rotationMatrix(-2, 0, 0, 1)) %>%
    transform3d(rotationMatrix(-pi/12, 1, 0, 0))
  # Camera angles evenly spaced over one full turn.
  angle.rad <- seq(0, 2*pi, length.out = nb)
  plotmap(exag)
  r3dDefaults$windowRect <- c(0, 0,800,800)
  rgl.viewpoint(theta = 0, phi = 0, fov = 0, zoom = zoom,
                userMatrix = uM0)
  text3d(x = 100, y = 100, text = paste0("Exaggeration x",exag), adj = 0.5, cex = 1)
  # The output folder is loop-invariant, so create it once up front
  # (the original re-checked it on every iteration).
  folder <- paste0("img/", exag)
  if (!file.exists(folder)){dir.create(folder)}
  # seq_len() is safe when nb <= 1, where the original 1:(nb - 1) would count
  # backwards; sprintf("%03d", ...) replaces the original trio of padding
  # if()s and produces identical "001".."999" names.
  for (i in seq_len(nb - 1)) {
    uMi <- transform3d(uM0, rotationMatrix(angle.rad[i], 0, 0, 1))
    rgl.viewpoint(theta = 0, phi = 0, fov = 0, zoom = zoom,
                  userMatrix = uMi)
    filename <- paste0(folder, "/geoid_", sprintf("%03d", i), ".jpg")
    rgl.snapshot(filename)
    # Re-encode at lower JPEG quality to shrink the frame on disk.
    img <- image_read(filename)
    image_write(img, path = filename, format = "jpeg", quality = 75)
  }
}
# -------------
# GO !!!!!!!!!!
# -------------
# Render one 45-frame rotation per exaggeration level (x0 up to x25000 in
# steps of 1000); each level writes its frames under img/<exag>/.
for (i in 0:25){
  drawgeoid(nb = 45, exag = i * 1000, zoom = 0.70)
}
|
58af4b17b0e68680a2a3138f2842d9d671905292 | 4951e7c534f334c22d498bbc7035c5e93c5b928d | /regression/Alemzadeh-influence.R | 985036c4d4c43ad54521ecd75baa274492321ed3 | [] | no_license | Derek-Jones/ESEUR-code-data | 140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1 | 2f42f3fb6e46d273a3803db21e7e70eed2c8c09c | refs/heads/master | 2023-04-04T21:32:13.160607 | 2023-03-20T19:19:51 | 2023-03-20T19:19:51 | 49,327,508 | 420 | 50 | null | null | null | null | UTF-8 | R | false | false | 1,021 | r | Alemzadeh-influence.R | #
# Alemzadeh-influence.R, 17 Mar 14
#
# Data from:
# Analysis of safety-critical computer failures in medical devices
# Homa Alemzadeh and Ravishankar K. Iyer and Zbigniew Kalbarczyk and Jai Raman
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG hardware_recall safety-critical medical
# Bin the medical-device recall dates into 72 equal-width intervals, fit a
# Gaussian GLM of per-bin counts against bin index, and draw car's
# influence-index diagnostic plots (point_col comes from ESEUR_config.r).
source("ESEUR_config.r")

library("car")

brew_col <- rainbow(3)

# Columns: Recall_Number,Date,Year,Trade_Name,Recalling_Firm,Recall_Class,Reason_Recall,Action
comp_recalls <- read.csv(paste0(ESEUR_dir, "regression/Alemzadeh-Computer_Related_Recalls.csv.xz"),
                         as.is = TRUE)
comp_recalls$Date <- as.Date(comp_recalls$Date, format = "%b-%d-%Y")

# Count recalls per time bin.
date_bins <- cut(comp_recalls$Date, breaks = 72)
bin_counts <- table(date_bins)

x_axis <- seq_along(bin_counts)
y_axis <- as.vector(bin_counts)

# (A quasipoisson identity-link fit would be the count-data alternative.)
l_mod <- glm(y_axis ~ x_axis)

influenceIndexPlot(l_mod, main = "", col = point_col,
                   cex.axis = 0.9, cex.lab = 1.0)
|
9b5748b51fc790b5d5385a942b0c9a8200ff3c1f | b15e5da82043cc40a64a5e868f6ca55388bf79e1 | /helper_diff_meth.R | 97080e629eba3a76902b2a106497a7cdb83925a6 | [] | no_license | ishanparanjpe/lupus_clustering | c4183921fca69c1049a254f44f3c13abfbfa85b0 | e16bdac8be98c63ed858a4d367abd4bc9ce3028e | refs/heads/master | 2022-02-21T12:29:41.441626 | 2022-02-10T16:13:00 | 2022-02-10T16:13:00 | 179,760,073 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,383 | r | helper_diff_meth.R | library(VennDiagram)
library(ggplot2)
library(calibrate)
## Draw a two-set Venn diagram to `filename` via VennDiagram::venn.diagram().
##
## itemList: list of two member vectors; nameList: matching category labels;
## title: main title.  Styling (two fill colours, two alpha values) is
## hard-wired for exactly two sets.  The input list is echoed with print()
## and the venn.diagram() return value is returned, marked visible.
makeVenn <- function(itemList, nameList, filename, title){
  sets <- itemList
  print(sets)
  (venn.diagram(x = sets,
                category.names = nameList,
                main = title,
                lty = 'blank',
                fill = c("green", "red"),
                cex = 2.4,
                main.cex = 2,
                fontface = "bold",
                fontfamily = "sans",
                cat.cex = 1.3,
                cat.fontface = "bold",
                alpha = c(0.5, 0.5),
                cat.fontfamily = "sans",
                width = 5000,
                height = 5000,
                filename = filename))
}
## Volcano plot of a limma results table, written to `filename` as a PNG.
##
## limma_table: data frame with columns logFC, P.Value, adj.P.Val, cpg_gene.
## FC_cutoff:   absolute log-fold-change threshold for highlighting/labels.
## padj_cutoff: adjusted-p-value threshold for significance colouring.
## filename:    output PNG path.
##
## Layers: black = all points; red = significant by adj.P.Val; blue =
## significant AND |logFC| beyond the cutoff; blue points are also labelled.
volcano <- function(limma_table, FC_cutoff, padj_cutoff, filename){
  ## BUG FIX: the original called png(width = 10, height = 10), i.e. a
  ## 10x10 *pixel* device (the sibling volcano_contrast() uses inches),
  ## which cannot hold a plot.  Specify inches plus a resolution instead.
  png(filename, width = 10, height = 10, units = "in", res = 200)
  # Basic volcano plot: effect size vs. -log10 p-value
  with(limma_table, plot(logFC, -log10(P.Value), pch=20, main="Volcano plot", xlim=c(-2.5,2), ylim = c(4,9)))
  # Red: significant after multiple-testing correction
  with(subset(limma_table, adj.P.Val<padj_cutoff), points(logFC, -log10(P.Value), pch=20, col="red"))
  # Blue: significant AND large effect.  CONSISTENCY FIX: use the FC_cutoff
  # argument here -- the original hard-coded 1, ignoring the parameter that
  # the labelling step below already honours.
  with(subset(limma_table, abs(logFC)>FC_cutoff & adj.P.Val<padj_cutoff), points(logFC, -log10(P.Value), pch=20, col="blue"))
  ### add gene names for significant CpGs
  with(subset(limma_table, abs(logFC)>FC_cutoff & adj.P.Val<padj_cutoff), textxy(logFC, -log10(P.Value), labs=cpg_gene, cex=.8))
  dev.off()
}
volcano_contrast <- function(limma_table,group1, group2, delta_beta_cutoff, cpgs, padj_cutoff, filename){
  # Volcano plot of methylation difference (delta beta) vs. -log10 p-value,
  # written to `filename` as a 15x15-inch PNG.
  #
  # limma_table       : data frame with per-group mean-beta columns plus
  #                     P.Value, adj.P.Val and cpg_gene columns
  # group1, group2    : column names of the two groups being contrasted
  # delta_beta_cutoff : |delta beta| threshold for highlighting/labelling
  # cpgs              : row subset used for the coloured/labelled layers
  #                     (the black base layer uses all rows)
  # padj_cutoff       : adjusted-p-value significance threshold
  #
  # delta beta = mean beta of group1 minus mean beta of group2.
  limma_table$delta_beta <- limma_table[,group1]-limma_table[,group2]
  png(filename, width = 15, height = 15, units = "in", res = 200)
  par(ps = 12, cex = 1.8, cex.axis = 2, cex.lab = 4)
  # Base layer: every CpG in black.
  with(limma_table, plot(delta_beta, -log10(P.Value), pch=20, main="Volcano plot", xlim=c(-0.4,0.4), xlab= "", ylab = "", ylim = c(0,9)))
  # Red: significant by adjusted p-value (within the `cpgs` subset only).
  with(subset(limma_table[cpgs,], adj.P.Val<padj_cutoff), points(delta_beta, -log10(P.Value), pch=20, col="red"))
  # Blue: significant AND |delta beta| beyond the cutoff.
  with(subset(limma_table[cpgs,], abs(delta_beta)>delta_beta_cutoff & adj.P.Val<padj_cutoff), points(delta_beta, -log10(P.Value), pch=20, col="blue"))
  #with(subset(res, padj<.05 & abs(log2FoldChange)>1), points(log2FoldChange, -log10(pvalue), pch=20, col="green"))
  ### add gene names for significant CpGs
  # with(subset(limma_table[cpgs,], abs(delta_beta)>delta_beta_cutoff & adj.P.Val<padj_cutoff), textxy(delta_beta, -log10(P.Value), labs=cpg_gene, cex=.8))
  # NOTE(review): num_sig (clamped to at most 10) is computed for the
  # commented-out "label only the top 10" variant below and is unused by the
  # active labelling call.
  num_sig <- dim(subset(limma_table[cpgs,], abs(delta_beta)>delta_beta_cutoff & adj.P.Val<padj_cutoff))[1]
  num_sig <- ifelse(num_sig<10, num_sig, 10)
  # with(subset(limma_table[cpgs,], abs(delta_beta)>delta_beta_cutoff & adj.P.Val<padj_cutoff)[1:num_sig,], textxy(delta_beta, -log10(P.Value), labs=cpg_gene, cex=.8))
  with(subset(limma_table[cpgs,], abs(delta_beta)>delta_beta_cutoff & adj.P.Val<padj_cutoff), textxy(delta_beta, -log10(P.Value), labs=cpg_gene, cex=.8))
  dev.off()
}
|
b0993fba1e93d7d06daa96ce6acbd1bb6822f289 | e398cbd33cba9912e5180fdba83569945e0648c9 | /buscador_fauna/shinyapp/ui.R | 07bdb969cd4700e4df68efe1acee587e0d3ee82b | [] | no_license | tereom/shiny_snmb | fbcd12750231ec86a1b5b0ccf27351233e7cc7b7 | a142c7087163bcf722e3d45097b10efc6264355b | refs/heads/master | 2016-09-07T19:07:40.363360 | 2016-01-21T17:40:10 | 2016-01-21T17:40:10 | 41,378,751 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,051 | r | ui.R | library(leaflet)
# Define UI for the species-search ("Buscador de especies") Shiny app.
# (The original comment said "miles per gallon application" -- a leftover
# from the Shiny tutorial template.)
shinyUI(fluidPage(
  p(),
  # Page title
  h2("Buscador de especies"),
  # Search panel: free-text species names separated by '|', plus a refresh
  # button ("actualiza") that triggers the query on the server side.
  wellPanel(
    textInput("especies", label = "Nombre:", value = "vaca|taurus|caballo|equus"),
    helpText("Ingresa el nombre o nombres (común y/o científico) de los animales
      o plantas que deseas buscar, los nombres deben estar separados por el
      símbolo '|'. Ejemplo: vaca|taurus|caballo|equus"),
    actionButton("actualiza", "actualiza", icon("refresh"))
  ),
  # Earlier single-panel layout, kept for reference:
#   mainPanel(
#     p(),
#     leafletOutput("mapa_base", width = "800px", height = "500px"),
#     p(),
#     dataTableOutput("tbl"),
#     downloadButton('downloadCgl', 'Descargar tabla conglomerados')
#   )
# Two tabs: "Mapa" (leaflet map + results table) and "Descargas" (CSV
# downloads filtered by record type and species selection).
mainPanel(
  tabsetPanel(
    tabPanel("Mapa",
      leafletOutput("mapa_base", width = "800px", height = "500px"),
      p(),
      dataTableOutput("tbl"),
      p()
      ),
    tabPanel("Descargas",
      h3("Descarga de tablas en formato csv"),
      wellPanel(
        # p("El siguiente botón descarga la tabla de la pestaña Mapa en formato
        #   csv."),
        h4("Tabla de presencia a nivel conglomerado"),
        p("Para descargar esta tabla es necesario haber ejecutado una búsqueda
          de especie(s), es la tabla análoga a la que aparece en la parte
          inferior de la pestaña Mapa."),
        downloadButton('downloadCgl', 'Presencia conglomerados'),
        h4("Tablas con rutas a archivos"),
        p("Las siguientes tablas se descargan por tipo de registro (especie
          invasora, huellas/excretas, trampa cámara)."),
        radioButtons("tab", "Tabla",
          list("Especies Invasoras" = "ei",
            "Huellas/Excretas " = "he",
            "Especimen/resto" = "er",
            "Cámara" = "camara")),
        radioButtons("sel_especies", "Especies",
          list("Especificadas en buscador" = "especies_noms",
            "Todas" = "especies_todas")
          ),
        downloadButton('downloadFiles', 'Rutas Archivos')
      ))
  )
)
))
|
b039af6e57da062f2c46c924c9ae4b03d2fdf9ed | 9eac9f8e7495d916f7596c4444461521b1a39086 | /scripts/edgeR_analysis_abyzov_unique.R | c9cc0df6037a3ce7a40fe6c9487281e7f4af37e8 | [
"Apache-2.0"
] | permissive | uniqueg/scripts | bbb42d455196f8e047df2681661a02d38e4a762f | 9fdcb93f740c0d353b8f9c0fe3ceab6a941af87d | refs/heads/master | 2023-04-08T17:08:00.911197 | 2023-03-16T08:46:40 | 2023-03-16T08:46:40 | 211,389,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,531 | r | edgeR_analysis_abyzov_unique.R | #######
### GENERAL:
### --------
### Author: Alexander Kanitz
### Created: 05-APR-2013
### Modified: 28-MAY-2013 Christina Herrmann
### Language: R
### Version: 2.15.2
### Requirements: Bioconductor_2.11, edgeR + dependencies
### Description: Differential gene expression analysis of sample groups using the R/Bioconductor package edgeR
### Input from single count table for all samples
### Arguments: 1. absolute file name for count table; 2. output file prefix (MUST exist; may include file path)
### Output: BCV, MDS and smear plots; table of differentially expressed genes (FDR < 0.5); various other count tables (see section E2.); log (to STDOUT)
### Usage example: Rscript edgeR_analysis_abyzov_unique.R ./count_table path/to/out/files/prefix
#######
### A. Pre-requisites
# Get command line arguments
args <- commandArgs(trailingOnly=TRUE)
## Pass arguments
# args[1]: path to the count table; args[2]: output file prefix.  No
# validation is done -- a missing argument surfaces later as an NA path.
counts <- as.character(args[1])
prefix <- args[2]
# Load library
suppressMessages(library(edgeR))
###
### B. Import data
## load count table
# NOTE: `counts` is reused -- it held the file path above and holds the
# imported data frame from this point on.
counts <- read.csv(counts)
# Assign indicated group names and sample numbers
# Hard-coded group labels in the column order of the count table: the
# family-03 parental/iPSC batches, the fams11/23 batches, plus one unpaired
# H1 iPSC sample.
group <- factor( c( "fam03_parental", rep("fam03_iPSC", 3), "fam03_parental", rep("fam03_iPSC", 3), "fam03_parental", "fam03_parental", rep("fam03_iPSC", 3), "fams1123_parental", rep("fams1123_iPSC", 3), "fams1123_parental", rep("fams1123_iPSC", 3),"fams1123_parental", rep("fams1123_iPSC", 3), "H1_iPSC" ))
# Read count table into DGEList object 'dge'
dge <- DGEList(counts, group=group)
###
### C. Calculation / analysis
# Calculate normalization factors (library-size normalisation)
dge <- calcNormFactors(dge)
# Calculate common dispersion and
dge <- estimateCommonDisp(dge)
# Calculate tagwise dispersion
dge <- estimateTagwiseDisp(dge)
# Exact negative binomial tagwise tests, pairs to compare have to be specified
# Within-family comparisons: parental fibroblasts vs. derived iPSC lines.
fam03_exact_test <- exactTest(dge, pair=c("fam03_parental","fam03_iPSC"))
fams1123_exact_test <- exactTest(dge, pair=c("fams1123_parental","fams1123_iPSC"))
# Cross-family comparisons were considered but are disabled:
#parental_exact_test <- exactTest(dge, pair=c("fam03_parental","fams1123_parental"))
#ipsc_exact_test <- exactTest(dge, pair=c("fam03_iPSC","fams1123_iPSC"))
# iPSC all vs. parental all: collapse the family prefixes so every sample is
# labelled plain "parental" or "iPSC" (H1_iPSC keeps its own label and is
# simply not part of the tested pair).
all <- dge
# BUG FIX: the original ran two separate substitutions that each read the
# UNMODIFIED dge$samples$group, so the second assignment discarded the
# "fam03_" stripping done by the first and left mixed labels.  Nest the two
# sub() calls so both prefixes are removed.
all$samples$group <- as.factor(sub("fams1123_", "", sub("fam03_", "", dge$samples$group)))
all_exact_test <- exactTest(all, pair=c("parental","iPSC"))
# Calculate differentially expressed
# decideTestsDGE() classifies each tag as down (-1), unchanged (0) or up (1);
# summary() tabulates the three classes per comparison.
fam03_summ_de <- summary(decideTestsDGE(fam03_exact_test))
fams1123_summ_de <- summary(decideTestsDGE(fams1123_exact_test))
#parental_summ_de <- summary(decideTestsDGE(parental_exact_test))
#ipsc_summ_de <- summary(decideTestsDGE(ipsc_exact_test))
all_summ_de <- summary(decideTestsDGE(all_exact_test))
# Subset top tags (FDR < 0.05)
# Rows 1 and 3 of each summary are the down- and up-regulated counts, so
# their sum is the number of significant tags requested from topTags().
fam03_tags_de <- topTags(fam03_exact_test, n=sum(fam03_summ_de[c(1,3)]))
fams1123_tags_de <- topTags(fams1123_exact_test, n=sum(fams1123_summ_de[c(1,3)]))
all_tags_de <- topTags(all_exact_test, n=sum(all_summ_de[c(1,3)]))
# Get count table normalized to counts per million
#cpm_de <- cpm(dge)[rownames(tags_de),]
###
### D. Write tables
# Full per-tag test statistics for each comparison, then only the
# significant "top tags" tables, all as tab-separated files named
# <prefix>_<name>.tab.
write.table(all_exact_test$table, file=paste(prefix, "edgeR_out_all.tab", sep="_"), quote=FALSE, sep="\t")
write.table(fam03_exact_test$table, file=paste(prefix, "edgeR_out_fam03.tab", sep="_"), quote=FALSE, sep="\t")
write.table(fams1123_exact_test$table, file=paste(prefix, "edgeR_out_fams1123.tab", sep="_"), quote=FALSE, sep="\t")
write.table(fam03_tags_de$table, file=paste(prefix, "diff_exp_fam03.tab", sep="_"), quote=FALSE, sep="\t")
write.table(fams1123_tags_de$table, file=paste(prefix, "diff_exp_fams1123.tab", sep="_"), quote=FALSE, sep="\t")
write.table(all_tags_de$table, file=paste(prefix, "diff_exp_all.tab", sep="_"), quote=FALSE, sep="\t")
###
## E. Plots
## E1. Smear plot (~MA; tagwise log2 FC vs log2 cpm)
# One PDF per comparison; tags flagged by decideTestsDGE() are highlighted.
# all
pdf(file=paste(prefix, "smear_plot_all.pdf", sep="_"), width = 6, height = 6)
detags <- rownames(all)[as.logical(decideTestsDGE(all_exact_test))]
plotSmear(all_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue") # Blue lines indicate log2(FC) > 1 and < -1
dev.off()
# fam03
pdf(file=paste(prefix, "smear_plot_fam03.pdf", sep="_"), width = 6, height = 6)
detags <- rownames(dge)[as.logical(decideTestsDGE(fam03_exact_test))]
plotSmear(fam03_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue") # Blue lines indicate log2(FC) > 1 and < -1
dev.off()
#fams1123
pdf(file=paste(prefix, "smear_plot_fams1123.pdf", sep="_"), width = 6, height = 6)
detags <- rownames(dge)[as.logical(decideTestsDGE(fams1123_exact_test))]
plotSmear(fams1123_exact_test, de.tags=detags)
abline(h = c(-1, 1), col = "blue") # Blue lines indicate log2(FC) > 1 and < -1
dev.off()
## E2. Biological covariance plot (BCV; tagwise dispersion vs log2 cpm)
#pdf(file=paste(prefix, "BCV_plot.pdf", sep="_"), width = 6, height = 6)
#plotBCV(cts_tag_wise_disp, cex=0.4)
#dev.off()
## E3. Multidimensional scaling plot (MDS; sample relations)
# Custom palette so each sample group gets a distinguishable colour.
palette(c("yellow2","orchid","orange","tomato","tomato4","pink","peachpuff","turquoise","plum","steelblue1","royalblue4","lightblue","palegreen","brown","deeppink2","red","tomato2","yellow4","seagreen","springgreen2","darkgreen","wheat4","grey","black","lightgrey"))
pdf(file=paste(prefix, "MDS_plot_all.pdf", sep="_"), width = 6, height = 6)
plotMDS(dge, labels=NULL, col=as.numeric(as.factor(group)))
legend("topright", inset=c(-0.45,0), legend=levels(as.factor(group)), fill=1:length(levels(as.factor(group))))
dev.off()
###
### F. Write log, clean up and save image
## Write log
#cat("\n### Files read for group 1:\n")
#files1
#cat("# Total number:\n")
#length(files1)
#cat("##\n\n### Files read for group 2:\n")
#files2
#cat("# Total number:\n")
#length(files2)
#cat("##\n\n### Sample information:\n")
#cts$samples
#cat("##\n\n### Count summary:\n")
#summary(cts$counts)
#cat("##\n\n### Count summary (counts per million):\n")
#summary(cpm_de)
#cat("##\n\n### Number of unique counts:\n")
#dim(cts)[1]
#cat("##\n\n### Common dispersion:\n")
#cts_comm_disp$common.dispersion
#cat("##\n\n### Pseudo/normalized library size:\n")
#cts_comm_disp$pseudo.lib.size
#cat("##\n\n### Sample comparison:\n")
#cts_exact_test$comparison
#cat("##\n\n### Differentially expressed (FDR < 0.05):\n")
#sum(summ_de[c(1,3)])
#cat("##\n\n### Downregulated, unchanged, upregulated:\n")
#summ_de
#cat("##\n\n### Session info:\n")
#print.default(sessionInfo())
#cat("##\n\n")
# Remove unused/temp files
#rm(files, group, detags)
# Save workspace image/session
# Persist every object (including the fitted tests) so the analysis can be
# re-loaded later without recomputation.
save.image(file=paste(prefix, "image.Rdata", sep="_"))
###
|
506b2eb29c784d3c39c75b095b90975496206a19 | daeec4abfed850c3cf402437c4ac1bfcc25be3f0 | /man/exemplar.Rd | a31450cb5be14f24bf24fe944b73e1826fe2816b | [] | no_license | bgreenwell/pdp | 20cf41a88de955fcd4359336079ff490071d7444 | 4f22141faf57d7e53761574067f2924aceb88e0f | refs/heads/master | 2022-08-03T22:42:31.546822 | 2022-05-27T01:35:33 | 2022-05-27T01:35:33 | 53,458,235 | 100 | 13 | null | 2022-07-18T08:14:48 | 2016-03-09T01:29:34 | R | UTF-8 | R | false | true | 1,092 | rd | exemplar.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exemplar.R
\name{exemplar}
\alias{exemplar}
\alias{exemplar.data.frame}
\alias{exemplar.matrix}
\alias{exemplar.dgCMatrix}
\title{Exemplar observation}
\usage{
exemplar(object)
\method{exemplar}{data.frame}(object)
\method{exemplar}{matrix}(object)
\method{exemplar}{dgCMatrix}(object)
}
\arguments{
\item{object}{A data frame, matrix, or
\code{\link[Matrix:dgCMatrix-class]{dgCMatrix}} (the latter two are
supported by \code{\link[xgboost]{xgboost}}).}
}
\value{
A data frame with the same number of columns as \code{object} and a
single row.
}
\description{
Construct a single "exemplar" record from a data frame. For now, all numeric
columns (including \code{"\link{Date}"} objects) are replaced with their
corresponding median value and non-numeric columns are replaced with their
most frequent value.
}
\examples{
set.seed(1554) # for reproducibility
train <- data.frame(
x = rnorm(100),
y = sample(letters[1L:3L], size = 100, replace = TRUE,
prob = c(0.1, 0.1, 0.8))
)
exemplar(train)
}
|
9301b99c302792c6181047dc53a82695c5b099b6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RNeXML/examples/nexml_write.Rd.R | 834af795deb36d3ed3a4d288617a6508aa6fc03a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,471 | r | nexml_write.Rd.R | library(RNeXML)
### Name: nexml_write
### Title: Write nexml files
### Aliases: nexml_write write.nexml
### ** Examples
## Write an ape tree to nexml, analgous to write.nexus:
library(ape); data(bird.orders)
write.nexml(bird.orders, file="example.xml")
## Not run:
##D # takes > 5s
##D ## Assemble a nexml section by section and then write to file:
##D library(geiger)
##D data(geospiza)
##D nexml <- add_trees(geospiza$phy) # creates new nexml
##D nexml <- add_characters(geospiza$dat, nexml = nexml) # pass the nexml obj to append character data
##D nexml <- add_basic_meta(title="my title", creator = "Carl Boettiger", nexml = nexml)
##D nexml <- add_meta(meta("prism:modificationDate", format(Sys.Date())), nexml = nexml)
##D
##D write.nexml(nexml, file="example.xml")
##D
##D ## As above, but in one call (except for add_meta() call).
##D write.nexml(trees = geospiza$phy,
##D characters = geospiza$dat,
##D title = "My title",
##D creator = "Carl Boettiger",
##D file = "example.xml")
##D
##D ## Mix and match: identical to the section by section:
##D nexml <- add_meta(meta("prism:modificationDate", format(Sys.Date())))
##D write.nexml(x = nexml,
##D trees = geospiza$phy,
##D characters = geospiza$dat,
##D title = "My title",
##D creator = "Carl Boettiger",
##D file = "example.xml")
##D
## End(Not run)
|
65917e4f6e373cef013041a4efa59d1537f341f3 | 5a417aada499c1e9f7ff307e3924bcc6c228aa08 | /KNN_cancer_R.R | e72a49e8f86754c7c4e3994ce6f746315bcaa155 | [] | no_license | kumarvn12/KNN | 2f50a424aed3118810e86ef869be2db1310e611b | 98cab08e04c24f017faf8cc6b46f9334a04cc270 | refs/heads/master | 2021-07-12T23:58:36.646777 | 2020-06-03T00:25:09 | 2020-06-03T00:25:09 | 152,942,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,507 | r | KNN_cancer_R.R | getwd()
## k-NN classification of the prostate-cancer data set: read the CSV,
## min-max normalise the features, split 65/35 into train/test, classify
## with k = 10 and cross-tabulate predictions against the true labels.

## Import the data set; keep character columns as plain strings.
cancer_df <- read.csv("Prostate_Cancer_KNN.csv", stringsAsFactors = FALSE)
str(cancer_df)

## Drop the first column: 'id' is a unique identifier with no predictive value.
cancer_df <- cancer_df[-1]

## diagnosis_result is the target: M(alignant) or B(enign); show the counts.
table(cancer_df$diagnosis_result)

## Readable labels, then the class balance as percentages (1 decimal place).
cancer_df$diagnosis <- factor(cancer_df$diagnosis_result, levels = c("B", "M"),
                              labels = c("Benign", "Malignant"))
round(prop.table(table(cancer_df$diagnosis)) * 100, digits = 1)

## Min-max normalisation so every feature lives on a common [0, 1] scale.
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
## Column 1 is the non-numeric target, so normalise features 2..9 only.
cancer_norm <- as.data.frame(lapply(cancer_df[2:9], normalize))

## 65/35 train/test split (rows taken in file order).
cancer_train <- cancer_norm[1:65, ]
cancer_test <- cancer_norm[66:100, ]

## Matching class labels, taken from column 1 (diagnosis_result).
cancer_train_labels <- cancer_df[1:65, 1]
cancer_test_labels <- cancer_df[66:100, 1]

## Train/predict with the knn() classifier from the 'class' package.
library(class)
cancer_test_pred <- knn(train = cancer_train, test = cancer_test,
                        cl = cancer_train_labels, k = 10)
?knn

## Evaluate performance with a confusion matrix from gmodels::CrossTable.
library(gmodels)
CrossTable(x = cancer_test_labels, y = cancer_test_pred, prop.chisq = FALSE)
?CrossTable
## Interpretation notes are in the accompanying slides.
|
9d8b984699aaeaf6cf0e7abfb96cc4434f77661d | 1ce8c7b4b6dc8df06639648450ac080ca7dd1bea | /dcrp.R | 68654bd2872af8f84d7290eb39a6016ff959e704 | [] | no_license | martinarielhartmann/MdCRQA | 7899b84d0da44fdc96b39933b4e71de7604dd6a7 | 6ab88d29cf2cb6f7c1f8e2462b91b91834402062 | refs/heads/master | 2023-02-26T20:56:31.207429 | 2021-02-01T13:00:57 | 2021-02-01T13:00:57 | 334,950,494 | 0 | 0 | null | 2021-02-01T13:00:57 | 2021-02-01T12:57:46 | null | UTF-8 | R | false | false | 1,893 | r | dcrp.R | dcrp <- function(CRP,lags,ang) {
# Compute a diagonal cross-recurrence profile (DCRP).
#
# Inputs:
#   CRP  - a (sparse) binary cross-recurrence matrix
#   lags - number of diagonals on each side of the central diagonal for
#          which percent recurrence is computed
#   ang  - rotation flag: 1 if the main diagonal of the input runs from
#          lower-left to upper-right (the matrix is then flipped); any other
#          value means no rotation.  Plots from the mdcrqa function are
#          already oriented correctly, hence ang = 0 for them.
#
# Outputs (list):
#   REC - percent recurrence on each diagonal around the central diagonal
#   lag - lag index corresponding to each REC entry
#
# Reference:
#   Wallot, S. (2017). Multidimensional Cross-Recurrence Quantification
#   Analysis (MdCRQA) - a method for quantifying correlation between
#   multivariate time-series.
#
# Version:
#   v1.0, 04. October 2017
#   by Sebastian Wallot, Max Planck Institute for Empirical Aesthetics,
#   Frankfurt, Germany
  # Load Matrix from the Matrix package (sparse-matrix support)
  library(Matrix)
  # check input variables
  # NOTE(review): exists() returns TRUE for a formal argument even when the
  # caller omitted it, so these guards never fire; missing() would be needed
  # to detect absent arguments.  Left unchanged to preserve behaviour.
  if (exists("CRP")) {
  } else {
    print("No input data has been specified")
  }
  if (exists("lags")) {
  } else {
    print("lasgs has not been specified")
  }
  # Coerce ang to 0 unless it is exactly 1.
  if (exists("ang")) {
    if (ang == 1) {
    } else {
      ang <- 0
    }
  }
  # Flip the matrix so the main diagonal runs top-left to bottom-right.
  if (ang == 1) {
    CRP <- t(apply(CRP, 2, rev))
  }
  # REC starts with a sentinel 0 that is stripped after the loop.
  REC <- 0
  # split() by (row - col) groups the matrix entries into its diagonals;
  # element round(length(CRP)/2) + 1 is then the central (lag-0) diagonal.
  CRP <- split(CRP, row(CRP) - col(CRP))
  # Walk from +lags to -lags around the central diagonal; seq() counts
  # downwards here because its first argument is the larger one.
  for (i in seq(lags+round(length(CRP)/2)+1, -lags+round(length(CRP)/2)+1)) {
    print(i)
    tempDiagLine <- unlist(CRP[i])
    # Percent recurrence = recurrent points / diagonal length.
    REC <- append(REC, sum(tempDiagLine)/length(tempDiagLine))
  }
  # Drop the sentinel, build the matching lag axis, and return both.
  REC <- REC[2:length(REC)]
  lag <- seq(-lags, lags)
  output <- list(REC = REC, lag = lag)
  return(output)
} |
93ab966e902b2d1cef24110e5554b0640f25ce85 | 1c817ee8e21b3021e2723a5a4d5799008029c415 | /analysis/concatenated.conservation.round.two.parser.R | ae730e9289414d356f20285552ab4e09850fc0c4 | [
"MIT"
] | permissive | sarahjohanknecht/conservationExperiment | e7eee371355df17b2f8daafc9ccce764b792edaf | 68c009a56f8b9437abb8c34b91a09f957888aedc | refs/heads/master | 2020-12-03T02:09:30.571366 | 2018-04-19T17:09:30 | 2018-04-19T17:09:30 | 95,905,846 | 1 | 0 | null | 2017-06-30T16:26:37 | 2017-06-30T16:26:36 | null | UTF-8 | R | false | false | 5,995 | r | concatenated.conservation.round.two.parser.R | # Set my working directory to put things in the right place
#setwd("/Users/mmgdepartment/Desktop/Conservation_9-25-2015")
# This is where files will be from both my and Emily's HPCC accounts, on
# the round of testing that gave us [WHAT IS IN THIS SET OF DATA?]
#Start Code
#Read in data
# First line sets what the base path that everything else contains is.
# Second line finds just those portions which contain whatever is in the quotation marks
# and retains them
# This starting with a . gives me a relative path based on the setwd;
# This one works with just the results; I will need to repeat with the initials.
# Pull Fitness from average.dat, not tasks. Still want phenotypes.
# Might pull just final update from results and look at just the start (initial) and end (end of results); can care about trajectories later.
# Check number of skip lines from average.dat to make sure I skip the right number of rows at start of file.
print(getwd())
#dirs <- list.dirs(path = "./results", recursive=FALSE)
#dirs <- list.dirs(path = "../round_2_results", recursive=FALSE)
dirs <- list.dirs(path = ".", recursive=FALSE)
print(dirs)
#Make sure that files from same replicate get merged appropriately
# Take just the max update for each file; looking at just endpoints
# Take just the max update for each file; looking at just endpoints
# Walk every condition directory (d) and each replicate directory beneath it
# (d2), pull the final-update row out of three Avida output files, and stack
# one row per replicate into results.data.
results.data <- NULL #This section adapted from stackoverflow.com answer
for (d in dirs) {
  for (d2 in list.dirs(path = d, recursive = FALSE)){
    # Expected output files for one replicate.
    phenotype_file_path <- paste(d2, "phenotype_count.dat", sep = "/")
    average_file_path <- paste(d2, "average.dat", sep = "/")
    count_file_path <- paste(d2, "count.dat", sep = "/" )
    # Skip replicates missing any of the three files (crashed/incomplete runs).
    if (file.exists(phenotype_file_path) & file.exists(average_file_path) & file.exists(count_file_path) ){
      # phenotype_count.dat: 8 header lines, space-separated; read everything
      # as character, then keep only the row for the largest (final) update.
      phenotypes <- read.csv(phenotype_file_path, header = F, sep = " ", na.strings = "", colClasses = "character", skip = 8)
      phenotypes[,1]<-as.numeric(as.character(phenotypes[,1]))
      max.update.phenotype<-max(phenotypes[,1], na.rm=TRUE)
      phenotypes<-subset(phenotypes, phenotypes[,1]==max.update.phenotype)
      # average.dat: 19 header lines; again keep only the final-update row.
      fitness <- read.csv(average_file_path, header = F, sep = " ", na.strings = "", colClasses = "character", skip = 19)
      fitness[,1]<-as.numeric(as.character(fitness[,1]))
      max.update.fitness<-max(fitness[,1], na.rm=TRUE)
      fitness<-subset(fitness, fitness[,1]==max.update.fitness)
      # Progress/debug output for batch runs.
      print(max.update.fitness)
      print(average_file_path)
      print(length(rownames(fitness)))
      # Per the colnames assigned below: col 1 = update, col 4 = fitness,
      # col 13 = generation. Confirm against the average.dat data dictionary.
      wanted <- cbind(fitness[,1], fitness[,4], fitness[,13])
      wanted[,1]<-as.numeric(as.character(wanted[,1]))
      wanted[,2]<-as.numeric(as.character(wanted[,2]))
      wanted[,3]<-as.numeric(as.character(wanted[,3]))
      # count.dat supplies population size (col 3) and unique genotype count
      # (col 4), per the colnames assigned below.
      counts <- read.csv(count_file_path, header = F, sep = " ", na.strings = "", colClasses = "character", skip = 19)
      # print(wanted)
      # print(counts[,3])
      # print(counts[,4])
      print(length(rownames(counts)))
      print(length(rownames(wanted)))
      wanted <- cbind(wanted, counts[,3], counts[,4])
      wanted[,4]<-as.numeric(as.character(wanted[,4]))
      wanted[,5]<-as.numeric(as.character(wanted[,5]))
      fitness <- wanted
      colnames(fitness) <- c("Update", "Fitness", "Generation", "Pop.Size", "Unique.Genotypes")
      colnames(phenotypes) <- c("Update", "Unique.Phenotypes.Task", "Shannon.Diversity", "Unique.Phenotype.Count", "Average.Phenotype.Diversity", "Average.Task.Diversity")
      # Join phenotype and fitness rows on Update (column 1).
      dat <- merge(phenotypes, fitness, by = 1)
      # Parse experimental metadata out of the condition directory name.
      dat$Condition <- tail(unlist(strsplit(d, split = "/", fixed = T)), n=1)
      # Number killed: digits immediately preceding "_killed" in the name.
      dat$Killed <- as.numeric(unlist(regmatches(dat$Condition, gregexpr('\\d+(?=_killed)', dat$Condition , perl=T))))
      # Patch count defaults to 0 when the name contains no "patch" token.
      dat$Patches <- 0
      if (as.numeric(unlist(gregexpr('patch', dat$Condition , perl=T)))>0) {
        dat$Patches <- as.numeric(unlist(regmatches(dat$Condition, gregexpr('\\d+(?=_patch)', dat$Condition , perl=T))))
      }
      # Ecology flag: "Y" when the condition name contains "ecology".
      dat$Ecology <- "N"
      if (as.numeric(unlist(gregexpr('ecology', dat$Condition , perl=T)))>0) {
        dat$Ecology <- "Y"
      }
      # Seed is the trailing "_<seed>" token of the condition name; the initial
      # population is the first "_" token after the last "-".
      full.string <- tail(unlist(strsplit(d, split = "/", fixed = T)), n=1)
      seed <- tail(unlist(strsplit(full.string, split = "_", fixed = T)), n=1)
      foo <- tail(unlist(strsplit(d, split = "/", fixed = T)), n=1)
      blah <- tail(unlist(strsplit(foo, split = "-", fixed = T)), n=1)
      initial.pop <- head(unlist(strsplit(blah, split = "_", fixed = T)), n=1)
      dat$Initial.Pop<-initial.pop
      dat$Seed <- as.numeric(as.character(seed))
      results.data <- rbind(results.data, dat)
    }
  }
}
# NOTE(review): this reassignment of `dirs` is never used below -- everything
# after this point operates on the `results.data` built above. Presumably a
# leftover from running the ingest loop against a second HPCC results tree;
# confirm before removing.
dirs <- list.dirs(path = "/mnt/home/dolsonem/conservation/round_2_results", recursive=FALSE)
#Make sure that files from same replicate get merged appropriately
# Take just the max update for each file; looking at just endpoints
# Take just the max update for each file; looking at just endpoints
# Keep an untouched copy of the parsed data before any type coercion.
backup.results.data <- results.data
results.data <- backup.results.data

# Categorical experimental descriptors become factors.
for (factor.col in c("Condition", "Ecology", "Initial.Pop")) {
  results.data[[factor.col]] <- factor(results.data[[factor.col]])
}

# Measurement columns were read as character; coerce them to numeric, going
# through as.character() first to avoid factor-level-to-integer surprises.
numeric.cols <- c("Unique.Phenotypes.Task", "Shannon.Diversity",
                  "Unique.Phenotype.Count", "Average.Phenotype.Diversity",
                  "Average.Task.Diversity", "Fitness", "Generation",
                  "Unique.Genotypes", "Pop.Size")
for (numeric.col in numeric.cols) {
  results.data[[numeric.col]] <- as.numeric(as.character(results.data[[numeric.col]]))
}

# Write these results to a data file.
write.csv(results.data, file="Concatenated.Conservation.Data.Round.Two.csv")
|
49d20a8d4cc9c39cb98b415d3d2233e1b6dd04d4 | 80725c80adec9f01d51c77dec14a0c5ea000c5b0 | /cmap.R | 23b0a25f8520345bc64f4b933c32d63867396821 | [] | no_license | henrymanley/cMapKMeans | 8ae854e34d3d5a176075a37cd6f0469d62378193 | 711a16470495096080fd85daf85ed10b8e0d3be9 | refs/heads/master | 2023-04-22T11:45:34.472454 | 2021-05-07T15:56:56 | 2021-05-07T15:56:56 | 363,974,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 429 | r | cmap.R | # Calls the sorting function, feeds rCMAP
library("reticulate")
library("shiny")
library("DT")
library("readxl")
library("rtf")
library("smacof")
library("xtable")
path = getwd()
if (!grepl("cMapKMeans", path)){
setwd(paste(path, "/cMapKMeans", sep = ""))
}
# Make data -- need to fix this
py_run_file("autoSort.py")
# Run App
runApp("/Users/henrymanley/Downloads/RCMAP",display.mode="no", launch.browser=TRUE, port=2197)
|
4332a04510e223971e5d05290bc6f84036301b71 | aff50fb938de4b301f78eddc361fb8c6723f3bc7 | /lab2/log.R | ebfded756ac22bc57b6bd2bfce74ee728c2c3efb | [] | no_license | Evantastic/analisis_12020 | 0820e575462da36453074af183d0bee6dd5d7abe | 096d719acf5bba4edcd4bd9871ffde06dd6191b1 | refs/heads/master | 2022-12-21T19:26:19.620711 | 2020-09-13T01:26:03 | 2020-09-13T01:26:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,417 | r | log.R | library(ggplot2)
library(dplyr)
library(fastDummies)
url <- "https://raw.githubusercontent.com/Evantastic/analisis12020/master/agaricus-lepiota.data"
set.seed(88)
personal.theme <- theme(plot.title = element_text(family = "Helvetica", face = "bold", size = (15), hjust = 0.5),
legend.title = element_text(colour = "steelblue", face = "bold.italic", family = "Helvetica"),
legend.text = element_text(face = "italic", colour="steelblue4",family = "Helvetica"),
axis.title = element_text(family = "Helvetica", size = (10), colour = "steelblue4"),
axis.text = element_text(family = "Courier", colour = "cornflowerblue", size = (10)))
mushrooms <- read.table(url, header = TRUE, sep = ",")
mushrooms$veil.type <- NULL
mushrooms <- mushrooms[which(mushrooms$stalk.root != '?'),]
mushrooms1 <- dummy_cols(mushrooms, select_columns = c("class"))%>%select(-c("class"))
model1 <- glm(class_p~cap.shape+cap.surface+cap.color+bruises+odor+
gill.attachment+gill.spacing+gill.size+gill.color+stalk.shape+
stalk.root+stalk.surface.above.ring+stalk.surface.below.ring+
stalk.color.above.ring+stalk.color.below.ring+veil.color+
ring.number+ring.type+spore.print.color+population+habitat,
data = mushrooms1, family = binomial, control = list(maxit = 50))
|
b36a0cdeccfb8fa8ae216bed83f030cac4425c67 | 2448d4800d4336b53489bcce3c17a32e442a7716 | /R/check-results.R | ab8a5479176df8cbf421605b91549d6859ba0782 | [] | no_license | vsbuffalo/devtools | 17d17fd1d2fb620fef8d9883dffed389f80e39fb | 782e6b071d058eea53aae596a3c120d61df2f0b4 | refs/heads/master | 2020-12-24T10:41:24.637105 | 2016-02-18T14:03:05 | 2016-02-18T14:03:05 | 52,121,375 | 2 | 0 | null | 2016-02-19T22:42:43 | 2016-02-19T22:42:43 | null | UTF-8 | R | false | false | 3,499 | r | check-results.R | parse_check_results <- function(path) {
lines <- paste(readLines(path, warn = FALSE), collapse = "\n")
# Strip off trailing NOTE and WARNING messages
lines <- gsub("^NOTE: There was .*\n$", "", lines)
lines <- gsub("^WARNING: There was .*\n$", "", lines)
pieces <- strsplit(lines, "\n\\* ")[[1]]
structure(
list(
errors = pieces[grepl("... ERROR", pieces, fixed = TRUE)],
warnings = pieces[grepl("... WARN", pieces, fixed = TRUE)],
notes = pieces[grepl("... NOTE", pieces, fixed = TRUE)]
),
path = path,
class = "check_results"
)
}
# Raise an error if the check results contain problems at or above the
# requested severity ("none" never raises). Returns TRUE invisibly otherwise.
signal_check_results <- function(x, on = c("none", "error", "warning", "note")) {
  on <- match.arg(on)
  present <- vapply(x, function(msgs) length(msgs) > 0, logical(1))

  should_stop <-
    (on == "error"   && present[["errors"]]) ||
    (on == "warning" && (present[["errors"]] || present[["warnings"]])) ||
    (on == "note"    && (present[["errors"]] || present[["warnings"]] || present[["notes"]]))

  if (should_stop) {
    stop(summarise_check_results(x), call. = FALSE)
  }
  invisible(TRUE)
}
#' @export
print.check_results <- function(x, ...) {
  # Header and one-line summary go to the message stream; the detailed
  # check output itself goes to stdout.
  message("R CMD check results")
  message(summarise_check_results(x))
  body <- format(x)
  cat(body, "\n", sep = "")
  invisible(x)
}
#' @export
format.check_results <- function(x, ...) {
  # Shorten each entry in the middle, then join with blank lines.
  shortened <- trunc_middle(unlist(x))
  paste(shortened, collapse = "\n\n")
}
# One-line "N errors | N warnings | N notes" summary; errors/warnings are
# highlighted when colour = TRUE and they are non-zero.
summarise_check_results <- function(x, colour = FALSE) {
  counts <- lapply(x, length)
  parts <- c(
    show_count(counts$errors, "error ", "errors", colour && counts$errors > 0),
    show_count(counts$warnings, "warning ", "warnings", colour && counts$warnings > 0),
    show_count(counts$notes, "note ", "notes")
  )
  paste(parts, collapse = " | ")
}
# Format "<n> <noun>", choosing singular/plural via ngettext; optionally
# colour the label red when crayon is installed.
show_count <- function(n, singular, plural, is_error = FALSE) {
  label <- paste(n, ngettext(n, singular, plural))
  if (is_error && requireNamespace("crayon", quietly = TRUE)) {
    label <- crayon::red(label)
  }
  label
}
# TRUE when the nested $results carry any errors or warnings (notes ignored).
has_problems <- function(x) {
  res <- x$results
  length(res$errors) > 0 || length(res$warnings) > 0
}
# First line of the first error (or, failing that, first warning);
# NA_character_ when there are neither. Notes are never reported here.
first_problem <- function(x) {
  candidates <- c(x$errors, x$warnings)
  if (length(candidates) == 0) {
    return(NA_character_)
  }
  strsplit(candidates[[1]], "\n", fixed = TRUE)[[1]][1]
}
# Elide the middle of long multi-line strings: each element of `x` with more
# than n_max lines is reduced to its first n_top lines, a "... N lines ..."
# marker, and its last n_bottom lines.
#
# @param x Character vector; each element may contain embedded newlines.
# @param n_max Maximum line count before truncation kicks in.
# @param n_top,n_bottom Lines kept at the start/end of a truncated element.
# @return Character vector the same length as `x` (names dropped).
trunc_middle <- function(x, n_max = 25, n_top = 10, n_bottom = 10) {
  trunc_middle_one <- function(x) {
    lines <- strsplit(x, "\n", fixed = TRUE)[[1]]
    nlines <- length(lines)
    if (nlines <= n_max)
      return(x)
    n_dropped <- nlines - n_top - n_bottom
    paste(c(
      lines[1:n_top],
      paste0("... ", n_dropped, " lines ..."),
      # BUG FIX: was (nlines - n_bottom):nlines, which kept n_bottom + 1
      # trailing lines and so over-stated the elided count by one.
      lines[(nlines - n_bottom + 1):nlines]
    ), collapse = "\n")
  }
  vapply(x, trunc_middle_one, character(1), USE.NAMES = FALSE)
}
#' Parses R CMD check log file for ERRORs, WARNINGs and NOTEs
#'
#' Extracts check messages from the \code{00check.log} file generated by
#' \code{R CMD check}.
#'
#' @param path check path, e.g., value of the \code{check_dir} argument in a
#'   call to \code{\link{check}}
#' @param error,warning,note logical, indicates if errors, warnings and/or
#'   notes should be returned
#' @return a character vector with the relevant messages, can have length zero
#'   if no messages are found
#'
#' @seealso \code{\link{check}}, \code{\link{revdep_check}}
#' @export
check_failures <- function(path, error = TRUE, warning = TRUE, note = TRUE) {
  log_file <- file.path(path, "00check.log")
  results <- parse_check_results(log_file)
  # NULL entries (deselected categories) vanish when concatenated.
  pieces <- list(
    if (error) results$errors,
    if (warning) results$warnings,
    if (note) results$notes
  )
  do.call(c, pieces)
}
|
a13f1b2d83ab0a7417d9cf1a484dd2eed11b4431 | 6327b8dfd117375e89010d669a67f88689a83aaa | /cachematrix.R | 0ea6ecac274c4f9eab8f2b827d175661be4ec040 | [] | no_license | ishan-varshney/ProgrammingAssignment2 | 9caffe27397d50ca29baa472054407b1823af307 | 7d57c6db11866bf782744900e44bcfa7cf26f65a | refs/heads/master | 2022-11-16T08:04:53.869111 | 2020-07-08T16:21:30 | 2020-07-08T16:21:30 | 278,133,937 | 0 | 0 | null | 2020-07-08T16:23:24 | 2020-07-08T15:59:57 | R | UTF-8 | R | false | false | 883 | r | cachematrix.R | ## This function creates a matrix that can cache the inverse of itself
## Args: x - A matrix passed in as an argument
makeCacheMatrix <- function(x = matrix()) {
## cached inverse of matrix
inv <- NULL
## setter for matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
## getter for matrix
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Return the inverse of a makeCacheMatrix() wrapper, computing it at most
## once: later calls reuse the cached value. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("obtaining and processing cached data")
  }
  cached
}
|
b9cd81f60dcb93925a812943d88fc544a76cff9c | 1d19af7e2088f3570693981cb19001dd2e4f33f5 | /tests/testthat/api.twitter.com/2/users/by-ed3b03.R | df5d520f11e22835281846fd75d3b266ed5c651a | [
"MIT"
] | permissive | cjbarrie/academictwitteR | 1fa1852458795f175e65ced8d119e8c8198c2e3e | 9b573fe799c56353362dddc94de442852da67266 | refs/heads/master | 2023-07-07T03:57:55.082369 | 2023-06-30T07:51:40 | 2023-06-30T07:51:40 | 340,095,903 | 228 | 57 | NOASSERTION | 2023-02-14T12:23:45 | 2021-02-18T15:38:58 | R | UTF-8 | R | false | false | 3,256 | r | by-ed3b03.R | structure(list(url = "https://api.twitter.com/2/users/by?usernames=icahdq%2CPOTUS%2ChadleywickhaM%2C_R_Foundation%2Cadljfhjsd",
status_code = 200L, headers = structure(list(date = "Tue, 02 Nov 2021 20:37:27 UTC",
server = "tsa_o", `api-version` = "2.27", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0", `content-length` = "353",
`x-access-level` = "read", `x-frame-options` = "SAMEORIGIN",
`content-encoding` = "gzip", `x-xss-protection` = "0",
`x-rate-limit-limit` = "900", `x-rate-limit-reset` = "1635886347",
`content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "899",
`strict-transport-security` = "max-age=631138519", `x-response-time` = "153",
`x-connection-hash` = "ebbd90b82704b550bc7da12bfde5c544cde308b0971c01f98f16f6c9d980e06f"), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(date = "Tue, 02 Nov 2021 20:37:27 UTC",
server = "tsa_o", `api-version` = "2.27", `content-type` = "application/json; charset=utf-8",
`cache-control` = "no-cache, no-store, max-age=0",
`content-length` = "353", `x-access-level` = "read",
`x-frame-options` = "SAMEORIGIN", `content-encoding` = "gzip",
`x-xss-protection` = "0", `x-rate-limit-limit` = "900",
`x-rate-limit-reset` = "1635886347", `content-disposition` = "attachment; filename=json.json",
`x-content-type-options` = "nosniff", `x-rate-limit-remaining` = "899",
`strict-transport-security` = "max-age=631138519",
`x-response-time` = "153", `x-connection-hash` = "ebbd90b82704b550bc7da12bfde5c544cde308b0971c01f98f16f6c9d980e06f"), class = c("insensitive",
"list")))), cookies = structure(list(domain = c(".twitter.com",
".twitter.com"), flag = c(TRUE, TRUE), path = c("/", "/"),
secure = c(TRUE, TRUE), expiration = structure(c(1698938349,
1698938349), class = c("POSIXct", "POSIXt")), name = c("personalization_id",
"guest_id"), value = c("REDACTED", "REDACTED")), row.names = c(NA,
-2L), class = "data.frame"), content = charToRaw("{\"data\":[{\"id\":\"31560768\",\"name\":\"International Communication Association\",\"username\":\"icahdq\"},{\"id\":\"1349149096909668363\",\"name\":\"President Biden\",\"username\":\"POTUS\"},{\"id\":\"69133574\",\"name\":\"Hadley Wickham\",\"username\":\"hadleywickham\"},{\"id\":\"794458165987438592\",\"name\":\"The R Foundation\",\"username\":\"_R_Foundation\"}],\"errors\":[{\"value\":\"adljfhjsd\",\"detail\":\"Could not find user with usernames: [adljfhjsd].\",\"title\":\"Not Found Error\",\"resource_type\":\"user\",\"parameter\":\"usernames\",\"resource_id\":\"adljfhjsd\",\"type\":\"https://api.twitter.com/2/problems/resource-not-found\"}]}"),
date = structure(1635885447, class = c("POSIXct", "POSIXt"
), tzone = "GMT"), times = c(redirect = 0, namelookup = 0.023172,
connect = 0.037088, pretransfer = 0.058144, starttransfer = 0.237719,
total = 0.23788)), class = "response")
|
11766395eae055c2c7d3fc2ec8978702b8631bda | 98ba050961cae57b4613f99128e681a8944fc889 | /examples/Chapter6.R | 4b46ad10c8202192b92f57f882a3dc6f50bac1a6 | [] | no_license | CHeitzenrater/InfoSecEcon-InvestmentModels | 3e70bef518ea96d86ad58b55f2a57d44bd7c33ae | e83f2e2bf387982dfb6422361e8190d3c1b647fe | refs/heads/master | 2021-07-17T22:42:55.568407 | 2017-10-25T20:41:00 | 2017-10-25T20:41:00 | 64,062,883 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 23,300 | r | Chapter6.R | ################################################################################
##
## Code to generate the analysis/graphs used in dissertation Chapter 6,
## "Software Security Economics in Practice"
##
################################################################################
## Required source files
source("./models/GL-SSE.R", local=TRUE)
source("./models/GL.R", local=TRUE)
################################################################################
## Parameters
################################################################################
## Project budget numbers
# Per-phase development budgets (presumably in $K -- see the `factor`
# scaling below; confirm units against the dissertation text).
phase1Budget = 700
phase2Budget = 800
develBudget = phase1Budget+phase2Budget
frameworkBudget = 20000
overallBudget = frameworkBudget + develBudget
## GL parameters, from chapter 4
# Gordon-Loeb productivity parameters and baseline vulnerability.
GLAlpha = 0.5142
GLBeta = 1
Vuln = 1
## Investment of Project A already in security
# (2/43) of the phase-1 budget -- presumably 2 of 43 effort units were
# security-related; confirm against the project accounting.
securityInvestment = (2/43)*700
# Unit rescaling applied to budgets before feeding the GL functions
# (e.g. factor = 100 expresses budgets in $100K units).
factor = 100
################################################################################
## Phase 1 calculations
################################################################################
#####
## Calculation for GL, phase 1
## Calculate the investment over various threat stances (different values of t)
##
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.01,phase1Budget/factor)
# BUDGET = 600 --- [1] 1.471166
# BUDGET = 700 --- [1] 1.744863
# factor = 10: -0.7780047
# factor = 100: -0.829946
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.05,phase1Budget/factor)
# BUDGET = 600 --- [1] 5.693494
# BUDGET = 700 --- [1] 6.305498
# factor = 10: 0.6641948
# factor = 100: -0.2777199
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.1,phase1Budget/factor)
# BUDGET = 600 --- [1] 8.857366
# BUDGET = 700 --- [1] 9.72287
# factor = 10: 1.744863
# factor = 100: 0.03769753
GL_ZstarI(Vuln,GLAlpha,GLBeta,1,phase1Budget/factor)
# BUDGET = 700 --- [1] 34.95155
# factor = 10: 9.72287
# factor = 100: 1.58061
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.95,phase1Budget/factor)
# BUDGET = 700 --- [1] 34.01731
# factor = 10: 9.427439
# factor = 100: 1.535692
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.90,phase1Budget/factor)
# BUDGET = 700 --- [1] 33.05815
# factor = 10: 9.124126
# factor = 100: 1.488963
GL_ZstarI(Vuln,GLAlpha,GLBeta,1,develBudget/factor)
# BUDGET = 1500 --- [1] 52.0659
# factor = 10: 15.13491
# factor = 100: 2.320576
GL_ZstarI_v(Vuln,GLAlpha,GLBeta,1,develBudget/factor)
# BUDGET = 1500 --- [1] 0.03600712
# factor = 10: 0.1138645
# factor = 100: 0.09478542
####
## percentages
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.01,phase1Budget/factor) / (phase1Budget/factor)
# BUDGET = 600 --- [1] 0.002451943
# BUDGET = 700 --- [1] 0.002492661
# factor = 10: -0.01111435
# factor = 100: -0.1185637
GL_ZstarI(Vuln,GLAlpha,GLBeta,0.1,phase1Budget/factor) / (phase1Budget/factor)
# BUDGET = 600 --- [1] 0.01476228
# BUDGET = 700 --- [1] 0.01388981
# factor = 10: 0.02492661
# factor = 100: 0.005385361
GL_ZstarI(Vuln,GLAlpha,GLBeta,1,develBudget/factor) / (develBudget/factor)
# BUDGET = 1500 --- [1] 0.0347106
# factor = 10: 0.1008994
# factor = 100: 0.154705
GL_ZstarI(Vuln,GLAlpha,GLBeta,1,overallBudget/factor) / (overallBudget/factor)
# BUDGET = 21500 --- [1] 0.009420295
# factor = 10: 0.02917109
# factor = 100: 0.02955591
################################################################################
## Phase 2 Calculations
################################################################################
#####
## Graph from Chapter 6 --- varying z2 from 1% to 3%
##
## Computes ENBIS and ROSI across pre-deployment investment levels for three
## post-deployment spends (z2 = $175K, $300K, $645K) and plots ROSI curves.
factor = 100
p2Lambda = overallBudget/factor
p2pre = seq(1,p2Lambda) #, by=0.01)
p2t = 1
GLAlpha2 = 1.0
GLAlpha = 0.5142
GLBeta = 3
delta = 0.1
Vuln = 0.95
# ENBIS with zero pre-deployment investment (baselines) ...
p2ENBIS_0_175 = GL_SSE_S1_S2(Vuln,0,1.75,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_0_300 = GL_SSE_S1_S2(Vuln,0,3,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_0_645 = GL_SSE_S1_S2(Vuln,0,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# ... and swept across pre-deployment investment levels.
p2ENBIS_175 = GL_SSE_S1_S2(Vuln,p2pre,1.75,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_300 = GL_SSE_S1_S2(Vuln,p2pre,3,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_645 = GL_SSE_S1_S2(Vuln,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# ROSI = ENBIS per unit of total security spend (z1 + z2).
ROSI_175 = p2ENBIS_175 / (p2pre+1.75)
ROSI_300 = p2ENBIS_300 / (p2pre+3)
ROSI_645 = p2ENBIS_645 / (p2pre+6.45)
ROSI_0_175 = p2ENBIS_0_175 / (1.75)
ROSI_0_300 = p2ENBIS_0_300 / (3)
ROSI_0_645 = p2ENBIS_0_645 / (6.45)
# ROSSP = ROSI with pre-deployment investment minus the zero-investment ROSI.
# BUG FIX: the first line read `ROSI_100 - ROSI_0_175`; ROSI_100 is never
# defined anywhere in this script -- the intended operand is ROSI_175.
ROSSP_175 = ROSI_175 - ROSI_0_175
ROSSP_300 = ROSI_300 - ROSI_0_300
ROSSP_645 = ROSI_645 - ROSI_0_645
plot(p2pre, ROSI_645, type="l", lty=1, ylim=range(-1,75), ylab=c(expression(paste("ROSI"))),
	xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($100K)"))) )
#line(p2pre, p2ENBIS3, type="l", lty=1)
abline(0, 0, col = "black")
lines(p2pre, ROSI_300, type="l", lty=2, pch=0)
lines(p2pre, ROSI_175, type="l", lty=3, pch=0)
legend("top", c(expression(paste('z'[2],"=$175K (0.8%)")), expression(paste('z'[2],"=$300K (1.4%)")), expression(paste('z'[2],"=$645K (3%)")) ), lty=c(3,2,1), horiz=TRUE, bty="n", cex=0.65)
p2ENBIS_0_645_099 = GL_SSE_S1_S2(0.99,0,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_0_645_095 = GL_SSE_S1_S2(0.95,0,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_0_645_090 = GL_SSE_S1_S2(0.90,0,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_0_645_100 = GL_SSE_S1_S2(1.00,0,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_645_099 = GL_SSE_S1_S2(0.99,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_645_095 = GL_SSE_S1_S2(0.95,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_645_090 = GL_SSE_S1_S2(0.90,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS_645_100 = GL_SSE_S1_S2(1.00,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
ROSI_645_099 = p2ENBIS_645_099 / (p2pre+6.45)
ROSI_645_095 = p2ENBIS_645_095 / (p2pre+6.45)
ROSI_645_090 = p2ENBIS_645_090 / (p2pre+6.45)
ROSI_645_100 = p2ENBIS_645_100 / (p2pre+6.45)
ROSI_0_645_099 = p2ENBIS_0_645_099 / (6.45)
ROSI_0_645_095 = p2ENBIS_0_645_095 / (6.45)
ROSI_0_645_090 = p2ENBIS_0_645_090 / (6.45)
ROSI_0_645_100 = p2ENBIS_0_645_100 / (6.45)
ROSSP_645_099 = ROSI_645_099 - ROSI_0_645_099
ROSSP_645_095 = ROSI_645_095 - ROSI_0_645_095
ROSSP_645_090 = ROSI_645_090 - ROSI_0_645_090
ROSSP_645_100 = ROSI_645_100 - ROSI_0_645_100
plot(p2pre, ROSSP_645_099, type="l", lty=1, ylim=range(-20,30), ylab=c(expression(paste("ROSSP"))),
xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($100K)"))) )
#line(p2pre, p2ENBIS3, type="l", lty=1)
abline(0, 0, col = "black")
lines(p2pre, ROSSP_645_095, type="l", lty=2, pch=0)
lines(p2pre, ROSSP_645_090, type="l", lty=3, pch=0)
lines(p2pre, ROSSP_645_100, type="l", lty=4, pch=0)
legend("top", c(expression(paste("v=1.00")), expression(paste("v=0.99")), expression(paste("v=0.95")), expression(paste("v=0.90")) ), lty=c(4,1,2,3), horiz=TRUE, bty="n", cex=0.65)
#legend("top", c(expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")) ), lty=c(2,1,3,4), horiz=TRUE, bty="n", cex=0.65)
legend("top", c(expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=10%")), expression(paste('z'[2],"=37%")) ), lty=c(1,2,3,4), horiz=TRUE, bty="n", cex=0.65)
#expression(paste('z'[2],"=0.5%"))
factor = 100
p2Lambda = overallBudget/factor
p2pre = seq(1,p2Lambda) #, by=0.01)
p2t = 1
GLAlpha2 = 1.0
GLAlpha = 0.5142
GLBeta = 1.0
delta = 0.1
Vuln = 0.95
v = GL_SSE_S1_S2_retVs(Vuln,p2pre,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
v_dep = unlist(v[[1]])
v_pst = unlist(v[[2]])
resS1 = unlist(v[[3]])
resS2 = unlist(v[[4]])
v = GL_SSE_S1_S2_retVs(Vuln,7.5,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
v_dep = unlist(v[[1]])
v_pst = unlist(v[[2]])
resS1 = unlist(v[[3]])
resS2 = unlist(v[[4]])
v = GL_SSE_S1_S2_retVs(Vuln,0.34,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
v_dep = unlist(v[[1]])
v_pst = unlist(v[[2]])
resS1 = unlist(v[[3]])
resS2 = unlist(v[[4]])
v = GL_SSE_S1_S2_retVs(Vuln,10,6.45,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
v_dep = unlist(v[[1]])
v_pst = unlist(v[[2]])
resS1 = unlist(v[[3]])
resS2 = unlist(v[[4]])
# #####
# ## Graph from Chapter 6 --- varying z2 from 1% to 3%
# ##
# p2Lambda = overallBudget/factor
# p2pre = seq(1,p2Lambda) #, by=0.01)
# p2t = 1
# GLAlpha2 = 1
# GLAlpha = 0.5142
# GLBeta = 3
# delta = 0.1
# Vuln = 1
# p2ENBIS1_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.01,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS3_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS10_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.10,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS37_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.37,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS1 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.01,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS3 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS10 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.10,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS37 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.37,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# #p2ENBIS25 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.25,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# #p2ENBIS5 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.05,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# #p2ENBIS05 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.005,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# ROSI1 = p2ENBIS1 / (p2pre+p2Lambda*0.01)
# ROSI3 = p2ENBIS3 / (p2pre+p2Lambda*0.03)
# ROSI10 = p2ENBIS10 / (p2pre+p2Lambda*0.10)
# ROSI37 = p2ENBIS37 / (p2pre+p2Lambda*0.37)
# ROSI1_0 = p2ENBIS1_0 / (p2Lambda*0.01)
# ROSI3_0 = p2ENBIS3_0 / (p2Lambda*0.03)
# ROSI10_0 = p2ENBIS10_0 / (p2Lambda*0.10)
# ROSI37_0 = p2ENBIS37_0 / (p2Lambda*0.37)
# ROSSP1 = ROSI1 - ROSI1_0
# ROSSP3 = ROSI3 - ROSI3_0
# ROSSP10 = ROSI10 - ROSI10_0
# ROSSP37 = ROSI37 - ROSI37_0
# plot(p2pre, ROSSP1, type="l", lty=1, ylim=range(-1,100), ylab=c(expression(paste("ROSSP"))),
# xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
# #line(p2pre, p2ENBIS3, type="l", lty=1)
# abline(0, 0, col = "black")
# #lines(p2pre, p2ENBIS05, type="l", lty=2, pch=0)
# #lines(p2pre, p2ENBIS1, type="l", lty=2, pch=0)
# lines(p2pre, ROSSP3, type="l", lty=2, pch=0)
# lines(p2pre, ROSSP10, type="l", lty=3, pch=0)
# lines(p2pre, ROSSP37, type="l", lty=4, pch=0)
# #legend("top", c(expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")) ), lty=c(2,1,3,4), horiz=TRUE, bty="n", cex=0.65)
# legend("top", c(expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=10%")), expression(paste('z'[2],"=37%")) ), lty=c(1,2,3,4), horiz=TRUE, bty="n", cex=0.65)
# #expression(paste('z'[2],"=0.5%"))
#####
## Graph from Chapter 6 --- varying t
##
p2ENBIS3_t100_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,1.00,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t080_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,0.80,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t050_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,0.50,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t020_0 = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,0.20,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t100 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,1.00,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t080 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,0.80,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t050 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,0.50,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_t020 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,0.20,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
ROSI3_t100 = p2ENBIS3_t100 / (p2pre+p2Lambda*0.03)
ROSI3_t080 = p2ENBIS3_t080 / (p2pre+p2Lambda*0.03)
ROSI3_t050 = p2ENBIS3_t050 / (p2pre+p2Lambda*0.03)
ROSI3_t020 = p2ENBIS3_t020 / (p2pre+p2Lambda*0.03)
ROSI3_t100_0 = p2ENBIS3_t100_0 / (p2Lambda*0.03)
ROSI3_t080_0 = p2ENBIS3_t080_0 / (p2Lambda*0.03)
ROSI3_t050_0 = p2ENBIS3_t050_0 / (p2Lambda*0.03)
ROSI3_t020_0 = p2ENBIS3_t020_0 / (p2Lambda*0.03)
ROSSP3_t100 = ROSI3_t100 - ROSI3_t100_0
ROSSP3_t080 = ROSI3_t080 - ROSI3_t080_0
ROSSP3_t050 = ROSI3_t050 - ROSI3_t050_0
ROSSP3_t020 = ROSI3_t020 - ROSI3_t020_0
plot(p2pre, ROSSP3_t100, type="l", lty=1, ylim=range(-2,40), xlim=range(0,p2Lambda), ylab=c(expression(paste("ROSSP"))),
xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
abline(0, 0, col = "black")
lines(p2pre, ROSSP3_t080, type="l", lty=2, pch=0)
lines(p2pre, ROSSP3_t050, type="l", lty=3, pch=0)
lines(p2pre, ROSSP3_t020, type="l", lty=4, pch=0)
legend("top", c(expression(paste("t = 1")), expression(paste("t = 0.80")), expression(paste("t = 0.50")), expression(paste("t = 0.20")) ), lty=c(1,2,3,4), horiz=TRUE, bty="n", cex=0.65)
#####
## Graph from Chapter 6 --- varying v
##
per = 0.03
z2Expend = p2Lambda*per
#z2Expend = 3000
p2ENBIS3_v100_0 = GL_SSE_S1_S2(1.00,0,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v099_0 = GL_SSE_S1_S2(0.99,0,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v095_0 = GL_SSE_S1_S2(0.95,0,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v090_0 = GL_SSE_S1_S2(0.90,0,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v100 = GL_SSE_S1_S2(1.00,p2pre,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v099 = GL_SSE_S1_S2(0.99,p2pre,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v095 = GL_SSE_S1_S2(0.95,p2pre,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ENBIS3_v090 = GL_SSE_S1_S2(0.90,p2pre,z2Expend,1,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
ROSI3_v100 = p2ENBIS3_v100 / (p2pre+z2Expend)
ROSI3_v099 = p2ENBIS3_v099 / (p2pre+z2Expend)
ROSI3_v095 = p2ENBIS3_v095 / (p2pre+z2Expend)
ROSI3_v090 = p2ENBIS3_v090 / (p2pre+z2Expend)
ROSI3_v100_0 = p2ENBIS3_v100_0 / (z2Expend)
ROSI3_v099_0 = p2ENBIS3_v099_0 / (z2Expend)
ROSI3_v095_0 = p2ENBIS3_v095_0 / (z2Expend)
ROSI3_v090_0 = p2ENBIS3_v090_0 / (z2Expend)
ROSSP3_v100 = ROSI3_v100 - ROSI3_v100_0
ROSSP3_v099 = ROSI3_v099 - ROSI3_v099_0
ROSSP3_v095 = ROSI3_v095 - ROSI3_v095_0
ROSSP3_v090 = ROSI3_v090 - ROSI3_v090_0
plot(p2pre, ROSSP3_v100, type="l", lty=1, ylim=range(-30,40), ylab=c(expression(paste("ROSSP"))),
xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($100K)"))) )
abline(0, 0, col = "black")
lines(p2pre, ROSSP3_v099, type="l", lty=2, pch=0)
lines(p2pre, ROSSP3_v095, type="l", lty=3, pch=0)
lines(p2pre, ROSSP3_v090, type="l", lty=4, pch=0)
legend("top", c(expression(paste("v = 1.0")), expression(paste("v = 0.99")), expression(paste("v = 0.95")), expression(paste("v = 0.90")) ), lty=c(1,2,3,4), horiz=TRUE, bty="n", cex=0.65)
which(ROSSP3_v099 > 0, arr.ind = TRUE)
################################################################################
##
## EXPERIMENTAL CODE
##
################################################################################
################################################################################
## TESTING DELTA AND S1 PARAMS
################################################################################
################################################################################
## EXAMINING VULNERABILITY
################################################################################
# Scenario parameters for the vulnerability experiment.
p2pre = seq(0, 10000, by=0.1)  # z1 (pre-deployment) investment grid, $K
p2t = 1                        # threat parameter
GLAlpha2 = 1                   # alpha for the S2() (post-deployment) stage
GLAlpha = 0.5142
GLBeta = 1
delta = 0.1                    # vulnerability attributed to misconfiguration
Vuln = 0.999                   # initial vulnerability
# NOTE(review): overallBudget, GL_SSE_S1_S2(), GL_SSE_S1_S2_retVs() and
# ROSI_ENBIS() are defined elsewhere in this file.
p2Lambda = overallBudget
# ENBIS across the z1 grid with z2 fixed at 3% of the budget, and the z1 = 0
# baseline used for the ROSSP comparison below.
p2ENBIS3 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
p2ROSSP = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# Intermediate quantities returned as a 4-element list -- presumably the
# deployment-stage vulnerability, post-deployment vulnerability, and the two
# stage residuals; confirm against GL_SSE_S1_S2_retVs().
v = GL_SSE_S1_S2_retVs(Vuln,p2pre,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
v_dep = unlist(v[[1]])
v_pst = unlist(v[[2]])
resS1 = unlist(v[[3]])
resS2 = unlist(v[[4]])
#plot(p2pre, p2ENBIS3, type="l", lty=1, ylim=range(-500,22000), ylab=c(expression(paste("ENBIS(",'z'[1],"+",'z'[2],") ($K)"))),
# xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
#which(p2ENBIS3 == max(p2ENBIS3), arr.ind = TRUE)
# ROSSP: ROSI with z1 spend minus ROSI of the z1 = 0 baseline.
rossp = (p2ENBIS3/(p2pre+(p2Lambda*0.03)) - (p2ROSSP/(p2Lambda*0.03)))
plot(p2pre, rossp, type="l", lty=1, ylim=range(-20,20), ylab=c(expression(paste("ROSSP"))),
xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
# Console diagnostics (indices into the p2pre grid).
which(p2ENBIS3 > 0, arr.ind = TRUE)
which(v_pst == max(v_pst), arr.ind = TRUE)
which(v_pst == min(v_pst), arr.ind = TRUE)
which(v_pst < (1-((p2pre+(p2Lambda*0.03))/p2Lambda)), arr.ind = TRUE)
# NOTE(review): ENBIS_SwSec_c is only assigned further down (CALCULATIONS
# section), so the next line errors on a fresh top-to-bottom run; it only
# works in a session where the script has already been executed once.
which((1-v_pst) > (ENBIS_SwSec_c / overallBudget), arr.ind = TRUE)
#### DEBUG
#GL_SSE_S1_S2_retVs(Vuln,355,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# v = GL_SSE_S1_S2_retVs(Vuln,p2pre,p2Lambda*0.1,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# v_pst = unlist(v[[2]])
# which(v_pst < (1-((p2pre+(p2Lambda*0.1))/p2Lambda)), arr.ind = FALSE)
#GL_SSE_S1_S2(Vuln,500,4000,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
################################################################################
## CALCULATIONS
################################################################################
##
which(v_pst < (1-((p2pre+(p2Lambda*0.03))/p2Lambda)), arr.ind = TRUE)
##
# Grid index of the ENBIS-maximising z1 investment (saved console value: 151).
invMax = which(p2ENBIS3 == max(p2ENBIS3), arr.ind = TRUE)
#151
## ROSSP: compare ENBIS/cost with vs. without the z1 software-security spend.
ENBIS_SwSec = GL_SSE_S1_S2(Vuln,151,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# 20445.27
ENBIS_SwSec_c = 151+p2Lambda*0.03
# 796
ENBIS_NoSwSec = GL_SSE_S1_S2(Vuln,0,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# 1505
ENBIS_NoSwSec_c = 0 + p2Lambda*0.03
# 645
# ROSSP for "Project A": ROSI with software security minus ROSI without it.
ROSSP_ProjA = ROSI_ENBIS(ENBIS_SwSec, ENBIS_SwSec_c) - ROSI_ENBIS(ENBIS_NoSwSec, ENBIS_NoSwSec_c)
# 23.35168
################################################################################
##
## LEGACY CODE
##
################################################################################
################################################################################
## Application of GL-SSE to phase 1
################################################################################
#####
## Calculation for GL-SSE, phase 1
## Calculation of various percentages of post-deployment security investment
##
## t = 0.1, \alpha2 = 1
##
# pre = seq(0:30)
# GLAlpha2 = 1
# t = 0.1
# delta = 0.1
# post05 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.005,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post1 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.010,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post25 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.025,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post5 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.050,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post10 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.100,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# plot(pre, post10, type="l", ylim=range(-40,50), lty=1, pch=0,
# ylab=c(expression(paste("ENBIS(",'z'[1],"+",'z'[2],") ($K)"))),
# xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
# abline(0, 0, col = "black")
# lines(pre, post5, type="l", lty=2, pch=0)
# lines(pre, post25, type="l", lty=3, pch=0)
# lines(pre, post1, type="l", lty=4, pch=0)
# lines(pre, post05, type="l", lty=5, pch=0)
# legend("top", c(expression(paste('z'[2],"=0.5%")), expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=2.5%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")) ), lty=c(5,4,3,2,1), horiz=TRUE, bty="n", cex=0.65)
# ## t = 1, \alpha2 = 1
# ##
# pre = seq(0:150)
# GLAlpha2 = 1
# t = 0.5
# delta = 0.1
# post05 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.005,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post1 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.010,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post25 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.025,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post5 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.050,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# post10 = GL_SSE_S1_S2(Vuln,pre,phase1Budget*0.100,t,phase1Budget,delta,GLAlpha,GLBeta,GLAlpha2)
# plot(pre, post10, type="l", ylim=range(-50,300), lty=1, pch=0,
# ylab=c(expression(paste("ENBIS(",'z'[1],"+",'z'[2],") ($K)"))),
# xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
# abline(0, 0, col = "black")
# lines(pre, post5, type="l", lty=2, pch=0)
# lines(pre, post25, type="l", lty=3, pch=0)
# lines(pre, post1, type="l", lty=4, pch=0)
# lines(pre, post05, type="l", lty=5, pch=0)
# legend("top", c(expression(paste('z'[2],"=0.5%")), expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=2.5%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")) ), lty=c(5,4,3,2,1), horiz=TRUE, bty="n", cex=0.65)
################################################################################
## Code snippets
################################################################################
# which(v_pre == max(v_pre), arr.ind = TRUE)
# #6001
# which(v_pst == max(v_pst), arr.ind = TRUE)
# #173
# which(resS1 == max(resS1), arr.ind = TRUE)
# #1
# which(resS2 == max(resS2), arr.ind = TRUE)
# #6001
# v_pre[invMax]
# #0.9885561
# v_pst[invMax]
# #0.9879661
# resS1[invMax]
# #0.01144395
# resS2[invMax]
# #0.0005899834
#GL_SSE_S1_S2(Vuln,143,3396,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
#v_pst[3396]
# p2pre = seq(0:21500) # z_{1} investment range
# p2t = 1 # threat
# p2Lambda = overallBudget # loss
# delta = 0.1 # vulnerability attributed to misconfiguration
# GLAlpha2 = 1 # alphas for S2()
# ## Calculate ENBIS for various percentages of z_{2}
# p2ENBIS05 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.005,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS1 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.01,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS3 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.03,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# #p2ENBIS5 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.05,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS5 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.05,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS10 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.10,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS25 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.25,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# p2ENBIS37 = GL_SSE_S1_S2(Vuln,p2pre,p2Lambda*0.37,p2t,p2Lambda,delta,GLAlpha,GLBeta,GLAlpha2)
# ## Draw plot
# plot(p2pre, p2ENBIS3, type="l", lty=1, ylim=range(-500,22000), ylab=c(expression(paste("ENBIS(",'z'[1],"+",'z'[2],") ($K)"))),
# xlab=c(expression(paste("Pre-deployment Investment ",'z'[1]," ($K)"))) )
# #line(p2pre, p2ENBIS3, type="l", lty=1)
# abline(0, 0, col = "black")
# #lines(p2pre, p2ENBIS05, type="l", lty=2, pch=0)
# #lines(p2pre, p2ENBIS1, type="l", lty=2, pch=0)
# lines(p2pre, p2ENBIS5, type="l", lty=3, pch=0)
# lines(p2pre, p2ENBIS10, type="l", lty=4, pch=0)
# lines(p2pre, p2ENBIS25, type="l", lty=2, pch=0)
# lines(p2pre, p2ENBIS37, type="l", lty=5, pch=0)
# ## Set legend
# legend("top", c(expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")), expression(paste('z'[2],"=25%")), expression(paste('z'[2],"=37%")) ), lty=c(1,3,4,2,5), horiz=TRUE, bty="n", cex=0.65)
# #legend("top", c(expression(paste('z'[2],"=1%")), expression(paste('z'[2],"=3%")), expression(paste('z'[2],"=5%")), expression(paste('z'[2],"=10%")) ), lty=c(2,1,3,4), horiz=TRUE, bty="n", cex=0.65)
# #expression(paste('z'[2],"=0.5%"))
|
dc377c0e92a94e3eef4b5a06c3a8e0a1014ba193 | 84be4da8fb667c4e9d9ab52a072309ead778bce7 | /R/bootImpute.R | c1782dcc1028e8bb2553acfdb2584cd3c826a540 | [] | no_license | jwb133/bootImpute | 570ba860cd76f65f829b99f732602695366fbaa6 | 91c402d59d0e5ff52f7731da04d09fac3069e90b | refs/heads/master | 2023-06-12T08:39:32.273206 | 2023-06-01T14:14:03 | 2023-06-01T14:14:03 | 178,089,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,093 | r | bootImpute.R | #' Bootstrap then impute an incomplete dataset
#'
#' Bootstraps an incomplete dataset and then imputes each bootstrap a number
#' of times. The resulting list of bootstrapped then imputed datasets can
#' be analysed with \code{\link{bootImputeAnalyse}}.
#'
#' The \code{impfun} must be a function which when passed an incomplete datasets
#' and possibly additional arguments, returns a list of (e.g. 2) imputed datasets.
#' The number of imputed datasets that \code{impfun} returns should match the value
#' you specify for the argument \code{nImp}. Depending on what your imputation function
#' returns by default, you may need to write a small wrapper function that calls
#' the imputation procedure and returns the list of \code{nImp} datasets.See the
#' Example for an illustration with the \code{mice} package.
#'
#' To improve computation times, \code{bootImpute} now supports
#' multiple cores through the \code{nCores} argument which uses the \code{parallel}
#' package.
#'
#' @param obsdata The data frame to be imputed.
#' @param impfun A function which when passed an incomplete dataset will
#' return a list of imputed data frames.
#' @param nBoot The number of bootstrap samples to take. It is recommended
#' that you use a minimum of 200. If you specify \code{nCores>1}, \code{nBoot} must
#' be a multiple of the specified \code{nCores} value.
#' @param nImp The number of times to impute each bootstrap sample. Two
#' is recommended.
#' @param nCores The number of CPU cores to use. If specified greater than one,
#' \code{bootImpute} will impute using the number of cores specified.
#' @param seed Random number seed.
#' @param ... Other parameters that are to be passed through to \code{impfun},
#' which will often include the argument that tells \code{impfun} to generate
#' as many imputations as specified by the value passed to \code{nImp}.
#' @return A list of imputed datasets.
#'
#' @example data-raw/bootImputeExamples.r
#'
#' @export
bootImpute <- function(obsdata, impfun, nBoot=200, nImp=2, nCores=1, seed=NULL, ...) {
  # --- argument checks -------------------------------------------------------
  if (nBoot<200) {
    warning("It is recommended to use at least 200 bootstraps.")
  }
  if ((nCores>1) & (is.null(seed))) {
    # parallel RNG streams (clusterSetRNGStream) need an explicit seed to be
    # reproducible
    stop("If you specify nCores>1 you must set a seed.")
  }
  n <- dim(obsdata)[1]
  imps <- vector("list", nBoot*nImp)
  count <- 1
  # Trial run of impfun on the full data: validates its return value before
  # committing to nBoot (potentially expensive) bootstrap imputations.  The
  # trial imputations themselves are discarded.
  newImps <- impfun(obsdata, ...)
  # BUG FIX: a data.frame also has typeof "list", so the previous
  # typeof(newImps)!="list" check let an impfun that returns a single data
  # frame slip through; if that frame happened to have nImp columns, its
  # columns were silently stored as "imputed datasets".  Reject data frames
  # explicitly.
  if (!is.list(newImps) || is.data.frame(newImps)) {
    stop("Your imputation function must return a list of imputed datasets.")
  }
  if (length(newImps)!=nImp) {
    stop("Your imputation function must return the same number of imputed datasets as the value you specify for nImp.")
  }
  if (nCores>1) {
    #use multiple cores: split the nBoot bootstraps evenly across workers,
    #each worker recursing into bootImpute with nCores=1
    if ((nBoot %% nCores)!=0) stop("nBoot must be a multiple of nCores.")
    nBootPerCore <- nBoot/nCores
    #the setup_strategy argument here is to temporarily deal with
    #this issue which affects Macs: https://github.com/rstudio/rstudio/issues/6692
    cl <- parallel::makeCluster(nCores, setup_strategy = "sequential")
    #cl <- parallel::makeCluster(nCores)
    if (!is.null(seed)) {
      parallel::clusterSetRNGStream(cl, seed)
    }
    dots <- list(...)
    if (length(dots)==0) {
      #no extra arguments to pass to imputation function
      parallel::clusterExport(cl, c("obsdata", "impfun", "nBootPerCore", "nImp"),
                              envir=environment())
      parImps <- parallel::parLapply(cl, X=1:nCores, fun = function(no){
        bootImpute::bootImpute(obsdata, impfun, nBoot=nBootPerCore, nImp=nImp, nCores=1)
      })
    } else {
      #some extra arguments to pass to imputation function
      parallel::clusterExport(cl, c("obsdata", "impfun", "nBootPerCore", "nImp", "dots"),
                              envir=environment())
      parImps <- parallel::parLapply(cl, X=1:nCores, fun = function(no){
        newarg <- c(list(obsdata=obsdata, impfun=impfun, nBoot=nBootPerCore, nImp=nImp, nCores=1), dots)
        do.call(bootImpute::bootImpute, newarg)
      })
    }
    parallel::stopCluster(cl)
    #concatenate the per-worker lists into one flat list of imputations
    imps <- do.call(c, parImps)
  } else {
    if (!is.null(seed)) {
      set.seed(seed)
    }
    for (b in 1:nBoot) {
      #take bootstrap sample
      bsIndices <- sample(1:n, replace=TRUE)
      #impute nImp times; the nImp imputations of each bootstrap are stored
      #contiguously, which bootImputeAnalyse relies on
      newImps <- impfun(obsdata[bsIndices,], ...)
      for (m in 1:nImp) {
        imps[[count]] <- newImps[[m]]
        count <- count + 1
      }
    }
  }
  #record the bootstrap/imputation layout for bootImputeAnalyse (note that
  #attributes()<- replaces ALL attributes, which is fine for a plain list)
  attributes(imps) <- list(nBoot=nBoot, nImp=nImp)
  #return list of imputations
  imps
}
#' Analyse bootstrapped and imputed estimates
#'
#' Applies the user specified analysis function to each imputed dataset contained
#' in \code{imps}, then calculates estimates, confidence intervals and p-values
#' for each parameter, as proposed by von Hippel and Bartlett (2021).
#'
#' Multiple cores can be used by using the \code{nCores} argument, which may be
#' useful for reducing computation times.
#'
#' @param imps The list of imputed datasets returned by \code{\link{bootImpute}}
#' @param analysisfun A function which when applied to a single dataset returns
#' the estimate of the parameter(s) of interest. The dataset to be analysed
#' is passed to \code{analysisfun} as its first argument.
#' @param nCores The number of CPU cores to use. If specified greater than one,
#' \code{bootImputeAnalyse} will impute using the number of cores specified. The
#' number of bootstrap samples in \code{imps} should be divisible by \code{nCores}.
#' @param quiet Specify whether to print a table of estimates, confidence intervals
#' and p-values.
#' @param ... Other parameters that are to be passed through to \code{analysisfun}.
#' @return A vector containing the point estimate(s), variance estimates, and
#' degrees of freedom.
#'
#' @references von Hippel PT, Bartlett JW. Maximum likelihood multiple imputation: faster,
#' more efficient imputation without posterior draws. Statistical Science, 2021, 36(3):400-420.
#' \doi{10.1214/20-STS793}
#'
#' @import stats
#'
#' @example data-raw/bootImputeAnalyseExamples.r
#'
#'
#' @export
# Applies analysisfun to every imputed dataset in imps and pools the
# nBoot x nImp estimates with the one-way random-effects decomposition of
# von Hippel & Bartlett (2021, Stat. Sci.).  Relies on the nBoot/nImp
# attributes that bootImpute() attached to imps, and on the imputations of
# each bootstrap being stored contiguously.  nImp must be >= 2 for the
# within-bootstrap mean square below to be defined.
bootImputeAnalyse <- function(imps, analysisfun, nCores=1, quiet=FALSE, ...) {
  nBoot <- attributes(imps)$nBoot
  nImp <- attributes(imps)$nImp
  #find out how many parameters are returned by analysisFun
  #(this first call is only used for nParms and the row names; the same
  #dataset is analysed again in the main loop below)
  firstResult <- analysisfun(imps[[1]],...)
  nParms <- length(firstResult)
  #ests[b, m, p] = estimate of parameter p from imputation m of bootstrap b
  ests <- array(0, dim=c(nBoot,nImp,nParms))
  if (nCores>1) {
    #use multiple cores
    if ((nBoot %% nCores)!=0) stop("nBoot must be divisible by nCores.")
    nBootPerCore <- nBoot/nCores
    #the setup_strategy argument here is to temporarily deal with
    #this issue: https://github.com/rstudio/rstudio/issues/6692
    cl <- parallel::makeCluster(nCores, setup_strategy = "sequential")
    dots <- list(...)
    if (length(dots)==0) {
      #no extra arguments to pass to analysis function
      parallel::clusterExport(cl, c("imps", "analysisfun", "nBootPerCore", "nImp", "nParms"),
                              envir=environment())
      parEsts <- parallel::parLapply(cl, X=1:nCores, fun = function(no){
        estArray <- array(0, dim=c(nBootPerCore, nImp, nParms))
        #worker no handles bootstraps bootStart .. bootStart+nBootPerCore-1,
        #whose imputations sit contiguously in imps starting at impToAnalyse
        bootStart <- (no-1)*nBootPerCore+1
        impToAnalyse <- (bootStart-1)*nImp + 1
        for (i in 1:nBootPerCore) {
          for (j in 1:nImp) {
            estArray[i,j,] <- analysisfun(imps[[impToAnalyse]])
            impToAnalyse <- impToAnalyse + 1
          }
        }
        estArray
      })
    } else {
      #some extra arguments to pass to analysis function
      parallel::clusterExport(cl, c("imps", "analysisfun", "nBootPerCore", "nImp", "nParms", "dots"),
                              envir=environment())
      parEsts <- parallel::parLapply(cl, X=1:nCores, fun = function(no){
        estArray <- array(0, dim=c(nBootPerCore, nImp, nParms))
        bootStart <- (no-1)*nBootPerCore+1
        impToAnalyse <- (bootStart-1)*nImp + 1
        for (i in 1:nBootPerCore) {
          for (j in 1:nImp) {
            #NOTE(review): naming the first element "data" assumes
            #analysisfun's first formal argument is called `data` -- confirm
            newarg <- c(list(data=imps[[impToAnalyse]]), dots)
            estArray[i,j,] <- do.call(analysisfun, newarg)
            impToAnalyse <- impToAnalyse + 1
          }
        }
        estArray
      })
    }
    parallel::stopCluster(cl)
    #stitch the per-worker estimate arrays back into the full ests array
    for (i in 1:nCores) {
      ests[((i-1)*nBootPerCore+1):(i*nBootPerCore),,] <- parEsts[[i]]
    }
  } else {
    #use single core
    count <- 1
    for (b in 1:nBoot) {
      for (m in 1:nImp) {
        ests[b,m,] <- analysisfun(imps[[count]],...)
        count <- count + 1
      }
    }
  }
  #fit one way model, separately for each parameter
  est <- array(0, dim=nParms)
  #NOTE(review): `var` and `df` shadow stats::var/stats::df inside this
  #function; harmless here, but worth renaming at some point
  var <- array(0, dim=nParms)
  ci <- array(0, dim=c(nParms,2))
  df <- array(0, dim=nParms)
  for (i in 1:nParms) {
    #within- and between-bootstrap sums of squares of the estimates
    SSW <- sum((ests[,,i]-rowMeans(ests[,,i]))^2)
    SSB <- nImp*sum((rowMeans(ests[,,i])-mean(ests[,,i]))^2)
    MSW <- SSW/(nBoot*(nImp-1))
    MSB <- SSB/(nBoot-1)
    resVar <- MSW
    randIntVar <- (MSB-MSW)/nImp
    if (randIntVar <= 0) {
      #negative variance component: truncate at zero and pool the variance
      warning(paste("Parameter ",i," has an estimated between bootstrap variance of zero. You should re-run with a larger nBoot value.",sep=""))
      randIntVar <- 0
      resVar <- (SSW+SSB)/(nBoot*nImp-1)
    }
    est[i] <- mean(ests[,,i])
    var[i] <- (1+1/nBoot)*randIntVar + resVar/(nBoot*nImp)
    #degrees of freedom (formula from von Hippel & Bartlett 2021, see the
    #reference in the roxygen header)
    df[i] <- (var[i]^2)/((((nBoot+1)/(nBoot*nImp))^2*MSB^2 / (nBoot-1)) + MSW^2/(nBoot*nImp^2*(nImp-1)))
    #prevent df from going below 3
    df[i] <- max(3,df[i])
    ci[i,] <- c(est[i]-stats::qt(0.975,df[i])*var[i]^0.5, est[i]+stats::qt(0.975,df[i])*var[i]^0.5)
  }
  if (quiet==FALSE) {
    #assemble and print the estimate / SE / 95% CI / p-value table
    resTable <- array(0, dim=c(nParms,5))
    resTable[,1] <- est
    resTable[,2] <- var^0.5
    resTable[,3] <- ci[,1]
    resTable[,4] <- ci[,2]
    resTable[,5] <- 2*stats::pt(abs(est/var^0.5), df=df,lower.tail = FALSE)
    colnames(resTable) <- c("Estimate", "Std. error", "95% CI lower", "95% CI upper", "p")
    rownames(resTable) <- names(firstResult)
    print(resTable)
  }
  list(ests=est, var=var, ci=ci, df=df)
}
|
585722866643cb8e3775ba9019f078e49ccdaea1 | 545e9b06f3a27db484971194a6c4d2084f236b38 | /week_2/week_2.r | df9559ae2fc10528e46f066176d3c4f5b6f26b83 | [] | no_license | imclab/dwob | 3aab864218fb874df3e997cea221fd7615c9ec1e | 0f6f5519976025b8c5391ad3d1cdaaae5bbef813 | refs/heads/master | 2021-01-15T12:15:24.989898 | 2012-11-14T02:08:43 | 2012-11-14T02:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,454 | r | week_2.r | #HOMEWORK 2
#import data (stop-and-frisk records; one row per stop)
snf <- read.csv(file="http://www.jakeporway.com/teaching/data/snf_2.csv", head=TRUE, as.is=TRUE)
# Write code to return the percentage of people who were frisked for each
# race. In other words, count up the number of people who were frisked for a given race
# divided by the number of people of that race stopped.
#total stops
total.stops <- nrow(snf)
total.frisks <- sum(snf$frisked)
# frisk counts per race code, largest first
frisks.by.race <- rev(sort(table(snf$race[snf$frisked == 1])))
# Which race leads to the highest percentage of frisks?
most.frisked.race <- frisks.by.race[1]
perc.frisked <- most.frisked.race/total.frisks
# Which one the lowest?
# NOTE(review): dim() on a 1-D table returns its length, so this grabs the
# last (smallest) entry; frisks.by.race[length(frisks.by.race)] would say so
# more plainly.
least.frisked.race <- frisks.by.race[dim(frisks.by.race)]
# NOTE(review): this overwrites the perc.frisked computed just above for the
# most-frisked race -- only the least-frisked percentage survives.
perc.frisked <- least.frisked.race/total.frisks
# Plot the number of times each crime occurs in descending order
# What does this distribution of crimes look like? In
# other words, are there an equal number of every kind of crime
# or are there a few that dominate?
sorted.crimes <- rev(sort(table(snf$crime.suspected)))
top.50 <- sorted.crimes[1:50]
barplot(top.50, xlab="crime suspected", ylab="number of stops", cex.lab=.2, col="red", density=(log(top.50)*7))
#Q: What does this distribution of crimes look like?
#A: The distribution looks like a power-law distribution.
#   The most suspected crime has a huge percent more stops
#   than the next-most, and this pattern repeats til there
# Q: Well I'm kind of answering that question for you here -- let's take the top 30
# suspected crimes and look at those. If we were to just look at stops where the
# crime.suspected was one of the top 30 crimes, what percentage of the stops would that
# cover? Do you think that's enough?
#A: [1] 0.9132194
sum.top.30 <- sum(sorted.crimes[1:30])
perc.top.30 <- sum.top.30/total.stops
#Write code to create a variable called "crime.abbv" that consists of just the
# first three letters of crime.suspected and show the code to add it to our main data frame.
# Now what percentage of the stops do the top 30 crime.abbv account for?
crime.abbv <- substr(snf$crime.suspected, 1, 3)
sorted.crimes.abbvs <- rev(sort(table(crime.abbv)))
top.30.abbvs <- sorted.crimes.abbvs[1:30]
# Write code to show the top 3 crimes each race is suspected of (rev(),
# sort(), and table() are your friends here again, but you'll have to subset the data by race
# first). Huh. If you do this right, almost all the top 3's should be the same, but a few are
# different. What are these differences?
# Race codes run -1..6; 0 is skipped because it does not occur in the data.
for(i in -1:6){
  if(i != 0){
    print(rev(sort(table(crime.abbv[snf$race == i])))[1:3])
  }
}
#-1 : FEL MIS CPW
# 1 : FEL MIS CPW
# 2 : FEL MIS CPW
# 3 : FEL MIS CPW
# 4 : FEL MIS BUR
# 5 : FEL ROB MIS
# 6 : FEL ROB GLA
#I also tried this:
# NOTE(review): left here as a documented failed experiment.  tapply() calls
# FUN on SUBSETS of its first argument (the crime.abbv values for each race
# group), not on the race code itself, so `...` inside top.3 receives a
# character vector of abbreviations and `snf$race == ...` is comparing race
# codes against crime strings -- hence every group returns the same table.
top.3 <- function(...){
  rev(sort(table(crime.abbv[snf$race == ... ])))[1:3]
}
top.three.by.race <- tapply(sort(crime.abbv), snf$race, top.3)
# however, I couldn't figure out how r was incrementing through the top 3 function and was given errors.
# use a number but would be returned 6 tables with the same data
# PART 2
# Let’s create an “hour” variable that tells us what hour of the day each stop
# happened during and add it to our dataset. How do we do this? Well we’ve got a
# great column of “time” variables that always has the hour in the same place. Use
# the substr() function we learned about above to strip out the hour, then use
# as.numeric() from lecture 2 to convert it to a number.
hour <- substr(snf$time, 12, 13)
time.of.stop <- rev(sort(table(as.numeric(hour))))
# 20 21 19 22 18 16 15 17 23 14 1 0 13 2 12 11 10 3 9 4 8 5 7 6
# 4607 4158 4141 3721 3387 3319 3228 3220 3209 3128 3057 3022 2490 2213 2181 2071 1572 1378 1218 905 740 477 324 323
# Create a line plot (i.e. a plot with type=”l”) of the stops by hour. Which hour of the
# day has the most stops? Which hour has the fewest?
time.of.stop <- (table(as.numeric(hour)))
color.vector <- rep(1,24)
color.vector[6] <- 2
color.vector[20] <- 3
plot(as.vector(time.of.stop), type="l", xlab="time of stop", ylab="number of stops")
# Create the same plot but with points instead of lines. Use a different plotting
# symbol than the default and color the max point and min points different colors
plot(as.vector(time.of.stop), type="o", xlab="time of stop", ylab="number of stops", col=color.vector)
|
a322867b8669fcceb1d147d4238835da0aedbf97 | 67a4fde7eb27258b15360a1e2ac4b8714581ce77 | /app.R | c1aa0f0fb012bc04917225435039a066fdcb967e | [] | no_license | thlytras/COVID-19-trendviz | 96b163b87ecb7bfb9ba005decd34d5fd7d3cee77 | 055bb2967650d74b2e7cae4f742b1bd8bb21e069 | refs/heads/master | 2021-01-03T00:38:33.849409 | 2020-02-15T11:33:08 | 2020-02-15T11:33:08 | 239,839,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,981 | r | app.R | load("dat.RData")
source("include.R")
library(shiny)
library(shinyWidgets)
library(markdown)
library(Cairo)
options(shiny.usecairo=TRUE)
# Page layout: a sidebar of filter/format controls and a main panel with one
# tab per chart plus an "About" tab.  `dat` is loaded at the top of the file
# (load("dat.RData")) and provides the cnt (country) and prov (province)
# columns used to build the select inputs.
ui <- fluidPage(
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "mine.css"),
    tags$link(rel = "shortcut icon", href = "logo-small.png")
  ),
  titlePanel("COVID-19 (2019-nCoV) trends visualizer"),
  sidebarLayout(
    sidebarPanel(
      # Multi-select country filter; the first level is kept in front, the
      # rest are sorted alphabetically.
      selectInput("cnt", "Country/Region",
        choices=c(unique(dat$cnt)[1], sort(unique(dat$cnt)[-1])), multiple=TRUE),
      fluidRow(
        column(6, actionButton("cnt_selAll", "Select all countries", style="margin-bottom: 1em; width:100%")),
        column(6, actionButton("cnt_clear", "Clear countries", style="margin-bottom: 1em; width:100%"))
      ),
      selectInput("prov", "Province/State",
        choices=sort(unique(dat$prov)), multiple=TRUE),
      fluidRow(
        column(6, actionButton("prov_selAll", "Select all provinces", style="margin-bottom: 1em; width:100%")),
        column(6, actionButton("prov_clear", "Clear provinces", style="margin-bottom: 1em; width:100%"))
      ),
      fluidRow(
        column(6,
          # The slider value is exponentiated server-side: thk=exp(input$pltThick).
          sliderInput("pltThick", "Line/bar thickness",
            min=-1.6, max=1.6, value=0, step=0.1, ticks=FALSE)
        ),
        column(6,
          # awesomeCheckbox comes from shinyWidgets (loaded at the top of file).
          awesomeCheckbox("plotConf", "Plot confirmed cases", value=TRUE),
          awesomeCheckbox("plotDead", "Plot deaths", value=TRUE),
          awesomeCheckbox("plotRecov", "Plot recovered cases", value=TRUE)
        )
      ),
      # BUG FIX: removed the trailing comma that followed img(...).  A trailing
      # empty argument in a call is an error under plain list() dots collection
      # ("argument is empty") and at best relies on rlang's trailing-comma
      # tolerance in recent htmltools versions.
      img(src='eody.png', width=164*2/3, height=189*2/3,
        style="display: block; margin-left: auto; margin-right: auto;")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Cumulative cases",
          plotOutput("cumCases"),
          downloadButton("save_cumPlot", "Save plot")
        ),
        tabPanel("Incident cases",
          plotOutput("incCases"),
          # 0/1 choice is converted server-side to the logical line= argument.
          selectInput("plotTypeInc", "Plot type", choices=c("Bar chart" = 0, "Line chart" = 1)),
          downloadButton("save_incPlot", "Save plot")
        ),
        tabPanel("Second derivative",
          plotOutput("secDerivative"),
          downloadButton("save_secDerPlot", "Save plot")
        ),
        tabPanel("About", includeMarkdown("README.md"))
      )
    )
  )
)
# Server logic: filters `dat` by the selected countries/provinces, aggregates
# counts by date, and renders/downloads the three charts.  plotCumCases(),
# plotIncCases() and plotSecDer() come from include.R (sourced at the top).
server <- function(input, output, session) {
  # Selectable provinces based on the countries selected
  provChoices <- reactive({
    # if no country selected, choose all provinces
    if (is.null(input$cnt)) {
      return(sort(unique(dat$prov)))
    } else { # or else, chose only provinces found in the country
      return(sort(unique(subset(dat, cnt %in% input$cnt)$prov)))
    }
  })
  # Keep the province input in sync with the country filter, preserving any
  # still-valid selections (isolate() avoids a reactive loop on input$prov).
  observe({
    sel <- isolate(input$prov)
    sel <- sel[sel %in% provChoices()]
    updateSelectInput(session, "prov", choices=provChoices(), selected=sel)
  })
  # Rows of dat matching the current country/province filters (empty filter
  # means "no restriction" on that dimension).
  selData <- reactive({
    selData <- dat
    if (!is.null(input$cnt)) {
      selData <- subset(selData, cnt %in% input$cnt)
    }
    if (!is.null(input$prov)) {
      selData <- subset(selData, prov %in% input$prov)
    }
    return(selData)
  })
  # Confirmed/dead/recovered counts summed over the selection, per date.
  aggrData <- reactive({
    aggregate(selData()[,c("conf","dead","recov")], selData()[,"date",drop=FALSE], sum, na.rm=TRUE)
  })
  # --- cumulative cases: on-screen plot and PNG download ---------------------
  output$cumCases <- renderPlot({
    if (nrow(aggrData())>0) {
      par(mar=c(5,4,2,2))
      plotCumCases(aggrData(),
        plot.conf=input$plotConf, plot.recov=input$plotRecov, plot.dead=input$plotDead,
        thk=exp(input$pltThick))
    }
  })
  output$save_cumPlot <- downloadHandler(
    filename = function() {
      "cumulative_cases.png"
    },
    content = function(file) {
      png(file, width=1000, height=600, res=115)
      par(mar=c(5,4,2,2))
      if (nrow(aggrData())>0) {
        plotCumCases(aggrData(), plot.conf=input$plotConf,
          plot.recov=input$plotRecov, plot.dead=input$plotDead,
          thk=exp(input$pltThick))
      }
      dev.off()
    }
  )
  # --- incident (new) cases: bar or line chart per input$plotTypeInc ---------
  output$incCases <- renderPlot({
    if (nrow(aggrData())>0) {
      par(mar=c(5,4,2,2))
      plotIncCases(aggrData(),
        plot.conf=input$plotConf, plot.recov=input$plotRecov, plot.dead=input$plotDead,
        thk=exp(input$pltThick), line=as.logical(as.integer(input$plotTypeInc)))
    }
  })
  output$save_incPlot <- downloadHandler(
    filename = function() {
      "new_cases.png"
    },
    content = function(file) {
      png(file, width=1000, height=600, res=115)
      par(mar=c(5,4,2,2))
      if (nrow(aggrData())>0) {
        plotIncCases(aggrData(), plot.conf=input$plotConf,
          plot.recov=input$plotRecov, plot.dead=input$plotDead,
          thk=exp(input$pltThick), line=as.logical(as.integer(input$plotTypeInc)))
      }
      dev.off()
    }
  )
  # --- second derivative of the counts ---------------------------------------
  output$secDerivative <- renderPlot({
    if (nrow(aggrData())>0) {
      par(mar=c(5,4,2,2))
      plotSecDer(aggrData(), plot.conf=input$plotConf,
        plot.recov=input$plotRecov, plot.dead=input$plotDead,
        thk=exp(input$pltThick))
    }
  })
  output$save_secDerPlot <- downloadHandler(
    filename = function() {
      "second_derivative.png"
    },
    content = function(file) {
      png(file, width=1000, height=600, res=115)
      par(mar=c(5,4,2,2))
      if (nrow(aggrData())>0) {
        plotSecDer(aggrData(), plot.conf=input$plotConf,
          plot.recov=input$plotRecov, plot.dead=input$plotDead,
          thk=exp(input$pltThick))
      }
      dev.off()
    }
  )
  # --- select-all / clear buttons for the two filters ------------------------
  observeEvent(input$prov_clear, {
    updateSelectInput(session, "prov", selected=character())
  })
  observeEvent(input$prov_selAll, {
    updateSelectInput(session, "prov", selected=provChoices())
  })
  observeEvent(input$cnt_clear, {
    updateSelectInput(session, "cnt", selected=character())
  })
  observeEvent(input$cnt_selAll, {
    # "Others" is deliberately excluded from select-all
    # (NOTE(review): `all` shadows base::all within this handler)
    all <- c(unique(dat$cnt)[1], sort(unique(dat$cnt)[-1]))
    all <- all[all!="Others"]
    updateSelectInput(session, "cnt",
      selected=all)
  })
}
# Launch the app with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
3ba859fac7c7ab04e2b1d3b25e1c8c02e93a14bb | b10f519b2490144f651475f9232074b3fd6af06b | /man/updateLSVM.Rd | 56d5bdc001a3beae1afecab99693afc7de413272 | [] | no_license | ClementWalter/mistral | a3720ee50b8262e05bb48a19483087d801e49a1e | 8cabf6f94cc1ea7391c3714ccf2bf55b97d00673 | refs/heads/master | 2021-06-09T02:32:25.686536 | 2020-01-02T07:37:11 | 2020-01-02T07:37:11 | 68,203,801 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,453 | rd | updateLSVM.Rd | \name{updateLSVM}
\alias{updateLSVM}
\title{Update LSVM classifier}
\description{
Update the existing classifier LSVM with a new set of data.
}
\usage{
updateLSVM(X.new,
Y.new,
X,
Y,
A.model.lsvm,
convexity,
PLOTSVM = FALSE,
step.plot.LSVM = 1,
hyperplanes = FALSE,
limit.state.estimate = TRUE)
}
\arguments{
\item{X.new}{a matrix containing a new data sets}
  \item{Y.new}{a vector containing -1 or +1 that represents the class of each element (row) of X.new.}
\item{X}{a matrix containing the data sets}
  \item{Y}{a vector containing -1 or +1 that represents the class of each element (row) of X.}
\item{A.model.lsvm}{a matrix containing the parameters of all hyperplanes.}
\item{convexity}{Either -1 if the set of data associated to the label "-1" is convex or +1 otherwise.}
\item{PLOTSVM}{A boolean. If TRUE, plot the data.}
\item{step.plot.LSVM}{A plot is made each \code{step.plot.LSVM} steps.}
\item{hyperplanes}{A boolean. If TRUE, plot the hyperplanes obtained.}
\item{limit.state.estimate}{A boolean. If TRUE, plot the estimate of the limit state.}
}
\value{
An object of class \code{matrix} containing the parameters of a set of hyperplanes
}
\details{
  updateLSVM updates an existing LSVM classifier with a new set of observations.
}
\note{
The argument PLOTSVM is useful only in dimension 2.
}
\references{
\itemize{
\item
R.T. Rockafellar:\cr
\emph{Convex analysis}\cr
Princeton university press, 2015.\cr
\item
N. Bousquet, T. Klein and V. Moutoussamy :\cr
\emph{Approximation of limit state surfaces in monotonic Monte Carlo settings}\cr
Submitted .\cr
}
}
\author{
Vincent Moutoussamy\cr
}
\seealso{
\code{\link{LSVM}}
\code{\link{modelLSVM}}
}
\examples{
# A limit state function
f <- function(x){ sqrt(sum(x^2)) - sqrt(2)/2 }
# Creation of the data sets
n <- 200
X <- matrix(runif(2*n), nrow = n)
Y <- apply(X, MARGIN = 1, function(w){sign(f(w))})
\dontrun{
model.A <- modelLSVM(X,Y, convexity = -1)
M <- 20
X.new <- matrix(runif(2*M), nrow = M)
Y.new <- apply(X.new, MARGIN = 1, function(w){ sign(f(w))})
X.new.S <- X.new[which(Y.new > 0), ]
Y.new.S <- Y.new[which(Y.new > 0)]
model.A.new <- updateLSVM(X.new.S, Y.new.S, X, Y,
model.A, convexity = -1, PLOTSVM = TRUE, step.plot.LSVM = 5)
}
}
|
f4ddbe894940ab51200f35df6fcfde38f4a02b6f | 71f18a6a166ce48895c0230f39044093a4d6be36 | /Indiana_Cumulative_Counts/app.R | fad2d1604358d317f064c974954a46643790dc79 | [] | no_license | theandyb/nyt_covid | 8af1a53195eaed365513ff4b1bdbece404ade2f0 | 6c3244a9ec35e85a6e6f523e6aaac88590893f6b | refs/heads/master | 2021-05-17T06:24:36.609522 | 2020-06-19T23:56:54 | 2020-06-19T23:56:54 | 250,671,304 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,743 | r | app.R | library(shiny)
library(tidyverse)
library(xlsx)
df <- read.xlsx("covid_report_county_date.xlsx", 1)
#' Cumulative confirmed COVID-19 case count for one county up to a date.
#'
#' @param Day    Cutoff date (or a string coercible via as.Date); rows with
#'               DATE on or before this day are included.
#' @param County County name, matched exactly against COUNTY_NAME.
#' @param df     Data frame with DATE, COUNTY_NAME and COVID_COUNT columns.
#' @return A single number: the summed COVID_COUNT for that county/date.
cases_as_of_date <- function(Day, County, df){
  df %>%
    filter(DATE <= as.Date(Day)) %>%
    filter(COUNTY_NAME == County) %>%
    pull(COVID_COUNT) %>%
    # ROBUSTNESS: na.rm=TRUE so a single missing daily count does not turn
    # the whole cumulative total into NA
    sum(na.rm = TRUE)
}
#' Cumulative COVID-19 death count for one county up to a date.
#'
#' @param Day    Cutoff date (or a string coercible via as.Date); rows with
#'               DATE on or before this day are included.
#' @param County County name, matched exactly against COUNTY_NAME.
#' @param df     Data frame with DATE, COUNTY_NAME and COVID_DEATHS columns.
#' @return A single number: the summed COVID_DEATHS for that county/date.
deaths_as_of_date <- function(Day, County, df){
  df %>%
    filter(DATE <= as.Date(Day)) %>%
    filter(COUNTY_NAME == County) %>%
    pull(COVID_DEATHS) %>%
    # ROBUSTNESS: na.rm=TRUE so a single missing daily count does not turn
    # the whole cumulative total into NA
    sum(na.rm = TRUE)
}
#' Build the per-county cumulative case/death table as of a date.
#'
#' @param df  Data frame with DATE, COUNTY_NAME, COVID_COUNT, COVID_DEATHS.
#' @param Day Cutoff date, passed through to the per-county helpers.
#' @return A tibble with columns county, case_count and deaths, one row per
#'   county, sorted by county name.
table_gen <- function(df, Day){
  counties <- sort(unique(df$COUNTY_NAME))
  # vapply replaces the original pattern of growing `counts`/`deaths` with
  # c() inside a loop, which copies the whole vector on every append
  # (accidental O(n^2)); it also guarantees exactly one numeric result per
  # county.  USE.NAMES=FALSE keeps the columns unnamed, as before.
  tibble(
    county = counties,
    case_count = vapply(counties, function(cty) cases_as_of_date(Day, cty, df),
                        numeric(1), USE.NAMES = FALSE),
    deaths = vapply(counties, function(cty) deaths_as_of_date(Day, cty, df),
                    numeric(1), USE.NAMES = FALSE)
  )
}
# UI: a date picker in the sidebar and two tables (statewide total + the
# per-county breakdown) in the main panel.
ui <- fluidPage(
    # Application title
    titlePanel("Indiana Cumulative Case Counts"),
    # Sidebar with a date input selecting the "as of" cutoff date
    sidebarLayout(
        sidebarPanel(
            dateInput("date1", "Date:")
        ),
        # Main panel: statewide total table above the per-county table
        mainPanel(
           tableOutput("stateTotal"),
           tableOutput("finalTable")
        )
    )
)
# Server logic: one reactive cumulative table drives both output tables.
server <- function(input, output) {
    # Per-county cumulative table as of the selected date; recomputed whenever
    # input$date1 changes.  `df` is the spreadsheet read at the top of file.
    dataTable <- reactive({
        table_gen(df, input$date1)
    })
    # Statewide total = sum of the per-county cumulative case counts.
    output$stateTotal <- renderTable({
        total <- dataTable() %>% pull(case_count) %>% sum()
        data.frame(state = "Indiana", total = total)
    })
    # Full per-county breakdown.
    output$finalTable <- renderTable({
        dataTable()
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
4e3c56e648c1ac46741b18cbbc3c2782dde65a65 | d746fef241f9a0e06ae48cc3b1fe72693c43d808 | /tesseract/rotate/d7dw2j-011.r | 060bfe36fc82ebcba56e7ac132db17bd5634e007 | [
"MIT"
] | permissive | ucd-library/wine-price-extraction | 5abed5054a6e7704dcb401d728c1be2f53e05d78 | c346e48b5cda8377335b66e4a1f57c013aa06f1f | refs/heads/master | 2021-07-06T18:24:48.311848 | 2020-10-07T01:58:32 | 2020-10-07T01:58:32 | 144,317,559 | 5 | 0 | null | 2019-10-11T18:34:32 | 2018-08-10T18:00:02 | JavaScript | UTF-8 | R | false | false | 199 | r | d7dw2j-011.r | r=359.98
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7dw2j/media/images/d7dw2j-011/svc:tesseract/full/full/359.98/default.jpg Accept:application/hocr+xml
|
f2b234ee6500a4387859f5b4c2389804ee600507 | 6c1036850e7ddee4d18bfb9c10cee73ce9e587b9 | /man/smad_norm.Rd | bbe6618405952607d684e4ce641ce0744c345d57 | [] | no_license | mattglittenberg/proteoanalysis | 13eb2610b873ad206f7851aa7b47b8cd96189dab | 06a52e4b92bad718a899ab2c7f2b16d893a68ec4 | refs/heads/master | 2023-04-18T05:46:07.122187 | 2021-05-05T04:13:49 | 2021-05-05T04:13:49 | 364,389,568 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 391 | rd | smad_norm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smad_norm.R
\name{smad_norm}
\alias{smad_norm}
\title{Scaled Median Absolute Deviation based Normalization}
\usage{
smad_norm(log_matrix)
}
\arguments{
\item{log_matrix}{Log-Transformed Matrix}
}
\value{
Log-Transformed and Normalized Matrix
}
\description{
Scaled Median Absolute Deviation based Normalization
}
|
476ee85e325dbc2ec3577634279985091e78a17c | a4ef3cbc137ec6012b723ae1d16ff4cc294f94c8 | /tests/testthat.R | e63ae3811d21b779a94bc9af8a888b51372e93fe | [
"MIT"
] | permissive | antchau/berdcsc | 2f3ee91bf05e956bfb8d8315107cec5680e22d28 | 6dc8418785e30728141d36bdb37c8bbc3efe3f7a | refs/heads/main | 2023-01-08T22:18:34.862277 | 2020-11-13T00:42:08 | 2020-11-13T00:42:08 | 303,248,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 58 | r | testthat.R | library(testthat)
library(berdcsc)
test_check("berdcsc")
|
21837204ce83ce476bf8aa3de0bc3f9636584dcb | 1a6e57ddd0ab0cad37f72bd61f384f4c802c7b47 | /man/MethyLumiM2GenoSet.Rd | eeb26806df5b65b77eac8daf4a8ad2022d6092d7 | [] | no_license | CHARKKS/methyAnalysis-1 | 737aa9acad8fe30d1e4aac5b5256beafc77c47f8 | fb282c56b819f5aeb9a615939a72a10d534f648b | refs/heads/master | 2021-06-18T11:12:52.055971 | 2017-06-27T17:05:27 | 2017-06-27T17:05:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,312 | rd | MethyLumiM2GenoSet.Rd | \name{MethyLumiM2GenoSet}
\alias{MethyLumiM2GenoSet}
\title{
Coerce objects of MethyLumiM-class to MethyGenoSet
}
\description{
Coerce objects of \code{\link{MethyLumiM-class}} to \code{MethyGenoSet}
}
\usage{
MethyLumiM2GenoSet(methyLumiM, lib = "FDb.InfiniumMethylation.hg19", bigMatrix=FALSE, dir.bigMatrix='.', savePrefix.bigMatrix)
}
\arguments{
\item{methyLumiM}{a MethyLumiM object}
\item{lib}{lib is an annotation library}
\item{bigMatrix}{whether to save the data as BigMatrix (designed for very large dataset)}
\item{dir.bigMatrix}{the parent directory to save the BigMatrix data files}
\item{savePrefix.bigMatrix}{the folder name prefix of the directory to save the BigMatrix data files. The folder name will be like this: paste(savePrefix.bigMatrix, '_bigmat', sep='') }
}
\value{
a MethyGenoSet object
}
\author{
Pan Du
}
\seealso{
\code{\link{MethyGenoSet}}
}
\examples{
if (require(FDb.InfiniumMethylation.hg19)) {
data(exampleMethyGenoSet)
## set as MethyLumiM object
methyLumiM <- as(exampleMethyGenoSet, 'MethyLumiM')
## set back as MethyGenoSet object
methyGenoSet <- MethyLumiM2GenoSet(methyLumiM, lib = "FDb.InfiniumMethylation.hg19")
class(methyGenoSet)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{methods}
|
2aa37ac10450c883ca29fa63e9297035ae86e6fa | 16c45c6168b3a7013b39300085879f7fde819d50 | /man/format_driors.Rd | a9dd22a880be1bdd28b6f39cbe6657a10fda2c21 | [
"MIT"
] | permissive | jcvdav/sraplus | 73531550fc5902367ea63a14863631d622f87261 | db8953aced6522b8825047cb2b17a107da40978d | refs/heads/master | 2020-04-17T05:12:48.476671 | 2019-01-17T17:55:54 | 2019-01-17T17:55:54 | 166,268,263 | 0 | 0 | null | 2019-01-17T17:34:52 | 2019-01-17T17:34:52 | null | UTF-8 | R | false | true | 1,917 | rd | format_driors.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format-driors.R
\name{format_driors}
\alias{format_driors}
\title{Format data and priors for sraplus}
\usage{
format_driors(taxa = "gadus morhua", initial_b = 1,
initial_b_cv = 0.1, terminal_b = 0.25, terminal_b_cv = 0.1,
carry = NA, carry_cv = 0.1, u_v_umsy = NA, u_years = NA,
u_cv = 0.1, final_u = NA, final_u_cv = NA, catch = NA,
years = NA, index = NA, effort = NA, ref_type = "k",
index_years = 1, effort_years = 1, use_heuristics = FALSE,
f_cv = 0.1)
}
\arguments{
\item{taxa}{the genus and species of the species}
\item{initial_b}{b reference point in the initial year}
\item{initial_b_cv}{cv associated with initial b reference point}
\item{terminal_b}{b reference point in the terminal year}
\item{terminal_b_cv}{cv associated with terminal b reference point}
\item{carry}{prior on carrying capacity}
\item{carry_cv}{cv associated with prior on carrying capacity}
\item{u_v_umsy}{u/umsy data over time}
\item{u_years}{years in which u/umsy data are available}
\item{u_cv}{cv associated with u/umsy data}
\item{final_u}{vector of priors on u/umsy in the terminal years}
\item{final_u_cv}{vector of cvs on u/umsy in the terminal years}
\item{catch}{vector of catches over lifetime of fishery}
\item{years}{vector of years that the catch data correspond to}
\item{index}{vector of an abundance index}
\item{effort}{vector of an effort series}
\item{ref_type}{k if initial and final depletions are in depletion units, b if in b/bmsy units}
\item{index_years}{the years in which abundance index data are available}
\item{effort_years}{years in which effort data are available}
\item{use_heuristics}{logical, TRUE uses catch-msy heuristics for priors, FALSE requires user to pass them}
\item{f_cv}{no idea, at all}
}
\value{
a list of data and priors
}
\description{
Format data and priors for sraplus
}
|
0ee5a7d6826ab07400fda1034c7079c38aec1f5d | e3de9965e161c401a5946852a002e1aec0d130b4 | /man/bam2bedgraph.Rd | 050a2aa5bb5863178be1171973f2c1c6690817c1 | [] | no_license | sidiropoulos/RNAprobR | 3f653b3c1721269bfb9bb1428fdb41ef3e6242d5 | d445a82bfe84e22b17d0a8f0bb434f438942f23b | refs/heads/master | 2021-01-02T09:09:57.398859 | 2017-11-02T17:28:04 | 2017-11-02T17:28:04 | 40,180,196 | 0 | 2 | null | 2015-08-04T11:11:42 | 2015-08-04T11:11:42 | null | UTF-8 | R | false | false | 1,413 | rd | bam2bedgraph.Rd | \name{bam2bedgraph}
\alias{bam2bedgraph}
\title{Function converts bam file to bedgraph by counting number of reads starting
at each position (termination counts).
It creates two-track bedgraph file (one track for each strand).}
\usage{
bam2bedgraph(bam_path, allowed_flags = 0:4095, maxMemory = 8000,
genome_build, bedgraph_out_file = "out_file", track_name = "Track_name",
track_description = "Track_description")
}
\arguments{
\item{bam_path}{path to a bam file to be converted}
\item{allowed_flags}{integer vector with SAM flags should be kept, see
https://broadinstitute.github.io/picard/explain-flags.html for explanation}
\item{maxMemory}{maxMemory of scanBam function used internally}
\item{genome_build}{character specifying which UCSC genome build should data
be displayed in, e.g. "mm9"}
\item{bedgraph_out_file}{character specifying prefix of output file.
Generated file name is: prefix.bedgraph; if file with such a name already
exists new tracks will be appended.}
\item{track_name}{character specifying track name}
\item{track_description}{character specifying track description}
}
\value{
NULL. Creates a two-track bedgraph file (one track for each strand).
}
\description{
Function converts bam file to bedgraph by counting number of reads starting
at each position (termination counts).
It creates two-track bedgraph file (one track for each strand).
}
\author{
Lukasz Jan Kielpinski
}
|
91ae8acf3fa915264c8fbe7cd99a12823afe7ec5 | 96ee33ed52f11a4efb4d32fdb4b4c02e3cec2940 | /PredictionOutputs.R | 5b7a92620be82a9280e52cf9e9a3198778549e45 | [] | no_license | jSandersWhite/PetFinder | 5778ce47307a60f68812969c92b444a4ce031809 | decda65fbfc1a5d1e57c3ac1edd7d115c8eec448 | refs/heads/master | 2020-05-14T22:29:21.236000 | 2019-04-25T00:04:07 | 2019-04-25T00:04:07 | 181,979,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,877 | r | PredictionOutputs.R | ## Best models and predicted outputs for Tuned C.50, Bagging, and SVM
## Import packages
library(kernlab)
library(caret)
library(gmodels)
library(ipred)
library(e1071)
library(adabag)
## Import data
pet <- readRDS('data/pet.csv')
## Numerical data for SVM
petNum <- read.csv('data/train/train.csv')
## Clean the data
petNum$AdoptionSpeed <- factor(petNum$AdoptionSpeed, levels = c(0, 1, 2, 3, 4),
labels = c("Same Day", "7 Days", "30 Days", "90 Days", "No Adoption"))
## Remove petID, remove created vars
petID <- pet[22]
petNum <- petNum[-c(2, 19, 21, 22)]
pet <- pet[-c(2, 19, 21, 22)]
pet <- pet[-c(24, 23, 22, 21)]
## Split into train/test sets
set.seed(1812)
petNumTrain <- petNum[1:12750, ]
petNumTest <- petNum[12751:14993, ]
petTrain <- pet[1:12751, ]
petTest <- pet[12751:14993, ]
petIDTest <- petID[12751:14993, ]
## Gaussian kernel SVM
classifierSVM <- ksvm(petNumTrain$AdoptionSpeed ~ ., data = petNumTrain, kernel = "rbfdot")
petPredictSVM <- predict(classifierSVM, petNumTest)
agreementSVM <- petPredictSVM == petNumTest$AdoptionSpeed
CrossTable(petPredictSVM, petNumTest$AdoptionSpeed)
table(agreementSVM)
prop.table(table(agreementSVM))
## Save SVM predictions as RDS
svmPrediction <- data.frame(petID = petIDTest, prediction = petPredictSVM)
saveRDS(svmPrediction, file = 'svmPrediction.csv')
## Full tuned C5.0 model
ctrl <- trainControl(method = "cv", selectionFunction = "oneSE")
grid <- expand.grid(.model = "rules",
.trials = c(1, 5, 10, 15, 20, 25, 30, 35),
.winnow = "FALSE")
fullC5 <- train(AdoptionSpeed ~ ., data = petNumTrain, method = "C5.0",
metric = "Kappa",
trControl = ctrl,
tuneGrid = grid)
fullC5Predict <- predict(fullC5, petNumTest)
table(fullC5Predict, petNumTest$AdoptionSpeed)
head(predict(fullC5, petNumTest, type="prob"))
head(fullC5)
## Save tuned C5.0 model as RDS
tunedC5Prediction <- data.frame(petID = petIDTest, prediction = fullC5Predict)
saveRDS(tunedC5Prediction, file = 'fullc5Prediction.csv')
## Bootstrap aggregator model
fullbag <- bagging(AdoptionSpeed ~ ., data = petTrain, nbagg = 25)
fullbagPred <- predict(fullbag, petTest)
fullbagPred$confusion
fullbagCtrl <- trainControl(method = "cv")
train(AdoptionSpeed ~ ., data = petTrain, method = "treebag", trControl = fullbagCtrl)
fullBagTestPred <- predict(fullbag, petTest)
fullbagTestPred$confusion
head(fullBagTestPred) ## Test set predictions from boostrap aggregator model
## Save single RDS
joshRDS <- data.frame(petID = petIDTest, svmPrediction = petPredictSVM, tunedC5Prediction = fullC5Predict, bagPredict = fullBagTestPred)
saveRDS(joshRDS, "josh.RDS")
|
a3f32eb604c74ea22b3f1c966a6e71619060c4a8 | 4bd77d14bd2541dabbdc29479f7a1b182ed101eb | /man/individual_data.Rd | 9a933d394c3b4e3c48887992b508b02dde5a01fe | [] | no_license | Spatial-R/EnvExpInd | f416350836cb985cc6f34158fb833c2f1cb3be70 | cc1051108bee08a3ee99bd12d8c9ebb449938071 | refs/heads/master | 2023-01-01T05:39:27.811575 | 2020-10-19T03:55:45 | 2020-10-19T03:55:45 | 90,520,549 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 587 | rd | individual_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wuhan_data.R
\docType{data}
\name{individual_data}
\alias{individual_data}
\title{The detailed information for each individual.}
\format{
A data frame with 21 rows and 4 variables:
\describe{
\item{id}{id number for each individual}
\item{date}{the monitoring time point}
\item{lat}{the latitude for each individual}
\item{lon}{the longitude for each individual}
...
}
}
\usage{
individual_data
}
\description{
A dataset containing the detailed information for each individual
}
\keyword{datasets}
|
cd42ae73c95025188e3ee133352fd7f142c0dddd | 1a6d54370cf45d385fe783afcffab4961501540c | /database/judge_biography/database.R | b067a72b9a1b931cb687ec47c80279d865bedaff | [] | no_license | alexahn917/ExParte | eeb966d39cd07eb7f050c02a0af2a9e3dc731e2f | d2dc2d820aaf67de364fa26110a6c1b87a1ca214 | refs/heads/master | 2021-06-12T05:14:34.505351 | 2017-03-12T07:26:16 | 2017-03-12T07:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 726 | r | database.R | library('dplyr')
DB <- read.csv("jb.txt")
names(DB)
#list <- select(DB, Judge.Identification.Number, Judge.First.Name, Judge.Last.Name, Birth.year, Place.of.Birth..City., Place.of.Birth..State., Court.Name, Court.Type, President.name, Party.Affiliation.of.President, Renominating.President.name, Party.Affiliation.of.Renominating.President)
# Relevant Features
features <- as.character(read.csv("features.csv", header=F)$V1)
features[1][1] <- 'Judge.Identification.Number'
judges <- select(DB, one_of(features))
judges <- arrange(judges, Name.of.School)
head(judges)
school_names <- as.factor(judges$Name.of.School)
levels(school_names) <- 1:length(levels(school_names))
school_nums <- as.numeric(levels(school_names)) |
8449f0106ca4cbeea75b68568ab0ad5c1b36579c | 9e1f73ac2c4ae4676d945a1de8d72f0530b6f1c0 | /rand.fun1.R | 7e35e4d595bc8927a8e76af2e05b9d6a79702068 | [] | no_license | hmaeda/Hello-World | a23aa38a9bb647fdcac18ef30219658f5b239920 | f249c76bf4db254b5ad6518ceb7de3949b6c915c | refs/heads/master | 2020-06-02T19:06:57.174119 | 2013-07-10T20:42:32 | 2013-07-10T20:42:32 | 1,873,508 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 40 | r | rand.fun1.R | rand.fun1 <- function(){
rnorm(1000)
} |
ef668ea0c5cd3bcce5b15ffbcf0d00fa9a36e919 | 5813cf97fd8f6f5bee4797fe8cfb5aeeb07e2c23 | /plot3.R | 374f0d1581ed612ed30d98f31e4be9e54c89fcce | [] | no_license | Doug0212/ExData_Plotting1 | b3becd0f371b286ca8250ca25171d7fcda326fe5 | 06cefe18aefcdf9544f0b57ad0cceee63d735f0d | refs/heads/master | 2021-01-17T13:18:26.123480 | 2015-02-05T21:19:58 | 2015-02-05T21:19:58 | 30,310,853 | 0 | 0 | null | 2015-02-04T17:15:21 | 2015-02-04T17:15:21 | null | UTF-8 | R | false | false | 1,521 | r | plot3.R | # Set path
setwd("C:/DataScience/ExploratoryDataAnalysis/Project1")
# Read household_power_consumption.txt file into variable
hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Get subset of rows where date equals 02/01/2007 and 02/02/2007 and merge files
hpc1 <- hpc[hpc$Date == "1/2/2007",]
hpc2 <- hpc[hpc$Date == "2/2/2007",]
hpc3 <- rbind(hpc1, hpc2)
# Add a new blank column - DateTime
hpc3["DateTime"] <- ""
# Paste Date and Time column together and put in new column DateTime
hpc3$DateTime <- paste(hpc3$Date, hpc3$Time, sep=" ")
# Convert DateTime column from character to date/time
hpc3$DateTime <- strptime(hpc3$DateTime, "%d/%m/%Y %H:%M:%S")
# Convert sub-metering 1, 2 and 3 columns to numeric
hpc3$Sub_metering_1 <- as.numeric(as.character(hpc3$Sub_metering_1))
hpc3$Sub_metering_2 <- as.numeric(as.character(hpc3$Sub_metering_2))
hpc3$Sub_metering_3 <- as.numeric(as.character(hpc3$Sub_metering_3))
# Open png device and set width and height
png(filename = "plot3.png",
width = 480, height = 480)
# Generate overlaying line graphs
plot(hpc3$DateTime, hpc3$Sub_metering_1, type="l",
xlab="", ylab="Energy sub metering")
lines(hpc3$DateTime, hpc3$Sub_metering_2, type="l", col="red")
lines(hpc3$DateTime, hpc3$Sub_metering_3, type="l", col="blue")
# Add legend
legend("topright", pch = "____", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Turn graphic device off
dev.off()
|
0e8bcb7c52aad9ad3d5368f26c75a93d8c72baa5 | 7c756f0eaa3779587672508437e5eef5db9b9236 | /man/is_discrete.Rd | 3951ba0728386a9ae18918ee9d939f6d0c6979df | [
"MIT"
] | permissive | vjcitn/baker | 6da8e43e717eeb68f00b80eb27cdd60dfc9336f8 | 140718dfb9e87d23543570f4a402a39ea024f095 | refs/heads/master | 2021-01-19T10:32:40.575869 | 2016-12-21T16:55:12 | 2016-12-21T16:55:12 | 82,190,742 | 0 | 0 | null | 2017-02-16T14:42:24 | 2017-02-16T14:42:24 | null | UTF-8 | R | false | true | 745 | rd | is_discrete.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{is_discrete}
\alias{is_discrete}
\title{Check if covariates are discrete}
\usage{
is_discrete(X, X_reg)
}
\arguments{
\item{X}{A data frame of covariates}
\item{X_reg}{The vector of covariates that will stratify the analyses. These
variables have to be categorical.}
}
\value{
\code{TRUE} for discrete; \code{FALSE} otherwise.
}
\description{
\code{is_discrete} checks if the specified covariates could be regarded as discrete
variables.
}
\details{
Note that this function should be used with caution. It uses
\deqn{nrow(X)/nrow(unique(X[,X_reg,drop=FALSE]))>10} as an \emph{ad hoc} criterion.
It is not the same as \code{\link[plyr]{is.discrete}}
}
|
6ad8c154590fd8ba6446abe65eef2e7641363ed2 | 4e0d6c32e666ddcf17963f8615c736d5fc3eb301 | /inst/samples/test08-singleGroup.R | a397850983b2f7a20f7eb1a17d3862b4631bcf59 | [] | no_license | cran/ClassComparison | ff522e3ab4bdf6d38be6956f0f72c05ebb980f1d | 6118a8471bbaad8167ed206ce3fd770855435e5e | refs/heads/master | 2020-06-24T14:29:47.094027 | 2019-05-06T15:40:12 | 2019-05-06T15:40:12 | 96,940,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 205 | r | test08-singleGroup.R | # two-group-stats-test.ssc
# Smoke test: simulate 1000 per-gene means and variances, construct a
# SingleGroup object (from the ClassComparison package -- assumed attached
# by the test harness), and exercise its summary/plot methods.
m <- rnorm(1000, 8, 2.5)
v <- rnorm(1000, 0.7)
plot(m, v)
x <- SingleGroup(m, v, name='bogus')
summary(x)
plot(x)
# 'multiple' presumably widens the flagged band -- see SingleGroup docs
plot(x, multiple=2)
# cleanup
rm(m, v, x)
|
7014fd956b9455dea2e4df4915f7f1ccc41cd5d8 | 9e962cc25ed5cbd6223db61f5b61eca7195c3512 | /cachematrix.R | 4720c7eba6ba32a427848c59cdd49ac7c8a52efe | [] | no_license | martinwolst/ProgrammingAssignment2 | 16e0c7ff5d478f2edb9b341bc7d4b09944573d14 | c592caa18170b6528ec9c8f2bf0c9f738f26b2f9 | refs/heads/master | 2021-01-15T10:58:26.943614 | 2016-05-07T16:33:10 | 2016-05-07T16:33:10 | 57,602,003 | 0 | 0 | null | 2016-05-01T14:36:53 | 2016-05-01T14:36:53 | null | UTF-8 | R | false | false | 2,105 | r | cachematrix.R | ## The functions below allow the creation of a special matrix object, which can
## cache the inverse of the matrix held within the object once it has been calculated.
## Once the inverse has been cached, the inversion calcualtion is not repeated,
## rather the cached inverted matrix is simply retrieved and returned.
## makeCacheMatric takes a matrix as the only argument and makes a special matrix object,
## exposing a set of 4 functions to allow setting/getting of the matrix and a
## cached variable (which dosen't strictly have to be a matrix but it is in this case)
makeCacheMatrix <- function(mx = matrix()) {
## initialize inv within this function to NULL,
## ensures null returned if no cached item exists
inv <- NULL
## function that updates the input matrix and clears the cached item
set <- function(y){
mx <<- y
inv <<- NULL # sets cached inverse to null
}
## function that returns the matrix
get <- function() mx
## function that stores the cached item
setinv <- function(solved) inv <<- solved
## function that returns the cached item
getinv <- function() inv
## returns the functions above
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve returns the inverse of the matrix held in the special cache
## object created by makeCacheMatrix. The inverse is computed (via solve)
## only on the first call; subsequent calls return the cached result.
##
## x   : cache object created by makeCacheMatrix
## ... : additional arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return the cached inverse if one has already been stored
  mx <- x$getinv()
  if(!is.null(mx)){
    message("using cached inverse matrix")
    return(mx)
  }
  ## Cache miss: fetch the matrix, invert it, and store the result back
  ## in the cache object for future calls
  mat <- x$get()
  ## Fix: forward ... to solve() (it was declared but silently ignored)
  mx <- solve(mat, ...)
  x$setinv(mx)
  ## Return the inverted matrix
  mx
}
|
e7ac3c69fe0fd60146546040060601ced63f77a2 | 4855e806d6a5b65643c49ed3b602db276fe76d30 | /3. Static Map/Choosing colours.R | de449cf802cc2ded16b6966508a324e8d30a0a03 | [] | no_license | Cococatty/InteractiveMap | 5701a607a7605a4958c037b6b5559841c67126eb | 698b173ab0393cc38fdfd69f09b169dd87fd9f3d | refs/heads/master | 2021-01-10T18:14:56.274796 | 2016-02-17T09:02:23 | 2016-02-17T09:02:45 | 47,664,845 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 795 | r | Choosing colours.R | geoCol <- c()
# Assign a fill colour to each territorial authority (TA) by binning its
# Percentage value into the class-break intervals.
#
# Relies on objects defined earlier in the file (not visible here):
#   shape      - spatial object with @data$Percentage and @data$Mean
#   travelMean - the travel mode currently being mapped
#   nclass     - class breaks in nclass$brks (5 breaks -> 5 bins) -- source
#                object TODO confirm (looks like classInt output)
#   geoPal     - 6-colour palette; geoPal[6] is the NA/"no data" colour
#   x1..x6     - per-bin counters, assumed initialised before this loop
#   geoCol     - accumulator vector, initialised empty before this loop
#Assigning the colours to TAs by the number of people
for (i in 1:length(shape@data$Percentage[shape@data$Mean == travelMean]))
{
  # Percentage for the i-th TA of the selected travel mode
  x <- shape@data$Percentage[shape@data$Mean == travelMean][i]
  if (!is.na(x))
  {
    # Bin into [..brks[2]), [brks[2]..brks[3]), ... , [brks[5]..]
    if (x < nclass$brks[2]) {
      currCol <- geoPal[1]
      x1 <- x1+1
    } else if (nclass$brks[2] <= x && x < nclass$brks[3]) {
      currCol <- geoPal[2]
      x2 <- x2+1
    } else if (nclass$brks[3] <= x && x < nclass$brks[4]) {
      currCol <- geoPal[3]
      x3 <- x3+1
    } else if (nclass$brks[4] <= x && x < nclass$brks[5]) {
      currCol <- geoPal[4]
      x4 <- x4+1
    } else if (x >= nclass$brks[5]) {
      currCol <- geoPal[5]
      x5 <- x5+1
    }
  }
  else {
    # Missing value: use the dedicated "no data" colour
    currCol <- geoPal[6]
    x6 <- x6+1
  }
  # NOTE(review): growing geoCol with c() inside the loop is O(n^2); kept
  # byte-identical here since geoCol is initialised outside this chunk.
  geoCol <- c(geoCol, currCol)
}
a85dd52a5251a9390e823a0661be5caa11767497 | 74fec5abcb297a6415a5233bce1b6b647c7cc505 | /5/2_EdgeR_Tab4.R | f83abc2ae9e5b0abb60ee9af718fb20e359d2002 | [] | no_license | GiovanniCarapezza/KING-REX | f6e311d13bdb604e86614aec7860fb0391644d58 | f06a8e0eec4da0a784b0e7846882830d24a19993 | refs/heads/master | 2020-04-21T12:46:29.726979 | 2019-02-07T15:02:41 | 2019-02-07T15:02:41 | 169,574,440 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,273 | r | 2_EdgeR_Tab4.R | #to excecute this script, you must run first script in folder 3
library(edgeR)
library(stringi)
library(pheatmap)
rm(list = ls())
#substitute your path to results
path_to_results="~/Scrivania/KING-REX data and scripts for BMC2"
#set working directory to this source file location
setwd(paste(path_to_results,"/Results/5",sep=""))
path_input=paste(path_to_results,"/Results/3/edgeR_input",sep="")
path_output=paste("Imbalance analysis",sep="")
if(!dir.exists(path_output)){dir.create(path_output,recursive = T)}
#sample to test ordered from 100% karpas299 to 0% Karpas299
#they also correspond to 0% u118mg to 100% u118mg as shown in Tab.4
samples_to_test=c("KARPAS299", "KU8712", "KU7525", "KU50",
"UK7525", "UK8712", "U118MG")
#output file
filter_pval=1E-72
filter_FC=3
out_fusion=paste("fusion","_pv_",filter_pval,"_fc_",filter_FC,".txt",sep="")
write(x = c("Sample","Kinase","FC","p-value","IN_A","IN_B","OUT_A","OUT_B"),file = paste(path_output,out_fusion,sep="/"),ncolumns = 8,append = F,sep = "\t")
# Detect imbalanced kinases: for each dilution sample, run an edgeR exact
# test comparing the IN vs OUT libraries, keep kinases passing the p-value
# and fold-change cut-offs, draw a heatmap of their log2 counts, and append
# one row per kinase to the fusion output file.
for(sample in samples_to_test){
  # Build the edgeR targets table for this sample from the shared
  # condition file (col 1 = sample, cols 2-4 = name / file / IN-OUT label)
  sampleTable=read.table(paste(sep="",path_input,"/condition"),header=T,stringsAsFactors = F)
  samples=unique(sampleTable[,1])
  dir_sample=paste(path_output,"/",sample,sep="")
  if(!dir.exists(dir_sample)){dir.create(dir_sample,recursive = T)}
  sel=sampleTable[,1]==sample
  sampleTable2=sampleTable[sel,c(2,3,4)]
  sampleTable2[,3]=as.factor(sampleTable2[,3])
  rownames(sampleTable2)=c(1:dim(sampleTable2)[1])
  colnames(sampleTable2)=c("sampleName","fileName","condition")
  targets=sampleTable2
  targets$fileName <- paste(path_input, targets$fileName, sep='/')
  # Rename/reorder to the (files, group, description, labels) layout that
  # readDGE expects; group 1 = IN, group 2 = OUT
  colnames(targets)=c("labels","files","description")
  targets$group=as.character(targets$description)
  targets$group[targets$group=="IN"]=1
  targets$group[targets$group=="OUT"]=2
  targets=targets[,c(2,4,3,1)]
  ### DE analysis with edgeR: read counts, estimate dispersions, exact test
  d <- readDGE(targets, skip=0, comment.char = "!")
  d <- estimateCommonDisp(d, verbose = F)
  d <- estimateTagwiseDisp(d)
  et <- exactTest(d)
  # Keep only tags passing the (BH-adjusted) p-value cut-off
  top <- topTags(et,p.value = filter_pval,adjust.method ="BH",n=Inf)
  # Log2 counts (pseudocount 1) for the heatmap / output columns
  counts_norm_log2=as.data.frame(log2(d$counts+1))
  colnames(counts_norm_log2)=d$samples$labels
  if(nrow(top)>0){
    # Flip the sign so positive logFC means IN > OUT, then apply the FC cut
    top$table$logFC=-top$table$logFC
    sel1=top$table$logFC>filter_FC
    top$table=top$table[sel1,]
    ##################
    ####Heatmap#######
    # Column annotation (IN/OUT condition) and a 20-step green->red palette
    df <- as.data.frame(d$samples$description,row.names =d$samples$labels )
    colnames(df)[1]="condition"
    list_col=list(condition=c(IN="black",OUT="blue"))
    hmcol=colorRampPalette(c(
      rep("green",4),
      rep("darkgreen",2),
      rep("yellow",2),
      rep("orange",2),
      rep("orangered2",2),
      rep("red",8)
    ),space="rgb")(20)
    brs=0:20
    # Rows: passing kinases ordered by decreasing FC; columns: IN then OUT
    kins=rownames(top$table)
    kins2=intersect(kins,rownames(counts_norm_log2))
    data_log1=counts_norm_log2[kins2,]
    byFC=rownames(top$table)[order((top$table$logFC),decreasing = T)]
    data_log1=data_log1[byFC,]
    data_log1=data_log1[,c(which(stri_detect_fixed(colnames(data_log1),"IN")),which(stri_detect_fixed(colnames(data_log1),"OUT")))]
    # Device size and cell/font sizes scale with the number of kinases;
    # note the later if() branches overwrite the earlier ones for small kins
    wd = 15*length(colnames(data_log1))+300
    ht = 12*length(kins)+250
    if(length(kins)>30){cw=15;ch=8;fn=8}#;wd=600;ht=1200}
    if(length(kins)<=30){cw=30;ch=25;fn=10}#;wd=600;ht=1024}
    if(length(kins)<=10){cw=30;ch=30;fn=10}#;wd=500;ht=500}
    if(length(kins)<=5){cw=30;ch=30;fn=10}#;wd=360;ht=360}
    png(paste(dir_sample,"/Heatmap_counts2.png",sep=""),width=wd,height = ht)
    # NOTE(review): assigning to `c` shadows base::c for non-call uses;
    # harmless here (function lookup skips it) but worth renaming upstream
    c=pheatmap(data_log1,annotation_col=df,color =hmcol ,scale="none",breaks = brs,
               fontsize_number=fn,
               border_color="white",
               number_color="#2F2F2F",
               cellwidth =cw,
               cellheight =ch,
               annotation_colors=list_col,display_numbers=round(data_log1,1),
               cluster_rows=F,cluster_cols = T)
    dev.off()
    # Append one tab-separated row per passing kinase to the fusion file,
    # and echo the same row to the console as a labelled data frame
    for(row_top in 1:nrow(top$table)){
      write(x = c(sample,
                  rownames(top$table)[row_top],
                  round(top$table$logFC[row_top],2),
                  top$table$PValue[row_top],
                  as.numeric(data_log1[rownames(top$table)[row_top],])),
            file = paste(path_output,out_fusion,sep="/"),ncolumns = 8,append = T,sep = "\t")
      x=as.data.frame(cbind(sample,
                            rownames(top$table)[row_top],
                            round(top$table$logFC[row_top],2),
                            top$table$PValue[row_top],
                            round(as.numeric(data_log1[rownames(top$table)[row_top],1]),1),
                            round(as.numeric(data_log1[rownames(top$table)[row_top],2]),1),
                            round(as.numeric(data_log1[rownames(top$table)[row_top],3]),1),
                            round(as.numeric(data_log1[rownames(top$table)[row_top],4]),1)))
      colnames(x)=c("Sample","Kinase","FC","p-value","IN_A","IN_B","OUT_A","OUT_B")
      print(x)
      print("")
    }
  }else{
    # No kinase passed the cut-offs: write an empty marker row
    write(x = c(sample,"","","","","","",""),
          file = paste(path_output,out_fusion,sep="/"),ncolumns = 8,append = T,sep = "\t")
  }
}
59774e9ff3cb1e71a8dfecac495ece2f3f3bb39e | f5009969ad3f97807fd4b7ae7e2f5229b2160fa0 | /plot5.R | 37cccf777581f747af31dc272af3241ee13fb8aa | [] | no_license | doczetch/Coursera-DS-Exploratory-Data-Analysis-Course-Project-2 | 4eb8b0c6448c4f3ec72fb7fef06df0b69bbf4c81 | 0a896d14bc721381f6f70741691e124c29f8b851 | refs/heads/master | 2021-04-15T14:47:53.480613 | 2018-03-31T10:11:25 | 2018-03-31T10:11:25 | 126,468,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | r | plot5.R | # plot5
# A. Check existence of files
# Load the (large) NEI/SCC RDS files only once per session.
if(!exists("NEI")){
  NEI <- readRDS("./data/summarySCC_PM25.rds")
}
if(!exists("SCC")){
  SCC <- readRDS("./data/Source_Classification_Code.rds")
}
# B. Question
## How have emissions from motor vehicle sources changed from 1999-2008 in <b>Baltimore City</b>?
# C. plot5
# png("plot5.png", width =number.add.width, height=number.add.height)
# Group total NEI emissions per year:
# fips "24510" selects Baltimore City; type "ON-ROAD" selects motor vehicle sources.
baltcitymary.emissions<-NEI[(NEI$fips=="24510") & (NEI$type=="ON-ROAD"),]
require(dplyr)
baltcitymary.emissions.byyear <- summarise(group_by(baltcitymary.emissions, year), Emissions=sum(Emissions))
require(ggplot2)
# Yearly totals as a labelled bar chart (the chain continues on the next line).
ggplot(baltcitymary.emissions.byyear, aes(x=factor(year), y=Emissions,fill=year, label = round(Emissions,2))) +
  geom_bar(stat="identity") +
  xlab("Year") +
  ylab(expression("Total PM"[2.5]*" Emissions in Tons")) +
  ggtitle("Emissions from Motor Vehicle Sources in Baltimore City")+
geom_label(aes(fill = year),colour = "white", fontface = "italic") |
8d3bb6952ba7a76a83b466857ff46384c1b3e6ed | 280fae7f01002ddc95c0e7ec617740a58752403d | /tests/testthat.R | cfea2d5b4bccbc790f2700cd007516555b0f0971 | [
"BSD-2-Clause"
] | permissive | ktoddbrown/soilDataR | 87bb4ed675959f3fbd75024dd7b014e1966148dd | 44ab9e6ac00e49ea0106508de8ead356d9e39fa5 | refs/heads/master | 2021-04-30T07:27:34.349030 | 2018-11-09T20:07:20 | 2018-11-09T20:07:20 | 92,432,342 | 3 | 11 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(SoilDataR)
# testthat entry point: discovers and runs all tests under tests/testthat/.
test_check("SoilDataR")
|
6a9f554173ef6682fb396182f33a28902025ad3b | eabaf84698224f90218a5c5a1aaedec4e868d217 | /Assignment 1 - Karger's Algorithm/Karger's Analysis.R | 59630981cfe0a7adefe0d079680b0c42ee0bcc31 | [] | no_license | AlbertoParravicini/data_structures_and_algorithms | 4aa5196c5a910a7d67aaa9f6e2a74c6b3f569290 | 46ea766a67b3d4c6d165c8bb89163bc208890063 | refs/heads/master | 2021-01-18T20:12:33.317875 | 2016-12-15T10:34:14 | 2016-12-15T10:34:14 | 69,767,261 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,107 | r | Karger's Analysis.R | library(igraph)
library(dplyr)
library(ggplot2)
library(microbenchmark)
setwd("C:\\Users\\albyr\\Documents\\data_structures_and_algorithms\\Assignment 1 - Karger's Algorithm")
source("Karger's Algorithm.R")
# -------------------
# -------------------
# Profling Utilities
# -------------------
# -------------------
# Write a space-separated table under "Results/", tagging the file name with
# the current timestamp (colons are stripped so the name is valid on every
# platform, Windows included).
#
# Args:
#   results: data frame (or matrix) to write.
#   name:    human-readable prefix for the output file.
#   append:  forwarded to write.table(); append to an existing file?
# Returns: the generated file path, so callers can keep appending to it.
write_with_name_and_date <- function(results, name, append = TRUE) {
  file_name <- sprintf("Results/%s - %s.csv", name, Sys.time())
  file_name <- gsub(":", "", file_name, fixed = TRUE)
  write.table(results, file = file_name, append = append, quote = FALSE,
              col.names = TRUE, sep = " ", row.names = FALSE)
  return(file_name)
}
# increasing nodes and edges, 4*k edges
# 50 nodes, increasing edges
# 10 different graphs, 100 tests each
# ---- TEST 1 ----
# Increasing nodes and edges; num_nodes = 20:100, num_edges = 4*num_nodes
# (current settings benchmark a single graph size of 1000 vertices)
min_num_nodes = 1000
max_num_nodes = 1000
step = 10
edge_amplification = 4
# Empty template frame: one row per Karger run is appended below.
results = data.frame(num_vertices = numeric(0), num_edges = numeric(0), iteration = numeric(0), correct = logical(0), time = numeric(0))
file_name <- write_with_name_and_date(results, "karkger_incr_vertices_edges", append = F)
# For every graph size: build one random connected graph, run the randomized
# contraction 50 times and record correctness (vs the exact min cut) and time.
for (num_vert_i in seq(min_num_nodes, max_num_nodes, by = step)) {
  results = data.frame(num_vertices = numeric(0), num_edges = numeric(0), iteration = numeric(0), correct = logical(0), time = numeric(0))
  g <- make_random_connected_graph(num_vert_i, edge_amplification * num_vert_i, details = F)
  real_min_cut <- min_cut(g)
  num_iterations <- 50
  correct_results <- 0
  for(iteration_i in 1:num_iterations) {
    temp_res <- -1
    # Time a single randomized contraction run.
    exec_time <- microbenchmark(
      temp_res <- karger_random_min_cut(g, F, F),
      times = 1,
      unit = "ms"
    )
    if (temp_res == real_min_cut) {
      correct_results <- correct_results + 1
    }
    # Store the result in the data frame
    temp_row <- c(num_vert_i, edge_amplification * num_vert_i, iteration_i, real_min_cut == temp_res, exec_time$time)
    print(temp_row)
    results <- rbind(results, temp_row)
  }
  write.table(file = file_name, x = results, append = T, row.names = F, col.names = F, sep = " ")
}
# Success rate of TEST 1, averaged per graph size, plotted against the
# theoretical single-run success lower bound 2 / (n * (n - 1)).
results <- read.csv("Results/karger_incr_vertices_edges_tot.csv", header = T, sep = " ")
grouped_data <- results %>% group_by(num_vertices, num_edges) %>% summarise_each(funs(mean(.)), -iteration)
p <- ggplot(grouped_data, aes(x=num_vertices, y = correct))
p <- p + geom_line(size = 1, color = "#4f72fc") + geom_point(size = 2.5, color ="#021f91")
p <- p + theme_minimal() + xlab("Number of vertices") + ylab("Percentage of correct results")
p <- p + theme(axis.text=element_text(size=12), axis.title=element_text(size=14))
p <- p + scale_x_discrete(limits = grouped_data$num_vertices, expand = 0.05)
p <- p + ggtitle(bquote(atop(.("Karger's algorithm with increasing number of vertices and edges"), atop(italic(.("Number of edges: 4 * num_vertices")), ""))))
theoretical_results <- data.frame(num_vertices = grouped_data$num_vertices, baseline = 2 / (grouped_data$num_vertices * (grouped_data$num_vertices - 1)))
p <- p + geom_line(data = theoretical_results, size = 1, color = "#ff4d4d", aes(x = num_vertices, y = baseline)) + geom_point(data = theoretical_results, size = 2.5, color ="#800000", aes(x = num_vertices, y = baseline))
p
# ---- TEST 2 ----
# Increasing edges with fixed number of nodes; num_nodes = 50, num_edges = 50:500
min_num_edges = 50
max_num_edges = 500
step = 50
num_vertices = 50
results = data.frame(num_vertices = numeric(0), num_edges = numeric(0), iteration = numeric(0), correct = logical(0), time = numeric(0))
file_name <- write_with_name_and_date(results, "karkger_incr_edges", append = F)
# Same protocol as TEST 1, but the vertex count is fixed and the edge count grows.
for (num_edge_i in seq(min_num_edges, max_num_edges, by = step)) {
  results = data.frame(num_vertices = numeric(0), num_edges = numeric(0), iteration = numeric(0), correct = logical(0), time = numeric(0))
  g <- make_random_connected_graph(num_vertices, num_edge_i, details = F)
  real_min_cut <- min_cut(g)
  num_iterations <- 50
  correct_results <- 0
  for(iteration_i in 1:num_iterations) {
    temp_res <- -1
    exec_time <- microbenchmark(
      temp_res <- karger_random_min_cut(g, F, F),
      times = 1,
      unit = "ms"
    )
    if (temp_res == real_min_cut) {
      correct_results <- correct_results + 1
    }
    # Store the result in the data frame
    temp_row <- c(num_vertices, num_edge_i, iteration_i, real_min_cut == temp_res, (exec_time$time))
    print(temp_row)
    results <- rbind(results, temp_row)
  }
  write.table(file = file_name, x = results, append = T, row.names = F, col.names = F, sep = " ")
}
# Success rate of TEST 2 per edge count, with the 2 / (n * (n - 1))
# theoretical single-run success baseline.
results <- read.csv("Results/karger_incr_edges_total.csv", header = T, sep = " ")
grouped_data <- results %>% group_by(num_vertices, num_edges) %>% summarise_each(funs(mean(.)), -iteration)
p <- ggplot(grouped_data, aes(x=num_edges, y = correct))
p <- p + geom_line(size = 1, color = "#4f72fc") + geom_point(size = 2.5, color ="#021f91")
p <- p + theme_minimal() + xlab("Number of edges") + ylab("Percentage of correct results")
p <- p + theme(axis.text=element_text(size=12), axis.title=element_text(size=14))
p <- p + scale_x_discrete(limits = grouped_data$num_edges, expand = 0.05)
p <- p + ggtitle(bquote(atop(.("Karger's algorithm with increasing number of edges"), atop(italic(.("Number of vertices: 50")), ""))))
theoretical_results <- data.frame(num_vertices = grouped_data$num_edges, baseline = 2 / (grouped_data$num_vertices * (grouped_data$num_vertices - 1)))
p <- p + geom_line(data = theoretical_results, size = 1, color = "#ff4d4d", aes(x = num_vertices, y = baseline)) + geom_point(data = theoretical_results, size = 2.5, color ="#800000", aes(x = num_vertices, y = baseline))
p
# ------ PLOT THE EXECUTION TIME ----
# TEST 1
# Mean execution time per graph size. Times are divided by 10^9 to plot
# seconds; zero and implausibly large timings are filtered out first.
results <- read.csv("Results/karger_incr_vertices_edges_tot.csv", header = T, sep = " ")
grouped_data <- filter(results, time != 0, time < 10^11) %>% group_by(num_vertices, num_edges) %>% summarise_each(funs(mean(.)), -iteration)
p <- ggplot(grouped_data, aes(x=num_vertices, y = time / 10^9))
p <- p + geom_line(size = 1, color = "#4f72fc") + geom_point(size = 2.5, color ="#021f91")
p <- p + theme_minimal() + xlab("Number of vertices") + ylab("Execution time [sec]")
p <- p + theme(axis.text=element_text(size=12), axis.title=element_text(size=14))
p <- p + scale_x_discrete(limits = grouped_data$num_vertices, expand = 0.05)
p <- p + ggtitle(bquote(atop(.("Karger's algorithm with increasing number of edges"), atop(italic(.("Execution time. Number of edges: 4 * num_vertices")), ""))))
theoretical_results <- data.frame(num_vertices = grouped_data$num_vertices, baseline = 2 / (grouped_data$num_vertices * (grouped_data$num_vertices - 1)))
p <- p + geom_line(data = theoretical_results, size = 1, color = "#ff4d4d", aes(x = num_vertices, y = baseline)) + geom_point(data = theoretical_results, size = 2.5, color ="#800000", aes(x = num_vertices, y = baseline))
p
# TEST 2
results <- read.csv("Results/karger_incr_edges_total.csv", header = T, sep = " ")
grouped_data <- filter(results, time != 0, time < 10^11) %>% group_by(num_vertices, num_edges) %>% summarise_each(funs(mean(.)), -iteration)
p <- ggplot(grouped_data, aes(x=num_edges, y = time / 10^9))
p <- p + geom_line(size = 1, color = "#4f72fc") + geom_point(size = 2.5, color ="#021f91")
p <- p + theme_minimal() + xlab("Number of edges") + ylab("Execution time [sec]")
p <- p + theme(axis.text=element_text(size=12), axis.title=element_text(size=14))
p <- p + scale_x_discrete(limits = grouped_data$num_edges, expand = 0.05)
p <- p + ggtitle(bquote(atop(.("Karger's algorithm with increasing number of edges"), atop(italic(.("Execution time with 50 vertices")), ""))))
theoretical_results <- data.frame(num_vertices = grouped_data$num_edges, baseline = 2 / (grouped_data$num_vertices * (grouped_data$num_vertices - 1)))
p <- p + geom_line(data = theoretical_results, size = 1, color = "#ff4d4d", aes(x = num_vertices, y = baseline)) + geom_point(data = theoretical_results, size = 2.5, color ="#800000", aes(x = num_vertices, y = baseline))
p
|
388ace4e3471de0f5c57dbc06c690c7221393223 | 84d4b0f90866b8ef5ab3bd325a295d46b195d20f | /tests/testthat/test-spec_res.R | 20fc9b3c4c6d805e6f03118bdae8bb1860c835c1 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | rainyl/OpenSpecy | 310d8a42bdd6abd39f5c8b1bcd0046bf3338a158 | 92c72594abaaf91925d7c0550e791de5a149192d | refs/heads/main | 2023-05-11T06:28:30.482481 | 2021-06-01T18:04:47 | 2021-06-01T18:04:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | test-spec_res.R | data("raman_hdpe")
test_that("spec_res() gives correct output", {
  # Spectral resolution of the bundled HDPE Raman wavenumber axis, rounded
  # to 3 decimals to keep the comparison stable across platforms.
  expect_equal(round(spec_res(raman_hdpe$wavenumber), 3), 3.005)
})
|
5de30797937875dc9d9e448e8d8fa18391065ead | a363b992b0e3f9ae200eb2eee8a9fd698ae29819 | /times series/codeChapitre10.r | 43ed77ae5f128804c11e23c9017568c060d47569 | [] | no_license | lucayapi/Ressources_ML_Statistics | f938ebbb1a6f41e85622eb85b1546ecde3e69dae | 0302e436a796528b0ae542079fdd2782402336e2 | refs/heads/master | 2023-09-02T04:15:01.298394 | 2021-11-18T13:25:57 | 2021-11-18T13:25:57 | 429,435,808 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,087 | r | codeChapitre10.r | ########################################################
####### Chapter code -- Y. Aragon
####### (the original header said "Codes du chapitre 5" although the file
####### path says chapter 10 -- confirm which chapter this belongs to)
########################################################
# Last updated on 17/08/2016
# Packages used in this chapter
require(caschrono)
require(xtable)
require(dse)
require(polynom)
# 10.1 Identification of the series of residuals obtained by OLS
data(khct)
plot.ts(khct,xlab="temps",main="",cex.lab=.9,cex.axis=.8,
        oma.multi=c(4.5,3,.2,0),mar.multi=c(0, 4, 0, .5),las=0)
# Regression frame up to December 1983: kwh, heating/cooling degree-days,
# a linear trend t1 and its square centered on 1977 (t1.2).
khct.df<-as.data.frame(window(cbind(khct,time(khct),
                                    (time(khct)-1977)^2), end=c(1983,12)))
colnames(khct.df) <- c("kwh","htdd","cldd","t1","t1.2")
mod2 = lm(sqrt(kwh) ~ htdd + cldd + t1 + t1.2, data=khct.df)
u = ts(residuals(mod2), start=c(1970,1), frequency=12)
acf2y(u,numer=FALSE)
require("forecast")
# Candidate seasonal ARMA models for the OLS residuals.
(modar1=Arima(u,order=c(3,0,1),seasonal=list(order=c(1,0,1)),
              include.mean=FALSE))
llag = seq(6,30,6)
t(Box.test.2(residuals(modar1),llag,type ="Ljung-Box",decim=2,fitdf=6))
t_stat(modar1)
(modar2=Arima(u,order=c(2,0,1),seasonal=list(order= c(1,0,1)),
              include.mean= FALSE))
t_stat(modar2)
(modar3=Arima(u,order=c(1,0,1),seasonal=list(order=c(1,0,1)),
              include.mean=FALSE))
(mod.auto=auto.arima(u,max.p=4,max.q=4,max.P=1,approximation=FALSE))
t(Box.test.2(residuals(mod.auto),llag,type="Ljung-Box",decim=2,fitdf=4))
# 10.2 Estimation of the ARMAX model
kwh1rc = window(sqrt(khct[,"kwh"]), end=c(1983,12))
xreg1 = khct.df[ ,c("htdd","cldd","t1","t1.2")]
mdarx1=Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(1,0,1)),
             xreg=xreg1)
xreg2 = xreg1[,-4]
(mdarx2=Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(1,0,1)),
              xreg=xreg2))
t(Box.test.2(residuals(mdarx2),seq(6,30,6),type="Ljung-Box",
             decim=2,fitdf=8))
t_stat(mdarx2)
(mdarx3c=Arima(kwh1rc,order=c(1,0,0),seasonal=list(order=c(1,0,1)),
               xreg=xreg2))
t(Box.test.2(residuals(mdarx3c),seq(6,30,6),type="Ljung-Box",
             decim=2,fitdf=7))
# Regression residual: the series minus the regression part of the ARMAX fit
# (coef[4] is the intercept, coef[5:7] the xreg coefficients).
u.3c=kwh1rc-as.matrix(xreg2)%*%as.matrix(mdarx3c$coef[5:7])-mdarx3c$coef[4]
op=par(oma=rep(0,4))
plot(u.3c,type="l")
abline(h=0)
par(op)
tt = (khct.df$t1 - 1973)^2
mmo = lm(u.3c ~tt)
summary(mmo)
(mod2s = lm(sqrt(kwh) ~ htdd + cldd + t1, data=khct.df))
# 10.3 Estimation of a model with a non-stationary error
(mdarx1 = Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(1,0,1)),
                xreg = xreg1))
(modarimax1=Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(0,1,1)),
                  xreg=xreg1))
(modarimax1b=Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(0,1,1)),
                   xreg = xreg1, include.drift=TRUE) )
xreg1b = xreg1[,-3]
(mx2b=Arima(kwh1rc,order=c(1,0,1),seasonal=list(order=c(0,1,1)),
            xreg = xreg1b, include.drift= TRUE))
t(Box.test.2(residuals(mx2b),seq(6,30,6),type ="Ljung-Box",decim=2,fitdf=7))
t_stat(mx2b)
(modarimax2c=Arima(kwh1rc,order=c(1,0,0),seasonal=list(order=c(0,1,1)),
                   xreg=xreg1b,include.drift=TRUE) )
t(Box.test.2(residuals(modarimax2c),seq(6,30,6),type ="Ljung-Box",decim=2,fitdf=6))
t_stat(modarimax2c)
# 10.4 Forecasting the year 1984
# Forecast from the OLS model.
khct.df.84<-as.data.frame(window(cbind(khct,time(khct),
                                       (time(khct)-1977)^2), start = c(1984,1)))
colnames(khct.df.84) <- c("kwh","htdd","cldd","t1","t1.2")
p2=predict(mod2,khct.df.84,interval="prediction",level=0.80,se.fit=TRUE)
# Forecast from the ARMAX model.
xreg.p=khct.df.84[,2:4]
prev.3c=forecast(mdarx3c,h=12,level=c(80,95),fan=FALSE,xreg=xreg.p)
str(prev.3c)
etyp.pred = (prev.3c$upper[,1]-prev.3c$mean)/qnorm(0.9)
etyp.pred2 = (prev.3c$upper[,2]-prev.3c$mean)/qnorm(0.975)
# Forecast from the ARIMAX model.
xreg.2c=khct.df.84[,c(2,3,5)]
prev.2c=forecast(modarimax2c, h=12,level=c(80,95),fan=FALSE,xreg=xreg.2c)
kwh2rc <- window(sqrt(khct[,"kwh"]), start = c(1984,1))
op=par(oma=rep(0,4))
aa= seq(as.Date("2000/1/1"), by="month", length.out=12)
id.mois= months(aa, abbreviate = TRUE)
plot(ts(cbind( kwh2rc, prev.3c$lower[,1],prev.3c$upper[,1],p2$fit[,2:3]),frequency=12,start=c(1984,1)),
     plot.type = "single", lty=c(1,2,2,3,3), ylab=expression(sqrt(kwh)),cex.main=.8, xlab="1984",
     xaxt = "n")
axis(1, at=seq(1984, 1984.917, length.out=12),labels=id.mois)
title(main="Année 1984 - Bandes de prédiction à 80% : modélisations ARMAX et MCO", cex.main=0.9)
legend( par("usr")[1], par("usr")[4],c("Valeur observée","Prédiction ARMAX","Prédiction MCO"),
        lwd=1, lty=c(1,2,3))
par(op)
# Share of 1984 observations falling outside the prediction bands.
# ARMAX
un80=sum( (kwh2rc < prev.3c$lower[,1])|(kwh2rc > prev.3c$upper[,1]))
un95=sum( (kwh2rc < prev.3c$lower[,2] ) | (kwh2rc > prev.3c$upper[,2]))
cat("taux de non appartenance 95 (ARMAX)= ",sum(un95)/12,"\n")
cat("taux de non appartenance 80 (ARMAX)= ",sum(un80)/12,"\n")
pp= c(sum(un80),sum(un95))/12
# ARIMAX
un80i=sum((kwh2rc < prev.2c$lower[,1])|(kwh2rc > prev.2c$upper[,1]))
un95i=sum((kwh2rc < prev.2c$lower[,2])|(kwh2rc > prev.2c$upper[,2]))
ppi= c(sum(un80i),sum(un95i))/12
cat("taux de non appartenance 80 (ARIMAX)= ",sum(un80i)/12,"\n")
cat("taux de non appartenance 95 (ARIMAX)= ",sum(un95i)/12,"\n")
# Comparing the forecasts
# OLS
p0=predict(mod2, khct.df.84, se.fit=TRUE)
# ARMAX
prev.3c=forecast(mdarx3c,h=12,level=c(80,95),fan=FALSE,xreg=xreg.p)
# ARIMAX
prev.2c=forecast(modarimax2c,h=12,level=c(80,95),fan=FALSE,xreg=xreg.2c)
# Mean squared forecast errors, cumulated over the forecast horizon
b.arimax = cumsum((kwh2rc - prev.2c$mean)^2)/ 1:12
b.armax = cumsum((kwh2rc - prev.3c$mean)^2)/ 1:12
b.mco = cumsum((kwh2rc - p0$fit)^2)/ 1:12
aaa= t(cbind(b.mco,b.armax, b.arimax))
rownames(aaa) = c("MCO","ARMAX","ARIMAX")
colnames(aaa) = id.mois
# Table 10.1
xtable(aaa[,c(2,4,6,8,10,12)],caption="Erreurs quadratiques de prévision pour 1984.", label="eqm84")
# 10.5 Forecasting the untransformed series
# Model of the noise u_t. (A duplicated pair of identical ret.u/ret.z
# assignments in the original was removed.)
coef0=mdarx3c$coef
ret.u= rev(u.3c)[1:13] # 13 reversed values of u_t
ret.z = rev(residuals(mdarx3c))[1:12] # 12 reversed values of z_t
A.u=polynomial(c(1,-coef0[1]))*polynomial(c(1,rep(0,11),-coef0[2]))
A.arr=array(A.u ,c(length(A.u),1,1))
B.arr=array(c(1,coef0[3]),c(2,1,1))
mod.u=ARMA(A=A.arr, B=B.arr)
pred.moy=mdarx3c$coef[4]+
  as.matrix(xreg.p)%*%as.matrix(mdarx3c$coef[5:7])
nsim=10000
pred.y=matrix(NA,ncol=nsim,nrow=12)
# Bug fix: the original line was `set.seed=539`, which only created a variable
# named "set.seed" and left the RNG unseeded; call the function so that the
# simulation below is reproducible.
set.seed(539)
wsim=matrix(rnorm(12*nsim, sd=mdarx3c$sigma2^.5),ncol=nsim,nrow=12)
for ( i in 1:nsim)
{pred.y[,i]=pred.moy + simulate(mod.u, y0=ret.u,
                                noise=list(w=as.matrix(wsim[,i]),w0=ret.z),sampleT=12)$output}
# Back to the original scale (the model was fitted on sqrt(kwh))
pred.kwh=pred.y^2
# Quantiles of the simulated forecasts
quant=apply(pred.kwh,1,quantile,probs=c(.05,.10,.90,.95,.5))
op=par(oma=rep(0,4))
plot(ts(cbind(as.numeric(khct.df.84[,"kwh"]), t(quant)),frequency=12,start=c(1984,1)),
     plot.type="single",lty=c(1,2,3,3,2,1), ylab= "kwh",cex.main=.8, xlab="1984",
     xaxt = "n",col=c(rep("black",5),"grey"))
axis(1, at=seq(1984, 1984.917, length.out=12),labels=id.mois)
title(main="Année 1984 - Intervalle de prédiction interquantiles à 90% et 80% et série", cex.main=0.9)
legend(par("usr")[1], par("usr")[4],c("Série","Intervalle à 90%","Intervalle à 80%","Médiane"),
       lwd=1, lty=c(1,2,3,1),col=c(rep("black",5),"grey"))
par(op)
|
e6fd48c7ee83323cae1336a2fae113286986990e | fc0c33827b13210227f590b11cce3c8b20104e59 | /R/mlClassificationBoosting.R | 3c938c960d3461943c071eee53d787925d84dad0 | [] | no_license | sophieberkhout/jaspMachineLearning | 47a6f581acbeaf9c99d3af558ca3fca6a84b7eb8 | f78574c69e7e3b00362ced4fa229ccb73f14d3d0 | refs/heads/master | 2023-09-06T04:33:53.194899 | 2021-11-24T18:58:22 | 2021-11-24T18:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,363 | r | mlClassificationBoosting.R | #
# Copyright (C) 2013-2021 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Entry point for the JASP "Boosting Classification" analysis.
#
# jaspResults: JASP results container that all output tables/plots are
#              written into.
# dataset:     data handed over by JASP (re-read/validated by the helpers).
# options:     user-interface options for this analysis.
# ...:         unused; kept for compatibility with the JASP analysis API.
#
# Each .ml* helper creates one output element at a fixed 'position' in the
# results pane; the 'ready' flag gates computation until the required
# options have been filled in by the user.
mlClassificationBoosting <- function(jaspResults, dataset, options, ...) {
  # Preparatory work
  dataset <- .mlClassificationReadData(dataset, options)
  .mlClassificationErrorHandling(dataset, options, type = "boosting")
  # Check if analysis is ready to run
  ready <- .mlClassificationReady(options, type = "boosting")
  # Compute results and create the model summary table
  .mlClassificationTableSummary(dataset, options, jaspResults, ready, position = 1, type = "boosting")
  # If the user wants to add the classes to the data set
  .mlClassificationAddPredictionsToData(dataset, options, jaspResults, ready)
  # Add test set indicator to data
  .mlAddTestIndicatorToData(options, jaspResults, ready, purpose = "classification")
  # Create the data split plot
  .mlPlotDataSplit(dataset, options, jaspResults, ready, position = 2, purpose = "classification", type = "boosting")
  # Create the confusion table
  .mlClassificationTableConfusion(dataset, options, jaspResults, ready, position = 3)
  # Create the class proportions table
  .mlClassificationTableProportions(dataset, options, jaspResults, ready, position = 4)
  # Create the validation measures table
  .mlClassificationTableMetrics(dataset, options, jaspResults, ready, position = 5)
  # Create the relative influence table
  .mlBoostingTableRelInf(options, jaspResults, ready, position = 6, purpose = "classification")
  # Create the OOB improvement plot
  .mlBoostingPlotOobImprovement(options, jaspResults, ready, position = 7, purpose = "classification")
  # Create the ROC curve
  .mlClassificationPlotRoc(dataset, options, jaspResults, ready, position = 8, type = "boosting")
  # Create the Andrews curves
  .mlClassificationPlotAndrews(dataset, options, jaspResults, ready, position = 9)
  # Create the deviance plot
  .mlBoostingPlotDeviance(options, jaspResults, ready, position = 10, purpose = "classification")
  # Create the relative influence plot
  .mlBoostingPlotRelInf(options, jaspResults, ready, position = 11, purpose = "classification")
  # Decision boundaries
  .mlClassificationPlotBoundaries(dataset, options, jaspResults, ready, position = 12, type = "boosting")
}
# Fit a gbm boosting model for classification and assemble the result list
# consumed by the shared .mlClassification*/.mlBoosting* output functions.
#
# dataset:     full data set (target + predictors, optional test indicator).
# options:     user-interface options (model choice, split proportions, ...).
# jaspResults: results container; only read here, for the model formula.
#
# Returns a named list with the fitted model, the train/validation/test
# splits, predictions, confusion table, accuracy, AUC and relative influence.
.boostingClassification <- function(dataset, options, jaspResults) {
  # Replace gbm's internal cross-validation helpers with JASP-compatible
  # versions (the fake* functions are defined elsewhere in this package).
  jaspBase:::assignFunctionInPackage(fakeGbmCrossValModelBuild, "gbmCrossValModelBuild", "gbm")
  jaspBase:::assignFunctionInPackage(fakeGbmCrossValErr, "gbmCrossValErr", "gbm")
  # Import model formula from jaspResults
  formula <- jaspResults[["formula"]]$object
  # Set model-specific parameters: with manual optimization the user fixes the
  # tree count; with OOB optimization we grow up to 'maxTrees' and prune later.
  trees <- switch(options[["modelOpt"]],
    "optimizationManual" = options[["noOfTrees"]],
    "optimizationOOB" = options[["maxTrees"]]
  )
  # Split the data into training and test sets
  if (options[["holdoutData"]] == "testSetIndicator" && options[["testSetIndicatorVariable"]] != "") {
    # Select observations according to a user-specified indicator (included when indicator = 1)
    trainingIndex <- which(dataset[, options[["testSetIndicatorVariable"]]] == 0)
  } else {
    # Sample a percentage of the total data set
    trainingIndex <- sample.int(nrow(dataset), size = ceiling((1 - options[["testDataManual"]]) * nrow(dataset)))
  }
  trainingAndValidationSet <- dataset[trainingIndex, ]
  # Create the generated test set indicator
  testIndicatorColumn <- rep(1, nrow(dataset))
  testIndicatorColumn[trainingIndex] <- 0
  # gbm expects the columns in the data to be in the same order as the variables...
  trainingAndValidationSet <- trainingAndValidationSet[, match(names(trainingAndValidationSet), all.vars(formula))]
  if (options[["modelOpt"]] == "optimizationManual") {
    # Just create a train and a test set (no optimization)
    trainingSet <- trainingAndValidationSet
    testSet <- dataset[-trainingIndex, ]
    noOfFolds <- 0
    fit <- gbm::gbm(
      formula = formula, data = trainingSet, n.trees = trees,
      shrinkage = options[["shrinkage"]], interaction.depth = options[["intDepth"]],
      cv.folds = noOfFolds, bag.fraction = options[["bagFrac"]], n.minobsinnode = options[["nNode"]],
      distribution = "multinomial", n.cores = 1, keep.data = TRUE
    ) # Multiple cores breaks modules in JASP, see: INTERNAL-jasp#372
    noOfTrees <- options[["noOfTrees"]]
  } else if (options[["modelOpt"]] == "optimizationOOB") {
    # Create a train, validation and test set (optimization)
    validationIndex <- sample.int(nrow(trainingAndValidationSet), size = ceiling(options[["validationDataManual"]] * nrow(trainingAndValidationSet)))
    testSet <- dataset[-trainingIndex, ]
    validationSet <- trainingAndValidationSet[validationIndex, ]
    trainingSet <- trainingAndValidationSet[-validationIndex, ]
    if (options[["modelValid"]] == "validationManual") {
      noOfFolds <- 0
    } else if (options[["modelValid"]] == "validationKFold") {
      # With k-fold validation gbm performs the splitting itself, so both
      # training and validation refer to the full training+validation data.
      noOfFolds <- options[["noOfFolds"]]
      trainingSet <- trainingAndValidationSet
      validationSet <- trainingAndValidationSet
    }
    # First fit with the maximum number of trees ...
    fit <- gbm::gbm(
      formula = formula, data = trainingSet, n.trees = trees,
      shrinkage = options[["shrinkage"]], interaction.depth = options[["intDepth"]],
      cv.folds = noOfFolds, bag.fraction = options[["bagFrac"]], n.minobsinnode = options[["nNode"]],
      distribution = "multinomial", n.cores = 1, keep.data = TRUE
    ) # Multiple cores breaks modules in JASP, see: INTERNAL-jasp#372
    # ... pick the OOB-optimal tree count, then refit with exactly that many.
    noOfTrees <- gbm::gbm.perf(fit, plot.it = FALSE, method = "OOB")[1]
    fit <- gbm::gbm(
      formula = formula, data = trainingSet, n.trees = noOfTrees,
      shrinkage = options[["shrinkage"]], interaction.depth = options[["intDepth"]],
      cv.folds = noOfFolds, bag.fraction = options[["bagFrac"]], n.minobsinnode = options[["nNode"]],
      distribution = "multinomial", n.cores = 1
    ) # Multiple cores breaks modules in JASP, see: INTERNAL-jasp#372
    validationProbs <- gbm::predict.gbm(fit, newdata = validationSet, n.trees = noOfTrees, type = "response")
    validationPredictions <- colnames(validationProbs)[apply(validationProbs, 1, which.max)]
  }
  # Use the specified model to make predictions for dataset
  dataProbs <- gbm::predict.gbm(fit, newdata = dataset, n.trees = noOfTrees, type = "response")
  dataPredictions <- colnames(dataProbs)[apply(dataProbs, 1, which.max)]
  testPredictions <- dataPredictions[-trainingIndex]
  # Create results object
  result <- list()
  result[["model"]] <- fit
  result[["formula"]] <- formula
  result[["noOfFolds"]] <- noOfFolds
  result[["noOfTrees"]] <- noOfTrees
  # Confusion table and accuracy on the held-out test set.
  result[["confTable"]] <- table("Pred" = testPredictions, "Real" = testSet[, options[["target"]]])
  result[["testAcc"]] <- sum(diag(prop.table(result[["confTable"]])))
  result[["relInf"]] <- summary(fit, plot = FALSE)
  result[["auc"]] <- .classificationCalcAUC(testSet, trainingSet, options, "boostingClassification", noOfFolds = noOfFolds, noOfTrees = noOfTrees)
  result[["ntrain"]] <- nrow(trainingSet)
  result[["ntest"]] <- nrow(testSet)
  result[["testPred"]] <- testPredictions
  result[["testReal"]] <- testSet[, options[["target"]]]
  result[["train"]] <- trainingSet
  result[["test"]] <- testSet
  result[["method"]] <- if (options[["modelValid"]] == "validationManual") "OOB" else ""
  result[["testIndicatorColumn"]] <- testIndicatorColumn
  result[["classes"]] <- dataPredictions
  if (options[["modelOpt"]] != "optimizationManual") {
    # Validation-set diagnostics only exist when optimization was requested.
    result[["validationConfTable"]] <- table("Pred" = validationPredictions, "Real" = validationSet[, options[["target"]]])
    result[["validAcc"]] <- sum(diag(prop.table(result[["validationConfTable"]])))
    result[["nvalid"]] <- nrow(validationSet)
    result[["valid"]] <- validationSet
  }
  return(result)
}
|
e740eb55e24343176f15d3933ebc4fc62a230afb | 1d7c7adfe4190456c0bce74a55f8842f113b54e1 | /man/evaluate_integral.Rd | d54dd12f6fb20e008c61ba1c5fa50aa8ca89591d | [
"MIT"
] | permissive | dmoseev/j0j0r | dd56cfa4e9d80324c2e39872a0f9ab480e0c3ba6 | 38e61ba17d1e8395738379adcb4af3f6ef33824b | refs/heads/master | 2023-07-31T13:57:54.235793 | 2020-05-18T20:52:57 | 2020-05-18T20:52:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 477 | rd | evaluate_integral.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/j0j0_element.R
\name{evaluate_integral}
\alias{evaluate_integral}
\title{evaluate_integral}
\usage{
evaluate_integral(integral_settings, integration_method)
}
\arguments{
\item{integral_settings}{list of settings for the integral}
\item{integration_method}{name of integration method}
}
\value{
\code{numeric} value of integral
}
\description{
evaluates the integral described by \code{integral_settings} using the specified \code{integration_method}
}
|
d92561f15731950a1fabaf816c8082241c371aab | f2570fca876260081b4153f623df3234493520c4 | /Week 4/HW4.R | 6cb006e3a678bb04847e9c86664feceb8812c470 | [] | no_license | pravinkunhiraman/MITAnalyticsEdge | 01dcb2f8a5739e0a707bf87870da4f1e01cd1795 | 3d97177a8c91f4f79fdadd5928096fa91c2dc276 | refs/heads/master | 2020-04-26T18:34:24.551920 | 2019-05-06T15:24:56 | 2019-05-06T15:24:56 | 173,748,227 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,874 | r | HW4.R |
# HW4: linear models and CART regression trees on the state.x77 data set,
# comparing models by their sum of squared errors (SSE).
data(state)
statedata <- data.frame(state.x77)
str(statedata)
summary(statedata)
head(statedata)

# Linear model with all predictors.
LMModel <- lm(Life.Exp ~ ., data = statedata)
summary(LMModel)
# Fix: predict() on an lm takes 'newdata'; the original 'data=' argument was
# silently ignored. With newdata = training data this equals fitted(LMModel),
# so the SSE below is unchanged.
LMPredict <- predict(LMModel, newdata = statedata)
sum((LMPredict - statedata$Life.Exp)^2)  # SSE of the full model

# Reduced linear model keeping four predictors.
LMModel2 <- lm(Life.Exp ~ Population + Murder + Frost + HS.Grad, data = statedata)
summary(LMModel2)
LMPredict2 <- predict(LMModel2, newdata = statedata)
sum((LMPredict2 - statedata$Life.Exp)^2)
cor(statedata)

# Regression trees.
library(rpart)
library(rpart.plot)
TreeModel <- rpart(Life.Exp ~ ., data = statedata)
summary(TreeModel)
prp(TreeModel)
TreePredict <- predict(TreeModel, newdata = statedata)
sum((TreePredict - statedata$Life.Exp)^2)

TreeModel1 <- rpart(Life.Exp ~ ., data = statedata, minbucket = 5)
prp(TreeModel1)
TreePredict1 <- predict(TreeModel1, newdata = statedata)
sum((TreePredict1 - statedata$Life.Exp)^2)

TreeModel2 <- rpart(Life.Exp ~ Area, data = statedata, minbucket = 1)
prp(TreeModel2)
TreePredict2 <- predict(TreeModel2, newdata = statedata)
sum((TreePredict2 - statedata$Life.Exp)^2)

# Cross-validated selection of the complexity parameter cp.
# Install the helper packages only when missing (an unconditional
# install.packages() re-downloads them on every run).
if (!requireNamespace("caret", quietly = TRUE)) install.packages("caret")
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(caret)
library(e1071)
set.seed(111)
tr.control <- trainControl(method = "cv", number = 10)
cp.grid <- expand.grid(.cp = (0:50) * 0.01)
tr <- train(Life.Exp ~ ., data = statedata, method = "rpart", trControl = tr.control, tuneGrid = cp.grid)
tr

# Refit with the cp chosen by cross-validation.
BestTree <- rpart(Life.Exp ~ ., data = statedata, cp = 0.11)
prp(BestTree)
PredictTree <- predict(BestTree, newdata = statedata)
sum((PredictTree - statedata$Life.Exp)^2)

# Same procedure using only Area as predictor.
set.seed(111)
tr.control <- trainControl(method = "cv", number = 10)
cp.grid <- expand.grid(.cp = (0:50) * 0.01)
tr <- train(Life.Exp ~ Area, data = statedata, method = "rpart", trControl = tr.control, tuneGrid = cp.grid)
tr
BestTree2 <- rpart(Life.Exp ~ Area, data = statedata, cp = 0.01)
prp(BestTree2)
# (removed a stray "51e+3" literal, which only printed 51000)
PredictTree2 <- predict(BestTree2, newdata = statedata)
sum((PredictTree2 - statedata$Life.Exp)^2)
|
a78a9b3bef70bf5dc175a905afdd175e152d2561 | 81efde29367a2c093b8ac0994677336e74efb41d | /checkpoints/archive/2010/04.2010/04.20.2010/spacodi/man/as.phylocom.Rd | 4a800b9631dd861f7738fa03f92564d835f7e8c8 | [] | no_license | eastman/spacodiR | 642c05bb40b7ef1446d1d37f529bfa34b312ecba | a8716ab75279914d1f3c65d9c6b071312fe5c1ab | refs/heads/master | 2020-06-07T02:10:35.589573 | 2013-01-15T19:40:31 | 2013-01-15T19:40:31 | 1,932,566 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,213 | rd | as.phylocom.Rd | \name{as.phylocom}
\alias{as.phylocom}
\title{converting between data formats for community phylogenetics}
\usage{as.phylocom(data, outfile = NULL)}
\arguments{
\item{data}{a species-by-plots matrix}
\item{outfile}{an optional text file to which to write output}
}
\details{This utility converts a species-by-plots matrix into \code{triplet} format, which is readable by the external program \code{phylocom}.
The user has the option to save an output file, defined by \code{outfile}.
}
\value{an array, formatted for use in \code{phylocom}}
\references{WEBB CO, DD ACKERLY and SW KEMBEL. 2008. Phylocom: software for the analysis of phylogenetic community structure and trait evolution. Bioinformatics 24:2098-2100.}
\author{Jonathan Eastman}
\seealso{\code{\link{as.spacodi}} for converting between \code{phylocom} and \code{SPACoDi} formats.}
\examples{
# generate a species-by-plots matrix
foo <- r.plot(species=10,plots=6,missing.prop=0.15,sim.tree=FALSE)
# convert to phylocom format
as.phylocom(foo) -> p.foo
p.foo
# convert back to spacodi format
as.spacodi(p.foo) -> s.foo
s.foo
# save the converted dataset to working directory
as.phylocom(s.foo, outfile="phylocom.in.triplet.txt")
} |
a10cfd50c50f6055e24fd531f96821cfa2a2a279 | 2a96855c2e3278507371f7c60b457206c23d3bbc | /Sampling/Faces/Coherence/RUNSTEP.R | 131db1e723371c949ad586c1bb60353921d04553 | [
"MIT"
] | permissive | NeuroStat/replicability_fmri | 698154c030bcf312dd1e809a77812fe3bdf6449a | 7710ed0a8074a668130af15cebfb359e7b4ce4e8 | refs/heads/master | 2022-03-26T17:10:46.346095 | 2020-01-08T20:19:30 | 2020-01-08T20:19:30 | 103,663,214 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,507 | r | RUNSTEP.R | ####################
#### TITLE: Give back the corresponding run and step from a sequence of numbers
#### Contents:
####
#### Source Files: \\FreddieFreeloader/Script.git/Sampling/Coherence
#### First Modified: 24/05/2018
#### Notes:
#################
##
###############
### Notes
###############
##
# FACES dataset
# There are in this scenario:
# A) 50 runs
# B) 46 steps in each run
# C) varying amount of sets in each step within a run
##
###############
### Preparation
###############
##
# Take arguments from master file
args <- commandArgs(TRUE)
# Which index are we in?
# (1-based job index identifying one run/step/set combination)
index <- as.numeric(as.character(args[1]))
##
###############
### RUN and STEP
###############
##
# Number of runs
NRUNS <- 50
# Starting amount of subjects in group analysis
startingTotal <- 10
# Database total
DATTOTAL <- 1400
# Total amount of possible subjects if we have a maximum of 3 disjoint subsets
NTOT <- 460
# Candidate sample sizes 10, 20, ..., 460 (46 steps; see note B above)
steps <- seq(startingTotal, NTOT, by = 10)
# Create the (run, step, set) lookup table: every one of the NRUNS runs
# contains, for each step i, floor(DATTOTAL / steps[i]) disjoint sets.
# The original triple loop grew the data frame with rbind() (quadratic
# copying); laying the columns out directly with rep() produces exactly the
# same rows, in the same order, with the same column names.
sets.per.step <- floor(DATTOTAL / steps)
set.column <- unlist(lapply(sets.per.step, seq_len))
sequence <- data.frame(
  'run'  = rep(seq_len(NRUNS), each = sum(sets.per.step)),
  'step' = rep(rep(seq_along(steps), times = sets.per.step), times = NRUNS),
  'set'  = rep(set.column, times = NRUNS)
)
# Dimension of this data frame
# (total number of jobs across all runs/steps/sets; the command-line 'index'
# must lie in 1:dimSeq for the lookup below to succeed)
dimSeq <- dim(sequence)[1]
# Now we print the run, step and set to the console, which will be read in by
# master file
cat(unlist(c(sequence[index,'run'], sequence[index,'step'],sequence[index,'set'])))
|
7c7d8c16632a57964514fa358e1e2e3abd2b41e5 | 0a524ee05014952d9dc90cd9651f6a2ac0955613 | /HW5DecisionTree.R | ce68996e63460fe68fbd3228f371ddb796d07182 | [] | no_license | shefaliemmanuel/DATA101IntroductionToDataScience | 86ea1fee035a14018a0c6dc91c3eadbb4991cc7c | 9b671c1954254ac3f9cf63d716894142645f530e | refs/heads/master | 2020-07-07T21:12:38.798945 | 2019-12-10T02:38:31 | 2019-12-10T02:38:31 | 203,479,068 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 725 | r | HW5DecisionTree.R | #Shefali Emmanuel
#Data 101 HW5 Decision Tree
#September 13, 2019
install.packages("lattice")
install.packages("ggplot2")
install.packages("rpart")
install.packages("rpart.plot")
library(lattice)
library(ggplot2)
library(rpart)
library(rpart.plot)
data ('iris')
head('iris')
str(iris)
set.seed(9850)
g <- runif(nrow(iris))
iris_ran <- iris[order(g),]
head(iris_ran)
model <- rpart(Species ~ ., data=iris_ran[1:100, ], method='class')
rpart.plot(model, type=4, fallen.leaves = T, extra=104)
model_predict <- predict(model,iris_ran[101:150, ], type ='class')
model_predict
install.packages("caret")
library(caret)
install.packages("e1071")
library(e1071)
confusionMatrix(iris_ran[101:150, 5], reference = model_predict)
|
ceb6678ee8123bd06c20ddd66f1be8b6727a5473 | 373f2abd88834acc0d1ab21ba13ce335600ceb0f | /R/engen2008.r | e2aa794f14142d90136ee27e2410713bf87924ba | [] | no_license | ClementCalenge/adehabitat | fa26e43fba432c29a5757fcd4b5f9ffd972bdd44 | 23ba023d5a57eec861fb6d3d07772cb9d2db6968 | refs/heads/master | 2021-01-22T05:20:32.012511 | 2018-01-28T12:27:32 | 2018-01-28T12:27:32 | 81,652,118 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,980 | r | engen2008.r | engen2008II <- function(us, av, id, nsim=500, nsimra=500)
{
if (is.data.frame(us))
us <- as.matrix(us)
if (is.data.frame(av))
av <- as.matrix(av)
if (ncol(us)!=ncol(av))
stop("us and av should have the same number of column")
if (any(colnames(us)!=colnames(av)))
stop("us and av should have the same column names")
if (length(id)!=nrow(us))
stop("id should have same length as the number of rows in us")
ma <- apply(av,2, function(x) max(table(x)))
ma2 <- apply(us,2, function(x) max(table(x)))
ma <- max(c(ma,ma2))
if (max(ma)==1) {
nsimra <- 1
warning("there were no ties in the data,\n no randomizations were carried out")
}
if (!is.factor(id))
id <- factor(id)
if (min(table(id))<2)
stop("at least two used units are required")
toto <- .C("engen2008r", as.double(t(av)), as.double(t(us)),
as.integer(nrow(av)), as.integer(nrow(us)),
as.integer(ncol(av)), as.integer(as.numeric(id)),
as.integer(nlevels(id)), as.integer(nsim),
double(ncol(av)*5*nsimra), as.integer(nsimra),
PACKAGE="adehabitat")
to <- as.data.frame(matrix(toto[[9]], nrow=nsimra, byrow=TRUE))
res <- list()
res$raw <- lapply(1:ncol(av), function(i) {
oo <- as.data.frame(to[,c(((i-1)*5+1):((i-1)*5+5))])
names(oo) <- c("sigma2", "tau", "rho", "mu", "sd.mu")
return(oo)
})
names(res$raw) <- colnames(av)
res$results <- as.data.frame(do.call("rbind", lapply(res$raw,
function(x) apply(x,2,mean))))
res$results <- res$results[,-c(1:2)]
res$results[,3] <- sqrt(res$results[,3])
class(res) <- "engenetalII"
return(res)
}
print.engenetalII <- function(x, ...)
{
if (!inherits(x, "engenetalII"))
stop("x should be of class \"engenetalII\"")
cat("*************************************\n")
cat("** Method of Engen et al. (2008)\n\n")
cat("Preferences and correlation:\n")
print(x$results)
cat("\nThis data frame is stored in the component $result of this list.\n\n")
}
engen2008I <- function(us, av, nsimra=500)
{
if (is.data.frame(us))
us <- as.matrix(us)
if (is.data.frame(av))
av <- as.matrix(av)
if (ncol(us)!=ncol(av))
stop("us and av should have the same number of column")
if (any(colnames(us)!=colnames(av)))
stop("us and av should have the same column names")
ma <- apply(av,2, function(x) max(table(x)))
ma2 <- apply(us,2, function(x) max(table(x)))
ma <- max(c(ma,ma2))
if (max(ma)==1) {
nsimra <- 1
warning("there were no ties in the data,\n no randomizations were carried out")
}
toto <- .C("engen2008Ir", as.double(t(av)), as.double(t(us)),
as.integer(nrow(av)), as.integer(nrow(us)),
as.integer(ncol(av)), double(ncol(av)*2*nsimra),
as.integer(nsimra), PACKAGE="adehabitat")
to <- as.data.frame(matrix(toto[[6]], nrow=nsimra, byrow=TRUE))
res <- list()
res$raw <- lapply(1:ncol(av), function(i) {
oo <- as.data.frame(to[,c(((i-1)*2+1):((i-1)*2+2))])
names(oo) <- c("mu", "sd.mu")
return(oo)
})
names(res$raw) <- colnames(av)
res$results <- as.data.frame(do.call("rbind", lapply(res$raw,
function(x) apply(x,2,mean))))
res$results <- res$results[,c(1:2)]
res$results[,2] <- sqrt(res$results[,2])
class(res) <- "engenetalI"
return(res)
}
print.engenetalI <- function(x, ...)
{
if (!inherits(x, "engenetalI"))
stop("x should be of class \"engenetalI\"")
cat("*************************************\n")
cat("** Method of Engen et al. (2008)\n\n")
cat("Preferences:\n")
print(x$results)
cat("\nThis data frame is stored in the component $result of this list.\n\n")
}
|
fe874dd6dc4577002a8757a22f7e0bdbc7b1b6b1 | 6601d897b6a632d546ec79643bc926f06d9a9d23 | /R/utils.R | 219966fdd56a4d8b30e4d0de2717b245b85150bb | [] | no_license | daryabusen/fueri | 2e74a7f4e1c7350016020923eb696ecb2062c165 | e2affd97707b397ad107822bb775bb5e9af3cf33 | refs/heads/master | 2021-08-17T02:43:50.113320 | 2017-11-20T17:36:01 | 2017-11-20T17:36:01 | 110,166,160 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 381 | r | utils.R | #' Mean value of a vector
#'
#' @param x a numeric vector
#' @return the mean of a \code{(n x 1)} - vector as a scalar value
#'
#' @examples
#' x = c(2,2,2)
#' mean(x)
#' # [1] 2
#'
#' @author Darya Busen <\email{dasha-89.89@@mail.ru}>
#'
#'
#' Return the rounded mean
mean = function(x) {
n = length(x)
sum = sum(x)
rnd = round(x = (1/n) * sum , digits = 3)
return(rnd)
}
|
28085e7caea57fb125768b6afddd93894e10c2af | fad4d435ac6ac71a39b8b558985b38b95062eb50 | /codedump.R | 068c2bc396e8ab223b1fa068423d7f32f0b3a4b1 | [] | no_license | kjhogan/NFL | cbfbf4080ee82d251d631ebf8e4e5a2e0b4d60ff | de0eb45b0877b7e7a9e7250e8cb01379eec080f6 | refs/heads/master | 2018-11-25T10:01:40.598614 | 2018-10-08T13:55:22 | 2018-10-08T13:55:22 | 103,428,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,857 | r | codedump.R |
id <- "0021700206"
download.file("http://stats.nba.com/stats/playbyplayv2?EndPeriod=10&EndRange=55800&GameID=0021700206&RangeType=2&StartPeriod=1&StartRange=0", "address.json")
web_page <- readLines("address.json")
##regex to strip javascript bits and convert raw to csv format
x1 <- gsub("[\\{\\}\\]]", "", web_page, perl=TRUE)
x2 <- gsub("[\\[]", "\n", x1, perl=TRUE)
x3 <- gsub("\"rowSet\":\n", "", x2, perl=TRUE)
x4 <- gsub(";", ",",x3, perl=TRUE)
nba<-read.table(textConnection(x4), header=T, sep=",", fill=T, skip=2, stringsAsFactors=FALSE)
nba<-nba[!nba$GAME_ID%in% c("headers:","VIDEO_AVAILABLE_FLAG", "0" ), !colnames(nba)%in%"X" ]
nba[nba=="null"]<-NA
nba<-nba[, c("GAME_ID", "EVENTNUM", "EVENTMSGTYPE", "EVENTMSGACTIONTYPE",
"PCTIMESTRING","HOMEDESCRIPTION", "NEUTRALDESCRIPTION" , "VISITORDESCRIPTION", "SCORE",
"PERSON1TYPE", "PLAYER1_ID", "PLAYER1_NAME", "PLAYER1_TEAM_ID", "PLAYER1_TEAM_ABBREVIATION",
"PERSON2TYPE", "PLAYER2_ID", "PLAYER2_NAME", "PLAYER2_TEAM_ID", "PLAYER2_TEAM_ABBREVIATION",
"PERSON3TYPE", "PLAYER3_ID", "PLAYER3_NAME", "PLAYER3_TEAM_ID", "PLAYER3_TEAM_ABBREVIATION" )]
###TIME/QUARTER
nba<-nba[nba$GAME_ID==id, ]
nba$Time<-sapply(strsplit( nba$PCTIMESTRING, ":"), function(x) { as.numeric(head(x, 1))+as.numeric(tail(x, 1))/60 })
#add "quarter" variable
lastPlay<-which(diff(nba$Time)>4)
lastPlay<-c(0, lastPlay, length(nba$Time))
lastPlay<-diff(lastPlay)
#for some reason mapply causes error if lastPlay is all the same number
if(length(unique(lastPlay))==1) {
nba$Quarter<-rep( c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)[1:length(lastPlay)], each=lastPlay[1])
} else {
nba$Quarter<-unlist(mapply(rep, c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)[1:length(lastPlay)], lastPlay))
}
##GET GAME-BOX SCORE########
download.file(paste0("http://stats.nba.com/stats/boxscoretraditionalv2?EndPeriod=10&EndRange=55800&GameID=", id,
"&RangeType=2&Season=2015-16&SeasonType=Regular+Season&StartPeriod=1&StartRange=0"),"box.json")
web_page <- readLines("box.json")
##regex to strip javascript bits and convert raw to csv format
x1 <- gsub("[\\{\\}\\]]", "", web_page, perl=TRUE)
x2 <- gsub("[\\[]", "\n", x1, perl=TRUE)
x3 <- gsub("\"rowSet\":\n", "", x2, perl=TRUE)
x4 <- gsub(";", ",",x3, perl=TRUE)
box<-read.table(textConnection(x4), header=T, sep=",", fill=T, skip=2, stringsAsFactors=FALSE)
box<-box[1:(which(box$GAME_ID=="headers:")-1),!colnames(box)%in% "X"]
#in years <=2007, box-score errors where players not in nba.com box score
if(id=="0020700880"){
box<-rbind.fill(data.frame(PLAYER_ID=2056, TEAM_ID=1610612761,GAME_ID=id, PLAYER_NAME="Primoz Brezec", START_POSITION=""), box)
}
if(id=="0020600448"){
box<-rbind.fill(data.frame(PLAYER_ID=1891, TEAM_ID=1610612742,GAME_ID=id, PLAYER_NAME="Jason Terry", START_POSITION=""), box)
}
###ORGANIZE SUBSTITUTIONS####
nba[, unique(box$PLAYER_ID)]<-0
nba[nba$Quarter==1, unique(box$PLAYER_ID[box$START_POSITION!=""])]<-1
nba$TimeElapsed<-ifelse(nba$Quarter<=4,12-nba$Time, 5-nba$Time)
nba$TimeElapsed<-ifelse(nba$Quarter<=4, nba$TimeElapsed+12*(nba$Quarter-1), nba$TimeElapsed+12*4+5*(nba$Quarter-5))
is_sub<- which(nba$EVENTMSGTYPE==8)
for(i in 1:nrow(nba)){
# put player in if they register a play and were never in for the quarter (exception is technical fouls or weird game-violations --can get these while not in play)
if(!i %in% is_sub &
sum(is.na(nba[i,grepl("DESCRIPT", colnames(nba))]))!=3 &
sum(grepl("T.FOUL|Ejection|Timeout|TECH.FOUL|Unsportsmanlike", nba[i,grepl("DESCRIPT", colnames(nba))]))==0 ){
if(nba$PLAYER1_ID[i]%in% colnames(nba)){
if(sum(nba[nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER1_ID[i])])==0){
nba[nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER1_ID[i])]<-1
}
}
if(nba$PLAYER2_ID[i]%in% colnames(nba)){
if(sum(nba[nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER2_ID[i])])==0){
nba[nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER2_ID[i])]<-1
}
}
if(nba$PLAYER3_ID[i]%in% colnames(nba)){
if(sum(nba[nba$Quarter==nba$Quarter[i],as.character( nba$PLAYER3_ID[i])])==0){
nba[nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER3_ID[i])]<-1
}
}
}
#handling substitution events
if(i %in% is_sub ){
#player enterring
nba[nba$TimeElapsed>nba$TimeElapsed[i]& nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER2_ID[i])]<-1
#player leaving
nba[nba$TimeElapsed>nba$TimeElapsed[i]& nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER1_ID[i])]<-0
#if player was not previously in during the quarter,and he wasn't an immediate sub-in->sub-out, then he must have started the quarter, so put him in for t<T
player_court_sum<-sum(nba[nba$TimeElapsed<=nba$TimeElapsed[i] & nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER1_ID[i])] )
just_subbed_in<-nba[nba$TimeElapsed==nba$TimeElapsed[i]& nba$EVENTMSGTYPE==8, c("PLAYER2_ID")]
if(player_court_sum==0 & !nba$PLAYER1_ID[i]%in% just_subbed_in ){
nba[nba$TimeElapsed<=nba$TimeElapsed[i] & nba$Quarter==nba$Quarter[i], as.character(nba$PLAYER1_ID[i])]<-1
}
}
}
tail(nba[,grepl("EVENTM|PCTIME|HOMED|VISITORD|PLAYER1_NAME|PLAYER1_TEAM_ID|POSS", colnames(nba)) ],20)
#manual OT errors or other errors
source("PBP scrape errors.R")
# EVENTMSGTYPE
# 1 - Make 2 - Miss 3 - Free Throw 4 - Rebound 5 - out of bounds / Turnover / Steal 6 - Personal Foul
# 7 - Violation 8 - Substitution 9 - Timeout 10 - Jumpball 12 - Start Q1? 13 - Start Q2?
# EVENTMSGACTIONTYPE
# 1 - Jumpshot 2 - Lost ball Turnover 3 - ? 4 - Traveling Turnover / Off Foul 5 - Layup
#7 - Dunk 10 - Free throw 1-1 11 - Free throw 1-2 12 - Free throw 2-2 40 - out of bounds
#41 - Block/Steal 42 - Driving Layup 50 - Running Dunk 52 - Alley Oop Dunk 55 - Hook Shot
#57 - Driving Hook Shot 58 - Turnaround hook shot 66 - Jump Bank Shot 71 - Finger Roll Layup 72 - Putback Layup 108 - Cutting Dunk Shot
###POSESSIONS AND SCORING##########
nba$POSSESSION<-nba$PLAYER1_TEAM_ID
#team rebound
nba$POSSESSION[is.na(nba$PLAYER1_TEAM_ID)& !is.na(nba$PLAYER1_ID)& nba$PLAYER1_ID%in% unique(box$TEAM_ID)]<-
nba$PLAYER1_ID[is.na(nba$PLAYER1_TEAM_ID)& !is.na(nba$PLAYER1_ID)& nba$PLAYER1_ID%in% unique(box$TEAM_ID)]
#subs, timeouts, game stoppage don't affect possession
nba$POSSESSION[is_sub]<-NA
nba$POSSESSION[nba$EVENTMSGTYPE==9| nba$PLAYER1_ID==0]<-NA
#a foul means the non-fouling team is in posession
#an offensive foul means the fouling team is in posession
nba$POSSESSION[nba$EVENTMSGTYPE==6& nba$EVENTMSGACTIONTYPE!=4& !is.na(nba$POSSESSION)]<-
sapply(nba$POSSESSION[nba$EVENTMSGTYPE==6& nba$EVENTMSGACTIONTYPE!=4& !is.na(nba$POSSESSION)], function(x) unique(box$TEAM_ID[box$TEAM_ID!=x]))
nba$nextPOSS<-lead(nba$POSSESSION)
nba[ c("POSSESSION","nextPOSS")]<-sapply(nba[, c("POSSESSION","nextPOSS")], na.locf, fromLast=F, na.rm=F)
nba$possComplete<-ifelse(nba$POSSESSION!=nba$nextPOSS,1, 0)
nba$possComplete[nba$EVENTMSGTYPE==12]<-0
nba$possComplete[nba$EVENTMSGTYPE==13]<-1
home<-tail(box$TEAM_ID,1)
nba[, unique(box$PLAYER_ID[box$TEAM_ID!=home]) ][nba[, unique(box$PLAYER_ID[box$TEAM_ID!=home]) ]==1]<-(-1)
nba[, c("HomePTS","AwayPTS")]<-NA
nba$HomePTS[!is.na(nba$SCORE)]<-as.numeric(sapply(strsplit(nba$SCORE[!is.na(nba$SCORE)], " - "),`[[`, 2))
nba$AwayPTS[!is.na(nba$SCORE)]<-as.numeric(sapply(strsplit(nba$SCORE[!is.na(nba$SCORE)], " - "),`[[`, 1))
nba[1, c("HomePTS","AwayPTS")]<-0
nba[, c("HomePTS","AwayPTS")]<-sapply(nba[, c("HomePTS","AwayPTS")], na.locf, fromLast=F)
nba[ ,c("HomePTS","AwayPTS")]<-sapply(nba[, c("HomePTS","AwayPTS")], as.numeric)
###OVERTIME ERROR FIX#########
#automatically fix OT problems by checking to see if there is a player off by exactly 5 minutes--happens if played whole OT w.o. stat
errors<-unname(which(rowSums(nba[, unique(box$PLAYER_ID)])!=0))
if(length(errors)>0 ){ #if there is an error
quarterError<-unique(nba$Quarter[errors])
if(length(quarterError)==1 &quarterError>=5) { #if it is an OT error
stints<-getStint(box, nba)
minSums<-sapply(paste("X",unique(box$PLAYER_ID), sep=""), function(x) sum(stints$TimeEnd[stints[, x]!=0]-stints$TimeStart[stints[, x]!=0]))
minSums<-data.frame(MINS=unname(minSums), ID=gsub("X", "", names(minSums)))
minSums$boxMIN<-box$MIN[match(minSums$ID, box$PLAYER_ID)]
minSums$boxMIN<-as.numeric(sapply(strsplit(minSums$boxMIN,":"), `[[`, 1))+as.numeric(sapply(strsplit(minSums$boxMIN,":"), `[[`, 2))/60
minSums$boxDiff<-minSums$MINS-minSums$boxMIN
fix<- minSums$ID[abs(minSums$boxDiff)>4.9 &abs(minSums$boxDiff)<5.1 ]
nba[nba$Quarter==quarterError,fix]<-1
}
}
nba[, unique(box$PLAYER_ID[box$TEAM_ID!=home]) ][nba[, unique(box$PLAYER_ID[box$TEAM_ID!=home]) ]==1]<-(-1)
#
#homeStart and timeStart should begin at the line before the lineChange, homeend and lineend should be the line before the next linechange,
#possessions should not include the line before the linechange and should go to the line before the next linechange
getStint<-function(box, nba){
lineChange<-coldiffs(as.matrix(nba[, unique(box$PLAYER_ID)]))
lineChange<-c(1, which(sapply(1:nrow(lineChange), function(x) sum(lineChange[x, ]!=0)) !=0))
stints<-data.frame(rbindlist(lapply( 1:length(lineChange), function(i){
if(i!=1){
start<-lineChange[i]+1
} else{
start<-1
}
if(i==length(lineChange)){
end<-nrow(nba)
}else{
end<-lineChange[i+1]
}
data<-data.frame(nba[start, unique(box$PLAYER_ID)])
if(start!=1){
data$HomeStart<-nba$HomePTS[(start-1)]
data$AwayStart<-nba$AwayPTS[(start-1)]
data$TimeStart<-nba$TimeElapsed[start-1]
} else{
data$HomeStart<-nba$HomePTS[(start)]
data$AwayStart<-nba$AwayPTS[(start)]
data$TimeStart<-nba$TimeElapsed[start]
}
data$HomeEnd<-nba$HomePTS[(end)]
data$AwayEnd<-nba$AwayPTS[(end)]
data$TimeEnd<-nba$TimeElapsed[end]
data$POSSESSIONS<-sum(nba$possComplete[(start):(end)])
data$HomePOSS<-sum(nba$possComplete[start:end][which(nba$POSSESSION[start:end]==tail(box$TEAM_ID,1)) ])
data$AwayPOSS<-data$POSSESSIONS-data$HomePOSS
data
})))
stints$GAME_ID<-box$GAME_ID[1]
stints
}
#plot( rowSums(nba[, unique(box$PLAYER_ID)]), main=k)
##use below to inspect errors i.e. if above plot is not a straight horizontal line. handle errors in pbp error script
stints<-getStint(box, nba)
# box[, 1:10]
sapply(paste("X",unique(box$PLAYER_ID), sep=""), function(x) sum(stints$TimeEnd[stints[, x]!=0]-stints$TimeStart[stints[, x]!=0]))
# errors<-unname(which(rowSums(nba[, unique(box$PLAYER_ID[box$TEAM_ID==home])])!=5| rowSums(nba[, unique(box$PLAYER_ID[box$TEAM_ID!=home])])!=-5 ))
# nba[errors,!grepl("TEAM_ID|TEAM_ABBREV|_TYPE|NEUTRAL|PCT|GAME_|EVENTNUM|POSS|poss", colnames(nba)) ]
library(RJSONIO);library(matrixStats);library(data.table);library(dplyr);library(zoo)
options(stringsAsFactors = F)
#setwd("~/RAPM/nba-pbp-data")
####ENTER SEASON AND NAME TO SAVE FILE AS####
season<-"2016-17" #season must be of form 20XX-(XX+1)
fileName<-"PBP17" #save as "PBP(XX+1)" if planning to run RAPM analysis
##SCRAPE TEAM-GAME LOGS TO GET ALL GAMEIDS#####
getGames<-function( Season, Type){
url<-paste0(c("http://stats.nba.com/stats/leaguegamelog?Counter=1000&DateFrom=&DateTo=&Direction=DESC&LeagueID=00&PlayerOrTeam=T&Season=",
Season,"&SeasonType=",Type ,"&Sorter=DATE"), collapse="")
Data<-fromJSON(url)
Data<-unname(unlist(Data$resultSet))
Data<-Data[-1]
storeNames<-Data[1:29]
Data<-Data[-seq(1, 29)]
#cancelled game
if(Season=="2012-13"& Type=="Regular+Season"){
Data<-Data[-c((which(Data=="1610612738")[2]-1):(which(Data=="1610612738")[2]+48))]
}
Data<-data.frame(t(matrix(Data, nrow=29)))
colnames(Data)<-storeNames
if(nrow(Data)>1){
Data$Type<-gsub("[+]", " ", Type)
}
Data
}
getSeason<-function( Season) {
rbindlist(lapply(c("Playoffs", "Regular+Season"), function(x) getGames( Season, x)), fill=T)
}
games<- data.frame(getSeason( season))
head(games)
###LOOP THROUGH ALL GAMEIDS AND SCRAPE/CLEAN PBP#####
nbaList<-list();length(nbaList)<-length(unique(games$GAME_ID))
boxList<-list();length(boxList)<-length(unique(games$GAME_ID))
stintsList<-list();length(stintsList)<-length(unique(games$GAME_ID))
for(k in 1:length(nbaList)){
# k<-1
id<-unique(games$GAME_ID)[k]
source("PBP scrape function.R") ##ignore readLines errors!
#if plot does not =0 all the way through, then print
if(sum(abs(rowSums(nba[, unique(box$PLAYER_ID)])))!=0){
print(k)
}
nbaList[[k]]<-nba
boxList[[k]]<-box
stintsList[[k]]<-stints
}
####CALCULATE STINTS AND SAVE#########
hasPlays<-which(sapply(boxList, function(x) !is.null(x)) )
setdiff(1:length(boxList), hasPlays)
stintsList<-lapply(hasPlays, function(x) getStint(box=boxList[[x]],nba=nbaList[[x]] ))
save(list=ls()[ls()%in%c("games", "nbaList", "boxList", "stintsList")], file=fileName)
pbp_Test <- function(gameId){
download.file("http://stats.nba.com/stats/playbyplayv2?EndPeriod=10&EndRange=55800&GameID=0021700206&RangeType=2&StartPeriod=1&StartRange=0", "test.json")
the.data.file<-fromJSON("test.json")
test <-the.data.file$resultSets$rowSet
test2 <- test[[1]]
test3 <- data.frame(test2)
coltest <- the.data.file$resultSets$headers
colnames(test3) <- coltest[[1]]
return (test3)
}
pbp_Test_SVU <- function(gameId){
#download.file("http://stats.nba.com/stats/playbyplayv2?EndPeriod=10&EndRange=55800&GameID=0021700206&RangeType=2&StartPeriod=1&StartRange=0", "test.json")
the.data.file<-fromJSON("test.json")
moments <- the.data.file$events$moments
return (moments)
}
get_Team_Data <- function(season = "2017-18", start_date = ""){
start_date <- gsub("-", "%2F",start_date)
url <- paste0("http://stats.nba.com/stats/leaguedashteamstats?Conference=&DateFrom=",start_date,"&DateTo=&Division=&GameScope=&GameSegment=&LastNGames=0&LeagueID=00&Location=&MeasureType=Advanced&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=Per100Plays&Period=0&PlayerExperience=&PlayerPosition=&PlusMinus=N&Rank=N&Season=",season,"&SeasonSegment=&SeasonType=Regular+Season&ShotClockRange=&StarterBench=&TeamID=0&VsConference=&VsDivision=")
download.file(url,"teams.json")
team_data <- df_from_JSON("teams.json")
team_data <- team_data %>% mutate_at(vars(GP:PIE_RANK), funs(as.numeric))
return(team_data)
}
df_from_JSON = function(json_file) {
json = fromJSON(json_file)
row_set <- json$resultSets$rowSet
row_set_result <- row_set[[1]]
df <- data.frame(row_set_result, stringsAsFactors = FALSE)
columns <- json$resultSets$headers
colnames(df) <- columns[[1]]
return(df)
}
get_Team_Box <- function(teamId, type = "Advanced", season = "2017-18"){
Sys.sleep(2)
url <- paste0("http://stats.nba.com/stats/teamgamelogs?DateFrom=&DateTo=&GameSegment=&LastNGames=0&LeagueID=00&Location=&MeasureType=",type,"&Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&PerMode=Totals&Period=0&PlusMinus=N&Rank=N&Season=",season,"&SeasonSegment=&SeasonType=Playoffs&ShotClockRange=&TeamID=",teamId,"&VsConference=&VsDivision=")
download.file(url,"box.json")
box_scores <- df_from_JSON("box.json")
box_scores <- box_scores %>% mutate_at(vars(MIN:PIE_RANK), funs(as.numeric))
return(box_scores)
}
get_all_Team_Box <- function(team_df, season = "2017-18", type = "Advanced") {
all_box_scores <- lapply(team_df$TEAM_ID, get_Team_Box, type = type, season = season) %>% do.call(rbind,.)
return(all_box_scores)
}
currentBox %>% ggplot(aes(x=DEF_RATING, y = TEAM_ABBREVIATION)) +
geom_density_ridges(aes(fill = TEAM_ABBREVIATION), color = "white") +
scale_y_discrete(expand = c(.01, 0), limits = rev(currentBox$TEAM_ABBREVIATION)) +
scale_x_continuous(expand = c(.01, 0)) +
labs(title = "2017-18 Defensive Rating by Game", subtitle = "Data from stats.nba.com", x = "Def Rating") +
theme_joy(font_size = 13, grid = TRUE) +
theme (axis.title.y = element_blank(), legend.position = "none") +
scale_fill_viridis(discrete = TRUE, option = "C", alpha = .7)
currentBox %>% ggplot(aes(x=PACE, y = reorder(TEAM_ABBREVIATION, PACE, median))) +
geom_density_ridges(aes(fill = TEAM_ABBREVIATION), color = "white") +
scale_y_discrete(expand = c(.01, 0)) +
scale_x_continuous(expand = c(.01, 0)) +
labs(title = "2017-18 Offensive Rating by Game", subtitle = "data from stats.nba.com", x = "Off Rating") +
theme_joy(font_size = 13, grid = TRUE) +
theme (axis.title.y = element_blank(), legend.position = "none") +
scale_fill_viridis(discrete = TRUE, option = "E", alpha = .7)
|
b04c6352328e06a342cf499e240f1bd878455395 | 75b026f4ff12ae414d61fcf1447e589c5d9b62d7 | /pr/data-raw/DATASET_rho_ci.R | 220b09b63c05dcdf1f1393e163a8fdde15a6e055 | [] | no_license | jpwoeltjen/PersistentRegressors | ccd5fec5e164510e2bb1d5296f3766ca71c71aad | 685e66a8fa7ed2526d2e2e569835fcd7f12005a1 | refs/heads/master | 2020-06-08T18:50:57.776840 | 2019-09-18T07:42:28 | 2019-09-18T07:42:28 | 193,285,696 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 742 | r | DATASET_rho_ci.R |
name = 'df_gls95'
dir_name = paste('/Users/jan/Desktop/PersistentRegressors/ci/',name, '.csv', sep='')
df_gls95 = read.csv(dir_name, header =TRUE, sep=",", dec="." , row.names = 1, check.names=TRUE)
usethis::use_data(df_gls95, overwrite = TRUE)
name = 'df_gls90'
dir_name = paste('/Users/jan/Desktop/PersistentRegressors/ci/',name, '.csv', sep='')
df_gls90 = read.csv(dir_name, header =TRUE, sep=",", dec="." , row.names = 1, check.names=TRUE)
usethis::use_data(df_gls90, overwrite = TRUE)
name = 'df_gls80'
dir_name = paste('/Users/jan/Desktop/PersistentRegressors/ci/',name, '.csv', sep='')
df_gls80 = read.csv(dir_name, header =TRUE, sep=",", dec="." , row.names = 1, check.names=TRUE)
usethis::use_data(df_gls80, overwrite = TRUE)
|
d6f1ce2a401b47d81e2bf62d55497e385eb0d1a8 | a407e7551a05b15a26085369bc4aeeadc80fee87 | /man/raw2intR.Rd | 93fb9a33e8cb0556914d9a0d39980bca6ca09550 | [
"MIT"
] | permissive | tkatsuki/dipr | 8ec80f0aaaec481e6a477f2108365760964e7511 | 2d9adf558a893cd3525ee54c099695c9becae0a4 | refs/heads/master | 2021-06-06T19:54:00.071143 | 2021-04-18T06:04:38 | 2021-04-18T06:04:38 | 63,497,539 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 588 | rd | raw2intR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/raw2intR.R, R/raw2intcpp.R
\name{raw2intR}
\alias{raw2intR}
\title{2 byte raw to integer conversion}
\usage{
raw2intR(obj)
raw2intR(obj)
}
\arguments{
\item{obj}{A target image of Image object or an array.}
\item{ref}{A reference image of Image object or an array.}
\item{obj}{A target image of Image object or an array.}
\item{ref}{A reference image of Image object or an array.}
}
\description{
2 byte raw to integer conversion
2 byte raw to integer conversion
}
\examples{
raw2intcpp()
raw2intcpp()
}
|
e1cfeee614c7bec5e6c40eadc2ec3cd869c49d1e | 6f7ff548a95059157cc23f68c7bc8864588b66ed | /top20pie.R | 6036ae7be8b31b4a14baf02c498b4d0852f53b29 | [] | no_license | hedlundb/LP16S | f92581277bcca35f5290e03d1e4d00bac9f77f38 | c1e21615c9845bc8ce0f7805ce665ed2b22b397e | refs/heads/master | 2020-12-03T08:48:56.499885 | 2020-03-01T01:35:26 | 2020-03-01T01:35:26 | 231,259,858 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,427 | r | top20pie.R | library('ggplot2')
library('phyloseq')
source('tax_glom_fast.R')
#source('run_this.R')
taxpal = read.csv('Genus.taxa.pal.csv', header=TRUE, row.names=1, stringsAsFactors=FALSE)
physeq = snp_physeq
ranks = colnames(tax_table(physeq))
## Make substitutions in the taxonomy.
tax_table(physeq)[,1] = gsub(paste0('^D_0__'), '',tax_table(physeq)[,1])
for(i in 2:length(ranks))
{
tax_table(physeq)[,i] = gsub(paste0('^D_',i-1,'__'), '',tax_table(physeq)[,i])
uncl = grepl('[Uu]nclassified',tax_table(physeq)[,i]) |
grepl('[Uu]ncultured',tax_table(physeq)[,i]) |
grepl('[Mm]etagenome',tax_table(physeq)[,i])
tax_table(physeq)[uncl,i] = ''
tax_table(physeq)[tax_table(physeq)[,i] == '',i] = gsub('Unclassified Unclassified ', 'Unclassified ', paste0('Unclassified ', tax_table(physeq)[tax_table(physeq)[,i] == '', i-1]))
}
## Toy with the taxonomy so that the names are a little more consistent.
newtax = gsub('^D_.__', '', unlist(tax_table(physeq)[,6]))
subsetted = with(sample_data(physeq), {Time %in% c(0,3) & (Group %in% c('Old','New'))})
physeqp = subset_samples(physeq, subsetted)
# ## Tax glom (faster, cause it's slow otherwise.)
# taxglom = do.call('rbind', tapply(1:nrow(otu_table(physeq)), newtax,
# function(x) colSums(otu_table(physeq)[x,])))[,subsetted]
taxglomt = tax_glom_fast(physeqp, 'Genus')
taxglom = otu_table(tax_glom_fast(physeqp, 'Genus'))
taxa_names(taxglom) = tax_table(taxglomt)[,6]
pieglom = do.call('cbind', tapply(1:ncol(taxglom), with(sample_data(physeqp), {paste(Group, Time, Location)}),
function(x) if(length(x) > 1) rowSums(taxglom[,x]) else taxglom[,x]))
## Correct by dividing by sample size.
pieglomfixed = apply(pieglom, 2, function(x) x/sum(x))
## Sort by abundance.
pieglomfixed = pieglomfixed[rev(order(rowSums(pieglomfixed))),]
pieglomhead = pieglomfixed[1:19,]
pieglomhead = rbind(pieglomhead, 1 - colSums(pieglomhead))
rownames(pieglomhead)[nrow(pieglomhead)] = 'ZZ'
rownames(pieglomhead)[grepl('unclassified', rownames(pieglomhead))] = 'ZY'
taxa = sort(rownames(pieglomhead))
taxa_sub = gsub('ZY','Unclassified', taxa)
taxa_sub = gsub('ZZ','<5%', taxa_sub)
pieglomheadpal = setNames(taxpal[rownames(pieglomhead),], rownames(pieglomhead))
pieglomheadpal["ZZ"] = '#BDBDBD'
pieglomheadpal["ZY"] = '#CDCDCD'
library(ggplot2)
# Barplot
pieplots = apply(pieglomhead, 2,
function(dat)
{
df = data.frame(group=rownames(pieglomhead), value = dat)
bp = ggplot(df, aes(x="", y=value, fill = group)) +
geom_bar(width = 1, stat = "identity", color='black') +
coord_polar("y", start=0) +
theme_bw() +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
legend.text=element_text(family='Arial', colour = 'black'),
legend.title=element_text(family='Arial', colour = 'black',face='bold'),
legend.background = element_blank(),
legend.key.size = unit(0.6,"line"),
legend.position = 'none',
axis.ticks = element_blank(),
axis.title.y = element_blank(), #element_text(family='serif',size=10, colour = 'black', face='bold'),
axis.title.x = element_blank(),
plot.background = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
panel.grid = element_blank(),
panel.grid.major = element_blank(),
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
) +
scale_fill_manual(values = pieglomheadpal, labels = taxa_sub)
return(bp)
})
dir.create('piecharts')
lapply(1:length(pieplots),
function(n)
{
fname = paste0('piecharts/', names(pieplots)[n], '.svg')
svglite::svglite(fname)
print(pieplots[[n]])
dev.off()
})
g_legend <- function(a.gplot){
tmp <- ggplot_gtable(ggplot_build(a.gplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
legend
}
plotlegend = g_legend(pieplots[[1]] + theme(legend.position = 'right'))
svglite::svglite('piecharts/legend.svg')
grid::grid.draw(plotlegend)
dev.off()
|
5184afbb606d6190768966a779a26dfc7c77f546 | 9a5cd516300be561dc627ebb3fc07ead2707b502 | /man/as_numeric.Rd | c734f7e324cec78ebc68b38f83bce0c0a9523b25 | [] | no_license | cran/incadata | 11535f59e08977e5cb0dca961ea16f5e723e4b2e | a80e811b5d22ae44d39231b5ed1994653dc01d27 | refs/heads/master | 2021-05-23T02:43:52.314286 | 2020-04-09T07:20:02 | 2020-04-09T07:20:02 | 82,063,922 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 991 | rd | as_numeric.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_numeric.R
\name{as_numeric}
\alias{as_numeric}
\alias{is_numeric}
\title{Test object for, or coerce to, numeric}
\usage{
as_numeric(x)
is_numeric(x)
}
\arguments{
\item{x}{object to be coerced or tested
(and return a logical vector of the same length) or should it test the whole
vector as one object and return a logical vector of length one. (\code{TRUE} by default).}
}
\description{
\code{as_numeric} is essentially a wrapper to \code{as.numeric} except that objects of class factor are first coerced to character and then to numeric.
\code{is_numeric} test if \code{x} is "somehow numeric" (see examples).
}
\examples{
df <- data.frame(v = c("46513", "45"))
class(df$v) # factor
# Note that
as.numeric(df$v) # 2 1
# but
as_numeric(df$v) # 46513 45
is_numeric(1) # TRUE
is_numeric("1") # TRUE
is_numeric(as.factor(1)) # TRUE
is_numeric(as.factor("khb")) # FALSE
}
|
d3d5b64dde916b54564ae57f9b5cef28e429364f | fc46c8b63c5a13c373627c48754063324e4300ca | /예제코드/DBDA2Eprograms/OneOddGroupModelComp2E.R | f52c91d9708aaaebe1dc3f259c3afa46cde9c805 | [] | no_license | Jpub/DBDA | 98cc57fe0d1f47ac9bfbd583a5e765c95d4b683e | f6e51982170bf68b5e4708edb831de7a0735029c | refs/heads/master | 2022-05-04T11:40:46.530475 | 2019-01-02T01:27:06 | 2019-01-02T01:27:06 | 155,650,138 | 3 | 3 | null | 2022-04-08T09:25:45 | 2018-11-01T02:22:52 | R | UTF-8 | R | false | false | 10,222 | r | OneOddGroupModelComp2E.R | # OneOddGroupModelComp2E.R
# Accompanies the book:
# Kruschke, J. K. (2014). Doing Bayesian Data Analysis:
# A Tutorial with R, JAGS, and Stan. 2nd Edition. Academic Press / Elsevier.
# Script setup: close any open graphics devices and clear the workspace.
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace - a known
# scripting anti-pattern, kept here because the book's scripts assume a fresh
# session. Do not source this file from an interactive session with live data.
graphics.off()
rm(list=ls(all=TRUE))
# Utilities script - presumably defines diagMCMC, openGraph, saveGraph,
# plotPost, used below (TODO confirm; it also loads rjags/runjags, hence the
# commented-out require() calls).
source("DBDA2E-utilities.R")
#require(rjags)
#require(runjags)
# Prefix for all output files (MCMC save, diagnostic/posterior figures).
fileNameRoot="OneOddGroupModelComp2E-"
# THE DATA.
# Randomly generated fictitious data.
# For each subject, specify the condition s/he was in,
# the number of trials s/he experienced, and the number correct.
npg = 20 # number of subjects per group
ntrl = 20 # number of trials per subject
CondOfSubj = c( rep(1,npg) , rep(2,npg) , rep(3,npg) , rep(4,npg) )
nTrlOfSubj = rep( ntrl , 4*npg )
set.seed(47405)
condMeans = c(.40,.50,.51,.52)
nCorrOfSubj = c( rbinom(npg,ntrl,condMeans[1]) , rbinom(npg,ntrl,condMeans[2]) ,
rbinom(npg,ntrl,condMeans[3]) , rbinom(npg,ntrl,condMeans[4]) )
nCond = length(unique(CondOfSubj))
nSubj = length(CondOfSubj)
# jitter the data to be as close as possible to desired condition means:
for ( cIdx in 1:nCond ) {
nToAdd = round(condMeans[cIdx]*npg*ntrl)-sum(nCorrOfSubj[CondOfSubj==cIdx])
if ( nToAdd > 0 ) {
for ( i in 1:nToAdd ) {
thisNcorr = ntrl
while ( thisNcorr == ntrl ) {
randSubjIdx = sample(which(CondOfSubj==cIdx),size=1)
thisNcorr = nCorrOfSubj[randSubjIdx]
}
nCorrOfSubj[randSubjIdx] = nCorrOfSubj[randSubjIdx]+1
}
}
if ( nToAdd < 0 ) {
for ( i in 1:abs(nToAdd) ) {
thisNcorr = 0
while ( thisNcorr == 0 ) {
randSubjIdx = sample(which(CondOfSubj==cIdx),size=1)
thisNcorr = nCorrOfSubj[randSubjIdx]
}
nCorrOfSubj[randSubjIdx] = nCorrOfSubj[randSubjIdx]-1
}
}
}
show( aggregate( nCorrOfSubj , by=list(CondOfSubj) , FUN=mean ) / ntrl )
# Package the data:
dataList = list(
nCond = nCond ,
nSubj = nSubj ,
CondOfSubj = CondOfSubj ,
nTrlOfSubj = nTrlOfSubj ,
nCorrOfSubj = nCorrOfSubj
)
#------------------------------------------------------------------------------
# THE MODEL.
# JAGS model for comparing two models via a model-index parameter mdlIdx:
#   M1: each condition has its own mode omega[j] (Diff Omega)
#   M2: all conditions share one mode omega0 (Same Omega)
# Pseudopriors (the informative a/b constants below) are used for the
# parameters of whichever model is NOT currently selected, to improve mixing
# of the categorical mdlIdx sampler (Carlin & Chib style product-space method).
# The entire model is a single string literal passed to JAGS via a text file.
modelString = "
model {
for ( s in 1:nSubj ) {
nCorrOfSubj[s] ~ dbin( theta[s] , nTrlOfSubj[s] )
theta[s] ~ dbeta( aBeta[CondOfSubj[s]] , bBeta[CondOfSubj[s]] )
}
# for ( j in 1:nCond ) {
# # Use omega[j] for model index 1, omega0 for model index 2:
# aBeta[j] <- ( equals(mdlIdx,1)*omega[j]
# + equals(mdlIdx,2)*omega0 ) * (kappa[j]-2)+1
# bBeta[j] <- ( 1 - ( equals(mdlIdx,1)*omega[j]
# + equals(mdlIdx,2)*omega0 ) ) * (kappa[j]-2)+1
# }
# for ( j in 1:2 ) {
# omega[j] ~ dbeta( a[j,mdlIdx] , b[j,mdlIdx] )
# }
# omega[3] <- omega[2]
# omega[4] <- omega[2]
for ( j in 1:nCond ) {
# Use omega[j] for model index 1, omega0 for model index 2:
aBeta[j] <- ( equals(mdlIdx,1)*omega[j]
+ equals(mdlIdx,2)*omega0 ) * (kappa[j]-2)+1
bBeta[j] <- ( 1 - ( equals(mdlIdx,1)*omega[j]
+ equals(mdlIdx,2)*omega0 ) ) * (kappa[j]-2)+1
omega[j] ~ dbeta( a[j,mdlIdx] , b[j,mdlIdx] )
}
omega0 ~ dbeta( a0[mdlIdx] , b0[mdlIdx] )
for ( j in 1:nCond ) {
kappa[j] <- kappaMinusTwo[j] + 2
kappaMinusTwo[j] ~ dgamma( 2.618 , 0.0809 ) # mode 20 , sd 20
}
# Constants for prior and pseudoprior:
aP <- 1
bP <- 1
# a0[model] and b0[model]
a0[1] <- .48*500 # pseudo
b0[1] <- (1-.48)*500 # pseudo
a0[2] <- aP # true
b0[2] <- bP # true
# a[condition,model] and b[condition,model]
a[1,1] <- aP # true
a[2,1] <- aP # true
a[3,1] <- aP # true
a[4,1] <- aP # true
b[1,1] <- bP # true
b[2,1] <- bP # true
b[3,1] <- bP # true
b[4,1] <- bP # true
a[1,2] <- .40*125 # pseudo
a[2,2] <- .50*125 # pseudo
a[3,2] <- .51*125 # pseudo
a[4,2] <- .52*125 # pseudo
b[1,2] <- (1-.40)*125 # pseudo
b[2,2] <- (1-.50)*125 # pseudo
b[3,2] <- (1-.51)*125 # pseudo
b[4,2] <- (1-.52)*125 # pseudo
# Prior on model index:
mdlIdx ~ dcat( modelProb[] )
modelProb[1] <- .5
modelProb[2] <- .5
}
" # close quote for modelstring
# Write the model to disk; run.jags below reads it back from this file.
writeLines( modelString , con="TEMPmodel.txt" )
#------------------------------------------------------------------------------
# INITIALIZE THE CHAINS.
# Let JAGS do it...
#------------------------------------------------------------------------------
# RUN THE CHAINS.
# Monitored nodes: per-condition modes, concentrations, shared mode,
# per-subject thetas, and the model index.
parameters = c("omega","kappa","omega0","theta","mdlIdx")
adaptSteps = 1000 # Number of steps to "tune" the samplers.
burnInSteps = 5000 # Number of steps to "burn-in" the samplers.
nChains = 3 # Number of chains to run.
numSavedSteps=12000 # Total number of steps in chains to save.
thinSteps=20 # Number of steps to "thin" (1=keep every step).
# Alternative plain-rjags driver, kept for reference:
# nPerChain = ceiling( ( numSavedSteps * thinSteps ) / nChains ) # Steps per chain.
# # Create, initialize, and adapt the model:
# jagsModel = jags.model( "TEMPmodel.txt" , data=dataList , # inits=initsList ,
# n.chains=nChains , n.adapt=adaptSteps )
# # Burn-in:
# cat( "Burning in the MCMC chain...\n" )
# update( jagsModel , n.iter=burnInSteps )
# # The saved MCMC chain:
# cat( "Sampling final MCMC chain...\n" )
# codaSamples = coda.samples( jagsModel , variable.names=parameters ,
# n.iter=nPerChain , thin=thinSteps )
# Run the chains in parallel (method index [2] selects "parallel").
runJagsOut <- run.jags( method=c("rjags","parallel")[2] ,
model="TEMPmodel.txt" ,
monitor=parameters ,
data=dataList ,
#inits=initsList ,
n.chains=nChains ,
adapt=adaptSteps ,
burnin=burnInSteps ,
sample=ceiling(numSavedSteps/nChains) ,
thin=thinSteps ,
summarise=FALSE ,
plots=FALSE )
codaSamples = as.mcmc.list( runJagsOut )
# resulting codaSamples object has these indices:
# codaSamples[[ chainIdx ]][ stepIdx , paramIdx ]
# Persist the raw chains so analysis can be re-run without re-sampling.
save( codaSamples , file=paste0(fileNameRoot,"Mcmc.Rdata") )
#-------------------------------------------------------------------------------
# Display diagnostics of chain:
parameterNames = varnames(codaSamples) # get all parameter names
show(parameterNames)
# Convergence diagnostics for a representative subset of parameters
# (diagMCMC comes from the sourced DBDA2E-utilities.R).
for ( parName in c("mdlIdx","omega[1]","omega0","kappa[1]","theta[1]") ) {
diagMCMC( codaSamples , parName=parName ,
saveName=fileNameRoot , saveType="eps" )
}
#------------------------------------------------------------------------------
# EXAMINE THE RESULTS.
mcmcMat = as.matrix(codaSamples,chains=TRUE)
xLim=c(0.35,0.75)
# Display the model index
# Posterior probability of each model = proportion of steps spent in it.
modelIdxSample = mcmcMat[, "mdlIdx" ]
pM1 = sum( modelIdxSample == 1 ) / length( modelIdxSample )
pM2 = 1 - pM1
string1 =paste("p( Diff Omega M1 | D )=",round(pM1,3),sep="")
string2 =paste("p( Same Omega M2 | D )=",round(pM2,3),sep="")
openGraph(10,4)
nStepsToPlot = 1000
# Trace of the model index for the first 1000 steps, to visualize switching.
plot( 1:nStepsToPlot , modelIdxSample[1:nStepsToPlot] , type="l" , lwd=2 ,
xlab="Step in Markov chain" , ylab="Model Index (1, 2)" ,
main=paste(string1,", ",string2,sep="") , col="skyblue" )
saveGraph(file=paste0(fileNameRoot,"MdlIdx"),type="eps")
# Display the omega0 posterior
# omega0 is only meaningful under M2; under M1 its samples come from the
# pseudoprior (hence the panel titles below).
omega0sampleM1 = mcmcMat[, "omega0" ][ modelIdxSample == 1 ]
omega0sampleM2 = mcmcMat[, "omega0" ][ modelIdxSample == 2 ]
openGraph()
layout( matrix(1:2,nrow=2) )
plotPost( omega0sampleM1 , main="Pseudoprior for M = 1 (Diff Omega)" ,
xlab=expression(omega[0]) , xlim=xLim )
plotPost( omega0sampleM2 , main="Posterior for M = 2 (Same Omega)" ,
xlab=expression(omega[0]) , xlim=xLim )
saveGraph(file=paste0(fileNameRoot,"Omega0"),type="eps")
# Display the omega[j] posterior
# Split each omega[j]'s samples by which model was active at that step:
# under M1 they are true posteriors; under M2 they are pseudoprior draws.
omega1sampleM1 = mcmcMat[, "omega[1]" ][ modelIdxSample == 1 ]
omega2sampleM1 = mcmcMat[, "omega[2]" ][ modelIdxSample == 1 ]
omega3sampleM1 = mcmcMat[, "omega[3]" ][ modelIdxSample == 1 ]
omega4sampleM1 = mcmcMat[, "omega[4]" ][ modelIdxSample == 1 ]
omega1sampleM2 = mcmcMat[, "omega[1]" ][ modelIdxSample == 2 ]
omega2sampleM2 = mcmcMat[, "omega[2]" ][ modelIdxSample == 2 ]
omega3sampleM2 = mcmcMat[, "omega[3]" ][ modelIdxSample == 2 ]
omega4sampleM2 = mcmcMat[, "omega[4]" ][ modelIdxSample == 2 ]
openGraph(10,5)
# 2x4 grid: top row = posteriors under M1, bottom row = pseudopriors under M2.
layout( matrix(1:8,nrow=2,byrow=T) )
plotPost( omega1sampleM1 , main="Posterior for M = 1 (Diff Omega)" ,
xlab=expression(omega[1]) , xlim=xLim )
plotPost( omega2sampleM1 , main="Posterior for M = 1 (Diff Omega)" ,
xlab=expression(omega[2]) , xlim=xLim )
plotPost( omega3sampleM1 , main="Posterior for M = 1 (Diff Omega)" ,
xlab=expression(omega[3]) , xlim=xLim )
plotPost( omega4sampleM1 , main="Posterior for M = 1 (Diff Omega)" ,
xlab=expression(omega[4]) , xlim=xLim )
plotPost( omega1sampleM2 , main="Pseudoprior for M = 2 (Same Omega)" ,
xlab=expression(omega[1]) , xlim=xLim )
plotPost( omega2sampleM2 , main="Pseudoprior for M = 2 (Same Omega)" ,
xlab=expression(omega[2]) , xlim=xLim )
plotPost( omega3sampleM2 , main="Pseudoprior for M = 2 (Same Omega)" ,
xlab=expression(omega[3]) , xlim=xLim )
plotPost( omega4sampleM2 , main="Pseudoprior for M = 2 (Same Omega)" ,
xlab=expression(omega[4]) , xlim=xLim )
saveGraph(file=paste0(fileNameRoot,"OmegaCond"),type="eps")
# Display the differences of omega[j]'s
# All pairwise differences of condition modes, under M1 only, each compared
# against 0 to assess credible condition differences.
omegaSample = rbind( omega1sampleM1 , omega2sampleM1 , omega3sampleM1 , omega4sampleM1 )
openGraph(10,5)
layout( matrix(1:6,nrow=2,ncol=3,byrow=T) )
xmin = -0.25
xmax = 0.25
for ( i in 1:3 ) {
for ( j in (i+1):4 ) {
plotPost( omegaSample[i,]-omegaSample[j,] , compVal=0.0 ,
xlab=bquote(omega[.(i)]-omega[.(j)]) ,
#breaks=unique( c( min(c(xmin,omegaSample[i,]-omegaSample[j,])),
# seq(xmin,xmax,len=20),
# max(c(xmax,omegaSample[i,]-omegaSample[j,])) )) ,
main="" , xlim=c(xmin,xmax) )
}
}
saveGraph(file=paste0(fileNameRoot,"OmegaDiff"),type="eps")
|
ffabbf008783474473e0f03313f6c4453720bf98 | aaa77251f64018398dcc9c23f800f7478eb15798 | /HousingData_Modeling.R | cf1bb984901898b8d2906a2e9a62fe49d4d120a5 | [
"MIT"
] | permissive | amotter443/r-python-housing | 1c9433ec6b21c5f722660e04f5fad49a81239f52 | 7e1f68875df9390e3f3cb85d6220c062f41eb22a | refs/heads/master | 2022-12-23T23:16:59.470045 | 2020-09-25T18:18:13 | 2020-09-25T18:18:13 | 297,347,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,159 | r | HousingData_Modeling.R | #Part 2: Modeling
# Load packages and the cleaned housing data (output of the Part 1 script),
# then prune weakly correlated / collinear predictors before modeling.
library(readxl)
library(dplyr)
library(MASS)
start_time<-Sys.time()
housing_data<-read.csv('housingdata_clean.csv',stringsAsFactors = FALSE)
# Rows with a known SalePrice are the labeled (training) portion.
train<-housing_data[!is.na(housing_data$SalePrice),]
#Think these plots may be problematic and need removing
plot(housing_data$LowQualFinSF,housing_data$SalePrice)
plot(housing_data$EnclosedPorch,housing_data$SalePrice)
plot(housing_data$PoolArea,housing_data$SalePrice)
cor(train$SalePrice,train$LowQualFinSF)
cor(train$SalePrice,train$EnclosedPorch)
cor(train$SalePrice,train$PoolArea)
#remove all bc all have very weak correlation with SalePrice
# NOTE: drops are by column position, so each drop shifts later indices -
# the order of these three lines matters.
housing_data<-housing_data[,-60]
housing_data<-housing_data[,-41]
housing_data<-housing_data[,-61]
#Collected all numeric columns to check for multicollinearity, need for
#standardization/transformation
num_columns<-c("LotFrontage","LotArea","MasVnrArea","TotalBsmtSF","BsmtUnfSF",
"FirstSF","SecondSF","GrLivArea","GarageArea","GarageYrBlt",
"WoodDeckSF","OpenPorchSF","ThreeSsnPorch","ScreenPorch","Age",
"YrSinceRemodel","BsmtPUnf","Bathrooms")
#Generate Correlation table and visual correlation matrix
# FIX: name the result cor_matrix instead of `cor`, which shadowed base::cor().
cor_matrix<-cor(train[,num_columns])
library(corrplot)
corrplot(cor_matrix, method="circle")
#Collinearity between Lot Frontage and Lot area 0.62409014
#Despite high collinearity, keeping because both are strong predictors of SalePrice
#Collinearity between Age and GarageYrBuilt 0.84514067
cor(train$SalePrice,train$Age)
cor(train$SalePrice,train$GarageYrBlt)
#Removing GarageYrBuilt bc weaker correlation to SalePrice than Age
housing_data<-housing_data[-50]
num_columns<-num_columns[-10]
#Collinearity between BsmtPUnf and BsmtFinSF1 0.708626367
#BsmtPUnf and BsmtUnfSF 0.826271944
#Keeping BsmtPUnf because it represents proportion and not raw values, getting rid of
#BsmtUnfSF to prevent interference in OLS
housing_data<-housing_data[-33]
num_columns<-num_columns[-5]
#Model 1: OLS
#Split into train/test & collect RSMLE before transformations
train<-housing_data[!is.na(housing_data$SalePrice),]
test<-housing_data[is.na(housing_data$SalePrice),]
# Baseline OLS on all remaining predictors, before any transformations.
ols_model1 <- lm(SalePrice~.,data=train)
summary(ols_model1)
#Performing Transformations to see if they can improve the overall OLS model
plot(housing_data$LotArea,housing_data$SalePrice)
plot(sqrt(housing_data$LotArea),housing_data$SalePrice)
plot((1/(1+housing_data$LotArea)),housing_data$SalePrice)
plot(log(housing_data$LotArea),housing_data$SalePrice)
#going with the log
housing_data$LotArea<-log(housing_data$LotArea)
# FIX: detect -Inf (log of zero) with is.infinite() instead of comparing a
# numeric column to the string "-Inf", which relied on implicit coercion to
# character and would fail on NA entries.
housing_data[is.infinite(housing_data$LotArea),'LotArea']<-0
plot(housing_data$YrSinceRemodel^(1/3),housing_data$SalePrice)
#Using cubed root to show negative linear relationship
housing_data$YrSinceRemodel<-housing_data$YrSinceRemodel^(1/3)
plot(housing_data$TotalBsmtSF,housing_data$SalePrice)
# Replace extreme basement sizes (>5000 sq ft) with the rounded column mean.
replace<-round(mean(housing_data$TotalBsmtSF))
housing_data[housing_data$TotalBsmtSF>5000,'TotalBsmtSF']<-replace
plot(log(housing_data$TotalBsmtSF),housing_data$SalePrice)
housing_data$TotalBsmtSF<-log(housing_data$TotalBsmtSF)
housing_data[is.infinite(housing_data$TotalBsmtSF),'TotalBsmtSF']<-0
#Removed high leverage point in 1stFlrSf
housing_data[housing_data$FirstSF>4000,'FirstSF']<-mean(housing_data$FirstSF)
#Removed high leverage point in BsmtFinSF1
housing_data[housing_data$BsmtFinSF1>3000,'BsmtFinSF1']<-mean(housing_data$BsmtFinSF1)
plot(housing_data$GrLivArea,housing_data$SalePrice)
# High-leverage large homes: replace oversized GrLivArea values (including
# unlabeled test rows) with the training mean.
replace<-mean(train$GrLivArea)
housing_data[which(housing_data$GrLivArea>4000 & housing_data$SalePrice<=700000|housing_data$GrLivArea>4000 & is.na(housing_data$SalePrice)),'GrLivArea']<-replace
plot(log(housing_data$GarageArea),housing_data$SalePrice)
housing_data$GarageArea<-log(housing_data$GarageArea)
housing_data[is.infinite(housing_data$GarageArea),'GarageArea']<-0
#Improved from 0.000423 -> 0.000395
housing_data$ScreenPorch<-3*housing_data$ScreenPorch
#Re-run the OLS to see how R Squared affected by transformations
train<-housing_data[!is.na(housing_data$SalePrice),]
test<-housing_data[is.na(housing_data$SalePrice),]
ols_model2 <- lm(SalePrice~.,data=train)
summary(ols_model2)
#Model 2: Stepwise OLS
# The stepAIC searches were run once (commented out, slow) and the selected
# formulas were pasted in below as fixed lm() fits.
#step <- stepAIC(ols_model2, direction="both")
#step$anova
step_both<-lm(SalePrice ~ MSSubClass + LotFrontage + LotArea + Utilities +
LotConfig + LandSlope + Condition1 + OverallQual + OverallCond +
RoofStyle + RoofMatl + Exterior1st + MasVnrArea + ExterQual +
ExterCond + BsmtQual + BsmtExposure + BsmtFinSF1 + BsmtFinType2 +
BsmtFinSF2 + TotalBsmtSF + Heating + Electrical + FirstSF +
SecondSF + GrLivArea + BedroomAbvGr + KitchenAbvGr + KitchenQual +
Functional + GarageType + GarageFinish + GarageCars + GarageArea +
GarageQual + WoodDeckSF + ScreenPorch + PoolQC + NumFloors +
BsmtPUnf + Age + New + Commercial + FireplaceQu,data=train)
summary(step_both)
#step_forward <- stepAIC(ols_model2, direction="forward")
#step_forward$anova
# Forward selection retained essentially the full predictor set.
step_forward<-lm(SalePrice ~ MSSubClass + LotFrontage + LotArea + Street + Alley +
LotShape + LandContour + Utilities + LotConfig + LandSlope +
Neighborhood + Condition1 + Condition2 + BldgType + OverallQual +
OverallCond + RoofStyle + RoofMatl + Exterior1st + Exterior2nd +
MasVnrType + MasVnrArea + ExterQual + ExterCond + Foundation +
BsmtQual + BsmtCond + BsmtExposure + BsmtFinType1 + BsmtFinSF1 +
BsmtFinType2 + BsmtFinSF2 + TotalBsmtSF + Heating + HeatingQC +
CentralAir + Electrical + FirstSF + SecondSF + GrLivArea +
BedroomAbvGr + KitchenAbvGr + KitchenQual + TotRmsAbvGrd +
Functional + Fireplaces + FireplaceQu + GarageType + GarageFinish +
GarageCars + GarageArea + GarageQual + GarageCond + PavedDrive +
WoodDeckSF + OpenPorchSF + ThreeSsnPorch + ScreenPorch +
PoolQC + Fence + MiscFeature + MiscVal + MoSold + YrSold +
SaleType + SaleCondition + NumFloors + HouseFinish + BsmtPUnf +
Bathrooms + Age + YrSinceRemodel + New + Commercial,data=train)
summary(step_forward)
#step <- stepAIC(ols_model2, direction="backward")
#step$anova
# Backward elimination converged to the same formula as direction="both".
step_backward<-lm(SalePrice ~ MSSubClass + LotFrontage + LotArea + Utilities +
LotConfig + LandSlope + Condition1 + OverallQual + OverallCond +
RoofStyle + RoofMatl + Exterior1st + MasVnrArea + ExterQual +
ExterCond + BsmtQual + BsmtExposure + BsmtFinSF1 + BsmtFinType2 +
BsmtFinSF2 + TotalBsmtSF + Heating + Electrical + FirstSF +
SecondSF + GrLivArea + BedroomAbvGr + KitchenAbvGr + KitchenQual +
Functional + GarageType + GarageFinish + GarageCars + GarageArea +
GarageQual + WoodDeckSF + ScreenPorch + PoolQC + NumFloors +
BsmtPUnf + Age + New + Commercial,data=train)
summary(step_backward)
#Split into validate train and test randomly
# 70/30 split of the labeled data; trailing comments record observed RMSLEs.
set.seed(75)
train_ind <- sample(seq_len(nrow(train)), size = floor(0.7 * nrow(train)))
validate_train <- train[train_ind, ]
validate_test <- train[-train_ind, ]
library(Metrics)
ols.fitted <- predict(ols_model1,newdata=validate_test)
# rmsle() cannot take negative predictions, so flip their sign first.
ols.fitted[which(ols.fitted<0)]<-abs(ols.fitted[which(ols.fitted<0)])
error<-rmsle(validate_test$SalePrice,ols.fitted)
error #0.2421673
ols.fitted <- predict(ols_model2,newdata=validate_test)
ols.fitted[which(ols.fitted<0)]<-abs(ols.fitted[which(ols.fitted<0)])
error<-rmsle(validate_test$SalePrice,ols.fitted)
error #0.1498343
both.fitted <- predict(step_both,newdata=validate_test)
error<-rmsle(validate_test$SalePrice,both.fitted)
error #0.1469933
forward.fitted <- predict(step_forward,newdata=validate_test)
forward.fitted[which(forward.fitted<0)]<-abs(forward.fitted[which(forward.fitted<0)])
error<-rmsle(validate_test$SalePrice,forward.fitted)
error #0.1498343
back.fitted <- predict(step_backward,newdata=validate_test)
error<-rmsle(validate_test$SalePrice,back.fitted)
error #0.1469933
#Going with Both as it has the lowest RMSLE
ols_model<-step_both
#Model 3: Polynomial Regression
#For poly put all the features in and put 2nd degree polynomial for each of the ones that
#have an apparently quadratic relationship
# Same predictor set as step_forward, with raw 2nd-degree polynomials on
# FirstSF and Age.
poly_model<-lm(SalePrice ~ MSSubClass + LotFrontage + LotArea + Street + Alley +
LotShape + LandContour + Utilities + LotConfig + LandSlope +
Neighborhood + Condition1 + Condition2 + BldgType + OverallQual +
OverallCond + RoofStyle + RoofMatl + Exterior1st + Exterior2nd +
MasVnrType + MasVnrArea + ExterQual + ExterCond + Foundation +
BsmtQual + BsmtCond + BsmtExposure + BsmtFinType1 + BsmtFinSF1 +
BsmtFinType2 + BsmtFinSF2 + TotalBsmtSF + Heating + HeatingQC +
CentralAir + Electrical + poly(FirstSF,2,raw=T) + SecondSF + GrLivArea +
BedroomAbvGr + KitchenAbvGr + KitchenQual + TotRmsAbvGrd +
Functional + Fireplaces + FireplaceQu + GarageType + GarageFinish +
GarageCars + GarageArea + GarageQual + GarageCond + PavedDrive +
WoodDeckSF + OpenPorchSF + ThreeSsnPorch + ScreenPorch +
PoolQC + Fence + MiscFeature + MiscVal + MoSold + YrSold +
SaleType + SaleCondition + NumFloors + HouseFinish + BsmtPUnf +
Bathrooms + poly(Age,2,raw=T) + YrSinceRemodel + New + Commercial,
data=train)
#Also tried Poly on TotalBsmtSF, GrLivArea,and but they decreased model RMSLE
summary(poly_model)
poly.fitted <- predict(poly_model,newdata=validate_test)
error<-rmsle(validate_test$SalePrice,poly.fitted)
error #0.1425319
#Model 4: Ridge, Lasso, Elastic Net Regression
library(glmnet)
train<-housing_data[!is.na(housing_data$SalePrice),]
test<-housing_data[is.na(housing_data$SalePrice),]
validate_train <- train[train_ind, ]
validate_test <- train[-train_ind, ]
# glmnet takes matrices; column 67 is SalePrice and is excluded from x.
y<-validate_train$SalePrice
x<-data.matrix(validate_train[,-67])
xnew<-data.matrix(validate_test[,-67])
# Cross-validate lambda once (at alpha=0) and reuse it across the alpha grid.
cv.out <- cv.glmnet(x, y, alpha=0, nlambda=100, lambda.min.ratio=0.0001)
plot(cv.out)
best.lambda <- cv.out$lambda.min
best.lambda #was 24821.9, now 6876.934
# Grid-search alpha from 0 (ridge) to 1 (LASSO) in steps of 0.1.
# BUG FIX: the original wrote rmsles[i] with i starting at 0; in R, x[0] <- v
# is a silent no-op, so the ridge (alpha = 0) result was dropped. Preallocate
# and index with i + 1, so rmsles[k] corresponds to alpha = (k - 1)/10.
rmsles <- numeric(11)
# alpha 0 ridge, alpha 1 LASSO
for (i in 0:10) {
model<-glmnet(x,y,alpha=i/10,nlambda = 100, lambda.min.ratio=0,standardize = TRUE)
fitted.results <- predict(model,s=best.lambda,newx=xnew)
fitted.results<-abs(fitted.results)
rmsles[i + 1]<-rmsle(validate_test$SalePrice,fitted.results)
}
rmsles
min(rmsles)
rmsles[8] # alpha = 0.7 under the corrected indexing
#Elastic net chosen, alpha=0.2 on 50% validation set and 0.7 on 70% validation train
enet_model<-glmnet(x,y,alpha=0.7,nlambda = 100, lambda.min.ratio=0,standardize = TRUE)
enet.fitted<-predict(enet_model,s=best.lambda,newx=xnew)
error<-rmsle(validate_test$SalePrice,enet.fitted)
error #0.1648151
#Random Forest
library(randomForest)
# mtry search was run once (slow) and its per-mtry RMSLEs pasted in below.
#error.bag<-0
#for (i in 1:74){
# bag.house=randomForest(SalePrice~.,mtry=i,validate_train,importance=TRUE)
# yhat.house = predict(bag.house,mtry=i,newdata=validate_test)
# error.bag[i]<-rmsle(validate_test$SalePrice,yhat.house)
#}
error.bag<-c(0.2274555,0.1718773,0.1611817,0.1566194,0.1551772,0.1525404,
0.1510553,0.1520002,0.1517172,0.1507572,0.1503473,0.1505908,
0.1507651,0.1503753,0.1512496,0.1500346,0.1495613,0.1511179,
0.1499379,0.1504011,0.1504805,0.1509515,0.1505135,0.1507352,
0.1509615,0.1500946,0.1500345,0.1506053,0.1510781,0.1506822,
0.1519326,0.1510075,0.1501223,0.1508313,0.1509766,0.1519841,
0.1507625,0.1504607,0.1505907,0.1501098,0.1505320,0.1503323,
0.1510927,0.1504840,0.1505804,0.1512766,0.1509918,0.1515258,
0.1512600,0.1517276,0.1508588,0.1514667,0.1521470,0.1509810,
0.1510652,0.1514559,0.1513501,0.1521374,0.1512269,0.1524595,
0.1519946,0.1517997,0.1520564,0.1525723,0.1523241,0.1519591,
0.1518582,0.1528970,0.1533325,0.1524626,0.1523205,0.1532770,
0.1527862,0.1539374)
min(error.bag)
#Minimum at 29
# NOTE(review): the minimum of this hardcoded vector is 0.1495613 at index 17,
# not index 29 - the pasted values may come from a different run than the one
# that selected mtry=29. Confirm before relying on mtry=29 below.
error.bag[29]
bag.house=randomForest(SalePrice~.,mtry=29,validate_train,importance=TRUE)
yhat.house = predict(bag.house,mtry=29,newdata=validate_test)
error.bag<-rmsle(validate_test$SalePrice,yhat.house)
error.bag # 0.1355805
# Final random forest refit on the full labeled training set.
random_model<-randomForest(SalePrice~.,mtry=29,train,importance=TRUE)
#Gradient Boosting model
library(xgboost)
library(caret)
# Train on the full labeled set; evaluate on the held-out validate_test rows.
trainvalidatex<-data.matrix(train[,-67])
trainvalidatey<-train$SalePrice
dtrain <- xgb.DMatrix(data = trainvalidatex,label = trainvalidatey)
dtest <- xgb.DMatrix(data = data.matrix(validate_test[,-67]),label=validate_test$SalePrice)
# Hyperparameters below came from a one-off caret 5-fold CV search (commented).
#cv <- train(SalePrice ~., data = train, method = "xgbTree",
#trControl = trainControl("cv", number = 5))
#cv$bestTune
boost_model <- xgboost(dtrain,nrounds=150,max_depth=3,eta=0.3,gamma=0,
colsample_bytree=0.8,min_child_weight=1,subsample=1)
boost.fitted<- predict(boost_model, newdata=dtest)
# NOTE(review): validate_test rows are part of the boosting training data
# here, so this RMSLE is optimistic - confirm intent.
serror<-rmsle(validate_test$SalePrice,boost.fitted)
serror #0.05523038
#Apply final model to test set
# Blend boosting with the polynomial model (then with the random forest) over
# mixing weights alpha = 0, 0.1, ..., 1 and compare validation RMSLEs.
# BUG FIX: the original wrote pred_rmsles[i] with i starting at 0; x[0] <- v
# is a silent no-op in R, so the alpha = 0 (pure boosting) result was dropped.
# Index with i + 1, so pred_rmsles[k] corresponds to alpha = (k - 1)/10.
boost.predict<-predict(boost_model,newdata=data.matrix(validate_test)[,-67])
poly.predict<-predict(poly_model,newdata=validate_test)
pred_rmsles <- numeric(11)
for (i in 0:10){
replace<-((1-(i/10))*boost.predict)+((i/10)*poly.predict)
replace[which(replace<0)]<-abs(replace[which(replace<0)])
pred_rmsles[i + 1]<-rmsle(validate_test$SalePrice,replace)
}
pred_rmsles
min(pred_rmsles) #minmum 0.06294109 at alpha = 0 (recorded under the old indexing)
#100% boosting 0% poly
boost.predict<-predict(boost_model,newdata=data.matrix(validate_test)[,-67])
rforrest.predict<-predict(random_model,newdata = validate_test)
pred_rmsles <- numeric(11)
for (i in 0:10){
replace<-((1-(i/10))*boost.predict)+((i/10)*rforrest.predict)
pred_rmsles[i + 1]<-rmsle(validate_test$SalePrice,replace)
}
pred_rmsles
min(pred_rmsles) #0.05155053 (recorded under the old indexing)
#Min Alpha = 5, 0.5 boost and 0.5 random forrest
# Repeat the blend comparison on a fresh 50/50 validation split.
# BUG FIX (same as above): pred_rmsles[i] with i starting at 0 silently
# dropped the alpha = 0 result; index with i + 1 instead.
train_ind <- sample(seq_len(nrow(train)), size = floor(0.5 * nrow(train)))
validate_train <- train[train_ind, ]
validate_test <- train[-train_ind, ]
boost.predict<-predict(boost_model,newdata=data.matrix(validate_test)[,-67])
poly.predict<-predict(poly_model,newdata=validate_test)
pred_rmsles <- numeric(11)
for (i in 0:10){
replace<-((1-(i/10))*boost.predict)+((i/10)*poly.predict)
replace[which(replace<0)]<-abs(replace[which(replace<0)])
pred_rmsles[i + 1]<-rmsle(validate_test$SalePrice,replace)
}
pred_rmsles
min(pred_rmsles) #Min 0.05955391 (recorded under the old indexing)
#Alpha = 0, 100% boost and 0% poly
boost.predict<-predict(boost_model,newdata=data.matrix(validate_test)[,-67])
rforrest.predict<-predict(random_model,newdata = validate_test)
pred_rmsles <- numeric(11)
for (i in 0:10){
replace<-((1-(i/10))*boost.predict)+((i/10)*rforrest.predict)
pred_rmsles[i + 1]<-rmsle(validate_test$SalePrice,replace)
}
pred_rmsles
min(pred_rmsles) # 0.05242982 (recorded under the old indexing)
#Min is also Alpha = 4, 0.6 boost and 0.4 random forrest
#Going with alpha of 0.5 between boost and random forrest for lowest RMSLE
# Final predictions on the unlabeled test rows and CSV export.
poly.predict<-predict(poly_model,newdata=test)
boost.predict<-predict(boost_model,newdata=data.matrix(test)[,-67])
rforrest.predict<-predict(random_model,newdata = test)
housing_data$ID<-seq(1:nrow(housing_data))
train<-housing_data[!is.na(housing_data$SalePrice),]
test<-housing_data[is.na(housing_data$SalePrice),]
# NOTE(review): the comment above this section says "alpha of 0.5 between
# boost and random forrest", but this line blends 0.8*boost + 0.2*poly and
# rforrest.predict is never used - confirm which ensemble was intended.
test$SalePrice<-((1-(2/10))*boost.predict)+((2/10)*poly.predict)
test<-data.frame(test$ID,test$SalePrice)
colnames(test)<-c("ID","SalePrice")
#Calculate runtime
end_time<-Sys.time()
print(end_time-start_time)
write.csv(test,file="best_model.csv",row.names = F)
|
950bef73d06659f4b49f37fb2fbdc56481e8bbb0 | 97f72636249030afadffe1551bc91a005fc5c68e | /Model4/Chain3/LIDEL_MCMC_setup_trial3.R | b908804a6d92bea717d08d4aac9d9594e0bf4d97 | [] | no_license | eecampbell/LIDEL | 7cf251b6c3e565188562668a80bb662212d27425 | 29de2a22e3c29e1493d6c6a137080e1a7f84d9d5 | refs/heads/master | 2021-01-20T11:42:06.590595 | 2015-11-25T17:39:05 | 2015-11-25T17:39:05 | 44,201,733 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 14,572 | r | LIDEL_MCMC_setup_trial3.R | ##########SETUP FOR MCMC##########
#using all data
#Nell Campbell
#moved to separate file 11/17/14
##################################
#####iterations and chains
#Set up the number of iterations. While you are getting things working, this can be small, ca 5000
n.chain=1
num_litter=5
# Names of the measured data streams: day index, mass remaining (C1-C5 sum),
# DOC (C6), and CO2 (C7).
model_names=c("Day", "C1_5", "C6", "C7")
#####specify inputs to model
#name all inputs, including parameters and derived values
# litter_nam and d_l_list are defined by an earlier script in this chain's
# workflow - presumably litter-type names and the measured-data list
# (TODO confirm against the data-prep script).
param_names=c("k1", "k2", "k4", "bet3", "lam2",
"sigma_y_C1_5", "sigma_y_C6", "sigma_y_C7",
"sigma_L_C1_5", "sigma_L_C6", "sigma_L_C7",
litter_nam)
#specify the dimension of all sites (either number of sites- 1 currently-, or sites and time, ie scalar or vector)
# NOTE(review): dim_param has 11 entries but param_names has 11 +
# length(litter_nam) entries, while support_param below has 16 - confirm the
# intended alignment of these three vectors.
dim_param=c(1,1,1,1,1,
length(d_l_list[[1]][[1]]),nrow(d_l_list[[1]][[2]]),nrow(d_l_list[[1]][[3]]),
length(d_l_list[[1]][[1]]),nrow(d_l_list[[1]][[2]]),nrow(d_l_list[[1]][[3]]))
#names of parameters constant across litter types and time (KEEP ORDER THE SAME THROUGHOUT)
params=c("k1", "k2", "k4", "bet3", "lam2",
"sigma_y_C1_5", "sigma_y_C6", "sigma_y_C7",
"sigma_L_C1_5", "sigma_L_C6", "sigma_L_C7")
#specify how the parameters will be 'tuned' during the runs
# Proposal-step widths, in the same order as params.
tune_param=c(0.1, 0.005, 0.1, 0.1, 0.05,
5, 5, 5,
1, 6, 6)
#what sort of data are the parameters?
support_param=c("zero-one", "zero-one","zero-one","zero-one","zero-one",
"non-negative", "non-negative","non-negative",
"non-negative", "non-negative","non-negative",
"real", "real","real", "real", "real")
# Set parameter values to initialize each chain.
# FIX 1: removed a dead assignment that built parameter_inits as a matrix and
# then immediately replaced it with a list.
# FIX 2: draw one value per chain (runif(n.chain)) instead of runif(1).
# With the original scalars, k1[j] etc. would be NA for any chain j > 1;
# for n.chain = 1 the corrected code consumes the identical RNG stream.
k1=runif(n.chain,min=0.05, max=0.25)
k2=runif(n.chain,min=0.002, max=0.01)
k4=runif(n.chain,min=0.2, max=0.8)
bet3=runif(n.chain,min=0.4,max=0.8)
lam2=runif(n.chain,min=0.02,max=0.1)
# Observation/latent sd inits: one draw per chain, replicated down the rows
# (byrow=TRUE), so each chain's column holds a single constant init value.
sigma_y_C1_5=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=length(d_l_list[[1]][[1]]), byrow=TRUE)
sigma_y_C6=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=nrow(d_l_list[[1]][[2]]), byrow=TRUE)
sigma_y_C7=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=nrow(d_l_list[[1]][[3]]), byrow=TRUE)
sigma_L_C1_5=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=length(d_l_list[[1]][[1]]), byrow=TRUE)
sigma_L_C6=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=nrow(d_l_list[[1]][[2]]), byrow=TRUE)
sigma_L_C7=matrix(runif(n.chain,min=10,max=1000), ncol=n.chain, nrow=nrow(d_l_list[[1]][[3]]), byrow=TRUE)
# Loop to set init values within each chain's parameter_inits entry,
# in the same order as params.
parameter_inits=list()
for(j in seq_len(n.chain)){
parameter_inits[[j]]=list()
parameter_inits[[j]][[1]]=k1[j]
parameter_inits[[j]][[2]]=k2[j]
parameter_inits[[j]][[3]]=k4[j]
parameter_inits[[j]][[4]]=bet3[j]
parameter_inits[[j]][[5]]=lam2[j]
parameter_inits[[j]][[6]]=sigma_y_C1_5[,j]
parameter_inits[[j]][[7]]=sigma_y_C6[,j]
parameter_inits[[j]][[8]]=sigma_y_C7[,j]
parameter_inits[[j]][[9]]=sigma_L_C1_5[,j]
parameter_inits[[j]][[10]]=sigma_L_C6[,j]
parameter_inits[[j]][[11]]=sigma_L_C7[,j]
}
#parameters that vary by litter type
#run LIDEL model based on parameters for each chain to generate initial values for first iteration
# LIDEL_inputs, Init_fctn_vals, LIDEL_initials, DOCmod and lsoda (deSolve)
# come from earlier scripts in this workflow - presumably the model
# definition and data-prep files (TODO confirm).
init_mu=list()
init_proc_days=list()
for(i in 1:n.chain){
init_mu[[i]]=list()
init_proc_days[[i]]=list()
for(j in 1:num_litter){
# FIX: use chain i's initial parameter values (parameter_inits[[i]]).
# The original always read parameter_inits[[1]], so every chain would
# have started from chain 1's values; identical behavior for n.chain = 1.
Pars <- c(tau=LIDEL_inputs$tau[j], EM1=LIDEL_inputs$EM1, Em1=LIDEL_inputs$Em1, EM2=LIDEL_inputs$EM2,
Em2=LIDEL_inputs$Em2, Nmid=LIDEL_inputs$Nmid, NC=LIDEL_inputs$NC,LcM=LIDEL_inputs$LcM,
beta1=LIDEL_inputs$beta1, beta2=LIDEL_inputs$beta2, lambda3=LIDEL_inputs$lambda3,
k1=parameter_inits[[i]][[1]],
k2=parameter_inits[[i]][[2]],
k4=parameter_inits[[i]][[3]],
beta3=parameter_inits[[i]][[4]],
lambda2=parameter_inits[[i]][[5]],
Lcalph=LIDEL_inputs$Lcalph[j])
#initial condition at day 0
start_initial_vals=LIDEL_initials(mass=Init_fctn_vals[[j]][1:2], fdoc=Init_fctn_vals[[j]][3:4], fs=Init_fctn_vals[[j]][5:6], flig=Init_fctn_vals[[j]][7:8])
yini <- c(C1=start_initial_vals[2], C2=start_initial_vals[3], C3=start_initial_vals[4], C4=0,
C5=0, C6=start_initial_vals[5], C7=0)
print(start_initial_vals[1])
print(sum(yini))
# Integrate the LIDEL ODE system over the measurement days.
out<-lsoda(func=DOCmod, y=yini, parms=Pars, times=LIDEL_inputs$days)
#save model results for pools 1-7
init_proc_days[[i]][[j]]=out[,1:8]
#pull daily values for measured data: day, total litter C (C1-C5), DOC (C6), CO2 (C7)
init_mu[[i]][[j]]=cbind(as.vector(out[,1]),as.vector(apply(out[,2:6], 1, sum)),
as.vector(out[,7]), as.vector(out[,8]))
#replace 1st row so that total mass is including DOC init
init_mu[[i]][[j]][1,]=cbind(as.vector(out[1,1]),as.vector(sum(out[1,2:8])),
as.vector(out[1,7]), as.vector(out[1,8]))
colnames(init_mu[[i]][[j]])=model_names
}
names(init_mu[[i]])=litter_nam
}
##############plot initial vs measured results#############
# One color per replicate/series across all three plot sections below.
colors=c("black", "orange", "lightblue", "lightgreen", "darkgrey", "pink", "blue")
#plot init_mu vs measured remaining litter c
jpeg(file="C:/LIDEL/Model4/Chain3/plots/litterCinit1.jpg")
par(mfrow=c(3,2), oma=c(0,0,2,0))
#plot of litter C remaining through time
# One panel per litter type: measured replicates as points (x markers),
# modeled trajectory (init_mu column 2) as a black line.
for(t in 1:length(litter_nam)){
Ctot_lim=max(rowSums(as.matrix(Init_Cs[,2:5])))+10
plot(0, rowSums(as.matrix(Init_Cs[t,2:5])), col=colors, ylim=c(0,Ctot_lim), xlim=c(0, 365),
main=litter_nam[t], ylab="Remaining litter C (mg)", typ="p", xlab="Time", lwd=3, pch=4)
for(s in 3:ncol(all_data[[t]][[1]])){
points(all_data[[t]][[1]][,2], all_data[[t]][[1]][,s], col=colors[s-1],
typ="p", pch=4, lwd=3)
}
points(init_mu[[1]][[t]][,1], init_mu[[1]][[t]][,2],
typ="l", lwd=3)
}
mtext("chain1", outer=TRUE)
dev.off()
#plot of DOC accumulation through time
# One panel per litter type: measured cumulative DOC series as lines,
# modeled DOC trajectory (init_mu column 3) overlaid in black.
# -99.99 is the missing-data sentinel; truncated series are drawn only up to
# their last valid timepoint.
jpeg(file="C:/LIDEL/Model4/Chain3/plots/DOCinit1.jpg")
par(mfrow=c(3,2), oma=c(0,0,2,0))
for(t in 1:num_litter){
DOC_lim=max(cumul_DOC[[t]][,2:ncol(cumul_DOC[[t]])], init_mu[[1]][[t]][,3])+10
plot(init_mu[[1]][[t]][,1], init_mu[[1]][[t]][,3], col=colors[1], ylim=c(0,DOC_lim),
main=litter_nam[t], ylab="DOC (mg)", typ="l", xlab="Time", lwd=3)
for(s in 2:ncol(cumul_DOC[[1]])){
if(cumul_DOC[[t]][9,s]!=-99.99){
points(cumul_DOC[[t]][,1], cumul_DOC[[t]][,s], col=colors[s],
typ="l", pch=1, cex=3, lwd=3)
}
if(cumul_DOC[[t]][9,s]==-99.99){
points(cumul_DOC[[t]][1:7,1], cumul_DOC[[t]][1:7,s], col=colors[s],
typ="l", pch=1, cex=3, lwd=3)
}
}
points(init_mu[[1]][[t]][,1], init_mu[[1]][[t]][,3], col=colors[1],
typ="l", lwd=3)
}
mtext("chain1", outer=TRUE)
# BUG FIX: dev.off() was commented out here, so the DOCinit1.jpg device was
# never closed - the file was not finalized and the device leaked. Restored
# to match the litter-C and CO2 plot sections.
dev.off()
#plot of CO2 accumulation through time
# One panel per litter type: measured cumulative CO2-C series as lines,
# modeled CO2 trajectory (init_mu column 4) overlaid in black. -99.99 is the
# missing-data sentinel; truncated series stop at row 17 (alfalfa, t==1) or
# row 12 (other litters).
jpeg(file="C:/LIDEL/Model4/Chain3/plots/CO2init1.jpg")
par(mfrow=c(3,2), oma=c(0,0,2,0))
for(t in 1:num_litter){
CO2_lim=max(cumul_CO2[[t]][,2:ncol(cumul_CO2[[t]])], init_mu[[1]][[t]][,4])+10
plot(init_mu[[1]][[t]][,1], init_mu[[1]][[t]][,4], col=colors[1], ylim=c(0,CO2_lim),
main=litter_nam[t], ylab="CO2-C (mg)", typ="l", xlab="Time", lwd=3)
for(s in 2:ncol(cumul_CO2[[1]])){
if(cumul_CO2[[t]][18,s]!=-99.99){
points(cumul_CO2[[t]][,1], cumul_CO2[[t]][,s], col=colors[s],
typ="l", pch=1, cex=3, lwd=3)
}
if(cumul_CO2[[t]][18,s]==-99.99){
if(t==1){
# NOTE(review): truncated series use colors[s-1] while complete series
# use colors[s] (the DOC plot uses colors[s] in both branches) -
# confirm whether the color offset here is intentional.
points(cumul_CO2[[t]][1:17,1], cumul_CO2[[t]][1:17,s], col=colors[s-1],
typ="l", pch=1, cex=3, lwd=3)
}
if(t!=1){
points(cumul_CO2[[t]][1:12,1], cumul_CO2[[t]][1:12,s], col=colors[s-1],
typ="l", pch=1, cex=3, lwd=3)
}
}
}
points(init_mu[[1]][[t]][,1], init_mu[[1]][[t]][,4], col=colors[1],
typ="l", lwd=3)
}
mtext("chain1", outer=TRUE)
dev.off()
#################Calculate Difference#########################
#calculate difference to use as initial values in setup of latent states
#Modeled difference, using measured time points
# One list per litter type; element [[1]] = modeled mass remaining at the
# measured days, [[2]] = modeled DOC increments between measurement intervals
# (element [[3]], the CO2 increments, is built in the section that follows).
init_alfalfa=list()
init_ash=list()
init_bluestem=list()
init_oak=list()
init_pine=list()
#############for all timepoints##########
#for C1_5, measurement is in same increment as model results (mass remaining)
# all_data[[litter]][[1]][,2] holds measurement days; +1 converts day to a
# row index of init_mu (whose first row is day 0).
init_alfalfa[[1]]=init_mu[[1]][[1]][all_data[[1]][[1]][,2]+1,2]
init_ash[[1]]=init_mu[[1]][[2]][all_data[[2]][[1]][,2]+1,2]
init_bluestem[[1]]=init_mu[[1]][[3]][all_data[[3]][[1]][,2]+1,2]
init_oak[[1]]=init_mu[[1]][[4]][all_data[[4]][[1]][,2]+1,2]
init_pine[[1]]=init_mu[[1]][[5]][all_data[[5]][[1]][,2]+1,2]
#C6, DOC (calculate difference between measurement timepoints)
# Modeled cumulative DOC at interval-end days minus at interval-start days.
init_alfalfa[[2]]=subset(init_mu[[1]][[1]][,3], init_mu[[1]][[1]][,1] %in% as.numeric(all_data[[1]][[2]][,2]))-
subset(init_mu[[1]][[1]][,3], init_mu[[1]][[1]][,1] %in% as.numeric(all_data[[1]][[2]][,1]))
init_ash[[2]]=subset(init_mu[[1]][[2]][,3], init_mu[[1]][[2]][,1] %in% as.numeric(all_data[[2]][[2]][,2]))-
subset(init_mu[[1]][[2]][,3], init_mu[[1]][[2]][,1] %in% as.numeric(all_data[[2]][[2]][,1]))
init_bluestem[[2]]=subset(init_mu[[1]][[3]][,3], init_mu[[1]][[3]][,1] %in% as.numeric(all_data[[3]][[2]][,2]))-
subset(init_mu[[1]][[3]][,3], init_mu[[1]][[3]][,1] %in% as.numeric(all_data[[3]][[2]][,1]))
init_oak[[2]]=subset(init_mu[[1]][[4]][,3], init_mu[[1]][[4]][,1] %in% as.numeric(all_data[[4]][[2]][,2]))-
subset(init_mu[[1]][[4]][,3], init_mu[[1]][[4]][,1] %in% as.numeric(all_data[[4]][[2]][,1]))
init_pine[[2]]=subset(init_mu[[1]][[5]][,3], init_mu[[1]][[5]][,1] %in% as.numeric(all_data[[5]][[2]][,2]))-
subset(init_mu[[1]][[5]][,3], init_mu[[1]][[5]][,1] %in% as.numeric(all_data[[5]][[2]][,1]))
#C7, CO2 (calculate difference between measurement timepoints)
init_alfalfa[[3]]=subset(init_mu[[1]][[1]][,4], init_mu[[1]][[1]][,1] %in% as.numeric(all_data[[1]][[3]][,2]))-
subset(init_mu[[1]][[1]][,4], init_mu[[1]][[1]][,1] %in% as.numeric(all_data[[1]][[3]][,1]))
init_ash[[3]]=subset(init_mu[[1]][[2]][,4], init_mu[[1]][[2]][,1] %in% as.numeric(all_data[[2]][[3]][,2]))-
subset(init_mu[[1]][[2]][,4], init_mu[[1]][[2]][,1] %in% as.numeric(all_data[[2]][[3]][,1]))
init_bluestem[[3]]=subset(init_mu[[1]][[3]][,4], init_mu[[1]][[3]][,1] %in% as.numeric(all_data[[3]][[3]][,2]))-
subset(init_mu[[1]][[3]][,4], init_mu[[1]][[3]][,1] %in% as.numeric(all_data[[3]][[3]][,1]))
init_oak[[3]]=subset(init_mu[[1]][[4]][,4], init_mu[[1]][[4]][,1] %in% as.numeric(all_data[[4]][[3]][,2]))-
subset(init_mu[[1]][[4]][,4], init_mu[[1]][[4]][,1] %in% as.numeric(all_data[[4]][[3]][,1]))
init_pine[[3]]=subset(init_mu[[1]][[5]][,4], init_mu[[1]][[5]][,1] %in% as.numeric(all_data[[5]][[3]][,2]))-
subset(init_mu[[1]][[5]][,4], init_mu[[1]][[5]][,1] %in% as.numeric(all_data[[5]][[3]][,1]))
init_mu_all=list(init_alfalfa, init_ash, init_bluestem, init_oak, init_pine)
##############for d_l_list subset time points
# Same construction as init_mu_all above, but evaluated only at the d_l_list
# subset of timepoints; the result (init_mu_final) seeds the MCMC latent states.
init_alfalfa_d=list()
init_ash_d=list()
init_bluestem_d=list()
init_oak_d=list()
init_pine_d=list()
#for C1_5, measurement is in same increment as model results (mass remaining)
init_alfalfa_d[[1]]=init_mu[[1]][[1]][d_l_list[[1]][[1]]+1,2]
init_ash_d[[1]]=init_mu[[1]][[2]][d_l_list[[2]][[1]]+1,2]
init_bluestem_d[[1]]=init_mu[[1]][[3]][d_l_list[[3]][[1]]+1,2]
init_oak_d[[1]]=init_mu[[1]][[4]][d_l_list[[4]][[1]]+1,2]
init_pine_d[[1]]=init_mu[[1]][[5]][d_l_list[[5]][[1]]+1,2]
#C6, DOC (calculate difference between measurement timepoints)
init_alfalfa_d[[2]]=subset(init_mu[[1]][[1]][,3], init_mu[[1]][[1]][,1] %in% as.numeric(d_l_list[[1]][[2]][,2]))-
  subset(init_mu[[1]][[1]][,3], init_mu[[1]][[1]][,1] %in% as.numeric(d_l_list[[1]][[2]][,1]))
#initial conditions tend to result in a final alfalfa DOC value that is very, very far from its converged value
#therefore takes a long time to get there because the tuning size is small
#set value to be more reasonable for initial conditions
# NOTE(review): rnorm() without a prior set.seed() makes this initial value
# non-reproducible across runs -- consider seeding if exact replication matters.
init_alfalfa_d[[2]][length(init_alfalfa_d[[2]])]=rnorm(1, 100, 10)
init_ash_d[[2]]=subset(init_mu[[1]][[2]][,3], init_mu[[1]][[2]][,1] %in% as.numeric(d_l_list[[2]][[2]][,2]))-
  subset(init_mu[[1]][[2]][,3], init_mu[[1]][[2]][,1] %in% as.numeric(d_l_list[[2]][[2]][,1]))
init_bluestem_d[[2]]=subset(init_mu[[1]][[3]][,3], init_mu[[1]][[3]][,1] %in% as.numeric(d_l_list[[3]][[2]][,2]))-
  subset(init_mu[[1]][[3]][,3], init_mu[[1]][[3]][,1] %in% as.numeric(d_l_list[[3]][[2]][,1]))
init_oak_d[[2]]=subset(init_mu[[1]][[4]][,3], init_mu[[1]][[4]][,1] %in% as.numeric(d_l_list[[4]][[2]][,2]))-
  subset(init_mu[[1]][[4]][,3], init_mu[[1]][[4]][,1] %in% as.numeric(d_l_list[[4]][[2]][,1]))
init_pine_d[[2]]=subset(init_mu[[1]][[5]][,3], init_mu[[1]][[5]][,1] %in% as.numeric(d_l_list[[5]][[2]][,2]))-
  subset(init_mu[[1]][[5]][,3], init_mu[[1]][[5]][,1] %in% as.numeric(d_l_list[[5]][[2]][,1]))
#C7, CO2 (calculate difference between measurement timepoints)
init_alfalfa_d[[3]]=subset(init_mu[[1]][[1]][,4], init_mu[[1]][[1]][,1] %in% as.numeric(d_l_list[[1]][[3]][,2]))-
  subset(init_mu[[1]][[1]][,4], init_mu[[1]][[1]][,1] %in% as.numeric(d_l_list[[1]][[3]][,1]))
init_ash_d[[3]]=subset(init_mu[[1]][[2]][,4], init_mu[[1]][[2]][,1] %in% as.numeric(d_l_list[[2]][[3]][,2]))-
  subset(init_mu[[1]][[2]][,4], init_mu[[1]][[2]][,1] %in% as.numeric(d_l_list[[2]][[3]][,1]))
init_bluestem_d[[3]]=subset(init_mu[[1]][[3]][,4], init_mu[[1]][[3]][,1] %in% as.numeric(d_l_list[[3]][[3]][,2]))-
  subset(init_mu[[1]][[3]][,4], init_mu[[1]][[3]][,1] %in% as.numeric(d_l_list[[3]][[3]][,1]))
init_oak_d[[3]]=subset(init_mu[[1]][[4]][,4], init_mu[[1]][[4]][,1] %in% as.numeric(d_l_list[[4]][[3]][,2]))-
  subset(init_mu[[1]][[4]][,4], init_mu[[1]][[4]][,1] %in% as.numeric(d_l_list[[4]][[3]][,1]))
init_pine_d[[3]]=subset(init_mu[[1]][[5]][,4], init_mu[[1]][[5]][,1] %in% as.numeric(d_l_list[[5]][[3]][,2]))-
  subset(init_mu[[1]][[5]][,4], init_mu[[1]][[5]][,1] %in% as.numeric(d_l_list[[5]][[3]][,1]))
init_mu_final=list(init_alfalfa_d, init_ash_d, init_bluestem_d, init_oak_d, init_pine_d)
#use the above to create chain storage list (x), initialize chains, specify tuning and support
# setup() (project function) allocates the MCMC chain storage, initializes each
# chain with the modeled latent-state values computed above (init_mu_final),
# and records per-parameter proposal tuning widths and support (bounds).
set_LIDEL1=setup(n.iter=n.iter,n.chain=n.chain, parameter.names=param_names,
                 dim.x=dim_param, parameter_inits=parameter_inits,
                 init_mu_input=init_mu_final, init_proc_days=init_proc_days,
                 tune_param=tune_param, Latent_tune=Latent_tune_sub, support_param=support_param)
|
716b8f8689333c70644c510d42a6cbfacfd4eed2 | 49679b97305617476aa1acd685ae31e0c7fadb87 | /All data extract/All data extract EXP4 Extinction.R | 6678508b495fc8d39847ee481ba6c42deaf3a201 | [] | no_license | mvegavillar/Accumbens-Rew-learning | 2541e07dc6e93f7ea1b39516f783f75f97470a20 | be221cf5777ec62365927213c613bc9dd6066664 | refs/heads/master | 2020-05-24T11:19:13.151823 | 2019-07-09T17:01:57 | 2019-07-09T17:01:57 | 187,246,076 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 44,486 | r | All data extract EXP4 Extinction.R |
######################################################################################################
### EXPERIMENT 4: UNILATERAL AP5 INFUSIONS: EXTINCTION TEST (MV )                                  ###
######################################################################################################

### LOAD IMPORTANT LIBRARIES
# Install only packages that are missing, so re-running the script does not
# trigger a fresh network install every time (the original called
# install.packages() unconditionally on each run).
for (pkg in c("matrixStats", "ez", "dplyr")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(matrixStats)
library(ez)
library(dplyr)

# NOTE(review): setwd() in a script is fragile -- every later path hangs off
# this root, so the script only runs on a machine with this Dropbox layout.
setwd("E:/Dropbox (EinsteinMed)/NMDA/")

#Define colors
# Index 1 = blue (VEH side), index 2 = red (AP5 side); A-D go strong -> light.
colindx <- c("#2171b5", "#cb181d") #Strong blue and red
colindxB <- c("#bdd7e7", "#fcae91") #Less strong blue and red
colindxC <- c("#eff3ff", "#fb6a4a") #Even less strong blue and red
colindxD <- c("#6baed6", "#fee5d9") #Lightest blue and red
### DEFINE ALL THE IMPORTANT FOLDERS
# Every directory is anchored at the working directory set above and keeps a
# trailing slash, so file names can be appended to it directly with paste/paste0.
funcdirect <- paste0(getwd(), "/R functions/")
datafolder_L <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/MedPC files/Learners/") #Rats that learned the task before the test
datafolder_NL <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/MedPC files/Nonlearners/") #Rats that didn't learn the task before the test
dataForRdir_L <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Data for R/Learners/")
dataForRdir_NL <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Data for R/Nonlearners/")
dataForRCumulative_L <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Data for R cumulative/Learners/")
dataForRCumulative_NL <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Data for R cumulative/Nonlearners/")
behGraphFolder <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Graphs/Behavior/")
CPfuncFolder <- paste0(getwd(), "/R functions/Change_Point-master/")
CPGraphFolder <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Graphs/Behavior/Change point/")
MixedGraphFolder <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Graphs/Mixed/")
NEXfiles_L <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/NEX files/Learners/")
NEXfiles_NL <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/NEX files/Nonlearners/")
IndcumPerfGraphFolder <- paste0(getwd(), "/EXP4_Unilateral AP5 Extinction/Graphs/Behavior/Cumulative/")
### Load necessary functions
# Each file under funcdirect is an .rdat-style image holding one saved function
# object; load() restores it into the global environment under its saved name.
helper_files <- c(
  "MedPCextract.r", "mpcextract_blockSingle.Rfunc", "CPextract.R",
  "cumulativeIndGraphs.R", "PerformanceFromCP.R", "PrePostCP_Perf.R",
  "avgPerfByBin.R", "CPextractMultipleCrit.R", "neuralhist.r",
  "FRbyNEURONbyBINcue.r", "plotFRandCP.r", "errBars.r", "errCloud.r",
  "masterDFsummary.r", "cp_wrapper3.r", "PLOT_perRatTrial_FRandBEH.r",
  "RealCP.r", "plotFRandCPhistogram.R", "megaplot.r", "sessFromCPdata.r",
  "giveStars.R", "SessionBOXPLOT.R", "SessionPSTH.R"
)
for (helper_file in helper_files) {
  load(file = paste0(funcdirect, helper_file))
}
#############################################################################################################
########### LEARNERS: MV27, 30, 31, 51, 52, 55 ##############################################################
#############################################################################################################
### EXTRACT BASIC BEHAVIOR OBJECTS
# MedPCextract (project function) parses the MedPC session files for the
# learner rats and writes behavior objects (.rdat) into dataForRdir_L and
# dataForRCumulative_L; cuelength=10 (presumably seconds -- confirm).
MedPCextract(MovAvg="Impinged only", funcdirect = funcdirect, datafolder = datafolder_L,
             dataForRdir = dataForRdir_L, dataForRCumulative=dataForRCumulative_L, cuelength=10)
# Load important behavior-related objects
# Restores everything just written (alldata, csacqidx, DStaskAcc, rats, ...).
# NOTE(review): 1:length(files) misbehaves if a directory is empty (1:0).
files <- paste(dataForRdir_L, list.files(dataForRdir_L), sep="")
filesCum <- paste(dataForRCumulative_L, list.files(dataForRCumulative_L), sep="")
for(i in 1:length(files)){load(files[[i]])}
for(i in 1:length(filesCum)){load(filesCum[[i]])}
# Single extinction test session: tag every trial row as session 1
csacqidx$session <- rep(1, nrow(csacqidx))
#### BEHAVIORAL GRAPHS
#Individual performance
# Build rats x trials (35) matrices of the per-trial performance index for the
# S+ (DS) and S- (NS) cues; DStaskAcc/NStaskAcc are per-rat vectors loaded above.
DStaskAccbyTrial <- sapply(seq(1, 35), function(trial){
  sapply(seq(1, length(DStaskAcc)), function(rat){
    DStaskAcc[[rat]][trial]
  })
})
DStaskAccMeanTrial <- colMeans(DStaskAccbyTrial, na.rm=TRUE)
# BUGFIX: these variables are named *SEMTrial but previously stored the plain
# per-trial SD (colSds). Divide by sqrt of the number of non-NA rats
# contributing to each trial to get a true standard error of the mean.
DStaskAccSEMTrial <- colSds(DStaskAccbyTrial, na.rm=TRUE) /
  sqrt(colSums(!is.na(DStaskAccbyTrial)))

NStaskAccbyTrial <- sapply(seq(1, 35), function(trial){
  sapply(seq(1, length(NStaskAcc)), function(rat){
    NStaskAcc[[rat]][trial]
  })
})
NStaskAccMeanTrial <- colMeans(NStaskAccbyTrial, na.rm=TRUE)
NStaskAccSEMTrial <- colSds(NStaskAccbyTrial, na.rm=TRUE) /
  sqrt(colSums(!is.na(NStaskAccbyTrial)))
#trial by trial performance on the extinction test
# Raw mean S+ performance trace across all 35 extinction trials
plot.new()
plot.window(xlim=c(0, 35), ylim=c(0, 5.5))
lines(DStaskAccMeanTrial, col="black", lwd=2)
#5 trial bin performance on the extinction test
# Assign each trial index to a 5-trial bin (1-5, 6-10, ..., 31-35)
binCuts <- seq(1, length(DStaskAccMeanTrial), by=5)
binIndex <- 1:length(DStaskAccMeanTrial)
binAssignment <- findInterval(binIndex, binCuts)
#DS SPECIFICITY
# Per-bin mean of the across-rat mean trace; the "SEM" here is variability of
# the mean trace across the 5 trials in a bin (not across rats).
DStaskAcc5trialBins <- sapply(seq(1, length(binCuts)), function(m){
  mean(DStaskAccMeanTrial[binAssignment==m], na.rm=T)
})
DStaskAcc5trialBinsSEM <- sapply(seq(1, length(binCuts)), function(m){
  sd(DStaskAccMeanTrial[binAssignment==m], na.rm=T)/sqrt(sum(binAssignment==m))
})
#NS SPECIFICITY
NStaskAcc5trialBins <- sapply(seq(1, length(binCuts)), function(m){
  mean(NStaskAccMeanTrial[binAssignment==m], na.rm=T)
})
NStaskAcc5trialBinsSEM <- sapply(seq(1, length(binCuts)), function(m){
  sd(NStaskAccMeanTrial[binAssignment==m], na.rm=T)/sqrt(sum(binAssignment==m))
})
# Binned figure: S+ in black, S- in gray, with error bars (errBars = project fn)
plot.new()
plot.window(xlim=c(0, length(binCuts)), ylim=c(-2, 6))
points(DStaskAcc5trialBins, pch=19, cex=2)
lines(DStaskAcc5trialBins, lwd=2)
errBars(x=seq(1, length(binCuts)), y=DStaskAcc5trialBins, err=DStaskAcc5trialBinsSEM, hatLength = 0.05)
points(NStaskAcc5trialBins, pch=19, cex=2, col="gray40")
lines(NStaskAcc5trialBins, lwd=2, col="gray40")
errBars(x=seq(1, length(binCuts)), y=NStaskAcc5trialBins, err=NStaskAcc5trialBinsSEM, hatLength = 0.05)
labVals <- c("1-5", "6-10", "11-15", "16-20", "21-25", "26-30", "31-35")
axis(side=1, at=seq(1, 7, by=1), labels=labVals, cex.axis=2, las=2)
axis(side=2, at=seq(-2, 6, by=1), cex.axis=2, las=2, pos=0.5)
abline(h=0, lty=3)
legend("topright", legend=c("S+", "S-"), col=c("black", "gray40"), lwd=2)
### For analysis:
# Long-form data frame: one row per rat x 5-trial bin, with the performance
# index averaged within bin, for S+ and S- separately; then a 2-way repeated
# measures ANOVA (cue x bin) with rat as the within-subject unit.
DS_PerfIndex_Longform <- do.call("rbind", lapply(seq(1, nrow(DStaskAccbyTrial)), function(x){
  ratByBin <- sapply(unique(binAssignment), function(y){
    mean(DStaskAccbyTrial[x, binAssignment==y], na.rm=T)
  })
  data.frame(cue="S+", rat=rats[x], PerfIndex=ratByBin, bin=unique(binAssignment))
})
)
NS_PerfIndex_Longform <- do.call("rbind", lapply(seq(1, nrow(NStaskAccbyTrial)), function(x){
  ratByBin <- sapply(unique(binAssignment), function(y){
    mean(NStaskAccbyTrial[x, binAssignment==y], na.rm=T)
  })
  data.frame(cue="S-", rat=rats[x], PerfIndex=ratByBin, bin=unique(binAssignment))
})
)
PerfIndex_ForANOVA <- rbind(DS_PerfIndex_Longform, NS_PerfIndex_Longform)
ezANOVA(data=PerfIndex_ForANOVA, wid=rat, within = c(cue, bin), dv=PerfIndex) #Check this for questions about Mauchly's test and GG correction. https://www.r-exercises.com/2016/11/29/repeated-measures-anova-in-r-exercises/
# Saved console output from the run above (kept for the record):
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 1 cue 1 5 119.926109 0.0001104032 * 0.8477629
# 2 bin 1 5 2.381756 0.1834075990 0.1937092
# 3 cue:bin 1 5 10.526758 0.0228349341 * 0.3567954
# $`Mauchly's Test for Sphericity`: it tests the null hypothesis that variance across each level of a particular within-subjects factor is equal
# Effect W p p<.05
# 3 bin 7.068002e-17 1.118896e-11 *
# Assumption of sphericity is violated for the "bin" factor, which increases the likelihood of Type II error (failing to reject a null hypothesis that is actually wrong aka less power)
#Greenhouse-Geiser corrects for that and recalculates p values of the effects that rely on "bin" (main effect of bin and interaction cue*bin). The results are the same.
#$`Sphericity Corrections`
# Effect GGe p[GG] p[GG]<.05 HFe p[HF] p[HF]<.05
# 3 bin 0.4565120 0.18282855 1.0640495 0.117037877
# 4 cue:bin 0.4143815 0.02049974 * 0.8564852 0.002310229 *
#With "aov" instead of "ezANOVA"
# Same repeated-measures model fit with base aov(); factors must be explicit.
PerfIndex_ForANOVA$cue <- factor(PerfIndex_ForANOVA$cue)
PerfIndex_ForANOVA$bin <- factor(PerfIndex_ForANOVA$bin)
# Sum-to-zero contrasts so the aov results match ezANOVA's parameterization.
# NOTE(review): this changes the global contrasts option for the whole session
# and is never restored afterwards.
options(contrasts = c("contr.sum","contr.poly"))
aovtest_Perf_L <- summary(with(PerfIndex_ForANOVA,
                               aov(PerfIndex ~ cue*bin + Error(rat/(cue*bin)))
))
# Quick normality checks on the dependent variable
qqnorm(PerfIndex_ForANOVA$PerfIndex)
hist(PerfIndex_ForANOVA$PerfIndex)
save(aovtest_Perf_L, file=paste(dataForRdir_L, "aovtest_Perf_L.rdat", sep=""))
# RASTERS
# Per-rat behavioral raster of the extinction session (MAKERASTER = project fn).
# NOTE(review): rat index 5 is skipped -- presumably excluded on purpose; confirm.
load(file=paste(funcdirect, "MAKERASTER.r", sep=""))
MAKERASTER(i=1, data=alldata, idxdata=csacqidx); title(main=paste(rats[[1]], "Extinction test"))
MAKERASTER(i=2, data=alldata, idxdata=csacqidx); title(main=paste(rats[[2]], "Extinction test"))
MAKERASTER(i=3, data=alldata, idxdata=csacqidx); title(main=paste(rats[[3]], "Extinction test"))
MAKERASTER(i=4, data=alldata, idxdata=csacqidx); title(main=paste(rats[[4]], "Extinction test"))
MAKERASTER(i=6, data=alldata, idxdata=csacqidx); title(main=paste(rats[[6]], "Extinction test"))
#### NEURONAL DATA_Extract firing rate data related to cues and entries on BOTH SIDES (VEH and AP5)
# neuralhist() (project function) builds peri-event firing-rate histograms from
# the NEX files: 50 ms bins, +/-10 s around each event; side= selects units
# recorded in the vehicle- vs AP5-infused hemisphere. Event codes (inferred
# from the object names -- confirm against the MedPC/NEX event table):
#   1/2 = S+/S- cue onset, 5/7 = responded S+/S-, 6/8 = missed S+/S-.
allNeuronsDS_L_VEH = neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=1, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="vehicle")
allNeuronsNS_L_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=2, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="vehicle")
allNeuronsDSresponded_L_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=5, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsNSresponded_L_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=7, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsDSmissed_L_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=6, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsNSmissed_L_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=8, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsDS_L_AP5 = neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=1, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="drug")
allNeuronsNS_L_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=2, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="drug")
allNeuronsDSresponded_L_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=5, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsNSresponded_L_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=7, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsDSmissed_L_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=6, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsNSmissed_L_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_L, event=8, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
### Save these objects
# BUGFIX: these calls previously built paths from `dataForRdir`, a variable
# that is never defined in this script (only dataForRdir_L / dataForRdir_NL
# exist), so every save() would error. Learner objects belong in the
# Learners data-for-R directory.
save(allNeuronsDS_L_VEH, file=paste(dataForRdir_L, 'allNeuronsDS_L_VEH.rdat', sep=""))
save(allNeuronsNS_L_VEH, file=paste(dataForRdir_L, 'allNeuronsNS_L_VEH.rdat', sep=""))
save(allNeuronsDSresponded_L_VEH, file=paste(dataForRdir_L, 'allNeuronsDSresponded_L_VEH.rdat', sep=""))
save(allNeuronsNSresponded_L_VEH, file=paste(dataForRdir_L, 'allNeuronsNSresponded_L_VEH.rdat', sep=""))
save(allNeuronsDSmissed_L_VEH, file=paste(dataForRdir_L, 'allNeuronsDSmissed_L_VEH.rdat', sep=""))
save(allNeuronsNSmissed_L_VEH, file=paste(dataForRdir_L, 'allNeuronsNSmissed_L_VEH.rdat', sep=""))
save(allNeuronsDS_L_AP5, file=paste(dataForRdir_L, 'allNeuronsDS_L_AP5.rdat', sep=""))
save(allNeuronsNS_L_AP5, file=paste(dataForRdir_L, 'allNeuronsNS_L_AP5.rdat', sep=""))
save(allNeuronsDSresponded_L_AP5, file=paste(dataForRdir_L, 'allNeuronsDSresponded_L_AP5.rdat', sep=""))
save(allNeuronsNSresponded_L_AP5, file=paste(dataForRdir_L, 'allNeuronsNSresponded_L_AP5.rdat', sep=""))
save(allNeuronsDSmissed_L_AP5, file=paste(dataForRdir_L, 'allNeuronsDSmissed_L_AP5.rdat', sep=""))
save(allNeuronsNSmissed_L_AP5, file=paste(dataForRdir_L, 'allNeuronsNSmissed_L_AP5.rdat', sep=""))
#BUILD A DATAFRAME WITH FIRING RATE OF EACH NEURON ON EACH TRIAL WITH BEH INFO ABOUT EACH TRIAL
# FRbyNEURONbyBINcue (project function) merges the peri-event histograms with
# trial-level behavior. CPvector / sessionCPperRat are all 1 here -- presumably
# because there is a single extinction session and no change point to align to
# (TODO confirm against the function's definition). BLduration=2 s baseline.
masterDF_DS_L_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDS_L_VEH,
                                        CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                        dataForRdir=dataForRdir_L,
                                        BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                        BLneudata=allNeuronsDS_L_VEH)
masterDF_DS_L_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDS_L_AP5,
                                        CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                        dataForRdir=dataForRdir_L,
                                        BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                        BLneudata=allNeuronsDS_L_AP5)
masterDF_NS_L_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S-", neudata=allNeuronsNS_L_VEH,
                                        CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                        dataForRdir=dataForRdir_L,
                                        BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                        BLneudata=allNeuronsNS_L_VEH)
masterDF_NS_L_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S-", neudata=allNeuronsNS_L_AP5,
                                        CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                        dataForRdir=dataForRdir_L,
                                        BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                        BLneudata=allNeuronsNS_L_AP5)
masterDF_DSMissed_L_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDSmissed_L_VEH,
                                              CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                              dataForRdir=dataForRdir_L,
                                              BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                              BLneudata=allNeuronsDSmissed_L_VEH)
masterDF_DSMissed_L_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDSmissed_L_AP5,
                                              CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                              dataForRdir=dataForRdir_L,
                                              BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                              BLneudata=allNeuronsDSmissed_L_AP5)
masterDF_DSResp_L_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDSresponded_L_VEH,
                                            CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                            dataForRdir=dataForRdir_L,
                                            BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                            BLneudata=allNeuronsDSresponded_L_VEH)
masterDF_DSResp_L_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDSresponded_L_AP5,
                                            CPvector=rep(1, length(rats)), funcdirect=funcdirect,
                                            dataForRdir=dataForRdir_L,
                                            BLduration=2, sessionCPperRat=rep(1, length(rats)),
                                            BLneudata=allNeuronsDSresponded_L_AP5)
# Persist the trial-level data frames for later analyses
save(masterDF_DS_L_VEH, file=paste(dataForRdir_L, "masterDF_DS_L_VEH.rdat", sep=""))
save(masterDF_DS_L_AP5, file=paste(dataForRdir_L, "masterDF_DS_L_AP5.rdat", sep=""))
save(masterDF_NS_L_VEH, file=paste(dataForRdir_L, "masterDF_NS_L_VEH.rdat", sep=""))
save(masterDF_NS_L_AP5, file=paste(dataForRdir_L, "masterDF_NS_L_AP5.rdat", sep=""))
save(masterDF_DSMissed_L_VEH, file=(paste(dataForRdir_L, "masterDF_DSMissed_L_VEH.rdat", sep="")))
save(masterDF_DSMissed_L_AP5, file=(paste(dataForRdir_L, "masterDF_DSMissed_L_AP5.rdat", sep="")))
save(masterDF_DSResp_L_VEH, file=(paste(dataForRdir_L, "masterDF_DSResp_L_VEH.rdat", sep="")))
save(masterDF_DSResp_L_AP5, file=(paste(dataForRdir_L, "masterDF_DSResp_L_AP5.rdat", sep="")))
### PLOT FR POST-CUE (100-400MS WINDOW) as a function of distance from 1st trial
#CUE
# plotFRandCP (project function): firing rate in the 100-400 ms post-cue window
# across the 35 extinction trials (trialBinSize-trial bins), Z-scored to
# baseline. VEH side plotted in blues, AP5 side in reds; cueExcOnly restricts
# to cue-excited units; dataProcess="PercCueExc" plots the % of cue-excited
# units instead of firing rate.
plotFRandCP(experiment="Exp 4 EXT Learners VEH side", cue=c("S+", "S-"),
            masterDF=list(masterDF_DS_L_VEH, masterDF_NS_L_VEH), graphFolder=MixedGraphFolder,
            trialBinSize=5, WdwStart=100, WdwEnd=400, dataProcess="Zscores", correctOnly=FALSE,
            colindx =c(colindx[1], "darkblue"), legLabels=c("S+", "S-"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_VEH)
plotFRandCP(experiment="Exp 4 EXT Learners VEH side CUE EXC", cue=c("S+", "S-"),
            masterDF=list(masterDF_DS_L_VEH, masterDF_NS_L_VEH), graphFolder=MixedGraphFolder, cueExcOnly = TRUE,
            trialBinSize=5, WdwStart=100, WdwEnd=400, dataProcess="Zscores", correctOnly=FALSE,
            colindx =c(colindx[1], "darkblue"), legLabels=c("S+", "S-"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_VEH)
plotFRandCP(experiment="Exp 4 EXT Learners VEH side PERCEXC", cue=c("S+"),
            masterDF=list(masterDF_DS_L_VEH), graphFolder=MixedGraphFolder, cueExcOnly = FALSE,
            trialBinSize=35, WdwStart=100, WdwEnd=400, dataProcess="PercCueExc", correctOnly=FALSE,
            colindx =c(colindx[1], "darkblue"), legLabels=c("S+"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_VEH)
plotFRandCP(experiment="Exp 4 EXT Learners AP5 side", cue=c("S+", "S-"),
            masterDF=list(masterDF_DS_L_AP5, masterDF_NS_L_AP5), graphFolder=MixedGraphFolder,
            trialBinSize=5, WdwStart=100, WdwEnd=400, dataProcess="Zscores", correctOnly=FALSE,
            colindx =c(colindx[2], "darkred"), legLabels=c("S+", "S-"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_AP5)
plotFRandCP(experiment="Exp 4 EXT Learners AP5 side CUE EXC", cue=c("S+", "S-"),
            masterDF=list(masterDF_DS_L_AP5, masterDF_NS_L_AP5), graphFolder=MixedGraphFolder, cueExcOnly = TRUE,
            trialBinSize=5, WdwStart=100, WdwEnd=400, dataProcess="Zscores", correctOnly=FALSE,
            colindx =c(colindx[2], "darkred"), legLabels=c("S+", "S-"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_AP5)
plotFRandCP(experiment="Exp 4 EXT Learners AP5 side PercCueExc", cue=c("S+"),
            masterDF=list(masterDF_DS_L_AP5), graphFolder=MixedGraphFolder,
            trialBinSize=35, WdwStart=100, WdwEnd=400, dataProcess="PercCueExc", correctOnly=FALSE,
            colindx =c(colindx[2], "darkred"), legLabels=c("S+"), capped=T, capValue = c(1, 35),
            yAxMinZ = -1, yAxMaxZ = 8, yAxMaxRaw = 10, neudata=allNeuronsDS_L_AP5)
### PLOT FR AROUND THE CUE (PTSH)
#All units
# SessionPSTH (project function): Z-scored peri-cue PSTH (-0.5 to +2 s),
# overlaying VEH side (blue) vs AP5 side (red).
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf",
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F,
            legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#Cue excited only
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT Cue exc only", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 16, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped = F,
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T,
            legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#Make data frame with info about proportion of cue-exc neurons (this I know from calculations inside the previous function, you can check the % cue exc. printed on the graph to calculate this)
# Hard-coded counts transcribed from the PSTH output: 12/38 cue-excited units
# on the VEH side, 8/39 on the AP5 side; compared with chi-square and Fisher.
ncueExcVEH <- 12
ncueExcAP5 <- 8
forChiSq <- data.frame(side=c("VEH", "AP5"), cueexc=c(ncueExcVEH, ncueExcAP5), noncueexc=c(38-ncueExcVEH, 39-ncueExcAP5))
chisq.test(forChiSq[, -1])
# Pearson's Chi-squared test with Yates' continuity correction
#
# data: forChiSq[, -1]
# X-squared = 0.71784, df = 1, p-value = 0.3969
fisher.test(forChiSq[, -1]) #0.3073
barplot(forChiSq$cueexc/(forChiSq$cueexc+forChiSq$noncueexc), col=colindx, border = NA, ylim=c(0, 1))
#First few trials:
# Same PSTH restricted to trial windows (capValue) across extinction.
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 1 to 10", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(1, 10),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 1 to 10 Cue Exc only", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 15, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(1, 10),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#Middle trials:
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 11 to 20", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(11, 20),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 11 to 20 Cue Exc only", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(11, 20),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#Last trials:
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 21 to 30", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(21, 30),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT trials 21 to 30 Cue Exc only", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf", capped=T, capValue = c(21, 30),
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T, legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#Responded and missed S+ trials separately (all units)
SessionPSTH(experiment="Exp 4 Unil AP5 Learners EXT Responded trials", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=T, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf",
            neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F, legendLabels=c("S+ resp VEH side L", "S+ resp AP5 side L"))
SessionPSTH(experiment="Exp 4 Unil AP5 EXT Learners Missed trials", masterDF=list(masterDF_DSMissed_L_VEH, masterDF_DSMissed_L_AP5),
            graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
            yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf",
            neudata=list(allNeuronsDSmissed_L_VEH, allNeuronsDSmissed_L_AP5), cueExcOnly=F, legendLabels=c("S+ missed VEH side L", "S+ missed AP5 side L"))
# Boxplot for a specific window and analysis
# 100-400ms. All units.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners EXT", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 6, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 100, WdwEnd = 400,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
# [[2]]
#
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 1034400, p-value = 2.2e-16
# alternative hypothesis: true location shift is greater than 0
# 750-2000ms. All units.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners EXT TAIL", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 6, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 750, WdwEnd = 2000,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
# [[2]]
#
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 906790, p-value = 7.753e-07
# alternative hypothesis: true location shift is greater than 0
p.adjust(p=c(2.2e-16, 7.753e-07), method = "holm") #4.400e-16 7.753e-07
# -2000ms to 0. All units.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners BASELINE", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="raw", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 6, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = -2000, WdwEnd = 0,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#[[2]]
#Wilcoxon rank sum test with continuity correction
#data: first and second
#W = 816250, p-value = 0.5185
#alternative hypothesis: true location shift is greater than 0
# 100-400 ms window, raw scores, all units.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners PEAK CUE EXC RAW", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="raw", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 6, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 100, WdwEnd = 400,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=F,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
#[[2]]
#Wilcoxon rank sum test with continuity correction
#data: first and second
#W = 1070300, p-value = 2.841e-16
#alternative hypothesis: true location shift is not equal to 0
#### CUE-EXCITED UNITS ALONE
#100-400ms. Cue-excited units alone
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners EXT 100 t0 400", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 12, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 100, WdwEnd = 400,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T, capped=T,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
# [[2]]
#
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 14223, p-value < 2.2e-16
# alternative hypothesis: true location shift is greater than 0
#750-2000ms Cue-excited units alone
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Learners EXT 750 to 2000", masterDF=list(masterDF_DS_L_VEH, masterDF_DS_L_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 12, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 750, WdwEnd = 2000,
neudata=list(allNeuronsDS_L_VEH, allNeuronsDS_L_AP5), cueExcOnly=T, capped=T,
legendLabels=c("S+ VEH side L", "S+ AP5 side L"))
# [[2]]
#
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 11088, p-value = 0.0001746
# alternative hypothesis: true location shift is greater than 0
########### NON LEARNERS: MV28, 29 ##############################################################
### EXTRACT BASIC BEHAVIOR OBJECTS
MedPCextract(MovAvg="Impinged only", funcdirect = funcdirect, datafolder = datafolder_NL,
dataForRdir = dataForRdir_NL, dataForRCumulative=dataForRCumulative_NL, cuelength=10)
# Load important behavior-related objects
files <- paste(dataForRdir_NL, list.files(dataForRdir_NL), sep="")
filesCum <- paste(dataForRCumulative_NL, list.files(dataForRCumulative_NL), sep="")
for(i in 1:length(files)){load(files[[i]])}
for(i in 1:length(filesCum)){load(filesCum[[i]])}
csacqidx$session <- rep(1, nrow(csacqidx))
#### BEHAVIORAL GRAPHS
#Individual performance
DStaskAccbyTrial <- sapply(seq(1, 35), function(x){
sapply(seq(1, length(DStaskAcc)), function(j){
DStaskAcc[[j]][x]
})
})
DStaskAccMeanTrial <- colMeans(DStaskAccbyTrial, na.rm=T)
DStaskAccSEMTrial <- colSds(DStaskAccbyTrial, na.rm=T)
NStaskAccbyTrial <- sapply(seq(1, 35), function(x){
sapply(seq(1, length(NStaskAcc)), function(j){
NStaskAcc[[j]][x]
})
})
NStaskAccMeanTrial <- colMeans(NStaskAccbyTrial, na.rm=T)
NStaskAccSEMTrial <- colSds(NStaskAccbyTrial, na.rm=T)
#5 trial bin performance on the extinction test
binCuts <- seq(1, length(DStaskAccMeanTrial), by=5)
binIndex <- 1:length(DStaskAccMeanTrial)
binAssignment <- findInterval(binIndex, binCuts)
#DS SPECIFICITY
DStaskAcc5trialBins <- sapply(seq(1, length(binCuts)), function(m){
mean(DStaskAccMeanTrial[binAssignment==m], na.rm=T)
})
DStaskAcc5trialBinsSEM <- sapply(seq(1, length(binCuts)), function(m){
sd(DStaskAccMeanTrial[binAssignment==m], na.rm=T)/sqrt(sum(binAssignment==m))
})
#NS SPECIFICITY
NStaskAcc5trialBins <- sapply(seq(1, length(binCuts)), function(m){
mean(NStaskAccMeanTrial[binAssignment==m], na.rm=T)
})
NStaskAcc5trialBinsSEM <- sapply(seq(1, length(binCuts)), function(m){
sd(NStaskAccMeanTrial[binAssignment==m], na.rm=T)/sqrt(sum(binAssignment==m))
})
plot.new()
plot.window(xlim=c(0, length(binCuts)), ylim=c(-4, 6))
points(DStaskAcc5trialBins, pch=19, cex=2)
lines(DStaskAcc5trialBins, lwd=2)
errBars(x=seq(1, length(binCuts)), y=DStaskAcc5trialBins, err=DStaskAcc5trialBinsSEM, hatLength = 0.05)
points(NStaskAcc5trialBins, pch=19, cex=2, col="gray40")
lines(NStaskAcc5trialBins, lwd=2, col="gray40")
errBars(x=seq(1, length(binCuts)), y=NStaskAcc5trialBins, err=NStaskAcc5trialBinsSEM, hatLength = 0.05)
labVals <- c("1-5", "6-10", "11-15", "16-20", "21-25", "26-30", "31-35")
axis(side=1, at=seq(1, 7, by=1), labels=labVals, cex.axis=2, las=2)
axis(side=2, at=seq(-4, 6, by=1), cex.axis=2, las=2, pos=0.5)
abline(h=0, lty=3)
legend("topright", legend=c("S+", "S-"), col=c("black", "gray40"), lwd=2)
### For analysis:
DS_PerfIndex_Longform <- do.call("rbind", lapply(seq(1, nrow(DStaskAccbyTrial)), function(x){
ratByBin <- sapply(unique(binAssignment), function(y){
mean(DStaskAccbyTrial[x, binAssignment==y], na.rm=T)
})
data.frame(cue="S+", rat=rats[x], PerfIndex=ratByBin, bin=unique(binAssignment))
})
)
NS_PerfIndex_Longform <- do.call("rbind", lapply(seq(1, nrow(NStaskAccbyTrial)), function(x){
ratByBin <- sapply(unique(binAssignment), function(y){
mean(NStaskAccbyTrial[x, binAssignment==y], na.rm=T)
})
data.frame(cue="S-", rat=rats[x], PerfIndex=ratByBin, bin=unique(binAssignment))
})
)
PerfIndex_ForANOVA <- rbind(DS_PerfIndex_Longform, NS_PerfIndex_Longform)
ezANOVA(data=PerfIndex_ForANOVA, wid=rat, within = c(cue, bin), dv=PerfIndex) #Check this for questions about Mauchly's test and GG correction. https://www.r-exercises.com/2016/11/29/repeated-measures-anova-in-r-exercises/
# $`ANOVA`
# Effect DFn DFd F p p<.05 ges
# 1 cue 1 1 1.869764e+00 0.4019855461 0.4669608
# 2 bin 1 1 5.750234e+00 0.2515236555 0.7534574
# 3 cue:bin 1 1 2.221958e+06 0.0004270829 * 0.2145717
#With "aov" instead of "ezANOVA"
PerfIndex_ForANOVA$cue <- factor(PerfIndex_ForANOVA$cue)
PerfIndex_ForANOVA$bin <- factor(PerfIndex_ForANOVA$bin)
options(contrasts = c("contr.sum","contr.poly"))
aovtest_Perf_NL <- summary(with(PerfIndex_ForANOVA,
aov(PerfIndex ~ cue*bin + Error(rat/(cue*bin)))
))
qqnorm(PerfIndex_ForANOVA$PerfIndex)
hist(PerfIndex_ForANOVA$PerfIndex)
# NOTE(review): this writes the non-learner (NL) ANOVA result into dataForRdir_L
# (the learners' directory), while every other NL object in this section is saved
# under dataForRdir_NL -- confirm whether dataForRdir_NL was intended here.
save(aovtest_Perf_NL, file=paste(dataForRdir_L, "aovtest_Perf_NL.rdat", sep=""))
########## FR NON LEARNERS
#### NEURONAL DATA_Extract firing rate data related to cues and entries on BOTH SIDES (VEH and AP5)
allNeuronsDS_NL_VEH = neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=1, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="vehicle")
allNeuronsNS_NL_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=2, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="vehicle")
allNeuronsDSresponded_NL_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=5, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsNSresponded_NL_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=7, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsDSmissed_NL_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=6, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsNSmissed_NL_VEH= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=8, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="vehicle")
allNeuronsDS_NL_AP5 = neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=1, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="drug")
allNeuronsNS_NL_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=2, startt=0, endt=10000, binw=50, psthmin=10, psthmax=10, cueexonly=F, allResults=T, side="drug")
allNeuronsDSresponded_NL_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=5, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsNSresponded_NL_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=7, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsDSmissed_NL_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=6, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
allNeuronsNSmissed_NL_AP5= neuralhist (funcdirect=funcdirect, path=NEXfiles_NL, event=8, startt=0,endt=10000, binw=50, psthmin=10, psthmax=10,cueexonly=F, allResults=T, side="drug")
# Save these objects
save(allNeuronsDS_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsDS_NL_VEH.rdat', sep=""))
save(allNeuronsNS_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsNS_NL_VEH.rdat', sep=""))
save(allNeuronsDSresponded_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsDSresponded_NL_VEH.rdat', sep=""))
save(allNeuronsNSresponded_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsNSresponded_NL_VEH.rdat', sep=""))
save(allNeuronsDSmissed_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsDSmissed_NL_VEH.rdat', sep=""))
save(allNeuronsNSmissed_NL_VEH, file=paste(dataForRdir_NL, 'allNeuronsNSmissed_NL_VEH.rdat', sep=""))
save(allNeuronsDS_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsDS_NL_AP5.rdat', sep=""))
save(allNeuronsNS_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsNS_NL_AP5.rdat', sep=""))
save(allNeuronsDSresponded_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsDSresponded_NL_AP5.rdat', sep=""))
save(allNeuronsNSresponded_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsNSresponded_NL_AP5.rdat', sep=""))
save(allNeuronsDSmissed_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsDSmissed_NL_AP5.rdat', sep=""))
save(allNeuronsNSmissed_NL_AP5, file=paste(dataForRdir_NL, 'allNeuronsNSmissed_NL_AP5.rdat', sep=""))
masterDF_DS_NL_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDS_NL_VEH,
CPvector=rep(1, length(rats)), funcdirect=funcdirect,
dataForRdir=dataForRdir_NL,
BLduration=2, sessionCPperRat=rep(1, length(rats)),
BLneudata=allNeuronsDS_NL_VEH)
masterDF_DS_NL_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S+", neudata=allNeuronsDS_NL_AP5,
CPvector=rep(1, length(rats)), funcdirect=funcdirect,
dataForRdir=dataForRdir_NL,
BLduration=2, sessionCPperRat=rep(1, length(rats)),
BLneudata=allNeuronsDS_NL_AP5)
masterDF_NS_NL_VEH <- FRbyNEURONbyBINcue(eventType="cue", cue="S-", neudata=allNeuronsNS_NL_VEH,
CPvector=rep(1, length(rats)), funcdirect=funcdirect,
dataForRdir=dataForRdir_NL,
BLduration=2, sessionCPperRat=rep(1, length(rats)),
BLneudata=allNeuronsNS_NL_VEH)
masterDF_NS_NL_AP5 <- FRbyNEURONbyBINcue(eventType="cue", cue="S-", neudata=allNeuronsNS_NL_AP5,
CPvector=rep(1, length(rats)), funcdirect=funcdirect,
dataForRdir=dataForRdir_NL,
BLduration=2, sessionCPperRat=rep(1, length(rats)),
BLneudata=allNeuronsNS_NL_AP5)
# Persist the per-neuron/per-bin master data frames for the non-learner group.
# NOTE(review): these save into dataForRdir, not dataForRdir_NL like the neuronal
# objects above -- confirm which directory was intended.
save(masterDF_DS_NL_VEH, file=paste(dataForRdir, "masterDF_DS_NL_VEH.rdat", sep=""))
save(masterDF_DS_NL_AP5, file=paste(dataForRdir, "masterDF_DS_NL_AP5.rdat", sep=""))
save(masterDF_NS_NL_VEH, file=paste(dataForRdir, "masterDF_NS_NL_VEH.rdat", sep=""))
save(masterDF_NS_NL_AP5, file=paste(dataForRdir, "masterDF_NS_NL_AP5.rdat", sep=""))
### PLOT FR POST-CUE (100-400MS WINDOW) as a function of distance from 1st trial
### PLOT FR AROUND THE CUE (PTSH)
#All units
SessionPSTH(experiment="Exp 4 Unil AP5 Non Learners EXT",
masterDF=list(masterDF_DS_NL_VEH, masterDF_DS_NL_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf",
neudata=list(allNeuronsDS_NL_VEH, allNeuronsDS_NL_AP5), cueExcOnly=F,
legendLabels=c("S+ VEH side NL", "S+ AP5 side NL"))
#Make data frame with info about proportion of cue-exc neurons (this I know from calculations inside the previous function, you can check the % cue exc. printed on the graph to calculate this)
ncueExcVEH <- 1
ncueExcAP5 <- 1
forChiSq <- data.frame(side=c("VEH", "AP5"), cueexc=c(ncueExcVEH, ncueExcAP5), noncueexc=c(26-ncueExcVEH, 15-ncueExcAP5))
chisq.test(forChiSq[, -1])
# Pearson's Chi-squared test with Yates' continuity correction
# data: forChiSq[, -1]
# X-squared = 1.0839e-30, df = 1, p-value = 1
fisher.test(forChiSq[, -1]) # p = 1
barplot(forChiSq$cueexc/(forChiSq$cueexc+forChiSq$noncueexc), col=colindx, border = NA, ylim=c(0, 1))
#Cue excited only
SessionPSTH(experiment="Exp 4 Unil AP5 Non Learners EXT Cue exc only",
masterDF=list(masterDF_DS_NL_VEH, masterDF_DS_NL_AP5),
graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
yAxMinZ = -2, yAxMaxZ = 8, yAxMaxRaw = 10, psthmin=0.5, psthmax=2, imgFormat="pdf",
neudata=list(allNeuronsDS_NL_VEH, allNeuronsDS_NL_AP5), cueExcOnly=T,
legendLabels=c("S+ VEH side NL", "S+ AP5 side NL"))
#Boxplot for a specific window and analysis (100-400 ms post-cue, all units)
# FIX: neudata previously referenced allNeuronsDS_VEH_NL / allNeuronsDS_AP5_NL, which
# are never created anywhere in this script; the objects built above are
# allNeuronsDS_NL_VEH / allNeuronsDS_NL_AP5.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Non learners EXT", masterDF=list(masterDF_DS_NL_VEH, masterDF_DS_NL_AP5),
               graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
               yAxMinZ = -2, yAxMaxZ = 10, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 100, WdwEnd = 400,
               neudata=list(allNeuronsDS_NL_VEH, allNeuronsDS_NL_AP5), cueExcOnly=F,
               legendLabels=c("S+ VEH side NL", "S+ AP5 side NL"))
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 239040, p-value = 0.1008
# alternative hypothesis: true location shift is greater than 0
# Tail window (750-2000 ms) boxplot, non-learners, all units.
# FIX: corrected neudata object names to allNeuronsDS_NL_VEH / allNeuronsDS_NL_AP5;
# the previously referenced *_VEH_NL / *_AP5_NL objects do not exist.
SessionBOXPLOT(experiment="Exp 4 Unil AP5 Non learners EXT tail", masterDF=list(masterDF_DS_NL_VEH, masterDF_DS_NL_AP5),
               graphFolder=MixedGraphFolder, dataProcess="Zscores", correctOnly=FALSE, color=colindx,
               yAxMinZ = -2, yAxMaxZ = 10, yAxMaxRaw = 10, imgFormat="pdf", WdwStart = 750, WdwEnd = 2000,
               neudata=list(allNeuronsDS_NL_VEH, allNeuronsDS_NL_AP5), cueExcOnly=F,
               legendLabels=c("S+ VEH side NL", "S+ AP5 side NL"))
# [[2]]
#
# Wilcoxon rank sum test with continuity correction
#
# data: first and second
# W = 233180, p-value = 0.3166
# alternative hypothesis: true location shift is greater than 0
|
e03559dc7c3107a17f67b14b4ea1c33bd0a18adf | 41cf007ff1b7abd744f1d9afae2eaa49537dd480 | /lesson_06/barcharts_of_mean_price.r | ec008f1a3693cbd0ba7432768ff25e0d67ad436e | [] | no_license | dsharath/R_programmimg | 1ecd82534a26b8ef7cd75ebd4b61ee387a2bb7a1 | 7f95347109a57879a2f770e64731a83b35e02b7c | refs/heads/master | 2021-09-05T16:48:50.002962 | 2018-01-29T18:56:12 | 2018-01-29T18:56:12 | 116,184,935 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,432 | r | barcharts_of_mean_price.r | # We’ve created summary data frames with the mean price
# by clarity and color. You can run the code in R to
# verify what data is in the variables diamonds_mp_by_clarity
# and diamonds_mp_by_color.
# Your task is to write additional code to create two bar plots
# on one output image using the grid.arrange() function from the package
# gridExtra.
# This assignment is not graded and
# will be marked as correct when you submit.
# See the Instructor Notes for more info on bar charts
# and for a hint on this task.
# DO NOT DELETE THE LINES OF CODE BELOW
# ===================================================================
data(diamonds)
library(dplyr)
diamonds_by_clarity <- group_by(diamonds, clarity)
diamonds_mp_by_clarity <- summarise(diamonds_by_clarity, mean_price = mean(price))
diamonds_by_color <- group_by(diamonds, color)
diamonds_mp_by_color <- summarise(diamonds_by_color, mean_price = mean(price))
# ENTER YOUR CODE BELOW THIS LINE
# ===================================================================
# The color summary (diamonds_by_color / diamonds_mp_by_color) is already computed in
# the protected block above, so the duplicated group_by/summarise lines were removed.
# FIX: grid.arrange() has no `col` argument -- `ncol` is what stacks the plots in one column.
a <- ggplot(aes(x = clarity, y = mean_price), data = diamonds_mp_by_clarity) +
  geom_bar(stat = "identity")
b <- ggplot(aes(x = color, y = mean_price), data = diamonds_mp_by_color) +
  geom_bar(stat = "identity")
myPlotsList <- list(a, b)
grid.arrange(grobs = myPlotsList, ncol = 1)
|
d352f2d63fa7474e5fed26657924a0fe8c3a63ea | 46bc75bcd08ec918f4859d6dbb6d97dbb02b03b4 | /quarantine.R | 5ae490acf85225c24005f4ff01d1f9ddf0ccf817 | [
"MIT"
] | permissive | sanjmeh/covid | 2a0162bff21151c2982370a8b8e14e77909c4531 | 036519c96c441064af6a6382cc7cba7a964ba357 | refs/heads/master | 2022-11-28T08:10:01.651810 | 2020-08-14T19:03:20 | 2020-08-14T19:03:20 | 287,571,456 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 22,894 | r | quarantine.R | library(tidyverse)
library(janitor)
library(readxl)
library(magrittr)
library(stringr)
library(stringdist)
library(splitstackshape)
library(data.table)
library(googlesheets4)
library(googledrive)
library(lubridate)
library(wrapr)
library(gmapsdistance)
library(geosphere)
library(ggmap)
library(ggrepel)
library(htmlTable)
load("keys.data")
# Helper: the negation of %in% -- TRUE for each element of lhs absent from rhs.
`%notin%` <- function(lhs, rhs) {
  match(lhs, rhs, nomatch = 0L) == 0L
}
# Input: a character vector of free-text addresses. Output: the same vector made
# geocoding-ready -- '#', ',' and '&' become spaces, whitespace runs become '+',
# and "Bangalore+Karnataka" (or just "+Karnataka" when the city name is already
# present) is appended so Google resolves within the right city/state.
# NOTE(review): the `bangalore` argument is never used inside the body.
prep_addr <- function(addstr,bangalore=T){
  str2 <- addstr %>% str_replace_all("[#,&]"," ") %>%
    str_trim %>% str_replace_all("\\s+","+") %>%
    {ifelse(grepl("Bangalore|Bengaluru", .,ig=T), paste(.,"Karnataka",sep="+"),paste(.,"Bangalore+Karnataka",sep="+"))}
  # collapse any residual whitespace-separated tokens with '+' and flatten back to a vector
  str2 %>% strsplit("\\s+") %>% map(paste,collapse="+") %>% unlist
}
# Fetch a 640x640 static Google map tile (png8) centred on `cen` -- defaults to the
# global `center_ward46` (loaded from keys.data) -- at the given zoom, via
# ggmap::get_googlemap().
getmap <- function(cen=center_ward46,zoom=15){
  get_googlemap(center = cen,
                zoom=zoom,
                size=c(640,640),
                format="png8"
  )
}
# Read the Kaggle "covid_19_india.csv" daily file into a data.table: parses Date
# (dd-mm-yy text), applies short column names and adds a month column (mth).
# FIX: the last expression was a `:=` assignment, which data.table returns
# invisibly -- the trailing [] makes the returned DT print at the console.
load_covid_data <- function(file="kaggle_data/covid_19_india.csv"){
  x1 <- fread(file)
  x1[,Date:=dmy(Date)]
  setnames(x1,qc(sn,Date,Time,state,indnat,fornat,cured,dead,confm))
  x1[,mth:=month(Date)]
  x1[]
}
# Line chart of daily deaths for the n states with the highest confirmed-case
# counts, one free-y facet per state.
plot_top_n <- function(dt,n=5){
  # rank states by descending confirmed count, keep the first n, then subset to them
  dt[,{topstates <- .SD[order(-confm),unique(state)][seq_len(n)]
  .SD[state %in% topstates]}] %>%
    ggplot(aes(Date,dead)) +
    geom_line(aes(col=state),size=2) +
    facet_wrap(~state,scales = "free_y")
}
# Return a one-column data.table (`topstates`) holding the n states that come
# first when the rows are ranked by descending confirmed-case count.
states_top_n <- function(dt, n = 5) {
  ranked_states <- dt[order(-confm), unique(state)]
  data.table(topstates = ranked_states[seq_len(n)])
}
# Pass a covid dataset DT and a substring identifying exactly one state. For each
# date, finds the days until that metric (deaths / confirmed) first doubled, and
# appends by reference:
#   dbldays_dead / dbldays_cases - days-to-double from that date (NA if it never doubled)
#   futDate_dead / futDate_cases - the projected date at which doubling occurred
# this function is used by the next function (dbl_data_all), and not used independently
dble_days <- function(dt,statestr="karnat"){
  # keep only rows of the one matching state (case-insensitive substring match)
  dt <- dt[grepl(statestr,state,ig=T)]
  # for each date, the gap to the first later date whose count is >= double today's count
  diffdays_dead <- dt$Date %>% map(~ dt[first(which(dt$dead>=dt[Date==.x,2*dead])),Date - .x]) %>% unlist
  diffdays_conf <- dt$Date %>% map(~ dt[first(which(dt$confm>=dt[Date==.x,2*confm])),Date - .x]) %>% unlist
  dt[seq_along(diffdays_dead),dbldays_dead:=diffdays_dead]
  dt[seq_along(diffdays_conf),dbldays_cases:=diffdays_conf]
  # negative gaps are artefacts (non-monotonic counts); blank them out
  dt[,dbldays_dead:=ifelse(dbldays_dead<0,NA,dbldays_dead)]
  dt[,dbldays_cases:=ifelse(dbldays_cases<0,NA,dbldays_cases)]
  dt[,futDate_dead:=Date+dbldays_dead]
  dt[,futDate_cases:=Date+dbldays_cases]
}
# pass the covid dataset and value of n, to return a DT with just states, futureDate
# vals and doubling rate (mean). `type` selects deaths ("dead", default) or cases.
dbl_data_all <- function(dt,n=5,type="dead"){
  # run dble_days() once per top-n state and stack the per-state results
  dtdbl <- states_top_n(dt,n = n)$topstates %>% map(~dble_days(dt,.x)) %>% rbindlist
  if(type=="dead")
    dtdbl2 <-
      dtdbl[!is.na(futDate_dead),.(Days=mean(dbldays_dead,na.rm = T)),by=.(state,Date=futDate_dead)] else
        dtdbl2 <- dtdbl[!is.na(futDate_cases),.(Days=mean(dbldays_cases,na.rm = T)),by=.(state,Date=futDate_cases)]
  dtdbl2[order(Date)]
}
# Generate n labelled sample points per curve (state) for uncluttered labeling on
# the line chart: partitions each state's Days values into n clusters and draws
# one random row per cluster.
# NOTE(review): pam() comes from the `cluster` package, which is not in the
# library() block at the top of this file -- presumably attached elsewhere;
# confirm before sourcing standalone.
gen_label_samples <- function(dt,n=3){
  dtchunks <- split(dt,f = dt$state)
  model_list <- dtchunks %>% map(~pam(.x$Days,n)) # using Partitioning around Mediods (better than kmeans)
  # pick one random row from each of the n partitions of a state's data
  select_samples <- function(dt,model) split(dt,model$clustering) %>% map(~.x[sample(nrow(.x),1)]) %>% rbindlist # select 1 sample per partition
  model_list %>% map2(.y = dtchunks, .f = ~select_samples(.y,.x)) %>% rbindlist
}
# Most frequent string transformations behind one switch. `what` selects the action:
#   "punct"       - strip whitespace/punctuation and lowercase (compressed form)
#   "vow"         - strip vowels (case-insensitive)
#   "dou"/"dbl"   - collapse doubled letters (aa -> a)
#   "near"/"next" - replace "near/opp/behind/next to/beside <word>" landmark
#                   phrases with the token "BLACKLIST" (first occurrence only)
#   anything else - return input unchanged
# Input is coerced to character first.
str_action <- function(x,what="punct"){
  x <- as.character(x)
  case_when(
    grepl("punct",what,ig=T) ~ str_remove_all(x,"[\\s[:punct:]]") %>% tolower,
    grepl("vow",what,ig=T) ~ str_remove_all(x,regex("[aeiou]",ig=T)),
    grepl("dou|dbl",what,ig=T) ~ str_replace_all(x, regex("([a-z])\\1{1,}",ig=T),regex("\\1",ig=T)),
    grepl("near|next",what,ig=T) ~ str_replace(x,regex("(near|opp[.]*|behind|opp to|next to|beside) [a-z]{1,20}",ig=T),"BLACKLIST"),
    TRUE ~ x
  )
}
# Add new compressed address columns to `dt` BY REFERENCE, derived from `colname`:
# addr_compr (lowercase, no whitespace/punctuation), addr_rm_doub, addr_rm_vow,
# addr_rm_vow_and_doub, plus google_addr ready for geocoding via prep_addr().
compress_addr <- function(dt,colname="ADDRESS"){
  dt[,addr_compr:=str_action(get(colname),"punct")]
  dt[,addr_rm_doub:= str_action(addr_compr,"dbl")] # remove doubles before removing vowels
  dt[,addr_rm_vow:= str_action(addr_compr,"vowel")] # remove vowels
  dt[,addr_rm_vow_and_doub:= str_action(addr_rm_doub,"vowel")] # remove doubles as well as vowels
  dt[,google_addr:=prep_addr(get(colname))]
}
# Read the raw Qwatch CSV dump (BBMP quarantine-watch export) into a data.table.
# Blank / "NULL" / "." strings become NA; columns are coerced BY POSITION to
# factor, Date (dmy) and numeric; short column names are applied unless
# retainnames=TRUE keeps the originals.
load_bbmp_raw <- function(file="baf/Qwatch Data Dump Bangalore Urban & Rural districts as on 06082020 1900.csv",retainnames=F){
  x1 <- fread(file)
  # setnames(x1,qc(qwid,name,gender,mob,email,rem1,rem2,porto,porta,addr1,addr2,addr3,age,
  #               addrty,district,taluk,panchyt,ward,city,bbmpzone,qtype,postcode,state,DateQ,DateEndQ))
  x1 %<>% map_if(is.character, ~ifelse(.x=="NULL" | .x=="" | .x==".",NA,.x) ) %>% as.data.table
  x1 %<>% map_at(.at = c(3,8:9,14:23), as.factor) %>% as.data.table   # categorical columns, by position
  x1 %<>% map_at(.at = c(6:7), dmy) %>% as.data.table                 # quarantine start/end dates
  x1 %<>% map_at(.at = c(13), as.numeric) %>% as.data.table           # age
  if(retainnames==F) {
    setnames(x1,qc(qwid,name,gender,mob,email,dateQ,endQ,porto,porta,addr1,addr2,addr3,
                   age,region,distt,taluk,panch,ward,city,
                   bbmpzone,qtype,pin,state))
  }
  return(x1)
}
# Load the daily BBMP quarantine Excel file (first `colsize` columns).
# Every column is read as text and coerced individually afterwards: per-column
# read_excel types (numeric for col 9, date for col 7) proved fragile across daily
# files and were reverted to "text", which left the old `ctype` vector as dead
# code -- it has been removed. The trailing [] returns the DT visibly.
load_bbmp_file <- function(f="BAF.xlsx",colsize=13){
  x1 <- read_excel(f,range = cell_cols(seq_len(colsize)), col_types = "text")
  setnames(x1,qc(ptype,qdays,qwid,name,gender,mob,dateQ,ADDRESS,age,bbmpzone,ward,state,region))
  x1 <- x1 %>% map_at(.at = c(1,5,10:13), as.factor) %>% as.data.table  # categorical columns
  x1 <- x1 %>% map_at(.at = c(2,9), as.numeric) %>% as.data.table       # quarantine days, age
  x1 <- x1 %>% map_at(.at = c(7), convert_to_date) %>% as.data.table    # janitor::convert_to_date for dateQ
  x1 %<>% map_if(is.character, ~ifelse(.x=="NULL" | .x=="" | .x==".",NA,.x) ) %>% as.data.table()
  x1[,dateQ:=as.Date(dateQ)] # because convert_to_date brings it into Posixct Date. We need pure Date format
  x1[]
}
# Pass the output of reading the file (load_bbmp_raw / load_bbmp_file). Cleans it
# BY REFERENCE: normalises mobile numbers, recovers dates hidden in qdays/ptype,
# consolidates the 3-part address, and derives validity flags plus the
# compressed / google-ready address columns used downstream for matching.
proc_bbmp_raw <- function(dt){
  dt[,mob:=str_action(mob,"punct")]
  #dt[,ADDRESS := str_remove_all(ADDRESS,",?\\bNULL\\b,?") ]
  if("qdays" %in% names(dt) ){
    dt[,qdays:=as.numeric(qdays)]
    # qdays > 44000 is actually an Excel date serial, not a day count; recover it as dateB
    dt[qdays>44000,dateB := as.Date(qdays,or="1900-01-01")]
    dt[qdays>44000,qdays:=NA]
  }
  if("ptype" %in% names(dt))
    suppressWarnings(dt[,dateB:= ptype %>% as.character() %>% str_extract("(?<=\\().+(?=\\))") %>%
                          paste(2020) %>% dmy()]) # extract the hand entered date from brackets
  if(grepl("addr1",names(dt)) %>% any){
    # collapse the three address fragments into one ADDRESS per quarantine id, then drop them
    dt[,ADDRESS:=paste(unique(c(addr1,addr2,addr3)),collapse = " ") %>% str_squish,by=qwid][,c("addr1","addr2","addr3"):=NULL]
  }
  dt[,ADDRESS := str_remove_all(ADDRESS,",?\\bNULL\\b,?") ]
  dt[,mob:=str_action(mob,"punct")]  # NOTE(review): repeats the cleaning done above; harmless but redundant
  # invalid when it starts 1-5, is all digits with length <= 9, or has 11+ characters
  dt[,valid_mob := mob %>% str_detect("^[1-5]|^\\d{1,9}$|^.{11,}$") %>% not]
  dt[,junk_addr:= nchar(ADDRESS)<20]
  dt[,isappt := str_detect(ADDRESS,regex("\\bflat|\\bapart|\\bapp\\b|\\bappart|society|\\bdoor\\b",ig=T))]
  dt[,addr_black:=str_action(ADDRESS,"near")]  # mask "near <landmark>" phrases before compressing
  compress_addr(dt,"addr_black")
  dt[,google_addr:=prep_addr(ADDRESS)]  # overwrite google_addr with the full (unmasked) address
}
# Change the digit with a roman number string. Useful for apartment names ending
# with 1,2,3. Mutates `clean_name` BY REFERENCE for rows whose appt_baf contains
# any digit. Note: str_replace() substitutes only the FIRST occurrence of `from`.
replace_numeric <- function(dt,from="1",to="I") {
  dt[str_detect(appt_baf,"\\d"), clean_name:= clean_name %>% str_replace(from,to)]
}
# input : DT with googlesheet4 read_sheet of BAF volunteer googlesheet
# output : processed output DT with cleaned BAF apartment names and addition of a google address column ready for running geocode()
# NOTE(review): setnames() below spells the last new column "identfier" while the
# as.factor list further down uses "identifier" -- one of the two is a typo; confirm.
proc_volunteers <- function(dt=vol1,setnames=T){
  setDT(dt)
  if(setnames==T){
    setnames(dt,old=c(1:9,13:26),new = c("dttim", "email", "name", "age", "mob", "society", "door",
                                         "isbaf", "bafno", "appt_baf", "flatcount", "vol_address",
                                         "locty", "city", "pin", "cluster", "zonlead", "wardno", "wardname",
                                         "subzone", "zone", "seqno","identfier"
    ))
  }
  # keep only the renamed (all-lowercase/underscore) columns
  newcols <- names(dt) %>% str_subset("^[a-z_]+$")
  dt1 <- dt[,.SD,.SDcols=newcols]
  dt1[,mob:=as.double(mob)]
  dt1[,dttim := parse_date_time(dttim,orders=c("mdyHMS"),tz = "Asia/Kolkata")]
  dt1[,appt_baf:= repl_fullnames(appt_baf)]  # expand abbreviated apartment names
  dt1 <- map_at(dt1,.at = qc(society,isbaf,bafno,appt_baf,locty,city,cluster,zonlead,wardno,wardname,subzone,zone,identifier), as.factor) %>% as.data.table
  dt1 <- map_at(dt1,.at = qc(age,flatcount,pin), as.numeric) %>% as.data.table
  map_if(dt1,is.character, ~ifelse(.x=="NULL" | .x=="" | .x==".",NA,.x) ) %>% setDT
  dt1[,clean_name := str_replace_all(appt_baf,"[:punct:]"," ")]
  compress_addr(dt1,"appt_baf") # replaced repetitive lines by a function
  # map digits 1/2/3 in apartment names to roman numerals I/II/III
  1:3 %>% as.character() %>% walk2(c("I","II","III"),~replace_numeric(dt1,.x,.y))
  # build a geocoding-ready address: prefer the cleaned apartment name, else the society field
  dt1[,google_addr:=ifelse(!is.na(clean_name),
                           prep_addr(paste(clean_name,
                                           ifelse(is.na(locty),"",as.character(locty)),
                                           ifelse(is.na(vol_address),"",as.character(vol_address)),
                                           ifelse(is.na(pin),"",pin))),
                           prep_addr(paste(society,
                                           ifelse(is.na(vol_address),"",
                                                  as.character(vol_address)),
                                           ifelse(is.na(pin),"",pin)))),
      by=email]
}
# Expand known abbreviated apartment names to their canonical full names;
# anything not in the lookup table passes through unchanged (NA stays NA).
repl_fullnames <- function(x){
  x <- as.character(x)
  lookup <- c("SPP"       = "Sai Poorna Premier",
              "SMR Vinay" = "SMR Vinay Endeavour",
              "ZZZ"       = "ZZZ: dummy")
  hit <- match(x, names(lookup))
  ifelse(is.na(hit), x, unname(lookup[hit]))
}
# OLD FUNCTION - no longer used; kept for reference only.
# Careful: a few variables are hard-coded globals (`bbmp_trunc`, `intersect_3`);
# bbmp_trunc is nothing but the BBMP subset whose addresses look like flats/apartments.
# Do not forget to re-index the data once new BBMP data is received.
map_bafno <- function(indx,baf_names,n=3,bafnos){
  stopifnot(uniqueN(baf_names)==length(baf_names))
  stopifnot(length(indx)==length(baf_names))
  addr3 <- indx %>% map2(baf_names,~.x %>% intersect_3(n=n,appt_name=.y) %>% bbmp_trunc[.])
  names(addr3) <- baf_names
  addr3
}
# Main function to merge BBMP quarantine data to BAF member data by fuzzy address
# matching at three compression levels. Returns a list of three wide DTs
# (indx_compr / indx_novow / indx_nodb), each joining matched qwids to BAF base data.
merge_baf <- function(bbmp,baf,volunt=T){
  # pass the two DTs and a variable:
  # generalized on which variable we use for search string as well as pattern string. Pass the variable one of : "addr_compr", "addr_rm_vow", "addr_rm_doub"
  get_match_index <- function(var, x=bbmp,y=baf,base_data=baf_base_data){
    # for each compressed apartment name, collect the BBMP qwids whose address matches it
    appt_indx <- y[!is.na(appt_baf),eval(var),with=F] %>% unique() %>% unlist %>% map(~str_which(x[[var]],regex(.x,ig=T)) %>% x[.,qwid])
    cols <- c(var,"bafno") # prepare the two columns for creating a unique bafno list (may be smaller in length due to compression)
    names(appt_indx) <- y[!is.na(appt_baf),.SD,.SDcols=cols] %>% unique(by=var) %>% .[,bafno] # extract the BAFnos against each compressed appt_name
    appt_indx_nz <- appt_indx %>% compact
    qwatch_ids <- appt_indx_nz %>% map(~data.table(qwatch=.x)) # list of bafno with matching qwatchIDs
    bafcases <- qwatch_ids %>% rbindlist(idcol = "BAFno")
    bafcases_wide <- base_data[bafcases,on=.(bafno=BAFno),nomatch=0]
    x1 <- x[!is.na(qwid)][bafcases_wide,on=.(qwid=qwatch),nomatch=0]
    # ensure optional columns exist so the fixed column selection below never fails
    if (!"ptype" %in% names(x1)) x1[,ptype:=NA]
    if (!"qdays" %in% names(x1)) x1[,qdays:=NA]
    if (!"dateB" %in% names(x1)) x1[,dateB:=NA]
    x1[,qc(bafno,appt_baf,appt_addr,lon,lat,qwid,ptype,qdays,name,age,gender,mob,dateQ,dateB,ADDRESS,google_addr,addr_compr,addr_rm_vow,addr_rm_doub,addr_rm_vow_and_doub,bbmpzone,ward,region,state,valid_mob,flatcount,locty,cluster,zonlead,wardno),with=F]
  }
  # get rid of junk first
  if(volunt==T)
    baf_base_data <- baf[!is.na(appt_baf) & !grepl("Dup",bafno,ig=T),.(bafno,appt_baf,flatcount,locty,cluster,zonlead,wardno)] %>% unique
  else
    baf_base_data <- baf
  baf_base_data[,addr_compr:=str_action(appt_baf,what = "punct")]
  baf_base_data[,addr_rm_doub:=str_action(addr_compr,what = "doub")]
  baf_base_data[,addr_rm_vow:=str_action(addr_compr,what = "vow")]
  # take unique bafnos
  # bafnos <- baf_base_data[!is.na(appt_baf),bafno] %>% unique
  # main step of matching : slice index lists 3 times:
  indx_compr <- get_match_index("addr_compr")
  indx_novow <- get_match_index("addr_rm_vow")
  indx_nodb <- get_match_index("addr_rm_doub")
  list(indx_compr=indx_compr,
       indx_novow = indx_novow,
       indx_nodb = indx_nodb
  )
}
# Pipe helper: drop every column whose name contains "addr" (those columns are
# not fit for printing); the trailing [] forces a visible data.table return.
remove_addr <- function(dt){
  keep_cols <- grep("addr", names(dt), value = TRUE, invert = TRUE)
  dt[, .SD, .SDcols = keep_cols][]
}
# A roundabout way of reducing the printed width of the ADDRESS column without
# transforming the DT by reference: returns a copy with ADDRESS truncated to
# `colwid` characters.
narrow_addr <- function(dt,colwid=40){
  colnames<- names(dt)
  names2 <- setdiff(colnames,"ADDRESS")
  # select everything except ADDRESS (producing a copy), then re-attach a truncated ADDRESS
  dt[,.SD,.SDcols=names2][,ADDRESS:=str_sub(dt$ADDRESS,1,colwid)][]
}
# Process the Google-form responses of volunteer feedback: keep the first 12
# columns, apply short names, and coerce types (numeric secret, IST timestamps,
# factors for the categorical answers).
# NOTE(review): "dateQ" is not among the 12 renamed columns, so the map_at() on
# it can never match anything -- dead line or a missing column in the rename; confirm.
proc_vol_qwforms <- function(dt = volgf){
  dt <- dt[,c(1:12)]
  setnames(dt,qc(ts,idvol,secret,qwid,success,mode,proof,tm_cont,feeling,mention,applicable,comments))
  dt <- map_at(dt,.at = qc(secret), as.numeric) %>% as.data.table
  dt <- map_at(dt,.at = qc(ts,tm_cont), parse_date_time,orders = "mdyHMS",tz="Asia/Kolkata") %>% as.data.table
  dt <- map_at(dt,.at = qc(dateQ), dmy) %>% as.data.table
  dt <- map_at(dt,.at = qc(success,mode,proof,feeling,mention,applicable), as.factor) %>% as.data.table
  dt
}
# Compact a list of data.tables: keep only the elements with at least one row.
# Names and order are preserved. It's a general function - can be used anywhere.
rm_z_nrows <- function(lofdts){
  Filter(function(d) nrow(d) > 0, lofdts)
}
# Prepare a list of per-cluster tibbles (address columns stripped, zero-row
# clusters dropped) for uploading to the volunteer googlesheet.
prep_list_patients <- function(cases = cases_31){
  cases %>% remove_addr %>%
    #cbind(data.table(success="",mode="",proof="",time="",feeling="",mention="")) %>%
    as_tibble %>% split(cases$cluster) %>%
    rm_z_nrows()
}
# Pass a named list of data tables; each element is written into its own
# worksheet tab (named after the list element) of the googlesheet `k`.
# Called purely for its side effects.
upload_vol_sheets <- function(sp=sp25,k=allocation_sheet){
  for (tab_name in names(sp)) {
    write_sheet(sp[[tab_name]], ss = k, sheet = tab_name)
  }
}
# Deprecated: the apptindex-based calculation now supersedes this hard-coded
# short-name filter -- do not use this; use apptindex instead.
# Drops rows whose apartment name matches the pattern (case-insensitive).
rm_shortnames <- function(dt,shortnames="^(ittin|rose|satya|alpine|aoane|opal)|tree"){
  is_short <- grepl(shortnames, dt$appt_baf, ignore.case = TRUE)
  dt[!is_short]
}
# Combine the three match levels from merge_baf() into one deduplicated DT, tagging
# each row with its level (STRICT / NOVOWEL / NODOUBLE). `aindex` carries the
# per-apartment pass flags (passv/passd) that gate the looser matches;
# expand_addr=TRUE keeps the verbose address columns instead of stripping them.
cr_allposs_match <- function(baflist=baflist26,aindex=apptindx,expand_addr=F){
  # looser matches are kept only for apartments whose names passed the vowel/double checks
  dt_novow <- aindex[,.(bafno,passv)][baflist$indx_novow,on="bafno"][passv==T][,passv:=NULL]
  dt_nodbl <- aindex[,.(bafno,passd)][baflist$indx_nodb,on="bafno"][passd==T][,passd:=NULL]
  dt_strict <- baflist$indx_compr %>% {if(expand_addr==F) remove_addr(.) else .}
  # keep only the rows the looser levels add beyond the strict matches
  dt_novowel <- baflist$indx_compr[,c(1:27)] %>% fsetdiff(dt_novow,.) %>% {if(expand_addr==F) remove_addr(.) else .}
  dt_nodouble <- baflist$indx_compr[,c(1:27)] %>% fsetdiff(dt_nodbl,.) %>% {if(expand_addr==F) remove_addr(.) else .}
  dt_strict[,data:="STRICT"]
  dt_novowel[,data:="NOVOWEL"]
  dt_nodouble[,data:="NODOUBLE"]
  rbind(dt_strict,dt_novowel,dt_nodouble) %>% unique(by=c("bafno","qwid")) %>% .[order(bafno)]
}
# Summarise case counts per apartment: adds TOTCASES to `allposs` BY REFERENCE,
# spreads counts per match level to wide form (dcast), and joins in volunteer and
# flat counts from the volunteer table.
summ_counts <- function(allposs, voldt=vol2){
  allposs[,TOTCASES:=.N,appt_baf]
  cases <- allposs[,.N,by=.(appt_baf,TOTCASES,data)] %>% dcast(appt_baf + TOTCASES ~ data,fill=0)
  result <- voldt[,.N,by=.(appt_baf,flatcount)][cases,on="appt_baf"] %>% setnames(c("N"),c("Volunteers"))
  result
}
# For every row of `bbdt` (geocoded BBMP cases), return the geodesic distance to
# the NEAREST apartment in `bafdt`; both DTs need lon/lat columns.
# O(nrow(bbdt) x nrow(bafdt)) distGeo calls.
min_dist <- function(bafdt=bafgeo,bbdt=bb27_gcodes_isappt){
  seq_len(nrow(bbdt)) %>% map_dbl(~distGeo(as.matrix(bafdt[,.(lon,lat)]), bbdt[.x,.(lon,lat)] ) %>% min())
}
# Load BAF membership file into a DT : now direct from the dynamic googlesheet
# (downloaded as CSV) unless a pre-read DT is passed in. Applies short column
# names, type coercions, canonical apartment names and the compressed-address
# columns used to match against BBMP data.
load_members <- function(dt=NULL,ss=bafmembsheet){
  #dt1 <-read_excel(f, range=cell_cols(c(1,8)), col_types = "text") %>% setDT # only first 8 columns relevant.
  if(is.null(dt)) {
    drive_download(file = ss,type = "csv",overwrite = T)
    dt1 <- fread("BAF Member Database.csv")
    setnames(dt1,qc(bafno,appt_long,status,appt_baf,nblks,nflats,appt_addr,locty,city,pin,ph1,ph2,ward,acno,pcno,clustno,totrep,doj,fsales,maplink,clust_name,zonlead,target,ward_name,zone,assly))
  }
  else
    dt1 <- dt
  dt1 <- map_at(dt1,.at = qc(bafno,status,appt_baf,locty,city,status,ward,clustno,ward_name,zone,assly), as.factor) %>% as.data.table
  dt1[,pin:= as.numeric(pin)]
  dt1[,appt_baf:= repl_fullnames(appt_baf)]
  dt1[,clean_name := str_replace_all(appt_baf,"[:punct:]"," ") %>% str_squish]
  # append "Appartments" when the name doesn't already contain "apart", then add the street address
  dt1[,clean_appt_addr :=
        ifelse(!grepl("apart",clean_name,ig=T),
               paste(clean_name,"Appartments"),
               clean_name
        ) %>% paste(appt_addr)
  ]
  # below lines need to be identical in both : process BBMP addresses as well as process BAF data
  compress_addr(dt1,"clean_appt_addr")
  1:3 %>% as.character() %>% walk2(c("I","II","III"),~replace_numeric(dt1,.x,.y))
  # dt1[,google_addr:= prep_addr(paste(clean_appt_addr,
  #                                    ifelse(is.na(locty),"",as.character(locty)),
  #                                    ifelse(is.na(pin),"",pin))),
  #     by=clean_appt_addr]
  dt1
}
# Attach cached geo-coordinates to a membership DT. `dt` must carry bafno
# and appt_baf; the geocode CSV is right-joined on those keys so every row
# of `dt` survives, gaining the geocode columns.
merge_geocodes <- function(dt = bafmembs, file = geocodesfile){
  geo_cache <- fread(file, stringsAsFactors = TRUE)
  geo_cache[dt, on = .(bafno, appt_baf)]
}
# Fold zonal/cluster fields from the volunteer DT into the membership DT
# (one zonal row per bafno; every bafdt row is kept).
merge_zonal_into_baf <- function(voldt = vol2, bafdt = bafmembs){
  zonal <- unique(voldt[,.(bafno,appt_baf,cluster,flatcount,zonlead,wardno)], by = c("bafno"))
  zonal[bafdt, on = .(bafno, appt_baf)]
}
# Fold membership-master columns into the volunteer DT, dropping fields the
# volunteer table already carries; every voldt row is kept.
merge_bafmaster_into_vol <- function(voldt = vol2, bafdt = bafmembs){
  master_slim <- bafdt[, -c("locty","clean_name","city")]
  master_slim[voldt, on = c("bafno","appt_baf")]
}
# pass a DT that has unique bafnos and appt_baf
# Adds fuzzy-match quality columns, by reference, for each apartment name:
#   appt_vow / appt_dbl — name with vowels stripped / doubles collapsed
#                         (via str_action, defined elsewhere)
#   ncvow / ncdbl / ncappt — per-word character counts of each variant,
#                            comma-joined into one string
#   ivwl / idbl — per-word ratio of variant length to original word length
#   passv / passd — TRUE only if EVERY word is either long enough
#                   (variant length >= size) or keeps a high enough ratio
#                   (index > indx); gates the NOVOWEL/NODOUBLE matches in
#                   cr_allposs_match
add_appt_indx <- function(dt,indx=0.6,size=5){
  dt[,appt_vow := str_action(appt_baf,"vow")]
  dt[,appt_dbl := str_action(appt_baf,"dou")]
  dt[,ncvow:=str_split(appt_baf," ",simplify = T) %>% str_action("vow") %>% nchar %>% paste(collapse =","),by=bafno]
  dt[,ncdbl:=str_split(appt_baf," ",simplify = T) %>% str_action("dou") %>% nchar %>% paste(collapse =","),by=bafno]
  dt[,ncappt:=str_split(appt_baf," ",simplify = T) %>% nchar %>% paste(collapse =","),by=bafno]
  dt[,ivwl:=(str_split(ncvow,",",simplify = T) %>% as.numeric() /
               str_split(ncappt,",",simplify = T) %>% as.numeric())
     %>% round(3) %>% paste(collapse =",") ,by=bafno]
  dt[,idbl:=(str_split(ncdbl,",",simplify = T) %>% as.numeric() /
               str_split(ncappt,",",simplify = T) %>% as.numeric()) %>%
       round(3) %>% paste(collapse =","),by=bafno]
  # NOTE: strict > on the index here; edit_appt_indx recomputes with >=
  dt[,passv:=ifelse(str_split(ncvow,",",simplify=T) >= size | str_split(ivwl,",",simplify = T) > indx, T,F) %>% all,by=bafno]
  dt[,passd:=ifelse(str_split(ncdbl,",",simplify=T) >= size | str_split(idbl,",",simplify = T) > indx, T,F) %>% all,by=bafno]
  dt
}
# Recompute the passv/passd flags (by reference) with an INCLUSIVE index
# threshold (>= indx); add_appt_indx originally set them with a strict >.
# Expects the ncvow/ncdbl/ivwl/idbl columns already present on dt.
edit_appt_indx <- function(dt,indx=0.6,size=5){
  dt[,passv:=ifelse(str_split(ncvow,",",simplify=T) >= size | str_split(ivwl,",",simplify = T) >= indx, T,F) %>% all,by=bafno]
  dt[,passd:=ifelse(str_split(ncdbl,",",simplify=T) >= size | str_split(idbl,",",simplify = T) >= indx, T,F) %>% all,by=bafno]
  dt
}
# Reorder dt's columns into the canonical layout for the merged case table.
# Modifies dt by reference (setcolorder) and returns it invisibly.
newcolorder <- function(dt){
  canonical <- c(
    "qwid", "ptype", "name", "mob", "gender", "age", "dateQ", "qdays",
    "appt_baf", "bafno", "ADDRESS", "bbmpzone", "ward", "region", "state",
    "valid_mob", "flatcount", "locty", "cluster", "zonlead", "wardno"
  )
  setcolorder(dt, canonical)
}
# Geocode a vector of addresses, consulting the cache first: addresses
# already present in gmast are reused; only the remainder are sent to the
# geocoding API. Returns cached and fresh rows bound together.
fire_geocode <- function(addr_str, gmast = geomaster){
  cached <- gmast[gaddr %in% addr_str]
  message("detected ",nrow(cached)," addresses existing in cache .. pulling them in")
  uncached <- setdiff(addr_str, gmast$gaddr)
  fresh <- cbind(data.table(gaddr = uncached), geocode(uncached))
  rbind(cached, fresh)
}
# Read the latest electronic IDs of BAF volunteers from worksheet tab 2
# (columns A:D) of the volunteer googlesheet, as a data.table with volid
# coerced to character.
read_eid <- function(gsheet = volsh){
  eid <- setDT(read_sheet(gsheet, sheet = 2, range = "A:D"))
  setnames(eid, qc(bafid, vol_fulid, volid, bafno))
  eid[, volid := as.character(volid)]
}
# Download from latest paperform google sheet. Switch off (download=F) to just read a copy
# BUG FIX: the body previously downloaded the global `paperformsheet`
# regardless of the `gsheet` argument, silently ignoring any sheet the
# caller passed in; it now honors the parameter.
read_paperform <- function(gsheet=paperformsheet,download=T){
  if(download) drive_download(file = gsheet,"paperform.csv",type = "csv",overwrite = TRUE)
  fread("paperform.csv")
}
# Normalise a freshly-read paperform DT: rename the first 22 columns to
# short names, then derive helper columns (all by reference):
#   hqid_upper     — upper-cased home-quarantine ID
#   date_submitted — submission Date parsed from dmy / ymd HMS formats
#   cqcode         — last 5 chars of the CQ submission id, used to join
#                    against volunteer EIDs (see merge_databases)
# BUG FIX: setnames() was previously called on dt[, c(1:22)], which is a
# COPY in data.table — the renames never reached dt, so the := lines below
# would fail to find columns like hqid/subm/cqsid. Rename dt in place.
proc_paperform <- function(dt){
  setnames(dt, old = names(dt)[1:22],
           new = qc(subm,cqsid,attby,mode,ttype,hqid,breached,reason,fir,sympt,distt,zone_taluk,ward_panch,
                    comments,hq_addr_chg,new_addr,mob_chg,new_mob,distt_chg,zone_chg,ward_chg,photo_rem))
  dt[,hqid_upper:=toupper(hqid)]
  dt[,date_submitted:=parse_date_time(subm,orders = c("dmy","ymd HMS")) %>% as.Date()]
  dt[,cqcode:=str_sub(cqsid,-5)]
}
# this is the master merging function, of 4 databases: paperform, baf membership, covid cases, citizen volunteers, electronic ids to volunteers
# cases must have columns: bbmpzone; member must gave columns clust_name, cluster,
# Produces a per-apartment table of daily paperform submissions (from `from`
# onwards, yyyymmdd integer) alongside volunteer/active/case counts, with
# row+column totals; html=T wraps it as a styled htmlTable, else returns the
# data. NOTE(review): totals are hard-coded to columns containing "Aug", and
# the htmlTable column groups assume 9 date columns — brittle outside Aug 2020.
merge_databases <- function(paperdt, member=bafmembs,cases=cases_10_aug,volntr=vol2,eid=eid_old,from=20200806,html=T){
  # attach volunteer EIDs to paperform rows via the 5-char cqcode; inner join
  cqid <- eid[paperdt,on=.(volid=cqcode),nomatch=0]
  case_cnt <- cases[,.(bafno,qwid)][,.N,by=bafno]  # cases per apartment
  cq_u <- cqid[,.(volid,bafno)] %>% unique
  cq_u[,.N,bafno] -> cq_u_cnt                      # distinct active volunteers per apartment
  names(cq_u_cnt) <- qc(bafno,Active)
  volcounts <- volntr[,.N,by=bafno]                # registered volunteers per apartment
  setnames(volcounts,qc(bafno,volnts))
  zonedt <- cases[,.(bafno,bbmpzone)] %>% unique
  zonedt <- zonedt[,.(bbmpzone=first(bbmpzone)),by=bafno] # select one bbmpzone, since many times same baf appt is mapped to a different bbmpzone
  # chain of joins onto the zone table, filter by submission date, then
  # pivot: one column per day (reverse-ordered factor so newest is first)
  x1 <- zonedt[cq_u_cnt,on="bafno"
  ][volcounts,on="bafno"
  ][case_cnt,on="bafno"
  ][member,on="bafno"
  ][cqid,on=.(bafno)][date_submitted>=ymd(from)
  ][,dat_rev:=fct_rev(format(date_submitted,"%b %d"))
  ][,bbmp_master:=as.character(N)] %>%
    dcast(bafno + appt_baf + clust_name + ward_name + bbmpzone + volnts + Active + bbmp_master ~ dat_rev,fill=NA) %>%
    adorn_totals(where = c("row","col"),,,,contains("Aug")) %>%
    {
      if(html==T)
        addHtmlTableStyle(.,align="llllr",col.columns= c(rep("none", 8),rep("#F0F0F0",25))) %>%
        htmlTable(rnames=F,cgroup=c("","VOLUNTEERS","CASES","DATES","TOTAL"),n.cgroup=c(5,2,1,9,1),total = T) # make these numbers more robust by using total days columns
      else .
    }
}
# NOTE: the lines below were non-code residue (dataset-viewer footer text)
# accidentally appended to this file; commented out so the file parses as R.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.