blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40a22afb0a1125dd814e682952960294e7f6b562
|
ab1b73887d52d6c77922320761e5830e45c5178f
|
/source/model/rule_base.R
|
b7d9f06d28bad7af48309c85e65dad46098bbc42
|
[] |
no_license
|
ken-nakano/kaggle_titanic
|
0934db71301758766a1ee1b763331d038e202702
|
ef90928cff423a41fbf5ae7af9b4affddc691de6
|
refs/heads/master
| 2020-04-21T21:04:28.090165
| 2019-02-25T02:23:59
| 2019-02-25T02:23:59
| 169,867,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 680
|
r
|
rule_base.R
|
library(tidyverse)
library(modelr)
project.root <- Sys.getenv("PROJECT_ROOT")
source(str_c(project.root, "/source/service/prepro.R"))
# Rule-based Titanic model.
# Fits by computing the mean survival rate for every (Pclass, Sex, age_group)
# cell of the training data, then predicts survival on the validation set by
# thresholding the matched cell rate at 0.5.
#
# Args:
#   X_train, X_valid: feature data frames containing Pclass, Sex, age_group
#   Y_train: data frame with the Survived column (column-bound to X_train)
#   Y_valid: validation labels (coerced to integer)
# Returns:
#   list(binary predictions, accuracy rate, raw predicted rates)
# NOTE(review): rows of X_valid whose group cell is absent from the training
# data get NA rates after the left_join — confirm downstream handling.
train_and_predict_rb <- function(X_train, X_valid, Y_train, Y_valid) {
  ## Fit: per-cell survival rates
  survival_by_cell <- cbind(X_train, Y_train) %>%
    group_by(Pclass, Sex, age_group) %>%
    summarise(survive_rate = mean(Survived))
  ## Predict: look up each validation row's cell rate
  pred <- X_valid %>%
    left_join(survival_by_cell, by = c("Pclass", "Sex", "age_group")) %>%
    select(survive_rate)
  pred_binary <- as.numeric(pred > 0.5)
  ## Accuracy against the validation labels
  Y_valid <- Y_valid %>% unlist() %>% as.integer()
  ac.rate <- accuracy.rate(table(Y_valid, pred_binary))
  return(list(pred_binary, ac.rate, pred))
}
|
2b9d3a955368b4ef1e03506f191dfee893d1aeca
|
dc3642ea21337063e725441e3a6a719aa9906484
|
/Finance/macd.r
|
ea953bb9f4a64c5dd29851992689a0d32e589111
|
[] |
no_license
|
akmiller01/alexm-util
|
9bbcf613384fe9eefd49e26b0c841819b6c0e1a5
|
440198b9811dcc62c3eb531db95abef8dbd2cbc7
|
refs/heads/master
| 2021-01-18T01:51:53.120742
| 2020-09-03T15:55:13
| 2020-09-03T15:55:13
| 23,363,946
| 0
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
macd.r
|
#install.packages("ggplot2")
#install.packages("dplyr")
# NOTE(review): the install hint above mentions dplyr but the script loads
# plyr — confirm which package is actually required.
library(ggplot2)
library(plyr)
# Toggle between a Windows and a Unix-style home path for the repo checkout.
windows <- TRUE
if(windows){pathpre<-"C:"}else{pathpre<-"~"}
wd <- paste0(pathpre,"/git/alexm-util/Finance/")
setwd(wd)
# Ticker and lookback window passed to the Node.js fetcher below.
stock <- "TSLA"
timespan <- "30"
filename <- paste0("data/",stock,"_macd.csv")
# Shell out to macd_direct.js, which writes the MACD data to `filename`.
command <- paste("node","macd_direct.js",stock,timespan,filename)
system(command)
# Register a one-off S4 class so read.csv can parse m/d/y dates directly
# via colClasses.
setClass("myDate")
setAs("character","myDate", function(from) as.Date(from, format="%m/%d/%y"))
df <- read.csv(filename
,header=FALSE
,col.names=c("Date","Stock","MACD","Signal")
,colClasses=c("myDate","numeric","numeric","numeric"))
# p1: raw price series; p2: MACD and its signal line on one plot.
p1 <- ggplot(data=df,aes(x=Date,y=Stock)) + geom_line() + geom_point()
p2 <- ggplot(data=df,aes(x=Date)) +
geom_line(aes(y=MACD,colour="MACD")) +
geom_point(aes(y=MACD,colour="MACD")) +
geom_line(aes(y=Signal,colour="Signal")) +
geom_point(aes(y=Signal,colour="Signal"))
# Printing the plot objects renders them in the active device.
p1
p2
|
31929a877145900d3954c5e6ca39264cf4da05b1
|
22001710e3dbcec99e032b0dfaacfe9918fcaaa8
|
/run_analysis.R
|
ac185c1cfdfcd79beeec9cc2e847a74694942dbf
|
[] |
no_license
|
Maligit/CleaningData
|
ebbdf892db82b2eecec45b2e818a0c28907e6ee2
|
ef77b374706decc6fb307156db619b495d95ea9b
|
refs/heads/master
| 2021-01-10T19:24:51.896583
| 2014-11-23T21:43:23
| 2014-11-23T21:43:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,781
|
r
|
run_analysis.R
|
#This script will download a set of files from the accelerometers from the Samsung Galaxy S smartphone.
#It then coalesces the files into one dataset capturing the mean and std for various subjects on six different types of activities.
#It then produces a long form of tidy dataset
# load packages
library(data.table)
# set workdrive (script operates relative to the current directory)
setwd("./")
#Create new directory if it does not exist
if (!file.exists("./UCI HAR Dataset")) {
  dir.create("UCI HAR Dataset")
}
#Download the zip archive into a temporary file and unpack it into the working directory
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
temp <- tempfile()
# BUG FIX: the original passed destfile = "temp" and zipfile = "temp" (literal
# file names), so the tempfile() path above was never used and unlink(temp)
# removed nothing. Use the tempfile path consistently.
download.file(fileURL, destfile = temp, method = "curl")
unzip(zipfile = temp, files = NULL, list = FALSE, overwrite = TRUE,
junkpaths = FALSE, exdir = getwd(), unzip = "internal",
setTimes = FALSE)
unlink(temp)
#Read appropriate files into respective directories
features <- read.table("./UCI HAR Dataset/features.txt")
xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
# BUG FIX: the archive ships lowercase y_train.txt / y_test.txt; the original
# uppercase names only worked on case-insensitive file systems.
ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
subtrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subtest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
#Merge files to combine training and test data
xtrain.test <- rbind(xtrain, xtest)
ytrain.test <- rbind(ytrain, ytest)
subtrain.test <- rbind(subtrain, subtest)
colnames(xtrain.test) <- features$V2
#Select mean and std as the only relevant data for the study
# NOTE(review): "mean|std" also matches meanFreq() columns — confirm intended.
mean.std.data <- xtrain.test[grep("mean|std", names(xtrain.test))]
clean.names <- make.names(colnames(mean.std.data))
colnames(mean.std.data) <- clean.names
#substitute the right activity based on id (lookup vector replaces the six
#repetitive conditional assignments; ids are 1..6 so direct indexing works)
activity.labels <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
"SITTING", "STANDING", "LAYING")
ytrain.test$words <- activity.labels[ytrain.test$V1]
#Combine subject and activity as one data.frame (column 2 of ytrain.test is
#the new 'words' column)
sub.activity <- cbind(subtrain.test, ytrain.test[, 2])
colnames(sub.activity) <- c("Subject", "Activity")
sub.activity <- cbind(mean.std.data, sub.activity)
#create a data.table to facilitate lapply operation: mean of every selected
#feature per (Subject, Activity) pair
final.set <- data.table(sub.activity)
final.Xtrain.set <- final.set[, lapply(.SD, mean), by = list(Subject, Activity)]
#Write output as text file (removed the redundant empty-data.frame
#initialization of `outfile` that was immediately overwritten)
write.table(final.Xtrain.set, "subactivity_mean_test.txt", row.names = FALSE)
|
bb88d5aaf1e7e4e96bfb3190301f056e6083d867
|
fadbaf1d8d71091884ba864e0b39887f5dd380ea
|
/LPS2_epi.R
|
de0c1c156429d35286370f81cf92acfd92d80560
|
[] |
no_license
|
ariadnacilleros/TFM
|
bc27936b04beefc9d59eff823c04b13e1241c384
|
71e1890e55b643d3784a60e59474e3a17d62df4d
|
refs/heads/master
| 2022-11-21T12:13:32.373774
| 2020-07-21T14:12:35
| 2020-07-21T14:12:35
| 279,327,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
LPS2_epi.R
|
#########################
#
#STEP:PREPARE .EPI FOR SMR
#
##########################
library(illuminaHumanv4.db)
library(stringr)
# Probes flagged NA for LPS2, and the phenotype table with probe metadata.
NA_LPS2 <- read.table("C:/Users/ariad/OneDrive/Desktop/Illumina_annotation/LPS/NA_LPS2.txt", quote="\"", comment.char="")
phenotype <- read.delim("C:/Users/ariad/OneDrive/Desktop/Illumina_annotation/LPS/phenotype_LPS2.txt")
# Keep only probes not present in the NA list.
probes <- setdiff(phenotype$PROBE_ID, NA_LPS2$V1)
#get chr, start curated
df <- phenotype[phenotype$PROBE_ID %in% probes, c(1,2,4)]
#get strand
x <- illuminaHumanv4GENOMICLOCATION
# Get the probe identifiers that are mapped to any cytoband
mapped_probes <- mappedkeys(x)
# Convert to a list
xx <- as.list(x[mapped_probes])
# Strand is the 4th ":"-separated field of the genomic location string.
# FIX: the original grew strand_vector with append() inside a for loop
# (quadratic) and passed an empty `n = ,` argument to str_split; vapply
# preallocates and guarantees exactly one character result per probe.
strand_vector <- vapply(df$PROBE_ID, function(probe) {
  str_split(xx[probe], ":", simplify = FALSE)[[1]][4]
}, character(1), USE.NAMES = FALSE)
df <- cbind(df, strand_vector)
write.table(x = df, file = "probes_info_LPS2.txt", sep = "\t", quote = FALSE, row.names = FALSE)
#ILMN_3288752
x <- illuminaHumanv4SYMBOL
# Get the probe identifiers that are mapped to a gene symbol
mapped_probes <- mappedkeys(x)
# Convert to a list
xx <- as.list(x[mapped_probes])
|
e185600116e44ee186b3be08d6e20c52a7c95329
|
83342cbd4f745ab062953351a2526843b6ebf162
|
/R/gammatau.R
|
b991cadfdbff9e4e3dc6c6cc5f92c5228f39b4a4
|
[] |
no_license
|
rtlemos/scallops
|
2bec9eaaf1833f21cca494d7ffd839e89ca29c4d
|
8a56259689fdaf63a5c7c5bda61c2d807e6a1d0f
|
refs/heads/master
| 2023-04-02T20:55:07.834958
| 2018-09-03T17:43:53
| 2018-09-03T17:43:53
| 141,383,751
| 0
| 2
| null | 2023-03-18T14:29:47
| 2018-07-18T05:08:30
|
R
|
UTF-8
|
R
| false
| false
| 7,008
|
r
|
gammatau.R
|
#' Build basic priors for gamma and tau
#'
#' @param dpc_grid Discrete Process Convolution grid
#' @param precision_diagonal_value Diagonal value for gamma's prior precision matrix
#' @param precision_nearest_neighbor_factor Off-diagonal factor for gamma's prior precision matrix, in (0,1)
#'
#' @return List with prior mean and precision for gamma, and prior shape and rate parameters for tau
#' @export
#'
#' @examples
#' dpc_grid = get_grid(c(0,1), c(1,2), 1)
#' priors = get_priors(dpc_grid, 5, 0.4)
#' priors$gamma$precision
#' (prior_variance = solve(priors$gamma$precision))
#'
get_priors = function(dpc_grid, precision_diagonal_value = 1e-6,
                      precision_nearest_neighbor_factor = 0) {
  ngridpoints = nrow(dpc_grid$coord)
  # Diagonal precision unless a nearest-neighbor coupling factor is requested.
  # FIX: namespace Diagonal() as Matrix::Diagonal() — every other Matrix call
  # in this file is namespaced, and the bare name fails unless Matrix is
  # attached by the caller.
  precision = if (precision_nearest_neighbor_factor == 0)
    Matrix::Diagonal(ngridpoints, x = precision_diagonal_value)
  else
    get_prior_precision_matrix(dpc_grid, precision_diagonal_value, precision_nearest_neighbor_factor)
  # Zero-mean prior for gamma; Gamma(a = 2, b = 2) prior for tau.
  list(gamma = list(mean = rep(0, ngridpoints), precision = precision), tau = list(a = 2, b = 2))
}
#' Construct gamma's precision matrix for a given DPC grid
#'
#' The matrix has `diag_value` on the diagonal and
#' `-abs(nearest_neighbor_factor) * diag_value` for pairs of gridpoints that
#' are adjacent horizontally or vertically; diagonal neighbors get zero.
#'
#' @param dpc_grid Discrete Process Convolution grid
#' @param diag_value Diagonal value in precision matrix
#' @param nearest_neighbor_factor Off-diagonal factor for adjacent gridpoints
#'
#' @return A sparse precision matrix
#'
#' @examples
#' dpc_grid = get_grid(c(0,1), c(1,2), 1)
#' get_prior_precision_matrix(dpc_grid, 3, 0.2)
#'
get_prior_precision_matrix = function(dpc_grid, diag_value, nearest_neighbor_factor) {
  # FIX: the original computed the same neighbor structure three times in
  # near-identical nested mapply closures (for i, j and x) with unused
  # locals; this builds the triplets in one vectorized pass and yields a
  # numerically identical matrix.
  nlon = length(dpc_grid$lon)
  nlat = length(dpc_grid$lat)
  n = nlon * nlat
  off_value = -abs(nearest_neighbor_factor) * diag_value
  # Gridpoint index of (lon position lo, lat position la) is (lo - 1) * nlat + la;
  # expand.grid with la varying fastest reproduces that enumeration.
  grid = expand.grid(la = seq_len(nlat), lo = seq_len(nlon))
  idx = (grid$lo - 1) * nlat + grid$la
  # Vertical neighbors: same lon column, adjacent lat rows (idx and idx + 1).
  v_from = idx[grid$la < nlat]
  v_to = v_from + 1L
  # Horizontal neighbors: same lat row, adjacent lon columns (idx and idx + nlat).
  h_from = idx[grid$lo < nlon]
  h_to = h_from + nlat
  # Triplets: diagonal entries, then both orientations of each neighbor pair
  # (the matrix is symmetric).
  ii = c(idx, v_from, v_to, h_from, h_to)
  jj = c(idx, v_to, v_from, h_to, h_from)
  xx = c(rep(diag_value, n), rep(off_value, 2 * (length(v_from) + length(h_from))))
  mat = Matrix::sparseMatrix(i = ii, j = jj, x = xx, dims = c(n, n))
  # Guard: a negative determinant signals the off-diagonal coupling is too
  # strong for a valid precision matrix (message and NULL, as before).
  mdet = Matrix::determinant(mat)
  if (mdet$sign == -1) {
    cat("Bad determinant, pick a smaller value for nearest_neighbor_factor.\n")
  } else {
    return(mat)
  }
}
#' Compute the full conditional (given tau, rho, and data) precision of gamma
#'
#' Adds the data contribution tau * K'K to the prior precision, where K is
#' the DPC kernel matrix.
#'
#' @param prior_precision Prior precision matrix for gamma
#' @param kernel_matrix Discrete Process Convolution kernel matrix (fixed values of rho)
#' @param tau Sampled value of tau
#'
#' @return Full conditional precision of gamma
#' @export
#'
#' @examples
#' get_posterior_precision(Matrix::Diagonal(2, 3), list(mat = Matrix::Diagonal(2, 1)), 5)
#'
get_posterior_precision = function(prior_precision, kernel_matrix, tau) {
  data_precision = Matrix::crossprod(kernel_matrix$mat)
  prior_precision + tau * data_precision
}
#' Compute the full conditional (given tau, rho, and data) mean of gamma
#'
#' Solves P_post * m = P_prior * m_prior + tau * K'y for m, where P_post is
#' the full conditional precision and K is the DPC kernel matrix.
#'
#' @param prior_mean Prior mean of gamma
#' @param prior_precision Prior precision of gamma
#' @param posterior_precision Full conditional precision of gamma
#' @param kernel_matrix DPC kernel matrix
#' @param tau Sampled value of tau
#' @param y Data
#'
#' @return Full conditional mean of gamma (a Matrix column vector)
#' @export
#'
#' @examples
#' km = list(mat = Matrix::Diagonal(2, 1))
#' prior_pr = Matrix::Diagonal(2, 0.3)
#' tau = 5
#' post_pr = get_posterior_precision(prior_pr, km, tau)
#' get_posterior_mean(prior_mean = rep(0, 2), prior_precision = prior_pr,
#' posterior_precision = post_pr, kernel_matrix = km, tau = tau, y = c(0.9, 0.7))
get_posterior_mean = function(prior_mean, prior_precision, posterior_precision, kernel_matrix, tau, y) {
  # Right-hand side: prior contribution plus the tau-weighted data term.
  rhs = prior_precision %*% prior_mean + tau * Matrix::crossprod(kernel_matrix$mat, y)
  Matrix::solve(posterior_precision, rhs)
}
#' Sample gamma from its full conditional distribution
#'
#' Computes the full conditional precision and mean of gamma, then either
#' returns the mean as a pseudo-sample or draws one multivariate normal
#' sample with that mean and precision.
#'
#' @param prior_mean Prior mean of gamma
#' @param prior_precision Prior precision of gamma
#' @param kernel_matrix DPC kernel matrix
#' @param tau Sampled value of tau
#' @param y Observation value(s)
#' @param return_mean Provide full conditional mean as pseudo-sample?
#'
#' @return Either the full conditional mean of gamma or a sample of it
#' @export
#'
#' @examples
#' km = list(mat = Matrix::Diagonal(2, 1))
#' prior_pr = Matrix::Diagonal(2, 0.3)
#' tau = 5
#' post_pr = get_posterior_precision(prior_pr, km, tau)
#' get_gamma_sample(prior_mean = rep(0, 2), prior_precision = prior_pr,
#' kernel_matrix = km, tau = tau, y = c(0.9, 0.7), return_mean = TRUE)
#'
get_gamma_sample = function(prior_mean, prior_precision, kernel_matrix, tau, y, return_mean = FALSE) {
  posterior_precision = get_posterior_precision(prior_precision, kernel_matrix, tau)
  posterior_mean = get_posterior_mean(prior_mean, prior_precision, posterior_precision, kernel_matrix, tau, y)
  if (return_mean) {
    return(as.numeric(posterior_mean))
  }
  # Draw z ~ N(0, I) and return mean + R^{-1} z, where R is the upper
  # Cholesky factor of the precision, so the draw has covariance
  # (R'R)^{-1} = posterior_precision^{-1}.
  ndim = nrow(posterior_precision)
  R = Matrix::chol(posterior_precision)
  as.numeric(solve(R, rnorm(ndim)) + posterior_mean)
}
#' Sample tau from its full conditional distribution
#'
#' Conjugate Gamma update: shape gains half the number of observations and
#' rate gains half the residual sum of squares around the DPC fit.
#'
#' @param kernel_matrix DPC kernel matrix
#' @param gamma Sample of gamma
#' @param y Observation values
#' @param prior_tau_a Prior shape parameter
#' @param prior_tau_b Prior rate parameter
#' @param return_mean Provide full conditional mean as pseudo-sample?
#'
#' @return Either the full conditional mean of tau or a sample of it
#' @export
#'
#' @examples
#' km = list(mat = Matrix::Diagonal(2, 1))
#' get_tau_sample(kernel_matrix = km, gamma = c(0.849, 0.66), y = c(0.9, 0.7),
#' prior_tau_a = 2, prior_tau_b = 2, return_mean = TRUE)
#'
get_tau_sample = function(kernel_matrix, gamma, y, prior_tau_a, prior_tau_b, return_mean = FALSE) {
  # Residual sum of squares of the data around the fit K %*% gamma.
  # FIX: '^' replaces '**', which works but is an undocumented parser alias.
  residuals = y - kernel_matrix$mat %*% gamma
  sse = sum(residuals ^ 2)
  posterior_tau_a = prior_tau_a + 0.5 * length(y)
  posterior_tau_b = prior_tau_b + 0.5 * sse
  if (return_mean)
    posterior_tau_a / posterior_tau_b
  else
    rgamma(1, shape = posterior_tau_a, rate = posterior_tau_b)
}
|
494e2469f58b356ee0c72071c2c4495e7632216c
|
7e39ca6104d055974719e15a076b6777c8c3f56a
|
/programs/EDA_uninsured_SAHIE_aggregate_CPS_cty.R
|
7acbaab68830562ac9795e15081beda35fd430ab
|
[] |
no_license
|
Qasim-1develop/flu-SDI-dzBurden-drivers
|
00e61a992e75ea500b9d690ef6f25f77f9638def
|
c66a556c1d5012af3b69c16ba407b3d444ea23be
|
refs/heads/master
| 2022-07-22T16:46:10.612961
| 2018-09-26T14:45:35
| 2018-09-26T14:45:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,200
|
r
|
EDA_uninsured_SAHIE_aggregate_CPS_cty.R
|
## Name: Elizabeth Lee
## Date: 11/16/15
## Function: Plot county-level time series and choropleths for health insurance
## Filenames: SG_covariate_data/Cleaned_Data/clean_HI_SAHIE_aggregate_CPS.csv
## Data Source:
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
#### header #################################
rm(list = ls())
require(readr)
require(tidyr)
require(ggplot2)
require(dplyr)
require(choroplethr)
require(choroplethrMaps)
require(grid)
require(gridExtra)
setwd(dirname(sys.frame(1)$ofile)) # only works if you source the program
#### import data ################################
# State abbreviation lookup keyed by FIPS code (read as character so leading
# zeros survive).
setwd('../reference_data')
abbrDat <- read_csv("state_abbreviations_FIPS.csv", col_types = list(FIPS = col_character()))
setwd(dirname(sys.frame(1)$ofile))
setwd("../../../../Sandra Goldlust's Work/Shared_Data/SG_covariate_data/Cleaned_Data/")
# County-level SAHIE data; `value` (percent uninsured = nui/pop*100) is the
# column county_choropleth expects.
sahieCPS <- read_csv("clean_HI_SAHIE_aggregate_CPS.csv", col_types = "ciiiiiiidididddddddddcc") %>%
filter(type == 'county') %>%
mutate(region = as.numeric(county_id)) %>%
mutate(pctui.calc = nui/pop*100) %>%
mutate(value = pctui.calc) %>%
left_join(abbrDat, by = c("state_id" = "FIPS"))
#### plot formatting ################################
# Plot dimensions (inches) and resolution; `num` = states per facet page.
w <- 6; h <- 4; dp <- 300
w2 <- 9; h2 <- 9
num <- 6
years <- paste0('X', 2005:2007)
choro <- list()
#### clean data ################################
# Assign each state a sequential index used to page through facets below.
uqSt <- sahieCPS %>% select(state_id) %>% unique %>%
mutate(for.plot = seq_along(1:nrow(.)))
fullDat <- sahieCPS %>%
select(year, region, value, county_id, Abbreviation, state_id) %>%
left_join(uqSt, by = "state_id")
indexes <- seq(1, max(fullDat %>% select(for.plot)), by=num)
#### choropleth ################################
setwd(dirname(sys.frame(1)$ofile))
dir.create("../graph_outputs/EDA_uninsured_SAHIE_cty", showWarnings = F)
setwd("../graph_outputs/EDA_uninsured_SAHIE_cty")
# One county choropleth per year.
# NOTE(review): substr.Right is not defined in this file — presumably a
# helper sourced elsewhere in the project; confirm it is in scope when run.
for (y in years){
pltDat <- fullDat %>%
filter(year == substr.Right(y, 4))
choro[[eval(y)]] <- county_choropleth(pltDat, legend = "Uninsured (%)")
ggsave(sprintf("pctui_SAHIE_CPS_cty_%s.png", substr.Right(y, 4)), choro[[eval(y)]], width = w, height = h, dpi = dp)
}
# # not easy to format everything in the same figure
# png(filename = "pctui_cty_0813.png", height = h, width = w*3, units = "in", res = dp)
# plts <- grid.arrange(grobs = choro, widths = unit(rep(w, 3), "in"), nrow = 2)
# dev.off()
#### time series ################################
# Page through states `num` at a time, faceting county time series by state.
for(i in indexes){
dummyplots <- ggplot(fullDat %>% filter(for.plot>= i & for.plot < i+num), aes(x=year, y=value)) +
theme(axis.text=element_text(size=12), axis.title=element_text(size=14,face="bold")) +
geom_line(aes(colour = county_id)) +
scale_y_continuous(name = "Uninsured (%)") +
guides(colour = "none") +
coord_cartesian(ylim = c(0, 45)) +
facet_wrap(~Abbreviation)
# NOTE(review): slice(c(i, i+num-1)) indexes the small filtered/distinct
# table with the global loop index — for i > nrow it returns no rows and
# labs gets NA. slice(c(1, n())) looks like the intent; verify.
labs <- fullDat %>% filter(for.plot>= i & for.plot < i+num) %>% select(Abbreviation) %>% distinct %>% slice(c(i, i+num-1)) %>% unlist
ggsave(sprintf("pctui_SAHIE_CPS_cty_%s-%s.png", labs[1], labs[2]), dummyplots, width = w2, height = h2, dpi = dp)
}
|
a332a4241e8513d080db6201aaede5f62102e199
|
b0d0a890851d4f3f658e3c7d9fd8d14d0bb33f3e
|
/Supplementary Material/Replication Code/Britain-WAS/code/8b_age_adjustment_bis2_imp3.R
|
2ed667a17a8672fb37aedc007aa6b10f3c38de32
|
[] |
no_license
|
Juannadie/oep-wealth-inequality-inh-fam-background
|
7eff1879c5c7c67f98296804c1f3253a8c43b60c
|
9e45ff2002e18c180a94d123448aec071f47aa5f
|
refs/heads/master
| 2023-06-30T14:59:29.829201
| 2021-08-06T22:58:55
| 2021-08-06T22:58:55
| 393,522,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,208
|
r
|
8b_age_adjustment_bis2_imp3.R
|
library(Hmisc)
#library(reldist)
library(tidyverse)
library(sjPlot)
library(sjmisc)
library(sjlabelled)
library(snakecase)
options ("scipen"=100, "digits"=6)
#### WE LOAD THE AGGREGATION FROM THE PREVIOUS FILE, ###############
dataukh <- readRDS(file = "data_rds/datauk-new-final-no-cpt-new-data8a-bis2.rds")
#THEN WE CONVERT THE WEALTH DATA INTO NET TERMS OF AGE AND GENDER
# Age polynomial centered at 65, plus a female dummy and female-by-age
# interaction polynomial, used as regressors below.
dataukh$agedif <- dataukh$age - 65
dataukh$agedif2 <- (dataukh$agedif)^2
dataukh$agedif3 <- (dataukh$agedif)^3
dataukh$agedif4 <- (dataukh$agedif)^4
dataukh$femaledummy <- 0
dataukh$femaledummy[dataukh$sex == "Female"] <- 1
dataukh$sexfactor <- dataukh$sex #Just to have same name as in HFCS for the graphs
dataukh$femaleagedif <- dataukh$agedif * dataukh$femaledummy
dataukh$femaleagedif2 <- (dataukh$femaleagedif)^2
dataukh$femaleagedif3 <- (dataukh$femaleagedif)^3
dataukh$femaleagedif4 <- (dataukh$femaleagedif)^4
#We adjust already using equivalent wealth
dataukh$wealth <- dataukh$eqwealth
#modelwealth <- lm((wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4, data = dataukh, weights = hholdweight)
# Weighted regression of log wealth on the age/gender polynomial; rows with
# wealth <= 0 are dropped by log() — NOTE(review): confirm that is intended.
modelwealth <- lm(log(wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4, data = dataukh, weights = hholdweight)
summary(modelwealth)
# NOTE(review): T/F literals below should ideally be TRUE/FALSE.
modelagegenderadjstUK<- tab_model(modelwealth, digits = 3, digits.p = 3, show.fstat = T, show.se = T, show.ci = F)
modelagegenderadjstUK
#dataukh$wealthpredict <- predict(modelwealth)
# Adjusted wealth = intercept + residual, i.e. log wealth stripped of the
# age/gender profile.
dataukh$wealthpredict <- modelwealth$coefficients[1] + modelwealth$resid
summary(dataukh$wealthpredict)
summary(modelwealth$residuals)
summary(modelwealth$resid)
summary(modelwealth$coefficients[1])
NROW (dataukh$wealthpredict[dataukh$wealthpredict <= 0]) #No negative values
dataukh$wealthpredictexp <- exp(dataukh$wealthpredict)
summary(dataukh$wealth)
summary(dataukh$wealthpredictexp)
### Now we do some graphical test of age and wealth after the adjustment
#We use the prediction in logs for the fit
##### OK NOW WE DO A SIMPLE ESTIMATION SMOOTHING ####
# 9-year moving window: each row is duplicated once per offset in -4..4.
yr_range = c(-4:4) # same as c(-1, 0, 1)
#Make a copy of each row for each entry in yr_range using tidyr::uncount, then create a dummy age_adj that adjusts each row's age to move it into a bucket for summarization:
# df2: smoothed mean/median of log wealth by age bucket (all households).
df2 <- dataukh %>%
uncount(length(yr_range)) %>%
mutate(age_adj = rep(yr_range, length.out = n()),
age_bucket = age + age_adj) %>%
# At this point it looks like:
# income age type age_adj age_bucket
#1 1000 41 1 -1 40
#2 1000 41 1 0 41
#3 1000 41 1 1 42
#4 2000 42 2 -1 41
#5 2000 42 2 0 42
#6 2000 42 2 1 43
#filter(relativewealthalltypes<quantile(relativewealth, probs = .99 )) %>%
group_by(age_bucket) %>%
mutate(mean_wealth = weighted.mean(log(wealth), w=hholdweight)) %>%
mutate(median_wealth = reldist::wtd.quantile(log(wealth), q=0.5, weight=hholdweight)) %>%
# optional, to prune edge years beyond orig data
filter(age_adj == 0)
#filter(age_bucket >= min(dataesplot$age),
#age_bucket <= max(dataesplot$age))
# df2gender: same smoothing, additionally split by sex.
df2gender <- dataukh %>%
uncount(length(yr_range)) %>%
mutate(age_adj = rep(yr_range, length.out = n()),
age_bucket = age + age_adj) %>%
# At this point it looks like:
# income age type age_adj age_bucket
#1 1000 41 1 -1 40
#2 1000 41 1 0 41
#3 1000 41 1 1 42
#4 2000 42 2 -1 41
#5 2000 42 2 0 42
#6 2000 42 2 1 43
#filter(relativewealthalltypes<quantile(relativewealth, probs = .99 )) %>%
group_by(age_bucket, sex) %>%
mutate(mean_wealth = weighted.mean(log(wealth), w=hholdweight)) %>%
mutate(median_wealth = reldist::wtd.quantile(log(wealth), q=0.5, weight=hholdweight)) %>%
# optional, to prune edge years beyond orig data
filter(age_adj == 0)
#filter(age_bucket >= min(dataesplot$age),
#age_bucket <= max(dataesplot$age))
# df3: second smoothing pass over df2 adding smoothed ADJUSTED wealth
# (wealthpredict) statistics.
df3 <- df2 %>%
uncount(length(yr_range)) %>%
mutate(age_adj = rep(yr_range, length.out = n()),
age_bucket2 = age + age_adj) %>%
# At this point it looks like:
# income age type age_adj age_bucket
#1 1000 41 1 -1 40
#2 1000 41 1 0 41
#3 1000 41 1 1 42
#4 2000 42 2 -1 41
#5 2000 42 2 0 42
#6 2000 42 2 1 43
#filter(relativewealthalltypes<quantile(relativewealth, probs = .99 )) %>%
group_by(age_bucket2) %>%
mutate(mean_pred_wealth = weighted.mean(wealthpredict, w=hholdweight)) %>%
mutate(median_pred_wealth = reldist::wtd.quantile(wealthpredict, q=0.5, weight=hholdweight)) %>% #WE have to rename weights first
# optional, to prune edge years beyond orig data
filter(age_adj == 0)
#filter(age_bucket >= min(dataesplot$age),
#age_bucket <= max(dataesplot$age))
# df3gender: adjusted-wealth smoothing split by sex.
df3gender <- df2gender %>%
uncount(length(yr_range)) %>%
mutate(age_adj = rep(yr_range, length.out = n()),
age_bucket2 = age + age_adj) %>%
# At this point it looks like:
# income age type age_adj age_bucket
#1 1000 41 1 -1 40
#2 1000 41 1 0 41
#3 1000 41 1 1 42
#4 2000 42 2 -1 41
#5 2000 42 2 0 42
#6 2000 42 2 1 43
#filter(relativewealthalltypes<quantile(relativewealth, probs = .99 )) %>%
group_by(age_bucket2, sex) %>%
mutate(mean_pred_wealth = weighted.mean(wealthpredict, w=hholdweight)) %>%
mutate(median_pred_wealth = reldist::wtd.quantile(wealthpredict, q=0.5, weight=hholdweight)) %>% #WE have to rename weights first
# optional, to prune edge years beyond orig data
filter(age_adj == 0)
#filter(age_bucket >= min(dataesplot$age),
#age_bucket <= max(dataesplot$age))
#NOW THE GRAPH
#And now we do the plot again for the median type...
#col=Type2, group=Type2,
# graph1: median log wealth by age (not saved to disk).
# NOTE(review): aes uses weight=df2$hholdweight while data=df2gender — the
# two data frames have different row counts, so this relies on recycling;
# likely should be plain hholdweight. Verify before trusting these plots.
graph1 <- ggplot()+
geom_point(data=df2gender, aes(x=age, y=median_wealth, weight=df2$hholdweight))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Share of median wealth")+
ggtitle("Age-Wealth profiles Wealth")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.05, 0.95),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
# graph1gender: median log wealth by age and sex; saved as PDF below.
graph1gender <- ggplot()+
geom_point(data=df2gender, aes(x=age, y=median_wealth, weight=df2$hholdweight, colour=sex))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Median wealth")+
ggtitle("Age-Wealth profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
##theme_minimal()+
theme(
legend.position = c(.05, 0.95),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
# ggsave with no plot argument saves the last plot rendered/created.
ggsave(file="graphs/Wealth-Age-Profile-by-Gender-UK-log-bis2-imp3.pdf", device = "pdf", scale = 1, width = 7.5, height = 5, units = ("in"), dpi = 400, limitsize = TRUE)
# graph1gendermean: mean log wealth by age and sex; saved as PDF below.
graph1gendermean <- ggplot()+
geom_point(data=df2gender, aes(x=age, y=mean_wealth, weight=df2$hholdweight, colour=sex))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Mean Wealth")+
ggtitle("Age-Wealth profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.05, 0.95),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
ggsave(file="graphs/Wealth-Age-Profile-by-Gender-Mean-UK-log-bis2-imp3.pdf", device = "pdf", scale = 1, width = 7.5, height = 5, units = ("in"), dpi = 400, limitsize = TRUE)
#And now we do the plot again for the median type...
#col=Type2, group=Type2,
# graph2: adjusted vs original median wealth by age (all households; not saved).
graph2 <- ggplot()+
geom_point(data=df3, aes(x=age, y=median_pred_wealth, weight=df3$hholdweight, colour = 'Adjusted Wealth'))+
geom_point(data=df3, aes(x=age, y=median_wealth, weight=df3$hholdweight, colour = 'Wealth'))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Median wealth")+
ggtitle("Age-Wealth profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.05, 0.95),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
#And now we do the plot again for the median type...
#col=Type2, group=Type2,
graph2male <- ggplot()+
geom_point(data=df3gender[df3gender$sex=='Male',], aes(x=age, y=median_pred_wealth, weight=hholdweight, colour = 'Adjusted Wealth'))+
geom_point(data=df3gender[df3gender$sex=='Male',], aes(x=age, y=median_wealth, weight=hholdweight, colour = 'Wealth'))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Median wealth")+
ggtitle("Age-Wealth profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.6, 0.3),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
#And now we do the plot again for the median type...
#col=Type2, group=Type2,
graph2female <- ggplot()+
geom_point(data=df3gender[df3gender$sex=='Female',], aes(x=age, y=median_pred_wealth, weight=hholdweight, colour = 'Adjusted Wealth'))+
geom_point(data=df3gender[df3gender$sex=='Female',], aes(x=age, y=median_wealth, weight=hholdweight, colour = 'Wealth'))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Median wealth")+
ggtitle("Age-Wealth profiles Britain")+
#theme_minimal()+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
theme(
legend.position = c(.60, 0.3),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
graph2gendermedian <- ggplot()+
geom_point(data=df3gender[df3gender$sexfactor=='Female',], aes(x=age, y=median_pred_wealth, weight=weight, colour = '4. Adjusted Wealth Women'))+
geom_point(data=df3gender[df3gender$sexfactor=='Male',], aes(x=age, y=median_pred_wealth, weight=weight, colour = '2. Adjusted Wealth Men'))+
geom_point(data=df3gender[df3gender$sexfactor=='Female',], aes(x=age, y=median_wealth, weight=weight, colour = '3. Wealth Women'))+
geom_point(data=df3gender[df3gender$sexfactor=='Male',], aes(x=age, y=median_wealth, weight=weight, colour = '1. Wealth Men'))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Median wealth")+
ggtitle("Adjusted and Original Wealth-Age profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.6, 0.3),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
ggsave(file="graphs/Adjusted-Wealth-Age-Profile-by-Gender-Median-UK-log-bis2-imp3.pdf", device = "pdf", scale = 1, width = 7.5, height = 5, units = ("in"), dpi = 400, limitsize = TRUE)
#And now we do the plot again for the median type...
#col=Type2, group=Type2,
# Same plot as above but using the weighted means instead of medians.
graph2gendermean <- ggplot()+
geom_point(data=df3gender[df3gender$sexfactor=='Female',], aes(x=age, y=mean_pred_wealth, weight=weight, colour = '4. Adjusted Wealth Women'))+
geom_point(data=df3gender[df3gender$sexfactor=='Male',], aes(x=age, y=mean_pred_wealth, weight=weight, colour = '2. Adjusted Wealth Men'))+
geom_point(data=df3gender[df3gender$sexfactor=='Female',], aes(x=age, y=mean_wealth, weight=weight, colour = '3. Wealth Women'))+
geom_point(data=df3gender[df3gender$sexfactor=='Male',], aes(x=age, y=mean_wealth, weight=weight, colour = '1. Wealth Men'))+
#geom_point(data=dataesplot, aes(x=age, y=wealth, group=Type), pch=16, size=1)+
#scale_y_continuous(limits = c(0, 100))+
#coord_cartesian(ylim = c(0, 3)) +
coord_cartesian(xlim = c(35, 80)) +
xlab("Age")+
ylab("Mean wealth")+
ggtitle("Adjusted and Original Wealth-Age profiles Britain")+
guides(colour = guide_legend(override.aes = list(size=3.5)))+
theme(plot.title = element_text(hjust = 0.5))+
#theme_minimal()+
theme(
legend.position = c(.6, 0.3),
legend.justification = c("left", "top"),
legend.key = element_rect(fill = "NA", colour = "transparent"),
legend.box.just = "right",
legend.margin = margin(5, 5, 5, 5),
legend.title=element_blank()
)
# NOTE(review): as above, ggsave() saves last_plot(); pass plot = graph2gendermean
# explicitly to avoid saving a stale figure — verify.
ggsave(file="graphs/Adjusted-Wealth-Age-Profile-by-Gender-Mean-Britain-log-bis2-imp3.pdf", device = "pdf", scale = 1, width = 7.5, height = 5, units = ("in"), dpi = 400, limitsize = TRUE)
####
# Persist the processed household data set (two copies: local and shared drive).
saveRDS(dataukh, file = "data_rds/WAS-After-Step-v2-IO-8b-bis2-imp3.rds")
saveRDS(dataukh, file = "/Users/Juan/Google Drive/A-UK-Research/IO-Wealth-All-Countries/WAS-IOp/code/data_rds/WAS-After-Step-v2-IO-8b-bis2-imp3.rds")
#####
|
e4eafb87d4880d048d56da3d5e177aad8f6296a9
|
d3c7fd200a1302f27262e5ec72567a25409c1d20
|
/aggregate.R
|
c3a44e9d90288cf81e6ce11aa6e4a629f6c33716
|
[] |
no_license
|
sjackman/stat540-project
|
b6c7371594dd20509d9c650c357145b22b7938e4
|
dd864f46bea6d2558039b02b29350e451bf5687a
|
refs/heads/master
| 2016-09-06T11:40:20.098827
| 2013-04-05T03:30:53
| 2013-04-05T03:30:53
| 8,084,590
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,732
|
r
|
aggregate.R
|
# Aggregate the beta values of the probes for each CpG island.
# Input: 1.) normalized beta-value data sets
# 2.) metadata
# Output: 1.) beta-value AND M-value data sets with CPGI attached
# 2.) beta-value AND M-value CPGI-level data (mean)
# NOTE(review): setwd() to a user-specific path makes this script
# non-portable; prefer project-relative paths or here::here().
setwd('~/UBC Stats/STAT540/Group Project/')
library(IlluminaHumanMethylation450k.db)
library(plyr)
library(gtools)
library(lattice)
library(reshape2)
### Load in (EDIT PATH):
# Loads ALL.dat, APL.dat, CTRL.dat (probe x sample beta matrices) into the
# workspace — presumably created by an earlier normalization step; verify.
load('Data/All_3_metasets.Rdata')
load('Data/All_3_sets_normAndFilt.Rdata')
dataList <- list(ALL = ALL.dat, APL = APL.dat, CTRL = CTRL.dat)
#load('~/UBC Stats/STAT540/Group Project/Data/GSE42865_matrix.R')
####### Extracting and cleaning map of probe ID to CpG islands
# Probe-to-CpG-island lookup from the Illumina 450k annotation package.
cpginame <- as.data.frame(IlluminaHumanMethylation450kCPGINAME)
colnames(cpginame) <- c('Probe_ID', 'cpginame')
cpginame$cpginame <- factor(cpginame$cpginame)
#### There are 309,465 probes (out of total 485,577) in 27,176 islands
### Not all probes that map passed through the previous filter:
table(cpginame$Probe_ID %in% rownames(dataList$ALL))
# FALSE TRUE
# 19933 289532
### Restrict mappings to exclude filtered-out probes
cpginame <- subset(cpginame, Probe_ID %in% rownames(dataList$ALL))
##### Restrict all data sets to probes in mapping:
cpgi.probes.Beta <- lapply(dataList, function(x) x[cpginame$Probe_ID,])
###############################################
##### M-value transformation
###############################################
#### Ranges:
(ranges <- ldply(cpgi.probes.Beta, function(x) range(x, na.rm = T)))
# .id V1 V2
# 1 ALL 2.123553e-14 0.9999967
# 2 APL 1.292368e-02 0.9906188
# 3 CTRL 3.266047e-14 0.9994150
names(ranges) <- c('Set', 'Min', 'Max')
rownames(ranges) <- ranges$Set; ranges$Set <- NULL
ranges <- as.matrix(ranges)
#### Check out:
logit(ranges)
logit(ranges + 1e-6) ## This one seems reasonable! (somewhat symmetric on the tails)
### Add epsilon to Beta-values before transforming to M-values:
# Epsilon avoids logit(0) = -Inf for beta values at exactly 0.
cpgi.probes.M <- lapply(cpgi.probes.Beta, function(x) logit(x + 1e-6))
#### Explore a bit:
whole.M <- with(cpgi.probes.M, cbind(ALL, APL, CTRL))
whole.M.tall <- melt(t(whole.M), value.name = 'M', varnames = c('Sample', 'Probe_ID'))
png('Figures/Normalized_M_CpG_allSamps.png', width = 2000, height = 400)
par(mar = c(8, 4, 4, 2))
# NOTE(review): lattice plots are only auto-printed at the top level when run
# via Rscript/interactively; wrap in print() if this script is source()d,
# otherwise the PNG will be empty.
bwplot(M~Sample, data = whole.M.tall, panel = panel.violin,
scales = list(x = list(rot = 90)), xlab = 'Sample', ylab = 'M values',
main = 'Distribution of M-values from normalized and filtered Beta-values')
dev.off()
rm(whole.M.tall)
####################################################
#### Aggregation CpG --> Island
####################################################
#### Tack on to data in both lists: CPGI as a factor
cpgi.probes.Beta <- lapply(cpgi.probes.Beta, function(x)
cbind(x, cpgi = cpginame$cpginame))
cpgi.probes.M <- lapply(cpgi.probes.M, function(x)
cbind(x, cpgi = cpginame$cpginame))
#### Then aggregate both sets by island --> means of both betas and Ms
#### Note: avoid NA's in mean calculation
# by() splits rows on the island factor; colMeans gives per-sample means,
# so simplify2array yields a sample x island matrix.
cpgi.Beta <- lapply(cpgi.probes.Beta, function(x)
simplify2array(by(
x[,-ncol(x)], list(x$cpgi), function(y) colMeans(y, na.rm=T))))
cpgi.M <- lapply(cpgi.probes.M, function(x)
simplify2array(
by(x[,-ncol(x)], list(x$cpgi), function(y) colMeans(y, na.rm=T))))
#### Transpose: (want features in rows for most packages)
cpgi.Beta <- lapply(cpgi.Beta, t)
cpgi.M <- lapply(cpgi.M, t)
##### Save these data sets:
########### Edit the path!!!!!!!!!!!!!!!!!!!
save(cpgi.probes.Beta, file = 'Data/CPGI2Probe_betaList.Rdata')
save(cpgi.probes.M, file = 'Data/CPGI2Probe_MList.Rdata')
save(cpgi.Beta, file = 'Data/CPGI_betaList.Rdata')
save(cpgi.M, file = 'Data/CPGI_MList.Rdata')
|
9b57743a29664cd7b5f14af02cb7e9de6aff55be
|
fb5a4392c02428ef7171e757e9dced834f6b72e1
|
/04.co-eQTL.R
|
5c7c5b22ce11e47245779be91ee29e35a73d05c1
|
[] |
no_license
|
pdicarl3/BiolPsychiatry_2019
|
99d05e44fdf56a3cc109543d4672343725b80ff5
|
82de7fe00e7237b44b6e4b7ebb0957efe9914c69
|
refs/heads/master
| 2020-04-24T19:52:04.835367
| 2019-07-03T13:59:21
| 2019-07-03T13:59:21
| 172,226,048
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,319
|
r
|
04.co-eQTL.R
|
#########################################################################################################
### co-eQTL - association between module eigengene and SNP genotypes ####################################
#########################################################################################################
nameModule = "darkgreen"
nameData = "LIBD"
f = "lmRob"
CPU = 4 # set number of CPU for parallel computing
### load a data.frame with module eigengenes (ME) [samples, ME] containing also demographics information and important covariates
pd = pd # demo-eigengene data.frame
### load SNP genotype data [samples, SNP]
GEN = data.frame(GEN)
### parallel adjust for covariates - get the p-value of the ME-SNP association
### use robust linear model
# NOTE(review): require() silently returns FALSE if a package is missing;
# library() would fail fast here.
require(snow)
require(robust)
require(car)
clus = makeCluster(CPU)
clusterExport(clus, c("pd", "GEN", "lmRob")) ## if you need to export a variable
ptm = proc.time()
###
# One robust regression per SNP column of GEN; keep the coefficient table.
QTL = parLapply(clus, GEN, function(G)
summary(lmRob(darkgreen ~ Age + RIN + mitoMapped + totalMapped + as.factor(Sex) + ## your covariates
EV1 + EV2 + EV3 + EV4 + EV5 + ## your covariates
EV6 + EV7 + EV8 + EV9 + EV10 + ## your covariates
as.factor(Dx) + ## your covariates
G, ## SNP genotypes
data = pd))$coefficients)
t1 = proc.time() - ptm
stopCluster(clus)
### aggregate results (get the last row of coefficients table)
# The SNP term G enters the formula last, so its row is the last one.
clus = makeCluster(CPU)
clusterExport(clus, c("QTL"))
coeQTL = parSapply(clus, QTL, function(l) l[nrow(l),])
stopCluster(clus)
### give variables name
row.names(coeQTL) = c("beta", "st_err", "t_value", "p_value")
coeQTL = data.frame(t(coeQTL))
### multiple comparisons correction
coeQTL$FDR = p.adjust(coeQTL$p_value, method = "fdr")
coeQTL$Bonferroni = p.adjust(coeQTL$p_value, method = "bonferroni")
### assign
# Store results under data-set-specific names so several runs can coexist.
assign(paste0("QTL_", f, nameData), QTL)
assign(paste0("coeQTL_", f, nameData), coeQTL)
### save
save(list=c(paste0("QTL_", f, nameData),
paste0("coeQTL_", f, nameData)),
file=paste0("coeQTL_", f, nameData, ".RData"))
|
89d1dff8865c9f477a7a5ade211d93b884bf5454
|
2605ed5c32e799ddfd7b1f739800e35093fbc24e
|
/R/lib/netdiffuseR/doc/introduction-to-netdiffuser.R
|
6706f0827d93f8ac54632a59ab1dcda68065e2cc
|
[] |
no_license
|
BRICOMATA/Bricomata_
|
fcf0e643ff43d2d5ee0eacb3c27e868dec1f0e30
|
debde25a4fd9b6329ba65f1172ea9e430586929c
|
refs/heads/master
| 2021-10-16T06:47:43.129087
| 2019-02-08T15:39:01
| 2019-02-08T15:39:01
| 154,360,424
| 1
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,918
|
r
|
introduction-to-netdiffuser.R
|
# NOTE(review): this file is knitr::purl() output extracted from the
# netdiffuseR introduction vignette — edit the source .Rmd and regenerate
# rather than editing this file directly.
## ----Setup, echo=FALSE---------------------------------------------------
library(knitr)
knitr::opts_chunk$set(fig.width=9, fig.height=6, out.width="600px",fig.align = "center")
## ----Simulating diffnets-------------------------------------------------
library(netdiffuseR)
s <- 11532
set.seed(s)
diffnet_ran <- rdiffnet(200, 20, "random", seed.p.adopt = .1,
seed.graph = "small-world",
rgraph.args = list(undirected=FALSE, k=4, p=.5),
threshold.dist = function(x) 0.3)
set.seed(s)
diffnet_cen <- rdiffnet(200, 20, "central", seed.p.adopt = .1,
seed.graph = "small-world",
rgraph.args = list(undirected=FALSE, k=4, p=.5),
threshold.dist = function(x) 0.3)
set.seed(s)
diffnet_mar <- rdiffnet(200, 20, "marginal", seed.p.adopt = .1,
seed.graph = "small-world",
rgraph.args = list(undirected=FALSE, k=4, p=.5),
threshold.dist = function(x) 0.3)
## ------------------------------------------------------------------------
summary(diffnet_mar)
## ----Printing the networks-----------------------------------------------
diffnet_ran; diffnet_cen; diffnet_mar
## ----Seed graph and initial adopters, message=FALSE, fig.height=4--------
cols <- c("lightblue","green", "blue")
oldpar <- par(no.readonly = TRUE)
par(mfcol=c(1,3), mai = c(0, 0, 1, 0), mar = rep(1, 4) + 0.1)
set.seed(s);plot(diffnet_ran, main="Random seed")
set.seed(s);plot(diffnet_cen, main="Central seed")
# NOTE(review): `coords <- set.seed(s)` assigns set.seed()'s return value
# (NULL), not the plot's coordinates — layout=coords below is therefore NULL.
# Probably intended: set.seed(s); coords <- plot(diffnet_mar, ...). Verify.
coords <- set.seed(s);plot(diffnet_mar, main="Marginal seed")
par(oldpar)
## ------------------------------------------------------------------------
plot_diffnet(diffnet_ran, slices = c(1,4,8,12,16,20), layout=coords)
## ----Cumulative adopt count----------------------------------------------
plot_adopters(diffnet_ran, bg = cols[1], include.legend = FALSE, what="cumadopt")
plot_adopters(diffnet_cen, bg = cols[2], add=TRUE, what="cumadopt")
plot_adopters(diffnet_mar, bg = cols[3], add=TRUE, what="cumadopt")
legend("topleft", bty="n",
legend = c("Random","Central", "Marginal"),
fill=cols)
## ----Hazard rate---------------------------------------------------------
plot_hazard(diffnet_ran, ylim=c(0,1), bg=cols[1])
plot_hazard(diffnet_cen, add=TRUE, bg=cols[2])
plot_hazard(diffnet_mar, add=TRUE, bg=cols[3])
legend("topleft", bty="n",
legend = c("Random","Central", "Marginal"),
fill=cols)
## ----Infection and susceptibility----------------------------------------
plot_infectsuscep(diffnet_ran, bins=15, K=3,
main = "Distribution of Infectiousness and\nSusceptibility (Random)")
plot_infectsuscep(diffnet_cen, bins=15, K=3,
main = "Distribution of Infectiousness and\nSusceptibility (Central)")
plot_infectsuscep(diffnet_mar, bins=15, K=3,
main = "Distribution of Infectiousness and\nSusceptibility (Marginal)")
## ----Threshold-----------------------------------------------------------
plot_threshold(diffnet_ran)
## ----Multiple-simulations------------------------------------------------
# Simulating a diffusion process with all the defaults but setting
# -seed.nodes- to be random
set.seed(1)
ans0 <- rdiffnet_multiple(R=50, statistic=function(x) sum(!is.na(x$toa)),
n = 100, t = 4, seed.nodes = "random", stop.no.diff=FALSE)
# Simulating a diffusion process with all the defaults but setting
# -seed.nodes- to be central
set.seed(1)
ans1 <- rdiffnet_multiple(R=50, statistic=function(x) sum(!is.na(x$toa)),
n = 100, t = 4, seed.nodes = "central", stop.no.diff=FALSE)
boxplot(cbind(Random = ans0, Central = ans1),
main="Distribution of number of adopters in\ndifferent seedscenarios",
sub = "(50 simulations each)", ylab="Number of adopters")
|
56811c3021abe466d830c901ccd1e5adea8d25b0
|
f3a979438f0ed4305cd01be796cb28f53d3bb538
|
/Chapter15/article_plot1.r
|
937b8e0a5b8f2e1dad72cbbb369509fe589f8d4a
|
[] |
no_license
|
scchess/DataMiningApplicationsWithR
|
2be6a064106a44c76b4577a5c15a0e065cd89260
|
580f5a9d3c749c2f6d00e437a28cd8705bf6ab76
|
refs/heads/master
| 2021-06-14T18:21:08.155141
| 2017-01-27T14:38:02
| 2017-01-27T14:38:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
article_plot1.r
|
#!/usr/bin/Rscript
# Rscript cost k mean
# importations
library(Matrix)
library(cluster)
# NOTE(review): relative source() path — the script must be run from its own
# directory for this to resolve.
source("./plot_functions.r")
par(cex=2)
# several variables
filename = "Cluster3_dnssec.txt" # input filename
#filename = "cluster_entry_5min.txt"
output_file = paste( "k_clusters_bin_unbound.txt") # file where to print
# Print a quick summary of a numeric vector: where its max and min occur,
# their values, and then the full vector. The first slot is treated as a
# placeholder (the k-loops below start at 2) and is overwritten with the
# second entry before reporting.
manuvectorsummary <- function(v){
  v[1] <- v[2]
  pos_max <- which.max(v)
  pos_min <- which.min(v)
  print(paste("max (", pos_max, ") =", max(v)))
  print(paste("min (", pos_min, ") =", min(v)))
  print(v)
}
tmp_file="/tmp/r5"
# Per-request / per-response CPU cost constants for unbound and BIND.
cost_unbound_r=0.096612818
cost_unbound_q=0.005427997
cost_bind_r=0.173573505
cost_bind_q=0.015105740
clab <- c("euQR", "reQR", "tR", "euBR", "reBR", "tBR", "cQRT", "reOCC", "euOCC", "tOCC", "CHR", "eQR", "eCPU", "ePRT", "succRatio", "failRatio", "cltQNbr", "pltQNbr", "Qlen", "Rlen", "Sigcheck", "MIRT", "SDIRT", "MPRT", "SDPRT", "MTTL", "SDTTL")
mat_ent <- read.table(filename, row.names=1, col.names=clab)
mat_ent <- subset(mat_ent, cQRT > 0) # python script return -1 if no request is present and cQRT is used to plot several variables
mat_ent <- subset(mat_ent, MTTL > 0)
mat_ent$iTTL <- 1/mat_ent$MTTL
# NOTE(review): both cost formulas multiply BOTH terms by the *_q constant;
# the *_r constants are never used. Presumably the second term should use
# cost_unbound_r / cost_bind_r for responses — confirm against the paper.
mat_ent$cost_unbound = cost_unbound_q * mat_ent$euQR + cost_unbound_q * mat_ent$reQR
mat_ent$cost_bind = cost_bind_q * mat_ent$euQR + cost_bind_q * mat_ent$reQR
# NOTE(review): attach()/detach() is fragile; order(-mat_ent$cost_bind)
# would avoid it.
attach(mat_ent)
mat_sorted <- mat_ent[order(-cost_bind),]
detach(mat_ent)
mat_ent <- mat_sorted
mat <- mat_ent
# Round-trips the scaled matrix through a temp file (scale() output is a
# matrix; this rereads it as a data.frame).
write.table(scale (mat, scale=TRUE, center=TRUE), file=tmp_file)
mat <- read.table(tmp_file, header=TRUE, row.names=1)
mat <- subset(mat, cltQNbr > 20)
matbind <- subset(mat, select=c("cost_bind"))
matbind <- log(matbind)
# Silhouette widths for k-means (sw) and PAM/k-medoids (tw) per k.
swbind <- numeric(15)
twbind <- numeric(15)
# NOTE(review): sink() is opened here but never closed with sink(); output
# redirection persists after the loop.
sink (output_file, split=TRUE)
for (k in c(2:5)) {
print(paste("k",k))
km <- kmeans(matbind, k, iter.max=1000)
swbind[k] <- summary(silhouette(km$cluster, daisy(matbind))) $ avg.width
png(paste("bind",k,"kmean.png", sep=""))
par(cex=2);plot(mat$cost_bind, col=km$cluster * 5, log="xy", ylab="cost")
dev.off()
km <- pam(matbind, k)
twbind[k] <- km $ silinfo $ avg.width
png(paste("bind",k,"kmedo.png", sep=""))
par(cex=2);plot(mat$cost_bind, col=km$clustering * 5, log="xy", ylab="cost")
dev.off()
}
manuvectorsummary(swbind)
manuvectorsummary(twbind)
|
90a80449967ebcb16201f58ae93f2134f2b6c8cf
|
5f6fca5dcf1331f0f0ecba1af68d062612c9c7d3
|
/Projekt_2/IFSpremium/R/SierpinskiCarpet-data.R
|
b80763048a269daa5d6a261240d42cdcc1a305b0
|
[] |
no_license
|
ultramargarine/ProgramowanieWizualizacja2017
|
421666d332c3ff3ffea40b6d12be10537ef41654
|
fcfd5b393717ec6ca96919656b44d3c4bcbd7d93
|
refs/heads/master
| 2021-09-15T09:01:58.860448
| 2018-03-08T22:00:50
| 2018-03-08T22:00:50
| 105,997,439
| 0
| 0
| null | 2018-03-08T21:58:51
| 2017-10-06T11:32:12
|
R
|
UTF-8
|
R
| false
| false
| 278
|
r
|
SierpinskiCarpet-data.R
|
#' Sierpinski carpet fractal.
#'
#' List of the eight contractions that generate the Sierpinski carpet as an
#' iterated function system (IFS).
#'
#' @docType data
#' @keywords datasets
#' @name SierpinskiCarpet
#' @format An object of class \code{'list'} of length 8, one contraction per
#'   element.
#' @examples
#' plot(createIFS(SierpinskiCarpet), 5)
"SierpinskiCarpet"
|
dfd192702253ced84d8b0be4f1c8d2a22dca5da9
|
a5c335dc2dcb70b72f314b05bff5560789d7cb35
|
/man/ftd_prefixes.Rd
|
64f821417e5babc0c6e80d672588e05e164f6c7e
|
[] |
no_license
|
cran/fulltext
|
24edf71c4a29b8f201750bdd6f86bf7abce09cba
|
c48f2efb650512f5d2bc10282f5ba65b8728e670
|
refs/heads/master
| 2021-06-17T05:05:10.740944
| 2021-06-12T03:10:07
| 2021-06-12T03:10:07
| 40,326,100
| 6
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 690
|
rd
|
ftd_prefixes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ftdoi_other.R
\name{ftd_prefixes}
\alias{ftd_prefixes}
\title{Prefixes}
\usage{
ftd_prefixes(id = NULL)
}
\arguments{
\item{id}{(character) a DOI prefix. Default is \code{NULL}, which
gets all}
}
\value{
named list of details of the publisher for the DOI prefix
}
\description{
Prefixes
}
\examples{
\dontrun{
ftd_prefixes()
ftd_prefixes(id = '10.1080')
# doesn't work
# ftd_prefixes(id = '10.9999')
}
}
\seealso{
Other ftdoi:
\code{\link{ftd_doi}()},
\code{\link{ftd_fetch_patterns}()},
\code{\link{ftd_members}()},
\code{\link{ftdoi_cache}},
\code{\link{prefix_local}()}
}
\concept{ftdoi}
\keyword{internal}
|
f41a2b0a76934d751e7f58b5c218d5b7573ae508
|
5df5335b9dc6e3b24d7f73488067cda39e032524
|
/cachematrix.R
|
a11d2da32f04862f3151178f1457c306afb41f5b
|
[] |
no_license
|
basi4869/ProgrammingAssignment2
|
7eb4ba364fd219db92d83db7e8b0013c75c20145
|
5302aa8c21b1b0c7954fa11967111245ea627e72
|
refs/heads/master
| 2020-12-25T04:08:55.245693
| 2014-06-20T07:44:35
| 2014-06-20T07:44:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
cachematrix.R
|
## Cache-aware matrix wrapper.
##
## Returns a list of four closures that share the matrix `x` and its lazily
## stored inverse through the enclosing environment:
##   get()        - the stored matrix
##   set(y)       - replace the matrix and drop any cached inverse
##   get.inv()    - the cached inverse, or NULL when not yet computed
##   set.inv(inv) - store a computed inverse (used by cacheSolve)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    get = function() x,
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # invalidate the cache when the matrix changes
    },
    get.inv = function() cached_inverse,
    set.inv = function(inverse) cached_inverse <<- inverse
  )
}
## Compute (or fetch) the inverse of a matrix wrapped by makeCacheMatrix().
## On a cache hit the stored inverse is returned and a message is emitted;
## on a miss the inverse is computed with solve(), cached, and returned.
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get.inv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$set.inv(inverse)
    return(inverse)
  }
  message("Retrieving from cache")
  cached
}
|
04bd1df8e9510d08b90bf04810861e9d3928a959
|
164a4905afb7f7b825f704c44daa00914cc97af2
|
/tests/testthat/test-docx-plot.R
|
d21c3cb4933bbaf1d0eea4e85ece579b2b546824
|
[] |
no_license
|
hhy5277/ReporteRs
|
b8403e5a71f124bf682a9e6fb4b8839a72f8676b
|
fc1e891ffea15cdbaeb4d727d17205dccca2089e
|
refs/heads/master
| 2020-06-03T18:38:17.477441
| 2018-11-30T14:06:06
| 2018-11-30T14:06:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
r
|
test-docx-plot.R
|
# Tests that addPlot() on a docx object succeeds for vector and raster
# graphics, with and without explicit sizes (ReporteRs package; needs Java).
context("docx plot")
library(ggplot2)
# Minimal base-graphics plot used as the plotting function under test.
dummy_plot <- function(){
plot.new()
points(.5,.5)
}
# try(..., silent = TRUE) converts a failure into a "try-error" object, so
# expect_is(doc, "docx") is what actually detects the failure.
# NOTE(review): expect_is() is deprecated in testthat 3e; expect_s4_class()/
# expect_s3_class() would be the modern equivalents — verify class system.
test_that("[vg] no size generate no error", {
skip_if_not(check_valid_java_version())
doc <- docx( )
doc <- try( addPlot(doc, fun = dummy_plot,
vector.graphic = TRUE), silent = TRUE)
expect_is(doc, "docx" )
})
test_that("[raster] no size generate no error", {
skip_if_not(check_valid_java_version())
doc <- docx( )
doc <- try( addPlot(doc, fun = dummy_plot,
vector.graphic = FALSE), silent = TRUE)
expect_is(doc, "docx" )
})
test_that("[vg] size generate no error", {
skip_if_not(check_valid_java_version())
doc <- docx( )
doc <- try( addPlot(doc, fun = dummy_plot,
width = 4, height = 4,
vector.graphic = TRUE),
silent = TRUE)
expect_is(doc, "docx" )
})
test_that("[raster] size generate no error", {
skip_if_not(check_valid_java_version())
doc <- docx( )
doc <- try( addPlot(doc, fun = dummy_plot,
width = 4, height = 4,
vector.graphic = FALSE),
silent = TRUE)
expect_is(doc, "docx" )
})
# ggplot objects need print() to render, hence fun = print with x = myplot.
test_that("[vg] test raster", {
skip_if_not(check_valid_java_version())
myplot <- qplot(Sepal.Length, Petal.Length,
data = iris, color = Petal.Width,
alpha = I(0.7) )
doc <- docx( )
doc <- try( addPlot(doc, fun = print,
x = myplot,
vector.graphic = TRUE), silent = TRUE)
expect_is(doc, "docx" )
})
|
0cbc1d7fe9afc6062a5278a529ed63e67e43dd35
|
99f6134c198a9c45ebcf6adbcdfebaeb37511318
|
/shinytest/tests/utils.test.R
|
a88fe635b99bb89028ebfa923872286580697651
|
[] |
no_license
|
zappingseb/EARL2019
|
3d8cb9d3b48239587fb8387e14b054ef8cc852ae
|
53c9d4540c1659d93116ddf727443c2d00682295
|
refs/heads/master
| 2020-07-13T17:56:35.231798
| 2019-09-12T19:07:26
| 2019-09-12T19:07:26
| 205,126,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,598
|
r
|
utils.test.R
|
# Extract the text of each top-level element in an HTML fragment's <body>.
#
# @param app_elem HTML string (e.g. a shiny output element's outerHTML).
# @return list of character strings, one per body child — presumably <p>
#   tags, per the function name; verify against callers.
get_strings_from_p_html <- function(app_elem){
HTML_in <- XML::htmlTreeParse(app_elem, asText=T, encoding = "UTF-8")
XML::xmlApply(
HTML_in$children$html$children$body,
function(x) XML::xmlValue(x[[1]]))
}
# Extract the `src` attribute from the first child of each element inside
# the body's <div> (e.g. image sources embedded in a shiny output).
#
# @param app_elem HTML string containing a <div> in <body>.
# @return list of `src` attribute values (character), one per div child.
get_code_from_div_p_html <- function(app_elem) {
HTML_in <- XML::htmlTreeParse(app_elem, asText=T, encoding = "UTF-8")
XML::xmlApply(
HTML_in$children$html$children$body[["div"]],
function(x) XML::xmlAttrs(x[[1]])["src"])
}
# Parse an HTML <table> into a data.frame of character columns.
#
# Expects the first element of <body> to be a table with a <thead> header
# row and a <tbody> of data rows. Missing cells (NULL, zero-length or NA
# values) are normalised to "".
#
# @param html_input HTML string containing the table.
# @return data.frame with character columns named after the header row;
#   zero rows when the table body is empty.
get_table_from_html <- function(html_input) {
  HTML_in <- XML::htmlTreeParse(html_input, asText=T, encoding = "UTF-8")
  the_table <- HTML_in$children$html$children$body[[1]]
  # Get the table headers
  headers <- the_table$children[["thead"]]
  column_names <- lapply(headers$children[[1]]$children, function(x) XML::xmlValue(x))
  # Get the table content
  content <- c()
  # For each row; seq_len() (not 1:length()) so an empty <tbody> yields an
  # empty loop instead of iterating over c(1, 0).
  for (i in seq_len(length(the_table[["tbody"]]$children)))
  {
    table_row <- the_table[["tbody"]]$children[[i]]
    row_content <- c()
    # for each column
    for (j in seq_along(table_row$children)) {
      v <- XML::xmlValue(table_row[[j]])
      # Normalise any flavour of "missing" cell to the empty string.
      if (is.null(v)) v2 <- as.character("")
      else if (length(v) == 0) v2 <- as.character("")
      else if (is.na(v)) v2 <- as.character("")
      else v2 <- as.character(v)
      row_content <- c(row_content, v2)
    }
    content <- rbind(content, row_content)
  }
  # Guard against an empty body: build a 0-row matrix so colnames<- below
  # does not fail on NULL.
  if (is.null(content)) {
    content <- matrix(character(0), nrow = 0, ncol = length(column_names))
  }
  # Write out the table as a data.frame and delete row.names
  colnames(content) <- as.character(column_names)
  rownames(content) <- NULL
  return(as.data.frame(content, stringsAsFactors = FALSE, check.names = FALSE))
}
|
a980cbafba9f0f505970a92ffc209a81fb2b629d
|
e92f2ab483b8c925fc15500b602b35dfb0d03a0c
|
/process_database.R
|
16e4b92dd3669173c01873927a11022f9023e587
|
[] |
no_license
|
FluentData/weather_data
|
384e7cb065abe23e0c0c99579bc751d9405b3e44
|
371b9441c2e7c653cf2f99f58cd1347f00a52e89
|
refs/heads/master
| 2021-01-17T18:50:45.708935
| 2016-06-19T12:18:26
| 2016-06-19T12:18:26
| 61,478,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
process_database.R
|
# Read raw ISD readings from one SQLite database, build a station lookup
# table, and write a normalised weather database (readings/stations/codes).
# NOTE(review): relies on dplyr/DBI verbs being attached via library(ISDr) —
# confirm ISDr re-exports them, otherwise add explicit library() calls.
library(ISDr)
con <- dbConnect(RSQLite::SQLite(), "D:/Weather/ISD_Indiana.sqlite")
dbListTables(con)
errors <- dbReadTable(con, "Download_Errors")
readings <- dbReadTable(con, "Readings")
dbDisconnect(con)
# Station history metadata; coerce IDs to integer so the joins below match.
history <- getISDhistory() %>%
mutate(USAF = as.integer(USAF),
WBAN = as.integer(WBAN))
# One row per distinct station observed in the readings, with a surrogate ID.
in_stations <- readings %>%
select(USAF, WBAN) %>%
distinct() %>%
left_join(history) %>%
mutate(ID = seq_along(USAF)) %>%
rename(STATION_NAME = STATION.NAME, ELEV_M = ELEV.M.) %>%
select(ID, USAF:END)
# Replace the USAF/WBAN pair in readings with the surrogate station ID.
readings_ <- readings %>%
left_join(select(in_stations, ID:WBAN)) %>%
select(ID, DATE:SEA_LEVEL_PRESSURE_QUALITY)
con <- dbConnect(RSQLite::SQLite(), "D:/Weather/weather_Indiana.sqlite")
dbListTables(con)
dbWriteTable(con, "readings", readings_)
dbWriteTable(con, "stations", in_stations)
dbWriteTable(con, "codes", ISD_lookup)
dbListTables(con)
dbDisconnect(con)
|
babdcddf6fc7629090354757e3ee12760beb45c4
|
d618806ca936970fa03bcc1c0be51c6dfba88b39
|
/tests/testthat.R
|
bffb6ab760b83cbfbc203ddd701ce36a15b9e372
|
[] |
no_license
|
cormac85/packagerrr2
|
675bef0173a32f81b2ffc229422a317ddf286ea9
|
6bfca40956b2f325e7414cd5136e22cb6492631e
|
refs/heads/master
| 2021-05-12T02:39:57.689962
| 2018-01-15T19:04:49
| 2018-01-15T19:04:49
| 117,594,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
testthat.R
|
# Standard testthat bootstrap: runs every test under tests/testthat/ for
# the packagerrr2 package (invoked by R CMD check).
library(testthat)
library(packagerrr2)
test_check("packagerrr2")
|
6941f9f91bb7d0b88665df0b9d6294acf55bb5e6
|
2e238ef4214dea849ce796268862256fdf0bc7da
|
/coastal_ts.R
|
a9756162df511843e53b2ee95c5293c11403eb74
|
[] |
no_license
|
farnazn/coastal_ts
|
1f2618498ae24cba29b628a26a83eeb19bd9837a
|
7441cfd65ec2ae8d230948d89244fb25957fec27
|
refs/heads/master
| 2021-05-15T15:09:45.434468
| 2018-01-24T23:24:00
| 2018-01-24T23:24:00
| 107,284,357
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,008
|
r
|
coastal_ts.R
|
################################################################################
# 1. Setup
################################################################################
# Install any missing CRAN dependencies, then the GitHub-only package, and
# finally attach everything for this session.
pkgs <- c("devtools", "randomForest", "dplyr", "rjags", "arm", "xtable","caret",
          "ggplot2","knitr")
cran_no <- pkgs[!pkgs %in% installed.packages()[,1]]
for(i in cran_no){
  install.packages(i)
}

gh_pkgs <- c("usepa/LakeTrophicModelling")
gh_no <- gh_pkgs[!basename(gh_pkgs) %in% installed.packages()[,1]]
for(i in gh_no){
  devtools::install_github(i)
}

all_pkgs <- c(basename(gh_pkgs),pkgs)
# character.only = TRUE lets library() take package names from strings.
# (TRUE spelled out: T is an ordinary variable and can be reassigned.)
lapply(all_pkgs, library, character.only = TRUE)
################################################################################
# 2. Data
################################################################################
coastal <- read.csv("CoastalWQ_20161129.csv", stringsAsFactors = FALSE)
# VISNUM = 1
coastal <- coastal[coastal[,"VISNUM"]==1,]
# Col_loc = "SURFACE"
coastal <- coastal[coastal[,"Col_loc"]=="SURFACE",]
# SAMPYEAR=2010
coastal <- coastal[coastal[,"SAMPYEAR"]=="2010",]
# SAMPYEAR=2015
# NOTE(review): filtering to 2010 and then to 2015 leaves ZERO rows — one of
# these two filters should presumably be commented out. Verify intent.
coastal <- coastal[coastal[,"SAMPYEAR"]=="2015",]
coastal[,"REGION"] <- as.factor(coastal[,"REGION"])
coastal[,"SUBREGIONS"] <- as.factor(coastal[,"SUBREGIONS"])
#
# Predictor list = all columns except identifiers and response-related ones.
predictors_coastal <- colnames(coastal)
predictors_coastal <- predictors_coastal[predictors_coastal!="Col_Date" &
predictors_coastal!="VISNUM" &
predictors_coastal!="Col_loc"&
predictors_coastal!="Site_Visnum_Layer"&
predictors_coastal!="UID"&
predictors_coastal!="SITE_ID"&
predictors_coastal!="STATE"&
predictors_coastal!="SAMPYEAR"&
predictors_coastal!="TSS..mg.L."&
predictors_coastal!="CHLA..ug.L."]
# Removing the missing values:
coastal<- coastal[!is.na(coastal[,"SECCHI_MEAN..m."])
& !is.na(coastal[,"DIP..mgP.L."])
& !is.na(coastal[,"DIN..mgN.L."])
& !is.na(coastal[,"TN..mgN.L."])
& !is.na(coastal[,"TP..mgP.L."])
& !is.na(coastal[,"SUBREGIONS"])
& !is.na(coastal[,"CHLA..ug.L."]),]
# Replacing the non-detects with 2010 NCCA MDL values
coastal[coastal[,"TP..mgP.L."]==0,"TP..mgP.L."] <- 0.0012
coastal[coastal[,"DIN..mgN.L."]==0,"DIN..mgN.L."] <- 0.001
coastal[coastal[,"DIP..mgP.L."]==0,"DIP..mgP.L."] <- 0.0027
# Three categories based on Bricker et al, 2003
# Chlorophyll a
coastal <- coastal %>% mutate(TS_Chla=cut(CHLA..ug.L., breaks=c(-Inf, 5, 20, Inf), labels=c("Oligo", "Meso", "Eu")))
# Nitrogen
coastal <- coastal %>% mutate(TS_N=cut(TN..mgN.L., breaks=c(-Inf, 0.1, 1, Inf), labels=c("Oligo", "Meso", "Eu")))
# Phosphorus
coastal <- coastal %>% mutate(TS_P=cut(TP..mgP.L., breaks=c(-Inf, 0.01, 0.1, Inf), labels=c("Oligo", "Meso", "Eu")))
# SD
coastal <- coastal %>% mutate(TS_SD=cut(SECCHI_MEAN..m., breaks=c(-Inf, 1, 3, Inf), labels=c("Oligo", "Meso", "Eu")))
# Equal Quantile
Breaks_Chla_Q <- c(quantile(coastal[,"CHLA..ug.L."], probs = seq(0, 1, by = 1/3)))
coastal <- coastal %>% mutate(TS_Chla_Q=cut(CHLA..ug.L., breaks=Breaks_Chla_Q, labels=c("Oligo", "Meso", "Eu")))
# consistent_ts <- ifelse((coastal$TS_Chla==coastal$TS_P
# & coastal$TS_Chla==coastal$TS_N
# ), 1, 0)
#
# coastal <- cbind(coastal, consistent_ts)
# Four categories
# coastal <- coastal %>% mutate(TS_Chla=cut(CHLA..ug.L., breaks=c(-Inf, 5, 20, 60 , Inf), labels=c("Oligo", "Meso", "Eu","Hyper")))
# Equal Quantile
# NOTE(review): this overwrites the 3-level TS_Chla_Q created above with a
# 4-level (quartile) version — only the 4-level version survives.
Breaks_Chla_Q <- c(quantile(coastal[,"CHLA..ug.L."], probs = seq(0, 1, by = 1/4)))
coastal <- coastal %>% mutate(TS_Chla_Q=cut(CHLA..ug.L., breaks=Breaks_Chla_Q, labels=c("Oligo", "Meso", "Eu", "Hyper")))
##################################################
#All Variables
#Clean Up Data - Complete Cases
all_coastal <- data.frame(coastal[predictors_coastal],LogCHLA=log10(coastal$CHLA..ug.L.))
row.names(all_coastal)<-coastal$SITE_ID
all_coastal <- all_coastal[complete.cases(all_coastal),]
################################################################################
# 3. Random Forest for Variable Selection
################################################################################
##################################################
#Model: All Variables
# Regress log10(chl-a) on all retained predictors; importance/proximity are
# kept for the (currently commented-out) evaluation section below.
all_rf<-randomForest(y=all_coastal$LogCHLA,x=all_coastal[,predictors_coastal]
, ntree=5000, importance=TRUE, proximity=TRUE
, keep.forest=TRUE,keep.inbag=TRUE)
################################################################################
# 4. Random Forest for Variable Selection - Evaluation
################################################################################
#
# data_def <- read.csv("data_def.csv", stringsAsFactors = FALSE)
# all_imp <- importance(all_rf)
#
# var_importance <- varImportance(all_imp,"All variables")
#
# dplyr::arrange(var_importance,desc(mean_decrease_acc))
#
# importancePlot(all_rf, data_def=data_def,type='acc',size=3)
#
# dplyr::arrange(var_importance,desc(mean_decrease_gini))
#
# importancePlot(all_rf, data_def=data_def,type='gini',size=3)
################################################################################
# 7. JAGS Model
################################################################################
# Removing the missing values:
# NOTE(review): this NA-filter and the MDL replacement below duplicate the
# identical steps in section 2 — harmless but redundant.
coastal<- coastal[!is.na(coastal[,"SECCHI_MEAN..m."])
& !is.na(coastal[,"DIP..mgP.L."])
& !is.na(coastal[,"DIN..mgN.L."])
& !is.na(coastal[,"TN..mgN.L."])
& !is.na(coastal[,"TP..mgP.L."])
& !is.na(coastal[,"SUBREGIONS"])
& !is.na(coastal[,"CHLA..ug.L."]),]
# Replacing the non-detects with 2010 NCCA MDL values
coastal[coastal[,"TP..mgP.L."]==0,"TP..mgP.L."] <- 0.0012
coastal[coastal[,"DIN..mgN.L."]==0,"DIN..mgN.L."] <- 0.001
coastal[coastal[,"DIP..mgP.L."]==0,"DIP..mgP.L."] <- 0.0027
# Data splitting for cross validation 90%/10%
set.seed(100)
# Sample <- sample(nrow(coastal_consistent),size= round(0.1*dim(coastal)[1]),replace=FALSE)
#
Sample <- sample(nrow(coastal),size= round(0.1*dim(coastal)[1]),replace=FALSE)
# coastal_consistent <- subset(coastal, coastal$consistent_ts==1)
# Evaluation <- coastal_consistent[Sample,]
# Model <- rbind(coastal_consistent[-Sample,], subset(coastal, coastal$consistent_ts==0))
Evaluation <- coastal[Sample,]
Model <- coastal[-Sample,]
#set up the initializations
# Three cut-off points
# Random-normal starting values for the ordinal model's cutpoints.
cutpt.inits <- array(dim= c(3))
for (k in 1:3){
cutpt.inits[k] <- rnorm(1)
}
# Two cut-off points
# cutpt.inits <- array(dim= c(2))
#
# for (k in 1:2){
# cutpt.inits[k] <- rnorm(1)
# }
inits <- function () {list("cutpt_raw" = cutpt.inits)}
# Center, scale, and log transform the predictors
SDD.C <- as.numeric(scale(log(Model$SECCHI_MEAN..m.), center = TRUE, scale = TRUE))
TN.C <- as.numeric(scale(log(Model$TN..mgN.L.), center = TRUE, scale = TRUE))
TP.C <- as.numeric(scale(log(Model$TP..mgP.L.), center = TRUE, scale = TRUE))
DIN.C <- as.numeric(scale(log(Model$DIN..mgN.L.), center = TRUE, scale = TRUE))
DIP.C <- as.numeric(scale(log(Model$DIP..mgP.L.), center = TRUE, scale = TRUE))
# Data handed to JAGS; TS is the ordinal trophic-state response.
DataList = list('TS' = factor(Model[,"TS_Chla_Q"])
,'SD' = SDD.C
,'Nitrogen' = TN.C
,'Phosphorus' = TP.C
,'DIN' = DIN.C
,'DIP' = DIP.C
,'Subregion' = factor(Model[,"SUBREGIONS"]))
#The parameter(s) to be monitored
parameters = c('alpha_SD', 'alpha_N', 'alpha_P', 'alpha_DIN', 'alpha_DIP', 'alpha_SubR'
, 's'
, 'C')
# Number of steps to "tune" the samplers.
adaptSteps = 3000
# Number of steps to "burn-in" the samplers.
#changes from 1000 the initail setting
burnInSteps = 5000
# Number of chains to run.
nChains = 1 # Change to 3 chains for final run
# Total number of steps in chains to save.
numSavedSteps=10000 # Change to 50000 for the final run
# Number of steps to "thin" (1=keep every step).
thinSteps= 5
# Steps per chain.
nIter = ceiling( ( numSavedSteps * thinSteps ) / nChains )
# Start the clock!
ptm <- proc.time()
# Model definition lives in the external file 'coastal_jags.R'.
coastal_jags <- jags.model('coastal_jags.R',data = DataList
, inits, n.chains = nChains, n.adapt = adaptSteps)
# Stop the clock
proc.time() - ptm
################################################################################
################################################################################
#8. JAGS Model Diagnostics
################################################################################
# Start the clock!
ptm <- proc.time()
coastal_coda <- coda.samples(coastal_jags, parameters, n.iter=10000) # Change to 50000 for the final run
# Stop the clock
proc.time() - ptm
#################################################
# Plot
plot(coastal_coda[,1:3])
plot(coastal_coda[,4:6])
plot(coastal_coda[,7:9])
plot(coastal_coda[,10:12])
# Table
print(xtable(cbind(summary(coastal_coda)$quantiles, summary(coastal_coda)$statistics[,2])), floating=FALSE)
################################################################################
################################################################################
#9. JAGS Model Evaluation
################################################################################
# 3 Chains Combined
# Stack the per-chain MCMC sample matrices into one matrix.
simCodaOne.Coda.Coastal <- NULL
for (i in 1:nChains) simCodaOne.Coda.Coastal <- rbind(simCodaOne.Coda.Coastal, coastal_coda[[i]])
MCMC.Coastal <- as.mcmc(simCodaOne.Coda.Coastal)
# Posterior mean, sd and 95% interval for each of the 42 monitored quantities.
Coeff.Coastal.Summary <- matrix(NA, 42, 4)
# Coeff.Coastal.Summary <- matrix(NA, 41, 4)
for (i in 1:42){ Coeff.Coastal.Summary[i,] <- cbind(mean(simCodaOne.Coda.Coastal[,i])
, sd(simCodaOne.Coda.Coastal[,i])
, quantile(simCodaOne.Coda.Coastal[,i], c(0.025), type = 1)
, quantile(simCodaOne.Coda.Coastal[,i], c(0.975), type = 1))}
colnames(Coeff.Coastal.Summary) <- cbind("mean", "sd", "2.5%", "97.5%")
rownames(Coeff.Coastal.Summary ) <-colnames(MCMC.Coastal)
print(xtable(Coeff.Coastal.Summary, floating=FALSE))
# Coefficient Matrix
# NOTE(review): rows 9:41 are taken to be the subregion effects -- confirm this
# matches the ordering of the monitored parameters in the coda output.
Alpha <- rbind(Coeff.Coastal.Summary["alpha_SD",]
, Coeff.Coastal.Summary["alpha_N",]
, Coeff.Coastal.Summary["alpha_P",]
, Coeff.Coastal.Summary["alpha_DIN",]
, Coeff.Coastal.Summary["alpha_DIP",]
, Coeff.Coastal.Summary[9:41,])
# Center, scale, and log transform the evaluation data
# NOTE(review): scaled with the Evaluation set's own mean/sd, not the Model
# set's -- confirm this is intended for out-of-sample prediction.
Eval.SDD.C <- as.numeric(scale(log(Evaluation$SECCHI_MEAN..m.), center = TRUE, scale = TRUE))
Eval.TN.C <- as.numeric(scale(log(Evaluation$TN..mgN.L.), center = TRUE, scale = TRUE))
Eval.TP.C <- as.numeric(scale(log(Evaluation$TP..mgP.L.), center = TRUE, scale = TRUE))
Eval.DIN.C <- as.numeric(scale(log(Evaluation$DIN..mgN.L.), center = TRUE, scale = TRUE))
Eval.DIP.C <- as.numeric(scale(log(Evaluation$DIP..mgP.L.), center = TRUE, scale = TRUE))
# Subregion Matrix
# One-hot indicator matrix: row j gets a 1 in the column of its subregion.
SubRegion <- matrix(0, dim(Evaluation)[1], length(levels(Evaluation[,"SUBREGIONS"])))
for(j in 1:dim(Evaluation)[1]){
for(i in 1:length(levels(Evaluation[,"SUBREGIONS"]))){
if (factor(Evaluation[j, "SUBREGIONS"])==levels(Evaluation[,"SUBREGIONS"])[i])
SubRegion[j,i] <-1
}
}
# Evaluation predictors
Eval.Predictors <- cbind(Eval.SDD.C, Eval.TN.C, Eval.TP.C, Eval.DIN.C, Eval.DIP.C, SubRegion)
# Linear predictor (trophic state index) for the hold-out samples.
predict.EvaluationAll <- Eval.Predictors %*% Alpha[,"mean"]
# Remove NA's
predict.EvaluationAll <- predict.EvaluationAll[!is.na(predict.EvaluationAll)]
Predict.CatAll <- vector(length = length(predict.EvaluationAll))
C <- rbind(Coeff.Coastal.Summary["C[1]",], Coeff.Coastal.Summary["C[2]",], Coeff.Coastal.Summary["C[3]",])
# Classify each index by the posterior-mean cutpoints.
# NOTE(review): values exactly equal to a cutpoint fall through all branches.
for (i in 1:length(predict.EvaluationAll)){
if (predict.EvaluationAll[i]< C[1]) Predict.CatAll[i] <- "Oligo"
if (predict.EvaluationAll[i]< C[2] && predict.EvaluationAll[i]> C[1]) Predict.CatAll[i] <- "Meso"
if (predict.EvaluationAll[i]< C[3] && predict.EvaluationAll[i]> C[2]) Predict.CatAll[i] <- "Eu"
if (predict.EvaluationAll[i]> C[3]) Predict.CatAll[i] <- "Hyper"
}
Pred.CatAll <- factor(Predict.CatAll, levels=c("Oligo", "Meso", "Eu", "Hyper"), ordered=TRUE)
True.CatAll <- Evaluation[, "TS_Chla_Q"]
# True.CatAll <- True.CatAll[!is.na(log(Evaluation$SECMEAN))]
# Confusion matrix of predicted vs observed trophic state on the hold-out set.
# NOTE(review): if the NA filter above dropped rows, Pred.CatAll and
# True.CatAll will have different lengths -- confirm.
CM.TS.Multilevel <- confusionMatrix(Pred.CatAll, True.CatAll)
CM.TS.Multilevel
xtable(CM.TS.Multilevel$table)
CM.TS.Multilevel$overall["Accuracy"]
CM.TS.Multilevel$byClass[,"Balanced Accuracy"]
################################################################################
# Functions
################################################################################
# Expected (mean) ordinal trophic-state class for latent index value x, given
# the three class cutpoints and the logistic scale parameter sigma.
# Bug fix: the third cutpoint argument was declared as `c.3.5` while the body
# used `c3.5`, so the function silently read `c3.5` from the global
# environment instead of its own argument. Renamed to `c3.5` (all callers in
# this script pass arguments positionally, so they are unaffected).
expected <- function(x, c1.5, c2.5, c3.5, sigma){
  p1.5 <- invlogit((x-c1.5)/sigma)
  p2.5 <- invlogit((x-c2.5)/sigma)
  p3.5 <- invlogit((x-c3.5)/sigma)
  # Weighted sum over the 4 ordered classes (1 = Oligo ... 4 = Hyper).
  return((1*(1-p1.5)+2*(p1.5-p2.5)+3*(p2.5-p3.5)+4*p3.5))
  # return((1*(1-p1.5)+2*(p1.5-p2.5)+3*p2.5))
}
# for plotting logistic regression model: jitter 0/1 (or small-integer)
# observations so overplotted points become visible.
jitter.binary <- function(a, jitt=.05, up=1){
  up*(a + (1-2*a)*runif(length(a),0,jitt))
}
# Logit and inverse-logit helpers.
logit <- function(x) return(log(x/(1-x)))
invlogit <- function(x) return(1/(1+exp(-x)))
# One-hot indicator matrix of subregion membership for the Model (training)
# set; row j gets a 1 in the column of its subregion level.
Model_SubRegion <- matrix(0, dim(Model)[1], length(levels(Model[,"SUBREGIONS"])))
for(j in 1:dim(Model)[1]){
for(i in 1:length(levels(Model[,"SUBREGIONS"]))){
if (factor(Model[j, "SUBREGIONS"])==levels(Model[,"SUBREGIONS"])[i]) Model_SubRegion[j,i] <-1
}
}
################################################################################
# Plots
################################################################################
# Expected trophic-state class as a function of the trophic state index, with
# jittered observations overlaid; written to Figures/POLR.pdf.
beta <- Alpha[,"mean"]
c1.5 <- C[1]
c2.5 <- C[2]
c3.5 <- C[3]
sigma <- Coeff.Coastal.Summary["s","mean"]
pdf("Figures/POLR.pdf", width=8, height=10)
par(mar=c(3,3,0.25,0.25), mgp=c(1.5,0.25,0), tck=-0.005)
plot(0, 0, xlim=c(-800,700), ylim=c(1,4), xlab="TSI", ylab="TS",
     type="n", axes=F)
axis(1)
axis(2, at=1:4, labels=c("Oligo","Meso","Eutro", "Hyper"), las=1)
# Vertical segments mark the cutpoints between adjacent classes.
lines(rep(c1.5, 2), c(1,2))
lines(rep(c2.5, 2), c(2,3))
lines(rep(c3.5, 2), c(3,4))
curve(expected(x, c1.5, c2.5, c3.5, sigma), add=TRUE)
# curve(expected(x, c1.5, c2.5, sigma), add=TRUE)
with(Model, points(cbind(SDD.C, TN.C, TP.C, DIN.C, DIP.C, Model_SubRegion)%*%beta,
                   jitter.binary(as.numeric(ordered(Model[,"TS_Chla_Q"]))), col="azure4"))
invisible(dev.off())
#################################################
#################################################
# Class-membership probability curves of the POLR model over the observed TSI
# range, with cutpoint +/- 1 sd bands; written to Figures/POLR_Prob.pdf.
beta_AllVar <- Alpha[,"mean"]
kappa_AllVar <- C
c1.5_AllVar <- kappa_AllVar[1]
c2.5_AllVar <- kappa_AllVar[2]
c3.5_AllVar <- kappa_AllVar[3]
sigma_AllVar <- Coeff.Coastal.Summary["s","mean"]
X <- cbind(SDD.C, TN.C, TP.C, DIN.C, DIP.C, Model_SubRegion)
TSI <- X%*% beta_AllVar
TSI <- TSI[!is.na(TSI)]
# se of kappas (posterior sd of the three cutpoints)
se.c <- cbind(Coeff.Coastal.Summary["C[1]","sd"], Coeff.Coastal.Summary["C[2]","sd"], Coeff.Coastal.Summary["C[3]","sd"])
# Evaluate the four class probabilities on a 100-point grid over the TSI range.
Ibcg <- seq(range(TSI)[1],range(TSI)[2], length.out = 100)
pA <- invlogit((kappa_AllVar[1] - Ibcg)/sigma_AllVar)
pB <- invlogit((kappa_AllVar[2] - Ibcg)/sigma_AllVar) - invlogit((kappa_AllVar[1] - Ibcg)/sigma_AllVar)
pC <- invlogit((kappa_AllVar[3] - Ibcg)/sigma_AllVar) - invlogit((kappa_AllVar[2] - Ibcg)/sigma_AllVar)
pNA <- 1.0 - invlogit((kappa_AllVar[3] - Ibcg)/sigma_AllVar)
# Figure
# Graphical presentation of the POLR model.
# The x-axis is the trophic state index, the y-axis is the probability of being classified into one of the 4 trophic state classes, and the vertical lines and blue bars are the cutpoints $\pm$ one standard error.
pdf("Figures/POLR_Prob.pdf", width=8, height=10)
par(mar=c(3,3,2,0.25), mgp=c(1.5,0.5,0), tck=-0.01)
plot(range(Ibcg), c(0,1), type="n", xlab="Tropic State Index", ylab="Prob")
# Shaded bands: cutpoint +/- one posterior sd.
polygon(x=c(c1.5_AllVar-se.c[1], c1.5_AllVar+se.c[1], c1.5_AllVar+se.c[1],c1.5_AllVar-se.c[1]),
        y=c(0,0,1,1), col="azure3", density=-1, border=NA)
polygon(x=c(c2.5_AllVar-se.c[2], c2.5_AllVar+se.c[2], c2.5_AllVar+se.c[2],c2.5_AllVar-se.c[2]),
        y=c(0,0,1,1), col="azure3", density=-1, border=NA)
polygon(x=c(c3.5_AllVar-se.c[3], c3.5_AllVar+se.c[3], c3.5_AllVar+se.c[3],c3.5_AllVar-se.c[3]),
        y=c(0,0,1,1), col="azure3", density=-1, border=NA)
segments(x0=c(c1.5_AllVar,c2.5_AllVar,c3.5_AllVar), y0=rep(0,3),
         x1=c(c1.5_AllVar,c2.5_AllVar,c3.5_AllVar), y1=rep(1,3),col=grey(0.3))
axis(3, at=c(c1.5_AllVar,c2.5_AllVar, c3.5_AllVar), labels=c("Oligo|Meso","Meso|Eu" ,"Eu|Hyper"))
lines(Ibcg, pA, lwd=2)
lines(Ibcg, pB, lty=2, lwd=2)
lines(Ibcg, pC, lty=3, lwd=2)
lines(Ibcg, pNA, lty=4, lwd=2)
legend(400, 0.5, legend=c("Oligo", "Meso","Eu", "Hyper"),
       lty=1:4, cex=0.75, bty="n")
invisible(dev.off())
################################################
# All Data
# Apply the fitted POLR coefficients to the full coastal data set and export
# the predicted trophic state index and class to coastal2010.csv.
# Center, scale, and log transform the coastal data
Coastal.SDD.C <- as.numeric(scale(log(coastal$SECCHI_MEAN..m.), center = TRUE, scale = TRUE))
Coastal.TN.C <- as.numeric(scale(log(coastal$TN..mgN.L.), center = TRUE, scale = TRUE))
Coastal.TP.C <- as.numeric(scale(log(coastal$TP..mgP.L.), center = TRUE, scale = TRUE))
Coastal.DIN.C <- as.numeric(scale(log(coastal$DIN..mgN.L.), center = TRUE, scale = TRUE))
Coastal.DIP.C <- as.numeric(scale(log(coastal$DIP..mgP.L.), center = TRUE, scale = TRUE))
# Subregion Matrix (one-hot indicator of subregion membership)
SubRegion <- matrix(0, dim(coastal)[1], length(levels(coastal[,"SUBREGIONS"])))
for(j in 1:dim(coastal)[1]){
for(i in 1:length(levels(coastal[,"SUBREGIONS"]))){
if (factor(coastal[j, "SUBREGIONS"])==levels(coastal[,"SUBREGIONS"])[i])
SubRegion[j,i] <-1
}
}
# Evaluation predictors
Coastal.Predictors <- cbind(Coastal.SDD.C, Coastal.TN.C, Coastal.TP.C, Coastal.DIN.C, Coastal.DIP.C, SubRegion)
predict.CoastalAll <- Coastal.Predictors %*% Alpha[,"mean"]
# Remove NA's
# NOTE(review): dropping NAs shortens the vector; the column assignments below
# (coastal$predict_TSI <- ...) will fail if any NA was actually removed.
predict.CoastalAll <- predict.CoastalAll[!is.na(predict.CoastalAll)]
Predict.CatAll <- vector(length = length(predict.CoastalAll))
C <- rbind(Coeff.Coastal.Summary["C[1]",], Coeff.Coastal.Summary["C[2]",], Coeff.Coastal.Summary["C[3]",])
# Classify each index by the posterior-mean cutpoints.
for (i in 1:length(predict.CoastalAll)){
if (predict.CoastalAll[i]< C[1]) Predict.CatAll[i] <- "Oligo"
if (predict.CoastalAll[i]< C[2] && predict.CoastalAll[i]> C[1]) Predict.CatAll[i] <- "Meso"
if (predict.CoastalAll[i]< C[3] && predict.CoastalAll[i]> C[2]) Predict.CatAll[i] <- "Eu"
if (predict.CoastalAll[i]> C[3]) Predict.CatAll[i] <- "Hyper"
}
Pred.CatAll <- factor(Predict.CatAll, levels=c("Oligo", "Meso", "Eu", "Hyper"), ordered=TRUE)
# NOTE(review): c() on a data frame returns a flat list; this object appears
# unused (the columns are attached to `coastal` directly below).
coastal2010 <- c(coastal, predict.CoastalAll, Predict.CatAll)
coastal$predict_TSI <- predict.CoastalAll
coastal$predict_Cat <- Predict.CatAll
View(coastal)
write.csv(coastal, file = "coastal2010.csv")
|
6d38ede45f0e647d03d3c94e69b5a08ddef74a26
|
d1bc382da458eece07e01459a903302b8d129919
|
/new nb.R
|
497cb2b1730599b6a1adee3170ffb99f689d2590
|
[] |
no_license
|
123saaa/Hello
|
00f09762363b1ff81c56d0863e393d5d6cfe6e85
|
d451bb093691ad4dfaed5283960fa4e5728b54e7
|
refs/heads/master
| 2022-12-28T10:59:08.458103
| 2020-10-02T03:12:24
| 2020-10-02T03:12:24
| 281,630,664
| 0
| 0
| null | 2020-08-18T06:01:46
| 2020-07-22T09:14:42
| null |
UTF-8
|
R
| false
| false
| 565
|
r
|
new nb.R
|
# Naive Bayes model on the CAD data using the features selected by RFE
# (nb.features). The .RData file below is expected to provide
# cad.df.balanced, train.df, test.df and nb.features.
library(caret)
library(rsample)
library(klaR)

# Bug fix: a bare `nb.features` reference stood BEFORE the load() call that
# provides it (an error in a fresh session); removed.
load(file = "/Users/wangyunxuan/Downloads/caddata (3).RData")
df <- as.data.frame(cad.df.balanced)
#head(df)
#which( colnames(df)=="Cath" )
#n<-ncol(df)
#c(1:54)
#c(1:42,44:54)

set.seed(123)
# Keep only the RFE-selected predictors plus the response column "Cath".
# Renamed from train/test so the local objects no longer shadow caret::train().
train_set <- train.df[, c(predictors(nb.features), "Cath")]
test_set <- test.df[, c(predictors(nb.features), "Cath")]

control <- trainControl(method = "repeatedcv", number = 10)
# Bug fix: removed a stray empty argument (`method="nb", ,trControl=`) that
# passed a missing value into caret::train().
train_model <- train(Cath ~ ., data = train_set, method = "nb", trControl = control)
train_model$results

# Test-set accuracy.
pred <- predict(train_model, test_set)
mean(pred == test_set$Cath)
|
3474e5c5a1ff5ab26440559c430703089c16c2fe
|
a754951b3c8e53c21463b37c459be7db8445d942
|
/plot4.R
|
0222274fd9f42ad68462c71f6e293c1159d74f8b
|
[] |
no_license
|
kcpyeung/ExData_Plotting1
|
054694a4d8c613775b1281f10b61d84763d22449
|
a803bca85cf68d87d662a433fedd12d02e908865
|
refs/heads/master
| 2021-01-24T21:47:52.466576
| 2015-10-10T14:39:49
| 2015-10-11T06:15:27
| 44,011,658
| 0
| 0
| null | 2015-10-10T13:27:43
| 2015-10-10T13:27:42
| null |
UTF-8
|
R
| false
| false
| 1,049
|
r
|
plot4.R
|
# plot4.R: read the UCI household power consumption data, keep 2007-02-01/02,
# and draw a 2x2 panel of time-series plots into plot4.png.
data <- read.csv("household_power_consumption.txt", sep=";", na.strings="?")
# Dates are stored as d/m/Y strings; keep only the two target days.
data <- data[as.character(data$Date) == "1/2/2007" | as.character(data$Date) == "2/2/2007", ]
png(filename="plot4.png")
par(mfrow=c(2, 2))
# Combine Date and Time columns into timestamps for the x axis.
x <- strptime(paste(data[,1], data[, 2]), "%d/%m/%Y %H:%M:%S")
# top left: global active power
plot(x, data[, 3], type="l", xlab="", ylab="Global Active Power (kilowatts)")
# top right: voltage
plot(x, data[, 5], type="l", xlab="datetime", ylab="Voltage")
# bottom left: the three sub-metering series on one panel.
y1 <- data[, 7]
y2 <- data[, 8]
y3 <- data[, 9]
plot(x, y1, ylim=range(c(y1, y2, y3)), type="l", xlab="", ylab="Energy sub metering")
# Improvement: overlay with lines() instead of par(new = TRUE) + plot(),
# which redrew the axes for every series.
lines(x, y2, col="#ff0000")
lines(x, y3, col="#0000ff")
legend("topright", box.lty=0, lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# bottom right: global reactive power
plot(x, data[, 4], type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
d54e0971c977ce1c63e65057b2c020056a2916a2
|
c9e33c4fa8734f0ea069dab3ff487a1f8f4a4eb1
|
/man/Berkowitz.Rd
|
a8d9895708409e00c619ac971390d6f67a07b54e
|
[] |
no_license
|
mpiktas/acp
|
9ae68d8dff87627abeaacdd01d372eb4f6eec377
|
92f0fc787a2a11601a495074d5e36176ddb634ea
|
refs/heads/master
| 2020-05-01T02:06:15.017074
| 2014-12-31T10:21:43
| 2014-12-31T10:32:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,763
|
rd
|
Berkowitz.Rd
|
\name{Berkowitz}
\alias{Berkowitz}
\title{Berkowitz test}
\description{Implements Berkowitz test (2001) for density evaluation.}
\usage{
Berkowitz(ydata, yhatdata, rep, ...)
}
\arguments{
\item{ydata}{ a data frame containing the real values of the dependent variable. }
\item{yhatdata}{ a data frame containing the fitted values of the dependent variable. }
\item{rep}{ number of uniform distribution drawings. }
\item{\dots}{ not used. }
}
\details{
Diebold et al. (1998) proposed a density evaluation method which consists in computing the sequence of cumulative probability
of the observed counts under the assumed forecast distribution (Probability Transform Integral-PIT). If the density fit is adequate this sequence will be uniformly distributed and will have no-autocorrelation
left neither in level nor when raised to integer powers. For this purpose intuitive graphical methods such as correlograms on the basis of the usual Bartlett confidence intervals, histograms and quantile-quantile (QQ) plots
are used. In the case of discrete data Heinen et al. (2007) propose the use of a uniform zero-one continued extension as suggested by Denuit and Lambert (2005).
Finally instead of using graphical tools for detecting uniformity and independence, Berkowitz (2001) applied a formal test for normality and independence of the inverse standard cumulative normal transform of the PIT sequence
through the estimation of an AR(1) specification and the use of an LR test to the coefficients.
}
\value{
P-value of the Likelihood Ratio test statistic based on the chi-square distribution with 3 degrees of freedom.
}
\author{Siakoulis Vasileios}
\references{
\itemize{
\item {Berkowitz, J., 2001. Testing density forecasts with applications to risk management. American Statistical Association.Journal of Business and Economics Statistics, 19, 4.}
\item {Denuit , M., and Lambert, P., 2005. Constraints on concordance measures in bivariate discrete data. Journal of Multivariate Analysis, 93, 40-57.}
\item {Diebold, F., Gunther, T., and Tay, A., 1998. Evaluating density forecasts with applications to financial risk management. International Economic Review,39, 4, 863-883.}
\item {Heinen,A., Rengifo, E., 2007. Multivariate autoregressive modeling of time series count data using copulas. Journal of empirical finance 14 (2007) 564-583.}
}
}
\examples{
data(polio)
#Create time trend and seasonality variables
trend=(1:168/168)
cos12=cos((2*pi*(1:168))/12)
sin12=sin((2*pi*(1:168))/12)
cos6=cos((2*pi*(1:168))/6)
sin6=sin((2*pi*(1:168))/6)
polio_data<-data.frame(polio, trend , cos12, sin12, cos6, sin6)
mod1 <- acp(polio~-1+trend+cos12+sin12+cos6+sin6,data=polio_data)
summary(mod1)
Berkowitz(polio_data[[1]],fitted(mod1),50)
}
\keyword{Berkowitz}
|
f6d351deac7d24ec09f9ddda528aa94c7e8e3a42
|
917e0bde0b68e7f1bfdb489b68137ebf9b06f704
|
/examples/example-yshape.R
|
db2b0480334db8239398111c5eda5fe4adb48443
|
[] |
no_license
|
p-soma/cedarmapper
|
331a25e846d511b269b72b266b78e28de2ca268d
|
a665d326ec241da650a802d3297e565a2ca54191
|
refs/heads/master
| 2021-06-17T03:58:52.743438
| 2017-05-22T15:33:47
| 2017-05-22T15:33:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,651
|
r
|
example-yshape.R
|
#' Y-shaped data example
#' Synthetic x,y data in the shape of a "Y" when plotted, for testing Mapper output
#'
library(cedar)
testdata = y_data(100)
plot(testdata, main="Our example Y-shaped, x-y data")
cat ("Press [enter] to run Mapper and plot")
line <- readline()
# create graphmapper object, set lense to be the density function
gm = graphmapper(dataset = testdata, lensefun=lense.density, lenseparam = 0.5,
                 partition_count=10, overlap = 0.5, bin_count=15)
# gm pipeline, run manually: distance matrix -> partitions -> clusters -> nodes
gm$distance = dist(gm$d,method="euclidean", upper=FALSE)
gm$partitions = partition.graphmapper(gm)
# print information about the partitions
for(i in 1:length(gm$partitions)) {print(paste0("parition ", i, " sized ", length(gm$partitions[[i]])))}
gm[["clusters"]] = clusters.graphmapper(gm)
gm[["nodes"]] = nodes.graphmapper(gm)
# print information about the nodes
print(paste0("created ", length(gm$nodes), " nodes"))
# build links of overlapping nodes as an adjacency matrix
gm[["adjmatrix"]] = adjacency.graphmapper(gm)
# create iGraph and plot
plot(graph.graphmapper(gm))
# set groups by dividing nodes in half (arbitrarily)
midnode = floor(length(gm$nodes)/2)
gm[["groups"]] <- setgroup.graphmapper(gm, 1:midnode,group_id = "1")
# NOTE(review): midnode+1:length(gm$nodes) parses as midnode + (1:length(...))
# because `:` binds tighter than `+` -- likely intended (midnode+1):length(gm$nodes).
gm[["groups"]] <- setgroup.graphmapper(gm, midnode+1:length(gm$nodes),group_id = "2")
print(kstable(gm))
# example 2, using the 'maker' function to create mapper object in one step
# gm2 = makegraphmapper(dataset = testdata, lensefun=lense.projection, lenseparam = 'y',
# partition_count=4, overlap = 0.5, bin_count=10)
# plot(graph.graphmapper(gm2),main="Y-data mapper graph")
|
c7bb52b3fcff6c35fb71bac092dc0738ec47b4d5
|
ff7c424aab80e944be984f44578ae0487c2b751b
|
/evolutionnary_constrainte_dnds_pspn_10000genomes.R
|
93b1cfc02eb5bdee3d53c651397794f1bab2efeb
|
[] |
no_license
|
mmdavid/R_ensembl
|
07db96d099c827449519d598be008d53202d0184
|
f76d704af3d78849b8bff6e734a52faa3e9ff8d6
|
refs/heads/master
| 2021-01-10T06:43:38.482971
| 2016-01-13T23:07:59
| 2016-01-13T23:07:59
| 36,760,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,131
|
r
|
evolutionnary_constrainte_dnds_pspn_10000genomes.R
|
#useful ggplot2 links:
#http://docs.ggplot2.org/0.9.3.1/geom_point.html
#now let's try with the 1000 Genomes project
#pnps_file_1000Gafricanpops contains Ensembl Gene ID, PN, PS, DN and DS (order of the columns). PN and PS is from african pops from the 1000 Genomes project.
pnps_file_1000Gafricanpops<-read.table("pnps_file_1000Gafricanpops")
colnames(pnps_file_1000Gafricanpops)<-c("Ensembl_Gene_ID", "PN", "PS", "DN", "DS")
# Per-gene ratios; zero denominators produce Inf/NaN, handled further down.
pnps_file_1000Gafricanpops$PNPS<-(pnps_file_1000Gafricanpops$PN /pnps_file_1000Gafricanpops$PS)
pnps_file_1000Gafricanpops$DNDS<-(pnps_file_1000Gafricanpops$DN /pnps_file_1000Gafricanpops$DS)
#quick silly fix
# NOTE(review): nonuniqueASD_emsblID / uniqueASD_emsblID are assumed to be
# defined earlier in the session -- confirm.
names(nonuniqueASD_emsblID)<-c()
uniqueASD_emsblID
# Placeholder ASD status, overwritten per gene below.
none<-rep("none",dim(pnps_file_1000Gafricanpops)[1])
pnps_file_1000Gafricanpops$ASD_status<-none
#fix fix fix
allgenes<-c(nonuniqueASD_emsblID,uniqueASD_emsblID) #1208 genes
pnps_file_1000Gafricanpops$Ensembl_Gene_ID<-as.character(pnps_file_1000Gafricanpops$Ensembl_Gene_ID)
# Keep only the ASD gene set.
pnps_file_1000GafricanpopsASD<-subset(pnps_file_1000Gafricanpops, pnps_file_1000Gafricanpops$Ensembl_Gene_ID %in% allgenes)
#850 genes with DNDS or PNPS values
# Label each gene as comorbid ASD vs ASD-only.
for (i in 1:dim(pnps_file_1000GafricanpopsASD)[1]){
if (pnps_file_1000GafricanpopsASD$Ensembl_Gene_ID[i] %in% nonuniqueASD_emsblID) {pnps_file_1000GafricanpopsASD$ASD_status[i] <- "comorbdASD"} else {pnps_file_1000GafricanpopsASD$ASD_status[i] <- "ASDonly"}
cat(i,"\t")
}
#ok let's do the dnds boxplot
#save a version
pnps_file_1000GafricanpopsASD_save<-pnps_file_1000GafricanpopsASD
#one version with zero instead of NA because it's zero divided by a number
pnps_file_1000GafricanpopsASD[is.na(pnps_file_1000GafricanpopsASD)] <- 0
------------------------------------------------------------------------------------------------------------------------------------------
#DNDS
------------------------------------------------------------------------------------------------------------------------------------------
#one version without any modification
pnps_file_1000GafricanpopsASD1<- pnps_file_1000GafricanpopsASD
#Let's try with no changes at all: DNDS
update_geom_defaults("point", list(colour = NULL)) #to allow the dots to be the same color as the rest; need to reset at the end with update_geom_defaults("point", list(colour = "black"))
# Boxplot of dN/dS by ASD status with jittered points overlaid.
p2<-ggplot(pnps_file_1000GafricanpopsASD1, aes(ASD_status,DNDS))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#a few warnings (Warning messages) due to NaN and Inf:
# Removed 403 rows containing non-finite values (stat_boxplot).
# Removed 376 rows containing missing values (geom_point).
wilcox.test( DNDS ~ ASD_status, data=pnps_file_1000GafricanpopsASD1)
#Wilcoxon rank sum test with continuity correction: not significant, but lots of NaN and Inf problems
#Wilcoxon rank sum test with continuity correction
#data: DNDS by ASD_status
#W = 76363.5, p-value = 0.7743
#alternative hypothesis: true location shift is not equal to 0
#let's try removing all Inf
#remove the rows with infinite DNDS
inf_pb<-pnps_file_1000GafricanpopsASD$DNDS == "Inf"
noinf_pnps_file_1000GafricanpopsASD<-pnps_file_1000GafricanpopsASD[!inf_pb,]
#test
wilcox.test( DNDS ~ ASD_status, data= noinf_pnps_file_1000GafricanpopsASD)
# Of course: Wilcoxon rank sum test with continuity correction
#data: DNDS by ASD_status
#W = 76363.5, p-value = 0.7743
#alternative hypothesis: true location shift is not equal to 0
#let's remove zero and inf
no_zero_pnps_file_1000GafricanpopsASD2<-pnps_file_1000GafricanpopsASD_save
#one version with zero instead of NA because it's zero divided by a number
inf_pb<-no_zero_pnps_file_1000GafricanpopsASD2$DNDS == "Inf"
no_zero_pnps_file_1000GafricanpopsASD2<-no_zero_pnps_file_1000GafricanpopsASD2[!inf_pb,]
zero<-is.na(no_zero_pnps_file_1000GafricanpopsASD2$DNDS)
no_zero_pnps_file_1000GafricanpopsASD2<-no_zero_pnps_file_1000GafricanpopsASD2[!zero,]
#test
wilcox.test( DNDS ~ ASD_status, data=no_zero_pnps_file_1000GafricanpopsASD2)
#still not significant
#Wilcoxon rank sum test with continuity correction
#data: DNDS by ASD_status
#W = 22827.5, p-value = 0.4562
#alternative hypothesis: true location shift is not equal to 0
#ok and now let's add 1 to avoid all the problems of zero in the ratios
#now add one to every count
pnps_file_1000GafricanpopsASD2<-pnps_file_1000GafricanpopsASD_save
dim(pnps_file_1000GafricanpopsASD2)
pnps_file_1000GafricanpopsASD2$PS<-pnps_file_1000GafricanpopsASD2$PS + 1
pnps_file_1000GafricanpopsASD2$PN<-pnps_file_1000GafricanpopsASD2$PN + 1
pnps_file_1000GafricanpopsASD2$DS<-pnps_file_1000GafricanpopsASD2$DS + 1
pnps_file_1000GafricanpopsASD2$DN<-pnps_file_1000GafricanpopsASD2$DN + 1
pnps_file_1000GafricanpopsASD2$PNPS<-(pnps_file_1000GafricanpopsASD2$PN /pnps_file_1000GafricanpopsASD2$PS)
pnps_file_1000GafricanpopsASD2$DNDS<-(pnps_file_1000GafricanpopsASD2$DN /pnps_file_1000GafricanpopsASD2$DS)
#test
wilcox.test( DNDS ~ ASD_status, data= pnps_file_1000GafricanpopsASD2)
#plot of +1
update_geom_defaults("point", list(colour = NULL)) #to allow the dots to be the same color as the rest; need to reset at the end with update_geom_defaults("point", list(colour = "black"))
p2<-ggplot(pnps_file_1000GafricanpopsASD2, aes(ASD_status,DNDS))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#save as dsdn_adding1_1000genomes
#density graph
blou<-ggplot(pnps_file_1000GafricanpopsASD2, aes(DNDS, fill = ASD_status)) +
stat_density(aes(y = ..density..), position = "identity", color = "black", alpha = 0.5)
blou + scale_fill_manual( values = c("green","yellow"))
#save as dnds_densitygraph_addingone_1000genomes
#not significant
#Wilcoxon rank sum test with continuity correction
#data: DNDS by ASD_status
#W = 85905, p-value = 0.2513
#alternative hypothesis: true location shift is not equal to 0
# ------------------------------------------------------------------------------------------------------------------------------------------
#PSPN
# ------------------------------------------------------------------------------------------------------------------------------------------
#all data Nan -> zero
wilcox.test(PNPS ~ ASD_status, data=pnps_file_1000GafricanpopsASD1)
#not significant
#Wilcoxon rank sum test with continuity correction
#data: PNPS by ASD_status
#W = 77548, p-value = 0.982
#alternative hypothesis: true location shift is not equal to 0
#remove Inf
# Bug fix: the original line subset `pnps_file_1000GafricanpopsASD_save_noinfPNPS`
# before that object existed and masked it with `inf_pb` (the stale DNDS-based
# mask computed much earlier) instead of the PNPS mask built on the previous
# line. Subset the saved data frame with the PNPS mask, and test infiniteness
# directly instead of comparing a numeric column to the string "Inf" (which
# turns NA entries into all-NA rows).
inf_pb_pnps <- is.infinite(pnps_file_1000GafricanpopsASD_save$PNPS)
pnps_file_1000GafricanpopsASD_save_noinfPNPS <- pnps_file_1000GafricanpopsASD_save[!inf_pb_pnps,]
#test
wilcox.test( PNPS ~ ASD_status, data= pnps_file_1000GafricanpopsASD_save_noinfPNPS)
#significant; W = 22838, p-value = 0.05449
#remove Inf and NaN/NA
#test
zero <- is.na(pnps_file_1000GafricanpopsASD_save_noinfPNPS$PNPS)
no_zero_pnps_file_1000GafricanpopsASD_save_noinfPNPS <- pnps_file_1000GafricanpopsASD_save_noinfPNPS[!zero,]
wilcox.test( PNPS ~ ASD_status, data=no_zero_pnps_file_1000GafricanpopsASD_save_noinfPNPS)
#W = 22838, p-value = 0.05449 YES
#adding 1 (pseudocount) version of the PN/PS comparison
#test
wilcox.test( PNPS ~ ASD_status, data= pnps_file_1000GafricanpopsASD2)
#yes, significant
#W = 88930.5, p-value = 0.037
#alternative hypothesis: true location shift is not equal to 0
#plot: boxplot of PN/PS by ASD status with jittered points
update_geom_defaults("point", list(colour = NULL)) #to allow the dots to be the same color as the rest; need to reset at the end with update_geom_defaults("point", list(colour = "black"))
p2<-ggplot(pnps_file_1000GafricanpopsASD2, aes(ASD_status,PNPS))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#density graph
blou<-ggplot(pnps_file_1000GafricanpopsASD2, aes(PNPS, fill = ASD_status)) +
stat_density(aes(y = ..density..), position = "identity", color = "black", alpha = 0.5)
blou + scale_fill_manual( values = c("green","yellow"))
#save as pnps_density_1000genomes
#save as pnps_adding1_1000genomes
#------------------------------------------------------------------------------------------------------------------------------------
# McDonald-Kreitman style test (interspecies) with 1000 Genomes data
#------------------------------------------------------------------------------------------------------------------------------------
#can't compute 1 - dN/dS * pN/pS with NaN/Inf present, so remove them first
no_zero_pnps_file_1000GafricanpopsASD2 #no NaN and no zero for DNDS; clean PNPS the same way
#pnps_file_1000GafricanpopsASD[is.na(pnps_file_1000GafricanpopsASD)] <- 0
# Bug fix: PNPS is numeric, so `PNPS == "Nan"` never matched anything (NaN
# coerces to the string "NaN") and left NA entries in the mask, producing
# all-NA rows after subsetting. Use is.nan()/is.infinite() instead of string
# comparisons.
nonNanaPNPS <- is.nan(no_zero_pnps_file_1000GafricanpopsASD2$PNPS)
no_zero_pnps_file_1000GafricanpopsASD3<-no_zero_pnps_file_1000GafricanpopsASD2[!nonNanaPNPS,]
noInfPNPS <- is.infinite(no_zero_pnps_file_1000GafricanpopsASD3$PNPS)
no_zero_pnps_file_1000GafricanpopsASD3<-no_zero_pnps_file_1000GafricanpopsASD3[!noInfPNPS,]
#left 427 genes
# Per-gene statistic: 1 - (dN/dS)*(pN/pS).
no_zero_pnps_file_1000GafricanpopsASD3$McDonald<-(1-(no_zero_pnps_file_1000GafricanpopsASD3$DNDS*no_zero_pnps_file_1000GafricanpopsASD3$PNPS))
wilcox.test( McDonald ~ ASD_status, data= no_zero_pnps_file_1000GafricanpopsASD3)
#W = 15920.5, p-value = 0.1061 (not significant)
p2<-ggplot(no_zero_pnps_file_1000GafricanpopsASD3, aes(ASD_status,McDonald))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#density graph
blou<-ggplot(no_zero_pnps_file_1000GafricanpopsASD3, aes(McDonald, fill = ASD_status)) +
stat_density(aes(y = ..density..), position = "identity", color = "black", alpha = 0.5)
blou + scale_fill_manual( values = c("green","yellow"))
# -------------------------------with everything
#------------------------------------------------------------------------------------------------------------------------------------
# McDonald-Kreitman style test (interspecies) with primate dN/dS and 1000 Genomes pN/pS
#------------------------------------------------------------------------------------------------------------------------------------
#I need to merge both data frames
head(primates)
head(pnps_file_1000GafricanpopsASD_save)
#merge
primate2<-primates
colnames(primate2)<-c("ASD_status_onlyornot","Ensembl_Gene_ID","dnds_primate")
dim(primate2)
primates_1000gen <- merge(primate2,pnps_file_1000GafricanpopsASD_save,by="Ensembl_Gene_ID")
primates_1000save<-primates_1000gen
#cleaning up: drop column 10
primates_1000gen <-primates_1000gen [,-10]
# NOTE(review): this chained assignment just re-creates primate2/primates_save
# from primates and looks like a leftover -- confirm it is intentional.
primate2<-primates_save<-primate2<-primates
#more clean up: drop Inf and NaN PNPS rows
# NOTE(review): string comparisons against "Inf"/"NaN" rely on numeric->character
# coercion and leave NA entries as NA in the mask -- is.infinite()/is.nan()
# would be safer.
noInfPNPS<-primates_1000gen$PNPS == "Inf"
primates_1000gen<-primates_1000gen[!noInfPNPS,]
noNaN<-primates_1000gen$PNPS == "NaN"
primates_1000gen<-primates_1000gen[!noNaN,]
dim(primates_1000gen) #439 genes
#add the McDonald-Kreitman style statistic using the primate dN/dS
primates_1000gen$McDonald_primate<-(1-(primates_1000gen$dnds_primate*primates_1000gen$PNPS))
#now the test
wilcox.test( McDonald_primate ~ ASD_status, data= primates_1000gen)
# significant: W = 17724.5, p-value = 0.01258
#quick fix of the merged column names
colnames(primates_1000gen)<-c("Ensembl_Gene_ID","ASD_status","dnds_primate","PN","PS","DN","DS","PNPS","DNDS","McDonald_primate")
#very significant: W = 24014, p-value = 0.01059
p2<-ggplot(primates_1000gen, aes(ASD_status,McDonald_primate))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#density graph
blou<-ggplot(primates_1000gen, aes(McDonald_primate, fill = ASD_status)) +
stat_density(aes(y = ..density..), position = "identity", color = "black", alpha = 0.5)
blou + scale_fill_manual( values = c("green","yellow"))
#---------------------------------------let's do the same but on everything
primates_1000save
primates_1000save$McDonald_primate<-(1-(primates_1000save$dnds_primate*primates_1000save$PNPS))
wilcox.test( McDonald_primate ~ ASD_status, data= primates_1000save)
#still pretty significant: W = 17724.5, p-value = 0.01258
p2<-ggplot(primates_1000save, aes(ASD_status,McDonald_primate))+
geom_boxplot(aes(colour = factor(ASD_status)), color = c("darkgreen","orange"), outlier.colour = NULL, outlier.size = 4, outlier.shape = 1, fill = c("green", "yellow"),)
p2 + geom_point(aes(colour = factor(ASD_status)), size = I(5), alpha = I(0.1), position = position_jitter(width = 0.4) ) +
scale_color_manual(values=c("darkgreen","orange"))
#density graph
blou<-ggplot(primates_1000save, aes(McDonald_primate, fill = ASD_status)) +
stat_density(aes(y = ..density..), position = "identity", color = "black", alpha = 0.5)
blou + scale_fill_manual( values = c("green","yellow"))
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#any genes above zero?
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
plot(sort(primates_1000save$McDonald_primate))
|
b18e209ce20ffd926296e8018b5ad0ce89f400cd
|
81cca83f3254f2a0fbc5835d938473b46b608096
|
/RESel_v1.4.6/analyze_dig.R
|
7aaf859292769e5ad6f017fef9505130dc42db24
|
[] |
no_license
|
mikesovic/RESel
|
2e012db9f08734f575c08e702f185dad79ee386a
|
32d5193eb1e086440db8f70a75acd94454702574
|
refs/heads/main
| 2023-08-11T13:50:56.471923
| 2021-09-07T20:02:39
| 2021-09-07T20:02:39
| 404,092,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,465
|
r
|
analyze_dig.R
|
library(tidyverse)
args = commandArgs(trailingOnly=TRUE)
#annotation (bed) file, if present, will need 4 columns, tab delimited and no header
#4 fields: chrom_name, start, end, feature_name
#this is used to check for overlap of the fragments with specific features/positions of interest.
#the name of this file is given as an optional 3rd argument in the bash wrapper script.
min_frag_size = 180
max_frag_size = 480
#run this for each possible combination of selectives
# Count digestion fragments whose length falls within
# [min.frag.size, max.frag.size] for one pair of selective adaptor overhangs,
# and write a PDF histogram of the matching fragments that are <= 2 kb.
#
# Relies on globals created by the surrounding script: `frags_all` (the
# fragment table), `args` (CLI arguments; args[1] = genome/output prefix), and
# `side1_re`/`side2_re` (RE names), which are used in the returned data frame.
# NOTE(review): the return value uses the global side1_re/side2_re rather than
# the side1_re_name/side2_re_name arguments -- callers must keep them in sync.
#
# Arguments:
#   min.frag.size, max.frag.size -- inclusive length bounds for "in range".
#   selectives -- length-2 character vector of overhangs (side 1, side 2).
#   rev        -- FALSE: require at least one side-"2" adaptor; TRUE: mirror
#                 the analysis with side "1", swapping RE/overhang roles in
#                 the output path and the returned row.
#                 (Parameter shadows base::rev; harmless here since rev is
#                 never called as a function inside this body.)
#   side1_re_name, side2_re_name -- RE names used to build output file paths.
#
# Returns a one-row data.frame:
#   genome, RE1, RE2, adaptor1, adaptor2, num_in_range.
total.frags.in.range <- function(min.frag.size,
                                 max.frag.size,
                                 selectives,
                                 rev = FALSE,
                                 side1_re_name,
                                 side2_re_name
) {
  #filter the fragments for those with at least one side "2" adaptor and with both overhangs matching an adaptor
  if (rev == FALSE) {
    frags_filt <- frags_all %>%
      filter(side1 == 2 | side2 == 2) %>%
      filter(seq1 %in% selectives & seq2 %in% selectives) %>%
      filter(length >= min.frag.size & length <= max.frag.size)
    # Histogram input: fragments no longer than 2 kb.
    frag_2k <- frags_filt %>%
      filter(length <= 2000)
    # Two-enzyme digests live in "<RE1>_<RE2>/"; single-enzyme digests in "<RE>/".
    if (side1_re_name != side2_re_name) {
      pdf(file = paste0(args[1], "_out/", side1_re_name, "_", side2_re_name, "/", side1_re_name, "_", side2_re_name, "_", selectives[1], ":", selectives[2], "-frag2k.pdf"))
    }
    else {
      pdf(file = paste0(args[1], "_out/", side1_re_name, "/", side1_re_name, "_", side2_re_name, "_", selectives[1], "-frag2k.pdf"))
    }
    hist(frag_2k$length, main = paste0(side1_re_name, "_", side2_re_name, " under 2k"))
    dev.off()
  }
  if (rev == TRUE) {
    # Mirrored case: side-"1" adaptor required; file name swaps RE/overhang order.
    frags_filt <- frags_all %>%
      filter(side1 == 1 | side2 == 1) %>%
      filter(seq1 %in% selectives & seq2 %in% selectives) %>%
      filter(length >= min.frag.size & length <= max.frag.size)
    frag_2k <- frags_filt %>%
      filter(length <= 2000)
    if (side1_re_name != side2_re_name) {
      pdf(file = paste0(args[1], "_out/", side1_re_name, "_", side2_re_name, "/", side2_re_name, "_", side1_re_name, "_", selectives[2], ":", selectives[1], "-frag2k.pdf"))
    }
    else {
      # Same enzyme on both sides, so side1_re_name is used for both slots.
      pdf(file = paste0(args[1], "_out/", side1_re_name, "/", side1_re_name, "_", side1_re_name, "_", selectives[1], "-frag2k.pdf"))
    }
    hist(frag_2k$length, main = paste0(side2_re_name, "_", side1_re_name, " under 2k"))
    dev.off()
  }
  num_frags <- nrow(frags_filt)
  #return a data frame for the current set of selective adaptors that can be rbound to a master df
  if(rev == FALSE) {
    return(data.frame("genome" = args[1],
                      "RE1" = side1_re,
                      "RE2" = side2_re,
                      "adaptor1" = selectives[1],
                      "adaptor2" = selectives[2],
                      "num_in_range" = num_frags))
  }
  if(rev == TRUE) {
    # Reversed orientation: RE and adaptor columns are swapped.
    return(data.frame("genome" = args[1],
                      "RE1" = side2_re,
                      "RE2" = side1_re,
                      "adaptor1" = selectives[2],
                      "adaptor2" = selectives[1],
                      "num_in_range" = num_frags))
  }
}
#bed_df has "chr", "start", "end", "feature_ID"
# Per-feature coverage of annotated features by recovered fragments.
#
# For each chromosome, collect every base position inside fragments that pass
# the size and adaptor filters, then intersect those positions with feature
# intervals from the annotation file. Relies on the globals `frags_all` and
# `args` from the surrounding script.
#
# NOTE(review): the code expands `start:end` inclusively; standard BED files
# are 0-based half-open, so confirm the annotation file's coordinate
# convention before trusting the coverage percentages.
#
# Arguments:
#   selectives -- length-2 character vector of adaptor overhangs.
#   features   -- path to the 4-column annotation file (chr, start, end, id).
#   rev        -- FALSE: require a side-"2" adaptor; TRUE: require side "1".
#   min.frag.size, max.frag.size -- inclusive fragment-length bounds.
#
# Returns a data frame with one row per (chromosome, feature):
#   feature_ID, feature_length, sites_covered, pct_covered.
#bed_df has "chr", "start", "end", "feature_ID"
feature.overlaps <- function(selectives,
                             features = args[4],
                             rev = FALSE,
                             min.frag.size,
                             max.frag.size) {
  summary_by_feature <- data.frame("feature_ID" = character(),
                                   "feature_length" = integer(),
                                   "sites_covered" = integer(),
                                   "pct_covered" = double())
  #unnest bed positions
  bed <- read_tsv(features, col_names = FALSE, col_types=c("ciic"))
  names(bed) <- c("chr", "start", "end", "feature_ID")
  for (x in unique(bed$chr)) {
    #get vector of positions ('chr_frag_positions') in fragments recovered
    #for the current chr
    chr_frags <- frags_all %>%
      filter(chr == x & length >= min.frag.size & length <= max.frag.size)
    if (rev == FALSE){
      chr_frags <- chr_frags %>%
        filter(side1 == 2 | side2 == 2) %>%
        filter(seq1 %in% selectives & seq2 %in% selectives)
    }
    if (rev == TRUE){
      chr_frags <- chr_frags %>%
        filter(side1 == 1 | side2 == 1) %>%
        filter(seq1 %in% selectives & seq2 %in% selectives)
    }
    # All base positions covered by the retained fragments on this chromosome.
    # (Zero retained fragments yields NULL, so %in% below is all-FALSE.)
    chr_frag_positions <- mapply(FUN = function(start, end) {
      start:end },
      start = chr_frags$start_pos,
      end = chr_frags$end_pos
    ) %>% unlist() %>% as.vector()
    #doing this to try to keep the RAM requirement down for bed files with lot of positions.
    # Features are processed in ~50 chunks; index ranges past the end of
    # unique_features just yield NAs, which match nothing in the filter.
    # NOTE(review): unique_features is recomputed every chromosome (invariant
    # work that could be hoisted above the loop).
    unique_features <- unique(bed$feature_ID)
    nfeatures <- length(unique_features)
    nfeatures_per_loop <- ceiling(nfeatures/50)
    for (i in 0:48) {
      start_idx <- i*nfeatures_per_loop+1
      end_idx <- i*nfeatures_per_loop+nfeatures_per_loop
      target_features <- unique_features[start_idx:end_idx]
      bed_sub <- bed %>% filter(feature_ID %in% target_features)
      #bed_sub <- bed %>% dplyr::slice((i*nrows_per_loop)+1:(i*nrows_per_loop)+nrows_per_loop)
      feat_pos_df <- bed_sub %>%
        filter(chr == x)
      if (nrow(feat_pos_df) > 0) {
        # Expand each feature interval to per-base rows, mark covered bases.
        feat_pos_df <- feat_pos_df %>%
          group_by(r = row_number()) %>%
          mutate("feat_pos" = list(start:end)) %>%
          ungroup %>%
          unnest(feat_pos) %>%
          select(feature_ID, feat_pos) %>%
          mutate("covered" = ifelse(feat_pos %in% chr_frag_positions, 1, 0))
        tmp_summary <- feat_pos_df %>%
          group_by(feature_ID) %>%
          summarise("feature_length" = n(),
                    "sites_covered" = sum(covered),
                    "pct_covered" = sites_covered/feature_length)
        summary_by_feature <- rbind(summary_by_feature, tmp_summary)
      }
    }
    # Final (50th) chunk: whatever features remain after the 0:48 loop.
    # NOTE(review): when start_idx > nfeatures, start_idx:length(unique_features)
    # is a DECREASING sequence, so the tail features get re-selected and their
    # summary rows are duplicated -- confirm and guard with
    # `if (start_idx <= nfeatures)` / seq_len-style indexing.
    start_idx <- 49*nfeatures_per_loop+1
    #end_idx <- 49*nfeatures_per_loop+nfeatures_per_loop
    target_features <- unique_features[start_idx:length(unique_features)]
    bed_sub <- bed %>% filter(feature_ID %in% target_features)
    feat_pos_df <- bed_sub %>%
      filter(chr == x)
    if (nrow(feat_pos_df) > 0) {
      feat_pos_df <- feat_pos_df %>%
        group_by(r = row_number()) %>%
        mutate("feat_pos" = list(start:end)) %>%
        ungroup %>%
        unnest(feat_pos) %>%
        select(feature_ID, feat_pos) %>%
        mutate("covered" = ifelse(feat_pos %in% chr_frag_positions, 1, 0))
      tmp_summary <- feat_pos_df %>%
        group_by(feature_ID) %>%
        summarise("feature_length" = n(),
                  "sites_covered" = sum(covered),
                  "pct_covered" = sites_covered/feature_length)
      summary_by_feature <- rbind(summary_by_feature, tmp_summary)
    }
  }
  summary_by_feature
}
###############################################################
# Script setup: optional annotation file, cut-site table, adaptor metadata.
#
# CLI contract (from the bash wrapper):
#   args[1] = genome/output prefix
#   args[2] = RE-pair subdirectory under "<args[1]>_out/"
#   args[3] = adaptor metadata file (overhang seq, RE name, side)
#   args[4] = optional annotation (BED-like) file
#assume there is no features file
#if there is, 'bed' is assigned the name of the file
bed <- NULL
if (!is.na(args[4])) {
  bed <- args[4]
}
#the full set of cut sites identified from the bash script are stored as 'cuts'
cuts <- read_tsv(paste0(args[1], "_out/", args[2], "/cutsites.tsv"), col_names=FALSE, col_types=c("icc"))
names(cuts) <- c("pos", "seq", "chr")
# Adaptor metadata: one row per (overhang sequence, RE name, side label).
meta <- read_tsv(args[3], col_names = FALSE)
names(meta) <- c("seq",
                 "RE_name",
                 "side")
#add in RE_name and RE_side (arbitrary) to cuts data
merged_all <- left_join(cuts, meta, by = "seq")
# Shift helper: position i keeps x[i] for all but the last element, with NA
# appended, so element i of the result pairs with element i+1 of get_pairs2().
# Result always has the same length as a non-empty x.
#
# Fix: the previous x[1:(length(x) - 1)] indexing broke for length-1 input
# (1:0 is c(1, 0), yielding a length-2 result) and errored on empty input;
# head(x, -1) drops the last element safely in all cases.
get_pairs1 <- function(x) {
  c(head(x, -1), NA)
}
# Shift helper: position i holds x[i + 1], with NA appended at the end, so
# element i of the result pairs with element i of get_pairs1().
# Result always has the same length as a non-empty x.
#
# Fix: the previous x[2:length(x)] indexing broke for length-1 input
# (2:1 is c(2, 1), yielding a length-3 result); x[-1] drops the first
# element safely in all cases.
get_pairs2 <- function(x) {
  c(x[-1], NA)
}
#check to make sure all chromosomes have at least 2 cut sites. If not, script will break.
dup_chrs <- merged_all$chr[duplicated(merged_all$chr)] %>% unique()
merged_all <- merged_all %>% filter(chr %in% dup_chrs)
frags_all <- merged_all %>%
group_by(chr) %>%
mutate("start_pos" = get_pairs1(x = pos)) %>%
mutate("end_pos" = get_pairs2(x = pos)) %>%
mutate("length" = end_pos - start_pos) %>%
mutate("RE1" = get_pairs1(x = RE_name)) %>%
mutate("RE2" = get_pairs2(x = RE_name)) %>%
mutate("side1" = get_pairs1(x = side)) %>%
mutate("side2" = get_pairs2(x = side)) %>%
mutate("seq1" = get_pairs1(x = seq)) %>%
mutate("seq2" = get_pairs2(x = seq)) %>%
drop_na() %>% ungroup()
#get the names of the RE's, and order them based on the side they represent
#this isn't used after the next ordering step
re_names <- frags_all %>%
select(RE1,side1) %>%
distinct() %>%
unite(col = "RE_side", sep = "_") %>%
unlist() %>% as.character()
#Order the re_names - this stores just the names of the RE's
side1_re <- re_names[grep("_1$", re_names)]
side1_re <- gsub("_1", "", side1_re)
side2_re <- re_names[grep("_2$", re_names)]
side2_re <- gsub("_2", "", side2_re)
#now start filtering
#for fragments with at least one side 2 (think that's required for amplification)
#for fragments for which seq1 and seq2 will both match the adaptor overhang used
#evaluate for each pair of possible adaptor overhangs (only a single pair if no ambguities in RE recognition seq)
#get set of potential overhangs for RE1
RE1_selectives <- meta %>%
filter(RE_name == side1_re) %>%
#filter(RE_name == gsub("(.+)(_\\d)", "\\1", re_names[1])) %>%
select(seq) %>%
distinct() %>%
unlist() %>%
as.character()
#get set of potential overhangs for RE2
RE2_selectives <- meta %>%
filter(RE_name == side2_re) %>%
select(seq) %>%
distinct() %>%
unlist() %>%
as.character()
master_df <- data.frame("genome" = character(), "RE1"= character(), "RE2" = character(), "adaptor1" = character(), "adaptor2" = character(), "num_in_range" = integer())
#iterate over all possible combinations of adaptor overhangs and consider
#each RE on each side
#for each, bind the results to master_df
for (i in 1:length(RE1_selectives)) {
for (j in 1:length(RE2_selectives)) {
#get the current pair of overhangs
selectives <- c(RE1_selectives[i], RE2_selectives[j])
current_selectives_df <- total.frags.in.range(selectives = selectives,
min.frag.size = min_frag_size,
max.frag.size = max_frag_size,
side1_re_name = side1_re,
side2_re_name = side2_re)
current_selectives_df_rev <- total.frags.in.range(selectives = selectives,
min.frag.size = min_frag_size,
max.frag.size = max_frag_size,
rev = TRUE,
side1_re_name = side1_re,
side2_re_name = side2_re)
master_df <- rbind(master_df, current_selectives_df, current_selectives_df_rev)
}
}
#if there's a bed file, check feature overlap
if (!is.null(bed)) {
#iterate over all possible combinations of adaptor overhangs and consider
#each RE on each side, just like above for master_df - will give same # of rows for cbind
overall_summary_master <- data.frame()
summary_by_feature <- data.frame("feature_ID" = character(),
"feature_length" = integer(),
"sites_covered" = integer(),
"pct_covered" = double(),
"RE1" = character(),
"RE2" = character(),
"overhang1" = character(),
"overhang2" = character())
for (i in 1:length(RE1_selectives)) {
for (j in 1:length(RE2_selectives)) {
#get the current pair of overhangs
selectives <- c(RE1_selectives[i], RE2_selectives[j])
#first get info on the individual features
coverage <- feature.overlaps(selectives = selectives,
min.frag.size = min_frag_size,
max.frag.size = max_frag_size)
coverage_rev <- feature.overlaps(selectives = selectives,
min.frag.size = min_frag_size,
max.frag.size = max_frag_size,
rev = TRUE)
summary <- as.data.frame(coverage)
summary_rev <- as.data.frame(coverage_rev)
summary$RE1 <- side1_re
summary$RE2 <- side2_re
summary$overhang1 <- RE1_selectives[i]
summary$overhang2 <- RE2_selectives[j]
summary_rev$RE1 <- side2_re
summary_rev$RE2 <- side1_re
summary_rev$overhang1 <- RE2_selectives[i]
summary_rev$overhang2 <- RE1_selectives[j]
summary_by_feature <- rbind(summary_by_feature, summary, summary_rev)
#next get the overall summary to col bind to master_df
#need to get # features evaluated, # partially covered, # fully covered, total # covered, % features covered
feats_eval <- nrow(coverage)
partial_feat_cov <- coverage %>% filter(pct_covered > 0 & pct_covered < 100) %>% nrow()
whole_feat_cov <- coverage %>% filter(pct_covered == 100) %>% nrow()
tot_feat_cov <- coverage %>% filter(pct_covered > 0) %>% nrow()
pct_cov <- tot_feat_cov/feats_eval
overall_summary <- data.frame("features_evaluated" = feats_eval,
"features_covered_partial" = partial_feat_cov,
"features_covered_whole" = whole_feat_cov,
"total_features_covered" = tot_feat_cov,
"pct_features_covered" = pct_cov)
feats_eval <- nrow(coverage_rev)
partial_feat_cov <- coverage_rev %>% filter(pct_covered > 0 & pct_covered < 100) %>% nrow()
whole_feat_cov <- coverage_rev %>% filter(pct_covered == 100) %>% nrow()
tot_feat_cov <- coverage_rev %>% filter(pct_covered > 0) %>% nrow()
pct_cov <- tot_feat_cov/feats_eval
overall_summary_rev <- data.frame("features_evaluated" = feats_eval,
"features_covered_partial" = partial_feat_cov,
"features_covered_whole" = whole_feat_cov,
"total_features_covered" = tot_feat_cov,
"pct_features_covered" = pct_cov)
overall_summary_master <- rbind(overall_summary_master, overall_summary, overall_summary_rev)
}
}
write.table(summary_by_feature,
file = paste0(args[1], "_out/", side1_re, "_", side2_re, "_features_detail.txt"),
sep = "\t",
quote = FALSE,
col.names = TRUE,
row.names = FALSE)
master_df <- cbind(master_df, overall_summary_master)
write.table(master_df,
file = paste0(args[1], "_out/", side1_re, "_", side2_re, "_summary.txt"),
sep = "\t",
quote = FALSE,
col.names = TRUE,
row.names = FALSE)
}
if (is.null(bed)) {
write.table(master_df,
file = paste0(args[1], "_out/", side1_re, "_", side2_re, "_summary.txt"),
sep = "\t",
quote = FALSE,
col.names = TRUE,
row.names = FALSE)
}
|
4884eb083a0afe43f08666c9501697db540eadbd
|
02b1081c868a38735cf46af3fc47bbcd0e71065f
|
/R/uiModResult.R
|
95ba1fa99187c316a5985d89efaf347be440bc4a
|
[] |
no_license
|
SaJaToGu/bullwhipgame
|
806441486b82aae10032f99c5e8b01758ccc20a6
|
88a6ca92f91d77001133144ae8851d7c48f03eb6
|
refs/heads/master
| 2018-10-23T15:54:42.010448
| 2018-08-20T12:33:57
| 2018-08-20T12:33:57
| 119,365,547
| 0
| 0
| null | 2018-01-29T10:05:37
| 2018-01-29T10:05:37
| null |
UTF-8
|
R
| false
| false
| 569
|
r
|
uiModResult.R
|
library(shiny)
library(DT)
#' User Interface Module for Results
#'
#' Builds the results tab of the app: a tab panel holding a namespaced
#' DT results table.
#'
#' @param id character, used to identify a namespace
#' @param title title of the tab panel; defaults to `id`
#' @param ... additional parameters, see \code{shiny::\link[shiny]{tabPanel}}
#'   (currently not forwarded)
#' @param value value of the tab panel; defaults to `title`
#'   (currently not forwarded)
#' @return a \code{shiny::\link[shiny]{tabPanel}} containing UI elements
#' @export
#'
uiModResult <- function(id,
                        title = id,
                        ...,
                        value = title) {
  ns <- shiny::NS(id)
  # Namespaced output slot the server module fills with the results table.
  results_table <- DT::dataTableOutput(outputId = ns("Results"))
  tabPanel(
    title,
    br(),
    h4("Results"),
    br(),
    results_table
  )
}
|
e89923e9b746978304de78b2663cbfeee2b7bc0a
|
983b741705ad1365fd30e114d787bd4f300e292b
|
/data_misc/thermo_encoding.R
|
bd13e20bdad38faad3f9a5f59b7199c59b2953dc
|
[] |
no_license
|
gefero/data_misc
|
085f1d260edda616f9d2905f66cc78682850faf0
|
e16457cfe67376ac3c8877b820b5e150599feacc
|
refs/heads/master
| 2020-04-08T21:48:29.788144
| 2019-02-11T00:08:58
| 2019-02-11T00:08:58
| 159,759,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
thermo_encoding.R
|
####################################################
############### Thermometer encoding ###############
####################################################
# Thermometer-encode a categorical vector.
#
# Each level of `z` (coerced to character, so levels sort lexically) becomes
# an indicator column via model.matrix; 1 minus the row-wise cumulative sum
# makes column j equal 1 exactly when the observation's level lies strictly
# beyond level j.
#
# Args:
#   z     -- vector to encode.
#   name  -- prefix substituted for the literal "z" in the generated column
#            names (model.matrix names the columns "z<level>").
#            NOTE(review): gsub replaces every "z", so level labels that
#            themselves contain "z" are also rewritten -- confirm intended.
#   media -- if TRUE (default), drop the redundant last column; otherwise
#            prepend (last column + 1) instead.
#
# Returns a numeric matrix with one row per element of z.
#
# Fixes: default was `T` (reassignable) instead of TRUE, and `z[,-ncol(z)]`
# dropped to a vector for two-level inputs, making the later colnames<-
# assignment error; drop = FALSE keeps the matrix shape.
term2 <- function(z, name = "", media = TRUE) {
  z <- as.character(z)
  # One indicator column per level; columns are named "z<level>".
  z <- model.matrix(~z-1)
  # Column j becomes 1 iff the observed level is beyond level j.
  z <- 1 - t(apply(z, 1, cumsum))
  if (media) {
    z <- z[, -ncol(z), drop = FALSE]
  } else {
    z <- cbind(z[, ncol(z)] + 1, z[, -ncol(z), drop = FALSE])
  }
  colnames(z) <- gsub("z", name, colnames(z))
  return(z)
}
## Use expample
x = cbind(c(1,2,3,4,5,6,7),c(3,1,2,1,2,3,1))
term2(x[,1],"voto",media=T)
apply(x,2,term2,media=F)
|
5bc98af011f39395f39f6c2608b3aaeb21db6b90
|
635528230f899dda7d1d30d52a62e03b4ed38d0e
|
/man/slots_cleaned.Rd
|
19e922aec80822a35f3beb42ac0552ea30f82471
|
[] |
no_license
|
deepanshu88/cowin
|
3850498ccfd858ece73e5c3c0cd176b0d292a1f2
|
d36335b0570ac4af326b209a4228e4215f4945d9
|
refs/heads/main
| 2023-05-12T05:37:03.260657
| 2021-06-12T07:17:56
| 2021-06-12T07:17:56
| 365,274,011
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 576
|
rd
|
slots_cleaned.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cowinAPI.R
\name{slots_cleaned}
\alias{slots_cleaned}
\title{Clean Output of slots_district() and slots_pincode()}
\usage{
slots_cleaned(slots.df, age_limit = NULL)
}
\arguments{
\item{slots.df}{Dataframe obtained from slots_district() function}
\item{age_limit}{Enter 18 for 18+, 45 for 45+. By default, combination of both}
}
\value{
A cleaned data frame of vaccination slots, optionally filtered by \code{age_limit}.
}
\description{
Clean Output of slots_district() and slots_pincode()
}
\examples{
slots2 <- slots_cleaned(slots, age_limit = 45)
}
\author{
Deepanshu Bhalla
}
|
f2bc623ade73ec990725b933c68ac3a7fc66e9d1
|
81828e33db8d84acbda494a2ab8c27f0dee62d58
|
/File/Practice.R
|
adfed6af33640dc7f46acfdfca049ce0a55dd003
|
[] |
no_license
|
KSTVCHATTERJEE/R_in_Action
|
6363804c9587a6649b03750b2e4f2496080588ad
|
dc77c782498bc577465b2c7bf22b975cbe23920e
|
refs/heads/master
| 2021-01-22T21:12:48.921682
| 2019-04-09T10:02:22
| 2019-04-09T10:02:22
| 100,679,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,493
|
r
|
Practice.R
|
# R - Practice
#ST33062P
#Question - 1 ---- blood-pressure summaries by gender
gender <- c('M','M','F','F','M','M','M','F','M','F','F','M','F','M','F')
systolic <- c(139,125,142,99,101,136,145,100,169,130,125,89,150,152,142)
diastolic <- c(82,85,91,100,104,88,97,70,107,92,95,88,61,78,99)
df1 <- data.frame(gender,systolic,diastolic)
df1
mean(df1$systolic[which(df1$gender=='M')]) #average of systolic blood pressure for males
mean(df1$diastolic[which(df1$gender=='M')])#average of diastolic blood pressure for males
mean(df1$systolic[which(df1$gender=='F')])#average of systolic blood pressure for females
mean(df1$diastolic[which(df1$gender=='F')])#average of diastolic blood pressure for females
length(which(df1$systolic > 140 & df1$gender == 'M')) #males with systolic BP > 140
# NOTE(review): no diastolic value in this data exceeds 140, so the next line
# averages an empty selection and returns NaN.
mean(df1$systolic[which(df1$diastolic > 140)]) #average of systolic BP where diastolic BP > 140
#Question - 2 ---- side-by-side boxplots of three vectors
ds1 <- c(10,20,30,40,50,60,70,80,90,100,110,120)
ds2 <- c(1,2,3,4,5,6,7,8,9,100,110,120)
ds3 <- c(10,10,11,70,71,72,73,74,75,76,77,78,79)
boxplot(ds1,ds2,ds3)
#Question - 3 ---- line plot of sex ratio over census years
years <- c(1961,1971,1981,1991,2001)
sexratio <- c(941,930,934,927,933)
plot(years,sexratio,type='l')
#Mock Test Paper ---- assorted language-feature exercises
# Outer parentheses make the assignment print its value.
(x=11:20)
# Print the squares of x at positions 2, 5, and 7 only.
for(i in 1:10){
  if((i==2) | (i==5) | (i==7))
    print(x[i]^2)
}
# Prints sqrt of the i-th element of the GLOBAL x (not a parameter).
sqroot <- function(i){
  print(sqrt(x[i]))
}
for(j in 1:10){
  sqroot(j)
}
# f(2) works despite the missing `b` because R evaluates arguments lazily
# and the body never touches b.
f <- function(a,b){a^2}
f(2)
paste("a","b",sep=":")
# log of a negative number: NaN with a warning.
log(-1)
args(f)
seq(4)
# seq_along(4) is seq along a length-1 vector, i.e. 1 -- not 1:4.
seq_along(4)
?seq_along
# Adding NA propagates: v1 is x with every element NA.
v1=x+c(NA)
class(v1)
paste("Data","Science","from","MUIT",sep="-")
|
b8f2ff95777ce941855369b9a7610c86ae768ac4
|
bf5a581e64ab88975da7f457c23a586d3a2faf44
|
/R/generateReport.UI.R
|
579f6f7c240e4264328ee38e8ed0d5e551f0d79e
|
[] |
no_license
|
lockedata/PackageReviewR
|
efcaeade76b8bf1fede5bfbf1a3ec29d169f76ab
|
bb6c0246641051ce7ae17c261b14a162143067fa
|
refs/heads/master
| 2021-01-19T11:11:43.880318
| 2018-05-10T06:12:18
| 2018-05-10T06:12:18
| 87,942,894
| 18
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 458
|
r
|
generateReport.UI.R
|
#' Generate report via UI
#'
#' Renders the package-review R Markdown template, prompting interactively
#' for parameters (`params = "ask"`).
#'
#' @param template An alternative template, if required
#' @param ... Additional arguments to [rmarkdown::render()]
#'
#' @export
#'
#' @examples
#' \dontrun{
#' generateReport.UI()
#' }
generateReport.UI <- function(template = NULL, ...) {
  # Fall back to the template bundled with this package.
  if (is.null(template)) {
    template <- system.file(
      "templates", "PackageReview.Rmd",
      package = "PackageReviewR"
    )
  }
  rmarkdown::render(template, params = "ask", ...)
}
|
04f0de26a3327dc47986fd1e1db4eeeab0824872
|
ec25b6fd7bbba5626dc0b19719dee3fa0278a3ee
|
/shap_plot_data.R
|
134a3f31fc531e0e35fc6df7d32a4ed3324c09e7
|
[] |
no_license
|
psubs/solar_flare
|
ebf0bee06c52469575c92210f8aa271275e12b16
|
5f9f8a7c3e45428bec47d2fc1b95b53eb5b14ecc
|
refs/heads/master
| 2020-07-17T01:14:10.941522
| 2019-10-03T15:52:44
| 2019-10-03T15:52:44
| 205,910,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,216
|
r
|
shap_plot_data.R
|
allcovs<-fread("ieee/allcovsbase.tsv")[,unlist(covs)]
pat<-paste0(allcovs,collapse="|")
# Global SHAP importance: per-variable mean (absolute) SHAP value.
#
# Args:
#   shapdt   -- data.table of per-observation SHAP values, one column per
#               entry of shapcols.
#   shapcols -- character vector of SHAP columns to aggregate.
#   abs      -- if TRUE (default) aggregate mean(|shap|); otherwise mean(shap).
#               (The name shadows base::abs; kept for backward compatibility,
#               base::abs is referenced explicitly below.)
#   byvars   -- optional character vector of grouping columns; NULL (default)
#               aggregates over all rows.
#
# Returns a long data.table with columns `variable` and `global_shap`
# (plus the byvars when grouped), ordered by decreasing global_shap.
#
# Fix: the grouped branch was gated on !missing(byvars), which disagreed with
# the declared NULL default; !is.null(byvars) makes an explicitly passed
# byvars = NULL behave exactly like the default.
global_shap <- function(shapdt, shapcols, abs = TRUE, byvars = NULL) {
  if (abs) {
    aggf <- function(x) { mean(base::abs(x)) }
  } else {
    aggf <- function(x) { mean(x) }
  }
  if (!is.null(byvars)) {
    global <- melt(shapdt[, lapply(.SD, function(x) aggf(x)),
                          .SDcols = shapcols, by = byvars][, id := "var"],
                   value.name = "global_shap",
                   id.vars = c("id", byvars))[, id := NULL][order(-global_shap)]
  } else {
    global <- melt(shapdt[, lapply(.SD, function(x) aggf(x)),
                          .SDcols = shapcols][, id := "var"],
                   value.name = "global_shap",
                   id.vars = "id")[, id := NULL][order(-global_shap)]
  }
  return(global)
}
# Decompose combined feature names into their components using the global
# regex `pat` (an alternation of all base SHARP covariate names).
# Adds, on a copy of the input (so the caller's table is not mutated):
#   base   -- the matched covariate name
#   window -- trailing time-window id (text after "t10_")
#   stat   -- derived-statistic name (text before "t10", trimmed of "_")
# Assumes `variable` values look like "<base>_<stat>_t10_<window>" --
# NOTE(review): confirm against the upstream feature-naming scheme.
decomp<-function(dd){
  # copy() so the := assignments below do not modify the caller's data.table.
  d<-copy(dd)
  d[,base:=str_match(variable, pat)[,1]]
  d[,window:=tstrsplit(variable, base)[[2]]]
  d[,stat:=tstrsplit(window, "t10")[[1]]]
  d[,window:=gsub("^.*t10_", "", window)]
  d[,stat:=gsub("^_|_$", "", stat)]
  return(d)
}
# Min-max scale x to [0, 1], ignoring NAs when locating the range.
# NA elements stay NA; a zero-range input (max == min) yields NaN throughout.
#
# Fixes: `T` replaced with TRUE (T is reassignable), and the minimum is now
# computed once instead of twice.
std1 <- function(x) {
  lo <- min(x, na.rm = TRUE)
  hi <- max(x, na.rm = TRUE)
  (x - lo) / (hi - lo)
}
#global_shap<-function(shapdt, shapcols){
# global<-melt(shapdt[,lapply(.SD, function(x) mean(abs(x))),.SDcols=shapcols][,id:="var"],
# value.name="global_shap",
# id.vars="id")[,id:=NULL][order(-global_shap)]
# return(global)
#}
source("solar_flare/make_sumdt_window_func.R")
library(ggplot2)
library(data.table)
library(stringr)
library(xgboost)
library(parallelDist)
library(stringr)
## data read-in ##
cvshap<-fread("ieee/cvshap_exact.tsv")
tstshap<-fread("ieee/testpred_shap_exact.tsv")
setnames(tstshap, old="Id", new="id")
cvshap[,.(.N,m=as.character(round(mean(obs),3)),s=sum(obs)),by=fold]
tstshap<-tstshap[,`:=`(fold=4)][,fid:=paste0("f",fold,"_",id)]
allshap<-rbindlist(list(cvshap,tstshap),use.names=T,fill=T)
cvcols<-c(names(allshap)[1:7], "BIAS", "lev", "ClassLabel", "Id")
shapcols<-setdiff(names(allshap), cvcols)
d<-fread("window_select_data.tsv")
d[,class_label:=factor(class_label, levels=c("0", "1"),
labels=c("no_flare", "big_flare"))]
grpvars <- c("id", "fid", "fold", "grp", "class_label")
cnvrt_num<-melt(d[,lapply(.SD, class)][,id:="id"],
id.vars="id",
variable.factor=F)[value=="character"][!variable %in% grpvars,variable]
d[,paste(cnvrt_num):=lapply(.SD, as.numeric),.SDcols=cnvrt_num]
d[,(paste0("q_", shapcols)):=lapply(.SD, function(x) rank(x) / .N),.SDcols=shapcols]
gcv<-global_shap(allshap, shapcols)
#gtst<-global_shap(tstshap, shapcols)
gcv<-decomp(gcv)[,rnk:=1:.N]
#gtst<-decomp(#gtst)[,rnk:=1:.N]
gcv[,.(list(unique(variable))),by=.(base,window)]
gcv[grep("l1", variable),vtype:="var"]
gcv[grep("sd|range|slope", variable),vtype:="var"]
gcv[is.na(vtype),vtype:="loc"]
#gcv[,base_shap_sum:=sum(abs(global_shap)),by=base]
#gcv[,window_shap_sum:=sum(abs(global_shap)),by=window]
#gcv[,stat_shap_sum:=sum(abs(global_shap)),by=stat]
#gcv[,.(Nvar=uniqueN(variable)),by=.(base, base_shap_sum)][order(-base_shap_sum)]
#gtst[,base_shap_sum:=sum(abs(global_shap)),by=base]
#gtst[,stat_shap_sum:=sum(abs(global_shap)),by=stat]
#gtst[,window_shap_sum:=sum(abs(global_shap)),by=window]
shaplong<-melt(allshap[,c(shapcols, "flare", "fid","obs", "fold"),with=F],
measure.vars=shapcols,value.name="shap")
qcols<-paste0("q_", shapcols)
dlong<-melt(d,id.vars=setdiff(names(d),shapcols))[,-qcols,with=F]
dlong[,stdfvalue:=std1(value),by=variable]
dqlong<-melt(d,id.vars=setdiff(names(d),qcols),value.name="qvalue")[,-shapcols,with=F]
dqlong[,variable:=gsub("q_", "", variable)]
dlong<-merge(dlong, dqlong, by=intersect(names(dlong), names(dqlong)))
bigl<-merge(shaplong, dlong, by=c("fid", "fold", "variable"))
#shaplong<-melt(allshap,id.vars=setdiff(names(allshap),shapcols))
bigl<-merge(bigl, gcv, by="variable")
setnames(bigl, old="value", new="rfvalue")
bigl[,base_shap_id:=sum(shap),by=.(fid, base)]
bigl[,window_shap_id:=sum(shap),by=.(fid, window)]
bigl[,stat_shap_id:=sum(shap),by=.(fid, stat)]
bigl[,base_window_shap_id:=sum(shap),by=.(fid, base, window)]
bigl[,vtype_shap_id:=sum(shap),by=.(fid, vtype)]
bigl[,base_vtype_shap_id:=sum(shap),by=.(fid, base, vtype)]
bigl[,global_base_shap:=mean(abs(base_shap_id)),by=base]
bigl[,global_window_shap:=mean(abs(window_shap_id)),by=window]
bigl[,global_stat_shap:=mean(abs(stat_shap_id)),by=stat]
bigl[,global_base_window_shap:=mean(abs(base_window_shap_id)),by=.(base,window)]
bigl[,global_vtype_shap:=mean(abs(vtype_shap_id)),by=.(vtype)]
bigl[,global_base_vtype_shap:=mean(abs(base_vtype_shap_id)),by=.(base,vtype)]
chk<-bigl[,.(fid, base_shap_sum, base_shap_id, base)][,unique(.SD)]
chk[,.(base_shap_sum, mean(abs(base_shap_id))),by=base]
#fwrite(bigl, file="big_plot_shap_long.tsv", sep="\t")
top20<-gcv[rnk < 21,as.character(variable)]
plotd<-bigl[variable %in% top20]
pfid<-c()
for(v in top20){
setnames(d, old=v, new="v_tmp")
pfid<-c(pfid, d[order(v_tmp)][seq(1,.N,length.out=500),fid])
print(v)
setnames(d, old="v_tmp", new=v)
}
pfid<-unique(pfid)
pfid2<-allshap[order(flare)][seq(1,.N,length.out=15000-length(pfid)),fid]
pfid2<-setdiff(pfid2, pfid)
allshap[fid %in% pfid,quantile(flare,seq(0,1,.1))]
allshap[fid %in% c(pfid,pfid2),quantile(flare,seq(0,1,.1))]
allshap[,quantile(flare,seq(0,1,.1))]
#num_skip<-ceiling(allshap[,.N]/10^4)
#showid<-allshap[order(flare)][seq(1,.N,num_skip)][,fid]
showid<-c(pfid, pfid2)
fwrite(bigl[fid %in% showid],file="sub_plot_shap_long.tsv",sep="\t")
top20<-gcv[rnk < 21,as.character(variable)]
plotd<-bigl[fid %in% showid][variable %in% top20]
#fwrite(plotd,file="sub_plot_shap_long_top20.tsv",sep="\t")
library(SHAPforxgboost)
library(RColorBrewer)
source("solar_flare/shap_plot_funcs.R")
plotd<-fread(file="sub_plot_shap_long_top20.tsv")
setnames(plotd,old=c("shap","global_shap"), new=c("value","mean_value"))
fd<-plotd[order(rnk),head(.SD,1),by=variable][,.(levs=as.character(variable),
labes=paste0(toupper(base),
"_", stat, "_w",
window))]
levs<-fd[,levs]
labes<-fd[,labes]
plotd[,variable:=factor(variable, levels=levs)]
plotd[,rfvalue:=as.numeric(rfvalue)]
#shap.plot.summary(data_long=plotd)
#cls<-brewer.pal(8, "Set1")[c(2,2,5,4,4)]
#cls<-brewer.pal(8, "Dark2")[c(1,3,3, 3, 2)]
cls<-c(brewer.pal(8, "Paired")[3], brewer.pal(8, "Dark2")[c(3,3,3, 2)])
top20<-plotd[,.(variable, mean_value)][order(-mean_value)][,unique(.SD)][1:20][,as.character(variable)]
pd20<-plotd[variable %in% top20]
pd20[,variable:=factor(variable,labels=labes)]
source("solar_flare/make_sumdt_window_func.R")
m<-me.shap.plot.summary(dat=pd20,
cols=cls) +
annotate("text",
x = 20 + 0.27, y=-1.3,label="Global Importance",
size = 2.1, alpha = 0.7,
hjust = -0.2,
fontface = "bold") +
labs(title="SHAP: Top 20 Final Variables") +
theme(plot.title=element_text(hjust=0.5,size=10),
axis.title.x=element_text(size=8)) +
coord_cartesian(clip="off") + coord_flip()
m<-m + theme(legend.position="bottom") +
theme(legend.title=element_text(size=8),legend.spacing.x=unit(0.5,'cm'))
m
ggsave("global_shap_top20.png",width=3.5,height=4.5,dpi=300)
plotd<-fread(file="sub_plot_shap_long.tsv")
setnames(plotd,old=c("shap","global_shap"), new=c("value","mean_value"))
fd<-plotd[order(rnk),
head(.SD,1),
by=variable][,
.(levs=as.character(variable),
labes=paste0(toupper(base),
"_", stat, "_w",
window))]
levs<-fd[,levs]
labes<-fd[,labes]
plotd[,variable:=factor(variable, levels=levs,labels=labes)]
plotd[,rfvalue:=as.numeric(rfvalue)]
#shap.plot.summary(data_long=plotd)
#cls<-brewer.pal(8, "Set1")[c(2,2,5,4,4)]
#cls<-brewer.pal(8, "Dark2")[c(1,3,3, 3, 2)]
cls<-c(brewer.pal(8, "Paired")[3], brewer.pal(8, "Dark2")[c(3,3,3, 2)])
#plotdw<-plotd[,.(fid, base, base_shap_id, global_base_shap)][,unique(.SD)]
#plotds<-plotd[,.(fid, base, base_shap_id, global_base_shap)][,unique(.SD)]
plotdb<-plotd[,.(fid,
base=toupper(base),
base_shap_id,
global_base_shap)][,unique(.SD)]
blevs<-plotdb[order(-global_base_shap),head(.SD,1),by=base][,base]
plotdb[,base:=factor(base, levels=blevs)]
source("solar_flare/shap_plot_funcs.R")
base_g<-me.shap.plot.summary(dat=plotdb,
cols=cls,
variable_name="base",
shap_name="base_shap_id",
global_shap_name="global_base_shap",
show_color=F)
base_g<-base_g +
annotate("text",
x = 11 + 0.27, y=-3.9,label="Global Importance",
size = 2.1, alpha = 0.7,
hjust = -0.2,
fontface = "bold") +
labs(title="Aggregated SHAP\n by SHARP Parameter") +
theme(plot.title=element_text(hjust=0.5,size=10),
axis.title.x=element_text(size=8))
base_g
ggsave("global_shap_base.png",width=3,height=4.2,dpi=300)
plotdb<-plotd[,.(fid, window, window_shap_id, global_window_shap)][,unique(.SD)]
blevs<-plotdb[order(-global_window_shap),head(.SD,1),by=window][,window]
wlabs<-c("w5: 50-59", "w4: 40-49", "w3: 30-39", "w2: 20-29", "w0: time 0-9", "w1: time 10-19")
plotdb[,window:=factor(window, levels=blevs,labels=wlabs)]
window_g<-me.shap.plot.summary(dat=plotdb,
cols=cls,
variable_name="window",
shap_name="window_shap_id",
global_shap_name="global_window_shap",
show_color=F)
window_g<-window_g +
annotate("text",
x = 6 + 0.27, y=-7,label="Global Importance",
size = 2.1, alpha = 0.7,
hjust = -0.2,
fontface = "bold") +
labs(title="Aggregated SHAP\n by Time-Series Window") +
theme(plot.title=element_text(hjust=0.5,size=10),
axis.title.x=element_text(size=8))
window_g
ggsave("global_shap_window.png",width=3,height=4.2,dpi=300)
aa<-ggplotGrob(m)
bb<-ggplotGrob(base_g)
cc<-ggplotGrob(window_g)
aa<-plot_grid(ggplot(),aa,rel_heights=c(0.025,.975),nrow=2)
bb<-plot_grid(bb,ggplot(),rel_heights=c(0.9,.1),nrow=2)
cc<-plot_grid(cc,ggplot(),rel_heights=c(0.9,.1),nrow=2)
oo<-plot_grid(aa ,
bb,cc,
#align="h",
nrow=1
#axis="l"#,
# rel_heights=c(1/2,,.4)
,labels=paste0("(",letters[1:3], ")"),
label_x=c(0.3, 0.3, 0.3),
label_y=c(0.1, 0.1, 0.1),
label_size=10,
label_fontface="plain")
ggsave("global_shap_3.png",width=10,height=4.8,dpi=300)
plotd[,.(variable,base, mean_value)][,unique(.SD)][,.N,by=base][order(-N)]
sharp<-data.table(toupper(allcovs))
desc<-c("Absolute value of the net current helicity",
"Sum of X-component of normalized Lorentz force",
"Sum of Y-component of normalized Lorentz force",
"Sum of Z-component of normalized Lorentz force",
"Mean twist parameter",
"Mean inclination angle",
"Mean value of the horizontal field gradient",
"Mean value of the total field gradient",
"Mean value of the vertical field gradient",
"Mean vertical current density",
"Mean current helicity",
"Mean photospheric excess magnetic energy density",
"Mean shear angle",
"Total unsigned flux around high gradient
polarity inversion lines using the Br component",
"Sum of the absolute value of the net current
per polarity",
"Area with shear angle greater than 45 degrees",
"Total magnitude of Lorentz force",
"Sum of X-component of Lorentz force",
"Sum of Y-component of Lorentz force",
"Sum of Z-component of Lorentz force",
"Total photospheric magnetic energy density",
"Total unsigned current helicity",
"Total unsigned vertical current",
"Total unsigned flux in Maxwells",
"Max X-ray luminosity observed +/- 6 minutes around observation time")
# ---- SHARP parameter description table ----
# Attach the human-readable descriptions (built in the c(...) vector above)
# to the `sharp` data.table and print it as a LaTeX table via xtable.
sharp[,desc:=desc]
print(xtable(sharp), include.rownames=F)
# Draw previously built grobs side by side; `aa`, `bb`, `cc`, `m`, `base_g`,
# `window_g` are assumed to exist from earlier plotting code -- TODO confirm.
grid::grid.draw(cbind(aa,bb,cc))
grid.arrange(m, base_g, window_g,nrow=1)

# ---- Aggregated SHAP by SHARP parameter x time-series window ----
# One row per (fid, base_window) pair, de-duplicated.
plotdb<-plotd[,.(fid, bw=paste0(base, "_w", window), base_window_shap_id, global_base_window_shap)][,unique(.SD)]
# Factor levels ordered by decreasing global SHAP so the summary plot sorts
# most-important features first.
blevs<-plotdb[order(-global_base_window_shap),head(.SD,1),by=bw][,bw]
#wlabs<-c("time 50-59", "time 40-49", "time 30-39", "time 20-29", "time 0-9", "time 10-19")
plotdb[,bw:=factor(bw, levels=blevs)]
bw_g<-me.shap.plot.summary(dat=plotdb,
  cols=cls,
  variable_name="bw",
  shap_name="base_window_shap_id",
  global_shap_name="global_base_window_shap",
  show_color=F)
# Annotate the base-x-window summary plot and save it to disk.
bw_g +
  annotate("text",
    x = 6 + 0.25, y=-7,label="Global Importance",
    size = 3.1, alpha = 0.7,
    hjust = -0.2,
    fontface = "bold") +
  labs(title="Aggregated SHAP by \nSHARP Parameter and Time-Series Window") +
  theme(plot.title=element_text(hjust=0.5,size=10),
    axis.title.x=element_text(size=8))
ggsave("global_base_shap_window.png",width=4,height=4.5,dpi=300)

# ---- Aggregated SHAP by SHARP parameter x variability/location type ----
# Same construction as above, keyed on (base, vtype) instead of (base, window).
plotdb<-plotd[,.(fid, vtype, base, bv=paste0(base, "_", vtype), base_vtype_shap_id, global_base_vtype_shap)][,unique(.SD)]
blevs<-plotdb[order(-global_base_vtype_shap),head(.SD,1),by=bv][,bv]
#wlabs<-c("time 50-59", "time 40-49", "time 30-39", "time 20-29", "time 0-9", "time 10-19")
plotdb[,bv:=factor(bv, levels=blevs)]
bv_g<-me.shap.plot.summary(dat=plotdb,
  cols=cls,
  variable_name="bv",
  shap_name="base_vtype_shap_id",
  global_shap_name="global_base_vtype_shap",
  show_color=F)
bv_g +
  annotate("text",
    x = 6 + 0.25, y=-7,label="Global Importance",
    size = 3.1, alpha = 0.7,
    hjust = -0.2,
    fontface = "bold") +
  labs(title="Aggregated SHAP by \nSHARP Parameter and Variability/Location Type of Derived Statistic") +
  theme(plot.title=element_text(hjust=0.5,size=12))
ggsave("global_base_shap_vtype.png",width=4,height=4,dpi=300)
# ---- Reload long-format SHAP data and assign hierarchical clusters ----
# Read the per-observation, per-feature SHAP table and rename columns to the
# value/mean_value convention expected by the plotting helpers.
plotd<-fread("sub_plot_shap_long.tsv")
setnames(plotd,old=c("shap","global_shap"), new=c("value","mean_value"))
# Build display labels of the form BASE_stat_wWINDOW, one per variable,
# ordered by the precomputed rank column `rnk`.
fd<-plotd[order(rnk),head(.SD,1),by=variable][,.(levs=as.character(variable),
  labes=paste0(toupper(base),
    "_", stat, "_w",
    window))]
levs<-fd[,levs]
labes<-fd[,labes]
plotd[,variable:=factor(variable, levels=levs,labels=labes)]
plotd[,rfvalue:=as.numeric(rfvalue)]
# Cast to wide format: one row per observation, one column per feature SHAP.
dw<-dcast(plotd,fid + fold + flare + obs + id + grp ~ variable, value.var="value")
#kk<-list()
#for(k in 2:15){
#  kk[[k]]<-kmeans(dw[,-c("fid", "fold", "flare", "obs", "id", "grp"),with=F],centers=k)
#  print(kk[[k]]$betweenss / kk[[k]]$totss)
#}
#
#dw[,cluster:=kk[[13]]$cluster]
#dw[,.(mean(flare),.N),by=.(cluster,fold)][order(-V1)]
#clusters <- hclust(dist(scale(as.matrix(dw[,-c("fid", "fold", "flare", "obs", "id", "grp"),
#                                           with=F]))),
#                   method = "ward.D")
#save(clusters, file="hclust_exact.RData")
# Load the precomputed Ward hierarchical clustering (built by the commented
# code above) and cut it into 7 clusters.
load("solar_flare/hclust_exact.RData")
kcl<-cutree(clusters, k=7)
dw[,hclust10:=kcl]
# Reorder rows to follow the dendrogram leaf order, then drop the helper
# column and record per-cluster mean predicted flare probability.
dwr<-dw[,clusterid:=clusters$order][rank(clusterid),]
dwr[,clusterid:=NULL]
dwr[,id:=.I]
dwr[,mflare:=mean(flare),by=hclust10]
# ---- Per-cluster summary tables ----
# Mean observed outcome per (mflare, cluster, fold) cell, formatted as a
# string with "NA" replaced by "-"; then cast to one column per fold.
dws<-dwr[,.(gsub("NA", "-", paste0(round(mean(obs),2)),
  #                "(", .N, ")")
  )),
  by=.(mflare,hclust10,fold)]
dws<-dcast(dws, mflare + hclust10 ~ fold, value.var=c("V1"))
# Classification metrics per cluster at a 0.35 decision threshold, excluding
# the held-out fold 4; f1/prec/rec are project helpers -- TODO confirm.
clustsum<-dwr[fold!=4,
  .(mobs=mean(obs),
    mflare=mean(mflare),
    f1=f1(flare,obs,.35),
    prec=prec(flare,obs,.35),
    rec=rec(flare,obs,.35),
    typeI=mean(flare > 0.35 & obs==0),.N,
    fn=mean(flare < 0.35 & obs==1),
    fold1=sum(fold==1)/.N,
    fold2=sum(fold==2)/.N,
    fold3=sum(fold==3)/.N),
  by=hclust10][order(-mflare)]#[hclust10==1]
clustsum[,(paste0("fold", 1:3)):=lapply(.SD, function(x) round(as.numeric(x),2)),
  .SDcols=paste0("fold", 1:3)][]
clustsum<-clustsum[,lapply(.SD, function(x) as.character(round(x,2)))]
# Emit LaTeX versions of the summary (the first two print calls are identical).
print(xtable(clustsum[,c(1:4,9:12)]),include.rownames=F)
xx<-clustsum[,c(1:4,9:12)]
print(xtable(clustsum[,c(1:4,9:12)]),include.rownames=F)
print(xtable(clustsum),include.rownames=F)
# Inspect missingness of the X-ray max-luminosity features (-99999 sentinel).
grep("XR_MAX_min", names(dwr),value=T)
minxr<- grep("XR_MAX_min", names(dwr),value=T)
plotd[variable %in% minxr][,missxr:=as.numeric(sum(rfvalue==-99999) > 0),by=fid]
plotd[variable %in% minxr][,missxr:=as.numeric(sum(rfvalue==-99999) > 0),by=fid][][,.(mean(obs,na.rm=T),mean(missxr)),by=.(hclust10)][order(hclust10,missxr)]
# Scatter of predicted flare probability vs the window-5 X-ray max feature,
# colored by observed outcome, one panel per cluster (fold 4 held out).
ggplot(dwr[fold!=4], aes(x=XR_MAX_min_w5,
  y=flare,
  col=factor(obs))) +
  geom_point() + facet_wrap(~hclust10)
# Share of -99999 (missing) X-ray values per observation, largest first.
plotd[variable %in% minxr][,.(mean(rfvalue==-99999)),by=.(fold,fid,hclust10)][V1!=0][order(-V1)]
#ch<-melt(dw, id.vars=c("fid", "fold", "flare", "obs", "hclust10"),measure.vars=plotd[,unique(as.character(variable))])
#library(class)
#
# Per-cluster global SHAP magnitudes, ranked within each cluster.
gcv_clust<-global_shap(dwr,
  shapcols=plotd[,as.character(unique(variable))],
  byvars=c("mflare", "hclust10"))[order(hclust10,-global_shap)]
gcv_clust[,rnk:=1:.N,by=.(hclust10)]
# Decompose compound feature names of the form "<base>_<stat>_t10_<window>"
# into separate base / stat / window columns.
#
# Relies on the global `pat` (a regex alternation of upper-cased SHARP base
# parameter names, defined below in this script) plus the stringr and
# data.table packages.  The order of the gsub() clean-up steps is
# significant and is preserved exactly.
#
# dd: data.table with a `variable` column holding the compound names.
# Returns a copy of `dd` (the caller's table is never modified by reference)
# with `base`, `stat`, and `window` columns added.
decomp <- function(dd) {
  dt <- copy(dd)
  vnames <- as.character(dt$variable)
  dt[, base := str_match(vnames, pat)[, 1]]
  dt[, window := tstrsplit(vnames, base)[[2]]]
  dt[, stat := tstrsplit(window, "t10")[[1]]]
  dt[, window := gsub("^.*t10_", "", window)]
  dt[, window := gsub("^.*w_", "", window)]
  dt[, stat := gsub("^_|_$", "", stat)]
  dt
}
# ---- Cluster-level SHAP breakdown by SHARP base parameter ----
# Build the regex of base covariate names used by decomp() above.
allcovs<-fread("allcovsbase.tsv")[,unlist(toupper(covs))]
pat<-paste0(toupper(allcovs),collapse="|")
gcv_clust<-decomp(gcv_clust)
gcv_clust[,window:=gsub("^.*_w","",window)]
# Attach cluster membership and mean flare probability to the long table.
plotd<-merge(dwr[,.(fid,mflare,hclust10)],plotd,by="fid")
# One column per base parameter holding its per-observation aggregated SHAP.
bases<-dcast(plotd[,.(fid,base=toupper(base),hclust10,mflare,base_shap_id)][,unique(.SD)],
  fid + hclust10+mflare ~ base, value.var="base_shap_id")
# NOTE(review): the first global_shap() result is immediately overwritten by
# the signed (abs=F) version on the next call; only the second is used.
gcvb<-global_shap(bases, shapcols=intersect(names(bases),allcovs),
  byvars=c("hclust10","mflare"))
gcvb<-global_shap(bases, shapcols=intersect(names(bases),allcovs),abs=F,
  byvars=c("hclust10","mflare"))
# Order parameters by importance within the highest-mflare cluster, and build
# "Cluster k: p" legend labels sorted by decreasing mean flare probability.
blevs<-gcvb[order(-mflare,-global_shap),unique(as.character(variable))]
gcvb[,variable:=factor(variable, levels=blevs)]
clevs<-gcvb[order(-mflare)][,unique(hclust10)]
clabs<-gcvb[order(-mflare)][,as.character(round(unique(mflare),2))]
clabs<-paste(paste0("Cluster", 1:7, ":"), clabs)
gcvb[,hclust10:=factor(hclust10,levels=clevs,labels=clabs)]
clustcols<-brewer.pal(11,"Paired")
# Grouped bars: x = SHARP parameter, fill = cluster.
ggplot(gcvb,
  aes(x=variable, y=global_shap,fill=hclust10)) +
  geom_bar(stat="identity",position="dodge") +
  # facet_wrap(~hclust10,scales="free_x") +
  scale_fill_manual(values=clustcols[1:7],
    #breaks=clevs,
    labels=clabs,
    name="Cluster ID:\nAvg Predicted Probability \nof MSF\nwithin Cluster") +
  theme_bw() +
  labs(x="SHARP Parameter",
    title="Avg Magnitude of SHARP on predicted log-odds of MSF",
    y="Avg Magnitude of SHARP" )
ggsave("subcluster_breakdownBdir_exact.png")
# Transposed view: x = cluster, fill = SHARP parameter.
ggplot(gcvb,
  aes(x=hclust10, y=global_shap,fill=variable)) +
  geom_bar(stat="identity",position="dodge") +
  # facet_wrap(~hclust10,scales="free_x") +
  scale_fill_manual(values=clustcols,name="SHARP parameter") + theme_bw() +
  labs(x="Cluster ID: (Avg Predicted Probability of Flare within Cluster)",
    title="Avg influence of SHARP on predicted log-odds of MSF by Cluster",
    y="Avg influence of SHARP" )
ggsave("subcluster_breakdown_dir_exact.png",width=10, height=4.8,dpi=300)
ggsave("subcluster_breakdown_dir_exact.pdf",width=10, height=4.8,dpi=300)
# ---- Base-graphics diagram of the 6 ten-step time windows (t = 0..59) ----
# Blank canvas with no axes; the timeline is drawn manually below.
plot(x=0:59,y=rep(0,60),pch="",ylim=c(-.3,.3),
  xaxt='n',yaxt='n', ann=F,axes=F)
# One colored rectangle per time step, colors grouped in blocks of 10.
rect(xleft=seq(0,59,1),
  ybottom=rep(-.1, 60),
  xright=seq(0,59,1)+1,
  ytop=rep(0.1,60),
  col=rep(clustcols[1:6],each=10),ylab="")
# Window labels under the band, and t= tick labels at block boundaries.
text(x=seq(5, 55, 10),y=rep(-0.2,10),labels=paste0("window",0:5),cex=0.8)
text(x=c(seq(0.5, 50.5,10),59.5),y=rep(-0.2,10),
  labels=paste0("t=",c(seq(0,50,10),59)),
  cex=0.8)
# NOTE(review): the next two statements are abandoned/incomplete -- the
# points() call below leaves `y=` dangling and the later bare `text(x=`
# never closes, so source()-ing this file as-is raises a parse error.
points(x=c(seq(0.5, 50.5,10),59.5), y=
points(x=0:59,y=rep(0,60),pch="|")
abline(h=0)
text(x=
#text(x = 0.5, y = 0.5, '{', srt = 90, cex = 8, family = 'Helvetica Neue UltraLight')
# Count the distinct derived statistics (and stat/window combos) per base.
plotd[,.(list(unique(stat)),uniqueN(variable)),by=base][order(-V2)]
plotd[,.(list(unique(paste0(stat, "_w",window))),uniqueN(variable)),by=base][order(-V2)]
# ---- Stacked per-observation SHAP contributions by base parameter ----
# Keep only clusters with mean flare probability above 0.1; one row per
# (observation, base) with that base's aggregated SHAP.
shapobs_long<-merge(dw[mflare > 0.1,.(cid=1:.N,hclust10,mflare,fid)],
  plotd[,.(fid,base,base_shap_id)][,unique(.SD)],
  by=c("fid"))
shapobs_long<-shapobs_long[order(cid)]
# Plot configuration (mirrors the SHAPforxgboost force-plot interface;
# the zoom options are defined but unused here).
id<-"cid"
value<-"base_shap_id"
variable<-"base"
zoom_in_location = NULL;
y_parent_limit = NULL;
y_zoomin_limit = NULL; # c(a;b) to limit the y-axis in zoom-in
zoom_in = TRUE; # default zoom in to zoom_in_location
zoom_in_group = NULL
# Reuse the importance ordering computed for gcvb above.
blevs<-gcvb[order(-mflare,-global_shap),unique(as.character(variable))]
shapobs_long[,base:=factor(base, levels=blevs)]
# Stacked column chart: each observation's total SHAP split by base parameter.
p <- ggplot(shapobs_long, aes_string(x = id, y = value , fill = variable)) +
  geom_col(width =1, alpha = 0.9) +
  # geom_area() +
  labs(fill = 'Feature', x = 'Observation',
    y = 'SHAP values by feature:\n (Contribution to the base value)') +
  geom_hline(yintercept = 0, col = "gray40") +
  theme_bw() +
  # theme(legend.position="none") +
  scale_fill_manual(values=clustcols,name="SHARP parameter")
#gcv_sub<-gcv_clust[rnk < 10]
#incl<-gcv_sub[order(-mflare, rnk),unique(as.character(variable))]
#gcv_clust[,variable:=factor(variable, levels=incl)]
#gcv[,hclust10
#ggplot(gcv_clust[variable %in% incl][mflare > 0.1],
# aes(x=variable, y=global_shap,fill=factor(hclust10))) +
# geom_bar(stat="identity",position="dodge") #+
# facet_wrap(~hclust10,scales="free_x") +
# scale_fill_manual(breaks=)
#
#
#
#
#chk<-gcv_clust[rnk < 20]
#allv<-chk[,as.character(unique(variable))]
#for(k
# ---- Interactive exploration of SHAP dependence/interaction plots ----
# All ordered (x, y) feature pairs, then restricted to X-ray-min features on
# the x-axis against window-5 R_VALUE/TOTUSJH features on the y-axis.
combos<-CJ(xvar=plotd[,unique(variable)],
  yvar=plotd[,unique(variable)])[xvar!=yvar]
combos[,ind:=1:.N]
combos<-combos[grep("XR_MAX_min",xvar)][grep("R_VALUE_.*w5$|TOTUSJH.*w5",yvar)]
#combos<-combos[grep("r_value_mean_", xvar)][grep("r_value_mean_",yvar)]
#combos<-combos[!grep("r_value", xvar)][!grep("r_value",yvar)]
#combos<-combos[xvar=="totpot_max_t10_5"]
source("solar_flare/shap_plot_funcs.R")
# Step through each pair interactively (readline pauses between plots);
# intended for an attached R session, not batch execution.
for(i in 1:combos[,.N]){
#for(i in (i+1):combos[,.N]){
print(
  me.dep.plot(plotd,x=combos[i,xvar],color_feature=combos[i,yvar],quantx=T) +
    facet_wrap(~factor(hclust10),labeller="label_both")
)
#)
#readline(prompt="Press [enter] to continue")
#print(
#  b<-me.dep.plot(plotd,combos[i,yvar],combos[i,xvar],quantx=T) +
#  facet_wrap(~fold,labeller="label_both")
readline(prompt="Press [enter] to continue")
#  print(g<-grid.arrange(a,b) )
}
## 58
# Hand-picked interaction plots saved after visual inspection.
ggsave("interaction_epsx_totusjh_ind45.png")
ggsave("interaction_totpot_epsx_ind82.png")
ggsave("interaction_xr_max_totusjh_ind100.png")
# Three-way (x, color, shape) dependence plots for one fixed x feature.
xvars<-plotd[,as.character(unique(variable))]
xvars<-intersect(xvars, "epsx_min_t10_5")
for(x in xvars){
for(i in plotd[variable!=x,as.character(unique(variable))]){
for(j in plotd[variable!=x][variable!=i,as.character(unique(variable))]){
print(
  me.dep2.plot(plotd,
    x=x,
    color_feature=j,
    shape_feature=i,wrapfold=T)
)
readline(prompt="Press [enter] to continue")
}
}
}
# Final chosen combination (the second shape_feature overrides the first).
x="totpot_max_t10_5";
color_feature="epsx_min_t10_5";
shape_feature="r_value_max_t10_5"
shape_feature="absnjzh_sd_t10_5"
me.dep2.plot(plotd, x, color_feature, shape_feature)
shap.plot.dependence(data_long=plotd,
  x="r_value_max_l1_t10_5",
  #y="shrgt45_min_t10_5",
  color_feature="shrgt45_min_t10_5")#,size0=2) #+
#shap.dependenc.plot(
# Thin the test-set and CV SHAP tables for plotting (every 18th/20th row).
plt_tst<-tstshap[order(flare)][seq(1,.N,18)]#[,.N] # 15
plt_cv<-allshap[order(flare)][seq(1,.N,20)]#[,.N]
|
e0757c9e09270f4b003177fe327634ec3ab39f11
|
2b990f06b7e44ca13de1e87286338b214d63ead3
|
/R/plot_calibration.R
|
38b0f00db01a243f4968d5183b38ede47afb34ff
|
[] |
no_license
|
MichalOleszak/footballpredict
|
0728ca469d6bd2a6109d1f5b50a41e0b6edf0203
|
4b9249a06425474f4aaeef3bf856bb4ed84efb2a
|
refs/heads/master
| 2020-03-27T05:34:01.123660
| 2019-10-08T22:19:05
| 2019-10-08T22:19:05
| 146,030,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,329
|
r
|
plot_calibration.R
|
# Plot model calibration: positive predictive value (PPV) versus predicted
# probability of the chosen class, split by predicted result ("H" home win /
# "A" away win; draws are computed but dropped before plotting).
#
# ensemble_preds: data frame whose first three columns are the class
#   probabilities, plus home_team/away_team columns, one row per game in the
#   held-out slice.
# games_train: training games with a `result` column; read from disk when not
#   supplied.  `path_data` is assumed to be a global -- TODO confirm.
# Returns the gridExtra arrangement (drawn as a side effect).
plot_calibration <- function(ensemble_preds, games_train = NULL) {
  if (is.null(games_train)) {
    games_train <- readRDS(file.path(path_data, "games_train.rds"))
  }
  # The predictions correspond to the last 20% of games_train.
  obs_train <- 1:round(nrow(games_train) * 0.8)
  obs_test <- (tail(obs_train, 1) + 1):nrow(games_train)
  # Attach observed results, then derive the predicted class (argmax over the
  # first three probability columns) and its probability.
  # NOTE(review): the magrittr `.` refers to the intermediate data frame, so
  # the probability columns being in positions 1:3 is load-bearing.
  dat <- ensemble_preds %>%
    bind_cols(games_train[obs_test, "result"]) %>%
    select(-home_team, -away_team) %>%
    bind_cols(pred = colnames(.)[apply(.[, 1:3], 1, which.max)]) %>%
    bind_cols(prob_pred = apply(.[, 1:3], 1, max)) %>%
    filter(prob_pred > 0.0)  # currently a no-op; raise to restrict to confident predictions
  res <- tibble()
  # For each predicted class and each 0.05-wide probability bin, compute the
  # share of predictions in the bin that were correct (PPV) and the bin size.
  for (predicted_class in c("H", "A", "D")) {
    for (predicted_prob in seq(0, 0.95, by = 0.05)) {
      temp <- dat %>%
        filter(pred == predicted_class) %>%
        filter(prob_pred >= predicted_prob & prob_pred < predicted_prob + 0.05)
      # rows predicted, columns real
      tb <- table(temp$pred, temp$result)
      if (predicted_class %in% colnames(tb)) {
        ppv <- tb[, predicted_class] / sum(tb)
      } else {
        # No correctly-classified outcome of this class in the bin.
        ppv <- 0
      }
      res <- bind_rows(res, tibble("class" = predicted_class,
                                   "prob" = predicted_prob,
                                   "ppv" = ppv,
                                   "n_obs" = nrow(temp)))
    }
  }
  # Drop draws and express bin counts as a share of the test slice.
  res <- res %>%
    filter(class != "D") %>%
    rename(Result = class) %>%
    mutate(n_obs = n_obs / length(obs_test))
  # Top panel: calibration curve with the identity line; bottom panel:
  # distribution of predictions across probability bins.
  grid.arrange(ggplot(res, aes(prob, ppv, colour = Result)) +
                 ylim(c(0, 1)) +
                 xlim(c(0, 0.95)) +
                 geom_line(size = 1, alpha = 0.6) +
                 geom_point(size = 3) +
                 geom_abline() +
                 ggtitle("Model Calibration") +
                 ylab("Positive Predictive Value") +
                 xlab("") +
                 theme_minimal() +
                 scale_colour_manual(values = c("#8bbc21", "#2f7ed8")),
               ggplot(res, aes(prob, n_obs, fill = Result)) +
                 geom_bar(stat = "identity", position = "dodge", alpha = 0.6) +
                 ylab("% observations") +
                 xlab("Predicted result probability (intervals of length 0.05)") +
                 theme_minimal() +
                 scale_fill_manual(values = c("#8bbc21", "#2f7ed8")),
               layout_matrix = matrix(c(1,1,2), ncol = 1))
}
|
e068e03cf6295a258571c4f70904ce2c8f2eac85
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbmn/examples/arcs4nbn1nbn.Rd.R
|
1c2f41ac73ff2ab552126f7a3227631ee30e6ecb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 198
|
r
|
arcs4nbn1nbn.Rd.R
|
# Extracted .Rd example for rbmn::arcs4nbn1nbn; prints the 'parallel' arcs
# between two networks bundled with the rbmn package.
library(rbmn)
### Name: arcs4nbn1nbn
### Title: returns the list of 'parallel' arcs of a crossed-nbn
### Aliases: arcs4nbn1nbn
### ** Examples
print(arcs4nbn1nbn(rbmn0nbn.01, rbmn0nbn.04));
|
dd47fd615a43da4c2e2376b1da4f6f2a00593b38
|
c1140c29282b6135c1c828196d7245972c018de7
|
/TwoSimR5/R/GetANewSimulation.r
|
6068dfe97b3f8cfd9f15308d2879672b0ca2e8d9
|
[] |
no_license
|
lenarcica/SimulationStackForBayesSpike
|
467835d2cac63099c357ceb3f29b0e641d806965
|
3a54f517a835a094b60f9fba20efa7b760949e3f
|
refs/heads/master
| 2020-04-15T19:01:25.806506
| 2019-01-09T18:36:48
| 2019-01-09T18:36:48
| 164,934,682
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,787
|
r
|
GetANewSimulation.r
|
################################################################################
## LookToGetNewSimulation()
## (c) 2009-2018 Alan Lenarcic
##
## As commanded by "AssessFirstTime()" this will look into which Estimators and Simulatiors
## haven't been conducted and uses SimMeData() to get new simulations.
##
##
## LICENSE INFO: R CODE
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/
#
# Note, TwoSimR5 code is predominantly built around running existing
# selector impleentations which exist as R packages, most of which have
# a GNU license.
#' Locate (or create) a simulation that still needs an estimator run.
#'
#' Acquires the file lock on the shared "SumPerformance" directory, loads the
#' serialized \code{MySummarySimulationsList}, asks it for a
#' simulation/estimator pair via \code{GetMeASimulation()}, re-saves the
#' updated bookkeeping object, and releases the lock.  The pervasive
#' \code{eval(parse(text = GetG0Text(...)))} / \code{SetGText(...)} calls are
#' the package's global-variable plumbing and are preserved as-is.
#'
#' Bug fix: the "AFail" check below originally tested \code{Getsims[1]}
#' (lower-case s), an undefined name, so that branch itself errored whenever
#' \code{GetSims} came back as the string "AFail".  It now tests
#' \code{GetSims[1]}.
#'
#' @param verbose Verbosity level; \code{NULL} means read the global setting.
#' @param quoteMore Prefix string used in log messages.
#' @param ForceFunction Optional estimator (name or index) to force.
#' @param TCS Passed through to \code{GetMeASimulation()}.
#' @param ... Unused; kept for call compatibility.
#' @return On success, \code{list(SMS, UniqueSimulationIdentifier,
#'   InSimulationsList, OnFunction, OnNumFunction)}; \code{111} when all
#'   simulations are complete; \code{-1} on failure.
LookToGetNewSimulation <- function(verbose = NULL, quoteMore = "LookForNewSimulation : ",
  ForceFunction = NULL, TCS = NULL, ...) {
  eval(parse(text=GetG0Text("CurrentLargeContainDir", S=1)));
  eval(parse(text=GetG0Text("CurrentSmallContainDir", S=1)));
  ## A numeric 0 is the GetG0Text "unset" sentinel: derive the directory from
  ## the global simulation settings and create it.
  ## NOTE(review): GetG0Text fetches "DefaultCorrelationXMatrix" (capital M)
  ## but DirNamingClenture is passed DefaultCorrelationXmatrix (lower-case m);
  ## the latter must come from the global environment elsewhere -- confirm.
  if (is.numeric(CurrentLargeContainDir) && CurrentLargeContainDir[1] == 0) {
    eval(parse(text=GetG0Text("CurrentLargeContainDir", S=1)));
    eval(parse(text=GetG0Text("n", S=1)));
    eval(parse(text=GetG0Text("p", S=1)));
    eval(parse(text=GetG0Text("k", S=1)));
    eval(parse(text=GetG0Text("sigma", S=1)));
    eval(parse(text=GetG0Text("NumReps", S=1)));
    eval(parse(text=GetG0Text("LARSSeekFlag", S=1)));
    eval(parse(text=GetG0Text("DefaultGenerateBetaVec", S=1)));
    eval(parse(text=GetG0Text("DefaultCorrelationXMatrix", S=1)));
    eval(parse(text=GetG0Text("AllDir", S=1)));
    CurrentLargeContainDir <- paste(AllDir, "//", DirNamingClenture(
      DefaultGenerateBetaVec, DefaultCorrelationXmatrix,
      NLen = n, kLen = p, kActiveLen = k, SigmaNoise = sigma,
      NumReps = NumReps, LARSSeekFlag = LARSSeekFlag), sep="");
    dir.create(CurrentLargeContainDir, showWarnings = FALSE);
    eval(parse(text=SetGText("CurrentLargeContainDir", S=1)));
  }
  ## Same setup for the small containing directory.
  if (is.numeric(CurrentSmallContainDir) && CurrentSmallContainDir[1] == 0) {
    eval(parse(text=GetG0Text("CurrentSmallContainDir", S=1)));
    eval(parse(text=GetG0Text("n", S=1)));
    eval(parse(text=GetG0Text("p", S=1)));
    eval(parse(text=GetG0Text("k", S=1)));
    eval(parse(text=GetG0Text("sigma", S=1)));
    eval(parse(text=GetG0Text("NumReps", S=1)));
    eval(parse(text=GetG0Text("LARSSeekFlag", S=1)));
    eval(parse(text=GetG0Text("DefaultGenerateBetaVec", S=1)));
    eval(parse(text=GetG0Text("DefaultCorrelationXMatrix", S=1)));
    eval(parse(text=GetG0Text("AllDir", S=1)));
    CurrentSmallContainDir <- paste(AllDir, "//", DirNamingClenture(
      DefaultGenerateBetaVec, DefaultCorrelationXmatrix,
      NLen = n, kLen = p, kActiveLen = k, SigmaNoise = sigma,
      NumReps = NumReps, LARSSeekFlag = LARSSeekFlag), sep="");
    dir.create(CurrentSmallContainDir, showWarnings = FALSE);
    eval(parse(text=SetGText("CurrentSmallContainDir", S=1)));
  }
  CurrentTotalCompleted <- 0;
  if (is.null(verbose)) {
    eval(parse(text=GetG0Text("verbose")));
    if (verbose == 0) {
      verbose = 0;
    }
  } else { verbose = as.numeric(verbose); }
  MySummaryPerformanceDirectory = paste(
    CurrentLargeContainDir, "//", "SumPerformance", sep="");
  dir.create(MySummaryPerformanceDirectory, showWarnings=FALSE, recursive=TRUE);
  if (verbose > 0) {
    AFilePrint("Looking for New Simulation Start with LockMeIn");
    flush.console();
  }
  ## Spin (with random back-off) until we hold the SumPerformance lock,
  ## working from the small containing directory.
  try(Oldwd <- getwd());
  try(setwd(CurrentSmallContainDir));
  while(TwoSimR5:::LockMeIn(verbose=as.numeric(verbose), quoteMore=quoteMore,
    LFDir = "SumPerformance", LockIn="SumLock")==FALSE){
    try(Sys.sleep(runif(1,0,4))); }
  try(setwd(Oldwd));
  ## Reset then load the shared bookkeeping object from disk.
  try(MySummarySimulationsList <- NULL);
  try(eval(parse(text=SetGText("MySummarySimulationsList",
    "globalenv()", S=1))))
  MyListfiles = list.files(MySummaryPerformanceDirectory);
  try(MySummarySimulationsList <- NULL);
  try(setwd(CurrentSmallContainDir));
  try(load(paste(
    "SumPerformance//SummaryPerformanceRData.RData", sep="")));
  try(eval(parse(text=TwoSimR5:::RecoveryTSSLText(CurrentLargeContainDir))));
  if (is.null(MySummarySimulationsList)) {
    paste(AFilePrint("**GetSims: we did not load in MySummarySimulationsList. "));
  }
  ## Ask the list for a simulation/estimator assignment.
  GetSims <- NULL;
  try(GetSims <- MySummarySimulationsList$GetMeASimulation(
    AlreadyLocked=TRUE, ForceFunction = ForceFunction, TCS = TCS));
  try(eval(parse(text=SetGText("GetSims", "globalenv()", S=1))))
  if (!exists("GetSims") || is.null(GetSims) || is.numeric(GetSims) ||
    !is.list(GetSims) || is.null(GetSims$SMS)) {
    AFilePrint("*************************************************************");
    AFilePrint(paste("** Error inspection: LocateASim: GetSims was returned ",
      "as a NULL, we fail", sep=""));
    AFilePrint("** SaveMySummarySimulationList to terminal: ");
    eval(parse(text=SetGText("MySummarySimulationsList", "globalenv()", S=1)));
    return(-1);
    tryCatch("We are in it bad!");  # unreachable (kept from original)
  }
  ## Persist the updated bookkeeping object and release the lock.
  Oldwd <- getwd();
  try(setwd(CurrentSmallContainDir));
  try(secure.save.MSS(AObject=MySummarySimulationsList,
    ObjectName="MySummarySimulationsList",
    file=paste("SumPerformance//SummaryPerformanceRData.RData", sep="")));
  try(setwd(Oldwd));
  try(setwd(CurrentSmallContainDir));
  UnLockMe(verbose=verbose, quoteMore=quoteMore,
    LFDir = "SumPerformance", LockIn="SumLock");
  try(setwd(Oldwd));
  try(eval(parse(text=TryFillCurrentTotalCompleted())));
  if (is.character(GetSims) && GetSims[1] == "AllComplete") {
    if (verbose >= 2) {
      AFilePrint("We received a Getsims is AllComplete message from GetMeASimulation");
      AFilePrint("End Get Sims. ");
    }
    return(111);
  }
  ## BUG FIX: was `Getsims[1]` (undefined) in the original.
  if ((is.character(GetSims) && GetSims[1] == "AFail") ||
    (is.numeric(GetSims) && length(GetSims) == 1 && GetSims <= -1)) {
    AFilePrint("*****************************************************************");
    AFilePrint("*****************************************************************");
    AFilePrint("LookToGetNewSimulation: Fail From GetSims!");
    AFilePrint("LookToGetNewSimulation: We were delivered -1 from Get Sims");
    AFilePrint(paste("Our MySummaryPerformanceDirectory is: ",
      MySummaryPerformanceDirectory, sep=""));
    AErrorPrint(paste("LookToGetNewSimulation: MySummaryPerformanceDirectory",
      " results in Error.", sep="")); flush.console();
    return(-1);
    tryCatch("Fleeing LookToGetNewSimulation (In GetANewSimulation.r)")  # unreachable
  }
  ##UnLockMe(verbose=verbose, quoteMore=quoteMore, LFDir = MySummaryPerformanceDirectory);
  #LockedSMSFile <- LockInSMS(SMS, CurrentContainDir = CurrentContainDir, OnFunction=NameFunctions[ISample]);
  return(list(SMS=GetSims$SMS, UniqueSimulationIdentifier=GetSims$UniqueSimulationIdentifier,
    InSimulationsList=GetSims$InSimulationsList,
    OnFunction = GetSims$OnFunction,
    OnNumFunction=GetSims$OnNumFunction));
}
##############################################################################
## GenerateBetaLongQuantiles
SummarySimulationList$methods(
GetMeASimulation = function(AlreadyLocked=TRUE,ForceFunction=NULL,
TCS = NULL,...) {
eval(parse(text=GetG0Text("TargetTotalSimulations")));
if (!is.null(ForceFunction)) {
if (is.character(ForceFunction)) {
AT <- (1:length(.self$NameFunctions))[
.self$NameFunctions == ForceFunction];
if (length(AT) == 1) {
OnNumForceFunction = AT;
} else {
OnNumForceFunction = NULL; ForceFunction = NULL;
}
} else if (is.numeric(ForceFunction)) {
if (round(ForceFunction) >= 1 &&
round(ForceFunction) <= length(.self$NameFunctions)) {
OnNumForceFunction <- round(ForceFunction);
ForceFunction <- .self$NameFunctions[OnNumForceFunction]
} else {
OnNumForceFunction = NULL; Forcefunction = NULL;
}
}
} else {OnNumForceFunction <- NULL; }
if (is.null(.self$TheSummarySimulationList) ||
length(.self$TheSummarySimulationList) <= 0) {
SMS <- GenerateANewSimulation(AlreadyLocked=AlreadyLocked)
UniqueSimulationIdentifier <- SMS$UniqueSimulationIdentifier;
.self$TheSummarySimulationList[[1]] <- SummarySimulationRecord$new(
UniqueSimulationIdentifier=SMS$UniqueSimulationIdentifier,
SavedUniqueSimulationRDataFile = paste(.self$LargeContainingDir,
"//SMS", SMS$UniqueSimulationIdentifier, ".RData", sep=""));
try(names(.self$TheSummarySimulationList)[1] <-
SMS$UniqueSimulationIdentifier);
names(.self$TheSummarySimulationList)[1] <-
SMS$UniqueSimulationIdentifier;
VLD <- .self$ValidSimulations;
SampleAFunction <- sample(VLD, prob=rep(1, length(VLD)),
size=1, replace=FALSE);
if (!is.null(OnNumForceFunction)) {
SampleAFunction <- OnNumForceFunction;
}
if (SampleAFunction <= 1) {
AFilePrint(paste("Error In GetASimulation! ",
"SampleAFunction = ", SampleAFunction, sep=""));
}
TryAddText <- "
SuccAdd <- FALSE; AOn <- -1;
AOn <- .self$TheSummarySimulationList[[1]]$AddAFunction(
as.integer(SampleAFunction), SSL=.self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
if (is.character(AOn) && AOn == "AFail") {
AFilePrint("GetANewSimulation: We get A FAIL!");
}
AFilePrint(paste("*** AfterAddAFunction: Well \n Well ",
"\n Well: We get AOn = ", AOn, sep=""));
if (SuccAdd == FALSE || (is.numeric(AOn) &&
AOn[1] != SampleAFunction) || !is.numeric(AOn) ||
(is.numeric(AOn) && AOn < 0) ||
(is.character(AOn) && AOn == "AFail")) {
AFilePrint(paste("**********************************",
"************************", sep=""));
AFilePrint(paste("GetANewSimulation.r On[", 1,
"] Try add function ", SampleAFunction, " fails!", sep=""));
AText <- "
TheSummarySimulationList <- .self$TheSummarySimulationList;
MySummarySimulationStore <- .self;
";
try(eval(parse(text=AText)));
eval(parse(text=SetGText("TheSummarySimulationList", S=1)));
eval(parse(text=SetGText("MySummarySimulationStore", S=1)));
eval(parse(text=SetGText("SampleAFunction", S=1)));
return("AFail")
tryCatch("GetANewSimulation.r Fail!")
}
return(list(SMS=SMS, UniqueSimulationIdentifier=
SMS$UniqueSimulationIdentifier,
InSimulationsList=1, OnFunction =
.self$NameFunctions[SampleAFunction],
OnNumFunction=SampleAFunction,
CurrentTotalCompleted = CurrentTotalCompleted));
}
VLD <- NULL; TC = NULL; TK = NULL;
try(VLD <- .self$ValidSimulations);
try(TC <- .self$GetTotalCompletedFunction());
try(TK <- .self$MyTotalKilledFunctions);
try(eval(parse(text=TryFillCurrentTotalCompletedTC())));
if (all(TC[VLD] >= TargetTotalSimulations)) {
return("AllComplete");
}
if (exists("OnNumForceFunction") &&
!is.null(OnNumForceFunction) && OnNumForceFunction >= 1) {
if (TC[OnNumForceFunction] + TK[OnNumForceFunction] >=
length(.self$TheSummarySimulationList)) {
SMS <- GenerateANewSimulation(AlreadyLocked=AlreadyLocked)
UniqueSimulationIdentifier <- SMS$UniqueSimulationIdentifier;
NewN <- length(.self$TheSummarySimulationList)
.self$TheSummarySimulationList[[NewN]] <- SummarySimulationRecord$new(
UniqueSimulationIdentifier=SMS$UniqueSimulationIdentifier,
SavedUniqueSimulationRDataFile = paste(.self$LargeContainingDir,
"//SMS", SMS$UniqueSimulationIdentifier, ".RData", sep=""));
try(names(.self$TheSummarySimulationList)[NewN] <-
SMS$UniqueSimulationIdentifier);
names(.self$TheSummarySimulationList)[NewN] <-
SMS$UniqueSimulationIdentifier;
SampleAFunction <- OnNumForceFunction;
TryAddText <- "
SuccAdd <- FALSE; AOn <- -1;
AOn <- .self$TheSummarySimulationList[[NewN]]$AddAFunction(
as.integer(SampleAFunction), SSL = .self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
return(list(SMS=SMS, UniqueSimulationIdentifier=
SMS$UniqueSimulationIdentifier,
InSimulationsList=NewN, OnFunction =
.self$NameFunctions[SampleAFunction],
OnNumFunction=SampleAFunction));
}
}
if (all(TC[VLD] + TK[VLD] >= length(.self$TheSummarySimulationList))) {
SMS <- GenerateANewSimulation(AlreadyLocked=AlreadyLocked)
try(NewSim <- length(.self$TheSummarySimulationList)+1);
try(.self$TheSummarySimulationList[[NewSim]] <-
SummarySimulationRecord$new(SMS$UniqueSimulationIdentifier,
SavedUniqueSimulationRDataFile = paste(.self$LargeContainingDir,
"//SMS", SMS$UniqueSimulationIdentifier, ".RData", sep="")));
try(names(.self$TheSummarySimulationList)[NewSim] <-
SMS$UniqueSimulationIdentifier);
SampleAFunction <- sample(VLD,
prob=.self$SampleFunctionProbabilities[VLD],
size=1);
if (!is.null(OnNumForceFunction)) {
SampleAFunction <- OnNumForceFunction; }
TryAddText <- "
SuccAdd <- FALSE;
AOn <- -1;
AOn <- .self$TheSummarySimulationList[[NewSim]]$AddAFunction(
as.integer(SampleAFunction), SSL = .self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
AFilePrint(paste("*** AfterAddAFunction: Well \n Well ",
"\n Well: We get AOn = ", AOn, sep=""));
if (is.character(AOn) && AOn == "AFail") {
AFilePrint("GetANewSimulation: We get A FAIL!");
}
if (SuccAdd == FALSE || (is.numeric(AOn) &&
AOn[1] != SampleAFunction) || !is.numeric(AOn)
||
(is.numeric(AOn) && AOn < 0) ||
(is.character(AOn) && AOn == "AFail")) {
AFilePrint("ERROR ERROR, TC > SSL ********************************");
AFilePrint(paste("TC > TSSL: GetANewSimulation.r On[", NewSim,
"] Try add function ", SampleAFunction, " fails!", sep=""));
ATText <- "
TheSummarySimulationList <- .self$TheSummarySimulationList;
MySummarySimulationStore <- .self;
";
try(eval(parse(text=ATText)));
eval(parse(text=SetGText("TheSummarySimulationList", S=1)));
eval(parse(text=SetGText("MySummarySimulationStore", S=1)));
eval(parse(text=SetGText("SampleAFunction", S=1)));
eval(parse(text=SetGText("NewSim", S=1)));
return("AFail")
tryCatch("GetANewSimulation.r Fail!")
}
return(list(SMS=SMS,
UniqueSimulationIdentifier=SMS$UniqueSimulationIdentifier,
InSimulationsList=NewSim,
OnFunction = .self$NameFunctions[SampleAFunction],
OnNumFunction=SampleAFunction));
}
MyT <- "
try(PerformanceMatrix <- NULL);
";
try(eval(parse(text=MyT)));
try(PerformanceMatrix <- .self$GenPerformanceMatrix());
if (is.null(PerformanceMatrix)) {
AFilePrint(paste("GenPerformanceMatrix Failed, save ",
"TheSummarySimulationList to terminal", sep=""));
ATText <- "
TheSummarySimulationList <- .self$TheSummarySimulationList
MySummarySimulationListOnSave <- .self;
";
try(eval(parse(text=ATText)));
eval(parse(text=SetGText("TheSummarySimulationList", S=1)))
eval(parse(text=SetGText("MySummarySimulationListOnSave", S=1)))
}
if (!is.null(PerformanceMatrix) && !is.null(OnNumForceFunction) &&
OnNumForceFunction >= 0) {
SampleAFunction <- OnNumForceFunction;
if (any(PerformanceMatrix[,SampleAFunction] >= 0)) {
AVT <- rep(0, NROW(PerformanceMatrix));
if (any(PerformanceMatrix[,SampleAFunction] == 0)) {
AVT[PerformanceMatrix[,SampleAFunction] == 0] <- 1;
} else {
AID <- PerformanceMatrix[,SampleAFunction] >= 0;
AVT[AID] <-
max(PerformanceMatrix[AID,SampleAFunction]) -
PerformanceMatrix[AID,SampleAFunction]+1;
}
if (!any(AVT > 0)) {
AFilePrint(paste("Error in Search PerformanceMatrix for ",
" force function: ", ForceFunction, sep=""));
AFilePrint(paste("Performance was : [",
paste(PerformanceMatrix[,SampleAFunction], collapse = ", "),
"]", sep=""));
AFilePrint(paste("Something is wrong in the ",
"GetANewsimulation and you shoule come and fight it.", sep=""));
AErrorPrint("Error Trying to get ForceFunction Simulation!");
return(-1);
} else {
SampleASimulation <- sample(
1:NROW(PerformanceMatrix), prob=AVT,size=1);
}
} else {
SMS <- GenerateANewSimulation(AlreadyLocked=AlreadyLocked)
try(NewSim <- length(.self$TheSummarySimulationList)+1);
try(.self$TheSummarySimulationList[[NewSim]] <-
SummarySimulationRecord$new(SMS$UniqueSimulationIdentifier,
SavedUniqueSimulationRDataFile = paste(.self$LargeContainingDir,
"//SMS", SMS$UniqueSimulationIdentifier, ".RData", sep="")));
try(names(.self$TheSummarySimulationList)[NewSim] <-
SMS$UniqueSimulationIdentifier);
SampleASimulation <- NewSim;
}
TryAddText <- "
SuccAdd <- FALSE;
AOn <- -1;
AOn <- .self$TheSummarySimulationList[[
SampleASimulation]]$AddAFunction(as.integer(SampleAFunction),
SSL=.self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
try(UniqueSimulationIdentifier <-
.self$TheSummarySimulationList[[
SampleASimulation]]$UniqueSimulationIdentifier);
load(paste(.self$MyBaseSimulationStorageDirectory, "//SMS",
.self$TheSummarySimulationList[[
SampleASimulation]]$UniqueSimulationIdentifier,
".RData", sep=""));
return(list(SMS=SMS,
UniqueSimulationIdentifier = SMS$UniqueSimulationIdentifier,
InSimulationsList=SampleASimulation, OnFunction =
.self$NameFunctions[SampleAFunction],
OnNumFunction = SampleAFunction));
}
if (!is.null(PerformanceMatrix)) {
if (any(PerformanceMatrix[,VLD] == 0)) {
ATOs <- apply(PerformanceMatrix,2, function(x) { length(x[x==0]) });
SampleAFunction <- sample(VLD, prob=ATOs[VLD], size=1);
ARD <- (1:NROW(PerformanceMatrix));
ATTs <- rep(0, length(ARD));
ATTs[PerformanceMatrix[,SampleAFunction] == 0] <- 1;
SampleASimulation <- sample(ARD, prob=ATTs, size=1);
TryAddText <- "
SuccAdd <- FALSE;
AOn <- -1;
AOn <- .self$TheSummarySimulationList[[
SampleASimulation]]$AddAFunction(as.integer(SampleAFunction),
SSL = .self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
AFilePrint(paste("*** AfterAddAFunction: Well \n Well \n ",
"Well: We get AOn = ", AOn, sep=""));
if (is.character(AOn) && AOn == "AFail") {
AFilePrint("GetANewSimulation: We get A FAIL!");
}
if (SuccAdd == FALSE || (is.numeric(AOn)
&& AOn[1] != SampleAFunction) || !is.numeric(AOn) ||
(is.numeric(AOn) && AOn < 0) ||
(is.character(AOn) && AOn == "AFail")) {
AFilePrint(paste("SampleASimulation = ", SampleASimulation,
": GetANewSimulation.r On[", SampleASimulation,
"] Try add function ", SampleAFunction, " fails!", sep=""));
ATText <- "
TheSummarySimulationList <- .self$TheSummarySimulationList;
MySummarySimulationStore <- .self;
";
try(eval(parse(text=ATText)));
try(eval(parse(text=SetGText("ATOs", S=1))));
try(eval(parse(text=SetGText("PerformanceMatrix", S=1))));
eval(parse(text=SetGText("TheSummarySimulationList", S=1)));
try(TSSL <- .self$TheSummarySimulationList, silent=TRUE);
eval(parse(text=SetGText("TSSL", S=1)));
eval(parse(text=SetGText("MySummarySimulationStore", S=1)));
eval(parse(text=SetGText("SampleAFunction", S=1)));
eval(parse(text=SetGText("SampleASimulation", S=1)));
return("AFail")
tryCatch("GetANewSimulation.r Fail!")
}
try(UniqueSimulationIdentifier <-
.self$TheSummarySimulationList[[
SampleASimulation]]$UniqueSimulationIdentifier);
load(paste(.self$MyBaseSimulationStorageDirectory, "//SMS",
.self$TheSummarySimulationList[[
SampleASimulation]]$UniqueSimulationIdentifier,".RData", sep=""));
return(list(SMS=SMS,
UniqueSimulationIdentifier = SMS$UniqueSimulationIdentifier,
InSimulationsList=SampleASimulation, OnFunction =
.self$NameFunctions[SampleAFunction],
OnNumFunction = SampleAFunction));
} else if (NROW(PerformanceMatrix) < TargetTotalSimulations) {
SMS <- GenerateANewSimulation(AlreadyLocked=AlreadyLocked)
UniqueSimulationIdentifier <- SMS$UniqueSimulationIdentifier;
try(NewSim <- length(.self$TheSummarySimulationList)+1);
try(.self$TheSummarySimulationList[[NewSim]] <-
SummarySimulationRecord$new(SMS$UniqueSimulationIdentifier));
try(names(.self$TheSummarySimulationList)[NewSim] <-
.self$TheSummarySimulationList[[NewSim]]$UniqueSimulationIdentifier);
SampleAFunction <- sample(VLD,
prob=.self$SampleFunctionProbabilities[VLD],
size=1);
if (!is.null(OnNumForceFunction)) {
SampleAFunction <- OnNumForceFunction;
ForcingFunction<-TRUE;
} else { ForcingFunction <- FALSE; }
TryAddText <- "
SuccAdd <- FALSE; AOn <- -1;
AOn <- .self$TheSummarySimulationList[[NewSim]]$AddAFunction(
as.integer(SampleAFunction), ForcingFunction=ForcingFunction,
SSL = .self);
SuccAdd <- TRUE;
";
try(eval(parse(text=TryAddText)));
## AFilePrint(paste("*** AfterAddAFunction:
## Well \n Well \n Well: We get AOn = ", AOn, sep=""));
if (is.character(AOn) && AOn == "AFail") {
AFilePrint("GetANewSimulation: We get A FAIL!");
}
if (SuccAdd == FALSE || (is.numeric(AOn) &&
AOn[1] != SampleAFunction) || !is.numeric(AOn) ||
(is.numeric(AOn) && AOn < 0) ||
(is.character(AOn) && AOn == "AFail")) {
AFilePrint(paste("NewSim AddOn = ", NewSim,
": GetANewSimulation.r On[", SampleASimulation,
"] Try add function ", SampleAFunction, " fails!", sep=""));
ATText <- "
TheSummarySimulationList <- .self$TheSummarySimulationList;
MySummarySimulationStore <- .self;
";
try(eval(parse(text=ATText)));
try(eval(parse(text=SetGText("ATOs", S=1))));
try(eval(parse(text=SetGText("PerformanceMatrix", S=1))));
eval(parse(text=SetGText("TheSummarySimulationList", S=1)));
eval(parse(text=SetGText("MySummarySimulationStore", S=1)));
eval(parse(text=SetGText("SampleAFunction", S=1)));
##eval(parse(text=SetGText("SampleASimulation", S=1)));
eval(parse(text=SetGText("NewSim", S=1)));
return("AFail")
tryCatch("GetANewSimulation.r Fail!")
}
return(list(SMS=SMS,
UniqueSimulationIdentifier=SMS$UniqueSimulationIdentifier,
InSimulationsList=NewSim,
OnFunction = .self$NameFunctions[SampleAFunction],
OnNumFunction=SampleAFunction));
} else {
if (FALSE) {
AFilePrint("ERROR ERROR ERROR ERROR ERROR ERROR ERROR ERROR ");
AFilePrint(paste("*** GetANewSimulation Hey we are at a ",
"MaxDuplicateTrials call but it doesn't make sense", sep=""));
AFilePrint("***We are not testing this out yet. ")
AFilePrint("***Save PerformanceMatrix to ask question!");
eval(parse(text=SetGText("PerformanceMatrix", "globalenv()", S=1)));
AFilePrint("*** Figure out source of error! ");
return("AFail")
tryCatch("GetANewSimulation.r Fail")
}
eval(parse(text=GetG0Text("MaxDuplicateTrials", "globalenv()", S=1)));
if (MaxDuplicateTrials <= 1) { MaxDuplicateTrials <- 2; }
LLens <- apply(PerformanceMatrix, 2, function(x) { length(x[x>=0]) });
Acts <- apply(PerformanceMatrix, 2, function(x) {
sum(x[x>=0], na.rm=TRUE) });
Maxs <- apply(PerformanceMatrix, 2, function(x) {
AT <- x[x>=0];
if (length(AT) >= 1) { return(max(AT)); }
return(0); });
AllMaxs <- max(Maxs[VLD]);
DOs <- LLens * AllMaxs - Acts+1;
if (!any(DOs[VLD]>0)) {
ATT <- paste("Hey, Maximum filled table can't find a ",
"valid DO greater than zero
VLD = (", paste(VLD, collapse=", "), ")
DOs = (", paste(DOs, collapse=", "), ")
LLens = (", paste(LLens, collapse=", "), ")
Acts = (", paste(LLens, collapse=", "), ")
Maxs= (", paste(Maxs, collapse=", "), ")", sep="");
AFilePrint(ATT);
AErrorPrint(ATT);
}
SampleAFunction <- sample(VLD, prob=DOs[VLD], size=1);
ART <- PerformanceMatrix[,SampleAFunction];
maxART <- max(ART);
TrySims <- 1:length(ART);
SProb <- rep(0, length(ART));
if (maxART < 0) {
AFilePrint("ERROR ERROR ");
AFilePrint(paste("Hey, We sampled ", SampleAFunction,
" but ART is (", paste(ART, collapse=", "), ")", sep=""));
AErrorPrint(paste("Hey, We sampled ", SampleAFunction,
" but ART is (", paste(ART, collapse=", "), ")", sep=""));
} else if (maxART == 0) {
SProb[ART == 0] <- 1;
} else if (any(ART) == 0) {
SProb[ART == 0] <- 1;
} else {
SProb[ART >= 0] = maxART - ART[ART >= 0] +1;
}
if (!any(SProb > 0)) {
AFilePrint("ERROR ERROR");
AFilePrint(paste("Hey, We sampled ", SampleAFunction,
" but ART is (", paste(ART, collapse=", "), ")", sep=""));
AFilePrint(paste(" but SProb Sucks! (",
paste(SProb, collapse=", "), ")", sep=""));
ErrorText <- paste("Hey, We sampled ", SampleAFunction,
" but ART is (", paste(ART, collapse=", "), ")
but SProb Sucks! (",
paste(SProb, collapse=", "), ")", sep="");
AErrorPrint(ErrorText);
return(-1);
}
SampleASimulation <- sample(TrySims, prob=SProb, size=1);
try(.self$TheSummarySimulationList[[
SampleASimulation]]$AddAFunction(as.integer(SampleAFunction)),
SSL = .self);
load(paste(.self$MyBaseSimulationStorageDirectory, "//SMS",
try(.self$TheSummarySimulationList[[
SampleASimulation]]$UniqueSimulationIdentifier,".RData", sep="")));
return(list(SMS=SMS, UniqueSimulationIdentifier=
SMS$UniqueSimulationIdentifier,
InSimulationsList=length(.self$TheSummarySimulationList),
OnFunction = .self$NameFunctions[SampleAFunction],
OnNumFunction=SampleAFunction));
}
AFilePrint(paste("GetASimulation: How the Heck did we get ",
"Here to return -1!!", sep=""));
AErrorPrint(paste("GetASimulation: How the Heck did we get ",
"Here to return -1!!", sep=""));
return(-1);
}
AFilePrint(paste("GetASimulation: Null PerformanceMatrix ",
", How the Heck did we get ",
"Here to return Nothing!!", sep=""));
AErrorPrint(paste("GetASimulation: Null PerformanceMatrix ",
"How the Heck did we get ",
"Here to return Nothing!!", sep=""));
return(-2);
}
);
## GenerateANewSimulation
##
## Creates a brand-new simulated data set (an "SMS" object) according to the
## globally configured simulation type (OnSimType: "Group", "Robit",
## "GroupRobit", or default linear/logistic), saves it under
## SumPerformance//SMS<UniqueSimulationIdentifier>.RData, and returns it.
##
## AlreadyLocked: TRUE when the caller already holds the "SumLock" file
##   lock; when FALSE this function acquires and releases the lock itself
##   around the save.
##
## Most configuration is pulled from the global workspace via GetG0Text(),
## which generates assignment code that eval/parse executes in this frame.
GenerateANewSimulation <- function(AlreadyLocked=TRUE) {
## Copy the core simulation parameters from the global workspace.
eval(parse(text=GetG0Text("n", S=1)));
eval(parse(text=GetG0Text("p", S=1)));
eval(parse(text=GetG0Text("k", S=1)));
eval(parse(text=GetG0Text("sigma", S=1)));
eval(parse(text=GetG0Text("tNoiseDF", S=1)));
eval(parse(text=GetG0Text("GenerateBetaVec", S=1)));
eval(parse(text=GetG0Text("CorrelationXmatrix", S=1)));
eval(parse(text=GetG0Text("tNoiseDF", S=1)));
eval(parse(text=GetG0Text("LogitRegression", S=1)));
eval(parse(text=GetG0Text("Beta0",S=1)));
eval(parse(text=GetG0Text("ExperimentName", S=1)));
eval(parse(text=GetG0Text("jobii", S=1)));
eval(parse(text=GetG0Text("WorkingRow",S=1)));
eval(parse(text=GetG0Text("TargetTotalSimulations",S=1)));
eval(parse(text=GetG0Text("NameFunctions",S=1)));
eval(parse(text=GetG0Text("OnSimType", S=1)));
## Pick one estimator index uniformly at random; passed through to the
## simulators as ISample.
INeed = 1:length(NameFunctions);
ISample = sample(INeed, size=1);
SMS = NULL;
## NOTE(review): `verbose` is neither an argument nor fetched above -- this
## relies on a `verbose` binding in the calling/global environment. Same
## for `quoteMore`, `UniqueProcessIdentifier`, `CurrentLargeContainDir`
## and `CurrentSmallContainDir` used further down -- confirm they are
## always set before this function runs.
if (verbose > 1) {
AFilePrint("Look for a new simulation, had to configure a new SMS"); flush.console();
}
eval(parse(text=GetG0Text("ALargeContainDir")));
eval(parse(text=GetG0Text("ASmallContainDir")));
## NOTE(review): MySummaryPerformanceDirectory (built from
## ALargeContainDir) is not used below; the save at the end uses
## CurrentLargeContainDir instead -- confirm this is intentional.
MySummaryPerformanceDirectory <- paste(ALargeContainDir,
"//SumPerformance", sep="");
## Dispatch on simulation type. Group* variants fetch their own
## group-structure parameters and call SimGroupData(); the others call
## SimMeData(). "Robit"/"GroupRobit" force LogitRegression = TRUE.
if (OnSimType == "Group") {
eval(parse(text=GetG0Text("GroupSize", S=1)));
eval(parse(text=GetG0Text("pGroups", S=1)));
eval(parse(text=GetG0Text("kGroups", S=1)));
eval(parse(text=GetG0Text("tNoiseDF", S=1)));
eval(parse(text=GetG0Text("sigma", S=1)));
eval(parse(text=GetG0Text("SigmaNoise", S=1)));
eval(parse(text=GetG0Text("Beta0", S=1)));
eval(parse(text=GetG0Text("GenerateBetaGroupVec", S=1)));
eval(parse(text=GetG0Text("CorrelationXmatrix", S=1)));
eval(parse(text=GetG0Text("LogitRegression", S=1)));
eval(parse(text=GetG0Text("CorrelationXmatrix", S=1)));
eval(parse(text=GetG0Text("MeanCenterXX", S=1)));
eval(parse(text=GetG0Text("jobii", S=1)));
## Grouped linear simulation (DontSave=TRUE: saving handled below).
SMS <- SimGroupData(n = n, pGroups = pGroups, GroupSize=GroupSize,
kGroups = kGroups, sigma = sigma,
SigmaNoise = SigmaNoise, tNoiseDF = tNoiseDF, GenerateBetaGroupVec = GenerateBetaGroupVec,
CorrelationXmatrix = CorrelationXmatrix,
LogitRegression = FALSE, Beta0 = Beta0, ExperimentName="", jobii= jobii,
WorkingRow = WorkingRow, AlreadyLocked=AlreadyLocked, MeanCenterXX = MeanCenterXX,
UniqueProcessIdentifier = UniqueProcessIdentifier, ISample = ISample,
DontSave=TRUE)
} else if (OnSimType == "Robit") {
## Robit: non-grouped simulation with logistic response forced on.
try(SMS <- SimMeData(n = n, p = p, k = k, sigma = sigma,
GenerateBetaVec = GenerateBetaVec,
CorrelationXmatrix = CorrelationXmatrix, tNoiseDF = tNoiseDF,
LogitRegression = TRUE, Beta0 = Beta0,
ExperimentName=ExperimentName, jobii= jobii, WorkingRow = WorkingRow,
AlreadyLocked=AlreadyLocked,
UniqueProcessIdentifier=UniqueProcessIdentifier, ISample=ISample,
DontSave = TRUE));
} else if (OnSimType == "GroupRobit") {
eval(parse(text=GetG0Text("GroupSize", S=1)));
eval(parse(text=GetG0Text("pGroups", S=1)));
eval(parse(text=GetG0Text("kGroups", S=1)));
eval(parse(text=GetG0Text("tNoiseDF", S=1)));
eval(parse(text=GetG0Text("sigma", S=1)));
eval(parse(text=GetG0Text("SigmaNoise", S=1)));
eval(parse(text=GetG0Text("Beta0", S=1)));
eval(parse(text=GetG0Text("GenerateBetaGroupVec", S=1)));
eval(parse(text=GetG0Text("CorrelationXmatrix", S=1)));
eval(parse(text=GetG0Text("LogitRegression", S=1)));
eval(parse(text=GetG0Text("CorrelationXmatrix", S=1)));
eval(parse(text=GetG0Text("MeanCenterXX", S=1)));
eval(parse(text=GetG0Text("jobii", S=1)));
## Grouped simulation with logistic response forced on.
SMS <- SimGroupData(n = n, pGroups = pGroups, GroupSize=GroupSize,
kGroups = kGroups, sigma = sigma,
SigmaNoise = SigmaNoise, tNoiseDF = tNoiseDF, GenerateBetaGroupVec = GenerateBetaGroupVec,
CorrelationXmatrix = CorrelationXmatrix,
LogitRegression = TRUE, Beta0 = Beta0, ExperimentName="", jobii= jobii,
WorkingRow = WorkingRow, AlreadyLocked=AlreadyLocked, MeanCenterXX = MeanCenterXX,
UniqueProcessIdentifier = UniqueProcessIdentifier, ISample = ISample,
DontSave=TRUE)
} else {
## Default: non-grouped simulation honoring the global LogitRegression.
try(SMS <- SimMeData(n = n, p = p, k = k, sigma = sigma,
GenerateBetaVec = GenerateBetaVec,
CorrelationXmatrix = CorrelationXmatrix, tNoiseDF = tNoiseDF,
LogitRegression = LogitRegression, Beta0 = Beta0,
ExperimentName=ExperimentName, jobii= jobii, WorkingRow = WorkingRow,
AlreadyLocked=AlreadyLocked,
UniqueProcessIdentifier=UniqueProcessIdentifier, ISample=ISample,
DontSave = TRUE));
}
## When the caller does not already hold the lock, spin until the
## "SumLock" file lock is acquired before writing the SMS file.
if (AlreadyLocked == FALSE) {
try(MOld <- getwd());
try(setwd(CurrentSmallContainDir));
while(LockMeIn(verbose=as.numeric(verbose), quoteMore=quoteMore, LFDir = "SumPerformance",
LockIn="SumLock")==FALSE){
try(Sys.sleep(runif(1,0,4)));
}
try(setwd(MOld));
}
## Persist the new simulation under its unique identifier.
try(MOld <- getwd());
try(setwd(CurrentLargeContainDir));
save(SMS = SMS, file=paste("SumPerformance", "//SMS", SMS$UniqueSimulationIdentifier, ".RData", sep=""));
## Release the lock if we acquired it above.
if (AlreadyLocked == FALSE) {
try(setwd(CurrentSmallContainDir));
try(UnLockMe(verbose=as.numeric(verbose), quoteMore=quoteMore, LFDir = "SumPerformance",
LockIn="SumLock"));
try(setwd(MOld));
}
try(setwd(MOld));
return(SMS);
}
##############################################################################
## HitAFunctionForSimulation
##
## Records the outcome of running estimator `NameFunction` on the simulation
## identified by `UniqueSimulationIdentifier`.  Hit = "+" records a success;
## any other value is recorded as a fail (code "D").
##
## Under the "SumLock" file lock this loads the shared
## SummaryPerformanceRData.RData bookkeeping object, marks the function
## complete/failed on the matching simulation record, refreshes the global
## completion counters, saves the bookkeeping object back, and unlocks.
##
## Returns: "+" (success recorded), "L" (success, but the record reported an
## unusual completion code), "F" (failure recorded), or NULL when the file
## lock could not be acquired.
HitAFunctionForSimulation <- function(UniqueProcessIdentifier, UniqueSimulationIdentifier, NameFunction, Hit = "+",
  quoteMore = "HitASimulation", verbose=0, TCS = NULL) {
  ## Normalize a logical verbose flag onto the numeric 0/1 scale used below.
  if (is.logical(verbose) && verbose == TRUE) {verbose = 1;}
  if (is.logical(verbose) && verbose == FALSE) {verbose =0;}
  if (verbose >= 2) {
    AFilePrint(paste("HitAFunctionForSimulation: Running for Name = ",
      NameFunction, " and UniqueProcess = ",
      UniqueProcessIdentifier, sep="")); flush.console();
    AFilePrint(paste(" --- with UniqueSimulationIdentifier = ",
      UniqueSimulationIdentifier, sep=""));
    flush.console();
  }
  ## Any non-"+" hit is stored as the fail code "D".
  if (Hit!="+") { Hit = "D"
    if (verbose >= 2) {
      AFilePrint(paste(
        "---- HitAFunctionForSimulation[", UniqueSimulationIdentifier,
        "] --- OOF, Hit = ", Hit, " for NameFunction = ", NameFunction,
        sep=""));
      flush.console();
    }
  }
  CurrentTotalCompleted <- 0;
  ## Pull the shared storage directories out of the global workspace.
  eval(parse(text=GetG0Text("CurrentTotalCompleted", "globalenv()", S=1)));
  eval(parse(text=GetG0Text("CurrentLargeContainDir", S=1)));
  eval(parse(text=GetG0Text("CurrentSmallContainDir", S=1)));
  if (verbose >= 2) {
    AFilePrint(paste(
      "---- HitAFunctionForSimulation[", UniqueSimulationIdentifier,
      "] --- CurrentLargeContainDir = ", CurrentLargeContainDir,
      " for NameFunction = ", NameFunction, sep=""));
    flush.console();
  }
  MySummaryPerformanceDirectory = paste(
    CurrentLargeContainDir, "//", "SumPerformance", sep="");
  dir.create(MySummaryPerformanceDirectory, showWarnings=FALSE);
  try(quoteMore <- paste("Hit A Simulation, NameFunction = ", NameFunction,
    " and Hit = ", Hit, sep=""));
  if (verbose >= 1) {
    AFilePrint(paste(
      "---- HitAFunctionForSimulation[", UniqueSimulationIdentifier,
      "] --- Perform LockMeIn", sep=""));
    flush.console();
  }
  ## Acquire the file lock.  MyTestT only becomes TRUE when the generated
  ## locking code ran to completion without error.
  MyTestT <- FALSE;
  TryOutT <- "
Oldwd <- getwd();
try(setwd(CurrentSmallContainDir));
while(LockMeIn(verbose=as.numeric(verbose), quoteMore=quoteMore,
LFDir = \"SumPerformance\", NoSpaceLFDir=TRUE, LockIn=\"SumLock\")==FALSE){
MyTestT <- FALSE;
}
try(setwd(Oldwd));
MyTestT <- TRUE;
";
  try(eval(parse(text=TryOutT)));
  if (MyTestT == FALSE) {
    ## NOTE(review): the LockIn= argument to AFilePrint here looks like a
    ## copy/paste remnant -- confirm AFilePrint accepts it.
    AFilePrint(paste(
      "---- HitAFunctionForSimulation[", UniqueSimulationIdentifier,
      "] --- Hey we tried to lock in but failed! ",
      "I will return NULL now!", sep=""), LockIn="SumLock");
    return(NULL);
    flush.console();
  }
  if (verbose >= 1) {
    AFilePrint(paste("---- HitAFunctionForSimulation[",
      UniqueSimulationIdentifier,
      "] --- We have finished LockMeIn", sep=""));
  }
  if (verbose >= 1) {
    AFilePrint(paste("---- HitAFunctionForSimulation[",
      UniqueSimulationIdentifier,
      "] --- Completed LockMeIn. Get listfiles:", sep=""));
    flush.console();
  }
  eval(parse(text=SetGText("MySummaryPerformanceDirectory",
    "globalenv()", S=1)));
  try(MyListFiles <- unlist(list.files(MySummaryPerformanceDirectory)));
  if (verbose >= 1) {
    AFilePrint(paste("---- HitAFunctionForSimulation[",
      UniqueSimulationIdentifier,
      "] --- MyListFiles receieved length ", length(MyListFiles), sep=""));
    flush.console();
  }
  if (verbose >= 1) {
    AFilePrint(paste("---- HitAFunctionForSimulation[",
      UniqueSimulationIdentifier,
      "] --- Run GetSummaryPerformanceFile", sep=""));
    flush.console();
  }
  if (is.null(MyListFiles)) {
    AFilePrint(paste("HitAFunctionForSimulation: NOTE WARNING: ",
      "MyListfiles is NULL!", sep=""));
  }
  if (length(MyListFiles) <= 0) {
    AFilePrint(paste("HitAFunctionForSimulation: NOTE WARNING: ",
      "MyListfiles has no length!", sep=""));
  }
  ## The shared bookkeeping file must already exist; if not, dump debugging
  ## state to the global environment and signal the failure.
  if (is.null(MyListFiles) || length(MyListFiles)<= 0 ||
    !("SummaryPerformanceRData.RData" %in% MyListFiles)) {
    AFilePrint(paste("HitAFunctionForSimulation: Error Summary ",
      "PerformanceRData not in directory: how could you do that?", sep=""));
    AFilePrint(" We have that summary performance was: ");
    AFilePrint(MySummaryPerformanceDirectory);
    AFilePrint(paste(" and MyListFiles is: (",
      paste(MyListFiles, collapse=", "), ")", sep=""));
    eval(parse(text=SetGText("MyListFiles", "globalenv()", S=1)));
    eval(parse(text=SetGText("MySummaryPerformanceDirectory",
      "globalenv()", S=1)));
    AFilePrint(paste(" We cannot be running HitAFunctionForSimulation ",
      "without files.", sep=""));
    AErrorPrint(paste("HitAFunctionForSimulation: Error Summary ",
      "PerformanceRData not in directory: how could you do that?", sep=""));
    tryCatch("HitAFunctionForSimulation");
  }
  if (verbose >= 1) {
    AFilePrint(paste(" Hit AFunction For Simulation: right now ",
      "getting MySummarySimulationsList", sep=""));
  }
  ## Load the shared MySummarySimulationsList bookkeeping object and find
  ## the index of this simulation's record by its unique identifier.
  try(MySummarySimulationsList <- NULL);
  eval(parse(text=SetGText("MySummarySimulationsList", S=1)));
  try(MyOldOldwd <- getwd());
  try(setwd(CurrentSmallContainDir));
  try(load(paste(
    "SumPerformance//SummaryPerformanceRData.RData", sep="")));
  try(eval(parse(text=TwoSimR5:::RecoveryTSSLText(CurrentLargeContainDir))));
  try(ISimID <- (1:length(MySummarySimulationsList$TheSummarySimulationList))[
    names(MySummarySimulationsList$TheSummarySimulationList) ==
    UniqueSimulationIdentifier]);
  try(setwd(MyOldOldwd));
  if (is.null(ISimID) || length(ISimID) != 1) {
    ## Lookup failed: export diagnostic state to the global environment.
    AFilePrint(paste("HitAFunctionForSimulation: after load, we don't ",
      "calculate ISimID for ", UniqueSimulationIdentifier, sep=""));
    AFilePrint("Will Paste Names of MySummarySimulationsList to global!");
    ATText <- "
TheSummarySimulationList <-
MySummarySimulationsList$TheSummarySimulationList;
namesTheSummarySimulationList <- names(TheSummarySimulationList);
";
    try(eval(parse(text=ATText)));
    ## BUG FIX: the original called parse(SetGText(...)) without text=,
    ## which made parse() treat the generated code string as a file path
    ## instead of code, so ISimID was never exported.
    eval(parse(text=SetGText("ISimID", "globalenv()", S=1)));
    eval(parse(text=SetGText("namesTheSummarySimulationList",
      "globalenv()", S=1)));
    eval(parse(text=SetGText("TheSummarySimulationList", "globalenv()", S=1)));
    eval(parse(text=SetGText("UniqueSimulationIdentifier",
      "globalenv()", S=1)));
    eval(parse(text=SetGText("MySummarySimulationsList", "globalenv()", S=1)));
    tryCatch("HitAFunctionForSimulation: We failed!");
  }
  ## Record the hit on the simulation record and map the record's return
  ## code onto this function's single-character result.
  if (Hit == "+") {
    if (verbose >= 1) {
      AFilePrint(paste(" Hit AFunction about to run successful Complete ",
        sep=""));
    }
    AR <- MySummarySimulationsList$TheSummarySimulationList[[
      ISimID]]$CompleteAFunction(ANameFunction=NameFunction,
      SSL =MySummarySimulationsList);
    ## AR == 1 is the normal completion code; anything else is flagged "L".
    if (AR %in% c(10, 11) || AR != 1) {
      ReturnAHit = "L";
    } else {
      ReturnAHit = "+";
    }
  } else {
    if (verbose >= 1) {
      AFilePrint(paste(" Hit AFunction about to run a fail",
        sep=""));
    }
    AR <- MySummarySimulationsList$TheSummarySimulationList[[
      ISimID]]$FailAFunction(ANameFunction=NameFunction,
      SSL = MySummarySimulationsList);
    ReturnAHit = "F";
  }
  if (verbose >= 3) {
    AFilePrint(paste(" --- HitASimulation finish, now we will",
      " Unlock.", sep=""));
    flush.console();
  }
  ## Refresh the global completion counters, write the bookkeeping object
  ## back to disk, and release the lock.
  try(eval(parse(text=TryFillCurrentTotalCompleted())));
  Oldwd <- getwd();
  eval(parse(text=GetG0Text("CurrentSmallContainDir", "globalenv()", S=1)));
  try(setwd(CurrentSmallContainDir));
  try(setwd("SumPerformance"));
  try(secure.save.MSS(AObject=MySummarySimulationsList,
    ObjectName="MySummarySimulationsList",
    file=paste("SummaryPerformanceRData.RData", sep="")));
  try(setwd(CurrentSmallContainDir));
  ## NOTE(review): LockMeIn above used LockIn="SumLock" while this unlock
  ## passes LockIn="SumUnLock" -- confirm the asymmetry is intentional.
  UnLockMe(verbose=verbose, quoteMore=paste(
    quoteMore, " - HitASimulation", sep=""),
    LFDir = "SumPerformance", LockIn="SumUnLock");
  try(setwd(Oldwd));
  if (verbose >= 3) {
    AFilePrint(paste(" --- HitASimulation: AllFinish.", sep=""));
  }
  return(ReturnAHit);
}
## TryFillCurrentTotalCompleted
##
## Builds (and returns) a string of R code that, when executed via
## eval(parse(text=...)) in a scope where `MySummarySimulationsList` (and
## optionally `TCS`) exist, refreshes the global completion counters
## CurrentTotalCompleted / ValidSimulations / CurrentTotalKilled and, when a
## non-numeric TCS tracking object is available, mirrors the counters onto it.
##
## Returns: a single character string of R code.
##
## NOTE(review): inside the generated code, CurrentTotalKilled is exported
## with SetGText before it is assigned from MySummarySimulationsList --
## confirm the global is expected to already exist at that point.
TryFillCurrentTotalCompleted <- function() {
MyText <- "
eval(parse(text=GetG0Text(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
eval(parse(text=GetG0Text(\"ValidSimulations\", \"globalenv()\", S=1)));
try(CurrentTotalCompleted <- MySummarySimulationsList$
GetTotalCompletedFunction());
try(ValidSimulations <- MySummarySimulationsList$ValidSimulations);
eval(parse(text=SetGText(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
eval(parse(text=SetGText(\"ValidSimulations\", \"globalenv()\", S=1)));
eval(parse(text=SetGText(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
try(CurrentTotalKilled <- MySummarySimulationsList$MyTotalKilledFunctions);
eval(parse(text=SetGText(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
if (!exists(\"TCS\") || is.null(TCS)) {
eval(parse(text=GetG0Text(\"TCS\", \"globalenv()\", S=1)));
}
if (exists(\"TCS\") && !is.null(TCS) && !is.numeric(TCS)) {
try(TCS$CurrentTotalCompleted <- as.integer(CurrentTotalCompleted));
try(TCS$ValidSimulations <- as.integer(ValidSimulations));
}
";
## Return the code string explicitly (the original relied on the invisible
## value of the final assignment as the function's return value).
MyText
}
## TryFillCurrentTotalCompletedSELF
##
## Like TryFillCurrentTotalCompleted(), but the generated code mirrors the
## counters onto a reference-class `.self` object instead of a TCS tracker:
## it must be evaluated inside a method where `.self` and
## `MySummarySimulationsList` are in scope.
##
## Returns: a single character string of R code.
TryFillCurrentTotalCompletedSELF <- function() {
MyText <- "
eval(parse(text=GetG0Text(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
try(CurrentTotalCompleted <-
MySummarySimulationsList$GetTotalCompletedFunction());
eval(parse(text=GetG0Text(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
eval(parse(text=SetGText(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
try(.self$CurrentTotalCompleted <- as.integer(CurrentTotalCompleted));
eval(parse(text=GetG0Text(\"ValidSimulations\", \"globalenv()\", S=1)));
try(ValidSimulations <-
MySummarySimulationsList$ValidSimulations);
eval(parse(text=SetGText(\"ValidSimulations\", \"globalenv()\",S=1)));
try(CurrentTotalKilled <- MySummarySimulationsList$MyTotalKilledFunctions);
eval(parse(text=SetGText(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
try(.self$ValidSimulations <- as.integer(ValidSimulations));
";
## Return the code string explicitly (the original relied on the invisible
## value of the final assignment as the function's return value).
MyText
}
## TryFillCurrentTotalCompletedTC
##
## Like TryFillCurrentTotalCompleted(), but the generated code takes the
## counter values from local variables `TC`, `TK`, and `VLD` (which must be
## in scope where the code is evaluated) instead of querying
## MySummarySimulationsList, then exports them to the global environment and
## mirrors them onto a non-numeric TCS tracker when one is available.
##
## Returns: a single character string of R code.
TryFillCurrentTotalCompletedTC <- function() {
MyText <- "
eval(parse(text=GetG0Text(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
eval(parse(text=GetG0Text(\"ValidSimulations\", \"globalenv()\", S=1)));
eval(parse(text=GetG0Text(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
try(CurrentTotalCompleted <- TC);
try(CurrentTotalKilled <- TK);
try(ValidSimulations <- VLD);
eval(parse(text=SetGText(\"CurrentTotalCompleted\", \"globalenv()\", S=1)));
eval(parse(text=SetGText(\"CurrentTotalKilled\", \"globalenv()\", S=1)));
eval(parse(text=SetGText(\"ValidSimulations\", \"globalenv()\", S=1)));
if (!exists(\"TCS\") || is.null(TCS)) {
eval(parse(text=GetG0Text(\"TCS\", \"globalenv()\", S=1)));
}
if (exists(\"TCS\") && !is.null(TCS) && !is.numeric(TCS)) {
try(TCS$CurrentTotalCompleted <- as.integer(CurrentTotalCompleted));
try(TCS$ValidSimulations <- as.integer(ValidSimulations));
}
";
## Return the code string explicitly (the original relied on the invisible
## value of the final assignment as the function's return value).
MyText
}
|
e0b9a4a89dc678b39ac8a867996e9de3e7e89f16
|
7fdd10a4a052e62efc85023717ae64f8442c9eae
|
/parse.R
|
86cfaa27f898e21487b72f3d38a236297fc01a68
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
jmoggridge/ottawabeaches
|
515d0cddabf7c5df9d07a168d8b6a1d19ec9d0da
|
9d1b4f85d76d6919c90dd896d0bab1bc8e4aabf6
|
refs/heads/master
| 2022-10-05T08:02:57.837431
| 2020-06-04T15:20:10
| 2020-06-04T15:20:10
| 259,743,153
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,573
|
r
|
parse.R
|
## parse.R -- build the Ottawa beaches modelling dataset:
##   1. read daily E. coli counts and beach open/closed status,
##   2. reshape to long format and merge by date and beach,
##   3. derive calendar fields,
##   4. attach daily weather covariates (temperature, rain),
##   5. write the merged table to data/beaches.csv.
library(tidyverse)
library(lubridate)
# library(rcartocolor)
library(zoo)
## NOTE(review): setwd() ties the script to one machine; kept for
## compatibility with the original workflow.
setwd("~/Dropbox/R coding/ottawa_beaches")

### CSV data: coliform counts and beach status, one column per beach.
beaches <- read.csv("data/raw_data/beach_coliforms.csv", strip.white = TRUE)
opens <- read.csv("data/raw_data/beach_status.csv", strip.white = TRUE)

## Reshape both wide tables into long (Date, location, value) format.
beaches <- beaches %>%
  pivot_longer(names(beaches)[-1], names_to = "location", values_to = "count")
opens <- opens %>%
  pivot_longer(names(opens)[-1], names_to = "location", values_to = "status")

## Merge E. coli counts and beach status by date and beach.
beaches <- merge(beaches, opens, by = c("Date", "location"), all = TRUE)

## Reorder the status levels and relabel "NoSwim" as "E. coli".  Coerce to
## factor explicitly first: since R 4.0 read.csv() returns character
## columns, and levels() of a character vector is NULL, which would make
## the factor() call below produce all-NA values.
beaches$status <- as.factor(beaches$status)
beaches$status <- factor(beaches$status, levels(beaches$status)[c(1, 4, 3, 2)])
levels(beaches$status)[levels(beaches$status) == "NoSwim"] <- "E. coli"

## Parse dates and derive calendar fields (julian = day of year).
beaches$Date <- as.Date(beaches$Date)
beaches$location <- as.factor(beaches$location)
beaches <- beaches %>%
  dplyr::mutate(year = lubridate::year(Date),
                month = lubridate::month(Date),
                day = lubridate::day(Date),
                julian = lubridate::yday(Date))
beaches <- subset(beaches,
                  select = c(Date, julian, year, month, day, location, count, status))

### WEATHER DATA: Environment Canada daily data, Ottawa airport 2014-2019
### (three missing days approximated from the Ottawa CDA station).
### Source: https://climate.weather.gc.ca/ (daily data CSV download).
weather_files <- paste0("data/raw_data/weather_data/weather", 2014:2019, ".csv")
weather <- do.call(rbind, lapply(weather_files, read.csv, strip.white = TRUE))
weather$Date <- as.Date(weather$Date)
weather <- weather %>%
  dplyr::mutate(julian = lubridate::yday(Date))

### Keep temperature/rain columns and only the dates with beach records.
## NOTE(review): assumes the raw CSVs already carry the short column names
## (Tmax, Tmean, Tmin, rain); the rename from Environment Canada's long
## names was commented out upstream -- confirm against the raw files.
weather <- subset(weather, select = c(Date, Tmax, Tmean, Tmin, rain))
names(weather) <- c("Date", "Tmax", "Tmean", "Tmin", "rain")
weather <- weather %>%
  filter(Date %in% beaches$Date)

## Join weather onto the beach records by date.  (The original passed
## by = c("Date", "Date"), a duplicated key; a single "Date" key is the
## intended join.)
beaches <- left_join(beaches, weather, by = "Date")
write.csv(beaches, "data/beaches.csv", row.names = FALSE)
# write.csv(weather, "data/weather_full.csv", row.names = FALSE)
rm(list = ls()) # clears the whole workspace (side effect when sourced)
|
026a29eca863fc13c1da946d8347b4ad4546de50
|
205d6eeaea09cf2855140f159759373520a04b87
|
/R/pprofile.R
|
fece35dbfecb385035661297ac15ed0c23ff58f5
|
[
"MIT"
] |
permissive
|
vineetrepository/patprofile
|
998d9c3befbc20f484765373c6338770e005cee6
|
214c9838362cbf4374302ac2f78faa323686bb18
|
refs/heads/master
| 2022-04-23T16:53:28.958254
| 2020-04-17T07:58:02
| 2020-04-17T07:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
pprofile.R
|
#' Patient profile package
#'
#' Patient profile module and submodules for various ADaM datasets, for plug & play
#' in Novartis Shiny apps.
#'
#' @import shiny
#' @import data.table
#'
#' @name pprofile-pkg
#' @author Renan Sauteraud
NULL
|
72f9bb9276594e6474b9c1cc4bada37c832846f9
|
28c777d15c558f5e09adafa8b513eb58b3911374
|
/R1.3/temp.R
|
9db931e9a9294f3852af6335e784f6f1fbbbf659
|
[] |
no_license
|
nandatascientist/XeevaIDM
|
48aee051500584170f1b79f6326c16111a806010
|
74d7aad9ea08cb03f4a77f2f69eb6a7ac119f8e4
|
refs/heads/master
| 2020-04-09T14:39:18.690464
| 2016-12-01T20:53:46
| 2016-12-01T20:53:46
| 31,968,489
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
temp.R
|
## Derive an "L3" rollup code from a bracketed 8-digit code string.
## Example: "[12345678]" -> 12340000 (first four digits kept, last four
## zeroed).  Codes already ending in "0000" -- i.e. already at the L3
## level -- and strings that are not exactly 8 characters long leave
## returnL3Value at its default of 0.
testinput <- "[12345678]"
returnL3Value <- 0
## Strip the surrounding square brackets: "[12345678]" -> "12345678".
inputText <- gsub("\\[|\\]", "", testinput)
## (Conditions merged with && and the redundant c("0000") wrapper removed;
## behavior is unchanged.)
if (nchar(inputText) == 8 && substr(inputText, 5, 8) != "0000") {
  ## Replace the last four digits with zeros and convert to numeric.
  returnL3Value <- as.numeric(paste0(substr(inputText, 1, 4), "0000"))
}
|
ba69fb4f6768bb680aedf4a4c49ff26804feacdf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/data.table/examples/duplicated.Rd.R
|
f8f473d1e4bd6eac70c1a78ace04a4a63b09a3fc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,741
|
r
|
duplicated.Rd.R
|
## Auto-extracted example script for data.table's duplicated()/unique()
## help page (runs the documented examples top to bottom).
library(data.table)
### Name: duplicated
### Title: Determine Duplicate Rows
### Aliases: duplicated duplicated.data.table unique unique.data.table
### anyDuplicated anyDuplicated.data.table uniqueN
### Keywords: data
### ** Examples
## Keyed table: duplicates are determined by the key columns A,B.
DT <- data.table(A = rep(1:3, each=4), B = rep(1:4, each=3),
C = rep(1:2, 6), key = "A,B")
duplicated(DT)
unique(DT)
## by= restricts the duplicate check to a subset of columns.
duplicated(DT, by="B")
unique(DT, by="B")
duplicated(DT, by=c("A", "C"))
unique(DT, by=c("A", "C"))
DT = data.table(a=c(2L,1L,2L), b=c(1L,2L,1L)) # no key
unique(DT) # rows 1 and 2 (row 3 is a duplicate of row 1)
## Floating-point values: uniqueness is exact (strict), not by tolerance.
DT = data.table(a=c(3.142, 4.2, 4.2, 3.142, 1.223, 1.223), b=rep(1,6))
unique(DT) # rows 1,2 and 5
DT = data.table(a=tan(pi*(1/4 + 1:10)), b=rep(1,10)) # example from ?all.equal
length(unique(DT$a)) # 10 strictly unique floating point values
all.equal(DT$a,rep(1,10)) # TRUE, all within tolerance of 1.0
DT[,which.min(a)] # row 10, the strictly smallest floating point value
identical(unique(DT),DT[1]) # TRUE, stable within tolerance
identical(unique(DT),DT[10]) # FALSE
# fromLast=TRUE
## fromLast=TRUE keeps the LAST occurrence of each duplicate group.
DT <- data.table(A = rep(1:3, each=4), B = rep(1:4, each=3),
C = rep(1:2, 6), key = "A,B")
duplicated(DT, by="B", fromLast=TRUE)
unique(DT, by="B", fromLast=TRUE)
# anyDuplicated
## anyDuplicated() returns the index of the first duplicate (0 if none).
anyDuplicated(DT, by=c("A", "B")) # 3L
any(duplicated(DT, by=c("A", "B"))) # TRUE
# uniqueN, unique rows on key columns
uniqueN(DT, by = key(DT))
# uniqueN, unique rows on all columns
uniqueN(DT)
# uniqueN while grouped by "A"
DT[, .(uN=uniqueN(.SD)), by=A]
# uniqueN's na.rm=TRUE
## With na.rm=TRUE, NA and NaN are excluded from the distinct count.
x = sample(c(NA, NaN, runif(3)), 10, TRUE)
uniqueN(x, na.rm = FALSE) # 5, default
uniqueN(x, na.rm=TRUE) # 3
|
7349886fd451c190e512d91a7c24afc751f4bb82
|
3d6ae65b66466ec4ff7d222d70509e2ec3113b18
|
/man/PICrefine.Rd
|
9f21e5c225285fad73a2ee78135fd1e09dc1c326
|
[] |
no_license
|
zmzhang/KPIC
|
efe4d324de288e990ca3f4b1c7c2759981c731aa
|
8a7a61ba898f1e8c5ce273a73099ce2d9457c754
|
refs/heads/master
| 2021-01-20T03:54:11.381311
| 2016-07-26T03:39:01
| 2016-10-24T07:48:54
| 89,603,929
| 2
| 1
| null | 2017-04-27T14:02:07
| 2017-04-27T14:02:07
| null |
UTF-8
|
R
| false
| false
| 559
|
rd
|
PICrefine.Rd
|
\name{PICrefine}
\alias{PICrefine}
\title{
Refine the PICs.
}
\description{
Remove unsatisfactory PICs by calculating the Gaussian similarity, sharpness and SNR of each PIC.
}
\usage{
PICrefine(PICs, n = 1)
}
\arguments{
\item{PICs}{
the result of the extracting through getPIC function
}
\item{n}{
a parameter for calculating the threshold, i.e. the lambda referred to in the article
}}
\value{
\item{PICs}{the list of all extracted PICs}
\item{Info}{the matrix of the properties of the features}
\item{rt}{the scan time}
\item{scores}{the score of each PIC}
}
|
290c601337ce03e4aa2951ebc68ffc233fab2f14
|
b6e31cfa3c6bb42f2f7f32ab2c49b372149b4924
|
/man/Est_regions.Rd
|
b373d896f234c85938a89b5151def4fd21ed17ef
|
[] |
no_license
|
marcjwilliams1/Alleloscope
|
b2b2020b800d1ad2e63154b130ae2626d0a13e61
|
dbf1b7773c197affedac540b49ea9c4a3a6c0e20
|
refs/heads/main
| 2023-08-07T14:30:27.923798
| 2021-09-27T03:06:33
| 2021-09-27T03:06:33
| 394,618,929
| 1
| 0
| null | 2021-08-10T10:57:18
| 2021-08-10T10:57:17
| null |
UTF-8
|
R
| false
| true
| 1,823
|
rd
|
Est_regions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Est_regions.R
\name{Est_regions}
\alias{Est_regions}
\title{Perform EM iterations on the filtered cells with barcodes, and plot the results for each region.}
\usage{
Est_regions(
Obj_filtered = NULL,
max_nSNP = 30000,
plot_stat = TRUE,
min_cell = 5,
min_snp = 0,
rds_path = NULL,
cont = FALSE,
max_iter = 50,
phases = NULL,
sub_cells = NULL,
sub_region = NULL
)
}
\arguments{
\item{Obj_filtered}{An Alleloscope object with allele and segment information for estimating cell major haplotype proportion (theta_hat) for each region.}
\item{max_nSNP}{Integer. Maximum SNP number used for estimating theta_hat for a region.}
\item{plot_stat}{Logical (TRUE/FALSE). Whether or not to plot the statistics and EM results for each region.}
\item{min_cell}{Integer. Filter out the cells with reads < min_cells.}
\item{min_snp}{Integer. Filter out the SNPs with reads < min_snp.}
\item{rds_path}{The path for saving the rds files for the estimated results for each region.}
\item{cont}{Logical (TRUE/FALSE). Whether or not to skip the regions with the rds files in the specified rds_path.}
\item{max_iter}{Integer. Maximum numbers of EM iterations.}
\item{phases}{List. The estimated phase indicators (I_j) of each SNP across all regions.}
\item{sub_cells}{A vector of cell names for the cells used to update the phases.}
\item{sub_region}{A region name (in the "chrr" column of the seg_table) for a region where the SNP phases and cell major haplotype proportion are updated.}
}
\value{
A "rds_list" of the estimated SNP phases (I_hat), estimated cell major haplotype proportion (theta_hat) for all regions.
}
\description{
Perform EM iterations on the filtered cells with barcodes, and plot the results for each region.
}
|
0e20c17028a78365fd594ae529fb5df0e8ad51aa
|
19b426c6b55f0bdf799de76d577fe0736549ba6d
|
/man/gc_list_courses_response.Rd
|
8bf4566408d4e4416423753a9789d51ab06cacf8
|
[] |
no_license
|
curso-r/googleclassroom
|
7b9891d37d8b6ad64107b623bccdd01194fdbade
|
60045dc9ca750f3ec917f5879807b937b6988ddc
|
refs/heads/master
| 2022-06-22T16:09:17.051625
| 2020-05-06T22:50:54
| 2020-05-06T22:50:54
| 260,591,170
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 576
|
rd
|
gc_list_courses_response.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_objects.R
\name{gc_list_courses_response}
\alias{gc_list_courses_response}
\title{ListCoursesResponse Object}
\usage{
gc_list_courses_response(nextPageToken = NULL, courses = NULL)
}
\arguments{
\item{nextPageToken}{Token identifying the next page of results to return}
\item{courses}{Courses that match the list request}
}
\value{
ListCoursesResponse object
}
\description{
ListCoursesResponse Object
}
\details{
Response when listing courses.
}
\concept{ListCoursesResponse functions}
|
ab90ab6eeb51792319935b6d069cfad42d61164a
|
26c37f1bdbcb50846dfa1473accf3853152ed319
|
/NaiveBayes.R
|
80e0af13a0b03ee10b82c16380758c17dd8851c0
|
[] |
no_license
|
sskarkhanis/KaggleGrantsPredictionR
|
0da2fd0aa31056e88116ed8cf0d76f042ce931de
|
03e049502e49bdaa8e69b2f2e975c2bb27c2000b
|
refs/heads/master
| 2021-01-01T16:51:31.899671
| 2014-09-04T18:11:04
| 2014-09-04T18:11:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
NaiveBayes.R
|
## 5-fold cross-validated naive Bayes baseline for the grants data,
## scoring each fold by ROC AUC.
## NOTE(review): `generateCV()` and `training_grants` are defined elsewhere
## in the project and must be in the workspace before sourcing this script.
library(e1071)
library(pROC)
N<-5 # 5-fold
CV<-generateCV(training_grants,N) # generate the CV-data
results<-list()
auc<-rep(0,N)
# run the crossvalidation
for(cv in seq(N))
{
#train the model
bayes <- naiveBayes(Grant.Status ~ Contract.Value.Group+Age, data = CV[[cv]]$training)
#predict on unseen data
## NOTE(review): the [,list(...)] selection suggests the test split is a
## data.table -- confirm against generateCV().
p<-predict(bayes, CV[[cv]]$test[,list(Contract.Value.Group,Age)], type = "raw")
#generate roc-data (column 2 of the raw predictions is P(class "1"))
r<-roc(CV[[cv]]$test$Grant.Status, p[,2],levels=c("0", "1"))
#store the results of the run in a list
results[[cv]]<-list(model=bayes,prediction=p,roc=r)
auc[cv]<-r$auc
}
|
587d355c842c480b84ab2029287be315e0ae0c09
|
e526a751fb3419c44a0d257c1d79d7844ca9bcd4
|
/Rcode/arules_analysis.R
|
8d94c11838fdb427e00c849d7ea36f641231c691
|
[] |
no_license
|
PeaWagon/cse780-project
|
eabd650ed3d2e2a0e8854e998696a5b2c7b15c6e
|
65c22cfc1a1dc06e510f6416c28813c61a7e9285
|
refs/heads/master
| 2020-05-04T09:40:37.228802
| 2019-04-02T12:51:41
| 2019-04-02T12:51:41
| 179,073,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,633
|
r
|
arules_analysis.R
|
# Association-rules analysis of the drug-review data.
# Improvements over the original: the fit -> inspect(lift) -> std_lift ->
# inspect(slift) sequence was copy-pasted five times (with an unused
# `qual <- quality(fit)` each time); it is now a single helper.
library(arules)
source("std_lift.R")  # course-provided helper: standardised lift

infile <- "../data_processing/drug_data_trimmed_text_len.csv"
d <- read.csv(infile)

# We consider: drugName (1), condition (2), rating (4), revLenDesc (9).
x <- d[, c(1, 2, 4, 9)]

# Dichotomise the rating: 0 = below 7, 1 = 7 or above.
x$rating[x$rating < 7] <- 0
x$rating[x$rating > 0] <- 1
x$rating <- as.factor(x$rating)

# Mine rules with the given parameters (and optional appearance
# restrictions); print them sorted by lift, then by standardised lift.
# minlen/maxlen bound the number of items per rule; if no rules come out,
# reduce support and/or confidence.  Returns the rule set invisibly.
mine_and_inspect <- function(data, params, app = NULL) {
  fit <- if (is.null(app)) {
    apriori(data, parameter = params)
  } else {
    apriori(data, parameter = params, appearance = app)
  }
  inspect(sort(fit, by = "lift"))
  quality(fit) <- std_lift(fit, data)
  inspect(sort(fit, by = "slift"))
  invisible(fit)
}

# (a) Which itemsets imply either rating class?
mine_and_inspect(x,
  params = list(support = 0.01, confidence = 0.8, minlen = 2, maxlen = 4),
  app = list(rhs = c("rating=0", "rating=1"), default = "lhs"))

# (b) See where drugs are poorly rated.
mine_and_inspect(x,
  params = list(support = 0.005, confidence = 0.6, minlen = 2, maxlen = 4),
  app = list(rhs = c("rating=0"), default = "lhs"))

# (c) See if length of text is a consequence of anything.
mine_and_inspect(x,
  params = list(support = 0.0001, confidence = 0.7, minlen = 2, maxlen = 4),
  app = list(rhs = c("revLenDesc=v.long",
                     "revLenDesc=long",
                     "revLenDesc=medium",
                     "revLenDesc=short",
                     "revLenDesc=v.short"),
             default = "lhs"))

# (d) Remove the rhs restrictions.
mine_and_inspect(x,
  params = list(support = 0.01, confidence = 0.7, minlen = 2, maxlen = 4))

# (e) Unrestricted again, tighter support/confidence.
mine_and_inspect(x,
  params = list(support = 0.0075, confidence = 0.75, minlen = 2, maxlen = 4))
|
579b636fd43df1f4c1563a48ebae0129ecd81575
|
dc60c8532ebe40eb2bdb6342e0d5896078523b48
|
/code/004_outlines_inspect_harmonics.r
|
0c0813a96bf8257d9111faad9808c7e83a75a454
|
[] |
no_license
|
benmarwick/marwick-and-maloney-saa2014
|
9f9f4be835b28c9b4101f4f8b771b645f1db3d53
|
fe0df12fecec9c3511e21b4e8882bbf1e46f6f8b
|
refs/heads/master
| 2021-01-22T04:49:30.378010
| 2014-04-23T09:01:54
| 2014-04-23T09:01:54
| 18,430,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
004_outlines_inspect_harmonics.r
|
# Harmonic-count calibration for the elliptical Fourier analysis (Momocs).
# Expects the outline objects `coords_scale` / `coords_center` from the
# preceding scripts in this pipeline, with the Momocs package loaded.
# determine how many harmonics to use in
# the elliptical fourier analysis
# now just working through http://www.vincentbonhomme.fr/Momocs/vignettes/A-graphical-introduction-to-Momocs.pdf
nb.h = 50 # over estimate harmonics to see what minimum number is ok...
# one method...
# windows() # pops up a new window to hold plots
# set up a grid of plots...
# par(mfrow=c(ceiling(length(fac$nms)/2),ceiling(length(fac$nms)/2)-1))
# they may not all fit, if so, skip on to the next approach...
# lapply(coords_center@coo, function(i) hpow(Coo(i), nb.h = nb.h))
# dev.off() # may need to repeat this a few times to free up the graphics device
# another approach... watch the colours flash by...
# lapply(coords_center@coo, function(i) hqual(Coo(i), harm.range = seq(1, nb.h, 10), plot.method = c("panel")[1]))
# lapply(coords_center@coo, function(i) hqual(Coo(i), harm.range = seq(1, nb.h, 10), plot.method = c("stack")[1]))
# yet another approach, probably the best one since it's more objective and repeatable
# hpow(coords_scale, nb.h = nb.h,
#      title="eFourier with extrema and mean dev.")
# Plot cumulative harmonic power at the three quartiles of the outlines.
hpow(coords_scale, probs=c(0.25, 0.5, 0.75), drop=FALSE, legend=TRUE, nb.h = nb.h,
title="eFourier three quartiles")
# hpow(coords_scale, method="rfourier",
#      title="rFourier")
# hpow(coords_scale, method="tfourier",
#      title="tFourier")
# inspect harmonics plots and output to see minimum number needed to get 0.9999 of total
# harmonic power.
nb.h = 30 # after looking at the plots
|
66b981749023206eb876649902f8b4ed7163c0b2
|
086208ed314b1b2398a80d149a09b42d4da35c1c
|
/Projects/HCM_SCS_2021_05_TEICHMANN_Convert_litvinukova_file_pt2_Global_HPC.R
|
827f708e44706609227f365524b7db48d72458e2
|
[] |
no_license
|
vanrooij-lab/scRNAseq-HCM-human
|
9505497e055093a5a6b8d0feb07186c8ac2ed405
|
1b27e55e1762a69ef8faef6f0dcf30e108e31cb7
|
refs/heads/master
| 2023-08-14T13:44:48.798735
| 2023-07-21T12:19:29
| 2023-07-21T12:19:29
| 366,691,443
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,785
|
r
|
HCM_SCS_2021_05_TEICHMANN_Convert_litvinukova_file_pt2_Global_HPC.R
|
# This is an edited copy of
# HCM_SCS_2021_05_TEICHMANN_Convert_litvinukova_file_HPC.R
#
# Converts the Litvinukova et al. global raw AnnData (.h5ad) file to an
# h5seurat file that downstream scripts can read.
#
# m.wehrens@hubrecht.eu
# At HPC, e.g.:
# srun --nodes=1 -c 1 --mem=50G --time=2:00:00 --pty bash -i
# R
# etc ..

################################################################################
# libs

library(Seurat)
library(SeuratDisk)
library(SeuratData)
library(ggplot2)
library(umap)
library(Rtsne)

library(RColorBrewer)
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))

################################################################################
# commands performed at the HPC

# First convert the AnnData file to h5seurat (written next to the source).
mysource='/hpc/hub_oudenaarden/mwehrens/data/Teichmann/Litvinukova_global_raw.h5ad'
Convert(
    mysource,
    dest = "h5seurat",
    assay = "RNA",
    overwrite = FALSE,
    verbose = TRUE
)

# The converted file can then be viewed with Connect() without loading it
# into memory:
# https://mojaveazure.github.io/seurat-disk/articles/h5Seurat-load.html
Teichmann_all = SeuratDisk::Connect('/hpc/hub_oudenaarden/mwehrens/data/Teichmann/Litvinukova_global_raw.h5seurat')
Teichmann_all
Teichmann_all$index()
Teichmann_all$close_all()

################################################################################
# OK we're done.

################################################################################
# We can test it:

Teichmann_all <- LoadH5Seurat('/hpc/hub_oudenaarden/mwehrens/data/Teichmann/Litvinukova_global_raw.h5seurat')
Teichmann_all
# > object_size(Teichmann_all)
# 5.02 GB

################################################################################
# The rest of the code is old exploratory code, not part of the final
# analysis.  (Fixed here: two console-transcript lines starting with "> "
# were invalid R syntax and would have broken source()-ing this file.)
################################################################################

# To access the raw data:
Teichmann_all@assays$RNA[1:100,1:100]

# NOTE(review): 'septum' occurs in the cell barcodes (column names); the two
# rownames() calls below search the gene names instead -- confirm whether
# colnames() was meant, as in the counts further down.
any(grepl('septum',rownames(Teichmann_all@assays$RNA)))
sum(grepl('septum',rownames(Teichmann_all@assays$RNA)))

# There appear to be 15K septal cells in this data
sum(grepl('septum',colnames(Teichmann_all@assays$RNA)))
# [1] 15710
dim(Teichmann_all@assays$RNA)
# [1] 33538 125289

# Let's see if we can create a somewhat smaller sample of the cells
# to play around with later
# sample(x=dim(Teichmann_all@assays$RNA)[2],size=4000)
some_sampled_colnames=colnames(Teichmann_all@assays$RNA)[sample(x=dim(Teichmann_all@assays$RNA)[2],size=4000)]
sum(grepl('septum',some_sampled_colnames))
# 532

# rsync -avh --progress gw2hpct01:/hpc/hub_oudenaarden/mwehrens/data/Teichmann/Teichmann_subset.Rdata /Volumes/workdrive_m.wehrens_hubrecht/data/2020_12_Litvinukova2020/HPC_sync
|
d1ca7da50daddf11af23eb88a1ea94bd47f0dc70
|
663aca06846516b68ec31bf53027604e2b41516c
|
/aveALL_process.R
|
db6e7bd59dff898e4118c9a122947b1d0d0f60ab
|
[] |
no_license
|
LiuyangJLU/WGCNA_bestFS
|
b3dc2c6bdbe45e94426e26c88efd8749e7dc620b
|
d0277025530a45e231cf98e7becef28da6e09a85
|
refs/heads/master
| 2020-03-07T00:28:21.438443
| 2018-06-13T01:57:33
| 2018-06-13T01:57:33
| 127,158,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,458
|
r
|
aveALL_process.R
|
# Processing of the raw brain-tissue expression data
# step1: write the original txt files out as csv; open the file and delete unneeded rows and columns
# step2: use the first column of the data frame as the row names
# step3: delete the first column
# step4: check the data type with mode(); if it is a list, convert it to numeric:
#eset<-apply(eset,1,function(x){as.numeric(x)})
# make the corresponding row names equal the column names
# (Note: write.csv() returns NULL, so the *_csv variables below are all NULL;
# they only exist as a record that the write happened.)
CRBL_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_CRBL.txt")
CRBL_csv = write.csv(CRBL_rawset,file="E:/R_Code/tissue/BrainTissue/expr_CRBL.csv")
# NOTE(review): this block reads expr_aveALL.txt but writes expr_FCTX.csv --
# confirm the intended source file (probably expr_FCTX.txt).
FCTX_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_aveALL.txt")
FCTX_csv = write.csv(FCTX_rawset,file="E:/R_Code/tissue/BrainTissue/expr_FCTX.csv")
WHMT_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_WHMT.txt")
WHMT_csv = write.csv(WHMT_rawset,file="E:/R_Code/tissue/BrainTissue/expr_WHMT.csv")
MEDU_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_MEDU.txt")
MEDU_csv = write.csv(MEDU_rawset,file="E:/R_Code/tissue/BrainTissue/expr_MEDU.csv")
HIPP_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_HIPP.txt")
HIPP_csv = write.csv(HIPP_rawset,file="E:/R_Code/tissue/BrainTissue/expr_HIPP.csv")
OCTX_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_OCTX.txt")
OCTX_csv = write.csv(OCTX_rawset,file="E:/R_Code/tissue/BrainTissue/expr_OCTX.csv")
PUTM_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_PUTM.txt")
PUTM_csv = write.csv(PUTM_rawset,file="E:/R_Code/tissue/BrainTissue/expr_PUTM.csv")
SNIG_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_SNIG.txt")
SNIG_csv = write.csv(SNIG_rawset,file="E:/R_Code/tissue/BrainTissue/expr_SNIG.csv")
TCTX_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_TCTX.txt")
TCTX_csv = write.csv(TCTX_rawset,file="E:/R_Code/tissue/BrainTissue/expr_TCTX.csv")
THAL_rawset = read.table("E:/R_Code/tissue/BrainTissue/expr_THAL.txt")
THAL_csv = write.csv(THAL_rawset,file="E:/R_Code/tissue/BrainTissue/expr_THAL.csv")
# Batch conversion: read each tissue's txt expression file and write it out
# as a csv.  (Replaces an earlier non-functional draft that contained invalid
# "\R" escape sequences, an incomplete as.character(x = ) call, a loop that
# iterated exactly once, and references to an undefined `rawset`.)
# step1: the function takes the raw data file and the tissue code.
file1 = "E:/R_Code/tissue/BrainTissue/expr_CRBL.txt"
file2 = "E:/R_Code/tissue/BrainTissue/expr_WHMT.txt"
file3 = "E:/R_Code/tissue/BrainTissue/expr_FCTX.txt"
file4 = "E:/R_Code/tissue/BrainTissue/expr_HIPP.txt"
file5 = "E:/R_Code/tissue/BrainTissue/expr_MEDU.txt"
file6 = "E:/R_Code/tissue/BrainTissue/expr_OCTX.txt"
file7 = "E:/R_Code/tissue/BrainTissue/expr_PUTM.txt"
file8 = "E:/R_Code/tissue/BrainTissue/expr_SNIG.txt"
file9 = "E:/R_Code/tissue/BrainTissue/expr_TCTX.txt"
file10 = "E:/R_Code/tissue/BrainTissue/expr_THAL.txt"
str1 = "CRBL"
str2 = "WHMT"
str3 = "FCTX"
str4 = "HIPP"
str5 = "MEDU"
str6 = "OCTX"
str7 = "PUTM"
str8 = "SNIG"
str9 = "TCTX"
str10 = "THAL"

# Read one raw expression txt file and write it back out as
# "expr_<str>.csv" in `out_dir` (defaults to the input file's directory,
# matching the manual conversions above).  Returns the table invisibly.
#   file    - path to the raw whitespace-delimited expression file
#   str     - tissue code used to name the output file
#   out_dir - directory for the csv (new optional parameter)
Data_progress <- function(file, str, out_dir = dirname(file)) {
  rawset <- read.table(file)
  write.csv(rawset, file = file.path(out_dir, paste0("expr_", str, ".csv")))
  invisible(rawset)
}
#DataProcess
#eset = read.table("E:/R_Code/tissue/expr_aveALL.txt")

# Convert a raw expression table whose first column holds gene identifiers
# into a numeric sample-by-gene matrix: the id column becomes the row names,
# every row is coerced to numeric, and apply() over rows transposes the
# result so rows = samples (the original columns) and columns = genes.
P <- function(rawset) {
  rownames(rawset) <- rawset[, 1]  # gene ids before dropping the column
  trimmed <- rawset[, -1]
  mat <- apply(trimmed, 1, as.numeric)      # numeric + implicit transpose
  rownames(mat) <- colnames(trimmed)        # sample names on the rows
  mat
}
# WGCNA pipeline on the averaged expression matrix:
# load -> reshape -> keep most-variable genes -> sample QC -> network
# construction -> module detection -> module merging.
# Fixes: `Rawset`/`rawset` case mismatch on the read, `T` -> `TRUE`,
# completed the truncated `mergeColors = merge$` line, removed the truncated
# `cellToExps<-functio` fragment, and fixed the `FASLE` typo in moduleDetect.
rawset <- read.csv("E:/R_Code/tissue/rawset.csv")
dim(rawset)              # inspect rows and columns
head(rawset[1:4,1:4])    # peek at the first 4x4 corner
rownames(rawset) <- rawset[,1]   # first column holds the identifiers
rawset <- rawset[,-1]            # drop the now-redundant id column
eset <- rawset
eset <- apply(eset, 1, function(x){as.numeric(x)})  # numeric + transpose
mode(eset)
rownames(eset) <- colnames(rawset)

# Keep the 4000 genes with the highest median absolute deviation.
eset1 <- eset[, order(apply(eset, 2, mad), decreasing = TRUE)[1:4000]]
eset <- eset1

library(WGCNA)

# Detect sample outliers by average-linkage hierarchical clustering.
sampleTree <- hclust(dist(eset), method = "average")
sizeGrWindow(12, 9)
par(cex = 0.6)
par(mar = c(0, 4, 2, 0))
plot(sampleTree, main = "Sample clustering to detect outliers", sub = "",
     xlab = "", cex.lab = 1.5, cex.axis = 1.5, cex.main = 2)

# Network construction.
softPower = 12
adjacency = adjacency(eset, power = softPower)
TOM_P <- TOMsimilarity(adjacency)
dissTOM <- 1 - TOM_P
geneTree <- hclust(as.dist(dissTOM), method = "average")
plot(geneTree, xlab = "", sub = "",
     main = "Gene clustering TOM-based dissimilarity",
     labels = FALSE, hang = 0.04)
# The minimum cluster size here depends on the number of genes.
dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM, minClusterSize = 30)
dynamicColors = labels2colors(dynamicMods)

# Module eigengene = first principal component of each module's expression.
MElist <- moduleEigengenes(eset, colors = dynamicColors)
MEs <- MElist$eigengenes
MEdiss <- 1 - cor(MEs)
METree <- hclust(as.dist(MEdiss), method = "average")

# Plot the eigengene dendrogram with the merge threshold.
sizeGrWindow(7, 6)
plot(METree, main = "Clustering of module eigengenes(Normal)")
MEDissThres = 0.1
abline(h = MEDissThres, col = "red")

# Merge near-identical modules to expose the major between-module differences.
merge <- mergeCloseModules(eset, dynamicColors, cutHeight = MEDissThres)
# The merged module colors (mergeCloseModules returns them in `colors`).
mergeColors = merge$colors
# cellToExps <- functio...   # truncated, unfinished draft -- removed

# Redo the tree cut for a given dissimilarity matrix.
moduleDetect <- function(eset, dissTOM)
{
  geneTree <- hclust(as.dist(dissTOM), method = "average")
  plot(geneTree, xlab = "", sub = "",
       main = "Gene clustering TOM-based dissimilarity",
       labels = FALSE, hang = 0.04)
  dynamicMods = cutreeDynamic(dendro = geneTree, distM = dissTOM, minClusterSize = 30)
  dynamicColors = labels2colors(dynamicMods)
}
|
82a4e44c0391e5aa6439f987abf0abc8ded2625d
|
056a9d1bdf6fc1090c87b013d2ab0953de5cbd29
|
/Rcrime.R
|
4a589e49a1b05b09adfe6f3e2d251f49b218a286
|
[] |
no_license
|
virginiasaulnier/HW1_412
|
04cf10ae2d7d996763b96ec5776d12742d3437fb
|
f146a5173fb101a4f6a5e339393eb562391dcfe8
|
refs/heads/master
| 2021-01-18T18:42:33.659116
| 2016-06-22T18:04:50
| 2016-06-22T18:04:50
| 61,148,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,725
|
r
|
Rcrime.R
|
library(dplyr)

# Read a csv file from directory `wd`, with the option to skip one extra
# header line above the real column headers; the headers become the column
# names of the returned data frame.
#
# Changes from the original: the file is addressed with file.path() instead
# of setwd() (so there is no lasting working-directory side effect), and
# `skip` accepts logical TRUE as well as the string "TRUE" (logical input
# previously worked only via implicit coercion in `skip == "TRUE"`).
readin <- function(wd, file, skip = FALSE) {
  raw_lines <- readLines(file.path(wd, file))
  if (isTRUE(skip) || identical(skip, "TRUE")) {
    raw_lines <- raw_lines[-1]  # drop the extra leading line
  }
  read.csv(textConnection(raw_lines), header = TRUE, fill = TRUE)
}
# Read the three raw data sets.  The alley-lights export carries one extra
# header line above the column headers, hence skip = TRUE.
alleylights <- readin('/Users/VirginiaSaulnier/Desktop/412_hw1','311_Service_Requests_-_Alley_Lights_Out.csv',TRUE)
crime <- readin('/Users/VirginiaSaulnier/Desktop/412_hw1','Crimes2016.csv',FALSE)
population <- readin('/Users/VirginiaSaulnier/Desktop/412_hw1','District_Population.csv',FALSE)

# Give the alley-lights columns usable names.
colnames(alleylights) <- c("Request","Status","Completion", "Number","Type", "Address", "Zip", "1","2","Ward","District","Community", "Latitude","Longitude","Location")

# Keep only requests whose creation date mentions 2016.
alley16 <- alleylights[grep("2016", alleylights$Request), ]

# Days between the service request and its completion.
alley16$date_diff <- as.Date(as.character(alley16$Completion),format="%m/%d/%y")-
  as.Date(as.character(alley16$Request), format="%m/%d/%y")

# Average repair time per district.
service_ave = alley16 %>%
  group_by(District) %>%
  summarise(repairtime = round(mean(date_diff, na.rm=TRUE),digits = 1)) %>%
  arrange(District)

# (Removed a stray, unassigned `group_by(alley16, District)` call here:
# its result was discarded, so it had no effect.)

# Number of crimes per district (instances counted per group).
crime_counts = crime %>%
  group_by(District) %>%
  summarise(crimetotal=length(Primary.Type)) %>%
  arrange(District)

# Combine crime counts, repair times and population by district.
results = full_join(crime_counts, service_ave, by = "District")%>%
  arrange(crimetotal)
results = full_join(results, population, by = "District")%>%
  arrange(crimetotal)

#results$crimerate <- as.numeric(results$crimetotal,na.rm=TRUE)/as.numeric(results$population,na.rm=TRUE)
# Crimes per 100 residents.
results$crimerate <- results$crimetotal/results$Population * 100

# Scatter of crime rate (y) against repair time (x), with a fitted line.
plot(results$repairtime, results$crimerate)
#mod1 <- lm(y ~ x)
# Fixed: the regression was lm(repairtime ~ crimerate), but abline()
# interprets the coefficients as y = a + b*x on the current axes, where the
# x axis is repairtime -- so the drawn line did not match the fitted model.
fit <- lm(results$crimerate ~ results$repairtime)
abline(fit)
#summary(fit)
#abline(lm(height ~ bodymass))
#below is the start of print out info, will add if I have time
#dat$MAX <- apply(results[,-1],1,max,na.rm=TRUE)
#cat("The district with the slowest repair time is  the number of crimes this year was")
#cat("The district with the quickest repair time is  the number of crimes this year was",dat$MAX)
|
624b5cb08bde47ced4a6b2a1f70eedc2ecaa1c38
|
610ba9e0f2469188ce36a0471baeb453a9c031dd
|
/app.R
|
235d108db46566f2b2834b623ec4659df729a755
|
[] |
no_license
|
dicicch/DS220Group10
|
57ae11ab00e550b4e757714497cd580a5a7ed5f9
|
f789aff1c899bde50e2af308b5c453668d97805a
|
refs/heads/master
| 2020-05-16T13:21:30.691174
| 2019-04-23T20:57:09
| 2019-04-23T20:57:09
| 183,071,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,961
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Query Structure Reference
#
# query <- '** query **' %>%
# call_neo4j(query, con,
# type = c("row", "graph"),
# output = c("r","json"),
# include_stats = FALSE,
# include_meta = FALSE)
#
## Then, one of these three depending on what *query* returns:
#
# unnest_nodes(nodes_tbl, what = c("all", "label", "properties")) # for node table
# unnest_relationships(relationships_tbl) # for relationship table
# unnest_graph(res) # for api graph result
library(shiny)
library(neo4r)
library(ggplot2)
# Floor a date (or date-time) to the first day of its month.
monthStart <- function(x) {
  lt <- as.POSIXlt(x)
  lt$mday <- 1  # reset the day-of-month; year and month stay untouched
  as.Date(lt)
}
# ========Queries=========
# Cypher fragments assembled in server(): the medals.N bracket clauses get a
# year range (via yearclause) and a season (via seasonclause) appended, then
# return.medals.age is concatenated on as the shared RETURN tail.
return.medals.age = "with (n.medal) as `Tier`, size(collect(n.ID)) as `Medals`
return Tier, Medals"
medals.1 = "Match(n:olympics) where 10<=toInteger(n.age)<=39"
medals.2 = "Match(n:olympics) where 40<=toInteger(n.age)<=68"
medals.3 = "Match(n:olympics) where 69<=toInteger(n.age)<=97"
# Top-10 all-time medalists; one self-contained query per medal tier.
champ.g = "match(m:olympics) where m.medal ='Gold' and m.name <> 'null' return count(m) as medalCount, m.ID, m.name, m.medal order by medalCount DESC limit 10"
champ.s = "match(m:olympics) where m.medal ='Silver' and m.name <> 'null' return count(m) as medalCount, m.ID, m.name, m.medal order by medalCount DESC limit 10"
champ.b = "match(m:olympics) where m.medal ='Bronze' and m.name <> 'null' return count(m) as medalCount, m.ID, m.name, m.medal order by medalCount DESC limit 10"
# Athlete counts per sport, most popular first.
sports = "Match(o:olympics) return count(o) as mostAth, o.sports order by mostAth DESC"
# Clause fragments spliced between user-selected values at query-build time.
yearclause = "<=toInteger(n.year)<="
seasonclause = "n.season ="
# ========================

# Neo4R Connection Object
con <- neo4j_api$new(url = "100.25.118.163:33648",
                     user = "neo4j",
                     password = "stare-pea-explosion") # Password is treated leniently as the sandbox will expire in a few days.

# Load data from CSV only if unimported (i.e. the database has no labels yet).
# Capitalize the attributes you need!
# NOTE(review): the guard checks a local ./dt/athlete_events.csv, but
# load_csv() actually pulls the data from the Google Sheets URL below --
# confirm the local file is only meant as a presence marker.
if (dim(con$get_labels())[1] == 0) {
  # Stop app if data not found
  if (!file.exists("./dt/athlete_events.csv")) {stopApp("Data not found by R: Could not find ./dt/athlete_events.csv")}
  else{
    load_csv(on_load="create (a1: olympics {ID: olympics.ID, name: olympics.Name, sex: olympics.sex, age: olympics.Age, heights: olympics.heights, weights: olympics.weights, team: olympics.team, NOC: olympics.NOC, games: olympics.games, year: olympics.Year, season: olympics.Season, city: olympics.city, sports: olympics.Sport, event: olympics.event, medal: olympics.Medal });",
             con=con,
             url="https://docs.google.com/spreadsheets/d/1-wGtrPbwIMGfwdlyDrcSXeEH0p6n2dEcObCgfl_FiDc/export?format=csv&id=1-wGtrPbwIMGfwdlyDrcSXeEH0p6n2dEcObCgfl_FiDc&gid=1049694386",
             as="olympics",
             output = "r"
    )
  }
}
# Define UI for application.
# Single-page layout: a title, then a navlist with informational tabs and one
# tab per summarization tool.  Input ids read by server(): dateRange,
# checkGroup_Age, select_Season, select_Medal.  Outputs rendered by server():
# age, champ, sports, SliderText1-4.
ui <- fluidPage(
  titlePanel("Olympic History Statistics"),
  h2("Outline:"),
  navlistPanel(widths = c(12,12),
    "Information",
    # Static landing page.
    tabPanel("Home",
      h1("Data Summarizations from 1896-2016", align = "center"),
      h3("DS220-002 Lee, Group 10", align = "center"),
      h4("Dataset provided by Kaggle user heesoo37", align = "center"),
      h4(as.character(Sys.Date()), align = "center"),
      HTML('<p style="text-align:center;"><img src="olympic_rings.png" width="640" height="720" align="middle"/></p>')
    ),
    # Static project background.
    tabPanel("Background",
      h2("Hello! Thank you for checking out our Shiny Web App."),
      h3("This was created as a school project by Matt Bubb, Xincheng Zhou, Muthu Nagesh, Shao Hui Lee and Hunter DiCicco."),
      br(),
      "We think that the Olympics is something that everyone, no matter where you are from, can connect with due to the extreme number of countries that are represented at each Olympics. We hope that these summarizations prove as interesting and thoughtful for the viewer as they were for us."
    ),
    "Summarization Tools",
    # Medal counts per age bracket, filtered by year range and season.
    tabPanel("Age Range Performance",
      titlePanel("Age Range Performance by Year/Season"),
      "Breaks down how each age range performed in a given timespan for the summer or winter Olympics.",
      br(),
      dateRangeInput('dateRange',
        label = "Selected Year: ",
        format = "yyyy",
        language="en",
        start = Sys.Date() - 365 * 10,
        end = Sys.Date(),
        startview = "year"
      ),
      fluidRow(
        column(width = 8,
          selectInput("checkGroup_Age",
            width = '20%',
            label = "Selected Age Range(s)",
            choices = list("10-39" = 1, "40-68" = 2, "69-97" = 3)
          ),
          selectInput("select_Season",
            width = '40%',
            label = "Selected Olympic Season",
            choices = list("Summer" = "Summer", "Winter" = "Winter"),
            selected = 1
          ),
          textOutput("SliderText1"),
          textOutput("SliderText2"),
          textOutput("SliderText3")
        ),
        column(width = 12,
          tableOutput("age")
        )
      )
    ),
    # Top-10 athletes by medal count for the chosen medal tier.
    tabPanel("All-Time Champions",
      titlePanel("All-Time Champion Atheletes"),
      h3("Who has the most medals?"),
      selectInput("select_Medal",
        width = '40%',
        label = "Selected Medal Tier",
        choices = list("Bronze" = "Bronze", "Silver" = "Silver", "Gold" = "Gold"),
        selected = "Bronze"
      ),
      textOutput("SliderText4"),
      tableOutput("champ")
    ),
    # Athlete counts per sport across all years.
    tabPanel("Event Popularity",
      titlePanel("Most Popular Events Throughout History"),
      tableOutput("sports")
    )
  )
)
# Define required server logic.
# Tracks the user's selections in reactiveValues, builds the corresponding
# Cypher queries from the fragments defined above, and binds the matching
# table render to each output.  Improvement over the original: the three
# near-identical age-bracket renderTables and the three medal-tier
# renderTables are now produced by two small factory functions.
server <- function(input, output) {
  # Selected start/end years as "YYYY" strings.
  Dates <- reactiveValues()
  observe({
    Dates$SelectedDates <- c(
      as.character(format(input$dateRange[1], format = "%Y")),
      as.character(format(input$dateRange[2], format = "%Y"))
    )
  })

  # Selected age bracket ("1", "2" or "3").
  Ranges <- reactiveValues()
  observe({
    Ranges$SelectedRanges <- c(as.character(input$checkGroup_Age))
  })

  # Selected Olympic season ("Summer" or "Winter").
  Seasons <- reactiveValues()
  observe({
    Seasons$SelectedSeasons <- c(as.character(input$select_Season))
  })

  # Build one renderTable for an age-bracket medal query; the year range and
  # season are appended to the bracket's WHERE clause at render time.
  age_table <- function(medal_clause) {
    force(medal_clause)  # bind the clause to this closure now
    renderTable({
      setNames(as.data.frame(
        call_neo4j(con = con,
                   query = paste(medal_clause, " and ", Dates$SelectedDates[1],
                                 yearclause, Dates$SelectedDates[2], " and ",
                                 seasonclause, "'", Seasons$SelectedSeasons,
                                 "' ", return.medals.age, sep = ""))
      ), c("Tier", "Medals"))
    })
  }
  age1 <- age_table(medals.1)
  age2 <- age_table(medals.2)
  age3 <- age_table(medals.3)

  # Swap which render function feeds the "age" output as the selection changes.
  observe({
    if (1 %in% Ranges$SelectedRanges) {output$age = age1}
    else if (2 %in% Ranges$SelectedRanges) {output$age = age2}
    else if (3 %in% Ranges$SelectedRanges) {output$age = age3}
  })

  # Selected medal tier ("Bronze", "Silver" or "Gold").
  Medal <- reactiveValues()
  observe({
    Medal$SelectedMedals <- input$select_Medal
  })

  # One renderTable per fixed all-time-champions query.
  champ_table <- function(champ_query) {
    force(champ_query)
    renderTable({
      setNames(as.data.frame(
        call_neo4j(con = con, query = champ_query)
      ), c("Medal Count", "ID", "Name", "Medal"))
    })
  }
  medal1 <- champ_table(champ.b)
  medal2 <- champ_table(champ.s)
  medal3 <- champ_table(champ.g)

  # Swap which champions table feeds the "champ" output.
  observe({
    if ("Bronze" %in% Medal$SelectedMedals) {output$champ = medal1}
    else if ("Silver" %in% Medal$SelectedMedals) {output$champ = medal2}
    else if ("Gold" %in% Medal$SelectedMedals) {output$champ = medal3}
  })

  # Static event-popularity table.
  output$sports <- renderTable({
    setNames(as.data.frame(
      call_neo4j(con = con, query = sports)
    ), c("Number of Athletes", "Event"))
  })

  # Echo the current selections back to the UI.
  output$SliderText1 <- renderText({paste("Years Selected: ", paste(Dates$SelectedDates, collapse = " - "))})
  output$SliderText2 <- renderText({paste("Age Ranges Selected: ", paste(Ranges$SelectedRanges, collapse = ", "))})
  output$SliderText3 <- renderText({paste("Seasons Selected: ", Seasons$SelectedSeasons)})
  output$SliderText4 <- renderText({paste(Medal$SelectedMedals, " Champions Top 10:", sep = "")})
}

# Run the application
shinyApp(ui = ui, server = server)
|
92fce769855762c3cdf91bc5d343c8cfaa8b5f31
|
6859b2f6070e639040b89016bf50c72813ae8a1e
|
/Code/untreaties_download.R
|
3da0d3d46b8eda78b267458e2d91d528910407a3
|
[] |
no_license
|
kfruge/ehrfruhag_accesspoints
|
0ca17f12123e5dc4a60ab99101ca594a0308482f
|
36842e2c0ccc6d933c7cea8f2909f75f42c2e63b
|
refs/heads/master
| 2020-03-20T08:15:25.489038
| 2018-06-14T07:05:33
| 2018-06-14T07:05:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,384
|
r
|
untreaties_download.R
|
# Author: Kimberly R. Frugé
# File: untreaties_download.R
# Date: 14 June 2018
# Purpose: download treaty ratification data from the untreaties repository
#          by Zachary M. Jones (https://github.com/zmjones/untreaties).
# Results: this file creates untreaties.csv
setwd("~/Dropbox/ehlfruhag_accesspoints/Data")
library(countrycode); library(assertthat); library(RCurl)
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Read in "untreaties" repository necessary files ###
# untreaties uses an older version of stringr, so make sure that version is
# installed, otherwise the regex calls will throw an error:
#install_version("stringr", version = "0.6.1", repos = "http://cran.us.r-project.org")

# Fetch and evaluate utilities.R from the untreaties repository; it defines
# loadData() used below.  (Fixed: the URL had a fragment of an unrelated
# repository URL pasted in front of it, so getURL() could never resolve it.)
script <- getURL("https://raw.githubusercontent.com/zmjones/untreaties/master/utilities.R", ssl.verifypeer = FALSE)
eval(parse(text = script))

# Download the treaties of interest (4-3, 4-4, 4-9); the index lives at
# https://github.com/zmjones/untreaties/blob/master/index.csv
treatycescr <- read.csv(text=getURL("https://raw.githubusercontent.com/zmjones/untreaties/master/treaties/4-3.csv"))
treatyccpr <- read.csv(text=getURL("https://raw.githubusercontent.com/zmjones/untreaties/master/treaties/4-4.csv"))
treatycat <- read.csv(text=getURL("https://raw.githubusercontent.com/zmjones/untreaties/master/treaties/4-9.csv"))

# Create the treaties folder if it doesn't exist.
if (!dir.exists("./treaties")) {
  dir.create("./treaties")
}

# Save to the treaties folder so that utilities.R's loadData() can read them.
write.csv(treatycescr, "./treaties/4-3.csv", row.names=FALSE)
write.csv(treatyccpr, "./treaties/4-4.csv", row.names=FALSE)
write.csv(treatycat, "./treaties/4-9.csv", row.names=FALSE)

# Load the data as expanded country-year panels, 1945-2013.
cescr <- loadData(chap="4", treaty="3", expand=TRUE, panel=TRUE, syear="1945", eyear="2013")
ccpr <- loadData(chap="4", treaty="4", expand=TRUE, panel=TRUE, syear="1945", eyear="2013")
cat <- loadData(chap="4", treaty="9", expand=TRUE, panel=TRUE, syear="1945", eyear="2013")
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Create ratify variable (1 if ratified, acceded or succeeded) ###
colnames(cescr) <- c("participant", "year", "signature", "ratification", "accession", "succession")
cescr$cescr_ratify <- ifelse(cescr$ratification == 1 | cescr$accession == 1 | cescr$succession == 1, 1, 0)
cescr <- cescr[, c("participant", "year", "cescr_ratify")]

colnames(ccpr) <- c("participant", "year", "signature", "ratification", "accession", "succession")
ccpr$ccpr_ratify <- ifelse(ccpr$ratification == 1 | ccpr$accession == 1 | ccpr$succession == 1, 1, 0)
ccpr <- ccpr[, c("participant", "year", "ccpr_ratify")]

colnames(cat) <- c("participant", "year", "signature", "ratification", "accession", "succession")
cat$cat_ratify <- ifelse(cat$ratification == 1 | cat$accession == 1 | cat$succession == 1, 1, 0)
cat <- cat[, c("participant", "year", "cat_ratify")]
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Merge the three treaties into one country-year panel ###
dat <- merge(cescr, ccpr, all.x=TRUE, all.y=TRUE, by=c("participant", "year"))
dat <- merge(dat, cat, all.x=TRUE, all.y=TRUE, by=c("participant", "year"))
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Fill in blanks: a missing entry means not ratified ###
dat$cescr_ratify[is.na(dat$cescr_ratify)] <- 0
dat$ccpr_ratify[is.na(dat$ccpr_ratify)] <- 0
dat$cat_ratify[is.na(dat$cat_ratify)] <- 0
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Set Country Codes (COW numeric codes via countrycode) ###
dat <- dat[!(dat$participant %in% c("Serbia", "State of Palestine")), ]
dat$ccode <- countrycode(dat$participant, "country.name", "cown")
assert_that(!anyDuplicated(dat[, c("ccode", "year")])) # Make sure there are no duplicates
#--------------------------------------------------------------------------------------------------------------------------------------------------#
### Save Data ###
write.csv(dat, "./untreaties.csv", row.names=FALSE)
|
20f103d4efafd1236612b0d9aa81386e5d622e79
|
840d52ada0ecc95c439e74cd563e4404c572143a
|
/Energy/energy_monthly_average.R
|
839edf6f72ae3a91fb39455446fa9cc680b80e53
|
[] |
no_license
|
DanTheMinotaur/R-Analysis-Weather-Energy
|
8fbb741358c3aa19a4aebcb41bcff2ebfae9bd6b
|
5b364cadbb8bfa6d957253092eda32ff3bc77c75
|
refs/heads/master
| 2022-12-12T06:40:12.263745
| 2019-04-15T23:43:56
| 2019-04-15T23:43:56
| 180,748,582
| 0
| 0
| null | 2022-12-08T04:58:16
| 2019-04-11T08:29:09
|
R
|
UTF-8
|
R
| false
| false
| 768
|
r
|
energy_monthly_average.R
|
# Title     : Create groupings of energy_data from eirgrid
# Objective : group data in meaningful ways for data visualisations
# Created by: Daniel Devine
# Created on: 13/04/2019
source("./energy_groupings.R")  # provides monthly_energy_usage (and ggplot2)

View(monthly_energy_usage)
colnames(monthly_energy_usage)

# Factor the months so they are treated as discrete categories on the axis.
monthly_energy_usage$month <- factor(monthly_energy_usage$month)
# Convert the integer month to its abbreviated name.
monthly_energy_usage$month <- month.abb[monthly_energy_usage$month]

# Bar chart of average energy usage per month, bars ordered by usage.
# Fixed: aes(fill = "month") mapped the literal string "month" (one colour
# for every bar); fill = month colours each bar by its month as intended.
ggplot(monthly_energy_usage, aes(x = reorder(month, mean_energy_usage), y = mean_energy_usage)) +
  geom_bar(stat = "identity", aes(fill = month)) +
  xlab("Month") +
  ylab("Mwh") +
  ggtitle("Average Monthly Energy Usage") +
  guides(fill=FALSE)  # month is already on the x axis; hide the legend
|
7037e5e98ad1b6c02c2ffa9345f80615d3ea2e60
|
54d6ea7251681b5a49ab139a0bf332d06da5439a
|
/plot/Error/PPIC/Kosarak.R
|
1128904948b3b117dea3265b1899301818c1ef27
|
[] |
no_license
|
wsgan001/MasterThesis-3
|
d558b3dcf5b302ef3a4e1e3ffc412ffd67ca29cc
|
a3dc8797242e3f9b282a8621fced57981fc6bdae
|
refs/heads/master
| 2020-04-07T00:31:18.669699
| 2017-06-03T00:41:58
| 2017-06-03T00:41:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 950
|
r
|
Kosarak.R
|
#DATA
# Minimum-support levels (%) and the corresponding runtimes in seconds of
# each mining implementation on the Kosarak dataset.
x <- c(0.2, 0.4, 0.6, 0.8, 1.0)
cp4d <- c(262.589, 262.037, 261.587, 256.581, 272.613)
spark1 <- c(108.297, 49.781, 38.638, 32.163, 25.873)
sparkNP <- c(44.389, 34.931, 31.863, 25.149, 20.716)
sparkPPIC <- c(43.858, 34.967, 31.483, 24.314, 19.117)
sparkPPICFix <- c(43.559, 33.130, 30.081, 24.420, 20.054)
#PLOT
# Log-scale y axis. NOTE(review): ylim = c(18, 45) covers only the Spark
# variants, so the cp4d series (~260 s) is drawn outside the visible window
# -- presumably intentional to zoom in on the Spark curves; confirm.
plot(x,cp4d, log = "y", type="l", xlab="Minsup(%)", ylab="Time (s logscale)", col="blue", ylim=c(18, 45))
# Overlay the remaining series on the same axes.
lines(x,spark1, col="red")
lines(x,sparkNP, col="green")
lines(x,sparkPPIC, col="magenta")
lines(x,sparkPPICFix, col="orange")
title(main="Kosarak", font.main=4)
# Reset figure region to the full device so the legend can be placed
# relative to the whole plot area.
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n")
#Pretty up
legend("bottomright", legend=c("Original CP4D", "Original Spark", "Spark - no permutation", "Spark - PPIC", "Spark - PPICfix413"), lty=c(1,1),lwd=c(2,2), col=c("blue","red", "green", "magenta", "orange"))
box()
|
a341ceb35deada8e167a1bf63e4c5aa5740100ce
|
6ee3fb64677ccb66c0f7bb8173ffc36b70868f6d
|
/src/01-toyline.R
|
6ef4dc1bdec923e8e45aca0daab7be619b0cdf71
|
[] |
no_license
|
BernhardKonrad/2014-SFU-R
|
a84b6f250ae55d5fdfee9407298491bc5f373f8c
|
181dd129cfd9f63bba96d4d526376121b8cb9e00
|
refs/heads/master
| 2020-06-02T07:25:23.234051
| 2014-03-01T19:42:02
| 2014-03-01T19:42:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
01-toyline.R
|
# NOTE(review): clearing the entire workspace from inside a script is
# discouraged -- it destroys the caller's session state.
rm(list=ls())
# True regression line: y = a + b*x.
a <- 15
b <- -12
# Noise scale. NOTE(review): the name sigSq suggests a variance (sigma
# squared), but it is passed to rnorm() as `sd` below; if a variance is
# intended the call should use sd = sqrt(sigSq) -- confirm intent.
sigSq <- 0.5
x <- runif(2000)
y <- a + b * x + rnorm(2000, sd = sigSq)
# Persist the mean of x for downstream use.
avgX <- mean(x)
write(avgX, "scraps/avgX.txt")
plot(x,y)
# Overlay the true line on the simulated scatter.
abline(a, b, col="red")
dev.print(png, "results/toylinePlot.png")
|
4724f4e18d3974c0641675cee91f8c1df373b3d7
|
05d63cddca66a42c063fefad1ab687867f98e2c5
|
/man/bhl_getsubjecttitles-defunct.Rd
|
587082654bdfcb51bcd1b9b6ae289e3ef840c705
|
[
"MIT"
] |
permissive
|
cran/rbhl
|
41783fb4bb0e29a33acb8b720bc21700c45d5277
|
7dc88838844e58455994cc83e602107780e262fa
|
refs/heads/master
| 2021-06-12T00:39:52.208803
| 2021-05-13T03:30:03
| 2021-05-13T03:30:03
| 17,699,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 337
|
rd
|
bhl_getsubjecttitles-defunct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bhl_getsubjecttitles.R
\name{bhl_getsubjecttitles}
\alias{bhl_getsubjecttitles}
\title{Return a list of titles associated with a subject}
\usage{
bhl_getsubjecttitles(...)
}
\description{
Return a list of titles associated with a subject
}
\keyword{internal}
|
f61db1f88483d8be083b60bdc22edd8a7e0bea55
|
c36ddf4f524810c12f0dec48350d28476135ee6b
|
/estimate.R
|
c4c3dcf9489c87bac63bc8497e544231b585f3e0
|
[] |
no_license
|
pnxenopoulos/skellam_soccer
|
0a81794492718f508fa0d92261041b445a156ee5
|
0f8a4b2bde963278235c209af6bb836b5774652d
|
refs/heads/master
| 2020-04-13T02:45:44.324789
| 2018-12-31T07:31:59
| 2018-12-31T07:31:59
| 162,911,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,032
|
r
|
estimate.R
|
library(tidyverse)
library(plyr)
# Log-likelihood of a single match's goal difference under a Skellam model.
#
# params layout: [1] baseline log-rate mu, [2] home advantage h,
# [3:22] per-team attack strengths, [23:42] per-team defence strengths
# (sum-to-zero constraints are imposed externally via maxNM's eqA/eqB).
# `row` must provide GDiff (home minus away goals) and numeric
# HomeTeam / AwayTeam indices in 1..20.
skellam.likelihood <- function(row, params) {
  base.rate <- params[1]
  home.adv <- params[2]
  goal.diff <- row$GDiff
  # Expected Poisson scoring rates for the home and away sides.
  rate.home <- exp(base.rate + home.adv + params[2 + row$HomeTeam] + params[22 + row$AwayTeam])
  rate.away <- exp(base.rate + params[2 + row$AwayTeam] + params[22 + row$HomeTeam])
  # Modified Bessel function of the first kind, order |goal difference|.
  bessel.term <- besselI(2 * sqrt(rate.home * rate.away), abs(goal.diff))
  log(exp(-(rate.home + rate.away)) * (rate.home/rate.away)^(goal.diff/2) * bessel.term)
}
# Total log-likelihood of the observed goal differences, summed over the
# rows of the global data frame `current`.
#
# NOTE: despite its name this returns the (positive) log-likelihood; it is
# passed to maxNM() below, which maximises it directly.
#
# params: numeric vector of length 42 (mu, home advantage, 20 attack and
#   20 defence strengths) forwarded to skellam.likelihood().
neg.log.likelihood <- function(params) {
  total <- 0
  # seq_len() (rather than 1:nrow) is safe when `current` has zero rows:
  # the loop is skipped and the log-likelihood of no data is 0.
  for (i in seq_len(nrow(current))) {
    total <- total + skellam.likelihood(current[i, ], params = params)
  }
  return(total)
}
# Skellam probability mass function: P(X1 - X2 = z) for independent
# Poisson variables with means lambda1 and lambda2.
skellam <- function(z,lambda1,lambda2) {
  modified.bessel <- besselI(2 * sqrt(lambda1 * lambda2), abs(z))
  exp(-(lambda1 + lambda2)) * (lambda1/lambda2)^(z/2) * modified.bessel
}
# Load the 2018/19 EPL results and derive the per-match goal difference.
match_results <- read_csv("epl_results_1819.csv")
match_results <- match_results %>% mutate(GDiff = HG-AG)
current <- match_results %>% select(HomeTeam, AwayTeam, HG, AG, GDiff)
#current <- match_results %>% filter(Season == "2015-16") %>% select(HomeTeam, AwayTeam, HG, AG, GDiff)
# Map team names to integer codes 1..20 in order of first appearance as the
# home team (mapvalues is from plyr). NOTE(review): seq(1:20) works here but
# is an odd spelling of seq_len(20) / 1:20.
home_team_mapped <- as.numeric(mapvalues(current$HomeTeam, from = unique(current$HomeTeam), to = seq(1:20)))
# Name->code lookup. NOTE(review): this assumes the first 20 rows of
# HomeTeam contain all 20 distinct teams -- confirm for the input file.
key_vals <- home_team_mapped[1:20]
names(key_vals) <- current$HomeTeam[1:20]
# Recode the away-team names using the same lookup.
for(i in 1:nrow(current)) {
current$AwayTeam[i] <- which(names(key_vals) == current$AwayTeam[i])
}
current$HomeTeam <- home_team_mapped
current$AwayTeam <- as.numeric(current$AwayTeam)
#result <- optim(par=rnorm(42,0,0.5), fn=NLL, data=current, method = "L-BFGS-B", control=list(trace=0))
#y <- result$par[23:42]; names(y) <- names(key_vals)
library(maxLik)
# Linear equality constraints A %*% theta = B imposing sum-to-zero on the
# attack (params 3:22) and defence (params 23:42) strength blocks.
A <- matrix(0, 2, 42)
B <- matrix(0, 2, 1)
A[1,3:22] <- 1
A[2,23:42] <- 1
# Maximise the log-likelihood (see note on neg.log.likelihood's name) from
# random starting values.
res <- maxNM(neg.log.likelihood, start = rnorm(42,0.1,0.1), constraints=list(eqA = A, eqB = B))
# Defence-strength estimates, labelled by team name, sorted for inspection.
y <- res$estimate[23:42]; names(y) <- names(key_vals)
sort(y)
# NOTE(review): lambda1 and lambda2 are never defined at this scope (they
# are only locals inside skellam.likelihood), so this loop errors as
# written -- presumably fitted rates were meant to be plugged in here.
sum <- 0
for(j in 1:10) {
sum <- sum + skellam(j, lambda1, lambda2)
}
|
ab4f11086dcbb30f952db09fd74d3d4af6769266
|
f21e2801678d3d301e11fb28a7de52ca73b66d5b
|
/lesson 2/lesson 2.R
|
13bea63319f5dc6aecd8963b03de7e18506e4476
|
[] |
no_license
|
osefatul/Udacity-Data-Analyst
|
c6d1d91e20144230388e57f2347d63fad5eba670
|
94c31b54430cf5fb20351987106c7a3e22c25f29
|
refs/heads/master
| 2022-12-12T13:57:01.010899
| 2020-09-01T16:41:16
| 2020-09-01T16:41:16
| 292,031,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
lesson 2.R
|
# Exploratory look at the reddit survey data: factor handling and ordering.
reddit= read.csv('reddit.csv')
summary(reddit)
names(reddit)
head(table(reddit$state))
summary(reddit)
str(reddit)
summary(reddit$age.range)
levels(reddit$age.range)
str(reddit)
# convert the column to a factor
reddit$age.range= factor(reddit$age.range)
# check the structure of reddit; notice especially age.range
str(reddit)
# check the levels of age.range
levels(reddit$age.range)
library(ggplot2)
# see the range levels as plotted
qplot(data = reddit, x=age.range)
# as seen in the plot the categories are not in a sensible order, so rebuild
# the factor as ordered with explicit levels.
# NOTE(review): any value not matching these strings exactly becomes NA --
# verify '65 of Above' against the raw data.
reddit$age.range=factor(reddit$age.range, levels=c('Under 18','18-24','25-34','35-44', '45-54','55-64','65 of Above'),
ordered=T)
qplot(data=reddit, x=age.range)
# let's check the income variable
str(reddit)
qplot(data=reddit, x=reddit$income.range)
# let's factorize and order the income range.
# NOTE(review): several level strings look typo'd ('$81,000-$10,0000',
# '$10,0000-$150000') and the $30,000-$40,000 band is missing; values that
# do not match exactly will silently become NA -- verify against
# levels(reddit$income.range) before relying on this factor.
reddit$income.range= factor(reddit$income.range, levels=c('Under $20,000',
'$21,000-$29,000','$41,000-$60,000', '$61,000-$80,000','$81,000-$10,0000',
'$10,0000-$150000','$150,000 of above'), ordered=T)
levels(reddit$income.range)
|
6f3ecf19545189c1f894930df7b988089c5e67ad
|
9e8c1f5ac055cbbfd6aae73bca32885bdadcd491
|
/Regression/random.R
|
749337f00c06b1a490ab0a5d8caf0c35e48b060a
|
[] |
no_license
|
hughbzhang/SIMR
|
682c0a03720933210ed779d13f8b4640f3a83d1f
|
9022b2114d139d41738f588ecdb5f951f03220c2
|
refs/heads/master
| 2021-06-11T20:11:40.144148
| 2016-04-03T07:14:45
| 2016-04-03T07:14:45
| 21,960,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
random.R
|
library(kernlab)

# 7x7 grids accumulating cross-validation and training error counts over the
# (sigma, C) hyper-parameter grid: exponents -3..3 map to indices 1..7.
CV <- matrix(data = 0, 7, 7)
TRAIN <- matrix(data = 0, 7, 7)

# Repeat the experiment on fresh random data. With purely random labels the
# SVM cannot beat chance, which is what this script is probing.
for (i in 1:100) {
  # 100 samples x 30 features of uniform noise.
  random <- matrix(data = runif(3000), ncol = 30, nrow = 100)
  print(i)
  test <- random[1:5, ]
  train <- random[6:100, ]
  # Balanced 0/1 labels, shuffled so they carry no signal.
  Y <- c(rep(0, 50), rep(1, 50))
  Y <- Y[sample(100)]
  # Grid search currently disabled; flip to TRUE to re-run it.
  # (was `if(F)`: F can be reassigned, FALSE cannot; also removed the unused
  # `sum = 0` variable that shadowed base::sum)
  if (FALSE) {
    for (SIGMA in seq(-3, 3, by = 1)) {
      for (OURC in seq(-3, 3, by = 1)) {
        model <- ksvm(train, Y[6:100], type = "C-svc", kernel = 'rbf', kpar = list(sigma = 2^SIGMA), C = 2^OURC)
        # Misclassifications on the 5 held-out points for this (sigma, C) cell.
        now <- (sum(predict(model, test) != Y[1:5]))
        CV[SIGMA + 4, OURC + 4] <- CV[SIGMA + 4, OURC + 4] + now
        TRAIN[SIGMA + 4, OURC + 4] <- TRAIN[SIGMA + 4, OURC + 4] + sum(predict(model, train) != Y[6:100])
      }
    }
  }
}
print(CV)
print(TRAIN)
|
04a4aafd6fbc01c1c015a6be34343ade49563832
|
5a6b954048ef2df667ab9465b477c2257ed1e323
|
/ggplot_tutorial.R
|
5e557f824eacff699724b40c0ee0097b1da6bd3c
|
[] |
no_license
|
benchoi93/ACTM-R-
|
e195e4cc9b2ffaa5b0ba90a19dcd726ef521a242
|
5e1d325e3afb9a6c1085d512e64f73bd295a2515
|
refs/heads/master
| 2021-04-12T08:58:03.431463
| 2018-03-27T06:31:37
| 2018-03-27T06:31:37
| 126,761,685
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,861
|
r
|
ggplot_tutorial.R
|
# R + ggplot2 tutorial

# Three ways to assign the value 1: the preferred `<-`, the discouraged `=`,
# and right-assignment `->` (value on the left, target name on the right).
a <- 1
b = 1
1 -> c  # BUGFIX: was `c -> 1`, which parses as `1 <- c` and errors (invalid assignment target)
print(a)
print(b)
print(c)
a == b
a != b
c <- a+b
print(c)
# Plain numeric literals are doubles by default.
typeof(a)
typeof(b)
typeof(c)
# Coerce to integer; typeof() now reports "integer".
a = as.integer(a)
typeof(a)
################################
# type or storage mode of object
################################
# "logical",
# "integer",
# "double",
# "complex",
# "character",
# "raw" and "list",
# "NULL",
# "closure" (function),
# "special" and "builtin"
# "environment",
################################
# append() returns a new vector; it does not modify `a` in place.
a = c(1,2,3)
print(a)
append(a, 4)
a
# Reassigning captures the appended result.
a <- append(a, 4)
a
################################
# Combining vectors: append concatenates, rbind/cbind build matrices.
a = c(1,2,3)
b = c(4,5,6)
a1 = append(a,b)
print(a1)
length(a1)
dim(a1)
a2 = rbind(a,b)
print(a2)
length(a2)
dim(a2)
a3 = cbind(a,b)
print(a3)
length(a3)
dim(a3)
################################
# Three equivalent ways to take the even elements of 1..100.
a = seq(1,100,1)
a[a %% 2 == 0]
a[which(a %% 2 == 0)]
subset(a , a%%2 == 0)
b = a %% 2 == 0
c = a %% 3 == 0
b
c
b & c
# NOTE(review): `&&` on length > 1 vectors is an error in R >= 4.3
# (older versions silently used only the first elements).
b && c
# ‘&’ and ‘&&’ indicate logical AND and ‘|’ and ‘||’ indicate
# logical OR. The shorter form performs elementwise comparisons in
# much the same way as arithmetic operators. The longer form
# evaluates left to right examining only the first element of each
# vector. Evaluation proceeds only until the result is determined.
# The longer form is appropriate for programming control-flow and
# typically preferred in ‘if’ clauses.
a[b & c]
a[b | c]
################################
# Building a data frame column by column; three equivalent ways to pull a
# column back out ($, positional index, name index).
a = c(1,2,3)
b = c("one","two","three")
df_a = data.frame(a)
df_b = data.frame(b , stringsAsFactors = F)
df = cbind(df_a,df_b)
colnames(df) = c("number","name")
df
df$number
df[,1]
df[,"number"]
df$name
df[,2]
df[,"name"]
typeof(df) # list with same row length
################################
# Add three to every element of a numeric input (vectorized).
x_plus_3 <- function(x) {
  x + 3
}
# Elementwise transformation of a matrix: explicit nested loops vs the
# apply family.
df = matrix(1:6, nrow = 2)
df_initial = df
for(i in 1:dim(df)[1]){
for(j in 1:dim(df)[2]){
df[i,j] = x_plus_3(df[i,j])
}
}
df
# Restore the original matrix before trying the functional equivalents.
df <- df_initial
df
# sapply/lapply iterate over the matrix elementwise; apply lets you choose
# the margin (1 = rows, 2 = columns, c(1,2) = every cell).
sapply(df , FUN = x_plus_3)
lapply(df , FUN = x_plus_3)
apply(df , MARGIN = c(1,2) , FUN = x_plus_3)
apply(df , MARGIN = c(1) , FUN = x_plus_3)
apply(df , MARGIN = c(2) , FUN = x_plus_3)
################################
# The same update on the first 10 elements, written as a for loop ...
a = 1:100
a
for(i in 1:10){
a[i] = x_plus_3(a[i])
}
a
# ... and as a while loop.
a = 1:100
n = 1
while(n <= 10){
a[n] <- x_plus_3(a[n])
n <- n+1
}
# Print only the elements that the loop changed (those where a[i] != i).
for(i in 1:100){
if(i != a[i]){
print(a[i])
}
}
################################
# NOTE(review): calling install.packages() from inside a script is
# discouraged; installation should happen once, outside the analysis code.
install.packages("ggplot2")
library(ggplot2)
# Toy data: index vs standard-normal noise.
a = 1:100
b = rnorm(100)
df = data.frame(cbind(a,b))
head(df)
# plotting without ggplot
plot(df$a , df$b)
###
# Basic geoms, smoothers, histograms with varying bin counts, and empirical
# CDFs rendered with different geoms.
ggplot(data = df , aes(x = a , y = b)) + geom_point()
ggplot(data = df , aes(x = a , y = b)) + geom_line()
ggplot(data = df , aes(x = a , y = b)) + geom_point() + geom_smooth()
ggplot(data = df , aes(x = a , y = b)) + geom_point() + geom_smooth(method = "lm")
ggplot(data = df , aes(x = b )) + geom_histogram(bins = 10)
ggplot(data = df , aes(x = b )) + geom_histogram(bins = 30)
ggplot(data = df , aes(x = b )) + geom_histogram(bins = 100)
ggplot(data = df , aes(x = b )) + stat_ecdf( )
ggplot(data = df , aes(x = b )) + stat_ecdf( geom = "step")
ggplot(data = df , aes(x = b )) + stat_ecdf( geom = "point")
ggplot(data = df , aes(x = b )) + stat_ecdf( geom = "line")
# Grouping column: mapping it raw gives a continuous colour scale, wrapping
# it in factor() gives discrete colours.
df$c = rep(1:10, each = 10)
ggplot(df , aes(x = a , y =b , color = c)) + geom_point()
ggplot(df , aes(x = a , y =b , color = factor(c))) + geom_point()
################################
# NOTE(review): read_csv() is from readr, which is not loaded in this
# script -- confirm it is attached elsewhere, or this line errors.
data <- read_csv("~/Google Drive/KAIST/ANS_Simulator/data.txt", col_names = FALSE)
data = as.data.frame(data)
head(data)
colnames(data) = c("Time","MP","Speed","Flow")
# Speed over time, coloured by measurement point (MP); guides(color = F)
# hides the legend.
ggplot(data , aes( x= Time , y = Speed)) + geom_point()
ggplot(data , aes( x= Time , y = Speed , color = MP)) + geom_point()
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_point()
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_point() + guides(color = F)
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_line() + guides(color = F)
# Reference lines at speeds 40 and 60.
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_line() + guides(color = F) + geom_abline(slope = 0 , intercept = 40 , color = "red")
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_line() + guides(color = F) + geom_abline(slope = 0 , intercept = 40 , color = "red") + geom_abline(slope = 0 , intercept = 60 , color = "blue")
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_line() + guides(color = F)
# Build a manual palette with one colour per MP level (12 + 8 + 2 = 22).
levels(factor(data$MP))
length(levels(factor(data$MP)))
RColorBrewer::display.brewer.all()
new_colors = c(RColorBrewer::brewer.pal(12 , "Set3"),RColorBrewer::brewer.pal(8 , "Set2"),RColorBrewer::brewer.pal(3 , "Set1")[1:2])
ggplot(data , aes( x= Time , y = Speed , color = factor(MP))) + geom_line() + scale_color_manual(values = new_colors)
ggplot(data , aes( x= Time , y = Flow , color = factor(MP))) + geom_line() + guides(color = F)
# Flow vs speed, and fundamental-diagram style plots (Flow/Speed = density).
ggplot(data , aes( x= Flow , y = Speed , color = factor(MP))) + geom_point() + guides(color = F)
ggplot(data , aes( x= Flow/Speed , y = Flow , color = factor(MP))) + geom_point() + guides(color = F)
ggplot(data , aes( x= Flow/Speed , y = Flow , color = factor(MP))) + geom_line() + guides(color = F)
# Facet the time-speed plots by measurement point, with various layouts and
# colour scales for Flow.
ggplot(data , aes( x= (Time) , y = Speed )) + geom_point() + facet_wrap(~MP)
ggplot(data , aes( x= (Time) , y = Speed )) + geom_point() + facet_wrap(~MP , nrow = 3)
ggplot(data , aes( x= (Time) , y = Speed )) + geom_point() + facet_wrap(~MP , ncol = 5)
ggplot(data , aes( x= (Time) , y = Speed , color = Flow)) + geom_point() + facet_wrap(~MP , ncol = 5)
ggplot(data , aes( x= (Time) , y = Speed , color = Flow)) + geom_point() + facet_wrap(~MP , ncol = 5) + scale_color_gradient(low = "red", high = "blue")
ggplot(data , aes( x= (Time) , y = Speed , color = Flow)) + geom_point() + facet_wrap(~MP , ncol = 5) + scale_color_gradient2(low = "red" , mid = "yellow" , high ="green" , midpoint = 1000)
|
55ed29cf584d9c9a57f36f622100de84532cb7d5
|
a03c8593a2767212e6308872a98cdd37e1ac1694
|
/R/fixedvarianceKernGradient.R
|
215c4401c79068036b46add183d7e613ea583cb7
|
[] |
no_license
|
cran/GPrank
|
1362b2e85037660804a7afefac7a7c9ce6c6de80
|
a67e15734bd4e8d72a8159453272c526fa374b10
|
refs/heads/master
| 2021-01-12T13:08:34.932470
| 2018-08-17T14:00:03
| 2018-08-17T14:00:03
| 72,123,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
fixedvarianceKernGradient.R
|
#' @title fixedvarianceKernGradient
#'
#' @description
#' Function for computing the gradient of the \code{fixedvariance} kernel.
#' As the parameters of the \code{fixedvariance} kernel are fixed, the
#' gradient equals zero. This function is written to comply with
#' the \code{gptk} package.
#'
#' @param kern GP kernel structure which contains the
#' fixed variances.
#' @param x x
#' @param x2 x2
#' @param covGrad covGrad
#'
#' @export
#' @return Return value for the gradient of fixedvariance kernel.
#'
#' @keywords fixedvariance
#' @keywords internal
#' @author Hande Topa, \email{hande.topa@@helsinki.fi}
#'
fixedvarianceKernGradient <-
function(kern,x,x2,covGrad) {
  # The parameters of the fixedvariance kernel are fixed, so the gradient
  # with respect to them is identically zero; all arguments are accepted
  # only to satisfy the gptk kernel-gradient interface.
  0
}
|
503b4084dd0d70046f0ad8e1c3f32dff056ca8dd
|
0a0ae7b243236e823c23bc71b9a0c99625d759a5
|
/02 - Build Data Frame - Copy.R
|
dc727532053445b01ac4d3f06619902112b64c30
|
[] |
no_license
|
Jia-Lin/data_science_capstone_project
|
ff0bdb31182222ffb159e75d63d47267234c414a
|
7f43fa73343426192ea3b00a7b59a28201ed6667
|
refs/heads/master
| 2021-01-10T23:46:10.147500
| 2016-10-09T13:47:36
| 2016-10-09T13:47:36
| 70,405,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
02 - Build Data Frame - Copy.R
|
# Build a combined bigram+trigram frequency table for one corpus.
#
# Reads the saved bigram and trigram term matrices, collapses each into a
# feature/frequency data frame via column sums, and stacks the two
# (bigram rows first, matching the original script's output).
build_ngram_df <- function(bigram_path, trigram_path) {
  bigram <- readRDS(bigram_path)
  counts <- colSums(bigram, na.rm = TRUE)
  df <- data.frame(feature = names(counts), frequency = counts)
  trigram <- readRDS(trigram_path)
  counts <- colSums(trigram, na.rm = TRUE)
  rbind(df, data.frame(feature = names(counts), frequency = counts))
}

# Process each corpus with the same pipeline instead of repeating the code
# three times (the original also bound the name `sum`, shadowing base::sum),
# and write each result next to its inputs as <corpus>.df.
for (corpus in c("blog", "twitter", "news")) {
  df <- build_ngram_df(paste0("./training/bigram_", corpus),
                       paste0("./training/trigram_", corpus))
  saveRDS(df, paste0("./training/", corpus, ".df"))
}
rm(df)
|
332d850f62861c9bcc41fec2b593c3976257601d
|
dea5eb56fb0ef73fc57b354fc4001ef76ee32c59
|
/R/ds.glm.R
|
2e7699aeb0d5bbb0304a1c0a801e56180fd891bf
|
[] |
no_license
|
datashield/dsModellingClient
|
beb48723e8af2d96c574ace3e6e42fe87172f919
|
1b18f86365f163f998c6e90f4853c123c680fe67
|
refs/heads/master
| 2021-01-10T19:22:52.859131
| 2019-03-09T09:50:09
| 2019-03-09T09:50:09
| 9,456,558
| 0
| 3
| null | 2019-03-09T09:50:10
| 2013-04-15T19:32:25
|
R
|
UTF-8
|
R
| false
| false
| 15,365
|
r
|
ds.glm.R
|
#'
#' @title Runs a combined GLM analysis of non-pooled data
#' @description A function fit generalized linear models
#' @details It enables a parallelized analysis of individual-level data sitting
#' on distinct servers by sending
#' @param formula a character, a formula which describes the model to be fitted
#' @param family a description of the error distribution function to use in the model
#' @param startBetas starting values for the parameters in the linear predictor
#' @param offset a character, null or a numeric vector that can be used to specify an a priori known component
#' to be included in the linear predictor during fitting.
#' @param weights a character, the name of an optional vector of 'prior weights' to be used in the fitting
#' process. Should be NULL or a numeric vector.
#' @param data a character, the name of an optional data frame containing the variables in
#' in the \code{formula}. The process stops if a non existing data frame is indicated.
#' @param checks a boolean, if TRUE (default) checks that takes 1-3min are carried out to verify that the
#' variables in the model are defined (exist) on the server site and that they have the correct characteristics
#' required to fit a GLM. The default value is FALSE because checks lengthen the runtime and are mainly meant to be
#' # used as help to look for causes of eventual errors.
#' @param maxit the number of iterations of IWLS used
#' instructions to each computer requesting non-disclosing summary statistics.
#' The summaries are then combined to estimate the parameters of the model; these
#' parameters are the same as those obtained if the data were 'physically' pooled.
#' @param CI a numeric, the confidence interval.
#' @param viewIter a boolean, tells whether the results of the intermediate iterations
#' should be printed on screen or not. Default is FALSE (i.e. only final results are shown).
#' @param datasources a list of opal object(s) obtained after login to opal servers;
#' these objects also hold the data assigned to R, as a \code{dataframe}, from opal datasources.
#' @return coefficients a named vector of coefficients
#' @return residuals the 'working' residuals, that is the residuals in the final
#' iteration of the IWLS fit.
#' @return fitted.values the fitted mean values, obtained by transforming the
#' linear predictors by the inverse of the link function.
#' @return rank the numeric rank of the fitted linear model.
#' @return family the \code{family} object used.
#' @return linear.predictors the linear fit on link scale.
#'
#' @author Burton,P;Gaye,A;Laflamme,P
#' @seealso \link{ds.lexis} for survival analysis using piecewise exponential regression
#' @seealso \link{ds.gee} for generalized estimating equation models
#' @export
#' @examples {
#'
#' # load the file that contains the login details
#' data(glmLoginData)
#'
#' # login and assign all the variables to R
#' opals <- datashield.login(logins=glmLoginData, assign=TRUE)
#'
#' # Example 1: run a GLM without interaction (e.g. diabetes prediction using BMI and HDL levels and GENDER)
#' mod <- ds.glm(formula='D$DIS_DIAB~D$GENDER+D$PM_BMI_CONTINUOUS+D$LAB_HDL', family='binomial')
#' mod
#' # Example 2: run the above GLM model without an intercept
#' # (produces separate baseline estimates for Male and Female)
#' mod <- ds.glm(formula='D$DIS_DIAB~0+D$GENDER+D$PM_BMI_CONTINUOUS+D$LAB_HDL', family='binomial')
#' mod
#' # Example 3: run the above GLM with interaction between GENDER and PM_BMI_CONTINUOUS
#' mod <- ds.glm(formula='D$DIS_DIAB~D$GENDER*D$PM_BMI_CONTINUOUS+D$LAB_HDL', family='binomial')
#' mod
#' # Example 4: Fit a standard Gaussian linear model with an interaction
#' mod <- ds.glm(formula='D$PM_BMI_CONTINUOUS~D$DIS_DIAB*D$GENDER+D$LAB_HDL', family='gaussian')
#' mod
#' # Example 5: now run a GLM where the error follows a poisson distribution
#' # P.S: A poisson model requires a numeric vector as outcome so in this example we first convert
#' # the categorical BMI, which is of type 'factor', into a numeric vector
#' ds.asNumeric('D$PM_BMI_CATEGORICAL','BMI.123')
#' mod <- ds.glm(formula='BMI.123~D$PM_BMI_CONTINUOUS+D$LAB_HDL+D$GENDER', family='poisson')
#' mod
#'
#' # clear the Datashield R sessions and logout
#' datashield.logout(opals)
#' }
#'
ds.glm <- function(formula=NULL, data=NULL, family=NULL, offset=NULL, weights=NULL, checks=FALSE, maxit=15, CI=0.95, viewIter=FALSE, datasources=NULL) {
# if no opal login details are provided look for 'opal' objects in the environment
if(is.null(datasources)){
datasources <- findLoginObjects()
}
# verify that 'formula' was set
if(is.null(formula)){
stop(" Please provide a valid regression formula!", call.=FALSE)
}
# check if user gave offset or weights directly in formula, if so the argument 'offset' or 'weights'
# to provide name of offset or weights variable
if(sum(as.numeric(grepl('offset', formula, ignore.case=TRUE)))>0 ||
sum(as.numeric(grepl('weights', formula, ignore.case=TRUE)))>0)
{
cat("\n\n WARNING: you may have specified an offset or regression weights")
cat("\n as part of the model formula. In ds.glm (unlike the usual glm in R)")
cat("\n you must specify an offset or weights separately from the formula")
cat("\n using the offset or weights argument.\n\n")
}
formula <- as.formula(formula)
# check that 'family' was set
if(is.null(family)){
stop(" Please provide a valid 'family' argument!", call.=FALSE)
}
# if the argument 'data' is set, check that the data frame is defined (i.e. exists) on the server site
if(!(is.null(data))){
defined <- isDefined(datasources, data)
}
# beginning of optional checks - the process stops if any of these checks fails #
if(checks){
message(" -- Verifying the variables in the model")
# call the function that checks the variables in the formula are defined (exist) on the server site and are not missing at complete
glmChecks(formula, data, offset, weights, datasources)
}else{
#message("WARNING:'checks' is set to FALSE; variables in the model are not checked and error messages may not be intelligible!")
}
#MOVE ITERATION COUNT BEFORE ASSIGNMENT OF beta.vect.next
#Iterations need to be counted. Start off with the count at 0
#and increment by 1 at each new iteration
iteration.count<-0
# number of 'valid' studies (those that passed the checks) and vector of beta values
numstudies <- length(datasources)
#ARBITRARY LENGTH FOR START BETAs AT THIS STAGE BUT IN LEGAL TRANSMISSION FORMAT ("0,0,0,0,0")
# Betas are serialised as a comma-separated string for transmission to the
# server-side functions.
beta.vect.next <- rep(0,5)
beta.vect.temp <- paste0(as.character(beta.vect.next), collapse=",")
#IDENTIFY THE CORRECT DIMENSION FOR START BETAs VIA CALLING FIRST COMPONENT OF glmDS
cally1 <- call('glmDS1', formula, family, data)
study.summary <- datashield.aggregate(datasources, cally1)
# num.par.glm<-study.summary$study1$dimX[2]
# Number of columns of the design matrix reported by the first study; the
# starting beta vector is resized to match.
num.par.glm<-study.summary[[1]][[1]][[2]]
beta.vect.next <- rep(0,num.par.glm)
beta.vect.temp <- paste0(as.character(beta.vect.next), collapse=",")
#Provide arbitrary starting value for deviance to enable subsequent calculation of the
#change in deviance between iterations
dev.old<-9.99e+99
#Convergence state needs to be monitored.
converge.state<-FALSE
#Define a convergence criterion. This value of epsilon corresponds to that used
#by default for GLMs in R (see section S3 for details)
epsilon<-1.0e-08
f<-NULL
# Distributed IWLS loop: each iteration sends the current betas to every
# study, pools the returned score vectors / information matrices by
# summation, and applies a Newton-type update until the relative change in
# total deviance falls below epsilon (or maxit is reached).
while(!converge.state && iteration.count < maxit) {
iteration.count<-iteration.count+1
message("Iteration ", iteration.count, "...")
#NOW CALL SECOND COMPONENT OF glmDS TO GENERATE SCORE VECTORS AND INFORMATION MATRICES
cally2 <- call('glmDS2', formula, family, beta.vect=beta.vect.temp, offset, weights, data)
study.summary <- datashield.aggregate(datasources, cally2)
# Helper: extract the named field from each study's result list.
.select <- function(l, field) {
lapply(l, function(obj) {obj[[field]]})
}
# Pool across studies by elementwise summation.
info.matrix.total<-Reduce(f="+", .select(study.summary, 'info.matrix'))
score.vect.total<-Reduce(f="+", .select(study.summary, 'score.vect'))
dev.total<-Reduce(f="+", .select(study.summary, 'dev'))
message("CURRENT DEVIANCE: ", dev.total)
if(iteration.count==1) {
# Sum participants only during first iteration.
nsubs.total<-Reduce(f="+", .select(study.summary, 'numsubs'))
# Save family
f <- study.summary[[1]]$family
}
#Create variance covariance matrix as inverse of information matrix
variance.covariance.matrix.total<-solve(info.matrix.total)
# Create beta vector update terms
beta.update.vect<-variance.covariance.matrix.total %*% score.vect.total
#Add update terms to current beta vector to obtain new beta vector for next iteration
if(iteration.count==1)
{
beta.vect.next<-rep(0,length(beta.update.vect))
}
beta.vect.next<-beta.vect.next+beta.update.vect
beta.vect.temp <- paste0(as.character(beta.vect.next), collapse=",")
#Calculate value of convergence statistic and test whether meets convergence criterion
converge.value<-abs(dev.total-dev.old)/(abs(dev.total)+0.1)
if(converge.value<=epsilon)converge.state<-TRUE
if(converge.value>epsilon)dev.old<-dev.total
if(viewIter){
#For ALL iterations summarise model state after current iteration
message("SUMMARY OF MODEL STATE after iteration ", iteration.count)
message("Current deviance ", dev.total," on ",(nsubs.total-length(beta.vect.next)), " degrees of freedom")
message("Convergence criterion ",converge.state," (", converge.value,")")
message("\nbeta: ", paste(as.vector(beta.vect.next), collapse=" "))
message("\nInformation matrix overall:")
message(paste(capture.output(info.matrix.total), collapse="\n"))
message("\nScore vector overall:")
message(paste(capture.output(score.vect.total), collapse="\n"))
message("\nCurrent deviance: ", dev.total, "\n")
}
}
if(!viewIter){
#For ALL iterations summarise model state after current iteration
message("SUMMARY OF MODEL STATE after iteration ", iteration.count)
message("Current deviance ", dev.total," on ",(nsubs.total-length(beta.vect.next)), " degrees of freedom")
message("Convergence criterion ",converge.state," (", converge.value,")")
message("\nbeta: ", paste(as.vector(beta.vect.next), collapse=" "))
message("\nInformation matrix overall:")
message(paste(capture.output(info.matrix.total), collapse="\n"))
message("\nScore vector overall:")
message(paste(capture.output(score.vect.total), collapse="\n"))
message("\nCurrent deviance: ", dev.total, "\n")
}
#If convergence has been obtained, declare final (maximum likelihood) beta vector,
#and calculate the corresponding standard errors, z scores and p values
#(the latter two to be consistent with the output of a standard GLM analysis)
#Then print out final model summary
if(converge.state)
{
family.identified<-0
beta.vect.final<-beta.vect.next
# Dispersion (scale) parameter: estimated from the residual deviance for
# gaussian models, fixed at 1 for the other families.
scale.par <- 1
if(f$family== 'gaussian') {
scale.par <- dev.total / (nsubs.total-length(beta.vect.next))
}
# NOTE(review): family.identified is set to 1 unconditionally here, so the
# family.identified==0 fallback further down can never run -- confirm
# whether this assignment was meant to live inside the gaussian branch.
family.identified<-1
se.vect.final <- sqrt(diag(variance.covariance.matrix.total)) * sqrt(scale.par)
z.vect.final<-beta.vect.final/se.vect.final
pval.vect.final<-2*pnorm(-abs(z.vect.final))
parameter.names<-names(score.vect.total[,1])
model.parameters<-cbind(beta.vect.final,se.vect.final,z.vect.final,pval.vect.final)
dimnames(model.parameters)<-list(parameter.names,c("Estimate","Std. Error","z-value","p-value"))
# Append Wald confidence intervals (and natural-scale transforms for the
# non-gaussian families) when a confidence level was requested.
if(CI > 0)
{
ci.mult <- qnorm(1-(1-CI)/2)
low.ci.lp <- model.parameters[,1]-ci.mult*model.parameters[,2]
hi.ci.lp <- model.parameters[,1]+ci.mult*model.parameters[,2]
estimate.lp <- model.parameters[,1]
if(family=="gaussian"){
estimate.natural <- estimate.lp
low.ci.natural <- low.ci.lp
hi.ci.natural <- hi.ci.lp
name1 <- paste0("low",CI,"CI")
name2 <- paste0("high",CI,"CI")
ci.mat <- cbind(low.ci.lp,hi.ci.lp)
dimnames(ci.mat) <- list(NULL,c(name1,name2))
}
# Binomial: inverse-logit for the first parameter (baseline probability),
# exponentiation (odds ratios) for the rest.
# NOTE(review): name1..name5 are only assigned inside the num.parms > 1
# branch, so a one-parameter binomial model would error at the dimnames
# call below -- confirm.
if(family=="binomial"){
family.identified <- 1
num.parms <- length(low.ci.lp)
estimate.natural <- exp(estimate.lp)/(1+exp(estimate.lp))
low.ci.natural <- exp(low.ci.lp)/(1+exp(low.ci.lp))
hi.ci.natural <- exp(hi.ci.lp)/(1+exp(hi.ci.lp))
if(num.parms > 1){
estimate.natural[2:num.parms] <- exp(estimate.lp[2:num.parms])
low.ci.natural[2:num.parms] <- exp(low.ci.lp[2:num.parms])
hi.ci.natural[2:num.parms] <- exp(hi.ci.lp[2:num.parms])
name1 <- paste0("low",CI,"CI.LP")
name2 <- paste0("high",CI,"CI.LP")
name3 <- paste0("P_OR")
name4 <- paste0("low",CI,"CI.P_OR")
name5 <- paste0("high",CI,"CI.P_OR")
}
ci.mat <- cbind(low.ci.lp,hi.ci.lp,estimate.natural,low.ci.natural,hi.ci.natural)
dimnames(ci.mat) <- list(NULL,c(name1,name2,name3,name4,name5))
}
# Poisson: exponentiate everything (rate ratios).
if(family=="poisson"){
family.identified <- 1
num.parms <- length(low.ci.lp)
estimate.natural <- exp(estimate.lp)
low.ci.natural <- exp(low.ci.lp)
hi.ci.natural <- exp(hi.ci.lp)
name1 <- paste0("low",CI,"CI.LP")
name2 <- paste0("high",CI,"CI.LP")
name3 <- paste0("EXPONENTIATED RR")
name4 <- paste0("low",CI,"CI.EXP")
name5 <- paste0("high",CI,"CI.EXP")
ci.mat <- cbind(low.ci.lp,hi.ci.lp,estimate.natural,low.ci.natural,hi.ci.natural)
dimnames(ci.mat) <- list(NULL,c(name1,name2,name3,name4,name5))
}
if(family.identified==0)
{
estimate.natural <- estimate.lp
low.ci.natural <- low.ci.lp
hi.ci.natural <- hi.ci.lp
name1 <- paste0("low",CI,"CI")
name2 <- paste0("high",CI,"CI")
ci.mat <- cbind(low.ci.lp,hi.ci.lp)
dimnames(ci.mat) <- list(NULL,c(name1,name2))
}
}
model.parameters<-cbind(model.parameters,ci.mat)
# Reconstruct a printable formula string, appending any offset/weights
# supplied via the separate arguments.
if(!is.null(offset)&&!is.null(weights)){
formulatext <- paste0(Reduce(paste, deparse(formula)), paste0(" + offset(", offset, ")"), paste0(" + weights(", weights, ")"))
}
if(!is.null(offset)&&is.null(weights)){
formulatext <- paste0(Reduce(paste, deparse(formula)), paste0(" + offset(", offset, ")"))
}
if(is.null(offset)&&!is.null(weights)){
formulatext <- paste0(Reduce(paste, deparse(formula)), paste0(" + weights(", weights, ")"))
}
if(is.null(offset)&&is.null(weights)){
formulatext <- Reduce(paste, deparse(formula))
}
# Assemble the result object returned to the analyst.
glmds <- list(
formula=formulatext,
family=f,
coefficients=model.parameters,
dev=dev.total,
df=(nsubs.total-length(beta.vect.next)),
nsubs=nsubs.total,
iter=iteration.count
)
class(glmds) <- 'glmds'
return(glmds)
} else {
warning(paste("Did not converge after", maxit, "iterations. Increase maxit parameter as necessary."))
return(NULL)
}
}
|
dc073796ef666f6263580b5b969ed5390a7b0a2c
|
023ffb12628a66f4e77a201f5cd54a631afb7927
|
/man/ChoosePlots.Rd
|
752ef415db56c1dde75223b61aab548a30fa3425
|
[] |
no_license
|
fschirr/VirSysMon
|
1a36343aa1c7ce09d88da4458371181e451306c0
|
8f3cdbb5001dd9f99f3250eb3b61b8988b04c26a
|
refs/heads/master
| 2020-04-01T21:04:30.566154
| 2016-06-13T15:16:57
| 2016-06-13T15:16:57
| 61,039,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 556
|
rd
|
ChoosePlots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampling_internal_r_code.R
\name{ChoosePlots}
\alias{ChoosePlots}
\title{Select plots of the dataset.}
\usage{
ChoosePlots(data, num.of.plots)
}
\arguments{
\item{data}{Complete dataset}
\item{num.of.plots}{A number}
}
\value{
chosenplots A vector with the name/number of the selected plots
}
\description{
\code{ChoosePlots} creates a vector in which every plot ID occurs once,
then randomly selects a certain number of plot IDs and saves them ordered
into a new vector.
}
|
e68a7442b33998f1dd56908641a59493d4018641
|
c52caf6775e94a4e74aa6d1d00dba3fa2fdf2ac7
|
/2014data.R
|
2d68c3b9b97e0e7f616ea83be17feb2b6c624013
|
[] |
no_license
|
yamiewoof/CPBLdata
|
644ecafe570dd358986b8eef76b483849fa4d478
|
0d7d54400a67ee8dc339f9352ddc697ceb1ed124
|
refs/heads/master
| 2020-05-09T21:28:41.795118
| 2019-05-01T15:04:46
| 2019-05-01T15:04:46
| 181,444,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,416
|
r
|
2014data.R
|
# 2014 CPBL play-by-play log processing: read the corrected log file and set up
# the keyword dictionaries used to infer base/out states from free-text log lines.
#rm(list=ls())
setwd("c:\\baseball")
library(dplyr)
# Read the corrected 2014 log; empty cells become NA, strings stay character.
data2014 <- read.csv("log_process\\logdata\\logdata_2014_corrected.csv",na.strings=c("","NA"),stringsAsFactors = FALSE)
# Prepend the season year as a column.
data2014 <- cbind(year = 2014,data2014)
# Keep only the columns needed for state reconstruction.
allgames2014 <- data2014 %>% select(1,5:13, 15, 16,18,19)
# allgames2014 <- data2019 %>% select(5:13, 15, 16,18,19) %>% slice(1:162) # first game only
# Placeholder columns filled by the state-machine loop below.
allgames2014$base1N <- NA
allgames2014$base2N <- NA
allgames2014$base3N <- NA
allgames2014$basesit <- NA
allgames2014$outN <- NA
allgames2014$rem_typeN <- NA
###
allgames2014$NOT <- NA
# Keywords whose presence means the line is NOT a plain unrecorded-out line
# (outs, substitutions, steals, hits, walks, inning headers, etc.).
noout <- c("一出局","二出局","三出局","更換","更動","變動","先發","換投","暫停","啟用","盜壘成功","捕逸","盜上","安打","保送","四壞球","暴投","全壘打",")","1上","1下","2上","2下","3上","3下","4上","4下","5上","5下","6上","6下","7上","7下","8上","8下","9上","9下","10上","10下","11上","11下","12上","12下")
###
# Phrases describing a single (one-base hit), incl. dropped third strike.
hit1_type <- c("一壘安打", "左外野安打", "中外野安打", "右外野安打",
"穿越安打", "平飛安打", "內野安打", "中間方向安打", "德州安打", "滾地安打", "不死三振")
# Sacrifice hit / sacrifice fly phrases.
sf_type <- c("犧牲打","犧牲球","犧牲飛球")
# Explicit base-occupancy phrases for each of the 7 non-empty base states.
base1 <- c("佔一壘", "一壘有人", "上一壘", "到一壘", "至一壘","攻佔一壘","攻占一壘")
base2 <- c("佔二壘", "二壘有人", "上二壘", "到二壘", "至二壘","攻佔二壘","攻占二壘")
base3 <- c("佔三壘", "三壘有人", "上三壘", "到三壘", "至三壘","攻佔三壘","攻占三壘")
base12 <- c("佔一二壘", "一二壘有人", "佔一、二壘", "一、二壘有人", "至一二壘", "至一、二壘","攻佔一二壘","攻占一二壘","攻佔一、二壘","攻占一、二壘")
base13 <- c("佔一三壘", "一三壘有人", "佔一、三壘", "一、三壘有人", "至一三壘", "至一、三壘","攻佔一三壘","攻占一三壘","攻佔一、三壘","攻占一、三壘")
base23 <- c("佔二三壘", "二三壘有人", "佔二、三壘", "二、三壘有人", "至二三壘", "至二、三壘","攻佔二三壘","攻占二三壘","攻佔二、三壘","攻占二、三壘")
base123 <- c("滿壘")
# Union of the above, used to detect whether ANY explicit base phrase appears.
allbases <- c("佔一壘", "一壘有人", "上一壘","佔二壘", "二壘有人", "上二壘","佔三壘", "三壘有人", "上三壘","佔一二壘", "一二壘有人", "佔一、二壘", "一、二壘有人","佔一三壘", "一三壘有人", "佔一、三壘", "一、三壘有人","佔二三壘", "二三壘有人", "佔二、三壘", "二、三壘有人","滿壘")
# Events that put the batter on first / second / third respectively.
onfirstbase <- c("一壘安打", "左外野安打", "中外野安打", "右外野安打",
"穿越安打", "平飛安打", "內野安打", "中間方向安打", "德州安打", "滾地安打", "不死三振","保送","失誤")
onsecondbase <- c("二壘安打")
onthirdbase <- c("三壘安打")
alldead <- c("雙殺")
# Phrases that indicate a run scored on the play.
scores <- c("打點","得分","回本壘")
# Add a logical column flagging lines where a run scored
# (NOTE: "scores" may have more than these three textual forms).
allgames2014$scores <- F
for ( i in 1:nrow(allgames2014)){
for (j in 1:length(scores)) {
if (grepl(scores[j], allgames2014$log[i])){
allgames2014$scores[i] <- T
}}
}
# Parse the explicit out-count column ("零/一/二出局" = zero/one/two outs)
# into a numeric outnum column, carrying the previous row's value forward
# when the column is NA. Row 1 intentionally stays NA (loop starts at 2).
# Note: grepl(..., NA) returns FALSE, so the carried-forward value survives.
allgames2014$outnum <- NA
for (i in 2:nrow(allgames2014))
{
if (is.na(allgames2014$out[i])){
allgames2014$outnum[i] <- allgames2014$outnum[i-1]
}
if (grepl("零出局", allgames2014$out[i])){
allgames2014$outnum[i] <- 0
}
else if (grepl("一出局", allgames2014$out[i])){
allgames2014$outnum[i] <- 1
}
else if (grepl("二出局", allgames2014$out[i])){
allgames2014$outnum[i] <- 2
}
}
# Main state machine: walk the log line by line and reconstruct, for each row,
# the base situation (basesit: 0,1,2,3,12,13,23,123), the out count (outN),
# and rem_typeN = the index (1..24) of the 8 base-states x 3 out-counts grid.
# NOTE(review): `previous` and `currently` are read (e.g. at the "two
# consecutive batters reach base" checks) before they are ever assigned on the
# very first rows — this only works if they already exist in the session
# environment. TODO confirm intended behavior on a fresh session.
for ( i in 1:nrow(allgames2014)) {
# First row: initialize inning / bases / outs.
if(i==1){
inning_now <- "一上"
base1_now <- NA
base2_now <- NA
base3_now <- NA
allgames2014$basesit[i] <- 0
out_now <- 0
}
# Carry base-runner columns forward from the previous row.
if(i>1){
allgames2014$base1N[i] <- allgames2014$base1N[i-1]
allgames2014$base2N[i] <- allgames2014$base2N[i-1]
allgames2014$base3N[i] <- allgames2014$base3N[i-1]
}
# New half-inning: clear bases and outs.
if (allgames2014$inning[i] != inning_now){
inning_now <- allgames2014$inning[i]
out_now <- 0
allgames2014$base1N[i] <- NA
allgames2014$base2N[i] <- NA
allgames2014$base3N[i] <- NA
allgames2014$basesit[i] <- 0
}
# Explicit out-count phrases in the log text override the running count.
if (grepl("一出局", allgames2014$log[i])){
out_now <- 1
}
if (grepl("二出局", allgames2014$log[i])){
out_now <- 2
}
# NOTE(review): "三出局" (three outs) also sets out_now to 2, not 3 or 0 —
# presumably the inning-change branch resets it; confirm this is intended.
if (grepl("三出局", allgames2014$log[i])){
out_now <- 2
}
# Count how many explicit base-occupancy phrases appear in this line.
count <- 0
for (j in 1:length(allbases)) {
if (grepl(allbases[j], allgames2014$log[i]) == TRUE) # base-state keyword present
{count <- count + 1
}}
# No explicit base-state phrase: infer the new state from the event type.
if(count==0){
for (k in 1:length(onfirstbase)){
if (grepl(onfirstbase[k], allgames2014$log[i])==TRUE){ # bases empty, batter reaches first
allgames2014$basesit[i] <- 1
currently <- 1
}}
for (k in 1:length(onsecondbase)){
if (grepl(onsecondbase[k], allgames2014$log[i])==TRUE){ # bases empty, batter reaches second
allgames2014$basesit[i] <- 2
currently <- 2
}}
for (k in 1:length(onthirdbase)){
if (grepl(onthirdbase[k], allgames2014$log[i])==TRUE){ # bases empty, batter reaches third
allgames2014$basesit[i] <- 3
currently <- 3
}}
for (k in 1:length(alldead)){
if (grepl(alldead[k], allgames2014$log[i])==TRUE){ # double play clears the inferred state
allgames2014$basesit[i] <- 0
}}
#####################################################################################################################################################
# Two consecutive batters reaching base: combine previous and current state.
# Both batters reached first -> runners on first and second.
if (previous==1 & currently == 1){
allgames2014$basesit[i] <- 12
}
# Runner on second, then a walk (no run scored) -> first and second.
if (previous==2 & currently == 1 & allgames2014$scores[i] == F){
allgames2014$basesit[i] <- 12
}
# Runner on second, then a single that scores him -> runner on first only.
if (previous==2 & currently == 1 & allgames2014$scores[i] == T){
allgames2014$basesit[i] <- 1
}
# Runner on first, then a double (no run) -> second and third.
if (previous==1 & currently == 2 & allgames2014$scores[i] == F){
allgames2014$basesit[i] <- 23
}
# Second and third occupied, then a walk (no run) -> bases loaded.
if (previous==23 & currently == 1 & allgames2014$scores[i] == F){
allgames2014$basesit[i] <- 123
}
# Sacrifice fly/hit: lead runner on third scores, others hold.
for (k in 1:length(sf_type)){
if (grepl(sf_type[k], allgames2014$log[i])){
# sacrifice fly
if (allgames2014$basesit[i-1] == 3){
allgames2014$basesit[i] <- 0
}
else if (allgames2014$basesit[i-1] == 13){
allgames2014$basesit[i] <- 1
}
else if (allgames2014$basesit[i-1] == 23){
allgames2014$basesit[i] <- 2
}
else if (allgames2014$basesit[i-1] == 123){
allgames2014$basesit[i] <- 12
}
}}
# Sentinel so the "consecutive reach" checks above never match next row
# unless `currently` is reassigned.
currently <- 1000
}
# Explicit base-occupancy phrases win over any inference above.
for (j in 1:length(base1)) {
if (grepl(base1[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 1
}}
for (j in 1:length(base2)) {
if (grepl(base2[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 2
}}
for (j in 1:length(base3)) {
if (grepl(base3[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 3
}}
for (j in 1:length(base12)) {
if (grepl(base12[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 12
}}
for (j in 1:length(base13)) {
if (grepl(base13[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 13
}}
for (j in 1:length(base23)) {
if (grepl(base23[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 23
}}
for (j in 1:length(base123)) {
if (grepl(base123[j], allgames2014$log[i])){
allgames2014$basesit[i] <- 123
}}
# Home run ("全壘打") clears the bases.
if (grepl("全壘打", allgames2014$result[i])){
allgames2014$basesit[i] <- 0
}
# Nothing matched: carry the previous base situation forward.
if (is.na(allgames2014$basesit[i])){
allgames2014$basesit[i] <- allgames2014$basesit[i-1]
}
################################################################
allgames2014$outN[i] <- out_now
# Flag lines containing NONE of the `noout` keywords: these are candidate
# rows whose out count was not written explicitly and must be inferred.
outcount <- 0
for (j in 1:length(noout)) {
if (grepl(noout[j], allgames2014$log[i]) == TRUE) # a non-out keyword is present
{outcount <- outcount + 1
}
}
if(outcount==0){
allgames2014$NOT[i] <- TRUE
}
# Infer the out count for rows where it was not written explicitly.
if(is.na(allgames2014$NOT[i])==FALSE && is.na(allgames2014$result[i])==FALSE){
# strikeout (excluding dropped third strike "不死三振")
if((allgames2014$result[i]=="三振") && (grepl("不死三振",allgames2014$log[i])==FALSE) )
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# fielding error with a runner thrown out
if((grepl("失誤",allgames2014$result[i])==TRUE) && (grepl("跑者出局",allgames2014$log[i])==TRUE) )
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# putout (tag out), no error
if((grepl("刺殺",allgames2014$result[i])==TRUE) && (grepl("失誤",allgames2014$result[i])==FALSE))
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# force out, no error
if((grepl("封殺",allgames2014$result[i])==TRUE) && (grepl("失誤",allgames2014$result[i])==FALSE))
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# bunt with batter not reaching base
# (verified via warnings for 2014; other seasons still need checking)
if((grepl("觸擊",allgames2014$result[i])==TRUE) && (grepl("失誤",allgames2014$result[i])==FALSE) && (is.na(allgames2014$special[i])==TRUE))
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# sacrifice fly
if((grepl("高飛犧牲打",allgames2014$result[i])==TRUE))
{
allgames2014$outN[i] <- allgames2014$outN[i-1] +1
}
# double play
if((grepl("雙殺",allgames2014$result[i])==TRUE) )
{
allgames2014$outN[i] <- 2
}
}
# Map (basesit, outN) to rem_typeN, the index into the 24-cell
# base-out matrix: 8 base states x outs {0,1,2}.
#rem = 1
if ( allgames2014$basesit[i] == 0 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 1
}
#rem = 2
if ( allgames2014$basesit[i] == 0 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 2
}
#rem = 3
if ( allgames2014$basesit[i] == 0 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 3
}
#rem = 4
if ( allgames2014$basesit[i] == 1 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 4
}
#rem = 5
if ( allgames2014$basesit[i] == 1 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 5
}
#rem = 6
if ( allgames2014$basesit[i] == 1 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 6
}
#rem = 7
if ( allgames2014$basesit[i] == 2 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 7
}
#rem = 8
if ( allgames2014$basesit[i] == 2 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 8
}
#rem = 9
if ( allgames2014$basesit[i] == 2 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 9
}
#rem = 10
if ( allgames2014$basesit[i] == 3 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 10
}
#rem = 11
if ( allgames2014$basesit[i] == 3 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 11
}
#rem = 12
if ( allgames2014$basesit[i] == 3 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 12
}
#rem = 13
if ( allgames2014$basesit[i] == 12 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 13
}
#rem = 14
if ( allgames2014$basesit[i] == 12 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 14
}
#rem = 15
if ( allgames2014$basesit[i] == 12 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 15
}
#rem = 16
if ( allgames2014$basesit[i] == 13 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 16
}
#rem = 17
if ( allgames2014$basesit[i] == 13 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 17
}
#rem = 18
if ( allgames2014$basesit[i] == 13 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 18
}
#rem = 19
if ( allgames2014$basesit[i] == 23 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 19
}
#rem = 20
if ( allgames2014$basesit[i] == 23 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 20
}
#rem = 21
if ( allgames2014$basesit[i] == 23 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 21
}
#rem = 22
if ( allgames2014$basesit[i] == 123 && allgames2014$outN[i] == 0 ){
allgames2014$rem_typeN[i] <- 22
}
#rem = 23
if ( allgames2014$basesit[i] == 123 && allgames2014$outN[i] == 1 ){
allgames2014$rem_typeN[i] <- 23
}
#rem = 24
if ( allgames2014$basesit[i] == 123 && allgames2014$outN[i] == 2 ){
allgames2014$rem_typeN[i] <- 24
}
previous <- allgames2014$basesit[i] # remember current base situation for next row
}
# Finalize: make rem_typeN a factor and build comparison2014 where several
# state columns are shifted up one row (each row paired with the NEXT state).
allgames2014$rem_typeN <- as.factor(allgames2014$rem_typeN)
str(allgames2014)
##############################################################
comparison2014 <- allgames2014
# Shift the state columns up one row (dplyr::lead).
# NOTE(review): this leads `rem_type`, but the computed column is named
# `rem_typeN` — looks like a column-name mismatch; confirm which is intended.
comparison2014 <- transform(comparison2014, rem_type = lead(rem_type))
comparison2014 <- transform(comparison2014, outnum = lead(outnum))
comparison2014 <- transform(comparison2014, out = lead(out))
comparison2014 <- transform(comparison2014, base1 = lead(base1))
comparison2014 <- transform(comparison2014, base2 = lead(base2))
comparison2014 <- transform(comparison2014, base3 = lead(base3))
# Reorder/select output columns by position.
comparison2014 <- comparison2014 %>% select(2:4,9:11,14,12,22,19,6:8,18,5,20:21)
#write.csv(comparison2014,file = sprintf("log_process\\logdata\\comparison2014.csv"), row.names=FALSE)
#write.csv(allgames2014,file = sprintf("log_process\\logdata\\allgames2014.csv"), row.names=FALSE)
|
8e58c647494c611e0a1b36095f8f80b5419d7764
|
e6206fc1b5098534234df1d4d4ce2a12cc5d9bb3
|
/R/get-hosp-data.r
|
158b4fd095c4655c57b6aa21f6ab6a7f779253be
|
[
"MIT"
] |
permissive
|
hrbrmstr/cdcfluview
|
bc37e36cbc588479f50ff30c377f320204fd81a7
|
60ffe50553371148e962daf5559fce8270ce3fdd
|
refs/heads/master
| 2023-09-04T08:52:54.404129
| 2022-11-22T17:49:14
| 2022-11-22T17:49:14
| 29,077,478
| 64
| 24
|
NOASSERTION
| 2023-05-27T13:35:26
| 2015-01-11T01:26:20
|
R
|
UTF-8
|
R
| false
| false
| 4,246
|
r
|
get-hosp-data.r
|
#' Retrieves influenza hospitalization statistics from the CDC (deprecated)
#'
#' Uses the data source from the
#' \href{https://gis.cdc.gov/GRASP/Fluview/FluHospRates.html}{CDC FluView}
#' and provides influenza hospitalization reporting data as a data frame.
#'
#' @param area one of "\code{flusurvnet}", "\code{eip}", "\code{ihsp}", or two
#'        digit state abbreviation for an individual site. Exceptions are
#'        New York - Albany ("\code{nya}") and New York - Rochester
#'        ("\code{nyr}")
#' @param age_group a vector of age groups to pull data for. Possible values are:
#'        "\code{overall}", "\code{0-4y}", "\code{5-17y}, "\code{18-49y},
#'        "\code{50-64y}, "\code{65+y}".
#' @param years a vector of years to retrieve data for (i.e. \code{2014} for CDC
#'        flu season 2014-2015). Default value is the current year and all
#'        \code{years} values should be >= \code{2009}
#' @return A single \code{data.frame}.
#' @note There is often a noticeable delay when making the API request to the CDC.
#'       This is not due to a large download size, but the time it takes for their
#'       servers to crunch the data. Wrap the function call in \code{httr::with_verbose}
#'       if you would like to see what's going on.
#' @export
#' @examples \dontrun{
#' # All of FluSurv-NET, 50-64 years old, 2010/11-2014/15 flu seasons
#' hosp <- get_hosp_data("flusurvnet", "50-64y", years=2010:2014)
#' }
get_hosp_data <- function(area="flusurvnet", age_group="overall",
                          years=as.numeric(format(Sys.Date(), "%Y")) - 1) {

  message(
    paste0(
      c("This function has been deprecated and will be removed in future releases.",
        "Use hospitalizations() instead."),
      collapse="\n"
    )
  )

  area <- tolower(area)
  age_group <- tolower(age_group)

  # Check length(area) BEFORE the %in% test: a length > 1 `area` would make the
  # `if` condition a vector (an error in R >= 4.2) before the intended message.
  if (length(area) != 1)
    stop("Error: can only select one area")

  if (!(area %in% c("flusurvnet", "eip", "ihsp", "ca", "co", "ct", "ga", "md",
                    "mn", "nm", "nya", "nyr", "or", "tn", "id", "ia", "mi",
                    "oh", "ok", "ri", "sd", "ut")))
    stop("Error: area must be one of flusurvnet, eip, ihsp, or a valid state abbreviation")

  if (!all(age_group %in% c("overall", "0-4y", "5-17y", "18-49y",
                            "50-64y", "65+y")))
    stop("Error: invalid age group specified")

  if (any(years < 2009))
    stop("Error: years should be >= 2009")

  # Match names of age groups to the numeric codes expected by the API.
  age_match <- data.frame(age_group = c("overall", "0-4y", "5-17y",
                                        "18-49y", "50-64y", "65+y"),
                          code = c(6, 1, 2, 3, 4, 5),
                          stringsAsFactors = FALSE)
  age_group_num <- age_match$code[age_match$age_group %in% age_group]

  # The API encodes seasons as years since 1960.
  years <- years - 1960

  # Lookup table: area name -> API catchment id and surveillance network id.
  area_match <- data.frame(
    area = c("flusurvnet", "eip", "ca", "co", "ct",
             "ga", "md", "mn", "nm", "nya", "nyr", "or",
             "tn", "ihsp", "id", "ia", "mi", "oh", "ok",
             "ri", "sd", "ut"),
    catch = c(22, 22, 1, 2, 3, 4, 7, 9, 11, 13, 14, 17,
              20, 22, 6, 5, 8, 15, 16, 18, 19, 21),
    network = c(1, rep(2, 12), rep(3, 9)),
    stringsAsFactors=FALSE
  )

  # The API expects seasons and age groups as lists of {ID: value} objects.
  year_list <- lapply(seq_along(years),
                      function(x) list(ID = years[x]))
  age_list <- lapply(seq_along(age_group_num),
                     function(x) list(ID = age_group_num[x]))

  params <- list(AppVersion = "Public",
                 agegroups = age_list,
                 catchmentid = area_match$catch[area_match$area == area],
                 networkid = area_match$network[area_match$area == area],
                 seasons = year_list)

  out_file <- tempfile(fileext=".json")

  # POST the query and stream the JSON response straight to disk.
  tmp <- httr::POST("https://gis.cdc.gov/GRASP/Flu3/PostPhase03DownloadData",
                    body = params,
                    encode = "json",
                    httr::write_disk(out_file, overwrite = TRUE))

  httr::stop_for_status(tmp)

  if (!(file.exists(out_file)))
    stop("Error: cannot process downloaded data")

  file <- jsonlite::fromJSON(out_file)[[1]]

  return(file)

}
|
2222cdd945840c2224c1fc536361054b905ce9a2
|
8078d61b576fc31a7ff3c59cf83688042f8660db
|
/qExponential/man/comp.n.q.pad.Rd
|
b7fd9ba660d2aa3501f7968b5a27d65d1b590869
|
[] |
no_license
|
Alessandra23/q-Exponential-mfmm
|
ac6704459e5c91e51e9c9a783db022dafc49cde8
|
16789a8653836231908335f890ec236999d009fc
|
refs/heads/master
| 2023-07-12T23:42:13.588015
| 2021-08-17T19:20:07
| 2021-08-17T19:20:07
| 231,951,866
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 312
|
rd
|
comp.n.q.pad.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_comp_n_stand.R
\name{comp.n.q.pad}
\alias{comp.n.q.pad}
\title{Function to create the plot of theta stan comp n}
\usage{
comp.n.q.pad(values, n.values, v.values)
}
\description{
Function to create the plot of theta stan comp n
}
|
523f1c2bad7b952071aa147086bdca876df54df6
|
04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612
|
/man/alert.Rd
|
f5c27ebda369a6e291ef34d63b025508e016a480
|
[
"MIT"
] |
permissive
|
pbs-assess/csasdown
|
796ac3b6d30396a10ba482dfd67ec157d7deadba
|
85cc4dda03d6513c11350f7f607cce1cacb6bf6a
|
refs/heads/main
| 2023-08-16T17:22:18.050497
| 2023-08-16T00:35:31
| 2023-08-16T00:35:31
| 136,674,837
| 47
| 18
|
NOASSERTION
| 2023-06-20T01:45:07
| 2018-06-08T23:31:16
|
R
|
UTF-8
|
R
| false
| true
| 368
|
rd
|
alert.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{alert}
\alias{alert}
\title{Issue a warning}
\usage{
alert(...)
}
\arguments{
\item{...}{Arguments that make up the message}
}
\description{
Issue a warning
}
\details{
Uses \link[cli:symbol]{cli::symbol} and \code{\link[crayon:crayon]{crayon::yellow()}}
}
\keyword{internal}
|
b4c1ce84f6a049682e540165f5424e939c79dfab
|
f2bae7d51a38370043ee35281c89df7bf31b253b
|
/02_Analyses_ITS1.R
|
17597b52c99402f20ab1501961df483e45f7a6bb
|
[] |
no_license
|
gzahn/Utah_Fires
|
31f170c9be80bf7dad5cdc8b2317a400281a2506
|
7501142769816c5b87d68e6467434a448261625e
|
refs/heads/master
| 2022-02-18T05:41:30.606076
| 2022-01-31T17:23:03
| 2022-01-31T17:23:03
| 170,213,972
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,440
|
r
|
02_Analyses_ITS1.R
|
# ITS Forest Soil Fungi Burn Recovery Exploratory analyses
# Load packages ####
# NOTE(review): several packages are attached more than once below
# (tools, microbiome, lme4, modelr) — harmless but worth deduplicating.
library(phyloseq)
library(tidyverse)
library(vegan)
library(ade4)
library(sf)
library(rnaturalearth)
library(rnaturalearthdata)
library(maps)
library(maptools)
library(tools)
library(ggmap)
library(tools)
library(ggbiplot)
library(microbiome)
library(RColorBrewer)
library(dada2)
library(ecodist)
library(modelr)
# library(ggmap)
# library(maps)
library(colorblindr)
library(lme4)
library(corncob)
library(broom)
library(patchwork)
library(microbiome)
library(ggpubr)
library(lme4)
library(lmerTest)
# Project-local helpers: color palettes and a patched plot_bar.
source("./palettes.R")
source("./plot_bar2.R")
palette_plot(pal.discrete)
library(modelr)
#Set ggplot theme
theme_set(theme_bw())
# import phyloseq object and metadata ####
ps_ITS = readRDS("./output/phyloseq_object_ITS_noncontam.RDS")
# NOTE(review): readRDS("./") reads a directory and will error at run time —
# looks like a stray/leftover line; confirm and remove.
readRDS("./")
# import metadata
meta = read.csv("./Fire_metadata_plus_GDC.csv")
# Subset to Fire Samples only ####
ps_FF = subset_samples(ps_ITS, Project_Name == "Forest_Fire_Recovery")
sample_names(ps_FF)
sample_names(ps_FF)
# Keep only metadata rows matching the fire samples, aligned by SampleID.
meta$SampleID <- as.character(meta$SampleID)
meta = meta[which(meta$SampleID %in% sample_names(ps_FF)),]
meta = meta[order(meta$SampleID),]
row.names(meta) = meta$SampleID
# build new phyloseq object with updated fire metadata
ps_FF = phyloseq(otu_table(ps_FF), sample_data(meta), tax_table(ps_FF))
# remove neg control
ps_FF = subset_samples(ps_FF,SampleID != "FS-NEG2")
# NOTE(review): this readRDS OVERWRITES the ps_FF built above, discarding the
# subsetting and metadata work — presumably the saved RDS already contains
# that result; confirm intent.
ps_FF = readRDS("./output/phyloseq_object_ITS.RDS")
ps_ITS <- ps_FF
sample_data(ps_FF)
meta <- sample_data(ps_FF)
sample_names(ps_FF)
meta_df <- meta(ps_FF)
# examine non-fungi ####
taxa = ps_ITS@tax_table
kingdoms = c(unique(taxa[,1]))
# plot kingdom abundance ####
tax_summ = as.character(summary(as.data.frame(taxa)["Kingdom"]))
ggplot(as.data.frame(taxa), aes(x=Kingdom)) +
geom_bar(stat = "count") + ggtitle("No. of taxa from each kingdom")
ggsave("./output/non-fungal_taxa_assignment_plot.png")
# Remove non-fungi ####
ps_FF = subset_taxa(ps_FF, Kingdom == "k__Fungi")
# Plot diversity ####
plot_richness(ps_FF,x="Set_ID",measures = "Shannon") +
stat_smooth() +
facet_grid(~FireTreatment)
# Clean up taxonomy names: strip the "k__"/"p__"/... rank prefixes
# from all 7 taxonomy columns.
for(i in 1:7){
tax_table(ps_FF)[,i] <- str_remove(tax_table(ps_FF)[,i],".__")
}
# clean up empty samples and ESVs (keep counts > 1)
summary(sample_sums(ps_FF))
summary(taxa_sums(ps_FF))
ps_FF <- subset_taxa(ps_FF,taxa_sums(ps_FF) > 1)
ps_FF <- subset_samples(ps_FF,sample_sums(ps_FF) > 1)
# Plot diversity ####
plot_richness(ps_FF,x="Set_ID",measures = "Shannon") +
stat_smooth() +
facet_grid(~FireTreatment) + labs(y="Shannon diversity")
ggsave("./output/alpha_diversity_scatterplot.png")
# Attach alpha-diversity measures to the sample metadata.
ps_FF@sam_data$Shannon <- estimate_richness(ps_FF, measures="Shannon")$Shannon
sam = as.data.frame(ps_FF@sam_data)
sam = (as(sam, "data.frame"))
ggplot(sam, aes(x=Set_ID,y=Shannon,color=FireTreatment)) +
geom_violin()
ggsave("./output/Shannon_Div_by_set.png", dpi=300)
names(sample_data(ps_FF))
ps_FF@sam_data$Richness <- specnumber(otu_table(ps_FF))
# Add grouping factor for mean annual temp: 3 equal-width bins relabelled A-C.
# NOTE(review): the str_replace patterns hard-code the bin edges cut() happens
# to produce for this dataset — they will silently fail on different data.
temps <- ps_FF@sam_data$ANNUAL_MEAN_TEMP
tempgroups <- cut(temps,3)
tempgroups <- as.character(tempgroups) %>% str_replace("\\(-7.9,-6.4]","A: -7.9:-6.4") %>%
str_replace("\\(-6.4,-4.9]","B: -6.4:-4.9") %>%
str_replace("\\(-4.9,-3.4]","C: -4.9:-3.4") %>%
factor(ordered = FALSE)
ps_FF@sam_data$TempGroup <- tempgroups
# export ps_FF
saveRDS(ps_FF,"./output/final_phyloseq_object.RDS")
#
# Basic plots of diversity for overall data
# stacked boxplots x-axis factor(years since burn), y-axis relative-abundance
ps_FF %>% merge_samples(group = "FireTreatment") %>%
transform_sample_counts(function(x){x/sum(x)}) %>%
plot_bar2(fill="Phylum") + scale_fill_manual(values = pal.discrete) + labs(y="Relative abundance",x="Site type")
ggsave("./output/Barplot_Phylum_mean_by_burn_treatment.png",dpi=300,height = 6,width = 6)
# Alph diversity vs fire treatment boxplot
ggplot(sam, aes(x=FireTreatment,y=Shannon,fill=FireTreatment)) + geom_boxplot() + geom_jitter(width = .1) + labs(y="Shannon diversity") +
scale_fill_manual(values = pal.discrete[c(2,6)])
ggsave("./output/Shannon_Div_by_treatment.png", dpi=300)
# Two-way ANOVA: diversity by site set and burn treatment.
mod1 = aov(Shannon ~ Set_ID * FireTreatment, data=sam)
summary(mod1)
# Drop zero-diversity samples and add years-since-burn (duplicated line is a no-op).
sam = sam[sam$Shannon>0,]
sam = mutate(sam, YearsSinceBurn = 2019-BurnYear)
sam = mutate(sam, YearsSinceBurn = 2019-BurnYear)
# NOTE(review): specnumber() is computed on ALL ps_FF samples while `sam` was
# just filtered to Shannon > 0 — lengths may mismatch; confirm.
sam$Richness <- specnumber(otu_table(ps_FF))
ggplot(sam, aes(x=YearsSinceBurn,y=Shannon,color=FireTreatment)) +
geom_jitter() + stat_smooth(method="lm", se = FALSE) + labs(x="Years Since Burn",y="Shannon Diversity") +
scale_x_reverse()
ggsave("./output/Shannon_div_raw_over_time.png", dpi=300)
# Remove low-abundance taxa and empty samples ####
# remove empty taxa, and those with fewer than 10 occurrences
ps_FF = subset_taxa(ps_FF, colSums(ps_FF@otu_table) > 9)
# remove empty samples
ps_FF = subset_samples(ps_FF, rowSums(ps_FF@otu_table) != 0)
# quick barplot
source("./plot_bar2.R")
plot_bar2(ps_FF, x = "FireTreatment", fill = "Class") + theme_bw()
ggsave("./output/barplot_class.png", dpi = 300)
names(sam)
# richness vs time since fire (as a factor), colored by fire treatment
sam %>%
group_by(YearsSinceBurn, FireTreatment) %>%
dplyr::summarize(N=n(),Mean_Shannon_Div=mean(Shannon),Upper_Shannon=Mean_Shannon_Div+sd(Shannon),Lower_Shannon=Mean_Shannon_Div-sd(Shannon)) %>%
ggplot(aes(x=factor(YearsSinceBurn),y=Mean_Shannon_Div,ymin=Lower_Shannon,ymax=Upper_Shannon,color=FireTreatment)) +
geom_errorbar() + geom_line()
# NOTE(review): bare `summarize` below just prints the function — leftover line?
summarize
sam %>%
ggplot(aes(x=factor(YearsSinceBurn),y=Shannon,color=FireTreatment)) +
geom_boxplot() + geom_line(aes(group=factor(YearsSinceBurn)))
sam %>%
ggplot(aes(x=factor(YearsSinceBurn),y=Richness,color=FireTreatment)) +
geom_boxplot() + geom_line(aes(group=factor(YearsSinceBurn)))
# Remove low-abundance taxa and empty samples ####
# quick barplot
ps_FF %>%
merge_samples(group = "FireTreatment") %>%
transform_sample_counts(function(x){x/sum(x)}) %>%
plot_bar2(fill = "Phylum") + theme_bw() + labs(y="Relative abundance") + scale_fill_manual(values=pal.discrete)
ggsave("./output/barplot_phylum_burntreatment.png", dpi = 300)
# merge samples by burn and group ####
# Composite key "<FireTreatment>_<Set_ID>" used to merge replicate samples.
newpastedvar = paste(sample_data(ps_FF)$FireTreatment, sample_data(ps_FF)$Set_ID, sep = "_")
sample_data(ps_FF)$NewPastedVar = newpastedvar
firelevels = levels(sample_data(ps_FF)$FireTreatment)
setlevels = levels(sample_data(ps_FF)$Set_ID)
lat = unique(sample_data(ps_FF)$Latitude)
lon = unique(sample_data(ps_FF)$Longitude)
latlon = as.data.frame(cbind(ps_FF@sam_data$NewPastedVar, as.numeric(ps_FF@sam_data$Latitude), as.numeric(ps_FF@sam_data$Longitude)))
psm = merge_samples(ps_FF, "NewPastedVar")
# repair values (merge_samples converts factors to integers)
# NOTE(review): rep(firelevels, each = 10) + one extra hard-codes the merged
# sample count/order — will break if the set composition changes; confirm.
psm@sam_data$FireTreatment <- c(rep(firelevels, each = 10),firelevels[2])
psm@sam_data$Set_ID <- setlevels[psm@sam_data$Set_ID]
# normalize (relabund) ####
psm_ra = transform_sample_counts(psm, function(OTU) OTU/sum(OTU) )
# Drop taxa with unassigned Class.
psm_ra = subset_taxa(psm_ra, Class != "c__NA|NA")
psm_ra = subset_taxa(psm_ra, Class != "c__NA")
plot_bar2(psm_ra, x="BurnYear",fill = "Class") + facet_wrap(~Class) + theme_bw()
ggsave("./output/Class_by_burnyear.png", dpi=300, width = 12,height = 10)
# Fix longitude: any longitude east of -100 is a data-entry error; set to site value.
sample_data(psm_ra)$Longitude[which(sample_data(psm_ra)$Longitude > -100)] <- -111.33787
# Normalize full ps object ####
ps_ra = transform_sample_counts(ps_FF, function(OTU) OTU/sum(OTU) )
# Find community distance between burned and non-burned within each set ####
# find and remove any sets that don't have both burned and unburned samples remaining
sets = as.character(purrr::map(strsplit(unique(ps_ra@sam_data$NewPastedVar), split = "_"),3))
sets = as.data.frame(table(sets))
goodsets = as.character(sets[which(sets$Freq > 1),1])
goodsets = goodsets[order(as.numeric(goodsets))]
goodsets = paste0("Set_",goodsets)
# set vectors and counter
set = c()
meandist = c()
sddist = c()
x=1
# For-loop calculates mean and stdev between burned and unburned samples in each set ####
# For each set: Bray-Curtis distances (vegdist default) between every
# burned/unburned sample pair, summarized as mean and sd.
for(i in goodsets){
ps = subset_samples(ps_ra, Set_ID == i)
psb = subset_samples(ps, FireTreatment == "Burn")
psn = subset_samples(ps, FireTreatment == "NonBurn")
dist = as.data.frame(as.matrix(vegdist(otu_table(ps))))
dist_fire = dist[sample_names(psb),sample_names(psn)]
mean = mean(c(as.matrix(dist_fire)))
sdev = sd(c(as.matrix(dist_fire)))
set[x] <- i
meandist[x] <- mean
sddist[x] <- sdev
x=x+1
}
# Look up the burn year for each retained set.
setyear = as.data.frame(unique(cbind(as.character(ps_ra@sam_data$Set_ID),ps_ra@sam_data$BurnYear)))
setyear = setyear[setyear$V1 %in% set,]
# NOTE(review): this hard-coded row reordering assumes a specific row order
# from unique(); fragile if the input data changes — confirm.
setyear = setyear[c(3,4,5,6,7,8,9,10,1,2),]
# build data frame and plot ####
dist.df = data.frame(Set = set, MeanDist = meandist, StDevDist = sddist, BurnYear = as.numeric(as.character(setyear$V2)))
dist.df$upper = dist.df$MeanDist + dist.df$StDevDist
dist.df$lower = dist.df$MeanDist - dist.df$StDevDist
# Per-set means of environmental covariates, matched to dist.df row order
# via the same hard-coded level ordering as above.
n=1
meantemps = c()
meanprecips = c()
meanslopes = c()
meancanopys = c()
for(i in levels(meta$Set_ID)[c(1,5,6,7,8,9,10,11,2,3)]) {
df = meta[meta$Set_ID == i,]
meantemps[n] = mean(df[,"ANNUAL_MEAN_TEMP"])
meanprecips[n] = mean(df[,"ANNUAL_PRECIP"])
meanslopes[n] = mean(df[,"SLOPE_AVG"])
meancanopys[n] = mean(df[,"CANOPY_2001"])
n=n+1
}
dist.df$Temp <- meantemps
dist.df$Precip <- meanprecips
dist.df$Slope <- meanslopes
dist.df$CanopyCover <- meancanopys
ggplot(dist.df, aes(x=(2019-BurnYear),y=MeanDist,ymin=lower,ymax=upper)) +
geom_point() +
geom_errorbar() +
stat_smooth(method = "lm") +
theme_bw() +
labs(y="Community Distance", x="Years Since Burn")
ggsave("./output/Community_Distance_vs_Burn_Year.png", dpi = 300)
# Add community distances to metadata
dist.df
from = levels(meta$Set_ID)[c(1,5,6,7,8,9,10,11,2,3,4)]
to = c(dist.df$MeanDist,NA)
distance = plyr::mapvalues(meta$Set_ID,from=from,to=to)
meta$CommDist_Burn = as.numeric(as.character(distance))
# Models with GDC data
# Exploratory (generalized) linear models of paired community distance against
# climate covariates, plus the corresponding scatter plots.
mod2 = glm(CommDist_Burn ~ ANNUAL_MEAN_TEMP / Set_ID, data = meta)
summary(mod2)
library(lme4)
# random intercept for set, fixed effects for slope + temperature
mod3 = lmer(CommDist_Burn ~ (SLOPE_AVG + ANNUAL_MEAN_TEMP) + (1|Set_ID), data = meta)
summary(mod3)
anova(mod3)
meta$SLOPE_AVG
ggplot(meta,aes(x=ANNUAL_MEAN_TEMP,y=CommDist_Burn)) +
geom_jitter(aes(color=2019-BurnYear),height = .01, width = 0,size=2,alpha=.5)+geom_smooth(method="lm") +
labs(x="Annual Mean Temp (deg C)",y="Fungal Community Distance",color = "Years Since Burn")
ggsave("./output/Community_Distance_vs_Temp_and_Burn_Year.png",dpi=300,width = 10,height = 10)
# merge the per-set distance table into the sample metadata
names(dist.df)[1] <- "Set_ID"
meta <- as(meta,"data.frame")
meta <- as.data.frame(left_join(meta,dist.df,by=c("Set_ID","BurnYear")))
glimpse(meta)
# names(meta)
#
# from = (meta$Set_ID)[c(1,5,6,7,8,9,10,11,2,3,4)]
# to = c(dist.df$MeanDist,NA)
# distance = plyr::mapvalues(meta$Set_ID,from=from,to=to)
# meta$CommDist_Burn = as.numeric(as.character(distance))
# Models with GDC data
meta$YearsSinceBurn <- 2019 - meta$BurnYear
# NOTE: mod2/mod3 are reassigned here, clobbering the models fit above
mod2 = glm(MeanDist ~ (ANNUAL_MEAN_TEMP * YearsSinceBurn), data = meta)
summary(mod2)
meta$BurnYear
mod3 = lmer(MeanDist ~ (ANNUAL_MEAN_TEMP) + (1|YearsSinceBurn), data = meta)
summary(mod3)
# lmerTest version adds p-values for the fixed effects; this is the model reported
mod4 <- lmerTest::lmer(MeanDist ~ (ANNUAL_MEAN_TEMP) + (1|YearsSinceBurn), data = meta)
summary(mod4)
sink("./output/lmer_mod_of_Paired_Distance.txt")
summary(mod4)
sink(NULL)
ggplot(meta,aes(x=ANNUAL_MEAN_TEMP,y=MeanDist)) +
geom_jitter(aes(color=factor(2019-BurnYear)),height = .01, width = 0,size=3,alpha=.75) + geom_smooth(color="Black",se=FALSE,method = "lm") +
# geom_boxplot(aes(group=(ANNUAL_MEAN_TEMP),fill=factor(2019-BurnYear)),alpha=.25)
labs(x="Annual Mean Temperature",y="Fungal Community Distance",color = "Years Since Burn") + scale_color_viridis_d() +
theme(axis.title = element_text(size=14,face="bold"),
axis.text.x = element_text(size=12,face="bold"),
legend.title = element_text(size=14,face="bold"),
legend.text = element_text(size=14,face="bold"),
legend.position = "right") + scale_y_continuous(limits = c(0.5,1))
ggsave("./output/Community_Distance_vs_AnnualMeanTemp_partial-lims_lm.png",dpi=300,width = 10,height = 10)
# check collinearity between temperature and burn year before trusting the models
cor(meta$ANNUAL_MEAN_TEMP,meta$BurnYear)
ggplot(meta,aes(x=ANNUAL_PRECIP,y=CommDist_Burn)) +
geom_jitter(aes(color=2019-BurnYear),height = .01, width = 0,size=2,alpha=.5)+geom_smooth(method = "lm") +
labs(x="Annual Average Precipitation (mm)",y="Fungal Community Distance",color = "Years Since Burn")
ggsave("./output/Community_Distance_vs_Precip_and_Burn_Year2.png",dpi=300,width = 10,height = 10)
ggplot(meta,aes(x=2019-BurnYear,y=CommDist_Burn)) +
geom_jitter(aes(color=ANNUAL_MEAN_TEMP),height = .01, width = 0,size=2,alpha=.5)+geom_smooth(method="lm") +
labs(x="Years Since Burn",y="Fungal Community Distance",color = "Annual Mean Temp (deg C)")
glimpse(meta)
glimpse(meta)
# adonis(otu_table(ps_ra) ~ meta$BurnYear+meta$SLOPE_AVG+meta$FireTreatment+meta$ANNUAL_PRECIP)
# Ordinate ####
DCA = ordinate(ps_ra)
plot_ordination(ps_ra,DCA, color = "FireTreatment") + theme_bw()
ggsave("./output/DCA_ordination.png", dpi=300)
# PCoA
pca = prcomp(as.matrix(otu_table(ps_ra)))
dim(sample_data(ps_ra))
dim(otu_table(ps_ra))
g <- ggbiplot(pca, groups = ps_ra@sam_data$Set_ID)
g
NMDS = ordinate(ps_ra, method = "NMDS")
plot_ordination(ps_ra,NMDS, color = "FireTreatment", type = "biplot") + theme_bw()
# PermANOVA ####
sink("./output/adonis_table.txt")
adonis(otu_table(ps_ra) ~ ps_ra@sam_data$BurnYear * ps_ra@sam_data$FireTreatment * ps_ra@sam_data$Set_ID)
adonis(otu_table(ps_ra) ~ (ps_ra@sam_data$BurnYear + ps_ra@sam_data$FireTreatment) * ps_ra@sam_data$ANNUAL_MEAN_TEMP)
adonis(otu_table(ps_ra) ~ (ps_ra@sam_data$BurnYear + ps_ra@sam_data$ANNUAL_MEAN_TEMP) * ps_ra@sam_data$Set_ID)
sink(NULL)
# Mantel Test and multiple regression on distance matrices ####
spatial.dist = dist(cbind(ps_ra@sam_data$Longitude, ps_ra@sam_data$Latitude))
comm.dist = vegdist(as.matrix(ps_ra@otu_table))
envir = cbind(ps_ra@sam_data$SLOPE_AVG,ps_ra@sam_data$CANOPY_2001,ps_ra@sam_data$ANNUAL_MEAN_TEMP,ps_ra@sam_data$ANNUAL_PRECIP)
envir.dist = vegdist(envir, method = "man")
mantel.test = mantel.rtest(spatial.dist, comm.dist, nrepet = 9999)
# MRM
dist_MRM <- MRM(comm.dist ~ spatial.dist + envir.dist, nperm = 9999)
sink("./output/mantel_test.txt")
print(mantel.test)
sink(NULL)
sink("./output/MRM_table.txt")
dist_MRM
sink(NULL)
# single groups ??? ####
sordaria = subset_taxa(psm_ra, Class == "c__Sordariomycetes")
plot_bar2(sordaria, x="FireTreatment", fill = "Family")
# Core members ####
# Identify "core" taxa (high prevalence across samples) overall and split by
# fire treatment, using microbiome::plot_core heatmaps.
# rename ASVs
# short ASV_N labels replace full sequences so heatmap axes are readable
ps_ra_rn <- ps_ra
taxa_count = length(colnames(ps_ra_rn@otu_table@.Data))
colnames(ps_ra_rn@otu_table@.Data) <- paste0("ASV_",1:taxa_count)
det <- c(0, 0.1, 0.5, 2, 5, 20)/100
prevalences <- seq(.05, 1, .05)
detections <- 10^seq(log10(1e-3), log10(.2), length = 10)
plot_core(ps_ra_rn, prevalences = prevalences, detections = det, plot.type = "lineplot") + xlab("Relative Abundance (%)")
p <- plot_core(ps_ra_rn, plot.type = "heatmap",
prevalences = prevalences,
detections = detections,
colours = rev(brewer.pal(5, "Spectral")),
min.prevalence = .1, horizontal = TRUE)
print(p)
ggsave("./output/Core_Taxa_Overall.png",dpi=300)
# split by treatment and drop taxa absent from each split before plotting
core_burned = subset_samples(ps_ra, FireTreatment == "Burn")
core_unburned = subset_samples(ps_ra, FireTreatment == "NonBurn")
core_burned = subset_taxa(core_burned,colSums(otu_table(core_burned)) > 0)
core_unburned = subset_taxa(core_unburned,colSums(otu_table(core_unburned)) > 0)
# CAUTION: ASV_N labels are renumbered per subset, so "ASV_1" in the burned plot
# is not necessarily the same sequence as "ASV_1" in the unburned plot
taxa_count = length(colnames(core_burned@otu_table@.Data))
colnames(core_burned@otu_table@.Data) <- paste0("ASV_",1:taxa_count)
taxa_count = length(colnames(core_unburned@otu_table@.Data))
colnames(core_unburned@otu_table@.Data) <- paste0("ASV_",1:taxa_count)
p <- plot_core(core_burned, plot.type = "heatmap",
prevalences = prevalences,
detections = detections,
colours = rev(brewer.pal(5, "Spectral")),
min.prevalence = .1, horizontal = TRUE) + ggtitle("Burned Sites - Core Taxa")
print(p)
ggsave("./output/Core_Taxa_Burned.png", dpi=300)
p <- plot_core(core_unburned, plot.type = "heatmap",
prevalences = prevalences,
detections = detections,
colours = rev(brewer.pal(5, "Spectral")),
min.prevalence = .1, horizontal = TRUE) + ggtitle("Non-Burned Sites - Core Taxa")
print(p)
ggsave("./output/Core_Taxa_NonBurned.png", dpi=300)
core(otu_table(core_burned), detection = 0.01, prevalence = .2)
colSums(otu_table(core_burned))
# Mapping sites ####
# Plot sampling sites on a US map colored by Shannon diversity, once for the
# merged object (psm_ra) and once per-sample (ps_ra).
shannon = diversity(t(otu_table(psm_ra)), "shannon")
simpson = diversity(t(otu_table(psm_ra)), "simpson")
world <- ne_countries(scale = "medium", returnclass = "sf")
# BUG FIX: the original assigned `diversity = diversity`, which stored the
# vegan::diversity *function* in the data frame instead of the computed values.
sites = data.frame(latitude = sample_data(psm_ra)$Latitude, longitude = sample_data(psm_ra)$Longitude,
diversity = shannon)
str(shannon)
states <- st_as_sf(map("state", plot = FALSE, fill = TRUE))
states <- cbind(states, st_coordinates(st_centroid(states)))
states$ID <- toTitleCase(states$ID)
ggplot(data = world) +
geom_sf() +
geom_sf(data = states, fill = "White") +
geom_text(data = states, aes(X, Y, label = ID), size = 5) +
geom_point(data = sites, aes(x = longitude, y = latitude, color = diversity),
shape = 19, size=2) +
coord_sf(xlim = c(-114.05, -108.95), ylim = c(42.05,36.95), expand = FALSE) +
labs(color = "Shannon Diversity", x="Longtitude",y="Latitude") +
scale_color_gradient(low="Blue",high= "Orange")
ggsave("./output/Site_Map_merged.png", dpi=300,height = 10,width = 12)
# per-sample map: use a distinct name so vegan::diversity is not masked
shannon_ps = diversity(otu_table(ps_ra), "shannon")
sites = data.frame(latitude = sample_data(ps_ra)$Latitude, longitude = sample_data(ps_ra)$Longitude,
diversity = shannon_ps)
states <- st_as_sf(map("state", plot = FALSE, fill = TRUE))
states <- cbind(states, st_coordinates(st_centroid(states)))
states$ID <- toTitleCase(states$ID)
theme_set(theme_bw())
ggplot(data = world) +
geom_sf() +
geom_sf(data = states, fill = "White") +
geom_text(data = states, aes(X, Y, label = ID), size = 5) +
geom_point(data = sites, aes(x = longitude, y = latitude, color = diversity),
shape = 19, size=2.5) +
coord_sf(xlim = c(-114.05, -108.95), ylim = c(42.05,36.95), expand = FALSE) +
labs(color = "Shannon Diversity", x="Longtitude",y="Latitude") +
scale_color_gradient(low="Blue",high= "Orange")
ggsave("./output/Site_Map.png", dpi=300,height = 10,width = 12)
# metadata figures ####
# Sample-distribution plots (burn year vs. set / temperature) plus
# richness-by-burn-year boxplots.
meta_figs = as.data.frame(sample_data(ps_FF))
# BUG FIX: ANNUAL_MEAN_TEMP is used below but the original select() dropped it,
# making meta_figs$ANNUAL_MEAN_TEMP NULL.
meta_figs = meta_figs %>% select(Set_ID,FireTreatment,BurnYear,Shannon,ANNUAL_MEAN_TEMP)
ggplot(meta_figs, aes(x=BurnYear,y=Set_ID, color=FireTreatment)) +
geom_jitter() + ggtitle("Sample Distribution") +
scale_x_continuous(labels = c(unique(meta_figs$BurnYear)), breaks = c(unique(meta_figs$BurnYear))) +
labs(x= "Year of burn",y="Set ID",color="Fire Treatment")
meta_figs$ANNUAL_MEAN_TEMP
ggplot(meta_figs, aes(x=BurnYear,y=ANNUAL_MEAN_TEMP, color=FireTreatment)) +
geom_jitter(size=2) +
scale_x_continuous(labels = c(unique(meta_figs$BurnYear)), breaks = c(unique(meta_figs$BurnYear))) +
labs(x= "Year of burn",y="Annual Mean Temp (C)",color="Fire Treatment",caption = "Sample distribution through time and temperature") +
scale_color_manual(values=pal.discrete[c(2,6)]) +
theme(axis.text.x = element_text(angle=60,hjust=1))
ggsave("./output/sample_distribution.png", dpi=300)
# BUG FIX: the original had a dangling duplicate "geom_point() + geom_smooth()"
# line that was never attached to any plot; merged into a single chain.
ggplot(meta_figs, mapping = aes(x=BurnYear, y=Shannon, color=FireTreatment)) +
geom_point() + geom_smooth(method = "lm") + scale_color_manual(values=pal.discrete[c(2,6)])
# Alpha diversity between treatment pairs ####
sample_data(ps_FF)$Richness <- specnumber(otu_table(ps_FF))
alpha.df <- as(sample_data(ps_FF),"data.frame")
# BUG FIX: the scale_fill_manual() call was stranded several lines below as an
# orphan expression; it is now part of the boxplot saved by ggsave().
ggplot(alpha.df, aes(x=factor(BurnYear),y=Richness,fill=FireTreatment)) +
geom_boxplot() + labs(x="Burn year", y="Fungal richness",fill= "Site status") +
scale_fill_manual(values=pal.discrete[c(2,6)])
ggsave("./output/boxplot_richness_vs_burnyear.png")
# what is different about 2012?
df.2012 = alpha.df[alpha.df$BurnYear==2012,]
df.other = alpha.df[alpha.df$BurnYear!=2012,]
df.2012$ANNUAL_MEAN_TEMP
df.other$ANNUAL_MEAN_TEMP
# Boxplots between groups #
# ggpubr boxplot with Wilcoxon comparison between "in" (burned) and "out" plots,
# then a satellite site map via ggmap.
names(alpha.df)
comparisons <- list(c("in","out"))
alpha.df$InOrOut
# BUG FIX: the original chain was broken by a lone "+" on its own line, so the
# theme() and labs() calls were never applied to the plot.
ggboxplot(alpha.df, x = "BurnYear", y = "Richness",
color = "InOrOut", palette =pal.discrete,
add = "jitter") +
stat_compare_means(comparisons = comparisons) +
theme(axis.title = element_text(face="bold",size=14),
axis.text = element_text(face="bold")) +
labs(x="")
# Site map ####
library(ggmap)
library(maps)
library(tidyverse)
# SECURITY FIX: a live Google Maps API key was committed in plain text here.
# Read it from the environment instead (set GOOGLE_MAPS_API_KEY before running)
# and revoke/rotate the leaked key.
ggmap::register_google(key = Sys.getenv("GOOGLE_MAPS_API_KEY"))
df2 = read.csv("./data/Fire_metadata.csv")
ggmap(get_googlemap(center = c(lon = -111.4, lat = 39.5),
zoom = 7, scale = 2,
maptype ='satellite')) +
geom_point(aes(x = Longitude, y = Latitude, colour = BurnYear), data = df2, size = 4) +
theme(legend.position="right") +
scale_colour_viridis_c() +
borders("state", colour = "dark blue", region = "utah", size = 2) +
theme(axis.title = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank())
ggsave("./output/sitemap_viridis.png",dpi=300)
# Look for Rhizopogon and Wilcoxina differences between burned-unburned
# Quick interactive checks: count ASVs assigned to two ectomycorrhizal genera
# (taxonomy column 6 is Genus, UNITE-style "g__" prefixes).
sum((ps_FF@tax_table[,6] == "g__Rhizopogon"),na.rm = TRUE)
sum((ps_FF@tax_table[,6] == "g__Wilcoxina"),na.rm = TRUE)
tax_table(ps_FF)
plot_bar(ps_FF, fill="Order")
plot_bar(psm_ra, fill="Genus")
# Find species, if any, that are more prevalent in burned sites than unburned, arrange by burn year
# The block below is an earlier, superseded copy of the site-map code (kept
# commented for reference).
# ggmap::register_google(key = "??????????") # Key kept private
#
# df2 = read.csv("Desktop/Fire_metadata.csv")
#
# ggmap(get_googlemap(center = c(lon = -111.4, lat = 39.5),
# zoom = 7, scale = 2,
# maptype ='satellite')) +
# geom_point(aes(x = Longitude, y = Latitude, colour = BurnYear), data = df2, size = 4) +
# theme(legend.position="right") +
# scale_colour_gradient(low = 'orange', high = 'red') +
# borders("state", colour = "dark blue", region = "utah", size = 2) +
# theme(axis.title = element_blank(),
# axis.text = element_blank(),
# axis.ticks = element_blank())
# Look for Rhizopogon and Wilcoxina differences between burned-unburned
# sum((ps_FF@tax_table[,6] == "g__Rhizopogon"),na.rm = TRUE)
# sum((ps_FF@tax_table[,6] == "g__Wilcoxina"),na.rm = TRUE)
# tax_table(ps_FF)
#
# plot_bar(ps_FF, fill="Order")
# plot_bar(psm_ra, fill="Genus")
# Find species, if any, that are more prevalent in burned sites than unburned, arrange by burn year
# Differential abundance (corncob) at Family level ----
# Wald test of abundance and dispersion differences between fire treatments.
sample_data(ps_FF)[,"FireTreatment"]
ps_family <- tax_glom(ps_FF,"Family")
set.seed(123)
da_analysis <- differentialTest(formula = ~ FireTreatment, #abundance
phi.formula = ~ FireTreatment, #dispersion
formula_null = ~ 1, #mean
phi.formula_null = ~ 1,
test = "Wald", boot = FALSE,
data = ps_family,
fdr_cutoff = 0.05)
da_analysis$significant_taxa
# Build "Phylum_Class_Order_Family" labels for each significant taxon.
# NOTE(review): indexing the tax_table slice with a single [i] selects one
# element, not one row -- confirm the resulting labels against the taxonomy.
stax_1 <- paste(tax_table(ps_family)[da_analysis$significant_taxa,2:5][1],sep="_")
stax_1 <- paste(stax_1[1],stax_1[2],stax_1[3],stax_1[4],sep="_")
stax_2 <- paste(tax_table(ps_family)[da_analysis$significant_taxa,2:5][2],sep="_")
stax_2 <- paste(stax_2[1],stax_2[2],stax_2[3],stax_2[4],sep="_")
stax_3 <- paste(tax_table(ps_family)[da_analysis$significant_taxa,2:5][3],sep="_")
stax_3 <- paste(stax_3[1],stax_3[2],stax_3[3],stax_3[4],sep="_")
# BUG FIX: the original copy-pasted index [3] and stax_3[1] here, so stax_4
# duplicated the third taxon instead of naming the fourth.
stax_4 <- paste(tax_table(ps_family)[da_analysis$significant_taxa,2:5][4],sep="_")
stax_4 <- paste(stax_4[1],stax_4[2],stax_4[3],stax_4[4],sep="_")
names(da_analysis$significant_models) <- c(stax_1,stax_2,stax_3,stax_4)
sink("./output/Differential_abundance_model_stats_tables.txt")
print("Family-level taxonomic comparisons...")
da_analysis$significant_models
sink(NULL)
daplot_1 <- plot(da_analysis) + labs(y="Differentially abundant taxa\n(relative abundance)") +
theme(axis.text.y = element_text(face="bold.italic"),
axis.title.y = element_text(face="bold",size=16),
strip.text = element_text(face="bold",size=12))
ggsave(daplot_1, filename = "./output/Diff_Abund_Family_By_BurnTreatment.png",device = "png",width = 9,height = 5,dpi=300)
da_analysis$significant_taxa
# Per-taxon beta-binomial fits (corncob::bbdml) for the four significant taxa.
# The left-hand side of each formula is the full ASV sequence naming the taxon
# in ps_family; each fit's Wald table rows for the mean (mu.) parameters are
# kept and tagged with the taxon label for the combined plot below.
set.seed(123)
corncob_da1 <- bbdml(formula = ACGAGTTACAAGTCGGTCGACCGTGCTGGCGGAAACGCACGTGCACGTCGGTCGCAAACCTCATCCACACACCTGTGAACGTATGGCCTTGGGTCTCTCGACCCGGGGCAAACCTTTTTTACCCACTCTGTTTGTAAAGGAATGTCATACGTGCGTAACGCATAAATGAA ~ FireTreatment,
phi.formula = ~ FireTreatment,
data = ps_family)
# pull out model results into df
corncob_da1_wald <- waldt(corncob_da1)
corncob_da1_wald <- corncob_da1_wald[grep("mu.",row.names(corncob_da1_wald)),]
corncob_da1_wald <- tidy(corncob_da1_wald)
corncob_da1_wald$OTU <- stax_1
p1 <- plot(corncob_da1,color="FireTreatment") + scale_color_manual(values = pal.discrete[c(2,6)] ) + ggtitle(stax_1)
###
set.seed(123)
corncob_da2 <- bbdml(formula = CTGAACTGTCAACACGAGTTGTTGCTGGTCCTCAAATGGGGGCATGTGCACGCTCTGTTTACATACCCACTCACACCCGTGCACCCTCTGTAGTTCTGTGGTGTGGGGGACTCTGTCCTCCCGCTGTGGTTCTATGTCTTTTACACACACACAGTCTCATAGAATGTATGTCGCGTTTAACGCAATACAATA ~ FireTreatment,
phi.formula = ~ FireTreatment,
data = ps_family)
# pull out model results into df
corncob_da2_wald <- waldt(corncob_da2)
corncob_da2_wald <- corncob_da2_wald[grep("mu.",row.names(corncob_da2_wald)),]
corncob_da2_wald <- tidy(corncob_da2_wald)
corncob_da2_wald$OTU <- stax_2
p2 <- plot(corncob_da2,color="FireTreatment") + scale_color_manual(values = pal.discrete[c(2,6)] ) + ggtitle(stax_2)
###
set.seed(123)
corncob_da3 <- bbdml(formula = AAGAGATAGGGTGCTCAGCGCCCGACCTCCAACCCTTTGTTGTTAAAACTACCTTGTTGCTTTGGCGGGACCGCTCGGTCTCGAGCCGCTGGGGATTCGTCCCAGGCGAGTGCCCGCCAGAGTTAAACCAAACTCTTGTTAATTAAACCGGTCGTCTGAGTTAAAATTTTGAATAAATCA ~ FireTreatment,
phi.formula = ~ FireTreatment,
data = ps_family)
# pull out model results into df
corncob_da3_wald <- waldt(corncob_da3)
corncob_da3_wald <- corncob_da3_wald[grep("mu.",row.names(corncob_da3_wald)),]
corncob_da3_wald <- tidy(corncob_da3_wald)
corncob_da3_wald$OTU <- stax_3
p3 <- plot(corncob_da3,color="FireTreatment") + scale_color_manual(values = pal.discrete[c(2,6)] ) + ggtitle(stax_3)
###
set.seed(123)
corncob_da4 <- bbdml(formula = CCGAAGTTACCTTCAAAACCCACTGTGAACCTTACCTCTTGCCGCGTTGTCTCGGCGGGAGGCGGTGGGCGTCGCGTGCCCTAGCGGGCCGTGCCGCTCCCGTCCCCGCCGGCGGCGCCAAACTCTAAATTTACAGCGGACTGTATGTTCTGATTTACAAAAAAAACAAGTTA ~ FireTreatment,
phi.formula = ~ FireTreatment,
data = ps_family)
# pull out model results into df
corncob_da4_wald <- waldt(corncob_da4)
corncob_da4_wald <- corncob_da4_wald[grep("mu.",row.names(corncob_da4_wald)),]
corncob_da4_wald <- tidy(corncob_da4_wald)
corncob_da4_wald$OTU <- stax_4
p4 <- plot(corncob_da4,color="FireTreatment") + scale_color_manual(values = pal.discrete[c(2,6)] ) + ggtitle(stax_4)
# join all 4 together
full_corncob <- rbind(corncob_da1_wald,corncob_da2_wald,corncob_da3_wald,corncob_da4_wald)
# plot
tidy_corncob <- full_corncob %>% select(FireTreatment = .rownames, Estimate, StdErr = Std..Error, t.value, P.val = Pr...t..,OTU) %>%
filter(FireTreatment != "mu.(Intercept)") %>%
# arrange(AgeGroup) %>%
mutate(ymin = Estimate - StdErr, ymax=Estimate + StdErr)
tidy_corncob$FireTreatment <- str_remove(tidy_corncob$FireTreatment,pattern = "mu.CoralAgeBinned")
ggplot(tidy_corncob, aes(x=FireTreatment,y=Estimate)) +
geom_errorbar(ggplot2::aes(ymin = ymin, ymax = ymax), width = .2) + theme_bw() +
geom_hline(yintercept =0,linetype=2,alpha=.5) +
coord_flip() +
facet_grid(~OTU) +
labs(x="Fire Treatment", y= "Wald test estimate") + theme(strip.text = element_text(size=14,face="bold"),
axis.title = element_text(size=14,face="bold"),
axis.text = element_text(size=12,face = "bold"))
taxa_names(ps_family)
sample_data(ps_FF)
sample_data(ps_FF)$DaysSinceBurn <- as.numeric(as.POSIXct("2019-01-01") - as.POSIXct(sample_data(ps_FF)$BurnDate,format='%Y-%m-%d'))
ggplot(sample_data(ps_FF),aes(x=DaysSinceBurn,y=Richness,color=FireTreatment,group=Set_ID)) +
geom_point() + facet_wrap(~Set_ID)
tempgroups = kmeans(x = df2$ANNUAL_MEAN_TEMP,centers = 2)
tempgroups$cluster
mod3 = lmer(data = as(sample_data(ps_FF),"data.frame"), Richness ~ DaysSinceBurn + ANNUAL_MEAN_TEMP + (1|Set_ID)/(1|FireTreatment))
summary(mod3)
df2 = add_predictions(data = as(sample_data(ps_FF),"data.frame"),model = mod3,type = "response")
df2$tempgroup <- tempgroups$cluster
sink("./output/lmer_model_Richness.txt")
print("Richness as dependent var., DaysSinceBurn and ANNUAL_MEAN_TEMP as fixed vars., FireTreatment nested within SetID as random vars")
summary(mod3)
sink(NULL)
# # Site table ####
# df = as(sample_data(psm_ra),"data.frame")
# write.csv(df,"./output/site_info.csv",row.names = TRUE,quote = FALSE)
ggplot(df2,aes(x=DaysSinceBurn,y=Shannon,color=factor(tempgroup))) +
geom_point() + geom_smooth(method="lm",se=FALSE) + facet_wrap(~FireTreatment) +
scale_x_reverse()
# difference between shannon diversity between burned/unburned
d = df2 %>% group_by(factor(NewPastedVar)) %>%
dplyr::summarize(Shan = mean(Shannon))
d
d = data.frame(SetID = str_remove(d$`factor(NewPastedVar)`[1:10],pattern = "Burn_") ,DiffShannon = d[1:10,2] - d[c(11:13,15:21),2])
names(d)[2] <- "DiffShannon"
d
d$DiffShannon = as.character(d$DiffShannon * -1)
d
df2$ShannonDifference <- as.numeric(as.character(plyr::mapvalues(df2$Set_ID,from = d$SetID,to=d$DiffShannon)))
mod4 <- glm(data=df2,ShannonDifference ~ tempgroup * DaysSinceBurn)
summary(mod4)
ggplot(df2,aes(x=DaysSinceBurn,y=ShannonDifference,color=factor(tempgroup))) + geom_point() +
geom_point(aes(y=predict(object = mod4,newdata = df2)),color="Red",size=3) +
geom_smooth(method="lm",formula = y~x+color)
# this plot sucks. I hate it
ggplot(subset(df2,Set_ID != "Set_2"),aes(x=factor(BurnYear),y=Shannon,fill=FireTreatment)) +
geom_boxplot(position="dodge")
ggplot(subset(df2,Set_ID != "Set_2"),aes(x=factor(round(ANNUAL_MEAN_TEMP)),y=Shannon,fill=FireTreatment)) +
geom_boxplot(position="dodge")
df2$ANNUAL_MEAN_TEMP
# Site table ####
df = as(sample_data(psm_ra),"data.frame")
write.csv(df,"./output/site_info.csv",row.names = TRUE,quote = FALSE)
# Import plant data -- not finished!!! ####
# Build a plant genus/family presence-absence table per site and compare the
# plant community between burned/unburned plots (NMDS + Mantel test).
plants <- read.csv("./plant_metadata_by_site.csv")
plants$Unique.ID
plants$Family=as.character(plants$Family)
plants$Family[plants$Family == "Moss species*"] <- "Bryophyte"
plants$Family <- factor(plants$Family)
plants$Genus %>% table
# site-by-genus incidence table
plants_otu <- plants %>% group_by(Unique.ID) %>%
summarize(Genera = unique(Genus)) %>% ungroup() %>%
filter(!is.na(Genera)) %>%
filter(Genera != "") %>% table() %>%
as.matrix() %>% as.data.frame()
meta = read.csv("./Fire_metadata_plus_GDC.csv")
df <- data.frame(Unique.ID=meta$ID,meta$SampleID)
plants_otu <- full_join(df,plants_otu) %>%
select(meta.SampleID,Genera) %>% table() %>% as.data.frame.matrix()
keepers <- ps_FF@sam_data$SampleID %in% row.names(plants_otu)
# NOTE(review): subset_samples() uses non-standard evaluation; passing a logical
# vector this way only works when `keepers` is visible in the calling frame.
ps_FF <- ps_FF %>% subset_samples(keepers)
mantel.rtest(dist(plants_otu),dist(ps_FF@otu_table))
ps_FF %>%
subset_samples(BurnYear == "2006") %>%
microbiome::meta()
# make it match :(
meta %>% glimpse
plants %>% glimpse
plants$Fire.Site <- as.character(plants$Fire.Site)
meta <- meta[grep("^FS",meta$SampleID),]
plantrichness <- table(plants$Unique.ID)
plantrichness <- as.data.frame(plantrichness)
plantrichness$Var1 <- as.character(plantrichness$Var1)
names(plantrichness) <- c("ID","Plant_Richness")
# NOTE(review): `sam` is not defined anywhere in this section -- presumably a
# sample-data data frame created earlier in the full script; verify before running.
sam <- sam %>%
mutate(Fire.Site = SampleID %>% str_remove_all("FS-") %>%
str_remove_all("N|E|W|S") %>% as.character())
meta <- left_join(sam,plants,by="Fire.Site")
ps_FF %>% sample_names()
mod <- glm(data=meta,Richness ~ FireTreatment)
summary(mod)
# No noticable difference in alpha diversity based on burn year or fire treatment
# But... B-diversity is a different story!
'%ni%' <- Negate('%in%')
# find any values that don't match
notinmeta <- which(meta$ID %ni% plants$Unique.ID)
nrow(meta[notinmeta,])
# find unique genus values in each site
x=1
y=list()
for(i in levels(plants$Unique.ID)){
d <- plants %>% filter(Unique.ID == i)
y[[x]] <- as.character(unique(d$Family))
names(y)[x] <- as.character(i)
x=x+1
}
y
# build into presence-absence table
# blank data frame
pa <- data.frame(row.names = names(y))
for(i in levels(d$Family)){
pa[,i] <- NA
}
# fill, row by row
x=1
for(i in names(y)){
pa[x,] <- colnames(pa) %in% y[[i]]
x=x+1
}
pa
# convert to presence-absence for vegan
pa[pa==FALSE] <- 0
pa[pa==TRUE] <- 1
# NMDS
NMDS = vegan::metaMDS(pa,distance = "jaccard")
mds1=NMDS$points[,1]
mds2=NMDS$points[,2]
treatment=unlist(purrr::map(str_split(unique(as.character(plants$Unique.ID)),"_"),2))
# BUG FIX: the original read row.names(plantord) while *constructing* plantord;
# the NMDS scores carry the site IDs (row names of pa), so use those.
plantord=data.frame(mds1=mds1,mds2=mds2,treatment=treatment,site=row.names(pa))
plantord$treatment = as.character(plantord$treatment)
plantord$treatment[plantord$treatment == "in"] <- "Burned"
plantord$treatment[plantord$treatment == "out"] <- "Unburned"
# BUG FIX: the original piped the filtered data into ggplot() but also passed
# plantord positionally, which ggplot() took as the `mapping` argument.
plantord %>% filter(site != "46_out") %>%
ggplot(mapping = aes(x=mds1,y=mds2,color=treatment,label=site)) +
geom_point(size=3) + stat_ellipse() + labs(x="MDS1",y="MDS2",color="Fire Treatment",
caption = "Plant community in sampling plot")
ggsave("./output/NMDS_plant_community_by_burn_treatment.png",dpi=300,device = "png")
# Build gen. linear models incorporating habitat info besides temp and precip (tree cover, etc)
# use lmer
# burnstatus nested within site
# sample ID nested within burnstatus for each site
|
b144d62c57fac9ac0a8fdca109d93b44cbc4ffc2
|
57ee3bae6afcd94f0d468cb2a9cbe872e50e020b
|
/R/h2o-package/man/h2o.downloadCSV.Rd
|
6cad24a567a86d78602fad4ff46d9d7032b2dbfb
|
[
"Apache-2.0"
] |
permissive
|
woobe/h2o
|
626b3595251c75ed6d72469b442c467b21cbdeaa
|
5a7bbb6d82f6dcc1384dc51dcde227665ce1baf6
|
refs/heads/master
| 2021-01-24T16:47:36.786749
| 2014-07-28T04:48:02
| 2014-07-28T04:48:02
| 22,338,992
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,358
|
rd
|
h2o.downloadCSV.Rd
|
\name{h2o.downloadCSV}
\alias{h2o.downloadCSV}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Download H2O Data to Disk
}
\description{
Download a H2O dataset to a CSV file on local disk.
}
\usage{
h2o.downloadCSV(data, filename, quiet = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{An \code{\linkS4class{H2OParsedData}} object to be downloaded.}
\item{filename}{A character string indicating the name that the CSV file should be saved to.}
\item{quiet}{(Optional) If \code{TRUE}, suppress status messages and progress bar.}
}
\details{
WARNING: Files located on the H2O server may be very large! Make sure you have enough hard drive space to accommodate the entire file.
}
\seealso{
\code{\linkS4class{H2OParsedData}}
}
\examples{
library(h2o)
localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE)
irisPath = system.file("extdata", "iris_wheader.csv", package = "h2o")
iris.hex = h2o.importFile(localH2O, path = irisPath)
myFile = paste(getwd(), "my_iris_file.csv", sep = .Platform$file.sep)
h2o.downloadCSV(iris.hex, myFile)
file.info(myFile)
file.remove(myFile)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
fe9ae5249879b7b198b49a28fddddb1397e615aa
|
8a62b8e396a262f4bacdc8db87ea3cf48d93b554
|
/run_analysis.R
|
6f06395cb9d0244cb5387369747075396c3e3761
|
[] |
no_license
|
nimaarvin83/GettingAndCleaningData
|
a3b0a00e0c38e4bda7937da4526fef945eba8000
|
7ca2bcfad0fae42f7521400e7d714098daa94190
|
refs/heads/master
| 2021-09-10T10:51:13.741261
| 2018-03-21T04:44:29
| 2018-03-21T04:44:29
| 126,115,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,189
|
r
|
run_analysis.R
|
library(reshape2)
## Getting & Cleaning Data course project:
## download the UCI HAR dataset, merge train/test, label activities,
## extract mean/std features, and write a tidy per-subject/activity average.
##downloading and unziping the raw data
url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("./UCI HAR Dataset.zip")) {
download.file(url,"./UCI HAR Dataset.zip")
}
if (!file.exists("UCI HAR Dataset")) {
unzip("./UCI HAR Dataset.zip",exdir = ".")
}
## reading data
wd<-getwd()
mainpath<-paste0(wd,"/UCI HAR Dataset")
trainpath<-paste0(wd,"/UCI HAR Dataset/train")
testpath<-paste0(wd,"/UCI HAR Dataset/test")
##reading features
features<-read.table(paste0(mainpath,"/features.txt"),colClasses = "character")
activitylables<-read.table(paste0(mainpath,"/activity_labels.txt"),colClasses = "character")
##reading train data
## NOTE(review): the dataset ships these files as "X_train.txt"/"X_test.txt"
## (capital X); lower-case paths only work on case-insensitive filesystems --
## confirm against the extracted archive.
x_train<-read.table(paste0(trainpath,"/x_train.txt"))
subject_train<-read.table(paste0(trainpath,"/subject_train.txt"))
y_train<-read.table(paste0(trainpath,"/y_train.txt"))
##reading test data
x_test<-read.table(paste0(testpath,"/x_test.txt"))
subject_test<-read.table(paste0(testpath,"/subject_test.txt"))
y_test<-read.table(paste0(testpath,"/y_test.txt"))
## merging test and train to shape the main data
x_data<-rbind(x_train,x_test)
y_data <- rbind(y_train, y_test)
subject_data<- rbind(subject_train, subject_test)
## giving the tag of activity lables to the associated activity id
y_data[,2]<-activitylables[y_data[,1],2]
## BUG FIX: the original named these columns c("subject","activity"), but
## column 1 of y_data is the activity id, not the subject. The real subject
## ids live in subject_data, which was built above and then never used.
colnames(y_data)<-c("activity_id","activity")
colnames(subject_data)<-"subject"
## renaming the x_data variables with the feature names
library(dplyr)
for (i in seq_len(ncol(x_data))) {
names(x_data)[[i]]<-features[i,2]
}
## extracting only the mean and Stdrv
meanstd<-x_data[,grep("mean|std",names(x_data))]
## cbind the subject ids and activity labels to the meanstd dataset
meanstd<-cbind(subject_data, activity = y_data$activity, meanstd)
##create the mean dataset: average of each feature per subject/activity pair
library(plyr)
## 3:ncol(x) generalizes the original hard-coded 3:81 to however many
## mean/std columns the grep matched
averages_data<- ddply(meanstd, .(subject, activity), function(x) colMeans(x[, 3:ncol(x)]))
## writing the results into csv files
write.csv(meanstd,file = "./tidy.csv")
write.csv(averages_data,file = "./average_data.csv")
|
d03fa89c572f866bb4da4a28c9175ed5505144fb
|
dd25fec8c5262b5dd60f4bcd18fa47279bc8b2c3
|
/preparing/source/recommend/07recommend.R
|
1f9b2f5a38126a888ccacad0bc35bb0ac39ffdf8
|
[] |
no_license
|
osj3474/Recommendation-system
|
a97f1371643f8c093201e0ad8640fe00dbe104e9
|
9f9c77673ac4f95378b73d90266ae26cbc1b3c82
|
refs/heads/master
| 2022-12-05T00:04:52.565274
| 2020-08-06T03:10:16
| 2020-08-06T03:10:16
| 279,812,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
07recommend.R
|
# User-based collaborative filtering demo on the Pohang ratings matrix.
df <- read.csv("pohang.csv")
# drop column 12, transpose so users are rows, drop the (old) header row
m <- df[, -12]
m <- t(m)
m <- m[-1,]
colnames(m) <- paste("i", 1:ncol(m), sep='')
class(m)
tt <- as.data.frame(m)
# BUG FIX: View(tt) was originally called *before* tt was created, which
# errors on a clean run; moved after the definition.
View(tt)
tt[,c(1:ncol(tt))] <- as.double(unlist(tt[,c(1:ncol(tt))]))
str(tt)
# zero means "not rated" -> encode as NA for realRatingMatrix
tt[which(tt == 0, arr.ind = TRUE)] <- NA
###
library(recommenderlab)
library(dplyr)
library(tidyr)
library(tibble)
user_item_ratings <- as.matrix(tt)
rating <- as(user_item_ratings, "realRatingMatrix")
recommenderRegistry$get_entries(dataType = "realRatingMatrix")
nrow(rating)
class(rating)
# Recommender(data, method): train UBCF on the first 9 users
r <- Recommender(rating[1:9], method = "UBCF")
r
names(getModel(r))
getModel(r)$description
# top-5 recommendations for the held-out 10th user
recom <- predict(r, rating[10], n=5, type="topNList")
recom
t <- as(recom, "list")
recom3 <- bestN(recom, n = 3)
recom3
as(recom3, "list")
|
ce5a80ce770090a5c10f2c3e60fb1daf9d903b0b
|
3abf06b894d28b490acc23c2b115181bc7853e77
|
/R/plot_decisiontree.r
|
adacbb3d4944353ea85503f34e1b9cf50f8a6f71
|
[] |
no_license
|
cran/specr
|
4c5b29a0e2bd91854b01d1ae3c8c42f737fcf46e
|
25d5f7931ace474f5b592965814964979907f135
|
refs/heads/master
| 2023-02-08T06:37:23.073215
| 2023-01-20T12:50:02
| 2023-01-20T12:50:02
| 250,514,715
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,033
|
r
|
plot_decisiontree.r
|
#' Plot decision tree
#'
#' @description `r lifecycle::badge("deprecated")`
#' This function is deprecated because the new version of specr uses a new analytic framework.
#' In this framework, you can plot a similar figure simply by using the generic \code{plot()}.
#' This function plots a simple decision tree that is meant to help understanding how few analytical choices may results in a large number of specifications. It is somewhat useless if the final number of specifications is very high.
#'
#' @param df data frame resulting from [run_specs()].
#' @param label Logical. Should labels be included? Defaults to FALSE. Produces only a reasonable plot if number of specifications is low.
#' @param legend Logical. Should specific decisions be identifiable. Defaults to FALSE.
#'
#' @return a \link[ggplot2]{ggplot} object.
#'
#' @export
#'
#' @examples
#' results <- run_specs(df = example_data,
#'                      y = c("y1", "y2"),
#'                      x = c("x1", "x2"),
#'                      model = c("lm"),
#'                      controls = c("c1", "c2"))
#'
#' # Basic, non-labelled decisions tree
#' plot_decisiontree(results)
#'
#' # Labelled decisions tree
#' plot_decisiontree(results, label = TRUE)
#'
#' # Add legend
#' plot_decisiontree(results, label = TRUE, legend = TRUE)
plot_decisiontree <- function(df,
                              label = FALSE,
                              legend = FALSE) {
  # Deprecation warning
  lifecycle::deprecate_warn("1.0.0", "plot_decisiontree()", "plot.specr.setup()")
  # Create data set for graph transformation.
  # Each level's node id is made unique by concatenating it with its parent's
  # value (e.g. "y1 & x1"); otherwise identical labels at different branches
  # would be merged into a single graph node.
  df <- df %>%
    dplyr::select(.data$model, .data$x, .data$y, .data$controls, .data$subsets) %>%
    dplyr::arrange(.data$model, .data$x, .data$y, .data$controls, .data$subsets) %>%
    dplyr::mutate(start = "raw data") %>%
    dplyr::select(start, dplyr::everything()) %>%
    dplyr::mutate(x = paste0(.data$x, " & ", .data$model),
                  y = paste0(.data$y, " & ", .data$x),
                  controls = paste0(.data$controls, " & ", .data$y),
                  subsets = paste0(.data$subsets, " & ", .data$controls))
  # Create edges: one edge list per decision level, root -> model -> x -> y
  # -> controls -> subsets, each tagged with the decision it represents.
  edges_level1 <- df %>%
    dplyr::select(.data$start, .data$model) %>%
    dplyr::rename(from = .data$start, to = .data$model) %>%
    unique %>%
    dplyr::mutate(decisions = "model")
  edges_level2 <- df %>%
    dplyr::select(.data$model, .data$x) %>%
    dplyr::rename(from = .data$model, to = .data$x) %>%
    unique %>%
    dplyr::mutate(decisions = "independent variable")
  edges_level3 <- df %>%
    dplyr::select(.data$x, .data$y) %>%
    dplyr::rename(from = .data$x, to = .data$y) %>%
    unique %>%
    dplyr::mutate(decisions = "dependent variable")
  edges_level4 = df %>%
    dplyr::select(.data$y, .data$controls) %>%
    dplyr::rename(from = .data$y, to = .data$controls) %>%
    dplyr::mutate(decisions = "control variables")
  edges_level5 <- df %>%
    dplyr::select(.data$controls, .data$subsets) %>%
    dplyr::rename(from = .data$controls, to = .data$subsets) %>%
    dplyr::mutate(decisions = "subsets")
  # Combine edges
  edge_list <- rbind(edges_level1,
                     edges_level2,
                     edges_level3,
                     edges_level4,
                     edges_level5)
  # Plot edges as a dendrogram (graph_from_data_frame comes from igraph)
  plot <- edge_list %>%
    graph_from_data_frame %>%
    ggraph::ggraph(layout = 'dendrogram',
                   circular = FALSE) +
    ggraph::geom_edge_diagonal() +
    theme_void()
  # Check if legend should be plotted
  # (adds a second, colored edge layer on top of the plain one)
  if(isTRUE(legend)) {
    plot <- plot +
      ggraph::geom_edge_diagonal(aes(color = .data$decisions)) +
      ggraph::scale_edge_color_brewer(palette = "Blues")
  }
  # Check if labels should be plotted (leaf nodes only, rotated, nudged down)
  if(isTRUE(label)) {
    plot <- plot +
      ggraph::geom_node_text(aes(label = .data$name,
                                 filter = .data$leaf),
                             angle=90 ,
                             hjust=1,
                             nudge_y = -0.10) +
      ylim(-5, NA)
  }
  return(plot)
}
|
2ea1827f6ee5804cabf435ad120e23f46e29fbdf
|
9ed21120bcb596e8065d11744063928724fb1481
|
/cesR_demo.R
|
a6a677e9e76b26c68496c5e1a67c1f1bc24d1a1f
|
[] |
no_license
|
hodgettsp/cesR_demo
|
b2ffd5e2a7000d037d267e4f9f81eeddbfa0cc74
|
b8ad840af117b589054227842fa781f58707fb7b
|
refs/heads/main
| 2023-02-17T09:41:08.551441
| 2021-01-15T18:33:24
| 2021-01-15T18:33:24
| 329,992,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,040
|
r
|
cesR_demo.R
|
# Install the cesR package from its GitHub repository (not on CRAN)
devtools::install_github("hodgettsp/cesR")

# Load cesR into the R session
library(cesR)

# List the survey call codes associated with each CES survey.
# get_cescodes() takes no arguments.
get_cescodes()

# Preview a CES survey dataset with get_preview(). Two arguments:
#   1. a survey call name (character)
#   2. the number of rows to return; optional, defaults to 6
get_preview("ces2011", 10)
head(ces2011_preview, 10)

get_preview("ces2004")
head(ces2004_preview)

# Remove the previews from the R environment
rm(ces2004_preview)
rm(ces2011_preview)

# Load a full CES survey dataset into the R environment with get_ces().
# Takes one argument: a CES survey call name. The function temporarily
# downloads the survey files, loads the data under the called name,
# then removes the downloaded files.
get_ces("ces2004")
head(ces2004)

# Look up the question text behind a survey variable with get_question().
# Two arguments:
#   1. name of a loaded survey dataset (character)
#   2. a question code in that survey (character)
get_question("ces2004", "cps_c1")

# Load the pre-organized, built-in dataset with get_decon().
# "decon" stands for demographics/economy. Takes no arguments; it loads a
# subset of the 2019 Online CES survey containing variables related to
# demographic and economic survey questions.
get_decon()
head(decon)

# Install the labelled package from CRAN
install.packages("labelled")

# Load the labelled package into the R working space
library(labelled)

# Convert the labelled ces2004 columns to factors using to_factor()
# from the labelled package
ces2004_factor <- labelled::to_factor(ces2004)
head(ces2004_factor)
|
2869658b2670e7863fdf9d93d568716e1b5c65b1
|
ad8f748aa729abc93294e75c5d6597b2777c2c8c
|
/man/cmdiff.Rd
|
9075c504817edb7dcbe6c30fa2466e7d2e0eb364
|
[] |
no_license
|
richardsc/cmocean
|
1d655f65676cd214f19c1897de81d02d03583df0
|
aa8fd16bfb232ab064598bbafcb189538a44feb9
|
refs/heads/master
| 2020-05-09T12:14:01.541523
| 2019-04-12T10:49:27
| 2019-04-12T10:49:27
| 181,106,181
| 0
| 0
| null | 2019-04-13T01:35:34
| 2019-04-13T01:35:33
| null |
UTF-8
|
R
| false
| true
| 695
|
rd
|
cmdiff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roxy.R
\name{cmdiff}
\alias{cmdiff}
\title{Cmdiff colour palette}
\arguments{
\item{n}{integer giving the number of colours (>= 1) to be produced;
currently it does not make sense to ask for more than 256}
}
\value{
Vector of R colours (\code{'#RRGGBB'} strings) of length \code{n}.
}
\description{
The diff colormap is diverging, with one side shades of blues and one side shades of browns.
}
\examples{
z <- xtabs(weight~Time+Chick, ChickWeight)
x <- sort(as.numeric(rownames(z)))
y <- sort(as.numeric(colnames(z)))
image(
x = x, y = y, z = z, col = cmdiff(100),
xlab = 'Time', ylab = 'Chick'
)
}
\keyword{color}
|
fa03e8009ff1bcde337ecedd8c2cc1a583393491
|
f305ddcebdbc575b5cf6b0509d31006aa5b5c18e
|
/R/speclib_wavelength.R
|
921d0766384b72c043e3b8b5a71a0f673fca0b2b
|
[] |
no_license
|
keltoskytoi/hsdar
|
6f9083f1844e6aa2db534cfa930f17a40be49559
|
8ff148e5aba93be419517385843e0cad7bfe05f1
|
refs/heads/master
| 2021-07-15T15:08:17.598271
| 2017-10-17T08:31:42
| 2017-10-17T08:31:42
| 111,017,970
| 1
| 0
| null | 2017-11-16T20:19:59
| 2017-11-16T20:19:59
| null |
UTF-8
|
R
| false
| false
| 288
|
r
|
speclib_wavelength.R
|
# Setter: `wavelength(x) <- value` stores a numeric vector in the
# `wavelength` slot of a Speclib object and returns the modified object
# (the shape S4 replacement methods must have).
setReplaceMethod(
  "wavelength",
  signature(object = "Speclib", value = "numeric"),
  function(object, value) {
    object@wavelength <- value
    object
  }
)

# Getter: `wavelength(x)` reads the slot back.
setMethod(
  "wavelength",
  signature(object = "Speclib"),
  function(object) object@wavelength
)
|
98eebf1ba345a55aec11102b314fa43176ed87e2
|
0e2859631b841ccdaf020dbdcdd42c5193b77192
|
/tests/testthat/test-utils.R
|
3679d0aebe07f9c4fbbb98c9444c65d3ddc07ff2
|
[
"Apache-2.0"
] |
permissive
|
bcgov/rems
|
4f41ccf7588e38bc15e88ed1d6880505eac644cf
|
85cbbca54aff440909d9751e7c387adb8fbbba63
|
refs/heads/master
| 2023-08-17T07:49:23.354561
| 2023-08-10T16:55:46
| 2023-08-10T16:55:46
| 63,990,524
| 20
| 6
|
Apache-2.0
| 2023-08-10T16:55:47
| 2016-07-23T00:03:38
|
R
|
UTF-8
|
R
| false
| false
| 823
|
r
|
test-utils.R
|
context("utils")

test_that("set_ems_tz works with all classes", {
  ds <- "2019-11-18"
  want <- as.POSIXct(ds, tz = "Etc/GMT+8")
  # character, Date, POSIXct and POSIXlt inputs must all normalise to the
  # same POSIXct instant in the EMS timezone (Etc/GMT+8)
  expect_equal(set_ems_tz(ds), want)
  expect_equal(set_ems_tz(as.Date(ds)), want)
  expect_equal(set_ems_tz(want), want)
  expect_equal(set_ems_tz(as.POSIXlt(ds, tz = "Etc/GMT+8")), want)
})

test_that("basic utils work", {
  # add_rems_type tags an object with a rems_type attribute and rejects
  # values outside the known set
  expect_equal(attr(add_rems_type(list(), "2yr"), "rems_type"), "2yr")
  expect_error(add_rems_type(list(), "hello"))
  skip_on_cran()
  skip_if_offline()
  # The EMS download endpoint should be reachable
  expect_equal(httr::status_code(httr::GET(base_url())), 200)
})

test_that("find_os works", {
  # RUNNER_OS is only set on GitHub Actions; skip everywhere else
  gha_os <- tolower(Sys.getenv("RUNNER_OS"))
  skip_if(!nzchar(gha_os))
  expect_equal(find_os(), gha_os)
})
|
10f17431645e17961d10f6ca5bb2fe73b94601dc
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/HiDimMaxStable/R/margins.R
|
44a001ec30d7bbd9c73d4446c1de6bd47e1f12ed
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,722
|
r
|
margins.R
|
# Different kinds of margin distributions that are used. Each distribution
# is parameterised so that it has unit mean regardless of the value of its
# single shape parameter (`param` / `a`).

# S4 container for a margin: display name, short type tag, density (d),
# CDF (p) and quantile (q) functions, plus the admissible parameter range
# [p.min, p.max] and a starting value (p.start) for fitting.
setClass("margin",
         slots=list(
           name="character",type="character",
           d="function",p="function",q="function",
           p.min="numeric",p.max="numeric",p.start="numeric"
         ))

# Uniform on [1 - param, 1 + param]; mean 1 for any param in [0, 1]
marginUnif<-new("margin",
                name="Uniform",type="unif",
                d=function(x,param) dunif(x,min=1-param,max=1+param),
                p=function(q,param) punif(q,min=1-param,max=1+param),
                q=function(p,param) qunif(p,min=1-param,max=1+param),
                p.min=0,p.max=1,p.start=0.5)

# Exponential with rate 1 (mean 1); the parameter is ignored, hence the
# degenerate parameter range p.min = p.max = 1
marginExp<-new("margin",
               name="Exponential",type="exp",
               d=function(x,param) dexp(x,1),
               p=function(q,param) pexp(q,1),
               q=function(p,param) qexp(p,1),
               p.min=1,p.max=1,p.start=1)

# Log-normal with meanlog = -param^2/2 and sdlog = param, which makes
# E[X] = exp(meanlog + sdlog^2/2) = 1
marginLnorm<-new("margin",
                 name="Log-Normal",type="lnorm",
                 d=function(x,param) dlnorm(x,meanlog=-param^2/2,sdlog=param),
                 p=function(x,param) pnorm((log(x)+param^2/2)/param),
                 q=function(p,param) exp(-param^2/2+param*qnorm(p)),
                 p.min=0,p.max=Inf,p.start=0.2)

# Weibull with shape a; scale 1/gamma(1+1/a) normalises the mean to 1
marginWeibull<-new("margin",
                   name="Weibull",type="weibull",
                   d=function(x,a) dweibull(x, shape = a, scale = 1/gamma(1+1/a)),
                   p=function(q,a) pweibull(q, shape = a, scale = 1/gamma(1+1/a)),
                   q=function(p,a) qweibull(p, shape = a, scale = 1/gamma(1+1/a)),
                   p.min=0,p.max=Inf,p.start=0.5)

# Frechet: d/p/qfrechet come from VGAM, so library(VGAM) must be loaded.
# Shape a > 1 (p.min = 1) so the mean exists; scale 1/gamma(1-1/a)
# normalises the mean to 1
marginFrechet<-new("margin",
                   name="Frechet",type="frechet",
                   d=function(x,a) dfrechet(x, location = 0, scale = 1/gamma(1-1/a), shape = a),
                   p=function(q,a) pfrechet(q, location = 0, scale = 1/gamma(1-1/a), shape = a),
                   q=function(p,a) qfrechet(p, location = 0, scale = 1/gamma(1-1/a), shape = a),
                   p.min=1,p.max=Inf,p.start=1.2)

# Gamma with shape a and scale 1/a (mean = shape * scale = 1)
marginGamma<-new("margin",
                 name="Gamma",type="gamma",
                 d=function(x,a) dgamma(x, shape = a, scale = 1/a),
                 p=function(q,a) pgamma(q, shape = a, scale = 1/a),
                 q=function(p,a) qgamma(p, shape = a, scale = 1/a),
                 p.min=0,p.max=Inf,p.start=1)

# Generalized Pareto: d/p/qgpd come from VGAM, so library(VGAM) must be
# loaded. xi = a < 1 so the mean exists; beta = 1 - a gives
# mean = beta/(1 - xi) = 1
marginGPD<-new("margin",
               name="GPD",type="gpd",
               d=function(x,a) dgpd(x, xi = a, mu = 0, beta = 1-a),
               p=function(q,a) pgpd(q, xi = a, mu = 0, beta = 1-a),
               q=function(p,a) qgpd(p, xi = a, mu = 0, beta = 1-a),
               p.min=-Inf,p.max=1,p.start=0)
|
f9a52f6e9da059e320c3ebd0831b2224acf82deb
|
bf060a4ee1e4d77c4483f9ceeb70ed7ede01c860
|
/Stocks 4.R
|
9d0b031a487dbf4563e796bb8e944ae517be6b91
|
[
"MIT"
] |
permissive
|
Doraflara/financas-r
|
599f32bc220c05d0dfb56cc845015e5627ce351b
|
0a676de5e1326b177be2b0baf66438a30e0b8a4b
|
refs/heads/master
| 2020-04-28T02:22:19.425110
| 2020-04-13T14:39:30
| 2020-04-13T14:39:30
| 174,895,756
| 19
| 6
|
MIT
| 2019-05-21T09:19:21
| 2019-03-10T23:55:25
|
R
|
ISO-8859-1
|
R
| false
| false
| 7,936
|
r
|
Stocks 4.R
|
library(tidyverse)
# Parsing of HTML/XML files
library(rvest)
# String manipulation
library(stringr)
# Verbose regular expressions
library(rebus)
# Eases DateTime manipulation
library(lubridate)
# Manipular dados
library(dplyr)
# Uso para unir colunas
library(tidyr)
# Para Gráficos
library(ggplot2)
# Para estilo do gráfico
library(bbplot)
# Para gráficos animados
library(gganimate)
# para API twitter
library(twitteR)
# biblioteca para analise de texto
library(tidytext)
# Biblioteca mineração de dados
library(tm)
# Bibliotecas auxiliares
library('openssl')
library('httpuv')
library(httr)
setwd("C:/Users/Isadora/Dropbox/R - análises/STOCKS_v02")
#html_session("you-url", use_proxy("no.73.189.42", 999))
########## ---------------------------------------------------------------

# Scrape company names from the "ADVFN" site -----------------------------

# One Bovespa listing URL per initial letter, A to Z
url <- paste("https://br.advfn.com/bolsa-de-valores/bovespa/",LETTERS,sep="")

# Fetch the table(s) of company names from one ADVFN listing page
nomes <- function(url){
  advfn <- read_html(url)
  companies <- advfn %>%
    # The '.' indicates the class
    html_nodes('.atoz-link-bov') %>%
    html_table()
  #companies1 <- companies[[1]]
  companies
}

# Build one data frame with all company names
empresas <- lapply(url, nomes)
# NOTE(review): `empresas` is piped into unlist() AND passed again as an
# argument, i.e. unlist(empresas, empresas, recursive = FALSE); the second
# copy lands in `use.names`. It appears to work, but a plain
# unlist(empresas, recursive = FALSE) was probably intended -- verify.
empresas <- empresas%>%
  unlist(empresas,recursive=FALSE)%>%
  bind_rows(.id = "Ação")
empresas[is.na(empresas)] = ''

# The scrape splits the ticker into one character per column; rename the
# columns and glue them back together into a single `ticker` column
colnames(empresas)[1] <- "nada"
colnames(empresas)[3] <- "AA"
colnames(empresas)[4:26] <- LETTERS[1:23]
empresas <- unite(empresas, "ticker", AA:W, remove = FALSE)%>%
  select(Ação, ticker)%>%
  mutate(ticker=gsub("_","",ticker))

# `item` = ticker with the ".SA" suffix (presumably the Yahoo Finance
# symbol for B3-listed stocks -- confirm against later use)
empresas <- mutate(empresas, item = paste(ticker,".SA",sep=""))
# Stock fundamentals ------------------------------------------------------

# Dividend history for one ticker, scraped from fundamentus.com.br.
# Downloads the page to a local file, parses its first table, converts
# decimal commas to dots, parses dd/mm/yyyy dates, and derives:
#   pr     - value per share (Valor / "Por quantas ações")
#   ano    - year of the payment (as character)
#   ticker - the queried ticker
# Returns NA if the page has no parsable table (tryCatch).
dividendos <- function(nome){
  url2 = paste("http://www.fundamentus.com.br/proventos.php?papel=",nome,"&tipo=2",sep="")
  download.file(url2, destfile = "scrapedpage.html", quiet=TRUE)
  dados1 <- read_html("scrapedpage.html")
  df3 <- tryCatch(html_table(html_nodes(dados1, "table")[[1]]) %>%
                    mutate(Valor=as.numeric(gsub(",",".",Valor)))%>%
                    mutate(Data=dmy(gsub("/","-",Data)))%>%
                    mutate(pr=Valor/as.numeric(`Por quantas ações`))%>%
                    mutate(ano=substr(Data,1,4))%>%
                    mutate(ticker=nome), error = function(e) NA)
  df3
}
# Scrape key fundamentals for one ticker from fundamentus.com.br:
# price, sector, market value, P/E, ROIC, dividend yield, net debt, liquid
# assets, plus summary statistics of the dividend history.
# Returns a one-row data frame.
empresaT <- function(nome){
  # Download the company's detail page and pull all '.txt' cells as text
  url1 = paste("http://www.fundamentus.com.br/detalhes.php?papel=",nome,"&tipo=2",sep="")
  download.file(url1, destfile = "scrapedpage1.html", quiet=TRUE)
  dados <- read_html("scrapedpage1.html") %>%
    # The '.' indicates the class
    html_nodes('.txt') %>%
    html_text() %>%
    # Trim additional white space
    str_trim() %>%
    # Convert the list into a vector
    unlist()
  # Dividend history for the same ticker (may be NA)
  df3 <- dividendos(nome)
  # Build a one-row data frame of company characteristics.
  # NOTE(review): the dados[4], dados[14], ... indices are purely
  # positional and depend on the current fundamentus page layout -- any
  # site redesign silently breaks them.
  nome <- data.frame(as.character(nome))%>%
    mutate(cotacao=sub(",",".",dados[4]))%>%                  # price
    mutate(setor=dados[14])%>%                                # sector
    mutate(valor.mercado=as.numeric(gsub("\\.","",dados[22])))%>%   # market value
    mutate(pl=as.numeric(sub(",",".",dados[33])))%>%          # P/E
    mutate(roic=as.numeric(sub("%","",sub(",",".",dados[65]))))%>%  # ROIC (%)
    mutate(div.yield=as.numeric(sub("%","",(sub(",",".",dados[68])))))%>%  # dividend yield (%)
    mutate(divida.liquida=as.numeric(gsub("\\.","",dados[94])))%>%  # net debt
    mutate(ativos.liq=as.numeric(gsub("\\.","",dados[96])))%>%      # liquid assets
    mutate(ticker=as.character(nome))%>%
    # Linear trend of per-share dividend over the years
    mutate(inclinacao=tryCatch(as.numeric(lm(df3$pr~df3$ano)$coefficients[2]),
                               error = function(e) NA)[[1]])%>%
    # Number of distinct years with dividend payments
    mutate(anos.div=tryCatch(length(year(df3$Data[!duplicated(year(df3$Data))])),
                             error = function(e) NA)[[1]])%>%
    # First and last year with a dividend payment
    mutate(anos.div.min=tryCatch(min((year(df3$Data[!duplicated(year(df3$Data))]))),
                                 error = function(e) NA)[[1]])%>%
    mutate(anos.div.max=tryCatch(max((year(df3$Data[!duplicated(year(df3$Data))]))),
                                 error = function(e) NA)[[1]])%>%
    # Total dividends paid over the trailing 365 days
    mutate(div.12.meses=tryCatch(df3 %>% filter(Data>=today()-365) %>% summarise(sum(Valor)),
                                 error = function(e) NA)[[1]])
}
# Intended to fetch EBITDA, fixed assets and working capital from the
# Yahoo Finance balance sheet.
# NOTE(review): this function looks unfinished. It downloads the Yahoo
# balance-sheet page but then extracts '.txt' nodes and positional indices
# (dados[4], dados[14], dados[22]) that match the *fundamentus* layout
# used in empresaT(), and the dividend history `df3` is computed but never
# used. Verify before relying on its output.
empresaI <- function(nome){
  # Download the Yahoo Finance balance-sheet page for the ticker
  url1 = paste("https://br.financas.yahoo.com/quote",nome,"/balance-sheet?p=",nome,"&.tsrc=fin-srch",sep="")
  download.file(url1, destfile = "scrapedpage1.html", quiet=TRUE)
  dados <- read_html("scrapedpage1.html") %>%
    # The '.' indicates the class
    html_nodes('.txt') %>%
    html_text() %>%
    # Trim additional white space
    str_trim() %>%
    # Convert the list into a vector
    unlist()
  # Dividend history (currently unused -- see NOTE above)
  df3 <- dividendos(nome)
  # One-row data frame with price, sector and market value
  nome <- data.frame(as.character(nome))%>%
    mutate(cotacao=sub(",",".",dados[4]))%>%
    mutate(setor=dados[14])%>%
    mutate(valor.mercado=as.numeric(gsub("\\.","",dados[22])))
}
# Fundamentals for every scraped company ----------------------------------
df1 <- lapply(empresas$ticker,empresaT)
y <- colnames(df1[[2]])
# Flatten the list of one-row data frames into a single data frame
df1 <- data.frame(matrix(unlist(df1), nrow=length(df1), byrow=T),stringsAsFactors=FALSE)
colnames(df1) <- y

# Recompute the "real" trailing yield from the dividend history
# (the yield reported by fundamentus is sometimes wrong)
df1 <- df1%>%
  mutate(div.yield.real=as.numeric(div.12.meses)/as.numeric(cotacao))
df <- left_join(empresas[,2:3],df1)
df <- df%>%
  filter(!is.na(cotacao))
rm(df1)

# Companies with good fundamentals: real yield > 6%, positive net liquid
# assets, 0 < P/E < 18, paying dividends since before 2009
df.filter <- df%>%
  filter(as.numeric(div.yield.real)>0.06)%>%
  filter(as.numeric(ativos.liq)-as.numeric(divida.liquida)>0)%>%
  filter(as.numeric(pl)<18&as.numeric(pl)>0)%>%
  filter(as.numeric(anos.div.min)<2009)%>%
  mutate(dif.ano=as.numeric(anos.div.max)-as.numeric(anos.div.min)-as.numeric(anos.div))%>%
  arrange(desc(as.numeric(div.yield.real)))

# Save both the full table and the filtered shortlist, dated
write.csv2(df,paste(today(),"-tudo.csv",sep=""),row.names = FALSE)
write.csv2(df.filter,paste(today(),"-filtro.csv",sep=""),row.names = FALSE)

# Plot --------------------------------------------------------------------
empresas.final <- df.filter[!duplicated(df.filter$empresas...2.),]
dividendo <- lapply(df.filter$ticker, dividendos)
dividendo <- do.call(rbind.data.frame, dividendo)
dividendo1 <- left_join(dividendo,df,by="ticker")%>%
  mutate(div.sob.cotação=pr/as.numeric(cotacao))

# Per-share dividends by year, one facet per ticker.
# NOTE(review): the labs() title/subtitle/caption below are leftover
# ToothGrowth template strings (overridden by ggtitle for the title, but
# the subtitle and caption still show) -- replace with real labels.
ggplot(dividendo1%>% filter(as.numeric(ano)>2005),
       aes(x=ano, y=pr))+geom_bar(stat="identity", fill="steelblue")+
  labs(title = "Effect of Vitamin C on Tooth Growth",
       subtitle = "Plot of length by dose",
       caption = "Data source: ToothGrowth")+
  ggtitle("Dividendos ao longo de 13 anos")+facet_wrap(~ticker)+ylim(0,10)+
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        plot.title = element_text())+bbc_style()

# Gordon growth model ------------------------------------------------------
# Required rate of return (k)
k <- 0.10
# Dividend growth rate (g)
g <- 0.01
# Fair value of a stock: current-year dividends / (k - g).
# Per ticker: take the median annual per-share dividend since 2006 and
# compare the implied fair value to the current price.
agrupamento <- filter(dividendo1,as.numeric(ano)>2005)%>%
  group_by(ticker,cotacao,ano,div.12.meses)%>%
  summarise(div.ano=sum(pr))%>%
  arrange(div.ano)%>%
  group_by(ticker,cotacao,div.12.meses)%>%
  summarise(dividendo=median(div.ano))%>%
  mutate(valor.ideal=as.numeric(dividendo)/(k-g))%>%
  mutate(crescimento=(valor.ideal-as.numeric(cotacao))/as.numeric(cotacao))
|
516e2aad279aedf8f35e886ede6107f88606fae3
|
cf586320910e8f27fe7e9dbe70c5909d550c3d44
|
/1_Intro_to_Shiny_materials/Build_UI_scripts_and_images/image_app.R
|
1a443f737b1b4ce48fa1cefd89a09e7fdf88620b
|
[] |
no_license
|
narensahu13/Shiny-Materials
|
bb757f94e06fe83c466dc169e629a36cbbdf4e87
|
b0ad1d3428126b8af2681446e8ea085eaba619e1
|
refs/heads/master
| 2020-05-16T13:03:32.929982
| 2019-04-23T17:28:05
| 2019-04-23T17:28:05
| 183,063,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
image_app.R
|
library(shiny)
# Static demo app: a titled page with an empty sidebar and an image in
# the main panel.

# Main-panel content: a remote image from the Shiny tutorial site
main_content <- mainPanel(
  img(src = "http://shiny.rstudio.com/tutorial/lesson2/www/bigorb.png",
      height = 400, width = 400)
)

# Page layout: title plus sidebar/main split
ui <- fluidPage(
  titlePanel("My Shiny App"),
  sidebarLayout(sidebarPanel(), main_content)
)

# No reactive logic is needed for a static page
server <- function(input, output) {}

shinyApp(ui = ui, server = server)
|
ed22d8d2edde401bf3e863dd1fc5c8cd95926b49
|
1a579f8496b6f146deb95de55c7b340851ba317a
|
/Part_4_Finish-Vectors_and_Begin_Matrices/scripts/System-time-and-redimensioning.R
|
bf758106191247555789334268007fe60d2fb42c
|
[] |
no_license
|
mdmmsrhs/Udemy-R-Programming-Course
|
b22d9f90a42bca8d43a6b7933af4044e0f2cae8f
|
f8c20585280ebf47ac990b494f171f0ac0ee737a
|
refs/heads/master
| 2021-01-10T07:26:25.795233
| 2016-03-18T16:08:20
| 2016-03-18T16:08:20
| 54,214,498
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
System-time-and-redimensioning.R
|
##################################################
#####     SYSTEM TIME FUNCTION               #####
##################################################
#####     REDIMENSIONING A VECTOR            #####
##################################################

## The system.time() function is a handy tool
## for checking the efficiency of your programs
## (teaching example: the require() call and timing idioms below are kept
## exactly as written for the course material)

require(stats)
# mad() computes median absolute deviation
# runif() generates a uniform sample
system.time(for(i in 1:100) mad(runif(1000)))
# ?mad ... if want to look mad() up
# ?runif ... go on, don't be lazy !

## Create a function that times computing the mean of a
## t-distributed sample (df = 4) n times in a loop
exT <- function(n = 10000) {
  # Purpose: Test if system.time works ok; n: loop size
  system.time(for(i in 1:n) x <- mean(rt(1000, df=4)))
}

# Run the following statements:
exT() #- about 4 secs on a 2.5GHz Xeon
# user  system elapsed
# 3.83    0.00    3.84
# program takes about 3.83 seconds to run
system.time(exT()) #~ +/- same
# user time is for the function
# system time is 'other stuff' computer busy with
# elapsed is total time
# user  system elapsed
# 3.89    0.00    3.88
#### REDIMENSIONING AN ARRAY (Vector in this case)
# This can help you make some of your programs
# run faster. The following two programs produce
# the same result, but the first is faster:
# (teaching example: slower() grows its vector on purpose to demonstrate
# the cost of repeated reallocation -- do not "fix" it)

## PROGRAM 1: preallocates x with rep(), then fills it in place
faster <- function() {
  n <- 1000000
  # fills up x with successive
  # integers up to one million
  x <- rep(0, n)
  for (i in 1:n) {
    x[i] <- i
  }
}

faster()
system.time(faster())

## PROGRAM 2: starts with a length-1 vector and extends it on every
## iteration, forcing R to reallocate/copy repeatedly
slower <- function() {
  # note am only filling to 100,000
  n <- 100000
  x <- 1
  for (i in 2:n) {
    x[i] <- i
  }
}

system.time(slower())

## Why is Program #2 so much slower?
## How do you 'fix it' to make it faster?
|
27e74533dcbc81cabeed689ec439f1e99c7aca4f
|
cc336a29f50450945ac308e3c3a1bf7ce43d8fc7
|
/Shannon_moore.R
|
093a10322bf02d73a04d0646a78a16c48daa1a18
|
[] |
no_license
|
HannahWhite/StabilityDrivers
|
5915a7cc02c2e1407f798fe05d895c77382a7818
|
de7cd5ca4fa5519f6df2510c3eac5f6f695ee096
|
refs/heads/master
| 2021-12-07T04:59:04.182809
| 2021-09-30T14:31:06
| 2021-09-30T14:31:06
| 184,769,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,235
|
r
|
Shannon_moore.R
|
###########################################################################################
### Calculate Shannon diversity of landcover within Moore neighbourhood of focal square ###
###########################################################################################

### Hannah White 27.07.2018

## Read in dataframe that contains %cover of different CORINE land cover classes
corine.agg <- read.csv('G:\\Postdoc Grassland Resilience\\LandCoverData\\corine.agg.csv', header = TRUE)

# Shift eastings/northings by 5 km (presumably to the square centre of a
# 10 km grid -- confirm against the source data)
corine.agg$east <- corine.agg$east + 5000
corine.agg$north <- corine.agg$north + 5000

### Chebyshev ("maximum") distance between squares; Moore neighbours of a
### 10 km grid square sit at exactly 10000 m
dd <- dist(corine.agg[,2:3], method = 'maximum')
matrixdd <- as.matrix(dd)

rm(dd)

# One Shannon value per focal square. Was hard-coded to 830 squares;
# nrow() keeps the script correct if the grid changes.
n.squares <- nrow(corine.agg)
landscape.het <- rep(NA_real_, n.squares)

for (i in seq_len(n.squares)) {
  # Moore neighbourhood: squares exactly one grid step away. The exact
  # equality test assumes coordinates are clean multiples of the grid
  # spacing -- TODO confirm for the input file.
  ind <- which(matrixdd[i,]==10000)
  temp.set <- corine.agg[ind,]

  # Total cover of each land-cover class (columns 4:18) over the neighbourhood
  habs <- colSums(temp.set[,4:18])

  # Shannon diversity H' = -sum(p * log(p)), computed directly. The
  # original called diversity(..., index = 'shannon') without loading the
  # vegan package, which fails in a fresh session; this reproduces vegan's
  # result exactly (0 * log(0) terms become NaN and are dropped by na.rm,
  # as vegan does, so an all-zero neighbourhood yields 0).
  p <- habs / sum(habs)
  landscape.het[i] <- -sum(p * log(p), na.rm = TRUE)
}

# add to original corine.agg where easting and northing haven't been transformed
# (note: this overwrites the input CSV with the extra column appended)
corine.agg$landscape.het <- landscape.het

write.csv(corine.agg, 'G:\\Postdoc Grassland Resilience\\LandCoverData\\corine.agg.csv', row.names = FALSE)
|
b7e710c52be5fe1cb9c1344a25c7fecfcc415fcb
|
862562e3b247cd95321195d91b46bf5d73b9596c
|
/archive/sim_logger.R
|
e550db00fbe94eeaffa11e199352ebbfb4d37fe4
|
[] |
no_license
|
bsaul/dr
|
dd6b733dfaf935b83423aaa0431f2ba23ad1c7a6
|
327f3537fcd70b4b62bb3e3b2a9cfa628fcbf3f7
|
refs/heads/main
| 2023-04-07T09:35:48.876785
| 2018-11-27T15:35:15
| 2018-11-27T15:35:15
| 358,607,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 997
|
r
|
sim_logger.R
|
#------------------------------------------------------------------------------#
#    TITLE: Simulation Logger
#   AUTHOR: Bradley Saul
#     DATE: 5/02/16
#  PURPOSE: Run the simulation pipeline end to end while capturing all console
#           output (with begin/end timestamps) into a per-run log file.
#------------------------------------------------------------------------------#

# Run timestamp; makes the log filename unique per invocation
ts <- Sys.time()

# estimation_file <- 'DR_sim_estimation_6'

# Simulation settings. These are assigned in the global environment;
# presumably the sourced scripts below read them from there -- confirm.
experimentID <- 'X007'
n_i <- 4
m <- 500
nsims <- 200
totalobs <- n_i * m * nsims     # total observations across all simulations
seed <- 198

# Log file: logs/<timestamp>_<experimentID>.log
simdir <- 'logs/'
filename <- paste0(ts, '_', experimentID, '.log')
fn <- paste0(simdir, filename)

# Redirect stdout into the log for the remainder of the script
sink(fn, append = TRUE, type = 'output')
# sink(fn, append = TRUE, type = 'message')
timestamp(prefix = "##------ Begin: ")

# Notes

# Create similuated dataset
source('R/create_sims.R', echo = T, max.deparse.length = 5000)

# Load necessary functions
source('R/functions_v5.R', echo = T, max.deparse.length = 5000)

# Run esimtations
source('development/DR_sim_estimation_9.R', echo = T, max.deparse.length = 5000)

timestamp(prefix = "##------ End: ")
# Restore console output
sink()

# Clean up logging temporaries
rm(ts, simdir, filename, fn)
|
333bb3bb51bb2b5150709dc6b33814fae4c8eee0
|
9dcff6306fb7c38df8d2130f9deb33703afa332d
|
/code/Trajectory_reconstruction_of_HNSCC_tumor_cells.R
|
b74f163710a82cb1c4c055388e34b1be5d1b9c46
|
[] |
no_license
|
cole-trapnell-lab/pseudospace
|
a951e1604a74cc3c9e72e8320ce57140d9002bb3
|
bae0e5bb5ecee5691842105e02f902ff8f73b7ad
|
refs/heads/master
| 2020-06-17T09:26:22.165737
| 2019-09-04T21:01:13
| 2019-09-04T21:01:13
| 195,880,675
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,958
|
r
|
Trajectory_reconstruction_of_HNSCC_tumor_cells.R
|
###### Load packages ######
# Load necessary packages for single cell RNA-Seq analysis including packages for downstream Gene Ontology Analysis
suppressPackageStartupMessages({
library(devtools)
library(stringr)
library(scales)
library(dtw)
library(monocle)
library(reshape2)
library(GSA)
library(limma)
library(DBI)
library(MASS)
library(plyr)
library(dplyr)
library(matrixStats)
library(piano)
library(cluster)
library(pheatmap)
library(grid)
library(RColorBrewer)
library(viridis)})
##### Load and define necessary functions #####
source("Pseudospace_support_functions.R")

# Align two Monocle trajectories with dynamic time warping (DTW).
# Uses the reference trajectory's ordering genes, smooths each gene's
# expression over a common 0-100 pseudotime grid in both datasets,
# variance-stabilises and z-scores the curves gene-wise, then warps the
# query trajectory onto the reference (open at both ends, Rabiner-Juang
# type-3c step pattern). Returns the alignment object from align_cells().
# NOTE(review): the `ref` and `query` arguments are never used in the body
# (presumably intended as labels) -- confirm before removing.
getDTW <- function(query_cds, ref_cds, ref, query, cores)
{
  # Genes that were used to order the reference trajectory drive the alignment
  alignment_genes <- row.names(subset(fData(ref_cds),use_for_ordering))

  ref_align_cds <- ref_cds[alignment_genes,]
  query_align_cds <- query_cds[alignment_genes,]

  # Rescale reference pseudotime to [0, 100] and sort cells along it
  pData(ref_align_cds)$cell_id <- row.names(pData(ref_align_cds))
  pData(ref_align_cds)$Pseudotime <- 100 * pData(ref_align_cds)$Pseudotime/max(pData(ref_align_cds)$Pseudotime)
  ref_align_cds <- ref_align_cds[alignment_genes, as.character(arrange(pData(ref_align_cds),
                                                                       Pseudotime)$cell_id)]

  # Same for the query trajectory
  pData(query_align_cds)$cell_id <- row.names(pData(query_align_cds))
  pData(query_align_cds)$Pseudotime <- 100 * pData(query_align_cds)$Pseudotime/max(pData(query_align_cds)$Pseudotime)
  query_align_cds <- query_align_cds[alignment_genes, as.character(arrange(pData(query_align_cds),
                                                                           Pseudotime)$cell_id)]

  # Smooth reference expression over 101 grid points; drop genes whose
  # smoothed curve contains NA, then variance-stabilise
  smoothed_ref_exprs <- genSmoothCurves(ref_align_cds[alignment_genes],
                                        data.frame(Pseudotime = seq(0, 100, by = 1)), cores = cores)
  smoothed_ref_exprs <- smoothed_ref_exprs[rowSums(is.na(smoothed_ref_exprs)) ==
                                             0, ]
  vst_smoothed_ref_exprs <- vstExprs(ref_cds, expr_matrix = smoothed_ref_exprs)

  # Same smoothing/VST for the query
  smoothed_query_exprs <- genSmoothCurves(query_align_cds[alignment_genes],
                                          data.frame(Pseudotime = seq(0, 100, by = 1)), cores = cores)
  smoothed_query_exprs <- smoothed_query_exprs[rowSums(is.na(smoothed_query_exprs)) ==
                                                 0, ]
  vst_smoothed_query_exprs <- vstExprs(query_cds, expr_matrix = smoothed_query_exprs)

  # Keep only genes that survived the NA filter in both datasets,
  # z-score each gene's curve, and run DTW
  alignment_genes <- intersect(row.names(vst_smoothed_ref_exprs),
                               row.names(vst_smoothed_query_exprs))

  ref_matrix <- t(scale(t(vst_smoothed_ref_exprs[alignment_genes,
                                                 ])))
  query_matrix <- t(scale(t(vst_smoothed_query_exprs[alignment_genes,
                                                     ])))

  ref_query_dtw <- align_cells(query_matrix, ref_matrix, step_pattern = rabinerJuangStepPattern(3,
                                                                                                "c"), open.begin = T, open.end = T)

  return(ref_query_dtw)
}
#### Load data ####
HNSCC_cds <- readRDS("HSNCC_cds.rds")

# Genes detected (count > 0) in more than 50 cells
expressed_genes <- row.names(fData(HNSCC_cds)[Matrix::rowSums(Biobase::exprs(HNSCC_cds) > 0) > 50 ,])

# Filter HNSCC data for samples with more than 100 cells: non-lymph-node
# cancer cells NOT processed with Maxima enzyme.
# Exclude HNSCC17 which gives a super funky trajectory that we can't compare to MCF10A. Would be cool
# to see if there are sub-clones.
metadata_summary <- pData(HNSCC_cds) %>%
  filter(clasified_as_cancer_cell == "1" & Maxima_enzyme == "0" & lymph_node == "0") %>%
  group_by(patient_id) %>% summarize(n = n()) %>% arrange(desc(n))

metadata_summary

# One cds per retained patient, restricted to the same cell filter
HNSCC_cds.list <- list()

HNSCC_cds.list[["HNSCC20"]] <- HNSCC_cds[,pData(HNSCC_cds)$patient_id == "HNSCC20" &
                                           pData(HNSCC_cds)$clasified_as_cancer_cell == "1" &
                                           pData(HNSCC_cds)$Maxima_enzyme == "0" &
                                           pData(HNSCC_cds)$lymph_node == "0"]

HNSCC_cds.list[["HNSCC18"]] <- HNSCC_cds[,pData(HNSCC_cds)$patient_id == "HNSCC18" &
                                           pData(HNSCC_cds)$clasified_as_cancer_cell == "1" &
                                           pData(HNSCC_cds)$Maxima_enzyme == "0" &
                                           pData(HNSCC_cds)$lymph_node == "0"]

HNSCC_cds.list[["HNSCC22"]] <- HNSCC_cds[,pData(HNSCC_cds)$patient_id == "HNSCC22" &
                                           pData(HNSCC_cds)$clasified_as_cancer_cell == "1" &
                                           pData(HNSCC_cds)$Maxima_enzyme == "0" &
                                           pData(HNSCC_cds)$lymph_node == "0"]

# Record each cell's barcode as an explicit column
pData(HNSCC_cds.list[["HNSCC20"]])$cell <- row.names(pData(HNSCC_cds.list[["HNSCC20"]]))
pData(HNSCC_cds.list[["HNSCC22"]])$cell <- row.names(pData(HNSCC_cds.list[["HNSCC22"]]))
pData(HNSCC_cds.list[["HNSCC18"]])$cell <- row.names(pData(HNSCC_cds.list[["HNSCC18"]]))

# Strip names attributes so these pData columns are plain vectors
for(patient in names(HNSCC_cds.list)){
  names(pData(HNSCC_cds.list[[patient]])$Maxima_enzyme) <- NULL
  names(pData(HNSCC_cds.list[[patient]])$lymph_node) <- NULL
  names(pData(HNSCC_cds.list[[patient]])$clasified_as_cancer_cell) <- NULL
  names(pData(HNSCC_cds.list[[patient]])$clasified_as_non_cancer_cell) <- NULL
  names(pData(HNSCC_cds.list[[patient]])$non_cancer_cell_type) <- NULL
}

# Sanity check: dimensions of each per-patient cds
for(patient in names(HNSCC_cds.list)){
  print(dim(HNSCC_cds.list[[patient]]))
}

# Estimate size factors / dispersions per patient
for(patient in names(HNSCC_cds.list)){
  HNSCC_cds.list[[patient]] <- preprocess_cds(HNSCC_cds.list[[patient]])
}

# tSNE embedding on the expressed genes (25 PCs)
for(patient in names(HNSCC_cds.list)){
  HNSCC_cds.list[[patient]] <- reduceDimension(HNSCC_cds.list[[patient]][expressed_genes,],
                                               max_components=2,
                                               norm_method = 'log', num_dim = 25, reduction_method = 'tSNE',
                                               verbose = T, cores = detectCores()-1)
}

# Trajectory inference: order by the expressed genes, then reduce with the
# default trajectory method
for(patient in names(HNSCC_cds.list)){
  HNSCC_cds.list[[patient]] <- setOrderingFilter(HNSCC_cds.list[[patient]], expressed_genes)
  HNSCC_cds.list[[patient]] <- reduceDimension(HNSCC_cds.list[[patient]], norm_method = "log")
}

# Order cells in pseudotime (HNSCC18's root direction set explicitly)
HNSCC_cds.list[["HNSCC20"]] <- orderCells(HNSCC_cds.list[["HNSCC20"]])
HNSCC_cds.list[["HNSCC22"]] <- orderCells(HNSCC_cds.list[["HNSCC22"]])
HNSCC_cds.list[["HNSCC18"]] <- orderCells(HNSCC_cds.list[["HNSCC18"]], reverse = FALSE)
# Trajectory and SNAI2-expression plots, one pair per patient -------------

plot_cell_trajectory(HNSCC_cds.list[["HNSCC20"]], color_by = "Pseudotime", show_branch_points = FALSE) +
  theme(legend.position="right",
        text=element_text(size=24, family="Arial"),
        legend.direction = "vertical",
        legend.title = element_text(size = 14),
        legend.text = element_text(size = 11),
        legend.key.width = unit(0.2, "in"),
        legend.key.height = unit(0.4, "in")) +
  scale_color_viridis(option = "magma") +
  ggsave(file = "HNSCC20_Trajectory.png", height = 4, width = 5)

# BUGFIX: the SNAI2 filter previously indexed the HNSCC20 cds with feature
# data from HNSCC22 (copy-paste slip). Both objects are subsets of the same
# HNSCC_cds and share the gene table, so the plot is unchanged, but the
# subset now uses the matching cds.
plot_genes_in_pseudotime(HNSCC_cds.list[["HNSCC20"]][fData(HNSCC_cds.list[["HNSCC20"]])$gene_short_name == "SNAI2",],
                         min_expr = 1, color_by = "Pseudotime") +
  scale_color_viridis(option = "magma") +
  theme(text = element_text(size = 24, family = "Arial")) +
  ggsave("SNAI2_expression_HNSCC20_pseudotime.png", height = 4, width = 6)

plot_cell_trajectory(HNSCC_cds.list[["HNSCC22"]], color_by = "Pseudotime", show_branch_points = FALSE) +
  theme(legend.position="right",
        text=element_text(size=24, family="Arial"),
        legend.direction = "vertical",
        legend.title = element_text(size = 14),
        legend.text = element_text(size = 11),
        legend.key.width = unit(0.2, "in"),
        legend.key.height = unit(0.4, "in")) +
  scale_color_viridis(option = "magma") +
  ggsave(file = "HNSCC22_Trajectory.png", height = 4, width = 5)

plot_genes_in_pseudotime(HNSCC_cds.list[["HNSCC22"]][fData(HNSCC_cds.list[["HNSCC22"]])$gene_short_name == "SNAI2",],
                         min_expr = 1, color_by = "Pseudotime") +
  scale_color_viridis(option = "magma") +
  theme(text = element_text(size = 24, family = "Arial")) +
  ggsave("SNAI2_expression_HNSCC22_pseudotime.png", height = 4, width = 6)

# HNSCC18 trajectory keeps its branch points (no show_branch_points arg)
plot_cell_trajectory(HNSCC_cds.list[["HNSCC18"]], color_by = "Pseudotime") +
  theme(legend.position="right",
        text=element_text(size=24, family="Arial"),
        legend.direction = "vertical",
        legend.title = element_text(size = 14),
        legend.text = element_text(size = 11),
        legend.key.width = unit(0.2, "in"),
        legend.key.height = unit(0.4, "in")) +
  scale_color_viridis(option = "magma") +
  ggsave(file = "HNSCC18_Trajectory.png", height = 4, width = 5)

plot_genes_in_pseudotime(HNSCC_cds.list[["HNSCC18"]][fData(HNSCC_cds.list[["HNSCC18"]])$gene_short_name == "SNAI2",],
                         min_expr = 1, color_by = "Pseudotime") +
  scale_color_viridis(option = "magma") +
  theme(text = element_text(size = 24, family = "Arial")) +
  ggsave("SNAI2_expression_HNSCC18_pseudotime.png", height = 4, width = 6)
# Load Mock and TGFB cds objects created in Figure1 code to align MCF10A
# and HNSCC trajectories
cds.list <- readRDS("pseudospace_processed_trajectories_cds.list.rds")

# Restrict both reference trajectories to genes expressed in the HNSCC data
HNSCC_genes <- unique(intersect(expressed_genes,row.names(fData(HNSCC_cds))))

cds.list[["Mock"]] <- cds.list[["Mock"]][HNSCC_genes,]
cds.list[["TGFB"]] <- cds.list[["TGFB"]][HNSCC_genes,]

# DTW-align each patient trajectory (query) against the Mock and TGFB
# reference trajectories
HNSCC20.to.Mock.dtw <- getDTW(HNSCC_cds.list[["HNSCC20"]], cds.list[["Mock"]],
                              ref = "Mock", query = "HNSCC20",
                              cores = 1)

HNSCC20.to.TGFB.dtw <- getDTW(HNSCC_cds.list[["HNSCC20"]], cds.list[["TGFB"]],
                              ref = "TGFB", query = "HNSCC20",
                              cores = 1)

HNSCC22.to.Mock.dtw <- getDTW(HNSCC_cds.list[["HNSCC22"]], cds.list[["Mock"]],
                              ref = "Mock", query = "HNSCC22",
                              cores = 1)

HNSCC22.to.TGFB.dtw <- getDTW(HNSCC_cds.list[["HNSCC22"]], cds.list[["TGFB"]],
                              ref = "TGFB", query = "HNSCC22",
                              cores = 1)

HNSCC18.to.Mock.dtw <- getDTW(HNSCC_cds.list[["HNSCC18"]], cds.list[["Mock"]],
                              ref = "Mock", query = "HNSCC18",
                              cores = 1)

HNSCC18.to.TGFB.dtw <- getDTW(HNSCC_cds.list[["HNSCC18"]], cds.list[["TGFB"]],
                              ref = "TGFB", query = "HNSCC18",
                              cores = 1)

# Density plots of each alignment path, one PDF per patient/reference pair
pdf("HNSCC20.to.Mock.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC20.to.Mock.dtw, normalize=T, xlab="Spontaneous EMT", ylab="HNSCC20 Pseudotime")
dev.off()

pdf("HNSCC22.to.Mock.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC22.to.Mock.dtw, normalize=T, xlab="Spontaneous EMT", ylab="HNSCC22 Pseudotime")
dev.off()

pdf("HNSCC18.to.Mock.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC18.to.Mock.dtw, normalize=T, xlab="Spontaneous EMT", ylab="HNSCC18 Pseudotime")
dev.off()

pdf("HNSCC20.to.TGFB.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC20.to.TGFB.dtw, normalize=T, xlab="TGFB EMT", ylab="HNSCC20 Pseudotime")
dev.off()

pdf("HNSCC22.to.TGFB.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC22.to.TGFB.dtw, normalize=T, xlab="TGFB EMT", ylab="HNSCC22 Pseudotime")
dev.off()

pdf("HNSCC18.to.TGFB.dtw.openEnd.openStart.pdf")
dtwPlotDensity(HNSCC18.to.TGFB.dtw, normalize=T, xlab="TGFB EMT", ylab="HNSCC18 Pseudotime")
dev.off()
|
3766ede27f86e5430b34e32cd2a07d3c142107dd
|
ba3e1a64b6a56469bb89fcb697acf5cbb51a9aca
|
/bigdata_ai_study/chapter_11/2.R
|
c35b495931b0b075a68cd3cd06832a56e28279b2
|
[] |
no_license
|
himitery/dankook
|
c5a2df9c989a31c4470212e7f6165ea4292f0b62
|
9f5208c7d9f2001794c67f4f27f45490ea260d4d
|
refs/heads/master
| 2023-06-08T21:23:22.394774
| 2021-06-27T20:00:39
| 2021-06-27T20:00:39
| 330,082,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 229
|
r
|
2.R
|
# Fit a simple linear regression of timber volume on girth using the
# built-in `trees` dataset.
model <- lm(Volume ~ Girth, data = trees)

# Predict volume for a given girth from the fitted line:
# slope * girth + intercept.
getVolume <- function(Girth) {
  b <- coef(model)
  b[2] * Girth + b[1]
}

# Predicted volumes at girths of 8.5, 9.0 and 9.5.
getVolume(8.5)
getVolume(9.0)
getVolume(9.5)
|
b00ef6d5794435a39c774038f5994eb771194fe2
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/tergm/R/coef.stergm.R
|
01adda1156268464283496b5e77a35f3383a1693
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
r
|
coef.stergm.R
|
# File R/coef.stergm.R in package tergm, part of the Statnet suite
# of packages for network analysis, http://statnet.org .
#
# This software is distributed under the GPL-3 license. It is free,
# open source, and has the attribution requirements (GPL Section 7) at
# http://statnet.org/attribution
#
# Copyright 2003-2014 Statnet Commons
#######################################################################
# Extract coefficients from a fitted stergm object as a two-element list:
# one coefficient vector for the formation model and one for the
# dissolution model.
coef.stergm <- function(object, ...) {
  list(
    formation = object$formation.fit$coef,
    dissolution = object$dissolution.fit$coef
  )
}
# Long-form alias so both coef() and coefficients() dispatch correctly.
coefficients.stergm <- coef.stergm
|
4e366236c481c516ebe2e380d084a3271331d0a6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Luminescence/examples/report_RLum.Rd.R
|
be9ff34a3fbf82a5df6a8f3baf9214fb84826eeb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,921
|
r
|
report_RLum.Rd.R
|
library(Luminescence)
### Name: report_RLum
### Title: Create a HTML report for (RLum) objects
### Aliases: report_RLum
### ** Examples
## Not run:
##D ## Example: RLum.Results ----
##D
##D # load example data
##D data("ExampleData.DeValues")
##D
##D # apply the MAM-3 age model and save results
##D mam <- calc_MinDose(ExampleData.DeValues$CA1, sigmab = 0.2)
##D
##D # create the HTML report
##D report_RLum(object = mam, file = "~/CA1_MAM.Rmd",
##D timestamp = FALSE,
##D title = "MAM-3 for sample CA1")
##D
##D # when creating a report the input file is automatically saved to a
##D # .Rds file (see saveRDS()).
##D mam_report <- readRDS("~/CA1_MAM.Rds")
##D all.equal(mam, mam_report)
##D
##D
##D ## Example: Temporary file & Viewer/Browser ----
##D
##D # (a)
##D # Specifying a filename is not necessarily required. If no filename is provided,
##D # the report is rendered in a temporary file. If you use the RStudio IDE, the
##D # temporary report is shown in the interactive Viewer pane.
##D report_RLum(object = mam)
##D
##D # (b)
##D # Additionally, you can view the HTML report in your system's default web browser.
##D report_RLum(object = mam, launch.browser = TRUE)
##D
##D
##D ## Example: RLum.Analysis ----
##D
##D data("ExampleData.RLum.Analysis")
##D
##D # create the HTML report (note that specifying a file
##D # extension is not necessary)
##D report_RLum(object = IRSAR.RF.Data, file = "~/IRSAR_RF")
##D
##D
##D ## Example: RLum.Data.Curve ----
##D
##D data.curve <- get_RLum(IRSAR.RF.Data)[[1]]
##D
##D # create the HTML report
##D report_RLum(object = data.curve, file = "~/Data_Curve")
##D
##D ## Example: Any other object ----
##D x <- list(x = 1:10,
##D y = runif(10, -5, 5),
##D z = data.frame(a = LETTERS[1:20], b = dnorm(0:9)),
##D NA)
##D
##D report_RLum(object = x, file = "~/arbitray_list")
## End(Not run)
|
16528aa44b68d3f1f83b1180d0c022f3ee297d17
|
abb970da8f3fab448d26bdb03fc947e414fdf031
|
/netsim/20_connectivity.R
|
320231d3f8f4dd0ccb33a9f98f2d5491bdab7e75
|
[] |
no_license
|
czarrar/causality_primer
|
bf7490769f7f1e5de90b45039d7553d2f304827c
|
e41ccdac47ea930ea5e01bc24d052b0a10f7ac92
|
refs/heads/master
| 2020-04-26T09:50:20.332272
| 2014-07-21T03:21:22
| 2014-07-21T03:21:22
| 21,718,538
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,241
|
r
|
20_connectivity.R
|
#' For now only subject 1
#' Goal here is to compare the accuracy of estimating a connection using NetSim
#' I will compare the regular correlation with a host of partial correlations
#'
#' Let's read in the data.
#+ read
net <- as.matrix(read.table("data/sim01/sub01_net.txt")) # reference
ts <- as.matrix(read.table("data/sim01/sub01_ts.txt"))
#+ compute the correlation
# Strictly-upper-triangular entries of a square matrix, as a vector
# (column-major order).
upper <- function(m) {
  m[upper.tri(m)]
}
# Upper-triangle values of `x` at positions where the reference network
# `net` has an edge (nonzero entry) ...
ztrue <- function(x, net) {
  upper(x)[upper(net) != 0]
}
# ... and at positions where it has no edge.
zfalse <- function(x, net) {
  upper(x)[upper(net) == 0]
}
cmat <- cor(ts)
mean(ztrue(cmat, net))
mean(zfalse(cmat, net))
table(upper(cmat)>0.2, upper(net)!=0)
#' compute the inverse covariance
#+ glasso
library(glasso)
s <- cov(ts)
a <- glasso(s, rho=0.01)
cmat2 <- -cov2cor(a$wi)
#round(cmat2, 2); round(net, 2)
table(me=upper(cmat2)>0.2, ref=upper(net)!=0)
#' below is an example from the package help
#' i'm a little confused as to what passint the a$w and a$wi exactly do
#' is it different then doing that all at once?
#+ glasso-example
set.seed(100)
x<-matrix(rnorm(50*20),ncol=20)
s<- var(x)
a<-glasso(s, rho=.01)
aa<-glasso(s,rho=.02, w.init=a$w, wi.init=a$wi)
a2<-glasso(s,rho=0.2)
#' other. this does a cross-validation...
#+ parcor
library(parcor)
a <- adalasso.net(ts,k=5)
table(me=upper(a$pcor.lasso)>0.15, ref=upper(net)!=0)
table(me=upper(a$pcor.adalasso)>0.15, ref=upper(net)!=0)
a <- pls.net(ts,ncomp=10,k=5)
table(me=upper(a$pcor)>0.16, ref=upper(net)!=0)
a <- ridge.net(ts,k=5)
table(me=upper(a$pcor)>0.16, ref=upper(net)!=0)
#' not sure
#+ quic
library(QUIC)
a <- QUIC(s, rho=0.01)
cmat2 <- -cov2cor(a$X)
table(me=upper(cmat2)>0.2, ref=upper(net)!=0)
#' corpcor
#+ corpcor
library(corpcor)
a <- -cov2cor(invcov.shrink(ts))
table(me=upper(a)>0.15, ref=upper(net)!=0)
a <- -cov2cor(invcor.shrink(ts))
table(me=upper(a)>0.15, ref=upper(net)!=0)
#' might be useful for larger matrices
#+ clime
library(clime)
a <- clime(ts)
cmat2 <- -cov2cor(a$Omegalist[[70]])
table(me=upper(cmat2)>0.15, ref=upper(net)!=0)
#' also good for larger matrices?
#+ scio
library(scio)
a <- scio.cv(s, 0.2) # doesn't work well
cmat2 <- cov2cor(a$w)
table(me=upper(cmat2)>0.16, ref=upper(net)!=0)
#' gives some kind of p-value
#+ lassoscore
library(lassoscore)
a <- glassoscore(ts, 0.2)
table(me=upper(-cov2cor(a$wi))>0, ref=upper(net)!=0)
table(me=upper((a$p.model < 0.0001)), ref=upper(net)!=0)
a <- mbscore(ts, 0.2)
table(me=upper((a$p.model < 0.001)), ref=upper(net)!=0)
#' large matrices?
#+ huge
library(huge)
out.mb = huge(ts)
out.ct = huge(ts, method = "ct")
out.glasso = huge(ts, method = "glasso")
out.select = huge.select(out.mb)
out.select$refit
out.select = huge.select(out.ct)
out.select$refit
out.select = huge.select(out.glasso)
out.select$refit
#' threshold
#+ threshold
# consider using fdrtool
library(fdrtool)
fdrtool(upper(cmat), statistic="correlation")
#qcor0...
# also consider wavestrap
# can use this to determine the threshold
library(wmtsa)
z <- wavBootstrap(ts[,1], n.realization=500)
z2 <- wavBootstrap(ts[,2], n.realization=500)
# tawny
library(tawny)
a1 <- cov_shrink(ts)
-solve(a)[1:5,1:5]
library(BurStFin)
a2 <- var.shrink.eqcor(ts)
a[1:5,1:5]
-solve(a)[1:5,1:5]
a1[1:2,1:2]
a2[1:2,1:2]
all.equal(as.numeric(a1),as.numeric(a2))
|
d3986ff354ba6918732d99d3ec3e344c1842060b
|
8e4aab72095a0fa790f19f6fc426db3979e11ad2
|
/metode.R
|
ec7b307e946b117c89e496ffb8d50dfd04ed9088
|
[] |
no_license
|
vsamija/Diplomski_rad_web
|
5a5e90528a454c1126d7f0e21a34b6aabc78b1eb
|
3fd909aede591752d110e8a6d85a4f32d13c11fe
|
refs/heads/main
| 2023-07-15T19:36:25.276140
| 2021-08-24T08:24:43
| 2021-08-24T08:24:43
| 399,215,201
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 934
|
r
|
metode.R
|
# Source every .R file found in a directory.
#
# Args:
#   path:      Directory to scan for R scripts.
#   recursive: Also descend into subdirectories?
#   local:     Where sourced code is evaluated: TRUE = the caller's frame,
#              FALSE = the global environment, or an environment object.
#
# Returns NULL (with a warning) when `path` is not an existing directory.
# Errors raised while sourcing an individual file are caught and reported
# so one bad file does not abort the remaining ones.
sourceDirectory <- function(path, recursive = FALSE, local = TRUE) {
  if (!dir.exists(path)) {
    warning(paste(path, "direktorij nije valjan!"))
    return(NULL)
  }
  # Resolve the evaluation environment. isTRUE()/isFALSE() are NA-safe,
  # unlike the previous `is.logical(local) && local`, which errored on
  # local = NA instead of reaching the informative stop() below.
  if (isTRUE(local)) {
    env <- parent.frame()
  } else if (isFALSE(local)) {
    env <- globalenv()
  } else if (is.environment(local)) {
    env <- local
  } else {
    stop("'local' must be TRUE, FALSE or an environment")
  }
  # Pattern anchored with "$" so only files ending in ".R" match; the old
  # ".*\\.R" also picked up e.g. ".Rds"/".Rmd" files. FALSE spelled out
  # instead of the reassignable shorthand `F`.
  files <- list.files(path = path, pattern = "\\.R$", all.files = FALSE,
                      full.names = TRUE, recursive = recursive)
  for (fileToSource in files) {
    tryCatch(
      {
        source(fileToSource, local = env)
        cat(fileToSource, "sourced.\n")
      },
      error = function(cond) {
        message("Učitavanje datoteke \"", fileToSource, "\" nije uspjelo.")
        message(cond)
      }
    )
  }
}
|
eab1fb382d6e71f4a2e8a1d0abab449f1b41c9af
|
173de58722f7d505f01c9fff6d0dff1dcf7bd605
|
/data_collecting_storing_retrieving/module02/Valentine_C_2.R
|
db3b30f5a1fa35af988111f0570eb07e24332df6
|
[] |
no_license
|
clintval/coursework
|
be69cb65feb6545125f28f071f1ab1229c52372b
|
3d3daf1b2d2e7ce007e14ad7091eb4d33432a323
|
refs/heads/master
| 2021-01-12T07:36:18.423760
| 2017-05-29T20:39:49
| 2017-05-29T20:39:49
| 80,670,673
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
Valentine_C_2.R
|
# Clint Valentine
# 01/21/2017
rm(list = ls())
# If the data.frame already exists then do not load again
if (!exists("data")) {
# Unzip data and process as csv with a header
unzip("AirlineDelays.zip")
data <- read.table("AirlineDelays.txt", header=T, sep=",")
}
TotalNumDelays <- function(Carrier) {
  # Total number of delay events recorded for one airline carrier.
  #
  # Args:
  #   Carrier: Airline carrier code present in the global `data` table.
  #
  # Returns:
  #   Count of positive entries across the delay columns (positions 6:7
  #   and 9:13), ignoring NAs.
  carrier_rows <- data[which(data$CARRIER == Carrier), ]
  delay_cols <- carrier_rows[, c(6:7, 9:13)]
  sum(vapply(delay_cols,
             function(col) sum(col > 0, na.rm = TRUE),
             integer(1)))
}
TotalDelaysByOrigin <- function(Origin) {
  # Total number of delay events recorded for flights from one origin.
  #
  # Args:
  #   Origin: Origin airport code present in the global `data` table.
  #
  # Returns:
  #   Count of positive entries across the delay columns (positions 6:7
  #   and 9:13), ignoring NAs.
  origin_rows <- data[which(data$ORIGIN == Origin), ]
  delay_cols <- origin_rows[, c(6:7, 9:13)]
  sum(vapply(delay_cols,
             function(col) sum(col > 0, na.rm = TRUE),
             integer(1)))
}
AvgDelay <- function(Carrier, Dest) {
  # Average arrival delay in minutes for a given carrier/destination pair.
  #
  # Args:
  #   Carrier: Airline carrier code present in the global `data` table.
  #   Dest:    Destination airport code present in the global `data` table.
  #
  # Returns:
  #   Mean of ARR_DELAY over matching rows, NAs removed (NaN when no rows match).
  matched <- data[which(data$CARRIER == Carrier & data$DEST == Dest), ]
  mean(matched$ARR_DELAY, na.rm = TRUE)
}
|
0b580b662cec9eb7dbc44a0ce863f41f963c9c9c
|
9fba50d3c86a4d441967db92d209ef9ee40610d4
|
/chapter1/points lines rectangles.R
|
992cc0aac9b585cc96610ed79377e28ae8677aee
|
[] |
no_license
|
data-better/dataviz
|
ce4241b0497fcb1e06efb6d7d3350bbb4dc8af21
|
04503ded9383cb6173e5c89c45dc085c829cd97b
|
refs/heads/master
| 2020-07-16T11:32:16.099552
| 2019-09-02T05:44:56
| 2019-09-02T05:44:56
| 205,780,580
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
points lines rectangles.R
|
# Chapter 1. Points, Lines, Rectangles
# Visualises the reigns of the 27 Joseon-dynasty kings with base-graphics
# primitives (points, segments, rect, polygon). Expects "chosun kings.txt"
# with at least Life and Period columns in the working directory.
kings <- read.table("chosun kings.txt", header=T)
str(kings)
head(kings)
# NOTE(review): attach() puts the columns (Life, Period) on the search path;
# the rest of the script relies on this. Prefer with()/kings$ in new code.
attach(kings)
median(Life)
median(Period)
# Lollipop chart of reign length per king, with dotted lines at zero and the
# mean. windows() opens a new graphics device and is Windows-only.
windows(height=4.5,width=7.5)
plot(c(0,0),c(28,60),type="n",xlim=c(0,28),ylim=c(-5,65),xlab="순서",ylab="재위기간",main="조선 왕조")
points(1:27,Period,pch=15,col="red")
segments(1:27,rep(0,27),1:27,Period,lwd=3,col="red")
abline(h=c(0,mean(Period)),lty="dotted",lwd=1,col="blue")
# Same chart built by overplotting two scatter layers; par(new=T) draws the
# second plot over the first without clearing the device.
windows(height=4.5,width=7.5)
plot(1:27,Period,pch=15,col="blue",xlim=c(0,28),ylim=c(-5,65),xlab="순서",ylab="재위기간",main="조선 왕조")
par(new=T)
plot(1:27,rep(0,27),pch=15,col="blue",xlim=c(0,28),ylim=c(-5,65),xlab="",ylab="",main="")
abline(h=c(0,mean(Period)),lty="dotted",lwd=1,col="blue")
segments(1:27,rep(0,27),1:27,Period,lwd=3,col="blue")
# Variant of the chart above: each stem drawn with lines() in a loop instead
# of one vectorised segments() call.
windows(height=4.5,width=7.5)
plot(1:27,Period,pch=15,col="blue",xlim=c(0,28),ylim=c(-5,65),xlab="순서",ylab="재위기간",main="조선 왕조")
par(new=T)
plot(1:27,rep(0,27),pch=15,col="blue",xlim=c(0,28),ylim=c(-5,65),xlab="",ylab="",main="")
abline(h=c(0,mean(Period)),lty="dotted",lwd=1,col="blue")
for (i in 1:27) lines(c(i,i),c(0,Period[i]),lwd=3,col="blue")
# Minimal rect() demo on an empty canvas.
windows(height=5,width=4.5)
plot(c(0,25),c(0,500),col="blue",type="n",xlab="",ylab="",main="")
rect(5,100,10,200,col="royalblue",border="royalblue")
rect(20,400,22.5,450,col="royalblue",border="royalblue")
# Step chart of cumulative reign years built from rect(); the dotted diagonal
# runs to (27, 518), presumably the dynasty's total span in years -- confirm.
windows(height=8,width=7.5)
P <- cumsum(Period)
plot(1:27,P,type="n",xlab="순서",ylab="누적년수",main="조선 왕조")
rect(0,0,1,P[1],col="royalblue",border="royalblue")
for (i in 2:27) rect(i-1,P[i-1],i,P[i],col="royalblue",border="royalblue")
segments(0,0,27,518,lty="dotted")
# Same step chart drawn with polygon(), one rainbow colour per king.
windows(height=8,width=7.5)
P <- cumsum(Period)
plot(1:27,P,type="n",xlab="순서",ylab="누적년수",main="조선 왕조")
polygon(c(0,0,1,1),c(0,P[1],P[1],0),col=rainbow(27)[1])
for (i in 2:27) polygon(c(i-1,i-1,i,i),c(P[i-1],P[i],P[i],P[i-1]),col=rainbow(27)[i])
segments(0,0,27,518,lty="dotted")
# end
|
a4d445c6dedd1958725d99d9ab11246e8684c40e
|
dee6c5bed839c814c08cad5306d44485352ebfc9
|
/R/att.R
|
f621f2b5df0b1e587f4723a492e8bead2042eefc
|
[] |
no_license
|
ehkennedy/npcausal
|
108bc3ce94e2454163ca946335ded2d6ee8cc286
|
56a5ac117a29258b67b94874be662a171b5131f7
|
refs/heads/master
| 2022-02-27T03:12:28.468368
| 2021-02-25T01:59:18
| 2021-02-25T01:59:18
| 91,638,398
| 73
| 24
| null | 2022-01-31T22:01:11
| 2017-05-18T02:08:13
|
R
|
UTF-8
|
R
| false
| false
| 3,475
|
r
|
att.R
|
#' @title Estimating average effect of treatment on the treated
#'
#' @description \code{att} is used to estimate the difference in mean outcome among treated subjects had a binary (unconfounded) treatment been withheld.
#'
#' @usage att(y, a, x, nsplits=2, sl.lib=c("SL.earth","SL.gam","SL.glm","SL.glmnet",
#' "SL.glm.interaction","SL.mean","SL.ranger"))
#'
#' @param y outcome of interest.
#' @param a binary treatment.
#' @param x covariate matrix.
#' @param nsplits integer number of sample splits for nuisance estimation.
#' If nsplits=1, sample splitting is not used, and nuisance functions are estimated
#' on full sample (in which case validity of SEs/CIs requires empirical
#' process conditions). Otherwise must have nsplits>1.
#' @param sl.lib algorithm library if using SuperLearner.
#' Default library includes "earth", "gam", "glm", "glmnet", "glm.interaction",
#' "mean", and "ranger".
#'
#' @return A list containing the following components:
#' \item{res}{ estimates/SEs/CIs/p-values for treated means and contrast.}
#' \item{nuis}{ subject-specific estimates of nuisance functions (i.e., propensity score and outcome regression) }
#' \item{ifvals}{ vector of estimated influence function values.}
#'
#' @examples
#' n <- 100; x <- matrix(rnorm(n*5),nrow=n)
#' a <- rbinom(n,1,.3); y <- rnorm(n)
#'
#' att.res <- att(y,a,x)
#'
#' @references (Also see references for function \code{ate})
#' @references Kennedy EH, Sjolander A, Small DS (2015). Semiparametric causal inference in matched cohort studies. \emph{Biometrika}.
#'
att <- function(y,a,x, nsplits=2,
                sl.lib=c("SL.earth","SL.gam","SL.glm","SL.glm.interaction","SL.mean",
                         "SL.ranger")){
  # NOTE(review): require() returns FALSE instead of erroring when a package
  # is missing; a missing dependency would only surface later as an
  # undefined-name error inside the loop.
  require("SuperLearner")
  require("earth")
  require("gam")
  require("ranger")
  require("rpart")
  n <- dim(x)[1]
  # Progress bar ticks twice per fold: once for the propensity fit and once
  # for the outcome-regression fit.
  pb <- txtProgressBar(min=0, max=2*nsplits, style=3)
  # Random fold labels 1..nsplits for cross-fitting (sample splitting).
  s <- sample(rep(1:nsplits,ceiling(n/nsplits))[1:n])
  # Cross-fitted nuisance estimates, filled fold by fold:
  # pihat = propensity score P(A=1|X); mu0hat = E(Y|A=0,X).
  pihat <- rep(NA,n); mu0hat <- rep(NA,n)
  pbcount <- 0
  Sys.sleep(0.1); setTxtProgressBar(pb,pbcount); pbcount <- pbcount+1
  for (vfold in 1:nsplits){
    train <- s!=vfold; test <- s==vfold
    # With nsplits=1 there is no held-out fold: train and predict on the full
    # sample (validity then relies on empirical-process conditions; see the
    # roxygen docs above).
    if (nsplits==1){ train <- test }
    # estimate propensity score on the training folds, predicting on the
    # held-out fold
    pifit <- SuperLearner(a[train],as.data.frame(x[train,]),
                          newX=as.data.frame(x[test,]), SL.library=sl.lib, family=binomial)
    pihat[test] <- pifit$SL.predict
    Sys.sleep(0.1)
    setTxtProgressBar(pb,pbcount); pbcount <- pbcount+1
    # estimate regression function E(Y|A=0,X) among untreated training
    # subjects, predicting on the held-out fold
    mu0fit <- SuperLearner(y[a==0 & train],
                           as.data.frame(x[a==0 & train,]),
                           newX=as.data.frame(x[test,]), SL.library=sl.lib)
    mu0hat[test] <- mu0fit$SL.predict
    Sys.sleep(0.1)
    setTxtProgressBar(pb,pbcount); pbcount <- pbcount+1
  }
  # One-step estimators built from the cross-fitted nuisances:
  # ey01hat estimates E{Y(0)|A=1}; psihat estimates the ATT E{Y-Y(0)|A=1}.
  ey01hat <- mean((a/mean(a))*mu0hat + ((1-a)/mean(1-a))*(y-mu0hat)*pihat/(1-pihat))
  psihat <- mean((a/mean(a))*(y-mu0hat) - ((1-a)/mean(1-a))*(y-mu0hat)*pihat/(1-pihat))
  # Influence-function values, one column per parameter in `param` below;
  # standard errors are their sample SDs scaled by 1/sqrt(n).
  ifvals <- cbind( a*(y-mean(y))/mean(a),
                   (a/mean(a))*(mu0hat - ey01hat) + ((1-a)/mean(1-a))*(y-mu0hat)*pihat/(1-pihat),
                   (a/mean(a))*(y-mu0hat - psihat) - ((1-a)/mean(1-a))*(y-mu0hat)*pihat/(1-pihat) )
  est <- c(mean(y[a==1]), ey01hat, psihat)
  se <- apply(ifvals,2,sd)/sqrt(n)
  # Wald-style 95% confidence intervals and two-sided normal p-values.
  ci.ll <- est-1.96*se; ci.ul <- est+1.96*se
  pval <- round(2*(1-pnorm(abs(est/se))),3)
  param <- c("E(Y|A=1)","E{Y(0)|A=1}", "E{Y-Y(0)|A=1}")
  res <- data.frame(parameter=param, est,se,ci.ll,ci.ul,pval)
  rownames(res) <- NULL
  Sys.sleep(0.1)
  setTxtProgressBar(pb,pbcount)
  close(pb)
  # Subject-level nuisance estimates returned for diagnostics.
  nuis <- data.frame(pi=pihat,mu0=mu0hat)
  # Print the results table, but return the full list invisibly so the
  # function composes cleanly in scripts.
  print(res)
  return(invisible(list(res=res, nuis=nuis, ifvals=as.data.frame(ifvals) )))
}
|
aef661562e09c13be54108dac2d352b6d424d1ed
|
c6679e02409faf47730541f0696abf2f8d0a05f7
|
/man/voice_default_weights.Rd
|
0756d8d91592e5580b6285575a6068b00e6e93f6
|
[
"MIT"
] |
permissive
|
pmcharrison/voicer
|
1d1c9d14556aee91e6e9e53c177407e0d50a674a
|
6f08e7066e15bf2ab091649161bcf760351c982a
|
refs/heads/master
| 2021-06-23T07:33:31.530673
| 2020-12-13T13:55:31
| 2020-12-13T13:55:31
| 151,832,740
| 3
| 0
|
NOASSERTION
| 2019-09-20T10:53:55
| 2018-10-06T11:21:09
|
R
|
UTF-8
|
R
| false
| true
| 775
|
rd
|
voice_default_weights.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/voice-opt.R
\docType{data}
\name{voice_default_weights}
\alias{voice_default_weights}
\title{Default weights}
\format{An object of class \code{numeric} of length 12.}
\usage{
voice_default_weights
}
\description{
Provides the default regression weights for the features
listed in \code{\link{voice_features}}.
These were optimized on the \code{\link[hcorp]{bach_chorales_1}} dataset.
}
\details{
The object constitutes a named numeric vector,
ordered to match \code{\link{voice_features}},
where the names identify the features and the
values correspond to the regression weights.
These weights are suitable for passing to the \code{weights} argument
of \code{\link{voice_opt}}.
}
\keyword{data}
|
6b2fe1c6f4b928226ac2b80ddd8bd04b4b6d22ec
|
f05eb7cbaa5f06897bd26f02e868ad4b1a333473
|
/london.R
|
032905f87c29c8dc9c09e67ceb0fd71b8060f54f
|
[] |
no_license
|
efarinre/Mentally_green
|
b959a42f9f7b4204e6f7bff90451aae48f78c470
|
0720f3bb81c8086b8b2da95c1901969fcaf827f0
|
refs/heads/main
| 2023-07-15T12:15:50.324527
| 2021-08-23T22:10:20
| 2021-08-23T22:10:20
| 398,518,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,033
|
r
|
london.R
|
# read in necessary packages
library(plyr)
library(tidyverse)
library(INLA)
library(spdep)
library(SpatialEpi)
library(rgdal)
library(tmap)
library(RSQLite)
library(tmaptools)
library(leaflet)
library(htmltools)
library(htmlwidgets)
library(broom)
library(plotly)
library(geojsonio)
library(mapview)
library(crosstalk)
library(viridis)
library(reshape2)
library(shinyjs)
library(janitor)
library(car)
library(corrplot)
library(shades)
library(ggpubr)
library(RDS)
library(hrbrthemes)
# Replace *path-to-folder* with the directory to this cloned repository
# read in the postcode -> OA -> LSOA -> MSOA -> LA lookup table. filter for only london lsoas
# (local-authority codes beginning "E09" are the London boroughs)
london <- read_csv(("*path-to-folder*/data/PCD_OA_LSOA_MSOA_LAD_NOV20_UK_LU/PCD_OA_LSOA_MSOA_LAD_NOV20_UK_LU.csv"),
                   col_names = TRUE,
                   locale = locale(encoding = "Latin1")) %>%
  dplyr::filter(str_detect(ladcd, "^E09"))
#head(london)
# keep the code and name columns only
london <- london[c("lsoa11cd", "lsoa11nm")]
# remove the duplicate lsoa rows, given that there is a row for each postcode
# (the lookup is one row per postcode, so each LSOA appears many times)
london <- london[!duplicated(london[,c("lsoa11cd", "lsoa11nm")]),]
# Reading in the map of england with lsoa boundaries
england_lsoas <- st_read("*path-to-folder*/data/Lower_Layer_Super_Output_Areas__December_2011__Boundaries_Full_Extent__BFE__EW_V3-shp/Lower_Layer_Super_Output_Areas__December_2011__Boundaries_Full_Extent__BFE__EW_V3.shp")
#head(england_lsoas)
# filter to just london -- the inner join keeps only LSOAs present in both the
# boundary file and the London lookup built above
ldn_lsoas <- inner_join(england_lsoas, london, by = c("LSOA11CD" = "lsoa11cd"))
ldn_lsoas <- ldn_lsoas[c("LSOA11CD", "LSOA11NM")]
#(ldn_lsoas)
# drop the full-England object to free memory
rm(england_lsoas)
# plot the outline
#plot(ldn_lsoas)
# plotting just the outline of the shape
#ldn_lsoas %>%
# st_geometry() %>%
# plot()
#################### read in the covariate from london data store - taken from the census
# ethnic propotions, and mean and median household income lsoa data
# I'll need to read in the pop dnsity data separately because the london data store doesn't provide the 2011 pop den value
# It's an issue because I have the 2011 pop density for cornwall, so they need to match
covariates1 <- read_csv("*path-to-folder*/data/lsoa_LDS_subset.csv")
# drop the 2013 population density column column
drop <- c("PPH_2013")
covariates1 <- covariates1[,!(names(covariates1) %in% drop)]
# rename
names(covariates1) <- c("codes", "names", "white_pc", "mixed_pc", "asian_pc", "black_pc",
"other_pc", "bame_pc", "mean_income", "median_income", "socialrenting_pc", "unemployed_pc")
#head(covariates1)
# read in the 2011 persons per hectare data
popden <- read_csv("*path-to-folder*/data/england_popdensity.csv")
names(popden) <- c("names", "code", "pop_density")
popden <- popden[c("code", "pop_density")]
# merge
covariates1 <- inner_join(covariates1, popden, by = c("codes" = "code"))
#head(covariates1)
#################### read in the CDRC Access to Health Assets and Hazards dataset
# there are raw scores, deciles and exponentiated scores. Different values were explored, hence the variety of covairates
covariates2 <- read_csv("*path-to-folder*/data/allvariableslsoawdeciles.csv")
head(covariates2)
# filter for the lsoa code, gp, a&e, pharmacy, green (passive) and green (active) accessbitlity columns
covariates2 <- covariates2[c("lsoa11", "gpp_dist", "ed_dist", "pharm_dist", "green_pas", "green_act")]
#head(covariates2)
names(covariates2) <- c("lsoa", "gp_access", "ae_access", "pharm_access", "green_access_prop", "green_access_dist")
#################### read in the composite INDEX values CDRC Access to Health Assets and Hazards dataset
covariates3 <- read_csv("*path-to-folder*/data/ahahv2domainsindex.csv")
head(covariates3)
# filter for the lsoa code, health domain deciles and blue/green space domain deciles columns
covariates3 <- covariates3[c("lsoa11", "h_exp", "g_exp", "h_dec", "g_dec")]
#head(covariates3)
names(covariates3) <- c("lsoa","health_access_valexp", "greenblue_access_valexp", "health_access_deciles", "greenblue_access_deciles")
# merge the dataframes together
ldn_covariates <- inner_join(covariates1, covariates2, by = c("codes" = "lsoa"))
#head(ldn_covariates)
# add the remaing columns via another merge
ldn_covariates <- inner_join(ldn_covariates, covariates3, by = c("codes" = "lsoa"))
#head(ldn_covariates)
# Checking for na values
#apply(ldn_covariates, 2, function(x) any(is.na(x)))
# Reduce to only relevant columns
ldn_covariates <- ldn_covariates[c("codes", "names", "pop_density", "green_access_prop", "green_access_dist", "unemployed_pc", "socialrenting_pc", "bame_pc")]
# EXPLORATORY ANALYSIS
########### Checking the distribution of the covariates
###### I'll also use Tukey's ladder of transformations to see if and how the covariates need to be transformed
## Persons per hectare - population density
ggplot(ldn_covariates, aes(x = pop_density)) +
geom_histogram(aes(y = ..density..),
binwidth = 10) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~pop_density,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
## Total BAME ethnic percentage
ggplot(ldn_covariates, aes(x = bame_pc )) +
geom_histogram(aes(y = ..density..),
binwidth = 0.5) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~bame_pc,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
## Green space (passive) access
ggplot(ldn_covariates, aes(x = green_access_prop)) +
geom_histogram(aes(y = ..density..),
binwidth = 0.1) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~green_access_prop,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
## Green space (active) access
ggplot(ldn_covariates, aes(x = green_access_dist)) +
geom_histogram(aes(y = ..density..),
binwidth = 0.1) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~green_access_dist,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
## Socially rented accomodation %
ggplot(ldn_covariates, aes(x = socialrenting_pc)) +
geom_histogram(aes(y = ..density..),
binwidth = 1) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~socialrenting_pc,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
## Unemployment rate
ggplot(ldn_covariates, aes(x = unemployed_pc)) +
geom_histogram(aes(y = ..density..),
binwidth = 1) +
geom_density(colour = "red",
size = 1,
adjust =1)
symbox(~unemployed_pc,
ldn_covariates,
na.rm=T,
powers = seq(-2,2,by=.5))
############ computing means and ranges of covariates
mean(ldn_covariates$pop_density)
range(ldn_covariates$pop_density)
mean(ldn_covariates$green_access_prop)
range(ldn_covariates$green_access_prop)
mean(ldn_covariates$green_access_dist)
range(ldn_covariates$green_access_dist)
mean(ldn_covariates$unemployed_pc)
range(ldn_covariates$unemployed_pc)
mean(ldn_covariates$socialrenting_pc)
range(ldn_covariates$socialrenting_pc)
mean(ldn_covariates$bame_pc)
range(ldn_covariates$bame_pc)
###rename
names(ldn_covariates) <- c("codes", "names", "pop_density","green_area", "distance_to_green",
"unemployment", "social_renters", "bame")
############# Boxplots
ldn_covariates_adj <- ldn_covariates[c("codes", "pop_density",
"green_area", "distance_to_green",
"unemployment", "social_renters",
"bame")]
# log-transform the covariates to make them more Gaussian
ldn_covariates_adj$pop_density <- log(ldn_covariates_adj$pop_density)
ldn_covariates_adj$green_area <- log(ldn_covariates_adj$green_area)
ldn_covariates_adj$distance_to_green <- log(ldn_covariates_adj$distance_to_green)
ldn_covariates_adj$unemployment <- log(ldn_covariates_adj$unemployment)
ldn_covariates_adj$social_renters <- log(ldn_covariates_adj$social_renters)
ldn_covariates_adj$bame <- log(ldn_covariates_adj$bame)
#using code from stackoverflow to get my data into the correct format for plotting nicer boxplots/violin plots
#url for page that helped with the data formatting: https://stackoverflow.com/questions/14604439/plot-multiple-boxplot-in-one-graph
boxplotdf_log <- ldn_covariates_adj %>%
dplyr::select(codes,
pop_density,
green_area,
distance_to_green,
unemployment,
social_renters,
bame)
boxplotdf_log.m <- melt(boxplotdf_log, id.var = "codes")
#show the new format of the data
#boxplotdf_log
# Boxplots
boxplot_ldn_1 <- ggplot(data = boxplotdf_log.m, aes(x=variable, y=value, fill=variable)) +
geom_boxplot() +
scale_fill_viridis(discrete = TRUE, alpha=1) +
geom_jitter(color="black", size=0.4, alpha=0.15) +
theme_ipsum() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
legend.position="none",
plot.title = element_text(size=11)
) +
ggtitle("") +
xlab("")
boxplot_ldn_1
#warnings()
###################### And now the unlogged covariates for comparison
boxplotdf <- ldn_covariates %>%
dplyr::select(codes,
pop_density,
green_area,
distance_to_green,
unemployment,
social_renters,
bame)
boxplotdf.m <- melt(boxplotdf, id.var = "codes")
#show the new format of the data
#boxplotdf
# Boxplots
boxplot_london_appendix <- ggplot(data = boxplotdf.m, aes(x=variable, y=value, fill=variable)) +
geom_boxplot() +
scale_fill_viridis(discrete = TRUE, alpha=1) +
geom_jitter(color = "black", size=0.4, alpha=0.15) +
theme_ipsum() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
legend.position="none",
plot.title = element_text(size=11)
) +
ggtitle("") +
xlab("")
boxplot_london_appendix
#warnings()
##############################################################
# join london data with the map of london
map <- inner_join(ldn_lsoas, ldn_covariates, by = c("LSOA11CD" = "codes"))
# drop the repeated lso names column
drop <- c("names")
map <- map[,!(names(map) %in% drop)]
#st_write(map, "london_covariates.shp")
# For the interactive map, because I'm using leaflet, I Need '+proj=longlat +datum=WGS84'
# Checking projection of my map of London
print(map) #Returns Projected CRS: OSGB 1936 / British National Grid
# which has the epsg code of 4326
# Reproject
mapREPROJECTED <- map %>%
st_transform(., 4326)
print(mapREPROJECTED)
############### Computing Correlation Matrix
cMartixVars <- ldn_covariates[c("pop_density",
"green_area",
"distance_to_green",
"unemployment",
"social_renters",
"bame")]
# ---------------------------------------------------------------------------
# Post-model analysis: covariate correlation check, then interactive mapping
# of 2011 SMRs and INLA-fitted relative risks for London LSOAs, and posterior
# density plots for the fixed effects.
# NOTE(review): flat script -- relies on objects created earlier in the file
# (cMartixVars, mapREPROJECTED, london); confirm these are in scope.
# ---------------------------------------------------------------------------
# correlation matrix of the candidate covariates
cMatrix <- cor(cMartixVars)
head(round(cMatrix, 2))
corrplot(cMatrix, method = "number")
##################
# As this analysis was conducted in the UCL Data Safe Haven for data ethics purposes, only the results were exportable
# READ IN THE RESULTS OF THE SAVED MODEL
# NOTE(review): load() restores an object by its saved name; assumes the file
# contains an object called `ldn` (an INLA fit) -- confirm.
load("ldn.rds")
summary(ldn)
####### Mapping relative risks of model 1.5 aka 1c
head(ldn$summary.fitted.values)
# Add these data points to the map of london, assigning the mean to the estimate of the relative risk
# and 0.025quant and 0.975quant to the lower and upper limits of 95% credible intervals of the risks
mapREPROJECTED$RR <- ldn$summary.fitted.values[, "mean"]
mapREPROJECTED$LL <- ldn$summary.fitted.values[, "0.025quant"]
mapREPROJECTED$UL <- ldn$summary.fitted.values[, "0.975quant"]
# We cannot export the observed of expected counts, but the SMR is okay
# read in the 2011 SMR, exported from the data safe haven
smr_2011 <- read_csv("*path-to-folder*/data/smr_2011.csv")
head(smr_2011)
# filter to just london smrs
ldn_smr <- inner_join(smr_2011, london, by = c("codes" = "lsoa11cd"))
ldn_smr <- ldn_smr[c("codes", "smr")]
head(ldn_smr)
# add smr column to mapReprojected
# NOTE(review): positional assignment assumes ldn_smr rows are in the same
# order as mapREPROJECTED features; a keyed join would be safer -- verify.
mapREPROJECTED$SMR <- ldn_smr$smr
head(mapREPROJECTED)
# save this to keep a consistent scale across all mappings
#st_write(mapREPROJECTED, "mapREPROJECTED_RR_ldn.shp")
################ plotting interactive map of the SMR
pal <- colorNumeric(palette = "YlOrRd", domain = mapREPROJECTED$SMR)
# HTML hover labels: LSOA name plus its SMR
labels <- sprintf("<strong> %s </strong> <br/> SMR: %s",
                  mapREPROJECTED$LSOA11NM, round(mapREPROJECTED$SMR, 2)) %>%
  lapply(htmltools::HTML)
twenty_11 <- leaflet(mapREPROJECTED) %>% addTiles() %>%
  addPolygons(color = "grey", weight = 1, fillColor = ~pal(SMR), fillOpacity = 0.5,
              highlightOptions = highlightOptions(weight = 4), label = labels,
              labelOptions = labelOptions(style = list("font-weight" = "normal",
                                                       padding = "3px 8px"),
                                          textsize = "15px",
                                          direction = "auto")) %>%
  addLegend(pal = pal, values = ~SMR, opacity = 0.5, title = "SMR",
            position = "bottomright")
twenty_11
#saveWidget(twenty_11, file="ldn_smr2011.html")
############### PLOTTING a Interactive map of the RR
# specify the palette
# NOTE(review): palette domain is SMR (not RR) so both maps share one scale --
# confirm that is intentional.
pal <- colorNumeric(palette = "YlOrRd", domain = mapREPROJECTED$SMR)
# specify the labels (SMR, covariates, and RR with its 95% credible interval)
labels <- sprintf("<strong> %s </strong> <br/> SMR: %s <br/> Person per hectare: %s <br/> Green area (km2): %s <br/>
Distance to Greenery (km): %s <br/> RR: %s (%s, %s)",
                  mapREPROJECTED$LSOA11NM, round(mapREPROJECTED$SMR, 2),
                  mapREPROJECTED$pop_density,
                  round(mapREPROJECTED$green_area, 2),
                  round(mapREPROJECTED$distance_to_green, 2),
                  round(mapREPROJECTED$RR, 2),
                  round(mapREPROJECTED$LL, 2), round(mapREPROJECTED$UL, 2)) %>%
  lapply(htmltools::HTML)
# apply final touches and now plot
rr2011 <- leaflet(mapREPROJECTED) %>% addTiles() %>%
  addPolygons(color = "grey", weight = 1, fillColor = ~pal(RR), fillOpacity = 0.5,
              highlightOptions = highlightOptions(weight = 4), label = labels,
              labelOptions = labelOptions(style = list("font-weight" = "normal",
                                                       padding = "3px 8px"),
                                          textsize = "15px",
                                          direction = "auto")) %>%
  addLegend(pal = pal, values = ~RR, opacity = 0.5, title = "RR",
            position = "bottomright")
rr2011
saveWidget(rr2011, file="ldn_rr2011.html")
# Computing the range of the two measures
range(mapREPROJECTED$SMR)
range(mapREPROJECTED$RR)
####### extracting RR mean, and 90/10 quantile ratio
mean(mapREPROJECTED$RR)
# most at risk / least at risk
ninety1 <- quantile(mapREPROJECTED$RR, .90)
ten1 <- quantile(mapREPROJECTED$RR, .10)
ninety1/ten1
########## plotting the posterior distribution curves of the covariates
# NOTE(review): the six blocks below repeat the same extract/plot pattern per
# covariate; a small helper function would remove the duplication.
### extract the marginal values from the results
marginal_popden <- inla.smarginal(ldn$marginals.fixed$`log(pop_density)`)
#create a dataframe in order to plot
marginal_popden <- data.frame(marginal_popden)
# now plot (blue vertical line marks zero, i.e. "no effect")
popden_density <- ggplot(marginal_popden, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[1]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
# next covariate
### extract the marginal values from the results
marginal_green_prop <- inla.smarginal(ldn$marginals.fixed$`log(green_access_prop)`)
#create a dataframe in order to plot
marginal_green_prop <- data.frame(marginal_green_prop)
# now plot
greenprop_density <- ggplot(marginal_green_prop, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[2]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
# next covariate
### extract the marginal values from the results
marginal_green_dist <- inla.smarginal(ldn$marginals.fixed$`log(green_access_dist)`)
#create a dataframe in order to plot
marginal_green_dist <- data.frame(marginal_green_dist)
# now plot
greendist_density <- ggplot(marginal_green_dist, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[3]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
# next covariate
### extract the marginal values from the results
marginal_unemployed <- inla.smarginal(ldn$marginals.fixed$`log(unemployed_pc + 1)`)
#create a dataframe in order to plot
marginal_unemployed <- data.frame(marginal_unemployed)
# now plot
unemployed_density <- ggplot(marginal_unemployed, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[4]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
# next covariate
### extract the marginal values from the results
marginal_socialrent <- inla.smarginal(ldn$marginals.fixed$`log(socialrenting_pc + 1)`)
#create a dataframe in order to plot
marginal_socialrent <- data.frame(marginal_socialrent)
# now plot
socialrent_density <- ggplot(marginal_socialrent, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[5]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
# next covariate
### extract the marginal values from the results
marginal_bame <- inla.smarginal(ldn$marginals.fixed$`log(bame_pc)`)
#create a dataframe in order to plot
marginal_bame <- data.frame(marginal_bame)
# now plot
bame_density <- ggplot(marginal_bame, aes(x = x, y = y)) + geom_line() +
  labs(x = expression(beta[6]), y = "Density") +
  geom_vline(xintercept = 0, col = "blue") + theme_bw()
###### arrange the ggplot density plots on one page
ggarrange(popden_density, greenprop_density, greendist_density,
          unemployed_density, socialrent_density, bame_density)
|
e740cd1dbce74b9d81c4f9dfbc37f743885b6957
|
e54c3f3d3538c676eff3140f889b8b454ec30324
|
/memorymigration/R/PlottingFunctions.R
|
b05a6f1760f0d47c7ac2bb21c4204c1e0d65d204
|
[] |
no_license
|
EliGurarie/memorymigration
|
acaf4094ba4f580db31bd2b9e25af7bbb8778ca3
|
192d44e030eb73729a7f7c3969cba520ca386177
|
refs/heads/master
| 2023-08-21T17:09:24.312679
| 2021-09-20T16:49:40
| 2021-09-20T16:49:40
| 327,377,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,507
|
r
|
PlottingFunctions.R
|
#' Plot Year List
#'
#' Plot the population distribution of a multi-year simulation, either as an
#' image/contour plot (with per-year labels and separators) or, optionally, as
#' a perspective plot.
#'
#' @param yearlist a list of T x X matrices describing the population
#' distribution for each year after the initial population; produced by the
#' runManyYears function
#' @param world world object: list containing (at least) the midpoint X-values,
#' the time points 1:tau, the dx value and the tau value. Set up by the
#' getSinePop function.
#' @param X.max maximum value of X-axis
#' @param tau maximum value of time T; only used as a fallback when
#' \code{world$tau} is absent (previously this argument was always ignored)
#' @param log logical; if \code{TRUE}, plot the log of the population density
#' @param persp whether to plot an image / contour or perspective plot
#' @seealso \link{runManyYears}; \link{printParameters}
#' @example examples/plotting_examples.R
#' @export
#'
plotYearList <- function(yearlist, world, X.max = 100, tau = 360, log = FALSE,
                         persp = FALSE, ...){
  n.years <- length(yearlist)
  # stack the yearly T x X matrices into one (n.years*T) x X matrix
  allyears <- do.call(rbind, yearlist)
  # prefer the tau stored in the world; fall back to the argument so the
  # function still works for worlds without a tau element
  if (!is.null(world$tau)) tau <- world$tau
  if (log) allyears <- log(allyears)
  time <- seq_len(nrow(allyears))
  x <- seq(0, X.max, length = ncol(allyears))
  if (!persp) {
    image(time, x, allyears)
    contour(time, x, allyears, add = TRUE)
    # label each year at the midpoint of its tau-long window
    mtext(side = 3, at = (1:n.years)*tau - tau/2, text = paste("Year", 1:n.years - 1))
    # grey separators between years; guard the single-year case, where
    # 1:(n.years-1) would be c(1, 0) and draw spurious lines
    if (n.years > 1) abline(v = tau*1:(n.years - 1), col = "grey")
  } else
    mypersp(time, x, allyears, ...)
}
#' Print Parameters
#'
#' Build an unevaluated expression of the form \code{list(name == value, ...)}
#' from a named list/world object, suitable for annotating plots (e.g. as a
#' plotmath title) with the parameter values of the diffusion-advection model.
#'
#' @param p named list of parameters (typically a world object), e.g.
#' \code{epsilon} (diffusion), \code{alpha} (resource following), \code{beta}
#' (spatial scale of sociality), \code{kappa} (memory following),
#' \code{lambda} (maximum speed)
#' @return an unevaluated expression displaying the parameter values
#' @seealso \link{plotMemories}; \link{getSinePop}; \link{getOptimalPop}
#' @example examples/plotting_examples.R
#' @export
#'
printParameters <- function(p) {
  # "name==value" for every parameter, joined into a single list(...) call
  pieces <- paste(names(p), p, sep = "==")
  expr_text <- paste0("list(", paste(pieces, collapse = ", "), ")")
  parse(text = expr_text)
}
#' My Persp
#'
#' Thin wrapper around \code{persp} with preset viewing angles, shading, and
#' axis labels for plotting population surfaces over time and space.
mypersp <- function(x, y, z, ...) {
  # border = NA suppresses facet borders (original used the partial match
  # "bor"); shade = TRUE gives the surface a lit appearance
  persp(x, y, z,
        theta = 45, phi = 45,
        border = NA, shade = TRUE,
        xlab = "time", ylab = "x", zlab = "population", ...)
}
#' Plotting efficiency
#'
#' Plot per-year foraging efficiency of a simulation as a line-and-point
#' series against year number.
#'
#' @param M a list of yearly population matrices (e.g. from runManyYears)
#' @param world world object; must contain a \code{resource} element
#' @param ... further arguments passed to \code{plot}
#' @export
plotEfficiency <- function(M, world, ...){
# one row per year: efficiency computed by computeEfficiency (defined
# elsewhere in the package); year index rebuilt as 1..n
FE1 <- ldply(M, computeEfficiency,
resource = world$resource,
world = world,
.id = "year") %>% mutate(year = 1:length(year))
plot(FE1, type = "o", ...)
}
#' Plot migration patterns over resource
#'
#' Plot the mean location (memory centroid) of the population within each year
#' of a simulation, one colored line per year, optionally over an image of the
#' resource landscape.
#'
#' @param M A simulation run from runManyYears.
#' @param world world object; list of 7: a population distribution across the time period in a T x X matrix,
#' a vector with midpoint X-values, the time points for the population as integers 1:tau, the minimum value of population distribution
#' (X.min), the maximum value of population distribution (X.max),
#' the dx value and the tau value. Can incorporate resource attribute into the world to make a list of 8.
#' Set up by the getSinePop/getOptimal function.
#' @param plotresource logical; if TRUE, draw the resource as a greyscale image
#' @param add logical; if TRUE, add lines to an existing plot instead of
#' opening a new one
#' @return a plot of the average migration of a simulation run for each year
#' @export
plotMigration <- function(M, world, plotresource = TRUE, add = FALSE){
# resource backdrop (col = NA still sets up the coordinate system)
if(plotresource)
with(world, image(time, X, resource, col = grey.colors(100))) else
with(world, image(time, X, resource, col = NA))
# per-year mean location over time; getMem is defined elsewhere in the package
memory.df <- ldply(M, function(l)
data.frame(time = 1:nrow(l),
memory = getMem(l, world = world)),.id = "year")
if(!add) with(memory.df, plot(time, memory, type= "n"))
n.years <- length(unique(memory.df$year))
# NOTE(review): palette() changes the global palette and is not restored on
# exit -- later plots in the session inherit it.
palette(rich.colors(n.years))
# draw one line per year, colored by year index
ddply(memory.df, "year", function(df)
with(df, lines(time, memory, col = as.integer(year))))
}
#' Plot estimated migration (with or without memory)
#'
#' Two-panel plot of migration estimates across years: left panel shows
#' migration timing (start and duration segments for the two seasonal
#' migrations), right panel shows the seasonal centroids.
#'
#' @param mhat data frame of migration estimates; expected columns: year,
#' t1/dt1 and t2/dt2 (timing and duration of the two migrations), and
#' x1/x2 (seasonal centroids)
#' @param x.peak optional true centroid value; drawn as dashed reference lines
#' @param t.peak optional true timing value; drawn as dashed reference lines
#' @param cols colors for the two seasons (summer, winter)
#' @param legend logical; draw a legend on the centroid panel
#' @export
plotMigrationHat <- function(mhat, x.peak = NULL, t.peak = NULL,
cols = c("darkorange", "darkblue"), legend = TRUE){
# NOTE(review): par() settings are not restored on exit
par(mfrow = c(1,2), mar = c(3,3,2,2), xpd = FALSE); with(mhat,{
# left panel: timing -- points at migration start/end, segments for duration
plot(year, t1, ylim = c(0,100), ylab = "migration timing (day of year)", col = cols[1])
segments(year, t1, year, t1+dt1, col = cols[1])
points(year, t1 + dt1, col = cols[1])
points(year, t2, col = cols[2])
points(year, t2 + dt2, col = cols[2])
segments(year, t2, year, t2+dt2, col = cols[2])
if(!is.null(t.peak))
abline(h = c(t.peak,100-t.peak), col =alpha("black",.3), lwd = 3, lty =3)
# right panel: seasonal centroids per year
plot(year, x1, type = "o", ylim = c(-100,100), ylab = "seasonal centroids", col = cols[1])
lines(year, x2, type = "o", col = cols[2])
if(!is.null(x.peak))
abline(h = c(-x.peak,x.peak), col =alpha("black",.3), lwd = 3, lty =3)
if(legend)
legend("topright", pch = c(1,1,NA), lty = c(1,1,3),
lwd = c(1,1,3),
legend = c( "summer", "winter", "true value"), col = c(cols, "darkgrey"), bty = "n")
})
}
#' Plotting simulation results
#'
#' Plot each (or a subset of) yearly population distributions of a simulation
#' as a row of image panels sharing a common color scale, with the parameter
#' values optionally shown in the outer title.
#'
#' @param sim A simulation run from runManyYears. Use sim$pop for the population.
#' @param world world object; list of 7: a population distribution across the time period in a T x X matrix,
#' a vector with midpoint X-values, the time points for the population as integers 1:tau, the minimum value of population distribution
#' (X.min), the maximum value of population distribution (X.max),
#' the dx value and the tau value. Can incorporate resource attribute into the world to make a list of 8.
#' Set up by the getSinePop/getOptimal function.
#' @param years optional vector of year indices to plot (default: all)
#' @param nrow number of panel rows
#' @param outer logical; print the simulation parameters as an outer title
#' @param labelyears logical; label each panel with its year
#' @param par if non-NULL, skip the internal par() layout setup
#' @param ylab y-axis label for the first panel
#' @example examples/plotting_examples.R
#' @export
plotManyRuns <- function(sim, world, years = NULL, nrow = 1, outer = TRUE,
labelyears = FALSE,
par = NULL, ylab = "", ...){
require(gplots)
# common color-scale maximum across the selected years so panels compare
if(is.null(years)){
years <- 1:length(sim)
n <- length(years)
zmax <- max(sapply(sim, max))
} else{
# NOTE(review): uses names "Year0", "Year1", ... -- assumes sim elements
# are named that way; confirm against runManyYears output.
zmax <- max(sapply(sim, max)[paste0("Year",c(0,years[-length(years)]))])
n <- length(years)
}
parameters <- attributes(sim)$parameter
# NOTE(review): the `par` argument shadows graphics::par as a value, but the
# call below still resolves to the function; settings are not restored.
if(is.null(par))
par(mfrow = c(nrow,ceiling(n/nrow)), mar = c(1,0,1,0), oma = c(2,2,4,2), tck = 0.01)
for(i in years){
image(1:world$tau, world$X, sim[[i]],
breaks = seq(0, zmax, length = 100), col = rich.colors(99),
yaxt = "n", xaxt = "n", ylab = "", xlab = "")
if(labelyears) title(paste("year", i-1), line = 0.3)
if(i == 1) mtext(side = 2, ylab, ...)
}
if(outer)
title(outer = TRUE, paste(names(parameters),"=",parameters, collapse = "; "))
}
#' Plot memories against years
#'
#' Plot the memory (mean location) trajectory of the population over a season,
#' as one ggplot path per simulated year.
#'
#' @param sim A simulation run from runManyYears
#' @param world world object; list of 7: a population distribution across the time period in a T x X matrix,
#' a vector with midpoint X-values, the time points for the population as integers 1:tau, the minimum value of population distribution
#' (X.min), the maximum value of population distribution (X.max),
#' the dx value and the tau value. Can incorporate resource attribute into the world to make a list of 8.
#' Set up by the getSinePop/getOptimal function.
#' @return a ggplot object (path per year, colored by year id)
#' @seealso \link{PlotYearList}; \link{getSinePop}; \link{getOptimalPop}; \link{runManyYears}
#' @example examples/plotting_examples.R
#' @export
plotMemories <- function(sim, world){
# one row per time step per year; .id column holds the year label
memory.df <- ldply(sim, function(l)
data.frame(time = 1:nrow(l),
memory = getMem(l, world = world)))
ggplot(memory.df, aes(time, memory, col = .id)) + geom_path() +
theme_few()
}
#' Double Plot for Shiny
#'
#' Side-by-side diagnostic plot used by the Shiny app: left, the migration
#' trajectories over the resource; right, per-year foraging efficiency.
#'
#' @param M a simulation run (list of yearly population matrices)
#' @param world world object; must contain a 3-d \code{resource} array (first
#' index selects the replicate/year slice)
#' @export
doublePlotForShiny <- function(M, world){
# NOTE(review): par() settings (layout, margins) are not restored on exit
par(mfrow = c(1,2), mar = c(2,2,1,1),
tck = 0.01, mgp = c(1.5,.25,0),
bty = "l", cex.lab = 1.25, las = 1, xpd = NA)
plotMigrationForShiny(M, world, add = TRUE)
# efficiency against the first resource slice; computeEfficiency is defined
# elsewhere in the package
FE1 <- ldply(M, computeEfficiency,
resource = world$resource[1,,], world = world,
.id = "year") %>% mutate(year = 1:length(year))
plot(FE1, type = "o")
}
#' Plot migration trajectories for the Shiny app
#'
#' Variant of \code{plotMigration} that indexes the first slice of a 3-d
#' resource array (world$resource[1,,]) instead of a 2-d resource matrix.
#'
#' @param M a simulation run (list of yearly population matrices)
#' @param world world object with a 3-d \code{resource} array
#' @param plotresource logical; draw the resource backdrop in greyscale
#' @param add logical; add to an existing plot instead of opening a new one
#' @export
plotMigrationForShiny <- function(M, world, plotresource = TRUE, add = FALSE){
# resource backdrop (col = NA still sets up the coordinate system)
if(plotresource)
with(world, image(time, X, resource[1,,], col = grey.colors(100))) else
with(world, image(time, X, resource[1,,], col = NA))
# per-year mean location over time; getMem is defined elsewhere in the package
memory.df <- ldply(M, function(l)
data.frame(time = 1:nrow(l),
memory = getMem(l, world = world)),.id = "year")
if(!add) with(memory.df, plot(time, memory, type= "n"))
n.years <- length(unique(memory.df$year))
# NOTE(review): palette() changes the global palette and is not restored
palette(rich.colors(n.years))
ddply(memory.df, "year", function(df)
with(df, lines(time, memory, col = as.integer(year))))
}
|
e84b647302f952c22f6798dbf5b350b242911f4e
|
bfa0753fd0b135f884dbdbc0222e670629127f45
|
/net.R
|
83c44b6e9b6d20af114547842dd8a9afeb7bef6b
|
[] |
no_license
|
sccmckenzie/cocktail-ingredients
|
882171998fed287e7a42e4c913b347db45448fc6
|
85cd0d84131cb9412b2392735302ab99a8f7b6fb
|
refs/heads/master
| 2022-11-20T07:35:31.351204
| 2020-07-12T19:08:15
| 2020-07-12T19:08:15
| 275,653,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,835
|
r
|
net.R
|
# Build and draw a circular drink-similarity network: drinks are nodes,
# an edge connects two drinks that share an ingredient, and both nodes and
# edges are colored by availability status (fulfilled / partial / dormant).
#
# data:      long data frame with columns `ingredient` and `drink`
# inventory: character vector of ingredients currently on hand
# fulfilled: character vector of drinks that can be fully made
#
# Relies on helpers/objects defined elsewhere in the project: circleFun()
# (node layout) and `colors` (named status palette).
generate_net <- function(data, inventory, fulfilled) {
# every pair of drinks sharing an ingredient, deduplicated by sorting each
# pair so (A,B) and (B,A) collapse to one edge id
edges1 <- data %>%
select(ingredient, drink) %>%
inner_join(., ., by = "ingredient") %>%
filter(drink.x != drink.y) %>%
rowwise() %>%
mutate(drink = list(sort(c(drink.x, drink.y))),
.keep = "unused") %>%
ungroup() %>%
distinct() %>%
rowid_to_column(var = "edge") %>%
unnest(cols = drink)
# drinks for which at least one required ingredient is in the inventory
partial <- edges1 %>% filter(ingredient %in% inventory) %>% distinct(drink) %>% pull()
# fixed seed so the shuffled node order (and thus layout) is reproducible
set.seed(17)
nodes <- edges1 %>%
distinct(drink) %>%
sample_n(size = n()) %>%
mutate(grp = 1,
status = case_when(
drink %in% fulfilled ~ "fulfilled",
drink %in% partial ~ "partial",
TRUE ~ "dormant")
)
# circular layout for nodes; labels pushed 20% outward from the circle
l <- circleFun(nodes$grp)
l_label <- as_tibble(l) %>%
mutate(across(everything(), ~ ..1 * 1.2),
label = nodes$drink)
# collapse each edge back to one row (A, B); an edge takes the "best"
# status of its two endpoints (fulfilled > partial > dormant)
edges2 <- edges1 %>%
left_join(nodes) %>%
group_by(ingredient, edge) %>%
mutate(status = factor(status, levels = c("fulfilled", "partial", "dormant"), ordered = TRUE)) %>%
summarise(tibble(A = drink[1], B = drink[2], status = max(status)),
.groups = "drop") %>%
arrange(desc(status)) %>%
modify_at(vars(status), as.character) %>%
relocate(A:B)
net <- graph_from_data_frame(d = edges2, vertices = nodes)
# draw: arcs between nodes, repelled labels outside the circle
ggraph(net, layout = l, circular = TRUE) +
geom_node_point(aes(color = status), size = 20) +
geom_edge_arc(aes(color = status), n = 100, width = 1) +
geom_node_label(data = l_label, aes(label = label), size = 5, repel = TRUE) +
scale_color_manual(values = colors) +
scale_edge_color_manual(values = colors) +
theme_void() +
coord_fixed(ylim = c(-1.5, 1.5), xlim = c(-1.5, 1.5)) +
theme(legend.position = "none")
}
|
03408f79dcb75a893763be7ea683499923cdda95
|
2c016a008e06fcb55d60e51cce0ba683674c71a9
|
/man/lmestSearch.Rd
|
5aa266c35f743aade61f0b269e0ac499ff691074
|
[] |
no_license
|
cran/LMest
|
31dddf3c579ed8c1a4ebedf3c72191ad113e97e7
|
c91ca5182c24fa437dba72f4545d2a3266355125
|
refs/heads/master
| 2023-09-01T08:43:51.610982
| 2023-08-27T14:50:02
| 2023-08-27T15:30:36
| 17,680,242
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,150
|
rd
|
lmestSearch.Rd
|
\name{lmestSearch}
\alias{lmestSearch}
\title{Search for the global maximum of the log-likelihood}
\description{
Function that searches for the global maximum of the log-likelihood of different models and selects the optimal number of states.}
\usage{
lmestSearch(responsesFormula = NULL, latentFormula = NULL,
data, index, k,
version = c("categorical", "continuous"),
weights = NULL, nrep = 2, tol1 = 10^-5,
tol2 = 10^-10, out_se = FALSE, miss.imp = FALSE, seed = NULL, ...)
}
\arguments{
\item{responsesFormula}{a symbolic description of the model to fit. A detailed description is given in the ‘Details’ section of \code{\link{lmest}}
}
\item{latentFormula}{a symbolic description of the model to fit. A detailed description is given in the ‘Details’ section of \code{\link{lmest}}
}
\item{data}{a \code{data.frame} in long format}
\item{index}{a character vector with two elements, the first indicating the name of the unit identifier, and the second the time occasions
}
\item{k}{a vector of integer values for the number of latent states}
\item{weights}{an optional vector of weights for the available responses}
\item{version}{type of responses for the LM model: "categorical" and "continuous"}
\item{nrep}{number of repetitions of each random initialization}
\item{tol1}{tolerance level for checking convergence of the algorithm in the random initializations}
\item{tol2}{tolerance level for checking convergence of the algorithm in the last deterministic initialization}
\item{out_se}{to compute the information matrix and standard errors (FALSE is the default option)}
\item{miss.imp}{Only for continuous responses: how to deal with missing values (TRUE for imputation through the imp.mix function, FALSE for missing at random assumption)}
\item{seed}{an integer value with the random number generator}
\item{\dots}{additional arguments to be passed to functions \code{\link{lmest}} or \code{\link{lmestCont}}}
}
\details{
The function combines deterministic and random initializations strategy to reach the global maximum of the model log-likelihood.
It uses one deterministic initialization (\code{start=0}) and a number of random initializations (\code{start=1}) proportional to the number of latent states. The tolerance level is set equal to 10^-5. Starting from the best solution obtained in this way, a final run is performed (\code{start=2}) with a default tolerance level equal to 10^-10.
Missing responses are allowed according to the model to be estimated.
}
\value{Returns an object of class \code{'LMsearch'} with the following components:
\item{out.single}{Output of every LM model estimated for each number of latent states given in input}
\item{Aic}{Values of the Akaike Information Criterion for each number of latent states given in input}
\item{Bic}{Values of the Bayesian Information Criterion for each number of latent states given in input}
\item{lkv}{Values of log-likelihood for each number of latent states given in input.}
}
\author{Francesco Bartolucci,
Silvia Pandolfi,
Fulvia Pennoni,
Alessio Farcomeni,
Alessio Serafini
}
\references{
%Bacci, S., Pandolfi, S., Pennoni, F. (2014). A comparison of some criteria for states selection in the latent Markov model for longitudinal data, \emph{Advances in Data Analysis and Classification}, \bold{8}, 125-145.
Bartolucci F., Pandolfi S., Pennoni F. (2017) LMest: An R Package for Latent Markov Models for Longitudinal Categorical
Data, \emph{Journal of Statistical Software}, \bold{81}(4), 1-38.
Bartolucci, F., Farcomeni, A. and Pennoni, F. (2013) \emph{Latent Markov Models for Longitudinal Data}, Chapman and Hall/CRC press.}
\examples{
### Example with data on drug use in wide format
data("data_drug")
long <- data_drug[,-6]
# add labels referred to the identifier
long <- data.frame(id = 1:nrow(long),long)
# reshape data from the wide to the long format
long <- reshape(long,direction = "long",
idvar = "id",
varying = list(2:ncol(long)))
out <- lmestSearch(data = long,
index = c("id","time"),
version = "categorical",
k = 1:3,
weights = data_drug[,6],
modBasic = 1,
seed = 123)
out
summary(out$out.single[[3]])
\dontrun{
### Example with data on self rated health
# LM model with covariates in the measurement model
data("data_SRHS_long")
SRHS <- data_SRHS_long[1:1000,]
# Categories rescaled to vary from 1 (“poor”) to 5 (“excellent”)
SRHS$srhs <- 5 - SRHS$srhs
out1 <- lmestSearch(data = SRHS,
index = c("id","t"),
version = "categorical",
responsesFormula = srhs ~ -1 +
I(gender - 1) +
I( 0 + (race == 2) + (race == 3)) +
I(0 + (education == 4)) +
I(0 + (education == 5)) + I(age - 50) +
I((age-50)^2/100),
k = 1:2,
out_se = TRUE,
seed = 123)
summary(out1)
summary(out1$out.single[[2]])
}
}
|
067991e37f8a7401fb37285c03e35d42f83635c3
|
d5485e869fd25da9e2256fec0a227bd8d61d6678
|
/interactive_heatmap.R
|
f96295120368bcda66735ad1cf091e393f2cf907
|
[] |
no_license
|
jhr9yp/Group-10-SYS-2202
|
873355d39f7308d5e426f0ed2c17f7f77d049f0c
|
e3b94c37fcb9255127748a1ae0b238d6c8095268
|
refs/heads/master
| 2021-05-25T16:58:08.962729
| 2020-05-06T03:27:00
| 2020-05-06T03:27:00
| 253,830,998
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
interactive_heatmap.R
|
# NOT RUN {
## Only run examples in interactive R sessions
# Minimal shinyWidgets demo: a dropdown panel that picks a dataset column and
# renders a US choropleth for it.
# NOTE(review): relies on a data frame `filler` and on plot_usmap() being in
# scope (usmap package) -- neither is defined in this snippet.
if (interactive()) {
library("shiny")
library("shinyWidgets")
#creating title page (emily)
ui <- fluidPage(
tags$h2("US MAP"),
br(),
dropdown(
#Sub title
tags$h3("Interactive US Map of Various Datasets"),
#Making dropdown bar to select the preferred map (emily)
pickerInput(inputId = 'xcol2',
label = 'DataSet',
choices = names(filler)[2:7],
options = list(`style` = "btn-info")),
#Animation to fade between selected graphs (emily)
style = "unite", icon = icon("gear"),
status = "danger", width = "300px",
animate = animateOptions(
enter = animations$fading_entrances$fadeInLeftBig,
exit = animations$fading_exits$fadeOutRightBig
)
),
#Plots the graph
plotOutput(outputId = 'plot2')
)
server <- function(input, output, session) {
#reactive value that changes based on input selected (james)
# NOTE(review): selectedData2 is defined but never used by plot2 below
selectedData2 <- reactive({
filler[, input$xcol2]
})
output$plot2 <- renderPlot({
#plot for map that adjusts based on input selected (James)
plot_usmap(data=filler, values = input$xcol2, color = "black") +
scale_fill_continuous(name = input$xcol2, label = scales::comma) +
theme(legend.position = "right")
})
}
shinyApp(ui = ui, server = server)
}
# }
|
5df356d29c0bc368911390cbfd4ca23150bc7e5a
|
3ad3200f27d58ae8565cdeccc0d0c717ffcd472c
|
/app/server.R
|
202409b11401f98589d632afc3f3198f97b41059
|
[] |
no_license
|
uvarc/uva-mos2-ml
|
e51e6b5dc25a336e8721f6b795274a23773554f1
|
9e5c1fc7365b520c76c82aea1802bdbd769462fc
|
refs/heads/master
| 2021-05-21T04:47:28.824421
| 2021-01-08T21:07:24
| 2021-01-08T21:07:24
| 252,549,676
| 0
| 0
| null | 2020-09-22T13:15:31
| 2020-04-02T19:38:43
|
R
|
UTF-8
|
R
| false
| false
| 6,823
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
# http://shiny.rstudio.com
#
# library(shiny)
# library(caret)
# library(knnGarden)
library(plotly)
library(caret)
# Fit the random forest once at app startup so every session reuses it.
# NOTE(review): model.training is expected to be loaded elsewhere (e.g.
# global.R) before this file is sourced.
set.seed(1)
# drop near-zero-variance predictors, then build a 0/1-coded copy
dataset.model = preProcess(model.training[,c(1:6)], method=c('nzv'))
dataset.norm = predict(dataset.model, model.training[,c(1:6)])
dataset.norm$Class<-ifelse(dataset.norm$Class=='yes', 1,0)
# NOTE(review): dataset.norm is not used again in this chunk -- possibly dead
# re-level the factor so randomForest sees a clean 2-level outcome
model.training$Class <- as.character(model.training$Class)
model.training$Class <- as.factor(model.training$Class)
# tuned hyperparameters (ntree/mtry) presumably from prior caret tuning --
# confirm; importance kept for variable-importance display
modelrf = randomForest(Class ~ ., data=model.training, ntree = 1573, mtry = 2, importance=TRUE)
# Define server logic for random distribution application
#Define SVM function to clean up and speed up code
# Single-row SVM ensemble prediction.
#
# Runs one input row through each of the `nboot` bootstrap SVMs in the global
# list svc.rbf.mos2.mono and returns the fraction of models voting "yes"
# (a probability-like score in [0, 1]).
#
# check.data: one-row data frame of predictor values.
# Returns: a single numeric in [0, 1].
svm.predictions <- function(check.data){
  # duplicate the row: predict() on some SVM wrappers misbehaves with a
  # single-row newdata, so two identical rows are scored and the first kept
  check.data = rbind(check.data, check.data)
  nboot = 100
  # one prediction vector per bootstrap model (previously a hand-rolled
  # repeat-loop that grew a list and hard-coded 100 instead of using nboot)
  predict.data.virtual <- lapply(seq_len(nboot), function(i)
    predict(svc.rbf.mos2.mono[[i]], check.data))
  predict.data.virtual.df = as.data.frame(predict.data.virtual)
  # per-row count of models not voting "no", i.e. the "yes" votes
  virtual.yes_count = apply(predict.data.virtual.df, 1, function(x) sum(x != "no"))
  # both duplicated rows agree, so return the first row's vote fraction
  (virtual.yes_count / nboot)[1]
}
# Multi-row SVM ensemble prediction.
#
# Like svm.predictions(), but for a data frame of many rows: each row is
# scored by every bootstrap SVM in the global list svc.rbf.mos2.mono, and the
# per-row fraction of "yes" votes is returned.
#
# check.data: data frame of predictor values (one or more rows).
# Returns: numeric vector in [0, 1], one element per input row.
svm.predictions.two <- function(check.data){
  nboot = 100
  # one prediction vector per bootstrap model (previously a repeat-loop with
  # a hard-coded 100 instead of nboot)
  predict.data.virtual <- lapply(seq_len(nboot), function(i)
    predict(svc.rbf.mos2.mono[[i]], check.data))
  predict.data.virtual.df = as.data.frame(predict.data.virtual)
  # per-row count of models not voting "no", i.e. the "yes" votes
  virtual.yes_count = apply(predict.data.virtual.df, 1, function(x) sum(x != "no"))
  virtual.yes_count / nboot
}
# Main Shiny server: wires the dataset table, parallel-coordinates explorer,
# prediction table (RF / k-NN / SVM), and growth-map contour plots.
# NOTE(review): relies on globals defined elsewhere (data.training, data1,
# col_labels, Method, Prediction, growth.data, knnVCN, parcoords) -- confirm
# they are created in global.R or similar.
shinyServer(function(input, output, session) {
# static dataset illustration
output$image <- renderImage({
list(src = "MoS2_Dataset_v2-1.png",
height = 400, width = 800)
}, deleteFile = FALSE)
# full training dataset as an interactive table
output$MoS2_table = renderDT(data.training)
rv <- reactiveValues(filtered = TRUE)
#Show parcoods grouped by input$group
# For more info on parcoords: https://cran.r-project.org/web/packages/parcoords/parcoords.pdf
output$pc <- renderParcoords({
# NOTE(review): dataframe.inputs is defined but never used below
dataframe.inputs = reactive({
data.training[,input$Title]
})
# filter to the selected paper titles, or show all rows when none selected
# NOTE(review): `data1` must pre-exist in the enclosing scope for rbind to
# work on the first iteration -- verify it is initialized elsewhere.
if(!is.null(input$Title)){
for(title in input$Title){
new = which(data.training$Title == title, arr.ind = TRUE)
data1 = rbind(data1, data.training[new,c(1,5:13)])
}
names(data1) = col_labels
}
else{
data1 = data.training[,c(5:13)]
names(data1) = col_labels[2:10]
}
parcoords(data1, color = list(colorBy = input$group, colorScale = "scaleOrdinal", colorScheme = "schemeCategory10"),
withD3 = TRUE, rownames = F, alphaOnBrushed = 0.15, brushMode = '1D-axes', autoresize = FALSE, reorderable = TRUE)
})
# current slider settings as a two-column (Name, Value) display table
sliderValues <- reactive({
data.frame(
Name = c("Mo_T",
"S_T",
"Highest_growth_T",
"Growth_Time",
"Growth_P"),
Value = as.character(c(input$Mo_T,
input$S_T,
input$Highest_growth_T,
input$Growth_Time,
input$Growth_P)),
stringsAsFactors = FALSE)
})
#Get input values from Predictions tab
# same slider settings shaped as a one-row data frame for predict()
val <- reactive({
data.frame(
Mo_T = input$Mo_T,
S_T = input$S_T,
Highest_growth_T = input$Highest_growth_T,
Growth_Time = input$Growth_Time,
Growth_P = input$Growth_P,
stringsAsFactors = F
)
})
# Show the values in an HTML table ----
output$concentrationvalues <- renderTable({
sliderValues()
}, striped = T, width = '100%', bordered = T)
#Select which methods you'd like to show predictions for based on input values
# NOTE(review): Method and Prediction must pre-exist for rbind to work, and
# they accumulate across button presses rather than resetting -- verify.
outtable <- observeEvent(input$getprobs, {
for(method in input$updateconcentration){
Method = rbind(Method, method)
if(method == "Random Forest"){
Prediction = rbind(Prediction, c(signif(predict(modelrf, val(), type="prob")[2], digits=2)))
}else if(method == "k-NN"){
Prediction = rbind(Prediction, (as.character(knnVCN(TrnX=model.training[,c(1:5)],
TstX=val(),
OrigTrnG=model.training[,c(6)],method='canberra', K=2, ShowObs = T)[,6])))
}else if(method == "SVM"){
Prediction = rbind(Prediction, as.character(svm.predictions(as.data.frame(val()))))
}
}
output$valtable <- renderTable({data.frame(Method, Prediction)}, striped = T, width = '100%', bordered = T)
})
#Show growth maps based on user input
# fix three process parameters from the sliders, then sweep Mo_T x Growth_P
# contour maps of the RF and SVM "will form" probabilities
growth_map <- observeEvent(input$gr_map, {
growth.data$S_T = input$gr_S_T
growth.data$Highest_growth_T = input$gr_Highest_growth_T
growth.data$Growth_Time = input$gr_Growth_Time
output$plot.rf = renderPlotly({
growth.data$yes = predict(modelrf, growth.data, type='prob')[,2]
p <- plot_ly(data = growth.data, x=~Mo_T,y=~Growth_P, z=~yes, type = "contour", colorscale='Jet')
p <- p %>% layout(title = "Random Forest Predictions", xaxis = list(title="Mo Precursor Temp (Celcius)"), yaxis = list(title="Growth Pressure (Torr)"))
p <- p %>% colorbar(limits=c(0.5,1), title="Will Form?")
})
#predict.data.virtual.labels.df = cbind(growth.data, virtual.yes_count, virtual.no_count)
#predict.data.virtual.labels.df$Predicted_label = with(predict.data.virtual.labels.df, ifelse(virtual.yes_count > (nboot/2), "yes", "no"))
#predict.data.virtual.labels.df$Confidence = with(predict.data.virtual.labels.df, ifelse((virtual.yes_count/nboot) < 0.66 & (virtual.yes_count/nboot) > 0.34, "Not_Confident", "Confident"))
#svc.virtual.predictions = cbind(virtual, predict.data.virtual.labels.df)
#growth.data$yes = svc.virtual.predictions$virtual.yes_count / 100
output$plot.svm = renderPlotly({
growth.data$yes = svm.predictions.two(growth.data[,c(1:5)])
p <- plot_ly(data = growth.data, x=~Mo_T,y=~Growth_P, z=~yes, type = "contour", colorscale='Jet')
p <- p %>% layout(title = "SVM Predictions", xaxis = list(title="Mo Precursor Temp (Celcius)"), yaxis = list(title="Growth Pressure (Torr)"))
p <- p %>% colorbar(limits=c(0.5,1), title="Will Form?")
})
}, ignoreInit = F, ignoreNULL = F)
})
|
fbddf006d68e74cd857000cf68a278004a6a27f0
|
b4b03b1fa76925763db9b913183ebe1745d78fb0
|
/plot4.R
|
ef81c478b4ea7510110a3d047db81707031faaca
|
[] |
no_license
|
TDELALOY/ExData_Plotting1
|
b8d6ee8926a4458efb888fc3e970a0aa95c17282
|
d99b653bfabdccac7973187db509146a6b5bdf4a
|
refs/heads/master
| 2021-01-15T13:23:31.581811
| 2015-04-10T21:43:06
| 2015-04-10T21:43:06
| 33,672,803
| 0
| 0
| null | 2015-04-09T14:12:57
| 2015-04-09T14:12:56
| null |
UTF-8
|
R
| false
| false
| 2,018
|
r
|
plot4.R
|
## DATA SCIENCE - COURSERA - T DELALOY
## we will be using the "Individual household electric power consumption Data Set"
## first time
# Script: read the household power data, subset two days, and write plot4.png
# (a 2x2 panel of power/voltage/sub-metering time series).
library(data.table)
#define working directory
# NOTE(review): hard-coded setwd() makes the script machine-specific and is
# generally discouraged; prefer running from the project root.
setwd("C:/Users/I051921/Desktop/Prediction (KXEN)/Coursera")
## Open File
file <- "EXPLORING/household_power_consumption.txt"
data <- fread(file)
## transform data
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## select data between 2 dates
data <- data[data$Date=="2007-02-01" | data$Date=="2007-02-02"]
## Convert data to a data frame
data <- data.frame(data)
## Convert columns 3 to 9 to numeric
# (values were read as character; as.character first avoids factor pitfalls)
for(i in c(3:9)) {
class(data[,i])
data[,i] <- as.numeric(as.character(data[,i]))
}
## Create Date_Time variable
data$DateTime <- paste(data$Date, data$Time)
## Convert Date_Time variable to proper format
data$DateTime <- strptime(data$DateTime, format="%Y-%m-%d %H:%M:%S")
## create Plot 4
png(filename = "plot4.png", width = 480, height = 480, units = "px", bg = "white")
par(mfrow= c(2, 2), col="black", mar=c(4,4,4,4))
# panel 1: global active power over time
plot(data$DateTime, data$Global_active_power, xaxt=NULL, xlab = "", ylab = "Global Active Power", type="n")
lines(data$DateTime, data$Global_active_power, type="S")
# panel 2: voltage over time
plot(data$DateTime, data$Voltage, xaxt=NULL, xlab = "datetime", ylab = "Voltage", type="n")
lines(data$DateTime, data$Voltage, type="S")
# panel 3: the three sub-metering series overlaid (black/red/blue)
plot(data$DateTime, data$Sub_metering_1, xaxt=NULL, xlab = "", ylab = "Energy sub metering", type="n")
lines(data$DateTime, data$Sub_metering_1, type="S" )
par(col="red")
lines(data$DateTime, data$Sub_metering_2, type="S" )
par(col="blue")
lines(data$DateTime, data$Sub_metering_3, type="S")
par(col="black")
legend("topright",border="white", pch="-",col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# panel 4: global reactive power over time
plot(data$DateTime, data$Global_reactive_power, xaxt=NULL, xlab = "datetime", ylab = "Global_reactive_power", type="n")
lines(data$DateTime, data$Global_reactive_power, type="S")
dev.off()
|
ef2f5a42e8cf51276cdb5ce1913db9f36844c866
|
aa9edd29b8e074991387ebdea0068f1e8685f5aa
|
/code/data_summary_functions.R
|
4002a6c9d2e58f5911dc65f66c8298e467f0229c
|
[
"MIT"
] |
permissive
|
mjarvenpaa/bacterial-colonization-model
|
db13f53d0a6ca92008eff7effef590c1fdcaffa5
|
474813ec33700e3353132f6a0277716dc8bff5d0
|
refs/heads/master
| 2021-06-30T02:11:23.340243
| 2019-06-14T13:32:00
| 2019-06-14T13:32:00
| 149,721,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,623
|
r
|
data_summary_functions.R
|
# Functions for computing distances, summaries and the discrepancy needed for the ABC inference.
count.total.number.mutations <- function(pop) {
  # Total number of SNP mutations carried by all strains in the population:
  # each strain contributes the size of the SNP set it points to.
  #
  # pop$snp.sets : list of SNP-id vectors, one per distinct SNP set
  # pop$strains  : integer vector mapping each strain to an index of pop$snp.sets
  #
  # Returns: a single non-negative number.
  # (The original had a dead `pop$snp.sets` expression here; removed.)
  snp.set.sizes <- vapply(pop$snp.sets, length, integer(1))
  num.snps.in.strains <- snp.set.sizes[pop$strains]
  total.number.mutations <- sum(num.snps.in.strains)
  return(total.number.mutations)
}
compute.distances.in.simulation <- function(save.path, save.extension, generations.to.consider, dist.path) {
  # For each requested generation: load the saved population object ('pop'),
  # compute its pairwise SNP-distance distribution, and save the result under
  # dist.path. File names follow the pop<gen><ext>.RData / distances<gen><ext>.RData
  # convention used elsewhere in the project.
  for (gen.idx in generations.to.consider) {
    pop.file <- paste(save.path, '/pop', gen.idx, save.extension, '.RData', sep='')
    load(pop.file)  # restores the object named 'pop' into this scope
    distance.distribution <- compute.distance.distribution(pop=pop)
    out.file <- paste(dist.path, '/distances', gen.idx, save.extension, '.RData', sep='')
    # save() stores the object under the name 'distance.distribution', which
    # downstream load() calls rely on.
    save(distance.distribution, file=out.file)
  }
}
compute.distance.distribution <- function(pop) {
  # Computes the full pairwise SNP-distance distribution of a population.
  #
  # The value returned by this function is a list with two
  # fields:
  #
  # "snp.set.dist" is a distance matrix for SNP sets
  # (the number of SNPs by which two sets differ)
  #
  # "dist.elem.count" is also a matrix. Element (i,j)
  # tells how many strain pairs there are, such that the
  # first strain has SNP set i, and the second has set j.
  #
  # So, together these two specify the full distance
  # distribution between all strain pairs. The distance
  # between two specific strains x and y can be obtained
  # as snp.set.dist[pop$strains[x],pop$strains[y]]
  #
  # The returned list also carries pop$gen through as $gen.

  # Compute distance matrix between snp.sets
  n.snp.sets <- length(pop$snp.sets)
  snp.set.dist <- matrix(0, n.snp.sets, n.snp.sets)
  dist.elem.count <- matrix(0, n.snp.sets, n.snp.sets)
  # How many strains carry each SNP set (names of the table = set indices as text)
  strain.type.counts <- table(pop$strains)
  # Number of zero-distance pairs within each SNP set: choose(count, 2)
  n.within.pairs <- choose(strain.type.counts,2)
  # Check that all SNP sets are present in the population
  if (!all(as.numeric(names(strain.type.counts)) == 1:n.snp.sets)) {
    stop('Corrupt data structure')
  }
  # Note the special case of no mutations: then snp.sets contain only empty list, the code now handles this
  if (n.snp.sets == 1) {
    snp.set.dist <- matrix(0, 1, 1)
    dist.elem.count <- matrix(0, 1, 1)
    dist.elem.count[1, 1] <- n.within.pairs["1"]
  }
  else if (n.snp.sets > 1) {
    # General case: loop over unordered pairs of distinct SNP sets,
    # filling only the upper triangle (symmetrized at the end).
    for (set1.index in seq(1, n.snp.sets - 1)) {
      set1 <- pop$snp.sets[[set1.index]]
      for (set2.index in seq(set1.index + 1, n.snp.sets)) {
        set2 <- pop$snp.sets[[set2.index]]
        # Distance is the number of SNPs that are found
        # in exactly one or the other of the SNP sets,
        # but not in both.
        snp.set.dist[set1.index, set2.index] <- length(union(set1, set2)) - length(intersect(set1, set2))
        # Compute the number of strain pairs that
        # have this distance, i.e., one of the strains
        # has set1 and the other has set2.
        dist.elem.count[set1.index, set2.index] <- strain.type.counts[as.character(set1.index)] * strain.type.counts[as.character(set2.index)]
      }
      dist.elem.count[set1.index, set1.index] <- n.within.pairs[as.character(set1.index)]
    }
    # The outer loop stops at n.snp.sets - 1, so fill the last diagonal cell here
    dist.elem.count[n.snp.sets, n.snp.sets] <- n.within.pairs[as.character(n.snp.sets)]
  } else {
    # n.snp.sets == 0: an empty population is treated as corrupt input
    stop('Corrupt data structure')
  }
  res <- list()
  res$snp.set.dist <- snp.set.dist + t(snp.set.dist) # Symmetric
  res$dist.elem.count <- dist.elem.count
  res$gen <- pop$gen
  return(res)
}
############################################
# Summaries and discrepancy for ABC fitting
############################################
summaries.one.patient <- function(distance.distribution) {
  # Summary statistic for one patient's distance distribution: the mean
  # pairwise SNP distance, i.e. sum(distance * pair count) over every cell
  # of the distance matrix, divided by the total number of strain pairs.
  total.pairs <- sum(distance.distribution$dist.elem.count)
  weighted.sum <- sum(distance.distribution$snp.set.dist *
                        distance.distribution$dist.elem.count)
  # If further summaries are added later, extend the return value here.
  weighted.sum / total.pairs
}
discrepancy <- function(discrepancy.type, dists, dists.AM, data, data.AM, save.interval, save.interval.AM,
                        print.discrepancy=FALSE, neff=NULL, mu=NULL) {
  # Computes the discrepancy for fitting the simulation model with ABC, all data
  # points (i.e. patients 1-8 and A-M) can be used for computing the discrepancy.
  #
  # Arguments:
  #   discrepancy.type : list controlling the computation; fields used here are
  #                      $type ('l1_sep' or 'eucl'), $times18_1/$times18_2
  #                      (observation times for patients 1-8), $ignore1219 and
  #                      $ignoreAM (flags to drop parts of the data).
  #   dists, dists.AM  : simulated distance distributions, indexed by patient
  #                      and then by saved time point.
  #   data, data.AM    : observed pairwise-distance samples ($first/$last per
  #                      patient 1-8; one element per patient A-M).
  #   save.interval, save.interval.AM : simulated time points.
  #   print.discrepancy, neff, mu     : debug printing switch and its labels.
  # Returns: a single non-negative discrepancy value.
  rem.index <- 2 # index of patient #1219 in the data
  # NOTE: THIS NOW WORKS ONLY IF LENGTH(SAVE.INTERVAL) > 1
  if (length(save.interval) <= 1) {
    stop('This now works only if save time points are given i.e. save.interval must be a vector longer than 1.')
  }
  # data lengths
  n.first <- length(data$first)
  n.AM <- length(data.AM)
  n.s.AM <- length(save.interval.AM)
  ## DISCREPANCY "1"
  if (discrepancy.type$type == 'l1_sep') {
    ## Compute the L1 distances separately and then the total discrepancy
    # patients 1-8: one L1 distance per patient for each of the two samples
    d.first <- rep(0,n.first)
    d.last <- rep(0,n.first)
    for (i in 1:n.first) {
      # get the time of making the observation (index into save.interval)
      time1.ind <- which(discrepancy.type$times18_1[i]==save.interval)
      time2.ind <- which(discrepancy.type$times18_2[i]==save.interval)
      if (length(time1.ind) < 1 || length(time2.ind) < 1) {
        stop('Time of observed data was not simulated.')
      }
      d.first[i] <- l1.distance.discrete(dists[[i]][[time1.ind]], data$first[[i]])
      d.last[i] <- l1.distance.discrete(dists[[i]][[time2.ind]], data$last[[i]])
    }
    # deal with patient #1219 (optionally excluded from the fit)
    if (discrepancy.type$ignore1219) {
      d.first <- d.first[-rem.index]
      d.last <- d.last[-rem.index]
    }
    # patients A-M
    d.AM <- rep(0,n.AM)
    if (!discrepancy.type$ignoreAM) {
      for (i in 1:n.AM) {
        dis <- rep(0,n.s.AM)
        for (j in 1:n.s.AM) {
          dis[j] <- l1.distance.discrete(dists.AM[[i]][[j]], data.AM[[i]])
        }
        # Since the obs.time is not known, we take such simulated times that yield minimum discrepancy
        d.AM[i] <- min(dis)
      }
    }
    # total discrepancy: average over all contributing terms; the denominator
    # counts two terms per remaining 1-8 patient plus (optionally) the A-M terms
    d <- (sum(d.first) + sum(d.last) + sum(d.AM))/(2*length(d.first) + (!discrepancy.type$ignoreAM)*n.AM)
    if (print.discrepancy) {
      print.discrepancy.debug.l1(neff, mu, d, d.first, d.last, d.AM)
    }
    return(d)
  }
  ## ALTERNATIVE FUNCTIONS FOR THE DISCREPANCY ARE ADDITIONALLY COMPUTED BELOW
  ## 0) Compute true data summaries (this and some other values could be precomputed but it is very fast anyway)
  s.obs1 <- sapply(data$first, mean)
  s.obs2 <- sapply(data$last, mean)
  s.obs.AM1 <- sapply(data.AM, mean)
  ## Compute the summaries from the simulation model
  # 1) Summaries for the first data set (patients 1-8, possibly #1219 to be excluded)
  s.model1 <- rep(NA,n.first)
  s.model2 <- rep(NA,n.first)
  for (i in 1:n.first) {
    time1.ind <- which(discrepancy.type$times18_1[i]==save.interval)
    time2.ind <- which(discrepancy.type$times18_2[i]==save.interval)
    if (length(time1.ind) < 1 || length(time2.ind) < 1) {
      stop('Time of observed data was not simulated.')
    }
    s.model1[i] <- summaries.one.patient(dists[[i]][[time1.ind]])
    s.model2[i] <- summaries.one.patient(dists[[i]][[time2.ind]])
  }
  if (discrepancy.type$ignore1219) {
    # remove the index 2 that corresponds to the patient #1219
    rem.index <- 2
    s.obs1 <- s.obs1[-rem.index]
    s.obs2 <- s.obs2[-rem.index]
    s.model1 <- s.model1[-rem.index]
    s.model2 <- s.model2[-rem.index]
  }
  # 2) Summaries for the second data set (patients A-M); rows = time points,
  # columns = patients
  s.model.AM1 <- matrix(NA,n.s.AM,n.AM)
  for (i in 1:n.AM) {
    for (j in 1:n.s.AM) {
      s.model.AM1[j,i] <- summaries.one.patient(dists.AM[[i]][[j]])
    }
  }
  ## Compute the discrepancy using the summaries computed above
  d.AM <- 0
  if (discrepancy.type$type == 'eucl') {
    d1 <- norm_vec(s.obs1 - s.model1)
    d2 <- norm_vec(s.obs2 - s.model2)
    #d2 <- norm_vec((s.obs1 - s.obs2) - (s.model1 - s.model2))
    if (!discrepancy.type$ignoreAM) {
      # per A-M patient: best (minimum) absolute error over simulated time points
      abs.errors.AM <- apply(abs(t(t(s.model.AM1) - s.obs.AM1)), 2, min)
      #print('A-M abs. errors:'); print(abs.errors.AM); print(' ')
      d.AM <- norm_vec(abs.errors.AM)
    }
    ## Final discrepancy over all data
    #d <- sqrt(1*d1^2 + 1*d2^2 + 1*d.AM^2)
    d <- sum(d1 + d2 + d.AM)
  } else {
    stop('Incorrect discrepancy type.')
  }
  ## Debug printing
  if (print.discrepancy) {
    print.discrepancy.debug(neff, mu, d, d1, d2, d.AM, s.obs1, s.obs2, s.obs.AM1, s.model1, s.model2, s.model.AM1)
  }
  return(d)
}
l1.distance.discrete <- function(distance.distribution1, obs.samples) {
  # Compute the L1 distance between two pmf's that are represented by samples;
  # the common support is assumed to be the non-negative integers 0..maxd.
  # distance.distribution1 : list with $snp.set.dist (distance matrix) and
  #                          $dist.elem.count (pair counts per matrix cell)
  # obs.samples            : vector of observed integer distances
  maxd <- max(distance.distribution1$snp.set.dist, obs.samples)
  support <- 0:maxd
  # Weighted histogram of simulated distances: sum the pair counts of every
  # matrix cell whose distance equals d.
  counts1 <- vapply(support, function(d) {
    hits <- distance.distribution1$snp.set.dist == d
    sum(distance.distribution1$dist.elem.count[hits])
  }, numeric(1))
  # Plain histogram of the observed samples on the same support.
  counts2 <- vapply(support, function(d) sum(obs.samples == d), numeric(1))
  # Half the sum of absolute differences of the two normalized pmf's (total
  # variation distance).
  0.5 * sum(abs(counts1 / sum(counts1) - counts2 / sum(counts2)))
}
# Euclidean (L2) norm of a numeric vector.
norm_vec <- function(x) sqrt(sum(x * x))
# Alternative: absolute-sum (L1) norm. To switch the discrepancy to L1,
# redefine norm_vec as function(x) norm_abs_sum(x).
norm_abs_sum <- function(x) sum(abs(x))
print.discrepancy.debug.l1 <- function(neff, mu, d, d.first, d.last, d.AM) {
  # Prints some information about the discrepancy. Intended for testing.
  # Shows the parameter values (neff, mu), the total discrepancy d, and the
  # per-data-set contributions: patients 1-8 (first and second samples) and
  # patients A-M. Called for its console output only.
  # print parameter and discrepancy values
  cat(paste('neff: ', neff, ', mu: ', mu, ', d = ', d, sep = ''))
  cat('\n')
  # print discrepancy contributions from data sets
  print('data_1-8_first:')
  print(d.first)
  cat('\n')
  print('data_1-8_second:')
  print(d.last)
  cat('\n')
  print('data_AM:')
  print(d.AM)
  cat('\n\n')
}
print.discrepancy.debug <- function(neff, mu, d, d1, d2, d.AM, s.obs1, s.obs2, s.obs.AM1, s.model1, s.model2, s.model.AM1) {
  # Prints some information about the summaries and the discrepancy. Intended for testing.
  # Shows the parameter values (neff, mu), the total discrepancy and its three
  # components (d1, d2, d.AM), then observed-vs-model summary tables for
  # patients 1-8 and the A-M data. Called for its console output only.
  # print parameter and discrepancy values
  cat(paste('neff: ', neff, ', mu: ', mu, ', d = ', d, sep = ''))
  cat('\n')
  cat(paste('d1: ', d1, ', d2: ', d2, ', d.AM: ', d.AM, sep = ''))
  cat('\n')
  # print summaries: observed vs. model side by side
  s1 <- cbind(s.obs1, s.model1)
  s2 <- cbind(s.obs2, s.model2)
  #s.AM <- cbind(s.obs.AM1, s.model.AM1)
  print('data_1-8_first:')
  print(s1)
  cat('\n')
  print('data_1-8_second:')
  print(s2)
  cat('\n')
  print('data_AM:')
  print(s.obs.AM1)
  print(s.model.AM1)
  cat('\n\n')
}
|
e90b1fae15b6689121b5a74d0730b4cdf4b0106e
|
cef45d66c5e2166578927c0440eee9f74814446d
|
/tugas_matlan1.r
|
ac36a9181df3e3b0e87daf70a766b8e94b97c092
|
[] |
no_license
|
difaim/17523193
|
230841aea8ffcd82ce4fd3d578751d60b7aab2e7
|
16e371ca493bbafab520275cfff536b45b440d86
|
refs/heads/master
| 2020-03-30T08:43:14.969329
| 2018-10-21T12:10:06
| 2018-10-21T12:10:06
| 151,034,470
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
tugas_matlan1.r
|
"example 1"
f1<-function(x){
result<-x^2-5
return(result)
}
f1(2)
"example 2"
f2<-function(x){
y<-sqrt(x)
return(y)
}
f2(4)
f3<-function(x){
y<-x^3+x^2-6
return(y)
}
f3(1)
f4<-function(a,b){
z<-a*b*(b-a)
return(z)
}
f4(2,1)
f5<-function(m,n){
z<-(sqrt(m)/n)+m-2*n
return(z)
}
f5(1,1)
f2<-function(a,b){
z<-(a+b)%*%a%*%b
return(z)
}
c<-matrix(1:4,2,2,TRUE)
d<-matrix(c(1,2,4,4),2,2,TRUE)
f2(c,d)
c<-matrix(1:4,2,2,TRUE)
d<-matrix(c(1,2,4,4),2,2,TRUE)
f3<-function(m,n){
z<-det(m)*n-m%*%n
return(z)
}
f3(c,d)
f4<-function(x){
z<-solve(x)%*%x-2*x
return(z)
}
c<-matrix(1:4,2,2,TRUE)
d<-matrix(c(1,2,4,4),2,2,TRUE)
f4(c)
|
e7705ad573665331d10243171d3e6ceed160a741
|
fd2a324a9505ed29e6136a06216edce999fa97a1
|
/man/NMixMCMCinity.Rd
|
c00782e826d6da2ba490292aa52fc0a1ba039ef6
|
[] |
no_license
|
cran/mixAK
|
995c88ac9b1f70ab2dac51b4fc1347b9b1356eed
|
adc4c2229d8ad3573e560fd598158e53e5d1da76
|
refs/heads/master
| 2022-09-27T10:45:02.953514
| 2022-09-19T13:46:13
| 2022-09-19T13:46:13
| 17,697,529
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,886
|
rd
|
NMixMCMCinity.Rd
|
\name{NMixMCMCinity}
\alias{NMixMCMCinity}
\title{
Initial values of censored observations for the NMixMCMC function
}
\description{
This is a help function for \code{\link{NMixMCMC}} function. If
\code{inity} is not given, it calculates reasonable initial values for
censored observations. If \code{inity} argument is given then it is
checked for consistency and formatted on output.
THIS FUNCTION IS NOT TO BE CALLED BY ORDINARY USERS.
}
\usage{
NMixMCMCinity(y0, y1, censor, sd.init,
are.Censored, are.Right, are.Exact, are.Left, are.Interval,
p, n, inity, random=FALSE)
}
\arguments{
\item{y0}{see output from \code{\link{NMixMCMCdata}}}
\item{y1}{see output from \code{\link{NMixMCMCdata}}}
\item{censor}{see output from \code{\link{NMixMCMCdata}}}
\item{sd.init}{a vector of length \eqn{p} with initial values for
overall standard deviations in each margin}
\item{are.Censored}{see output from \code{\link{NMixMCMCdata}}}
\item{are.Right}{see output from \code{\link{NMixMCMCdata}}}
\item{are.Exact}{see output from \code{\link{NMixMCMCdata}}}
\item{are.Left}{see output from \code{\link{NMixMCMCdata}}}
\item{are.Interval}{see output from \code{\link{NMixMCMCdata}}}
\item{p}{dimension of the data}
\item{n}{number of observations}
\item{inity}{a vector (if \eqn{p=1}) or a \eqn{n\times p}{n x p} matrix
(if \eqn{p \geq 1}{p >= 1}) of initial values of censored observations to be checked
for consistency. If not given then reasonable initials are
generated.}
\item{random}{logical value. If \code{TRUE} then some randomness is
used when generating initial values.}
}
\value{
A \eqn{n\times p}{n x p} matrix with reasonable initial values for
censored observations.
}
\seealso{
\code{\link{NMixMCMC}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.