blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8d2bf354bd32fa7fe262598f3f1f0554f7931a8d | 3868576e8cb60fc86575f03aa38f1f7331f7a7e5 | /man/rbind_df.Rd | 0a827e3a11f0aa6d5d98e0723f4594d74c10dd6d | [] | no_license | cran/bigreadr | dac4319c53342dac7c9a0a995ddca280c14ec844 | 131e342cf6a841d5ed7e484665f029faa6c761e4 | refs/heads/master | 2022-12-11T23:58:51.974200 | 2022-12-06T14:50:02 | 2022-12-06T14:50:02 | 145,906,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 467 | rd | rbind_df.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bind.R
\name{rbind_df}
\alias{rbind_df}
\title{Merge data frames}
\usage{
rbind_df(list_df)
}
\arguments{
\item{list_df}{A list of multiple data frames with the same variables in the
same order.}
}
\value{
One merged data frame with the names of the first input data frame.
}
\description{
Merge data frames
}
\examples{
str(iris)
str(rbind_df(list(iris, iris)))
}
|
eb97854a77d89950a5ce75566a431172cad52ce4 | fb3567838f718d29273ff5b22a008dbace7bbfe1 | /code/Analysis/regressions_parcels.R | c9b58e9d0f7ae9222068c39494acd2885aca0baa | [] | no_license | rlsweeney/public_cs_texas | 70ab79056fd4954ba0bb66b357e20e1fcee4552e | 27b1fbbc774c3e25bbf734255019dada7ec18808 | refs/heads/master | 2023-01-29T01:45:24.666394 | 2023-01-11T16:18:08 | 2023-01-11T16:18:08 | 175,434,809 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 12,376 | r | regressions_parcels.R | #==============================================================================
# code to estimate and format regression tables for parcel outcomes
# this is currently WIP, since it estimates way more specs than we ultimately
# want to report
# by thom/rich in december 2020 - march 2021
#==============================================================================
library(tidyverse)
library(lubridate)
library(splines)
library(lmtest)
library(sandwich)
library(fixest)
library(sf)
#==============================================================================
# BASIC TEXAS SETUP
#==============================================================================
library(here)
root <- here()
source(file.path(root, "code", "paths.R"))
source(file.path(root, "code", "texas_constants.R"))
source(file.path(root, "code", "functions", "regtable.R"))
source(file.path(root, "code", "functions", "latex_number.R"))
source(file.path(root, "code", "functions", "utils.R"))
source(file.path(root, "code", "functions", "load_crossfit.R"))
#===============================================================================
# LOAD DATA CLEANED IN final_parcel_outcomes.R: parcel outcomes and lease
# outcomes underlying these parcel outcomes.
# ALSO LOAD final leases so that we can compare lease and parcel results
#===============================================================================
# ---- Build the three estimation samples from saved outcome files ------------
# Monetary/output outcomes are rescaled to per-acre terms; Acres is divided by
# 1000 so the acreage-spline coefficients are readable.  Auction = 1 for
# STATE parcels (auctioned), 0 for RAL parcels (negotiated); other parcel
# types are dropped.
load(file.path(gen, "final_parcel_outcomes.Rda"))
regdata <-
  parcel_outcomes_main$parcel_outcomes %>%
  mutate(Acres = ParcelAcres, Type = ParcelType) %>%
  rename(DBOE = BOE_total, Bonus = PmtBonus, SellerRevenue = PmtTotal) %>%
  # creates BonusPerAcre, DBOEPerAcre, LeaseRevenuePerAcre,
  # SellerRevenuePerAcre alongside the level variables
  mutate(across(c(Bonus, DBOE, SellerRevenue, LeaseRevenue),
                ~ . / Acres, .names = "{col}PerAcre")) %>%
  mutate(Auction = if_else(Type == "STATE", 1, 0),
         Acres = Acres / 1000) %>%
  filter(Type %in% c("RAL", "STATE"))
# Lease-level sample, used below to compare lease-level and parcel-level
# regressions.  Pull sample/auction/year flags from the final lease file.
load(file.path(gen, "final_leases.Rda"))
regdata_leases_fl_info <-
  final_leases %>%
  select(Lease_Number, InSample, FLAuction = Auction, Year)
regdata_leases <-
  parcel_outcomes_main$lease_outcomes %>%
  mutate(Acres = GrossAcres, Type = LeaseType) %>%
  # drop the raw Bonus column before renaming PmtBonus into its place
  select(-Bonus) %>%
  rename(DBOE = BOE_total, Bonus = PmtBonus, SellerRevenue = PmtTotal) %>%
  mutate(across(c(Bonus, DBOE, SellerRevenue, LeaseRevenue),
                ~ . / Acres, .names = "{col}PerAcre")) %>%
  filter(Type %in% c("RAL", "STATE")) %>%
  mutate(Acres = Acres / 1000) %>%
  mutate(Auction = if_else(Type == "STATE", 1, 0)) %>%
  mutate(EffDate = as.numeric(Effective_Date)) %>%
  # joins on all shared columns (no `by` given); leases absent from
  # final_leases get InSample = FALSE
  left_join(regdata_leases_fl_info) %>%
  replace_na(list(InSample = FALSE))
# Parcel outcomes restricted to parcels with early leases.
# NOTE(review): regdata_everleased does not appear to be used below (the
# comparison regressions use filter(regdata, EverLeased)) -- confirm it is
# still needed.
load(file.path(gen, "outcomes_earlyLeases.Rda"))
regdata_everleased <-
  outcomes_earlyLeases$parcel_outcomes %>%
  mutate(Acres = ParcelAcres, Type = ParcelType) %>%
  rename(DBOE = BOE_total, Bonus = PmtBonus, SellerRevenue = PmtTotal) %>%
  mutate(across(c(Bonus, DBOE, SellerRevenue, LeaseRevenue),
                ~ . / Acres, .names = "{col}PerAcre")) %>%
  mutate(Auction = if_else(Type == "STATE", 1, 0),
         Acres = Acres / 1000) %>%
  filter(Type %in% c("RAL", "STATE"))
#===============================================================================
# define specifications
#===============================================================================
# this reduces code complexity/duplication in the feols calls that follow
# Register formula macros for the feols()/fepois() calls below:
#   ..depvarspa: per-acre outcomes (linear models)
#   ..depvars:   level outcomes (Poisson models)
#   ..fe:        stepwise fixed effects -- one model per grid size (Grid10 vs
#                Grid20)
#   ..surface:   surface-quality parcel controls (vectors defined in a sourced
#                setup file).  NOTE(review): ..surface is registered but not
#                referenced in the visible code -- confirm.
setFixest_fml(..depvarspa = ~ c(BonusPerAcre, DBOEPerAcre,
                              LeaseRevenuePerAcre, SellerRevenuePerAcre),
              ..depvars = ~ c(Bonus, DBOE, LeaseRevenue, SellerRevenue),
              ..fe = ~sw(Grid10, Grid20),
              ..surface = c(parcel_characteristics_1, parcel_characteristics_2))
# Outcome-name vectors iterated over for the DML estimates.
dml_pa_outcomes <-
  c("BonusPerAcre", "DBOEPerAcre",
    "LeaseRevenuePerAcre", "SellerRevenuePerAcre")
dml_outcomes <-
  c("Bonus", "DBOE", "LeaseRevenue", "SellerRevenue")
# Controls entering every DML specification (see dml_fml below).
dml_spec <- "Acres + CentLat + CentLong"
# NOTE(review): extra_controls is constructed here but not used in the visible
# code -- confirm before removing.
extra_controls <-
  paste(paste0(parcel_characteristics_1, collapse = " + "),
        paste0(parcel_characteristics_2, collapse = " + "),
        sep = " + ")
# Build the DML formula string "Auction | <controls>": the treatment variable
# to the left of the bar, the base controls (dml_spec) plus any extra terms
# supplied by the caller to the right.
dml_fml <- function(...) {
  rhs <- paste(dml_spec, ..., sep = " + ")
  paste("Auction", rhs, sep = " | ")
}
# Fit one cross-fit DML model for outcome `lhs` on the parcel sample
# (`regdata`, a global) and wrap it in a length-1 list so it concatenates
# cleanly with fixest model lists in make_table().
# `type` is forwarded to dml() ("linear" or "poisson").
dml_models <- function(lhs, type = "linear") {
  fit_formula <- as.formula(paste(lhs, dml_fml(), sep = " ~ "))
  fitted <- dml(fit_formula, regdata, type, n = dml_n, ml = "rf", workers = 8)
  list(fitted)
}
#===============================================================================
# table helper function
#===============================================================================
# Format one outcome's results as a six-column table row-block.
#
# lhs: outcome index/name, used both to subset the fixest multiple-estimation
#      objects (m[lhs = ..., fixef = ...]) and to index the DML result lists.
# ms:  list of the 4 model-run objects, in order:
#      [[1]] linear FE (fixest_multi), [[2]] linear DML (list of lists),
#      [[3]] Poisson FE (fixest_multi), [[4]] Poisson DML (list of lists).
# Columns are Grid10 FE, Grid20 FE, DML for the linear estimator, then the
# same three for Poisson.
make_table <- function(lhs, ms, output_format = "latex") {
  results <-
    c(ms[[1]][lhs = lhs, fixef = 1],
      ms[[1]][lhs = lhs, fixef = 2],
      list(ms[[2]][[lhs]][[1]]),
      ms[[3]][lhs = lhs, fixef = 1],
      ms[[3]][lhs = lhs, fixef = 2],
      list(ms[[4]][[lhs]][[1]]))
  tbl <-
    regtable(results,
             est = c("Auction"),
             est_names = c("Auction"),
             extra_rows = list("Estimate" = rep(c("G10", "G20", "DML"), 2),
                               "Estimator" = c(rep("Linear", 3),
                                               rep("Poisson", 3))),
             n_obs = TRUE,
             stats = NA,
             stats_names = NA,
             decimals = 2,
             output_format = output_format)
  return(tbl)
}
#===============================================================================
# estimate FE models in levels (fast)
#===============================================================================
# Fixed-effects models in levels.  The c() outcome macro and sw() FE macro
# make each call a multiple estimation: 4 outcomes x 2 grid sizes per object.
# Acreage enters as a 4-df B-spline.
parcel_models_fe <-
  feols(..depvarspa ~ Auction + bs(Acres, df = 4) | ..fe, regdata)
parcel_models_pois_fe <-
  fepois(..depvars ~ Auction + bs(Acres, df = 4) | ..fe, regdata)
#===============================================================================
# estimate dml models (slow)
#===============================================================================
# One DML fit per outcome; each element is a length-1 list (see dml_models).
parcel_models_dml <-
  dml_pa_outcomes %>%
  map(dml_models)
parcel_models_pois_dml <-
  dml_outcomes %>%
  map(dml_models, type = "poisson")
#===============================================================================
# make and save table
#===============================================================================
# Assemble the main parcel table: one stacked panel per outcome (indexed 1-4),
# each with the six columns built by make_table (G10/G20/DML x Linear/Poisson),
# then write the LaTeX to the tables directory.
main_parcel_outcomes <-
  seq(1, 4) %>%
  map(~ make_table(., list(parcel_models_fe, parcel_models_dml,
                           parcel_models_pois_fe, parcel_models_pois_dml),
                   output_format = "df")) %>%
  regtable_stack(table_names = c("Bonus", "Output",
                                 "Lease Revenue", "Seller Revenue"),
                 n_bottom = TRUE,
                 output_format = "latex")
writeLines(main_parcel_outcomes, file.path(tdir, "parcel_regressions.tex"))
#===============================================================================
# make parcel lease comparison table
# this will have 5 columns and four rows
# columns are lease level in sample + year, lease level all + year, lease level
# all no time control, lease level no time weighted by nparcels, parcel reg
# for ever leased sample, and then finally parcel reg for the whole sample
# rows are the outcomes
# do one table for grid10, the other for grid20
#===============================================================================
# Re-register the formula macros for the lease/parcel comparison (reset = TRUE
# drops the earlier ..surface macro).
setFixest_fml(..depvarspa = ~ c(BonusPerAcre, DBOEPerAcre,
                              LeaseRevenuePerAcre, SellerRevenuePerAcre),
              ..depvars = ~ c(Bonus, DBOE,
                              LeaseRevenue, SellerRevenue),
              ..fe = ~sw(Grid10, Grid20),
              reset = TRUE)
# FE models on the two comparison samples: all leases vs. parcels that were
# ever leased.  Linear models use per-acre outcomes; Poisson uses levels.
ols_leases <-
  feols(..depvarspa ~ Auction + bs(Acres, df = 4) | ..fe,
        regdata_leases)
ols_parcels <-
  feols(..depvarspa ~ Auction + bs(Acres, df = 4) | ..fe,
        filter(regdata, EverLeased))
pois_leases <-
  fepois(..depvars ~ Auction + bs(Acres, df = 4) | ..fe,
         regdata_leases)
pois_parcels <-
  fepois(..depvars ~ Auction + bs(Acres, df = 4) | ..fe,
         filter(regdata, EverLeased))
# Fit a single cross-fit DML model for outcome `lhs` on data set `rd`.
# Unlike dml_models(), the data set is an argument (so the same code serves
# the lease and parcel samples) and the fitted object is returned bare,
# not wrapped in a list.
dml_pl_model <- function(lhs, rd, type = "linear") {
  fit_formula <- as.formula(paste(lhs, dml_fml(), sep = " ~ "))
  dml(fit_formula, rd, type, n = dml_n, ml = "rf", workers = 8)
}
# DML counterparts of the FE comparison models: one fit per outcome, for the
# all-leases sample and the ever-leased-parcels sample, linear and Poisson.
ols_lease_dml <-
  dml_pa_outcomes %>%
  map(~ dml_pl_model(., regdata_leases))
ols_parcel_dml <-
  dml_pa_outcomes %>%
  map(~ dml_pl_model(., filter(regdata, EverLeased)))
poisson_lease_dml <-
  dml_outcomes %>%
  map(~ dml_pl_model(., regdata_leases, type = "poisson"))
poisson_parcel_dml <-
  dml_outcomes %>%
  map(~ dml_pl_model(., filter(regdata, EverLeased), type = "poisson"))
# Convenience accessor: subset a fixest multiple-estimation object by outcome
# (lhs) and fixed-effect specification (fixef).
# NOTE(review): this helper does not appear to be called in the visible code
# -- confirm it is still needed.
get_est <- function(m, lhs, fixef) {
  m[lhs=lhs, fixef=fixef]
}
# Format one six-column comparison panel: three estimators (Grid10 FE,
# Grid20 FE, DML) for the all-leases sample, followed by the same three for
# the leased-parcels sample.  Returns a data frame for later stacking.
comparetbl <- function(cols) {
  estimator_labels <- rep(c("G10", "G20", "DML"), 2)
  sample_labels <- c(rep("All Leases", 3), rep("Leased Parcels", 3))
  regtable(cols,
           est = c("Auction"),
           est_names = c("Auction"),
           extra_rows = list("Estimate" = estimator_labels,
                             "Sample" = sample_labels),
           n_obs = TRUE,
           stats = NA,
           stats_names = NA,
           output_format = "df")
}
# Assemble and write the lease/parcel comparison appendix tables.  Each table
# stacks four panels (one per outcome); each panel has six columns: Grid10 FE,
# Grid20 FE, and DML for the lease sample, then the same three for leased
# parcels.  The original version spelled out all 4 outcomes x 2 estimators by
# hand; the helper below builds a table from its four model objects instead,
# making the calls identical but non-repetitive.
stack_comparison <- function(fe_leases, dml_leases, fe_parcels, dml_parcels) {
  # Six comparison columns for outcome index l
  # (1 = Bonus, 2 = Output, 3 = Lease Revenue, 4 = Seller Revenue).
  outcome_cols <- function(l) {
    comparetbl(c(fe_leases[fixef = 1, lhs = l],
                 fe_leases[fixef = 2, lhs = l],
                 list(dml_leases[[l]]),
                 fe_parcels[fixef = 1, lhs = l],
                 fe_parcels[fixef = 2, lhs = l],
                 list(dml_parcels[[l]])))
  }
  regtable_stack(
    lapply(seq_len(4), outcome_cols),
    table_names = c("Bonus", "Output", "Lease Revenue", "Seller Revenue"),
    output_format = "latex")
}
appendix_compare_linear <-
  stack_comparison(ols_leases, ols_lease_dml, ols_parcels, ols_parcel_dml)
appendix_compare_poisson <-
  stack_comparison(pois_leases, poisson_lease_dml,
                   pois_parcels, poisson_parcel_dml)
writeLines(appendix_compare_linear,
           file.path(tdir, "lease_parcel_comparisons_linear.tex"))
writeLines(appendix_compare_poisson,
           file.path(tdir, "lease_parcel_comparisons_poisson.tex"))
|
3c98029fa69113bf66a40766005ff322985f67cc | d6b22b8479178ad1cda7b837432634c3195b9705 | /Lab 4/logistic_k_fold.r | 8cde8617d658aaa8cdc7f38e43719267c6b4a28d | [] | no_license | xyRen6617/Data-Management-Analytics | cd04bec1278ae936a70460aa3fd5fc646ced4783 | 4dacc6e82bb12784c4efe3a7332b9bd5a2dbcb56 | refs/heads/main | 2023-06-11T14:06:00.373780 | 2021-06-19T01:30:30 | 2021-06-19T01:30:30 | 368,035,954 | 0 | 0 | null | null | null | null | EUC-JP | R | false | false | 10,066 | r | logistic_k_fold.r | library(tidyverse )
library(MASS)
library(corrplot)
# NOTE(review): hard-coded setwd() makes the script machine-specific; prefer
# project-relative paths.
setwd("C:/Users/duli/Documents/20210520")
bike_data <- read.csv("bike_buyers_clean.csv")
# One-hot encode the multi-level categorical predictors (the -1 drops the
# intercept so every level gets its own indicator column).
# NOTE(review): t and s below are scratch copies of the same matrix and are
# never used again.
t = model.matrix(~Commute.Distance-1,bike_data) %>% as.data.frame()
s = as.data.frame(model.matrix(~Commute.Distance-1,bike_data))
bike_data = cbind(bike_data,as.data.frame(model.matrix(~Commute.Distance-1,bike_data)))
bike_data = cbind(bike_data,as.data.frame(model.matrix(~Education-1,bike_data)))
bike_data = cbind(bike_data,as.data.frame(model.matrix(~Occupation-1,bike_data)))
bike_data = cbind(bike_data,as.data.frame(model.matrix(~Region-1,bike_data)))
# Drop the original categorical columns by position.
# NOTE(review): positional drops are fragile -- each removal shifts the
# remaining indices, so this depends on the exact CSV column order; dropping
# by name would be safer.
bike_data <- bike_data[,-c(1)]
bike_data <- bike_data[,-c(5)]
bike_data <- bike_data[,-c(5)]
bike_data <- bike_data[,-c(7)]
bike_data <- bike_data[,-c(7)]
# Recode binary categoricals to numeric codes (1/2, by factor level order).
factors <- factor(bike_data$Marital.Status)
bike_data$Marital.Status <- as.numeric(factors)
factors <- factor(bike_data$Gender)
bike_data$Gender <- as.numeric(factors)
factors <- factor(bike_data$Home.Owner)
bike_data$Home.Owner <- as.numeric(factors)
# Shuffle the rows and attach a sequential id used to form the CV folds below.
# NOTE(review): sample() is unseeded, so fold assignment (and thus results)
# differs across runs -- add set.seed() for reproducibility.
numRows = nrow(bike_data) #there are 272 in this set
id = seq(1, numRows, by =1)
bikeShuffle = slice(bike_data, sample(1:n()))
bikeShuffle = mutate(bikeShuffle, id)
# ---- 5-fold cross-validation for candidate models 1-4 -----------------------
# The matrix is named `errors` but actually stores out-of-sample
# classification ACCURACY: one row per model, one column per fold.
# (Consolidates four copy-pasted fold loops into one helper; also drops the
# unused totalError variable, the interactive View() call, and the summary()
# calls, which were no-ops inside the loops.)
k <- 5  # 5-fold validation

# Covariates shared by every candidate; the candidates differ only in whether
# Children / Income / Cars enter linearly or as polynomials.
tail_terms <- paste("`RegionNorth America`",
                    "`Commute.Distance0-1 Miles`",
                    "`Commute.Distance2-5 Miles`",
                    "RegionEurope",
                    "OccupationProfessional", sep = " + ")
varying_terms_1 <- c("Children + Income + Cars",           # model 1: all linear
                     "poly(Children,2) + Income + Cars",   # model 2
                     "Children + poly(Income,2) + Cars",   # model 3
                     "Children + Income + poly(Cars,2)")   # model 4
formulas_1 <- lapply(varying_terms_1, function(v)
  as.formula(paste("as.factor(Purchased.Bike) ~ Marital.Status +", v,
                   "+", tail_terms)))

# Fit one logistic regression per fold on the shuffled data and return the k
# held-out accuracies: (true "No" + true "Yes") / n_test.
cv_accuracy <- function(fml, data, k, numRows) {
  acc <- numeric(k)
  for (i in seq_len(k)) {
    test <- filter(data, id >= (i - 1) * numRows / k + 1 & id <= i * numRows / k)
    train <- anti_join(data, test, by = "id")
    model <- glm(fml, data = train, family = binomial)
    prob <- predict(object = model, newdata = test, type = "response")
    pred <- ifelse(prob >= 0.5, "yes", "no")
    pred <- factor(pred, levels = c("no", "yes"), order = TRUE)
    f <- table(test$Purchased.Bike, pred)
    acc[i] <- (f[1, 1] + f[2, 2]) / nrow(test)
  }
  acc
}

errors <- matrix(0, nrow = 4, ncol = 5)
for (j in seq_along(formulas_1)) {
  errors[j, ] <- cv_accuracy(formulas_1[[j]], bikeShuffle, k, numRows)
}

# Total accuracy per model across the 5 folds (divided by k when plotted).
avgAcc <- rowSums(errors)
# ---- 5-fold cross-validation for candidate models 5-11 ----------------------
# Same design as above, now varying the polynomial degree of Cars (2-4) with
# and without a quadratic in Income.  Model 5 repeats the all-linear baseline.
# (Consolidates seven copy-pasted fold loops; cv_accuracy is redefined
# identically so this section also runs on its own.)
varying_terms_2 <- c(
  "Children + Income + Cars",                   # model 5: baseline
  "Children + Income + poly(Cars,2)",           # model 6
  "Children + Income + poly(Cars,3)",           # model 7
  "Children + Income + poly(Cars,4)",           # model 8
  "Children + poly(Income,2) + poly(Cars,2)",   # model 9
  "Children + poly(Income,2) + poly(Cars,3)",   # model 10
  "Children + poly(Income,2) + poly(Cars,4)")   # model 11
formulas_2 <- lapply(varying_terms_2, function(v)
  as.formula(paste("as.factor(Purchased.Bike) ~ Marital.Status +", v, "+",
                   paste("`RegionNorth America`",
                         "`Commute.Distance0-1 Miles`",
                         "`Commute.Distance2-5 Miles`",
                         "RegionEurope",
                         "OccupationProfessional", sep = " + "))))

# Fit one logistic regression per fold on the shuffled data and return the k
# held-out accuracies: (true "No" + true "Yes") / n_test.
cv_accuracy <- function(fml, data, k, numRows) {
  acc <- numeric(k)
  for (i in seq_len(k)) {
    test <- filter(data, id >= (i - 1) * numRows / k + 1 & id <= i * numRows / k)
    train <- anti_join(data, test, by = "id")
    model <- glm(fml, data = train, family = binomial)
    prob <- predict(object = model, newdata = test, type = "response")
    pred <- ifelse(prob >= 0.5, "yes", "no")
    pred <- factor(pred, levels = c("no", "yes"), order = TRUE)
    f <- table(test$Purchased.Bike, pred)
    acc[i] <- (f[1, 1] + f[2, 2]) / nrow(test)
  }
  acc
}

# Rows are models 5-11 (indexed 1-7 here), columns are folds; values are
# held-out accuracies.
errors <- matrix(0, nrow = 7, ncol = 5)
for (j in seq_along(formulas_2)) {
  errors[j, ] <- cv_accuracy(formulas_2[[j]], bikeShuffle, k, numRows)
}

# Total accuracy per model across the 5 folds (divided by k when plotted).
avgAcc <- rowSums(errors)
# Compute the standard error of the mean fold accuracy for each of the 7
# models and plot mean accuracy against model complexity, with and without
# error bars.  (Replaces the preallocate-and-loop SE computation with apply()
# and seq(1,7,by=1) with seq_len(7); behavior is unchanged.)
se <- apply(errors, 1, function(fold_acc) sqrt(var(fold_acc) / k))
se  # top-level auto-print for inspection
x <- seq_len(7)
# Keep the default auto-generated column names (x, avgAcc.k, se): the aes()
# calls below rely on `avgAcc.k` being the name of the avgAcc / k column.
faithBest <- data.frame(x, avgAcc / k, se)
p1 <- ggplot(data = faithBest, aes(x = x, y = avgAcc.k)) + geom_point() +
  geom_line() +
  geom_errorbar(aes(ymin = avgAcc.k - se, ymax = avgAcc.k + se)) +
  labs(x = "Model Complexity")
p1
p2 <- ggplot(data = faithBest, aes(x = x, y = avgAcc.k)) + geom_point() +
  geom_line() + labs(x = "Model Complexity")
p2
|
ac0b5910d03ae052daf7ae0c4d03474f5f041e01 | 1c295ce44342aa16846278b317a084d885fc85e5 | /drat.R | 251064c05f5eafca73a8a07f78cc2abbc56ddd1a | [] | no_license | skranz-repo/drat | a548ca265da5b3b788c1b7ef842ebbe23deb2aed | 7eb367045773c6f86b55073f1371530d8b2fcf59 | refs/heads/master | 2022-03-06T21:29:14.824777 | 2022-02-08T12:08:49 | 2022-02-08T12:08:49 | 133,791,967 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,106 | r | drat.R | #install.packages("drat")
library(drat)
library(restorepoint)
# Local drat repository layout (Windows-specific, machine-specific paths):
# repodir is the checked-out drat repo, download.dir holds built tarballs and
# downloaded archives, libdir holds local package checkouts.
repodir = "C:/libraries/drat/drat"; download.dir = "C:/libraries/drat"
libdir = "C:/libraries"
# Interactive scratchpad recording how the packages in this drat repo were
# built and inserted.  Not meant to be run top-to-bottom: it depends on the
# global repodir/download.dir/libdir above, local package checkouts, and a
# Windows toolchain.  Commented-out lines record one-off variants.
example.drat = function() {
library(devtools)
# Teach devtools about the Rtools layout for R 3.5 so find_rtools() succeeds.
assignInNamespace("version_info", c(devtools:::version_info, list("3.5" = list(version_min = "3.3.0", version_max = "99.99.99", path = "bin"))), "devtools")
find_rtools()
#github.to.drat("felsti/RTutorECars",download.dir, repodir, skip.download = FALSE)
# Build + insert each locally checked-out package (pkg.dir given explicitly
# where the checkout does not follow the libdir/<pkg>/<pkg> convention).
insert.drat("stringtools", repodir,libdir)
insert.drat("codeUtils", repodir, libdir)
insert.drat("dbmisc", repodir, libdir)
insert.drat("glueformula", repodir, libdir)
#install.packages("RelationalContractsCpp",repos = c("https://skranz-repo.github.io/drat/",getOption("repos")))
#insert.drat("RelationalContracts", repodir,libdir, R.versions = "3.6",add.source = FALSE)
#insert.drat("RelationalContractsCpp", repodir,libdir, pkg.dir="C:/libraries/RelationalContracts/RelationalContractsCpp",R.versions = "3.6", add.source = FALSE)
insert.drat("RelationalContracts", repodir,libdir)
insert.drat("RelationalContractsCpp", repodir,libdir, pkg.dir="C:/libraries/RelationalContracts/RelationalContractsCpp")
insert.drat("gtree", repodir,libdir)
insert.drat("gtreeWebPlay", repodir,libdir, pkg.dir="C:/libraries/gtree/gtreeWebPlay")
insert.drat("rmdtools", repodir,libdir)
insert.drat("shinyEvents", repodir,libdir)
insert.drat("dplyrExtras", repodir,libdir)
insert.drat("mlogitExtras", repodir,libdir, pkg.dir="C:/libraries/mlogit/mlogitExtras")
insert.drat("regtools", repodir,libdir)
# lfe: pre-built archives are inserted directly rather than rebuilt.
insert.drat("lfe",repodir, libdir)
drat::insertPackage("C:/libraries/drat/lfe_2.8-5.1.tgz", repodir)
drat::insertPackage("C:/libraries/drat/lfe_2.8-5.1.zip", repodir)
drat::insertPackage("C:/libraries/drat/lfe_2.8-5.1.tar.gz", repodir)
insert.drat("RTutorSAGI", repodir,libdir, pkg.dir="C:/libraries/RTutor/RTutorSAGI")
insert.drat("RTutor", repodir,libdir, add.source = TRUE,add.binary = TRUE)
drat::insertPackage("C:/libraries/drat/RTutor_2020.6.08.tar.gz", repodir)
#
#insert.drat("RTutorIncentiveContracts", repodir, pkg.dir="C:/libraries/RTutor/examples/IncentiveContracts/RTutorIncentiveContracts", add.binary=FALSE)
#drat::insertPackage("C:/libraries/drat/RTutorIncentiveContracts_0.1.tar.gz", repodir)
insert.drat("repgame", repodir, libdir)
insert.drat("rowmins", repodir, libdir)
insert.drat("shinyEventsUI", repodir, libdir)
insert.drat("ddsim", repodir, libdir)
insert.drat("symbeqs", repodir, libdir)
insert.drat("bbsvg", repodir, libdir)
insert.drat("rgmpl", repodir, libdir)
insert.drat("rampl", repodir, libdir)
insert.drat("sktools", repodir, libdir)
insert.drat("skUtils", repodir, libdir)
insert.drat("dyngame", repodir, libdir)
insert.drat("RSGSolve", repodir, libdir)
insert.drat("RMaxima", repodir, libdir)
#insert.drat("LyxMaxima", repodir, libdir)
# Point install.packages at the published drat repo and smoke-test installs.
# NOTE(review): install.packages("restorepoint") appears twice -- likely a
# duplicated line.
options(repos = unique(c("https://skranz-repo.github.io/drat/",getOption("repos"))))
install.packages("rowmins")
install.packages("skUtils")
install.packages("restorepoint")
install.packages("restorepoint")
install.packages("repgame", repos = c("https://skranz-repo.github.io/drat/",getOption("repos")))
install.packages("dyngame")
install.packages("RTutor")
}
# Build package `pkg` from its checkout at `pkg.dir` and add it to the local
# drat repository at `repodir`.
#
# Source tarballs are built in-process via devtools::build() and inserted
# directly.  Windows binaries are produced by shelling out to build_script.R
# under each requested R version; that script reads the package path from
# pkgdir.txt (paths are machine-specific).
insert.drat = function(pkg,repodir=getwd(),libdir, pkg.dir=file.path(libdir, pkg, pkg), add.binary=TRUE, add.source=TRUE, R.versions=c("4.0","3.6")) {
  library(drat)
  restore.point("insert.drat")
  if (add.source) {
    source_tarball = devtools::build(pkg.dir)
    drat::insertPackage(source_tarball, repodir)
    cat("\nSource package inserted.")
  }
  if (add.binary) {
    # build_script.R picks up the package directory from this file.
    writeLines(pkg.dir, "C:/libraries/drat/pkgdir.txt")
    if ("4.0" %in% R.versions) {
      cat("\nR 4.0.")
      system("C:/Programs/R/R-4.0.2/bin/Rscript C:/libraries/drat/build_script.R")
    }
    if ("3.6" %in% R.versions) {
      cat("\nR 3.6.3")
      system("C:/Programs/R-3.6.3/bin/Rscript C:/libraries/drat/build_script.R")
    }
  }
}
## Fetch a GitHub repository "user/pkg" and insert the R package it
## contains into the local drat repository. `add.binary`/`add.source`
## are forwarded to insert.drat(); `skip.download` reuses an already
## extracted copy.
github.to.drat = function(rep,download.dir = getwd(),repodir=getwd(),libdir, pkg.dir=file.path(libdir, pkg, pkg), add.binary=FALSE, add.source=TRUE, skip.download=FALSE) {
  library(stringtools)
  ## the package name is the part of the spec after the slash
  pkg <- str.right.of(rep, "/")
  pkg.dir <- download.github.package(rep, download.dir, skip.download = skip.download)
  insert.drat(pkg, repodir = repodir, pkg.dir = pkg.dir,
              add.binary = add.binary, add.source = add.source)
}
## Download and unpack the master-branch archive of a GitHub repository.
##
## @param rep Repository spec "user/pkg"; the part after "/" is the package name.
## @param download.dir Directory the zip file is stored in and extracted to.
## @param skip.download If TRUE, assume the archive was already extracted.
## @return Path to the extracted package directory.
download.github.package = function(rep, download.dir = getwd(), skip.download=FALSE) {
  library(stringtools)
  pkg = str.right.of(rep, "/")
  if (!skip.download) {
    zip.url = paste0("https://github.com/", rep, "/archive/master.zip")
    zip.file = file.path(download.dir, paste0(pkg, ".zip"))
    download.file(zip.url, destfile = zip.file)
    unzip(zip.file, exdir = download.dir)
  } else {
    cat("\nskip github download of ", rep)
  }
  ## GitHub archives extract to "<repo>-<branch>", i.e. lower-case "-master".
  ## The previous hard-coded "-Master" only worked on case-insensitive
  ## (Windows) file systems; file.path() also replaces manual "/" pasting.
  pkg.dir = file.path(download.dir, paste0(pkg, "-master"))
  return(pkg.dir)
}
|
06521293ea72ce81ecd4b9d35596394b76a171ea | aba37c9cb48a203474244f78d864c182aff4dabc | /DOE/doe_7week.R | af84a7c84b8c293a6265bd3db60efb528dbef5bc | [] | no_license | GyuYoungCho/R | 1da159146e90b9db6ac63c82a78612406450c434 | ffb52115cacb1e376577052c53bc8b04bffa6c07 | refs/heads/master | 2020-12-27T10:55:11.575868 | 2020-09-20T12:45:28 | 2020-09-20T12:45:28 | 237,871,773 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,676 | r | doe_7week.R | rm(list=ls())
# Example 5.3 (bottle fill deviation): 3 carbonation x 2 pressure x 2 speed
# factorial with 2 replicates
carb = factor(c(10,12,14))
psi = factor(c(25,30))
speed = factor(c(200,250))
times=factor(1:2)
# full design table; 'times' indexes the replicate (varies fastest)
dat_5_3 = expand.grid(times=times,speed=speed,psi=psi,carb = carb)
# observed fill-height deviations, entered in expand.grid order
dat_5_3$values = c(-3,-1,-1,0,-1,0,1,1
,0,1,2,1,2,3,6,5,
5,4,7,6,7,9,10,11)
# three-factor ANOVA with all interactions
fit_5_3 = aov(values~speed*psi*carb,data=dat_5_3)
summary(fit_5_3)
# stand-alone main-effect plot for carbonation
dat_carb = tapply(dat_5_3$values, dat_5_3$carb,mean)
plot(dat_carb, type="b",col="blue",xaxt = "n",
     main = "Main effects for Percent carbonation (A)", xlab="Percent carbonation (A)", ylab = "Average fill deviation", xlim = c(0,4),ylim = c(-2,8))
axis(1,at=c(1,2,3),labels=c(10,12,14), lwd.ticks = 2)
x11()
par(mfrow=c(2,2))
# 2x2 panel: columns 4..2 of dat_5_3 (carb, psi, speed) get main-effect
# plots; the final iteration (i == 1) draws the carbonation-pressure
# interaction plot instead
for( i in 4:1){
if(i!=1){
fct_name = colnames(dat_5_3)[i]
x_num = as.numeric(levels(dat_5_3[,i]))
n = length(x_num)
tmp_dat = tapply(dat_5_3$values, dat_5_3[,i],mean)
plot(tmp_dat, type="b",col=i,xaxt = "n",
     main = paste("Main effects for",fct_name), xlab=fct_name, ylab = "Average fill deviation", xlim = c(0,n+1),ylim = c(-2,8))
axis(1,at=seq(from=1,by=1,length.out = n),labels=x_num, lwd.ticks = 2)
}
else{
tmp_dat = tapply(dat_5_3$values, list(carb = dat_5_3$carb,psi = dat_5_3$psi),mean)
interaction.plot(dat_5_3$carb,dat_5_3$psi, response = dat_5_3$values, ylim= c(-2,10),xaxt="n",
xlab = "Carbonation-pressure interaction",
ylab = "Response",main="Carbonation-pressure interaction",
legend = F, col=c("blue","red"))
axis(1,at=c(1,2,3),labels=c("10","12","14"),lwd.ticks = 2)
legend("topleft",c("25 psi", "30 psi"),lty=2:1, col=c("blue","red"))
# overlay the six cell means as filled points
# (the inner loop reuses `i`; harmless in R, since the outer `for`
# continues with its own precomputed sequence)
clist = c("blue","blue","blue","red","red","red")
fct_list = rep(1:3,2)
for(i in 1:6){
points(fct_list[i],tmp_dat[i],cex=1.3,pch=19,col=clist[i])
}
}
}
# Example 5.4 (battery life): 3 material types x 3 temperatures, 4 replicates
Mtype = factor(1:3)
temp = factor(c(15,70,125))
times = factor(1:4)
dat_5_4 = expand.grid(times = times, temp = temp, Mtype = Mtype)
dat_5_4$values =c(130,155,74,180, 34,40,80,75, 20,70,82,58,
150,188,159,126, 136,122,106,115, 25,70,58,45,
138,110,168,160, 174,120,150,139, 96,104,82,60)
# two-way ANOVA with temperature as a factor
fit_origin = aov(values~Mtype*temp, data = dat_5_4)
summary(fit_origin)
str(dat_5_4)
# recode temperature as numeric so polynomial terms can be fitted
dat_5_4$temp = as.numeric(as.character(dat_5_4$temp))
str(dat_5_4)
# linear + quadratic temperature effects, interacted with material type
fit_5_4 = aov(values~Mtype*temp + Mtype*I(temp^2), data = dat_5_4)
summary(fit_5_4)
# separate quadratic regression of life on temperature per material type
fit_mtype = list()
for(i in 1:3){
fit_mtype[[i]] = lm(values~temp + I(temp^2),data=dat_5_4, subset=Mtype==i)
print(summary(fit_mtype[[i]]))
}
par(mfrow=c(1,1))
plot(dat_5_4$temp, dat_5_4$values, pch=16, ylim = c(min(dat_5_4$values),max(dat_5_4$values)),
xaxt="n", yaxt="n",ylab = "Life", xlab = "Temperature")
axis(1,seq(15,125,by=27.5))
axis(2,seq(20,188,by=42))
# overlay the fitted quadratic curve of each material type
for(i in 1:3){
lines(seq(15,125),predict(fit_mtype[[i]], data.frame(temp = seq(15,125))))
}
# interactive: click in the open plot window to place each label
text(locator(1),"Material type 1")
text(locator(1),"Material type 2")
text(locator(1),"Material type 3")
# Example 5.6 (noise level x filter type in blocks): randomized complete
# block design with 'blk' treated as a random effect in the mixed model
noise = factor(c("Low","Medium","High"))
f_type = factor(1:2)
blk = factor(1:4)
dat_5_6 = expand.grid(f_type = f_type, blk= blk, noise = noise)
dat_5_6$value = c(90,86,96,84,100,92,92,81,
102,87,106,90,105,97,96,80,
114,93,112,91,108,95,98,83)
library(lme4)
# fixed noise*f_type effects, random intercept per block
fit_blk = lmer(value~ noise*f_type+(1|blk),data = dat_5_6)
summary(fit_blk)
anova(fit_blk)
# fixed-block ANOVA for comparison with the mixed model
fit_blk2 = aov(value~ noise*f_type+blk,data = dat_5_6)
summary(fit_blk2)
par(mfrow=c(1,1))
# observed vs fitted; points near the identity line indicate good fit
plot(dat_5_6$value~fitted.values(fit_blk),
ylim=c(75,115),xlim=c(75,115))
abline(a=0,b=1)
|
593d57c7b9ce0cf6a4815ca099761cb787ced814 | b326020d7eb7db18ed883cbd8d6e4417153f0726 | /code/glms/dev/downsampling_events.R | ab8f353125a063a5e95baf9ddd9a463dabc73abd | [
"MIT"
] | permissive | mcfreund/stroop-rsa | 62ccff55ddbd3ed69526683e87485c0c4ad75f23 | 1b20349885efe5678fd1448d48449f296b258065 | refs/heads/master | 2021-11-01T10:14:56.985952 | 2021-10-19T03:38:33 | 2021-10-19T03:38:33 | 229,976,419 | 5 | 1 | MIT | 2021-08-31T05:09:51 | 2019-12-24T16:58:14 | Roff | UTF-8 | R | false | false | 3,072 | r | downsampling_events.R | stop("don't source me")
## setup ----
library(here)
# library(mikeutils)
library(magrittr)
library(dplyr)
library(data.table)
library(purrr)
## project-local helpers; presumably provides onsets4afni()/onsets2file()
## used below -- TODO confirm against code/strings.R
source(here("code", "strings.R"))
stroop <- fread(here("in", "behavior-and-events_group201902.csv"))
## subjects flagged as belonging to the analysis group
sample.analysis <- unique(filter(stroop, is.analysis.group)$subj)
dir.to.write.in <- here("glms")
subjs <- list.dirs(dir.to.write.in, recursive = FALSE, full.names = FALSE)
subjs <- subjs[subjs != "results"]
## per-subject stimulus-timing input directories; keep only existing ones
dirs.input <- file.path(dir.to.write.in, subjs, "input", "pro")
dirs.input <- dirs.input[dir.exists(dirs.input)]
## NOTE(review): bias.items.run1/run2 are not referenced below in this
## chunk; presumably documentation of the run-wise item assignment.
bias.items.run1 <- c("blueBLUE", "bluePURPLE", "blueRED", "purplePURPLE", "purpleRED", "purpleWHITE", "redWHITE", "whiteBLUE")
bias.items.run2 <- c("blueWHITE", "purpleBLUE", "redBLUE", "redPURPLE", "redRED", "whitePURPLE", "whiteRED", "whiteWHITE")
## For each subject: randomly keep 3 onsets per congruent-item timing file
## (written out as "_downsamp" regressors) and pool the remaining onsets
## into one "modeled-out" regressor per run.
for (dir.i in seq_along(dirs.input)) {
  # dir.i = 1
  name.dir.i <- dirs.input[dir.i]
  fnames.i <- list.files(name.dir.i)
  fnames.i
  items.congruent <- c("redRED", "blueBLUE", "whiteWHITE", "purplePURPLE")
  ## items whose onsets live in run 1 (the other two are run-2 items)
  items.congruent1 <- c("blueBLUE", "purplePURPLE")
  # items.congruent2 <- c("blueBLUE", "purplePURPLE")
  fnames.i <- fnames.i[grep(paste(items.congruent, collapse = "|"), fnames.i)]
  fnames.i <- fnames.i[grep("acc-only\\.txt$", fnames.i)] ## discard run-wise times
  ## expect exactly one timing file per congruent item
  if (length(fnames.i) != 4) stop("bad length")
  modeled.out <- vector("list", length = 2)  ## one slot per run
  for (stimtime.i in seq_along(fnames.i)) {
    # stimtime.i <- 1
    name.stimtime.i <- fnames.i[stimtime.i]
    fname.i <- file.path(name.dir.i, name.stimtime.i)
    # run1 <- readLines(fname.i, 1)
    # run2 <- readLines(fname.i, 2)
    ## timing file layout: one whitespace-separated line of onsets per run
    stimtimes <- readChar(fname.i, file.info(fname.i)$size)
    stimtimes <- strsplit(stimtimes, split = "\n")[[1]]
    stimtimes1 <- as.numeric(strsplit(stimtimes[1], " ")[[1]])
    stimtimes2 <- as.numeric(strsplit(stimtimes[2], " ")[[1]])
    is.run1.item <- grepl(paste0(items.congruent1, collapse = "|"), name.stimtime.i)
    if (is.run1.item) {
      ## sample 3 onsets to keep; the rest are "burned" (modeled out)
      keep <- sample.int(length(stimtimes1), 3)
      stimtimes.keep <- stimtimes1[keep]
      stimtimes.burn1 <- stimtimes1[-keep]
      stimtimes.burn2 <- stimtimes2
      onsets.keep <- paste0(onsets4afni(sort(stimtimes.keep)), "*\n") ## star in second row (run)
    } else {
      keep <- sample.int(length(stimtimes2), 3)
      stimtimes.keep <- stimtimes2[keep]
      stimtimes.burn2 <- stimtimes2[-keep]
      stimtimes.burn1 <- stimtimes1
      onsets.keep <- paste0("*\n", onsets4afni(sort(stimtimes.keep))) ## star in first row (run)
    }
    modeled.out[[1]] <- c(stimtimes.burn1, modeled.out[[1]]) ## run 1
    modeled.out[[2]] <- c(stimtimes.burn2, modeled.out[[2]]) ## run 2
    ## NOTE(review): gsub(".txt", ...) leaves the dot unescaped; fine as
    ## long as file names contain no other "<char>txt" substring.
    onsets2file(onsets.keep, paste0(gsub(".txt", "", fname.i), "_downsamp"))
  }
  ## pooled leftover onsets, one row per run (fname.i: last file of the
  ## inner loop, reused only for its "_bas_" path prefix)
  burn <- c(
    onsets4afni(sort(modeled.out[[1]])),
    onsets4afni(sort(modeled.out[[2]]))
  )
  onsets2file(burn, gsub("(.*_bas_).*$", "\\1congr_modelout_acc-only", fname.i))
}
|
283d083369ff3cffc4f283495d6e679994fafa0a | a479c4f44a1fa2d7e29651d8e022edb38509e4db | /Thesis_R_Codes/scripts/20190606_graveyard_scripts/old_Gong & van Leeuwen 2004.R | 2a213b50ee26161b6eac7428e050952ddb80c3de | [] | no_license | psyguy/Thesis-Codes | aa5dbe04d80c860955d7d2d2e0312e2911ba3eb5 | 8c25cbc8e30413ab9b6679bbee91ef7e21d11157 | refs/heads/master | 2020-06-03T13:43:34.714715 | 2020-01-15T11:23:38 | 2020-01-15T11:23:38 | 191,588,238 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 771 | r | old_Gong & van Leeuwen 2004.R | library(dplyr)
## Parameters for a Gong & van Leeuwen (2004)-style adaptive network of
## coupled logistic maps: N units, L_c links, T_ iterations.
seed <- 1
L_c <- 5200
N <- 300
T_ <- 6000
tol <- 0.001
set.seed(seed)
conn <- make.random.graph(size = N, num.links = L_c, seed = seed)
## random initial unit states in [-1, 1]
x.init <- N %>% runif(-1,1)
x.out <- x.init
# for (i in 1:T_) {
# x.temp <- x.out %>% GongvLeeuwen2004.logistic(conn)
# x.out <- x.out %>% cbind(x.temp)
# }
## iteration loop above is commented out; the state matrix is loaded/saved
save(x.out, file = "x.out_5200.300.6000.Rdata")
distances <- GongvLeeuwen2004.coherenceD(x.out,connectivity.matrix = conn)
## pick a random unit and look at its coherence distances
i_ <- sample.int(N,1)
d_ <- distances[,i_]
## NOTE(review): this reduces d_ to the single self-entry, making
## which.min/which.max below trivially 1 -- possibly d_[-i_] (drop self)
## was intended; confirm against the rewiring rule of the paper.
d_ <- d_[i_]
j_1 <- which.min(d_)
j_2 <- which.max(d_)
## rewire: if i_ is not yet connected to its most coherent partner,
## swap the edges via the project helper
if(!conn[i_,j_1]) conn <- conn %>% swap.edge(i1 = i_, j1 = j_1, i2 = i_, j2 = j_2)
g <- graph_from_adjacency_matrix(conn, mode = "undirected")
ClCoef <- transitivity(g)
## bare symbol: interactively prints the transitivity() function body
transitivity
|
2887e357268a1a62dc694bfccf1ff23eb500800e | 1fce1ecc49eb43771111c80eb654f4c4d9a89b6d | /simulation_functions/censorFunc.R | 53626bfd684783c538b07229ac1d013fb477bf69 | [] | no_license | hplisiecki/bias_detection_showdown | 1a13b31f67350cfb2970c55895e2128974482471 | 0ab49c3e07d3f6fb4e59afed4ffc01c14c5d1e16 | refs/heads/main | 2023-05-06T02:19:09.055665 | 2021-05-24T17:38:59 | 2021-05-24T17:38:59 | 362,840,035 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,057 | r | censorFunc.R |
# A file used to generate data for the Carter and colleagues simulation study (2019)
# Source: https://github.com/nicebread/meta-showdown
# helper functions for censor function
# clamp x to values between 0 and 1
clamp <- function(x) {
  ## Clamp values into [0, 1]. Vectorized: works elementwise, whereas the
  ## previous scalar `if` version failed on inputs of length > 1; scalar
  ## results are unchanged.
  pmin(pmax(x, 0), 1)
}
#' @param p observed p value
#' @param p_range Range of observed p-values where the easing takes place
#' @param from_prob Probability of publication (starting position)
#' @param to_prob Probability of publication (end position)
## Exponential ease-out between from_prob (at p_range[1]) and roughly
## to_prob (at p_range[2]): the curve drops steeply at first, then
## flattens, reaching from_prob + (to_prob - from_prob) * (1 - 2^-10)
## at the upper end of the range.
## @param p observed p value
## @param p_range Range of observed p-values where the easing takes place
## @param from_prob Probability of publication (starting position)
## @param to_prob Probability of publication (end position)
easeOutExpo <- function (p, p_range, from_prob, to_prob) {
  ## (removed unused local p_start and the stray trailing semicolon)
  p_range_length <- p_range[2] - p_range[1]
  (to_prob-from_prob) * (-2^(-10 * (p-p_range[1])/p_range_length) + 1) + from_prob
}
## Exponential ease-in between roughly from_prob (at p_range[1]) and
## to_prob (at p_range[2]): the curve starts flat near
## from_prob + (to_prob - from_prob) * 2^-10 and rises steeply towards
## to_prob, which is reached exactly at the upper end of the range.
easeInExpo <- function (p, p_range, from_prob, to_prob) {
  ## (removed unused local p_start and the stray trailing semicolon)
  p_range_length <- p_range[2] - p_range[1]
  (to_prob-from_prob) * 2^(10 * (((p-p_range[1])/p_range_length) - 1)) + from_prob
}
#' @param pObs two-tailed p-value
#' @param posSign_NS_baseRate What's the probability that a p > .10 in the right direction enters the literature?
#' @param negSign_NS_baseRate What's the probability that a p > .01 in the wrong direction enters the literature? (Left anchor at p = .01)
#' @param counterSig_rate What's the probability that a p < .001 in the wrong direction enters the literature?
#' @param direction +1: Expected direction, -1: wrong direction
censor <- function(pObs, direction, posSign_NS_baseRate = 0.3, negSign_NS_baseRate = 0.05, counterSig_rate = 0.50){
  ## Probability that a single study result enters the literature, given
  ## its two-tailed p-value and the direction of the observed effect
  ## (direction > 0: expected direction; otherwise wrong direction).
  if (direction > 0) {
    ## Expected direction: significant results are always published;
    ## between p = .05 and p = .1 the probability eases down to the
    ## non-significant base rate.
    if (pObs < .05) {
      pubProb <- 1
    } else if (pObs < .1) {
      pubProb <- easeOutExpo(p = pObs, p_range = c(.05, .1), from_prob = 1, to_prob = posSign_NS_baseRate)
    } else {
      pubProb <- posSign_NS_baseRate
    }
  } else {
    ## Wrong direction: only highly significant results (p < .001) have
    ## the elevated counterSig_rate; between p = .001 and p = .01 it
    ## eases down to the wrong-direction base rate.
    if (pObs < .001) {
      pubProb <- counterSig_rate
    } else if (pObs < .01) {
      pubProb <- easeOutExpo(p = pObs, p_range = c(.001, .01), from_prob = counterSig_rate, to_prob = negSign_NS_baseRate)
    } else {
      pubProb <- negSign_NS_baseRate
    }
  }
  return(pubProb)
}
# in this (equivalent) variant of the function, you can provide a one-tailed p-value
# --> then it's not necessary to provide the direction of the effect
censor.1t.0 <- function(pObs, posSign_NS_baseRate = 0.3, negSign_NS_baseRate = 0.05, counterSig_rate = 0.50){
  ## One-tailed formulation of censor(): pObs < .5 encodes the expected
  ## direction and pObs > .5 the wrong direction, so no direction
  ## argument is needed. The cut points are the one-tailed equivalents
  ## of the two-tailed thresholds used in censor().
  if (pObs < .05/2) {
    ## expected direction, significant
    pubProb <- 1
  } else if (pObs < .1/2) {
    ## expected direction, trending: ease from 1 down to the base rate
    pubProb <- easeOutExpo(p = pObs, p_range = c(.05/2, .1/2), from_prob = 1, to_prob = posSign_NS_baseRate)
  } else if (pObs < .5) {
    ## expected direction, non-significant
    pubProb <- posSign_NS_baseRate
  } else if (pObs < 1-(.01/2)) {
    ## wrong direction, non-significant at the 1% level
    pubProb <- negSign_NS_baseRate
  } else if (pObs < 1-(.001/2)) {
    ## wrong direction, two-tailed p between .01 and .001
    pubProb <- easeInExpo(p = pObs, p_range = c(1-(.01/2), 1-(.001/2)), from_prob = negSign_NS_baseRate, to_prob = counterSig_rate)
  } else {
    ## wrong direction, highly significant
    pubProb <- counterSig_rate
  }
  return(pubProb)
}
## element-wise wrapper so censor.1t() accepts vectors of p-values
censor.1t <- Vectorize(censor.1t.0)
# helper: convert 1-tailed p-value to 2-tailed
p.1.to.2 <- function(p.1tailed) {
  ## Two-tailed p-value corresponding to a one-tailed one; symmetric
  ## around .5, so p and 1 - p map to the same two-tailed value.
  1 - 2 * abs(0.5 - p.1tailed)
}
# helper: get direction of a 1-tailed p-value
getDir <- function(p.1tailed) {
  ## Direction implied by a one-tailed p-value: +1 for the expected
  ## direction (p < .5), -1 otherwise. Vectorized; NA propagates.
  1 - 2 * (p.1tailed >= 0.5)
}
# Sanity check: do both censor functions return the same value?
# curve(censor.1t(pObs=x, posSign_NS_baseRate = 0.20, negSign_NS_baseRate = 0.05, counterSig_rate = 0.50), from=0, to=1, n=10000)
#
# for (p.1t in seq(0, 1, length.out=1000)) {
# points(x=p.1t, y=censor(pObs=p.1.to.2(p.1t), direction=getDir(p.1t)), col="red", pch=21, cex=.3)
# }
# some predefined settings: medium publication bias
# (20% base rate for right-direction non-significant results, 5% for
# wrong-direction non-significant, 50% for wrong-direction significant;
# see censor() for the exact meaning of each rate)
censorMedium0 <- function(pObs, direction) {
  censor(pObs, direction, posSign_NS_baseRate = 0.20, negSign_NS_baseRate = 0.05, counterSig_rate = 0.50)
}
# element-wise wrapper: accepts vectors of pObs/direction
censorMedium <- Vectorize(censorMedium0)
# some predefined settings: strong publication bias
# (5% base rate for right-direction non-significant results, 0% for
# wrong-direction non-significant, 20% for wrong-direction significant;
# see censor() for the exact meaning of each rate)
censorHigh0 <- function(pObs, direction) {
  censor(pObs, direction, posSign_NS_baseRate = 0.05, negSign_NS_baseRate = 0.00, counterSig_rate = 0.20)
}
# element-wise wrapper: accepts vectors of pObs/direction
censorHigh <- Vectorize(censorHigh0)
|
eb334ab8e58b80fcfcdff783597c89d005d28042 | 8769749bb0a919299b66db7aaafa400d1d412469 | /hervh/inter_chrom.hervh.build_interactions.r | 0fbb88d26e1428c302f965ca6ad2103a4b75d8f8 | [] | no_license | bioinfx/cvdc_scripts | e9e113fae866d2d3f0c2515fae1b410b2d7a3eeb | d33757f9d02fa6d503b5cb65336c0e4e410caa78 | refs/heads/master | 2022-03-19T00:15:13.372417 | 2019-12-05T04:38:48 | 2019-12-05T04:38:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 742 | r | inter_chrom.hervh.build_interactions.r | a=read.delim("D00.rna_seq.ranked_by_rpkm.v2.bed",header=F)
a = a[grep("chr",a$V4),]
# leave only HERVH expression RPKM >1.
a2 = a[which(a$V7 >=1),]
# sort the peak location wrt to chromosome and location
a2$loc = floor(a2$V2/1e4) * 1e4
a3 = a2[,c(1,8)]
a3$V1 = factor(a3$V1, levels=paste0("chr",c(1:22,"X")))
a3 = a3[order(a3$V1,a3$loc),]
out_dict = list()
for (i in 1:nrow(a3)){
print(i)
for (j in (i+1):nrow(a3)) {
print(j)
out_dict[[length(out_dict)+1]] = cbind(a3[i,],a3[j,])
}
}
out = do.call(rbind, out_dict)
colnames(out) = c("chr1","x1","chr2","y1")
out$x2 = out$x1+1e4
out$y2 = out$y1+1e4
out = out[,c(1,2,5,3,4,6)]
write.table(out,"inter_chrom_int/hervh.pairs.rpkm_gt1.txt",row.names=F,sep="\t",quote=F)
|
79afc4957c43c989e65ba9bc6c0f827c940b4afa | 6f4fb74cebb8c9339f8eee35f574195c0952fd44 | /R/reportEDGETransport2.R | 94e0210d7b7941e529cac3651b23ea845309c63e | [] | no_license | pik-piam/edgeTrpLib | 9023a3023be3db3542a093f8bd71f377ef34bd5f | 202697ac490b6921a1d3ab4eabc1a4b7cf62085b | refs/heads/master | 2023-01-24T18:15:29.217386 | 2022-06-01T13:05:26 | 2022-06-01T13:05:26 | 252,760,526 | 0 | 8 | null | 2023-01-03T01:07:24 | 2020-04-03T14:46:09 | R | UTF-8 | R | false | false | 42,896 | r | reportEDGETransport2.R | #' Reporting for the coupled EDGE-T Transport Sector Model (REMIND Module edge_esm)
#'
#' Data is loaded from the EDGE-T subfolder in the output folder.
#' The input files can be (re-) generated calling
#' `Rscript EDGETransport.R --reporting`
#' from the output folder.
#'
#' *Warning* The function modifies the "REMIND_generic_<scenario>.mif" file by appending the
#' additional reporting variables and replaces the "_withoutPlus" version.
#'
#' Region subsets are obtained from fulldata.gdx
#'
#' @param output_folder path to the output folder, default is current folder.
#' @param sub_folder subfolder with EDGE-T output files (level_2 for standalone, EDGE-T for coupled runs)
#' @param loadmif shall we try to load a REMIND MIF file from the output folder to append the variables?
#' @param extendedReporting report a larger set of variables
#' @param scenario_title a scenario title string
#' @param model_name a model name string
#' @param gdx path to the GDX file used for the run.
#' @author Johanna Hoppe Alois Dirnaichner Marianna Rottoli
#'
#' @importFrom rmndt approx_dt readMIF writeMIF
#' @importFrom gdxdt readgdx
#' @importFrom data.table fread fwrite rbindlist copy CJ
#' @importFrom remind2 toolRegionSubsets
#' @importFrom quitte as.quitte
#' @export
reportEDGETransport2 <- function(output_folder = ".", sub_folder = "EDGE-T/",
loadmif = TRUE , extendedReporting = FALSE,
scenario_title = NULL, model_name = "EDGE-Transport",
gdx = NULL) {
  ## NULL Definitions for codeCheck compliance: these names are data.table
  ## columns referenced via non-standard evaluation below; binding them to
  ## NULL silences "no visible binding for global variable" check notes.
  RegionCode <- CountryCode <- `.` <- sector <- subsector_L3 <- region <- year <- NULL
  subsector_L2 <- subsector_L1 <- aggr_mode <- vehicle_type <- det_veh <- aggr_nonmot <- NULL
  demand_F <- demand_EJ <- remind_rep <- V25 <- aggr_veh <- technology <- NULL
  ttot <- se_share <- fe_demand <- variable <- value <- demand_VKM <- loadFactor <- NULL
  all_enty <- ef <- variable_agg <- model <- scenario <- period <- NULL
  Region <- Variable <- co2 <- co2val <- elh2 <- fe <- NULL
  int <- se <- sec <- sharesec <- te <- tech <- val <- share <- NULL
  eff <- sharebio <- sharesyn <- totseliq <- type <- ven <- NULL
  unit <- tot_VOT_price <- tot_price <- logit_type <- capture.output <- weight <- NULL
  #pkm or tkm is called km in the reporting. Vehicle km are called vkm
  ## reported time steps: 5-year grid to 2060, 10-year grid to 2100
  yrs <- c(seq(2005, 2060, 5), seq(2070, 2100, 10))
datapath <- function(fname){
file.path(output_folder, sub_folder, fname)}
  ## Aggregate detailed EDGE-T demand onto the reported variable hierarchy.
  ##
  ## @param datatable data.table with sector/subsector/vehicle_type/
  ##   technology detail and a `value` column. NOTE: modified by reference
  ##   (`sector` is recoded below).
  ## @param mode "FE" (final energy), "ES" (energy service) or "VKM"
  ##   (vehicle-km); selects the variable prefix and unit.
  ## @return MIF-style rows (model, scenario, region, variable, unit,
  ##   period, value), interpolated/extrapolated to the `yrs` time steps.
  reporting <- function(datatable, mode){
    aggr_mode_tech <- aggr_LDV <- aggr_LDV_tech <- det_veh_tech <- aggr_bunkers <- aggr_bunkers_tech <- aggr_veh_tech <- capture.output <- NULL
    report <- list()
    ## collapse the EDGE-T sectors into the two reported top levels
    datatable[, sector := ifelse(sector %in% c("trn_pass", "trn_aviation_intl"), "Pass", "Freight")]
    ## attach the reporting aggregation labels (aggr_mode, aggr_veh, ...)
    datatable <- merge(datatable,Aggrdata,by = c("sector", "subsector_L1", "subsector_L2", "subsector_L3", "vehicle_type", "technology"), all.x = TRUE, allow.cartesian = TRUE)
    #How to account for Hybrid Electric in Final Energy?
    ## technology -> reported energy carrier; for FE, BEV/Electric count
    ## as Electricity and Hybrid Electric as Liquids
    if (mode == "FE") {
      techmap <- data.table(
        technology = c("BEV","Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "NG"),
        remind_rep = c("Electricity", "Electricity", "Liquids", "Hydrogen", "Hydrogen", "Liquids", "Gases"))
    } else {
      techmap <- data.table(
        technology = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids","NG"),
        remind_rep = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "Gases"))
    }
    datatable <- merge(datatable,techmap,by = c("technology"), all.x = TRUE)
    ## technology-resolved variants of each aggregation level
    datatable[!is.na(aggr_mode) & !is.na(remind_rep), aggr_mode_tech := paste0(aggr_mode, "|", remind_rep)]
    datatable[!is.na(aggr_veh) & !is.na(remind_rep), aggr_veh_tech := paste0(aggr_veh, "|", remind_rep)]
    datatable[!is.na(aggr_LDV) & !is.na(remind_rep), aggr_LDV_tech := paste0(aggr_LDV, "|", remind_rep)]
    datatable[!is.na(det_veh) & !is.na(remind_rep), det_veh_tech := paste0(det_veh, "|", remind_rep)]
    datatable[!is.na(aggr_bunkers) & !is.na(remind_rep), aggr_bunkers_tech := paste0(aggr_bunkers, "|", remind_rep)]
    unit <- switch(mode,
                   "FE" = "EJ/yr",
                   "ES" = "bn km/yr",
                   "VKM" = "bn vkm/yr")
    prefix <- switch(mode,
                     "FE" = "FE|Transport|",
                     "ES" = "ES|Transport|",
                     "VKM" = "ES|Transport|VKM|")
    var <- c("Pass","Freight")
    Aggr <- c("aggr_mode", "aggr_veh", "aggr_LDV", "det_veh", "nonmot", "aggr_nonmot", "aggr_bunkers", "aggr_mode_tech", "aggr_veh_tech", "aggr_LDV_tech", "det_veh_tech","aggr_bunkers_tech")
    ## sum `value` per sector x aggregation level; one reported variable
    ## per distinct aggregation label
    for (var0 in var) {
      for (Aggr0 in Aggr) {
        #Aggregate data
        datatable0 <- copy(datatable)
        datatable0 <- datatable0[!is.na(get(Aggr0))]
        datatable0 <- datatable0[sector == var0, .(value = sum(value, na.rm = T)),
                                 by = c("region", "year", Aggr0)]
        if(nrow(datatable0) > 0) {
          setnames(datatable0, "year", "period")
          ## RHS `unit` refers to the local variable above, not the new column
          datatable0 <- datatable0[, model := model_name][, scenario := scenario_title][, variable := paste0(prefix, get(Aggr0))][, unit := unit][, eval(Aggr0) := NULL]
          ## interpolate/extrapolate to the reported time steps
          datatable0 <- approx_dt(datatable0, yrs, xcol = "period", ycol = "value",
                                  idxcols = c("scenario","variable","unit","model","region"),
                                  extrapolate = T)
          report <- rbind(report, datatable0)}
      }
    }
    return(report)
  }
## Demand emissions
  ## Demand-side CO2 emissions from transport final energy (Mt CO2/yr).
  ## "Tailpipe" variables count full combustion of the carrier; "Demand"
  ## variables scale liquids/gases by their fossil supply share from REMIND.
  reportingEmi <- function(repFE, gdx){
    ## load emission factors for fossil fuels
    p_ef_dem <- readgdx(gdx, "p_ef_dem")[all_enty %in% c("fepet", "fedie", "fegas")] ## MtCO2/EJ
    p_ef_dem[all_enty == "fegas", all_enty := "fegat"]
    setnames(p_ef_dem, old = c("value", "all_regi"), new = c("ef", "region"))
    ## attribute explicitly fuel used to the FE values
    emidem = repFE[grepl("Liquids|Gases|Hydrogen|Electricity", variable) & region != "World"] ## EJ
    ## LDV liquids are assigned to fepet, all other liquids to fedie
    emidem[, all_enty := ifelse(grepl("Liquids", variable), "fedie", NA)]
    emidem[, all_enty := ifelse(grepl("LDV.+Liquids", variable), "fepet", all_enty)]
    emidem[, all_enty := ifelse(grepl("Gases", variable), "fegat", all_enty)]
    emidem[, all_enty := ifelse(grepl("Electricity", variable), "feelt", all_enty)]
    emidem[, all_enty := ifelse(grepl("Hydrogen", variable), "feh2t", all_enty)]
    ## merge with emission factors
    emidem = emidem[p_ef_dem, on = c("all_enty","region")]
    ## calculate emissions and attribute variable and unit names
    emidem[, value := value*ef][, c("variable", "unit") := list(gsub("FE", "Emi\\|CO2", variable), "Mt CO2/yr")]
    emi = rbind(copy(emidem)[, c("type", "variable") := list("tailpipe", paste0(variable, "|Tailpipe"))],
                copy(emidem)[, c("type", "variable") := list("demand", paste0(variable, "|Demand"))])
    ## fossil share per carrier from REMIND final-energy production
    prodFe <- readgdx(gdx, "vm_prodFE")[, ttot := as.numeric(ttot)]
    setnames(prodFe,
             c("period", "region", "se", "all_enty", "te", "fe_demand"))
    prodFe[, se_share := fe_demand/sum(fe_demand), by = c("period", "region", "all_enty")]
    prodFe <- prodFe[all_enty %in% c("fedie", "fepet", "fegat") & se %in% c("segafos", "seliqfos")][, c("se", "te", "fe_demand") := NULL]
    emi <- prodFe[emi, on = c("period", "region", "all_enty")]
    ## in case no fossil fuels are used (e.g. 100% biodiesel), the value in se_share results NA. set the NA value to 0
    emi[is.na(se_share), se_share := 0]
    ## only "demand" rows of liquids/gases are scaled by the fossil share
    emi <- emi[all_enty %in% c("fedie", "fepet", "fegat") & type == "demand", value := value*se_share]
    emi[, c("se_share", "type", "ef", "all_enty") := NULL]
    ## aggregate removing the fuel dependency
    emi[, variable_agg := gsub("\\|Liquids|\\|Electricity|\\|Hydrogen|\\|Gases", "", variable)]
    emi = emi[, .(value = sum(value)), by = c("model", "scenario", "region", "unit", "period", "variable_agg")]
    setnames(emi, old = "variable_agg", new = "variable")
    emi = emi[, .(model, scenario, region, variable, unit, period, value)]
    return(emi)
  }
reportingVehNum <- function(demand_vkm, annual_mileage){
venum <- copy(demand_vkm)
## merge annual mileage
anmil <- copy(annual_mileage)
anmil[grepl("Subcompact", vehicle_type),
variable := "Pass|Road|LDV|Small"]
anmil[grepl("Mini", vehicle_type),
variable := "Pass|Road|LDV|Mini"]
anmil[vehicle_type == "Compact Car", variable := "Pass|Road|LDV|Medium"]
anmil[grepl("Large Car|Midsize Car", vehicle_type), variable := "Pass|Road|LDV|Large"]
anmil[grepl("SUV", vehicle_type),
variable := "Pass|Road|LDV|SUV"]
anmil[grepl("Van|Multipurpose", vehicle_type),
variable := "Pass|Road|LDV|Van"]
anmil[grepl("Motorcycle|Scooter|Moped", vehicle_type),
variable := "Pass|Road|LDV|Two-Wheelers"]
anmil[grepl("^Truck", vehicle_type),
variable := sprintf("Freight|Road|%s", vehicle_type)]
anmil[grepl("Bus", vehicle_type),
variable := "Pass|Road|Bus"]
anmil <- anmil[,.(region, period = year, variable, annual_mileage)]
anmil <- approx_dt(anmil, unique(demand_vkm$period), xcol = "period", ycol = "annual_mileage", idxcols = c("region", "variable"), extrapolate = T)
anmil<- unique(anmil[, c("period", "region", "variable", "annual_mileage")])
anmil <- anmil[, variable := paste0("ES|Transport|VKM|", variable)]
venum <- merge(demand_vkm, anmil, by = c("variable", "region", "period"))
venum[, ven := value/annual_mileage] # billion vehicle-km -> thousand vehicles
venum <- venum[!is.na(ven)]
venum[, variable := gsub("|VKM", "|VNUM", variable, fixed=TRUE)][, c("value", "annual_mileage") := NULL]
venum[, unit := "tsd veh"]
setnames(venum, "ven", "value")
venum = venum[,.(model, scenario, region, variable, unit, period, value)]
return(venum)
}
  ## Report vehicle Stock and Sales (Million vehicles) from the EDGE-T
  ## vintage composition file; returns NULL when no usable file is found.
  reportStockAndSales <- function(annual_mileage){
    if(file.exists(file.path(output_folder, "vintcomp.csv"))){
      vintages_file <- file.path(output_folder, "vintcomp.csv")
      vintgs <- fread(vintages_file)
    } else if (file.exists(datapath(fname = "vintcomp.RDS"))){
      #vintages_file <- datapath(fname = "vintcomp.RDS")
      #vintgs <- readRDS(vintages_file)
      ## RDS variant deliberately disabled: skip stock/sales reporting
      return(NULL)
    } else {
      print("EDGE-T Reporting: No vintages file found.")
      return(NULL)
    }
    year_c <- construction_year <- Stock <- Sales <- vintage_demand_vkm <- fct <- category <- NULL
    ## backward compat. fix
    ## older files name the construction-year column "variable"; their
    ## values additionally need the 1e-6 rescaling factor below
    fct <- 1.
    if("variable" %in% colnames(vintgs)){
      fct <- 1e-6
      setnames(vintgs, "variable", "construction_year")
    }
    ## construction years are encoded as "C_<year>"
    vintgs[, year_c := as.numeric(gsub("C_", "", construction_year))]
    ## stock is the full stock up to the end of the current year
    ## sales are the sales of the current year
    setnames(vintgs, "full_demand_vkm", "Stock")
    vintgs[, Stock := Stock * fct]
    vintgs[, Sales := Stock - sum(vintage_demand_vkm), by=.(year, region, vehicle_type, technology)]
    vintgs[, c("construction_year", "vintage_demand_vkm", "year_c") := NULL]
    vintgs <- unique(vintgs)
    vintgs <- data.table::melt(vintgs, measure.vars = c("Stock", "Sales"), variable.name = "category")
    ## vkm -> v-num
    vintgs = merge(vintgs, annual_mileage, by = c("year", "region", "vehicle_type"))
    vintgs[, value := value / annual_mileage]
    vintgs[, variable := ifelse(
      vehicle_type == "Bus_tmp_vehicletype",
      sprintf("%s|Transport|Bus|%s", category, technology),
      sprintf("%s|Transport|LDV|%s|%s", category, vehicle_type, technology))]
    ## totals
    ## add (1) per-vehicle-type totals over technologies (variable with the
    ## last "|..." segment stripped) and (2) the LDV grand total
    vintgs <- rbindlist(list(
      vintgs,
      vintgs[, .(value=sum(value), variable=gsub("(.+)\\|.+$", "\\1", variable)),
             by=c("category", "year", "region", "vehicle_type")],
      vintgs[grepl("|LDV|", variable, fixed=TRUE),
             .(value=sum(value), variable=sprintf("%s|Transport|LDV", category)),
             by=c("category", "year", "region")]), fill=TRUE)
    vintgs[, c("vehicle_type", "technology", "annual_mileage", "category") := NULL]
    vintgs <- unique(vintgs[!is.na(value)])
    setnames(vintgs, "year", "period")
    vintgs = approx_dt(vintgs, c(2005, 2010, unique(vintgs$period), 2110, 2130, 2150),
                       xcol = "period", ycol = "value", idxcols = c("region", "variable"), extrapolate = T)
    ## zero outside the tracked window (<= 2010 and > 2100)
    vintgs[period <= 2010|period > 2100, value := 0]
    ## remove the variable (e.g. vehicle_types) that are not present for this specific region
    vintgs[, `:=`(model = model_name, scenario = scenario_title, unit = "Million vehicles")]
    return(vintgs)
  }
reportTotals <- function(aggrname, datatable, varlist){
vars <- varlist[[aggrname]]
if (length(unique(datatable[variable %in% vars]$variable)) < length(vars)){
print(paste0("Missing variables to aggregate data to ", aggrname))}
datatable <- datatable[variable %in% vars,
.(variable = aggrname,
value = sum(value)),
by = c("model", "scenario", "region", "period","unit")]
return(datatable)
}
  ## check the regional aggregation
  regionSubsetList <- toolRegionSubsets(gdx)
  # ADD EU-27 region aggregation if possible
  if("EUR" %in% names(regionSubsetList)){
    regionSubsetList <- c(regionSubsetList,list(
      "EU27"=c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "ESW")
    ))
  }
  ## mapping of detailed EDGE-T categories onto reported variable names
  Aggrdata <- fread(system.file("extdata", "EDGETdataAggregation.csv", package = "edgeTrpLib"),header = TRUE)
  ## load input data from last EDGE run
  ## Data manipulation shouldn't be necessary
  demand_km <- readRDS(datapath(fname = "demandF_plot_pkm.RDS"))
  demand_km[, demand_F := demand_F * 1e-3] ## million -> billion pkm
  setnames(demand_km, "demand_F", "value")
  demand_ej <- readRDS(datapath(fname = "demandF_plot_EJ.RDS")) ## detailed final energy demand, EJ
  setnames(demand_ej, "demand_EJ", "value")
  demand_ej[, demand_F := NULL]
  load_factor <- readRDS(datapath(fname = "loadFactor.RDS"))
  annual_mileage <- readRDS(datapath(fname = "annual_mileage.RDS"))
  ## wider annual-mileage files carry extra columns: reduce and rename
  if (length(annual_mileage)> 4){
    #Same is done in lvl2_createoutput
    annual_mileage <- unique(annual_mileage[, c("region", "year", "vkm.veh", "vehicle_type")])
    setnames(annual_mileage, old = "vkm.veh", new = "annual_mileage")
  }
  ## load factors with > 4 columns are technology-resolved; otherwise only
  ## vehicle-type resolution is available for the vkm conversion
  if (length(load_factor)> 4){
    load_factor <- load_factor[, c("year","region","vehicle_type","loadFactor","technology")]
    demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type","technology"))
    demand_vkm[, value := value/loadFactor] ## billion vkm
  } else {
    demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type"))
    demand_vkm[, value := value/loadFactor]} ## billion vkm
repFE <- reporting(
demand_ej,
mode = "FE")
repVKM <- reporting(
datatable = demand_vkm,
mode = "VKM")
repES <- reporting(
datatable = demand_km,
mode = "ES")
toMIF <- rbind(
repFE,
repVKM,
repES,
reportingVehNum(repVKM,
annual_mileage),
reportingEmi(repFE = repFE,
gdx = gdx)
)
# Aggregation recipe: each entry names an aggregate reporting variable and
# lists the component variables that are summed into it (consumed below via
# reportTotals). Groups: energy service (ES, pkm/tkm), vehicle-km (ES|...|VKM),
# final energy (FE, total and by carrier), and tailpipe/demand CO2 emissions.
varsl <- list(
`ES|Transport|Pass|Road` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized"),
`ES|Transport|Pass|Aviation` = c("ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Pass|Rail` = c("ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR"),
`ES|Transport|Pass` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Freight` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation"),
`ES|Transport` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation","ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|VKM|Pass|Road` = c("ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
# NOTE(review): double pipe in "ES|Transport|VKM||Road" looks like a typo
# for "ES|Transport|VKM|Road" -- confirm against the reporting template.
`ES|Transport|VKM||Road` = c("ES|Transport|VKM|Freight|Road", "ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
`ES|Transport|VKM|Rail` = c("ES|Transport|VKM|Pass|Rail|HSR", "ES|Transport|VKM|Pass|Rail|non-HSR", "ES|Transport|VKM|Freight|Rail" ),
`FE|Transport|Pass|Road` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Road` = c("FE|Transport|Freight|Road", "FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Pass|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR"),
`FE|Transport|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR", "FE|Transport|Freight|Rail"),
`FE|Transport|Pass` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|Freight` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation"),
`FE|Transport` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation","FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|w/o bunkers` = c("FE|Transport|Freight|w/o bunkers","FE|Transport|Pass|w/o bunkers"),
`FE|Transport|Pass|Liquids` = c("FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Pass|Hydrogen` = c("FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Pass|Gases` = c("FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Pass|Electricity` = c("FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|Freight|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids"),
`FE|Transport|Freight|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen"),
`FE|Transport|Freight|Gases` = c("FE|Transport|Freight|Road|Gases"),
`FE|Transport|Freight|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity"),
`FE|Transport|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids","FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen","FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Gases` = c("FE|Transport|Freight|Road|Gases","FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity","FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|w/o bunkers|Liquids` = c("FE|Transport|Freight|w/o bunkers|Liquids","FE|Transport|Pass|w/o bunkers|Liquids"),
`FE|Transport|w/o bunkers|Hydrogen` = c("FE|Transport|Freight|w/o bunkers|Hydrogen","FE|Transport|Pass|w/o bunkers|Hydrogen"),
`FE|Transport|w/o bunkers|Gases` = c("FE|Transport|Freight|w/o bunkers|Gases","FE|Transport|Pass|w/o bunkers|Gases"),
`FE|Transport|w/o bunkers|Electricity` = c("FE|Transport|Freight|w/o bunkers|Electricity","FE|Transport|Pass|w/o bunkers|Electricity"),
`Emi|CO2|Transport|Pass|Road|Tailpipe` = c("Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Pass|Road|Demand` = c("Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Road|Tailpipe` = c("Emi|CO2|Transport|Freight|Road|Tailpipe", "Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Rail|Tailpipe` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Tailpipe", "Emi|CO2|Transport|Freight|Rail|Tailpipe"),
`Emi|CO2|Transport|Road|Demand` = c("Emi|CO2|Transport|Freight|Road|Demand", "Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Rail|Demand` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Demand", "Emi|CO2|Transport|Freight|Rail|Demand"))
# Compute every aggregate defined in varsl and append it to the report.
# NOTE(review): `names` shadows base::names here; a more distinctive local
# name would avoid confusion.
names <- names(varsl)
# sapply with simplify = FALSE behaves like lapply but keeps the aggregate
# names; each element is one reportTotals() result.
totals <- sapply(names, reportTotals, datatable = toMIF, varlist = varsl, simplify = FALSE, USE.NAMES = TRUE)
totals <- rbindlist(totals, use.names = TRUE)
toMIF <- rbind(toMIF, totals)
toMIF <- rbindlist(list(toMIF, reportStockAndSales(annual_mileage)), use.names=TRUE)
# Append regional aggregates (EUR, NEU, EU27 when defined) and a global
# "World" total, each summed over its member regions.
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
toMIF[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
toMIF[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
LogitCostplotdata <- function(priceData, prefData, logitExp, groupValue, Reg_Aggregation){
tot_price <- sw <- logit.exponent <- weight <- NULL
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
all_subsectors <- c("technology", "vehicle_type", "subsector_L1", "subsector_L2",
"subsector_L3", "sector")
# change variable names for quitte format
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
prefData <- prefData[period %in% yrs_costs]
priceData<- priceData[period %in% yrs_costs][, -c("share")]
#Filter for logit level according to groupValue. leave out tmp placeholders
priceData <- priceData[!grepl("tmp", get(groupValue))]
prefData <- prefData[!grepl("tmp", get(groupValue))]
# Calculate Inconvenience Cost from share Weight
# Logit Exponent and total price are needed for this
prefData_inco <- merge(prefData, logitExp, all.y = TRUE)
#rename original prefs afterwards
setnames(prefData,c("sw"),c("value"))
#Reduce priceData to total price
price_tot <- priceData[, c("period", "region", "tot_price", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]), with = FALSE]
prefData_inco <- merge(prefData_inco, price_tot, by = c("period", "region", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
prefData_inco[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
#Set Inconveniencecost to zero for shareweights where ES demand is anyway zero
prefData_inco <- prefData_inco[is.infinite(prefData_inco$value), value:=0]
prefData_inco <- prefData_inco[, c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)], "value"), with = FALSE][, variable := "Eq inconvenience cost"]
#Prepare PriceData
priceData <- data.table::melt(priceData[, -c("tot_price")], id.vars = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_logitlevel <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)])]
prefData_aggr <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region & period %in% prefData$period], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_aggr,"aggr_reg","region")
prefData_inco_aggr <- aggregate_dt(prefData_inco[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", "variable", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_inco_aggr,"aggr_reg","region")
priceData_aggr <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(priceData_aggr,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggr)
priceData <- rbind(prefData_inco, prefData_inco_aggr, priceData,priceData_aggr)
if (groupValue=="vehicle_type"){
#Before prices are finally structured, vehicles are aggregated
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
#Exclude those wihout aggregation
Aggrdata_veh <- Aggrdata_veh[!vehicle_type==det_veh]
priceData <- priceData[, c("region","variable","vehicle_type","period","value")]
weight_pkm_VS1 <- weight_pkm[,.(weight = sum(weight)), by = c("region", "vehicle_type", "period")]
weight_pkm_VS1_aggrreg <- aggregate_dt(weight_pkm_VS1[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type"))
setnames(weight_pkm_VS1_aggrreg,"aggr_reg","region")
weight_pkm_VS1 <- rbind(weight_pkm_VS1, weight_pkm_VS1_aggrreg)
Prices_veh_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_VS1[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region","variable"))
setnames(Prices_veh_aggr, "det_veh", "vehicle_type")
Prices_veh_aggr[, variable:=paste0("Logit cost|V|", vehicle_type, "|", variable)][, vehicle_type := NULL]
}
if (groupValue=="vehicle_type"){
#Convert original shareweights to quitte format
prefData[, variable := paste0("Shareweight|V|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|V|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
priceData <- rbind(priceData, Prices_veh_aggr)}
else{
prefData[, variable := paste0("Shareweight|S",gsub("[^123]","",groupValue), "|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|S",gsub("[^123]","",groupValue), "|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
}
data <- rbind(prefData[, unit := "-"], priceData[, unit := "$2005/km"])
data[, scenario := scenario_title][, model := model_name]
return(data)
}
## Build quitte-format plot data for the fuel/vehicle (FV, i.e. technology)
## level of the logit tree: share weights, cost components, LDV
## inconvenience costs, and the equivalent inconvenience cost derived from
## share weights.
##
## Captures from the enclosing scope: weight_pkm, Aggrdata, model_name,
## scenario_title, aggregate_dt. setnames() mutates the caller's tables by
## reference.
##
## @param priceData data.table of FV cost components incl. tot_price/share.
## @param prefData data.table of FV preferences; logit_type distinguishes
##   share weights ("sw") from explicit LDV inconvenience-cost components.
## @param logitExp data.table with logit.exponent per node.
## @param Reg_Aggregation data.table mapping region -> aggr_reg.
## @return data.table in quitte layout with "Shareweight|F|..." and
##   "Logit cost|F|..." variables.
LogitCostplotdata_FV <- function(priceData, prefData, logitExp, Reg_Aggregation){
# Declare NSE column names as NULL to silence R CMD check notes.
tot_price <- sw <- logit.exponent <- weight <- logit_type <- av_veh <- NULL
#Calculate equivalent inconvenience cost
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
# change variable names for mip
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
#Exclude active modes as they have no fuel
prefData <- prefData[period %in% yrs_costs & !technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
priceData<- priceData[period %in% yrs_costs]
# Calculate Inconvenience Cost from share Weight
priceData_sw <- copy(prefData)
priceData_sw <- priceData_sw[logit_type == "sw"][, logit_type := NULL]
setnames(priceData_sw, "value", "sw")
priceData_sw <- merge(priceData_sw, logitExp, all.x = TRUE)
#This should be removed in refactoring process
# Hard-coded fallback exponents: -4 for trucks, -10 elsewhere when missing.
priceData_sw[grepl("^Truck", vehicle_type), logit.exponent := -4]
priceData_sw <- priceData_sw[is.na(logit.exponent), logit.exponent := -10]
price_tot <- priceData[, c("period", "region","tot_price", "technology","vehicle_type")]
priceData_sw <- merge(priceData_sw, price_tot, by = c("period", "region", "technology","vehicle_type"),
all.x=TRUE)
# Equivalent inconvenience cost: tot_price * (sw^(1/lambda) - 1).
priceData_sw[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
#Set inconvenience cost to zero for share weights where ES demand is anyway zero
priceData_sw <- priceData_sw[is.infinite(priceData_sw$value), value := 0]
#Some total prices are missing
priceData_sw <- priceData_sw[is.na(priceData_sw$value), value := 0]
priceData_sw <- priceData_sw[, c("period", "region", "technology","vehicle_type","value")][, variable := "Eq inconvenience cost"]
# LDV-specific inconvenience-cost components come in via logit_type != "sw".
priceData_inco_LDV <- prefData[!logit_type == "sw"][, c("period", "region", "technology","vehicle_type","value","logit_type")]
setnames(priceData_inco_LDV, "logit_type", "variable")
#Exclude LDV inco from prefdata
prefData <- prefData[logit_type == "sw"]
prefData <- prefData[, .(region, period, scenario, vehicle_type, technology, value)]
priceData <- data.table::melt(priceData[, -c("tot_price", "share", "subsector_L1", "subsector_L2", "subsector_L3", "sector")], id.vars = c("region", "period", "technology", "vehicle_type"))
priceData <- rbind(priceData, priceData_sw, priceData_inco_LDV)
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_FV <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period","vehicle_type", "technology")]
#TO FIX:
#Hydrogen and BEV technologies for aviation and 2Wheelers are not everywhere available: -> Insert zero as weight
# NOTE(review): merge.data.table takes `by=`, not `on=`; the `on=` argument
# here is swallowed by `...` and the merge falls back to all shared column
# names. That happens to be region/period/vehicle_type/technology for these
# tables, but the intent should be made explicit with `by=`.
weight_pkm_FV <- merge(weight_pkm_FV, priceData, on=c("region", "period", "vehicle_type", "technology"), all = TRUE)
weight_pkm_FV[is.na(weight_pkm_FV$weight), weight := 0]
weight_pkm_FV <- weight_pkm_FV[, c("region", "period","vehicle_type", "technology", "weight")]
weight_pkm_FV <- weight_pkm_FV[period > 1990 & period < 2110]
weight_pkm_FV <- unique(weight_pkm_FV)
weight_pkm_FV_aggrreg <- aggregate_dt(weight_pkm_FV[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type","technology"))
setnames(weight_pkm_FV_aggrreg,"aggr_reg","region")
weight_pkm_FV <- rbind(weight_pkm_FV, weight_pkm_FV_aggrreg)
priceData_aggrreg <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(priceData_aggrreg,"aggr_reg","region")
priceData <- rbind(priceData, priceData_aggrreg)
prefData_aggrreg <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(prefData_aggrreg,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggrreg)
#Before prices are finally structured, vehicles are aggregated
#ES pkm are used as weights for data aggregation
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
#Remove entries that are not aggregated
Aggrdata_veh <- Aggrdata_veh[!vehicle_type == det_veh]
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
priceData_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region", "variable", "technology"))
setnames(priceData_aggr, "det_veh", "vehicle_type")
#Aggregate average vehicle across all LDV 4-wheeler types
Aggrdata_avveh <- as.data.table(Aggrdata)
Aggrdata_avveh <- Aggrdata_avveh[subsector_L1 == "trn_pass_road_LDV_4W"]
Aggrdata_avveh <- unique(Aggrdata_avveh[, c("vehicle_type")])
Aggrdata_avveh[, av_veh := "Average veh"]
priceData_av <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_avveh$vehicle_type], Aggrdata_avveh , fewcol = "av_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_avveh$vehicle_type], datacols = c("region", "variable","technology"))
setnames(priceData_av, "av_veh", "vehicle_type")
priceData <- rbind(priceData, priceData_aggr, priceData_av)
# Final quitte layout; "_tmp_vehicletype" placeholders are stripped from
# the variable names.
priceData <- priceData[, variable := paste0("Logit cost|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology, "|", variable)][, c("region", "period", "variable", "value")][, unit := "$2005/km"][, model := model_name][, scenario := scenario_title]
prefData[, variable := paste0("Shareweight|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology)][, unit := "-"][, model := model_name][, scenario := scenario_title]
prefData <- prefData[, c("period", "region", "variable", "unit", "model", "scenario", "value")]
data <- rbind(priceData, prefData)
return(data)
}
# Mapping efficiencies for useful energy (final -> useful energy conversion
# factors per drivetrain technology).
# NOTE(review): only referenced by the commented-out UE block below, so
# currently unused -- confirm before removing.
Mapp_UE <- data.table(
technology = c("FCEV", "BEV", "Electric", "Liquids", "Hydrogen"),
UE_efficiency = c(0.36, 0.64, 0.8, 0.23, 0.25))
#ES pkm are used as weights for data aggregation
# copy() prevents the renames below from mutating demand_km by reference.
weight_pkm <- copy(demand_km)
setnames(weight_pkm, c("value","year"), c("weight","period"))
# Re-bucket the sector column; international aviation/shipping get their
# own sectors. (The c() wrappers around the scalar strings are redundant.)
weight_pkm[, sector := ifelse(sector %in% c("Pass"), "trn_pass", "trn_freight")]
weight_pkm[, sector := ifelse(subsector_L3 == c("International Aviation"), "trn_aviation_intl", sector)]
weight_pkm[, sector := ifelse(subsector_L3 == c("International Ship"), "trn_shipping_intl", sector)]
#Mapping for region Aggregation: native model regions -> EUR/NEU composites.
RegAggregation <- data.table(
aggr_reg = c("EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "NEU", "NEU"),
region = c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "UKI", "ESW", "NES", "NEN"))
# #Calculate useful energy
# UE <- toMIF[grepl("FE" & ("FCEV"|"BEV"|"Electric"|"Liquids"|"Hydrogen"), variable)]
# UE[, technology := gsub(!("FCEV"|"BEV"|"Electric"|"Liquids"|"Hydrogen"),"", variable)]
# UE <- merge(UE, Mapp_UE)
# UE[, value:= value*UE_efficiency][, variable := gsub("FE","UE", variable)]
# toMIF <- rbind(toMIF, UE)
#Calculate logit Costs
#Read in additional data if it exists; the whole block is skipped when the
#EDGE run did not export logit diagnostics.
if (file.exists(datapath(fname = "logit_data.RDS"))){
logit_data <- readRDS(datapath(fname = "logit_data.RDS"))
prices <- logit_data$share_list
Pref <- logit_data$pref_data
if (file.exists(datapath(fname = "logit_exp.RDS"))){
logit_exp <- readRDS(datapath(fname = "logit_exp.RDS"))
logit_exp <- logit_exp$logit_output
# For each logit level: drop data.table keys (setkey(x, NULL)) so later
# merges/rbinds are not affected by stale key orderings, then convert the
# level's prices + preferences to quitte plot data.
#Prices S3S
Prices_S3S <- prices$S3S_shares
setkey(Prices_S3S, NULL)
Pref_S3S <- Pref$S3S_final_pref
setkey(Pref_S3S, NULL)
logit_exp_S3S <- logit_exp$logit_exponent_S3S
setkey(logit_exp_S3S, NULL)
#Adjust in model itself in refactoring process
# Active modes carry their full cost as value-of-time cost.
Prices_S3S[subsector_L3 %in% c("Cycle","Walk"), tot_VOT_price := tot_price]
PrefandPrices_S3S <- LogitCostplotdata(priceData = Prices_S3S, prefData = Pref_S3S, logitExp =logit_exp_S3S, groupValue = "subsector_L3", Reg_Aggregation = RegAggregation)
#Prices S2S3
Prices_S2S3 <- prices$S2S3_shares
setkey(Prices_S2S3, NULL)
Pref_S2S3 <- Pref$S2S3_final_pref
setkey(Pref_S2S3, NULL)
logit_exp_S2S3 <- logit_exp$logit_exponent_S2S3
setkey(logit_exp_S2S3, NULL)
PrefandPrices_S2S3 <- LogitCostplotdata(priceData = Prices_S2S3, prefData = Pref_S2S3, logitExp = logit_exp_S2S3, groupValue = "subsector_L2", Reg_Aggregation = RegAggregation)
#Prices S1S2
Prices_S1S2 <- prices$S1S2_shares
setkey(Prices_S1S2, NULL)
Pref_S1S2 <- Pref$S1S2_final_pref
setkey(Pref_S1S2, NULL)
logit_exp_S1S2 <- logit_exp$logit_exponent_S1S2
setkey(logit_exp_S1S2, NULL)
PrefandPrices_S1S2 <- LogitCostplotdata(priceData = Prices_S1S2, prefData = Pref_S1S2, logitExp = logit_exp_S1S2, groupValue = "subsector_L1", Reg_Aggregation = RegAggregation)
#Prices VS1
Prices_VS1 <- prices$VS1_shares
setkey(Prices_VS1, NULL)
Pref_VS1 <- Pref$VS1_final_pref
setkey(Pref_VS1, NULL)
logit_exp_VS1 <- logit_exp$logit_exponent_VS1
setkey(logit_exp_VS1, NULL)
#Add subsector_L2, subsector L3 and sector to Prices_VS1 (for structural conformity)
Prices_VS1 <- merge(Prices_VS1, unique(Pref_VS1[, c("subsector_L2", "subsector_L3", "sector", "vehicle_type")]), by = "vehicle_type", all.x = TRUE)
PrefandPrices_VS1 <- LogitCostplotdata(priceData=Prices_VS1, prefData = Pref_VS1,logitExp = logit_exp_VS1, groupValue = "vehicle_type", Reg_Aggregation = RegAggregation)
#Prices FV
Prices_FV <- prices$FV_shares
setkey(Prices_FV, NULL)
Pref_FV <- Pref$FV_final_pref
setkey(Pref_FV, NULL)
# NOTE(review): the FV exponent is stored in a variable still named
# logit_exp_VS1 (overwriting the VS1 one); a name like logit_exp_FV would
# avoid confusion. Behavior is fine because VS1 processing is done above.
logit_exp_VS1 <- logit_exp$logit_exponent_FV
setkey(logit_exp_VS1, NULL)
Prices_FV <- LogitCostplotdata_FV(priceData=Prices_FV, prefData=Pref_FV, logitExp=logit_exp_VS1, Reg_Aggregation = RegAggregation)
Pref_FV <- Pref_FV[logit_type=="sw"]
#Walking and cycling have no fuel options
Pref_FV <- Pref_FV[!technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
Pref_FV[, variable:=paste0("Shareweight|F|",gsub("_tmp_vehicletype","",vehicle_type),"|",technology)][,unit:="-"][,scenario:=scenario_title][,model:=model_name]
Pref_FV <- Pref_FV[,.(region,period,scenario,variable,value,unit,model)]
toMIF <- rbind(toMIF,PrefandPrices_S3S, PrefandPrices_S2S3, PrefandPrices_S1S2, PrefandPrices_VS1, Prices_FV, Pref_FV)}}
#Aggregate data
#Insert POP and GDP when both inputs were exported by the run.
# NOTE(review): scalar conditions would conventionally use && rather than &.
if (file.exists(datapath(fname = "POP.RDS")) & file.exists(datapath(fname = "GDP.RDS"))){
POP <- readRDS(datapath(fname = "POP.RDS"))
GDP <- readRDS(datapath(fname = "GDP.RDS"))
POP <- POP[year %in% yrs]
GDP <- GDP[year %in% yrs]
POP[, model:= model_name][, scenario:= scenario_title][, variable := "Population"][, unit := "million"]
GDP[, model:= model_name][, scenario:= scenario_title][, variable := "GDP|PPP"]
# GDP arrives in the `weight` column; scale million -> billion US$2005/yr.
GDP[, weight := weight*0.001][, unit := "billion US$2005/yr"]
setnames(GDP,c("year","weight"),c("period","value"))
setnames(POP,"year","period")
# Same regional composites as for the main report (EUR/NEU/EU27/World).
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
POP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
POP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
GDP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
toMIF <- rbind(toMIF, POP, GDP)
}
}
#We should finally decide for which yrs the model runs and shows reasonable results
toMIF <- toMIF[period %in% yrs]
## Make sure there are no duplicates!
# anyDuplicated() returns 0 when clean, otherwise the index of the first
# duplicate row, which is used here both as a truthy flag and to show the
# offending row.
idx <- anyDuplicated(toMIF, by = c("region", "variable", "period"))
if(idx){
# NOTE(review): paste0 recycles the header string across every line that
# capture.output returns; paste0(header, paste(capture.output(...),
# collapse = "\n")) would produce a cleaner single message.
warning(paste0("Duplicates found in EDGE-T reporting output:",
capture.output(toMIF[idx]), collapse="\n"))
}
toMIF <- toMIF[!duplicated(toMIF)]
# Fix the MIF column order before conversion to a quitte object.
toMIF <- toMIF[, c("model", "scenario", "region", "variable", "unit", "period", "value")]
return(as.quitte(toMIF))
}
|
953b53ac0177cbce519036a66ec0440f619804f5 | 41e7ad3949eb12b8a868669acab3c9698bead4b1 | /Recomnder_System.R | 9151ac380a8a4497d0a7d1c3c40e10b8437ba147 | [] | no_license | pseemakurthi/Recomendation-Engine | 47cfe0f63d9fbbb8b5c8512846c66751b7160a64 | ff47cf2abaa4530820b6477efb10241e0ffef58c | refs/heads/master | 2016-09-06T14:11:30.371487 | 2015-02-03T13:19:26 | 2015-02-03T13:19:26 | 30,231,490 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,818 | r | Recomnder_System.R | library("XLConnect")
options( java.parameters = "-Xmx4g" )
# To check the Amount of free memory in the Java Virtual Machine (JVM):
xlcMemoryReport()
# install.packages("recommenderlab")
library(recommenderlab)
library(ggplot2)
# Clear the existing workspace
rm(list=ls(all="TRUE"))
setwd('D:\\Music')
# Read the user song listen counts
data=read.table(file="tripletsout.txt",header=TRUE,sep=",")
colnames(data)=c("UserID","SongID","Rating")
# Convert the data into a rating matrix can be converted to real or binary matrix
(songs.rat <- as(data,"realRatingMatrix"))
# (songs.rat <- as(data,"binaryRatingMatrix"))
# display the contents of the rating matrix
# as(songs.rat, "list")
head(as(songs.rat, "data.frame"))
# image(songs.rat)
#analyze the ratings matrix
summary(getRatings(songs.rat))
# Bar chart of ratings: shows where most of the rating mass lies.
qplot(getRatings(songs.rat),
      binwidth = 1, xlim = c(1,10),
      main = "Ratings Bar Chart",
      ylab = "# of Users", xlab = "Rating")
################## COMMENTED Z-SCORE NORMALIZATION ######################
# We can normalize the rating matrix using the z-score method if needed.
# This trims off the heavy tail of high ratings.
# summary(getRatings(normalize(songs.rat, method = "Z-score")))
# qplot(getRatings(normalize(songs.rat, method = "Z-score")), binwidth = 1, xlim = c(-3,5),
#      main = "Ratings Bar Chart", ylab = "# of Users", xlab = "Rating")
# songs.rat <- normalize(songs.rat, method = "Z-score")
##########################################################################
# Plot the distribution of the number of ratings each song received
# (colCounts = non-missing entries per column, and columns are songs)
qplot(as.vector(colCounts(songs.rat)),
      binwidth=1, xlim = c(1,30),
      main = "How Many Rated",
      xlab = "Songs",
      ylab = "No of Raters")
# The matrix is very sparse: most songs have very few raters.
################## CONSIDER HIGHLY RATED SONGS ######################
# Number of ratings per column (i.e. per song), as an integer vector.
# NOTE: fixed a typo -- this was assigned as `colIds` but used as `colIdx`
# below, which made the section fail with "object 'colIdx' not found".
colIdx <- colCounts(songs.rat)
# Rating count of the 1000th most-rated song (the cut-off used below)
sort(colIdx, decreasing = TRUE)[1000]
###### For computation lets consider only 1000 most rated songs ############
songs.rat@data <- songs.rat@data[, which(colIdx >= sort(colIdx, decreasing = TRUE)[1000])]
# analyze the ratings matrix
songs.rat
summary(rowCounts(songs.rat))
# as(songs.rat, "list")
head(as(songs.rat, "data.frame"))
################## CONSIDER HIGHLY RATED SONGS ######################
# Also lets bring down the number of users. Consider only users that have
# rated at least 20 songs (the cut-off actually applied below).
################## CONSIDER USERS RATING MORE SONGS ######################
qplot(as.vector(rowCounts(songs.rat)),
      binwidth=1, xlim = c(0,30),
      main = "Avg Songs Rated",
      xlab = "Raters",
      ylab = "No of Songs")
# Number of ratings per row (i.e. per user), as an integer vector
rowIdx <- rowCounts(songs.rat)
# Keep only users with at least 20 rated songs. Logical indexing replaces
# the original `[-1 * which(rowIdx < 20)]`: when no user falls below the
# cut-off, which() returns integer(0) and negative indexing would then
# select zero rows, silently dropping *all* users.
songs.rat <- songs.rat[rowIdx >= 20]
songs.rat
summary(rowCounts(songs.rat))
# as(songs.rat, "list")
head(as(songs.rat, "data.frame"))
################## CONSIDER USERS RATING MORE SONGS ######################
###############################
## CROSS VALIDATION EVALUATION
# 2-fold cross-validation; each test user reveals 10 ratings, and ratings
# >= 5 count as "good" (relevant) items.
scheme <- evaluationScheme(songs.rat, method="cross-validation", goodRating=5, k=2, given=10)
###############################
###############################
## SPLIT EVALUATION (alternative scheme, kept for reference)
# scheme <- evaluationScheme(songs.rat, method="split", train=0.9, given=10, goodRating=5)
###############################
# Candidate algorithms to compare, with their tuning parameters
algorithms <- list(
  "random items" = list(name="RANDOM", param=NULL),
  "popular items" = list(name="POPULAR", param=NULL),
  "user-based CF" = list(name="UBCF", param=list(method="Cosine",
                                                 nn=10, minRating=1)),
  "Item-based CF" = list(name = "IBCF", param = list(normalize="Z-score")),
  "Assoc Rules CF" = list(name="AR", param=NULL),
  "LRMF (100 categories)" = list(name = "LRMF", param = list(categories=100,
                                                             normalize="Z-score"))
)
# evaluate all algorithms at top-1/3/5/10 recommendation list lengths
results <- evaluate(scheme, algorithms, n=c(1, 3, 5, 10))
# plot the ROC points
plot (results, annotate=c(1,3), legend="topleft")
# plot the precision/recall points
plot (results,"prec/rec",annotate=c(1,2,3,4))
results
names(results)
results[["user-based CF"]]
avg(results)[[1]]
# getConfusionMatrix(results)[[2]]
recommenderRegistry$get_entries(dataType = "realRatingMatrix")
########################################
# EVALUATION OF THE PREDICTED RATINGS
########################################
# 90/10 train/test split; each test user reveals 15 ratings ("known"),
# the rest are held out ("unknown") for error computation.
scheme <- evaluationScheme(songs.rat, method="split", train=0.9, given=15, goodRating=5)
scheme
# We create two recommenders (user-based and item-based collaborative
# filtering) using the training data.
r1 <- Recommender(getData(scheme, "train"), "UBCF")
r1
r2 <- Recommender(getData(scheme, "train"), "IBCF")
r2
# Compute predicted ratings for the known part of the test data (
# 15 items for each user) using the two algorithms.
p1 <- predict(r1, getData(scheme, "known"), type="ratings")
p1
as(p1, "list")
head(as(p1, "data.frame"))
## compute error metrics averaged per user and then averaged over all
## recommendations
calcPredictionError(p1, getData(scheme, "unknown"))
calcPredictionError(p1, getData(scheme, "unknown"), byUser=TRUE)
calcPredictionError(p1, songs.rat, byUser=FALSE)
p2 <- predict(r2, getData(scheme, "known"), type="ratings")
p2
# Calculate the error between the prediction and the unknown part of the test data.
error <- rbind(
  calcPredictionError(p1, getData(scheme, "unknown")),
  calcPredictionError(p2, getData(scheme, "unknown"))
)
rownames(error) <- c("UBCF","IBCF")
error |
62b5492b732c18715341b365972d79955849d5bd | 6b0f6cfd899add326a4fe835feabb2c7d5c92278 | /tests/testthat/test-plots.R | 90c889735d54e6d3df9bedfc43886a89d401e4b0 | [] | no_license | SvetiStefan/olsrr | 9c7b4b82fdae631cab778107381d3d259408996d | db9f0427771f07ca93e8600a1573aef7779e76f5 | refs/heads/master | 2021-06-17T11:56:39.893077 | 2017-06-06T10:31:45 | 2017-06-06T10:31:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,829 | r | test-plots.R | context('plots')
# Every olsrr plotting helper below shares the same input guard: it must raise
# "Please specify a OLS linear regression model." for any model object that is
# not a plain `lm` fit. The checks are identical apart from the function under
# test, so they are driven from a lookup table (test label -> function name)
# instead of 22 hand-written copies of the same test.
lm_only_checks <- c(
  "ovsp_plot"                  = "ols_ovsp_plot",
  "rvsr_plot"                  = "ols_rvsr_plot",
  "rvsp_plot"                  = "ols_rvsp_plot",
  "qqresid"                    = "ols_rsd_qqplot",
  "residual histogram"         = "ols_rsd_hist",
  "diag_panel"                 = "ols_diagnostic_panel",
  "rfs_plot"                   = "ols_rfs_plot",
  "fm_plot"                    = "ols_fm_plot",
  "rsd_plot"                   = "ols_rsd_plot",
  "hadi_plot"                  = "ols_hadi_plot",
  "poten_resid_plot"           = "ols_potrsd_plot",
  "ols_avplots"                = "ols_avplots",
  "cplusr_plot"                = "ols_rpc_plot",
  "residual boxplot"           = "ols_rsd_boxplot",
  "cooks d barplot"            = "ols_cooksd_barplot",
  "cooks d chart"              = "ols_cooksd_chart",
  "dfbetas panel"              = "ols_dfbetas_panel",
  "dffits plot"                = "ols_dffits_plot",
  "dsrvsp plot"                = "ols_dsrvsp_plot",
  "rsdlev plot"                = "ols_rsdlev_plot",
  "studentized residual plot"  = "ols_srsd_plot",
  "studentized residual chart" = "ols_srsd_chart"
)

for (label in names(lm_only_checks)) {
  fun_name <- lm_only_checks[[label]]
  test_that(paste0(label, " fails when model inherits other than 'lm'"), {
    # A glm fit stands in for "any model that is not an lm"
    y <- sample(c(1:4), 100, replace = TRUE)
    x <- sample(c(1, 2), 100, replace = TRUE)
    m <- glm(x ~ y)
    expect_error(match.fun(fun_name)(m),
                 'Please specify a OLS linear regression model.')
  })
}
be78867563d4a82df8fb7ac35a915227c50afb68 | 16de310794564e0525188a9a08f3410f060ad198 | /R/A2P.lin.R | b6205a465a09138438284d5e7a36c43c552e0c60 | [] | no_license | cran/AtmRay | b6d578638de5d8687a2a41b04eed877c08fc3044 | 9205b1ef22b39c39f3ce4e224bf55af13e425f41 | refs/heads/master | 2020-05-18T04:40:39.503065 | 2012-09-28T00:00:00 | 2012-09-28T00:00:00 | 17,677,750 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 259 | r | A2P.lin.R | A2P.lin = function(angle, z, az, ATM){
  # Effective sound speed along azimuth `az`: base speed c0 plus the wind
  # vector projected onto the propagation direction, with both sound speed
  # and wind extrapolated linearly in height from reference level z0 using
  # the gradients gc/gwx/gwy. (Assumes az is in degrees with sin -> x-wind
  # and cos -> y-wind components -- TODO confirm convention.)
  ceff = as.vector(ATM$c0 + c(ATM$wx0, ATM$wy0) %*% c(sin(az*pi/180), cos(az*pi/180)) + (z - ATM$z0) * (ATM$gc + c(ATM$gwx, ATM$gwy) %*% c(sin(az*pi/180), cos(az*pi/180))))
  # Ray parameter (horizontal slowness): p = sin(takeoff angle) / c_eff
  p = sin(angle * pi/180)/ceff
  return(p)
}
|
2015d555cefd0dce908ec778935e0f1579ef3496 | b27163aa44e444db482972e939533597afefc04e | /readCalibration.R | 07d1e6db8eb0a23ee82c49128f67983f1b0c97ea | [] | no_license | vargovargo/ITHIM_CA | d0082c073e10963f91fd997f8d6ac085ccd417c6 | 3e1b386d59a4dc37a061ea636fd392ff6e629510 | refs/heads/master | 2021-08-23T12:05:18.328736 | 2017-12-04T20:58:03 | 2017-12-04T20:58:03 | 107,732,246 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,409 | r | readCalibration.R | rm(list=ls())
library("devtools")
# install_github("syounkin/ITHIM", ref="master")
# NOTE(review): re-installs the devel branch on every run; comment out once
# the package is installed.
install_github("ITHIM/ITHIM", ref="devel", force=TRUE)
library("ITHIM")
# Calibration workbook holding the survey results read below
xlFileName <- "./ITHIM/fromNeil/ITHIM_California2016-12-12SANDAG_Trends.xlsx"
ITHIMss <- readxl::read_excel(xlFileName, sheet="Calibration Data")
# Default ITHIM object; inspect its parameter set and baseline Rwt matrix
holder <- createITHIM()
getParameterSet(holder)
holder@parameters@Rwt
##############################
# extract baseline Population
# Wide table: one row per age class, one column per sex. The final
# *positional* select() is deliberate: the spread() column names depend on
# whether the sheet uses "M"/"F" or "m"/"f", so columns are picked by
# position (ageClass, second sex column, first sex column).
popTemp <-
  ITHIMss %>% filter(
    item_name == "Distribution of population by age and gender",
    scenario_id == 0,
    sex %in% c("M", "F", "m", "f")) %>%
  select(age_group, sex, unwt_n) %>%
  spread(key = sex, value = unwt_n) %>%
  mutate(ageClass = paste0("ageClass", (1:8))) %>% # assumes exactly 8 age groups -- TODO confirm
  select(c(4, 3, 2))
##############################
# extract baseline relative walking means
RwtTemp <-
  ITHIMss %>% filter(item_name == "Per capita mean daily travel time by mode",
                     scenario_id == 0,
                     sex %in% c("M","F","m","f"),
                     mode %in% c("walk","Walk")) %>%
  select(age_group, sex, item_result) %>%
  spread(key = sex, value = item_result) %>%
  mutate(ageClass = paste0("ageClass", (1:8))) %>%
  select(c(4,3,2))
##############################
# extract baseline relative cycling means
RctTemp <-
  ITHIMss %>% filter(item_name == "Per capita mean daily travel time by mode",
                     scenario_id == 0,
                     sex %in% c("M","F","m","f"),
                     mode %in% c("bike","Bike")) %>%
  select(age_group, sex, item_result) %>%
  spread(key = sex, value = item_result) %>%
  mutate(ageClass = paste0("ageClass", (1:8))) %>%
  select(c(4,3,2))
# add a line to replace NA with zeros?
##############################
# extract baseline mean walking time (single overall per-capita value)
muwtTemp <-
  ITHIMss %>% filter(
    item_name == "Per capita mean daily travel time",
    scenario_id == 0,
    mode %in% c("walk", "Walk")) %>%
  select(item_result)
##############################
# extract baseline mean cycling time (single overall per-capita value)
muctTemp <-
  ITHIMss %>% filter(
    item_name == "Per capita mean daily travel time",
    scenario_id == 0,
    mode %in% c("bike", "Bike")) %>%
  select(item_result)
##############################
# extract coefficient of variation of active travel time
cvTemp <-
  ITHIMss %>% filter(
    item_name == "Standard deviation of mean daily active travel time") %>%
  select(cv)
|
53b02c3d438b10e832948b76cb189119df282fd8 | d99b7ff77e769fde0e3e7810ca39a3c1008ba07f | /R/triadic.R | a5d41d868bc339cccd532169ba0f3e819122bc6b | [] | no_license | gastonstat/colortools | 96c56012d4726979ad049f8ceb356a19d71c6afb | 8d96e2c5dbc21548c5f01532e70b56a5263c90aa | refs/heads/master | 2022-05-02T19:48:47.543024 | 2022-03-27T00:22:40 | 2022-03-27T00:22:40 | 5,799,925 | 18 | 2 | null | 2020-02-02T00:10:03 | 2012-09-13T19:41:34 | R | UTF-8 | R | false | false | 2,545 | r | triadic.R | #' @title Triadic Color Scheme
#'
#' @description
#' Triadic color schemes use colors that are evenly spaced around the color
#' wheel.
#'
#' @details
#' The triadic colors are obtained following a color wheel with 12 colors, each
#' one spaced at 30 degrees from each other.
#' Triadic color schemes tend to be quite vibrant. To use a triadic harmony
#' successfully, the colors should be carefully balanced letting one color
#' dominate and use the others for accent.
#'
#' @param color an R color name or a color in hexadecimal notation
#' @param plot logical value indicating whether to plot a color wheel with the
#' generated scheme
#' @param bg background color of the plot. Used only when \code{plot=TRUE}
#' @param labcol color for the labels (i.e. names of the colors). Used only when
#' \code{plot=TRUE}
#' @param cex numeric value indicating the character expansion of the labels
#' @param title logical value indicating whether to display a title in the plot.
#' Used only when \code{plot=TRUE}
#' @return A character vector with the given color and the triadic colors in
#' hexadecimal notation
#' @author Gaston Sanchez
#' @seealso \code{\link{complementary}}, \code{\link{splitComp}},
#' \code{\link{adjacent}}, \code{\link{tetradic}}, \code{\link{square}}
#' @export
#' @examples
#' # triadic colors of 'tomato'
#' triadic("tomato")
#'
#' # triadic colors of 'tomato' with background color 'gray20'
#' triadic("tomato", bg = "gray20")
#'
triadic <-
function(color, plot=TRUE, bg="white", labcol=NULL, cex=0.8, title=TRUE)
{
  # The triadic scheme takes positions 1, 5 and 9 of a 12-color wheel,
  # i.e. three colors 120 degrees apart.
  tmp_cols = setColors(color, 12)
  triad_pos <- c(1, 5, 9)
  triad_colors <- tmp_cols[triad_pos]
  # plot
  if (plot)
  {
    other_pos <- setdiff(1:12, triad_pos)
    # Labels of non-scheme colors always take the background color so they
    # disappear; scheme labels are either picked automatically from the
    # background brightness or taken from `labcol`.
    lab_col <- rep(col2HSV(bg), 12)
    if (is.null(labcol)) {
      lab_col[triad_pos] <- if (mean(col2rgb(bg)) > 127) "black" else "white"
    } else {
      # NOTE: the original also branched on background brightness here, but
      # both branches were identical, so the dead conditional was removed.
      lab_col[triad_pos] <- labcol
    }
    # hide non-triadic colors by appending a ~5% alpha channel ("0D")
    tmp_cols[other_pos] <- paste(substr(tmp_cols[other_pos], 1, 7), "0D", sep="")
    pizza(tmp_cols, labcol=lab_col, bg=bg, cex=cex)
    # title
    if (title)
      title(paste("Triadic color scheme of: ", tmp_cols[1]),
            col.main=lab_col[1], cex.main=0.8)
  }
  # result
  triad_colors
}
|
8cd82a6f17facbc91c989a0cc56b471b50fad3ee | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/topepo/caret/test_twoClassSummary.R | 78026bf4280b3019bd676bee5fcb9d47e2fa5a7c | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 950 | r | test_twoClassSummary.R |
context('twoClassSummary')
test_that("twoClassSummary is calculating correctly", {
library(caret)
set.seed(1)
tr_dat <- twoClassSim(100)
te_dat <- tr_dat
tr_dat$Class = factor(tr_dat$Class, levels = rev(levels(te_dat$Class)))
set.seed(35)
mod1 <- train(Class ~ ., data = tr_dat,
method = "fda",
tuneLength = 10,
metric = "ROC",
trControl = trainControl(classProbs = TRUE,
summaryFunction = twoClassSummary))
set.seed(35)
mod2 <- train(Class ~ ., data = te_dat,
method = "fda",
tuneLength = 10,
metric = "ROC",
trControl = trainControl(classProbs = TRUE,
summaryFunction = twoClassSummary))
expect_equal(mod1$resample$ROC, mod2$resample$ROC)
expect_equal(mod1$resample$Sens, mod2$resample$Spec)
expect_equal(mod1$resample$Spec, mod2$resample$Sens)
})
|
246d0936f3ac0f82836299e7cb73053fbb57f102 | b4eae8e4e5caf81889ce980f314aad8b7f684e39 | /R/gg_resX.R | b06b8a64bce8b4bbb72f7945137afd84b8fdd1d4 | [] | no_license | alienzj/lindia | a9771e72ad50ec4e4360a50f6a39e64cd5d50d0e | 9853c34ff2568e95ad9aa3649f779d84ea838bff | refs/heads/master | 2023-02-17T23:16:40.238951 | 2020-08-05T03:54:48 | 2020-08-05T03:54:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,272 | r | gg_resX.R |
#' Generate residual plot of residuals against predictors
#'
#' @param fitted.lm a fitted linear model (i.e. lm, glm) that contains fitted regression
#' @param plot.all boolean value to determine whether plot will be return as
#' a plot arranged using `grid.arrange()`. When set to false, the function
#' would return a list of residual plots. Parameter defaults to TRUE.
#' @param ncol specify number of columns in resulting plot per page. Default to make a square matrix of the output.
#' @param scale.factor numeric; scales the point size and linewidth to allow customized viewing. Defaults to 0.5.
#' @param max.per.page numeric; maximum number of plots allowed in one page. Parameter defaults to fit all plots on one page.
#' @return An arranged grid of residuals against predictor values plots in ggplot.
#' If plot.all is set to FALSE, a list of ggplot objects will be returned instead.
#' Name of the plots are set to respective variable names.
#' @examples
#' library(MASS)
#' data(Cars93)
#' # a regression with categorical variable
#' cars_lm <- lm(Price ~ Passengers + Length + RPM + Origin, data = Cars93)
#' gg_resX(cars_lm)
#' # customize which diagnostic plot is included by have gg_resX to return a list of plots
#' plots <- gg_resX(cars_lm, plot.all = FALSE)
#' names(plots) # get name of the plots
#' exclude_plots <- plots[-1 ] #exclude certain residual plots
#' include_plots <- plots[1] # include certain residual plots
#' plot_all(exclude_plots) # make use of plot_all() in lindia
#' plot_all(include_plots)
#' @export
gg_resX <- function(fitted.lm, plot.all = TRUE, scale.factor = 0.5, max.per.page = NA, ncol = NA){
  handle_exception(fitted.lm, "gg_resX")
  # extract model matrix
  lm_matrix = fortify(fitted.lm)
  # extract relevant explanatory variables in model matrix
  var_names = get_varnames(fitted.lm)$predictor
  # One residual plot per predictor. Name the plots *before* dropping NULL
  # entries so names stay aligned with their plots: the original code
  # compacted the list first and assigned names afterwards, which misaligned
  # names whenever get_resplot() returned NULL for some predictor.
  plots = lapply(var_names, get_resplot, lm_matrix, fitted.lm, scale.factor)
  names(plots) = var_names
  plots = plots[!vapply(plots, is.null, logical(1))]
  # handle malformed max.per.page request
  if (is.na(max.per.page)) {
    max.per.page = length(plots)
  } else if (!is.numeric(max.per.page) || max.per.page < 1) {
    # is.numeric() also accepts integer values, unlike class(x) == "numeric"
    message("Maximum plots per page invalid; switch to default")
    max.per.page = length(plots)
  }
  # determine to plot the plots, or return a list of plots
  if (plot.all) {
    return(arrange.plots(plots, max.per.page, ncol))
  }
  else {
    return (plots)
  }
}
#
# arrange.plots arranges plot to pages according to max.per.page
#
arrange.plots <- function(plots, plots.per.page, ncol) {
  # Arrange `plots` into pages of at most `plots.per.page` plots each;
  # `ncol` overrides the automatic column count when not NA.
  # get total number of plots
  len <- length(plots)
  # everything fits on a single page
  if (plots.per.page >= len) {
    nCol <- if (is.na(ncol)) get_ncol(len) else ncol
    return(do.call("grid.arrange", c(plots, ncol = nCol)))
  }
  # get pages needed
  pages <- ceiling(len / plots.per.page)
  for (i in seq_len(pages)) {
    start <- (i - 1) * plots.per.page + 1
    end <- min(i * plots.per.page, len)
    # plots[start:end] holds end - start + 1 plots; the original passed
    # end - start to get_ncol(), undercounting the page by one plot
    nCol <- if (is.na(ncol)) get_ncol(end - start + 1) else ncol
    do.call("grid.arrange", c(plots[start:end], ncol = nCol))
  }
}
#
# get_resplot - returns a ggplot object of residuals in fitted.lm against var in lm_matrix
#
# input : var - variable name string the residual plot is about
# lm_matrix - model matrix of the fitted lm
# fitted.lm : fitted lm
# data : original dataset (optional)
#
# output : a ggplot object of var vs. residual of fitted lm
#
get_resplot <- function(var, lm_matrix, fitted.lm, scale.factor){
  # Build one residual-vs-predictor plot: a scatterplot for a continuous
  # predictor, a boxplot for a categorical one.
  # to center residual plot around y = 0 line
  res = residuals(fitted.lm)
  limit = max(abs(res))
  margin_factor = 5
  margin = round(limit / margin_factor)
  n_var_threshold = 4 # if more levels than threshold, tilt labels to 45 degrees
  # handle categorical and continuous variables
  x = lm_matrix[, var]
  if (is.numeric(x)) {
    # continuous variable: return scatterplot
    return (ggplot(data = fitted.lm, aes(x = lm_matrix[, var], y = fitted.lm$residuals)) +
              labs(x = var, y = "residuals") +
              ggtitle(paste("Residual vs.", var)) +
              geom_point(size = scale.factor) +
              geom_hline(yintercept = 0, linetype = "dashed", color = "indianred3", size = scale.factor) +
              ylim(-(limit + margin), limit + margin))
  }
  else {
    # categorical variable: return boxplot
    base_plot = ggplot(data = data.frame(lm_matrix), aes(x = lm_matrix[, var], y = fitted.lm$residuals)) +
      labs(x = var, y = "Residuals") +
      ggtitle(paste("Residual vs.", var)) +
      geom_boxplot(size = scale.factor)
    if (nlevels(lm_matrix[, var]) > n_var_threshold) {
      return (base_plot + theme(axis.text.x = element_text(angle = 45, hjust = 1)))
    }
    # (an unreachable trailing return(base_plot) after this if/else was removed)
    return (base_plot)
  }
}
|
3899b1b761a946dd4020d881194296a11a532ab7 | c49aa09f1f83ee8f8c9d1e716ae38381ed3fafca | /preprocessing/glmnettutorial.R | b212999ef38a61c2c23899b4bd465c57c5e6d409 | [] | no_license | whtbowers/multiomics | de879d61f15aa718a18dc866b1e5ef3848e27c42 | 81dcedf2c491107005d184f93cb6318865d00e65 | refs/heads/master | 2020-04-11T03:25:40.635266 | 2018-09-24T08:51:06 | 2018-09-24T08:51:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 69 | r | glmnettutorial.R | library(glmnet)
load("QuickStartExample.RData")
fit = glmnet(x, y)
|
6ad45469d2eef23f8f905b9d050ed0ea3ed5b81a | 325902f26f8df4914f3931d6a3a5c5af12b975b1 | /R scripts/Coregulation_score_vs_corr_part2.R | a2cca0d7aaf7c62e82647947fde1e4263381789e | [] | no_license | Rappsilber-Laboratory/ProteomeHD | 5e9ca415a0dac31ef46972eeff018547b9ee8aeb | 2ee6d87110b9d4932af0d106927eb289dfbce321 | refs/heads/master | 2020-04-28T22:22:08.913323 | 2019-11-05T12:24:45 | 2019-11-05T12:24:45 | 175,614,695 | 8 | 0 | null | 2019-03-14T12:10:33 | 2019-03-14T12:10:32 | null | UTF-8 | R | false | false | 5,453 | r | Coregulation_score_vs_corr_part2.R | ## A script to compare the distance/correlation metrics to treeClust using REACTOME test set and precision-recall curves
# Load the required packages
library(data.table); library(reshape2); library(ggplot2); library(ROCR); library(scales)
#### Prepare the data ####
# Read in the data: treeClust similarities and the classic correlation
# metrics for the same protein pairs
treeclust <- fread("treeClust_similarities.csv")
correlations <- fread("ProHD_correlations.csv")
# Prepare the tables for merging
treeclust <- treeclust[, .(Protein_1, Protein_2, treeClust_sim = tc_sim, treeClust_tom = tc_tom) ]
setkey(treeclust, Protein_1, Protein_2)
setkey(correlations, Protein_1, Protein_2)
# Merge into one table (join on the keyed pair columns)
DT <- merge(treeclust, correlations)
# Remove rows with missing values (only 19 rows)
DT <- DT[ complete.cases(DT) ]
# Remove isoform information (which can't be mapped to gold standard):
# strip everything after ";" (ID groups) and "-" (isoform suffixes)
DT[, SimpleID_1 := gsub(";.+", "", Protein_1)][, SimpleID_1 := gsub("-.+", "", SimpleID_1)] # Simplify protein 1 IDs of DT
DT[, SimpleID_2 := gsub(";.+", "", Protein_2)][, SimpleID_2 := gsub("-.+", "", SimpleID_2)] # Simplify protein 2 IDs of DT
DT <- DT[ SimpleID_1 != SimpleID_2 ] # Remove self-interactions after ID simplification (isoform removal)
DT <- DT[ !duplicated( DT[,.(SimpleID_1, SimpleID_2)] ) ] # Remove duplicates after ID simplification (isoform removal)
DT[ SimpleID_1 > SimpleID_2 , .N] == DT[, .N] # Double checking that all pairs are in B <-> A order (alphabetically)
DT <- DT[, .(SimpleID_1, SimpleID_2, treeClust_sim, # Keep only relevant columns
             treeClust_tom, PCC, RHO, BIC)]
setkey(DT, SimpleID_1, SimpleID_2) # Set keys for merging
#### Compare metrics using precision recall curves ####
# Load gold standard of true and false positives (based on Reactome)
TP_FP_pairs <- fread("Reactome_TP_FP.csv") # Note that these pairs are already sorted such that Protein_1 > Protein_2
names(TP_FP_pairs) <- c("SimpleID_1", "SimpleID_2", "Class") # But rename them to fit DT
setkey(TP_FP_pairs, SimpleID_1, SimpleID_2) # Set keys for merging
# Merge gold standard with data
DT <- merge(DT, TP_FP_pairs)
# Sample FPs down to get 5% TP (i.e. a fixed 1:19 TP:FP class ratio)
TP <- DT[ Class == "TP" ] # All true positives
FP <- DT[ Class == "FP" ] # All false positives
n_FP <- TP[, .N] * 19 # Number of false positives we need
FP <- FP[ sample( FP[,.N] , n_FP) ] # Restrict to random subset of false positives
DT <- rbindlist( list( TP, FP)) # Downsample DT
# Add a randomised classifier (permuted scores) as a baseline
DT[, Random := sample( treeClust_tom ) ]
# Get precision recall data using ROCR package.
# Every classifier column is scored against the same Class labels, so build
# the prediction object from a (display name -> column name) lookup instead
# of duplicating the extraction and data.table construction six times.
measures <- c("treeClust"         = "treeClust_sim",
              "treeClust + TOM"   = "treeClust_tom",
              "PCC"               = "PCC",
              "Rho"               = "RHO",
              "bicor"             = "BIC",
              "random classifier" = "Random")
labels <- DT$Class
pred <- prediction( predictions = unname(lapply(measures, function(col) DT[[col]])),
                    labels = rep(list(labels), length(measures)),
                    label.ordering = c("FP", "TP"))
perf <- performance(pred, measure = "prec", x.measure = "rec")
# Make the precision recall plot data: one long table with a Measure column
pre_rec_dt <- rbindlist(lapply(seq_along(measures), function(i){
                data.table( Recall    = perf@x.values[[i]],
                            Precision = perf@y.values[[i]],
                            Measure   = names(measures)[i])
              }))
pre_rec_dt <- pre_rec_dt[ Recall > 0.005 ] # Drop low-recall points because they are pretty much random
pre_rec_dt <- pre_rec_dt[ sample( pre_rec_dt[,.N], 2000000 ) ] # Randomly downsample to speed up loading times in Inkscape
# Set plotting order (colour / legend order of the classifiers)
pre_rec_dt[, Measure := factor( Measure, levels = c("treeClust", "treeClust + TOM", "PCC", "Rho", "bicor", "random classifier")) ]
# Plot the result
p1 <- ggplot(pre_rec_dt, aes(x = Recall, y = Precision, colour = Measure))+
      geom_line( size = 0.25 )+
      scale_colour_manual(values = c("royalblue1", "navy", "lightseagreen", "violetred2", "mediumorchid", "grey50"))+
      scale_x_continuous(limits=c(0,1), breaks=seq(0,1,0.2))+
      scale_y_continuous(limits=c(0,1), breaks=seq(0,1,0.2))+
      theme(panel.background = element_blank(), axis.text=element_text(size=5), axis.title=element_text(size=6),
            axis.ticks = element_line(size=0.25), axis.line = element_line(colour="black", size=0.25),
            legend.position = "none", plot.margin = rep(unit(0,"null"),4))
# Save at 600 dpi for figure preparation
ggsave("TreeClust_vs_Cor.png", p1, width=4.6, height=4.6, units=c("cm"), dpi = 600)
|
247099761a0c3948f39821f55cc84b20ca1046fc | eba5aaf3ed11be632bbe0a0273bed118ab17b658 | /tab2-likert-plot.R | aa1452d44788781b9900fad572454298e3384ea6 | [
"Apache-2.0"
] | permissive | bcgov/career-survey | 9a848a81cd010c95aabd48108419b33657377124 | 57b629b3ec8026004f23d2e9611e691594298c3f | refs/heads/master | 2023-07-14T13:11:12.914218 | 2023-06-30T17:14:15 | 2023-06-30T17:14:15 | 78,044,633 | 1 | 0 | Apache-2.0 | 2021-05-06T22:31:31 | 2017-01-04T19:09:00 | R | UTF-8 | R | false | false | 1,841 | r | tab2-likert-plot.R | library(ggplot2)
library(dplyr)
# Build the plotting data for one work unit / question group: keep one
# ministry, one unit and one question group, convert proportions to
# percentages, and wrap long question texts for the y axis.
data <- read.csv("data/BGS_SUCC_RESULTS_sample.csv") %>%
  filter(MINISTRY_NAME == "Ministry of Scarlet") %>%
  filter(WK_UNIT_NAME == "Frog Unit") %>%
  filter(SRV_QU_GRP_CODE == 14) %>%
  dplyr::select(QU_TXT, RESP_TXT, RESP_CODE, RESP_PERCENT, SCALE_AVG) %>%
  mutate(RESP_PERCENT = RESP_PERCENT * 100) %>%
  mutate(QU_TXT_UI =
           unlist(lapply(
             stringi::stri_wrap(QU_TXT, width = 50,simplify = FALSE),
             paste, collapse = "\n"))) %>%
  droplevels
# Order response levels by response code, reversed so the stacked bars read
# in the intended direction after coord_flip()
data$RESP_TXT <- factor(
  data$RESP_TXT,
  levels = rev(unique(data$RESP_TXT[order(data$RESP_CODE)])))
# Horizontal stacked likert-style bar chart, one bar per question
ggplot(data,
       aes(x = QU_TXT_UI, y = RESP_PERCENT,
           fill = RESP_TXT, label = RESP_PERCENT)) +
  geom_bar(stat = "identity", width = 0.8) +
  # percentage labels inside each bar segment (suppressed when the value is 0)
  geom_text(aes(label = ifelse(RESP_PERCENT == 0, "",
                               sprintf("%s%%", round(RESP_PERCENT)))),
            color = "#11335b", fontface = "bold", size = 4,
            position = position_stack(vjust = 0.5)) +
  coord_flip() +
  theme(
    panel.background = element_rect(fill = "transparent"),
    axis.ticks = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_text(colour = "#11335b"),
    legend.text = element_text(colour = "#11335b", face = "bold"),
    legend.position = "bottom",
    legend.title = element_blank()
  ) +
  xlab(NULL) + ylab(NULL) +
  scale_fill_manual(values = c("#0081c1", "#4db447", "#ffb200"),
                    labels = paste0(levels(data$RESP_TXT), " ")) +
  guides(fill = guide_legend(reverse = TRUE)) +
  # per-question score (scaled to 0-100) printed past the end of each bar
  geom_text(data = data, y = 105,
            aes(label = round(SCALE_AVG * 100)),
            position = "identity", vjust = 0.5, hjust = -0.5) +
  scale_y_continuous(limits = c(0, 105)) +
  ggtitle("Score \n/100 ") +
  theme(plot.title = element_text(hjust = 1))
|
ee9f658235a298f60835a50c9f529892092878a2 | 65d0d046269881862f229f0fd53de59650b128d2 | /cachematrix.R | f465ac63e775c650399b0f231f98db1217d91e78 | [] | no_license | keithburner/ProgrammingAssignment2 | 6e3d1c393ca71f77a736d18978e265023fb0358d | d3124b98459cc0546a9e87a3fc9802083a7e4487 | refs/heads/master | 2020-12-05T01:26:16.703021 | 2020-01-06T17:51:32 | 2020-01-06T17:51:32 | 231,966,548 | 0 | 0 | null | 2020-01-05T19:39:30 | 2020-01-05T19:39:29 | null | UTF-8 | R | false | false | 960 | r | cachematrix.R | ## Part of R Programming module Week 3, this is the project assignment to create a square
## matrix and its inverse. To then store this in cache and retrieve it from cache
## functions do
## Calculate the Inverse of a Square matrix
# Create a special "matrix" object: a list of closures that share the
# wrapped matrix and its cached inverse through their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # replace the matrix and drop any stale cached inverse
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # return the current matrix
    get = function() x,
    # store a freshly computed inverse
    setinv = function(invmatrix) cached_inverse <<- invmatrix,
    # return the cached inverse (NULL when not computed yet)
    getinv = function() cached_inverse
  )
}
## Use the cache to find the inverse of a Square Matrix already calculated, or calculate it
# Return the inverse of the special "matrix" `x` (as built by
# makeCacheMatrix), computing it with solve() only on a cache miss and
# storing the result for subsequent calls.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
f3abb52c90c54113c1d2b2dd4cc691f8ba8d95f9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/MConjoint/examples/mc.add.to.design.Rd.R | dac9abbccc975d9791ea7c6bbe4df1e03c82faeb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 680 | r | mc.add.to.design.Rd.R | library(MConjoint)
### Name: mc.add.to.design
### Title: Add cards to a design
### Aliases: mc.add.to.design mc.add.to.design.fast
### Keywords: multivariate
### ** Examples
# Example extracted from the MConjoint help page for mc.add.to.design()
data(hire.candidates)
base.design = mc.get.one.design(hire.candidates, 9)
# use defaults, (except max.trials=10 for speed)
mc.add.to.design(hire.candidates,base.design, max.trials=10)
# add 4 cards, accepting cross correlations up to .35
# warning, this may take several minutes
#mc.add.to.design(hire.candidates,base.design,4,tol=.35)
# you can speed this up and in this case
# have almost as many good designs (53 vs. 54)
#mc.add.to.design(hire.candidates,base.design,4,tol=.35,max.trials=10)
|
dff68d38849ccb23bfb196fb14960daa5ae8c29d | 32315c68e56f4c4ebb91527750b40d092ac2830b | /Travis/outliers.R | 333d72206af9296081faeb983199f96366bca7b3 | [] | no_license | Qingzz7/18S571project | 371f6312c0aa6efc1af32be06972ac033a22202c | 053265a0dc9260e676bc3517f47be340e05d0d6b | refs/heads/master | 2021-09-13T21:38:33.012399 | 2018-05-04T14:42:13 | 2018-05-04T14:42:13 | 119,405,951 | 1 | 1 | null | 2018-02-01T23:13:43 | 2018-01-29T16:05:54 | null | UTF-8 | R | false | false | 730 | r | outliers.R | install.packages("outliers")
# Outlier exploration / capping script for the MWRD water-quality data:
# z-scores the measurements, then winsorises extreme values at the 5th/95th
# percentiles using a Tukey 1.5*IQR fence.
library(outliers)
# NOTE(review): hard-coded local path -- this only runs on the author's machine.
water_df <- read.csv("~/Desktop/completeMwrd.csv", header=TRUE, sep=",")
head(water_df)
water_df[,'Location'] <- as.factor(water_df[,'Location'])
sapply(water_df,class)
# Drop the first column and (after that drop) the 8th column -- presumably an
# ID column and the Location factor; TODO confirm against the CSV schema.
temp_water <- water_df[-1]
temp_water <- temp_water[-8]
head(temp_water)
new_water <- temp_water
# z-score test
set.seed(1234)
summary(scores(new_water, type = "z", prob = 0.95))
# NOTE(review): quantile()/IQR() below are called on a whole data frame; base
# R expects a numeric vector here, so this likely errors unless new_water has
# been reduced to a single numeric column -- verify before relying on output.
qnt <- quantile(new_water, probs=c(.25, .75), na.rm = T)
caps <- quantile(new_water, probs=c(.05, .95), na.rm = T)
H <- 1.5 * IQR(new_water, na.rm = T)
# Winsorise: values outside the 1.5*IQR fences are replaced by the caps.
new_water[new_water < (qnt[1] - H)] <- caps[1]
new_water[new_water > (qnt[2] + H)] <- caps[2]
# Compare distributions before and after capping.
summary(temp_water)
summary(new_water)
boxplot(temp_water)
boxplot(new_water)
|
98a8e489f097ca2712135b574f18b5f36b846cd8 | b91c8580e2edf727e839e5febcec629dbcd34137 | /geoapp1/app.R | 412251740428312d3ec7c11d57e8782ee834fe1f | [
"MIT"
] | permissive | jarvisc1/Geostat2016 | 2ba96d17fc6b88c544fb22cd640e2e78acbe46ae | 72941c374b821bc4901eee08a10fcfec661e2cc8 | refs/heads/master | 2021-05-03T12:24:11.027285 | 2016-09-23T14:10:24 | 2016-09-23T14:10:24 | 68,620,717 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,524 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
library(sp)
library(mapview)
library(raster)
library(deldir)
library(dismo)
library(ggplot2)
# Define UI for application that draws a histogram
# Shiny UI: a title + instructions row, a 3-wide control column (raster and
# point display options, model selector, comparison barplot) and a 9-wide
# interactive leaflet map with a view-reset button.
ui = shinyUI(fluidPage(
  fluidRow(
    titlePanel(title = "GEOSTAT 2016 Game Entry: interactive visualisation understand spatial models"),
    includeMarkdown("instructions.md"),
    column(3,
           "Controls",
           tabsetPanel(
             # Raster display controls: aggregation resolution, layer, palette.
             tabPanel("Raster data",
                      sliderInput("bins",
                                  "Resolution (cell size, m):",
                                  min = 100,
                                  max = 5000,
                                  step = 100,
                                  value = 500),
                      selectInput("raster_layer",
                                  "Raster layer:",
                                  choices = c(
                                    "Elevation (m)" = "DEMNED6_100m",
                                    "Land cover map" = "LNDCOV6_100m",
                                    "Parent materials" = "PMTGSS7_100m",
                                    "MODIS EVI image (EX1MOD5)"= "EX1MOD5_100m",
                                    "Precipitation (cm/yr)" = "Precip"
                                  )),
                      selectInput("raster_pal", "Colourscheme:",
                                  choices = c(
                                    "terrain.colors",
                                    "heat.colors",
                                    "topo.colors",
                                    "cm.colors",
                                    "rainbow"
                                  )
                      )
             ),
             # Point display controls: circles vs Voronoi polygons, circle sizes.
             # NOTE(review): "Oberved" in the label below is a typo in a
             # user-facing string; left unchanged here because fixing it
             # changes UI text at runtime.
             tabPanel("Point data",
                      selectInput("point_layer",
                                  "Point visualisation method:",
                                  choices = c(
                                    "Circles" = "c",
                                    "Voronoi polygons" = "v"
                                  )
                      ),
                      sliderInput(inputId = "circle_size", label = "Predicted values circle size:", min = 10, max = 1000, value = 200, step = 10),
                      # Observed-points size only applies in circle mode.
                      conditionalPanel(condition = "input.point_layer == 'c'",
                                       sliderInput(inputId = "pcircle_size", label = "Oberved values circle size:", min = 10, max = 1000, value = 200, step = 10)
                      )
             )
           ),
           hr(),
           # Prediction model selector (read by the server's vre() reactive).
           fluidRow(" ",
                    selectInput(inputId = "model", label = "Model selection:", choices = c("None", "Random", "Voronoi", "Altitude-dependent", "Random Forest"))
           )
           ,
           hr(),
           fluidRow("Observed/predicted comparison (frequency of selected soil types)", plotOutput("barplot")
           )
    ),
    # Main panel: the interactive leaflet map plus a view-reset button.
    column(9,
           "Interactive map",
           leafletOutput("m", width ="100%", height = "800"),
           actionButton("reset_button", "Reset view")
    )
  )
))
# Define server logic required to draw a histogram
# Shiny server: loads pre-processed spatial data once at startup, then
# reactively aggregates the raster, computes soil-type predictions for the
# model chosen in the UI, and renders the leaflet map and comparison barplot.
server <- shinyServer(function(input, output) {
  # Initial map view, reused by the "Reset view" observer below.
  initial_lat = 0.2081755
  initial_lon = 25.331
  initial_zoom = 12
  # Saved inputs: training points (p), prediction points (v), covariate rasters (r).
  p = readRDS("training.Rds")
  v = readRDS("v.Rds")
  r = readRDS("raster-mini.Rds")
  # Total precipitation = sum over raster layers whose names contain "PR".
  sel_precip = grep(pattern = "PR", x = names(r))
  r$Precip = sum(r[[sel_precip]])
  projections = c("+proj=aea +lat_1=20 +lat_2=-23 +lat_0=0 +lon_0=25 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs") # Africa_Albers_Equal_Area_Conic
  aproj = projections
  crs(r) = crs(p) = crs(v) = aproj
  # Reproject everything to WGS84 lon/lat, as required by leaflet.
  p = spTransform(p, CRS("+init=epsg:4326"))
  v = spTransform(v, CRS("+init=epsg:4326"))
  r = projectRaster(r, crs = "+init=epsg:4326")
  # Voronoi tessellation of the training points (used by the "Voronoi" model).
  vo = dismo::voronoi(p)
  # m <- mapview(r) + mapview(p)
  # Reactive raster, aggregated to the user-selected cell size (base is 100 m).
  rre = reactive({
    if(input$bins > 100){
      r = aggregate(r, input$bins / 100)
    }
    r
  })
  # Reactive prediction: soil type at each prediction point under the
  # selected model; stays NA when "None" is selected.
  vre = reactive({
    v$TAXNUSDA = NA
    if(input$model=="Voronoi"){
      # Soil type of the enclosing Voronoi polygon of the training set.
      v$TAXNUSDA <- raster::extract(vo, v)$TAXNUSDA
    }
    if(input$model == "Random"){
      v$TAXNUSDA <- sample(unique(p$TAXNUSDA), size = nrow(v), replace = T)
    }
    if(input$model == "Altitude-dependent"){
      # Assign the soil type of the training point with the closest elevation.
      height = as.data.frame(extract(rre(), v))$DEMNED6_100m
      height_training = as.data.frame(extract(rre(), p))$DEMNED6_100m
      closest_height = sapply(height, function(x) which.min(abs(x - height_training)))
      v$TAXNUSDA <- p$TAXNUSDA[closest_height]
      # aggregate(height ~ v$TAXNUSDA, FUN = mean) # test model
      # aggregate(height_training ~ p$TAXNUSDA, FUN = mean) # test model
    }
    if(input$model == "Random Forest"){
      # Pre-computed random-forest predictions stored with the points.
      v$TAXNUSDA <- v$pred
    }
    v$TAXNUSDA
  })
  output$m = renderLeaflet({
    v$TAXNUSDA = vre()
    pal = colorFactor(palette = "RdYlBu", domain = unique(p$TAXNUSDA))
    r_sub = rre()[[input$raster_layer]]
    # Resolve the palette-function name chosen in the UI to the function itself.
    raster_pal = match.fun(input$raster_pal)
    # Hide the Voronoi group ("v") unless Voronoi display is selected;
    # "nv" is presumably a dummy group name so nothing real gets hidden.
    hide_v = ifelse(input$point_layer == "v", "nv", "v")
    vorpopup <- paste0("Voronoi Soil Type: ", vo$TAXNUSDA)
    ppopup <- paste0(p$TAXNUSDA)
    vpopup <- ifelse(is.na(v$TAXNUSDA), paste0("Select Model for Prediction"),paste0("Predicted: ", v$TAXNUSDA))
    leaflet() %>%
      addRasterImage(r_sub, raster_pal(n = 10)) %>%
      addCircles(data = p, color = ~pal(p$TAXNUSDA), radius = input$pcircle_size, opacity = 1, popup = ppopup) %>%
      addPolygons(data = vo, fillColor = ~pal(vo$TAXNUSDA), fillOpacity = 1, group = "v", popup = vorpopup ) %>%
      hideGroup(hide_v) %>%
      addCircles(data = v, color = ~pal(v$TAXNUSDA), radius = input$circle_size, popup = vpopup) %>%
      addLegend(pal = pal, values = p$TAXNUSDA, title = "Soil type") %>%
      mapOptions(zoomToLimits = "first")
  })
  # NOTE(review): no "textout" output appears in the UI above -- confirm this
  # output is still used anywhere.
  output$textout = renderPrint(summary(as.factor(vre())))
  # Barplot comparing the 3 most frequent training soil types against the
  # model's predicted frequencies.
  output$barplot = renderPlot({
    to_keep = c(names(sort(table(p$TAXNUSDA), decreasing = T)[1:3]), NA)
    bdf1 = data.frame(Model = rep("Training", nrow(p)), Soil_type = p$TAXNUSDA)
    bdf2 = data.frame(Model = rep("Prediction", nrow(v)), Soil_type = vre())
    bdf = rbind(bdf1, bdf2)
    bdf = bdf[bdf$Soil_type %in% to_keep,]
    bdf$`Soil type` = bdf$Soil_type
    ggplot(bdf) + geom_bar(aes(`Soil type`, fill = Model), position = "dodge") +
      ylab("Count")
  })
  # Reset the map to the initial view when the reset button is pressed.
  observe({
    input$reset_button
    leafletProxy("m") %>% setView(lat = initial_lat, lng = initial_lon, zoom = initial_zoom)
  })
})
# Launch the application by pairing the UI and server objects defined above.
shinyApp(ui, server)
|
3bdf5fbdd101d3a39b1587552bd271457a0e3912 | 390855ce2c0eca8659d5952250316ba9ec629915 | /R/lines.R | b30337799d680da8e9f4ce912a3e0e78098edf86 | [] | no_license | dtkaplan/detex | 3a5efa337fdd9f70fc46c3c9fe8e8253da55e8ff | 562ccc63f536b75ccc65f4dc352f4d7fdf43a243 | refs/heads/master | 2021-04-05T20:34:36.080413 | 2019-10-24T01:38:26 | 2019-10-24T01:38:26 | 64,130,241 | 1 | 1 | null | 2019-06-06T21:05:50 | 2016-07-25T11:50:52 | HTML | UTF-8 | R | false | false | 287 | r | lines.R | #' Eliminate within-paragraph line breaks
#' Eliminate within-paragraph line breaks
#'
#' Collapses single newlines inside paragraphs to spaces while preserving
#' paragraph breaks (runs of two or more newlines) as blank lines; finally,
#' certain quote sequences (see the regex) are normalised to a plain
#' double-quote character.
#' @export
simplify_paragraphs <- function(text) {
  # Protect paragraph breaks with a sentinel token before flattening.
  flattened <- gsub("\n{2,}", " special code ", text)
  # Remaining single newlines are within-paragraph breaks: turn into spaces.
  flattened <- gsub("\n", " ", flattened)
  # Restore protected paragraph breaks as double newlines.
  flattened <- gsub(" special code ", "\n\n", flattened)
  # smart quotes
  gsub("(``[^`]\\{r|'')", "\"", flattened)
}
|
08cc87df68e9240137437583f467951ddfb849f3 | 29cc5f9fc270967167ddf9fbc3c5000f565eb9a2 | /man/rpy.Rd | 4b69bf86cd9f523ea10dd0d3e55cdffc5de0eb15 | [] | no_license | RkzYang/pyryp | f56ccc6b5ab7703632d199e473d314d8efdd2d4d | 388af548631030adff302daa5edc325032fada62 | refs/heads/master | 2020-04-28T08:13:32.053015 | 2019-03-22T07:07:38 | 2019-03-22T07:07:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 310 | rd | rpy.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpy.R
\name{rpy}
\alias{rpy}
\title{Translate R Functions into Python Functions}
\usage{
rpy(R_code)
}
\arguments{
\item{R_code}{R Code String}
}
\description{
Translate R Functions into Python Functions
}
\examples{
rpy("getwd")
}
|
8bc6f6ec491d4e78c2098ea546a774b846304066 | 73281ddf16775e53401f161a98d9b92c9a363b2d | /work/10_figure4_jitter_performance.R | 069183a6ba8232d823a0ff6cf18b31914acd5a39 | [] | no_license | joshzyj/satellite-review-public | 5f9bf0f6d1fb4eeb4912408858501ba23bc9ea78 | 8213f7bc558d5977eacbe6cd4167ab3162e7fa2b | refs/heads/main | 2023-06-29T01:34:17.616422 | 2021-03-02T20:52:40 | 2021-03-02T20:52:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,438 | r | 10_figure4_jitter_performance.R | source("00_init.R")
library(RColorBrewer)
library(boot)
########################################################################################
# Written by: Anne, Oct 2019
# Last edited by: Anne, Nov 2020
# This file creates Figure 4 based on the noisy data created in the previous two files
########################################################################################
##########################################################################################
# read in data
##########################################################################################
# NOTE(review): setwd() inside a script makes it working-directory dependent;
# data_path and git_path are expected to come from 00_init.R (sourced above).
setwd(data_path)
jitter_file = "jitter-impact/dhs_jitter_international_results_linearTRUE_histbins35_6foldsby4_jitters.csv"
sample_file = "jitter-impact/sample_model.csv"
response_file = "responsenoise_model.csv"
# Pre-computed model-performance tables (jitter / subsample / response-noise
# experiments) plus the 2016 maize yield replication data.
jitter = read.csv(jitter_file)
sample = read.csv(sample_file)
response = read.csv(response_file)
crop = read.csv("maps2016.dataforrep.csv")
##########################################################################################
# calculate mean jitter for each level we add
# since adding normally distributed distance of jitter doesn't add to existing jitter
##########################################################################################
# Displace each point in `xy` (data.frame with columns x, y) by a random
# distance drawn from Uniform(0, `distance`) in a uniformly random direction.
# If `og` is TRUE, 5% of points instead get a distance from
# Uniform(-2*distance, 2*distance) -- presumably mimicking DHS-style
# occasional long-range displacement (TODO confirm).
# Returns a data.frame of the jittered coordinates.
# NOTE: consumes RNG state (runif/sample), so results depend on set.seed().
jitter_calc = function(xy, distance, og=F) {
  n = length(xy$x)
  d = runif(n, 0, distance)
  if(og) {
    # Select 5% of points for the long-range displacement regime.
    id = sample(1:length(d), 0.05*length(d))
    d[id] = runif(length(id), -distance*2, distance*2)
  }
  # Random bearing; convert polar (d, angle) to cartesian offsets.
  angle = runif(n, 0, 2*pi)
  dx = d * cos(angle)
  dy = d * sin(angle)
  xy = data.frame(x = xy$x+dx, y = xy$y+dy)
  return(xy)
}
# Euclidean distance of each point in `xy` (columns x, y) from the origin.
distance = function(xy) {
  sqrt(xy$x ^ 2 + xy$y ^ 2)
}
set.seed(1000)
# For each extra jitter amount 0..25 km, empirically estimate the mean total
# displacement by re-jittering a large cloud of points at the origin that
# already carries the 5 km baseline jitter.
jitter_means = 0:25
jitter_means = data.frame(jitter=jitter_means, mean=rep(NA, length(jitter_means)))
zero = data.frame(x=rep(0, 1000000), y=rep(0,1000000))
zero = jitter_calc(zero, 5)
jitter_means$mean[1] = mean(distance(zero))
for (i in 2:nrow(jitter_means)) { #if you add different amounts of jitter, what is mean
  distance_jittered = jitter_means$jitter[i]
  jitter_means$mean[i] = mean(distance(jitter_calc(zero, distance_jittered)))
}
#add 5 since the baseline we start with has 5km jitter
jitter_means$jitter = jitter_means$jitter + 5
#################################################################
# jitter figure (Figure 4a): model r^2 vs average GPS jitter
#################################################################
# Replace the nominal jitter column with the empirically-derived mean displacement.
jitter = merge(jitter, jitter_means, by="jitter")[, 2:8]
names(jitter)[7] = "jitter"
# Long format: one row per (jitter level, r2 type) observation.
jitter = tidyr::gather(jitter[,-5], type, r2, r2_train:r2_true, factor_key=TRUE)
# Drop r2_test rows at the minimum jitter level -- presumably the baseline
# has no test run at that level (TODO confirm).
jitter = jitter[!(jitter$type=="r2_test"&jitter$jitter==min(jitter$jitter)), ]
jitter$r2 = as.numeric(jitter$r2)
# Summarise r2 per (jitter, type): mean, max, min, sd.
jitter = aggregate(jitter$r2, by=jitter[, c("jitter", "type")],
                   FUN = function(x) c(mean = mean(x, na.rm=T), max = max(x, na.rm=T),
                                       min = min(x, na.rm=T), sd=sd(x, na.rm=T)))
jitter = do.call(data.frame, jitter)
jitter$type = as.character(jitter$type)
jitter$type[jitter$type == "r2_true"] = "true_test_cor"
myColors = c("#1FB24A", "#6F95CE")
# NOTE(review): jitter$type was just coerced to character, so levels() returns
# NULL here and myColors stays unnamed -- confirm this is intended.
names(myColors) = levels(jitter$type)
colScale = scale_colour_manual(name = "grp",values = myColors)
fillScale = scale_fill_manual(name = "grp",values = myColors)
#plot: mean line with +/- 2 sd ribbon, plus a cubic extrapolation of r2_test
jitter_plot = ggplot(jitter[jitter$jitter!=Inf & jitter$type != "r2_train",]) +
  geom_ribbon(aes(x=jitter, ymin=x.mean-2*x.sd, ymax=x.mean+2*x.sd, fill=type), alpha=0.2) +
  geom_line(aes(jitter, x.mean, color=type, group=type)) +
  stat_smooth(method="lm", se=F, formula=y ~ poly(x, 3), colour="#1FB24A", linetype=2,
              aes(jitter, x.mean), data=jitter[jitter$type=="r2_test", ], size=0.4,
              fullrange=T) +
  ylim(0.55, 0.9) + xlim(0, 12) + theme_anne("Times", size=18) +
  xlab("average km of jitter") + ylab("r^2") +
  colScale + fillScale
ggsave(paste0(git_path, "/figures/raw/Figure_4a.pdf"), jitter_plot,
       "pdf", width=7.5, height=4, dpi=300)
#################################################################
# sample figure (Figure 4b): model r^2 vs samples per village
#################################################################
myColors = c("#1FB24A", "#6F95CE")
names(myColors) = levels(sample$type)
colScale = scale_colour_manual(name = "grp",values = myColors)
fillScale = scale_fill_manual(name = "grp",values = myColors)
# BUG FIX: this filter previously called sample("test_cor", "true_test_cor"),
# i.e. base::sample() with a non-numeric `size` argument, which errors at
# runtime. The intent (matching the jitter/response panels, which plot the
# test and true-test series) is a character vector of the series to keep,
# so use c() instead.
sample = sample[sample$type %in% c("test_cor", "true_test_cor"),]
# Mean r^2 line with +/- 2 sd ribbon; x axis reversed (15 samples down to 1).
sample_plot = ggplot(sample) + 
  geom_ribbon(aes(x=n, ymin=x.mean-2*x.sd, ymax=x.mean+2*x.sd, fill=type), alpha=0.2) + 
  geom_line(aes(n, x.mean, color=type, group=type)) + 
  theme_anne("Times", size=18) + ylim(0.55, 0.9) + xlim(15, 1) + 
  xlab("n samples in village") + ylab("") + 
  colScale + fillScale
sample_plot
ggsave(paste0(git_path, "/figures/raw/Figure_4b.pdf"), sample_plot, "pdf", 
       width=7.5, height=4, dpi=300)
#################################################################
# noise from responses figure (Figure 4c)
#################################################################
myColors = c("#1FB24A", "#6F95CE")
names(myColors) = levels(response$type)
colScale = scale_colour_manual(name = "grp",values = myColors)
fillScale = scale_fill_manual(name = "grp",values = myColors)
# Mean r^2 (+/- 2 sd ribbon) as a function of added response noise N(0, x).
response_plot = ggplot(response) +
  geom_ribbon(aes(x=n, ymin=x.mean-2*x.sd, ymax=x.mean+2*x.sd, fill=type), alpha=0.2) +
  geom_line(aes(n, x.mean, color=type, group=type)) +
  theme_anne("Times", size=18) + xlab("added error = N(0, x)") + ylab("") +
  colScale + fillScale + ylim(0.55, .9)
ggsave(paste0(git_path, "/figures/raw/Figure_4c.pdf"), response_plot, "pdf",
       width=7.5, height=4, dpi=300)
#################################################################
# sat yield vs crop cut yield figure (Figure 4d)
#################################################################
# Keep pure-stand plots with GPS-measured area >= 0.1.
ind = which(crop$plot_area_GPS >= .1 & (crop$purestand == 1)) #from David's code
# Satellite yield model: regress crop-cut yield on two MODIS day-of-year bands.
model = lm(cc_yield ~ DOY_mean_151.6 + DOY_mean_171.6, data=crop[ind,])
crop$satpred = predict(model, crop)
g = panel(crop[ind, c("satpred", "cc_yield")], lm=F, a=0.5, size=16.5, font="Times") +
  geom_abline(intercept=0, slope=1, color="darkgrey") +
  xlab('Satellite yield (Mg/ha)') + ylab('Crop cut yield (Mg/ha)')
ggsave(paste0(git_path, "/figures/raw/Figure_4d.pdf"), g, "pdf",
       width=5, height=5, dpi=300)
#################################################################
# sat yield vs full plot yield figure (Figure 4e)
#################################################################
g = panel(crop[ind, c("satpred", "fullplot_yield")],
          lm=F, a=0.5, size=16.5, font="Times") +
  geom_abline(intercept=0, slope=1, color="darkgrey") +
  xlab('Satellite yield (Mg/ha)') + ylab('Full plot yield (Mg/ha)')
ggsave(paste0(git_path, "/figures/raw/Figure_4e.pdf"), g, "pdf",
       width=5, height=5, dpi=300)
#################################################################
# sat yield correlation figure (Figure 4f)
#################################################################
# Bootstrap statistic: pairwise correlations of the first column (a yield
# measure) with all columns; the self-correlation is dropped downstream.
getcors = function(x, indices) cor(x[indices, ], use = 'p')[, 1]
ps_cors = list()
yieldvars = c('cc_yield', 'satpred')
xvars = c("used_inorganic", 'wave1_SQI')
for (i in yieldvars) {
  # Keep only rows with finite values in every variable used.
  use = which(is.finite(apply(crop[ind, c(i, xvars)], 1, sum)))
  temp = boot(data = crop[ind[use], c(i, xvars)],
              statistic = getcors,
              R = 1000)
  # Per input variable: bootstrap mean, sd and 5%/95% quantiles; the
  # [, -1] drops the yield-with-itself correlation.
  ps_cors[[i]] = t(apply(temp$t, 2, function(x)
    c(mean(x, na.rm = T), sd(x, na.rm = T), quantile(x, c(0.05, 0.95), na.rm = T)))[, -1])
}
# Base-graphics dot plot of correlations with 90% bootstrap intervals.
pdf(paste0(git_path, "/figures/raw/Figure_4f.pdf"))
par(mfrow=c(1,1), lwd=3, mar=c(5,4.5,1,1), cex.lab=1.5, cex.axis=1.5,
    cex=1.5, family="Times")
mids = t(sapply(ps_cors,function(x) x[,1]))
lows = t(sapply(ps_cors,function(x) x[,3]))
his = t(sapply(ps_cors,function(x) x[,4]))
var.names=c('Used \nFertilizer','Soil Quality \nIndex')
# barplot(plot=F) is used only to compute x positions for the dots.
a=barplot(mids,beside=T,plot=F)
nmod=nrow(mids)
cols=c(brewer.pal(3,'Set1'))[1:length(var.names)]
ylim=c(0,1)
ylab='Correlation'
plot(a, mids, col=cols, ylim=ylim, ylab=ylab, xlab='', axes=F, xlim=c(1,6), pch=19)
axis(2,las=1)
axis(1,at=(a[1,]+a[2,])/2,labels = var.names,cex.axis=1.0,tcl=0)
abline(h=0,col=gray(.7), lwd=.8)
# Whiskers for the 5%-95% bootstrap interval of each point.
for (i in 1:length(a)) arrows(a[i],lows[i],a[i],his[i],length=.1,angle=90,code=3,col=rep(cols,10)[i])
legend('topleft',
       leg=c('Crop cut yields','Satellite yields'),
       text.col=cols,bty='n', lwd=2, col=cols,
       title='Correlation with inputs', title.col=1, title.adj=1.5)
dev.off() |
128b96555430a362c9dc13fb895e1a7ce6c57e4b | 96b0edcb9eb80a21443388eb83f405bd59c65201 | /man/bound.standard.Rd | 6843457d138a4f5332287ca70471abd6a9ad279f | [] | no_license | cran/NetworkAnalysis | 22fc36c297dae9ecf986a30ae212e31c8293cbcc | 21d0645da4b4d4363a8b3232c86313876891ffe7 | refs/heads/master | 2021-01-25T12:14:01.275318 | 2010-12-01T00:00:00 | 2010-12-01T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,051 | rd | bound.standard.Rd | \name{bound.standard}
\alias{bound.standard}
\title{Standardization of bounds when partially integrating topological metrics.}
\description{This is a small function for the standardization of the
integration bounds when computing the cost-integrated version
of the topological metrics of a given
weighted graph.}
\usage{
bound.standard(bounds, Ne)
}
\arguments{
\item{bounds}{A numeric vector of size 2, describing the lower and
upper bound of the integral of interest.}
\item{Ne}{Number of edges.}
}
\value{
\item{bounds}{Standardized bounds in terms of number of edges.}
}
\author{
Cedric E Ginestet <c.ginestet05@gmail.com>
}
\references{
Ginestet, C.E. and Simmons, A. (2011). Statistical Parametric Network Analysis of Functional
Connectivity Dynamics during a Working Memory Task. Neuroimage, 5(2), 688--704.
}
\seealso{cost.int.metric, cutoff.int.metric}
\examples{
# Standardize bounds in terms of number of edges.
bound.standard(c(.1,.6),25)
}
\keyword{lower bound}
\keyword{upper bound}
\keyword{integration}
|
7dfec9234d8c69320716777df05408817406bc43 | 0ba927fc8d22b6aa5be1bf0c608fdd67f43b1349 | /tests/testthat/test-utils.R | bb9914444d53276f58347b50259acc4f114a9483 | [] | no_license | paleolimbot/rosm | 5bb829e2a3be98e8fc6e1e65c38333d37a4df64c | 28c484ebb875cb26dc6dd709c17df982d73f5e32 | refs/heads/master | 2023-08-08T00:37:49.839532 | 2023-07-23T01:03:55 | 2023-07-23T01:03:55 | 44,122,876 | 27 | 9 | null | 2023-01-27T20:55:31 | 2015-10-12T17:29:34 | R | UTF-8 | R | false | false | 90 | r | test-utils.R |
test_that("has_internet() works", {
skip_if_offline()
expect_true(has_internet())
})
|
337ba231befd9154cb93ffecd8fc5e8e7b3422dd | 2558f4d2a73895dd48666563f221bda22e6a9e9a | /plot2.R | 74d9e92fd2224406129ea516a36d9d7f8e3ffc77 | [] | no_license | ayushk007/ExData_Plotting1 | e9dbffa50ec467ffb96b0d4bea18750e737c9d3e | 5687feb684f8fbeb189c578e0583954ff73a74fb | refs/heads/master | 2020-03-10T23:57:53.963955 | 2018-04-26T10:58:14 | 2018-04-26T10:58:14 | 129,652,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 452 | r | plot2.R | dataset <- read.csv("household_power_consumption.txt",sep = ";")
# Parse dates (stored as dd/mm/yyyy) and keep only 2007-02-01 and 2007-02-02.
dataset$Date <- as.Date(dataset$Date,"%d/%m/%Y")
data <- dataset[dataset$Date == "2007-02-01" | dataset$Date == "2007-02-02" ,]
# Combine Date + Time into a full timestamp for the x axis.
data$Time <- strptime(paste(data$Date,data$Time,sep = " "),format = "%Y-%m-%d %H:%M:%S")
# Plot 2: global active power over the two days, written to a 480x480 PNG.
# NOTE(review): the UCI power dataset marks missing values with "?", so
# Global_active_power may be read as factor/character; as.numeric() on a
# factor returns level codes, not values -- verify upstream parsing.
png('plot2.png',width = 480, height = 480)
plot(data$Time,as.numeric(data$Global_active_power),type = "l",xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
c69f9aa81eb97f755d06e5210c862fdb908bee3c | a7e390f9b4e25478163824297a3c939c3e052161 | /R/is.bathy.R | 99ac12b09adc99fa98ffc853496e8ba513289834 | [] | no_license | ericpante/marmap | 7adeceaf85ca55ea0cfe9fc18d929d5a83996c67 | 2cfae824728228e4acdf84cb0b4f09948258ffef | refs/heads/master | 2023-08-09T23:51:16.752079 | 2023-03-24T12:38:10 | 2023-03-24T12:38:10 | 18,529,447 | 29 | 8 | null | 2022-11-16T10:11:58 | 2014-04-07T19:03:45 | R | UTF-8 | R | false | false | 57 | r | is.bathy.R | is.bathy = function(xyz){
print(class(xyz) == "bathy")
} |
bd1299eb5a21f8226c0b34874fddfd130dee008f | ee5999bf0a13cbb822db408b7fbacad54365cead | /MarkovChainAttr.R | c25fabe9a35615b1392c94f4aff0d83534779b66 | [] | no_license | LeondraJames/MarkovChains_MultiTouchAttribution | 20ec757fb0fea7601219608d2dccc3806b38b1c1 | 749ee1f48afb95f73c7ea6bfd62db1d282b1eef2 | refs/heads/master | 2020-07-23T00:00:29.123396 | 2019-09-09T18:46:28 | 2019-09-09T18:46:28 | 207,376,880 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,511 | r | MarkovChainAttr.R | #Load libraries
library(tidyverse)
library(reshape2)
library(ChannelAttribution)
library(markovchain)
# Simulate marketing channel dataset: 5000 touch events across 1000 clients,
# 32 days, and 10 channels drawn with the probabilities given below.
set.seed(42)
df <- data.frame(client_id = sample(c(1:1000), 5000, replace = TRUE),
                 date = sample(c(1:32), 5000, replace = TRUE),
                 channel = sample(c(0:9), 5000, replace = TRUE,
                                  prob = c(0.1, 0.15, 0.05, 0.07, 0.11, 0.07, 0.13, 0.1, 0.06, 0.16)))
df$date <- as.Date(df$date, origin = "2015-01-01")
df$channel <- paste0('channel_', df$channel)
head(df)
# Aggregate channels to the paths for each customer
df <- df %>%
  group_by(client_id) %>%
  summarise(path = paste(channel, collapse = ' > '),
            # assume that all paths were finished with conversion
            conv = 1,
            conv_null = 0) %>%
  ungroup()
# Calculating the model - Markov chains Attribution
markov <- markov_model(df,
                       var_path = 'path',
                       var_conv = 'conv',
                       var_null = 'conv_null',
                       out_more = TRUE)
# Show results, transition matrix and removal effects
head(markov$result)
tail(markov$transition_matrix)
markov$removal_effects
# Calculating heuristic models - First, Last & Linear Touch Attribution
heur <- heuristic_models(df,
                         var_path = 'path',
                         var_conv = 'conv',
                         var_value = NULL,
                         sep = ">")
# Create heuristic models dataframe, df2
channel_name <- heur$channel_name
first_touch<- heur$first_touch
last_touch <- heur$last_touch
linear_touch <- round(heur$linear_touch,0)
# NOTE(review): cbind() of a character/factor column with numeric vectors
# yields a character matrix; merge() below re-coerces it to a data frame of
# character columns -- verify attr_count ends up numeric before plotting.
df2 <- cbind(channel_name, first_touch, last_touch, linear_touch)
# Add markov chain and heuristic results to "results" df
# (strip the "channel_" prefix so the key matches df2's channel_name).
markov_result <- markov$result %>%
  separate(channel_name, c(NA,'channel_name'), sep = '_') %>%
  rename(markov_results = total_conversions) %>%
  mutate(markov_results = round(markov_results, 0))
results <- merge(df2, markov_result, all.x = TRUE)
# Plot results: grouped bars comparing the four attribution methods per channel
results2 <- gather(results, 'type', 'attr_count',2:5)
results2$channel_name <- as.factor(results2$channel_name)
ggplot(results2, aes(channel_name, attr_count, fill = type))+
  geom_bar(stat = 'identity', position= position_dodge())+
  theme_minimal()+
  theme(legend.position="bottom")+
  scale_y_continuous(breaks = seq(25,200,25))+
  xlab('Channel Name')+
  ylab('Conversions')+
  ggtitle('Multi-Touch Attribution Results')
# Heatmap of transition probabilities between channels (channel_from -> channel_to)
trans_matrix <- markov$transition_matrix
ggplot(trans_matrix, aes(y = channel_from, x = channel_to, fill = transition_probability)) +
  theme_minimal() +
  geom_tile(colour = "white", width = .9, height = .9) +
  geom_text(aes(label = round(transition_probability, 2)), fontface = "bold", size = 4) +
  theme(legend.position = 'bottom',
        legend.direction = "horizontal",
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        plot.title = element_text(size = 20, face = "bold", vjust = 2, color = 'black', lineheight = 0.8),
        axis.title.x = element_text(size = 24, face = "bold"),
        axis.title.y = element_text(size = 24, face = "bold"),
        axis.text.y = element_text(size = 8, face = "bold", color = 'black'),
        axis.text.x = element_text(size = 8, angle = 90, hjust = 0.5, vjust = 0.5, face = "plain")) +
  ggtitle("Transition Matrix Heatmap")
|
5ce1afbf99344a71e763ea72edc2275004b16d4e | ab6dd9cd7fe7417f121905d96d71e76c9986c57d | /man/simFossilTaxa.Rd | 1ebf49fada61fc74979be2bd377c64884ea22cd6 | [
"CC0-1.0"
] | permissive | pnovack-gottshall/paleotree | d51482c6d0edd6ae2e4cca7ef2c934d9f87067eb | e0dad1431f6cbed87d565c4eb651a85ffe6f4678 | refs/heads/master | 2020-12-03T02:06:37.852589 | 2015-10-21T05:34:09 | 2015-10-21T05:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,115 | rd | simFossilTaxa.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/simFossilTaxa.R
\name{simFossilTaxa}
\alias{simFossilTaxa}
\title{Simulating Taxa in the Fossil Record}
\usage{
simFossilTaxa(p, q, anag.rate = 0, prop.bifurc = 0, prop.cryptic = 0,
nruns = 1, mintaxa = 1, maxtaxa = 1000, mintime = 1, maxtime = 1000,
minExtant = 0, maxExtant = NULL, min.cond = TRUE,
count.cryptic = FALSE, print.runs = FALSE, sortNames = FALSE,
plot = FALSE)
}
\arguments{
\item{p}{Instantaneous rate of origination/branching per lineage-time units.}
\item{q}{Instantaneous rate of extinction per lineage-time units.}
\item{anag.rate}{Instantaneous rate of anagenesis (i.e. pseudospeciation/pseudoextinction).}
\item{prop.bifurc}{Proportion of morphological branching by bifurcating
cladogenesis relative to budding cladogenesis.}
\item{prop.cryptic}{Proportion of branching events with no morphological
differentiation (i.e. cryptic speciation) relative to branching events
associated with morphological differentiation (budding and/or bifurcating
cladogenesis).}
\item{nruns}{Number of datasets to accept, save and output.}
\item{mintaxa}{Minimum number of total taxa over the entire history of a
clade necessary for a dataset to be accepted.}
\item{maxtaxa}{Maximum number of total taxa over the entire history of a
clade necessary for a dataset to be accepted.}
\item{mintime}{Minimum time units to run any given simulation before
stopping.}
\item{maxtime}{Maximum time units to run any given simulation before
stopping.}
\item{minExtant}{Minimum number of living taxa allowed at end of
simulations.}
\item{maxExtant}{Maximum number of living taxa allowed at end of
simulations.}
\item{min.cond}{If TRUE, the default, simulations are stopped when they meet
all minimum conditions. If FALSE, simulations will continue until they hit
maximum conditions, but are only accepted as long as they still meet all
minimum conditions in addition.}
\item{count.cryptic}{If TRUE, cryptic taxa are counted as separate taxa for
conditioning limits, such as maxtaxa or maxExtant. If FALSE, then each cryptic
complex (i.e. each distinguishable morphotaxon) is treated as a single taxon}
\item{print.runs}{If TRUE, prints the proportion of simulations accepted for
output to the terminal.}
\item{sortNames}{If TRUE, output taxonomic matrices are sorted by the taxon
names (rownames; thus sorting cryptic taxa together) rather than by taxon id
(which is the order they were simulated in).}
\item{plot}{If TRUE, plots the diversity curves of accepted simulations.}
}
\value{
This function gives back a list containing nruns number of taxa
datasets, where each element is a matrix. If nruns=1, the output is not a
list but just a single matrix. Sampling has not been simulated in the output
for either function; the output represents the 'true' history of the
simulated clade.
For each dataset, the output is a six column per-taxon matrix where all
entries are numbers, with the first column being the taxon ID, the second
being the ancestral taxon ID (the first taxon is NA for ancestor), the third
column is the first appearance date of a species in absolute time, the
fourth column is the last appearance date and the fifth column records
whether a species is still extant at the time the simulation terminated (a
value of 1 indicates a taxon is still alive, a value of 0 indicates the
taxon is extinct). The sixth column (named "looks.like") gives information
about the morphological distinguishability of taxa; if they match the taxon
ID, they are not cryptic. If they do not match, then this column identifies
which taxon id they would be identified as.
Each matrix of simulated data also has rownames, generally of the form "t1"
and "t2", where the number is the taxon id. Cryptic taxa are instead named
in the form of "t1.2" and "t5.3", where the first number is the taxon which
they are a cryptic descendant of (i.e. column 6 of the matrix,
"looks.like"). The second number, after the period, is the rank order of
taxa in that cryptic group of taxa. Taxa which are the common ancestor of a
cryptic lineage are also given a unique naming convention, of the form
"t1.1" and "t5.1", where the first number is the taxon id and the second
number communicates that this is the first species in a cryptic lineage.
As with many functions in the paleotree library, absolute time is always
decreasing, i.e. the present day is zero.
}
\description{
Functions for simulating taxon ranges and relationships under various models
of evolution
}
\details{
simFossilTaxa simulates a birth-death process (Kendall, 1948; Nee, 2006),
but unlike most functions for this implemented in R, this function enmeshes
the simulation of speciation and extinction with explicit models of how
lineages are morphologically differentiated, as morphotaxa are the basic
units of paleontological estimates of diversity and phylogenetics.
Any particular use of simFossilTaxa will probably involve iteratively
running many simulations of diversification. Simulation runs are only
accepted for output if and when they meet the conditioning criteria defined
in the arguments, both minima and maxima. If min.cond is true (the default),
simulations will be stopped and accepted when clades satisfy mintime,
mintaxa, minExtant and maxExtant (if the latter is set). To reduce the effect
of one conditioning criterion, simply set that limit to an arbitrarily low
or high number (depending if a minimum or maximum constraint is involved).
If min.cond is false, simulation runs are not stopped and evaluated for
output acceptance until they (a) go completely extinct or (b) hit either
maxtaxa or maxtime. Whether the simulation runs are accepted or not for
output is still dependent on mintaxa, mintime, minExtant and maxExtant. Note
that some combinations of conditions, such as attempting to condition on a
specific non-zero value of minExtant and maxExtant, may take a long time to
find any acceptable simulation runs.
This function continues the simulation once mintaxa or minExtant is
hit, until the next taxon (limit +1) originates. Once the simulation
terminates, it is judged whether it is acceptable for all conditions given
and if so, the run is accepted as a dataset for output.
Please note that mintaxa and maxtaxa refer to the number of static
morphotaxa birthed over the entire evolutionary history of the simulated
clade, not the extant richness at the end of the simulation. Use minExtant
and maxExtant if you want to condition on the number of taxa living at some
time.
The simFossilTaxa function can effectively simulate clades evolving under
any combination of the three "modes" of speciation generally referred to by
paleontologists: budding cladogenesis, branching cladogenesis and anagenesis
(Foote, 1996). The first two are "speciation" in the typical sense used by
biologists, with the major distinction between these two modes being whether
the ancestral taxon shifts morphologically at the time of speciation. The
third is where a morphotaxon changes into another morphotaxon with no
branching, hence the use of the terms "pseudoextinction" and
"pseudospeciation". As bifurcation and budding are both branching events,
both are controlled by the p, the instantaneous rate, while the probability
of a branching event being either is set by u. By default, only budding
cladogenesis occurs. To have these three modes occur in equal proportions,
set p to be twice the value of w and set u to 0.5.
This function also includes the ability to simulate cryptic cladogenesis.
The available patterns of morphological speciation thus form a gradient:
cryptic cladogenesis has no morphological shifts in either daughter branches
after a branching event, budding cladogenesis has one morphological shift in
the two daughter lineages and, in bifurcating cladogenesis, shifts occur in
both daughter lineages. The argument prop.cryptic dictates what proportion
of branching/cladogenesis events (the overall occurrence of which is governed
by rate p) are cryptic versus those that have some morphological divergence
(either budding or bifurcating). prop.bifurc controls the proportion of
morphologically divergent cladogenesis which is bifurcating relative to
budding. Thus, for example, the probability of a given cladogenesis event
being budding is (1-prop.cryptic)*prop.bifurc.
When there is cryptic speciation, by default, the conditioning arguments
involving numbers of taxa (mintaxa, maxtaxa, minExtant and maxExtant) count
the number of unique morphologically distinguishable taxa (i.e.
the number of unique values in column 6 of the simulated data). This
behavior can be changed with the argument count.cryptic. See below about the
output data structure to see how information about cryptic cladogenesis is
recorded. The functions taxa2phylo, taxa2cladogram and taxicDivCont each
handle cryptic species in different ways, as described in their respective
help files.
If maxExtant is 0, then the function will be limited to only accepting
simulations that end in total clade extinction before maxtime.
If conditions are such that a clade survives to maxtime, then maxtime will
become the time of first appearance for the first taxa. Unless maxtime is
very low, however, it is more likely the maxtaxa limit will be reached
first, in which case the point in time at which maxtaxa is reached will
become the present data and the entire length of the simulation will be the
time of the first appearance of the first taxon.
simFossilTaxa simulates single taxa until they go extinct or exceed maxtime.
This means the function may have fully simulated some lineages for thousands
of time-steps while others are not yet simulated, and thus sometimes
overshoot constraints on the number of taxa. This function will
automatically discard any runs where the number of taxa exceeds 2 x maxtaxa
to avoid blowing up computation time. This is likely to happen under a
pure-birth scenario; I suggest using low maxtime settings if doing a
pure-birth simulation.
simFossilTaxa_SRCond is a wrapper for simFossilTaxa for when clades of a
particular size are desired, post-sampling. For more details, see the help
file at \code{\link{simFossilTaxa_SRCond}}.
More details on this function's design can be read here:
http://nemagraptus.blogspot.com/2012/04/simulating-fossil-record.html
}
\examples{
set.seed(444)
taxa <- simFossilTaxa(p=0.1,q=0.1,nruns=1,mintaxa=20,maxtaxa=30,maxtime=1000,maxExtant=0)
#let's see what the 'true' diversity curve looks like in this case
#plot the FADs and LADs with taxicDivCont
taxicDivCont(taxa[,3:4])
#can also see this by setting plot=TRUE in simFossilTaxa
#make datasets with multiple speciation modes
#following has anagenesis, budding cladogenesis and bifurcating cladogenesis
#all set to 1/2 extinction rate
set.seed(444)
res <- simFossilTaxa(p=0.1,q=0.1,anag.rate=0.05,prop.bifurc=0.5,mintaxa=30,maxtaxa=60,
maxExtant=0,nruns=1,plot=TRUE)
#what does this mix of speciation modes look like as a phylogeny?
tree <- taxa2phylo(res,plot=TRUE)
\donttest{
#some other options with cryptic speciation
taxaCrypt1 <- simFossilTaxa(p=0.1,q=0.1,anag.rate=0,prop.bifurc=0,prop.crypt=0.5,mintaxa=30,
maxtaxa=60,maxExtant=0,nruns=1,plot=TRUE)
tree1 <- taxa2phylo(taxaCrypt1,plot=TRUE)
taxaCrypt2 <- simFossilTaxa(p=0.1,q=0.1,anag.rate=0.05,prop.bifurc=0.5,prop.crypt=0.5,
mintaxa=30,maxtaxa=60,maxExtant=0,nruns=1,plot=TRUE)
tree2 <- taxa2phylo(taxaCrypt2,plot=TRUE)
taxaCrypt3 <- simFossilTaxa(p=0.1,q=0.1,anag.rate=0.05,prop.bifurc=0,prop.crypt=1,
mintaxa=30,maxtaxa=60,maxExtant=0,nruns=1,plot=TRUE)
tree3 <- taxa2phylo(taxaCrypt2,plot=TRUE)
}
set.seed(444)
#can choose to condition on total morphologically-distinguishable taxa
#or total taxa including cryptic taxa with count.cryptic=FALSE
taxa<-simFossilTaxa(0.1,0.1,prop.cryptic=1,anag.rate=0.05,mintaxa=20,
count.cryptic=FALSE,plot=TRUE)
nrow(taxa) #number of lineages (inc. cryptic)
length(unique(taxa[,6])) #number of morph-distinguishable taxa
#now with count.cryptic=TRUE
taxa <- simFossilTaxa(0.1,0.1,prop.cryptic=1,anag.rate=0.05,mintaxa=20,
count.cryptic=TRUE,plot=TRUE)
nrow(taxa) #number of lineages (inc. cryptic)
length(unique(taxa[,6])) #number of morph-distinguishable taxa
#now let's look at extant numbers of taxa
#can choose to condition on total morphologically-distinguishable living taxa
#or total living taxa including cryptic taxa with count.cryptic=FALSE
taxa <- simFossilTaxa(0.1,0.1,prop.cryptic=1,anag.rate=0.05,minExtant=20,
count.cryptic=FALSE,plot=TRUE)
sum(taxa[,5]) #number of still-living lineages (inc. cryptic)
length(unique(taxa[taxa[,5]==1,6])) #number of still-living morph-dist. taxa
#now with count.cryptic=TRUE
taxa <- simFossilTaxa(0.1,0.1,prop.cryptic=1,anag.rate=0.05,minExtant=20,
count.cryptic=TRUE,plot=TRUE)
sum(taxa[,5]) #number of still-living lineages (inc. cryptic)
length(unique(taxa[taxa[,5]==1,6])) #number of still-living morph-dist. taxa
#can generate datasets that meet multiple conditions: time, # total taxa, # extant taxa
set.seed(444)
res <- simFossilTaxa(p=0.1,q=0.1,mintime=10,mintaxa=30,maxtaxa=40,minExtant=10,maxExtant=20,
nruns=20,plot=FALSE,print.runs=TRUE)
#use print.run to know how many simulations were accepted of the total generated
layout(1:2)
#histogram of # taxa over evolutionary history
hist(sapply(res,nrow),main="#taxa")
#histogram of # extant taxa at end of simulation
hist(sapply(res,function(x) sum(x[,5])),main="#extant")
\donttest{
#pure-birth example
#note that conditioning is tricky
layout(1)
taxa <- simFossilTaxa(p=0.1,q=0,mintime=10,mintaxa=100,maxtime=100,maxtaxa=100,
nruns=10,plot=TRUE)
#can generate datasets where simulations go until extinction or max limits
#and THEN are evaluated whether they meet min limits
#good for producing unconditioned birth-death trees
set.seed(444)
res <- simFossilTaxa(p=0.1,q=0.1,maxtaxa=100,maxtime=100,nruns=10,plot=TRUE,
print.runs=TRUE,min.cond=FALSE)
#hey, look, we accepted everything! (That's what we want.)
layout(1:2)
#histogram of # taxa over evolutionary history
hist(sapply(res,nrow),main="#taxa")
#histogram of # extant taxa at end of simulation
hist(sapply(res,function(x) sum(x[,5])),main="#extant")
}
layout(1)
}
\author{
David W. Bapst
}
\references{
Foote, M. 1996 On the Probability of Ancestors in the Fossil
Record. \emph{Paleobiology} \bold{22}(2):141--151.
Kendall, D. G. 1948 On the Generalized "Birth-and-Death" Process. \emph{The
Annals of Mathematical Statistics} \bold{19}(1):1--15.
Nee, S. 2006 Birth-Death Models in Macroevolution. \emph{Annual Review of
Ecology, Evolution, and Systematics} \bold{37}(1):1--17.
Solow, A. R., and W. Smith. 1997 On Fossil Preservation and the
Stratigraphic Ranges of Taxa. \emph{Paleobiology} \bold{23}(3):271--277.
}
\seealso{
\code{\link{simFossilTaxa_SRCond}}, \code{\link{sampleRanges}},
\code{\link{simPaleoTrees}}, \code{\link{taxa2phylo}},
\code{\link{taxa2cladogram}}
}
|
bcdbdfeb64074af53a445f70b0be1b02bb5914e2 | 3178860d26781702ca94412669f49c45c2a26275 | /man/mlgvarDat.Rd | 476e84d1546573cb69c8de032ddf260e389a01e5 | [] | no_license | cran/modnets | 08f1fea424212bcf851972de068c62434fb196f3 | 81983f2b895c53602ccb44c989d6692594439568 | refs/heads/master | 2023-08-10T18:19:43.907567 | 2021-10-01T07:20:02 | 2021-10-01T07:20:02 | 412,522,956 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 741 | rd | mlgvarDat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mlgvarDat}
\alias{mlgvarDat}
\title{Simulated multi-level network data}
\format{
A \code{50000 x 7} data frame with five variables to serve as nodes in
the networks, one variable \code{"M"} to serve as the time-lagged
moderator, and an \code{ID} variable that labels responses from each of
\code{100} simulated individuals. For each ID number, there are 500
time-ordered responses.
}
\usage{
mlgvarDat
}
\description{
Data generated from \code{\link{mlGVARsim}}, with five variables that serve
as nodes in the multi-level GVAR model, one moderator variable, and an ID
variable that distinguishes between subjects.
}
\keyword{datasets}
|
45a426d9ab3b055e8c267c7442c4b98c0a396b07 | 66e9ef4bfb22efa5d0cb517d771fb0bcdcc67c5a | /man/forward.Rd | 4798bc33067902ee4bc3b3cd99fcea7299a7ab74 | [] | no_license | cran/bigstep | 14f69982ad35a8dd85453026a2679fc410d1a6ad | 2b1acb00b0616f34abf255a8be82aebcb91306cd | refs/heads/master | 2023-05-29T05:06:11.989083 | 2023-05-13T22:12:03 | 2023-05-13T22:12:03 | 72,123,165 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 935 | rd | forward.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forward.R
\name{forward}
\alias{forward}
\title{Forward step}
\usage{
forward(data, crit = mbic, ...)
}
\arguments{
\item{data}{an object of class \code{big}.}
\item{crit}{a function defining the model selection criterion. You can use
your own function or one of these: \code{bic}, \code{mbic}, \code{mbic2},
\code{aic}, \code{maic}, \code{maic2}.}
\item{...}{optional arguments to \code{crit}.}
}
\value{
An object of class \code{big}.
}
\description{
Add the best variable to a model according to the given criterion.
}
\details{
Type \code{browseVignettes("bigstep")} for more details.
}
\examples{
set.seed(1)
n <- 30
p <- 10
X <- matrix(rnorm(n * p), ncol = p)
y <- X[, 2] + 2*X[, 3] - X[, 6] + rnorm(n)
d <- prepare_data(y, X)
forward(d, crit = bic)
d \%>\%
forward() \%>\%
forward() \%>\%
forward()
}
|
13263fe40738c15c3db70e6608f895647d32963a | c51cf8d45bc122a44b5f170ae2c912092fd61561 | /R/MZILN.R | 46bb601c81056eaa30e13511eef70f63b8d2e3f1 | [] | no_license | lovestat/IFAA-R | f77b5d038cc807b1cb2558281556f5a8bb7661b6 | bf1f1f5ad96917e476ce7e797b2325c813fbfd75 | refs/heads/master | 2023-06-30T16:47:01.019421 | 2021-08-03T16:20:16 | 2021-08-03T16:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,612 | r | MZILN.R | ##' Conditional regression for microbiome analysis based on multivariate zero-inflated logistic normal model
##'
##' Make inference on the associations of microbiome with covariates given a user-specified reference taxon/OTU/ASV.
##' \loadmathjax
##'
##' The regression model for `MZILN()` can be expressed as follows:
##' \mjdeqn{\log\bigg(\frac{\mathcal{Y}_i^k}{\mathcal{Y}_i^{K+1}}\bigg)|\mathcal{Y}_i^k>0,\mathcal{Y}_i^{K+1}>0=\alpha^{0k}+\mathcal{X}_i^T\alpha^k+\epsilon_i^k,\hspace{0.2cm}k=1,...,K}{}
##' where
##' - \mjeqn{\mathcal{Y}_i^k}{} is the AA of taxa \mjeqn{k}{} in subject \mjeqn{i}{} in the entire
##' ecosystem.
##' - \mjeqn{\mathcal{Y}_i^{K+1}}{} is the reference taxon (specified by user).
##' - \mjeqn{\mathcal{X}_i}{} is the covariate matrix for all covariates including confounders.
##' - \mjeqn{\alpha^k}{} is the regression coefficients along with their 95% confidence intervals that will be estimated by the `MZILN()` function.
##'
##' High-dimensional \mjeqn{X_i}{} is handled by regularization.
##'
##' @param MicrobData Microbiome data matrix containing microbiome abundance with each row
##' per sample and each column per taxon/OTU/ASV. It should contain an `"id"` variable to
##' correspond to the `"id"` variable in the covariates data: `CovData`. This argument can
##' take directory path. For example, `MicrobData="C://...//microbiomeData.tsv"`.
##' @param CovData Covariates data matrix containing covariates and confounders with each row
##' per sample and each column per variable. It should also contain an `"id"` variable to
##' correspond to the `"id"` variable in the microbiome data: `MicrobData`. This argument can
##' take directory path. For example, `CovData="C://...//covariatesData.tsv"`.
##'
##'
##' @param linkIDname Variable name of the `"id"` variable in both `MicrobData` and `CovData`. The two data sets will be merged by this `"id"` variable.
##' @param allCov All covariates of interest (including confounders) for estimating and testing their associations with microbiome. Default is 'NULL' meaning that all covariates in covData are of interest.
##' @param refTaxa Reference taxa specified by the user and will be used as the reference taxa.
##' @param reguMethod regularization approach used in phase 1 of the algorithm. Default is `"mcp"`. Other methods are under development.
##' @param sequentialRun This takes a logical value `TRUE` or `FALSE`. Default is `TRUE` since there is only 1 reference taxon.
##' @param paraJobs If `sequentialRun` is `FALSE`, this specifies the number of parallel jobs that will be registered to run the algorithm. If specified as `NULL`, it will automatically detect the cores to decide the number of parallel jobs. Default is `NULL`. It is safe to have 4gb memory per job. It may be needed to reduce the number of jobs if memory is limited.
##' @param standardize This takes a logical value `TRUE` or `FALSE`. If `TRUE`, all design matrix X in phase 1 and phase 2 will be standardized in the analyses. Default is `FALSE`.
##' @param bootB Number of bootstrap samples for obtaining confidence interval of estimates in phase 2. The default is `500`.
##' @param bootLassoAlpha The significance level in phase 2. Default is `0.05`.
##' @param seed Random seed for reproducibility. Default is `1`.
##' @return A list containing the estimation results.
##'
##' - `analysisResults$estByRefTaxaList`: A list containing estimation results for all reference taxa and all the variables in 'allCov'. See details.
##'
##' - `covariatesData`: A dataset containing all covariates used in the analyses.
##'
##' @examples
##' data(dataM)
##' dim(dataM)
##' dataM[1:5, 1:8]
##' data(dataC)
##' dim(dataC)
##' dataC[1:5, ]
##' \donttest{
##' results <- MZILN(MicrobData = dataM,
##' CovData = dataC,
##' linkIDname = "id",
##' allCov=c("v1","v2","v3"),
##' refTaxa=c("rawCount11"),
##' paraJobs=2)
##'
##' }
##'
##'
##'
##' @references Li et al.(2018) Conditional Regression Based on a Multivariate Zero-Inflated Logistic-Normal Model for Microbiome Relative Abundance Data. Statistics in Biosciences 10(3): 587-608
##' @references Zhang CH (2010) Nearly unbiased variable selection under minimax concave penalty. Annals of Statistics. 38(2):894-942.
##' @export
##' @md
MZILN=function(
  MicrobData,
  CovData,
  linkIDname,
  allCov=NULL,
  refTaxa,
  reguMethod=c("mcp"),
  paraJobs=NULL,
  bootB=500,
  bootLassoAlpha=0.05,
  standardize=FALSE,
  sequentialRun=TRUE,
  seed=1
){
  # Fit the multivariate zero-inflated logistic-normal (MZILN) model.
  # The interface and the structure of the returned list are unchanged; see
  # the roxygen block above for the meaning of each argument and component.

  allFunc <- allUserFunc()        # registry of helper functions for the workers
  results <- list()
  start.time <- proc.time()[3]    # wall-clock start, in seconds

  # Tell metaData() to prepare the inputs for the MZILN (conditional) model.
  MZILN <- TRUE
  runMeta <- metaData(MicrobData = MicrobData, CovData = CovData,
                      linkIDname = linkIDname, testCov = allCov, MZILN = MZILN)

  # Unpack the merged data set and the bookkeeping info produced by metaData().
  data <- runMeta$data
  results$covariatesData <- runMeta$covariatesData
  binaryInd <- runMeta$binaryInd
  covsPrefix <- runMeta$covsPrefix
  Mprefix <- runMeta$Mprefix
  testCovInd <- runMeta$testCovInd
  testCovInOrder <- runMeta$testCovInOrder
  testCovInNewNam <- runMeta$testCovInNewNam
  ctrlCov <- runMeta$ctrlCov
  microbName <- runMeta$microbName
  # NOTE: "covriateNames" (sic) is kept misspelled for backward compatibility.
  results$covriateNames <- runMeta$xNames
  rm(runMeta)

  nRef <- length(refTaxa)
  # Every user-supplied reference taxon must match a taxon name in the data.
  if (nRef > 0 && sum(refTaxa %in% microbName) != nRef) {
    stop("Error: One or more of the specified reference taxa have no sequencing reads
         or are not in the data set. Double check the names of the reference taxa and their
         sparsity levels.")
  }

  # Run the regularized estimation (phase 1) and bootstrap inference (phase 2).
  results$analysisResults <- Regulariz_MZILN(data = data, testCovInd = testCovInd,
                      testCovInOrder = testCovInOrder, microbName = microbName,
                      binaryInd = binaryInd, covsPrefix = covsPrefix, Mprefix = Mprefix,
                      refTaxa = refTaxa, paraJobs = paraJobs, reguMethod = reguMethod,
                      bootB = bootB, bootLassoAlpha = bootLassoAlpha,
                      standardize = standardize, sequentialRun = sequentialRun,
                      allFunc = allFunc, seed = seed)
  rm(data)

  # Echo the main settings back so the returned object is self-describing.
  results$testCov <- testCovInOrder
  results$ctrlCov <- ctrlCov
  results$microbName <- microbName
  results$bootB <- bootB
  results$bootLassoAlpha <- bootLassoAlpha
  results$paraJobs <- paraJobs
  results$nRef <- nRef
  results$seed <- if (length(seed) == 1) seed else "No seed used."
  rm(testCovInOrder, ctrlCov, microbName)

  totalTimeMins <- (proc.time()[3] - start.time)/60
  message("The entire analysis took ", round(totalTimeMins, 2), " minutes")
  results$totalTimeMins <- totalTimeMins

  return(results)
}
|
1feeed38c4c5a14b3b2e28bc6fb3d250bcf3e058 | 9b87d4483086f61aef2bf45315fe898a5fc42dd6 | /01-getting-help.R | c002f5c5c9718cfefea976b95cd960e74189427f | [] | no_license | ashander/2017-05-18-sac-water-science-R | b9a4eda5eb14ee3b78c9e07e5dd874edb724c9c3 | 7644f79bb6d40065e128f7926d07d18fe09597a1 | refs/heads/master | 2021-01-21T16:45:30.746895 | 2017-05-20T17:31:23 | 2017-05-20T17:31:23 | 91,905,422 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 631 | r | 01-getting-help.R | # getting help
# `?name` works when you already know the function name (e.g. ?mean)
# `??keyword` searches the documentation of all installed packages
# help(package='ggplot2') lists all exported functions in a package
# Seeing a vignette -- long-form, tutorial-style help
vignette(package='ggplot2') # lists the names of the vignettes in ggplot2
vignette('extending-ggplot2') # opens the named vignette itself
# What if R's built-in help doesn't help? Build a reproducible example:
mtcars # my data
dput(mtcars) # prints R code that exactly recreates the object, e.g.:
# structure(list(mpg = c(21, 21, 22.8, 21.4, 18.7, 18.1, 14.3,
# 4.4, 22.8, 19.
# ...text that you can copy-paste into a forum post or provide as an attachment
# What tools and versions am I using?
sessionInfo() # copy-paste the output into your email or online help forum
|
a4bd6ec3c577065735452675758d1a10f705651a | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/13508_0/rinput.R | 311c9a0b85ce9dda05332bcb8e284cee9f9c1721 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the tree, strip its root, and write the unrooted version back out.
phylo <- read.tree("13508_0.txt")
phylo_unrooted <- unroot(phylo)
write.tree(phylo_unrooted, file = "13508_0_unrooted.txt")
6d96aa1da7e82255750e5a1e7b8fc47e237ee2cd | 845f441d966dc7f4bfb292005f08f164a18cf2e1 | /files/02-Basic.R | fa6a7da70b63c900d938ab2f3be3d504c3da8692 | [] | no_license | Natasia09/INT214-Statistics | 400099d36e04c58b11bf3694892dbc29dcc60c94 | 49a5e5b4c375d9153aeba37e3931e284aed2984f | refs/heads/main | 2023-09-03T05:35:06.117512 | 2021-11-01T06:27:47 | 2021-11-01T06:27:47 | 423,363,371 | 1 | 0 | null | 2021-11-01T06:43:40 | 2021-11-01T06:43:39 | null | UTF-8 | R | false | false | 1,201 | r | 02-Basic.R | # create variable
x <- 1
y <- 2
x+y           # an expression typed at top level auto-prints its value
print(x + y)  # the same result, printed explicitly
z <- x+y
z
# Numeric type, e.g. 1, 1.0 (plain numbers are stored as doubles)
varA <- 1.0
class(varA)
?class        # open the help page for class()
help(class)   # equivalent to ?class
# Logical: TRUE, FALSE
varB <- 1!=2  # "1 is not equal to 2", which is TRUE
varB
class(varB)
class(1==2)
# Character (text strings)
varC <- "Hello, My name is Safe"
class(varC)
# Character vectors, built with c() ("combine")
animals <- c("Ant","Bird","Cat")
class(animals)
length(animals)
animals[2]    # R indexing starts at 1, so this is "Bird"
# Numeric vectors and element-wise (vectorized) arithmetic
v1 <- c(1,2,3,4)
v2 <- c(2,3,0,0)
v1+v2
# rep() replicates a vector a given number of times
v3 <- rep(c(1,2,3),5)
# The colon operator creates integer sequences
v4 <- c(1:100)
# WHAT IF the lengths differ? The shorter vector is recycled, with a warning
# when the longer length is not a multiple of the shorter one.
v1+v4
v5 <- c(1,2,3)
v6 <- c(1,2,0)
v5+v6
# Parallel vectors describing three students
name <- c("Antony","Boky","Caty")
age <-c(10,25,30)
club <-c("Sec A","Sec B","Sec A")
retired <- c(T,F,T)   # note: TRUE/FALSE is safer than T/F in real code
# Create a list holding the four vectors (elements unnamed)
myList <- list(name,age,club,retired)
myList
# ...or give each element a name
myList <- list(stdName = name,
               stdAge = age,
               stdClub = club,
               retired = retired)
myList[2] # single brackets return a sub-list (here, the one holding stdAge)
View(myList)
myList <- data.frame(name,age,club,retired)   # same columns as a data frame
continent <- c("Africa","Asia","Europe","North America","Oceania","South America","Antarctica")
countries <- c(54,48,51,23,14,12,0)
world <- data.frame(continent,countries)
View(world)
x <- c(1:10)
x
?mean
mean(x)
sum(x)
# Summaries: min, quartiles, median, mean, max
summary(x)
|
70051997f6e73c87cf4089038894b1f28ca4e753 | 68aba67d31a3d5fea5a83bd8acc8b445cb6f4598 | /Rphylip/man/setupOSX.Rd | a8b3196fdd4d03e53aa1e6d3fa1e13ece8a5e3e7 | [] | no_license | liamrevell/Rphylip | 482d20c6ef82bf92a98a8d73d94269afea2ff210 | eee8c6eff145aee4030fb6eb9279e204ddcab5de | refs/heads/master | 2021-06-03T23:38:52.636359 | 2020-05-07T04:20:32 | 2020-05-07T04:20:32 | 14,834,438 | 7 | 12 | null | 2019-01-26T20:12:29 | 2013-12-01T06:07:10 | R | UTF-8 | R | false | false | 1,017 | rd | setupOSX.Rd | \name{setupOSX}
\alias{setupOSX}
\title{Help set up PHYLIP in Mac OS X}
\usage{
setupOSX(path=NULL)
}
\arguments{
\item{path}{path to the folder containing the PHYLIP package. If \code{path = NULL}, the R will search several commonly used directories.}
}
\details{
This function can be used to help set up PHYLIP (\url{http://evolution.genetics.washington.edu/phylip.html}) following the special instructions found here: \url{http://evolution.genetics.washington.edu/phylip/install.html}. \code{setupOSX} should only be run once - when PHYLIP is first installed.
}
\description{
This function attempts to help set up PHYLIP on a Mac OS X machine.
}
\references{
Felsenstein, J. (2013) PHYLIP (Phylogeny Inference Package) version 3.695. Distributed by the author. Department of Genome Sciences, University of Washington, Seattle.
}
\author{
Liam J. Revell, Scott A. Chamberlain
Maintainer: Liam J. Revell <liam.revell@umb.edu>
}
\examples{
\dontrun{
setupOSX()
}
}
\keyword{phylogenetics}
\keyword{utilities}
|
cd5b1a19a7c7ae3b0f18c615ef5b516b8f6f80af | bd208ac20a73be1fc72acf6aaf3c95b1f9e06e4e | /plot2.R | 45e9d6ebbcfd87bcd74bdbdfb602d58b8038f208 | [] | no_license | MirekBXL/ExData_Plotting1 | 7f8499ec0fc799189a172213c8cd930690a66aed | 048a1facc627ce1df416e3a9295cae80b58f896e | refs/heads/master | 2021-05-07T01:42:50.831572 | 2017-11-12T00:52:49 | 2017-11-12T00:52:49 | 110,384,226 | 0 | 0 | null | 2017-11-11T22:07:06 | 2017-11-11T22:07:05 | null | UTF-8 | R | false | false | 773 | r | plot2.R | #read data
# Read the full household power-consumption data set ('?' marks missing values).
power <- read.table("./household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")
# Parse the Date column (stored as day/month/year text) into Date objects.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
# Keep only the observations from 2007-02-01 and 2007-02-02.
feb <- subset(power, subset = (Date >= "2007-02-01" & Date < "2007-02-03"))
# Ensure the measurement is numeric, and build a full POSIXct time stamp.
feb$Global_active_power <- as.numeric(feb$Global_active_power)
feb$DateTime <- as.POSIXct(strptime(paste(feb$Date, feb$Time, sep = " "),
                                    "%Y-%m-%d %H:%M:%S"))
# The full table is no longer needed; free the memory.
rm(power)
# Draw the time series and save it as a 480x480 PNG.
png("plot2.png", width = 480, height = 480, units = "px")
with(feb, plot(Global_active_power ~ DateTime, type = "l",
               xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
kRecentMonths <- function(k, dat) {
  # Find rows (e.g. words) that (a) appeared at least once in the early months,
  # (b) were then absent for the k most recent complete months, and
  # (c) reappeared in the current (last) month.
  #
  # Args:
  #   k:   number of consecutive "silent" months immediately before the current
  #        month; must satisfy 1 <= k and k + 3 <= ncol(dat).
  #   dat: a data frame whose first column is an identifier and whose remaining
  #        columns are per-month counts, oldest first, with the current month
  #        in the last column.
  #
  # Returns:
  #   The qualifying rows of `dat`, sorted by the current month's count in
  #   decreasing order, or NULL (with a message) when `k` is out of range.
  if (k + 3 > ncol(dat) || k < 1) {
    cat("The k you selected is not able to be calculated with the current data.")
  } else {
    # drop = FALSE keeps single-column selections as data frames so rowSums()
    # does not fail ("'x' must be an array of at least two dimensions").
    early   <- dat[, 2:(ncol(dat) - k - 1), drop = FALSE]          # before the silent window
    silent  <- dat[, (ncol(dat) - k):(ncol(dat) - 1), drop = FALSE] # the k silent months
    current <- dat[, ncol(dat)]                                     # most recent month
    rows <- which(
      rowSums(early) > 0 &
        rowSums(silent) == 0 &
        current > 0  # original applied rowSums() to this single column, which errors
    )
    dat <- dat[rows, ]
    return(dat[order(-dat[, ncol(dat)]), ])
  }
}
51753255ed306c41b0faf379acc1ab0bea9bd0f1 | 95fed8697a76ab3013784a0b9c53716b6f40715c | /man/multiNet.Rd | 0bc35565e4d6dec3482f2572c8bb47368f1a0bc1 | [] | no_license | cran/spaceNet | f8fd6612e8cc2aa46b0b622b34a460074358cb64 | ff9525f44e62a331ae1737a85a8c7d3a59eb48f6 | refs/heads/master | 2021-04-15T12:18:46.744040 | 2019-05-19T21:30:03 | 2019-05-19T21:30:03 | 126,347,452 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,180 | rd | multiNet.Rd | \name{multiNet}
\alias{multiNet}
\alias{print.multiNet}
\title{Latent Space Models for Multivariate Networks}
\description{
Implements latent space models for multivariate networks (multiplex) via MCMC algorithm.
}
\usage{
multiNet(Y, niter = 1000, D = 2,
muA = 0, tauA = NULL, nuA = 3,
muB = 0, tauB = NULL, nuB = 3,
muL = 0, tauL = NULL, nuL = 3,
alphaRef = NULL,
sender = c("const", "var"),
receiver = c("const", "var"),
covariates = NULL,
DIC = FALSE, WAIC = FALSE,
burnIn = round(niter*0.3),
trace = TRUE,
allChains = FALSE,
refSpace = NULL)
}
\arguments{
\item{Y}{A three-dimensional array or list of \eqn{(n\times n)}{n x n} adjacency matrices composing the multidimensional network. A list will be converted to an array. If an array, the dimension of \code{Y} must be \code{(n,n,K)}, where \code{n} is the number of nodes and \code{K} the number of networks. Missing values (\code{NA}) are allowed; see details.}
\item{niter}{The number of MCMC iterations. The default value is \code{niter = 1000}.}
\item{D}{The dimension of the latent space, with \code{D > 1}. The default value is \code{D = 2}.}
\item{muA, muB, muL}{Mean hyperparameters, see details.}
\item{tauA, tauB, tauL}{Mean hyperparameters, see details.}
\item{nuA, nuB, nuL}{Variance hyperparameters, see details.}
\item{alphaRef}{The value for the intercept in the first network (the reference network). This value can be specified by the user on the basis of prior knowledge. By default is computed using the function \code{\link{alphaRef}}, see details.}
\item{sender, receiver}{The type of node-specific sender and receiver effects to be included in the model. If specified, these effects can be set to constant (\code{"const"}) or/and variable (\code{"var"}). By default, node-specific effects are not included in the model (\code{NULL}).}
\item{covariates}{An array or a list with edge-covariates matrices. A list is automatically converted to an array. Covariates can be either continuous or discrete and must be constant throughout the views of the multiplex. The dimension of \code{covariates} is \code{(n,n,L)}, where \code{n} is the number of nodes and \code{L} the number of covariates, that is, the number of covariates matrices. Missing values (\code{NA}) are not allowed.}
\item{DIC}{A logical value indicating whether the DIC (Deviance Information Criterion) should be computed. The default is \code{DIC = FALSE}.}
\item{WAIC}{A logical value indicating whether the WAIC (Widely Available Information Criterion) should be computed. The default is \code{WAIC = FALSE}.}
\item{burnIn}{A numerical value, the number of iterations of the chain to be discarded when computing the posterior estimates. The default value is \code{burnIn = round(niter*0.3)}.}
\item{trace}{A logical value indicating if a progress bar should be printed.}
\item{allChains}{A logical value indicating if the full parameter chains should also be returned in output. The default value is \code{allChains = FALSE}.}
\item{refSpace}{Optional. A matrix containing a set of reference values for the latent coordinates of the nodes. Its dimension must be \code{(n, D)}, where \code{n} is the number of nodes and \code{D} the number of dimensions of the latent space. The coordinates stored in the matrix \code{refSpace} are compared with the estimated ones at each iteration via Procrustes correlation. High values of the correlation index indicate that the estimated coordinates are a translation and/or a rotation of the coordinates in \code{refSpace}.}
}
\value{
An object of class \code{'multiNet'} containing the following components:
\item{n}{The number of nodes in the multidimensional network.}
\item{K}{The number of networks in the multidimensional network.}
\item{D}{The number of dimensions of the estimated latent space.}
\item{parameters}{A list with the following components:
\itemize{
\item{\code{alpha}}{ is a list with two components: the means of the posterior distributions and the standard deviations of the posterior distributions for the intercept parameters; }
\item{\code{beta}}{ is a list with two components: the means of the posterior distributions and the standard deviations of the posterior distributions for the latent space coefficient parameters;}
\item{\code{theta}}{ is a list with two components: the means of the posterior distributions and the standard deviations of the posterior distributions for the sender effect parameters;}
\item{\code{gamma}}{ is a list with two components: the means of the posterior distributions and the standard deviations of the posterior distributions for the receiver effect parameters;}
\item{\code{lambda}}{ is a list with two components: the means of the posterior distributions and the standard deviations of the posterior distributions for the covariate coefficient parameters.}
}
}
\item{latPos}{A list with posterior estimates of means and standard deviations of the latent coordinates.}
\item{accRates}{A list with the following components:
\itemize{
\item{\code{alpha}}{ is a vector with the acceptance rates for the intercept parameters;}
\item{\code{beta}}{ is a vector with the acceptance rates for the latent space coefficient parameters;}
\item{\code{theta}}{ is a matrix with the acceptance rates for the sender effect parameters;}
\item{\code{gamma}}{ is a matrix with the acceptance rates for the receiver effect parameters;}
\item{\code{lambda}}{ is a vector with the acceptance rates for the covariate coefficient parameters;}
\item{\code{latPos}}{ is a vector with the acceptance rates for the latent coordinates of the nodes.}
}
}
\item{DIC}{The Deviance Information Criterion of the estimated model. Computed only if \code{DIC = TRUE} in input.}
\item{WAIC}{The Widely Available Information Criterion of the estimated model. Computed only if \code{WAIC = TRUE} in input.}
\item{allChains}{If \code{allChains = TRUE}, a list with the following components is returned:
\itemize{
\item{\code{parameters}}{ is a list with the estimated posterior distributions of the model parameters: \eqn{\alpha}{\alpha}, \eqn{\beta}{\beta}, \eqn{\theta}{\theta}, \eqn{\gamma}{\gamma} and \eqn{\lambda}{\lambda};}
\item{\code{latPos}}{ is an array with the posterior distributions of the latent coordinates of each node;}
\item{\code{priorParameters}}{ is a list with the estimated posterior distributions of the parameters of the prior distributions of \eqn{\alpha}{\alpha}, \eqn{\beta}{\beta} and \eqn{\lambda}{\lambda}. }
}
}
\item{corrRefSpace}{A numerical vector containing the values of the Procrustes correlation between the reference space and the estimated one, computed at each mcmc iteration. Only returned when \code{refSpace} is given, otherwise \code{NULL}.}
\item{info}{A list with some information on the estimated model:
\itemize{
\item{\code{call}}{ contains the function call;}
\item{\code{niter}}{ is the number of MCMC iterations;}
\item{\code{burnIn}}{ is the number of initial iterations to discarded when computing the estimates;}
\item{\code{sender}}{ is the node-specific sender effect type;}
\item{\code{receiver}}{ is the node-specific receiver effect type;}
\item{\code{covariates}}{ is the covariates array, if present;}
\item{\code{L}}{ is the number of covariates.}
}
}
}
\details{
The function estimates a latent space model for multidimensional networks (multiplex) via MCMC. The model assumes that the probability of observing an arc between any two nodes is inversely related to their distance in a low-dimensional latent space. Hence, nodes close in the latent space have a higher probability of being connected across the views of the multiplex than nodes far apart.
The model allows the inclusion of node-specific sender and receiver effects and edge-specific covariates.
The probability of an edge beteween nodes \eqn{i}{i} and \eqn{j}{j} in the \eqn{k^{th}}{k_{th}} network is defined as:
\deqn{
P \Bigl( y_{ij}^{(k)} = 1 | \Omega^{(k)} , d_{ij}, \lambda \Bigr)=
\frac{ C_{ij}^{(k)} }{1 + C_{ij}^{(k)} }.}{
P ( y_{ijk} = 1 | \Omega_k , d_{ij} , \lambda ) = C_{ijk} / ( 1 + C_{ijk} ).}
with \eqn{C_{ij}^{(k)} = \exp \{\alpha^{(k)}-\beta^{(k)} d_{ij} -\lambda x_{ij} \} }{C_{ijk} = exp( \alpha_k - \beta_k * d_{ij} - \lambda * x_{ij} )} when node-specific effects are not present and \eqn{C_{ij}^{(k)} = \exp \{\alpha^{(k)} \phi_{ij}^{(k)} -\beta^{(k)} d_{ij} -\lambda x_{ij} \} }{C_{ijk} = exp( \alpha_k \phi_{ijk} - \beta_k * d_{ij} - \lambda * x_{ij} )} when they are included in the model.
The arguments of \eqn{C_{ij}^{(k)}}{C_{ijk}} are:
\itemize{
\item{}{The squared Euclidean distance between nodes \eqn{i}{i} and \eqn{j}{j} in the latent space, \eqn{d_{ij}}{d_{ij}.}}
\item{}{A coefficient \eqn{\lambda}{\lambda} to scale the edge-specific covariate \eqn{x_{ij}}{x_{ij}}. If more than one covariate is introduced in the model, their sum is considered, with each covariate being rescaled by a specific coefficient \eqn{\lambda_l}{\lambda_l}. Edge-specific covariates are assumed to be inversely related to edge probabilities, hence \eqn{\lambda \geq 0}{ \lambda => 0 }.}
\item{}{A vector of network-specific parameters, \eqn{\Omega^{(k)} = (\alpha^{(k)},\beta^{(k)}) }{\Omega_k = ( \alpha_k, \beta_k )}. These parameters are:
\itemize{
\item{}{A rescaling coefficient \eqn{\beta^{(k)} }{\beta_k}, which weights the importance of the latent space in the \eqn{k^{th}}{k_{th}} network, with \eqn{\beta^{(k)} \geq 0 }{\beta_k => 0}. In the first network (that is the reference network), the coefficient is fixed to \eqn{\beta^{(1)} = 1}{\beta_1 = 1} for identifiability reasons. }
\item{}{An intercept parameter \eqn{\alpha^{(k)} }{\alpha_k}, which corresponds to the largest edge probability allowed in the \eqn{k^{th}}{k_{th}} network. Indeed, when \eqn{\beta^{(k)} = 0 }{\beta_k = 0} and when no covariate is included, the probability of having a link between a couple of nodes is that of the random graph:
\deqn{
P \Bigl( y_{ij}^{(k)} = 1 | \alpha^{(k)} \Bigr)=
\frac{ \exp \{ \alpha^{(k)}\} }{1 + \exp \{\alpha^{(k)}\} }.}{
P ( y_{ijk} = 1 | \alpha_k ) = exp( \alpha_k ) / ( 1 + exp( \alpha_k ) ).}
}
The intercepts have a lower bound corresponding to \eqn{\log \Bigl( \frac{\log (n)}{ n - \log(n)} \Bigr) }{ log ( log( n ) / ( n - log( n ) ) )}. For identifiability reasons, the intercept of the first network needs to be fixed. Its value can be either specified by the user on the basis of prior knowledge or computed with the function \code{\link{alphaRef}}.
}
}
\item{}{When node-specific effects are included in the model,
\deqn{\phi_{ij}^{(k)} = g (\theta_{i}^{(k)} + \gamma_{j}^{(k)} )}{\phi_{ijk} = g*(\theta_{ik} + \gamma_{jk})}
with :
\itemize{
\item{}{ \eqn{\theta_{i}^{(k)}}{\theta_{ik}} the sender effect of node \eqn{i}{i} in network \eqn{k}{k}. }
\item{}{ \eqn{\gamma_{j}^{(k)}}{\gamma_{k}} the receiver effect of node \eqn{j}{j} in network \eqn{k}{k}.}
\item{}{ \eqn{g}{g} a scalar. When both sender and receiver effects are present, \eqn{g=0.5}{g=0.5}; when only one type of effect is included in the model, \eqn{g=1}{g=1}.}
}
When the sender and/or receiver effects are set to constant (\code{"const"}), each node \eqn{i}{i} is assumed to have a constant effect across the different networks: \eqn{\theta_{i}^{(k)} = \theta_{i}}{\theta_{ik} = \theta_{i}} and/or \eqn{\gamma_{i}^{(k)} = \gamma_{i}}{\gamma_{ik} = \gamma_{i}}. Instead, when they are set to variable (\code{"var"}), each node has a different effect across the networks: \eqn{\theta_{i}^{(k)}}{\theta_{ik}} and/or \eqn{\gamma_{i}^{(k)}}{\gamma_{ik}}.
}
}
Inference on the model parameters is carried out via a MCMC algorithm. A hierarchical framework is adopted for estimation, where the parameters of the distributions of \eqn{\alpha}{\alpha}, \eqn{\beta}{\beta} and \eqn{\lambda}{\lambda} are considered nuisance parameters and assumed to follow hyper-prior distributions. The parameters of these hyperpriors need to be fixed and are the following:
\itemize{
\item{}{\code{tauA, tauB} and \code{tauL} are the scale factors for the variances of the hyperprior distributions for the mean parameters of \eqn{\alpha^{(k)}, \beta^{(k)}}{\alpha_k , \beta_k} and \eqn{\lambda_l}{\lambda_l}. If not specified by the user, \code{tauA} and \code{tauB} are computed as \eqn{(K-1)\ K }{( K - 1 ) \ K }, if \eqn{K > 1}{K > 1}, otherwise they are set to \eqn{0.5}{0.5}. Parameter \code{tauL} is calculated as \eqn{(L-1)\ K }{( L - 1 ) \ K }, if \eqn{L > 1}{L > 1}, otherwise it is set to \eqn{0.5}{0.5}. }
\item{}{\code{muA, muB} and \code{muL} are the means of the hyperprior distributions for the mean parameters of \eqn{\alpha^{(k)}, \beta^{(k)}}{\alpha_k , \beta_k} and \eqn{\lambda_l}{\lambda_l}. If not specified by the user, they are all set to \eqn{0}{0}. }
\item{}{\code{nuA, nuB} and \code{nuL} are the degrees of freedom of the hyperprior distributions for the variance parameters of \eqn{\alpha^{(k)}, \beta^{(k)}}{\alpha_k , \beta_k} and \eqn{\lambda_l}{\lambda_l}. If not specified by the user, they are all set to \eqn{3}{3}. }
}
Missing data are considered structural and correspond to edges missing because one or more nodes are not observable in some of the networks of the multiplex. No imputation is performed, instead, the term corresponding to the missing edge is discarded in the computation of the likelihood function. For example, if either node \eqn{i}{i} or \eqn{j}{j} is not observable in network \eqn{k}{k}, the edge \eqn{(i,j)}{(i,j)} is missing and the likelihood function for network \eqn{k}{k} is calculated discarding the corresponding \eqn{(i,j)}{(i,j)} term. Notice that the model assumes a single common generative latent space for the whole multidimensional network. Thus, discarding the \eqn{(i,j)}{(i,j)} term in the \eqn{k^{th}}{k_{th}} network does not prevent from recovering the coordinates of nodes \eqn{i}{i} and \eqn{j}{j} in the latent space.
}
%\author{Silvia D'Angelo, Michael Fop}
\examples{
data(vickers)
it <- 10 # small number of iterations just for example
# 2-dimensional latent space model, no covariates
mod <- multiNet(vickers, niter = it, D = 2)
# 2-dimensional latent space model, sex as covariate
mod <- multiNet(vickers, niter = it, D = 2,
covariates = sex)
# 2-dimensional latent space model, with constant sender
# effect and variable receiver effect
mod <- multiNet(vickers, niter = it, D = 2,
sender = "const", receiver = "var")
}
\references{
D'Angelo, S. and Murphy, T. B. and Alfò, M. (2018). Latent space modeling of multidimensional networks with application to the exchange of votes in the Eurovision
Song Contest. \href{https://arxiv.org/abs/1803.07166}{arXiv}.
D'Angelo, S. and Alfò, M. and Murphy, T. B. (2018). Node-specific effects in latent space modelling of multidimensional networks. \href{https://arxiv.org/abs/1807.03874}{arXiv}.
}
\seealso{
\code{\link{alphaRef}}
}
|
87c4b8783ab1af42d411a11d0b057c56b6f148f1 | 49ff0bc7c07087584b907d08e68d398e7293d910 | /INLA-CAR/admin1/run_INLA.R | 3be6a33b5b09101ed725b4f25186b7a297f06174 | [] | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | db7963836c9ce9cec3ca8da3a4645c4203bf1352 | 4219ee6b1fb122c9706078e03dd1831f24bdaa04 | refs/heads/master | 2023-07-30T07:05:28.802523 | 2021-09-27T12:11:17 | 2021-09-27T12:11:17 | 297,317,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,961 | r | run_INLA.R | ##############################
#### set working directory####
##############################
# NOTE(review): `rm(list = ls())` and a hard-coded `setwd()` make this script
# non-portable and wipe the caller's workspace; consider relative paths or a
# command-line argument instead.
rm(list = ls())
setwd("/ihme/homes/annieb6/AMR/typhi_paratyphi/CAR_INLA/admin1")
# Identifier for this model run; all outputs below are written under a folder
# with this name.
model_id <- '1b_sSA_raw_covs_country_re_int3'
dir.create(model_id)
#######################
#### Load libraries####
#######################
library(data.table)
library(plyr)
library(INLA)
library(ggplot2)
library(foreign)
library(boot)
#
# library(spdep)
# library(sf)
#
# library(spData)
#
# library(CARBayesST)
# library(maptools)
#
# library(raster)
#
# library(sf)
# library(ggplot2)
# library(viridis)
#' Root-mean-squared error between two equal-length numeric vectors.
#'
#' @param m Numeric vector of model estimates.
#' @param o Numeric vector of observed values (same length as `m`).
#' @param na.rm Drop missing differences before averaging? Defaults to FALSE,
#'   matching the original behaviour (any NA pair yields NA).
#' @return A single numeric value: sqrt(mean((m - o)^2)).
RMSE <- function(m, o, na.rm = FALSE) {
  sqrt(mean((m - o)^2, na.rm = na.rm))
}
#~~~~~~~~~~~~~~~~~~~~~~~~#
# Read in data & covs ####
#~~~~~~~~~~~~~~~~~~~~~~~~#
# Observed MDR typhi data: one row per admin1 location-year.
mydata <- fread('MDR_Typhi.csv')
# covs <- read.csv('sSA_child_model_preds.csv')
# Admin1-level covariates for every location-year; this frame is also reused
# below as the prediction set (rows with no observed outcome).
covs <- read.csv('all_admin1_typhi_covs.csv')
colnames(covs)[colnames(covs) == 'year_id'] <- 'year'
#restrict to sSA
mydata <- mydata[!is.na(mydata$adj_id_sSA),]
covs <- covs[!is.na(covs$adj_id_sSA),]
#add a weights column
mydata$w <- 1
mydata$n <- round(mydata$n, 0)
# Covariates entering the fixed-effects part of the model.
# covs_to_include <- c('xgboost', 'gam', 'ridge')
covs_to_include <- c('crutstmp',
                     'nexndvi',
                     'distriverslakes',
                     'intest_typhoid',
                     'physicians_pc',
                     'hospital_beds_per1000',
                     'anc4_coverage_prop',
                     'sanitation_prop'
)
# Keep only the selected covariates plus the merge keys, and z-score the
# covariate columns so coefficients are on a comparable scale.
covs <- covs[colnames(covs) %in% covs_to_include | colnames(covs)=='adj_id_sSA' | colnames(covs) =='year'| colnames(covs) == 'COUNTRY_ID']
covs[colnames(covs) %in% covs_to_include] <- data.frame(scale(covs[colnames(covs) %in% covs_to_include]))
# covs$year_scaled <- scale(covs$year)
covs <- data.table(covs)
covs <- na.omit(covs)
covs$iso3 <- as.character(covs$COUNTRY_ID)
covs$COUNTRY_ID <- NULL
mydata <- merge(mydata, covs, by = c('adj_id_sSA', 'year', 'iso3'))
mydata <- na.omit(mydata, c('n', 'd', 'p', names(covs)))
#rescale year so the first modelled year (1990) becomes 1
mydata$year <- mydata$year-1989
covs$year <- covs$year-1989
#bind the covs onto the mydata as a prediction dataset
# (covariate-only rows have NA outcomes, so INLA will predict them)
mydata <- rbind.fill(mydata, covs)
#Introduce a columns space and time
# reg0/reg1 duplicate the area id and time0/time1/time2 the year because each
# f() term in the INLA formula needs its own index variable.
mydata <- cbind(mydata, reg0=mydata$adj_id_sSA, reg1=mydata$adj_id_sSA, time0=mydata$year, time1=mydata$year, time2=mydata$year, cntry = as.numeric(as.factor(mydata$iso3)), id = as.numeric(as.factor(mydata$source_id)))
head(mydata)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Fit ST CAR model using INLA ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Model: covariate fixed effects (no intercept), a BYM spatial effect on the
# admin1 adjacency graph, a RW1 main time trend, iid country and source
# random effects, and a space-time interaction (iid over space, RW2 over time).
formula <- as.formula(paste0('n ~ -1 + ', paste(covs_to_include, collapse = " + "), ' +
                             f(reg0, model="bym", graph="typhi_adj_sSA.adj")+
                             f(time0,model="rw1")+
                             f(cntry,model = "iid")+
                             f(id,model = "iid")+
                             f(reg1, model="iid", group = time1, control.group=list(model="rw2"))'))
# f(reg1,time2,model="iid")'))
# f(reg1, model="bym", graph="typhi_adj_sSA.adj", group = time1, control.group=list(model="rw2"))'))
# f(time1, model="iid", group =reg1,control.group=list(model="besag", graph = "typhi_adj_sSA.adj"))'))
# Binomial likelihood; presumably `n` is the success count and `d` the number
# of trials per row — TODO confirm against the input data dictionary.
# NOTE(review): prefer TRUE over T in control.compute (T is reassignable).
typhi.INLA <- inla(formula, family = "binomial", Ntrials = d, data = mydata,
                   control.compute = list(cpo = T, dic = T, config = T), control.inla = list(strategy = "laplace"),
                   control.predictor = list(link=1),
                   control.family=list(link="logit"),
                   verbose = TRUE)
summary(typhi.INLA)
saveRDS(typhi.INLA, paste0(model_id, '/full_model.rds'))
# typhi.INLA <- readRDS(paste0(model_id, '/full_model.rds'))
#~~~~~~~~~~~~~~~~~~~~~~~~#
# Extract predictions ####
#~~~~~~~~~~~~~~~~~~~~~~~~#
#get insample preds
# Rows start:end are the prediction rows appended by rbind.fill above (those
# with p missing); the observed rows come first.
start <- length(mydata$p[!is.na(mydata$p)])+1
end <- length(mydata$p)
predictedmean<-typhi.INLA$summary.fitted.values$mean[start:end]
predictedsd<-typhi.INLA$summary.fitted.values$sd[start:end]
predictedci97.5<-typhi.INLA$summary.fitted.values$`0.975quant`[start:end]
predictedci2.5<-typhi.INLA$summary.fitted.values$`0.025quant`[start:end]
# Assemble one prediction row per location-year, converting year back to
# calendar years.
typhi.df<- data.frame(adj_id_sSA = mydata$adj_id_sSA[is.na(mydata$p)],
                      year = mydata$year[is.na(mydata$p)]+1989,
                      predictedmean, predictedsd,predictedci2.5, predictedci97.5)
locs <- fread('all_admin1_typhi_covs.csv')
locs <- unique(locs[,.(COUNTRY_ID, adj_id_sSA)])
typhi.df <- merge(typhi.df, locs, by = 'adj_id_sSA')
write.csv(typhi.df, paste0(model_id, "/typhi_fitted_final.csv"),row.names=F)
#calculate metrics
results <- mydata[!is.na(mydata$p),]
# NOTE(review): positional selection of the first 10 columns is brittle —
# it silently breaks if the column order of mydata changes upstream.
results <- results[,1:10]
results$year <- results$year+1989
results <- merge(results, typhi.df, by = c('adj_id_sSA', 'year'),all.x = T, all.y = T)
# Coverage: share of observed points falling inside the 95% interval.
coverage <- results$p[!is.na(results$p)]>results$predictedci2.5[!is.na(results$p)] & results$p[!is.na(results$p)]<results$predictedci97.5[!is.na(results$p)]
model_metrics <- data.frame(r2 = cor(results$p[!is.na(results$p)], results$predictedmean[!is.na(results$p)])^2,
                            RMSE = RMSE(results$p[!is.na(results$p)], results$predictedmean[!is.na(results$p)]),
                            coverage = length(coverage[coverage==TRUE])/length(coverage)*100,
                            cpo = -mean(log(typhi.INLA$cpo$cpo[!is.na(typhi.INLA$cpo$cpo)])))
write.csv(model_metrics, paste0(model_id, '/model_metrics.csv'), row.names = F)
#~~~~~~~~~~~~~~~~~~~~~~~#
# Plot out estimates ####
#~~~~~~~~~~~~~~~~~~~~~~~#
#plot the subnat results
#plot out estimates: one PDF page per country, one facet per admin1 unit,
# fitted mean + 95% ribbon in green, observed proportions as points.
pdf(paste0(model_id, '/subnational_estimates.pdf'),
    height = 8.3, width = 11.7)
#plot out a page for each region
for(i in 1:length(unique(results$COUNTRY_ID))){
  subset <- results[results$COUNTRY_ID == unique(results$COUNTRY_ID)[i],]
  print(
    ggplot(subset)+
      geom_line(aes(x=year, y = predictedmean),color = 'green')+
      geom_ribbon(aes(ymin = predictedci2.5, ymax=predictedci97.5, x = year), alpha = 0.1, fill = 'green') +
      geom_point(aes(x = year, y = p))+
      # geom_pointrange(aes(x=year_id, y = val, ymin = lower_ci, ymax = upper_ci)) +
      scale_x_continuous("Year",
                         breaks = seq(1990, 2018, 5),
                         labels = c("1990", "1995", "2000", "2005", "2010", "2015"))+
      ylim(0,1)+
      ylab('Proportion DR')+
      theme_bw()+
      theme(legend.position = "bottom")+
      ggtitle(unique(subset$COUNTRY_ID))+
      facet_wrap(~adj_id_sSA, nrow = ceiling(sqrt(length(unique(subset$adj_id_sSA)))))+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))+
      theme(plot.title = element_text(hjust = 0.5))
  )
}
dev.off()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Get draws and aggregate to country level ####
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Draw 100 samples from the joint posterior (requires config = TRUE above).
my_draws <- inla.posterior.sample(100, typhi.INLA)
saveRDS(my_draws, paste0(model_id, '/INLA_draws.rds'))
# my_draws <- readRDS(paste0(model_id, '/INLA_draws.rds'))
# Keep only the linear-predictor rows for the prediction set (start:end).
pred.names <- rownames(my_draws[[1]]$latent)
pred.names <- pred.names[grepl('Predictor', pred.names)]
pred.names <- pred.names[start:end]
all_pred <- sapply(my_draws, function(x) {
  x$latent[pred.names,]})
# Back-transform from the logit scale to proportions (boot::inv.logit).
all_pred <- inv.logit(all_pred)
all_pred_dt <- data.table(all_pred)
#assign location info to this
all_pred_dt$adj_id_sSA <- typhi.df$adj_id_sSA
all_pred_dt$country <- typhi.df$COUNTRY_ID
all_pred_dt$year <- typhi.df$year
# get the population estimates
pop <- fread('all_admin1_typhi_covs.csv')
pop <- pop[,.(adj_id_sSA, year, population)]
all_pred_dt <- merge(all_pred_dt, pop, by = c('adj_id_sSA', 'year'))
# #aggregate up to country level (population weighted mean)
# NOTE(review): columns 3:102 are assumed to be the 100 draw columns after the
# merge — positional indexing here is fragile if the column layout changes.
national_ests <-
  all_pred_dt[, lapply(.SD, weighted.mean, w=population, na.rm=TRUE), by=c('country', 'year'), .SDcols=3:102]
# Point estimate and 95% interval across the 100 aggregated draws.
national_ests$mean <- rowMeans(national_ests[, 3:102])
national_ests$lower <- apply(national_ests[, 3:102], 1, function(x) quantile(x, 0.025))
national_ests$upper <- apply(national_ests[, 3:102], 1, function(x) quantile(x, 0.975))
national_ests <- national_ests[,.(country, year, mean, lower, upper)]
#merge on super region info
locs <- read.dbf('/snfs1/DATA/SHAPE_FILES/GBD_geographies/master/GBD_2019/master/shapefiles/GBD2019_analysis_final_loc_set_22.dbf')
locs <- locs[c('ihme_lc_id', 'spr_reg_id')]
locs$region[locs$spr_reg_id == 137] <- 'North Africa & Middle East'
locs$region[locs$spr_reg_id == 158] <- 'South sSA'
locs$region[locs$spr_reg_id == 166] <- 'Sub-Saharan Africa'
locs$region[locs$spr_reg_id == 4] <- 'Southeast sSA, East sSA & Oceania'
locs$spr_reg_id <- NULL
national_ests <- merge(national_ests, locs, by.x = 'country', by.y = 'ihme_lc_id')
write.csv(national_ests, paste0(model_id, '/national_estimates.csv'), row.names = F)
#merge on input data (observed proportions p) so they can be plotted together
input <- mydata[!is.na(mydata$p),]
input$year <- input$year+1989
input <- input[c('iso3', 'year', 'p')]
national_ests <- merge(national_ests, input, by.x = c('country', 'year'), by.y = c('iso3', 'year'), all.x = T, all.y = T)
national_ests <- national_ests[!is.na(national_ests$region),]
#plot out estimates: one PDF page per super-region, one facet per country,
# national mean + 95% ribbon in green, observed proportions as points.
pdf(paste0(model_id, '/national_estimates.pdf'),
    height = 8.3, width = 11.7)
#plot out a page for each region
for(i in 1:length(unique(national_ests$region))){
  subset <- national_ests[national_ests$region == unique(national_ests$region)[i],]
  print(
    ggplot(subset)+
      geom_line(aes(x=year, y = mean),color = 'green')+
      geom_ribbon(aes(ymin = lower, ymax=upper, x = year), alpha = 0.1, fill = 'green') +
      geom_point(aes(x = year, y = p))+
      # geom_pointrange(aes(x=year, y = val, ymin = lower_ci, ymax = upper_ci)) +
      scale_x_continuous("Year",
                         breaks = seq(1990, 2018, 5),
                         labels = c("1990", "1995", "2000", "2005", "2010", "2015"))+
      ylim(0,1)+
      ylab('Proportion DR')+
      theme_bw()+
      theme(legend.position = "bottom")+
      ggtitle(unique(subset$region))+
      facet_wrap(~country, nrow = ceiling(sqrt(length(unique(subset$country)))))+
      theme(axis.text.x = element_text(angle = 90, hjust = 1))+
      theme(plot.title = element_text(hjust = 0.5))
  )
}
dev.off()
|
0518761845e33e9655aa8d11c414dc5df31194e7 | 354066554dd1c3ca7a2d45e3673a2da86330f816 | /man/mmult.Rd | a95c3308976dd01bac30c5049d9eb9631c767f3e | [
"MIT"
] | permissive | elemosjr/planejamento | e8bfe3d0d9d255b1216a91d91a0a880fa2f3cb0c | f7cd310c6a59533b8d7b7ec8b03391a874f0cb51 | refs/heads/master | 2023-04-30T19:34:33.232372 | 2021-05-20T00:54:16 | 2021-05-20T00:54:16 | 366,199,524 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 302 | rd | mmult.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{mmult}
\alias{mmult}
\title{Multiplicação de matrizes}
\usage{
mmult(m, v)
}
\arguments{
\item{m}{Primeira matriz}
\item{v}{Segunda matriz}
}
\value{
Matriz.
}
\description{
Multiplica duas matrizes.
}
|
004c850fdcae696510275f166005ddace80797e8 | 29c00e4f138695197d798e1a748bdbceeccd9765 | /man/survdefHR.Rd | 761cf59b50a72a9497f9110cf576c6f66f6c0d51 | [
"MIT"
] | permissive | anneae/RMSTdesign | fb17b22476f58e6c43e10edd60798647884c2003 | ae467e818944f15f0f0d1935ee994607d6c78212 | refs/heads/master | 2020-05-29T16:37:19.210719 | 2019-12-31T00:57:11 | 2019-12-31T00:57:11 | 189,254,027 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,072 | rd | survdefHR.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survdefHR.R
\name{survdefHR}
\alias{survdefHR}
\title{Create a Survdef Object Based on a Hazard Ratio}
\usage{
survdefHR(survdefC, HR)
}
\arguments{
\item{survdefC}{the survival distribution for the reference/control group,
as a list in the form output by \code{survdef}.}
\item{HR}{the hazard ratio defining the relationship between the two distributions.}
}
\value{
a list with components:
\item{S}{a vectorized function that takes time as input and returns the survival probability at that time}
\item{h}{a vectorized function that takes time as input and returns the hazard at that time}
}
\description{
Creates a new object which stores user-specified survival distribution
information in the format needed for the main function, \code{RMSTpow}.
\code{survdefHR} is used when the user wishes to specify a survival distribution
that is defined by its relationship to another distribution via a constant
hazard ratio.
}
\examples{
con<-survdef(times = 3, surv = 0.5); survdefHR(con, 0.5)
}
|
786819adc5702383a547bf0f137a8825b0a0db48 | 43fb608bb140489e80e9ff4b688d3cfa774eb387 | /run_analysis.R | 523b5fb7adb79bdf924de993278f949f851ed221 | [] | no_license | chrislit/getdata-project | ae70ad9ffae77f786709b1c3442b34242086a98b | e261e028017c0548fa02d28a12c3bffc4b6ab1f7 | refs/heads/master | 2021-01-10T13:43:47.456446 | 2015-05-23T07:37:06 | 2015-05-23T07:37:06 | 36,103,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,192 | r | run_analysis.R | #!/usr/bin/env Rscript
library(dplyr)
# Helper functions
# Build the path to a file inside the UCI HAR dataset directory.
#
# Args:
#   fn: file name (or relative path) within the dataset folder.
# Returns:
#   The full path as a single character string.
expand.path <- function(fn) {
  data.dir <- "./UCI HAR Dataset/"  # dataset root, relative to the working dir
  paste0(data.dir, fn)              # paste0() replaces paste(..., sep = "")
}
# Read features.txt into a df and extract the set of features that end in -mean() or -std()
# (means.and.stds holds their column indices into the 561-column X files).
features <- read.table(file = expand.path("features.txt"))
means.and.stds <- features[grep("-(mean|std)\\(\\)", features$V2) ,1]
# Read activity_labels.txt into a df, keeping only the label text
# (colClasses "NULL" drops the numeric id column).
activity <- read.table(file = expand.path("activity_labels.txt"),
                       colClasses=c("NULL", "character"),
                       col.names=c("NULL", "type"))
# Read the test data from disk and subset to means & stds
test.df <- read.table(file = expand.path("test/X_test.txt"))
test.df <- test.df[,means.and.stds]
# Add subject & activity columns to the left
test.df <- cbind(read.table(file = expand.path("test/subject_test.txt")),
                 read.table(file = expand.path("test/y_test.txt")),
                 test.df)
# Read the train data from disk and subset to means & stds
train.df <- read.table(file = expand.path("train/X_train.txt"))
train.df <- train.df[,means.and.stds]
# Add subject & activity columns to the left
train.df <- cbind(read.table(file = expand.path("train/subject_train.txt")),
                 read.table(file = expand.path("train/y_train.txt")),
                 train.df)
# rbind the data together and label the variables using the feature names from features
sensor.df <- rbind(test.df, train.df)
names(sensor.df) <- c("Subject", "Activity", as.character(features[means.and.stds,2]))
# Substitute descriptive names for activities (activity id indexes the label
# vector) and make other variable names prettier.
sensor.df$Activity <- activity$type[sensor.df$Activity]
names(sensor.df) <- gsub("[\\(\\)]", "", names(sensor.df))
names(sensor.df) <- gsub("-", "_", names(sensor.df))
# Clear no longer needed data from memory
rm(test.df, train.df, features, means.and.stds, activity)
# Store the mean of each variable in a new data frame
# NOTE(review): summarise_each()/funs() are deprecated in current dplyr —
# the modern equivalent is summarise(across(everything(), mean)).
subj.activity.means.df <-sensor.df %>% group_by(Subject, Activity) %>% summarise_each(funs(mean))
# Write the table to disk for submission
write.table(subj.activity.means.df, file="SubjectActivityMeans.txt", row.names = FALSE)
|
12e1d8ebc11af758217083f65dcc1aea5ecc1066 | 63883301f67d43dd74c2d100ecd62f3e38d3f4d0 | /man/setPermanentAnalyst.Rd | ff1f3b590b8416e7af6dcc19cce00c1e0e2b1319 | [
"MIT"
] | permissive | akirosingh/Jagannathantools | fee3d7b49ebc6659f001e44954de146d60cbcf4a | ac894317f73c71a119e93d2ffebccbde8c94ce65 | refs/heads/master | 2023-08-23T12:20:01.464604 | 2021-11-03T01:59:45 | 2021-11-03T01:59:45 | 349,272,998 | 2 | 0 | null | 2021-05-20T22:38:58 | 2021-03-19T02:07:29 | R | UTF-8 | R | false | true | 383 | rd | setPermanentAnalyst.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MiscFxns.R
\name{setPermanentAnalyst}
\alias{setPermanentAnalyst}
\title{Set Default Analyst Value}
\usage{
setPermanentAnalyst(Name)
}
\arguments{
\item{Name}{A string containing the analyst name}
}
\description{
This is an internal function that writes the Default Analyst name to the
users Rprofile.
}
|
6ea3c8ed49cb37f9a98e916f3e5875de137910cb | 5f97b926b492cb41435b54dbd998a77b8762cbcc | /man/simexpodat.Rd | 95e3a293a3a123d0c545d1058a6e3aa373fa5b6c | [] | no_license | lvhoskovec/mmpack | 38bbc1a72cdd5f26aeeca3e07b5c2c5a0f563085 | a55bab3e69ab2dc10a2ce4ccc933e28b122a5e9c | refs/heads/master | 2021-09-04T14:34:07.888345 | 2021-08-16T16:49:17 | 2021-08-16T16:49:17 | 192,816,897 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 645 | rd | simexpodat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simexpodat.R
\name{simexpodat}
\alias{simexpodat}
\title{Simulate Data for MixModelPack}
\usage{
simexpodat(n, Xdat)
}
\arguments{
\item{n}{sample size}
\item{Xdat}{Exposure data matrix loaded from package mmpack}
}
\value{
list with components
\itemize{
\item Y: response data
\item X: exposure data
\item W: covariate data
\item h: exposure-response function
\item active: active main effects
\item active.ints: active interactions
}
}
\description{
Function to simulate exposure, covariate, and response data for up to 1000 observations
}
|
6f52e8fe55e3c55ef7ef37929c694426b7f3697b | 871ec0e7231d3f8578fa1d75428b06ee6320a550 | /learning/ML_previsao_1.R | 105b17aa01f7b55eaf01a353e4738811c7c928f4 | [] | no_license | gabrielgasparoti/Titanic_Survivors | f5741ba27d10585d553cde119618af09773fa5cb | 5a66186caebb520e1f2e74702f867213a1385632 | refs/heads/main | 2023-03-22T02:06:10.942682 | 2021-03-20T15:01:03 | 2021-03-20T15:01:03 | 349,746,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,630 | r | ML_previsao_1.R |
# PREVISAO DE SOBREVIVENCIA AO TITANIC
# TREINAMENTO DO MODELO DE REGRESSAO LOGISTICA SOBRE OS DADOS DE TREINO
# Neste script treinamos o modelo com base nos dados de treino
# Fizemos previsões para os dados de teste
library(tidyverse)
library(kableExtra)
library(factoextra)
library(gridExtra)
library(jtools)
library(fastDummies)
library(caret)
library(pROC)
library(plotly)
# Importando os dados já processados em python
dir_data <- file.path(dirname(getwd()), 'data')
treino <- read.csv( file.path(dir_data, 'processed_train_data.csv') )
# Ajustando dados categóricos
glimpse(treino)
treino$Survived <- as.factor(treino$Survived)
treino$Pclass <- as.factor(treino$Pclass)
treino$Sex <- as.factor(treino$Sex)
treino$Cabin <- as.factor(treino$Cabin)
treino$Embarked <- as.factor(treino$Embarked)
treino$Alone <- as.factor(treino$Alone)
treino$Title <- as.factor( str_replace(treino$Title, ' ', ''))
# Criando variáveis dummies
treino_dummy <- dummy_columns(.data = treino,
select_columns = c('Pclass', 'Sex',
'Embarked', 'Title'),
remove_most_frequent_dummy = T,
remove_selected_columns = T)
# Instanciando o modelo glm
glmodel <- glm(formula = Survived ~ .,
data = treino_dummy,
family = 'binomial')
# Realizando o procedimento stepwise para descartar variáveis sem significância
glmodel <- step(glmodel, k = qchisq(p = 0.05, df = 1, lower.tail = FALSE) )
# Analisando os parâmetros do modelo
summ(glmodel)
# Realizando previsão com os dados de treino
treino$Predict <- predict(glmodel, newdata = treino_dummy, type = 'response')
# Construindo a matriz de confusão
cm <- confusionMatrix(table(predict(glmodel, type = "response") >= 0.5,
treino_dummy$Survived == 1)[2:1, 2:1])
# Visualizando a matriz
cm
# Criando a curva ROC
roc <- roc(response = treino$Survived,
predictor = glmodel$fitted.values)
# Visualizando a Curva ROC
ggroc(roc, color = 'darkorchid', size = 0.9)+
geom_segment(aes(x = 0, y =1, xend = 1, yend = 0),
color = 'orange', size = 0.9)+
labs(title = paste('Regressão Logística Binária','\nAUC:', round( roc$auc, 3), "|",
"GINI:", round( (roc$auc - 0.5)/0.5, 3) ),
subtitle = paste('Eficiência do modelo para os dados de treino:',
'\nAcurácia:', round(cm$overall[1],3),
'\nSensitividade:', round(cm$byClass[1], 3),
'\nEspecificidade:', round(cm$byClass[2], 3)))+
theme_bw()
# Importando a base de teste já processa em python
teste <- read.csv( file.path (dir_data, 'processed_test_data.csv') )
# Ajustando o tipo de dado
teste$Pclass <- as.factor(teste$Pclass)
teste$Sex <- as.factor(teste$Sex)
teste$Cabin <- as.factor(teste$Cabin)
teste$Embarked <- as.factor(teste$Embarked)
teste$Alone <- as.factor(teste$Alone)
teste$Title <- as.factor( str_replace(teste$Title, ' ', ''))
# Excluindo dados fora do intervalo de treino para evitar extrapolação
summary(treino)
summary(teste)
teste <- subset(teste, teste$Age >= 2.5)
teste <- subset(teste, teste$Age <= 54.5)
teste <- subset(teste, teste$Fare <= 65.63)
# Criando a base de dummies
teste_dummies <- dummy_columns(.data = teste,
select_columns = c('Pclass', 'Sex',
'Embarked', 'Title'),
remove_selected_columns = T,
remove_first_dummy = F,
remove_most_frequent_dummy = F)
# Selecionando apenas variáveis consideradas no modelo
glmodel$coefficients
teste_dummies2 <- teste_dummies[, c('Age','SibSp', 'Cabin', 'Alone',
'Pclass_1', 'Pclass_2', 'Sex_female',
'Title_Other')]
# Prevendo os resultados
teste$resp <- predict(glmodel,
newdata = teste_dummies2,
type = 'response') >= .5
# Ajuste na visualização da variável resposta
teste$resp <- str_replace(teste$resp, 'TRUE', '1')
teste$resp <- str_replace(teste$resp, 'FALSE', '0')
teste$resp <- as.factor(teste$resp)
# Salvando a base de teste com a previsão da variável resposta
# Será concatenada em python com a base de treino para uma terceira análise
write.csv(teste, file = file.path(dir_data, 'teste_predict.csv'), row.names = F )
|
f4c02c914af519a2d06f66bc10298349bccc2fef | 455fb725c615d1c4b11277b2f0cbbd7012f14ab9 | /XG_Boost.R | 7c1a4f1741674085fc55eb3601dd7e3d00add24f | [] | no_license | BigData-Capstone/M5-Forecasting | 8517b8b4da7faff52314f72dc7ec80e07528965c | 83797df84f377eadf407f423f0e7b7496eacefc0 | refs/heads/master | 2022-07-17T06:44:02.847105 | 2020-05-18T06:56:53 | 2020-05-18T06:56:53 | 258,253,153 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,033 | r | XG_Boost.R | #importing relevant libraries
library(tidyverse)
library(data.table)
library(RcppRoll)
library(dplyr)
library(janitor)
library(forecast)
library(xgboost)
library(Matrix)
library(mltools)
library(caret)
library(foreach)
library(doParallel)
#define function for clearing memory
# Trigger a garbage-collection pass, suppressing gc()'s printed summary table.
free <- function() {
  invisible(gc())
}
#read in the data (M5 competition inputs)
sales_train_validation.csv <- fread("sales_train_validation.csv", stringsAsFactors = TRUE)
calendar.csv <- fread("calendar.csv", stringsAsFactors = TRUE)
sell_prices.csv <- fread("sell_prices.csv", stringsAsFactors = TRUE)
#create the dataset: melt the wide d_1..d_N day columns into long format,
# one row per id-day with the unit demand as the value.
dataset <- data.table::melt.data.table(
  data = sales_train_validation.csv,
  id.vars = colnames(sales_train_validation.csv)[grepl("id", colnames(sales_train_validation.csv))],
  measure.vars = colnames(sales_train_validation.csv)[grepl("^d_", colnames(sales_train_validation.csv))],
  variable.name = "d",
  value.name = "demand",
  na.rm = FALSE
)
#remove csv and clear memory
rm(sales_train_validation.csv)
free()
# make data smaller: turn "d_123" labels into plain integers
dataset[, ("d") := as.integer(gsub("^d_", "", get("d")))]
free()
# ensure order (the lag/rolling features below assume day-sorted rows)
data.table::setorder(
  x = dataset,
  cols = "d"
)
free()
# define indices for prediction/evaluation, training and testing set
train_index <- 1431 # use the last 1.5 yrs for training -> Full 2015 + Half 2016
test_index <- (1913-28) #predict the last 28 days
# reduce data
dataset <- dataset[get("d") >= train_index, ]
free()
#Make sure the dataset is sorted correctly
stopifnot(!is.unsorted(dataset$d))
#merge the calendar: add a weekend flag, integer day-of-month, and integer
# codes for the event columns, then drop the raw date/weekday columns.
calendar.csv[, `:=` (weekend = ifelse(get("weekday") %in% c("Saturday", "Sunday"), 1L, 0L),
                     # create weekend feature
                     d = as.integer(gsub("^d_", "", get("d"))),
                     day = as.integer(substr(get("date"), start = 9, stop = 10)),
                     date = factor(as.Date(get("date"))),
                     event_name_1 = as.integer(factor(get("event_name_1"))),
                     event_type_1 = as.integer(factor(get("event_type_1"))),
                     event_name_2 = as.integer(factor(get("event_name_2"))),
                     event_type_2 = as.integer(factor(get("event_type_2"))))][
                       , `:=` (date = NULL,
                               weekday = NULL)
                       ]
# merge calendar to dataset
dataset <- calendar.csv[get("d") >= train_index, ][dataset, on = "d"]
rm(calendar.csv)
free()
# merge prices to dataset
dataset <- sell_prices.csv[dataset, on = c("store_id", "item_id", "wm_yr_wk")][, wm_yr_wk := NULL]
rm(sell_prices.csv)
free()
# create more features (slightly modified code from https://www.kaggle.com/mayer79/m5-forecast-attack-of-the-data-table)
# Item-level mean demand per day, lagged 28 days with 30-day rolling mean/sd.
agg <- dataset[, .(agg_mean = mean(demand, na.rm = TRUE)), keyby = c("item_id", "d")]
agg[, lag_t28 := dplyr::lag(agg_mean, 28), keyby = "item_id"
    ][
      , rolling_mean_t30r := RcppRoll::roll_meanr(lag_t28, 30), keyby = "item_id"
      ][
        , rolling_sd_t30r := RcppRoll::roll_sdr(lag_t28, 30), keyby = "item_id"
        ]
#Merge features to dataset (update join: i. prefixes pull from agg)
dataset[agg, `:=` (rolling_mean_t30r_item = i.rolling_mean_t30r,
                   rolling_sd_t30r_item = i.rolling_sd_t30r
), on = c("item_id", "d")]
# Add per-series lag and right-aligned rolling-mean demand features.
# Rows are grouped by `id` (one series per item/store combination), so lags
# never cross series boundaries; the leading rows of each series get NA.
# NOTE(review): assumes rows are ordered by day within each id — the earlier
# setorder() on `d` is what establishes this; verify if the pipeline changes.
demand_features <- function(X) {
  X %>%
    group_by(id) %>%
    mutate(
      lag_1 = dplyr::lag(demand, 1),
      lag_2 = dplyr::lag(demand, 2),
      lag_3 = dplyr::lag(demand, 3),
      lag_7 = dplyr::lag(demand, 7),
      lag_28 = dplyr::lag(demand, 28),
      roll_lag7_w7 = roll_meanr(lag_7, 7),
      roll_lag7_w28 = roll_meanr(lag_7, 28),
      roll_lag28_w7 = roll_meanr(lag_28, 7),
      roll_lag28_w28 = roll_meanr(lag_28, 28)) %>%
    ungroup()
}
#more features
dataset <- dataset %>%
  demand_features()
str(dataset)
View(dataset)
rm(agg)
free()
test_df = filter(dataset, item_id == "FOODS_1_016")
View(test_df)
#filter only the items of store CA_3
dataset = filter(dataset, store_id == "CA_3")
#drop unnecessary columns: store id and state id (constant after the filter)
dataset = select(dataset, -store_id)
dataset = select(dataset, -state_id)
#convert item id into numeric format
dataset$item_id = as.integer(dataset$item_id)
dataset$id = as.integer(dataset$id)
#Encode category and department id as dummy variables
dataset$cat_id = one_hot(as.data.table(dataset$cat_id))
dataset$dept_id = one_hot(as.data.table(dataset$dept_id))
#clear memory
free()
#split the training data
train_dataset = filter(dataset, d <= test_index)
View(train_dataset)
test_dataset = filter(dataset, d > test_index)
#omit missing values
View(dataset)
#Assign label
train_label <- train_dataset$demand
test_label <- test_dataset$demand
# NOTE(review): x_train is not defined until a few lines below, so this
# View() call errors in a fresh session; move or remove it.
View(x_train)
#remove label from dataset
train_dataset = select(train_dataset, -demand)
test_dataset = select(test_dataset, -demand)
#convert datasets to matrix
x_train = as.matrix(train_dataset)
x_test = as.matrix(test_dataset)
#Create input for xgboost
trainDMatrix <- xgb.DMatrix(data = x_train, label = train_label)
#set the parameters for the booster
# NOTE(review): "reg:linear" is deprecated in recent xgboost releases in
# favour of "reg:squarederror" — confirm against the installed version.
params <- list(booster = "gbtree",
               objective = "reg:linear",
               eval_metric = "rmse",
               eta = 0.07,
               max_depth = 5,
               min_child_weight = 10,
               colsample_bytree = 1,
               gamma = 0.9,
               alpha = 1.0,
               subsample = 0.7
)
N_cpu = detectCores()
N_cpu
# Cross-validate to choose the number of boosting rounds (5-fold, RMSE,
# stopping after 10 rounds without improvement).
# NOTE(review): the original call contained a stray empty argument (", ,")
# and used the argument names `nthreads`/`evaluation`/`early_stopping_round`,
# which xgb.cv does not define; the correct names are `nthread`, `metrics`
# and `early_stopping_rounds`.
xgb.tab <- xgb.cv(data = trainDMatrix,
                  params = params, metrics = "rmse", nrounds = 100,
                  nthread = N_cpu, nfold = 5, early_stopping_rounds = 10)
xgb.tab$best_iteration
# Fit the final model on the full training DMatrix.
# NOTE(review): nrounds is hard-coded to 10 even though the CV best iteration
# was just computed — consider nrounds = xgb.tab$best_iteration.
model_xgb <- xgboost(data = trainDMatrix, params = params, nrounds = 10, importance = TRUE)
# Predict demand for the held-out period.
pred <- predict(model_xgb, newdata = x_test)

# Collect predictions next to the true labels and compute residuals.
# NOTE(review): the original piped the bare matrix `x_test` into bind_cols()
# and referenced a `demand` column that was dropped earlier, which fails at
# runtime; the held-out labels live in `test_label`.
pred_salesData1 <- tibble(demand = test_label, pred = pred) %>%
  mutate(error = demand - pred)

# Feature-importance plot.
# NOTE(review): the original referenced undefined objects `trainMatrix` and
# `model`; the fitted model is `model_xgb` and the feature names come from
# the training matrix `x_train`.
importance <- xgb.importance(feature_names = colnames(x_train), model = model_xgb)
xgb.ggplot.importance(importance_matrix = importance)
|
1acaede800c586addd589614cbcf380884e96ea7 | 6e2656f82b2a1cf2c18158d03f3a4cba197f9c1e | /man/get_script_dir.Rd | a0c284697b39f2b997f274798c980a74e2899b85 | [] | no_license | Fgazzelloni/ihme.covid | a8f46800ee513971f5c1742ac2eadb1edc5c7d5e | c014610e2d60cb4e2edf7d3ee14f84bd4d3d9bf7 | refs/heads/master | 2022-11-19T18:31:40.528400 | 2020-07-24T16:28:36 | 2020-07-24T16:28:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 347 | rd | get_script_dir.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_script_dir.R
\name{get_script_dir}
\alias{get_script_dir}
\title{Returns the directory the currently running script is in or NULL if all fails}
\usage{
get_script_dir()
}
\description{
Returns the directory the currently running script is in or NULL if all fails
}
|
f6eaf310c6deb73c916770af666cfaddc0252c0c | 2dd63cb845ae41b596301c4a3cf0e2602a9dc281 | /ruijin.R | 4fbb7fc8fcfce6109b22eb34324bbf022418cdaa | [] | no_license | tienan/Shsmu_tienanTools | da8609fb026e849ddd1b2b95297bcae1f9a81e6e | 9d8a4d2575f07d62d70d1e4f5f4936bfb908d867 | refs/heads/master | 2021-06-15T15:32:23.659992 | 2021-02-15T03:31:38 | 2021-02-15T03:31:38 | 149,967,692 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,055 | r | ruijin.R | install.packages("rms") #这一步一劳永逸。安装好后,再次启动R时,无需再次输入代码,下同
# --- One-time setup ----------------------------------------------------------
# Install the modelling packages, skipping any that are already present
# (the original unconditionally re-installed them on every run).
pkgs <- c("rms", "Hmisc", "lattice", "survival", "Formula", "ggplot2", "foreign")
for (p in pkgs) {
  if (!requireNamespace(p, quietly = TRUE)) install.packages(p)
}
library("rms")
library("Hmisc")
library("lattice")
library("survival")
library("Formula")
library("ggplot2")
library("foreign")
library(pROC)

# --- Data --------------------------------------------------------------------
# One row per patient; LNM (lymph-node metastasis) is the binary outcome.
F1 <- read.table("ruijinNongram_1.txt", header = TRUE, sep = "\t")
# rms::datadist needs the predictors visible by name, hence the attach().
# NOTE(review): attach() modifies the search path; datadist(F1) would avoid it.
attach(F1)
ddist <- datadist(Size, pT, Location, Ulcer, NerveInvasion, VascularInvasion)
options(datadist = "ddist")

# --- Model 1: logistic regression + nomogram ---------------------------------
logi <- lrm(LNM ~ Size + pT + Location + Ulcer + NerveInvasion + VascularInvasion, x = TRUE, y = TRUE)
nomo <- nomogram(logi, fun = plogis, fun.at = c(.001, .01, .05, seq(.1, .9, by = .1), .95, .99, .999),
                 lp = FALSE, funlabel = "LNM")
plot(nomo)

# Bootstrap calibration curve (1000 resamples).
cal <- calibrate(logi, method = "boot", B = 1000, bw = FALSE, rule = "p",
                 type = "individual", sls = .05, aics = 0, force = NULL, estimates = TRUE, pr = FALSE,
                 smoother = "lowess", digits = NULL)
plot(cal, scat1d.opts = list(nhistSpike = 500))

# Apparent (in-sample) discrimination: ROC curve and AUC.
Predi <- predict(logi, F1, type = "lp")
roc1 <- roc(F1$LNM, Predi, legacy.axes = TRUE)
plot(roc1, font = 2, legacy.axes = TRUE)
ci.auc(roc1)
auc(roc1)

# --- Model 2: adds LymphInvasion; nomogram exported to TIFF ------------------
dat <- read.table("ruijinNongram_1.txt", header = TRUE, sep = "\t")
# logi <- lrm(Outcome ~ Age + pT + pN + Grade + molecularSubtype + geneRs, data = g1, x = TRUE, y = TRUE)  # (kept from original, commented out)
logi <- lrm(LNM ~ Size + pT + Location + Ulcer + NerveInvasion + LymphInvasion + VascularInvasion, data = dat, x = TRUE, y = TRUE)
nomo <- nomogram(logi, fun = plogis, fun.at = c(.001, .01, .05, seq(.1, .9, by = .1), .95, .99, .999), lp = FALSE, funlabel = "Outcome")
tiff(filename = "Fig-1.tif",
     width = 800, height = 800, units = "px", pointsize = 12,
     compression = "lzw",
     bg = "white", res = 300)
plot(nomo)
dev.off()

# TODO(review): `g1` and `id` are never defined in this file, so everything
# below errors as written. The last lines look like the remains of a 4-fold
# split of F1 (126 rows per fold); confirm with the author before running.
Pred <- predict(logi, g1, type = "lp")
roc1 <- roc(g1$Outcome, Pred, legacy.axes = TRUE)  # was: stale `Predi` and misspelt `legend.axes`
plot(roc1, font = 2, legacy.axes = TRUE)           # was: plot(roc1,font=2,legacy,axes=T) — a bare `legacy` symbol
g2 <- F1[id[(1 * 126):(2 * 126)], ]
g3 <- F1[id[(2 * 126):(3 * 126)], ]
g4 <- F1[id[(3 * 126):(4 * 126)], ]
|
2562abbc7a8441f3017728dcf13924ae73217fa6 | 2655fcbde895737e36a1f2283e0cd51765e98168 | /Taxonomy/R/clustering_hierarchical.R | 435b8508b2855a39cfe02279fe6f2c7e06bb9999 | [] | no_license | DDTD-IS/DDTD-IS | 5b7128df844289fa804bc9a3750c73898001bfb4 | eb21f343a7224793af823cd580f206d2fb48b604 | refs/heads/master | 2020-09-21T19:21:24.316497 | 2019-11-29T17:38:21 | 2019-11-29T17:38:21 | 224,897,542 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,503 | r | clustering_hierarchical.R | #' @title Wrapper function for hierarchical clustering algorithms
#' @description Runs \code{hclust} on a precomputed distance matrix and then
#' cuts the resulting dendrogram into \code{number_clusters} groups via \code{cutree}
#' @param number_clusters \code{integer}\cr
#' Number of groups the dendrogram is cut into by \code{cutree}
#' @param distance_matrix \code{distance matrix}\cr
#' Distance matrix of class \code{dist} used as input for the hierarchical clustering
#' @param method \code{method = c("ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median" , "centroid" )}\cr
#' Agglomeration method handed on to \code{hclust}; default is \code{ward.D2}
#' @return \code{clustering}\cr
#' The complete \code{hclust} object
#' @return \code{clusterCut}\cr
#' Cluster assignments obtained by applying \code{cutree} to that object
#' @family Clustering
#' @export
clustering_hierarchical <-
  function(distance_matrix,
           method = "ward.D2",
           number_clusters) {
    # Build the full dendrogram, then flatten it to the requested
    # number of groups.
    dendrogram <- hclust(distance_matrix, method = method)
    assignments <- cutree(dendrogram, k = number_clusters)
    list(clustering = dendrogram, clusterCut = assignments)
  }
|
29e5ddaf89e9a87c6dd333871fd371f259e2a149 | 8b5e9897c10bd990e7aee1201325397dfb7e1c82 | /SHIFT/202105/t29_Aula4.R | 1e0c5e138d24abe8b1b6308bcf55422a5d2ff089 | [] | no_license | Uemura84/FIAP | 9ea08e79b4b9f1c8de87c317d5bb19f8e66bfd30 | 2c9a62b775096c108c0da42dcb4f66dbd74a0e07 | refs/heads/master | 2023-08-02T10:40:17.526656 | 2021-10-08T02:31:11 | 2021-10-08T02:31:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,370 | r | t29_Aula4.R | # Aula 4 - Regressões no R
# Load the monthly GDP (PIB) data set used throughout the lesson.
pib <- read.csv("https://raw.githubusercontent.com/diogenesjusto/FIAP/master/SHIFT/Data/pib.csv")
# Train/test split: first 132 months for fitting, last 6 for evaluation.
treino <- pib[1:132, ]
teste <- pib[133:138, ]
##################################################################
# 1. Simple linear regression (PIB explained by BRL)
mod <- lm(PIB ~ BRL, data = treino)
# Descriptive statistics of the fit.
summary(mod)
# Forecast over the held-out test data.
p <- predict(mod, newdata = teste)
# Predicted vs. actual values: absolute and relative differences.
cbind(p, teste$PIB, p - teste$PIB, (p - teste$PIB) / teste$PIB)
# Forecast error metrics.
sse <- sum((p - teste$PIB)^2)          # sum of squared errors: amplifies large deviations
rmse <- sqrt(mean((p - teste$PIB)^2))  # root mean squared error: same unit as the response
##################################################################
# 2. Multivariate linear regression
mod <- lm(PIB ~ BRL + BRP, data = treino)
summary(mod)
p <- predict(mod, newdata = teste)
sse <- sum((p - teste$PIB)^2)
rmse <- sqrt(mean((p - teste$PIB)^2))  # consistency fix: only model 1 reported RMSE before
##################################################################
# 3. Autoregressive model (lagged PIB columns as predictors)
mod <- lm(PIB ~ PIBi1 + PIBi2 + PIBi4 + PIBi12, data = treino)
summary(mod)
p <- predict(mod, newdata = teste)
sse <- sum((p - teste$PIB)^2)
rmse <- sqrt(mean((p - teste$PIB)^2))
##################################################################
# 4. Multivariate regression with monthly dummies to handle seasonality
mod <- lm(PIB ~ BRL + BRP + D2 + D5 + D6 + D7 + D8 + D9 + D11, data = treino)
summary(mod)
p <- predict(mod, newdata = teste)
sse <- sum((p - teste$PIB)^2)
rmse <- sqrt(mean((p - teste$PIB)^2))
###################################################################
# Build dummy variables from the month abbreviation (first 3 chars of ANO_MES).
pib$MES <- substr(pib$ANO_MES, 1, 3)
# Install only when missing (the original re-installed on every run).
# NOTE(review): the `dummies` package has been archived on CRAN;
# model.matrix(~ MES - 1, pib) is a base-R alternative.
if (!requireNamespace("dummies", quietly = TRUE)) install.packages("dummies")
library(dummies)
pib <- cbind(pib, dummy(pib$MES))
|
47ae16d862634b3b7ac332f5eac73a9000cfca91 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/ROCit/examples/logit.Rd.R | cb1276188116c3c1ee92c2d65413069017d62fb4 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 156 | r | logit.Rd.R | library(ROCit)
### Name: logit
### Title: Log Odds of Probability
### Aliases: logit
### ** Examples
# Log odds of a single probability value.
logit(0.2)
# Fixed seed so the vectorised call below is reproducible.
set.seed(1)
logit(runif(10, 0, 1))
|
2d7aafc70635145dc2692f2859c1ee293f83f1a9 | 38d166ede31183e2121388be0f66fe9d7ac4e93a | /R/parse_uc.R | f3fe12730d9bac318a183dbd0d785ea120e2572c | [
"MIT"
] | permissive | vmikk/metagMisc | a01151347b620745b278265700e503dc74669af5 | 310b1a40951de46348084e150d7471ed66feb0c8 | refs/heads/master | 2023-08-31T08:41:27.684905 | 2023-08-28T10:09:50 | 2023-08-28T10:09:50 | 76,531,351 | 38 | 12 | MIT | 2019-07-29T06:12:12 | 2016-12-15T06:40:05 | R | UTF-8 | R | false | false | 4,631 | r | parse_uc.R | ## TO DO - add examples
#' @title Parse UC files (clustering output from USEARCH, VSEARCH, SWARM)
#'
#' @param x File name (typically with .uc extension)
#' @param map_only Logical, return only the mapping (correspondence of query and cluster)
#' @param package Which package to use ("base" or "data.table")
#' @param rm_dups Logical, remove duplicated entries (default, TRUE)
#'
#' @details USEARCH cluster format (UC) is a tab-separated text file.
#' Description of the UC file format (from USEARCH web-site: http://www.drive5.com/usearch/manual/opt_uc.html):
#' 1 Record type S, H, C or N (see table below).
#' 2 Cluster number (0-based).
#' 3 Sequence length (S, N and H) or cluster size (C).
#' 4 For H records, percent identity with target.
#' 5 For H records, the strand: + or - for nucleotides, . for proteins.
#' 6 Not used, parsers should ignore this field. Included for backwards compatibility.
#' 7 Not used, parsers should ignore this field. Included for backwards compatibility.
#' 8 Compressed alignment or the symbol '=' (equals sign). The = indicates that the query is 100% identical to the target sequence (field 10).
#' 9 Label of query sequence (always present).
#' 10 Label of target sequence (H records only).
#'
#' Record Description
#' H Hit. Represents a query-target alignment. For clustering, indicates the cluster assignment for the query.
#' S Centroid (clustering only). One S record per cluster, giving the centroid sequence label in the 9th field. Redundant with the C record.
#' C Cluster record (clustering only). The 3rd field is the cluster size and the 9th field the label of the centroid sequence.
#' N No hit (database search without clustering only). Indicates that no accepts were found.
#'
#' @return Data.frame or data.table with (at least) the columns Query and OTU.
#' @export
#' @import data.table
#'
#' @references
#' http://www.drive5.com/usearch/manual/opt_uc.html
#' @examples
#' parse_uc("usearch_OTUs.uc", map_only = F)
#' parse_uc("usearch_OTUs.uc", map_only = T)
#'
parse_uc <- function(x, map_only = FALSE, package = "data.table", rm_dups = TRUE){
  ## Validate `package` up front; unknown values previously fell through to
  ## a cryptic "object 'ii' not found" error.
  package <- match.arg(package, choices = c("data.table", "base"))
  ## Labels may carry ";size=..." annotations; keep only the part before the
  ## first ';'. Robust to empty strings (returns NA) — the original
  ## do.call(rbind, strsplit(...)) silently recycled ragged results.
  first_token <- function(v) {
    vapply(strsplit(v, split = ";", fixed = TRUE),
           function(s) if (length(s) > 0) s[[1]] else NA_character_,
           character(1))
  }
  ## Load data with built-in R commands
  if (package == "base") {
    ## (message fixed: previously read "will be duplicated in future releas")
    cat("Option to use the `base` package is deprecated and may be removed in a future release of metagMisc!\n")
    ## Read file
    ii <- read.delim(x, header = FALSE, stringsAsFactors = FALSE)
    ## Remove redundant S-records (C records carry the same centroid info)
    redund <- ii$V1 == "S"
    if (any(redund)) { ii <- ii[-which(redund), ] }
    ## Split query and target labels
    ii$Query <- first_token(ii$V9)
    ii$OTU <- first_token(ii$V10)
    ## For cluster (C) records the centroid is its own OTU.
    ## (The original indexed the right-hand side with V1 %in% c("S", "C"),
    ## which only lined up because S records had already been dropped.)
    centroids <- which(ii$V1 == "C")
    ii$OTU[centroids] <- ii$Query[centroids]
    ## Check for duplicated Query-OTU pairs
    dups <- duplicated(ii[, c("Query", "OTU")])
    if (any(dups)) {
      cat("Warning: duplicated rows found!\n")
      ## Remove duplicated seqs
      if (rm_dups == TRUE) {
        cat("..", sum(dups), " duplicates removed\n")
        ii <- ii[-which(dups), ]
      }
    }
    if (map_only == TRUE) {
      ii <- ii[, c("Query", "OTU")]
    }
  }
  ## Load data with the `data.table` package
  if (package == "data.table") {
    ## Read file
    ii <- fread(file = x, header = FALSE, sep = "\t")
    ## Remove redundant S-records
    ii <- ii[ ! V1 %in% "S" ]
    ## Split query and target labels
    ii[, Query := tstrsplit(V9, ";", keep = 1) ]
    ii[, OTU := tstrsplit(V10, ";", keep = 1) ]
    ## For cluster (C) records the centroid is its own OTU
    ii[ V1 %in% "C", OTU := Query ]
    ## Check for duplicated Query-OTU pairs
    if (nrow(ii[, .(Query, OTU)]) != nrow(unique(ii[, .(Query, OTU)]))) {
      cat("Warning: duplicated rows found!\n")
      ## Remove duplicated seqs
      if (rm_dups == TRUE) {
        dups <- duplicated(ii[, .(Query, OTU)])
        cat("..", sum(dups), " duplicates removed\n")
        ii <- ii[ ! dups ]
      }
    }
    ## Subset to Query - OTU columns only
    if (map_only == TRUE) {
      ii <- ii[, .(Query, OTU)]
    }
  }
  return(ii)
}
|
2802ef47981078810792523e0d8baa8103b62c1e | 6ceab1bf9c435b523d2f8e7e9440da39770d741b | /R/f7Icon.R | 44ca993a6f4597622dce52cf2ddf0d0f7b8e7eb9 | [] | no_license | RinteRface/shinyMobile | a8109cd39c85e171db893d1b3f72d5f1a04f2c62 | 86d36f43acf701b6aac42d716adc1fae4f8370c6 | refs/heads/master | 2023-07-25T16:28:41.026349 | 2022-11-25T17:04:29 | 2022-11-25T17:04:29 | 139,186,586 | 328 | 92 | null | 2023-03-26T05:58:53 | 2018-06-29T19:13:06 | R | UTF-8 | R | false | false | 2,313 | r | f7Icon.R | #' @title Framework7 icons
#'
#' @description Use Framework7 icons in shiny applications,
#' see complete list of icons here : \url{https://framework7.io/icons/}.
#'
#' @param ... Icon name and \link{f7Badge}.
#' @param lib Library to use: NULL, "ios" or "md". Leave \code{NULL} by default. Specify, md or ios
#' if you want to hide/show icons on specific devices.
#' @param color Icon color, if any.
#' @param style CSS styles to be applied on icon, for example
#' use \code{font-size: 56px;} to have a bigger icon.
#' @param old Deprecated. This was to handle old and new icons but shinyMobile only uses
#' new icons from now. This parameter will be removed in a future release.
#'
#' @examples
#' if(interactive()){
#' library(shiny)
#' library(shinyMobile)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "Icons",
#' f7SingleLayout(
#' navbar = f7Navbar(title = "icons"),
#' f7List(
#' f7ListItem(
#' title = tagList(
#' f7Icon("envelope")
#' )
#' ),
#' f7ListItem(
#' title = tagList(
#' f7Icon("envelope_fill", color = "green")
#' )
#' ),
#' f7ListItem(
#' title = f7Icon("home", f7Badge("1", color = "red"))
#' ),
#' f7ListItem(
#' title = f7Icon("envelope", lib = "md"),
#' "This will not appear since only for material design"
#' )
#' )
#' )
#' ),
#' server = function(input, output) {}
#' )
#' }
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
f7Icon <- function(..., lib = NULL, color = NULL, style = NULL, old = NULL) {
call_ <- as.list(match.call())
if (!is.null(call_$old)) {
warning(
"Deprecated. This was to handle old and new icons. ",
"This parameter will be removed in a future release."
)
}
if (!is.null(lib)) {
if (identical(lib, "ios")) {
iconCl <- "icon f7-icons ios-only"
}
if (identical(lib, "md")) {
iconCl <- "icon material-icons md-only"
}
} else {
# class icon is necessary so that icons with labels render well,
# for instance
iconCl <- "icon f7-icons"
}
if (!is.null(color)) {
iconCl <- paste0(iconCl, " color-", color)
}
iconTag <- shiny::tags$i(class = iconCl, style = style, ...)
htmltools::browsable(iconTag)
}
|
9abd8165fd0ab8f9c8e5ac51de4413c131ff29d8 | 799468ce526db6f14f2aa5003c601e259e5f0d62 | /man/betaval.Rd | 7786bae2a06113db06f8b2913a35bb803d493f5b | [] | no_license | kostask84/popbio | 6aa45015bfc1659bd97f2ce51ad5246b8d434fac | 682d3ffb922dfab4fd2c7fc7179af2b0d926edfd | refs/heads/master | 2021-05-09T02:01:42.050755 | 2017-02-09T21:44:20 | 2017-02-09T21:44:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,942 | rd | betaval.Rd | \name{betaval}
\alias{betaval}
\title{ Generate beta-distributed random numbers }
\description{
This function calculates a random number
from a beta distribution and uses the R function pbeta(x,vv,ww).
}
\usage{
betaval(mn, sdev, fx=runif(1))
}
\arguments{
\item{mn}{ mean rate between 0 and 1}
\item{sdev}{ standard deviation }
\item{fx}{cumulative distribution function, default is a random
number between 0 and 1}
}
\details{
This function is used by \code{\link{vitalsim}} .
}
\value{
Returns a random beta value
}
\source{converted Matlab code from Box 8.3 in Morris and Doak (2002)
}
\references{ Morris, W. F., and D. F. Doak. 2002. Quantitative conservation
biology: Theory and practice of population viability analysis.
Sinauer, Sunderland, Massachusetts, USA. }
\author{Original MATLAB code by Morris and Doak (2002: 277- 278),
adapted to R by Patrick Nantel, 20 June 2005.}
%\note{}
\seealso{Beta Distribution \code{\link{rbeta}} }
\examples{
betaval(.5, sd=.05)
betaval(.5, sd=.05)
## histogram with mean=0.5 and sd=0.05
x <- sapply(1:100, function(x) betaval(0.5, 0.05))
hist(x, seq(0,1,.025), col="green", ylim=c(0,25), xlab="Value",
main="Beta distribution with mean=0.5 and sd=0.05")
# generates a graph similar to Figure 8.2 A in Morris & Doak (2002:264)
# a much simpler version of BetaDemo in Box 8.3
x<-matrix(numeric(3*1000), nrow=3)
sd <-c(.05, .25, .45)
for (i in 1:3)
{
for (j in 1:1000)
{
x[i,j]<-betaval(.5,sd[i])
}
}
plot(0,0,xlim=c(0,1), ylim=c(0,0.4), type='n', ylab='Frequency',
xlab='Value', main="Examples of beta distributions")
for (i in 1:3)
{
h<-hist(x[i,], plot=FALSE, breaks=seq(0,1,.02) )
lines(h$mids, h$counts/1000, type='l', col=1+i, lwd=2, lty=i)
}
legend(0.5,0.4, c("(0.50, 0.05)", "(0.50, 0.25)", "(0.50, 0.45)"),
lty=1:3, lwd=2, col=2:4, title="mean and sd")
}
\keyword{ survey }
|
287ea0a7447b67ce28d30cf8193280fc9e5fe3cd | ef8d66ebaeaf27fa1aed1cf01ebd70ce8224c5cd | /man/get_top_effects_log10p.Rd | e841ca4864fe24f98bbc3f640e25c84b8367c15c | [] | no_license | Alice-MacQueen/CDBNgenomics | dd6c8026156d91be7f12a9857d0ebeb89c32c384 | 6b00f48eb1c6eec848f11416d7a5fd752cd778bd | refs/heads/master | 2021-07-08T06:15:56.774003 | 2020-08-12T19:28:32 | 2020-08-12T19:28:32 | 178,261,021 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 558 | rd | get_top_effects_log10p.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cdbn_bigsnp2mashr.R
\name{get_top_effects_log10p}
\alias{get_top_effects_log10p}
\title{Step One of bigsnp2mashr}
\usage{
get_top_effects_log10p(path, gwas_rds, phenotype, numSNPs, markers)
}
\arguments{
\item{path}{Path}
\item{gwas_rds}{RDS file with gwas results}
\item{phenotype}{Character vector. Single phenotype name}
\item{numSNPs}{Integer. Number of top SNPs to choose.}
\item{markers}{Marker CHR & POS for the GWAS you ran}
}
\description{
Step One of bigsnp2mashr
}
|
aeb6e1ac84a82b989e6b30e30bb1cf2b2c1ed971 | 2863c6a6081752860eac69843ff5f4daa99bc0b1 | /man/GenerateRandomValues.Rd | ba03fda039961bd0dcf47a88f1a75e5cd866b5ea | [] | no_license | djhwueng/BMhyb | 6bf5cb87fd9a86e9ab66f69b50aaad608d5bd5f8 | a0283ab96b05cfea8b7ae41d901ed27c965a92f1 | refs/heads/master | 2020-03-13T15:51:47.381014 | 2018-03-27T18:38:06 | 2018-03-27T18:38:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 765 | rd | GenerateRandomValues.Rd | \name{GenerateRandomValues}
\alias{GenerateRandomValues}
\title{
Generate random values
}
\description{
Generate random value for parameters
}
\usage{
GenerateRandomValues(data, free.parameters, lower, upper)
}
\arguments{
\item{data}{
numerical vector that contains trait data
}
\item{free.parameters}{
TRUE/FALSE argument for free parameters
}
\item{lower}{
lower bound for the new values
}
\item{upper}{
upper bound for the new values
}
}
\details{
The function generates random values for parameter estimate. The bound for each paramter is set up using exponential and uniform distribution accordingly.
}
\value{
A vector contains the generate values for parameters.
}
\author{
Brian O'Meara, Dwueng-Chwuan Jhwueng.
}
|
4a6537873f9839f422012aae5aea06a855435c64 | 9ea744d0e28fe4fc4d3e1e00f7ec53ea054b8cd0 | /man/reexports.Rd | 7cc50495c683df65501543846da6fd0ffa7c8cbb | [] | no_license | YuLab-SMU/treeio | 8d434454f25336859e0e0c12fc65029a310b638b | c3f7b8e6df5f768f53e33b46b3e13dd529bb4f56 | refs/heads/devel | 2023-09-01T19:44:13.166325 | 2023-08-25T04:27:18 | 2023-08-25T04:27:18 | 75,700,092 | 56 | 17 | null | 2023-08-25T04:25:14 | 2016-12-06T06:05:56 | R | UTF-8 | R | false | true | 2,141 | rd | reexports.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R, R/ape.R, R/method-as-phylo.R,
% R/reexport.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{treedata}
\alias{read.tree}
\alias{read.nexus}
\alias{rtree}
\alias{write.tree}
\alias{write.nexus}
\alias{Nnode}
\alias{Ntip}
\alias{is.rooted}
\alias{root}
\alias{as.phylo}
\alias{\%>\%}
\alias{\%<>\%}
\alias{get.fields}
\alias{get.data}
\alias{as.treedata}
\alias{ancestor}
\alias{parent}
\alias{child}
\alias{offspring}
\alias{rootnode}
\alias{nodeid}
\alias{nodelab}
\alias{MRCA}
\alias{full_join}
\alias{inner_join}
\alias{as_tibble}
\alias{tibble}
\alias{.data}
\alias{drop.tip}
\alias{isTip}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{ape}{\code{\link[ape]{as.phylo}}, \code{\link[ape:root]{is.rooted}}, \code{\link[ape:summary.phylo]{Nnode}}, \code{\link[ape:summary.phylo]{Ntip}}, \code{\link[ape]{read.nexus}}, \code{\link[ape]{read.tree}}, \code{\link[ape]{root}}, \code{\link[ape]{rtree}}, \code{\link[ape]{write.nexus}}, \code{\link[ape]{write.tree}}}
\item{dplyr}{\code{\link[dplyr:mutate-joins]{full_join}}, \code{\link[dplyr:mutate-joins]{inner_join}}}
\item{magrittr}{\code{\link[magrittr:compound]{\%<>\%}}, \code{\link[magrittr:pipe]{\%>\%}}}
\item{rlang}{\code{\link[rlang:dot-data]{.data}}}
\item{tibble}{\code{\link[tibble]{as_tibble}}, \code{\link[tibble]{tibble}}}
\item{tidytree}{\code{\link[tidytree]{ancestor}}, \code{\link[tidytree:reexports]{as.phylo}}, \code{\link[tidytree]{as.treedata}}, \code{\link[tidytree]{child}}, \code{\link[tidytree:drop.tip-methods]{drop.tip}}, \code{\link[tidytree:get.data-methods]{get.data}}, \code{\link[tidytree:get.fields-methods]{get.fields}}, \code{\link[tidytree]{isTip}}, \code{\link[tidytree]{MRCA}}, \code{\link[tidytree]{nodeid}}, \code{\link[tidytree]{nodelab}}, \code{\link[tidytree]{offspring}}, \code{\link[tidytree]{parent}}, \code{\link[tidytree]{rootnode}}, \code{\link[tidytree]{treedata}}}
}}
|
2ed1c6054808f87ed6b1bba187082d7ddf81beb5 | 7b202240d021c315637b6ad8b1a2dc1bbade984a | /Function.proportion.events.captured.R | ccfbf9f6ee4ddbad9cdea93bc5588e3e30b34ceb | [] | no_license | summerx0821/Commentary-on-Tayob-and-Murray-2014 | 6ea525e23c81f2c85fac9845f8c8fa633ef8b209 | 1dfcc003359918ab536ee09dcf916cf4368e5b81 | refs/heads/master | 2020-03-15T09:31:16.115794 | 2020-03-08T20:40:57 | 2020-03-08T20:40:57 | 132,076,946 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,280 | r | Function.proportion.events.captured.R |
####################################################################################################################################
# PROGRAM NAME : Function.proportion.events.captured.R
#
# AUTHOR : 3JUL2018 by Meng Xia
#
# EXECUTED UNDER : R 3.4.1 (2017-06-30)
#
# FUNCTION NAME : prop.events.captured
#
# DESCRIPTION : To calculate the proportion of events captured for a given
# s : overall follow up time
# lambda : control group recurrent event rate
# a : space follow-up windows every a unit apart
#
# EXAMPLE : prop.events.captured(s = 48,
# lambda = 1/3,
# a = 1.5)
#
# EXAMPLE OUTPUT : Time difference of 4.705428 mins
# [1] "The proportion of events captured is 0.8"
#
####################################################################################################################################
rm(list=ls())
prop.events.captured <- function(s, lambda, a){
time1 <- Sys.time()
#Compute the number of windows
b=ceiling(s/a)
# Function to compute the average number of events missed
sumsum <- function(J){
ww <- 1:b
jj <- 2:J
prob_w <- function (wj){
w <- wj[1]
j <- wj[2]
#Function to calculate pdf_Gamma(j,lambda) (r) * cdf_Gamma(l, lambda) (s-r)
prob_RR <- function(j, l, w){
f <- function(R_j){dgamma(R_j, shape=j, rate=lambda) * pgamma(s-R_j, shape = l, rate = lambda)}
integral <- integrate(f, lower = 0, upper = min(a*w, s))$value
return(integral)
}
#Function to calculate pdf_Gamma(j-1, lambda) (r) * pdf_Exp(lambda) (g) * cdf_Gamma(l, lambda) (s-r-g)
prob_RGR <- function(j, l, w){
InnerFunc <- function(R_j_1, G_j)
{dgamma(R_j_1, shape=j-1, rate=lambda) * dexp(G_j, rate = lambda) * pgamma(s-R_j_1-G_j, shape = l, rate = lambda)}
InnerIntegral <- function(R_j_1)
{ sapply(R_j_1, function(x) { integrate(function(G_j) InnerFunc(x, G_j), 0, min(a*w, s)-x)$value }) }
OuterIntegral <- integrate(InnerIntegral, 0, (w-1)*a)$value
return(OuterIntegral)
}
#Calculate numerator and denominator
if (w == 1) {
top1 <- prob_RR(j=j, l=J-j, w=w)
top2 <- prob_RR(j=j, l=J-j+1, w=w)
}
if (w > 1) {
top1 <- prob_RR(j=j, l=J-j, w=w)-prob_RGR(j=j, l=J-j, w=w)
top2 <- prob_RR(j=j, l=J-j+1, w=w)-prob_RGR(j=j, l=J-j+1, w=w)
}
bottom <- lambda^J*exp(-lambda*s)*s^J/gamma(J+1)
return((top1-top2)/bottom)
}
element <- apply(expand.grid(ww,jj),1,prob_w)
return(sum(element))
}
prop=array()
for (J in 1:100){
if (J==1) {prop[J] <- (s*lambda)^J*exp(-lambda*s)/factorial(J)/(1-(s*lambda)^0*exp(-lambda*s)/factorial(0))}
if (J>1) {prop[J] <- (J-sumsum(J))/J*(s*lambda)^J*exp(-lambda*s)/factorial(J)/(1-(s*lambda)^0*exp(-lambda*s)/factorial(0))}
}
#Output
print(Sys.time()-time1)
return(paste("The proportion of events captured is",round(sum(prop), digits = 2)))
}
prop.events.captured(s = 48,
lambda = 1/3,
a = 1.5)
|
d68c9bf7f38510996dc99e32ec720e463b69454a | b4c486650810278808ccc663adb9c5882c0749c6 | /man/eu_all.Rd | 09490d179ab87142bbb3552b29b828389cd43c1d | [
"MIT"
] | permissive | elenius/agedecision | 75ae1815ecb2ec980e5a9d1c1e4ee95e74368920 | defa324d32dff1fe1ee226e713b9038981519f97 | refs/heads/master | 2020-04-08T02:31:07.449537 | 2018-12-08T15:59:16 | 2018-12-08T15:59:16 | 158,938,166 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 916 | rd | eu_all.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/result.R
\name{eu_all}
\alias{eu_all}
\title{Förväntad nytta för alla alternativ/metoder}
\usage{
eu_all(prevalence = 0.84, lowest.utility = 0, age.min = 15,
age.max = 21, zero.limit.child = 15, zero.limit.adult = 21)
}
\arguments{
\item{prevalence}{Prevalens, andel vuxna.}
\item{lowest.utility}{Nyttan för en felklassificerad vuxen.}
\item{age.min}{Lägsta ålder i åldersfördelningen, se ekvation (1).}
\item{age.max}{Högsta ålder i åldersfördelningen, se ekvation (1).}
\item{zero.limit.child}{Anger ålder då lägsta nytta för en felaktig klassificering av ett barn, se ekvation (3) och (4).}
\item{zero.limit.adult}{Anger ålder då lägsta nytta för en felklassificering av en vuxen, se ekvation (3) och (4).}
}
\description{
Förväntad nytta (Expected Utility) för alla metoder, se ekvation (16) och (17).
}
|
1ef574d61d651bd5cb4e39aebf8c3fca31bbb740 | 56c28601cbc951d53b3b01b40dcf96c69f431cbb | /Getting and Cleaning Data/run_analysis.R | d739e103cbae3801e598aacf7ab1a4e6f3e72287 | [] | no_license | chacemerritt/datasciencecoursera | 14e4c3524653e59dbdb994712e2b4b1faf1a7f40 | 938dc2fb24df89c95ea212b6ef8b5f294c75edeb | refs/heads/master | 2021-01-01T15:55:54.245317 | 2017-08-28T16:31:08 | 2017-08-28T16:31:08 | 97,731,415 | 0 | 1 | null | 2017-08-07T03:06:43 | 2017-07-19T15:18:08 | null | UTF-8 | R | false | false | 2,445 | r | run_analysis.R | getwd()
# Getting & Cleaning Data course project: build a tidy summary of the UCI HAR
# ("Human Activity Recognition Using Smartphones") data set. The working
# directory must contain the unzipped data: "test/", "train/",
# "activity_labels.txt" and "features.txt".
# setwd("<path to UCI HAR Dataset>")  # was: setwd() with no argument, which errors
#loads activity data sets from test and train within working directory
testactivity <- read.table("test/Y_test.txt", header = FALSE)
trainactivity <- read.table("train/Y_train.txt", header = FALSE)
#loads subject data sets from test and train
testsubject <- read.table("test/subject_test.txt", header = FALSE)
trainsubject <- read.table("train/subject_train.txt", header = FALSE)
#loads features data sets from test and train
testfeatures <- read.table("test/X_test.txt", header = FALSE)
trainfeatures <- read.table("train/X_train.txt", header = FALSE)
#Looking at the fragmented data in different txt files
head(testactivity); head(trainactivity); head(testsubject); head(trainsubject); head(testfeatures); head(trainfeatures)
#Merges the training and the test sets to create one data set per component
activity <- rbind(trainactivity, testactivity)
subject <- rbind(trainsubject, testsubject)
features <- rbind(trainfeatures, testfeatures)
#changes factor levels(1-6) to match activity labels
labels <- read.table("activity_labels.txt", header = FALSE)
activity$V1 <- factor(activity$V1, levels = as.integer(labels$V1), labels = labels$V2)
#names activity and subject columns
names(activity) <- c("activity")
names(subject) <- c("subject")
#names feature columns from features text file
featurestxt <- read.table("features.txt", header = FALSE)  # was: head=FALSE (partial argument matching)
names(features) <- featurestxt$V2
#selects columns with mean and standard deviation data and subsetting
meanstdev <- as.character(featurestxt$V2[grep("mean\\(\\)|std\\(\\)", featurestxt$V2)])
subdata <- subset(features, select = meanstdev)
#Combines data sets with activity names and labels
subjectactivity <- cbind(subject, activity)
finaldata <- cbind(subdata, subjectactivity)
#Clarifying time and frequency variables
names(finaldata) <- gsub("^t", "time", names(finaldata))
names(finaldata) <- gsub("^f", "frequency", names(finaldata))
#Creates new data set with subject and activity means
#(aggregate warns when averaging the factor column; the warning is expected)
suppressWarnings(cleandata <- aggregate(finaldata, by = list(finaldata$subject, finaldata$activity), FUN = mean))
names(cleandata)[1] <- "Subject"  # was: a mix of colnames() and names()
names(cleandata)[2] <- "Activity"
#removes avg and stdev for non-aggregated sub and act columns
cleandata <- cleandata[1:68]
#Looking at all the data
head(cleandata)
#Writes tidy data to text file (was: row.name =, relying on partial matching)
write.table(cleandata, file = "cleandata.txt", row.names = FALSE)
|
03041c64d7591c53110d01468a6e1f1bdc307a73 | 7d39794c02d83d3aa534a4b8a43c9074c52fb873 | /Adelanto Sueldo/AdwordsScript_ADS.R | c683433526fb1bbef28896625fbb16c58cb60d93 | [] | no_license | ccespedeschristian/sbpPeru | 1d91010be6500e970f3d5078ac9b1fde177232cd | e105b0a9919b395655b4ffeaa04a048005cc162a | refs/heads/master | 2020-03-28T13:44:54.363323 | 2018-09-14T03:51:34 | 2018-09-14T03:51:34 | 148,425,111 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,369 | r | AdwordsScript_ADS.R | library(googleAnalyticsR)
library(openxlsx)
library(ggplot2)
library(dplyr)
library(RColorBrewer)
library(stringr)
# Interactive Google Analytics OAuth login (googleAnalyticsR).
ga_auth()
# Google Analytics view id used for all queries below.
scotiabank <- 86666711
# CAMPAIGN ANALYSIS
# Keep only "Adelanto Sueldo" (salary-advance) campaigns, matched by a regex
# on the campaign name.
campanaS <- dim_filter(dimension = "campaign", operator = "REGEXP", expressions="(.*)_Adelanto_Sueldo_(.*)")
Sads <- filter_clause_ga4(list(campanaS))
# Sessions, goal-3 completions and ad cost for Jan-May 2018, broken down by
# month / campaign / ad group / ad content; anti_sample avoids GA sampling.
Adwords_ADS <- google_analytics(scotiabank,
                                date_range = c("2018-01-01","2018-05-31"),
                                metrics = c("sessions", "goal3Completions", "adCost"),
                                dimensions = c("month","campaign","adGroup", "adContent"),
                                dim_filters = Sads,
                                anti_sample = TRUE)
# Campaign network extracted from the campaign name (GSP / GDN / SEM).
Adwords_ADS$NCampana <- str_extract(Adwords_ADS$campaign, "GSP|GDN|SEM")
# Monthly per-network totals plus cost per lead (CPL) and conversion rate (TC).
camp_ADS <- Adwords_ADS %>%
  group_by(NCampana, month) %>%
  summarise(Solicitudes = sum(goal3Completions), Visitas = sum(sessions), adCost = sum(adCost)) %>%
  mutate(CPL= adCost/Solicitudes, TC =Solicitudes/Visitas)
# Shared ggplot2 theme tweak: enlarge all plot text to 16 pt.
my_font <- function() {
  big_text <- element_text(size = 16)
  theme(text = big_text)
}
# Bar chart: leads (Solicitudes) per campaign type, dodged by month,
# with campaign types ordered by descending lead count and counts
# labelled above each bar.
i <- ggplot(camp_ADS, aes(reorder(NCampana, -Solicitudes) , Solicitudes, fill = month))
i + geom_bar(width=0.7, stat = "identity", position = "dodge") + theme_light() + geom_text(aes(label = Solicitudes, y= Solicitudes + 0.05), position = position_dodge(0.7), vjust= -1) + my_font()
|
36d0f132a2c895b1ba4cdd9a8231eb14c05274b0 | 4cf6967f00c6e2f725daf487e1c1fb2ca075c1e2 | /man/readData.Rd | 1c1a97571433217d3cc264e3eaf1494d61f2a536 | [] | no_license | nwagenbrenner/windtools | a262a38a7af24aa6f983868e0e268d423b926ea2 | f656f12544944dcfe1b4eb12f081b07acd12610c | refs/heads/master | 2021-01-17T13:24:19.065858 | 2019-05-23T16:17:10 | 2019-05-23T16:17:10 | 11,617,845 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 559 | rd | readData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{readData}
\alias{readData}
\title{Read wind data}
\usage{
readData(fileName)
}
\arguments{
\item{fileName}{full path to wind data file to be read in}
}
\value{
dataframe for use by other windtools functions
}
\description{
\code{readData} reads wind data into a dataframe
}
\details{
This function reads in a file containing wind data. The input
file must contain the following data in this order:
'identifier', 'lat', 'lon', 'datetime', 'obs_speed', obs_dir'
}
|
6a0f91411f91935a523d4d949fac608c9404e8a7 | b122664f52ee053806388ccbeb2e54657b6f801a | /1.R-programming/2.r-containers/caret.R | 4875c25fde788a54b4884151e8b79d0d1c046cb3 | [] | no_license | rmatam/Data-Science2 | cd5d13a9a0fa1841869652e904fa2029b1900ed4 | 6aec5d996a34fa34c7f6213b404588dc5e93f5fa | refs/heads/master | 2021-05-04T04:57:27.078540 | 2016-10-14T19:06:36 | 2016-10-14T19:06:36 | 70,911,017 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,258 | r | caret.R | library(caret)
library(ggplot2)
library(lattice)

data(iris)

# Install RWeka only when it is missing; the original unconditional
# install.packages() call re-downloaded the package on every run.
if (!requireNamespace("RWeka", quietly = TRUE)) {
  install.packages("RWeka")
}
library(RWeka)

# Inspect caret's tuning-grid metadata for the Weka-based models.
# No other hyperparameter values, no random search
getModelInfo("J48")
# No other hyperparameter values, no random search
getModelInfo("PART")
# This needs sampling for random search
getModelInfo("JRip")
# Also it appears NumOpt is now named O in RWeka
RWeka::WOW("JRip")

# Other values of these hyperparameters run when called directly through
# RWeka (i.e. outside caret's tuning grid):

# J48 called through RWeka (pruning confidence C)
j48c1 <- J48(Species ~ ., data = iris, control = Weka_control(C = 0.2))
j48c2 <- J48(Species ~ ., data = iris, control = Weka_control(C = 0.25))

# JRip called through RWeka (number of optimisation runs O)
jripc1 <- JRip(Species ~ ., data = iris, control = Weka_control(O = 3))
jripc2 <- JRip(Species ~ ., data = iris, control = Weka_control(O = 100))

# PART called through RWeka (confidence threshold and pruning)
# NOTE(review): partc3/partc4 repeat the exact settings of partc1/partc2.
partc1 <- PART(Species ~ ., data = iris, control = Weka_control(threshold = 0.25, pruned = "yes"))
partc2 <- PART(Species ~ ., data = iris, control = Weka_control(threshold = 0.2, pruned = "no"))
partc3 <- PART(Species ~ ., data = iris, control = Weka_control(threshold = 0.25, pruned = "yes"))
partc4 <- PART(Species ~ ., data = iris, control = Weka_control(threshold = 0.2, pruned = "no"))
|
466418ea3316504eff15d24f8f4503337bcdc3e4 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/FreeSortR/examples/getStress.Rd.R | 7da71e12a1bf4363e66aee8e74da8dd077ae050a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 233 | r | getStress.Rd.R | library(FreeSortR)
### Name: getStress
### Title: Gets the stress value
### Aliases: getStress
### ** Examples
# Build a sorting partition from the AromaSort data, fit a 2-dimensional
# MDS configuration, and extract the resulting stress value.
data(AromaSort)
Aroma<-SortingPartition(AromaSort)
resMds<-MdsSort(Aroma,ndim=2)
stress<-getStress(resMds)
|
cca1a50fe8007d3877098438ed420c5a61143ff5 | af1f023425520b2c71ae0b977f0dd4d5901b85de | /mini-analysis.R | 2051ae977077921274484f3c9a3af0a43d2f49c5 | [] | no_license | otherbodies/abalone | d73a458cb992c846dbbd0d6f7a0ad600fcf55bdc | 8408d9134fe73f88e0ada8e2e751f4b1a24d8b1d | refs/heads/master | 2021-01-19T01:22:25.036609 | 2018-07-01T13:31:53 | 2018-07-01T13:31:53 | 27,663,640 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,900 | r | mini-analysis.R | wholedout_mini = subset(wholedata, outlier !="1" & condition !="sys" & task == "mini")
#wrongly coded condition order in mini - correct is: trunk, head-down, str, eyes
# correcting this
# Swap the mislabeled "str" and "hd" condition codes row by row; the
# else-if guarantees each row is swapped at most once.
for (i in 1:nrow(wholedout_mini)){
  if(wholedout_mini$condition[i]=="str"){
    wholedout_mini$condition[i]="hd"
  }
  else if(wholedout_mini$condition[i]=="hd"){
    wholedout_mini$condition[i]="str"
  }
}
#export to Unity
write.table(wholedout_mini, file = "wholedout-mini-unity.csv", sep=",", row.names = FALSE)
## mini angles
# here - mini regression lines
load("data.Rda")
library(plyr)
library(reshape2)
meansM = wholedout_mini
#names(means2) = c("m","condition","task","participant","z","x","y")
# Mirror the X axis before fitting the regression lines.
meansM$X = meansM$X*-1
# Fit a simple linear regression in the requested body plane and return
# the slope of the fitted line expressed as an angle in degrees.
#
# subs:  data frame with a column z, plus y (head plane) or X (trunk plane)
# plane: "head" (regress z on y) or "trunk" (regress z on X); any other
#        value now raises an informative error instead of the original
#        "object 'slop' not found" failure.
#
# Returns the angle as a character string formatted to 3 significant
# digits (callers reshape these values with dcast, so the character
# return type is kept).
computeSlopAnglesM = function(subs, plane)
{
  if (plane == "head") {
    slop <- lm(subs$z ~ subs$y)
  } else if (plane == "trunk") {
    slop <- lm(subs$z ~ subs$X)
  } else {
    stop("'plane' must be \"head\" or \"trunk\"", call. = FALSE)
  }
  # coef() instead of the partially matched `slop$coef`; element 2 is the
  # slope of the fitted line.
  angle <- format(atan(coef(slop)[[2]]) * 360 / 2 / pi, digits = 3)
  return(angle)
}
# Per-participant, per-condition regression-line angles in the head (z~y)
# and trunk (z~X) planes.
angleSlopeMyz = ddply(meansM,.(participant,condition),computeSlopAnglesM,plane="head")
angleSlopeMxz = ddply(meansM,.(participant,condition),computeSlopAnglesM,plane="trunk")
#angleSlope = subset(angleSlope, task != "mini")
# Reshape to wide: one row per participant, one column per condition.
angSlopWideMyz = dcast(angleSlopeMyz, participant ~ condition)
angSlopWideMxz = dcast(angleSlopeMxz, participant ~ condition)
# NOTE(review): `angSlopWide` is not defined in this script (only the
# ...Myz / ...Mxz variants are); this line presumably relied on an object
# from an earlier session -- verify before re-running.
write.table(angSlopWide, file = "angles-slopes.csv", sep=";", row.names = FALSE)
## binding mini and means from months for inter session consistency
# Pair each participant's "tr" mini-task positions with the matching
# month-task means and compute the 3-D distance between the two sessions.
interconsist1 = subset(wholedout_mini,condition=="tr")
interconsist2 = subset(means,condition=="tr" & task=="month" & rounds=="both")
inter_total = merge(interconsist1,interconsist2,by=c("participant","m","type","rounds","condition"))
inter_total$dist = with(inter_total,sqrt((z.x-z.y)^2+(X.x-X.y)^2+(y.x-y.y)^2))
inter_means = aggregate(inter_total[c("dist")],inter_total[c("participant","type")],mean)
inter_lengths = aggregate(inter_total[c("dist")],inter_total[c("participant")],length)
# bootstrap effect size with CI - with bootES package
# NOTE(review): the hard-coded Windows lib.loc makes this machine-specific.
library("bootES", lib.loc="~/R/win-library/3.2")
test = inter_total[,c("type","dist")]
boo = bootES(data=test,R=2000,data.col="dist",group.col="type",contrast=c("control","synaesthete"),effect.type="r",plot=F)
bootES(data=inter_means,R=20000,data.col="dist",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
write.table(inter_total, file = "inter.csv", sep=";", row.names = FALSE)
write.table(inter_means, file = "inter_str_means.csv", sep=";", row.names = FALSE)
## MONTHS ANALYSIS - PART OF MAIN ANALYSIS
## Testing hypothesis that preferred FoR is different for syn and for controls
## group comparisons 10/10
mt = subset(mainTable,rounds=="both")
# Bootstrapped group-contrast effect sizes (Pearson r) with CIs for each
# frame-of-reference fit measure.
b1 = bootES(data=mt,R=20000,data.col="head_fit_mixed",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b2 = bootES(data=mt,R=20000,data.col="trunk_fit_mixed",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b3 = bootES(data=mt,R=20000,data.col="room_fit_mixed",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b4 = bootES(data=mt,R=20000,data.col="head_fit_month",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b5 = bootES(data=mt,R=20000,data.col="trunk_fit_month",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b6 = bootES(data=mt,R=20000,data.col="room_fit_month",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b7 = bootES(data=mt,R=20000,data.col="head_fit_fur",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b8 = bootES(data=mt,R=20000,data.col="trunk_fit_fur",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b9 = bootES(data=mt,R=20000,data.col="room_fit_fur",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b10 = bootES(data=mt,R=20000,data.col="head_fit_reg",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b11 = bootES(data=mt,R=20000,data.col="trunk_fit_reg",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b12 = bootES(data=mt,R=20000,data.col="room_fit_reg",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
# Collect each bootstrap's point estimate (t0) and CI bounds into a table.
b_list=list(b1,b2,b3,b4,b5,b6,b7,b8,b9,b10,b11,b12)
es_table = data.frame(es=numeric(12),ci_low=numeric(12),ci_high=numeric(12))
measure = c("head_fit_mixed","trunk_fit_mixed","room_fit_mixed","head_fit_month","trunk_fit_month","room_fit_month","head_fit_fur","trunk_fit_fur","room_fit_fur",
            "head_fit_reg","trunk_fit_reg","room_fit_reg")
es_table$measure = measure
for (i in 1:12){
  es = b_list[[i]]$t0
  ci_low = b_list[[i]]$bounds[1]
  ci_high = b_list[[i]]$bounds[2]
  es_table[i,]$es = es
  es_table[i,]$ci_low = ci_low
  es_table[i,]$ci_high = ci_high
}
##plotting es with ci
# Dot-and-whisker plot of the effect sizes with their CIs.
gg = ggplot(es_table,aes(y=measure,x=es))+geom_point()
gg = gg+geom_errorbarh(aes(xmin=ci_low, xmax=ci_high),height=0.5)
gg
#t-tests for group comparisons 10/10
t.test(mt$head_fit_mixed~mt$type)
t.test(mt$trunk_fit_mixed~mt$type)
t.test(mt$room_fit_mixed~mt$type)
t.test(mt$head_fit_month~mt$type)
t.test(mt$trunk_fit_month~mt$type)
t.test(mt$room_fit_month~mt$type)
t.test(mt$head_fit_fur~mt$type)
t.test(mt$trunk_fit_fur~mt$type)
t.test(mt$room_fit_fur~mt$type)
t.test(mt$head_fit_reg~mt$type)
t.test(mt$trunk_fit_reg~mt$type)
t.test(mt$room_fit_reg~mt$type)
##consistency CI ES
# Label the first 10 rows as controls and rows 11-20 as synaesthetes,
# then bootstrap Cohen's d for each consistency measure.
conTableM$type = NA
conTableM[1:10,]$type = "control"
conTableM[11:20,]$type = "synaesthete"
bootES(data=conTableM,R=20000,data.col="overall_hd_m",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
bootES(data=conTableM,R=20000,data.col="overall_hd_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
bootES(data=conTableM,R=20000,data.col="overall_str_m",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
bootES(data=conTableM,R=20000,data.col="overall_str_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
bootES(data=conTableM,R=20000,data.col="overall_tr_m",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
bootES(data=conTableM,R=20000,data.col="overall_tr_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="cohens.d",plot=F)
## CRAZY ANALYSIS - PART OF MAIN ANALYSIS
## Testing hypothesis that preferred FoR is different for syn and for controls
## group comparisons 10/10
# Same pipeline as above for the "crazy" (horse / _cr) fit measures.
mt = subset(mainTable,rounds=="both")
b1_cr = bootES(data=mt,R=20000,data.col="head_fit_horse",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b2_cr = bootES(data=mt,R=20000,data.col="trunk_fit_horse",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b3_cr = bootES(data=mt,R=20000,data.col="room_fit_horse",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b4_cr = bootES(data=mt,R=20000,data.col="head_fit_fur_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b5_cr = bootES(data=mt,R=20000,data.col="trunk_fit_fur_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b6_cr = bootES(data=mt,R=20000,data.col="room_fit_fur_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b7_cr = bootES(data=mt,R=20000,data.col="head_fit_reg_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b8_cr = bootES(data=mt,R=20000,data.col="trunk_fit_reg_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b9_cr = bootES(data=mt,R=20000,data.col="room_fit_reg_cr",group.col="type",contrast=c("synaesthete","control"),effect.type="r",plot=F)
b_list_cr=list(b1_cr,b2_cr,b3_cr,b4_cr,b5_cr,b6_cr,b7_cr,b8_cr,b9_cr)
es_table_cr = data.frame(es=numeric(9),ci_low=numeric(9),ci_high=numeric(9))
measure_cr = c("head_fit_horse","trunk_fit_horse","room_fit_horse","head_fit_fur_cr","trunk_fit_fur_cr","room_fit_fur_cr",
               "head_fit_reg_cr","trunk_fit_reg_cr","room_fit_reg_cr")
es_table_cr$measure = measure_cr
for (i in 1:9){
  es = b_list_cr[[i]]$t0
  ci_low = b_list_cr[[i]]$bounds[1]
  ci_high = b_list_cr[[i]]$bounds[2]
  es_table_cr[i,]$es = es
  es_table_cr[i,]$ci_low = ci_low
  es_table_cr[i,]$ci_high = ci_high
}
##plotting es with ci
gg = ggplot(es_table_cr,aes(y=measure,x=es))+geom_point()
gg = gg+geom_errorbarh(aes(xmin=ci_low, xmax=ci_high),height=0.5)
gg
#t-tests for group comparisons 10/10
t.test(mt$head_fit_horse~mt$type)
t.test(mt$trunk_fit_horse~mt$type)
t.test(mt$room_fit_horse~mt$type)
t.test(mt$head_fit_fur_cr~mt$type)
t.test(mt$trunk_fit_fur_cr~mt$type)
t.test(mt$room_fit_fur_cr~mt$type)
t.test(mt$head_fit_reg_cr~mt$type)
t.test(mt$trunk_fit_reg_cr~mt$type)
t.test(mt$room_fit_reg_cr~mt$type)
|
cc95b873dabe01d847be3c0951b4bed4f59f136e | 0266a444751df0af04f5f6899c70eb2919bed086 | /stats_scripts/Generating_graphs.R | 840fd0852f28d9f3dab66e6e87ea3f45c2de4fcc | [
"MIT"
] | permissive | LilithHafner/SymbulationEmp | 55b902ede043e4f7ef57a17a6922188b6ee43d8b | fb53c7fbefa03f6d901cdd49bdb823a637449bba | refs/heads/master | 2021-01-02T14:58:19.938906 | 2020-05-13T16:01:17 | 2020-05-13T16:07:17 | 239,671,139 | 0 | 0 | MIT | 2020-05-13T12:58:29 | 2020-02-11T03:54:51 | C++ | UTF-8 | R | false | false | 9,599 | r | Generating_graphs.R | require(ggplot2)
# NOTE(review): require() is used for package loading here (ggplot2 is
# loaded on the first line of this script); library() would fail loudly
# instead of silently returning FALSE if a package were missing.
require(RColorBrewer)
# Candidate cubehelix-style colour palettes of different lengths.
fullcubeHelix <- c("#673F03", "#7D3002", "#891901", "#A7000F", "#B50142", "#CD0778", "#D506AD", "#E401E7", "#AB08FF","#7B1DFF", "#5731FD","#5E8EFF", "#4755FF" ,"#6FC4FE", "#86E9FE", "#96FFF7", "#B2FCE3", "#BBFFDB", "#D4FFDD", "#EFFDF0")
shorthelix <- c("#A7000F", "#E401E7","#5E8EFF","#86E9FE","#B2FCE3")
elevenhelix <- c("#673F03", "#891901", "#B50142", "#D506AD", "#AB08FF", "#5731FD", "#4755FF", "#86E9FE", "#B2FCE3", "#D4FFDD", "#EFFDF0")
tenhelix <- c("#891901", "#B50142", "#D506AD", "#AB08FF", "#5731FD", "#4755FF", "#86E9FE", "#B2FCE3", "#D4FFDD", "#EFFDF0")
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("~/Desktop")
# Symbiont phenotype-bucket counts.  The two reads are exploratory
# alternates: the second assignment overwrites the first.
all_data <- read.table("munged_buckets_sym_around10.dat", h=T)
all_data <- read.table("munged_buckets_sym.dat", h=T)
# NOTE(review): named "under10k" but keeps updates up to 50000.
under10k <- subset(all_data, update <=50000)
# Exploratory alternates again: the second line keeps all treatments.
first_try <- subset(all_data, treatment=="mut0.001_mult5_vert0.1_start0.")
first_try <- all_data
# Collapse the 0.1-wide interaction-value buckets into labelled 0.2-wide
# phenotype categories for the symbionts.
neg1_9 <- cbind(subset(first_try, interval=="-1_-.9"), Interaction_Rate="-1 to -0.8 (Parasitic)")
neg9_8 <- cbind(subset(first_try, interval=="-.9_-.8"), Interaction_Rate="-1 to -0.8 (Parasitic)")
neg8_7 <- cbind(subset(first_try, interval=="-.8_-.7"), Interaction_Rate="-0.8 to -0.6 (Parasitic)")
neg7_6 <- cbind(subset(first_try, interval=="-.7_-.6"), Interaction_Rate="-0.8 to -0.6 (Parasitic)")
neg6_5 <- cbind(subset(first_try, interval=="-.6_-.5"), Interaction_Rate="-0.6 to -0.4 (Detrimental)")
neg5_4 <- cbind(subset(first_try, interval=="-.5_-.4"), Interaction_Rate="-0.6 to -0.4 (Detrimental)")
neg4_3 <- cbind(subset(first_try, interval=="-.4_-.3"), Interaction_Rate="-0.4 to -0.2 (Detrimental)")
neg3_2 <- cbind(subset(first_try, interval=="-.3_-.2"), Interaction_Rate="-0.4 to -0.2 (Detrimental)")
neg2_1 <- cbind(subset(first_try, interval=="-.2_-.1"), Interaction_Rate="-0.2 to 0 (Nearly Neutral)")
neg1_0 <- cbind(subset(first_try, interval=="-.1_0"), Interaction_Rate="-0.2 to 0 (Nearly Neutral)")
pos0_1 <- cbind(subset(first_try, interval=="0_.1"), Interaction_Rate="0 to 0.2 (Nearly Neutral)")
pos1_2 <- cbind(subset(first_try, interval==".1_.2"), Interaction_Rate="0 to 0.2 (Nearly Neutral)")
pos2_3 <- cbind(subset(first_try, interval==".2_.3"), Interaction_Rate="0.2 to 0.4 (Positive)")
pos3_4 <- cbind(subset(first_try, interval==".3_.4"), Interaction_Rate="0.2 to 0.4 (Positive)")
pos4_5 <- cbind(subset(first_try, interval==".4_.5"), Interaction_Rate="0.4 to 0.6 (Positive)")
pos5_6 <- cbind(subset(first_try, interval==".5_.6"), Interaction_Rate="0.4 to 0.6 (Positive)")
pos6_7 <- cbind(subset(first_try, interval==".6_.7"), Interaction_Rate="0.6 to 0.8 (Mutualistic)")
pos7_8 <- cbind(subset(first_try, interval==".7_.8"), Interaction_Rate="0.6 to 0.8 (Mutualistic)")
pos8_9 <- cbind(subset(first_try, interval==".8_.9"), Interaction_Rate="0.8 to 1.0 (Mutualistic)")
pos9_1 <- cbind(subset(first_try, interval==".9_1"), Interaction_Rate="0.8 to 1.0 (Mutualistic)")
pos1 <- cbind(subset(first_try, interval=="1"), Interaction_Rate="0.8 to 1.0 (Mutualistic)")
combined <- rbind(neg1_9, neg9_8, neg8_7, neg7_6, neg6_5, neg5_4, neg4_3, neg3_2, neg2_1, neg1_0, pos0_1, pos1_2, pos2_3, pos3_4, pos4_5, pos5_6, pos6_7, pos7_8, pos8_9, pos9_1, pos1)
# Label each vertical-transmission-rate treatment with a readable rate.
vert0 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0._start0."), Rate = "0%")
vert10 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.1_start0."), Rate = "10%")
vert20 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.2_start0."), Rate = "20%")
vert30 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.3_start0."), Rate = "30%")
vert40 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.4_start0."), Rate = "40%")
vert50 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.5_start0."), Rate = "50%")
vert60 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.6_start0."), Rate = "60%")
vert70 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.7_start0."), Rate = "70%")
vert80 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.8_start0."), Rate = "80%")
vert90 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.9_start0."), Rate = "90%")
vert100 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert1._start0."), Rate = "100%")
combined <- rbind(vert0, vert10, vert20, vert30, vert40, vert50, vert60, vert70, vert80, vert90, vert100)
# Treatments around the 10% transmission rate.
vert08 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.08_start0."), Rate = "8%")
vert09 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.09_start0."), Rate = "9%")
vert10 <- cbind(subset(combined, treatment=="mut0.001_mult5_vert0.1_start0."), Rate="10%")
combined <- rbind(vert08, vert09, vert10)
# NOTE(review): this overwrites the rbind above -- only the 9% treatment
# feeds into the plots below.
combined <- vert09
##Reps
# Per-replicate phenotype counts over time, one stacked-area panel per rep.
temp <- aggregate(list(count = combined$count), list(update=combined$update, rep=combined$rep, Interaction_Rate=combined$Interaction_Rate, Rate=combined$Rate), sum)
ggplot(temp, aes(update, count)) + geom_area(aes(fill=Interaction_Rate), position='stack') +ylab("Count of Symbionts with Phenotype") + xlab("Evolutionary time (in updates)") +scale_fill_manual("Interaction Rate\n Phenotypes",values=tenhelix) + theme(panel.background = element_rect(fill='light grey', colour='black')) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + guides(fill=FALSE) + guides(fill = guide_legend())+ facet_wrap(~rep)
##Averaged
# Phenotype counts averaged across replicates, one panel per rate.
temp <- aggregate(list(count = combined$count), list(update=combined$update, Interaction_Rate=combined$Interaction_Rate, treatment=combined$treatment, Rate=combined$Rate), mean)
ggplot(temp, aes(update, count)) + geom_area(aes(fill=Interaction_Rate), position='stack') +ylab("Count of Symbionts with Phenotype") + xlab("Evolutionary time (in updates)") +scale_fill_manual("Interaction Rate\n Phenotypes",values=tenhelix) + theme(panel.background = element_rect(fill='light grey', colour='black')) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + guides(fill=FALSE) + guides(fill = guide_legend()) + facet_wrap(~Rate)
ggplot(temp, aes(update, count)) + geom_area(aes(fill=Interaction_Rate), position='stack') +ylab("Count of Hosts with Phenotype") + xlab("Evolutionary time (in updates)") +scale_fill_manual("Interaction Rate\n Phenotypes",values=tenhelix) + theme(panel.background = element_rect(fill='light grey', colour='black')) + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + guides(fill=FALSE) + guides(fill = guide_legend()) + facet_wrap(~Rate)
##Hosts
# Host phenotype-bucket counts; same exploratory pattern as above -- the
# last assignments to host_data/all_data/first_try win.
host_data <- read.table("munged_buckets_host.dat", h=T)
first_try <- subset(host_data, update <=10000)
first_try <- subset(first_try, treatment=="mut0.001_mult5_vert0.1_start0.")
first_try <- host_data
all_data <- read.table("munged_buckets_host_around10.dat", h=T)
first_try <- subset(all_data, treatment=="mut0.001_mult5_vert0.09_start0.")
first_try <- subset(all_data, update <= 50000)
# Host phenotype categories (negative interaction values are defensive).
neg1_9 <- cbind(subset(first_try, interval=="-1_-.9"), Interaction_Rate="-1 to -0.8 (Defensive)")
neg9_8 <- cbind(subset(first_try, interval=="-.9_-.8"), Interaction_Rate="-1 to -0.8 (Defensive)")
neg8_7 <- cbind(subset(first_try, interval=="-.8_-.7"), Interaction_Rate="-0.8 to -0.6 (Defensive)")
neg7_6 <- cbind(subset(first_try, interval=="-.7_-.6"), Interaction_Rate="-0.8 to -0.6 (Defensive)")
neg6_5 <- cbind(subset(first_try, interval=="-.6_-.5"), Interaction_Rate="-0.6 to -0.4 (Mildly Defensive)")
neg5_4 <- cbind(subset(first_try, interval=="-.5_-.4"), Interaction_Rate="-0.6 to -0.4 (Mildly Defensive)")
neg4_3 <- cbind(subset(first_try, interval=="-.4_-.3"), Interaction_Rate="-0.4 to -0.2 (Mildly Defensive)")
neg3_2 <- cbind(subset(first_try, interval=="-.3_-.2"), Interaction_Rate="-0.4 to -0.2 (Mildly Defensive)")
neg2_1 <- cbind(subset(first_try, interval=="-.2_-.1"), Interaction_Rate="-0.2 to 0 (Nearly Neutral)")
neg1_0 <- cbind(subset(first_try, interval=="-.1_0"), Interaction_Rate="-0.2 to 0 (Nearly Neutral)")
pos0_1 <- cbind(subset(first_try, interval=="0_.1"), Interaction_Rate="0 to 0.2 (Nearly Neutral)")
pos1_2 <- cbind(subset(first_try, interval==".1_.2"), Interaction_Rate="0 to 0.2 (Nearly Neutral)")
pos2_3 <- cbind(subset(first_try, interval==".2_.3"), Interaction_Rate="0.2 to 0.4 (Positive)")
pos3_4 <- cbind(subset(first_try, interval==".3_.4"), Interaction_Rate="0.2 to 0.4 (Positive)")
pos4_5 <- cbind(subset(first_try, interval==".4_.5"), Interaction_Rate="0.4 to 0.6 (Positive)")
pos5_6 <- cbind(subset(first_try, interval==".5_.6"), Interaction_Rate="0.4 to 0.6 (Positive)")
pos6_7 <- cbind(subset(first_try, interval==".6_.7"), Interaction_Rate="0.6 to 0.8 (Mutualistic)")
pos7_8 <- cbind(subset(first_try, interval==".7_.8"), Interaction_Rate="0.6 to 0.8 (Mutualistic)")
pos8_9 <- cbind(subset(first_try, interval==".8_.9"), Interaction_Rate="0.6 to 1.0 (Mutualistic)")
pos9_1 <- cbind(subset(first_try, interval==".9_1"), Interaction_Rate="0.6 to 1.0 (Mutualistic)")
pos1 <- cbind(subset(first_try, interval=="1"), Interaction_Rate="0.6 to 1.0 (Mutualistic)")
combined <- rbind(neg1_9, neg9_8, neg8_7, neg7_6, neg6_5, neg5_4, neg4_3, neg3_2, neg2_1, neg1_0, pos0_1, pos1_2, pos2_3, pos3_4, pos4_5, pos5_6, pos6_7, pos7_8, pos8_9, pos9_1, pos1)
# Per-replicate host counts, faceted by treatment/replicate.
temp <- aggregate(list(count = combined$count), list(update=combined$update, rep=combined$rep, Interaction_Rate=combined$Interaction_Rate, treatment=combined$treatment), sum)
ggplot(temp, aes(update, count)) + geom_area(aes(fill=Interaction_Rate), position='stack') +ylab("Count of Hosts with Phenotype") + xlab("Evolutionary time (in updates)") +scale_fill_brewer("Interaction Rate\n Phenotypes", palette="Paired") + guides(fill = guide_legend(reverse=TRUE)) + facet_wrap(~treatment) +facet_wrap(~rep)
|
ddd5e98d8bda49557b6271b46cb765cd3241fec8 | e45e510a9ddfc830d3d15f41c29772618b60bde2 | /analysis/analysis.R | 55febdff708714c1022a957e31d508a708e201ec | [
"LicenseRef-scancode-public-domain"
] | permissive | craig-willis/sigir-2016-queries | 37c3adc8bd537782422aa51b7fdce0169120f116 | f6caddb5f1137a1d7678edb6e00c07b7f87ce45b | refs/heads/master | 2021-01-21T14:07:40.163549 | 2016-05-21T16:19:41 | 2016-05-21T16:19:41 | 50,811,396 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,653 | r | analysis.R | setwd("/users/cwillis/dev/uiucGSLIS/ecir-2016/analysis")
# Cross-validation cost for cv.glm(): the misclassification rate of the
# predicted probabilities `r` against the observed responses `pi` at a
# 0.5 cutoff (the fraction of predictions on the wrong side).
cost <- function(r, pi = 0) {
  wrong <- abs(r - pi) > 0.5
  mean(wrong)
}
# Summaries

# Dakka et al, TREC 6-8 (301-450), LATimes
# (fixed a garbled character in the file name: "dakka-?latimes-..." ->
# "dakka-latimes-...", matching "efron-latimes-..." used below)
d <- read.csv("dakka-latimes-trec678-rel-acf.csv", header=T)
d <- d[,3:ncol(d)] # Remove topic and file fields
d[is.na(d)] <- 0 # Replace NA with 0
# Collapse the individual event/entity indicators into single counts.
d$Event <- d$PeriodicEvent + d$SpecificEvent + d$IndirectEventReference
d$Entity <- d$OrganizationEntity + d$OtherEntity + d$PersonEntity
d <- d[-c(1,2,3,4,6,8,9,10)] # Remove ExplicitDate, Futuh
d[,c(1,2,6,7)][d[,c(1,2,6,7)] > 0] <- 1 # Replace non-zero with 1
#d[,1:10] <- lapply(d[,1:10], factor)
#d$Temporal <- as.factor(d$Temporal)

# Logistic model without ACF/DPS, stepwise selection, 10-fold cross-validation
m <- glm(Temporal ~ . - ACF - DPS, d, family=binomial(link=logit))
summary(m)
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta

# Logistic model with ACF/DPS, stepwise selection, 10-fold cross-validation
m <- glm(Temporal ~ ., d, family="binomial")
summary(m)
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# (removed a stray top-level `norm` token here; it only auto-printed the
# source of base::norm and was clearly leftover junk)

# Dakka et al, TREC 6-8 (301-450), Financial Times
d <- read.csv("dakka-ft-trec678-rel-acf.csv", header=T)
d <- d[,3:ncol(d)]
d[is.na(d)] <- 0
d[,1:10][d[,1:10] > 0] <- 1
d <- d[-c(6)]

# Logistic model without ACF/DPS, stepwise selection, 10-fold cross-validation
m <- glm(Temporal ~ . - ACF - DPS, d, family="binomial")
summary(m)
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta

# Logistic model with ACF/DPS, stepwise selection, 10-fold cross-validation
# NOTE(review): despite the heading, this model is fit on DPS alone.
m <- glm(Temporal ~ DPS, d, family=binomial(link=logit))
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta

m <- glm(Temporal ~ DPS, d, family=binomial(link=logit))
summary(m)

# NOTE(review): logistf() requires library(logistf), which is never loaded
# in this script -- verify before re-running.
m <- logistf(Temporal ~ SpecificEvent, d, family=binomial(link=logit))
summary(m)
# Efron & Golovchinsky, TREC 6-8 (301-450), Financial Times
d <- read.csv("efron-ft-trec678-rel-acf.csv", header=T)
d <- d[,3:ncol(d)]
d[is.na(d)] <- 0
d[,1:10][d[,1:10] > 0] <- 1
# Logistic model without the time-series features (ACF/DPS), with
# stepwise selection and 10-fold cross-validation.
m <- glm(Temporal ~ . - DPS - ACF, d, family=binomial)
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# Full logistic model including ACF/DPS.
m <- glm(Temporal ~ ., d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# Efron & Golovchinsky, TREC 6-8 (301-450), LA Times
d <- read.csv("efron-latimes-trec678-rel-acf.csv", header=T)
d <- d[,3:ncol(d)]
d[is.na(d)] <- 0
d[,1:10][d[,1:10] > 0] <- 1
# Without ACF/DPS.
m <- glm(Temporal ~ . - DPS - ACF, d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# With ACF/DPS.
m <- glm(Temporal ~ ., d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# Peetz et al, Blog06-08 (900-1050), Blog 06
d <- read.csv("peetz-blog0608-rel-acf.csv", header=T)
d <- d[,3:ncol(d)]
d[is.na(d)] <- 0
d[,1:10][d[,1:10] > 0] <- 1
# Without ACF/DPS.
m <- glm(Temporal ~ . - ACF - DPS, d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# With ACF/DPS.
m <- glm(Temporal ~ ., d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# Novelty 03-04. Predict whether the topic is an "Event" (1) or "Opinion" (0)
d <- read.csv("novelty-0304-rel-acf.csv", header=T)
d <- d[,3:ncol(d)]
d[is.na(d)] <- 0
d[,1:10][d[,1:10] > 0] <- 1
# Without ACF/DPS.
m <- glm(Temporal ~ . - ACF - DPS, d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
# With ACF/DPS.
m <- glm(Temporal ~ ., d, family="binomial")
m <- step(m, direction=c("both"))
summary(m)
cv.glm(d, m, cost, K=10)$delta
|
2f004369634828342915e4e62266bb1dbfc331ed | 074df927499b9e18b86bd38be490dd9ff78d9b7c | /R/complement.r | 019df1e981051f868936e9ec03576b14be24c484 | [] | no_license | Kosile92/myDNA | a506226181d9ffbd1134e9e846086eee9d29d28a | e8079b7dded85483e8724935602a5be7ef9140d4 | refs/heads/main | 2023-01-07T00:35:57.237517 | 2020-11-06T19:32:55 | 2020-11-06T19:32:55 | 310,689,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 717 | r | complement.r | #' compute DNA nucleotide complement
#'
#' Computes the Watson-Crick complement of each nucleotide (A<->T, G<->C),
#' preserving case.  Characters other than A/C/G/T in either case map to
#' NA; unlike the original loop, a trailing unknown character no longer
#' silently shortens the result, and an empty input now returns an empty
#' vector instead of erroring (the old `1:length(nuc)` iterated over 1,0).
#'
#' @param nuc character vector of single-letter nucleotide codes
#' @return character vector of the same length as \code{nuc} containing
#'   the complementary bases
#' @keywords complement, DNA
#' @export
#' @examples
#' complement(c("A", "t", "G", "c"))  # "T" "a" "C" "g"
complement <- function(nuc){
  # Named lookup table: indexing by name vectorizes the per-base mapping
  # and yields NA for characters that have no complement.
  pairs <- c(A = "T", a = "t", T = "A", t = "a",
             G = "C", g = "c", C = "G", c = "g")
  unname(pairs[as.character(nuc)])
}
673dc62c1b4aa15e93b33566195516bed75c8e15 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/dtwclust/man/tsclust.Rd | 02ee21a1fb4964011f7a845424d059891483e305 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 21,922 | rd | tsclust.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CLUSTERING-tsclust.R
\name{tsclust}
\alias{tsclust}
\title{Time series clustering}
\usage{
tsclust(series = NULL, type = "partitional", k = 2L, ...,
preproc = NULL, distance = "dtw_basic", centroid = ifelse(type ==
"fuzzy", "fcm", "pam"), control = do.call(paste0(type, "_control"),
list()), args = tsclust_args(), seed = NULL, trace = FALSE,
error.check = TRUE)
}
\arguments{
\item{series}{A list of series, a numeric matrix or a data frame. Matrices and data frames are
coerced to a list row-wise (see \code{\link[=tslist]{tslist()}}).}
\item{type}{What type of clustering method to use: \code{"partitional"}, \code{"hierarchical"}, \code{"tadpole"}
or \code{"fuzzy"}.}
\item{k}{Number of desired clusters. It can be a numeric vector with different values.}
\item{...}{Arguments to pass to preprocessing, centroid \strong{and} distance functions (added to
\code{args}). Also passed to \code{method} from \code{\link[=hierarchical_control]{hierarchical_control()}} if it happens to be a function,
and to \code{\link[stats:hclust]{stats::hclust()}} if it contains the \code{members} parameter.}
\item{preproc}{Function to preprocess data. Defaults to \code{\link[=zscore]{zscore()}} \emph{only} if \code{centroid} \code{=}
\code{"shape"}, but will be replaced by a custom function if provided.}
\item{distance}{A registered distance from \code{\link[proxy:dist]{proxy::dist()}}. Ignored for \code{type} \code{=} \code{"tadpole"}.}
\item{centroid}{Either a supported string, or an appropriate function to calculate centroids when
using partitional/hierarchical/tadpole methods. See Centroids section.}
\item{control}{An appropriate list of controls. See \link{tsclust-controls}.}
\item{args}{An appropriate list of arguments for preprocessing, distance and centroid functions.
See \code{\link[=tsclust_args]{tsclust_args()}} and the examples.}
\item{seed}{Random seed for reproducibility.}
\item{trace}{Logical flag. If \code{TRUE}, more output regarding the progress is printed to screen.}
\item{error.check}{Logical indicating whether the function should try to detect inconsistencies
and give more informative error messages. Also used internally to avoid repeating checks.}
}
\value{
An object with an appropriate class from \linkS4class{TSClusters}.
If \code{control$nrep > 1} and a partitional procedure is used, \code{length(method)} \code{> 1} and
hierarchical procedures are used, or \code{length(k)} \code{>} \code{1}, a list of objects is returned.
}
\description{
This is the main function to perform time series clustering. See the details and the examples for
more information, as well as the included package vignettes (which can be found by typing
\code{browseVignettes("dtwclust")}). A convenience wrapper is available in \code{\link[=compare_clusterings]{compare_clusterings()}},
and a shiny app in \code{\link[=interactive_clustering]{interactive_clustering()}}.
}
\details{
Partitional and fuzzy clustering procedures use a custom implementation. Hierarchical clustering
is done with \code{\link[stats:hclust]{stats::hclust()}} by default. TADPole clustering uses the \code{\link[=TADPole]{TADPole()}} function.
Specifying \code{type} = \code{"partitional"}, \code{preproc} = \code{zscore}, \code{distance} = \code{"sbd"} and \code{centroid} =
\code{"shape"} is equivalent to the k-Shape algorithm (Paparrizos and Gravano 2015).
The \code{series} may be provided as a matrix, a data frame or a list. Matrices and data frames are
coerced to a list, both row-wise. Only lists can have series with different lengths or multiple
dimensions. Most of the optimizations require series to have the same length, so consider
reinterpolating them to save some time (see Ratanamahatana and Keogh 2004; \code{\link[=reinterpolate]{reinterpolate()}}). No
missing values are allowed.
In the case of multivariate time series, they should be provided as a list of matrices, where
time spans the rows of each matrix and the variables span the columns (see \link{CharTrajMV} for an
example). All included centroid functions should work with the aforementioned format, although
\code{shape} is \emph{not} recommended. Note that the \code{plot} method will simply append all dimensions
(columns) one after the other.
}
\note{
The lower bounds are defined only for time series of equal length. They are \strong{not} symmetric,
and \code{DTW} is not symmetric in general.
}
\section{Centroid Calculation}{
In the case of partitional/fuzzy algorithms, a suitable function should calculate the cluster
centroids at every iteration. In this case, the centroids may also be time series. Fuzzy
clustering uses the standard fuzzy c-means centroid by default.
In either case, a custom function can be provided. If one is provided, it will receive the
following parameters with the shown names (examples for partitional clustering are shown in
parentheses):
\itemize{
\item \code{x}: The \emph{whole} data list (\code{list(ts1, ts2, ts3)})
\item \code{cl_id}: An integer vector with length equal to the number of series in \code{data}, indicating
which cluster a series belongs to (\code{c(1L, 2L, 2L)})
\item \code{k}: The desired number of total clusters (\code{2L})
\item \code{cent}: The current centroids in order, in a list (\code{list(centroid1, centroid2)})
\item \code{cl_old}: The membership vector of the \emph{previous} iteration (\code{c(1L, 1L, 2L)})
\item The elements of \code{...} that match its formal arguments
}
In case of fuzzy clustering, the membership vectors (2nd and 5th elements above) are matrices
with number of rows equal to amount of elements in the data, and number of columns equal to the
number of desired clusters. Each row must sum to 1.
The other option is to provide a character string for the custom implementations. The following
options are available:
\itemize{
\item "mean": The average along each dimension. In other words, the average of all \eqn{x^j_i}
among the \eqn{j} series that belong to the same cluster for all time points \eqn{t_i}.
\item "median": The median along each dimension. Similar to mean.
\item "shape": Shape averaging. By default, all series are z-normalized in this case, since the
resulting centroids will also have this normalization. See \code{\link[=shape_extraction]{shape_extraction()}} for more
details.
\item "dba": DTW Barycenter Averaging. See \code{\link[=DBA]{DBA()}} for more details.
\item "sdtw_cent": Soft-DTW centroids, See \code{\link[=sdtw_cent]{sdtw_cent()}} for more details.
\item "pam": Partition around medoids (PAM). This basically means that the cluster centroids are
always one of the time series in the data. In this case, the distance matrix can be
pre-computed once using all time series in the data and then re-used at each iteration. It
usually saves overhead overall for small datasets (see \link{tsclust-controls}).
\item "fcm": Fuzzy c-means. Only supported for fuzzy clustering and used by default in that case.
\item "fcmdd": Fuzzy c-medoids. Only supported for fuzzy clustering. It \strong{always} precomputes/uses
the whole cross-distance matrix.
}
The \code{dba}, \code{shape} and \code{sdtw_cent} implementations check for parallelization. Note that only
\code{shape}, \code{dba}, \code{sdtw_cent}, \code{pam} and \code{fcmdd} support series of different length. Also note
that for \code{shape}, \code{dba} and \code{sdtw_cent}, this support has a caveat: the final centroids' length
will depend on the length of those series that were randomly chosen at the beginning of the
clustering algorithm. For example, if the series in the dataset have a length of either 10 or
15, 2 clusters are desired, and the initial choice selects two series with length of 10, the
final centroids will have this same length.
As special cases, if hierarchical or tadpole clustering is used, you can provide a centroid
function that takes a list of series as first input. It will also receive the contents of
\code{args$cent} that match its formal arguments, and should return a single centroid series. These
centroids are returned in the \code{centroids} slot. By default, the medoid of each cluster is
extracted (similar to what \code{\link[=pam_cent]{pam_cent()}} does).
In the following cases, the \code{centroids} list will have an attribute \code{series_id} with an integer
vector indicating which \code{series} were chosen as centroids:
\itemize{
\item Partitional clustering using "pam" centroid.
\item Fuzzy clustering using "fcmdd" centroid.
\item Hierarchical clustering with the default centroid extraction.
\item TADPole clustering with the default centroid extraction.
}
}
\section{Distance Measures}{
The distance measure to be used with partitional, hierarchical and fuzzy clustering can be
modified with the \code{distance} parameter. The supported option is to provide a string, which must
represent a compatible distance registered with \code{proxy}'s \code{\link[proxy:dist]{proxy::dist()}}. Registration is done
via \code{\link[proxy:pr_DB]{proxy::pr_DB()}}, and extra parameters can be provided in \code{args$dist} (see the examples).
Note that you are free to create your own distance functions and register them. Optionally, you
can use one of the following custom implementations (all registered with \code{proxy}):
\itemize{
\item \code{"dtw"}: DTW, optionally with a Sakoe-Chiba/Slanted-band constraint. Done with \code{\link[dtw:dtw]{dtw::dtw()}}.
\item \code{"dtw2"}: DTW with L2 norm and optionally a Sakoe-Chiba/Slanted-band constraint. See
\code{\link[=dtw2]{dtw2()}}.
\item \code{"dtw_basic"}: A custom version of DTW with less functionality, but faster. See
\code{\link[=dtw_basic]{dtw_basic()}}.
\item \code{"dtw_lb"}: DTW with L1 or L2 norm and a Sakoe-Chiba constraint. Some computations are
avoided by first estimating the distance matrix with Lemire's lower bound and then
iteratively refining with DTW. See \code{\link[=dtw_lb]{dtw_lb()}}. Not suitable for \code{pam.precompute} = \code{TRUE} nor
hierarchical clustering.
\item \code{"lbk"}: Keogh's lower bound for DTW with either L1 or L2 norm for the Sakoe-Chiba
constraint. See \code{\link[=lb_keogh]{lb_keogh()}}.
\item \code{"lbi"}: Lemire's lower bound for DTW with either L1 or L2 norm for the Sakoe-Chiba
constraint. See \code{\link[=lb_improved]{lb_improved()}}.
\item \code{"sbd"}: Shape-based distance. See \code{\link[=sbd]{sbd()}}.
\item \code{"gak"}: Global alignment kernels. See \code{\link[=gak]{gak()}}.
\item \code{"sdtw"}: Soft-DTW. See \code{\link[=sdtw]{sdtw()}}.
}
Out of the aforementioned, only the distances based on DTW lower bounds \emph{don't} support series
of different length. The lower bounds are probably unsuitable for direct clustering unless
series are very easily distinguishable.
If you know that the distance function is symmetric, and you use a hierarchical algorithm, or a
partitional algorithm with PAM centroids, or fuzzy c-medoids, some time can be saved by
calculating only half the distance matrix. Therefore, consider setting the symmetric control
parameter to \code{TRUE} if this is the case (see \link{tsclust-controls}).
}
\section{Preprocessing}{
It is strongly advised to use z-normalization in case of \code{centroid = "shape"}, because the
resulting series have this normalization (see \code{\link[=shape_extraction]{shape_extraction()}}). Therefore, \code{\link[=zscore]{zscore()}} is
the default in this case. The user can, however, specify a custom function that performs any
transformation on the data, but the user must make sure that the format stays consistent, i.e.
a list of time series.
Setting to \code{NULL} means no preprocessing (except for \code{centroid = "shape"}). A provided function
will receive the data as first argument, followed by the contents of \code{args$preproc} that match
its formal arguments.
It is convenient to provide this function if you're planning on using the \code{\link[stats:predict]{stats::predict()}}
generic (see also \link{TSClusters-methods}).
}
\section{Repetitions}{
Due to their stochastic nature, partitional clustering is usually repeated several times with
different random seeds to allow for different starting points. This function uses
\code{\link[parallel:nextRNGStream]{parallel::nextRNGStream()}} to obtain different seed streams for each repetition, utilizing the
\code{seed} parameter (if provided) to initialize it. If more than one repetition is made, the
streams are returned in an attribute called \code{rng}.
Multiple values of \code{k} can also be provided to get different partitions using any \code{type} of
clustering.
Repetitions are greatly optimized when PAM centroids are used and the whole distance matrix is
precomputed, since said matrix is reused for every repetition.
}
\section{Parallel Computing}{
Please note that running tasks in parallel does \strong{not} guarantee faster computations. The
overhead introduced is sometimes too large, and it's better to run tasks sequentially.
The user can register a parallel backend, e.g. with the \pkg{doParallel} package, in order to
attempt to speed up the calculations (see the examples). This relies on
\code{\link[foreach:foreach]{foreach}}, i.e. it uses multi-processing.
Multi-processing is used in partitional and fuzzy clustering for multiple values of \code{k} and/or
\code{nrep} (in \code{\link[=partitional_control]{partitional_control()}}). See \code{\link[=TADPole]{TADPole()}} to know how it uses parallelization. For
cross-distance matrix calculations, the parallelization strategy depends on whether the
distance is included with \pkg{dtwclust} or not, see the caveats in \linkS4class{tsclustFamily}.
If you register a parallel backend and special packages must be loaded, provide their names in
the \code{packages} element of \code{control}. Note that "dtwclust" is always loaded in each parallel
worker, so that doesn't need to be included. Alternatively, you may want to pre-load
\pkg{dtwclust} in each worker with \code{\link[parallel:clusterEvalQ]{parallel::clusterEvalQ()}}.
}
\examples{
#' NOTE: More examples are available in the vignette. Here are just some miscellaneous
#' examples that might come in handy. They should all work, but some don't run
#' automatically.
# Load data
data(uciCT)
# ====================================================================================
# Simple partitional clustering with Euclidean distance and PAM centroids
# ====================================================================================
# Reinterpolate to same length
series <- reinterpolate(CharTraj, new.length = max(lengths(CharTraj)))
# Subset for speed
series <- series[1:20]
labels <- CharTrajLabels[1:20]
# Making many repetitions
pc.l2 <- tsclust(series, k = 4L,
distance = "L2", centroid = "pam",
seed = 3247, trace = TRUE,
control = partitional_control(nrep = 10L))
# Cluster validity indices
sapply(pc.l2, cvi, b = labels)
# ====================================================================================
# Hierarchical clustering with Euclidean distance
# ====================================================================================
# Re-use the distance matrix from the previous example (all matrices are the same)
# Use all available linkage methods for function hclust
hc.l2 <- tsclust(series, type = "hierarchical",
k = 4L, trace = TRUE,
control = hierarchical_control(method = "all",
distmat = pc.l2[[1L]]@distmat))
# Plot the best dendrogram according to variation of information
plot(hc.l2[[which.min(sapply(hc.l2, cvi, b = labels, type = "VI"))]])
# ====================================================================================
# Multivariate time series
# ====================================================================================
# Multivariate series, provided as a list of matrices
mv <- CharTrajMV[1L:20L]
# Using GAK distance
mvc <- tsclust(mv, k = 4L, distance = "gak", seed = 390,
args = tsclust_args(dist = list(sigma = 100)))
# Note how the variables of each series are appended one after the other in the plot
plot(mvc)
\dontrun{
# ====================================================================================
# This function is more verbose but allows for more explicit fine-grained control
# ====================================================================================
tsc <- tsclust(series, k = 4L,
distance = "gak", centroid = "dba",
preproc = zscore, seed = 382L, trace = TRUE,
control = partitional_control(iter.max = 30L),
args = tsclust_args(preproc = list(center = FALSE),
dist = list(window.size = 20L,
sigma = 100),
cent = list(window.size = 15L,
norm = "L2",
trace = TRUE)))
# ====================================================================================
# Registering a custom distance with the 'proxy' package and using it
# ====================================================================================
# Normalized asymmetric DTW distance
ndtw <- function(x, y, ...) {
dtw::dtw(x, y, step.pattern = asymmetric,
distance.only = TRUE, ...)$normalizedDistance
}
# Registering the function with 'proxy'
if (!pr_DB$entry_exists("nDTW"))
proxy::pr_DB$set_entry(FUN = ndtw, names=c("nDTW"),
loop = TRUE, type = "metric", distance = TRUE,
description = "Normalized asymmetric DTW")
# Subset of (original) data for speed
pc.ndtw <- tsclust(series[-1L], k = 4L,
distance = "nDTW",
seed = 8319,
trace = TRUE,
args = tsclust_args(dist = list(window.size = 18L)))
# Which cluster would the first series belong to?
# Notice that newdata is provided as a list
predict(pc.ndtw, newdata = series[1L])
# ====================================================================================
# Custom hierarchical clustering
# ====================================================================================
require(cluster)
hc.diana <- tsclust(series, type = "h", k = 4L,
distance = "L2", trace = TRUE,
control = hierarchical_control(method = diana))
plot(hc.diana, type = "sc")
# ====================================================================================
# TADPole clustering
# ====================================================================================
pc.tadp <- tsclust(series, type = "tadpole", k = 4L,
control = tadpole_control(dc = 1.5,
window.size = 18L))
# Modify plot, show only clusters 3 and 4
plot(pc.tadp, clus = 3:4,
labs.arg = list(title = "TADPole, clusters 3 and 4",
x = "time", y = "series"))
# Saving and modifying the ggplot object with custom time labels
require(scales)
t <- seq(Sys.Date(), len = length(series[[1L]]), by = "day")
gpc <- plot(pc.tadp, time = t, plot = FALSE)
gpc + ggplot2::scale_x_date(labels = date_format("\%b-\%Y"),
breaks = date_breaks("2 months"))
# ====================================================================================
# Specifying a centroid function for prototype extraction in hierarchical clustering
# ====================================================================================
# Seed is due to possible randomness in shape_extraction when selecting a basis series
hc.sbd <- tsclust(CharTraj, type = "hierarchical",
k = 20L, distance = "sbd",
preproc = zscore, centroid = shape_extraction,
seed = 320L)
plot(hc.sbd, type = "sc")
# ====================================================================================
# Using parallel computation to optimize several random repetitions
# and distance matrix calculation
# ====================================================================================
require(doParallel)
# Create parallel workers
cl <- makeCluster(detectCores())
invisible(clusterEvalQ(cl, library(dtwclust)))
registerDoParallel(cl)
## Use constrained DTW and PAM
pc.dtw <- tsclust(CharTraj, k = 20L, seed = 3251, trace = TRUE,
args = tsclust_args(dist = list(window.size = 18L)))
## Use constrained DTW with DBA centroids
pc.dba <- tsclust(CharTraj, k = 20L, centroid = "dba",
seed = 3251, trace = TRUE,
args = tsclust_args(dist = list(window.size = 18L),
cent = list(window.size = 18L)))
#' Using distance based on global alignment kernels
pc.gak <- tsclust(CharTraj, k = 20L,
distance = "gak",
centroid = "dba",
seed = 8319,
trace = TRUE,
control = partitional_control(nrep = 8L),
args = tsclust_args(dist = list(window.size = 18L),
cent = list(window.size = 18L)))
# Stop parallel workers
stopCluster(cl)
# Return to sequential computations. This MUST be done if stopCluster() was called
registerDoSEQ()
}
}
\references{
Please refer to the package vignette references (which can be loaded by typing
\code{vignette("dtwclust")}).
}
\seealso{
\linkS4class{TSClusters}, \link{TSClusters-methods}, \linkS4class{tsclustFamily}, \link{tsclust-controls},
\code{\link[=compare_clusterings]{compare_clusterings()}}, \code{\link[=interactive_clustering]{interactive_clustering()}}, \code{\link[=ssdtwclust]{ssdtwclust()}}.
}
\author{
Alexis Sarda-Espinosa
}
|
e4e7981080433da98965b66e180e815d00612bb6 | 4b4a16d3d8543817a3ae11f81c08ea85e5728dd6 | /man/rasch_drop.Rd | f6bbbfd738bf4644b87dbd62743d215fef93411e | [] | no_license | CarolinaFellinghauer/whomds | ca7022f24419c33f2f295e2fa7cb107ef8f40e58 | d43eaedd05ea4fe808cc50a6dbe3a9333fc66caf | refs/heads/master | 2020-03-19T06:13:10.744716 | 2018-05-31T19:07:24 | 2018-05-31T19:07:24 | 136,001,057 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,071 | rd | rasch_drop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rasch_drop.R
\name{rasch_drop}
\alias{rasch_drop}
\title{Drop items from a Rasch Analysis}
\usage{
rasch_drop(vars_metric, drop_vars, max_values)
}
\arguments{
\item{vars_metric}{a character vector of items to use in the Rasch Analysis}
\item{drop_vars}{a character vector of items to drop from the Rasch Analysis}
\item{max_values}{a tibble with two columns, \code{var} equivalent to \code{vars_metric} and \code{max_val} with their corresponding maximum possible values}
}
\value{
a named list with the new \code{vars_metric} and new \code{max_values} after dropping the desired variables
}
\description{
Drop items from a Rasch Analysis
}
\details{
Dropping variables might be desirable if one finds that particular items are causing a lot of problems for the fit of a Rasch Model.
}
\seealso{
Other rasch functions: \code{\link{rasch_DIF}},
\code{\link{rasch_factor}}, \code{\link{rasch_mds}},
\code{\link{rasch_model}}, \code{\link{rasch_rawscore}},
\code{\link{rasch_rescale}}
}
|
c635936280c0d43d09a5398897618d2b9e9baa4e | 41d7c2ff4628f27213f90aca0be867c0f747b300 | /R/utils-preprocess.R | 00be4b7ef51b5ea30baad0f7605252066ba33e3a | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jlaffy/statistrics | 69e17a043522d3bcad6127127e66eea9649c3a92 | 2de58328790ede712c3aa6bbeccda611d7eaa121 | refs/heads/master | 2020-03-14T03:03:24.759893 | 2018-08-23T12:37:04 | 2018-08-23T12:37:04 | 131,412,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,362 | r | utils-preprocess.R | #' Convert expression data from logTPM to TPM form.
#'
#' TPM aka Transcripts Per Million.
#' If a matrix is provided, \code{TPM} will be applied over each cell and returned as a matrix.
#' Note. \code{TPM(logtpm) == logTPM(tpm)}
#' @param logtpm a numeric vector. Either a matrix or of length 1.
#' @param dividebyten a boolean. Usually TRUE when single cells expression vals are concerned; usually FALSE when bulk (many cell) expression vals are concerned.
#'
#' @return a numeric vector (either a matrix or of length 1) with values converted from logTPM to TPM form.
#' @export
#'
TPM <- function(logtpm, dividebyten=TRUE) {
if(dividebyten) {
tpm <- 10*(2^(logtpm)-1)}
else if(!dividebyten) {
tpm <- 2^(logtpm)-1}
return(tpm)
}
#' Convert expression data from TPM to logTPM form.
#'
#' TPM aka Transcripts Per Million. The computation is vectorized, so if a
#' matrix is provided the conversion is applied elementwise and a matrix is
#' returned. Inverse of \code{TPM}: \code{logTPM(TPM(x)) == x}.
#' @param tpm a numeric vector or matrix of TPM-scale expression values.
#' @param dividebyten a boolean. Usually TRUE when single-cell expression
#'   values are concerned (divide by ten before logging); usually FALSE when
#'   bulk (many-cell) expression values are concerned.
#'
#' @return a numeric object of the same shape as \code{tpm} with values
#'   converted from TPM to logTPM form.
#' @export
#'
logTPM <- function(tpm, dividebyten=TRUE) {
  # log2(x + 1) transform; divide by 10 first for single-cell data so that
  # the transform round-trips with TPM(). A plain if/else replaces the
  # original's redundant `else if (!dividebyten)` branch.
  if (dividebyten) {
    log(tpm/10 + 1, 2)
  } else {
    log(tpm + 1, 2)
  }
}
#' Cell complexity counter
#'
#' Computes the "complexity" of each cell, i.e. the number of genes with
#' non-zero expression.
#'
#' @param mat a matrix of gene expression data with gene rows and cell
#'   columns (or the transpose; see \code{cellcols}).
#' @param cellcols boolean. The columns (variables) are cells if TRUE; if
#'   FALSE the matrix is cells-by-genes and is transposed first.
#' @param rank boolean. If TRUE, sort cells by # of non-zero genes detected
#'   (ascending).
#'
#' @return a named numeric vector: complexity value per cell (# of genes
#'   detected per cell).
#' @export
#'
detected <- function(mat, cellcols=TRUE, rank=TRUE) {
  # Orient the matrix so that columns are cells.
  if (!cellcols) {
    mat <- t(mat)
  }
  # Count non-zero entries per column. colSums() on the logical matrix is
  # the vectorized equivalent of apply(mat, 2, function(x) sum(x != 0)).
  complexity <- colSums(mat != 0)
  if (rank) {
    sort(complexity)
  } else {
    complexity
  }
}
#' Filter cells by complexity cutoff
#'
#' Removes all cells whose detected gene number/complexity value is below
#' the cutoff.
#'
#' @param mat a matrix of gene expression data with gene rows and cell
#'   columns.
#' @param cutoff complexity value below which cells are filtered out.
#' @param justNames boolean. If TRUE, return only the names of cells that
#'   passed the filtering.
#'
#' @return if \code{justNames} is TRUE, the IDs of cells whose detected gene
#'   number >= \code{cutoff}; otherwise \code{mat} restricted to those cells.
#' @export
#'
complexityCut <- function(mat, cutoff=3000, justNames=FALSE) {
  # Number of detected genes per cell (unsorted; names are cell IDs).
  d <- detected(mat, rank=FALSE)
  # Cells meeting the minimum complexity requirement.
  goodCells <- names(d)[d >= cutoff]
  if (justNames) {
    goodCells
  } else {
    # drop = FALSE keeps the result a matrix even when a single cell passes;
    # the original subset would silently collapse it to a plain vector.
    mat[, goodCells, drop = FALSE]
  }
}
#' Filter genes by cutoff
#'
#' Removes genes whose average expression across cells is below the cutoff.
#'
#' @param mat a matrix of gene expression data (logTPM) with gene rows and
#'   cell columns.
#' @param cutoff average log expression value below which genes are
#'   filtered out.
#'
#' @return the input matrix without the genes/rows whose average expression
#'   across cells was too low.
#' @export
#'
genesCut <- function(mat, cutoff=4) {
  # Convert logTPM back to TPM so that averaging across cells happens on the
  # linear scale.
  mat.tpm <- TPM(mat)
  # Average expression of each gene across cells; rowMeans() is the
  # vectorized equivalent of apply(mat.tpm, 1, mean).
  mat.tpm.avg <- rowMeans(mat.tpm)
  # Re-log the averages (+1 then log2). dividebyten = FALSE because these
  # are averages across cells, not single-cell values.
  avg.logtpm1 <- logTPM(mat.tpm.avg, dividebyten=FALSE)
  # Genes whose average log expression reaches the cutoff.
  genes <- names(avg.logtpm1)[avg.logtpm1 >= cutoff]
  # drop = FALSE keeps a matrix even when only one gene survives the filter;
  # the original subset would silently collapse it to a vector.
  mat[genes, , drop = FALSE]
}
#' Centering
#'
#' Center gene expression data (before computing correlations), such that the
#' average correlation between cells will be 0.
#' Expression(Gene a, Cell k) - Avg(Expression(Gene a, Cell 1 to n))
#'
#' @param mat a matrix of cells-by-genes or genes-by-cells containing gene
#'   expression data.
#' @param rowWise boolean. Center by rows if TRUE and by columns if FALSE.
#' @param center.by the statistic subtracted from each row (or column):
#'   'mean' (default) or 'median'/'med'.
#'
#' @return a matrix with centered gene expression data.
#' @export
#'
center <- function(mat, rowWise = TRUE, center.by = 'mean') {
  if (center.by %in% c('med', 'median') && isTRUE(rowWise)) {
    # Subtract each row's median. A length-nrow vector recycles down the
    # columns, so plain subtraction centers the rows. (The original used the
    # column-centering idiom t(t(mat) - medians) here, which mis-pairs the
    # medians with rows whenever the matrix is not square.)
    # base stats::median replaces the robustbase dependency.
    medians <- apply(mat, 1, stats::median)
    mat.centered <- mat - medians
  }
  else if (center.by %in% c('med', 'median') && !isTRUE(rowWise)) {
    # Subtract each column's median; sweep() pairs the length-ncol vector
    # with columns. (The original subtracted the vector directly, which
    # recycles down the rows and does not center the columns in general.)
    medians <- apply(mat, 2, stats::median)
    mat.centered <- sweep(mat, 2, medians)
  }
  else if (isTRUE(rowWise)) {
    # Mean-center rows: transpose so scale() centers what were the rows.
    mat.centered <- t(scale(t(mat), center = TRUE, scale = FALSE))
  }
  else {
    # Mean-center columns.
    mat.centered <- scale(mat, center = TRUE, scale = FALSE)
  }
  mat.centered
}
|
61a361b69d802c27c48108d8810a62c4335b495c | 5619c945bb306f7e0a19e03f9f596d286158c45c | /man/query_s3_txdb.Rd | 565a045f54172595213b7eeef3a69602b2e413b1 | [] | no_license | khughitt/EuPathDB | f569a31de1d7716371bbf493c2fdf1d95926dced | b822c59134b6249bae2a8eb1e276a2bd4460cfc0 | refs/heads/master | 2023-08-05T05:32:47.830631 | 2022-10-25T02:38:40 | 2022-10-25T02:38:40 | 151,275,849 | 4 | 3 | null | 2023-07-21T14:37:30 | 2018-10-02T15:14:58 | R | UTF-8 | R | false | true | 595 | rd | query_s3_txdb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query_s3_file.R
\name{query_s3_txdb}
\alias{query_s3_txdb}
\title{Perform what should be a completely silly final check on the file which is to be copied to s3.}
\usage{
query_s3_txdb(file)
}
\arguments{
\item{file}{Filename to query.}
}
\value{
MD5 sum of the file or NULL.
}
\description{
This function really should not be needed. But damn. This will do a final check that the
data in the s3 staging directory is loadable in R and return the md5 sum of the file.
Thus the md5 sum may be added to the metadata.
}
|
795e9e003d7e5f3feb7de4cc1bf828e5b493a1c1 | b58ef6361161adfad9bdc7cc1b23c4988030fbe3 | /man/acs.Rd | c1752b71bba494e48433cad4843966ff5d4cc05e | [
"MIT"
] | permissive | DSPG-ISU/DSPG | 01b9ec9a3dd02cd2ee7e52a28ba22f6d312ad2f8 | c20b50c1dd28eedd879a9226b5f6511a0471c870 | refs/heads/master | 2023-02-26T12:54:52.616558 | 2021-02-05T05:44:04 | 2021-02-05T05:44:04 | 277,006,430 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,070 | rd | acs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{acs}
\alias{acs}
\title{American Community Survey - Computer Presence}
\format{
A data frame with 99 rows and 12 variables:
\describe{
\item{geography_id}{geographic ID used by the U.S. census for the location associated with this record}
\item{type}{The type of location associated with this record. Categories are state, county, place, and tract}
\item{name}{Name of the location}
\item{variable}{Variable ID identified by the U.S. Census Bureau}
\item{variable_description}{Description of the variable}
\item{computer_present}{Classifies if a computer is present in those households. Categories are total, yes, and no}
\item{internet_subscription}{Classifies what kind of internet subscription is present in those households. Categories are total, total w/computer, broadband, dial-up, and none}
\item{data_collection_period}{Period in which the data was collected}
\item{data_collection_end_date}{The date in which the data was done being collected}
\item{households}{The number of households estimated to have the specified characteristics in the record}
\item{row_id}{Unique ID associated with the record}
\item{geometry}{sf point object of geographic location}
}
}
\source{
\url{https://data.iowa.gov/Utilities-Telecommunications/Iowa-Households-by-Presence-of-a-Computer-and-Type/gz3j-hzab}
}
\usage{
acs
}
\description{
Summary data of Iowa American Community Survey responses by computer presence and internet subscription status. Each row represents a combination of variables and the households variable representing the number of households estimated to be associated with that variable combination.
}
\examples{
# NOTE(review): the previous example referenced an unrelated dataset
# (ia_counties); this minimal example uses the documented acs columns.
data(acs)
head(acs)
# estimated households by computer presence
aggregate(households ~ computer_present, data = acs, FUN = sum)
}
\keyword{datasets}
|
5ac5fc3f7376998b05312080d5de130bbc04ead0 | 87c30bb64856dc8940105447abcb827f7629c468 | /man/iddplacebo.Rd | 4c95584bf008c77a1361c681f60b74e38fba2965 | [] | no_license | carlbona/idd | c28f2856b25c8d085768dd1c998bd576442aff2e | 888ccd68976bd6ad1bfa3211a870fd39a5243763 | refs/heads/master | 2021-05-05T12:22:59.283961 | 2018-06-10T17:37:35 | 2018-06-10T17:37:35 | 118,240,758 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,825 | rd | iddplacebo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iddplacebo.R
\name{iddplacebo}
\alias{iddplacebo}
\title{Run placebo studies on untreated units and estimate placebo p-values.}
\usage{
iddplacebo(eventvar, popvar, treatvar, postvar, timevar, idvar, data,
iter = 10)
}
\arguments{
\item{eventvar}{Name of the event count variable.}
\item{popvar}{The person-time (or another exposure) variable.}
\item{treatvar}{The treatment group indicator dummy (0 for controls, 1 for treated units).}
\item{postvar}{The post-period indicator dummy (0 for all time points in the pre-intervention period, 1 for all time points in the post-period)}
\item{timevar}{The time variable (can be coded on any scale).}
\item{idvar}{The panel id variable.}
\item{data}{A long-form panel dataset containing the supplied variables.}
\item{iter}{The number of subsampling iterations. Defaults to 10, but 500-1000 are usually recommended.}
}
\value{
Returns a list containing the following elements:
\code{$Resdata}: a data frame containing the results.
\code{$ECDF}: the empirical cumulative distribution function.
\code{$Effects}: a long-form data frame containing the estimated placebo effects.
\code{$Treat.ratio}: a data frame containing the post/pre-RMSE ratio for the treated unit.
\code{$supp_stats}: a data frame containing supplementary statistics (p-values etc).
}
\description{
\code{iddplacebo} estimates placebo studies on untreated units and produces pseudo p-values based on the empirical distribution of post/pre-RMSE ratios.
}
\examples{
\dontrun{
data(simpanel)
placebo.out <- iddplacebo(eventvar="y",
popvar="pop",
idvar="age",
timevar="time",
postvar="post",
treatvar="treat",
data=simpanel,
iter=50)
}
}
|
083729b1a23eecc4d4844332be6b7ff8b2e20769 | 555e53b79fc56f5263fedb403485cb6fc4ae0412 | /man/input66002.Rd | 705eca9689f7c6e611544d636b02b9167b37ea82 | [] | no_license | DannyArends/BSE | 547902710829e92027cd57d698645daaf8e29a76 | 0186f2b5acf2b8160c8102ce50c850ac8e866e1f | refs/heads/master | 2021-01-01T17:47:40.754738 | 2013-12-06T10:19:38 | 2013-12-06T10:19:38 | 14,977,266 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 305 | rd | input66002.Rd | \name{input66002}
\alias{input66002}
\docType{data}
\title{
Example data.
}
\description{
Example data.
}
\usage{
data(input66002)
}
\format{
TODO
}
\details{
TODO
}
\source{
TODO
}
\references{
TODO
}
\examples{
library(BSE)
data(input66002) # Load the dataset
}
\keyword{datasets}
|
4b3381802d0ee6acf8c19c59bf103a4aeed3e417 | 4cf8709cec08920b192e35065fc8d985735610b7 | /module2/week2/assignment/part2/R/Part2.R | 98327d946bccbfab42094419f28a0e2312e45847 | [] | no_license | Kishwar/ML_UoW_Coursera | 6ad612d41fe726a591f95ac48e4a8c883fd8b931 | 11c49de9c47378e03fd90ea257be98648c16ca92 | refs/heads/master | 2021-05-30T08:51:09.694825 | 2016-01-10T21:31:14 | 2016-01-10T21:31:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,437 | r | Part2.R | main <- function(csv_train, csv_test) {
  ## Runs the two course exercises: a simple (one-feature) and a multiple
  ## (two-feature) linear regression fitted by gradient descent, printing
  ## quiz answers and the test-set RSS for each model.
  ## NOTE(review): setwd() inside a function is a global side effect and
  ## the hard-coded path only works on the author's machine -- consider
  ## passing the directory as an argument instead.
  #set path to csv files directory
  setwd("~/Desktop/MachineLearningCourse/module2/week2/assignment/part2/R/")
  #read CSVs
  tr <- read.csv(csv_train)  # training houses
  te <- read.csv(csv_test)   # held-out test houses
  print("++++++++++++++++++++++++++++++++++ SIMPLE REGRESSION ++++++++++++++++++++++++++++++++++++++++++")
  print("Q1:What is the value of the weight for sqft_living -- the second element of 'simple_weights'")
  step_size <- 7e-12
  tolerance <- 2.5e7
  feature_matrix <- tr[['sqft_living']]
  ones <- rep(1, length(feature_matrix))
  ## Design matrix: intercept column of ones + sqft_living.
  feature_matrix <- matrix(c(ones, feature_matrix), nrow = length(ones), ncol = 2)
  output <- tr[['price']]
  initial_weights <- c(-4.70000000e+04, 1.00000000e+00)
  weights <- regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance)
  print(paste("Answer:", round(weights[2], digits = 1)))
  print("Q2:What is the predicted price for the 1st house in the TEST data set for model 1 (round to nearest dollar)")
  ## Prediction for the first test house: intercept + slope * sqft_living.
  p <- weights[1] + (te[['sqft_living']][1] * weights[2])
  print(paste("Answer: $", round(p, digits = 0), sep = ""))
  print("Q:RSS-Simple regression - test data")
  ## Residual sum of squares on the full test set.
  rss <- sum((te[['price']] - (weights[1] + (te[['sqft_living']] * weights[2])))^2)
  print(paste("RSS-Simple Regression:", signif(rss, 5)))
  print("++++++++++++++++++++++++++++++++++ MULTIPLE REGRESSION ++++++++++++++++++++++++++++++++++++++++++")
  print("Q1:What is the value of the weights for multiple features")
  initial_weights = c(-100000., 1., 1.)
  step_size = 4e-12
  tolerance = 1e9
  output <- tr[['price']]
  ones <- rep(1, length(tr[['sqft_living']]))
  ## Design matrix: intercept + sqft_living + sqft_living15.
  feature_matrix <- matrix(c(ones, tr[['sqft_living']], tr[['sqft_living15']]), nrow = length(ones), ncol = 3)
  weights <- regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance)
  print(paste("Answer:", weights[1], weights[2], weights[3]))
  print("Quiz Question: What is the predicted price for the 1st house in the TEST data set for model 2 (round to nearest dollar)")
  p <- weights[1] + (te[['sqft_living']][1] * weights[2]) + (te[['sqft_living15']][1] * weights[3])
  print(paste("Answer: $", round(p, digits = 0), sep = ""))
  print(paste("Actual Price of House: $", te[['price']][1], sep = ""))
  print("Q:RSS-Multiple regression - test data")
  rss <- sum((te[['price']] - (weights[1] + (te[['sqft_living']] * weights[2]) + (te[['sqft_living15']] * weights[3])))^2)
  print(paste("RSS-Multiple Regression:", signif(rss, 5)))
}
## Fit linear-regression weights by batch gradient descent on squared error.
##
## Args:
##   feature_matrix: numeric matrix, one column per feature (include a
##     column of ones for the intercept).
##   output: numeric vector of observed responses.
##   initial_weights: numeric vector of starting coefficients, one per column.
##   step_size: learning rate applied to each partial derivative.
##   tolerance: stop once the gradient magnitude drops below this value.
##
## Returns: the fitted weight vector.
regression_gradient_descent <- function(feature_matrix, output, initial_weights, step_size, tolerance) {
  coefs <- initial_weights
  repeat {
    ## Residuals of the current fit (predictions minus observations).
    residuals <- feature_matrix %*% coefs - output
    squared_grad <- 0
    for (j in seq_along(coefs)) {
      ## Partial derivative of the cost w.r.t. coefficient j (up to the
      ## constant factor 2, applied in the update below).
      partial <- as.numeric(feature_matrix[, j] %*% residuals)
      squared_grad <- squared_grad + partial * partial
      coefs[j] <- coefs[j] - 2 * step_size * partial
    }
    ## Converged when the gradient magnitude falls under the tolerance.
    if (sqrt(squared_grad) < as.numeric(tolerance)) {
      break
    }
  }
  coefs
}
d1442b3c7a8bae50d701e245199afe42035a88c4 | ff393a02cbad28b23554d338293f62325b4c993a | /functions.R | cf723f07c63be25132bed39d79ed8fcace007e17 | [] | no_license | Tharnid/DS | 547a4c40d27597b111ec15ec776db0cd973ccf47 | ce3817ac685790679340324090052aae7ca19eaa | refs/heads/master | 2021-01-10T15:15:01.360546 | 2016-02-12T18:12:55 | 2016-02-12T18:12:55 | 50,303,491 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | functions.R | # function
## Increment the argument by one and return the result.
f <- function(x) {
  x + 1
}
# invoking function
f(2)
|
d8888e32a7800fc12685ffd91688bcf7bfcad58c | 184180d341d2928ab7c5a626d94f2a9863726c65 | /issuestests/NAM/inst/testfiles/SPM/SPM_output/log_3950d342bd8d7b480bf0137f7ab82365cc59faba/SPM-test.R | ee544daaf5476dc53f951e5d749ad61d100b79be | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,083 | r | SPM-test.R | testlist <- list(blk = c(-9.42479322868495e-139, -2.77880834313929e+218, -2.36203134257835e-109, 1.45149655469103e-51, 2.28216284227782e+209, 8.82377133141316e+78, -4.23622850344875e-216, 752551560228.574, 32.8480477357901, 0.0273134900865026, 6.53337849169343e+86, -1.41326940822168e-285, 8.43309075166444e+303, 2.6305878853304e+250, 4.45091837857516e+102, -4.84176888258619e+169, -8.83361792258959e-43, 3.06061747514633e+137, 7.26657465234565e+159, 2.46941730175599e-155, -1.10394974878945e-214, 1.11903595215922e+28, -1.25162578864962e+173, -2.07025600526726e+48, NA, 1.29928754190194e-296, -7.61409343551287e+238, 1.11140487837846e-76, -4.06595675179469e+121, 6.14435136975689e+258, -1.39305578317756e+115, NaN, 2.15635714927475e+28, -2.23363957380906e-170, 1.30639432708205e+208, -1.49931372640431e+136, -2.16152624512117e-68, 9.58808012524132e-147, 3.48281911311636e+218, -2.72287343191417e-12, -3.76114124426251e-251, -9.09668163292374e+291, 1.51957570968137e+290, Inf, -3.48625481190644e-113, -6.65021626462029e-189, -1.23760175655554e+119, -5.97373429107074e-272, -4.73757349050144e+254, 3.32716994918954e-139, -1.62313915715165e-259, 4.06933520761332e-271, -1.11063791222347e+64, 7.62115480602828e-75, -9.46721467185711e-185, -9.50531319104704e+60, -2.36574709877486e-114, 5.15250632284284e+213, -2.53548599141654e-97, 1.31935230466731e+226, -7.05809596577169e-169, 2.99410983430624e-112, -2.02278848208876e-110, 
-1.3042602755397e+72, 3.67387261951318e-27, 5.66357514998748e-178, -6.19770299348907e+265, 1.19639464299215e-235, -9.12745149403451e+65, 1.78014553709723e+86, 7.55556527887261e+83, 2.19269884269335e+117, 1.17256730328089e-272, -5.85339348640067e-133, -7.26217715562477e+185, 2.12257943350137e-103, -Inf, -7.78981864892437e-283, -9.02319968697613e+174, 1.44271180606491e+215, -Inf, -5.18944242822972e+123, 0), cN = 2.00083656807416e+258, col = c(2.83705318082755e-168, -3.74074371204008e+126, 1.1727628969681e-121, 4.27090998741248e-250, 1.13459540055622e+86, -8.78355880484277e-278, 511120024.64334, 2.24613040611104e-206, -Inf, -1.73106394187413e-81, -1.80178551735879e+276, -1.05298873279163e+262, NaN, -4.88140482889244e-211, 8.08365759623683e+272, -2.08179477550806e+114, 3.58088182056612e-12, -2.47460499011964e+82, 2.30665398682254e+244, -2.44932285525777e-189, 4.80131433298859e-145, -1.09208376312016e-307, 8.80891910004997e+232, -2.51947649378149e-213, 2.57198178072734e-167, -3.88572194578948e+127, 0.00390600784666965, -8.10852705428849e+155, -8.80651724284188e+215, 8.12252520024622e+61, -2.09943412138504e+227, NA, 133.465021570115, -1.20761997486179e+165, -9.21241621115341e-28, 1.86427434783251e-190, 2.16859280272584e-216, -199822857367897248, -7.00876878283612e-113, 2.25787724610965e+278, 5.57097263170885e+111, -1.00529515974947e+289, 0), rN = 4.26154236835835e-299, row = c(Inf, Inf, 3.32271496684439e-247, -Inf, 4.8592907699178e-256, -5.10474086579319e-157, 4.75620281245605e-266, 0))
## Invoke the internal SPM routine from the NAM package on the fuzzed
## argument list built above (DeepState-generated regression input).
result <- do.call(NAM:::SPM,testlist)
## Dump the structure of the result so it can be diffed against the saved log.
str(result)
dff7a5f0f6af750937849c5cfababd3f30de77d9 | 4cfb61c018de0def8fbb85ab515cae8484c03f11 | /ref_nmds_v3.R | 45cf9196f3cbc027283763eb26981f4fddf8a348 | [] | no_license | tleung2/nmds_phytoPAM | 10238c0074ea2c2aabe643f6beb11251454fe22c | 7aa85eea3ae968a49728c510bde31012524e62e8 | refs/heads/main | 2023-06-04T01:35:24.637848 | 2021-06-21T02:10:17 | 2021-06-21T02:10:17 | 358,148,564 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,954 | r | ref_nmds_v3.R |
## LOAD PACKAGES
library(vegan)
library(viridis)
library(NbClust)    # for finding optimal clusters
library(cluster)    # for computing cluster silhouettes
library(tidyverse)
library(factoextra) # computing hierarchical clusters
library(vegan3d)    # for plotting 3D images; auto-loads vegan
library(plotly)
library(plot3D)
## Use 2018-2020 fluorescence data with references all in one dataset.
## Then plot the ordination to see how the fluorescence samples
## compare with the references (default and lab measured)
## Read data----from Fluorescence Ref Folder--------------------
## (Use `<-` for assignment and TRUE rather than the reassignable T.)
data.all <- read.csv("all_data_2018-2020_v3.csv", header = TRUE)
data.all$fit_error <- as.numeric(data.all$fit_error)
#######################################################################
########################    TIDY DATASET      ##########################
## dataset without non-default (lab-calibrated) reference spectra
data.all2 <- data.all %>%
  filter(source != 'non_Default')
########################################################################
########################    NORMALIZE DATA    ########################
## Normalize samples relative to max (max F values scaled to 1)
## First subset the five fluorescence columns, then scale each row to its max
data.norm <- as.data.frame(t(apply(data.all2[2:6], 1,
                                   function(x) round(x / max(x), 3))))
## Normalize the deconvolution-check columns (scaled to 1000, 2 decimals)
check.norm <- as.data.frame(t(apply(data.all2[14:18], 1,
                                    function(x) round((x / max(x)) * 1000, 2))))
## Add sample metadata columns back to the normalized fluorescence data.
data.norm2 <- bind_cols(data.norm, data.all2[1])
data.norm3 <- bind_cols(data.norm2, data.all2[8])
data.norm4 <- bind_cols(data.norm3, data.all2[c(9, 10, 13, 19:24)])
## BUGFIX: these three calls previously bound columns from `data.all`
## (the unfiltered table), whose row count differs from `check.norm`
## (built from the filtered `data.all2`), and the third call was missing
## c() around the indices (`data.all[9,10,19:24]`). Use data.all2
## throughout so the rows stay aligned.
check.norm2 <- bind_cols(check.norm, data.all2[1])
check.norm3 <- bind_cols(check.norm2, data.all2[8])
check.norm4 <- bind_cols(check.norm3, data.all2[c(9, 10, 19:24)])
## Remove samples with Fit error = 32000 (user error)
data.norm4 <- data.norm4 %>%
  filter(fit_error != '32000')
check.norm4 <- check.norm4 %>%
  filter(fit_error != '32000')
## Keep the 2018-2020 datapoints plus the factory default references
data.subset <- subset(data.norm4, source == "2019" | source == "2020" | source == "Default" | source == "2018")
data.subset2 <- subset(check.norm4, source == "2019" | source == "2020" | source == "2018" | source == "Default")
#######################################################################
##########################    HISTOGRAMS      #########################
## Histogram of fit errors for each year, one panel per data source
## (data.subset2 holds the deconvolution-check values built above).
hist.1<-ggplot(data.subset2, aes(x = fit_error, fill = source)) +
  geom_histogram(binwidth = 1) +
  xlab("Range of Fit Errors") +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 18),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=18), axis.title = element_text(size = 18),
        legend.text = element_text(size = 18), legend.key=element_rect(fill='white'),
        legend.title = element_blank()) +
  facet_wrap(.~source, scales = "free_x")
hist.1
########################################################################
#########################       BOXPLOTS
### Boxplot only lakes with consistent sampling (not random testing)
### and ignoring null values
### Convert z-off background fluorescence to numeric
data.subset$zoff_F =as.numeric(data.subset$zoff_F)
### filter out random lakes and lakes with 2 sample sites
### Use pipe (%>%) and (!) to do this and then graph boxplot
### this eliminates making another dataframe
## Horizontal (flipped) boxplot of background fluorescence per lake.
data.subset %>% filter(!site %in% c("Pleasant Creek", "Honey Creek Resort",
                                    "Lacey-Keosauqua", "Lake Wapello",
                                    "Marble Beach", "Red Haw", "Crandall's",
                                    "Union Grove 10x Dilution", "Default",
                                    "Denison", "McIntosh Woods",
                                    "North Twin East")) %>%
  #ggplot(aes(x = fct_rev(as_factor(site)), y = zoff_F, fill = site)) +
  ggplot(aes(x = site, y = zoff_F, fill = site)) +
  geom_boxplot(color = "black", fill = "gray", alpha = 0.7) +
  scale_x_discrete(limits = rev) + ## reverse order of flipped x axis
  labs(y = 'Background Fluoresence (%)') +
  coord_flip() +
  #scale_fill_manual(values = c("#66CCCC", "#FFFF00", "#99CC00", "#FF9900"),
  #labels = c("'Blue' group", "'Brown' group",
  #"'Green' group","'Red' group")) +
  #facet_wrap(.~order, scale = "free", ncol = 2) +
  #scale_fill_viridis_d(option = "turbo") +
  theme(panel.background = element_blank(),
        axis.title.y = element_text(size = 12, color = "black"),
        axis.title.x = element_text(size = 12, color = "black"),
        axis.text.y = element_text(size = 12, color = "black"),
        axis.text.x = element_text(size = 14, color = "black"),
        axis.line = element_line(color = "black"),
        strip.text = element_text(size = 12),
        legend.position = "none",
        legend.key = element_blank(),
        legend.text = element_text(size = 12),
        legend.title = element_text(color = "white"))
########################################################################
#########################     RUN nMDS      ############################
## 1) start mds:
## Is there a difference in fluorescence of default fluorescence signatures
## vs lab calibrated fluorescence vs field samples?
### note: run more than once if first attempt is no convergence
set.seed(123)
## mds of default ref and cyano cultures
## Import PAM-ref
## NOTE(review): PAM_ref is not created in this script -- presumably
## imported interactively before running this chunk; confirm.
## mds.ref2 => k = 3 whereas mds.ref => k = 2
mds.ref2<-metaMDS(PAM_ref[, c(2:6)], distance = "bray", k = 3,
                  maxit = 999, trace =2)
## mds of samples and default ref
mds.subset3<-metaMDS(data.subset[, c(1:5)], distance = "bray", k = 2,
                     maxit = 999, trace =2)
## mds of deconvolution F (check F) of samples and default ref
mds.subset2<-metaMDS(data.subset2[, c(1:5)], distance = "bray", k = 3,
                     maxit = 999, trace =2)
## NOTE(review): mds.data, mds.subset, mds.subset3b and mds.subset4 are
## not defined anywhere in this file -- likely leftovers from an earlier
## session or loaded from .rda files; confirm before sourcing top-to-bottom.
stressplot(mds.data)
mds.data
mds.subset
mds.subset3
## 2) Saving nMDS output
save(mds.subset3b, file = "mds_subset3b.rda")
save(mds.ref2, file = "mds_ref2.rda")
## 3) Plot nMDS output using base R
plot(mds.subset4) ## Base R will automatically recognize ordination
plot(mds.data)
########################################################################
#################    PREPARING OUTPUT FOR GGPLOT    ##################
## 1) Extract nMDS output into a dataframe
## Use scores() to extract site scores and convert to a data frame
mds.scores3<-as.data.frame(scores(mds.subset3))
mds.scores<-as.data.frame(scores(mds.data))
## 2) create a column of site names from the row names of the score frame
## BUGFIX: this previously read rownames(mds.subset3) -- a metaMDS object
## has no row names (returns NULL), which silently left `site` unset.
## Use the score data frame itself, matching the mds.scores line below.
mds.scores3$site<-rownames(mds.scores3)
mds.scores$site<-rownames(mds.scores)
## 3) add sample details to the mds.scores3 dataframe
grp.fit<-round(data.subset$fit_error, digits = 0) ## Round Fit error to a whole number
mds.scores3$fit<-data.subset$fit_error
mds.scores3$source<-data.subset$source
mds.scores3$sample<-data.subset$sample
mds.scores3$location<-data.subset$site
mds.scores3$tot_chla<-data.subset$tot_chla
mds.scores3$cyano_chla<-data.subset$cyano_chla
mds.scores3$green_chla<-data.subset$green_chla
mds.scores3$brown_chla<-data.subset$brown_chla
mds.scores3$pe_chla<-data.subset$pe_chla
## NOTE(review): data.all2 has more rows than data.subset (and than
## mds.scores3), so this assignment recycles/mismatches -- it should
## probably read from data.subset; confirm fit_class is carried through.
mds.scores3$class<-data.all2$fit_class
## 4) Extract Species scores into dataframe
## Use scores() to extract species scores from the mds output
## and convert to data frame
species.score<-as.data.frame(scores(mds.data, "species"))
## 5) create columns of species from the species score dataframe
species.score$species<-rownames(species.score)
## check species dataframe
head(species.score)
## 6) Create polygons for default (factory) reference spectra
## Renaming default references (culture name -> spectral group label)
mds.scores3$sample[which(mds.scores3$sample == "syleo")] <- "'Blue' group" #Synechococcus leopoliensis.
mds.scores3$sample[which(mds.scores3$sample == "chlorella")] <- "'Green' group" #Chorella vulgaris"
mds.scores3$sample[which(mds.scores3$sample == "phaeo")] <- "'Brown' group" #Phaeodactylum tricornutum"
mds.scores3$sample[which(mds.scores3$sample == "crypto")] <- "'Red' group" #Cryptomonas ovata"
## hull values for grp Default (convex hull of the default reference points)
grp.default3<-mds.scores3[mds.scores3$source=="Default",][chull(mds.scores3[mds.scores3$source=="Default",
                                                                            c("NMDS1", "NMDS2")]),]
## hull values for grp Customized
## NOTE(review): mds.scores3 has a `source` column but no `grp.source`
## column, and non_Default rows were filtered out earlier -- this hull
## (and grp.mix below) will come back empty; confirm intent.
grp.cust<-mds.scores3[mds.scores3$grp.source == "non_Default",][chull(mds.scores3[mds.scores3$grp.source == "non_Default",
                                                                                  c("NMDS1", "NMDS2")]),]
## hull values for mixed cultures
grp.mix<-mds.scores[mds.scores$grp.source == "mixed",][chull(mds.scores[mds.scores$grp.source == "mixed",
                                                                        c("NMDS1", "NMDS2")]),]
## combine grp default and grp cust
## NOTE(review): grp.default is never defined (only grp.default3) --
## probably a leftover name from an earlier script version.
hull.data<-rbind(grp.default,grp.cust)
hull.data
## Do same for ref nmds
## Add PAM-ref details to mds.scores nmds output
mds.scores$sample<-PAM_ref$sample
mds.scores$source<-PAM_ref$source
mds.scores$sample[which(mds.scores$sample == "syleo")] <- "'Blue' group" #Synechococcus leopoliensis.
mds.scores$sample[which(mds.scores$sample == "chlorella")] <- "'Green' group" #Chorella vulgaris"
mds.scores$sample[which(mds.scores$sample == "phaeo")] <- "'Brown' group" #Phaeodactylum tricornutum"
mds.scores$sample[which(mds.scores$sample == "crypto")] <- "'Red' group" #Cryptomonas ovata"
## Save mds scores dataframe
save(mds.scores3, file = "mds_scores3.rda")
## Do same for ref and cyano mds output
## 1) Extract nMDS output into a dataframe
ref2.scores<-as.data.frame(scores(mds.ref2))
## 2) create column of site names from row names
## NOTE(review): mds.ref is not defined in this file --
## rownames(ref2.scores) was probably intended (see pattern above).
ref2.scores$site<-rownames(mds.ref)
## 3) add details to the ref2.scores dataframe
ref2.scores$source<-PAM_ref$source
ref2.scores$sample<-PAM_ref$sample
## 4) rename refs
## NOTE(review): the subscripts index `ref.scores` (not defined in this
## file) while assigning into ref2.scores -- likely should be ref2.scores.
ref2.scores$sample[which(ref.scores$sample == "syleo")] <- "'Blue' group" #Synechococcus leopoliensis.
ref2.scores$sample[which(ref.scores$sample == "chlorella")] <- "'Green' group" #Chorella vulgaris"
ref2.scores$sample[which(ref.scores$sample == "phaeo")] <- "'Brown' group" #Phaeodactylum tricornutum"
ref2.scores$sample[which(ref.scores$sample == "crypto")] <- "'Red' group" #Cryptomonas ovata"
## Save ref mds scores dataframe
save(ref2.scores, file = "ref_scores3.rda")
########################################################################
###################    Plot nMDS using ggplot    #####################
## -------------- Plot nMDS of references and cyanos -----------------
## hulling only default refs, alpha = transparency w/ 0 being transparent
## Use grp.ref for references
## subset for cyanos as grp.cust (group customized)
## NOTE(review): ref.scores is not created in this script (the ref score
## frame here is ref2.scores) -- likely a leftover from an earlier version.
grp.ref<-ref.scores[ref.scores$source=="Default",][chull(ref.scores[ref.scores$source=="Default",
                                                                    c("NMDS1", "NMDS2")]),]
grp.cust<-subset(ref.scores, source == "non-Default")
## plot nmds with ggplot
p1<-ggplot() +
  # this adds default refs scores
  geom_polygon(data = grp.ref, aes(x = NMDS1, y = NMDS2, group = source),
               fill = NA, color = "gray", size = 1) +
  geom_point(data = grp.ref, aes(x=NMDS1, y=NMDS2), size = 2) +
  geom_text(data = grp.ref, aes(x = NMDS1, y = NMDS2, label = sample),
            size = 3.5, nudge_x = 0.2) +
  # this adds cyanos scores
  geom_point(data = grp.cust, aes(x=NMDS1, y=NMDS2, shape = sample),
             size = 3) +
  #geom_text(data = grp.cust, aes(x = NMDS1, y = NMDS2, label = sample),
  #size = 3.5, nudge_x = 0.2) +
  scale_shape_manual(values = c(0:9)) + ## assign multiple shapes
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        text = element_text(size = 10),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=10),
        axis.title = element_text(size = 10),
        legend.text = element_text(size = 10),
        legend.key=element_rect(fill='white'),
        legend.title = element_blank(),
        legend.position = "na")
p1
## 3D plot for nmds (because k =3)
## NOTE(review): plotly usually expects color = ~source (formula syntax);
## the bare `source` here resolves to base R's source() function -- verify.
plot_ly(ref2.scores, x=ref2.scores$NMDS1,
        y=ref2.scores$NMDS2,
        z=ref2.scores$NMDS3,
        type="scatter3d", mode="markers", color=source)
## NOTE(review): axes_3D()/stat_3D() come from the gg3D package, which is
## not loaded at the top of this script -- confirm it is attached.
ggplot(ref2.scores, aes(x=NMDS1, y=NMDS2, z=NMDS3, color=source)) +
  theme_void() +
  axes_3D() +
  stat_3D()
## -------------- Plot references and samples -------------------
## Subset per-year data from the mds output
## NOTE(review): mds.scores3 carries a `source` column (not `grp.source`);
## these subsets will be empty unless grp.source exists in the workspace.
mds.2019<-subset(mds.scores3, grp.source == "2019")
mds.2020<-subset(mds.scores3, grp.source == "2020")
mds.2018<-subset(mds.scores3, grp.source == "2018")
mds.sample<- mds.scores3 %>%
  filter(source !='Default')
mds.zerogreen<-subset(mds.scores3, green_chla == "0")
mcintosh.2018<-subset(mds.2018, location == "McIntosh Woods")
beeds.2018<-subset(mds.2018, location == "Beed's Lake")
## subset by fit error
## NOTE(review): grp.fit is a free-standing vector the same length as
## data.subset, not a column of mds.scores3 -- these subsets and the
## aes(color = grp.fit) below rely on that length matching; verify.
fit0<-subset(mds.scores3, grp.fit == 0)
fit1<-subset(mds.scores3, grp.fit == 1)
fit2<- subset(mds.scores3, grp.fit == 2)
fit3<-subset(mds.scores3, grp.fit == 3)
fit4<-subset(mds.scores3, grp.fit > 3)
## Ordination of the default-reference hull plus Beed's Lake samples,
## colored by (rounded) fit error.
p3<-ggplot() +
  geom_polygon(data = grp.default3,
               aes(x = NMDS1, y = NMDS2, group = grp.source),
               fill = NA, color = "gray", size = 1) +
  geom_point(data = grp.default3, aes(x=NMDS1, y=NMDS2), size =2) +
  #geom_point(data = mds.sample, aes(x=NMDS1, y=NMDS2, color = class), size =2) +
  #geom_point(data = mds.2020, aes(x=NMDS1, y=NMDS2), size = 6, color = "#9999FF") +
  geom_point(data = beeds.2018, aes(x=NMDS1, y=NMDS2, color = grp.fit), size = 6) +
  #geom_point(data = mds.2018, aes(x=NMDS1, y=NMDS2), color = "#33CC99", size = 6) +
  #geom_point(data = mds.2019, aes(x=NMDS1, y=NMDS2), size = 6, color = "#FF9900") +
  #geom_text(data = grp.cust, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 5, vjust = 0, nudge_x = 0.07) +
  geom_text(data = grp.default3, aes(x = NMDS1, y = NMDS2, label = sample), size = 4, vjust = 0, nudge_x = 0.07) +
  #geom_text(data = mds.2020, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 7, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = mds.2018, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 7, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = mds.2019, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 7, vjust = 0, nudge_x = 0.01) +
  geom_text(data = beeds.2018, aes(x = NMDS1, y = NMDS2, label = sample), size = 3, vjust = 0.1, nudge_x = 0.05) +
  scale_colour_viridis(option = "plasma") +
  #scale_color_manual(values = c("#de4968", "#929596"), #de4968
  #label = c("> 1", "0")) +
  #facet_wrap(.~ location) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        text = element_text(size = 10),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=10),
        axis.title = element_text(size = 10),
        legend.text = element_text(size = 10),
        legend.key=element_rect(fill='white'),
        legend.position = c(0.9,0.9),
        legend.title = element_blank())
p3
## Turn on interactive graph
library(plotly)
ggplotly(p3)
## Subset and plot green valley / North Twin sites over the reference hull.
mds.gval<-subset(mds.scores3, location == "Green Valley")
mds.twin<-subset(mds.scores3, location == "North Twin East" | location == "North Twin West")
p5<-ggplot() +
  geom_polygon(data = grp.default3, aes(x = NMDS1, y = NMDS2, group = grp.source), fill = c("gray"), alpha = 0.5) +
  geom_point(data = grp.default3, aes(x=NMDS1, y=NMDS2), color = "black", size =7) +
  geom_point(data = mds.twin, aes(x=NMDS1, y=NMDS2, color = grp.source), size = 7) +
  geom_text(data = mds.twin, aes(x = NMDS1, y = NMDS2, label = sample), size = 5, vjust = 0, nudge_x = 0.01) +
  geom_text(data = grp.default3, aes(x = NMDS1, y = NMDS2, label = sample), size = 7, vjust = 0, nudge_x = 0.06) +
  scale_color_viridis_d(option = "plasma", direction = -1, breaks = c("2018", "2019", "2020")) +
  #scale_color_viridis(option = "plasma", direction = -1) +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        text = element_text(size = 14),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=14),
        axis.title = element_text(size = 14),
        legend.text = element_text(size = 14),
        legend.key=element_rect(fill='white'),
        legend.title = element_blank())
p5
ggplotly(p5)
## Plot references and 2019 samples
## NOTE(review): the columns used below (grp.source, week, month,
## location, grp.fit) are not added to mds.scores in this file -- these
## plots appear to depend on an earlier workspace; confirm before running.
## NOTE(review): the names p5 and p9 are reused here, overwriting the
## earlier plots of the same name.
## Subset 2019 data from the mds output
mds.2019<-subset(mds.scores, grp.source == "2019")
p5<-ggplot() +
  #geom_polygon(data = hull.data, aes(x = NMDS1, y = NMDS2, group = grp.source), fill = c("gray"), alpha = 0.5) +
  #geom_point(data = hull.data, aes(x=NMDS1, y=NMDS2, color = grp.source), size = 3) +
  geom_point(data = mds.2019, aes(x=NMDS1, y=NMDS2, color = week), size = 3) +
  geom_text(data = mds.2019, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 4, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = hull.data, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  coord_equal() +
  scale_color_viridis_d() +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 14),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=14), axis.title = element_text(size = 14),
        legend.text = element_text(size = 14), legend.key=element_rect(fill='white'),
        legend.title = element_blank())
p6<- p5 + facet_wrap(.~location, ncol = 3)
p6
## Plot Black Hawk Lake 2018 + 2019 samples
## Subset the Black Hawk chase lakes from the mds output
mds.chase<-subset(mds.scores, location %in% c('Brushy Creek','North Twin East','North Twin West',
                                              'Black Hawk','Denison'))
p7<-ggplot() +
  #geom_polygon(data = hull.data, aes(x = NMDS1, y = NMDS2, group = grp.source), fill = c("gray"), alpha = 0.5) +
  #geom_point(data = hull.data, aes(x=NMDS1, y=NMDS2, color = grp.source), size = 3) +
  geom_point(data = mds.chase, aes(x=NMDS1, y=NMDS2, color = week), size = 3) +
  geom_text(data = mds.chase, aes(x = NMDS1, y = NMDS2, label = grp.fit), size = 4, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = hull.data, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  coord_equal() +
  scale_color_viridis_d() +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 14),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=14), axis.title = element_text(size = 14),
        legend.text = element_text(size = 14), legend.key=element_rect(fill='white'),
        legend.title = element_blank())
p8 <- p7 + facet_wrap(.~location, ncol = 2)
p8
## 2018-2019 samples, colored by month
mds.all<-subset(mds.scores, grp.source %in% c('2018','2019'))
p9<-ggplot() +
  #geom_polygon(data = hull.data, aes(x = NMDS1, y = NMDS2, group = grp.source), fill = c("gray"), alpha = 0.5) +
  #geom_point(data = hull.data, aes(x=NMDS1, y=NMDS2, color = grp.source), size = 3) +
  geom_point(data = mds.all, aes(x=NMDS1, y=NMDS2, color = month), size = 4) +
  geom_text(data = mds.all, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = hull.data, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  coord_equal() +
  scale_color_viridis_d(breaks=c("May", "June", "July", "August")) +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 16),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=16), axis.title = element_text(size = 16),
        legend.text = element_text(size = 16), legend.key=element_rect(fill='white'),
        legend.title = element_blank(), strip.text = element_text(size = 16))
p9
p10<- p9 + facet_wrap(.~location, ncol = 3)
p10
## Subset for green valley
mds.gv<-subset(mds.scores, location == "Green Valley")
p11<-ggplot() +
  #geom_polygon(data = hull.data, aes(x = NMDS1, y = NMDS2, group = grp.source), fill = c("gray"), alpha = 0.5) +
  #geom_point(data = hull.data, aes(x=NMDS1, y=NMDS2, color = grp.source), size = 3) +
  geom_point(data = mds.gv, aes(x=NMDS1, y=NMDS2, color = month), size = 7) +
  #geom_text(data = mds.gv, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  #geom_text(data = hull.data, aes(x = NMDS1, y = NMDS2, label = site), size = 4, vjust = 0, nudge_x = 0.01) +
  #coord_equal() +
  scale_color_viridis_d(breaks=c("May", "June", "July", "August")) +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 22),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=22), axis.title = element_text(size = 22),
        legend.text = element_text(size = 22), legend.key=element_rect(fill='white'),
        legend.title = element_blank(), strip.text = element_text(size = 22))
p11
## NOTE(review): this re-facets p9 again (duplicate of the lines above);
## p11 + facet_wrap(...) may have been intended.
p10<- p9 + facet_wrap(.~location, ncol = 3)
p10
## Clustering: Determine the optimal number of clusters
## the NbClust package used to do this
## use the scaled or normalized data
nb<-NbClust(data.norm[,c(1:5)], distance = "euclidean", min.nc = 2, max.nc = 10,
            method = "average", index = "all")
## K-means Clustering Analysis
km.res<-eclust(data.norm[,c(1:5)], "kmeans", k = 2,
               nstart = 25, graph = FALSE)
## visualize k-means cluster
## Notes: ellipse.alpha = ellipse color transparency, show.clus.cent = cluster center
## NOTE(review): this reuses the name p9, clobbering the earlier ggplot.
p9 <- fviz_cluster(km.res, geom = "point", ellipse.type = "norm",
                   pointsize = 4, ellipse.alpha = 0, ellipse.width = 2,
                   show.clust.cent = FALSE) +
  scale_color_viridis_d() +
  theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
        panel.background = element_blank(),text = element_text(size = 16),
        axis.line = element_line(colour = "black"),
        axis.text=element_text(size=14), axis.title = element_text(size = 14),
        legend.text = element_text(size = 16), legend.key=element_rect(fill='white'),
        legend.title = element_blank())
p9
## Dendrogram of cluster analysis
## similar samples will be closer together
## First, create new dataframe with normalized/scaled values and sample info
data.hc<-cbind(data.norm,data.all[,c(8:12)])
data.hc<-cbind(data.hc,data.all$sample)
## Start hierarchical clustering analysis
res.hc <- eclust(data.hc[,c(1:5)], "hclust", k = 2, method = "average", graph = FALSE)
plot(res.hc) #view hclust results
## Make dendrogram from hclust results
dend<-as.dendrogram(res.hc)
plot(dend)
## this is very difficult to see because there are so many samples
par(mfrow = c(1,1)) #2,1 = two trees laying on top of each other
plot(dend, main = "Original dendrogram")
dend_list <- get_subdendrograms(dend, 2) #creates list of 2 clusters
sapply(dend_list, plot) #applies list of clusters to a plot
## cut the dendrogram into different groups
grp<-as.data.frame(cutree(dend, k = 2, order_clusters_as_data = FALSE))
grp #check order of samples and grouped clusters
write.csv(grp,'grp_cluster.csv') #save this as a .csv file
order_clust=read.csv("grp_cluster.csv",header=T) #bring it back in
IDs<- data.all %>% mutate(order = row_number())
order_clust <-order_clust %>% mutate(row_name = row_number())
## NOTE(review): IDs has an `order` column, not `row_name` -- this merge
## key likely fails or matches nothing; confirm the intended join column.
final<- merge(order_clust, IDs, by = "row_name") #Joins IDs and order_clust by "row_name"
test.trans<-as.data.frame(t(data.all))
test<- test.trans %>% mutate(order = rownames(test.trans))
############################################################################################
##################     MAKING 3D NMDS WITH GGPLOT     #################
### This uses plot3D package so turn it on if you haven't
### this package follows the same coding as ggplot
library(rgl) ## loads package for 3D plotting
### Set the x,y,z coordinates
### For this plot, will use the nmds scores since
### stress was lowest at 3 dimensions (because 2D did not converge)
### Create df of ref scores
default.3d<-mds.scores3 %>%
  subset(source == "Default")
### create df without references
mds.3d<-mds.scores3 %>%
  subset(!source %in% "Default")
### Assign dimensions to x,y,z
## NOTE(review): mds.scores3 was built from the k = 2 solution above and
## therefore has no NMDS3 column -- z will be NULL unless mds.scores3 was
## rebuilt from a 3-D ordination; confirm.
x <- mds.3d$NMDS1
y <- mds.3d$NMDS2
z <- mds.3d$NMDS3
### Basic scatter plot
rgl.open()
rgl.bg(color = "white") # Setup the background color
rgl.points(x, y, z, size = 5,
           color = get_colors(mds.3d$fit)) #color = "blue"
### Create function
#' Get colors for the different levels of a factor variable
#'
#' @param groups a factor (or factor-coercible) variable containing the
#'   groups of observations
#' @param group.col a vector containing the names of the default colors
#'   to be used; recycled when there are more groups than colors
#' @return a named vector with one color per observation
get_colors <- function(groups, group.col = palette()){
  ## Coerce to factor so numeric inputs containing 0 (e.g. a fit error
  ## of 0) index by level rather than by raw value -- indexing a vector
  ## with 0 silently drops elements and shortens the color vector, which
  ## misaligns colors with points in rgl.points() above.
  groups <- as.factor(groups)
  ngrps <- length(levels(groups))
  if(ngrps > length(group.col)){
    group.col <- rep(group.col, ngrps)
  }
  color <- group.col[as.numeric(groups)]
  names(color) <- as.vector(groups)
  return(color)
}
# Add x, y, and z axes as colored line segments through the origin
rgl.lines(c(0, 1), c(0, 0), c(0, 0), color = "black")
rgl.lines(c(0, 0), c(0,0.5), c(0, 0), color = "red")
rgl.lines(c(0, 0), c(0, 0), c(0,1), color = "green")
### Add plane (use references)
# define function to calculate symmetric axis limits around zero
lim <- function(x){c(-max(abs(x)), max(abs(x))) * 1}
# xlab, ylab, zlab: axis labels
# show.plane : add axis planes
# show.bbox : add the bounding box decoration
# bbox.col: the bounding box colors. The first color is
# the background color; the second color is the color of tick marks
rgl_add_axes <- function(x, y, z, axis.col = "grey",
                xlab = "", ylab="", zlab="", show.plane = TRUE,
                show.bbox = FALSE, bbox.col = c("#333377","black"))
{
  # Add annotated x/y/z axes (and optionally a reference plane and a
  # bounding box) to the current rgl scene.
  # x, y, z    : data coordinates used only to size the axes.
  # axis.col   : color of the axis lines, end points and labels.
  # show.plane : if TRUE, draw the hard-coded reference quad.
  # show.bbox  : if TRUE, draw the bounding-box decoration.
  lim <- function(x){c(-max(abs(x)), max(abs(x))) * 1}
  # Add axes
  xlim <- lim(x); ylim <- lim(y); zlim <- lim(z)
  rgl.lines(xlim, c(0, 0), c(0, 0), color = axis.col)
  rgl.lines(c(0, 0), ylim, c(0, 0), color = axis.col)
  rgl.lines(c(0, 0), c(0, 0), zlim, color = axis.col)
  # Add a point at the end of each axes to specify the direction
  axes <- rbind(c(xlim[2], 0, 0), c(0, ylim[2], 0),
                c(0, 0, zlim[2]))
  rgl.points(axes, color = axis.col, size = 3)
  # Add axis labels
  rgl.texts(axes, text = c(xlab, ylab, zlab), color = axis.col,
            adj = c(0.5, -0.8), size = 2)
  # Add plane.  Bug fix: the original unbraced `if (show.plane)` only made
  # the xlim rescale conditional -- the zlim rescale and the quad itself were
  # always executed, so show.plane = FALSE still drew the plane.
  if (show.plane) {
    xlim <- xlim/1.1
    zlim <- zlim/1.1
    rgl.quads( x = c(-0.3526627, 0.3076674,0.4029682,0.1305049),
               y = c(-0.05494934, -0.1255089, 0.03109923, 0.15533561),
               z = c(0.02211692, -0.01647502, 0.03821252, 0.0314686))
  }
  # Add bounding box decoration
  if(show.bbox){
    rgl.bbox(color=c(bbox.col[1],bbox.col[2]), alpha = 0.5,
             emission=bbox.col[1], specular=bbox.col[1], shininess=5,
             xlen = 3, ylen = 3, zlen = 3)
  }
}
## Draw the annotated axes defined above onto the current rgl scene
rgl_add_axes(x, y, z)
### clab is used to change legend title
### The points are automatically colored according to variable Z
## Binary coloring: fit == 0 -> gray, otherwise red
colVar <- sapply(mds.3d$fit,function(a){ifelse(a==0,'gray','red')})
colVar <- factor(colVar,levels=c('gray','red'))
### viewing angle: theta = azimuthal direction (rotation)
### viewing angle: phi = co-latitude (tilt)
### Use extreme for each and then 45 for both to test it out
scatter3D(x=x,y=y,z=z, cex = 1.3,
phi = 45,theta = 45, ticktype = "detailed",
xlab = "NMDS1", ylab ="NMDS2", zlab = "NMDS3",
clab = c("Least", "Square Fit"),
colvar=as.integer(colVar),
colkey=list(at=c(0,1),side=4),
col=as.character(levels(colVar)),
pch=19)
### Add reference scores to the 3D plot as a single black diamond
### Cyano ref
scatter3D(x = -0.3526627, y = -0.05494934, z = 0.02211692, add = TRUE, colkey = FALSE,
pch = 18, cex = 3, col = "black")
############# BARPLOT: CHLA COMMUNITY COMPOSITON ##############
### select only chla data from PhytoPAM for year 2018
chla.comm<-data.all2[c(5:151),c(1,9,10,11,20:23)]
### Pivot longer to make one column for taxa and one for chla values
chla.comm2<-pivot_longer(chla.comm,5:8,names_to = "taxa", values_to = "chla")
### Plot stacked barplot, one facet per site
ggplot(chla.comm2, aes(fill = taxa, y = chla, x = week)) +
geom_bar(position = "stack", stat = "identity") +
facet_wrap(.~site)
|
3aae19ab9130d8915b5c14b5238f969455795657 | 3790ae072a11cb72ad894f8540c13ca5f427e0ba | /AssociationRules.R | 97d6fd4418fe39ab56a9f85945b64187b8b091cf | [] | no_license | heekim33/0823 | 0f33f505d2e8ae827a8ae8b5b134348e8d0ff98e | d5fb132dbeb02eab0ea654f83701cdca516144a1 | refs/heads/main | 2023-07-13T05:15:14.741834 | 2021-08-22T12:17:59 | 2021-08-22T12:17:59 | 398,786,496 | 0 | 0 | null | null | null | null | UHC | R | false | false | 1,047 | r | AssociationRules.R | # 패키지 설치 및 로딩
# Install and load the association-rule mining packages
install.packages("arules")
install.packages("arulesViz")
library(arules)
library(arulesViz)
# Read the sales.csv file
# Read the data file (Sales.csv) as a transactions-class object
trans <- read.transactions(file.choose(), format = "basket", sep=",");
trans
# Print the transactions
inspect(trans)
# Item frequency summary (counts, then relative frequencies)
itemFrequency(trans, type='absolute')
itemFrequency(trans)
itemFrequencyPlot(trans, type="absolute",
                  xlab="상품 아이템", ylab="거래 빈도",
                  col=1:5)
itemFrequencyPlot(trans,
                  xlab="상품 아이템", ylab="비율",
                  col=1:5, topN=5)
# Generate association rules with the Apriori algorithm
rules <- apriori(trans,
                 parameter = list(supp=0.4, conf=0.6, minlen=2))
# Inspect the generated rules
inspect(rules)
# Keep rules with lift > 1 and sort by decreasing lift
rules2 <- subset(rules, lift>1.0)
rules3 <- sort(rules2, by="lift", decreasing=TRUE)
inspect(rules3)
# Plot the filtered rules as a graph
plot(rules2, method="graph")
|
bf294c42cfe2843c6195b0544e21475fbbc10c52 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/RSiteCatalyst/examples/GetReport.Rd.R | cff95b670f47cb61c676c6a4b8afdb8e386d75a5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 214 | r | GetReport.Rd.R | library(RSiteCatalyst)
### Name: GetReport
### Title: Get EnQueued Report by report ID
### Aliases: GetReport
### ** Examples
## Not run:
##D
##D custom_report <- GetReport(12345678)
##D
## End(Not run)
|
18ff02346bf39b905a1cd2f26f911bd4afa6a27a | cc4bec8fcef05144b64bfdfa5fcd76876c1b532a | /Multivariate/midterm/code/.Rproj.user/2E5D35AB/sources/per/t/C92DFE73-contents | bc752ba58e0bce979909a9a1d4484843f5b1365f | [] | no_license | Wentworthliu123/Spring20 | d80c3e21664f39cac94bf4309f181c349f9fa7db | f1175c92d7816b94eab813277dd7782952c9d78b | refs/heads/master | 2021-06-02T07:04:29.128393 | 2020-05-12T06:23:22 | 2020-05-12T06:23:22 | 254,317,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 36,933 | C92DFE73-contents | #### Appplied Multivariate Analysis
#### A collection of the R codes used in Bus41912 over the years.
#### Putting together on June 14, 2014 by Ruey S. Tsay
####
"Behrens" <- function(x1, x2) {
  # Behrens-Fisher test of equal mean vectors for two populations whose
  # covariance matrices are NOT assumed equal.
  # x1, x2: data matrices (rows = observations) from populations 1 and 2;
  #         they must have the same number of columns.
  # Prints the estimated degrees of freedom v and a 1 x 2 matrix with the
  # T2 statistic and the p-value from the approximate F distribution
  # (returned invisibly via print).
  if (!is.matrix(x1)) x1 <- as.matrix(x1)
  if (!is.matrix(x2)) x2 <- as.matrix(x2)
  n1 <- nrow(x1); n2 <- nrow(x2)
  p1 <- ncol(x1); p2 <- ncol(x2)
  # Bug fix: the original only computed inside `if (p1 == p2)`, so a
  # dimension mismatch fell through and printed the function object itself.
  if (p1 != p2) {
    stop("x1 and x2 must have the same number of columns", call. = FALSE)
  }
  x1bar <- matrix(colMeans(x1), p1, 1)
  x2bar <- matrix(colMeans(x2), p1, 1)
  dev <- x1bar - x2bar
  # Per-population covariance of the sample means
  S1n <- cov(x1) / n1
  S2n <- cov(x2) / n2
  Si <- solve(S1n + S2n)
  T2 <- t(dev) %*% Si %*% dev
  # Approximate degrees of freedom v for the reference F distribution
  S1s <- S1n %*% Si
  S2s <- S2n %*% Si
  d1 <- (sum(diag(S1s %*% S1s)) + sum(diag(S1s))^2) / n1 +
        (sum(diag(S2s %*% S2s)) + sum(diag(S2s))^2) / n2
  v <- (p1 + p1^2) / d1
  cat("Estimate of v: ", v, "\n")
  deg <- v - p1 + 1
  tt <- T2 * deg / (v * p1)
  pvalue <- 1 - pf(tt, p1, deg)
  Behrens <- matrix(c(T2, pvalue), 1, 2)
  colnames(Behrens) <- c("T2-stat", "p.value")
  cat("Test result:", "\n")
  print(Behrens, digits = 4)  # was `digit=` (partial-arg match); fixed
}
####
"BoxM" <- function(x,nv){
# Box's M-test for equality of the g group covariance matrices.
# x : data matrix with the first n1 rows belonging to population 1,
#     rows (n1+1):(n1+n2) to population 2, etc.
# nv = (n1,n2,...,ng)'; the number of groups g is length(nv).
# Prints each group determinant and the test result, and returns a list
# with the M statistic, the corrected statistic C, and its chi-square
# p-value (nu = p(p+1)(g-1)/2 degrees of freedom).
# Written by Ruey S. Tsay on April 18, 2008
Box.M=NULL
g=length(nv)
p=dim(x)[2]
S=array(0,dim=c(p,p,g))
Sp=matrix(0,p,p)
n=sum(nv)
deg2=n-g
M = 0
# tmp1 is the sum[(n_i-1)*ln(det(S_i))]
# u1 is the sum[1/(n_i-1)]
tmp1=0
u1 = 0
idx=0
for (i in 1:g){
# rows of group i; accumulate pooled SSCP, log-determinants and u1
da=x[(idx+1):(idx+nv[i]),]
smtx=cov(da)
S[,,i]=smtx
Sp=(nv[i]-1)*smtx+Sp
tmp1=(nv[i]-1)*log(det(smtx))+tmp1
u1 = u1 + 1.0/(nv[i]-1)
print("determinant")
print(det(smtx))
idx=idx+nv[i]
}
Sp=Sp/deg2
# M statistic and its small-sample correction factor u
M=deg2*log(det(Sp))-tmp1
u = (u1-(1.0/deg2))*(2*p^2+3*p-1)/(6*(p+1)*(g-1))
C = (1-u)*M
nu=p*(p+1)*(g-1)/2
pvalue=1-pchisq(C,nu)
Box.M=cbind(Box.M,c(C,pvalue))
row.names(Box.M)=c("Box.M-C","p.value")
cat("Test result:","\n")
print(Box.M)
BoxM <-list(Box.M=M, Test.Stat=C,p.value=pvalue)
}
####
"BoxCox" <- function(da, interval = c(-2, 2)) {
  # Grid-search Box-Cox power-transformation estimate for each column of da.
  # da      : numeric data matrix (each column is transformed separately).
  # interval: range of lambda values searched, on a grid of step 0.05.
  # Prints the estimates and (improvement) returns them invisibly so callers
  # can use the result programmatically; the original discarded them.
  if (!is.matrix(da)) da <- as.matrix(da)
  k <- dim(da)[2]
  #### profile log-likelihood of lambda for one data column
  lklam <- function(lam, x) {
    x <- abs(x)
    # ad hoc guard against (near-)zero values, inherited from the original
    if (min(x) < 0.0001) {
      x <- x + 0.2
    }
    if (abs(lam) < 0.00001) {
      y <- log(x)               # lambda = 0 limit of the power transform
    } else {
      y <- (x^lam - 1) / lam
    }
    n1 <- length(x)
    dev <- scale(y, center = TRUE, scale = FALSE)
    s <- sum(dev * dev) / n1    # MLE of the error variance
    -0.5 * n1 * log(s) + (lam - 1) * sum(log(x))
  }
  lambda <- seq(min(interval), max(interval), 0.05)
  est <- NULL
  for (j in 1:k) {
    # evaluate the profile likelihood on the grid; keep the argmax
    fv <- vapply(lambda, lklam, numeric(1), x = da[, j])
    est <- c(est, lambda[which.max(fv)])
  }
  cat("Estimated transformation: ", est, "\n")
  invisible(est)
}
####
"classify56" <- function(da,size,eqP=T,pr=c(0),newdata=NULL){
### classification using equation (11-56) of Johnson and Wichern.
###
# da: data matrix. The data are arranged according to populations.
# size: a vector of sample size of each population so that
#     the length g [g=length(size)] is the number of populations.
# eqP: switch for equal prior probabilities (if TRUE, pr is overwritten
#      with 1/g for every group).
## pr: prior probability vector (used only when eqP = FALSE)
# newdata: observations to classify; defaults to da itself.
# Assume equal costs.
# Assume normality.
# Classification itself uses the pooled covariance matrix (equal-covariance
# assumption); the per-group covariances are only printed.
# Returns a list with `newclass`, the assigned population of each row.
#
if(!is.matrix(da))da <- as.matrix(da)
nr = dim(da)[1]
nc = dim(da)[2]
nrnew <- 0; newclass <- NULL
if(length(newdata) > 0){
if(!is.matrix(newdata)) newdata <- as.matrix(newdata)
nrnew=dim(newdata)[1]; ncnew=dim(newdata)[2]
if(ncnew < nc){
cat("newdata is not in the proper format","\n")
# NOTE(review): bare `return` does not exit the function in R; it merely
# evaluates the `return` function object.  Should be `return(NULL)`.
return
}
}
if(is.null(newdata)){newdata=da; nrnew=nr; ncnew=nc}
##
g=length(size)
# compute the sample mean and covariance matrix of each population
cm=matrix(0,nc,g)
dm=g*nc
# S: stores the covariance matrices (one on top of the other)
## Sinv: stores the inverse covariance matrices
S=matrix(0,dm,nc)
Sinv=matrix(0,dm,nc)
ist=0
for (i in 1:g){
x = da[(ist+1):(ist+size[i]),]
if(nc > 1){
cm1 = apply(x,2,mean)}
else{
cm1=c(mean(x))
}
cm[,i]=cm1
smtx=var(x)
jst =(i-1)*nc
S[(jst+1):(jst+nc),]=smtx
Si=solve(smtx)
Sinv[(jst+1):(jst+nc),]=Si
cat("Population: ",i,"\n")
print("mean vector:")
print(cm1,digits=3)
print("Covariance matrix:")
print(smtx,digits=4)
# print("Inverse of cov-mtx:")
# print(Si,digits=4)
ist=ist+size[i]
}
##
if(eqP){
pr=rep(1,g)/g
}
##
print("Assume equal covariance matrices and costs")
# pooled covariance matrix Sp = sum (n_i - 1) S_i / (n - g)
Sp=matrix(0,nc,nc)
for (i in 1:g){
jdx=(i-1)*nc
smtx=S[(jdx+1):(jdx+nc),]
Sp=Sp+(size[i]-1)*smtx
}
Sp = Sp/(nr-g)
print("Sp")
print(Sp,digits=4)
Spinv=solve(Sp)
print("Sp-inv")
print(Spinv,digits=4)
##
### Classification: assign x0 to group k when every pairwise linear
### discriminant score d_k - d_i (adjusted by the priors) is positive.
for (i in 1:nrnew){
xo=as.numeric(newdata[i,])
xo <- matrix(xo,nc,1)
for (k in 1:g){
##### compute the pairwise discriminant scores against group k
dki = rep(0,g)
ckk=cm[,k]
for (ii in 1:g){
dif=matrix(c(ckk-cm[,ii]),1,nc); s=c(ckk+cm[,ii])
dki[ii] = -0.5*dif%*%Spinv%*%matrix(s,nc,1)+dif%*%Spinv%*%xo
dki[ii] = dki[ii]-log(pr[ii]/pr[k])
}
dki=dki[-k]
if(min(dki) > 0){newclass=c(newclass,k)}
}
}
cat("New classification: ","\n")
print(newclass)
# NOTE(review): assigning to a variable named `return` is legal but
# confusing; the list below is the function's (invisible) return value.
return <- list(newclass=newclass)
}
#####
"Cmeans" <- function(da,size,eqV=T,alpha=0.05){
# Two-sample comparison of mean vectors (Hotelling T2), with simultaneous
# T2 and Bonferroni confidence intervals for each mean difference.
# da   : data matrix; the first size[1] rows are population 1, the rest
#        population 2.
# size : vector of the two sample sizes.
# eqV  : if TRUE, pool the two covariance matrices; otherwise use S1/n1+S2/n2.
# alpha: 1 - coverage probability of the confidence intervals.
# Written by Ruey S. Tsay for Bus 41912 class.
#
if(!is.matrix(da))da=as.matrix(da)
nc=ncol(da)
nr=nrow(da)
n1=size[1]
n2=size[2]
mu1 = apply(da[1:n1,],2,mean)
S1=cov(da[1:n1,])
print("Population 1:")
cat("Mean-vector:","\n")
print(mu1,digits=3)
cat("Covariance matrix","\n")
print(S1,digits=3)
mu2 = apply(da[(n1+1):nr,],2,mean)
S2=cov(da[(n1+1):nr,])
print("Population 2:")
cat("Mean-vector: ","\n")
print(mu2,digits=3)
cat("Covariance matrix: ","\n")
print(S2,digits=3)
if(eqV){
# pooled covariance of the mean difference
Sp = ((n1-1)/(nr-2))*S1+((n2-1)/(nr-2))*S2
print("Pooled covariance matrix:")
print(Sp,digits=3)
Spm = ((1/n1) + (1/n2))*Sp
Spmv = solve(Spm)
}
else {
Spm = S1*(1/n1)+S2*(1/n2)
Spmv = solve(Spm)
}
dif=matrix((mu1-mu2),nc,1)
cat("differnces in means: ","\n")
print(dif,digits=3)
# T2 statistic and its F approximation with (nc, nr-nc-1) d.f.
T2 = t(dif)%*%Spmv%*%dif
d2=nr-nc-1
csq=(nr-2)*nc/(nr-nc-1)
cri=T2/csq
p=1-pf(cri,nc,d2)
print("Hotelling T2, approx-F, & its p-value")
tmp=c(T2,cri,p)
print(tmp)
# Simultaneous T2 confidence intervals for each mean difference
print("Simultaneous Tsq. C.I. for difference in means")
pr=1-alpha
c = sqrt(csq*qf(pr,nc,d2))
for (i in 1:nc){
tmp=sqrt(Spm[i,i])*c
ci = c(dif[i]-tmp,dif[i]+tmp)
print(ci,digits=3)
}
# Bonferroni simultaneous C.I.
print("Simultaneous Bonferroni C.I. for difference in means")
pr=1 - alpha/(2*nc)
cri=qt(pr,d2)
for (i in 1:nc){
tmp=cri*sqrt(Spm[i,i])
ci = c(dif[i]-tmp,dif[i]+tmp)
print(ci,digits=3)
}
# Linear combination a = Spm^{-1} (mu1 - mu2): the direction most
# responsible for rejecting equality of the means
av=Spmv%*%dif
print("Critical linear combination: ")
print(av,digits=3)
}
### Given the data
"confreg" <- function(da,alpha=0.05,length=FALSE){
# Confidence intervals for the mean vector of da, four ways:
# simultaneous T2, individual t, Bonferroni, and asymptotic chi-square.
# da    : data matrix (rows = observations).
# alpha : 1 - coverage probability.
# length: if TRUE, also print the interval lengths side by side.
# Written by Ruey S. Tsay for Bus 41912 class.
#
if(!is.matrix(da))da=as.matrix(da)
nr = nrow(da)
nc = ncol(da)
cm=matrix(colMeans(da),1,nc)
s=cov(da)
# simultaneous T2 intervals: critical value sqrt(p(n-1)/(n-p) F)
simucr=matrix(0,nc,2)
dg2=nr-nc
cr=qf((1-alpha),nc,dg2)
cr1=sqrt(nc*(nr-1)*cr/(nr-nc))
se=sqrt(diag(s))/sqrt(nr)
for (i in 1:nc){
simucr[i,1]=cm[i]-cr1*se[i]
simucr[i,2]=cm[i]+cr1*se[i]
}
print("C.R. based on T^2")
print(simucr)
# one-at-a-time t intervals
indvcr=matrix(0,nc,2)
q=1-(alpha/2)
cr=qt(q,(nr-1))
for (i in 1:nc){
indvcr[i,1]=cm[i]-cr*se[i]
indvcr[i,2]=cm[i]+cr*se[i]
}
print("CR based on individual t")
print(indvcr)
# Bonferroni intervals: alpha split over the nc components
bonfcr=matrix(0,nc,2)
q=1-(alpha/(2*nc))
cr=qt(q,(nr-1))
for (i in 1:nc){
bonfcr[i,1]=cm[i]-cr*se[i]
bonfcr[i,2]=cm[i]+cr*se[i]
}
print("CR based on Bonferroni")
print(bonfcr)
# large-sample intervals using the chi-square quantile
asymcr=matrix(0,nc,2)
cr=sqrt(qchisq((1-alpha),nc))
for (i in 1:nc) {
asymcr[i,1]=cm[i]-cr*se[i]
asymcr[i,2]=cm[i]+cr*se[i]
}
print("Asymp. simu. CR")
print(asymcr)
if(length){
print("Lengths of confidence intervals:")
leng=matrix(0,nc,4)
leng[,1]=simucr[,2]-simucr[,1]
leng[,2]=indvcr[,2]-indvcr[,1]
leng[,3]=bonfcr[,2]-bonfcr[,1]
leng[,4]=asymcr[,2]-asymcr[,1]
colnames(leng) <- c("T^2","ind-t","Bonf","Asym")
print(leng,digits=3)
}
}
####
"contrast" <- function(da,cmtx,alpha=0.05){
# Test that a set of contrasts of the mean vector are zero, and print
# simultaneous T2 confidence intervals for each contrast.
# da   : data matrix (rows = observations).
# cmtx : contrast matrix; each row is one contrast of the nc means.
# alpha: 1 - coverage probability of the simultaneous intervals.
# Written by Ruey S. Tsay for Bus 41912 class.
#
if(!is.matrix(da))da=as.matrix(da)
nr = nrow(da)
nc = ncol(da)
ave=matrix(colMeans(da),1,nc)
S=cov(da)
# sample contrasts and their covariance C S C'
cm = cmtx%*%t(ave)
co=cmtx%*%S%*%t(cmtx)
Si=solve(co)
Tsq = nr*(t(cm)%*%Si%*%cm)
qm1=dim(cmtx)[1]
# F approximation with (q, n-q) degrees of freedom, q = nrow(cmtx)
tmp = Tsq*(nr-qm1)/((nr-1)*qm1)
deg2=nr-qm1
pv=1-pf(tmp,qm1,deg2)
print("Hotelling T^2 statistics & p-value")
print(c(Tsq,pv))
print("Simultaneous C.I. for each contrast")
cri = sqrt((nr-1)*qm1*qf(1-alpha,qm1,deg2)/deg2)
simucr=matrix(0,qm1,2)
for (i in 1:qm1){
c=cmtx[i,]
me=t(c)%*%t(ave)
s = t(c)%*%S%*%c
se=sqrt(s)/sqrt(nr)
simucr[i,1]=me-cri*se
simucr[i,2]=me+cri*se
}
print(simucr,digits=3)
}
#### Given sample mean and sample covariance ###
"confreg.s" <- function(sm,s,nr,alpha=0.05){
  # Confidence intervals for a mean vector from summary statistics only.
  # sm   : sample mean vector
  # s    : sample covariance matrix
  # nr   : number of observations
  # alpha: 1 - coverage probability
  # Prints four interval families: simultaneous T^2, individual t,
  # Bonferroni, and asymptotic chi-square (the last is also the return
  # value, via print()).
  if(!is.matrix(s)) s <- as.matrix(s)
  p <- ncol(s)
  center <- c(matrix(sm, 1, p))
  se <- unname(sqrt(diag(s))) / sqrt(nr)
  # helper: two-sided intervals for all components at a common critical value
  band <- function(crit) cbind(center - crit * se, center + crit * se)
  critT2 <- sqrt(p * (nr - 1) * qf(1 - alpha, p, nr - p) / (nr - p))
  print("C.R. based on T^2")
  print(band(critT2))
  print("CR based on individual t")
  print(band(qt(1 - alpha / 2, nr - 1)))
  print("CR based on Bonferroni")
  print(band(qt(1 - alpha / (2 * p), nr - 1)))
  print("Asymp. simu. CR")
  print(band(sqrt(qchisq(1 - alpha, p))))
}
##############
######
"eigTest" <- function(Sigma,p,q,n){
### Sequential chi-square tests for the number of nonzero canonical
### correlations between the first p and the last q variables.
### Sigma: the (p+q)-by-(p+q) covariance or correlation matrix
### p, q : sizes of the two variable blocks (block 1 first, block 2 second)
### n    : sample size
### Prints a table of test statistics and the two sets of canonical
### variates; returns them in a list.
if(!is.matrix(Sigma))Sigma=as.matrix(Sigma)
# symmetrize to guard against small asymmetries in the input
Sig=(Sigma+t(Sigma))/2
S11=Sig[1:p,1:p]
S12=Sig[1:p,(p+1):(p+q)]
S22=Sig[(p+1):(p+q),(p+1):(p+q)]
if(q < p){
# ensure the first block is the smaller one
tmp = S11; S11=S22; S22=tmp
S12=t(S12)
}
S11inv=solve(S11)
S22inv=solve(S22)
# eigenvalues of S11^{-1} S12 S22^{-1} S21 are the squared canonical corr.
S=S11inv%*%S12%*%S22inv%*%t(S12)
m1=eigen(S)
minpq=min(p,q)
vec1 = m1$vectors
# second set of variates, rescaled to unit length
vec2 = S22inv%*%t(S12)%*%vec1
tmp=crossprod(vec2,vec2)
se = sqrt(diag(tmp))
se=diag(1/se)
vec2=vec2%*%se
tst = 0; result=NULL
# Bartlett-type adjustment factor
adj=-(n-1-0.5*(p+q+1))
for (j in 1:minpq){
k1 = minpq-j+1
k=minpq-j
# accumulate -adj * log(1 - rho^2) over the smallest j eigenvalues
tst = tst + adj*log(1-m1$values[k1])
df = (p-k)*(q-k)
pv=1-pchisq(tst,df)
result=rbind(result,c(j,tst,df,pv))
}
colnames(result) <- c("N(zero-Eig)","Test-statistic","df","p-value")
print(result)
cat("First set of ordered canonical variates: ","\n")
print(vec1,digits=3)
cat("Second set of ordered canonical variates: ","\n")
print(vec2,digits=3)
eigTest <- list(values=m1$values, Xvectors=vec1, Yvectors=vec2)
}
#####
"EMmiss" <- function(da,fix=NULL,iter=1){
## EM-style estimation of the mean vector and MLE covariance matrix of a
## multivariate normal sample with missing values.
## da: data matrix; entries flagged as missing in `fix` are ignored.
## fix: an indicator matrix of the same dimension as da.
##      fix(i,j) = 0 indicates missing, = 1 means observed.
## iter: number of EM iterations to run.
## Prints the initial and iterated mean vector and covariance matrix.
##
if(!is.matrix(da))da=as.matrix(da)
n = dim(da)[1]
k = dim(da)[2]
x=da
if(length(fix) < 1)fix=matrix(1,n,k)
if(!is.matrix(fix))fix=as.matrix(fix)
# Initial mean: per-column average over the observed entries only.
mu=rep(0,k)
for (i in 1:k){
mu[i]=crossprod(x[,i],fix[,i])/sum(fix[,i])
}
cat("Sample mean: ","\n")
print(mu)
## Initialize: impute missing cells with the column means (in x) and with
## zeros (in xx, used for the observed sum-of-squares matrix).
xx = x
for (i in 1:k){
idx=c(1:n)[fix[,i]==0]
x[idx,i]=mu[i]
xx[idx,i]=0
}
###print(x)
S=cov(x)*(n-1)/n
cat("Sample covariance matrix (MLE): ","\n")
print(S)
### Observed sum of squares (missing cells contribute zero)
T2=t(xx)%*%xx
### Perform EM iterations
for (it in 1:iter){
### E-step: for each row with missing cells, replace them with their
### conditional expectation given the observed cells, and accumulate the
### expected sum-of-squares contributions into S2.
S2=T2
for (i in 1:n){
idx=c(1:k)[fix[i,]==0]
if(length(idx) > 0){
jdx=c(1:k)[-idx]
obs=x[i,jdx]
###cat("obs: ","\n")
###print(obs)
mu1=mu[idx]
mu2=mu[jdx]
S11=S[idx,idx]
S12=S[idx,jdx]
S21=S[jdx,idx]
S22=S[jdx,jdx]
S22i=solve(S22)
# conditional mean of the missing block given the observed block
x1=mu1+S12%*%S22i%*%(obs-mu2)
x[i,idx]=x1
# conditional second moment (covariance + outer product of the mean)
X11=S11-S12%*%S22i%*%S21+x1%*%t(x1)
X12=x1%*%t(obs)
##cat("X11 and X12","\n")
##print(X11)
##print(X12)
S2[idx,idx]=S2[idx,idx]+X11
S2[idx,jdx]=S2[idx,jdx]+X12
# NOTE(review): this overwrites the whole accumulated [jdx,idx] block
# with the transpose of the running [idx,jdx] block; confirm this is
# the intended symmetric update when missing patterns vary by row.
S2[jdx,idx]=t(S2[idx,jdx])
S2[jdx,jdx]=S2[jdx,jdx]
}
#
}
cat("Iteration: ",it,"\n")
##cat("Modified data matrix: ","\n")
##print(x)
## M-step: update the mean from the completed data
mu=colMeans(x)
cat("iterated Sample mean: ","\n")
print(mu)
## and the MLE covariance from the expected sum of squares
muv=matrix(mu,k,1)
####print(S2)
S=S2/n - muv%*%t(muv)
cat("Iterated sample covariance matrix (MLE): ","\n")
print(S)
}
## end of the program
}
#####
"growth" <- function(da,nv,tp,q){
# Fit and test a q-th order polynomial growth curve model for g groups of
# repeated measurements.
# da: data matrix; rows are subjects, columns are the p time points, with
#     the groups stacked according to nv.
# nv: vector of group sample sizes.
# tp: vector of the p measurement times (used to build the design matrix B).
# q : polynomial order of the growth curve.
# Prints the estimated coefficients, their standard errors, the within
# matrices W and Wq, Wilks' Lambda and the chi-square test result.
# Written by Ruey S. Tsay on April 24, 2008
growth=NULL
N = dim(da)[1]
p = dim(da)[2]
g=length(nv)
Sp=matrix(0,p,p)
idx=0
xbar=NULL
# group means and pooled within-group SSCP matrix W
for (i in 1:g){
ni=nv[i]
x=da[(idx+1):(idx+ni),]
ave=apply(x,2,mean)
xbar=cbind(xbar,ave)
S=cov(x)
Sp=(ni-1)*S+Sp
idx=idx+ni
}
W=Sp
Sp=Sp/(N-g)
Si=solve(Sp)
beta=matrix(0,(q+1),g)
semtx=matrix(0,(q+1),g)
# polynomial design matrix B = [1, tp, tp^2, ..., tp^q]
B=matrix(1,p,1)
for (j in 1:q){
B=cbind(B,tp^j)
}
idx=0
Wq=matrix(0,p,p)
# scaling constant for the standard errors of beta-hat
k=(N-g)*(N-g-1)/((N-g-p+q)*(N-g-p+q+1))
for (i in 1:g){
ni=nv[i]
x=da[(idx+1):(idx+ni),]
# generalized least squares estimate of the growth coefficients
S=t(B)%*%Si%*%B
Sinv=solve(S)
d1=xbar[,i]
xy=t(B)%*%Si%*%d1
bhat=Sinv%*%xy
beta[,i]=bhat
semtx[,i]=sqrt((k/ni)*diag(Sinv))
fit=t(B%*%bhat)
# residual SSCP of the fitted growth curve
err=x-kronecker(fit,matrix(1,ni,1))
err=as.matrix(err)
Wq=t(err)%*%err+Wq
idx=idx+ni
}
# Wilks' Lambda comparing the growth-curve fit to the saturated model
Lambda=det(W)/det(Wq)
Tst=-(N-(p-q+g)/2)*log(Lambda)
pv=1-pchisq(Tst,(p-q-1)*g)
growth=cbind(growth,c(Tst,pv))
row.names(growth)=c("LR-stat","p.value")
print("Growth curve model")
print("Order: ")
print(q)
print("Beta-hat: ")
print(beta,digits=4)
print("Standard errors: ")
print(semtx,digits=4)
print("W")
print(W,digits=4)
print("Wq")
print(Wq,digits=4)
print("Lambda:")
print(Lambda)
print("Test result:")
print(growth,digits=4)
}
####
"Hotelling" <- function(da, mu = NULL) {
  # One-sample Hotelling T^2 test of H0: mean vector equals mu.
  # da: data matrix (rows = observations).
  # mu: hypothesized mean vector; defaults to the zero vector.
  # Returns a 2 x 1 matrix holding the T^2 statistic and the p-value from
  # the exact F reference distribution with (p, n-p) degrees of freedom.
  if (!is.matrix(da)) da <- as.matrix(da)
  n <- nrow(da)
  p <- ncol(da)
  if (is.null(mu)) mu <- matrix(rep(0, p), p, 1)
  xbar <- matrix(colMeans(da), p, 1)
  diffv <- xbar - matrix(mu, p, 1)
  Tsq <- n * t(diffv) %*% solve(cov(da)) %*% diffv
  # Scale T^2 to an F statistic: F = T^2 (n-p) / (p (n-1))
  Fstat <- Tsq * (n - p) / (p * (n - 1))
  pval <- 1 - pf(Fstat, p, n - p)
  out <- matrix(c(Tsq, pval), 2, 1)
  # Row label spelling kept as in the original, since it is part of the
  # returned object.
  row.names(out) <- c("Hoteliing-T2", "p.value")
  out
}
####
"mlrchk" <- function(z,y,constant=TRUE,pred=NULL,alpha=0.05){
# Perform multiple linear regression analysis, provide leverage and
# influential-observation diagnostics (studentized residuals, hat values,
# Cook's distance, each plotted), and compute confidence/prediction
# intervals at a new predictor point.
# z       : design matrix of regressors.
# y       : response vector.
# constant: if TRUE, prepend an intercept column to z.
# pred    : optional new predictor vector for interval computation.
# alpha   : 1 - coverage probability of the intervals.
# Returns a list with the Cook distances, hat values, and studentized
# residuals.
# Created April 27, 2010.  Ruey Tsay
#
if(!is.matrix(z))z=as.matrix(z)
n=dim(z)[1]
r=dim(z)[2]
# NOTE(review): y1 is computed but never used below.
y1=as.matrix(y,n,1)
# estimation via the normal equations
z1=z
if(constant)z1=cbind(rep(1,n),z)
r1=dim(z1)[2]
ztz=t(z1)%*%z1
zty=t(z1)%*%y
ztzinv=solve(ztz)
beta=ztzinv%*%zty
res=y-z1%*%beta
sig=sum(res^2)/(n-r1)
# print results
print("coefficient estimates:")
pmtx=NULL
for (i in 1:r1){
s1=sqrt(sig*ztzinv[i,i])
tra=beta[i]/s1
# label the intercept as regressor 0 when a constant is included
ii = i; if(constant)ii=ii-1
pmtx=rbind(pmtx,c(ii,beta[i],s1,tra))
}
colnames(pmtx) <- c("Regor","Estimate","StdErr","t-ratio")
print(pmtx,digit=3)
# Compute the hat-matrix and its diagonal (leverages)
par(mfcol=c(3,1))
H = z1%*%ztzinv%*%t(z1)
hii=diag(H)
ImH=rep(1,n)-hii
stres=res/sqrt(ImH*sig)
plot(stres,type='h',xlab='index',ylab='st-resi')
abline(h=c(0))
title(main='Studentized residuals')
plot(hii,type='h',xlab='index',ylab='h(i,i)',ylim=c(0,1))
lines(1:n,rep(1/n,n),lty=2,col="blue")
title(main='Leverage plot')
# Compute the Cook's distance.
# starts with studentized residuals
CookD=(hii/ImH)*(stres^2)/r1
plot(CookD,type='h',xlab='index',ylab='Cook Dist')
title(main='Cook Distance')
# Compute confidence and predictive intervals at `pred`
if(length(pred)>0){
z0=pred
if(constant)z0=c(1,z0)
z0=matrix(z0,r1,1)
fst=t(z0)%*%beta
tc=qt((1-alpha/2),(n-r1))
sd=t(z0)%*%ztzinv%*%z0*sig
sd1=sqrt(sd)
cint=c(fst-tc*sd1,fst+tc*sd1)
# prediction interval adds the error variance of a new observation
sd2=sqrt(sd+sig)
pint=c(fst-tc*sd2,fst+tc*sd2)
print("100(1-alpha)% confidence interval:")
print(cint,digit=3)
print("100(1-alpha)% predictive interval:")
print(pint,digit=3)
}
mlrchk <- list(Cook=CookD,Hii=hii,stres=stres)
}
####
"mmlr" <- function(y,z,constant=TRUE){
# This program performs multivariate multiple linear regression analysis.
# y       : matrix of dependent variables (one column per response).
# z       : design matrix of regressors.
# constant: switch for the constant term of the regression model.
## Model is y = z%*%beta+error
# Prints the coefficient matrix, the residual covariance, individual
# t-ratios, an overall likelihood-ratio test, and per-regressor tests.
# Returns a list with everything needed by mmlrInt() (beta, residuals,
# sigma, ZtZinv, y, z, intercept).
if(!is.matrix(y)) y <- as.matrix(y)
m <- ncol(y)
##
z=as.matrix(z)
n=nrow(z)
nx=ncol(z)
zc=z
if (constant) zc=cbind(rep(1,n),z)
p=ncol(zc)
y=as.matrix(y)
# least squares via the normal equations
ztz=t(zc)%*%zc
zty=t(zc)%*%y
ZtZinv=solve(ztz)
beta=ZtZinv%*%zty
Beta=beta
cat("Beta-Hat matrix: ","\n")
print(round(Beta,3))
res=y-zc%*%beta
# unbiased residual covariance (divisor n - p)
sig=t(res)%*%res/(n-p)
cat("LS residual covariance matrix: ","\n")
print(round(sig,3))
# covariance of vec(beta-hat) = sigma (x) (Z'Z)^{-1}
co=kronecker(sig,ZtZinv)
cat("Individual LSE of the parameter","\n")
##print("    est         s.d.     t-ratio    prob")
par=beta[,1]
deg=n-p
p1=ncol(y)
if (p1 > 1){
for (i in 2:p1){
par=c(par,beta[,i])
}
}
iend=nrow(beta)*ncol(beta)
tmp=matrix(0,iend,4)
for (i in 1:iend){
sd=sqrt(co[i,i])
tt=par[i]/sd
pr=2*(1-pt(abs(tt),deg))
tmp[i,]=c(par[i],sd,tt,pr)
}
colnames(tmp) <- c("Estimate","stand.Err","t-ratio","p-value")
#print(tmp,digits=3)
print(round(tmp,3))
# overall likelihood-ratio test against the intercept-only model
sigfull=sig*(n-p)/n
det1=det(sigfull)
cat("===================","\n")
cat("Test for overall mmlr: ","\n")
C0 <- cov(y)*(n-1)/n
det0 <- det(C0)
tst <- -(n-p-0.5*(m-nx+1))*(log(det1)-log(det0))
df <- m*nx
pv <- 1-pchisq(tst,df)
cat("Test statistic, df, and p-value: ",c(tst,df, pv),"\n")
#
if (nx > 1){
ztmp=z
cat("===================","\n")
print("Testing individual regressor")
# drop one regressor at a time and compare log-determinants
for (j in 1:nx){
zc <- ztmp[,-j]
if(constant) zc=cbind(rep(1,n),zc)
ztz=t(zc)%*%zc
zty=t(zc)%*%y
ztzinv=solve(ztz)
beta=ztzinv%*%zty
res1=y-zc%*%beta
sig1=t(res1)%*%res1/n
det2=det(sig1)
tst=log(det1)-log(det2)
tst=-(n-p-0.5*ncol(y))*tst
deg=ncol(y)
pr=1-pchisq(tst,deg)
tmp=matrix(c(j,tst,pr),1,3)
colnames(tmp) <- c("regressor", "test-stat", "p-value")
print(round(tmp,4))
}
}
mmlr <- list(beta=Beta,residuals=res,sigma=sig,ZtZinv=ZtZinv,y=y,z=z,intercept=constant)
}
##############
"mmlrTest" <- function(z1,z2,y,constant=TRUE){
# Likelihood ratio test of beta2 = 0 in a multivariate multiple linear
# regression: the full model regresses y on [z1, z2], the reduced model
# on z1 only.
# z1: design matrix of the reduced model
# z2: the regressors to be tested. That is, the full model contains regressors [z1,z2]
# constant: switch for the constant term of the regression model
# y: dependent variables
# Prints both coefficient matrices, the LR test with its chi-square
# p-value, and the Wilks and Pillai statistics; returns them in a list.
if(!is.matrix(z1))z1=as.matrix(z1)
if(!is.matrix(z2))z2=as.matrix(z2)
if(!is.matrix(y))y=as.matrix(y)
# sample size
n=nrow(z1)
q=ncol(z1)
# estimate the full model
z=cbind(z1,z2)
m=ncol(y)
r=ncol(z)
if(constant) z <- cbind(rep(1,n),z)
ztz=t(z)%*%z
zty=t(z)%*%y
ztzinv=solve(ztz)
beta=ztzinv%*%zty
Betafull=beta
res=y-z%*%beta
# MLE residual covariance (divisor n)
sig=t(res)%*%res/n
print("Beta-Hat of Full model:")
print(beta,digits=3)
# Estimate the reduced model
z=z1
if(constant)z=cbind(rep(1,n),z)
ztz=t(z)%*%z
zty=t(z)%*%y
ztzinv=solve(ztz)
beta=ztzinv%*%zty
BetaR=beta
res=y-z%*%beta
sig1=t(res)%*%res/n
print("Beta-Hat of Reduced model:")
print(beta,digits=3)
# Bartlett-corrected likelihood ratio statistic
d1=det(sig)
d2=det(sig1)
Test=-(n-r-1-0.5*(m-r+q+1))*(log(d1)-log(d2))
deg=m*(r-q)
pvalue=1-pchisq(Test,deg)
print("Likelihood ratio test: beta2 = 0 vs beta_2 .ne. 0")
print("Test & p-value:")
print(c(Test,pvalue))
print("degrees of freedom:")
print(deg)
# Other tests based on the error (E) and hypothesis (H) SSCP matrices
E=n*sig
H=n*(sig1-sig)
Wilk=det(E)/det(E+H)
HEinv=solve(H+E)
Pillai=sum(diag(H%*%HEinv))
print("Wilk & Pillai statistics:")
print(c(Wilk,Pillai))
mmlrTest <- list(betaF=Betafull,betaR=BetaR,sigmaF=sig,sigmaR=sig1,LR=Test,Degrees=deg)
}
### mmlr confidence and prediction intervals
"mmlrInt" <- function(model,newx,alpha=0.05){
  ### Simultaneous confidence and prediction intervals for the responses of
  ### a multivariate multiple linear regression fitted by mmlr().
  ### model: the list returned by mmlr() (beta, sigma, ZtZinv, y, z, intercept).
  ### newx : matrix of new predictor settings, one row per prediction point.
  ###        Include the leading 1 yourself, or omit it when the model has a
  ###        constant term and it will be added automatically.
  ### alpha: 1 - coverage probability of the simultaneous intervals.
  y <- model$y; z <- model$z; constant <- model$intercept
  ZtZinv <- model$ZtZinv; beta <- model$beta; sigma <- model$sigma
  if(!is.matrix(newx)) newx <- matrix(newx,1,length(newx))
  r <- ncol(z)
  n <- nrow(y)
  if((ncol(newx)==r) && constant){
    newx <- cbind(rep(1,nrow(newx)),newx)
  }
  yhat <- newx%*%beta
  ##
  m <- ncol(y)
  p <- ncol(newx)
  npt <- nrow(newx)
  # simultaneous critical value: sqrt( m(n-p)/(n-r-m) * F_{m, n-r-m}(alpha) )
  fv <- qf(1-alpha,m,(n-r-m))
  crit <- m*(n-p)/(n-r-m)
  crit <- sqrt(crit*fv)
  for (i in seq_len(npt)){
    ## Bug fix: report the predictors of the i-th point (was newx[1,],
    ## which repeated the first row for every prediction point).
    cat("at predictors: ",newx[i,],"\n")
    cat("Point prediction: ","\n")
    print(round(yhat[i,],3))
    wk <- matrix(c(newx[i,]),1,p)
    tmp <- wk%*%ZtZinv%*%t(wk)
    tmp1 <- tmp+1   # prediction variance adds one unit of error variance
    confi <- NULL
    predi <- NULL
    for (j in seq_len(m)){
      ## NOTE(review): sigma returned by mmlr() is already E/(n-p); the
      ## extra factor n/(n-p) below looks like a double adjustment --
      ## confirm against the textbook formula before relying on widths.
      d1 <- tmp*sigma[j,j]*n/(n-p)
      d2 <- tmp1*sigma[j,j]*n/(n-p)
      confi <- rbind(confi,c(yhat[i,j]-crit*sqrt(d1), yhat[i,j]+crit*sqrt(d1)))
      predi <- rbind(predi,c(yhat[i,j]-crit*sqrt(d2), yhat[i,j]+crit*sqrt(d2)))
    }
    cat("Simultaneous C.I. with prob",(1-alpha),"\n")
    print(round(confi,4))
    cat("Simultaneous P.I. with prob",(1-alpha),"\n")
    print(round(predi,4))
  }
}
#####
"profile" <- function(x1,x2){
# The x1 and x2 are two data matrices with xi for population i.
# This program performs profile analysis for two populations, testing in
# turn whether the profiles are Parallel, Coincident, and Level.
# Returns (and prints cumulatively) a 2 x 3 matrix: one column of
# (test statistic, p-value) per hypothesis.
# Written by Ruey S. Tsay on April 23, 2008
profile=NULL
n1 = nrow(x1)
n2 = nrow(x2)
p1 = ncol(x1)
p2 = ncol(x2)
if (p1 == p2){
x1bar=matrix(colMeans(x1),p1,1)
x2bar=matrix(colMeans(x2),p2,1)
dev = x1bar-x2bar
S1 = cov(x1)
S2 = cov(x2)
# pooled covariance matrix
Sp = ((n1-1)/(n1+n2-2))*S1+((n2-1)/(n1+n2-2))*S2
# contrast matrix of successive differences
cmtx=matrix(0,(p1-1),p1)
for (i in 1:(p1-1)){
cmtx[i,i]=-1
cmtx[i,(i+1)]=1
}
print("Are the profiles parallel?")
d1=cmtx%*%dev
deg1=p1-1
deg2=n1+n2-p1
S=((1/n1)+(1/n2))*cmtx%*%Sp%*%t(cmtx)
Si=solve(S)
Tsq=t(d1)%*%Si%*%d1
c=Tsq*deg2/((n1+n2-2)*deg1)
pv=1-pf(c,deg1,deg2)
profile=cbind(profile,c(Tsq,pv))
row.names(profile)=c("Test-T2","p.value")
print(profile,digits=4)
print("Are the profiles coincident?")
# compare the total (sum over components) of the two mean profiles
one=matrix(1,p1,1)
d1=sum(dev)
s=t(one)%*%Sp%*%one
s=s*((1/n1)+(1/n2))
T2=d1^2/s
pv2=1-pf(T2,1,(n1+n2-2))
profile=cbind(profile,c(T2,pv2))
print(profile,digits=4)
print("Are the profiles level?")
# pool both samples and test whether successive means are equal
x=t(cbind(t(x1),t(x2)))
xbar=matrix(colMeans(x),p1,1)
S=cov(x)
d1=cmtx%*%xbar
CSC=cmtx%*%S%*%t(cmtx)
Si=solve(CSC)
Tst=(n1+n2)*t(d1)%*%Si%*%d1
c3=Tst*(n1+n2-p1+1)/((n1+n2-1)*(p1-1))
pv3=1-pf(c3,deg1,(n1+n2-p1+1))
profile=cbind(profile,c(Tst,pv3))
print(profile,digits=4)
}
cat("Summary: ","\n")
profile
}
############
"profileM" <- function(X,loc=1){
### Profile analysis for more than two populations.
### X: data matrix, including a group-indicator column
### loc: column number of the group indicator
### Tests parallelism (MANOVA on successive differences), coincidence
### (ANOVA on the component sums), and levelness (Hotelling T2 on the
### pooled successive differences).
### NOTE(review): relies on Hotelling() defined elsewhere in this file.
if(!is.matrix(X))X=as.matrix(X)
X1=as.matrix(X[,-loc])
p = dim(X1)[2]   ## number of variables
if(p > 1){
# contrast matrix of successive differences
C = matrix(0,p-1,p)
for (i in 1:(p-1)){
C[i,i] = -1; C[i,i+1]=1
}
Y = X1%*%t(C)
fac1 = factor(X[,loc])
m1 = manova(Y~fac1)
cat("Are the profiles parallel?","\n")
print(summary(m1,test="Wilks"))
cat("===","\n")
cat("Are the profile coincident?","\n")
J=matrix(1,p,1)
Y1=X1%*%J
m2=aov(Y1~fac1)
print(summary(m2))
###
cat("====","\n")
cat("Are the profiles level? ","\n")
Hotelling(Y,rep(0,p-1))
}
}
#####
"qqbeta" <- function(da){
# Beta Q-Q plot of the scaled squared Mahalanobis distances of the rows
# of da, as a multivariate-normality diagnostic; prints the correlation
# between the empirical and theoretical quantiles.
# da: data matrix (rows = observations).
if(!is.matrix(da))da=as.matrix(da)
nr = dim(da)[1]; nc = dim(da)[2]
dev=scale(da,center=T,scale=F)
dev=as.matrix(dev)
s=cov(da)
si=solve(s)
d2=sort(diag(dev%*%si%*%t(dev)))
# rescale distances to the beta(a, b) support
d2=nr*d2/((nr-1)^2)
a <- nc/2; b <- (nr-nc-1)/2
# plotting-position constants for the beta quantiles
alpha <- (nc-2)/(2*nc)
beta <- (nr-nc-3)/(2*(nr-nc-1))
mn = min(a,b,alpha,beta)
if(mn > 0){
prob=(c(1:nr)-alpha)/(nr-alpha-beta+1)
q1=qbeta(prob,a,b)
plot(q1,d2,xlab='Quantile of beta-dist',ylab='d^2')
# overlay the least squares line through the Q-Q points
fit = lsfit(q1,d2)
fitted = d2-fit$residuals
lines(q1,fitted)
rq=cor(q1,d2)
cat("correlation coefficient:",rq,"\n")
}
else{
cat("Insufficient sample size")
}
}
#####
"qqchi2" <- function(da){
# Chi-square Q-Q plot of the squared Mahalanobis distances of the rows of
# da, as a multivariate-normality diagnostic; prints the correlation
# between the empirical and theoretical quantiles.
# da: data matrix (rows = observations).
if(!is.matrix(da))da=as.matrix(da)
nr = dim(da)[1]
nc = dim(da)[2]
dev=scale(da,center=T,scale=F)
dev=as.matrix(dev)
s=cov(da)
si=solve(s)
d2=sort(diag(dev%*%si%*%t(dev)))
# plotting positions against the chi-square(nc) quantiles
prob=(c(1:nr)-0.5)/nr
q1=qchisq(prob,nc)
plot(q1,d2,xlab='Quantile of chi-square',ylab='d^2')
# overlay the least squares line through the Q-Q points
fit = lsfit(q1,d2)
fitted = d2-fit$residuals
lines(q1,fitted)
rq=cor(q1,d2)
cat("correlation coefficient:",rq,"\n")
}
####
"sir" <- function(y,x,H){
# Performs the Sliced Inverse Regression analysis
# y: the data matrix, nT by 1, of the dependent variable
# x: the nT-by-p matrix of predictors.
# H: the number of slices (forced to at least 5).
# Prints the SIR eigenvalues, directions and chi-square tests, and
# returns a list with the eigenvalues, directions and transformed x.
#
z=as.matrix(x)
p=ncol(z)
nT=nrow(z)
# slice membership follows the sorted order of y
m1=sort(y,index.return=T)
idx=m1$ix
if(H < 5)H=5
m=floor(nT/H)
nH=rep(m,H)
rem = nT-m*H
if(rem > 0){
# distribute the remainder one observation at a time to the first slices
for (i in 1:rem){
nH[i]=nH[i]+1
}
}
#print(nH)
xmean=apply(z,2,mean)
ones=matrix(rep(1,nT),nT,1)
xm=matrix(xmean,1,p)
Xdev=z-ones%*%xm
varX=cov(Xdev)
# standardize the predictors via the inverse square root of cov(x)
m1=eigen(varX)
P=m1$vectors
Dinv=diag(1/sqrt(m1$values))
Sighlf=P%*%Dinv%*%t(P)
X1=Xdev%*%Sighlf
# the sliced mean vectors of standardized predictors
EZ=matrix(0,H,p)
X1=X1[idx,]
iend=0
for (i in 1:H){
ist=iend+1
iend=iend+nH[i]
for (j in ist:iend){
EZ[i,]=EZ[i,]+X1[j,]
}
EZ[i,]=EZ[i,]/nH[i]
ist=iend
}
# weighted between-slice covariance of the sliced means
Msir=matrix(0,p,p)
for (i in 1:H){
tmp=matrix(EZ[i,],p,1)
Msir=Msir+(nH[i]/nT)*tmp%*%t(tmp)
}
m2=eigen(Msir)
eiv=m2$values
print(eiv,digits=3)
# back-transform the eigenvectors to the original predictor scale
vec=Sighlf%*%m2$vectors
print(vec,digits=3)
tX=z%*%vec
# sequential chi-square tests for the number of SIR directions
result=matrix(0,p,3)
print("Testing:")
for (i in 1:p){
j=p-i+1
avl=mean(eiv[j:p])
tst = nT*i*avl
deg = i*(H-1)
pva=1-pchisq(tst,deg)
result[i,]=c(i,tst,pva)
}
print(result,digits=4)
list(values=m2$values,dir=vec,transX=tX)
}
######
"t2chart" <- function(da){
# The data matrix is "da".
# Written by Ruey S. Tsay on April 10, 2008.
if(!is.matrix(da))da=as.matrix(da)
nr = nrow(da)
nc = ncol(da)
dev=scale(da,center=T,scale=F)
dev=as.matrix(dev)
s=t(dev)%*%dev/(nr-1)
si=solve(s)
t2=diag(dev%*%si%*%t(dev))
ul1=qchisq(0.95,nc)
ul2=qchisq(0.99,nc)
yl=max(t2,ul1,ul2)
plot(t2,type='l',ylim=c(0,yl))
points(t2,pch='*')
abline(h=c(ul1),lty=2)
abline(h=c(ul2),lty=3)
title(main='The limits are 95% and 99% quantiles')
t2chart <- list(Tsq=t2)
}
####
"t2future" <- function(da,ini){
# The data matrix is "da".
# Program written by Ruey S. Tsay, April 10, 2008.
# Modified April 15, 2014.
if(!is.matrix(da))da=as.matrix(da)
nr = nrow(da)
nc = ncol(da)
if (ini < nc) ini= nc+2
npts= nr-ini
T2=rep(0,npts)
for (i in ini:nr){
#### Data used is from t=1 to t=i-1.
dat=da[1:(i-1),]
dev=scale(dat,center=T,scale=F)
cm=matrix(apply(dat,2,mean),1,nc)
dev=as.matrix(dev)
ii=i-1
s=t(dev)%*%dev/(ii-1)
si=solve(s)
xi=matrix(da[i,],1,nc)
t2= (ii/i)*(xi-cm)%*%si%*%t(xi-cm)
ul1=((ii-1)*nc)/(ii-nc)*qf(0.95,nc,(ii-nc))
ul2=((ii-1)*nc)/(ii-nc)*qf(0.99,nc,(ii-nc))
T2[(i-ini+1)]=t2
}
yl=max(T2,ul2)
tdx=c(ini:nr)
plot(tdx,T2,type='l',ylim=c(0,yl))
points(tdx,T2,pch='*')
abline(h=c(ul1),lty=2)
abline(h=c(ul2),lty=3)
title(main='Future Chart: the limits are 95% and 99% quantiles')
t2future <- list(Tsq=T2)
}
####
"NormSimMean" <- function(n,k=2,mean=rep(0,k),sigma=diag(rep(1,k)),iter=1){
## Use simulation to see the sampling properties of sample mean.
## n: sample size (can be a sequence of sample sizes)
## k: dimension
## iter: number of iterations in simulation
##
require(mvtnorm)
k1 <- length(mean)
k2 <- ncol(sigma)
k <- min(k1,k2)
##
nsize <- length(n)
sm <- NULL
scov <- NULL
for (it in 1:iter){
for (j in 1:nsize){
nob <- n[j]
r <- rmvnorm(nob,mean=mean[1:k],sigma=sigma[1:k,1:k])
mu <- apply(r,2,mean)
v1 <- cov(r)
sm <- rbind(sm,mu)
scov <- rbind(scov,c(v1))
}
}
#
NormSimMean <- list(sm=sm,scov=scov)
}
"MeanSim" <- function(mu1,mu2,sigma1=NULL,sigma2=NULL,n1,n2,df=0,iter=1){
## Simualtion to compare mean vectors of two populations
##
require(mvtnorm)
k1 <- length(mu1); k2 <- length(mu2)
k <- min(k1,k2)
if(is.null(sigma1))sigma1 <- diag(rep(1,k))
if(is.null(sigma2))sigma2 <- diag(rep(1,k))
sigma1 <- (sigma1+t(sigma1))/2
sigma2 <- (sigma2+t(sigma2))/2
del <- mu1-mu2
#
if(n1 < 1)n1 <- 2*k
if(n2 < 1)n2 <- 2*k
fr <- (n1-1)/(n1+n2-2)
dmean <- NULL; Sigma1 <- NULL; Sigma2 <- NULL; Fstat <- NULL
for (it in 1:iter){
if(df <= 0){
x1 <- rmvnorm(n1,mean=mu1,sigma=sigma1)
x2 <- rmvnorm(n2,mean=mu2,sigma=sigma2)
}else{
x1 <- rmvt(n1,sigma=sigma1,df=df)
x1 <- x1 + matrix(1,n1,1)%*%matrix(mu1,1,k)
x2 <- rmvt(n2,sigma=sigma2,df=df)
x2 <- x2 + matrix(1,n2,1)%*%matrix(mu2,1,k)
}
#
sm1 <- apply(x1,2,mean)
sm2 <- apply(x2,2,mean)
S1 <- cov(x1)
S2 <- cov(x2)
dm <- sm1-sm2 - del
Sp <- S1*fr+S2*(1-fr)
Sp <- Sp*((1/n1)+(1/n2))
Spinv <- solve(Sp)
T2 <- matrix(dm,1,k)%*%Spinv%*%matrix(dm,k,1)
dmean <- rbind(dmean,dm)
Sigma1 <- rbind(Sigma1,c(S1))
Sigma2 <- rbind(Sigma2,c(S2))
Fst <- T2*(n1+n2-k-1)/((n1+n2-2)*k)
Fstat <- c(Fstat,Fst)
}
df2 <- n1+n2-k-1
Fdist <- rf(10000,df1=k,df2=df2)
cat("F-stat is ditrtibuted as F with dfs: ",c(k,(n1+n2-k-1)),"\n")
par(mfcol=c(1,1))
qqplot(Fstat,Fdist)
MeanSim <- list(dmean = dmean,sigma1=Sigma1, sigma2=Sigma2,Fstat=Fstat)
}
### LASSO simulations
"LASSO.sim" <- function(n,p,coef=rep(0,p),sigma2=1.0,df=0,X=NULL, sigmaX = 1.0){
## Perform LASSO simualtion.
## n: sample size
## p: dimension
## coef: true coefficient vectors
## sigma2: noise varinace
## df = 0: Gaussian noise; df .ne. 0, then use t-noise
## X: the design matrix. It will be generated from Gaussian if not given.
## sigmaX: variance of columns of X
##
## output: Y: response
## X: design matrix
## epsilon: true noise
##
if(is.null(X)){
npt <- n*p
X <- matrix(rnorm(npt)*sigmaX,n,p)
}
if(!is.matrix(X)) X <- as.matrix(X)
if(df==0){
eps <- rnorm(n)*sqrt(sigma2)
}
else{
eps <- rt(n,df=df)*sqrt(sigma2)
}
Y <- X%*%matrix(coef,p,1)+eps
LASSO.sim <- list(Y=Y,X=X,epsilon=eps,beta=coef)
}
###
"CVlasso" <- function(x,y,sizesample,s=c(0.1,0.25,0.5,0.75,0.9),iter=10){
## Compute MSE of out-of-sample prediction of LASSO
## x: design matrix
## y: dependent variable
## sizesample: subsample size
n1 <- length(s)
MSE <- matrix(0,n1,iter)
X <- x; Y <- y
for (i in 1:iter){
train <- sample(1:nrow(X),sizesample)
lasso.m <- lars(x=X[train,],y=Y[train],type="lasso")
for (j in 1:n1){
MSE[j,i] <- mean((predict(lasso.m,X[-train,],s=s[j],mode="fraction")$fit-Y[-train])^2)
}
}
mse <- apply(MSE,1,mean)
mse <- rbind(s,mse)
rownames(mse) <- c("fraction","mean(MSE)")
print(mse)
x1 <- paste("s = ",s)
tmse <- t(MSE)
colnames(tmse) <- x1
boxplot(tmse,ylab='MSE')
}
###
"CVglmnet" <- function(x,y,sizesample,sidx=c(5,10,20,30,40,50,60,70,80,90,95),iter=20,family="gaussian",type="link",alpha=1){
## Compute MSE of out-of-sample prediction of LASSO
## x: design matrix
## y: dependent variable
## sizesample: subsample size
n1 <- length(sidx)
X <- x; Y <- y
MSE <- matrix(0,n1,iter)
sidx <- sidx/100
for (i in 1:iter){
train <- sample(1:nrow(X),sizesample)
glmnet.fit <- glmnet(x=X[train,],y=Y[train],family=family,alpha=alpha)
iidx <- floor(sidx*length(glmnet.fit$lambda))
s <-glmnet.fit$lambda[sidx]
NX <- X[-train,]
for (j in 1:n1){
pp <- predict(glmnet.fit,newx=NX,s=s[j],type=type)
MSE[j,i] <- mean((pp-Y[-train])^2)
}
}
mse <- apply(MSE,1,mean)
mse <- rbind(s,mse)
rownames(mse) <- c("lambda","mean(MSE)")
print(mse)
x1 <- paste("s = ",sidx)
tmse <- t(MSE)
colnames(tmse) <- x1
boxplot(tmse,ylab='MSE',main="glmnet")
}
###
"discrim" <- function(da,size,eqP=T,eqV=T,pr=c(0),newdata=NULL){
# da: data matrix. The data are arranged according to populations.
# size: a vector of sample size of each populations so that
# the length g [g=length(size)] is the number of populations.
# eqP: switch for equal probabilities
# eqV: switch for equal covariance matrices.
# Assume equal costs.
# Assume normality.
#
da=as.matrix(da)
nr = dim(da)[1]
nc = dim(da)[2]
nrnew=0
if(length(newdata) > 0){
newclass=NULL
if(!is.matrix(newdata)){
newdata <- as.matrix(newdata)
}
nrnew=dim(newdata)[1]; ncnew=dim(newdata)[2]
if(ncnew < nc){
cat("newdata is not in the proper format","\n")
return
}
}
##
g=length(size)
# compute the sample mean and covariance matrix of each populations
cm=matrix(0,nc,g)
dm=g*nc
# S: stores the covariance matrices (one on top of the other)
S=matrix(0,dm,nc)
Sinv=matrix(0,dm,nc)
ist=0
for (i in 1:g){
x = da[(ist+1):(ist+size[i]),]
if(nc > 1){
cm1 = apply(x,2,mean)}
else{
cm1=c(mean(x))
}
cm[,i]=cm1
smtx=var(x)
jst =(i-1)*nc
S[(jst+1):(jst+nc),]=smtx
Si=solve(smtx)
Sinv[(jst+1):(jst+nc),]=Si
cat("Population: ",i,"\n")
print("mean vector:")
print(cm1,digits=3)
print("Covariance matrix:")
print(smtx,digits=4)
print("Inverse of cov-mtx:")
print(Si,digits=4)
ist=ist+size[i]
}
##
if(eqP){
pr=rep(1,g)/g
}else{
pr <- size/sum(size)
}
##
if(eqV){
print("Assume equal covariance matrices and costs")
Sp=matrix(0,nc,nc)
for (i in 1:g){
jdx=(i-1)*nc
smtx=S[(jdx+1):(jdx+nc),]
Sp=Sp+(size[i]-1)*smtx
}
Sp = Sp/(nr-g)
print("Sp: pooled covariance matrix")
print(Sp,digits=4)
Spinv=solve(Sp)
print("Sp-inv")
print(Spinv,digits=4)
if(g==2){
d=cm[,1]-cm[,2]
a=t(d)%*%Spinv
y1bar=a%*%matrix(cm[,1],nc,1)
y2bar=a%*%matrix(cm[,2],nc,1)
mhat=(y1bar+y2bar)/2
print("Linear classification function:")
cat('coefficients: ',a,"\n")
cat('m-hat:',mhat,"\n")
}
# compute ai-vector and bi value
ai=matrix(0,g,nc)
bi=rep(0,g)
for (i in 1:g){
cmi = as.matrix(cm[,i],nc,1)
ai[i,]= t(cmi)%*%Spinv
bi[i]=(-0.5)*ai[i,]%*%cmi+log(pr[i])
}
cat("Discriminate functions: matrix: ","\n")
print(ai,digits=3)
cat("Discriminate fucntions: constant: ","\n")
print(bi,digits=3)
##
ist = 0
tmc=matrix(0,g,g)
# icnt: counts of misclassification
icnt = 0
mtab=NULL
for(i in 1:g){
cls=rep(0,g)
jst=ist+1
jend=ist+size[i]
for (j in jst:jend){
dix=bi
for (ii in 1:g){
dix[ii]=dix[ii]+sum(ai[ii,]*da[j,])
}
# Find the maximum
kx=which.max(dix)
#
cls[kx]=cls[kx]+1
if(abs(kx-i)>0){
icnt=icnt+1
# compute the posterior probabilities
xo1=matrix(da[j,],1,nc)
print(xo1)
print(c(j,icnt))
tmp=xo1%*%Spinv
tmp1=sum(xo1*tmp)
tmp1 = (tmp1 + det(Sp))/2
pro = exp(dix-tmp1)
pro=pro/sum(pro)
case=c(j,i,kx,pro)
mtab=rbind(mtab,case)
}
}
tmc[i,]=cls
ist=ist+size[i]
}
######
##### new data classification
if(nrnew > 0){
for (j in 1:nrnew){
dix = bi
for (ii in 1:g){
dix[ii]=dix[ii]+sum(ai[ii,]*newdata[j,])
}
newclass=c(newclass,which.max(dix))
}
}
}
# Unequal covariance matrices
else {
print("Assume Un-equal covariance matrices, but equal costs")
# The next loop computes the dix components that do not depend on the obs.
bi=log(pr)
for (i in 1:g){
jst=(i-1)*nc
Si=S[(jst+1):(jst+nc),]
bi[i]=bi[i]-0.5*log(det(Si))
}
cat("Discriminate fucntions: constant: ","\n")
print(bi,digits=3)
ist = 0
tmc=matrix(0,g,g)
# icnt: counts of misclassification
icnt = 0
# mtab: stores the misclassification cases
mtab=NULL
for(i in 1:g){
cls=rep(0,g)
# identify the cases for population i.
jst=ist+1
jend=ist+size[i]
for (j in jst:jend){
# Compute the dix measure
xo=matrix(da[j,],1,nc)
dix=bi
for (ii in 1:g){
cmi = matrix(cm[,ii],1,nc)
kst=(ii-1)*nc
Si=Sinv[(kst+1):(kst+nc),]
dev=xo-cmi
tmp=as.matrix(dev)%*%Si
dix[ii]=dix[ii]-0.5*sum(dev*tmp)
}
# Find the maximum
kx=which.max(dix)
#
cls[kx]=cls[kx]+1
if(abs(kx-i)>0){
icnt=icnt+1
# compute the posterior probabilities
pro = exp(dix)
pro=pro/sum(pro)
case=c(j,i,kx,pro)
mtab=rbind(mtab,case)
}
}
tmc[i,]=cls
ist=ist+size[i]
}
#### new data classification, if needed.
if(nrnew > 0){
for (j in 1:nrnew){
dix=bi
for (ii in 1:g){
cmi = matrix(cm[,ii],1,nc)
kst=(ii-1)*nc
Si=Sinv[(kst+1):(kst+nc),]
xo=matrix(newdata[j,],1,nc)
dev=xo-cmi
tmp=as.matrix(dev)%*%Si
dix[ii]=dix[ii]-0.5*sum(dev*tmp)
}
newclass=c(newclass,which.max(dix))
}
}
}
# Output
if(nrow(mtab)>0){
print("Misclassification cases")
print("Case From To Post-prob")
print(mtab,digits=3)
}
print("Classification table:")
print(tmc)
print("Error rate:")
aper=icnt/nr
print(aper)
###
if(nrnew > 0){
cat("New data classification","\n")
print(newclass)
}
return <- list(newclass=newclass,misclassTable=tmc)
}
### This program performs canonical correlation test of independence
"cancorTest" <- function(x,y){
if(!is.matrix(x))x <- as.matrix(x)
if(!is.matrix(y))y <- as.matrix(y)
n1 <- nrow(x); p <- ncol(x)
n2 <- nrow(y); q <- ncol(y)
n <- min(n1,n2)
m1 <- cancor(x[1:n,],y[1:n,])
cor <- rev(m1$cor)
k <- min(p,q)
tmp <- log(1-cor^2)
tst <- cumsum(tmp)
tst <- -(n-1-0.5*(p+q+1))*tst
Test <- NULL
for (i in 1:k){
ii <- k-i
df <- (p-ii)*(q-ii)
pv <- 1-pchisq(tst[i],df)
Test <- rbind(Test,c(tst[i],df,pv))
}
colnames(Test) <- c("Test-stat","df","p-value")
print(round(Test,3))
cancorTest <- list(Test=Test)
} | |
b0b53f9f55091762e05df35d57bf82e7db505347 | 36529e0093254e9aa33f02f39d191d6201a2f826 | /PQE-input/stats_decades.R | 3e8a4ce1325519094fa2a13141883b638c1f4396 | [] | no_license | LynnSeo/Sensitivity-Analysis | 09a07d39f5011de3e130166cd1a2a4a6fbc73414 | e8c763c4553aa56cad0ecbce8799d844dfda67fc | refs/heads/master | 2021-01-23T07:34:52.522742 | 2017-06-19T05:17:11 | 2017-06-19T05:17:11 | 86,503,370 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,354 | r | stats_decades.R | #summary of statistical features of time series
load_synthetic_tsPQE <- function(t_catchment='Gingera_synthetic', t_year='70s'){
name_tspqe=str_c('C:/UserData/seol/Sensitivity Analyses/PQE input/',t_catchment,'/',t_catchment,'_ts',t_year,'.csv')
tsPQE=read.zoo(name_tspqe,sep=',',header=TRUE); tsPQE=as.xts(tsPQE)
assign('ts_t_year', tsPQE, envir = .GlobalEnv)
return(ts_t_year)
}
load_tsPQE <- function(t_catchment='Gingera', t_year='70s'){
name_tspqe=str_c('C:/UserData/seol/Sensitivity Analyses/PQE input/',t_catchment,'/',t_catchment,'.csv')
tsPQE=read.zoo(name_tspqe,sep=',',header=TRUE); tsPQE=as.xts(tsPQE)
if(t_year=='70s'){
assign('ts_t_year', tsPQE["1970-01-01::1979-12-31"],envir = .GlobalEnv)
}else if(t_year=='80s'){
assign('ts_t_year', tsPQE["1980-01-01::1989-12-31"],envir = .GlobalEnv)
}else if(t_year=='90s'){
assign('ts_t_year', tsPQE["1990-01-01::1999-12-31"],envir = .GlobalEnv)
}else if(t_year=='00s'){
assign('ts_t_year', tsPQE["2000-01-01::2009-12-31"],envir = .GlobalEnv)
}
return(ts_t_year)
}
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(xts)
years= c('70s','80s','90s','00s')
for (y in years){
t_year = load_tsPQE(t_catchment = 'Gingera', t_year = y)
sink(paste0('Gingera_',y,'.txt'))
print(summary(t_year))
sink()
}
|
2ff2677943c78c36e5ca5c22da0758cc892efc47 | e1e552e57d999ed7055244f82c0d6da9e9395c83 | /cachematrix.R | 20d8fc3a7d1f9da743ed2827edc58cbd5eaad6de | [] | no_license | wftorres/ProgrammingAssignment2 | ae76088770dccbddeeb059c8116cc5e1af51df01 | bf6c4218ed739ac50042e8cd1ca42f07462266a0 | refs/heads/master | 2021-01-25T06:06:06.770091 | 2014-10-26T01:41:22 | 2014-10-26T01:41:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,796 | r | cachematrix.R | ## Programming assignment - R Programming Oct 2014
## by Fernando Torres
## The MakeCacheMatrix creates a special "matrix" object that can cache its inverse
## that is, calculates the inverse of a matrix sent as a parameter and stores it in memory
## for later use by other functions.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
## The '$set()' element changes the variable to be calculated and clears its inverse
## It allows to change the value of the matrix to be inverted without having to call
## The 'makeCacheMatrix' function again
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(solve) m <<- solve
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function uses the special matrix from the former function
## If the inverse has already been calculated and cached, it gets the cached data
## Otherwise, it calculates the inverse and caches it
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## x is the special "matrix" returned by the former function
m <- x$getinverse()
if(!is.null(m)) {
## x$getinverse() will have a null value the first time the function is executed
## therefore this condition will never be true the first time
## From the second time on, though, it's faster to retrieve data from memory
## than to calculate the inverse
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
82da6f48e5f66164fbc621bb519cf26fceb1dcb0 | b56eee2ac6a95d0e0eb9bdbdda40679de795de10 | /microEMAWindowManager/plotUnMappedData.R | af6d63b380ea48a8fb8320db85f6984a918203ca | [] | no_license | adityaponnada/microEMA-Preprocessing | 1c658a46582753c1dae2fd1349a339a34e2868a0 | d254a4fcd8b088399108261994b56c6cfe6b3424 | refs/heads/master | 2020-03-10T14:36:24.369331 | 2018-10-30T16:17:44 | 2018-10-30T16:17:44 | 129,430,763 | 0 | 0 | null | 2018-10-30T16:17:46 | 2018-04-13T16:57:48 | R | UTF-8 | R | false | false | 10,014 | r | plotUnMappedData.R | #### Import libraries
library(psych)
library(ggplot2)
library(plotly)
library(MASS)
library(plyr)
options(digits.secs=3)
AnkleCountsAllPlot <- plot_ly(uEMA01AnkleCounts, x = ~DATE_TIME_ANKLE, y= ~COUNTS_MAGNITUDE_ANKLE, name = "ANKLE_COUNTS",
legendgroup = "ANKLE", type = "bar")%>% layout(xaxis = list(range = as.POSIXct(c('2018-02-02 00:00:00',
'2018-02-02 23:59:59'))))
WristCoountsAllPlot <- plot_ly(uEMA01WristCounts, x = ~DATE_TIME_ANKLE, y= ~COUNTS_MAGNITUDE_WRIST, name = "WRIST_COUNTS",
legendgroup = "WRIST", type = "bar")%>% layout(xaxis = list(range = as.POSIXct(c('2018-02-02 00:00:00',
'2018-02-02 23:00:00'))))
uEMAAllPlot <- plot_ly(uEMA01AnsweredPrompts, x=~ANSWER_TIME, y=~ACTIVITY_NUMERIC, name = "uEMA_RESPONSES",
legendgroup = "uEMA", type = "bar")
subplot(style(AnkleCountsAllPlot, showlegend = TRUE), style(WristCoountsAllPlot, showlegend = TRUE),
style(uEMAAllPlot, showlegend = TRUE), nrows = 3, margin = 0.05)
uEMAAllPlot
AnkleCountsAllPlot
Ankle_plot1 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-02 00:00:00', '2018-02-02 23:59:59')))
#Ankle_plot1
uEMAPlot1 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-02 00:00:00', '2018-02-02 23:59:59')))
#uEMA_plot1
Ankle_plot2 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-03 00:00:00', '2018-02-03 23:59:59')))
#Ankle_plot2
uEMAPlot2 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-03 00:00:00', '2018-02-03 23:59:59')))
#uEMA_plot2
Ankle_plot3 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-04 00:00:00', '2018-02-04 23:59:59')))
#Ankle_plot3
uEMAPlot3 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-04 00:00:00', '2018-02-04 23:59:59')))
#uEMA_plot3
Ankle_plot4 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-05 00:00:00', '2018-02-05 23:59:59')))
#Ankle_plot4
uEMAPlot4 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-05 00:00:00', '2018-02-05 23:59:59')))
#uEMA_plot4
Ankle_plot5 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-06 00:00:00', '2018-02-06 23:59:59')))
#Ankle_plot5
uEMAPlot5 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-06 00:00:00', '2018-02-06 23:59:59')))
#uEMA_plot5
Ankle_plot6 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-07 00:00:00', '2018-02-07 23:59:59')))
#Ankle_plot6
uEMAPlot6 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-07 00:00:00', '2018-02-07 23:59:59')))
#uEMA_plot6
Ankle_plot7 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-08 00:00:00', '2018-02-08 23:59:59')))
#Ankle_plot7
uEMAPlot7 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-08 00:00:00', '2018-02-08 23:59:59')))
#uEMA_plot7
Ankle_plot8 <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-09 00:00:00', '2018-02-09 23:59:59')))
#Ankle_plot8
uEMAPlot8 <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-09 00:00:00', '2018-02-09 23:59:59')))
#uEMA_plot8
multiplot(Ankle_plot1, uEMAPlot1, rows = 2)
Wrist_plot <- ggplot(uEMA01WristCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "dodgerblue4") + xlab("Date-Time") + ylab("Wrist counts") +
ggtitle("Wrist counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))
Wrist_plot
uEMA_plot <- ggplot(uEMA01AnsweredPrompts, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))
uEMA_plot
multiplot(Ankle_plot, Wrist_plot, uEMA_plot, rows = 3)
Ankle_segment <- ggplot(uEMA01AnkleCounts, aes(DATE_TIME_ANKLE, COUNTS_MAGNITUDE_ANKLE)) +
geom_bar(stat="identity", fill = "deeppink3") + xlab("Date-Time") + ylab("Ankle counts") +
ggtitle("Ankle counts 1s interval") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10)) +
scale_x_datetime(limits = as.POSIXct(c('2018-02-07 10:15:00', '2018-02-07 10:45:00')))
uEMAPlot_segment <- ggplot(countsEMADataFrame, aes(ANSWER_TIME, ACTIVITY_NUMERIC)) +
geom_bar(stat="identity", fill = "grey14") + xlab("Date-Time") + ylab("uEMA responses") +
ggtitle("Responses on uEMA prompts") +
theme(plot.title = element_text(lineheight=.8, face="bold", size = 10)) +
theme(text = element_text(size=10))+
scale_x_datetime(limits = as.POSIXct(c('2018-02-07 10:15:00', '2018-02-07 10:45:00')))
multiplot(Ankle_segment, uEMAPlot_segment, rows = 2)
|
89601034713300d8e3ac4ebb8a24200dd2d958eb | b6546b222e1e81e962f3ea51c4933700b0b57142 | /share/scientific_packages/Hotelling_Test/scripts/Fig-common-EOFS-First-reduction.R | 6c803017bf74daa015497db4b7d44321d25e8c32 | [] | no_license | jservonnat/C-ESM-EP | c4ae885e3ef91ef7d255c3725af6e91e4ee75d15 | b06405ab51d61615fa03b0e4787306c6387d3d6f | refs/heads/master | 2023-08-09T09:10:37.750316 | 2023-07-26T08:54:32 | 2023-07-26T08:54:32 | 99,813,557 | 3 | 5 | null | 2023-09-08T16:03:14 | 2017-08-09T13:49:49 | Jupyter Notebook | UTF-8 | R | false | false | 4,252 | r | Fig-common-EOFS-First-reduction.R | # -- Launch the multiplot device ---------------------------------------------------------- #
source(paste(main_dir,"scripts/Rtools/filled.contour3.R",sep="/"))
neofpc=neof1
marpc=c(4,2,4,1)
marmap=c(4,5,4,1)
figname = InputTest[[Test]]$output_ceof1_figname
print(figname)
pdf(figname,width=12,height=neofpc*(12/5))
layout(t(matrix(1:(3*neofpc),3,neofpc)),widths=c(4,1,1))
dumref=c()
for (ref in names(REFFIELDS)){
dumref=cbind(dumref,indiv.ref.proj[[ref]])
}#end for ref
dumtest=c()
for (test in names(TESTFIELDS)){
dumtest=cbind(dumtest,indiv.test.proj[[test]])
}#end for test
dumylim=range(cbind(P1.Ref,P1.Test,dumref,dumtest),na.rm=TRUE)
for (i in 1:neofpc){
# -- On recupere l'EOF et la PC
dum.eof=C_eofs1[,i]
dum.pc.ref=P1.Ref[,i]
dum.pc.test=P1.Test[,i]
# -- Explained variance: calculated in the main script ; ExpVar_Model and ExpVar_RefMean
# -- On remet les valeurs des eofs sur une matrice qui contient les continents (une matrice de taille lon*lat)
eof.ref.tmp=BlankField ; eof.ref.tmp[NoNA]=dum.eof
# -- On remet la matrice dans la dimensions lon/lat
tmp.eof=eof.ref.tmp ; dim(tmp.eof)=c(length(lat),length(lon)) ; tmp.eof=t(tmp.eof)
# -- Plot de l'EOF i
source(paste(main_dir,"scripts/Rtools/Rpalettes.R",sep="/"))
source(paste(main_dir,"scripts/graphical-parameters.R",sep="/"))
pal=CORREL
zmax=0.1
levels=seq(-zmax,zmax,by=zmax/10)
library(maps)
land=map('world2',ylim=range(lat),xlim=range(lon),interior=FALSE,plot=FALSE)
par(mar=marmap)
tmp.eof=borne(tmp.eof,-zmax,zmax)
if (lat[2]<lat[1]){plat=sort(lat,decreasing=FALSE) ; tmp.eof=tmp.eof[,length(lat):1]}else{plat=lat}
if (lon[2]<lon[1]){plon=sort(lon,decreasing=FALSE) ; tmp.eof=tmp.eof[length(lon):1,]}else{plon=lon}
dumcex = 0.9
filled.contour3(plon,plat,tmp.eof,color.palette=pal,levels=levels,plot.axes=c(
contour(plon,plat,tmp.eof,levels=levels,col="darkgrey",lwd=0.7,add=T,labels=NULL),
lines(land$x,land$y),
lines(plon,rep(0,length(plon)),type="l",lty=2),
axis(2,at=seq(latS,latN,by=10)),
axis(1,at=seq(0,180,by=30)),
axis(1,at=seq(210,360,by=30),labels=seq(-150,0,by=30)),
mtext(paste("Common EOF",i," ",varlongname[[variable]],": ",Test," and Ref. Mean",sep=""),side=3,line=2.5,adj=0,cex=1.2*dumcex),
mtext(paste("Variance expl. by EOF ",i," : Ref.Mean = ",round(ExpVar_RefMean[i],digits=1),"% ; ",Test," = ",round(ExpVar_Model[i],digits=1),"%",sep=""),side=3,line=1,adj=0,cex=1*dumcex),
mtext("Longitude",side=1,line=2.5),mtext("Latitude",side=2,line=2.5,las=3)
))
# -- Plot des projections des refs et des tests sur l'EOF i
lwdindivref=1
lwdtest=3
par(mar=marpc)
xlab="Months"
ylab=paste("PC",i)
titre=paste("Proj. on C.EOF",i)
# -- On construit un vecteur dum avec toutes les PCs a afficher pour connaitre les limites du plot
x=1:length(P1.Ref[,i])
plot(x,rep(NA,length(x)),type="l",lwd=3,xlab=xlab,ylab=ylab,main="",col="white",ylim=dumylim,xaxs="i",xaxt="n")
grid()
for (refnames in names(REFFIELDS)){
lines(x,indiv.ref.proj[[refnames]][,i],type="l",lwd=1,col="darkgrey")
}#end for refnames
lines(x,P1.Ref[,i],type="l",lwd=3,col="black")
axis(1,at=1:12,labels=c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'),las=2)
lines(x,P1.Test[,i],type="l",lwd=3,col="dodgerblue")
zero(x)
mtext(titre,3,line=1,adj=0)
# -- Plot des PCs des EOFs intrinseques
par(mar=marpc)
titre=paste("Intrinsic PC",i)
# -- Demarrage du plot
plot(x,rep(NA,length(x)),type="l",lwd=3,xlab=xlab,ylab=ylab,main="",col="white",ylim=dumylim,xaxs="i",xaxt="n")
grid()
axis(1,at=1:12,labels=c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'),las=2)
for (ref in names(REFFIELDS)){
pc=REFFIELDS[[ref]]$EOF$x[,i] ; eof=REFFIELDS[[ref]]$EOF$rotation[,i]
test=sum(eof*C_eofs1[,i]) ; if (test<0){pc=-pc}
lines(x,pc,type="l",lwd=1,col="darkgrey")
}#end for refnames
pc=EOF.MeanRef$x[,i] ; eof=EOF.MeanRef$rotation[,i]
test=sum(eof*C_eofs1[,i]) ; if (test<0){pc=-pc}
lines(x,pc,type="l",lwd=3,col="black")
pc=EOF.MeanTest$x[,i] ; eof=EOF.MeanTest$rotation[,i]
test=sum(eof*C_eofs1[,i]) ; if (test<0){pc=-pc}
lines(x,pc,type="l",lwd=3,col="dodgerblue")
zero(x)
mtext(titre,3,line=1,adj=0)
}#end for neofpc
dev.off()
|
979102abae0dd22c9d09206aa800cf3248869938 | af0c40b56578baf14b8bd06fc841eb0125e47b80 | /run_analysis.R | 0d710d1ef721e73d384513e9dfd10a3295466748 | [] | no_license | lucyb/getting-and-cleaning-data-project | e9726bf742d6e0b8f256d418733b789cd5cc6903 | 746b7b618ef633937022dc8297daf876714dc837 | refs/heads/master | 2021-01-02T08:42:07.878654 | 2017-06-25T22:12:21 | 2017-08-01T22:31:40 | 99,047,144 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,285 | r | run_analysis.R | library(dplyr)
# Getting & Cleaning Data course project: download the UCI HAR dataset,
# merge the train/test sets, keep mean()/std() measurements, attach
# descriptive activity/variable names, and write a tidy per-subject /
# per-activity summary table.
outputdir <- "data"
orgdataset <- "dataset.zip"
temp <- file.path(outputdir, "original")
testfiles <- file.path(temp, 'UCI HAR Dataset', 'test')
trainfiles <- file.path(temp, 'UCI HAR Dataset', 'train')
# Download and extract the dataset (skipped when already cached locally)
if (!dir.exists(outputdir)) {
  dir.create(outputdir)
}
if (!file.exists(file.path(outputdir, orgdataset))) {
  download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", file.path(outputdir, orgdataset))
}
unzip(file.path(outputdir, orgdataset), exdir = temp)
# Read in the train and test datasets
# Read the Subjects
subjectTest <- read.table(file.path(testfiles, "subject_test.txt"), header = FALSE)
subjectTrain <- read.table(file.path(trainfiles, "subject_train.txt"), header = FALSE)
# Read the Activities
activityTest <- read.table(file.path(testfiles , "y_test.txt" ), header = FALSE)
activityTrain <- read.table(file.path(trainfiles, "y_train.txt"), header = FALSE)
# Read the Features
featuresTest <- read.table(file.path(testfiles, "X_test.txt" ), header = FALSE)
featuresTrain <- read.table(file.path(trainfiles, "X_train.txt"), header = FALSE)
#1. Merge the training and the test sets into one data set
#   (train rows first, consistently for subjects, activities and features)
subject <- rbind(subjectTrain, subjectTest)
activity <- rbind(activityTrain, activityTest)
features <- rbind(featuresTrain, featuresTest)
# Appropriately name the columns
names(subject) <- c("subject")
names(activity) <- c("activity")
featuresNames <- read.table(file.path(file.path(temp, 'UCI HAR Dataset'), "features.txt"), header = FALSE)
names(features) <- featuresNames$V2
combinedData <- cbind(features, cbind(subject, activity))
#2. Extract only the measurements on the mean and standard deviation,
#   plus the subject and activity columns.
combinedData <- combinedData[,grep("((mean|std)\\(\\))|(^subject$)|(^activity$)", ignore.case = TRUE, names(combinedData))]
#3. Use descriptive activity names (from activity_labels.txt) as the
#   levels of an activity factor.
activityLabels <- read.table(file.path(file.path(temp, 'UCI HAR Dataset'), "activity_labels.txt"), header = FALSE)
combinedData[,"activity"] <- factor(combinedData[,"activity"], levels = activityLabels$V1, labels = activityLabels$V2)
#4. Appropriately label the data set with descriptive variable names.
names(combinedData) <- gsub("^t", "time", names(combinedData))
names(combinedData) <- gsub("Acc", "Accelerometer", names(combinedData))
names(combinedData) <- gsub("Gyro", "Gyroscope", names(combinedData))
names(combinedData) <- gsub("^f", "frequency", names(combinedData))
names(combinedData) <- gsub("Mag", "Magnitude", names(combinedData))
names(combinedData) <- gsub("BodyBody", "Body", names(combinedData))
#5. Independent tidy data set with the average of each variable for each
#   activity and each subject.
tidyData <- combinedData %>% group_by(activity, subject) %>% summarise_all(mean, na.rm = TRUE)
# Write out the results into the data directory
# (full argument name: the original used the partial match `row.name=`)
write.table(tidyData, file = file.path(outputdir, "tidydata.txt"), row.names = FALSE)
# Tidy up by deleting the extracted data. recursive = TRUE is required:
# unlink() on a directory is a silent no-op without it.
unlink(temp, recursive = TRUE)
f4a918b75df3d7c5e63cdeeda10b8921c4ad7049 | a85e536f8cbe2af99fab307509920955bd0fcf0a | /R/xqqmath.R | e2bb867f3c20ed7c323b29739ef078d8a9f1436f | [] | no_license | ProjectMOSAIC/mosaic | 87ea45d46fb50ee1fc7088e42bd35263e3bda45f | a64f2422667bc5f0a65667693fcf86d921ac7696 | refs/heads/master | 2022-12-13T12:19:40.946670 | 2022-12-07T16:52:46 | 2022-12-07T16:52:46 | 3,154,501 | 71 | 27 | null | 2021-02-17T21:52:00 | 2012-01-11T14:58:31 | HTML | UTF-8 | R | false | false | 2,953 | r | xqqmath.R | #' Augmented version of `qqmath`
#'
#'
#' @param x,data,panel,xqqmath,\dots as in [lattice::qqmath()]
#'
#' @return a trellis object
#'
#' @examples
#' x <- rnorm(100)
#' xqqmath( ~ x) # with quartile line
#' xqqmath( ~ x, fitline = TRUE) # with fitted line
#' xqqmath( ~ x, idline = TRUE) # with y = x
#' x <- rexp(100, rate = 10)
#' xqqmath( ~ x, distribution = qexp) # with quartile line
#' xqqmath( ~ x, distribution = qexp, slope = 1/10)
#' xqqmath( ~ x, distribution = qexp, slope = mean(x))
#' @export
xqqmath <-
function (x, data = NULL, panel = "panel.xqqmath", ...)
{
  # Thin wrapper around lattice's qqmath() that swaps in the augmented
  # panel function (panel.xqqmath), which can overlay quartile, fitted,
  # and identity reference lines.
  qqmath(x, data = data, panel = panel, ...)
}
#' @rdname xqqmath
#' @param qqmathline a logical: should line be displayed passing through first and third quartiles?
#' @param idline a logical; should the line y=x be added to the plot?
#' @param fitline a logical; should a fitted line be added to plot? Such a line will use `slope`
#' and `intercept` if provided, else the standard deviation and mean of the data.
#' If `slope` is specified, the line will be added unless `fitline` is
#' `FALSE`.
#' @param slope slope for added line
#' @param intercept intercept for added line
#' @param overlines a logical: should lines be on top of qq plot?
#' @param groups,pch,lwd,lty as in lattice plots
#' @param col.line color to use for added lines
#'
#' @export
panel.xqqmath <-
  function (x, qqmathline = !(fitline || idline), idline = FALSE,
            fitline = NULL, slope = NULL, intercept = NULL, overlines = FALSE,
            groups = NULL, ..., col.line = trellis.par.get("add.line")$col,
            pch = 16, lwd = 2, lty = 2)
  {
    # When a grouping variable is supplied, re-dispatch once per group via
    # panel.superpose so each group gets its own points and reference lines.
    if (!is.null(groups)) {
      panel.superpose(x, groups = groups, panel.groups = "panel.xqqmath",
                      qqmathline = qqmathline, idline = idline, fitline = fitline,
                      intercept = intercept, slope = slope, overlines = overlines,
                      ..., col.line = col.line, pch = pch, lwd = lwd, lty = lty)
    }
    else {
      # An explicit slope implies the caller wants a fitted line (unless
      # fitline was set explicitly); default the intercept to 0 in that case.
      if( !is.null(slope) ) {
        if (is.null(fitline)) fitline <- TRUE
        if (is.null(intercept)) intercept <- 0
      }
      # Recycle lty to length 3 so the qqmathline, fitline and idline each
      # get their own line type (lty[1], lty[2], lty[3] respectively).
      lty <- rep(lty, length = 3)
      # overlines = TRUE draws the points first so the lines sit on top.
      if (overlines) {
        panel.qqmath(x, ..., pch = pch)
      }
      if (idline) {
        panel.abline(0, 1, col.line = col.line, lty = lty[3], lwd = lwd)
      }
      if (is.null(fitline)) fitline <- FALSE
      if (fitline) {
        # Default fit line: slope = sd(x), intercept = mean(x), i.e. the
        # line expected for normally distributed data on a normal QQ plot.
        if (is.null(slope)) {
          slope = sd(x, na.rm=TRUE)
        }
        if (is.null(intercept)) {
          intercept = mean(x, na.rm=TRUE)
        }
        panel.abline(intercept, slope, col.line = col.line,
                     lty = lty[2], lwd = lwd)
      }
      if (qqmathline) {
        panel.qqmathline(x, col.line = col.line, lty = lty[1],
                         lwd = lwd, ...)
      }
      # Otherwise draw the points last so they sit on top of the lines.
      if (!overlines) {
        panel.qqmath(x, ..., pch = pch)
      }
    }
  }
|
0d26d3d369298fa70c1f287575d16c7f647c065c | ab3252066b73be82b71dee740af9ea0528bb4239 | /scripts/write_anomalies_to_db.R | 87c501afd922a1fab278951e53361d04af964862 | [] | no_license | slauricella/datakind_fii_project | f0437f585d2d2c12ffbd4ce758e38c6540a6d921 | e0d35509bd48c0408ec4a5c41f075593fd4a7c13 | refs/heads/master | 2021-01-16T21:03:25.796883 | 2017-08-14T02:52:55 | 2017-08-14T02:52:55 | 100,208,947 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,083 | r | write_anomalies_to_db.R | # Import output of anomaly detection python script (CSV) into Postgres table * with timestamp
# Load the anomaly-detection output (CSV produced by the Python script)
# and append it, timestamped, to the fii_anomalies Postgres table.
library(dplyr)
library(RPostgreSQL)  # library() fails loudly if the driver is missing
# User-entered variables
ANOMALY_PATH <- "/Users/shannonlauricella/Documents/datakind/anomalous.csv"
date_in_algo <- '2017-04-01'
measure_in_algo <- 'TotalIncome'
# Database info
db <- "fii_data"
hostname <- "localhost"
port_no <- 5432
username <- 'shannonlauricella'
# Read the password from the environment rather than an undefined variable
# (the original referenced `pw`, which was never assigned in this script).
pw <- Sys.getenv("PGPASSWORD")
drv <- dbDriver("PostgreSQL")
# Input Data --------------------------------------------------------------
# creates a connection to the postgres database
con <- dbConnect(drv, dbname = db,
                 host = hostname, port = port_no,
                 user = username, password = pw)
rm(pw) # removes the password from the workspace
# Reshape the anomaly rows to the fii_anomalies schema and stamp each row
# with the run date, the measure it was computed on, and an insert time.
anomaly_output <- read.csv(ANOMALY_PATH, header = TRUE) %>%
  rename(Value = TotalIncome) %>%
  mutate(JournalDate = as.Date(date_in_algo), Measure = measure_in_algo,
         Status = 'FLAGGED', AuthorID = "System generated", InsertDate = Sys.time())
dbWriteTable(con, "fii_anomalies",
             value = anomaly_output, append = TRUE, row.names = FALSE)
# Release the database connection so it is not leaked.
dbDisconnect(con)
|
3a625aac785ed92beb9e5fc2e2fe5d0091fe821b | 178752f4e65ce5b218fbb76a00d65d9260d2ee54 | /run_analysis.R | 4768795aab0465cf4f470e76ed90c4ea96b561a5 | [] | no_license | daudk/GettingAndCleaningDataProject | 905cdbedd91441c81ce4456f24b65c987d544f2b | 7e395c6f482ff3a6934f4629b729ff8430e4d360 | refs/heads/master | 2021-01-23T06:06:02.782202 | 2017-06-01T02:39:20 | 2017-06-01T02:39:20 | 93,009,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,953 | r | run_analysis.R |
## Import relevant libraries
library(plyr)
library(dplyr)
## Download and unpack the UCI HAR dataset if it is not already present.
if(!file.exists("dataset.zip")){
  # The URL must be a single-line string (the original spanned two source
  # lines, which embedded a newline inside the URL and broke the download),
  # and the download target must be the same "dataset.zip" that the
  # existence check and unzip() below refer to (the original downloaded to
  # a hard-coded Desktop path instead).
  url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(url, "dataset.zip", mode = "wb")
  unzip("dataset.zip")
}
## Get column names from the features file
col_names <- read.table("UCI HAR Dataset/features.txt")
## Get table of all activity labels
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
## Get table of all subjects (train rows first, then test, so they line up
## with the feature and activity rows combined in the same order below)
subjects_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
subjects_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
subjects <- rbind(subjects_train, subjects_test)
## Read Training data
data_train <- read.table("UCI HAR Dataset/train/X_train.txt",
                         col.names = col_names$V2)
## Read Test data
data_test <- read.table("UCI HAR Dataset/test/X_test.txt",
                        col.names = col_names$V2)
## Bind both data sets into 1 combined data set, train first. (The original
## read the test features into `data_train` and vice versa, so every row
## was misaligned with its subject and activity.)
data_set <- rbind(data_train, data_test)
## Select column positions whose original names contain mean() or std()
useful_features <- c(grep(".*-std\\(\\)", col_names$V2),
                     grep(".*-mean\\(\\)", col_names$V2))
## Extract Means and Standard Deviations from data_set; the indices are
## positional, so make.names() mangling of the column names is harmless.
subset <- select(data_set, all_of(useful_features))
## Read activity codes for training and testing data
activity_data_train <- read.table("UCI HAR Dataset/train/y_train.txt")
activity_data_test <- read.table("UCI HAR Dataset/test/y_test.txt")
## Bind together, train first to match the feature rows
activity_data <- rbind(activity_data_train, activity_data_test)
## Map the numeric activity codes onto their descriptive label strings.
## (The original passed the labels through as.list() and an undefined
## transpose() call; a plain character-to-character recode is sufficient.)
activity_data$V1 <- as.character(activity_data$V1)
activity_data$V1 <- mapvalues(activity_data$V1,
                              as.character(activity_labels$V1),
                              as.character(activity_labels$V2))
## Column-bind the activity and subject columns onto the measurements;
## colNamer() below renames them by position (last column = subject,
## second-to-last = activity).
subset <- cbind(subset, activity_data, subjects)
## Rename every column of the merged data set to a descriptive name:
## the two rightmost columns become Subject_ID / Activity_Label, and each
## measurement column is rebuilt from its original feature name. See README.
colNamer <- function (x){
  # Positional renames first: the last two columns hold subject and activity.
  n <- ncol(x)
  colnames(x)[n] <- "Subject_ID"
  colnames(x)[n - 1] <- "Activity_Label"
  # Rebuild one measurement name: prefix + the middle of the original name
  # (between the leading domain letter and the statistic keyword) + the
  # trailing axis character (X/Y/Z).
  recode <- function(old, keyword, prefix) {
    paste0(prefix,
           substr(old, 2, regexpr(keyword, old)[1] - 2),
           substr(old, nchar(old), nchar(old)))
  }
  for (i in grep("^t.+.mean", colnames(x))) {
    colnames(x)[i] <- recode(colnames(x)[i], "mean", "Time-MeanOf")
  }
  for (i in grep("^f.+.mean", colnames(x))) {
    colnames(x)[i] <- recode(colnames(x)[i], "mean", "F-MeanOf")
  }
  for (i in grep("^f.+.std", colnames(x))) {
    colnames(x)[i] <- recode(colnames(x)[i], "std", "F-StdDevOf")
  }
  for (i in grep("^t.+.std", colnames(x))) {
    colnames(x)[i] <- recode(colnames(x)[i], "std", "Time-StdDevOf-")
  }
  # Expand the sensor abbreviations everywhere in the header.
  names(x) <- gsub("Acc", "Acceleration", names(x))
  names(x) <- gsub("Gyro", "Gyroscope", names(x))
  return(x)
}
## Call the Naming function to rename all columns in our dataset.
subset<- colNamer(subset)
# Tidy data set: mean of every measurement per subject/activity pair,
# then ordered by subject and activity for readability.
tidy_data<-aggregate(.~Subject_ID+Activity_Label, subset, mean)
tidy_data <- tidy_data[order(tidy_data$Subject_ID,tidy_data$Activity_Label),]
|
7629da6a6811821947159989d863e0a74e69e52e | d30a668c836c8a4f04c881f1f38d50aef4bb39a3 | /COTSModel_Figures.R | 5d2ba7d9322915345f222951bc814b812d85248e | [] | no_license | sammatthews990/COTS_Model | efe206c2bc2b891102401bad5af16d91ffab7b38 | 1c2dfa82b7d269eb684de6fb1198d67f9f022c83 | refs/heads/master | 2021-01-01T19:46:39.549009 | 2019-09-08T23:32:22 | 2019-09-08T23:32:22 | 98,679,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,241 | r | COTSModel_Figures.R | # COTSModel_Figure Creation
# Ratio-dependent COTS (crown-of-thorns starfish) mortality and fecundity
# response curves as a function of coral cover per COTS, drawn side by side.
library(ggplot2)  # ggplot()/geom_line() etc. were used without loading it

# Mortality declines linearly from 1 (ratio <= 3) down to 0.6 (ratio >= 25),
# clamped to those values outside the 3-25 range.
b <- -0.4/22          # slope of the declining segment
yint <- 1 - b*3       # intercept chosen so mortality = 1 at a ratio of 3
df_mort <- data.frame(CCRatio = 1:35,
                      Mortality = c(rep(1, 2), yint + b*(3:25), rep(0.6, 10)))
p1 <- ggplot(df_mort, aes(x = CCRatio, y = Mortality)) +
  geom_line(color = "coral", size = 1.5) +
  theme_classic() + scale_y_continuous(limits = c(0, 1), expand = c(0, 0)) +
  scale_x_continuous(limits = c(0, 35), expand = c(0, 0)) +
  geom_vline(xintercept = c(3, 25), linetype = "dashed") +
  labs(title = "Ratio-Dependent COTS Mortality",
       x = "% Coral Cover / COTS (per Manta Tow)", y = "COTS Mortality") +
  annotate(geom = "text", x = 30, y = 0.64, label = "M = 0.6")

# Fecundity rises linearly from 0.2 at a ratio of 0 up to 1 at a ratio of
# 25, then stays at the maximum. (The column was misnamed "Mortality" in
# the original; it is renamed here along with the matching aes mapping.)
b <- 0.8/25
yint <- 0.2
df_fec <- data.frame(CCRatio = 0:35,
                     Fecundity = c(yint + b*(0:25), rep(1, 10)))
p2 <- ggplot(df_fec, aes(x = CCRatio, y = Fecundity)) +
  geom_line(color = "forestgreen", size = 1.5) +
  theme_classic() + scale_y_continuous(limits = c(0, 1), expand = c(0, 0)) +
  scale_x_continuous(limits = c(0, 35), expand = c(0, 0)) +
  geom_vline(xintercept = c(25), linetype = "dashed") +
  labs(title = "Ratio-Dependent COTS Fecundity",
       x = "% Coral Cover / COTS (per Manta Tow)", y = "COTS Fecundity") +
  annotate(geom = "text", x = 30, y = 0.94, label = "MaxFecund = 2e^7")

# Arrange the two panels side by side (uses the gridExtra package).
gridExtra::grid.arrange(p1, p2, ncol = 2)
|
b6f6bafa2a74ca45cf347465254c7851561c4380 | 92450585184ef442f54bdd066404dad473fc12fa | /Task6-Model.R | dffc813e6745719327e3c07095638123272dbbd3 | [] | no_license | zgamez/DM | 3d63962fa01c95bd9d4bf151c476903dfba48bc1 | 8dc78b49a2a761ce1889ffd23f895a6108b9acc3 | refs/heads/master | 2020-08-01T15:29:48.812312 | 2016-12-04T22:20:05 | 2016-12-04T22:20:05 | 73,572,892 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,227 | r | Task6-Model.R | setwd("/coursera/task6")
# Restaurant-hygiene prediction (data-mining capstone, Task 6): load the
# engineered feature table and fit several caret classifiers for the
# binary `hygiene` outcome.
#rm(list = ls())
library(caret)
library(randomForest)
library(parallel)
library(doParallel)
library(MLmetrics)
# One row per restaurant, features plus the `hygiene` label.
testfile <- read.csv("formodelingB.csv", header = TRUE, sep = ",")
## Cleaning
# Convert the response and the categorical predictors to factors.
# (The original comment said "classe" -- a leftover from another project.)
testfile$hygiene <- as.factor(testfile$hygiene)
testfile$zip <- as.factor(testfile$zip)
testfile$stars <- as.factor(testfile$stars)
# Set train-test files
# Stratified, reproducible 95/5 split on the outcome.
set.seed(123456)
fortrain <- createDataPartition(testfile$hygiene, p = 0.95, list=FALSE)
training <- testfile[fortrain,]
validation <- testfile[-fortrain,]
#rm(fortrain)
## A measure based on regression smoothers
# Univariate (filter) variable importance; print the six strongest
# predictors. Drops the last column, presumably the `hygiene` response --
# TODO confirm the response really is the final column.
RocImp <- filterVarImp(x = training[, -ncol(training)], y = training$hygiene)
RocImp[order(RocImp$X0, decreasing = TRUE), , drop = FALSE][1:6,]
###########################################################################
# Full Model: gradient-boosted trees (gbm) on every feature.
# The commented accuracy/F1 figures below were recorded from earlier runs
# at different train/test split proportions (85/90/95%).
#
set.seed(123456)
fortrain <- createDataPartition(testfile$hygiene, p = 0.95, list=FALSE)
training <- testfile[fortrain,]
validation <- testfile[-fortrain,]
# Train model - GBM Accuracy : 0.5648 90:Accuracy : 0.6481 95:Accuracy : 0.6923
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.7142857
# Spin up a 2-worker cluster so caret's 10-fold CV runs in parallel;
# system.time() wraps train() purely to report the elapsed fitting time.
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitGBM <- train(hygiene ~., method="gbm", data=training,
                   trControl=trainControl(method="cv", 10,
                                          allowParallel=TRUE))
)
stopCluster(cl)
# Persist the fitted model, then evaluate on the held-out 5%.
save(modFitGBM, file = "modelfull-GBMcp95cv10.rda")
modFitGBM
ValPred <- predict(modFitGBM, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
###########################################################################
# Variant 1: drop the `lda` topic feature, keep the sentiment feature.
# Five learners are fitted on the same seeded 95/5 split; the commented
# accuracy and F1 figures were recorded from earlier runs.
#
# Remove irrelevant columns
myvars <- names(testfile) %in% c("lda")
testfile2 <- testfile[!myvars]
set.seed(123456)
fortrain <- createDataPartition(testfile2$hygiene, p = 0.95, list=FALSE)
training <- testfile2[fortrain,]
validation <- testfile2[-fortrain,]
# Train model - LOGISTICS REGRESSION Accuracy : 0.7308
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.7407407
# Every model below follows the same recipe: start a 2-worker cluster,
# fit with 10-fold CV (timed by system.time()), stop the cluster, save
# the fitted object, then score the 5% hold-out.
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitLR2 <- train(hygiene ~., method="multinom", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
save(modFitLR2, file = "modelnoLDA-LRp95cv10.rda")
modFitLR2
ValPred <- predict(modFitLR2, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#modFitLR2$finalModel
# Odds ratios for the logistic/multinomial coefficients.
exp(coef(modFitLR2$finalModel))
# Train model - GBM Accuracy : 0.7692
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.75
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitGBM2 <- train(hygiene ~., method="gbm", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
save(modFitGBM2, file = "modelnoLDA-GBMcp95cv10.rda")
modFitGBM2
ValPred <- predict(modFitGBM2, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Model Comparison
# Compare the resampled CV accuracies of the two fits above.
rValues <- resamples(list(lr=modFitLR2,gbm=modFitGBM2))
rValues$values
summary(rValues)
rValmod1 <- rValues$values$`lr~Accuracy`
rValmod2 <- rValues$values$`gbm~Accuracy`
xyplot(rValmod1 ~ rValmod2) # scatter plot
# Train model - Boosted Tree C5.0 Accuracy : 0.6538
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.5714286
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitC502 <- train(hygiene ~., method="C5.0", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE),
                    tuneGrid = expand.grid(model = "tree", winnow = FALSE,
                                           trials = c(1:10, (1:5)*10)))
)
stopCluster(cl)
modFitC502
save(modFitC502, file = "modelnoLDA-C502cp95cv10.rda")
ValPred <- predict(modFitC502, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - RANDOM FOREST Accuracy : 0.6538
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.64
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitRF2 <- train(hygiene ~., method="rf", data=training,
                   trControl=trainControl(method="cv", 10,
                                          allowParallel=TRUE))
)
stopCluster(cl)
#print(modFitRF2$finalModel)
save(modFitRF2, file = "modelnoLDA-RFp95cv10.rda")
#VI <- varImp(modFitRF2, scale = FALSE)
#plot(VI, main = "Variable Importance", top = 10)
ValPred <- predict(modFitRF2, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - SVM Accuracy : 0.7308
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.6956522
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitSVM2 <- train(hygiene ~., method="svmLinear", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
print(modFitSVM2$finalModel)
save(modFitSVM2, file = "modelnoLDA-SVMp95cv10.rda")
ValPred <- predict(modFitSVM2, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
##########################################################################
###########################################################################
# Variant 2: drop both the `lda` topic feature and the `sent` sentiment
# feature, then refit the same five learners on the same seeded split.
#
# Remove irrelevant columns
myvars <- names(testfile) %in% c("lda","sent")
testfile3 <- testfile[!myvars]
set.seed(123456)
fortrain <- createDataPartition(testfile3$hygiene, p = 0.95, list=FALSE)
training <- testfile3[fortrain,]
validation <- testfile3[-fortrain,]
# Train model - LOGISTICS REGRESSION Accuracy : 0.7308
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.7407407
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitLR3 <- train(hygiene ~., method="multinom", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
save(modFitLR3, file = "modelnoLDAnosent-LRp95cv10.rda")
ValPred <- predict(modFitLR3, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - GBM Accuracy : 0.8077
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.7826087
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitGBM3 <- train(hygiene ~., method="gbm", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
save(modFitGBM3, file = "modelnoLDAnosent-GBMcp95cv10.rda")
ValPred <- predict(modFitGBM3, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - Boosted Tree Accuracy : 0.8077
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.7826087
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitC503 <- train(hygiene ~., method="C5.0", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE),
                    tuneGrid = expand.grid(model = "tree", winnow = FALSE,
                                           trials = c(1:10, (1:5)*10)))
)
stopCluster(cl)
save(modFitC503, file = "modelnoLDAnosent-C503cp95cv10.rda")
ValPred <- predict(modFitC503, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - RANDOM FOREST Accuracy : 0.5769
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.5217391
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitRF3 <- train(hygiene ~., method="rf", data=training,
                   trControl=trainControl(method="cv", 10,
                                          allowParallel=TRUE))
)
stopCluster(cl)
print(modFitRF3$finalModel)
save(modFitRF3, file = "modelnoLDAnoSent-RFp95cv10.rda")
#VI <- varImp(modFitRF3, scale = FALSE)
#plot(VI, main = "Variable Importance", top = 10)
ValPred <- predict(modFitRF3, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
# Train model - SVM Accuracy : 0.7308
#
#> F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
#[1] 0.6956522
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
system.time(
modFitSVM3 <- train(hygiene ~., method="svmLinear", data=training,
                    trControl=trainControl(method="cv", 10,
                                           allowParallel=TRUE))
)
stopCluster(cl)
print(modFitSVM3$finalModel)
save(modFitSVM3, file = "modelnoLDAnoSent-SVMp95cv10.rda")
#VI <- varImp(modFitSVM3, scale = FALSE)
#plot(VI, main = "Variable Importance", top = 10)
ValPred <- predict(modFitSVM3, validation) ### Change model here <- <-
confusionMatrix(ValPred, validation$hygiene)
F1_Score(y_pred = ValPred, y_true = validation$hygiene, positive = "0")
|
89e99f88e3b5c8fc1c1a998488c03e6848f5cf43 | a906a68e4e9244c2b40b665523f77ce866225ab5 | /04DataAnalysis/AssignmentQ6.R | 30d8b3bfeae264bf78430510e2b802b390350f58 | [] | no_license | lbedon/DataScienceSpecialization | d333bcb630b1f3100149bb9ab91cf08968d5e678 | 3f7a9cb77cb45443c9cf072fa1957ff570502da6 | refs/heads/master | 2021-08-08T21:39:54.370105 | 2017-11-11T09:42:33 | 2017-11-11T09:42:33 | 106,543,504 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,217 | r | AssignmentQ6.R | # Peer-graded Assignment: Course Project 2 Exploratory Data Analysis Question 6
# Compare emissions from motor vehicle sources in Baltimore City
# (fips == 24510) with emissions from motor vehicle sources in
# Los Angeles County, California (fips == 06037).
# Which city has seen greater changes over time in motor vehicle emissions?
# Luis David Bedon Gomez
setwd("~/Coursera/DataAnalysis")
library(dplyr)
library(ggplot2)
####################################################################################
# Step 1: Getting the data
## If "exdata%2Fdata%2FNEI_data.zip" does not exist, download and unzip the file.
filename<-"exdata%2Fdata%2FNEI_data.zip"
if(!file.exists("exdata%2Fdata%2FNEI_data/summarySCC_PM25.rds")){
urldata<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(urldata,destfile = filename,method = "curl")
unzip(filename)
}
## Get the data (skip the slow read when the variable is already in the session):
if(!exists("NEI")){
NEI<-readRDS("exdata%2Fdata%2FNEI_data/summarySCC_PM25.rds")#,stringsAsFactors = FALSE
}
## Read "Source_Classification_Code.rds"
SCC <- readRDS("exdata%2Fdata%2FNEI_data/Source_Classification_Code.rds")
####################################################################################
# Question 6: Compare motor-vehicle PM2.5 emissions in Baltimore City
# vs. Los Angeles County over 1999-2008.
## Keep the SCC codes whose EI.Sector mentions motor/vehicle, restrict NEI
## to the two counties of interest, and merge on the SCC code.
BL<-data.frame(BL=SCC$SCC[grep("[Mm]otor|[Vv]ehicle", SCC$EI.Sector)])
NEIBL<-merge(NEI[NEI$fips=="24510"|NEI$fips=="06037",],BL,by.x="SCC",by.y="BL")
### Take the last 50 1000-quantiles of the emission values
tailquantile<-tail(quantile(NEIBL$Emissions,1:1000/1000),50)
### Summarize the data (sum/max/count/mean) per year and county for each
### quantile cutoff; the result is cached to disk because it is slow to
### compute.
if(!file.exists("chartsNEIBL.RDS")){
chart<-function(x) NEIBL[NEIBL$Emissions<=x,] %>% group_by(year,fips)%>%summarize(sum=sum(Emissions),max(Emissions),N=length(Emissions),Mean=mean(Emissions),Avg=sum/N)
chartsNEIBL<-lapply(tailquantile,chart)
saveRDS(chartsNEIBL,file="chartsNEIBL.RDS")
}else {
chartsNEIBL<-readRDS("chartsNEIBL.RDS")
}
## Split the top-quantile summary (element 50 = all observations kept)
## into one data frame per county.
LA<-chartsNEIBL[[50]][chartsNEIBL[[50]]$fips=="06037",]
BA<-chartsNEIBL[[50]][chartsNEIBL[[50]]$fips=="24510",]
####################################################################################
## Making the ggplot: overlaid per-year emission totals for both counties.
###
png(filename = "plot6.png",width=600,height = 480)
ggplot()+
geom_col(data=LA,aes(x=year, y=sum),fill=rgb(.5,.5,.5,.5))+
geom_col(data=BA,aes(x=year, y=sum),fill=rgb(.25,.5,.75,.5))+
xlab("Year")+
ylab("Sum of the motor-vehicle-related\nPM2.5-Emission-Data [tons]")+
ggtitle("Change in the motor-vehicle-related PM2,5-Emission-Data\nfor Baltimore and LA by Year")+
annotate("text", x = c(1999,2002,2005,2008), y = c(200,100,100,100), label = "Baltimore",size = 3)+
annotate("text", x = c(1999,2002,2005,2008), y = c(3700,4100,4400,3900), label = "LA",size = 3)+
scale_x_continuous(name="Year",breaks=c(1999,2002,2005,2008))+
theme(axis.title.y = element_text(size=12))
dev.off()
|
36e7d9bf9fc0b75fc2a9bffd032576bf02516552 | 014d031ed37d4847b9ba3a4990d681b11c327373 | /functions week 2.R | a86a549f9938244fbed527e4111ff37a0c8d10b6 | [] | no_license | YuriyMikheev/datasciencecoursera | a094dae46f4c4f19584123e11e36f20955628c5a | f4ac35571354fdd683114113d101dcbddfaf6be9 | refs/heads/master | 2021-05-09T15:43:32.329013 | 2018-02-01T18:23:26 | 2018-02-01T18:23:26 | 119,100,183 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 992 | r | functions week 2.R | pollutantmean <- function(directory, polutant, id = 1:332) {
monitorlist <- paste0(directory,"/",dir(directory)[id])
tab <- data.frame()
for (monitor in monitorlist)
tab <- rbind(tab,read.table(monitor, header = TRUE, sep = ","))
mean(tab[[polutant]],na.rm = TRUE)
}
# Count complete observations (both sulfate and nitrate present) per monitor.
#
# directory: path containing the monitor CSV files.
# id:        integer vector selecting which monitor files to read.
#
# Returns a data frame with columns `id` (monitor ids as supplied) and `v`
# (number of rows where both pollutant columns are non-missing).
complete <- function(directory, id = 1:332) {
  monitorlist <- paste0(directory, "/", dir(directory)[id])
  # vapply preallocates the result vector, replacing the original pattern
  # of growing it with c() inside the loop.
  v <- vapply(monitorlist, function(monitor) {
    tab <- read.table(monitor, header = TRUE, sep = ",")
    sum(!is.na(tab$sulfate) & !is.na(tab$nitrate))
  }, integer(1), USE.NAMES = FALSE)
  data.frame(id, v)
}
# Correlation between sulfate and nitrate for monitors that have more than
# `threshold` complete observations.
#
# directory: path containing the monitor CSV files (all files are scanned).
# threshold: minimum number of complete cases (exclusive) a monitor must
#            have for its correlation to be included.
#
# Returns a numeric vector of correlations; numeric(0) when no monitor
# passes the threshold (the original returned an empty logical vector).
corr <- function(directory, threshold = 0) {
  monitorlist <- paste0(directory, "/", dir(directory))
  cors <- lapply(monitorlist, function(monitor) {
    tab <- read.table(monitor, header = TRUE, sep = ",")
    # Strictly more than `threshold` complete cases, matching the original.
    if (sum(!is.na(tab$sulfate) & !is.na(tab$nitrate)) > threshold) {
      cor(tab$sulfate, tab$nitrate, use = "complete.obs")
    } else {
      NULL  # dropped by unlist() below
    }
  })
  # unlist() discards the NULLs; as.numeric() makes the empty case a
  # well-typed numeric(0) instead of NULL.
  as.numeric(unlist(cors))
}
58e6cab18b564aaf18a06a99c7ba26028fae20c9 | d17896bff5eec2f0400c8868a67c34c77c0815b1 | /HW3/IntroCompFinR/R/tangency.portfolio.R | bf8c4f3869d6a6070068adf3a1779f3b9119bac9 | [] | no_license | Saechaoc/CFRM-462 | c80f3c7eb3e07992ed89b90f89a59b67686ab2f9 | 511278d58034c8803ef5e541274823809f66c2e5 | refs/heads/master | 2021-01-20T01:33:48.167522 | 2019-12-15T06:30:30 | 2019-12-15T06:30:30 | 89,296,680 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,764 | r | tangency.portfolio.R | #' @title Compute tangency portfolio
#'
#' @author Eric Zivot
#'
#' @description
#' Compute tangency (maximum Sharpe ratio) portfolio. The portfolio can allow all assets to be
#' shorted or not allow any assets to be shorted.
#'
#' @details
#' The tangency portfolio \samp{t} is the portfolio of risky assets with the highest Sharpe's slope
#' and solves the optimization problem: max \eqn{(t(t)\mu-r_f)/(t(t)\Sigma t^{1/2})} s.t.
#' \eqn{t(t)1=1} where \eqn{r_f} denotes the risk-free rate. If short sales are allowed then there
#' is an analytic solution using matrix algebra. If short sales are not allowed then the maximum
#' Sharpe ratio portfolio must be computed numerically.
#'
#' @param er \samp{N x 1} vector of expected returns
#' @param cov.mat \samp{N x N} return covariance matrix
#' @param risk.free numeric, risk free rate
#' @param shorts logical, if \samp{TRUE} then short sales (negative portfolio weights)
#' are allowed. If \samp{FALSE} then no asset is allowed to be sold short.
#'
#' @return
#' \item{call}{captures function call}
#' \item{er}{portfolio expected return}
#' \item{sd}{portfolio standard deviation}
#' \item{weights}{\samp{N x 1} vector of portfolio weights}
#'
#' @examples
#' # construct the data
#' asset.names = c("MSFT", "NORD", "SBUX")
#' er = c(0.0427, 0.0015, 0.0285)
#' names(er) = asset.names
#' covmat = matrix(c(0.0100, 0.0018, 0.0011,
#' 0.0018, 0.0109, 0.0026,
#' 0.0011, 0.0026, 0.0199),
#' nrow=3, ncol=3)
#' r.free = 0.005
#' dimnames(covmat) = list(asset.names, asset.names)
#'
#' # compute tangency portfolio
#' tan.port <- tangency.portfolio(er, covmat, r.free)
#' tan.port
#' summary(tan.port, risk.free=r.free)
#' plot(tan.port, col="blue")
#'
#' # compute tangency portfolio with no short sales
#' tan.port.ns <- tangency.portfolio(er, covmat, r.free, shorts=FALSE)
#' tan.port.ns
#' summary(tan.port.ns, risk.free=r.free)
#' plot(tan.port.ns, col="blue")
#'
#' @export tangency.portfolio
tangency.portfolio <-
function(er,cov.mat,risk.free, shorts=TRUE)
{
  call <- match.call()
  #
  # check for valid inputs
  #
  asset.names <- names(er)
  if(risk.free < 0)
    stop("Risk-free rate must be positive")
  er <- as.vector(er)
  cov.mat <- as.matrix(cov.mat)
  N <- length(er)
  if(N != nrow(cov.mat))
    stop("invalid inputs")
  # NOTE(review): chol() itself errors on a non-positive-definite matrix
  # before this check runs, so the diagonal test can only fire in
  # borderline semi-definite cases -- confirm whether a tryCatch is wanted.
  if(any(diag(chol(cov.mat)) <= 0))
    stop("Covariance matrix not positive definite")
  # remark: could use generalized inverse if cov.mat is positive semi-definite
  #
  # compute global minimum variance portfolio
  # (a tangency portfolio only exists when the risk-free rate lies below
  # the expected return of the global minimum variance portfolio)
  #
  gmin.port <- globalMin.portfolio(er, cov.mat, shorts=shorts)
  if(gmin.port$er < risk.free)
    stop("Risk-free rate greater than avg return on global minimum variance portfolio")
  #
  # compute tangency portfolio
  #
  if(shorts==TRUE){
    # Analytic solution with shorting allowed: weights proportional to
    # inverse(Sigma) %*% (mu - rf), normalized to sum to one.
    cov.mat.inv <- solve(cov.mat)
    w.t <- cov.mat.inv %*% (er - risk.free) # tangency portfolio
    w.t <- as.vector(w.t/sum(w.t))  # normalize weights
  } else if(shorts==FALSE){
    # No-short-sales case: maximize the Sharpe ratio numerically as a
    # quadratic program (quadprog) with non-negativity constraints;
    # meq=1 makes the excess-return constraint an equality.
    Dmat <- 2*cov.mat
    dvec <- rep.int(0, N)
    er.excess <- er - risk.free
    Amat <- cbind(er.excess, diag(1,N))
    bvec <- c(1, rep(0,N))
    result <- quadprog::solve.QP(Dmat=Dmat,dvec=dvec,Amat=Amat,bvec=bvec,meq=1)
    w.t <- round(result$solution/sum(result$solution), 6)
  } else {
    stop("Shorts needs to be logical. For no-shorts, shorts=FALSE.")
  }
  names(w.t) <- asset.names
  # Portfolio expected return and standard deviation under the weights.
  er.t <- crossprod(w.t,er)
  sd.t <- sqrt(t(w.t) %*% cov.mat %*% w.t)
  tan.port <- list("call" = call,
                   "er" = as.vector(er.t),
                   "sd" = as.vector(sd.t),
                   "weights" = w.t)
  class(tan.port) <- "portfolio"
  return(tan.port)
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.