blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fd5d5f2d24d08721a96c0580f87e70166dfb03cd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggfortify/examples/autoplot.glmnet.Rd.R
|
2f8a89a1bbffdcb6bc19bac10aaf2c1916eb8b70
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
autoplot.glmnet.Rd.R
|
# Extracted example script for ggfortify's autoplot.glmnet() method.
# Fits a glmnet model on the built-in Orange data set (columns other than 3
# as predictors, column 3 as response) and autoplots the result.
# NOTE(review): requires the glmnet package to be installed; ggfortify
# supplies the autoplot method.
library(ggfortify)
### Name: autoplot.glmnet
### Title: Autoplot 'glmnet::glmnet'
### Aliases: autoplot.glmnet
### ** Examples
autoplot(glmnet::glmnet(data.matrix(Orange[-3]), data.matrix(Orange[3])))
|
df8445dadb197fdf55dc19a52da905974aeb4542
|
05435fbdd6a6f58d2ebd4092129f4219132b707f
|
/code/Tross_Sebens_ER_model5_time.R
|
8d8859d94a05aa3f37a419ba74b0fd057739af4a
|
[] |
no_license
|
earobert/BE_2019_01_25
|
760587a5ec2ad6ba367c232bb1b311442213ea59
|
e15f1e74c14328ba708cbe9ceb98e82c892cce28
|
refs/heads/master
| 2020-03-21T08:39:29.381960
| 2019-02-16T00:36:42
| 2019-02-16T00:36:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,383
|
r
|
Tross_Sebens_ER_model5_time.R
|
#_____________________________________________________________________----
# This is the main working model right now. Now I'm adding time as an aspect in...
#===============================================#
# Model 5 - there is no gamma, and no cost of threads
# MLE estimation of cost of byssus given growth and thread production####
# With separate estimates of a and the cost of thread production
# BUT including a baseline value
# -use Ken's suggestion of having their be a baseline... Tried that here. Doesn't change anything.
# I could assume that 0 thread cutting was for that baseline value.
#===============================================#
# mod.1: bioenergetic growth model (Model 5: no gamma, no cost-of-threads baseline).
#
# params: numeric vector — [1] food_scalar (scales intake coefficient a),
#         [2] cost_per_thread (J per byssal thread), [3] sigma (residual sd;
#         read into `sigma` below but not used in this function — it is
#         consumed by the NLL wrapper a.est.NLL1).
# season: character ("Spring"/"Autumn"), used to subset "Spring_Fall.csv".
#
# Returns a data.frame with columns:
#   growth_tissue  — observed wet-weight tissue growth (gWW), from shell lengths
#   model.predG_g  — model-predicted growth converted from J to g
#
# NOTE(review): calls setwd("~/BE/BE/Datasets") — a session-wide side effect
# and a machine-specific path; confirm before reuse.
mod.1 <- function(params, season) {
food_scalar <- params[1]
cost_per_thread <- params[2]
#baseline_byssus_multiplier <- 0.08
sigma <- params[3]
season <- season
# conversion factors
conversion_gWW_per_gDW <- 3.918 # From Summer 2015 collection, shape coeff estimation
shape_coeff <- .304 # From Summer 2015 collection, shape coeff estimation
mass_ww_per_dw <- 3.9 #coverts from mass_DW (dry weight) to mass_WW (wet weight)
# exponents ####
d <- 0.67 # intake; the model is very sensitive to this. (hope this is an exponent for g WW not mg WW)
e <- 1 # the model is very sensitive to this (hope this is an exponent for g WW not mg WW)
# calculation of b
respiration_reference_J_per_day <- 0.07*4.75*4.184*24 # Units: J / (day) from 0.07mlO2/hr, Fly and Hilbish 2013
respiration_reference_gDW <- 0.24 # Fly and Hilbish 2013
respiration_reference_gWW <- respiration_reference_gDW * 3.9 # Using wet:dry conversion
b_permass <- respiration_reference_J_per_day / (respiration_reference_gWW)^e # Note that e is just 1
b_J_per_g_WW_per_day <- b_permass # Units: J / (gWW * day)
# Temp response, other ####
Tmult_cost <- 1
Tmult_int <- 1
reprod_multiplier <- 1 # reproduction multiplier, brachi was 1, but if k is about half and half then this should be more like .5
opt_size_reducer <- 1 # lets reproduction equal surplus early
# other parameters ####
en_density_g_p_J <- .002 #energy_density
size_maturity <- 10 # mg, size at maturity, 3000mg from tross del_M calc worksheet, somatic<full tissue weight
# calculated scalers
b <- b_J_per_g_WW_per_day*Tmult_cost #Wait the byssus multiplier is earlier here than I thought
Wopt_measured_gDW <- 0.8 #gDW from sample collection
Wopt_measured_gWW <- Wopt_measured_gDW * conversion_gWW_per_gDW
# Back-calculate intake coefficient a so that surplus is maximized at Wopt.
a_fromWopt <- (b*e)/((Wopt_measured_gWW)^(d-e)*d) # backwards calculation of a
a_J_per_day_per_food_scalar <- a_fromWopt
a <- a_J_per_day_per_food_scalar*food_scalar*Tmult_int
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv", stringsAsFactors = FALSE)
df$treatment <- as.factor(df$treatment)
df$season <- as.factor(df$season)
df$treatment <- ordered(df$treatment, levels = c("never", "weekly","daily"))
df <- df[df$season==season,]
df.never <- df[df$treatment=="never",]
# all data
# Drop rows missing either QC'd length measurement before deriving masses.
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
len_init <- df[df$season==season,]$len_init_QC/10 #converted from mm to cm
len_final <- df[df$season==season,]$len_final_QC/10# converted from mm to cm
thread_num <- df[df$season==season,]$thread_count_QC
# NOTE(review): the literal .304 below duplicates shape_coeff defined above —
# keep the two in sync (length-to-mass allometry: mass = (len * shape)^3).
mass_ww_init <- (len_init*.304)^3
mass_ww_final <- (len_final*.304)^3
mass_ww <- mass_ww_init
growth_shell <- len_final-len_init
growth_tissue <- mass_ww_final-mass_ww_init
gonad_wt_dry <- df[df$season==season,]$gonad_wt_dry
total_wt_dry <- df[df$season==season,]$total_wt_dry
gonad_proportion <- gonad_wt_dry / total_wt_dry
# model####
# Energy budget: surplus = intake - maintenance - induced byssus - reproduction.
intake <- a*(mass_ww^d) #good, this is already in mass_ww... hope this is g not mg
cost <- b*(mass_ww^e) #*(1-baseline_byssus_multiplier)
# byssus_baseline <- b*(mass_ww^e)*baseline_byssus_multiplier
byssus_induced <- thread_num*cost_per_thread
reproduction <- 0
model.predG_J <- intake-cost-byssus_induced-reproduction #predicts mussel growth in J
model.predG_g <- model.predG_J*en_density_g_p_J #predicts mussel growth in g DW or WW???
out <- data.frame(
growth_tissue = growth_tissue,
model.predG_g = model.predG_g
)
return(out)
}
#For troubleshooting:
# Run the model once with hand-picked parameters to eyeball observed vs
# predicted growth before handing the NLL to optim() below.
params <- c(food = 3, cost_induced_byssus = 0.01, sigma = 100)
season <- "Autumn"
out <- mod.1(params,season)
growth_tissue <- out$growth_tissue
model.predG_g <- out$model.predG_g
#plot(growth_tissue,model.predG_g)
#
# Negative log-likelihood of the observed tissue growth under mod.1's
# predictions, assuming Gaussian residuals.
#
# params: c(food_scalar, cost_per_thread, sigma) — sigma (params[3]) is the
#         residual standard deviation of the normal error model.
# season: passed through to mod.1() to select the data subset.
# Returns a single numeric NLL value, suitable as the `fn` argument of optim().
a.est.NLL1 <- function(params, season) {
  fit <- mod.1(params, season)
  log_lik <- dnorm(
    x = fit$growth_tissue,
    mean = fit$model.predG_g,
    sd = params[3],
    log = TRUE
  )
  -sum(log_lik)
}
# Fit the three model parameters per season by minimizing the NLL.
# optim() defaults to Nelder-Mead; `par` gives the starting values and
# `season` is forwarded to a.est.NLL1 via `...`.
Autumn.est <- optim(fn=a.est.NLL1, par=c(food = 3, cost_induced_byssus = 0.01, sigma = 100), season = "Autumn") # par are the starting values
Spring.est <- optim(fn=a.est.NLL1, par=c(food = 3, cost_induced_byssus = 0.01, sigma = 100), season = "Spring") # par are the starting values
#=====================
#plot####
#=====================
# a.plot.NLL1: diagnostic plots for a fitted parameter set.
# Panel 1: observed growth vs thread count, with a fitted regression line.
# Panel 2: predicted vs observed growth, with regression line and 1:1 dashed line.
#
# params: c(food_scalar, cost_per_thread, sigma), typically an optim() $par.
# season: season label used both by mod.1() and to re-subset the CSV here.
# Called for its plotting side effects; no return value.
#
# NOTE(review): df is re-read and filtered with the same season/NA rules as
# inside mod.1, so its rows are assumed to align 1:1 with mod.1's output —
# confirm if either filter changes.
a.plot.NLL1 <- function(params, season) {
out <- mod.1(params,season)
growth_tissue <- out$growth_tissue
model.predG_g <- out$model.predG_g
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv")
df <- df[df$season==season,]
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
p.1 <- lm(growth_tissue~ df$thread_count_QC)
plot(x = df$thread_count_QC, y = growth_tissue,
col = as.numeric(df$treatment)+26,
pch = 19,
xlab = "Thread production (#)",
ylab = "Observed growth (gWW)"
)
abline(p.1)
p.2 <- lm(model.predG_g~growth_tissue)
plot(x = growth_tissue, y = model.predG_g,
col = as.numeric(df$treatment)+26,
pch = 19,
ylim = c(0,.3), xlim = c(0,.3),
xlab = "Observed growth (gWW)",
ylab = "Predicted growth (gWW)"
)
abline(p.2)
# Dashed 1:1 reference line for the predicted-vs-observed panel.
x <- seq(from = -.4, to=.4, by=.1)
lines(x,x, lty = 2)
}
# Draw the per-season diagnostic panels on a fresh 2x2 layout.
# Fix 1: an unconditional dev.off() errors ("cannot shut down device 1") when
# no graphics device is open, halting the script in a fresh session — guard it.
if (dev.cur() > 1) dev.off()
par(mfrow = c(2,2), mar = c(5,4,4,2) + 0.1)
# Fix 2: the original called a.plot.NLL1(par = ...), relying on R's partial
# argument matching of `par` to the formal `params`; spell it out explicitly.
# (The element names inside c() are cosmetic — the function indexes by position.)
a.plot.NLL1(params = c(food = Autumn.est$par[1], cost_per_byssus = Autumn.est$par[2], sigma = Autumn.est$par[3]), season = "Autumn")
a.plot.NLL1(params = c(food = Spring.est$par[1], cost_per_byssus = Spring.est$par[2], sigma = Spring.est$par[3]), season = "Spring")
# plot cost using estimated costs.
#Spring####
# This section re-runs mod.1's internals inline (copy-pasted) for the Spring
# fit so that intermediate quantities (intake, cost, byssus_induced, surplus)
# are available at the top level for plotting energy-budget breakdowns.
season <- "Spring"
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv", stringsAsFactors = FALSE)
df$treatment <- as.factor(df$treatment)
df$season <- as.factor(df$season)
df$treatment <- ordered(df$treatment, levels = c("never", "weekly","daily"))
df <- df[df$season==season,]
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
df.never <- df[df$treatment=="never",]
# Per-mussel byssus cost (threads x fitted cost-per-thread), by treatment.
plot(df$treatment, df$thread_count_QC*Spring.est$par[2], ylab = "Cost (J)")
params <- Spring.est$par
food_scalar <- params[1]
cost_per_thread <- params[2]
#baseline_byssus_multiplier <- 0.08
sigma <- params[3]
season <- season
# conversion factors
conversion_gWW_per_gDW <- 3.918 # From Summer 2015 collection, shape coeff estimation
shape_coeff <- .304 # From Summer 2015 collection, shape coeff estimation
mass_ww_per_dw <- 3.9 #coverts from mass_DW (dry weight) to mass_WW (wet weight)
# exponents ####
d <- 0.67 # intake; the model is very sensitive to this. (hope this is an exponent for g WW not mg WW)
e <- 1 # the model is very sensitive to this (hope this is an exponent for g WW not mg WW)
# calculation of b
respiration_reference_J_per_day <- 0.07*4.75*4.184*24 # Units: J / (day) from 0.07mlO2/hr, Fly and Hilbish 2013
respiration_reference_gDW <- 0.24 # Fly and Hilbish 2013
respiration_reference_gWW <- respiration_reference_gDW * 3.9 # Using wet:dry conversion
b_permass <- respiration_reference_J_per_day / (respiration_reference_gWW)^e # Note that e is just 1
b_J_per_g_WW_per_day <- b_permass # Units: J / (gWW * day)
# Temp response, other ####
Tmult_cost <- 1
Tmult_int <- 1
reprod_multiplier <- 1 # reproduction multiplier, brachi was 1, but if k is about half and half then this should be more like .5
opt_size_reducer <- 1 # lets reproduction equal surplus early
# other parameters ####
# NOTE(review): `.002 *29` differs from the `.002` used inside mod.1 and in
# the Autumn section below — confirm whether the *29 factor is intentional.
en_density_g_p_J <- .002 *29 #energy_density
size_maturity <- 10 # mg, size at maturity, 3000mg from tross del_M calc worksheet, somatic<full tissue weight
# calculated scalers
b <- b_J_per_g_WW_per_day*Tmult_cost #Wait the byssus multiplier is earlier here than I thought
Wopt_measured_gDW <- 0.8 #gDW from sample collection
Wopt_measured_gWW <- Wopt_measured_gDW * conversion_gWW_per_gDW
a_fromWopt <- (b*e)/((Wopt_measured_gWW)^(d-e)*d) # backwards calculation of a
a_J_per_day_per_food_scalar <- a_fromWopt
a <- a_J_per_day_per_food_scalar*food_scalar*Tmult_int
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv", stringsAsFactors = FALSE)
df$treatment <- as.factor(df$treatment)
df$season <- as.factor(df$season)
df$treatment <- ordered(df$treatment, levels = c("never", "weekly","daily"))
df <- df[df$season==season,]
df.never <- df[df$treatment=="never",]
# all data
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
len_init <- df[df$season==season,]$len_init_QC/10 #converted from mm to cm
len_final <- df[df$season==season,]$len_final_QC/10# converted from mm to cm
thread_num <- df[df$season==season,]$thread_count_QC
mass_ww_init <- (len_init*.304)^3
mass_ww_final <- (len_final*.304)^3
mass_ww <- mass_ww_init
growth_shell <- len_final-len_init
growth_tissue <- mass_ww_final-mass_ww_init
gonad_wt_dry <- df[df$season==season,]$gonad_wt_dry
total_wt_dry <- df[df$season==season,]$total_wt_dry
gonad_proportion <- gonad_wt_dry / total_wt_dry
# model####
intake <- a*(mass_ww^d) #good, this is already in mass_ww... hope this is g not mg
cost <- b*(mass_ww^e) #*(1-baseline_byssus_multiplier)
# byssus_baseline <- b*(mass_ww^e)*baseline_byssus_multiplier
byssus_induced <- thread_num*cost_per_thread
reproduction <- 0
model.predG_J <- intake-cost-byssus_induced-reproduction #predicts mussel growth in J
model.predG_g <- model.predG_J*en_density_g_p_J #predicts mussel growth in g DW or WW???
# Sanity check: the two vectors below should have the same length.
length(df$treatment)
length(model.predG_J)
new <- data.frame(treat = df$treatment, pred_J =model.predG_J)
new$treat <- as.factor(new$treat)
# NOTE(review): unguarded dev.off() errors if no graphics device is open.
dev.off()
# Energy-budget components (J) by treatment.
par(mfrow = c(2,2))
plot(new$treat, intake, ylim = c(0,120), ylab = "intake (J)")
plot(new$treat,byssus_induced, ylim = c(0,120), ylab = "cost byssus (J)")
plot(new$treat,cost, ylim = c(0,120), ylab = "cost non-byssus (J)")
plot(new$treat,new$pred_J, ylim = c(0,120), ylab = "surplus (J)")
dev.off()
# Same components as percentages of intake.
par(mfrow = c(2,2))
prop_byss <- byssus_induced / (intake)
plot(new$treat, prop_byss*100, ylim = c(0,40), ylab = "% energy to byssus")
prop_predG_J <- new$pred_J/ intake
plot(new$treat,prop_predG_J*100, ylim = c(0,80), ylab = "% energy to growth")
# NOTE(review): prop_byss is reassigned here to the non-byssus cost fraction;
# the name no longer matches its content from this point on.
prop_byss <- cost / (intake)
plot(new$treat, prop_byss*100, ylim = c(0,100), ylab = "% energy to non-byssus costs")
plot(df$treatment, gonad_proportion*100*prop_byss, ylim = c(0,100), ylab = "% energy to reproduction")
# plot cost using estimated costs.
#Autumn####
# Autumn counterpart of the Spring section above: mod.1's internals re-run
# inline with the Autumn fit so energy-budget components can be plotted.
season <- "Autumn"
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv", stringsAsFactors = FALSE)
df$treatment <- as.factor(df$treatment)
df$season <- as.factor(df$season)
df$treatment <- ordered(df$treatment, levels = c("never", "weekly","daily"))
df <- df[df$season==season,]
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
df.never <- df[df$treatment=="never",]
# Per-mussel byssus cost (threads x fitted cost-per-thread), by treatment.
plot(df$treatment, df$thread_count_QC*Autumn.est$par[2], ylab = "Cost (J)")
params <- Autumn.est$par
food_scalar <- params[1]
cost_per_thread <- params[2]
#baseline_byssus_multiplier <- 0.08
sigma <- params[3]
season <- season
# conversion factors
conversion_gWW_per_gDW <- 3.918 # From Summer 2015 collection, shape coeff estimation
shape_coeff <- .304 # From Summer 2015 collection, shape coeff estimation
mass_ww_per_dw <- 3.9 #coverts from mass_DW (dry weight) to mass_WW (wet weight)
# exponents ####
d <- 0.67 # intake; the model is very sensitive to this. (hope this is an exponent for g WW not mg WW)
e <- 1 # the model is very sensitive to this (hope this is an exponent for g WW not mg WW)
# calculation of b
respiration_reference_J_per_day <- 0.07*4.75*4.184*24 # Units: J / (day) from 0.07mlO2/hr, Fly and Hilbish 2013
respiration_reference_gDW <- 0.24 # Fly and Hilbish 2013
respiration_reference_gWW <- respiration_reference_gDW * 3.9 # Using wet:dry conversion
b_permass <- respiration_reference_J_per_day / (respiration_reference_gWW)^e # Note that e is just 1
b_J_per_g_WW_per_day <- b_permass # Units: J / (gWW * day)
# Temp response, other ####
Tmult_cost <- 1
Tmult_int <- 1
reprod_multiplier <- 1 # reproduction multiplier, brachi was 1, but if k is about half and half then this should be more like .5
opt_size_reducer <- 1 # lets reproduction equal surplus early
# other parameters ####
# NOTE(review): `.002` here vs `.002 *29` in the Spring section — confirm
# which energy density is intended; the asymmetry looks accidental.
en_density_g_p_J <- .002 #energy_density
size_maturity <- 10 # mg, size at maturity, 3000mg from tross del_M calc worksheet, somatic<full tissue weight
# calculated scalers
b <- b_J_per_g_WW_per_day*Tmult_cost #Wait the byssus multiplier is earlier here than I thought
Wopt_measured_gDW <- 0.8 #gDW from sample collection
Wopt_measured_gWW <- Wopt_measured_gDW * conversion_gWW_per_gDW
a_fromWopt <- (b*e)/((Wopt_measured_gWW)^(d-e)*d) # backwards calculation of a
a_J_per_day_per_food_scalar <- a_fromWopt
a <- a_J_per_day_per_food_scalar*food_scalar*Tmult_int
# import data ####
setwd("~/BE/BE/Datasets")
df <- read.csv(file="Spring_Fall.csv", stringsAsFactors = FALSE)
df$treatment <- as.factor(df$treatment)
df$season <- as.factor(df$season)
df$treatment <- ordered(df$treatment, levels = c("never", "weekly","daily"))
df <- df[df$season==season,]
df.never <- df[df$treatment=="never",]
# all data
df <- df[!is.na(df$len_init_QC)&!is.na(df$len_final_QC),]
len_init <- df[df$season==season,]$len_init_QC/10 #converted from mm to cm
len_final <- df[df$season==season,]$len_final_QC/10# converted from mm to cm
thread_num <- df[df$season==season,]$thread_count_QC
mass_ww_init <- (len_init*.304)^3
mass_ww_final <- (len_final*.304)^3
mass_ww <- mass_ww_init
growth_shell <- len_final-len_init
growth_tissue <- mass_ww_final-mass_ww_init
gonad_wt_dry <- df[df$season==season,]$gonad_wt_dry
total_wt_dry <- df[df$season==season,]$total_wt_dry
gonad_proportion <- gonad_wt_dry / total_wt_dry
# model####
intake <- a*(mass_ww^d) #good, this is already in mass_ww... hope this is g not mg
cost <- b*(mass_ww^e) #*(1-baseline_byssus_multiplier)
# byssus_baseline <- b*(mass_ww^e)*baseline_byssus_multiplier
byssus_induced <- thread_num*cost_per_thread
reproduction <- 0
model.predG_J <- intake-cost-byssus_induced-reproduction #predicts mussel growth in J
model.predG_g <- model.predG_J*en_density_g_p_J #predicts mussel growth in g DW or WW???
# Sanity check: the two vectors below should have the same length.
length(df$treatment)
length(model.predG_J)
new <- data.frame(treat = df$treatment, pred_J =model.predG_J)
new$treat <- as.factor(new$treat)
# NOTE(review): unguarded dev.off() errors if no graphics device is open.
dev.off()
# Energy-budget components (J) by treatment.
par(mfrow = c(2,2))
plot(new$treat, intake, ylim = c(0,120), ylab = "intake (J)")
plot(new$treat,byssus_induced, ylim = c(0,120), ylab = "cost byssus (J)")
plot(new$treat,cost, ylim = c(0,120), ylab = "cost non-byssus (J)")
plot(new$treat,new$pred_J, ylim = c(0,120), ylab = "surplus (J)")
dev.off()
# Same components as percentages of intake.
par(mfrow = c(2,2))
prop_byss <- byssus_induced / (intake)
plot(new$treat, prop_byss*100, ylim = c(0,40), ylab = "% energy to byssus")
prop_predG_J <- new$pred_J/ intake
plot(new$treat,prop_predG_J*100, ylim = c(0,80), ylab = "% energy to growth")
# NOTE(review): prop_byss is reassigned here to the non-byssus cost fraction;
# the name no longer matches its content from this point on.
prop_byss <- cost / (intake)
plot(new$treat, prop_byss*100, ylim = c(0,100), ylab = "% energy to non-byssus costs")
plot(df$treatment, gonad_proportion*100*prop_byss, ylim = c(0,100), ylab = "% energy to reproduction")
# Some things to try...
# -use frequency of manipulation rather than number of threads (.3per week, 1per week, 7per week) ...
# -use Ken's suggestion of having their be a baseline... Tried that here.
# -calculate with same cost of threads
# -perform a monte carlo on a and threads
# Some things to try...
# -use frequency of manipulation rather than number of threads (.3per week, 1per week, 7per week) ...
# -use Ken's suggestion of having their be a baseline... Tried that here.
# -calculate with same cost of threads
# -perform a monte carlo on a and threads
|
fa2ff59477effa8323d8a6a1fc3146dc30f02ef9
|
9638273b355612ca5b366eb79927129ac51fa6d9
|
/scripts/DBI_connection.R
|
d59cf11727dff2212141c4370ab0419284ed5a08
|
[] |
no_license
|
RyanFarquharson/ACPMD
|
f8984344205a7faff890df8b75186dee929dfdcf
|
c7dc779096754ac330c505d8dcd7571321699647
|
refs/heads/master
| 2020-04-07T22:49:57.484592
| 2019-02-01T05:45:37
| 2019-02-01T05:45:37
| 158,787,059
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,670
|
r
|
DBI_connection.R
|
# connected to Access database using Connections tab in Rstudio
# help available at https://db.rstudio.com/odbc/
#
# Pipeline: pull ABS agricultural census tables (1982-1996) out of an Access
# DB via ODBC, join item/region lookups, write per-year CSVs, stack them with
# a Year column, concord SLA regions to 2011 SA2s, and write the summary CSV.
library(DBI)
# Fix: the script uses %>%, inner_join/select, write_csv/read_csv/bind_rows and
# read_excel but never attached their packages, so it could not run as-is.
library(dplyr)
library(readr)
library(readxl)
con <- dbConnect(odbc::odbc(), "ABS198296")
# Look at what is in the database
dbListTables(con)
dbListFields(con, "Ag1983")
dbListFields(con, "Item_list")
dbListFields(con, "ASGC96")
# Create a list of tables
Aglist <- dbListTables(con, table_name = "Ag%")
# iterate through list to import into R as dataframes and use inner join to match up item info and save as csv
Item_listR <- dbReadTable(con, "Item_list")
ASGC96R <- dbReadTable(con, "ASGC96")
for (item in Aglist) {
  tablename <- paste0(item,"R")
  # NOTE(review): write_csv's `path` argument is deprecated in current readr
  # (use `file =`); kept as-is to avoid changing behavior on older installs.
  assign(tablename, dbReadTable(con, item) %>%
    inner_join(Item_listR) %>%
    inner_join(ASGC96R) %>%
    select("Area_id", "Name", "Item_id", "Item_Name", "Units", "Value")%>%
    write_csv(path = paste0("./data/198296/",tablename))
  )
}
# Add a column for the census year in each table first then bind rows.
# Assumes list.files() returns one file per year in 1982..1996 order — TODO confirm.
Yearlist <- seq(1982,1996,1)
ABS <- data.frame(Area_id = integer(),
                  Name = character(),
                  Item_id = integer(),
                  Item_Name = character(),
                  Units = character(),
                  Value = double(),
                  Year = integer())
counter <- 1
for (f in list.files("./data/198296/")) {
  new_table <- read_csv(paste0("./data/198296/",f))
  new_table$Year <- Yearlist[counter]
  ABS <- bind_rows(ABS, new_table)
  counter <- counter + 1
}
# concord to SA2s
# import ABS correspondence file (head(n = -3) drops the trailing footer rows)
SLA_1996_SA2_2011 <- head(read_excel("./data/raw_data/concordance/CG_SLA_1996_SA2_2011.xls", sheet = "Table 3", skip = 5), n = -3)
# use an inner join to match up SLA names with the correspondence data
ABS_SA2 <- inner_join(ABS, SLA_1996_SA2_2011, by = c("Name" = "SLA_NAME_1996"))
# Use the concordance data to calculate new "Estimate" values by SA2
ABS_SA2$Estimate_SA2 <- ABS_SA2$Value * ABS_SA2$RATIO
# Fix: this separator was a bare `-------------` line (a run of unary minus
# operators), which breaks parsing of the assignment that follows it.
# -------------
# Use a group_by and summarise to sum Estimates for each commodity by SA2
commodities_198296_SA2 <- ABS_SA2 %>%
  select(`SA2_MAINCODE_2011`, SA2_NAME_2011, `Item_Name`, Estimate_SA2) %>%
  group_by(`SA2_MAINCODE_2011`, SA2_NAME_2011, `Item_Name`) %>%
  summarise(SA2Est = sum(Estimate_SA2))
# rename columns to be consistent across all epochs
commodities_198296_SA2 <- rename(commodities_198296_SA2,
                                 ASGS_code = "SA2_MAINCODE_2011",
                                 ASGS_label = "SA2_NAME_2011",
                                 Commodity = "Item_Name",
                                 Estimate = "SA2Est"
)
write_csv(commodities_198296_SA2, "./data/commodities_198296_SA2.csv")
|
5ced8e7a4cd043df9ee6c270a65b84d8dd56041e
|
8c1daa6967fd693652dd1eac38a9f666fc65c8ee
|
/man/get.varitas.options.Rd
|
18f9928ed177db607d5afa9f2f761493dd22ce10
|
[] |
no_license
|
cran/varitas
|
ae90d05e61f5004a07d09ec5861724218215fdcd
|
603e4ec1d6d90678eb54486f7a0faf6a76a14114
|
refs/heads/master
| 2021-01-13T21:42:36.802682
| 2020-11-13T23:30:03
| 2020-11-13T23:30:03
| 242,504,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 731
|
rd
|
get.varitas.options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.varitas.options.R
\name{get.varitas.options}
\alias{get.varitas.options}
\title{Return VariTAS settings}
\usage{
get.varitas.options(option.name = NULL, nesting.character = "\\\\.")
}
\arguments{
\item{option.name}{Optional name of option. If no name is supplied, the full list of VariTAS options will be provided.}
\item{nesting.character}{String giving Regex pattern of nesting indication string. Defaults to '\\.'}
}
\value{
varitas.options list specifying VariTAS options
}
\description{
Return VariTAS settings
}
\examples{
reference.build <- get.varitas.options('reference_build');
mutect.filters <- get.varitas.options('filters.mutect');
}
|
d04fd40113bfe51e877ae4ed1812e479dc40d88c
|
645ff6a53c2093037c7154cdd87714942385ffd4
|
/R/collection_rebalanceleaders.R
|
a6cdc72c0f0929770187e35750b6be6ec63b578a
|
[
"MIT"
] |
permissive
|
1havran/solrium
|
04c6754d14509e0e46e50d39f074d17b190eb050
|
a30015c1d1a28fc7293d67854c12d8f3fc99fad0
|
refs/heads/master
| 2021-01-01T06:09:13.350637
| 2017-02-01T19:44:27
| 2017-02-01T19:44:27
| 97,371,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,075
|
r
|
collection_rebalanceleaders.R
|
#' @title Rebalance leaders
#'
#' @description Reassign leaders in a collection according to the preferredLeader
#' property across active nodes
#'
#' @export
#' @param name (character) Required. The name of the collection rebalance preferredLeaders on.
#' @param maxAtOnce (integer) The maximum number of reassignments to have queue up at once.
#' Values <=0 are use the default value Integer.MAX_VALUE. When this number is reached, the
#' process waits for one or more leaders to be successfully assigned before adding more
#' to the queue.
#' @param maxWaitSeconds (integer) Timeout value when waiting for leaders to be reassigned.
#' NOTE: if maxAtOnce is less than the number of reassignments that will take place,
#' this is the maximum interval that any single wait for at least one reassignment.
#' For example, if 10 reassignments are to take place and maxAtOnce is 1 and maxWaitSeconds
#' is 60, the upper bound on the time that the command may wait is 10 minutes. Default: 60
#' @param raw (logical) If \code{TRUE}, returns raw data
#' @param ... curl options passed on to \code{\link[httr]{GET}}
#' @examples \dontrun{
#' solr_connect()
#'
#' # create collection
#' collection_create(name = "mycollection2") # bin/solr create -c mycollection2
#'
#' # balance preferredLeader property
#' collection_balanceshardunique("mycollection2", property = "preferredLeader")
#'
#' # balance preferredLeader property
#' collection_rebalanceleaders("mycollection2")
#'
#' # examine cluster status
#' collection_clusterstatus()$cluster$collections$mycollection2
#' }
# Issue a REBALANCELEADERS call against the Solr collections admin endpoint.
# Returns the raw response when `raw` is TRUE, otherwise the parsed JSON.
# Same contract as documented in the roxygen block above; `...` is forwarded
# to solr_GET (curl options).
collection_rebalanceleaders <- function(name, maxAtOnce = NULL, maxWaitSeconds = NULL,
  raw = FALSE, ...) {
  conn <- solr_settings()
  check_conn(conn)
  # sc() drops the NULL entries so only supplied options reach the server.
  query <- list(action = 'REBALANCELEADERS', collection = name, maxAtOnce = maxAtOnce,
    maxWaitSeconds = maxWaitSeconds, wt = 'json')
  response <- solr_GET(file.path(conn$url, 'solr/admin/collections'), sc(query), conn$proxy, ...)
  if (!raw) {
    return(jsonlite::fromJSON(response))
  }
  response
}
|
9af3d143f5b1634500e606df0d89c5a3cac2c874
|
8748271d8301a95a15c9b2effb121e8bae0a418a
|
/Rcodes/Eq1ProdFun1/explanatory.R
|
15688660b938e2e5223b8e6a4ed88b2c2ffe5200
|
[] |
no_license
|
MonikaNovackova/FoodSystemGitH
|
2a8c9606451cd5b68e387b0f460a50140059da80
|
849480432be11062d16812830d894e6f3f7fd8c5
|
refs/heads/master
| 2021-10-10T21:55:14.555493
| 2019-01-17T16:05:13
| 2019-01-17T16:05:13
| 157,595,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,495
|
r
|
explanatory.R
|
# Exploratory look at Kenyan maize yield data (KE_Ag_Stats_2014.csv).
# NOTE(review): rm(list=ls()) wipes the whole workspace — avoid in shared scripts.
rm(list=ls())
WDuni<-c("/home/m/mn/mn301/foodSystems/dataFS") # uni
WDhome<-c("/home/trennion/foodSystems/dataFS") # home
# NOTE(review): both setwd() calls run unconditionally; the second one wins
# (or errors if the path does not exist on this machine).
setwd(WDuni)
setwd(WDhome)
#wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww
#wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww
# load and prepare Maize data
Crops<-read.csv( "New31Jan2018/KE_Ag_Stats_2014.csv",header=TRUE, na.strings = c("NA","NC","NC.","-","#DIV/0!"),colClasses=c("factor","factor","factor","factor","factor","factor","factor","factor","numeric","numeric","numeric")) #numeric,numeric,numeric))
# Maize rows with a non-empty Admin2; CrMaize2 additionally keeps only
# rows with an empty Season field (presumably annual totals — TODO confirm).
CrMaize<-subset(Crops, Crop=="Maize" & !Admin2=="")
CrMaize2<-subset(Crops, Crop=="Maize" & !Admin2=="" & Season=='')
#wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww
# Per-year yield summaries and NA counts.
# NOTE(review): PercZ1 is not defined anywhere in this file — these lines will
# fail unless it exists in the session from another script; verify.
sapply(unique(PercZ1$Year), function (x) summary(CrMaize2$Yield[CrMaize2$Year==x]))
sapply(seq(1970,2014),function (x) summary(CrMaize2$Yield[CrMaize2$Year==x]))
sapply(unique(PercZ1$Year),function (x) summary(CrMaize2$Yield[CrMaize2$Year==x]))
sapply(unique(PercZ1$Year),function (x) sum(is.na(CrMaize2$Yield[CrMaize2$Year==x])))
is.na(CrMaize2$Yield[CrMaize2$Year==2015])
summary(CrMaize2$Yield[CrMaize2$Year==2010])
|
9182d5fd934bd0a1726664a81c36a50006d94416
|
1181197b6995d81a5982597db18e9a2bf4d3346e
|
/normalising/TPM_normalised_look.R
|
68f9539e1c053e92a28db0b30b79cdc95e96e3ff
|
[] |
no_license
|
oknox/Research-project
|
011a6054a43b3cb11a1688227d05d1439934487d
|
c9e19cd5952f64f5e41336dd039c2254605860c0
|
refs/heads/master
| 2020-03-26T11:41:12.994576
| 2018-08-15T13:21:16
| 2018-08-15T13:21:16
| 144,854,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
TPM_normalised_look.R
|
#Looking at TPM normalised counts
# Loads a TPM-normalised count matrix and plots the distribution of log10 counts.
# NOTE(review): relative setwd() assumes the session starts in the home
# directory (iCloud path) — confirm before reuse.
setwd("Library/Mobile Documents/com~apple~CloudDocs/CAMBRIDGE/Research project/")
library(data.table)
norm_counts <- fread("Transcripts/Nonlogged_tpm_normalised_counts.csv", data.table = F)
# Drop the first column (presumably transcript/gene identifiers — TODO confirm)
# and coerce the remaining count columns to a numeric matrix.
jcounts <- norm_counts[,2:ncol(norm_counts)]
jcounts <- as.matrix(jcounts)
#Make histogram of logged 10 normalised counts
# Assumes all counts are > 0 so log10 is finite — TODO confirm zero handling.
hist(log10(jcounts), breaks = 100, xlab="log10 TPM normalised counts", main = "Distribution of log10 TPM normalised counts")
|
5b102647cd68cab5c83a9aad65c4d5a68ee2fcdf
|
d86913a4c99d7666bf457f81be2581ad0241d86e
|
/ui.R
|
bb9d891c8100efa5a3b30133fec6a36b61a05f22
|
[] |
no_license
|
cgtyoder/DevDataProdWk4
|
5de37ad6aa3874194b71fb4acd0eaf5e7c92e760
|
84108a3718623e980ec6fdfbbdd5b52bf2da4639
|
refs/heads/master
| 2021-09-08T09:14:46.761295
| 2018-03-09T02:46:54
| 2018-03-09T02:46:54
| 124,477,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,011
|
r
|
ui.R
|
# Shiny UI for an mtcars explorer: three selectors (cylinders, transmission,
# gears) in a sidebar filter the observations plotted in the main panel
# (output slot "distPlot", rendered by the companion server.R).
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("mtcars Data"),
# Sidebar with 3 selectors to narrow data
sidebarLayout(
sidebarPanel(
h3("Instructions:"),
# NOTE(review): "critera" (below) and "Tranmission:" (further down) are typos
# inside UI string literals; fixing them changes displayed text, so they are
# left untouched here and only flagged.
h5("Change the values of the selectors below to plot the observations from the mtcars data set which match the selected critera."),
br(),
# Input IDs "cyls", "automan", "gears" must match the server-side filters.
selectInput("cyls", "Cylinders:",
c("4" = 4,
"6" = 6,
"8" = 8)),
selectInput("automan", "Tranmission:",
c("Automatic" = 0,
"Manual" = 1)),
selectInput("gears", "Gears:",
c("3" = 3,
"4" = 4,
"5" = 5)), br(),
h5("Please see https://github.com/cgtyoder/DevDataProdWk4 for the server.R and ui.R files")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
))
|
381e6f51618909cb67e1f89ab481291411ec779d
|
0b46530108be813ad07dddc3bf3cd10b01f8c10c
|
/man/temperature_graph.Rd
|
b372752b8441c878deb14f0427064dc5a64cde9c
|
[] |
no_license
|
jcorain/NewBoRn
|
86a76d0a596eefeb4096e87baa3411df016fc1f1
|
62f7764fe5514c416f941e0180ef97f38012cdb4
|
refs/heads/main
| 2023-02-27T07:33:16.558981
| 2021-01-28T17:05:53
| 2021-01-28T17:05:53
| 329,053,524
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 723
|
rd
|
temperature_graph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph.R
\name{temperature_graph}
\alias{temperature_graph}
\title{temperature_graph}
\usage{
temperature_graph(dataframe = NULL, birthdate = NULL)
}
\arguments{
\item{dataframe}{The dataframe you want to analyze}
\item{birthdate}{The birthdate of the child. If NULL we assume that this is the first day of the dataframe. Defaut is NULL}
}
\value{
plotly graphic object
}
\description{
Function to plot the temperature versus time
}
\examples{
dummy_data <- utils::read.csv(file.path(system.file("extdata", package = "NewBoRn"),
"dummy_data.csv"))
dummy_data <- dplyr::select(.data = dummy_data, -X)
temperature_graph(dataframe = dummy_data)
}
|
142a41e2f4cca2e2f5ad056fdea3998cb5063e53
|
f1dd4979186d90cc479c48d7673f4ce4c633cf35
|
/psf/astro/zone099.r
|
e63d92b44589045e112d34d6cbeafc5b9c3f6dc2
|
[] |
no_license
|
flaviasobreira/DESWL
|
9d93abd3849f28217ae19c41d9b270b9b1bd5909
|
6ba2a7f33196041aa7f34d956535f7a0076ae1f2
|
refs/heads/master
| 2020-03-26T05:50:05.292689
| 2018-08-12T21:43:34
| 2018-08-12T21:43:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 441
|
r
|
zone099.r
|
350947
350957
350960
350962
369473
369490
369508
369512
369514
377872
377875
377881
377883
379013
379021
379024
379027
379031
379033
381899
381907
391292
398748
398750
400382
400386
400388
400395
401153
401155
401158
401163
402248
402256
402258
402265
474402
474417
474422
483199
483200
484949
484951
484972
484974
486435
486833
486834
489938
490354
490763
490765
490767
490772
491146
492768
493523
494333
495022
495025
508068
510091
510478
|
7001ce8b84707ae51617654bd508a64963e11e84
|
fac8df50cfc58cc485cfba09115866d7650afc39
|
/testfiles/r/input/functions/functions0.r
|
38158127ecd3c7c029ac8d0d8bd2da793406e5fe
|
[
"MIT"
] |
permissive
|
jorconnor/senior-design-rpl
|
5be9b69fc646976334413de881e0b5751f2d15d2
|
10afb254e3591c6fc367a95f42df43fbee51fd26
|
refs/heads/master
| 2021-01-09T05:51:17.992817
| 2017-05-09T18:50:46
| 2017-05-09T18:50:46
| 80,846,535
| 0
| 1
| null | 2017-04-19T14:57:40
| 2017-02-03T16:24:40
|
Java
|
UTF-8
|
R
| false
| false
| 519
|
r
|
functions0.r
|
# Read file
# Exploratory script: load state data, subset it, and inspect the result.
statesInfo <- read.csv('stateData.csv')
# subset data by region if region is 1
# NOTE(review): this subset() result is printed but not assigned — it is
# discarded; presumably intentional for interactive inspection.
subset(statesInfo, state.region == 1)
stateSubset <- statesInfo[statesInfo$illiteracy == 0.5, ]
library(ggplot2)
library(plyr)
library(twitteR)
# Attributes and dimensions of data
dim(stateSubset)
str(stateSubset)
# NOTE(review): codeDir is not defined in this file — it must come from the
# sourced environment or a wrapper script; verify before running standalone.
source(file.path(codeDir, 'dependencies.r'))
stateSubset # print out stateSubset
# Square a value (vectorized over numeric input): returns x * x.
# Bug fix: the original ended with `return(square)`, returning the function
# object itself instead of the computed result `sq`.
square <- function(x) {
  sq <- x * x
  return(sq)
}
# Add two values and return the result.
# NOTE(review): this deliberately reproduces the original's masking of
# base::sum within this script; renaming would change the interface.
sum <- function(a, b) {
  a + b
}
# Example invocations of the two functions defined above.
square(4)
sum(1, 2)
|
4ab4091dfadf985dcc908888b2a94726f35adf10
|
229be3eec8eda763405e9147de5279d0c783b4ac
|
/xmlR/mergedCatalog.R
|
da580f47973f53459f577239f9f9027391f08ca2
|
[] |
no_license
|
gvravi/healapp
|
98ed06127651361048d7f5e43add08f24c9d01bb
|
5ca7d0774f313f137bf4308706dc4d8fbf8d7976
|
refs/heads/master
| 2020-12-24T06:08:17.933512
| 2016-11-08T11:35:20
| 2016-11-08T11:35:20
| 49,939,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,593
|
r
|
mergedCatalog.R
|
# Chapter 3 section 3.2
# Walkthrough of XML DOM navigation with the XML package, using a local
# merged_catalog.xml whose root (<merge>) contains <event> children.
# NOTE(review): depends on setwd("~/R/xmlR") and the file being present there.
setwd("~/R/xmlR")
library(XML)
#Read XML document into R with xmlParse
doc1 = xmlParse("merged_catalog.xml")
#Identify root element of xml class
root = xmlRoot(doc1)
#Name of root
xmlName(root)
#No of children in Root
xmlSize(root)
#Accessing Nodes in the DOM(Document Object Model)
#The [[ and [ operators allows us to treat an XML node as a list of its child nodes.
#Similar to subsetting lists, we can use [ and [[ to access child nodes by positions,
#names, a logical vector, or exclusion. When we subset by "name",we use the node's
#element name
## Note: We can see that root[[event]] give first element
#but root[element] gives all elements in
event1 = root[["event"]]
event1
names(event1)
xmlName(event1)
#We can retrieve, say, the tenth child of the first <event> with
event1[[10]]
#get the first seven <event> nodes with
root[1:7]
#get all but the first seven children of the root node with
root[ -(1:7) ]
#we subset by name using the single square bracket to extract all <event> nodes
#from the <merge> node.
evs = root["event"]
evs
# The evs object is of class XMLInternalNodeList, which is essentially a list of
# XMLInternalElementNode objects. This means that we can apply the methods xmlName(),
# xmlValue(), etc. to the elements in evs, e.g., we find that the first <event> node
#has 18 children with
xmlSize(evs[[1]])
#Alternately
# Equivalent name-based selection via a logical mask over child names.
root[ names(root) == "event" ]
#then we extract all <event> nodes from the <merge> node.
#A call to length() confirms this:
length(evs)
|
62c1f2b58277ec39562f1a7718a3b5b272bfaea4
|
2db27775c676d46d2f28f239b148463aa9288371
|
/man/eptplot.Rd
|
fba15d76362a47964329a176a3e25d019d697cde
|
[] |
no_license
|
cran/EPT
|
f2af2930cc2791fa75a91a42a78c0d69f59b1408
|
a86526fab7d443983e0fa72ab21523f8d9c1ed77
|
refs/heads/master
| 2022-01-21T09:38:38.437692
| 2022-01-05T00:10:02
| 2022-01-05T00:10:02
| 236,593,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,811
|
rd
|
eptplot.Rd
|
\name{eptplot}
\alias{eptplot}
\title{Plot of Components by Ensemble Patch Transform of a Signal}
\description{
This function plots ensemble patch transform of a signal for a sequence of size parameters tau's.
}
\usage{
eptplot(eptransf, taus = eptransf$parameters$tau)
}
\arguments{
\item{eptransf}{R object of ensemble patch transform by \code{eptransf()} or \code{meptransf()}.}
\item{taus}{specifies size parameters for which ensemble patch transform of a signal is displayed.}
}
\details{
This function plots ensemble patch transform of a signal for a sequence of size parameters \code{taus}.
}
\value{
plot
}
\seealso{
\code{\link{eptransf}}, \code{\link{meptransf}}, \code{\link{eptmap}}.
}
\examples{
n <- 500
set.seed(1)
x <- c(rnorm(n), arima.sim(list(order = c(1,0,0), ar = 0.9), n = n, sd=sqrt(1-0.9^2)))
taus <- seq(10, 100, by=10)
# eptr1 : Multiscale EPT by average patch transform and average ensemble transform
eptr1 <- meptransf(tindex=1:(2*n), signal=x, taus=taus, process=c("average", "average"),
boundary="none")
names(eptr1)
op <- par(mfcol=c(4,1), mar=c(4,2,2,0.1))
plot(x, xlab="", type="l", main="signal")
eptplot(eptr1)
eptplot(eptr1, taus=20)
eptplot(eptr1, taus=c(20, 30))
lines(eptr1$Epstat[, 2], col="blue")
lines(eptr1$Epstat[, 3], col="red")
# eptr2 : Multiscale EPT by envelope patch transform and average ensemble transform
eptr2 <- meptransf(tindex=1:(2*n), signal=x, type="oval", taus=taus,
process=c("envelope", "average"), pquantile=c(0,1), gamma=0.06, boundary="none")
names(eptr2)
plot(x, xlab="", type="l")
eptplot(eptr2)
eptplot(eptr2, taus=20)
eptplot(eptr2, taus=c(20, 30))
lines(eptr2$EpM[, 2], col="blue")
lines(eptr2$EpM[, 3], col="red")
par(op)
}
\keyword{nonparametric}
|
36fa6520452a9ad646304b1bc9c41f4c05fffde2
|
63f247fa699153303a6481f86d98764a7d88529d
|
/2/2.2/3.R
|
ae457f167939d3dda903c701d720388e40cdb550
|
[] |
no_license
|
Alex1472/Statistic-on-R.-Part-1
|
86ec8c2e1024de0aec3c3922b863f156cb57b200
|
ce071d7c93cedf395c452f61a48fbafd4eaf99d1
|
refs/heads/master
| 2020-03-27T21:10:15.314847
| 2018-09-09T01:05:29
| 2018-09-09T01:05:29
| 147,120,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
3.R
|
# Compare V1 between the two groups coded in V2: first test for equal
# variances (Bartlett), then compare the means (t test).
df = read.table("dataset_11504_15.txt")
# Homogeneity-of-variance check
bartlett.test(V1 ~ V2, df)
# NOTE(review): t.test() defaults to var.equal = FALSE (Welch) regardless
# of the Bartlett result above.
t.test(V1 ~ V2, df)
|
084ac597a2037909c759ec1ee47db45f8818c7c0
|
508fa9bfaae7fab2b5662b93f5dd858cae7767ff
|
/R/geneModel.R
|
8391b8cda83916b5c3eb4b99766252a2a2060d75
|
[] |
no_license
|
cran/refGenome
|
2ec5e1be73d89ec48a1506d0323bf2ebb0c305f2
|
5f92690b4099770bedf3ae1d8cef5a28ac6250a9
|
refs/heads/master
| 2021-01-17T10:10:20.769482
| 2019-05-22T16:10:09
| 2019-05-22T16:10:09
| 17,699,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,619
|
r
|
geneModel.R
|
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Declaration of generics for geneModel.r
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# getTranscript: fetch one transcriptModel from a container by index or name.
setGeneric("getTranscript",
            function(object, i) standardGeneric("getTranscript"))
# getExonData / getCdsData: accessors for the exon / CDS coordinate tables.
setGeneric("getExonData",
            function(object) standardGeneric("getExonData"))
setGeneric("getCdsData",
            function(object) standardGeneric("getCdsData"))
# geneModel: build a geneModel for one gene_id from a genome object;
# interior=FALSE skips extraction of exon and transcript data (see methods).
setGeneric("geneModel",
            function(object, gene_id, interior=TRUE) standardGeneric("geneModel"))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Generate consecutive identifier from two data.frame columns
# Only for internal use (not exported)
# Used in geneModel - function.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Assign a consecutive identifier (column `uid`) to the rows of a data.frame,
# where rows sharing the same (f, s) value pair receive the same id.
# The frame is returned sorted by columns f, then s, with ids starting at 1.
uid <- function(dfr, f, s)
{
    # - - - - - - - - - - - - - - #
    # dfr   : data.frame
    # f     : name of first column
    # s     : name of second column
    # - - - - - - - - - - - - - - #
    f <- f[1]
    s <- s[1]
    dfr <- dfr[order(dfr[, f], dfr[, s]), ]
    # Pair each row's (f, s) values with the next row's; the last row is
    # compared with itself (always equal, so it never starts a new id).
    fn <- c(dfr[-1, f], dfr[nrow(dfr), f])
    sn <- c(dfr[-1, s], dfr[nrow(dfr), s])
    fne <- dfr[, f] == fn
    fse <- dfr[, s] == sn
    # inc[i] == 1 exactly when row i differs from row i + 1
    inc <- as.numeric(!(fne & fse))
    # Leading 1 means that uid starts with 1
    cinc <- c(1, inc[-length(inc)])
    # Fix: the original computed cumsum(cinc) twice and left a dead
    # local `uid` shadowing the function name; compute it once.
    dfr$uid <- cumsum(cinc)
    return(invisible(dfr))
}
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# CLASS transcriptModel
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# ident : transcript_id, transcript_name, gene_id, gene_name, seq_name
# coords : start, end
# Container for a single transcript: identifiers, genomic location, and the
# per-feature coordinate tables parsed from a GTF file.
.transcriptModel <- setClass("transcriptModel",
    slots=c(
        id="character", # transcript_id
        name="character", # transcript_name
        gene_id="character",
        gene_name="character",
        seq_name="character",
        strand="character",
        biotype="character",
        coords="integer", # start, end of transcript
        exons="data.frame", # start, end of exons
        cds="data.frame", # start, end of CDS
        stcodon="integer", # start_codon, stop_codon
        prime_utr="integer", # five, three
        #introns="data.frame", # begin <(?) end (Position of 1st nucleotde)
        version="integer"
    )
)
# Initialize a transcriptModel with empty strings, zeroed coordinate
# vectors and one-row placeholder tables, so accessors and show() work
# on a default-constructed object.
setMethod("initialize", "transcriptModel", function(.Object)
{
    .Object@id <- ""
    .Object@name <- ""
    #
    .Object@coords <- rep(0L, 2)
    names(.Object@coords) <- c("start", "end")
    #
    .Object@exons <- data.frame(start=0L, end=0L)
    .Object@cds <- data.frame(start=0L, end=0L)
    #
    .Object@stcodon <- rep(0L, 2)
    names(.Object@stcodon) <- c("start", "stop")
    #
    .Object@prime_utr <- rep(0L, 2)
    names(.Object@prime_utr) <- c("five", "three")
    #
    return(.Object)
})
# Pretty-print a transcriptModel: identifiers plus formatted coordinates.
setMethod("show", "transcriptModel", function(object){
    # Sys.localeconv()[7] -- presumably the locale's thousands separator,
    # used as big.mark in format() below; TODO confirm the index.
    bm<-Sys.localeconv()[7]
    cat("An object of class '", class(object), "'.\n", sep="")
    cat("ID          : ", object@id, "\n")
    cat("Name        : ", object@name, "\n")
    cat("Gene ID     : ", object@gene_id, "\n")
    cat("Gene Name   : ", object@gene_name, "\n")
    cat("Start       : ", format(object@coords[1], big.mark=bm), "\t\t")
    cat("End         : ", format(object@coords[2], big.mark=bm), "\n")
    cat("Start codon : ", format(object@stcodon[1], big.mark=bm), "\t\t")
    cat("Stop  codon : ", format(object@stcodon[2], big.mark=bm), "\n")
    cat("5' prime utr: ", format(object@prime_utr[1], big.mark=bm), "\t\t")
    cat("3' prime utr: ", format(object@prime_utr[2], big.mark=bm), "\n")
    cat("Seq Name    : ", object@seq_name, "\n")
    cat("Strand      : ", object@strand, "\n")
})
# Draw one box per exon of a transcriptModel onto an existing plot.
#
# Args:
#   x    : transcriptModel whose @exons table (start/end columns) is drawn.
#   ylim : numeric(2), vertical extent of the boxes.
#   col  : colour; col[1] is the border, a half-transparent version the fill.
#   lwd  : border line width, forwarded to rect().
#   ...  : further arguments forwarded to rect().
plot.transcriptModel <- function(x, ylim, col, lwd=2, ...)
{
    # adjustcolor: Package grDevices
    # cols=border, background = alpha(border, 0.5)
    # Fix: seq_len() guards the zero-exon case; the original
    # 1:nrow(x@exons) would iterate c(1, 0) and index out of range.
    for(i in seq_len(nrow(x@exons)))
        rect(
            x@exons$start[i], ylim[1],
            x@exons$end[i], ylim[2],
            col=adjustcolor(col[1], alpha.f=0.5), border=col[1],
            lwd=lwd, ...)
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Extract TranscriptModel Object from GTF table
# For internal use only (not exported)
# Used in geneModel function
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Build a transcriptModel from the GTF rows of one transcript.
#
# Args:
#   gtf           : data.frame of GTF records containing the transcript.
#   transcript_id : identifier of the transcript to extract.
# Returns: a populated transcriptModel object.
getTranscriptFromGtf <- function(gtf, transcript_id)
{
    transcript_id <- as.character(transcript_id[1])
    gtf <- gtf[gtf$transcript_id==transcript_id, ]
    # NOTE(review): stop() has no `sep` argument; the "" below is simply
    # pasted into the message (harmless, but dead weight).
    if(nrow(gtf)==0)
        stop("Transcript_id '", transcript_id, "' not found", sep="")
    res <- new("transcriptModel") # .transcriptModel()
    res@id <- transcript_id
    res@name <- as.character(gtf$transcript_name[1])
    res@gene_id <- as.character(gtf$gene_id[1])
    res@gene_name <- as.character(gtf$gene_name[1])
    res@strand <- as.character(gtf$strand[1])
    res@seq_name <- as.character(gtf$seqid[1])
    res@coords[1] <- min(gtf$start)
    res@coords[2] <- max(gtf$end)
    res@version <- as.integer(gtf$transcript_version[1])
    # Codon / UTR positions: NA when the feature row is absent.
    wc <- which(gtf$feature=="start_codon")
    res@stcodon[1] <- ifelse(length(wc)>0, gtf$start[wc[1]], NA)
    wc <- which(gtf$feature=="stop_codon")
    res@stcodon[2] <- ifelse(length(wc)>0, gtf$start[wc[1]], NA)
    wc <- which(gtf$feature=="five_prime_utr")
    res@prime_utr[1] <- ifelse(length(wc)>0, gtf$start[wc[1]], NA)
    wc <- which(gtf$feature=="three_prime_utr")
    res@prime_utr[2] <- ifelse(length(wc)>0, gtf$start[wc[1]], NA)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Exon data.frame
    # - - - - - - - - - - - - - - - - - - - - - - - - - - #
    exons <- gtf[gtf$feature=="exon", ]
    if(nrow(exons) > 0)
    {
        # Missing exon
        # (no exon_number column: number positionally by strand)
        if(is.na(match("exon_number", names(exons))))
        {
            # + strand: increasing, - strand: decreasing
            exons <- exons[order(exons$start), ]
            if(exons$strand[1]=="+"){
                exons$exon_number <- 1:nrow(exons)
            }else{
                exons$exon_number <- nrow(exons):1
            }
        }else{
            exons$exon_number <- as.numeric(exons$exon_number)
        }
        exn <- c("start", "end",
                "exon_id", "exon_number", "exon_version",
                "seqid", "strand")
        # Missing columns are removed from column names
        mtc <- match(exn, names(exons))
        exn <- exn[!is.na(mtc)]
        res@exons <- exons[order(exons$exon_number), exn]
        rownames(res@exons) <- as.character(res@exons$exon_number)
    }else{
        cat("No exons found for transcript '", transcript_id, "'\n", sep="")
    }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # CDS data.frame
    # - - - - - - - - - - - - - - - - - - - - - - - - - - #
    cds <- gtf[gtf$feature=="CDS", ]
    res@cds <- cds
    return(res)
}
# Accessor: the exon coordinate data.frame of a transcriptModel.
setMethod("getExonData", "transcriptModel", function(object) object@exons)
# Accessor: the CDS coordinate data.frame of a transcriptModel.
# Bug fix: this method previously returned object@exons (copy-paste from
# getExonData), so the table stored in the @cds slot was unreachable.
setMethod("getCdsData", "transcriptModel", function(object){
    return(object@cds)
})
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# CLASS geneModel
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# Gene-level container: identifiers, location, the transcript_id vector,
# and (via the inherited refGenome environment @ev) the gene / exon /
# transcripts tables extracted from a genome object.
.geneModel <- setClass("geneModel",
    slots=c(gene_id="character",
            gene_name="character",
            seq_name="character",
            strand="character",
            transcripts="character",
            coords="integer"),
    contains="refGenome")
# Initialize a geneModel with empty identifiers, an unknown strand ("*")
# and a fresh environment for the data tables.
setMethod("initialize", "geneModel", function(.Object)
{
    # ev needs to be done explicitly assigned here
    # because otherwise obscure copies appear
    .Object@ev <- new.env()
    .Object@gene_id <- ""
    .Object@gene_name <- ""
    .Object@seq_name <- ""
    .Object@strand <- "*"
    .Object@transcripts <- ""
    .Object@coords <- rep(0L, 2)
    return(.Object)
})
# Accessor / replacement functions for the gene_name slot.
setGeneric("geneName", function(object) standardGeneric("geneName"))
setMethod("geneName", "geneModel", function(object) {
    return(object@gene_name)
})
setGeneric("geneName<-", function(object, value) standardGeneric("geneName<-"))
# Only the first element of `value` is used.
setReplaceMethod("geneName", c("geneModel", "character"), function(object, value){
    object@gene_name <- value[1]
    return(object)
})
# Factor values are converted to character and delegated to the method above.
setReplaceMethod("geneName", c("geneModel", "factor"), function(object, value){
    geneName(object) <- as.character(value[1])
    return(object)
})
# Accessor / replacement functions for the gene_id slot.
# Bug fix: the getter and the character setter previously read/wrote the
# @gene_name slot (copy-paste from geneName), so geneId() was an alias of
# geneName() and @gene_id could never be accessed through the API.
setGeneric("geneId", function(object) standardGeneric("geneId"))
setMethod("geneId", "geneModel", function(object) {
    return(object@gene_id)
})
setGeneric("geneId<-", function(object, value) standardGeneric("geneId<-"))
# Only the first element of `value` is used.
setReplaceMethod("geneId", c("geneModel", "character"), function(object, value){
    object@gene_id <- value[1]
    return(object)
})
# Factor values are converted to character and delegated to the method above.
setReplaceMethod("geneId", c("geneModel", "factor"), function(object, value){
    geneId(object) <- as.character(value[1])
    return(object)
})
# Pretty-print a geneModel: identifiers, coordinates, and -- when an
# exon table exists in @ev -- the exon count plus the first transcripts.
setMethod("show", "geneModel", function(object)
{
    # Presumably the locale's thousands separator, used as big.mark.
    bm<-Sys.localeconv()[7]
    cat("Object of class '", class(object), "'\n", sep="")
    cat("Gene id     : ", object@gene_id[1] , "\n")
    cat("Gene name   : ", object@gene_name[1], "\n")
    cat("Seqid       : ", object@seq_name[1], "\n")
    cat("Strand      : ", object@strand[1], "\n")
    cat("Start       : ", format(object@coords[1], big.mark=bm), "\n")
    cat("End         : ", format(object@coords[2], big.mark=bm), "\n")
    cat("Transcripts : ", length(object@transcripts), "\n")
    if(!exists("exons", where=object@ev, inherits=FALSE))
        cat("(No exon table present)\n")
    else
    {
        # NOTE(review): `n` is computed but never used below; head()
        # falls back to its default of 6 elements.
        n<-min(nrow(object@ev$exons), 6L)
        cat("Exon Nr     : ",
            format(nrow(object@ev$exons), big.mark = bm),
            "\n")
        print(head(object@transcripts))
    }
})
# Plot a gene model: a strand arrow on top, one row of "all exons" boxes
# at the bottom (cols[2]), and one row of exon boxes per transcript
# (cols[1]), with transcript names on the y axis.
#
# Args:
#   x    : geneModel whose @ev contains `exons` and `transcripts`.
#   cols : cols[1] = transcript boxes, cols[2] = "all exons" boxes.
#   ...  : forwarded to the initial plot() call.
plot.geneModel <- function(x, cols=c("firebrick2", "gray40"), ...)
{
    gene_text <- paste("Gene id :", x@gene_id,
                    " Seqid : " , x@seq_name,
                    " Strand: " , x@strand)
    ylim=c(0,10)
    op <- par(mar=c(5,8,4,2) + 0.1)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Do main plot
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    plot(x@coords, ylim, type="n", bty="n", yaxt="n",
        main=paste("Gene model :", x@gene_name),
        xlab=paste("Position on seqid", x@seq_name),
        ylab="", ...)
    #bm<-Sys.localeconv()[7]
    mtext(gene_text, side=3, line=0)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Strand arrow
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    if(x@strand == "+"){
        arrows(x0=x@coords[1], y0=ylim[2],
                x1=x@coords[2], y1=ylim[2],
                code=2, length=0.1)
    } else {
        arrows(x0=x@coords[1], y0=ylim[2],
                x1=x@coords[2], y1=ylim[2],
                code=1, length=0.1)
    }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Draw "All exons" line
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    if(!exists("exons", envir=x@ev))
    {
        cat("[plot.geneModel] No exon data found!\n")
    }else{
        # Draw all exon boxes for gene using second color value
        # adjustcolor: Package grDevices
        # cols=border, background = alpha(border, 0.5)
        exons <- x@ev$exons
        for(i in 1:nrow(exons))
            rect(exons$start[i], 0.2, exons$end[i], 0.8,
                col=adjustcolor(cols[2], alpha.f=0.5), border=cols[2])
    }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Draw exon boxes for transcripts
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    if(!exists("transcripts", envir=x@ev))
    {
        cat("[plot.geneModel] No transcript data found!\n")
    }else{
        trans <- x@ev$transcripts
        ntrans <- length(trans)
        ylim_t <- c(1, 8)
        ywid <- ylim_t[2] - ylim_t[1]
        allrects <- ywid * 2/3
        allgaps <- ywid * 1/3
        rectwid <- allrects / ntrans # vertical number of transcript boxes
        gapwid <- allgaps / (ntrans - 1) # number of vertical gaps
        ylolim <- (0:(ntrans-1) * (rectwid + gapwid)) + ylim_t[1]
        for(i in 1:ntrans)
            plot(trans[[i]], ylim=c(ylolim[i], ylolim[i] + rectwid),
                col=cols[1])
    }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Draw labels on y axis
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # NOTE(review): `ylolim` is only defined in the else-branch above; if
    # no `transcripts` object exists in x@ev, this axis() call errors.
    axis(side=2,
        at=c(0.5, ylolim + rectwid/2),
        tick=FALSE,
        line=NA,
        labels=c("All exons",names(x@transcripts)),
        las=1, cex.axis=0.6)
    par(op)
}
# Fetch a transcriptModel from a geneModel by numeric position ...
setMethod("getTranscript", c("geneModel", "numeric"), function(object, i)
{
    return(object@ev$transcripts[[i]])
})
# ... or by name: first matched against the list names of @ev$transcripts,
# then against the transcript_id values in @transcripts.
setMethod("getTranscript", c("geneModel", "character"), function(object, i)
{
    mtc <- match(i[1], names(object@ev$transcripts))
    if(!is.na(mtc))
        return(object@ev$transcripts[[mtc]])
    mtc <- match(i[1], object@transcripts)
    if(!is.na(mtc))
        return(object@ev$transcripts[[mtc]])
    stop("No Match for transcript name '", i[1], "'")
})
# Build a geneModel for one gene_id from an ensemblGenome: copies the gene
# row, the exon table (plus unique-exon table) and one transcriptModel per
# transcript into the result's @ev environment.
setMethod("geneModel", c("ensemblGenome", "character"),
    function(object, gene_id, interior=TRUE)
    {
    # Object only contains data for one single gene_id
    gene_id <- as.character(gene_id[1])
    gg <- extractByGeneId(object, gene_id)
    if(!exists("genes", envir=object@ev))
        stop("genes table missing in ensemblGenome object")
    gtb <- gg@ev$gtf # shortcut
    # Create output object
    res <- .geneModel()
    res@gene_id <- gene_id
    res@gene_name <- as.character(gg@ev$gtf$gene_name[1])
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract gene data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    mtc <- match(gene_id, gg@ev$genes$gene_id)
    assign("genes", gg@ev$genes[mtc, ], envir=res@ev)
    genes<-res@ev$genes
    res@seq_name <- as.character(genes$seqid[1])
    res@strand <- as.character(genes$strand[1])
    res@coords <- c(genes$start[1], genes$end[1])
    names(res@coords) <- c("start", "end")
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Exon and transcript data eventually is skipped
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    if(!interior)
        return(res)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract exon data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    exn <- c("seqid", "start", "end",
            "exon_id", "exon_number", "exon_version",
            "transcript_version", "transcript_id","transcript_name")
    # Missing columns are removed from column names
    mtc <- match(exn, names(gtb))
    exn <- exn[which(!is.na(mtc))]
    exons <- gtb[gtb$feature=="exon", exn]
    assign("exons", exons, envir=res@ev)
    # Generate coordinates of unique exons
    exons <- uid(exons, "start", "end")
    exid <- sort(unique(exons$uid))
    mtc <- match(exid, exons$uid)
    assign("uexons", exons[mtc, ], envir=res@ev)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract transcript data
    # (keyed by transcript_name here; the ucscGenome method below
    # keys by transcript_id instead)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    tr <- sort(unique(gtb$transcript_name))
    mtc <- match(tr, gtb$transcript_name)
    res@transcripts <- as.character(gtb$transcript_id[mtc])
    names(res@transcripts) <- tr
    l <- lapply(rep("transcriptModel", length(res@transcripts)), new)
    for(i in 1:length(l))
    {
        tro <- getTranscriptFromGtf(gtb, res@transcripts[i])
        etr <- extractTranscript(gg, res@transcripts[i])
        if(nrow(etr@ev$gtf) < 2)
        {
            # Empty splice table
            tdf <- data.frame(begin=character(0), end=character(0))
        }else{
            etj <- getSpliceTable(etr)
            utj <- unifyJuncs(etj)
            tdf <- data.frame(begin=utj@ev$gtf$lend + 1,
                                end=utj@ev$gtf$rstart - 1)
        }
        # NOTE(review): tdf (intron coordinates) is computed but unused
        # because the @introns slot assignment below is commented out.
        #tro@introns <- tdf
        l[[i]] <- tro
    }
    names(l) <- tr
    assign("transcripts", l, envir=res@ev)
    return(res)
    }
)
# Build a geneModel for one gene_id from a ucscGenome. Largely parallel to
# the ensemblGenome method above, but transcripts are keyed by
# transcript_id and no splice-table extraction is performed.
setMethod("geneModel", c("ucscGenome", "character"),
    function(object, gene_id, interior=TRUE)
    {
    # Object only contains data for one single gene_id
    gene_id <- as.character(gene_id[1])
    gg <- extractByGeneId(object, gene_id)
    # NOTE(review): the error message says "ensemblGenome" although this
    # is the ucscGenome method (copied text).
    if(!exists("genes", envir=object@ev))
        stop("genes table missing in ensemblGenome object")
    gtb <- gg@ev$gtf # shortcut
    # Create output object
    res <- .geneModel()
    res@gene_id <- gene_id
    res@gene_name <- as.character(gg@ev$gtf$gene_name[1])
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract gene data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    mtc <- match(gene_id, gg@ev$genes$gene_id)
    assign("genes", gg@ev$genes[mtc, ], envir=res@ev)
    genes<-res@ev$genes
    res@seq_name <- as.character(genes$seqid[1])
    res@strand <- as.character(genes$strand[1])
    res@coords <- c(genes$start[1], genes$end[1])
    names(res@coords) <- c("start", "end")
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Exon and transcript data eventually is skipped
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    if(!interior)
        return(res)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract exon data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    exn <- c("seqid", "start", "end",
            "exon_id", "exon_number", "exon_version",
            "transcript_version", "transcript_id","transcript_name")
    # Missing columns are removed from column names
    mtc <- match(exn, names(gtb))
    exn <- exn[which(!is.na(mtc))]
    exons <- gtb[gtb$feature=="exon", exn]
    assign("exons", exons, envir=res@ev)
    # Generate coordinates of unique exons
    exons <- uid(exons, "start", "end")
    exid <- sort(unique(exons$uid))
    mtc <- match(exid, exons$uid)
    assign("uexons", exons[mtc, ], envir=res@ev)
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    # Extract transcript data
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
    tr <- sort(unique(gtb$transcript_id))
    mtc <- match(tr, gtb$transcript_id)
    res@transcripts <- as.character(gtb$transcript_id[mtc])
    names(res@transcripts) <- tr
    l <- lapply(rep("transcriptModel", length(res@transcripts)), new)
    for(i in 1:length(l))
    {
        l[[i]] <- getTranscriptFromGtf(gtb, res@transcripts[i])
    }
    names(l) <- tr
    assign("transcripts", l, envir=res@ev)
    return(res)
    }
)
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# CLASS geneList
# + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #
# Thin wrapper around a list of geneModel objects.
.geneList <- setClass("geneList",
    slots=c(
        l="list"
    )
)
# Start with an empty list.
setMethod("initialize", "geneList", function(.Object){
    .Object@l <- list()
    return(.Object)
})
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# S3 generics
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Number of geneModel objects held by the list.
setMethod("length", "geneList", function(x) length(x@l))
# Names of the held geneModel objects.
setMethod("names", "geneList", function(x) names(x@l))
# Replace element names; numeric replacement values are coerced by
# the underlying names<-() on the list.
setMethod("names<-", c("geneList", "character"), function(x, value) {
    names(x@l) <- value
    x
})
setMethod("names<-", c("geneList", "numeric"), function(x, value) {
    names(x@l) <- value
    x
})
# Pretty-print a geneList: its length and the element names.
setMethod("show", "geneList", function(object)
{
    # NOTE(review): `bm` is assigned but not used in this method.
    bm<-Sys.localeconv()[7]
    cat("Object of class '", class(object), "'\n", sep="")
    cat("Length      : ", length(object) , "\n")
    cat("Names:\n")
    print(names(object))
})
# Subset a geneList.
# NOTE(review): unusual semantics -- a length-1 index extracts the element
# itself (like [[), while a longer index returns a new geneList.
setMethod("[", signature="geneList", function(x, i)
{
    if(length(i) == 1)
        return(x@l[[i]])
    res <- .geneList()
    res@l <- x@l[i]
    return(res)
})
# Concatenate two geneLists into a new geneList.
setMethod("+", signature=c("geneList", "geneList"), function(e1, e2){
    res <- .geneList()
    res@l <- c(e1@l, e2@l)
    return(res)
})
# Combine two geneModels into a geneList named by their gene_ids.
setMethod("+", c("geneModel", "geneModel"), function(e1, e2){
    res <- .geneList()
    res@l <- list(e1, e2)
    names(res@l) <- c(e1@gene_id, e2@gene_id)
    return(res)
})
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Creation of geneList objects
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# geneList constructor: build one geneModel per entry of `genes` from a
# genome object `ref`. interior is forwarded to geneModel().
setGeneric("geneList", function(ref, genes, interior=TRUE)
                standardGeneric("geneList"))
setMethod("geneList", c("ensemblGenome", "character"),
    function(ref, genes, interior=TRUE)
    {
    ng <- length(genes)
    # convert
    # (split each gene id into its own list element, named by the id)
    genl <- split(genes, 1:ng)
    names(genl) <- genes
    getGeneModel <- function(x) { return(geneModel(ref, x, interior))}
    gl <- .geneList()
    gl@l <- lapply(genl, getGeneModel)
    return(gl)
})
# Factor input: convert to character and delegate.
setMethod("geneList", c("ensemblGenome", "factor"),
    function(ref, genes, interior=TRUE)
    {
    return(geneList(ref, as.character(genes), interior))
})
# NOTE(review): the ucscGenome method duplicates the ensemblGenome method
# verbatim; dispatch differs only in the geneModel() call inside.
setMethod("geneList", c("ucscGenome", "character"),
    function(ref, genes, interior=TRUE)
    {
    ng <- length(genes)
    # convert
    genl <- split(genes, 1:ng)
    names(genl) <- genes
    getGeneModel <- function(x) { return(geneModel(ref, x, interior))}
    gl <- .geneList()
    gl@l <- lapply(genl, getGeneModel)
    return(gl)
})
setMethod("geneList", c("ucscGenome", "factor"),
    function(ref, genes, interior=TRUE)
    {
    return(geneList(ref, as.character(genes), interior))
})
|
ec9391c3d35ef2e5491a5e6e425716ec7f643096
|
5639bad159509b15a8cc6951e22fed8cf8439b1b
|
/scripts/endo_tss/endo_tss_format.R
|
cf0764cb70779105aac864252e95142652f292f6
|
[] |
no_license
|
KosuriLab/ecoli_promoter_mpra
|
3f9fe47e40a81f9bafd27f672d0e7ece5924789f
|
484abdb6ba2d398501009ee6eb25ca7f190d3b4c
|
refs/heads/master
| 2021-07-09T00:38:46.186513
| 2020-08-10T17:28:54
| 2020-08-10T17:28:54
| 179,758,077
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,910
|
r
|
endo_tss_format.R
|
# Reformat an MPRA expression table: parse the sequence name into TSS
# name / position / strand, derive the tested window coordinates, and
# re-parse the negative controls whose names follow a different scheme.
library(dplyr)
library(tidyr)
# NOTE(review): prefer FALSE over F; also redundant for read.table below,
# which gets character columns either way in modern R.
options(stringsAsFactors = F)
# args = commandArgs(trailingOnly=TRUE)
#
# infile <- args[1]
# outfile <- args[2]
# Hard-coded paths (the commented pairs above/below are alternate runs).
infile <- '../../processed_data/endo_tss/lb/rLP5_Endo2_lb_expression.txt'
outfile <- '../../processed_data/endo_tss/lb/rLP5_Endo2_lb_expression_formatted.txt'
# infile <- '../../processed_data/endo_tss/alt_landing_pads/fLP3/fLP3_Endo2_lb_expression.txt'
# outfile <- '../../processed_data/endo_tss/alt_landing_pads/fLP3/fLP3_Endo2_lb_expression_formatted.txt'
# infile <- '../../processed_data/endo_tss/alt_landing_pads/rLP6/rLP6_Endo2_lb_expression.txt'
# outfile <- '../../processed_data/endo_tss/alt_landing_pads/rLP6/rLP6_Endo2_lb_expression_formatted.txt'
data <- read.table(file = infile, header = T)
# parse name
# Window: 120 bp upstream / 30 bp downstream of the TSS on + strand,
# mirrored on - strand (per the ifelse calls below).
data <- data %>%
    mutate(name = gsub('>', '', orig_name),
            name = gsub('_rc', '', name)) %>%
    separate(name, into = c('tss_name', 'tss_position', 'strand'), sep = ',', remove = F) %>%
    mutate(tss_position = as.numeric(tss_position),
            start = ifelse(strand == '+', tss_position - 120, tss_position - 30),
            end = ifelse(strand == '+', tss_position + 30, tss_position + 120)) %>%
    select(name:end, variant:num_barcodes_integrated, -orig_name)
# Categorize rows by name pattern; default is a real TSS.
data$category <- "tss"
data$category[grep("pos_control", data$name)] <- "pos_control"
data$category[grep("neg_control", data$name)] <- "neg_control"
# separately format negative controls
# (their name encodes start:end directly instead of a TSS position)
neg <- filter(data, category == 'neg_control') %>%
    mutate(strand = '+') %>%
    separate(name, into = c('dummy1', 'dummy2', 'loc'), sep = '_', remove = F) %>%
    select(-dummy1, -dummy2) %>%
    separate(loc, into = c('start', 'end'), sep = ':', convert = T)
data <- filter(data, category != 'neg_control') %>%
    bind_rows(select(neg, name, tss_name:strand, start, end, variant:category))
write.table(data, file = outfile, quote = F, row.names = F)
|
322da95fb3bb94cc03ba560bfdd7d0a1acd3dd8b
|
4e4bc3bb5e2186fde35f3b2fcd2d2ea92195d1c5
|
/Plot3.R
|
72f06c239f92b9d06016927de0132e451b3b0d3b
|
[] |
no_license
|
thuangpham/ExData_Plotting1
|
36a4315c00fd8a2b9455f081cc1504adea226de3
|
d74834248ff48e68c1b3c37e16a796e3e19023a3
|
refs/heads/master
| 2021-01-17T09:59:07.337152
| 2016-07-16T11:10:22
| 2016-07-16T11:10:22
| 63,211,217
| 0
| 0
| null | 2016-07-13T03:28:19
| 2016-07-13T03:28:17
| null |
UTF-8
|
R
| false
| false
| 1,027
|
r
|
Plot3.R
|
#set the working directory
# NOTE(review): machine-specific path; the script only runs where this
# directory and Data/household_power_consumption.txt exist.
setwd("C:/DataScience/Exploratory Data/ExData_Plotting1/ExData_Plotting1")
library(dplyr)
file<-"Data/household_power_consumption.txt"
# skip/nrow select a fixed window (2880 minute-rows = 2 days); skipping
# also drops the header, so names are re-read from the first row below.
# The skip offset presumably lands on the assignment's target dates --
# verify against the data set.
df <- read.csv(file, sep=";", header=TRUE,stringsAsFactors=FALSE,skip = 66637, nrow = 2880,
                na.strings="?")
name <- sapply(read.table(file, nrow = 1, sep = ";"), as.character)
names(df) <- name
##set datetime column by combining Date and Time columns
df$DateTime <- strptime(paste(df$Date, df$Time), format="%d/%m/%Y %H:%M:%S")
#plot to png
png("plot3.png",width=500,height=500)
plot(df$DateTime, df$Sub_metering_1,ylab="Energy sub metering",xlab=NA,type="l",col="black")
#add lines for sub_metering_2 column using red
lines(df$DateTime, df$Sub_metering_2, col="red")
#add lines for sub_metering_3 column using blue
lines(df$DateTime, df$Sub_metering_3, col="blue")
# add legend
legend("topright", col = c("black", "red", "blue"), lty=1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
3f48d7612c431bfb7b09ff726f9b86e664848d5c
|
26ba3082f3586332d3a81c7cd5f71a0bd7eb6889
|
/R/omim.R
|
acb89e1735f619fe29f62235fe9a29cee919e278
|
[] |
no_license
|
ExoLab-UPLCMS/MetaboliteHub
|
0ec60bcf2b50169b9288077786c206049be99d4d
|
b5491b63ccec3589a4a3c1d84a1ec544cb303e30
|
refs/heads/master
| 2022-04-11T15:45:06.191108
| 2020-03-31T15:34:51
| 2020-03-31T15:34:51
| 203,401,595
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 663
|
r
|
omim.R
|
#' Retrieve diseases and metabolic pathway alterations recorded in the OMIM database.
#'
#' For a given gene, searches the OMIM database via rentrez and returns the
#' titles of the OMIM entries found for it.
#'
#' @param x the gene of interest (search term passed to \code{entrez_search}).
#' @return an unnamed character vector of OMIM entry titles, or the single
#'   string 'No entries found for this gene' when the search yields no ids.
#' @import rentrez
#' @export
omim<-function(x){
  omim_id<-entrez_search("omim", x)$ids
  if (length(omim_id)==0){
    summary_omim<-c('No entries found for this gene')
  }
  else {
    # vapply() instead of sapply(): guarantees a character vector
    # regardless of how many ids are processed.
    summary_omim<-vapply(omim_id, FUN=function(x){
      entrez_summary("omim", x)$title
    }, FUN.VALUE=character(1))
  }
  return(unname(summary_omim))
}
|
8f3ee363eeff6c02fe24966486b8af6453f03de5
|
643a4f814e3696da39814bec6ff21e90c16995f3
|
/man/getLevels-LagOperator.Rd
|
58616fcbeccee1570d2c4c037c199d34a9811d32
|
[] |
no_license
|
LiangCZhang/quantspec
|
186a66f508155f093fe37dcd9067bfe12db54265
|
6e7e929893a68bd1af76ac259180584aa28b3813
|
refs/heads/develop
| 2021-01-12T08:15:11.620545
| 2016-03-28T16:17:11
| 2016-03-28T16:17:11
| 76,523,170
| 2
| 1
| null | 2016-12-15T04:13:16
| 2016-12-15T04:13:15
| null |
UTF-8
|
R
| false
| true
| 730
|
rd
|
getLevels-LagOperator.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-LagOperator.R
\docType{methods}
\name{getLevels-LagOperator}
\alias{getLevels,LagOperator-method}
\alias{getLevels-LagOperator}
\title{Get attribute \code{levels} from a \code{LagOperator}.}
\usage{
\S4method{getLevels}{LagOperator}(object, j)
}
\arguments{
\item{object}{\code{LagOperator} from which to get the \code{levels}.}
\item{j}{Index pointing to a set of levels in the list; optional.}
}
\value{
Returns levels attribute, as a vector of real numbers.
}
\description{
If the optional parameter \code{j} is supplied, then the \code{j}th vector of
levels will be returned, a list with all vectors otherwise.
}
\keyword{Access-functions}
|
5176825fc5a82d5a5efbe88cc96499e99a116c14
|
3ee04b4129e86c9218a34f402349649727baa646
|
/man/jtrace_is_installed.Rd
|
718f9149e3969b0b7fd927eda87eba12f5e2cd9f
|
[
"MIT"
] |
permissive
|
gongcastro/jtracer
|
c34233cfcebba4dce8e7c5be72f09b626c3573ec
|
ed4126d5a6b92034182eb9e77d6c357453af34c5
|
refs/heads/master
| 2023-09-04T11:31:43.980588
| 2021-10-15T15:37:16
| 2021-10-15T15:37:16
| 365,167,721
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 556
|
rd
|
jtrace_is_installed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/install.R
\name{jtrace_is_installed}
\alias{jtrace_is_installed}
\title{Check if jTRACE is installed}
\usage{
jtrace_is_installed()
}
\value{
A logical values indicating whether jTRACE has been already installed
}
\description{
Check if jTRACE is installed
}
\details{
jTRACE website: \code{https://magnuson.psy.uconn.edu/jtrace/}
}
\examples{
jtrace_is_installed()
}
\author{
Gonzalo Garcia-Castro \href{mailto:gonzalo.garciadecastro@upf.edu}{gonzalo.garciadecastro@upf.edu}
}
|
22a07916ff0f0748a8456e393ac5c860d520e09e
|
4a033f9a65e4dcf36533b6a5ec92620fe229ab75
|
/cachematrix.R
|
4ee892721e29593f049e77da669456871c6f1e6e
|
[] |
no_license
|
JayaCh/ProgrammingAssignment2
|
bd8ec125361e3ace7d94af9d8d20c05a8f2bd96b
|
4fac1e45ff804e2ab9289cb9b819d28854abdc1a
|
refs/heads/master
| 2020-12-01T09:32:10.031816
| 2014-11-20T15:36:31
| 2014-11-20T15:36:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,938
|
r
|
cachematrix.R
|
## makeCacheMatrix: Creates and returns the list of getMatrix, setMatrix, getInverse and setInverse functions.
## The superassignment operator is used in the setMatrix and setInverse methods to cache the
## values of input matrix and the inverse matrix.
## cacheSolve: Calculates the inverse matrix of the matrix, that is retrieved by makeCacheMatrix object.
## First checks if the inverse matrix is available and if so gets it from cache and returns it.
## Otherwise calcluates the inverse matrix, cahces it and returns it.
# makeCacheMatrix returns the list with the setMatrix, getMatrix, setInverse and getInverse functions.
# makeCacheMatrix returns a cache object for a matrix: a list exposing
# setMatrix/getMatrix for the matrix itself and setInverse/getInverse for
# its cached inverse. The closures share state via the enclosing environment.
makeCacheMatrix <- function(x = matrix()) { # input x will be a matrix
  # Cached inverse; NULL means "not computed yet".
  inverse.cache <- NULL

  # Replace the stored matrix and invalidate the cached inverse,
  # since it no longer corresponds to the new matrix.
  setMatrix <- function(new.matrix) {
    x <<- new.matrix
    inverse.cache <<- NULL
  }

  # Return the stored matrix.
  getMatrix <- function() x

  # Store a freshly computed inverse in the cache.
  setInverse <- function(inverse) {
    inverse.cache <<- inverse
  }

  # Return the cached inverse (NULL if none has been stored).
  getInverse <- function() inverse.cache

  # The returned list is the public interface of the cache object.
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       setInverse = setInverse,
       getInverse = getInverse)
}
# cacheSolve returns the inverse matrix of the matrix retrieved by makeCacheMatrix.
# cacheSolve returns the inverse of the matrix held by a makeCacheMatrix
# cache object `x`. On a cache hit the stored inverse is returned directly;
# on a miss the inverse is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  # input x will be makeCacheMatrix object
  cached <- x$getInverse()
  # Cache hit: reuse the previously computed inverse.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: fetch the matrix, invert it, and store the result
  # so subsequent calls can skip the computation.
  source.matrix <- x$getMatrix()
  computed <- solve(source.matrix)
  x$setInverse(computed)
  computed
}
|
03ec4a8bd4fd198b69ae2bf3c3bdbb202a063f69
|
d916b13e8151b66bae458dba011ad02a3a699f1d
|
/src/ggplot2 separate mean segment.R
|
6dc074eb7396c51901373d897ccf8233adb57166
|
[
"MIT"
] |
permissive
|
korkridake/cssejhucovid19
|
8fdc3906332ef047bab75e42d0f01f49d30574a1
|
fc13516eeeca87b5345a169587be5776b061f83d
|
refs/heads/main
| 2023-05-26T17:36:51.182358
| 2021-06-16T08:31:31
| 2021-06-16T08:31:31
| 375,930,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,627
|
r
|
ggplot2 separate mean segment.R
|
### separate mean segment
### Demo: draw per-year mean segments over a monthly time series with ggplot2.
# install.packages("ggplot2", "dplyr")
# NOTE(review): in install.packages() the second positional argument is `lib`,
# not another package; use install.packages(c("ggplot2", "dplyr")) instead.
library(ggplot2)
library(dplyr)
# Reproducible fake data: 48 monthly values around mean 50.
set.seed(22)
d <- data.frame(t = seq(as.Date("2011-1-1"), by = "month", length.out = 48),
                v = as.integer(rnorm(48, mean=50, sd=10)))
### plain line chart
ggplot(d, aes(x=t, y=v)) +
  geom_line()
### add average line (single overall mean across all 48 months)
ggplot(d, aes(x=t, y=v)) +
  geom_line() +
  geom_hline(yintercept = mean(d$v), color="limegreen") +
  theme_bw()
### create mean by year: one row per year with the mean value and the
### first/last date of that year (used as segment endpoints below)
d2 <-
  d %>%
  group_by(year=format(t,'%Y')) %>%
  summarise(avg=mean(v), minT=min(t), maxT=max(t))
### draw only avg lines for each year with geom_segment
ggplot() +
  geom_segment(data=d2,
               aes(x = minT,
                   y = avg,
                   xend = maxT,
                   yend = avg))
# add color=year to aes().
# Note: year is a character not numeric (continuous variable)
ggplot() +
  geom_segment(data=d2,
               aes(x = minT,
                   y = avg,
                   xend = maxT,
                   yend = avg, color=year)) +
  ylim(min(d$v), max(d$v))
### use geom_segment to draw separate avg line segment for each year
# data sources came from 2 data frames: one for geom_line, another for geom_segment
ggplot(d, aes(x=t, y=v)) +
  geom_line(color="grey") +
  geom_segment(data=d2,
               aes(x = minT,
                   y = avg,
                   xend = maxT,
                   yend = avg, color=year),
               size = 1,
               linetype="solid") +
  theme_bw() +
  theme(legend.position="none") +
  xlab("") + ylab("")
|
f751cc15b40e1bd6afbbe480b8997989d520a895
|
e66f8e5ef689a7b6dae10ca5958a4b510dbde13f
|
/man/write_shinyrates_data.Rd
|
ddb91b28e24213b89e4182587f610e6a3b0115c7
|
[] |
no_license
|
delabj/pogoshinyrates
|
11a5914a7ec017917f378085cf9edf0e5f146c79
|
98d04f1a704e0bac74cc1c9ecec7ccd193913d35
|
refs/heads/master
| 2022-11-12T19:50:17.295599
| 2020-07-09T21:26:10
| 2020-07-09T21:26:10
| 264,212,546
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
write_shinyrates_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_shinyrates_data.R
\name{write_shinyrates_data}
\alias{write_shinyrates_data}
\title{Write the shinyrates data.}
\usage{
write_shinyrates_data(df, name = "shinyrates.csv")
}
\arguments{
\item{df}{A data frame to write.}
\item{name}{The name of the file, including the file extension (e.g. \code{"shinyrates.csv"}).}
}
\description{
writes the data from shinyrates.com
}
\examples{
date <- Sys.Date()
df <- scrape_shinyrates_website()
df <- format_shinyrates_data(df=df, timestamp=date)
write_shinyrates_data(df)
}
|
9f4e557abd5141db60232133b3353c53bac968f7
|
d64c6c986730b2e989673679796cf4c968474de8
|
/data_wrangling.R
|
afa48e082a411443149593ca6961ec576f50aa2c
|
[] |
no_license
|
han-tun/vizrisk
|
4e5e1e341706075d7f8458cfb693d192fcceaa09
|
5219f3cde6b6089024e932c1b5acf45a562c7def
|
refs/heads/master
| 2020-12-08T11:40:37.503228
| 2020-01-10T06:29:47
| 2020-01-10T06:29:47
| 232,973,137
| 0
| 0
| null | 2020-01-10T05:34:12
| 2020-01-10T05:34:11
| null |
UTF-8
|
R
| false
| false
| 3,645
|
r
|
data_wrangling.R
|
# data_wrangling.R
# Classify earthquake catalogues around two large mainshocks
# (Landers, mag 7.3; Sumatra, mag 8.6) into events before the mainshock,
# remotely "triggered" events (beyond the rupture zone), and near-field
# aftershocks, then export the before/triggered subsets as GeoJSON.
library(tidyverse)
library(geosphere)
library(geojsonio)

# Distance (metres) from every event to the mainshock epicentre.
# Grouping by event `id` makes distHaversine run on one coordinate pair
# per row, as in the original per-group computation.
add_mainshock_distance <- function(events, mainshock) {
  events %>%
    mutate(mainshock_lon = mainshock$longitude,
           mainshock_lat = mainshock$latitude) %>%
    group_by(id) %>%
    mutate(dist = distHaversine(p1 = c(mainshock_lon, mainshock_lat),
                                p2 = c(longitude, latitude))) %>%
    ungroup()
}

# Order events chronologically and add a running index over the whole
# subset (id_overall) and within each calendar day (id_day).
add_event_indices <- function(events) {
  events %>%
    arrange(time) %>%
    mutate(id_overall = row_number()) %>%
    mutate(day = lubridate::floor_date(time, unit = "day")) %>%
    group_by(day) %>%
    mutate(id_day = row_number()) %>%
    ungroup()
}

# Select events relative to the mainshock:
#   after  = TRUE -> events after the mainshock, FALSE -> before it
#   beyond = TRUE -> beyond the rupture length (triggered / pre-mainshock),
#            FALSE -> within it (aftershocks)
# and attach the chronological indices.
classify_events <- function(events, mainshock_time, rupture_length,
                            after = TRUE, beyond = TRUE) {
  events %>%
    filter(if (after) time > mainshock_time else time < mainshock_time) %>%
    filter(if (beyond) dist > rupture_length else dist < rupture_length) %>%
    add_event_indices()
}

## landers
landers <- read_csv("landers.csv")
landers_mainshock <- landers %>% filter(mag == 7.3)
# Rupture length in metres: 80 km, doubled (buffer on both sides).
landers_rupture_length <- 80 * 2 * 1000
landers_mainshock_time <- landers_mainshock$time
landers_dist <- add_mainshock_distance(landers, landers_mainshock)
landers_triggered <- classify_events(landers_dist, landers_mainshock_time,
                                     landers_rupture_length,
                                     after = TRUE, beyond = TRUE)
landers_before <- classify_events(landers_dist, landers_mainshock_time,
                                  landers_rupture_length,
                                  after = FALSE, beyond = TRUE)
landers_aftershocks <- classify_events(landers_dist, landers_mainshock_time,
                                       landers_rupture_length,
                                       after = TRUE, beyond = FALSE)

## sumatra
sumatra <- read_csv("sumatra.csv")
sumatra_mainshock <- sumatra %>% filter(mag == 8.6)
# Rupture length in metres: 500 km, doubled.
sumatra_rupture_length <- 500 * 2 * 1000
sumatra_mainshock_time <- sumatra_mainshock$time
sumatra_dist <- add_mainshock_distance(sumatra, sumatra_mainshock)
sumatra_triggered <- classify_events(sumatra_dist, sumatra_mainshock_time,
                                     sumatra_rupture_length,
                                     after = TRUE, beyond = TRUE)
sumatra_before <- classify_events(sumatra_dist, sumatra_mainshock_time,
                                  sumatra_rupture_length,
                                  after = FALSE, beyond = TRUE)
sumatra_aftershocks <- classify_events(sumatra_dist, sumatra_mainshock_time,
                                       sumatra_rupture_length,
                                       after = TRUE, beyond = FALSE)

# Write a subset as GeoJSON, keeping only the columns needed for mapping.
# (Aftershock subsets are computed above but, as before, not exported.)
export_geojson <- function(events, file) {
  events %>%
    select(time, latitude, longitude, mag, id_overall, id_day, day) %>%
    geojson_write(., lat = "latitude", lon = "longitude", file = file)
}

export_geojson(landers_before, "landers_before.geojson")
export_geojson(landers_triggered, "landers_triggered.geojson")
export_geojson(sumatra_before, "sumatra_before.geojson")
export_geojson(sumatra_triggered, "sumatra_triggered.geojson")
|
81537d634660c74a072f68a35427f0f431c56852
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ProFit/examples/profitOpenCLEnv.Rd.R
|
ccb77717548f7f9c5f26af419c1d15333082c72d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 728
|
r
|
profitOpenCLEnv.Rd.R
|
library(ProFit)
### Name: profitOpenCLEnv
### Title: Create OpenCL Pointer Object
### Aliases: profitOpenCLEnv
### Keywords: GPU OpenCL
### ** Examples
# Model specification: two boxy Sersic profiles, three point sources,
# and a flat sky background.
modellist = list(
 sersic = list(
  xcen = c(180, 60),
  ycen = c(90, 10),
  mag = c(15, 13),
  re = c(14, 5),
  nser = c(3, 10),
  ang = c(46, 80),
  axrat = c(0.4, 0.6),
  box = c(0.5,-0.5)
 ),
 pointsource = list(
  xcen = c(34,10,150),
  ycen = c(74,120,130),
  mag = c(10,13,16)
 ),
 sky = list(
  bg = 3e-12
 )
)
# Render the model on the CPU and display it.
magimage(profitMakeModel(modellist=modellist, dim=c(200,200)))
# The OpenCL variant is wrapped in \dontrun since it needs a GPU/OpenCL setup.
## Not run:
##D tempCL=profitOpenCLEnv()
##D magimage(profitMakeModel(modellist=modellist, dim=c(200,200), openclenv=tempCL))
## End(Not run)
|
aee2a95454f0bc35e6460ea0fdcb2359123e57ca
|
ce94e221e5fd686cfb1218b0a9625decb77ac0c7
|
/man/mmplot.Rd
|
a171650f07b6e5f553ab0dd39128931d7248b144
|
[] |
no_license
|
daniel-gerhard/medrc
|
bb95f91a63e150dd4a114fbfa409dcddc89195ea
|
232b2f3887510add1851e6eae21f6ac529b6bf33
|
refs/heads/master
| 2020-12-24T08:24:02.948797
| 2017-12-27T03:39:07
| 2017-12-27T03:39:07
| 10,939,171
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
rd
|
mmplot.Rd
|
\name{mmplot}
\alias{mmplot}
\title{Plot multiple medrc objects}
\description{Plot multiple predicted dose-response curves based on fixed effect estimates from multiple medrc objects}
\usage{
mmplot(x, ..., ndose=25, logx = FALSE)
}
\arguments{
\item{x}{An object of class medrc}
\item{...}{further objects of class medrc}
\item{ndose}{Number of points to interpolate the dose response curve}
\item{logx}{If TRUE, plot x-axis on a logarithmic scale}
}
\author{Daniel Gerhard}
\keyword{ graphics }
|
5ed4082e7f679a2ac91b72b38b4b23cb8e729967
|
3b54cf65d257611c74f23fab637a6c86c05d84c6
|
/R/sequences.R
|
758ceb34f8a85153c6619d6072fce0a45072bd98
|
[] |
no_license
|
epigen/RnBeadsAnnotationCreator
|
624a02f2ae039a6ee4c3ddb0a497296bddbe9058
|
62ee92cd76c601966656c3bf134e3e7a9ba794a7
|
refs/heads/master
| 2022-07-07T20:30:18.419055
| 2022-06-22T07:55:34
| 2022-06-22T07:55:34
| 64,145,268
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,458
|
r
|
sequences.R
|
########################################################################################################################
## sequences.R
## created: 2015-12-10
## creator: Yassen Assenov
## ---------------------------------------------------------------------------------------------------------------------
## Utility functions for working with genomic sequences encoded as character vectors.
########################################################################################################################
## F U N C T I O N S ###################################################################################################
## Performs in-silico bisulfite conversion on sequences given as character
## vectors of single bases: every "C" that is not immediately followed by a
## "G" (i.e. not in a CpG context) is replaced by "T"; CpG cytosines are kept.
##
## @param x.char List of character vectors, one base per element.
## @return List of the same length with converted sequences.
##
## Fixes: the original `for (i in 1:length(x.char))` failed on an empty list
## (1:0 iterates), and the inner scalar loop is replaced by a vectorized mask.
rnb.seq.bisulfite.convert <- function(x.char) {
	lapply(x.char, function(xx) {
		if (length(xx) == 0) {
			return(xx)
		}
		## TRUE where the next base is "G"; the last base has no successor
		followed.by.g <- c(xx[-1] == "G", FALSE)
		xx[xx == "C" & !followed.by.g] <- "T"
		xx
	})
}
########################################################################################################################
## Returns the reverse complement of each sequence in a list, where every
## sequence is a character vector of single bases (A/C/G/T/N; N maps to N).
rnb.seq.reverse.complement <- function(x.char) {
	complement.table <- c("A" = "T", "C" = "G", "G" = "C", "T" = "A", "N" = "N")
	lapply(x.char, function(nucleotides) {
		rev(unname(complement.table[nucleotides]))
	})
}
########################################################################################################################
## Replaces every occurrence of base `s.old` by `s.new` in each sequence of a
## list, where every sequence is a character vector of single bases.
##
## @param x.char List of character vectors.
## @param s.old  Single character to replace.
## @param s.new  Replacement character.
## @return List of the same length with the substitution applied.
##
## Fixes: the original `for (i in 1:length(x.char))` iterated over index 1
## even for an empty list (1:0), causing a subscript error.
rnb.seq.replace <- function(x.char, s.old, s.new) {
	lapply(x.char, function(xx) {
		xx[xx == s.old] <- s.new
		xx
	})
}
########################################################################################################################
## Computes genomic start/end coordinates of the 51-base probe window for
## Infinium probes relative to the targeted loci.
##
## @param loci      Integer vector of target positions.
## @param pr.design Probe design, "I" or "II".
## @param pr.strand Probe strand, "+" or "-".
## @return Two-column matrix of (start, end) coordinates, one row per locus.
rnb.infinium.probe.coords <- function(loci, pr.design, pr.strand) {
	## Start/end offsets relative to the locus, per design and strand
	if (pr.design == "I") {
		offsets <- if (pr.strand == "+") c(0L, 50L) else c(-48L, 2L)
	} else { # pr.design == "II"
		offsets <- if (pr.strand == "+") c(1L, 51L) else c(-49L, 1L)
	}
	cbind(loci + offsets[1], loci + offsets[2])
}
########################################################################################################################
## Builds the expected (in-silico bisulfite-converted) probe sequences for
## the given loci on one chromosome.
##
## chrom.sequence : chromosome sequence object accepted by Biostrings Views()
## loci           : integer vector of target positions
## pr.design      : probe design, "I" or "II"
## pr.strand      : probe strand, "+" or "-"
## Returns: for design "I", a list with elements "A" and "B" (one expected
## sequence list per allele); for design "II", a single list of sequences.
rnb.seq.get.expected.sequence <- function(chrom.sequence, loci, pr.design, pr.strand) {
	## Probe windows around each locus (design- and strand-dependent)
	p.coords <- rnb.infinium.probe.coords(loci, pr.design, pr.strand)
	## NOTE(review): suppressWarnings presumably silences out-of-range
	## warnings from Views near chromosome ends — confirm
	dna.seq <- suppressWarnings(Views(chrom.sequence, start = p.coords[, 1], end = p.coords[, 2]))
	dna.seq <- strsplit(as.character(dna.seq), NULL)
	if (pr.strand == "-") {
		dna.seq <- rnb.seq.reverse.complement(dna.seq)
	}
	## Bisulfite-convert, then reverse-complement to obtain the probe strand
	alleles.exp <- rnb.seq.reverse.complement(rnb.seq.bisulfite.convert(dna.seq))
	if (pr.design == "I") {
		## Design I: allele A additionally has G replaced by A; allele B keeps G
		alleles.exp <- list("A" = rnb.seq.replace(alleles.exp, "G", "A"), "B" = alleles.exp)
	} else { # pr.design == "II"
		## Design II: G becomes the degenerate base R
		alleles.exp <- rnb.seq.replace(alleles.exp, "G", "R")
	}
	alleles.exp
}
########################################################################################################################
## Counts base mismatches between expected and observed probe sequences.
##
## @param probes.exp List of expected sequences (character vectors); a single
##                   NA marks a missing sequence.
## @param probes.obs List of observed sequences, parallel to probes.exp.
## @return Integer vector of mismatch counts; NA where either sequence is
##         missing or the length difference is not 0 or 1. When the expected
##         sequence is one base longer, the smaller mismatch count of the two
##         possible alignments (drop first or last base) is reported.
##
## Fixes vs. original: vapply instead of sapply (stable type on empty input),
## seq_along instead of 1:length (empty-input safety), and no reuse of the
## loop variable `i` for the invalid-length index vector.
rnb.seq.probe.mismatches <- function(probes.exp, probes.obs) {
	## A sequence represented by a single NA denotes a missing probe
	is.missing <- function(x) { length(x) == 1 && is.na(x) }
	result <- vapply(probes.exp, length, integer(1)) -
		vapply(probes.obs, length, integer(1))
	result[vapply(probes.exp, is.missing, logical(1))] <- NA_integer_
	result[vapply(probes.obs, is.missing, logical(1))] <- NA_integer_
	## Only length differences of 0 or 1 are supported
	invalid <- which(result < 0L | result > 1L)
	result[invalid] <- NA_integer_
	if (length(invalid) != 0) {
		warning("Unexpected differences in probe sequence lengths")
	}
	for (i in seq_along(result)) {
		if (is.na(result[i])) {
			next
		}
		x <- probes.exp[[i]]
		if (result[i] == 0) {
			## Equal lengths: direct position-wise comparison
			result[i] <- sum(x != probes.obs[[i]])
		} else { # result[i] == 1
			## Expected is one base longer: best of the two possible alignments
			result[i] <- min(sum(x[-length(x)] != probes.obs[[i]]),
				sum(x[-1] != probes.obs[[i]]))
		}
	}
	result
}
########################################################################################################################
## Guesses the probe strand for one allele by comparing the observed probe
## sequences against the expected sequences for the "+" and "-" strands.
##
## @param alleles.exp.pos Expected sequences assuming the "+" strand.
## @param alleles.exp.neg Expected sequences assuming the "-" strand.
## @param alleles.obs     Observed probe sequences.
## @return data.frame with columns Strand ("+", "-", or "*" when undecided)
##         and Mismatches (mismatch count for the selected strand).
##
## Fix: the original used as.factor() followed by levels<-, which relabels
## levels positionally; when only one strand value occurred (e.g. all "2"),
## that single level was renamed to "+" regardless of its value. Using
## factor() with explicit levels/labels keeps the mapping 1 -> "+", 2 -> "-".
rnb.seq.guess.strand.allele <- function(alleles.exp.pos, alleles.exp.neg, alleles.obs) {
	mismatches.pos <- rnb.seq.probe.mismatches(alleles.exp.pos, alleles.obs)
	mismatches.neg <- rnb.seq.probe.mismatches(alleles.exp.neg, alleles.obs)
	## 1 -> "+" (positive strand fits at least as well), 2 -> "-"; NA stays NA
	sel.strand <- factor(1L + (mismatches.neg < mismatches.pos), levels = 1:3,
		labels = c("+", "-", "*"))
	## Ties are undecidable: mark them with "*"
	sel.strand[mismatches.pos == mismatches.neg] <- "*"
	mismatches.sel <- ifelse(sel.strand == "+", mismatches.pos, mismatches.neg)
	return(data.frame("Strand" = sel.strand, "Mismatches" = mismatches.sel))
}
########################################################################################################################
## Guesses probe strands for a set of loci by matching observed allele
## sequences against expected sequences computed for both strands.
##
## chrom.sequence : chromosome sequence object accepted by Views()
## loci           : integer vector of target positions
## pr.design      : probe design, "I" or "II"
## alleles.A      : character vector of observed allele A probe sequences
## alleles.B      : observed allele B sequences; required for design "I" only
## Returns: data.frame with columns "Guessed Strand", "Mismatches A",
##          "Mismatches B" (the latter fixed at 0L for design "II").
rnb.seq.guess.strands <- function(chrom.sequence, loci, pr.design, alleles.A, alleles.B = NULL) {
	## Expected probe sequences under both strand assumptions
	alleles.exp.pos <- rnb.seq.get.expected.sequence(chrom.sequence, loci, pr.design, "+")
	alleles.exp.neg <- rnb.seq.get.expected.sequence(chrom.sequence, loci, pr.design, "-")
	alleles.char <- strsplit(alleles.A, NULL)
	if (pr.design == "I") {
		result <- rnb.seq.guess.strand.allele(alleles.exp.pos$A, alleles.exp.neg$A, alleles.char)
		alleles.char <- strsplit(alleles.B, NULL)
		resultB <- rnb.seq.guess.strand.allele(alleles.exp.pos$B, alleles.exp.neg$B, alleles.char)
		## Alleles A and B must agree on the strand; conflicts become "*"
		i <- which(result$Strand != resultB$Strand)
		result[i, "Strand"] <- "*"
		result <- data.frame(
			"Guessed Strand" = result$Strand,
			"Mismatches A" = result$Mismatches,
			"Mismatches B" = resultB$Mismatches, check.names = FALSE)
	} else { # pr.design == "II"
		## Design II probes have a single sequence; no allele B comparison
		result <- rnb.seq.guess.strand.allele(alleles.exp.pos, alleles.exp.neg, alleles.char)
		result <- data.frame(
			"Guessed Strand" = result$Strand,
			"Mismatches A" = result$Mismatches,
			"Mismatches B" = 0L, check.names = FALSE)
	}
	result
}
|
c38574489df6112f22a40f81cb60e32b59092048
|
bc304dc82564cf44b7fe73d8722f1ac6e680dacc
|
/Elections/BosniaHoR-drive .R
|
5f72efe209d935b3cb2c392ee8dc2fe5d5062a7b
|
[] |
no_license
|
rs2903/BiH
|
7f993a06724e6a72a73810d2f59823c67dba8882
|
6c2f9b7413634ea0e0bd347cd74756cb989986ae
|
refs/heads/master
| 2020-06-19T04:09:30.983391
| 2019-07-12T09:34:33
| 2019-07-12T09:34:33
| 196,551,852
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,670
|
r
|
BosniaHoR-drive .R
|
# BosniaHoR-drive.R
# Analysis of Bosnian House of Representatives election results 1996-2014:
# imports per-election Excel sheets, harmonizes party names, and produces
# seat/vote plots by party and ethnic affiliation.
# load packages -----------------------------------------------------------
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast and is preferable here.
require(ggplot2)
#require(plotly)
require(stringi)
require(tidyr)
require(purrr)
require(dplyr)
#require(rio)
require(lubridate)
require(readxl)
require(stringdist)
library(xlsx)
library(reldist)
library(ggthemes)
library(highcharter)
# Machine-specific working directories; only one is active at a time.
#setwd("//fs.univie.ac.at/homedirs/schmidr9/Documents/ro - ceu/R")
setwd("C:/Users/Roland/Google Drive/CEU/THESIS/R")
#setwd("~/Google Drive/CEU/THESIS/R")
#setwd("Z:/Documents/ro - ceu/R")
Sys.setlocale("LC_CTYPE", "russian") #allows displaying party names in cyrillic
# file 1996 ---------------------------------------------------------------
res96 <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="HoR 1996")
hor96 <- res96 %>%
mutate(date = as.Date(date),
year = year(date)) %>%
select(-(source))
# IMPORT FILE 1998 & 2000 -------------------------------------------------------------
#df <- import("Electoral Results Bosnia.xlsx")
res98_00 <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="HoR 1998 2000")
res98_00$seats98 <- as.numeric(res98_00$seats98)
#drop perc since only realte to entity
res98_00 <- subset(res98_00, select = -c(perc98,perc00) )
# 1998 --------------------------------------------------------------------
#only parties which ran in 98
hor98 <- res98_00 %>%
select(entity,party,type,seats98,abs98) %>%
filter(type=="Total:") %>%
select(-(type)) %>%
filter(abs98>0) %>%
rename(seats=seats98,
votes=abs98)
hor98 <- hor98 %>%
mutate(date=as.Date("1998-9-13", format="%Y-%m-%d"),
year=year(date))
hor98$seats[is.na(hor98$seats)] <- 0
# 2000 --------------------------------------------------------------------
#only parties which ran in 00
hor00 <- res98_00 %>%
select(entity,party,type,seats00,abs00) %>%
filter(type=="Total:") %>%
select(-(type)) %>%
filter(abs00>0) %>%
rename(seats=seats00,
votes=abs00)
hor00 <- hor00 %>%
mutate(date=as.Date("2000-11-11", format="%Y-%m-%d"),
year=year(date))
hor00$seats[is.na(hor00$seats)] <- 0
# 2002 --------------------------------------------------------------------
res02 <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="HoR 2002")
hor02 <- res02 %>%
select(entity, party, votes, seats, year) %>%
mutate(date=as.Date("2002-10-5", format="%Y-%m-%d"))
class(hor02$date)
# import 2006 - 2014 ------------------------------------------------------
res06_14 <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="HoR 2006 - 2014")
hor06_14 <- res06_14 %>%
select(date.election, entity, party, votes, seats) %>%
mutate(date=as.Date(date.election),
year=year(date),
seats=as.numeric(seats)) %>%
select(-(date.election))
hor06_14$seats[is.na(hor06_14$seats)] <- 0
# bind results -----------------------------------------------------------
hor <- bind_rows(hor96, hor98, hor00, hor02, hor06_14)
i <- hor %>%
group_by(year, entity) %>%
summarise(sum.seats=sum(seats))
i
# unify names -------------------------------------------------------------
# length(unique(hor$party)) #180 different party names
# p <- as.data.frame(unique(hor$party))
# export(p,"partynames.xlsx")
# >> import df w with standardized names ----------------------------------
#party.names2 from party.nat project
party.names <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="party.names2")
hor <- hor %>%
rename(party.original=party)
#hor$party.standard <- party.names$name.unified2[match(gsub("\\s","",tolower(hor$party.original)),gsub("\\s","",tolower(party.names$name.original)))]
hor$party.standard <- party.names$name.unified2[amatch(gsub("\\s","",tolower(hor$party.original)),gsub("\\s","",tolower(party.names$name.original)), maxDist = 5)]
length(unique(hor$party.original))
length(unique(hor$party.standard))
d<- hor %>%
filter(is.na(hor$party.standard)) %>% #non-matched party names
filter(seats > 0)
non.matched<- unique(d$party.original)
length(unique(d$party.original))
#write.xlsx(non.matched, "Elections/NonMatchedNames.xlsx")
non.matched.seats <- hor %>%
filter(seats>0)%>%
filter(is.na(party.standard))%>%
select(party.original,party.standard)
# Unique standardized party names with seats ------------------------------
unique.standard.seats <- hor %>%
filter(seats > 0) %>%
select(party.standard) %>%
distinct(party.standard)
class(unique.standard.seats)
unique.standard.seats <- as.data.frame(unique.standard.seats)
# write.xlsx2(unique.standard.seats, "Elections/Electoral Results Bosnia.xlsx",
# sheetName="UniquePartyNamesSeats",
# col.names=TRUE,row.names = FALSE,
# append=TRUE, showNA=FALSE)
# >> Percentage in votes & seats in Entity and State -------------------------------------------
hor <- hor %>%
group_by(year) %>%
mutate(votes.perc=round(votes/sum(votes)*100, 2))%>% #wrong: doesn't consider 1 party in RS & FED in same year
mutate(seats.perc=round(seats/sum(seats)*100, 2))%>%
group_by(year, entity) %>%
mutate(votes.perc.ent=round(votes/sum(votes)*100,2)) %>%
mutate(seats.perc.ent=round(seats/sum(seats)*100,2))
# FILTER - only parties with at least 1 seat in HoR -----------------------
x <- hor %>% filter(seats>0)
party.filter <- unique(x$party.standard)
# > percentage of votes & abs seats over year entire state -------------------------
year.sums<- hor %>%
select(year, entity, party.original, party.standard, votes, seats) %>%
group_by(year, party.standard) %>%
summarize(votes.annual=sum(votes),
seats.annual=sum(seats)) %>%
ungroup() %>%
group_by(year) %>%
mutate(vote.perc.annual=round(votes.annual/sum(votes.annual)*100, 2))
length(unique(year.sums$party.standard))
# >> add ethnicity and coalition to results -------------------------------
# >>> load ethnicity from file --------------------------------------------
eth.coal <- read_excel("Elections/Electoral Results Bosnia.xlsx",sheet="UniquePartyNamesSeats")
year.sums$ethnicity <- eth.coal$ethnicity[match(year.sums$party.standard,eth.coal$party.standard)]
year.sums$coalition <- eth.coal$coalition[match(year.sums$party.standard,eth.coal$party.standard)]
# Creating Table on Election Results -------------------------------------
# not based on harmonized party names
#https://stackoverflow.com/questions/24929954/is-it-possible-to-use-spread-on-multiple-columns-in-tidyr-similar-to-dcast
#gather - unite - spread sequence
hor.table <- hor %>%
filter(seats > 0) %>%
select(year, entity, party.original, seats, votes.perc.ent) %>%
gather(key, value, -c(1:3)) %>%
unite(year.key,year,key)%>%
spread(year.key,value)
hor.table <- as.data.frame(hor.table) #if not dataframe write.xlsx crashes
class(hor.table)
# write.xlsx2(hor.table,"Electoral Results Bosnia.xlsx",
# sheetName="ResultTable",col.names = TRUE,row.names = FALSE,
# append=TRUE, showNA=FALSE)
# > plot - annual % and seats ---------------------------------------------
# >> seats p.a. ----------------------------------------------------------
seats.plot <- year.sums %>%
filter(party.standard %in% party.filter) %>%
ggplot(.,aes(year, seats.annual)) +
geom_bar(stat="identity",aes(fill=party.standard))+
labs(x="year", y="seats",title="Seats in House of Representatives")+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
theme_minimal()
print(seats.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"seat.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
# highchart bar -------------------------------------------------------
wide <- year.sums %>%
filter(party.standard %in% party.filter) %>%
select(year,party.standard, seats.annual)%>%
spread(.,year,seats.annual) %>%
gather("year","n",2:8)
#data is spread and gathered; spread creates cells with NA for years without observations
#transforming the data back with gather does not result in a loss of rows/years with NA
#NAs are needed as element in list; otherwise data is projected to wrong year
wide.list <- wide %>%
group_by(name=party.standard)%>%
do(data=.$n)
#creates list
series <- list_parse(wide.list)
#see http://stackoverflow.com/questions/38093229/multiple-series-in-highcharter-r-stacked-barchart
hc <- highchart() %>%
hc_chart(type = "column") %>%
hc_xAxis(categories=unique(wide$year)) %>%
hc_plotOptions(column = list(
dataLabels = list(enabled = FALSE),
stacking = "normal",
enableMouseTracking = TRUE)) %>%
hc_add_series_list(series)
hc
#unique for years since otherwise always frist year
# >> votes perc per year ---------------------------------------------------
# Line chart of each party's state-wide vote share over time.
# FIX: the original chain ended in a commented-out "+" followed by a dangling
# "theme_minimal()+", which made the next assignment part of this expression
# and caused an evaluation error; the chain is now closed properly.
year.sums %>%
  filter(party.standard %in% party.filter) %>%
  ggplot(., aes(year, vote.perc.annual)) +
  geom_line(aes(color = party.standard)) +
  xlab("year") + ylab("votes %") +
  scale_x_continuous(breaks = c(1996, 1998, 2000, 2002, 2006, 2010, 2014)) +
  theme_minimal()
# distribution of votes btw entities per party ----------------------------
# Distribution of each party's votes/seats between the two entities.
# entity.perc: share of a party's annual votes cast in each entity.
v <- hor %>%
  select(year, party.standard, votes, entity,seats) %>%
  group_by(year, party.standard) %>%
  mutate(entity.perc=round(votes/sum(votes),2),
         number=max(row_number(party.standard)))
# Seats per party, filled by entity.
# NOTE(review): color="red" inside aes() maps a constant string instead of
# setting the color; to set a literal outline color, move it outside aes().
v %>%
  filter(party.standard %in% party.filter) %>%
  ggplot(.,aes(year, seats))+
  geom_bar(stat="identity", aes(fill=entity, color="red"))+
  facet_wrap(~party.standard)+# +
  xlab("year")+ylab("seats in total")+
  scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014))
hor1 <- hor %>%
  filter(party.standard %in% party.filter)
nrow(hor1)
# Entity-level vote share per party over time.
ggplot(hor1, aes(year, votes.perc.ent))+
  # geom_line()+
  geom_point(aes(color=entity, shape=entity))+
  facet_wrap(~party.standard)+
  xlab("year")+ylab("% of votes in entity")+
  scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014))
d <- hor1 %>%
  group_by(year, party.standard) %>%
  mutate(number=max(row_number(party.standard)))
# Number of Parties per Ethnic Group --------------------------------------
# Attach ethnicity/coalition/abbreviation from the lookup sheet to hor.
hor$ethnicity <- eth.coal$ethnicity[match(hor$party.standard,eth.coal$party.standard)]
hor$coalition <- eth.coal$coalition[match(hor$party.standard,eth.coal$party.standard)]
hor$party.abbrev <- eth.coal$party.abbrev[match(hor$party.standard,eth.coal$party.standard)]
# Order the ethnicity factor: B, C, S, M, nk (as used in the lookup sheet;
# presumably Bosniak, Croat, Serb, Multi-ethnic, not known — TODO confirm).
hor$ethnicity <- as.factor(hor$ethnicity)
levels(hor$ethnicity)
hor$ethnicity <- ordered(hor$ethnicity, c("B","C","S","M","nk"))
#number of parties per ethnic group with a seat in HoR per year
n <- hor %>%
  filter(seats>0)%>%
  group_by(year,ethnicity) %>%
  summarise(n.parties=n_distinct(party.standard),
            n.coalition=n_distinct(party.standard[coalition=="y"], na.rm=TRUE),
            name.parties=paste(party.standard, sep=" ",collapse=";"))
#reordering levels of ethnic groups
n$ethnicity <- as.factor(n$ethnicity)
levels(n$ethnicity)
n$ethnicity <- ordered(n$ethnicity, c("B","C","S","M","nk"))
# Step chart: number of seat-winning parties per ethnic group over time.
n.parties.ethnicity.plot <- n %>%
  filter(ethnicity %in% c("B","C","S","M")) %>%
  ggplot(.,aes(year,n.parties))+
  geom_step(aes(color=ethnicity), size=2)+
  labs(x="year",y="seats",
       title=paste0("Number of Parties/Coalitions per Ethnic Group with Seats in the House of Representatives"),
       subtitle="1 coalition = 1 party")+
  scale_x_continuous(breaks=c(1996,1998,2000,2002,2006,2010,2014)) +
  scale_y_continuous(limits = c(0,5))+
  theme_minimal()+
  theme(legend.position="bottom", panel.grid.minor.x = element_blank())+
  facet_grid(ethnicity~.)
print(n.parties.ethnicity.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"n.parties.ethnicity.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
### >> differentiation parties/coalitions (STACKED & FACET bar plot) -----------------------------------------------------------
# Split counts into single parties vs coalitions; stacked bars faceted by
# ethnic group.
parties.col.facet.ethnicity.plot <- n %>%
  mutate(n.single.party = n.parties-n.coalition) %>%
  gather(key,number, n.single.party,n.coalition) %>%
  ggplot(.,aes(as.factor(year),number,group=ethnicity))+
  geom_bar(stat="identity",aes(fill=key))+
  labs(x="year", y="seats",
       title="Number of parties and coalitions with seats in House of Representatives",
       subtitle="Disaggregated by ethnic affiliation of party/coalition")+
  # scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
  scale_fill_manual(values=c("red","darkblue"), labels=c("coalitions","single parties"))+
  #scale_fill_discrete(labels=c("coalitions","single parties"), values=c("red","darkblue"))+
  theme_minimal()+
  theme(legend.position="bottom", legend.title = element_blank(),
        axis.title.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y = element_blank())+
  #facet_grid(ethnicity~.)
  facet_grid(.~ethnicity)
print(parties.col.facet.ethnicity.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"parties.col.facet.ethnicity.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
## >> differentiation parties/coalitions (STACKED & NO FACET bar plot) -----------------------------------------------------------
# Same data, but faceted by year with ethnic groups on the x axis.
parties.col.facet.yar.plot <- n %>%
  mutate(n.single.party = n.parties-n.coalition) %>%
  filter(ethnicity %in% c("B","C","S","M")) %>%
  gather(key,number, n.single.party,n.coalition) %>%
  ggplot(.,aes(ethnicity, number))+
  geom_bar(stat="identity",aes(fill=key))+
  labs(y="seats",
       title="Number of parties and coalitions with seats in House of Representatives",
       subtitle="Disaggregated by ethnic affiliation of party/coalition")+
  scale_fill_manual(values=c("red","darkblue"), labels=c("coalitions","single parties"))+
  # #scale_fill_discrete(labels=c("coalitions","single parties"), values=c("red","darkblue"))+
  theme_minimal()+
  theme(legend.position="bottom", legend.title = element_blank(),
        axis.title.x=element_blank(),
        panel.grid.minor.x = element_blank(),
        panel.grid.minor.y = element_blank())+
  facet_grid(~year)
print(parties.col.facet.yar.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"parties.col.facet.yar.plot .pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
# >> Gini Seats in HOR per Ethic groups ---------------------------------------------------
gini.seats <- hor %>%
ungroup() %>%
filter(seats > 0) %>%
select(year, party.standard,seats,votes,ethnicity)%>%
group_by(year, ethnicity, party.standard) %>%
summarize(seats.year=sum(seats)) %>%
group_by(year, ethnicity) %>%
summarize(gini.seats=gini(seats.year))
gini.seats.plot <- gini.seats %>%
ggplot(.,aes(year,gini.seats))+
geom_point(aes(color=ethnicity), size=3)+
geom_step(aes(color=ethnicity), size=2)+
labs(y="Gini Coefficient",
title="Intra-Group Concentration of seats in House of Represenatitves",
subtitle="Gini Coefficient on seats per ethnic group")+
# scale_fill_manual(values=c("red","darkblue"), labels=c("coalitions","single parties"))+
# #scale_fill_discrete(labels=c("coalitions","single parties"), values=c("red","darkblue"))+
scale_color_manual(values=c("darkgreen","darkblue","darkred","orange","grey"))+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
theme_minimal()+
theme(legend.position="bottom", legend.title = element_blank(),
axis.title.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())+
facet_grid(.~ethnicity)
print(gini.seats.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"gini.seats.plot .pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
#>> Composition of HoR ------------------------------------------------------
# Stacked seat counts per year, coloured by ethnic affiliation, faceted by
# entity; the flipped bar chart shows the chamber's composition over time.
comp.hor.plot <- hor %>%
select(entity,votes,year,party.standard,ethnicity,coalition,seats)%>%
filter(seats>0) %>%
ggplot(.,aes(year,seats))+
geom_col(aes(fill=ethnicity,color=party.standard))+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
scale_fill_manual(values=c("darkgreen","darkblue","darkred","orange","grey"))+
#scale_color_manual(values = c("blacks"))+
labs(title="Composition of House of Representatives",
subtitle="categorized by ethnic affiliation",
caption="Federation: 28 seats; RS: 14 seats")+
theme_minimal()+
theme(legend.position="none", legend.title = element_blank(),
axis.title.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())+
# Label only the SDA bars to keep the plot readable.
geom_text(data=subset(hor, party.standard %in% c("Stranka Demokratske Akcije (SDA)")),
aes(label = party.standard),
size=2,
position = position_stack())+
facet_grid(.~entity, scales="free_y") +coord_flip()
print(comp.hor.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"comp.hor.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
# >> Composition of HoR - Geom_Area ---------------------------------------
# Aggregate seats per year and ethnicity, then draw a stacked area chart of
# chamber composition. Ethnicity is re-ordered M, B, C, S for stacking.
comp.hor.area <- hor %>%
filter(seats>0)%>%
select(year,seats, party.standard, ethnicity)%>%
group_by(year,party.standard,ethnicity)%>%
summarise(seats.y=sum(seats))%>%
group_by(year,ethnicity)%>%
summarise(seats.eth=sum(seats.y))
comp.hor.area$ethnicity <- ordered(comp.hor.area$ethnicity, c("M","B","C","S"))
comp.hor.area.plot <- comp.hor.area %>%
ggplot(.,aes(year,seats.eth))+
geom_area(aes(fill=ethnicity))+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
scale_fill_manual(values=c("orange","darkgreen","darkblue","darkred","orange"))+
labs(title="Composition of House of Representatives",
subtitle="categorized by ethnic affiliation",
caption="comp.hor.area.plot")+
theme_minimal()+
theme(legend.position="bottom", legend.title = element_blank(),
axis.title.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())
print(comp.hor.area.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
# NOTE(review): this re-uses the previous plot's filename
# ("comp.hor.plot.pdf") — likely intended to be "comp.hor.area.plot.pdf".
plotname <-"comp.hor.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
print(comp.hor.area.plot)
# >> Composition of HoR - Tree map -------------------------------------------
# install.packages("devtools")
# library(devtools)
# install_github("wilkox/ggfittext")
# install_github("wilkox/treemapify")
#https://github.com/wilkox/treemapify
# Treemap of seats per party, grouped (and coloured) by ethnic segment,
# one panel per election year.
library(treemapify)
tree <- hor %>%
ungroup(entity) %>%
select(year, party.abbrev, ethnicity, seats, votes.perc, entity)%>%
filter(seats>0)%>%
group_by(year,ethnicity,party.abbrev)%>%
summarise(seats.y=sum(seats))%>%
ungroup()
tree.plot <- tree %>%
ggplot(., aes(area=seats.y,
fill=ethnicity,
subgroup=ethnicity, label=paste(party.abbrev," (",seats.y,")",sep="")))+
geom_treemap()+
geom_treemap_text(
colour = "white",
place = "topleft",
reflow = T)+
theme_minimal()+
scale_fill_manual(values=c("darkgreen","darkblue","darkred","orange","grey"))+
labs(title="Ethnic segmentation of parties in House of Representative",
subtitle="intertemporal changes largely within segments")+
facet_wrap(~year)
print(tree.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"tree.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
# >> Election results per Ethnic Group and Entity------------------------------------------------------------------------
# Lollipop chart of vote shares (per entity-year) for seat-winning parties
# with more than 1% of the entity vote. `order` gives each party a unique
# row number so the discrete axis can be relabelled with party abbreviations.
require(ggalt)
hor.res <- hor %>%
select(entity,seats, votes,year, party.standard,coalition,ethnicity, party.abbrev)%>%
group_by(year, entity) %>%
mutate(votes.perc.ent=round(votes/sum(votes)*100,2)) %>%
ungroup()%>%
filter(votes.perc.ent>1)%>%
filter(seats>0)%>%
arrange(year, entity, votes.perc.ent)%>%
mutate(order=row_number())%>%
ungroup()
hor.res.plot <- ggplot(hor.res,
aes(as.factor(order),votes.perc.ent))+
geom_lollipop(aes(color=ethnicity), size=1, horizontal = FALSE)+
scale_color_manual(values=c("darkgreen","darkblue","darkred","orange","grey"))+
scale_y_continuous(breaks=c(0,10,20,30,40,50,60)) +
scale_x_discrete(breaks = hor.res$order,labels = hor.res$party.abbrev, expand = c(0.1,.1)) +
labs(title="Election Results: House of Representatives",
subtitle="Categorized by ethnic affiliation and entity")+
theme_minimal()+
theme(legend.position="bottom", legend.title = element_blank(),
axis.title=element_blank(),
# axis.text.x=element_text(angle=90, hjust=5),
#axis.text.y=element_blank(),
panel.grid.major.x =element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.major.y = element_blank())+
#facet_wrap(year~entity, scales="free",drop=TRUE, space="free")+coord_flip()
facet_wrap(year~entity, labeller=label_parsed, scales="free",drop=TRUE, ncol=2)+coord_flip()
print(hor.res.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"hor.res.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=21, height=29.7, unit="cm")
# >> Number of parties ----------------------
#discrepancy when running on party.original
#run on original; standradization of party names leads to NAs with some parties who did not get seats
library(dplyr)
# Count distinct parties running vs. winning seats, per year.
n.parties <- hor %>%
group_by(year) %>%
summarise(n.parties.run=n_distinct(party.original),
n.parties.seats=n_distinct(party.original[seats>0]))%>%
ungroup()
# >>> plot number of parties ---------------------------------------------------------------------
plot.n.parties <- n.parties %>%
gather(parties,number,n.parties.run, n.parties.seats)%>%
ggplot(.,aes(year,number))+
geom_line(aes(color=parties),size=2)+
geom_point(aes(color=parties), size=3)+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
labs(title="Number of Parties" ,
subtitle="running for or winning seat for House of Representatives",
caption="plot.n.parties",
x="",y="")+
theme_minimal()+
theme(legend.position="bottom", legend.title = element_blank(),
axis.title.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())+
scale_y_continuous(limits=c(0,40))
print(plot.n.parties)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"plot.n.parties.png"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=21, height=29.7, unit="cm")
# How often does each party appear across election years? One row per party
# with the number of years it contested, then grouped into a frequency table
# listing all party names per frequency.
freq.table <- hor %>%
ungroup()%>%
select(party.standard, year) %>%
distinct()%>%
ungroup()%>%
count(party.standard)%>%
arrange(-n)%>%
ungroup()
freq.table %<>%
group_by(n) %>%
summarise(n.parties=n_distinct(party.standard),
names=paste(party.standard, sep=";",collapse=";"))
# >> Inter-Segment volatility ---------------------------------------------
hor <- hor %>% ungroup()
# >>> inter-seg volatility based on votes [pending]----------------------------------
# NOTE(review): this votes-based version is immediately overwritten by the
# seats-based version below; kept here as the "[pending]" alternative.
# Pedersen-style volatility: half the sum of absolute changes in each ethnic
# segment's share between consecutive elections.
vol.seg <- hor %>%
ungroup() %>%
select(year, ethnicity, votes)%>%
group_by(ethnicity, year)%>%
summarise(sum.y.ethno=sum(votes))%>%
group_by(year)%>%
mutate(sum.y=sum(sum.y.ethno))%>%
mutate(ethno.share=round(sum.y.ethno/sum.y*100,2))%>%
arrange(ethnicity, year) %>%
group_by(ethnicity) %>%
mutate(diff.ethno=ethno.share-lag(ethno.share))%>%
filter(ethnicity %in% c("B","C","S","M")) %>%
group_by(year)%>%
summarise(vol.y=sum(abs(diff.ethno))/2)
# >>> inter-seg volatility based on seats ----------------------------------
vol.seg <- hor %>%
ungroup()%>%
filter(seats>0)%>%
select(year, ethnicity, seats) %>%
group_by(year, ethnicity) %>%
summarise(y.eth.total=sum(seats))%>%
group_by(year) %>%
mutate(y.total=sum(y.eth.total))%>%
ungroup()%>%
mutate(share=round(y.eth.total/y.total,2))%>%
group_by(ethnicity)%>%
mutate(vol.eth.annual=share-lag(share))%>%
group_by(year)%>%
summarise(vol.y=sum(abs(vol.eth.annual))/2)
# >>> inter-seg volatility plot (seats)----------------------------------
vol.seg.plot<- vol.seg %>%
ggplot(.,aes(year,vol.y))+
geom_line()+
geom_point()+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014)) +
labs(title="Inter-Segment Volatility",
subtitle="volatility of relative size of Bosniak, Croat, Serb, and mulitethnic/civic electoral segment; based on seats in HoR",
caption="vol.seg.plot",
x="",y="")+
theme_minimal()+
theme(legend.position="none", legend.title = element_blank(),
axis.title.x=element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())+
scale_y_continuous(limits=c(0,1))
print(vol.seg.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"vol.seg.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=21, height=29.7, unit="cm")
# >> Intra-Segment Volatility ---------------------------------------------
# Volatility of party vote shares *within* ethnic segments (share computed
# relative to the segment's total, differenced per party between elections).
vol.intra <- hor %>%
filter(seats>0)%>%
group_by(ethnicity, year, party.standard)%>%
summarise(votes.y=sum(votes))%>%
arrange(year, party.standard) %>%
group_by(year, ethnicity)%>%
mutate(sum.y=sum(votes.y))%>%
mutate(perc=round(votes.y/sum.y*100,2))%>%
group_by(party.standard)%>%
mutate(diff.perc=perc-lag(perc))%>%
group_by(year)%>%
summarise(vol.y=sum(abs(diff.perc))/2)
# number of parties running & getting seats-----------------------------------------------
# Per entity-year: distinct parties running / winning seats plus Gini
# concentration of votes and seats.
n.parties.entity <- hor %>%
select(party.original, year, entity, seats, votes) %>%
group_by(year, entity) %>%
summarize(parties.all=n_distinct(party.original),
parties.seats=n_distinct(party.original[seats>0]),
gini.votes=gini(votes),
gini.seats=gini(seats))
# Same measures at the state level; votes/seats are first summed per party
# across entities so a party running in both entities is counted once.
n.parties.state <- hor %>%
select(party.original, year, entity, seats, votes) %>%
group_by(year, party.original) %>%
summarize(votes.year=sum(votes),
seats.year=sum(seats)) %>%
ungroup() %>%
group_by(year) %>%
summarize(parties.all=n_distinct(party.original),
parties.seats=n_distinct(party.original[seats.year>0]),
gini.votes=gini(votes.year),
gini.seats=gini(seats.year))
n.parties <- bind_rows(n.parties.state,n.parties.entity)
# state-level rows have no entity; label them "total"
n.parties$entity[is.na(n.parties$entity)] <- "total"
#number of parties for the state level is calculated not as the
#sum of fed + rs since one party may run in two entities; avoid double counting
n.parties <- n.parties %>%
gather(parties,number, c(parties.all,parties.seats))
n.parties.plot <- n.parties %>%
ggplot(.,aes(year, number, group=entity))+
geom_line(aes(color=entity))+
labs(x="year", y="number of parties",
title="Number of Parties",
subtitle="Coalitions are counted as parties")+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014))+
theme_minimal()+
theme(legend.position="bottom", panel.grid.minor.x = element_blank())+
facet_wrap(.~parties, scales = "free")
print(n.parties.plot)
folder <-"graphs/draft/"
time <- format(Sys.time(),"%Y%m%d-%H%M%S")
scriptname <- "BosniaHoR"
plotname <-"n.parties.plot.pdf"
ggsave(paste(folder,time,scriptname,plotname, sep="-"), width=15, height=7)
# vote & seat concentration in entites -------------------------------------------
n.parties %>%
select(year, entity, gini.votes, gini.seats) %>%
gather(concentration, number, c(gini.votes, gini.seats)) %>%
ggplot(.,aes(year, number, group=entity))+
geom_line(aes(color=entity))+
xlab("year")+ylab("Gini Coefficient")+
scale_x_continuous(breaks=c(1996,1998,2000,2002, 2006,2010,2014))+
facet_grid(concentration~.)
# Harmonize party names in the `seats` table against the master list in
# `partynames`, sanity-check the mapping, and draw a stacked seat plot.
seats$name.original <- seats$party
seats$party <- partynames$name.harmonized[match(seats$party, partynames$name.original)]
# quick sanity check: how many distinct names before/after harmonization
length(unique(seats$party))
length(unique(seats$name.original))
p.s<- as.data.frame(unique(seats$party))
colnames(p.s) <- "name"
# parties present in the master list but missing from seats (two equivalent checks)
setdiff(p.l[,1], p.s[,1])
anti_join(p.l, p.s, by="name")
# add Ethnicity to each party ---------------------------------------------
# str(partynames)
# party.ethnicity <- as.data.frame(unique(partynames$name.harmonized))
# colnames(party.ethnicity) <- c("name.harmonized")
# #party.ethnicity<- export(party.ethnicity,"party.ethnicity.xlsx")
# class(party.ethnicity$name.harmonized)
#
# party.ethnicity.list <- read_excel("party.ethnicity1.xlsx")
# class(party.ethnicity.list$name.harmonized)
#
# #check whether new parties are in party.ethnicity which were previously not included
# new2 <- anti_join(party.ethnicity, party.ethnicity.list, by="name.harmonized")
#graph
# FIX(review): facet_grid() was previously a stand-alone statement (the '+'
# after theme() was missing), so the facet was silently discarded. It is now
# joined to the plot chain.
# NOTE(review): geom_density(position="fill") on a discrete x-axis looks
# questionable and may error at render time — verify the intended layer.
ggplot(data=seats,aes(x=as.factor(year), y=seats, fill=party, label=party))+
geom_bar(stat="identity") +
geom_text(size = 3, position = position_stack(vjust = 0.5))+
#geom_bar(stat = "identity", position = "fill")#+
geom_density(adjust=1.5, position="fill")+
theme(legend.position="none")+
facet_grid(.~entity)
# Number of Parties in HoR ------------------------------------------------
# NOTE(review): this re-binds `n.parties` (already used above) with a
# different definition; the two sections should not be run interleaved.
n.parties <- seats %>%
group_by(year) %>%
summarise(n.parties=length(party))
ggplot(n.parties, aes(x=as.Date(as.character(year),"%Y"), n.parties, group=1))+
geom_point()+
geom_line()+
xlab("year")+ylab("number of parties in HoR")+
expand_limits(y=0) +
scale_x_date(date_breaks="years", date_labels="%Y")
#scale_x_continuous(breaks=1996:2014)
class(n.parties$year)
# % Seats SDA, HDZ, SNSD, SDS ---------------------------------------------
# Seats per year for the four main nationalist parties.
u <- seats %>%
group_by(party, year) %>%
summarise(party.seats=sum(seats))%>%
filter(party %in% c("SDA", "HDZ", "SDS", "SNSD"))
# leftover interactive debugging calls
class(u)
u <- as.data.frame(u)
dput(u)
ggplot(u, aes(x=as.Date(as.character(year), "%Y"), party.seats, group=party, color=party))+
geom_point()+
geom_step()+
xlab("year")+ylab("seats in HoR")+
scale_x_date(date_breaks="years", date_labels="%Y") #+
# > joint share of seats --------------------------------------------------------
ggplot(u, aes(x=as.Date(as.character(year), "%Y"), y = party.seats, fill = party)) +
geom_area(position = "stack")+
xlab("year")+ylab("seats in HoR")+
scale_x_date(date_breaks="years", date_labels="%Y")
#https://www.safaribooksonline.com/library/view/r-graphics-cookbook/9781449363086/ch04.html
#http://stackoverflow.com/questions/29807080/geom-area-not-stacking-ggplot
#problem: lines run as if seats would in/decrease during legislative period
# Cross-tabulate to fill missing year/party combinations with 0 so the
# stacked area does not interpolate across gaps.
d <- as.data.frame(xtabs(party.seats~year+party, u))
ggplot(d,aes(x=as.Date(as.character(year), "%Y"), y = Freq, fill = party)) +
#geom_bar(position="stack")+
geom_area(position = "stack")+
xlab("year")+ylab("seats in HoR")+
scale_x_date(date_breaks="years", date_labels="%Y")
|
c52460d6ba5253d1840d661a215f9aa9ee8e7ea6
|
1201b4b111882eef0960608d2e9262349e1bf88c
|
/R/swing.r
|
c844d86997b7c4211b9a72df9ac4427d073c4402
|
[] |
no_license
|
awaardenberg/KinSwingR
|
c96610e04f340a06072f036bd77ea37e980a859a
|
6d8e6b4c8f49e50346727c6d7948c06636856ff7
|
refs/heads/master
| 2020-03-29T08:05:28.567017
| 2019-04-30T05:32:15
| 2019-04-30T05:32:15
| 149,693,793
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,537
|
r
|
swing.r
|
#' Swing statistic
#'
#' @description This function integrates the kinase-substrate predictions,
#' directionality of phosphopeptide fold change and significance to assess
#' local connectivity (swing) of kinase-substrate networks. The final score
#' is a normalised and weighted score of predicted kinase activity. If
#' permutations are selected, network node:edges are permutated. P-values will
#' be calculated for both ends of the distribution of swing scores (positive and
#' negative swing scores).
#' @param input_data A data.frame of phosphopeptide data. Must contain 4 columns
#' and the following format must be adhered to. Column 1 - Annotation, Column 2
#' - centered peptide sequence, Column 3 - Fold Change [-ve to +ve], Column 4
#' - p-value [0-1]. This must be the same dataframe used in scoreSequences()
#' @param pwm_in List of PWMs created using buildPWM()
#' @param pwm_scores List of PWM-substrate scores created using
#' scoreSequences()
#' @param pseudo_count Pseudo-count acts at two levels. 1) It adds a small
#' number to the counts to avoid zero divisions, which also 2) avoids log-zero
#' transformations. Note that this means that pos, neg and all values in the
#' output table include the addition of the pseudo-count. Default: "1"
#' @param p_cut_pwm Significance level for determining a significant
#' kinase-substrate enrichment. Default: "0.05"
#' @param p_cut_fc Significance level for determining a significant level of
#' Fold-change in the phosphoproteomics data. Default: "0.05"
#' @param permutations Number of permutations to perform. This will shuffle the
#' kinase-substrate edges of the network n times. To not perform permutations and
#' only generate the scores, set permutations=1 or permutations=FALSE. Default:
#' "1000"
#' @param return_network Option to return an interaction network for visualising
#' in Cytoscape. Default = FALSE
#' @param verbose Turn verbosity on/off. To turn on, verbose=TRUE. Options are:
#' "TRUE, FALSE". Default=FALSE
#'
#' @examples
#' ## import data
#' data(example_phosphoproteome)
#' data(phosphositeplus_human)
#'
#' ## clean up the annotations
#' ## sample 100 data points for demonstration
#' sample_data <- head(example_phosphoproteome, 100)
#' annotated_data <- cleanAnnotation(input_data = sample_data)
#'
#' ## build the PWM models:
#' set.seed(1234)
#' sample_pwm <- phosphositeplus_human[sample(nrow(phosphositeplus_human),
#' 1000),]
#' pwms <- buildPWM(sample_pwm)
#'
#' ## score the PWM - substrate matches
#' ## Using a "random" background, to calculate the p-value of the matches
#' ## Using n = 100 for demonstration
#' ## set.seed for reproducibility
#' set.seed(1234)
#' substrate_scores <- scoreSequences(input_data = annotated_data,
#' pwm_in = pwms,
#' background = "random",
#' n = 100)
#'
#' ## Use substrate_scores and annotated_data data to predict kinase activity.
#' ## This will permute the network node and edges 10 times for demonstration.
#' ## set.seed for reproducibility
#' set.seed(1234)
#' swing_output <- swing(input_data = annotated_data,
#' pwm_in = pwms,
#' pwm_scores = substrate_scores,
#' permutations = 10)
#'
#' @return A data.table of swing scores
#'
#' @export swing
#' @importFrom BiocParallel bplapply
#' @importFrom BiocParallel MulticoreParam
#' @importFrom stats setNames
#' @importFrom stats sd
#' @importFrom data.table melt.data.table
swing <-
function(input_data = NULL,
pwm_in = NULL,
pwm_scores = NULL,
pseudo_count = 1,
p_cut_pwm = 0.05,
p_cut_fc = 0.05,
permutations = 1000,
return_network = FALSE,
verbose = FALSE) {
#----------------------------------------------
#format checks:
if (is.null(input_data))
stop("input_data not provided; you must provide an input table")
if (!is.data.frame(input_data))
stop("input_data is not a data.frame; you must provide an input table")
if (is.null(pwm_in))
stop(
"pwm_in not provided; you must provide an input table containing
computed position weight matrices using buildPWM()"
)
if (!is.list(pwm_in))
stop(
"pwm_in is not a list format; something has gone wrong. Make sure you
compute the position weight matrices using buildPWM()"
)
if (is.null(pwm_scores))
stop(
"pwm_scores not provided; you must provide an input table containing
computed scores using scoreSequences()"
)
if (!is.list(pwm_scores))
stop(
"pwm_scores is not a list format; something has gone wrong. Make sure
PWM-substrate matches are scored using scoreSequences()"
)
if (p_cut_pwm >= 1)
stop("p_cut_pwm needs to be less than 1; make sure your p-values are not
log transformed")
if (p_cut_fc >= 1)
stop("p_cut_fc needs to be less than 1; make sure your p-values are not
log transformed")
if (permutations != FALSE &
(!is.numeric(permutations) | permutations < 1))
stop(
"permutations needs to be a numerical number. Either 1 or FALSE (for no
permutation) or greater than 1 to set the number of pemutations"
)
#----------------------------------------------
# 1. binarise p-values and fold change
if (verbose) {
start_time <- Sys.time()
message("Start: ", start_time)
message("[Step1/3] : Calculating Swing Scores")
}
# kinase-substrate p-values from scoreSequences(); columns 1:2 are
# annotation/peptide, columns 3+ are one p-value column per kinase
pwm_pval <- pwm_scores$peptide_p
# binarise p-values
pwm_pval[, 3:ncol(pwm_pval)] <-
ifelse(pwm_pval[, 3:ncol(pwm_pval)] > p_cut_pwm, 0, 1)
# binarise FC
input_data[, 3] <- ifelse(as.numeric(input_data[, 3]) > 0, 1, -1)
# binarise p-value
input_data[, 4] <-
ifelse(as.numeric(input_data[, 4]) > p_cut_fc, 0, 1)
# compute Sipk statistic (Sipk - see paper):
# +1 significant up, -1 significant down, 0 not significant
input_data$Sipk <-
as.numeric(input_data[, 3]) * as.numeric(input_data[, 4])
# 2. merge tables, summarise counts and unique
# make annotation::peptide a unique key so the merge is 1:1
input_data$annotation <-
paste(input_data$annotation, input_data$peptide, sep = "::")
pwm_pval$annotation <-
paste(pwm_pval$annotation, pwm_pval$peptide, sep = "::")
data_merge <-
unique(merge(input_data, pwm_pval, by = "annotation"))
# 3. Swing scores of REAL data.
swing_out <- swingScore(
data_merge = data_merge,
pwm_in = pwm_in,
permute = FALSE,
pseudo_count = pseudo_count
)
# 4. permute and calculate p-values
if (permutations != FALSE && permutations > 1) {
if (verbose) {
message("[Step2/3] : Permuting Network")
}
# obtain permuted column names
# data_merge columns 7+ are the per-kinase columns; shuffling their
# names permutes the kinase-substrate edge assignments
swing_names <- colnames(data_merge)[7:ncol(data_merge)]
n_permute <- lapply(seq_len(permutations), function(i)
sample(as.character(swing_names), length(swing_names),replace = FALSE))
names(n_permute) <- paste("rand", seq_len(permutations), sep = "")
# calculate swing scores (with permuted names) and return as vector
swing_permute <- list(bplapply(seq_len(permutations), function(i)
swingScore(
data_merge = setNames(data.frame(data_merge),
c(colnames(data_merge)[1:6],
n_permute[[i]])),
pwm_in = pwm_in,
permute = TRUE,
pseudo_count = pseudo_count,
n = i
)))
# returns ordered dataframe; order names and merge
swing_permute_names <- swing_names[order(swing_names)]
swing_permute <- data.frame("kinase" = swing_permute_names,
swing_permute)
colnames(swing_permute) <- c("kinase", names(n_permute))
if (verbose) {
message("[Step3/3] : Calculating p-values")
}
# obtain p-values, two sided
# empirical p-values with a +1 correction so p is never exactly 0
swing_out$p_greater <-
unlist(bplapply(seq_len(nrow(swing_out)), function(i)
(sum(
ifelse(
as.numeric(swing_permute[swing_permute$kinase ==
swing_out$kinase[i], ][2:ncol(swing_permute)]) >
as.numeric(swing_out$swing_raw[i]), 1, 0),
na.rm = TRUE) + 1)
/ (as.numeric(permutations) + 1)))
swing_out$p_less <-
unlist(bplapply(seq_len(nrow(swing_out)), function(i)
(sum(
ifelse(
as.numeric(swing_permute[swing_permute$kinase ==
swing_out$kinase[i], ][2:ncol(swing_permute)]) <
as.numeric(swing_out$swing_raw[i]), 1, 0
), na.rm = TRUE) + 1)
/ (as.numeric(permutations) + 1)))
# order by p-value
swing_out <- swing_out[order(swing_out$p_greater),]
}
if (verbose) {
message("[FINISHED]\n")
# NOTE(review): end_time is computed but never reported
end_time <- Sys.time() - start_time
message("Finish: ", Sys.time())
}
# set option to return a network
network <- NULL
# NOTE(review): logical-to-string comparison; works via coercion but
# isTRUE(return_network) would be the idiomatic test
if (return_network == "TRUE"){
# long-format edge list: one row per substrate/kinase pair
network <- data.table::melt(data_merge,
id.vars = c("annotation"),
measure.vars =
colnames(data_merge[7:ncol(data_merge)]))
# keep "significant" edges
network <- network[network$value == 1, ]
network <- data.frame(
"source" = as.character(network$variable),
"target" = as.character(network$annotation)
)
}
return(list("scores" = swing_out, "network" = network))
}
# describeIn swing This helper function performs the swing score calculation.
#
# data_merge   - merged table from swing(): columns 1-6 hold annotation and
#                binarised fold-change/p-value data including the 'Sipk'
#                direction column; columns 7+ hold one binarised match
#                column per kinase.
# pwm_in       - PWM list from buildPWM(); pwm_in$kinase supplies the number
#                of substrates ('n') per kinase used for weighting.
# pseudo_count - added to both the positive and negative counts to avoid
#                zero division and log(0).
# permute      - when TRUE, return only the raw (unnormalised) swing scores,
#                ordered by kinase name, for permutation testing.
# n            - permutation index; kept for the bplapply() caller, unused.
#
# Returns a data.frame of per-kinase counts and swing scores, or a numeric
# vector of raw scores when permute = TRUE.
swingScore <-
  function(data_merge,
           pwm_in,
           pseudo_count = 1,
           permute = FALSE,
           n = 1) {
    #----------------------------------------------
    # sign each kinase column by the substrate's direction of change
    data_merge <- data_merge$Sipk * data_merge[7:ncol(data_merge)]
    # propagation of NaN here due to small number division, add pseudo-count
    # count significant positive
    p_k <- colSums(data_merge == 1, na.rm = TRUE) + pseudo_count
    # count significant negative
    n_k <- colSums(data_merge == -1, na.rm = TRUE) + pseudo_count
    #all counts (both positive and negative).
    t_k <- p_k + n_k
    #----------------------------------------------
    # compute swing statistics
    pk_ratio <- (p_k)/(t_k)
    nk_ratio <- (n_k)/(t_k)
    swing_raw <- (pk_ratio) / (nk_ratio)
    p_n_all <-
      data.frame(
        "kinase" = colnames(data_merge),
        "pos" = p_k,
        "neg" = n_k,
        "all" = t_k,
        "pk" = pk_ratio,
        "nk" = nk_ratio,
        "swing_raw" = swing_raw
      )
    # include the count numbers (sort = TRUE orders rows by kinase name):
    p_n_all <- merge(p_n_all, pwm_in$kinase, by = "kinase", sort = TRUE)
    # weighted by number of substrates
    # not possible to have zero - must have substrates to build model
    p_n_all$swing_raw <-
      log2(p_n_all$swing_raw) * log2(p_n_all$n)
    # weighted by size of kinase-substrate network:
    p_n_all$swing_raw <-
      p_n_all$swing_raw * log2(p_n_all$all)
    # z-score transform
    p_n_all$swing <-
      (p_n_all$swing_raw - mean(p_n_all$swing_raw, na.rm = TRUE)) /
      sd(p_n_all$swing_raw, na.rm = TRUE)
    # FIX(review): was `permute == "TRUE"` - a logical-to-character
    # comparison that only worked via implicit coercion; isTRUE() is the
    # robust, idiomatic test and behaves identically for TRUE/FALSE input.
    if (isTRUE(permute)) {
      p_n_all <- p_n_all$swing_raw
    }
    return(p_n_all)
  }
|
75c910857c226328ddf684d55f0354897617714e
|
6b3215ae22fb53df23457105f4249e2a7e56bd2e
|
/inst/doc/Intro.R
|
1a61f4303ad9686cdc1ae48f31953d8bf70b4177
|
[] |
no_license
|
cran/lmvar
|
26f45be9ebeb93467ae389ec00f034bb8054c239
|
a40d38d4d227aa3aade534bae73fb4625d4ae96d
|
refs/heads/master
| 2021-01-22T14:15:23.807165
| 2019-05-16T09:10:10
| 2019-05-16T09:10:10
| 82,301,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,130
|
r
|
Intro.R
|
# NOTE(review): this file is knitr-extracted (purl'd) vignette code for the
# 'lmvar' package - chunk markers ("## ----") are generated and should not
# be edited by hand. It fits heteroscedastic linear models on MASS::cats
# and compares them against lm() and alternative packages.
## ---- include=FALSE------------------------------------------------------
knitr::opts_chunk$set(fig.width=6, fig.height=6)
## ---- message = FALSE----------------------------------------------------
# As example we use the dataset 'cats' from the library 'MASS'.
require(lmvar); require(MASS)
# A plot of the heart weight versus the body weight in the data set
plot(cats$Bwt, cats$Hwt, xlab = "Body weight", ylab = "Heart weight")
## ------------------------------------------------------------------------
# Create the model matrix. It only contains the body weight. An intercept term will be added by 'lmvar'.
X = model.matrix(~ Bwt - 1, cats)
# Carry out the fit with the same model matrix for mu (the expected heart weight) and for log sigma (the standard deviation)
fit = lmvar(cats$Hwt, X_mu = X, X_sigma = X)
## ------------------------------------------------------------------------
summary(fit)
## ------------------------------------------------------------------------
nobs(fit)
logLik(fit)
## ------------------------------------------------------------------------
dfree(fit, sigma = FALSE)
## ------------------------------------------------------------------------
fit = lmvar(cats$Hwt, X_mu = X, X_sigma = X, intercept_mu = FALSE, intercept_sigma = FALSE)
## ------------------------------------------------------------------------
dfree(fit, sigma = FALSE)
## ------------------------------------------------------------------------
summary(fit)
## ------------------------------------------------------------------------
# standardized residuals (z-scores) against body weight
sigma = fitted(fit, mu = FALSE)
plot(cats$Bwt, residuals(fit) / sigma, xlab = "Body weight", ylab = "z-score")
abline(0, 0, col = "red")
## ------------------------------------------------------------------------
# fitted means with 95% confidence intervals
mu = fitted(fit, sigma = FALSE)
plot(cats$Bwt, mu, xlab = "Body weight", ylab = "Average heart weight", ylim = c(7, 16))
intervals = fitted(fit, interval = "confidence", level = 0.95)
lwr = intervals[, "mu_lwr"]
upr = intervals[, "mu_upr"]
segments(cats$Bwt, lwr, cats$Bwt, upr)
## ------------------------------------------------------------------------
fit_lm = lm(cats$Hwt ~ Bwt - 1, cats)
## ------------------------------------------------------------------------
# information criteria comparison: lmvar vs. ordinary least squares
AIC(fit); AIC(fit_lm)
BIC(fit); BIC(fit_lm)
## ------------------------------------------------------------------------
plot(cats$Bwt, mu, xlab = "Body weight", ylab = "Average heart weight", ylim = c(7, 16))
segments(cats$Bwt, lwr, cats$Bwt, upr)
points(cats$Bwt, fitted(fit_lm), col = "red")
## ------------------------------------------------------------------------
# fitted standard deviation per observation vs. the constant lm() sigma
plot(cats$Bwt, sigma, xlab = "Body weight", ylab = "St dev of heart weight", ylim = c(1, 2))
lwr = intervals[, "sigma_lwr"]
upr = intervals[, "sigma_upr"]
segments(cats$Bwt, lwr, cats$Bwt, upr)
sigma_lm = summary(fit_lm)$sigma
abline(sigma_lm, 0, col = "red")
## ------------------------------------------------------------------------
coef(fit)
## ------------------------------------------------------------------------
coef(fit, mu = FALSE)
## ------------------------------------------------------------------------
vcov(fit)
## ---- eval=FALSE---------------------------------------------------------
#  require(statmod)
#
#  # Run the regression including intercept terms
#  intercept = rep(1, nrow(cats))
#
#  fit = remlscore( cats$Hwt, cbind(intercept, X), cbind(intercept, X))
## ---- eval=FALSE---------------------------------------------------------
#  require(crch)
#
#  # Run the regression including intercept terms
#  fit = crch(Hwt ~ Bwt | Bwt , data = cats)
## ---- eval=FALSE---------------------------------------------------------
#  require(mgcv)
#
#  # Run the regression including intercept terms
#  fit = gam(list(Hwt ~ Bwt, ~ Bwt) , data = cats, family = gaulss())
## ---- eval=FALSE---------------------------------------------------------
#  require(gamlss)
#
#  # Run the regression including intercept terms
#  fit = gamlss(Hwt ~ Bwt, ~ Bwt, data = cats)
82f381c3feb4ef90080b5e5b84d5fec877818c37
|
d9274644eeb7a0414c7f80a93878681baddc8812
|
/final_code_check/script.R
|
d71136596715d77ee75723d92115fc2447841897
|
[] |
no_license
|
lynnzoo/hfd-research
|
99979cfc7f4068b455d5ba0ae9e9e134782d7d45
|
220588b3c09bb34e5777766816d859a121d7a945
|
refs/heads/master
| 2022-02-17T07:11:23.303625
| 2019-08-23T01:41:26
| 2019-08-23T01:41:26
| 196,340,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,605
|
r
|
script.R
|
#!/usr/bin/Rscript
#######################
## title: "Running Pipeline for HFD Choropleth Maps"
## author: "Shannon Chen"
#######################
## Automatically detect the file path of this file and set the working directory the to folder where this file is located
# NOTE(review): changes the working directory as a side effect and does not
# restore it; callers must pass a path relative to the current getwd().
this_dir <- function(directory)
setwd( file.path(getwd(), directory) )
# setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
## Print out the current working directory
getwd()
## Add the names of all the packages that you used in the pipeline to list.of.packages
list.of.packages <- c("cartography", "rgdal", "gsubfn", "plyr", "rstudioapi")
## Automatically install any necessary packages that have not already been installed
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
## load all the packages that you used in the pipeline here
library("cartography")
library("rgdal")
library("gsubfn")
library("plyr")
library("rstudioapi")
#######################
## title: "Creating the Choropleth Maps for HFD"
## author: "Shannon Chen"
#######################
################### Median EMS Response Times ###################
# Return the rows of `df` whose `year` column equals `yr`, as a data.frame.
yearextract <- function(df, yr) {
  matched_rows <- which(df$year == yr)
  as.data.frame(df[matched_rows, ])
}
#mutates the dataframe to create intervals used in the choropleth map.
breaksplit <- function(df, breaks, idx){
df <- as.data.frame(df)
df$fac <- as.numeric(df[,idx])
cutintervals <- cut(df$fac, breaks)
ranges <- cbind(as.numeric( sub("\\((.+),.*", "\\1", cutintervals) ),
as.numeric( sub("[^,]*,([^]]*)\\]", "\\1", cutintervals) ))
df$fac <- ranges[,2]
return(df)
}
################### Slow EMS ###################
# Keep only the incidents whose response time exceeds the `time` threshold.
slowems <- function(data, time) {
  slow_idx <- which(data$responsetime > time)
  data[slow_idx, ]
}
# Restrict slow-response records to ambulance units: unit codes starting
# with "A" (ambulance) or "M" (medic).
slowambulance <- function(data) {
  data$unit <- as.character(data$unit)
  is_ambulance <- startsWith(data$unit, "A") | startsWith(data$unit, "M")
  data[which(is_ambulance), ]
}
# Tally slow responses per value of `col` (a column name string), delegating
# to plyr::count().
slowcounting <- function(slowdf, col) {
  count(slowdf, col)
}
################### Incident Densities ###################
# Select incidents whose event number encodes year `yr`: event numbers are
# the year times 1e8 plus a serial, so the year-yr range is
# [yr * 1e8, (yr + 1) * 1e8).
incidentyears <- function(data, yr) {
  data$eventnum <- as.numeric(data$eventnum)
  lower <- yr * 100000000
  upper <- (yr + 1) * 100000000
  data[which(data$eventnum >= lower & data$eventnum < upper), ]
}
# Count incidents per station (column given as a string) via plyr::count()
# and return a numeric matrix with columns Station and Count. The first row
# (station 0, which has no jurisdiction) is relabelled as station 1.
incidentcounts <- function(data, colname) {
  counted <- count(data, colname)
  station <- as.numeric(unlist(counted[, 1]))
  freq <- as.numeric(unlist(counted[, 2]))
  incident_grouped <- cbind(station, freq)
  colnames(incident_grouped) <- c("Station", "Count")
  incident_grouped[1, 1] <- 1  # fold station 0 into station 1
  incident_grouped
}
################### Busy Fractions ###################
# Remap station 23 to jurisdiction 22 so the busy-fraction table matches the
# shapefile. NOTE: writes to column 1, which is assumed to be X.
busyfractionfixed <- function(data) {
  rows_23 <- which(data$X == 23)
  data[rows_23, 1] <- 22
  data
}
################### Choropleth Plotting Function ###################
# Render a choropleth map to ./figures/<png_name> at 300 dpi.
#
# shape       - SpatialPolygonsDataFrame of HFD jurisdictions (rgdal).
# data        - data.frame with the values to map.
# variable    - name of the column in `data` to colour by.
# brk         - numeric break points for the colour classes.
# color       - colour palette vector (e.g. from cartography::carto.pal).
# legendtitle / maintitle - legend and plot titles.
# png_name    - output file name inside ./figures/.
#
# Side effect only (writes a PNG); the fixed xlim/ylim crop to Houston.
choroplot <- function(shape, data, variable, brk, color, legendtitle, maintitle, png_name){
png(paste0("./figures/",png_name), width = 8, height = 7, units = 'in', res = 300)
plot(shape, border = NA, col = NA, bg = "#A6CAE0", xlim=c(2891000, 3353840) , ylim= c(13750000, 14038770), main = maintitle)
choroLayer(spdf = shape, df = data, var = variable,
breaks = brk, col = color,
border = "grey40", legend.pos = "right", lwd = 0.5,
legend.title.txt = legendtitle,
legend.values.rnd = 2, add = TRUE)
dev.off()
}
#######################
## title: "Pipeline for HFD Choropleth Maps"
## author: "Shannon Chen"
#######################
# Driver for the full HFD choropleth pipeline: reads the jurisdiction
# shapefile plus the raw CSV inputs, writes intermediate CSVs under ./data,
# and emits every figure PNG under ./figures via choroplot().
#
# @param shapefile_dsn directory containing the jurisdiction shapefile.
# @param shapefile     shapefile layer name (without extension).
# @param med_ems_file, ems_file, incidents_file, distress_file,
#        helping_file, chain_file paths to the raw CSV inputs.
# @return the value of the final choroplot() call (a closed graphics device).
# NOTE: yearextract(), breaksplit(), slowems() etc. are defined elsewhere
# in this file/project.
run_pipeline <- function(shapefile_dsn, shapefile, med_ems_file, ems_file, incidents_file, distress_file, helping_file, chain_file){
  #Shapefile of HFD Station Jurisdictions
  HFD_jurs <- readOGR(dsn=shapefile_dsn, layer=shapefile)
  HFD_jurs <- HFD_jurs[-1, ] #Removed station 0 for aesthetic reasons
  HFD_jurs[which(HFD_jurs$D1==23),1] <- 22 #Changed to 22 because R doesn't recognize 22 as having a jurisdiction
  #Raw files that were uploaded
  med_ems <- read.csv(med_ems_file)
  ems <- read.csv(ems_file)
  incidents <- read.csv(incidents_file)
  distress <- read.csv(distress_file)
  helping <- read.csv(helping_file)
  averagechains <- read.csv(chain_file)
  ################### Median EMS Maps ###################
  # Per-year median response times, bucketed into discrete classes.
  medems12 <- yearextract(med_ems, 2012)
  medems16 <- yearextract(med_ems, 2016)
  medems17 <- yearextract(med_ems, 2017)
  write.csv(medems12, "./data/median_ems2012.csv")
  write.csv(medems16, "./data/median_ems2016.csv")
  write.csv(medems17, "./data/median_ems2017.csv")
  colsmed <- carto.pal(pal1 = "turquoise.pal", n1 = 5) #Median EMS Color Scheme
  medems12_fac <-breaksplit(medems12, c(0, 5, 6, 7, 10, 30), 3)
  medems16_fac <-breaksplit(medems16, c(0, 5, 6, 7, 10, 30), 3)
  medems17_fac <-breaksplit(medems17, c(0, 5, 6, 7, 10, 30), 3)
  write.csv(medems12_fac, "./data/medems2012factorized.csv")
  write.csv(medems16_fac, "./data/medems2016factorized.csv")
  write.csv(medems17_fac, "./data/medems2017factorized.csv")
  # NOTE: the 2016 map is intentionally saved twice (figure10b and figure11b).
  choroplot(HFD_jurs, medems12_fac, "fac", c(5, 6, 7, 10, 30), colsmed, "Median Minutes", "2012 Response Times", "figure10a.png")
  choroplot(HFD_jurs, medems16_fac, "fac", c(5, 6, 7, 10, 30), colsmed, "Median Minutes", "2016 Response Times", "figure10b.png")
  choroplot(HFD_jurs, medems16_fac, "fac", c(5, 6, 7, 10, 30), colsmed, "Median Minutes", "2016 Response Times", "figure11b.png")
  choroplot(HFD_jurs, medems17_fac, "fac", c(5, 6, 7, 10, 30), colsmed, "Median Minutes", "2017 Response Times", "figure12.png")
  ################### Slow EMS Map ###################
  colslow <- carto.pal(pal1 = "sand.pal", n1 = 6) #Slow Ambulances Color Palette
  # Dispatches slower than 10 minutes, restricted to ambulance/medic units.
  slowstuff <- slowems(ems, 10)
  write.csv(slowstuff, "./data/slowems.csv")
  slow_amb <- slowambulance(slowstuff)
  write.csv(slow_amb, "./data/slowunits.csv")
  slowcount <- slowcounting(slow_amb, "incident_juris")
  write.csv(slowcount, "./data/slowunitscount.csv")
  slow_grouped <- breaksplit(slowcount, c(0, 2500, 4500, 7500, 12000, 18000), 2)
  write.csv(slow_grouped, "./data/slowresponsefactorized.csv")
  choroplot(HFD_jurs, slow_grouped, "fac", c(2500, 4500, 7500, 12000, 18000), colslow, "EMS Dispatches\n2011-18", "Slow Response Counts (>10 Minutes)", "figure13.png")
  ################### Incident Density Maps ###################
  colsincidents <- carto.pal(pal1 = "red.pal", n1 = 6) #Incident Map Color Palette
  incidents12 <- incidentyears(incidents, 12)
  incidents16 <- incidentyears(incidents, 16)
  write.csv(incidents12, "./data/incidents2012.csv")
  write.csv(incidents16, "./data/incidents2016.csv")
  incidents12_grouped <- incidentcounts(incidents12, "incident_juris")
  incidents12split <- breaksplit(incidents12_grouped, c(0, 1000, 2000, 3000, 5000, 7000, 10000), 2)
  incidents16_grouped <- incidentcounts(incidents16, "incident_juris")
  incidents16split <- breaksplit(incidents16_grouped, c(0, 1000, 2000, 3000, 5000, 7000, 10000), 2)
  write.csv(incidents12split, "./data/incidents2012factorized.csv")
  write.csv(incidents16split, "./data/incidents2016factorized.csv")
  choroplot(HFD_jurs, incidents12split , "fac", c(1000, 2000, 3000, 5000, 7000, 10000), colsincidents , "Counts", "2012 All Incidents", "figure7a.png")
  choroplot(HFD_jurs, incidents16split , "fac", c(1000, 2000, 3000, 5000, 7000, 10000), colsincidents , "Counts", "2016 All Incidents", "figure7b.png")
  choroplot(HFD_jurs, incidents16split , "fac", c(1000, 2000, 3000, 5000, 7000, 10000), colsincidents , "Counts", "2016 All Incidents", "figure11a.png")
  ################### Busy Fractions Maps ###################
  colsbusy <- carto.pal(pal1 = "orange.pal", n1 = 5) #Busy Fraction Color Palette
  distress_grouped <- breaksplit(distress, c(0, 0.2, 0.3, 0.4, 0.6, 0.7), 2)
  write.csv(distress_grouped, "./data/distressfractiongrouped2018.csv")
  # Only the helping fractions need the 23 -> 22 station remap.
  helpingfixed <- busyfractionfixed(helping)
  helping_grouped <- breaksplit(helpingfixed, c(0, 0.2, 0.3, 0.4, 0.6, 0.7), 2)
  write.csv(helping_grouped, "./data/helpingfractiongrouped2018.csv")
  choroplot(HFD_jurs, distress_grouped, "fac", c(0.2, 0.3, 0.4, 0.6, 0.7), colsbusy, "2018", "Distress Fractions", "distress_fractions2018.png")
  choroplot(HFD_jurs, helping_grouped, "fac", c(0.2, 0.3, 0.4, 0.6, 0.7), colsbusy, "2018", "Helping Fractions", "figure14.png")
  ################## Average Chain Length Maps ########################
  colschain <- carto.pal(pal1="turquoise.pal", n1=5) #Average Chain Length Color Palette
  avchain_grouped <- breaksplit(averagechains, c(0, 1.12, 1.20, 1.28, 1.36, 1.44), 2)
  write.csv(avchain_grouped, "./data/averagechainsfactorized.csv")
  choroplot(HFD_jurs, avchain_grouped, "fac", c(1.12, 1.20, 1.28, 1.36, 1.44), colschain, "Chain Analaysis", "Average Chain Length", "figure22.png")
}
## call the driver function to run the entire automated data wrangling pipeline
# Run the whole pipeline against the checked-in raw files.
# Paths are relative to the project root, so run this script from there.
# NOTE: run_pipeline() is called for its file side effects; `hfd_maps`
# captures its (graphics-device) return value only.
hfd_maps <- run_pipeline(shapefile_dsn = "data/raw", shapefile="Still_Alarms_012319", med_ems_file = "data/median_by_station.csv",
                         ems_file= "data/data_ems.csv", incidents_file = "data/incident_jurisdictions.csv", distress_file = "data/distress_fractions.csv",
                         helping_file = "data/helping_fractions.csv", chain_file="data/average_chain_lengths.csv")
|
0c056b5b601014e2c0a55f09f51bb6b757dc5947
|
6cb339f74f32213420a1ad2b535a7d819231581d
|
/cbind.fill.R
|
cf16bb8ded380ab547fe6f3971c4118713389915
|
[] |
no_license
|
MHS-R/miscHelper
|
879518d6d3e8229cc84c37f5c34dc38c4c4400da
|
16cffcd7aa96e5e97db5b2a81b1468a74fe4f65e
|
refs/heads/master
| 2021-01-25T06:18:03.605379
| 2017-06-15T15:31:04
| 2017-06-15T15:31:04
| 93,549,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 574
|
r
|
cbind.fill.R
|
#' Column-bind objects with differing numbers of rows
#'
#' Like \code{cbind()}, but pads shorter inputs with \code{NA} rows so that
#' every input matches the row count of the tallest one.
#' (The previous roxygen block described an unrelated stratified-sampling
#' function — a copy-paste error — and has been corrected.)
#'
#' @param ... Matrices or data.frames (anything coercible via
#'   \code{as.matrix()}) to bind column-wise.
#' @return A matrix with \code{max(nrow)} rows; shorter inputs are padded
#'   with \code{NA}.
#' @note The dotted name looks like an S3 method of \code{cbind()} for class
#'   "fill"; it is kept unchanged for backward compatibility.
#' @references http://stackoverflow.com/questions/7962267/cbind-a-df-with-an-empty-df-cbind-fill
#' @export cbind.fill
cbind.fill <- function(...){
  inputs <- lapply(list(...), as.matrix)
  # vapply() instead of sapply() so an empty/odd input cannot silently
  # change the result type.
  n <- max(vapply(inputs, nrow, integer(1)))
  padded <- lapply(inputs, function(x)
    rbind(x, matrix(NA, n - nrow(x), ncol(x))))
  do.call(cbind, padded)
}
|
1b9efc1280bc2a3557f1a26363f416417694fa06
|
117ee80f5a04d6dd69d665eaa5d5b50c997c7faf
|
/analysis/dropout/simulate_dropout.R
|
3a781d1a72b425f538d11cf2cf3ba2c91fb79ea8
|
[] |
no_license
|
kieranrcampbell/phenopath_revisions
|
1a60f4e3a8d330dea73af435492cdf4f604af996
|
2c52ed25cedb0abb49465e6bf71b1b25640fd068
|
refs/heads/master
| 2021-03-16T08:28:27.378349
| 2018-10-11T18:11:06
| 2018-10-11T18:11:06
| 103,834,518
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,145
|
r
|
simulate_dropout.R
|
library(SummarizedExperiment)
library(scater)
library(dplyr)
library(tidyr)
library(magrittr)
library(ggplot2)
library(polyester)
library(Biostrings)
library(readr)
source("analysis/simulations/simulate_funcs.R")
set.seed(12345L)
# Fit a logistic dropout curve from the QC'd TEC SCESet and return it as a
# closure mapping mean expression -> dropout probability.
#
# Reads data/dropout/tec_sceset_qc.rds (a scater SCESet), keeps genes with
# any expression, regresses each gene's dropout proportion on its mean
# count, and returns the fitted inverse-logit curve.
#
# @return function(x) giving P(dropout) for mean expression x.
get_dropout_relationship <- function() {
  sce_rds <- "data/dropout/tec_sceset_qc.rds"
  sce <- readRDS(sce_rds)
  cc <- assayData(sce)[['counts']]
  cc <- cc[rowMeans(cc) > 0, ] # genes that are expressed at all
  # per-gene dropout proportion vs. per-gene mean count
  pdrop_df <- data.frame(p_dropout = rowMeans(cc == 0), base_expression = rowMeans(cc))
  fit <- glm(p_dropout ~ base_expression, data = pdrop_df, family = "binomial")
  coefs <- coef(fit)
  # inverse-logit of the fitted linear predictor
  pdrop <- function(x) 1 / (1 + exp(-(coefs[1] + coefs[2] * x)))
  pdrop
}
# Convert a simulated cell-by-gene matrix to long (tidy) form, attaching
# pseudotime, covariate and simulation-type columns to every row.
tidy_sim_data <- function(mat, pst, x, sim_str) {
  wide <- as_data_frame(mat)
  colnames(wide) <- paste0(sim_str, "_gene_", seq_len(ncol(mat)))
  wide <- mutate(wide, pst, x, sim_str)
  gather(wide, gene, expression, -pst, -x, -sim_str)
}
#' Simulate the mean function for sigmoidal interactions
#' @param N Number of cells
#' @param G Number of genes
#' @param prop_interaction Proportion of genes exhibiting interaction, in [0,1]
#' Simulate the mean function for sigmoidal interactions
#' @param N Number of cells
#' @param G Number of genes
#' @param prop_interaction Proportion of genes exhibiting interaction, in [0,1]
#' @return Long data frame (one row per cell/gene) with columns pst, x,
#'   sim_str, gene, expression.
#' NOTE: f_pseudotime_sample() and fx_1..fx_4() come from
#'   analysis/simulations/simulate_funcs.R, sourced at the top of this file.
simulate_mean_function <- function(N, G, prop_interaction){
  pst <- runif(N)
  x <- sample(c(0, 1), N, replace = TRUE)
  # Split genes into pseudotime-only vs. interaction genes, then divide the
  # interaction genes across the four interaction classes (the last class
  # absorbs the rounding remainder).
  G_pst <- round(G * (1 - prop_interaction))
  G_int <- G - G_pst
  G_class <- rep(round(G_int / 4), 3)
  G_class <- c(G_class, G_int - sum(G_class))
  # Per-gene sigmoid parameters: steepness k, midpoint t0, amplitude mu0.
  k_lower <- 2
  k_upper <- 20
  sample_k <- function() runif(1, k_lower, k_upper)
  t0_lower <- 0.2
  t0_upper <- 0.8
  sample_t0 <- function() runif(1, t0_lower, t0_upper)
  mu0_lower <- 3
  mu0_upper <- 6
  sample_mu0 <- function() runif(1, mu0_lower, mu0_upper)
  y_pst_only <- replicate(G_pst, f_pseudotime_sample(pst, sample_k(), sample_t0(), sample_mu0()))
  df_pst_only <- tidy_sim_data(y_pst_only, pst, x, "pseudotime_only")
  y_interaction_1 <- replicate(G_class[1], fx_1(pst, x, sample_k(), sample_t0(), sample_mu0()))
  df_interaction_1 <- tidy_sim_data(y_interaction_1, pst, x, "int_type_1")
  y_interaction_2 <- replicate(G_class[2], fx_2(pst, x, sample_k(), sample_t0(), sample_mu0()))
  df_interaction_2 <- tidy_sim_data(y_interaction_2, pst, x, "int_type_2")
  y_interaction_3 <- replicate(G_class[3], fx_3(pst, x, sample_k(), sample_t0(), sample_mu0()))
  df_interaction_3 <- tidy_sim_data(y_interaction_3, pst, x, "int_type_3")
  y_interaction_4 <- replicate(G_class[4], fx_4(pst, x, sample_k(), sample_t0(), sample_mu0()))
  df_interaction_4 <- tidy_sim_data(y_interaction_4, pst, x, "int_type_4")
  bind_rows(
    df_pst_only,
    df_interaction_1,
    df_interaction_2,
    df_interaction_3,
    df_interaction_4
  )
}
# Reshape the long simulated data back to a gene-by-cell expression matrix.
#
# @param df long data frame from simulate_mean_function().
# @return list with exprsmat (genes x cells), the per-cell pst and x
#   vectors, and a logical is_interaction flag per gene (derived from the
#   "int" prefix in the generated gene names).
mean_to_wide <- function(df) {
  ydf <- select(df, -sim_str) %>%
    spread(gene, expression)
  # cells become columns after transposing the cell-by-gene matrix
  ymat <- select(ydf, -pst, -x) %>% as.matrix() %>% t()
  colnames(ymat) <- paste0("cell_", seq_len(ncol(ymat)))
  is_interaction <- grepl("int", rownames(ymat))
  list(
    exprsmat = ymat,
    pst = ydf$pst,
    x = ydf$x,
    is_interaction = is_interaction
  )
}
#' Simulate the mean function for sigmoidal interactions
#' @param N Number of cells
#' @param G Number of genes
#' @param prop_interaction Proportion of genes exhibiting interaction, in [0,1]
#' @param replication The particular replication for saving files
#' @param ad Additional dropout (proportion of dropout to add to a dataset)
#' Simulate the mean function for sigmoidal interactions
#' @param N Number of cells
#' @param G Number of genes
#' @param prop_interaction Proportion of genes exhibiting interaction, in [0,1]
#' @param replication The particular replication for saving files
#' @param noise Noise level, "low" or "high" (controls the NB dispersion)
#' @param ad Additional dropout (proportion of dropout to add to a dataset)
#' @return invisibly NULL; saves an SCESet/SingleCellExperiment RDS under
#'   data/dropout/scesets/ as its side effect.
simulate_counts <- function(N, G, prop_interaction, replication, noise, ad) {
  # The signature string for this run, to be used in output files
  # So we can always parse what comes back
  sig_str <- paste0("ad_", ad,
                    "_rep_", replication)
  df <- simulate_mean_function(N, G, prop_interaction)
  sim <- mean_to_wide(df)
  pos_gex <- 2^sim$exprsmat - 1 # Make expression positive
  # Draw negative-binomial counts per gene; NB() comes from
  # simulate_funcs.R. Low noise ties dispersion to the mean.
  count_mat <- sapply(seq_len(nrow(pos_gex)), function(i) {
    x <- pos_gex[i,]
    if(noise == "low") {
      return(NB(x, x / 3 + 1))
    } else {
      return(NB(x, 1))
    }
  })
  # NOTE(review): subtracting 1 here presumably undoes a +1 shift inside
  # NB() — confirm against simulate_funcs.R.
  count_mat <- t(count_mat) - 1
  rownames(count_mat) <- rownames(sim$exprsmat)
  colnames(count_mat) <- colnames(sim$exprsmat)
  # Time to add in dropout -------
  pdrop <- get_dropout_relationship()
  # how many counts are non-zero?
  is_zero <- count_mat == 0
  original_prop_zero <- mean(is_zero)
  non_zero_counts <- sum(!is_zero)
  # of these we want to set proportion ad to 0
  num_to_zero <- ad * non_zero_counts
  potential_zero <- which(!is_zero)
  # probabilities of being set to zero -
  # sampled proportionally to the empirically fitted dropout curve
  p_zero <- pdrop(count_mat[potential_zero])
  to_zero <- sample(potential_zero, num_to_zero, prob = p_zero)
  count_mat[to_zero] <- 0
  new_prop_zero <- mean(count_mat == 0)
  if(any(count_mat < 0)) stop("Negative counts!")
  output_sce_file <- file.path("data", "dropout", "scesets",
                               paste0("sceset_", sig_str, ".rds"))
  pdata <- data.frame(pst = sim$pst, x = sim$x,
                      original_prop_zero = original_prop_zero,
                      new_prop_zero = new_prop_zero,
                      proportion_set_to_zero = ad)
  rownames(pdata) <- colnames(count_mat)
  fdata <- data.frame(is_interaction = sim$is_interaction)
  rownames(fdata) <- rownames(count_mat)
  # Build the container that matches the installed Bioconductor version:
  # SingleCellExperiment (Bioc > 3.5) or the legacy SCESet.
  bioc_version <- tools:::.BioC_version_associated_with_R_version()
  sce <- NULL
  if(bioc_version > 3.5) {
    library(SingleCellExperiment)
    sce <- SingleCellExperiment(assays = list(counts = count_mat),
                                colData = pdata,
                                rowData = fdata)
    sce <- normaliseExprs(sce)
  } else {
    sce <- newSCESet(countData = count_mat,
                     phenoData = new("AnnotatedDataFrame", pdata),
                     featureData = new("AnnotatedDataFrame", fdata))
  }
  saveRDS(sce, output_sce_file)
}
## Make sure these line up with the snakefile
# Simulation grid: 200 cells x 500 genes, 20% interaction genes, high
# noise, 40 replicates per additional-dropout level.
N <- 200
G <- 500
# prop_interactions <- c(0.05, 0.1, 0.2, 0.3, 0.4, 0.5)
pi <- 0.2
reps <- 40
# noises <- c("low", "high")
noise <- "high"
additional_dropout <- c(0.05, 0.1, 0.2, 0.5, 0.8, 0.9)
# One SCESet RDS is written per (replicate, dropout-level) combination.
for(r in seq_len(reps)) {
  for(ad in additional_dropout) {
    simulate_counts(N, G, pi, r, noise, ad)
  }
}
|
3b8204a5b4baed74df194a9c0c15e8ed476fdea4
|
6542f5b12f43a4021e9defd0f230e1e3ce719432
|
/regresionLogistica.R
|
fe2676f47d5fdf155006b68d119376392165dcdd
|
[] |
no_license
|
andreamcm/HDT6RegresionLogistica
|
f9434d49fc7589588c9b7d034aa52462e6c04b01
|
a720b486bc7f9f77f6d850fd0aaee34e1fdbd061
|
refs/heads/master
| 2020-05-02T16:19:35.061664
| 2019-04-01T07:09:13
| 2019-04-01T07:09:13
| 178,065,228
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,622
|
r
|
regresionLogistica.R
|
#-----------------------------------------------------------------------------------------------------------------------------------------------
# Universidad del Valle de Guatemala
# Autores: Andrea Maria Cordon Mayen, 16076
# Cristopher Sebastian Recinos RamÃ?rez, 16005
# Fecha: 18/03/2019
# arboles.R
#-----------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------
# Librerias a utilizar
library(caret)
install.packages("fmsb")
library(fmsb)
install.packages("e1071")
library(e1071)
install.packages("mlr")
library(mlr)
install.packages("dummy")
library(dummy)
#-----------------------------------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------------------------------
# Environment setup and general variables
# --------------------------------------
# Andrea's working directory
setwd("~/2019/UVG/Primer Semestre/Minería de Datos/Laboratorios/Laboratorio6/HDT6RegresionLogistica/Datos")
# Sebastian's working directory
# NOTE(review): this second setwd() overrides the first; comment out
# whichever does not apply to your machine.
setwd("C:/Users/sebas/Documents/UVG/201901/Mineria/Laboratorio5/HDT5NaiveBayes/Datos")
# Load all the data
datos <- read.csv("train2.csv")
str(datos) # Variable types of the data set's columns
#-----------------------------------------------------------------------------------------------------------------------------------------------
# Logistic Regression
#-----------------------------------------------------------------------------------------------------------------------------------------------
porcentaje<-0.7
set.seed(123)
datos<-cbind(datos,dummy(datos,verbose = T))
# Training and test data (70/30 split)
corte <- sample(nrow(datos), nrow(datos)*porcentaje)
train <- datos[corte, ]
test <- datos[-corte, ]
modelo <- glm(AdoptionSpeed~., data = train[,c(1, 3:18)],family = binomial(), maxit = 10000)
# NOTE(review): this second fit OVERWRITES the AdoptionSpeed model above and
# references Species_virginica / columns 1:4 — which look copied from an
# iris example. Verify these columns exist in train2.csv before running.
modelo<-glm(Species_virginica~., data = train[,c(1:4,8)],family = binomial(), maxit=100)
#-------------------------------------------------
# Logistic Regression
#-------------------------------------------------
## Model with all variables
pred<-predict(modelo,newdata = test[,1:4], type = "response")
prediccion<-ifelse(pred>=0.5,1,0)
confusionMatrix(as.factor(test$Species_virginica),as.factor(prediccion))
|
94cbc57c23a179fc47e805e46d192eeea0146c9a
|
74453745dd2a15c8e310e8f4446ccada9702435e
|
/R/index.R
|
db6236fd865d07d196fc29fd850b2ecabb534e6d
|
[
"MIT"
] |
permissive
|
rstudio/renv
|
ffba012525e8b1e42094899c3df9952b54ecb945
|
8c10553e700cad703ddf4dd086104f9d80178f3a
|
refs/heads/main
| 2023-08-29T08:45:28.288471
| 2023-08-28T22:01:19
| 2023-08-28T22:01:19
| 159,560,389
| 958
| 169
|
MIT
| 2023-09-14T00:55:28
| 2018-11-28T20:25:39
|
R
|
UTF-8
|
R
| false
| false
| 4,622
|
r
|
index.R
|
# Package-level in-memory cache of index entries, keyed by scope.
the$index <- new.env(parent = emptyenv())
# Read or update a cached value in the on-disk index for `scope`.
#
# @param scope index namespace (subdirectory under the index root).
# @param key   cache key; when NULL the whole index for `scope` is returned.
# @param value value to store (and return) on a cache miss, or the fallback
#   returned when the index is disabled or unusable.
# @param limit entry lifetime in seconds (default one hour).
# @return the cached value, the freshly stored `value`, or (key = NULL)
#   the full index list.
index <- function(scope, key = NULL, value = NULL, limit = 3600L) {
  enabled <- renv_index_enabled(scope, key)
  if (!enabled)
    return(value)
  # resolve the root directory
  root <- renv_paths_index(scope)
  # make sure the directory we're indexing exists
  memoize(
    key = root,
    value = ensure_directory(root, umask = "0")
  )
  # make sure the directory is readable / writable
  # otherwise, attempts to lock will fail
  # https://github.com/rstudio/renv/issues/1171
  if (!renv_index_writable(root))
    return(value)
  # resolve other variables
  key <- if (!is.null(key)) renv_index_encode(key)
  now <- as.integer(Sys.time())
  # acquire index lock (released automatically when the scope exits)
  lockfile <- file.path(root, "index.lock")
  renv_scope_lock(lockfile)
  # load the index file
  index <- tryCatch(renv_index_load(root, scope), error = identity)
  if (inherits(index, "error"))
    return(value)
  # return index as-is when key is NULL
  if (is.null(key))
    return(index)
  # check for an index entry, and return it if it exists
  item <- renv_index_get(root, scope, index, key, now, limit)
  if (!is.null(item))
    return(item)
  # otherwise, update the index
  renv_index_set(root, scope, index, key, value, now, limit)
}
# Load the on-disk index for a scope, using the filebacked cache so repeated
# reads of an unchanged index.json are cheap.
renv_index_load <- function(root, scope) {
  index_path <- file.path(root, "index.json")
  filebacked(
    context = "renv_index_load",
    path = index_path,
    callback = renv_index_load_impl
  )
}
# Read index.json from disk. A corrupt or unreadable file is deleted and an
# empty index is returned, so a damaged cache self-heals instead of erroring.
renv_index_load_impl <- function(path) {
  contents <- tryCatch(
    withCallingHandlers(
      renv_json_read(path),
      warning = function(w) invokeRestart("muffleWarning")
    ),
    error = identity
  )
  if (!inherits(contents, "error"))
    return(contents)
  unlink(path)
  list()
}
# Look up a single entry in the loaded index.
#
# Returns NULL on any miss: no entry, expired entry, or missing data file.
# Hits are promoted into the in-memory cache (the$index) so subsequent
# lookups skip the readRDS().
renv_index_get <- function(root, scope, index, key, now, limit) {
  # check for index entry
  entry <- index[[key]]
  if (is.null(entry))
    return(NULL)
  # see if it's expired
  if (renv_index_expired(entry, now, limit))
    return(NULL)
  # check for in-memory cached value
  value <- the$index[[scope]][[key]]
  if (!is.null(value))
    return(value)
  # otherwise, try to read from disk
  data <- file.path(root, entry$data)
  if (!file.exists(data))
    return(NULL)
  # read data from disk
  value <- readRDS(data)
  # add to in-memory cache (creating the per-scope environment on demand)
  the$index[[scope]] <-
    the$index[[scope]] %||%
    new.env(parent = emptyenv())
  the$index[[scope]][[key]] <- value
  # return value
  value
}
# Store a value in the index: write the payload RDS, prune expired entries,
# and atomically update index.json (write-to-temp + rename). Returns the
# stored value so index() can return it directly on a cache miss.
renv_index_set <- function(root, scope, index, key, value, now, limit) {
  # force promises
  force(value)
  # files being written here should be shared
  renv_scope_umask("0")
  # write data into index
  data <- tempfile("data-", tmpdir = root, fileext = ".rds")
  ensure_parent_directory(data)
  saveRDS(value, file = data, version = 2L)
  # clean up stale entries
  index <- renv_index_clean(root, scope, index, now, limit)
  # add index entry (timestamp + relative path to the payload file)
  index[[key]] <- list(time = now, data = basename(data))
  # update index file
  path <- file.path(root, "index.json")
  ensure_parent_directory(path)
  # write to tempfile and then copy to minimize risk of collisions
  tempfile <- tempfile(".index-", tmpdir = dirname(path), fileext = ".json")
  renv_json_write(index, file = tempfile)
  file.rename(tempfile, path)
  # return value
  value
}
# Encode an arbitrary key into a stable hash suitable for use as an index
# entry name; the hash computation is memoized on the stringified key.
renv_index_encode <- function(key) {
  text <- stringify(key)
  memoize(text, renv_hash_text(text))
}
# Drop expired entries from the index, evicting their cached files as a side
# effect, and return the surviving entries.
renv_index_clean <- function(root, scope, index, now, limit) {
  # TRUE for entries to keep; renv_index_clean_impl() also deletes the
  # on-disk payload of the expired ones
  keep <- enum_lgl(
    index,
    renv_index_clean_impl,
    root  = root,
    scope = scope,
    index = index,
    now   = now,
    limit = limit
  )
  index[keep]
}
# Per-entry cleanup helper: returns TRUE when the entry is still fresh;
# otherwise evicts it from both the in-memory and on-disk caches and
# returns FALSE.
renv_index_clean_impl <- function(key, entry, root, scope, index, now, limit) {
  if (!renv_index_expired(entry, now, limit))
    return(TRUE)
  # expired: remove from the in-memory cache
  cached <- the$index[[scope]]
  cached[[key]] <- NULL
  # and delete the payload file on disk
  unlink(file.path(root, entry$data), force = TRUE)
  FALSE
}
# Has this index entry outlived its allowed lifetime (in seconds)?
renv_index_expired <- function(entry, now, limit) {
  age <- now - entry$time
  age >= limit
}
# Is indexing enabled? Controlled by the "renv.index.enabled" option and on
# by default; scope/key are accepted for future per-scope control.
renv_index_enabled <- function(scope, key) {
  enabled <- getOption("renv.index.enabled", default = TRUE)
  enabled
}
# Can the current user read/write/execute the index root? Memoized per root
# since file.access() is comparatively expensive.
renv_index_writable <- function(root) {
  writable <- unname(file.access(root, 7L) == 0L)
  memoize(key = root, value = writable)
}
# in case of emergency, break glass
renv_index_reset <- function(root = NULL) {
root <- root %||% renv_paths_index()
lockfiles <- list.files(root, pattern = "^index\\.lock$", full.names = TRUE)
unlink(lockfiles)
}
|
629966b86f6e6692d3ee7f56c6f4ba59cae033f0
|
173e8e734ee2d8e3eeeac0a86ce06ab48db15a08
|
/man/Corner_text.Rd
|
747af63a09bd4c77d96a1d31d134d78974e696f6
|
[
"MIT"
] |
permissive
|
aemon-j/gotmtools
|
b0ba213f83d0f3ccbee9f34b7efc69fc0e0dc86a
|
4eb90e9e6ad960c36a78cc51e9c77b4d826a6197
|
refs/heads/main
| 2023-04-28T07:43:19.483815
| 2021-01-28T19:17:37
| 2021-01-28T19:17:37
| 220,067,540
| 5
| 5
|
MIT
| 2022-02-04T15:27:59
| 2019-11-06T18:52:40
|
R
|
UTF-8
|
R
| false
| true
| 437
|
rd
|
Corner_text.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Corner_text.R
\name{Corner_text}
\alias{Corner_text}
\title{Add text to the corner of a plot}
\usage{
Corner_text(text, location = "topleft")
}
\arguments{
\item{text}{text to be added to the plot as a language object}
\item{location}{location within the plot for the text to be placed}
}
\value{
Called primarily for its side effect of adding \code{text} to the current plot.
}
\description{
Add text to a location within a plot.
}
|
86b58266f14fd3c549d28c535ede6d4715134572
|
813e8ed246baf7ff8d039df4fb92b0b97e6ddad8
|
/e-commerce tweet.R
|
d75c3c854eb3bbd90ad0a342d5de155ed4846d21
|
[] |
no_license
|
iqbalhanif/Twitter-Crawling
|
94c40d342d6465b63591111aa54ce409d95249ff
|
deaba7ed0c84035f6f3653d55e60b4db999d2ee9
|
refs/heads/master
| 2020-06-02T20:47:17.239429
| 2020-02-04T02:56:25
| 2020-02-04T02:56:25
| 191,304,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,566
|
r
|
e-commerce tweet.R
|
install.packages("rtweet")
install.packages("httpuv")
install.packages("openssl")
install.packages("httpuv")
install.packages("TweetR")
install.packages("devtools")
library(rtweet)
library(jsonlite)
library(magrittr)
library(dplyr)
library(stringr)
library("httr")
library(httpuv)
library(openssl)
library(devtools)
##set work directory
setwd('D:/DATA/Desktop/')
##create token
# NOTE(review): a, b, c, d, e must be defined (each user's own credentials)
# before this script runs; they are intentionally not committed.
twitter_token <- create_token(app = a,
                              consumer_key = b,
                              consumer_secret = c,
                              access_token = d,
                              access_secret = e)
##note: each token can only fetch a limited number of tweets; extra tokens
##are needed for larger crawls. a,b,c,d,e are each user's own credentials.
##e-commerce keywords: brand name OR official account OR support account
keywords <- c("BLANJA OR blanjacom",
              "Blibli OR bliblidotcom OR BlibliCare",
              "Bukalapak OR bukalapak OR BukaBantuan",
              "Elevenia OR eleveniaID OR eleveniacare",
              "JD.ID OR JDid OR csjd_id",
              "Lazada OR LazadaID OR LazadaIDCare",
              "Matahari Mall OR MatahariMallCom OR MatahariMallCS",
              "Shopee OR ShopeeID OR ShopeeCare",
              "Tokopedia OR tokopedia OR TokopediaCare"
)
##create directories for output (Rds / JSON / CSV per keyword)
sapply(c("out_rds", "out_json", "out_csv"), dir.create, showWarnings=FALSE)
##looping for get tweet
## Crawl tweets for each keyword and persist them as Rds, JSON and CSV.
for (key in keywords){
  message(key)
  # adjust n to control the maximum number of tweets fetched per keyword
  twit <- search_tweets(
    key,
    n = 18000, include_rts = TRUE,
    token = twitter_token
  )
  # output file name derived from the keyword
  file_name <- key %>% str_replace_all("\\s+", "-") %>% str_to_lower()
  twit %>%
    saveRDS(file = file.path("out_rds", paste0(file_name, ".Rds")))
  # BUG FIX: the original serialized an undefined object `rt` (copied from
  # the rtweet docs); the crawled data lives in `twit`.
  toJSON(twit, auto_unbox = TRUE) %>%
    cat(file = file.path("out_json", paste0(file_name, ".json")))
  # flatten the tweet data for CSV export
  twitDf <- twit %>%
    select(status_id:retweet_count, place_full_name)
  # normalize the tweet text (strip newlines, escape double quotes)
  twitDf$text <- twitDf$text %>%
    str_replace_all("\r|\n", " ") %>%
    str_replace("\"", "'")
  # collapse list-columns (hashtags, mentions) to comma-separated strings
  twitDf$hastags <- sapply(twit$hashtags, paste, collapse = ",")
  twitDf$mentions_screen_name <- sapply(twit$mentions_screen_name, paste, collapse = ",")
  # final output
  twitDf %>%
    write.table(file = file.path("out_csv", paste0(file_name, ".csv")),
                row.names = FALSE,
                na = "",
                sep = ",",
                append = TRUE)
}
|
b72da82118651aef3e311690c27fee9169c226c5
|
d8763dda7b7d7010df41aed1727346c765332de8
|
/rl/ssteps6/findWayFinLearn2.R
|
087b761f75e310e0856660e83fb85200f6943f4e
|
[] |
no_license
|
shadogray/MA-ICA
|
9a5aa9e9a5e9810cfd10b0c9a1ffad572b3d6329
|
6b48b3fd418ed3e187dfd2603e2f062df2996860
|
refs/heads/master
| 2020-12-31T11:24:38.442199
| 2020-02-07T20:49:03
| 2020-02-07T20:49:03
| 239,016,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,879
|
r
|
findWayFinLearn2.R
|
library(tidyverse)
#library(timeSeries)
#library(corrr)
library(lubridate)
source('finlearn.R')
# ---- Command-line flags and run configuration ----
# args[1]: comma-separated signal ids (sig.v)
# args[2]: comma-separated original-signal ids (orig.s)
# args[3]: flag string — T=test, L=local, p/P toggles plotting,
#          r/R toggles printing (lowercase disables, uppercase enables).
sig.v <- NULL
orig.s <- NULL
test <- F
evalPlot <- T
evalPrint <- T
local <- F
args <- commandArgs(trailingOnly = T)
if (length(args) > 0) sig.v <- unlist(strsplit(args[1],','))
if (length(args) > 1) orig.s <- unlist(strsplit(args[2],','))
if (length(args) > 2) {
  test <- str_detect(args[3],'T')
  local <- str_detect(args[3],'L')
  if (str_detect(args[3],'[pP]')) evalPlot <- !str_detect(args[3],'p')
  if (str_detect(args[3],'[rR]')) evalPrint <- !str_detect(args[3],'r')
}
node <- Sys.info()['nodename']
# logging switches (deblog forces nlog on)
nlog = T
deblog = F
if (deblog) {
  nlog = T
}
# wide console output for tibble dumps
options(tibble.width = Inf)
options(width = 2000)
#options(error = function(){traceback(1);print();stop()})
# ---- RL hyperparameters ----
numIter = 100000
deltaSumR = 2*.001
epsilon = .0
alpha = .1
gamma = .5
# NOTE(review): factor('Q','SARSA') passes 'SARSA' as the `levels` argument,
# yielding a factor of NA — looks unintended, though ctrl.types appears
# unused below; `type` is what selects the algorithm.
ctrl.types = factor('Q','SARSA')
type = 'SARSA'
#args <- commandArgs(trailingOnly = T)
#if (length(args) > 0) type = args[1]
#######
# Run one episode of findWay() against the environment and return its
# result with the base signals (env$B1, env$B2, ...) bound into a tibble.
#
# NOTE: mutates `env` (resets SumR to 0) before running; findWay() is
# defined in finlearn.R, sourced at the top of this script.
getTrack <- function(env = myEnv) {
  env$SumR <- 0
  result <- findWay(env)
  # collect every env$B<digit> column into one tibble of base signals
  b <- tibble(.rows = length(env$B1))
  for (n in names(env)[grep('B\\d',names(env))]) b <- bind_cols(b, env[n])
  # ensure the track has a time index column `t`
  if (!has_name(result$track,'t')) result$track <- result$track %>% rowid_to_column('t')
  result
}
# Print the full episode track followed by a one-line run summary
# (algorithm type, iteration count, terminal reward, cumulative reward,
# and the learned weights).
printTrack <- function(res, env = res$myEnv, track = res$track) {
  print(track, n = nrow(track))
  finalR <- track %>% filter(res.terminal) %>% select(res.R)
  summary_line <- paste('Type:', type,
                        'Iterations:', env$Iterations,
                        'FinalR:', finalR,
                        'SumR:', toString(env$SumR), 'W:', toString(env$W))
  print(summary_line)
}
# Render the full set of diagnostic plots for one ICA/RL run to PNG files:
# state-evaluation densities, the base/ICA/weighted signal series, and the
# policy's state track and reward curves. File names are suffixed with the
# run's numSigs/numOrig configuration.
#
# @param res   result list from getTrack()/findWay().
# @param env   the run's environment (defaults to res$myEnv).
# @param track the episode track tibble (defaults to res$track).
plotResults <- function(res, env = res$myEnv, track = res$track) {
  print(paste0('processing result: ',env$numSigs,'/',env$numOrig,': plots'))
  titleSfx <- paste0('ICA',env$numSigs,'/',env$numOrig,': ')
  fileSfx <- paste0('S_density_ica_',env$numSigs,'_',env$numOrig)
  # base signals rescaled back to the original amplitude
  b <- as_tibble(as.matrix(res$b)*env$baseX)
  # mean learned weights over the last 200 log entries
  w.s <- colMeans(env$log %>% tail(200) %>% select(matches('W\\.S.')))
  W <- env$W / sum(abs(env$W))
  fs <- env$FS %>% rowid_to_column('t')
  # weighted ICA signal columns (<name>.w) plus their sum S.w
  for (n in env$xColNames) fs[paste0(n,'.w')] <- w.s[paste0('W.',n)]*env$X[n]
  fs$S.w <- rowSums(fs %>% select(matches('^S(\\d|C)\\.w$')))
  # one density plot per pair of state dimensions
  comb <- gtools::combinations(env$xColNum-1,2)
  for (i in 1:nrow(comb)) {
    si <- paste0('SVal.S',comb[i,])
    #ggplot(env$S %>% select(si,n,sig.p) %>% group_by_at(c(si,'sig.p')) %>% summarize(n=sum(n))) +
    # geom_contour(aes_string(si[1],si[2], z='n'))+
    # facet_wrap(~sig.p)+theme(strip.text.x = element_text(size = 5)) +
    #ggplot(env$S %>% select(si,n) %>% group_by_at(si) %>% summarize(n=sum(n))) +
    # geom_contour(aes_string(si[1],si[2], z='n'))+
    # theme(strip.text.x = element_text(size = 5)) +
    # ggtitle(paste0(titleSfx,'State-Evaluation Density - States ',si[2],'~',si[1]))
    ggplot(env$log %>% select(si) %>% mutate_all(list(~round(.))), mapping=aes_string(si[1],si[2])) +
      geom_density_2d() +
      theme(strip.text.x = element_text(size = 5)) +
      ggtitle(paste0(titleSfx,'State-Evaluation Density - States ',si[2],'~',si[1]))
    ggsave(paste0(fileSfx,'_',i,'.png'))
  }
  # ---- signal-series plots; the dashed line is always the target Si ----
  fileSfx <- paste0('Signals_ica_',env$numSigs,'_',env$numOrig)
  ggplot(bind_cols(b,FS=env$FS$Si) %>% rowid_to_column('tx') %>%
           gather(signal,value,-tx))+
    geom_line(aes(tx,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Base Timeseries (Bi) and Target Sum (FS)')) +
    theme(plot.title = element_text(size = 10)) +
    geom_line(data=fs, mapping=aes(t,Si), show.legend = F, linetype = 'dashed')
  ggsave(paste0(fileSfx,'_BaseSignals','.png'))
  x.x <- env$X %>% select(env$xColNames)
  ggplot(bind_cols(x.x, FS = env$FS$Si) %>% rowid_to_column('tx') %>%
           gather(signal,value,-tx))+
    geom_line(aes(tx,value,color=signal))+
    ggtitle(paste0(titleSfx,'Target Signal (FS), ICA Signals')) +
    theme(plot.title = element_text(size = 10)) +
    geom_line(data=fs, mapping=aes(t,Si), show.legend = F, linetype = 'dashed')
  ggsave(paste0(fileSfx,'_ICASignals','.png'))
  ggplot(fs %>% gather(signal,value,matches('^Si$|^S.?\\.w$'),-t))+
    geom_line(aes(t,value,color=signal))+
    ggtitle(paste0(titleSfx,'Target Signal (FS), Weighted ICA Signals (W*Si)')) +
    theme(plot.title = element_text(size = 10)) +
    geom_line(data=fs, mapping=aes(t,Si), show.legend = F, linetype = 'dashed')
  ggsave(paste0(fileSfx,'_WeightedICASignals','.png'))
  ggplot(as_tibble(env$X %>% select(env$xColNames)) %>% rowid_to_column('tx') %>%
           gather(signal,value,-tx))+
    geom_line(aes(tx,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Raw ICA Signals (Si)')) +
    theme(plot.title = element_text(size = 10)) +
    geom_line(data=fs, mapping=aes(t,Si), show.legend = F, linetype = 'dashed')
  ggsave(paste0(fileSfx,'_RawSignals','.png'))
  ggplot(as_tibble(env$X.orig) %>% rowid_to_column('tx') %>% gather(signal,value,-tx))+
    geom_line(aes(tx,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Artificial TimeSeries generated from BaseSignals Bi')) +
    theme(plot.title = element_text(size = 10))
  ggsave(paste0(fileSfx,'_ArtificialSeries','.png'))
  #ggplot(b %>% rowid_to_column('tx') %>% gather(signal,value,-tx))+geom_line(aes(tx,value,color=signal))
  #ggplot(select(env$FS,Si) %>% rowid_to_column('tx') %>% gather(signal,value,-tx))+geom_line(aes(tx,value,color=signal))
  #ggplot(select(env$FS,S) %>% rowid_to_column('tx') %>% gather(signal,value,-tx))+geom_line(aes(tx,value,color=signal))
  #ggplot(select(env$FS,p) %>% rowid_to_column('tx') %>% gather(signal,value,-tx))+geom_line(aes(tx,value,color=signal))
  #ggplot(env$A %>% filter(n>0)) + geom_density_2d(aes(sid,aid))
  #ggplot(env$A %>% filter(n>0) %>% group_by(sid,aid) %>% summarise(n = sum(n))) + geom_point(aes(sid,aid,size=n))
  # ---- track/reward plots; rewards are normalized by the signal count ----
  fileSfx <- paste0('Tracks_ica_',env$numSigs,'_',env$numOrig)
  track <- track %>% inner_join(fs, suffix = c('','.FS'), by='t')
  track$F.Si <- env$FS$Si[1:nrow(track)]
  track <- track %>% mutate_at(vars(matches(regex('^res\\..*R\\d?$', ignore_case = F))),
                               list(~./env$xColNum))
  ggplot(track %>% select(t,matches('^S.i$')) %>% gather(signal,value,-t))+
    geom_line(aes(t,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Resulting State-Track following generated Policy'), ) +
    theme(plot.title = element_text(size = 10)) +
    geom_line(data=track, mapping = aes(t,Si), show.legend = F, linetype = 'dashed')
  ggsave(paste0(fileSfx,'_StateTrack_Si','.png'))
  ggplot(track %>% select(t,matches('^res\\.R\\d$')) %>% gather(signal,value,-t))+
    geom_line(aes(t,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Resulting Reward vector following generated Policy')) +
    theme(plot.title = element_text(size = 10))
  ggsave(paste0(fileSfx,'_Reward_Vector','.png'))
  ggplot(track %>% select(t,matches('^res\\.SumR\\d$')) %>% gather(signal,value,-t))+
    geom_line(aes(t,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Resulting Sum(Reward) vector following generated Policy')) +
    theme(plot.title = element_text(size = 10))
  ggsave(paste0(fileSfx,'_Sum_Reward_Vector','.png'))
  ggplot(track %>% select(t,matches('^res\\..*R$')) %>% gather(signal,value,-t))+
    geom_line(aes(t,value,color=signal)) +
    ggtitle(paste0(titleSfx,'Resulting Reward following generated Policy')) +
    theme(plot.title = element_text(size = 10))
  ggsave(paste0(fileSfx,'_Reward','.png'))
  #ggplot(track %>% select(t,matches('S\\di1')) %>% gather(signal,value,-t))+geom_line(aes(t,value,color=signal))
  #ggplot(env$S %>% select(matches('S\\d'),P,n) %>% gather(key,value,-P,-n)) +
  #ggplot(env$S %>% select(matches('S\\d'),n,P)) + geom_contour(aes_string(x='S1',y='S2', z='n'))+facet_wrap(~P)
}
run_many <- function(env, n_runs = 9) {
  # Run the track simulation n_runs times (default 9, matching the original
  # hard-coded count) and overlay the state columns of every run in one plot.
  #
  # env    - learning environment passed through to getTrack().
  # n_runs - number of independent simulation runs to collect.
  #
  # Returns a ggplot object with one line per (run, state-column) pair.
  #
  # Improvement: the original built variables res1..res9 via assign()/get(),
  # which hides the data flow; a plain list is idiomatic and equivalent.
  runs <- lapply(seq_len(n_runs), function(i) getTrack(env))
  # Long-format state columns (S1i, S2i, ...) of the first run.
  res <- gather(runs[[1]]$track %>% select(matches('S\\di')), key, value)
  # Append the value column of every further run side by side.
  for (i in seq_len(n_runs)[-1]) {
    res <- bind_cols(res,
                     gather(runs[[i]]$track %>% select(matches('S\\di')), key, value) %>%
                       select(value))
  }
  ggplot(res %>% rowid_to_column('t') %>% gather(k, v, -t, -key)) +
    geom_line(aes(t, v, color = k))
}
evalLog <- function(res, env = res$myEnv) {
# Produce diagnostic plots from the learning log of one ICA run and save
# them as PNG files in the working directory.
#
# res - result object from getTrack(); carries the environment in res$myEnv.
# env - learning environment providing FS (signals), log, xColNames,
#       numSigs and numOrig.
#
# NOTE(review): ggsave() is called without an explicit plot argument, so it
# saves the most recently created ggplot (ggplot2's last_plot()) — confirm
# this is intended when refactoring.
print(paste0('processing result: ',env$numSigs,'/',env$numOrig,': logs'))
# Title / file-name prefixes encode the signal counts of this run.
titleSfx <- paste0('ICA',env$numSigs,'/',env$numOrig,': ')
fileSfx <- paste0('eval_',env$numSigs,'_',env$numOrig)
# Last character of each state column name, e.g. 'S1' -> '1'.
idxs <- str_extract(env$xColNames, '.$')
fs <- env$FS %>% rowid_to_column('t')
# Join the per-step signals onto the log by time step t.
l <- env$log %>% inner_join(fs, suffix = c('','.FS'), by='t')
# Aggregate Q-value across all per-signal Qpo.S* columns.
l$Qpo <- rowSums(l %>% select(matches('Qpo.S\\d')))
# For every state column build dqw.<col> = dQ.<col> * W.<col>
# (tidy-eval: column names are constructed dynamically).
for (s in env$xColNames) {
l <- l %>% mutate(!!paste0('dqw.',s) := !!as.name(paste0('dQ.',s)) * !!as.name(paste0('W.',s)))
}
# Weight trajectories over the whole log, and over the last 500 entries.
ggplot(l %>% gather(key,val,matches('^[W]\\.*'),-lid))+geom_smooth(aes(lid,val,color=key))
ggsave(paste0(fileSfx,'_W','.png'))
ggplot(tail(l,500) %>% gather(key,val,matches('^[W]\\.*'),-lid))+geom_line(aes(lid,val,color=key))
ggsave(paste0(fileSfx,'_W_500','.png'))
# Weighted state values: SVal.S<i>.w = W.S<i> * SVal.S<i>, summed into SVal.S.w.
l.w <- l %>% select(matches('^SVal\\.S.$|^W\\.S.$|^[ti]$')) %>% mutate(S.w = 0)
for (idx in idxs) l.w <- l.w %>%
mutate(!!paste0('SVal.S',idx,'.w') := !!as.name(paste0('W.S',idx)) * !!as.name(paste0('SVal.S',idx)))
l.w$SVal.S.w <- rowSums(select(l.w, matches('SVal\\.S.\\.w')))
# Raw per-signal state values over time.
ggplot(l.w %>% select(matches('^SVal\\.S.$|^t$')) %>% gather(signal,value,-t))+
geom_smooth(aes(t,value,color=signal)) +
ggtitle(paste0(titleSfx,'Steps SVals')) +
theme(plot.title = element_text(size = 10))
ggsave(paste0(fileSfx,'_Steps','.png'))
# Weight-scaled state values over time.
ggplot(l.w %>% select(matches('^SVal\\.S.?.w$|^t$')) %>% gather(signal,value,-t))+
geom_smooth(aes(t,value,color=signal)) +
ggtitle(paste0(titleSfx,'W weighted Steps (SVals.i * W)')) +
theme(plot.title = element_text(size = 10))
ggsave(paste0(fileSfx,'_WeightedSteps','.png'))
# The blocks below are retained, disabled plot variants (Z, dQ, dqw and the
# pairwise Qpo contour plots); kept for reference.
#ggplot(l %>% gather(key,val,matches('^[Z]\\.*'),-t,-n))+geom_smooth(aes(n,val,color=key))+
#  facet_wrap(~t)+theme(strip.text.x = element_text(size = 5))
#ggsave(paste0(fileSfx,'_Z_t','.png'))
#ggplot(l %>% gather(key,val,matches('d[Q]\\..*'),-sid))+geom_smooth(aes(sid,val,color=key))
#ggsave(paste0(fileSfx,'_dQ','.png'))
#ggplot(l %>% gather(key,val,matches('d[Q]\\..*'),-n,-t))+geom_smooth(aes(n,val,color=key))+
#  facet_wrap(~t)+theme(strip.text.x = element_text(size = 5))
#ggsave(paste0(fileSfx,'_dQ_t','.png'))
#ggplot(l %>% gather(key,val,matches('dqw\\..*'),-lid))+geom_smooth(aes(lid,val,color=key))
#ggsave(paste0(fileSfx,'_dqw','.png'))
#ggplot(l %>% gather(key,val,matches('dqw\\..*'),-n,-t))+geom_smooth(aes(n,val,color=key))+
#  facet_wrap(~t)+theme(strip.text.x = element_text(size = 5))
#ggsave(paste0(fileSfx,'_dqw_t','.png'))
#comb <- gtools::combinations(env$xColNum-1,2)
#for (i in 1:nrow(comb)) {
#  si <- paste0('S',comb[i,])
#  #ggplot(l %>% select(si,matches('^(sid|Qpo|sig.p)$')) %>%
#  #         group_by_at(c(si,'sig.p')) %>% summarise(Qpo = mean(Qpo)))+
#  #  geom_contour(aes_string(x=si[1],y=si[2],z='Qpo'))+
#  #  facet_wrap(~sig.p)+theme(strip.text.x = element_text(size = 5))+
#  ggplot(l %>% select(si,matches('^(sid|Qpo)$')) %>%
#           group_by_at(si) %>% summarise(Qpo = mean(Qpo)))+
#    geom_contour(aes_string(x=si[1],y=si[2],z='Qpo'))+
#    theme(strip.text.x = element_text(size = 5))+
#    ggtitle(paste0(titleSfx,'State-Evaluation Sum(Qpo.i) - States ',si[2],'~',si[1]))
#  ggsave(paste0(fileSfx,'_SumQ_',i,'.png'))
#}
}
evalTracks <- function(res, env = res$myEnv, tracks = res$tracks) {
# Plot and save diagnostic charts for the simulated tracks of one ICA run:
# weighted state tracks, per-signal tracks, reward curves and learned weights.
#
# res    - result object from getTrack(); provides env and tracks.
# env    - learning environment (FS signals, X observations, column names,
#          tSteps, numSigs/numOrig).
# tracks - data frame of simulated steps (one row per step per episode i).
#
# NOTE(review): ggsave() is called without a plot argument and therefore
# saves ggplot2's last_plot(); the un-saved ggplot() calls below look like
# exploratory leftovers — confirm before cleaning up.
print(paste0('processing result: ',env$numSigs,'/',env$numOrig,': tracks'))
titleSfx <- paste0('ICA',env$numSigs,'/',env$numOrig,': ')
fileSfx <- paste0('track_',env$numSigs,'_',env$numOrig)
# Signal index suffix of each state column name (last character).
idxs <- str_extract(env$xColNames, '.$')
fs <- env$FS %>% rowid_to_column('t') %>% rename(Si.FS=Si)
# Normalise reward columns by the number of state columns, make the episode
# id a factor, and join per-step signals (FS) and observations (X) by step.
tracks <- tracks %>%
mutate_at(vars(matches(regex('^res\\..*R\\d?$', ignore_case = F))), list(~./env$xColNum)) %>%
mutate_at(vars(i), as.factor) %>%
mutate(t = stid) %>%
inner_join(fs, suffix = c('','.FS'), by='t') %>%
left_join(env$X %>% set_names(paste0('X.',names(.))), by=c('t'='X.xid'))
# Narrow working copy used for the reference (red/dashed) overlays.
trs <- select(tracks,matches('^SVal\\.S.$|^S.?i?$|^W\\.S.$|^S.i.w$|^t$|^i$|^X\\.S.$')) %>%
rowid_to_column('id') %>%
inner_join(fs, suffix = c('','.FS'), by='t') %>%
mutate(Si.w = 0, i = as.factor(i))
# Per-signal weighted observations S<idx>.w = W.S<idx> * X.S<idx> ...
for (idx in idxs) trs <-
mutate(trs, !!as.name(paste0('S',idx,'.w')) := !!as.name(paste0('W.S',idx))*!!as.name(paste0('X.S',idx)))
# ... and their running sum Si.w (built incrementally over the loop).
for (idx in idxs) trs <-
mutate(trs, Si.w := Si.w + !!as.name(paste0('W.S',idx))*!!as.name(paste0('X.S',idx)))
# Same weighted columns on the full tracks table (named S<idx>i.w there).
for (idx in idxs) tracks <-
mutate(tracks, !!as.name(paste0('S',idx,'i.w')) := !!as.name(paste0('W.S',idx))*!!as.name(paste0('X.S',idx)))
tracks$Si.w <- rowSums(tracks %>% select(matches('S.i\\.w')))
# Long-format state values per step and episode.
tsi <- tracks %>% select(t,i,matches('^SVal\\.S.$')) %>% gather(si,v,-t,-i)
# Final cumulative reward per episode (t == 19 is assumed to be the last
# step — NOTE(review): confirm this matches env$tSteps).
sumR <- tracks %>% filter(t==19) %>%
rename(SumR = res.SumR) %>%
select(i,SumR)
topSumR <- sumR %>% arrange(desc(SumR)) %>% head(5)
tsi <- tsi %>% left_join(sumR, by='i')
topTsi <- tsi %>% filter(i %in% topSumR$i)
topTracks <- tracks %>% filter(i %in% topSumR$i)
# Long-format weighted tracks and the top-5 subset.
tsi.w <- tracks %>% select(t,i,matches('^S.?i.w$')) %>% gather(si,v,-t,-i)
tsi.w <- tsi.w %>% left_join(sumR, by='i')
topTsi.w <- tsi.w %>% filter(i %in% topSumR$i)
# NOTE(review): the next two plot objects are neither printed nor saved.
ggplot()+
geom_point(data=trs %>% tail(19*10), mapping=aes(t,Si.w), linetype='dotted', color='black', size=1) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2) +
geom_smooth(data=tracks, mapping=aes(t,Si.w), linetype='dashed', color='green', size=.5, span=.2)
ggplot()+
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')
# Tail of raw state S vs. the weighted and reference signals.
ggplot(tail(trs,50*(env$tSteps-1)))+geom_line(aes(t,S,color=i))+
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
#geom_line(aes(t,Si.w), linetype='dotted', color='red', size=2)+
geom_line(aes(t,Si.FS), linetype='dashed')+
theme(legend.position = 'none')
ggsave(paste0(fileSfx,'_WeightedTrack_FS','.png'))
# Weighted per-signal tracks (tail only).
ggplot(tail(tracks,50*(env$tSteps-1)) %>% gather(k,s,matches('^(S.i\\.w)$'),-t,-i))+
geom_smooth(aes(t,s,color=k), span=.2) #+
#geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2)+
#geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')
ggsave(paste0(fileSfx,'_WeightedTracks_Tail','.png'))
# Weighted tracks of the five most successful episodes.
ggplot(topTracks %>% gather(k,s,matches('^(S.?i\\.w)|Si\\.FS$'),-t,-i))+
geom_smooth(aes(t,s,color=k), span=.2) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')
ggsave(paste0(fileSfx,'_WeightedTracks_Top','.png'))
# State-value tracks: smoothed, all episodes, and top 5.
ggplot(tsi)+geom_smooth(aes(t,v,color=si)) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')
ggsave(paste0(fileSfx,'_Tracks_smooth','.png'))
ggplot(tsi %>% mutate(ti=as.factor(paste0(si,'_',i)))) +
geom_line(aes(t,v,color=ti), position = position_jitter(.3,.3))+
theme(legend.position = 'none') +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')+
ggtitle(paste0(titleSfx,'Tracks'))
ggsave(paste0(fileSfx,'_Tracks','.png'))
ggplot(topTsi %>% mutate(ti=as.factor(paste0(si,'_',i)))) +
geom_line(aes(t,v,color=ti), position = position_jitter(.3,.3)) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')+
ggtitle(paste0(titleSfx,'most successful Tracks'))
ggsave(paste0(fileSfx,'_Tracks_Top5','.png'))
# Same three views for the weighted tracks.
ggplot(tsi.w)+geom_smooth(aes(t,v,color=si)) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')+
ggtitle(paste0(titleSfx,'Weighted Tracks'))
ggsave(paste0(fileSfx,'_WeightedTracks_smooth','.png'))
ggplot(tsi.w %>% mutate(ti=as.factor(paste0(si,'_',i)))) +
geom_line(aes(t,v,color=ti), position = position_jitter(.3,.3))+
theme(legend.position = 'none') +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')+
ggtitle(paste0(titleSfx,'Weighted Tracks'))
ggsave(paste0(fileSfx,'_WeightedTracks','.png'))
ggplot(topTsi.w %>% mutate(ti=as.factor(paste0(si,'_',i)))) +
geom_line(aes(t,v,color=ti), position = position_jitter(.3,.3)) +
geom_smooth(data=trs, mapping=aes(t,Si.w), linetype='dotted', color='red', size=2, span=.2)+
geom_line(data=tracks, mapping=aes(t,Si.FS), show.legend = F, linetype = 'dashed')+
ggtitle(paste0(titleSfx,'most successful Weighted Tracks'))
ggsave(paste0(fileSfx,'_WeightedTracks_Top5','.png'))
# Reward statistics: discard a warm-up prefix (whole episodes only), then
# compute per-step mean and spread over the last 500 episodes.
discNum <- round(min(.1*nrow(tracks),200)/(env$tSteps-1))*(env$tSteps-1)
tailNum <- 500*(env$tSteps-1)
trs <- tail(tracks, -discNum)
tr.stat <- tail(tracks, tailNum) %>% filter(t > 4)
r.mean <- mean(tail(tracks, tailNum)$act.R)
r.center <- tr.stat %>% group_by(t) %>% summarize(R.center = mean(act.R))
r.sd <- inner_join(tr.stat, r.center, by='t', suffix=c('','.c')) %>%
group_by(t) %>% summarize(R.sd = sd(act.R-R.center))
r.csd <- inner_join(r.center, r.sd, by='t', suffix=c('','.sd'))
# Reward over time with mean and +/- one-sd band.
ggplot(trs)+geom_smooth(aes(t,act.R), span=.2) +
geom_hline(yintercept = r.mean, linetype='dashed') +
geom_hline(yintercept = mean(r.center$R.center), linetype='dashed', color='red') +
geom_line(data=r.csd, mapping=aes(t,R.center+R.sd), linetype='dotted', size=.5) +
geom_line(data=r.csd, mapping=aes(t,R.center-R.sd), linetype='dotted', size=.5) +
ggtitle(paste0(titleSfx,'Reward'))
ggsave(paste0(fileSfx,'_Reward_smooth','.png'))
# Final cumulative reward per episode (raw and smoothed).
ggplot(trs %>% filter(stid==19) %>% mutate(i = as.numeric(as.character(i))))+
geom_line(aes(i,res.SumR)) +
geom_smooth(aes(i,res.SumR), span=.5) +
ggtitle(paste0(titleSfx,'final SumReward'))
ggsave(paste0(fileSfx,'_SumReward_Tail','.png'))
ggplot(trs %>% filter(stid==19) %>% mutate(i = as.numeric(as.character(i))))+
geom_smooth(aes(i,res.SumR), span=.2)+
ggtitle(paste0(titleSfx,'SumReward'))
ggsave(paste0(fileSfx,'_SumReward','.png'))
# Learned weights at the final step of each episode.
ggplot(trs %>% filter(stid==19) %>% mutate(i = as.numeric(as.character(i))) %>%
gather(w,val,matches('W.S.'),-i))+
geom_line(aes(i,val,color=w)) +
geom_smooth(aes(i,val,color=w), linetype='dashed', span=.3, size=.5) +
ggtitle(paste0(titleSfx,'final W'))
ggsave(paste0(fileSfx,'_W_Tail','.png'))
}
runOptim <- function(xts.fs) {
  # Estimate the time lag between the reference signal `Si` and the weighted
  # signal `S.w` of `xts.fs` by minimising the sum of squared differences over
  # (rounded) shifts in [0, 5]. The first four warm-up rows are dropped.
  # Returns the optimiser's continuous parameter (callers may round it).
  series <- xts.fs[5:nrow(xts.fs), ]
  lag_cost <- function(shift) {
    # Round the continuous optimiser input to an integer shift, then compare
    # Si against S.w advanced by that many rows.
    k <- round(shift)
    n_obs <- nrow(series)
    aligned_si <- series$Si[1:(n_obs - k)]
    shifted_sw <- series$S.w[(k + 1):n_obs]
    sum((aligned_si - shifted_sw)^2, na.rm = TRUE)
  }
  fit <- optim(c(0), lag_cost, method = 'Brent', lower = 0, upper = 5)
  fit$par
}
printTracks <- function(res, env = res$myEnv, tracks = res$tracks) {
# Build an interpolated time-series view of the signals, plot timelines
# (raw, lag-corrected, differenced) as PNGs, and print summary statistics
# and signal correlations to the console.
#
# res    - result object from getTrack(); provides env and tracks.
# env    - learning environment (FS, X, log, xColNames, tSteps, ...).
# tracks - simulated track steps (one row per step per episode).
titleSfx <- paste0('ICA',env$numSigs,'/',env$numOrig,': ')
fileSfx <- paste0('xts_',env$numSigs,'_',env$numOrig)
# Normalise reward columns and join the observations X by step id.
tracks <- tracks %>% mutate(t = stid) %>%
mutate_at(vars(matches(regex('^res\\..*R\\d?$', ignore_case = F))),
list(~./env$xColNum))%>%
left_join(env$X %>% set_names(paste0('X.',names(.))), by=c('t'='X.xid'))
tsi <- tracks %>% select(t,i,matches('^SVal\\.S.$')) %>% gather(si,v,-t,-i)
# Final cumulative reward per episode (t == 19 assumed to be the last step).
sumR <- tracks %>% filter(t==19) %>%
rename(SumR = res.SumR) %>%
select(i,SumR)
# Average learned weight per signal over the last 200 log entries.
w.s <- colMeans(env$log %>% tail(200) %>% select(matches('W\\.S.')))
# NOTE(review): W is assigned but never used below — confirm before removal.
W <- env$W
fs <- env$FS %>% rowid_to_column('t')
# Weighted observation columns and their sum S.w.
for (n in env$xColNames) fs[paste0(n,'.w')] <- w.s[paste0('W.',n)]*env$X[n]
fs$S.w <- rowSums(fs %>% select(matches('^S(\\d|C)\\.w$')))
# Per-step reward statistics over the last 500 episodes.
tailNum <- 500*(env$tSteps-1)
tr.stat <- tail(tracks, tailNum) %>% filter(t > 4)
r.mean <- mean(tail(tracks, tailNum)$act.R)
r.center <- tr.stat %>% group_by(t) %>% summarize(R.center = mean(act.R))
r.sd <- inner_join(tr.stat, r.center, by='t', suffix=c('','.c')) %>%
group_by(t) %>% summarize(R.sd = sd(act.R-R.center))
r.csd <- inner_join(r.center, r.sd, by='t', suffix=c('','.sd'))
# Convert to an xts series (step -> pseudo-timestamp), oversample to quarter
# steps and spline-interpolate the gaps.
numVar <- ncol(fs)
d <- as_datetime(1000*fs$t)
xts.fs <- xts::xts(as.matrix(fs), order.by = d)
xts.fs <- xts::merge.xts(xts.fs, as_datetime(1000*seq(1,max(fs$t),1/4)), fill=NA)
xts.fs <- zoo::na.spline(xts.fs[,1:numVar])
# Timeline of the weighted signal vs. the reference.
png(paste0(fileSfx,'_timeline.png'))
plot(xts.fs[,c('S.w','Si')])
dev.off()
# Estimate the lag between Si and S.w and plot the lag-corrected timeline.
lag <- runOptim(xts.fs)
xts.fs.lag <- xts.fs
xts.fs.lag$Si <- xts::lag.xts(xts.fs.lag$Si, lag)
png(paste0(fileSfx,'_timeline_lag.png'))
plot(xts.fs.lag[,c('S.w','Si')])
dev.off()
# NOTE(review): despite its name this differences the *unlagged* series
# (diff(xts.fs), not diff(xts.fs.lag)) — confirm whether that is intended.
xts.fs.lag.diff <- diff(xts.fs)
png(paste0(fileSfx,'_timeline_lag_diff.png'))
plot(xts.fs.lag.diff[,c('S.w','Si')])
dev.off()
# Console report: weights, cumulative-reward summary, per-step reward stats,
# residual between reference and weighted signal, and correlations.
print(paste(titleSfx,'Track: ',titleSfx,', W=',toString(w.s)))
print(w.s)
print(paste(titleSfx,'Summary of SumR:'))
print(summary(sumR$SumR))
print(paste(titleSfx,'Track: Reward=',toString(r.center$R.center)))
print(paste(titleSfx,'Track: Reward: Sum=',toString(sum(r.center$R.center)),
', Mean=',toString(mean(r.center$R.center)),
', SD=',toString(sd(r.sd$R.sd))))
print(paste(titleSfx,'Track: Reward Summary=',toString(summary(r.center$R.center))))
print(paste(titleSfx,'Track: Reward Summary:'))
print(summary(r.center$R.center))
print(paste(titleSfx,'Summary of FS - Sum(W*Xi)=',toString(summary(fs$Si - fs$S.w))))
print(paste(titleSfx,'Summary of FS - Sum(W*Xi):'))
print(summary(fs$Si - fs$S.w))
# Correlations of Si and the weighted signals, dropping 5 boundary rows to
# avoid spline edge effects.
snames <- na.exclude(names(xts.fs) %>% str_extract('^Si$|^S\\d?\\.w$'))
print(paste(titleSfx,'Correlation of Signals:'))
print(suppressWarnings(corrr::correlate(fs[5:(nrow(fs)-5),snames])))
print(paste(titleSfx,'Correlation of Signals - Lag =',lag,':'))
print(suppressWarnings(corrr::correlate(xts.fs.lag[5:(nrow(fs)-5),snames], quiet = T)))
print(paste(titleSfx,'Correlation of Diff-Signals - Lag =',lag,':'))
print(suppressWarnings(corrr::correlate(xts.fs.lag.diff[5:(nrow(fs)-5),snames], quiet = T)))
#print(sumR %>% arrange(desc(SumR)), n=20)
}
# Driver script: load saved ICA runs and either render plots (evalPlot) or
# print summaries (evalPrint) for each base directory / signal count.
# Relies on globals defined elsewhere: sig.v, orig.s, local, node, test,
# evalPlot, evalPrint, plotResults(), getTrack(), tracks.
# ICA signal counts to process.
icns <- c(2,3,4,5)
# Earlier directory/host mappings, kept for reference.
#icns <- c(3)
#baseDir <- list('v3sig3'='storage','v3sig4'='storage','v3sig2'='tfrlnx.fruehbeck.at')
#baseDir <- list('v4sig2'='tfrlnx.fruehbeck.at','v4sig3'='storage','v4sig4'='storage')
#baseDir <- list('v5sig2'='tfrlnx.fruehbeck.at','v5sig3'='storage','v5sig4'='storage')
#baseDir <- list('v6sig2'='tfrlnx.fruehbeck.at','v6sig3'='storage','v6sig4'='storage')
#baseDir <- list('v6sig2'='storage','v6sig3'='storage')
#baseDir <- list('v6sig3'='monster')
# Maps result directory -> host name embedded in the .rdata file names.
baseDir <- list('v6sig2'='monster','v6sig3'='monster','v6sig4'='monster')
# Optional command-line-style overrides for directory and signal count.
if (!is.null(sig.v)) {
#bd <- str_detect(names(baseDir),paste0('v\\dsig',sig.v))
baseDir <- sig.v
}
if (!is.null(orig.s)) icns <- c(as.numeric(orig.s))
dn <- NA
icn <- NA
if (evalPlot) {
for (dn in baseDir) {
currDir <- getwd()
# NOTE(review): prints 'switched to' but never calls setwd(dn) — looks
# like a dropped setwd; the finally-handler below restores currDir anyway.
if (local) dn <- '.'
print(paste('switched to:',getwd()))
tryCatch({
host <- baseDir[dn]
for (icn in icns) {
# Try the mapped host's file first, fall back to the local node's file.
file <- paste0('finDemo.env_',host,'_ica_',icn,'.rdata')
if (!file.exists(file)) host <- node
file <- paste0('finDemo.env_',host,'_ica_',icn,'.rdata')
print(paste('loading:',file))
try({
load(file)
print(paste('loaded:',file))
if (test) next
# Keep the loaded environment around as x<icn> for interactive use.
assign(paste0('x',icn),myEnv)
#myEnv <- get(paste0('x',icn))
result <- getTrack(myEnv)
result$tracks <- tracks
if (icn < 5) assign(paste0(dn,'r',icn),result)
# Persist the result next to the input and render all evaluations.
save(result, file=paste0('finDemo.env_',host,'_ica_',icn,'_Res.rdata'))
plotResults(result)
evalLog(result)
evalTracks(result)
})
}
},
error = function(err) { print(paste('Error:',dn,'/',icn,':',toString(err))) },
finally = setwd(currDir))
}
}
if (evalPrint) {
# Print-only pass over previously saved *_Res.rdata result files.
for (dn in baseDir) {
host <- baseDir[dn]
for (icn in icns) {
try({
if (local) dn <- '.'
file <- paste0(dn,'/','finDemo.env_',host,'_ica_',icn,'_Res.rdata')
if (!file.exists(file)) {
host <- node
file <- paste0(dn,'/','finDemo.env_',host,'_ica_',icn,'_Res.rdata')
}
print(paste('loading:',file))
load(file)
print(paste('loaded:',file))
printTracks(result)
})
}
}
}
|
382c9f106bb435a4ae0f11d2884b852c07ec79c3
|
d47f618c48d9be0c052402e82b4787c3397a7f84
|
/script_packages.R
|
442c3504921c73f56a62a660aea31806c1d5d2c0
|
[] |
no_license
|
agenis/citynames-clustering
|
9b927095991b02b8b8c31166623111c51132ab84
|
52d8db81b5369dc60a2d77200abf20d9e1425cb4
|
refs/heads/master
| 2020-04-15T08:07:28.330775
| 2019-01-08T00:02:40
| 2019-01-08T00:02:40
| 164,518,596
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,849
|
r
|
script_packages.R
|
# packages
# Setup script: load the libraries used by the city-name clustering analysis
# and define a lookup table of French department codes and names.
library(tidyverse)
library(tm)
library(stringr)
library(stringdist)
library(maptools)
library(cluster)
library(dbscan)
library(fpc)
# deps_names: data frame mapping the 2-character department code (dpmt) to
# its full name (dpmt_long); 97 rows, code "97" collapses all overseas
# territories into "DOMTOM". Stored as a dput()-style structure() literal.
deps_names= structure(list(dpmt = c("01", "02", "03", "04", "05", "06", "07", "08",
"09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
"2A", "2B", "21", "22", "23", "24", "25", "26", "27", "28", "29",
"30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40",
"41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51",
"52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62",
"63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73",
"74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84",
"85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "97"
), dpmt_long = c("Ain", "Aisne", "Allier", "Alpes de Hautes-Provence",
"Hautes-Alpes", "Alpes-Maritimes", "Ardèche", "Ardennes", "Ariège",
"Aube", "Aude", "Aveyron", "Bouches-du-Rhône", "Calvados", "Cantal",
"Charente", "Charente-Maritime", "Cher", "Corrèze", "Corse-du-Sud",
"Haute-Corse", "Côte-d'Or", "Côtes d'Armor", "Creuse", "Dordogne",
"Doubs", "Drôme", "Eure", "Eure-et-Loir", "Finistère", "Gard",
"Haute-Garonne", "Gers", "Gironde", "Hérault", "Ille-et-Vilaine",
"Indre", "Indre-et-Loire", "Isère", "Jura", "Landes", "Loir-et-Cher",
"Loire", "Haute-Loire", "Loire-Atlantique", "Loiret", "Lot",
"Lot-et-Garonne", "Lozère", "Maine-et-Loire", "Manche", "Marne",
"Haute-Marne", "Mayenne", "Meurthe-et-Moselle", "Meuse", "Morbihan",
"Moselle", "Nièvre", "Nord", "Oise", "Orne", "Pas-de-Calais",
"Puy-de-Dôme", "Pyrénées-Atlantiques", "Hautes-Pyrénées", "Pyrénées-Orientales",
"Bas-Rhin", "Haut-Rhin", "Rhône", "Haute-Saône", "Saône-et-Loire",
"Sarthe", "Savoie", "Haute-Savoie", "Paris", "Seine-Maritime",
"Seine-et-Marne", "Yvelines", "Deux-Sèvres", "Somme", "Tarn",
"Tarn-et-Garonne", "Var", "Vaucluse", "Vendée", "Vienne", "Haute-Vienne",
"Vosges", "Yonne", "Territoire-de-Belfort", "Essonne", "Hauts-de-Seine",
"Seine-Saint-Denis", "Val-de-Marne", "Val-d'Oise", "DOMTOM")), class = "data.frame", row.names = c(NA,
-97L))
|
07cfad4d92c2242c0f9ee7ed28715707f21eba7e
|
94f2d32736ee169853a4969bae5436d4764f1f5f
|
/R/keyword_relevance.R
|
073c1da7e14882d27873a8a877d2bee7544b75dd
|
[
"MIT"
] |
permissive
|
johannesharmse/watsonNLU
|
55eaf62fba16d0a37494fe34d392682b9cac5ab4
|
bfd042d9bda0df78002bb49fa6c9a15dbb515e09
|
refs/heads/master
| 2020-03-08T06:32:43.344968
| 2018-11-13T00:33:02
| 2018-11-13T00:33:02
| 127,974,332
| 4
| 3
|
MIT
| 2018-04-15T22:39:38
| 2018-04-03T22:10:37
|
R
|
UTF-8
|
R
| false
| false
| 6,635
|
r
|
keyword_relevance.R
|
#' Watson Natural Language Understanding: Relevance of Keywords
#'
#' See the \href{https://github.com/johannesharmse/watsonNLU/blob/master/README.md}{sign-up} documentation for step by step instructions to secure your own username and password to enable you to use the Watson NLU API. The \strong{keyword_relevance} function takes a text or URL input, along with the input type. The function then returns a dataframe that contains keywords and their likelihood of being a keyword, from the given input. See the \href{https://github.com/johannesharmse/watsonNLU/blob/master/README.md}{keyword_relevance} documentation for more usage cases.
#'
#' @param input Either a text string input or website URL.
#' Either \code{text} or \code{url} argument has to be specified,
#' but not both.
#' @param input_type Specify what type of input was entered.
#' Either \code{text} or \code{url} argument has to be specified,
#' but not both.
#' @param limit The number of keywords to return.
#' @param version The release date of the API version to use. Default value is \code{version="?version=2018-03-16"}
#'
#' @return A dataframe containing a list of keywords and their corresponding likelihoods for the given input.
#'
#' @examples
#'
#' credentials <- readRDS("../tests/testthat/credentials.rds")
#' username <- credentials$username
#' password <- credentials$password
#'
#' # Authenticate using Watson NLU API Credentials
#' auth_NLU(username, password)
#' # Authenticate using Watson NLU API Credentials
#' auth_NLU(username="XXXX", password="XXXX")
#'
#' # Top 5 keywords from the text input.
#' keyword_relevance(input = 'This is a great API wrapper', input_type='text', limit = 5)
#'
#' # Top 5 keywords from the URL input.
#' keyword_relevance(input = 'http://www.nytimes.com/guides/well/how-to-be-happy', input_type='url', limit = 5)
#'
#' @seealso \code{\link[watsonNLU]{keyword_sentiment}}, \code{\link[watsonNLU]{text_categories}}, \code{\link[watsonNLU]{keyword_emotions}}, \code{\link[watsonNLU]{auth_NLU}}
#'
#'
#' @import httr
#'
#' @export
keyword_relevance <- function(input = NULL, input_type = NULL, limit = NULL, version="?version=2018-03-16"){
  # Query the Watson NLU "keywords" feature for a text or URL input and
  # return a data frame of keywords with their relevance scores.
  #
  # input      - character string: raw text, or a website URL.
  # input_type - 'text' or 'url'; defaults to 'text' (with a message) if NULL.
  # limit      - optional single numeric cap on the number of keywords.
  # version    - API version query string appended to the endpoint.
  #
  # Returns a data frame with columns `keyword` and `relevance`.
  # Stops with an informative error on invalid input, missing credentials,
  # or a non-200 API response.

  # Accepted values for input_type.
  accepted_input_types <- c('text', 'url')

  # Base of the API call; feature and input parameters are appended below.
  url_NLU <- "https://gateway.watsonplatform.net/natural-language-understanding/api"

  # Requested NLU feature (no extra attributes needed for keywords).
  feature <- "keywords"
  features_string <- paste0("&features=", feature)

  # ---- Input validation --------------------------------------------------
  if (is.null(input)){
    stop("Please specify an input to analyze.")
  } else if (!is.character(input)){
    stop("Please specify input text or URL as a character string")
  }
  if (is.null(input_type)){
    message("Input type not specified. Assuming text input.")
    input_type <- 'text'
  }
  if (!is.character(input_type)){
    stop("Input type needs to be specified as a character string('url' or 'text').")
  } else {
    input_type <- tolower(input_type)
  }
  if (!input_type %in% accepted_input_types){
    stop("Input type should be either 'url' or 'text'.")
  }
  if (is.null(limit)){
    message("No limit specified. Using default API call limit.")
  } else if (!is.numeric(limit) || length(limit) > 1){
    # BUGFIX: previously the invalid value was still pasted into the request
    # URL (yielding a malformed, possibly vectorized URL); fall back to the
    # API default instead.
    message("Limit needs to be specified as a numeric integer.")
    limit <- NULL
  } else {
    limit <- paste0("&", feature, ".limit=", limit)
  }

  # ---- Credentials -------------------------------------------------------
  # BUGFIX: is.null() on a never-created object raises "object not found";
  # exists() handles the not-yet-authenticated case gracefully.
  if (!exists("watson_credentials") ||
      !is.environment(watson_credentials)){
    stop("No credentials found. Provide credentials using watsonNLU::auth_NLU")
  }
  username <- get("username", envir = watson_credentials)
  password <- get("password", envir = watson_credentials)

  # ---- Encode and standardise the input ----------------------------------
  # Text must be percent-encoded before being placed in the query string;
  # URLs are passed through unchanged.
  if (input_type == 'text'){
    input <- paste0("&text=", URLencode(input))
  } else {
    input <- paste0("&url=", input)
  }

  # ---- API call ----------------------------------------------------------
  # paste0() silently drops a NULL limit, so the default-limit case yields a
  # well-formed URL.
  response <- GET(url=paste0(
    url_NLU,
    "/v1/analyze",
    version,
    input,
    features_string,
    limit),
    authenticate(username,password),
    add_headers("Content-Type"="application/json")
  )

  # ---- Response status checking ------------------------------------------
  status <- status_code(response)
  if (status != 200){
    message(response)
    if (status == 401){
      stop("Invalid or expired credentials provided. Provide credentials using watsonNLU::auth_NLU")
    }
    # Could be a query limit, connectivity issue, bad input, etc.
    stop("Please make sure you have a valid internet connection and provided a valid input. Check the response log above for further details.")
  }

  # ---- Extract and shape the result --------------------------------------
  response <- content(response)
  if (!is.null(response$keywords) &&
      length(response$keywords) > 0){
    response <- response$keywords
  } else {
    stop("No results available")
  }
  # vapply (instead of sapply) guarantees character/numeric vectors even for
  # degenerate responses.
  keywords <- vapply(response, function(k) k$text, character(1))
  relevance <- vapply(response, function(k) k$relevance, numeric(1))
  response_df <- data.frame('keyword' = keywords, 'relevance' = relevance)
  return(response_df)
}
|
e0787f7c587aa18948811b10276c55d2c3bf89a4
|
6cb4fbdd76a338d95f8348ef1351ec124aebc39f
|
/R/utils.R
|
91442cfea043e9fc1890ff1e93cb0d9e9617d0ec
|
[] |
no_license
|
kevinykuo/vctrs
|
f0224bd1b011535935a8bc8d198da181d090b6ef
|
2992d4737d35ff4dbd6c15b895fc4c2dc6c71066
|
refs/heads/master
| 2020-03-25T23:16:53.434722
| 2018-08-10T00:25:01
| 2018-08-10T00:25:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 909
|
r
|
utils.R
|
indent <- function(x, n) {
  # Indent every line after the first of each string in `x` by `n` spaces
  # (only embedded newlines get the padding). Returns an unnamed character
  # vector; empty input yields character().
  if (length(x) == 0)
    return(character())
  replacement <- paste0("\n", strrep(" ", n))
  vapply(x, function(s) gsub("\n", replacement, s), character(1),
         USE.NAMES = FALSE)
}
ones <- function(...) {
  # Array of the given dimensions, filled with 1 (MATLAB-style ones()).
  dims <- c(...)
  array(1, dim = dims)
}
vec_coerce_bare <- function(x, type) {
  # Coerce `x` to the given base type via rlang's unexported wrapper around
  # the C-level Rf_coerceVector(); env_get() is used to reach into rlang's
  # namespace because `vec_coerce` is not exported.
  # Unexported wrapper around Rf_coerceVector()
  coerce <- env_get(ns_env("rlang"), "vec_coerce")
  coerce(x, type)
}
# Matches the semantics of c() - based on experimenting with the output
# of c(), not reading the source code.
outer_names <- function(x, outer) {
has_outer <- !is.null(outer) && !outer %in% c("", NA)
if (!has_outer)
return(names(x))
has_inner <- !is.null(names(x))
if (has_inner) {
paste0(outer, "..", names(x))
} else {
if (length(x) == 1) {
outer
} else {
paste0(outer, seq_along(x))
}
}
}
hash <- function(x, length = 5) {
  # Short content hash: the first `length` characters (default 5) of the
  # digest-package checksum of `x`. Used for compact, human-scannable ids;
  # not cryptographically meaningful at this truncation.
  substr(digest::digest(x), 1, length)
}
|
e6fd8a594a530be7a7b218537fece680cd895fe4
|
4bd41b3ea5014a386820af21c3f3039800cdc688
|
/scropt.R
|
d0afdb303f325ffe597289c1eeae5c93b0ed50ca
|
[] |
no_license
|
RichChu1/ma350-demo
|
650d5e08cb9ba222fc30e6d1066d18ddd886b4c3
|
8a274e02b9bb512a48553805f474ca0603b9fe11
|
refs/heads/master
| 2021-04-18T04:36:24.637506
| 2020-03-23T18:19:28
| 2020-03-23T18:19:28
| 249,505,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41
|
r
|
scropt.R
|
# Demo script: scatter-plot 1000 draws from a standard normal distribution.
plot(rnorm(1000))
# New line of comments
|
f36ba77fc0a3a94d906906cb08467fea414276c0
|
5a71730ca0a0088621fc8a2f41a9f42e39acc7f8
|
/cachematrix.R
|
d4042ed885550f2e9b7d5cdb623de4308dc7e3fa
|
[] |
no_license
|
ajtrask/ProgrammingAssignment2
|
9bd2907d9cc6b321d46d4c5d33889315588f47ec
|
9684a605e63a4b9b7dc0b16ba3842e9b9a4db3a6
|
refs/heads/master
| 2021-01-18T18:43:54.224953
| 2016-02-06T00:48:24
| 2016-02-06T00:48:24
| 50,941,865
| 0
| 0
| null | 2016-02-02T18:25:53
| 2016-02-02T18:25:53
| null |
UTF-8
|
R
| false
| false
| 1,419
|
r
|
cachematrix.R
|
## These functions (makeCacheMatrix and cacheSolve) implement
## a matrix "object" with the ability to cache its inverse
## makeCacheMatrix creates a matrix "object" that has funtions to:
## set and get the matrix
## set and get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  #
  # Returns a list of four accessor closures over `x` and `cached_inv`:
  #   set(y)    - replace the matrix (invalidates the cached inverse)
  #   get()     - return the current matrix
  #   setinv(v) - store a value as the cached inverse
  #   getinv()  - return the cached inverse, or NULL if none is stored
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix must also drop any stale cached inverse.
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(solve) {
    cached_inv <<- solve
  }
  getinv <- function() {
    cached_inv
  }
  # The returned list of functions is the "cache matrix" object.
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve calculates the inverse of the matrix "object"
## created in makeCacheMatrix by:
## checking if the inverse has already been calculated
## and getting the inverse from the cache
## or calculating the invese and storing in the cache
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object). Serves the cached inverse when available; otherwise computes
  # it with solve(), stores it in the cache, and returns it.
  # Extra arguments (...) are forwarded to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("returning cached value")
    return(cached)
  }
  inv <- solve(x$get(), ...)  # compute the inverse on cache miss
  x$setinv(inv)               # populate the cache for future calls
  inv
}
|
a7e26b792cd63b9920a9fb624ffde57ae0a789dc
|
c9a9019ebc1e1ab5921dfa63ad07e50f1db523d4
|
/CourseProjectEx.R
|
60572fdf1274b259bde38f3c86377afd13e9747a
|
[] |
no_license
|
burtks/ADEC7430-KBFinal
|
a715e6d94d9595ca19c5bb24b89506b3fb1be289
|
ed621cdd2f2063e0f63084ff4c7df7095af72fa8
|
refs/heads/main
| 2023-01-04T04:31:55.863843
| 2020-10-19T00:28:18
| 2020-10-19T00:28:18
| 304,186,574
| 0
| 2
| null | 2020-10-19T00:28:19
| 2020-10-15T02:20:14
|
HTML
|
UTF-8
|
R
| false
| false
| 8,210
|
r
|
CourseProjectEx.R
|
# ADEC 7430 Big Data Econometrics
# Course Project - Example R Script File
# OBJECTIVE: A charitable organization wishes to develop a machine learning
# model to improve the cost-effectiveness of their direct marketing campaigns
# to previous donors.
# 1) Develop a classification model using data from the most recent campaign that
# can effectively capture likely donors so that the expected net profit is maximized.
# 2) Develop a prediction model to predict donation amounts for donors - the data
# for this will consist of the records for donors only.
# load the data
charity <- read.csv(file.choose()) # load the "charity.csv" file
# predictor transformations
charity.t <- charity
charity.t$avhv <- log(charity.t$avhv)
# add further transformations if desired
# for example, some statistical methods can struggle when predictors are highly skewed
# set up data for analysis
data.train <- charity.t[charity$part=="train",]
x.train <- data.train[,2:21]
c.train <- data.train[,22] # donr
n.train.c <- length(c.train) # 3984
y.train <- data.train[c.train==1,23] # damt for observations with donr=1
n.train.y <- length(y.train) # 1995
data.valid <- charity.t[charity$part=="valid",]
x.valid <- data.valid[,2:21]
c.valid <- data.valid[,22] # donr
n.valid.c <- length(c.valid) # 2018
y.valid <- data.valid[c.valid==1,23] # damt for observations with donr=1
n.valid.y <- length(y.valid) # 999
data.test <- charity.t[charity$part=="test",]
n.test <- dim(data.test)[1] # 2007
x.test <- data.test[,2:21]
x.train.mean <- apply(x.train, 2, mean)
x.train.sd <- apply(x.train, 2, sd)
x.train.std <- t((t(x.train)-x.train.mean)/x.train.sd) # standardize to have zero mean and unit sd
apply(x.train.std, 2, mean) # check zero mean
apply(x.train.std, 2, sd) # check unit sd
data.train.std.c <- data.frame(x.train.std, donr=c.train) # to classify donr
data.train.std.y <- data.frame(x.train.std[c.train==1,], damt=y.train) # to predict damt when donr=1
x.valid.std <- t((t(x.valid)-x.train.mean)/x.train.sd) # standardize using training mean and sd
data.valid.std.c <- data.frame(x.valid.std, donr=c.valid) # to classify donr
data.valid.std.y <- data.frame(x.valid.std[c.valid==1,], damt=y.valid) # to predict damt when donr=1
x.test.std <- t((t(x.test)-x.train.mean)/x.train.sd) # standardize using training mean and sd
data.test.std <- data.frame(x.test.std)
##### CLASSIFICATION MODELING ######
# ----- Model 1: linear discriminant analysis -----
library(MASS)
model.lda1 <- lda(donr ~ reg1 + reg2 + reg3 + reg4 + home + chld + hinc + I(hinc^2) + genf + wrat +
avhv + incm + inca + plow + npro + tgif + lgif + rgif + tdon + tlag + agif,
data.train.std.c) # include additional terms on the fly using I()
# Note: strictly speaking, LDA should not be used with qualitative predictors,
# but in practice it often is if the goal is simply to find a good predictive model
post.valid.lda1 <- predict(model.lda1, data.valid.std.c)$posterior[,2] # n.valid.c post probs
# Ordered profit function using average donation = $14.50 and mailing cost = $2.
# (decreasing = TRUE spelled out: T is an ordinary, reassignable binding in R.)
profit.lda1 <- cumsum(14.5*c.valid[order(post.valid.lda1, decreasing=TRUE)]-2)
plot(profit.lda1) # see how profits change as more mailings are made
n.mail.valid <- which.max(profit.lda1) # number of mailings that maximizes profits
c(n.mail.valid, max(profit.lda1)) # report number of mailings and maximum profit
# 1329.0 11624.5
cutoff.lda1 <- sort(post.valid.lda1, decreasing=TRUE)[n.mail.valid+1] # set cutoff based on n.mail.valid
chat.valid.lda1 <- ifelse(post.valid.lda1>cutoff.lda1, 1, 0) # mail to everyone above the cutoff
table(chat.valid.lda1, c.valid) # classification table
#               c.valid
#chat.valid.lda1   0   1
#              0 675  14
#              1 344 985
# check n.mail.valid = 344+985 = 1329
# check profit = 14.5*985-2*1329 = 11624.5
# ----- Model 2: logistic regression -----
model.log1 <- glm(donr ~ reg1 + reg2 + reg3 + reg4 + home + chld + hinc + I(hinc^2) + genf + wrat +
avhv + incm + inca + plow + npro + tgif + lgif + rgif + tdon + tlag + agif,
data.train.std.c, family=binomial("logit"))
post.valid.log1 <- predict(model.log1, data.valid.std.c, type="response") # n.valid post probs
# Ordered profit function using average donation = $14.50 and mailing cost = $2
profit.log1 <- cumsum(14.5*c.valid[order(post.valid.log1, decreasing=TRUE)]-2)
plot(profit.log1) # see how profits change as more mailings are made
n.mail.valid <- which.max(profit.log1) # number of mailings that maximizes profits
c(n.mail.valid, max(profit.log1)) # report number of mailings and maximum profit
# 1291.0 11642.5
cutoff.log1 <- sort(post.valid.log1, decreasing=TRUE)[n.mail.valid+1] # set cutoff based on n.mail.valid
chat.valid.log1 <- ifelse(post.valid.log1>cutoff.log1, 1, 0) # mail to everyone above the cutoff
table(chat.valid.log1, c.valid) # classification table
#               c.valid
#chat.valid.log1   0   1
#              0 709  18
#              1 310 981
# check n.mail.valid = 310+981 = 1291
# check profit = 14.5*981-2*1291 = 11642.5
# Model comparison on the validation sample:
# n.mail Profit  Model
# 1329   11624.5 LDA1
# 1291   11642.5 Log1
# select model.log1 since it has maximum profit in the validation sample
post.test <- predict(model.log1, data.test.std, type="response") # post probs for test data
# Oversampling adjustment for calculating the number of mailings for the test
# set: donors were oversampled in the validation data (response rate .5)
# relative to the population (typical response rate .1), so rescale the
# mailing fraction before applying it to the test set.
n.mail.valid <- which.max(profit.log1)
tr.rate <- .1 # typical response rate is .1
vr.rate <- .5 # whereas validation response rate is .5
adj.test.1 <- (n.mail.valid/n.valid.c)/(vr.rate/tr.rate) # adjustment for mail yes
adj.test.0 <- ((n.valid.c-n.mail.valid)/n.valid.c)/((1-vr.rate)/(1-tr.rate)) # adjustment for mail no
adj.test <- adj.test.1/(adj.test.1+adj.test.0) # scale into a proportion
n.mail.test <- round(n.test*adj.test, 0) # calculate number of mailings for test set
cutoff.test <- sort(post.test, decreasing=TRUE)[n.mail.test+1] # set cutoff based on n.mail.test
chat.test <- ifelse(post.test>cutoff.test, 1, 0) # mail to everyone above the cutoff
table(chat.test)
#    0    1
# 1676  331
# based on this model we'll mail to the 331 highest posterior probabilities
# See below for saving chat.test into a file for submission
##### PREDICTION MODELING ######
# Least squares regression of donation amount (damt) on the predictors,
# fitted on donors only (data.train.std.y).
model.ls1 <- lm(damt ~ reg1 + reg2 + reg3 + reg4 + home + chld + hinc + genf + wrat +
avhv + incm + inca + plow + npro + tgif + lgif + rgif + tdon + tlag + agif,
data.train.std.y)
pred.valid.ls1 <- predict(model.ls1, newdata = data.valid.std.y) # validation predictions
mean((y.valid - pred.valid.ls1)^2) # mean prediction error
# 1.867523
sd((y.valid - pred.valid.ls1)^2)/sqrt(n.valid.y) # std error
# 0.1696615
# Same model with wrat removed (drop wrat for illustrative purposes).
model.ls2 <- lm(damt ~ reg1 + reg2 + reg3 + reg4 + home + chld + hinc + genf +
avhv + incm + inca + plow + npro + tgif + lgif + rgif + tdon + tlag + agif,
data.train.std.y)
pred.valid.ls2 <- predict(model.ls2, newdata = data.valid.std.y) # validation predictions
mean((y.valid - pred.valid.ls2)^2) # mean prediction error
# 1.867433
sd((y.valid - pred.valid.ls2)^2)/sqrt(n.valid.y) # std error
# 0.1696498
# Model comparison (mean prediction error on the validation sample):
# MPE      Model
# 1.867523 LS1
# 1.867433 LS2
# select model.ls2 since it has minimum mean prediction error in the validation sample
yhat.test <- predict(model.ls2, newdata = data.test.std) # test predictions
# FINAL RESULTS
# Save final results for both classification and regression:
# chat.test = 0/1 mail decision from model.log1, yhat.test = predicted damt from model.ls2.
length(chat.test) # check length = 2007
length(yhat.test) # check length = 2007
chat.test[1:10] # check this consists of 0s and 1s
yhat.test[1:10] # check this consists of plausible predictions of damt
ip <- data.frame(chat=chat.test, yhat=yhat.test) # data frame with two variables: chat and yhat
write.csv(ip, file="ABC.csv", row.names=FALSE) # use your initials for the file name
# submit the csv file in Canvas for evaluation based on actual test donr and damt values
|
ca9c49e8c535851f3957375de37b274a400962d9
|
3f41dcde4498fcf47a5f8314de6086ffec3dd082
|
/man/makeplot.asdsf.Rd
|
529ac1bf862d260c102a04e40c72b847844846b7
|
[] |
no_license
|
arborworkflows/RWTY
|
6f961f76b69776d9c752be0483416dec7448661b
|
fbf5b695a1c8d16f7532123fb86715152f430b06
|
refs/heads/master
| 2020-12-31T02:01:50.609590
| 2016-08-17T22:16:18
| 2016-08-17T22:16:18
| 65,751,392
| 0
| 1
| null | 2016-08-15T17:31:51
| 2016-08-15T17:31:51
| null |
UTF-8
|
R
| false
| true
| 1,229
|
rd
|
makeplot.asdsf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeplot.asdsf.R
\name{makeplot.asdsf}
\alias{makeplot.asdsf}
\title{Plot the Standard Deviation of Split Frequencies over the course of an MCMC.}
\usage{
makeplot.asdsf(chains, burnin = 0, window.size = 20, min.freq = 0)
}
\arguments{
\item{chains}{A list of rwty.trees objects.}
\item{burnin}{The number of trees to eliminate as burnin. Defaults to zero.}
\item{window.size}{The number of trees between each point at which the ASDSF is calculated (note, specified as a number of sampled trees, not a number of generations)}
\item{min.freq}{The minimum frequency for a node to be used for calculating ASDSF.}
}
\value{
output A cumulative plot of ASDSF across all chains
}
\description{
This function takes two or more rwty.trees objects and returns a plot of ASDSF as the run progresses.
The solid line with points shows the Average Standard Deviation of Split Frequencies at the current generation.
The grey ribbon shows the upper and lower 95\% quantiles of the SDSFs at the current generation.
}
\examples{
data(fungus)
p <- makeplot.asdsf(fungus, burnin = 20)
p
}
\keyword{ASDSF,}
\keyword{MCMC,}
\keyword{cumulative}
\keyword{phylogenetics,}
|
a81b14d9bd610adb9f8346678fdc284a7fb98aa5
|
318c102d1f9055cac2a790363c8c3f6af024702c
|
/man/TempTraject.Rd
|
feb6ada2487e17313f9745cf3f7448e86ca8e22a
|
[] |
no_license
|
cran/OceanView
|
342ae464107823ff3324d26d9e28290779e768b3
|
1272fbf444372dfa4475c255da5be3d764651d3f
|
refs/heads/master
| 2021-07-21T02:21:50.718897
| 2021-07-12T07:00:13
| 2021-07-12T07:00:13
| 18,805,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,940
|
rd
|
TempTraject.Rd
|
\name{Profile data set}
\docType{data}
\alias{TrackProf}
\title{
Temperature profiles made along a ship track.
}
\description{
Profiles of temperature made along a ship track, originally made available by US NOAA NODC.
The data were merged from 29 input files named \code{gtspp_103799_xb_111.nc}
till \code{gtspp_103827_xb_111.nc}.
These data were acquired from the US NOAA National Oceanographic Data Center
(NODC) on 9/06/2012 from \url{https://www.nodc.noaa.gov/gtspp/}.
}
\usage{
data(TrackProf)
}
\format{
list with
\itemize{
\item \code{meta}, a \code{data.frame} with the metadata, containing for each of the
29 profiles the following:
\itemize{
\item \code{station}, the number of the station (part of the original filename).
\item \code{filename}, the original name of the NetCDF file.
\item \code{date}, the date of sampling.
\item \code{time}, the time of sampling, a number relative to 1-1-1900 0 hours.
\item \code{longitude}, dg E.
\item \code{latitude}, dg N.
}
\item \code{temp}, the seawater temperature, at the \code{depth} of the
measurement in dg C. A matrix of dimension \code{(29, 93)} for the
29 profiles and (at most) 93 depth values; \code{NA} means no measurement.
\item \code{depth}, the depth of the measurement in \code{temp}, in metres,
positive downward. A matrix of dimension \code{(29, 93)} for the
29 profiles and (at most) 93 depth values; \code{NA} means no measurement.
}
}
\author{
Karline Soetaert <karline.soetaert@nioz.nl>
}
\examples{
# save plotting parameters
pm <- par(mfrow = c(2, 2))
mar <- par("mar")
## =============================================================================
## show the metadata
## =============================================================================
print(TrackProf$meta)
## =============================================================================
## display the cruisetrack on the Ocean Bathymetry data
## =============================================================================
# 1. plots the ocean's bathymetry and add sampling positions
ImageOcean(xlim = c(-50, 50), ylim = c(-50, 50),
main = "cruise track")
points(TrackProf$meta$longitude, TrackProf$meta$latitude, pch = "+")
# mark starting point
points(TrackProf$meta$longitude[1], TrackProf$meta$latitude[1],
pch = 18, cex = 2, col = "purple")
## =============================================================================
## image plots of raw data
## =============================================================================
image2D(z = TrackProf$depth, main = "raw depth values",
xlab = "station nr", ylab = "sample nr", clab = "depth")
image2D(z = TrackProf$temp, main = "raw temperature values",
xlab = "station nr", ylab = "sample nr", clab = "dgC")
## =============================================================================
## image plots of temperatures at correct depth
## =============================================================================
# water depths to which data set is interpolated
depth <- 0 : 809
# map from "sigma" to "depth" coordinates
Temp_Depth <- mapsigma (TrackProf$temp, sigma = TrackProf$depth,
depth = depth)$var
# image with depth increasing downward and increased resolution (resfac)
image2D(z = Temp_Depth, main = "Temperature-depth",
ylim = c(809, 0), y = depth, NAcol ="black", resfac = 2,
xlab = "station nr", ylab = "depth, m", clab = "dgC")
## =============================================================================
## scatterplot of surface values on ocean bathymetry
## =============================================================================
par(mar = mar + c(0, 0, 0, 2))
par(mfrow = c(1, 1))
# No colors, but add contours
ImageOcean(xlim = c(-30, 30), ylim = c(-40, 40),
main = "cruise track", col = "white", contour = TRUE)
# use data set TrackProf to add measured temperature, with color key
with (TrackProf,
scatter2D(colvar = temp[,1], x = meta[ ,"longitude"],
y = meta[ ,"latitude"], clab = "temp",
add = TRUE, pch = 18, cex = 2))
# reset plotting parameters
par(mar = mar)
par(mfrow = pm)
}
\references{
\url{https://www.nodc.noaa.gov/gtspp/}
U.S. National Oceanographic Data Center: Global Temperature-Salinity Profile Programme.
June 2006. U.S. Department of Commerce, National Oceanic and Atmosphere Administration,
National Oceanographic Data Center, Silver Spring, Maryland, 20910. Date of Access: 9/06/2012.
}
\seealso{
\link{image2D} for plotting images, package \code{plot3D}.
\link{ImageOcean} for an image of the ocean bathymetry, package \code{plot3D}.
\link{scatter2D} for making scatterplots, package \code{plot3D}.
\link{Oxsat} for a 3-D data set, package \code{plot3D}.
}
\keyword{datasets}
|
288916873461bfcda4a21c6a7f8eb32040ec355d
|
7329459bb72ddd723bc58358e80b5f0db3db730c
|
/man/Normal_ct.Rd
|
773405664d95d210173887d7414ca70a1adfb1d7
|
[] |
no_license
|
knygren/glmbayes
|
6f2411da073f3d6bfcb727e8d02d4888cacb8fef
|
3c25c08c1f4ac71a0e67d47341fb0cf39497d5f8
|
refs/heads/master
| 2021-01-17T10:49:54.755257
| 2020-08-29T21:24:08
| 2020-08-29T21:24:08
| 18,466,002
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,454
|
rd
|
Normal_ct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EnvNorm_ct.R
\name{Normal_ct}
\alias{Normal_ct}
\alias{pnorm_ct}
\alias{rnorm_ct}
\title{The Central Normal Distribution}
\usage{
pnorm_ct(a = -Inf, b = Inf, mu = 0, sigma = 1, log.p = TRUE, Diff = FALSE)
rnorm_ct(n, lgrt, lglt, mu = 0, sigma = 1)
}
\arguments{
\item{a}{lower bound}
\item{b}{upper bound}
\item{mu}{mean parameter}
\item{sigma}{standard deviation}
\item{log.p}{Logical argument. If \code{TRUE}, the log probability is provided}
\item{Diff}{Logical argument. If \code{TRUE} the second parameter is the difference between the lower and upper bound}
\item{n}{number of draws to generate. If \code{length(n) > 1},
the length is taken to be the number required}
\item{lgrt}{log of the distribution function between the
lower bound and infinity}
\item{lglt}{log of the distribution function between negative
infinity and the upper bound}
}
\value{
For \code{pnorm_ct}, vector of length equal to length of \code{a} and for
\code{rnorm_ct}, a vector with length determined by \code{n} containing draws from
the center of the normal distribution.
}
\description{
Distribution function and random generation for the center (between a lower and an upper bound)
of the normal distribution with mean equal to mu and standard deviation equal to sigma.
}
\details{
The distribution function pnorm_ct finds the probability of the
center of a normal density (the probability of the area between a lower
bound a and an upper bound b) while the random number generator rnorm_ct
samples from a restricted normal density where lgrt is the log of the
distribution between the lower bound and infinity and lglt is the log of
the distribution function between negative infinity and the upper bound.
The exponentiated values of the two (exp(lgrt)+exp(lglt)) must
sum to more than 1.
These functions are mainly used to handle cases where the differences
between the upper and lower bounds \code{b-a} are small. In such cases,
using \code{pnorm(b)-pnorm(a)} may result in 0 being returned even when the
difference is supposed to be positive.
}
\examples{
pnorm_ct(0.2,0.4)
exp(pnorm_ct(0.2,0.4))
pnorm_ct(0.2,0.4,log.p=FALSE)
log(pnorm_ct(0.2,0.4,log.p=FALSE))
## Example where difference between two pnorm calls fail
## but call to pnorm_ct works
pnorm(0.5)-pnorm(0.4999999999999999)
pnorm_ct(0.4999999999999999,0.5,log.p=FALSE)
}
\keyword{internal}
|
75d754748ca6e1b3c918aed44989829dd24c9690
|
1c531619b82ab3ab8d50f554f9e1707bcd8f3dfc
|
/class scores.R
|
7762c90f334134c9291b626998f26c058b5f0aa6
|
[] |
no_license
|
acehjy97/20180721
|
0ab5278f8cdd727e1df2d49120970aadecfba8d0
|
2c4371d08543ff23f622a5a5351591932afabf12
|
refs/heads/master
| 2020-03-23T16:15:58.924940
| 2018-08-04T09:19:09
| 2018-08-04T09:19:09
| 141,801,193
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 3,179
|
r
|
class scores.R
|
# Exercise sheet: dplyr practice on a class-scores data set.
# (Comments translated from Korean; the numbered problems are tasks for the
# student to solve -- the answers are intentionally not filled in.)
library(rJava)
library(DBI)
library(RJDBC)
library(XML)
library(memoise)
library(KoNLP)
library(wordcloud)
library(dplyr)
library(ggplot2)
library(ggmap)
library(rvest)
library(RColorBrewer)
library(data.table)
library(reshape)
# NOTE(review): the result of this read is not assigned; Problem 1 below asks
# the student to store it in a data frame named `scores` -- presumably this is
# an exercise stub, confirm before "fixing".
read.csv("class_scores.csv")
#######################################################
## Problem 1. Convert class_scores.csv into a data frame.
# Name the data frame as follows:
# scores
# By default R treats character strings in a data file as
# factors. If you want to read strings as-is instead, add
# stringsAsFactors=FALSE to the read call.
#######################################################
#######################################################
## Problem 2. Explore / summarize `scores`.
#######################################################
#######################################################
## Problem 3. Rename the columns of `scores` (original Korean name -> variable):
## student ID  = Stu_ID
## school year = grade
## class level = class
## sex         = gender
## mathematics = Math
## English     = English
## science     = Science
## marketing   = Marketing
## writing     = Writing
#######################################################
#######################################################
## Problem 4. Sort first-year (grade 1) students by student ID, descending.
#######################################################
#######################################################
## Problem 5. Show only first-year male students.
#######################################################
#######################################################
## Problem 6. Among students who are NOT in grade 1, show only the
## 3 smallest (earliest) student IDs.
## !grade == 1
## top_n(n=3, wt=Stu_ID)
## n is the number of rows to keep (top 3), wt is the ranking variable.
#######################################################
#######################################################
## Problem 7. Show grade 1 and 2 students sorted by grade, then student ID.
## grade == 1 | grade == 2
## arrange(grade, Stu_ID)
## tail shows only the last 6 rows
#######################################################
#######################################################
## Problem 8. Add a column with the mean of Math, English, Science,
## Marketing and Writing, then print the students whose
## mean score is at least 80.
## Sort by mean descending, then student ID ascending.
## mutate(mean = (Math+English+Science+Marketing+Writing) %/% 5)
## mean >= 80
## (also illustrates the getter/setter concept)
#######################################################
#######################################################
## Problem 9. Show only students who scored 100 in at least
## one subject.
## * chain multiple | conditions
#######################################################
#######################################################
## Problem 10. Among students with an odd student ID, show the male
## students whose Math AND Science scores are both >= 90,
## sorted by student ID ascending.
#######################################################
|
270d1b12289d4b708ce4160e1dd7f07343db3046
|
af72407b36c1ee3182f3a86c3e73071b31456702
|
/data-raw/ames.R
|
4fc5ade4acd17e9fd1b2ac680d3048f0ad46174d
|
[
"MIT"
] |
permissive
|
bcjaeger/ipa
|
f89746d499500e0c632b8ca2a03904054dc12065
|
2e4b80f28931b8ae6334d925ee8bf626b45afe89
|
refs/heads/master
| 2021-07-12T20:52:23.778632
| 2020-04-26T16:44:01
| 2020-04-26T16:44:01
| 207,016,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
ames.R
|
## code to prepare the `ames` dataset goes here
# Builds a paired complete/missing version of the Ames housing data for
# benchmarking imputation: 1200 sampled rows get most of their columns set to
# NA, while the (log) outcome Sale_Price is kept fully observed in both copies.
set.seed(329)
# Fixed: these library() calls were commented out even though the script uses
# drop_na(), make_ames(), %>%, map(), mutate() and select() below, so it could
# not run on its own.
library(tidyverse)
library(AmesHousing)
ames_complete <- ames_missing <- drop_na(make_ames())
rows <- sample(nrow(ames_complete), 1200)
# Punch holes: for each selected row, blank all but 4 of the non-outcome
# columns (column indices are drawn from the first ncol - 1 columns, which
# excludes the last column).
for(i in rows){
cols <- sample(ncol(ames_complete)-1, size = ncol(ames_complete) - 5)
ames_missing[i, cols] <- NA
}
ames <- list(
complete = ames_complete,
missing = ames_missing
) %>%
map(mutate, Sale_Price = log(Sale_Price)) %>%
map(mutate, Kitchen_AbvGr = factor(Kitchen_AbvGr)) %>%
map(select, Sale_Price, everything(), -Utilities)
# Keep the outcome fully observed in the "missing" copy as well.
ames$missing$Sale_Price <- ames$complete$Sale_Price
usethis::use_data(ames, overwrite = TRUE)
|
1193a1c2b1ce70c45e74207be77dab84b98d6f00
|
718f5c39e5749f259cfa3a64ac9fd7c314dd3408
|
/Log log.R
|
4056d246622321d202cfddf162322455830f6b94
|
[] |
no_license
|
Reinaldodos/Covid19
|
3be30953d482c18b7c53d320a26cc134728e8d7a
|
e791b7dd69559fe60070b5480ea7494815029b17
|
refs/heads/master
| 2023-03-02T22:33:37.553423
| 2021-02-18T11:51:01
| 2021-02-18T11:51:01
| 255,569,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 766
|
r
|
Log log.R
|
# Compute daily new confirmed cases from the cumulative counts, then plot new
# vs. total confirmed cases on log-log axes for the 12 countries with the most
# days above 200 daily cases.
# (<- instead of = for assignment; se = FALSE instead of F, per R convention.)
TEST <-
  TEST %>%
  group_by(country) %>%
  mutate(Daily_cases = Number - lag(Number)) %>%
  ungroup() %>%
  drop_na(Daily_cases) %>%
  filter(Daily_cases > 0)
TEST %>%
  filter(Daily_cases > 200) %>%
  count(country) %>%
  top_n(n = 12, wt = n) %>%
  semi_join(x = TEST, by = "country") %>%
  # filter(str_detect(string = country, pattern = "China", negate = T)) %>%
  # mutate_at(.vars = vars(Number, Daily_cases), .funs = ~./Population) %>%
  ggplot(mapping = aes(
    x = Number,
    y = Daily_cases,
    colour = country,
    group = country
  )) +
  geom_smooth(se = FALSE) +
  scale_x_log10() +
  scale_y_log10() +
  theme(legend.position = "bottom") +
  xlab(label = "Total confirmed cases") +
  ylab(label = "New confirmed cases") +
  facet_wrap(facets = ~region)
|
eb2dd23442e249a96bcbc62e2dffeaa7fbcfdec1
|
b3c5f225ac5fc4e3280591206bac374565c65a86
|
/R/check_bw.R
|
bb4753183ef663670b3cb62684020831c6e31049
|
[] |
no_license
|
ClaudiaRHD/chipAnalyseR
|
4d5c404b51130ab650383022801bb2c50eba3111
|
2e41b66048e3945f90d3ac30be22700d4157ba04
|
refs/heads/master
| 2021-04-15T10:09:31.490707
| 2020-06-15T09:41:37
| 2020-06-15T09:41:37
| 126,216,813
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
check_bw.R
|
#' Checks for the bwtool binary
#' @description Takes an optional path to the directory containing bwtool and
#'   checks whether bwtool is installed, either there or on the system PATH.
#' @param bw_path The path to the directory where bwtool is installed on the
#'   computer. Default value is NULL, in which case the system PATH is searched.
#' @return The path to the bwtool executable.
#' @import ggplot2
#' @import data.table
#' @import cowplot
# NOTE(review): the @import tags above look unrelated to this function; they
# are kept unchanged because removing them would change the generated NAMESPACE.
# Check if bwtool is available on the computer.
check_bw <- function(bw_path = NULL){
  if(is.null(bw_path)){
    # Search the system PATH. Sys.which() returns "" when not found and the
    # full path to the executable when found.
    bw_path <- unname(Sys.which('bwtool'))
    if(bw_path == ""){
      stop("bwtool could not be found. Please download bwtool: https://github.com/CRG-Barcelona/bwtool/wiki ")
    }
    # Fixed: the original re-checked file.exists("bwtool") after Sys.which()
    # succeeded, which fails unless bwtool happens to sit in the current
    # working directory; Sys.which() already returns the resolved path.
    message("bwtool found in sys")
  }else{
    # file.path() tolerates directories given with or without a trailing
    # slash (the original paste0() silently required a trailing slash).
    bw_path <- file.path(bw_path, 'bwtool')
    if (file.exists(bw_path)){
      message("inserted bwpath is correct. bwtool found")
    } else {
      stop(paste0("bwtool could not be found in ", bw_path,". Please download bwtool: https://github.com/CRG-Barcelona/bwtool/wiki"))
    }
  }
  return(bw_path)
}
|
6cf119625a89160c73d08a8738d4297496fe8c10
|
4aae56c278bde19385a0e52ce75edc8bdd241740
|
/man/multinomial_metrics.Rd
|
bc482f5f3762e96041b540b4236f8458708588e8
|
[
"MIT"
] |
permissive
|
LudvigOlsen/cvms
|
627d216939203b7e6e1da6b68802d2208138b622
|
38dc4d5117d67d00c81fb05677b771d3b39a6c28
|
refs/heads/master
| 2023-07-05T08:33:44.190664
| 2023-06-30T04:06:31
| 2023-06-30T04:06:31
| 71,063,931
| 38
| 7
|
NOASSERTION
| 2022-09-23T10:57:12
| 2016-10-16T16:54:21
|
R
|
UTF-8
|
R
| false
| true
| 4,697
|
rd
|
multinomial_metrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/choosing_metrics_functions.R
\name{multinomial_metrics}
\alias{multinomial_metrics}
\title{Select metrics for multinomial evaluation}
\usage{
multinomial_metrics(
all = NULL,
overall_accuracy = NULL,
balanced_accuracy = NULL,
w_balanced_accuracy = NULL,
accuracy = NULL,
w_accuracy = NULL,
f1 = NULL,
w_f1 = NULL,
sensitivity = NULL,
w_sensitivity = NULL,
specificity = NULL,
w_specificity = NULL,
pos_pred_value = NULL,
w_pos_pred_value = NULL,
neg_pred_value = NULL,
w_neg_pred_value = NULL,
auc = NULL,
kappa = NULL,
w_kappa = NULL,
mcc = NULL,
detection_rate = NULL,
w_detection_rate = NULL,
detection_prevalence = NULL,
w_detection_prevalence = NULL,
prevalence = NULL,
w_prevalence = NULL,
false_neg_rate = NULL,
w_false_neg_rate = NULL,
false_pos_rate = NULL,
w_false_pos_rate = NULL,
false_discovery_rate = NULL,
w_false_discovery_rate = NULL,
false_omission_rate = NULL,
w_false_omission_rate = NULL,
threat_score = NULL,
w_threat_score = NULL,
aic = NULL,
aicc = NULL,
bic = NULL
)
}
\arguments{
\item{all}{Enable/disable all arguments at once. (Logical)
Specifying other metrics will overwrite this, why you can
use (\code{all = FALSE, accuracy = TRUE}) to get only the Accuracy metric.}
\item{overall_accuracy}{\code{Overall Accuracy} (Default: TRUE)}
\item{balanced_accuracy}{\code{Balanced Accuracy} (Default: TRUE)}
\item{w_balanced_accuracy}{\code{Weighted Balanced Accuracy} (Default: FALSE)}
\item{accuracy}{\code{Accuracy} (Default: FALSE)}
\item{w_accuracy}{\code{Weighted Accuracy} (Default: FALSE)}
\item{f1}{\code{F1} (Default: TRUE)}
\item{w_f1}{\code{Weighted F1} (Default: FALSE)}
\item{sensitivity}{\code{Sensitivity} (Default: TRUE)}
\item{w_sensitivity}{\code{Weighted Sensitivity} (Default: FALSE)}
\item{specificity}{\code{Specificity} (Default: TRUE)}
\item{w_specificity}{\code{Weighted Specificity} (Default: FALSE)}
\item{pos_pred_value}{\code{Pos Pred Value} (Default: TRUE)}
\item{w_pos_pred_value}{\code{Weighted Pos Pred Value} (Default: FALSE)}
\item{neg_pred_value}{\code{Neg Pred Value} (Default: TRUE)}
\item{w_neg_pred_value}{\code{Weighted Neg Pred Value} (Default: FALSE)}
\item{auc}{\code{AUC} (Default: FALSE)}
\item{kappa}{\code{Kappa} (Default: TRUE)}
\item{w_kappa}{\code{Weighted Kappa} (Default: FALSE)}
\item{mcc}{\code{MCC} (Default: TRUE)
Multiclass Matthews Correlation Coefficient.}
\item{detection_rate}{\code{Detection Rate} (Default: TRUE)}
\item{w_detection_rate}{\code{Weighted Detection Rate} (Default: FALSE)}
\item{detection_prevalence}{\code{Detection Prevalence} (Default: TRUE)}
\item{w_detection_prevalence}{\code{Weighted Detection Prevalence} (Default: FALSE)}
\item{prevalence}{\code{Prevalence} (Default: TRUE)}
\item{w_prevalence}{\code{Weighted Prevalence} (Default: FALSE)}
\item{false_neg_rate}{\code{False Neg Rate} (Default: FALSE)}
\item{w_false_neg_rate}{\code{Weighted False Neg Rate} (Default: FALSE)}
\item{false_pos_rate}{\code{False Pos Rate} (Default: FALSE)}
\item{w_false_pos_rate}{\code{Weighted False Pos Rate} (Default: FALSE)}
\item{false_discovery_rate}{\code{False Discovery Rate} (Default: FALSE)}
\item{w_false_discovery_rate}{\code{Weighted False Discovery Rate} (Default: FALSE)}
\item{false_omission_rate}{\code{False Omission Rate} (Default: FALSE)}
\item{w_false_omission_rate}{\code{Weighted False Omission Rate} (Default: FALSE)}
\item{threat_score}{\code{Threat Score} (Default: FALSE)}
\item{w_threat_score}{\code{Weighted Threat Score} (Default: FALSE)}
\item{aic}{AIC. (Default: FALSE)}
\item{aicc}{AICc. (Default: FALSE)}
\item{bic}{BIC. (Default: FALSE)}
}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
Enable/disable metrics for multinomial evaluation. Can be supplied to the
\code{`metrics`} argument in many of the \code{cvms} functions.
Note: Some functions may have slightly different defaults than the ones supplied here.
}
\examples{
\donttest{
# Attach packages
library(cvms)
# Enable only Balanced Accuracy
multinomial_metrics(all = FALSE, balanced_accuracy = TRUE)
# Enable all but Balanced Accuracy
multinomial_metrics(all = TRUE, balanced_accuracy = FALSE)
# Disable Balanced Accuracy
multinomial_metrics(balanced_accuracy = FALSE)
}
}
\seealso{
Other evaluation functions:
\code{\link{binomial_metrics}()},
\code{\link{confusion_matrix}()},
\code{\link{evaluate_residuals}()},
\code{\link{evaluate}()},
\code{\link{gaussian_metrics}()}
}
\author{
Ludvig Renbo Olsen, \email{r-pkgs@ludvigolsen.dk}
}
\concept{evaluation functions}
|
d71649acf3303ef445c0556985631de660bbfa55
|
5d820c8e7f4b458f6783a35d1022d4f6a8f8dc02
|
/man/crop.Rd
|
c4a0a27e470a67fdb93881556b451b3003008c2a
|
[
"MIT"
] |
permissive
|
UBC-MDS/image-compression-toolkit--R
|
307a5968f1adda8638b6cfe0552d1b1ac60b9353
|
d0879412ab8d24e621e8da4754392d4893660ba4
|
refs/heads/master
| 2020-04-21T19:25:07.974713
| 2019-03-06T05:09:25
| 2019-03-06T05:09:25
| 169,805,189
| 0
| 2
|
NOASSERTION
| 2019-03-06T05:09:26
| 2019-02-08T22:06:29
|
R
|
UTF-8
|
R
| false
| true
| 616
|
rd
|
crop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_crop.R
\name{crop}
\alias{crop}
\title{Crop images}
\usage{
crop(img_path, H, W, out_path)
}
\arguments{
\item{img_path}{---- String , file path of the image .}
\item{H}{---- Integer, the desired height of the cropped image}
\item{W}{---- Integer, the desired width of the cropped image}
\item{out_path}{---- String , desired file path of the cropped image}
}
\value{
---- String, the path of the cropped image; the function also saves the cropped image to this path.
}
\description{
Crop images
}
\examples{
# crop("..data/image.png", 10L, 15L, "..data/cropped_image.png")
}
|
d641a6c0ebbbce9892399b355fc3bc6f82dc09c3
|
1c9c5d98c6a1d4132762ed058f6b007bb5648afb
|
/Coursera Statistics Princeton/Stats1.13.HW.02.LAB.R
|
1538610bcc7ebf78e3c450286b03dcdd50e979a9
|
[
"MIT"
] |
permissive
|
dmpe/R
|
4475efefd5921551dde96ef19e971773b33d9451
|
ba8cb71f54ec969073d2b64a6cfdbc9a6042d178
|
refs/heads/master
| 2022-08-14T07:26:06.512865
| 2021-04-17T11:03:40
| 2021-04-17T11:03:40
| 8,522,174
| 685
| 440
|
MIT
| 2021-04-17T11:03:41
| 2013-03-02T15:04:10
|
R
|
UTF-8
|
R
| false
| false
| 2,449
|
r
|
Stats1.13.HW.02.LAB.R
|
# Coursera "Statistics One" homework 2 lab: descriptive statistics for a
# study with pre/post measurements and three conditions (WM, PE, DS).
library(psych)
# NOTE(review): hard-coded setwd() makes the script non-portable; kept as-is
# because this is a personal homework script.
setwd("C:/Users/Dima/Documents/R/coursera/")
file <- read.table("Stats1.13.HW.02.txt", header=T)
names(file)
dim(file) # number of rows
mean(file$SR)
var(file$SR)
# Split into pre- and post-intervention subsets.
subfile1 <- subset(file, file$time=="pre")
mean(subfile1$SR)
subfile2 <- subset(file, file$time=="post")
sd(subfile2$SR)
median(subfile2$SR)
subfile1
subfile2
# Either you do the subsetting or just
# describeBy(subfile2, subfile2$condition)
# I did the subsetting
# POST: mean SR per condition
test1 <- subset(subfile2, subfile2$condition=="WM")
mean(test1$SR)
test2 <- subset(subfile2, subfile2$condition=="PE")
mean(test2$SR)
test3 <- subset(subfile2, subfile2$condition=="DS")
mean(test3$SR)
# PRE: mean SR per condition
test4 <- subset(subfile1, subfile1$condition=="WM")
mean(test4$SR)
test5 <- subset(subfile1, subfile1$condition=="PE")
mean(test5$SR)
test6 <- subset(subfile1, subfile1$condition=="DS")
mean(test6$SR)
# Histograms of SR for each condition, pre and post, in a 2x3 grid.
par(mfrow= c(2,3))
file1 = subset(subfile1, subfile1$condition=="WM")
file2 = subset(subfile2, subfile2$condition=="WM")
file3 = subset(subfile1, subfile1$condition=="PE")
file4 = subset(subfile2, subfile2$condition=="PE")
file5 = subset(subfile1, subfile1$condition=="DS")
file6 = subset(subfile2, subfile2$condition=="DS")
# http://stackoverflow.com/a/10759521/1607152
hist(file1$SR)
hist(file2$SR)
hist(file3$SR)
hist(file4$SR)
hist(file5$SR)
hist(file6$SR)
# Beware: if you compare density then the more "normal distributed" is Pre PM
# But this is wrong. The right one is Post WM
# Density plots of column 4 (total symptom score) per condition/time subset.
par(mfrow = c(2,3))
plot(density(test1[, 4]), xlab = "Total sympton score", main = "")
plot(density(test2[, 4]), xlab = "Total sympton score", main = "")
plot(density(test3[, 4]), xlab = "Total sympton score", main = "")
plot(density(test4[, 4]), xlab = "Total sympton score", main = "")
plot(density(test5[, 4]), xlab = "Total sympton score", main = "")
plot(density(test6[, 4]), xlab = "Total sympton score", main = "")
# Post-minus-pre difference in mean SR for each condition.
subfile1.wm = subset(subfile1, subfile1$condition=="WM")
subfile2.wm = subset(subfile2, subfile2$condition=="WM")
subfile1.pe = subset(subfile1, subfile1$condition=="PE")
subfile2.pe = subset(subfile2, subfile2$condition=="PE")
subfile1.ds = subset(subfile1, subfile1$condition=="DS")
subfile2.ds = subset(subfile2, subfile2$condition=="DS")
mean(subfile2.wm$SR)-mean(subfile1.wm$SR)
mean(subfile2.pe$SR)-mean(subfile1.pe$SR)
mean(subfile2.ds$SR)-mean(subfile1.ds$SR)
|
93be9995fe7290e933e39ef95010df69ac8cc78c
|
155cfb7d883ad64a68c77185d41c9501ed8b91d8
|
/Fig3_Figure_smoothed_LA_data_per_species.r
|
41aa18613c9947a3cb3fb7472119d76212a0bad2
|
[
"MIT"
] |
permissive
|
nielsjdewinter/Sr_spiking
|
7db1ad2602491bbd8603037bcb5a1dc0e3beaffc
|
9a43c00646bf2e517cb80fe9d24bccf47e0900d1
|
refs/heads/main
| 2023-04-14T19:47:50.192420
| 2023-03-21T08:36:50
| 2023-03-21T08:36:50
| 559,837,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,796
|
r
|
Fig3_Figure_smoothed_LA_data_per_species.r
|
# Plot smoothed Sr profiles for Figure 1
# Project "LAICPMS_Sr_spiking"
# library() rather than require(): require() only warns (and returns FALSE)
# when a package is missing, whereas library() fails fast with an error.
library(tidyverse)
library(RColorBrewer)
library(readxl)
library(ggpubr)
# Load data - Batch 1 (three replicate LA-ICPMS profiles per specimen)
Cedule_G003_1 <- read.csv("Batch1/HR/Cedule_003_1.csv", header = TRUE)
Cedule_G003_2 <- read.csv("Batch1/HR/Cedule_003_2.csv", header = TRUE)
Cedule_G003_3 <- read.csv("Batch1/HR/Cedule_003_3.csv", header = TRUE)
Cedule_G511_1 <- read.csv("Batch1/HR/Cedule_511_1.csv", header = TRUE)
Cedule_G511_2 <- read.csv("Batch1/HR/Cedule_511_2.csv", header = TRUE)
Cedule_G511_3 <- read.csv("Batch1/HR/Cedule_511_3.csv", header = TRUE)
Cedule_G600_1 <- read.csv("Batch1/HR/Cedule_600_1.csv", header = TRUE)
# Fixed copy-paste bug: this previously re-read "Cedule_600_1.csv".
Cedule_G600_2 <- read.csv("Batch1/HR/Cedule_600_2.csv", header = TRUE)
Cedule_G600_3 <- read.csv("Batch1/HR/Cedule_600_3.csv", header = TRUE)
# Note: the inline [-c(1, 2), ] row drop that was applied only to
# Cedule_G003_1 is omitted here, because the cleaning loop below already
# drops rows 1-2 from every profile; applying it at read time removed two
# genuine data rows from that one profile twice.
# Names of the batch-1 data frames above; the cleaning loop below uses
# get()/assign() on these names to process each one in turn.
dfnames_batch1 <- c("Cedule_G003_1",
"Cedule_G003_2",
"Cedule_G003_3",
"Cedule_G511_1",
"Cedule_G511_2",
"Cedule_G511_3",
"Cedule_G600_1",
"Cedule_G600_2",
"Cedule_G600_3"
)
# Clean each batch-1 profile in place: drop the first two rows, coerce every
# column to numeric, rename the columns, drop the ratio columns that are not
# used downstream, tag specimen/species/profile parsed from the data frame
# name (e.g. "Cedule_G003_1"), and add a depth axis inverted so it runs in
# growth direction.
for(i in dfnames_batch1){
df <- get(i)
df <- df[-c(1, 2), ]
df <- as.data.frame(apply(df, 2, as.numeric))
colnames(df) <- c("Time", "Depth", "MgCa24", "MgCa", "CaCa", "CaCa44", "CaCa48", "MnCa", "SrCa87", "SrCa", "BaCa")
df$MgCa24 <- df$CaCa <- df$CaCa44 <- df$CaCa48 <- df$SrCa87 <- NULL
df$Specimen <- strsplit(i, "_")[[1]][2]
df$Species <- strsplit(i, "_")[[1]][1]
df$Profile <- strsplit(i, "_")[[1]][3]
df$Depth2 <- max(df$Depth) - df$Depth # Invert growth direction
assign(i, df)
}
# Stack all batch-1 profiles; .id = "Specimen_id" adds a "1".."9" index column
# identifying which input data frame each row came from.
LA_combined_batch1 <- bind_rows(Cedule_G003_1,
Cedule_G003_2,
Cedule_G003_3,
Cedule_G511_1,
Cedule_G511_2,
Cedule_G511_3,
Cedule_G600_1,
Cedule_G600_2,
Cedule_G600_3,
.id = "Specimen_id"
)
# Profile plot of batch-1 data: each profile's Sr/Ca curve is vertically
# offset by 2 mmol/mol per Specimen_id so the curves do not overlap.
Profile_plot_Sr_offset_batch1 <- ggplot(LA_combined_batch1) +
geom_point(aes(Depth2,
SrCa + as.numeric(Specimen_id) * 2 - 2,
col = Specimen),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 23, 5),
labels = seq(0, 23, 5)) +
scale_x_continuous("Distance from ventral margin [mm]") +
coord_cartesian(ylim = c(0, 23)) +
ggtitle("Offset (+ 2 mmol/mol) Sr/Ca curves \n Cerastoderma edule batch 1") +
theme_bw()
# ------------------------------------------------------------------------------
# Load smoothed batch-2 data: one Excel sheet per specimen profile; the
# O. edulis specimens additionally have a "_chalky" sheet.
filename_smooth <- "Batch2/LA data/01_20211025_SEQ326_NdW_LR__profiles_smooth_0.05.xlsx"
sheets_smooth <- excel_sheets(filename_smooth)
datalist <- lapply(sheets_smooth, function(X) read_excel(filename_smooth, sheet = X))
names(datalist) <- sheets_smooth
Medulis_G177_smooth <- as.data.frame(datalist["G177"])
Medulis_G191_smooth <- as.data.frame(datalist["G191"])
Medulis_G259_smooth <- as.data.frame(datalist["G259"])
Oedulis_G271_smooth <- as.data.frame(datalist["G271"])
Oedulis_G271_chalky_smooth <- as.data.frame(datalist["G271_chalky"])
Oedulis_G282_smooth <- as.data.frame(datalist["G282"])
Oedulis_G282_chalky_smooth <- as.data.frame(datalist["G282_chalky"])
Oedulis_G372_smooth <- as.data.frame(datalist["G372"])
Oedulis_G372_chalky_smooth <- as.data.frame(datalist["G372_chalky"])
Cedule_G457_smooth <- as.data.frame(datalist["G457"])
Cedule_G472_smooth <- as.data.frame(datalist["G472"])
Cedule_G555_smooth <- as.data.frame(datalist["G555"])
# Names of the smoothed data frames above; used by the cleaning loop below.
dfnames_smooth <- c("Medulis_G177_smooth",
"Medulis_G191_smooth",
"Medulis_G259_smooth",
"Oedulis_G271_smooth",
"Oedulis_G271_chalky_smooth",
"Oedulis_G282_smooth",
"Oedulis_G282_chalky_smooth",
"Oedulis_G372_smooth",
"Oedulis_G372_chalky_smooth",
"Cedule_G457_smooth",
"Cedule_G472_smooth",
"Cedule_G555_smooth"
)
# Clean each smoothed profile: rename the columns, drop the Ca/Ca column, tag
# specimen/species parsed from the data frame name, and compute the cumulative
# along-profile distance from the laser stage coordinates (Xpos/Ypos).
for(i in dfnames_smooth){
df <- get(i)
colnames(df) <- c("Time", "Depth", "Xpos", "Ypos", "NaCa", "MgCa", "CaCa", "MnCa", "SrCa", "BaCa")
df$CaCa <- NULL
df$Specimen <- strsplit(i, "_")[[1]][2]
df$Species <- strsplit(i, "_")[[1]][1]
# Cumulative Euclidean distance along the laser track. Vectorized
# (diff/cumsum) replacement for the original element-wise loop: identical
# values (same summation order), O(n), and safe for single-point profiles,
# where the original "for(j in 2:length(...))" construct misbehaves because
# 2:1 counts downwards.
df$Distance <- c(0, cumsum(sqrt(diff(df$Xpos)^2 + diff(df$Ypos)^2)))
assign(i, df)
}
# Load relative start and end positions of chalky lines in O. edulis
chalkypos <- read.csv("Batch2/Oedulis_chalky_positions.csv", header = TRUE)
# Shift the chalky-transect distances so they are expressed relative to the
# start of the corresponding main profile.
# NOTE(review): the * 1000 presumably converts chalkypos$start from mm to the
# um scale used by Distance, and the hard-coded indices 1-3 assume the csv
# rows are ordered G271, G282, G372 -- confirm against the csv.
Oedulis_G271_chalky_smooth$Distance <- Oedulis_G271_chalky_smooth$Distance + chalkypos$start[1] * 1000
Oedulis_G282_chalky_smooth$Distance <- Oedulis_G282_chalky_smooth$Distance + chalkypos$start[2] * 1000
Oedulis_G372_chalky_smooth$Distance <- Oedulis_G372_chalky_smooth$Distance + chalkypos$start[3] * 1000
# Stack all smoothed profiles; .id = "Specimen_id" indexes the source (1-12).
LA_combined_smooth <- bind_rows(Medulis_G177_smooth,
Medulis_G191_smooth,
Medulis_G259_smooth,
Oedulis_G271_smooth,
Oedulis_G271_chalky_smooth,
Oedulis_G282_smooth,
Oedulis_G282_chalky_smooth,
Oedulis_G372_smooth,
Oedulis_G372_chalky_smooth,
Cedule_G457_smooth,
Cedule_G472_smooth,
Cedule_G555_smooth,
.id = "Specimen_id"
)
#save(LA_combined_smooth, file = "Batch2/LA data/LA_combined_batch2_smooth.Rdata")
# Create profile plots
Profile_plot_Sr_offset_all <- ggplot(LA_combined_smooth) +
geom_point(data = subset(LA_combined_smooth, !(Specimen_id %in% c(5, 7, 9))),
aes(Distance,
SrCa + as.numeric(Specimen_id) - 1,
col = Species),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 20, 2),
labels = seq(0, 20, 2),
limits = c(0, 20)) +
scale_x_continuous("Distance from ventral margin [um]") +
ggtitle("Offset (+ 1) Sr/Ca curves") +
theme_bw()
Profile_plot_Sr_offset_all2 <- ggplot(LA_combined_smooth) +
geom_point(data = LA_combined_smooth,
aes(Distance,
SrCa + as.numeric(Specimen_id) - 1,
col = Species),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 20, 2),
labels = seq(0, 20, 2),
limits = c(0, 20)) +
scale_x_continuous("Distance from ventral margin [um]") +
ggtitle("Offset (+ 1) Sr/Ca curves") +
theme_bw()
# Isolate C. edule data
Profile_plot_Sr_Cedule <- ggplot(LA_combined_smooth) +
geom_point(data = subset(LA_combined_smooth, Species == "Cedule"),
aes(Distance / 1000,
SrCa + as.numeric(Specimen_id) * 2 - 20,
col = Specimen),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 20, 5),
labels = seq(0, 20, 5),
limits = c(0, 20)) +
scale_x_continuous("Distance from ventral margin [mm]") +
ggtitle("Offset (+ 2 mmol/mol) Sr/Ca curves \n Cerastoderma edule") +
theme_bw()
# Isolate O. edulis data
Profile_plot_Sr_Oedulis <- ggplot(LA_combined_smooth) +
geom_point(data = subset(LA_combined_smooth, Species == "Oedulis" & Specimen_id %in% c(4, 6, 8)),
aes(Distance / 1000,
SrCa + as.numeric(Specimen_id) - 4,
col = Specimen),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 10, 2),
labels = seq(0, 10, 2),
limits = c(0, 8)) +
scale_x_continuous("Distance from ventral margin [mm]") +
ggtitle("Offset (+ 2 mmol/mol) Sr/Ca curves \n Ostrea edulis") +
theme_bw()
# Isolate O. edulis data, excluding chalky records
Profile_plot_Sr_Oedulis_nochalky <- ggplot(LA_combined_smooth) +
geom_point(data = subset(LA_combined_smooth, Species == "Oedulis" & !(Specimen_id %in% c(5, 7, 9))),
aes(Distance / 1000,
SrCa + as.numeric(Specimen_id) - 4,
col = Specimen),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 10, 2),
labels = seq(0, 10, 2),
limits = c(0, 10)) +
scale_x_continuous("Distance from ventral margin [mm]") +
ggtitle("Offset (+ 2 mmol/mol) Sr/Ca curves \n Ostrea edulis") +
theme_bw()
# Isolate M. edulis data
Profile_plot_Sr_Medulis <- ggplot(LA_combined_smooth) +
geom_point(data = subset(LA_combined_smooth, Species == "Medulis"),
aes(Distance / 1000,
SrCa + as.numeric(Specimen_id) * 2 - 2,
col = Specimen),
alpha = 0.1,
size = 0.1) +
scale_y_continuous("Sr/Ca (mmol/mol)",
breaks = seq(0, 10, 2),
labels = seq(0, 10, 2),
limits = c(0, 10)) +
scale_x_continuous("Distance from ventral margin [mm]") +
ggtitle("Offset (+ 2 mmol/mol) Sr/Ca curves \n Mytilus edulis") +
theme_bw()
# Combine species plots into one multi-panel plot (ggpubr::ggarrange).
# NOTE(review): `Profile_plot_Sr_offset_batch1` is not defined in this part of
# the script — presumably created earlier from the batch-1 data; confirm it
# exists before this point.
Species_combined <- ggarrange(
  Profile_plot_Sr_offset_batch1 +
    theme(legend.position = "none") +
    coord_cartesian(ylim = c(0, 23)),
  Profile_plot_Sr_Cedule +
    theme(legend.position = "none") +
    coord_cartesian(ylim = c(0, 23)),
  Profile_plot_Sr_Medulis +
    theme(legend.position = "none") +
    coord_cartesian(ylim = c(0, 10),
                    xlim = c(0, 18)),
  Profile_plot_Sr_Oedulis +
    theme(legend.position = "none") +
    coord_cartesian(ylim = c(0, 10),
                    xlim = c(0, 36)),
  ncol = 2,
  nrow = 2,
  labels = c("A", "B", "C", "D")
)
|
269afecf3cac76201afd2afd239ec12503e13f37
|
b32192ef5ee4869289ead0cac00702906bb4b488
|
/src/utsg2021/getSocio.R
|
2a3831d037c331c26ab62c6b28df5fdab7e48054
|
[
"MIT"
] |
permissive
|
3rfm-its-davis/covid-19-ldt
|
10fe23996512a38de01813abdd82f6a77070ded9
|
11367f772054798b182de49ccdca558b7ca18264
|
refs/heads/master
| 2022-12-20T09:40:23.217388
| 2020-09-20T10:52:26
| 2020-09-20T10:52:26
| 275,208,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,344
|
r
|
getSocio.R
|
# getSocio: build a numeric socio-demographic covariate table from raw survey
# responses.
#
# Args:
#   raw: survey response data frame keyed by ResponseId, with coded question
#        columns (I01_1 = birth year, I05, I07, I09_USD, D01, D09_2, C07,
#        C12_*, C13_3, I04_* — meanings inferred from the recodes below;
#        confirm against the survey codebook).
#   y:   data frame of ResponseIds to keep (inner join restricts `raw`).
#
# Returns: one row per retained respondent with recoded variables. Categorical
#   variables are first recoded to labels, then to factors with an explicit
#   level order, and FINALLY converted to their integer level codes via
#   as.numeric() — so the relevel/factor(levels = ...) calls below directly
#   determine the numeric coding and must not be reordered.
getSocio <- function(raw, y) {
  socio <- raw %>%
    inner_join(y, by = "ResponseId") %>%
    select(c(
      "ResponseId", "City", "B02_5", "D01",
      "D09_2", "C07", "C12_3", "C12_13", "C13_3",
      "I01_1", "I04_2", "I04_3", "I04_1", "I04_5",
      "I04_4", "I05", "I09_USD", "I07", "I12"
    )) %>%
    # Birth year -> generation bins (left-open intervals).
    mutate(Age = cut(I01_1, c(1920, 1965, 1980, 1995, 2010),
      labels = c("(1920-1965]", "(1965-1980]", "(1980-1995]", "(1995-2010]")
    )) %>%
    # Gender: code 2 assumed to mean Male; NA stays NA.
    mutate(Gen = case_when(
      I05 == 2 ~ "Male",
      I05 != 2 ~ "Non-Male"
    )) %>%
    mutate(Edc = case_when(I07 > 3 ~ ">=Bachelor", I07 < 4 ~ "<Bachelor")) %>%
    mutate(Chl = case_when(D09_2 > 0 ~ "Yes", D09_2 == 0 ~ "No")) %>%
    # NOTE(review): the T ~ fallthrough also captures NA incomes as
    # "50,000-99,999" — confirm this is intended.
    mutate(Inc = case_when(
      I09_USD < 3 ~ "<50,000",
      I09_USD > 4 ~ ">=100,000",
      T ~ "50,000-99,999"
    )) %>%
    mutate(Rem = case_when(C07 == 0 ~ "No", C07 > 0 ~ "Yes")) %>%
    # Likert items centred on the middle category (3).
    mutate(Hel = C12_3 - 3) %>%
    mutate(Vdc = C12_13 - 3) %>%
    # NOTE(review): as with Inc, NA in C13_3 falls through to "same".
    mutate(Fut = case_when(
      C13_3 < 3 ~ "less",
      C13_3 > 3 ~ "more",
      T ~ "same"
    )) %>%
    # Race/ethnicity indicator columns (0/1) -> Yes/No labels.
    mutate(Raa = case_when(I04_1 == 1 ~ "Yes", I04_1 == 0 ~ "No")) %>%
    mutate(Rab = case_when(I04_2 == 1 ~ "Yes", I04_2 == 0 ~ "No")) %>%
    mutate(Ran = case_when(I04_3 == 1 ~ "Yes", I04_3 == 0 ~ "No")) %>%
    mutate(Raw = case_when(I04_4 == 1 ~ "Yes", I04_4 == 0 ~ "No")) %>%
    mutate(Rao = case_when(I04_5 == 1 ~ "Yes", I04_5 == 0 ~ "No")) %>%
    mutate(Ngh = case_when(D01 == 1 ~ "Urban", D01 > 1 ~ "Non-Urban")) %>%
    # Collapse cities into census regions.
    mutate(City = case_when(
      City %in% c(
        "LA", "SF", "Sacramento",
        "Seattle", "SD", "Denver", "SLC"
      ) ~ "West",
      City %in% c("Kansas", "Detroit", "Chicago") ~ "Midwest",
      City %in% c("Atlanta", "Tampa", "DC") ~ "South",
      City %in% c("NYC", "Boston") ~ "Northeast"
    )) %>%
    mutate_at(c(
      "City", "Age", "Inc", "Edc",
      "Rem", "Gen", "Ngh", "Raa", "Rab",
      "Ran", "Raw", "Rao"
    ), as.factor) %>%
    # Explicit level orders: these define the integer codes produced by the
    # as.numeric() conversion at the end.
    mutate(Age = factor(Age, levels = c(
      "(1965-1980]",
      "(1995-2010]",
      "(1980-1995]",
      "(1920-1965]"
    ))) %>%
    mutate(Inc = factor(Inc, levels = c(
      "<50,000",
      "50,000-99,999",
      ">=100,000"
    ))) %>%
    mutate(Raa = factor(Raa, levels = c("No", "Yes"))) %>%
    mutate(Rab = factor(Rab, levels = c("No", "Yes"))) %>%
    mutate(Ran = factor(Ran, levels = c("No", "Yes"))) %>%
    mutate(Raw = factor(Raw, levels = c("No", "Yes"))) %>%
    mutate(Rao = factor(Rao, levels = c("No", "Yes"))) %>%
    mutate(Chl = factor(Chl, levels = c("No", "Yes"))) %>%
    mutate(Edc = factor(Edc, levels = c("<Bachelor", ">=Bachelor"))) %>%
    mutate(Fut = factor(Fut, levels = c("less", "same", "more"))) %>%
    mutate(Ngh = factor(Ngh, levels = c("Urban", "Non-Urban"))) %>%
    mutate(Rem = factor(Rem, levels = c("No", "Yes"))) %>%
    mutate(City = factor(City, levels = c(
      "West",
      "Midwest",
      "South",
      "Northeast"
    ))) %>%
    # Factors -> integer level codes (1-based, per the orders set above).
    mutate_at(c(
      "City", "Age", "Inc", "Edc", "Rem",
      "Gen", "Ngh", "Raa", "Rab", "Ran",
      "Raw", "Rao", "Chl", "Fut"
    ), as.numeric) %>%
    select(
      ResponseId, City, Age, Inc, Hel,
      Vdc, Rem, Edc, Chl, Gen,
      Ngh, Raa, Rab, Ran, Raw,
      Rao, Fut
    )
  return(socio)
}
|
cef7ad4f0acd315163d6db2ca47f03d907b01c0f
|
2e27d0ee5455c14be50ac40871cbb9538b29b8d1
|
/R/sphere.smooth.R
|
58b63dff88b387a9dfca58816e7cdc3aed64f483
|
[] |
no_license
|
antiphon/sphere
|
66975fa754559dc61f4bfa0aa51e4757f22353b2
|
e6e3b7e5e31741503718da18d102695f277258a3
|
refs/heads/master
| 2022-05-06T13:33:35.720737
| 2022-03-25T05:37:34
| 2022-03-25T05:37:34
| 29,916,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
sphere.smooth.R
|
#' Smoothed unit sphere values
#'
#' Krige on a sphere, return a plottable orb.
#'
#' @param latlon latitude-longitude of data
#' @param v values on data points
#' @param newlatlon currently IGNORED: the prediction locations are always the
#'   vertices of the refined icosahedron, and this argument is overwritten
#'   below (predictions must align one-to-one with the mesh vertices for
#'   plotting). NOTE(review): consider dropping this parameter.
#' @param N refinement of the icosahedron triangulation
#' @param s smoothing sd for gaussian smoothing
#'
#' @import rgl
#' @export
sphere.smooth <- function(latlon, v, newlatlon, N=3, s=.25){
  # the smoothing locations: vertices of an N-times subdivided icosahedron
  ico <- icosahedron3d()
  for(i in 1:N) ico <- subdivision3d(ico)
  # homogeneous coordinates -> xyz, then to lat-lon prediction points
  # (this overwrites any user-supplied newlatlon — see @param note)
  newxyz <- t(ico$vb[1:3,])/ico$vb[4,]
  newlatlon <- xyz2ll(newxyz)
  # predict values at the mesh vertices
  f <- sphere.predict(latlon, v, newlatlon, s=s)
  # assign to icosahedron vertices (normalize homogeneous coordinates)
  ico$vb <- t( t(ico$vb)/ico$vb[4,])
  ico$data <- if(missing(v)) latlon else cbind(latlon, v)
  ico$prediction <- f
  class(ico) <- c("spheresmooth", class(ico))
  ico
}
#' Plot the 3d sphere of predictions
#'
#' Renders a "spheresmooth" object as a coloured 3d mesh, optionally
#' overlaying the original data points as numbered labels.
#'
#' @param obj a "spheresmooth" object as returned by sphere.smooth
#' @param col colour palette function for mapping predicted values
#' @param col_zlim optional value limits for the colour scale
#' @param ... further arguments passed on to values2colors/plot3d/text3d
#' @param data if TRUE, also draw the observed data points
#'
#' @import rgl
#' @export
plot.spheresmooth <- function(obj, col=heat.colors, col_zlim, ..., data=FALSE) {
  # Map predicted vertex values to colours, then expand to one colour per
  # triangle-corner index so the mesh is shaded consistently.
  vertex_cols <- values2colors(obj$prediction, col = col, zlim = col_zlim, ...)
  triangle_cols <- vertex_cols[c(obj$it)]
  plot3d(obj, col = triangle_cols, aspect = FALSE, ...)
  if (data) {
    # Overlay the observations, coloured on the same palette and labelled
    # by their row number.
    obs_cols <- values2colors(obj$data[, 3], col = col, ...)
    text3d(ll2xyz(obj$data[, 1:2]), col = obs_cols, texts = 1:nrow(obj$data))
  }
}
|
c3d09b90f1dd8913a1d417b2b76f2222de6d0d3a
|
fb0cbf6db81ee5ff6dfe73b618e2d649251b8b77
|
/Model1.R
|
1a19db5090f166db08a2f519145625b356c292e2
|
[] |
no_license
|
PGC-PTSD-EWAS/PGC-PTSD-Longitudinal-Analysis
|
6a5841a9a2a33388959851e555a608315611b677
|
8ff7f5832bccac3343f5fefc3ee1d408885edeee
|
refs/heads/main
| 2023-08-12T00:50:22.291579
| 2021-10-11T14:49:57
| 2021-10-11T14:49:57
| 416,046,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,674
|
r
|
Model1.R
|
################################################################################
# Model 1: RE model to evaluate CpGs associated with PTSS in each cohort
################################################################################
# Template script: the " " placeholders below (file paths, PTSD variable,
# study ID) must be filled in per cohort before running.
library(lme4)
library(lmerTest)
library(data.table)
# Load methylation data
beta.norm<-fread(" ",data.table=F) # beta matrix. CpG x Participants
rownames(beta.norm)<-beta.norm$V1
beta.norm<-beta.norm[,-1]
# Load phenotype file
pheno<-read.csv(" ",stringsAsFactors=FALSE,header=TRUE, row.names = 1)
# Define Variables
ptsdVariable<-" " # 0 -1 scaled
covariates<-c("age","CD8T.Epic","CD4T.Epic","NK.Epic","Bcell.Epic","Mono.Epic","Comp.2","Comp.3") # E.g., c("Age", "Gender", "CD8T", ....)
idVariable<-"studyid" # Participant ID for the Random Effects. E.g., "IID"
studyID<-" " # E.g. "MRS","Prismo"
# Keep only participants present in both tables, then sort both the same way
# so columns of beta.norm align with rows of pheno.
beta.norm<-beta.norm[,names(beta.norm)%in%row.names(pheno)]
pheno<-pheno[row.names(pheno)%in%names(beta.norm),]
beta.norm<-beta.norm[,order(names(beta.norm))]
pheno<-pheno[order(row.names(pheno)),]
# Interactive sanity checks: should be all TRUE / the expected PTSD range.
table(colnames(beta.norm)==rownames(pheno))
range(pheno[,ptsdVariable])
# Remove NAs in the modelled variables (outcome, covariates, ID).
naindex<-pheno[,c(ptsdVariable,covariates,idVariable)]
naindex<-complete.cases(naindex)
beta.norm<-beta.norm[,naindex]
pheno<-pheno[naindex,]
table(rownames(pheno)==colnames(beta.norm))
# Converting to M-values. Betas of exactly 0 or 1 are nudged off the
# boundary first so the logit transform stays finite.
range(beta.norm, na.rm=T)
# Changing beta values of 0 to 0.0001
if(min(beta.norm, na.rm=T)==0){
  beta.norm[which(beta.norm==0)]<-0.0001
}
# Changing beta values of 1 to 0.9999
if(max(beta.norm, na.rm=T)==1.000000e+00){
  beta.norm[which(beta.norm==1.000000e+00)]<-0.9999
}
range(beta.norm, na.rm=T)
sum(is.na(beta.norm))
# Convert to Mvalues using log2
beta.norm<-log2(beta.norm/(1-beta.norm)) # log transforming
sum(beta.norm=="-Inf", na.rm=T) # Should be 0
sum(is.na(beta.norm)) # should be same as the number of missing in the beta.norm matrix
range(beta.norm, na.rm=T)
# Configuring phenotypes: placeholder column for the per-CpG methylation
# values filled in inside the model loop; ID must be a factor for (1|id).
pheno$meth<-NA
pheno[,idVariable]<-factor(pheno[,idVariable])
# Results objects: one row per CpG, one column per fixed effect
# (intercept + PTSD + covariates).
resultsBeta<-matrix(nrow=nrow(beta.norm), ncol=2+length(covariates))
rownames(resultsBeta)<-rownames(beta.norm)
colnames(resultsBeta)<-c("(Intercept)", ptsdVariable, covariates)
resultsSE<-resultsT<-resultsP<-resultsDF<-resultsBeta
errorProbes<-NULL
warningProbes<-NULL
# Clear any stale last.warning so per-probe warning detection starts clean.
assign("last.warning", NULL, envir = baseenv())
# Random-intercept model: meth ~ PTSD + covariates + (1 | participant).
formula<-as.formula(paste("meth~",ptsdVariable, "+",
                          paste(covariates, collapse="+"),
                          "+(1|", idVariable, ")", sep=""))
# Running analyses: fit the random-intercept model CpG-by-CpG and harvest the
# fixed-effect tables. Probes whose fit errors or warns are recorded
# separately and left as NA in the result matrices.
start<-proc.time()[3]
for(ii in seq_len(nrow(beta.norm))){
  # Methylation M-values for this CpG, aligned to the phenotype rows.
  pheno$meth<-t(beta.norm[ii, rownames(pheno)])
  fit<-try(lmer(formula, data=pheno), silent=FALSE)
  # Only store results for clean fits (no error and no warning).
  # NOTE: warning detection relies on inspecting and clearing `last.warning`
  # in baseenv(), which only works under the default options(warn = 0).
  # inherits() replaces the original `class(fit) != "try-error"` comparison,
  # which is fragile for objects with multiple classes.
  if(!inherits(fit, "try-error") && is.null(warnings())){
    res<-coef(summary(fit))
    resultsBeta[ii,]<-res[, "Estimate"]
    resultsSE[ii,]<-res[, "Std. Error"]
    resultsT[ii,]<-res[, "t value"]
    resultsP[ii,]<-res[, "Pr(>|t|)"]
    resultsDF[ii,]<-res[, "df"]
  }
  if(inherits(fit, "try-error")){
    errorProbes<-append(errorProbes, rownames(beta.norm)[ii])
  }
  if(!is.null(warnings())){
    warningProbes<-append(warningProbes, rownames(beta.norm)[ii])
    assign("last.warning", NULL, envir = baseenv())
  }
  if(ii%%10==0){print(ii)} # progress indicator
  pheno$meth<-NA
}
end<-proc.time()[3]
end-start # elapsed seconds
rm(res, fit)
# For main Pheno: collect the PTSD-term statistics for every probe.
final<-as.data.frame(cbind(resultsBeta[,ptsdVariable],resultsSE[,ptsdVariable],resultsT[,ptsdVariable],resultsP[,ptsdVariable],resultsDF[,ptsdVariable]))
rownames(final)<-rownames(beta.norm)
colnames(final)<-c("BETA","SE","t","pval","df")
# BUG FIX: the original used row.names = F, which discarded the CpG probe IDs
# held in rownames(final), leaving the output table unidentifiable. Keep them.
write.csv(final,file = paste0(studyID,"_RE_PCL01scaled_covar_age_epicCellTypes_methPC.csv"),quote = FALSE,row.names = TRUE)
|
c265d727f2d15a84fc01b8c9811e4ef91bd6b76d
|
f697336c25ca6fadd13c8746309f3d1b47d13864
|
/man/validateIcon.Rd
|
da76da12c5f002c963b470e63df5c4340ceae6b2
|
[] |
no_license
|
cran/shiny.pwa
|
c1116d1086192c064789e3cda570206be92e1268
|
d15ae361ec8be31278ec92d6b7282a6d06d17e7c
|
refs/heads/master
| 2023-05-31T08:19:17.120970
| 2021-06-19T15:50:02
| 2021-06-19T15:50:02
| 298,777,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 637
|
rd
|
validateIcon.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validators.R
\name{validateIcon}
\alias{validateIcon}
\title{Validates the provided icon. If the icon does not exist returns a default one.}
\usage{
validateIcon(icon)
}
\arguments{
\item{icon}{Path location for an icon relative to the project root}
}
\value{
A valid icon path
}
\description{
Validates the provided icon. If the icon does not exist returns a default one.
}
\seealso{
[validateDomain()], [validateLocation()]
Other validators:
\code{\link{validateDomain}()},
\code{\link{validateLocation}()}
}
\concept{validators}
|
0f0586aab28f60679599806ec5e3d55c0bbf8cea
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/iccbeta/examples/Hofmann.Rd.R
|
403c16c7fc7aa655a2787c484e365afdb2e12e45
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,482
|
r
|
Hofmann.Rd.R
|
library(iccbeta)
### Name: Hofmann
### Title: A multilevel dataset from Hofmann, Griffin, and Gavin (2000).
### Aliases: Hofmann
### Keywords: datasets
### ** Examples
### NOTE(review): auto-extracted package example; the executable body below is
### kept inside "## Not run" / "##D" comment markers by the extraction tool,
### so only the library() call above actually runs.
## Not run:
##D
##D if(requireNamespace("lme4") && requireNamespace("RLRsim")){
##D data(Hofmann)
##D library("lme4")
##D
##D # Random-Intercepts Model
##D lmmHofmann0 = lmer(helping ~ (1|id), data = Hofmann)
##D vy_Hofmann = var(Hofmann[,'helping'])
##D
##D # Computing icca
##D VarCorr(lmmHofmann0)$id[1,1]/vy_Hofmann
##D
##D # Estimating Group-Mean Centered Random Slopes Model, no level 2 variables
##D lmmHofmann1 <- lmer(helping ~ mood_grp_cent + (mood_grp_cent |id),
##D                    data = Hofmann, REML = FALSE)
##D X_Hofmann = model.matrix(lmmHofmann1)
##D P = ncol(X_Hofmann)
##D T1_Hofmann = VarCorr(lmmHofmann1)$id[1:P,1:P]
##D
##D # Computing iccb
##D icc_beta(X_Hofmann, Hofmann[,'id'], T1_Hofmann, vy_Hofmann)$rho_beta
##D
##D # Performing LR test
##D # Need to install 'RLRsim' package
##D library("RLRsim")
##D lmmHofmann1a <- lmer(helping ~ mood_grp_cent + (1 | id),
##D                     data = Hofmann, REML = FALSE)
##D obs.LRT <- 2*(logLik(lmmHofmann1) - logLik(lmmHofmann1a))[1]
##D X <- getME(lmmHofmann1,"X")
##D Z <- t(as.matrix(getME(lmmHofmann1,"Zt")))
##D sim.LRT <- LRTSim(X, Z, 0, diag(ncol(Z)))
##D (pval <- mean(sim.LRT > obs.LRT))
##D } else {
##D stop("Please install packages `RLRsim` and `lme4` to run the above example.")
##D }
## End(Not run)
|
242d38a87b2456fca802d0bffb11124b8a01c425
|
db2b9f7e9ae8019b3ad7f9c154ac38389c129bd8
|
/aargh/dataprocessing.R
|
f50d7ee20def4d08f26428c20acb54c31bfb8ee5
|
[] |
no_license
|
borealbirds/foam
|
bc70fb46efb744d202eddc6804f086e82766b1de
|
9b30065e500156372767bc66ea5d7c59d86bc5ca
|
refs/heads/master
| 2021-09-04T10:10:20.055552
| 2018-01-17T21:24:27
| 2018-01-17T21:24:27
| 114,273,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,624
|
r
|
dataprocessing.R
|
##---
##title: "Data processing for nationam BAM analyses"
##author: "Peter Solymos"
##date: "Apr 18, 2016"
##output:
## pdf_document:
## toc: true
## toc_depth: 2
##---
### Preliminaries start here
## Define root folder where data are stored
ROOT <- "c:/bam/May2015"
ROOT2 <- "e:/peter/bam/Apr2016"
## Load required packages
library(mefa4)
library(RODBC)
library(maptools)
library(QPAD)
## Load functions kept in separate file
source("~/repos/bamanalytics/R/dataprocessing_functions.R")
### Pulling in tables
## Define MS Access database connection
con <- odbcConnectAccess2007(file.path(ROOT, "BAM_BayneAccess_BAMBBScore.accdb"))
con2 <- odbcConnectAccess2007(file.path(ROOT2, "BBS_V4draft_2016_aprilForErin.accdb"))
#### Species lookup table
TAX <- sqlFetch(con, "dbo_DD_BAM_AOU52_SPECIES_TBL")
TAX$SSMA_TimeStamp <- NULL
#### SS level stuff for BAM+BBS combined
## This table has time zone, BCR, jurisdiction, XY
SS01 <- sqlFetch(con, "dbo_BBSBAM_V4_XYTBL_ATTRIBUTES1")
SS01 <- nonDuplicated(SS01, SS, TRUE)
SS01$COUNTRY <- ifelse(SS01$JURSALPHA %in% c("AB","BC","MB","NB",
"NL","NS","NT","NU","ON","PEI","QC","SK","YK"), "CAN", "USA")
save(SS01, file=file.path(ROOT2, "out", "SS-regions-and-xy.Rdata"))
## Tree proportions
SS02 <- sqlFetch(con, "dbo_TREE_BBSBAM_V4_tbl")
SS02 <- nonDuplicated(SS02, SS, TRUE)
SS02 <- SS02[rownames(SS01),]
## TREE update 2016-12-01
tr <- read.csv("e:/peter/bam/Apr2016/tree_update/BAM_BBS_2015_XY_AVHRRTREE_nov30_2016.csv")
SS02$TREE_WRONG <- SS02$TREE
SS02$TREE <- tr$VCFAVHRR[match(SS02$SS, tr$SS)]
SS02$TREE[SS02$TREE > 100] <- NA
SS02$TREE[SS02$TREE < 0] <- NA
SS02$TREE <- SS02$TREE / 100
SS02$TREE3 <- factor(NA, levels=c("Open", "Sparse", "Dense"))
SS02$TREE3[SS02$TREE < 0.25] <- "Open"
SS02$TREE3[SS02$TREE >= 0.25 & SS02$TREE < 0.60] <- "Sparse"
SS02$TREE3[SS02$TREE >= 0.60] <- "Dense"
## Point level land cover
SS03 <- sqlFetch(con, "dbo_BAMBBS_LANDCOVER_PTS")
SS03 <- nonDuplicated(SS03, SS, TRUE)
SS03 <- SS03[rownames(SS01),]
## comment out all LCC05 and EOSD
## Reclass LCC05
ltlcc <- read.csv("~/repos/bamanalytics/lookup/lcc05.csv")
SS03$LCC05 <- SS03$LCC05_PT
SS03$LCC05_PT[SS03$LCC05_PT < 1 | SS03$LCC05_PT > 39] <- NA
SS03$LCC05_PT[SS01$COUNTRY == "USA"] <- NA
SS03$HAB_LCC1 <- ltlcc$BAMLCC05V2_label1[match(SS03$LCC05_PT, ltlcc$lcc05v1_2)]
SS03$HAB_LCC2 <- ltlcc$BAMLCC05V2_label2[match(SS03$LCC05_PT, ltlcc$lcc05v1_2)]
SS03$HAB_LCC1 <- relevel(SS03$HAB_LCC1, "ConifDense")
SS03$HAB_LCC2 <- relevel(SS03$HAB_LCC2, "Conif")
if (FALSE) {
## for Nicole
SS_Nicole <- data.frame(
PCODE=SS01$PCODE,
SS=SS01$SS,
X=SS01$X_GEONAD83,
Y=SS01$Y_GEONAD83,
JURS=SS01$JURSALPHA,
COUNTRY=SS01$COUNTRY,
BCR=as.factor(SS01$BCR),
SS03[,c("LCC05","HAB_LCC1","HAB_LCC2")])
## Reclass EOSD
lteosd <- read.csv("~/repos/bamanalytics/lookup/eosd.csv")
levels(SS03$EOSD_PT) <- sub(",", "", levels(SS03$EOSD_PT))
SS03$EOSD_PT <- as.integer(as.character(SS03$EOSD_PT))
SS03$EOSD_PT[SS03$EOSD_PT < 1] <- NA
SS03$HAB_EOSD1 <- lteosd$Reclass_label1[match(SS03$EOSD_PT, lteosd$Value)]
SS03$HAB_EOSD2 <- lteosd$Reclass_label2[match(SS03$EOSD_PT, lteosd$Value)]
SS03$HAB_EOSD1 <- relevel(SS03$HAB_EOSD1, "ConifDense")
SS03$HAB_EOSD2 <- relevel(SS03$HAB_EOSD2, "Conif")
}
## Reclass NALCMS
ltnalc <- read.csv("~/repos/bamanalytics/lookup/nalcms.csv")
SS03$HAB_NALC2 <- ltnalc$Label[match(SS03$NALCMS_PT, ltnalc$Value)]
tmp <- as.character(interaction(SS03$HAB_NALC2, SS02$TREE3, sep="", drop=TRUE))
SS03$HAB_NALC1 <- as.character(SS03$HAB_NALC2)
ii <- SS03$HAB_NALC1 %in% c("Conif", "Decid", "Mixed", "Wet")
SS03$HAB_NALC1[ii] <- tmp[ii]
SS03$HAB_NALC1 <- as.factor(SS03$HAB_NALC1)
SS03$HAB_NALC1 <- relevel(SS03$HAB_NALC1, "ConifDense")
SS03$HAB_NALC2 <- relevel(SS03$HAB_NALC2, "Conif")
## NALC is used for QPADv3
if (FALSE) {
## LCC for offsets
SS03$LCC_OFF1 <- as.factor(ltlcc$qpad_num[match(SS03$LCC05_PT, ltlcc$lcc05v1_2)])
SS03$LCC_OFF2 <- factor(5, 1:5)
SS03$LCC_OFF2[is.na(SS03$HAB_NALC1)] <- NA
SS03$LCC_OFF2[SS03$HAB_NALC1 %in% c("DecidSparse")] <- "4"
SS03$LCC_OFF2[SS03$HAB_NALC1 %in% c("ConifSparse","MixedSparse")] <- "3"
SS03$LCC_OFF2[SS03$HAB_NALC1 %in% c("DecidDense")] <- "2"
SS03$LCC_OFF2[SS03$HAB_NALC1 %in% c("ConifDense","MixedDense")] <- "1"
SS03$LCC_combo <- SS03$LCC_OFF1
SS03$LCC_combo[is.na(SS03$LCC_OFF1)] <- SS03$LCC_OFF2[is.na(SS03$LCC_OFF1)]
}
## Grid ID 4x4 km
SS_grid <- read.csv(file.path(ROOT, "BAMBBS_Gridcode.csv"))
rownames(SS_grid) <- SS_grid$SS
compare_sets(rownames(SS01), rownames(SS_grid))
SS_grid <- SS_grid[rownames(SS01),"gridcode",drop=FALSE]
levels(SS_grid$gridcode) <- gsub(",", "", levels(SS_grid$gridcode))
## Road: dist to, class, #lanes, surface
SS_road <- sqlFetch(con, "dbo_BAMBBS_2015_NearDistanceRoadJoin1000M")
rownames(SS_road) <- SS_road$SS
compare_sets(rownames(SS01), rownames(SS_road))
SS_road <- SS_road[rownames(SS01),]
SS_road$d2road <- SS_road[["Distance to Road"]]
table(SS_road$ROADCLASS, SS_road$d2road > 0, useNA="a")
table(SS_road$NBRLANES, SS_road$d2road > 0, useNA="a")
table(SS_road$PAVSTATUS, SS_road$d2road > 0, useNA="a")
table(SS_road$ROADCLASS, SS_road$NBRLANES, useNA="a")
SS_road <- SS_road[,c("d2road","ROADCLASS","NBRLANES","PAVSTATUS")]
## need to exclude # lanes >2
## Fire: year, size
SS_fire <- sqlFetch(con, "dbo_BBSBAM_2015_FIRE")
rownames(SS_fire) <- SS_fire$SS
compare_sets(rownames(SS01), rownames(SS_fire))
SS_fire <- SS_fire[rownames(SS01),]
SS_fire <- SS_fire[,c("Year","SIZE_HA")]
colnames(SS_fire) <- c("YearFire","FIRE_HA")
## Terrain: slope, twi, elev
SS_terr <- sqlFetch(con, "dbo_BBSBAM_2015_TERRAIN90")
rownames(SS_terr) <- SS_terr$SS
compare_sets(rownames(SS01), rownames(SS_terr))
SS_terr <- SS_terr[rownames(SS01),]
t(table(is.na(SS_terr$cti90), SS01$PCODE)) # mostly affects BBS in some states
SS_terr <- SS_terr[,c("slp90","cti90","elv90")]
## Climate variables from DS and NALCMS 4x4 level
SS_clim <- sqlFetch(con, "dbo_BBSBAM_2015__CLIMLU")
rownames(SS_clim) <- SS_clim$SS
compare_sets(rownames(SS01), rownames(SS_clim))
SS_clim <- SS_clim[rownames(SS01),]
tmp <- as.matrix(SS_clim[,grepl("NALCMS05_", colnames(SS_clim))])
SS_clim <- SS_clim[,!grepl("NALCMS05_", colnames(SS_clim))]
colnames(tmp) <- gsub("NALCMS05_", "", colnames(tmp))
Col <- as.character(ltnalc$Label)[match(colnames(tmp), as.character(ltnalc$Value))]
Col[is.na(Col)] <- "Water"
## 4x4 stuff is not done
#SS_NALC4x4 <- data.frame(groupSums(tmp, 2, Col, na.rm=TRUE))
#colnames(SS_NALC4x4) <- paste0("GRID4_NALC_", colnames(SS_NALC4x4))
SS_clim$NALCMS05 <- NULL
SS_clim$PCODE <- NULL
SS_clim$SS <- NULL
## 4x4 stuff is not done
if (FALSE) {
## LCC05 4x4 level
SS_LCC4x4 <- sqlFetch(con, "dbo_BBSBAM_V4_LCC05CND_4X4SUMM")
SS_LCC4x4 <- nonDuplicated(SS_LCC4x4, SS, TRUE)
#rownames(SS_LCC4x4) <- SS_LCC4x4$SS
compare_sets(rownames(SS01), rownames(SS_LCC4x4))
SS_LCC4x4 <- SS_LCC4x4[rownames(SS01),]
SS_LCC4x4$SS <- NULL
SS_LCC4x4$gridcode <- NULL
SS_LCC4x4$LCCVVSUM <- NULL
SS_LCC4x4 <- as.matrix(SS_LCC4x4)
colnames(SS_LCC4x4) <- gsub("LCCVV", "", colnames(SS_LCC4x4))
#Col <- as.character(ltlcc$BAMLCC05V2_label1)[match(colnames(SS_LCC4x4),
# as.character(ltlcc$lcc05v1_2))]
Col <- as.character(ltlcc$BAMLCC05V2_label2)[match(colnames(SS_LCC4x4),
as.character(ltlcc$lcc05v1_2))]
Col[is.na(Col)] <- "BARREN"
SS_LCC4x4 <- data.frame(groupSums(SS_LCC4x4, 2, Col, na.rm=TRUE))
SS_LCC4x4[is.na(SS03$HAB_LCC2),] <- NA
colnames(SS_LCC4x4) <- paste0("GRID4_LCC_", colnames(SS_LCC4x4))
## EOSD 4x4 level
SS_EOSD4x4 <- sqlFetch(con, "dbo_BBSBAM_V4_EOSD_4X4SUMM")
SS_EOSD4x4$upsize_ts <- NULL
rownames(SS_EOSD4x4) <- SS_EOSD4x4$SS
compare_sets(rownames(SS01), rownames(SS_EOSD4x4))
SS_EOSD4x4 <- SS_EOSD4x4[match(rownames(SS01), rownames(SS_EOSD4x4)),]
rownames(SS_EOSD4x4) <- SS01$SS
SS_EOSD4x4 <- as.matrix(SS_EOSD4x4[,grepl("eosdVV", colnames(SS_EOSD4x4))])
colnames(SS_EOSD4x4) <- gsub("eosdVV", "", colnames(SS_EOSD4x4))
#Col <- as.character(lteosd$Reclass_label1)[match(colnames(SS_EOSD4x4),
# as.character(lteosd$Value))]
Col <- as.character(lteosd$Reclass_label2)[match(colnames(SS_EOSD4x4),
as.character(lteosd$Value))]
Col[is.na(Col)] <- "BARREN"
SS_EOSD4x4 <- data.frame(groupSums(SS_EOSD4x4, 2, Col, na.rm=TRUE))
SS_EOSD4x4[is.na(SS03$HAB_EOSD2),] <- NA
colnames(SS_EOSD4x4) <- paste0("GRID4_EOSD_", colnames(SS_EOSD4x4))
}
## HEIGHT (Simard)
SS_height <- sqlFetch(con, "dbo_Height")
SS_height <- nonDuplicated(SS_height, SS, TRUE)
compare_sets(rownames(SS01), rownames(SS_height))
SS_height <- SS_height[rownames(SS01),]
SS_height <- SS_height[,"HEIGHTSIMARD",drop=FALSE]
if (FALSE) {
## Nature Serve range: 3 spp (Can clipped range used 0/1)
SS_nserv <- sqlFetch(con, "dbo_BBSBAM_SARSPPLOCATIONSRange")
SS_nserv <- nonDuplicated(SS_nserv, SS, TRUE)
compare_sets(rownames(SS01), rownames(SS_nserv))
SS_nserv <- SS_nserv[rownames(SS01),]
SS_nserv <- SS_nserv[,c("CAWAINOUT","OSFLINOUT","CONIINOUT")]
}
## GFW yearly loss intersections and 1st year of loss
#SS_gfw <- sqlFetch(con, "dbo_BAMBBS_GFWLossYear")
SS_gfw <- read.csv(file.path(ROOT, "GFWLossYear.csv"))
SS_gfw <- nonDuplicated(SS_gfw, SS, TRUE)
compare_sets(rownames(SS01), rownames(SS_gfw))
levels(SS_gfw$YearLoss) <- gsub(",", "", levels(SS_gfw$YearLoss))
SS_gfw$YearLoss <- as.integer(as.character(SS_gfw$YearLoss))
SS_gfw <- SS_gfw[rownames(SS01),]
SS_gfw <- SS_gfw[,"YearLoss",drop=FALSE]
## Pasher disturbance
SS_pash <- read.csv(file.path(ROOT, "bambbs2015beadandpasher.csv"))
SS_pash <- nonDuplicated(SS_pash, SS, TRUE)
compare_sets(rownames(SS01), rownames(SS_pash))
SS_pash <- SS_pash[rownames(SS01),]
SS_pash <- SS_pash[,c("BEADTotalL","BEADtotPol")]
## Local spring
SS_sprng <- read.csv("e:/peter/bam/May2015/NRCAN_SG_001_BAMBBS2015_71_13.csv")
SS_sprng <- SS_sprng[,c("SS","RASTERVALU")]
SS_sprng$SPRNG <- SS_sprng$RASTERVALU
levels(SS_sprng$SPRNG) <- gsub(",", "", levels(SS_sprng$SPRNG))
SS_sprng$SPRNG <- as.numeric(as.character(SS_sprng$SPRNG))
SS_sprng$SPRNG[SS_sprng$SPRNG < 0] <- NA
rownames(SS_sprng) <- SS_sprng$SS
SS_sprng <- SS_sprng[rownames(SS01),]
## Put together the main SS level object
SS <- data.frame(
PCODE=SS01$PCODE,
SS=SS01$SS,
X=SS01$X_GEONAD83,
Y=SS01$Y_GEONAD83,
Xcl=SS01$X_CLCC,
Ycl=SS01$Y_CLCC,
JURS=SS01$JURSALPHA,
COUNTRY=SS01$COUNTRY,
TZONE=SS01$TZONE_CODE,
BOREALLOC=SS01$BOREALLOC,
BCR=as.factor(SS01$BCR),
TREE=SS02$TREE,
TREE3=SS02$TREE3,
SPRNG=SS_sprng$SPRNG,
#LCC05_PT=SS03$LCC05_PT, # -- FOR NICOLE
SS03[,c("LCC05","HAB_LCC1","HAB_LCC2")],
SS03[,c("HAB_NALC2", "HAB_NALC1")],
SS_grid,
#SS_nserv,
SS_road,
SS_terr,
SS_fire,
SS_clim,
SS_pash,
SS_gfw,
SS_height)
#SS_NALC4x4,
#SS_LCC4x4,
#SS_EOSD4x4)
#### Project summary table
## This table needed local tweaks to be operable
#PCODE <- sqlFetch(con, "dbo_National_Proj_Summary_V4_2015")
#PCODE$SSMA_TimeStamp <- NULL
PCODE <- read.csv(file.path(ROOT,"proj.csv"))
levels(PCODE$Maxdist) <- tolower(levels(PCODE$Maxdist))
levels(PCODE$Maxdist)[levels(PCODE$Maxdist)=="unlimited"] <- "Inf"
PCODE$Maxdist[PCODE$Maxdist=="unknown"] <- NA
PCODE$Maxdist <- droplevels(PCODE$Maxdist)
PCODE$Maxdist <- as.numeric(as.character(PCODE$Maxdist))
PCODE$Maxdur <- pmin(PCODE$MaxDuration, 10)
#### Survey level fields
## Pull in PKEY tables
pkbam <- sqlFetch(con, "dbo_National_PKEY_V4_2015")
pkbam$SSMA_TimeStamp <- NULL
#pkbbs <- sqlFetch(con, "dbo_PKEY_BBS_V3_2015")
pkbbs <- sqlFetch(con2, "PKEY_BBS_V4_2016")
## What columns to retain
PKCOLS <- c("PKEY","SS","PCODE","METHOD","SITE","STN","ROUND",
"YEAR","MONTH","DAY","HOUR","MIN","PART","MAXDUR","MAXDIS")
colnames(pkbam) <- toupper(colnames(pkbam))
pkbam$MAXDUR <- PCODE$Maxdur[match(pkbam$METHOD, PCODE$Method)]
pkbam$MAXDIS <- PCODE$Maxdist[match(pkbam$METHOD, PCODE$Method)]
pkbam$PART <- 1L
pkbam$MONTH <- pkbam$MM
pkbam$DAY <- pkbam$DD
pkbam$HOUR <- pkbam$HR
pkbam$YEAR <- pkbam$YYYY
levels(pkbam$ROUND) <- sub("[[:alpha:]]+$", "", levels(pkbam$ROUND))
pkbam$ROUND <- as.integer(as.character(pkbam$ROUND))
pkbam <- pkbam[,PKCOLS]
colnames(pkbbs) <- toupper(colnames(pkbbs))
pkbbs$MAXDUR <- 3
pkbbs$MAXDIS <- Inf
pkbbs$PART <- 2L
pkbbs$METHOD <- as.factor("BBS:9999")
pkbbs$SITE <- as.factor(pkbbs$SITE)
pkbbs$YEAR <- pkbbs$YYYY
pkbbs$MONTH <- pkbbs$MM
pkbbs$DAY <- pkbbs$DD
pkbbs$HOUR <- pkbbs$HR
pkbbs <- pkbbs[,PKCOLS]
PKEY <- rbind(pkbam, pkbbs)
#rm(pkbam, pkbbs)
#gc()
## Map `METHOD` field from project summary table onto `PKEY$METHOD`
## so that duration and distance method can be carried forward to
## point count table
levels(PCODE$Method)[levels(PCODE$Method) == "QCAtlas:118"] <- "QCATLAS:118"
compare_sets(PCODE$Method, PKEY$METHOD)
setdiff(PKEY$METHOD, PCODE$Method)
setdiff(PCODE$Method, PKEY$METHOD)
PKEY$DURMETH <- PCODE$DURMETH[match(PKEY$METHOD, PCODE$Method)]
PKEY$DISMETH <- PCODE$DISTMETH[match(PKEY$METHOD, PCODE$Method)]
## Identifying roadside surveys
PKEY$ROAD <- 0L
treat.as.bbs <- c("HOBBS","CF","MNBBA", levels(pkbbs$PCODE))
PKEY$ROAD[PKEY$PCODE %in% treat.as.bbs] <- 1L
#### Offset specific variables
## Date/time components
PKEY$MIN[is.na(PKEY$MIN)] <- 0 # min that critical, is not -- said Yoda
MM <- ifelse(PKEY$MONTH < 10, paste0("0", PKEY$MONTH), as.character(PKEY$MONTH))
HH <- ifelse(PKEY$HOUR < 10, paste0("0", PKEY$HOUR), as.character(PKEY$HOUR))
mm <- ifelse(PKEY$MIN < 10, paste0("0", PKEY$MIN), as.character(PKEY$MIN))
#mm[is.na(mm) & !is.na(HH)] <- "00"
DD <- with(PKEY, paste0(YEAR, "-", MM, "-", DAY, " ", HH, ":", mm, ":00"))
DD <- strptime(DD, "%Y-%m-%e %H:%M:%S")
PKEY$DATE <- DD
## Julian day
PKEY$JULIAN <- DD$yday # this is kept as original
PKEY$JDAY <- DD$yday / 365
summary(PKEY$JDAY)
## prevent too far extrapolation
PKEY$JDAY[PKEY$JDAY < 0.35 | PKEY$JDAY > 0.55] <- NA
## TSSR = time since sunrise
Coor <- as.matrix(cbind(as.numeric(SS$X),as.numeric(SS$Y)))[match(PKEY$SS, rownames(SS)),]
JL <- as.POSIXct(DD)
subset <- rowSums(is.na(Coor))==0 & !is.na(JL)
sr <- sunriset(Coor[subset,], JL[subset], direction="sunrise", POSIXct.out=FALSE) * 24
PKEY$srise <- NA
PKEY$srise[subset] <- sr
PKEY$start_time <- PKEY$HOUR + PKEY$MIN/60
TZ <- SS$TZONE[match(PKEY$SS, rownames(SS))]
lttz <- read.csv("~/repos/bamanalytics//lookup/tzone.csv")
lttz <- nonDuplicated(lttz, Timezone, TRUE)
PKEY$MDT_offset <- lttz$MDT_offset[match(TZ, rownames(lttz))]
table(TZ, PKEY$MDT_offset)
PKEY$TSSR <- (PKEY$start_time - PKEY$srise + PKEY$MDT_offset) / 24
PKEY$TSSR_orig <- PKEY$TSSR # keep a full copy
PKEY$TSSR[PKEY$start_time > 12] <- NA ## after noon
summary(PKEY$TSSR)
summary(PKEY$start_time)
PKEY <- PKEY[PKEY$DURMETH != "J",] # unknown duration
PKEY <- PKEY[PKEY$DISMETH != "O",] # unknown distance
PKEY <- droplevels(PKEY)
## QC Atlas problem
#with(PKEY, table(PCODE, is.na(MAXDUR)))
#with(PKEY, table(PCODE, is.na(MAXDIS)))
#### Point count tables and methodology
## Some of these tables are also tweaked locally (see below)
## Lookup tables pulled from the BAM Access database via RODBC
BEH <- sqlFetch(con, "dbo_DD_DescripBEH")
#DISINT <- sqlFetch(con, "dbo_DD_DescripDistance")
#DISINT$SSMA_TimeStamp <- NULL
#DURINT <- sqlFetch(con, "dbo_DD_DescripPeriod")
DISMET <- sqlFetch(con, "dbo_DD_distance_codes_methodology")
DURMET <- sqlFetch(con, "dbo_DD_duration_codes_methodology")
## Trailing whitespace removed from factor levels
levels(DISMET$DISTANCECODE) <- gsub(" *$", "", levels(DISMET$DISTANCECODE))
levels(DURMET$DURATIONCODE) <- gsub(" *$", "", levels(DURMET$DURATIONCODE))
#### Point count tables
## BAM national point counts
pcbam <- sqlFetch(con, "dbo_National_PtCount_V4_2015")
pcbam$SSMA_TimeStamp <- NULL
## BBS point counts -- harmonize column names with the BAM table
pcbbs <- sqlFetch(con2, "POINTCOUNT_BBS_V4_2016")
colnames(pcbbs) <- toupper(colnames(pcbbs))
colnames(pcbbs)[colnames(pcbbs) == "SPECIES_ID"] <- "SPECIES"
colnames(pcbbs)[colnames(pcbbs) == "PERIOD"] <- "DURATION"
pcbbs$PCODE <- "BBS"
## Columns to keep
pccols <- c("PCODE","SS","PKEY","DURATION","DISTANCE",
    "SPECIES","ABUND","BEH")
## Close the database connections
close(con)
close(con2)
## Duration and distance intervals (locally tweaked)
DURINT <- read.csv("~/repos/bamanalytics/lookup/durint.csv")
DISINT <- read.csv("~/repos/bamanalytics/lookup/disint.csv")
## Human-readable "start-end" interval labels
DURINT$dur <- paste0(DURINT$Dur_Start, "-", DURINT$DUR_end)
DISINT$dis <- paste0(DISINT$DIST_START, "-", DISINT$DIST_END)
## Row names are the interval codes, used for match() lookups below
rownames(DURINT) <- DURINT[,1]
rownames(DISINT) <- DISINT[,1]
## Combined point count table (BAM + BBS stacked with common columns)
PCTBL <- rbind(pcbam[,pccols], pcbbs[,pccols])
## Mapping duration and distance intervals
PCTBL$dur <- as.factor(DURINT$dur[match(PCTBL$DURATION, rownames(DURINT))])
PCTBL$dis <- as.factor(DISINT$dis[match(PCTBL$DISTANCE, rownames(DISINT))])
## Methodology codes joined from PKEY via the PKEY identifier
PCTBL$DISMETH <- droplevels(PKEY$DISMETH[match(PCTBL$PKEY, PKEY$PKEY)])
PCTBL$DURMETH <- droplevels(PKEY$DURMETH[match(PCTBL$PKEY, PKEY$PKEY)])
## Filtering surveys (need to exclude PKEY)
keeppkey <- rep(TRUE, nrow(PCTBL))
## 11=0-20
## 8=unk
keeppkey[PCTBL$DURATION %in% c(11,8)] <- FALSE
## Excluding unknown distance bands
keeppkey[PCTBL$DISTANCE %in% c(4,5,9)] <- FALSE
## Excluding unknown duration methodology
keeppkey[PCTBL$DURMETH == "J"] <- FALSE
## Excluding unknown distance methodology
keeppkey[PCTBL$DISMETH == "O"] <- FALSE
## Actual filtering -- but dropping PKEYs
PCTBL <- droplevels(PCTBL[keeppkey,])
## Filtering within survey (do not exclude PKEY)
## Filtering behaviour
#sort(100 * table(PCTBL$BEH) / sum(table(PCTBL$BEH)))
## 1=Heard
## 11=no birds observed at station - added 2011
## 6=seen and heard
## Excluding non-aerial detections
table(PCTBL$BEH, PCTBL$PCODE=="BBS")
keep <- rep(TRUE, nrow(PCTBL))
keep[!(PCTBL$BEH %in% c("1","6","11"))] <- FALSE
## this is fake, but there is no other option until a fix
#keep[is.na(PCTBL$BEH)] <- TRUE # dont know what this in -- FIXED in BBS_V4
## Excluding >10 min intervals
## 10=10-20
## 3=before or after
## 9=10-15
keep[PCTBL$DURATION %in% c(10,3,9)] <- FALSE
## Excluding NA values
keep[is.na(PCTBL$dur)] <- FALSE
keep[is.na(PCTBL$dis)] <- FALSE
keep[is.na(PCTBL$ABUND)] <- FALSE
## Actual filtering -- but keeping PKEYs (do not drop levels)
#PCTBL$keep <- keep
PCTBL <- PCTBL[keep,]
## Excluding/dropping species
PCTBL$SPECIES <- droplevels(PCTBL$SPECIES)
levels(PCTBL$SPECIES) <- toupper(levels(PCTBL$SPECIES))
## Reconcile species codes with the taxonomy table (same renames applied to both)
compare_sets(PCTBL$SPECIES, TAX$Species_ID)
setdiff(PCTBL$SPECIES, TAX$Species_ID)
levels(TAX$Species_ID)[levels(TAX$Species_ID) == "YWAR"] <- "YEWA"
levels(TAX$Species_ID)[levels(TAX$Species_ID) == "SCJU"] <- "DEJU" # change SCJU to DEJU
levels(TAX$Species_ID)[levels(TAX$Species_ID) == "MYWA"] <- "YRWA" # change MYWA to YRWA
levels(TAX$Species_ID)[levels(TAX$Species_ID) == "COSN"] <- "WISN" # change COSN to WISN
levels(PCTBL$SPECIES)[levels(PCTBL$SPECIES) == "YWAR"] <- "YEWA"
levels(PCTBL$SPECIES)[levels(PCTBL$SPECIES) == "SCJU"] <- "DEJU" # change SCJU to DEJU
levels(PCTBL$SPECIES)[levels(PCTBL$SPECIES) == "MYWA"] <- "YRWA" # change MYWA to YRWA
levels(PCTBL$SPECIES)[levels(PCTBL$SPECIES) == "COSN"] <- "WISN" # change COSN to WISN
PCTBL$SPECIES <- droplevels(PCTBL$SPECIES)
setdiff(PCTBL$SPECIES, TAX$Species_ID)
## Keep a copy of all species; SPECIES is then collapsed to singing species only
PCTBL$SPECIES_ALL <- PCTBL$SPECIES
sspp <- read.csv("~/repos/bamanalytics/lookup/singing-species.csv")
levels(PCTBL$SPECIES)[!(levels(PCTBL$SPECIES) %in% sspp$Species_ID[sspp$Singing_birds])] <- "NONE"
## Excluding columns no longer needed after interval mapping
PCTBL$DURATION <- NULL
PCTBL$DISTANCE <- NULL
PCTBL$BEH <- NULL
PCTBL$dur <- droplevels(PCTBL$dur)
PCTBL$dis <- droplevels(PCTBL$dis)
## Final cross-table ID consistency checks before saving
compare_sets(SS$SS, PKEY$SS)
compare_sets(SS$SS, PCTBL$SS)
compare_sets(PKEY$PKEY, PCTBL$PKEY)
save(SS, PKEY, PCTBL, TAX,
    file=file.path(ROOT2, "out",
        #paste0("data_package_2016-04-18.Rdata")))
        # paste0("data_package_2016-07-05.Rdata")))
        paste0("data_package_2016-12-01.Rdata")))
|
7912041bec3dd5b4fc230887ea4aaae6f414b69a
|
011e1a0d512282aca1397335d1be5db1ae75de5b
|
/somPlot/plotUMatrix.R
|
22a7580707e88b9f108292f829330144d1c2911e
|
[] |
no_license
|
spmunc/TrumpNN
|
6df78a3bda1e08e5d542b2c236f803d7777caf5c
|
33b3c3f7ea66d0e478ea10b0509ca834c0c94665
|
refs/heads/master
| 2021-01-21T13:14:20.872935
| 2016-04-23T20:53:18
| 2016-04-23T20:53:18
| 54,686,068
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,670
|
r
|
plotUMatrix.R
|
##########################################
##PLOT HEXAGONAL U-MATRIX
##from "kohnonen" library output
##
##
##BY SETH E. SPIELMAN, UNIVERSITY OF COLORADO
##
##NEEDS SOME LOVE.
##RUDIMENTRARY BUT FUNCTIONAL
##
##BORROWS CODE FROM
##http://nbremer.blogspot.nl/2013/11/how-to-create-hexagonal-heatmap-in-r.html
################################################
library(RColorBrewer) #to use brewer.pal
library(fields) #to use designer.colors
library(network)
library(deldir)
#' Plot a hexagonal U-matrix from a "kohonen" SOM object.
#'
#' @param som_obj A fitted SOM (from the kohonen package) with a hexagonal grid.
#' @param type Binning scheme for the colour scale: "Equal Interval" or "Quantile".
#' @return Invisibly NULL; called for its plotting side effect.
plotUmat <- function(som_obj, type="Equal Interval"){
  if (som_obj$grid$topo != "hexagonal"){
    stop("function assumes hexagonal SOM")
  }
  #CALCULATE U-MATRIX
  #Delaunay Triangulation to form network of neurons
  #FIX: was `aSom$grid$pts` -- referenced a global object instead of the
  #`som_obj` argument, so the function only worked if `aSom` happened to exist.
  d <- deldir(x=som_obj$grid$pts[,1], y=som_obj$grid$pts[,2])
  #Build network of neighbouring neurons from the triangulation edges
  n <- network(x=unique(d$delsgs[,5:6]), directed=FALSE, matrix.type="edgelist")
  #calculate u-matrix: the average euclidean distance between each vertex and its neighbors
  neigh.dists <- NA
  for(vert in network.vertex.names(n)){
    neighs <- get.neighborhood(x=n, v=vert)
    #FIX: was `aSom$codes`; use the function argument.
    #NOTE(review): the trailing `[,1]` restricts the distance calculation to
    #the first codebook column only -- this looks suspicious (a true U-matrix
    #would use all codebook dimensions) but is preserved as-is; confirm intent.
    neigh.dists[vert] <- (sum(dist(som_obj$codes[c(vert, neighs),][,1]))/length(neighs))
  }
  #Function to create the polygon for each hexagon
  Hexagon <- function (x, y, unitcell = 1, col = "grey", border=NA) {
    polygon(c(x, x, x + unitcell/2, x + unitcell, x + unitcell,
              x + unitcell/2), c(y + unitcell * 0.125, y + unitcell *
              0.875, y + unitcell * 1.125, y + unitcell * 0.875,
              y + unitcell * 0.125, y - unitcell * 0.125),
            col = col, border=border)
  }
  #Empty canvas sized to the SOM grid
  plot(0, 0, type = "n", axes = FALSE, xlim=c(0, som_obj$grid$xdim),
       ylim=c(0, som_obj$grid$ydim), xlab="", ylab= "", asp=1, main="U-Matrix")
  ColRamp <- rev(designer.colors(n=9, col=brewer.pal(9, "Spectral")))
  #color code for each neuron
  ColorCode <- rep("#FFFFFF", length(neigh.dists)) #default is all white
  if(type == "Equal Interval") {
    #Equal interval bins
    Bins <- seq(min(neigh.dists), max(neigh.dists), length=length(ColRamp))
  }
  if(type == "Quantile") {
    #Quantile colorbins
    Bins <- quantile(x=neigh.dists, probs=cumsum(rep(1/length(ColRamp), length(ColRamp))))
  }
  #Assign each neuron the colour of its nearest bin
  for (i in 1:length(neigh.dists))
    if (!is.na(neigh.dists[i])) ColorCode[i] <- ColRamp[which.min(abs(Bins-neigh.dists[i]))]
  offset <- 0.5 #offset for the hexagons when moving up a row
  ind <- 1
  for (row in 1:som_obj$grid$ydim) {
    for (column in 0:(som_obj$grid$xdim - 1)){
      Hexagon(column + offset, row - 1, col = ColorCode[ind])
      ind <- ind +1}
    #alternate the half-cell offset on every row (hexagonal packing)
    offset <- ifelse(offset, 0, 0.5)
  }
}
|
2838e0a3b514ff1140270e92d73aba62c5ad793b
|
78255ea5630895b338fd588752b66a2e8a157702
|
/R/getters.R
|
069ec70029886abddc749d8a1276a9e8a7b90cd8
|
[] |
permissive
|
Illumina/happyR
|
d2164760d2bb5b30b01c3464f0ecea203baeef3c
|
97d093a8b00d6f631e76cfac3411c29db0cd7044
|
refs/heads/master
| 2021-01-18T09:55:50.592615
| 2019-07-12T12:02:04
| 2019-07-12T12:02:04
| 100,359,406
| 16
| 2
|
BSD-3-Clause
| 2019-07-12T12:02:05
| 2017-08-15T09:01:57
|
R
|
UTF-8
|
R
| false
| false
| 7,052
|
r
|
getters.R
|
#' Extract hap.py Precision-Recall data
#'
#' Simpler interface to retrieve a data.frame
#' of PR metrics from a happy_result object.
#'
#' @param happy_result a happy result loaded
#' via \code{\link[happyR]{read_happy}}
#' @param var_type subset for either insertions
#' and deletions \code{"indel"}, SNVs \code{"snv"}
#' or keep both
#' @param filter include all records (ALL), only
#' passing (PASS) or with selective filters applied
#' (SEL)
#' @param subtype variant subtype of the form \code{[IDC]length_range},
#' e.g. \code{"D6_15"} is deletions of length \eqn{>=5} and \eqn{<=15}
#' @param subset when run with stratification regions, the subset is
#' the region ID. \code{"*"} for genome-wide PR data. See details.
#' @param quietly suppress info messages
#'
#' @details
#'
#' \strong{Subsets}: hap.py v0.3.7+ writes subsets \code{TS_contained} and
#' \code{TS_boundary} by default, corresponding to truth variants
#' well contained or at the boundary of confident regions. In some
#' truthsets, those in \code{TS_boundary} will show worse performance
#' metrics due to issues with variant representation or a partial
#' haplotype description.
#'
#' \strong{Subtypes}: Insertion subtypes are of the form: \code{[IDC]length_range}
#' where the first letter indicates the variant classification: \code{I} insertion;
#' \code{D} deletion; and \code{C} complex. Hap.py bins the lengths of these records
#' into ranges by ALT allele length in basepairs: \code{1_5}, \code{6_15} and \code{16_PLUS}.
#'
#' @return a \code{data.frame} of Precision-Recall metrics for the
#' selected subset
#'
#' @examples
#'
#' # figure out prefix from pkg install location
#' happy_input <- system.file("extdata", "happy_demo.summary.csv", package = "happyR")
#' happy_prefix <- sub(".summary.csv", "", happy_input)
#'
#' # load happy result
#' hapdata <- read_happy(happy_prefix)
#'
#' # long deletion PR curve
#' del_pr <- pr_data(hapdata, var_type = "indel", subtype = "D16_PLUS")
#'
#' @export
pr_data <- function(happy_result,
                    var_type = c("both", "snv", "indel"),
                    filter = c("ALL", "PASS", "SEL"),
                    subtype = c("*", "C16_PLUS", "C1_5", "C6_15", "D16_PLUS",
                                "D1_5", "D6_15", "I16_PLUS", "I1_5", "I6_15"),
                    subset = "*",
                    quietly = TRUE) {
  # Validate the input object; inherits() is the idiomatic class test and
  # also accepts subclasses of happy_result.
  if (!inherits(happy_result, "happy_result")) {
    stop("Object must be a happy_result loaded via happyR, ",
         "not ", class(happy_result))
  }
  filter <- match.arg(filter)
  var_type <- match.arg(var_type)
  if (!missing(subtype)) {
    subtype <- match.arg(subtype, several.ok = TRUE)
  } else {
    # pick first, i.e. '*'
    subtype <- match.arg(subtype)
  }
  # starting point: smallest possible PR file.
  # (`||` instead of `|`: this is a scalar condition.)
  outdf <- if (filter == "ALL" || var_type == "both") {
    happy_result$pr_curve$all
  } else if (filter == "SEL") {
    if (var_type == "snv") {
      happy_result$pr_curve$SNP_SEL
    } else {
      happy_result$pr_curve$INDEL_SEL
    }
  } else {
    # filter == "PASS"
    # BUG FIX: the SNP/INDEL tables were swapped here -- the original
    # returned SNP_PASS for var_type == "indel" and INDEL_PASS for "snv",
    # inconsistent with the SEL branch above.
    if (var_type == "snv") {
      happy_result$pr_curve$SNP_PASS
    } else {
      happy_result$pr_curve$INDEL_PASS
    }
  }
  if (!quietly){
    message(nrow(outdf), " records loaded")
  }
  # The "all" table mixes SNP and INDEL rows; filter var_type when needed
  if (filter == "ALL" && var_type != "both") {
    if (var_type == "snv") {
      outdf <- outdf[outdf$Type != "INDEL",]
    } else {
      outdf <- outdf[outdf$Type == "INDEL",]
    }
  }
  # Keep only the requested stratification subset(s)
  outdf <- outdf[outdf$Subset %in% subset,]
  if (!nrow(outdf) > 0){
    warning("No PR data found for subset: ", subset)
  }
  # Keep only the requested subtype(s) and filter level
  outdf <- outdf[outdf$Subtype %in% subtype & outdf$Filter %in% filter,]
  if (!quietly) {
    message("subset contains ", nrow(outdf), " records")
  }
  outdf
}
#' Extract tables from hap.py result lists
#'
#' Extract results tables from multiple hap.py result objects and combine
#' into a single \code{data.frame}. Source information from each
#' result is added as an additional column (\code{happy_prefix}).
#'
#' @param happy_result_list A \code{happy_result_list} object.
#' @param table Table of data to extract from each result.
#' \code{"summary"} or \code{"extended"} get top level tables;
#' the \code{pr} options get Precision-Recall tables.
#'
#' @return a \code{data.frame} of combined tables from list
#'
#' @examples
#'
#' \dontrun{
#' samplesheet <- readr::read_csv("group_id,replicate_id,happy_prefix
#' PCR-Free,NA12878-I30,NA12878-I30_S1
#' PCR-Free,NA12878-I33,NA12878-I33_S1
#' Nano,NA12878-R1,NA12878-R1_S1
#' Nano,NA12878-R2,NA12878-R2_S1
#' ")
#' hap_samplesheet <- read_samplesheet_(samplesheet = samplesheet_df)
#'
#' # get collapsed summary table of high-level metrics
#' summary_df <- extract_results(hap_samplesheet$results, table = "summary")
#' unique(summary_df$happy_prefix)
#' # [1] "/output/path/prefix" "/different/path/prefix"
#' }
#'
#' @export
extract_results <- function(happy_result_list,
                            table = c("summary", "extended",
                                      "pr.all",
                                      "pr.indel.pass", "pr.indel.sel", "pr.indel.all",
                                      "pr.snp.pass", "pr.snp.sel", "pr.snp.all")) {
  # validate input
  if (!"happy_result_list" %in% class(happy_result_list)) {
    stop("Must provide a happy_result_list object.")
  }
  table <- match.arg(table)
  # PR tables are stored under each result's `pr_curve`; map the requested
  # "pr.*" name onto the stored element name ("all", "SNP_PASS", "INDEL", ...).
  if (grepl("^pr\\.", table)) {
    if (grepl("all", table)) {
      if (table == "pr.all") {
        path <- "all"
      } else {
        # pr.indel.all -> INDEL
        # (\U...\E in the replacement upper-cases the captured middle word;
        # requires perl = TRUE)
        path <- sub(".*?\\.([[:alpha:]]*?)\\..*$", "\\U\\1\\E", table, perl = TRUE)
      }
    } else {
      # reformat + convert to uppercase, e.g.: pr.snp.pass -> "SNP_PASS"
      path <- sub(".*?\\.([[:alpha:]]*?)\\.([[:alpha:]]*$)", "\\U\\1_\\2\\E", table, perl = TRUE)
    }
    # Collect the requested PR table from every result, skipping (with a
    # warning) results that do not carry it.
    item_list <- lapply(happy_result_list, function(d) {
      # pr_curve behaves like an environment here; check existence first
      if (!exists(path, envir = d$pr_curve, inherits = FALSE)) {
        warning("missing pr data: ", path,
                " in R object from: ", attr(d, "happy_prefix"),
                " - skipping", call. = FALSE)
        return (NULL)
      }
      table_out <- d$pr_curve[[path]]
      if (is.null(table_out)) {
        warning("missing pr data: ", path,
                " in R object from: ", attr(d, "happy_prefix"),
                " - skipping", call. = FALSE)
        return (NULL)
      }
      # Tag each row set with its source so rows remain traceable after binding
      table_out$happy_prefix <- attr(d, "happy_prefix")
      table_out
    })
  } else {
    # not PR data, e.g. summary / extended
    item_list <- lapply(happy_result_list, function(d) {
      if (!table %in% names(d)) {
        stop("Could not find ", table, " in happy_result_list")
      }
      table_out <- d[[table]]
      table_out$happy_prefix <- attr(d, "happy_prefix")
      table_out
    })
  }
  # NULL entries (skipped results) are dropped by bind_rows
  df <- dplyr::bind_rows(item_list)
  if (nrow(df) == 0) {
    stop("no results found for extraction")
  }
  # set class so downstream S3 methods can dispatch on the table flavour
  if (table == "summary") {
    class(df) <- c("happy_summary", class(df))
  }
  if (table == "extended") {
    class(df) <- c("happy_extended", class(df))
  }
  df
}
|
17e8e22764312dd6ceafae11394e98a7f738a651
|
d06f4860f0815281085689b706a923740476b386
|
/_site/landing/code/newsletter/src/datacamp.R
|
42a137d349f8d9576f48e59a9c1c308a29ce3eb6
|
[
"Apache-2.0"
] |
permissive
|
jacobgreen4477/withmakers.github.io
|
b94d3a9e6247e161328224761047ad3478f4e436
|
28d3b68572b195f9bc84a44f32d31228dd31a16b
|
refs/heads/master
| 2022-01-08T21:31:08.236198
| 2019-05-15T11:55:35
| 2019-05-15T11:55:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,323
|
r
|
datacamp.R
|
# title : newsletter
# author : Hyunseo Kim
# depends : rvest, dplyr, stringr, data.table, R2HTML, NLP, openNLP
# datacamp blog ----
# Get the time
baseHTML <- get_baseHTML("https://www.datacamp.com/community/blog")
Sys.sleep(3)
# Get the title
timeHTML <- get_time(baseHTML, '.jsx-566588255 a .date')
# 일자 변수 변환: format에서 %B가 안되기 때문에 숫자로 변환 후 %m 이용
timeHTML <- gsub("st","",timeHTML)
timeHTML <- gsub("nd","",timeHTML)
timeHTML <- gsub("rd","",timeHTML)
timeHTML <- gsub("th","",timeHTML)
timeHTML <- Month2Num(timeHTML)
timeHTML <- as.Date(timeHTML, format = '%m %d, %Y')
# Get the title
titleHTML <- get_title(baseHTML, 'h2')
# Get the url
urlHTML <- get_url(baseHTML, 'h2 a', 'href')
url_list <- c()
for (i in urlHTML){
tmp <- paste0("https://www.datacamp.com",i)
url_list <- c(url_list, tmp)
}
# make data frame
datacamp <- data.frame(
site = "datacamp",
date = as.Date(timeHTML),
headline = as.character(titleHTML),
url_address = as.character(url_list))
datacamp <- as.data.table(unique(datacamp))
# Get the valid information
datacamp <- datacamp[from < datacamp$date & to >= datacamp$date, ]
# Save .csv & .html after collecting the data from valid url(different condition among sites)
datacamp <- mksave_data(datacamp, "datacamp", 2, 0)
gc()
|
b5957db01a9f93fa1bbfce5341c0d1b395e3d765
|
8c26f6153cbaec6957389cd7e659def3c468e10e
|
/week37-friends.R
|
a06e735dc5f823fff116de46b815209263efaac9
|
[] |
no_license
|
TrevorKMDay/TidyTuesday
|
59be20bf67772617b73b470caed0bdacd7761310
|
93ff776ba797f1073f11fde26ed8169939a68121
|
refs/heads/master
| 2022-12-21T21:05:23.980437
| 2020-09-22T14:31:44
| 2020-09-22T14:31:44
| 297,407,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,356
|
r
|
week37-friends.R
|
# TidyTuesday week 37: emotions in "Friends" dialogue lines, by character.
library(tidyverse)
library(corrplot)
library(viridis)
# Load data and separate into semantically named dfs
friends <- tidytuesdayR::tt_load('2020-09-08')
friends_info <- friends[[1]]
friends_dirs <- friends[[2]]
friends_emot <- friends[[3]]
# Join emotion info with who said it
lines_emot <- right_join(friends_info, friends_emot)
# Number of lines by emotion
# FIX: geom_bar() is the correct geom for counting a discrete variable;
# geom_histogram(stat = "count") emits a deprecation warning for the same plot.
ggplot(lines_emot, aes(x = emotion)) +
  geom_bar()
# Identify which characters had more than 50 lines (so we don't get weird
# proportions)
lines_per_character <- table(lines_emot$speaker)
gt_50_lines <- names(lines_per_character[lines_per_character > 50])
# Filter down to those people
char_emot <- lines_emot %>%
  filter(speaker %in% gt_50_lines)
# Now tabulate, and remove "#ALL#"
# FIX: `col.names` is not an argument of as.data.frame() for tables and was
# silently ignored; the default Var1/Var2/Freq names (used below) are kept.
ce_table <- table(char_emot$speaker, char_emot$emotion) %>%
  as.data.frame() %>%
  filter(Var1 != "#ALL#") %>%
  pivot_wider(id_cols = "Var1", names_from = "Var2", values_from = "Freq",
              names_prefix = "emotion_") %>%
  rename(speaker = Var1)
# Calculate total lines, removing Neutral because it skews
total_lines <- ce_table %>%
  select(starts_with("emotion_"), -emotion_Neutral) %>%
  rowSums()
# Lines by percent of speakers' total lines with that emotion
ce_table_pct <- ce_table %>%
  mutate_at(vars(starts_with("emotion_")), function(x) x / total_lines) %>%
  select(-emotion_Neutral)
# Scaled so we can see who stands out
ce_table_pct_z <- ce_table_pct %>%
  mutate_at(vars(starts_with("emotion_")), scale)
# Pivot longer for plotting
ce_pct_long <- ce_table_pct %>%
  pivot_longer(-speaker)
ggplot(ce_pct_long, aes(x = name, y = speaker, fill = value)) +
  geom_tile() +
  scale_fill_viridis() +
  scale_x_discrete(labels = unique(ce_pct_long$name) %>%
                     gsub("emotion_", "", .)) +
  labs(x = "Emotion", y = "Character", fill = "Proportion",
       title = "Characters' lines by emotion")
ce_z_long <- ce_table_pct_z %>%
  pivot_longer(-speaker)
ggplot(ce_z_long, aes(x = name, y = speaker, fill = value)) +
  geom_tile() +
  scale_x_discrete(labels = unique(ce_pct_long$name) %>%
                     gsub("emotion_", "", .)) +
  scale_fill_viridis(option = "magma") +
  labs(x = "Emotion", y = "Character", fill = "Z score",
       title = "Characters' lines by unique emotions")
|
c721b1bbd304b3289b143693000fdb8e957bf071
|
1a03b5b0240dae14b110848d33f7da34245903db
|
/man/ml_predict.Rd
|
9796128b1f6e2536820a7680d989f30b4308e500
|
[] |
no_license
|
skranz/mlogitExtras
|
1c3dfacb04b870243c78d0d5796b3919fdb256bf
|
62bb14fed7a1302888d699be9ea489ed8ab32746
|
refs/heads/master
| 2023-07-08T14:40:47.504079
| 2023-06-26T06:47:31
| 2023-06-26T06:47:31
| 269,279,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,179
|
rd
|
ml_predict.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{ml_predict}
\alias{ml_predict}
\title{Alternative predict function for mlogit objects}
\usage{
ml_predict(mod, newdata, num.draws = 1000, use.halton = TRUE)
}
\arguments{
\item{mod}{An estimated mlogit model}
\item{newdata}{A data set for the prediction in long format. The data set should have a column alt that indexes the alternative. The data set must be ordered first by choice situation, and each choice situation must have the same number of alternatives in the same order. You may simply add a column chid specifying the choice situation and then arrange by chid, alt.}
\item{num.draws}{Number of simulated consumers to compute market shares (relevant for mixed logit models only)}
\item{use.halton}{Should Halton sequences instead of pseudo-random numbers be used to simulate consumers (default TRUE)}
}
\value{
A matrix of predicted choice probabilities with one row per choice situation and one column for each alternative. Each row sums up to 1.
}
\description{
Currently only works for models without alternative specific constants or
alternative specific interaction effects.
}
|
f76418c682f03ea46888f1b2c8b15366708a6cae
|
06888de22ecff4d48a621c778aa35b18a28c32f1
|
/R/bedtools.R
|
b5b61112d854b1b8bc7134f3603e308cd92c25c9
|
[
"MIT"
] |
permissive
|
joelnitta/baitfindR
|
fcb9db8bc978a6004e988003a5b1fb4675f84162
|
de73d9451115ad143da249e5ef2146bb929cd17b
|
refs/heads/master
| 2021-07-20T14:16:15.081289
| 2020-04-29T20:04:47
| 2020-04-29T20:04:47
| 140,532,881
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,128
|
r
|
bedtools.R
|
# bedtools functions ------------------------------------------------------
#' Extract regions from a fasta file
#'
#' Wrapper for bedtools getfasta.
#'
#' @param bed_file Path to bed file with locations of regions to extract.
#' bed file is a tab-separated file with columns for chromosome (e.g., chr1),
#' start position (e.g., 1), and end position (e.g., 10), in that order.
#' No column headers are used.
#' @param fasta_file Path to file in fasta format to extract regions from.
#' @param out_fasta_file Path to write extracted regions (in fasta format).
#' @param ... Other arguments. Not used by this function, but meant to be used
#' by \code{\link[drake]{drake_plan}} for tracking during workflows.
#' @return List; output of processx::run(). Externally, a fasta file will be
#' written to the path specified by `out_fasta_file`.
#' @author Joel H Nitta, \email{joelnitta@@gmail.com}
#' @examples
#' \dontrun{
#' # First write gene, intron, and exon bed regions out as tsv files
#'
#' temp_dir <- tempdir()
#'
#' find_bed_regions(
#'   gff3_file = system.file("extdata", "Arabidopsis_thaliana_TAIR10_40_small.gff3",
#'     package = "baitfindR", mustWork = TRUE),
#'   source_select = "araport11",
#'   out_type = "write_all",
#'   out_dir = temp_dir,
#'   prefix = "arabidopsis"
#' )
#'
#' # Extract genes.
#' extract_regions_from_fasta(
#'   bed_file = fs::path(temp_dir, "arabidopsis_introns"),
#'   fasta_file = system.file("extdata", "Arabidopsis_thaliana_TAIR10_40_small.fasta",
#'     package = "baitfindR", mustWork = TRUE),
#'   out_fasta_file = fs::path(temp_dir, "arabidopsis_gene_seqs.fasta")
#' )
#' }
#' @export
extract_regions_from_fasta <- function (bed_file, fasta_file, out_fasta_file, ...) {
  # Validate each path argument and normalise it to an absolute path.
  assertthat::assert_that(assertthat::is.string(bed_file))
  bed_path <- fs::path_abs(bed_file)
  assertthat::assert_that(assertthat::is.readable(bed_path))
  assertthat::assert_that(assertthat::is.string(fasta_file))
  fasta_path <- fs::path_abs(fasta_file)
  assertthat::assert_that(assertthat::is.readable(fasta_path))
  assertthat::assert_that(assertthat::is.string(out_fasta_file))
  out_path <- fs::path_abs(out_fasta_file)
  assertthat::assert_that(assertthat::is.dir(fs::path_dir(out_path)))
  # Read the bed file and verify it has the expected three-column layout:
  # chromosome (character), start (numeric), end (numeric).
  bed <- readr::read_tsv(bed_path, col_names = c("chr", "start", "end"), col_types = "cdd")
  checkr::check_data(
    bed,
    values = list(
      chr = "a",
      start = 1,
      end = 1
    ),
    order = TRUE)
  # Each bed "chromosome" must appear among the fasta sequence headers,
  # otherwise bedtools will not find the regions.
  if (!(check_bed_genome_names(fasta_path, bed))) {
    stop ("Names don't match between bed file and fasta file headers")
  }
  # Delegate the actual extraction to the external bedtools binary.
  getfasta_args <- c(
    "getfasta",
    "-fi", fasta_path,
    "-bed", bed_path,
    "-fo", out_path
  )
  processx::run(
    command = "bedtools",
    args = getfasta_args,
    echo = TRUE
  )
}
#' Mask regions in a fasta file.
#'
#' Wrapper for bedtools maskfasta.
#'
#' All regions of the `fasta_file` specified by the `bed_file` will be
#' replaced ("hard-masked") with 'N's.
#'
#' The bed file is a tab-separated file with columns for chromosome (e.g., chr1),
#' start position (e.g., 1), and end position (e.g., 10), in that order.
#' No column headers are used.
#'
#' @param bed_file Path to bed file with locations of regions to mask.
#' @param fasta_file Path to unmasked fasta file.
#' @param out_fasta_file Path to write masked fasta file.
#' @param ... Other arguments. Not used by this function, but meant to be used
#' by \code{\link[drake]{drake_plan}} for tracking during workflows.
#' @return List; output of processx::run(). Externally, a fasta file will be
#' written to the path specified by `out_fasta_file`.
#' @author Joel H Nitta, \email{joelnitta@@gmail.com}
#' @examples
#' \dontrun{
#' # First write genes, introns, and exons out as tsv files
#'
#' temp_dir <- tempdir()
#' find_bed_regions(
#'   gff3_file = system.file("extdata", "Arabidopsis_thaliana_TAIR10_40_small.gff3", package = "baitfindR", mustWork = TRUE),
#'   source_select = "araport11",
#'   out_type = "write_all",
#'   out_dir = temp_dir,
#'   prefix = "arabidopsis"
#' )
#'
#' # Now mask the genome, using the bed file and genome fasta file.
#' mask_genome(
#'   bed_file = "temp_dir/test_introns",
#'   fasta_file = "data_raw/Arabidopsis_thaliana.TAIR10.dna.toplevel.renamed.fasta",
#'   out_fasta_file = "temp_dir/test_masked"
#' )
#' }
#' @export
mask_regions_in_fasta <- function (bed_file, fasta_file, out_fasta_file, ...) {
  # Validate each path argument and normalise it to an absolute path.
  assertthat::assert_that(assertthat::is.string(bed_file))
  bed_path <- fs::path_abs(bed_file)
  assertthat::assert_that(assertthat::is.readable(bed_path))
  assertthat::assert_that(assertthat::is.string(fasta_file))
  fasta_path <- fs::path_abs(fasta_file)
  assertthat::assert_that(assertthat::is.readable(fasta_path))
  assertthat::assert_that(assertthat::is.string(out_fasta_file))
  out_path <- fs::path_abs(out_fasta_file)
  assertthat::assert_that(assertthat::is.dir(fs::path_dir(out_path)))
  # Read the bed file and verify the expected three-column layout:
  # chromosome (character), start (numeric), end (numeric).
  bed <- readr::read_tsv(bed_path, col_names = c("chr", "start", "end"), col_types = "cdd")
  checkr::check_data(
    bed,
    values = list(
      chr = "a",
      start = 1,
      end = 1
    ),
    order = TRUE)
  # Each bed "chromosome" must appear among the fasta sequence headers,
  # otherwise bedtools will not find the regions to mask.
  if (!(check_bed_genome_names(fasta_path, bed))) {
    stop ("Names don't match between bed file and fasta file headers")
  }
  # Delegate the hard-masking to the external bedtools binary.
  maskfasta_args <- c(
    "maskfasta",
    "-fi", fasta_path,
    "-bed", bed_path,
    "-fo", out_path
  )
  processx::run(
    command = "bedtools",
    args = maskfasta_args,
    echo = TRUE
  )
}
#' Clean up data from a gff file and
#' convert to bed format
#'
#' Helper function for `find_bed_regions`: drops invalid regions, merges
#' overlapping regions, sorts them, and converts the result to bed format.
#'
#' @param region Dataframe; list of gene regions in "bed" format. Must include
#' the following columns in order: `chr` ('chromosome', character), `start`
#' (start position, numeric), and `end` (end position, numeric).
#' @param check.chr Logical; should coordinates be checked for chromosomal
#' format with "chr" prefix?
#' @param verbose Logical; should `bedr` functions output all messages?
#'
#' @return Dataframe in "bed" format.
#'
clean_gff <- function (region, check.chr = FALSE, verbose = FALSE) {
  # Keep only rows that bedr considers valid regions
  region_ok <- bedr::is.valid.region(region, check.chr = check.chr, verbose = verbose)
  valid_regions <- dplyr::filter(region, region_ok)
  # Collapse overlapping regions into single intervals
  merged_regions <- bedr::bedr.merge.region(valid_regions, check.chr = check.chr, verbose = verbose)
  # Sort regions, then convert the result to bed format
  sorted_regions <- bedr::bedr.sort.region(merged_regions, check.chr = check.chr, verbose = verbose)
  bedr::convert2bed(sorted_regions, check.chr = check.chr, verbose = verbose)
}
#' Find genes, exons, and introns in a gff3 file
#'
#' If tsv files are written out by selecting "write_all" for `out_type`,
#' they will overwrite any existing files with the same name in `out_dir`.
#'
#' @param gff3_file Path to input file in `gff3` format.
#' @param source_select Character vector; only use regions from these
#' sources. Must match values in `source` column of gff3 file. Optional.
#' @param gene_label String; value used to indicate genes in gff3 file.
#' Must match at least one value in `type` column of gff3 file. Default "gene".
#' @param exon_label String; value used to indicate exons in gff3 file.
#' Must match at least one value in `type` column of gff3 file. Default "exon".
#' @param verbose Logical; should `bedr` functions output all messages?
#' @param out_type Type of output to return:
#' "genes": dataframe in "bed" format of genes.
#' "introns": dataframe in "bed" format of introns.
#' "exons": dataframe in "bed" format of exons.
#' "write_all": write tab-separated files for each of `genes`, `introns`, and
#' `exons` to `out_dir`. The hash digest of the combined genes, introns, and
#' exons will be returned.
#' @param prefix String; prefix to attach to tsv files if `out_type` is
#' "write_all".
#' @param out_dir Directory to write tsv files if `out_type` is "write_all".
#' @param ... Other arguments. Not used by this function, but meant to
#' be used by \code{\link[drake]{drake_plan}} for tracking during workflows.
#' @return Dataframe or character.
#' @author Joel H Nitta, \email{joelnitta@@gmail.com}
#' @examples
#' # Find genes
#'
#' arabidopsis_gff_file <- system.file("extdata", "Arabidopsis_thaliana_TAIR10_40_small.gff3", package = "baitfindR", mustWork = TRUE)
#'
#' genes <- find_bed_regions(
#' gff3_file = arabidopsis_gff_file,
#' source_select = "araport11",
#' out_type = "genes"
#' )
#' head(genes)
#'
#' # Find introns
#' introns <- find_bed_regions(
#' gff3_file = arabidopsis_gff_file,
#' source_select = "araport11",
#' out_type = "introns"
#' )
#' head(introns)
#'
#' # Find exons
#' exons <- find_bed_regions(
#' gff3_file = arabidopsis_gff_file,
#' source_select = "araport11",
#' out_type = "exons"
#' )
#' head(exons)
#'
#' \dontrun{
#' # Write genes, introns, and exons out as tsv files
#' temp_dir <- tempdir()
#' find_bed_regions(
#' gff3_file = arabidopsis_gff_file,
#' source_select = "araport11",
#' out_type = "write_all",
#' out_dir = temp_dir,
#' prefix = "arabidopsis"
#' )
#' }
#' @export
find_bed_regions <- function (gff3_file,
                              source_select = NULL,
                              gene_label = "gene", exon_label = "exon",
                              verbose = FALSE,
                              prefix = NULL, out_dir = NULL,
                              out_type = c("genes", "introns", "exons", "write_all"),
                              ...) {
  # Check input
  assertthat::assert_that(assertthat::is.readable(gff3_file))
  assertthat::assert_that(assertthat::is.string(gene_label))
  assertthat::assert_that(assertthat::is.string(exon_label))
  assertthat::assert_that(assertthat::is.string(out_type))
  assertthat::assert_that(is.logical(verbose))
  assertthat::assert_that(out_type %in% c("genes", "introns", "exons", "write_all"),
                          msg = "'out_type' must be one of 'genes', 'introns', 'exons', or 'write_all'")
  # Read in gff3 file as dataframe; duplicate seqid as a character `chr`
  # column, the name expected by the bed helpers below
  gff3 <- ape::read.gff(gff3_file) %>%
    dplyr::mutate(chr = as.character(seqid))
  # Keep only annotations from selected source
  if (!is.null(source_select)) {
    assertthat::assert_that(is.character(source_select))
    assertthat::assert_that(all(source_select %in% gff3$source))
    gff3 <- dplyr::filter(gff3, source %in% source_select)
  }
  # Extract and clean up genes (validate, merge overlaps, sort, to bed)
  genes <- gff3 %>% dplyr::filter(type == gene_label) %>%
    dplyr::select(chr, start, end) %>%
    clean_gff(verbose = verbose)
  # Extract and clean up exons
  exons <- gff3 %>% dplyr::filter(type == exon_label) %>%
    dplyr::select(chr, start, end) %>%
    clean_gff(verbose = verbose)
  # Introns are genes - exons (interval subtraction via bedr)
  introns <- bedr::bedr.subtract.region(
    genes,
    exons,
    remove.whole.feature = FALSE,
    check.chr = FALSE,
    verbose = verbose)
  # Write out all regions and return hash of genes + exons + introns
  # (the hash lets drake track whether any of the three outputs changed)
  if (out_type == "write_all") {
    out_dir <- fs::path_abs(out_dir)
    assertthat::assert_that(assertthat::is.writeable(out_dir))
    assertthat::assert_that(assertthat::is.string(prefix))
    all_regions <- list(genes = genes,
                        exons = exons,
                        introns = introns)
    # Name each element with its destination path, then write each as
    # headerless tsv (bed format)
    all_regions %>%
      purrr::set_names(fs::path(out_dir, paste0(prefix, "_", names(.)))) %>%
      purrr::iwalk(readr::write_tsv, col_names = FALSE)
    return(digest::digest(all_regions))
  }
  # Or, return a particular result type.
  return(switch(
    out_type,
    genes = genes,
    exons = exons,
    introns = introns
  ))
}
# Check that the "chromosome" names used in a bed dataframe all appear
# among the sequence headers of a genome fasta file.
# (This must be true for bedtools maskfasta to work).
check_bed_genome_names <- function (fasta_file, bed) {
  # Read the fasta file and pull out the header lines (those with ">"),
  # then strip the ">" to get the bare sequence names
  fasta_lines <- readr::read_lines(fasta_file)
  header_lines <- fasta_lines[grepl(">", fasta_lines)]
  seq_names <- gsub(">", "", header_lines)
  # Every unique bed "chr" (chromosome) name must match a fasta header
  bed_chr <- unique(bed$chr)
  all(bed_chr %in% seq_names)
}
|
82c59c5fe2484138e581f45a8368c63f8103bd8d
|
e508870d7b82ca065aff9b7bf33bc34d5a6c0c1c
|
/pkg/man/listProduct.Rd
|
3ebc402520b1b0eae3006db6765831e9d5fdab1f
|
[] |
no_license
|
Dong-po/SoilR-exp
|
596be0e6c5d291f00c6e08c348952ee23803e15e
|
c10d34e035deac8af4912c55382012dfc247eedc
|
refs/heads/master
| 2021-09-03T11:12:49.199268
| 2018-01-08T15:52:17
| 2018-01-08T15:52:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
rd
|
listProduct.Rd
|
\name{listProduct}
\alias{listProduct}
\usage{listProduct(...)}
\examples{listProduct(list('a','b'),list(1,2))}
\arguments{
\item{...}{lists}
}
\description{Creates a list of all combinations of the elements of the input lists
(like a "tensor product" of lists). The list elements can be of any class.
The function is used in examples and tests to produce all possible combinations
of arguments to a function.
Look at the tests for example usage.}
\title{tensor product of lists}
\value{a list of lists each containing one combinations of the elements of the input lists}
|
33956c49db6741f7bd71bb226f7c7a8970af0f45
|
cef4d8774f2eb276a8212ce460fb546de6122a91
|
/Rplot5.R
|
8d54ebd70fec3619cc34a2def413b63aa45911b0
|
[] |
no_license
|
universaljames/PM2.5-in-US
|
e085406a3bc5a05d0e0e1b9f02cdcfdc5c947d70
|
ac08cea08cebefde38f1c62a6949b7b21d75cdfc
|
refs/heads/master
| 2022-10-26T09:32:48.127894
| 2020-06-15T05:04:28
| 2020-06-15T05:04:28
| 272,344,398
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
Rplot5.R
|
# Data Preparation -------------------------------------------------------
# Download and unpack the EPA National Emissions Inventory data set.
library(ggplot2)  # ggplot()/geom_bar()/labs() are used below but were never attached
DataFile <- "NEI_data.zip"
DataFileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# BUG FIX: destfile previously referenced the undefined name `DataFileFile`,
# which made this download.file() call error out before anything ran.
download.file(url = DataFileURL, destfile = DataFile, method = "curl")
unzip(DataFile)
NEI <- readRDS("summarySCC_PM25.rds")            # PM2.5 emission observations
SCC <- readRDS("Source_Classification_Code.rds") # source classification lookup
# Convert the id-like columns to factors.
colToFactor <- c("year", "type", "Pollutant", "SCC", "fips")
NEI[, colToFactor] <- lapply(NEI[, colToFactor], factor)
levels(NEI$fips)[1] <- NA  # first level is a placeholder code; treat as missing
NEIdata <- NEI[complete.cases(NEI), ]  # keep only fully-observed rows
colSums(is.na(NEIdata))                # sanity check: should be all zero
# Question 5: motor-vehicle emissions in Baltimore City (fips 24510) ------
SCCvehicle <- grepl(pattern = "vehicle", SCC$EISector, ignore.case = TRUE)
SCCvehicleSCC <- SCC[SCCvehicle, ]$SCC
NEIvehicleSSC <- NEIdata[NEIdata$SCC %in% SCCvehicleSCC, ]
NEIvehicleBaltimore <- subset(NEIvehicleSSC, fips == "24510")
NIEvehicleBaltimoreTotEm <- aggregate(Emissions ~ year, NEIvehicleBaltimore, sum)
g <- ggplot(aes(year, Emissions / 10^5), data = NIEvehicleBaltimoreTotEm)
g + geom_bar(stat = "identity", fill = "grey", width = 0.75) +
  guides(fill = FALSE) +
  labs(x = "year", y = expression("Total PM"[2.5] * " Emission (10^5 Tons)")) +
  labs(title = expression("PM"[2.5] * " Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
|
77c9ef64627876f3b9a1edfe3f6b1dd150a0a324
|
1203cc14b7416390beb8149c3a75c28c9177a681
|
/scripts/monthly report text 5_18.R
|
3347b99a56c2f5a34de613fec4257e9478ee6e33
|
[] |
no_license
|
AZASRS/DB_R_Testing_Environment
|
7801fc9bcf078be259ed8ef80335729623bdb1dd
|
bf65706a2244dc67e63cd25382488d2219756b11
|
refs/heads/master
| 2020-03-20T06:49:08.414225
| 2018-07-27T00:31:20
| 2018-07-27T00:31:20
| 137,261,803
| 0
| 1
| null | 2018-07-16T22:23:26
| 2018-06-13T19:33:14
|
R
|
UTF-8
|
R
| false
| false
| 11,744
|
r
|
monthly report text 5_18.R
|
#monthly report
## @knitr setup
# NOTE(review): setwd() hard-codes a network drive path; the script is not
# portable outside that environment.
setwd("P:/IMD/Karl/R projects/private investment performance")
load('pmedata.rdata')            # supplies pme.df, y.cf, valdate, lastcfdate, unfunded.date, ...
source('../basic financial.r')   # supplies formatwblank() and other helpers
library(xtable)    # library() errors on a missing package; require() only returned FALSE
library(zoo)
library(lubridate)
# get total-fund values for valdate and lastcfdate
valdf <- read.csv('tfundvals.csv')
valdf$dates <- as.Date(valdf$dates, format='%m/%d/%Y')
tfund.valdate <- valdf$vals[which(valdf$dates==valdate)]
tfund.cfdate <- valdf$vals[which(valdf$dates==lastcfdate)]
# BUG FIX: subsetting with a zero-length index yields numeric(0), never NULL,
# so the original is.null() checks could never fire; test length() instead.
if (length(tfund.valdate) == 0) print(paste(
  'need total fund value in tfundvals.csv for', valdate))
if (length(tfund.cfdate) == 0) print(paste(
  'need total fund value in tfundvals.csv for', lastcfdate))
twrs <- read.csv(file='twrs.csv')
twrs$Twrdate <- as.Date(twrs$Twrdate, format='%m/%d/%Y')
twrs <- subset(twrs, Twrdate==valdate)
p2pirrs <- read.csv(file='p2pirrs.csv')
portmat <- read.csv("portmat.csv")
#if working on one portfolio and running portfolios individually (or code line individually) or diagnosing report formatting
# skip first line of the function and run the rowpm and beyond. need to initialize port by assigning argument for
# portfolio you want to run - e.g. port <- "PE"
# Render one portfolio's section of the monthly report as LaTeX (printed via
# xtable): a title block, a summary table of cash flows / NAV / exposure, a
# point-to-point IRR table, and a (possibly multi-page) fund-level detail
# table. `port` is a short portfolio code ("PE", "RE", "POPP", "OPP", "PD",
# "FARM") matched against portmat$portshort.
# Depends on globals created in the setup chunk: portmat, pme.df, y.cf, twrs,
# p2pirrs, valdate, lastcfdate, unfunded.date, tfund.valdate, tfund.cfdate,
# and the helper formatwblank() sourced from 'basic financial.r'.
# All output is a side effect (print/cat of LaTeX); the return value is not
# meaningful. NOTE: despite the dot in the name this is an ordinary function,
# not an S3 print() method.
print.portfolio=function(port) {
rowpm=which(portmat$portshort==port)  # row of this portfolio in portmat
port.longname=as.character(portmat$portlongname[rowpm])
pme.x=subset(pme.df,Portfolio==port)
pme.x=pme.x[-grep("Vintage",rownames(pme.x)),]  # drop vintage-year subtotal rows
pme.x$longname=rownames(pme.x)
#summary page: split cash flows at valdate (reported) vs activity since then
cf.port=y.cf[[paste("Total",port)]]
val.valdate=pme.x[paste("Total",port),"val"]
val.cfdate=sum(pme.x[ ,"Cash Adj NAV"][pme.x$isfund&(!pme.x$sponsorsum)])
draw.valdate=-sum(cf.port[(time(cf.port)<=valdate)&cf.port<0])
draw.sincevaldate=-sum(cf.port[(time(cf.port)>valdate)&cf.port<0])
dist.valdate=-sum(cf.port[(time(cf.port)<=valdate)&cf.port>0])
dist.sincevaldate=-sum(cf.port[(time(cf.port)>valdate)&cf.port>0])
deltaval.valdate=-dist.valdate+val.valdate-draw.valdate
deltaval.sincevaldate=-dist.sincevaldate+val.cfdate-draw.sincevaldate-val.valdate
valpct.valdate=100*val.valdate/(tfund.valdate*1000000)
valpct.cfdate=100*val.cfdate/(tfund.cfdate*1000000)
# portirr.valdate=pme.x[paste("Total",port),"Fund.IRR"]
# portirr.cfdate=100*irr.z(mergesum.z(cf.port,zoo(val.cfdate,lastcfdate)))
portunf=pme.x[paste("Total",port),"unf"]
portexp=val.cfdate+(portunf*1000000)+sum(cf.port[(time(cf.port)>unfunded.date)&cf.port<0])
portexppct=100*portexp/(tfund.cfdate*1000000)
roll12draw=sum(cf.port[(time(cf.port)>(lastcfdate-years(1)))&cf.port<0])
roll12dist=sum(cf.port[(time(cf.port)>(lastcfdate-years(1)))&cf.port>0])
roll12net=roll12dist+roll12draw
# title block (three-line heading printed without rules or names)
titleframe=data.frame(titles=c("Arizona State Retirement System",paste(port.longname,"Portfolio"),
paste("Information compiled through",format(lastcfdate,"%m/%d/%Y"))))
title.x=xtable(titleframe)
align(title.x)='lc'
print(title.x,hline.after=NULL,include.rownames=FALSE,include.colnames=FALSE)
# column1 = figures as officially reported at valdate; column2 = estimates
# rolled forward to lastcfdate. Blank strings keep the rows aligned with
# textcol below.
column1=c(rep(" ",2),formatwblank(draw.valdate/1000000),
formatwblank(dist.valdate/1000000),
formatwblank(deltaval.valdate/1000000),
formatwblank(val.valdate/1000000)," ",
formatwblank(valpct.valdate,2,'%'),
rep(" ",9)
)
column2=c(" ",formatwblank(val.valdate/1000000),
formatwblank(draw.sincevaldate/1000000),
formatwblank(dist.sincevaldate/1000000),
formatwblank(deltaval.sincevaldate/1000000),
formatwblank(val.cfdate/1000000)," ",
formatwblank(valpct.cfdate,2,'%')," ",
formatwblank(portunf),
formatwblank(portexp/1000000),
formatwblank(portexppct,2,'%')," "," ",
formatwblank(roll12draw/1000000),
formatwblank(roll12dist/1000000),
formatwblank(roll12net/1000000)
)
# policy weight text: single number, or a low-to-high range when they differ
policy.txt=paste0(" Policy weight equals ",portmat[rowpm,"pol.low"],'%')
if (portmat[rowpm,"pol.high"]>portmat[rowpm,"pol.low"]) policy.txt=paste0(policy.txt,' to ',
portmat[rowpm,"pol.high"],'%')
textcol=c(" ","Beginning NAV","Contributions","Distributions",
"Net Increase/(Decrease)","Ending NAV",
" ","Percent of Total Fund"," ",
paste("Unfunded Commitments as of",format(unfunded.date,"%m/%d/%Y")),
"Estimated exposure of ending NAV and net unfunded",
"Estimated exposure (NAV+unfunded) as % of TF_NAV",
policy.txt,
" ",
"Rolling 12 month draws","Rolling 12 month distribution",
"Rolling 12 month net cash flow")
printmat=data.frame(textcol,column1,column2)
colnames(printmat)=c(" ",
paste("Reported by SS-AIS as of",format(valdate,"%m/%d/%Y")),
paste("Estimated as of",format(lastcfdate,"%m/%d/%Y")))
printmat.x=xtable(printmat)
align(printmat.x)='l|r|ZZ|'
print(printmat.x,tabular.environment='tabularx',
width='5.5in',scalebox=.9,include.rownames=FALSE,
hline.after=c(-1,0,6,9,13,length(column1)))
#return table: pull portfolio and benchmark rows from twrs and p2pirrs.
# The PE/POPP/OPP branches adjust for benchmarks occupying adjacent rows.
row.re=which(twrs$Asset==port)
row.odce=which(twrs$Asset==as.character(portmat[rowpm,'b1']))
if(port=='PE') row.odce=c(row.odce,row.odce+1)
if(port=='POPP') row.odce=row.odce[1]
if(port=='OPP') row.odce=row.odce[2]
twrs.re=twrs[c(row.re,row.odce),2:6]
row.re=which(p2pirrs$X==port.longname)
row.odce=which(p2pirrs$X==as.character(portmat[rowpm,'b2']))
if(port=='PE') row.odce=c(row.odce,row.odce+1)
if(port=='POPP') row.odce=row.odce[1]
if(port=='OPP') row.odce=row.odce[2]
p2pirrs.re=p2pirrs[c(row.re,row.odce),2:6]
countrows=sum(length(row.re),length(row.odce))
# format every cell as "<value>%" in a plain character matrix
p2pirrs.re=matrix(paste0(format(as.vector(as.matrix(p2pirrs.re))),'%'),ncol=5,nrow=countrows)
twrs.re=matrix(paste0(format(as.vector(as.matrix(twrs.re))),'%'),ncol=5,nrow=countrows)
rn1=c(paste(port.longname,"IRR"),
paste(portmat[rowpm,'b2'],"IRR"))
if(port=='PE') rn1=c(rn1,'Burgiss IRR')
rn2=c(paste(port.longname,"TWR"),
paste(portmat[rowpm,'b2'],"TWR"))
if(port=='PE') rn2=c(rn2,'Burgiss TWR')
rownames(p2pirrs.re)=rn1
rownames(twrs.re)= rn2
colnames(twrs.re)=colnames(p2pirrs.re)=c(
"One Quarter", "One Year","Three Years","Five Years",
paste0("Inception (",portmat[rowpm,"inception"],")"))
# NOTE(review): twrs.re is built and labelled above but only p2pirrs.re is
# printed here -- confirm the TWR table was dropped intentionally.
twrs.re.x=xtable(rbind(p2pirrs.re),
caption=paste("Summary of IRRs as of",format(valdate,'%m/%d/%Y')))
align(twrs.re.x)='|rZZZZZ|'
print(twrs.re.x,tabular.environment='tabularx',width='6.5in',
scalebox=.8,caption.placement="top",hline.after=c(-1,0,nrow(twrs.re.x)))
#put a blank row after each non-fund (a subtotal)
blanksgo=vector()
nb=which(!pme.x$isfund)
if(length(nb)>=2) {
nbbot=c(1,1+nb[-length(nb)])
nbtop=nb
nbnbot=nbbot+(0:(-1+length(nb)))
nbntop=nbtop+(0:(-1+length(nb)))
}
# NOTE(review): nbnbot/nbntop are only defined inside the if() above, so
# this code assumes at least two subtotal rows exist -- verify for new
# portfolios.
blanksgo=nbnbot[-1]-1
pme.xn=pme.x
for (i in 1:length(nb)) {
pme.xn[nbnbot[i]:nbntop[i],]=pme.x[nbbot[i]:nbtop[i],]
if(i!=length(nb)) pme.xn[blanksgo[i],]=rep(NA,ncol(pme.xn))
}
pme.xn$longname[blanksgo]=''
rownames(pme.xn)=NULL
blanksgo=c(blanksgo,nrow(pme.xn))
## Adjust data frames for portfolio PME's: each portfolio uses a different
## public-market-equivalent benchmark column.
if (port == "PE") pme.xn2=pme.xn[,c("longname", "vint", "commit", "drawn", "distributed",
"unf", "appr", "Cash Adj NAV", "dpi", "Fund TVPI", "Fund IRR", "Russell 2K PME", "Fund IRR 2")]
if (port == "RE") pme.xn2=pme.xn[,c("longname","vint","commit","drawn","distributed",
"unf","appr","Cash Adj NAV","dpi","Fund TVPI","Fund IRR", "ODCE PME", "Fund IRR 2")]
if (port == "POPP") pme.xn2=pme.xn[,c("longname", "vint", "commit", "drawn", "distributed",
"unf", "appr", "Cash Adj NAV", "dpi", "Fund TVPI", "Fund IRR","Fixed 8 PME", "Fund IRR 2")]
if (port == "PD") pme.xn2=pme.xn[,c("longname","vint","commit","drawn","distributed",
"unf","appr","Cash Adj NAV","dpi","Fund TVPI", "Fund IRR", "Lev Loan+250 PME", "Fund IRR 2")]
if (port == "OPP") pme.xn2=pme.xn[,c("longname","vint","commit","drawn","distributed",
"unf","appr","Cash Adj NAV","dpi","Fund TVPI", "Fund IRR", "Fixed 8 PME", "Fund IRR 2")]
if (port == "FARM") pme.xn2=pme.xn[,c("longname","vint","commit","drawn","distributed",
"unf","appr","Cash Adj NAV","dpi","Fund TVPI", "Fund IRR", "CPIxFE+350 PME", "Fund IRR 2")]
#format numbers: dollars in millions, multiples with ' x', rates with '%'
pme.xn2$vint=gsub(paste(port,"V"),'',pme.xn2$vint)
pme.xn2$commit=formatwblank(pme.xn2$commit)
pme.xn2$unf=formatwblank(pme.xn2$unf)
pme.xn2$drawn=formatwblank(pme.xn2$drawn/1000000)
pme.xn2$distributed=formatwblank(pme.xn2$distributed/1000000)
pme.xn2$appr=formatwblank(pme.xn2$appr/1000000)
pme.xn2[ ,"Cash Adj NAV"]=formatwblank(pme.xn2[,'Cash Adj NAV']/1000000)
pme.xn2$dpi=formatwblank(pme.xn2$dpi/100,2,' x')
pme.xn2[ ,"Fund TVPI"]=formatwblank(pme.xn2[ , "Fund TVPI"],2,' x')
pme.xn2[ ,"Fund IRR"]=formatwblank(pme.xn2[ , "Fund IRR"],2,'%')
#test
if (port == "PE") pme.xn2[ ,"Russell 2K PME"] = formatwblank(pme.xn2[ ,"Russell 2K PME"],2)
if (port == "RE") pme.xn2[ ,"ODCE PME"] = formatwblank(pme.xn2[ ,"ODCE PME"],2)
if (port == "POPP") pme.xn2[ ,"Fixed 8 PME"] = formatwblank(pme.xn2[ ,"Fixed 8 PME"],2)
if (port == "PD") pme.xn2[ ,"Lev Loan+250 PME"] = formatwblank(pme.xn2[ ,"Lev Loan+250 PME"],2)
if (port == "OPP") pme.xn2[ ,"Fixed 8 PME"] = formatwblank(pme.xn2[ ,"Fixed 8 PME"],2)
if (port == "FARM") pme.xn2[ ,"CPIxFE+350 PME"] = formatwblank(pme.xn2[ ,"CPIxFE+350 PME"],2)
# end test
#pme.xn2[ ,"Russell 2K PME"] = formatwblank(pme.xn2[ ,"Russell 2K PME"],2)
pme.xn2[,"Fund IRR 2"]=formatwblank(pme.xn2[,"Fund IRR 2"],2,'%')
colnames(pme.xn2)=holdname=c("Fund Name","Vintage Year","Committed",
paste("Called through",format(lastcfdate,"%m/%d/%Y")),
paste("Distributed through",format(lastcfdate,"%m/%d/%Y")),
paste("Unfunded as of",format(unfunded.date,"%m/%d/%Y")),
"Est. Appreciation","Cash Adjusted Net Asset Value","DPI","TVPI Multiple",
paste("IRR as of",format(valdate,"%m/%d/%Y")), paste("PME", format(valdate, "%m/%d/%Y")),
"Most Recent IRR")
#print the results, paginating so page breaks fall on subtotal boundaries
# (blanksgo) when possible
nline=30 #lines per page
nbreak=floor(nrow(pme.xn2)/nline)
if(nbreak==0) breakvec=nrow(pme.xn2)
if(nbreak>0) {
rowgroups=blanksgo
breakvec=vector()
for (z in (1:nbreak)) {
if (any(rowgroups<nline)) {
breakvec[z]=max(rowgroups[rowgroups<nline])} else {
breakvec[z]=nline }
rowgroups=rowgroups-breakvec[z]
rowgroups=rowgroups[rowgroups>0]
}
}
breakvec=cumsum(breakvec)
startrow=c(1,breakvec+1)
breakvec=c(breakvec,nrow(pme.xn2))
for (z in 1:(nbreak+1)) {
printrange=startrow[z]:breakvec[z]
# horizontal rules at subtotal rows, re-based to this page
hlinevec=(blanksgo[which(blanksgo%in%printrange)])-(printrange[1]-1)
captiontxt=port.longname
if(nbreak>0) captiontxt=paste(captiontxt,z,"of",nbreak+1)
pme.xt=xtable(pme.xn2[printrange,],caption=captiontxt)
align(pme.xt)='l|lYZ|ZZZ|ZZ|ZZ|ZZZ|'
print(pme.xt,tabular.environment='tabularx',floating.environment='sidewaystable',
width='14in',scalebox=.65,include.rownames=FALSE,hline.after=c(-1,0,hlinevec))
}
cat('\\clearpage')
}
## @knitr print.reports
# Render the report section for every portfolio present in pme.df.
port.names <- as.character(unique(pme.df$Portfolio))
# lapply instead of sapply: the result is discarded (print.portfolio is
# called for its printed LaTeX side effects), so sapply's type-unstable
# simplification buys nothing.
nullvec <- lapply(port.names, print.portfolio)
# To run a single portfolio manually, call print.portfolio directly:
# ##print.re
# print.portfolio("RE")
# ## print.pe
# print.portfolio("PE")
# ## print.popp
# print.portfolio("POPP")
# ## print.opp
# print.portfolio("OPP")
# ## print.pd
# print.portfolio("PD")
#
|
e6a529edee5ad54e133f0b522650b43f7eddc826
|
44cf65e7ab4c487535d8ba91086b66b0b9523af6
|
/data/Newspapers/2001.11.22.editorial.68247.0774.r
|
58577a32b40697cacc8c4d959abfc6e8ae8d8d33
|
[] |
no_license
|
narcis96/decrypting-alpha
|
f14a746ca47088ec3182d610bfb68d0d4d3b504e
|
5c665107017922d0f74106c13d097bfca0516e66
|
refs/heads/master
| 2021-08-22T07:27:31.764027
| 2017-11-29T12:00:20
| 2017-11-29T12:00:20
| 111,142,761
| 0
| 1
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 3,466
|
r
|
2001.11.22.editorial.68247.0774.r
|
uluitor progres !
americanii si tarile membre ale Uniunii Europene ne vor bombarda cu telegrame si scrisori de felicitare .
Salutul lor cordial va deveni el insusi un eveniment politico - mediatic .
celebrul Gioni Popescu , zis eminenta cenusie a Serviciului Roman de Informatii , zis Gioni Descurcaretu' , a fost propus de Radu Timofte pentru a ocupa postul de prim - adjunct al serviciului .
si nu va trece multa vreme pina cind serviciile straine ne vor solicita sprijinul .
" nu gasiti si voi niste marmura mai ieftina prin Romania ? " ,
" am putea procura si noi niste mobila sculptata in lemn de nuc sau trandafir ? " .
in sfirsit , Romania va da raspunsul " profesional " adecvat : " Se rezolva , sefu' ! " .
sa nu mai vorbim de " eminenta cenusie " .
am putea presupune ca generalul Gioni Popescu a fost cel care a instrumentat toate scurgerile de informatii din structurile statului .
el i - a depistat pe marii spioni prinsi in acesti ani .
i - a vopsit , le - a pus catuse si i - a livrat pe tava lui Virgil Magureanu , lui Costin Georgescu si , de miine - poimiine , cu si mai multa autoritate , lui Radu Timofte .
deci Gioni Popescu se afla , de fapt , la originea tuturor marilor victorii ale SRI , dar a fost baiat modest si n - a vrut sa iasa in fata !
sigur ca evolutia sa e impresionanta . De la un sef de complex " Universal Coop " , format din circiuma , depozit si dependinte , a evoluat .
s - a calificat mai intii ca expert in parfumuri , spray - uri , televizoare color , Pepsi si , daca aveai nevoie , iti facea rost si de camasi chinezesti , de tenisi unguresti si de ciuperci vietnameze , inclusiv de muschiulet si fleica .
inainte de a fi general , Gioni Popescu a fost un mare descurcaret in vremurile lui Nicolae Ceausescu .
cum de s - a putut reprofila atit de spectaculos , devenind " as al informatiilor " , numai Dumnezeu stie !
de multa vreme el a fost banuit ca si sub " acoperirea " de general s - ar indeletnici cu aceleasi lucruri " Ce doriti sefu' " , terminate totdeauna cu acelasi " Se rezolva ! " .
unii chiar l - au cercetat pentru ca a raspuns " mult prea corespunzator " in cazul vilei lui Virgil Magureanu , construita la Giurtelec .
ba au existat sesizari ca generalul descurcaret ( cu facultatea facuta pe fuga dupa 1989 ) a sarit si ca problematica , si ca grad , si ca preocupari .
ca problematica a avut de - a face cu exportul strategic de combustibili catre Iugoslavia ( zisa afacerea " Jimbolia " ) . Ca grad a ajuns general , iar ca preocupari a trecut de la frigidere , televizoare si videouri ( specializare antedecembrista ) la lucruri mai marunte ( cafea , tigari , vorbindu - se de diverse operatiuni , inclusiv " Tigareta " ) in perioada post Tovarasul si Tovarasa .
pentru ca s - a dovedit expert in gospodarire , a avut in subordine inclusiv crescatoria de porci de la Craiova ( din gospodaria anexa a celebrei Regii Rasirom ) .
cu siguranta , in comertul si relatiile cu arabii , Gioni Popescu i - a devansat si pe americani .
prin 1992 - 1996 el ii vina deja pe talibani , dar nu avea cui sa - i predea .
parola sa ar fi fost : " Tu cit dai ? " .
daca ar fi pe televizoare , cafea , whisky , tigari , frigidere , ulei de masline si ficat de gisca , Gioni Popescu ne - ar baga sigur in NATO si in Uniunea Europeana .
si toate serviciile secrete straine si - ar trimite oamenii la Bucuresti pentru a lua lectii de servire a sefilor si de sterpelit friptura din farfurie .
|
ab7f0153fca08c3381ee41336d37a5cd0220e8e6
|
ced1997e24a3100493ed8b4bdd116a19f101ffd7
|
/analyse-simulation-15-08.R
|
85bd762e63e6a28e3ff232fd08fccf1cdc8b6ee2
|
[] |
no_license
|
tobiriebe/analyse
|
bb2db1c43d589d1da25fcb93032d0136454fcd6b
|
41ea6ab06f8f6f2fc3b258a6b772274ed5f0159f
|
refs/heads/master
| 2020-04-06T06:56:12.088598
| 2016-09-05T13:32:58
| 2016-09-05T13:32:58
| 65,828,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,465
|
r
|
analyse-simulation-15-08.R
|
analyseSimulation <- function(dataFile) {
library(pROC)
library(Rmisc)
data <- dataFile #save data file for later functions appearing
Y <- dataFile$Y
Ynoise <- dataFile$Ynoise
yBin <- dataFile$yBin
yBinNoise <- dataFile$yBinNoise
originalX <- dataFile$originalX
originalXnoise <- dataFile$originalXnoise
coeffs <- dataFile$coeffs
predictors <- dataFile$predictors
kappa <- dataFile$kappa
samples <- dataFile$samples
simulations <- dataFile$simulations
nOuterFolds <- dataFile$nOuterFolds
redSteps <- dataFile$redSteps
sampleRatio <- dataFile$sampleRatio
fixedMstop <- dataFile$fixedMstop
fixedNu <- dataFile$fixedNu
offsetFinal <- dataFile$offsetFinal
predModelList <- dataFile$predModelList
offsetFinalClass <- dataFile$offsetFinalClass
predModelListClass <- dataFile$predModelListClass
predictionVector <- dataFile$predictionVector
predictionVectorClass <- dataFile$predictionVectorClass
offsetFinalNoise <- dataFile$offsetFinalNoise
predModelListNoise <- dataFile$predModelListNoise
predictionVectorNoise <- dataFile$predictionVectorNoise
offsetFinalClassNoise <- dataFile$offsetFinalClassNoise
predModelListClassNoise <- dataFile$predModelListClassNoise
predictionVectorClassNoise <- dataFile$predictionVectorClassNoise
#load(dataFile)
# if (grepl(pattern = "_Plain", x = dataFile)){
# fileType <- fileTypePlain
# } else {
# fileType <- fileTypeVols
# }
if (!exists("redSteps")){
redSteps <- dim(predictionVector)[2]
}
#initalize variables for absolute errors
AbsErrors <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save absolute errors after each simulation
sumAbsErrors <- rep(0, redSteps) #vector to calculate sum of absolut errors
AbsErrorsCI97.5 <- rep(0, redSteps)#vectors to save confidence intervals
AbsErrorsCI2.5 <- rep(0, redSteps)
AbsErrorsNoiseX <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save absolute errors after each simulation
sumAbsErrorsNoiseX <- rep(0, redSteps)#vector to calculate sum of absolut errors
AbsErrorsNoiseXCI97.5 <- rep(0, redSteps)#vectors to save confidence intervals
AbsErrorsNoiseXCI2.5 <- rep(0, redSteps)
AbsErrorsNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save absolute errors after each simulation
sumAbsErrorsNoiseY <- rep(0, redSteps)#vector to calculate sum of absolut errors
AbsErrorsNoiseYCI97.5 <- rep(0, redSteps)#vectors to save confidence intervals
AbsErrorsNoiseYCI2.5 <- rep(0, redSteps)
AbsErrorsNoiseXNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save absolute errors after each simulation
sumAbsErrorsNoiseXNoiseY <- rep(0, redSteps)#vector to calculate sum of absolut errors
AbsErrorsNoiseXNoiseYCI97.5 <- rep(0, redSteps) #vectors to save confidence intervals
AbsErrorsNoiseXNoiseYCI2.5 <- rep(0, redSteps)
#initialize variables for AUC
AUCsb <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save AUCafter each simulation
sumAUC <- rep(0, redSteps) #vector for sum of AUC and
CIAUC2.5 <- rep(0, redSteps)#vectors to save confidence intervals
CIAUC97.5 <- rep(0, redSteps)
AUCNoiseX <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save AUCafter each simulation
sumAUCNoiseX <- rep(0, redSteps) #vector for sum of AUC and
CIAUCNoiseX2.5 <- rep(0, redSteps)#vectors to save confidence intervals
CIAUCNoiseX97.5 <- rep(0, redSteps)
AUCNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save AUCafter each simulation
sumAUCNoiseY <- rep(0, redSteps) #vector for sum of AUC and
CIAUCNoiseY2.5 <- rep(0, redSteps)#vectors to save confidence intervals
CIAUCNoiseY97.5 <- rep(0, redSteps)
AUCNoiseXNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix to save AUCafter each simulation
sumAUCNoiseXNoiseY <- rep(0, redSteps) #vector for sum of AUC and
CIAUCNoiseXNoiseY2.5<- rep(0, redSteps)#vectors to save confidence intervals
CIAUCNoiseXNoiseY97.5<- rep(0, redSteps)
#initialize variables for variance
variance <- matrix(0, nrow = redSteps, ncol = simulations)#matrix save variance after each reduction step
sumvariance <- rep(0, redSteps) #vector for sum of variance
CIvariance2.5 <- rep(0, redSteps) #vector to save confidence intervals of variance
CIvariance97.5 <- rep(0, redSteps)
varianceNoiseX <- matrix(0, nrow = redSteps, ncol = simulations)#matrix save variance after each reduction step
sumvarianceNoiseX <- rep(0, redSteps) #vector for sum of variance
CIvariance2.5NoiseX <- rep(0, redSteps) #vector to save confidence intervals of variance
CIvariance97.5NoiseX <- rep(0, redSteps)
varianceNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix save variance after each reduction step
sumvarianceNoiseY <- rep(0, redSteps) #vector for sum of variance
CIvariance2.5NoiseY <- rep(0, redSteps) #vector to save confidence intervals of variance
CIvariance97.5NoiseY <- rep(0, redSteps)
varianceNoiseXNoiseY <- matrix(0, nrow = redSteps, ncol = simulations)#matrix save variance after each reduction step
sumvarianceNoiseXNoiseY <- rep(0, redSteps) #vector for sum of variance
CIvariance2.5NoiseXNoiseY <- rep(0, redSteps) #vector to save confidence intervals of variance
CIvariance97.5NoiseXNoiseY <- rep(0, redSteps)
for (simulation in 1:simulations) {
print(simulation)
########REGRESSION
AbsErrors[,simulation] <- colSums(abs(t(predictionVector[simulation,,])-Y[,simulation]))/simulations
AbsErrorsNoiseX[,simulation] <- colSums(abs(t(predictionVectorNoise[simulation,,])-Y[,simulation]))/simulations
AbsErrorsNoiseY[,simulation] <- colSums(abs(t(predictionVector[simulation,,])-Ynoise[,simulation]))/simulations
AbsErrorsNoiseXNoiseY[,simulation] <- colSums(abs(t(predictionVectorNoise[simulation,,])-Ynoise[,simulation]))/simulations
#######CLASSIFICATION (VIA REGRESSION)
for (reduction in 1:redSteps){
#print(reduction)#DEBUG
#sumAUC[reduction] <- sumAUC[reduction] + auc(yBin[,simulation], predictionVectorClass[simulation, reduction, ])
#calculate AUC
AUCsb[reduction, simulation] <- auc(yBin[,simulation], predictionVectorClass[simulation, reduction, ])
AUCNoiseX[reduction, simulation] <- auc(yBin[,simulation], predictionVectorClassNoise[simulation, reduction, ])
AUCNoiseY[reduction, simulation] <- auc(yBinNoise[,simulation], predictionVectorClass[simulation, reduction, ])
AUCNoiseXNoiseY[reduction, simulation] <- auc(yBinNoise[,simulation], predictionVectorClassNoise[simulation, reduction, ])
#calculate variance
variance[reduction, simulation] <- var(predictionVector[simulation , reduction ,])
varianceNoiseX[reduction, simulation] <- var(predictionVectorNoise[simulation , reduction ,])
varianceNoiseY[reduction, simulation] <- var(predictionVector[simulation , reduction ,] - Ynoise[,simulation])
varianceNoiseXNoiseY[reduction, simulation] <- var(predictionVectorNoise[simulation , reduction ,] - Ynoise[,simulation] )
#print(variance2.5) #DEBUG
}
} #end simulation loop
for (reduction in 1:redSteps){
#calculate confidence intervals for sum of absolut errors
AbsErrorsCI <- CI(AbsErrors[reduction,]) #take all reductions steps over the simulations and calculate CI
AbsErrorsCI97.5[reduction] <- AbsErrorsCI[1] #upper CI
AbsErrorsCI2.5[reduction] <- AbsErrorsCI[3] #upper CI
AbsErrorsCI <- CI(AbsErrorsNoiseX[reduction,]) #take all reductions steps over the simulations and calculate CI
AbsErrorsNoiseXCI97.5[reduction] <- AbsErrorsCI[1] #upper CI
AbsErrorsNoiseXCI2.5[reduction] <- AbsErrorsCI[3] #upper CI
AbsErrorsCI <- CI(AbsErrorsNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
AbsErrorsNoiseYCI97.5[reduction] <- AbsErrorsCI[1] #upper CI
AbsErrorsNoiseYCI2.5[reduction] <- AbsErrorsCI[3] #upper CI
AbsErrorsCI <- CI(AbsErrorsNoiseXNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
AbsErrorsNoiseXNoiseYCI97.5[reduction] <- AbsErrorsCI[1] #upper CI
AbsErrorsNoiseXNoiseYCI2.5[reduction] <- AbsErrorsCI[3] #upper CI
#calculate confidence intervals for AUC
CIAUC <- CI(AUCsb[reduction,]) #take all reductions steps over the simulations and calculate CI
CIAUC97.5[reduction] <- CIAUC[1] #upper CI
CIAUC2.5[reduction] <- CIAUC[3] #lower CI
CIAUC <- CI(AUCNoiseX[reduction,]) #take all reductions steps over the simulations and calculate CI
CIAUCNoiseX97.5[reduction] <- CIAUC[1] #upper CI
CIAUCNoiseX2.5[reduction] <- CIAUC[3] #lower CI
CIAUC <- CI(AUCNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
CIAUCNoiseY97.5[reduction] <- CIAUC[1] #upper CI
CIAUCNoiseY2.5[reduction] <- CIAUC[3] #lower CI
CIAUC <- CI(AUCNoiseXNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
CIAUCNoiseXNoiseY97.5[reduction] <- CIAUC[1] #upper CI
CIAUCNoiseXNoiseY2.5[reduction] <- CIAUC[3] #lower CI
#calculate confidence intervals for variance
CIvariance <- CI(variance[reduction,]) #take all reductions steps over the simulations and calculate CI
CIvariance2.5[reduction] <- CIvariance[3] #lower band
CIvariance97.5[reduction] <- CIvariance[1] #upper band
CIvariance <- CI(varianceNoiseX[reduction,]) #take all reductions steps over the simulations and calculate CI
CIvariance2.5NoiseX[reduction] <- CIvariance[3] #lower band
CIvariance97.5NoiseX[reduction] <- CIvariance[1] #upper band
CIvariance <- CI(varianceNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
CIvariance2.5NoiseY[reduction] <- CIvariance[3] #lower band
CIvariance97.5NoiseY[reduction] <- CIvariance[1] #upper band
CIvariance <- CI(varianceNoiseXNoiseY[reduction,]) #take all reductions steps over the simulations and calculate CI
CIvariance2.5NoiseXNoiseY[reduction] <- CIvariance[3] #lower band
CIvariance97.5NoiseXNoiseY[reduction] <- CIvariance[1] #upper band
}
sumAUC <- rowSums(AUCsb)/simulations
sumAUCNoiseX <- rowSums(AUCNoiseX)/simulations
sumAUCNoiseY <- rowSums(AUCNoiseY)/simulations
sumAUCNoiseXNoiseY <- rowSums(AUCNoiseXNoiseY)/simulations
sumAbsErrors <- rowSums(AbsErrors)/simulations
sumAbsErrorsNoiseX <- rowSums(AbsErrorsNoiseX)/simulations
sumAbsErrorsNoiseY <- rowSums(AbsErrorsNoiseY)/simulations
sumAbsErrorsNoiseXNoiseY <- rowSums(AbsErrorsNoiseXNoiseY)/simulations
sumvariance <- rowSums(variance)/simulations
sumvarianceNoiseX <- rowSums(varianceNoiseX)/simulations
sumvarianceNoiseY <- rowSums(varianceNoiseY)/simulations
sumvarianceNoiseXNoiseY <- rowSums(varianceNoiseXNoiseY)/simulations
#save these values
#####convergenceIteration(data)#after how many redSteps do analyse plots converge, for plots
####FOR LATER SAVE outcome of convergence####
redStepsconv <- convergenceIteration(data) #after how many reduction steps does boosting iteration converge
save(redStepsconv, AbsErrors, AbsErrorsNoiseX, AbsErrorsNoiseY, AbsErrorsNoiseXNoiseY,
sumAbsErrors, AbsErrorsCI97.5, AbsErrorsCI2.5,
sumAbsErrorsNoiseX, AbsErrorsNoiseXCI97.5, AbsErrorsNoiseXCI2.5,
sumAbsErrorsNoiseY, AbsErrorsNoiseYCI97.5, AbsErrorsNoiseYCI2.5,
sumAbsErrorsNoiseXNoiseY, AbsErrorsNoiseXNoiseYCI97.5, AbsErrorsNoiseXNoiseYCI2.5,
AUCsb, AUCNoiseX, AUCNoiseY, AUCNoiseXNoiseY,
sumAUC, CIAUC97.5, CIAUC2.5,
sumAUCNoiseX, CIAUCNoiseX97.5, CIAUCNoiseX2.5,
sumAUCNoiseY, CIAUCNoiseY97.5, CIAUCNoiseY2.5,
sumAUCNoiseXNoiseY, CIAUCNoiseXNoiseY97.5, CIAUCNoiseXNoiseY2.5,
variance, varianceNoiseX, varianceNoiseY, varianceNoiseXNoiseY,
sumvariance, CIvariance2.5, CIvariance97.5,
sumvarianceNoiseX, CIvariance2.5NoiseX, CIvariance97.5NoiseX,
sumvarianceNoiseY, CIvariance2.5NoiseY, CIvariance97.5NoiseY,
sumvarianceNoiseXNoiseY, CIvariance2.5NoiseXNoiseY, CIvariance97.5NoiseXNoiseY,
file = "EVAL.rda")
nred <- redStepsconv[1,] #convergence of Iteration for AbsError
nredNoiseX <- redStepsconv[2,]
nredNoiseY <- redStepsconv[3,]
nredNoiseXNoiseY <- redStepsconv[4,]
meanred <- sum(nred)/simulations #mean of reduction Steps for Absolute Error over simulations
meanredNoiseX <- sum(nredNoiseX)/simulations
meanredNoiseY <- sum(nredNoiseY)/simulations
meanredNoiseXNoiseY <- sum(nredNoiseXNoiseY)/simulations
nredAUC <- redStepsconv[5,]#convergence of Iteration for AUC
nredAUCNoiseX <- redStepsconv[6,]
nredAUCNoiseY <- redStepsconv[7,]
nredAUCNoiseXNoiseY <- redStepsconv[8,]
meanredAUC <- sum(nredAUC)/simulations #mean of reduction Steps for AUC over simulations
meanredAUCNoiseX <- sum(nredAUCNoiseX)/simulations
meanredAUCNoiseY <- sum(nredAUCNoiseY)/simulations
meanredAUCNoiseXNoiseY <- sum(nredAUCNoiseXNoiseY)/simulations
#Plots
pdf("plots.pdf") #save plot
plot(sumAbsErrors, ylim=range(sumAbsErrors, AbsErrorsCI97.5, AbsErrorsCI2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of absolute Errors", main = "Sum of absolute Errors")
lines(AbsErrorsCI97.5, col="red") #add CIs to plot
lines(AbsErrorsCI2.5, col="red")
#abline(h = sumAbsErrors[meanred], col = "green") #horizontal line that shows convergence
plot(sumAbsErrorsNoiseX, ylim=range(sumAbsErrorsNoiseX, AbsErrorsNoiseXCI97.5, AbsErrorsNoiseXCI2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of absolute Errors", main = "Sum of absolute Errors for noisy X")
lines(AbsErrorsNoiseXCI97.5, col="red") #add CIs to plot
lines(AbsErrorsNoiseXCI2.5, col="red")
#abline(h = sumAbsErrorsNoiseX[meanredNoiseX], col = "green") #horizontal line that shows convergence
plot(sumAbsErrorsNoiseY, ylim=range(sumAbsErrorsNoiseY, AbsErrorsNoiseYCI97.5, AbsErrorsNoiseYCI2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of absolute Errors", main = "Sum of absolute Errors for noisy Y")
lines(AbsErrorsNoiseYCI97.5, col="red") #add CIs to plot
lines(AbsErrorsNoiseYCI2.5, col="red")
#abline(h = sumAbsErrorsNoiseY[meanredNoiseY], col = "green") #horizontal line that shows convergence
plot(sumAbsErrorsNoiseXNoiseY, ylim=range(sumAbsErrorsNoiseXNoiseY, AbsErrorsNoiseXNoiseYCI97.5, AbsErrorsNoiseXNoiseYCI2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of absolute Errors", main = "Sum of absolute Errors for noisy X and noisy Y")
lines(AbsErrorsNoiseXNoiseYCI97.5, col="red") #add CIs to plot
lines(AbsErrorsNoiseXNoiseYCI2.5, col="red")
#abline(h = sumAbsErrorsNoiseXNoiseY[meanredNoiseXNoiseY], col = "green") #horizontal line that shows convergence
# #relative values (to the basic boosting) for first and last simulation
# plot(AbsErrors[,simulations]/AbsErrors[1, simulations], ylim=range(AbsErrors[,simulations]/AbsErrors[1, simulations], AbsErrors[,1]/AbsErrors[1, 1]), type="l", main = "Relative value of absolute error to basic boosting (first and last simulation)", xlab = "Iteration", ylab = "Relative value")
# lines(AbsErrors[,1]/AbsErrors[1, 1], type="l", col = "blue")
# abline(h = 1, col = "red") #if line lays above the line the Abs Error of basic boosting is higher
# plot(AbsErrorsNoiseX[,simulations]/AbsErrorsNoiseX[1, simulations], ylim=range(AbsErrorsNoiseX[,simulations]/AbsErrorsNoiseX[1, simulations], AbsErrorsNoiseX[,1]/AbsErrorsNoiseX[1, 1]), type="l", main = "Relative value of absolute error to basic boosting for noisy X (first and last simulation)", xlab = "Iteration", ylab = "Relative value")
# lines(AbsErrorsNoiseX[,1]/AbsErrorsNoiseX[1, 1], type="l", col = "blue")
# abline(h = 1, col = "red") #if line lays above the line the Abs Error of basic boosting is higher
# plot(AbsErrorsNoiseY[,simulations]/AbsErrorsNoiseY[1, simulations], ylim=range(AbsErrorsNoiseY[,simulations]/AbsErrorsNoiseY[1, simulations], AbsErrorsNoiseY[,1]/AbsErrorsNoiseY[1, 1]), type="l", main = "Relative value of absolute error to basic boosting for noisy Y (first and last simulation)", xlab = "Iteration", ylab = "Relative value")
# lines(AbsErrorsNoiseY[,1]/AbsErrorsNoiseY[1, 1], type="l", col = "blue")
# abline(h = 1, col = "red") #if line lays above the line the Abs Error of basic boosting is higher
# plot(AbsErrorsNoiseXNoiseY[,simulations]/AbsErrorsNoiseXNoiseY[1, simulations], ylim=range(AbsErrorsNoiseXNoiseY[,simulations]/AbsErrorsNoiseXNoiseY[1, simulations], AbsErrorsNoiseXNoiseY[,1]/AbsErrorsNoiseXNoiseY[1, 1]), type="l", main = "Relative value of absolute error to basic boosting for noisy X and noisy Y (first and last simulation)", xlab = "Iteration", ylab = "Relative value")
# lines(AbsErrorsNoiseXNoiseY[,1]/AbsErrorsNoiseXNoiseY[1, 1], type="l", col = "blue")
# abline(h = 1, col = "red") #if line lays above the line the Abs Error of basic boosting is higher
# #classification AUC
#relative values (to the basic boosting)
plot(sumAbsErrors/sumAbsErrors[1], main = "Relative value of absolute error to basic boosting ")
plot(sumAbsErrorsNoiseX/sumAbsErrorsNoiseX[1], main = "Relative value of absolute error to basic boosting ")
plot(sumAbsErrorsNoiseY/sumAbsErrorsNoiseY[1], main = "Relative value of absolute error to basic boosting ")
plot(sumAbsErrorsNoiseXNoiseY/sumAbsErrorsNoiseXNoiseY[1], main = "Relative value of absolute error to basic boosting ")
plot(sumAUC, ylim=range(sumAUC, CIAUC97.5, CIAUC2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of AUC", main = "Sum aof AUC")
lines(CIAUC2.5, col="red") #add CIs to plot
lines(CIAUC97.5, col="red")
#abline(h = sumAUC[meanredAUC], col = "green") #horizontal line that shows convergence
plot(sumAUCNoiseX, ylim=range(sumAUCNoiseX, CIAUCNoiseX97.5, CIAUCNoiseX2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of AUC", main = "Sum aof AUC for noisy X")
lines(CIAUCNoiseX2.5, col="red") #add CIs to plot
lines(CIAUCNoiseX97.5, col="red")
#abline(h = sumAUCNoiseX[meanredAUCNoiseX], col = "green") #horizontal line that shows convergence
plot(sumAUCNoiseY, ylim=range(sumAUCNoiseY, CIAUCNoiseY97.5, CIAUCNoiseY2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of AUC", main = "Sum aof AUC for noisy Y")
lines(CIAUCNoiseY2.5, col="red") #add CIs to plot
lines(CIAUCNoiseY97.5, col="red")
#abline(h = sumAUCNoiseY[meanredAUCNoiseY], col = "green") #horizontal line that shows convergence
plot(sumAUCNoiseXNoiseY, ylim=range(sumAUCNoiseXNoiseY, CIAUCNoiseXNoiseY97.5, CIAUCNoiseXNoiseY2.5), col='black', type="l", xlab = "Iteration", ylab = "Sum of AUC", main = "Sum aof AUC for noisy X and noisy Y")
lines(CIAUCNoiseXNoiseY2.5, col="red") #add CIs to plot
lines(CIAUCNoiseXNoiseY97.5, col="red")
#abline(h = sumAUCNoiseXNoiseY[meanredAUCNoiseXNoiseY], col = "green") #horizontal line that shows convergence
#variance
plot(sumvariance, ylim=range(sumvariance, CIvariance2.5, CIvariance97.5), col='black', type = "l", xlab = "Iteration", ylab = "Sum of Variance", main = "Sum of Variance")
lines(CIvariance97.5, col="red") #add CIs to plot
lines(CIvariance2.5, col="red")
plot(sumvarianceNoiseX, ylim=range(sumvarianceNoiseX, CIvariance2.5NoiseX, CIvariance97.5NoiseX), col='black', type = "l", xlab = "Iteration", ylab = "Sum of Variance", main = "Sum of Variance for noisy X")
lines(CIvariance2.5NoiseX, col="red") #add CIs to plot
lines(CIvariance97.5NoiseX, col="red")
plot(sumvarianceNoiseY, ylim=range(sumvarianceNoiseY, CIvariance2.5NoiseY, CIvariance97.5NoiseY), col='black', type = "l", xlab = "Iteration", ylab = "Sum of Variance", main = "Sum of Variance for noisy Y")
lines(CIvariance2.5NoiseY, col="red") #add CIs to plot
lines(CIvariance97.5NoiseY, col="red")
plot(sumvarianceNoiseXNoiseY, ylim=range(sumvarianceNoiseXNoiseY, CIvariance2.5NoiseXNoiseY, CIvariance97.5NoiseXNoiseY), col='black', type = "l", xlab = "Iteration", ylab = "Sum of Variance" , main = "Sum of Variance for noisy Y and noisy X")
lines(CIvariance2.5NoiseXNoiseY, col="red") #add CIs to plot
lines(CIvariance97.5NoiseXNoiseY, col="red")
dev.off() #end of saving plot
#par(mfrow=c(1,1))
}
########################################################################
#######################################################################
# convergenceIteration: for each simulation, find the reduction iteration
# at which each performance curve converges ("flat enough").
#
# Eight curves are tracked per simulation:
#   - sum of absolute errors: clean, noisy X, noisy Y, noisy X + noisy Y
#   - AUC:                    clean, noisy X, noisy Y, noisy X + noisy Y
#
# Convergence rule (unchanged from the original): every 10th reduction
# step, take the mean numerical gradient of the trailing 10 curve values;
# if its absolute value is below the curve's threshold, record that step
# and stop. A recorded value of 0 means "never converged within redSteps".
#
# Returns an 8 x simulations matrix; row names (nred, nredNoiseX, ...)
# come from the variable names via rbind(), as in the original.
#
# Fixes relative to the original:
#   * the gradient window now indexes the current simulation's column --
#     the original indexed the error MATRIX as a vector
#     (AbsError[(reduction-9):reduction]), i.e. it always read values
#     belonging to simulation 1;
#   * the noisy-X/noisy-Y absolute-error curve now uses the prediction of
#     the current reduction step -- the original summed over ALL reduction
#     steps (predictionVectorNoise[simulation,,]), making the curve
#     constant and convergence trivially detected at step 10.
convergenceIteration <- function(dataFile){
  ###FOR LATER -> give back maximum value for no convergence
  library(pracma)  # gradient()
  library(pROC)    # auc()

  # Only the fields actually used below are extracted; the original also
  # copied ~20 unused fields out of dataFile.
  Y                          <- dataFile$Y
  Ynoise                     <- dataFile$Ynoise
  yBin                       <- dataFile$yBin
  yBinNoise                  <- dataFile$yBinNoise
  simulations                <- dataFile$simulations
  redSteps                   <- dataFile$redSteps
  predictionVector           <- dataFile$predictionVector
  predictionVectorClass      <- dataFile$predictionVectorClass
  predictionVectorNoise      <- dataFile$predictionVectorNoise
  predictionVectorClassNoise <- dataFile$predictionVectorClassNoise

  # Helper: evaluate one curve step by step and return the first reduction
  # step (checked every 10th step) at which the mean gradient of the
  # trailing 10 values drops below `threshold`; 0 if it never does.
  findConvergence <- function(curveValue, threshold) {
    vals <- numeric(redSteps)
    for (reduction in seq_len(redSteps)) {
      vals[reduction] <- curveValue(reduction)
      if (reduction %% 10 == 0) {
        grad <- mean(gradient(vals[(reduction - 9):reduction]))
        if (abs(grad) < threshold) {
          return(reduction)  # curve is flat enough -> converged here
        }
      }
    }
    0  # no convergence within redSteps
  }

  nred                <- rep(0, simulations)
  nredNoiseX          <- rep(0, simulations)
  nredNoiseY          <- rep(0, simulations)
  nredNoiseXNoiseY    <- rep(0, simulations)
  nredAUC             <- rep(0, simulations)
  nredAUCNoiseX       <- rep(0, simulations)
  nredAUCNoiseY       <- rep(0, simulations)
  nredAUCNoiseXNoiseY <- rep(0, simulations)

  print(simulations)
  for (simulation in seq_len(simulations)) {
    print(simulation)

    # Absolute-error curves (threshold 0.07, as in the original).
    nred[simulation] <- findConvergence(function(r) {
      sum(abs(t(predictionVector[simulation, r, ]) - Y[, simulation]))
    }, 0.07)
    nredNoiseX[simulation] <- findConvergence(function(r) {
      sum(abs(t(predictionVectorNoise[simulation, r, ]) - Y[, simulation]))
    }, 0.07)
    nredNoiseY[simulation] <- findConvergence(function(r) {
      sum(abs(t(predictionVector[simulation, r, ]) - Ynoise[, simulation]))
    }, 0.07)
    # NOTE(review): the original used threshold 0.01 here while the other
    # absolute-error curves use 0.07 -- kept as written; confirm intent.
    nredNoiseXNoiseY[simulation] <- findConvergence(function(r) {
      sum(abs(t(predictionVectorNoise[simulation, r, ]) - Ynoise[, simulation]))
    }, 0.01)

    # AUC curves (threshold 0.0001, as in the original).
    nredAUC[simulation] <- findConvergence(function(r) {
      auc(yBin[, simulation], predictionVectorClass[simulation, r, ])
    }, 0.0001)
    nredAUCNoiseX[simulation] <- findConvergence(function(r) {
      auc(yBin[, simulation], predictionVectorClassNoise[simulation, r, ])
    }, 0.0001)
    nredAUCNoiseY[simulation] <- findConvergence(function(r) {
      auc(yBinNoise[, simulation], predictionVectorClass[simulation, r, ])
    }, 0.0001)
    nredAUCNoiseXNoiseY[simulation] <- findConvergence(function(r) {
      auc(yBinNoise[, simulation], predictionVectorClassNoise[simulation, r, ])
    }, 0.0001)
  }#end simulation

  # Row names come from the variable names, matching the original output.
  rbind(nred, nredNoiseX, nredNoiseY, nredNoiseXNoiseY,
        nredAUC, nredAUCNoiseX, nredAUCNoiseY, nredAUCNoiseXNoiseY)
}#end function
# Entry point: run the full analysis on one stored simulation result.
# NOTE(review): the absolute cluster path makes this script non-portable --
# consider passing the job directory as an argument instead of setwd().
setwd("/naslx/projects/ua341/di49suy/signal-noise-ratio/mytest-files/jobs/08")
load("8-result.RData")  # presumably loads the object `result` -- TODO confirm
analyseSimulation(result)
|
69e77210610c1a76aeef766f5d70cea258352054
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/pcIRT/R/LRT.MPRM.R
|
3bc3eafd93db1fbc77e384e3730ae63a0a42764e
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
LRT.MPRM.R
|
#'@rdname lrt
#'@method LRT MPRM
#'@export
LRT.MPRM <-
  function(object, splitcrit="score", ...){
    ## Andersen-type likelihood-ratio test for an MPRM fit: refit the model
    ## within subgroups and compare the joint subgroup likelihood against
    ## the full-sample likelihood.

    ## Determine the subgroup membership vector.
    if (is.character(splitcrit) && splitcrit == "score") {
      ## Default: median split on the raw scores (row sums of the data).
      raw_scores <- rowSums(object$data)
      membership <- ifelse(raw_scores > median(raw_scores), 1, 0)
    } else if (is.vector(splitcrit)) {
      membership <- splitcrit
    } else {
      stop("Error: split criterium has to be a vector!", call. = FALSE)
    }

    ## Refit the model in each subgroup, starting from the full-sample
    ## item parameters.
    start_vals <- as.vector(object$itempar[-nrow(object$itempar), -ncol(object$data)])
    grouped_data <- split(as.data.frame(object$data), as.factor(membership), drop = FALSE)
    subfits <- lapply(grouped_data, function(dat) MPRM(dat, start = start_vals))

    ## Likelihood-ratio statistic, degrees of freedom, p-value.
    sub_logliks <- sapply(subfits, function(fit) fit$logLikelihood)
    sub_npars <- sapply(subfits, function(fit) length(fit$estpar))
    emp_Chi2 <- -2 * (object$logLikelihood - sum(sub_logliks))
    df <- sum(sub_npars) - length(object$estpar)
    pval <- 1 - pchisq(emp_Chi2, df)

    ## Collect per-subgroup item parameters (sign-flipped) and their SEs.
    itempar_split <- sapply(subfits, function(fit) list(fit$itempar * (-1)))
    itemse_split <- sapply(subfits, function(fit) list(fit$itempar_se))

    res_lrt <- list(emp_Chi2 = emp_Chi2, df = df, pval = pval,
                    itempar = itempar_split, item_se = itemse_split)
    class(res_lrt) <- "aLR"
    res_lrt
  }
|
a2f470ed1e23a9fe5801bf834bf350a537963957
|
a53b238211a2229b99941d5c9ced8b5dd42aa098
|
/man/getNOAAGuages.Rd
|
ef5127ca7476ea49609823f6b0eeca7e7d0bfa04
|
[
"CC0-1.0"
] |
permissive
|
JerryHMartin/waterDataSupport
|
ca2695806cebc91f674ca37937d47ccc3bd3af76
|
5260b296294252f01c711a538eaff53eb869eb0e
|
refs/heads/master
| 2023-01-06T21:32:31.788682
| 2023-01-05T15:47:11
| 2023-01-05T15:47:11
| 142,612,276
| 0
| 0
| null | 2020-07-14T18:27:11
| 2018-07-27T18:41:02
|
R
|
UTF-8
|
R
| false
| true
| 1,124
|
rd
|
getNOAAGuages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getNOAAGuages.R
\name{getNOAAGuages}
\alias{getNOAAGuages}
\title{getNOAAGuages}
\usage{
getNOAAGuages(
siteID,
plotmap = TRUE,
zoomFactor = 10,
limitToWatershed = TRUE,
getElevations = TRUE,
leafletmap = NULL
)
}
\arguments{
\item{siteID}{the USGS site identifier for stream flow analysis}
\item{plotmap}{TRUE if output plot is desired}
\item{zoomFactor}{the zoom factor of the output plot}
\item{limitToWatershed}{only pull datapoints on the watershed}
\item{getElevations}{retrieve elevations of datapoints}
\item{leafletmap}{pass a leaflet map argument; note the output changes to a
list if this parameter is invoked}
}
\description{
This function retrieves information on the hydraulic unit code
given a USGS monitoring station ID.
}
\details{
Much of this code was drawn from this source.
http://cyberhelp.sesync.org/leaflet-in-R-lesson/course/archive.html
}
\examples{
getNOAAGuages("02131000")
}
\keyword{NOAA}
\keyword{USGS}
\keyword{mapping}
\keyword{precipitation}
|
dd13d140d5b064301632eebafa3c0c9a325476a0
|
9b9af459e4837a5d15c693c4ad0935be645a5667
|
/cachematrix.R
|
695acab0cca753d0014b3cf0fc6c3162d904804f
|
[] |
no_license
|
billcary/ProgrammingAssignment2
|
0a92f8edd021eb07b51b0c0ea1e5654076d6a51d
|
5b732476eb3ccd374589a536a3fc6f6cf0efdc79
|
refs/heads/master
| 2020-12-25T12:17:25.306531
| 2014-07-23T02:48:29
| 2014-07-23T02:48:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,966
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap a matrix in an object that can cache its inverse.
##
## Returns a list of four accessor functions:
##   set(y)    -- store a new matrix and invalidate the cached inverse
##   get()     -- return the stored matrix
##   setinv(v) -- store a computed inverse in the cache
##   getinv()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(mtrx = matrix()) {
    ## The cached inverse lives in this closure; NULL means "not computed".
    inv <- NULL

    set <- function(y) {
        mtrx <<- y    # replace the stored matrix in the enclosing env
        inv <<- NULL  # any previously cached inverse is now stale
    }
    get <- function() {
        mtrx
    }
    setinv <- function(solve) {
        inv <<- solve
    }
    getinv <- function() {
        inv
    }

    ## Expose the accessors to callers (e.g. cacheSolve).
    list(set = set,
         get = get,
         setinv = setinv,
         getinv = getinv)
}
## cacheSolve: return the inverse of the special "matrix" object built by
## makeCacheMatrix. The inverse is computed with solve() at most once;
## repeated calls return the cached value (announced via message()).
cacheSolve <- function(x, ...) {
    cached <- x$getinv()
    if (!is.null(cached)) {
        ## Cache hit: skip the computation entirely.
        message("getting cached data")
        return(cached)
    }
    ## Cache miss: compute the inverse, store it for next time, return it.
    result <- solve(x$get(), ...)
    x$setinv(result)
    result
}
|
712e001b85c89e27462e388a88a901c3095efc8f
|
728315d8c5d09e13c67641030b92d59c5e7e2222
|
/easy/split_the_number.r
|
7e146b753d1a8e2d4fbb2064783c2042a0709049
|
[
"MIT"
] |
permissive
|
shortthirdman/code-eval-challenges
|
88ea93c0e9385b2a0db95a05b1f3f753c900a62d
|
cf2197927830326539399fdd3e16c9b8a4468f7d
|
refs/heads/master
| 2023-03-06T19:34:44.607154
| 2023-02-26T13:30:56
| 2023-02-26T13:30:56
| 92,970,178
| 4
| 0
|
MIT
| 2023-02-26T13:30:57
| 2017-05-31T17:14:59
|
Go
|
UTF-8
|
R
| false
| false
| 443
|
r
|
split_the_number.r
|
## splitnum: re-insert the arithmetic operators of pattern s[2] into the
## digit string s[1] and evaluate the resulting sum.
## `s` is c(digits, pattern); every non-operator character in the pattern
## consumes the next digit of s[1].
splitnum <- function(s) {
  total <- 0  # accumulated value of completed terms
  term  <- 0  # value of the term currently being read
  sign  <- 1  # sign applied to the current term when it completes
  digit <- 1  # next digit position of s[1] to consume
  for (ch in strsplit(s[2], "")[[1]]) {
    if (ch == "+" || ch == "-") {
      # Operator: close out the current term, start a new one.
      total <- total + term * sign
      sign  <- if (ch == "+") 1 else -1
      term  <- 0
    } else {
      # Placeholder character: consume the next digit of s[1].
      term  <- term * 10 + as.integer(substr(s[1], digit, digit))
      digit <- digit + 1
    }
  }
  total + term * sign
}
# Read the input file (last command-line argument); each line is
# "<digits> <pattern>", split on the space into c(digits, pattern) for
# splitnum(). Print one result per line.
cat(sapply(strsplit(readLines(tail(commandArgs(), n=1)), " "), splitnum), sep="\n")
|
23f6b1accf2bd39f23e761114905feb69973c6da
|
2c6bdee82bc3df0a9ddf65e55e2dc4019bd22521
|
/SimpleRandom/server.R
|
ee1895c814979b1a4d5993c6d3528b242d3892ec
|
[] |
no_license
|
homerhanumat/shinyGC
|
0eb1b55bcc8373385ea1091c36ccc1d577dc72fb
|
dc580961877af2459db8a69f09d539f37fd6e2ee
|
refs/heads/master
| 2021-07-11T18:20:53.862578
| 2021-06-21T19:18:32
| 2021-06-21T19:18:32
| 37,830,825
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,490
|
r
|
server.R
|
library(shiny, quietly = TRUE)
library(scales, quietly = TRUE)
library(dplyr, quietly = TRUE)
library(ggplot2, quietly = TRUE)
library(tigerstats, quietly=TRUE)
# Shiny server for the simple-random-sample app: shows the population
# distribution of a chosen variable from tigerstats::imagpop, draws a
# simple random sample of size input$n, and compares the sample against
# the population graphically and numerically.
function(input, output) {

  # Shared reactive state.
  rv <- reactiveValues(
    newVar = TRUE,       # TRUE until a sample is drawn (or after reset)
    variable = "income", # currently selected population variable
    factor = FALSE,      # is the selected variable a factor?
    psData = NULL,       # stacked population + sample data frame
    yMax = NULL          # fixed density y-limit so plots don't rescale
  )

  # When the user picks a variable, record whether it is a factor and, for
  # numeric variables, fix the y-axis limit at 1.5x the density peak.
  observeEvent(input$variable, {
    rv$variable <- input$variable
    var <- imagpop[, rv$variable]
    if (is.factor(var)) {
      rv$factor <- TRUE
    } else {
      rv$factor <- FALSE
      den <- density(var)
      rv$yMax <- 1.5 * max(den$y)
    }
  })

  # Draw a new simple random sample (without replacement) and stack it
  # with the population for plotting and tabulation.
  observeEvent(input$sample, {
    rv$newVar <- FALSE
    pop <- imagpop[, rv$variable]
    samp <- sample(pop, size = input$n, replace = FALSE)
    popDF <- data.frame(values = pop, type = rep("Population", length(pop)))
    sampDF <- data.frame(values = samp, type = rep("Sample", input$n))
    rv$psData <- rbind(popDF, sampDF)
  })

  # Reset to the initial state.
  observeEvent(input$reset, {
    rv$newVar <- TRUE
    rv$variable <- "income"
    rv$factor <- FALSE
    rv$psData <- NULL
  })

  # Expose rv$newVar to the UI (e.g. for conditionalPanel) and keep it
  # computed even while the panel is hidden.
  output$newVar <- reactive({
    rv$newVar
  })
  outputOptions(output, "newVar", suspendWhenHidden = FALSE)

  # Population-only plot: density for numeric variables, bar graph for
  # factors.
  output$initialPlot <- renderPlot({
    validate(
      need(4 <= input$n && input$n <= 1000,
           message = "Sample size must be between 4 and 1000.")
    )
    if (!rv$factor) {
      ggplot(data = imagpop, aes_string(x = rv$variable), alpha = 0.5) +
        geom_density(fill = "red") +
        scale_y_continuous(limits = c(0, rv$yMax)) +
        labs(title = "Density Plot of the Population")
    } else {
      ggplot(data = imagpop, aes_string(x = rv$variable)) +
        geom_bar(aes(y = ..count../sum(..count..)), fill = "red") +
        scale_y_continuous(labels = percent) +
        labs(title = "Bar Graph of the Population",
             y = "Percent")
    }
  })

  # Population-only summary: favstats for numeric variables, row
  # percentages for factors.
  output$initialTable <- renderTable({
    validate(
      need(4 <= input$n && input$n <= 1000,
           message = "Sample size must be between 4 and 1000.")
    )
    form <- as.formula(paste0("~", rv$variable))
    if (!rv$factor) {
      favstats(form, data = imagpop)[1:8]
    } else {
      tab <- (rowPerc(xtabs(form, data = imagpop)))
      rownames(tab) <- "Population Percentages"
      tab
    }
  })

  # Population + sample comparison plot (only after a sample is drawn).
  output$plot <- renderPlot({
    validate(
      need(4 <= input$n && input$n <= 1000,
           message = "Sample size must be between 4 and 1000.")
    )
    if (!rv$newVar) {
      if (!rv$factor) {
        dfSamp <- subset(rv$psData, type == "Sample")
        ggplot(data = rv$psData, aes(x = values, fill = type)) +
          geom_density(alpha = 0.5) +
          geom_rug(data = dfSamp, aes(x = values)) +
          labs(title = "Density Plot of the Population, with Sample",
               x = rv$variable) +
          scale_y_continuous(limits = c(0, rv$yMax)) +
          guides(fill = guide_legend(title = "Plot for:"))
      } else {
        # Relative frequency per (type, level), label text and placement.
        df <- rv$psData %>% group_by(type, values) %>%
          summarize(n = n()) %>%
          mutate(relFreq = n/sum(n)) %>%
          mutate(pos = 0.5 * relFreq) %>%
          mutate(label = paste0(sprintf("%.1f", relFreq*100), "%"))
        ggplot(data = df, aes(x = values, fill = type)) +
          geom_bar(aes(y = relFreq), stat = "identity", position = "dodge") +
          scale_y_continuous(labels = percent) +
          geom_text(aes(x = values, y = pos, label = label, ymax = relFreq),
                    position = position_dodge(width = 1), size = 5) +
          # FIX: this branch draws a bar graph; the title previously said
          # "Density Plot of the Population, with Sample" (copy-paste
          # from the numeric branch above).
          labs(title = "Bar Graph of the Population, with Sample",
               x = rv$variable) +
          guides(fill = guide_legend(title = "Bars for:"))
      }
    }
  })

  # Population + sample summary table.
  output$table <- renderTable({
    validate(
      need(4 <= input$n && input$n <= 1000,
           message = "Sample size must be between 4 and 1000.")
    )
    if (!is.null(rv$psData)) {
      df2 <- rv$psData
      names(df2)[1] <- rv$variable
      if (!rv$factor) {
        form <- as.formula(paste0(rv$variable, " ~ type"))
        favstats(form, data = df2)[1:8]
      } else {
        form <- as.formula(paste0("~ type + ", rv$variable))
        tab <- rowPerc(xtabs(form, data = df2))
        tab
      }
    }
  })
}
|
d06411bcbdb200ceafe733963a6262a32b3f397e
|
b84d89b3f67fbd57e2d41f42c23c1f82fe7ba9fd
|
/R/fitted.TEfitAll.R
|
bc09bd16ba24957b992f9d769deea9fd31dbb1f2
|
[
"MIT"
] |
permissive
|
akcochrane/TEfits
|
04305849bd8393c9e816312085a228ccdbd621e3
|
e11b07b2d9fed9eb6e8221c8cfdd86b9e287180e
|
refs/heads/master
| 2023-06-08T00:04:21.025346
| 2023-06-07T22:20:11
| 2023-06-07T22:20:11
| 225,967,950
| 1
| 0
|
MIT
| 2023-06-07T22:20:13
| 2019-12-04T22:22:41
|
HTML
|
UTF-8
|
R
| false
| false
| 1,163
|
r
|
fitted.TEfitAll.R
|
#' Get fitted values and summary statistics from a set of TEfit models
#'
#' @param TEs3s A set of models fit by TEfitAll()
#'
#' @return A list with elements:
#' \itemize{
#'   \item \code{allPreds} -- matrix of fitted values, one row per fit,
#'     columns padded with \code{NA} up to the longest fit
#'   \item \code{meanPred} -- columnwise mean of \code{allPreds} (\code{NA} removed)
#'   \item \code{ciPred} -- columnwise half-width of the 95\% normal-approximation
#'     confidence interval of the mean
#'   \item \code{maxTime} -- largest value of the time variable (second data
#'     column) across all fits
#' }
#'
#' @method fitted TEfitAll
#' @export
#'
#' @examples
#' \dontrun{
#' m <- TEfitAll(anstrain[,c('acc','trialNum')],groupingVar = anstrain$subID)
#' fitted_data <- fitted(m)
#' plot(fitted_data$meanPred)
#' }
#'
fitted.TEfitAll <- function(TEs3s){
  nFits <- length(TEs3s$allFitList)
  # Over-allocate columns (10x the first fit's length) so fits of varying
  # lengths all fit; unused all-NA columns are trimmed below.
  allPreds <- matrix(NA, nFits, nrow(TEs3s$allFitList[[1]]$data) * 10)
  maxTime <- 0
  for (curGroup in seq_len(nFits)) {
    curFit <- TEs3s$allFitList[[curGroup]]
    allPreds[curGroup, seq_len(nrow(curFit$data))] <- curFit$model$fitVals
    # Track the overall maximum of the time variable (2nd data column).
    maxTime <- max(maxTime, curFit$data[, 2], na.rm = TRUE)
  }
  # Drop columns that are NA for every fit. drop = FALSE keeps the matrix
  # shape even when a single column survives (the original collapsed to a
  # vector there, which broke the apply() calls below).
  allPreds <- allPreds[, colSums(!is.na(allPreds)) > 0, drop = FALSE]
  meanPred <- apply(allPreds, 2, mean, na.rm = TRUE)
  # 95% CI half-width: z_{.975} * SE, with per-column n = non-NA fit count.
  nPerCol <- colSums(!is.na(allPreds))
  ciPred <- qnorm(0.975) * (apply(allPreds, 2, sd, na.rm = TRUE) / sqrt(nPerCol))
  return(list(allPreds = allPreds, meanPred = meanPred, ciPred = ciPred, maxTime = maxTime))
}
|
3b17042127610e4c1a4b8711b1428ff3edd46a49
|
63e5fc70d2e6233457fc9ad407d7e4984bfc8997
|
/man/plot_hisafe_voxels.Rd
|
9719889d54e242ab28a4c3070521efc8d412f520
|
[] |
no_license
|
Boffiro/hisafer
|
32f648f6aca222d01006d25da1b237846b13113e
|
8773fe3d5d2aa6d307af0088a6f6e79cc9a087d0
|
refs/heads/master
| 2023-05-06T17:53:22.060974
| 2020-10-16T09:48:48
| 2020-10-16T09:48:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,829
|
rd
|
plot_hisafe_voxels.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_hisafe_voxels}
\alias{plot_hisafe_voxels}
\title{Tile plot of Hi-sAFe voxels output variable}
\usage{
plot_hisafe_voxels(
hop,
variable,
date.min = NA,
date.max = NA,
simu.names = "all",
X = NA,
Y = NA,
Z = NA,
summarize.by = "z",
facet.simu = TRUE,
facet.z = FALSE,
vline.dates = NA,
plot = TRUE
)
}
\arguments{
\item{hop}{An object of class hop.}
\item{variable}{A character string of the name of the variable to plot.}
\item{date.min}{A character vector containing the minimum date (yyyy-mm-dd) to include.
If \code{NA}, the default, then the minimum value in the voxels profile is used}
\item{date.max}{A character vector containing the maximum date (yyyy-mm-dd) to include.
If \code{NA}, the default, then the maximum value in the voxels profile is used}
\item{simu.names}{A character string containing the SimulationNames to include. Use "all" to include all available values.}
\item{X}{A numeric vector of the x values of the voxels to include. If \code{NA}, the default, then all x values are used.}
\item{Y}{A numeric vector of the y values of the voxels to include. If \code{NA}, the default, then all y values are used.}
\item{Z}{A numeric vector of the z values of the voxels to include. If \code{NA}, the default, then all z values are used.}
\item{summarize.by}{One of 'x', 'y', or 'z', indicating an axis over which to average voxel values.
If \code{NA}, the default, then no averaging is done and each voxel is plotted as its own line.}
\item{vline.dates}{A character vector of dates (yyyy-mm-dd) at which to plot dashed vertical reference lines.
If \code{NA}, the default, then no reference lines are drawn.}
\item{plot}{If \code{TRUE}, the default, a ggplot object is returned. If \code{FALSE}, the data that would create the plot is returned.}
}
\value{
Returns a ggplot object.
}
\description{
Plots a tile plot of a single Hi-sAFe voxels output variable.
If a single date is provided, SimulationName is used as the column facet. Otherwise, Date is used as the column facet.
}
\examples{
\dontrun{
# After reading in Hi-sAFe simulation data via:
mydata <- read_hisafe(path = "./")
# You can create a tile plot of waterAvailable:
tile.plot <- plot_hisafe_voxels(mydata, "waterAvailable", paste(1998, 6:8, 1, sep = "-"))
# Once you have the plot object, you can display it and save it:
tile.plot
ggsave_fitmax("tiled_waterAvailable.png", tile.plot)
}
}
\seealso{
Other hisafe plot functions:
\code{\link{plot_hisafe_annualcells}()},
\code{\link{plot_hisafe_bg}()},
\code{\link{plot_hisafe_cells}()},
\code{\link{plot_hisafe_monthcells}()},
\code{\link{plot_hisafe_scene}()},
\code{\link{plot_hisafe_tstress}()},
\code{\link{plot_hisafe_ts}()}
}
\concept{hisafe plot functions}
|
bb6107d4f95c04c9a2d6dc2b99b5c04e323f52c1
|
dd23f7848ee431d060d56af188a88d0dedaa0522
|
/3-exploratory-data-analysis/week4/plot1.R
|
bc82a7969d1c4a442ef4f0033e0cd8be775a1b76
|
[] |
no_license
|
ryancey1/datascience-coursera
|
3d0b33d57dbb738399cfe09aba41230ea065318e
|
d71ae50686a3e00185bcf1debcb3e415692c58ff
|
refs/heads/main
| 2023-03-11T07:51:27.275516
| 2021-02-20T23:14:54
| 2021-02-20T23:14:54
| 323,975,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,346
|
r
|
plot1.R
|
# plot1.R -----------------------------------------------------------------
# 1. Have total emissions from PM2.5 decreased in the United States from 1999 to
# 2008? Using the `base` plotting system, make a plot showing the total PM2.5
# emission from all sources for each of the years 1999, 2002, 2005, and 2008.
# HOUSEKEEPING ------------------------------------------------------------
## load libraries
library(dplyr)
library(ggplot2)
## set working directory
## NOTE(review): setwd() assumes the script is launched from the repo root;
## this makes the script non-portable outside that layout.
if (!grepl("3-exploratory-data-analysis/week4", getwd(), fixed = TRUE)) {
    setwd("3-exploratory-data-analysis/week4")
}
## create directories
if (!dir.exists("data")) {
    dir.create("data")
}
if (!dir.exists("plots")) {
    dir.create("plots")
}
## download & extract RDS files if not already done
if (!file.exists("exdata_data_NEI_data.zip")) {
    zip = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
    file = "exdata_data_NEI_data.zip"
    download.file(url = zip, destfile = file)
}
## FIX: extract if EITHER RDS file is missing. The original used `&`
## (both must be missing), so a partially-extracted data/ directory was
## never repaired and the readRDS() calls below failed.
if (!file.exists("data/Source_Classification_Code.rds") |
    !file.exists("data/summarySCC_PM25.rds")) {
    file = "exdata_data_NEI_data.zip"
    unzip(zipfile = file, exdir = "data/")
}
# read in RDS files (skipped when already loaded in this session)
if (!any(ls() == "NEI"))
    NEI <- readRDS(file = "data/summarySCC_PM25.rds")
if (!any(ls() == "SCC"))
    SCC <- readRDS(file = "data/Source_Classification_Code.rds")
# PLOT --------------------------------------------------------------------
# Aggregate total PM2.5 emissions (all sources) per year.
# NOTE: locals renamed from `plot`/`lm` to avoid shadowing base::plot and
# stats::lm.
yearly_totals <- NEI %>%
    select(Emissions, year) %>%
    group_by(year) %>%
    summarize(across(.fns = sum), .groups = "keep")
bar_colors <- c("red", "blue", "purple", "green")
# Linear trend of total emissions across the four measurement years.
trend <- lm(Emissions ~ year, data = yearly_totals)
# Render the bar plot (one bar per year) with the trend line to PNG.
png("plots/plot1.png")
bar <- barplot(
    Emissions ~ year,
    data = yearly_totals,
    col = bar_colors,
    ylab = "PM2.5 Emissions (tons)",
    xlab = "",
    main = "Total PM2.5 Emissions in the United States\n(1999 - 2008)"
)
lines(
    x = bar,
    y = trend$fitted.values,
    lty = 3,
    lwd = 2,
    col = "black"
)
legend(
    "topright",
    legend = "Linear Regression",
    lty = 3,
    lwd = 2,
    col = "black",
    cex = 0.7,
    bty = "o"
)
# Label each bar with its (truncated) total, just below the bar top.
text(
    x = bar,
    y = yearly_totals$Emissions - 1.5e6,
    labels = as.integer(yearly_totals$Emissions),
    cex = 0.8
)
dev.off()
## Clean up the working environment, keeping only the raw data objects.
## FIX: the original `rm(list = ls(pattern = "[^NEI|^SCC]"))` used a
## negated character CLASS (matching any name containing a character
## outside "NEI|^SC"), not the intended "everything except NEI and SCC".
## setdiff() states the intent exactly.
rm(list = setdiff(ls(), c("NEI", "SCC")))
|
367d37293be18766da9d706edbfa86df875adcf6
|
5500882cda0a3b4af35d647acc4c36e3d8e5ba18
|
/hw3/hw3_code.R
|
7682c6ced0397cfc0ee3512c22b063331074fbec
|
[] |
no_license
|
pinesol/texas
|
85b1fe1f5ffed386eb1fe3053d9649ca1b9ef442
|
cee029dd32bc21689f8d40063bffc7f1f3690efa
|
refs/heads/master
| 2021-01-21T13:33:39.735392
| 2016-05-11T02:07:06
| 2016-05-11T02:07:06
| 52,619,333
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,004
|
r
|
hw3_code.R
|
#install.packages("lda")
#install.packages("topicmodels")
#install.packages("stm")
#install.packages("LDAvis")
# TODO save worksave so you can load it in the markdown file
require(quanteda, warn.conflicts = FALSE, quietly = TRUE)
library(topicmodels)
# Corpus of UK newspaper articles about immigration.
data(immigNewsCorpus, package = "quantedaData")
# Get 4 most common newspapers
topPapers <- sort(table(immigNewsCorpus[["paperName"]]), decreasing = TRUE)
reducedCorpus <- subset(immigNewsCorpus, paperName %in% names(topPapers)[1:4])
# Creates custom_stopwords vector
load('~/texas/hw3/custom_stopwords.RData')
# Document-feature matrix without stopwords; drop rare terms (fewer than
# 30 total occurrences, or appearing in fewer than 20 documents).
news_dfm <- dfm(reducedCorpus, ignoredFeatures = custom_stopwords)
news_dfm <- trim(news_dfm, minCount=30, minDoc=20)
NUM_TOPICS <- 30
# Fit a 30-topic LDA model by Gibbs sampling (fixed seed => reproducible).
news_lda <- LDA(news_dfm, NUM_TOPICS, method='Gibbs', control=list(seed=2, burnin=100, thin=10, iter=1000))
# Print the top 10 terms per topic (output reproduced in comments below).
get_terms(news_lda, 10)
# Topic 1 Topic 2 Topic 3 Topic 4 Topic 5
# "war" "ukip" "eu" "family" "english"
# "world" "farage" "european" "life" "language"
# "prince" "party" "europe" "back" "care"
# "british" "racist" "britain" "father" "home"
# "history" "leader" "cameron" "mother" "health"
# "charles" "nigel" "referendum" "wife" "test"
# "royal" "campaign" "union" "years" "speak"
# "tour" "comments" "british" "day" "nhs"
# "time" "european" "merkel" "home" "years"
# "great" "candidate" "countries" "couple" "british"
# Topic 1: British Royalty
# Topic 2: UK Independence Party
# Topic 3: Brexit
# Topic 4: Family
# Topic 5: Domestic Policy
# Problem 1f
##Store the results of the distribution of topics over documents
# subset the dfm to guardian and daily mail
papers <- reducedCorpus$documents$paperName
# function that finds the second max
# Index of the second-largest value of x (duplicates count as separate order
# statistics, so for c(5, 5, 3) the "second max" is the other 5). Ties are
# broken by returning the largest matching index, as in the original.
# Generalized: uses length(x) instead of the global NUM_TOPICS, so it works
# for topic vectors of any length (identical result when length(x) == NUM_TOPICS).
which.max2 <- function(x) {
  n <- length(x)
  # (n-1)th order statistic = second-largest value; partial sort is enough
  max(which(x == sort(x, partial = n - 1)[n - 1]))
}
# @gamma: documents x topics matrix of per-document topic proportions
guardian_topic_dist <- news_lda@gamma[which(papers == 'guardian'),]
# most prevalent and second most prevalent topic for each guardian document
top_guardian_topics <- apply(guardian_topic_dist, 1, which.max)
second_top_guardian_topics <- apply(guardian_topic_dist, 1, which.max2)
# TODO use paper names instead of dates
guardian_topics <- data.frame(first=top_guardian_topics, second=second_top_guardian_topics)
# same for the daily mail documents
mail_topic_dist <- news_lda@gamma[which(papers == 'mail'),]
top_mail_topics <- apply(mail_topic_dist, 1, which.max)
second_top_mail_topics <- apply(mail_topic_dist, 1, which.max2)
mail_topics <- data.frame(first=top_mail_topics, second=second_top_mail_topics)
# stack both papers and tag each row with its paper name
gmail_topics <- rbind(guardian_topics, mail_topics)
gmail_topics$paper <- rep(c('guardian', "mail"), times=c(nrow(guardian_topics), nrow(mail_topics)))
# plot
# NOTE(review): ggplot2 must be attached — it is not loaded in this file.
z <- ggplot(gmail_topics, aes(x=1:nrow(gmail_topics), y=first, pch="First", color=paper)) +
  geom_point()
z + geom_point(aes(x=1:nrow(gmail_topics), y=second, pch="Second", color=paper)) +
  theme_bw() + xlab(NULL)+ ylab("Topic Number") +
  theme(axis.ticks = element_blank(), axis.text.x = element_blank()) +
  labs(pch="Topic Order", color='Paper') + ggtitle("Paper Topics")
# Problem 1g
# Average contribution by a topic to a newspaper.
# docs by topics matrix. each row is a histogram over the topics.
first5_lda <- news_lda@gamma[,1:5]
# Mean contribution of each of the first five (named) topics for the
# documents of the given newspaper, based on the global first5_lda matrix.
avgTopicProportions <- function(paper) {
  papers <- reducedCorpus$documents$paperName
  paper_first5_lda <- first5_lda[which(papers == paper),]
  # NOTE(review): divides by the document count across ALL papers, not by
  # this paper's document count — confirm that is intended; as written the
  # values scale with each paper's share of the corpus.
  ave_topic_proportions <- colSums(paper_first5_lda) / length(papers)
  names(ave_topic_proportions) <- c('British Royalty', 'UKIP', 'Brexit', 'Family', 'Domestic Policy')
  return(ave_topic_proportions)
}
# per-paper averages of the five named topics (results recorded below)
avgTopicProportions('guardian')
# British Royalty UKIP Brexit Family Domestic Policy
# 0.006119630 0.010324730 0.008053529 0.008207289 0.006613840
# liberal paper, UKIP largest, Royalty lowest
avgTopicProportions('mail')
# British Royalty UKIP Brexit Family Domestic Policy
# 0.007216055 0.011378225 0.007308273 0.012038282 0.008992935
# conservative paper, Family largest, Royalty lowest
avgTopicProportions('telegraph')
# British Royalty UKIP Brexit Family Domestic Policy
# 0.008811142 0.012429343 0.011735386 0.008636098 0.007014645
# UKIP Largest, Domestic Policy Lowest
avgTopicProportions('times')
# British Royalty UKIP Brexit Family Domestic Policy
# 0.005364085 0.009009731 0.008006753 0.007744099 0.005294511
# UKIP Largest, Domestic Policy Lowest
# TODO problem 1h
install.packages("LDAvis")
library(LDAvis)
# TODO the example given in topicmodelExamples2.R uses the lda library, not the topic models library
# could you give us an example of how to use the LDAvis library with the topicsmodels library?
#news_lda
# Build the LDAvis JSON payload: phi = topic-word probabilities (exp of the
# log-probability @beta), theta = document-topic proportions (@gamma).
jsonLDA <- createJSON(phi=exp(news_lda@beta), theta=news_lda@gamma,
doc.length=ntoken(news_dfm), vocab=news_lda@terms,
term.frequency=colSums(news_dfm))
#install.packages('servr')
#serVis(jsonLDA, out.dir = "visCollLDA", open.browser = TRUE)
serVis(jsonLDA, open.browser = TRUE)
# TODO require(png)
# TODO grid.raster(readPNG())
# Using this library is not obvious at all
# example: http://cpsievert.github.io/LDAvis/reviews/reviews.html
# Problem 2
# Problem 2 Topic Stability
# refit with a different seed to assess how stable the topics are
news_lda_2 <- LDA(news_dfm, NUM_TOPICS, method='Gibbs', control=list(seed=3, burnin=100, thin=10, iter=1000))
# Problem 2b
library(lsa) # For the cosine function
# For every topic in lda_1, return the index of the most similar topic in
# lda_2, where similarity is the cosine between the topics' word
# probability distributions.
closestTopicMap <- function(lda_1, lda_2) {
  # @beta stores log word probabilities; exponentiate to get probabilities
  probs_1 <- exp(lda_1@beta)
  probs_2 <- exp(lda_2@beta)
  # index (into lda_2's topics) of the topic most cosine-similar to one row
  best_match <- function(topic_row) {
    similarities <- apply(probs_2, 1, function(other_row) cosine(topic_row, other_row))
    which.max(similarities)
  }
  apply(probs_1, 1, best_match)
}
# map each topic of the re-fit model to its closest topic in the first fit
lda_2_closest_topics <- closestTopicMap(news_lda_2, news_lda)
for (i in 1:length(lda_2_closest_topics)) {
  print(paste('Topic', i, '->', lda_2_closest_topics[i]))
}
# Recorded mapping (seed-3 fit topic -> seed-2 fit topic):
# Topic 1 -> 19
# Topic 2 -> 18
# Topic 3 -> 12
# Topic 4 -> 10
# Topic 5 -> 15
# Topic 6 -> 11
# Topic 7 -> 4
# Topic 8 -> 2
# Topic 9 -> 25
# Topic 10 -> 1
# Topic 11 -> 9
# Topic 12 -> 21
# Topic 13 -> 7
# Topic 14 -> 20
# Topic 15 -> 17
# Topic 16 -> 9
# Topic 17 -> 25
# Topic 18 -> 29
# Topic 19 -> 4
# Topic 20 -> 22
# Topic 21 -> 8
# Topic 22 -> 23
# Topic 23 -> 26
# Topic 24 -> 5
# Topic 25 -> 28
# Topic 26 -> 27
# Topic 27 -> 16
# Topic 28 -> 3
# Topic 29 -> 13
# Topic 30 -> 30
# Problem 2c
# closest_topics maps topics from lda_1 to the most similar topics in lda_2
# Mean number of top-10 terms shared between each topic of lda_1 and its
# closest-matching topic in lda_2 (per the closest_topics index map).
avg_terms_in_common <- function(lda_1, lda_2, closest_topics) {
  # hoist the terms() extraction out of the loop — the original recomputed
  # terms(lda, 10) for both models on every iteration
  terms_1 <- terms(lda_1, 10)
  terms_2 <- terms(lda_2, 10)
  num_shared <- vapply(seq_along(closest_topics), function(i) {
    length(intersect(terms_1[, i], terms_2[, closest_topics[i]]))
  }, integer(1))
  mean(num_shared)
}
avg_terms_in_common(news_lda_2, news_lda, lda_2_closest_topics)
# returns 6.666667
# Problem 2d
# stability check with fewer (10) topics, two seeds
small_lda_1 <- LDA(news_dfm, 10, method='Gibbs', control=list(seed=4, burnin=100, thin=10, iter=1000))
small_lda_2 <- LDA(news_dfm, 10, method='Gibbs', control=list(seed=5, burnin=100, thin=10, iter=1000))
small_closest_topics <- closestTopicMap(small_lda_1, small_lda_2)
avg_terms_in_common(small_lda_1, small_lda_2, small_closest_topics)
# Returns 5.6
# 10 topics are less stable than 30
# Problem 3
# Structural topic model (STM) on the mail + guardian subset
library(stm)
install.packages("Rtsne")
install.packages("geometry")
# subset the dfm to guardian and daily mail
mailGuardCorpus <- subset(immigNewsCorpus, paperName %in% c('mail', 'guardian'))
paper <- mailGuardCorpus$documents$paperName
text <- mailGuardCorpus$documents$texts
# TODO chose 2015 arbitrarily.
#date <- as.Date(as.numeric(mailGuardCorpus$documents$day) - 1, origin = "2014-01-01") # TODO numeric?
days <- as.numeric(mailGuardCorpus$documents$day)
mailGuard.df <- data.frame(paper, text, days)#TODO, date)
processed_corpus <- textProcessor(mailGuard.df$text, metadata=mailGuard.df,
customstopwords=custom_stopwords)
##remove some words for speed purposes
# TODO wtf
# TODO TODO  ERROR
# "Error in prepDocuments(processed$documents, processed_corpus$vocab, processed_corpus$meta,  :
#  Your documents object has more unique features than your vocabulary file has entries."
out_25 <- prepDocuments(processed_corpus$documents, processed_corpus$vocab, processed_corpus$meta, lower.thresh=25)
# TODO wtf is the "spline of the date variable"? I'm ignoring it and just using the index.
# NOTE reduce max EM iterations to 5 from 25 to make it faster.
# K=0 with Spectral init lets stm choose the number of topics; paper is a
# content covariate, paper + days are prevalence covariates
fitSpec25 <- stm(out_25$documents, out_25$vocab, K=0, init.type="Spectral",
content=~paper, prevalence = ~paper + days, #smooth.spline(date), # TODO maybe just use numeric vector instead of date
max.em.its=30, data=out_25$meta, seed=5926696)
# Top 5 topics
labelTopics(fitSpec25, 1:5)
# Topic Words:
# Topic 1: church, sex, christian, sexual, oppos, savil, spring
# Topic 2: beauti, pop, cloth, cook, hate, knew, writer
# Topic 3: birmingham, inspector, inquiri, gay, knew, friday, discuss
# Topic 4: bnp, farag, ukip, confer, euro, extremist, nigel
# Topic 5: bbcs, savil, leftw, broadcast, corpor, bbc, fee
# Covariate Words:
# Group guardian: grdn, newspaperid, caption, page, overwhelm, form, address
# Group mail: newspapermailid, damonl, daim, newspap, mail, onlin, spark
# Topic-Covariate Interactions:
# Topic 1, Group guardian: anim, muslim, islam, religion, food, scare, recommend
# Topic 1, Group mail: novemb, stress, briton, wintour, survey, betray, minimum
# Topic 2, Group guardian: itali, sea, island, euro, rent, northern, coupl
# Topic 2, Group mail: water, sea, murder, rape, road, wealthi, network
# Topic 3, Group guardian: homosexu, asian, citizenship, teacher, guest, islam, facebook
# Topic 3, Group mail: rowena, mason, brown, gordon, field, stanc, backbench
# Topic 4, Group guardian: manifesto, surg, legitim, revolt, euroscept, poll, referendum
# Topic 4, Group mail: ship, engin, human, eye, dream, smile, artist
# Topic 5, Group guardian: terribl, corrupt, academ, ian, style, salari, bloodi
# Topic 5, Group mail: offend, drug, releas, search, illeg, oper, approv
# Names for the topics:
#
# 1: Christian sex
# 2: Lifestyle
# 3: Birmingham investigation?
# 4: Right-wing politics
# 5: Public broadcasting
topic_names <- c('Christian sex', 'Lifestyle', 'Birmingham investigation',
'Right-wing politics', 'Public broadcasting')
# TODO in the recitation code
# TODO use labelTopics?
# TODO estimateEffect method=difference
# TODO use verbose_labels = F, label_type=custom, custom.albels = blah
# visualization
# TODO use estimateEffect method=continutuous
##change data types
out_25$meta$paper<-as.factor(out_25$meta$paper)
##pick specifcation
prep<-estimateEffect(topic_names ~ paper , fitSpec25, meta=out_25$meta)
##plot effects
# topic prevalence difference between guardian and mail
plot.estimateEffect(prep, covariate="paper", topics=1:5, model=out_25, method="difference",
cov.value1 = "guardian", cov.value2 = "mail",
xlab = "More Guardian......More Mail", xlim=c(-.1, .1),
labeltype='custom', verboseLabels=F, custom.labels=topic_names)
##pick specifcation--over time
prep <- estimateEffect(1:5 ~ s(days) , fitSpec25, meta=out_25$meta)
##plot effects
plot.estimateEffect(prep, covariate="days", topics=1:5, model=out_25, method="continuous",
labeltype='custom', verboseLabels=F, custom.labels=topic_names)
##### Problem 4
# Wordfish scaling of post-1900 inaugural addresses, anchored on Obama/Nixon
sotu_dfm <- dfm(subset(inaugCorpus, Year>1900))
nixon_index <- which(sotu_dfm@Dimnames$docs == "1969-Nixon")
obama_index <- which(sotu_dfm@Dimnames$docs == "2009-Obama")
# are these indices correct?
df_fit <- textmodel_wordfish(sotu_dfm, c(obama_index, nixon_index))
# get index of left and rightmost things in df_fit@theta, then get their names
# @theta: scores of each doc
# Leftmost
sotu_dfm@Dimnames$docs[which.min(df_fit@theta)]
# "1993-Clinton"
# Rightmost
sotu_dfm@Dimnames$docs[which.max(df_fit@theta)]
# "1909-Taft"
# fascism
df_fit@beta[which(df_fit@features == 'fascism')]
# -4.291355
# Left????
#df_fit@features: words
#df_fit@beta: scores for words. how much it discriminates on left v right. negative is left.
# psi: fixed effects: how frequent or "stop wordy" a word is.
##most important features--word fixed effects
##guitar plot
# plotting distriminating effect (beta) vs fixed effect (psi)
words<-df_fit@psi
names(words) <- df_fit@features
sort(words)[1:50]
sort(words, decreasing=T)[1:50]
weights<-df_fit@beta
plot(weights, words)
|
4ebcac1b25a286f51c94e1b1bf884617eedb3d4e
|
3682cbe55c4d8f6f8ec1cbef4f40d7f9d20734cb
|
/shinyAppProject1/server.R
|
0d1260a7b52716220ad44638b1fd8c21175691f9
|
[] |
no_license
|
jeremiahkramer/WWTSLEDWestInternProject
|
f30c0d1d957733401a30c5856360e28e9d4fba16
|
0937f834196e81f17e9b671114ec5556e0dc70ec
|
refs/heads/master
| 2020-06-22T06:15:11.064774
| 2019-08-29T20:52:01
| 2019-08-29T20:52:01
| 197,654,888
| 0
| 1
| null | 2019-08-05T19:21:28
| 2019-07-18T20:49:31
|
Python
|
UTF-8
|
R
| false
| false
| 2,076
|
r
|
server.R
|
library(shiny)
library(plotly)  # provides renderPlotly()/plot_ly() used by the server below
library(ggplot2)
library(DT)
# Shiny server: pie chart of GP with a drill-down hierarchy
# P&L -> Account Manager (AM) -> Ship-to Customer, driven by the `data`
# data frame defined elsewhere in the app.
function(input, output) {
  #create pie chart output
  output$chart1 <- renderPlotly({
    # Default to the full data set so chart_data is always defined — the
    # original errored on the "All" P&L + "All" AM combination because no
    # branch assigned chart_data.
    chart_data <- data
    #handle if all P&L selected
    if(input$`P&L1` == "All" && input$AM == "none"){
      #prepare dataframe for plot_ly: total GP per P&L
      chart_data <- data.frame(
        "Category" = unique(data$`P&L`),
        GP = aggregate(as.numeric(data$GP), by = list(data$`P&L`),
                       FUN = sum)[,2]
      )
    }
    #handle if a P&L is selected with no AM
    if(input$`P&L1` != "All" && input$AM == "none"){
      chart_data <- data[data$`P&L` == input$`P&L1`,] #get data for specific P&L
      #prepare dataframe for plot_ly: total GP per AM within the P&L
      chart_data <- data.frame(
        "Category" = unique(chart_data$`External Rep`),
        GP = aggregate(as.numeric(chart_data$GP), by = list(chart_data$`External Rep`),
                       FUN = sum)[,2]
      )
    }
    #handle if an AM is selected: slice GP by customer
    if(input$AM != "none"){
      if(input$AM != "All"){
        chart_data <- data[data$`External Rep` == input$AM, ] #get data for specific AM
      } else if(input$`P&L1` != "All"){
        # "All" AMs within the selected P&L (previously chart_data was
        # undefined here, causing an error)
        chart_data <- data[data$`P&L` == input$`P&L1`, ]
      }
      #prepare dataframe for plot_ly: total GP per ship-to customer
      chart_data <- data.frame(
        "Category" = unique(chart_data$`Ship to Customer`),
        GP = aggregate(as.numeric(chart_data$GP), by = list(chart_data$`Ship to Customer`),
                       FUN = sum)[,2]
      )
    }
    plot_ly(chart_data, labels = ~Category, values = ~GP, type = 'pie')
  })
  #possible second chart (intentionally empty placeholder)
  output$chart2 <- renderPlotly({
  })
  #create AM selection which depends on P&L selection
  output$secondSelection <- renderUI({
    # default to the full data set when "All" P&Ls are selected (previously
    # chart_data was undefined in that case and this renderUI errored)
    chart_data <- data
    if(input$`P&L1` != "All"){
      chart_data <- data[data$`P&L` == input$`P&L1`,]
    }
    selectizeInput("AM", "AM", c("none","All", unique(as.character((chart_data$`External Rep`)))))
  })
  #create order option selection which will depend on AM selection
  output$thirdSelection <- renderUI({
    selectizeInput("order_options", label = "Order Options", c("none", "% GP", "Status"))
  })
}
|
1295c5a4acbcef98e05464ebaf7f96ff35d597a1
|
e077f629946716c73ff9a53d82e3c8006c6dec2b
|
/man/neuromorpho_field_entries.Rd
|
f51879a9974f2bb0207e06b2867f2fb09e351931
|
[] |
no_license
|
natverse/neuromorphr
|
8c8a006b51b560ed686ff6d4513dda267cefe55e
|
7f23e7861bcdea1206d3d43d43bd2f08667f69b9
|
refs/heads/master
| 2023-05-03T19:42:50.918806
| 2023-04-19T00:23:54
| 2023-04-19T00:23:54
| 184,127,352
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,116
|
rd
|
neuromorpho_field_entries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search.R
\name{neuromorpho_field_entries}
\alias{neuromorpho_field_entries}
\title{Return the available meta data entries for neuromorpho neuron fields}
\usage{
neuromorpho_field_entries(
field = "species",
neuromorpho_url = "http://neuromorpho.org",
...
)
}
\arguments{
\item{field}{a valid neuron field, as returned by \code{neuromorpho_fields}}
\item{neuromorpho_url}{the base URL for querying the neuromorpho database, defaults to \url{http://neuromorpho.org}}
\item{...}{methods passed to \code{neuromorpho_async_req}, or in some cases, \code{neuromorphr:::neuromorpho_fetch}}
}
\description{
Returns a list of values present in the repository for the neuron field requested.
These values can be used in the search criteria section of the custom queries.
}
\details{
All the data fields, and their entries, can be seen and explored on neuromorpho.org
at \url{http://neuromorpho.org/MetaData.jsp}.
}
\seealso{
\code{\link{neuromorpho_neurons_info}},
\code{\link{neuromorpho_read_neurons}},
\code{\link{neuromorpho_fields}}
}
|
3f6e5442fbbbd09caf0fe93e2c86b38b58fbaca3
|
0abc4ff64360255655b9f304716ad8122e18685a
|
/R/models/LinearRegression.R
|
4c6b727cfa9e30d6124467d9b66344bdf0aaa940
|
[] |
no_license
|
rodrigoqaz/tcc-mba-fatec
|
01ceadded6c8e794fcc600779d3ecec087d1402e
|
df37f28526b8218e05b04d2ff55244a319059e92
|
refs/heads/master
| 2023-01-14T14:20:17.984056
| 2020-11-19T13:25:00
| 2020-11-19T13:25:00
| 297,507,152
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 764
|
r
|
LinearRegression.R
|
# Linear Regression:
library(tidyverse)
library(tidymodels)
# Fit an elastic-net linear regression (glmnet) of Y on all predictors and
# return predictions for the test set. `day` is kept as an id variable;
# predictors are decorrelated, centered, and scaled via a recipes pipeline.
LinearRegression <- function(train, test) {
  set.seed(4242)
  # Build the preprocessing recipe:
  recipe <- train %>%
    recipe(Y~.) %>%
    update_role(day, new_role = "id variable") %>%
    step_corr(all_predictors()) %>%
    step_center(all_predictors(), -all_outcomes()) %>%
    step_scale(all_predictors(), -all_outcomes()) %>%
    prep()
  # Apply the recipe to the train and test sets:
  train <- recipe %>% bake(train)
  test <- recipe %>% bake(test)
  # Elastic-net linear regression via glmnet (penalty = ridge/lasso mix)
  # NOTE(review): `lm` shadows stats::lm inside this function.
  lm <- linear_reg(penalty = 0.001, mixture = 0.5) %>%
    set_engine("glmnet") %>%
    fit(Y ~., data = train)
  # Generate predictions as a plain vector
  predictions <- lm %>%
    predict(test) %>%
    pull()
  return(predictions)
}
|
937e9b0f856bec1cfd644d6cdde19a737fd5c3da
|
577cecbf31ea2ea0e82851d6f1555853e3f4f9f1
|
/Fig4B_plot_insulation_Sox9.R
|
3c7c8081f6c358cf496a344d86058606128d1a97
|
[] |
no_license
|
bianlab-hub/Chen_Sci_Adv_2022
|
a5972fc6e5b0e6711d739b74e51ded7b9c0125de
|
2072fc1e81a6d576de42de4e2d047d7b7e391b72
|
refs/heads/main
| 2023-04-13T00:27:23.818781
| 2023-02-28T10:18:36
| 2023-02-28T10:18:36
| 547,825,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,074
|
r
|
Fig4B_plot_insulation_Sox9.R
|
library(dplyr)
library(ggplot2)
# Fig. 4B: compare 50-kb Hi-C insulation scores along chr11 between the MC
# and FL samples around the Sox9-Kcnj2 locus.
MC_insulation <- read.csv('MC_HiC_combined_50kb_insulation', sep='\t', header=T)
MC_insulation_chr11<-dplyr::filter(MC_insulation,chrom == 'chr11')
FL_insulation <- read.csv('FL_HiC_combined_50kb_insulation', sep='\t', header=T)
FL_insulation_chr11<-dplyr::filter(FL_insulation,chrom == 'chr11')
pdf(file = 'Sox9_Kcnj2_TAD_insulation_score_MC_vs_FL.pdf',width = 12,height = 6)
# red = MC, blue = FL; dashed verticals mark the boundary coordinates
ggplot()+geom_line(data = MC_insulation_chr11,aes(x = start, y = log2_insulation_score_500000,colour = "MC_insulation"),size=0.4)+
  geom_line(data = FL_insulation_chr11,aes(x = start, y = log2_insulation_score_500000,colour = "FL_insulation"),size=0.4)+
  xlab("chr11 Coordinates")+ylab("Insulation Score")+ylim(-1.5,1.5)+
  theme_bw()+
  theme(
    panel.grid = element_blank(),
    legend.position = c('none')
  )+
  xlim(c(110000000,114000000))+
  scale_colour_manual("",values = c("MC_insulation" = "red","FL_insulation"="blue"))+
  ggtitle("Sox9-Kcnj2 locus")+geom_vline(xintercept = c(111555526,111833044),linetype = 'dashed')
dev.off()
|
966bde1c73208ff6ad702d4f0255d048a7669c57
|
c445257a86a6ce0104edd866bd6a1cd1a776a695
|
/testing.R
|
ccd8cbd6016f39d218da8f5ac8f0789ab06a1221
|
[] |
no_license
|
bweiher/recallR
|
969b67c5a060f5ea341723f2a759afaceba9b77b
|
d0f767a7ed3b15d3207c7f94f94f208249674266
|
refs/heads/master
| 2020-03-16T13:31:28.374522
| 2018-10-11T02:57:02
| 2018-10-11T02:57:02
| 132,692,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,022
|
r
|
testing.R
|
# Experiment: persist the installed-package list into ~/.Rprofile so it can
# be restored on another machine.
library(glue)
installed_packages <- as.character(as.data.frame(installed.packages())$Package)
# single comma-separated string of all installed package names
installed_packages_p <- paste(installed_packages, collapse = ",")
file <- ".Rprofile"
pat <- "#libs:" # marker prefix for the line holding the package list
# TODO decide about methodology ,,, do we wanna
# TODO do a check if its already been written to Rprofile. ~ DELETE in that case!
# TODO or append missing items
# TODO create central repo
# TODO create permanent backup solution
# TODO a function that integrates with a github profile and pulls data from there.. eg .Rprofile
# Record the current library list on a "#libs:" line in ~/.Rprofile so the
# set of installed packages can be restored elsewhere.
# NOTE(review): setwd("~") is a lasting side effect on the session, and
# shell() is Windows-only — both kept from the original.
register_libs <- function(file = ".Rprofile"){
  setwd("~")
  if(file.exists(file)){ # .Rprofile exists
    lgl <- grepl(pat, readLines(file))
    if(any(lgl)){ # a "#libs:" line has been written already
      # Fixed: the glue template was missing its closing brace
      # ("{installed_packages_p"), and a stray shell(echo ... > .Rprofile)
      # afterwards clobbered the file just written — removed.
      thing <- c(readLines(file), glue("{pat} {installed_packages_p}"))
      writeLines(text = thing, con = file)
    } else { # no "#libs:" line yet: append one
      # Fixed: use the collapsed string (installed_packages_p); the original
      # interpolated the character vector, yielding one command per package.
      shell(glue("echo {pat} {installed_packages_p} >> {file}"))
      message(paste0("Libraries appended to .RProfile at ", getwd(), file))
    }
  } # .Rprofile doesn't exist: do nothing
}
register_libs()
# find libs too: read back the "#libs:" line and work out what to install
lines <- readLines("~/.Rprofile")
packages_registered <- trimws(strsplit(gsub(pattern = pat, x = lines[grepl("^#libs", lines)], replacement = ""),",")[[1]],"both")
# packages registered in .Rprofile but not installed locally
what_to_install <- setdiff(packages_registered, as.character(installed.packages()[,1]))
# does the pat exist from above ?
pat_exists_vector <- grepl(pat, lines)
#paste(other_items[length(other_items)], "\n")
# if it exists already, edit that line only...
# Fixed: the condition referenced `pat_exists`, an undefined name — the
# vector is called pat_exists_vector.
if(sum(pat_exists_vector) > 0){
  print("m")
}
# pull in other lines (everything except the "#libs:" line)
other_items <- lines[!pat_exists_vector]
# item to adjust: the "#libs:" line itself
item_to_adjust <- lines[pat_exists_vector]
lib_to_add <- "meowlibrary" # TODO add in collapse paste ,
item_to_adjust <- paste(trimws(item_to_adjust,"both"), trimws(lib_to_add, "both"), sep = ",")
writeLines(text = c(other_items, item_to_adjust),con = "~/.Rprofile")
|
1ffa9821eadc9db422f2ddd5c2a95e7452c2cf64
|
c12ddf66b4ce31aec34d8fdd99c99fea12f36a81
|
/tests/testthat/helper-versioning.R
|
8223826c78adcb4b90a18c80a73261ddf68c04a2
|
[] |
no_license
|
r-lib/oldie
|
ac49efbbde20e19d5a81961e74f27e1a4a304c81
|
36d18ec67997f748ee88391cf8b13da6a7a67a5e
|
refs/heads/master
| 2021-01-20T05:19:30.070679
| 2019-07-25T12:23:56
| 2019-07-25T12:24:12
| 101,428,392
| 18
| 4
| null | 2018-06-18T11:14:32
| 2017-08-25T17:48:09
|
R
|
UTF-8
|
R
| false
| false
| 385
|
r
|
helper-versioning.R
|
future_rlang_ver <- function() {
  # Next minor rlang version with the patch component reset to zero
  # (e.g. 0.2.1 -> 0.3.0), built from the installed rlang version.
  v <- ver_bump(ver_trim(pkg_ver("rlang"), 3), "minor")
  v[[3]] <- 0
  v
}
past_rlang_ver <- function() {
  # Previous "round" rlang version: when the patch is already 0, unbump the
  # minor component; otherwise just drop the patch to 0
  # (e.g. 0.3.2 -> 0.3.0, 0.3.0 -> 0.2.0).
  v <- ver_trim(pkg_ver("rlang"), 3)
  if (v[[3]] == 0) {
    unbump(v, "minor")
  } else {
    v[[3]] <- 0
    v
  }
}
|
e0e9b22723fac1cb5ec9883d36dba4f85d8aeec0
|
ebbfc719a132c3716fedfa84942c600563f78392
|
/man/writeGhcn.Rd
|
4e819260178c2aedcb6f4bec5b4e487b414b8281
|
[] |
no_license
|
cran/CHCN
|
48f859ae33d6d6edb8d72841762cf8451fcff21f
|
270b2dc2ebd78033ed0eccbc807d417e64671879
|
refs/heads/master
| 2021-01-23T02:30:16.880022
| 2012-06-07T00:00:00
| 2012-06-07T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 689
|
rd
|
writeGhcn.Rd
|
\name{writeGhcn}
\alias{writeGhcn}
\title{A simple wrapper to \code{write.table}
}
\description{Simply writes a file to the data directory using
\code{write.table}
}
\usage{
writeGhcn(data, directory = DATA.DIRECTORY, filename = "TaveCHCN.dat")
}
\arguments{
\item{data}{The data you want to write
}
\item{directory}{defaults to the processed data directory
}
\item{filename}{ The filename you want for the data
}
}
\details{ Simply uses \code{write.table} to write the data
}
\value{side effect is a file is written
}
\author{Steven Mosher
}
\examples{\dontrun{
writeGhcn(data)
}
}
\keyword{ files}
|
6f46476065fa4301455563118aff56af9dc40817
|
ddc2b096e681398f576a95e40c7fd366b65f50a2
|
/SDPSimulations/PrevalenceRegressions.R
|
588be5327cd89d7272b5882c34680ea4de401e25
|
[] |
no_license
|
sbellan61/SDPSimulations
|
f334d96743c90d657045a673fbff309106e45fce
|
cfc80b116beafabe3e3aed99429fb03d58dc85db
|
refs/heads/master
| 2021-03-27T20:48:25.857117
| 2017-09-19T20:13:37
| 2017-09-19T20:13:37
| 21,144,447
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,258
|
r
|
PrevalenceRegressions.R
|
####################################################################################################
## Plot HIV prevalence & SDP vs fit transmission coefficients, and fit contact mixing coefficients.
## NOTE(review): rm(list=ls()) wipes the interactive workspace — fine for a
## standalone analysis script, but hostile when sourced into a session.
rm(list=ls())                           # clear workspace
library(metatest);library(coda);library(faraway); library(hier.part); library(AICcmodavg); library(abind)
if(grepl('tacc', Sys.info()['nodename'])) setwd('/home1/02413/sbellan/DHSProject/SDPSimulations/')
load("data files/allDHSAIS.Rdata")      # DHS data
load("data files/ds.nm.all.Rdata")      # country names
load('data files/dframe.Rdata')         # country summary data (peak prevalence, country-prevalence, etc...)
load("data files/draw.Rdata")           # raw country summary data (peak prevalence, country-prevalence, etc...)
load('data files/dframe.s.Rdata')# country summary data (peak prevalence, country-prevalence, etc...) by DHS survey
load('data files/draw.s.Rdata')# country summary data (peak prevalence, country-prevalence, etc...) by DHS survey (unfiltered data)
load('data files/pars.arr.ac.Rdata')    # fitted transmission coefficients & other parameters for each assumed acute phase
source('SimulationFunctions.R')
## add couple prevalence to dframe
## cprev = fraction of coupled individuals who are HIV+: concordant-positive
## couples (ser == 1) contribute two positives, discordant (ser 2 or 3) one.
for(gg in 1:nrow(dframe)) { # for each country
    sel <- dat$group==dframe$country[gg] # select DHS data % of
    # coupled individuals that are HIV+ (1/discordant, 2/ ++ couple):
    dframe$cprev[gg] <- (sum(dat$ser[sel] %in% 2:3) + 2*sum(dat$ser[sel] == 1)) / (sum(sel)*2)
}
## make a list of data frame with betas, and contact coefficients (log-ed) for each acute fit
acs.to.do <- which(rowSums(!is.na(in.arr[,,2]))>0)
rdatl <- list()
for(aa in acs.to.do) { ## for each acute fit
pars.arr <- out.arr[,,aa,] # select fitted parameters
## create data frame with couple prevalence, SDP, betas, & sexual mixing rates
rdat <- data.frame(dframe$cprev, dframe$psdc, # DHS HIV prevalence in coupled indivs; proportion serodiscordant;
## b=before (pre-); e=extra; p=partner (within-)
log(t(12*100*pars.arr['bp',c(2,1,3),])), log(t(12*100*pars.arr['be',c(2,1,3),])), # log(beta_within); log(beta_extra)
log(t(12*100*pars.arr['bmb',c(2,1,3),])), log(t(12*100*pars.arr['bfb',c(2,1,3),])), # log(beta male pre-); log(beta female pre-)
log(t(pars.arr['rr.ep',c(2,1,3),])), # log(extra- / within-) = log(extra-couple mixing coefficient) geometric mean across genders
## below: log(pre- / within-) = log(pre-couple mixing coefficient) (by gender)
log(t(pars.arr['rr.bp.m',c(2,1,3),])), log(t(pars.arr['rr.bp.f',c(2,1,3),])))
names(rdat) <- c('cprev','psdc', # DHS HIV prevalence in coupled indivs; proportion serodiscordant;
'lbp','lbp.l','lbp.u', # log(beta_within) median & credible intervals (lower, upper)
'lbe','lbe.l','lbe.u', # ditto for log(beta_extra)
'lbmb','lbmb.l','lbmb.u', # log(male beta_pre)
'lbfb','lbfb.l','lbfb.u', # log(female beta_pre)
'lrr.ep','lrr.ep.l','lrr.ep.u', # log(extra-couple mixing coefficient)
'lrr.bp.m','lrr.bp.m.l','lrr.bp.m.u',# log(male pre-couple mixing coefficient)
'lrr.bp.f','lrr.bp.f.l','lrr.bp.f.u')# log(female pre-couple mixing coefficient)
rdat$lgt.cprev <- logit(rdat$cprev) # logistic(DHS HIV prevalence in coupled individuals)
rdat$lgt.pprev <- logit(dframe$peak.nart) # logistic(peak UNAIDS HIV prevalence in country)
rdat$lgt.psdc <- logit(rdat$psdc) # logistic(DHS serodiscordant proportion)
rdat$lgt.fD <- logit(rdat$psdc / (2*rdat$cprev * (1-rdat$cprev) / (2*rdat$cprev * (1-rdat$cprev) + rdat$cprev^2) )) # fancy D (normalized SDP)
rdat$country <- dframe$country
dat$fysa <- dat$tmar-dat$tfs
dat$mysa <- dat$tmar-dat$tms
rdat$fysa <- aggregate(dat$fysa, list(dat$group), mean)[,2]/12
rdat$mysa <- aggregate(dat$mysa, list(dat$group), mean)[,2]/12
rdat$mardur <- aggregate(dat$mardur.mon, list(dat$group), mean)[,2]/12
rdat$lmpret <- rdat$lrr.bp.m + log(rdat$mysa) # pre * time
rdat$lfpret <- rdat$lrr.bp.f + log(rdat$fysa) # pre * time
rdat$lextrat <- rdat$lrr.ep + log(rdat$mardur) # extra * time
rdat$linft <- rdat$lbp + log(rdat$mardur) # infectivity * time
rdat$lmysa <- log(rdat$mysa)
rdat$lfysa <- log(rdat$fysa)
rdat$lmardur <- log(rdat$mardur)
rdat$lbp.w <- 1/((rdat$lbp.u - rdat$lbp.l)/(1.96*4))^2 # weights for each variable
rdat$lbe.w <- 1/((rdat$lbe.u - rdat$lbe.l)/(1.96*4))^2
rdat$lbmb.w <- 1/((rdat$lbmb.u - rdat$lbmb.l)/(1.96*4))^2
rdat$lbfb.w <- 1/((rdat$lbfb.u - rdat$lbfb.l)/(1.96*4))^2
rdat$geom.w <- (rdat$lbp.w * rdat$lbe.w * rdat$lbmb.w * rdat$lbfb.w)^.25 # geometric average of weights
rdatl[[aa]] <- rdat # add to array over acute fits
}
wts <- rdatl[[3]][,c('country','lbp.w','lbe.w','lbmb.w','lbfb.w','geom.w')] # weights
wts
outdir <- file.path('results','PrevFigs')
sbpairs(wts[,-1], file.nm = file.path(outdir, 'wt correlations'))
## set plot parameters
las <- 1 # axis text direction
leg.ord <- order(dframe$cprev) # have legend order be by coupled prevalence so it matches top to bottom order of points
cols <- rainbow(length(ds.nm))[leg.ord] # order colors similarly
ylab <- '' # no ylabels
r2 <- T # show p values on regression plots
yaxt <- 'n' # no default axes
xaxt <- 'n'
leg.ord <- order(dframe$cprev) # countries ordered by HIV coupled prevalence in legend
cols <- rainbow(length(ds.nm))[leg.ord] # same for colors
## plot axes, logistic y, log x, for different limits & labels (pre-determined)
## Draw the y axis on a logit scale and the x axis on a log scale, with
## tick/label sets chosen by variable type.
##   ylogit.axis: draw y on logit scale? (else default axis(2))
##   xlog.axis:   draw x on log scale? (else no x axis is drawn at all)
##   ytype: 'prev' (prevalence, .01-.5 ticks) or anything else (SDP-type)
##   xtype: 'within' (infectivity), 'prev', or contact/beta coefficients
axis.fxn <- function(ylogit.axis, xlog.axis, ytype, xtype) {
  if(ylogit.axis) {
    if(ytype=='prev') {
      y.tcks <- c(seq(.01, .1, by = .01), seq(.1, .5, by = .1))
      y.labels <- y.tcks
      y.labels[!y.labels %in% c(.01,.1,.5)] <- NA
    }else{ ## serodiscordant proportion (ranges .4-.8)
      y.tcks <- c(seq(.3, .9, by = .1), seq(.91,.95, by = .01))
      y.labels <- y.tcks
      ## NOTE(review): .99 is not among y.tcks, so only .5 and .9 get labels
      y.labels[!y.labels %in% c(.5,.9,.99)] <- NA
    }
    axis(2, at = logit(y.tcks), label = y.labels, las = 2)
  }else{
    axis(2) ## not logistic
  }
  if(xlog.axis) {
    ## pick tick/label set by x variable type, then draw the axis ONCE.
    ## Fixed: the original called axis(1, ...) twice with identical
    ## arguments in the 'within' branch (inside the branch and again after
    ## the if/else) — redundant overplotting, removed.
    if(xtype=='within') { ## within-couple beta = HIV infectivity
      x.tcks <- c(1:9, seq(10,50, by = 10)) #.4,.9, by = .1),
      x.labels <- x.tcks
      x.labels[!x.labels %in% c(1,10,50)] <- NA
    }else if(xtype=='prev') { ## prevalence
      x.tcks <- c(seq(.01, .1, by = .01), seq(.1, .5, by = .1))
      x.labels <- x.tcks
      x.labels[!x.labels %in% c(.01,.1,.5)] <- NA
    }else{ ## pre- & extra- contact mixing coefficients or betas (transmission coefficients)
      x.tcks <- c(seq(.1,.9, by = .1), 1:9, seq(10,100, by = 10))
      x.labels <- x.tcks
      x.labels[!x.labels %in% c(.01,.1,1,10,100)] <- NA
    }
    axis(1, at = log(x.tcks), label = x.labels)
  }
}
outdir <- file.path('results','PrevFigs')
## Figure set: regress each Y outcome (couple prevalence, peak prevalence,
## SDP, "fancy D") on the fitted HIV transmission ("beta") or mixing
## ("contact") coefficients. One PDF per Y-variable/X-class pair; one page per
## assumed acute-phase relative hazard.
## NOTE(review): this section relies on globals created earlier in the script
## (outdir, rdat, rdatl, in.arr, acs.to.do, cols, ds.nm, leg.ord, axis.fxn,
## logit, las, yaxt, xaxt, ylab, r2) -- confirm they are in scope.
if(!file.exists(outdir)) dir.create(outdir)
## Y-variable bookkeeping: column names in rdat (logit scale), axis type for
## axis.fxn, short names for file names, and long axis labels.
yvars <- c('lgt.cprev','lgt.pprev','lgt.psdc','lgt.fD')
ytypes <- c('prev','prev','SDP','fancyD')
ynames <- c('DHS prev','peak prev','SDP','fancyD')
ytexts <- c('DHS HIV prevalence in couples', 'peak HIV prevalence (UNAIDS)','DHS serodiscordant proportion', 'fancy D')
xclasses <- c('beta','contact')
ylims <- list(c(.01, .55), c(.01, .55), c(.3, .99), c(.3, .99))
for(yv in 1:4) { ## for each Y variable
for(xc in 1:2) { ## and each X class
xclass <- xclasses[xc]
if(xclass=='contact') { # name HIV infectivity & contact coefficients
xvars <- c('lbp','lrr.ep','lrr.bp.m','lrr.bp.f')
xlabs <- c('HIV infectivity (per 100 person-years)', expression(c['extra-couple']),
expression(c['pre-couple,male']), expression(c['pre-couple,female']))
}else{ # name HIV infectivity & other betas
xvars <- c('lbp','lbe','lbmb','lbfb')
xlabs <- c('HIV infectivity (per 100 person-years)', expression(beta['extra-couple']),
expression(beta['pre-couple,male']), expression(beta['pre-couple,female']))
}
xtypes <- c('within', rep('contact',3)) # for axis function
xlim <- list(c(.3,50),c(.08,100),c(.08,100),c(.08,100)) # xlimits for each x variable, same regardless of class
ytype <- ytypes[yv] # yvar type
## NOTE(review): yvar is read from whatever `rdat` currently holds, BEFORE
## rdat is refreshed from rdatl[[aa]] inside the page loop below, so the same
## (possibly stale) Y values are reused on every acute-phase page -- confirm
## this is intended.
yvar <- rdat[,yvars[yv]] # yvar
pdf(file.path(outdir,paste0(ynames[yv],' vs ', xclass,'.pdf')), w = 6.5, h = 4.5) # initialize PDF
for(aa in acs.to.do) { # for each assumed acute phase relative hazard
rdat <- rdatl[[aa]] # pick fitted parameters from array for that acute phase relative hazard
par(mar = c(4,3,2,.5), mfrow = c(2,2), oma = c(0,1,1,0), cex.lab = .8)
for(xv in 1:4) { ## for each plot panel (each x variable)
xvar <- rdat[,xvars[xv]] # get xvariable
xvar.l <- rdat[,paste0(xvars[xv],'.l')] # get xvariable lower credible limit
xvar.u <- rdat[,paste0(xvars[xv],'.u')] # get xvariable upper credible limit
plot(0,0, type = 'n', xlab = xlabs[xv], ylab = ylab, las = las, yaxt = yaxt, xaxt = xaxt, # initialize plot
xlim = log(xlim[[xv]]), ylim = logit(ylims[[yv]]), bty = 'n')
axis.fxn(T,T, ytype=ytype, xtype=xtypes[xv]) # add axes using function above
points(xvar, yvar, pch = 19, col = cols) # medians
arrows(xvar.l, yvar, xvar.u, yvar, code = 3, col = cols, len = .05, angle = 90) # credible intervals
## weighted regression model (variance in xvars, from fitting transmission coefficients)
mod <- lm(yvar ~ xvar, rdat, weights = 1/((xvar.u - xvar.l)/(1.96*2))^2)
newx<- seq(min(xvar.l,na.rm=T), max(xvar.u,na.rm=T), l = 120) # sequence of x variables over which to predict
prd<-predict(mod,newdata=data.frame(xvar = newx),interval = c("confidence"), level = 0.95,type="response") # predict
## plot thick solid regression line with thin dashed CI lines
for(pl in 1:3) lines(newx,prd[,pl], lty=ifelse(pl==1,1,2), lwd = ifelse(pl==1,2,1))
if(r2) mtext(paste(expression(P), '=',signif(summary(mod)[[5]][2,4],2)),
side = 3, line = 0, adj = .95, cex = .7) # add p value to plot
}
## NOTE(review): xlim[[xv]] below uses the index left over from the panel
## loop (xv == 4), i.e. the legend is positioned by the LAST panel's x-limits.
legend(.95*log(xlim[[xv]][1]), 1*logit(ylims[[yv]][2]), ncol = 2, # country legend
rev(ds.nm[leg.ord]), col = rev(cols[leg.ord]), pch = 19, cex = .5, bg = 'white', title = 'from top to bottom')
mtext(ytexts[yv], side = 2, outer = T, adj = .6, line = -.3) # add one y label
mtext(paste('acute RR =', in.arr[aa,1,2],'during fit'), side = 3, line = -.5, outer=T) # show assumed acute relative hazard
}
dev.off()
}
}
## Settings for the "new" figure versions: which acute-phase pages to draw,
## foreground colour, PDF dimensions, and point size.
acs.to.do <- which(rowSums(!is.na(in.arr[,,2]))>0)
fg.col <- 'black'
wid <- 6.5
hei <- 4.5
cex <- 2
####################################################################################################
## New version of figures with color scale for SDP & no arrows for CI's
rmp <- colorRamp(c("red","yellow")) #create color ramp
outdir <- file.path('results','PrevFigsNew')
if(!file.exists(outdir)) dir.create(outdir)
yvars <- c('lgt.cprev','lgt.pprev','lgt.psdc','lgt.fD')
ytypes <- c('prev','prev')#,'SDP','fancyD')
ynames <- c('DHS prev','peak prev')#,'SDP','fancyD')
ytexts <- c('DHS HIV prevalence in couples', 'peak HIV prevalence (UNAIDS)')#,'DHS serodiscordant proportion', 'fancy D')
xclasses <- c('beta','contact')
ylims <- list(c(.01, .55), c(.01, .55), c(.3, .99), c(.3, .99))
for(yv in 1:2) { ## for each Y variable
for(xc in 1:2) { ## and each X class
xclass <- xclasses[xc]
if(xclass=='contact') { # name HIV infectivity & contact coefficients
xvars <- c('lbp','lrr.ep','lrr.bp.m','lrr.bp.f')
xlabs <- c('HIV infectivity (per 100 person-years)', expression(c['extra-couple']),
expression(c['pre-couple,male']), expression(c['pre-couple,female']))
## NOTE(review): the expression() labels above are immediately overwritten by
## the plain-text labels below; only the second assignment takes effect.
xlabs <- c('HIV infectivity (per 100 person-years)', 'extra-couple mixing coefficient',
'male pre-couple mixing coefficient', 'female pre-couple mixing coefficient')
xlim <- list(c(1,50),c(.08,50),c(.08,50),c(.08,50)) # xlimits for each x variable, same regardless of class
}else{ # name HIV infectivity & other betas
xvars <- c('lbp','lbe','lbmb','lbfb')
xlabs <- c('HIV infectivity (per 100 person-years)', expression(beta['extra-couple']),
expression(beta['pre-couple,male']), expression(beta['pre-couple,female']))
xlabs <- c('HIV infectivity (per 100 person-years)', 'extra-couple transmission coefficient',
'male pre-couple transmission coefficient', 'female pre-couple transmission coefficient')
xlim <- list(c(.3,50),c(.08,100),c(.08,100),c(.08,100)) # xlimits for each x variable, same regardless of class
}
xtypes <- c('within', rep('contact',3)) # for axis function
ytype <- ytypes[yv] # yvar type
## NOTE(review): yvar/zvar (and hence cols) come from the pre-loop `rdat`,
## not from rdatl[[aa]]; also `cols` is overwritten here, replacing the
## per-country colour vector used by the earlier figure set with an SDP
## red-to-yellow colour ramp.
yvar <- rdat[,yvars[yv]] # yvar
zvar <- rdat[,'psdc'] # zvar for point color
zord <- order(order(zvar)) # rank of each country's SDP
rzvar <- max(zvar)-min(zvar)
zvar <- (zvar - min(zvar))/rzvar # scale so it's between 0 & 1
cols <- rgb(rmp(zvar), alpha = 200,max = 255)
pdf(file.path(outdir,paste0(ynames[yv],' vs ', xclass,'-',fg.col,'.pdf')), w = wid, h = hei) # initialize PDF
for(aa in acs.to.do) { # for each assumed acute phase relative hazard
rdat <- rdatl[[aa]] # pick fitted parameters from array for that acute phase relative hazard
layout(matrix(c(1,3,2,4,5,5),2,3), widths = c(1,1,.6), h = rep(1, 3))
par(mar = c(4,4,4,.5), cex.lab = .8, fg = fg.col, col.axis=fg.col, col.lab=fg.col)
for(xv in 1:4) { ## for each plot panel (each x variable)
xvar <- rdat[,xvars[xv]] # get xvariable
xvar.l <- rdat[,paste0(xvars[xv],'.l')] # get xvariable lower credible limit
xvar.u <- rdat[,paste0(xvars[xv],'.u')] # get xvariable upper credible limit
plot(0,0, type = 'n', ylab = ylab, xlab='', las = las, yaxt = yaxt, xaxt = xaxt, # initialize plot
xlim = log(xlim[[xv]]), ylim = logit(ylims[[yv]]), bty = 'n')
mtext(xlabs[xv], side = 1, line = 2.3, adj = .5, cex = .75)
axis.fxn(T,T, ytype=ytype, xtype=xtypes[xv]) # add axes using function above
## arrows(xvar.l, yvar, xvar.u, yvar, code = 3, col = cols, len = .05, angle = 90) # credible intervals
## weighted regression model (variance in xvars, from fitting transmission coefficients)
mod <- lm(yvar ~ xvar, rdat, weights = 1/((xvar.u - xvar.l)/(1.96*2))^2)
newx<- seq(min(xvar.l,na.rm=T), max(xvar.u,na.rm=T), l = 120) # sequence of x variables over which to predict
prd<-predict(mod,newdata=data.frame(xvar = newx),interval = c("confidence"), level = 0.95,type="response") # predict
## plot thick solid regression line with thin dashed CI lines
for(pl in 1:3) lines(newx,prd[,pl], lty=ifelse(pl==1,1,2), lwd = ifelse(pl==1,2,1))
points(xvar, yvar, cex = cex, pch = 19, col = cols) # medians
text(xvar, yvar, c(16:1)[zord], cex = .5) # SDP rank printed inside each point
if(r2) mtext(paste(expression(P), '=',signif(summary(mod)[[5]][2,4],2)),
side = 3, line = 0, adj = .95, cex = .7) # add p value to plot
}
## Right-hand legend panel: one dot per country, ordered by SDP.
par(mar=rep(0,4))
plot(0,0, xlim = c(0, 10), ylim = c(0, 100), type = 'n', bty = 'n', axes = F, xlab = '', ylab ='')
points(rep(.5,16), seq(40, 90, l = 16), pch = 19, cex = cex, col = cols[order(zvar)])
text(.5, seq(40, 90, l = 16), 16:1, cex = cex*.3)
rdat$country[rdat$country=='WA'] <- 'West Africa'
text(1, seq(40, 90, l = 16), paste0(rdat$country[order(zvar)],
' (',signif(rdat$psdc[order(zvar)],2)*100,'%)'), pos = 4)
text(5, 100, 'country \n(serodiscordant \nproportion as %)')
## legend('topleft', ncol = 1, paste0(rdat$country, ' (',signif(zvar,2)*100,'%)'),
## pch = 19, col = cols, pt.cex = 1.5, cex = .8, bg = 'white')
## legend('topleft', ncol = 1, paste0(rdat$country, ' (',signif(zvar,2)*100,'%)'),
## pch = as.character(1:16), col = 'black', pt.cex = .5, cex = .8, bty = 'n')
mtext(ytexts[yv], side = 2, outer = T, adj = .6, line = -1.5) # add one y label
mtext(paste('acute RR =', in.arr[aa,1,2],'during fit'), side = 3, line = -2, outer=T) # show assumed acute relative hazard
}
dev.off()
}
}
####################################################################################################
####################################################################################################
####################################################################################################
####################################################################################################
## for PPT
## Presentation version: one PDF PER PANEL (one x variable each), only peak
## prevalence (yv = 2) vs the contact-coefficient class (xc = 1), and only
## pages where the fitted acute-phase RR equals 7.
r2 <- T # show p values on regression plots
acs.to.do <- which(in.arr[,2,2]==7)
fg.col <- 'black'
wid <- 5.5
hei <- 3.5
cex <- 2
####################################################################################################
## New version of figures with color scale for SDP & no arrows for CI's
rmp <- colorRamp(c("red","yellow")) #create color ramp
outdir <- file.path('results','PrevFigsNew')
if(!file.exists(outdir)) dir.create(outdir)
yvars <- c('lgt.cprev','lgt.pprev','lgt.psdc','lgt.fD')
ytypes <- c('prev','prev')#,'SDP','fancyD')
ynames <- c('DHS prev','peak prev')#,'SDP','fancyD')
ytexts <- c('HIV prevalence', 'peak HIV prevalence')#,'DHS serodiscordant proportion', 'fancy D')
xclasses <- c('contact')
ylims <- list(c(.01, .5), c(.01, .5), c(.3, .99), c(.3, .99))
for(yv in 2) { ## for each Y variable
for(xc in 1) { ## and each X class
xclass <- xclasses[xc]
if(xclass=='contact') { # name HIV infectivity & contact coefficients
xvars <- c('lbp','lrr.ep','lrr.bp.m','lrr.bp.f')
xlabs <- c('intrinsic HIV transmission rate \n(per 100 person-years)', expression(c['extra-couple']),
expression(c['pre-couple,male']), expression(c['pre-couple,female']))
## NOTE(review): the expression() labels are overwritten by the plain-text
## labels below; only the second assignment takes effect.
xlabs <- c('intrinsic HIV transmission rate \n(per 100 person-years)', 'extra-couple mixing coefficient',
'male pre-couple mixing coefficient', 'female pre-couple mixing coefficient')
xlim <- list(c(1,50),c(.08,50),c(.08,50),c(.08,50)) # xlimits for each x variable, same regardless of class
}else{ # name HIV infectivity & other betas
xvars <- c('lbp','lbe','lbmb','lbfb')
xlabs <- c('HIV transmission rate (per 100 person-years)', expression(beta['extra-couple']),
expression(beta['pre-couple,male']), expression(beta['pre-couple,female']))
xlabs <- c('HIV transmission rate (per 100 person-years)', 'extra-couple transmission coefficient',
'male pre-couple transmission coefficient', 'female pre-couple transmission coefficient')
xlim <- list(c(.3,50),c(.08,100),c(.08,100),c(.08,100)) # xlimits for each x variable, same regardless of class
}
xtypes <- c('within', rep('contact',3)) # for axis function
ytype <- ytypes[yv] # yvar type
## NOTE(review): yvar/zvar/cols again come from the pre-loop `rdat` rather
## than from rdatl[[aa]] -- confirm intended.
yvar <- rdat[,yvars[yv]] # yvar
zvar <- rdat[,'psdc'] # zvar for point color
zord <- order(order(zvar)) # rank of each country's SDP
rzvar <- max(zvar)-min(zvar)
zvar <- (zvar - min(zvar))/rzvar # scale so it's between 0 & 1
cols <- rgb(rmp(zvar), alpha = 200,max = 255)
for(aa in acs.to.do) { # for each assumed acute phase relative hazard
rdat <- rdatl[[aa]] # pick fitted parameters from array for that acute phase relative hazard
for(xv in 1:4) { ## for each plot panel (each x variable)
pdf(file.path(outdir,paste0(ynames[yv],' vs ', xclass,xv,'-',fg.col,'.pdf')), w = wid, h = hei) # initialize PDF
layout(matrix(c(1,2),1,2), widths = c(1,.4))
par(mar = c(4,4,1,.5), oma = c(0,0,0,0), cex.lab = .8, fg = fg.col, col.axis=fg.col, col.lab=fg.col)
xvar <- rdat[,xvars[xv]] # get xvariable
xvar.l <- rdat[,paste0(xvars[xv],'.l')] # get xvariable lower credible limit
xvar.u <- rdat[,paste0(xvars[xv],'.u')] # get xvariable upper credible limit
plot(0,0, type = 'n', ylab = ytexts[yv], xlab='', las = las, yaxt = yaxt, xaxt = xaxt, # initialize plot
xlim = log(xlim[[xv]]), ylim = logit(ylims[[yv]]), bty = 'n')
mtext(xlabs[xv], side = 1, line = 2.3, adj = .5, cex = .75)
axis.fxn(T,T, ytype=ytype, xtype=xtypes[xv]) # add axes using function above
## arrows(xvar.l, yvar, xvar.u, yvar, code = 3, col = cols, len = .05, angle = 90) # credible intervals
## weighted regression model (variance in xvars, from fitting transmission coefficients)
## NOTE(review): weights here use the global `geom.w`, unlike the earlier
## figure sets which weight by the inverse CI-width variance.
mod <- lm(yvar ~ xvar, rdat, weights = geom.w)
newx<- seq(min(xvar.l,na.rm=T), max(xvar.u,na.rm=T), l = 120) # sequence of x variables over which to predict
prd<-predict(mod,newdata=data.frame(xvar = newx),interval = c("confidence"), level = 0.95,type="response") # predict
## plot thick solid regression line with thin dashed CI lines
for(pl in 1:3) lines(newx,prd[,pl], lty=ifelse(pl==1,1,2), lwd = ifelse(pl==1,2,1))
points(xvar, yvar, cex = cex, pch = 19, col = cols) # medians
text(xvar, yvar, c(16:1)[zord], cex = .5) # SDP rank printed inside each point
if(r2) mtext(paste(expression(P), '=',signif(summary(mod)[[5]][2,4],2)),
side = 3, line = -2, adj = .25, cex = .7) # add p value to plot
## Right-hand legend panel: countries ordered by SDP, plus an SDP arrow.
par(mar=rep(0,4), cex = .8)
plot(0,0, xlim = c(0, 10), ylim = c(0, 100), type = 'n', bty = 'n', axes = F, xlab = '', ylab ='')
points(rep(.5,16), seq(0, 85, l = 16), pch = 19, cex = cex, col = cols[order(zvar)])
text(.5, seq(0, 85, l = 16), 16:1, cex = cex*.3)
rdat$country[rdat$country=='WA'] <- 'West Africa'
text(1, seq(0, 85, l = 16), paste0(rdat$country[order(zvar)], ' (',signif(rdat$psdc[order(zvar)],2)*100,'%)'),
pos = 4, cex = cex*.5)
text(5, 95, 'country \n(serodiscordant \nproportion as %)', cex = .9)
par(xpd=NA) # allow drawing outside the plot region
arrows(8,0,8,85, code = 2, len = .05, lwd = 3)
mtext('serodiscordant proportion', side = 4, line = -1.3, cex = .8, adj = .4)
dev.off()
}
}
}
}
####################################################################################################
####################################################################################################
####################################################################################################
## Figure 4
## Same layout as the "new" figures (2x2 panels + SDP legend column) but with
## a white foreground (for dark slide backgrounds) and the regression line
## drawn only in the first panel (intrinsic transmission rate).
r2 <- T # show p values on regression plots
acs.to.do <- which(in.arr[,2,2]==7)
fg.col <- 'white'
wid <- 5.5
hei <- 3.5
cex <- 2
####################################################################################################
## New version of figures with color scale for SDP & no arrows for CI's
rmp <- colorRamp(c("red","yellow")) #create color ramp
outdir <- file.path('results','PrevFigsNew')
if(!file.exists(outdir)) dir.create(outdir)
yvars <- c('lgt.cprev','lgt.pprev','lgt.psdc','lgt.fD')
ytypes <- c('prev','prev')#,'SDP','fancyD')
ynames <- c('DHS prev','peak prev')#,'SDP','fancyD')
ytexts <- c('HIV prevalence', 'peak HIV prevalence')#,'DHS serodiscordant proportion', 'fancy D')
xclasses <- c('contact')
ylims <- list(c(.01, .5), c(.01, .5), c(.3, .99), c(.3, .99))
for(yv in 2) { ## for each Y variable
for(xc in 1) { ## and each X class
xclass <- xclasses[xc]
if(xclass=='contact') { # name HIV infectivity & contact coefficients
xvars <- c('lbp','lrr.ep','lrr.bp.m','lrr.bp.f')
xlabs <- c('intrinsic HIV transmission rate \n(per 100 person-years)', expression(c['extra-couple']),
expression(c['pre-couple,male']), expression(c['pre-couple,female']))
## NOTE(review): xlabs is assigned three times here (the expression version
## and then the identical plain-text version twice); only the last survives.
xlabs <- c('intrinsic HIV transmission rate \n(per 100 person-years)', 'extra-couple mixing coefficient',
'male pre-couple mixing coefficient', 'female pre-couple mixing coefficient')
xlabs <- c('intrinsic HIV transmission rate \n(per 100 person-years)', 'extra-couple mixing coefficient',
'male pre-couple mixing coefficient', 'female pre-couple mixing coefficient')
xlim <- list(c(1,50),c(.08,50),c(.08,50),c(.08,50)) # xlimits for each x variable, same regardless of class
}else{ # name HIV infectivity & other betas
xvars <- c('lbp','lbe','lbmb','lbfb')
xlabs <- c('HIV transmission rate (per 100 person-years)', expression(beta['extra-couple']),
expression(beta['pre-couple,male']), expression(beta['pre-couple,female']))
xlabs <- c('HIV transmission rate (per 100 person-years)', 'extra-couple transmission coefficient',
'male pre-couple transmission coefficient', 'female pre-couple transmission coefficient')
xlim <- list(c(.3,50),c(.08,100),c(.08,100),c(.08,100)) # xlimits for each x variable, same regardless of class
}
xtypes <- c('within', rep('contact',3)) # for axis function
ytype <- ytypes[yv] # yvar type
yvar <- rdat[,yvars[yv]] # yvar
## NOTE(review): `draw` is a global defined elsewhere; also psdc.raw is added
## to the pre-loop rdat, which is replaced by rdatl[[aa]] below, so the new
## column is discarded before use -- confirm intended.
rdat$psdc.raw <- draw$psdc
zvar <- rdat[,'psdc'] # zvar for point color
zord <- order(order(zvar)) # rank of each country's SDP
rzvar <- max(zvar)-min(zvar)
zvar <- (zvar - min(zvar))/rzvar # scale so it's between 0 & 1
cols <- rgb(rmp(zvar), alpha = 200,max = 255)
for(aa in acs.to.do) { # for each assumed acute phase relative hazard
rdat <- rdatl[[aa]] # pick fitted parameters from array for that acute phase relative hazard
pdf(file.path(outdir,paste0(ynames[yv],' vs ', xclass,'-',fg.col,'.pdf')), w = wid, h = hei) # initialize PDF
layout(matrix(c(1,3,2,4,5,5),2,3), widths = c(1,1,.8))
cex.lab <- .7
par(mar = c(4.5,4,1.5,.5), oma = c(0,0,0,0), cex.lab = cex.lab, fg = fg.col, col.axis=fg.col, col.lab=fg.col)
for(xv in 1:4) { ## for each plot panel (each x variable)
xvar <- rdat[,xvars[xv]] # get xvariable
xvar.l <- rdat[,paste0(xvars[xv],'.l')] # get xvariable lower credible limit
xvar.u <- rdat[,paste0(xvars[xv],'.u')] # get xvariable upper credible limit
plot(0,0, type = 'n', ylab = '', xlab='', las = las, yaxt = yaxt, xaxt = xaxt, # initialize plot
xlim = log(xlim[[xv]]), ylim = logit(ylims[[yv]]), bty = 'n')
mtext(xlabs[xv], side = 1, line = 2.8, adj = .5, cex = cex.lab)
axis.fxn(T,T, ytype=ytype, xtype=xtypes[xv]) # add axes using function above
## arrows(xvar.l, yvar, xvar.u, yvar, code = 3, col = cols, len = .05, angle = 90) # credible intervals
## weighted regression model (variance in xvars, from fitting transmission coefficients)
if(xv==1) { # regression line only in the transmission-rate panel
mod <- lm(yvar ~ xvar, rdat, weights = geom.w)
newx<- seq(min(xvar.l,na.rm=T), max(xvar.u,na.rm=T), l = 120) # sequence of x variables over which to predict
prd<-predict(mod,newdata=data.frame(xvar = newx),interval = c("confidence"), level = 0.95,type="response") # predict
## plot thick solid regression line with thin dashed CI lines
for(pl in 1:3) lines(newx,prd[,pl], lty=ifelse(pl==1,1,2), lwd = ifelse(pl==1,2,1))
if(r2) mtext(paste(expression(P), '=',signif(summary(mod)[[5]][2,4],2)),
side = 3, line = -2, adj = .25, cex = .7) # add p value to plot
}
points(xvar, yvar, cex = cex, pch = 19, col = cols) # medians
text(xvar, yvar, c(16:1)[zord], cex = .5) # SDP rank printed inside each point
}
## Legend column: countries ordered by SDP, plus an SDP direction arrow.
par(mar=rep(0,4), cex = .8)
plot(0,0, xlim = c(0, 10), ylim = c(0, 100), type = 'n', bty = 'n', axes = F, xlab = '', ylab ='')
fros <- 25
tos <- 85
points(rep(.5,16), seq(fros, tos, l = 16), pch = 19, cex = cex*.95, col = cols[order(zvar)])
text(.5, seq(fros, tos, l = 16), 16:1, cex = cex*.3)
rdat$country[rdat$country=='WA'] <- 'West Africa'
text(1, seq(fros, tos, l = 16), paste0(rdat$country[order(zvar)], ' (',signif(rdat$psdc[order(zvar)],2)*100,'%)'),
pos = 4, cex = cex.lab)
text(5, 95, 'country \n(serodiscordant \nproportion as %)', cex = .9)
par(xpd=NA) # allow drawing outside the plot region
arrows(8,fros,8,tos, code = 2, len = .05, lwd = 3)
mtext('serodiscordant proportion', side = 4, line = -2, cex = .8, adj = .6)
mtext('peak HIV prevalence', side = 2, line = -1, cex = .8, adj = .5, outer = T)
dev.off()
}
}
}
####################################################################################################
## Full model & univariate models
## Fit the multivariate regression of (logit) peak prevalence on all four
## coefficients plus the four univariate regressions; write an AICc comparison
## table and a combined estimate/P-value table per acute-phase RR.
## NOTE(review): aictab() comes from the AICcmodavg package; `acutes`,
## `geom.w`, `rdatl` and `outdir` are globals defined elsewhere.
acs.to.do <- 1:8
prev.type <- 'lgt.pprev'
for(aa in acs.to.do) {
modlist <- list(NA)
rdat <- rdatl[[aa]]
xvars <- c('lbp','lrr.ep','lrr.bp.m','lrr.bp.f')
form4c <- as.formula(paste(prev.type, ' ~', paste(xvars, collapse = '+')))
## NOTE(review): `dat =` relies on partial matching of lm()'s `data` argument.
modlist[[5]] <- lm(form4c, dat = rdat, weights = geom.w)
modtab <- cbind(summary(modlist[[5]])[[5]], confint(modlist[[5]]))
univtab <- modtab[-1,] # template for the univariate rows (drop intercept)
univtab[,] <- NA
for(xx in 1:4) {
xvar <- xvars[xx]
temp.form <- as.formula(paste(prev.type, ' ~', xvar))
modlist[[xx]] <- lm(temp.form, dat = rdat, weights = geom.w)
univtab[xx,] <- cbind(summary(modlist[[xx]])[[5]], confint(modlist[[xx]]))[-1,]
}
tab <- aictab(modlist, second.ord=T, modnames = c(xvars,'mult'))
tab[,-1] <- signif(tab[,-1],3)
write.csv(tab, file.path(outdir, paste0('aicc table Ac',acutes[aa],'.csv')))
## Format "estimate (lo, hi)" strings; leading NA aligns with the intercept
## row of the multivariate table, dropped again below.
un.est <- c(NA,paste0(signif(univtab[,'Estimate'],2), ' (', signif(univtab[,'2.5 %'],2), ', ', signif(univtab[,'97.5 %'],2), ')'))
un.p <- c(NA,signif(univtab[,'Pr(>|t|)'],2))
mult.est <- paste0(signif(modtab[,'Estimate'],2), ' (', signif(modtab[,'2.5 %'],2), ', ', signif(modtab[,'97.5 %'],2), ')')
mult.p <- signif(modtab[,'Pr(>|t|)'],2)
outtab <- data.frame('univariate'=un.est, 'P'=un.p, 'multivariate'=mult.est, 'P'=mult.p)[-1,]
rownames(outtab) <- c('transmission rate','extra-couple contact coefficient',
'male pre-couple contact coefficient', 'female pre-couple contact coefficient')
## NOTE(review): a bare `outtab` inside a for loop does NOT auto-print;
## use print(outtab) if console output is wanted.
outtab
write.csv(outtab, file.path(outdir, paste0('final model table Ac',acutes[aa],'.csv')))
## NOTE(review): modlist[[1]] is the univariate lbp model; if the R2 of the
## full (multivariate) model was intended, this should be modlist[[5]].
print(paste('R2 = ', summary(modlist[[1]])$r.squared, 'for acute = ', acutes[aa]))
}
####################################################################################################
## Show evolution of {SDP, Prev} over time
## One point per DHS survey; surveys from the same country are connected and
## labelled with the survey year's last two digits. Requires the global
## `draw.s` (per-survey data) and `outdir`.
ds <- function(x) (2*x*(1-x))/(2*x*(1-x) + x^2) # Null SDP model based on mixing SDP = (2p(1-p)) / (2p(1-p) + p^2)
col <- 'black'
## NOTE(review): `cc` is used as a logical "draw the null-SDP curve" flag here
## but is clobbered below as the country loop index; with cc = FALSE,
## 'curve'[cc] selects character(0) so "curve" is omitted from the file name.
cc <- F
xlim <- c(0,.3)
ylim <- c(0,1)
pdf(file.path(outdir, paste('SDP vs prevalence by survey over time','curve'[cc],'lm.pdf')), w = 7, h=4)
par(mar=c(4,5,1,1), col = col, fg = col, col.axis = col, col.main = col, col.lab=col, bty = 'l')
plot(0,0, type = 'n', xlim=xlim, ylim = ylim, xlab = 'HIV prevalence', ylab = 'serodiscordance proportion', axes=F)
if(cc) curve(ds(x), from = 0, to = .3, col = 'red', lty = 2, add=T)
axis(2, seq(0,1,l=5), las = 2)
axis(1, seq(0,.3,by=.05), las = 1)
## Exclude West African countries (pooled elsewhere as 'WA').
w.africa <- c("Burkina Faso", "Cameroon", 'Cote dIvoire', "Ghana", "Guinea", "Liberia", "Mali", "Niger", "Senegal",
"Sierra Leone")
temp <- draw.s[!draw.s$country %in% w.africa & !is.na(draw.s$country),]
temp$country <- factor(temp$country)
temp$col <- rainbow(nlevels(temp$country), alpha = .7)[as.numeric(temp$country)]
points(temp$prev, temp$psdc, pch = 19, col = temp$col, cex = 2.5)
leg <- as.character(unique(temp$country))
for(cc in 1:nlevels(temp$country)) {
sel <- temp$country==levels(temp$country)[cc]
leg[cc] <- paste(leg[cc], paste(round(temp$tint.yr[sel]), collapse = ', '))
lines(temp$prev[sel], temp$psdc[sel], col = temp$col[sel][1])
## NOTE(review): the inner jj loop redraws ALL of this country's year labels
## on every iteration (the text() call is not indexed by jj).
for(jj in which(sel)) text(temp$prev[sel], temp$psdc[sel], substr(round(temp$tint.yr[sel]),3,4), cex = .6) #, col = temp$col[sel])
}
abline(lm(temp$psdc ~ temp$prev), col = 'black') # overall linear trend
legend('topright', leg = leg, col = unique(temp$col), pch = 19, bty = 'n', cex = .7, ncol=2)
dev.off()
|
c7dbf01a5c3ed94ef37d14ecb2ac967420135c28
|
765e74a96d7a0a7d56c2cf056350ccf1265ee9f5
|
/R/cub_dist.R
|
4fa533b98f72163dc97396078e504b0866fa7fec
|
[] |
no_license
|
fhernanb/cubm
|
a2a480af9a5516f6d6fc0dfe13fb553c705907bd
|
1c88e80f68e8ab21a681ecfd0d551e065de30f30
|
refs/heads/master
| 2021-07-02T06:53:45.658112
| 2020-12-07T21:58:35
| 2020-12-07T21:58:35
| 74,044,405
| 4
| 7
| null | 2016-12-02T15:56:17
| 2016-11-17T16:14:06
|
R
|
UTF-8
|
R
| false
| false
| 4,253
|
r
|
cub_dist.R
|
#' cub distribution
#'
#' Density, distribution function, quantile function and random generation for the cub distribution given parameters pi and xi.
#'
#' @param x,q vector of quantiles.
#' @param p vector of probabilities.
#' @param pi uncertainty parameter belongs to \code{(0, 1]}.
#' @param xi feeling parameter belongs to \code{[0, 1]}.
#' @param n number of observations
#' @param m the maximum value.
#' @param log logical; if TRUE, densities are given as log.
#' @param lower.tail logical; if TRUE (default), probabilities are \code{P[X <= x]} otherwise, \code{P[X > x]}.
#'
#' @examples
#' # Examples with dcub
#'
#' dcub(x=4, pi=0.3, xi=0.7, m=5)
#' dcub(x=1, pi=0.5, xi=0.4, m=8)
#' dcub(x=c(4, 1), pi=c(0.3, 0.5), xi=c(0.7, 0.4), m=c(5, 8))
#'
#' # Examples with pcub
#'
#' # low xi is associated with high ratings
#' pcub(q=5, pi=0.5, xi=0.2, m=10, lower.tail=FALSE)
#'
#' # high pi is associated with indecision in choosing
#' pcub(q=3, pi=0.9, xi=0.5, m=4)
#'
#' # probability for several quantiles
#' pcub(q=c(1,3,5), pi=0.3, xi=0.6, m=5)
#'
#' # Examples with qcub
#'
#' # low xi is associated with high ratings
#' qcub(p=0.1, pi=0.5, xi=0.1, m=7, lower.tail=TRUE)
#'
#' # high pi is associated with indecision in choosing
#' qcub(p=0.86, pi=0.9, xi=0.5, m=4)
#'
#' #quantiles for several probabilities
#' qcub(p=c(1,0.5,0.9), pi=0.3, xi=0.6, m=5)
#'
#' # Examples with rcub
#' # Random sample, low xi is associated with high ratings
#' x <- rcub(n=1000, pi=0.9, xi=0.1, m=5)
#' barplot(prop.table(table(x)))
#'
#' # Random sample, low pi is associated with random choices
#' y <- rcub(n=1000, pi=0.1, xi=0.1, m=5)
#' barplot(prop.table(table(y)))
#' @name cub_dist
NULL
#' @rdname cub_dist
#' @importFrom stats dbinom
#' @export
dcub <- function(x, pi, xi, m, log=FALSE) {
  # Density of the CUB (Combination of Uniform and shifted Binomial) model:
  #   P(X = x) = pi * Binom(x - 1; m - 1, 1 - xi) + (1 - pi) / m,  x in 1..m.
  if(any(x %% 1 != 0))
    stop(paste("x must be an integer number", "\n", ""))
  if (any(m <= 0))
    stop(paste("m must be positive", "\n", ""))
  # pi = 0 (pure discrete-uniform limit) is deliberately accepted here (the
  # check was relaxed from 'pi <= 0' to 'pi < 0'); the error message now
  # matches the actual accepted domain [0, 1].
  if (any(pi < 0 | pi > 1))
    stop(paste("pi must be in [0, 1]", "\n", ""))
  if (any(xi < 0 | xi > 1))
    stop(paste("xi must be in [0, 1]", "\n", ""))
  # Work on the log scale; values outside the support {1, ..., m} get zero
  # probability (log-density -Inf).
  dens <- log(pi * dbinom(x=x-1, size=m-1, prob=1-xi) + (1 - pi) / m)
  dens[x <= 0 | x > m] <- -Inf
  if (!log) dens <- exp(dens)
  return(dens)
}
#' @rdname cub_dist
#' @export
pcub <- function(q, pi, xi, m, lower.tail=TRUE, log=FALSE) {
  # Distribution function of the CUB model: P(X <= q), obtained by summing
  # the density over the support values {1, ..., min(floor(q), m)}.
  if (any(m <= 1))
    stop("m parameter must be greater than 1", "\n", "")
  if (any(pi <= 0 | pi > 1))
    stop(paste("pi must be in (0,1]", "\n", ""))
  if (any(xi < 0 | xi > 1))
    stop(paste("xi must be in [0, 1]", "\n", ""))
  # Scalar CDF helper; vectorized below so q/pi/xi/m may all be vectors.
  # (Previously this scanned the hard-coded range seq(-100, q), wastefully
  # evaluating the density at values with zero probability.)
  aux <- function(q, pi, xi, m) {
    upper <- min(floor(q), m)   # no probability mass below 1 or above m
    if (upper < 1) return(0)
    sum(dcub(x = seq_len(upper), pi = pi, xi = xi, m = m))
  }
  aux <- Vectorize(aux)
  p <- aux(q = q, pi = pi, xi = xi, m = m)
  if (!lower.tail) p <- 1 - p
  if (log) p <- log(p)
  return(p)
}
#' @rdname cub_dist
#' @export
qcub <- function(p, pi, xi, m, lower.tail=TRUE, log=FALSE) {
  # Quantile function of the CUB model: the smallest rating r in {1, ..., m}
  # such that P(X <= r) >= p. (The `log` argument is retained for interface
  # compatibility but is not used: quantiles are ratings, not probabilities.)
  if (any(p < 0 | p > 1))
    stop(paste("p must be in [0,1]", "\n", ""))
  if (any(m <= 1))
    stop("m parameter must be greater than 1", "\n", "")
  if (any(pi <= 0 | pi > 1))
    stop(paste("pi must be in (0,1]", "\n", ""))
  if (any(xi < 0 | xi > 1))
    stop(paste("xi must be in [0, 1]", "\n", ""))
  if (!lower.tail) p <- 1 - p
  # Scalar quantile helper; vectorized below.
  # (The previous findInterval() on pcub(0:(m+1)) returned the index past the
  # last tie, so qcub(p = 1, ...) yielded m + 2 -- outside the support.)
  aux <- function(p, pi, xi, m) {
    cdf <- pcub(q = seq_len(m), pi = pi, xi = xi, m = m)
    # Small tolerance guards against the accumulated CDF falling just short
    # of p by floating-point error (e.g. cdf[m] = 1 - eps when p = 1).
    which(cdf >= p - sqrt(.Machine$double.eps))[1]
  }
  aux <- Vectorize(aux)
  r <- aux(p = p, pi = pi, xi = xi, m = m)
  r
}
#' @rdname cub_dist
#' @importFrom stats rbinom runif
#' @export
rcub <- function(n, pi, xi, m=5) {
  # Draw n ratings from the CUB mixture: with probability pi the shifted
  # binomial "feeling" component, otherwise the discrete-uniform
  # "uncertainty" component.
  if (any(pi <= 0 | pi > 1))
    stop(paste("pi must be in (0,1]", "\n", ""))
  if (any(xi < 0 | xi > 1))
    stop(paste("xi must be in [0, 1]", "\n", ""))
  if (any(m <= 1))
    stop("m parameter must be greater than 1", "\n", "")
  # The RNG call order (rbinom, sample, runif) matches the original so that
  # seeded draws remain reproducible.
  feeling <- rbinom(n = n, size = m - 1, prob = 1 - xi) + 1
  uncertainty <- sample(1:m, n, replace = TRUE)
  use.feeling <- runif(n) < pi
  ifelse(use.feeling, feeling, uncertainty)
}
|
40cb279c336514a19f1d9a393720178662cd724a
|
3d299999dd13ccfc9353379d5a34111364cf5a81
|
/R/main.R
|
b5337d861bf7e9931bf45a885d0e55d3e725ee8b
|
[] |
no_license
|
shizelong1985/networkpanel
|
08777aa5b0549e873c1aa770bf0ea2fc30679ade
|
7aa7f68e0669369ee792f69392172abe459442b3
|
refs/heads/master
| 2023-03-17T06:54:14.324282
| 2020-06-19T08:09:45
| 2020-06-19T08:09:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,597
|
r
|
main.R
|
#' Distance matrix normalization
#'
#' Rescale the entries of a symmetric distance matrix to the unit interval:
#' the upper-triangle values are rescaled (via `rescale`) and mirrored back
#' into a full symmetric matrix with a zero diagonal.
#'
#' @param dist The original (symmetric) distance matrix
#' @keywords distance matrix
#' @export
#' @examples
dist_normalize <- function(dist) {
  normalized <- matrix(0, nrow(dist), ncol(dist))
  upper.vals <- rescale(dist[upper.tri(dist)])
  normalized[upper.tri(normalized)] <- upper.vals
  # Mirror the upper triangle into the lower one; the diagonal stays 0.
  normalized + t(normalized)
}
#' Pearson correlation between time series
#'
#' Dissimilarity between two time series based on their Pearson correlation.
#' Thin wrapper around `CorDistance` (presumably from the TSdist package --
#' confirm which package is attached by callers).
#'
#' @param ts1 The first time series
#' @param ts2 The second time series
#' @keywords pearson correlation
#' @export
#' @examples
tsdiss.correlation <- function(ts1, ts2) {
  CorDistance(ts1, ts2)
}
#' Euclidean L2 distance between time series
#'
#' Euclidean (L2) dissimilarity between two time series. Thin wrapper around
#' `diss.EUCL` (presumably from the TSclust package -- confirm which package
#' is attached by callers).
#'
#' @param ts1 The first time series
#' @param ts2 The second time series
#' @keywords L2 distance
#' @export
#' @examples
tsdiss.euclidean <- function(ts1, ts2) {
  diss.EUCL(ts1, ts2)
}
#' Manhattan L1 distance between time series
#'
#' Manhattan (L1) distance: the total absolute elementwise difference
#' between the two series.
#'
#' @param ts1 The first time series
#' @param ts2 The second time series
#' @keywords L1 distance
#' @export
#' @examples
tsdiss.manhattan <- function(ts1, ts2) {
  gaps <- abs(ts1 - ts2)
  sum(gaps)
}
#' Infinite Norm between time series
#'
#' Chebyshev (L-infinity) distance: the largest absolute elementwise
#' difference between the two series.
#'
#' @param ts1 The first time series
#' @param ts2 The second time series
#' @keywords L infinity distance
#' @export
#' @examples
tsdiss.infiniteNorm <- function(ts1, ts2) {
  gaps <- abs(ts1 - ts2)
  max(gaps)
}
#' Time series clustering using community detection
#'
#' Cluster time series by building an epsilon-neighbourhood network from a
#' distance matrix and running a community detection algorithm on it.
#'
#' @param dist Distance matrix between every pair of time series.
#' @param epsilon Parameter for the network construction method.
#' @param communityDetectionFunc Community detection function. The igraph
#'   package has some community detection algorithms implemented.
#' @return The membership vector: the community assigned to each time series.
#' @keywords community detection
#' @export
#' @examples
ts.community.detection <- function(dist, epsilon, communityDetectionFunc=cluster_louvain){
  net <- net.epsilon.create(dist, epsilon)
  communities <- communityDetectionFunc(net)
  # Return the membership vector visibly. (The original ended on an
  # assignment, which made the result return invisibly, and carried a dead
  # commented-out return().)
  membership(communities)
}
#' Network construction using epsilon threshold
#'
#' Build an undirected graph connecting every pair of time series whose
#' distance is strictly below `epsilon`.
#'
#' @param dist Distance matrix between every pair of time series.
#' @param epsilon Parameter for the network construction method.
#' @return An igraph undirected graph with one vertex per time series.
#' @keywords network construction
#' @export
#' @examples
net.epsilon.create <- function(dist, epsilon) {
  # Adjacency matrix: 1 where distance < epsilon, 0 elsewhere.
  # (The original allocated matrix(0, ncol(dist), nrow(dist)) -- transposed
  # dimensions, harmless only because distance matrices are square.)
  adj <- matrix(0, nrow(dist), ncol(dist))
  adj[dist < epsilon] <- 1
  # diag = FALSE drops the self-loops arising from dist[i, i] = 0 < epsilon.
  graph.adjacency(adj, mode = "undirected", diag = FALSE)
}
#' Epanechnikov kernel function
#'
#' Evaluates 0.75 * (1 - x^2) on the support [-1, 1] and 0 outside it.
#'
#' @param x Variable
#' @keywords Epanechnikov kernel
#' @export
#' @examples
epan <- function(x) {
  # (abs(x) <= 1) is the indicator of the kernel's support, replacing the
  # original sign() arithmetic; values agree everywhere, including |x| = 1.
  0.75 * (1 - x^2) * (abs(x) <= 1)
}
#' NW estimator of nonparametric function
#'
#' Nadaraya-Watson kernel regression estimate of m(x) = E[Y | X = x],
#' evaluated on the grid 1/N, 2/N, ..., 1 with an Epanechnikov kernel.
#'
#' @param X independent variable with dimension T*1
#' @param Y dependent variable with dimension T*1
#' @param bw bandwidth
#' @param N number of grid points
#' @keywords NW estimator
#' @export
#' @examples
m.i.hat <- function(X, Y, bw, N) {
  # At each grid point g = j/N, return the kernel-weighted mean of Y.
  # As in the original, an empty window yields 0/0 = NaN (no guard).
  vapply(seq_len(N), function(j) {
    w <- epan((X - j / N) / bw)
    sum(w * Y) / sum(w)
  }, numeric(1))
}
#' Parallel distance matrix
#'
#' Compute the full pairwise distance matrix for a list of time series,
#' evaluating each unique pair (in parallel via mcmapply) with a
#' byte-compiled copy of `distFunc`.
#'
#' @param tsList List of time series.
#' @param distFunc Distance function taking two time series.
#' @param cores Number of cores for parallel evaluation.
#' @return A symmetric numeric matrix of pairwise distances (zero diagonal).
#' @keywords Parallel distance matrix
#' @export
#' @examples
dist.parallel <- function(tsList, distFunc=tsdiss.euclidean, cores=2) {
  fastDist <- cmpfun(distFunc)
  nSeries <- length(tsList)
  pairIdx <- combn(nSeries, 2)
  # One distance per unique (i, j) pair, i < j; the tiny pair-lookup helper
  # is inlined here.
  pairDists <- mcmapply(
    function(i, j) fastDist(tsList[[i]], tsList[[j]]),
    i = pairIdx[1, ], j = pairIdx[2, ],
    mc.cores = cores
  )
  distMat <- matrix(0, nSeries, nSeries)
  distMat[lower.tri(distMat)] <- pairDists
  # as.dist/as.matrix mirrors the lower triangle into a symmetric matrix.
  as.matrix(as.dist(distMat))
}
#' Distance for one pair of time series
#'
#' Look up the two series at positions `x` and `y` in `tsList` and apply
#' `distFunc` to them (worker used by dist.parallel).
#'
#' @param x Index of the first time series.
#' @param y Index of the second time series.
#' @param tsList List of time series.
#' @param distFunc Distance function taking two time series.
#' @keywords Parallel distance matrix
#' @export
#' @examples
dist.parallel2.compute <- function(x, y, tsList, distFunc) {
  first <- tsList[[x]]
  second <- tsList[[y]]
  distFunc(first, second)
}
|
036f2b5370a351fff28145bb2dc2ee390537f5cd
|
501e69463cc39cbb331a251b16c25ac4f4855caa
|
/man/vcov.Rd
|
62f1fec494d71b8d5130492699e379fe8c935817
|
[] |
no_license
|
cran/mexhaz
|
22b3040da11a6cf5bcc16b957d3e3cd95e87d476
|
55e138836e58bbb04ec05eb5848afe60d43bab90
|
refs/heads/master
| 2022-11-10T11:59:07.330374
| 2022-10-31T13:47:48
| 2022-10-31T13:47:48
| 58,455,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
rd
|
vcov.Rd
|
\name{vcov}
\alias{vcov}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Method for extracting the covariance matrix}
\description{
This is a generic function for extracting the variance-covariance matrix of the parameter estimates from a fitted model object.}
\usage{
vcov(object, ...)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{a fitted object from which the covariance matrix can be extracted.}
\item{\dots}{may be required by some methods using this generic function.}
}
\value{
see the documentation for the specific class.}
\examples{
## See the specific class documentation.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{models}
|
3bd1fac85bfd220876b43e558ce5797c8ad908d6
|
dc75a9160840901a4f6252c05e307bee4178c78b
|
/R/Chunk.R
|
a425179dff25393afa853093060c201ea80a4ee1
|
[] |
no_license
|
lewinfox/balance-tracker
|
5c1b05324e87f8fd82503fb9fd7041e9ea76565d
|
6beec7a3476773a3ecf4563fae8d28e3b478d510
|
refs/heads/master
| 2020-07-02T22:14:26.306996
| 2019-08-10T21:46:49
| 2019-08-10T21:46:49
| 201,684,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,416
|
r
|
Chunk.R
|
#' A single "lump" of cash
#'
#' A Chunk is the result of a single payment into an account. Money can be
#' debited from a Chunk but not credited to it. The value of a Chunk cannot
#' fall below zero; once it reaches zero the Chunk reports `is_empty()` so
#' its owner can discard it (the Chunk does not destroy itself).
Chunk <- R6::R6Class(
  classname = "Chunk",
  public = list(
    # Remaining cash in this chunk; never negative.
    value = 0,
    # When the payment that created this chunk arrived (POSIXct).
    birthdate = NULL,
    # Unique identifier assigned at creation.
    uuid = NULL,
    initialize = function(value = 0, birthdate = Sys.time()) {
      self$value <- value
      self$birthdate <- birthdate
      self$uuid <- uuid::UUIDgenerate()
    },
    # Remove `amount` from the chunk, clamping at zero (no overdraft).
    debit = function(amount) {
      self$value <- max(self$value - amount, 0)
    },
    is_empty = function() {
      self$value == 0
    },
    print = function() {
      cat("Chunk", self$uuid, "\n")
      cat(" Value:", self$value, "\n")
      # format() renders the POSIXct birthdate as a readable date-time;
      # cat() on a bare POSIXct printed raw seconds since the epoch.
      cat(" Birthdate:", format(self$birthdate), "\n")
      invisible(self)
    }
  )
)
#' A variant of the Chunk that allows overdrafts
#'
#' A BaseChunk is the first Chunk created in a Balance. Unlike a regular Chunk,
#' a BaseChunk has debit and credit methods. The value of a BaseChunk can
#' decrease indefinitely (i.e. it can go overdrawn) but it can never increase
#' above zero.
BaseChunk <- R6::R6Class(
  classname = "BaseChunk",
  inherit = Chunk,
  public = list(
    # Add `amount` to the value, capped at zero: a BaseChunk can only be
    # "repaid" back up to a balance of 0, never pushed positive.
    credit = function(amount) {
      self$value <- min(self$value + amount, 0)
    },
    # Remove `amount` with no floor; overrides Chunk$debit so the value can
    # go (and stay) negative, i.e. overdrawn.
    debit = function(amount) {
      self$value <- self$value - amount
    }
  )
)
|
8793851aa9383e225af3b2fdb2de19416610d246
|
a1502451963856d226e8523097e89407b6ec85bc
|
/run_analysis.R
|
7875dae4082d8fe5aa43bbbe52f9a6d9db00154f
|
[] |
no_license
|
Rowena752/Getting-and-Cleaning-Data-Course-Project
|
d0e224115923a346909a997b10c30aac9a51988f
|
b87a6649b97a9547e4f2d8df935aac083c4ca518
|
refs/heads/master
| 2020-05-25T02:00:03.160311
| 2017-03-14T22:30:52
| 2017-03-14T22:30:52
| 84,900,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,761
|
r
|
run_analysis.R
|
# Load required packages. library() fails loudly if one is missing;
# the previous sapply(packages, require, ...) silently returned FALSE.
library(data.table)
library(reshape2)
path <- getwd()
#Download the file and put in folder
# Only download if the zip is not already present (the old check tested
# getwd(), which always exists, so the file was re-downloaded every run).
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
f <- "Dataset.zip"
if (!file.exists(file.path(path, f))) {
  download.file(url, file.path(path, f))
}
#Unzip the file (creates the "UCI HAR Dataset" folder) only if needed
if (!file.exists(file.path(path, "UCI HAR Dataset"))) {
  unzip("./Dataset.zip")
}
#Put unzipped files into UCI HAR Dataset folder
pathIn <- file.path(path, "UCI HAR Dataset")
#Read the subject files: one subject id per observation row
SubjectTrain <- read.table(file.path(pathIn, "train", "subject_train.txt"))
SubjectTest <- read.table(file.path(pathIn, "test" , "subject_test.txt" ))
#Read the activity files: one activity code per observation row
ActivityTrain <- read.table(file.path(pathIn, "train", "Y_train.txt"))
ActivityTest <- read.table(file.path(pathIn, "test" , "Y_test.txt" ))
#Read the features (measurement) files
# Read a whitespace-delimited data file and return it as a data.table.
# (read.table() yields a data.frame; convert directly instead of going
# through named intermediates as the original did.)
fileToDataTable <- function (f) {
  data.table(read.table(f))
}
Train <- fileToDataTable(file.path(pathIn, "train", "X_train.txt"))
Test <- fileToDataTable(file.path(pathIn, "test" , "X_test.txt" ))
# Merge the training and testing sets
# Stack train rows on top of test rows; the same order is used in all
# three rbind() calls, so rows stay aligned across the tables.
Subject <- rbind(SubjectTrain, SubjectTest)
setnames(Subject, "V1", "subject")
Activity <- rbind(ActivityTrain, ActivityTest)
setnames(Activity, "V1", "activity_num")
dtbl <- rbind(Train, Test)
# Put the id columns (subject, activity_num) in front of the measurements.
Subject <- cbind(Subject, Activity)
dtbl <- cbind(Subject, dtbl)
dtbl <- as.data.table(dtbl)
setkey(dtbl, subject, activity_num)
#Extract mean and std
# Keep only features whose names contain "mean()" or "std()". feat_code
# ("V1", "V2", ...) matches the default column names from read.table().
Features <- fread(file.path(pathIn, "features.txt"))
setnames(Features, names(Features), c("feat_num", "feat_name"))
Features <- Features[grepl("mean\\(\\)|std\\(\\)", feat_name)]
Features$feat_code <- Features[, paste0("V", feat_num)]
head(Features)
Features$feat_code
#Descriptive Activity Names
# Subset to the key columns plus the selected feature columns, attach the
# human-readable activity labels, then melt to long form (one row per
# subject/activity/feature value) and join the feature names back on.
select <- c(key(dtbl), Features$feat_code)
dtbl <- dtbl[, select, with=FALSE]
ActivityNames <- fread(file.path(pathIn, "activity_labels.txt"))
setnames(ActivityNames, names(ActivityNames), c("activity_num", "activity_name"))
dtbl <- merge(dtbl, ActivityNames, by="activity_num", all.x=TRUE)
setkey(dtbl, subject, activity_num, activity_name)
dtbl <- data.table(melt(dtbl, key(dtbl), variable.name="feat_code"))
dtbl <- merge(dtbl, Features[, list(feat_num, feat_code, feat_name)], by="feat_code", all.x=TRUE)
dtbl$activity <- factor(dtbl$activity_name)
dtbl$feature <- factor(dtbl$feat_name)
# Logical mask over the (global) dtbl$feature names matching `regex`.
# NOTE: reads dtbl from the enclosing script environment, so it must be
# called only after dtbl$feature has been created above.
grepthis <- function (regex) {
  grepl(regex, dtbl$feature)
}
## Features with 2 categories
# Indicator-matrix trick: x has one logical column per category; x %*% y
# with y = (1, ..., n) yields a per-row category index, which factor()
# then labels. An all-FALSE row yields index 0 and takes the first label.
n <- 2
y <- matrix(seq(1, n), nrow=n)
x <- matrix(c(grepthis("^t"), grepthis("^f")), ncol=nrow(y))
dtbl$featDomain <- factor(x %*% y, labels=c("Time", "Freq"))
x <- matrix(c(grepthis("Acc"), grepthis("Gyro")), ncol=nrow(y))
dtbl$featInstrument <- factor(x %*% y, labels=c("Accelerometer", "Gyroscope"))
x <- matrix(c(grepthis("BodyAcc"), grepthis("GravityAcc")), ncol=nrow(y))
dtbl$featAcceleration <- factor(x %*% y, labels=c(NA, "Body", "Gravity"))
x <- matrix(c(grepthis("mean()"), grepthis("std()")), ncol=nrow(y))
dtbl$featVariable <- factor(x %*% y, labels=c("Mean", "SD"))
## Features with 1 category
dtbl$featJerk <- factor(grepthis("Jerk"), labels=c(NA, "Jerk"))
dtbl$featMagnitude <- factor(grepthis("Mag"), labels=c(NA, "Magnitude"))
## Features with 3 categories
n <- 3
y <- matrix(seq(1, n), nrow=n)
x <- matrix(c(grepthis("-X"), grepthis("-Y"), grepthis("-Z")), ncol=nrow(y))
dtbl$featAxis <- factor(x %*% y, labels=c(NA, "X", "Y", "Z"))
#Create a tidy data set
# Group by all descriptive keys and compute the count and mean of the
# measurement values for each subject/activity/feature combination.
setkey(dtbl, subject, activity, featDomain, featAcceleration, featInstrument, featJerk, featMagnitude, featVariable, featAxis)
tidy_data <- dtbl[, list(count = .N, average = mean(value)), by=key(dtbl)]
# Creating a file for the tidy data set created
# NOTE(review): "row.name" relies on partial argument matching of
# write.table()'s "row.names"; it works, but spelling it out is safer.
write.table(tidy_data, file = "./tidy_data.txt", row.name = FALSE)
|
6df3956c417830d0034a6889cb495a47ab16eee9
|
5f5bdc7d277212c3f3b0d1d7721ae7813c7339f7
|
/shiny-retirement-planning-with-stocks/global.R
|
67804544d8438c9ace00178fb35968bc59951fd9
|
[] |
no_license
|
michaelwfouts/shiny-retirement-planning-with-stocks
|
5cb4e44cb19c36b0b21fd62d95a6bdb61e8490b3
|
24e725a3a3279a3b859e7fb99d2ad4b3bd90d580
|
refs/heads/main
| 2023-08-11T06:06:31.622600
| 2021-09-17T12:29:26
| 2021-09-17T12:29:26
| 407,531,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
global.R
|
# Load in modules
# Packages used across the Shiny app; attached here so every sourced
# module script below can rely on them.
library(shiny)
library(plotly)
library(dplyr)
library(shinyWidgets)
library(bslib)
# Source module scripts
# Defines the module functions used by the app. NOTE(review): paths are
# relative to the app directory -- confirm the app is launched from there.
source("R/infoModule.R")
source("R/calculatorModule.R")
|
c0ba647a69741e11943c7330c2f826ad0d7b714f
|
f044402735a52fa040c5cbc76737c7950406f8b2
|
/BrCa_Age_Associated_TMA/Packages/biostatUtil/man/sem.Rd
|
9d214289bd6592396aece704c41e2251de268309
|
[] |
no_license
|
BCCRCMO/BrCa_AgeAssociations
|
5cf34f3b2370c0d5381c34f8e0d2463354c4af5d
|
48a11c828a38a871f751c996b76b77bc33d5a3c3
|
refs/heads/master
| 2023-03-17T14:49:56.817589
| 2020-03-19T02:18:21
| 2020-03-19T02:18:21
| 247,175,174
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 552
|
rd
|
sem.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{sem}
\alias{sem}
\title{Standard error of the mean}
\usage{
sem(x, missing.value = NA, return.missing.value = NA)
}
\arguments{
\item{x}{input vector}
\item{missing.value}{value(s) in \code{x} that should be treated as missing}
\item{return.missing.value}{the value to return when \code{x} contains missing values}
}
\value{
The standard error of the mean of \code{x}
}
\description{
Standard error of the mean
}
\references{
http://en.wikipedia.org/wiki/Standard_error
}
\author{
Samuel Leung
}
|
5f2548a2ad7abf957268affe68d354e7a29fa4fa
|
169a6494a475f42d0452d3ade4622bde1eb939cc
|
/R/scrapenames.r
|
48c17f68152a85535653388a6ff8cfff72f40310
|
[
"MIT"
] |
permissive
|
ropensci/taxize
|
d205379bc0369d9dcdb48a8e42f3f34e7c546b9b
|
269095008f4d07bfdb76c51b0601be55d4941597
|
refs/heads/master
| 2023-05-25T04:00:46.760165
| 2023-05-02T20:02:50
| 2023-05-02T20:02:50
| 1,771,790
| 224
| 75
|
NOASSERTION
| 2023-05-02T20:02:51
| 2011-05-19T15:05:33
|
R
|
UTF-8
|
R
| false
| false
| 5,141
|
r
|
scrapenames.r
|
#' @title Resolve names using Global Names Recognition and Discovery.
#'
#' @description Uses the Global Names Recognition and Discovery service, see
#' http://gnrd.globalnames.org/
#'
#' Note: this function sometimes gives data back and sometimes not. The API
#' that this function is extremely buggy.
#'
#' @export
#' @param url An encoded URL for a web page, PDF, Microsoft Office document, or
#' image file, see examples
#' @param file When using multipart/form-data as the content-type, a file may
#' be sent. This should be a path to your file on your machine.
#' @param text Type: string. Text content; best used with a POST request, see
#' examples
#' @param engine (optional) (integer) Default: 0. Either 1 for TaxonFinder,
#' 2 for NetiNeti, or 0 for both. If absent, both engines are used.
#' @param unique (optional) (logical) If `TRUE` (default), response has
#' unique names without offsets.
#' @param verbatim (optional) Type: boolean, If `TRUE` (default to
#' `FALSE`), response excludes verbatim strings.
#' @param detect_language (optional) Type: boolean, When `TRUE` (default),
#' NetiNeti is not used if the language of incoming text is determined not to
#' be English. When `FALSE`, NetiNeti will be used if requested.
#' @param all_data_sources (optional) Type: boolean. Resolve found names
#' against all available Data Sources.
#' @param data_source_ids (optional) Type: string. Pipe separated list of
#' data source ids to resolve found names against. See list of Data Sources
#' http://resolver.globalnames.org/data_sources
#' @param return_content (logical) return OCR'ed text. returns text
#' string in `x$meta$content` slot. Default: `FALSE`
#' @param ... Further args passed to [crul::verb-GET]
#' @author Scott Chamberlain
#' @return A list of length two, first is metadata, second is the data as a
#' data.frame.
#' @details One of url, file, or text must be specified - and only one of them.
#' @examples \dontrun{
#' # Get data from a website using its URL
#' scrapenames('https://en.wikipedia.org/wiki/Spider')
#' scrapenames('https://en.wikipedia.org/wiki/Animal')
#' scrapenames('https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0095068')
#' scrapenames('https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0080498')
#' scrapenames('http://ucjeps.berkeley.edu/cgi-bin/get_JM_treatment.pl?CARYOPHYLLACEAE')
#'
#' # Scrape names from a pdf at a URL
#' url <- 'https://journals.plos.org/plosone/article/file?id=
#' 10.1371/journal.pone.0058268&type=printable'
#' scrapenames(url = sub('\n', '', url))
#'
#' # With arguments
#' scrapenames(url = 'https://www.mapress.com/zootaxa/2012/f/z03372p265f.pdf',
#' unique=TRUE)
#' scrapenames(url = 'https://en.wikipedia.org/wiki/Spider',
#' data_source_ids=c(1, 169))
#'
#' # Get data from a file
#' speciesfile <- system.file("examples", "species.txt", package = "taxize")
#' scrapenames(file = speciesfile)
#'
#' nms <- paste0(names_list("species"), collapse="\n")
#' file <- tempfile(fileext = ".txt")
#' writeLines(nms, file)
#' scrapenames(file = file)
#'
#' # Get data from text string
#' scrapenames(text='A spider named Pardosa moesta Banks, 1892')
#'
#' # return OCR content
#' scrapenames(url='https://www.mapress.com/zootaxa/2012/f/z03372p265f.pdf',
#' return_content = TRUE)
#' }
scrapenames <- function(url = NULL, file = NULL, text = NULL, engine = NULL,
  unique = NULL, verbatim = NULL, detect_language = NULL,
  all_data_sources = NULL, data_source_ids = NULL,
  return_content = FALSE, ...) {
  # tc() is a package-internal helper; it appears to compact the list by
  # dropping NULL entries, leaving only the input method actually supplied.
  method <- tc(list(url = url, file = file, text = text))
  if (length(method) > 1) {
    stop("Only one of url, file, or text can be used", call. = FALSE)
  }
  base <- "http://gnrd.globalnames.org/name_finder.json"
  # The GNRD API takes multiple data source ids as a pipe-separated string.
  if (!is.null(data_source_ids))
    data_source_ids <- paste0(data_source_ids, collapse = "|")
  args <- tc(list(url = url, text = text, engine = engine, unique = unique,
    verbatim = verbatim, detect_language = detect_language,
    all_data_sources = all_data_sources,
    data_source_ids = data_source_ids,
    return_content = as_l(return_content)))
  cli <- crul::HttpClient$new(base, headers = tx_ual, opts = list(...))
  if (names(method) == 'url') {
    # URL input: plain GET; the JSON response carries a token URL that is
    # polled below for the finished result.
    tt <- cli$get(query = args)
    tt$raise_for_status()
    out <- jsonlite::fromJSON(tt$parse("UTF-8"))
    token_url <- out$token_url
  } else {
    if (names(method) == "text") {
      # Text input: form-encoded POST. NOTE(review): the options collected
      # in `args` are not sent on this branch -- confirm that is intended.
      tt <- cli$post(body = list(text = text), encode = "form",
        followlocation = 0)
    } else {
      # File input: multipart upload, with the remaining options passed as
      # query parameters (argsnull() is a package-internal helper).
      tt <- cli$post(query = argsnull(args), encode = "multipart",
        body = list(file = crul::upload(file)),
        followlocation = 0)
    }
    # A 303 redirect is the expected "accepted, poll later" response; its
    # Location header is the token URL. Anything else is raised as an error.
    if (tt$status_code != 303) tt$raise_for_status()
    token_url <- tt$response_headers$location
  }
  # Poll the token URL until the service reports a status other than 303.
  # NOTE(review): no delay or retry cap -- a slow job makes this loop
  # hammer the server; consider a Sys.sleep() between polls.
  st <- 303
  while (st == 303) {
    dat <- crul::HttpClient$new(token_url, headers = tx_ual)$get()
    dat$raise_for_status()
    datout <- jsonlite::fromJSON(dat$parse("UTF-8"))
    st <- datout$status
  }
  # Everything except "names" is metadata; nmslwr() is a package-internal
  # helper (presumably normalizes the names data -- verify in utils).
  meta <- datout[!names(datout) %in% c("names")]
  list(meta = meta, data = nmslwr(datout$names))
}
|
ddc63567bb8b0ce6718b9b4c1aabf46123e1056a
|
648ceb127101da98e0371f90e83c2613b20ee5d1
|
/R/get_all_funs.R
|
ba605d7babb31521fef48aaeac9b96c79d772188
|
[] |
no_license
|
paulponcet/bazar
|
b561b9914300d4eb72d998028b4c2db061f9b07e
|
cacccfed36ed5650dbef2e78f584e0c07c321581
|
refs/heads/master
| 2021-01-11T21:59:55.950238
| 2019-07-13T23:51:42
| 2019-07-13T23:51:42
| 78,890,817
| 0
| 0
| null | 2019-04-05T09:14:21
| 2017-01-13T22:12:23
|
R
|
UTF-8
|
R
| false
| false
| 871
|
r
|
get_all_funs.R
|
#' @title
#' Functions exported by a package
#'
#' @description
#' \code{get_all_funs} provides all the functions exported by a given
#' installed package.
#'
#' @param pkg
#' character. The package of interest. (Must be installed already.)
#'
#' @return
#' A character vector, the functions exported.
#'
#' @export
#'
#' @examples
#' get_all_funs("stats")
#'
get_all_funs <- function(pkg) {
  stopifnot(length(pkg) == 1L)

  # Packages whose namespace inspection is known to be problematic;
  # return an empty vector (with a warning) instead of attempting it.
  problem_pkgs <- c("Deducer", "DeducerExtra", "gstat", "rattle",
                    "rggobi", "RGtk2", "spacetime", "translations")
  if (pkg %in% problem_pkgs) {
    warning(paste0("get_all_funs() returns 'character(0)' for package ", pkg),
            call. = FALSE)
    return(character(0L))
  }

  # getNamespaceExports() errors when the package is not installed; fall
  # back to an empty character vector in that case.
  tryCatch(suppressWarnings(getNamespaceExports(pkg)),
           error = function(e) character(0L))
}
|
281dd1e188e0bdd1edff5ab887d91b94aa90fb68
|
fb0ac2162a0aece893c57fa06c2a4acced5f7cd4
|
/run_analysis.R
|
e982a3cf422a3bd531e96d224b739409d7344855
|
[] |
no_license
|
JoeAllyn/Getting-and-Cleaning-Data-Project
|
b32a4b80d0af3f693132772aafc0f8fe5a866ffa
|
748513e7547d7da21fa9b2ff53192534ae9a3fbb
|
refs/heads/master
| 2020-06-22T16:30:31.173787
| 2016-11-23T15:23:10
| 2016-11-23T15:23:10
| 74,586,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,558
|
r
|
run_analysis.R
|
# Getting and Cleaning Data - Project
# Merges the training and the test sets to create one data set.
# Download Data Files and Unzip (both steps are skipped if already done)
filename <- "UCIHARDataset.zip"
if(!file.exists(filename)) {
  fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileurl, filename)
}
if(!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
#Load all datasets
#Train datasets: measurements, activity codes, subject ids (row-aligned)
training <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainingAct <- read.table("./UCI HAR Dataset/train/y_train.txt")
trainingSub <- read.table("./UCI HAR Dataset/train/subject_train.txt")
#Test datasets: same layout as the training files
testing <- read.table("./UCI HAR Dataset/test/X_test.txt")
testingAct <- read.table("./UCI HAR Dataset/test/y_test.txt")
testingSub <- read.table("./UCI HAR Dataset/test/subject_test.txt")
#Features and Activity Labels
features <- read.table("./UCI HAR Dataset/features.txt")
activityLabels <- read.table("./UCI HAR Dataset/activity_labels.txt")
#Combine Training and Activity Data
# Column order: subject id, activity id, then the measurement columns;
# rbind stacks training rows on top of testing rows.
training <- cbind(trainingSub, trainingAct, training)
testing <- cbind(testingSub, testingAct, testing)
combined <- rbind(training, testing)
#Add Labels with Descriptive Variable Names
featuresLabels <- as.character(features[,2])
# Strip punctuation from feature names. NOTE(review): the second gsub
# ("[-]") is a no-op because "-" is already removed by "[()-]" above.
featuresLabels <- gsub("[()-]","",featuresLabels)
featuresLabels <- gsub("[-]","",featuresLabels)
featuresLabels <- gsub("[,]","",featuresLabels)
colnames(combined) <- c("subjectid", "activityid", featuresLabels)
#Extracts only the measurements on the mean and standard deviation for each measurement.
#Identify Columns with Mean and Std Dev Measures and IDs and Create New Dataset
keepColumns <- colnames(combined)
extractColumns <- (grepl("subjectid", keepColumns) |
                       grepl("activityid", keepColumns) |
                       grepl("*[Ss]td*", keepColumns) |
                       grepl("*[Mm]ean*", keepColumns))
subCombined <- combined[ , extractColumns == TRUE]
#Add descriptive activity names to name the activities in the data set
subCombined$activityid <- factor(subCombined$activityid, levels = activityLabels[,1], labels = activityLabels[,2])
subCombined$subjectid <- as.factor(subCombined$subjectid)
#Create a second, independent tidy data set with the average of each variable for each activity and each subject.
# aggregate() with mean computes the per-subject/per-activity average of
# every retained measurement column.
tidyDataSet <- aggregate(. ~subjectid + activityid, data = subCombined , FUN = "mean")
# Creating a file for the tidy data set created
# NOTE(review): "row.name" relies on partial argument matching of
# write.table()'s "row.names"; it works, but spelling it out is safer.
write.table(tidyDataSet, "tidyDataSet.txt", row.name=FALSE)
#Code to read back in Tidy Data Set
#data <- read.table("tidyDataSet.txt", header=TRUE)
#View(data)
|
3e7fe210bb1ecfc04c9837c84d00b87320eef94d
|
f92a7585f054a393674716f4e178a78f099f4917
|
/man/common_lookup.Rd
|
32f94ddc7ea92f00f9b09b8d3a7cefd1aea512cb
|
[] |
no_license
|
parvezrana/forvol
|
7a29c9c2c77777ddb0ed94c47f398b04056f2e84
|
767d1739061fce7fc207a500ebff0216173d6512
|
refs/heads/master
| 2020-03-14T13:42:17.524275
| 2018-01-03T18:09:12
| 2018-01-03T18:09:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 435
|
rd
|
common_lookup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookup.R
\name{common_lookup}
\alias{common_lookup}
\title{Returns the common name of a species
given some input FIA species code}
\usage{
common_lookup(spcd, latin = FALSE)
}
\arguments{
\item{spcd}{The FIA species code}
\item{latin}{logical; if \code{TRUE}, return the Latin (scientific) name
instead of the common name. Defaults to \code{FALSE}.}
}
\value{
A string of the species common name
}
\description{
Returns the common name of a species
given some input FIA species code
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.