blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c8e64af6ab866aeac439146196afd9a618d6a6f4
|
809c790aebf2f784c358e396e79d7b3b9bfba815
|
/r_code/master_reduced_form.R
|
3a4029390b6769f931c8fb6c9c389905a398c934
|
[] |
no_license
|
aalexee/give_me_challenge
|
8ecb05a6e61aea4a7bd8f9a730a770bfca268221
|
7f1034a5a8c330f6a31a2fe140eec3480cb157e0
|
refs/heads/main
| 2023-03-26T15:44:15.203098
| 2021-03-11T11:41:55
| 2021-03-11T11:41:55
| 345,990,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,634
|
r
|
master_reduced_form.R
|
# Packages ----------------------------------------------------------------
library(tidyverse)
library(magrittr)
library(broom)
library(ggpubr)
library(ggsci)
library(plm)
library(lmtest)
library(sandwich)
library(margins)
library(ggeffects)
library(fixest)
library(lubridate)
library(stargazer)
# Functions ---------------------------------------------------------------

# Format a p-value for LaTeX table output.
#
# Rounds to three decimals; values below 0.001 are reported as the LaTeX
# string "$< 0.001$" rather than a rounded number.
#
# Args:
#   x: a single numeric p-value.
# Returns:
#   A character scalar, either "$< 0.001$" or "= <value rounded to 3 dp>".
round_p <- function(x) {
  # Compare the *unrounded* p-value: e.g. p = 0.0009 must be reported as
  # "< 0.001", not rounded up and printed as "= 0.001" (the old code
  # compared the already-rounded value).
  if (x < 0.001) {
    "$< 0.001$"
  } else {
    paste("=", round(x, 3))
  }
}
# Globals -----------------------------------------------------------------
# load graph settings
# NOTE(review): presumably defines the `graphs` list (e.g. graphs$linesize)
# used throughout the plotting code below — confirm against graph_settings.R
source("./graph_settings.R")
# substitutions for variable names
# Lookup table mapping regression term names (z, w, k, theta, theta^2) to
# human-readable labels; joined onto tidied model output before printing
# the regression tables.
var_names <-
  tibble(
    Var = c("z", "w", "k", "theta", "I(theta^2)")
    , Variable = c("Bonus", "Wage", "Cost", "Difficulty", "Difficulty2")
  )
# Load Data ---------------------------------------------------------------
# Read every CSV file in ../data and assign each one to a data frame named
# after the file without its extension (e.g. ../data/data_effort.csv
# becomes the object `data_effort`).
filenames <-
  list.files(path = "../data",
             pattern = "\\.csv$",
             full.names = TRUE)
filenames.short <-
  list.files(path = "../data",
             pattern = "\\.csv$",
             full.names = FALSE)
# Strip only the trailing ".csv": escape the dot and anchor at the end of
# the string so that a literal-dot match elsewhere in a filename is not
# accidentally removed (the old pattern ".csv" matched any char + "csv").
filenames.short <- gsub("\\.csv$", "", filenames.short)
# seq_along() is safe when the directory contains no CSVs
# (1:length(filenames) would iterate over c(1, 0)).
for (i in seq_along(filenames)) {
  assign(filenames.short[i], read_csv(filenames[i]))
}
# remove redundant objects
rm(filenames, filenames.short, i)
# Table A.1 -------------------------------------------------
# Per-treatment means of the design parameters (theta, w, z, k),
# used for the join below.
treatments <- data_effort %>%
  select(treatment_id, theta, w, z, k) %>%
  group_by(treatment_id) %>%
  summarise_at(vars(-group_cols()), mean)
# Session overview table: one row per treatment with its parameters,
# a 0/1 indicator for each session it was run in, and its mean effort.
data_effort %>%
  group_by(treatment_id, session) %>%
  summarize(mean_effort = mean(effort)) %>%
  spread(session, mean_effort) %>%  # one column per session
  ungroup() %>%
  # mean effort across sessions (NA = treatment not run in that session)
  mutate(mean_effort = rowMeans(select(., -treatment_id), na.rm = T)) %>%
  mutate(mean_effort = round(mean_effort, 2)) %>%
  # turn the per-session means into 0/1 presence indicators
  mutate_at(vars(-treatment_id, -mean_effort), function(x) (!is.na(x))*1) %>%
  left_join(treatments) %>%
  select(treatment_id, theta, w, z, k, everything()) %>%
  rename("id" = "treatment_id") %>%
  # tidy up the session column names: drop time-of-day suffixes such as
  # "_2pm", then replace remaining underscores with "/"
  rename_at(vars(contains("pm")), function(x) str_replace(x, "_.{0,3}pm", "")) %>%
  rename_at(vars(contains("_")), function(x) str_replace_all(x, "_", "/"))
# Figures 2, D.1, D.2 -----------------------------------------------------
# Each treatment dimension gets three panels built from the same
# per-subject means.  The magrittr tee operator (%T>%) passes the
# summarised data to each plotting expression in turn while discarding
# the plots' return values, so one pipeline produces all three figures.
# > Bonus --------------------------------
data_effort %>%
  mutate_at(
    vars(matches("theta|z|w|k")),
    as.factor
  ) %>%
  # per-subject mean effort by bonus level z
  group_by(id, z) %>%
  summarise(effort = mean(effort)) %>%
  ungroup() %>%
  mutate(sd = sd(effort)) %T>%
  # panel 1: ECDFs by bonus, arrow drawn between the two group medians
  {{ggplot(., aes(x = effort, color = z)) +
      stat_ecdf(size = graphs$linesize) +
      geom_segment(
        aes(x = median(effort[z == 2]), xend = median(effort[z == 4]), y = 0.5, yend = 0.5)
        , arrow = arrow(length = unit(0.03, "npc"), type = "open")
        , color = "black"
      ) +
      labs(
        x = "Effort",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 0),
        legend.position = c(0.1, 0.5),
        legend.direction = "vertical",
        legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  } %T>% # plot means
  # panel 2: group means with t-test confidence intervals
  {{group_by(., z) %>%
      do( # test that choice proportions are different from 50%
        tidy(
          t.test(x = .data$effort)
        )
      ) %>%
      ggplot(., aes(x = z, y = estimate, fill = z)) +
      geom_bar(stat = "identity", width = 0.25) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , width = 0.05
        , color = "grey"
        , size = graphs$linesize
      ) +
      labs(
        x = NULL,
        y = NULL
      ) +
      ylim(c(0,1)) +
      theme(legend.position = "none")
  } %>%
    print(.)
  } %T>% # plot histograms
  # panel 3: dodged histograms; counts scaled by 2 (two bonus groups) so
  # bars show within-group proportions
  {{ggplot(., aes(x = effort, y = 2*..count../sum(..count..), fill = z, group = z)) +
      geom_histogram(binwidth = 0.1, position = position_dodge(), color = "white") +
      labs(
        x = "Effort",
        y = NULL
      ) +
      ylim(c(0,0.3)) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 1),
        legend.position = c(-0.05, 1.1),
        legend.direction = "vertical"
        , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  }
# > Wage --------------------------------------------------------------------
# Same three panels as the bonus figures, for the wage dimension w,
# holding cost fixed at k == 1.
data_effort %>%
  mutate_at(
    vars(matches("theta|z|w|k")),
    as.factor
  ) %>%
  filter(k == 1) %>%  # wage only varies within the k == 1 treatments
  group_by(id, w) %>%
  summarise(effort = mean(effort)) %T>%
  # panel 1: ECDFs by wage, arrow between group medians
  {{ggplot(., aes(x = effort, color = w)) +
      stat_ecdf(size = graphs$linesize) +
      labs(
        x = "Effort",
        y = NULL
      ) +
      geom_segment(
        aes(x = median(effort[w == 1]), xend = median(effort[w == 2]), y = 0.5, yend = 0.5)
        , arrow = arrow(length = unit(0.03, "npc"), type = "open")
        , color = "black"
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 0),
        legend.position = c(0.1, 0.5),
        legend.direction = "vertical",
        legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  } %T>% # plot means
  # panel 2: group means with t-test confidence intervals
  {{group_by(., w) %>%
      do(
        tidy(
          t.test(x = .data$effort)
        )
      ) %>%
      ggplot(., aes(x = w, y = estimate, fill = w)) +
      geom_bar(stat = "identity", width = 0.25) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , width = 0.05
        , color = "grey"
        , size = graphs$linesize
      ) +
      labs(
        x = NULL,
        y = NULL
      ) +
      ylim(c(0,1)) +
      theme(legend.position = "none")
  } %>%
    print(.)
  } %>% # plot histograms
  # panel 3: dodged histograms scaled to within-group proportions
  {{ggplot(., aes(x = effort, y = 2*..count../sum(..count..), fill = w, group = w)) +
      geom_histogram(binwidth = 0.1, position = position_dodge(), color = "white") +
      labs(
        x = "Effort",
        y = NULL
      ) +
      ylim(c(0,0.3)) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 1),
        legend.position = c(-0.05, 1.1),
        legend.direction = "vertical"
        , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  }
# > Cost --------------------------------------------------------------------
# Same three panels as above, for the cost dimension k, holding the wage
# fixed at w == 2.
data_effort %>%
  mutate_at(
    vars(matches("theta|z|w|k")),
    as.factor
  ) %>%
  filter(w == 2) %>%  # cost only varies within the w == 2 treatments
  group_by(id, k) %>%
  summarise(effort = mean(effort)) %T>%
  # panel 1: ECDFs by cost, arrow between group medians
  {{ggplot(., aes(x = effort, color = k)) +
      stat_ecdf(size = graphs$linesize) +
      labs(
        x = "Effort",
        y = NULL
      ) +
      geom_segment(
        aes(x = median(effort[k == 1]), xend = median(effort[k == 2]), y = 0.5, yend = 0.5)
        , arrow = arrow(length = unit(0.03, "npc"), type = "open")
        , color = "black"
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 0),
        legend.position = c(0.1, 0.5),
        legend.direction = "vertical",
        legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print()
  } %T>% # plot means
  # panel 2: group means with t-test confidence intervals
  {{group_by(., k) %>%
      do(
        tidy(
          t.test(x = .data$effort)
        )
      ) %>%
      ggplot(., aes(x = k, y = estimate, fill = k)) +
      geom_bar(stat = "identity", width = 0.25) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , width = 0.05
        , color = "grey"
        , size = graphs$linesize
      ) +
      labs(
        x = NULL,
        y = NULL
      ) +
      ylim(c(0,1)) +
      theme(legend.position = "none")
  } %>%
    print(.)
  } %>% # plot histograms
  # panel 3: dodged histograms scaled to within-group proportions
  {{ggplot(., aes(x = effort, y = 2*..count../sum(..count..), fill = k, group = k)) +
      geom_histogram(binwidth = 0.1, position = position_dodge(), color = "white") +
      labs(
        x = "Effort",
        y = NULL
      ) +
      ylim(c(0,0.3)) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 1),
        legend.position = c(-0.05, 1.1),
        legend.direction = "vertical"
        , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  }
# > Difficulty --------------------------------------------------------------------
# Same three panels for the difficulty dimension theta.  The ECDF and
# histogram panels restrict to theta in {0, 0.5, 1}; the means panel
# uses all difficulty levels.
data_effort %>%
  mutate_at(
    vars(matches("theta|z|w|k")),
    as.factor
  ) %>%
  group_by(id, theta) %>%
  summarise(effort = mean(effort)) %T>%
  # panel 1: ECDFs for theta = 0, 0.5, 1 with arrows between the
  # 0 -> 0.5 medians and the 0.5 -> 1 45th percentiles
  {{filter(., theta %in% c(0, 0.5, 1)) %>%
      ggplot(., aes(x = effort, color = theta)) +
      stat_ecdf(size = graphs$linesize) +
      geom_segment(
        aes(x = median(effort[theta == 0]), xend = median(effort[theta == 0.5]), y = 0.5, yend = 0.5)
        , arrow = arrow(length = unit(0.03, "npc"), type = "open")
        , color = "black"
      ) +
      geom_segment(
        aes(x = quantile(effort[theta == 0.5], 0.45), xend = quantile(effort[theta == 1], 0.45), y = 0.45, yend = 0.45)
        , arrow = arrow(length = unit(0.03, "npc"), type = "open")
        , color = "black"
      ) +
      labs(
        x = "Effort",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
        legend.justification = c(0, 0),
        legend.position = c(0.1, 0.5),
        legend.direction = "vertical",
        legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
    print(.)
  } %T>% # plot means
  # panel 2: group means with t-test confidence intervals
  {{group_by(., theta) %>%
      do(
        tidy(
          t.test(x = .data$effort)
        )
      ) %>%
      ggplot(., aes(x = theta, y = estimate, fill = theta)) +
      geom_bar(stat = "identity", width = 0.5) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , width = 0.1
        , color = "grey"
        , size = graphs$linesize
      ) +
      labs(
        x = NULL,
        y = NULL
      ) +
      ylim(c(0,1)) +
      theme(legend.position = "none")
  } %>%
    print(.)
  } %T>% # plot histograms
  # panel 3: dodged histograms for the three difficulty levels; counts
  # scaled by 3 (three groups) to show within-group proportions
  {filter(., theta %in% c(0,0.5,1)) %>%
      {ggplot(., aes(x = effort, y = 3*..count../sum(..count..), fill = theta, group = theta)) +
          geom_histogram(binwidth = 0.1, position = position_dodge(), color = "white") +
          labs(
            x = "Effort",
            y = NULL
          ) +
          ylim(c(0,0.3)) +
          theme(
            legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize),
            legend.justification = c(0, 1),
            legend.position = c(-0.05, 1.1),
            legend.direction = "vertical"
            , legend.box.margin = margin(c(10,10,10,10))
          )
      } %>%
      print(.)
  }
# Figure 3 --------------------------------------------------------------
# Average treatment effects of each design dimension, estimated from
# paired t-tests on per-subject means, plotted as points with 95% CIs.
rbind(
  # bonus effect (z = 2 vs z = 4)
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      group_by(id, z) %>%
      summarise(effort = mean(effort)) %>%
      t.test(effort ~ z, data = ., paired = T) %>%
      tidy(.) %>%
      mutate(var = "Bonus")
  },
  # wage effect, holding k == 1
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(k == 1) %>%
      group_by(id, w) %>%
      summarise(effort = mean(effort)) %>%
      t.test(effort ~ w, data = ., paired = T) %>%
      tidy(.) %>%
      mutate(var = "Wage")
  },
  # cost effect, holding w == 2
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(w == 2) %>%
      group_by(id, k) %>%
      summarise(effort = mean(effort)) %>%
      t.test(effort ~ k, data = ., paired = T) %>%
      tidy(.) %>%
      mutate(var = "Cost")
  },
  # difficulty effect, theta 0 -> 0.5
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(theta %in% c(0, 0.5)) %>%
      group_by(id, theta) %>%
      summarise(effort = mean(effort)) %>%
      t.test(effort ~ theta, data = ., paired = T) %>%
      tidy(.) %>%
      mutate(var = "Difficulty 1")
  },
  # difficulty effect, theta 0.5 -> 1
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(theta %in% c(1, 0.5)) %>%
      group_by(id, theta) %>%
      summarise(effort = mean(effort)) %>%
      t.test(effort ~ theta, data = ., paired = T) %>%
      tidy(.) %>%
      mutate(var = "Difficulty 2")
  }
) %>%
  # t.test(a ~ b) reports group1 - group2; flip signs so estimates read as
  # the effect of *increasing* the treatment variable
  mutate_at(vars(c("estimate", "conf.low", "conf.high")), multiply_by, -1) %>%
  mutate(var = factor(var)) %>%
  {ggplot(., aes(x = estimate, y = fct_reorder(var, desc(estimate)))) +
      geom_vline(xintercept = 0, linetype = "dashed") +
      geom_errorbarh(
        aes(xmin = conf.low, xmax = conf.high),
        size = graphs$linesize,
        height = 0.1,
        color = "grey"
      ) +
      geom_point(aes(color = (estimate >= 0)), size = 2*graphs$linesize) +
      geom_text(
        stat = "identity",
        aes(label = paste0(round(estimate, 2))),
        # position = position_dodge2(width = 0.5),
        vjust = -1,
        size = 9*5/14
      ) +
      xlim(-0.2, 0.2) +
      labs(x = "Average Treatment Effect",
           y = NULL) +
      theme(legend.position = "none")
  } %>%
  print()
# Table 3 --------------------------------------------------------------
# Within (fixed-effects) panel regression of effort on the design
# parameters, with cluster-robust standard errors, printed as a tidy
# table with readable variable names.
data_effort %>%
  as.data.frame() %>%  # plm() expects a plain data.frame, not a tibble
  plm(
    effort ~ z + w + k + theta + I(theta^2)
    , model = "within"           # subject fixed effects
    , index = c("id", "round")   # panel structure: subject x round
    , data = .
  ) %>%
  # Stata-style small-sample correction, clustered at the subject level
  coeftest(., vcov = function(x) vcovHC(x, type = "sss", cluster = "group")) %>%
  tidy() %>%
  mutate_if(is.numeric, round, digits = 3) %>%
  rename(
    Var = term,
    Coefficient = estimate,
    SE = std.error,
    Statistic = statistic,
    `p-value` = p.value
  ) %>%
  left_join(var_names) %>%  # attach readable labels (Bonus, Wage, ...)
  select(Variable, everything()) %>%
  select(-Var) %>%
  # rounded p-values of 0 are displayed as "<0.001"
  mutate(across(`p-value`, as.character)) %>%
  mutate(across(`p-value`, ~replace(., . == 0, "<0.001"))) %>%
  print()
# Figure 4 --------------------------------------------------------------------
# Treatment effects of bonus/wage/cost split by task difficulty.
# For theta in {0, 1} the subject saw both levels of the treatment, so a
# paired t-test applies; for theta = 0.5 the comparison is between
# subjects, hence paired = F.
rbind(
  {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(., theta %in% c(0, 1)) %>%
      group_by(., id, z, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ z, data = ., paired = T)
        )
      ) %>%
      mutate(var = "z")
  } # z, for diff = 0 or 1
  , {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(., theta %in% c(0.5)) %>%
      group_by(., id, z, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ z, data = ., paired = F)
        )
      ) %>%
      mutate(var = "z")
  } # z, for diff = 0.5
  , {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(k == 1) %>%  # wage comparison holds cost fixed
      filter(., theta %in% c(0, 1)) %>%
      group_by(., id, w, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ w, data = ., paired = T)
        )
      ) %>%
      mutate(var = "w")
  } # w, for diff = 0 or 1
  , {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(k == 1) %>%
      filter(., theta %in% c(0.5)) %>%
      group_by(., id, w, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ w, data = ., paired = F)
        )
      ) %>%
      mutate(var = "w")
  } # w, for diff = 0.5
  , {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(w == 2) %>%  # cost comparison holds wage fixed
      filter(., theta %in% c(0, 1)) %>%
      group_by(., id, k, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ k, data = ., paired = T)
        )
      ) %>%
      mutate(var = "k")
  } # k, for diff = 0 or 1
  , {data_effort %>%
      mutate_at(
        vars(matches("theta|z|w|k")),
        as.factor
      ) %>%
      filter(w == 2) %>%
      filter(., theta %in% c(0.5)) %>%
      group_by(., id, k, theta) %>%
      summarise(., effort = mean(effort)) %>%
      group_by(., theta) %>%
      do(
        tidy(
          t.test(effort ~ k, data = ., paired = F)
        )
      ) %>%
      mutate(var = "k")
  } # k, for diff = 0.5
) %>%
  # flip signs so estimates read as the effect of increasing the variable
  mutate_at(vars(c("estimate", "conf.low", "conf.high")), multiply_by, -1) %>%
  group_by(var) %>%
  # interaction effect: difference in the ATE between difficulty levels
  mutate(int_eff = estimate[2] - estimate[1]) %>%
  ungroup() %>%
  mutate(var = factor(var, labels = c("Cost", "Wage", "Bonus"))) %>%
  {ggplot(., aes(x = estimate, y = theta, color = theta)) +
      geom_vline(xintercept = 0, linetype = "dashed") +
      geom_errorbarh(
        aes(xmin = conf.low, xmax = conf.high),
        size = graphs$linesize,
        height = 0.1,
        color = "grey"
      ) +
      geom_point(size = 2*graphs$linesize) +
      geom_text(
        stat = "identity",
        aes(label = paste0(round(estimate, 2))),
        # position = position_dodge2(width = 0.5),
        vjust = -1,
        size = 9*5/14,
        color = "black"
      ) +
      # xlim(-0.2, 0.2) +
      labs(x = "Average Treatment Effect",
           y = "Difficulty") +
      facet_wrap( ~ var) +
      # coord_flip() +
      theme(legend.position = "none")
  } %>%
  print()
# Figure D.3 ----------------------------------------------------------
# Mean effort by difficulty under "low" (w = 1, z = 2) vs "high"
# (w = 2, z = 4) incentive bundles, with t-test confidence intervals.
data_effort %>%
  filter(theta %in% c(0, 0.5, 1)) %>%
  mutate(
    # classify treatments into low/high incentive bundles; all other
    # wage/bonus combinations become NA and are dropped below
    incentives = case_when(
      (w == 1 & z == 2) ~ "low"
      , (w == 2 & z == 4) ~ "high"
    )
  ) %>%
  filter(!is.na(incentives)) %>%
  mutate(across(c("w", "z", "k", "theta"), factor)) %>%
  select(incentives, theta, effort) %>%
  # one t-test per (incentives, theta) cell, via nested effort vectors
  nest(data = effort) %>%
  rename(effort = data) %>%
  mutate(t.test = map(effort, t.test)) %>%
  mutate(t.test = map(t.test, tidy)) %>%
  unnest(t.test) %>%
  {ggplot(., aes(theta, estimate, fill = theta)) +
      geom_col(width = 0.5) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high),
        size = graphs$linesize,
        width = 0.1,
        color = "grey") +
      facet_wrap(~ incentives, labeller = label_both) +
      ylim(0, 1) +
      labs(x = "Difficulty", y = "Mean Effort") +
      theme(legend.position = "none")
  } %>% # plot
  print()
# Table D.1 ---------------------------------------------------------------------
# Fixed-effects regression with factor-coded treatments and interaction
# terms; cluster-robust inference, printed as a readable table.
reg_fe_inter <-
  data_effort %>%
  mutate(across(c("z", "w", "k", "theta"), as_factor)) %>%
  filter(theta %in% c(0, 0.5, 1)) %>%
  as.data.frame() %>%  # plm() expects a plain data.frame
  plm(
    # effort ~ z*w*k*theta
    effort ~ z + w + k + theta + z:theta + w:theta + k:theta + z:w + z:k
    # effort ~ z + w + k + theta
    , model = "within"           # subject fixed effects
    , index = c("id", "round")   # panel structure: subject x round
    , data = .
  )
# export
reg_fe_inter %>%
  # Stata-style small-sample correction, clustered at the subject level
  coeftest(., vcov = function(x) vcovHC(x, type = "sss", cluster = "group")) %>%
  print() %>%  # print raw coefficient test, then continue with the tidy table
  tidy() %>%
  mutate_if(is.numeric, round, digits = 3) %>%
  rename(
    Variable = term,
    Coefficient = estimate,
    SE = std.error,
    Statistic = statistic,
    `p-value` = p.value
  ) %>%
  # replace factor-level term names with readable labels
  mutate(
    Variable = str_replace_all(Variable, "z4", "Bonus = 4")
    , Variable = str_replace_all(Variable, "w2", "Wage = 2")
    , Variable = str_replace_all(Variable, "k2", "Cost = 2")
    , Variable = str_replace_all(Variable, "theta0.5", "Difficulty = 0.5")
    , Variable = str_replace_all(Variable, "theta1", "Difficulty = 1")
    , Variable = str_replace_all(Variable, ":", " x ")
  ) %>%
  # rounded p-values of 0 are displayed as "<0.001"
  mutate(across(`p-value`, as.character)) %>%
  mutate(across(`p-value`, ~replace(., . == 0, "<0.001"))) %>%
  print()
# Figure D.7 -----------------------------------------------------------
# Gender heterogeneity: classify each subject by the sign of their
# within-subject bonus effect, then compare the share of "Increasing"
# vs "Decreasing" types between genders using binomial tests.
# > bonus ----
data_effort %>%
  select(id, effort, z) %>%
  group_by(id, z) %>%
  summarize(across(.cols = everything(), mean)) %>%
  arrange(id, z) %>%
  # per-subject treatment effect: difference in mean effort across the
  # two bonus levels (rows are sorted by z within id)
  mutate(ate = diff(effort)) %>%
  select(id, ate) %>%
  distinct() %>%
  left_join(., data_demog %>% select(id, gender)) %>%
  filter(gender %in% c("Male", "Female")) %>%
  mutate(ate_positive = if_else(ate >= 0, "Increasing", "Decreasing")) %>%
  group_by(ate_positive, gender) %>%
  summarise(count = n()) %>%
  mutate(prop = count / sum(count)) %>%
  group_by(gender) %>%
  mutate(sum_count = sum(count)) %>%
  # binomial CI for the share of each type within each gender
  group_by(gender, ate_positive) %>%
  do(
    tidy(
      binom.test(
        x = .data$count, n = .data$sum_count
      )
    )
  ) %>%
  group_by(ate_positive) %>%
  # effect sizes: difference in proportions and Cohen's h
  mutate(
    ate = diff(estimate)
    , phi = 2*asin(sqrt(estimate))
    , cohen_h = diff(phi)
  ) %>%
  {ggplot(., aes(factor(gender), estimate, fill = ate_positive)) +
      geom_hline(yintercept = 0.5, linetype = 2, color = "grey", size = 1) +
      geom_bar(
        stat = "identity",
        position = position_dodge2(),
        width = 0.5
      ) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , position = position_dodge(width = 0.5)
        , width = 0.1
        , color = "grey"
        , size = 1
      ) +
      geom_text(
        stat = "identity",
        aes(
          # label = paste0(round(100 * prop), "% (", count, ")"))
          label = paste0(round(100 * estimate), "%"))
        , position = position_dodge2(width = 0.5)
        , vjust = -1
        , hjust = -0.5
        , size = 9*5/14
      ) +
      scale_y_continuous(limits = c(0, 1)) +
      labs(
        title = NULL,
        x = "Gender",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize)
        , legend.justification = c(0, 0)
        , legend.position = c(0.05, 0.9)
        , legend.direction = "horizontal"
        # , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
  print()
# > wage ----
# Same gender-heterogeneity analysis as the bonus panel, for the wage
# effect (holding cost fixed at k == 1).
data_effort %>%
  filter(k == 1) %>%
  select(id, effort, w) %>%
  group_by(id, w) %>%
  summarize(across(.cols = everything(), mean)) %>%
  arrange(id, w) %>%
  # per-subject wage effect (rows sorted by w within id)
  mutate(ate = diff(effort)) %>%
  select(id, ate) %>%
  distinct() %>%
  left_join(., data_demog %>% select(id, gender)) %>%
  filter(gender %in% c("Male", "Female")) %>%
  mutate(ate_positive = if_else(ate >= 0, "Increasing", "Decreasing")) %>%
  group_by(ate_positive, gender) %>%
  summarise(count = n()) %>%
  mutate(prop = count / sum(count)) %>%
  group_by(gender) %>%
  mutate(sum_count = sum(count)) %>%
  # binomial CI for the share of each type within each gender
  group_by(gender, ate_positive) %>%
  do(
    tidy(
      binom.test(
        x = .data$count, n = .data$sum_count
      )
    )
  ) %>%
  group_by(ate_positive) %>%
  # effect sizes: difference in proportions and Cohen's h
  mutate(
    ate = diff(estimate)
    , phi = 2*asin(sqrt(estimate))
    , cohen_h = diff(phi)
  ) %>%
  {ggplot(., aes(factor(gender), estimate, fill = ate_positive)) +
      geom_hline(yintercept = 0.5, linetype = 2, color = "grey", size = 1) +
      geom_bar(
        stat = "identity",
        position = position_dodge2(),
        width = 0.5
      ) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , position = position_dodge(width = 0.5)
        , width = 0.1
        , color = "grey"
        , size = 1
      ) +
      geom_text(
        stat = "identity",
        aes(
          # label = paste0(round(100 * prop), "% (", count, ")"))
          label = paste0(round(100 * estimate), "%"))
        , position = position_dodge2(width = 0.5)
        , vjust = -1
        , hjust = -0.5
        , size = 9*5/14
      ) +
      scale_y_continuous(limits = c(0, 1)) +
      labs(
        title = NULL,
        x = "Gender",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize)
        , legend.justification = c(0, 0)
        , legend.position = c(0.05, 0.9)
        , legend.direction = "horizontal"
        # , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
  print()
# > cost ----
# Same gender-heterogeneity analysis, for the cost effect (holding the
# wage fixed at w == 2).
data_effort %>%
  filter(w == 2) %>%
  select(id, effort, k) %>%
  group_by(id, k) %>%
  summarize(across(.cols = everything(), mean)) %>%
  arrange(id, k) %>%
  # per-subject cost effect (rows sorted by k within id)
  mutate(ate = diff(effort)) %>%
  select(id, ate) %>%
  distinct() %>%
  left_join(., data_demog %>% select(id, gender)) %>%
  filter(gender %in% c("Male", "Female")) %>%
  mutate(ate_positive = if_else(ate >= 0, "Increasing", "Decreasing")) %>%
  group_by(ate_positive, gender) %>%
  summarise(count = n()) %>%
  mutate(prop = count / sum(count)) %>%
  group_by(gender) %>%
  mutate(sum_count = sum(count)) %>%
  # binomial CI for the share of each type within each gender
  group_by(gender, ate_positive) %>%
  do(
    tidy(
      binom.test(
        x = .data$count, n = .data$sum_count
      )
    )
  ) %>%
  group_by(ate_positive) %>%
  # effect sizes: difference in proportions and Cohen's h
  mutate(
    ate = diff(estimate)
    , phi = 2*asin(sqrt(estimate))
    , cohen_h = diff(phi)
  ) %>%
  {ggplot(., aes(factor(gender), estimate, fill = ate_positive)) +
      geom_hline(yintercept = 0.5, linetype = 2, color = "grey", size = 1) +
      geom_bar(
        stat = "identity",
        position = position_dodge2(),
        width = 0.5
      ) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , position = position_dodge(width = 0.5)
        , width = 0.1
        , color = "grey"
        , size = 1
      ) +
      geom_text(
        stat = "identity",
        aes(
          # label = paste0(round(100 * prop), "% (", count, ")"))
          label = paste0(round(100 * estimate), "%"))
        , position = position_dodge2(width = 0.5)
        , vjust = -1
        , hjust = -0.5
        , size = 9*5/14
      ) +
      scale_y_continuous(limits = c(0, 1)) +
      labs(
        title = NULL,
        x = "Gender",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize)
        , legend.justification = c(0, 0)
        , legend.position = c(0.05, 0.9)
        , legend.direction = "horizontal"
        # , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
  print()
# > difficulty ----
# Gender heterogeneity in the difficulty response: each subject gets two
# effects (theta 0 -> 0.5 and 0.5 -> 1) whose sign pattern classifies
# them as Increasing / Decreasing / Inverse-U / U-shaped; type shares
# are then compared between genders via binomial tests.
data_effort %>%
  select(id, effort, theta) %>%
  filter(theta %in% c(0, 0.5)) %>%
  group_by(id, theta) %>%
  summarize(across(.cols = everything(), mean)) %>%
  arrange(id, theta) %>%
  # first within-subject effect: theta 0 -> 0.5
  mutate(ate1 = diff(effort)) %>%
  select(-c(theta, effort)) %>%
  distinct() %>%
  left_join(
    data_effort %>%
      select(id, effort, theta) %>%
      filter(theta %in% c(0.5, 1)) %>%
      group_by(id, theta) %>%
      summarize(across(.cols = everything(), mean)) %>%
      arrange(id, theta) %>%
      # second within-subject effect: theta 0.5 -> 1
      mutate(ate2 = diff(effort)) %>%
      select(-c(theta, effort)) %>%
      distinct()
  ) %>%
  # classify each subject by the sign pattern of the two effects
  mutate(ate_type = case_when(
    ate1 >= 0 & ate2 >= 0 ~ "Incr",
    ate1 <= 0 & ate2 <= 0 ~ "Decr",
    ate1 >= 0 & ate2 <= 0 ~ "Inv-U",
    ate1 <= 0 & ate2 >= 0 ~ "U"
  )
  ) %>%
  select(id, ate_type) %>%
  left_join(., data_demog %>% select(id, gender)) %>%
  filter(gender %in% c("Male", "Female")) %>%
  group_by(ate_type, gender) %>%
  summarise(count = n()) %>%
  mutate(prop = count / sum(count)) %>%
  group_by(gender) %>%
  mutate(sum_count = sum(count)) %>%
  # binomial CI for the share of each type within each gender
  group_by(gender, ate_type) %>%
  do(
    tidy(
      binom.test(
        x = .data$count, n = .data$sum_count
      )
    )
  ) %>%
  group_by(ate_type) %>%
  # effect sizes: difference in proportions and Cohen's h
  mutate(
    ate = diff(estimate)
    , phi = 2*asin(sqrt(estimate))
    , cohen_h = diff(phi)
  ) %>%
  {ggplot(., aes(factor(gender), estimate, fill = ate_type)) +
      geom_hline(yintercept = 0.5, linetype = 2, color = "grey", size = 1) +
      geom_bar(
        stat = "identity",
        position = position_dodge2(),
        width = 0.5
      ) +
      geom_errorbar(
        aes(ymin = conf.low, ymax = conf.high)
        , position = position_dodge(width = 0.5)
        , width = 0.1
        , color = "grey"
        , size = 1
      ) +
      geom_text(
        stat = "identity",
        aes(
          # label = paste0(round(100 * prop), "% (", count, ")"))
          label = paste0(round(100 * estimate), "%"))
        , position = position_dodge2(width = 0.5)
        , vjust = -0.5
        , hjust = -0.1
        , size = 9*5/14*0.75
      ) +
      scale_y_continuous(limits = c(0, 1)) +
      labs(
        title = NULL,
        x = "Gender",
        y = NULL
      ) +
      theme(
        legend.background = element_rect(colour = 'white', size = 0.25*graphs$linesize)
        , legend.justification = c(0, 0)
        , legend.position = c(0.05, 0.9)
        , legend.direction = "horizontal"
        # , legend.box.margin = margin(c(10,10,10,10))
      )
  } %>%
  print()
|
ba7ee06f6953e872a3bf502d546cad97d5f82df1
|
dc7ae82a9ac699342701307aff96dfe753d9c455
|
/01_scripts/Rscripts/subset_ind_coordinates.r
|
8fbc6919e5e302c00d7a913b4db6e09cfc6388a9
|
[] |
no_license
|
clairemerot/angsd_pipeline
|
b30aca8a7c649a9bb2c903c9024c2e8da84263d8
|
27e284ddd7a578892d5cef7aba097c9befc42a6e
|
refs/heads/master
| 2023-06-22T07:53:03.059492
| 2023-06-15T20:30:36
| 2023-06-15T20:30:36
| 138,884,266
| 18
| 10
| null | 2022-10-16T14:03:23
| 2018-06-27T13:18:43
|
Shell
|
UTF-8
|
R
| false
| false
| 1,176
|
r
|
subset_ind_coordinates.r
|
# This R script outputs a file with the columns to keep in the beagle file.
#
# A beagle genotype-likelihood file has 3 leading columns (marker,
# major, minor) followed by 3 likelihood columns per sample, so sample k
# (1-based) occupies columns 3k+1, 3k+2, 3k+3.
argv <- commandArgs(TRUE)
GROUP <- argv[1]    # file with the list of bamfiles from the subgroup
BAM_ALL <- argv[2]  # file with all the bamfiles used to construct the whole beagle

library(dplyr)

# All bamfiles, in the order used to build the full beagle file
BAM_beagle <- as.data.frame(read.table(BAM_ALL))
head(BAM_beagle)
colnames(BAM_beagle) <- c("file_name")

# Bamfiles belonging to the subgroup of interest
BAM_group <- as.data.frame(read.table(paste0("02_info/", GROUP, "bam.filelist")))
colnames(BAM_group) <- c("file_name")
BAM_group$keep <- "yes"
BAM_beagle_group <- left_join(BAM_beagle, BAM_group)

# Row indices (within the full bamfile list) of the samples of interest
pos_vec <- which(BAM_beagle_group$keep == "yes")
print(pos_vec)

# Columns of the target samples in the beagle file.  Vectorised instead
# of growing a vector inside a 1:length() loop: rbind() interleaves the
# three column indices per sample in the correct order, and the result
# is empty-safe when no sample matches (the old loop iterated over
# c(1, 0) in that case).
pos_beagle <- c(1, 2, 3,
                as.vector(rbind(3 * pos_vec + 1,
                                3 * pos_vec + 2,
                                3 * pos_vec + 3)))
# Preview at most the first 20 entries (the old [1:20] padded with NA
# when fewer than 20 columns were selected).
print(pos_beagle[seq_len(min(20, length(pos_beagle)))])

# Write the positions of the target samples to a file used to split the beagle
write.table(t(as.matrix(pos_beagle)),
            paste0("03_saf_maf_gl_all/subset_beagle/", GROUP, "_column.pos"),
            row.names = FALSE, col.names = FALSE, quote = FALSE, sep = ",")
|
4ebafb5fdef7857605269a0773359dc567572a08
|
84304d9256e55242443143fcd78df223627ef15b
|
/man/conicMatrix.Rd
|
54c115254d5dfd140c1153c752c60f15fb442324
|
[] |
no_license
|
cran/conics
|
87674f8dfd4f2a9fc188259a2b64292c3dc1ac31
|
70ac06a0a404b423308728383174ad08abdbec44
|
refs/heads/master
| 2021-03-12T20:33:37.057711
| 2013-11-22T00:00:00
| 2013-11-22T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,194
|
rd
|
conicMatrix.Rd
|
\name{conicMatrix}
\alias{conicMatrix}
\title{Matrix representing a conic}
\description{
Build a symmetric matrix representing a quadratic polynomial in two variables.
}
\usage{ conicMatrix(v)}
\arguments{
\item{v}{(\code{vector}) a 6-length vector containing the coefficients of a quadratic polynomial.}
}
\details{
The \code{v} argument is a 6-length vector containing the
coefficients of a quadratic polynomial of the form:
\preformatted{
P(x_1,x_2) = v_1 x_1^2 + v_2 x_1 x_2 + v_3 x_2^2 + v_4 x_1 + v_5 x_2 + v_6
}
The associated quadratic form is:
\preformatted{
Q(x_1,x_2,x_3) = v_1 x_1^2 + v_2 x_1 x_2 + v_3 x_2^2 + v_4 x_1 x_3 + v_5 x_2 x_3 + v_6 x_3^2
}
}
\value{
Returns the symmetric 3x3 matrix representing the associated quadratic form.
}
\author{
Bernard Desgraupes \cr
\email{bernard.desgraupes@u-paris10.fr}\cr
University of Paris Ouest - Nanterre\cr
Lab Modal'X (EA 3454)\cr
}
\seealso{
\code{\link{conicAsymptotes}},
\code{\link{conicAxes}},
\code{\link{conicCenter}},
\code{\link{conicPlot}}
}
\examples{
# Equation: 2*x_1^2 + 2*x_1*x_2 + 2*x_2^2 - 20*x_1 - 28*x_2 + 10 = 0
v <- c(2,2,2,-20,-28,10)
conicMatrix(v)
}
\keyword{conics, matrix}
|
cda05ed13191eff28f9d5ba9b180b8d04a7f5e43
|
2b5728585d67ad9f0210a21189459a1515faa72f
|
/man/importLimeSurveyData.Rd
|
77fea8269a593cacc4fada94f753d981ca0e6c8c
|
[] |
no_license
|
Matherion/userfriendlyscience
|
9fb8dd5992dcc86b84ab81ca98d97b9b65cc5133
|
46acf718d692a42aeebdbe9a6e559a7a5cb50c77
|
refs/heads/master
| 2020-12-24T16:35:32.356423
| 2018-09-25T06:41:14
| 2018-09-25T06:41:14
| 49,939,242
| 15
| 9
| null | 2018-11-17T10:34:37
| 2016-01-19T08:50:54
|
R
|
UTF-8
|
R
| false
| false
| 5,556
|
rd
|
importLimeSurveyData.Rd
|
\name{importLimeSurveyData}
\alias{importLimeSurveyData}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
importLimeSurveyData
}
\description{
This function can be used to import files exported by LimeSurvey, a powerful
Open Source online survey application that can be used for, for example,
psychological experiments and other research.
}
\usage{
importLimeSurveyData(datafile = NULL,
dataPath = NULL,
datafileRegEx = NULL,
scriptfile = NULL,
limeSurveyRegEx.varNames =
"names\\\\(data\\\\)\\\\[\\\\d*\\\\] <- ",
limeSurveyRegEx.toChar =
"data\\\\[, \\\\d*\\\\] <- as.character\\\\(data\\\\[, \\\\d*\\\\]\\\\)",
limeSurveyRegEx.varLabels =
"attributes\\\\(data\\\\)\\\\$variable.labels\\\\[\\\\d*\\\\] <- \\".*\\"",
limeSurveyRegEx.toFactor =
paste0("data\\\\[, \\\\d*\\\\] <- factor\\\\(data\\\\[, \\\\d*\\\\], ",
"levels=c\\\\(.*\\\\),.*labels=c\\\\(.*\\\\)\\\\)"),
limeSurveyRegEx.varNameSanitizing =
list(list(pattern = "#", replacement = "."),
list(pattern = "\\\\$", replacement = ".")),
setVarNames = TRUE,
setLabels = TRUE,
convertToCharacter = FALSE,
convertToFactor = FALSE,
categoricalQuestions = NULL,
massConvertToNumeric = TRUE,
dataHasVarNames = TRUE,
encoding = "NULL",
dataEncoding = "unknown",
scriptEncoding = "ASCII")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{datafile}{
The path and filename of the file containing the data (comma separated values).
}
\item{dataPath, datafileRegEx}{
Path containing datafiles: this can be used to read multiple datafiles, if the data is split between those. This is useful when downloading the entire datafile isn't possible because of server restrictions, for example when the processing time for the script in LimeSurvey that generates the datafiles is limited. In that case, the data can be downloaded in portions, and specifying a path here enables reading all datafiles in one go. Use the regular expression to indicate which files in the path should be read.
}
\item{scriptfile}{
The path and filename of the file containing the R script to import the data.
}
\item{limeSurveyRegEx.varNames}{
The regular expression used to extract the variable names from the script file. The
first regex expression (i.e. the first expression between parentheses) will be
extracted as variable name.
}
\item{limeSurveyRegEx.toChar}{
The regular expression to detect the lines in the import script where variables
are converted to the character type.
}
\item{limeSurveyRegEx.varLabels}{
The regular expression used to detect the lines in the import script where
variable labels are set.
}
\item{limeSurveyRegEx.toFactor}{
The regular expression used to detect the lines in the import script where
vectors are converted to factors.
}
\item{limeSurveyRegEx.varNameSanitizing}{
A list of regular expression patterns and their replacements to sanitize the
variable names (e.g. replace hashes/pound signs ('#') by something that is not
considered the comment symbol by R).
}
\item{setVarNames, setLabels, convertToCharacter, convertToFactor}{
Whether to set variable names or labels, or convert to character or factor,
using the code isolated using the specified regular expression.
}
\item{categoricalQuestions}{
Which variables (specified using LimeSurvey variable names)
are considered categorical questions; for these, the script to convert
the variables to factors, as extracted from the LimeSurvey import file, is
applied.
}
\item{massConvertToNumeric}{
Whether to convert all variables to numeric using
\code{\link{massConvertToNumeric}}.
}
\item{dataHasVarNames}{
Whether the variable names are included as header (first line) in the comma
separated values file (data file).
}
\item{encoding, dataEncoding, scriptEncoding}{
The encoding of the files; \code{encoding} overrides \code{dataEncoding}
and \code{scriptEncoding}, and so can be used to specify the same encoding
for both.
}
}
\details{
This function was intended to make importing data from LimeSurvey a bit easier.
The default settings used by LimeSurvey are not always convenient, and this
function provides a bit more control.
}
\value{
The dataframe.
}
\author{
Gjalt-Jorn Peters
Maintainer: Gjalt-Jorn Peters <gjalt-jorn@userfriendlyscience.com>
}
\seealso{
\code{\link{getData}}
}
\examples{
\dontrun{
### Of course, you need valid LimeSurvey files. This is an example of
### what you'd do if you have them, assuming you specified that path
### containing the data in 'dataPath', the name of the datafile in
### 'dataFileName', the name of the script file in 'dataLoadScriptName',
### and that you only want variables 'informedConsent', 'gender', 'hasJob',
### 'currentEducation', 'prevEducation', and 'country' to be converted to
### factors.
dat <- importLimeSurveyData(datafile = file.path(dataPath, dataFileName),
scriptfile = file.path(dataPath, dataLoadScriptName),
categoricalQuestions = c('informedConsent',
'gender',
'hasJob',
'currentEducation',
'prevEducation',
'country'));
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utility }
|
da9cc68dc97a5fba27f30d0a3580e494081eaee9
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4270_1/rinput.R
|
d01244038b095c84c033eadeb3442678d899e46f
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read the Newick file, remove its root with
# ape::unroot(), and write the unrooted tree to a new Newick file.
library(ape)

phylo <- read.tree("4270_1.txt")
write.tree(unroot(phylo), file = "4270_1_unrooted.txt")
|
2f2d165ee12066bc6d85c7e8c8540a4c4b374abb
|
ffc4849987a0c059ae306deb4747a83b015cc35c
|
/SensorFiles/initialize.R
|
7901cc7017f8cd577d946b4732679769af17ffe0
|
[] |
no_license
|
AGarsha/presentation
|
f11b4526dde363918acaaaf133e05aba96548d77
|
02798b55a408f536ecc7d20544881e4e68802873
|
refs/heads/master
| 2020-12-25T10:14:28.133535
| 2016-02-13T00:39:26
| 2016-02-13T00:39:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,101
|
r
|
initialize.R
|
# f.initialize: load and prepare the sensor dataset for survival modelling.
#
# Args:
#   sdSystemAge, sdBuilding, sdTemp  noise SDs for the simulated failure
#                                    threshold under the linear model.
#   sdNonLinear     noise SD passed to generateRNVec() for the non-linear model.
#   sampleFlag      if TRUE, return a single fold of a downsampled split.
#   downSampleTime  number of folds used when sampleFlag is TRUE.
#   fold            which fold to return when sampleFlag is TRUE.
#   type            "linearModel" or "nonLinearModel" failure-generation rule.
#   transformFlag   if TRUE, z-transform the continuous predictors.
#
# Returns: the prepared data frame (or one downsampled fold of it).
#
# NOTE(review): the non-linear branch calls generateRNVec(), which is defined
# elsewhere -- confirm it is sourced before this function is called.
f.initialize <- function(sdSystemAge = 7, sdBuilding = 7,
                         sdTemp = 7, sdNonLinear = 1, sampleFlag = FALSE,
                         downSampleTime = 10, fold = 1, type = "linearModel", transformFlag = TRUE ) {
  # Install any missing packages, then attach everything needed below.
  pkgs <- c("rms", "caret", "survival", "pec", "prodlim", "randomForestSRC",
            "ggRandomForests", "rpart", "partykit", "rpart.plot", "data.table")
  isPkgInstal <- pkgs %in% rownames(installed.packages())
  for (k in seq_along(isPkgInstal)) {
    if (!isPkgInstal[k]) {
      install.packages(pkgs[k], dependencies = TRUE, repos = "http://cran.rstudio.com/")
    }
  }
  library(rms)
  library(caret)
  library(survival)
  library(pec)
  library(MASS)
  library(prodlim)
  library(randomForestSRC)
  library(ggRandomForests)
  library(rpart)
  library(rpart.plot)
  library(partykit)
  library(data.table)

  # Load the merged sensor data and derive basic columns.
  dat <- read.csv("SensorFiles/dat.csv")
  levels(dat$product) <- c("A", "B", "C", "D", "E")
  table(dat$BuildingID)
  table(dat$product)
  dat$tempDiff <- dat$TargetTemp - dat$ActualTemp
  dat$System <- as.numeric(dat$System)

  # Z-transform the continuous predictors.
  if (transformFlag) {
    dat$SystemAge <- (dat$SystemAge - mean(dat$SystemAge)) / sd(dat$SystemAge)
    # BUG FIX: the original standardized SystemAge a second time (a no-op)
    # and never standardized BuildingAge, even though BuildingAge is used in
    # the failure models below.
    dat$BuildingAge <- (dat$BuildingAge - mean(dat$BuildingAge)) / sd(dat$BuildingAge)
    dat$tempDiff <- (dat$tempDiff - mean(dat$tempDiff)) / sd(dat$tempDiff)
  }

  # Define a "failure" as the combined predictor signal exceeding a noisy
  # threshold built from the historical averages.
  if (type == "linearModel") {
    # BUG FIX: in the original, the `sd = ...` arguments of the first and
    # third rnorm() calls were trapped inside mean() by misplaced
    # parentheses, so rnorm() silently fell back to sd = 1 and the
    # sdSystemAge/sdTemp arguments had no effect.
    dat$isDeath <- (dat$BuildingAge + dat$SystemAge + dat$tempDiff) >
      rnorm(length(dat$tempDiff), mean(dat$SystemAge), sd = sdSystemAge) +
      rnorm(length(dat$tempDiff), mean(dat$BuildingAge), sd = sdBuilding) +
      rnorm(length(dat$tempDiff), mean(abs(dat$tempDiff)), sd = sdTemp)
  } else if (type == "nonLinearModel") {
    # Non-linear phenomenon: compare a deterministic signal against a random
    # draw generated by the external helper generateRNVec().
    dat$isDeath <- generateRNVec(product = as.character(dat$product), tempDiff = dat$tempDiff, SystemAge = dat$SystemAge,
                                 BuildingAge = dat$BuildingAge, System = dat$System,
                                 sdNonLinear = sdNonLinear, nonRandomFlag = TRUE) >
      generateRNVec(product = as.character(dat$product), tempDiff = dat$tempDiff, SystemAge = dat$SystemAge,
                    BuildingAge = dat$BuildingAge, System = dat$System,
                    sdNonLinear = sdNonLinear, nonRandomFlag = FALSE)
    dat$time2 <- generateRNVec(product = as.character(dat$product), tempDiff = dat$tempDiff, SystemAge = dat$SystemAge,
                               BuildingAge = dat$BuildingAge, System = dat$System,
                               sdNonLinear = sdNonLinear, nonRandomFlag = FALSE)
    dat$time2 <- dat$time2 - min(dat$time2)
  }

  # Convert the Date/Time columns into "days before now", rebased to zero.
  dateTransform <- as.Date(as.character(dat$Date), "%m/%d/%y")
  dat$time <- as.POSIXct(paste0(as.character(dateTransform), " ", as.character(dat$Time)), format = "%Y-%m-%d %H:%M:%S", tz = "EST")
  dat$timeDays <- as.numeric(difftime(Sys.time(), dat$time, units = "days"))
  dat$timeDaysTransformed <- dat$timeDays - min(dat$timeDays)
  dat$time <- dat$timeDaysTransformed

  # Drop raw/intermediate columns and finalize types.
  dat$Date <- NULL
  dat$Time <- NULL
  dat$timeDaysTransformed <- NULL
  dat$timeDays <- NULL
  dat$BuildingID <- as.factor(dat$BuildingID)
  dat$ActualTemp <- NULL
  dat$TargetTemp <- NULL
  str(dat)

  # BUG FIX: the original always returned dat[indx[[fold]], ], but `indx`
  # was only created when sampleFlag was TRUE, so sampleFlag = FALSE (the
  # default) raised "object 'indx' not found".  Return the full data frame
  # when no downsampling is requested.
  if (sampleFlag) {
    indx <- createFolds(dat$BuildingID, downSampleTime)
    return(dat[indx[[fold]], ])
  }
  dat
}
|
c591847e25f8e3151cac48752dc5d774df7771b7
|
73794aa4e4d95d02b167c2cb3891e3d883df15c9
|
/man/dies.ok.Rd
|
37153faab296faeacdde5cdc6cb2ff539dd64d46
|
[] |
no_license
|
apomatix/AnalysisPageServer
|
6283263e25011f01c59dcfdc1531647b557d143c
|
1a89fd712a601d5df195175b2d8770a2fddfeda0
|
refs/heads/master
| 2021-09-17T21:58:34.357330
| 2018-07-05T19:11:05
| 2018-07-05T19:11:05
| 110,210,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 747
|
rd
|
dies.ok.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test.R
\name{dies.ok}
\alias{dies.ok}
\title{dies.ok}
\usage{
dies.ok(call, regex, testname)
}
\arguments{
\item{call}{An expression to evaluate}
\item{regex}{A regular expression to match the error against. If omitted then don't test the exception text.}
\item{testname}{A name for the test. Defaults to deparsing the call.}
}
\value{
Runs one or two tests (the second test to match the error message against regex, if it was provided and
if an error was successfully thrown).
}
\description{
Test that an expression throws an error
}
\details{
Test that an expression throws an error.
}
\examples{
dies.ok(stop("foo"), "foo", "it stops")
}
\author{
Brad Friedman
}
|
c94a1121d1648c3e8e16d5d8ff76b74dba4d8000
|
3bfc998ab6d6e275f2ad1b2e6d23aa3aa36d22c3
|
/R/stringdb.score.R
|
c13f7685d130ce9da9ec20a5ceb18d3f99402910
|
[
"BSD-3-Clause"
] |
permissive
|
unmtransinfo/metap
|
93ecf33e6a02bb6bf7741221a5e23e198abafc7b
|
159cfcdd6f96f82cdaf7d9fe7764b1e2136c366c
|
refs/heads/master
| 2020-04-05T09:57:47.161855
| 2019-10-30T19:56:24
| 2019-10-30T19:56:24
| 156,782,128
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,292
|
r
|
stringdb.score.R
|
#!/usr/bin/env Rscript
# Build a STRING protein-protein interaction score table: map STRING ids to
# internal protein_ids and write a deduplicated
# (protein_id1, protein_id2, combined_score) TSV, gzip-compressed.
library(data.table)
library(RPostgreSQL)
# Fetch the STRING-id -> internal protein_id mapping from the local metap DB.
conn <- dbConnect(PostgreSQL(), user = "oleg", host = "localhost", dbname = "metap")
protein <- dbGetQuery(conn, "select protein_id,stringdb_id from stringdb")
dbDisconnect(conn)
rm(conn)
setDT(protein)
# Download the human (taxon 9606) STRING links file.
download.file("https://stringdb-static.org/download/protein.links.v10.5/9606.protein.links.v10.5.txt.gz", destfile = "data/stringdb/9606.protein.links.v10.5.txt.gz")
string <- fread("gunzip -c data/stringdb/9606.protein.links.v10.5.txt.gz", header = T, sep = " ", quote = "")
# Canonical pair key (larger id first) so that A-B and B-A collapse to one row.
string[, key := sprintf("%s.%s", ifelse(protein1 >= protein2, protein1, protein2), ifelse(protein2 < protein1, protein2, protein1))]
string <- unique(string, by = "key")
string[, key := NULL]
# Translate both STRING ids to internal protein_ids via two joins.
string <- merge(string, protein, by.x = "protein1", by.y = "stringdb_id")
setnames(string, "protein_id", "protein_id1")
string <- merge(string, protein, by.x = "protein2", by.y = "stringdb_id")
setnames(string, "protein_id", "protein_id2")
fwrite(string[, .(protein_id1, protein_id2, combined_score)], "data/stringdb/stringdb.score.tsv", quote = T, sep = "\t", col.names = T, row.names = F)
# Remove any stale compressed output first, otherwise gzip would prompt/fail.
if(file.exists("data/stringdb/stringdb.score.tsv.gz")) {
  file.remove("data/stringdb/stringdb.score.tsv.gz")
}
system("gzip -9v data/stringdb/stringdb.score.tsv")
|
87c93427be61e264c1f274f84b3163308145c52c
|
c8d3eac72924cc8952e6bdf77497cd2c571194df
|
/fluodilution/man/proliferation.Rd
|
a9a528fe092013cfd3ba926abe970321fd9d226b
|
[
"MIT"
] |
permissive
|
hchauvin/fluodilution
|
af57ec858aefe41ae4ad92377ba80ec699d8d2b4
|
1fd52c705edfd3a0951152511f3d1b54b8762f4a
|
refs/heads/master
| 2021-04-03T08:19:23.059053
| 2019-01-11T16:05:03
| 2019-01-11T16:05:03
| 125,096,130
| 0
| 0
|
MIT
| 2019-09-24T21:52:03
| 2018-03-13T18:22:18
|
R
|
UTF-8
|
R
| false
| true
| 5,653
|
rd
|
proliferation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proliferation-.R, R/proliferation-branching.R,
% R/proliferation-cyton.R
\name{proliferation}
\alias{proliferation}
\alias{fd_proliferation_branching}
\alias{fd_proliferation_cyton}
\title{Implementation of the Cyton and branching models.}
\usage{
fd_proliferation_branching(...)
fd_proliferation_cyton(...)
}
\arguments{
\item{...}{\describe{ \item{\code{categories}}{Name of categories for the
proliferation model (branching processes only).} \item{\code{mgen}}{Maximum
number of generations to model.} \item{\code{length.out}}{Number of time
points at which division/death rates are evaluated. The more the better,
but also the slower (Cyton models only).} \item{\code{log10_scale}}{Whether
to reparametrize, when fitting, proportions (p, p0, res/res0,
mrates/mrates0) using a \code{log10} scale in order to potentially speed up
convergence (branching processes only).} \item{\code{initial}}{A matrix
giving the initial relative number of seeds in each category (rows) and for
each number of generations (columns). Categories are given in the order of
\code{levels(data$Category)}, with \code{data} an \code{\link{fd_data}}
object and generations from 0 to \code{mgen} (included). If a different
\code{mgen} is chosen for \code{\link{fd_formula}}, the matrix is
automatically resized and padded by zeroes if necessary.} }}
}
\value{
An \code{fd_proliferation} object.
}
\description{
With this package, a large variety of proliferation models can
be implemented. They all rest on two broad sets of assumptions. On one hand,
a generalized Cyton model and its nested, simpler models, consider that
division and death occur concurrently, in competition (Hawkins et al.
2007). On the other hand, a branching process (Hyrien et al. 2010)
considers that birth and death have both a given probability of occurring in
a cell (probability of transition) and a given probability of occurring after
a certain time (time to transition).
}
\section{Parametrization}{
A proliferation model is parametrized as a named
list with as many elements as there are compartments/categories. The order
in which those elements are arranged has its importance: because no
feedback is allowed, migration can only happen from a compartment with a
lesser index to a compartment with a greater index. Moreover, although
with Cyton models migration is not implemented, for symmetry the parameters
are bundled in a list with one element named \code{"One"}.
The shared parameters within those elements, for both models, are as
follows (be careful, they however DO NOT share the exact meaning, see
Chauvin et al. (2016)): \describe{ \item{\code{f0$mm, f0$delta,
f0$ss}}{Parametrization of the times to first division. See
\code{\link{fd_gamma_dist}} for details.} \item{\code{f$mm, f$delta,
f$ss}}{Parametrization of the times to subsequent divisions.}
\item{\code{g0$mm, g0$delta, g0$ss}}{Parametrization of the times to death
before the first division.} \item{\code{g$mm, g$delta,
g$ss}}{Parametrization of the times to death after the first division.}
\item{\code{res0, res}}{Proportion of cells that never divide nor die
(\code{res0}) and that do not divide further nor die after the first
generation (\code{res}).} }
Additionally, \itemize{ \item{In the natural parametrization (see
\code{\link{fd_model}}), \code{fd_proliferation_branching} accepts
\code{p0} and \code{p}, the proportion of cells that undergo first division
and undergo division in any number of generations greater than 0, and
\code{mrates}, which is a named list of migration "rates" between 0 and 1.
For instance, \code{Two$mrates$One} gives the migration "rate" from
compartment "One" to "Two".} \item{For branching processes only, additional
parameters \code{h$mm, h$delta, h$ss} can specify a decay distribution,
giving times to disintegration for dying/dead cells, and
\code{"hist_lost"}, \code{"props_lost"} and \code{"Ns_lost"} types (see
\code{\link{fd_data}}) take this into account. The default constraining
does not specify \code{h} and as a consequence no decay is used. This
decaying is as of now not available for Cyton models. Notice moreover
that, as of now, decaying is not implemented properly for
multicompartmental models as decay would also impact emigrating cells.} }
}
\section{Reparametrization}{
For \code{fd_proliferation_branching}, if
\code{log10_trans} is set to \code{TRUE}, proportions are reparametrized
(see \code{\link{fd_model}}) by their logarithm in base 10. Moreover,
\code{p0} and \code{p} are the proportions of cells that do not die at the
current generation, either because they divide or are nongrowers, and
\code{res0} and \code{res} are given relative to \code{p0} and \code{p}:
therefore, for branching processes the proportion of nongrowers, among the
total number of cells in a given number of generations, is \code{p0 * res0}
or \code{p * res}.
For \code{fd_proliferation_cyton}, no reparametrization is undertaken.
}
\section{Special cases}{
Notice that proliferation models allow the
\code{delta} to be 0, in which case the respective distributions reduce to
Dirac distributions, and the \code{g$mm} and \code{g0$mm} to be infinite
(\code{Inf}), in which case no death occurs (see
\code{\link{gamma-distributions}}).
}
\examples{
fd_model(proliferation="cyton")
fd_model(proliferation=fd_proliferation_branching(c("One", "Two"), mgen=10))
fd_proliferation_branching(c("One", "Two"), mgen = 10L,
initial = matrix(c(1, 0.5, 0, 0), nrow=2))
}
|
34ac7e112d45941e02c945d91290b5b9e1dbd1d0
|
54de529bed13c31f7eaa70d05831f9af562a4650
|
/R/moustache.R
|
082a4fff217ecc0fd6817e186766b2a7df8e4d97
|
[
"MIT"
] |
permissive
|
dtkaplan/checkr2
|
0f850351af1ec39f2294485c15dd70a821f9fc32
|
c0e633873d0cab355d2c5f3073a01d1ac62f2605
|
refs/heads/master
| 2021-09-03T18:13:09.969279
| 2018-01-11T01:33:37
| 2018-01-11T01:33:37
| 113,483,746
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,908
|
r
|
moustache.R
|
#' evaluate expressions in a string using a particular environment
#'
#' Replaces every `{{expr}}` moustache in `string` with the result of
#' evaluating `expr` in `bindings`.  Expressions that fail to evaluate are
#' replaced with an explanatory placeholder instead of aborting.
#'
#' @param string a character string which presumably contains some moustaches
#' referring to objects found in the bindings environment.
#' @param bindings an environment or list in which the objects moustached in `string` are defined.
#' @examples
#' checkr2:::moustache("hello")
#' checkr2:::moustache("Three plus five is {{3+5}}.")
moustache <- function(string, bindings = rlang::env_parent()) {
  # pull out all the instances of {{expr}} from the string
  the_moustache <- '\\{\\{.*?\\}\\}' # not greedy, so adjacent moustaches stay separate
  matches <- unlist(stringr::str_extract_all(string, the_moustache))
  if (length(matches) == 0) return(string)
  # strip the braces to recover the bare expressions
  expressions <- gsub("\\}\\}", "", gsub("\\{\\{", "", matches))
  for (j in seq_along(expressions)) {
    # BUG FIX: silent = TRUE keeps a failed evaluation from printing an error
    # message to the console; the failure is already reported via the
    # placeholder substituted into the output string below.
    val <- try(eval(parse(text = expressions[j]), envir = bindings), silent = TRUE)
    if (inherits(val, "try-error")) {
      # it wasn't a valid expression
      val <- paste0("'{{", expressions[j], "}}' could not be evaluated.")
    }
    # fixed = TRUE: replace the moustache text literally, not as a regex
    string <- gsub(matches[j], to_sensible_character(val), string, fixed = TRUE)
  }
  return(string)
}
#' Render a value as a short human-readable string (for moustache output).
#'
#' Vectors print as comma-separated values (long vectors are abbreviated to
#' their first and last two elements); matrices and data frames get a brief
#' structural summary; anything else falls back to deparse().
#'
#' @param v the value to describe.
#' @return a length-one character string.
to_sensible_character <- function(v) {
  if (is.vector(v)) {
    # round numerics to 3 significant digits so output stays compact
    v <- if (is.numeric(v)) signif(v, 3) else v
    if (length(v) > 5) {
      S <- paste(paste(as.character(head(v, 2)), collapse = ", "),
                 "...",
                 paste(as.character(tail(v, 2)), collapse = ", "))
      paste0("a vector of length ", length(v), ": ", S)
    } else {
      paste(as.character(v), collapse = ",")
    }
  } else if (is.matrix(v)) {
    # BUG FIX: the original recursed on v[], which preserves the dim
    # attribute, so a matrix re-entered this branch forever (C stack
    # overflow).  as.vector() drops the dims and recurses on the values.
    paste(paste(dim(v), collapse = "by"), "matrix with vals",
          to_sensible_character(as.vector(v)))
  } else if (is.data.frame(v)) {
    paste("data frame with", nrow(v), "rows,",
          length(v), "columns, and names", to_sensible_character(names(v)))
  } else {
    deparse(v)
  }
}
|
ea013ed684134aee80dc0cf04453e9d1f7353338
|
8a166837b22915d7597ed8fe81e1284cb96ead39
|
/Mouse/Metatranscriptome/MultiOmics_Mouse_Metatranscriptome_GIT.r
|
cc94d36a6649c3c8f8fa0f01ed3fcb4dbccf6a64
|
[] |
no_license
|
segalmicrobiomelab/functional_microbiomics
|
5a824070087d535cc0c06f29bee61fcd02c3ee78
|
f4015ebf638b62676a5da3debdca16899f73bc03
|
refs/heads/master
| 2022-04-03T04:56:46.413545
| 2020-01-31T19:47:51
| 2020-01-31T19:47:51
| 208,831,180
| 7
| 1
| null | 2019-10-21T20:35:18
| 2019-09-16T15:14:45
| null |
UTF-8
|
R
| false
| false
| 21,330
|
r
|
MultiOmics_Mouse_Metatranscriptome_GIT.r
|
#Load Packages
library(DESeq2)
library(edgeR)
library(limma)
library(Glimma)
library(gplots)
library(RColorBrewer)
library(pheatmap)
library(ggplot2)
library(ggrepel)
library(pathfindR)
library(scales)
library(data.table)
library(fBasics)
library(forcats)
library(omu)
library(maptools)
library(phyloseq)
library(SpiecEasi)
library(vegan)
library(cowplot)
library(funrar)
library(ggpmisc)
library(dplyr)
# Shared ggplot theme applied to all figures in this script.
theme<-theme(panel.background = element_blank(),panel.border=element_rect(fill=NA),panel.grid.major = element_blank(),panel.grid.minor = element_blank(),strip.background=element_blank(),axis.text.x=element_text(colour="black"),axis.text.y=element_text(colour="black"),axis.ticks=element_line(colour="black"),plot.margin=unit(c(1,1,1,1),"line"), legend.position="none")
# Significance threshold (FDR) used for differential-expression calls.
alpha = 0.01
# Load the sample metadata.
coldata <- read.delim2("Map.Mouse2.txt", sep="\t")
# Drop sample M18 (zero reads for all genes).
coldata <- coldata[coldata$Mouse.number!="M18",]
# Drop sample M15 (dimer peak too large).
coldata <- coldata[coldata$Mouse.number!="M15",]
# Order metadata by sample ID so rows line up with the count-table columns.
coldata <- coldata[order(coldata$Mouse.number),]
# Load the KEGG gene count table (genes as rows, samples as columns).
mycounts <-read.delim2("KEGG_gene_table.txt", sep="\t", row.names=1)
# Drop M15 from the counts as well.
mycounts = mycounts[,!(names(mycounts) %in% "M15")]
# Order count columns by sample ID.
mycounts <-mycounts[, order(colnames(mycounts))]
# Sanity check: count columns and metadata rows should match one-to-one
# (prints a table of TRUE/FALSE to the console).
table(colnames(mycounts)==as.character(coldata$Mouse.number))
# Replace missing counts with 0.
mycounts[is.na(mycounts)] <- 0
# Keep an untouched copy of the count table.
mycounts2 <- mycounts
# Convert the count table into a numeric data frame.
# BUG FIX: row.names referenced `mycounts3`, which is never defined anywhere
# in this script ("object 'mycounts3' not found"); the row names come from
# `mycounts` itself.
d1 <- data.frame(lapply(mycounts, function(x) as.numeric(as.character(x))),
                 check.names = FALSE, row.names = rownames(mycounts))
# DESeq2 requires integer counts.
d1[] <- lapply(d1, as.integer)
# Build the DESeq2 dataset, modelling counts by experimental time point.
dds <- DESeqDataSetFromMatrix(countData = d1,
colData = coldata,
design= ~ Time_exp)
# Zero-tolerant geometric mean (zeros are excluded from the product but still
# counted in the denominator) for size-factor estimation on sparse counts.
gm_mean = function(x, na.rm=TRUE){ exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))}
geoMeans = apply(counts(dds), 1, gm_mean)
# Estimate size factors from the custom per-gene geometric means.
dds <- estimateSizeFactors(dds, geoMeans = geoMeans)
# Variance-stabilizing transforms; rlog shrinks low-count genes towards the
# gene-wise average across all samples.
rld <- rlog(dds, fitType="local")
vsd <- varianceStabilizingTransformation(dds)
# Drop unused factor levels from every object.
dds$Time_exp <- droplevels(dds$Time_exp)
rld$Time_exp <- droplevels(rld$Time_exp)
vsd$Time_exp <- droplevels(vsd$Time_exp)
# Remove UNGROUPED/UNMAPPED KEGG feature rows.
dds1 <- dds[!grepl("UNGROUPED|UNMAPPED",rownames(assay(dds))),]
rld1 <- rld[!grepl("UNGROUPED|UNMAPPED",rownames(assay(rld))),]
vsd1 <- vsd[!grepl("UNGROUPED|UNMAPPED",rownames(assay(vsd))),]
# Keep only rows that carry taxonomic (genus or unclassified) annotation.
dds2 <- dds1[grep("g_|unclassified",rownames(assay(dds1))),]
rld2 <- rld1[grep("g_|unclassified",rownames(assay(rld1))),]
vsd2 <- vsd1[grep("g_|unclassified",rownames(assay(vsd1))),]
# Keep only pure KEGG rows (no taxonomic annotation).
dds3 <- dds1[!grepl("g_|unclassified",rownames(assay(dds1))),]
rld3 <- rld1[!grepl("g_|unclassified",rownames(assay(rld1))),]
vsd3 <- vsd1[!grepl("g_|unclassified",rownames(assay(vsd1))),]
# Remove negative-control samples before the analyses below.
dds2noneg <- dds2[, dds2$Mouse.number!= "NEG2"]
dds2noneg <- dds2noneg[, dds2noneg$Mouse.number!= "NEG1"]
dds3noneg <- dds3[, dds3$Mouse.number!= "NEG2"]
dds3noneg <- dds3noneg[, dds3noneg$Mouse.number!= "NEG1"]
rld3noneg <- rld3[, rld3$Mouse.number!= "NEG2"]
rld3noneg <- rld3noneg[, rld3noneg$Mouse.number!= "NEG1"]
#=========================================================
#////////////////////////PCOA PLOT///////////////////////
#=========================================================
# Bray-Curtis distance matrix between samples (KEGG-only rlog values,
# negative controls removed).
vegdist = vegdist(t(assay(rld3noneg)), method="bray")
# Principal co-ordinates (PCoA) on the distance matrix; keep the first 10 axes.
CmdScale <- cmdscale(vegdist, k =10)
# Per-axis sample variance, used to label the axes with % variance explained.
vars <- apply(CmdScale, 2, var)
percentVar <- round(100 * (vars/sum(vars)))
# Merge the PCoA co-ordinates with the sample metadata.
require(data.table)
newResults <- merge(x = CmdScale, y = colData(rld3noneg), by = "row.names", all.x = TRUE)
# Rename the first two axes and the sample-name column for plotting.
colnames(newResults)[colnames(newResults)=="V1"] <- "PC1"
colnames(newResults)[colnames(newResults)=="V2"] <- "PC2"
colnames(newResults)[colnames(newResults)=="Row.names"] <- "name"
# Group centroids in PC1/PC2 space, one per time point.
centroids <- aggregate(cbind(PC1,PC2)~Time_exp,data= newResults, mean)
# Attach each sample's centroid so segments can be drawn sample -> centroid.
newResults <- merge(newResults,centroids,by="Time_exp",suffixes=c("",".centroid"))
#---------------
#------Figure 6E
#---------------
pdf("KEGG_no_M15_Metatranscriptome_BAL_Time_BRAY.pdf", height = 7, width = 10)
ggplot(newResults, aes(PC1, PC2, color=Time_exp)) +
geom_point(size=5,alpha=0.5) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
#coord_fixed() +
#scale_color_manual(values=c("#296218", "#EA3323", "#000000","#932CE7")) +
#plot ellipse
#stat_ellipse(type = "t") +
#plot point and lines from centroid
geom_point(data=centroids, aes(x=PC1, y=PC2, color=Time_exp), size=0) +
geom_segment(aes(x=PC1.centroid, y=PC2.centroid, xend=PC1, yend=PC2, color=Time_exp))+
#geom_text_repel(aes(label=ifelse(newResults$name%in% c("COPD.0002.BAL.L.171", "COPD.0030.BAL.L.171", "COPD.0035.BAL.L.171") , as.character(newResults$name),'')),size=3,force=25) +
#labels centroids
geom_label_repel(data = centroids, aes(x=PC1, y=PC2, label=c("1 Day", "1 Hour", "3 Days","4 Hours","7 Days","MOC", "PBS")), size=10) +
scale_color_manual(values=c("#6ABD23","#C49A02","#F8766D","#18C59D","#06B9EB","#A88EFF","#FB83DF")) +
#geom_label_repel(data = centroids, aes(x=PC1, y=PC2, label=Time_exp), size=10) +
#scale_x_reverse() +
theme(panel.background = element_blank(),panel.border=element_rect(fill=NA),
panel.grid.major = element_line(color="grey",size=0.2,linetype=3),panel.grid.minor = element_blank(),
strip.background=element_blank(),axis.title=element_text(size=30,face="bold"),
axis.text.x=element_blank(),axis.text.y=element_blank(),axis.ticks=element_blank(),
plot.margin=unit(c(1,1,1,1),"line"), legend.position="none")
dev.off()
#=========================================================
#/////////InterGroup Beta Diversity///////////////////////
#=========================================================
####################### DON'T CHANGE THIS FUNCTION: STARTS HERE ##################################################
# Compute all pairwise BETWEEN-group distances from a distance matrix.
#
# Args:
#   sample.data         sample metadata; rownames must match the labels of
#                       distance.matrix.
#   Variable.Intergroup grouping variable (one value per sample) defining the
#                       between-group comparisons.
#   distance.matrix     a "dist" object (or square matrix) of sample distances.
#   extraVar.addtotable extra per-sample variable carried into the output
#                       table (repeat Variable.Intergroup if none is wanted).
#   filename            NOTE(review): currently unused inside the function --
#                       confirm whether the table was meant to be written out.
#
# Side effect: assigns the result to `allEncompassing.df` in the enclosing /
# global environment via <<- (kept for backward compatibility: the
# downstream script reads that global); the same table is the return value.
intergroup.distances <- function(sample.data, Variable.Intergroup, distance.matrix, extraVar.addtotable, filename){
  # Melt the distance matrix into long form: one row per ordered sample pair.
  b <- melt(as.matrix(distance.matrix))
  # Remove self distances and duplicated (reversed) pairs.
  p <- t(apply(b[, c(1, 2)], 1, FUN = sort))
  rmv1 <- which(p[, 1] == p[, 2])
  p <- paste(p[, 1], p[, 2], sep = "|")
  rmv2 <- which(duplicated(p))
  b.df <- b[-c(rmv1, rmv2), ]
  # Map each sample name in the pair columns to its group.
  new.df <- b.df
  new.df[] <- lapply(b.df, function(x) Variable.Intergroup[match(x, rownames(sample.data))])
  topo.var1 <- new.df[, 1]
  topo.var2 <- new.df[, 2]
  # Append the two group columns to the distance table.
  b.var.df <- cbind(b.df, topo.var1, topo.var2)
  # Remove intra-group pairs, keeping only between-group distances.
  btw.b.var.df <- b.var.df
  rownames(btw.b.var.df) <- NULL
  toremove <- numeric()
  for (i in seq_len(nrow(btw.b.var.df))) {
    if (btw.b.var.df$topo.var1[i] == btw.b.var.df$topo.var2[i]) {
      toremove <- append(toremove, i)
    }
  }
  # BUG FIX: the original indexed with -toremove unconditionally; when no
  # intra-group pair exists, `x[-numeric(0), ]` selects ZERO rows and would
  # silently discard the entire table.  Only subset when there is something
  # to drop.
  if (length(toremove) > 0) {
    btw.b.var.df <- btw.b.var.df[-toremove, ]
  }
  # Build the canonical set of "A to B" categories (one orientation per
  # unordered group pair).
  new.cat.btw.df <- data.frame(btw.b.var.df$topo.var1, btw.b.var.df$topo.var2)
  dat.sort <- t(apply(new.cat.btw.df, 1, sort))
  unique.new.cat.btw <- unique(new.cat.btw.df[!duplicated(dat.sort), ])
  colnames(unique.new.cat.btw) <- NULL
  rownames(unique.new.cat.btw) <- NULL
  unique.new.cat.btw <- paste(unique.new.cat.btw[, 1], "to", unique.new.cat.btw[, 2])
  # If a row is the reverse of a canonical category, swap its two groups so
  # both orientations plot as a single category (no data is dropped).
  clean.btw.b.var.df <- btw.b.var.df
  rownames(clean.btw.b.var.df) <- NULL
  for (i in seq_len(nrow(clean.btw.b.var.df))) {
    if (paste(clean.btw.b.var.df$topo.var2[i], "to", clean.btw.b.var.df$topo.var1[i]) %in% unique.new.cat.btw) {
      clean.btw.b.var.df$topo.var1[i] <- btw.b.var.df$topo.var2[i]
      clean.btw.b.var.df$topo.var2[i] <- btw.b.var.df$topo.var1[i]
    }
  }
  new.cat.btw.clean <- paste(clean.btw.b.var.df$topo.var1, "to", clean.btw.b.var.df$topo.var2)
  # Look up the extra variable and the grouping variable for both members of
  # each pair.
  x <- match(clean.btw.b.var.df$Var1, rownames(sample.data))
  x2 <- match(clean.btw.b.var.df$Var2, rownames(sample.data))
  y <- extraVar.addtotable[x]
  y2 <- extraVar.addtotable[x2]
  b1 <- Variable.Intergroup[x]
  b2 <- Variable.Intergroup[x2]
  # Assemble the final table.  <<- is deliberate: the surrounding script
  # reads `allEncompassing.df` from the global environment.
  allEncompassing.df <<- data.frame(Sample1 = clean.btw.b.var.df$Var1, Sample2 = clean.btw.b.var.df$Var2, Category = new.cat.btw.clean, Distance = as.numeric(clean.btw.b.var.df$value), Subject1 = y, SampleType1 = b1, Subject2 = y2, SampleType2 = b2)
}
####################### DON'T CHANGE THIS FUNCTION: ENDS HERE ####################################################
# Create the tables for the function: clamp negative rlog values to 0
# (Bray-Curtis requires non-negative input), then compute sample-by-sample
# Bray-Curtis distances (samples become rows after t()).
mat <- ifelse(assay(rld3noneg)<0,0,assay(rld3noneg))
Bray.dist = vegdist(t(mat), method="bray")
######## Here, you set your parameters. In this case we use: Bray ###################
{
#set the sample data
sample.data = colData(rld3noneg)
#set the variable you want to do groupings by
Variable.Intergroup = colData(rld3noneg)$Time_exp
#set the distance.matrix
distance.matrix = Bray.dist
##Select if you would like to add any other variables to the final table for clarifiation and processing. If none, then just repeat the variable.intergroup
#in this care s we need to add the Subject_ID, in this case: ID_Sample_Type_Subject_Type_Simple
extraVar.addtotable = colData(rld3noneg)$Time_exp
#set the file name you'd like
filename = "Unique.ID.Bronch.Cohort.Bray.txt"
allEncompassing.df = NULL
#run it for all distances between samples
intergroup.distances(sample.data, Variable.Intergroup, distance.matrix, extraVar.addtotable, filename)
#Create Comparisson Variable
allEncompassing.df$comparison <- ifelse(allEncompassing.df$Subject1=="PBS",
paste(allEncompassing.df$Subject2,"to",allEncompassing.df$Subject1), paste(allEncompassing.df$Subject1,"to",allEncompassing.df$Subject2))
#Keep only PBS Comparisson
allEncompassing.df <- allEncompassing.df[grepl("PBS",allEncompassing.df$comparison),]
#Create a variable for SampleID
allEncompassing.df$SampleID <- ifelse(allEncompassing.df$Subject2=="PBS",
as.character(allEncompassing.df$Subject1), as.character(allEncompassing.df$Subject2))
#Keep only the Sample ID and the distance
allEncompassing.df <- allEncompassing.df[,names(allEncompassing.df) %in% c("SampleID","Distance")]
#Summarize the Mean of the data per sampleID
data <- allEncompassing.df %>%
group_by(SampleID) %>%
summarize(mean_size = mean(Distance, na.rm = TRUE), sd= sd(Distance, na.rm=TRUE))
#Set Order Of Figure
data$or <-ifelse(data$SampleID=="MOC",0 ,NA)
data$or <-ifelse(data$SampleID=="1hour",0 ,data$or)
data$or <-ifelse(data$SampleID=="4hour",2 ,data$or)
data$or <-ifelse(data$SampleID=="1day",12 ,data$or)
data$or <-ifelse(data$SampleID=="3day",36 ,data$or)
data$or <-ifelse(data$SampleID=="7day",84 ,data$or)
#MOC only data
moc <- data[data$SampleID=="MOC",]
#All other time data
time <- data[data$SampleID!="MOC",]
# --- Figure 6F: mean distance from PBS, MOC panel + time-course panel -----
# MOC (inoculum) panel.
mocplot <-
  ggplot(moc, aes(x=or, y=mean_size, fill=SampleID)) +
  geom_bar(stat="identity", width =0.2)+
  # NOTE(review): error bars run from the mean to the raw SD value
  # (ymax=sd), not mean +/- sd -- confirm this is intentional.
  geom_errorbar(aes(ymin=mean_size, ymax=sd,color=SampleID), width=.2, position=position_dodge(.9)) +
  xlab("") +
  ylab("Mean Distance from PBS")+
  scale_y_continuous(expand = c(0, 0))+
  scale_x_discrete(labels=c("MOC"))+
  scale_fill_manual(values=c("#A88EFF")) +
  scale_color_manual(values=c("#A88EFF")) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(),
        axis.line = element_line(colour = "black"), strip.text.y = element_blank(), strip.background = element_blank(),
        axis.text.x = element_text(size=20,face="bold"),legend.position = "none",strip.text.x = element_text(size = 20, face = "bold"),
        axis.text.y = element_text(size=20,face="bold"),panel.spacing.x=unit(2.5, "lines"),axis.title=element_text(size=20, face="bold"))
# Time-course panel (1 hour through 7 days), with a shaded area tracing
# the trend beneath the bars; y-axis decorations are suppressed because the
# MOC panel on the left supplies them.
timeplot <-
  ggplot(time, aes(x=or, y=mean_size, fill=SampleID)) +
  geom_area(stat = "identity", color= "black",fill="lightgrey", linetype="dashed",alpha=0.2)+
  geom_bar(stat="identity", width =1)+
  geom_errorbar(aes(ymin=mean_size, ymax=sd,color=SampleID), width=.2, position=position_dodge(.9)) +
  xlab("") +
  ylab("")+
  scale_y_continuous(expand = c(0, 0))+
  scale_x_discrete(labels=c("1hour"="1 Hour", "4hour"= "4 Hours",
                            "1day"="1 Day", "3day"="3 Days","7day"="7 Days"))+
  scale_fill_manual(values=c("#6ABD23","#C49A02","#F8766D","#18C59D","#06B9EB")) +
  scale_color_manual(values=c("#6ABD23","#C49A02","#F8766D","#18C59D","#06B9EB")) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(),
        axis.line.y = element_blank(), strip.text.y = element_blank(), strip.background = element_blank(),
        axis.line.x = element_line(colour = "black"),axis.ticks.x=element_line(colour = "black"),
        axis.text.x = element_text(size=20,face="bold"),legend.position = "none",strip.text.x = element_text(size = 20, face = "bold"),
        axis.text.y = element_blank(),axis.ticks.y=element_blank(),panel.spacing.x=unit(2.5, "lines"),
        axis.title=element_text(size=20, face="bold"))
#-------------------
#-------Figure 6F
#-------------------
# Write both panels side by side to a single PDF.
pdf("Metatrans_InterGroup_Distance_From_PBS.pdf", height = 5, width = 10)
plot_grid(mocplot, timeplot, align = "h", ncol = 2, rel_widths = c(1.7/10, 8.3/10))
dev.off()
# --- Extract taxon-level rows from the metatranscriptomic count table -----
# Drop the HUMAnN-style aggregate rows.
d2 <- d1[!grepl("UNGROUPED|UNMAPPED",rownames(d1)),]
# Keep only rows that carry taxonomic information.
taxa <- d2[grep("g_|unclassified",rownames(d2)),]
# Split the row names: characters 8+ are the genus/species string,
# characters 1-6 the KO identifier.
taxa$gs<- substring(rownames(taxa), 8)
taxa$ko<- substr(rownames(taxa), 1,6)
# Counts for the MOC inoculum alone (row names preserved).
taxa.moc <- data.frame(MOC=taxa$MOC)
rownames(taxa.moc) <- rownames(taxa)
# The three taxa that make up the MOC inoculum.
taxa2 <- taxa[grep("g__Veillonella.s__Veillonella_parvula|g__Streptococcus.s__Streptococcus_mitis_oralis_pneumoniae|g__Prevotella.s__Prevotella_melaninogenica",taxa$gs),]
# Keep only the count columns (drop the gs/ko annotation columns).
taxa2 <- taxa2[,1:21]
# Combine count data with sample metadata for DESeq2.
dds.taxa <- DESeqDataSetFromMatrix(countData = taxa2,
                                   colData = coldata,
                                   design= ~ Time_exp)
#=========================================================
# BAR PLOTS OF SIGNIFICANT GENES
# (fix: this banner line previously began with "//", which is not a valid
# R comment and made the script fail to parse)
#=========================================================
# Count data restricted to the count columns plus the genus/species column.
taxas <- taxa[,1:22]
# Sum counts per taxon, then divide each numeric column by its column sum
# to obtain relative abundance.
# NOTE(review): funs() is deprecated in current dplyr; list(~ ...) is the
# modern equivalent -- left unchanged for consistency with the rest of the
# script.
taxas <-
  taxas %>%
  group_by(gs) %>%
  summarise_all(funs(sum)) %>%
  mutate_if(is.numeric, funs(./sum(.)))
# Long format for plotting.  (NOTE: `as` shadows base::as in this script.)
as <- data.table(melt(taxas))
# Keep only the three taxa that constitute the MOC inoculum.
as <- as[grep("g__Veillonella.s__Veillonella_parvula|g__Streptococcus.s__Streptococcus_mitis_oralis_pneumoniae|g__Prevotella.s__Prevotella_melaninogenica",as$gs),]
# Align sample identifiers with the metadata, then merge.
as$SampleID <- as$variable
assay <- as.data.frame(colData(dds.taxa))
assay$SampleID <- assay$Mouse.number
as2 <- merge(as,assay, all=TRUE)
as2$Genus<- as2$gs
# Median and 75th percentile of relative abundance per time point and taxon.
data <- setDT(as2)[,list(value=as.numeric(median(value, na.rm=TRUE)), iqr=as.numeric(quantile(value, probs=.75, na.rm=TRUE))), by=c("Time_exp", "Genus")]
# Drop samples without a time-point assignment.
data <- data[data$Time_exp!="n.a.",]
# MOC inoculum only; cumulative bounds drive the donut chart, and
# zero-abundance slices are dropped.
datamoc <- data[data$Time_exp=="MOC"]
datamoc$ymax = cumsum(datamoc$value)
datamoc$ymin = c(0, head(datamoc$ymax, n=-1))
datamoc <- datamoc[datamoc$value!=0,]
#------------------
#-----------Figure 6A: donut chart of the MOC inoculum composition
#------------------
# NOTE(review): `mocdo` is only assigned here, never printed, so the PDF
# device is closed without anything drawn on it -- unless `mocdo` is printed
# elsewhere, this file will be blank; confirm.
pdf("MetaTrans_MOC_Donut.pdf", height = 5, width = 10)
mocdo <-
  ggplot(datamoc, aes(fill=Genus, ymax=ymax, ymin=ymin, xmax=8, xmin=3)) +
  geom_rect() +
  coord_polar(theta="y") +
  xlim(c(0, 8)) +
  scale_fill_manual(values=c("#D6504C","#47B665","#FBBE51","#D6D6D6"))+
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(),
        axis.line = element_blank(), strip.text.y = element_blank(), strip.background = element_blank(),
        axis.ticks= element_blank(),axis.text=element_blank(),legend.position="none")
dev.off()
# Drop the MOC inoculum and PBS control to focus on the in-vivo time course.
data2 <- data[data$Time_exp!="MOC"]
data2 <- data2[data2$Time_exp!="PBS"]
# Append an "Other" row per time point (sum over the target taxa, labeled
# via the NA Genus produced by the summarised rows).
data3 <-
  data2 %>%
  bind_rows(data2 %>%
              group_by(Time_exp) %>%
              summarise_if(is.numeric, funs(sum))) %>%
  mutate(Genus = ifelse(is.na(Genus), "Other", Genus))
# "Other" becomes the complement (1 - sum); everything is scaled to percent.
data3$value <- ifelse(data3$Genus=="Other",(1-data3$value)*100,data3$value*100)
# Numeric x-positions (roughly hours) for the time points.
data3$or <-ifelse(data3$Time_exp=="1hour",0 ,NA)
data3$or <-ifelse(data3$Time_exp=="4hour",4 ,data3$or)
data3$or <-ifelse(data3$Time_exp=="1day",12 ,data3$or)
data3$or <-ifelse(data3$Time_exp=="3day",36 ,data3$or)
data3$or <-ifelse(data3$Time_exp=="7day",84 ,data3$or)
#------------------
#---------Figure 6G: stacked relative-abundance bars over time
#------------------
pdf("Metatrans_Relative_Abundance_Median_Stacked_BAL_OTU_over_Time.pdf", height = 7, width = 10)
ggplot(data3, aes(x=or, y=value)) +
  geom_bar(stat="identity",width=2.5,aes(fill=factor(Genus,
                                                     levels=c("Other","g__Prevotella.s__Prevotella_melaninogenica","g__Streptococcus.s__Streptococcus_mitis_oralis_pneumoniae",
                                                              "g__Veillonella.s__Veillonella_parvula"))))+
  xlab("") +
  ylab("% Relative Abundance")+
  scale_y_continuous(limits=c(0, 105), expand = c(0, 0))+
  scale_x_discrete(labels=c("MOC"="MOC", "1hour"="1Hr", "4hour"= "4Hr",
                            "1day"="1D", "3day"="3D","7day"="7D","PBS"="PBS"))+
  scale_fill_manual(values=c( "#D6D6D6","#D6504C","#47B665","#FBBE51")) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(),
        axis.line = element_line(colour = "black"), strip.text.y = element_blank(), strip.background = element_blank(),
        axis.text.x = element_text(size=20,face="bold"),legend.position = "none",strip.text.x = element_text(size = 20, face = "bold"),
        axis.text.y = element_text(size=20,face="bold"),panel.spacing.x=unit(2.5, "lines"),axis.title=element_text(size=20, face="bold"))
dev.off()
|
5199dddc97aeab292067b5bdbe578e0fa4f2b458
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/r/generated/R/ComDayCqDamCoreImplJmxAssetMigrationMBeanImplProperties.r
|
80cac3c415330aab3684a7d9e7a4a982bb481564
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
R
| false
| false
| 2,487
|
r
|
ComDayCqDamCoreImplJmxAssetMigrationMBeanImplProperties.r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
# NOTE(review): generated code -- prefer regenerating from the OpenAPI spec
# over editing this file by hand.
#' ComDayCqDamCoreImplJmxAssetMigrationMBeanImplProperties Class
#'
#' Value object wrapping the single `jmx.objectname` property of the asset
#' migration MBean configuration, with (de)serialization helpers to and from
#' JSON (both list form and string form).
#'
#' @field jmx.objectname A ConfigNodePropertyString holding the JMX object name.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqDamCoreImplJmxAssetMigrationMBeanImplProperties <- R6::R6Class(
  'ComDayCqDamCoreImplJmxAssetMigrationMBeanImplProperties',
  public = list(
    `jmx.objectname` = NULL,
    # Optionally seed the property; when given it must be an R6 object.
    initialize = function(`jmx.objectname`){
      if (!missing(`jmx.objectname`)) {
        stopifnot(R6::is.R6(`jmx.objectname`))
        self$`jmx.objectname` <- `jmx.objectname`
      }
    },
    # Serialize to a named list suitable for jsonlite::toJSON; NULL fields
    # are omitted.
    toJSON = function() {
      ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject <- list()
      if (!is.null(self$`jmx.objectname`)) {
        ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject[['jmx.objectname']] <- self$`jmx.objectname`$toJSON()
      }
      ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject
    },
    # Populate this object's fields from a JSON string (mutates self).
    fromJSON = function(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesJson) {
      ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject <- jsonlite::fromJSON(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesJson)
      if (!is.null(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject$`jmx.objectname`)) {
        jmx.objectnameObject <- ConfigNodePropertyString$new()
        jmx.objectnameObject$fromJSON(jsonlite::toJSON(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject$jmx.objectname, auto_unbox = TRUE))
        self$`jmx.objectname` <- jmx.objectnameObject
      }
    },
    # Serialize directly to a JSON string (assumes jmx.objectname is set).
    toJSONString = function() {
      sprintf(
        '{
           "jmx.objectname": %s
        }',
        self$`jmx.objectname`$toJSON()
      )
    },
    # Populate fields from a JSON string, unconditionally rebuilding the
    # jmx.objectname property object.
    fromJSONString = function(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesJson) {
      ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject <- jsonlite::fromJSON(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesJson)
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`jmx.objectname` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComDayCqDamCoreImplJmxAssetMigrationMBeanImplPropertiesObject$jmx.objectname, auto_unbox = TRUE))
    }
  )
)
|
e341c7cb1cd8367b8d274bd40c6ccb9425e7c79d
|
be935ba1a1e91ba791c424437653b1adea81bbce
|
/R/write_files.R
|
7a7aa1b55216e2ef47a7e18260ef255172149c6c
|
[] |
no_license
|
sbpost/myco-project
|
0f9b751164d7deaaf6ce49f071521cd39b456e8f
|
c6ec05975f9084167af8a49f3140eac17e17d833
|
refs/heads/master
| 2023-01-11T12:50:21.062987
| 2020-11-18T09:14:36
| 2020-11-18T09:14:36
| 313,873,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 727
|
r
|
write_files.R
|
write_files <- function(
  tbl_ls,
  corp_path,
  corp_filings_path,
  activities_path,
  addresses_path,
  mortgages_path,
  officers_path
) {
  # Write each table in `tbl_ls` to its CSV destination. Pure serialization
  # -- no transformation happens here.
  destinations <- list(
    corp_tbl = corp_path,
    corp_filings_tbl = corp_filings_path,
    activities_tbl = activities_path,
    addresses_tbl = addresses_path,
    mortgages_tbl = mortgages_path,
    officers_tbl = officers_path
  )
  for (tbl_name in names(destinations)) {
    write_csv(tbl_ls[[tbl_name]], file = destinations[[tbl_name]])
  }
  # Preserve the original return value: the final write_csv() call returned
  # the officers table invisibly.
  invisible(tbl_ls$officers_tbl)
}
|
c2dc9891cfcc15c79f01e13b2b4b8dcf115517c5
|
6c12225069086e6c544199652ef147c7d7c2e5ba
|
/spde tutorial functions.R
|
ada2e56e6232b7a5460a2c2fccfa9e8f012a91cc
|
[] |
no_license
|
maquins/ewars_dashboard
|
237eed007b758b588c7f51a7d11acf006c359461
|
edb8c94b7d2d5162d445c0e32e564cac74ccbc9a
|
refs/heads/master
| 2023-06-08T04:26:12.012375
| 2021-06-24T14:38:43
| 2021-06-24T14:38:43
| 287,126,031
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,463
|
r
|
spde tutorial functions.R
|
## Color-universal-design palette, see http://jfly.iam.u-tokyo.ac.jp/color/
## source: inla spde book
# Seven colors given as RGB fractions (percentages / 100).
c7rgb <- rgb(c(80, 80, 90, 95, 35,0, 0)/100,
             c(60, 40, 60, 90, 70, 45, 0)/100,
             c(70, 0, 0, 25, 90, 70, 0)/100)
# Simulate `n` realizations of a zero-mean Gaussian field with a Matern
# covariance at the supplied coordinates.
#
# Args:
#   n:        number of realizations.
#   coords:   matrix of point coordinates (one row per location).
#   kappa:    Matern scale parameter.
#   variance: marginal variance of the field.
#   nu:       Matern smoothness parameter (default 1).
# Returns a vector (n = 1) or a locations-by-n matrix of simulated values.
rMatern <- function(n, coords, kappa, variance, nu=1) {
  # Matern correlation from pairwise distances; besselK is the modified
  # Bessel function of the second kind.
  m <- as.matrix(dist(coords))
  m <- exp((1-nu)*log(2) + nu*log(kappa*m)-
             lgamma(nu))*besselK(m*kappa, nu)
  # The formula is NaN at zero distance; the diagonal correlation is 1 by
  # definition.
  diag(m) <- 1
  # Sample via the Cholesky factor of the covariance matrix.
  return(drop(crossprod(chol(variance*m),
                        matrix(rnorm(nrow(coords)*n), ncol=n))))
}
# Simulate `n` realizations of an SPDE/GMRF Matern field at `coords` using
# INLA, building a mesh automatically when none is supplied.
#
# Args:
#   coords:   matrix of projection locations.
#   kappa:    scale parameter; variance: marginal variance; alpha: SPDE order.
#   n:        number of samples; mesh: optional precomputed inla mesh.
#   seed:     optional seed forwarded to inla.qsample.
#   return.attributes: if TRUE, attach mesh/spde/Q/A/cpu as attributes.
# Returns the sampled field(s), projected to `coords`.
rspde <- function(coords, kappa, variance=1, alpha=2, n=1, mesh,
                  verbose=FALSE, seed, return.attributes=FALSE) {
  t0 <- Sys.time()
  # Internal theta parameterization of (variance, kappa).
  theta <- c(-0.5*log(4*pi*variance*kappa^2), log(kappa))
  if (verbose) cat('theta =', theta, '\n')
  if (missing(mesh)) {
    # Heuristic mesh parameters scaled by the correlation range.
    mesh.pars <- c(0.5, 1, 0.1, 0.5, 1)*sqrt(alpha-ncol(coords)/2)/kappa
    if (verbose) cat('mesh.pars =', mesh.pars, '\n')
    # NOTE: `attributes` shadows base::attributes inside this function.
    attributes <- list(
      mesh=inla.mesh.2d(,
                        coords[chull(coords), ], max.edge=mesh.pars[1:2],
                        cutoff=mesh.pars[3], offset=mesh.pars[4:5]))
    if (verbose) cat('n.mesh =', attributes$mesh$n, '\n')
  }
  else attributes <- list(mesh=mesh)
  attributes$spde <- inla.spde2.matern(attributes$mesh, alpha=alpha)
  attributes$Q <- inla.spde2.precision(attributes$spde, theta=theta)
  # Projection matrix from mesh nodes to the requested coordinates.
  attributes$A <- inla.mesh.project(mesh=attributes$mesh, loc=coords)$A
  # NOTE(review): this n==1 branch is dead code -- `result` is
  # unconditionally overwritten by the inla.qsample() call below.
  if (n==1)
    result <- drop(attributes$A%*%inla.qsample(
      Q=attributes$Q,
      constr=attributes$spde$f$extraconstr))
  t1 <- Sys.time()
  result <- inla.qsample(n, attributes$Q,
                         seed=ifelse(missing(seed), 0, seed),
                         constr=attributes$spde$f$extraconstr)
  # Project the mesh-node samples onto the coordinate locations, padding
  # when there are fewer mesh nodes than projection rows.
  if (nrow(result)<nrow(attributes$A)) {
    result <- rbind(result, matrix(
      NA, nrow(attributes$A)-nrow(result), ncol(result)))
    dimnames(result)[[1]] <- paste('x', 1:nrow(result), sep='')
    for (j in 1:ncol(result))
      result[, j] <- drop(attributes$A%*%
                            result[1:ncol(attributes$A),j])
  }
  else {
    for (j in 1:ncol(result))
      result[1:nrow(attributes$A), j] <-
        drop(attributes$A%*%result[,j])
    result <- result[1:nrow(attributes$A), ]
  }
  t2 <- Sys.time()
  # Timing breakdown: preparation, sampling, total.
  attributes$cpu <- c(prep=t1-t0, sample=t2-t1, total=t2-t0)
  if (return.attributes)
    attributes(result) <- c(attributes(result), attributes)
  return(drop(result))
}
# Profile negative log-likelihood of the SPDE model as a function of
# pars = (log noise variance, log kappa); the regression coefficients and
# the overall variance scale are profiled out analytically.
negLogLikFun <- function(pars, X, A, y, spde, verbose=0) {
  # Prior precision of the latent field at the current kappa.
  q1 <- inla.spde2.precision(spde, c(pars[2], 0))
  l1 <- chol(q1)
  ld1 <- sum(log(diag(l1)))
  # Posterior precision after conditioning on the observations.
  q2 <- q1 + crossprod(A)/exp(pars[1])
  l2 <- chol(q2)
  ld2 <- sum(log(diag(l2)))
  # Log-determinant term of the marginal likelihood.
  ldet <- ld2 - ld1 + pars[1]*nrow(A)/2
  # Marginal precision-weighting matrix of the observations
  # (Sherman-Morrison-Woodbury form).
  W <- Diagonal(nrow(A), exp(-pars[1])) -
    (A%*%solve(q2, t(A)))/exp(pars[1]*2)
  # Generalized least-squares estimate of beta.
  xw <- crossprod(X, W)
  betah <- drop(solve(xw%*%X, xw%*%y))
  z <- drop(y-X%*%betah)
  # Profiled variance scale from the weighted residuals.
  s2x.h <- mean(crossprod(W, z)@x*z)
  if (verbose) print(c(betah, s2x.h, pars))
  return(ldet + nrow(A)*(1+log(2*pi*s2x.h))/2)
}
# Convert optimizer parameters pars = (log noise variance, log kappa) into
# user-scale quantities: the profiled regression coefficients, noise
# variance, field variance scale and range parameter.  Mirrors the profiling
# done in negLogLikFun.
par2user <- function(pars, X, A, y, spde) {
  # Prior precision of the latent field at the current kappa.
  q1 <- inla.spde2.precision(spde, c(pars[2], 0))
  # Fix: removed the Cholesky/log-determinant computations (l1, ld1, l2,
  # ld2, ldet) that were copied over from negLogLikFun but never used here
  # -- they were pure wasted O(m^3) work.
  # Posterior precision after conditioning on the observations.
  q2 <- q1 + crossprod(A)/exp(pars[1])
  # Marginal precision-weighting matrix of the observations.
  W <- Diagonal(nrow(A), exp(-pars[1])) -
    (A%*%solve(q2, t(A)))/exp(pars[1]*2)
  # Generalized least-squares estimate of beta.
  xw <- crossprod(X, W)
  betah <- drop(solve(xw%*%X, xw%*%y))
  z <- drop(y-X%*%betah)
  # Profiled variance scale from the weighted residuals.
  s2x.h <- mean(crossprod(W, z)@x*z)
  c(beta=betah, s2e=exp(pars[1])*s2x.h,
    s2x=s2x.h, range=exp(pars[2]))
}
# Negative log-likelihood via the Gaussian-approximation identity
# -log p(y|theta) = log p(x|y) - log p(y|x) - log p(x), where qfun(theta)
# returns the latent-field precision.  With only.value=FALSE the posterior
# mean and the precision matrices are attached as attributes.
negLogLikFunGA <- function(theta, A, y, qfun,
                           verbose=0, only.value=TRUE) {
  qx <- qfun(theta)
  lx <- chol(qx)
  # Posterior precision and mean of the latent field given y.
  qx.y <- qx + crossprod(A)/exp(theta[1])
  lx.y <- chol(qx.y)
  mx.y <- solve(qx.y, crossprod(A, y)/exp(theta[1]))
  if (verbose)
    print(theta)
  n <- nrow(A); m <- ncol(A)
  # Gaussian log-densities evaluated at the posterior mean.
  z <- y-A%*%mx.y
  py <- (-n*(log(2*pi) + theta[1]) -sum(z*z)/exp(theta[1]))/2
  px <- sum(log(diag(lx))) -sum(crossprod(qx,mx.y)*mx.y)/2
  px.y <- sum(log(diag(lx.y)))
  nll <- px.y-py-px
  if (!only.value) {
    attr(nll, 'x') <- mx.y
    attr(nll, 'qx') <- qx
    attr(nll, 'qx.y') <- qx.y
  }
  return(nll)
}
# Build the dual polygons (Voronoi-like cells, one per mesh node) of a 2D
# INLA mesh as a SpatialPolygons object.  Errors for non-R2 manifolds.
inla.mesh.dual <- function(mesh) {
  if (mesh$manifold=='R2') {
    # Centroid of each mesh triangle.
    ce <- t(sapply(1:nrow(mesh$graph$tv), function(i)
      colMeans(mesh$loc[mesh$graph$tv[i, ], 1:2])))
    # NOTE(review): library() inside a function modifies the search path as
    # a side effect; parallel::mclapply with requireNamespace would be
    # cleaner.
    library(parallel)
    pls <- mclapply(1:mesh$n, function(i) {
      # Collect the triangle centroids and edge midpoints surrounding
      # node i, over its three possible positions within each triangle.
      p <- unique(Reduce('rbind', lapply(1:3, function(k) {
        j <- which(mesh$graph$tv[,k]==i)
        if (length(j)>0)
          return(rbind(ce[j, , drop=FALSE],
                       cbind(mesh$loc[mesh$graph$tv[j, k], 1] +
                               mesh$loc[mesh$graph$tv[j, c(2:3,1)[k]], 1],
                             mesh$loc[mesh$graph$tv[j, k], 2] +
                               mesh$loc[mesh$graph$tv[j, c(2:3,1)[k]], 2])/2))
        else return(ce[j, , drop=FALSE])
      })))
      # Boundary nodes additionally include the node itself and the
      # midpoints of the adjacent boundary segments.
      j1 <- which(mesh$segm$bnd$idx[,1]==i)
      j2 <- which(mesh$segm$bnd$idx[,2]==i)
      if ((length(j1)>0) | (length(j2)>0)) {
        p <- unique(rbind(mesh$loc[i, 1:2], p,
                          mesh$loc[mesh$segm$bnd$idx[j1, 1], 1:2]/2 +
                            mesh$loc[mesh$segm$bnd$idx[j1, 2], 1:2]/2,
                          mesh$loc[mesh$segm$bnd$idx[j2, 1], 1:2]/2 +
                            mesh$loc[mesh$segm$bnd$idx[j2, 2], 1:2]/2))
        yy <- p[,2]-mean(p[,2])/2-mesh$loc[i, 2]/2
        xx <- p[,1]-mean(p[,1])/2-mesh$loc[i, 1]/2
      }
      else {
        yy <- p[,2]-mesh$loc[i, 2]
        xx <- p[,1]-mesh$loc[i, 1]
      }
      # Order vertices by angle around the node to form a simple polygon.
      Polygon(p[order(atan2(yy,xx)), ])
    })
    return(SpatialPolygons(lapply(1:mesh$n, function(i)
      Polygons(list(pls[[i]]), i))))
  }
  else stop("It only works for R2!")
}
# Interpolated sequential color palette built from 9-step anchor ramps.
#
# Args:
#   n:    number of colors to generate (must be >= 2).
#   type: base ramp, one of "red", "green", "blue" (partial matching OK).
#   u:    optional positions in [0, 1]; defaults to n equally spaced values.
# Returns a character vector of n hex color strings.
genColor <- function(n, type=c('red', 'green', 'blue'), u=NULL) {
  # 9 anchor colors (0-255 channel values) for each ramp.
  cbp <- list(
    red = list(c(255, 254, 252, 252, 251, 239, 203, 165, 103),
               c(245, 224, 187, 146, 106, 59, 24, 15, 0),
               c(240, 210, 161, 114, 74, 44, 29, 21, 13)),
    green = list(c(247, 229, 199, 161, 116, 65, 35, 0, 0),
                 c(252, 245, 233, 217, 196, 171, 139, 109, 68),
                 c(245, 224, 192, 155, 118, 93, 69, 44, 27)),
    blue = list(c(247, 222, 198, 158, 107, 66, 33, 8, 8),
                c(251, 235, 219, 202, 174, 146, 113, 81, 48),
                c(255, 247, 239, 225, 214, 198, 181, 156, 107)))
  # Fix: the guard rejects n < 2, but the old message claimed
  # "Works for 'n>2'!", contradicting the actual condition.
  if (n<2) stop("Works for 'n >= 2'!")
  if (is.null(u))
    u <- 0:(n-1)/(n-1)
  u0 <- 0:8/8
  # Locate each position between anchors (rightmost.closed so u = 1 lands
  # in the final interval) and blend the two surrounding anchors linearly.
  i <- findInterval(u, u0, TRUE)
  k <- pmatch(match.arg(type), c('red', 'green', 'blue'))
  w1 <- 8*(u0[i+1]-u)/255; w2 <- 8*(u-u0[i])/255
  rgb(cbp[[k]][[1]][i]*w1 + cbp[[k]][[1]][i+1]*w2,
      cbp[[k]][[2]][i]*w1 + cbp[[k]][[2]][i+1]*w2,
      cbp[[k]][[3]][i]*w1 + cbp[[k]][[3]][i+1]*w2)
}
# S3 plot method for dgTMatrix sparse matrices: rounds the nonzero values,
# builds a diverging red/white/blue palette centered on zero (or a grayscale
# ramp when all values share a sign), and renders via Matrix's image() with
# breaks placed midway between the distinct rounded values.
# NOTE(review): if a caller actually supplies `digits=` or `colors=` through
# `...`, cl$digits / cl$colors are non-NULL so the local variable is never
# assigned, and the later use would fail with "object not found" -- confirm
# whether these are meant to be real arguments and fix accordingly.
plot.dgTMatrix <- function(x, y, ...) {
  cl <- match.call()
  if (is.null(cl$digits))
    digits <- 2
  # Distinct rounded values present in the matrix.
  z <- sort(unique(round(x@x, digits)))
  nz <- length(z)
  n1 <- sum(z<0)
  n2 <- sum(z>0)
  if (is.null(cl$colors))
    if (any(c(n1,n2)==0))
      # Single-signed data: grayscale ramp over the value range.
      colors <- gray(0.9*(1-(z-min(z))/diff(range(z))))
    else
      # Mixed signs: red for negatives, white around zero, blue for positives.
      colors <- c(genColor(n1, 'red', z[z<0]/min(z)),
                  rep('white', nz-n1-n2),
                  genColor(n2, 'blue', z[z>0]/max(z)))
  # Breaks sit halfway between consecutive distinct values.
  z.breaks <- c(z[1]-diff(z[1:2])/2,
                z[-nz]/2 + z[-1]/2,
                z[nz]+diff(z[nz-1:0])/2)
  x@x <- round(x@x, digits)
  image(x, at=z.breaks, col.regions=colors, ...)
}
|
9222d0d26e695e13ba5d5f09c495d4d15b0f5793
|
58fb33dd28029b82fb688bc8ab537354a46b59c1
|
/Q1.R
|
e37ccd91a5198f39095d43648e4c2657985d1101
|
[] |
no_license
|
sarveshsuresh/SamplingAssignment2
|
26c6b4ce7ddd6ed801f1bdc5b99921369c786268
|
f6a972cedeeca4ae73a76fb99d3e05838189c736
|
refs/heads/main
| 2023-07-19T20:51:46.703324
| 2021-08-30T09:05:47
| 2021-08-30T09:05:47
| 401,261,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,859
|
r
|
Q1.R
|
# Repeated-sampling experiment: for sample sizes n = 10, 20, ..., 1000
# (100 replicates each), draw samples from several distributions and record
# the sample mean and a rescaled variance estimate per replicate.
# NOTE(review): uses mvrnorm (MASS) and %>%/group_by/summarise_all (dplyr)
# without visible library() calls -- confirm they are loaded upstream.
# NOTE(review): results are grown with append() inside nested loops, which
# copies the vectors on every iteration (O(n^2)); preallocating would be
# substantially faster.
sam_size=c()
N=10
bm=c()
bv=c()
pm=c()
pv=c()
nm=c()
nv=c()
bn1m=c()
bn1v=c()
bn2m=c()
bn2v=c()
for(j in 1:100){
  n=j*N
  print(j)
  for(i in 1:100){
    sam_size=append(sam_size,n)
    # Binomial(12, 0.25) sample: mean and n/(n-1)-rescaled variance.
    binom_sample=rbinom(n,12,0.25)
    #binom_sample
    binom_mean=mean(binom_sample)
    binom_variance=(n*var(binom_sample))/(n-1)
    bm=append(bm,binom_mean)
    bv=append(bv,binom_variance)
    # Poisson(5.7) sample.
    poi_sample=rpois(n,5.7)
    poi_sample
    poi_mean=mean(poi_sample)
    poi_variance=(n*var(poi_sample))/(n-1)
    pm=append(pm,poi_mean)
    pv=append(pv,poi_variance)
    # Normal(mean 3.5, variance 5.75) sample.
    norm_sample=rnorm(n,mean=3.5,sd=sqrt(5.75))
    norm_sample
    norm_mean=mean(norm_sample)
    norm_variance=(n*var(norm_sample))/(n-1)
    nm=append(nm,norm_mean)
    nv=append(nv,norm_variance)
    # Bivariate normal with correlation 0.79 between the two components.
    mu=c(15.7,42.8)
    s1=9.2
    s2=11.7
    rho=0.79
    sigma=matrix(c(s1^2, s1*s2*rho, s1*s2*rho, s2^2),2)
    biv_norm_sample=as.data.frame(mvrnorm(n, mu = mu, Sigma = sigma ))
    colnames(biv_norm_sample)=c('BN1','BN2')
    biv_norm_sample
    bn1_mean=mean(biv_norm_sample$BN1)
    bn1_variance=(n*var(biv_norm_sample$BN1))/(n-1)
    bn1m=append(bn1m,bn1_mean)
    bn1v=append(bn1v,bn1_variance)
    bn2_mean=mean(biv_norm_sample$BN2)
    bn2_variance=(n*var(biv_norm_sample$BN2))/(n-1)
    bn2m=append(bn2m,bn2_mean)
    bn2v=append(bn2v,bn2_variance)
  }
}
# Assemble per-replicate results, summarize by sample size, and save both
# tables.  NOTE(review): output paths are hard-coded absolute Windows paths.
sum_df=data.frame(
  sam_size,
  bm,
  bv,
  pm,
  pv,
  nm,
  nv,
  bn1m,
  bn1v,
  bn2m,bn2v)
colnames(sum_df)=c('Sample_Size','Binomial Mean','Binomial Variance','Poisson Mean','Poisson Variance','Normal mean','Normal variance',
                   'BivNorm1mean','BivNorm1variance','BivNorm2mean','BivNorm2Variance')
summary_df=sum_df%>%group_by(Sample_Size)%>%summarise_all(mean)
write.csv(sum_df,'D:/sum_df.csv')
write.csv(summary_df,'D:/summary_df.csv')
|
14cb397ae0d6056034788326eeaf32802f0d1f93
|
387f308b3c2283a1491028dae275c34001292aa4
|
/src/session01/example1.1.R
|
6f0e6bf697f85d71ebefc343e4b6adc8eae29a2d
|
[] |
no_license
|
limves/MAIM
|
2185c95b318beba2021701dc85fb28312109d8a9
|
ba96e77874bbddafd55fa0aa9ea9b21ec65a0cf3
|
refs/heads/master
| 2020-04-17T06:43:48.632029
| 2016-08-23T04:02:57
| 2016-08-23T04:02:57
| 66,323,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,338
|
r
|
example1.1.R
|
# Knapsack example with a genetic algorithm (genalg): choose items to
# maximize survival points subject to a total weight limit.
library(genalg)
library(ggplot2)
dataset <- data.frame(item = c("pocketknife", "beans", "potatoes", "unions",
                               "sleeping bag", "rope", "compass"), survivalpoints = c(10, 20, 15, 2, 30,
                                                                                     10, 30), weight = c(1, 5, 10, 1, 7, 5, 1))
weightlimit <- 20
# A chromosome is a 0/1 inclusion vector over the seven items.
chromosome = c(1, 0, 0, 1, 1, 0, 0)
dataset[chromosome == 1, ]
## item survivalpoints weight
## 1 pocketknife 10 1
## 4 unions 2 1
## 5 sleeping bag 30 7
# Total survival points of the selected items (inner product).
cat(chromosome %*% dataset$survivalpoints)
## 42
# Fitness function: rbga.bin minimizes, so return negated survival points;
# weight-limit violations are scored 0 (worst possible here).
evalFunc <- function(x) {
  current_solution_survivalpoints <- x %*% dataset$survivalpoints
  current_solution_weight <- x %*% dataset$weight
  if (current_solution_weight > weightlimit)
    return(0) else return(-current_solution_survivalpoints)
}
# Run the binary GA for 100 generations with a population of 200.
iter = 100
GAmodel <- rbga.bin(size = 7, popSize = 200, iters = iter, mutationChance = 0.01,
                    elitism = T, evalFunc = evalFunc)
cat(summary(GAmodel))
# Best solution found by the GA and the items it selects.
solution = c(1, 1, 1, 1, 1, 0, 1)
dataset[solution == 1, ]
## item survivalpoints weight
## 1 pocketknife 10 1
## 2 beans 20 5
## 3 potatoes 15 10
## 4 unions 2 1
## 5 sleeping bag 30 7
## 7 compass 30 1
|
2f78bbb725a2478617de98810e2bc846371387af
|
928312a96a59805cea732721ffc366202559e007
|
/R/build_ssgsea.R
|
3dc7086c3088d121e00287c6858b5f29a2921768
|
[] |
no_license
|
pujana-lab/systematicBNR
|
3c81f9b4818b5281d1b71ba98d1bdaf0f17f5ccb
|
fa44d30b07300e27dac8141d26a5421d70530fca
|
refs/heads/master
| 2022-11-07T13:03:25.655488
| 2020-06-10T10:49:49
| 2020-06-10T10:49:49
| 269,133,821
| 1
| 0
| null | 2020-06-08T18:10:12
| 2020-06-03T16:04:51
|
R
|
UTF-8
|
R
| false
| false
| 470
|
r
|
build_ssgsea.R
|
#' Perform the ssGSEA pipeline for related signatures in a defined genome.
#'
#' Thin wrapper around GSVA's ssGSEA implementation with a Gaussian kernel
#' CDF pre-selected.
#'
#' @param genome Genome expression object in matrix format, with samples in
#'   columns and genes in rows.
#' @param signatures Signatures list object.
#' @param ... Extra parameters forwarded to \code{GSVA::gsva}.
#' @return The enrichment-score matrix produced by \code{GSVA::gsva}.
build_ssgsea <- function(genome, signatures, ... ){
  GSVA::gsva(genome, signatures, method = 'ssgsea', kcdf = 'Gaussian', ...)
}
|
c54e18467776dc9f7c685b08635a677bc6ba532b
|
78b8a059bca8dbb4dedce7699c9b41ea66b22647
|
/Shiny app example 1.R
|
9bd9b0c7a770394c0d27fdaf42704d04d50c7582
|
[] |
no_license
|
uwolanowska1/Advanced-Data-Analysis-course
|
40ee13a1a4bfbf486227d98a2230cb346c71a9eb
|
792465ec7cbf7284b68c94b1c00c497561475f10
|
refs/heads/main
| 2023-06-07T05:24:55.346704
| 2021-07-07T12:46:35
| 2021-07-07T12:46:35
| 381,996,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,926
|
r
|
Shiny app example 1.R
|
library(shiny)
library(palmerpenguins)
library(ggplot2)
library(dplyr)
# Load the Palmer penguins data and give the measurement columns
# human-readable, space-separated names so they read well as axis labels.
penguins <- palmerpenguins::penguins
colnames(penguins) <- c('species', 'islands', 'bill length mm',
                        'bill depth mm', 'flipper length mm', 'body mass g',
                        'sex', 'year')
# UI: sidebar with controls for the x/y variables, optional species
# coloring and year filtering; the main panel holds the violin plot.
ui <- fluidPage(
  titlePanel('Task 1 - Data Visualisation Shiny app 2322312w'),
  sidebarLayout(fluid = TRUE,
                sidebarPanel(
                  # Categorical variable on the x-axis.
                  selectInput('xInput', label = 'x-axis',
                              choices = c('islands', 'sex'),
                              selected = 'sex'),
                  # Numeric measurement on the y-axis.
                  selectInput('yInput', label = 'y-axis',
                              choices = c('bill length mm', 'bill depth mm',
                                          'flipper length mm', 'body mass g'),
                              selected = 'bill length mm'),
                  # Whether to color the violins by species.
                  selectInput('speciesInput', label = 'Show different species levels',
                              choices = c('Yes', 'No'),
                              selected = 'No'),
                  # Zero or more years; each selected year becomes a facet.
                  checkboxGroupInput('yearInput', label = 'Year',
                                     choices = c('2007', '2008', '2009'))
                ),
                mainPanel(
                  plotOutput('coolplot')
                )
  ))
# Server: renders a violin plot driven by the sidebar controls.
# Fix: year filtering now uses %in% instead of `year == c(input$yearInput)`.
# `==` against a vector recycles element-wise, which silently drops rows
# (and warns) whenever more than one year is checked; %in% keeps every row
# belonging to any selected year.
server <- function(input,output, session){
  output$coolplot <- renderPlot({
    if(is.null(input$yearInput)){
      # No year checked: plot the full data set, optionally split by species.
      if(input$speciesInput == 'Yes'){
        ggplot(data = penguins,
               aes(x = .data[[input$xInput]], y = .data[[input$yInput]])) +
          geom_violin(aes(colour = species))
      } else {
        ggplot(data = penguins,
               aes(x = .data[[input$xInput]], y = .data[[input$yInput]])) +
          geom_violin()
      }
    } else if(!is.null(input$yearInput)){
      # Keep only the checked years; one facet per year.
      filtered.penguins <- penguins %>%
        filter(year %in% input$yearInput)
      if(input$speciesInput == 'Yes'){
        ggplot(data = filtered.penguins,
               aes(x = .data[[input$xInput]], y = .data[[input$yInput]])) +
          geom_violin(aes(colour = species), na.rm = FALSE) +
          facet_wrap(~year, dir = 'v', drop = FALSE)
      } else {
        ggplot(data = filtered.penguins,
               aes(x = .data[[input$xInput]], y = .data[[input$yInput]])) +
          geom_violin() +
          facet_wrap(~year, dir = 'v', drop = FALSE)
      }
    }
  })
}
shinyApp(ui = ui, server = server)
|
03da88278167e19680f14468ba9bec7e1b385146
|
d6ff90166d2f6fbacdb23b8062666b2e122b7ac9
|
/data_preprocessing_template.R
|
60cac2f1d7c66b4fbadf50d4ce348e9bbf1977e8
|
[] |
no_license
|
PSAB/pythonML1
|
5d1db5501c7f453ff1663a5e43eb8ae5e4459f55
|
fb7a03343a533dfabbb1a3d7f6f5e670bed48106
|
refs/heads/master
| 2020-03-12T12:30:49.430382
| 2018-09-18T19:12:10
| 2018-09-18T19:12:10
| 130,620,009
| 0
| 0
| null | 2018-04-23T01:53:49
| 2018-04-23T00:34:43
|
Python
|
UTF-8
|
R
| false
| false
| 737
|
r
|
data_preprocessing_template.R
|
# Data Preprocessing Template
# Importing the dataset
dataset = read.csv('Data.csv')
# Taking care of missing data: replace NA in the Age and Salary columns
# with the respective column mean.
# ifelse takes 3 values: the condition, the value used where the condition
# is TRUE, and the value used where it is FALSE.
dataset$Age = ifelse(is.na(dataset$Age), ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)), dataset$Age)
# Where a value was missing, it has been replaced with the column average.
# Do the same thing for the Salary column.
dataset$Salary = ifelse(is.na(dataset$Salary), ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)), dataset$Salary)
# Encoding categorical data
# Use the factor function to transform categorical data into numerical values
# NOTE(review): the comment above announces factor() encoding but no such
# code appears in this chunk -- confirm whether it was intentionally omitted.
|
c6b70d01eb1a1ce5a65665f4d37a34a7c4514878
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2015/4.EDA-graphics/eda-graphics.R
|
8c09cc9034b6877b7e74ca5f81348ddea0e910ee
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996
| 2017-08-01T04:39:07
| 2017-08-01T04:39:07
| 101,746,310
| 1
| 0
| null | 2017-08-29T09:53:49
| 2017-08-29T09:53:49
| null |
UTF-8
|
R
| false
| false
| 392
|
r
|
eda-graphics.R
|
library(ggplot2)
# Exploratory tables and plots of the students data set.
# NOTE(review): the CSV is read from an absolute local path; parameterize
# this if the script is shared.
students=read.csv("E:/data analytics/datasets/students.csv")
dim(students)
summary(students$Sex)
table(students$Level)
with(students, table(Level))
# Bar chart of blood-type frequencies.
ggplot(students, aes(x = BloodType)) + geom_bar()
# Two-way frequency table of sex by level.
with(students, table(Sex, Level))
# Fix: removed a stray "`-" left at the end of this line, which made the
# script fail to parse.
ggplot(students, aes(x = Level, fill = BloodType)) + geom_bar(position = "dodge")
# Dot plot of student heights.
ggplot(students, aes(x = Height)) + geom_dotplot()
493aa67f84d3eb058244cba2e06a2719e8a6430e
|
f604c8b18cd46043a1c5a52b8ae8e82d57ce0ed1
|
/configure_template.R
|
c29222e0537699a6164abf9f31dde52dd1ed4ea0
|
[
"Apache-2.0"
] |
permissive
|
rstudio/distill
|
0d720ffb8041efae435ac8e3e08beecbd6f9dee7
|
ac5e3bf1dca2054a5bf61cfe81b59d7bdb5e3705
|
refs/heads/main
| 2023-09-01T03:47:43.142431
| 2023-08-28T16:44:23
| 2023-08-28T16:44:23
| 130,758,590
| 323
| 79
|
Apache-2.0
| 2023-08-28T16:44:24
| 2018-04-23T21:24:50
|
HTML
|
UTF-8
|
R
| false
| false
| 2,166
|
r
|
configure_template.R
|
#!/usr/bin/env Rscript
# Clone the rstudio/template repository (radix branch) into a temporary
# directory and build the distill template bundle with npm.
library(git2r)
library(stringr)
library(readr)
path <- file.path(tempfile(pattern = "git2r-"), "template")
dir.create(path, recursive = TRUE)
repo <- clone("https://github.com/rstudio/template", branch = "radix", path)
system2("npm", args = c("--prefix", path, "install"))
system2("npm", args = c("--prefix", path, "run", "build"))
# Adapt the built template script for distill: insert `#`-comment
# highlighting patterns and replace the function-name regexes in the two
# embedded clike Prism grammars, then wrap everything in a loader function.
# `script` is a character vector of lines; returns the transformed lines.
transform_for_distill <- function(script) {
  # Expect `Prism.languages.clike` and `n.languages.clike`
  count_match <- str_count(script, "(?<=\\.languages.clike=\\{)")
  if (!identical(2L, num_matches <- sum(count_match)))
    # Fix: the previous message interpolated `p`, which is only defined
    # inside the loops below, so raising this error itself errored with
    # "object 'p' not found".
    stop(paste0(num_matches, " matches for `.languages.clike={` found, but two expected."), call. = FALSE)
  line_to_modify <- which(count_match == 2L)
  # Add comment patterns, being explicit about keys
  comment_patterns <- c(
    "(?<=Prism\\.languages\\.clike=\\{comment:\\[)",
    "(?<=n\\.languages\\.clike=\\{comment:\\[)"
  )
  comment_pattern_to_insert <- "{pattern:/(^|[^\\\\])#.*/,lookbehind:!0},"
  for (p in comment_patterns) {
    if (!str_detect(script[[line_to_modify]], p))
      stop("Comment pattern insertion failed: cannot find pattern `", p, "`.", call. = FALSE)
    splitted <- script[[line_to_modify]] %>%
      str_split(p) %>%
      unlist()
    script[[line_to_modify]] <- paste0(splitted[[1]], comment_pattern_to_insert, splitted[[2]])
  }
  # Replace function patterns
  function_patterns <- c(
    "(?<=Prism\\.languages\\.clike=\\{.{0,550},function:)(.+?)(?=,number)",
    "(?<=n\\.languages\\.clike=\\{.{0,550},function:)(.+?)(?=,number)"
  )
  function_pattern_replacement <- "/[a-z\\\\.0-9_]+(?=\\\\()/i"
  for (p in function_patterns) {
    if (!str_detect(script[[line_to_modify]], p))
      stop("Function pattern replacement failed: cannot find pattern `", p, "`.", call. = FALSE)
    script[[line_to_modify]] <- script[[line_to_modify]] %>%
      str_replace(p, function_pattern_replacement)
  }
  # Function wrapper
  c("function load_distill_framework() {", script, "}")
}
# Transform the built bundle and write it into the package's www assets,
# then remove the temporary clone.
read_lines(file.path(path, "dist", "template.v2.js")) %>%
  transform_for_distill() %>%
  write_lines("inst/www/distill/template.v2.js")
unlink(path, recursive = TRUE)
|
aa8e0ec5900137e864f1f000c729bc28315f2e61
|
4d6a64ab3bc0813e6888d58eeff8a809739f826b
|
/FishBase/man/fb_ecotox.Rd
|
f6a16ee30d20367cfec40fcc6d167711d70df35c
|
[
"Apache-2.0"
] |
permissive
|
cornejotux/FishBaseR
|
2d54075078238c89887ed8356b878c6f1cb7b900
|
72fbabbda485583fbcf06fb5502593e0f6e7fe7a
|
refs/heads/master
| 2022-12-25T18:03:43.153346
| 2020-09-29T20:13:50
| 2020-09-29T20:13:50
| 299,727,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,225
|
rd
|
fb_ecotox.Rd
|
\name{fb_ecotox}
\alias{fb_ecotox}
\title{
Obtain a list of CL50 by chemical used.
}
\description{
Download, as a table, the list of chemicals and their CL50 values for the species. Reads the HTML webpage, recognizes the table and transforms it into a data.frame object.
}
\usage{
fb_ecotox(idFB, Genus, Species, server)
}
\arguments{
\item{idFB}{
FishBase id of the species for which to download the data
}
\item{Genus}{
Genus part of the scientific specific name to download the data
}
\item{Species}{
Species part of the scientific specific name to download the data
}
\item{server}{
Optional, set up the desired (closer, faster) FishBase server
}
}
\details{
idFB is required. If is not available, it is possible to use Genus and Species and the function \link{fb_ids} will obtain the idFB.
}
\value{
\item{ChemName}{Chemical name used in the Ecotoxycology assay.}
\item{CL50}{Concentration (mg/l) at which half of the individuals die.}
\item{ExpTime}{Duration time of the assay.}
\item{Reference}{Reference ID to the cite in FishBase system.}
}
\seealso{
\code{\link{fb_ids}}
}
\examples{
fb_ecotox(idFB = 236, server = "http://www.fishbase.de/")
}
\keyword{FishBase}
\keyword{CL50}
\keyword{bioassay}
\keyword{ecotoxicology}
|
4bd02b91a0a8f1179e2f4460da3c544b44ae9c60
|
af582f033de3521151b72fa326c4bf135a23c651
|
/R/spectral-plots.R
|
f7aa332ead92dc8d5e5f8db3527ee5eb39e90a6f
|
[] |
no_license
|
mbjoseph/hyperspec-beta
|
51f8aac80f3505de7a1e959783d178b8b2a2ae0e
|
cf40c8059dc24c5775b0d345a8950776a744acd3
|
refs/heads/master
| 2020-04-06T06:59:12.945261
| 2017-10-02T22:39:49
| 2017-10-02T22:39:49
| 61,833,137
| 0
| 2
| null | 2016-07-13T16:00:55
| 2016-06-23T20:02:43
|
HTML
|
UTF-8
|
R
| false
| false
| 4,769
|
r
|
spectral-plots.R
|
library(tidyverse)
library(ggthemes)
# Load NEON plant spectra, drop metadata columns, and reshape to one row
# per (individual, wavelength).  Reflectance in the water-absorption bands
# (~1340-1430 nm and ~1800-1950 nm) is masked with NA, and species with 30
# or fewer spectra are dropped.
# NOTE(review): the id column is spelled `indvidualid` here but
# `individualid` is used in distinct() below -- one of these spellings must
# be wrong; confirm against the CSV header.
d <- read_csv('data/neon_plants.csv') %>%
  select(-X1, -starts_with('site'), -plotid,
         -easting, -northing, -taxonid, -pointid, -individualdistance, -individualazimuth,
         -starts_with('dbh'), - starts_with('basal'), -starts_with('canopy'),
         -starts_with('stem'), -livingcanopy, -inplotcanopy, -materialsampleid,
         -chm_height, -maxcanopydiam) %>%
  gather(wavelength, reflectance, -scientificname, -indvidualid) %>%
  mutate(wavelength = parse_number(wavelength)) %>%
  filter(reflectance > 0, reflectance < 1) %>%
  group_by(scientificname) %>%
  mutate(n_examples = n() / length(unique(wavelength))) %>%
  filter(n_examples > 30, wavelength > 100) %>%
  mutate(reflectance = ifelse((wavelength > 1340 & wavelength < 1430) |
                                (wavelength > 1800 & wavelength < 1950),
                              NA, reflectance))
# Per-species median and interquartile reflectance at each wavelength.
# NOTE(review): median() lacks na.rm = TRUE (unlike the quantiles), so the
# masked bands yield NA medians -- confirm that is the intent.
summary_d <- d %>%
  group_by(scientificname, wavelength) %>%
  summarize(median = median(reflectance),
            q25 = quantile(reflectance, .25, na.rm = TRUE),
            q75 = quantile(reflectance, .75, na.rm = TRUE))
# Attach common names and a coarse functional group per species; keep one
# row per individual.
d <- d %>%
  separate(scientificname, c('genus', 'species'), remove = FALSE) %>%
  ungroup %>%
  mutate(common_name = case_when(
    .$scientificname == 'Arctostaphylos viscida' ~ 'Sticky whiteleaf manzanita ',
    .$scientificname == 'Calocedrus decurrens' ~ 'Incense cedar',
    .$scientificname == 'Ceanothus cuneatus' ~ 'Buck brush',
    .$scientificname == 'Ceanothus integerrimus' ~ 'Deer brush',
    .$scientificname == 'Lupinus albifrons' ~ 'Silver lupine',
    .$scientificname == 'Pinus ponderosa' ~ 'Ponderosa pine',
    .$scientificname == 'Quercus chrysolepis' ~ 'Canyon live oak',
    .$scientificname == 'Quercus kelloggii' ~ 'California black oak',
    .$scientificname == 'Quercus wislizeni' ~ 'Interior live oak')) %>%
  mutate(facet_label = paste0(scientificname, ' (', common_name, ')')) %>%
  mutate(functional_group = case_when(
    .$genus %in% c('Quercus', 'Arctostaphylos') ~ 'Deciduous trees',
    .$genus %in% c('Pinus', 'Calocedrus') ~ 'Coniferous trees',
    .$genus %in% c('Ceanothus', 'Lupinus') ~ 'Shrubs')) %>%
  distinct(individualid, .keep_all = TRUE)
# Scatter of all spectra (unlabeled), one faint trace per individual.
d %>%
  ggplot(aes(x = wavelength, reflectance,
             group = indvidualid)) +
  geom_point(alpha = .1, size = .1, pch = 1) +
  theme_minimal() +
  xlab('Wavelength (nm)') +
  ylab('Reflectance') +
  theme(legend.position = 'none')
ggsave(filename = 'unlabeled-spectra.png', width = 9, height = 5)
d %>%
ggplot(aes(x = wavelength, reflectance, group = indvidualid, color = genus)) +
geom_point(alpha = .1, size = .1, pch = 1) +
theme_minimal() +
scale_color_gdocs() +
xlab('Wavelength (nm)') +
ylab('Reflectance') +
facet_wrap(~ facet_label, strip.position = 'bottom') +
theme(legend.position = 'none')
ggsave(filename = 'species-dict.png', width = 9, height = 5)
d %>%
ggplot(aes(x = wavelength, reflectance, group = indvidualid, color = genus)) +
geom_point(alpha = .1, size = .5) +
theme_minimal() +
scale_color_gdocs('Genus') +
xlab('Wavelength (nm)') +
ylab('Reflectance') +
facet_wrap(~ functional_group) +
guides(colour = guide_legend(override.aes = list(alpha = 1, size = 1))) +
theme(legend.text = element_text(face = 'italic'))
ggsave(filename = 'function-group-dict.png', width = 8, height = 2.5)
d %>%
filter(functional_group != 'Shrubs') %>%
group_by(functional_group, wavelength) %>%
summarize(mean = mean(reflectance),
sd = sd(reflectance, na.rm = TRUE),
lo = mean - sd,
hi = mean + sd) %>%
ggplot(aes(wavelength, mean, color = functional_group, fill = functional_group)) +
theme_minimal() +
geom_ribbon(aes(ymin = lo, ymax = hi), color = NA,
alpha = .5) +
geom_line() +
scale_color_gdocs('') +
scale_fill_gdocs('') +
xlab('Wavelength (nm)') +
ylab('Reflectance') +
theme(legend.position = 'top')
ggsave(filename = 'mean-sd.png', width = 6, height = 3)
d %>%
filter(functional_group != 'Shrubs') %>%
group_by(functional_group, wavelength) %>%
summarize(mean = mean(reflectance),
sd = sd(reflectance, na.rm = TRUE),
lo = mean - sd,
hi = mean + sd) %>%
ggplot(aes(wavelength, mean, color = functional_group, fill = functional_group)) +
theme_minimal() +
geom_ribbon(aes(ymin = lo, ymax = hi), color = NA,
alpha = .5) +
geom_line() +
scale_color_gdocs('') +
scale_fill_gdocs('') +
xlab('Wavelength (nm)') +
ylab('Reflectance') +
theme(legend.position = 'none') +
facet_wrap(~ functional_group)
ggsave(filename = 'mean-sd-panels.png', width = 6, height = 3)
|
0420d6c05b66aed2f06ccc0a9741af7f08ae116c
|
7759c70261a6f897d2595105f0b137e2ea2c883a
|
/man/whop.eg.selectOrganism.Rd
|
78a108414055722e648e7b90dddef84e69956a22
|
[] |
no_license
|
cran/WhopGenome
|
b0b7ef46183496bf6ab8e3e2a0eb10c7d5ba84d7
|
81b6f858a5c964ea72870f36d787c1db28ce6b6b
|
refs/heads/master
| 2022-02-04T05:14:35.344752
| 2017-03-13T16:10:56
| 2017-03-13T16:10:56
| 17,694,110
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 620
|
rd
|
whop.eg.selectOrganism.Rd
|
\name{whop.eg.selectOrganism}
\alias{whop.eg.selectOrganism}
\title{
Select the organism to query with subsequent whop.eg calls and load the appropiate database(s).
}
\description{
Select the organism to query with subsequent whop.eg calls and load the appropiate database(s).
}
\usage{
whop.eg.selectOrganism(organismname, dontload = FALSE, install.if.missing = F)
}
\arguments{
\item{organismname}{Organism to query}
\item{dontload}{Whether to load the database}
\item{install.if.missing}{Whether to install the database, if it does not exist locally}
}
\value{
Success status
}
\author{
Ulrich Wittelsbuerger
}
|
a4b67e6f6a1dae66551152fbca2e986ce7299511
|
574fba4068d17d46857e4aa3373fce5fab65c121
|
/KYTJ.Web/R/Pro/9.4/9.4.R
|
1239a0ae1beb91987382508c1d0f1b1cbbd5e55b
|
[] |
no_license
|
cykb518hu/kytj
|
96ad38d942e35257eceb7f09da047b5127f5c3d0
|
da952767348304207c2c4f805490f7c0271ce506
|
refs/heads/master
| 2023-06-10T08:30:30.554410
| 2021-06-29T02:35:36
| 2021-06-29T02:35:36
| 349,309,075
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 18,437
|
r
|
9.4.R
|
args <- commandArgs()
rlib<-args[6] #libary path
if (is.na(rlib)) {output<-args[6]} else {output<-args[7]} #param path
print(output)
#output<-c('D:\\Works\\产品\\科研\\代码\\ScienceStatistics.Web\\Output\\1\\1_1\\636525834691779319')
setwd(output)
d<-read.csv("./data.csv")
p<-read.csv('./Parameters.csv')
a<-p
#参数设定
idata<-d
R.Version4RUN<-343;
if (is.na(rlib)) {R.LibLocation<-"C:/R/R-3.4.3/library"} else {R.LibLocation=rlib}
Sys.setlocale("LC_TIME", "C")
library(doBy,lib.loc=R.LibLocation)
library(plotrix,lib.loc=R.LibLocation)
library(showtext,lib.loc=R.LibLocation)
showtext_auto(enable=TRUE)
pdfwd<-6; pdfht<-6
if (length(which(ls()=="ClinStats"))==0) ClinStats<-get(ls()[1])
names(ClinStats)<-toupper(names(ClinStats))
#--#
iynumber<-as.numeric(as.character(a[1,1]));
iyn1<-as.character(a[1,2]);iyv1<-as.character(a[1,3]);iys1<-as.numeric(a[1,4]);iydist1<-as.character(a[1,5]);iylink1<-as.character(a[1,6])
iyn2<-as.character(a[1,7]);iyv2<-as.character(a[1,8]);iys2<-as.numeric(a[1,9]);iydist2<-as.character(a[1,10]);iylink2<-as.character(a[1,11])
if(iynumber==1) {
vname<-c("_N_","_STAT_","_TOTAL_",iyn1)
vlabel<-c("样本量(%)","统计量","合计",iyn1)
}else if(iynumber==2) {
vname<-c("_N_","_STAT_","_TOTAL_",iyn1,iyn2)
vlabel<-c("样本量(%)","统计量","合计",iyn1,iyn2)
}
#--#
ixnumber<-as.numeric(as.character(a[2,1]));
ixn1<-as.character(a[2,2]);ixv1<-as.character(a[2,3]);ixs1<-as.numeric(as.character(a[2,4]))
ixn2<-as.character(a[2,5]);ixv2<-as.character(a[2,6]);ixs2<-as.numeric(as.character(a[2,7]))
ixn3<-as.character(a[2,8]);ixv3<-as.character(a[2,9]);ixs3<-as.numeric(as.character(a[2,10]))
ixn4<-as.character(a[2,11]);ixv4<-as.character(a[2,12]);ixs4<-as.numeric(as.character(a[2,13]))
ixn5<-as.character(a[2,14]);ixv5<-as.character(a[2,15]);ixs5<-as.numeric(as.character(a[2,16]))
if(ixnumber==1) {
vname<-c(vname,ixn1)
vlabel<-c(vlabel,ixn1)
}else if(ixnumber==2) {
vname<-c(vname,ixn1,ixn2)
vlabel<-c(vlabel,ixn1,ixn2)
}else if(ixnumber==3) {
vname<-c(vname,ixn1,ixn2,ixn3)
vlabel<-c(vlabel,ixn1,ixn2,ixn3)
}else if(ixnumber==4) {
vname<-c(vname,ixn1,ixn2,ixn3,ixn4)
vlabel<-c(vlabel,ixn1,ixn2,ixn3,ixn4)
}else if(ixnumber==5) {
vname<-c(vname,ixn1,ixn2,ixn3,ixn4,ixn5)
vlabel<-c(vlabel,ixn1,ixn2,ixn3,ixn4,ixn5)
}
#--#
isnymber<-as.numeric(as.character(a[3,1]))
isnp<-as.numeric(as.character(a[3,2]))
isn1<-as.character(a[3,3]);isv1<-as.character(a[3,4]);iss1<-as.numeric(as.character(a[3,5]))
row1<-as.character(a[3,6]);col1<-as.character(a[3,7])
irow1<-unlist(strsplit(row1,"[|]"));icol1<-unlist(strsplit(col1,"[|]"))
isn2<-as.character(a[3,8]);isv2<-as.character(a[3,9]);iss2<-as.numeric(as.character(a[3,10]))
row2<-as.character(a[3,11]);col2<-as.character(a[3,12])
irow2<-unlist(strsplit(row2,"[|]"));icol2<-unlist(strsplit(col2,"[|]"))
if(isnymber==1) {
vname<-c(vname,irow1)
vlabel<-c(vlabel,icol1)
}else if(isnymber==2) {
vname<-c(vname,irow1,irow2)
vlabel<-c(vlabel,icol1,icol2)
}
#--#
izn1<-as.character(a[4,1]);izv1<-as.character(a[4,2]);izs1<-as.numeric(as.character(a[4,3]))
izn2<-as.character(a[4,4]);izv2<-as.character(a[4,5]);izs2<-as.numeric(as.character(a[4,6]))
izlink<-as.character(a[4,7])
library(mgcv,lib.loc=R.LibLocation)
library(geepack,lib.loc=R.LibLocation)
library(gdata,lib.loc=R.LibLocation)
ofname<-"9_4";
svy.DSN.YN <- FALSE;
WD<-idata; wd.subset="";
title<-"基因型与表型关联分析";
#WD<-WD[order(WD$FMYID),];
attach(WD)
gee.SUBJ<-idata[,izn2];subjvname<-c(izn2);gee.TYPE<-tolower(c(izlink));
if(iynumber==1) {
yv<-as.data.frame(idata[c(iyn1)]);
yvname<-c(iyn1);
yvar<-c(iyv1);
ydist<-c(iydist);
ylink<-c(iylink);
ylv<-c(NA,iys1)[-1];
}else if(iynumber==2) {
yv<-as.data.frame(idata[c(iyn1,iyn2)]);
yvname<-c(iyn1,iyn2);
yvar<-c(iyv1,iyv2);
ydist<-c(iydist1,iydist2);
ylink<-c(iylink1,iylink2);
ylv<-c(NA,iys1,iys2)[-1];
}
if(isnymber==1) {
xv<-as.matrix(idata[isn1]);
xvname<-c(isn1);
xvar<-c(isv1);
xlv<-c(NA,iss1)[-1];
}else if(isnymber==2) {
xv<-as.matrix(idata[,c(isn1,isn2)]);
xvname<-c(isn1,isn2);
xvar<-c(isv1,isv2);
xlv<-c(NA,iss1,iss2)[-1];
}
sxf<-NA;
svname<-NA; sv<-NA; slv<-NA;
if(ixnumber==1) {
av<-as.matrix(idata[,c(ixn1)])
avname<-c(ixn1)
if (!is.na(avname[1])) avlbl<-vlabel[match(avname, vname)]
nadj<-length(avname);alv<-c(NA,ixs1)[-1]
saf<-c(NA,0)[-1]
}else if(ixnumber==2) {
av<-as.matrix(idata[,c(ixn1,ixn2)])
avname<-c(ixn1,ixn2)
if (!is.na(avname[1])) avlbl<-vlabel[match(avname, vname)]
nadj<-length(avname);alv<-c(NA,ixs1,ixs2)[-1]
saf<-c(NA,0,0,0)[-1]
}else if(ixnumber==3) {
av<-as.matrix(idata[,c(ixn1,ixn2,ixn3)])
avname<-c(ixn1,ixn2,ixn3)
if (!is.na(avname[1])) avlbl<-vlabel[match(avname, vname)]
nadj<-length(avname);alv<-c(NA,ixs1,ixs2,ixs3)[-1]
saf<-c(NA,0,0,0)[-1]
}else if(ixnumber==4){
av<-as.matrix(idata[,c(ixn1,ixn2,ixn3,ixn4)])
avname<-c(ixn1,ixn2,ixn3,ixn4)
if (!is.na(avname[1])) avlbl<-vlabel[match(avname, vname)]
nadj<-length(avname);alv<-c(NA,ixs1,ixs2,ixs3,ixs4)[-1];
saf<-c(NA,0,0,0,0)[-1]
}
timev<-NA; timevname<-NA;
bv<-idata[,izn1];bvar<-c(izn1);bvname<-c(izv1);
colv<-NA; colvname<-NA;
v.start<-NA; vname.start<-NA;
v.stop<-NA; vname.stop<-NA;
par1<-NA;dec<-4;
if(isnp==1) {
parm<-c(1,NA, NA, NA);
}else if(isnp==1) {
parm<-c(NA,NA, NA, NA);
}
if (!exists("pdfwd")) pdfwd<-6;
if (!exists("pdfht")) pdfht<-6;
##R package## mgcv geepack gdata ##R package##;
pvformat<-function(p,dec) {
if (dec>8) dec<-8
p1<-round(as.numeric(p),dec); pp<-p1
tmp<-(substr(p1,2,9)=="e-04" & !is.na(p1))
pp[tmp]<-paste("0.000",substr(pp[tmp],1,1),sep="")
tmp<-(p1==0 & !is.na(p1))
pp[tmp]<-paste(c("<0.",rep("0",dec-1),"1"),collapse="")
tmp<-(p1>0 & !is.na(p1))
pp[tmp]<-paste(pp[tmp],substr(rep("000000000",length(pp[tmp])),1,dec+2-nchar(pp[tmp])),sep="")
tmp<-(p1==1 & !is.na(p1))
pp[tmp]<-substr("0.999999999",1,dec+2)
return(pp)
}
numfmt<-function(p,dec) {
if (is.list(p)) p<-as.matrix(p)
if (is.matrix(p)) {nr<-nrow(p);} else {nr<-1;}
p1<-round(as.numeric(p),dec);
p2<-p1-floor(p1);
tmp<-(p2==0 & !is.na(p2))
p1[tmp]<-paste(p1[tmp],".0",sep="")
p2[tmp]<-"0.0";
p1<-paste(p1,substr(rep("0000000",length(p1)),1,dec+2-nchar(p2)),sep="")
p1[as.numeric(p)>10000000]<-"inf."
p1[is.na(p) | p=="" | p==" "]<-""
p1[p=="-Inf"]<-"-Inf"
p1[p=="Inf"]<-"Inf"
if (is.matrix(p)) {
p1<-matrix(p1,nrow=nr);colnames(p1)<-colnames(p);rownames(p1)<-rownames(p)
}
return(p1)
}
mat2htmltable<-function(mat) {
t1<- apply(mat,1,function(z) paste(z,collapse="</td><td>"))
t2<- paste("<tr><td>",t1,"</td></tr>")
return(paste(t2,collapse=" "))
}
setgam<-function(fml,yi) {
if (ydist[yi]=="") ydist[yi]<-"gaussian"
if (ydist[yi]=="exact") ydist[yi]<-"binomial"
if (ydist[yi]=="breslow") ydist[yi]<-"binomial"
if (ydist[yi]=="gaussian") mdl<-try(gam(formula(fml),data=wdtmp, family=gaussian(link="identity")))
if (ydist[yi]=="binomial") mdl<-try(gam(formula(fml),data=wdtmp, family=binomial(link="logit")))
if (ydist[yi]=="poisson") mdl<-try(gam(formula(fml),data=wdtmp, family=poisson(link="log")))
if (ydist[yi]=="gamma") mdl<-try(gam(formula(fml),data=wdtmp, family=Gamma(link="inverse")))
if (ydist[yi]=="negbin") mdl<-try(gam(formula(fml),data=wdtmp, family=negbin(c(1,10), link="log")))
return(mdl)
}
setgee<-function(fml,yi) {
if (ydist[yi]=="") ydist[yi]<-"gaussian"
if (ydist[yi]=="exact") ydist[yi]<-"binomial"
if (ydist[yi]=="breslow") ydist[yi]<-"binomial"
if (ydist[yi]=="gaussian") md<-try(geeglm(formula(fml),id=wdtmp[,subjvname],corstr=gee.TYPE,family="gaussian",data=wdtmp))
if (ydist[yi]=="binomial") md<-try(geeglm(formula(fml),id=wdtmp[,subjvname],corstr=gee.TYPE,family="binomial",data=wdtmp))
if (ydist[yi]=="poisson") md<-try(geeglm(formula(fml),id=wdtmp[,subjvname],corstr=gee.TYPE,family="poisson",data=wdtmp))
if (ydist[yi]=="gamma") md<-try(geeglm(formula(fml),id=wdtmp[,subjvname],corstr=gee.TYPE,family="Gamma",data=wdtmp))
if (ydist[yi]=="negbin") md<-try(geeglm.nb(formula(fml),id=wdtmp[,subjvname],corstr=gee.TYPE,data=wdtmp))
return(md)
}
setglm<-function(fml,yi) {
if (ydist[yi]=="") ydist[yi]<-"gaussian"
if (ydist[yi]=="exact") ydist[yi]<-"binomial"
if (ydist[yi]=="breslow") ydist[yi]<-"binomial"
if (ydist[yi]=="gaussian") md<-try(glm(formula(fml),family="gaussian",data=wdtmp))
if (ydist[yi]=="binomial") md<-try(glm(formula(fml),family="binomial",data=wdtmp))
if (ydist[yi]=="poisson") md<-try(glm(formula(fml),family="poisson",data=wdtmp))
if (ydist[yi]=="gamma") md<-try(glm(formula(fml),family="Gamma",data=wdtmp))
if (ydist[yi]=="negbin") md<-try(glm.nb(formula(fml),data=wdtmp))
return(md)
}
mdl2oo<-function(mdl,opt, xvnamei) {
if (is.na(mdl[[1]][1])) return(c("","",""))
if (substr(mdl[[1]][1],1,5)=="Error") return(c("","",""))
gs<-summary(mdl); print(gs);
if (opt=="gam") {gsp <- gs$p.table;} else {gsp <- gs$coefficients;}
gsp<-gsp[match(xvnamei,rownames(gsp)),]
oo<-c(numfmt(gsp[1],dec),numfmt(gsp[2],dec),pvformat(gsp[4],8))
return(oo)
}
readgeno <- function(genofile) {
genof <- read.table(genofile,sep=" ",header=TRUE)
if (ncol(genof)==1) genof <- read.table(genofile,sep="\t",header=TRUE)
if (ncol(genof)==1) genof <- read.table(genofile,sep=",",header=TRUE)
return(genof)
}
sumxx<-function(x) {sum(x,na.rm=TRUE)}
maxw<-function(x) {ifelse(max(x)>0,which.max(x),NA)}
minw<-function(x) {ifelse(max(x)>0,(1:4)[-c(which.max(x),which(x==0))],NA)}
ped2snp <- function(pedfile,outfile) {
pedf <- read.table(pedfile,sep=" ",skip=1,header=FALSE)
snpname <- read.table(pedfile,sep=" ",skip=0,nrow=1,header=FALSE)
if (ncol(pedf)==1) {
pedf <- read.table(pedfile,sep="\t",skip=1,header=FALSE)
snpname <- read.table(pedfile,sep="\t",skip=0,nrow=1,header=FALSE)
}
if (ncol(pedf)==1) {
pedf <- read.table(pedfile,sep=",",skip=1,header=FALSE)
snpname <- read.table(pedfile,sep=",",skip=0,nrow=1,header=FALSE)
}
nsnp<-(ncol(pedf)-6)/2
snpname<-snpname[1:nsnp]
if (sum(is.na(snpname))>0) snpname<-paste("SNP",(1:nsnp),sep="")
if (length(unique(pedf[,2]))!=length(pedf[,2])) {print("Error, found duplicated ID in .ped file!"); quit();}
pedf_ID<-pedf[,2]
pedf <- pedf[,-c(1:6)]
pedf[pedf==0]<-NA
nsub <- nrow(pedf)
col1 <- seq(1,ncol(pedf),2)
col2 <- seq(2,ncol(pedf),2)
a1 <- pedf[,col1]
a2 <- pedf[,col2]
colnames(a1)<-t(snpname)
colnames(a2)<-t(snpname)
aa <- rbind(a1,a2)
alle <- rbind(apply(aa==1,2,sumxx),apply(aa==2,2,sumxx),apply(aa==3,2,sumxx),apply(aa==4,2,sumxx))
rownames(alle)<-c("A","C","G","T")
alle1<-apply(alle,2,maxw)
alle2<-apply(alle,2,minw)
allele=c("A","C","G","T")
line1<-paste(allele[alle1],"/",allele[alle2],sep="")
a1[a1==matrix(rep(alle1,nsub),nrow=nsub,byrow=TRUE)]<-0
a1[a1==matrix(rep(alle2,nsub),nrow=nsub,byrow=TRUE)]<-1
a2[a2==matrix(rep(alle1,nsub),nrow=nsub,byrow=TRUE)]<-0
a2[a2==matrix(rep(alle2,nsub),nrow=nsub,byrow=TRUE)]<-1
a1<-cbind(pedf_ID, a1 + a2)
colnames(a1)<-c("ID",t(snpname))
write.table(a1,file=outfile,col.names=TRUE,row.names=FALSE,append=FALSE,quote=FALSE,sep=" ")
rm(aa,a2,pedf,alle,alle1,alle2,line1)
return(a1)
}
fdrcut<-function(p,alpha) {
p0<-cbind(1:length(p),p); n<-sum(!is.na(p));p1<-p0[!is.na(p),]
p1<-cbind(p1[order(p1[,2]),],(1:n)/n*alpha)
return(p1[match(p0[,1],p1[,1]),3])
}
if (!exists("dec")) dec<-2
vlabelN<-(substr(vlabel,1,1)==" ");
vlabelZ<-vlabel[vlabelN];vlabelV<-vlabel[!vlabelN]
vnameV<-vname[!vlabelN];vnameZ<-vname[vlabelN]
w<-c("<html><head>","<meta http-equiv=\"Content-Type\" content=\"text/html\" charset=\"gb2312\" /></head><body>")
if (!is.na(avname[1])) {
if (sum((saf=="s" | saf=="S") & alv>0)>0) w<-c(w,"</br>Spline smoothing only applies for continuous variables")
if (!is.na(subjvname) & (sum((saf=="s" | saf=="S") & alv==0)>0)) w<-c(w,"</br>Generalized estimate equation could not be used with spline smoothing terms")
}
allvname<-c(yvname,xvname,colvname,bvar,avname,subjvname,vname.start,vname.stop,timevname);
allvname<-allvname[!is.na(allvname)]
WD<-WD[,allvname]
tmpID<-rep(1,nrow(WD))
WD<-cbind(WD,tmpID)
allvname1<-c(colvname,bvar,avname,subjvname,vname.start,vname.stop,timevname,"tmpID");
allvname1<-allvname1[!is.na(allvname1)]
WD<-WD[apply(is.na(cbind(1,WD[,allvname1])),1,sum)==0,]
if (!is.na(subjvname)) {
if (!is.na(avname[1])) saf<-rep(0,length(saf));
WD<-WD[order(WD[,subjvname]),];
}
rm(xv,yv,bv,av,colv,v.start,v.stop)
fml0<-""; na=0; avb=""; smoothav<-0;
if (!is.na(avname[1])) {
na<-length(avname)
avb<-vlabelV[match(avname,vnameV)]; avb[is.na(avb)]<-avname[is.na(avb)]
avname_ <- avname
smoothavi<-((saf=="s" | saf=="S") & alv==0)
smoothav<-sum(smoothavi)
smoothavname<-avname[smoothavi]
avname_[smoothavi]<-paste("s(",avname[smoothavi],")",sep="")
avb1<-avb
avb1[smoothavi]<-paste(avb[smoothavi],"(Smooth)",sep="")
avname_[alv>0]<-paste("factor(",avname[alv>0],")",sep="")
fml0<-paste("+",paste(avname_,collapse="+"))
}
ny=length(yvname);
yb<-vlabelV[match(yvname,vnameV)]; yb[is.na(yb)]<-yvname[is.na(yb)]
if (is.na(xvname[1])) {
if (!exists("PED_FNAME")) {print("No genotype information!"); quit();}
if (is.na(bvar)) {print("Must specify subject ID variable!"); quit();}
tmp<-strsplit(PED_FNAME,'\\.')[[1]]
tmp<-toupper(tmp[length(tmp)])
if (tmp=="PED") {geno<-ped2snp(PED_FNAME, paste(ofname,"_snps.xls",sep=""));
} else {geno<-readgeno(PED_FNAME); }
id_ord<-match(WD[,bvar],geno[,1])
if (sum(!is.na(id_ord))<10) {print("<10 subjects ID match with genotype file!"); quit();}
WD_GENO<-geno[id_ord,]
nx<-ncol(WD_GENO)-1
xvname<-colnames(WD_GENO)[-1]
xb<-xvname
} else {
nx=length(xvname);
xb<-vlabelV[match(xvname,vnameV)]; xb[is.na(xb)]<-xvname[is.na(xb)]
WD_GENO<-WD[,c("tmpID",xvname)]
}
if (is.na(colvname)) {
nclv<-1; clvb<-"Total"; clvb_<-"Total"; cvb=""
} else {
clv<-levels(factor(WD[,colvname])); nclv<-length(clv)+1
clvb_<-vlabelZ[match(paste(colvname,".",clv,sep=""),vnameZ)]; clvb_[is.na(clvb_)]<-clv[is.na(clvb_)];
clvb<-c(paste(vlabelV[vnameV==colvname],clvb_,sep="="),"Total");
clvb_<-c(clvb_,"Total")
cvb <- vlabelV[vnameV==colvname];
}
opt<-ifelse(!is.na(subjvname), "gee", ifelse(smoothav>0, "gam", "glm")) ;
sink(paste(ofname,".lst",sep=""))
w<-c(w,paste("<h2>", title, "</h2>"))
oo<-rep(" ",times=18)
tmp.xyg<-c(NA,NA,NA)
for (i in (1:nx)) {
geno.i<-WD_GENO[,i+1];
geno.d<-geno.i; geno.d[geno.d==2]<-1
geno.r<-geno.i; geno.r[geno.r==1]<-0; geno.r[geno.r==2]<-1
geno.i<-cbind(geno.i,geno.d,geno.r)
genoi.name<-paste(xvname[i],c("","_dom","_rec"),sep="")
colnames(geno.i)<-genoi.name
for (j in (1:ny)) {
for (k in (1:nclv)) {
ooi<-rep(" ",times=18);
ooi[1]<-clvb_[k];
ooi[2]<-xb[i];
ooi[3]<-yb[j];
wdtmp<-cbind(geno.i,WD[,c(yvname[j],allvname1)]);
wdtmp<-wdtmp[apply(is.na(wdtmp),1,sum)==0,]
if (k<nclv) wdtmp<-wdtmp[wdtmp[,colvname]==clv[k],];
tmp.nn<-table(wdtmp[,1])
ooi[4]<-tmp.nn["0"];
ooi[5]<-tmp.nn["1"];
ooi[6]<-tmp.nn["2"];
tmp.mm<-numfmt(tapply(wdtmp[,yvname[j]],wdtmp[,1],function(z) mean(z,na.rm=TRUE)),dec)
ooi[7]<-tmp.mm[1];
ooi[8]<-tmp.mm[2];
ooi[9]<-tmp.mm[3];
fmli<-paste(yvname[j],"~",genoi.name[1],fml0);
fmld<-paste(yvname[j],"~",genoi.name[2],fml0);
fmlr<-paste(yvname[j],"~",genoi.name[3],fml0);
if (k==nclv & !is.na(colvname)) {
fmli<-paste(fmli,"+factor(",colvname,")",sep="")
fmld<-paste(fmld,"+factor(",colvname,")",sep="")
fmlr<-paste(fmlr,"+factor(",colvname,")",sep="")
}
tmp.n1<-sum(c(as.numeric(ooi[5]),as.numeric(ooi[6])),na.rm=TRUE)
tmp.n2<-sum(as.numeric(ooi[6]),na.rm=TRUE)
if (tmp.n1>1) {
if (opt=="gam") tmp.mdl<-setgam(fmli,j)
if (opt=="gee") tmp.mdl<-setgee(fmli,j)
if (opt=="glm") tmp.mdl<-setglm(fmli,j)
mdla<-mdl2oo(tmp.mdl,opt,genoi.name[1]); rm(tmp.mdl)
ooi[10]<-mdla[1];
ooi[11]<-mdla[2];
ooi[12]<-mdla[3];
if (opt=="gam") tmp.mdl<-setgam(fmld,j)
if (opt=="gee") tmp.mdl<-setgee(fmld,j)
if (opt=="glm") tmp.mdl<-setglm(fmld,j)
mdld<-mdl2oo(tmp.mdl,opt,genoi.name[2]); rm(tmp.mdl)
ooi[13]<-mdld[1];
ooi[14]<-mdld[2];
ooi[15]<-mdld[3];
}
if (tmp.n2>1) {
if (opt=="gam") tmp.mdl<-setgam(fmlr,j)
if (opt=="gee") tmp.mdl<-setgee(fmlr,j)
if (opt=="glm") tmp.mdl<-setglm(fmlr,j)
mdlr<-mdl2oo(tmp.mdl,opt,genoi.name[3]); rm(tmp.mdl)
ooi[16]<-mdlr[1];
ooi[17]<-mdlr[2];
ooi[18]<-mdlr[3];
}
oo<-rbind(oo,ooi)
rm(wdtmp, ooi)
tmp.xyg<-rbind(tmp.xyg,c(i, j, k))
}
}
rm(geno.i, genoi.name)
}
tmp.oo<-c(cvb,"SNP","Outcome","N0","N1","N2","MEAN0","MEAN1","MEAN2");
tmp.oo<-c(tmp.oo,"beta-add","se-add","P-add","beta-dom","se-dom","P-dom","beta-rec","se-rec","P-rec")
if (nx > 1) {
tmp.xyg<-tmp.xyg[-1,];oo<-oo[-1,]; tmp.oo<-c(tmp.oo,"FDRCUT")
for (k in (1:nclv)) {
for (j in (1:ny)) {
tmp.gyrow<-((tmp.xyg[,3]==k) & (tmp.xyg[,2]==j))
tmp.oo<-rbind(tmp.oo,cbind(oo[tmp.gyrow,],pvformat(fdrcut(oo[tmp.gyrow,12],0.05),8)))
}
}
} else {tmp.oo<-rbind(tmp.oo,oo[-1,]);}
if (is.na(colvname)) tmp.oo<-tmp.oo[,-1]
print(tmp.oo)
sink()
w<-c(w,"</br><table border=3>", mat2htmltable(tmp.oo), "</table>")
w<-c(w,"</br>N0,N1,N2: number of subjects with genotype=0,1,2")
w<-c(w,"</br>MEAN0,MEAN1,MEAN2: mean outcome for genotype=0,1,2")
w<-c(w,"</br>beta-add, se-add, P-add: regression coefficient, standard err, pvalue from additive model")
w<-c(w,"</br>beta-dom, se-dom, P-dom: regression coefficient, standard err, pvalue from dominant model")
w<-c(w,"</br>beta-rec, se-rec, P-rec: regression coefficient, standard err, pvalue from recessive model")
w<-c(w,"</br>fdrcut: FDR cut point for P-value from additive model based on p<=0.05*k/m (Benjamini and Hochberg 1995)")
if (smoothav>0) w<-c(w,"</br>Generalized additive models were applied")
if (opt=="gee") w<-c(w, paste("</br>Generalized estimate equation were used, subject ID=", subjvname, "(", gee.TYPE,")",sep=""))
w<-c(w,wd.subset)
if (is.na(avname[1])) avb1<-"None";
w<-c(w,paste(c("</br>调整变量:",paste(avb1,collapse="; ")),collapse=" "))
w<-c(w,paste("</br>生成日期:",Sys.Date()))
w<-c(w,"</body></html>")
fileConn<-file(paste(ofname,".htm",sep="")); writeLines(w, fileConn)
|
9926f8098c673c01f547bd4589168598402c7188
|
7cd8e6ac8097d2ad5811eab2f3688ff22b0a0feb
|
/man/GetAnyXMLAttribute.Rd
|
800ef62cbc7d3595f930ab15b6ed5d28bb2e1054
|
[] |
no_license
|
noahhl/r-google-analytics
|
400e492011fd096448f7db677f6adaf81094f9f6
|
5c396e1bded0ef00a84c15f000f6fde37d45040f
|
refs/heads/master
| 2016-08-04T15:04:37.911940
| 2011-03-23T15:21:06
| 2011-03-23T15:21:06
| 1,411,707
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 413
|
rd
|
GetAnyXMLAttribute.Rd
|
\name{GetAnyXMLAttribute}
\alias{GetAnyXMLAttribute}
\title{Function to return the value attribute of the nodes of the parsed XML.}
\usage{GetAnyXMLAttribute(vNode, attr.name)}
\description{Function to return the value attribute of the nodes of the parsed XML.}
\value{The value contained with the XML node.}
\arguments{\item{vNode}{The XML node to be inspected.}
\item{attr.name}{The attribute to be returned.}}
|
bcb03246b1ffe6506160c80ee70daf52baed4d2e
|
473736973c702e196327e702808fae70b5685c6a
|
/zeroSum/tests/testthat/test_costfunction_logistic.R
|
d24d86b38d0cd221faaa226c9eb6c8a8df623474
|
[] |
no_license
|
mshardel/zeroSum
|
c8a93429045d9b3f0063d915cac1efa80190da3d
|
efbd2e120d6f976b6b131b1bfadd75f2ef4b2c77
|
refs/heads/master
| 2020-03-22T11:55:25.428712
| 2018-05-24T14:06:31
| 2018-05-24T14:06:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
test_costfunction_logistic.R
|
context("Testing logistic cost function")
test_that( "check whether the R and C logistic cost function yield the same result",{
library(glmnet)
set.seed(10)
## logistic regression test
x <- log2(exampleData$x)
P <- ncol(x)
N <- nrow(x)
y <- exampleData$ylogistic
alpha <- 0.5
lambda <- 0.01
w <- runif(N)
v <- runif(P)
lin <- zeroSumFit( x, y, lambda, alpha, weights=w, type="binomial")
fusion <- Matrix(0, nrow = P-1, ncol = P, sparse = TRUE)
for(i in 1:(P-1)) fusion[i,i] <- 1
for(i in 1:(P-1)) fusion[i,(i+1)] <- -1
gamma <- 0.30
cost <- extCostFunction( x, y, coef(lin), lambda, alpha, type="fusionBinomial",gamma=gamma, fusion=fusion, useC=TRUE )
expect_equal( cost$loglikelihood, cost$C$loglikelihood, tolerance=1e-15)
expect_equal( cost$ridge, cost$C$ridge, tolerance=1e-15)
expect_equal( cost$lasso, cost$C$lasso, tolerance=1e-15)
expect_equal( cost$fusion, cost$C$fusion, tolerance=1e-15)
expect_equal( cost$cost, cost$C$cost, tolerance=1e-15)
})
|
58d3efbbbb749f22f7a99fedcc8e93b8bf7e173d
|
33a743d734d68adb0ac9b05d08ee6e3d2593f7ec
|
/ui.R
|
2bb34e251313898904bed7abe1bba2b251d0641b
|
[] |
no_license
|
leonaQ620/New-York-Tree
|
0ead35b2f381c328dfcfae73c9b71085f3af216e
|
6cdbc4396ee273821899e0e411be561a048dd894
|
refs/heads/main
| 2023-03-16T23:11:09.647997
| 2021-03-09T23:54:50
| 2021-03-09T23:54:50
| 346,171,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,908
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(leaflet)
library(shinythemes)
# Define UI for application that draws a histogram
navbarPage("Street Tree in NewYork",
theme = shinytheme('cosmo'),
tabPanel("Tree MAP",
sidebarLayout(
sidebarPanel(
selectInput(
inputId = "Zip_City",
label = "Choose the City:",
choices = unique(tree$zip_city),
selected="Forest Hills"
),
p(h3(strong("For the first time, you have access to information
about every street tree in New York City."))),
br(),
p(strong("Selecting the city,
find the distribution and specific number of trees in the city.
See a specific position of trees by clicking on the number.
At the same time, a status of the tree can be distinguished by color.")),
br(),
p(strong("Based on a tree diagram, You can better know
the specific cities with the most dead and stump trees.
Through the city selection, you can view the specific location of stump or dead tree.")),
br(),br(),
p(h4("%trees of each borough")),
p(strong("Queens"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),
span(strong("37%"),style ="font-size: 120%;", style = "color:orange")),
p(strong("Brooklyn"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),
span(strong("26%"),style ="font-size: 120%;", style = "color:red")),
p(strong("Staten Island"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),
span(strong("15%"),style ="font-size: 120%;", style = "color:green")),
p(strong("Bronx"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),
span(strong("12%"),style ="font-size: 120%;", style = "color:blue")),
p(strong("Manhattan"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("10%"),style ="font-size: 120%;", style = "color:grap"))
),
# Show a plot of the generated distribution
mainPanel(
p(span(strong("Mapped on Tree"), style = "font-size: 120%;"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("Trees Alive"), style = "font-size: 120%;"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("Trees Dead"), style = "font-size: 120%;"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("Trees Stump"), style = "font-size: 120%;")),
p(span(strong("683,788"), style = "font-size: 150%;",style="color:green"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("652,173"), style = "font-size: 150%;",style="color:green"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),
span(strong("13,961"), style = "font-size: 150%;",style="color:green"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("17,654"), style = "font-size: 150%;",style="color:green")),
br(),
leafletOutput(outputId ="mymap"),
br(),br(),
plotOutput(outputId ="PlotT"),
)
)
),
tabPanel("The Condition of Trees",
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
dateRangeInput(inputId = "Date",
label = "Choose the date",
start = min(tree$created_at),
end = max(tree$created_at),
min = min(tree$created_at),
max = max(tree$created_at),
startview = "month",
weekstart = 0
),
checkboxGroupInput(inputId = "User_Type",
label="Choose the User_Type",
choices = unique(tree$user_type),
selected = unique(tree$user_type)
),
checkboxGroupInput(inputId = "Borough",
label="Choose the Borough",
choices = unique(tree$borough),
selected = unique(tree$borough)
),
checkboxGroupInput(inputId = "Status",
label="Choose the status of trees",
choices = unique(tree$status),
selected = unique(tree$status)
),
checkboxGroupInput(inputId = "Guard",
label="Choose the Guard type",
choices = c("None","Helpful","Harmful","Unsure"),
selected = c("None","Helpful","Harmful","Unsure")
),
p(h4("%trees of each Health condition")),
p(strong("Good"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),
span(strong("76.16%"),style ="font-size: 120%;", style = "color:green")),
p(strong("Fair"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("18.84%"),style ="font-size: 120%;", style = "color:blue")),
p(strong("Poor"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),
span(strong("5.01%"),style ="font-size: 120%;", style = "color:red"))
),
mainPanel(
p(h4("Records created by User_Type")),
p(span(strong("NYC Parks Staff: 169,986"),style = "color:red"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("TreeCount Staff: 296,284"),style = "color:green"),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
HTML(' '),HTML(' '),HTML(' '),HTML(' '),
span(strong("Volunteer: 217,518"),style = "color:blue")),
br(),
plotOutput("PlotTS", width = "100%",height = 500),
br(),br(),
plotOutput(outputId = "Plot")
)
)
),
tabPanel("Species of Trees",
fluidRow(
column(4, offset = 4,
selectInput( inputId = "Species",
label="Choose the species of trees",
choices = c("London planetree", "honeylocust",
"Callery pear", "pin oak",
"Norway maple"),
multiple = TRUE,
selected = "London planetree")
),
column(4,
selectInput(inputId = "Boroughd",
label="Choose the Borough",
choices = unique(tree$borough),
multiple = TRUE,
selected = "Queens")
),
br(),
fluidRow(
plotOutput("STmap",width = "100%", height = 700),
br(),br(),
br(),br(),
plotOutput("PlotH")
)
)
),
tabPanel("Tree Problems",
sidebarLayout(
sidebarPanel(
checkboxGroupInput(inputId = "Problems",
label = "Choose the Problems",
choices = c("Stones","BranchLights",
"Stones,BranchLights", "RootOther",
"TrunkOther"),
selected = "Stones"),
p(strong("Stones: Light Blue")),
p(strong("BranchLights: Grape")),
p(strong("Stones&BranchLights: Green")),
p(strong("RootOther: Blue")),
p(strong("TrunkOther: Yellow")),
),
mainPanel(
leafletOutput("Plotp1"),
br(),br(),
plotOutput("Plotp2",height = 700)
)
)
)
)
|
fab195f961aaeac0257edd33aa546961282f3955
|
262a347beae2643b62367b59bc45d2a96aa7dbfc
|
/man/update_segments_data.Rd
|
fd741943f3d2d9976003c6dcc2146c86141422bc
|
[] |
no_license
|
BenioffOceanInitiative/whalesafe4r
|
18ab7e7875808f2db91f42b62dea7e9eefc2f8f8
|
ef4341c0622c9e32ccdd1d45ca3f5ca76b15effb
|
refs/heads/master
| 2022-11-28T02:20:43.257741
| 2020-08-07T01:08:12
| 2020-08-07T01:08:12
| 162,746,601
| 0
| 1
| null | 2020-06-18T19:26:37
| 2018-12-21T18:39:01
|
HTML
|
UTF-8
|
R
| false
| true
| 407
|
rd
|
update_segments_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_ais.R
\name{update_segments_data}
\alias{update_segments_data}
\title{Update AIS Segments Database Table}
\usage{
update_segments_data(segs_data = NULL)
}
\arguments{
\item{ais_data}{}
}
\description{
Creates database connection and writes new ais segments data frame to the "ais_segments" database table.
}
\examples{
}
|
56eca3d6fb11b23b54a9e185a1afbc02a391bc6f
|
8f18bfbe67f75a7f7718c2f1eee031373ad35b97
|
/tagPrediction/markov_chain.R
|
dcb967471249cc75783b03c56e46da2b4e48664f
|
[] |
no_license
|
jhudsl/fitbit_donation
|
cbf69e11556cc990e10e97d8f20849fcabd18466
|
e24c0e7d8aecb0d8646fedc76a18b40c4dc7f0ef
|
refs/heads/master
| 2020-04-05T12:34:31.365718
| 2017-08-16T17:44:07
| 2017-08-16T17:44:07
| 95,149,868
| 0
| 0
| null | 2017-08-16T16:12:39
| 2017-06-22T19:33:40
|
R
|
UTF-8
|
R
| false
| false
| 2,227
|
r
|
markov_chain.R
|
# Attempting classification with markov chain model
library(here)
library(tidyverse)
library(lubridate)
rawData <- here::here("tagPrediction/raw_data.csv") %>% read_csv() %>% select(-X1)
tagData <- here::here("tagPrediction/activity_tags.csv") %>% read_csv() %>% select(-X1) %>% mutate(start = round(start/60), end = round(end/60))
#takes data in the standard report format and adds the column totalMins which starts on the first day and counts up.
makeLong <- function(data){
minsInDay <- 24*60
data %>%
group_by(type) %>%
arrange(date) %>%
mutate(day = as.integer(difftime(date, min(date), units = "days"))) %>%
mutate(minute = time/60,
totalMins = (day*minsInDay) + minute)
}
long_data <- rawData %>%
makeLong() %>%
spread(type, value) %>%
select(totalMins, hr = `heart rate`, steps)
# add in the classes at each minute
minsInDay <- 24*60
#expand tag data to cover their ranges.
expandedTags <- tagData %>%
mutate(day = as.integer(difftime(date, min(rawData$date), units = "days")),
startTotal = (day*minsInDay) + start,
endTotal = (day*minsInDay) + end) %>%
select(tag, start = startTotal, end = endTotal)
library(rucrdtw)
# Pick out a tag from the data.
tagOfInterest <- expandedTags[4,] #One of two hiking examples
tagStart <- tagOfInterest$start[1]
tagEnd <- tagOfInterest$end[1]
# Subset tag data into series
tagSequence <- long_data %>% filter(totalMins > tagStart & totalMins < tagEnd)
# Remove tag series from full data
leftOverSequence <- long_data %>% filter(!(totalMins > tagStart & totalMins < tagEnd))
# Search full data for taged series
dtw_search <- ucrdtw_vv(data = leftOverSequence$hr, query = tagSequence$hr, dtwwindow = 0.05)
foundClosest <- leftOverSequence[dtw_search$location:(dtw_search$location + dim(tagSequence)[1]),]
long_data %>% mutate(
found = ifelse(totalMins %in% foundClosest$totalMins, "classified",
ifelse(totalMins %in% tagSequence$totalMins, "training","not"))) %>%
gather(type, value, -totalMins, -found) %>%
ggplot(aes(x = totalMins, y = value, group = 1, color = found)) +
geom_line() + facet_grid(type~.)
foundClosest %>%
gather(type, value, -totalMins) %>%
|
d35770b51b8028610554f4ce1ebc843442ba58d7
|
a11470a5ca9a46b6d723bfd4aa1c5f40838649d8
|
/inTrees_wrapper.R
|
31ceb24ae606165ff15f865702d1ae71cb4389eb
|
[] |
no_license
|
julianhatwell/interpret_basics_auxr
|
3c2f9393c291f2e3228e048de3e7d9810217b905
|
7564bf89c02374507ef37edce828311beece1347
|
refs/heads/master
| 2021-05-10T12:40:59.193357
| 2020-10-24T06:20:40
| 2020-10-24T06:20:40
| 118,448,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 705
|
r
|
inTrees_wrapper.R
|
library(inTrees)
inTrees_wrapper <- function(X_train
, y_train
, model
, ntree) {
# extract rules
ruleExec0 <- extractRules(RF2List(model)
, X_train
, ntree = ntree) # hard limit on 5000
ruleExec <- unique(ruleExec0)
ruleMetric <- getRuleMetric(ruleExec
, X_train
, y_train)
#build the simplified tree ensemble learner
learner <- buildLearner(ruleMetric
, X_train
, y_train)
return(learner)
}
print("Created inTrees_wrapper function")
|
ad5de7dfb37a102684656445c517176ec89d110b
|
3841ba01f6675cac7278939803560ff990ad7318
|
/analysis/data.R
|
fa1104f0700f7ae18b8f10df1a2cb82b3b44d021
|
[
"MIT"
] |
permissive
|
stormxuwz/SeabirdCode
|
32a6210a1861e0091cd200a2c433a87451adfb54
|
943c38b0ef0272c04157700ee6ecc2e87f2c2aaa
|
refs/heads/master
| 2021-08-25T10:29:49.251792
| 2019-03-30T16:22:27
| 2019-03-30T16:22:27
| 57,337,298
| 1
| 5
| null | 2017-12-18T07:58:21
| 2016-04-28T22:17:54
|
Python
|
UTF-8
|
R
| false
| false
| 4,231
|
r
|
data.R
|
require(RMySQL)
require(reshape2)
dbConfig <- list(dbname = "Seabird", username="root", password="XuWenzhaO", host="127.0.0.1")
sqlQuery <- function (sql) {
if(nchar(sql)<1){
stop("wrong sql")
}
conn <- dbConnect(MySQL(), dbname = dbConfig$dbname, username=dbConfig$username, password=dbConfig$password, host=dbConfig$host, port=3306)
result <- dbGetQuery(conn,sql)
dbDisconnect(conn)
return(result)
}
stationAllYearPlot <- function(site){
allYear = 1996:2013
site = "SU01"
fileId_sql <- "SELECT fileId,year(systemUpLoadTime) as year,fileName from summer_meta where stationInfered = 'SU01'"
fileIdDF <- sqlQuery(fileId_sql)
maxDepth_sql <- sprintf("Select max(Depth) from summer_data where fileId in (%s)", paste(fileIdDF$fileId,collapse=","))
temperature_range_sql <- sprintf("Select min(Temperature),max(Temperature) from summer_data where fileId in (%s)", paste(fileIdDF$fileId,collapse=","))
fluorescence_range_sql <- sprintf("Select min(Fluorescence),max(Fluorescence) from summer_data where fileId in (%s)", paste(fileIdDF$fileId,collapse=","))
maxDepth <- sqlQuery(maxDepth_sql)[1,1]
temperatureRange <- as.numeric(sqlQuery(temperature_Range_sql)[,1:2])
fluorescenceRange <- as.numeric(sqlQuery(fluorescence_range_sql)[,1:2])
plotList_temperature <- list()
plotList_fluorescence <- list()
for(i in 1:nrow(fileIdDF)){
id <- fileIdDF$fileId[i]
name <- paste(fileIdDF$year[i],basename(fileIdDF$fileName[i]))
sql <- sprintf("Select * from summer_data where fileId = %s order by 'index'", id)
data <- sqlQuery(sql)
# data <- melt(data,id.vars = ("index","Depth","id","fileId")))
p_temperature <- qplot(Temperature,-Depth,data=data,color=I("red"))+ylim(c(-maxDepth,0))+xlim(temperatureRange)+ggtitle(name)
plotList_temperature[[as.character(id)]] <- p_temperature
p_fluorescence <- qplot(Fluorescence,-Depth,data=data,color=I("red"))+ylim(c(-maxDepth,0))+xlim(fluorescenceRange)+ggtitle(name)
plotList_fluorescence[[as.character(id)]] <- p_fluorescence
}
args.list <- c(plotList_temperature,list(nrow=2,ncol=16))
png(sprintf("~/Desktop/Temperature.png"),width = 2000,height = 500)
print(do.call(grid.arrange, args.list))
dev.off()
args.list <- c(plotList_fluorescence,list(nrow=2,ncol=16))
png(sprintf("~/Desktop/Fluorescence.png"),width = 2000,height = 500)
print(do.call(grid.arrange, args.list))
dev.off()
pm <- ggmatrix(plotList,nrow=2,ncol=16)
}
readFeature <- function(){
# feature <- read.csv("../../output/testFeature.csv")
feature <- read.csv("../../output/detectedFeatures.csv")
validStation <- read.csv("../../input/station_loc.csv")
waterChemistryData <- read.csv("../../output/waterFeature.csv")[,-1]
feature <- cbind(feature,waterChemistryData)
yearRange <- unique(feature$year)
siteRange <- unique(feature$site)
fullRange <- expand.grid(year = yearRange,site = siteRange)
feature <- merge(fullRange,feature, by = c("year","site"),all.x=T,all.y=F)
for(var in c(waterChemistryVariables,detectedVariables)){
feature[,var] <- ifelse(feature[,var]<0.001,NA,feature[,var])
}
# feature[,waterChemistryVariables] <- ifelse(feature[,waterChemistryVariables]<0,NA,feature[,waterChemistryVariables])
# feature[,detectedVariables] <- ifelse(feature[,detectedVariables]<0,NA,feature[,detectedVariables])
feature$lake <- addLake(feature$site)
feature_SU <- filter(feature,lake=="SU")
validStation$Station <- as.character(validStation$Station)
# validStation$bathymetry <- retriveBathyMetriy(validStation,"../../input/erie_lld/erie_lld.asc")
validStation$bathymetry <- retriveBathyMetriy(validStation,"../../input/superior_lld/superior_lld.asc")
validStation$Long <- -validStation$Long
# feature <- subset(feature,site %in% validStation$Station)
feature_SU <- merge(feature_SU,validStation,by.x = "site", by.y = "Station")
return(feature_SU)
}
addLake <- function(site){
return(strtrim(site,2))
}
retriveBathyMetriy <- function(spData,sourceMapFile){
require(raster)
spData$Long <- -spData$Long
coordinates(spData)=~Long+Lat
# spData must be a sp data class
bathymetry_raster = raster(sourceMapFile)
bathymetry <- extract(bathymetry_raster,spData)
return(bathymetry)
}
|
59f9bdc01940e44c7a2c056dc290cf64d0c4305e
|
4bce1164c09a6a35646c5fb262c495c26d228224
|
/R/print.R
|
95e5ec2a3e3b9206de36516488e32295486a76c9
|
[
"MIT"
] |
permissive
|
USCbiostats/MethCon5
|
1d45c78f5bc044148fd39345a5fcb05753acf57b
|
71aae030b648ebca0dbaa2d45385a14ea0ba7a29
|
refs/heads/master
| 2022-04-06T11:16:33.583390
| 2019-12-20T18:57:46
| 2019-12-20T18:57:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 214
|
r
|
print.R
|
#' @export
print.methcon <- function(x, ...) {
cat("# Methcon object\n")
cat("# .id:", attr(x, ".id"), "\n")
cat("# .value:", attr(x, ".value"), "\n")
class(x) <- setdiff(class(x), "methcon")
print(x)
}
|
f4fac2c907a776755600ca7894c2e81623459417
|
9b6a37d925d0cc87800bdc017e4d331baf40ef50
|
/clarite/modules/analyze/regression/r_code/ewas_r.R
|
1f28800f587a9607f3d24c39da975c4fa839a20e
|
[
"BSD-3-Clause"
] |
permissive
|
HallLab/clarite-python
|
921b964894e57578a93abdc9a7394bfd30f937cb
|
817ccad90e3773a2f2e85290ea6b2bcaf621bcf6
|
refs/heads/master
| 2023-07-09T09:05:17.399598
| 2023-07-03T17:47:42
| 2023-07-03T17:47:42
| 183,051,306
| 5
| 3
|
BSD-3-Clause
| 2021-07-19T14:04:27
| 2019-04-23T16:07:53
|
Python
|
UTF-8
|
R
| false
| false
| 27,736
|
r
|
ewas_r.R
|
library(survey)
# Catch errors from glm and similar, warning instead
warn_on_e <- function(var_name, e){
warning(paste0("NULL result for ", var_name, " due to: ", e), call=FALSE)
return(NULL)
}
# Quote variable names with backticks to account for special characters
quote_name <- function(s){paste0("`", s, "`")}
# Get required data for regressing a specific variable
get_varying_covariates <- function(df, covariates, variable, allowed_nonvarying){
# Get number of unique values in covariates among observations where the variable is not NA
cov_counts <- sapply(covariates, function(c) {length(unique(df[!is.na(df[c]) & !is.na(df[variable]), c]))})
varying_covariates <- covariates[cov_counts >= 2]
nonvarying_covariates <- covariates[cov_counts <2]
# Compare to the covariates that are allowed to vary
not_allowed_nonvarying <- setdiff(nonvarying_covariates, allowed_nonvarying)
if(length(not_allowed_nonvarying) > 0){
# Null Result
print(paste0(" NULL result: Some covariates don't vary when '", variable, "' is not NA and aren't specified as allowed: ",
paste(not_allowed_nonvarying, collapse = ", ")))
return(NULL)
} else if(length(nonvarying_covariates) > 0){
# Ignore those
print(paste0(" Some covariates don't vary when '", variable, "' is not NA but are allowed to do so: ",
paste(nonvarying_covariates, collapse = ", ")))
}
# Return the list of covariates that are kept
return(varying_covariates)
}
###Continuous###
regress_cont <- function(data, varying_covariates, outcome, var_name, regression_family){
# Create a regression formula
if(length(varying_covariates)>0){
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), "+", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
} else {
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), sep="")
}
var_result <- tryCatch(glm(stats::as.formula(fmla),
family=regression_family,
data=data,
na.action=na.omit),
error=function(e) warn_on_e(var_name, e))
# Collect Results
if (!is.null(var_result)){
var_summary <- summary(var_result)
# Update with processed summary results
# Assume non-convergence if no p values are generated
num_coeff_cols <- length(var_summary$coefficients)/nrow(var_summary$coefficients)
if (num_coeff_cols < 4){
return(NULL)
} else {
return(data.frame(
Converged = TRUE,
Beta = var_summary$coefficients[2,1],
SE = var_summary$coefficients[2,2],
Beta_pvalue = var_summary$coefficients[2,4],
pval = var_summary$coefficients[2,4]
))
}
} else{
return(NULL)
}
}
regress_cont_survey <- function(data, varying_covariates, outcome, var_name, regression_family,
weight_values, strata_values, fpc_values, id_values, subset_array, ...){
# Create survey design
if(is.null(id_values)){
survey_design <- survey::svydesign(ids = ~1,
weights = weight_values,
data = data,
strata = strata_values,
fpc = fpc_values, ...)
} else{
survey_design <- survey::svydesign(ids = id_values,
weights = weight_values,
data = data,
strata = strata_values,
fpc = fpc_values, ...)
}
# Update subset array to drop NA values of the outcome variable and subset the survey design
survey_design <- subset(survey_design, subset_array)
# Create a regression formula
if(length(varying_covariates)>0){
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), "+", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
} else {
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), sep="")
}
var_result <- tryCatch(survey::svyglm(stats::as.formula(fmla), survey_design, family=regression_family, na.action=na.omit),
error=function(e) warn_on_e(var_name, e))
# Collect Results
if (!is.null(var_result)){
var_summary <- summary(var_result)
# Update with processed summary results
num_coeff_cols <- length(var_summary$coefficients)/nrow(var_summary$coefficients)
if (num_coeff_cols < 2){
# Assume non-convergence if no p values are generated
return(NULL)
} else if (num_coeff_cols == 2) {
return(data.frame(
Converged = TRUE,
Beta = var_summary$coefficients[2,1],
SE = var_summary$coefficients[2,2],
Beta_pvalue = 1.0,
pval = 1.0
))
} else {
return(data.frame(
Converged = TRUE,
Beta = var_summary$coefficients[2,1],
SE = var_summary$coefficients[2,2],
Beta_pvalue = var_summary$coefficients[2,4],
pval = var_summary$coefficients[2,4]
))
}
} else{
return(NULL)
}
}
###Categorical###
regress_cat <- function(data, varying_covariates, outcome, var_name, regression_family){
# Create a regression formula and a restricted regression formula
if(length(varying_covariates)>0){
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), "+", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
fmla_restricted <- paste(quote_name(outcome), "~", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
} else {
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), sep="")
fmla_restricted <- paste(quote_name(outcome), "~1", sep="")
}
# Run GLM Functions
var_result <- tryCatch(glm(stats::as.formula(fmla), family=regression_family, data=data, na.action=na.omit),
error=function(e) warn_on_e(var_name, e))
# Only run if the full model did not have an error (isn't NULL)
if(!is.null(var_result)){
restricted_result <- tryCatch(glm(stats::as.formula(fmla_restricted), family=regression_family,
data=var_result$model), # Use the same data as the full model
error=function(e) warn_on_e(var_name, e))
} else {
restricted_result <- NULL
}
if(!is.null(restricted_result)){
# Get the LRT using anova
lrt <- list(p=NA) # Start with NA for p in case anova fails
tryCatch(lrt <- anova(var_result, restricted_result, test = "LRT"), error=function(e) warn_on_e(var_name, e))
result <- data.frame(
Converged = var_result$converged,
LRT_pvalue = lrt$`Pr(>Chi)`[2],
Diff_AIC = var_result$aic - restricted_result$aic,
pval = lrt$`Pr(>Chi)`[2]
)
# Expand to multiple rows if reporting betas
if(report_categorical_betas){
vars <- setdiff(names(var_result$coefficients), names(restricted_result$coefficients))
var_summary <- summary(var_result)
num_coeff_cols <- length(var_summary$coefficients)/nrow(var_summary$coefficients)
if (num_coeff_cols < 2){
# Assume non-convergence if no p values are generated
return(NULL)
} else if (num_coeff_cols == 2) {
Beta = var_summary$coefficients[vars,1]
SE = var_summary$coefficients[vars,2]
Beta_pvalue = 1.0
} else {
Beta = var_summary$coefficients[vars,1]
SE = var_summary$coefficients[vars,2]
Beta_pvalue = var_summary$coefficients[vars,4]
}
result <- result[rep(1, length(vars)), ]
result$Category <- vars
result$Beta <- Beta
result$SE <- SE
result$Beta_pvalue <- Beta_pvalue
}
return(result)
} else {
return(NULL)
}
}
regress_cat_survey <- function(data, varying_covariates, outcome, var_name, regression_family,
weight_values, strata_values, fpc_values, id_values, subset_array, ...) {
# Create survey design
if(is.null(id_values)){
survey_design <- survey::svydesign(ids = ~1,
weights = weight_values,
data = data,
strata = strata_values,
fpc = fpc_values, ...)
} else{
survey_design <- survey::svydesign(ids = id_values,
weights = weight_values,
data = data,
strata = strata_values,
fpc = fpc_values, ...)
}
# Subset the survey design. Including where the variable is NA to ensure same data is used for both models.
subset_array <- subset_array & !is.na(data[var_name])
survey_design <- subset(survey_design, subset_array)
# Create a regression formula and a restricted regression formula
if(length(varying_covariates)>0){
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), "+", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
fmla_restricted <- paste(quote_name(outcome), "~", paste(lapply(varying_covariates, quote_name), collapse="+"), sep="")
} else {
fmla <- paste(quote_name(outcome), "~", quote_name(var_name), sep="")
fmla_restricted <- paste(quote_name(outcome), "~1", sep="")
}
# Results using surveyglm
survey_design <<- survey_design # needed to make the anova function work
regression_family <<- regression_family # needed to make the anova function work
var_result <- tryCatch(survey::svyglm(stats::as.formula(fmla), design=survey_design, family=regression_family, na.action=na.omit),
error=function(e) warn_on_e(var_name, e))
# Restricted result uses the design from the full result to ensure the same observations are used.
# Otherwise some dropped by 'na.omit' may be included in the restricted model.
# Only run if the full model did not have an error (isn't NULL)
if(!is.null(var_result)){
restricted_result <- tryCatch(survey::svyglm(stats::as.formula(fmla_restricted), design=var_result$survey.design,
family=regression_family),
error=function(e) warn_on_e(var_name, e))
} else {
restricted_result <- NULL
}
# Collect results if restricted_result is not NULL
if(!is.null(restricted_result)){
# Get the LRT using anova
lrt <- list(p=NA) # Start with NA for p in case anova fails
tryCatch(lrt <- anova(var_result, restricted_result, method = "LRT"), error=function(e) warn_on_e(var_name, e))
result <- data.frame(
Converged = var_result$converged,
LRT_pvalue = lrt$p,
pval = lrt$p
)
# Expand to multiple rows if reporting betas
if(report_categorical_betas){
vars <- setdiff(names(var_result$coefficients), names(restricted_result$coefficients))
var_summary <- summary(var_result)
result <- result[rep(1, length(vars)), ]
result$Category <- vars
result$Beta <- var_summary$coefficients[vars, 1]
result$SE <- var_summary$coefficients[vars, 2]
result$Beta_pvalue <- var_summary$coefficients[vars, 4]
}
return(result)
} else {
return(NULL)
}
}
# General Regression function which applies some filters/tests before calling the actual regression
regress <- function(data, y, var_name, covariates, min_n, allowed_nonvarying, regression_family, var_type,
use_survey, single_weight, weights, strata, fpc, ids, subset_array, drop_unweighted, ...){
# The result list will be used to update results for this variable
result = list()
# Figure out which observations will drop due to NAs
subset_data <- complete.cases(data[, c(y, var_name, covariates)]) # Returns a boolean array
if(!is.null(subset_array)){
subset_data <- subset_data & subset_array
}
# Gather survey info if needed
if(use_survey){
# Get weight
if(single_weight){
weight <- weights
} else {
weight <- weights[[var_name]]
}
# Record weight name
if(is.null(weight)){
warning(paste(var_name, " had a NULL result because no weight was specified"))
return(NULL)
} else {
result$weight <- weight
}
# Get weight values, returning early if there is a problem with the weight
if(!(weight %in% names(data))){
# Weight values are missing
warning(paste(var_name, " had a NULL result because its weight (", weight, ") was not found"))
result$weight <- paste(weight, " (not found)")
return(data.frame(result, stringsAsFactors = FALSE))
}
missing_weight_count <- sum(!is.na(data[var_name]) & is.na(data[weight]) & subset_data)
if(missing_weight_count > 0){
# Some weights in the subset are missing when the variable is not
warning(paste(var_name, " had a NULL result because its weight (", weight, ") had ", missing_weight_count, " missing values when the variable was not missing"))
result$weight <- paste0(weight, " (", missing_weight_count, " observations are missing weights)")
if (!drop_unweighted){
# Return early with no result if dropping unweighted was not enabled
return(data.frame(result, stringsAsFactors = FALSE))
} else {
# Drop rows with missing weights
subset_data <- subset_data & !(!is.na(data[var_name]) & is.na(data[weight]))
}
}
# Get weights
weight_values <- data[weight]
# Fill NA weight values with 0 to pass an internal check by survey
weight_values[is.na(weight_values),] <- 0
# Load strata, fpc, and ids
if(!is.null(strata)){
strata_values <- data[strata]
} else {
strata_values <- NULL
}
if(!is.null(fpc)){
fpc_values <- data[fpc]
} else {
fpc_values <- NULL
}
if(!is.null(ids)){
id_values <- data[ids]
} else {
id_values <- NULL
}
}
# Skip regression if any covariates are constant (after removing NAs) without being specified as allowed
varying_covariates <- get_varying_covariates(data[subset_data,], covariates, var_name, allowed_nonvarying)
# If 'get_varying_covarites' returned NULL it found a nonvarying covariate the wasn't allowed)
if (is.null(varying_covariates) && !is.null(covariates)){
return(data.frame(result, stringsAsFactors = FALSE))
}
# Record N and skip regression if the min_n filter isn't met
non_na_obs <- sum(subset_data)
result$N <- non_na_obs
if (non_na_obs < min_n){
warning(paste(var_name, " had a NULL result due to the min_n filter (", non_na_obs, " < ", min_n, ")"))
return(data.frame(result, stringsAsFactors = FALSE))
}
# Standardize data if needed
if(standardize_data){
allowed_to_scale_cols <- colnames(data) %in% c(y, var_name, varying_covariates)
numeric_cols <- sapply(data, is.numeric) # Exclude factors
binary_cols <- sapply(data, function(s){all(s==0 | s==1 | is.na(s))}) # Exclude binary encoded as 0/1/missing
scale_cols <- allowed_to_scale_cols & numeric_cols & !binary_cols
data[scale_cols] <- scale(data[scale_cols])
}
# Run Regression for the single variable
if(!use_survey){
if(var_type == 'bin'){
regression_result <- regress_cont(data, varying_covariates, outcome=y, var_name, regression_family)
} else if(var_type == 'cat'){
regression_result <- regress_cat(data, varying_covariates, outcome=y, var_name, regression_family)
} else if(var_type == 'cont'){
regression_result <- regress_cont(data, varying_covariates, outcome=y, var_name, regression_family)
}
} else {
if(var_type == 'bin'){
regression_result <- regress_cont_survey(data, varying_covariates, outcome=y, var_name, regression_family,
weight_values, strata_values, fpc_values, id_values,
subset_data, ...)
} else if(var_type == 'cat'){
regression_result <- regress_cat_survey(data, varying_covariates, outcome=y, var_name, regression_family,
weight_values, strata_values, fpc_values, id_values,
subset_data, ...)
} else if(var_type == 'cont'){
regression_result <- regress_cont_survey(data, varying_covariates, outcome=y, var_name, regression_family,
weight_values, strata_values, fpc_values, id_values,
subset_data, ...)
}
}
# Update result with the regression results
if(!is.null(regression_result)){
regression_result[names(result)] <- result
} else {
regression_result <- data.frame(result, stringsAsFactors = FALSE)
}
# Return
return(regression_result)
}
#' ewas
#'
#' Run environment-wide association study, optionally using \code{\link[survey]{svydesign}} from the \pkg{survey} package
#' Note: It is possible to specify \emph{ids} and/or \emph{strata}. When \emph{ids} is specified without \emph{strata},
#' the standard error is infinite and the anova calculation for categorical variables fails. This is due to the
#' \href{http://r-survey.r-forge.r-project.org/survey/exmample-lonely.html}{lonely psu} problem.
#' @param d data.frame containing all of the data
#' @param bin_vars List of variables to regress that are binary
#' @param cat_vars List of variables to regress that are categorical
#' @param cont_vars List of variables to regress that are continuous
#' @param y name(s) of response variable(s)
#' @param bin_covars List of covariates that are continuous
#' @param cat_covars List of covariates that are categorical
#' @param cont_covars List of covariates that are continuous
#' @param regression_family family for the regression model as specified in glm ('gaussian' by default)
#' @param allowed_nonvarying list of covariates that are excluded from the regression when they do not vary instead of returning a NULL result.
#' @param min_n minimum number of observations required (after dropping those with NA values) before running the regression (200 by default)
#' @param weights NULL by default (for unweighted). May be set to a string name of a single weight to use for every variable, or a named list that maps variable names to the weights that should be used for that variable's regression
#' @param ids NULL by default (for no clusters). May be set to a string name of a column in the data which provides cluster IDs.
#' @param strata NULL by default (for no strata). May be set to a string name of a column in the data which provides strata IDs.
#' @param fpc NULL by default (for no fpc). May be set to a string name of a column in the data which provides fpc values.
#' @param subset_array NULL by default (for no subset). May be set to a boolean array used to subset the data after creating the design
#' @param report_categorical_betas FALSE by default
#' @param standardize_data FALSE by default
#' @param ... other arguments passed to svydesign which are ignored if 'weights' is NULL
#' @return data frame containing following fields Variable, Sample Size, Converged, SE, Beta, Variable p-value, LRT, AIC, pval, outcome, weight
#' @export
#' @family analysis functions
#' @examples
#' \dontrun{
#' ewas(d, cat_vars, cont_vars, y, cat_covars, cont_covars, regression_family)
#' }
ewas <- function(d, bin_vars=NULL, cat_vars=NULL, cont_vars=NULL, y,
bin_covars=NULL, cat_covars=NULL, cont_covars=NULL,
regression_family="gaussian", allowed_nonvarying=NULL, min_n=200, weights=NULL,
ids=NULL, strata=NULL, fpc=NULL, subset_array=NULL,
report_categorical_betas=FALSE, standardize_data=FALSE, ...){
# Record start time
t1 <- Sys.time()
# Record global options
report_categorical_betas <<- report_categorical_betas
standardize_data <<- standardize_data
# Validate inputs
#################
if(missing(y)){
stop("Please specify an outcome 'y' variable")
}
if(is.null(bin_vars)){
bin_vars <- list()
}
if(is.null(cat_vars)){
cat_vars <- list()
}
if(is.null(cont_vars)){
cont_vars <- list()
}
if(is.null(bin_covars)){
bin_covars <- list()
}
if(is.null(cat_covars)){
cat_covars <- list()
}
if(is.null(cont_covars)){
cont_covars <- list()
}
if(is.null(allowed_nonvarying)){
allowed_nonvarying <- list()
}
if(!is.null(ids)){
if(!(ids %in% colnames(d))){
stop(paste("'ids' was specified (", ids, ") but not found in the data", sep=""))
}
}
if(!is.null(strata)){
if(!(strata %in% colnames(d))){
stop(paste("'strata' was specified (", strata, ") but not found in the data", sep=""))
}
}
if(!is.null(fpc)){
if(!(fpc %in% colnames(d))){
stop(paste("'fpc' was specified (", fpc, ") but not found in the data", sep=""))
}
}
if(!is.null(ids) && is.null(strata) && is.null(fpc)){
warning("PSU IDs were specified without strata or fpc, preventing calculation of standard error")
}
# Ignore the covariates, outcome, and ID if they were included in the variable lists
remove <- c(y, bin_covars, cat_covars, cont_covars, "ID")
bin_vars <- setdiff(bin_vars, remove)
cat_vars <- setdiff(cat_vars, remove)
cont_vars <- setdiff(cont_vars, remove)
# Ignore the outcome, and ID if they were included in the covariates lists
remove <- c(y, "ID")
bin_covars <- setdiff(bin_covars, remove)
cat_covars <- setdiff(cat_covars, remove)
cont_covars <- setdiff(cont_covars, remove)
# Check data
if(class(d)[1] != "data.frame"){
stop("Data must be a data.frame object")
}
# Check weights
if(is.null(weights)){
print("Running without a survey design adjustment")
use_survey <- FALSE
} else if(class(weights) == "character"){
single_weight <- TRUE
if(!(weights %in% names(d))){
stop(paste(weights, "was specified as the weight, but was not found in the dataframe", sep=" "))
}
print("Running with a single weight used for all variables")
use_survey <- TRUE
} else if(class(weights) == "list"){
single_weight <- FALSE
print("Running with specific weights assigned for each variable")
use_survey <- TRUE
} else {
stop("weights must be a string or a list")
}
#Correct the types and check for IDs
#####################################
# ID
if(is.element('ID', names(d))==FALSE){stop("Please add ID to the data as column 1")}
d$ID <- factor(d$ID)
# Binary
if(length(bin_vars) > 0){d[bin_vars] <- lapply(d[bin_vars], factor)}
if(length(bin_covars) > 0){d[bin_covars] <- lapply(d[bin_covars], factor)}
# Categorical
if(length(cat_vars) > 0){d[cat_vars] <- lapply(d[cat_vars], factor)}
if(length(cat_covars) > 0){d[cat_covars] <- lapply(d[cat_covars], factor)}
# Continuous
if(length(cont_vars) > 0){
if(sum(sapply(d[cont_vars], is.numeric))!=length(cont_vars)){
# TODO: This isn't right
non_numeric_cont_vars <- setdiff(cont_vars, names(d[sapply(d, is.numeric)]))
stop("Some continuous variables are not numeric: ", paste(non_numeric_cont_vars, collapse=", "))
}
}
if (length(cont_covars) > 0){
if(sum(sapply(d[cont_covars], is.numeric))!=length(cont_covars)){
non_numeric_cont_covars <- setdiff(cont_covars, names(d[sapply(d, is.numeric)]))
stop("Some continuous covariates are not numeric: ", paste(non_numeric_cont_covars, collapse=", "))
}
}
# Get a combined vector of covariates (must 'unlist' lists to vectors)
covariates <- c(unlist(bin_covars), unlist(cat_covars), unlist(cont_covars))
# Run Regressions
#################
# Create a placeholder dataframe for results, anything not updated will be NA
if(report_categorical_betas & length(cat_vars)>0){
n <- length(bin_vars) + length(cont_vars) + sum(sapply(cat_vars, function(v) length(table(d[v]))-1))
} else {
n <- length(bin_vars) + length(cat_vars) + length(cont_vars)
}
ewas_result_df <- data.frame(Variable = character(n),
Variable_type = character(n),
N = numeric(n),
Converged = logical(n),
Beta = numeric(n),
SE = numeric(n),
Beta_pvalue = numeric(n),
LRT_pvalue = numeric(n),
Diff_AIC = numeric(n),
pval = numeric(n),
outcome = character(n),
weight = character(n),
stringsAsFactors = FALSE)
ewas_result_df[] <- NA # Fill df with NA values
ewas_result_df$Converged <- FALSE # Default to not converged
i <- 0 # Increment before processing each variable
# Process binary variables, if any
print(paste("Processing ", length(bin_vars), " binary variables", sep=""))
for(var_name in bin_vars){
# Update var name and outcome
i <- i + 1
ewas_result_df$Variable[i] <- var_name
ewas_result_df$outcome[i] <- y
ewas_result_df$Variable_type[i] <- "binary"
result <- regress(d, y, var_name, covariates, min_n, allowed_nonvarying, regression_family, var_type="bin",
use_survey, single_weight, weights, strata, fpc, ids, subset_array, ...)
# Save results
if(!is.null(result)){
ewas_result_df[i, colnames(result)] <- result
}
}
# Process categorical variables, if any
print(paste("Processing ", length(cat_vars), " categorical variables", sep=""))
for(var_name in cat_vars){
result <- regress(d, y, var_name, covariates, min_n, allowed_nonvarying, regression_family, var_type="cat",
use_survey, single_weight, weights, strata, fpc, ids, subset_array, ...)
# Save results
if(!is.null(result)){
for(ridx in 1:nrow(result)){
i <- i + 1
# Update var name and outcome
ewas_result_df$Variable[i] <- var_name
ewas_result_df$outcome[i] <- y
ewas_result_df$Variable_type[i] <- "categorical"
# Update results
ewas_result_df[i, colnames(result)] <- result[ridx,]
}
}
}
# Process continuous variables, if any
print(paste("Processing ", length(cont_vars), " continuous variables", sep=""))
for(var_name in cont_vars){
# Update var name and outcome
i <- i + 1
ewas_result_df$Variable[i] <- var_name
ewas_result_df$outcome[i] <- y
ewas_result_df$Variable_type[i] <- "continuous"
result <- regress(d, y, var_name, covariates, min_n, allowed_nonvarying, regression_family, var_type="cont",
use_survey, single_weight, weights, strata, fpc, ids, subset_array, ...)
# Save results
if(!is.null(result)){
ewas_result_df[i, colnames(result)] <- result
}
}
t2 <- Sys.time()
print(paste("Finished in", round(as.numeric(difftime(t2,t1, units="secs")), 6), "secs", sep=" "))
n_null_results <- sum(is.null(ewas_result_df$pval))
if (n_null_results > 0){
warning(paste(n_null_results, "of", nrow(ewas_result_df), "variables had a NULL result due to an error (see earlier warnings for details)"))
}
# Sort by pval
# TODO: Sort differently
if(report_categorical_betas){
ewas_result_df <- ewas_result_df[order(ewas_result_df$pval, ewas_result_df$Beta_pvalue),]
} else {
ewas_result_df <- ewas_result_df[order(ewas_result_df$pval),]
}
# Replace NA with 'None' for correct conversion back to Pandas format
ewas_result_df[is.na(ewas_result_df)]='None'
return(ewas_result_df)
}
|
75c11f6d5f7d0925bcc173072e6beb3c2514cdd5
|
e6b2a1c8b46e86551ed2a0711aca72d8b9f361a1
|
/homework/tf_answers/codigo/2_analisis.R
|
94cc19a4ec006861c8073eb8090f8dd112afcc13
|
[] |
no_license
|
JosefaHernandez/dar_soc4001
|
e597c2e7795dfaa2e7c1bc0b2950b9578c4e8b49
|
a817a119ca0fdf11fe91b81965da4b7c51c2f4bc
|
refs/heads/master
| 2023-07-25T06:07:50.099707
| 2021-08-25T18:30:48
| 2021-08-25T18:30:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,783
|
r
|
2_analisis.R
|
# 2_analisis.R: descriptive tables, plots, and regression models for Covid-19
# deaths per comuna. Relies on objects created by an upstream script:
# `covid_comunas` (data frame with demographics and death-rate columns named
# by date) and `dirresultados` (output directory), plus dplyr/tidyr/ggplot2/
# stargazer being attached — TODO confirm against the calling script.

# Descriptive statistics table
# Whole country
name_file <- paste0(dirresultados, "tabla_pais.txt")
covid_comunas %>% select(Poblacion, edad, esc,ytotcorh, npers, `2020-06-12`,`2020-11-27`) %>%
as.data.frame() %>%
stargazer(summary.stat = c("n", "mean","median", "sd"),
covariate.labels = c("Población", "Edad", "Escolaridad","Ingreso total hogar", "Número integrantes hogar", "Muertes por 100k hbs al 2020-06-12", "Muertes por 100k hbs al 2020-11-27"),
type = "text",
out = name_file)
# One descriptive table per region
regiones <- unique(covid_comunas$region_nombre)
for (r in regiones) {
name_file <- paste0(dirresultados, "tabla_",r,".txt")
cat("===== Región : ", r, "=====")
covid_comunas %>% select(region_nombre, Poblacion, edad, esc,ytotcorh, npers, `2020-06-12`,`2020-11-27`) %>%
filter(region_nombre==r) %>%
select(-region_nombre) %>%
as.data.frame() %>%
stargazer(summary.stat = c("n", "mean","median", "sd"),
covariate.labels = c("Población", "Edad", "Escolaridad","Ingreso total hogar", "Número integrantes hogar", "Muertes por 100k hbs al 2020-06-12", "Muertes por 100k hbs al 2020-11-27"),
type = "text",
out = name_file)
}
# Histogram of deaths per 100k inhabitants at the two snapshot dates
histograma <- covid_comunas %>% select(comuna,`2020-06-12`,`2020-11-27`) %>%
pivot_longer(-comuna, names_to = "mes", values_to = "muertes_100k") %>%
ggplot(aes(x=muertes_100k, group=mes, fill=mes,colour=mes)) + geom_histogram() +
labs(x="Muertos por cada 100 mil habitantes al día 2020-11-27", y="Recuento", title="Distribución de muertes por Covid-19 al día 2020-11-27 por comuna") +
scale_color_viridis_d(option="plasma") + scale_fill_viridis_d(option="plasma") +
facet_grid( . ~ mes)
name_file <- paste0(dirresultados, "histograma_muertes.jpg")
ggsave(name_file,histograma, width = 20, height = 12, units = c("cm"), dpi = 320)
# Per-comuna death trajectories over time, faceted by region
trajectorias <- covid_comunas %>%
pivot_longer(cols = `2020-06-12`:`2020-11-27`, names_to = "mes", values_to = "muertes_100k") %>%
ggplot(aes(x= as.Date(mes), y=muertes_100k, group=comuna, colour=region_nombre)) + geom_line(alpha=0.7) +
labs(x="Fecha", y="Muertes Covid-19 por 100k habitantes") +
scale_color_viridis_d(option="plasma") + scale_fill_viridis_d(option="plasma") +
facet_wrap( . ~ region_nombre)
name_file <- paste0(dirresultados, "trajectoria_muertes.jpg")
ggsave(name_file,trajectorias, width = 20, height = 12, units = c("cm"), dpi = 320)
# Regression table: demographics vs. death rate at each snapshot date
modelo_inicio <- lm(`2020-06-12` ~ edad + esc + ytotcorh + npers, data= covid_comunas)
modelo_actual <- lm(`2020-11-27` ~ edad + esc + ytotcorh + npers, data= covid_comunas)
name_file <- paste0(dirresultados, "modelos_muertes.txt")
stargazer(modelo_inicio, modelo_actual,
type = "text",
column.labels = c("Junio 2020", "Noviembre 2020"),
out = name_file)
|
e2ca74ad7a0b0a632b280eebb2d156ee1c108ee0
|
8c4a74b0a344440a15a2edee5bb761bcd2dfcad9
|
/man/testClosedBelow.Rd
|
dacbc37b7a9a56e16573efb36dab6609f2715552
|
[
"MIT"
] |
permissive
|
xoopR/set6
|
341950b7649629dc9594b9230710df5140679bf7
|
e65ffeea48d30d687482f6706d0cb43b16ba3919
|
refs/heads/main
| 2023-05-22T22:46:30.493943
| 2022-08-27T17:20:08
| 2022-08-27T17:20:08
| 197,164,551
| 9
| 0
|
NOASSERTION
| 2021-11-16T15:02:05
| 2019-07-16T09:36:22
|
R
|
UTF-8
|
R
| false
| true
| 1,016
|
rd
|
testClosedBelow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assertions.R
\name{testClosedBelow}
\alias{testClosedBelow}
\alias{checkClosedBelow}
\alias{assertClosedBelow}
\title{assert/check/test/ClosedBelow}
\usage{
testClosedBelow(object, errormsg = "This is not a set closed below")
checkClosedBelow(object, errormsg = "This is not a set closed below")
assertClosedBelow(object, errormsg = "This is not a set closed below")
}
\arguments{
\item{object}{object to test}
\item{errormsg}{error message to overwrite default if check fails}
}
\value{
If check passes then \code{assert} returns \code{object} invisibly and \code{test}/\code{check}
return \code{TRUE}. If check fails, \code{assert} stops code with error, \code{check} returns
an error message as string, and \code{test} returns \code{FALSE}.
}
\description{
Validation checks to test if a given object is closed below.
}
\examples{
testClosedBelow(Interval$new(1, 10, type = "[]"))
testClosedBelow(Interval$new(1, 10, type = "(]"))
}
|
e5ab26e532a89b3e0f7746afeb756457c1916ebf
|
0ed9873bfbe30499aeaad03fbdef16204dec8296
|
/Stocks and Bonds.R
|
8381129244c659a5ee23295ce5d90b283c054be1
|
[] |
no_license
|
levineol/stats-r
|
85c76dee0d323c7a35e5f237738d0e9192460c4f
|
0aa1bd636cb0ed61de8334d2b7487668c224026f
|
refs/heads/master
| 2020-04-07T10:52:52.672856
| 2018-11-19T23:37:45
| 2018-11-19T23:37:45
| 158,303,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
r
|
Stocks and Bonds.R
|
source("http://jgscott.github.io/teaching/r/mvnorm/rbvnorm.R")
# Annual-return assumptions: means, standard deviations, and the
# stock/bond correlation used by the bivariate-normal simulations below.
mu_stocks = 0.065
mu_bonds = 0.017
sd_stocks = 0.195
sd_bonds = 0.075
rho = -0.15
# Draw 50 joint (stock, bond) annual returns and scatter-plot them to
# eyeball the negative correlation. rbvnorm() comes from the sourced file.
returns = rbvnorm(50, mu_stocks, mu_bonds, sd_stocks, sd_bonds, rho)
plot(returns)
# Simulate 40 years of an all-stock portfolio starting from $10,000.
# Stochastic: every run gives a different result unless a seed is set.
Wealth = 10000
Horizon = 40
for(year in 1:Horizon) {
return_stocks = rnorm(1, mu_stocks, sd_stocks)
Wealth = Wealth * (1 + return_stocks)
}
Wealth
# Simulate a 60/40 stock/bond portfolio, rebalanced back to the target
# weights at the end of every year.
total_wealth = 10000
weights = c(0.6, 0.4) # how much of your wealth in each asset?
wealth_by_asset = total_wealth * weights
Horizon = 40
for(year in 1:Horizon) {
# Simulate a bivariate normal set of returns
returns = rbvnorm(1, mu_stocks, mu_bonds, sd_stocks, sd_bonds, rho)
# Update wealth in each asset
wealth_by_asset = wealth_by_asset * (1 + returns)
# rebalance
total_wealth = sum(wealth_by_asset)
wealth_by_asset = total_wealth * weights
}
total_wealth
|
0951110db2fd0248b15ed892f13572f00b478a53
|
897251074da9cac85a547b9e7f380ef06a0a8a98
|
/Makefile.R
|
486dd92d88be8b63803bb549096d9e40a5c1636f
|
[] |
no_license
|
rongmastat/stat-627
|
a09b43a61ba8ff6a134b8d2505a952c59f71893a
|
af7897c0c41172e64de4493f6c24ced685cf856e
|
refs/heads/master
| 2021-06-26T21:09:56.565166
| 2016-11-29T13:54:32
| 2016-11-29T13:54:32
| 42,261,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
Makefile.R
|
# One script to rule them all.
# Runs the full pipeline in order: download the raw data, analyze it, then
# produce the plot. Each sourced script must live in the working directory
# and is expected to leave its outputs where the next step can find them.
source("00_download_data.R")
source("01_data_analysis.R")
source("02_make_plot.R")
|
535f6c8ed18c87ba6518e102c8631483ebe2d698
|
1aaffc3ad2c374ca5d308fecb92f9f832110d97c
|
/worksheets/recipe_examples/02_variable_recipe.r
|
5853198e58540a64c75209ae83968168e92a08a8
|
[] |
no_license
|
bertozzivill/infx572_winter17
|
ab3a234a16904faa328da4a82ec16e49c2873b3a
|
87547966dadc480a3756a16f780e0fd4f99f68f3
|
refs/heads/master
| 2021-01-12T03:35:32.212280
| 2019-10-24T05:13:13
| 2019-10-24T05:13:13
| 78,232,320
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
r
|
02_variable_recipe.r
|
## ---------------------------------------------------------
## Blueberry muffin recipe, scaled from 12 muffins down to 9.
## The literals below are the original 12-muffin quantities;
## each ingredient is multiplied by the same scaling ratio.
## ---------------------------------------------------------

## ratio of the desired batch size (9) to the recipe's yield (12)
multiplier <- 9 / 12

3 * multiplier     # all-purpose flour (cups)
2 * multiplier     # sugar (tablespoons)
1.5 * multiplier   # baking powder (tablespoons)
0.5 * multiplier   # salt (teaspoons)
3 * multiplier     # eggs
0.5 * multiplier   # butter (cups)
1 * multiplier     # buttermilk (cups)
1 * multiplier     # vanilla extract (teaspoons)
2 * multiplier     # frozen blueberries (cups)
|
c19cffbcda8b3098c57a53563ba6a605542985bf
|
8646d753247f7dddea6d73d81422b1ec742c9f5a
|
/WSGeometry/R/bin2d.R
|
463ad22462ff723c7c3ab20b36c2844c96f596e4
|
[] |
no_license
|
akhikolla/updatedatatype-list4
|
24f5271c9d2807aca7dc8620b56eb96e740cd3c4
|
0a2a0a46e9fb6e066f30899c4cb3c016ba3a1504
|
refs/heads/master
| 2023-04-01T20:11:05.484102
| 2021-04-02T01:19:34
| 2021-04-02T01:19:34
| 352,124,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
bin2d.R
|
#' Bin weighted 2-D data onto a grid.
#' @description Bins weighted points with positions in the unit square
#' [0,1]^2 onto an equidistant gridsize[1] x gridsize[2] grid, summing the
#' weight that falls into each cell.
#' @param data.pos A Mx2 matrix specifying the positions of the data measure.
#' @param data.weights Non-negative weights of the data measure, one per row
#' of data.pos (indexed with a logical mask, so a numeric vector is expected).
#' @param gridsize A vector of two integers specifying the dimensions of the
#' grid, which the data should be binned to.
#' @param turn A boolean specifying whether the output should be rotated to
#' keep the previous orientation when the matrix is plotted with image.
#' @return A matrix containing the weights of the measure in each bin.
#' @export
bin2d <- function(data.pos, data.weights, gridsize, turn = FALSE) {
  binned <- matrix(0, gridsize[1], gridsize[2])
  # Scale each coordinate by its grid dimension so that cell (i, j) covers
  # the half-open square [i - 0.5, i + 0.5) x [j - 0.5, j + 0.5).
  scaled <- sweep(data.pos, 2, gridsize, `*`)
  for (i in seq_len(gridsize[1])) {
    in_row <- scaled[, 1] >= i - 0.5 & scaled[, 1] < i + 0.5
    for (j in seq_len(gridsize[2])) {
      in_col <- scaled[, 2] >= j - 0.5 & scaled[, 2] < j + 0.5
      binned[i, j] <- sum(data.weights[in_row & in_col])
    }
  }
  if (turn == TRUE) {
    # Transpose then reverse each column: rotates the matrix so image()
    # shows it with the original orientation.
    binned <- apply(t(binned), 2, rev)
  }
  return(binned)
}
|
86bba6a43b760d48eba033eaf2e43e18c6dea2f0
|
b19114efbef9e22421753abd5dfa4520806718c3
|
/R/cmd_add.R
|
318c5268bb10e50b860ee65e988acb0a4662bd6f
|
[
"MIT"
] |
permissive
|
mcomsa/ado
|
3c2b041a0bd88a22c385ac3d77bad1686b147127
|
d403f435eb3f4b5e9f58bfd8dad9df3a4b0eed92
|
refs/heads/master
| 2020-03-25T12:15:22.350946
| 2018-08-03T14:44:51
| 2018-08-03T14:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,563
|
r
|
cmd_add.R
|
## Add user-defined commands provided at runtime, rather than defined in this
## package's source.
ado_cmd_addCommand <-
function(context, expression, option_list=NULL)
{
if(context$debug_match_call)
return(match.call())
valid_opts <- c("env", "newname")
option_list <- validateOpts(option_list, valid_opts)
#Figure out which environment we should look for this function in
if(hasOption(option_list, "env"))
{
env <- optionArgs(option_list, "env")
raiseif(is.null(env), msg="Must provide an environment with option env")
raiseif(length(env) > 1, msg="Too many envs")
env <- env[[1]]
#Get the environment
env <- tryCatch(as.environment(env), error=function(e) e)
raiseif(inherits(env, "error"), msg="No such environment")
} else
{
env <- globalenv()
}
#Figure out what name we should use for it
if(hasOption(option_list, "newname"))
{
nm <- optionArgs(option_list, "newname")
raiseif(is.null(nm), msg="Must provide a new name with option newname")
raiseif(length(nm) > 1, msg="Too many new names")
nm <- nm[[1]]
} else
{
nm <- as.character(expression)
}
#Get the function from the environment
src <- as.character(expression)
fn <- tryCatch(get(src, envir=env, mode="function", inherits=FALSE),
error=function(e) e)
raiseif(inherits(fn, "error"), msg="No such function")
context$usercmd_set('ado_cmd_' %p% nm, fn)
return(invisible(NULL))
}
|
357714ee2ff94ccda77b728df235c03e08b04a91
|
2b9965f115cfb6acc674070dfb974546cc1837cb
|
/inst/shiny_apps/FRAME/Data_trim.R
|
ca3c6a1c8c9b6288d66ce2463ccfcd4166df4680
|
[] |
no_license
|
tcarruth/FRAME
|
d378f8fc186f0527710879af9fb351ce3b38fa63
|
268e45f98fe2ab7f27e92be539e87cb78f2b54ca
|
refs/heads/master
| 2021-04-15T16:59:05.658261
| 2019-04-18T14:52:43
| 2019-04-18T14:52:43
| 126,888,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,430
|
r
|
Data_trim.R
|
# Trim an S4 Data object back to its "life history year" (LHYear): keeps only
# the years up to and including LHYear in every year-indexed slot.
# Returns the trimmed Data object, or NA when trimming is impossible
# (LHYear missing, equal to the last year, or within 3 years of it).
# NOTE(review): is.na() is evaluated before is.null(); if @LHYear could ever
# be NULL, is.na(NULL) returns logical(0) and this if() would error first —
# confirm the slot is always a length-1 value.
Data_trimer<-function(Data){
if(is.na(Data@LHYear)|is.null(Data@LHYear)|Data@LHYear==Data@Year[length(Data@Year)]){
message("Data could not be trimmed, make sure LHYear is less than max(Year)")
return(NA)
}else if(Data@LHYear>(Data@Year[length(Data@Year)]-3)){
return(NA)
}else{
DataT<-Data
orglength<-length(Data@Year)
# indices of the years to keep (everything up to and including LHYear)
ind<-(1:length(Data@Year))[Data@Year<(Data@LHYear+1)]
newlength<-length(ind)
slots<-slotNames(Data)
for(i in 1:length(slots)){
temp<-slot(Data,slots[i])
# Heuristic: any slot whose length, or one of whose dimensions, equals the
# number of years is assumed to be indexed by year — along dim 2 for
# matrices/arrays, directly for vectors. TODO confirm no slot coincidentally
# matches this length without being year-indexed.
if(orglength%in%dim(temp)|length(temp)==orglength){
dims<-dim(temp)
ndim<-length(dims)
if(ndim==2){
slot(DataT,slots[i])<-array(slot(Data,slots[i])[,ind],c(dim(temp)[1],newlength))
}else if(ndim==3){
slot(DataT,slots[i])<-array(slot(Data,slots[i])[,ind,],c(dim(temp)[1],newlength,dim(temp)[3]))
}else{
slot(DataT,slots[i])<-slot(Data,slots[i])[ind]
}
}
}
return(DataT)
}
}
# Coarsen a catch-at-length (CAL) composition onto fewer, wider length bins.
#   Data  - object with S4 slots @CAL_bins (length-bin vector) and
#           @CAL (sims x years x bins array) — presumably a DLMtool-style
#           Data object; confirm against the class definition.
#   nbins - approximate number of coarse bins to keep (default 10).
#   simno - which simulation slice of @CAL to collapse (default 1).
# Returns Data with @CAL_bins/@CAL replaced by the coarsened versions; @CAL
# collapses to a single simulation with dim c(1, nyears, maxbin).
CALsimp<-function(Data,nbins=10,simno=1){
oldbins<-Data@CAL_bins
nold<-length(oldbins)
# Map each old bin to a coarse-bin index by grouping consecutive runs.
# NOTE(review): if nbins > nold, floor(nold/nbins) is 0 and `ind` becomes
# all-NA — confirm callers guarantee nbins <= length(CAL_bins).
ind<-rep((1:nold),each=floor(nold/nbins))[1:nold]
maxbin<-max(ind)
# New bin vector: the first old bin of each coarse group, plus the last bin.
newCAL_bins<-c(Data@CAL_bins[match(1:maxbin,ind)],Data@CAL_bins[nold])
ny<-dim(Data@CAL)[2]
newCAL<-array(0,c(1,ny,maxbin))
# Accumulate each old bin's counts into its coarse bin, for every year.
for(b in 1:(nold-1)) newCAL[1,,ind[b]]<-newCAL[1,,ind[b]]+Data@CAL[simno,,b]
Data@CAL_bins<-newCAL_bins
Data@CAL<-newCAL
Data
}
|
5aaa3295aa8d628c2270f8fd4db229e660d0e1b5
|
5a2137abc519deb2c19b19922926a48901d4be62
|
/R/otus_correlation_cluster.r
|
cd93e1d4d49ffbdfb4a3adf37f008f81a53416cf
|
[] |
no_license
|
markap/PandaPlayground
|
92e9cf3a6548887cfef012ecf3fcfcfc0150ac2d
|
921e0c857259384f2668107d125c89f7ed862d98
|
refs/heads/master
| 2021-01-19T03:01:26.073543
| 2014-03-29T18:48:48
| 2014-03-29T18:48:48
| 17,605,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
otus_correlation_cluster.r
|
# Hierarchical clustering of OTUs using correlation distance (1 - Pearson r).
# Interactive script: opens an X11 window, saves each dendrogram as a JPEG,
# and waits for Enter on stdin between plots.
library(gplots)
otus = read.csv('../preprocessed/otus.csv', row.name=1)
# Distance between OTUs (rows): 1 minus the correlation of their profiles,
# so perfectly correlated OTUs are at distance 0.
cor_otus <- as.dist(1-cor(t(otus)))
h_cluster <- hclust(cor_otus)
X11()
#png("myplot.png")
plot(h_cluster)
savePlot("out.jpg", type="jpeg")
#dev.off()
message("Press Return To Continue")
invisible(readLines("stdin", n=1))
# Cut the full tree into 10 groups, then re-cluster and plot each group
# separately (groups with a single member are skipped).
groups = cutree(h_cluster, k=10)
x<-cbind(otus, groups)
for (i in 1:10) {
df = subset(x, groups==i)
print(row.names(df))
print("----------")
# drop the helper column before computing within-group correlations
df$groups <- NULL
if (nrow(df) > 1) {
tmp_cor <- as.dist(1-cor(t(df)))
tmp_cluster <- hclust(tmp_cor)
plot(tmp_cluster)
savePlot(paste(i, ".jpg", sep=""), type="jpeg")
message("Press Return To Continue")
invisible(readLines("stdin", n=1))
}
}
#for testing
#sort(cor(t(otus))[,'OTU_53'])
|
8a6514f67a718e07f3eefe692557189c466bad5e
|
91be8ed16a4daad36b22acdd172d75a6a1674693
|
/run_analysis.R
|
f43d97397c7d9016ce3ceda9742c0324c81a29ff
|
[] |
no_license
|
RaymondJiangkw/Getting_And_Cleaning_Data_Project
|
3c9b9730f26c2a23f7b6e6ab56bc158ea122c0e6
|
4297b4b2fcb21b38698ae1fa83ab0b1e5af6c8cb
|
refs/heads/master
| 2020-12-26T05:41:38.444208
| 2020-01-31T11:32:04
| 2020-01-31T11:32:04
| 237,404,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,775
|
r
|
run_analysis.R
|
# Getting and Cleaning Data Project from @Johns Hopkins University
# Author: RaymondKevin
# Date: 2020-01-31
# Builds a tidy dataset from the UCI HAR (smartphone accelerometer) data:
# merges train/test sets, keeps only mean/std measurements, and writes the
# per-subject, per-activity averages to tidyData.txt.
# Do Preparations and Get the Data
library(reshape2)
library(dplyr)
path = ".//src//UCI HAR Dataset/"
Features_Label <- read.table(file.path(path,"features.txt"),col.names = c("index","featureNames"))
Activity_Label <- read.table(file.path(path,"activity_labels.txt"),col.names = c("classLabels","activityName"))
# Extract only the measurements on the mean and standard deviation for each
# measurement: feature names containing "mean()" or "std()".
Features_Selected <- grep("(mean|std)\\(\\)",Features_Label$featureNames)
measurements <- Features_Label[Features_Selected,]$featureNames
# strip the "()" so the names are valid, cleaner column names
measurements <- gsub('[()]','',measurements)
# Load Training Datasets (X = measurements, Y = activity codes, subject ids)
train <- read.table(file.path(path,"train//X_train.txt"))[,Features_Selected]
colnames(train) <- measurements
trainActivities <- read.table(file.path(path,"train//Y_train.txt"),col.names = c("Activity"))
trainSubjects <- read.table(file.path(path,"train//subject_train.txt"),col.names = c("Subject"))
train <- cbind(trainSubjects,trainActivities,train)
# Load Test Datasets (same layout as the training files)
test <- read.table(file.path(path,"test//X_test.txt"))[,Features_Selected]
colnames(test) <- measurements
testActivities <- read.table(file.path(path,"test//Y_test.txt"),col.names = c('Activity'))
testSubjects <- read.table(file.path(path,"test//subject_test.txt"),col.names = c('Subject'))
test <- cbind(testSubjects,testActivities,test)
# Merge train and test row-wise (columns are identical by construction)
mergedDT = rbind(train,test)
# Create a second, tidy data set: melt to long form, then cast back taking
# the mean of every measurement per (Subject, Activity) pair.
colNames <- c("Subject","Activity")
tidyDT = melt(data = mergedDT,id = colNames,measure.vars = setdiff(colnames(mergedDT),colNames)) %>% dcast(Subject + Activity~variable,mean)
# Write the file
write.table(tidyDT, file.path(path,"tidyData.txt"),sep = ",",row.names = FALSE)
|
55712595bbc39623706fd3012ac8ca156bba820f
|
4910e6fcaa1556c8916dcd6031d2288201443d9d
|
/spPlotSampCourse/man/stdizePlots.Rd
|
d93e76f952a2e8481bd5b19f8dbb53a45ac93aed
|
[] |
no_license
|
jayverhoef/spPlotSampCourse_package
|
80c5537a08a9d7f4f08e17878493f3a65c5272e4
|
107608dcc29ce93c9926112781e955b46dfa6baa
|
refs/heads/master
| 2021-01-20T11:00:20.312211
| 2013-06-05T13:03:23
| 2013-06-05T13:03:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
rd
|
stdizePlots.Rd
|
\name{stdizePlots}
\alias{stdizePlots}
\title{Standardize the plot coordinates}
\usage{
stdizePlots(plots, xmean.fix, ymean.fix, xystdv.fix)
}
\arguments{
\item{plots}{as polygons of sp Class SpatialPolygons}
\item{xmean.fix}{mean for standardizing x coordinates}
\item{ymean.fix}{mean for standardizing y coordinates}
\item{xystdv.fix}{standard deviation for standardizing
both x and y coordinates}
}
\value{
a list, where the input plots have standardized
coordinates in the plts item. The original plot areas are
returned as the plts.originalarea item. The values used for
standardization are arguments xmean.fix, ymean.fix, and
xystdv.fix. Standardized x values are computed as
(x-xmean.fix)/xystdv.fix and standardized y values are
computed as (y-ymean.fix)/xystdv.fix.
}
\description{
Standardize the plot coordinates
}
\author{
Jay Ver Hoef \email{jay.verhoef@noaa.gov}
}
|
df966b7cf18c8dc92edf47778d940a643fcc8275
|
a53ca7e5df8e663bad100a0aa5bc47c74274ce58
|
/R/PrzetwarzanieDanychUstrukturyzowanych/PD3/bitcoin_analysis/posts_questions_answers.R
|
b771762cb3af9cf24ddd35f1f5302d3141966b70
|
[] |
no_license
|
pawel99k/Studia
|
f4799e41b7468e9a2d3a9493c34fd283b2ee3ed5
|
bfd378adcc64d2d47a5e44520edd631901792e6d
|
refs/heads/master
| 2021-06-16T19:32:16.816899
| 2021-04-15T11:11:01
| 2021-04-15T11:11:01
| 191,762,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
posts_questions_answers.R
|
# Find the five most-viewed questions on bitcoin.stackexchange and pair each
# with its top-scoring answer.
# xmluj() (sourced below) parses the Posts.xml dump; judging by the
# `Posts[PostTypeId==1,]` syntax it returns a data.table — TODO confirm.
source("xmluj.R")
xmluj("bitcoin.stackexchange.com/Posts.xml") -> Posts
# PostTypeId 1 = question, 2 = answer
Posts1 <- Posts[PostTypeId==1,]
Posts2 <- Posts[PostTypeId==2]
# XML attributes arrive as strings; convert the fields we sort/join on
Posts1$ViewCount <- as.numeric(Posts1$ViewCount)
Posts2$ParentId <- as.numeric(Posts2$ParentId)
Posts2$Score <- as.numeric(Posts2$Score)
# top 5 questions by view count
Posts1[order(Posts1$ViewCount, decreasing = TRUE)][1:5,] -> bestquestions
bestquestions$Id -> bqId
# all answers belonging to those questions
Posts2[ParentId %in% bqId] -> bqanswers
# sort so the highest-scoring answer comes first within each question, then
# keep only that first (best) answer per question
bqanswers[order(bqanswers$ParentId, bqanswers$Score, decreasing = T)] -> bqanswers
# NOTE(review): columns selected by position (c(6, 18) and c(1, 5, 6, 11));
# presumably ParentId/Body and Id/ViewCount/Title — fragile if the parsed
# schema changes, confirm against xmluj()'s output.
bqanswers[!duplicated(bqanswers$ParentId), c(6, 18)] -> ba
bestquestions[,c(1, 5, 6, 11)] -> bq
bq$Id <- as.numeric(bq$Id)
# join each best question with its best answer's body
merge(bq, ba, by.y = "ParentId", by.x = "Id") -> out
colnames(out)[5] <- "Answer"
out[order(out$ViewCount, decreasing = T),] -> out
head(out$Title)
head(out$Answer)
|
71aff68397faab3c1d047076892151c48eeb6278
|
ae3a01bcafd7b940c15d8edb9b5a4105655d5fe2
|
/source_functions/ww_genetic_corr_start.R
|
80d2c5eab89365042cce38e19905442604f0f1e4
|
[] |
no_license
|
harlydurbin/angus_hairshed
|
a5c29713c340f839e3fe6b6ae5f831b812555d11
|
dc31e4d5bb69945ae41753f494896aacea272133
|
refs/heads/master
| 2023-03-02T01:42:41.507055
| 2021-02-12T19:00:08
| 2021-02-12T19:00:08
| 276,473,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,664
|
r
|
ww_genetic_corr_start.R
|
library(readr)
library(dplyr)
library(tidyr)
library(stringr)
library(tibble)
library(glue)
library(rlang)
library(lubridate)
library(magrittr)
library(purrr)
library(readxl)
library(tidylog)
## ww_genetic_corr_start.R
## Builds the input files (data + pedigree + genotype pull list) for
## single-step models of the genetic correlation between weaning weight (WW)
## and hair shedding score. Files are written in a space-delimited,
## header-less layout with "0" as the missing-value code — presumably the
## format expected by the downstream BLUPF90-family software; confirm.
## Depends on project helpers sourced below (melt_hair_weights, three_gen,
## hair_ped) and the tidyverse packages attached at the top of the file.
source(here::here("source_functions/hair_weights.R"))
source(here::here("source_functions/three_gen.R"))
#### Setup ####
## ---------------------------------------------------------------------------------------------------------------------------
source(here::here("source_functions/hair_ped.R"))
start <-
read_rds(here::here("data/derived_data/start.rds"))
# vector of registration numbers with genotypes available
genotyped <-
read_table2(here::here("data/derived_data/genotyped_id.txt"), col_names = FALSE) %>%
pull(X1)
## ---------------------------------------------------------------------------------------------------------------------------
## Weights
# All hair shedding growth data
hair_weights <-
melt_hair_weights(path = here::here("data/raw_data/HairShedGrowthData_090919.csv"), full_ped = hair_ped)
# Weaning weight data from dams with hair shedding scores or their calves
wean_dat <-
hair_weights %>%
# Create a list of CGs containing dams with hair scores or their calves
filter(dam_reg %in% start$full_reg | full_reg %in% start$full_reg) %>%
filter(trait == "ww") %>%
group_by(cg_num) %>%
# Drop WW CGs with fewer than 5 animals
filter(n() >= 5) %>%
ungroup() %>%
distinct(cg_num) %>%
# Re-join data for animals in remaining CGs
left_join(hair_weights %>%
filter(trait == "ww")) %>%
distinct()
#### Data ####
## Model 1: one row per record; hair score rows and WW rows stacked
model1dat <-
start %>%
select(full_reg, hair_cg = cg_num, hair_score) %>%
# Attach weaning weight data
bind_rows(wean_dat %>%
select(full_reg, wean_cg = cg_num, adj_weight)) %>%
select(full_reg, hair_cg, wean_cg, hair_score, adj_weight) %>%
# "0" = missing-value code in the output files
mutate_all(~ replace_na(., "0")) %>%
arrange(desc(full_reg))
model1dat %>%
write_delim(here::here("data/derived_data/ww_genetic_corr/single_step/model1/data.txt"),
delim = " ",
col_names = FALSE)
## Model 2: dam's hair score matched to her calf's weaning weight
model2dat <-
start %>%
select(dam_reg = full_reg, hair_cg = cg_num, hair_score, year) %>%
full_join(
wean_dat %>%
# Join by hair shedding scoring year of dam, weaning year of calf
mutate(year = lubridate::year(weigh_date)) %>%
select(full_reg, dam_reg, ww_cg = cg_num, adj_weight, year),
by = c("dam_reg", "year")
) %>%
rownames_to_column(var = "rowname") %>%
# FDC for fake dummy calf: placeholder id for scored dams with no WW record
mutate(
full_reg =
case_when(
is.na(full_reg) ~ as.character(glue("FDC{rowname}")),
TRUE ~ full_reg)
) %>%
select(full_reg, dam_reg, hair_cg, ww_cg, hair_score, adj_weight) %>%
mutate_all(~ replace_na(., "0"))
# Models 2 and 3 share the same data file; write it to both directories
purrr::map(.x = c("model2", "model3"),
~ write_delim(x = model2dat %>%
select(-dam_reg),
path = here::here(glue("data/derived_data/ww_genetic_corr/single_step/{.x}/data.txt")),
delim = " ",
col_names = FALSE))
#### Ped ####
## Model 1
model1ped <-
model1dat %>%
select(full_reg) %>%
distinct() %>%
left_join(
hair_ped %>%
select(full_reg, sire_reg, dam_reg)
) %>%
# Three generation pedigree
three_gen(full_ped = hair_ped) %>%
mutate_all(~ replace_na(., "0"))
model1ped %>%
write_delim(here::here("data/derived_data/ww_genetic_corr/single_step/model1/ped.txt"),
delim = " ",
col_names = FALSE)
## Model 2: dummy calves get an unknown sire and their scored dam
fdc_dummy_ped <-
model2dat %>%
filter(str_detect(full_reg, "^FDC")) %>%
mutate(sire_reg = "0") %>%
select(full_reg, sire_reg, dam_reg) %>%
bind_rows(hair_ped)
model2ped <-
model2dat %>%
select(full_reg) %>%
distinct() %>%
left_join(
fdc_dummy_ped %>%
select(full_reg, sire_reg, dam_reg)
) %>%
# Three generation pedigree
three_gen(full_ped = fdc_dummy_ped) %>%
mutate_all(~ replace_na(., "0"))
purrr::map(.x = c("model2", "model3"),
~ write_delim(x = model2ped,
path = here::here(glue("data/derived_data/ww_genetic_corr/single_step/{.x}/ped.txt")),
delim = " ",
col_names = FALSE))
#### List of genotypes to pull ####
# Every pedigreed animal that also has a genotype, across both models
bind_rows(model1ped, model2ped) %>%
select(full_reg) %>%
filter(full_reg %in% genotyped) %>%
distinct() %>%
write_delim(path = here::here("data/derived_data/ww_genetic_corr/single_step/pull_list.txt"),
delim = " ",
col_names = FALSE)
|
d880ed5f7a565101b544a86d00b7159d33d8dac9
|
e120654c5c8380dcd8e993af2720446f2f05b075
|
/plot4.R
|
9ae4f5cd59792299156f3e927bb04b612267e080
|
[] |
no_license
|
robertbounds/ExData_Plotting1
|
971e95f9129d9a4d01844ee840197dc107a7078f
|
bab28786dbb613c061ee30c59bd177c3102a556f
|
refs/heads/master
| 2021-01-20T19:45:44.348187
| 2014-06-07T17:27:48
| 2014-06-07T17:27:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,468
|
r
|
plot4.R
|
### This file, plot4.R, contains five functions:
### (1) GAPplot;
### (2) voltPlot;
### (3) subMeterPlot;
### (4) GRPplot;
### (5) plot4.
###
### functions (1)-(4):
### take required input parameters
### (data, dayVec, and DayIndexVec)
### to accordingly subset data, and create
### the requested graph.
GAPplot <- function(data, dayVec, dayIndexVec) {
  ## Draw the Global Active Power series (column 3 of `data`) as a line
  ## plot, suppressing the default x-axis and labeling it instead with
  ## day names at the supplied row indices.
  gap <- as.numeric(data[[3]])
  plot(gap, type = "l", xaxt = "n", xlab = "", ylab = "Global Active Power")
  axis(1, at = dayIndexVec, labels = dayVec)
}
voltPlot <- function(data, dayVec, dayIndexVec) {
  ## Draw the Voltage series (column 5 of `data`) as a line plot, with
  ## day names on the x-axis at the supplied row indices.
  volts <- as.numeric(data[[5]])
  plot(volts, type = "l", xaxt = "n", xlab = "datetime", ylab = "Voltage")
  axis(1, at = dayIndexVec, labels = dayVec)
}
subMeterPlot <- function(data, dayVec, dayIndexVec) {
  ## Overlay the three energy sub-metering series (columns 7-9 of `data`)
  ## in black, red, and blue, add a borderless legend, and label the
  ## x-axis with day names at the supplied row indices.
  meters <- lapply(data[7:9], as.numeric)
  plot(meters[[1]], type = "l", xaxt = "n",
       xlab = "", ylab = "Energy sub metering")
  lines(meters[[2]], col = "red")
  lines(meters[[3]], col = "blue")
  axis(1, at = dayIndexVec, labels = dayVec)
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = 1,
         col = c("black", "red", "blue"),
         bty = "n")
}
GRPplot <- function(data, dayVec, dayIndexVec) {
  ## Draw the Global Reactive Power series (column 4 of `data`) as a line
  ## plot, with day names on the x-axis at the supplied row indices.
  grp <- as.numeric(data[[4]])
  plot(grp, type = "l", xaxt = "n",
       xlab = "datetime", ylab = "Global_reactive_power")
  axis(1, at = dayIndexVec, labels = dayVec)
}
plot4 <- function() {
## the plot4 function is tasked with reading in the
## necessary information from the data table,
## opening a png editor, and then calling each of
## the four screen plotting functions to write their
## images to the opened png file.
## The 2880 rows starting after row 66636 cover two full days of
## minute-resolution data — presumably the assignment's target dates;
## TODO confirm against the raw file's timestamps.
energyData <- read.csv2("household_power_consumption.txt",
colClasses = "character",
nrows = 2880,
skip = 66636
)
# x-axis tick labels and their row positions (1440 minutes per day)
days <- c("Thu", "Fri", "Sat")
dayIndeces <- c(1, 1440, 2880)
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), bg = NA)
## plotting functions called in this sequence
## to fill the 2x2 frame as required by the assignment.
GAPplot(energyData, days, dayIndeces)
voltPlot(energyData, days, dayIndeces)
subMeterPlot(energyData, days, dayIndeces)
GRPplot(energyData, days, dayIndeces)
dev.off()
}
|
8bae12f0130e0508dca006fada4704e63e959fb9
|
b471a4ea59cc3d74d57b8f35564c83e53b5b61f4
|
/Project 1/DATA607_Project1_Full.R
|
0082ef26877bec19220f25c47e6cb12382a2b42c
|
[] |
no_license
|
Jennier2015/DATA-607
|
54f95107d89a6c61152e06184c4d6289111df8f4
|
ed2722e6fefc481b7f7241d0ed57df1110494daf
|
refs/heads/master
| 2021-01-13T11:17:19.723702
| 2017-03-24T04:02:49
| 2017-03-24T04:02:49
| 81,400,574
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,730
|
r
|
DATA607_Project1_Full.R
|
# DATA 607 - Project 1 Chess Tournament Results
# Introduction
# In this project, you're given a text file with chess tournament results where the information has some structure. Your
# job is to create an R Markdown file that generates a .CSV file (that could for example be imported into a SQL database)
# with the following information for all of the players:
# Player's Name, Player's State, Total Number of Points, Player's Pre-Rating, and Average Pre Chess Rating of Opponents
# For the first player, the information would be:
# Gary Hua, ON, 6.0, 1794, 1605
library(stringr)
# Read Data
# Raw chess-tournament cross table; players occupy fixed-format text rows.
tournament <- read.csv('https://raw.githubusercontent.com/Jennier2015/DATA-607/master/Project%201/tournamentinfo.txt', skip = 3)
head(tournament)
# Clean Data using Regular Expressions
# Extract Player's Name (runs of uppercase words, allowing hyphens)
Player_Name <- str_trim(unlist(str_extract_all(unlist(tournament), "[[:upper:]_]+(\\ [[:upper:]_]+ ([[:upper:]_]|-)* [[:upper:]_]*)")))
Player_Name
# Extract Player's State (two uppercase letters padded by spaces)
Player_State <- str_trim(unlist(str_extract_all(unlist(tournament), " {3}[[:upper:]]{2} ")))
Player_State
# Extract Total Number of Points (e.g. "6.0")
Total_Number_Points <- str_trim(unlist(str_extract_all(unlist(tournament), "([[:digit:]]\\.[[:digit:]])")))
Total_Number_Points
# Extract Player's Pre-Rating ("R: NNNN", then strip the "R: " prefix)
pre_rating1 <- str_trim(unlist(str_extract_all(unlist(tournament), "R: [[:digit:] ]*")))
pre_rating <- str_replace_all(pre_rating1, "R: ", "")
pre_rating
data <- data.frame(Player_Name, Player_State, Total_Number_Points, pre_rating)
data
# Extract Average Pre Chess Rating of Opponents
# Reference: https://rstudio-pubs-static.s3.amazonaws.com/212587_0c7bf965b27747da8c66803b15fe4534.html
# Step 1: Extract player's opponents ("W 12" / "D 5" / "L 3" round results)
opp_number <- unlist(str_extract_all(unlist(tournament),"[WDL]...\\d{1,2}"))
opp_number <- unlist(str_extract_all(unlist(opp_number),"\\.?\\d{1,2}"))
opp_number <- str_replace_all(opp_number,"\\b[0]\\b",".")
opp_number <- as.numeric(opp_number)
head(opp_number)
# NOTE(review): this second block shadows the flat vector built above, but
# as.numeric() is applied to str_replace_all() output over a *list*, which
# does not produce per-player opponent lists as apparently intended —
# confirm which of the two versions was meant to survive.
opp_number <- str_extract_all(unlist(tournament),"[WDL]...\\d{1,2}")
opp_number <- str_extract_all(unlist(opp_number),"\\.?\\d{1,2}")
opp_number <- str_replace_all(opp_number,"\\b[0]\\b",".")
opp_number <- as.numeric(opp_number)
head(opp_number)
# Step 2: Convert to numeric data
data$Total_Number_Points <- as.numeric(as.character(data$Total_Number_Points))
data$pre_rating <- as.numeric(as.character(data$pre_rating))
# Step 3: Calculate Mean
ave <- array(0, dim = nrow(data))
for (i in 1:nrow(data)){
# NOTE(review): str_extract_all() is called without a pattern argument here
# (runtime error), and the next line calls utils::data(...) instead of
# indexing the `data` frame — it presumably should average
# data$pre_rating over the opponent row indices, e.g.
# mean(data[match_opp, "pre_rating"], na.rm = TRUE). Fix before running.
match_opp <- as.numeric(unlist(str_extract_all(opp_number[i])))
ave[i] <- mean(data(match_opp, colnames(data) == "Pre_rating_opponents", na.rm = T))
};
data$Pre_rating_opp <- ave
head(data)
# export
# NOTE(review): output filename "data.cvs" is likely a typo for "data.csv".
write.csv(data, "data.cvs", row.names = FALSE)
|
b383f49292224aba7809608c411f5619e44475f9
|
2072f3bd397d10e689da90535ad6774cbcd70840
|
/model1_fixtime.R
|
469ee9f7b1fe457210277f8c5e520d7448f8dc0b
|
[] |
no_license
|
mam737/ParentalCurseScripts
|
1f08e23aa39ce96a8c90aef8476b6cf2f8f5c3bd
|
8f21755b9f1bd149abb9ac27b203ac58bc267894
|
refs/heads/master
| 2020-03-19T22:49:52.361412
| 2018-12-20T22:59:57
| 2018-12-20T22:59:57
| 136,983,156
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,296
|
r
|
model1_fixtime.R
|
# Model 1: mito-auto Mother's curse - 11 Mar 2018
# Invasion and dynamics of Mother's curse mitochondrial type with autosomal restorer
# female genotypes AAM AaM aaM AAm Aam aam
# and fitnesses 1 1 1 1+sf 1+sf 1+sf
# male genotypes AAMb AaMb aaMb AAmb Aamb aamb
# and fitnesses 1 1 1 1-sm 1-sm+sa/2 1-sm+sa
# four egg types: AM aM Am am
# two sperm types: A a
# Specify initial genotype frequencies (6 female, 6 male)
# Weight each male and female genotype by fitness and normalize
library(gplots)
library(reshape2)
## 3 paramters of interest, sf, sm, and sa
# Parameter grid: female benefit (sf), male cost (sm), and autosomal
# restoration (sa) selection coefficients for the mitochondrial m haplotype.
sf_list = seq(0,0.4,length.out = 20)
sm_list = seq(0,0.2,length.out = 20)
sa_list = seq(0,0.2,length.out = 20)
# maximum generations simulated per parameter combination; also the
# recorded "fixation time" when an allele never fixes
generations <- 1000
# one output row per (sf, sm, sa) combination
output.df <- data.frame(matrix(NA,nrow=(length(sf_list)*length(sm_list)*length(sa_list)), ncol=5))
colnames(output.df) <- c('sf','sm','sa' ,'a_fix_time','m_fix_time')
row_update <- 1
######### MODEL 1 ##########
# Deterministic recursion: for every parameter combination, iterate genotype
# frequencies forward and record the generation at which the autosomal
# restorer allele a and the mitochondrial haplotype m each exceed the
# fixation threshold (frequency > 0.9999).
for (sf_val in sf_list) {
for (sm_val in sm_list) {
for (sa_val in sa_list) {
# starting allele frequencies: both mutants introduced at 1%
A = 0.99
a = 0.01
M = 0.99
m = 0.01
current_a <- a
current_m <- m
a_fix <- FALSE
m_fix <- FALSE
#Initialize genotype frequencies (Hardy-Weinberg x mito frequency)
#Females
AAM = A * A * M
AAm = A * A * m
AaM = 2 * A * a * M
Aam = 2 * A * a * m
aaM = a * a * M
aam = a * a * m
#Males (suffix b)
AAMb = A * A * M
AAmb = A * A * m
AaMb = 2 * A * a * M
Aamb = 2 * A * a * m
aaMb = a * a * M
aamb = a * a * m
# sanity check that per-combination state was reset correctly
if (current_a != a | a_fix!=FALSE) {print("ERROR")}
for (gen in 1:generations) {
# record first generation at which each allele passes the threshold
if (current_a > 0.9999 & a_fix==FALSE) {
a_fix_gen <- gen
a_fix <- TRUE
}
if (current_m > 0.9999 & m_fix==FALSE) {
m_fix_gen <- gen
m_fix <- TRUE
}
if ((a_fix == TRUE) & (m_fix == TRUE)) {
break
}
# Weight each male and female genotype by fitness and normalize
Fsum=(AAM+AaM+aaM)+(1+sf_val)*(AAm+Aam+aam)
AAM=AAM/Fsum
AaM=AaM/Fsum
aaM=aaM/Fsum
AAm=(AAm*(1+sf_val))/Fsum
Aam=(Aam*(1+sf_val))/Fsum
aam=(aam*(1+sf_val))/Fsum
Msum=(AAMb+AaMb+aaMb)+(1-sm_val)*AAmb+(1-sm_val+sa_val/2)*Aamb+(1-sm_val+sa_val)*aamb
AAMb=AAMb/Msum
AaMb=AaMb/Msum
aaMb=aaMb/Msum
AAmb=(AAmb*(1-sm_val))/Msum
Aamb=(Aamb*(1-sm_val+sa_val/2))/Msum
aamb=(aamb*(1-sm_val+sa_val))/Msum
# Calculate the egg and sperm frequencies (mito is maternally inherited,
# so eggs carry M/m but sperm only carry the autosomal allele)
eAM=AAM+.5*AaM
eAm=AAm+.5*Aam
eaM=aaM+.5*AaM
eam=aam+.5*Aam
sA=AAMb+AAmb+.5*AaMb+.5*Aamb
sa=aaMb+aamb+.5*AaMb + .5*Aamb
# Calculate zygote frequencies the next generation (zygotes are the same freq in both sexes)
AAM=eAM*sA
AaM=eAM*sa+eaM*sA
aaM=eaM*sa
AAm=eAm*sA
Aam=eAm*sa+eam*sA
aam=eam*sa
AAMb=eAM*sA
AaMb=eAM*sa+eaM*sA
aaMb=eaM*sa
AAmb=eAm*sA
Aamb=eAm*sa+eam*sA
aamb=eam*sa
# allele frequencies averaged over the sexes for the fixation check
current_a_female <- (2*aaM + AaM + 2*aam + Aam)/(2*(AAM + AaM + aaM + AAm + Aam + aam))
current_a_male <- (2*aaMb + AaMb + 2*aamb + Aamb)/(2*(AAMb + AaMb + aaMb + AAmb + Aamb + aamb))
current_a <- (current_a_female + current_a_male)/2
current_m_female <- (AAm + Aam + aam)/(AAM + AaM + aaM + AAm + Aam + aam)
current_m_male <- (AAmb + Aamb + aamb)/(AAMb + AaMb + aaMb + AAmb + Aamb + aamb)
current_m <- (current_m_female + current_m_male)/2
}
# alleles that never fixed get the cap value as their "fixation time"
if (a_fix==FALSE) {
a_fix_gen <- generations
}
if (m_fix==FALSE) {
m_fix_gen <- generations
}
output.df[row_update,] <- c(round(sf_val,digits=3),round(sm_val,digits=3),round(sa_val,digits=3),a_fix_gen,m_fix_gen)
row_update <- row_update + 1
}
}
}
pdf('./model1_fix_time_a_heatmap.pdf')
for (i in round(sf_list,digits=3)) {
fix_time_heatmap <- dcast(output.df[output.df$sf==i,c(2,3,4)],sm~sa, value.var = 'a_fix_time')
row.names(fix_time_heatmap) <- fix_time_heatmap$sm
fix_time_heatmap <- as.matrix(fix_time_heatmap[,-1])
if (!all(fix_time_heatmap[1,1]==fix_time_heatmap)) {
title <- paste("Fix Time For sf = ", i)
heatmap.2(fix_time_heatmap,Rowv=FALSE,Colv=FALSE,dendrogram='none',main=title,xlab='Autosome',ylab='sm',key=T,trace='none',cexCol=1.25, cexRow=1.25,breaks=seq(0,1000,50),col=redblue(20))
}
else {
print(paste("a Fix Time the Same for sf= ", i))
}
}
dev.off()
pdf('./model1_fix_time_m_heatmap.pdf')
for (i in round(sf_list,digits=3)) {
fix_time_heatmap <- dcast(output.df[output.df$sf==i,c(2,3,5)],sm~sa, value.var = 'm_fix_time')
row.names(fix_time_heatmap) <- fix_time_heatmap$sm
fix_time_heatmap <- as.matrix(fix_time_heatmap[,-1])
if (!all(fix_time_heatmap[1,1]==fix_time_heatmap)) {
title <- paste("m Fix Time For sf = ", i)
heatmap.2(fix_time_heatmap,Rowv=FALSE,Colv=FALSE,dendrogram='none',main=title,xlab='Autosome',ylab='sm',key=T,trace='none',cexCol=1.25, cexRow=1.25,breaks=seq(0,1000,50),col=redblue(20))
}
else {
print(paste("Fix Time the Same for sf= ", i))
}
}
dev.off()
|
7199491396196b70d3390b1cd8de027eaa6ba062
|
6eddde9b74487719db12c51caefa7a788bcdf04a
|
/man/VIP.Rd
|
7b19f60ecb96aed99ecdf46711b8d085fbaad517
|
[] |
no_license
|
uwadaira/plsropt
|
79be7e7e91398b78ce4c662caed2cef81fcdd2c5
|
b633eaa63257333bd7ee5f64d824e8101f1855c7
|
refs/heads/master
| 2020-04-12T01:46:42.868932
| 2017-08-15T07:49:58
| 2017-08-15T07:49:58
| 45,820,246
| 2
| 1
| null | 2016-03-30T05:52:14
| 2015-11-09T06:45:12
|
R
|
UTF-8
|
R
| false
| true
| 432
|
rd
|
VIP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VIP.R
\name{VIP}
\alias{VIP}
\title{Variable importance in projection for PLS regression}
\usage{
VIP(object)
}
\arguments{
\item{object}{a model object}
}
\value{
VIP values
}
\examples{
data(yarn)
yarn.pls <- plsr(density ~ NIR, 6, data = yarn, method = "oscorespls", validation = "CV")
VIP(yarn.pls)
}
\author{
Bjørn-Helge Mevik (bhx6@mevik.net)
}
|
8526da30c2c448b675bc29e69e3a9cc8f7e531e5
|
a867658310e4a922b2d1484ffa5f6f1d532c6ce0
|
/grad/bioinformatics-and-genomes/part-2/project.R
|
c7ae6e19fdf3f258e3a505c37fcf32a002945fd4
|
[
"MIT"
] |
permissive
|
positivevaib/nyu-archive
|
d48174c81bd5ca0fbc5c370fc74cffbce83ecbaf
|
6d6aa06bf0303dbb5918a0db4bdba4dad17c5d8a
|
refs/heads/master
| 2023-03-02T04:08:28.096090
| 2021-02-14T06:15:07
| 2021-02-14T06:15:07
| 149,333,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,275
|
r
|
project.R
|
# Load libraries
library(HMM)
library(seqinr)
library(ape)
library(phangorn)
# Q2
# Setup old HMM as discussed in class
# Three hidden states (exon, 5' splice site, intron) emitting nucleotides.
states <- c("Exon", "5site", "Intron")
symbols <- c("A","C","G","T")
# Transition matrix (rows = from, cols = to). The names on the vector
# ('EE', 'E5', ...) are ignored by matrix(); dimnames below supply labels.
transProbs = matrix(c('EE'=0.9,'E5'=0.1,'EI'=0, '5E'=0, '55'=0, '5I'=1.0, 'IE'=0, 'I5'=0, 'II'=1.0), c(length(states), length(states)), byrow = TRUE)
rownames(transProbs) <- c("Exon", "5Site", "Intron")
colnames(transProbs) <- c("Exon", "5Site", "Intron")
# Emissions: uniform in exons, strongly G-biased at the 5' site,
# AT/GC-skewed in introns.
emissionProbs = matrix(c('A'=0.25,'C'=0.25,'G'=0.25,'T'=0.25, 'A'=0.05,'C'=0.0,'G'=0.95,'T'=0.0, 'A'=0.4,'C'=0.1,'G'=0.1,'T'=0.4), c(length(states), length(symbols)), byrow = TRUE)
rownames(emissionProbs) <- c("Exon", "5Site", "Intron")
colnames(emissionProbs) <- c("A","C","G","T")
# NOTE(review): dimnames say "5Site" while `states` uses "5site"; initHMM is
# given probabilities positionally, so this presumably only affects labels --
# verify if the dimnames are ever relied on.
# Chain always starts in the Exon state (startProbs = c(1,0,0)).
hmm <- initHMM(states, symbols, startProbs = c(1,0,0), transProbs = transProbs, emissionProbs = emissionProbs)
# Run old HMM on assigned human gene (CAB45 fragment); decode with Viterbi
cab45Seq <- s2c("GGCTCTGTGTCCCCAGGACGGCCGCAGGATGGGGACAAGCAGCTCACAGTCTGCAGAGAGACACAGACACATCATTAGCAAGACTCAGCAAAGACTTCCC")
vitCab45 <- viterbi(hmm, cab45Seq)
vitCab45
# Run old HMM on chr2:85539313-85539468
chr2Seq <- s2c("ACGAGGCGTTCATCGAGGAGGGCACATTCCTTTTCACCTCAGAGTCGGTCGGGGAAGGCCACCCAGGTGAGGGGACGGCCTGAAGCGAAGCGTGGGGCGGGGCAGAAGGCAGCGCCAAGGTCCGGCTGGCTGCGGCCGGCCGGTGGTGGGGCCCGC")
vitChr2 <- viterbi(hmm, chr2Seq)
vitChr2
# Setup new HMM
# Expanded model: the splice signal is a 5-base motif (Base1..Base4 around
# the 5site state), each base emitting one nucleotide near-deterministically.
states <- c("Exon", "Base1", "Base2", "5site", "Base4", "Intron")
symbols <- c("A","C","G","T")
# Start from an all-zero transition matrix, then fill the allowed moves.
transProbs <- matrix(rep(0, len = length(states)^2), nrow = length(states))
rownames(transProbs) <- states
colnames(transProbs) <- states
transProbs["Exon", "Exon"] <- 0.9
transProbs["Exon", "Base1"] <- 0.1
transProbs["Base1", "Base2"] <- 1
transProbs["Base2", "5site"] <- 1
transProbs["5site", "Base4"] <- 1
transProbs["Base4", "Intron"] <- 1
transProbs["Intron", "Intron"] <- 1
emissionProbs <- matrix(rep(0, len = length(states) * length(symbols)), nrow = length(states))
rownames(emissionProbs) <- states
colnames(emissionProbs) <- symbols
emissionProbs["Exon", "A"] <- 0.2
emissionProbs["Exon", "C"] <- 0.3
emissionProbs["Exon", "G"] <- 0.3
emissionProbs["Exon", "T"] <- 0.2
# Motif bases emit (nearly) one fixed nucleotide: A-G-G-T around the site.
emissionProbs["Base1", "A"] <- 0.997
emissionProbs["Base1", "C"] <- 0.001
emissionProbs["Base1", "G"] <- 0.001
emissionProbs["Base1", "T"] <- 0.001
emissionProbs["Base2", "A"] <- 0.001
emissionProbs["Base2", "C"] <- 0.001
emissionProbs["Base2", "G"] <- 0.997
emissionProbs["Base2", "T"] <- 0.001
emissionProbs["5site", "A"] <- 0.001
emissionProbs["5site", "C"] <- 0.001
emissionProbs["5site", "G"] <- 0.997
emissionProbs["5site", "T"] <- 0.001
emissionProbs["Base4", "A"] <- 0.001
emissionProbs["Base4", "C"] <- 0.001
emissionProbs["Base4", "G"] <- 0.001
emissionProbs["Base4", "T"] <- 0.997
emissionProbs["Intron", "A"] <- 0.15
emissionProbs["Intron", "C"] <- 0.35
emissionProbs["Intron", "G"] <- 0.35
emissionProbs["Intron", "T"] <- 0.15
hmm <- initHMM(states, symbols, startProbs = c(1, 0, 0, 0, 0, 0), transProbs = transProbs, emissionProbs = emissionProbs)
# Run new HMM on assigned human gene
vitCab45 <- viterbi(hmm, cab45Seq)
vitCab45
# Run new HMM on chr2:85539313-85539468
vitChr2 <- viterbi(hmm, chr2Seq)
vitChr2
# Q3
# Read in toy MSA and tree
toy_msa <- read.phyDat("toy_MSA.fasta", format = "fasta", type = "DNA")
tree <- read.tree("toy_tree.tre")
# Plot unrooted tree
plot(tree, type = "unrooted")
# Compute the log-likelihood of given tree
fit <- pml(tree, data = toy_msa, model = "Jukes-Cantor")
fit
# Q4
# Read in PF13499 MSA seed and compute ML pairwise distances
seed <- read.phyDat("PF13499_seed.fasta", format = "fasta", type = "AA")
d <- dist.ml(seed)
# Neighbor joining
treeNJ <- NJ(d)
# Parsimony (Sankoff algorithm, starting from the NJ tree)
treePars <- optim.parsimony(treeNJ, data = seed, method = "sankoff")
# Maximum likelihood: fit, then optimize branch lengths/parameters
fit = pml(treeNJ, data = seed, method = "sankoff")
fit
fitJC = optim.pml(fit, TRUE)
logLik(fitJC)
# Q5
# SARS alignment: NJ tree rooted on the civet outgroup, then ML fit
sars <- read.phyDat("sars.fasta", format = "fasta", type = "DNA")
dsars <- dist.ml(sars, model = "JC69")
sarsNJ <- NJ(dsars)
sarsNJ2 <- root(sarsNJ, outgroup = "Himalayan palm civet sars cov, complete genome")
sarsFit = pml(sarsNJ2, data = sars, model="Jukes-Cantor")
sarsFit
fitJC <- optim.pml(sarsFit, TRUE)
logLik(fitJC)
# Bootstrap: 100 replicates with NNI rearrangements; plot support >= 50
bs <- bootstrap.pml(fitJC, bs=100, optNni=TRUE, multicore=FALSE, control = pml.control(trace=0))
plotBS(fitJC$tree, bs, p = 50, type="p")
|
18f0fbb7158b7f65a386c368be2843fc2e8bb870
|
ff13ebac5f03c26551ef3f508cc37a2033e89107
|
/code/demo.R
|
d802ef5d4b25a9b0491a43d17f1586da581727f3
|
[] |
no_license
|
ejosymart/winLossArea
|
e4f212bfed34c45bb392907e71a776d430f76e96
|
d70a06b2a9ca6878928b15086163c3451353c521
|
refs/heads/master
| 2020-05-02T11:05:31.066361
| 2020-04-09T10:36:57
| 2020-04-09T10:36:57
| 177,917,357
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 893
|
r
|
demo.R
|
library(raster)
library(SDMTools)
source("code/joinRaster_functions.R")
# Join the three binary presence rasters (present, 2050, 2100) and sum their
# values; `newValue = 14` is forwarded to joinRaster -- its exact recoding
# semantics are defined in joinRaster_functions.R (verify there).
JR <- joinRaster(files = c("data/Binario_Presente.asc",
"data/Binario_2050.asc",
"data/Binario_2100.asc"), newValue = 14)
JR
# Plot the joined raster with one colour per summed value
cols <- rev(terrain.colors(JR@data@max+1))
plot(JR, xlab = "Longitude", ylab = "Latitude", col = cols)
# -----------------------------------------------------------------------
# AREA ESTIMATION ---------------------------------------------------------
# -----------------------------------------------------------------------
library(raster)
library(SDMTools)
# Presence area in km^2 for each scenario raster
areaEstimationPresence(data = "data/Binario_Presente.asc", to = "km2")
areaEstimationPresence(data = "data/Binario_2050.asc", to = "km2")
areaEstimationPresence(data = "data/Binario_2100.asc", to = "km2")
|
04b6359131bc51695c05f2bb19c0ed265c8e517e
|
b0b54c7b80ff22fbb7d3910c7f4eaa0fa5e970c1
|
/check_top_ld.R
|
c202e6de5f9e666bf4cd91c5901d09160ff9df0e
|
[] |
no_license
|
yumifoo/finemap
|
7880e572fcf333fb1b458eeb65110aeb81dcc5ef
|
9d3132c3683c2409ab8d5249e062bd8ceac387fc
|
refs/heads/master
| 2020-09-24T18:33:45.759851
| 2019-12-11T12:08:11
| 2019-12-11T12:08:11
| 225,817,684
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,252
|
r
|
check_top_ld.R
|
#!/usr/bin/env Rscript
# Usage: check_top_ld.R <trait>
# For each chromosome, load the pairwise LD matrix of the top SNPs chosen in
# 1 Mb windows and flag the chromosome if any off-diagonal squared LD exceeds
# 0.1. For flagged chromosomes, merge the two offending regions in
# merged_region_1mb.txt so that correlated regions are fine-mapped together.
args = commandArgs(trailingOnly=TRUE)
trait <- as.character(args[1])
chromosomes <- 1:23
need_merge <- c()
for(ii in 1:length(chromosomes)){
chr <- chromosomes[ii]
# Skip chromosomes with no LD file (no top SNPs selected there)
if(file.exists(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/top_snp_sex_combined_win_1mb.ld",trait, chr))){
ld <- read.table( sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/ld/top_snp_sex_combined_win_1mb.ld",trait, chr), header = FALSE)
# Square the stored values (assumes the .ld file holds r, not r^2 -- confirm)
ld <- ld^2
ld <- as.matrix(ld)
# The z file lists SNP ids in the same order as the LD matrix rows/cols
z <- read.table(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%d/z_files/top_snp_sex_combined_%s_chr%d_wim_1mb.z",trait, chr, trait, chr), header = TRUE, stringsAsFactors = FALSE)
rownames(ld) <- z$rsid
colnames(ld) <- z$rsid
# Keep only the upper triangle so each SNP pair is considered once
get_upper_tri<-function(cormat){
cormat[lower.tri(cormat)] <- NA
return(cormat)
}
ld_upper <- get_upper_tri(ld)
# Blank out the diagonal (self-LD is trivially 1)
for (jj in 1:nrow(ld_upper)){
ld_upper[jj,jj] <- NA
}
print(paste("The largest ld^2 value on chromosome", chr, "is", max(ld_upper, na.rm = TRUE)))
print("Any LD^2 > 0.1?")
print(any(ld_upper > 0.1, na.rm = TRUE))
need_merge[ii] <- any(ld_upper > 0.1, na.rm = TRUE)
}
else
need_merge[ii] <- FALSE
}
# Reduce the logical vector to the chromosome labels that need merging
names(need_merge) <- 1:23
need_merge <- names(need_merge)[need_merge]
need_merge
for(ii in seq_along(need_merge)){
print(paste("chr",need_merge[ii]))
ld <- read.table(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%s/ld/top_snp_sex_combined_win_1mb.ld",trait, need_merge[ii]), header = FALSE)
ld <- ld^2
# First SNP pair with 0.1 < r^2 < 1; only one pair is merged per run --
# NOTE(review): if several pairs exceed the threshold, the script would need
# to be re-run to merge the rest. Confirm this is intended.
index <- which(ld>0.1 & ld < 1, arr.ind = TRUE)[1,]
# THE INDEX IS THE ROW NUMBER IN THE REGION FILE
region <- read.table(sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%s/regions/merged_region_1mb.txt",trait, need_merge[ii]),header = FALSE)
print(region)
# Append one region spanning both correlated regions ...
region <- rbind(region,c(min(region[index,]),max(region[index,])))
print(region)
# ... then drop the two originals and re-sort by start position
region <- region[-index,]
region <- region[order(region[,1]),]
print(region)
write.table(region, sprintf("/fs/projects/ukbb/yu/BOLT_basicQT_agesq/%s/sex_combined/chr%s/regions/merged_region_1mb.txt",trait, need_merge[ii]), quote = FALSE, row.names = FALSE, col.names = FALSE)
}
|
7d4d93bb4f4bb2507e05ed036177ef1692719dc0
|
c981caf103a3540f7964e6c41a56ca34d67732c4
|
/R/plausible.value.draw.R
|
9aa768d859ce633637a463dd51bac2f4516a8b13
|
[] |
no_license
|
alexanderrobitzsch/miceadds
|
8285b8c98c2563c2c04209d74af6432ce94340ee
|
faab4efffa36230335bfb1603078da2253d29566
|
refs/heads/master
| 2023-03-07T02:53:26.480028
| 2023-03-01T16:26:31
| 2023-03-01T16:26:31
| 95,305,394
| 17
| 2
| null | 2018-05-31T11:41:51
| 2017-06-24T15:16:57
|
R
|
UTF-8
|
R
| false
| false
| 1,853
|
r
|
plausible.value.draw.R
|
## File Name: plausible.value.draw.R
## File Version: 0.15
#' Draw plausible values under a unidimensional IRT (3PL) model with a
#' latent regression prior.
#'
#' @param data 0/1 item response matrix (persons x items); NA = not given
#' @param X design matrix of the latent regression
#' @param beta0 regression coefficients
#' @param sig0 residual SD (scalar or one value per person)
#' @param b item difficulties (required)
#' @param a item slopes; c item guessing parameters
#' @param theta.list quadrature grid for the latent trait
#' @param pvdraw number of plausible values per person (FALSE = none)
#' @return list with posterior density, EAP, SE.EAP, plausible values and
#'   the per-person regression mean (M.Regr) and SD (SD.Regr)
plausible.value.draw <- function( data, X, beta0, sig0, b=b,
    a=rep(1, length(b) ), c=rep(0, length(b) ),
    theta.list=seq(-5,5,len=40), pvdraw=1 )
{
    # Recode missing responses to a placeholder; the response indicator
    # zeroes them out of the likelihood, so the placeholder never matters.
    resp <- data
    resp[ is.na(data) ] <- 1
    resp_ind <- 1 - is.na(data)
    # Guard against negative residual SDs
    sig0[ sig0 < 0 ] <- 0
    n <- nrow(resp)
    # Person-specific prior mean and SD from the latent regression
    prior_mean <- ( X %*% beta0 )[,1]
    prior_sd <- if ( length(sig0) > 1 ) sig0 else rep( sig0, n )
    # n x G matrix repeating the quadrature grid in every row
    theta_mat <- outer( rep(1,n), theta.list )
    # Prior density evaluated at every grid point
    dens_prior <- stats::dnorm( theta_mat, mean=prior_mean, sd=prior_sd )
    # Likelihood of each observed response pattern at every grid point (3PL)
    dens_lik <- matrix( 0, nrow=n, ncol=ncol(dens_prior) )
    for (tt in seq_along(theta.list) ){
        p_tt <- outer( rep(1,n), c + (1-c)*stats::plogis( a*( theta.list[tt] - b ) ) )
        dens_lik[,tt] <- exp( rowSums( resp_ind*resp*log(p_tt) +
                                resp_ind*(1-resp)*log(1-p_tt) ) )
    }
    # Row-normalized individual posterior
    post <- dens_lik * dens_prior
    post <- post / rowSums(post)
    # Posterior mean (EAP) and SD per person
    eap <- rowSums( theta_mat * post )
    sd_post <- sqrt( rowSums( theta_mat^2 * post ) - eap^2 )
    # Normal approximation draws of plausible values (pvdraw per person)
    if ( pvdraw == FALSE ){
        pv <- NULL
    } else {
        pv <- matrix( stats::rnorm( n*pvdraw, mean=rep(eap, each=pvdraw),
                        sd=rep(sd_post, each=pvdraw) ), ncol=pvdraw, byrow=TRUE )
    }
    list( posterior.density=post, EAP=eap, SE.EAP=sd_post,
        plausible.value=pv, M.Regr=prior_mean, SD.Regr=prior_sd )
}
|
2d750999d1a461a331e6032328a7e48ab94397f8
|
aa5daf106a59917da72a67aafe2901f2cc7882db
|
/scripts/5-transfer-to-s3.R
|
3ad7df99d939abac05526ff53362c2647d304ec3
|
[] |
no_license
|
brittany-durkin/covid-neighborhood-job-analysis
|
752574dd4e705456bd4c5132109b84a926fb84dc
|
b9dd0bdaa552c3a11147b8ab0197c69c385db903
|
refs/heads/master
| 2022-04-22T02:20:01.799767
| 2020-04-18T14:58:24
| 2020-04-18T14:58:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
5-transfer-to-s3.R
|
# Transfer necessary files from local computer to S3.
#
# Uploads the processed geojson/csv outputs (tract, county, cbsa and US
# summaries) plus the per-county and per-cbsa file trees to the public bucket.
library(aws.s3)
library(tidyverse)

#----AWS Setup--------------------------------------
# read in AWS secret keys (local credentials file, not version-controlled)
secret_keys <- read_csv("data/raw-data/small/secret_keys.csv")

# set keys
key <- secret_keys$`Access key ID`
secret_key <- secret_keys$`Secret access key`

# set bucket name
my_bucket_name <- "ui-lodes-job-change-public"

Sys.setenv("AWS_ACCESS_KEY_ID" = key,
           "AWS_SECRET_ACCESS_KEY" = secret_key,
           "AWS_DEFAULT_REGION" = "us-east-1")

# directory holding the final processed outputs to upload
s3_filepath <- "data/processed-data/s3_final/"

#----Transfer Files----------------------------------
# put tract job loss file in root directory
put_object(file = paste0(s3_filepath, "job_loss_by_tract.geojson"),
           object = "job_loss_by_tract.geojson",
           bucket = my_bucket_name,
           multipart = FALSE)

# put geojson file in root directory
put_object(file = paste0(s3_filepath, "no_cbsa_tracts.geojson"),
           object = "no_cbsa_tracts.geojson",
           bucket = my_bucket_name,
           multipart = FALSE)

# # put cbsa csv in bucket directory
# put_object(paste0(s3_filepath, "cbsa_job_loss.csv"),
#            "cbsa_job_loss.csv",
#            my_bucket_name)
#
# # put county csv in bucket directory
# put_object(paste0(s3_filepath, "county_job_loss.csv"),
#            "county_job_loss.csv",
#            my_bucket_name)

# put state geojson in root directory
put_object(file = "data/raw-data/big/states.geojson",
           object = "states.geojson",
           bucket = my_bucket_name)

# # put cbsa geojson in root directory
# put_object(paste0(s3_filepath, "cbsas.geojson"),
#            "cbsas.geojson",
#            my_bucket_name)
#
# # put county geojson in bucket directory
# put_object(paste0(s3_filepath, "counties.geojson"),
#            "counties.geojson",
#            my_bucket_name,
#            multipart = TRUE)

# put sum county summaries geojson in root directory
put_object(file = paste0(s3_filepath, "sum_job_loss_county.geojson"),
           object = "sum_job_loss_county.geojson",
           bucket = my_bucket_name)

# put sum cbsa summaries geojson in root directory
put_object(file = paste0(s3_filepath, "sum_job_loss_cbsa.geojson"),
           object = "sum_job_loss_cbsa.geojson",
           bucket = my_bucket_name)

# put sum county summaries csv in root directory
put_object(file = paste0(s3_filepath, "sum_job_loss_county.csv"),
           object = "sum_job_loss_county.csv",
           bucket = my_bucket_name)

# put sum cbsa summaries csv in root directory
put_object(file = paste0(s3_filepath, "sum_job_loss_cbsa.csv"),
           object = "sum_job_loss_cbsa.csv",
           bucket = my_bucket_name)

# put sum USA summaries csv in root directory
put_object(file = paste0(s3_filepath, "sum_job_loss_us.csv"),
           object = "sum_job_loss_us.csv",
           bucket = my_bucket_name)

# upload every file in the county directory under the county/ prefix
county_files <- list.files(paste0(s3_filepath, "county"))
county_files %>%
  walk(~put_object(file = paste0(s3_filepath, "county/", .),
                   object = paste0("county/", .),
                   bucket = my_bucket_name))

# upload every file in the cbsa directory under the cbsa/ prefix
cbsa_files <- list.files(paste0(s3_filepath, "cbsa"))
cbsa_files %>%
  walk(~put_object(file = paste0(s3_filepath, "cbsa/", .),
                   object = paste0("cbsa/", .),
                   bucket = my_bucket_name))

# put lehd_types.csv on s3. This is the translation list between
# geojson industry codes and human readable industry names
put_object(file = "data/raw-data/small/lehd_types_s3.csv",
           object = "lehd_types_s3.csv",
           bucket = my_bucket_name)

# NOTE: a second, duplicated upload of sum_job_loss_us.csv used to sit here;
# removed because the identical object is already uploaded above.
|
3248b769ab8c60ff21a8434dae9a4f687624d98d
|
c0222d0bd4a9815fa66bf57bb6d817197c0d7558
|
/Scripts/plots_for_prez.R
|
ad8c2f4b493f2d6ad667e9f801306dc83f3842d5
|
[
"MIT"
] |
permissive
|
achafetz/AIL
|
6d2df590ab33b6a8cc92472acc55fabf81086a46
|
988b907f1ac6adf0f9f7779efe81fb53fff052dc
|
refs/heads/main
| 2023-04-14T18:29:44.988692
| 2021-04-28T01:17:47
| 2021-04-28T01:17:47
| 360,957,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,066
|
r
|
plots_for_prez.R
|
# PROJECT: AIL
# AUTHOR: A.Chafetz | USAID
# PURPOSE: charts for Sheperd Center Talk
# LICENSE: MIT
# DATE: 2021-04-23
# UPDATED: 2021-04-27
# DEPENDENCIES ------------------------------------------------------------
library(tidyverse)
library(glitr)
library(glamr)
library(extrafont)
library(scales)
library(tidytext)
library(patchwork)
library(ggtext)
library(glue)
library(janitor)
library(readxl)
library(tidytext)
library(sf)
library(rnaturalearth)
library(gisr)
# GLOBAL VARIABLES --------------------------------------------------------
# CUMULATIVE COVID CASES --------------------------------------------------
#source: https://ourworldindata.org/grapher/cumulative-covid-cases-region
df_covid_cases <- read_csv("Data/cumulative-covid-cases-region.csv") %>%
clean_names()
df_covid_cases_glob <- df_covid_cases %>%
filter(entity == "World")
date <- max(df_covid_cases_glob$day)
df_covid_cases_glob %>%
ggplot(aes(day, total_confirmed_cases_of_covid_19)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_covid_cases_glob %>% filter(day == max(day)),
aes(label = number(total_confirmed_cases_of_covid_19,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .6,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
suffix = "m"),
position = "right") +
scale_x_date(date_breaks = "2 month",
date_labels = "%b %y") +
labs(x = NULL, y = "cumulative COVID cases",
caption = glue("as of {date}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/covid_global.png",
width = 8.6, height = 4.84)
# COVID DEATHS ------------------------------------------------------------
#source: https://ourworldindata.org/grapher/cumulative-covid-deaths-region
df_covid_deaths <- read_csv("Data/cumulative-covid-deaths-region.csv") %>%
clean_names()
df_covid_deaths_glob <- df_covid_deaths %>%
filter(entity == "World")
date <- max(df_covid_deaths_glob$day)
df_covid_deaths_glob %>%
ggplot(aes(day, total_confirmed_deaths_due_to_covid_19)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_covid_deaths_glob %>% filter(day == max(day)),
aes(label = number(total_confirmed_deaths_due_to_covid_19,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .6,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
suffix = "m"),
position = "right") +
scale_x_date(date_breaks = "2 month",
date_labels = "%b %y") +
labs(x = NULL, y = "cumulative COVID deaths",
caption = glue("as of {date}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/covid_deaths_global.png",
width = 8.6, height = 4.84)
# PLHIV -------------------------------------------------------------------
#source: https://ourworldindata.org/hiv-aids#
df_plhiv <- read_csv("Data/number-of-people-living-with-hiv.csv") %>%
clean_names()
df_plhiv_glob <- df_plhiv %>%
filter(entity == "World")
year <- max(df_plhiv_glob$year)
df_plhiv_glob %>%
ggplot(aes(year, prevalence_hiv_aids_sex_both_age_all_ages_number)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_plhiv_glob %>% filter(year == max(year)),
aes(label = number(prevalence_hiv_aids_sex_both_age_all_ages_number,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .6,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
suffix = "m"),
position = "right") +
scale_x_continuous(breaks = seq(1990, 2017, 5)) +
labs(x = NULL, y = "People living with HIV",
caption = glue("through {year}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/plhiv_global.png",
width = 8.6, height = 4.84)
# NEW HIV INFECTIONS ------------------------------------------------------
df_hiv_new <- read_csv("Data/new-cases-of-hiv-infection.csv") %>%
clean_names()
df_hiv_new_glob <- df_hiv_new %>%
filter(entity == "World")
year <- max(df_hiv_new_glob$year)
df_hiv_new_glob %>%
ggplot(aes(year, incidence_hiv_aids_sex_both_age_all_ages_number)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_hiv_new_glob %>% filter(year == max(year)),
aes(label = number(incidence_hiv_aids_sex_both_age_all_ages_number,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .2, vjust = .3,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
accuracy = 1,
suffix = "m"),
position = "right") +
scale_x_continuous(breaks = seq(1990, 2017, 5)) +
labs(x = NULL, y = "New HIV Infections",
caption = glue("through {year}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/hiv_new_global.png",
width = 8.6, height = 4.84)
# HIV DEATHS --------------------------------------------------------------
#source: https://ourworldindata.org/hiv-aids#
df_hiv_deaths <- read_csv("Data/deaths-from-aids-ihme.csv") %>%
clean_names()
df_hiv_deaths_glob <- df_hiv_deaths %>%
filter(entity == "World")
year <- max(df_hiv_deaths_glob$year)
df_hiv_deaths_glob %>%
ggplot(aes(year, deaths_hiv_aids_sex_both_age_all_ages_number)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_hiv_deaths_glob %>% filter(year == max(year)),
aes(label = number(deaths_hiv_aids_sex_both_age_all_ages_number,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .6,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
accuracy = .1,
suffix = "m"),
position = "right") +
scale_x_continuous(breaks = seq(1990, 2017, 5)) +
labs(x = NULL, y = "AIDS Deaths",
caption = glue("through {year}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/aids_deaths.png",
width = 8.6, height = 4.84)
df_hiv_deaths_glob %>%
mutate(cumsum = cumsum(deaths_hiv_aids_sex_both_age_all_ages_number)) %>%
ggplot(aes(year, cumsum)) +
geom_area(fill = "#1B68B3") +
geom_text(data = df_hiv_deaths_glob %>%
mutate(cumsum = cumsum(deaths_hiv_aids_sex_both_age_all_ages_number)) %>%
filter(year == max(year)),
aes(label = number(cumsum,
accuracy = .1,
suffix = "m",
scale = 1/1000000)),
hjust = .6,
family = "Source Sans Pro SemiBold", color = "white") +
scale_y_continuous(label = number_format(scale = 1/1000000,
suffix = "m"),
position = "right") +
scale_x_continuous(breaks = seq(1990, 2017, 5)) +
labs(x = NULL, y = "AIDS Deaths",
caption = glue("through {year}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/aids_cum_deaths.png",
width = 8.6, height = 4.84)
# COVID CASES - SELECT COUNTRIES ------------------------------------------
#source: https://ourworldindata.org/explorers/coronavirus-data-explorer?yScale=log&zoomToSelection=true&time=2020-10-16..latest&pickerSort=desc&pickerMetric=total_cases&hideControls=true&Metric=Confirmed+cases&Interval=7-day+rolling+average&Relative+to+Population=false&Align+outbreaks=false&country=USA~IND~ZAF
df_covid_ctry <- read_csv("Data/owid-covid-data.csv") %>%
clean_names()
df_covid_ctry_sel <- df_covid_ctry %>%
filter(iso_code %in% c("USA","IND", "ZAF"))
date <- max(df_covid_ctry_sel$date)
df_covid_ctry_sel %>%
filter(date < "2020-05-01",
iso_code %in% c("USA","ZAF")) %>%
ggplot(aes(date, new_cases_smoothed_per_million, group = location)) +
geom_path(color = "#1B68B3", size =1.12) +
facet_wrap(~location) +
scale_x_date(date_labels = "%b %y") +
scale_y_continuous(position = "right") +
si_style() +
labs(x = NULL, y = "new COVID cases (per million)",
caption = glue("through {date}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
strip.text = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/us_zaf_covid.png",
width = 8.6, height = 4.84)
df_covid_ctry_sel %>%
filter(iso_code %in% c("USA","ZAF")) %>%
ggplot(aes(date, new_cases_smoothed_per_million, group = location)) +
geom_path(color = "#1B68B3", size =1.12) +
facet_wrap(~location) +
scale_x_date(date_labels = "%b %y") +
scale_y_continuous(position = "right") +
si_style() +
labs(x = NULL, y = "new COVID cases (per million)",
caption = glue("through {date}
Source: Our World in Data")) +
si_style() +
theme(panel.grid.major.x = element_line(color = "white"),
panel.grid.major.y = element_line(color = "white"),
axis.text.x = element_text(color = "#1B68B3"),
axis.text.y = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
strip.text = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/us_zaf_covid_full.png",
width = 8.6, height = 4.84)
# LEADING CAUSE OF DEATH --------------------------------------------------
#source:https://www.who.int/data/gho/data/themes/mortality-and-global-health-estimates/ghe-leading-causes-of-death
path <- "Data/GHE2019_COD_WBIncome_2000_201933383745-a750-4d94-8491-fb209dcece6f.xlsx"
year <- 2019
read_who <- function(filepath, year, group = "LI"){
df <- read_excel(filepath,
sheet = glue("{year} {group}"),
skip =12,
col_names = FALSE)
df <- df %>%
select(category = ...2,
category_name = ...3,
cod_maj = ...4,
cod_maj_name = ...5,
cod = ...6,
deaths = ...7) %>%
mutate(year = {year},
income_group = {group},
cod = ifelse(is.na(cod), cod_maj_name, cod),
cod = ifelse(cod_maj == "Neonatal conditions", cod_maj, cod),
category = case_when(!is.na(category) ~ word(category_name) %>% str_remove(",")),
cod_maj = case_when(!is.na(cod_maj) ~ cod_maj_name,
cod == "Neonatal conditions" ~ cod),
) %>%
select(-ends_with("name")) %>%
fill(category, cod_maj) %>%
filter(!is.na(cod),
!cod %in% c("Preterm birth complications",
"Birth asphyxia and birth trauma",
"Neonatal sepsis and infections",
"Other neonatal conditions"))
return(df)
}
df_cod <- map_dfr(c(2000, 2019),
~read_who(path, .x))
df_cod_rank <- df_cod %>%
group_by(year) %>%
mutate(rank = min_rank(desc(deaths))) %>%
ungroup() %>%
arrange(year, rank)
df_cod_rank %>%
filter(rank <=10) %>%
mutate(color = ifelse(cod == "HIV/AIDS", "#1B68B3", "#ffffff"),
cod_formatted = glue("<span style='color:{color}'>{rank}\\. {cod}</span>")) %>%
ggplot(aes(deaths, reorder_within(cod_formatted, deaths, year), color = color)) +
geom_segment(aes(x = 0, xend = deaths, yend = reorder_within(cod_formatted, deaths, year))) +
geom_point(size = 4) +
facet_wrap(~year, scales = "free_y") +
scale_y_reordered() +
scale_color_identity() +
scale_x_continuous(label = number_format(scale = 1/1000,
accuracy = 1,
suffix = "k"), expand = c(.05, .5)) +
labs(x = NULL, y = NULL,
title = "SIGNIFICANT DECLINE IN HIV/AIDS DEATHS SINCE 2000",
subtitle = "leading cause of deaths in Low Income Countries",
caption = glue("Source: WHO Estimated deaths by cause and region")) +
si_style() +
theme(strip.text = element_text(family = "Source Sans Pro SemiBold", color = "#1B68B3"),
axis.text.y = element_markdown(),
panel.grid.major.x = element_line(color = "gray90"),
panel.grid.major.y = element_blank(),
axis.text.x = element_text(color = "#1B68B3"),
axis.title.y = element_text(color = "#1B68B3"),
plot.title = element_text(color = "#1B68B3"),
plot.subtitle = element_text(color = "#1B68B3"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7"))
si_save("Images/leading_cod.png",
width = 8.6, height = 4.84)
# USAID MAP ---------------------------------------------------------------
spdf <- ne_countries(type = "sovereignty",
scale = 110,
returnclass = "sf") %>%
select(sovereignt, admin, name, adm0_a3) %>%
filter(admin != "Antarctica") %>% # Remove Antarctica
clean_countries(colname = "admin")
## Raw data
df_ou <- si_path() %>%
return_latest("OU_IM") %>%
read_rds()
# Join MSD to spdf
spdf_ou <- spdf %>%
left_join(df_ou %>%
filter(fundingagency == "USAID",
fiscal_year == 2021,
!is.na(targets)) %>%
distinct(operatingunit, countrynamename),
by = c("admin" = "countrynamename")) %>%
filter(!is.na(operatingunit))
## VIZ ---------------------------------------------------------
#source: https://github.com/USAID-OHA-SI/lastmile/blob/master/Scripts/99_FY20Q4_USAID_PEPFAR_Countries.R
## Global Maps
ggplot() +
geom_sf(data = spdf, fill = NA, color = "white", size = .4) +
geom_sf(data = spdf_ou,
fill = "#1B68B3",
color = "white",
size = .2) +
labs(
caption = "USAID - Office of HIV/AIDS - Programs Overview as of FY 2021"
) +
si_style_map() +
theme(
legend.direction = "horizontal",
legend.position = "bottom",
legend.title = element_blank(),
legend.key.width = unit(1.5, "cm"),
plot.title = element_text(face = "bold"),
plot.caption = element_text(color = "white"),
plot.background = element_rect(fill = "#A8C5E7", color = "#A8C5E7")
)
si_save("Images/usaid_pepfar_map.png",
scale = 1.2, dpi = 310,
width = 10, height = 7)
|
372112bc469c9be4f6132b6a08599fd4294d4a30
|
29de3b9256ca8eb59b74203c7392d29939f161b5
|
/ui.r
|
027aebb2e6d267e8a4eb7ebad96625fdc78302af
|
[] |
no_license
|
mratliff/iris
|
24db14d133e60dc4d9dc68c9f0627206234988ff
|
d078b8aaa0106e1c4c85e302bd951729e8404bda
|
refs/heads/master
| 2020-12-30T14:56:03.842708
| 2015-01-08T20:55:13
| 2015-01-08T20:55:13
| 28,984,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 796
|
r
|
ui.r
|
library(shiny)

# Shiny UI for the iris species predictor: four numeric measurement inputs
# feed a pls classification model whose prediction is shown in the main panel.

# Sidebar: one numeric input per iris measurement, plus usage notes.
input_panel <- sidebarPanel(
  h3('Input Values'),
  numericInput('slength', 'Sepal Length (4.3 to 7.9)', 5.8, min = 4.3, max = 7.9, step = .1),
  numericInput('swidth', 'Sepal Width (2 to 4.4)', 3, min = 2, max = 4.4, step = .1),
  numericInput('plength', 'Petal Length (1 to 6.9)', 4, min = 1, max = 6.9, step = .1),
  numericInput('pwidth', 'Petal Width (.1 to 2.5)', 2, min = .1, max = 2.5, step = .1),
  h2('-----------------------------'),
  h3('Select any values in range above to get a prediction of the kind of iris'),
  h2('-----------------------------'),
  h3('Model is trained using pls classification algorithm')
)

# Main panel: shows the server-side prediction as verbatim text.
result_panel <- mainPanel(
  h3('Prediction'),
  verbatimTextOutput("prediction")
)

shinyUI(pageWithSidebar(
  headerPanel("Predict me an Iris!"),
  input_panel,
  result_panel
))
|
3c19848f40a299a342618e4ff9a3f9d24901fb02
|
1d7fb86444c74f5dcf6f0ff19b0c5b986f2b38eb
|
/R/test_run_as_long_as.R
|
0e22d0a48f3fab2485eb53117ada666b451fef9c
|
[] |
no_license
|
benjaminguinaudeau/hideR
|
da303a7be3139efa8cccf7c6ea5934e813d4d0cd
|
39de37f596f828a9fd84b38cc562e0eb2e903d6e
|
refs/heads/master
| 2020-05-14T18:33:32.105765
| 2019-12-19T15:24:03
| 2019-12-19T15:24:03
| 181,911,260
| 5
| 2
| null | 2019-12-19T15:08:44
| 2019-04-17T14:42:38
|
R
|
UTF-8
|
R
| false
| false
| 215
|
r
|
test_run_as_long_as.R
|
#' second_modulo
#'
#' Current wall-clock second (rounded) modulo `x`. Non-deterministic by
#' design: intended as a time-based predicate for polling/retry loops.
#'
#' @param x Integer divisor.
#' @return `round(lubridate::second(Sys.time())) %% x`; equals 0 roughly
#'   once every `x` seconds.
#' @export
second_modulo <- function(x) round(lubridate::second(Sys.time())) %% x
#' expr
#'
#' A quoted (unevaluated) expression: for each of 1:3, print a dot, sleep
#' one second, and test whether the current second is a multiple of 5.
#' Meant to be evaluated later, e.g. by a run-as-long-as helper.
#' @export
expr <- rlang::expr(map_lgl(1:3, ~{cat(".") ; Sys.sleep(1) ; return(second_modulo(5) == 0)}))
|
552758bb0e6288a4dc4c261724d2b51ff88b6269
|
f5f88607d563579112b0b07503898e139e8f40be
|
/textanalysis/src/plot_generator.R
|
b4b2b3629fb4638548fc3ba2ac18ca468dab1a9e
|
[] |
no_license
|
Costax/text_analysis
|
9bc0f765e28337656a83d0de31333c310c10f8a6
|
5f98f9c81b59bd844169797ec859b0ff8963e0de
|
refs/heads/master
| 2021-01-22T06:27:55.295159
| 2017-05-26T23:37:50
| 2017-05-26T23:37:50
| 92,555,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
plot_generator.R
|
#
# Author: Adan Hirales Carbajal
# Email: adan.hirales@cetys.mx
#
# Render a word-cloud PNG for the text file given on the command line.
#
# Trailing command-line arguments:
#   args[1] -- path to the input text file
#   args[2] -- directory in which to write the PNG
#
# freq: minimum word frequency for inclusion in the cloud (default 10).
#
# The output file is named after the input file's basename truncated at the
# first "." and given a ".png" extension (preserves the original behaviour
# for multi-dot filenames).
plot_generator <- function(freq = 10) {
  # Get command line arguments
  args <- commandArgs(trailingOnly = TRUE)
  inFile <- args[1]
  path <- args[2]
  outFile <- paste(strsplit(basename(inFile), "\\.")[[1]][1], "png", sep = ".")
  # Build a filtered corpus: collapse whitespace, strip punctuation,
  # lower-case everything.
  textCorpus <- Corpus(VectorSource(readLines(inFile)))
  textCorpus <- tm_map(textCorpus, stripWhitespace)
  textCorpus <- tm_map(textCorpus, removePunctuation)
  textCorpus <- tm_map(textCorpus, content_transformer(tolower))
  # Render in the target directory. on.exit() guarantees the graphics
  # device is closed and the working directory restored even if
  # wordcloud() errors (the original leaked both on failure). The dev.off()
  # handler is prepended (after = FALSE) so it runs before setwd(cwd),
  # matching the original cleanup order.
  cwd <- getwd()
  setwd(path)
  on.exit(setwd(cwd), add = TRUE)
  png(outFile, width = 12, height = 8, units = "in", res = 300)
  on.exit(dev.off(), add = TRUE, after = FALSE)
  wordcloud(textCorpus, min.freq = freq, random.order = FALSE, colors = brewer.pal(8, "Dark2"))
}
# Load text-mining and word-cloud dependencies quietly, then render a
# cloud of words appearing at least 50 times in the command-line input file.
library(tm, quietly = TRUE)
library(wordcloud, quietly = TRUE)
plot_generator(50)
|
4e3d2462bda0efcb1d282ebac47b004d4fd29f5c
|
c5faa9a2e350978662624f73725eb7ee02c55cb0
|
/man/RGData.Rd
|
6244d2f5cd9ec2aa575deaf29b4cd11dbea3d79c
|
[] |
no_license
|
HenrikBengtsson/aroma
|
341cc51ddd8f9c111347207535bfe2a85ea7622a
|
c0314ea003fb1d99d0db7f314e86059502d175c6
|
refs/heads/master
| 2016-09-05T18:24:56.275671
| 2014-06-19T04:13:11
| 2014-06-19T04:13:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,230
|
rd
|
RGData.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% RGData.R
%
% on Tue Jan 15 18:36:16 2008.
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{RGData}
\docType{class}
\alias{RGData}
\title{The RGData class}
\description{
Package: aroma \cr
\bold{Class RGData}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma]{MicroarrayData}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{RGData}\cr
\bold{Directly known subclasses:}\cr
\cr
public static class \bold{RGData}\cr
extends \link[aroma]{MicroarrayData}\cr
Creates a new \code{RGData} object.
The philosophy behind this data structure is to think about the data in the form of the signals in one channel (R) versus the signals in the other channel (G).
This is in contrast to the idea of the \code{\link{MAData}} structure, which thinks about the data as the log ratios (M) and log intensities (A) for the spot signals.
}
\usage{RGData(R=NULL, G=NULL, layout=NULL, extras=list())}
\arguments{
\item{R,G}{A NxM \code{\link[base]{matrix}} containing (non-logged) signals of the red
(green) channel, where N is the number of spots on each slide and M
is the number of slides in this data set.}
\item{layout}{A \code{\link{Layout}} object specifying the spot layout of the
slides in this data set.}
\item{extras}{Private argument. Do not use.}
}
\section{Fields and Methods}{
\bold{Fields}
\tabular{rll}{
\tab \code{R} \tab The signal for channel R (non-logged). \cr
\tab \code{G} \tab The signal for channel G (non-logged). \cr
}
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{as.character} \tab -\cr
\tab \code{\link[aroma:as.MAData.RGData]{as.MAData}} \tab Transform from the red and green intensities into log ratios between them and the log product of them.\cr
\tab \code{as.RawData} \tab -\cr
\tab \code{as} \tab -\cr
\tab \code{as.RGData} \tab -\cr
\tab \code{boxplot} \tab -\cr
\tab \code{\link[aroma:calibrateMultiscan.RGData]{calibrateMultiscan}} \tab Calibrates multiple re-scanned images based on an affine model.\cr
\tab \code{getCalibratedMultiscan} \tab -\cr
\tab \code{getChannelNames} \tab -\cr
\tab \code{\link[aroma:getColors.RGData]{getColors}} \tab Generates red to green colors for each of the specified spots.\cr
\tab \code{\link[aroma:getLogIntensities.RGData]{getLogIntensities}} \tab Calculates the log-intensitites (A values).\cr
\tab \code{\link[aroma:getLogRatios.RGData]{getLogRatios}} \tab Calculates the log-ratios (M values).\cr
\tab \code{getWithinChannelPairs} \tab -\cr
\tab \code{\link[aroma:mean.RGData]{mean}} \tab Genewise Average Mean for channel R and G.\cr
\tab \code{\link[aroma:normalizeAffine.RGData]{normalizeAffine}} \tab Weighted affine normalization between channels and arrays.\cr
\tab \code{\link[aroma:normalizeCurveFit.RGData]{normalizeCurveFit}} \tab Within-slide intensity-dependent normalization in (A,M).\cr
\tab \code{normalizeGenewise} \tab -\cr
\tab \code{normalizeLoess} \tab -\cr
\tab \code{normalizeLowess} \tab -\cr
\tab \code{normalizeQuantile} \tab -\cr
\tab \code{normalizeRobustSpline} \tab -\cr
\tab \code{normalizeSpline} \tab -\cr
\tab \code{plot} \tab -\cr
\tab \code{plotSpatial} \tab -\cr
\tab \code{plotXY} \tab -\cr
\tab \code{range} \tab -\cr
\tab \code{\link[aroma:shift.RGData]{shift}} \tab Shift the log-ratios, log-intensities or the raw signal.\cr
\tab \code{\link[aroma:swapDyes.RGData]{swapDyes}} \tab Swap dyes of one or many slides.\cr
\tab \code{\link[aroma:var.RGData]{var}} \tab Genewise Variance for channel R and G.\cr
}
\bold{Methods inherited from MicroarrayData}:\cr
addFlag, append, applyGenewise, applyGroupwise, applyPlatewise, applyPrintdipwise, applyPrinttipwise, as.character, as.data.frame, boxplot, clearCache, clearFlag, createColors, dataFrameToList, equals, extract, getBlank, getCache, getChannelNames, getColors, getExcludedSpots, getExtra, getExtreme, getFieldNames, getFlag, getInclude, getLabel, getLayout, getProbeWeights, getSignalWeights, getSlideNames, getSlidePairs, getSpotPosition, getSpotValue, getTreatments, getView, getWeights, getWeightsAsString, hasExcludedSpots, hasLayout, hasProbeWeights, hasSignalWeights, hasWeights, highlight, hist, isFieldColorable, keepSlides, keepSpots, listFlags, lowessCurve, nbrOfDataPoints, nbrOfFields, nbrOfSlides, nbrOfSpots, nbrOfTreatments, normalizePlatewise, normalizePrintorder, normalizeQuantile, plot, plotDensity, plotGene, plotPrintorder, plotReplicates, plotSpatial, plotSpatial3d, plotXY, points, putGene, putSlide, qqnorm, quantile, range, range2, read, readHeader, readToList, removeSlides, removeSpots, resetProbeWeights, resetSignalWeights, select, seq, setCache, setExcludedSpots, setExtra, setFlag, setLabel, setLayout, setProbeWeights, setSignalWeights, setSlideNames, setTreatments, setView, setWeights, size, str, subplots, summary, text, updateHeader, validateArgumentChannel, validateArgumentChannels, validateArgumentGroupBy, validateArgumentSlide, validateArgumentSlides, validateArgumentSpotIndex, validateArgumentWeights, write, writeHeader
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clone, detach, equals, extend, finalize, gc, getEnvironment, getFields, getInstanciationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save
}
\details{
The mapping between M and A, and R and G is a one-to-one function.
Given the signal R and G for the R and the G channels you get the
M and the A values by:
\deqn{
M = \log_2\frac{R}{G},\quad
A = \log_2\sqrt{R{\cdot}G} = \frac{1}{2}\log_2 R{\cdot}G,
}{
M = log2(R/G), A = log2(sqrt(R*G)) = 1/2*log2(R*G),
}
which in [R] can be done by \code{ma <- as.MAData(rg)}. The reverse function, i.e. going back to the R and the G is:
\deqn{
R = \sqrt{2^{2A+M}},\quad G = \sqrt{2^{2A-M}}
}{
R = sqrt(2^(2A+M)), G = sqrt(2^(2A-M))
}
which in [R] can be done by \code{rg <- as.RGData(rg)}.
Note that if the signal in one or both channels is non-positive,
the log-transform will make these values undefined, that is, set
them to \code{\link[base]{NA}}. When going back to (G,R) from (A,M) these values
will remain \code{\link[base]{NA}}.
}
\note{
There are several functions that returns an object of this class, and
it is only in very special cases that you actually have to create one
yourself.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\examples{
# Create a raw data object from the preexisting example data in
# the sma package.
SMA$loadData("mouse.data")
layout <- Layout$read("MouseArray.Layout.dat", path=system.file("data-ex", package="aroma"))
raw <- RawData(mouse.data, layout=layout)
# Get the signal (here by default non-background corrected)
ma <- getSignal(raw)
# Transform (M,A) into (R,G)
rg <- as.RGData(ma)
}
\keyword{classes}
|
220ae470562ec1209d90e984e2626a45457c069d
|
0b30a3f1d338e8e10c9192aa8b1709d5174aaa16
|
/data/data_carpentry/generate-tidy-adaptation-data.R
|
4f3a1228e4ff4146201d1e99b953de85289f6f5f
|
[] |
no_license
|
mikoontz/ppp-adaptation
|
a5495a3817bfdb3683b10da82fcfa1eb436701a5
|
0133fc8935c0054b5d5d1112f28dcae605a9d59d
|
refs/heads/master
| 2020-04-05T00:51:04.578014
| 2018-11-06T16:59:40
| 2018-11-06T16:59:40
| 156,412,890
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,782
|
r
|
generate-tidy-adaptation-data.R
|
# Title: generate tidy adaptation data
#
# Author: Michael Koontz
# Email: mikoontz@gmail.com
#
# Date Created: 20150411
# Last Updated: 20150414
# This function takes the entered Tribolium flour beetle data from the "Eco-evolutionary consequences of multiple introductions" adaptation experiment (which is in long form), and puts it in a 2 dimensional form to more easily represent the time series. Each population is defined by a unique ID, which is included in each of the produced dataframes here. This makes for easy merging (using ID as a key) with the 'adaptation attributes.csv' file (which includes block number, treatment types, the degree of expected heterozygosity lost in generation 8 and 9, etc.)
# Requires the tidyr package for reshaping the data.
# Input is the long-form entered data in a dataframe object type.
# Returns a list of dataframes representing the different values that are unique to each ID/Generation combination.
# Further manipulations on these data frames are possible using other functions.
# Load tidyr library (provides spread() used by tidy.adapt below).
# NOTE(review): require() + install.packages() at source time is fragile --
# it installs packages as a side effect and require() only warns on
# failure; library(tidyr) with a declared dependency is preferred.
if (!require("tidyr"))
{install.packages("tidyr"); library(tidyr)}
# Reshape long-form adaptation-experiment data into wide per-population
# time series (one row per population ID, one column per generation).
#
# adapt: long-form data frame, one row per population x generation, with
#   columns ID, Generation, Census (N[t+1]), N0 (N[t]) and Person.
#
# Returns a list of three wide data frames keyed by ID:
#   Nt     -- starting population sizes; columns renamed to "9"/"10"
#             (hard-coded: assumes exactly two generations of data)
#   Ntp1   -- census counts, one column per generation
#   person -- who performed each census
#
# Fix vs original: the bare head()/tail() calls sprinkled through the body
# were dead code (their values were discarded and nothing is auto-printed
# inside a function), so they have been removed.
tidy.adapt <- function(adapt)
{
  #----------
  # N[t+1] dataframe
  # ---------
  Ntp1 <- spread(subset(adapt, select = c(ID, Generation, Census)),
                 Generation, Census)
  #----------
  # N[t] dataframe
  # ---------
  Nt <- spread(subset(adapt, select = c(ID, Generation, N0)),
               Generation, N0)
  names(Nt) <- c("ID", paste(9:10))
  #----------
  # Census taker dataframe
  # ---------
  person <- spread(subset(adapt, select = c(ID, Generation, Person)),
                   Generation, Person)
  return(list(Nt = Nt, Ntp1 = Ntp1, person = person))
}
|
77ec9ab686a71ff7bcaf4010f884c1acb3aaecaa
|
527160365d8a10036149ed196f5aa1ff04fe6f14
|
/models/increase_in_sqft.R
|
ad263f736e46c497babf66baca73c5d8256a3da9
|
[] |
no_license
|
SumedhSankhe/DS5110Project
|
4907c84510f5cb049f50343df9d28f7309a2139f
|
5c8a62ab454f49b2a157b3dc8d72c2f3a95707c8
|
refs/heads/master
| 2020-04-20T10:16:11.715101
| 2017-12-11T00:51:09
| 2017-12-11T00:51:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
increase_in_sqft.R
|
# Assessed-value-per-sqft analysis by neighbourhood, with a naive linear
# projection for 2018. Relies on readr/dplyr/reshape2/ggplot2 being loaded.
model.data <- read_csv("models/model_data.csv",
                       col_types = cols(Latitude = col_skip(),
                                        Longitude = col_skip(),
                                        STRUCTURE_CLASS = col_skip(),
                                        X1 = col_skip(),
                                        upid = col_skip()))

# Mean assessed value per square foot per region, for 2014-2017.
model.data%>%
  group_by(regions)%>%
  transmute(price_per_sqft14 = X2014/LIVING_AREA,
            price_per_sqft15 = X2015/LIVING_AREA,
            price_per_sqft16 = X2016/LIVING_AREA,
            price_per_sqft17 = X2017/LIVING_AREA)%>%
  na.omit()%>%
  group_by(regions)%>%
  summarise_all(mean)-> rise_per_sqft

# Dodged bar chart: one bar per year within each region, regions ordered
# by value and flipped to horizontal.
rise_per_sqft%>%
  melt()%>%
  ggplot(aes(x = reorder(regions,value),
             y = value,
             fill = variable))+
  geom_bar(stat = "identity",
           position = position_dodge(width =0.8))+
  coord_flip()+
  labs(x = "Neighborhoods",
       y = "Value per sqft in $",
       fill = "",
       title = "Assesment value per sqft per year")

# Fit the 2017 value from the 2014-2016 values (columns 2:4).
x <- as.matrix(rise_per_sqft[2:4])
y <- rise_per_sqft$price_per_sqft17
lm_fit <- lm(y~x)
summary(lm_fit)
# In-sample RMSE and MAE of the fit.
sqrt(mean(resid(lm_fit)^2))
mean(abs(resid(lm_fit)))
# Project 2018 by sliding the window to the 2015-2017 columns (3:5).
# NOTE(review): because lm() was fitted with a matrix predictor named "x",
# predict() cannot match this newdata's columns and will fall back to the
# training fits (with a warning) -- verify the projection is genuine.
rise_per_sqft$projectedfor2018 <- predict(lm_fit,
                                          newdata = rise_per_sqft[3:5])
# NOTE(review): output filename has no .csv extension.
write.csv(rise_per_sqft,"ProjectedRiseInSqft")
|
eea5d5c00f031044f2a273c614efaf76306b7b1f
|
52193a50c81771fb2186cc55e27a23d7e07cc9b8
|
/R/GetSongsArtistsID.R
|
615dc1d41cc2c9899097db1c86f2fb1fcd15b73f
|
[] |
no_license
|
epmrio/AutomatedGeniusR
|
7258c8a7f8864301e59f72656c44db07ab4df6f2
|
1607f4ab9fe2fdbafb135dc179fcf0d8eb20478b
|
refs/heads/master
| 2021-05-21T19:02:32.717739
| 2020-10-31T17:52:21
| 2020-10-31T17:52:21
| 252,762,970
| 0
| 0
| null | 2020-10-31T17:52:22
| 2020-04-03T14:56:02
|
R
|
UTF-8
|
R
| false
| false
| 3,688
|
r
|
GetSongsArtistsID.R
|
#' Scrap Artists and songs information from ID
#'
#' This function allows you to get information about artists and songs from an artist ID via the Genius API. You need to have the geniusr package installed.
#'
#' @param x A list of artists IDs
#'
#' @return A dataframe
#'
#' @examples
#'
#' \dontrun{
#'
#' ## Get a dataset from the following list :
#' ID_list <- c("ID-1", "ID-2")
#' # Use the Function to retrieve information
#' artists_info <- GetSongsArtistsID(ID_list)
#'
#' }
#'
#' @export
GetSongsArtistsID <- function(x) {
  # NOTE(review): require() inside a function silently returns FALSE when a
  # package is missing; library() or requireNamespace() checks fail louder.
  require(geniusr)
  require(stringr)
  # Countdown of artists left to process (used only in progress messages).
  len_id<-length(x)
  # Accumulator: one row per song across all artists.
  # NOTE(review): rbind inside loops grows quadratically; acceptable for
  # small artist lists.
  songs_total<-as.data.frame(matrix(0,ncol = 7,nrow = 0))
  colnames(songs_total)<-c("song_id","song_name","song_lyrics_url","song_annotation_count","artist_id","artist_name","artist_url")
  # build the per-artist totals of annotations and song counts
  id_annot_total<-as.data.frame(matrix(0,ncol = 3,nrow = 0))
  colnames(id_annot_total)<-c("artist_id","annotation_count_artist","number_songs_by_artist")
  for (id in x) {
    # Progress message (French): "Fetching the songs of N artists".
    print(paste0("Récupération des chansons de ",len_id," artistes"))
    # Numbered prints are debug checkpoints for locating try() failures.
    print(1)
    # NOTE(review): try() swallows errors, so a failed fetch silently
    # reuses the previous iteration's songs_artistes.
    try(songs_artistes<-get_artist_songs_df(id))
    print(2)
    try(songs_artistes$number_songs_by_artist<-nrow(songs_artistes))
    print(3)
    try(songs_total<-rbind(songs_total,songs_artistes))
    print(4)
    # fill in id_annot_total
    try(songs_artistes$annotation_count_artist<-sum(songs_artistes$song_annotation_count))
    print(5)
    try(id_annot_brouillon<-songs_artistes[,c("artist_id","annotation_count_artist","number_songs_by_artist")])
    print(6)
    try(id_annot_total<-rbind(id_annot_total,id_annot_brouillon))
    print(7)
    len_id<-len_id-1
  }
  # Fetch per-song metadata for every collected song id.
  id_songs<-songs_total$song_id
  len_songs_meta<-length(id_songs)
  songs_meta_total<-as.data.frame(matrix(0,ncol = 13,nrow = 0))
  colnames(songs_meta_total)<-c("song_id","song_name","song_lyrics_url","song_art_image_url","song_release_date","song_pageviews","song_annotation_count","artist_id","artist_name","artist_url","album_id","album_name","album_url")
  for (song_id_ in id_songs) {
    # Progress message (French): "Fetching metadata for N songs...".
    print(paste0("Recuperation des metadata pour ",len_songs_meta," chansons..."))
    try(song_meta<-get_song_meta(song_id_))
    songs_meta_total<-rbind(songs_meta_total,song_meta)
    len_songs_meta<-len_songs_meta-1
  }
  # Supplementary artist-level info (image URL, follower count).
  len_infoscomp<-length(x)
  info_comp_total<-as.data.frame(matrix(0,ncol = 5,nrow = 0))
  colnames(info_comp_total)<-c("artist_id","artist_name","artist_url","artist_image_url","followers_count")
  for (art_id in x) {
    # Progress message (French): "Fetching supplementary info ... almost done".
    print(paste0("Recuperation d'informations complémentaires pour ",len_infoscomp," artistes. C'est presque fini"))
    try(art_meta<-get_artist_df(art_id))
    info_comp_total<-rbind(info_comp_total,art_meta)
    len_infoscomp<-len_infoscomp-1
  }
  # Merge song metadata with artist info and annotation totals, then
  # drop duplicated songs.
  songs_meta_total<-merge(songs_meta_total,info_comp_total,by="artist_id")
  songs_meta_total<-merge(songs_meta_total,id_annot_total,by="artist_id")
  # NOTE(review): if there are NO duplicated song_ids, which() returns
  # integer(0) and `-integer(0)` selects ZERO rows, wiping the table --
  # songs_meta_total[!duplicated(songs_meta_total$song_id), ] is safer.
  songs_meta_total<-songs_meta_total[-which(duplicated(songs_meta_total$song_id)==TRUE),]
  # total-views column: sum of page views across each artist's songs
  songs_meta_total$song_pageviews[is.na(songs_meta_total$song_pageviews)]<-0
  songs_meta_total$views_artists<-NA
  for (artist_identifiant in x) {
    songs_meta_total$views_artists[which(songs_meta_total$artist_id == artist_identifiant)]<-sum(songs_meta_total$song_pageviews[which(songs_meta_total$artist_id == artist_identifiant)])
  }
  # Final message (French): "The retrieval process is finished".
  print("Le processus de récupération est terminé")
  # Tidy up duplicate columns produced by the two merges.
  songs_meta_total$artist_name.y<-NULL
  songs_meta_total$artist_url.y<-NULL
  names(songs_meta_total)[match("artist_name.x",names(songs_meta_total))] <- "artist_name"
  names(songs_meta_total)[match("artist_url.x",names(songs_meta_total))] <- "artist_url"
  return(songs_meta_total)
}
|
a427c86869197e841582f5c655f15bd6a23ecab1
|
69288158e4f6663f0828dc99d17db2aa3bda6284
|
/scripts/analyses_R/old/positive_selection.R
|
b9c51ab580ab6b75b8edea35df192185dd60034d
|
[] |
no_license
|
wbglizhizhong/DNA-methylation-signatures-of-duplicate-gene-evolution-in-angiosperms
|
6ec00301db157100be21e83301d47e5af811464f
|
a76269d01277ddb0986f705fb7efef986d40981a
|
refs/heads/master
| 2023-04-29T06:24:45.870065
| 2021-05-18T14:56:35
| 2021-05-18T14:56:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,350
|
r
|
positive_selection.R
|
library(ggplot2)
library(scales)

# One Ka/Ks table is read per species from
# ../figures_tables/<sp>/<sp>_KaKs_values.csv.
species = c("Aduranensis","Aipaensis","Alyrata","Athaliana","Atrichopoda",
            "Bdistachyon","Boleracea","Brapa","Bvulgaris","Cclementina","Cpapaya",
            "Clanatus","Cmelo","Crubella","Csativus","Egrandis","Eguineensis",
            "Esalsugineum","Fvesca","Fxananassa","Gmax","Graimondii","Ljaponicus",
            "Macuminata","Mdomestica","Mesculenta","Mguttatus","Mtruncatula","Osativa",
            "Phallii","Ppersica","Ptrichocarpa","Pvirgatum","Pvulgaris","Pxbretschneideri",
            "Sbicolor","Sitalica","Slycopersicum","Stuberosum","Sviridis","Tcacao",
            "Vvinifera","Zmays")
#species = c("Athaliana")

# Fisher's exact test results accumulated over species x classification.
FE <- data.frame()
for(a in species){
  df1 <- read.csv(paste("../figures_tables/",a,"/",a,"_KaKs_values.csv",sep=""),header=T)
  # Positively selected genes (Ka/Ks > 1) vs total, by duplication type.
  # NOTE(review): df2 is built but never used downstream -- likely leftover.
  df2 <- merge(data.frame(table(df1[df1$Ka.Ks > 1,]$Duplication)),data.frame(table(df1$Duplication)),
               by="Var1",all=TRUE)
  colnames(df2) <- c("Duplication","Positive.Selection","Total")
  df2 <- rbind(df2,data.frame(Duplication=c("Total"),Positive.Selection=sum(df2$Positive.Selection),
                              Total=sum(df2$Total)))
  df2$Percent <- df2$Positive.Selection/df2$Total
  # Same tally by methylation classification, plus a grand-total row.
  df3 <- merge(data.frame(table(df1[df1$Ka.Ks > 1,]$Classification)),data.frame(table(df1$Classification)),
               by="Var1",all=TRUE)
  colnames(df3) <- c("Classification","Positive.Selection","Total")
  df3 <- rbind(df3,data.frame(Classification=c("Total"),Positive.Selection=sum(df3$Positive.Selection),
                              Total=sum(df3$Total)))
  df3$Percent <- df3$Positive.Selection/df3$Total
  # Keep rows 1, 2, 4 and 5 (dropping row 3); `order` puts the Total bar
  # first in the plot.
  df3 <- df3[c(1,2,4,5),]
  df3$order <- c(2,3,4,1)
  # Per-species bar chart of the percent of genes with Ka/Ks > 1 per class.
  p <- ggplot(df3) +
    geom_bar(aes(y=Percent,x=reorder(Classification,order),
                 fill=Classification),stat="identity") +
    scale_y_continuous("Percent genes Ka/Ks > 1",labels=percent,expand=c(0,0)) +
    theme_bw() +
    theme(axis.text=element_text(color="black"),
          axis.ticks=element_line(color="black"),
          # NOTE(review): ggplot2 expects legend.position = "none"
          # (lowercase); "None" may not suppress the legend -- confirm.
          legend.position="None") + xlab("") +
    scale_fill_manual(values=c("Total"="black","gbM"="#56B4E9",
                               "teM"="#E69F00","Unmethylated"="#CC79A7"))
  ggsave(paste("../figures_tables/",a,"/",a,"_positive_selection.pdf",sep=""),p,device="pdf")
  # 2x2 Fisher test of each class against the remainder (row 4 = Total).
  # NOTE(review): each contingency table is constructed and tested twice
  # (once for $estimate, once for $p.value); calling fisher.test() once
  # into a variable would halve the work.
  for(i in 1:3){
    FE <- rbind(FE,data.frame(species=c(a),classification=df3[i,1],
                              estimate=fisher.test(matrix(c(df3[i,2],
                                                            df3[i,3]-df3[i,2],
                                                            df3[4,2]-df3[i,2],
                                                            df3[4,3]-df3[4,2]-df3[i,3]),
                                                          nrow=2,ncol=2),alternative="two.sided")$estimate,
                              p.value=fisher.test(matrix(c(df3[i,2],
                                                           df3[i,3]-df3[i,2],
                                                           df3[4,2]-df3[i,2],
                                                           df3[4,3]-df3[4,2]-df3[i,3]),
                                                         nrow=2,ncol=2),alternative="two.sided")$p.value))
  }
}
# Benjamini-Hochberg correction across all tests, then export.
FE$p.adjust <- p.adjust(FE$p.value,method="BH")
write.csv(FE,"../figures_tables/positive_selection.csv",quote=FALSE,row.names=FALSE)
|
0379f042491da69bdbbbb730a8b1211d4d012f49
|
107f1f46755f354c35f66a11bcfca2d71ec088e6
|
/Optimization/NelderMead.R
|
35bb9b9f743de99d8f94ab787543ec829f9bbc19
|
[] |
no_license
|
AngelPone/SCalgorithm
|
a0db81da45014bbbd02ae046f6cbd2e8326e87c9
|
65538ccac1b32cafcd406e432ae942bbcce779eb
|
refs/heads/master
| 2020-05-26T20:09:27.563866
| 2019-05-24T05:35:56
| 2019-05-24T05:35:56
| 188,358,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,498
|
r
|
NelderMead.R
|
# Nelder-Mead simplex optimisation of `func` starting from simplex `ps`.
#
# func  : objective function, numeric vector -> scalar.
# ps    : data frame of simplex vertices (one row per vertex, one column
#         per coordinate; n+1 rows for an n-dimensional problem).
# aim   : 0 = minimise (default), 1 = maximise.
# param : coefficients -- alpha (reflection), gamma (expansion),
#         rho (contraction).
# plot  : if TRUE, record every simplex via the animation package and save
#         a GIF; `plot_bg(...)` must then draw the plot background.
#
# Returns list(simplex = final simplex including the fx column,
# iter_num = iterations performed). Iteration stops after 50 steps, when
# the simplex degenerates (|det| < 1e-9), or when the best and worst fx
# agree to within 1e-9.
NelderMead <- function(func, ps, aim = 0, param = list(alpha = 1, gamma = 2, rho = 0.5), plot = FALSE, plot_bg = NULL){
  #### Library Requirement ####
  library(dplyr)
  library(animation)
  #### Sub Functions ####
  # Classify candidate p against the sorted simplex (best vertex in row 1,
  # worst in the last row, fx in the last column):
  #   1 = between worst and best -> accept the reflection
  #   2 = at least as good as the best -> try expansion
  #   3 = no better than the worst -> contract
  judge_1 <- function(ps, p, aim){
    # Candidates built by data-frame arithmetic arrive as 1-row data
    # frames; flatten before calling func.
    if(inherits(p, "data.frame")){p <- as.vector(as.matrix(p))}
    if(aim){
      if(func(p) <= ps[1,ncol(ps)] & func(p) > ps[nrow(ps),ncol(ps)]){return(1)}
      if(func(p) >= ps[1,ncol(ps)]){return(2)}
      if(func(p) <= ps[nrow(ps),ncol(ps)]){return(3)}
    }else{
      if(func(p) >= ps[1,ncol(ps)] & func(p) < ps[nrow(ps),ncol(ps)]){return(1)}
      if(func(p) <= ps[1,ncol(ps)]){return(2)}
      if(func(p) >= ps[nrow(ps),ncol(ps)]){return(3)}
    }
  }
  # Pairwise comparison: returns 1 if p2 is the (weakly) better point
  # under the current aim, otherwise 2.
  judge_2 <- function(p1,p2, aim){
    p2 <- as.vector(as.matrix(p2))
    p1 <- as.vector(as.matrix(p1))
    if(aim == 1){if(func(p1) <= func(p2)){return(1)}else{return(2)}
    }
    if(aim == 0){if(func(p1) >= func(p2)){return(1)}else{return(2)}}
  }
  #### shrink ####
  # Shrink every vertex halfway towards the best vertex (row 1).
  shrink <- function(ps){
    ps <- ps %>% apply(MARGIN = 1,function(x){return((x+as.matrix(ps)[1,])/2)}) %>% t %>% as.data.frame()
    return(ps)
  }
  #### iter-stop ####
  # Returns 1 to continue iterating, 0 to stop.
  iter_stop <- function(ps, iter_num){
    if(iter_num >= 50){return(0)}
    # Edge matrix of the simplex relative to the last vertex; near-zero
    # determinant means the simplex has collapsed.
    X <- ps[-nrow(ps),-ncol(ps)] %>% apply(1,FUN = function(x){x-as.matrix(ps)[nrow(ps),-ncol(ps)]}) %>% t
    if(abs(det(X))<1e-09){return(0)}
    if(abs(ps$fx[1] - ps$fx[nrow(ps)]) < 1e-09){return(0)}
    else(return(1))
  }
  #### Plot ####
  if(plot){
    ani.record(reset = TRUE)
    plot_bg(main = "iter_Num = 0")
    polygon(x = ps[,1], y = ps[,2])
    ani.record()
  }
  #### Main ####
  iter_num = 0
  ps$fx <- apply(ps, MARGIN = 1, func)
  # Sort so the best vertex is first and the worst last (descending fx
  # when maximising, ascending when minimising).
  if(aim){
    ps <- ps %>% arrange(desc(fx))
  }else{
    ps <- ps %>% arrange(fx)
  }
  while(iter_stop(ps, iter_num)){
    # Centroid of all vertices except the worst. NOTE: 1:nrow(ps)-1 parses
    # as (1:nrow(ps)) - 1, i.e. 0:(n-1); the 0 index is silently dropped,
    # so rows/cols 1..n-1 are selected as intended (accidentally correct).
    p0 <- sapply(ps[1:nrow(ps)-1,1:ncol(ps)-1], mean)
    # Reflect the worst vertex through the centroid.
    pr <- p0 + param$alpha*(p0-ps[nrow(ps),1:ncol(ps)-1])
    a = judge_1(ps, pr, aim = aim)
    if(a == 1){ps[nrow(ps),] = c(pr, func(pr))}
    if(a == 2){
      # Reflection beat the best vertex: attempt expansion and keep
      # whichever of pr / pe is better.
      pe = p0 + param$gamma*(pr-p0)
      if(judge_2(pe, pr, aim) == 1){
        ps[nrow(ps),] = c(pr, func(pr))
      }else{
        ps[nrow(ps),] = c(pe, func(pe))
      }
    }
    if(a == 3){
      # Contract towards the worst vertex; if even the contracted point is
      # no better than the worst vertex, shrink the whole simplex.
      pc <- p0 + param$rho*(ps[nrow(ps),-ncol(ps)] - p0)
      # BUG FIX: was ps[nrow(ps),-1], which drops the first *coordinate*
      # and feeds the fx column into func; the fx (last) column must be
      # dropped instead so the worst vertex's coordinates are compared.
      if(judge_2(pc, ps[nrow(ps),-ncol(ps)], aim) == 2){
        ps[nrow(ps),] = c(pc, func(pc))
      }else{ ps <- shrink(ps)}
    }
    if(aim){
      ps <- ps %>% arrange(desc(fx))
    }else{
      ps <- ps %>% arrange(fx)
    }
    iter_num = iter_num + 1
    if(plot){
      plot_bg(main = paste0('iter_Num = ',iter_num), xlab = expression(x),ylab = expression(y))
      polygon(x=ps[,1],y=ps[,2])
      ani.record()
    }
  }
  if(plot){
    ani.options(interval = 0.5)
    saveGIF(ani.replay())}
  return(list(simplex = ps, iter_num = iter_num))
}
#### Objective function ####
# Squared Euclidean norm of the first two coordinates of x.
f <- function(x) {
  x[[1]]^2 + x[[2]]^2
}
# Initial simplices for the 2-D demo.
# NOTE(review): the first assignment is immediately overwritten by the
# second -- presumably exploratory leftovers; only the second is used.
a = data.frame(x1 = c(1,1,2), x2 = c(1,2,2))
a = data.frame(x1 = c(2,3,2), x2 = c(1,2,2))
#### plot background #####
# Draws the contour background (image of x^2 + y^2 over [-3,3]^2); extra
# arguments are forwarded to image().
plot_bg <- function(...){
  x = seq(-3,3, 0.1)
  y = seq(-3,3, 0.1)
  z = expand.grid(x = x, y = y)
  z = matrix(z$x^2 + z$y^2, nrow = length(x))
  image(x = x,y = y,z = z,...)
}
# Run the demo, recording the simplex evolution to a GIF.
NelderMead(f, a, plot = TRUE, plot_bg = plot_bg)
#### Page Rank ####
# Toy PageRank: normalise the moduli of the eigenvectors of the link
# matrix A and rank the sites by the leading eigenvector's weights.
# NOTE(review): %>% requires magrittr/dplyr to be attached; `a` is reused
# here for the eigen decomposition, clobbering the simplex above.
A = matrix(c(0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,1,1,1,0,0,1,1,0,1,0),5,5)
a <- eigen(A)
(Vectors <- a$vectors %>% apply(MARGIN = 2, function(x){Mod(x)/sum(Mod(x))}))
(Rirank <- rank(Vectors[,1]))
# 2 1 3 4 5
# Web site 5 is most important
|
f0cdc5624a176e6943cb7153ffef99733f776a42
|
5bbe298693fd1f49b5367d3dff473108cf82770f
|
/permutation.R
|
b0680608071163a06c94f38292e985f59321eb14
|
[] |
no_license
|
percylinhai/stat545
|
f61fcd129aad4706af8c58ddc87280092bea6bc0
|
13c40f85b90c7f69a8986fb98b9fb8d18f851d2a
|
refs/heads/master
| 2020-05-27T01:03:46.953584
| 2019-05-24T14:05:46
| 2019-05-24T14:05:46
| 188,432,071
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,535
|
r
|
permutation.R
|
library(ggplot2)
# NOTE(review): %>% and filter() below require dplyr/magrittr, which this
# script never loads -- presumably attached earlier in the session.
dat<-read.table("~/Study/study resources/STAT545/Project/Transcripts/Split_Transcripts/sample12.txt",h=T)
# Split transcripts by speaker identity (0 = Trump, 1 = Clinton).
Trump<-dat%>%filter(Identity==0)
Clinton<-dat%>%filter(Identity==1)
# Permutation null distributions of predicted logits: each predictor is
# resampled independently within a speaker and scored with glm_fit.
# NOTE(review): glm_fit is never defined in this file -- it must already
# exist in the workspace when this script runs.
null_dist_trump<-NULL
null_dist_clinton<-NULL
for (i in 1:1000){
  b<-sample(Clinton$Broadness,1)
  d<-sample(Clinton$demonstrative,1)
  # NOTE(review): this overwrites the loop index i; harmless (for()
  # reassigns i from the sequence each pass) but confusing.
  i<-sample(Clinton$indef,1)
  n<-sample(Clinton$Negative,1)
  p<-predict(glm_fit,
             data.frame(Broadness=b,demonstrative=d,indef=i,Negative=n))
  # Prepend; growing a vector with c() is O(n^2) but fine for 1000 draws.
  null_dist_clinton<-c(p,null_dist_clinton)
}
for (i in 1:1000){
  b<-sample(Trump$Broadness,1)
  d<-sample(Trump$demonstrative,1)
  i<-sample(Trump$indef,1)
  n<-sample(Trump$Negative,1)
  p<-predict(glm_fit,
             data.frame(Broadness=b,demonstrative=d,indef=i,Negative=n))
  null_dist_trump<-c(p,null_dist_trump)
}
null_dist_clinton<-sort(null_dist_clinton)
null_dist_trump<-sort(null_dist_trump)
full_dat<-data.frame(logit=c(null_dist_clinton,null_dist_trump),speaker=c(rep("Clinton",1000),rep("Trump",1000)))
# Overlaid density plot of the two null distributions
# (blue = Clinton, red = Trump).
ggplot()+
  geom_density(aes(x=logit,fill="Blue",color="Blue"),data=subset(full_dat,speaker=="Clinton"),alpha=0.4)+
  geom_density(aes(x=logit,fill="Red",color="Red"),data=subset(full_dat,speaker=="Trump"),alpha=0.4)+
  scale_colour_manual(name="Speaker",values=c("Blue","Red"),labels=c("Clinton","Trump"))+
  scale_fill_manual(name="Speaker",values=c("Blue","Red"),labels=c("Clinton","Trump"))
# Base-graphics alternatives: overlaid histograms and density curves.
hist(null_dist_clinton,col="Blue",xlim=c(-40,40))
hist(null_dist_trump,col="Red",add=T)
box()
plot(density(null_dist_clinton),col="Blue")
lines(density(null_dist_trump),col="Red")
|
6977e4dec1b5ff651f1d34ba5fc48f2b7902e968
|
01d5318b66d8b7fe8e9942e4c5171725c01f0400
|
/ui.R
|
eba15c69c14c0758a57834538f4838162f7df853
|
[] |
no_license
|
sannpeterson/find-genes
|
c64ec396a4494d427a83dcee6f60d80d2c3b8fab
|
d9c3c20831141ca998c56727d7cb7ad9c76add86
|
refs/heads/master
| 2020-09-09T10:26:43.868815
| 2019-08-14T16:40:00
| 2019-08-14T16:40:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,033
|
r
|
ui.R
|
## app.R ##
library(shinydashboard)
library(shiny)
library(plotly)
library(DT)
# JavaScript injected into the page head: for every element carrying a
# data-proxy-click attribute, pressing Enter (keyCode 13) inside it clicks
# the button whose id is named by that attribute. Used below so Enter in
# the text input triggers the Submit button. (String body left untouched.)
jscode <- '
$(function() {
var $els = $("[data-proxy-click]");
$.each(
$els,
function(idx, el) {
var $el = $(el);
var $proxy = $("#" + $el.data("proxyClick"));
$el.keydown(function (e) {
if (e.keyCode == 13) {
$proxy.click();
}
});
}
);
});
'
# Build a full-width page row containing a level-1 heading, a horizontal
# rule, and a DT table placeholder identified by `id`.
dt_output <- function(title, id) {
  heading <- h1(paste0(title))
  table_slot <- DTOutput(id)
  fluidRow(
    column(12, heading, hr(), table_slot)
  )
}
# Dashboard layout: a sidebar with a search term input (Enter submits via
# the injected jscode) and a body with a scrollable plotly bar chart plus
# a results table.
ui <- dashboardPage(
  dashboardHeader(title = "Genetic Streetlight"),
  dashboardSidebar(
    tags$head(tags$script(HTML(jscode))),
    # input field
    # data-proxy-click = "submit" makes Enter in this field click #submit.
    tagAppendAttributes(
      textInput("user_text", label = "Enter medical term:", placeholder = "Please enter some text."),
      `data-proxy-click` = "submit"
    ),
    # submit button
    actionButton("submit", label = "Submit")
  ),
  dashboardBody(
    # Fixed-width plot wrapped in a horizontally scrollable div.
    fluidRow(box(div(plotlyOutput("plotlyBar", width = "1000px"), style = "overflow-x: scroll"),width=12),
             box(dataTableOutput('x1'), width=12))
  )
)
|
f13264384027bd1285408a4b0dac5cc95f24417b
|
a407fbe5b374e7639c3608ebb420d9a62675a3ba
|
/man/car.Rd
|
2ff79d71a4ecd1377bb04ec556be8ff511967d05
|
[] |
no_license
|
Barardo/FFTrees
|
c0528d0312d1db5c293ce8fe168ba6e17f484f64
|
2444c3b1c9228041d04eac4a6d9058807a3edea7
|
refs/heads/master
| 2023-04-16T01:05:01.451355
| 2021-04-28T16:03:44
| 2021-04-28T16:03:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
car.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/car_doc.R
\docType{data}
\name{car}
\alias{car}
\title{Car acceptability data}
\format{
A data frame containing 1728 rows and 7 columns
\describe{
\item{buying.price}{Numeric}
\item{maint.price}{Factor}
\item{doors}{Factor}
\item{persons}{Numeric}
\item{luggage}{Numeric}
\item{safety}{Factor}
\item{acceptability}{Factor}
...
}
}
\source{
http://archive.ics.uci.edu/ml/datasets/Car+Evaluation
}
\usage{
car
}
\description{
Car acceptability data
}
\keyword{datasets}
|
9ada50094f9dfba49830594739d7a77604cecbe3
|
00ac235cefd57ee0316cece5c5c3163d24f04809
|
/4Neutal model-classlevel/vine-t1-C101-random/0Neutral model.R
|
590125305a4eb697ca9f36a2a9f31bd1cacebc65
|
[] |
no_license
|
wangtingting0104/HolobionT-Dataset
|
d7cfa6d098cd25e51705b47c974fae2d1f12f95c
|
04fcb2814e36d07260fec0a9a60de296dbc1f4bd
|
refs/heads/main
| 2023-04-09T15:39:21.900658
| 2022-11-01T18:33:38
| 2022-11-01T18:33:38
| 560,533,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,782
|
r
|
0Neutral model.R
|
library(dplyr)
# Neutral.fit() (neutral community model fitting) is defined here.
source("Neutral model.r")
# Species pool (metacommunity) and local OTU table; both transposed so
# samples are rows.
metacommunity = read.csv(file ="0vine-t1-c101-pool.csv" ,header=TRUE,sep=",",row.names = 1)
metacommunity[1:10,1:10]
metacommunity = t(metacommunity)
otu = read.csv(file ="0vine-t1-c101-otutable.csv" ,header=TRUE,sep=",",row.names = 1)
otu=t(otu)
design = read.csv(file ="2metadata.csv" ,header=TRUE,row.names=1,sep=",")
head(design)
###CFT1
# Subset the OTU table to the samples of rotation X101CT1.
aa=design[which(design$rotation=="X101CT1"),]
bb=aa$sample
comun=otu[bb,]
# Fit the neutral model: first call prints fit statistics, second returns
# the per-OTU table (observed freq, predicted freq, 95% bounds).
Neutral.fit(spp=comun, pool=metacommunity, stats=TRUE)
neu <- Neutral.fit(spp=comun, pool=metacommunity, stats=FALSE)
neu <- transform(neu, plog = log10(p))
neu$OTU<-rownames(neu)
# Partition OTUs relative to the neutral prediction envelope:
# Abo = above upper bound, Bel = below lower bound, Neu = within bounds.
up<-filter(neu,freq>pred.upr)
up$class<-"Abo"
lw<-filter(neu,freq<pred.lwr)
lw$class<-"Bel"
mid<-filter(neu,pred.lwr<=freq&freq<=pred.upr)
mid$class<-"Neu"
plot.data=merge(up,lw,all=T)
plot.data=merge(plot.data,mid,all=T)
write.table(plot.data,file="3X101CT1.csv",sep = ",",row.names=TRUE)
# Occurrence frequency vs log10 mean relative abundance, with the neutral
# prediction (solid line) and its 95% envelope (dashed).
# NOTE(review): guides(colour=FALSE) is deprecated in ggplot2 >= 3.3.4;
# guides(colour = "none") is the current form.
p1=ggplot(plot.data)+geom_point(aes(x=plog,y=freq,colour=class),alpha=0.8,size=2)+
  geom_line(aes(x=plog,y=freq.pred),colour="#1E90FF",size=1)+
  geom_line(aes(x=plog,y=pred.upr),colour="#1E90FF",size=1,linetype="dashed")+
  geom_line(aes(x=plog,y=pred.lwr),colour="#1E90FF",size=1,linetype="dashed")+
  labs(x="Mean relative abundance (log10)",y="Occurrence frequency")+
  scale_colour_manual(values=c("#FFD166","#06D6A0","#8D99AE"))+
  labs(title = "X101CT1")+
  theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+guides(colour=FALSE)
p1
###FCT1
# Identical analysis for rotation C101T1 (copy of the block above --
# candidate for extraction into a function).
aa=design[which(design$rotation=="C101T1"),]
bb=aa$sample
comun=otu[bb,]
Neutral.fit(spp=comun, pool=metacommunity, stats=TRUE)
neu <- Neutral.fit(spp=comun, pool=metacommunity, stats=FALSE)
neu <- transform(neu, plog = log10(p))
neu$OTU<-rownames(neu)
up<-filter(neu,freq>pred.upr)
up$class<-"Abo"
lw<-filter(neu,freq<pred.lwr)
lw$class<-"Bel"
mid<-filter(neu,pred.lwr<=freq&freq<=pred.upr)
mid$class<-"Neu"
plot.data=merge(up,lw,all=T)
plot.data=merge(plot.data,mid,all=T)
write.table(plot.data,file="3C101T1.csv",sep = ",",row.names=TRUE)
p2=ggplot(plot.data)+geom_point(aes(x=plog,y=freq,colour=class),alpha=0.8,size=2)+
  geom_line(aes(x=plog,y=freq.pred),colour="#1E90FF",size=1)+
  geom_line(aes(x=plog,y=pred.upr),colour="#1E90FF",size=1,linetype="dashed")+
  geom_line(aes(x=plog,y=pred.lwr),colour="#1E90FF",size=1,linetype="dashed")+
  labs(x="Mean relative abundance (log10)",y="Occurrence frequency")+
  scale_colour_manual(values=c("#FFD166","#06D6A0","#8D99AE"))+
  labs(title = "C101T1")+
  theme_bw()+theme(panel.grid.major = element_blank(),panel.grid.minor = element_blank())+guides(colour=FALSE)
p2
# Arrange both panels into a single PDF.
library(ggpubr)
pdf("4random.pdf",width=10,height=10)
ggarrange(p1,p2, ncol=2,nrow=2)
dev.off()
library(reshape2)
library(ggplot2)
library(vegan)
# Cumulative relative abundance of the three neutral-model partitions
# (Abo/Bel/Neu) for the X101CT1 samples, shown as a single stacked bar.
OTU = read.csv(file ="0vine-t1-c101-otutable.csv" ,header=TRUE,sep=",",row.names = 1)
OTU_t=t(OTU)
OTU_t[1:10,1:10]
design = read.csv(file ="2metadata.csv" ,header=TRUE,sep=",")
#3X101CT1
model = read.csv(file ="3X101CT1.csv",header=TRUE,row.names=1,sep=",")
head(model)
aa=design[which(design$rotation=="X101CT1"),]
bb=aa$sample
OTU1=OTU_t[bb,]
# Average counts across the rotation's samples, then convert to column
# percentages with questionr::cprop().
OTU1 = as.data.frame(colMeans(OTU1))
OTU1=as.matrix(OTU1)
head(OTU1)
library("questionr")
a=cprop(OTU1)### column percentages (relative abundance)
OTU2=as.data.frame(a[!rownames(a) %in% c("Total") , -which(colnames(a) %in% c("All"))])## drop the "Total" row and "All" column that cprop() appends
head(OTU2)
OTU2$OTU=rownames(OTU2)
# Attach each OTU's neutral-model class, then sum abundance per class.
OTU3=merge(model[,c("OTU","class")],OTU2,by.x="OTU",all= T)
head(OTU3)
OTU4=aggregate(OTU3[,(-1:-2)],list(OTU3$class),sum)
head(OTU4)
label=c(rep("X101CT1", 3))
OTU5=data.frame(OTU4, label)
p1 = ggplot(OTU5, aes(x=label, y = x, fill = Group.1 )) +
geom_bar(stat = "identity",position="stack",width=0.8)+
#scale_y_continuous(labels = scales::percent) +
xlab("")+
ylab("X101CT1-Cumulative relative abundance")+
theme_classic()+
theme(axis.text = element_text(size = 12), axis.title = element_text(size = 13)) +
theme(legend.text = element_text(size = 11))+
scale_fill_manual(values =rev(c("Abo"="#FFD166","Bel"="#06D6A0","Neu"="#8D99AE"))) +
theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent')) +
theme(legend.title = element_blank())
p1
write.csv(OTU5,"5X101-com.csv")
#3FCT1
# Same stacked-bar class composition as above, for the C101T1 rotation.
model = read.csv(file ="3C101T1.csv",header=TRUE,row.names=1,sep=",")
head(model)
aa=design[which(design$rotation=="C101T1"),]
bb=aa$sample
OTU1=OTU_t[bb,]
OTU1 = as.data.frame(colMeans(OTU1))
OTU1=as.matrix(OTU1)
head(OTU1)
library("questionr")
a=cprop(OTU1)###Column percentages##
OTU2=as.data.frame(a[!rownames(a) %in% c("Total") , -which(colnames(a) %in% c("All"))])# drop the "Total" row and "All" column added by cprop()
head(OTU2)
OTU2$OTU=rownames(OTU2)
OTU3=merge(model[,c("OTU","class")],OTU2,by.x="OTU",all= T)
head(OTU3)
OTU4=aggregate(OTU3[,(-1:-2)],list(OTU3$class),sum)
head(OTU4)
label=c(rep("C101T1", 3))
OTU5=data.frame(OTU4, label)
p2 = ggplot(OTU5, aes(x=label, y = x, fill = Group.1 )) +
geom_bar(stat = "identity",position="stack",width=0.8)+
#scale_y_continuous(labels = scales::percent) +
xlab("")+
ylab("3C101T1-Cumulative relative abundance")+
theme_classic()+
theme(axis.text = element_text(size = 12), axis.title = element_text(size = 13)) +
theme(legend.text = element_text(size = 11))+
scale_fill_manual(values =rev(c("Abo"="#FFD166","Bel"="#06D6A0","Neu"="#8D99AE"))) +
theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent')) +
theme(legend.title = element_blank())
p2
write.csv(OTU5,"5c101-com.csv")
library(ggpubr)
# Both composition bars into one PDF (2x2 grid, two cells used).
pdf("5RANDOM-composition.pdf",width=6,height=10)
ggarrange(p1,p2, ncol=2,nrow=2)
dev.off()
# Split the raw OTU table by neutral-model class (Abo/Bel/Neu) for each
# rotation and write the non-empty partitions out as tab-separated tables
# (inputs for downstream tools).
OTU = read.csv(file ="0vine-t1-c101-otutable.csv" ,header=TRUE,sep=",",row.names = 1)
OTU[1:10,1:10]
OTU_t=t(OTU)
OTU_t[1:10,1:10]
design = read.csv(file ="2metadata.csv" ,header=TRUE,sep=",")
#X101CT1
aa=design[which(design$rotation=="X101CT1"),]
bb=aa$sample
OTU1=as.data.frame(OTU_t[bb,])
OTU1[1:10,1:10]
OTU2=as.data.frame(t(OTU1))
OTU2$OTU=rownames(OTU2)
model = read.csv(file ="3X101CT1.csv",header=TRUE,row.names=1,sep=",")
head(model)
OTU3=merge(model[,c("OTU","class")],OTU2,by.x="OTU",all= F)
OTU3=rename(OTU3,"#OTU"=OTU)
OTU3[1:10,1:10]
# One data frame per class; drop OTUs absent from every sample of the subset,
# then drop the temporary "class" column (column 2) before writing.
Abo=OTU3[which(OTU3$class=="Abo"),]
Bel=OTU3[which(OTU3$class=="Bel"),]
Neu=OTU3[which(OTU3$class=="Neu"),]
Abo=Abo[which(rowSums(Abo[,-1:-2]) > 0),]
Bel=Bel[which(rowSums(Bel[,-1:-2]) > 0),]
Neu=Neu[which(rowSums(Neu[,-1:-2]) > 0),]
Abo=Abo[,-2]
Bel=Bel[,-2]
Neu=Neu[,-2]
write.table(Abo,"6X101CT1_Abo.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
write.table(Bel,"6X101CT1_Bel.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
write.table(Neu,"6X101CT1_Neu.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
#FCT1
# Identical partition export for the C101T1 rotation.
aa=design[which(design$rotation=="C101T1"),]
bb=aa$sample
OTU1=as.data.frame(OTU_t[bb,])
OTU1[1:10,1:10]
OTU2=as.data.frame(t(OTU1))
OTU2$OTU=rownames(OTU2)
model = read.csv(file ="3C101T1.csv",header=TRUE,row.names=1,sep=",")
head(model)
OTU3=merge(model[,c("OTU","class")],OTU2,by.x="OTU",all= F)
OTU3=rename(OTU3,"#OTU"=OTU)
OTU3[1:10,1:10]
Abo=OTU3[which(OTU3$class=="Abo"),]
Bel=OTU3[which(OTU3$class=="Bel"),]
Neu=OTU3[which(OTU3$class=="Neu"),]
Abo=Abo[which(rowSums(Abo[,-1:-2]) > 0),]
Bel=Bel[which(rowSums(Bel[,-1:-2]) > 0),]
Neu=Neu[which(rowSums(Neu[,-1:-2]) > 0),]
Abo=Abo[,-2]
Bel=Bel[,-2]
Neu=Neu[,-2]
write.table(Abo,"6C101T1_Abo.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
write.table(Bel,"6C101T1_Bel.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
write.table(Neu,"6C101T1_Neu.txt",sep ='\t',quote = FALSE,row.names = F,col.names = T)
library(reshape2)
library(ggplot2)
library(vegan)
#CFT1-ABO
# Class-level taxonomy bar for the above-prediction (Abo) OTUs of X101CT1:
# annotate, aggregate to Class, convert to relative abundance, keep the top
# 9 taxa, lump the rest into "Low abundance", and append "unknow" separately.
OTU <- read.table('6X101CT1_Abo.txt', sep='\t', header=T, comment.char='', check.names=F)
{annotation = read.csv(file ="1vine-annotation.csv" ,header=TRUE,sep=",",row.names = 1)
annotation$"#OTU"=rownames(annotation)
spe1=merge(annotation,OTU,by.x="#OTU",all=F)
Phylum=aggregate(spe1[,(-1:-7)],list(spe1$Class),sum)
#Phylum=Phylum[-which(Phylum$Group.1=="unknow"),]
rownames(Phylum)=Phylum$Group.1
Phylum=as.matrix(Phylum[,-1])
library("questionr")
a=cprop(Phylum)###Column percentages##
OTU2=as.data.frame.array(a[!rownames(a) %in% c("Total") , -which(colnames(a) %in% c("All"))])##
head(OTU2)
OTU3 <- OTU2[which(rownames(OTU2)!="unknow"),]##
# Decreased sort by abundance
OTU4 = as.data.frame(OTU3[(order(-rowSums(OTU3))), ])
# Calculate average relative abundance for each group
OTU5 = as.data.frame(rowMeans(OTU4))
OTU5$Phylum=rownames(OTU5)
head(OTU5)
# Filter Top 9 , and other group into Low abundance
other = as.data.frame(sum(OTU5[10:dim(OTU5)[1],1 ]))
other$Phylum=rownames(other)
OTU6 = OTU5[1:(10 - 1), ]
library(dplyr)
OTU6=rename(OTU6,"Mean"='rowMeans(OTU4)')
other=rename(other,"Mean"=`sum(OTU5[10:dim(OTU5)[1], 1])`)
mean_sort = rbind(OTU6,other)
rownames(mean_sort)[10] = c("Low abundance")
# Re-attach the "unknow" class (excluded from the Top-9 ranking above).
bb=as.data.frame(rowMeans(OTU2[which(rownames(OTU2)=="unknow"),]))
bb$Phylum=rownames(bb)
bb=rename(bb,"Mean"=`rowMeans(OTU2[which(rownames(OTU2) == "unknow"), ])`)
mean_sort=rbind(mean_sort,bb)##
# data melt for ggplot2
mean_sort$Phylum = rownames(mean_sort)
data_all = as.data.frame(melt(mean_sort, id.vars=c("Phylum")))
# Set taxonomy order by abundance, default by alphabet##
data_all$Phylum = factor(data_all$Phylum, levels=rownames(mean_sort))
tax=data_all[,1]
tax=as.data.frame(tax)
rownames(tax)<-rownames(data_all)
tax$value<-as.numeric(data_all$value)
tax$variable<-as.character(data_all$variable)
data_all=tax}
###
# Fixed colour map so taxa keep the same colour across panels.
p1 = ggplot(data_all, aes(x=variable, y = value, fill = tax )) +
geom_bar(stat = "identity",position="fill", width=0.8)+
scale_y_continuous(labels = scales::percent) +
xlab("6X101CT1_Abo")+
ylab("Relative Abundance(%)")+ theme_classic()+
theme(axis.text = element_text(size = 12), axis.title = element_text(size = 13)) +
theme(legend.text = element_text(size = 11))+
scale_fill_manual(values =rev(c("Acetothermia"="#3C3A8D","Acidobacteria"="#BEBADA","Acidobacteria_Gp1"="#BEBADA","Actinobacteria"="#ff6eb4",
"Armatimonadetes"="#000000","Bacteroidetes"="#44A8DB","BRC1"="#57B78C",
"candidate_division_WPS-1"="#EA711B","candidate_division_WPS-2"="#CECE05",
"Candidatus_Saccharibacteria"="#D72226","Chlamydiia"="#AA9953","Chloroflexi"="#093A3A",
"Cyanobacteria"="#02759E","Deinococcus-Thermus"="#3A398C","Firmicutes"="#74B662",
"Gemmatimonadetes"="#FFBB78FF","Latescibacteria"="#B53C0D", "Microgenomates"="#C49C94FF",
"Nitrospirae"="#03F1F7","Planctomycetes"="#F962C3","Proteobacteria"="#F95A5A",
"Tenericutes"="#0808B7","Verrucomicrobia"="#144404","Low abundance"="#cccccc",
"unknow"="#9E7C61","Rokubacteria"="#D3C13C","Alphaproteobacteria"="#ffec8b","Bacilli"="#89d0f5","Betaproteobacteria"="#ff9900","Cytophagia"="#66cc00","Deltaproteobacteria"="#666600","Gammaproteobacteria"="#fccde5","Flavobacteriia"="#ff0000","Sphingobacteriia"="#9f79ee","Unsigned"="#bebebe"))) +
theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent')) +
theme(legend.title = element_blank())
p1
####6C101T1_Abo
# Same Class-level taxonomy bar for the Abo partition of C101T1.
# NOTE(review): this colour map spells "Chlamydiae" while the previous one
# uses "Chlamydiia" -- confirm which spelling the annotation file contains.
OTU <- read.table('6C101T1_Abo.txt', sep='\t', header=T, comment.char='', check.names=F)
{annotation = read.csv(file ="1vine-annotation.csv" ,header=TRUE,sep=",",row.names = 1)
annotation$"#OTU"=rownames(annotation)
spe1=merge(annotation,OTU,by.x="#OTU",all=F)
Phylum=aggregate(spe1[,(-1:-7)],list(spe1$Class),sum)
#Phylum=Phylum[-which(Phylum$Group.1=="unknow"),]
rownames(Phylum)=Phylum$Group.1
Phylum=as.matrix(Phylum[,-1])
library("questionr")
a=cprop(Phylum)###Column percentages##
OTU2=as.data.frame.array(a[!rownames(a) %in% c("Total") , -which(colnames(a) %in% c("All"))])##
OTU3 <- OTU2[which(rownames(OTU2)!="unknow"),]##
# Decreased sort by abundance
OTU4 = as.data.frame(OTU3[(order(-rowSums(OTU3))), ])
# Calculate average relative abundance for each group
OTU5 = as.data.frame(rowMeans(OTU4))
OTU5$Phylum=rownames(OTU5)
head(OTU5)
# Filter Top 9 , and other group into Low abundance
other = as.data.frame(sum(OTU5[10:dim(OTU5)[1],1 ]))
other$Phylum=rownames(other)
OTU6 = OTU5[1:(10 - 1), ]
library(dplyr)
OTU6=rename(OTU6,"Mean"='rowMeans(OTU4)')
other=rename(other,"Mean"=`sum(OTU5[10:dim(OTU5)[1], 1])`)
mean_sort = rbind(OTU6,other)
rownames(mean_sort)[10] = c("Low abundance")
bb=as.data.frame(rowMeans(OTU2[which(rownames(OTU2)=="unknow"),]))
bb$Phylum=rownames(bb)
bb=rename(bb,"Mean"=`rowMeans(OTU2[which(rownames(OTU2) == "unknow"), ])`)
mean_sort=rbind(mean_sort,bb)##
# data melt for ggplot2
mean_sort$Phylum = rownames(mean_sort)
data_all = as.data.frame(melt(mean_sort, id.vars=c("Phylum")))
# Set taxonomy order by abundance, default by alphabet##y
data_all$Phylum = factor(data_all$Phylum, levels=rownames(mean_sort))
tax=data_all[,1]
tax=as.data.frame(tax)
rownames(tax)<-rownames(data_all)
tax$value<-as.numeric(data_all$value)
tax$variable<-as.character(data_all$variable)
data_all=tax}
p4 = ggplot(data_all, aes(x=variable, y = value, fill = tax )) +
geom_bar(stat = "identity",position="fill", width=0.8)+
scale_y_continuous(labels = scales::percent) +
xlab("6C101T1_Abo")+
ylab("Relative Abundance(%)")+ theme_classic()+
theme(axis.text = element_text(size = 12), axis.title = element_text(size = 13)) +
theme(legend.text = element_text(size = 11))+
scale_fill_manual(values =rev(c("Acetothermia"="#3C3A8D","Acidobacteria"="#BEBADA","Acidobacteria_Gp1"="#BEBADA","Actinobacteria"="#ff6eb4",
"Armatimonadetes"="#000000","Bacteroidetes"="#44A8DB","BRC1"="#57B78C",
"candidate_division_WPS-1"="#EA711B","candidate_division_WPS-2"="#CECE05",
"Candidatus_Saccharibacteria"="#D72226","Chlamydiae"="#AA9953","Chloroflexi"="#093A3A",
"Cyanobacteria"="#02759E","Deinococcus-Thermus"="#3A398C","Firmicutes"="#74B662",
"Gemmatimonadetes"="#FFBB78FF","Latescibacteria"="#B53C0D", "Microgenomates"="#C49C94FF",
"Nitrospirae"="#03F1F7","Planctomycetes"="#F962C3","Proteobacteria"="#F95A5A",
"Tenericutes"="#0808B7","Verrucomicrobia"="#144404","Low abundance"="#cccccc",
"unknow"="#9E7C61","Rokubacteria"="#D3C13C","Alphaproteobacteria"="#ffec8b","Bacilli"="#89d0f5","Betaproteobacteria"="#ff9900","Cytophagia"="#66cc00","Deltaproteobacteria"="#666600","Gammaproteobacteria"="#fccde5","Flavobacteriia"="#ff0000","Sphingobacteriia"="#9f79ee","Unsigned"="#bebebe"))) +
theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent')) +
theme(legend.title = element_blank())
p4
library(ggpubr)
# Combine both taxonomy bars into one PDF.
pdf("7TIunknow.pdf",width=12,height=12)
ggarrange(p1,p4, ncol=2,nrow=2)
#,p4,p5,p6,p7,p8,p9,
dev.off()
##
|
307f933c57cbacd45e2de6c34277a2abde9025fe
|
e1466f0fb923fa17fb98d005d632d7deb8aba44f
|
/Risk Measures vol.2.R
|
fff06451ffa1129c42b958720dcd316c33a72b97
|
[] |
no_license
|
LuckyJoni/R_finance
|
00b8aa6a7477b83ff9f9b8f7822394a6f2af1de6
|
80f51012c805fb28e7991c74f0e2790a12507886
|
refs/heads/main
| 2023-04-08T02:03:36.797100
| 2021-04-01T16:34:23
| 2021-04-01T16:34:23
| 351,757,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,640
|
r
|
Risk Measures vol.2.R
|
#============================================================================
# RISK MEASURES
#============================================================================
# load libraries
library(PerformanceAnalytics)
library(quantmod)
library(mvtnorm)
library(mnormt)
library(MASS)
options(digits=4)
# Example: Normal VaR
# For X ~ N(mu, sigma^2), the alpha-quantile is VaR in return space
# (a negative number for small alpha): VaR_alpha = mu + sigma * z_alpha.
mu = 10
sigma = 100
alpha = 0.05
VaR.alpha = qnorm(alpha, mu, sigma)
VaR.alpha
# alternative approach
VaR.alpha = mu + sigma*qnorm(alpha,0,1)
VaR.alpha
# Example: normal ES
# Left-tail conditional expectation of a normal:
#   ES_alpha = E[X | X <= VaR_alpha] = mu - sigma * dnorm(z_alpha)/alpha
# (same return-space sign convention as VaR.alpha above, and as
# PerformanceAnalytics::ES with method = "gaussian").
# BUG FIX: the previous formula was -(mu + sigma*phi/alpha), which negates
# mu with the wrong sign; ES must satisfy ES_alpha < VaR_alpha < mu.
mu = 10
sigma = 100
alpha = 0.05
ES.alpha = mu - sigma*(dnorm(qnorm(alpha))/(alpha))
ES.alpha
# ESTIMATING RISK MEASURES
# download daily prices for Microsoft and the S&P 500 index (Yahoo Finance
# via quantmod; getSymbols() assigns MSFT and GSPC into the workspace)
symbol.vec = c("MSFT", "^GSPC")
getSymbols(symbol.vec, from ="2000-01-03", to = "2012-04-03")
colnames(MSFT)
start(MSFT)
end(MSFT)
# extract adjusted closing prices (drop=F keeps the xts column structure)
MSFT = MSFT[, "MSFT.Adjusted", drop=F]
GSPC = GSPC[, "GSPC.Adjusted", drop=F]
# plot prices
par(mfrow = c(2, 1))
plot(MSFT)
plot(GSPC)
# calculate simple (arithmetic) returns
MSFT.ret = CalculateReturns(MSFT, method="simple")
GSPC.ret = CalculateReturns(GSPC, method="simple")
# remove first NA observation (no prior price for the first day)
MSFT.ret = MSFT.ret[-1,]
GSPC.ret = GSPC.ret[-1,]
# create combined two-column return series used by all later sections
MSFT.GSPC.ret = cbind(MSFT.ret,GSPC.ret)
# plot returns
par(mfrow = c(2, 1))
plot(MSFT.ret)
plot(GSPC.ret)
# CALCULATE NONPARAMETRIC RISK MEASURES
# standard deviation of each return series
apply(MSFT.GSPC.ret, 2, sd)
# empirical 5% and 1% quantiles (historical VaR, return-space / negative)
apply(MSFT.GSPC.ret, 2, quantile, probs=c(0.05, 0.01))
# Historical (nonparametric) expected shortfall: the mean of all
# observations at or below the empirical alpha-quantile (left tail).
#
# Args:
#   x     : numeric vector of returns.
#   alpha : tail probability (default 0.05).
#   na.rm : drop NA values before computing (default FALSE, so existing
#           callers see unchanged behaviour).
# Returns: scalar ES estimate in return space (negative for loss tails).
ES.fun = function(x, alpha=0.05, na.rm=FALSE) {
  qhat = quantile(x, probs=alpha, na.rm=na.rm)
  mean(x[x <= qhat], na.rm=na.rm)
}
# historical 5% and 1% ES for both series
apply(MSFT.GSPC.ret, 2, ES.fun, alpha=0.05)
apply(MSFT.GSPC.ret, 2, ES.fun, alpha=0.01)
# plot 5% VaR and ES for MSFT as horizontal reference lines on the returns
par(mfrow = c(1, 1))
VaR.MSFT.05 = quantile(MSFT.ret, probs=0.05)
ES.MSFT.05 = mean(MSFT.ret[MSFT.ret <= VaR.MSFT.05])
plot.zoo(MSFT.ret)
abline(h=VaR.MSFT.05, col="blue", lwd=2)
abline(h=ES.MSFT.05, col="green", lwd=2)
legend(x="topright", legend=c("5% VaR", "5% ES"), lwd=2, col=c("blue", "green"))
# cross-check against PerformanceAnalytics functions (note: p=0.95 in
# PerformanceAnalytics corresponds to the 5% tail)
args(VaR)
VaR(MSFT.GSPC.ret, p=0.95, method="historical")
ES(MSFT.GSPC.ret, p=0.95, method="historical")
# NORMAL VaR and ES
# Parametric estimates assuming each return series is N(mu, sigma^2).
# VaR_alpha = mu + sigma*z_alpha;  ES_alpha = mu - sigma*dnorm(z_alpha)/alpha.
# BUG FIX: es.* previously used -(mu + sigma*phi/alpha), which flips the
# sign of mu; the corrected formula matches the gaussian method of
# PerformanceAnalytics::ES used as a cross-check below.
mu.hat = apply(MSFT.GSPC.ret, 2, mean)
sigma.hat = apply(MSFT.GSPC.ret, 2, sd)
q.05.norm = mu.hat + sigma.hat*qnorm(0.05)
q.01.norm = mu.hat + sigma.hat*qnorm(0.01)
es.05.norm = mu.hat - sigma.hat*dnorm(qnorm(0.05))/0.05
es.01.norm = mu.hat - sigma.hat*dnorm(qnorm(0.01))/0.01
mu.hat
sigma.hat
q.05.norm
q.01.norm
es.05.norm
es.01.norm
# Use PerformanceAnalytics functions
VaR(MSFT.GSPC.ret, p=0.95, method="gaussian")
ES(MSFT.GSPC.ret, p=0.95, method="gaussian")
# VaR and ES based on STUDENT'S T DISTRIBUTION
# use fitdistr() from MASS package
?fitdistr
# Fit Student's t distribution by MLE
# assume MSFT.ret is student's t with parameters mu, sigma and v
# note: E[MSFT.ret] = mu, var(MSFT.ret) = sigma^2 * (v/(v-2))
# ATTENTION!!! THIS MEANS THE FOLLOWING
# ==> sigma is not the standard deviation of the distribution,
# ==> but only the scale parameter of the distribution,
# We multiply the returns by 100 to improve numerical stability of MLE
MSFT.t.mle = fitdistr(MSFT.ret*100, densfun="t")
MSFT.t.mle
theta.hat = coef(MSFT.t.mle)
# rescale estimates back to return units
mu.MSFT.t = theta.hat["m"]/100
sigma.MSFT.t = theta.hat["s"]/100
v.MSFT.t = theta.hat["df"]
# Standard t quantiles
q.t.05 = qt(0.05, df=v.MSFT.t)
q.t.01 = qt(0.01, df=v.MSFT.t)
# Estimated t Quantiles for MSFT (VaR in return space)
q.MSFT.t.05 = mu.MSFT.t + sigma.MSFT.t*q.t.05
q.MSFT.t.01 = mu.MSFT.t + sigma.MSFT.t*q.t.01
q.MSFT.t.05
q.MSFT.t.01
# Tail-expectation adjustment for the standard t:
#   E[T | T <= q_alpha] = -(f_v(q)/alpha) * ((v + q^2)/(v - 1)) = -t.adj
t.adj.05 = (dt(q.t.05, df=v.MSFT.t)/0.05)*((v.MSFT.t + q.t.05^2)/(v.MSFT.t - 1))
t.adj.01 = (dt(q.t.01, df=v.MSFT.t)/0.01)*((v.MSFT.t + q.t.01^2)/(v.MSFT.t - 1))
# ES_alpha = mu + sigma * E[T | T <= q_alpha] = mu - sigma * t.adj.
# BUG FIX: previously -(mu + sigma*t.adj), which flips the sign of mu;
# the corrected value agrees with the simulation check below (es.t.05/.01).
es.MSFT.t.05 = mu.MSFT.t - sigma.MSFT.t*t.adj.05
es.MSFT.t.01 = mu.MSFT.t - sigma.MSFT.t*t.adj.01
es.MSFT.t.05
es.MSFT.t.01
# Simulate data from fitted Student's t distribution
# t.v ~ standardized Student t with v df. E[t.v] = 0, var(t.v) = v/(v-2)
set.seed(123)
t.sim = mu.MSFT.t + sigma.MSFT.t*rt(n=10000, df=v.MSFT.t)
q.t.sim = quantile(t.sim, probs=c(0.05, 0.01))
es.t.05 = mean(t.sim[t.sim <= q.t.sim[1]])
es.t.01 = mean(t.sim[t.sim <= q.t.sim[2]])
q.t.sim
es.t.05
es.t.01
#========================================================
# ============= PORTFOLIO RISK MEASURES =================
# equally weighted portfolio of MSFT and GSPC
port.ret = 0.5*MSFT.ret + 0.5*GSPC.ret
colnames(port.ret) = "port"
# plot(port.ret)
mean(port.ret)
sd(port.ret)
sd(as.numeric(port.ret))
# Computing portfolio volatility using covariance matrix: sqrt(w' Sigma w)
Sigma.hat = cov(MSFT.GSPC.ret)
w = c(0.5, 0.5)
sigma.p.hat = as.numeric(sqrt(t(w)%*%Sigma.hat%*%w))
sqrt(t(w)%*%Sigma.hat%*%w)
# Using PerformanceAnalytics function StdDev
StdDev(MSFT.GSPC.ret, portfolio_method="component",
weights=c(0.5, 0.5))
# Nonparametric portfolio risk measures
# VaR
VaR(port.ret, p=0.95, method="historical")
VaR(port.ret, p=0.99, method="historical")
# ES
ES(port.ret, p=0.95, method="historical")
ES(port.ret, p=0.99, method="historical")
# Bivariate distributions for MSFT and GSPC (note: "GPSC" was a typo)
# Empirical scatterplots
plot(coredata(MSFT.ret), coredata(GSPC.ret),
main="Empirical Bivariate Distribution of Returns",
ylab="GSPC", xlab="MSFT", col="blue")
abline(h=mean(GSPC.ret), v=mean(MSFT.ret))
# Empirical portfolio return distribution
chart.Histogram(port.ret, main="Equally Weighted Portfolio", methods=c("add.normal", "add.qqplot"))
# Simulate from fitted multivariate normal distribution
# Use mvtnorm package
library(mvtnorm)
n.obs = nrow(MSFT.GSPC.ret)
# Estimate mean and covariance
mu.hat = apply(MSFT.GSPC.ret, 2, mean)
Sigma.hat = cov(MSFT.GSPC.ret)
Cor.hat = cov2cor(Sigma.hat)
mu.hat
Sigma.hat
Cor.hat
set.seed(123)
sim.ret = rmvnorm(n.obs, mean=mu.hat, sigma=Sigma.hat, method="chol")
# Scatterplot of simulated returns
plot(sim.ret[,1], sim.ret[,2],
main="Simulated Bivariate Normal Distribution of Returns",
ylab="GSPC", xlab="MSFT", col="blue")
abline(v=mean(sim.ret[,1]), h=mean(sim.ret[,2]))
# Superimpose both scatter-plots: the normal model misses the heavy tails
plot(coredata(MSFT.ret), coredata(GSPC.ret),
main="Empirical vs. Bivariate Normal",
ylab="GSPC", xlab="MSFT", col="blue")
abline(h=mean(GSPC.ret), v=mean(MSFT.ret))
points(sim.ret[,1], sim.ret[,2], col="red")
legend(x="topleft", legend=c("Empirical", "Normal"), col=c("blue", "red"), pch=1)
# Simulated equally weighted portfolio
port.ret.sim = 0.5*sim.ret[,1] + 0.5*sim.ret[,2]
chart.Histogram(port.ret.sim, main="Equally Weighted Portfolio",
methods=c("add.normal", "add.qqplot"))
# Portfolio risk measures from normal distribution
# volatility
StdDev(port.ret)
# VaR
VaR(port.ret, p = 0.95, method="gaussian")
VaR(port.ret, p = 0.99, method="gaussian")
# ES
ES(port.ret, p = 0.95, method="gaussian")
ES(port.ret, p = 0.99, method="gaussian")
# Fitting bivariate student's t to data - see "Statistics and Data Analysis
# for Financial Engineering ch 5
# library(MASS) is needed for cov.trob, which estimates a covariance or correlation
# matrix assuming the data came from a multivariate t distribution
library(mnormt) # needed for dmt
# Profile likelihood over the degrees-of-freedom parameter: for each df
# value, cov.trob() gives the MLE of (mu, cov); the df with the largest
# log-likelihood is kept.
df = seq(2.1,5,.01) # 551 points
n = length(df)
loglik_max = rep(0,n)
for(i in 1:n)
{
#MLE of mu and cov given df
fit = cov.trob(coredata(MSFT.GSPC.ret),nu=df[i])
#MLE of df given mu and cov
loglik_max[i] = sum(log(dmt(coredata(MSFT.GSPC.ret),mean=fit$center,
S=fit$cov,df=df[i])))
}
max.lik = max(loglik_max)
v.mle = df[which(loglik_max == max.lik)] # "which":give the TRUE indices of a logical object
plot(df, loglik_max, type="l", main="Profile Likelihood for Bivariate t", lwd=2, col="blue")
abline(v=v.mle, lwd=2, col="red")
# Extract mle of mu and sigma given v.mle
fit.mle = cov.trob(coredata(MSFT.GSPC.ret),nu=v.mle)
mu.mle.t = fit.mle$center
Sigma.mle.t = fit.mle$cov
mu.mle.t
# show covariance matrix (scale matrix times v/(v-2))
Sigma.mle.t*(v.mle/(v.mle - 2))
Cor.mle.t = cov2cor(Sigma.mle.t*(v.mle/(v.mle - 2)))
Cor.mle.t
# Simulate portfolio returns
# Use result that Y = mu + sqrt(v/W)*Z is multivariate t
# Generate Z ~ N(0, Sigma.mle.t)
set.seed(123)
Z = rmvnorm(n=n.obs, mean=c(0,0), sigma=Sigma.mle.t*(v.mle/(v.mle - 2)))
# generate W ~ chi-sq(v.mle)
W = rchisq(n.obs,df=v.mle)
# simulate bivariate t
sim.ret.t = mu.mle.t + sqrt(v.mle/W)*Z
colnames(sim.ret.t) = c("MSFT","GSPC")
# Plot simulated data together with actual returns
plot(coredata(MSFT.ret),coredata(GSPC.ret),
main="Empirical vs. Bivariate t",
ylab="GSPC", xlab="MSFT", col="blue")
abline(h=mean(GSPC.ret), v=mean(MSFT.ret))
points(sim.ret.t, col="red")
legend(x="topleft", legend=c("Empirical", "Multivariate t"), col=c("blue", "red"), pch=1)
# Compute simulated returns for the equally weighted portfolio
port.ret.sim.t = 0.5*sim.ret.t[,"MSFT"] + 0.5*sim.ret.t[,"GSPC"]
chart.Histogram(port.ret.sim.t, main="Equally Weighted Portfolio: Student's t",
methods=c("add.normal", "add.qqplot"))
# Calculate volatility, VaR and ES from simulated returns
# volatility
StdDev(port.ret.sim.t)
# VaR
VaR(port.ret.sim.t, p = 0.95, method="historical")
VaR(port.ret.sim.t, p = 0.99, method="historical")
# ES
ES(port.ret.sim.t, p = 0.95, method="historical")
ES(port.ret.sim.t, p = 0.99, method="historical")
|
fae31e266f4a77a0a92c5d482eadd3660b1eff5c
|
c4297fe5e9fb1c3a959aa1e5e39f0568467880e5
|
/Rproject_MW.R
|
81be9968447adf9a4b61f299e2a0e349170749fd
|
[] |
no_license
|
maccwinter/Octubre-
|
fa5b796f7cd1efb5bcdda48221ca3c1072c73092
|
a2c6a0b781e57e68a318387e224ba5452f76218f
|
refs/heads/master
| 2020-08-08T08:47:04.324282
| 2019-12-07T00:03:12
| 2019-12-07T00:03:12
| 213,796,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 94
|
r
|
Rproject_MW.R
|
#R Project
# Load the sampling-stations CSV and inspect its head and column names.
library(tidyverse)
st <- read_csv('sampling_stations.csv')
head(st)
names(st)
|
ca0402e1014d0f6836c44e9d2d7b38df93f5c2fd
|
e98ef98470a5d3c99c97f90273e1208e63d2ccf2
|
/setup.R
|
aec72aff8961b8e3c4d38da6b9729d8205120698
|
[] |
no_license
|
Sta523-Fa14/esmxy
|
d4003f8eedb903a354d535f4cc006831d0b7f958
|
64f97f95c22d7aa88227628a8b7033a37d54be9a
|
refs/heads/master
| 2021-01-18T17:17:58.493088
| 2014-12-11T22:10:00
| 2014-12-11T22:10:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
setup.R
|
# ---- Dependency bootstrap -------------------------------------------------
# Packages required by the pipeline; any that are missing are installed from
# CRAN before being attached.
toLoad=c('dplyr','stringr','rgdal', 'rgeos','rnoaa','RNetCDF',
'ggplot2','alphahull','igraph','prevR','gridExtra','usdm','nlme','MASS')
# installed.packages() is expensive; query it once instead of per package.
installed <- rownames(installed.packages())
for (lib in toLoad) {
  if (!(lib %in% installed)) {
    install.packages(lib, repos = "http://cran.us.r-project.org")
  }
  library(lib, character.only = TRUE)
}
# ---- Project directory layout ---------------------------------------------
# recursive = TRUE creates parent directories ("Cache", "Output") as needed;
# showWarnings = FALSE makes the script idempotent (no warning spam when the
# directories already exist on re-runs).
for (d in c("Cache/CDFfiles", "Cache/Shapefiles",
            "Output/Figures", "Output/Rdas", "Output/Shapefiles")) {
  dir.create(d, showWarnings = FALSE, recursive = TRUE)
}
|
eec6e272366068dc4109a8c6ec4b59aa1af4b830
|
a3de160e7678a050597d8bff7527eda267e3b84f
|
/model/reg.pca.fn.R
|
08fc1e7ab5095f78796c9b3aa3ee79a38cf17f0b
|
[
"MIT"
] |
permissive
|
salmuz/rcep
|
8bcd54c495773940b2e7e7bc43cae4f7970db106
|
e68872ad00efc77be56aff52a438515a529bdee7
|
refs/heads/master
| 2021-01-21T12:58:26.955761
| 2017-05-19T12:59:43
| 2017-05-19T12:59:43
| 91,804,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,025
|
r
|
reg.pca.fn.R
|
# Sequentially extracts nb.comp regularized PCA-regression components.
# For each component, the tuning parameter is chosen by k-fold
# cross-validation before the component direction is fitted.
#
# Args:
#   sample      : data matrix; column idx.y is the response, the rest are
#                 predictors.
#   nb.comp     : number of component directions to extract.
#   n.setting   : number of candidate tuning-parameter values per CV grid.
#   cv.seed     : RNG seed used when drawing the CV segments.
#   cv.k        : number of CV folds (cvsegments() comes from the pls
#                 package -- loaded elsewhere, confirm).
#   idx.y       : column index of the response in `sample`.
#   setting.max : upper bound of the tuning-parameter grid.
#   n.iter      : max alternating iterations for each component fit.
# Returns: list with the component directions (vt.comp), cumulative PRESS
#   curves (cum.press), selected settings (cum.hat) and coefficients
#   (cum.coef).  Depends on the reg.pca.f1.* helpers defined elsewhere.
reg.pca.regularization <- function(sample, nb.comp = 1, n.setting = 10,
cv.seed = 87, cv.k = 4, idx.y = 1, setting.max = 1,
n.iter = 200 , ...){
set.seed(cv.seed)
cv.segments <- cvsegments(nrow(sample), k = cv.k, type="random")
# accumulators: component directions, PRESS curves, chosen settings, coefs
vt.comp <- cum.press <- cum.hat <- cum.coef <- NULL;
# compute the first component
cv.f1 <- reg.pca.f1.cv(setting.max, sample, cv.segments, n.setting, idx.y)
cum.press <- cbind(cum.press, cv.f1$press)
cum.hat <- cbind(cum.hat, cv.f1$s.hat)
reg.fit <- reg.pca.f1.regression(sample[,-idx.y], sample[, idx.y], cv.f1$s.hat, n.iter)
vt.comp <- cbind(vt.comp, reg.fit$u)
cum.coef <- cbind(cum.coef, reg.pca.f1.coef(reg.fit))
# compute the remaining k-1 components, each orthogonalized against the
# directions found so far (passed through vt.comp)
if(nb.comp > 1)
for(i in 2:nb.comp){
cv.pca <- reg.pca.cv(sample, vt.comp, cv.segments, n.setting, setting.max, idx.y)
cum.press <- cbind(cum.press, cv.pca$press)
cum.hat <- cbind(cum.hat, cv.pca$s.hat)
reg.fit <- reg.pca.regression(sample[,-idx.y], sample[, idx.y], cv.pca$s.hat, vt.comp, n.iter)
vt.comp <- cbind(vt.comp, reg.fit$u)
cum.coef <- cbind(cum.coef, reg.pca.f1.coef(reg.fit))
}
return(list(vt.comp = vt.comp, cum.press = cum.press,
cum.hat = cum.hat, cum.coef = cum.coef))
}
###############################################################
## Cross-validated selection of the tuning parameter for one additional
## component, given the previously fitted directions.
## cv.sample   : sample used for cross-validation
## vt.comp     : matrix of already-fitted component directions
## cv.segments : random fold assignments for cross-validation
## n.setting   : number of candidate tuning values in (0, setting.max)
## idx.y       : column index of the response variable
## Returns the setting minimizing PRESS, the PRESS curve, and the grid.
###############################################################
reg.pca.cv <- function(cv.sample, vt.comp, cv.segments, n.setting = 10,
setting.max = 1, idx.y = 1, ...){
# open grid of candidate settings, endpoints 0 and setting.max excluded
set.setting <- seq(0, setting.max, length = n.setting+2)
set.setting <- set.setting[c(-1,-length(set.setting))]
press <- rep(0, length(set.setting))
for(i in 1:length(cv.segments)){
# fold i is held out; the remaining folds are the training set
sample.valid <- cv.sample[unlist(cv.segments[i]), ]
X <- cv.sample[unlist(cv.segments[-i]), -idx.y]
Y <- cv.sample[unlist(cv.segments[-i]), idx.y]
coeff <- NULL
for(j in 1:length(set.setting)){
fit <- reg.pca.regression(X, Y, set.setting[j], vt.comp)
coeff <- rbind(coeff, reg.pca.coef(fit))
}
# each row of `prediction` holds the intercept-adjusted predictions of one
# candidate setting for the held-out samples
prediction <- matrix(coeff[,1], nrow(coeff), nrow(sample.valid)) + coeff[,-1] %*% t(sample.valid[,-1])
press <- press + rowSums((matrix(sample.valid[,1], nrow(coeff), nrow(sample.valid), byrow=T)-prediction)^2)
}
s_hat <- set.setting[which.min(press)]
return(list(s.hat=s_hat, press=press, s.set=set.setting))
}
###############################################################
# Recovery the coefficient of the fitted model, rescaled back to the
# original (uncentred, unscaled) variable units.
# @param model.fit : model fitting returned by reg.pca.regression()
# @return coeff : named vector, intercept first then slope coefficients
###############################################################
reg.pca.coef <- function(model.fit){
coeff <- model.fit$gamma * model.fit$u
comps <- model.fit$vt.comp %*% model.fit$delta
# undo the predictor scaling applied during fitting
scalecoeff <- t(as.matrix(coeff/model.fit$Xsd))
scalecomps <- t(as.matrix(comps/model.fit$Xsd))
# intercept re-centres predictions to the original means
intercept <- model.fit$Ym - (scalecoeff + scalecomps) %*% model.fit$Xm
return(drop(cbind(intercept, scalecoeff + scalecomps)))
}
###############################################################
# Fits one additional regularized PCA-regression component direction by
# alternating between a regression stage (one.stage) and a direction-update
# stage (two.stage), iterating until the new direction stabilizes.
# @param var.x   : explanatory variables (centred and scaled internally by
#                  centre_reduit(), a helper defined elsewhere -- confirm)
# @param var.y   : response variable (centred internally)
# @param setting : real-valued regularization tuning parameter
# @param vt.comp : (p, k) matrix of previously fitted component directions
#                  {u_1, ..., u_k}
# @param Niter   : maximum number of alternating iterations
# @return list with the new unit direction u, the convergence trace, the
#   final regression parameters (gamma, delta, sigma) and the centring/
#   scaling statistics needed by reg.pca.coef().
###############################################################
reg.pca.regression <- function(var.x, var.y, setting = 0.5, vt.comp, Niter = 100, ...){
scale <- centre_reduit(as.matrix(var.x), var.y)
X <- scale$X
Y <- scale$Y
n <- scale$n
p <- scale$p
vt.comp <- as.matrix(vt.comp)
# initialize working variables; u holds one candidate direction per column
u <- matrix(10^-20, ncol = Niter, nrow = p)
W <- diag(x = 1/n, n, n)
N <- t(X) %*% W %*% X
Tc <- X %*% vt.comp
tA <- t(vt.comp) %*% N # Ak'= uk'X'WX
iAA <- ginv(tA %*% t(tA)) # A'A
# svd: singular value decomposition
Xsvd <- svd(X) # X <- svd$u %*% diag(svd$d) %*% t(svd$v)
d2 <- Xsvd$d ^ 2
n.diag <- length(Xsvd$d)
Nd2 <- rep(W[1,1], n.diag) * d2 # DW*D , W* = UWU'
Nvu <- diag(Nd2, n.diag, n.diag) %*% t(Xsvd$v) %*% vt.comp
du <- diag(Xsvd$d, n.diag, n.diag) %*% t(Xsvd$u)
duY <- du %*% Y
# stage 1: given the current direction, regress Y on [X u, Tc] by least
# squares (via SVD) to obtain gamma, delta and the residual variance sigma
one.stage <- function(k = 2){
Z <- cbind(X %*% u[,k-1], Tc)
Zsvd <- svd(Z)
theta <- Zsvd$v %*% ((t(Zsvd$u) %*% Y) / Zsvd$d)
sigma <- norm(Y - Z %*% theta, "2")^2 / n
return(list(gamma = theta[1], delta = theta[-1], sigma=sigma))
}
# stage 2: given (gamma, delta, sigma), update the direction u in the SVD
# basis of X and renormalize it to unit length
two.stage <- function(k = 2, gamma, delta, sigma){
zeta0 <- (setting-1)*(gamma^2)
zeta1 <- (setting-1)*gamma
Nu <- N %*% u[,k-1]
uNu <- t(u[,k-1]) %*% Nu
Xu <- X %*% u[, k-1]
uX <- t(Xu)
TDelta <- Tc %*% delta
#Delta <- uX %*% Y - gamma * (norm(Xu, "2")^2) - uX %*% TDelta
#print(paste("Delta",Delta,sep = ":"))
#lambda <- drop(((zeta1 * Delta)/(2*sigma)) + setting*uNu)
lambda <- drop(setting*uNu)
cst <- (-1*zeta1/sigma)
first <- cst * t(X) %*% (Y - gamma*Xu - TDelta)
tau <- iAA %*% tA %*% (first + 2*setting*Nu)
Sigma <- zeta0 * d2 + 2*sigma*(setting*Nd2 - lambda)
sRight <- zeta1 * (duY + du %*% TDelta) + sigma * Nvu %*% tau
uk <- sRight / Sigma
dim(uk) <- c(n.diag, 1)
u[,k] <- Xsvd$v %*% uk
u[,k] <- u[,k]/norm(u[,k], "2")
return(u[,k])
}
u[,1] <- u[,1]/norm(u[,1], "2")
params <- one.stage(2)
u[,2] <- two.stage(2, params$gamma, params$delta, params$sigma)
i <- 2
# convergence criterion: squared cosine between successive directions
condition <- (t(u[,i-1])%*%u[,i])^2
converge <- c(NULL, condition)
#print(paste("Method avec SVD avec setting", setting, sep = ":"));
while(condition < (1 - 10^-6) && i < Niter){
i <- i+1
params <- one.stage(i)
u[,i] <- two.stage(i, params$gamma, params$delta, params$sigma)
condition <- (t(u[,i-1])%*%u[,i])^2
converge <- c(converge, condition)
}
# one final regression stage for the converged direction
params <- one.stage(i+1)
return(list(u = u[,i], converge = converge, gamma = params$gamma, sigma=params$sigma,
delta=params$delta, vt.comp = vt.comp,
Xm=scale$Xm, Ym=scale$Ym, Xsd=scale$Xsd));
}
###############################################################
## Smoke test of reg.pca.regression on the cookies data (cookie.app must
## be loaded in the workspace) with 3 tuning-parameter values; prints the
## fitted gamma/sigma for each setting.
###############################################################
reg.pca.test <- function(){
sets <- c(0.1, 0.5, 0.8)
# first component fitted at a fixed, previously cross-validated setting
reg.fit <- reg.pca.f1.regression(cookie.app[,-1], cookie.app[,1], 0.002004008)
for(j in 1:length(sets)){
resp <- reg.pca.regression(cookie.app[,-1], cookie.app[,1], sets[j], reg.fit$u)
cat(paste("resp[", j, "] : ( gamma=", resp$gamma,", sigma=", resp$sigma, ")", sep = ""), "\n")
}
}
|
e708dc470cc2a12b60b5e270f99766d5f17c2c2a
|
719df495394b36568d5a426d24973cd43c784d33
|
/calendar_plot.R
|
ed7f8c2c194f1e176883fbab9a89e32215e19882
|
[] |
no_license
|
RomanKyrychenko/library
|
dd4d5a1aacd87cbaad341d92c625f2641e24bce3
|
f38cb1c5c05f75493b343ed0c77f73c8e6266213
|
refs/heads/master
| 2021-07-15T00:33:38.965753
| 2021-03-02T12:11:55
| 2021-03-02T12:11:55
| 77,147,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,784
|
r
|
calendar_plot.R
|
# Daily transaction totals from `bigset` (defined elsewhere), plus the full
# calendar range so days without transactions can be NA-filled below.
sbd <- bigset %>% group_by(trans_date) %>% dplyr::summarise(am=sum(amount))
hh<- seq(as.Date("2015-09-01"), as.Date("2017-05-16"), by="days")
'%!in%' <- function(x,y)!('%in%'(x,y))
# Rows for calendar days absent from sbd, with am = NA so they render grey.
# NOTE(review): data_frame() is deprecated in favour of tibble() -- left
# unchanged to preserve behaviour with the pinned package versions.
# The +3600*3 shifts appear to compensate a 3-hour timezone offset before
# extracting calendar fields -- confirm the source data's timezone.
hh <- data_frame(
trans_date=hh[hh %!in% as.Date(sbd$trans_date+3600*3)],
am=NA
)
sbd <- rbind(sbd,hh)
# Derive the calendar fields used for facetting (month/weekday labels are
# Ukrainian locale strings and must match the runtime locale).
sbd <- sbd %>%
mutate(year = as.numeric(format(trans_date+3600*3, "%Y")),
month.order = ordered(format(trans_date+3600*3, "%B"), levels = c("січень", "лютий", "березень", "квітень", "травень", "червень", "липень", "серпень", "вересень", "жовтень", "листопад", "грудень")),
weekday.order = ordered(weekdays(trans_date+3600*3), levels = rev(c("понеділок", "вівторок", "середа", "четвер", "п'ятниця", "субота", "неділя"))),
day.month = as.integer(format(trans_date+3600*3, "%d")),
year.month = format(trans_date+3600*3, "%h %Y"),
week = as.numeric(format(trans_date+3600*3, "%W")),
tmp_date = as.Date(format(trans_date+3600*3, "%Y%m%d"), "%Y%m%d")) %>%
select(-trans_date)
# Calendar heat-map: one tile per day, facetted year x month, labelled with
# the day of the month, filled by the daily transaction total.
sbd %>% group_by(year.month) %>% dplyr::mutate(monthweek = 1+week-min(week)) %>%
ggplot(aes(monthweek, weekday.order, fill = am))+
geom_tile(colour = "white")+
facet_grid(year~month.order)+
scale_fill_gradient2(name = "", low = "royalblue3", mid = "gold2",
high = "orangered3", na.value = "#f0f0f0",breaks=seq(0,400000000,50000000),
labels=format_format(big.mark = " ", decimal.mark = ",", scientific = FALSE),
limits=c(0,400000000))+
scale_x_continuous(breaks=seq(1, 6, 1),expand=c(0,0))+
labs(y="",x = "тиждень місяця") +
geom_text(aes(label = day.month),
vjust = 0.5, family = "PT Sans", face = "bold", size = 2.6)+
theme_minimal(base_family="PT Sans")+
theme(
panel.grid=element_line(),
panel.grid.major.y=element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank(),
panel.grid.minor.y=element_blank(),
axis.line.x=element_line(color="#2b2b2b", size=0.15),
axis.ticks=element_line(),
axis.ticks.x=element_line(color="#2b2b2b", size=0.15),
axis.ticks.y=element_blank(),
axis.ticks.length=unit(5, "pt"),
plot.margin=unit(rep(0.5, 4), "cm"),
legend.position = "bottom", legend.text = element_text(family = "PT Sans",
size = 9, lineheight = 0.8),
axis.text.y=element_text(margin=margin(r=-5)),
plot.title=element_text(family="PT Sans", margin=margin(b=15)),
plot.subtitle=element_text(family="PT Sans"),
plot.caption=element_text(size=8, hjust=0, margin=margin(t=15)))
|
8f2031408fe8b857e2209be77adb75bad74efc68
|
1fb4f8ed4a8822b970ffb145566add7a917e41d3
|
/Data example/Data Example.R
|
bb6e253148102246fbec0ce33db659fdedf10413
|
[] |
no_license
|
fuweiboy1988/Gabriel-CV
|
95591b80a3defdfdc2ad8427a5f416efeb6cb9ab
|
0f0fca78c8f59e1a63ece32c7469a5de99118f4d
|
refs/heads/master
| 2021-01-19T15:25:59.840680
| 2017-04-13T23:39:06
| 2017-04-13T23:39:06
| 88,214,143
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,515
|
r
|
Data Example.R
|
library("bcv")
library("devtools")
library("cluster")
library("e1071")
library("mclust")
library("MASS")
library("nnet")
load_all("../lib/fpc")
load_all("../lib/NbClust")
#source("../lib/NbClust.R")
source("../code/classify.R")
source("../code/cluster.R")
source("../code/gabriel.R")
source("../code/jump.R")
source("../code/wold.R")
######------------Voting data-----------------------############
# Load the congressional voting records, drop every row containing the
# missing-value marker "?", recode y/n to 1/0, then shuffle the rows.
Vote <- read.table("Voting.txt", sep = ",")

# Rows with at least one "?" (vectorised; replaces the original
# grow-a-vector loop over rows).
Id <- which(rowSums(Vote == "?") > 0)
# Guard the empty case: `Vote[-integer(0), ]` would drop every row.
vote <- if (length(Id) > 0) Vote[-Id, -1] else Vote[, -1]

# Recode the votes: "n" -> 0, "y" -> 1, anything else stays NA
# (same mapping as the original element-wise double loop).
VV <- matrix(NA_real_, nrow(vote), ncol(vote))
VV[as.matrix(vote) == "n"] <- 0
VV[as.matrix(vote) == "y"] <- 1

# Shuffle rows with a fixed seed so downstream runs are reproducible.
set.seed(3)
ID <- sample(1:232, 232)
Vote <- VV[ID,]
# Estimate the number of clusters in the voting data with each candidate
# method. The RNG is re-seeded before every stochastic procedure so the
# methods are reproducible and comparable.
set.seed(1)
# Gabriel-holdout cross-validated k-means (project code from ../code/)
cv.kmeans.gabriel(Vote, 5, 2, maxcenters=10, classify.method="nearest")$centers
set.seed(1)
gabriel_cor_correct(Vote, maxcenters=10, type = 2)
set.seed(1)
# Wold-style holdout cross-validation (project code)
Wold_holdout(data = Vote, CV = 5, Errortol = 0.01, max.k =10)
set.seed(1)
# Gap statistic: report k maximising the gap column
Gap <- clusGap(Vote, FUN = kmeans, K.max =10)
which(Gap[[1]][,3] == max(Gap[[1]][,3]))
set.seed(1)
# Model-based clustering; report the chosen number of components
mcluster <- Mclust(Vote, G = 1:10)
mcluster$G
set.seed(1)
# Calinski-Harabasz index via NbClust
Ch <- NbClust(Vote, min.nc = 2, max.nc = 10,method = "kmeans", index = "ch")
Ch$Best.nc[1]
set.seed(1)
# Hartigan index via NbClust
Hartigan <- NbClust(Vote, min.nc = 2, max.nc = 10,method = "kmeans", index = "hartigan")
Hartigan$Best.nc[1]
set.seed(1)
# Jump statistic (from ../code/jump.R)
Jump <- jump(Vote,10, plotjumps=FALSE, trace=FALSE)
Jump$maxjump
set.seed(1)
# Prediction strength (fpc)
PS <- prediction.strength(Vote, Gmin=2, Gmax=10)
PS$optimalk
set.seed(1)
# Bootstrap stability selection (fpc)
SB <- nselectboot(Vote,clustermethod=kmeansCBI,classification="centroid",krange=2:10)
SB$kopt
###-----------------------------------------------------------------------------
# Load the Wisconsin breast-cancer table, drop the id column and every row
# containing the "?" missing-value marker, then build a shuffled numeric
# matrix of the first nine attributes.
Breast <- read.table("Breast cancer.txt", sep = ",")
Breast <- Breast[, -1]
# Rows containing "?" (vectorised; replaces the grow-a-vector loop).
Id <- which(rowSums(Breast == "?") > 0)
if (length(Id) > 0) Breast <- Breast[-Id, ]
# Convert to a numeric matrix explicitly. The original element-wise copy
# loop was type-unsafe: with factor columns it stored factor codes, and
# with character columns it produced a character matrix that breaks the
# clustering calls below.
VV <- vapply(Breast,
             function(col) as.numeric(as.character(col)),
             numeric(nrow(Breast)))
dimnames(VV) <- NULL  # the original matrix(NA, ...) carried no dimnames
set.seed(3)
ID <- sample(1:683, 683)
Breast <- VV[ID,1:9]
# Same battery of cluster-number estimates, applied to the breast-cancer
# data; each method is re-seeded for reproducibility.
set.seed(1)
cv.kmeans.gabriel(Breast, 5, 2, maxcenters=10, classify.method="nearest")$centers
set.seed(1)
gabriel_cor_correct(Breast, maxcenters=10, type = 2)
set.seed(1)
Wold_holdout(data = Breast, CV = 5, Errortol = 0.01, max.k =10)
set.seed(1)
# Gap statistic: report k maximising the gap column
Gap <- clusGap(Breast, FUN = kmeans, K.max =10)
which(Gap[[1]][,3] == max(Gap[[1]][,3]))
set.seed(1)
# Model-based clustering; report the chosen number of components
mcluster <- Mclust(Breast, G = 1:10)
mcluster$G
set.seed(1)
# Calinski-Harabasz index
Ch <- NbClust(Breast, min.nc = 2, max.nc = 10,method = "kmeans", index = "ch")
Ch$Best.nc[1]
set.seed(1)
# Hartigan index
Hartigan <- NbClust(Breast, min.nc = 2, max.nc = 10,method = "kmeans", index = "hartigan")
Hartigan$Best.nc[1]
set.seed(1)
# Jump statistic
Jump <- jump(Breast,10, plotjumps=FALSE, trace=FALSE)
Jump$maxjump
set.seed(1)
# Prediction strength
PS <- prediction.strength(Breast, Gmin=2, Gmax=10)
PS$optimalk
set.seed(1)
# Bootstrap stability selection
SB <- nselectboot(Breast,clustermethod=kmeansCBI,classification="centroid",krange=2:10)
SB$kopt
###-----------------------------------------------------------------------------
# Read the Pomeroy expression table and coerce its numeric block
# (rows 2:1380, columns 2:43) to a 1379 x 42 numeric matrix, then
# transpose so samples become rows.
DATA <- read.table("promeroy.txt", header = TRUE, sep="\t")
# vapply over columns replaces the element-wise double loop; the
# as.numeric(as.character(.)) round-trip also handles factor columns.
Data <- vapply(DATA[1 + seq_len(1379), 1 + seq_len(42)],
               function(col) as.numeric(as.character(col)),
               numeric(1379))
dimnames(Data) <- NULL  # the original matrix carried no dimnames
DATA <- t(Data)
# Same battery of cluster-number estimates, applied to the (transposed)
# Pomeroy expression matrix; each method is re-seeded for reproducibility.
set.seed(1)
cv.kmeans.gabriel(DATA, 5, 2, maxcenters=10, classify.method="nearest")$centers
set.seed(1)
gabriel_cor_correct(DATA, maxcenters=10, type = 2)
set.seed(1)
Wold_holdout(data = DATA, CV = 5, Errortol = 0.01, max.k =10)
set.seed(1)
# Gap statistic: report k maximising the gap column
Gap <- clusGap(DATA, FUN = kmeans, K.max =10)
which(Gap[[1]][,3] == max(Gap[[1]][,3]))
set.seed(1)
# Model-based clustering; report the chosen number of components
mcluster <- Mclust(DATA, G = 1:10)
mcluster$G
set.seed(1)
# Calinski-Harabasz index
Ch <- NbClust(DATA, min.nc = 2, max.nc = 10,method = "kmeans", index = "ch")
Ch$Best.nc[1]
set.seed(1)
# Hartigan index
Hartigan <- NbClust(DATA, min.nc = 2, max.nc = 10,method = "kmeans", index = "hartigan")
Hartigan$Best.nc[1]
set.seed(1)
# Jump statistic
Jump <- jump(DATA,10, plotjumps=FALSE, trace=FALSE)
Jump$maxjump
set.seed(1)
# Prediction strength
PS <- prediction.strength(DATA, Gmin=2, Gmax=10)
PS$optimalk
set.seed(1)
# Bootstrap stability selection
SB <- nselectboot(DATA,clustermethod=kmeansCBI,classification="centroid",krange=2:10)
SB$kopt
|
6f5123773c1d8a65bcfef43735db564510908ac5
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/2675_0/rinput.R
|
09c7f01b159eeb856b18161d65db0b2ba8629697
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)

# Read a Newick tree, strip its root, and write the unrooted tree back out.
phylo_tree <- read.tree("2675_0.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file = "2675_0_unrooted.txt")
|
ca66b0bd42f24edfec43b80d8d08523ed92040a3
|
b2fac096bb0d923dd26108a55da002df76cc6a48
|
/eda.R
|
ef833409629d9a63ee247265a69daad9101548a9
|
[] |
no_license
|
lianna1016/three_pointers
|
6f2d489905a9b05616398de63dc4a924ca30b20d
|
47de54d3ac4c0cafd470ef9f9a64dad86cc63654
|
refs/heads/master
| 2022-03-16T11:25:33.052429
| 2019-12-12T21:10:05
| 2019-12-12T21:10:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,812
|
r
|
eda.R
|
# Jess, Anna and Seth Project EDA
# 11/29/19
# Project helpers: styling, utility functions, package loads, data cleaning.
source('styleguide.R')
source('helpers.R')
source('packages.R')
source('cleaner.R')
# https://cran.r-project.org/web/packages/segmented/segmented.pdf
# Read in Clean DF
# add_time() is defined in the sourced helpers; it loads the CSV and adds
# the `time` column used throughout the plots below.
df.clean <- add_time("complete_data_clean.csv")
df.tourney <- add_time("tourney_data_clean.csv")
names(df.tourney)
# Check dimensions - len(unique schools) * len(unique years) must equal # of rows
dim_checker(df.clean)
dim_checker(df.tourney)
# Per game-ify
# get_prop_df() converts season totals into per-game proportions.
get_newprop = cbind(df.tourney$School, get_prop_df(df.tourney))
# Check assumption of normal distribution
p <- ggplot(get_newprop, aes(x=X3P.)) +
  geom_histogram(colour="black", fill='#EE3838') +
  labs(title="3P. Histogram") +
  xlab("3P.") +
  ylab("Counts") +
  theme_hodp()
p
#### EDA ####
#### histograms ####
# Let's have X3PAr be our response
# Check assumption of normal distribution
p <- ggplot(df.tourney, aes(x=X3PAr)) +
  geom_histogram(colour="black", fill='#EE3838') +
  labs(title="3PAr Histogram") +
  xlab("3PAr") +
  ylab("Counts") +
  theme_hodp()
p
# QQ plot
p <- ggplot(df.tourney, aes(sample = X3PAr)) +
  stat_qq(aes(color = '#EE3838')) +
  stat_qq_line() +
  labs(title="3PAr QQ Plot") +
  xlab("Theoretical") +
  ylab("Sample") +
  theme_hodp()+
  theme(legend.position = "none")
p
# X3P hist
# NOTE(review): se = F uses the reassignable shorthand; prefer FALSE.
p <- ggplot(get_newprop, aes(x = time + 2003, y = X3P)) +
  geom_point() +
  stat_smooth(method = "lm", col = '#EE3838', se = F) +
  labs(title="3P Made per Game over Time") +
  xlab("Year") +
  ylab("3P Made per Game") +
  #ylim(c(0, 0.6)) +
  theme_hodp()
p
# 3PA Hist
p <- ggplot(get_newprop, aes(x = time + 2003, y = X3PA)) +
  geom_point() +
  stat_smooth(method = "lm", col = '#EE3838', se = F) +
  labs(title="3P Attempted per Game over Time") +
  xlab("Year") +
  ylab("3PA per Game") +
  #ylim(c(0,0.6)) +
  theme_hodp()
p
# 3P percentage Hist
p <- ggplot(get_newprop, aes(x = time + 2003, y = X3P.)) +
  geom_point() +
  stat_smooth(method = "lm", col = '#EE3838', se = F) +
  labs(title="3P Percentage over Time") +
  xlab("Year") +
  ylab("3P.") +
  #ylim(c(0,0.6)) +
  theme_hodp()
p
#### Games Increasing ####
# since we know that games are increasing can we make those statistics into
# proportions to control for the specific effect
get_newprop <- cbind(df.tourney$School, get_prop_df(df.tourney))
get_newprop
df.clean.noschool <- df.clean[,2:length(df.clean)]
# Correlation of every column with the second-to-last column, sorted and
# with the two trivial self-correlations dropped.
top_cor_list <- cor(df.clean.noschool)[,ncol(df.clean.noschool)-1]
top_cor_list <- sort(top_cor_list, decreasing = TRUE)
top_cor_list <- top_cor_list[3:length(top_cor_list)]
top_cor_list
list_top <- names(top_cor_list)
list_top
# graphs
# The summarised tibble arrives through the pipe; the original additionally
# passed `df.clean.noschool` positionally to ggplot(), which is not a valid
# argument there and had to be removed.
df.clean.noschool %>%
  group_by(time) %>%
  summarise(mean_games = mean(G)) %>%
  ggplot(mapping = aes(x = time + 2003, y = mean_games)) +
  geom_line(stat="identity") + ggtitle("Games Played Per Season in the NCAA") +
  ylim(25, 35) +
  xlab("Year") +
  ylab("Games")+
  theme_hodp()
# we noticed that games also increases over time (it's one of the top predictors)
plot(df.clean.noschool$time, df.clean.noschool$G)
### MEANS PLOTS ###
# EDA plot to show how average 3Ar changes with time
# The grouped means arrive through the pipe; the stray positional
# `df.tourney` argument in the original ggplot() call was removed (it is
# not a valid ggplot() argument in that position).
df.tourney %>%
  group_by(time) %>%
  summarise(mean_three = mean(X3PAr)) %>%
  ggplot(mapping = aes(x = time + 2003, y = mean_three)) +
  geom_line(stat="identity") + ggtitle("Average 3PAr Across the NCAA") +
  xlab("Year") +
  ylab("3PAr")+
  theme_hodp()
### CORRELATION PLOT ###
#eda correlation
# Correlation heat-map of the win/loss columns from the raw data.
# NOTE(review): `cor <- round(cor(wl), 1)` shadows base::cor for the rest
# of the session.
data <- read.csv('data/full_data_raw.csv')
wl <- data %>% select(TeamW, TeamL, W.L., ConfW, ConfL, HomeW, HomeL, AwayW, AwayL)
cor <- round(cor(wl), 1)
p <- ggcorrplot(cor) +
  labs(title='Corr Plot for W-L Vars') +
  xlab('') + ylab('') +
  theme_hodp() +
  theme(axis.text.x=element_text(angle=60)) +
  theme(legend.position="right")
p
|
8831c882e03c493fcf679f8ef696cf0df7fe22c2
|
82e0ed3cb65ba4b68cd15557888a9e47ae7b79f9
|
/plot3.R
|
c6fc4c586b06d13426410fbf7757a3846dd52fe5
|
[] |
no_license
|
Kraev-Anatol/ExData_Plotting1
|
a19f3f732f2537af0ae3ac987f1071b3beaea4b2
|
b0e87ad97f3e66a7457c3ceacbd873e7454c0a6b
|
refs/heads/master
| 2020-05-25T20:31:24.288740
| 2019-05-22T06:40:00
| 2019-05-22T06:40:00
| 187,976,909
| 0
| 0
| null | 2019-05-22T06:31:41
| 2019-05-22T06:31:40
| null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
plot3.R
|
# Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02.
path <- getwd()
power_all <- data.table::fread(file.path(path, "exdata_data_household_power_consumption/household_power_consumption.txt"), na.strings = "?")

# Keep only the two target days.
power_feb <- subset(power_all, power_all$Date == "1/2/2007" | power_all$Date == "2/2/2007")

# Combine Date + Time into a single POSIXct column.
power_feb[,Data_Time := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]

# Render the three sub-metering series onto a 480x480 PNG, then close it.
png("plot3.png", width=480, height=480)
plot(x = power_feb[,Data_Time], y = as.numeric(power_feb[, Sub_metering_1]), type="l", xlab="", ylab="Energy sub metering")
lines(power_feb[,Data_Time], as.numeric(power_feb[, Sub_metering_2]), col = "red")
lines(power_feb[,Data_Time], as.numeric(power_feb[, Sub_metering_3]), col = "blue")
legend("topright"
       , col=c("black","red","blue")
       , c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 ")
       ,lty=c(1,1), lwd=c(1,1))
dev.off()
|
5f98fda02cefcfbd1f7b37c4a6c0fb0006faf1b1
|
0eb0513c136e513d6dd25a2d5cf12706ae1c6d74
|
/plot1.R
|
1f8663256ef393c59b9ea2dfb570d854e6d3b7b6
|
[] |
no_license
|
ycaesari/ExData_Plotting1
|
715cbf8388a94f708d34ea381ce2d16ee6bd884d
|
cdb4facc6bed1e14a11d0be6999c268dcee9e66f
|
refs/heads/master
| 2021-01-22T12:38:39.344686
| 2014-11-09T20:34:53
| 2014-11-09T20:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
target_dates <- as.Date(c("2007-02-01", "2007-02-02"))
power_data <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE)
power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
power_subset <- power_data[power_data$Date %in% target_dates, ]

# Global_active_power arrives as text; make it numeric for the histogram.
power_subset$Global_active_power <-
  as.numeric(as.character(power_subset$Global_active_power))

# Draw the histogram onto a 480x480 PNG device, then close it.
png(filename = "plot1.png", width = 480, height = 480)
hist(power_subset$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (killowats)")
dev.off()
|
e5375945f52857aa77f0c849c5dfba9ce4b968c9
|
842de311d4fe188f6bd1c384327dfa0fd71f6531
|
/NIO - Stock Movement.R
|
3654df81d09acfe6e55f318174bc90f382a03091
|
[] |
no_license
|
ltheod01/stock---time-series-analysis-
|
8bc98c6d3e9c28acb83ccccd7788bf0729ca08c0
|
c2c62e394e6ce7536997fd8f2bc8bc0dccc0de23
|
refs/heads/main
| 2023-06-03T19:23:51.865461
| 2021-06-21T15:14:30
| 2021-06-21T15:14:30
| 378,973,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,995
|
r
|
NIO - Stock Movement.R
|
library(devtools)
library(tidyquant)
library(crypto)
library(ggplot2)
library(tseries)
library(zoo)
library(dplyr)
library(xts)
options("getSymbols.warning4.0"=FALSE)
options("getSymbols.yahoo.warning"=FALSE)
# ===============================================================================
#We first collect our data
getSymbols("NIO", from= '2011-01-01', to = '2021-06-21', warnings= FALSE, auto.assign = TRUE)
# NOTE(review): `date = index(NIO)` is passed into as.data.frame's `...`
# and likely ignored; the date column is (re)assigned two lines below, so
# the result is still correct but the argument is dead.
nio <- as.data.frame(date = index(NIO), coredata(NIO))
nio<-nio[4]
nio$date <- index(NIO)
#================================================================================
#Then we check for N/A values in our dataset
sum(is.na(nio))
dtaset <- ts(data = nio$NIO.Close, frequency = 1)
plot.ts(dtaset)
# ===============================================================================
###################################################################
# Linear trend of the closing price against the date index.
reg_1<-lm(nio$NIO.Close~nio$date)
windows()
chart_Series(NIO)
plot(x=nio$date, y=nio$NIO.Close)
abline(reg_1)
#From the plot, we see that the series is not covariance stationary, as it's mean
#and variance do not stay the same over a long period of time. So we will use first
#order differencing to check if we remove non-stationarity by detrending. Because
#out time series is exponential, we will differantiate the logarithm of our time-series.
#================================================================================
# First difference of log prices (log returns).
frst_diff<-diff(log(nio$NIO.Close))
ts.plot(frst_diff)
#From the first order differentiation we have managed to detrend the time series,
#while the existence of outliers remains.
#================================================================================
#We will start by applying the AR(1) model and test if the residuals have any serial
#correlation
ar1<- arima(frst_diff, order=c(1,0,0))
ar2<- arima(frst_diff, order=c(2,0,0))
ar1
ar2#To extract the residuals use: ar1_resid<- residuals(ar1)
#To extract the fitted values, use: ar1_fitted <- ar1 - ar1_resid
ar1_resid<- residuals(ar1)
#We use the t-test to check for error autocorrelation. Because we
#will use an autoregressive model, the Durbin watson test is invalid (because the
#independent variables include past values of the dependent variable). Hence we
#will use a t-test involving a residual autocorrelation and the std error of the
#residual autocorrelation.
ttest <- t.test(ar1_resid, mu = 0)
# NOTE(review): ttest[1] is a one-element list (the t statistic); the
# comparison relies on implicit coercion. ttest$statistic would be the
# explicit form — confirm this does what was intended.
ttest[1]>qt(0.025,695,lower.tail= FALSE)
#Since the critical value is higher than the t-statistic of our test, we assume that
#our model is correctly specified and that we can use OLS. Since no significant serial
#correlation is found, then there is no need for the AR(2) model and we can proceed with
#the AR(1) model.
#From the summary table, we see that the t-statistic is t = -0.0263939 which is included
#in the 95% confidence interval. Since p-value = 0.979 > t-statistic, we cannot reject H0
#and we accept that our mean equals to zero.
#Plotting our time series along with our fitted values.
ar1_fitted <- frst_diff - ar1_resid
windows()
ts.plot(frst_diff)
points(ar1_fitted)
#Now we need to check the autocorrelations of the residuals from the model
windows()
ts.plot(ar1_resid)
acf(ar1_resid)
#================================================================================
#We will now try to use our model to predict 2 periods later
predictions <- predict(ar1, n.ahead = 3)
#Our models values are in a logarithmic scale and differenced. So we will need
#to apply reverse difference and then use the exponential.
# NOTE(review): the back-transform order looks suspect — exponentiating the
# predicted log-differences first and then diffinv-ing sums growth FACTORS
# onto the last price. The conventional inverse is diffinv on the log scale
# (cumulative sum of log-differences) followed by exp, i.e. multiplying the
# last price by cumprod(exp(pred)). Verify before trusting these forecasts.
predictions$pred <- exp(predictions$pred)
predictions$pred <- diffinv(predictions$pred, lag = 1, xi = nio$NIO.Close[length(nio$NIO.Close)])
#Below we plot our data with the predictions of our model.
windows()
plot.ts(nio$NIO.Close, xlim = c(0,700))
points(predictions$pred, col = 2)
#================================================================================
|
6517866ab6ad0408bb3314bb2ffc2f15e452883d
|
237a5f9cc3fdb2fbe496273f43751e335881577d
|
/protein/toy_data/src/50_31_2019/05_30_2019.R
|
8209dea8d82a7445fcdcf17ff255eecf4f09a65c
|
[] |
no_license
|
popejonpaul/maxquant
|
81c433eeb172b954b3c13e03fcdcf13a5dc2f589
|
279af6fff748f23c2d3e6a309278f9d7c6c56b3f
|
refs/heads/master
| 2020-06-19T08:40:31.665210
| 2019-07-12T22:28:44
| 2019-07-12T22:28:44
| 196,645,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
05_30_2019.R
|
library(seqinr)
library(tidyverse)
library(gtools)

# Load the FASTA proteome plus the MaxQuant output tables; spaces in the
# column names are replaced with underscores so they can be used unquoted.
fasta <- read.fasta("toy_data/uniprot-proteome_UP000002494+reviewed_yes.fasta",
                    seqtype = "AA")
protein_groups <- read_tsv("toy_data/proteinGroups.txt", guess_max = 20000) %>%
  {set_names(., gsub(" ", "_", names(.)))}
peptides <- read_tsv("toy_data/peptides_truncated.txt", guess_max = 20000) %>%
  {set_names(., gsub(" ", "_", names(.)))}
fasta_names <- getName(fasta)

# f1: amino-acid sequence (as one string) of the first FASTA entry whose
# name matches the accession pattern `x1`.
f1 <- function(x1){
  getSequence(fasta[str_detect(fasta_names, x1)],as.string=TRUE)[[1]][[1]]
}

# f2: locate every peptide of protein group `x2` within that protein's
# sequence; returns a tibble of start/end positions plus the peptide
# sequence. (The original left this body at top level against an undefined
# `x2` and an undefined `protein_Seq`, and never actually defined f2, so
# the f2("F1LMZ8") call at the bottom failed.)
f2 <- function(x2){
  filtered_list <- protein_groups %>%
    filter(str_detect(Protein_IDs, x2))
  # Peptide_IDs is a ";"-separated list of row ids into the peptides table.
  peptide_ids <- filtered_list$Peptide_IDs %>%
    strsplit(';') %>%
    .[[1]] %>%
    as.integer()
  filtered_peptides <- peptides %>%
    filter(id %in% peptide_ids)
  peptide_sequences <- filtered_peptides$Sequence
  protein_Seq <- f1(x2)  # protein sequence the peptides are matched against
  matchesloc <- str_locate_all(protein_Seq, peptide_sequences)
  do.call(rbind, matchesloc) %>%
    as_tibble %>%
    mutate(Sequence = peptide_sequences)
}

f1("F1LMZ8")
f2("F1LMZ8")
|
7c91eaa76083518e8148d80d232965a66af97ebb
|
39d64d0beb81dfb22dde788ee62027a6604eed68
|
/dendrograms.R
|
a5d8644326c7427a28e713c19d9817aa7aab4091
|
[] |
no_license
|
sdavison88/phylogenetictree_IMDB
|
6c90c1007448e50940e468ed6925324f21e479fb
|
9802a73001f21e57c4a6d465a89e12e8264cc13c
|
refs/heads/master
| 2020-05-18T14:24:51.494798
| 2017-03-07T21:17:30
| 2017-03-07T21:17:30
| 84,246,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,629
|
r
|
dendrograms.R
|
library(rJava)
library(tidyverse)
library(xlsx)
library(cluster)
library(ape)
library(dendextend)
library(dplyr)
library(tidyr)
library(tibble)
library(splitstackshape)
# IMDB movie metadata; path is user-specific.
movie_metadata_csv <- read_csv("~/Downloads/movie_metadata.csv.zip")
library(ggplot2)
library(ggthemes)
library(party)
library(earth)
# NOTE(review): Boruta() is called below but library(Boruta) is never
# loaded here — presumably attached elsewhere; confirm.
# Keep released movies (gross > 0), one row per unique title.
movies <- movie_metadata_csv %>%
  filter(gross > 0) %>%
  select(plot_keywords, genres, movie_title, imdb_score)
movies <- as.data.frame(movies)
movies <- movies[!duplicated(movies$movie_title), ]
rownames(movies) <- movies$movie_title
movies <- movies %>%
  select(-movie_title) %>%
  na.omit()
# Combine keywords and genres into one "|"-separated tag string, then
# one-hot encode the tags into binary indicator columns.
movies$type <- paste(movies$plot_keywords, movies$genres, sep='|')
m_matrix <- cSplit_e(select(movies,imdb_score,type), "type", sep = "|", mode = "binary",
         type = "character", fill = 0, drop = TRUE)
marsModel <- earth(imdb_score ~ . , data= m_matrix) # build model
ev <- evimp (marsModel)
# NOTE(review): base.mod and all.mod are identical full models despite the
# comments; the stepwise search below therefore has nowhere to move.
base.mod <- lm(imdb_score ~ . , data= m_matrix) # base intercept only model
all.mod <- lm(imdb_score ~ . , data= m_matrix) # full model with all predictors
stepMod <- step(base.mod, scope = list(lower = base.mod, upper = all.mod), direction = "both", trace = 0, steps = 1000) # perform step-wise algorithm
shortlistedVars <- names(unlist(stepMod[[1]])) # get the shortlisted variable.
shortlistedVars <- shortlistedVars[!shortlistedVars %in% "(Intercept)"] # remove intercept
print(shortlistedVars)
# Conditional random forest variable importance (party).
cf1 <- cforest(imdb_score ~ . , data= m_matrix, control=cforest_unbiased(mtry=2,ntree=20))
df_imp <- varimp(cf1)
summary(cf1)
# NOTE(review): the manova fit is immediately overwritten by the lm fit,
# and neither `fit` is used afterwards.
fit = manova(m_matrix,formula = imdb_score ~ .)
fit = lm(m_matrix,formula = imdb_score ~ .)
# PCA loadings; collect variables loading > .1 on the first three PCs.
# NOTE(review): `list` and `df` shadow the base functions of the same name.
books_pc <- prcomp(m_matrix, scale = TRUE)
df <- data.frame(books_pc$rotation)
list <- c()
list <-rownames(df[which(df$PC1 > .1),])
list <- c(list,rownames(df[which(df$PC2 > .1),]))
list <- c(list,rownames(df[which(df$PC3 > .1),]))
# Prune sparse tag columns / empty rows, then factor-ify for Boruta.
m_matrix <- m_matrix[,colSums(m_matrix) > 10]
m_matrix <- m_matrix[rowSums(m_matrix) > 0,]
m_matrix[,2:24] <- lapply(m_matrix[,2:24] , as.factor)
boruta_output <- Boruta(imdb_score ~ ., data=m_matrix, doTrace=2)
boruta.df <- attStats(boruta_output)
plot(boruta_output)
# Final Boruta importance per feature, ordered for plotting.
importantFeatures <- data.frame(
  feature=labels(tail(boruta_output$ImpHistory, 1)),
  weight=c(tail(boruta_output$ImpHistory, 1)))[,2:3]
names(importantFeatures) <- c("feature", "weight")
importantFeatures$feature <- factor(
  importantFeatures$feature,
  levels=importantFeatures$feature[order(importantFeatures$weight, decreasing=T)])
ggplot(importantFeatures[importantFeatures$weight >= 10,], aes(x=feature, y=weight)) +
  geom_bar(stat="identity", position="dodge") +
  theme_fivethirtyeight()
# Divisive clustering on Gower distances of the tag matrix.
movies_d <- daisy(m_matrix)
cld <- as.dendrogram(diana(movies_d))
# Three versions of the dendrogram:
# need to use hang.dendrogram to keep the hanging raggedness
# NOTE(review): the bare `x` below references an undefined object and will
# error when the script is run top-to-bottom; it appears to be a leftover.
x
par(mar = c(5,4,4,7), font.main = 1)
par(bg = "grey99")
plot(hang.dendrogram(cld), horiz = TRUE, yaxt = "n", type = "triangle",
     xlim = c(4.5, -1),
     edgePar = list(col = "steelblue"),
     main = "Selected childrens books or series, clustered by thematic elements")
par(mar = c(1,1,1,1))
par(bg = "grey99")
plot(as.phylo(cld), type = "unrooted", cex = 0.6) # maybe
svg("movies_good.svg", width=30, height=30)
par(mar=c(5,3,2,2)+0.1)
plot(as.phylo(cld), type = "fan", cex = 0.6) # maybe
dev.off()
#-----------principle components-------------
books_pc <- prcomp(m_matrix, scale = TRUE)
View(books_pc$rotation)
##par(family = "Source Sans Pro")
pdf("movies_pca.pdf", width=40, height=15)
par(bg = "grey99")
biplot(books_pc, choices = 1:2, col = c("darkblue", "grey75"), pc.biplot = TRUE)
dev.off()
|
6a0c5ed2c32c478d233c25573a4eae65b1f8b97b
|
ab272ba3abdc98ab1536dbd0e534e2f1d252092d
|
/speed.R
|
eae2eca552b891c3f4ccbd3f20d9e962ebab8220
|
[] |
no_license
|
hagr1dden/hagr1dden
|
2a1538c4d6d7c8e792c565ca7099fa2d3ebefdca
|
c3ca954ac8eca8268cb3ea7e60b06dcb95da92ac
|
refs/heads/master
| 2021-01-13T03:43:37.988188
| 2018-05-01T07:40:32
| 2018-05-01T07:40:32
| 77,278,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,255
|
r
|
speed.R
|
library(trackeR)
library(data.table)
library(isofor)
library(ggmap)
library(e1071)
#testing
# Load a GPS track from a TCX export, de-duplicate timestamps, and
# k-means-cluster the track into 30 spatial segments.
runDF <- readTCX(file = '/home/evgeny/My_Route(1).tcx', timezone = "GMT")
#runTr0 <- trackeRdata(runDF)
#runSummary <- summary(runTr0)
#plotRoute(runTr0, speed = FALSE)
#print(runSummary)
#plot(runSummary, group = c("total", "moving"), what = c("avgSpeed", "distance", "duration"))
#parts_of_cluster
runDATA <- setDT(runDF)
# Keep the last record of each timestamp.
runDATA <- runDATA[ ! duplicated(runDATA$time, fromLast = TRUE), ]
set.seed(123456789)
# 30 spatial clusters over the lat/lon coordinates.
lm1 = kmeans(runDATA[,c("latitude", "longitude")], 30, nstart = 40, iter.max = 10)
plot(runDATA[,c("latitude", "longitude")], col = (lm1$cluster+1), pch = 20, cex = 0.2)
# Map numeric values onto a colour ramp: each value in `x` is binned over
# the range of `x` into `colsteps` intervals and assigned the matching
# colour from a yellow-to-red (by default) palette.
color.gradient <- function(x, colors = c("yellow", "red"), colsteps = 30) {
  palette <- colorRampPalette(colors)(colsteps)
  breakpoints <- seq(min(x), max(x), length.out = colsteps)
  palette[findInterval(x, breakpoints)]
}
#speed in clusters
# Plot the clustered track, annotate each cluster centre with its average
# moving speed (via trackeR's summary), and return the per-cluster speeds.
#
# Args:
#   data: trackeR-compatible table with latitude/longitude columns.
#   clus: a kmeans() result (uses $centers and $cluster).
calculate_speed <- function(data, clus){
  # Use the `data` argument — the original plotted the global `runDATA`.
  plot(data[,c("latitude", "longitude")], col = (clus$cluster+1), pch = 20, cex = 0.2)
  # $centers spelled out; the original's $center relied on partial matching.
  n_clusters <- nrow(clus$centers)
  mass_speed <- numeric(n_clusters)  # preallocate instead of growing with c()
  for(i in seq_len(n_clusters)) {
    art <- trackeRdata(subset(data, clus$cluster == i))
    center <- clus$centers[i,]
    common_art <- summary(art)
    mass_speed[i] <- common_art$avgSpeedMoving
    text(x = center[1], y = center[2], pos = 2, cex = 0.6, round(mass_speed[i], 5))
    text(x = center[1], y = center[2], pos = 3, cex = 0.8, i)
  }
  points(clus$centers, col = color.gradient(mass_speed), cex = 4, pch = 20)
  plot(mass_speed, type = "h", lwd = 10, col = color.gradient(mass_speed))
  # Reference lines at +/-10% of the mean speed.
  abline(h = c(mean(mass_speed)*1.1, mean(mass_speed)*0.9), col = c("blue","green"), lwd = 3, lty = c(1,2))
  return(mass_speed)
}
# Plot the clustered track, compute the mean pairwise distance within each
# cluster, annotate the centres, and return the per-cluster mean distances.
#
# Args:
#   data: table with latitude/longitude columns.
#   clus: a kmeans() result (uses $centers and $cluster).
calculate_dist <- function(data, clus){
  # Use the `data` argument — the original plotted the global `runDATA`.
  plot(data[,c("latitude", "longitude")], col = (clus$cluster+1), pch = 20, cex = 0.2)
  n_clusters <- nrow(clus$centers)
  mass_dist <- numeric(n_clusters)  # preallocate instead of growing with c()
  for(i in seq_len(n_clusters)) {
    art <- subset(data[,c("latitude", "longitude")], clus$cluster == i)
    distantion <- dist(art)
    center <- clus$centers[i,]
    mass_dist[i] <- mean(distantion)
    text(x = center[1], y = center[2], pos = 3, cex = 0.8, round(i, 5))
  }
  # (The original also computed an unused overall mean; removed.)
  points(clus$centers, col = color.gradient(mass_dist), cex = 4, pch = 20)
  plot(mass_dist, type = "h", lwd = 10, col = color.gradient(mass_dist))
  # Reference lines at +/-10% of the mean distance.
  abline(h = c(mean(mass_dist)*1.1, mean(mass_dist)*0.9), col = c("blue","green"), lwd = 3, lty = c(1,2))
  return(mass_dist)
}
# Flag anomalous clusters with an isolation forest over (dist, speed) and
# draw the anomalous track points in red on a Google map.
#
# Args:
#   all_coords: full track table with latitude/longitude columns.
#   data: one row per cluster with columns renamed to x, y, dist, speed.
#   clus: the kmeans() result linking track points to clusters.
detected_anomaly <- function(all_coords, data, clus){
  anomaly_data <- c()
  mean_cen <- apply(clus$centers,2,mean)
  # Map centred on the mean cluster centre (lon, lat order for ggmap).
  content <- get_googlemap(center = c(mean_cen[2],mean_cen[1]), zoom = 11)
  colnames(data) <- c("x", "y", "dist", "speed")
  x <- subset(data, select = c("dist","speed"))
  # Isolation forest (isofor): 100 trees, subsample size 30.
  mod = iForest(x, 100, 30)
  p = predict(mod, x)
  # Scores above the 85th percentile are treated as anomalies.
  col = ifelse(p > quantile(p, 0.85), "red", "blue")
  ol = ifelse(p > quantile(p, 0.85), 1, 2)
  plot(x, col = col, pch = ol)
  text(x, pos = 3, cex = 0.8, rownames(x))
  # NOTE(review): this loop indexes scores by i and clusters by the same i,
  # which is only valid because `data` has exactly one row per cluster in
  # cluster order — confirm that invariant holds for all callers.
  for( i in 1:length(p)){
    if(p[i] > quantile(p, 0.85))
    {
      tra <- subset(all_coords, clus$cluster == i)
      anomaly_data <- rbind(anomaly_data, tra)
    }
  }
  ggmap(content) + geom_point(aes(x = longitude, y = latitude), data = all_coords, color = "blue", size = 0.8) + geom_point(aes(x = longitude, y = latitude), data = anomaly_data, color = "red", size = 0.8)
}
# Label each cluster interesting/casual/not_interesting from its speed and
# mean intra-cluster distance (relative to +/-10% of the means), fit a
# naive Bayes classifier on the distance column, and return the training
# confusion table.
classif_bayes <- function(data){
  colnames(data) <- c("x", "y", "dist", "speed")
  # (A discarded cor(dist, speed) call and an unused cbind against the
  # global `new_dt` were removed from the original.)
  classes_speed <- ifelse(data$speed > mean(data$speed)* 1.1 | data$dist > mean(data$dist) * 1.1, "not_interesting",
                          ifelse(data$speed < mean(data$speed) * 0.9 | data$dist < mean(data$dist)*0.9, "interesting", "casual"))
  # Sized from `data` itself; the original hard-coded 1:30.
  dat_new <- data.frame(seq_len(nrow(data)), classes_speed)
  model2 <- naiveBayes(data[,3], dat_new[,2])
  pr2train <- predict(model2, data[,3])
  t2train <- table(pr2train, dat_new[,2], dnn=list('Предсказано ', 'На самом деле'))
  return(t2train)
}
# Per-cluster summary table: centre coordinates, mean intra-cluster
# distance, and average moving speed.
new_dt <- data.table(lm1$centers, calculate_dist(runDATA, lm1), calculate_speed(runDATA,lm1))
detected_anomaly(runDATA,new_dt, lm1)
classif_bayes(new_dt)
# Re-run for the plotting side effects.
calculate_speed(runDATA, lm1)
|
ebcbc5eed68de902626820644121cb78877e203f
|
eeea10b971ed75bf87305d7b4163cf355eac1240
|
/RF LOWESS sim/Simulations/SimPrac.R
|
93b56bfda2f3eddd3e1ebcffc18565572994f0a2
|
[] |
no_license
|
AndrewjSage/RF-Robustness
|
42e0caa6cc5c1f46031f6a3b77e33a56dc4fc83b
|
bace62de6a191832c1a9d19462c140686a15bf1b
|
refs/heads/master
| 2022-11-21T12:09:04.041716
| 2020-07-24T04:52:51
| 2020-07-24T04:52:51
| 106,871,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,231
|
r
|
SimPrac.R
|
# Cluster-job driver: run the RFLOWESS simulation grid in parallel.
# NOTE(review): setwd() in a script is fragile; acceptable here only
# because this runs as a dedicated batch job on a known host.
setwd("/work/STAT/ajsage")
library(RFLOWESS)
library(parallel)
#Apply iteratively using parSapply
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
# Each worker needs the simulation package attached.
clusterEvalQ(cl, {
  library(RFLOWESS)
})
# Reproducible parallel RNG streams.
clusterSetRNGStream(cl, 03142018)
# "RL" scenario: 5 replicates across the p x m contamination grid.
RL1 <- parSapply(cl=cl, X=1:5, simplify="array", FUN=function(i){ApplyAcross_m_and_p(Sim = "RL", ntrain=100, ntest=100, p=c(0,.1,.2), m=c(.2,.4), contamination="Var", Vartype=NA, DGP=2, ntrees=500, ndsize=15, ntreestune=100, parvec=c(1000, 100, seq(from=1, to=30, by=0.25)), cvreps=1, cvfolds=5, tol=10^-6)})
stopCluster(cl)
save(RL1, file="RL1.Rdata")
#Apply iteratively using parSapply
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
clusterEvalQ(cl, {
  library(RFLOWESS)
})
clusterSetRNGStream(cl, 03142018)
# "LM" scenario: 3 replicates with Toeplitz variance contamination.
LM2 <- parSapply(cl=cl, X=1:3, simplify="array", FUN=function(i){ApplyAcross_m_and_p(Sim = "LM", ntrain=100, ntest=100, p=c(0, 0.05, 0.1), m=c(1), contamination="Var", Vartype="Toeplitz", DGP=2, ntrees=1000, ndsize=10, ntreestune=100, parvec=c(1000, 100, seq(from=1, to=30, by=0.25)), cvreps=1, cvfolds=5, tol=10^-6)})
stopCluster(cl)
save(LM2, file="LM2.Rdata")
|
a947b09127c120b889c24bc25bd571e6930294ab
|
1716005639cc0dd03d482645a8c8239673a407f7
|
/R/getPredictionVariables.R
|
34a16fad98063cb33025811062db2f2e09cb633d
|
[
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jlthomps/surragateRegression
|
23d0b0aed009185f2fce65712dd504ec76fe1f58
|
60085337c6bc035439ed24eb7f44a717b698bc40
|
refs/heads/master
| 2021-01-17T23:25:31.325136
| 2013-08-23T16:29:07
| 2013-08-23T16:29:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 695
|
r
|
getPredictionVariables.R
|
#' getPredictVariables
#'
#' Returns a character vector of prediction variables.
#'
#' A prediction variable is the column immediately preceding a remark
#' ("..._cd") column, excluding the agency/site/timezone bookkeeping
#' columns.
#'
#'@param DTnames column names of DT dataframe
#'@return predictVariables string predict variables based on column headers
#'@keywords predict
#'@export
#'@examples
#' DTComplete <- DTComplete
#' UV <- UV
#' predictVars <- getPredictVariables(names(UV))
getPredictVariables <- function(DTnames){
  # vapply (not sapply) keeps the return type character even for empty input
  splitNames <- vapply(strsplit(DTnames, "_"), function(x) x[length(x)], character(1))
  splitNamesAvoid <- vapply(strsplit(DTnames, "_"), function(x) x[1], character(1))
  # "_cd" columns that are not agency/site/tz bookkeeping columns
  commentIndex <- which("cd" == splitNames & !(splitNamesAvoid %in% c("agency", "site", "tz")))
  # the data column sits immediately before its "_cd" remark column
  predictVariables <- DTnames[commentIndex-1]
  return(predictVariables)
}
|
522118d8eac74aedf9edbca447102ff3add76ee9
|
d6875904f4d9fe89fe2536741f7b979e251323f9
|
/FinalPtj/Brown-FinalProject-121519.R
|
ad1f4666526dfde9a80005b6f5852520125d1986
|
[
"MIT"
] |
permissive
|
diedrebrown/Fa19-Info640-WDB
|
adafb417a931ab0c41ffad4ff66e62d4997450c3
|
33876f32fbbaf25214b9e89b63886edc644580f0
|
refs/heads/master
| 2022-12-06T15:48:19.052522
| 2020-08-29T15:51:32
| 2020-08-29T15:51:32
| 205,041,411
| 0
| 0
| null | 2020-08-29T15:51:33
| 2019-08-28T23:33:51
|
R
|
UTF-8
|
R
| false
| false
| 17,371
|
r
|
Brown-FinalProject-121519.R
|
####PROJECT TITLE AND CONTACT####
#Diedre Brown | dbrow207@pratt.edu
#INFO 640 Data Analysis | Pratt Institute
#Final Project
#Text Analysis of Lewis Carroll's Alice in Wonderland
#15 December 2019
####LOAD PACKAGES AND LIBRARIES####
#install.packages("tidyverse")
#install.packages("ggplot2")
#install.packages("ggthemes")
#install.packages("gmodels")
#install.packages("broom")
#install.packages("GGally")
#install.packages("tidytext")
#install.packages("gutenbergr")#project gutenberg
#install.packages("tm")#to create and work with corpora
#install.packages("topicmodels")#for LDA topic models
#install.packages("Rpoppler")
#install.packages("data.table")
#install.packages("stringr")
#install.packages("qdap")
#install.packages("RSQLite") #SQLite Interface
#install.packages("SnowballC") #text stemming library
#install.packages("wordcloud") #for wordcloud visualizations
#install.packages("syuzhet") #for text sentiment analysis
#install.packages("quanteda") #for N-grams
#install.packages("textdata") #required for sentiment dictionaries
library(ggthemes)
library(ggplot2)
library(tidyverse)
library(data.table)
library(dplyr)
library(broom)
library(GGally)
library(tidytext)
library(tm)
library(stringr)
library(topicmodels)
library(gutenbergr)
library(qdap)
library(RSQLite)
library(SnowballC)
library(wordcloud)
library(syuzhet)
library(quanteda)
library(textdata)
####DOWNLOAD LEWIS CARROLL'S ALICE IN WONDERLAND (AIW) FROM PROJECT GUTENBERG####
# Project Gutenberg id 19033 is the 1916 Sam'l Gabriel edition of
# Alice's Adventures in Wonderland.
aiw_book <- gutenberg_download(gutenberg_id = 19033)
aiw_book #a tibble of 1299 x2 (this includes the pjt. gutenberg id's) which can be turned into a tidy text dataset
#remove gutenberg_id variable
aiw_book_text <- aiw_book %>%
  select(-gutenberg_id)
aiw_book_text
summary(aiw_book_text)
#text
#Length:1299 rows
#Class :character
#Mode :character
dim(aiw_book_text)
####CLEAN DOCUMENT AND CREATE CORPUS, DTM/TDM####
#create aiw corpus from book.
aiw_source <- VectorSource(aiw_book_text)
aiw_corpus <- VCorpus(aiw_source)#aiw_corpus
aiw_corpus
#Metadata: corpus specific: 0, document level (indexed): 0
#Content: documents: 1
#specify stopwords
#in addition to stopwords("en"), add illustration, york, sons, company, 1916, gabirel, sam'l, v, vi, vii, viii, alice, dinah, sister, storyland, series, copyright, saml, alice's, alices
# Edition-specific boilerplate plus character names, on top of the
# standard English stopword list.
new_stops<-c("series_","_the","well", "way","now","illustration", "york", "sons", "company", "1916", "gabriel", "sam'l", "v", "vi", "vii", "viii", "alice", "dinah", "sister","storyland", "series", "copyright", "saml", "alice's", "alices", "said","like", "little", "went", "came", "one","just", stopwords("en"))
#also need a stopword list that doesn't include alice
# Identical to new_stops except "alice"/"alice's" are kept, so analyses
# can retain the protagonist's name.
wastops<-c("series_","_the","well", "way","now","illustration", "york", "sons", "company", "1916", "gabriel", "sam'l", "v", "vi", "vii", "viii","dinah", "sister","storyland", "series", "copyright", "saml","said","like", "little", "went", "came", "one","just", "alices", stopwords("en"))
#clean corpus
#create a function to clean the corpus
#' Clean a tm corpus: lowercase, strip punctuation, remove stopwords, and
#' collapse repeated whitespace.
#'
#' @param corp A tm (V)Corpus to clean.
#' @param stops Character vector of stopwords to remove. Defaults to the
#'   script-level `new_stops` list (English stopwords + AIW-specific noise,
#'   including "alice"), which preserves the original behaviour while making
#'   the previously hidden global dependency explicit.
#' @return The cleaned corpus.
clean_corp <- function(corp, stops = new_stops) {
  # lowercase via content_transformer so document metadata is preserved {base r}
  corp <- tm_map(corp, content_transformer(tolower))
  # remove punctuation {tm}
  corp <- tm_map(corp, removePunctuation)
  # remove stopwords (whole-word matches)
  corp <- tm_map(corp, removeWords, words = stops)
  # collapse the runs of whitespace left behind by the removals {tm}
  corp <- tm_map(corp, stripWhitespace)
  corp
}
#clean_corp function for the stopwords that do not include 'alice'
# Clean a tm corpus for the analyses that keep the term "alice":
# lowercase, drop punctuation, remove the `wastops` stopword list (which,
# unlike `new_stops`, does not contain "alice"), then collapse whitespace.
# Returns the cleaned corpus.
clean_wacorp <- function(corp) {
  cleaning_steps <- list(
    function(x) tm_map(x, content_transformer(tolower)),
    function(x) tm_map(x, removePunctuation),
    function(x) tm_map(x, removeWords, words = wastops),
    function(x) tm_map(x, stripWhitespace)
  )
  for (step in cleaning_steps) {
    corp <- step(corp)
  }
  corp
}
#clean the aiw_corpus with the clean_corp function and place in aiw_cleancorpus
# (stopword list includes "alice")
aiw_cleancorpus <- clean_corp(aiw_corpus)
summary(aiw_cleancorpus)
str(aiw_cleancorpus)
#clean the aiw_corpus with the clean_wacorp function and place in aiw_cleanwacorpus
# (stopword list keeps "alice" in the text)
aiw_cleanwacorpus <- clean_wacorp(aiw_corpus)
summary(aiw_cleanwacorpus)
str(aiw_cleanwacorpus)
#create a term document matrix (terms x documents) and its transpose from aiw_cleancorpus
aiw_tdm <- TermDocumentMatrix(aiw_cleancorpus)
aiw_dtm <- DocumentTermMatrix(aiw_cleancorpus)
#term document matrix from aiw_cleanwacorpus (the "with alice" variants)
wa_aiw_tdm <- TermDocumentMatrix(aiw_cleanwacorpus)
wa_aiw_dtm <- DocumentTermMatrix(aiw_cleanwacorpus)
####TERM FREQUENCY VISUALIZATIONS####
#Previous review of the corpus showed that "alice" had the highest term frequency (163 counts). With "alice" removed, let's look at term frequency.
#convert tdm to matrix (dense; fine for a corpus this small)
aiw_mat <- as.matrix(aiw_tdm)
#sum rows (one row per term) and sort by frequency
aiw_termfreq <- rowSums(aiw_mat)
aiw_termfreq <- sort(aiw_termfreq, decreasing = TRUE)
summary(aiw_termfreq)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#1.000 1.000 1.000 3.075 3.000 144.000
glimpse(aiw_termfreq)
#plot frequency
#since the mean was 3.075, i want to see the top 50 terms that were used 3 or more times
barplot(aiw_termfreq[1:50],
        col = "dodgerblue",
        las = 3)  # las = 3: vertical axis labels so term names fit
#let's see the next 50 for comparison and what characters are included
barplot(aiw_termfreq[51:100],
        col = "dodgerblue",
        las = 3)
#view top 100 words as a wordcloud and wordnetwork
#sum rows and sort by frequency to create aiw data frame from aiw_termfreq
aiw_freqsum<-rowSums(aiw_mat)
aiw_wordFreq <- data.frame(term=names(aiw_freqsum), num=aiw_freqsum)
#make a word cloud of top 100
wordcloud(aiw_wordFreq$term,
          aiw_wordFreq$num,
          max.words = 100,
          colors = c("#416B8C","#6AAFE6","#8EC0E4"))
#wordcloud with "said" removed ("said" dominates the cloud otherwise)
glimpse(aiw_wordFreq)
aiw_wordFreq_sanssaid <- aiw_wordFreq %>%
  filter(term != "said")
wordcloud(aiw_wordFreq_sanssaid$term,
          aiw_wordFreq_sanssaid$num,
          max.words = 100,
          colors = c("#416B8C","#6AAFE6","#8EC0E4"))
#add said to stopword list? yes.
####TOPIC MODELING####
###Topics###
##non-alice lda function
#' Fit an LDA topic model to a cleaned corpus, save the top terms per topic
#' to CSV, and optionally plot them.
#'
#' @param input_corpus A cleaned tm corpus (without the term "alice").
#' @param plot If TRUE (default), return a ggplot of top terms faceted by topic.
#' @param number_of_topics Number of LDA topics (k).
#' @param number_of_words Number of top terms to keep per topic.
#' @param path File-name stem for the CSV written as a side effect.
#' @return A ggplot object when `plot` is TRUE; invisibly NULL otherwise.
wonderland_tm_terms_by_topic <- function(input_corpus, plot = TRUE,
                                         number_of_topics = 6, number_of_words = 7,
                                         path = "lda-121519/aiw_lda_norm_topics") {
  aiw_dtm <- DocumentTermMatrix(input_corpus)
  # Keep only documents that contain at least one term: LDA() errors on
  # all-zero rows.
  unique_indexes <- unique(aiw_dtm$i)
  aiw_dtm <- aiw_dtm[unique_indexes, ]
  # Fixed seed so topic assignments are reproducible between runs.
  aiw_lda <- LDA(aiw_dtm, k = number_of_topics, control = list(seed = 1234))
  aiw_topics <- tidy(aiw_lda, matrix = "beta")
  aiw_lda_words <- terms(aiw_lda, number_of_words)
  aiw_lda_topics <- as.matrix(aiw_lda_words)
  # paste0() fixes the stray spaces that paste()'s default sep = " " put in
  # the output filename ("...topics 6 .csv" -> "...topics6.csv").
  write.csv(aiw_lda_topics, file = paste0(path, number_of_topics, ".csv"))
  # Top terms per topic, ordered by per-topic word probability (beta).
  aiw_top_terms_2 <- aiw_topics %>%
    group_by(topic) %>%
    top_n(number_of_words, beta) %>%
    ungroup() %>%
    arrange(topic, -beta)
  if (plot) {
    aiw_top_terms_2 %>%
      mutate(term = reorder(term, beta)) %>%
      ggplot(aes(term, beta, fill = factor(topic))) +
      geom_col(show.legend = FALSE) +
      facet_wrap(~topic, scales = "free") +
      coord_flip() +
      labs(title = "Topic Model for Alice in Wonderland (Alice Omitted)")
  }
}
#alice_tm_function
#' Fit an LDA topic model to the corpus that still contains the term "alice",
#' save the top terms per topic to CSV, and optionally plot them.
#'
#' @param input_corpus A cleaned tm corpus (term "alice" retained).
#' @param plot If TRUE (default), return a ggplot of top terms faceted by topic.
#' @param number_of_topics Number of LDA topics (k).
#' @param number_of_words Number of top terms to keep per topic.
#' @param path File-name stem for the CSV written as a side effect.
#' @return A ggplot object when `plot` is TRUE; invisibly NULL otherwise.
alice_tm_terms_by_topic <- function(input_corpus, plot = TRUE,
                                    number_of_topics = 6, number_of_words = 7,
                                    path = "lda-121519/with-alice/alice_lda_norm_topics") {
  wa_aiw_dtm <- DocumentTermMatrix(input_corpus)
  # Drop all-zero documents: LDA() cannot handle rows with no terms.
  unique_indexes <- unique(wa_aiw_dtm$i)
  wa_aiw_dtm <- wa_aiw_dtm[unique_indexes, ]
  # Fixed seed for reproducible topic assignments.
  wa_aiw_lda <- LDA(wa_aiw_dtm, k = number_of_topics, control = list(seed = 1234))
  wa_topics <- tidy(wa_aiw_lda, matrix = "beta")
  wa_aiw_lda_words <- terms(wa_aiw_lda, number_of_words)
  wa_aiw_lda_topics <- as.matrix(wa_aiw_lda_words)
  # paste0() fixes the stray spaces that paste()'s default sep = " " put in
  # the output filename.
  write.csv(wa_aiw_lda_topics, file = paste0(path, number_of_topics, ".csv"))
  # Top terms per topic, ordered by per-topic word probability (beta).
  wa_aiw_top_terms_2 <- wa_topics %>%
    group_by(topic) %>%
    top_n(number_of_words, beta) %>%
    ungroup() %>%
    arrange(topic, -beta)
  if (plot) {
    wa_aiw_top_terms_2 %>%
      mutate(term = reorder(term, beta)) %>%
      ggplot(aes(term, beta, fill = factor(topic))) +
      geom_col(show.legend = FALSE) +
      facet_wrap(~topic, scales = "free") +
      coord_flip() +
      labs(title = "Topic Model for Alice in Wonderland")
  }
}
#functions run for 6 topics -- previous running showed 6 topics with 7 words to be a good balance of themes from the book both with and without 'alice'
alice_tm_terms_by_topic (aiw_cleanwacorpus, number_of_topics = 6, number_of_words = 7)
wonderland_tm_terms_by_topic (aiw_cleancorpus, number_of_topics = 6, number_of_words = 7)
#functions run for 2 topics--
alice_tm_terms_by_topic (aiw_cleanwacorpus, number_of_topics = 2, number_of_words = 7)
wonderland_tm_terms_by_topic (aiw_cleancorpus, number_of_topics = 2, number_of_words = 7)
#alice_tm but normalized for alice
#normalize for alice six topics
# Manual re-run of the LDA with the beta values min-max rescaled to [0, 1]
# so topic term weights are comparable across plots.
k<-6
wa_aiw_lda <-LDA(wa_aiw_dtm, k=k, control = list(seed=1234))
wa_aiw_lda
wa_aiw_lda_words <-terms(wa_aiw_lda, 7)
wa_aiw_lda_topics <-as.matrix(wa_aiw_lda_words)
head(wa_aiw_lda_topics)
write.csv(wa_aiw_lda_topics, file = paste("lda-121519/with-alice/wa_lda_norm",k,".csv"))
#visualize
wa_aiw_lda_tidy<-tidy(wa_aiw_lda, matrix="beta")
wa_aiw_lda_tidy
# Min-max normalization of beta across the whole table
wa_aiw_lda_tidy_norm <- wa_aiw_lda_tidy%>%
  mutate(betanorm=((beta - min(beta)) / (max(beta) - min(beta))))
wa_aiw_lda_tidy_norm
#order words from most prominent to least (top 7 per topic)
wa_aiw_top_terms_norm<-wa_aiw_lda_tidy_norm%>%
  group_by(topic)%>%
  top_n(7,betanorm)%>%
  ungroup()%>%
  arrange(topic, -betanorm)
wa_aiw_top_terms_norm
#plot
wa_aiw_top_terms_norm%>%
  mutate(term=reorder(term,betanorm))%>%
  ggplot(aes(term, betanorm, fill=factor(topic)))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~topic, scales="free")+
  coord_flip()+
  labs(title = "Topic Model for Alice in Wonderland (Normalized for Alice)")
#normalize for alice 12 topics (same pipeline as above with k = 12)
k<-12
wa_aiw_lda12 <-LDA(wa_aiw_dtm, k=k, control = list(seed=1234))
wa_aiw_lda12
wa_aiw_lda12_words <-terms(wa_aiw_lda12, 7)
wa_aiw_lda12_topics <-as.matrix(wa_aiw_lda12_words)
head(wa_aiw_lda12_topics)
write.csv(wa_aiw_lda12_topics, file = paste("lda-121519/with-alice/wa_lda_norm",k,".csv"))
#visualize
wa_aiw_lda12_tidy<-tidy(wa_aiw_lda12, matrix="beta")
wa_aiw_lda12_tidy
wa_aiw_lda12_tidy_norm <- wa_aiw_lda12_tidy%>%
  mutate(betanorm=((beta - min(beta)) / (max(beta) - min(beta))))
wa_aiw_lda12_tidy_norm
#order words from most prominent to least
wa_aiw_top_terms_norm12<-wa_aiw_lda12_tidy_norm%>%
  group_by(topic)%>%
  top_n(7,betanorm)%>%
  ungroup()%>%
  arrange(topic, -betanorm)
wa_aiw_top_terms_norm12
#plot
wa_aiw_top_terms_norm12%>%
  mutate(term=reorder(term,betanorm))%>%
  ggplot(aes(term, betanorm, fill=factor(topic)))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~topic, scales="free")+
  coord_flip()+
  labs(title = "Twelve Topics for Alice in Wonderland (Normalized for Alice)")
####SENTIMENT ANALYSIS####
#After reviewing the four dictionaries (NRC, AFINN, Loughran, and Bing) available with R
#the most suited to the this material is NRC.
#NRC had a good mix of emotional terms that related to the story.
# Count the number of words associated with each sentiment in nrc
nrc<-get_sentiments("nrc")%>%
  count(sentiment)%>%
  arrange(desc(n))
#All terms of NRC are good descriptions of Alice's emotions throughout the story.
#APPENDING DICTIONARIES
#create new tidy dataframes for texts (w/o alice term) from previous dfs
# Rename term/num to word/count so the join key matches the lexicon column
aiwsenttidy <-aiw_wordFreq_sanssaid%>%
  mutate(word=term, count=num)%>%
  select(-term,-num)%>%
  arrange(desc(count))
head(aiwsenttidy)
#append the nrc dictionary (inner join: words absent from the lexicon are dropped)
aiwsenttidynrc<-aiwsenttidy%>%
  inner_join(get_sentiments("nrc"))
#-------------------------------------------------------------------#
#CANNOT USE THIS AS CHARACTERS WERE REMOVED DURING THE JOIN TO THE SENTIMENT LEXICONS-----------
#create new tidy dataframes for texts (w/alice term) from previous dfs
#convert wa_aiw_tdm to matrix
#wa_aiw_mat <- as.matrix(wa_aiw_tdm)
#sum rows and sort by frequency
#wa_aiw_termfreq <- rowSums(wa_aiw_mat)
#wa_aiw_termfreq <- sort(wa_aiw_termfreq, decreasing = TRUE)
#wa_aiw_termfreq
#sum rows and sort by frequency to create aiw data frame from aiw_termfreq
#wa_aiw_freqsum<-rowSums(wa_aiw_mat)
#wa_aiw_wordFreq <- data.frame(term=names(wa_aiw_freqsum), num=wa_aiw_freqsum)
#wa_aiw_wordFreq
#create new tidy dataframes for texts (w/alice term) from previous dfs
#wa_aiwsenttidy <-wa_aiw_wordFreq%>%
#  mutate(word=term, count=num)%>%
#  select(-term,-num)%>%
#  arrange(desc(count))
#head(wa_aiwsenttidy)
#append the nrc dictionary
#wa_aiwsenttidynrc<-wa_aiwsenttidy%>%
#  inner_join(get_sentiments("nrc"))
#many of the characters were removed during the join
#wa_aiwsenttidyafinn<-wa_aiwsenttidy%>%
#  inner_join(get_sentiments("afinn"))
#many of the characters were removed during the join
#-------------------------------------------------------------------#
####SENTIMENT ANALYSIS CONTINUED####
#visualize positive and negative sentiment in the nrc
aiwnrp_n <- aiwsenttidynrc %>%
  filter(sentiment %in% c("positive", "negative"))%>%
  group_by(sentiment)
summary(aiwnrp_n) #mean count = 2.335
# Keep only words at or above the mean count, then order by count for plotting
aiwnrp_n <- aiwsenttidynrc %>%
  filter(sentiment %in% c("positive", "negative"))%>%
  group_by(sentiment)%>%
  filter(count>=2.335)%>%
  ungroup()%>%
  mutate(word=reorder(word, count))
ggplot(aiwnrp_n, aes(word, count, fill=sentiment))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~sentiment, scales = "free_y")+
  labs(y= "Contribution to Sentiment as measured by NRC (Average Sentiment=2.335)", x=NULL, title = "Overall Sentiment of the Most Frequented Terms in Alice in Wonderland (Not including the term Alice)" )+
  coord_flip()
#remove positive and negative sentiment and visualize each of the eight emotions
#corresponding to plutchik's wheel of emotion
aiwemotion <- aiwsenttidynrc%>%
  filter(!grepl("positive|negative",sentiment))%>%
  group_by(sentiment)%>%
  ungroup()
summary(aiwemotion) #mean 2.39, length=467
aiwemoplot <- aiwemotion%>%
  group_by(sentiment)%>%
  filter(count>=2.39)%>%
  ungroup()%>%
  mutate(word=reorder(word, count))
ggplot(aiwemoplot, aes(word, count, fill=sentiment))+
  geom_col(show.legend = FALSE)+
  facet_wrap(~sentiment, scales = "free_y")+
  labs(y= "Contribution to Sentiment as measured by NRC (Average Sentiment=2.39)", x=NULL, title = "Plutchik's Sentiments in Alice in Wonderland (Not including the term Alice)" )+
  coord_flip()
####N-GRAMS####
# wa_aiw_bigrams_sep <-wa_aiw_bigrams%>%
#   separate(bigram, c("word1", "word2"), sep = " ")
# wa_aiw_bigrams_filt <- wa_aiw_bigrams_sep %>%#filter for stop words
#   filter(!word1 %in% wastops)%>%
#   filter(!word2 %in% wastops)
# wa_aiw_bigrams_count<-wa_aiw_bigrams_filt%>%#count bigrams
#   count(word1, word2, sort = TRUE)
wa_aiw_dtm_tidy<-tidy(wa_aiw_dtm)
wa_aiw_tfidf<-bind_tf_idf(wa_aiw_dtm_tidy, term, document, count)%>%#find words that are important but not too common
  select(-document) %>% #there's only 1 document so let's elim the document column
  arrange(desc(tf)) #look at terms with high tf_idf
head(wa_aiw_tfidf, 20)
summary(wa_aiw_tfidf)#mean count 2.919
write.table(wa_aiw_tfidf,file = "INFO640-Brown-FinalProject-121519/alice-tfterms.txt", sep = ",", quote = FALSE, row.names = F)
#bigrams part 1
# NOTE(review): the input here is single terms from the tf table, so there is
# no adjacent-word context; this is why the n-gram columns come out as NA below.
wa_aiw_bigrams<- wa_aiw_tfidf%>%
  unnest_tokens(bigram, term, token = "ngrams", n=2)%>%
  arrange(desc(tf))
wa_aiw_bigrams #all characters turned into NA values
write.table(wa_aiw_bigrams, file = "INFO640-Brown-FinalProject-121519/alice-bigrams.txt", sep = ",", quote = FALSE, row.names = F)
summary(wa_aiw_bigrams) #mean count = 2.795
wa_aiw_bigrams
#trigrams part 1
wa_aiw_trigrams<-wa_aiw_tfidf%>%
  unnest_tokens(trigram, term, token = "ngrams", n=3)%>%
  arrange(desc(tf))
wa_aiw_trigrams #all characters turned into NA values
write.table(wa_aiw_trigrams, file = "INFO640-Brown-FinalProject-121519/alice-trigrams.txt", sep = ",", quote = FALSE, row.names = F)
summary(wa_aiw_trigrams) #mean count = 2.681
#bigrams part 2 (keep characters?)
# This pass tokenizes the raw book text instead, so real adjacent-word bigrams
# are produced; stopwords are removed after splitting the bigram.
wa_aiw_bigrams2<-aiw_book_text %>%
  unnest_tokens(bigram, text, token = "ngrams", n=2)
wa_aiw_bigrams2
wa_aiw_bigrams2_sep <-wa_aiw_bigrams2%>%
  separate(bigram, c("word1", "word2"), sep = " ")
wa_aiw_bigrams2_filt <- wa_aiw_bigrams2_sep %>%#filter for stop words
  filter(!word1 %in% wastops)%>%
  filter(!word2 %in% wastops)
wa_aiw_bigram2_count <-wa_aiw_bigrams2_filt%>%#count bigrams
  count(word1, word2, sort = TRUE)
wa_aiw_bigram2_count
write.table(wa_aiw_bigram2_count, file = "INFO640-Brown-FinalProject-121519/alice-bigrams2.txt", sep = ",", quote = FALSE, row.names = F)
summary(wa_aiw_bigram2_count) #mean count = 1.163
#trigrams part 2 (same approach, three-word windows)
wa_aiw_trigrams2<-aiw_book_text %>%
  unnest_tokens(trigram, text, token = "ngrams", n=3)
wa_aiw_trigrams2
wa_aiw_trigrams2_sep <-wa_aiw_trigrams2%>%
  separate(trigram, c("word1", "word2", "word3"), sep = " ")
wa_aiw_trigrams2_filt <- wa_aiw_trigrams2_sep %>%#filter for stop words
  filter(!word1 %in% wastops)%>%
  filter(!word2 %in% wastops)%>%
  filter(!word3 %in% wastops)
wa_aiw_trigram2_count <-wa_aiw_trigrams2_filt%>%#count bigrams
  count(word1, word2, word3, sort = TRUE)
wa_aiw_trigram2_count
write.table(wa_aiw_trigram2_count, file = "INFO640-Brown-FinalProject-121519/alice-trigrams2.txt", sep = ",", quote = FALSE, row.names = F)
summary(wa_aiw_trigram2_count) #mean count = 1.037
|
156907ccc8f9154bae7b1b4d321d5d27aac61e47
|
b5efcab211ce6d512ef5d17c5715e767d5de2165
|
/analysis/monteCarlo.R
|
1be249605da3ca6345c045bac669a720affba29c
|
[] |
no_license
|
MBrouns/Zipfs-Law-and-city-development
|
152f5d83b735726068c574bc4b5703b32848812a
|
63ab9c7cf4e1095cbaa4812925af11148f0b9b67
|
refs/heads/master
| 2021-01-02T08:14:27.496860
| 2015-02-15T15:13:27
| 2015-02-15T15:13:27
| 26,484,266
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,845
|
r
|
monteCarlo.R
|
# This code is used to perform a monte carlo analysis on the zipf's law Netlogo model
# rf variable importance
# Analysis setup
storeAllValues <- TRUE      # TRUE: record city sizes every year; FALSE: only final sizes
noOfReplications <- 10
runsToDo <- c(1:10)
runName <- "EV-testing-households"
seed <- 1338
nl.path <- "C:/Program Files (x86)/NetLogo 5.1.0"
model.path <- "C:/Users/Matthijs/Documents/GitHub/Zipfs-Law-and-city-development/netlogo/model.nlogo"
model.runtime <- 80
model.warmup <- 2
# Parameter table: name, min, max. rbind() of c(name, num, num) coerces
# everything to character, so min/max are converted back with as.numeric()
# when the LHS is rescaled below.
variables <- NULL
variables <- rbind(variables, c("noOfCities", 10, 10))
variables <- rbind(variables, c("noOfHouseholds", 25000, 25000))
variables <- rbind(variables, c("job4_Modifier", 10, 10))
variables <- rbind(variables, c("job5_Max", 0.6, 0.6))
variables <- rbind(variables, c("MinDistCityAttractiveness", 0.1, 0.1))
variables <- rbind(variables, c("Job6Attractiveness", 0.4, 0.6))
variables <- rbind(variables, c("job7_Value", 0.5, 0.5))
variables <- rbind(variables, c("job6_TippingPointX", 0.3, 0.3))
variables <- rbind(variables, c("rtm_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("job2_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("Job3Attractiveness", 0.5, 0.6))
variables <- rbind(variables, c("job5_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("minDistBetweenCities", 30, 30))
variables <- rbind(variables, c("maxDistBetweenCities", 500, 500))
variables <- rbind(variables, c("rtm_TippingPointX", 10, 10))
variables <- rbind(variables, c("rtm_PlateauPointX", 20, 20))
# NOTE(review): min 0.75 vs max 75 looks like a typo for 0.75 -- every other
# fixed parameter has identical min and max; verify before relying on runs.
variables <- rbind(variables, c("rtm_PlateauPointY", 0.75, 75))
variables <- rbind(variables, c("rtm_AgeModifier", 0.16, 0.16))
variables <- rbind(variables, c("rtm_ResistancePerChild", 0.05, 0.05))
variables <- rbind(variables, c("MinimalMovingDistance", 100, 100))
variables <- rbind(variables, c("MaximumMovingDistance", 250, 250))
variables <- rbind(variables, c("MaxDistCityAttractiveness", 0.1, 0.1))
variables <- rbind(variables, c("Job1Attractiveness", 0.5, 0.5))
variables <- rbind(variables, c("Job2Attractiveness", 0.5, 0.5))
variables <- rbind(variables, c("Job4Attractiveness", 0.5, 0.5))
variables <- rbind(variables, c("Job5Attractiveness", 0.5, 0.5))
variables <- rbind(variables, c("Job7Attractiveness", 0.5, 0.5))
variables <- rbind(variables, c("job1_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("job3_TippingPointX", 0.3, 0.3))
variables <- rbind(variables, c("job3_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("job4_TippingPointX", 0.04, 0.04))
variables <- rbind(variables, c("job4_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("job4_Max", 0.6, 0.6))
variables <- rbind(variables, c("job5_TippingPointX", 0.04, 0.04))
variables <- rbind(variables, c("job5_Modifier", 10, 10))
variables <- rbind(variables, c("job6_TippingPointY", 0.5, 0.5))
variables <- rbind(variables, c("Seed", seed, seed))
variables <- rbind(variables, c("NumberOfYears", model.runtime, model.runtime))
variables <- rbind(variables, c("WarmUpTime", model.warmup, model.warmup))
# Give the JVM (used by RNetLogo via rJava) enough heap before it starts
options(java.parameters=c("-XX:MaxPermSize=512m","-Xmx4096m"))
#Install required packages if necessary and load them
# (require() is used only as an is-installed probe here; library() below
# does the actual loading and fails loudly if installation did not work)
if(!require(RNetLogo)) install.packages("RNetLogo")
library(RNetLogo)
if(!require(lhs)) install.packages("lhs")
library(lhs)
if(!require(doParallel)) install.packages("doParallel")
library(doParallel)
if(!require(foreach)) install.packages("foreach")
library(foreach)
if(!require(rJava)) install.packages("rJava")
library(rJava)
set.seed(seed)
# Latin Hypercube sample: one row per replication, one column per model
# variable, values initially in [0, 1].
lhs <- data.frame(randomLHS(noOfReplications, nrow(variables), preserveDraw = TRUE))
names(lhs) <- variables[, 1]
# Rescale each column from [0, 1] to its [min, max] range. The arithmetic is
# vectorized over the whole column (the original per-element sapply() produced
# the same values); seq_len() is safe even for an empty `variables` table.
for (index in seq_len(nrow(variables))) {
  lo <- as.numeric(variables[index, 2])  # column min
  hi <- as.numeric(variables[index, 3])  # column max
  lhs[, variables[index, 1]] <- lhs[, variables[index, 1]] * (hi - lo) + lo
}
# Override the sampled household counts (column 2, "noOfHouseholds") with a
# fixed sweep of population sizes, one per replication.
lhs[, 2] <- c(1000, 5000, 10000, 15000, 20000, 30000, 40000, 50000, 75000, 100000)
# Run the models and store the results in a Data Frame.
# Each foreach task starts its own headless NetLogo instance, pushes one row
# of the LHS into the model, runs it, and returns one data frame per run
# (26 rows when storeAllValues, one per city); rows are rbind-ed together.
start.time <- Sys.time()
results <- NULL
results <- data.frame(results)
# NOTE(review): 3 workers are started although the trailing comment says "2";
# set this to the machine's actual core count.
cl<-makeCluster(3, outfile="clusterlog.txt") #change the 2 to your number of CPU cores
registerDoParallel(cl)
results <- foreach(i=runsToDo, .errorhandling="remove", .combine='rbind', .packages=c("RNetLogo","rJava")) %dopar% {
  tryCatch({
    # JVM options must be set inside each worker before rJava spins up
    options(java.parameters=c("-XX:MaxPermSize=512m","-Xmx4096m"))
    print(paste("starting job", i, sep=" "))
    #run the model here
    # create a second NetLogo instance in headless mode (= without GUI)
    # stored in a variable; a per-run name avoids clashes between workers
    nlheadless1 <- paste("nlheadless", i, sep="")
    NLStart(nl.path, gui=F, nl.obj=nlheadless1)
    NLLoadModel(model.path, nl.obj=nlheadless1)
    NLCommand("no-display", nl.obj=nlheadless1)
    # Push this run's LHS row into the model, one "set <var> <value>" per column
    for (j in 1:ncol(lhs)) {
      NLCommand(paste("set", names(lhs)[j], lhs[i, j], sep = " "), nl.obj=nlheadless1)
    }
    NLCommand("setup", nl.obj=nlheadless1)
    if(storeAllValues){
      # Initial (post-setup) population of every city, 0..25
      city0Size <- NLReport("count turtles with [cityIdentifier = 0]", nl.obj=nlheadless1)
      city1Size <- NLReport("count turtles with [cityIdentifier = 1]", nl.obj=nlheadless1)
      city2Size <- NLReport("count turtles with [cityIdentifier = 2]", nl.obj=nlheadless1)
      city3Size <- NLReport("count turtles with [cityIdentifier = 3]", nl.obj=nlheadless1)
      city4Size <- NLReport("count turtles with [cityIdentifier = 4]", nl.obj=nlheadless1)
      city5Size <- NLReport("count turtles with [cityIdentifier = 5]", nl.obj=nlheadless1)
      city6Size <- NLReport("count turtles with [cityIdentifier = 6]", nl.obj=nlheadless1)
      city7Size <- NLReport("count turtles with [cityIdentifier = 7]", nl.obj=nlheadless1)
      city8Size <- NLReport("count turtles with [cityIdentifier = 8]", nl.obj=nlheadless1)
      city9Size <- NLReport("count turtles with [cityIdentifier = 9]", nl.obj=nlheadless1)
      city10Size <- NLReport("count turtles with [cityIdentifier = 10]", nl.obj=nlheadless1)
      city11Size <- NLReport("count turtles with [cityIdentifier = 11]", nl.obj=nlheadless1)
      city12Size <- NLReport("count turtles with [cityIdentifier = 12]", nl.obj=nlheadless1)
      city13Size <- NLReport("count turtles with [cityIdentifier = 13]", nl.obj=nlheadless1)
      city14Size <- NLReport("count turtles with [cityIdentifier = 14]", nl.obj=nlheadless1)
      city15Size <- NLReport("count turtles with [cityIdentifier = 15]", nl.obj=nlheadless1)
      city16Size <- NLReport("count turtles with [cityIdentifier = 16]", nl.obj=nlheadless1)
      city17Size <- NLReport("count turtles with [cityIdentifier = 17]", nl.obj=nlheadless1)
      city18Size <- NLReport("count turtles with [cityIdentifier = 18]", nl.obj=nlheadless1)
      city19Size <- NLReport("count turtles with [cityIdentifier = 19]", nl.obj=nlheadless1)
      city20Size <- NLReport("count turtles with [cityIdentifier = 20]", nl.obj=nlheadless1)
      city21Size <- NLReport("count turtles with [cityIdentifier = 21]", nl.obj=nlheadless1)
      city22Size <- NLReport("count turtles with [cityIdentifier = 22]", nl.obj=nlheadless1)
      city23Size <- NLReport("count turtles with [cityIdentifier = 23]", nl.obj=nlheadless1)
      city24Size <- NLReport("count turtles with [cityIdentifier = 24]", nl.obj=nlheadless1)
      city25Size <- NLReport("count turtles with [cityIdentifier = 25]", nl.obj=nlheadless1)
      # Step the model one year at a time, appending each year's counts
      # (c() appends; vectors are small so the repeated copies are tolerable)
      for (k in 1:model.runtime){
        NLCommand("go", nl.obj=nlheadless1)
        city0Size <- c(city0Size, NLReport("count turtles with [cityIdentifier = 0]", nl.obj=nlheadless1))
        city1Size <- c(city1Size , NLReport("count turtles with [cityIdentifier = 1]", nl.obj=nlheadless1))
        city2Size <- c(city2Size , NLReport("count turtles with [cityIdentifier = 2]", nl.obj=nlheadless1))
        city3Size <- c(city3Size , NLReport("count turtles with [cityIdentifier = 3]", nl.obj=nlheadless1))
        city4Size <- c(city4Size , NLReport("count turtles with [cityIdentifier = 4]", nl.obj=nlheadless1))
        city5Size <- c(city5Size , NLReport("count turtles with [cityIdentifier = 5]", nl.obj=nlheadless1))
        city6Size <- c(city6Size , NLReport("count turtles with [cityIdentifier = 6]", nl.obj=nlheadless1))
        city7Size <- c(city7Size , NLReport("count turtles with [cityIdentifier = 7]", nl.obj=nlheadless1))
        city8Size <- c(city8Size , NLReport("count turtles with [cityIdentifier = 8]", nl.obj=nlheadless1))
        city9Size <- c(city9Size , NLReport("count turtles with [cityIdentifier = 9]", nl.obj=nlheadless1))
        city10Size <- c(city10Size , NLReport("count turtles with [cityIdentifier = 10]", nl.obj=nlheadless1))
        city11Size <- c(city11Size , NLReport("count turtles with [cityIdentifier = 11]", nl.obj=nlheadless1))
        city12Size <- c(city12Size , NLReport("count turtles with [cityIdentifier = 12]", nl.obj=nlheadless1))
        city13Size <- c(city13Size , NLReport("count turtles with [cityIdentifier = 13]", nl.obj=nlheadless1))
        city14Size <- c(city14Size , NLReport("count turtles with [cityIdentifier = 14]", nl.obj=nlheadless1))
        city15Size <- c(city15Size , NLReport("count turtles with [cityIdentifier = 15]", nl.obj=nlheadless1))
        city16Size <- c(city16Size , NLReport("count turtles with [cityIdentifier = 16]", nl.obj=nlheadless1))
        city17Size <- c(city17Size , NLReport("count turtles with [cityIdentifier = 17]", nl.obj=nlheadless1))
        city18Size <- c(city18Size , NLReport("count turtles with [cityIdentifier = 18]", nl.obj=nlheadless1))
        city19Size <- c(city19Size , NLReport("count turtles with [cityIdentifier = 19]", nl.obj=nlheadless1))
        city20Size <- c(city20Size , NLReport("count turtles with [cityIdentifier = 20]", nl.obj=nlheadless1))
        city21Size <- c(city21Size , NLReport("count turtles with [cityIdentifier = 21]", nl.obj=nlheadless1))
        city22Size <- c(city22Size , NLReport("count turtles with [cityIdentifier = 22]", nl.obj=nlheadless1))
        city23Size <- c(city23Size , NLReport("count turtles with [cityIdentifier = 23]", nl.obj=nlheadless1))
        city24Size <- c(city24Size , NLReport("count turtles with [cityIdentifier = 24]", nl.obj=nlheadless1))
        city25Size <- c(city25Size , NLReport("count turtles with [cityIdentifier = 25]", nl.obj=nlheadless1))
      }
      # One output row per city: run number, the LHS parameters, then the
      # per-year size series.
      # NOTE(review): each cityXSize vector has model.runtime + 1 entries
      # (setup value plus one per year) but only model.runtime year columns
      # are named below -- confirm the intended column alignment.
      result <- data.frame(c(i, lhs[i, ], city0Size))
      names(result) <- c("runNo", names(lhs), c(1:model.runtime))
      result <- rbind(result, setNames(c(i, lhs[i, ], city1Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city2Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city3Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city4Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city5Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city6Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city7Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city8Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city9Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city10Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city11Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city12Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city13Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city14Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city15Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city16Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city17Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city18Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city19Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city20Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city21Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city22Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city23Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city24Size), names(result)))
      result <- rbind(result, setNames(c(i, lhs[i, ], city25Size), names(result)))
    }else{
      # Final-state only: run all years in one call, record 6 city sizes
      NLDoCommand(model.runtime, "go", nl.obj=nlheadless1)
      city0Size <- NLReport("count turtles with [cityIdentifier = 0]", nl.obj=nlheadless1)
      city1Size <- NLReport("count turtles with [cityIdentifier = 1]", nl.obj=nlheadless1)
      city2Size <- NLReport("count turtles with [cityIdentifier = 2]", nl.obj=nlheadless1)
      city3Size <- NLReport("count turtles with [cityIdentifier = 3]", nl.obj=nlheadless1)
      city4Size <- NLReport("count turtles with [cityIdentifier = 4]", nl.obj=nlheadless1)
      city5Size <- NLReport("count turtles with [cityIdentifier = 5]", nl.obj=nlheadless1)
      result <- data.frame(c(lhs[i, ], city0Size, city1Size, city2Size, city3Size, city4Size, city5Size))
      names(result) <- c(names(lhs), "City 0", "City 1", "City 2","City 3","City 4","City 5")
    }
    NLQuit(nl.obj=nlheadless1)
    result
  }, warning = function(err){
    # NOTE(review): this handler catches *warnings* only; actual errors are
    # silently dropped by .errorhandling = "remove". The fallback vector also
    # has a different shape than the normal result -- verify it ever rbind-s.
    print(paste("Error in task", i, err, sep=" "))
    c(names(lhs), NA, NA, NA, NA, NA, NA)
  })
}
# Label rows with their city id: assumes all 10 runs succeeded and each
# contributed 26 rows (cities 0..25) in order.
results <- cbind(rep(0:25, 10), results)
names(results)[names(results) == 'rep(0:25, 10)'] <- 'City'
write.table(results, file=paste(runName, "-results",".csv", sep=""), append=TRUE, sep=", ", col.names=F, row.names=FALSE)
NLQuit(all=TRUE)
stopCluster(cl)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken
|
ac6f64becbd566b281cfc6463e73a300ed67f672
|
5bd83f74cd2c7e88c0b56e25d3b9b415dcb18c06
|
/man/hux_pretty_numbers.Rd
|
1145178ddfd99d3a1f8ccfabea523c113bec7240
|
[] |
no_license
|
meerapatelmd/chariotViz
|
123c04e6fc6b09b2ffdc9ef1eb9fa94d227ee846
|
c45947a963b23f75237fe4417dd03b6f27c620d5
|
refs/heads/master
| 2023-07-19T21:44:18.089969
| 2021-09-04T17:30:09
| 2021-09-04T17:30:09
| 394,073,446
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 616
|
rd
|
hux_pretty_numbers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hux_pretty_numbers.R
\name{hux_pretty_numbers}
\alias{hux_pretty_numbers}
\title{Pretty-print numbers in a huxtable}
\usage{
hux_pretty_numbers(ht, ..., big.mark = ",", scientific = FALSE)
}
\arguments{
\item{ht}{PARAM_DESCRIPTION}
\item{...}{PARAM_DESCRIPTION}
\item{big.mark}{PARAM_DESCRIPTION, Default: ','}
\item{scientific}{PARAM_DESCRIPTION, Default: FALSE}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
Formats the numbers in a huxtable for display, using \code{big.mark} as the
thousands separator and optionally scientific notation.
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
\seealso{
\code{\link[huxtable]{huxtable}}
}
|
6b63ab6d294eb43ed594f2ef00883cad5f6e3e30
|
763d01cdd0313411ea5b88c28dcf4ab7ad43d8f9
|
/projects/33/bh-cleanup.R
|
c891fd334ec20ee46845e2856077ff09e4baf7af
|
[
"MIT"
] |
permissive
|
NuriaQueralt/BioHackathon-projects-2020
|
bbc82a25d7eeafb5bb264802480b7830d787ee7c
|
a52d2ccaa9883aa904f60ae772272ac1354396a8
|
refs/heads/master
| 2023-02-22T19:36:30.506935
| 2021-01-24T19:13:32
| 2021-01-24T19:13:32
| 292,899,379
| 0
| 0
| null | 2020-09-04T16:45:08
| 2020-09-04T16:45:08
| null |
UTF-8
|
R
| false
| false
| 7,826
|
r
|
bh-cleanup.R
|
# Continue with result from bh-apicalls.R script,
# or read the txt file with ENA sequences:
# brpossibles = read_tsv("brpossibles.txt")
brpossibles = br3

# Drop vouchers attributed to Meisenburg and vouchers whose acronym merely
# contains "BR" as part of a longer code (e.g. "XBR...").
brf = brpossibles %>%
  filter(!grepl("Meisenburg", specimen_voucher),
         !grepl("[A-Z]BR", specimen_voucher))

# Split each voucher string into whitespace-separated chunks, strip the
# literal "(BR)" herbarium marker, keep only chunks that contain a digit, and
# remove alphabetic runs of 3+ characters (longer than the BR acronym).
# Roman numerals can survive this but are irrelevant: collection/accession
# numbers are matched by their numeric part only.
# seq_len(nrow(.)) is safe on a zero-row table, unlike 1:dim(.)[1].
for (i in seq_len(nrow(brf))) {
  p = str_split(brf$specimen_voucher[i],
                pattern = "\\s")[[1]]
  p = gsub("(BR)", "", p, fixed = TRUE)
  nums = p[grep("[0-9]", p)]
  nums = gsub("[a-z|A-Z]{3,}", "", nums)
  brf$nums[i] = paste(nums, collapse = "|")
}
# Reduce each number-bearing chunk to its digits only
for (i in seq_len(nrow(brf))) {
  nums = strsplit(brf$nums[i], split = "\\|")[[1]]
  nums = gsub("[^0-9]", "", nums)
  brf$numbers[i] = paste(nums, collapse = "|")
}
# Add person name info to the ENA results: match surnames from Meise's
# collector db to the specimen_voucher strings in the ENA sequences.
# All columns are read as character to avoid type guessing. (An earlier
# duplicate read of the same file with na = "" was immediately overwritten
# and has been removed.)
coll = read_tsv("data/meise-coll.txt",
                col_types = cols(.default = "c"))
# Initialise the columns the surname-matching loop below appends to
# (pipe-separated when several collectors match one record)
brf$verbatimRecordedByID = ""
brf$recordedByLastName = ""
brf$recordedBy = ""
brf$recordedByID = ""
for (i in 1:dim(coll)[1]) {
nam = filter(brf,
grepl(coll$recordedByLastName[i],
specimen_voucher)|
grepl(coll$recordedByLastName[i],
collected_by))
if (dim(nam)[1]>0) {
brf[brf$accession%in%nam$accession,
c("verbatimRecordedByID",
"recordedByLastName",
"recordedBy",
"recordedByID")] = t(sapply(seq(1,
dim(nam)[1]),
function(x)
paste(brf[brf$accession%in%nam$accession[x],
c("verbatimRecordedByID",
"recordedByLastName",
"recordedBy",
"recordedByID")],
tibble(coll[i,]),
sep="|")))
}
}
#remove leading pipes
brf$verbatimRecordedByID = gsub("^\\|",
"",
brf$verbatimRecordedByID)
brf$recordedByLastName = gsub("^\\|",
"",
brf$recordedByLastName)
brf$recordedBy = gsub("^\\|",
"",
brf$recordedBy)
brf$recordedByID = gsub("^\\|",
"",
brf$recordedByID)
#read GBIF dataset occurrence file (all columns as character)
dwc = read_tsv("occurrence.txt",
               col_types = cols(.default = "c"))
#extract numeric part from recordNumber (used for matching against ENA voucher numbers)
dwc$num = gsub("[^0-9]","",dwc$recordNumber)
#list multiple recordedByIDs if multiple person records where found
#in the collector database: one row per candidate voucher number
brf2 = brf %>%
  separate_rows(numbers,sep="\\|")
## Obtain GBIF taxonIDs through Wikidata.
# Run a SPARQL query against the Wikidata endpoint and parse the response.
#
# query: SPARQL query string.
# h:     Accept header / response format (default "text/csv", parsed to a tibble).
# Returns the parsed response content.
querki <- function(query, h = "text/csv") {
  # Fail early with a clear message instead of the original require() call,
  # which attaches the whole package and only warns on failure.
  if (!requireNamespace("httr", quietly = TRUE)) {
    stop("Package 'httr' is required for querki().", call. = FALSE)
  }
  response <- httr::GET(url = "https://query.wikidata.org/sparql",
                        query = list(query = query),
                        httr::add_headers(Accept = h),
                        httr::user_agent("Matdillen"))
  return(httr::content(response,
                       type = h,
                       col_types = cols(.default = "c")))
}
#general query: map NCBI taxon IDs to Wikidata items and (optionally) GBIF ids.
query <- 'SELECT ?taxon ?taxonLabel ?ncbi_taxonID ?gbifid WHERE {
  VALUES ?ncbi_taxonID {%s}
  ?taxon wdt:P685 ?ncbi_taxonID.
  OPTIONAL {?taxon wdt:P846 ?gbifid .}
  SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}'
# Query Wikidata in chunks of 30 ids.  The chunk end is clamped to nrow(tax):
# the original `tax$tax_id[i:(i+29)]` read past the end of the vector on the
# final chunk and sent literal "NA" ids to the endpoint.  Results are
# collected in a pre-sized list and bound once, instead of growing `resu`
# with rbind() inside the loop.
chunk_starts <- seq(1, dim(tax)[1], 30)
resu_parts <- vector("list", length(chunk_starts))
for (k in seq_along(chunk_starts)) {
  i <- chunk_starts[k]
  ids <- tax$tax_id[i:min(i + 29, dim(tax)[1])]
  subtax <- paste0("\"",
                   paste(ids, collapse = "\" \""),
                   "\"")
  que <- gsub("%s", subtax, query, fixed = TRUE)
  resu_parts[[k]] <- querki(que)
  print(i)  # progress indicator
}
resu <- do.call(rbind, resu_parts)
# Join the Wikidata/GBIF mapping into the ENA sequence data.
brf = left_join(brf,
                resu,
                by=c("tax_id"="ncbi_taxonID"))
#match for 3 criteria:
# - recordedByID on GBIF matches recordedByID connected to a surname recognized
# in the specimen_voucher string and present in Meise's collector db
# - numeric part of GBIF recordNumber matches a numeric chunk of the specimen
# voucher string in ENA
# - taxonKey of GBIF matches tax_id on ENA,
# mapping between GBIF and NCBI done through Wikidata
# Matching GBIF catalogNumbers are stored pipe-separated in brf$gbifbc
# (empty string when nothing matches).
brf$gbifbc = NA
for (i in 1:dim(brf)[1]) {
  # All expanded rows (one per numeric chunk) for this accession.
  nums = filter(brf2,
                accession==brf$accession[i])
  # [[1]] takes the ID list from the first row only; this is safe because
  # separate_rows() split only `numbers`, so every row of an accession
  # carries the same recordedByID string.
  ids = strsplit(nums$recordedByID,
                 split="\\|")[[1]]
  # Candidate GBIF occurrences satisfying all three criteria.
  # (Reuses the names `nums` and `resu` from earlier in the script.)
  resu = filter(dwc,
                recordedByID%in%ids,
                num%in%nums$numbers,
                !is.na(recordedByID),
                !is.na(recordNumber),
                taxonKey%in%nums$gbifid)
  brf$gbifbc[i] = paste(resu$catalogNumber,
                        collapse="|")
}
# Persist the matched ENA results.
write_tsv(brf, "br-results.txt", na = "")

# Generate jsons for the django app:
# expand pipe-separated GBIF barcodes so each match gets its own row.
brf3 = brf
brf4 = separate_rows(brf3, gbifbc, sep = "\\|")

# dwc is the occurrence Darwin Core file from GBIF
# (for this instance, it's the file for the MeiseBG herbarium).
gbifn = select(dwc, gbifID, catalogNumber, scientificName, countryCode,
               recordedBy, eventDate, institutionCode, basisOfRecord)

# Attach the GBIF occurrence fields to each expanded barcode match.
brf5 = left_join(brf4, gbifn, by = c("gbifbc" = "catalogNumber"))
#render jsons
#alldata for each ENA sequence (includes GBIF data)
# Build a named list: one entry per accession with at least one match;
# a single tibble row when there is exactly one match, a list of rows when
# there are several.
# NOTE(review): `brf6` is not defined anywhere in this script — the last
# object built above is `brf5`; confirm whether brf6 should be brf5 or is
# created in code outside this file.
li = list()
names = ""  # seeded with "" and trimmed below; accumulates accession names
k=1
for (i in 1:dim(brf3)[1]) {
  dat = filter(brf6,accession == brf3$accession[i])
  if (dim(dat)[1]==1) {
    li[[k]] = select(dat,-accession)
    k = k + 1
  }
  if (dim(dat)[1]>1) {
    # One sub-list element per matched GBIF occurrence.
    li[[k]] = vector("list",dim(dat)[1])
    for (j in 1:dim(dat)[1]) {
      li[[k]][[j]] = select(dat,-accession)[j,]
    }
    k = k + 1
  }
  if (dim(dat)[1]!=0) {
    names = c(names,dat$accession[1])
  }
}
# Drop the "" seed, then name entries by accession.
names = names[-1]
names(li) = names
#only ids of matches: ENA to (multiple) GBIF ids
# Same traversal as the `li` loop above, but keeping only the gbifID values.
# NOTE(review): relies on the same undefined `brf6` — likely brf5; confirm.
li2 = list()
names = ""  # seeded with "" and trimmed below
k=1
for (i in 1:dim(brf3)[1]) {
  dat = filter(brf6,accession == brf3$accession[i])
  if (dim(dat)[1]==1) {
    li2[[k]] = dat$gbifID
    k = k + 1
  }
  if (dim(dat)[1]>1) {
    li2[[k]] = vector("list",dim(dat)[1])
    for (j in 1:dim(dat)[1]) {
      li2[[k]][[j]] = dat$gbifID[j]
    }
    k = k + 1
  }
  if (dim(dat)[1]!=0) {
    names = c(names,dat$accession[1])
  }
}
names = names[-1]
names(li2) = names
#stilltodo: similar json listing with all data for each GBIF id
# Export the match lists as JSON for the django app.
# toJSON() comes from jsonlite, which must be attached earlier in the file.
# (TRUE spelled out instead of the reassignable shorthand T.)
comb = toJSON(li2, pretty = TRUE, auto_unbox = TRUE)
write(comb, "matchids.json")
comb2 = toJSON(li, pretty = TRUE, auto_unbox = TRUE)
write(comb2, "matchids-alldata.json")
|
ef96a12b03b2dc5172391f91d19950625f2f565b
|
86e31fb088f45fe875977dee91fbeb7bdb819706
|
/man/mkd_lm_results.Rd
|
1c905dc5ee718877109c52a762f28f31c429a355
|
[] |
no_license
|
freuerde/puzzle
|
74ab078008971735308453dfad443c7787b4321b
|
8d07a59debdaf4c1f83193746e8732daa92088fa
|
refs/heads/master
| 2022-04-28T22:20:32.194741
| 2022-03-11T05:51:15
| 2022-03-11T05:51:15
| 161,799,720
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,048
|
rd
|
mkd_lm_results.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mkd_lm_results.R
\name{mkd_lm_results}
\alias{mkd_lm_results}
\title{Creates a markdown document showing the linear regression results}
\usage{
mkd_lm_results(
lm_list,
dat = NULL,
mkd_path = NULL,
open = FALSE,
overwrite = TRUE
)
}
\arguments{
\item{lm_list}{A (named) list object including linear regression models.}
\item{dat}{Optional data object used by the models. Default is NULL.}
\item{mkd_path}{Path to the folder, where the markdown document should be stored.
If NULL, the current path will be used.}
\item{open}{Should the HTML output be opened immediately after saving? Default is FALSE.}
\item{overwrite}{Should an already existing file be overwritten? Default is TRUE.}
}
\value{
Saves an HTML document without returning anything.
}
\description{
Creates a markdown document for a (named) list of linear models (from rms::ols() or lm())
showing clearly the summary statistics and estimates in tables that can be presented.
}
\details{
This function is also used within the verif_lm() function.
}
\author{
Dennis Freuer
}
|
8e54ae5a1b158e45b56e789ef24f8b519a52803e
|
d86dc658f9e948c83a2bd0015a9412916830bbdf
|
/binomial/man/bin_variable.Rd
|
ad9f72bb715fbc76bb5076d197c509341bf97fbc
|
[] |
no_license
|
stat133-sp19/hw-stat133-kejunzhou123
|
40fe9b286b6edb964e5434f4f1455d65b4a3a625
|
f32a357833025470f8a19a1a4b860e4aba52c8b7
|
refs/heads/master
| 2020-04-28T18:20:16.608993
| 2019-05-02T21:47:15
| 2019-05-02T21:47:15
| 175,475,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 480
|
rd
|
bin_variable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_variable}
\alias{bin_variable}
\title{bin_variable}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{trial numbers}
\item{prob}{success probability}
}
\value{
a list containing trials and probability
}
\description{
Checks the trials and probability inputs and returns a binomial variable object as a list.
}
\examples{
bin1 <- bin_variable(trials = 10, prob = 0.3)
binsum1 <- summary(bin1)
binsum1
}
|
1a0741b9bccfb747535834b99c1b7d1501341c28
|
214b0eafe04af176044ac6bf157fa56b2cfc08da
|
/R Programming/Week 1/Forms of the Extract Operator in R.R
|
18122e5d2dd6c9654dfa64eaca935846758e7587
|
[] |
no_license
|
jonathanecm/Data-Science
|
534be2fbbdf145f10f1ea785fa54d42ce6c8017b
|
bb7628e815bc1f29ffa796644c09809fc393d618
|
refs/heads/master
| 2021-04-09T08:13:34.501426
| 2016-07-09T20:57:33
| 2016-07-09T20:57:33
| 60,739,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,015
|
r
|
Forms of the Extract Operator in R.R
|
# Forms of the Extract Operator in R
## Extracting elements from a vector.
### Extractor ([]) used to extract content from vectors, lists, or data frames.
#### Using [] with direct referencing of elements.
x <- 16:30 # vector definition
x[13:15] # Extracting the last three elements (13 to 15) from the vector.
#### Using [] with the length of the object to calculate the indexes.
x[(length(x) - 2): length(x)]
## Extracting elements from a data frame.
### Approach 1: Using [[]] to extract a column from a data frame as a vector (a data frame is also a list.)
col <- "cyl" # Assigning the column name to an object.
mtcars[[col]]
### Approach 2: Use the name in the column dimension of the data frame.
mtcars[,col] # Selecting all the rows of the column.
### Approach 3: Using the $ form of the extractor operator through the name of the column.
mtcars$cyl
## Advanced subsetting using the extractor operator.
### Subsetting columns.
#### Approach 1: Subsetting with columns' numbers.
mtcars[, 1:3]
#### Approach 2: Subsetting with columns' names.
columns <- c("mpg", "cyl", "disp") # Creates a vector with the names of the columns to extract.
mtcars[, columns] # Extracts the columns in the vector "columns."
### Subsetting rows.
#### Approach 1: Use exact row references.
mtcars[20:22, ]
#### Approach 2: Use logic in the row dimension of reference.
mtcars[mtcars$cyl == 4 & mtcars$am == 1,] # Gets all the rows according to the logical expressions in all the columns.
mtcars[mtcars[, "cyl"] == 4, ] # Subsets all the rows where the column is equal to 4.
#### Approach 3: Use the which() function to extract indices.
rows_subset <- which(mtcars$cyl == 4) # Extracting the indices from the column "cyl" with rows' values equal to 4.
mtcars[rows_subset,]
#### Approach 4: Using output from a function that returns a logical array instead of row numbers.
mtcars[!is.na(mtcars[, "cyl"]),] # Extracts all the rows that do not contain NA values in the column "cyl."
|
58507bc7755da29bd2d1fe4b6e10da25f3735bd1
|
de33a793a74752ddb4baa657f59869d9c56161e0
|
/R/shiny_vector_filter_numeric_few.R
|
5ad9800b198f244e52fd30007f8c22e019825deb
|
[
"MIT",
"Artistic-2.0"
] |
permissive
|
MayaGans/IDEAFilter
|
e2eb8aa1f1779e7cfd2bf0a36c83269a40577627
|
c81cc1f988bd74e3588f60946c2fcb39ba2f5d04
|
refs/heads/master
| 2022-11-07T19:56:18.293838
| 2020-06-02T03:14:22
| 2020-06-02T03:14:22
| 251,663,396
| 2
| 2
|
NOASSERTION
| 2021-08-07T15:07:46
| 2020-03-31T16:27:25
|
R
|
UTF-8
|
R
| false
| false
| 3,014
|
r
|
shiny_vector_filter_numeric_few.R
|
#' A vector filter for numeric variables with only a few choices
#'
#' Shiny module server rendering a checkbox group over the distinct values of
#' a vector, and returning (as reactiveValues) both the filtering expression
#' and the logical mask it produces.
#'
#' @param input requisite shiny module field specifying incoming ui input
#'   reactiveValues
#' @param output requisite shiny module field capturing output for the shiny
#'   data filter ui
#' @param session requisite shiny module field containing the active shiny
#'   session
#' @param x a \code{reactive} returning the vector to filter; its sorted,
#'   unique non-NA values become the checkbox choices (default is a
#'   factor-valued reactive — assumes factor-like input, TODO confirm)
#' @param filter_na a \code{reactive} returning a \code{logical}; when TRUE
#'   the generated filter code also drops \code{NA} values
#' @param verbose a \code{logical} value indicating whether or not to print log
#'   statements out to the console
#'
#' @return a \code{reactiveValues} with \code{code} (a quoted filter
#'   expression in terms of \code{.x}) and \code{mask} (the evaluated
#'   logical mask over \code{x()})
#'
#' @importFrom shiny reactive reactiveValues renderUI div plotOutput sliderInput
#'   isolate tags validate need renderPlot
#' @importFrom ggplot2 ggplot aes aes_ geom_area theme_void scale_x_continuous
#'   scale_y_continuous
#' @importFrom grDevices rgb
#' @importFrom stats density
#' @export
shiny_vector_filter_numeric_few <- function(input, output, session,
    x = shiny::reactive(factor()), #important: changed x to factor here
    filter_na = shiny::reactive(FALSE), verbose = FALSE) {
  ns <- session$ns
  # x with NA entries removed; basis for the checkbox choices.
  x_wo_NA <- shiny::reactive(Filter(Negate(is.na), x()))
  module_return <- shiny::reactiveValues(code = TRUE, mask = TRUE)
  # Distinct values, sorted, as character (checkboxGroupInput needs strings).
  choices <- shiny::reactive(unique(as.character(sort(x_wo_NA()))))
  output$ui <- shiny::renderUI({
    filter_log("updating ui", verbose = verbose)
    shiny::div(style = "position: relative;",
      shiny::div(style = "
        position: absolute;
        top: -2px; right: 16px; bottom: -2px; left: 16px;
        animation:
          0.75s ease-out 0s 1 shinyDataFilterEnlargeX,
          0.5s ease-in 0s 1 shinyDataFilterFadeIn;
        transform-origin: left;" #,
      ),
      # isolate() keeps the current selection when choices re-render.
      shiny::checkboxGroupInput(ns("param"), NULL,
        choices = choices(),
        selected = shiny::isolate(input$param) %||% c(),
        width = "100%"))
  })
  # Normalized
  # ggplot2::ggplot() +
  #   # sort factor so that it reflects checkbox order
  #   ggplot2::aes(x = factor(
  #     as.character(x_wo_NA()),
  #     levels = rev(choices()))) +
  #   ggplot2::geom_bar(
  #     width = 0.95,
  #     fill = grDevices::rgb(66/255, 139/255, 202/255),
  #     color = NA,
  #     alpha = 0.2) +
  #   ggplot2::coord_flip() +
  #   ggplot2::theme_void() +
  #   ggplot2::scale_x_discrete(expand = c(0, 0)) +
  #   ggplot2::scale_y_continuous(expand = c(0, 0))
  # Quoted filter expression in terms of the placeholder .x:
  # - selections made: keep selected values; NA is also kept unless
  #   filter_na() is TRUE (NA prepended to the %in% set otherwise).
  # - no selection: drop NA only if filter_na(), else pass everything (TRUE).
  module_return$code <- shiny::reactive({
    if (length(input$param))
      bquote(.x %in% .(c(if (filter_na()) c() else NA, input$param)))
    else if (filter_na())
      bquote(!is.na(.x))
    else
      TRUE
  })
  # Evaluate the code against x() by substituting .x, yielding a logical mask.
  module_return$mask <- shiny::reactive({
    eval(do.call(substitute, list(module_return$code(), list(.x = x())))) # added numeric() to return val, got errors. Then removed
  })
  module_return
}
|
1bf967e4b055b1100fa883b6924624ab0e9921ca
|
ef3651d556d9f397eeb978b0f36934a7443b7cbe
|
/Project 2 final code.R
|
4d9ccbe4313fd5bc689bde487b48a82a1f9bda75
|
[] |
no_license
|
meet-chauhan/R-analytics---Customer-Contract-Renewal-Analysis
|
4c893a2ac14775ad72ef5760fd1816a73e615d19
|
a0b832df07e70eb818da52744b93b74dece5edca
|
refs/heads/master
| 2020-05-14T15:16:49.222804
| 2019-04-17T08:37:17
| 2019-04-17T08:37:17
| 181,849,628
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,158
|
r
|
Project 2 final code.R
|
library(parallel)
# NOTE(review): hard-coded absolute working directory; breaks on any other
# machine. Prefer relative paths or a project-root helper.
setwd("C:/Users/meetr/Desktop/Fall 2018/R Analytics INSY 5392/project2")
# Contract dataset: one row per service agreement, with case/escalation/
# response covariates and a renewal status column used below.
d<-read.csv("final_data.csv")
#d<-d1[sample(nrow(d1), 15000), ]
#t = d$t
#number_of_Cases = d$number_of_Cases
#number_of_escalation = d$number_of_escalation
#number_of_response_missed = d$number_of_response_missed
#number_of_parts_used = d$number_of_parts_used
#number_of_response_missed = d$number_of_response_missed
# Service-agreement identifiers; n = number of observations.
ID_Alone <- d$SAID
#n=100
n <- length(ID_Alone)
time.start<-proc.time()[3]
# Three-worker cluster used by the parSapply() calls below.
cl<-makeCluster(3)
# x<-matrix(runif(1000000),ncol=5)
# Export everything defined so far (including d and n) to the workers.
# NOTE(review): ls() captures only objects that exist at this point;
# functions defined later must be exported separately.
clusterExport(cl=cl,ls(),envir=.GlobalEnv)
# ---------------------------------------------------------------------------
# Negative log-likelihood for the contract-renewal logistic model.
#
# The original script repeated the same scalar for-loops in eight nearly
# identical functions (ffff .. ffff7), differing only in which parameter is
# fixed at its null value.  All of them now delegate to one vectorized core;
# each wrapper keeps its original name and signature so nlminb()/parSapply()
# callers are unaffected, and the arithmetic (hence all MLE values and
# p-values) is unchanged.
#
# Parameters:
#   a, b   : intercept and slope of the logistic link z = a + b * y
#   beta   : exponential time-discount rate on contract age d$t
#   re, rm, rp, rrm : weights on escalations, missed single visits,
#                     parts used, and missed responses
# Relies on the data frame `d` (exported to the cluster above).
nll_core <- function(a, b, beta, re, rm, rp, rrm) {
  # Time-discounted case volume scaled by service-quality covariates.
  y <- exp(-beta * d$t) * log(1 + d$number_of_Cases) *
    (1 + re  * log(1 + d$number_of_escalation) +
         rm  * log(1 + d$number_of_single_visit_missed) +
         rp  * log(1 + d$number_of_parts_used) +
         rrm * log(1 + d$number_of_response_missed))
  p <- 1 / (1 + exp(-(a + b * y)))        # renewal probability (logistic link)
  pp <- ifelse(d$status == 1, p, 1 - p)   # per-row likelihood contribution
  -sum(log(pp))                           # negative log-likelihood (minimized)
}
# The workers only received objects that existed at the earlier
# clusterExport(ls()) call, so the shared core must be exported explicitly.
clusterExport(cl, "nll_core")

# Full model: all seven parameters free.
ffff <- function(par) {
  nll_core(par[1], par[2], par[3], par[4], par[5], par[6], par[7])
}
# NOTE(review): evaluating ffff at the scalars 1..1000 leaves par[2..7] NA,
# so every entry of `ans` is NA; kept for fidelity with the original script.
ans <- parSapply(cl = cl, 1:1000, ffff)
MLE <- nlminb(c(0, 0, 0, 0, 0, 0, 0), objective = ffff,
              control = list(eval.max = 1000, iter.max = 1500))
MLE

# Restricted model: a = 0 (no intercept).
ffff1 <- function(par) {
  nll_core(0, par[1], par[2], par[3], par[4], par[5], par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff1)
MLE_a <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff1,
                control = list(eval.max = 1000, iter.max = 1500))
MLE_a
# Likelihood-ratio p-value for H0: a = 0 (chi-squared, 1 df).
p_a <- 1 - pchisq(2 * (MLE_a$objective - MLE$objective), 1)
p_a

# Restricted model: b = 0 (intercept-only; the usage score drops out).
ffff2 <- function(par) {
  nll_core(par[1], 0, par[2], par[3], par[4], par[5], par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff2)
MLE_b <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff2,
                control = list(eval.max = 1000, iter.max = 1500))
MLE_b
p_b <- 1 - pchisq(2 * (MLE_b$objective - MLE$objective), 1)
p_b

# Restricted model: beta fixed at 1 (original hard-coded exp(-t)).
ffff3 <- function(par) {
  nll_core(par[1], par[2], 1, par[3], par[4], par[5], par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff3)
MLE_beta <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff3,
                   control = list(eval.max = 1000, iter.max = 1500))
MLE_beta
p_beta <- 1 - pchisq(2 * (MLE_beta$objective - MLE$objective), 1)
p_beta

# Restricted model: re = 0 (no escalation effect).
ffff4 <- function(par) {
  nll_core(par[1], par[2], par[3], 0, par[4], par[5], par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff4)
MLE_re <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff4,
                 control = list(eval.max = 1000, iter.max = 1500))
MLE_re
p_re <- 1 - pchisq(2 * (MLE_re$objective - MLE$objective), 1)
p_re

# Restricted model: rm = 0 (no missed-single-visit effect).
ffff5 <- function(par) {
  nll_core(par[1], par[2], par[3], par[4], 0, par[5], par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff5)
MLE_rm <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff5,
                 control = list(eval.max = 1000, iter.max = 1500))
MLE_rm
p_rm <- 1 - pchisq(2 * (MLE_rm$objective - MLE$objective), 1)
p_rm

# Restricted model: rp = 0 (no parts-used effect).
ffff6 <- function(par) {
  nll_core(par[1], par[2], par[3], par[4], par[5], 0, par[6])
}
ans <- parSapply(cl = cl, 1:1000, ffff6)
MLE_rp <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff6,
                 control = list(eval.max = 1000, iter.max = 1500))
MLE_rp
p_rp <- 1 - pchisq(2 * (MLE_rp$objective - MLE$objective), 1)
p_rp

# Restricted model: rrm = 0 (no missed-response effect).
ffff7 <- function(par) {
  nll_core(par[1], par[2], par[3], par[4], par[5], par[6], 0)
}
ans <- parSapply(cl = cl, 1:1000, ffff7)
MLE_rrm <- nlminb(c(0, 0, 0, 0, 0, 0), objective = ffff7,
                  control = list(eval.max = 1000, iter.max = 1500))
MLE_rrm
p_rrm <- 1 - pchisq(2 * (MLE_rrm$objective - MLE$objective), 1)
p_rrm

# Summary of all fits and likelihood-ratio p-values.
MLE
MLE_a
MLE_b
MLE_beta
MLE_re
MLE_rm
MLE_rp
MLE_rrm
p_a
p_b
p_beta
p_re
p_rm
p_rp
p_rrm
# Release the worker processes (the original script leaked the cluster).
stopCluster(cl)
|
1a79076ee2d509eb0cc2ff1ca0d9572e3f6edd6e
|
84c27ec545e7a5e9448d95c0676b882317fafd7c
|
/R/MxSE.R
|
44768306e9004bc06a60a2f91b3aaac32aa2ca00
|
[] |
no_license
|
OpenMx/OpenMx
|
ac58c848b4ce63079c79ccad13f972d81c90d348
|
cbe1c3207453b92efc96b4fc37205cbe231dda27
|
refs/heads/master
| 2023-08-24T11:01:53.655345
| 2023-08-20T20:30:35
| 2023-08-20T20:30:35
| 4,393,940
| 86
| 50
| null | 2023-09-01T01:57:08
| 2012-05-21T13:38:34
|
R
|
UTF-8
|
R
| false
| false
| 8,298
|
r
|
MxSE.R
|
# Copyright 2007-2020 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
# Author: Michael D. Hunter
# Date: 2016-08-12 09:28:02 by mhunter
# Date: 2016-08-13 add roxygen by tbates
# Filename: MxSE.R
# Purpose: Define the mxSE() function.
# This function lets the user obtain standard errors for arbitrary
# expressions, named entities, and algebras.
# It is a frontend-only file that works much like mxEval.
#------------------------------------------------------------------------------
##' Compute standard errors in OpenMx
##'
##' @description
##' This function allows you to obtain standard errors for arbitrary
##' expressions, named entities, and algebras.
##'
##' @param x the parameter to get SEs on (reference or expression)
##' @param model the \code{\link{mxModel}} to use.
##' @param details logical. Whether to provide further details, e.g. the full
##' sampling covariance matrix of x.
##' @param cov optional matrix of covariances among the free parameters. If
##' missing, the inverse Hessian from the fitted model is used.
##' @param forceName logical; defaults to \code{FALSE}. Set to \code{TRUE}
##' if \code{x} is an R symbol that refers to a character string.
##' @param silent logical; defaults to \code{FALSE}. If \code{TRUE},
##' message-printing is suppressed.
##' @param ... further named arguments passed to \code{\link{mxEval}}
##' @param defvar.row which row to load for any definition variables
##' @param data name of data from which to load definition variables
##'
##' @details
##' x can be the name of an algebra, a bracket address, named entity
##' or arbitrary expression.
##' When the \code{details} argument is TRUE, the full
##' sampling covariance matrix of \code{x} is also returned as part of a list.
##' The square root of the diagonals of this sampling covariance matrix are
##' the standard errors.
##'
##' When supplying the \code{cov} argument, take care that the free parameter
##' covariance matrix is given, not the information matrix. These
##' two are inverses of one another.
##'
##' This function uses the delta method to compute the standard error of arbitrary
##' and possibly nonlinear functions of the free parameters. The delta method
##' makes a first-order Taylor approximation of the nonlinear function. The
##' nonlinear function is a map from all the free parameters to some transformed
##' subset of parameters: the linearization of this map is given by the Jacobian
##' \eqn{J}. In equation form, the delta method computes standard errors by the following:
##'
##' \deqn{J^T C J}
##'
##' where \eqn{J} is the Jacobian of the nonlinear parameter transformation
##' and \eqn{C} is the covariance matrix of the free parameters (e.g., two
##' times the inverse of the Hessian of the minus two log likelihood function).
##'
##' @return SE value(s) returned as a matrix when \code{details} is FALSE.
##' When \code{details} is TRUE, a list of the SE value(s) and the full
##' sampling covariance matrix.
##' @seealso - \code{\link{mxCI}}
##' @references - \url{https://en.wikipedia.org/wiki/Standard_error}
##' @examples
##' library(OpenMx)
##' data(demoOneFactor)
##' # ===============================
##' # = Make and run a 1-factor CFA =
##' # ===============================
##'
##' latents = c("G") # the latent factor
##' manifests = names(demoOneFactor) # manifest variables to be modeled
##' # ===========================
##' # = Make and run the model! =
##' # ===========================
##' m1 <- mxModel("One Factor", type = "RAM",
##' manifestVars = manifests, latentVars = latents,
##' mxPath(from = latents, to = manifests, labels=paste0('lambda', 1:5)),
##' mxPath(from = manifests, arrows = 2),
##' mxPath(from = latents, arrows = 2, free = FALSE, values = 1),
##' mxData(cov(demoOneFactor), type = "cov", numObs = 500)
##' )
##' m1 = mxRun(m1)
##' mxSE(lambda5, model = m1)
##' mxSE(lambda1^2, model = m1)
mxSE <- function(x, model, details=FALSE, cov, forceName=FALSE, silent=FALSE, ...,
                 defvar.row=as.integer(NA), data='data'){
  warnModelCreatedByOldVersion(model)
  # Free-parameter names are needed both in the user-supplied-`cov` branch
  # below and for the delta-method step; compute them up front.
  # (Historically `paramnames` was defined only after this if/else chain,
  # so the branch validating a user-supplied `cov` failed with
  # "object 'paramnames' not found".  Adding the on-the-fly algebra later
  # does not change the free-parameter set, so hoisting is safe.)
  freeparams <- omxGetParameters(model)
  paramnames <- names(freeparams)
  if(length(model@output) > 0 && missing(cov)){
    ParamsCov <- try(vcov(model))
    if(is(ParamsCov,"try-error")){
      msg <- "Model does not have a reasonable vcov matrix or standard errors."
      stop(msg)
    }
    # if(length(model@output$infoDefinite) && !single.na(model@output$infoDefinite)){
    #   # An indefinite Hessian usually means some SEs will be NaN:
    #   ParamsCov <- 2*solve(model@output$hessian)
    #   dimnames(ParamsCov) <- dimnames(model@output$hessian)
    # } else {
  } else if (missing(cov)){
    stop("Model does not have output and 'cov' argument is missing. I'm a doctor, not a bricklayer!\nWas this model run with mxRun?")
  } else {
    ParamsCov <- cov
    # A user-supplied covariance matrix without dimnames is accepted only if
    # its size matches the number of free parameters.
    if(is.null(dimnames(ParamsCov))){
      if(length(paramnames) == nrow(ParamsCov)){
        dimnames(ParamsCov) <- list(paramnames, paramnames)
      }else{
        stop(paste0("dimnames of user-supplied parameter covariance matrix are null\nand the number of rows (", nrow(ParamsCov), ") do not match the number of free parameters (", length(paramnames), ")."))
      }
    }
  }
  xorig <- "x" #<--Initialize as something that will always be understandable in an error message.
  # Decide how to interpret `x`: an unevaluated expression, a character
  # naming an entity, or an object that stores a character.
  isCallEtc <- any(c('call', 'language', 'MxAlgebraFormula') %in% is(match.call()$x))
  ex <- try(eval(x), silent=TRUE)
  isChar <- !('try-error' %in% is(ex)) && is.character(ex)
  if(isCallEtc && !forceName && !isChar){
    if(!silent){message('Treating first argument as an expression')}
    xalg <- mxAlgebraFromString(Reduce(paste, deparse(match.call()$x)), name='onTheFlyAlgebra')
    xorig <- Reduce(paste, deparse(match.call()$x))
    x <- "onTheFlyAlgebra"
    model <- mxModel(model, xalg)
  } else if ('character' %in% is(x) && !isCallEtc) {
    if(!silent){message('Treating first argument as a character')}
    xalg <- mxAlgebraFromString(Reduce(paste, match.call()$x), name='onTheFlyAlgebra')
    xorig <- x
    x <- "onTheFlyAlgebra"
    model <- mxModel(model, xalg)
  } else if(isChar){
    if(!silent){message('Treating first argument as an object that stores a character')}
    xalg <- mxAlgebraFromString(ex, name='onTheFlyAlgebra')
    xorig <- ex
    x <- "onTheFlyAlgebra"
    model <- mxModel(model, xalg)
  } else {
    stop("Please, sir. 'x' must be either the name of an entity in the model, or an expression for an MxAlgebra.")
  }
  # Get current algebra/matrix values:
  zoutMat <- try(mxEvalByName(x, model, compute=TRUE),silent=silent)
  if(is(zoutMat, "try-error")) {
    stop(paste0("Couldn't evaluate expression ", omxQuotes(xorig), ". Might help to check if it works in mxEval.\n",
                "Recall also that elements of submodels are addressed as submodelName.objectName\n",
                "For example, to refer to an object called 'bob' in submodel 'sub1', you would say 'sub1.bob'."))
  }
  covParam <- ParamsCov
  # Delta method: sampling covariance of the transform is J %*% C %*% t(J),
  # where J is the Jacobian of x with respect to the free parameters.
  jModel <- mxModel(model, mxComputeJacobian(of=x, defvar.row=defvar.row, data=data))
  jModel <- mxRun(jModel, silent=TRUE)
  jacTrans <- jModel$compute$output$jacobian
  covSparam <- jacTrans %*% covParam %*% t(jacTrans)
  # dimnames(covSparam) <- list(rownames(zoutMat), colnames(zoutMat))
  if(any(diag(covSparam) < 0) || any(is.na(diag(covSparam)))){
    warning("Some diagonal elements of the repeated-sampling covariance matrix of the point estimates are less than zero or NA.\nI know, right? Set details=TRUE and check the 'Cov' element of this object.")
  }
  SEs <- suppressWarnings(sqrt(diag(covSparam)))
  SEsMat <- matrix(SEs, nrow = nrow(zoutMat), ncol = ncol(zoutMat))
  if(details==TRUE){
    return(list(SE=SEsMat, Cov=covSparam))
  } else{
    return(SEsMat)
  }
}
|
89c6bba3997a3151d6fc5d17f97ad5cd05b9dbaa
|
93d3f810a4169d7bd993641e6f776af1616dd79e
|
/utils/r_tidy_utils/Docker/bin/combine_tabular_files.R
|
ea7526708317ffe88f58aed56944115e5b349637
|
[] |
no_license
|
CRI-iAtlas/iatlas-workflows
|
699c3b897e580a391e9700b48b91b64e2a03eb55
|
d94151d3aaadee96b52f27e4ce84692c0366fe75
|
refs/heads/develop
| 2023-05-24T11:08:56.155005
| 2023-05-23T15:21:58
| 2023-05-23T15:21:58
| 156,773,901
| 2
| 4
| null | 2023-05-22T21:19:12
| 2018-11-08T21:54:27
|
Common Workflow Language
|
UTF-8
|
R
| false
| false
| 864
|
r
|
combine_tabular_files.R
|
# Combine multiple delimited tabular files into a single output file.
library(argparse)
library(magrittr)
library(purrr)
library(readr)
library(dplyr)

# ---- Command-line interface ----
parser = ArgumentParser(description = "Combine multiple tabular files into one.")
parser$add_argument("-f", "--files",
                    type = "character",
                    nargs = "+",
                    required = TRUE,
                    help = "array of tabular files to combine")
parser$add_argument("-i", "--input_delimiter",
                    type = "character",
                    default = "\t")
parser$add_argument("-o", "--output_delimiter",
                    type = "character",
                    default = "\t")
parser$add_argument("-n", "--output_file_name",
                    type = "character",
                    default = "output.tsv")
args <- parser$parse_args()

# ---- Read each file, stack the rows, write the combined table ----
tables <- lapply(args$files, readr::read_delim, delim = args$input_delimiter)
combined <- dplyr::bind_rows(tables)
readr::write_delim(combined, args$output_file_name, delim = args$output_delimiter)
|
96ac0f7db154d142705f006cc4de282e56b131da
|
8724910530b7c5d927ed5019738545c3208a93e6
|
/shinyUI(pageWithSidebar(.R
|
e9e81ec507fe4a09cf3b693cba5cde48861bbf0a
|
[] |
no_license
|
saraabi/DevelopingDataProducts
|
51907d407b4c8756579aa459ce1e7546f25c103e
|
ca3300bca40196e979b85fb9df53f9068b249185
|
refs/heads/master
| 2016-09-06T17:11:40.811317
| 2015-07-16T00:16:20
| 2015-07-16T00:16:20
| 39,167,358
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 326
|
r
|
shinyUI(pageWithSidebar(.R
|
library(shiny)

# UI for the "Is this number prime?" app: one numeric input in the sidebar,
# two verbatim text outputs in the main panel.
header <- headerPanel("Is this number prime?")
sidebar <- sidebarPanel(
  numericInput('input1', 'Input any integer between 2 and 10,000', 0, min = 0, max = 10000, step = 1)
)
main <- mainPanel(
  h2('You entered'), verbatimTextOutput("oid1"),
  h2('This number is '), verbatimTextOutput("primeorno")
)

shinyUI(pageWithSidebar(header, sidebar, main))
|
274ba9e60149a60d3690b6d772ebf842dde8fca7
|
4bc5ec512acba81ae288b2fe67b5c63558b51646
|
/inst/test/code/select-3.R
|
15f7a4955a9d0a4a4e57ddf53c10e3f7d9fb5745
|
[
"MIT"
] |
permissive
|
eshaimran/mario
|
5e54c3e28e9a25cb23b946315e4e1d80dceb243e
|
8ba99dbe607e322212493ded7ebdb172994f5206
|
refs/heads/main
| 2023-08-29T02:25:20.231413
| 2021-11-06T00:21:25
| 2021-11-06T00:21:25
| 425,271,666
| 0
| 0
|
NOASSERTION
| 2021-11-06T14:58:56
| 2021-11-06T14:58:55
| null |
UTF-8
|
R
| false
| false
| 145
|
r
|
select-3.R
|
library(dplyr)

# First five rows of mtcars, keeping four columns.
mt <- select(slice(mtcars, 1:5), mpg, cyl, disp, hp)

# Drop cyl and reorder, then narrow to the final three columns
# (printed as the script's result).
mt_reordered <- select(mt, -cyl, mpg, hp, disp)
select(mt_reordered, mpg, hp, disp)
|
86ed5ba50e584153a8e9aeb26d5afb9a877e3d33
|
c2f842c35192068e91ec37e55b3ae17fef589ca0
|
/plot3.R
|
76950d28b3e63b42039db7e90b619962014bd1bb
|
[] |
no_license
|
behnam8011/ExData_Plotting1
|
9600803f1503778719c364394669e794cdae76b1
|
238d537c80350f96580eb26571d98c355737080a
|
refs/heads/master
| 2021-01-17T23:18:09.564134
| 2016-03-05T05:01:52
| 2016-03-05T05:01:52
| 53,150,023
| 0
| 0
| null | 2016-03-04T16:42:51
| 2016-03-04T16:42:51
| null |
UTF-8
|
R
| false
| false
| 1,171
|
r
|
plot3.R
|
# Plot 3 of the Exploratory Data Analysis assignment: sub-metering time series
# for 2007-02-01 .. 2007-02-02, saved to plot3.png.
# NOTE(review): hard-coded absolute working directory; breaks elsewhere.
setwd("C:/Users/Behnam/Documents/Ben/Analytics Training/Data-Science-Coursera/Exploratory-Data-Analysis/Week-1/")
# Peek at the first 10 rows only to infer each column's class...
df <- read.table(file = "./household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE, nrows = 10)
colclass <- sapply(df,class)
# ...then read the full file and re-apply the inferred classes below.
# NOTE(review): passing colClasses = colclass to read.table would avoid the
# double read, but would change parsing slightly; left as-is.
df <- read.table(file = "./household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
for (i in 1:ncol(df)) {
  class(df[[i]]) <- colclass[i]
}
# NOTE(review): require() + install.packages() at run time is fragile;
# library() with pre-installed dependencies is preferred.
if (!require("lubridate")) {
  install.packages("lubridate")
  library(lubridate)
}
# Combine Date and Time into a single POSIXct timestamp column.
df$Date_Time <- dmy_hms(paste(df$Date,df$Time,"-"))
df$Date <- NULL
df$Time <- NULL
if (!require("dplyr")) {
  install.packages("dplyr")
  library(dplyr)
}
# Restrict to the two-day window of interest.
d <- df %>% filter(Date_Time >= ymd_hms("2007-02-01 00:00:00") & Date_Time <= ymd_hms("2007-02-02 23:59:00"))
png('plot3.png',height = 480, width = 480)
plot(d$Date_Time, d$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(d$Date_Time, d$Sub_metering_2, type = "l", col = 2)
lines(d$Date_Time, d$Sub_metering_3, type = "l", col = 4)
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c(1,2,4), lty = c(1,1,1))
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.