blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
815ac5c24a6bca5c2ca0fa6c524057188b3ece8d
|
c0e968187ca7b3f93f29562467f66f534cff43da
|
/analysis_code_and_data/mbmf_online_data_quality.R
|
c37af9786dadcc70e32419e5e428417566ba4711
|
[] |
no_license
|
eknowles-bch/online_two_step_replication
|
0cb41d471872693e48284056a7986347758a1697
|
f0a561ef15b7b55e1956669cc507a03226e1d269
|
refs/heads/master
| 2023-03-07T06:30:13.071168
| 2021-02-25T21:22:08
| 2021-02-25T21:22:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,652
|
r
|
mbmf_online_data_quality.R
|
## Check mbmf data quality ##
# Kate Nussenbaum - katenuss@nyu.edu
# Last updated: 7/17/20
# This script reads in all mbmf data files and saves a .txt file with
# mean data quality metrics across age groups, as well as plots of metrics
# across all subs and binned by age
#### Load needed libraries ####
library(tidyverse)
library(glue)
library(magrittr)
# age group function
# Add age group variable to data frame with raw ages
# Add an age_group factor (Children / Adolescents / Adults) to a data frame.
#
# Args:
#   df: data frame containing a raw-age column.
#   ageColumn: unquoted column name holding ages in years.
# Returns: df with an age_group factor column appended.
# Ages < 13 are Children, ages in (12.9, 18) Adolescents, ages >= 18 Adults;
# missing ages yield NA (case_when falls through to NA by design).
addAgeGroup <- function(df, ageColumn){
  age_col <- enquo(ageColumn)
  group_levels <- c("Children", "Adolescents", "Adults")
  df %>%
    mutate(
      age_group = case_when(
        (!!age_col) < 13 ~ "Children",
        (!!age_col) > 12.9 & (!!age_col) < 18 ~ "Adolescents",
        (!!age_col) >= 18 ~ "Adults"
      ),
      age_group = factor(age_group, levels = group_levels)
    )
}
#get list of files
data_files <- list.files(path = "data/online/online_csvs/")

#### Read in data ####
# Read each subject's csv, attach per-subject quality metrics, and stack all
# subjects into one data frame. Per-subject frames are collected in a list and
# bound once at the end (replaces the original rbind-inside-the-loop, which is
# quadratic in the number of files).
sub_data_list <- vector("list", length(data_files))
for (i in seq_along(data_files)) {
  file_path <- glue("data/online/online_csvs/{data_files[i]}")
  sub_data <- read_csv(file_path)

  #get task date from filename (4th underscore-delimited field of the full path)
  sub_data$task_date <- sapply(strsplit(file_path, '_'), `[`, 4)

  #compute the number of browser interactions
  #(interaction log is a "\r\n"-separated string in the last row; the -2
  # presumably discards two non-interaction entries -- TODO confirm)
  num_interactions = length(str_split(tail(sub_data, 1)$interactions, pattern = "\r\n")[[1]]) - 2
  sub_data$num_interactions <- num_interactions

  #compute number of quiz questions answered correctly
  #(a correct answer triggers the 'Correct.wav' feedback stimulus)
  num_quiz_correct = nrow(sub_data %>% filter(grepl('Correct.wav', stimulus)))
  sub_data$correct_quiz_questions <- num_quiz_correct

  #determine whether explicit question was answered correctly
  #(key codes "49"/"48" -- NOTE(review): presumably the '1' and '0' keys; confirm)
  red_planet_first = sub_data$red_planet_first_rocket[1]
  rocket_sides = sub_data$rocket_sides[1]
  correct_explicit_response = case_when(red_planet_first == rocket_sides ~ "49",
                                        red_planet_first != rocket_sides ~ "48")
  # pull() extracts a plain vector; the original select() produced a one-column
  # data frame whose comparison inside case_when relied on implicit coercion
  explicit_response = sub_data %>%
    filter(grepl('explicit', trial_type)) %>%
    pull(key_press)
  sub_data %<>% mutate(explicit_q_correct = case_when(explicit_response == correct_explicit_response ~ 1,
                                                      explicit_response != correct_explicit_response ~ 0))

  #get explicit question reaction time
  explicit_response_rt = sub_data %>%
    filter(grepl('explicit', trial_type)) %>%
    pull(rt)
  sub_data %<>% mutate(explicit_rt = as.numeric(explicit_response_rt))

  #store this subject for binding below
  sub_data_list[[i]] <- sub_data
}
#combine all subjects into one data frame in a single pass
data <- bind_rows(sub_data_list)
#select only the quality-relevant columns, drop practice trials, and convert
#reaction times (read in as character) to numeric -- one pipeline instead of
#the compound-assignment pipe plus a separate base-R conversion
data <- data %>%
  select(trial_index,
         subject_id,
         task_date,
         choice,
         rt,
         trial_stage,
         transition,
         practice_trial,
         reward,
         num_interactions,
         correct_quiz_questions,
         explicit_q_correct,
         explicit_rt) %>%
  filter(practice_trial == "real") %>%
  mutate(rt = as.numeric(rt))
#compute stats
# One row per subject x session date with the data-quality metrics used for
# the plots and tables below.
summary_stats <- data %>%
  group_by(subject_id, task_date) %>%
  summarize(num_quiz_correct = mean(correct_quiz_questions, na.rm = T), # constant within subject; mean() recovers it
            reward_earned = sum(reward, na.rm = T),
            left_choices =sum(choice==1, na.rm = T),
            right_choices =sum(choice==2, na.rm = T),
            missed_responses = sum(is.na(choice)),
            mean_rt = mean(rt, na.rm = T),
            fast_rts = sum(rt < 150, na.rm = T), # count of implausibly fast responses (< 150 ms)
            browser_interactions = mean(num_interactions, na.rm = T), # constant within subject
            explicit_q_correct = mean(explicit_q_correct, na.rm = T),
            explicit_rt = mean(explicit_rt, na.rm = T))
#read in subject ages
sub_ages <- read_csv('data/online/mbmf_ages.csv')
# subject_id must be character on both sides of the join below
sub_ages$subject_id <- as.character(sub_ages$subject_id)
#combine with summary stats
# full_join keeps subjects that appear in only one of the two tables
# (their unmatched columns become NA)
summary_stats <- full_join(summary_stats, sub_ages, by = "subject_id")
#add age group
summary_stats <- addAgeGroup(summary_stats, age)
# keep only the metrics that get plotted, plus the age-group bin
stats_to_plot <- summary_stats %>%
  select(fast_rts, browser_interactions, num_quiz_correct, missed_responses, age_group)
#### Make histograms ####
# One histogram per quality metric, across all participants. Each plot is
# printed and then saved under output/online_data/quality_checking/.
#Fast RTs
rts_hist <- ggplot(stats_to_plot, aes(x = fast_rts)) +
  geom_histogram(bins = 50, fill = "grey", color = "black", center = T) +
  xlab("Number of RTs < 150 ms") +
  ylab("Number of participants") +
  theme_minimal()
rts_hist
ggsave('output/online_data/quality_checking/rts_hist.png', plot = last_plot(), height = 2.5, width = 3, unit = "in", dpi = 300)
#Comprehension questions
quiz_hist <- ggplot(stats_to_plot, aes(x = num_quiz_correct)) +
  geom_histogram(bins = 3, fill = "grey", color = "black", center = T) +
  xlab("Comprehension questions correct") +
  ylab("Number of participants") +
  theme_minimal()
quiz_hist
ggsave('output/online_data/quality_checking/quiz_hist.png', plot = last_plot(), height = 2.5, width = 3, unit = "in", dpi = 300)
#browser interactions
browser_hist <- ggplot(stats_to_plot, aes(x = browser_interactions)) +
  geom_histogram(bins = 30, fill = "grey", color = "black", center = T) +
  xlab("Number of browser interactions") +
  ylab("Number of participants") +
  theme_minimal()
browser_hist
# Fixed: this figure was previously saved under output/decker_data/..., unlike
# every other figure in this script, which goes to output/online_data/...
ggsave('output/online_data/quality_checking/browser_hist.png', plot = last_plot(), height = 2.5, width = 3, unit = "in", dpi = 300)
#missed responses
missed_hist <- ggplot(stats_to_plot, aes(x = missed_responses)) +
  geom_histogram(bins = 50, fill = "grey", color = "black", center = T) +
  xlab("Number of missed responses") +
  ylab("Number of participants") +
  theme_minimal()
missed_hist
ggsave('output/online_data/quality_checking/missed_hist.png', plot = last_plot(), height = 2.5, width = 3, unit = "in", dpi = 300)
#histograms with age group
# Same four metrics, faceted by age group. Dropping rows whose age_group is
# NA (subjects with no age on file) so they don't get their own facet.
stats_to_plot <- stats_to_plot %>%
  filter(age_group == "Children" | age_group == "Adolescents" | age_group == "Adults")
#Fast RTs
rts_hist_age <- ggplot(stats_to_plot, aes(x = fast_rts, fill = age_group)) +
  facet_wrap(~age_group) +
  geom_histogram(bins = 20, center = T, position = "dodge", color = "black") +
  scale_fill_brewer(palette = "Set2") +
  xlab("Number of RTs < 150 ms") +
  ylab("Number of participants") +
  theme_minimal() +
  theme(legend.position = "none") # facet labels already identify the groups
rts_hist_age
ggsave('output/online_data/quality_checking/rts_hist_age.png', plot = last_plot(), height = 2.5, width = 5, unit = "in", dpi = 300)
#Comprehension questions
quiz_hist_age <- ggplot(stats_to_plot, aes(x = num_quiz_correct, fill = age_group)) +
  facet_wrap(~age_group) +
  scale_fill_brewer(palette = "Set2") +
  geom_histogram(bins = 3, color = "black", center = T) +
  xlab("Comprehension questions correct") +
  ylab("Number of participants") +
  theme_minimal() +
  theme(legend.position = "none")
quiz_hist_age
ggsave('output/online_data/quality_checking/quiz_hist_age.png', plot = last_plot(), height = 2.5, width = 5, unit = "in", dpi = 300)
#browser interactions
browser_hist_age <- ggplot(stats_to_plot, aes(x = browser_interactions, fill = age_group)) +
  facet_wrap(~age_group) +
  scale_fill_brewer(palette = "Set2") +
  geom_histogram(bins = 10, color = "black", center = T) +
  xlab("Number of browser interactions") +
  ylab("Number of participants") +
  theme_minimal() +
  theme(legend.position = "none")
browser_hist_age
ggsave('output/online_data/quality_checking/browser_hist_age.png', plot = last_plot(), height = 2.5, width = 5, unit = "in", dpi = 300)
#missed responses
missed_hist_age <- ggplot(stats_to_plot, aes(x = missed_responses, fill = age_group)) +
  facet_wrap(~age_group) +
  scale_fill_brewer(palette = "Set2") +
  geom_histogram(bins = 20, color = "black", center = T) +
  xlab("Number of missed responses") +
  ylab("Number of participants") +
  theme_minimal() +
  theme(legend.position = "none")
missed_hist_age
ggsave('output/online_data/quality_checking/missed_hist_age.png', plot = last_plot(), height = 2.5, width = 5, unit = "in", dpi = 300)
#### Compute age group stats ####
# Mean, SD, and median of every numeric QC metric within each age group.
# Fixes two deprecated dplyr usages: a bare predicate must be wrapped in
# where() inside across(), and extra arguments (na.rm) must go inside the
# function lambdas rather than through across()'s `...`.
age_group_stats <- stats_to_plot %>%
  group_by(age_group) %>%
  summarise(across(
    .cols = where(is.numeric),
    .fns = list(mean = ~mean(.x, na.rm = TRUE),
                sd = ~sd(.x, na.rm = TRUE),
                median = ~median(.x, na.rm = TRUE)),
    .names = "{col}_{fn}"
  ))
# round everything numeric for the exported table
# (across() replaces the superseded mutate_if)
age_group_stats <- age_group_stats %>%
  mutate(across(where(is.numeric), ~round(.x, digits = 3)))
write_delim(age_group_stats, 'output/online_data/quality_checking/age_group_stats.txt',
            delim = "\t")
#### Age and gender distribution ####
# Histogram of subject ages, stacked by gender.
sub_ages_plot <- ggplot(sub_ages, aes(x = age, fill = gender)) +
  geom_histogram(breaks = c(8:26), color = "black") + # one-year bins from 8 to 26
  scale_fill_brewer(type = "seq", name = "Gender") +
  ylab("Number of participants") +
  xlab("Age (years)") +
  theme_minimal()
sub_ages_plot
ggsave('output/online_data/quality_checking/sub_ages.png', plot = last_plot(), height = 2.5, width = 5, unit = "in", dpi = 300)
#### Make summary table ####
# Counts per age group of how many participants would survive various
# candidate exclusion thresholds (quiz accuracy, browser interactions,
# missed responses, fast RTs).
mbmf_data_summary <- stats_to_plot %>%
  group_by(age_group) %>%
  summarize(one_quiz_correct = sum(num_quiz_correct >= 1),
            two_quiz_correct = sum(num_quiz_correct >= 2),
            three_quiz_correct = sum(num_quiz_correct == 3),
            browser_int_under_3 = sum(browser_interactions <= 3),
            browser_int_under_5 = sum(browser_interactions <= 5),
            browser_int_under_10 = sum(browser_interactions <= 10),
            missed_under_10 = sum(missed_responses <= 10),
            missed_under_20 = sum(missed_responses <= 20),
            missed_under_40 = sum(missed_responses <= 40),
            fast_under_10 = sum(fast_rts <= 10),
            fast_under_20 = sum(fast_rts <= 20),
            fast_under_40 = sum(fast_rts <= 40)
  )
mbmf_data_summary
|
2a69d441d8329927c1bb48b2922dac0fb1387831
|
254c032cc8490c34212da47e8a2eaaf9d00de8b0
|
/R/covariance_functions.R
|
f9ee5d45994ae40d6f242b6ffadd276633fc8009
|
[] |
no_license
|
nefan/registration
|
f60aa5ed37202d5e7bd3cac03e20cee6f85e0e3a
|
d3425cd69c0ab06f44b2136827477a789bab6013
|
refs/heads/master
| 2021-01-18T02:57:48.830580
| 2015-01-19T10:50:53
| 2015-01-19T10:50:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,167
|
r
|
covariance_functions.R
|
# TODO:
# EXPLICITLY GENERATE INVERSE OF BROWNIAN COVARIANCES (+OTHERS?!)
#' Generate Brownian covariances
#'
#' This function generates a Brownian motion/bridge covariance matrix corresponding to specified evaluation points.
#' @param t evaluation points.
#' @param tau scale parameter.
#' @param type type of covariance, either 'motion' or 'bridge'.
#' @keywords covariance
#' @export
#' @examples
#' t <- seq(0, 1, length = 10)
#' Brownian_cov(t, 1)
Brownian_cov <- function(t, tau, type = 'motion') {
  # Vectorized: C[i, j] = tau^2 * (min(t_i, t_j) - [bridge] * t_i * t_j).
  # outer(t, t, pmin) gives the pairwise-minimum matrix and outer(t, t) the
  # pairwise products, replacing the original O(m^2) scalar double loop.
  # Also handles length(t) == 0 gracefully (returns a 0x0 matrix), where the
  # original 1:m loop would misbehave.
  is_bridge <- type == 'bridge'
  C <- tau^2 * (outer(t, t, pmin) - is_bridge * outer(t, t))
  return(C)
}
#' Generate Brownian motion covariances
#'
#' This function generates a Brownian motion covariance matrix corresponding to specified evaluation points.
#' @param t evaluation points.
#' @param tau scale parameter.
#' @keywords covariance
#' @export
#' @examples
#' t <- seq(0, 1, length = 10)
#' Brownian_motion_cov_fast(t)
#TODO: UPDATE
Brownian_motion_cov_fast <- function(t, tau = 1) {
  # Fast Brownian-motion covariance: C[a, b] = tau^2 * t[min(a, b)], which
  # equals tau^2 * min(t[a], t[b]) when t is sorted ascending (the index-min
  # semantics of the original overwrite loop are preserved exactly).
  # Bug fix: `tau` was documented as a scale parameter but never used; it now
  # scales the matrix by tau^2, consistent with Brownian_cov(). The default
  # tau = 1 preserves the behavior of all existing calls.
  m <- length(t)
  idx <- outer(seq_len(m), seq_len(m), pmin)
  C <- tau^2 * matrix(t[idx], m, m)
  return(C)
}
#' Generate Matern plus measurement noise covariances
#'
#' This function generates a Matern motion covariance matrix corresponding to specified evaluation points.
#' @param t evaluation points.
#' @param param parameter vector consisting of scale, range and smoothness.
#' @param noise logical, should a diagonal matrix be added to the Matern covariance?
#' @keywords covariance
#' @export
#' @examples
#' t <- seq(0, 1, length = 10)
#' Matern_cov(t, param = c(1, 1, 1/2))
Matern_cov <- function(t, param = c(scale = 1, range = 1, smoothness = 2), noise = TRUE) {
  # Unpack the parameter vector (positional: scale, range, smoothness).
  # `Matern` is not defined in this file -- presumably supplied by an attached
  # package (likely `fields`); verify before reuse.
  scale <- param[1]
  range <- param[2]
  smoothness <- param[3]
  m <- length(t)
  # Diagonal: Matern covariance at lag 0 plus a nugget when noise = TRUE
  # (`noise` is coerced numerically: TRUE -> 1, FALSE -> 0).
  S <- diag(x = Matern(0, scale = scale, range = range, smoothness = smoothness) + noise, nrow = m)
  # Fill the i-th off-diagonal band symmetrically, using t[i + 1] as the lag
  # distance. NOTE(review): this is only correct when the evaluation points
  # are equally spaced and start at 0 (so t[i + 1] equals the lag i distance);
  # confirm, since irregular t would yield wrong covariances.
  i <- 1
  while (i < m) {
    S[cbind(1:(m - i), (1 + i):m)] <- S[cbind((1 + i):m, 1:(m - i))] <- Matern(t[i + 1], scale = scale, range = range, smoothness = smoothness)
    i <- i + 1
  }
  return(S)
}
|
72ded32c9c5990c6022bf11dd5cfdad6dd676d88
|
d817306da0abebcc7185fcfdf4953bb9d2a48f68
|
/code/0_auxiliar_functions.R
|
3841d68a9e3bd2dda30759844d15d6357a35d645
|
[] |
no_license
|
ejosymart/MVEchubmackerel
|
c5b821dcb4723c090ce00e5d3d255c1944d63a09
|
085a7444683d8101d49f5ccf6c504dd199e2a2b6
|
refs/heads/master
| 2023-07-10T15:57:08.385722
| 2021-08-08T19:43:25
| 2021-08-08T19:43:25
| 394,046,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,794
|
r
|
0_auxiliar_functions.R
|
# Generalized variance-inflation factors (GVIF) for a fitted model, computed
# from the correlation matrix of the coefficient estimates (after
# Fox & Monette 1992; same quantities reported by car::vif).
#
# Args:
#   mod: a fitted model accepted by vcov()/model.matrix()/terms(), e.g. lm.
# Returns (invisibly): a data frame with column GVIF when every term has
#   1 df, otherwise a matrix with columns GVIF, Df, GVIF^(1/2Df); or a
#   character message when the fit is too degenerate to compute VIFs.
myvif <- function(mod){
  v <- vcov(mod)
  assign <- attributes(model.matrix(mod))$assign
  # Drop the intercept row/column: VIFs are defined for predictors only.
  if (names(coefficients(mod)[1]) == "(Intercept)") {
    v <- v[-1, -1]
    assign <- assign[-1]
  } else warning("No intercept: vifs may not be sensible.")
  terms <- labels(terms(mod))
  n.terms <- length(terms)
  if (n.terms < 2) stop("The model contains fewer than 2 terms")
  # Degenerate fit: more model-matrix columns than estimable coefficients.
  # Bug fix: the original referenced a global `tmp_cor` here (a local of
  # corvif(), not visible in this function's enclosing environment), so this
  # branch errored with "object 'tmp_cor' not found" whenever it was reached;
  # compute a correlation matrix locally from the coefficient covariance.
  if (length(assign) > dim(v)[1] ) {
    tmp_cor <- cov2cor(v)
    diag(tmp_cor) <- 0
    if (any(tmp_cor == 1.0)){
      return("Sample size is too small, 100% collinearity is present")
    } else {
      return("Sample size is too small")
    }
  }
  # Correlation matrix of the coefficient estimates.
  R <- cov2cor(v)
  detR <- det(R)
  result <- matrix(0, n.terms, 3)
  rownames(result) <- terms
  colnames(result) <- c("GVIF", "Df", "GVIF^(1/2Df)")
  # GVIF for each term: det(R_term) * det(R_others) / det(R).
  for (term in 1:n.terms) {
    subs <- which(assign == term)
    result[term, 1] <- det(as.matrix(R[subs, subs])) * det(as.matrix(R[-subs, -subs])) / detR
    result[term, 2] <- length(subs)
  }
  if (all(result[, 2] == 1)) {
    # All single-df terms: GVIF reduces to the ordinary VIF.
    result <- data.frame(GVIF=result[, 1])
  } else {
    # Df-adjusted GVIF^(1/(2*Df)) makes multi-df terms comparable.
    result[, 3] <- result[, 1]^(1/(2 * result[, 2]))
  }
  invisible(result)
}
# Print the correlation matrix of a set of candidate predictors and their
# variance-inflation factors (via myvif on a dummy regression).
# Side effects only: prints to the console; no return value of interest.
corvif <- function(dataz){
  dataz <- as.data.frame(dataz)
  #correlation part
  cat("Correlations of the variables\n\n")
  # NOTE(review): tmp_cor is local to this function; myvif() below references
  # a `tmp_cor` that it can only find in the global environment (lexical
  # scoping), so its degenerate branch does NOT see this matrix -- verify.
  tmp_cor <- cor(dataz,use="complete.obs")
  print(tmp_cor)
  #vif part
  # Regress a constant dummy response on all candidate predictors so that
  # myvif() can extract the coefficient covariance structure.
  # NOTE(review): strsplit on the names assumes they contain no spaces.
  form <- formula(paste("fooy ~ ",paste(strsplit(names(dataz)," "),collapse=" + ")))
  dataz <- data.frame(fooy=1,dataz)
  lm_mod <- lm(form,dataz)
  cat("\n\nVariance inflation factors\n\n")
  print(myvif(lm_mod))
}
# Print the area covered by presence cells (value == 1) of a raster-like
# object, converted to the requested unit.
#
# Args:
#   data: object supporting `==` subsetting and res() (presumably a raster;
#         res()[1]^2 is the per-cell size in degrees squared).
#   to: "km2" (factor 111) or "miles2" (factor 69).
# Prints "Area - presence: <value> <unit>".
area <- function(data, to = "km2"){
  dat <- data
  n_presence_cells <- length(dat[dat == 1])
  unit_factor <- switch(to,
                        km2 = 111,
                        miles2 = 69)
  out <- n_presence_cells * res(dat)[1]^2 * unit_factor
  print(paste("Area - presence:", round(out, 1), to))
}
|
04a0a8ffc749eea8ddf8b074d5acd4a0cc767130
|
24ac6bd01d0ff7256cb7c3d628672077a3ee471f
|
/AreaCentre.R
|
0a08c4d8cb8f73080d3025d405e0f7d2372431f9
|
[] |
no_license
|
fall2018-wallace/ist6
|
ed2a23acb9eabefb4deacba1433a68c217b8596e
|
6a0033227715ca87fe599effd3e114ca58f65f0a
|
refs/heads/master
| 2020-03-30T20:47:56.721907
| 2018-10-18T01:26:32
| 2018-10-18T01:26:32
| 151,604,353
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 471
|
r
|
AreaCentre.R
|
library(maps)
library(ggplot2)
# Built-in US state datasets: names, land areas, and geographic centers.
stateName<- state.name #Reading in the inbuilt dataset state.name
area<- state.area #Reading in the inbuilt dataset state.area
center<- state.center #Reading in the inbuilt data frame state.center
dataframe<- data.frame(stateName,area,center) #Creates a dataframe of the three variables
dataframe
# NOTE(review): `mergedDataframe` is never defined in this file (and its
# casing differs from `mergedDataFrame1`); this merge fails with
# "object 'mergedDataframe' not found" unless it is created elsewhere
# (e.g. in an earlier script run in the same session) -- verify.
mergedDataFrame1<- merge(mergedDataframe, dataframe, by='stateName') #Merges the dataframes MergedDataframes and Dataframe
mergedDataFrame1
|
12e642f90b7bbd4a585ee5f58229aaa1c0a43130
|
c9e02a75abbd1d5048446a65aa23b10f79492b2f
|
/scripts/populations.R
|
aaddb61fa7b714108dd80b4148c42255e5efe019
|
[] |
no_license
|
somasushma/R-code
|
f8290d3ecd8ea87ef778b1deb0b7222e84b811be
|
2e1f0e05ae56ebe87354caeb374aebb19bf00080
|
refs/heads/master
| 2021-10-27T14:42:26.847193
| 2021-10-25T22:02:51
| 2021-10-25T22:02:51
| 137,162,116
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,725
|
r
|
populations.R
|
## Exploratory PCA scatter plots of Global25 coordinates, with selected
## populations highlighted. This is interactive, order-dependent plotting
## code: later sections reuse variables (j, k, colr) set in earlier ones.
library("readr")
library("dplyr")
# Raw and scaled Global25 coordinate tables (paths are machine-specific).
g25 = read_csv("R/Dataexperiments/data/Global_25_PCA.csv")
g25s = read_csv("R/Dataexperiments/data/Global25_scaled.csv")
g25=data.frame(g25)
# Pop labels look like "Population:Individual"; keep the population part.
pops=strsplit(x=g25$Pop,split = ":" )
indv=unlist(lapply(pops, function(x) x[1]))
pops=unique(indv)
# Index of each sample's population, used to pick a gray shade per population.
# NOTE(review): colr %% 30 is 0 for every 30th population, and indexing with 0
# drops that element, silently misaligning colors with points -- verify.
colr=sapply(indv, function(x) match(x, pops))
write.table(x = pops, file = "~/R/Dataexperiments/data/pops.txt")
#Indian ethnogenesis---------------
# Plot PC2 vs PC3 (data columns j and j+1; column 1 holds the Pop label).
j=2
par(mar=c(2,2,2,1), mgp=c(1,.4,0))
plot(x=g25[,j], y=g25[,(j+1)], pch=16, cex=.5, col=gray.colors(30)[colr %% 30], main = substitute(paste("PC", j, ",", k), list(j=j-1, k=j)), xlab = j-1, ylab = j)
points(g25[grep("Udegram|Aligram|Loebanr|Saidu|Barikot|Butkara|Arkotkila", g25$Pop),j], g25[grep("Udegram|Aligram|Loebanr|Saidu|Barikot|Butkara|Arkotkila", g25$Pop),j+1], pch=1, cex=1, col="blue") #Gandhara Grave+Swat
points(g25[grep("Brahmin|Iyer", g25$Pop, ignore.case = T),j], g25[grep("Brahmin|Iyer", g25$Pop, ignore.case = T),(j+1)], pch=16, col="red")
points(g25[grep("Yamnaya", g25$Pop, ignore.case = T),j], g25[grep("Yamnaya", g25$Pop, ignore.case = T),j+1], pch=16, col="darkviolet")
points(g25[grep("Sintashta|Srubnaya|Poltavka|Potapovka_MLBA|Kazakh_Mys", g25$Pop, ignore.case = T),j], g25[grep("Sintashta|Srubnaya|Poltavka|Potapovka_MLBA|Kazakh_Mys", g25$Pop, ignore.case = T),j+1], pch=16, col="darkred")
points(g25[grep("Han|Japanese", g25$Pop),j], g25[grep("Han|Japanese", g25$Pop),j+1], pch=16, col="darkgreen")
points(g25[grep("Alan", g25$Pop),j], g25[grep("Alan", g25$Pop),j+1], pch=16, col="darkblue")
points(g25[grep("Onge", g25$Pop),j], g25[grep("Onge", g25$Pop),j+1], pch=16, col="orange")
points(g25[grep("Paniya", g25$Pop),j], g25[grep("Paniya", g25$Pop),j+1], pch=16, col="black")
points(g25[grep("Turkmeni", g25$Pop),j], g25[grep("Turkmeni", g25$Pop),j+1], pch=15, col="darkblue") #Turkmenistan IA
points(g25[grep("Kalash", g25$Pop),j], g25[grep("Kalash", g25$Pop),j+1], pch=8, col="blue") #Kalash
points(g25[grep("Ror", g25$Pop),j], g25[grep("Ror", g25$Pop),j+1], pch=4, col="orangered4") #Ror
# NOTE(review): `k` is not defined until the "Mongols, Huns" section below, so
# running this section first from a fresh session errors here -- verify order.
points(g25[grep("Sarmatian", g25$Pop),c(j,j+k)], pch=16, cex=1, col="green") #Sarmatian
#labels for PC2,3 (coordinates picked by hand from the plot)
text(x=0,y=-.04,labels = "brAh", col="red")
text(x=-0.02226465,y=-0.01911738,labels = "brAh.E", col="red", pos=4)
text(x=-0.04199393,y=0.001367171,labels = "Jap/Han", col="darkgreen", pos = 4)
text(x=0.0112972,y=-0.005385978,labels = "Alans/T'stan IA", col="darkblue", pos = 4)
text(x=0.006749008,y=0.02005088,labels = "Steppe BA", col="darkred")
text(x=-0.02331969,y=-0.03555004,labels = "Onge", col="orange", pos=4)
text(x=-0.01761788,y=-0.05493501,labels = "Paniya", col="black", pos=4)
text(x=0.006338866,y=-0.02294417,labels = "Kalash", col="blue")
text(x=-0.0005307815,y=-0.01664123,labels = "Ror", col="orangered4")
text(x=0.002825712,y=0.004293535,labels = "Sarmat", col="green")
#clines (hand-picked endpoints)
segments(x0=0.009175605, x1=-0.011292210, y0=-0.008312343, y1= -0.048831236, lty=3, col="black", lwd = 2)
segments(x0=0.001820451, x1=0.004534657, y0=-0.01776675, y1= 0.00812032, lty=3, col="blue", lwd = 2)
#legend
legend(x="bottomleft", legend = c("brAhmaNa", "Han/Jap", "Onge" , "Ancient NW", "Kalash", "Ror", "Sarmatian", "Alan", "Yamnaya", "MLBA steppe", "Paniya"), pch=c(16, 16, 16,1, 8, 4, 16,16,16,16, 16), col=c("red", "darkgreen", "orange", "blue", "blue", "orangered4", "green", "darkblue","darkviolet","darkred", "black"), ncol = 2)
#Botai
points(g25[grep("Botai", g25$Pop),j], g25[grep("Botai", g25$Pop),j+1], pch=8, col="green")
#CHG
points(g25[grep("CHG", g25$Pop),j], g25[grep("CHG", g25$Pop),j+1], pch=8, col="green")
#BMAC
points(g25[grep("Gonur", g25$Pop),j], g25[grep("Gonur", g25$Pop),j+1], pch=8, col="green")
#Greeks
points(g25[grep("Greek|Greec|Mycen", g25$Pop),j], g25[grep("Greek|Greec|Mycen", g25$Pop),j+1], pch=8, col="green")
#Lithuanians
points(g25[grep("Lithuania", g25$Pop),j], g25[grep("Lithuania", g25$Pop),j+1], pch=8, col="green")
#Mongols, Huns------------
# PC3 vs PC4 with East/Central Asian and steppe groups highlighted.
j=3
k=1
par(mar=c(2,2,2,1), mgp=c(1,.4,0))
plot(x=g25[,c(j,j+k)], pch=16, cex=.5, col="gray65", main = substitute(paste("PC", j, ",", k), list(j=j-1, k=j)), xlab = j-1, ylab = j)
#Mongols/Huns
points(g25[grep("Han", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkgreen") #Han
points(g25[grep("Korean", g25$Pop),c(j,j+k)], pch=13, cex=1, col="darkgreen") #Korean
points(g25[grep("Japanese", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkviolet") #Jap
points(g25[grep("Mongoli", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blue") #Mongols
# NOTE(review): the next two lines grep on te$Pop (g25 minus Kalmykia rows)
# but index the ORIGINAL g25 by those row positions, so the highlighted rows
# are misaligned whenever any Kalmykia row precedes a Kalmyk match -- verify.
te=g25[-grep("Kalmykia", g25$Pop),]
points(g25[grep("Kalmyk", te$Pop),c(j,j+k)], pch=16, cex=1, col="blue") #Kalmyk
points(g25[grep("Hazara", g25$Pop),c(j,j+k)], pch=16, cex=1, col="black") #Hazara
points(g25[grep("Hun_|Hun-", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkblue") #Huns
points(g25[grep("Xiong", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan") #Xiongnu
points(g25[grep("Saka", g25$Pop),c(j,j+k)], pch=1, cex=1, col="red") #Saka
points(g25[grep("Sarmatian", g25$Pop),c(j,j+k)], pch=16, cex=1, col="green") #Sarmatian
points(g25[grep("Alan", g25$Pop),c(j,j+k)], pch=16, cex=1, col="red") #Alans
points(g25[grep("Tuvinian", g25$Pop),c(j,j+k)], pch=8, cex=1, col="blue") #Tuvinian
points(g25[grep("Yakut|Sakha", g25$Pop),c(j,j+k)], pch=4, cex=1, col="blue") #Yakut
points(g25[grep("Wusun", g25$Pop),c(j,j+k)], pch=4, cex=1, col="red") #Wusun
points(g25[grep("Hovsgol", g25$Pop),c(j,j+k)], pch=5, cex=1, col="blue") #Hovsgol Bronze age
points(g25[grep("Okunevo", g25$Pop),c(j,j+k)], pch=18, cex=1, col="blue") #Okunevo
points(g25[grep("Buryat", g25$Pop),c(j,j+k)], pch=15, cex=1, col="blue") #Buryat
legend(x="bottomleft", legend = c("Han", "Kor", "Jap", "Mon","Hazar" , "Hun", "Xiong", "Saka", "Sarmti", "Alan", "Tuvan", "Yakut", "Wusun", "Hovsgol", "Okunevo","Buryat"), pch=c(16, 13, rep(16,5),1,16,16, 8, 4, 4,5,18,15), col=c("darkgreen", "darkgreen", "darkviolet", "blue", "black", "darkblue", "cyan", "red","green","red", "blue", "blue", "red", "blue", "blue", "blue"), ncol = 2)
#clines (hand-picked endpoints)
segments(x0=-0.032182042, x1=0.001051782, y0=0.01712452, y1=-0.02136843, lty=3, col="lightblue", lwd = 2)
segments(x0=0.009914134, x1=-0.014668345, y0=-0.007637028, y1= 0.020501093, lty=3, col="red", lwd = 2)
segments(x0=0.009914134, x1=-0.031232504, y0=-0.007637028, y1= 0.017349623, lty=3, col="red", lwd = 2)
segments(x0=0.009175605, x1=-0.011292210, y0=-0.008312343, y1= -0.048831236, lty=3, col="black", lwd = 2)
#text
text(x=-0.0007417899, y=-0.0380262, labels = "Indians")
#niShAda/Austroasiatic-----------
# Same PC pair, highlighting Austroasiatic-speaking and related groups
# (reuses k = 1 from the section above).
j=3
par(mar=c(2,2,2,1), mgp=c(1,.4,0))
plot(x=g25[,j], y=g25[,(j+1)], pch=16, cex=.5, col=gray.colors(30)[colr %% 30], main = substitute(paste("PC", j, ",", k), list(j=j-1, k=j)), xlab = j-1, ylab = j)
points(g25[grep("Bonda|Santhal|Korwa|Bihor|Gadaba|Bhumij|Juang|Khonda", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkblue") #Austroasiatic
points(g25[grep("Onge|Jarawa", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orangered") #Onge/Jarawa
points(g25[grep("Paniya", g25$Pop),c(j,j+k)], pch=16, cex=1, col="black")
points(g25[grep("Gond|Asur", g25$Pop),c(j,j+k)], pch=1, cex=1, col="violet")
points(g25[grep("Irula|Malayan|Pulliyar|Kadar", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkgreen")
points(g25[grep("Australian", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orange")
points(g25[grep("Nasoi", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orange")
points(g25[grep("Papuan", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orange")
points(g25[grep("Cambodian", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan4")
points(g25[grep("Vietnam", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan4")
points(g25[grep("Nui_Nap", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan4")
points(g25[grep("Man_Bac", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan4")
points(g25[grep("Thai|Dai", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan2")
points(g25[grep("Atayal", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blue")
#legend
legend(x="bottomright", legend = c("Ind.Aus.As","Andaman","Paniya", "Gond/Asur", "Tam.tribes", "Aus/Papuans","E.Aus.As" , "Kra-Dai", "Atayal"), pch=c(rep(16,3), 1, rep(16,5)), col=c("darkblue", "orangered", "black", "violet","darkgreen", "orange", "cyan4", "cyan2", "blue"), ncol = 3)
#clines (hand-picked endpoints)
segments(x0=-0.01878741, x1= -0.04200896, y0=-0.05130739, y1= -0.01213913, lty=3, col="black", lwd = 2) #Austric cline
segments(x0= -0.022707934 , x1= 0.008957808, y0=-0.05400865, y1= -0.03037263, lty=3, col="blue", lwd = 2) #Indian hunter gatherer-Iranian farmer
#text
text(x=-0.035575283, y=-0.0384, labels = "Austroasiatic cline")
text(x=-0.0027, y=-.049, labels = "ASI cline", col = "blue")
#ASI cline/Iranian farmer-----------
# PC3 vs PC4 highlighting BMAC/Iranian-plateau ancients, Indian caste and
# tribal groups (reuses k = 1 set in the Mongols/Huns section).
j=3
par(mar=c(2,2,2,1), mgp=c(1,.4,0))
plot(x=g25[,j], y=g25[,(j+1)], pch=16, cex=.5, col=gray.colors(30)[colr %% 30], main = substitute(paste("PC", j, ",", k), list(j=j-1, k=j)), xlab = j-1, ylab = j)
points(g25[grep("Gonur", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blue") #Gonur
points(g25[grep("Gonur1_BA_o", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orangered") #Gonur outlier
points(g25[grep("Shahr_I_Sokhta", g25$Pop),c(j,j+k)], pch=16, cex=1, col="cyan4")
points(g25[grep("Namazga", g25$Pop),c(j,j+k)], pch=16, cex=1, col="skyblue")
points(g25[grep("CHG", g25$Pop),c(j,j+k)], pch=16, cex=1, col="hotpink")
points(g25[grep("Dzharkutan", g25$Pop),c(j,j+k)], pch=4, cex=1, col="cyan2")
points(g25[grep("Sappali", g25$Pop),c(j,j+k)], pch=1, cex=1, col="darkblue")
points(g25[grep("Hissar", g25$Pop),c(j,j+k)], pch=1, cex=1, col="darkblue")
points(g25[grep("Balochi", g25$Pop),c(j,j+k)], pch=1, cex=1, col="darksalmon")
points(g25[grep("Brahui", g25$Pop),c(j,j+k)], pch=1, cex=1, col="darkseagreen")
points(g25[grep("Maratha", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blueviolet")
points(g25[grep("Velamas", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blueviolet")
points(g25[grep("Yadava|Gupta|Relli|Piramalai|Chamar|Chenchu|Kapu|Kurumba|Kanjar|Dharkar", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blueviolet")
points(g25[grep("Brahmin|Iyer", g25$Pop),c(j,j+k)], pch=16, cex=1, col="burlywood")
points(g25[grep("Kalash", g25$Pop),c(j,j+k)], pch=1, cex=1, col="red")
points(g25[grep("Onge|Jarawa", g25$Pop),c(j,j+k)], pch=16, cex=1, col="green") #Onge/Jarawa
points(g25[grep("Paniya", g25$Pop),c(j,j+k)], pch=16, cex=1, col="black")
points(g25[grep("Gond|Asur", g25$Pop),c(j,j+k)], pch=1, cex=1, col="black")
points(g25[grep("Irula|Malayan|Pulliyar|Kadar", g25$Pop),c(j,j+k)], pch=16, cex=1, col="black")
points(g25[grep("Australian", g25$Pop),c(j,j+k)], pch=16, cex=1, col="darkgreen")
points(g25[grep("Sintashta", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orange")
points(g25[grep("Alan", g25$Pop),c(j,j+k)], pch=8, cex=1, col="red")
#legend
# NOTE(review): 11 legend labels but only 10 pch values -- the symbols are
# recycled and the last entries are mislabeled; verify against the plot.
legend(x="bottomleft", legend = c("Gonur","Shahr_I_Sokhta","Namazga", "Dzharkutan", "Sappali/Hissar", "Balochi","Brahui" , "Mid/Ser Castes", "brAh", "Kalash", "tribes"), pch=c(16,16,1,4,1,1,1,16,16,16), col=c("blue", "cyan4", "skyblue", "cyan2","darkblue", "darksalmon", "darkseagreen", "blueviolet", "burlywood", "red","black"), ncol = 1)
text(x=0.0112972,y=-0.005385978,labels = "Alans", col="red", pos = 4)
text(x=0.006749008,y=0.02005088,labels = "Sintashta", col="orange")
text(x=-0.02331969,y=-0.03555004,labels = "Onge/Jarawa", col="green", pos=4)
text(x=-0.02341162,y=-0.06053669,labels = "Australian", col="darkgreen")
text(x=0.0106,y=-0.0224,labels = "CHG", col="hotpink", pos=4)
text(x=.00383,y=-0.0103,labels = "Gonur_o", col="orangered", pos=2)
text(x=-0.00602059,y=-0.05333334,labels = "ASI cline", col="blue")
#clines (hand-picked endpoints)
segments(x0= -0.0221 , x1= 0.00785, y0=-0.056, y1= -0.033, lty=3, col="blue", lwd = 3) #Indian hunter gatherer-Iranian farmer
#Wusun, Brahmins Kangju etc----------
# Zoomed PC2 vs PC3 comparing Brahmins/Kalash with Wusun and Kangju ancients.
j=2
k=1
par(mar=c(2,2,2,1), mgp=c(1,.4,0))
plot(x=g25[,j], y=g25[,(j+1)], pch=16, cex=.5, col=gray.colors(30)[colr %% 30], main = substitute(paste("PC", j, ",", k), list(j=j-1, k=j)), xlab = j-1, ylab = j, xlim=c(-.003,.013), ylim=c(-.019,.0078))
points(g25[grep("Brahmin|Iyer", g25$Pop),c(j,j+k)], pch=16, cex=1, col="burlywood")
points(g25[grep("Kalash", g25$Pop),c(j,j+k)], pch=16, cex=1, col="red")
points(g25[grep("Wusun", g25$Pop),c(j,j+k)], pch=16, cex=1, col="orange")
points(g25[grep("Kangju", g25$Pop),c(j,j+k)], pch=16, cex=1, col="blueviolet")
#legend
legend(x="topleft", legend = c("brAh","Kalash","Wusun","Kangju"), pch=c(16,16,16,16), col=c("burlywood", "red", "orange", "blueviolet"), ncol = 1)
#getting some pop in PCA-----------
# Identify populations inside a hand-drawn PC2/PC3 window of interest.
g25$Pop[which(g25$PC2 < (0.0005) & g25$PC2 > (-.0017) & g25$PC3 > (-.046) & g25$PC3 < (-.042))]
|
c75ebf5cda78939bd536cd567b8a8a26940a1678
|
125da5c7a9a00cb30e460259e2b63951b3a63e5d
|
/man/df_swets08.Rd
|
69153bd75df9f28b6ad08d1ab39a94a3fabe26ac
|
[
"MIT"
] |
permissive
|
anhnguyendepocen/bcogsci
|
1db2919cce749d9ac5fde721b139b043fc11ef3e
|
069accf6984c9358428147492260821ba8952147
|
refs/heads/master
| 2023-08-15T06:03:04.914839
| 2021-10-13T04:42:45
| 2021-10-13T04:42:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 953
|
rd
|
df_swets08.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df_swets08}
\alias{df_swets08}
\title{Data from a self-paced reading experiment that records reading times in milliseconds at the post-critical region. \insertCite{swets2008underspecification;textual}{bcogsci}}
\format{
A data frame with 5,184 rows and 6 variables:
\describe{
\item{subj}{The subject id.}
\item{item}{The item id.}
\item{resp.RT}{Response times to questions.}
\item{qtype}{The three levels of the between-subjects factor, question type.}
\item{attachment}{The three levels of the within-subjects factor, attachment type.}
\item{RT}{Reading times at the post-critical region.}
}
}
\usage{
df_swets08
}
\description{
The data set is from a self-paced reading experiment by \insertCite{swets2008underspecification;textual}{bcogsci}, and contains reading times from a 3x3 design.
}
\references{
\insertAllCited{}
}
\keyword{datasets}
|
20878e7d737f9b6d9752622a429f9df13464a525
|
cb795c484ce72851bebe5f6263d5112d2b3550bc
|
/qsims_quantE/run.R
|
513bb43df723eb20a61cc26a9a511edf43623643
|
[] |
no_license
|
andywdahl/gxemm-scripts
|
14b178615a32c1b729b794289450cae39d959427
|
db2876be8c4beadafac52608f629db457ff24718
|
refs/heads/master
| 2020-06-05T08:02:52.306122
| 2020-01-02T19:04:43
| 2020-01-02T19:04:43
| 192,369,534
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,126
|
r
|
run.R
|
rm( list=ls() )
library(BEDMatrix)
library(GxEMM)
source( '../code/sim_fxn.R' )
load( 'Rdata/setup.Rdata' )
it <- as.numeric( commandArgs(TRUE)[[1]] )
set.seed( round(as.numeric(Sys.time())) + it )
for( xval in sample(nx) )
for( sigtype in sample(5) )
try({
if( sigtype == 4 & xval == 1 ) next
savefile1 <- paste0( 'Rdata/hom/' , sigtype, '_', xval, '_', it, '.Rdata' )
savefile2 <- paste0( 'Rdata/het/' , sigtype, '_', xval, '_', it, '.Rdata' )
savefile3 <- paste0( 'Rdata/diag/', sigtype, '_', xval, '_', it, '.Rdata' )
savefile4 <- paste0( 'Rdata/diag1/', sigtype, '_', xval, '_', it, '.Rdata' )
savefile5 <- paste0( 'Rdata/hom1/', sigtype, '_', xval, '_', it, '.Rdata' )
sinkfile <- paste0( 'Rout/' , sigtype, '_', xval, '_', it, '.Rout' )
if( file.exists( savefile5 ) | file.exists( sinkfile ) ) next
print( sinkfile )
sink( sinkfile )
## load X, Z, K, Xnames
load( 'Rdata/preprocess.Rdata' )
# generate pheno
if( sigtype == 4 )
Z <- Z/sqrt(2)
y <- simfxn( it, X=cbind(1,X), Z,
G=sample_G( seed=it, ncaus, Xnames, lens ),
sig2hom=all_sig2homs(xval)[ sigtype],
sig2het=all_sig2hets(xval)[[sigtype]],
tauhet=all_tauhets (xval)[[sigtype]]
)
tmpdir <- paste0( '/wynton/scratch/gxemm/tmpdir_', sigtype, '_', it, '_', xval, '_qsim' )
if( ! file.exists( savefile1 ) ){
out_hom <- GxEMM( y, X, K, Z,gtype='hom', tmpdir=tmpdir, noise_K0=TRUE )
save( out_hom, file=savefile1 )
}
if( ! file.exists( savefile2 ) ){
out_het <- GxEMM( y, X, K, Z, gtype='iid', tmpdir=tmpdir, noise_K0=TRUE )
save( out_het, file=savefile2 )
}
if( ! file.exists( savefile3 ) ){
out_diag <- GxEMM( y, X, K, Z, gtype='free', etype='free', tmpdir=tmpdir, noise_K0=TRUE )
save( out_diag, file=savefile3 )
}
if( ! file.exists( savefile4 ) ){
out_diag1 <- GxEMM( y, X, K, Z, gtype='free', etype='hom', tmpdir=tmpdir, noise_K0=TRUE )
save( out_diag1, file=savefile4 )
}
if( ! file.exists( savefile5 ) ){
out_hom1 <- GxEMM( y, X, K, Z, gtype='hom', etype='free', tmpdir=tmpdir, noise_K0=TRUE )
save( out_hom1, file=savefile5 )
}
rm( K, Z, X )
print(warnings())
print('Done')
sink()
})
|
22ab55c860ebe6040fb0ba39f346e7e666aac239
|
e9995af2a873e6cab06f48e262f9afba090d10ca
|
/man/chud.Rd
|
87920f91ff0046cc6d14384db5ad3d56f740622e
|
[] |
no_license
|
cran/SamplerCompare
|
a428addb371d32120a5aefbab275de18bd9f7964
|
a0c229fb56aeafcdfdaef7826dca0526be88e724
|
refs/heads/master
| 2023-04-27T16:55:50.126450
| 2023-04-24T05:00:02
| 2023-04-24T05:00:02
| 17,693,637
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 843
|
rd
|
chud.Rd
|
% From SamplerCompare, (c) 2010 Madeleine Thompson
\name{chud}
\alias{chud}
\alias{chdd}
\title{Cholesky Update/Downdate}
\description{Rank-one updates of Cholesky factors}
\usage{chud(R,x)
chdd(R,x)}
\arguments{
\item{R}{an upper-triangular matrix}
\item{x}{a vector}
}
\value{
An updated version of \code{R}.
}
\details{
\code{chud} computes Q such that: \deqn{Q^T Q = R^T R + x x^T}
\code{chdd} computes Q such that: \deqn{Q^T Q = R^T R - x x^T}
\code{chdd} reports an error if \eqn{R^T R - x x^T} is not positive
definite. The two functions use LINPACK's \code{dchud} and
\code{dchdd} routines respectively, two of the few routines from
LINPACK without analogues in LAPACK.
}
\seealso{
\code{chol}
}
\references{
Dongarra, J. J., Moler, C. B., Bunch, J. R., Stewart, G. W. (1979)
LINPACK User's Guide.
}
|
3d7f01d7fcd437fe499cf2adaac5f1dc60cde30a
|
064ede5b34491a58c95346d6833d99a5ee5553d1
|
/ReadInData.R
|
d0661fbdaaf2c916a5d7bfecb10fc7ffae6b292b
|
[] |
no_license
|
shamrockiris/credit-card-strategy
|
24d8338296af53add76b060d335d120b70b1f721
|
1e92b508828842f320d9feddc05c692d77830472
|
refs/heads/master
| 2016-09-06T10:03:45.014064
| 2015-02-06T00:37:40
| 2015-02-06T00:37:40
| 29,219,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
ReadInData.R
|
# import data from file, before that, sava as .csv
sourceData <- read.csv(file.choose(),header=F)
# want to delete the comma in data and put it to numeric type
attach(sourceData)
for(i in 4:15) {
sourceData[,i] <- as.numeric(gsub(",","",sourceData[,i]))
}
|
6d2e37ba8658717b17605202b06ac01095e90c4d
|
8749cd76d533720e28877bdfaf9b7fe45e1ccf5f
|
/man/garch.Rd
|
9b2704f5496302016e8241e1ff1ad6638ea7dadb
|
[] |
no_license
|
bigdatalib/RTL
|
89ecba94b24ea1fa65bb87408b0c08eaa5809b23
|
9fe64900c618cae4d5286d6440774bf92819d387
|
refs/heads/master
| 2020-12-25T00:02:56.479952
| 2014-05-21T18:19:23
| 2014-05-21T18:19:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
rd
|
garch.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{garch}
\alias{garch}
\title{\code{garch}}
\usage{
garch(x = data, ret = "rel", roll = TRUE, cmdty = "")
}
\arguments{
\item{x}{Univariate or multivariate xts price series.}
\item{ret}{"rel" for relative returns, "abs" for absolute returns or "flatprice" if no transformation of x is require.}
\item{roll}{True if you want adjust the returns for roll.}
\item{cmdty}{commodity name in expiry_table object}
}
\value{
xts series of annualised Garch(1,1) volatilities if using relative returns.
}
\description{
Computes annualised Garch(1,1) volatilities using fGarch.
}
\examples{
data(data)
RTL:::garch(x=Cl(CL1),ret="rel",roll=TRUE,cmdty="cmewti")
RTL:::garch(x=merge(Cl(CL1),Cl(CL24)),ret="rel",roll=TRUE,cmdty="cmewti")
}
\author{
Philippe Cote <coteph@mac.com,philippe.cote@scotiabank.com>, Nima Safain <nima.safaian@gmail.com,nima.safaian@scotiabank.com>
}
|
9d7cf08c17455b4261de51ce8364fb0ad53a0aae
|
610bf65e89c9c27e5f0ea0df0d95fd2bb9e65ccf
|
/Code/data_3.R
|
906a9b6d3eba4646fe25ffd52afbd9688c7d0747
|
[
"MIT"
] |
permissive
|
jingeyu/single-cell-RNA-cluster
|
7cf2e16fe20becd2547d8d63de73a57b113afeb1
|
c67f02cb650c6d18e75841908a2391f1593f6e9a
|
refs/heads/master
| 2023-01-19T19:05:04.164536
| 2023-01-12T10:17:54
| 2023-01-12T10:17:54
| 263,781,833
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,337
|
r
|
data_3.R
|
suppressMessages(library(ggplot2))
suppressMessages(library(dplyr))
#----data read ----
ST1 <- read.csv('ST_complete.csv',header = TRUE)
rownames(ST1) <- ST1[,1]
ST1 <- ST1[,-1]
X <- as.matrix(ST1)
L = 31
W = 33
G = dim(ST1)[1]
N = L * W
ind_com <- matrix(0,L*W,2)
for(j in 1:W){
for(i in 1:L){
ind_com[i+L*(j-1),] = c(i,j)
}
}
ind_com <- as.data.frame(ind_com)
names(ind_com) <- c('row_ind','col_ind')
null_na <- read.csv('null_index.csv')[,-1]
index_null <- c()
for(i in 1:16){
index_null[i] <- L * (null_na[i,2] -1) + null_na[i,1]
}
#----hyper-paramters----
eta_theta <- 0
tau_theta <- 10
eta_mu <- 0
tau_mu <- 10
par_alpha <- 5
par_beta <- 0.1
#tuning parameter in the proposal distribution
tau_0 <- 0.1
#----choose different region number S
set.seed(1996)
#----S = 5----
S = 5
km1 <- kmeans(t(X), S)
R_t_vec <- km1$cluster
# R_t_vec <- sample(2:S, N, replace = TRUE)
# R_t_vec[index_null] <- 1
R_t <- matrix(R_t_vec, L, W)
mu_t <- matrix(NA, G, S)
for(s in 1:S){
mu_t[ ,s] <- rowMeans(X[ , R_t_vec == s])
}
sgm_sq_t <- as.numeric(apply(X - mu_t[ ,R_t_vec], 1, var))
theta_t <- matrix(rnorm(S*S, eta_theta, 0.01),S,S)
theta_t <- (theta_t + t(theta_t))/2
diag(theta_t) <- 0
#iteration
num_iter <- 5000
Mu <- array(0, dim = c(G,S, num_iter))
Sgm_sq <- matrix(0,G,num_iter)
R_T <- array(0, dim = c(L,W,num_iter))
Theta <- array(0, dim = c(S,S,num_iter))
ptm <- proc.time()
for(t in 1:num_iter){
mu_t = mu_update(X, sgm_sq_t, R_t, tau_mu, eta_mu,S, G)
sgm_sq_t = sgm_sq_star_update(X, R_t, mu_t, S, G, N, par_alpha, par_beta)
R_t = R_update(X, R_t, mu_t, theta_t, sgm_sq_t,S,G,L,W)
theta_t = theta_update(X, R_t, mu_t, theta_t, sgm_sq_t,
S, G, L, W,tau_0, eta_theta,tau_theta)
Mu[,,t] <- mu_t
Sgm_sq[,t] <- sgm_sq_t
R_T[,,t] <- R_t
Theta[,,t] <- theta_t
}
print(proc.time()-ptm)
mu_sim <- Mu[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
R_sim <- R_T[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean) %>% floor()
sgm_sq_sim <- rowMeans(Sgm_sq[,(4*num_iter/5):num_iter])
theta_sim <- Theta[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
BIC_5 <- BIC_k(X, mu_sim, c(R_sim), sgm_sq_sim)
BIC_5
tmp1 <- ind_com
tmp1$Re <- as.numeric(c(R_sim))
ggplot(tmp1, aes(col_ind, row_ind,color = letters[Re])) + geom_point(alpha = 0.6) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())
table(R_sim)
#---- S = 6----
S = 6
km2 <- kmeans(t(X), S)
R_t_vec <- km2$cluster
# R_t_vec <- sample(2:S, N, replace = TRUE)
# R_t_vec[index_null] <- 1
R_t <- matrix(R_t_vec, L, W)
mu_t <- matrix(NA, G, S)
for(s in 1:S){
mu_t[ ,s] <- rowMeans(X[ , R_t_vec == s])
}
sgm_sq_t <- as.numeric(apply(X - mu_t[ ,R_t_vec], 1, var))
theta_t <- matrix(rnorm(S*S, eta_theta, 0.01),S,S)
theta_t <- (theta_t + t(theta_t))/2
diag(theta_t) <- 0
num_iter <- 5000
Mu2 <- array(0, dim = c(G,S, num_iter))
Sgm_sq2 <- matrix(0,G,num_iter)
R_T2 <- array(0, dim = c(L,W,num_iter))
Theta2 <- array(0, dim = c(S,S,num_iter))
#iteration
for(t in 1:num_iter){
mu_t = mu_update(X, sgm_sq_t, R_t, tau_mu, eta_mu,S, G)
sgm_sq_t = sgm_sq_star_update(X, R_t, mu_t, S, G, N, par_alpha, par_beta)
R_t = R_update(X, R_t, mu_t, theta_t, sgm_sq_t,S,G,L,W)
theta_t = theta_update(X, R_t, mu_t, theta_t, sgm_sq_t,
S, G, L, W,tau_0, eta_theta,tau_theta)
Mu2[,,t] <- mu_t
Sgm_sq2[,t] <- sgm_sq_t
R_T2[,,t] <- R_t
Theta2[,,t] <- theta_t
}
mu_sim2 <- Mu2[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
R_sim2 <- R_T2[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean) %>% floor()
sgm_sq_sim2 <- rowMeans(Sgm_sq2[,(4*num_iter/5):num_iter])
theta_sim2 <- Theta2[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
BIC_6 <- BIC_k(X, mu_sim2, c(R_sim2), sgm_sq_sim2)
BIC_6
tmp2 <- ind_com
tmp2$Re <- as.numeric(c(R_sim2))
ggplot(tmp2, aes(col_ind, row_ind,color = letters[Re])) + geom_point(alpha = 0.6) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())
plot(Mu2[10,5,],type = 'l')
plot(Theta2[1,3,], type = 'l')
plot(Sgm_sq2[100,], type = 'l')
#----S = 7----
S = 7
km3 <- kmeans(t(X), S)
R_t_vec <- km3$cluster
# R_t_vec <- sample(2:S, N, replace = TRUE)
# R_t_vec[index_null] <- 1
R_t <- matrix(R_t_vec, L, W)
mu_t <- matrix(NA, G, S)
for(s in 1:S){
mu_t[ ,s] <- rowMeans(X[ , R_t_vec == s])
}
sgm_sq_t <- as.numeric(apply(X - mu_t[ ,R_t_vec], 1, var))
theta_t <- matrix(rnorm(S*S, eta_theta, 0.01),S,S)
theta_t <- (theta_t + t(theta_t))/2
diag(theta_t) <- 0
num_iter <- 5000
Mu3 <- array(0, dim = c(G,S, num_iter))
Sgm_sq3 <- matrix(0,G,num_iter)
R_T3 <- array(0, dim = c(L,W,num_iter))
Theta3 <- array(0, dim = c(S,S,num_iter))
#iteration
ptm <- proc.time()
for(t in 3403:num_iter){
mu_t = mu_update(X, sgm_sq_t, R_t, tau_mu, eta_mu,S, G)
sgm_sq_t = sgm_sq_star_update(X, R_t, mu_t, S, G, N, par_alpha, par_beta)
R_t = R_update(X, R_t, mu_t, theta_t, sgm_sq_t,S,G,L,W)
theta_t = theta_update(X, R_t, mu_t, theta_t, sgm_sq_t,
S, G, L, W,tau_0, eta_theta,tau_theta)
Mu3[,,t] <- mu_t
Sgm_sq3[,t] <- sgm_sq_t
R_T3[,,t] <- R_t
Theta3[,,t] <- theta_t
}
print(proc.time()-ptm)
mu_sim3 <- Mu3[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
R_sim3 <- R_T3[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean) %>% floor()
sgm_sq_sim3 <- rowMeans(Sgm_sq3[,(4*num_iter/5):num_iter])
theta_sim3 <- Theta3[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
BIC_7 <- BIC_k(X, mu_sim3, c(R_sim3), sgm_sq_sim3)
BIC_7
tmp3 <- ind_com
tmp3$Re <- as.numeric(c(R_sim3))
ggplot(tmp3, aes(col_ind, row_ind,color = letters[Re])) + geom_point(alpha = 0.6) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())
#---- S=3 ----
S = 3
km4 <- kmeans(t(X), S)
R_t_vec <- km4$cluster
# R_t_vec <- sample(1:S, N, replace = TRUE)
# R_t_vec[index_null] <- 1
R_t <- matrix(R_t_vec, L, W)
mu_t <- matrix(NA, G, S)
for(s in 1:S){
mu_t[ ,s] <- rowMeans(X[ , R_t_vec == s])
}
sgm_sq_t <- as.numeric(apply(X - mu_t[ ,R_t_vec], 1, var))
theta_t <- matrix(rnorm(S*S, eta_theta, 0.01),S,S)
theta_t <- (theta_t + t(theta_t))/2
diag(theta_t) <- 0
num_iter <- 5000
Mu4 <- array(0, dim = c(G,S, num_iter))
Sgm_sq4 <- matrix(0,G,num_iter)
R_T4 <- array(0, dim = c(L,W,num_iter))
Theta4 <- array(0, dim = c(S,S,num_iter))
#iteration
ptm <- proc.time()
for(t in 1:num_iter){
mu_t = mu_update(X, sgm_sq_t, R_t, tau_mu, eta_mu,S, G)
sgm_sq_t = sgm_sq_star_update(X, R_t, mu_t, S, G, N, par_alpha, par_beta)
R_t = R_update(X, R_t, mu_t, theta_t, sgm_sq_t,S,G,L,W)
theta_t = theta_update(X, R_t, mu_t, theta_t, sgm_sq_t,
S, G, L, W,tau_0, eta_theta,tau_theta)
Mu4[,,t] <- mu_t
Sgm_sq4[,t] <- sgm_sq_t
R_T4[,,t] <- R_t
Theta4[,,t] <- theta_t
}
print(proc.time()-ptm)
mu_sim4 <- Mu4[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
R_sim4 <- R_T4[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean) %>% floor()
sgm_sq_sim4 <- rowMeans(Sgm_sq4[,(4*num_iter/5):num_iter])
theta_sim4 <- Theta4[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
BIC_3 <- BIC_k(X, mu_sim4, c(R_sim4), sgm_sq_sim4)
BIC_3
tmp4 <- ind_com
tmp4$Re <- as.numeric(c(R_sim4))
ggplot(tmp4, aes(col_ind, row_ind,color = letters[Re])) + geom_point(alpha = 0.6) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())
#----- S = 4----
S = 4
km5 <- kmeans(t(X), S)
R_t_vec <- km5$cluster
# R_t_vec <- sample(2:S, N, replace = TRUE)
# R_t_vec[index_null] <- 1
R_t <- matrix(R_t_vec, L, W)
mu_t <- matrix(NA, G, S)
for(s in 1:S){
mu_t[ ,s] <- rowMeans(X[ , R_t_vec == s])
}
sgm_sq_t <- as.numeric(apply(X - mu_t[ ,R_t_vec], 1, var))
theta_t <- matrix(rnorm(S*S, eta_theta, 0.01),S,S)
theta_t <- (theta_t + t(theta_t))/2
diag(theta_t) <- 0
num_iter <- 5000
Mu5 <- array(0, dim = c(G,S, num_iter))
Sgm_sq5 <- matrix(0,G,num_iter)
R_T5 <- array(0, dim = c(L,W,num_iter))
Theta5 <- array(0, dim = c(S,S,num_iter))
#iteration
ptm <- proc.time()
for(t in 1:num_iter){
mu_t = mu_update(X, sgm_sq_t, R_t, tau_mu, eta_mu,S, G)
sgm_sq_t = sgm_sq_star_update(X, R_t, mu_t, S, G, N, par_alpha, par_beta)
R_t = R_update(X, R_t, mu_t, theta_t, sgm_sq_t,S,G,L,W)
theta_t = theta_update(X, R_t, mu_t, theta_t, sgm_sq_t,
S, G, L, W,tau_0, eta_theta,tau_theta)
Mu5[,,t] <- mu_t
Sgm_sq5[,t] <- sgm_sq_t
R_T5[,,t] <- R_t
Theta5[,,t] <- theta_t
}
print(proc.time()-ptm)
mu_sim5 <- Mu5[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
R_sim5 <- R_T5[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean) %>% floor()
sgm_sq_sim5 <- rowMeans(Sgm_sq5[,(4*num_iter/5):num_iter])
theta_sim5 <- Theta5[,,(4*num_iter/5):num_iter] %>% apply(c(1,2),mean)
BIC_4 <- BIC_k(X, mu_sim5, c(R_sim5), sgm_sq_sim5)
BIC_4
tmp5 <- ind_com
tmp5$Re <- as.numeric(c(R_sim5))
ggplot(tmp5, aes(col_ind, row_ind,color = letters[Re])) + geom_point(alpha = 0.6) +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())
#------intergrate--------
#BIC plot
BIC_total <- cbind(c(3,4,5,6,7),c(BIC_3, BIC_4, BIC_5, BIC_6, BIC_7))
BIC_total <- as.data.frame(BIC_total)
ggplot(BIC_total,aes(x = BIC_total[,1], y = BIC_total[,2])) +
geom_line(color = 'blue', alpha = 0.6) + geom_point(color = 'blue', alpha = 0.6) +
labs(x = 'Region numbers', y = 'BIC') +
geom_vline(xintercept = 6,linetype = 'dotted')
#K = 6
#parameters iteration
tmp1 <- as.data.frame(cbind(c(2000:4000), Mu2[10, 3 ,][2000:4000]))
ggplot(tmp1,aes(x = tmp1[,1], y = tmp1[,2])) +
geom_line(color = 'blue') +
labs(x = 'iteration', y = expression(mu)) +
scale_y_continuous(limits = c(1.25,2.0)) +
geom_hline(yintercept = mu_sim2[10,3], color = 'red')
tmp2 <- as.data.frame(cbind(c(2000:4000), Sgm_sq2[100,][2000:4000]))
ggplot(tmp2,aes(x = tmp2[,1], y = tmp2[,2])) +
geom_line(color = 'blue') +
labs(x = 'iteration', y = expression(sigma^2)) +
scale_y_continuous(limits = c(0.02,0.18)) +
geom_hline(yintercept = sgm_sq_sim2[100], color = 'red')
tmp3 <- as.data.frame(cbind(c(2000:5000), Theta2[2, 3 ,][2000:5000]))
ggplot(tmp3,aes(x = tmp3[,1], y = tmp3[,2])) +
geom_line(color = 'blue') +
labs(x = 'iteration', y = expression(theta)) +
scale_y_continuous(limits = c(-4,8)) +
geom_hline(yintercept = theta_sim2[2,3], color = 'red')
#
# write.csv(Mu2[10,,],'mu_10.csv')
# write.csv(Sgm_sq2, 'Sgm_sq.csv')
# write.csv(Theta2[2,,],'Theta_2.csv')
|
268a8a9383a8fb0e50a107b4b0ae938ec5d74013
|
4dcf41d2a4309b6edf28e11b7f1ba1a29a1836f3
|
/question5.R
|
7b1ef24dd79ee451c13d914ad38bd447b8e11c8f
|
[] |
no_license
|
RowAtk/credit-bank
|
4d2d44a619c3b0a9d1578227254161d161355099
|
cf4caf27bf305d6caa059e7b5648a4b9ecdac756
|
refs/heads/master
| 2022-04-25T16:43:28.720108
| 2020-04-30T16:26:46
| 2020-04-30T16:26:46
| 259,198,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
question5.R
|
# make a date string
#string = paste(5, 3, year(today), sep = "-")
# make a date
#something <- mdy(paste(5, 3, year(today), sep = "-"))
bank.data$deposit_date <- mdy(paste(bank.data$month, bank.data$day, "2019", sep = "-"))
# temp <- today() - bank.data$deposit_date[1]
bank.data$last_deposit <- today() - bank.data$deposit_date
#END
|
3bd71b4165e7f87122289decb2fcce52e0308329
|
ce94d643cfad67f755ddc8f8d8123a66b5651de3
|
/14_IntroMultivariate/IntroMultivariate.R
|
7fdabbd5f5514475d3ce61e4df289d4345d889e2
|
[] |
no_license
|
lindstroje/IntroRangeR
|
79353b90f6e22632f60c943ba8b9dd112433a21b
|
36587f2958179ff218d5cea11885dc2e9c43fcd7
|
refs/heads/master
| 2020-12-18T15:11:05.009388
| 2020-01-21T17:39:18
| 2020-01-21T17:39:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,940
|
r
|
IntroMultivariate.R
|
pacman::p_load(ggplot2, vegan, vegan3d)
setwd("C:/Users/Devan.McGranahan/GoogleDrive/Teaching/Classes/Intro to R/course materials/class session materials")
# Load data
spp.d <- read.csv(file="./data/VareExample/SpeciesData.csv")
chem.d <- read.csv(file="./data/VareExample/SoilChemistryResults.csv")
man.d <- read.csv(file="./data/VareExample/Management.csv")
# Check out univariate comparisons
pairs(chem.d[2:13], upper.panel = NULL)
# Create Euclidean distance matrix
(chem.m <- round(vegdist(chem.d[2:13],
method="euclidean"),1))
# Cluster analysis
# Calculate cluster diagram
chem.clust <- hclust(chem.m, method="average")
plot(chem.clust, labels=chem.d$name,
main="Cluster diagram of soil chemistry",
xlab="Sample", ylab="Euclidean distance", las=1)
# Visualize potential groups
rect.hclust(chem.clust, 2, border="red")
rect.hclust(chem.clust, 4, border="blue")
plot(chem.clust, labels=chem.d$name,
main="Cluster diagram of soil chemistry",
xlab="Sample", ylab="Euclidean distance", las=1)
rect.hclust(chem.clust, 5, border="darkgreen")
# Principal Components Analysis
# Base R
chem.pca <- prcomp(chem.m, scale.=TRUE)
summary(chem.pca)
plot(chem.pca, type="l") # Scree plot
biplot(chem.pca)
# Package vegan
chem.pca2 <- rda(chem.d[2:13], scale=TRUE)
summary(chem.pca2)$cont
screeplot(chem.pca2, type="lines")
plot(chem.pca2)
# Compare to cluster diagram
chem.clust <- hclust(vegdist(chem.d[2:13],
method="euclidean"),
method="average")
x11(12,5.5) ; par(mgp=c(4, 1, 0), mar=c(6, 6, 1, 1),
las=1, cex.lab=1.4, cex.axis=1.4, mfrow=c(1,2))
plot(chem.clust,
labels=chem.d$name,
xlab="Sample", ylab="Euclidean distance", las=1)
plot(chem.pca2, display = "sites", las=1)
ordicluster(chem.pca2, chem.clust)
dev.off()
# View multidimensional space
ordirgl(chem.pca2, display="sites", type="text")
orgltext(chem.pca2, row.names(chem.d), display="sites") # focus on 20, 22, 23
# Plotting by known groups
plot(chem.pca2, type="n", las=1)
text(chem.pca2, display = "sites", labels=row.names(chem.d))
ordispider(chem.pca2, man.d$BurnSeason, display="sites",
label=T, lwd=2, col=c("blue","orange", "black"))
# Testing groups
envfit(chem.pca2 ~ man.d$BurnSeason)
envfit(chem.pca2 ~ man.d$BurnSeason, choices=c(1:3))
# D I S C L A I M E R:
# We use PCA here for illustration
# (Euclidean distance is conceptually easy)
# PCA is not the only choice for ordination...
# ...and for ecologists, it is rarely the best choice.
# The vegan package provides many alternatives to the
# Euclidean distance measure; see ?vegdist.
# See ?metaMDS and ?capscale for non-metric and metric
# multidimensional scaling functions for analysis.
|
c98ec1efc71e8044b6bc879b693a3793fdc501d1
|
c8e3c0c84adc9286c995e8416b2e447e5003afd4
|
/Test.R
|
a04736b3fbe33c643aec86a5fc35ab06a668b007
|
[] |
no_license
|
RoryMurphy1997/GitHubAPIVis
|
da916dcb44382a658285da1dee208aac5ea95c71
|
c445bab452a29acf79c6545104583abc2aa11e75
|
refs/heads/master
| 2020-04-04T20:01:24.708442
| 2018-11-28T10:18:09
| 2018-11-28T10:18:09
| 156,230,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 87
|
r
|
Test.R
|
install.packages("shiny")
library(shiny)
runGitHub( "GitHubAPIVis", "RoryMurphy1997")
|
7596f42cd98348b2052c8fff956bc03c9d4e5431
|
7f19f9579b5ffcc613041b0acfa94ed5971bf2a4
|
/run_analysis.R
|
bb7d423af936a1f49dafa5887c96458fb2fd9f9f
|
[] |
no_license
|
CFCubero/Getting-CleaningData_Project
|
685de138608390ac762ba9270dc4cf06ee485de4
|
768d123a0f0898823377d3cdd10b7384c5917068
|
refs/heads/master
| 2021-01-01T18:12:13.561238
| 2015-08-20T17:45:28
| 2015-08-20T17:45:28
| 40,993,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,338
|
r
|
run_analysis.R
|
## Download and store raw information
if (file.exists("./data")){
setwd("./data")
} else {
dir.create("./data")
setwd("./data")
}
url<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
file<-"data.zip"
download.file(url,file, method="libcurl")
unzip(file, exdir = ".")
## Read Train information and all related files and create a dataset with this information
train<-read.table("./UCI HAR Dataset/train/X_train.txt",sep="",head=FALSE)
train<-cbind(train,read.table("./UCI HAR Dataset/train/y_train.txt",head=FALSE))
train<-cbind(train,read.table("./UCI HAR Dataset/train/subject_train.txt",head=FALSE))
## The same for the Test data set
test<-read.table("./UCI HAR Dataset/test/X_test.txt",sep="",head=FALSE)
test<-cbind(test,read.table("./UCI HAR Dataset/test/y_test.txt",head=FALSE))
test<-cbind(test,read.table("./UCI HAR Dataset/test/subject_test.txt",head=FALSE))
## Bind train a test datasets
dataset<-rbind(train,test)
## Read file with features for each column, filter 'mean' and 'std' features and name
## columns of the dataset.
features<-read.table("./UCI HAR Dataset/features.txt",sep="",head=FALSE,col.names=c("id","feature"),colClasses=c("numeric","character"))
activity_labels<-read.table("./UCI HAR Dataset/activity_labels.txt",sep="",head=FALSE,col.names=c("activity_id","activity_name"),colClasses=c("numeric","character"))
selected_features<-features[grepl('std|mean',features$feature),]
dataset<-dataset[,c(selected_features[,1],562,563)]
names(dataset)<-c(selected_features[,2],"activity_id","subject_id")
## include a columns with the information of the activity labels
## and reorder the columns so that dimensions are first
dataset<-merge(dataset,activity_labels)
dataset=dataset[,c(1,ncol(dataset)-1,ncol(dataset),2:(ncol(dataset)-2))]
## creation of a tidy dataset with the average of the features for each subject and activity
tidy_dataset<-aggregate(.~dataset$activity_id+dataset$activity_name+dataset$subject_id,data=dataset[,4:ncol(dataset)],FUN="mean",na.rm=TRUE)
names(tidy_dataset)[1:3]<-c("activity_id","activity_name","subject_id")
##save the tidy dataset
write.table(tidy_dataset, file = "tidyDataSet.txt", sep=" ",row.name=FALSE)
## remove all unzipped and downloaded files
file.remove(file)
unlink("./UCI HAR Dataset",recursive=TRUE)
|
8c13ec8935f1ed98eace43c9ee399937ab291525
|
55d0bb93590f13ae17f24a28a40726dcf339b91d
|
/20210615/20210615.R
|
bf4d30d464a201d3212ae8264efa251a8d8ab592
|
[] |
no_license
|
nschampions2004/slice_data_science
|
5a0c54e64490b33f1e388bf848ef8971ea839e37
|
de009ebee69fbf4ab1270ce5787c2250a602eb5d
|
refs/heads/master
| 2023-06-02T17:16:31.860543
| 2021-06-21T13:53:46
| 2021-06-21T13:53:46
| 348,585,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,535
|
r
|
20210615.R
|
library(tidyverse)
library(tidymodels)
library(DataExplorer)
library(skimr)
data_folder <- "data"
plots_folder <- "plots"
predictions_folder <- "predictions"
models_folder <- "models"
theme_set(theme_minimal())
income_data <- readxl::read_xls(file.path(data_folder, "tabn025.xls")) %>%
janitor::clean_names() %>%
tidyr::separate(col = state, sep = " \\.",
into = c("state", "trash")) %>%
filter(state != "District") %>%
select(state, median_household_income = x2010)
store_training <- read_csv(file.path(data_folder, "train.csv")) %>%
left_join(income_data, by = "state") %>%
mutate_if(is.character, as.factor)
store_test <- read_csv(file.path(data_folder, "test.csv")) %>%
left_join(income_data, by = "state") %>%
mutate_if(is.character, as.factor)
# Alright, with a good amount of EDA out of the way,
# 1. Speed run an XGboost model with _only_the numeric variables
# on 5 fold cv.... submit as my baseline.
# 2. Bring in the categorical variables accordingly
# hoping to bring rmse down
# 3. Incorporate more data????
# - Census Data (Populations)
# - median househole income per state?
# Thoughts...
# I'm starting to see why we should be pulling more data in.
# I'm seeing a whole subsection of states missing
# I've got a feeling that "Region" field won't be able to impart the necessary information
# that we'd need to do well on the testing set
#
store_recipe <- recipe(profit ~ ., store_training) %>%
update_role(id, new_role = "id") %>%
step_rm(id,
postal_code,
country,
city,
state) %>%
# step_other(city, threshold = 0.01) %>%
step_dummy(segment,
# city,
# state,
region,
category,
sub_category) %>%
step_mutate(sales_per_quantity = sales / quantity) %>%
prep()
training <- bake(store_recipe, new_data =NULL )
testing <- bake(store_recipe, new_data =store_test)
set.seed(42069) # such a hard decision
stores_folds <- vfold_cv(data = training, v = 5)
stores_xgb <- boost_tree(mode = "regression",
learn_rate = tune(),
trees = 500,
mtry = tune(),
tree_depth = tune()
) %>%
set_engine("xgboost")
stores_wkflow <- workflow() %>%
add_model(stores_xgb) %>%
add_formula(profit ~ .)
stores_metrics <- metric_set(rmse)
doParallel::registerDoParallel(5)
stores_grid <- tune_grid(stores_wkflow,
resamples = stores_folds,
grid = crossing(learn_rate = c(0.3, 0.4),
mtry = c(0.8, 1.0),
tree_depth = c(3, 5, 8)),
metrics = stores_metrics,
control = control_grid(verbose = TRUE, save_pred = TRUE))
# $10 all models fail
# check out the behavior
stores_grid %>% autoplot()
stores_grid %>%
collect_metrics() %>%
ggplot(aes(x = learn_rate, y = mean)) +
geom_point() +
labs(title = "initial training results
# higher mtry and learn rate the better with 500 trees... which is expected with 4 variables
")
# 78 for 4
# 88 for 6
# 88 for 7
# fit the
best_params <- stores_grid %>%
select_best()
final_stores_xgb <- stores_xgb %>%
finalize_model(best_params)
final_fit <- final_stores_xgb %>%
fit(formula = profit ~ ., data = training)
final_preds <- final_fit %>%
predict(testing) %>%
bind_cols(store_test) %>%
select(id, profit = .pred)
final_preds %>%
write_csv(file.path(predictions_folder, "attempt8_no_geo_markers.csv"))
|
bdb2a7ff8d696fb6c39d67bcd67a3b04e6b9c8c3
|
d47b1dff47241e469eda6c5a6302544c0b6bf189
|
/plot3.R
|
24c450854059fa6f773b96a7c8d45d5db3dd5bfa
|
[] |
no_license
|
UshaKota/ExData_Plotting1
|
8add56ccfdf5637867979c190ae9fec06985a269
|
dae9babb55736298fd42a7441977dadc8ef23e65
|
refs/heads/master
| 2021-01-12T19:43:19.532108
| 2015-01-11T16:17:26
| 2015-01-11T16:17:26
| 29,003,708
| 0
| 0
| null | 2015-01-09T06:19:49
| 2015-01-09T06:19:49
| null |
UTF-8
|
R
| false
| false
| 1,479
|
r
|
plot3.R
|
library(data.table)
## is there an elegant way to set the global var filename??
readData <- function(filename, var1, var2){
dtime <- difftime(as.POSIXct(var2), as.POSIXct(var1),unit="mins")
numRows <- as.numeric(dtime)
DT <- fread(filename, skip="1/2/2007", nrows = numRows, na.strings = c("?", ""))
##set the colnames
setnames(DT, colnames(fread(filename, nrows=0)))
DT$DateTime = as.POSIXct(paste(DT$Date, DT$Time),format = "%d/%m/%Y %H:%M:%S")
return(DT)
}
writeToPNG<- function(filename,dataTable){
#set the margin
par(mar=c(2,4,2,2))
png(filename)
plot(myDT$DateTime, myDT$Sub_metering_1,type='l', lwd=2, xlab="",ylab="Energy sub metering")
lines(myDT$DateTime, myDT$Sub_metering_2,col = "red")
lines(myDT$DateTime, myDT$Sub_metering_3, col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black", "red", "blue"))
dev.off()
}
# Driver script. Assumes household_power_consumption.txt is in the working
# directory.
filename <- "household_power_consumption.txt"
# Date window passed to readData(), which derives the row count from it.
date1 <- "2007-02-01"
date2 <- "2007-02-03"
# Load the windowed data set, then render the sub-metering chart to PNG.
myDT <- readData(filename, date1, date2)
writeToPNG("plot3.png", myDT)
|
76905036fe1c79f1e837c04808033c3debf884f3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/broom/examples/tidy.pyears.Rd.R
|
509fb5d4046ea38edf733fc28c3b689ab3d8d5fb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
tidy.pyears.Rd.R
|
library(broom)
### Name: tidy.pyears
### Title: Tidy a(n) pyears object
### Aliases: tidy.pyears pyears_tidiers
### ** Examples
library(survival)
# Bucket diagnosis year and age into one-year intervals for the
# person-years tabulation.
temp.yr <- tcut(mgus$dxyr, 55:92, labels=as.character(55:91))
temp.age <- tcut(mgus$age, 34:101, labels=as.character(34:100))
# Follow-up time: pctime when present, otherwise total follow-up futime.
ptime <- ifelse(is.na(mgus$pctime), mgus$futime, mgus$pctime)
# Event indicator: 1 when pctime is observed, 0 otherwise.
pstat <- ifelse(is.na(mgus$pctime), 0, 1)
# Person-years fit; days are converted to years via /365.25.
pfit <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus,
data.frame=TRUE)
tidy(pfit)
glance(pfit)
# if data.frame argument is not given, different information is present in
# output
pfit2 <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus)
tidy(pfit2)
glance(pfit2)
|
f9bd41f5f9ada9e944b9a52d41888c57dac46d62
|
12a7f100025cba229ca5cf635883c657c165c813
|
/man/env.sp.Rd
|
2bd922d0dcd678afd986eb59ae50dd22a0a5415e
|
[] |
no_license
|
cran/EnvNJ
|
d205e51da116d906e3e9fc54342e1db2084241e8
|
e5e6a81a21b56cd9eabe67f964b1cea227303b8d
|
refs/heads/master
| 2023-08-06T22:20:52.413911
| 2021-09-27T09:50:02
| 2021-09-27T09:50:02
| 372,565,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,355
|
rd
|
env.sp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envcoding.R
\name{env.sp}
\alias{env.sp}
\title{Extract the Sequence Environments}
\usage{
env.sp(data, sp, r = 10, aa = 'all', silent = TRUE)
}
\arguments{
\item{data}{input data must be a dataframe (see details).}
\item{sp}{the species of interest (it should be named as in the input dataframe).}
\item{r}{a positive integer indicating the radius of the sequence segment considered as environment.}
\item{aa}{the amino acid(s) whose environments are going to be extracted.}
\item{silent}{logical. When FALSE the program progress is reported to alleviate loneliness.}
}
\value{
A list of environment sequences. Each element of the list is a vector containing the environment sequences around one amino acid. So, the length of the list is the same as the length of aa.
}
\description{
Extracts the sequence environments around the selected amino acid(s) in the chosen species.
}
\details{
Input data must be a dataframe where each row corresponds to an individual protein. The columns contain the sequence of the protein corresponding to the row in each species. Therefore, the columns' names of this dataframe must be coherent with the names of the OTUs being analyzed.
}
\examples{
data(bovids)
env.sp(data = bovids, sp = "Bos_taurus", r = 2)
}
\seealso{
otu.vector(), otu.space()
}
|
d2f5c4ea39a9220c1b5553da0840d62dcba01a49
|
368dee3431cd632867da90e99309139602e72725
|
/temp/CM/nhpi/new/1.SortGPS.R
|
0f69810e47a21c8537482ae8306eae46aecfd1b3
|
[] |
no_license
|
anasanz/NHBD
|
f064d8f3cb8916b87ff29a49b085a4c9c8b61f5a
|
d41eaf83e18b66bf1ce16aed08aa96327b98901e
|
refs/heads/master
| 2021-04-06T08:36:06.858463
| 2018-12-31T17:43:32
| 2018-12-31T17:43:32
| 125,345,673
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,840
|
r
|
1.SortGPS.R
|
## Assemble wolf GPS relocations from several studies (Aimee/Cyril,
## Norwegian, Swedish Aspafallet and Kukumaki collars) into one table in
## UTM zone 33N, tag seasons, drop duplicates, flag "moving" fixes by a
## speed threshold, and write per-territory summaries.
library(sp)
library(rgdal)
library(rgeos)
# ==== I. LOAD DATA ====
#setwd("~/Norway/NHBD_humans/Antonio")
# NOTE(review): rm(list=ls()) wipes the caller's workspace and the setwd()
# below hard-codes a personal Windows path -- both make the script
# non-portable; kept as-is to preserve behavior.
rm(list=ls())
# ==== 1. AIMEE CYRIL DATA ====
setwd("C:/Personal_Cloud/OneDrive/Work/Skandulv/NHBD2/nhbd_2/data")
gps1 <- read.csv("Final_GPS_Data_Aimee.csv", header = TRUE, sep = ",")
gps2 <- read.csv("Final_GPS_Data_Cyril.csv", header = TRUE, sep = ",")
# convert time
gps1$Date_time <- as.POSIXct(paste(gps1$Date,gps1$Time), format = "%m/%d/%Y %H:%M",tz = "GMT")
gps2$Date_time <- as.POSIXct(paste(gps2$DATE, gps2$UTC_TIME), format = "%m/%d/%Y %H:%M",tz = "GMT")
## merge
colnames(gps1)
colnames(gps2)
colnames(gps2)[5:6] <- c("X","Y")
gps <- rbind(gps1[,c("X","Y","Date_time","study","Study_Year","Study_Start", "Study_End")],
gps2[,c("X","Y","Date_time","study","Study_Year","Study_Start", "Study_End")]
)
# delete na coords
gps<- gps[!is.na(gps$X),]
## Reproject from the Swedish RT90-style transverse-Mercator CRS to
## UTM 33N / WGS84 ("cooords" spelling is original).
cooords <- data.frame(gps$X, gps$Y)
coordinates(cooords) <- cooords
proj4string(cooords) <- CRS("+proj=tmerc +lat_0=0 +lon_0=15.80827777777778 +k=1 +x_0=1500000 +y_0=0 +ellps=bessel +units=m +no_defs")
cooords <- spTransform(cooords, CRS("+proj=utm +zone=33 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
#
# points(coords)
gps$X <- coordinates(cooords)[,1]
gps$Y <- coordinates(cooords)[,2]
# ==== 2. NORWEGIAN DATA ====
gps3 <- read.csv("Copy of Norwegian wolf data 2015 for Cyril.csv", header = TRUE, sep = ",")
gps3$Wolf_ID_doublecheck <- as.character(gps3$Wolf_ID_doublecheck)
# just keep one ID (the one with hourly gps lcoations
gps3 <- gps3[gps3$Wolf_ID_doublecheck %in% "M1409" ,]
# check date_time
gps3$Date_time <- as.POSIXct(paste(gps3$UTC_date, gps3$UTC_time), format = "%m/%d/%Y %H:%M:%S",tz = "GMT")
## keep hourly positions
# NOTE(review): hard-coded row window 420:1329 -- verify it still matches
# the hourly segment if the input file ever changes.
gps3 <- gps3[420:1329,]
diff(gps3$Date_time)
#update the coordinates
coords <- data.frame(gps3$Longitude,gps3$Latitude)
coordinates(coords) <- coords
proj4string(coords) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
##
scan <- readOGR("C:/Personal_Cloud/OneDrive/Work/Skandulv/NHBD2/nhbd_2/data/Scandinavia_border_33N.shp")
# scan <- spTransform(scan, CRS("+proj=utm +zone=32 +ellps=WGS84 +units=m +no_defs "))
# proj4string(scan) <- "+proj=utm +zone=33 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0" # As SpatialPointsDataFrame
plot(scan)
coords <- spTransform(coords, CRS("+proj=utm +zone=33 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
#
# points(coords)
gps3$X <- coordinates(coords)[,1]
gps3$Y <- coordinates(coords)[,2]
#update fields
gps3$study <- gps3$Wolf_ID
gps3$Study_Year <- paste(gps3$Wolf_ID, format(gps3$Date_time,"%Y"), sep="_")
# NOTE(review): Study_End uses range(...)[1] (the minimum) -- likely meant
# [2]; also range() on "%d-%m-%Y" strings sorts alphabetically, not
# chronologically. Same pattern repeats for gps4 and gps6 below.
gps3$Study_Start <- range(format(gps3$Date_time,"%d-%m-%Y"))[1]
gps3$Study_End <- range(format(gps3$Date_time,"%d-%m-%Y"))[1]
#join GPS
gps <- rbind(gps,gps3[,c("X","Y","Date_time","study","Study_Year","Study_Start", "Study_End")])
points(gps$Y~gps$X, col="red", pch=16, cex=0.1)
points(gps3$Y~gps3$X, col="black", pch=16, cex=0.1)
# ==== 3. SWEDISH DATA ====
# ==== 3.1. ASPAFALLET ====
gps5 <- read.csv("GPS_Collar15766.csv", header = TRUE, sep = ",")# Aspafallet - 15766 (female, 15-01),captured 2015-01-27
gps5$Date_time <- as.POSIXct(paste(gps5$UTC_DATE, gps5$UTC_TIME), format = "%m/%d/%Y %H:%M:%S",tz = "GMT")
#date capture
gps5 <- gps5[gps5$Date_time > as.POSIXct("2015-01-31", format = "%Y-%m-%d",tz = "GMT"),]
gps4 <- read.csv("GSM15762.csv", header = TRUE, sep = ",")# Aspafallet - 15762 (male, 15-02), captured 2015-01-27
gps4$Date_time <- as.POSIXct(paste(gps4$UTC_DATE, gps4$UTC_TIME), format = "%m/%d/%Y %H:%M:%S",tz = "GMT")
gps4 <- gps4[gps4$Date_time > as.POSIXct("2015-01-31", format = "%Y-%m-%d",tz = "GMT"),]
# keep the gps 4 because having high interval but remove na coords
gps4 <- gps4[!is.na(gps4$LATITUDE),]
coords <- data.frame(gps4$LONGITUDE,gps4$LATITUDE)
coordinates(coords) <- coords
proj4string(coords) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
plot(scan)
coords <- spTransform(coords, CRS("+proj=utm +zone=33 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
points(coords)
# points(coords)
gps4$X <- coordinates(coords)[,1]
gps4$Y <- coordinates(coords)[,2]
#update fields
gps4$study <- "Aspafallet"
gps4$Study_Year <- paste("Aspafallet", format(gps4$Date_time,"%Y"), sep="_")
gps4$Study_Start <- range(format(gps4$Date_time,"%d-%m-%Y"))[1]
gps4$Study_End <- range(format(gps4$Date_time,"%d-%m-%Y"))[1]
#join GPS
gps <- rbind(gps,gps4[,c("X","Y","Date_time","study","Study_Year","Study_Start", "Study_End")])
plot(scan)
points(gps$Y~gps$X, col="red", pch=16, cex=0.1)
# ==== 3.2. KUKUMAKI ====
gps6 <- read.csv("GSM15761.csv", header = TRUE, sep = ",")# 15761 (female, 13-01), captured 2015-01-29
gps6$Date_time <- as.POSIXct(paste(gps6$UTC_DATE, gps6$UTC_TIME), format = "%m/%d/%Y %H:%M:%S",tz = "GMT")
#date capture
gps6 <- gps6[gps6$Date_time > as.POSIXct("2015-02-03", format = "%Y-%m-%d",tz = "GMT"),]
# NOTE(review): diff() returns n-1 values, so this filter drops the last
# row by recycling/alignment rather than by an explicit rule -- confirm
# the intended fix-interval filter (<70 of diff's default time unit).
gps6 <- gps6[which(diff(gps6$Date_time)<70),]
range(gps6$Date_time)
# keep the gps 4 because having high interval but remove na coords
gps6 <- gps6[!is.na(gps6$LATITUDE),]
coords <- data.frame(gps6$LONGITUDE, gps6$LATITUDE)
coordinates(coords) <- coords
proj4string(coords) <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
coords <- spTransform(coords, CRS("+proj=utm +zone=33 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
points(coords)
# points(coords)
gps6$X <- coordinates(coords)[,1]
gps6$Y <- coordinates(coords)[,2]
#update fields
gps6$study <- "Kukumaki"
gps6$Study_Year <- paste("Kukumaki", format(gps6$Date_time,"%Y"), sep="_")
gps6$Study_Start <- range(format(gps6$Date_time, "%d-%m-%Y"))[1]
gps6$Study_End <- range(format(gps6$Date_time, "%d-%m-%Y"))[1]
#join GPS
gps <- rbind(gps, gps6[,c("X", "Y", "Date_time", "study", "Study_Year", "Study_Start", "Study_End")])
plot(scan)
points(gps$Y~gps$X, col="red", pch=16, cex=0.1)
# ==== 4. SEASON ====
## Season coding: "S" (summer) for May-July, "W" (winter) otherwise;
## fixes from August-November are discarded entirely.
gps$Month <- as.numeric(format(gps$Date_time, "%m"))
gps$Year <- as.numeric(format(gps$Date_time, "%Y"))
gps$Season <- "W"
gps$Season[gps$Month %in% c(5:7)] <- "S"
gps <- gps[-which(gps$Month %in% c(8:11)), ]
## Territory-year-season key, e.g. "Aspafallet_2015_W".
gps$Study_year <- unlist(lapply(strsplit(as.character(gps$study), '_'),function(x) x[1]))
gps$Study_year <- paste(gps$Study_year, gps$Year, gps$Season ,sep="_" )
# ==== 5. REMOVE DUPLICATES ====
#REMOVE GPS LOCATIONS HAVING SIMILAR x y and date time
length1 <- tapply(gps$Study_year, gps$Study_year,length)
gps <- gps[!duplicated(paste(gps[,c("X")], gps[,c("Y")], gps[,c("Date_time")])),]
# ==== 6. IDENTIFY MOVING GPS LOCATIONS ====
#buffer size
BufferWidth <- 100
speed <- 200#500 # (200m per hour)
moving_used <- list()
#
gps$unique.id <- 1:nrow(gps)
gps$move <- 0
ID <- unique(gps$Study_year)
## Per territory-year: compute step speeds between consecutive fixes and
## flag fixes faster than the `speed` threshold as "moving" (move = 1).
for (i in 1:length(ID)){
tmp <- gps[gps$Study_year==ID[i], ]
tmp <- tmp[order(tmp$Date_time),]
# get X and Y coordinates to caclulate step length
X <- tmp$X[1:(nrow(tmp)-1)]
Y <- tmp$Y[1:(nrow(tmp)-1)]
X1 <- tmp$X[2:(nrow(tmp))]
Y1 <- tmp$Y[2:(nrow(tmp))]
# get distance and time
dist <- sqrt((X-X1)^2 + (Y-Y1)^2)
time <- diff(tmp$Date_time)
# identify moving locations
# NOTE(review): speed_m_H[1] stays NA, so the > comparison below yields NA
# for the first fix of every track; it is never flagged as moving.
tmp$speed_m_H[2:nrow(tmp)] <- dist/as.numeric(time, units="hours")
id.move <- tmp$unique.id[tmp$speed_m_H>speed]
gps$move[gps$unique.id %in% id.move] <- 1
coordinates(tmp) <- data.frame(tmp$X, tmp$Y)
#create buffer
buffer <- gBuffer(tmp, width = BufferWidth, byid =F)
buffer <- disaggregate(buffer)
# plot check
plot(buffer)
points(tmp[tmp$unique.id%in%id.move, ], col="red")
points(tmp[which(!tmp$unique.id %in% id.move), ], col="blue")
}
# During the late-winter period (1 March - 30 April) male bears start to
# The spring period (1 May - 30 June)
# ==== II. MAKE A SUMMARY====
## One summary row per territory-year: date span, fix counts, moving-fix
## count, and the (possibly buggy, see NOTE above) study start/end fields.
ID <- unique(gps$Study_year)
date.summary <- matrix(NA, nrow=length(ID), ncol=9)
# check the time of the predation study
# d$date <- as.POSIXct(strptime(d$date, "%d/%m/%Y"))
for (i in 1:length(ID)){
fc <- gps[which(gps$Study_year == ID[i]), ]
date.summary[i,1] <- as.character(ID[i])
date.summary[i,2] <- as.character(as.Date(min(fc$Date_time)))
date.summary[i,3] <- as.character(as.Date(max(fc$Date_time)))
date.summary[i,4] <- as.character(diff(range(fc$Date_time)))
date.summary[i,5] <- length(fc$Date_time)
date.summary[i,6] <- sum(fc$move==1)
date.summary[i,7] <- NA
date.summary[i,8] <- as.character(fc$Study_Start[1])
date.summary[i,9] <- as.character(fc$Study_End[1])
}
colnames(date.summary) <- c("Territory_year", "Start", "End","Range","Nblocations", "nblocsmoving", "Season","Start", "End" )
setwd("C:/Personal_Cloud/OneDrive/Work/Skandulv/NHBD2/nhbd_2/data/new")
write.csv(date.summary, file="gps.datasummary.csv")
#
setwd("C:/Personal_Cloud/OneDrive/Work/Skandulv/NHBD2/nhbd_2/data/new")
write.csv(gps, file="gps.dataCM.csv")
|
863651429b0210066e949a5d5e46258cc638790d
|
d5af8a438820926528dc00601adbba5aa71b9efb
|
/scripts/VIF_example.R
|
738fc9d10b99b282011066975bd942533f257e78
|
[] |
no_license
|
mdsi-disaster/part-a
|
0a1a30dc67f6f12f4f76f31f7ce75e99b56ef4e3
|
0431775f61250e3461419cbc7c3d0d762b39d86d
|
refs/heads/master
| 2022-12-25T17:41:46.477769
| 2020-10-09T09:38:32
| 2020-10-09T09:38:32
| 288,412,150
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,257
|
r
|
VIF_example.R
|
## Fit a hedonic house-price regression and inspect it for
## multicollinearity via variance inflation factors.
library(car)
library(tidyverse)
library(here)
library(gvlma)
data <- read.csv(here('data','mn_model_df.csv'),stringsAsFactors = TRUE)
## Row id used to recover the held-out rows after sampling.
data$id <- 1:nrow(data)
## 75/25 train/test split; anti_join keeps the rows NOT sampled into train.
train <- data%>%sample_frac(.75)
test <- anti_join(data,train,by='id')
## log-price regression on crime, property-mix, income and population
## covariates; fitted on the training split only.
model <- lm(formula = log(house_price)~crime_murder + crime_rape + crime_robbery +
crime_arson + property_house + property_house_per + property_townhouse +
property_townhouse_per + property_low_rise_per + property_mid_rise +
property_mid_rise_per + property_high_rise_per + income +
unemployment_rate + population,data=train)
#multicollinearity
#Variance Inflation Factor
vif(model)
## Variance inflation factors (and optional full collinearity diagnostics)
## for a fitted linear model, computed via the mctest package.
##
## linear.model:    a fitted lm object.
## no.intercept:    set TRUE for models fitted without an intercept so the
##                  first design-matrix column is not dropped.
## all.diagnostics: if TRUE, return every imcdiag() diagnostic instead of
##                  only the VIF column.
## plot:            if TRUE, also draw mctest's collinearity plot.
VIF <- function(linear.model, no.intercept=FALSE, all.diagnostics=FALSE, plot=FALSE) {
  # Fail fast with a clear message: the original bare require() only warned
  # on a missing package and then crashed later inside imcdiag().
  if (!require(mctest)) {
    stop("VIF() requires the 'mctest' package", call. = FALSE)
  }
  # Design matrix, dropping the intercept column unless told otherwise.
  design.matrix <- if (no.intercept) {
    model.matrix(linear.model)
  } else {
    model.matrix(linear.model)[, -1]
  }
  if (plot) {
    mc.plot(design.matrix, linear.model$model[1])
  }
  # Either only the VIF column of the diagnostics, or the full set.
  if (all.diagnostics) {
    imcdiag(linear.model)
  } else {
    imcdiag(linear.model, method = 'VIF')$idiags[, 1]
  }
}
## Custom wrapper output (mctest-based VIFs).
VIF(model)
# NOTE(review): car was already attached at the top of this script; this
# second library(car) call is redundant (but harmless).
library(car)
## Rule of thumb: sqrt(VIF) > 2 flags problematic collinearity.
sqrt(vif(model)) > 2
VIF(model, plot=TRUE)
|
3fdcaf0704714b19f44393aa8b17309cbc61424d
|
17702ad07b28844fda4799bba3da728bfdc5704b
|
/modules/data.land/R/InventoryGrowthFusion.R
|
974afc3813dcfd40ae7c97d585681f7e47cade6f
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
yogeshdarji/pecan
|
c4d620590f37ab3928f1d28b006456fee0b96b7b
|
109381603fab9f9927ad18c95b9389cea68deef1
|
refs/heads/master
| 2021-06-15T05:56:03.976607
| 2017-03-25T17:33:58
| 2017-03-25T17:33:58
| 86,267,939
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,070
|
r
|
InventoryGrowthFusion.R
|
##' @name InventoryGrowthFusion
##' @title InventoryGrowthFusion
##' @description this code fuses forest inventory data with tree growth data (tree ring or dendrometer band)
##' for the same plots. Code is a rewrite of Clark et al 2007 Ecol Appl into JAGS
##'
##' @param data list of data inputs; this function reads data$y (growth
##'   increments, individuals x time), data$z (DBH), data$ni and data$nt,
##'   and the JAGS model additionally expects the prior parameters
##'   a_dbh/r_dbh, a_inc/r_inc, a_add/r_add plus x_ic and tau_ic
##' @param n.iter number of MCMC iterations for the final sampling run
##' @param random whether or not to include individual and year random effects
##' @param burnin_plot if TRUE, plot the burn-in chains for visual diagnostics
##' @note Requires JAGS
##' @return an mcmc.list object
##' @export
InventoryGrowthFusion <- function(data, n.iter, random = TRUE, burnin_plot = FALSE) {
library(rjags)
## Variables monitored during burn-in vs. the full output set.
burnin.variables <- c("tau_add", "tau_dbh", "tau_inc", "mu")
out.variables <- c("x", "tau_add", "tau_dbh", "tau_inc", "mu")
## JAGS model template. Lines tagged #RANDOM are commented out of the BUGS
## code by default and are activated below (via gsub) when random = TRUE;
## ##PROCESS marks where random-effect terms get spliced into the
## process-model mean.
TreeDataFusionMV <- "
model{
### Loop over all individuals
for(i in 1:ni){
#### Data Model: DBH
for(t in 1:nt){
z[i,t] ~ dnorm(x[i,t],tau_dbh)
}
#### Data Model: growth
for(t in 2:nt){
inc[i,t] <- x[i,t]-x[i,t-1]
y[i,t] ~ dnorm(inc[i,t],tau_inc)
}
#### Process Model
for(t in 2:nt){
Dnew[i,t] <- x[i,t-1] + mu ##PROCESS
x[i,t]~dnorm(Dnew[i,t],tau_add)
}
#RANDOM ## individual effects
#RANDOM ind[i] ~ dnorm(0,tau_ind)
## initial condition
x[i,1] ~ dnorm(x_ic,tau_ic)
} ## end loop over individuals
#RANDOM ## year effects
#RANDOM for(t in 1:nt){
#RANDOM year[t] ~ dnorm(0,tau_yr)
#RANDOM }
#### Priors
tau_dbh ~ dgamma(a_dbh,r_dbh)
tau_inc ~ dgamma(a_inc,r_inc)
tau_add ~ dgamma(a_add,r_add)
#RANDOM tau_ind ~ dgamma(1,0.1)
#RANDOM tau_yr ~ dgamma(1,0.1)
mu ~ dnorm(0.5,0.5)
}"
Pformula <- NULL
## RANDOM EFFECTS: strip the #RANDOM tags so those BUGS lines become active
## and add the random-effect terms to the process-model mean.
if (random) {
TreeDataFusionMV <- gsub(pattern = "#RANDOM", " ", TreeDataFusionMV)
Pformula <- "+ ind[i] + year[t]"
burnin.variables <- c(burnin.variables, "tau_ind", "tau_yr")
out.variables <- c(out.variables, "tau_ind", "tau_yr", "ind", "year")
}
if (!is.null(Pformula)) {
TreeDataFusionMV <- sub(pattern = "##PROCESS", Pformula, TreeDataFusionMV)
}
## state variable initial condition: back-calculate the DBH trajectory by
## subtracting cumulative future increments from the final measured DBH
z0 <- t(apply(data$y, 1, function(y) {
-rev(cumsum(rev(y)))
})) + data$z[, ncol(data$z)]
## JAGS initial conditions: one list per chain, with tau_add jittered per
## chain from a bootstrap resample of the increments
nchain <- 3
init <- list()
for (i in seq_len(nchain)) {
y.samp <- sample(data$y, length(data$y), replace = TRUE)
init[[i]] <- list(x = z0,
tau_add = runif(1, 1, 5) / var(diff(y.samp), na.rm = TRUE),
tau_dbh = 1,
tau_inc = 1500,
tau_ind = 50,
tau_yr = 100,
ind = rep(0, data$ni),
year = rep(0, data$nt))
}
## compile JAGS model
j.model <- jags.model(file = textConnection(TreeDataFusionMV), data = data, inits = init, n.chains = 3)
## burn-in (capped at 2000 iterations regardless of n.iter)
jags.out <- coda.samples(model = j.model,
variable.names = burnin.variables,
n.iter = min(n.iter, 2000))
if (burnin_plot) {
plot(jags.out)
}
## run MCMC
coda.samples(model = j.model, variable.names = out.variables, n.iter = n.iter)
} # InventoryGrowthFusion
|
cc5b16a32cdb8824c3296905e9243d100b3ae0e3
|
3e9147968c3800363d57e8b1b0652f9ef0e012b5
|
/tests/testme/RtestsExpectations/integrative+T3.R
|
8851b543d28641a37b58e5d725d48d2225f46142
|
[] |
no_license
|
sboehringer/testme
|
39471fa197d7c0891d08359eb81732d5e007e6df
|
6e4c6c89255824d3bf8fe6a1b8106d3579150327
|
refs/heads/master
| 2020-09-23T06:17:03.656726
| 2020-05-01T16:42:06
| 2020-05-01T16:42:06
| 225,425,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 249
|
r
|
integrative+T3.R
|
structure("Error in try(if (4 != E2) stop(), silent = T) : \n", class = "try-error", condition = structure(list( message = "", call = doTryCatch(return(expr), name, parentenv, handler)), class = c("simpleError", "error", "condition" )))
|
af24107ef50b286c3926ddebdacb8c18e6e28afe
|
162ef5c93ef47fb5ce1944815e1303e179093af6
|
/R/heatmap.R
|
1cd016e91ba9f7929ab3c4f242c68d856d9f967d
|
[] |
no_license
|
MichelledeGroot/virusunburst
|
98c6bbccba7681c1aa421494bbe8c2efe9a253bf
|
b0aae2ba64b2733f343d8efd873d15c9f571b512
|
refs/heads/master
| 2023-06-26T11:09:58.111472
| 2021-08-04T08:49:33
| 2021-08-04T08:49:33
| 382,149,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
heatmap.R
|
#' Create a heatmap
#'
#' This function loads a ViruScreen output file as a dataframe and creates a heatmap
#' of all columns, keeping one row per unique species.
#'
#' @param csv_file Path to the input file
#' @return The configured d3heatmap widget (the value of the final pipe chain).
#' @import tidyverse
#' @import d3heatmap
#' @export
make_heatmap <- function(csv_file){
full_taxonomy <- read.csv(csv_file
,header=T
,stringsAsFactors = FALSE
)
# Deduplicate to one row per species, keeping the record with the highest
# Total_reads: sort by species then descending Total_reads, drop later dups.
full_taxonomy = full_taxonomy[order(full_taxonomy[,'species'],-full_taxonomy[,'Total_reads']),]
full_taxonomy = full_taxonomy[!duplicated(full_taxonomy$species),]
# Ref_GC is excluded from the heatmap columns.
full_taxonomy = subset(full_taxonomy, select = -c(Ref_GC) )
# Species names become the y-axis labels via the row names.
row.names(full_taxonomy) <- full_taxonomy$species
# Column-wise scaling makes differently-ranged columns comparable;
# d3heatmap's warnings are deliberately suppressed.
suppressWarnings(d3heatmap(full_taxonomy, scale = "column", col = 'YlOrRd',
main = "Heatmap of all species' details", dendrogram = "none") %>%
hmAxis("y", title = "species", location = 'right', font.size = 8) %>%
hmAxis("x", title = "columns", location = 'bottom', font.size = 12) %>%
hmCells(font.size = 8, color = 'blue') %>%
hmLegend(show = T, title = "Legend", location = "tl"))
}
|
aeb06922fba18d40655e94b3fc0acf12dea1c9f8
|
37707b403d3be700a7d34c137c781f96e763588c
|
/07-Modeling_Practice/modeling_practice.R
|
37ae36dab47b8997d23e02ce385bcf71fc3643d9
|
[] |
no_license
|
davidtnly/R
|
2c939e56d7bc7afeb1f5fbb3ed7800041832979a
|
b59fa6f06f839d844f5a899c434d06b0f3a245fd
|
refs/heads/master
| 2020-05-22T09:55:46.905765
| 2019-07-24T03:50:42
| 2019-07-24T03:50:42
| 186,274,640
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,470
|
r
|
modeling_practice.R
|
library(tidyverse)
library(MASS)
library(gridExtra)
library(ggthemes)
## ggplot theme Reference: http://ggplot2.tidyverse.org/reference/theme.html
## Two-sided p-value for a t statistic of 2.201 on 429 degrees of freedom.
pt(2.201, df = 429, lower.tail = FALSE) * 2
# Graphing
# NOTE(review): base mtcars has no `car name`, mpg_z or mpg_type columns --
# this chart presumably expects a preprocessed copy of mtcars (z-scored mpg
# with above/below labels); it will error on the raw dataset. Confirm the
# intended preprocessing step.
ggplot(mtcars, aes(x = `car name`, y = mpg_z, label = mpg_z)) +
geom_bar(stat = 'identity', aes(fill = mpg_type), width = .5) +
geom_point() +
scale_fill_manual(name = "Mileage",
labels = c("Above Average", "Below Average"),
values = c("above" = "#00ba38", "below" = "#f8766d")) +
labs(subtitle = "Normalised mileage from 'mtcars'",
title = "Diverging Bars") +
coord_flip() +
theme(
legend.position = "bottom",
legend.key.size = unit(0.2, "cm"),
legend.background = element_rect(colour = "black", fill = "lightblue"),
plot.title = element_text(hjust = 0.5, face = "bold", lineheight = 0.5),
plot.subtitle = element_text(hjust = 0.5, size = 8, face = "italic", lineheight = 0.5),
plot.background = element_rect("lightblue"),
axis.title = element_text(colour = "black", face = "bold"),
axis.text = element_text(colour = "black"),
axis.line = element_line(size = 3, colour = "grey80"),
panel.grid.major = element_line(colour = "black"),
panel.grid.minor = element_line(colour = "black")
)
# “Probability deals with predicting the likelihood of future events,
# while statistics involves the analysis of the frequency of past events.”
|
62aed022b5a434af5705f775cd918a362ee43d41
|
1fc421ae8d2d0cc87944ec21ea53b37b1ef02544
|
/man/DevFactors.t1.Predictive.Rd
|
a80e50b0ac0a58cc2638334c92c09cefdf803f76
|
[] |
no_license
|
EduardoRamosP/MackNet
|
5f3df28a30385e83c4d3de0eb10606a416499c92
|
1281f90ccad86df2f496b6e1a33aeab18cf81807
|
refs/heads/master
| 2022-12-18T22:17:47.097987
| 2020-09-21T20:30:55
| 2020-09-21T20:30:55
| 296,931,038
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 526
|
rd
|
DevFactors.t1.Predictive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DevFactors.t1.Predictive.R
\name{DevFactors.t1.Predictive}
\alias{DevFactors.t1.Predictive}
\title{DevFactors.t1.Predictive}
\usage{
DevFactors.t1.Predictive(Triangle.Cumulative)
}
\arguments{
\item{Triangle.Cumulative}{Cumulative triangle}
}
\value{
Predictive development factors assuming the payment forecasted in t is true.
}
\description{
Calculation of the predictive development factors, assuming that the payment forecasted in t is true.
}
|
f71142471657883e8e0a87d5a00f58810cef55c9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MetProc/examples/subset_met.Rd.R
|
1e96992715dd733ebe5fda1d064f052a44f5202b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
subset_met.Rd.R
|
library(MetProc)
### Name: subset_met
### Title: Group Metabolites based on Pooled Plasma Missing Rate
### Aliases: subset_met
### ** Examples
library(MetProc)
#Read in metabolomics data (sample file shipped with the package)
metdata <- read.met(system.file("extdata/sampledata.csv", package="MetProc"),
headrow=3, metidcol=1, fvalue=8, sep=",", ppkey="PPP", ippkey="BPP")
#Get indices of pooled plasma and samples
groups <- get_group(metdata,"PPP","X")
#Calculate a pooled plasma missing rate and sample missing rate
#for each metabolite in data
missrate <- get_missing(metdata,groups[['pp']],groups[['sid']])
#Group metabolites into 5 groups based on pooled plasma
#missing rate; .02 and .95 are the outer rate cut-offs (see ?subset_met)
subsets <- subset_met(metdata,missrate[['ppmiss']],5,.02,.95)
|
3e6ace656e6ff5b3e496bdee94ff2c69229daeff
|
ee2aa3368bd279171ae514fc2e5f9138364a6dc1
|
/man/rWEO-package.Rd
|
339cdd2d511876bc4572c62a320a199be4f54037
|
[] |
no_license
|
mingjerli/rWEO
|
f297f0ad0cd94c37cd2e9f15a3d46c189ee30180
|
c23b3bc7523383fa7f33d7f0e4219e3d207e6406
|
refs/heads/master
| 2020-06-02T14:10:35.554111
| 2014-10-22T14:55:54
| 2014-10-22T14:55:54
| 25,585,923
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,005
|
rd
|
rWEO-package.Rd
|
\name{rWEO-package}
\alias{rWEO-package}
\alias{rWEO}
\docType{package}
\title{
ACCESS LATEST IMF WORLD ECONOMIC OUTLOOK (WEO) DATA IN R
}
\description{
The rWEO package allows R to directly access World Economic Outlook data.
The World Economic Outlook is a survey conducted and published by the International Monetary Fund.
It is published twice a year and partly updated three times a year.
It portrays the world economy in the near and medium term (roughly the next 4 years).
WEO forecasts include the macroeconomic indicators, such as GDP, inflation, current account and fiscal balance of more than 180 countries around the globe. It also deals with major economic policy issues.
}
\details{
\tabular{ll}{
Package: \tab rWEO\cr
Type: \tab Package\cr
Version: \tab 0.1.1\cr
Date: \tab 2014-10-21\cr
License: \tab GPL-3\cr
}
}
\author{
Ming-Jer Lee
Maintainer: Ming-Jer Lee <mingjerli@gmail.com>
}
\references{
International Monetary Fund:
http://www.imf.org/
}
\keyword{ package }
\seealso{
}
\examples{
}
|
f7cdeac94a66080ef6642f31f9fd5d0119fdcdfd
|
aea88f9190f1f7b52815bf3035e4e470edf8d4e8
|
/notebooks/diagnose.R
|
54e42f800fede2326ec40ce4b7efcfe97c35bf6e
|
[
"MIT"
] |
permissive
|
marcpabst/bayesian-mice-decisions
|
2fe9ef6e772783073d87325c26e5b3553b3e5854
|
f393cfe506dfb0d948d1105abe5b3543f08d4062
|
refs/heads/master
| 2023-07-26T07:28:01.650109
| 2021-08-23T11:17:18
| 2021-08-23T11:17:18
| 394,764,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,545
|
r
|
diagnose.R
|
# This script uses `cmdstanr` to run the Stan model. Using R gives us access to a wider variety of tools, including those included in the `bayesplot` package.
# load stuff
library(BayesHMM)
library(bayesplot)
library(tidyverse)
library(bayestestR)
library(ggplot2)
library(abind)
library(cmdstanr)
# load data from csv
df <- read.csv("./data/ashwood.csv")
# Stack the per-session (stimulus, bias) design matrices into one matrix,
# one row per trial across all sessions.
x <- df %>% group_by(session) %>%
group_split() %>%
lapply(select, "stimulus", "bias") %>%
lapply(as.matrix) %>%
abind(along=1)
# Same per-session stacking for the response; choice is recoded as
# 1 - choice (presumably to flip the response coding -- confirm against
# the Stan model's convention).
y <- df %>% group_by(session) %>%
mutate(choice = 1 - choice) %>%
group_split() %>%
lapply(select, "choice") %>%
lapply(as.matrix) %>%
abind(along=1)
# Number of trials per session.
# NOTE(review): naming this variable T masks R's T shorthand for TRUE from
# here on; consider renaming (kept to preserve behavior).
T <- df %>% group_by(session) %>%
count() %>%
ungroup() %>%
select(n) %>%
as.matrix() %>%
as.vector()
# specify input data for the model; K/R/M are dimensions consumed by
# glm-hmm.stan (see that file for their meaning)
data <- list(
x = x,
y = drop(y),
T = T,
K = 3,
R = 1,
M = 4,
N = length(T),
I = sum(T)
)
# the model
model <- cmdstan_model("./stan-models/glm-hmm.stan")
# fit model
fit <- model$sample(
data = data, # named list of data
chains = 1, # number of Markov chains
refresh = 5, # print progress every 5 iterations
iter_warmup = 1000,
iter_sampling = 1000
)
# load the posterior samples (round-trip through rstan for bayesplot)
stanfit <- rstan::read_stan_csv(fit$output_files())
# plot the posterior samples
mcmc_areas_ridges(stanfit, regex_pars = "betas")
# extract posterior predictive samples
ypred <- rstan::extract(stanfit, "ypred")
# plot the posterior predictive samples against the observed choices
ppc_bars(
drop(y),
ypred$ypred
)
|
02d1819756061048d56133508057cdb7dbfc483c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RMOA/examples/MOA_classification_ensemblelearning.Rd.R
|
00b562868bfe9bfc14f872b359518cfcaf74a703
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
MOA_classification_ensemblelearning.Rd.R
|
library(RMOA)
### Name: MOA_classification_ensemblelearning
### Title: MOA classification using ensembles
### Aliases: MOA_classification_ensemblelearning AccuracyUpdatedEnsemble
### AccuracyWeightedEnsemble ADACC DACC LeveragingBag LimAttClassifier
### OCBoost OnlineAccuracyUpdatedEnsemble OzaBag OzaBagAdwin OzaBagASHT
### OzaBoost OzaBoostAdwin TemporallyAugmentedClassifier
### WeightedMajorityAlgorithm
### ** Examples
# Configure and instantiate an OzaBoostAdwin boosting ensemble with the
# default MOA options for that model.
ctrl <- MOAoptions(model = "OzaBoostAdwin")
mymodel <- OzaBoostAdwin(control=ctrl)
# Printing shows the (untrained) model specification.
mymodel
|
c025143ef991b5390d321e4db9bc501f89bf088b
|
cf88e5d651a4312623a922be1d093f0c2ff6074c
|
/Source code/Survey Analysis.R
|
5cb402082e086f75fdac09b8ce59ae9d28e71317
|
[] |
no_license
|
RezaSadeghiWSU/Sleep-quality-in-caregivers
|
8853286bf065dff5ae49639d7147b237fe1da066
|
ecf23f8937f376804033e05bdaa5b79e5ccc7db7
|
refs/heads/master
| 2021-06-07T17:45:19.665599
| 2021-05-25T00:19:20
| 2021-05-25T00:19:20
| 147,614,619
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,072
|
r
|
Survey Analysis.R
|
# Author: Reza Sadeghi
# Email: reza@knoesis.org; sadeghi.2@wright.edu
# Date: 4/22/2018
# Description: Servay analysis of Dementia Caregiver managment
# Import data
#library(readxl)
#Sleep_Survey <- read_excel("C:/Users/Reza Sadeghi/Desktop/Dementia Caregiver Sleep Dataset/Sleep Survey.xlsx")
# NOTE(review): this expects an object named `survey` to already exist in
# the workspace (the commented-out read_excel call above was the original
# loader) -- the script errors otherwise.
Sleep_Survey <- survey
#View(Sleep_Survey)
#-------------------------------------------------Wake up during night------------------------------------------------
# NOTE(review): "Particiapnt #" is misspelled in the source spreadsheet
# itself; do not "fix" it here or the column lookups return NULL.
RecordNumbers<-length(Sleep_Survey$`Particiapnt #`)
ParticipantID<-unique(Sleep_Survey$`Particiapnt #`)
## Question 8: frequency of each time-to-fall-back-asleep category.
H<- summary (as.factor(Sleep_Survey$`Question 8`))
M<- c("Less than thirty minutes", "Between thirty and sixty minutes", "More than sixty minutes", "Not applicable")
barplot(H,names.arg = M,xlab = "Categories",ylab = "Frequency",col = "blue", main = "The time takes participants to fall back to sleep",border = "red")
#-------------------------------------------------Quality of Sleep Vs. Feeling rest------------------------------------------------
## Q9 = sleep-quality score (0 best .. 4 worst), Q10 = rested/tired flag.
pp<- Sleep_Survey$`Question 9`
pg<- Sleep_Survey$`Question 10`
New<- NULL
pp<- as.integer(pp)
New$pp<- pp
pg<- as.factor(pg)
library(plyr)
pg<-revalue(pg, c("1"="Feeling Rest", "0"="Tired"))
summary(pg)
New$pg<- pg
New <- as.data.frame(New)
## Box plot of quality score by mood, with jittered raw points overlaid.
a<-boxplot(pp~pg,data=New,ylab="Quality of Sleep", xlab= "Mood")
stripchart(pp~pg, vertical = TRUE, data=New,
method = "jitter", add = TRUE, pch = 20, col = 'blue')
## Re-read Q9/Q10 as labelled factors for the mosaic view.
pp<- Sleep_Survey$`Question 9`
pg<- Sleep_Survey$`Question 10`
pp<- as.factor(pp)
pg<- as.factor(pg)
pp<-revalue(pp, c("0"="Very Good", "1"="Good", "2"="Okay", "3"="Fairly Bad", "4"="Bad"))
pg<-revalue(pg, c("1"="Feeling Rest", "0"="Feeling Tired"))
Mood<-pg
Sleep_Quality<-pp
New <- table(Mood, Sleep_Quality)
mosaicplot(New,main = "The relation of Sleep quality and tiredness", xlab = "Mood", ylab = "Sleep quality")
library(vcd)
mosaic(New, shade=T, legend=T, pop= FALSE)
## Annotate two mosaic cells (cells 3 and 4 of the flattened table).
labs <- round(prop.table(New), 2)
labs <- as.data.frame(labs)
labs$Freq <- " "
labs$Freq[3] <- "(a)"
labs$Freq[4] <- "(b)"
# NOTE(review): as.data.table() needs the data.table package, which is
# never attached in this script -- confirm it is loaded elsewhere.
labeling_cells(text = as.data.table (labs), margin = 0)(New)
print("The percentage of caregivers Feel tired")
length(Mood[which(Mood=="Feeling Tired")])/length(Mood)
#-------------------------------------------------Quality of Sleep Vs. Time of Sleep------------------------------------------------
## Q4 = sleep length in hours, plotted against the Q9 quality categories.
pp<- Sleep_Survey$`Question 4`
pg<- Sleep_Survey$`Question 9`
New<- NULL
pp<- as.double(pp)
New$pp<- pp
pg<- as.factor(pg)
pg<- factor(pg, levels = c("4", "3", "2", "1", "0"))
pg<-revalue(pg, c("0"="Very Good", "1"="Good", "2"="Okay", "3"="Fairly Bad", "4"="Bad"))
library(plyr)
New$pg<- pg
New <- as.data.frame(New)
a<-boxplot(pp~pg,data=New,ylab="Length of sleep (hour)", xlab= "Quality of Sleep")
stripchart(pp~pg, vertical = TRUE, data=New,
method = "jitter", add = TRUE, pch = 20, col = 'blue')
#-------------------------------------------------The portions Sleep Quality------------------------------------------------
## Share of respondents in each sleep-quality band (typos in the printed
## strings are runtime output, left untouched).
print("The portion of Bad and Fairly Bad sleepint")
length(pg[which(pg=="Fairly Bad" | pg=="Bad")])/length(pg)
print("The portion of sleepint Okay")
length(pg[which(pg=="Okay")])/length(pg)
print("The portion of Good sleepint")
length(pg[which(pg=="Good")])/length(pg)
print("The portion of Very good sleepint")
length(pg[which(pg=="Very Good")])/length(pg)
#-------------------------------------------------The Statistical Features------------------------------------------------
# setwd("C:\\Users\\Reza Sadeghi\\Desktop\\Dementia Caregiver Sleep Dataset")
# FeatureSet <- read.csv("FeatureSet5.csv",header = T,as.is = T)
# #View(FeatureSet)
# # removing unlabeled records
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==2 & FeatureSet$Week==2 & FeatureSet$Day==7),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==3 & FeatureSet$Week==2 & FeatureSet$Day==2),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==3 & FeatureSet$Week==2 & FeatureSet$Day==3),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==3 & FeatureSet$Week==2 & FeatureSet$Day==4),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==4 & FeatureSet$Week==1 & FeatureSet$Day==7),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==5 & FeatureSet$Week==1 & FeatureSet$Day==8),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==5 & FeatureSet$Week==2 & FeatureSet$Day==7),]
# FeatureSet<-FeatureSet[-which(FeatureSet$Participant==7 & FeatureSet$Week==2 & FeatureSet$Day==6),]
# pg<- Sleep_Survey$`Question 9`
# pg<- as.integer (pg)
# FeatureSet$Sleep_Quality<- pg
#
# library(corrplot)
# corrplot(cor(FeatureSet[,1:38]), type = "upper", order = "hclust", tl.col = "black", tl.srt = 45)
#-------------------------------------------------The distribution of Sleep Quality ccategories------------------------------------------------
library(lattice)
#barchart(as.factor(FeatureSet$Sleep_Quality),ylab=c("Very Good", "Good", "Okay", "Fairly Bad", "Bad"))
barchart(pg)
|
4d4e2ad248bf3c816dd88bd6df628905ef6990be
|
d73d1fffc2c69ed18638262380186db28ef129c8
|
/R/reduceloadsolar.R
|
cf906f057424ad3643eda54754019d3d748ed3fb
|
[] |
no_license
|
estalenberg/ptrm
|
7f48a545e679fcefcddaf7009c8f7304e21883bf
|
262755ead3ee8b6e0900775134ac401e799ddc4c
|
refs/heads/master
| 2020-04-30T12:26:35.587724
| 2019-07-31T09:22:56
| 2019-07-31T09:22:56
| 176,826,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
reduceloadsolar.R
|
#' Reduction in energy load from solar
#'
#' Cumulative reduction in energy load (GWh) from increased solar uptake,
#' per projection year. The first year is the baseline and is always 0.
#'
#' @param yearslabel labels of projection years from 2020; one output column
#'   per year
#' @param other.df data frame of DNSP static vectors with columns `name` and
#'   `all.years`; must contain the rows "energy from solar" and
#'   "solar export to grid"
#' @param custongrid numeric vector, number of customers on grid per year
#' @param psolar numeric vector, percent of customers on solar per year
#'
#' @return one-row data frame (columns named by `yearslabel`) of load
#'   reductions in GWh, rounded to 2 decimal places
#'
#' @export
#'
rloadsolar_fun <- function(yearslabel, other.df, custongrid, psolar) {
  energysolar <- other.df$all.years[other.df$name == "energy from solar"]
  solarexport <- other.df$all.years[other.df$name == "solar export to grid"]
  n_years <- length(yearslabel)
  load <- as.data.frame(matrix(NA_real_, ncol = n_years, nrow = 1))
  names(load) <- yearslabel
  load[1] <- 0
  # seq_len()[-1] is empty when there is only one year, unlike the former
  # 2:length(load), which would iterate c(2, 1) and corrupt the result.
  for (i in seq_len(n_years)[-1]) {
    # Extra energy not drawn from the grid = customers * growth in solar
    # share since year 1 * self-consumed solar energy, scaled to GWh.
    load[i] <- custongrid[i] * (psolar[i] - psolar[1]) *
      energysolar * (1 - solarexport) / 1000000
  }
  round(load, digits = 2)
}
|
25e62d207427518d885710b3706659a3a973804f
|
0e38b8f94d9294247e8a60be458cdbcb69398c5b
|
/tests/testthat/test-branch.R
|
8a0d15cf664fde3dd306fe6868bd6792805c70c5
|
[] |
no_license
|
mverseanalysis/mverse
|
d57aa46f424f15fa9e317928c065aa06d688973b
|
bf15309a2542c14223cb53c222b0cb7e023311e9
|
refs/heads/master
| 2023-08-16T19:52:37.579380
| 2023-08-07T18:54:34
| 2023-08-07T18:54:34
| 241,745,842
| 8
| 2
| null | 2023-08-07T18:54:36
| 2020-02-19T23:03:43
|
R
|
UTF-8
|
R
| false
| false
| 7,158
|
r
|
test-branch.R
|
# Branch constructors must receive at least one rule expression.
test_that("Branches require at least one rule.", {
  expect_error(
    mutate_branch(),
    "Error: Provide at least one rule."
  )
  expect_error(
    filter_branch(),
    "Error: Provide at least one rule."
  )
})
# Unnamed branches fall back to the deparsed expression as their label.
test_that("*_branch() defines branches without 'name'.", {
  expect_equal(
    rlang::quo_name(mutate_branch(x + y)$opts[[1]]), "x + y"
  )
  expect_equal(
    rlang::quo_name(filter_branch(x > 0)$opts[[1]]), "x > 0"
  )
})
# An explicit `name` argument is stored verbatim on the branch object.
test_that("*_branch() defines branches with names specified.", {
  expect_equal(mutate_branch(x + y, name = "xnew")$name, "xnew")
  expect_equal(filter_branch(x + y, name = "xnew")$name, "xnew")
})
# `name` must be a character; numeric names are rejected with a clear error.
test_that("*_branch() checks a provided name is a character.", {
  expect_error(
    mutate_branch(x + y, name = 0),
    'Error: "name" must be a character object.'
  )
  expect_error(
    filter_branch(x > 0, name = 0.5),
    'Error: "name" must be a character object.'
  )
})
# Multiple rule expressions become an ordered list of quosure options.
# (The description's "*_brach()" typo is an expectation-visible string and
# is left as-is.)
test_that("*_brach() defines branches with multiple options.", {
  mbranch <- mutate_branch(x + y, x - y, x * y)
  expect_equal(
    rlang::quo_name(mbranch$opts[[1]]), "x + y"
  )
  expect_equal(
    rlang::quo_name(mbranch$opts[[2]]), "x - y"
  )
  expect_equal(
    rlang::quo_name(mbranch$opts[[3]]), "x * y"
  )
  expect_equal(length(mbranch$opts), 3)
  fbranch <- filter_branch(x > 0, x < 0, x == 0)
  expect_equal(
    rlang::quo_name(fbranch$opts[[1]]), "x > 0"
  )
  expect_equal(
    rlang::quo_name(fbranch$opts[[2]]), "x < 0"
  )
  expect_equal(
    rlang::quo_name(fbranch$opts[[3]]), "x == 0"
  )
  expect_equal(length(fbranch$opts), 3)
})
# name() acts as a getter across all four branch types...
test_that("name() extracts the name of a branch.", {
  mbranch <- mutate_branch(x + y, x - y, x * y, name = "mutate")
  expect_equal(name(mbranch), "mutate")
  fbranch <- filter_branch(x > 0, x < 0, x == 0, name = "filter")
  expect_equal(name(fbranch), "filter")
  frmbranch <- formula_branch(y ~ x, y ~ log(x), name = "formula")
  expect_equal(name(frmbranch), "formula")
  fambranch <- family_branch(poisson, gaussian(link = "log"), name = "family")
  expect_equal(name(fambranch), "family")
})
# ...and as a setter when given a second argument.
test_that("name() renames a branch.", {
  mbranch <- mutate_branch(x + y, x - y, x * y, name = "mutate")
  mbranch <- name(mbranch, "mrename")
  expect_equal(name(mbranch), "mrename")
  fbranch <- filter_branch(x > 0, x < 0, x == 0, name = "filter")
  fbranch <- name(fbranch, "frename")
  expect_equal(name(fbranch), "frename")
})
# parse() should translate a branch object into the multiverse::branch()
# call that the multiverse package executes. The test description was
# previously garbled as "1()"; restored to match the sibling parse() tests.
test_that("parse() creates a branching command for multiverse.", {
  mbranch <- mutate_branch(x + y, x - y, x * y, name = "m")
  expect_equal(
    parse(mbranch),
    rlang::parse_expr(
      'branch(m_branch, "m_1" ~ x + y, "m_2" ~ x - y, "m_3" ~ x * y)'
    )
  )
  fbranch <- filter_branch(x > 0, x < 0, x == 0, name = "f")
  expect_equal(
    parse(fbranch),
    rlang::parse_expr(
      'branch(f_branch, "f_1" ~ x > 0, "f_2" ~ x < 0, "f_3" ~ x == 0)'
    )
  )
})
# User-supplied option names override the auto-generated "<name>_<i>" labels.
test_that("parse() handles named branched options", {
  mbranch <- mutate_branch(
    add = x + y, subtract = x - y, multiply = x * y, name = "m"
  )
  expect_equal(
    parse(mbranch),
    rlang::parse_expr(
      'branch(m_branch, "add" ~ x + y, "subtract" ~ x - y, "multiply" ~ x * y)'
    )
  )
  fbranch <- filter_branch(x > 0, x < 0, equals = x == 0, name = "filter")
  expect_equal(
    parse(fbranch),
    rlang::parse_expr(
      paste0('branch(filter_branch, "filter_1" ~ x > 0, ',
             '"filter_2" ~ x < 0, "equals" ~ x == 0)')
    )
  )
  frml <- formula_branch(linear = x ~ y, x ~ z, name = "model")
  expect_equal(
    parse(frml),
    rlang::parse_expr(
      'branch(model_branch, "linear" ~ "x ~ y", "model_2" ~ "x ~ z")'
    )
  )
  frml <- family_branch(linear = gaussian, name = "fam")
  expect_equal(
    parse(frml),
    rlang::parse_expr(
      'branch(fam_branch, "linear" ~ gaussian)'
    )
  )
})
# Long / deeply nested option expressions must survive the multiverse
# round-trip (deparse + execute) without truncation.
test_that("parse() handles long branch options.", {
  mydf <- data.frame(col1 = c(1, 2, 3))
  mbranch <- mutate_branch(
    ifelse(col1 > 1, "a", ifelse(col1 == 1, "b", "c"))
  )
  mv <- mverse(mydf) %>%
    add_mutate_branch(mbranch) %>%
    execute_multiverse()
  expect_true(any(
    stringr::str_detect(
      unlist(sapply(multiverse::code(mv), as.character)),
      "ifelse\\(col1 > 1,"
    )
  ))
  fbranch <- formula_branch(
    cbind(col1, col2 - col1) ~ col3 + col3^2 + col3^3 +
      col3^4 + exp(col3 + col3^2),
    cbind(col1, col2 - col1) ~ col3 + col3^2 + col3^3 +
      col3^4 + exp(col3) + exp(col3^2)
  )
  add_formula_branch(mv, fbranch)
  expect_true(any(
    stringr::str_detect(
      unlist(sapply(multiverse::code(mv), as.character)),
      "cbind\\(col1, col2 - col1\\)"
    )
  ))
})
# add_*_branch() appends branch objects to the mverse's "branches_list"
# attribute (note mverse objects mutate in place here).
test_that("add_*_branch() adds a branch.", {
  mydf <- data.frame(
    x = c(1, 2, 3),
    y = c(4, 5, 6)
  )
  mv <- mverse(mydf)
  mbranch <- mutate_branch(x + y, x - y, x * y, name = "m")
  fbranch <- filter_branch(x > 0, x < 0, x == 0, name = "f")
  mv %>%
    add_mutate_branch(mbranch) %>%
    add_filter_branch(fbranch)
  expect_equal(attr(mv, "branches_list")[[1]]$name, "m")
  expect_equal(attr(mv, "branches_list")[[2]]$name, "f")
})
# Branches added in one call keep their argument order in the list.
test_that("add_*_branch() adds multiple branches in order.", {
  mydf <- data.frame(
    x = c(1, 2, 3),
    y = c(4, 5, 6)
  )
  mv <- mverse(mydf)
  mv %>%
    add_mutate_branch(
      mutate_branch(x + y, x - y, x * y, name = "m1"),
      mutate_branch(x + y, x - y, x * y, name = "m2")
    ) %>%
    add_filter_branch(
      filter_branch(x > 0, x < 0, x == 0, name = "f1"),
      filter_branch(x > 0, x < 0, x == 0, name = "f2")
    )
  nms <- sapply(attr(mv, "branches_list"), function(x) x$name)
  expect_equal(nms, c("m1", "m2", "f1", "f2"))
})
# Adding an unnamed branch to a multiverse must fail with a clear error.
test_that("add_*_branch() checks for a new variable name.", {
  mydf <- data.frame(
    x = c(1, 2, 3),
    y = c(4, 5, 6)
  )
  mverse <- create_multiverse(mydf)
  expect_error(
    mverse %>% add_mutate_branch(
      mutate_branch(x + y)
    ),
    "Please specify a variable name for the branch rule:.*"
  )
  expect_error(
    mverse %>% add_filter_branch(
      filter_branch(x > 0, x < 0, x == 0)
    ),
    "Please specify a variable name for the branch rule:.*"
  )
})
# The covariates option should expand into include_/exclude_ sub-branches
# for each covariate (here doubling twice: 1 formula x 2 x 2 = 4 universes).
test_that(
  "formula_branch() with covariates option creates covariate branches linked
with the formula branch.", {
    mydf <- data.frame(
      x = c(1, 2, 3),
      y = c(4, 5, 6),
      w = c(7, 8, 9),
      z = c(10, 11, 12)
    )
    mv <- create_multiverse(mydf)
    frml <- formula_branch(y ~ x, y ~ log(x),
                           covariates = c("z", "w"))
    expect_equal(frml$covariates, c("z", "w"))
    frml <- formula_branch(y ~ x,
                           covariates = c("z", "w"),
                           name = "f")
    expect_equal(frml$covariates, c("z", "w"))
    add_formula_branch(mv, frml)
    branch_names <- names(multiverse::parameters(mv))
    expect_true(any(grepl("covariate_z_branch", branch_names)))
    expect_true(any(grepl("covariate_w_branch", branch_names)))
    expect_equal(nrow(summary(mv)), 4)
    expect_contains(summary(mv)[["covariate_z_branch"]],
                    c("include_z", "exclude_z"))
    expect_contains(summary(mv)[["covariate_w_branch"]],
                    c("include_w", "exclude_w"))
  }
)
|
2769edf9aa9e5cf0225eb93ff7de697605404e63
|
69103d17a9a04a02aff5c1246f30c7a45ea566ea
|
/R/load_data.R
|
e396342f63298798a54fd06935e419d0a71d7d1d
|
[
"MIT"
] |
permissive
|
jincio/covidPeru
|
0de7d30a41bacdf6385ef2d80ec422ee766473ee
|
4b3f9ff6234b913d8bd3ea3c5819d8da47c4fa01
|
refs/heads/master
| 2023-06-04T11:31:46.198051
| 2021-07-02T16:37:57
| 2021-07-02T16:37:57
| 291,585,607
| 8
| 6
|
NOASSERTION
| 2021-07-02T16:37:57
| 2020-08-31T01:32:30
|
R
|
UTF-8
|
R
| false
| false
| 3,186
|
r
|
load_data.R
|
#' Load positive COVID cases
#'
#' Downloads the confirmed-cases table from the Peruvian open data portal
#' and derives date columns (`fecha`), numeric age (`EDAD_n`) and the
#' epidemiological week (`semana`).
#'
#' @return data_frame
#' @export
#'
#' @examples
da_positivos <- function() {
  url_casos <- "https://cloud.minsa.gob.pe/s/Y8w3wHsEdYQSZRp/download"
  casos <- data.table::fread(url_casos, encoding = "Latin-1")
  # FECHA_RESULTADO arrives as YYYYMMDD; split it to build a real Date.
  casos <- dplyr::mutate(casos,
                         year = substr(FECHA_RESULTADO, 1, 4),
                         month = substr(FECHA_RESULTADO, 5, 6),
                         day = substr(FECHA_RESULTADO, 7, 8),
                         fecha = as.Date(paste0(year, "-", month, "-", day)),
                         EDAD_n = as.numeric(EDAD),
                         semana = lubridate::epiweek(fecha))
  return(casos)
}
#' Load fallecidos Covid
#'
#' Downloads the COVID deaths table from the open data portal and adds the
#' derived `fecha` and `semana` columns.
#'
#' @return
#' Data.frame with the "fallecidos" open data plus `fecha` and `semana`.
#' @export
#'
#' @examples
#'
da_fallecidos <- function() {
  url_fallecidos <- "https://cloud.minsa.gob.pe/s/Md37cjXmjT9qYSa/download"
  registros <- data.table::fread(url_fallecidos, encoding = "Latin-1")
  # FECHA_FALLECIMIENTO arrives as YYYYMMDD; split it to build a real Date.
  fallecidos <- dplyr::mutate(registros,
                              year = substr(FECHA_FALLECIMIENTO, 1, 4),
                              month = substr(FECHA_FALLECIMIENTO, 5, 6),
                              day = substr(FECHA_FALLECIMIENTO, 7, 8),
                              fecha = as.Date(paste0(year, "-", month, "-", day)),
                              semana = lubridate::epiweek(fecha))
  return(fallecidos)
}
#' da_sinadef
#' Load death data from SINADEF
#'
#' Downloads the SINADEF all-cause mortality table from the open data
#' portal, drops empty columns and foreign/violent-death records, and adds
#' date helper columns.
#'
#' @return data frame with columns fecha, semana, year, dia,
#'   `DEPARTAMENTO DOMICILIO` and `PROVINCIA DOMICILIO`
#' @export
#'
#' @examples
da_sinadef<-function (){
file="https://cloud.minsa.gob.pe/s/nqF2irNbFomCLaa/download"
data=data.table::fread(file,encoding="Latin-1")
# Progress messages for the (slow) download-and-clean pipeline.
cat("si lees esto es que el archivo bajo bien :)")
cat("...limpiando el archivo")
# Column 14 holds the year of death; give it a usable name.
colnames(data)[14] <-"Year"
cat("...Eliminamos informacion vacia")
# Drop columns that are entirely NA.
data1 <- data %>% dplyr::select_if(~sum(!is.na(.)) > 0)
cat("...Creando variables standards")
# Keep only domestic, non-violent deaths, then derive date/week columns.
# NOTE(review): `%>%` needs magrittr/dplyr attached by the caller; only the
# dplyr functions themselves are namespaced here -- confirm package imports.
data1 <- data1 %>% dplyr::filter(`DEPARTAMENTO DOMICILIO` != "EXTRANJERO",
`MUERTE VIOLENTA` %in% c("SIN REGISTRO","NO SE CONOCE")) %>%
dplyr::mutate(fecha = as.Date(FECHA),semana = lubridate::epiweek(fecha), mes = as.numeric(MES),
year = as.numeric(Year),dia = weekdays(fecha)) %>%
dplyr::select(fecha,semana,year,dia,`DEPARTAMENTO DOMICILIO`,`PROVINCIA DOMICILIO`)
return(data1)
}
#' Load Vaccinated people in Peru
#'
#' Downloads the applied-vaccines table from the open data portal and adds
#' the derived `fecha`, `EDAD_n` and `semana` columns.
#'
#' @return
#' Data.frame with the "vacunados" open data plus `fecha` and `semana`.
#' @export
#'
#' @examples
da_vacunados <- function() {
  url_vacunas <- "https://cloud.minsa.gob.pe/s/ZgXoXqK2KLjRLxD/download"
  vacunados <- data.table::fread(url_vacunas, encoding = "Latin-1")
  # FECHA_VACUNACION arrives as YYYYMMDD; split it to build a real Date.
  vacunados <- dplyr::mutate(vacunados,
                             year = substr(FECHA_VACUNACION, 1, 4),
                             month = substr(FECHA_VACUNACION, 5, 6),
                             day = substr(FECHA_VACUNACION, 7, 8),
                             fecha = as.Date(paste0(year, "-", month, "-", day)),
                             EDAD_n = as.numeric(EDAD),
                             semana = lubridate::epiweek(fecha))
  return(vacunados)
}
|
3d940fcffc159814e665130e3d8256004587bc45
|
d1ae4717b37ccc96792c3c5b342b1d3be07517ec
|
/R_Q2.r
|
2085df360e8565a477b627841c85e30acfe43433
|
[] |
no_license
|
solankivaibhav789/r-programming
|
6736671612e07e415ebe3b24510cebcc8371e4e0
|
ad43fdf53a25507893f3275bc689323a16d5a5af
|
refs/heads/master
| 2020-03-14T09:39:02.860274
| 2018-04-30T04:52:47
| 2018-04-30T04:52:47
| 131,549,149
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 292
|
r
|
R_Q2.r
|
# Toy instance table: four 2-D points (v1, v2) labelled i1..i4, plus the
# vector of row indices to search over.
df=data.frame(instv=c("i1","i2","i3","i4"),
              v1=c(1.5,2.0,1.6,1.2),
              v2=c(1.7,1.9,1.8,1.5))
l=c(1,2,3,4)
# Return the row index of the instance in `data` nearest to the query point
# (x, y), using Euclidean distance on the 2nd and 3rd columns (v1, v2).
#
# Bug fixes vs. the original: `return(i)` sat inside the loop and exited on
# the first iteration; the parameter `y` was overwritten by the distance;
# and the tracked best index was never returned.
#
# Backward compatible: `data` defaults to the global `df` and `rows` to all
# of its rows, so the original call Fun_euclid_distance(x, y) still works.
Fun_euclid_distance <- function(x, y, data = df, rows = seq_len(nrow(data))) {
  best_dist <- Inf
  best_index <- NA_integer_
  for (i in rows) {
    d <- sqrt((data[i, 2] - x)^2 + (data[i, 3] - y)^2)
    if (d < best_dist) {
      best_dist <- d
      best_index <- i
    }
  }
  best_index
}
# Find the index of the instance nearest to the query point (1.4, 1.6).
x<-Fun_euclid_distance(1.4,1.6)
print(x)
|
dd538678b35944d39463836561296e43963f97d9
|
381ea93b5654c74584e203676ceb9bd17bd608b0
|
/man/saveload.Rd
|
e501c68051e35f5f3d332a4e9d95a9064201c05a
|
[] |
no_license
|
gmbecker/GRANCore
|
e26962f45f2ee646cc3c24f24ebe11212a51ec62
|
47809ff6789185df947159f1d73249abd7694bee
|
refs/heads/master
| 2022-01-11T10:00:33.795287
| 2019-10-31T00:35:24
| 2019-10-31T00:35:24
| 112,526,940
| 1
| 5
| null | 2019-05-24T21:23:05
| 2017-11-29T20:56:19
|
R
|
UTF-8
|
R
| false
| true
| 644
|
rd
|
saveload.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utilities.R
\name{loadRepo}
\alias{loadRepo}
\alias{saveRepo}
\title{Backwards compatible load utility}
\usage{
loadRepo(filename)
saveRepo(repo, filename)
}
\arguments{
\item{filename}{The file to load}
\item{repo}{The GRANRepository object to save}
}
\description{
Load a repository serialized to an R code file
serialize a repository to a file so that it does not require GRANBase
to load
}
\examples{
repo = GRANRepository(GithubManifest("gmbecker/rpath"), basedir = tempdir())
fil = file.path(tempdir(), "repo.R")
saveRepo(repo, fil)
repo2 = loadRepo(fil)
}
|
c5f7c4c1f1d202c096c71f48518dabba296d6db5
|
5e533da27bf1d338f42aa6363f0f217dfb592d02
|
/man/arima_boost.Rd
|
65b07184559111128c8817aae200cd3a31e8d17c
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
eashanadhikarla/finnts
|
8bb47f9e9c22d48efbb10d9091b1aa1bf2c39678
|
cba6266c8d53d21eac139003cb2d7a11d57bbb73
|
refs/heads/main
| 2023-09-04T22:15:37.359842
| 2021-11-12T17:09:52
| 2021-11-12T17:09:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 722
|
rd
|
arima_boost.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{arima_boost}
\alias{arima_boost}
\title{ARIMA Boost}
\usage{
arima_boost(
train_data,
frequency,
parallel,
horizon,
tscv_initial,
date_rm_regex,
back_test_spacing,
fiscal_year_start,
pca
)
}
\arguments{
\item{train_data}{Training Data}
\item{frequency}{Frequency of Data}
\item{parallel}{Parallel Version or not}
\item{horizon}{Horizon of model}
\item{tscv_initial}{tscv initialization}
\item{date_rm_regex}{Date removal Regex}
\item{back_test_spacing}{Back Testing Spacing}
\item{fiscal_year_start}{Fiscal Year Start}
\item{pca}{PCA}
}
\value{
Get the ARIMA based model
}
\description{
ARIMA Boost
}
\keyword{internal}
|
69e3203f4db3a8ae1dbdf629dead668ec6052e68
|
a9842718967767bdb67ebab2c7f0506709a77d5f
|
/Empirical Examples/Cattaneo/Cattaneo.R
|
7fc08a2e5059a818ebd1d968ac2dab0cfea72c6f
|
[] |
no_license
|
QuantLet/Outcome-adaptive-Random-Forest
|
a0e2f762157b1dc7b2e1a5a4449fb31f4417d132
|
ad24e8171ea0be5e7926a9d0e7875a3a3c76b6a1
|
refs/heads/main
| 2023-06-23T11:03:47.549947
| 2021-07-26T12:09:26
| 2021-07-26T12:09:26
| 389,566,312
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,349
|
r
|
Cattaneo.R
|
# Empirical example: maternal smoking and birth weight (Cattaneo data),
# comparing ATE estimates and covariate selection across random-forest
# variants (full RF, OARF, RRF), OAL and plain IPTW.
# NOTE(review): all_RF(), shortreed_est(), iptw(), the *_boot() helpers and
# xtable() are defined/loaded elsewhere in the project -- this script
# assumes they are already in the session.
library(ranger)
library(scales)
library(MASS)
library(ggplot2)
library(cowplot)
library(glmnet)
library(RRF)
library(haven)
cattaneo2 <- as.data.frame(read_dta("cattaneo2.dta"))
View(cattaneo2)
# Rename outcome (birth weight) to Y and treatment (mother smoked) to A,
# and drop the redundant outcome/treatment encodings.
cattaneo2$Y <- cattaneo2$bweight
cattaneo2$A <- cattaneo2$mbsmoke
cattaneo2$bweight <- NULL
cattaneo2$mbsmoke <- NULL
cattaneo2$msmoke <- NULL
cattaneo2$lbweight <- NULL
# Covariate names = every column except outcome and treatment.
var.list <- colnames(cattaneo2[,!(names(cattaneo2) %in% c("Y","A"))])
# Point estimates
# RF, OARF and RRF: forests are stochastic, so take the median over B runs.
B <- 50
RF_est_B <- matrix(NA,B,3)
for(b in 1:B){
  RF_est <- all_RF(data=cattaneo2)
  RF_est_B[b,] <- RF_est
}
RF_est_med <- apply(RF_est_B,2,median)
# OAL is run on standardized (scaled) covariates.
data_m <- cattaneo2
data_m[,var.list] <- rapply(cattaneo2[,var.list],scale,c("numeric","integer"),how="replace")
OAL_est <- shortreed_est(data=data_m)
# IPTW
IPTW_est <- iptw(data=data_m)
###############
# Bootstrap iterations
nboot <- 5000
# RF, OARF and RRF
RF_ci <- all_RF_boot(data=cattaneo2)
# OAL
OAL_ci <- shortreed_boot(data=data_m,nboot=nboot)
# IPTW
IPTW_ci <- iptw_boot_ci(data=data_m,nboot=nboot)
# ATE Estimates: point estimate followed by its bootstrap CI, per method.
c(IPTW_est,IPTW_ci)
c(OAL_est,OAL_ci$iptw)
c(RF_est_med[1],RF_ci[[1]][,1]) # RF full
c(RF_est_med[3],RF_ci[[1]][,3]) # RF RRF
c(RF_est_med[2],RF_ci[[1]][,2]) # OARF
# CI width (upper bound minus lower bound) for each method.
IPTW_ci[2] - IPTW_ci[1]
OAL_ci$iptw[2] - OAL_ci$iptw[1]
RF_ci[[1]][,1][2] - RF_ci[[1]][,1][1]
RF_ci[[1]][,3][2] - RF_ci[[1]][,3][1]
RF_ci[[1]][,2][2] - RF_ci[[1]][,2][1]
# Covariate selection: proportion of bootstrap replications in which each
# covariate was selected, per method; reshape into long form for ggplot.
sv_OAL <- OAL_ci$var_sel
sv_RF_full <- as.data.frame(RF_ci[[2]][,1])
sv_OARF <- RF_ci[[2]][,2]
sv_RRF <- RF_ci[[2]][,3]
sv <- as.data.frame(c(sv_RF_full,sv_OARF,sv_RRF,sv_OAL))
colnames(sv) <- c("value")
sv$Method <- factor(rep(c("RF full","OARF","RRF","OAL"),each=length(var.list)),levels=c("OAL","RF full","OARF","RRF"))
sv$Var <- rep(c(1:length(var.list)))
#cbp <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
#          "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# Line plot of selection proportion by covariate index, one line per method.
ggplot(sv,aes(y=value,x=Var,color=Method))+
  geom_line(size=1) +
  theme_cowplot() +
  labs(y="Proportion of times covariates selected ",x="Covariates") +
  scale_color_manual(values=c("#000000", "#E69F00","#56B4E9","#009E73"))
# Check covariates: selection-proportion table, formatted for LaTeX output.
all_var_table <- cbind(var.list,sv_RF_full,sv_RRF,sv_OARF,sv_OAL)
all_var_table[,-1] <- round(all_var_table[,-1],3)
xtable(all_var_table,digits=c(0,0,1,1,1,1))
|
d76063a90b9628481fdcc9b2dd416f155ad6430e
|
c9c7ead98512805ea11054e80096d859ebf98537
|
/R/ExampleFunction.R
|
c3c8102d58d4714841ebf8f05ecdde64e09df979
|
[] |
no_license
|
NabeelBedar/Optimization
|
d1c45dc5777fca59bc873ba0007d07c37d80e24a
|
514a0c5a66aaff63607aa93b261f5b53296d0208
|
refs/heads/main
| 2023-04-22T07:21:46.778069
| 2021-05-08T14:58:03
| 2021-05-08T14:58:03
| 351,846,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
ExampleFunction.R
|
#' Evaluate the cubic polynomial x^3 + 5x^2 - 4x + 2
#'
#' The previous documentation incorrectly described this as "finding the
#' minimum point of a function" and documented a nonexistent `f` parameter;
#' the body only evaluates the polynomial, vectorized over `x`.
#'
#' @export
#' @param x numeric vector at which to evaluate the polynomial
#' @return numeric vector of polynomial values, same length as `x`
f <- function(x){
  x^3 + 5*x^2 - 4*x + 2
}
|
88e508daa6df3376bb0c6cbc62dbcf52fe3a886e
|
c88df554b1de89825402bb751b45e54a4f9e4293
|
/run_analysis.R
|
b82285613c89afda8c5e04f566a0cced90735e43
|
[] |
no_license
|
SamLawrance/gcd
|
5fe191ca696a9b6c89dbf200e1e64480fb453918
|
bbc54ce0726305c3a4441d04e187e2383f4e23f4
|
refs/heads/master
| 2016-09-01T19:33:20.038218
| 2015-02-22T12:56:30
| 2015-02-22T12:56:30
| 31,161,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,714
|
r
|
run_analysis.R
|
# Sam Lawrance - Getting & Cleaning Data Course Project
# Where you see [n], it refers to the numbered list in the assignment
# description. Eg. [1] refers to "Merges the training and the test
# sets to create one data set" on the assignment web page.
# Expects the UCI HAR Dataset files (features.txt, activity_labels.txt,
# test/ and train/ subdirectories) in the working directory; writes the
# result to tidy_set.txt.
# dplyr is used for left_join and select functions.
library(dplyr)
# Used for testing... not required for submission.
# setwd("~/Desktop/gcd/UCI HAR Dataset")
# Load the measurement (column) descriptions first.
features <- read.table("features.txt", stringsAsFactors = FALSE)
# Load the test set, taking column names from the "features" table [4],
# and create additional columns for the subject and activity number.
test_set <- read.table("test/X_test.txt", col.names = features[,2])
test_subjects <- read.table("test/subject_test.txt")
test_labels <- read.table("test/y_test.txt")
test_set$subject <- test_subjects[,1]
test_set$activity_number <- test_labels[,1]
# Load the training set, taking column names from the "features" table [4],
# and create additional columns for the subject and activity number.
train_set <- read.table("train/X_train.txt", col.names = features[,2])
train_subjects <- read.table("train/subject_train.txt")
train_labels <- read.table("train/y_train.txt")
train_set$subject <- train_subjects[,1]
train_set$activity_number <- train_labels[,1]
# Load the activity labels and rename the columns for a later merge
# operation with the full set of data.
activity_labels <- read.table("activity_labels.txt", col.names = c("activity_number", "activity"))
# [1] Combine the testing and training sets into a full set of data.
full_set <- rbind(test_set, train_set)
# [3] Join the full dataset with activity_labels to provide descriptive activity
# names in the "activity" column.
full_set <- left_join(full_set, activity_labels, by="activity_number")
# [2] Now select just the data that we want - the subject, a nice description
# of activity, and any columns that contain ".mean." or ".std." - these are
# all of the columns that contain the required measurements.
# (read.table sanitizes feature names like "-mean()" into ".mean.", which is
# why the contains() patterns below use dots.)
descriptive_set <- select(full_set, subject, activity,
                          contains(".mean.", ignore.case = FALSE),
                          contains(".std.", ignore.case = FALSE))
# [5] Group by activity and subject, and then use summarise_each to apply
# the mean function across all remaining columns (the measurements).
# This produces a tidy data set with subject, activity, and then the means of
# the supplied measurement data.
grouped_set <- group_by(descriptive_set, subject, activity)
tidy_set <- summarise_each(grouped_set, funs(mean))
# Write out the data set as a text table.
write.table(tidy_set, file = "tidy_set.txt", row.names = FALSE)
|
07a9a764356362dbe8df56abc10469c72bba7a7d
|
833fad22658188b3220e563860c9319929805de1
|
/man/projectUMAP.Rd
|
c478db3be1a3f09cfbf2ec19d7386c888a69f46c
|
[] |
no_license
|
yz46606/Socrates
|
ba7622bdce687fdadb407626a2b8b4250b1cbfba
|
87ef203624ed6c5992b52ee960c39b9fed4a9917
|
refs/heads/main
| 2023-09-02T15:22:04.597315
| 2021-11-16T00:38:33
| 2021-11-16T00:38:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,038
|
rd
|
projectUMAP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reduce_dimensions.R
\name{projectUMAP}
\alias{projectUMAP}
\title{Project cells into a reduced embedding with UMAP}
\usage{
projectUMAP(
obj,
m.dist = 0.01,
k.near = 40,
metric = "cosine",
svd_slotName = "PCA",
umap_slotName = "UMAP",
verbose = FALSE,
seed = 1
)
}
\arguments{
\item{obj}{list, object containing 'PCA' for projecting into a reduced embedding with UMAP.}
\item{m.dist}{numeric, m_dist parameter for uwot::umap. Defaults to 0.01}
\item{k.near}{numeric, k-nearest neighbors used by the uwot::umap algorithm. Defaults to 40.}
\item{metric}{character, distance metric used by uwot::umap. Defaults to cosine.}
\item{svd_slotName}{character, name of desired svd_slotName for running UMAP. Defaults to "PCA".}
\item{umap_slotName}{character, name of desired umap_slotName for return UMAP results. Defaults
to "UMAP".}
\item{verbose, }{logical. Defaults to FALSE.}
}
\description{
Project cells into a reduced embedding with UMAP
}
|
7b12a0b7e76d234e5f8029d611b27448c2ea0598
|
128025338a34d4751aa16adc2109c57d97b0da3f
|
/man/strata.Rd
|
5893bd70747ae8eab4b0b38b9ee41b0f321c88e1
|
[] |
no_license
|
MurrayEfford/openCR
|
e2de3810d9582406d598d0637fa4dd864468b961
|
949c39e3c65be41be859499fb7d3c9645e951629
|
refs/heads/master
| 2023-07-21T22:28:22.287511
| 2023-07-11T08:52:45
| 2023-07-11T08:52:45
| 116,920,401
| 2
| 0
| null | 2021-12-02T20:16:51
| 2018-01-10T06:57:54
|
R
|
UTF-8
|
R
| false
| false
| 1,797
|
rd
|
strata.Rd
|
\name{strata}
\alias{strata}
\alias{strata<-}
\title{ Stratum names }
\description{
Extract or replace the stratum names of a \code{capthist} object.
}
\usage{
strata(object, \dots)
strata(object) <- value
}
\arguments{
\item{object}{ object with `stratum' attribute e.g. \code{capthist} }
\item{value}{ character vector or vector that may be coerced to character, one value per stratum }
\item{\dots}{ other arguments (not used) }
}
\details{
Replacement values will be coerced to character.
}
\value{
a character vector with one value for each session in \code{capthist}.
}
\note{
\pkg{openCR} uses the term `stratum' for an independent set of samples,
rather like a `session' in \pkg{secr}. Strata offer flexibility in defining
and evaluating between-stratum models. The log likelihood for a stratified
model is the sum of the separate stratum log likelihoods. Although this
assumes independence of sampling, parameters may be shared across strata,
or stratum-specific parameter values may be functions of stratum-level covariates.
The detector array and mask can be specified separately for each stratum.
For open population analyses, each stratum comprises both primary and secondary
sessions of Pollock's robust design `joined' in a single-session capthist object.
The function \code{\link{stratify}} can be useful for manipulating data into
multi-stratum form.
Models are stratified only if the argument \code{stratified} of
\code{openCR.fit()} is set to TRUE. Strata will otherwise be treated as
primary sessions and concatenated as usual with \code{join()}.
}
\seealso{
\code{\link{openCR.fit}}, \code{\link{session}}, \code{\link{stratify}}
}
\examples{
# artificial example, treating years as strata
strata(ovenCH)
}
\keyword{ models }
|
f63ef723797495e05ca928678bf6921dd57d5e32
|
9d86e7d498f58a0c0cd69d969db68b1c5dcf44a4
|
/students/20201210_question_cn.R
|
309e7799d41a92dc6d021fa0c4e7fd6a6f3af463
|
[
"CC-BY-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
qsmei/lbgfs2020
|
f5314e4505201b4fd2ece542e64d87ee8f4a3b17
|
50001936dfaf2a4e4ec9ba3436665d4783b1dd29
|
refs/heads/master
| 2023-02-02T18:11:47.311100
| 2020-12-19T08:21:31
| 2020-12-19T08:21:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,355
|
r
|
20201210_question_cn.R
|
# Solution by C.N.
# Two-trait (WWG/PWG) BLUP animal model solved via the mixed model
# equations: coefficient matrix [X'R^-1 X, X'R^-1 Z; Z'R^-1 X,
# Z'R^-1 Z + G^-1], right-hand side [X'R^-1 y; Z'R^-1 y].
n_nr_trait <- 2
n_nr_founder <- 3
n_nr_animal <- 8
n_nr_observation <- n_nr_animal - n_nr_founder
# Records: weaning (WWG) and post-weaning (PWG) gain for animals 4..8;
# founders 1..3 have pedigree links only, no phenotypes.
tbl_data_sol12p01 <- tibble::tibble(Animal = c((n_nr_founder+1):n_nr_animal),
                                    Sex = c("Male", "Female","Female","Male","Male"),
                                    Sire = c(1,3,1,4,3),
                                    Dam = c(NA,2,2,5,6),
                                    WWG = c(4.5,2.9,3.9,3.5,5.0),
                                    PWG = c(6.8,5.0,6.8,6.0,7.5))
# Genetic (G0) and residual (R0) trait variance-covariance matrices.
mat_g0 <- matrix(data = c(20,18,18,40), nrow = n_nr_trait, byrow = TRUE)
mat_r0 <- matrix(data = c(40,11,11,30), nrow = n_nr_trait, byrow = TRUE)
# Fixed-effect (sex) incidence matrix, identical for both traits.
mat_x1 <- mat_x2 <- matrix(data = c(1, 0,
                                    0, 1,
                                    0, 1,
                                    1, 0,
                                    1, 0), nrow = n_nr_observation, byrow = TRUE)
mat_zero <- matrix(0, nrow = nrow(mat_x1), ncol = ncol(mat_x1))
mat_X <- rbind(cbind(mat_x1, mat_zero), cbind(mat_zero, mat_x2))
mat_Xt <- t(mat_X)
# Random-effect incidence matrix Z; leading zero columns because the
# founders (animals 1..3) have no observations.
mat_z1zero <- matrix(0, nrow = n_nr_observation, ncol = n_nr_founder)
mat_z1 <- mat_z2 <- cbind(mat_z1zero, diag(1, n_nr_observation))
mat_zzero <- matrix(0, nrow = nrow(mat_z1), ncol(mat_z2))
mat_Z <- rbind(cbind(mat_z1, mat_zzero), cbind(mat_zzero, mat_z2))
mat_Zt <- t(mat_Z)
# Full residual covariance (Kronecker with identity) and its inverse.
mat_r <- mat_r0 %x% diag(1, n_nr_observation)
mat_R_1 <- solve(mat_r)
# Pedigree -> inverse numerator relationship matrix A^-1, then
# G^-1 = G0^-1 (x) A^-1 for the random-effect block of the MME.
ped_sol12p01 <- pedigreemm::pedigree(sire = c(rep(NA, n_nr_founder), tbl_data_sol12p01$Sire),
                                     dam = c(rep(NA, n_nr_founder), tbl_data_sol12p01$Dam),
                                     label = as.character(1:n_nr_animal))
mat_ainv <- as.matrix(pedigreemm::getAInv(ped = ped_sol12p01))
mat_Ginv <- solve(mat_g0) %x% mat_ainv
# Assemble the four blocks of the MME coefficient matrix.
linksoben <- mat_Xt %*% mat_R_1 %*% mat_X # top-left block
rechtsoben <- mat_Xt %*% mat_R_1 %*% mat_Z
linksunten <- mat_Zt %*% mat_R_1 %*% mat_X
unten <- mat_Zt %*% mat_R_1 %*% mat_Z
rechtsunten <- unten + mat_Ginv
matbig <- rbind(cbind(linksoben, rechtsoben), cbind(linksunten, rechtsunten)) # coefficient matrix M (inverted below)
M_1 <- solve(matbig)
# Observations stacked trait-wise; vec_y - y is a sanity check (all zeros).
y <- c(4.5,2.9,3.9,3.5,5.0,6.8,5.0,6.8,6.0,7.5)
vec_y <- c(tbl_data_sol12p01$WWG, tbl_data_sol12p01$PWG)
vec_y - y
# Right-hand side, then solve for fixed effects and breeding values.
roben <- mat_Xt %*% mat_R_1 %*% y
runten <- mat_Zt %*% mat_R_1 %*% y
r <- c(roben, runten)
r
s <- M_1 %*% r
s
round(s, digits = 4)
|
bc3b8b1e7a320d202518ab2d9e9eb797cafcc53f
|
31e3cc6b91ee5b169685fa1c04bc49196224ec68
|
/project3_Bootstrap Project/BootstrapProject_part1_YifengLan.R
|
14d17d3d2d4aa09dac8ba53468595e72320e7a30
|
[] |
no_license
|
AliceLLLLLan/APPLD-MULTIVAR-ANAL
|
72de99c86208e9d8f53cce11504989bb807cf841
|
46d4f94e6a5aed0c96daa225c0d7fe986c4c0be6
|
refs/heads/master
| 2020-04-03T18:11:13.203224
| 2019-01-22T23:02:00
| 2019-01-22T23:02:00
| 155,474,125
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,349
|
r
|
BootstrapProject_part1_YifengLan.R
|
Heart<-read.csv("/Users/AliceLan/Desktop/R Class/Heart.csv")
# Bootstrap-t (studentized) confidence interval for the mean of vec0,
# alongside the classical Student-t ("normal") interval.
#
# Args:
#   vec0:  numeric vector of observations
#   nboot: number of bootstrap resamples (default 10000)
#   alpha: two-sided significance level, i.e. 1 - alpha coverage
#
# Returns a list with elements bootstrap.confidence.interval and
# normal.confidence.interval, each c(lower, upper).
#
# Fixes vs. the original: bootvec is preallocated instead of grown with
# c() inside the loop (which copied the vector on every iteration, O(n^2)),
# replace=T is spelled out as TRUE, and the quantiles are unnamed so the
# returned bounds are plain numbers.
my.bootstrapci.ml <- function(vec0, nboot = 10000, alpha = 0.1) {
  # Sample size, mean and standard deviation of the original data.
  n0 <- length(vec0)
  mean0 <- mean(vec0)
  sd0 <- sqrt(var(vec0))
  # Preallocated vector of studentized bootstrap deviations.
  bootvec <- numeric(nboot)
  for (i in seq_len(nboot)) {
    vecb <- sample(vec0, replace = TRUE)
    meanb <- mean(vecb)
    sdb <- sqrt(var(vecb))
    # n0 is valid for vecb because each resample keeps the full size.
    bootvec[i] <- (meanb - mean0) / (sdb / sqrt(n0))
  }
  # Lower and upper quantiles of the studentized bootstrap distribution.
  lq <- unname(quantile(bootvec, alpha / 2))
  uq <- unname(quantile(bootvec, 1 - alpha / 2))
  # Inverting t = (mean0 - mu) / (sd0 / sqrt(n0)) flips the quantiles:
  # the upper quantile yields the lower bound and vice versa.
  LB <- mean0 - (sd0 / sqrt(n0)) * uq
  UB <- mean0 - (sd0 / sqrt(n0)) * lq
  # Classical t-based interval for comparison.
  NLB <- mean0 - (sd0 / sqrt(n0)) * qt(1 - alpha / 2, n0 - 1)
  NUB <- mean0 + (sd0 / sqrt(n0)) * qt(1 - alpha / 2, n0 - 1)
  list(bootstrap.confidence.interval = c(LB, UB),
       normal.confidence.interval = c(NLB, NUB))
}
# Build a 90% bootstrap-t CI (plus the normal CI) for the mean of column 4.
vec0<-Heart[[4]]
my.bootstrapci.ml(vec0,nboot=10000,alpha=0.1)
|
2531627e5855ce4e52a5d78a9dfcee937ed585bb
|
fa13b20aec42378fb383aee1c43d22a322a43f9f
|
/man/select_eye.Rd
|
d90cc2e61f7ca8138b99b3261e106caa31254a75
|
[] |
no_license
|
dr-JT/pupillometry
|
5d69d06046d44d2bed9ef6b98833b1726cc51073
|
06e12f13021b214791fa7880e768fa8f3fba7b20
|
refs/heads/main
| 2023-05-31T02:06:12.279030
| 2023-05-03T22:46:14
| 2023-05-03T22:46:14
| 146,345,641
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 752
|
rd
|
select_eye.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_eye.R
\name{select_eye}
\alias{select_eye}
\title{Select eye}
\usage{
select_eye(x, eye_use = "")
}
\arguments{
\item{x}{dataframe.}
\item{eye_use}{Which eye to use? left or right?}
}
\description{
Choose which eye to use for analysis. See
https://dr-jt.github.io/pupillometry/ for more information.
}
\section{Output}{
This function removes columns related to the non-selected eye and renames
columns of the selected eye by removing the L_ or R_ prefix. It also adds
a column `Pupil.r`.
If both eyes were recorded from, then this function will correlate the
pupil data from both eyes and select only one eye data to keep
for further preprocessing and output.
}
|
0049963aad4b5d5368e7901a9b294c4d6988cc22
|
d495cef21676e32838467a8960331b61a638cf1e
|
/man/rocx.Rd
|
1749439b79795d087e8f409c3b6fa9425063b8dd
|
[] |
no_license
|
davan690/rocx
|
6f04f92939ab25de205bd7507bee04593fdcf5d6
|
efe822f2298beaab5fda749c983ebf0e94680f1c
|
refs/heads/master
| 2020-07-05T00:57:37.943261
| 2016-11-24T18:21:40
| 2016-11-24T18:21:40
| 202,476,354
| 1
| 0
| null | 2019-08-15T04:54:24
| 2019-08-15T04:54:24
| null |
UTF-8
|
R
| false
| true
| 365
|
rd
|
rocx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rocx.R
\name{rocx}
\alias{rocx}
\title{rocx}
\usage{
rocx(reference_docx, draft = TRUE, keep_old = FALSE, use_bookdown = TRUE,
...)
}
\description{
This is the function that is called internally to generate the memos in different
forms. This is not meant to be called by the user.
}
|
2f07870952c26a9c7aaa52f1cda2e919a57ebdb6
|
22bbe5aeec6cca2c152a3c90b83743717f96a4a2
|
/02_INSPIRES_leaflet_example.R
|
5935f02a37600f4154c0ff4e9195fd1ce2df6f5e
|
[] |
no_license
|
onetreetwotrees/INSPIRES_shared_site_map
|
4da5224e4c73b4ff0e5eb8b90c183e92afcfe557
|
b76552232afed84f1a2b663273f884738dfec67b
|
refs/heads/main
| 2023-03-24T11:08:39.377479
| 2021-03-23T16:11:57
| 2021-03-23T16:11:57
| 350,753,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,710
|
r
|
02_INSPIRES_leaflet_example.R
|
library(shiny)
library(leaflet)
#library(raster)
library(rgdal)
## Leaflet examples from https://nceas.github.io/oss-lessons/publishing-maps-to-the-web-in-r/publishing-maps-to-the-web-in-r.html
## Accessed 2021-03-02
## Try with Track 2 sites
track2_sites <- read.csv("data\\Track2_site_coordinates.csv", header = T)
bnds <- readOGR(dsn = "data", layer = "S_USA_Experimental_Area_Boundaries_Inspires")
nacp <- readOGR(dsn = "data", layer = "NACP_Forest_Biophysical_Georeference_points_field_surveys_2009")
neon <- readOGR(dsn = "data", layer= "NEON_Terrestrial_Sampling_Boundaries_Northeast")
landisCnty <- readOGR(dsn = "data", layer = "US_Counties_Being_Intialized_for_Landis_at_UVM")
dart2nd <- readOGR(dsn = "data", layer = "Darmouth_2nd_College_Grant")
nulhegan1 <- readOGR(dsn = "data", layer = "Nulhegan_Basin_Simple_Boundary")
corinth1 <- readOGR(dsn = "data", layer = "Corinth_VT_Simple_Boundary")
## Simple map example
map <- leaflet() %>%
# Base groups
addTiles() %>%
# Overlay groups
addPolygons(data = landisCnty, color = "orange", opacity = 0.5,
popup = ~as.character(StudyArea), group = "Landis-II Simulation Area") %>%
addPolygons(data = neon, color = "purple", opacity = 0.5,
popup = ~as.character(siteName), group = "NEON Sites") %>%
addPolygons(data = bnds, popup = ~as.character(NAME), group = "US Exp. Forests") %>%
addPolygons(data = dart2nd, color = "green", opacity = 0.5,
popup = "Dartmouth 2nd College Grant", group = "Partner Forests") %>%
addPolygons(data = nulhegan1, color = "green", opacity = 0.5,
popup = "Nulhegan Basin", group = "Partner Forests") %>%
addPolygons(data = corinth1, color = "green", opacity = 0.5,
popup = "Corinth ? Forest", group = "Partner Forests") %>%
addMarkers(data = track2_sites, ~Long, ~Lat, popup = ~as.character(Name), group = "Sites") %>%
addCircles(data = nacp, stroke = F, popup = ~as.character(Label),
group = "Plots") %>%
# Layers control
addLayersControl(
#baseGroups = c("OSM (default)", "Toner", "Toner Lite"),
overlayGroups = c("Sites", "Plots", "US Exp. Forests", "Partner Forests",
"NEON Sites", "Landis-II Simulation Area"),
options = layersControlOptions(collapsed = FALSE)
)
map
## Other examples
## Create a leaflet map of just point markers
leaflet(track2_sites) %>%
addTiles() %>%
addMarkers(~Long, ~Lat, popup = ~as.character(Name))
## Try creating a leaflet map that layers polygons as well
leaflet() %>%
addTiles() %>%
addMarkers(data = track2_sites, ~Long, ~Lat, popup = ~as.character(Name)) %>%
addPolygons(data = bnds) %>%
addMarkers(data = nacp, popup = ~as.character(Label))
## Colored circles example
# Create a palette that maps factor levels to colors
pal <- colorFactor(c("navy", "red"), domain = c("ship", "pirate"))
leaflet(df) %>% addTiles() %>%
addCircleMarkers(
radius = ~ifelse(type == "ship", 6, 10),
color = ~pal(type),
stroke = FALSE, fillOpacity = 0.5
)
## Simple map example
map <- leaflet() %>%
# Base groups
addTiles(group = "OSM (default)") %>%
addProviderTiles(providers$Stamen.Toner, group = "Toner") %>%
addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
# Overlay groups
addCircles(~long, ~lat, ~10^mag/5, stroke = F, group = "Quakes") %>%
addPolygons(data = outline, lng = ~long, lat = ~lat,
fill = F, weight = 2, color = "#FFFFCC", group = "Outline") %>%
# Layers control
addLayersControl(
baseGroups = c("OSM (default)", "Toner", "Toner Lite"),
overlayGroups = c("Quakes", "Outline"),
options = layersControlOptions(collapsed = FALSE)
)
map
|
fca629924db547d92196a404500b7c9016da9165
|
19d60de824aaf4a4f6bc0fb97371791ee53d5166
|
/man/getSubsetReads.Rd
|
555f042288c44de022043785a3db7ea39b92f606
|
[] |
no_license
|
tgirke/systemPipeRdata
|
6eb30d9681062fc2462cbf4c0fa561131f69c60c
|
8fa1d78931a3c68e226ec988421ee838a9058c78
|
refs/heads/master
| 2022-10-30T13:07:31.454000
| 2022-10-23T20:56:55
| 2022-10-23T20:56:55
| 39,166,821
| 10
| 15
| null | 2023-08-21T22:00:34
| 2015-07-15T23:54:51
|
R
|
UTF-8
|
R
| false
| false
| 2,259
|
rd
|
getSubsetReads.Rd
|
\name{getSubsetReads}
\alias{getSubsetReads}
\title{Subsetting fastq data}
\description{
Returns subsets of fastq files data based on specific mapping regions or
list of genes or GRanges object.
}
\usage{
getSubsetReads(args,
geneList = NULL,
gr = NULL,
MappingRegion = 1:1e+05,
sample_range = 90000:1e+05,
truncate_refs = TRUE,
id_read_number = TRUE,
annotation = "data/tair10.gff",
reference = "data/tair10.fasta",
annot_outname = "tair10_sub.gff",
ref_outname = "tair10_sub.fasta",
outdir = "data/subset/",
silent = FALSE
)
}
\arguments{
\item{args}{object of class \code{SYSargs2}.}
\item{geneList}{selected genes list to retrieve the reads from the fastq file.}
\item{gr}{an object containing genomic ranges to retrieve the reads from the fastq file.}
\item{MappingRegion}{integers ranges of start and end of chromosome position to retrieve the reads from the fastq file.}
\item{sample_range}{random range to subsetted the fastq file.}
\item{truncate_refs}{logical. If TRUE it will generate reference genome and annotation subset file.}
\item{id_read_number}{if fastq file contains sequence name with read number (\verb{$ri} - \verb{--defline-seq '@$sn[_$rn]/$ri'}).}
\item{annotation}{path to annotation file.}
\item{reference}{path to reference genome.}
\item{annot_outname}{character name of the annotation output file.}
\item{ref_outname}{character name of the reference genome output file.}
\item{outdir}{path to output directory.}
\item{silent}{if set to TRUE, all messages returned by the function will be suppressed.}
}
\value{
Workflow directory containing sample data and parameter files along with the following subdirectories:
\item{param/}{stores parameter files}
\item{data/}{stores input data}
\item{results/}{stores output results}
For more details, please consult the Overview Vignette (HTML) of the systemPipeR package (http://bioconductor.org/packages/systemPipeR).
}
\author{
Thomas Girke, Shiyuan Guo and Daniela Cassol
}
\examples{
\dontrun{
getSubsetReads(args, MappingRegion = 1:900, sample_range = 800:900, outdir = "data/subset/", silent = FALSE)
getSubsetReads(args, MappingRegion = 1:900, sample_range = NULL, outdir = "data/subset/", silent = FALSE)
}
}
\keyword{ utilities }
|
c0e5d83a5e6bfaa879cec086a5a158f24df35a18
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PET/examples/readSifData.Rd.R
|
8203f7af3dcc2417224e018a6ec3fe70dcc28f74
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
readSifData.Rd.R
|
library(PET)
### Name: readSifData
### Title: Reloaded System Matrix
### Aliases: readSifData
### Keywords: IO file
### ** Examples
## Not run:
##D P <- phantom(n=101)
##D rP <- markPoisson(P, nSample=1800000)
##D irP <- iradonIT(rP$rData, 101, 101, KernelFileSave=1,
##D KernelFileName="SystemMatrix.sif")
##D rm(irP,P,rP)
##D
##D # reading the sif-matrix:
##D SysMat <- readSifData("SystemMatrix.sif")
##D names(SysMat)
##D SysMat$Header
##D rm(SysMat)
## End(Not run)
|
ce7dd687cf8d22ae0a994fecf03fe1d9d51fbb75
|
fbfe4ad157c97e83391339e4d17b93697b9ab6be
|
/plot1.R
|
a554b2146d27426a9427d8eab90ac14e0bc7817f
|
[] |
no_license
|
mx3Carlangaz/ExData_Plotting1-1
|
50f6c0fae90dfc50907731d6400532a8e9f613b9
|
d3feeb76d07a511b2edb0ed75441148770f44656
|
refs/heads/master
| 2021-01-22T00:10:41.158727
| 2016-03-13T23:39:01
| 2016-03-13T23:39:01
| 53,813,484
| 0
| 0
| null | 2016-03-13T23:33:51
| 2016-03-13T23:33:51
| null |
UTF-8
|
R
| false
| false
| 526
|
r
|
plot1.R
|
f.path <- "household_power_consumption.txt"
file <- read.table(f.path, stringsAsFactors = FALSE, sep=";", header = TRUE)
file$Date <- as.Date(file$Date,"%d/%m/%Y")
file$Global_active_power <- as.numeric(file$Global_active_power)
file <- subset(file, Date %in% c(as.Date("1/2/2007","%d/%m/%Y"),as.Date("2/2/2007","%d/%m/%Y")))
png("plot1.png", width=480, height=480)
hist(file$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)"
)
dev.off()
|
ed34a781baba690647f8c9cf253c1d9d318c2b91
|
c5bc2307bcead541658ccd7f49db4eda9a6a3762
|
/R/noise.R
|
8ff1c9a1702faabef995a1444dc42d29c2678dbc
|
[] |
no_license
|
shfischer/FLife
|
b2216da5bdf3f463cc7ea354e49115f598ecafe0
|
4979df14be234debeb468d89cf2659bb2f659836
|
refs/heads/master
| 2021-08-18T07:43:51.998014
| 2020-08-02T18:35:08
| 2020-08-02T18:35:08
| 238,467,826
| 0
| 0
| null | 2020-02-05T14:20:48
| 2020-02-05T14:20:46
| null |
UTF-8
|
R
| false
| false
| 1,844
|
r
|
noise.R
|
#' @title Random noise with different frequencies
#'
#' @description A noise generator for lognormal errors
#'
#' @name rlnoise
#'
#' @param n number of iterations
#' @param len an \code{FLQuant}
#' @param sd standard error for simulated series
#' @param b autocorrelation parameter a real number in [0,1]
#' @param burn gets rid of 1st values i series
#' @param trunc get rid of values > abs(trunc)
#' @param what returns time series for year, cohort or age"
#' @param ... any other parameters
#'
#' @aliases rlnoise rlnoise-method rlnoise,numeric,FLQuant-method rlnoise,numeric,missing-method
#' @export rlnoise
#'
#' @docType methods
#' @rdname rlnoise
#'
#' @importFrom methods is
#'
#' @return A \code{FLQuant} with autocorrelation equal to B.
#'
#' @references Ranta and Kaitala 2001 Proc. R. Soc.
#' vt = b * vt-1 + s * sqrt(1 - b^2)
#' s is normally distributed random variable with mean = 0
#' b is the autocorrelation parameter
#' @export
#'
#' @examples
#' \dontrun{
#' flq=FLQuant(1:100)
#' white <- rnoise(1000,flq,sd=.3,b=0)
#' plot(white)
#' acf(white)
#'
#' red <- rlnoise(1000,flq,sd=.3,b=0.7)
#' plot(red)
#' acf(red)
#'
#' data(ple4)
#' res=rnoise(1000,log(flq),sd=.3,b=0)
#'
#' ggplot()+
#' geom_point(aes(year,age,size= data),
#' data=subset(as.data.frame(res),data>0))+
#' geom_point(aes(year,age,size=-data),
#' data=subset(as.data.frame(res),data<=0),colour="red")+
#' scale_size_area(max_size=4, guide="none")+
#' facet_wrap(~iter)
#'
#' res=rlnoise(4,log(m(ple4)),burn=10,b=0.9,cohort=TRUE)
#' ggplot()+
#' geom_point(aes(year,age,size= data),
#' data=subset(as.data.frame(res),data>0))+
#' geom_point(aes(year,age,size=-data),
#' data=subset(as.data.frame(res),data<=0),colour="red")+
#' scale_size_area(max_size=4, guide="none")+
#' facet_wrap(~iter)
#'
#' }
|
7b91c9516c42aac3663559ed88042acf3fa83f29
|
ac607fe77bec7c73fc153da682651f43ea535d25
|
/man/L.Rd
|
56567f3ee8591697288d2e4095dd7e5ac44ffc04
|
[] |
no_license
|
cran/Calculator.LR.FNs
|
ee38253eb9efe983871732bd832b2fd3708bb39b
|
f7d7c1b6faef82b6ff23c24758870f0a9863d5cb
|
refs/heads/master
| 2021-01-10T13:17:31.510783
| 2018-05-02T11:04:22
| 2018-05-02T11:04:22
| 55,230,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,087
|
rd
|
L.Rd
|
\name{L}
\alias{L}
\title{
Introducing the form of L fuzzy number
}
\description{
Considering the definition of LR fuzzy number in \code{LR}, if the left and the right shape functions of a LR fuzzy number are be equal (i.e., \eqn{ L(.) = R(.) }), then LR fuzzy number is a L fuzzy number which denoted by \eqn{ (n, \alpha, \beta)L }.
Function \code{L} introduce a total form for L fuzzy number to computer.
}
\usage{
L(m, m_l, m_r)
}
\arguments{
\item{m}{
The core of L fuzzy number
}
\item{m_l}{
The left spread of L fuzzy number
}
\item{m_r}{
The right spread of L fuzzy number
}
}
%% \details{
%% ~~ If necessary, more details than the description above ~~
%% }
\value{
This function help to users to define any L fuzzy number after introducing the left shape function L. This function consider L fuzzy number L(m, m_l, m_r) as a vector with 4 elements. The first three elements are m, m_l and m_r respectively; and the fourth element is considerd equal to 0.5 for distinguish L fuzzy number from LR and RL fuzzy numbers.
}
\references{
Dubois, D., Prade, H., Fuzzy Sets and Systems: Theory and Applications. Academic Press (1980).
Taheri, S.M, Mashinchi, M., Introduction to Fuzzy Probability and Statistics. Shahid Bahonar University of Kerman Publications, In Persian (2009).
}
\author{
Abbas Parchami }
\examples{
# First introduce the left shape function of L fuzzy number
Left.fun = function(x) { (1-x^2)*(x>=0)}
A = L(20, 12, 10)
LRFN.plot(A, xlim=c(0,60), col=2, lwd=2)
## The function is currently defined as
function (m, m_l, m_r)
{
c(m, m_l, m_r, 0.5)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Calculator for LR Fuzzy Numbers }
\keyword{ Zadeh extension principle }
\keyword{ Introducing the form of LR fuzzy number Fuzzy Number }
\keyword{ Introducing the form of RL fuzzy number Fuzzy Number }
\keyword{ Introducing the form of L fuzzy number Fuzzy Number }
\keyword{ Ploting and drawing LR fuzzy numbers }
|
8eb436ec92c395e2b47f5e6ca623d2d8de9cee01
|
105fdf07504ddc0c8093c3fa713eca42f1a3edb0
|
/src/esom.R
|
f5dcc7186b4148c2284d97834ba3691e1c09a0e1
|
[
"MIT"
] |
permissive
|
rebelrebel04/emergence
|
d27fe3463d0578feeef2e8fc2ff21c8f4ffbaf27
|
1b5bbc6ad2b3e400f9e08b8bb815e8683dd6b2e8
|
refs/heads/main
| 2023-08-28T00:27:09.203106
| 2021-11-12T00:27:24
| 2021-11-12T00:27:24
| 417,932,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 888
|
r
|
esom.R
|
library(Umatrix)
data('Hepta')
ir <-
iris %>%
select(-Species) %>%
as.matrix()
res <- esomTrain(ir, Key = 1:nrow(ir))
# res <- esomTrain(Hepta$Data, Key = 1:nrow(Hepta$Data))
res
Umatrix::plotMatrix(res$Umatrix, res$BestMatches, TransparentContours = TRUE)
map <- Umatrix::esomTrain(as.matrix(distances), Key = seq_along(trees),
Epochs = 5, # Increase for better results
Lines = 42,
Columns = 42,
Toroid = FALSE)
Umatrix::plotMatrix(Matrix = map$Umatrix,
Toroid = FALSE, FixedRatio = TRUE,
TransparentContours = FALSE, Clean = TRUE) +
ggplot2::geom_point(data = data.frame(x = map$BestMatches[, 3],
y = map$BestMatches[, 2]),
shape = 19, color = treeCols, size = 2)
|
de6e706b710c055feeafe182132bca509b6b4a7d
|
f0bff74ed6fc7d62e3f4ea7d50f2be1757f994f7
|
/PlanB/clonevol2FIshplot.R
|
00a5ae926d0f9a4cf14b660b20b441f564f42e76
|
[
"MIT"
] |
permissive
|
Ninomoriaty/Schism2Fishplot
|
379b278cb809eee6f0c3a61e591c0830171ee5ad
|
796b1677b20e783135c66ceb0f1897b2cc38b64d
|
refs/heads/master
| 2020-05-06T15:29:04.110989
| 2019-05-29T17:47:38
| 2019-05-29T17:47:38
| 180,192,163
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,726
|
r
|
clonevol2FIshplot.R
|
# dependencies of clonevol
library(gridBase)
library(gridExtra)
library(ggplot2)
library(igraph)
library(packcircles)
library(trees)
# dependencies of fishplot
library(png)
library(Hmisc)
library(plotrix)
library(clonevol)
library(fishplot)
### need to be automated
sc2 = read.table("/home/ninomoriaty/R_Project/EvolCancer/EvolCancer/CCF_data2.txt",sep="\t",stringsAsFactors=F,header=T)
vafs2 = data.frame(sc2[,2]+1,sc2[,3:9]*100)
samples = c("t1", "t2", "t3", "t4", "t5","t6", "t7")
names(vafs2)[1] = "cluster"
names(vafs2)[2:8] = samples
vafs3 <- vafs2[which(vafs2$cluster %in% cluster_ls2[,1]),]
cluster_ls3 <- as.data.frame(table(vafs2[,1]))
cluster_ls2 <- cluster_ls3[which(cluster_ls3$Freq > 5), ]
vafs3[vafs3$cluster == 22, 1] <- 2
vafs3[vafs3$cluster == 106, 1] <- 3
#------(should be replaced by EvolCancer)------#
## run clonevol
res = infer.clonal.models(variants=vafs3, cluster.col.name="cluster", ccf.col.names=samples,
subclonal.test="bootstrap", subclonal.test.model="non-parametric",
cluster.center="mean", num.boots=1000, founding.cluster=1,
min.cluster.vaf=0.01, p.value.cutoff=0.01, alpha=0.1, random.seed=63108)
## create a list of fish objects - one for each model (in this case, there's only one)
f = generateFishplotInputs(results=res)
fishes = createFishPlotObjects(f)
## plot each of these with fishplot
pdf('fish.pdf', width=8, height=4)
for (i in 1:length(fishes)){
fish = layoutClones(fishes[[i]])
fish = setCol(fish,f$clonevol.clone.colors)
fishPlot(fish,shape="spline", title.btm="PatientID", cex.title=0.7,
vlines=seq(1, length(samples)), vlab=samples, pad.left=0.5)
}
dev <- dev.off()
|
0f38de3a839a4b3cf95efeef4ca21d944e2f8533
|
10005fa385fcfb01d8dd89fbbcf6fd6001fb3f38
|
/tex_file/jcgs/figs/new_experiment_figs/plotmh.R
|
f04b9e6ee51e6cc870a8c45607189a722de2c6b2
|
[] |
no_license
|
boqian1000/MJP_Boqian
|
ca605fd1dc8a20202f7c173c15895e3c3317598f
|
53364d977fbb35827e88ee72b16d5b61ab8a9ca4
|
refs/heads/master
| 2021-05-04T11:06:32.068175
| 2020-03-25T22:04:18
| 2020-03-25T22:04:18
| 52,987,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,999
|
r
|
plotmh.R
|
# exp model final plot
setwd( "/Users/Isaac_Zhang/Research/MCMC/simulation_result/data/exp_dim3/")
library(ggplot2)
load("rda_var")
#load("rda_L")
load("gbs_a")
gbs_a$var2 = var[,1] + 0.02
gbs_a$var3 = var[,1] - 0.01
load("gbs_b")
gbs_b$var2 = var[,1] + 0.02
gbs_b$var3 = var[,1] - 0.01
load("mh_a")
mh_a$var2 = var[,1] + 0.025
mh_a$var3 = var[,1] - 0.015
load("mh_b")
mh_b$var2 = var[,1] + 0.025
mh_b$var3 = var[,1] - 0.015
load("omh_a")
omh_a$var2 = var[,1] + 0.023
omh_a$var3 = var[,1] - 0.013
load("omh_b")
omh_b$var2 = var[,1] + 0.023
omh_b$var3 = var[,1] - 0.013
load("p_a")
p_a$var2 = var[,1] + 0.021
p_a$var3 = var[,1] - 0.011
load("p_b")
p_b$var2 = var[,1] + 0.021
p_b$var3 = var[,1] - 0.011
# dim 5
#r1 = 1.7 / 7
#r2 = 1.7 / 8
# dim 3
#r1 = 1.7 / 30
#r2 = 1.7 / 40
#h
# r1 = 10 / 2.5
# r2 = 10 / 2
s1 = 5
s2 = 1.5
s1 = 5
s2 = 2
trans_col = 0.6
ratio.display <- 1/1
maxx = 1.8
# dim3
#maxy = 7.6
#dim10
#maxy = 1.3
maxy = 0
maxy = max(maxy, max(mh_a$mh_a_k1.5u))
maxy = max(maxy, max(mh_a$mh_a_k3u))
maxy = max(maxy, max(mh_a$mh_a_k2u))
miny = 1000
miny = min(miny, min(mh_a$mh_a_k1.5d))
miny = min(miny, min(mh_a$mh_a_k3d))
miny = min(miny, min(mh_a$mh_a_k2d))
miny = 0
ratio.values <- (maxx)/(maxy - miny)
#library(ggplot2)
## square 0, round 1, triangle 2
trans_col = 0.6
#roberts and rosenthal 2004
p_ALPHA <- ggplot() + theme_bw()+ theme(axis.text=element_text(size=40), axis.title=element_text(size=40))
p_ALPHA <- p_ALPHA + coord_fixed(ratio = ratio.values / ratio.display)
p_ALPHA <- p_ALPHA + ylim(miny, maxy) + xlim(0, maxx)+ scale_shape(solid = FALSE) +
geom_point(data = mh_a, aes(x = var, y =mh_a_k1.5) , colour = "skyblue3", size = s1,alpha = 0.6, shape = 1, stroke = 2) +
geom_errorbar(data = mh_a, aes(x= var,ymax = mh_a_k1.5u, ymin = mh_a_k1.5d), width=0.05, alpha = 0.6,colour = "skyblue3")+
geom_line(data = mh_a, aes(x = var, y =mh_a_k1.5) ,colour = "skyblue3", size = s2, alpha = trans_col) +
geom_point(data = mh_a, aes(x = var2, y =mh_a_k2) , colour = "skyblue3", size = s1, alpha = 0.6,shape = 0, stroke = 2) +
geom_errorbar(data = mh_a, aes(x= var2, ymax = mh_a_k2u, ymin = mh_a_k2d), width=0.05,alpha = 0.6, colour = "skyblue3")+
geom_line(data = mh_a, aes(x = var2, y =mh_a_k2) ,colour = "skyblue3", size = s2, alpha = trans_col) +
geom_point(data = mh_a, aes(x = var3, y =mh_a_k3) , colour = "skyblue3", size = s1,alpha = 0.6, shape = 2, stroke = 2) +
geom_errorbar(data = mh_a, aes(x= var3,ymax = mh_a_k3u, ymin = mh_a_k3d), width=0.05,alpha = 0.6, colour = "skyblue3")+
geom_line(data = mh_a, aes(x = var3, y =mh_a_k3) ,colour = "skyblue3", size = s2, alpha = trans_col) +
labs(x = expression(paste(sigma^2," of MH proposal") )) + labs(y = expression(paste("ESS/unit time for ",alpha))) + theme(legend.position="none")
p_ALPHA
setwd("/Users/Isaac_Zhang/Research/MCMC/revision/New_figures/new_whole_exp_fitures/")
#ggsave("mh_exp_alpha_dim10.pdf", height = 6.8, width = 6.8)
ggsave("mh_exp_alpha_dim3.pdf", height = 6.8, width = 6.8)
maxx = 1.8
# dim3
maxy = 0
maxy = max(maxy, max(mh_b$mh_b_k1.5u))
maxy = max(maxy, max(mh_b$mh_b_k3u))
maxy = max(maxy, max(mh_b$mh_b_k2u))
miny = 1000
miny = min(miny, min(mh_b$mh_b_k1.5d))
miny = min(miny, min(mh_b$mh_b_k3d))
miny = min(miny, min(mh_b$mh_b_k2d))
miny = 0
ratio.values <- (maxx)/(maxy - miny)
# dim10
#maxy = 1.65
#maxy = 1.65
ratio.values <- (maxx)/(maxy)
p_BETA <- ggplot() + theme_bw()+ theme(axis.text=element_text(size=40), axis.title=element_text(size=40))
p_BETA <- p_BETA + coord_fixed(ratio = ratio.values / ratio.display)
p_BETA <- p_BETA + ylim(miny, maxy) + xlim(0, maxx) + scale_shape(solid = FALSE) +
geom_point(data = mh_b, aes(x = var, y =mh_b_k1.5) , colour = "skyblue3", size = s1,alpha = 0.6, shape = 1, stroke = 2) +
geom_errorbar(data = mh_b, aes(x= var,ymax = mh_b_k1.5u, ymin = mh_b_k1.5d), width=0.05, alpha = 0.6,colour = "skyblue3")+
geom_line(data = mh_b, aes(x = var, y =mh_b_k1.5) ,colour = "skyblue3", size = s2, alpha = trans_col) +
geom_point(data = mh_b, aes(x = var2, y =mh_b_k2) , colour = "skyblue3", size = s1, alpha = 0.6,shape = 0, stroke = 2) +
geom_errorbar(data = mh_b, aes(x= var2, ymax = mh_b_k2u, ymin = mh_b_k2d), width=0.05,alpha = 0.6, colour = "skyblue3")+
geom_line(data = mh_b, aes(x = var2, y =mh_b_k2) ,colour = "skyblue3", size = s2, alpha = trans_col) +
geom_point(data = mh_b, aes(x = var3, y =mh_b_k3) , colour = "skyblue3", size = s1,alpha = 0.6, shape = 2, stroke = 2) +
geom_errorbar(data = mh_b, aes(x= var3,ymax = mh_b_k3u, ymin = mh_b_k3d), width=0.05,alpha = 0.6, colour = "skyblue3")+
geom_line(data = mh_b, aes(x = var3, y =mh_b_k3) ,colour = "skyblue3", size = s2, alpha = trans_col) +
labs(x = expression(paste(sigma^2," of MH proposal") )) + labs(y = expression(paste("ESS/unit time for ",beta))) + theme(legend.position="none")
p_BETA
#ggsave("mh_exp_beta_dim10.pdf", height = 6.8, width = 6.8)
ggsave("mh_exp_beta_dim3.pdf", height = 6.8, width = 6.8)
|
ca9cd95314ae3a10727953e72625b787b452f877
|
0cbd783c8c2323ab714d01c7bc30f30726268358
|
/src/cve_analysis/plot_err_fpsize.R
|
532d72a042cab6eefc8bbee5747b5c0be28813d5
|
[] |
no_license
|
kingjml/cveureka
|
fb3de6b448b81e40e81c4e120eaf8042d5e53e9a
|
82b1472f857ad5f8eef34a7e50d1e674f31b9c3f
|
refs/heads/master
| 2022-04-10T21:19:25.889747
| 2020-03-30T06:44:20
| 2020-03-30T06:44:20
| 255,668,508
| 0
| 1
| null | 2020-04-14T16:50:31
| 2020-04-14T16:50:30
| null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
plot_err_fpsize.R
|
# Error value vs. footprint size
source("config.r")
d = sql.fetch(sql.query("
SELECT fp_size fp,{sq.error_stat}
FROM {db.schema}.asr_error
JOIN {db.schema}.asr_grid_zone USING (id_asr)
WHERE dens_adj
AND offset_calib = '{qp.offset_calib}'
GROUP BY fp_size
"))
dcont = d[d$fp>0,]
dpdl = d[d$fp==-1,]
pdl_mean = dpdl$e_ra_mean
pdl_sd = dpdl$e_ra_sd
pdl_label = "Pulse-Doppler Limited Footprint"
ggarrange(
(
ggplot(dcont, aes(x=fp, y=e_ra_mean))
+ geom_point(size=2)
+ geom_hline(aes(yintercept=pdl_mean))
+ annotate("text", 22, pdl_mean, vjust=-1.5, label=pdl_label)
+ geom_smooth(method=lm, formula=y~poly(x, 4, raw=TRUE), se = FALSE)
+ xlab(lb.fp_size)
+ ylab(lb.e_ra_mean)
),
(
ggplot(dcont, aes(x=fp, y=e_ra_sd))
+ geom_point(size=2)
+ geom_hline(aes(yintercept=pdl_sd))
+ annotate("text", 22, pdl_sd, vjust=2, label=pdl_label)
+ geom_smooth(method=lm, formula=y~poly(x, 4, raw=TRUE), se = FALSE)
+ xlab(lb.fp_size)
+ ylab(lb.e_ra_sd)
),
labels = 'AUTO'
)
save.plot("e_ra_fpsize", 2)
|
d6f5a9d2c9671ad0a5bb4150ea4b2c39d3387783
|
2d271f2b775acde9dc6e9f42d86cccce0729ade5
|
/man/rowCofactors.Rd
|
180d75b6af6d887c766c65ba0e7a44c795c5a00c
|
[] |
no_license
|
cran/matlib
|
6275bab0cadc45b1259e87a2864012f429e3f2f8
|
876d6181d63e3f9922d01adf2e32621775ab3e03
|
refs/heads/master
| 2022-12-22T02:35:55.163954
| 2022-12-08T16:20:15
| 2022-12-08T16:20:15
| 48,083,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,025
|
rd
|
rowCofactors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/determinants.R
\name{rowCofactors}
\alias{rowCofactors}
\title{Row Cofactors of A[i,]}
\usage{
rowCofactors(A, i)
}
\arguments{
\item{A}{a square matrix}
\item{i}{row index}
}
\value{
a vector of the cofactors of A[i,]
}
\description{
Returns the vector of cofactors of row i of the square matrix A. The determinant, \code{Det(A)},
can then be found as \code{M[i,] \%*\% rowCofactors(M,i)} for any row, i.
}
\examples{
M <- matrix(c(4, -12, -4,
2, 1, 3,
-1, -3, 2), 3, 3, byrow=TRUE)
minor(M, 1, 1)
minor(M, 1, 2)
minor(M, 1, 3)
rowCofactors(M, 1)
Det(M)
# expansion by cofactors of row 1
M[1,] \%*\% rowCofactors(M,1)
}
\seealso{
\code{\link{Det}} for the determinant
Other determinants:
\code{\link{Det}()},
\code{\link{adjoint}()},
\code{\link{cofactor}()},
\code{\link{minor}()},
\code{\link{rowMinors}()}
}
\author{
Michael Friendly
}
\concept{determinants}
|
19cb752d72fe83559120efce7d5c241b8c10955c
|
c82776509a0587d12ab512853f6035cbb026292d
|
/productionfunction.R
|
687f1e8d3a91908f7c782ddd7960059156c65380
|
[] |
no_license
|
fidocash/SFA
|
d857f6a84c4ecea583905fd6ee6fb773b85c48b3
|
61824125ea83cf7f29eb520da7efb8a6c29a7db2
|
refs/heads/master
| 2020-06-26T20:19:35.949912
| 2016-08-20T13:22:58
| 2016-08-20T13:22:58
| 66,147,568
| 4
| 0
| null | 2016-08-20T13:33:47
| 2016-08-20T13:13:29
|
R
|
UTF-8
|
R
| false
| false
| 1,032
|
r
|
productionfunction.R
|
##translog
#B&C95 | INPUTS = LABOUR, CAPITAL (PENN WORLD TABLES 9.0) | OUTPUT = REALGDP (PENN WORLD TABLES 9.0) | VARIABLES IN THE EFF.MODEL = FDIPERGDP (UNCTAD), ECI (HARVARD), HC (PENN WORLD TABLES 9.0 BY BARRO)
# ADDED VARIABLES = t (TIME TREND), DGEO (GEOGRAPHICAL DUMMY, FOR MOST DEVELOPED COUNTRIES, 1 if MOST DEV, 0 otherwise)
translog=sfa(log(REALGDP)~log(EMP)+log(RKNA)+I(0.5*log(RKNA)^2)+I(0.5*log(EMP)^2)+I(log(EMP)*log(RKNA))+t+Dgeo|FDIperGDP+ECI+HC+I(HC*FDIperGDP)+I(FDIperGDP*ECI),data=dati)
print(summary(translog,extraPar=TRUE))
##prova cobb douglas
cd=sfa(log(REALGDP)~log(EMP)+log(RKNA)+t+Dgeo|FDIperGDP+ECI+HC+I(HC*FDIperGDP)+I(FDIperGDP*ECI),data=dati)
print(summary(cd,extraPar=TRUE))
#printing latex
#translog
#tltex=xtable(summary(translog)); tltex=print(tltex,include.rownames=FALSE,extraPar=TRUE); write.table(tltex,"Script/tables/tabletranslog.tex")
#std
#cdtex=xtable(cd); cdtex=print(cdtex,include.rownames=FALSE,extraPar=TRUE); write.table(cdtex,"Script/tables/tablecd.tex")
|
91aceb850c9b6fd709cd0ce148c9ea0567ccdaad
|
f0b76f4edc64b465f387205e026addf28581611d
|
/man/LoglikelihoodSM.Rd
|
fa11d8d5fce333c831313063cf48ea3a449c53eb
|
[] |
no_license
|
cran/SMM
|
74f37de99ee98521e80ba8dde25abd439cdee592
|
f3b14f0517376ee60ec23143994aa81a9e43658e
|
refs/heads/master
| 2021-01-01T17:20:54.441906
| 2020-01-31T12:30:02
| 2020-01-31T12:30:02
| 98,057,464
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,559
|
rd
|
LoglikelihoodSM.Rd
|
\name{LoglikelihoodSM}
\alias{LoglikelihoodSM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Loglikelihood (semi-Markov model)
}
\description{
Computation of the loglikelihood starting from sequence(s), alphabet, initial distribution, transition matrix and type of sojourn times}
\usage{
## parametric case
LoglikelihoodSM(seq, E, mu, Ptrans, distr, param, laws = NULL, TypeSojournTime)
## non-parametric case
LoglikelihoodSM(seq, E, mu, Ptrans, distr, param = NULL, laws, TypeSojournTime)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{seq}{
List of sequence(s)
}
\item{E}{
Vector of state space
}
\item{mu}{
Vector of initial distribution of length S
}
\item{Ptrans}{
Matrix of transition probabilities of the embedded Markov chain \eqn{J=(J_m)_{m}} of size SxS
}
\item{distr}{
- "NP" for nonparametric case, laws have to be used, param is useless
- Matrix of distributions of size SxS if TypeSojournTime is equal to "fij";
- Vector of distributions of size S if TypeSojournTime is equal to "fi" or "fj";
- A distribution if TypeSojournTime is equal to "f".
The distributions to be used in distr must be one of "uniform", "geom", "pois", "dweibull", "nbinom".
}
\item{param}{
- Useless if distr = "NP"
- Array of distribution parameters of size SxSx2 (2 corresponds to the maximal number of distribution parameters) if TypeSojournTime is equal to "fij";
- Matrix of distribution parameters of size Sx2 if TypeSojournTime is equal to "fi" or "fj";
- Vector of distribution parameters of length 2 if TypeSojournTime is equal to "f".
}
\item{laws}{
- Useless if distr \eqn{\neq} "NP"
- Array of size SxSxKmax if TypeSojournTime is equal to "fij";
- Matrix of size SxKmax if TypeSojournTime is equal to "fi" or "fj";
- Vector of length Kmax if the TypeSojournTime is equal to "f".
Kmax is the maximum length of the sojourn times.
}
\item{TypeSojournTime}{
Character: "fij", "fi", "fj", "f" (for more explanations, see Details)
}
}
\details{
In this package we can choose differents types of sojourn time. Four options are available for the sojourn times:
\itemize{
\item depending on the present state and on the next state ("fij");
\item depending only on the present state ("fi");
\item depending only on the next state ("fj");
\item depending neither on the current, nor on the next state ("f").
}
}
\value{
\item{L}{Value of loglikelihood for each sequence}
\item{Kmax}{The maximal observed sojourn time}
}
\author{
Vlad Stefan Barbu, barbu@univ-rouen.fr \cr Caroline Berard, caroline.berard@univ-rouen.fr \cr Dominique Cellier, dominique.cellier@laposte.net \cr Mathilde Sautreuil, mathilde.sautreuil@etu.univ-rouen.fr \cr Nicolas Vergne, nicolas.vergne@univ-rouen.fr
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link{simulSM},
\link{estimMk},
\link{simulMk},
\link{estimSM}
}
\examples{
alphabet = c("a","c","g","t")
S = length(alphabet)
# creation of the transition matrix
Pij = matrix(c(0,0.2,0.3,0.5,0.4,0,0.2,0.4,0.1,0.2,0,0.7,0.8,0.1,0.1,0),
nrow = S, ncol = S, byrow = TRUE)
Pij
# [,1] [,2] [,3] [,4]
#[1,] 0.0 0.2 0.3 0.5
#[2,] 0.4 0.0 0.2 0.4
#[3,] 0.1 0.2 0.0 0.7
#[4,] 0.8 0.1 0.1 0.0
################################
## Parametric estimation of a trajectory (of length equal to 5000),
## where the sojourn times depend neither on the present state nor on the next state.
################################
## Simulation of a sequence of length 5000
seq5000 = simulSM(E = alphabet, NbSeq = 1, lengthSeq = 5000, TypeSojournTime = "f",
init = c(1/4,1/4,1/4,1/4), Ptrans = Pij, distr = "pois", param = 2)
#################################
## Computation of the loglikelihood
#################################
LoglikelihoodSM(seq = seq5000, E = alphabet, mu = rep(1/4,4), Ptrans = Pij,
distr = "pois", param = 2, TypeSojournTime = "f")
#$L
#$L[[1]]
#[1] -1475.348
#
#
#$Kmax
#[1] 10
#------------------------------#
################################
## Non-parametric simulation of several trajectories (3 trajectories of length 1000,
## 10 000 and 2000 respectively),
## where the sojourn times depend on the present state and on the next state.
################################
## creation of a matrix corresponding to the conditional sojourn time distributions
lengthSeq3 = c(1000, 10000, 2000)
Kmax = 4
mat1 = matrix(c(0,0.5,0.4,0.6,0.3,0,0.5,0.4,0.7,0.2,0,0.3,0.4,0.6,0.2,0),
nrow = S, ncol = S, byrow = TRUE)
mat2 = matrix(c(0,0.2,0.3,0.1,0.2,0,0.2,0.3,0.1,0.4,0,0.3,0.2,0.1,0.3,0),
nrow = S, ncol = S, byrow = TRUE)
mat3 = matrix(c(0,0.1,0.3,0.1,0.3,0,0.1,0.2,0.1,0.2,0,0.3,0.3,0.3,0.4,0),
nrow = S, ncol = S, byrow = TRUE)
mat4 = matrix(c(0,0.2,0,0.2,0.2,0,0.2,0.1,0.1,0.2,0,0.1,0.1,0,0.1,0),
nrow = S, ncol = S, byrow = TRUE)
f <- array(c(mat1,mat2,mat3,mat4), c(S,S,Kmax))
### Simulation of 3 sequences
seqNP3 = simulSM(E = alphabet, NbSeq = 3, lengthSeq = lengthSeq3,
TypeSojournTime = "fij", init = rep(1/4,4), Ptrans = Pij, laws = f,
File.out = NULL)
#################################
## Computation of the loglikelihood
#################################
LoglikelihoodSM(seq = seqNP3, E = alphabet, mu = rep(1/4,4), Ptrans = Pij, laws = f,
TypeSojournTime = "fij")
#$L
#$L[[1]]
#[1] -429.35
#
#$L[[2]]
#[1] -4214.521
#
#$L[[3]]
#[1] -818.6451
#
#
#$Kmax
#[1] 4
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Semi-Markov models}
\keyword{Loglikelihood}% __ONLY ONE__ keyword per line
|
2e8c0957aaad0d1dcbf3d11c62ca3f8c82751612
|
1e79975906acd1d42637be95cedd0aa8219877f7
|
/R/plot_latencias.R
|
341b2a0df337fc98a83d480b8b494dbee01ef121
|
[] |
no_license
|
dekassegui/db-megasena
|
ee8d1ffd89bcb96402519395e2d669471b372e05
|
ba6ec72a3bf8b3a8168a6d3f0c608aefc652c796
|
refs/heads/master
| 2022-02-05T00:01:28.722488
| 2022-02-03T01:46:04
| 2022-02-03T01:46:04
| 32,427,202
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,907
|
r
|
plot_latencias.R
|
#!/usr/bin/Rscript --no-init-file
library(RSQLite)
con <- dbConnect(SQLite(), dbname='megasena.sqlite')
concurso <- dbGetQuery(con, 'SELECT MAX(concurso) FROM concursos')[1,1]
latencias <- dbGetQuery(con, 'SELECT latencia FROM info_dezenas')
# "prepared statement" para requisitar as máximas latências de cada número
rs <- dbSendQuery(con, "SELECT MAX(latencia) AS maxLatencia FROM (
WITH RECURSIVE this (s) AS (
SELECT GROUP_CONCAT(NOT(dezenas >> ($NUMERO - 1) & 1), '') || '0'
FROM dezenas_juntadas
), core (i) AS (
SELECT INSTR(s, '1') FROM this
UNION ALL
SELECT i + INSTR(SUBSTR(s, i), '01') AS k FROM this, core WHERE k > i
) SELECT INSTR(SUBSTR(s, i), '0')-1 AS latencia FROM this, core
)")
# Loop of prepared-statement executions fetching the historical maximum
# latency (longest gap between appearances) for each of the 60 numbers.
# NOTE(review): `rs`, `con` and `latencias` are created earlier in the file,
# outside this excerpt.
for (n in 1:60) {
  dbBind(rs, list('NUMERO'=n))
  dat <- dbFetch(rs)
  latencias[n, "maxLatencia"] <- dat$maxLatencia
}
dbClearResult(rs)
dbDisconnect(con)
# Difference between the record latency and the current one; rendered as the
# upper ("record") segment of each stacked bar below.
latencias$dif <- latencias$maxLatencia - latencias$latencia
# Rendering device: PNG file.
png(filename='img/latencias.png', width=1200, height=600, pointsize=12, family="Quicksand")
par(mar=c(2.25, 3.5, 3, 1))
# Y-axis upper bound rounded up to the next multiple of 10.
major=(max(latencias$maxLatencia) %/% 10 + 1) * 10
bar <- barplot(
  t(latencias[, c('latencia', 'dif')]),
  main=list('Latências dos Números', cex=2.5, font=1, col='black'),
  border="gray80", space=.25, col=c('orange1', 'gold'),
  xaxt='n', yaxt='n', # suppress default axis rendering (drawn manually below)
  ylim=c(0, major)
)
axis(
  side=1, at=bar, labels=c(sprintf('%02d', 1:60)),
  mgp=c(0, .75, 0), col="transparent",
  cex.axis=1.2775, font.axis=2, col.axis="orangered4"
)
# Render the Y axis with a friendlier look.
y <- seq(0, major, 10)
axis(
  side=2, at=y, las=2, col="gray10",
  cex.axis=1.25, font.axis=2, col.axis="orangered3"
)
# Add extra tick marks halfway between the Y-axis labels.
rug(head(y,-1)+5, side=2, ticksize=-.01, col="grey10", lwd=1)
# Render ordinary horizontal reference lines.
abline(h=c(y[y != 10], y+5), col="gray84", lty="dotted")
# Render the line and label for the expected latency = 60 / 6 = 10.
abline(h=10, col="dodgerblue", lty="dotted")
text(par("usr")[2], 10, "esperança", adj=c(1, -0.5), cex=.8, font=2, col="dodgerblue")
# Add a box-and-whiskers summary of current latencies before the first column.
bp <- boxplot(
  latencias$latencia, outline=T, frame.plot=F, axes=F, add=T, at=-1.25,
  border="darkred", col=c("#ffddbb"), yaxt='n', width=2
)
# Hatched band marking the interquartile range across the whole plot.
rect(
  0, bp$stats[2], bar[60]+bar[1], bp$stats[4], col="#ffffffac",
  border="transparent", density=18
)
#abline(h=bp$stats, col="hotpink", lty="dotted")
legend(
  x="topright", inset=0, box.col="#cccccc", box.lwd=1, bg="white",
  border="#b0b0b0", fill=c("orange1", "gold"), x.intersp=.5,
  legend=c("atual", "recorde"), cex=1.125, text.col="black"
)
# Right-margin title with the drawing number (`concurso` defined earlier).
mtext(
  paste("Mega-Sena", concurso), side=4, adj=.5, line=-.75,
  cex=2.75, font=1, col='orangered'
)
dev.off()
|
67067f2c01c178a82d45994f39508c4c1de41015
|
6b4fe2baa84e74af637f319ea5d887cb2fd6f9a2
|
/kevin/rimod-analysis/methylation/methylation_promoter_integration.R
|
6d02d8a101464e145bf89290f30f3f053b24824d
|
[] |
no_license
|
dznetubingen/analysis_scripts
|
1e27ca43a89e7ad6f8c222507549f72b1c4efc20
|
4fcac8a3851414c390e88b4ef4ac461887e47096
|
refs/heads/master
| 2021-06-25T10:47:40.562438
| 2021-01-04T16:02:34
| 2021-01-04T16:02:34
| 187,789,014
| 1
| 0
| null | 2020-09-03T11:37:25
| 2019-05-21T07:55:17
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,508
|
r
|
methylation_promoter_integration.R
|
#############################
# Integration of methylation and promoter shifting results
#
# For every significantly shifting promoter (GRN, frontal cortex), collect
# all significant DMPs (differentially methylated CpGs) on the same
# chromosome and strand that lie within `threshold` bp of either promoter
# boundary.
#############################
setwd("~/rimod/integrative_analysis/methylation_promoter_analysis/")

# Promoter shifting results; keep only significant shifts (KS-test FDR <= 0.05)
ps <- read.table("~/rimod/CAGE/cage_analysis/promotor_shifting/frontal/grn_shifting_promotors.txt", sep="\t", header=T, stringsAsFactors = F)
ps <- ps[ps$fdr.KS <= 0.05,]

# Differentially methylated positions; keep only significant probes
met <- read.table("~/rimod/Methylation/frontal_methylation_0818/DMPs_grn.ndc_quant.txt", sep="\t", header=T, stringsAsFactors = F)
met <- met[met$adj.P.Val <= 0.05,]

# Maximum distance (bp) between a promoter boundary and a CpG position
threshold <- 500

# For each shifting promoter, record (promoter row index, CpG name) pairs.
# Hits are accumulated in a list and bound once at the end; the index loops
# use seq_len() so zero-row inputs are handled (1:nrow(x) would yield c(1, 0)
# and crash on the NA comparison below).
hits <- list()
for (i in seq_len(nrow(ps))) {
  print(i)
  # extract promoter information
  prom <- ps[i,]
  # subset CpGs on the same chromosome and strand
  tmp <- met[met$chr == prom$chr & met$strand == prom$strand, ]
  # keep CpGs within `threshold` bp of either promoter boundary
  for (j in seq_len(nrow(tmp))) {
    cpg <- tmp[j,]
    if (abs(prom$start - cpg$pos) <= threshold || abs(prom$end - cpg$pos) <= threshold) {
      print("case")
      hits[[length(hits) + 1]] <- data.frame("PromIdx" = i, "CpG" = cpg$Name,
                                             stringsAsFactors = FALSE)
    }
  }
}
df <- if (length(hits) > 0) {
  do.call(rbind, hits)
} else {
  data.frame(PromIdx = integer(0), CpG = character(0), stringsAsFactors = FALSE)
}

# Promoters that have at least one nearby DMP
idxs <- as.numeric(df$PromIdx)
ps[idxs,]

# Inspect the first promoter with hits and its associated CpGs
idx = df$PromIdx[1]
prom <- ps[idx,]
cpgs <- as.character(df[df$PromIdx == idx,]$CpG)
cpgs <- met[met$Name %in% cpgs,]
cpgs
|
37953bed85ff1fc86f74a679728ac68ba45cdce6
|
530e4a6c55866b9a9ec608ca63d3b4a5c8e51bde
|
/build-script.R
|
bc16746325cc120a79d476567990d124f6001a8b
|
[
"CC0-1.0"
] |
permissive
|
christophsax/statistical-software-review-book
|
0baff7bc6d866fe29e0ab13ffe6438a94d5448d3
|
861f5df10b697dbdb691b0a773e3ef9125a64808
|
refs/heads/main
| 2023-05-24T00:01:48.260862
| 2021-06-09T14:53:22
| 2021-06-09T14:53:22
| 378,166,477
| 0
| 0
| null | 2021-06-18T13:55:33
| 2021-06-18T13:55:32
| null |
UTF-8
|
R
| false
| false
| 336
|
r
|
build-script.R
|
#!/usr/bin/env Rscript
# Build script for the bookdown site: pull review notes from Zotero, then
# render the book with the bs4_book theme.
source("scripts/getZoteroNotes.R")
# Point chromote at the system Chrome binary (used for headless rendering).
Sys.setenv("CHROMOTE_CHROME" = "/usr/bin/google-chrome-stable")
# Alternative output formats, kept for reference:
# bookdown::render_book("index.Rmd")
# bookdown::render_book("index.Rmd", output_format = c ("bookdown::gitbook", "bookdown::pdf_book"))
bookdown::render_book("index.Rmd", output_format = c("bookdown::bs4_book"))
|
5535a89712cce9319ec4d0391e9eca62758f0865
|
b1ff0a836e26d49dcd4af717017f5ae17e25502b
|
/rstudio script.R
|
917fd6d2ee1dd8a1d968b58c07eca74093705404
|
[] |
no_license
|
daniellim051000/HourlyWeatherData
|
f020a37745291087ece12be34efcd79e60509ecb
|
a49a65f8afebf83f2e67317ddefb5eb582bc2368
|
refs/heads/master
| 2023-01-19T17:27:21.348143
| 2020-11-21T09:11:30
| 2020-11-21T09:11:30
| 313,641,511
| 0
| 0
| null | 2020-11-21T09:11:31
| 2020-11-17T14:16:47
|
R
|
UTF-8
|
R
| false
| false
| 6,855
|
r
|
rstudio script.R
|
#PDFA Assignment
#Lim Zheng Yu
#TP051131
# Exploratory analysis of an hourly weather dataset: 14 ggplot2-based
# analyses plus two extra plots. The script mutates `data` in place
# (column drop, time parsing), so statement order matters.

#Import data
filepath = '/home/daniellim0510/Documents/HourlyWeatherData/4.Hourlyweatherdata.csv'
data = read.csv(filepath)
#Attach libraries (ggplot2 for plotting, dplyr for filter/select pipelines)
library(ggplot2)
library(dplyr)
#summary of dataset
summary(data)
#Pre Processing
#Drop Year Column
data = select (data,-c(year))
#Parse time_hour strings into POSIXlt date-times (day/month/year hour:minute)
data$time_hour = strptime(data$time_hour,'%d/%m/%Y %H:%M')
#analysis example 1: histogram of temperature, faceted by month
#declaration
month = facet_wrap(~month)
#Visualization and exploration
output = ggplot(data, mapping = aes(x = temp)) + geom_histogram() + labs(title = 'Histogram of Temperature',x = 'Temperature')
#output
output + month
#analysis example 2: boxplots of temperature per month, colored by origin
#visualization and exploration
output = ggplot(data, mapping = aes(x = month, y = temp, color = origin)) + geom_boxplot() + labs(title = 'Temperature in each Month',x = 'Month', y = 'Temperature')
#output
output
#analysis example 3: dew point against temperature with a linear fit
#declaration
line = stat_smooth(method = "lm")
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = temp, y = dewp, color = origin)) + geom_point(alpha = 0.15)+ line + labs(title = 'Dew Point against Temperature', x = 'Temperature ', y = 'Dew Point')
#output
output
#analysis example 4: humidity against dew point with a linear fit
#declaration
line = stat_smooth(method = "lm")
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = dewp, y = humid, color = origin)) + geom_point(alpha = 0.15)+ line + labs(title = 'Scatter Plot of Dew Point against Humid',x = 'Dew Point ',
                                                                                                                         y = 'Humidty %')
#output
output
#analysis example 5: distribution of precipitation
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = precip)) + geom_histogram() + labs(title = 'Histogram of Precipitate', x = 'Precipitate (Volume)', y = 'Amount')
#output
output
#analysis example 6: precipitation per month (non-zero precipitation only)
#visualization and exploration
output = (
  data %>%
    filter(data$precip>0) %>% #data manipulation to filter the data
    ggplot(mapping = aes(x = factor(month), y = precip)) + geom_boxplot() + labs(title = 'Precipitate per Month',x = 'Month', y = 'Precipitate')
)
output
#analysis example 7: distribution of visibility (miles)
#visualization and exploration
output = ggplot(data = data, mapping = aes(x =visib)) + geom_histogram() + labs(title = 'Histogram of Visibility', x = 'Visibility (Miles)',y = 'Amount')
#output
output
#analysis example 8: humidity against visibility (boxplot per visibility level)
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = factor(visib), y = humid)) + geom_boxplot() + labs(title = 'Boxplot of Humid against Visibility', x = 'Visibility', y = 'Humid')
#output
output
#analysis example 9: precipitation against visibility (non-zero precipitation)
output = (
  data %>%
    filter(data$precip>0) %>% #data manipulation
    ggplot(mapping = aes(x = factor(visib), y = precip)) + geom_boxplot() + labs(title = 'Boxplot of Precipitate against visibility',x = 'visibility (Miles)', y = 'Precipitate (Inch)')
)
#output
output
#analysis example 10: precipitation against humidity with a linear fit
line = stat_smooth(method = "lm")
output = ggplot(data = data, mapping = aes(x = humid, y = precip, color = origin)) + geom_point(alpha = 0.15) + line + labs(title = 'Scatter Plot of Precipitate against Humid',
                                                                                                                            x = 'Humid ', y = 'Precipitate (Inch)')
#output
output
#analysis example 11: pressure per month
output = ggplot(data = data, mapping = aes(x = factor(month), y = pressure, na.rm = TRUE)) + geom_boxplot() + labs(title = 'BoxPlot of Pressure against Month',x = 'Month '
                                                                                                                   , y = 'Pressure (Milibars)')
#output
output
#analysis example 12: pressure against temperature with a linear fit
line = stat_smooth(method = "lm")
output = ggplot(data = data, mapping = aes(x = temp, y = pressure, color = origin, na.rm = TRUE)) + geom_point(alpha = 0.15)+ line + labs(title = 'Scatter Plot of Pressure against Temperature'
                                                                                                                                          ,x = 'Temperature ', y = 'Pressure (Milibars)')
#output
output
#analysis example 13: distribution of wind speed, faceted by origin airport
airport = facet_wrap(~origin)
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = wind_speed, na.rm = TRUE)) + geom_histogram() + labs(title = 'Histogram of Wind speed',x = 'Wind speed (MPH)') + airport
#output
output
#analysis example 14: wind gust against wind speed with a linear fit
line = stat_smooth(method = "lm")
#visualization and exploration
output = ggplot(data = data, mapping = aes(x = wind_speed, y = wind_gust, na.rm = TRUE, color = origin)) + geom_point(alpha = 0.15)+ line + labs(title = 'Scatter Plot of Wind Gust against Wind Speed'
                                                                                                                                                ,x = 'Wind Speed (MPH)', y = 'Wind Gust (MPH)')
#output the result
output
#Extra Analysis Example 1
#Scatter plot with marginal histograms: dew point vs temperature (ggExtra)
library(ggExtra)
g = ggplot(data = data, mapping = aes(temp, dewp, color = origin)) + geom_count() + geom_smooth(method="lm", se=F)
ggMarginal(g, type = "histogram", fill="transparent")
#Extra Analysis Example 2
#Violin plot of the density of pressure for each month
ggplot(data = data, mapping = aes(x = factor(month), y = pressure, na.rm = TRUE)) + geom_violin() + labs(title="The Density of Pressure for each Month'", x="Month", y="Pressure")
|
d5638edf643d9d484b04ed7a6e75775a1b2afb14
|
d04ce3da9081f0b6fb09d2379cacacd36ef23629
|
/R/integral.R
|
3c22400b97836003815695d28cce8b8768a3b247
|
[
"MIT"
] |
permissive
|
gbasulto/mvdeconvolution
|
89ce5851548e335e8b0786672dcdce6cfb3358bd
|
6dafeb2f735b2bd8ee5be7bba550bb2edd48cae8
|
refs/heads/master
| 2016-09-10T00:14:23.666683
| 2016-03-04T03:29:38
| 2016-03-04T03:29:38
| 19,830,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,454
|
r
|
integral.R
|
#' Multivariate Fourier integral
#'
#' This function computes the a Fourier integral of a function with
#' support in a hyper-rectangle. Right now, this function only
#' computes univariate and bivariate continuous Fourier transform
#' based on the paper 'Fast computation of multidimensional Fourier
#' integrals' by Inverarity (2002). It is the formula (4.1).
#' @param f function from R^2 or R to which the FT will be applied.
#' @param n Dimension of the function f (1 or 2).
#' @param m Resolution of the integral
#' @param a nx1 vector. Lower integration limit.
#' @param b nx1 vector. Upper integration limit.
#' @param c nx1 vector. Lower limit of w.
#' @param d nx1 vector. Upper limit of w.
#' @param r Power in (4.1).
#' @param s Scale constant in (4.1).
#' @examples
#' ## library(ggplot2)
#' ##
#' ## Computing characteristic function of
#' ## univariate normal on -1, 10.
#' ##
#' cf <- FIntegral(dnorm, n = 1, m = 2^12, -20, b = 20, c = -1, d = 10,
#' r = 1, s = 1)
#' values <- data.frame(t = cf$w,
#' cf = c(Re(cf$ft),
#' sapply(cf$w, function(t) exp(-t^2/2))),
#' type = rep(c("approx", "real"), length(cf$w))
#' )
#' ggplot2:::qplot(t, cf, data = values, col = type, geom = "line")
#'
#' ## Real anf imag. parts of the characteristic function of an exponential
#' ## distribution, approximated with 128 points.
#' chf <- function(t) 1/(1 - 1i*t)
#' f <- function(x) dexp(x, 1)
#' cf <- FIntegral(f, n = 1, m = 2^7, a = 0, b = 5, c = -3, d = 10, r = 1, s = 1)
#' ##
#' realEst <- Re(cf$ft)
#' imagEst <- Im(cf$ft)
#' real <- Re(chf(cf$w))
#' imag <- Im(chf(cf$w))
#' m <- length(cf$w)
#' values <- data.frame(w = cf$w,
#' val = c(realEst, imagEst, real, imag),
#' Part = rep(rep(c("Real", "Imag"), 2), each = m),
#' Type = rep(c("FT", "Direct"), each = 2*m))
#'
#' ggplot2:::qplot(w, val, data = values, geom = "line",
#' facets = . ~ Part, colour = Type)
#'
#' ## Characteristic function of a bivariate normal distribution
#' chf <- function(t1, t2) exp(-(t1^2 + t2^2)/2)
#' f <- function(x, y) dnorm(x)*dnorm(y)
#'
#' cf <- FIntegral(f, n = 2, m = 2^8, a = c(-6, -6), b = c(6, 6),
#' c = c(-3, -3), d = c(3, 3), r = 1, s = 1)
#'
#' persp(Re(cf$ft), col = "lightblue", phi = 15, theta = 30,
#' shade = .3, border = NA)
#' @export
FIntegral <- function(f, n, m, a, b, c, d, r, s)
{
  ## Computes univariate (n = 1) and bivariate (n = 2) continuous Fourier
  ## transforms following Inverarity (2002), "Fast computation of
  ## multidimensional Fourier integrals", formula (4.1).
  ##
  ## Args:
  ##   f: function R^n -> C to transform (vectorized for n = 1; f(x, y)
  ##      for n = 2).
  ##   n: dimension of f (1 or 2).
  ##   m: resolution (grid points per dimension).
  ##   a, b: n-vectors, lower/upper integration limits.
  ##   c, d: n-vectors, lower/upper limits of the frequency grid w.
  ##   r: power in (4.1).
  ##   s: scale constant in (4.1).
  ##
  ## Returns a list with:
  ##   w:  frequency grid (length-m vector for n = 1, n x m matrix for n = 2).
  ##   ft: transform values at w (vector for n = 1; m x m matrix for n = 2).

  ## A scale s != 1 reduces to the s = 1 case on a rescaled frequency grid.
  if(s != 1)
  {
    out <- FIntegral(f, n, m, a, b, s*c,
                     s*d, r, 1)
    w <- out$w/s
    return(list(w = w,
                ft = abs(s)^(n/2)*out$ft))
  }
  if(n == 1)
  {
    ## The FFT code below evaluates the integral with a negative sign in
    ## the exponent; negating c and d recovers formula (4.1).
    c <- -c
    d <- -d
    bet <- (b - a)/m
    gam <- (d - c)/m
    del <- bet*gam/2
    J1 <- 0:(m - 1)
    J2 <- m:(2*m - 1)
    t <- a + bet*J1          # evaluation grid
    w <- c + gam*J1          # (sign-flipped) frequency grid
    ## Zero-padded sequences convolved via FFT (fractional-FFT trick).
    y <- c(f(t)*complex(argument = -J1*(bet*c + del*J1)),
           rep(0, m))
    z <- complex(argument = del*(c(J1^2, (J2 - 2*m)^2)))
    val <- bet*complex(argument = -(a*w + del*J1^2))*
      fft(fft(y)*fft(z), inverse = TRUE)/
      (2*pi)^((1 - r)/2)/(2*m)
    ## Undo the sign flip on the frequency grid.
    w <- -w
    return(list(w = w,
                ft = val[J1 + 1]))
  }
  ## --- n-dimensional setup (used by the bivariate branch below) ---
  ## nx1 vectors of grid spacings; a_hat shifts t to cell midpoints.
  bet <- (b - a)/m
  gam <- (d - c)/m
  del <- bet*gam/2
  a_hat <- a + bet/2
  ## Aux. index vectors.
  J1 <- 0:(m - 1)
  J2 <- m:(2*m - 1)
  ## nxm matrices: evaluation points t and frequencies w.
  t <- sweep(bet %o% J1, 1, a_hat, "+")
  w <- sweep(gam %o% J1, 1, c, "+")
  ## nx2m matrix of zero-padded quadratic ("chirp") phases.
  auxArg <- cbind(- del %o% (J1^2), - del %o% (J2 - 2*m)^2)
  z <- exp(1i*auxArg)
  ## From here on the code is specific to the 2-dimensional transform.
  ## Phase factor of (4.4), m x m.
  auxArg <- outer(J1, J1,
                  function(j1, j2) j1*(bet[1]*c[1] + del[1]*j1)
                  + j2*(bet[2]*c[2] + del[2]*j2))
  aux1 <- exp(1i*auxArg)
  ## f evaluated on the t grid: aux2[i, j] = f(t1_i, t2_j), m x m.
  aux2 <- apply(matrix(t[2, ]), 1, function(y)
    apply(matrix(t[1, ]), 1, function(x) f(x, y)))
  ## Zero-padded y of (4.4), 2m x 2m.
  y <- matrix(0, 2*m, 2*m)
  y[1:m, 1:m] <- aux1*aux2
  ## Univariate DFTs of the chirp sequences, mx1 each.
  dft1 <- drop(fft(z[1, ]))
  dft2 <- drop(fft(z[2, ]))
  ## Pointwise product feeding the inverse DFT in (4.8): 2m x 2m matrix.
  dft <- fft(y) * (dft1 %o% dft2)
  ## Outer phase correction of (4.8).
  aux1 <- drop(a_hat[1]*w[1, ] + del[1]*J1^2)
  aux2 <- drop(a_hat[2]*w[2, ] + del[2]*J1^2)
  ## NOTE: an earlier dead assignment (`complex(argument = aux1 %o% aux2)`)
  ## was removed here; it was immediately overwritten and used a product
  ## instead of a sum of phases.
  expo <- exp(1i*outer(aux1, aux2, '+'))        # mxm
  fact <- prod(bet)*((2*pi)^(1 - r))^(-n/2)     # real scalar
  idft <- (fft(dft, inverse = TRUE)/(2*m)^2)[1:m, 1:m]
  ## FT values on the m x m frequency grid.
  val <- expo*fact*idft
  return(list(w = w,
              ft = val))
}
|
ecf0971060d45167337f1774d8fc9418d4d9a8bb
|
591771c6a3972cab8c680696771fd4b4aa0c3f20
|
/R/createDirectoryStructure.R
|
e30e08b2a7a65bf3028b4bf2b4cd9757f8ccd371
|
[] |
no_license
|
Sumpfohreule/S4Level2
|
a36dfc014dde47763009dcc4420a198ce11a9a5d
|
9034cddbd04efed8cea8c5b90cb2e4fbf16209e7
|
refs/heads/main
| 2023-08-19T08:58:05.616624
| 2021-09-29T14:47:03
| 2021-09-29T14:47:03
| 304,371,990
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 246
|
r
|
createDirectoryStructure.R
|
########################################################################################################################
# Generic for building an object's directory layout on disk; concrete
# methods are provided by the package's classes elsewhere.
setGeneric(
  "createDirectoryStructure",
  def = function(.Object) standardGeneric("createDirectoryStructure")
)
|
8c2307353831745c08d266798f2bee6e2aa4990b
|
90f6e07975c0b8963d8de4c34eb8f43e061c270f
|
/R/InfoCritCompare.R
|
815393a7b5c5e4f7121d30c10d4601ed14132571
|
[] |
no_license
|
jayverhoef/SSN
|
1411244d238170b90a23ac9e1f710e32d2bc1455
|
047c5fec5075422512e17b7360b797b66a113b9e
|
refs/heads/master
| 2023-05-01T17:42:59.986179
| 2023-04-21T21:04:06
| 2023-04-21T21:04:06
| 86,113,217
| 8
| 2
| null | 2020-06-03T16:13:52
| 2017-03-24T21:48:15
|
R
|
UTF-8
|
R
| false
| false
| 966
|
r
|
InfoCritCompare.R
|
InfoCritCompare <-
function(model.list)
{
  ## Compare fitted "glmssn" models by -2 log-likelihood, AIC and
  ## cross-validation statistics.
  ##
  ## Args:
  ##   model.list: a list of fitted objects of class "glmssn".
  ##
  ## Returns a data.frame with one row per model: formula, estimation
  ## method, variance-component label, -2logL, AIC and the CV statistics
  ## from CrossValidationStatsSSN(). AIC and -2logL are NA for
  ## non-Gaussian families. Returns NULL for an empty list (as before).
  rows <- vector("list", length(model.list))
  for(i in seq_along(model.list)) {          # seq_along(): safe for empty lists
    if(!inherits(model.list[[i]], "glmssn")) {
      stop("All models must be of type glmssn")
    }
    ## Collapse the unique variance-component terms into one label.
    ind <- !duplicated(attributes(model.list[[i]]$estimates$theta)$terms)
    terms <- attributes(model.list[[i]]$estimates$theta)$terms[ind]
    model.name <- paste(terms, collapse = " + ")
    ## Likelihood-based criteria are only defined for Gaussian models.
    if(model.list[[i]]$args$family == "gaussian") {
      model.AIC <- AIC(model.list[[i]])
      model.neg2LogL <- model.list[[i]]$estimates$m2LL
    } else {
      model.AIC <- NA
      model.neg2LogL <- NA
    }
    rows[[i]] <- data.frame(
      formula = deparse(model.list[[i]]$args$formula, width.cutoff = 500),
      EstMethod = model.list[[i]]$args$EstMeth,
      Variance_Components = model.name,
      neg2LogL = model.neg2LogL,
      AIC = model.AIC,
      CrossValidationStatsSSN(model.list[[i]]))
  }
  ## Bind once instead of growing with rbind() inside the loop.
  do.call(rbind, rows)
}
|
44b9b5bba807608a99b1224009819a2ca62e7e85
|
a210d772d34187c6dc7ebc0fdf854c25330c6f10
|
/Welcome to R Programming/rankall.R
|
82b62f7b1a0a7146b66707038edeb47276c1af7b
|
[
"Apache-2.0"
] |
permissive
|
dynamicdeploy/datasciencecoursera
|
79a36dff5c0fe60afea85914ca6e3cc05350e161
|
99229fba6a835be4003b3ebe7c2ceb89444293a0
|
refs/heads/master
| 2016-08-11T09:13:54.880398
| 2016-03-26T20:10:43
| 2016-03-26T20:10:43
| 52,928,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,715
|
r
|
rankall.R
|
## Return, for every state, the hospital with the given 30-day mortality
## ranking for the requested outcome. For example rankall("heart attack",
## "best") returns the hospitals that are best in their respective states
## for 30-day heart-attack death rates.
##
## Args:
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##   num:     integer rank, or "best" / "worst".
## Returns a data.frame with columns `hospital` and `state` (hospital is
## NA when a state has fewer than `num` hospitals).
rankall <- function(outcome, num = "best") {
  ## Read outcome data; the three mortality-rate columns arrive as
  ## character and are coerced to numeric ("Not Available" -> NA, with the
  ## coercion warnings suppressed deliberately).
  outcomev <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  suppressWarnings(outcomev[, 11] <- as.numeric(outcomev[, 11]))
  suppressWarnings(outcomev[, 17] <- as.numeric(outcomev[, 17]))
  suppressWarnings(outcomev[, 23] <- as.numeric(outcomev[, 23]))
  ## Map each valid outcome to its mortality-rate column; this replaces a
  ## triplicated if/else dispatch and an unreachable return() after stop().
  outcome.cols <- c(
    "heart attack"  = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "pneumonia"     = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  if (!(outcome %in% names(outcome.cols))) {
    stop("invalid outcome")
  }
  states <- levels(factor(outcomev$State))
  outputdf <- rankbyoutcome(outcomev, outcome.cols[[outcome]],
                            states = states, num = num)
  ## Keep only the hospital and state columns.
  outputdf[, 1:2]
}
## Helper for rankall(): for each state, rank hospitals by the mortality
## column `outcomestring` (ties broken alphabetically by hospital name) and
## return the row matching rank `num` ("best" = first, "worst" = last).
## Returns a data.frame with columns hospital, state, Rank.
rankbyoutcome<-function(outcomev, outcomestring, states, num)
{
  returncolumns<-c("hospital", "state", "Rank")
  ## Seed data frame with one dummy row so later single-row assignments keep
  ## the column names/types; the dummy row is dropped before returning.
  outputdf<-data.frame(hospital=as.character(""), state=as.character(""), Rank=as.numeric(0), stringsAsFactors = FALSE)
  for(s in states)
  {
    if(is.numeric(num))
    {
      ## A state with fewer hospitals than the requested rank yields an NA row.
      totalhospitalsinstate<-length(subset(outcomev$Hospital.Name, outcomev$State==s))
      if(totalhospitalsinstate < num)
      {
        #because rbind() was messing up the column names, I had to use this approach
        ##http://stackoverflow.com/questions/5231540/r-losing-column-names-when-adding-rows-to-an-empty-data-frame
        ## NOTE(review): c(NA, s, NA) is a character vector; each element is
        ## coerced to the matching column type on assignment (Rank -> NA).
        outputdf[nrow(outputdf)+1,]<-c(NA, s, NA)
        next
      }
    }
    ## Drop rows with a missing outcome, then order by outcome and name.
    ## NOTE(review): this reassigns `outcomev` and recomputes the global
    ## ordering on every iteration; idempotent after the first pass, but
    ## wasteful — candidate for hoisting out of the loop.
    outcomev <- subset(outcomev, !is.na(outcomev[outcomestring]))
    index<-with(outcomev, order(outcomev[outcomestring], outcomev$Hospital.Name, na.last = TRUE))
    lmn<-outcomev[index,]
    #filter by state (NA outcomes already removed above)
    vals<-subset(lmn, lmn$State==s)
    #add a rank column
    vals$Rank <- NA
    #Ranks follow the sorted order: 1 = lowest mortality rate
    vals$Rank <- 1:nrow(vals)
    #Rename columns to the output contract (hospital, state, rate)
    names(vals)[names(vals) == 'Hospital.Name'] <- 'hospital'
    names(vals)[names(vals) == 'State'] <- 'state'
    names(vals)[names(vals) == outcomestring] <- 'rate'
    #keep only the columns we return
    vals<-vals[returncolumns]
    #get the hospital with the specified rank and append it to the output
    if(is.numeric(num))
    {
      tmp<-subset(vals, vals$Rank == num)
      outputdf<-rbind(outputdf, tmp)
    }else if(num == "best")
    {
      tmp<- head(vals, 1)
      #outputdf<-append(outputdf, tmp[finalcolumns])
      outputdf<-rbind(outputdf, tmp)
    }else if(num == "worst")
    {
      tmp<- tail(vals, 1)
      outputdf<-rbind(outputdf, tmp)
    }
  }
  #drop the dummy row injected when the data frame was created
  return (outputdf<-outputdf[-1,])
}
##Some interesting commands learned
#tmpp<-by(outcome, outcome$State, function(x) x[with(x, order(x$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack, x$Hospital.Name, na.last = TRUE)),])
#tmpp<-by(outcome, outcome$State, function(x) subset(x[with(x, order(x$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack, x$Hospital.Name, na.last = TRUE)),], !is.na(vals$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack)))
#tmpp$AK$Rank<-NA
#tmpp$AK$Rank<-1:nrow(tmpp$AK)
#xyz<-by(outcome, outcome$State, function(x) x[which.min(x$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack), ] )
#lapply(xyz, function(x){ x[c("Hospital.Name", "State")]})
|
699bddf0142e3c5112a067c9e94f61ba52349659
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/clere/inst/doc/SimulatedDataExample.R
|
99ce0efa858d97b803ee5c541986fe78d2b39798
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,760
|
r
|
SimulatedDataExample.R
|
# Simulation study comparing regression methods (CLERE, PACS, LASSO, ...)
# on synthetic data. Relies on compare() defined in Methods.R.
source("Methods.R")

## Set seed for reproducibility
Seed <- 1234
set.seed(Seed)

## Simulation parameters: noise sd, training size, number of predictors,
## number of coefficient groups
sigma <- 1
n <- 25
p <- 50
g <- 3

## True coefficient vector Beta: g distinct values 0, 3, 15 (a^k - 1),
## replicated according to the group proportions in `probs`
intercept <- 0
g <- 3
probs <- c(0.36+0.28,0.20,0.12+0.04)
Eff <- p * probs
a <- 4
B <- a**(0:(g-1))-1
Beta <- rep(B,Eff)

## Generate one large population; each replicate uses n rows for training
## and the remainder for validation
nsim <- 200
N <- ifelse(n * nsim<5000,5000,n*nsim)
Eps <- rnorm(N,mean=0,sd=sigma)
xpop <- matrix(rnorm(N*p),nrow=N,ncol=p)
ypop <- as.numeric(intercept+xpop%*%Beta+Eps)
numExpSimData <- NULL
for(isim in 1:nsim){
  cat(paste("\tSimulation #",isim,".\n",sep=""))
  # rows of this replicate's training block
  lsim <- (1+(isim-1)*n):(isim*n)
  xt <- xpop[+lsim,]; yt <- ypop[+lsim]
  xv <- xpop[-lsim,]; yv <- ypop[-lsim]
  # compare() (from Methods.R) returns one row of metrics per method
  numExpSimData <- rbind(numExpSimData,compare(xt,yt,xv,yv,Seed))
}

## Output: save results and the full workspace
save(list="numExpSimData",file="numExpSimData.RData")
save.image("SimulatedDataExample.RData")

## Plot: boxplots of MSPE per method (ordered by median), annotated with
## mean degrees of freedom and mean computational time
meths <- c("CLERE0","CLERE","PACS","LASSO",
           "AVG","Ridge","Elastic net",
           "Spike and Slab")
o <- order(apply(numExpSimData[,1:9],2,median))
dfs <- round(apply(numExpSimData[,10:18][,o[1:8]],2,mean),1)
sdf <- round(apply(numExpSimData[,10:18][,o[1:8]],2,sd),1)
tts <- round(apply(numExpSimData[,19:27][,o[1:8]],2,mean),1)
stt <- round(apply(numExpSimData[,19:27][,o[1:8]],2,sd),2)
cols <- rainbow(9)
pdf("Simulations.pdf")
par(mar=c(5, 2, 4, 7)+0.1)
boxplot(numExpSimData[,1:9][,o[1:8]],horizontal=TRUE,log="x",
        col=cols,axes=FALSE,pch=18,xlab="Mean Squared Prediction Error")
axis(1)
labs <- paste(meths,"\ndf: ",dfs," (",sdf,")",sep="")
axis(4,at=1:8,labels=labs,las=2)
cts <- paste(tts,"s (",stt,")",sep="")
legend("topleft",legend=cts,box.lty=0,lwd=2,lty=1,col=cols,
       title="Computational time")
dev.off()
|
08482c36b5bbe0b6213884779efa5cb0caf0bdf2
|
cd8932e6ebfdfaf6a1a4f1773aa703780f710c27
|
/Chapter8.R
|
35a4d0e5693cc57caece50b3f37abb4bcfa3cae0
|
[] |
no_license
|
st107755/R-for-data-science
|
97c283d8d3e48a339a720c49d903f2b6ecb3fce3
|
0e6165dd06bd95b9ab5d211de71804ae65476147
|
refs/heads/master
| 2020-04-29T00:15:40.105799
| 2019-03-14T19:35:30
| 2019-03-14T19:35:30
| 175,687,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 783
|
r
|
Chapter8.R
|
## Chapter 8 exercises: date parsing with readr and tidy-data plots using
## the `table2` toy dataset from tidyr.
library("readr")
library("ggplot2")
library("dplyr")   # %>% , filter(), mutate(), rename(), arrange()
library("tidyr")   # spread() and the `table2` dataset

## A locale whose default date format names the full month (created and
## printed, not stored).
locale(date_format = "%B")

## Parse three differently formatted date strings. Note: %d is day of
## month; the original used %D (a %m/%d/%y composite), which cannot match
## these strings and parsed to NA.
d1 <- "January 1, 2010"
d2 <- "2015-Mar-07"
d3 <- "06-JUN-2017"
parse_date(d1, "%B %d, %Y")
parse_date(d2, "%Y-%b-%d")
parse_date(d3, "%d-%b-%Y")

## Widen table2: one column per `type` (cases / population).
spread(table2, key = type, value = count)

## Per-day rates by type, plotted over time per country.
table2 %>%
  mutate(cases_per_day = ifelse(type == "cases", count/365, NA)) %>%
  mutate(pooulation_per_day = ifelse(type == "population", count/365, NA)) %>%
  ggplot(aes(year, cases_per_day)) +
  geom_line(aes(group = country)) +
  geom_point(aes(color = country))

## Cases only, renamed and sorted.
t2_cases <- filter(table2, type == "cases") %>%
  rename(cases = count) %>%
  arrange(country, year)

## Same plot built directly from the filtered data.
table2 %>% filter(type == "cases") %>% rename("cases" = "count") %>%
  ggplot(aes(year, cases)) +
  geom_line(aes(group = country)) +
  geom_point(aes(color = country))
|
53485f0f3d50caa805a530fe46e451505b59450f
|
65c596764a3edd3576c04f6ec66fa5dd7ff9d642
|
/scripts/script1.R
|
d1f6d0bccd2a73bc4d42e01d54d8b823b1d36541
|
[] |
no_license
|
anhnguyendepocen/irrs03
|
3dc45d18e0a0ca3ee93f53d7aa8796194dbe859e
|
966627755fedf825a5efcc2e2ff4e74da797d52f
|
refs/heads/master
| 2023-03-21T01:39:22.843592
| 2021-03-18T23:13:06
| 2021-03-18T23:13:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
script1.R
|
# Demo script: basic vector assignment with RStudio section headers.
composites <- c(4, 6, 8, 9, 10, 12)
# preprocess --------------------------------------------------------------
composites_plus <- composites + 1
# visualization -----------------------------------------------------------
composites_minus <- composites - 1
# foobar ------
my_mean <- function(x) {mean(x, na.rm = T)}
# Small example data frame of three people.
data_df <- data.frame(names = c('alice', 'bob', 'charlie'),
                      ages = c(23, 26, 21),
                      height = c(150, 160, 180),
                      weight = c(75, 65, 86))
# Read in my data ---------------------------------------------------------
# NOTE(review): read_csv is from readr, which is never attached in this
# script — confirm library(readr) is loaded elsewhere before sourcing.
anorexia_df <- read_csv("tmp/anorexia.csv")
|
f4c996baca1d1b2fb2d2d90bfa4f6cd1de233afe
|
145c99c7c8fb80dbc66e82cb370dd818427c7e9d
|
/Analysis/R_project/ReadAei.R
|
9ff3625d4c78a4359961161d3e2741bfe3a21fbe
|
[] |
no_license
|
RJWorth/Lithopanspermia
|
7efa7456aac9c774a92f34360ffe911d154edbab
|
f74b9f588778ea5b9942f33f32ca4fa5a902e671
|
refs/heads/master
| 2016-09-05T20:57:40.634528
| 2014-03-05T19:31:39
| 2014-03-05T19:31:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
ReadAei.R
|
# Read orbital-element (.aei) output files for planets and rocks from a
# MERCURY-style simulation. NOTE(review): `rocks` and (commented) `ObjInd`
# are defined elsewhere in the project before this script runs.
print('Read .aei files')
subset=rocks[rocks$File==16,]
# Map each rock's Destination factor level to a plotting color.
colors=factor(subset$Destination,labels=c('gray','yellow','cyan4','cyan',
                                          'blue','red','purple','light gray'))
colors=matrix(colors)
nobj.m=742
obj.m=list()
# Read a hand-picked subset of object files; list index is object id + 1.
# Only columns a, e (2:3) and x, y (7:8) are kept from each file.
#for (j in ObjInd) {
for (j in c(0:10,165,638)) {
  # dir=file.path('/astro/grads/rjw274/Panspermia',
  #	'Lithopanspermia/Analysis/bigmars8/AEI/')
  dir=file.path('../bigmars8/AEI/')
  obj.m[[j+1]]=read.table(paste(dir,'M',j,'.aei', sep=''), header=F,skip=4,
                          col.names=c('Time','a','e','i','mass','dens', 'x','y','z','vx','vy','vz')
  )[,c(2:3,7:8)]
}
# Planet names; note the trailing spaces, which must match the .aei
# filenames on disk.
#pnames=c('Mercury ','Venus ','Earth ','Mars ','Jupiter ','Saturn ',
#	'Uranus ','Neptune ')
pnames=c('Mercury ','Venus ','Earth ','Mars ','Jupiter ','Saturn ',
         'Uranus ','Neptune ')
planets.m=list()
# Same column subset (a, e, x, y) for each planet file.
for (j in 1:length(pnames)) planets.m[[j]]=read.table(
  paste(dir,pnames[j],'.aei',sep=''), header=F,skip=4,
  col.names=c('Time','a','e','i','mass','dens', 'x','y','z','vx','vy','vz')
)[,c(2:3,7:8)]
|
39d22dc14aae5e37382827942042f3c71a8fc1de
|
f0f67678e9029972c3dc1846f2a4f7c9968d2623
|
/model.R
|
07c5a321d3f37bbcdf2067359feefc8ed2670ac8
|
[] |
no_license
|
marchiocchia/statistical-analysis-on-Trust-in-EU
|
35e45a79a48528988817e66cdd485cb40ba8213c
|
14a71ad0c3c725df54a561d262d39a87c553a799
|
refs/heads/master
| 2022-11-09T08:43:55.595109
| 2020-06-22T08:32:36
| 2020-06-22T08:32:36
| 274,079,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,918
|
r
|
model.R
|
library(arm)
library(foreign)
library(gmodels)

## FULL LOGISTIC MODEL (28 MEMBER STATES)
## Relevel factors so the model baselines are Lithuania / age 25-39 /
## large town / MASTER education. NOTE(review): `dati` is loaded earlier
## in the file, outside this excerpt.
dati$COUNTRY<-relevel(dati$COUNTRY,"(24) LT - Lithuania")
dati$CLASSI_ETA<-relevel(as.factor(dati$CLASSI_ETA),"(2) 25 - 39 years")
dati$COMMUNITY<-relevel(as.factor(dati$COMMUNITY),"(3) Large town")
dati$CLASSI_ISTRUZIONE<-relevel(as.factor(dati$CLASSI_ISTRUZIONE),"MASTER")
## Logistic regression of trust in the EU, with a country x trust-in-
## government interaction.
fit_2.1<- glm(TRUST_EUROPA~COUNTRY+COMMUNITY+CLASSI_ETA+TRUST_TV+
                CLASSI_ISTRUZIONE+FIGLI+DIFFICOLTA_ECONOMICHE+
                TRUST_GOVERNO+TRUST_GIORNALI+TRUST_SOCIAL+
                POL_INDEX+SODD_DEMOCRAZIA+TRUST_GOVERNO*COUNTRY
              ,family=binomial(link="logit"), data=dati)
display(fit_2.1)
## Baseline predicted probability (intercept on the probability scale, %).
round(invlogit(fit_2.1$coefficients[1])*100)

## DIAGNOSTICS OF THE FULL MODEL
## Binned residual plot; rows with NA in any model variable are dropped so
## the response aligns with the fitted values.
pred_2.1<- fit_2.1$fitted.values
dati_fit_2.1<-na.omit(dati[,c("TRUST_EUROPA","COMMUNITY","CLASSI_ETA",
                              "CLASSI_ISTRUZIONE","TRUST_TV",
                              "COUNTRY","DIFFICOLTA_ECONOMICHE",
                              "TRUST_GOVERNO","TRUST_GIORNALI","TRUST_SOCIAL",
                              "SODD_DEMOCRAZIA","FIGLI","POL_INDEX","WEX","TRUST_EUROPA1")])
y_2.1<- dati_fit_2.1$TRUST_EUROPA
binnedplot (pred_2.1, y_2.1-pred_2.1, nclass=145, xlab="Prob (avere fiducia) stimata", main=NA,
            ylab="Residui medi", mgp=c(2,.5,0),pch=16, col.pts = 'gray11',cex.axis=0.9, cex.lab=0.9)
## Error rates: null-model error vs model classification error at 0.5 (%).
error.rate.null <- round(mean(round(abs(y_2.1-mean(pred_2.1))))*100)
tax.error <- round(mean((pred_2.1 > 0.5 & y_2.1==0) | (pred_2.1 < 0.5 & y_2.1==1))*100)

## DESCRIPTIVE ANALYSIS AND MODEL ON THE UK SUBSAMPLE
## NOTE(review): wtd.table is not from the packages attached above —
## confirm its package (e.g. questionr) is loaded elsewhere.
inghilterra<-subset(dati, ISOCNTRY=="GB ")
round(prop.table(wtd.table(inghilterra$TRUST_EUROPA1,weights = inghilterra$WEX))*100,2)
##########################################################################################################'
######################## BAR CHART: COMMUNITY TYPE vs TRUST IN THE EU (UNITED KINGDOM) ###################'
##########################################################################################################'
## Column percentages of trust by community type; [c(1,4)] etc. pick the
## two trust levels for one community-type row of the flattened table.
freq.trust.eu_COMM_rural <- as.vector(round(prop.table(wtd.table(inghilterra$COMMUNITY_GB,
                                                                 inghilterra$TRUST_EUROPA1,
                                                                 weights = inghilterra$WEX),
                                                       margin = 2)*100,2))[c(1,4)]
freq.trust.eu_COMM_small <- as.vector(round(prop.table(wtd.table(inghilterra$COMMUNITY_GB,
                                                                 inghilterra$TRUST_EUROPA1,
                                                                 weights = inghilterra$WEX),
                                                       margin = 2)*100,2))[c(2,5)]
freq.trust.eu_COMM_big <- as.vector(round(prop.table(wtd.table(inghilterra$COMMUNITY_GB,
                                                               inghilterra$TRUST_EUROPA1,
                                                               weights = inghilterra$WEX),
                                                     margin = 2)*100,2))[c(3,6)]
## Long-format frame for ggplot (melt is from reshape2 — confirm it is
## attached elsewhere), ordered for stable bar grouping.
df1j_COMM <- data.frame("Rurale" = freq.trust.eu_COMM_rural,
                        "Piccola città" = freq.trust.eu_COMM_small,
                        "Grande città" = freq.trust.eu_COMM_big,
                        "livelli" = levels(inghilterra$TRUST_EUROPA1))
df2j_COMM <- melt(df1j_COMM, id.vars='livelli')
df2j_COMM <- df2j_COMM[order(df2j_COMM$variable, df2j_COMM$value, df2j_COMM$livelli),]
## Grouped bar chart with percentage labels; My_Theme is a shared ggplot
## theme defined elsewhere in the project.
plotCOMM<-ggplot(df2j_COMM, aes(x=livelli, y=value, fill=variable)) +
  geom_bar(stat='identity', position= 'dodge',colour="black",width=0.8) +
  labs(fill = "Tipologia di zona:") + xlab("Fiducia UE") + ylab("Percentuale") +
  theme(axis.text.x = element_text(angle=0, vjust=0.6)) +
  geom_text(aes(label=paste(value, "%"), y=value+2.5),
            position = position_dodge(0.9), vjust=0.5, size=3.7, fontface='bold')+
  My_Theme+
  scale_fill_manual(values=c("#B3E2CD", "#FDCDAC", "#CBD5E8"))
##########################################################################################################'
################## GRAFICO A BARRE (SODD_DEMOCRAZIA) RISPETTO A TRUST EUROPA (REGNO UNITO) ###############'
##########################################################################################################'
freq.trust.eu_DEMOCRAZ_no <- as.vector(round(prop.table(wtd.table(inghilterra$SODD_DEMOCRAZIA,
inghilterra$TRUST_EUROPA1,
weights = inghilterra$WEX),
margin = 2)*100,2))[c(1,3)]
freq.trust.eu_DEMOCRAZ_si <- as.vector(round(prop.table(wtd.table(inghilterra$SODD_DEMOCRAZIA,
inghilterra$TRUST_EUROPA1,
weights = inghilterra$WEX),
margin = 2)*100,2))[c(2,4)]
df1j_DEMOCRAZ <- data.frame("SI" = freq.trust.eu_DEMOCRAZ_si,
"NO" = freq.trust.eu_DEMOCRAZ_no,
"livelli" = levels(inghilterra$TRUST_EUROPA1))
df2j_DEMOCRAZ <- melt(df1j_DEMOCRAZ, id.vars='livelli')
df2j_DEMOCRAZ <- df2j_DEMOCRAZ[order(df2j_DEMOCRAZ$variable, df2j_DEMOCRAZ$value, df2j_DEMOCRAZ$livelli),]
df2j_DEMOCRAZ$livelli <- factor(df2j_DEMOCRAZ$livelli,
levels = df2j_DEMOCRAZ$livelli[df2j_DEMOCRAZ$variable == "NO"])
plotDEMOCRAZ<-ggplot(df2j_DEMOCRAZ, aes(x=livelli, y=value, fill=variable)) +
geom_bar(stat='identity', position= 'dodge',colour="black",width=0.8) +
labs(fill = "Hai fiducia nella democrazia:") + xlab("Fiducia UE") + ylab("Percentuale") +
theme(axis.text.x = element_text(angle=0, vjust=0.6))+
geom_text(aes(label=paste(value, "%"), y=value+2.5),
position = position_dodge(0.9), vjust=0.5, size=3.7, fontface='bold')+
My_Theme+
ylim(0, 80)+
scale_fill_manual(values=c("#BDD7E7","#08519C"))
round(prop.table(table(inghilterra$FIGLI))*100,2)
round(prop.table(table(inghilterra$DIFFICOLTA_ECONOMICHE))*100,2)
##MODELLO LOGISTICO REGNO UNITO
inghilterra$CLASSI_ETA<-relevel(as.factor(inghilterra$CLASSI_ETA),"(2) 25 - 39 years")
inghilterra$CLASSI_ISTRUZIONE<-relevel(as.factor(inghilterra$CLASSI_ISTRUZIONE),"MASTER")
fitGB<- glm(TRUST_EUROPA~CLASSI_ETA+TRUST_TV+TRUST_GIORNALI+
TRUST_GOVERNO+TRUST_SOCIAL+POL_INDEX+ CLASSI_ISTRUZIONE
,family=binomial(link="logit"), data=inghilterra)
display(fitGB)
#DIAGNOSTICA INGHILTERRA
predGB<- fitGB$fitted.values
dati_fitGB<-na.omit(inghilterra [,c("TRUST_EUROPA","CLASSI_ETA","POL_INDEX","TRUST_GIORNALI",
"TRUST_TV","TRUST_GOVERNO","TRUST_SOCIAL","CLASSI_ISTRUZIONE")])
yGB<- dati_fitGB$TRUST_EUROPA
binnedplot (predGB, yGB-predGB, nclass=30, xlab="Prob (avere fiducia) stimata",
ylab="Residui medi", main=NA, mgp=c(2,.5,0),cex.axis=0.9, cex.lab=0.9)
##tasso di errore
error.rate.null.GB <- mean(round(abs(yGB-mean(predGB))))
tax.error.GB <- mean((predGB > 0.5 & yGB==0) | (predGB < 0.5 & yGB==1))
#ANALISI DESCRITTIVA E MODELLO SUL SOTTOCAMPIONE RELATIVO AL REGNO UNITO
italia<-subset(dati, ISOCNTRY=="IT ")
round(prop.table(wtd.table(italia$TRUST_EUROPA1,weights = italia$WEX))*100,2)
##########################################################################################################'
########################## GRAFICO A BARRE (FIGLI) RISPETTO A TRUST EUROPA (ITALIA) ######################'
##########################################################################################################'
freq.trust.eu_FIGLI_si <- as.vector(round(prop.table(wtd.table(italia$FIGLI,
italia$TRUST_EUROPA1,
weights = italia$WEX),
margin = 2)*100,2))[c(2,4)]
freq.trust.eu_FIGLI_no <- as.vector(round(prop.table(wtd.table(italia$FIGLI,
italia$TRUST_EUROPA1,
weights = italia$WEX),
margin = 2)*100,2))[c(1,3)]
df1j_FIGLI <- data.frame("SI" = freq.trust.eu_FIGLI_si,
"NO" = freq.trust.eu_FIGLI_no,
"livelli" = levels(italia$TRUST_EUROPA1))
df2j_FIGLI <- melt(df1j_FIGLI, id.vars='livelli')
df2j_FIGLI <- df2j_FIGLI[order(df2j_FIGLI$variable, df2j_FIGLI$value, df2j_FIGLI$livelli),]
plotFIGLI<-ggplot(df2j_FIGLI, aes(x=livelli, y=value, fill=variable)) +
geom_bar(stat='identity', position= 'dodge',colour="black",width=0.8) +
labs(fill = "Avere figli:") + xlab("Fiducia UE") + ylab("Percentuale") +
theme(axis.text.x = element_text(angle=0, vjust=0.6)) +
geom_text(aes(label=paste(value, "%"), y=value+2.5),
position = position_dodge(0.9), vjust=0.5, size=3.7, fontface='bold')+
My_Theme+
scale_fill_manual(values=c("#D7B5D8","#980043"))
round(prop.table(wtd.table(italia$DIFFICOLTA_ECONOMICHE,
italia$TRUST_EUROPA1, weights = italia$WEX),
margin = 1)*100,2)
round(prop.table(wtd.table(italia$DIFFICOLTA_ECONOMICHE,
italia$REGION_IT, weights = italia$WEX),
margin = 2)*100,2)
round(prop.table(wtd.table(italia$CLASSI_ISTRUZIONE,
weights = italia$WEX))*100,2)
##MODELLO LOGISTICO ITALIA
italia$CLASSI_ETA<-relevel(as.factor(italia$CLASSI_ETA),"(2) 25 - 39 years")
italia$REGION_IT<-relevel(as.factor(italia$REGION_IT),"(1) Nord-Ovest")
fit_IT<- glm(TRUST_EUROPA~REGION_IT+CLASSI_ETA+TRUST_GOVERNO+TRUST_GIORNALI+TRUST_SOCIAL+TRUST_TV+
SODD_DEMOCRAZIA+POL_INDEX+DIFFICOLTA_ECONOMICHE
,family=binomial(link="logit"),data=italia)
display(fit_IT)
#DIAGNOSTICA ITALIA
pred_IT<- fit_IT$fitted.values
dati_fit_IT<-na.omit(italia[,c("TRUST_EUROPA","REGION_IT","TRUST_GOVERNO","TRUST_GIORNALI",
"TRUST_SOCIAL","CLASSI_ETA", "SODD_DEMOCRAZIA","TRUST_TV",
"POL_INDEX","DIFFICOLTA_ECONOMICHE")])
y_IT<- dati_fit_IT$TRUST_EUROPA
binnedplot (pred_IT, y_IT-pred_IT, nclass=25, xlab="Prob (avere fiducia ) stimata",
ylab="Residui medi", main=NA, mgp=c(2,.5,0), cex.axis=0.9, cex.lab=0.9)
##tasso di errore
error.rate.null.IT <- round(mean(round(abs(y_IT-mean(pred_IT))))*100,2)
error.rate.null.IT
tax.error.IT <- round(mean((pred_IT > 0.5 & y_IT==0) | (pred_IT < 0.5 & y_IT==1))*100,2)
tax.error.IT
|
ecc9ef8e1a11db0ef51a47ebecbd2ec6793aa9ca
|
ba5d410c8756292ffdd3be28def5454cd08be267
|
/chapter03/09_Applied.R
|
47b9775abb4430357f520cf48857cce63c56814e
|
[] |
no_license
|
saadlaouadi/ISLR
|
1f838022fd16c05ccef8f05a288ce351c4724c62
|
4cdb3ef128088216444f60a06ac26fb834098551
|
refs/heads/main
| 2023-03-21T09:02:34.141784
| 2021-03-18T20:45:23
| 2021-03-18T20:45:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,392
|
r
|
09_Applied.R
|
# Loading libraries
library(MASS)
library(ISLR)
# (a)
pairs(Auto)
'Horsepower and weight is highly correlated. (RED BOX)
Displacement and weight are highly correlated. (GREEN BOX)
MPG and acceleration seems like a non linear relationship (BLUE BOX)
'
# (b)
cor(Auto[,!colnames(Auto) %in% c("name")])
' Numerically view the correlation
'
# (c)
# Running a MLR on all predictors except for name
auto.mlr = lm(mpg~.-name, data=Auto)
summary(auto.mlr)
# i.
# There are multiple predictors that have relationship with the response
# because their associated p-value is significant. The p-value tells
# us the probability that the coefficient will take a value of 0. The
# typical threshold for p-value is 0.05. If the probability is below
# 0.05, then that means chances that it will be 0 is very slim.
# ii.
# The predictors: displacement, weight, year, and origin have a
# statistically significant relationship.
# iii.
# The coefficient of year is 0.7507 which is about 3/4. This tells us
# the relationship between year and MPG. It suggests that every 3 years,
# the mpg goes up by 4.
# (d)
par(mfrow=c(2,2))
plot(auto.mlr)
# Non-Linearity: The residual plot shows that there is a U-shape pattern in the residuals
# which might indicate that the data is non-linear.
# Non-constant Variance: The residual plot also shows that the variance is not constant. There
# is a funnel shape appearing at the end which indicates heteroscedasticity (non-constant variance)
# Normal Q-Q Plot shows that the residuals are normally distributed if
# the observations line up on the dashed line. In this case majority of
# the obeervations lie on the line except for 323, 327, 326.
# Outliers: There seems to not be any outliers because in the Scale-Location, all values are within
# the range of [-2,2]. It will only be an outlier if studentized residual is outside the range of
# [-3, 3].
# High Leverage Points: Based on the Residuals vs. Leverage graph, there is no observations that
# provides a high leverage. To determine if observations contains high leverage,
# we will have to look to see if there are any points above the red dotted line.
# If there is then that observation has high leverage. In this case, there
# are no high leverage observations.
# (e) ####
names(Auto)
interact.fit = lm(mpg~.-name+horsepower*displacement, data=Auto)
origin.hp = lm(mpg~.-name+horsepower*origin, data=Auto)
summary(origin.hp)
# Statistically Significant Interaction Terms:
# displacement and horsepower
# horsepower and origin
inter.fit = lm(mpg~.-name+horsepower:origin+horsepower:weight+horsepower:displacement, data=Auto)
summary(inter.fit)
# Adding more interactions, decreases the significance of previous significant values
# (f) ####
summary(lm(mpg~.-name+log(acceleration), data=Auto))
# log(acceleration) is still very significant but less significant than acceleration
summary(lm(mpg~.-name+log(horsepower), data=Auto))
# log(horsepower) is more significant than horsepower
summary(lm(mpg~.-name+I(horsepower^2), data=Auto))
# Squaring horsepower doesnt change the significance
summary(lm(mpg~.-name+I(weight^2), data=Auto))
# Squaring the weights doesnt change significance
lm.fit = lm(mpg~.-name+I(cylinders^2), data=Auto)
plot(lm.fit)
summary(lm(mpg~.-name+I(cylinders^2), data=Auto))
# Squaring the cylinders makes cylinders and horsepower significant variables
|
e25c8bd64d388a20202b38660d8686fed05931ac
|
ad799d1d329b273fa22db8ea0340f275fff46776
|
/R/data_tree.R
|
1df6808138d262c7c76617d0e7057b4808c69141
|
[] |
no_license
|
HongY23/acPCoA
|
70372001fbd1ae7a3a50b8a19ce12eb54af58bd0
|
5cb575677afbbfb4c10b5d230219980ce112509e
|
refs/heads/master
| 2023-08-13T07:15:59.441093
| 2021-10-12T09:59:07
| 2021-10-12T09:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,288
|
r
|
data_tree.R
|
#' @title Tree data.
#' @description NGS whole genome shotgun (WGS) sequencing data of white oak trees.
#'
#' @format A list with multiple elements, which are
#' \describe{
#' \item{DistMat.d2star}{the n by n distance matrix. n is the number of samples. d2star distance is applied.}
#' \item{DistMat.d2shepp}{the n by n distance matrix. n is the number of samples. d2shepp distance is applied.}
#' \item{DistMat.d2}{the n by n distance matrix. n is the number of samples. d2 distance is applied.}
#' \item{DistMat.cvtree}{the n by n distance matrix. n is the number of samples. Cvtree distance is applied.}
#' \item{DistMat.euclidean}{the n by n distance matrix. n is the number of samples. Euclidean distance is applied.}
#' \item{DistMat.manhattan}{the n by n distance matrix. n is the number of samples. Manhattan distance is applied.}
#' \item{ConfounderMat}{the n by q confounder matrix}
#' \item{ContinentalOri}{Samples were divided into three geographic categories according to their continental origins, which are NorthAmerica (NA), West Europe (WE), and East Europe and Asia (EEA).}
#' \item{batch}{Samples were divided into four batches according to the NCBI BioProject from which they were downloaded and the analysis platforms they used}
#' }
"data_tree"
|
0304d5ddda712ec52353a43fb7aff0654690aed9
|
e23a3c63994d87aa5c78f7c4a8a9e805df860c58
|
/Analytics/SourceCode/MAS 3e - Param Persistence.R
|
d4282e26e95842e9b01e68a6dd17c9844c20b148
|
[
"Apache-2.0"
] |
permissive
|
VHAINNOVATIONS/PerceptiveReach
|
090deab3f0a4a1019fc2f6ee18503d8311794f2c
|
29bb8c3cc47de0f75b6a6dd6be8577be661727bb
|
refs/heads/master
| 2020-04-06T03:34:13.383566
| 2017-10-27T18:36:28
| 2017-10-27T18:36:28
| 24,638,380
| 3
| 6
| null | 2016-08-30T17:04:06
| 2014-09-30T13:10:23
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,752
|
r
|
MAS 3e - Param Persistence.R
|
###############################################################################
# Another aspect of the simulations is to determine if there is significant
# fluctuation between simulation runs with the parameter coefficients. One
# major aspect of examining the parameter estimates is to determine if there
# are variables that commonly switch signs between positive and negative. This
# will demonstrate that in some of the simulated models that a parameter is a
# protective factor, and in other simulated models, it is a risk factor. Aside
# from evaluating the estimates, the significance of each parameter will also
# be analyzed to determine the percentage of the time that a parameter is shown
# to be a significant predictor.
####### INPUTS:
## OBJECTS:
# modelResults - object with the developed logistic regression
###### OUTPUTS:
## DATAFRAMES:
# paramSigResults - dataframe with P-Value for each model parameter
# paramTableResults - dataframe with coefficients for each model parameter
###############################################################################
### Save the parameter coefficients for each simulation
paramTable <- cbind(iter,as.data.frame(unlist(t(modelResults$coefficients))))
paramTableResults <- if(exists("paramTableResults")){
rbind(paramTableResults, paramTable)
} else {
paramTableResults <- paramTable
}
rm(paramTable)
### Save the parameter significance for each simulation
summary <- as.data.frame(summary(modelResults)$coefficients[,4])
colnames(summary) <- c("pVal")
paramSig <- cbind(iter,as.data.frame(t(summary)))
rownames(paramSig) <- NULL
paramSigResults <- if(exists("paramSigResults")){
rbind(paramSigResults, paramSig)
} else {
paramSigResults <- paramSig
}
rm(paramSig, summary)
|
dea59934f0c85eb48ad32972c98fb6110132ff2e
|
b570732604126b9a84305e508ddf77471bd36119
|
/cachematrix.R
|
39055608c81871f09f4064c7286b8b7c07a18502
|
[] |
no_license
|
n8atkinson/ProgrammingAssignment2
|
c75a919d24b09219aa2195e7bdabc4df1d75ca4a
|
0d5938d3ee65f3986cfd927a56cd80c036c3d2a1
|
refs/heads/master
| 2020-12-30T10:37:14.283846
| 2017-07-31T15:28:05
| 2017-07-31T15:28:05
| 98,849,059
| 0
| 0
| null | 2017-07-31T04:47:47
| 2017-07-31T04:47:46
| null |
UTF-8
|
R
| false
| false
| 1,752
|
r
|
cachematrix.R
|
## This a is secondary function call that returns already calculated matrix inverse or that it needs to be done
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
# Still a little unclear about this
set <- function(y) {
x <<- y
inverse <<- NULL
}
#
get <- function() x
#
set_inverse <- function(matrix_inverse) inverse <<- matrix_inverse
#
get_inverse <- function() inverse
# Returns a list of functions allowing them to exist in parent environment
list(set = set, get = get, get_inverse = get_inverse, set_inverse = set_inverse)
}
## This is the primary function call of the original matrix that will have its inverse calculated
cacheSolve <- function(x, ...) {
# Pull in either NULL or already calculated inverse
inverse <- x$get_inverse
# If inverse<>NULL then return already calcuated inverse
# Question: where is the check if inverse changes instead of being NULL?
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
# This is like an else_if, if previous "return" is called then cacheSolve closes at that line
# The rest of cacheSolve runs if inverse=NULL, meaning inverse needs to be calculated
data <- x$get_inverse()
#
inverse <- solve(data, ...)
#
x$set_inverse(inverse)
# Return new calculated inverse
inverse
}
# Ref: https://github.com/lgreski/datasciencectacontent/blob/master/markdown/rprog-breakingDownMakeVector.md
|
f1a99f96bb9508207094472087ee280d53084ecf
|
3875910f927debbe78f30387404e2309faae00c9
|
/R/api_client.R
|
1884b5322785b2b82b43db1e09bc2cbd928be323
|
[
"Apache-2.0"
] |
permissive
|
stjordanis/client-library-r
|
0fd44372e19196ab6b90c9c0ea5a86fd87c57a1e
|
e752cadf58135e2ebe9a4632a112acb95eec846f
|
refs/heads/main
| 2023-06-24T06:03:04.769727
| 2021-07-20T07:42:05
| 2021-07-20T07:42:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,931
|
r
|
api_client.R
|
# UbiOps
#
# Client Library to interact with the UbiOps API.
#
# UbiOps API version: v2.1
# Generated by custom generator based on: https://openapi-generator.tech
get_setting <- function(var.name, local.var, default="") {
if (!is.null(local.var)) {
local.var
} else {
Sys.getenv(var.name, unset=default)
}
}
get_base_path <- function(string) {
# Make sure basePath has no "/" at the end
if (substr(string, nchar(string), nchar(string)) == "/") {
string <- substr(string, 1, nchar(string) - 1)
}
string
}
get_authorization_headers <- function(token) {
c("Authorization" = token)
}
get_default_headers <- function(string) {
headers <- c()
for (i in strsplit(string, ",")[[1]]) {
key_value <- trimws(strsplit(i, ":", fixed=TRUE)[[1]])
if (length(key_value) == 2){
headers[key_value[1]] <- key_value[2]
}
}
headers
}
get_http_timeout <- function(timeout) {
if (!is.na(timeout)) {
httr::timeout(strtoi(timeout))
}
}
#' @title Call API
#' @description Call an endpoint of the UbiOps API
#' @param url_path API endpoint to call, e.g., "status"
#' @param http_method HTTP method to use, e.g., "POST"
#' @param body body of the request (optional)
#' @param query_params query parameters (optional)
#' @param content_type content type (optional)
#' @param encode encode (optional)
#' @param UBIOPS_PROJECT (system environment variable) UbiOps project name
#' @param UBIOPS_API_TOKEN (system environment variable) Token to connect to UbiOps API
#' @param UBIOPS_API_URL (optional - system environment variable) UbiOps API url - Default = "https://api.ubiops.com/v2.1"
#' @param UBIOPS_TIMEOUT (optional - system environment variable) Maximum request timeout to connect to UbiOps API - Default = NA
#' @param UBIOPS_DEFAULT_HEADERS (optional - system environment variable) Default headers to pass to UbiOps API, formatted like "header1:value1,header2:value2" - Default = ""
#' @param ... additional parameters to pass to httr GET/POST/PUT/PATCH/HEAD/DELETE function
#' @return Response content from the API
call_api <- function(url_path, http_method, body = NULL, query_params = NULL, content_type = NULL, encode = NULL,
UBIOPS_API_TOKEN = NULL, UBIOPS_API_URL = NULL, UBIOPS_PROJECT = NULL, UBIOPS_TIMEOUT = NULL,
UBIOPS_DEFAULT_HEADERS = NULL, ...){
project.name <- get_setting("UBIOPS_PROJECT", UBIOPS_PROJECT)
base_path <- get_base_path(get_setting("UBIOPS_API_URL", UBIOPS_API_URL, default = "https://api.ubiops.com/v2.1"))
header.params <- get_authorization_headers(get_setting("UBIOPS_API_TOKEN", UBIOPS_API_TOKEN))
header.defaults <- get_default_headers(get_setting("UBIOPS_DEFAULT_HEADERS", UBIOPS_DEFAULT_HEADERS))
timeout <- get_http_timeout(get_setting("UBIOPS_TIMEOUT", UBIOPS_TIMEOUT, default = NA))
user_agent <- "UbiOps/r/0.2.0"
if (project.name != "") {
url_path <- gsub("\\{project_name\\}", utils::URLencode(as.character(project.name), reserved = TRUE), url_path)
} else if (!grepl(url_path, "\\{project_name\\}", fixed = TRUE)) {
stop("Missing required parameter `UBIOPS_PROJECT`.")
}
url <- paste0(base_path, url_path)
headers <- httr::add_headers(c(header.params, header.defaults))
user_agent <- httr::user_agent(user_agent)
if (is.null(content_type)) {
content_type <- httr::content_type_json()
encode <- 'json'
}
if (http_method == "GET") {
resp <- httr::GET(url, query = query_params, headers, timeout, user_agent, ...)
} else if (http_method == "POST") {
resp <- httr::POST(url, query = query_params, headers, content_type, timeout, user_agent, body = body, encode = encode, ...)
} else if (http_method == "PUT") {
resp <- httr::PUT(url, query = query_params, headers, content_type, timeout, timeout, user_agent, body = body, encode = encode, ...)
} else if (http_method == "PATCH") {
resp <- httr::PATCH(url, query = query_params, headers, content_type, timeout, timeout, user_agent, body = body, encode = encode, ...)
} else if (http_method == "HEAD") {
resp <- httr::HEAD(url, query = query_params, headers, timeout, timeout, user_agent, ...)
} else if (http_method == "DELETE") {
resp <- httr::DELETE(url, query = query_params, headers, timeout, timeout, user_agent, ...)
} else {
stop("Http method must be `GET`, `HEAD`, `OPTIONS`, `POST`, `PATCH`, `PUT` or `DELETE`.")
}
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
resp
} else {
parsed_content <- tryCatch(
httr::content(resp, "parsed"), error = function(){ list() }
)
if (!is.null(parsed_content[["error"]])) {
error_msg <- paste0("Error (", httr::status_code(resp), ") : ", parsed_content[["error"]])
} else if (!is.null(parsed_content[["error_message"]])) {
error_msg <- paste0("Error (", httr::status_code(resp), ") : ", parsed_content[["error_message"]])
} else {
error_msg <- paste0("Error (", httr::status_code(resp), ") : ", "An unknown error occured")
}
stop(error_msg)
}
}
# Deserialize the content of api response
#' @param resp API response
deserialize <- function(resp) {
jsonlite::parse_json(httr::content(resp, "text", encoding = "UTF-8"))
}
# Write file to storage location
#' @param resp API response
#' @include api_response.R
deserialize_file <- function(resp, ...) {
tmp_dir <- get_setting("UBIOPS_TEMP_FOLDER_PATH", list(...), default = getwd())
result <- ApiFileResponse$new(resp)
file_name <- result$getFileName()
output_location <- file.path(tmp_dir, file_name)
output <- file(output_location, "wb")
readr::write_file(result$getContent(), output)
close(output)
output_location
}
|
487b002e50a95ac4fddb938bf7245ac179c026cf
|
5f9489f9e7b5226616de6269f25afae0a03b6209
|
/R/SCR2DNAmcmc.R
|
4de9c0f781d9390a44f328964bdd01738c7be9e6
|
[] |
no_license
|
benaug/SPIM
|
247c0c49a246d790fc1620f70a9d05b9cfc290db
|
b95be7391d5455c500677a9426b56d2944654d06
|
refs/heads/master
| 2022-02-20T03:03:35.775080
| 2022-01-22T19:46:40
| 2022-01-22T19:46:40
| 62,909,308
| 1
| 2
| null | 2021-02-04T18:27:19
| 2016-07-08T18:32:54
|
R
|
UTF-8
|
R
| false
| false
| 9,783
|
r
|
SCR2DNAmcmc.R
|
#' Run MCMC algorithm for basic SCR model with 2 observation processes that may or may not share sigma parameters
#' @param data a list produced by simSCR2DNA or in the same format
#' @param niter number of MCMC iterations to run
#' @param nburn number of MCMC iterations to discard as burn in
#' @param nthin MCMC thinning parameter. Record output on every nthin iterations. nthin=1 corresponds to no thinning
#' @param M The size of the augmented superpopulation
#' @param inits a list of user-supplied initial values. inits=list(psi=psi,lam01=lam01,lam02=lam02,sigma=sigma)
#' where sigma is of length 2 if sharesig=FALSE
#' @param proppars a list of tuning parameters for the proposal distributions
#' @param keepACs a logical indicating whether or not to keep the posteriors for z and s
#' @return a list with the posteriors for the SCR parameters (out), s, z
#' @author Ben Augustine
#' @description This function runs the MCMC algorithm for the basic SCR model with 2 observation processes. The data list should have the following elements:
#' 1. y1, a n x J x K capture history
#' 2. y2, another n x J x K capture history
#' 3. X1, a matrix with the X and Y trap locations in the first two columns that corresponds to y1
#' 4. X2, a matrix with the X and Y trap locations in the first two columns that corresponds to y2
#' 5. either buff or vertices. buff is the fixed buffer for the traps to produce the state space. It is applied to the minimum and maximum
#' X and Y locations, producing a square or rectangular state space. vertices is a matrix with the X and Y coordinates of a polygonal state
#' space.
#' @export
SCR2DNAmcmc <-
function(data,niter=2400,nburn=1200, nthin=5, M = 200,sharesig=TRUE, inits=inits,proppars=list(lam01=0.05,lam02=0.05,sigma=0.1,sx=0.2,sy=0.2),keepACs=TRUE){
###
if(sharesig==FALSE){
if(length(proppars$sigma)!=2|length(inits$sigma)!=2){
stop("must supply 2 starting values and proppars if sharesig=FALSE")
}
}else{
if(length(proppars$sigma)!=1|length(inits$sigma)!=1){
stop("must supply only 1 starting value and proppars if sharesig=TRUE")
}
inits$sigma=rep(inits$sigma,2)
}
library(abind)
y1<-data$y1
y2<-data$y2
X1<-as.matrix(data$X1)
X2<-as.matrix(data$X2)
J1<-nrow(X1)
J2<-nrow(X2)
#Remove guys not captured.
rem=which(rowSums(y1)==0&rowSums(y2)==0)
if(length(rem)>0){
y1=y1[-rem,,]
y2=y2[-rem,,]
}
n<- dim(y1)[1]
#If using polygon state space
if("vertices"%in%names(data)){
vertices=data$vertices
useverts=TRUE
xlim=c(min(vertices[,1]),max(vertices[,1]))
ylim=c(min(vertices[,2]),max(vertices[,2]))
}else if("buff"%in%names(data)){
buff<- data$buff
xlim<- c(min(c(X1[,1],X2[,1])),max(c(X1[,1],X2[,1])))+c(-buff, buff)
ylim<- c(min(c(X1[,2],X2[,2])),max(c(X1[,2],X2[,2])))+c(-buff, buff)
vertices=cbind(xlim,ylim)
useverts=FALSE
}else{
stop("user must supply either 'buff' or 'vertices' in data object")
}
##pull out initial values
psi<- inits$psi
lam01<- inits$lam01
lam02<- inits$lam02
sigma<- inits$sigma
#Augment data and make initial complete data set
if(length(dim(y1))==3){
K1<- dim(y1)[3]
y1<- abind(y1,array(0, dim=c( M-dim(y1)[1],J1, K1)), along=1)
y12D=apply(y1,c(1,2),sum)
}else if(length(dim(y1)==2)){
if(is.na(K)){
stop("if y is 2D, must supply K")
}
y12D=abind(y1,array(0, dim=c( M-dim(y1)[1],J1)), along=1)
}else{
stop("y must be either 2D or 3D")
}
if(length(dim(y2))==3){
K2<- dim(y2)[3]
y2<- abind(y2,array(0, dim=c( M-dim(y2)[1],J2, K2)), along=1)
y22D=apply(y2,c(1,2),sum)
}else if(length(dim(y2)==2)){
if(is.na(K)){
stop("if y is 2D, must supply K")
}
y22D=abind(y2,array(0, dim=c( M-dim(y2)[1],J2)), along=1)
}else{
stop("y must be either 2D or 3D")
}
known.vector=c(rep(1,n),rep(0,M-n))
z=known.vector
z[sample(which(z==0),sum(z==0)/2)]=1 #switch some uncaptured z's to 1. half is arbitrary. smarter way?
#Optimize starting locations given where they are trapped.
s<- cbind(runif(M,xlim[1],xlim[2]), runif(M,ylim[1],ylim[2])) #assign random locations
idx=which(known.vector==1) #switch for those actually caught
for(i in idx){
trps<- rbind(X1[y12D[i,]>0,1:2],X2[y22D[i,]>0,1:2])
trps<-matrix(trps,ncol=2,byrow=FALSE)
s[i,]<- c(mean(trps[,1]),mean(trps[,2]))
}
#check to make sure everyone is in polygon
if("vertices"%in%names(data)){
vertices=data$vertices
useverts=TRUE
}else{
useverts=FALSE
}
if(useverts==TRUE){
inside=rep(NA,nrow(s))
for(i in 1:nrow(s)){
inside[i]=inout(s[i,],vertices)
}
idx=which(inside==FALSE)
if(length(idx)>0){
for(i in 1:length(idx)){
while(inside[idx[i]]==FALSE){
s[idx[i],]=c(runif(1,xlim[1],xlim[2]), runif(1,ylim[1],ylim[2]))
inside[idx[i]]=inout(s[idx[i],],vertices)
}
}
}
}
#Bernoulli Likelihood function
func<- function(lamd1,lamd2,y1,y2,K1,K2,z,X1,X2){
#convert lamd to pd (gaussian hazard model)
pd1=1-exp(-lamd1)
pd2=1-exp(-lamd2)
#If data is M x K
if(is.matrix(y1)){
v <- rowSums(dbinom(y1,K1,pd1,log=TRUE))+rowSums(dbinom(y2,K2,pd2,log=TRUE))
v[z==0]<- 0
}else{
#If data is 1 x K
v <- sum(dbinom(y1,K1,pd1,log=TRUE))+sum(dbinom(y2,K2,pd2,log=TRUE))
v<- v*z
}
v
}
# some objects to hold the MCMC simulation output
nstore=(niter-nburn)/nthin
if(nburn%%nthin!=0){
nstore=nstore+1
}
if(sharesig==FALSE){
out<-matrix(NA,nrow=nstore,ncol=5)
dimnames(out)<-list(NULL,c("lam01","lam02","sigma1","sigma2","N"))
}else{
out<-matrix(NA,nrow=nstore,ncol=4)
dimnames(out)<-list(NULL,c("lam01","lam02","sigma","N"))
}
sxout<- syout<- zout<-matrix(NA,nrow=nstore,ncol=M)
idx=1 #for storing output not recorded every iteration
D1<- e2dist(s, X1)
D2<- e2dist(s, X2)
lamd1<- lam01*exp(-D1*D1/(2*sigma[1]*sigma[1]))
lamd2<- lam02*exp(-D2*D2/(2*sigma[2]*sigma[2]))
for(i in 1:niter){
#Update lam01
lik.curr<- sum( func(lamd1,lamd2,y12D,y22D,K1,K2,z,X1,X2) )
lam01.cand<- rnorm(1,lam01,proppars$lam01)
if(lam01.cand > 0){
lamd1.cand<- lam01.cand*exp(-D1*D1/(2*sigma[1]*sigma[1]))
lik.new<- sum( func(lamd1.cand,lamd2,y12D,y22D,K1,K2,z,X1,X2) )
if(runif(1) < exp(lik.new -lik.curr)){
lam01<- lam01.cand
lamd1=lamd1.cand
lik.curr<- lik.new
}
}
#Update lam02
lam02.cand<- rnorm(1,lam02,proppars$lam02)
if(lam02.cand > 0){
lamd2.cand<- lam02.cand*exp(-D2*D2/(2*sigma[2]*sigma[2]))
lik.new<- sum( func(lamd1,lamd2.cand,y12D,y22D,K1,K2,z,X1,X2) )
if(runif(1) < exp(lik.new -lik.curr)){
lam02<- lam02.cand
lamd2=lamd2.cand
lik.curr<- lik.new
}
}
#Update sigma
if(sharesig==FALSE){
#update sigma 1
sigma.cand<- rnorm(1,sigma[1],proppars$sigma[1])
if(sigma.cand > 0){
lamd1.cand<- lam01*exp(-D1*D1/(2*sigma.cand*sigma.cand))
lik.new<- sum( func(lamd1.cand,lamd2,y12D,y22D,K1,K2,z,X1,X2) )
if(runif(1) < exp(lik.new -lik.curr)){
sigma[1]<- sigma.cand
lamd1=lamd1.cand
lik.curr<- lik.new
}
}
#update sigma 2
sigma.cand<- rnorm(1,sigma[2],proppars$sigma[2])
if((sigma.cand > 0) & (sigma.cand<25000)){###informative prior
lamd2.cand<- lam02*exp(-D2*D2/(2*sigma.cand*sigma.cand))
lik.new<- sum( func(lamd1,lamd2.cand,y12D,y22D,K1,K2,z,X1,X2) )
if(runif(1) < exp(lik.new -lik.curr)){
sigma[2]<- sigma.cand
lamd2=lamd2.cand
lik.curr<- lik.new
}
}
}else{
sigma.cand<- rnorm(1,sigma[1],proppars$sigma)
if(sigma.cand > 0){
lamd1.cand<- lam01*exp(-D1*D1/(2*sigma.cand*sigma.cand))
lamd2.cand<- lam02*exp(-D2*D2/(2*sigma.cand*sigma.cand))
lik.new<- sum( func(lamd1.cand,lamd2.cand,y12D,y22D,K1,K2,z,X1,X2) )
if(runif(1) < exp(lik.new -lik.curr)){
sigma<- rep(sigma.cand,2)
lamd1=lamd1.cand
lamd2=lamd2.cand
lik.curr<- lik.new
}
}
}
#Update psi gibbs
## probability of not being captured in a trap AT ALL
pd1=1-exp(-lamd1)
pd2=1-exp(-lamd2)
pbar1=(1-pd1)^K1
pbar2=(1-pd2)^K2
prob0<- exp(rowSums(log(pbar1))+rowSums(log(pbar2)))
fc<- prob0*psi/(prob0*psi + 1-psi)
z[known.vector==0]<- rbinom(sum(known.vector ==0), 1, fc[known.vector==0])
lik.curr<- sum( func(lamd1,lamd2,y12D,y22D,K1,K2,z,X1,X2) )
psi <- rbeta(1, 1 + sum(z), 1 + M - sum(z))
## Now we have to update the activity centers
for (j in 1:M) {
Scand <- c(rnorm(1, s[j, 1], proppars$sx), rnorm(1, s[j, 2], proppars$sy))
if(useverts==FALSE){
inbox <- Scand[1] < xlim[2] & Scand[1] > xlim[1] & Scand[2] < ylim[2] & Scand[2] > ylim[1]
}else{
inbox=inout(Scand,vertices)
}
if (inbox) {
d1tmp <- sqrt((Scand[1] - X1[, 1])^2 + (Scand[2] - X1[, 2])^2)
d2tmp <- sqrt((Scand[1] - X2[, 1])^2 + (Scand[2] - X2[, 2])^2)
lamd1.thisj<- lam01*exp(-D1[j,]*D1[j,]/(2*sigma[1]*sigma[1]))
lamd1.cand<- lam01*exp(-d1tmp*d1tmp/(2*sigma[1]*sigma[1]))
lamd2.thisj<- lam02*exp(-D2[j,]*D2[j,]/(2*sigma[2]*sigma[2]))
lamd2.cand<- lam02*exp(-d2tmp*d2tmp/(2*sigma[2]*sigma[2]))
llS<- sum(func(lamd1.thisj,lamd2.thisj,y12D[j,],y22D[j,],K1,K2,z[j],X1,X2))
llcand<- sum(func(lamd1.cand,lamd2.cand,y12D[j,],y22D[j,],K1,K2,z[j],X1,X2))
if (runif(1) < exp(llcand - llS)) {
s[j, ] <- Scand
D1[j, ] <- d1tmp
D2[j, ] <- d2tmp
lamd1[j, ] <- lamd1.cand
lamd2[j, ] <- lamd2.cand
}
}
}
#Do we record output on this iteration?
if(i>nburn&i%%nthin==0){
sxout[idx,]<- s[,1]
syout[idx,]<- s[,2]
zout[idx,]<- z
if(sharesig==FALSE){
out[idx,]<- c(lam01,lam02,sigma ,sum(z))
}else{
out[idx,]<- c(lam01,lam02,sigma[1] ,sum(z))
}
idx=idx+1
}
} # end of MCMC algorithm
if(keepACs==TRUE){
list(out=out, sxout=sxout, syout=syout, zout=zout)
}else{
list(out=out)
}
}
|
a3852a46bc60f4fed28b195f948046e52ade9e38
|
13cb16a2eff4bae3ddd5f4826edfd879a03001a0
|
/man/SetTpcaResultTable-tpcaResult-method.Rd
|
435d15a2404db5b3ce741af4233433c41b05aef0
|
[] |
no_license
|
nkurzaw/Rtpca
|
2a3471ff14756699883f57a6be7590bdd05cd940
|
c1922eb6c195263cae2fe941584f32b8035b3df1
|
refs/heads/master
| 2023-05-29T19:50:58.078565
| 2023-04-25T15:20:19
| 2023-04-25T15:20:19
| 227,799,428
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 817
|
rd
|
SetTpcaResultTable-tpcaResult-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpcaResult-class.R
\name{SetTpcaResultTable,tpcaResult-method}
\alias{SetTpcaResultTable,tpcaResult-method}
\alias{SetTpcaResultTable}
\title{Set TpcaResultTable}
\usage{
\S4method{SetTpcaResultTable}{tpcaResult}(object, df)
}
\arguments{
\item{object}{an object of class tpcaResult}
\item{df}{a data frame containing the results
from a tpca analysis}
}
\value{
an object of class tpcaResult
}
\description{
Set TpcaResultTable
}
\examples{
m1 <- matrix(1:12, ncol = 4)
m2 <- matrix(2:13, ncol = 4)
m3 <- matrix(c(2:10, 1:7), ncol = 4)
rownames(m1) <- 1:3
rownames(m2) <- 2:4
rownames(m3) <- 2:5
mat_list <- list(
m1, m2, m3
)
tpcaObj <- new("tpcaResult", ObjList = mat_list)
SetTpcaResultTable(tpcaObj, data.frame(pair = "A:B"))
}
|
2d810d428e7056cb8492bbef2a3152ccc501691f
|
109862b6950a6a8f25483d2a2568c32bc59dcd39
|
/R/Meat-data.R
|
e6edddefe2a3bd64f9fa59a5f7a40119e959ccad
|
[] |
no_license
|
cran/RPEClust
|
4a99f36edf713d74452e1f5002e743d35686c61c
|
c32a70c9c2c0375dccc3556cc4996d2bcabae970
|
refs/heads/master
| 2020-12-22T18:13:10.511347
| 2019-11-06T11:50:05
| 2019-11-06T11:50:05
| 236,885,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 709
|
r
|
Meat-data.R
|
#' Meat Data
#'
#' This is the near-infrared spectroscopic meat data used in Murphy, Dean and Raftery (2010) <doi:10.1214/09-AOAS279> and originally collected by McElhinney, Downey and Fearn (1999) <doi:10.1255/jnirs.245>.
#'
#' @docType data
#'
#' @usage data(Meat)
#'
#' @format A list with two components:
#' \describe{
#' \item{x}{Homogenized raw meat spectra. A matrix with 231 rows and 1050 columns.}
#' \item{y}{A vector containing the true class memberships.}}
#'
#' @keywords datasets
#'
#' @references Murphy, Dean and Raftery (2010) <doi:10.1214/09-AOAS279>
#'
#' @source McElhinney, Downey and Fearn (1999) <doi:10.1255/jnirs.245>
#'
#' @examples
#' data(Meat)
#' Meat$x[1:5,1:5]
#' Meat$y
"Meat"
|
cd30ca6b7b10c8c01ef96b006f3cec6f7f60e701
|
be74788faa9e7dbe5a902e4b4542efda99f0a9fd
|
/Code/02_all_cichlids_fishbase_info.R
|
954edce49909f09755e9cad4f7bb65451a0236c0
|
[] |
no_license
|
hiweller/mouthbrooding-morphology
|
7ab5d93ad8f17853ee0195a3e74c6b91613b2bea
|
950d85a928e916ba370608ee525789afa52ba644
|
refs/heads/master
| 2020-05-29T10:12:57.228222
| 2019-05-29T12:44:09
| 2019-05-29T12:44:09
| 189,088,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,697
|
r
|
02_all_cichlids_fishbase_info.R
|
# FOR ALL CICHLIDS:
# get diet and reproduction info from FishBase into one big table.
# Any species with more than one entry for a category -> concatenate unique
# values into a single "|"-separated string.
# Mouthbrooding and sifting are then manually confirmed, and each species
# gets a location (Central America, South America, Africa - lakes,
# Africa - rivers).
# CATEGORIES:
# not sifting, not mouthbrooding
# sifting, not mouthbrooding
# not sifting, mouthbrooding
# sifting, mouthbrooding
# incomplete but of interest: i.e. definitely mouthbrooding, feeding ambiguous or definitely sifting, reproduction ambiguous
# GENERA:
# ID genera that fit at least two of these categories
# i.e. one species mouthbroods but doesn't sift, another does both
## check for overlaps between species list and UMMZ and FMNH lists from fishnet 2

## get cichlid species list ####
# get a list of all cichlid species on fishbase
source("Code/FUNCTION_get_species.R")
cichlid_species <- get_species("Cichlidae", taxonomic_level = "family")

# Pre-allocate an all-NA data frame, one row per species.
# NOTE(review): Reproduction.ref, Eggs and Larvae are never filled below --
# presumably placeholders for later manual entry; confirm.
cichlid_df <- data.frame("Species" = cichlid_species,
                         "Reproduction" = rep(NA, length(cichlid_species)),
                         "Reproduction.comments" = rep(NA, length(cichlid_species)),
                         "Mouthbrooder" = rep(NA, length(cichlid_species)),
                         "Diet" = rep(NA, length(cichlid_species)),
                         "FeedingType" = rep(NA, length(cichlid_species)),
                         "Sifter" = rep(NA, length(cichlid_species)),
                         "Location" = rep(NA, length(cichlid_species)),
                         "Reproduction.ref" = rep(NA, length(cichlid_species)),
                         "Eggs" = rep(NA, length(cichlid_species)),
                         "Larvae" = rep(NA, length(cichlid_species)))

for (i in seq_along(cichlid_species)) {  # seq_along: safe if the list is empty
  sp <- cichlid_species[i]
  # Reproduction: reproductive guild, comments
  reproduction.df <- rfishbase::reproduction(sp, fields = c("RepGuild2", "AddInfos"))
  # Collapse multiple entries into one "|"-separated string per field.
  if (nrow(reproduction.df) > 1) {
    reproduction.df <- apply(reproduction.df, 2, function(x) paste(unique(x[!is.na(x)]), collapse = "|"))
  }
  # Diet: major diet component ("Herbivory2") and feeding strategy.
  diet.df <- rfishbase::ecology(sp, fields = c("Herbivory2", "FeedingType"))
  # If a species has several entries, collapse the non-NA values
  if (nrow(diet.df) > 1) {
    diet.df <- apply(diet.df, 2, function(x) paste(unique(x[!is.na(x)]), collapse = "|"))
  }
  # Location: FAO areas where the species is endemic or native.
  fao.df <- rfishbase::faoareas(sp, fields = c("FAO", "Status"))
  fao.df <- unique(fao.df$FAO[grep("endemic|native", fao.df$Status)])
  location <- paste(fao.df, collapse = "|")
  # For African species, narrow down to the big lakes vs. rivers.
  if (length(grep("Africa", fao.df)) > 0) {
    ecosystem.df <- rfishbase::ecosystem(sp)
    location <- "Africa - "
    ecosystem.df <- unique(ecosystem.df[grep("endemic|native", ecosystem.df$Status),
                                        19:20])
    if (length(grep("Victoria|Tanganyika|Malawi", ecosystem.df$EcosystemName)) > 0) {
      location <- paste(location, "LAKES:", ecosystem.df$EcosystemName[grep("Victoria|Tanganyika|Malawi", ecosystem.df$EcosystemName)], collapse = "|")
    }
    if (length(grep("River", ecosystem.df$EcosystemType)) > 0) {
      location <- paste(location, "RIVERS", sep = " | ")
    }
  }
  # Enter results into the appropriate columns.
  cichlid_df$Reproduction[i] <- reproduction.df[1]
  cichlid_df$Reproduction.comments[i] <- reproduction.df[2]
  cichlid_df$Diet[i] <- diet.df[1]
  cichlid_df$FeedingType[i] <- diet.df[2]
  cichlid_df$Location[i] <- location
}

# some variables save as list types - unlist them:
cichlid_OUT <- apply(cichlid_df, 2, unlist)

# get rid of any row where reproduction, diet, and feeding type are ALL NA:
na.idx <- which(apply(is.na(cichlid_OUT[, c(2, 5, 6)]), 1, function(i) sum(i) == 3))
# BUG FIX: when no such rows exist, `cichlid_OUT[-integer(0), ]` would drop
# EVERY row; only subset when there is actually something to remove.
if (length(na.idx) > 0) {
  cichlid_OUT <- cichlid_OUT[-na.idx, ]
}
write.csv(cichlid_OUT, "Spreadsheets/ALL_cichlids_diet_reproduction.csv")

# read in manually-edited version and attach FishBase species comments
cichlids_2 <- read.csv("Spreadsheets/ALL_cichlids_diet_reproduction.csv")
cichlids_2$Comments <- rep(NA, nrow(cichlids_2))
# look up comments and add extra column
for (i in seq_len(nrow(cichlids_2))) {  # seq_len: safe for 0-row tables
  cichlids_2$Comments[i] <- suppressWarnings(rfishbase::species(cichlids_2[i, 1])$Comments)
}
cichlids_2$Comments <- unlist(cichlids_2$Comments)
write.csv(cichlids_2, "Spreadsheets/ALL_cichlids_diet_reproduction.csv")
|
c848269bff0b4c18fbc37fc321f5c70e9810d1de
|
ee8862f0009d2844a80ad847f1d102b1e920b5b5
|
/Artificial Neural Networks Code.R
|
3195946c42b0095d82bbf7852fd59083eb230b17
|
[] |
no_license
|
kubilayerislik/Girisim_Sirketleri_Yapay_Sinir_Aglari_Lojistik_Regresyon_Analizi
|
47f4353f6ac552f8bb2d5aafa64fd1db54ad250d
|
e154e2bfad3ada38af644cc0c9445a0a0d234502
|
refs/heads/master
| 2020-12-27T20:44:32.563098
| 2020-02-03T19:49:42
| 2020-02-03T19:49:42
| 238,047,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,000
|
r
|
Artificial Neural Networks Code.R
|
## Artificial neural network classifier for company category.
##
## NOTE(review): predictions are rescaled with `data1`, which is not defined
## in this file -- presumably an un-normalized copy of `data` created
## elsewhere; confirm before running standalone. Also note the training-set
## rescaling below reuses `data1[-ind, 7]` (the TEST rows) -- verify whether
## `data1[ind, 7]` was intended.

# Prepare Data: min-max normalize the first 7 columns to [0, 1].
for (i in 1:7) {
  data[, i] <- (data[, i] - min(data[, i])) / (max(data[, i]) - min(data[, i]))
}
# 567-row training sample; the remaining rows form the test set.
ind <- sample(1:nrow(data), 567)
train_data <- data[ind, ]
test_data <- data[-ind, ]

# Create Model: three hidden layers of 4 units each, logistic output.
library(neuralnet)
n <- neuralnet(Category ~ No_Stage + Seed + Stage_A + Stage_B + Stage_C + Stage_D,
               data = train_data, hidden = c(4, 4, 4), linear.output = FALSE)
plot(n)

# Create Actual And Predicted Data (test set), rescaled to original range.
output <- compute(n, test_data[, -7])
prediction <- output$net.result * (max(data1[-ind, 7]) - min(data1[-ind, 7])) + min(data1[-ind, 7])
actual <- data1[-ind, 7]
actual <- as.numeric(actual$Category)

# Mean Square Error and confusion table on the test set.
MSE <- sum((prediction - actual)^2) / nrow(test_data)
table(actual, round(prediction))
MSE

# Actual And Predicted Data Table on the training set.
output_train <- compute(n, train_data[, -7])
prediction_train <- output_train$net.result * (max(data1[-ind, 7]) - min(data1[-ind, 7])) + min(data1[-ind, 7])
actual_train <- data1[ind, 7]
actual_train <- as.numeric(actual_train$Category)
table(actual_train, round(prediction_train))
|
c58b8c00cf3bac37cfb52c1fa4d439641c4c24f9
|
ea9f6419dcc0f72748c55bd8d46d7dda2f296296
|
/R/fgsea_with_wgcna_modules/brown-leading-edge-BTM-table.R
|
76ac099504aaddea25b09124a0f490008c16c9ab
|
[
"MIT"
] |
permissive
|
kimjhkp/baseline
|
9e3f12690c9c6ebdc2ad565b5acb830b5dcf74c6
|
8f8a40c44bc72951ace0ccb1504c41ada07f3d1b
|
refs/heads/master
| 2021-01-16T09:57:50.861227
| 2020-06-01T01:19:08
| 2020-06-01T01:19:08
| 243,071,685
| 0
| 0
|
MIT
| 2020-02-25T18:33:12
| 2020-02-25T18:33:12
| null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
brown-leading-edge-BTM-table.R
|
library(data.table)
library(tmod)
library(fgsea)
library(methods)

## Build a leading-edge membership table comparing the WGCNA "brown" module
## with BTM modules LI.M75, LI.M150 and LI.M165.
## NOTE(review): PROJECT_DIR must be defined by the calling environment.

dn.out <- file.path(PROJECT_DIR, "generated_data/fgsea_with_wgcna_modules/")
dir.create(dn.out, showWarnings = FALSE)

# CD38 robust correlations for all genes (ranking statistic).
fn.cd38.cor <- file.path(PROJECT_DIR, "generated_data", "CHI", "robust_corr_all.genes.txt")
df.cd38.cor <- fread(fn.cd38.cor)
ranked <- df.cd38.cor[, .(Gene = gene, cor.mean.sd.ratio)]
ranked[, Gene := toupper(Gene)]

# Genes in the WGCNA "brown" module (SLE low-DA network).
mods <- fread(file.path(PROJECT_DIR, "generated_data/WGCNA-modules-from-SLE-low-DA/SLE-low-34sbj-9601probes-gene.in.module.minModSize20.signed-hybrid.txt"))
mod <- toupper(mods[which(Module %in% "brown")]$Symbol)

# Ranks of the brown-module genes within the full ranking.
# NOTE(review): r1/r2 are computed but not used below -- presumably leftovers.
r1 <- 1:nrow(ranked)
r2 <- r1[which(ranked$Gene %in% mod)]

# BTM module gene sets (comma-separated in tmod's getGenes output).
m75 <- strsplit(getGenes("LI.M75")$Genes, ",")[[1]]
m150 <- strsplit(getGenes("LI.M150")$Genes, ",")[[1]]
m165 <- strsplit(getGenes("LI.M165")$Genes, ",")[[1]]

# One-column indicator data.table (Gene + a 1-valued membership column).
convert.to.dt <- function(lst, name) {
  tmp <- as.data.table(list(lst, rep(1, length(lst))))
  setnames(tmp, 1, "Gene")
  setnames(tmp, "V2", name)
  tmp
}

# Full outer join of the three BTM indicator tables, seeded (Reduce init)
# with the brown-module table. (The dead `if (1)` wrapper was removed;
# top-level behavior is identical.)
tab <- Reduce(function(x, y) merge(x, y, all = TRUE, by = "Gene"),
              list(convert.to.dt(m75, "LI.M75"), convert.to.dt(m150, "LI.M150"),
                   convert.to.dt(m165, "LI.M165")),
              convert.to.dt(mod, "brown"))
fn.out <- file.path(dn.out, "brown-mod-m75-m150-m165-genes.csv")
fwrite(file = fn.out, tab, quote = TRUE)

# Short version: brown-module genes overlapping at least one BTM module.
tab <- tab[which(!is.na(brown) & (!is.na(LI.M75) | !is.na(LI.M150) | !is.na(LI.M165)))]
# fixed = TRUE: treat ".csv" as a literal suffix, not a regex.
fwrite(file = sub(".csv", "-short.csv", fn.out, fixed = TRUE), tab, quote = TRUE)
|
4713ea24778f73f4b0dfd10c2a12a8600153d86c
|
82bb2981b8e25fb0423427200e36c8d25e982a80
|
/0802_plot_582.R
|
8fd163aaee3cf1c18624fe9ed1aca7a2cc272957
|
[] |
no_license
|
irichgreen/UnicornFactory
|
f357b1b184a289f32e9a3ec49c725c7fffa4a735
|
d916669410b200e41b07a6c87bf38109485cb836
|
refs/heads/master
| 2020-12-25T14:22:49.308401
| 2017-10-20T00:59:57
| 2017-10-20T00:59:57
| 63,758,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,205
|
r
|
0802_plot_582.R
|
## Plot each process variable against time for coil PQ0582, overlaying the
## absolute thickness deviation on a secondary (right) axis with +/-3
## control limits. Refactored: the original file repeated the same plotting
## block verbatim 13 times; it is now one helper applied to each variable.

# Draw one dual-axis panel for a single column of df582.
plot_var_vs_thickness <- function(varname) {
  v <- df582[[varname]]
  plot.new()
  # Primary axis (left, green): the process variable over time.
  plot(df582$ts, v, type = "l", pch = 1, col = 3,
       xlab = "Time(PQ0582)", ylab = "",
       ylim = c(min(v), max(v)), main = varname)
  mtext(varname, side = 2, line = 2, col = 3)
  # Secondary axis (right, blue): absolute thickness deviation.
  par(new = TRUE)
  plot(df582$Thickness.Deviation.Absolut, axes = FALSE, xlab = "", ylab = "",
       ylim = c(-3.5, 3.5), type = "l", pch = 2, col = 4)
  axis(side = 4)
  mtext("Thickness", side = 4, line = 2, col = 4)
  # Control limits at +/-3 on the thickness scale.
  abline(h = 3, col = "red")
  abline(h = -3, col = "red")
}

# Variables plotted in the same order as the original panels 1-13.
vars <- c(
  "Force.Sum.Actual", "DS.U_Pressure.Actual",
  "Bending.Pressure.Measured.Negative", "Bending.U.Measured.Positive",
  "Speed.Mill.Actual", "Speed.Exit.Actual", "dh_raw",
  "Tension.Exit.Actual", "Thickness.Exit.Delta", "VC.Ctrl.Out",
  "Flow.Error", "Oil.Pressure", "Mean.Tension"
)
for (v in vars) plot_var_vs_thickness(v)
|
1c88b7c5eb0ff48d6a2cf03ffc55c97e956d4e19
|
2c24a525328c494a936e78a0d2c8d72f22278f6b
|
/enhancer/getREChanges.R
|
308dbd2286fe8a2bdf6ec310fd166278233f38a2
|
[] |
no_license
|
Danko-Lab/CD4-Cell-Evolution
|
fb8edd528c32922856de217fe07a7ee615a8456e
|
745fc9f1900c0c171aeb25edc4da67a8ea40a334
|
refs/heads/master
| 2021-04-30T18:34:21.462995
| 2018-03-29T02:40:40
| 2018-03-29T02:40:40
| 20,419,882
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,694
|
r
|
getREChanges.R
|
## This script identifies branch-specific changes in RE activities.
##
## NOTE(review): fdr_df, hs, ca, and PVAL are not defined in this file --
## presumably provided by fdr.RData / normalizeSubsample.R below; confirm
## before running standalone.
load("../annotations/fdr.RData")
source("../lib/normalizeSubsample.R")
# dREG-score thresholds for calling an element present (> highth) or
# absent (< lowth) in a species.
highth <- 0.30
lowth <- 0.05
require(boot)
# Restrict to dREG-annotated regulatory elements and attach human p-values
# (matched by element name, tss$V4).
tss_aln <- fdr_df[grepl("dREG", ca$annot_type),]
hspv <- hs[grepl("dREG", ca$annot_type),]
tss <- read.table("tss.tsv")
tss <- data.frame(tss, tss_aln[match(tss$V4, tss_aln$name),c(9,33:50)], HumanP= hspv$pvalue[match(tss$V4, tss_aln$name)])
## Alignable fraction (V20) denotes a gap in either species. Make sure gaps are in both.
## Classify as 'promoter'/ 'enhancer'
#stab <- rowMax(tss[,17:18])
#dist <- tss[,13]
#class <- rep("tss", NROW(tss)) ## tss is then unclassified as a promoter or enhancer
#class[stab < 0.1 & dist < 500] <- "Prox_Stab" ## Clearly protein coding promoter
#class[stab > 0.1 & dist > 10000] <- "Dist_UnSt" ## Clearly distal enhancer
#class[stab < 0.1 & dist > 125000] <- "Dist_Stab" ## Clearly stable, but distal
#summary(as.factor(class))
#tss$V5 <- as.factor(class)
## Change unscored to 0
# Columns 7-12 hold per-species scores; treat unscored (NA) sites as 0.
for(i in 7:12) { tss[is.na(tss[,i]),i] <- 0 }
## Change in basal T-cells.
## 1:1 ortholog, mappable, complete gain/ loss, gain/ loss in magnitude.
# Species score columns (established by the parallel blocks below):
# V7 = human (hg19), V8 = chimp (panTro4), V9 = macaque (rheMac3).
# Gain = present only in human OR a significant magnitude increase
# (FDR < PVAL with positive fold change); loss is the mirror image.
indx_hg19_gain <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V7 > highth & tss$V8 < lowth & tss$V9 < lowth) | (tss$HumanFDR < PVAL & tss$HumanFC > 0))
indx_hg19_loss <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V7 < lowth & tss$V8 > highth & tss$V9 > highth) | (tss$HumanFDR < PVAL & tss$HumanFC < 0))
sum(indx_hg19_gain)
sum(indx_hg19_loss)
write.table(tss[indx_hg19_gain | indx_hg19_loss,], "hg19.gain.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_hg19_gain,], "hg19.gain.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_hg19_loss,], "hg19.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_hg19_gain | indx_hg19_loss,1:3], "hg19.gain.loss.insight.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
# High-confidence subsets: fold change > sqrt(5), and FDR < 0.01 both with
# and without PIs (HumanFDR_PI).
write.table(tss[(indx_hg19_gain | indx_hg19_loss) & abs(tss$HumanFC) > 5^(1/2) & tss$HumanFDR < 0.01,1:3], "hg19.gl.fold-GT5.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[(indx_hg19_gain | indx_hg19_loss) & tss$HumanFDR < 0.01 & tss$HumanFDR_PI < 0.01,1:3], "hg19.gl-UPI-HC.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
## 1:1 ortholog, mappable, complete gain/ loss, gain/ loss in magnitude.
# Macaque (rheMac3) branch: V9 is the macaque score.
indx_rheMac3_gain <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V9 > highth & tss$V8 < lowth & tss$V7 < lowth) | (tss$MacaqueFDR < PVAL & tss$MacaqueFC > 0))
indx_rheMac3_loss <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V9 < lowth & tss$V8 > highth & tss$V7 > highth) | (tss$MacaqueFDR < PVAL & tss$MacaqueFC < 0))
sum(indx_rheMac3_gain)
sum(indx_rheMac3_loss)
write.table(tss[indx_rheMac3_gain | indx_rheMac3_loss,], "rheMac3.gain.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_rheMac3_gain,], "rheMac3.gain.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_rheMac3_loss,], "rheMac3.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
## 1:1 ortholog, mappable, complete gain/ loss, gain/ loss in magnitude.
# Chimp (panTro4) branch: V8 is the chimp score.
indx_panTro4_gain <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V8 > highth & tss$V9 < lowth & tss$V7 < lowth) | (tss$ChimpFDR < PVAL & tss$ChimpFC > 0))
indx_panTro4_loss <- tss$V20 == 0 & !is.na(tss$mapSize) & ((tss$V8 < lowth & tss$V9 > highth & tss$V7 > highth) | (tss$ChimpFDR < PVAL & tss$ChimpFC < 0))
sum(indx_panTro4_gain)
sum(indx_panTro4_loss)
write.table(tss[indx_panTro4_gain | indx_panTro4_loss,], "panTro4.gain.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_panTro4_gain,], "panTro4.gain.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
write.table(tss[indx_panTro4_loss,], "panTro4.loss.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
## Conserved in all species.
# Present in all three species, no significant change on any branch, and
# small fold changes everywhere.
indx <- tss$V20 == 0 & !is.na(tss$mapSize) & (tss$V7 > highth & tss$V8 > highth & tss$V9 > highth) & (tss$HumanFDR > 0.25 & tss$ChimpFDR > 0.25 & tss$MacaqueFDR > 0.25) & (abs(tss$HumanFC) < 0.5 & abs(tss$ChimpFC) < 0.5 & abs(tss$MacaqueFC) < 0.5)
sum(indx)
write.table(tss[indx,], "all.conserved.bed", row.names=FALSE, col.names=FALSE, quote=FALSE, sep="\t")
## QQ Plot to show enrichment of quantiles.
# Overlaid QQ plots of -log10 human p-values against uniform quantiles:
# red = human gain/loss sites, black = all sites, gray = conserved sites.
pdf("QQ-plot.pdf")
qqplot(-log(seq(0, 1, 1/50000),10), -log(tss$HumanP[indx_hg19_loss | indx_hg19_gain],10), col="red", ylim=c(0,30), xlim=c(0,3.5)); #abline(0,1)
par(new=TRUE)
qqplot(-log(seq(0, 1, 1/50000),10), -log(tss$HumanP,10), ylim=c(0,30), xlim=c(0,3.5))
par(new=TRUE)
qqplot(-log(seq(0, 1, 1/50000),10), -log(tss$HumanP[indx],10), col="gray", ylim=c(0,30), xlim=c(0,3.5)); abline(0,1)
dev.off()
# Thanks: http://web.mit.edu/~r/current/arch/i386_linux26/lib/R/library/limma/html/propTrueNull.html
# Note: Returns percent of null hypotheses that are true (i.e., fraction non-significant).
require(limma)
propTrueNull(tss$HumanP[indx_hg19_loss | indx_hg19_gain]) ## Estimate proportion of true null hypotheses. Raw pvalues: 15%
propTrueNull(tss$HumanP[indx_hg19_loss | indx_hg19_gain], method="hist") ## By this method: 9.88%
## Validation in humans.
require(bigWig)
source("../lib/avg.metaprofile.R")
# Random genomic sites used as a background set in the plots below.
random_sites <- read.table("random-sites.bed.gz")
makePlot <- function(bed, mark, bwpath= "/local/storage/data/hg19/cd4/epiRoadmap_histone/", halfWindow= 5000, step= 25, ...) {
bw <- load.bigWig(paste(bwpath, mark, ".bw", sep=""))
mp <- avg.metaprofile.bigWig(center.bed(bed[,1:3], halfWindow, halfWindow), bw, step=step, ...)
plot(mp)
bed.region.bpQuery.bigWig(bw, bed[,1:3]) * 1000/(bed[,3]-bed[,2])
}
pdf("dREG-Changes.pdf")
## One panel set per mark: metaprofiles for conserved, gained, lost,
## completely gained/lost and random sites, then a boxplot of per-site
## read densities. Refactored from five copy-pasted blocks into one helper;
## panel names, subsets, and ordering are unchanged.
plot_mark_panels <- function(mark, main_title = mark) {
  dens <- list(
    conserved     = makePlot(tss[indx,], mark, name = mark),
    gain          = makePlot(tss[indx_hg19_gain & (tss$V8 > lowth | tss$V9 > lowth),], mark, name = paste(mark, "gain")),
    loss          = makePlot(tss[indx_hg19_loss & tss$V7 > lowth,], mark, name = paste(mark, "loss")),
    complete.gain = makePlot(tss[indx_hg19_gain & tss$V8 < lowth & tss$V9 < lowth,], mark, name = paste(mark, "complete gain")),
    complete.loss = makePlot(tss[indx_hg19_loss & tss$V7 < lowth,], mark, name = paste(mark, "complete loss")),
    random        = makePlot(random_sites, mark, name = paste(mark, "random"))
  )
  boxplot(dens, ylab = "Reads per kilobase", main = main_title, outline = FALSE)
}
for (mark in c("H3K27ac", "H3K27me3", "H3K4me3", "H3K4me1")) {
  plot_mark_panels(mark)
}
# The original used "MeDIP-seq" (lowercase s) for this boxplot title only.
plot_mark_panels("MeDIP-Seq", main_title = "MeDIP-seq")
dev.off()
makeHeatmap <- function(bed, path, halfWindow=5000, step=25) {
bw <- load.bigWig(paste("/local/storage/data/hg19/cd4/epiRoadmap_histone/H3K27ac.bw", sep=""))
hm <- bed.step.bpQuery.bigWig(bw, center.bed(tss[indx,1:3], 5000, 5000), step=25)
hm_mat <- t(matrix(unlist(hm), NROW(hm[[1]])))
}
#2# Data playtime!
|
6235a589f3daefd865e84894d96f46b4252656a3
|
befd978632e45a391ecb8ccacbdb77dd267fe6ef
|
/Table 1.R
|
a149b8aa87cecfae6a5f462dd6fabb9dd50f088c
|
[] |
no_license
|
bank-fenyves/COVIDprot
|
41509ecf18aa90448d7dc4c27dc9c60898938e9f
|
c34af61050c347fc0e73154f1ca595c6e7d30688
|
refs/heads/main
| 2023-05-21T22:21:03.376107
| 2021-06-15T07:47:10
| 2021-06-15T07:47:10
| 351,146,052
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
Table 1.R
|
## Table 1: demographics, comorbidities, severity and serial labs
## (days 0, 3, 7), summarized with gtsummary.
## NOTE(review): readxl, gtsummary and magrittr must be attached by the
## calling environment -- no library() calls appear in this file.
clinical <- read_excel("clinical_data.xlsx")
data <- clinical

# Shared column set for both tables (was duplicated verbatim).
summary_cols <- c("COVID", "VTE",
                  "Ethnicity", "Age", "Sex", "BMI", "HEART", "KIDNEY",
                  "LIVER", "LUNG", "DIABETES", "HTN",
                  "Severity", "Vaso",
                  "plts_0", "plts_3", "plts_7",
                  "crp_0", "crp_3", "crp_7",
                  "ddimer_0", "ddimer_3", "ddimer_7",
                  "fibrinogen_0", "fibrinogen_3", "fibrinogen_7")

## All patients by COVID status
table1 <- data[, summary_cols]
table1 <- tbl_summary(table1, by = COVID, missing = "no") %>%
  add_n() %>%
  add_p() %>%
  modify_header(label = "COVID") %>%
  bold_p(t = 0.05) %>%
  bold_labels()
table1

## COVID-positive patients by VTE complications
table1 <- data[, summary_cols] %>%
  subset(COVID == 1)
table1 <- table1[, -1]  # drop the now-constant COVID column
table1 <- tbl_summary(table1, by = VTE, missing = "no") %>%
  add_n() %>%
  add_p() %>%
  modify_header(label = "VTE") %>%
  bold_p(t = 0.05) %>%
  bold_labels()
table1
|
95367890c714652e5f4f78340c479ba150d7c727
|
aea5237a007ceda88a4fb1b10fbfbb27bc027583
|
/Functions/BlockBoot_apply_subregions.R
|
cc1c26f0dd45c535f0505104adeb56621ac6fdc5
|
[] |
no_license
|
EveSlavich/Block-Bootstrap
|
be6e11cb0651c642ee96b38ddb7e5304abe596fb
|
c435e84ad66fed1f19af719336c957068bcf6282
|
refs/heads/master
| 2021-04-15T17:27:15.953564
| 2018-03-26T23:21:59
| 2018-03-26T23:21:59
| 126,899,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,693
|
r
|
BlockBoot_apply_subregions.R
|
BlockBoot_apply_subregions = function (x,y,m_x,m_y, dat, block_Ls, Grid_space,shape,NBoot,Stat.function,sampling_type, lookuptables.folderpath,type,subregion.division,...){
print(subregion.division)
if(subregion.division == "mutually.exclusive"){
bins = break_into_subregions(x , y , m_y = m_y , m_x = m_x)
bins.levels = levels(bins$bin)
sigma_stat_subregion = matrix(nrow=length(block_Ls), ncol=(m_x*m_y))
rownames(sigma_stat_subregion) = block_Ls
for(L in 1:length(block_Ls)){
for (subregion in 1:(m_x*m_y)){
print(paste0("subregion",subregion))
print(paste0("mxy",m_x,m_y))
block_L = block_Ls[L]
dat_subregion = dat [which(bins$bin == bins.levels[subregion]),]
x_subregion = x [which(bins$bin == bins.levels[subregion])]
y_subregion = y [which(bins$bin == bins.levels[subregion])]
if(nrow(dat_subregion)>0){
if (block_L>0){
rm(lookup_table,lookup.coords,envir=.GlobalEnv)
if (is.na(lookuptables.folderpath) ==FALSE){ #check if a lookup table has been created to speed things up, if it has load it, and check the lookup table is for data with same x,y, coordinates
load_file_if_exists(paste0(lookuptables.folderpath,"lookup_table_subregion",subregion,"_L",block_L,"_grid_space_",Grid_space,"_sampling_type_","sites","_",shape,".RData"))
if(exists("lookup.coords")){
print("using existing coords...")
if( identical ( lookup.coords$lookup.x , x_subregion ) == FALSE | identical ( lookup.coords$lookup.y , y_subregion ) == FALSE ){
print("wrong sites... creating new lookup")
new_sample_subregion = resample_blocks_by_area(NBoot = NBoot, x=x_subregion , y=y_subregion , block_L=block_Ls[L],Grid_space = Grid_space, area_or_sites =sampling_type,shape=shape,lookup_tablename=paste0("lookup_table_subregion",subregion), lookuptables.folderpath = lookuptables.folderpath, ...)
}else{
new_sample_subregion = resample_blocks_by_area(NBoot = NBoot,lookup_table= lookup_table, x=x_subregion , y=y_subregion , block_L=block_Ls[L],Grid_space = Grid_space, area_or_sites =sampling_type,shape=shape, ...)
}
rm(lookup_table,lookup.coords,envir=.GlobalEnv)
}else{
print("creating new lookup")
new_sample_subregion = resample_blocks_by_area(
x = x_subregion,
y = y_subregion,
NBoot = NBoot,
block_L = block_L ,
Grid_space = Grid_space,
area_or_sites = sampling_type,
shape = shape,
lookuptables.folderpath = lookuptables.folderpath,
lookup_tablename=paste0("lookup_table_subregion",subregion)
# ...
)###will create a lookup table
}}else{print("code under developement")}
}
#If block_L =0, do an iid bootstrap
if (block_L==0){
new_sample_subregion = list()
for(i in 1:NBoot){
new_sample_subregion [[i]] = sample(1:length(x_subregion), size=length(x_subregion), replace =T)
}
}
boot.reps.of.Stat.function_subregion = bootstrap_wrapper(dat = dat_subregion, function_to_repeat = Stat.function, new_sample = new_sample_subregion, NBoot=NBoot,type=type,...)
sigma_stat_subregion[L,subregion] = sd(unlist(boot.reps.of.Stat.function_subregion))
}else{
sigma_stat_subregion[L,subregion] = 0
}
}
}
}else{
### subregion division not mutually exclusive
bins = break_into_subregions(x , y , m_y = m_y , m_x = m_x)
bins.levels = levels(bins$bin)
sigma_stat_subregion = matrix(nrow=length(block_Ls), ncol=(m_x*m_y))
rownames(sigma_stat_subregion) = block_Ls
for(L in 1:length(block_Ls)){
for (subregion in 1:(m_x*m_y)){
block_L = block_Ls[L]
dat_subregion = dat [which(bins$bin != bins.levels[subregion]),]
x_subregion = x [which(bins$bin != bins.levels[subregion])]
y_subregion = y [which(bins$bin != bins.levels[subregion])]
if(nrow(dat_subregion)>0){
if (block_L>0){
rm(lookup_table,lookup.coords,envir=.GlobalEnv)
if (is.na(lookuptables.folderpath) ==FALSE){ #check if a lookup table has been created to speed things up, if it has load it, and check the lookup table is for data with same x,y, coordinates
load_file_if_exists(paste0(lookuptables.folderpath,"lookup_table_subregion_overlap",subregion,"_L",block_L,"_grid_space_",Grid_space,"_sampling_type_","sites","_",shape,".RData"))
if(exists("lookup.coords")){
print("using existing coords...")
if( identical ( lookup.coords$lookup.x , x_subregion ) == FALSE | identical ( lookup.coords$lookup.y , y_subregion ) == FALSE ){
print("wrong sites... creating new lookup")
new_sample_subregion = resample_blocks_by_area(NBoot = NBoot, x=x_subregion , y=y_subregion , block_L=block_Ls[L],Grid_space = Grid_space, area_or_sites =sampling_type,shape=shape,lookup_tablename=paste0("lookup_table_subregion_overlap",subregion), lookuptables.folderpath = lookuptables.folderpath, ...)
}else{
new_sample_subregion = resample_blocks_by_area(NBoot = NBoot,lookup_table= lookup_table, x=x_subregion , y=y_subregion , block_L=block_Ls[L],Grid_space = Grid_space, area_or_sites =sampling_type,shape=shape, lookup_tablename=paste0("lookup_table_subregion_overlap",subregion),...)
}
rm(lookup_table,lookup.coords,envir=.GlobalEnv)
}else{
print("creating new lookup")
new_sample_subregion = resample_blocks_by_area(
x = x_subregion,
y = y_subregion,
NBoot = NBoot,
block_L = block_L ,
Grid_space = Grid_space,
area_or_sites = sampling_type,
shape = shape,
lookuptables.folderpath = lookuptables.folderpath,
lookup_tablename=paste0("lookup_table_subregion_overlap",subregion)
# ...
)###will create a lookup table
}}else{print("code under developement")}
}
#If block_L =0, do an iid bootstrap
if (block_L==0){
new_sample_subregion = list()
for(i in 1:NBoot){
new_sample_subregion [[i]] = sample(1:length(x_subregion), size=length(x_subregion), replace =T)
}
}
boot.reps.of.Stat.function_subregion = bootstrap_wrapper(dat = dat_subregion, function_to_repeat = Stat.function, new_sample = new_sample_subregion, NBoot=NBoot,type=type,...)
sigma_stat_subregion[L,subregion] = sd(unlist(boot.reps.of.Stat.function_subregion))
}else{
sigma_stat_subregion[L,subregion] = 0
}
}
}
}
;
sigma_stat_subregion
}
|
63ae0535dbd4083f75533af1ae97be9b05c508eb
|
66136f71ee8be6055328dbe616c17854b4003182
|
/Hierarchical Clustering.R
|
792d35d76c26bbd4cae52578d528bf0a90b957be
|
[
"MIT"
] |
permissive
|
rahul1192/Machine_Learning_with_R
|
60f6da99a389da2194d24749e6311004b446ca64
|
b74c4e4dcec467ab6d1338fbb406a2f13a9dca6b
|
refs/heads/master
| 2023-04-28T10:05:13.075486
| 2021-05-23T11:00:08
| 2021-05-23T11:00:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,825
|
r
|
Hierarchical Clustering.R
|
# Hierarchical Clustering
# Two flavours exist:
#   1. Agglomerative (bottom-up):
#      Step 1: make each data point its own cluster -> N clusters.
#      Step 2: merge the two closest points -> (N - 1) clusters.
#      Step 3: merge the two closest clusters -> (N - 2) clusters.
#      Step 4: repeat step 3 until a single cluster remains.
#   2. Divisive (top-down).
# Options for the distance between clusters (linkage):
#   closest point, furthest point, average distance, distance between centroids.

# ----------------------------------------------------- Importing Data ------------------------------------------- #
# Expects Mall_Customers.csv in the working directory; keep only columns 4-5
# (annual income and spending score).
dataset <- read.csv('Mall_Customers.csv')
dataset <- dataset[4:5]

# ----------------------------- Using the Dendrogram to find the optimal number of clusters ----------------------- #
# Ward's method on Euclidean distances. Fit once and reuse the model below
# (the original fitted the identical model twice).
hc <- hclust(dist(dataset, method = 'euclidean'), method = 'ward.D')
dendrogram <- hc  # same fitted model, kept under its original name for the plot
plot(dendrogram,
     main = "Dendrogram",
     xlab = "Customer",
     ylab = "Euclidean Distance")  # fixed label typo ("Eculidean")

# --------------------------------- Fitting Hierarchical Clustering to the Mall dataset -------------------------- #
# Cut the tree into 5 clusters (chosen from the dendrogram above).
y_hc <- cutree(hc, 5)
y_hc

# -------------------------------------------- Visualising the Cluster ------------------------------------------- #
library(cluster)
clusplot(dataset,
         y_hc,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Clusters of customers'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
|
10c07fdf748521fa9cd35b6788ae4455a15e90f5
|
e646bb6522693173d7d0c0e1c12e685139b8e543
|
/bin/quick-rnaseq-ma.R
|
9d3a424bc8eb08c48b7eddf8f836af1bab986080
|
[
"MIT"
] |
permissive
|
stracquadaniolab/quick-rnaseq-nf
|
d7ecbac1e789cea3fdaa642824d2a53d088c84b6
|
bd75a622952b8a86c4615436fdc05b34b9721ca3
|
refs/heads/master
| 2023-04-12T19:55:11.116454
| 2022-06-17T12:25:34
| 2022-06-17T12:25:34
| 411,712,364
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,645
|
r
|
quick-rnaseq-ma.R
|
#!/usr/bin/env Rscript

# docopt grammar: this string is parsed at runtime, so its wording is
# load-bearing and must not be edited casually.
doc <- "quick-rnaseq-ma.R
Usage:
quick-rnaseq-ma.R <inputfile> <outputfile> [--control=<contrast_control>] [--case=<contrast_case>] [--transform=<tf>] [--log-foldchange=<lfc>]
Options:
--control=<contrast_control> Condition to use as control [default: control].
--case=<contrast_case> Condition to use as case [default: case].
-l --log-foldchange=<lfc> Log2 fold-change threshold [default: 0].
-h --help Show this screen.
--version Show version.
"

# Parse command-line arguments.
library(docopt)
cli <- docopt(doc, version = "quick-rnaseq-ma.R")

# Data-processing libraries (suppress startup chatter).
suppressMessages(library(tidyverse))
suppressMessages(library(DESeq2))

# Load the serialized DESeq2 object and shrink log fold-changes for the
# requested case-vs-control contrast.
deseq_obj <- readRDS(cli$inputfile)
shrunk <- lfcShrink(
  deseq_obj,
  contrast = c("condition", cli$case, cli$control),
  type = "normal",
  lfcThreshold = as.numeric(cli$log_foldchange)
)

# Let DESeq2 compute the MA-plot coordinates, then render them with ggplot2.
ma_df <- plotMA(shrunk, returnData = TRUE)

# Symmetric y-axis limit: smallest integer covering every fold-change.
axis_limit <- ceiling(max(abs(ma_df$lfc), na.rm = TRUE))
ma_plot <- ggplot(ma_df, aes(x = mean, y = lfc, color = isDE)) +
  geom_point() +
  geom_hline(yintercept = 0, linetype = "dashed", colour = "#737373") +
  scale_y_continuous("log fold change", limits = c(-axis_limit, axis_limit)) +
  scale_x_continuous("mean of normalized counts") +
  scale_colour_manual(values = c("#CCCCCC", "#08519c")) +
  theme(
    panel.background =
      element_rect(fill = "#ffffff", colour = "#737373", size = 1)
  ) +
  guides(color = "none")

# Save the plot with a 4:3 aspect ratio.
ggsave(cli$outputfile, plot = ma_plot, width = 5, height = 5 * (3 / 4))
|
81323c8869528bf3e5f85cb555fa5b4b23efd512
|
e54b786c875ff2e6c1eae31ee774c6b6147794c2
|
/R/S4/HGRL_RNAfold.R
|
e0236881d6b57541ef2c3e800a5060ca8830f97d
|
[] |
no_license
|
hjanime/STAU1_hiCLIP
|
9f1d5516b6b5f7413709315b2f0985d729719480
|
861051cae1b2508a9c3871bf24f377479ae460a7
|
refs/heads/master
| 2020-12-11T05:26:01.387765
| 2015-01-07T22:00:06
| 2015-01-07T22:00:06
| 36,923,073
| 1
| 1
| null | 2015-06-05T09:05:11
| 2015-06-05T09:05:11
| null |
UTF-8
|
R
| false
| false
| 7,732
|
r
|
HGRL_RNAfold.R
|
##############################################################
#' shiftCD
#'
#' Shift duplex coordinates from transcript-based to 3' UTR-based. For both
#' arms (\code{$L} and \code{$R}) of a HybridGRL object the start position of
#' the matching 3' UTR is subtracted, so position 1 afterwards corresponds to
#' the first base of the 3' UTR.
#'
#' @param object A HybridGRL object holding left (\code{$L}) and right
#'   (\code{$R}) arm ranges.
#' @param gr.utr3 GRanges of 3' UTRs; its seqnames are matched by name against
#'   the seqnames used in \code{object}.
#' @return The HybridGRL object with shifted start/end coordinates.
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' # shiftCD(hgrl, gr.utr3)
setGeneric(
  name = "shiftCD",
  def = function(object, gr.utr3){standardGeneric("shiftCD")}
)
setMethod(
  f = "shiftCD",
  signature = "HybridGRL",
  definition = function(object, gr.utr3){
    # Lookup table: 3' UTR start per transcript, keyed by seqname.
    start.vec <- start(gr.utr3)
    names(start.vec) <- as.character(seqnames(gr.utr3))
    # Re-base one GRanges object to 1-based 3' UTR coordinates.
    shift.gr <- function(gr, start.vec){
      start(gr) <- start(gr) - start.vec[as.character(seqnames(gr))] + 1
      end(gr) <- end(gr) - start.vec[as.character(seqnames(gr))] + 1
      return(gr)
    }
    object$L <- shift.gr(object$L, start.vec)
    object$R <- shift.gr(object$R, start.vec)
    return(object)
  }
)
##############################################################
#' selectRNAfoldPredictableGenes
#'
#' Find genes whose hybrid duplexes can jointly be used as a single folding
#' constraint for RNAfold: every pair of duplexes of a gene must pass the
#' pairwise coordinate test implemented below.
#'
#' @param object A HybridGRL object. NOTE(review): the object is assumed to be
#'   sorted before this method is called (see the caution comment below).
#' @param selected If TRUE (default) return the gene names that pass the test;
#'   if FALSE return the genes that fail it.
#' @return Character vector of gene names.
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' # selectRNAfoldPredictableGenes(hgrl)
setGeneric(
  name = "selectRNAfoldPredictableGenes",
  def = function(object, selected = TRUE){standardGeneric("selectRNAfoldPredictableGenes")}
)
setMethod(
  f = "selectRNAfoldPredictableGenes",
  signature = "HybridGRL",
  definition = function(object, selected = TRUE){
    ## Functions specific for this method
    # Pairwise test on duplexes 1 and 2 of `temp.object`. Passes when
    # d < o, or when (r < c) and (b < o), where
    #   b = end(L1), c = start(R1), d = end(R1), o = end(L2), r = end(R2).
    # NOTE(review): this appears to accept pairs that are nested or disjoint
    # (i.e. representable in one pseudoknot-free structure) — confirm the
    # intended geometry against the accompanying paper.
    sub.is.RNAfoldPredictableGenes <- function(temp.object){
      b = end(temp.object$L[1])
      c = start(temp.object$R[1])
      d = end(temp.object$R[1])
      o = end(temp.object$L[2])
      r = end(temp.object$R[2])
      Rbf <- FALSE
      if(d < o){
        Rbf <- TRUE
      } else {
        if((r < c) & (b < o)){
          Rbf <- TRUE
        } else {
          Rbf <- FALSE
        }
      }
      return(Rbf)
    }
    # A gene is predictable when every pair of its duplexes passes the
    # pairwise test (trivially TRUE for a gene with a single duplex).
    is.RNAfoldPredictableGenes <- function(object){
      if(length(object$L) == 1){
        b.res <- TRUE
      } else {
        n.dup <- length(object$L)
        n.comb <- combn(1:n.dup, 2)  # all index pairs of duplexes
        b.results <- c()
        for(i in 1:ncol(n.comb)){
          temp.object <- selectHybridByIndex(object, indexes = n.comb[, i])
          Rbf <- sub.is.RNAfoldPredictableGenes(temp.object)
          b.results <- c(b.results, Rbf)
        }
        b.res <- all(b.results)
      }
      return(b.res)
    }
    ## Caution: HGRL object should be sorted before running this function
    # Evaluate each gene in turn.
    gene.vec <- unique(as.character(seqnames(object$L)))
    bf.genes <- c()
    for(i.g in gene.vec){
      tmp.object <- selectHybridByGeneName(object, i.g)
      tmp.bf.genes <- is.RNAfoldPredictableGenes(tmp.object)
      bf.genes <- c(bf.genes, tmp.bf.genes)
    }
    # Keep the passing genes, or the failing ones when selected = FALSE.
    if(selected){
      selected.gene.vec <- gene.vec[bf.genes]
    } else {
      selected.gene.vec <- gene.vec[!bf.genes]
    }
    return(selected.gene.vec)
  }
)
##############################################################
#' createDB
#'
#' Build RNAfold structure constraints (dot-bracket strings) from the hybrid
#' duplexes of a HybridGRL object and write them to disk. Left arms become
#' "(", right arms ")" and unconstrained positions "."; constraints of
#' several duplexes on the same gene are merged. Two files are written:
#' <filename>.faconst (id, sequence, constraint) and <filename>.const
#' (id, constraint).
#'
#' @param object A HybridGRL object.
#' @param gr.utr3 GRanges of 3' UTRs; defines the constraint length per gene.
#' @param filename Basename (without extension) for the two output files.
#' @return Data frame (gene_id, DB) of per-duplex constraints in the
#'   intermediate "_"-separated 0/1/2 encoding (not yet merged per gene,
#'   not yet dot-bracket).
#'
#' @export
#' @docType methods
#' @rdname hybridGRL-methods
#'
#' @examples
#' # createDB(hgrl, gr.utr3, "constraints")
setGeneric(
  name = "createDB",
  def = function(object, gr.utr3, filename){standardGeneric("createDB")}
)
setMethod(
  f = "createDB",
  signature = "HybridGRL",
  definition = function(object, gr.utr3, filename){
    # Element-wise sum of two "_"-separated integer strings of equal length.
    # After merging, a value > 2 means two constraints claimed one position.
    mergeDB <- function(vec1, vec2){
      if(length(vec1) != length(vec2)){
        stop("Vector length should be the same.")
      }
      left_elements <- lapply(strsplit(vec1, "\\_"), as.integer)
      right_elements <- lapply(strsplit(vec2, "\\_"), as.integer)
      sum_elements <- mapply("+", left_elements, right_elements, SIMPLIFY = FALSE)
      elements_conct <- sapply(sum_elements, function(x){paste(x, collapse = "_")})
      return(elements_conct)
    }
    # Encode each range as a "_"-separated vector spanning the whole 3' UTR:
    # `bracket` (1 for left arms, 2 for right arms) inside the range, 0 outside.
    createDB <- function(gr, gr.utr3, bracket = 1){
      for(i in 1:length(gr)){
        utr3.logical <- as.character(seqnames(gr.utr3)) == as.character(seqnames(gr[i]))
        utr3.length <- end(gr.utr3[utr3.logical]) - start(gr.utr3[utr3.logical]) + 1
        left <- paste(rep(0, (start(gr[i]) - 1)), collapse = "_")
        mid <- paste(rep(bracket, (end(gr[i]) - start(gr[i]) + 1)), collapse = "_")
        right <- paste(rep(0, (utr3.length - end(gr[i]))), collapse = "_")
        all.elements <- paste(c(left, mid, right), collapse = "_")
        # Trim the separators left behind by empty left/right segments.
        all.elements <- gsub("^\\_", "", all.elements)
        all.elements <- gsub("\\_$", "", all.elements)
        elementMetadata(gr)$DB[i] <- all.elements
      }
      return(gr)
    }
    # One row per duplex: gene id plus merged L+R constraint string.
    createDB.df <- function(object){
      DB.df <- data.frame(
        gene_id = as.character(seqnames(object$L)),
        DB = "NA",
        stringsAsFactors = FALSE
      )
      elements_conct <- mergeDB(elementMetadata(object$L)$DB, elementMetadata(object$R)$DB)
      # A "3" would mean a position claimed by both a left and a right arm.
      if(length(grep("3", elements_conct)) != 0){
        stop("Conflicting duplexes exist")
      }
      DB.df$DB <- elements_conct
      return(DB.df)
    }
    # Collapse genes with several duplexes into a single merged constraint row.
    compressDBdf <- function(DB.df){
      duplicated_id <- unique(
        DB.df$gene_id[duplicated(DB.df$gene_id)]
      )
      unique.df <- DB.df[!(DB.df$gene_id %in% duplicated_id), ]
      duplicated.df <- DB.df[DB.df$gene_id %in% duplicated_id, ]
      compressed.df <- data.frame(
        gene_id = unique(duplicated.df$gene_id),
        DB = "NA",
        stringsAsFactors = FALSE
      )
      for(gene in compressed.df$gene_id){
        temp.df <- duplicated.df[duplicated.df$gene_id %in% gene, ]
        merged.DB <- temp.df$DB[1]
        for(i in 1:(nrow(temp.df) - 1)){
          merged.DB <- mergeDB(merged.DB, temp.df$DB[i + 1])
        }
        compressed.df$DB[compressed.df$gene_id == gene] <- merged.DB
      }
      result.df <- rbind(unique.df, compressed.df)
      result.df <- result.df[order(result.df$gene_id), ]
      return(result.df)
    }
    # "_"-separated 0/1/2 string -> RNAfold dot-bracket notation.
    convertIntoDB <- function(vec){
      temp_dp <- gsub("_", "", vec)
      temp_dp_1 <- gsub("0", ".", temp_dp)
      temp_dp_2 <- gsub("1", "(", temp_dp_1)
      dp_vec <- gsub("2", ")", temp_dp_2)
      return(dp_vec)
    }
    # Annotate both arms, build the per-duplex table, then merge and convert.
    object <- addColumnHGRL(object, "DB", default.value = "NA")
    object$L <- createDB(object$L, gr.utr3, 1)
    object$R <- createDB(object$R, gr.utr3, 2)
    DB.df <- createDB.df(object)
    compressed.DB.df <- compressDBdf(DB.df)
    compressed.DB.df$DB <- convertIntoDB(compressed.DB.df$DB)
    # Write the two output files via sink().
    filename.faconst <- paste(filename, "faconst", sep = ".")
    filename.const <- paste(filename, "const", sep = ".")
    sink(filename.faconst)
    for(i in 1:nrow(compressed.DB.df)){
      line.id <- paste(">", compressed.DB.df[i, 1], "\n")
      line.constrain <- paste(compressed.DB.df[i, 2], "\n")
      cat(line.id)
      # NOTE(review): `utr3.selected` is not defined in this method or its
      # arguments — it must exist in the calling environment or this line
      # fails. Consider passing it in explicitly.
      cat(elementMetadata(utr3.selected)$sequence[as.character(seqnames(utr3.selected)) == compressed.DB.df[i, 1]])
      cat("\n")
      cat(line.constrain)
    }
    sink()
    sink(filename.const)
    for(i in 1:nrow(compressed.DB.df)){
      line.id <- paste(">", compressed.DB.df[i, 1], "\n")
      line.constrain <- paste(compressed.DB.df[i, 2], "\n")
      cat(line.id)
      cat(line.constrain)
    }
    sink()
    # Note: the uncompressed per-duplex table is returned, not the written one.
    return(DB.df)
  }
)
|
177bcd38a7485d8d7e34609224108fa411c8cb21
|
d64acae137ca604f9ca3d8cb281cf096b82e2b79
|
/R/extract_linf_k_from_fishbase.R
|
7522e724f7885a0555b9066e09e5d639abfa5699
|
[] |
no_license
|
cddesja/R4Atlantis
|
fed3ad4e95836f9193a9b0e36ad3cc3ab6dfa938
|
13d8437ceb0c77e9bcbf97dee83f4001720f32c9
|
refs/heads/master
| 2016-09-10T10:14:02.158937
| 2016-01-05T16:32:51
| 2016-01-05T16:32:51
| 19,950,936
| 1
| 0
| null | 2015-02-12T14:34:02
| 2014-05-19T17:24:50
|
R
|
UTF-8
|
R
| false
| false
| 4,351
|
r
|
extract_linf_k_from_fishbase.R
|
#' Extract values for Linf and k from www.fishbase.org
#'
#' Scrapes the population-growth pages of www.fishbase.org for each species
#' and collects the von Bertalanffy growth parameters Linf and k together
#' with the country and locality of each record.
#'
#' @param fish character vector of species names ("Genus species").
#' @param exclude_subspecies should subspecies (e.g. "Sprattus sprattus
#'   balticus") be excluded from the extraction?
#' @return a data frame with columns species, country, locality, linf and k.
#'
#' @details FishBase IDs are first resolved with \code{get_ids_fishbase};
#'   the IDs are needed to build the page URLs. Species without growth
#'   information trigger a warning and are dropped from the result.
#' @examples
#' extract_linf_k_fishbase(c("Gadus morhua", "Merlangius merlangus"))
#' @export
extract_linf_k_fishbase <- function(fish, exclude_subspecies = TRUE){
  ids <- get_ids_fishbase(fish, exclude_subspecies)
  # Split names into genus and species parts to build the page URLs.
  ge <- sapply(str_split(ids[[2]], pattern = " "), function(x) x[1])
  sp <- sapply(str_split(ids[[2]], pattern = " "), function(x) x[2])
  urls <- paste0("http://fishbase.org/PopDyn/PopGrowthList.php?ID=", ids[[1]],
                 "&GenusName=", ge, "&SpeciesName=", sp, "&fc=183")
  # Download the raw HTML of every growth page.
  # Fix: warn = FALSE (the original passed the string "F"); also removed a
  # dead fishbase.backup <- fishbase; fishbase <- fishbase.backup round-trip.
  fishbase <- lapply(urls, readLines, warn = FALSE)
  # Drop species without growth information and warn about them.
  pos_missing <- which(grepl("The system found no growth information for the requested specie.", fishbase))
  if(length(pos_missing) >= 1){
    warning("No growth information available:\n", paste(ids[[2]][pos_missing], collapse = "\n "))
    ids <- lapply(ids, function(x) x[-pos_missing])
    fishbase <- fishbase[-pos_missing]
  }
  # The growth table always starts at line 142 of the page source and ends
  # two lines before the centred table that follows it.
  table_start <- 142 # Based on: all(sapply(fishbase, grep, pattern = "<table cellpadding") == 142)
  table_end <- sapply(fishbase, grep, pattern = "<table align=\"center\"") - 2
  for(i in seq_along(fishbase)){
    fishbase[[i]] <- fishbase[[i]][table_start:table_end[i]]
  }
  # Extract Linf and K from the rows carrying the "loo" query parameter:
  # Linf sits between "&loo=" and "&k=", K between "&k=" and "&id".
  linfk_pos <- lapply(fishbase, grep, pattern = "loo")
  linfk <- list()
  for(i in seq_along(fishbase)){
    linfk[[i]] <- fishbase[[i]][linfk_pos[[i]]]
  }
  linf_start <- lapply(lapply(linfk, str_locate, pattern = "&loo="), function(x) x[, 2] + 1)
  linf_end <- lapply(lapply(linfk, str_locate, pattern = "&k="), function(x) x[, 1] - 1)
  linf <- list()
  for(i in seq_along(linfk)){
    linf[[i]] <- str_sub(linfk[[i]], start = linf_start[[i]], end = linf_end[[i]])
  }
  k_start <- lapply(lapply(linfk, str_locate, pattern = "&k="), function(x) x[, 2] + 1)
  k_end <- lapply(lapply(linfk, str_locate, pattern = "&id"), function(x) x[, 1] - 1)
  k <- list()
  for(i in seq_along(linfk)){
    k[[i]] <- str_sub(linfk[[i]], start = k_start[[i]], end = k_end[[i]])
  }
  # Extract country and locality: each record spans 14 <td> cells; country
  # is the 11th and locality the 12th.
  col_pos <- lapply(fishbase, grep, pattern = "<td>")
  col_length <- sapply(col_pos, length)
  country_pos <- lapply(col_length, seq, from = 11, by = 14)
  for(i in seq_along(country_pos)){
    country_pos[[i]] <- col_pos[[i]][country_pos[[i]]]
  }
  country <- list()
  for(i in seq_along(fishbase)){
    country[[i]] <- fishbase[[i]][country_pos[[i]]]
  }
  country <- lapply(country, str_replace_all, pattern = "\t\t\t\t<td>", replacement = "")
  country <- lapply(country, str_replace_all, pattern = "</td>", replacement = "")
  locality_pos <- lapply(col_length, seq, from = 12, by = 14)
  for(i in seq_along(locality_pos)){
    locality_pos[[i]] <- col_pos[[i]][locality_pos[[i]]]
  }
  locality <- list()
  for(i in seq_along(fishbase)){
    locality[[i]] <- fishbase[[i]][locality_pos[[i]]]
  }
  locality <- lapply(locality, str_replace_all, pattern = "\t\t\t\t<td>", replacement = "")
  locality <- lapply(locality, str_replace_all, pattern = "</td>", replacement = "")
  # Sanity check: all per-species vectors must have matching lengths.
  if(any(c(sapply(linf, length) == sapply(k, length), sapply(linf, length) == sapply(country, length), sapply(linf, length) == sapply(locality, length)) == FALSE)){
    stop("This should not have happened. Contact package development team.")
  }
  # One output row per growth record; repeat the species name accordingly.
  rep_names <- sapply(linf, length)
  names <- rep(ids[[2]], times = rep_names)
  result <- data.frame(species = names, country = unlist(country), locality = unlist(locality), linf = unlist(linf), k = unlist(k))
  return(result)
}
|
7f8a12b393fcf41a32930db8b9877383adb77590
|
835617872c76c175c275f6835fdf6998ab728b92
|
/run_analysis.R
|
3a4e5b91615eb828ce4422145c873a6811180c6a
|
[] |
no_license
|
analyticsexpertise/GettingAndCleaningDataProject
|
53e024539606fe9fdfcf4c2b9ef9df03878f103a
|
d8926e8843196808654c3703279527e62d13e41a
|
refs/heads/master
| 2016-09-05T18:01:03.443505
| 2015-02-23T00:11:12
| 2015-02-23T00:11:12
| 31,187,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,164
|
r
|
run_analysis.R
|
## run_analysis.R
## Mark Stephens
## Getting and Cleaning Data
## Coursera Course Project
## 02/22/2015
require(dplyr)
require(plyr)
require(reshape2)
require(data.table)
## This script performs the following operations:
## 1. Merges the training and the test sets to create one data set
## 2. Extracts only the mean, mean frequency, and standard deviation measurements from the merged data set
## 3. Applies descriptive names to name the activities in the data set
## 4. Lables the data set with descriptive variable names
## 5. Creates a tidy data set containing the avearge of each variable for each activity and each subject
## Refer to Readme.md for understanding of script operations
## Refer to Codebook.md for code book describing the variables
## This script assumes the following data files are located in the working directory:
## Test Set: X_test.txt, y_test.txt, subject_test.txt
## Training Set: X_train.txt, y_train.txt, subject_train.txt
## Activity Labels: activity_labels.txt
run_analysis <- function(){
  # Pipeline driver: each step feeds the next; the final result is the tidy
  # per-subject, per-activity table of variable averages.
  dataset <- MergeDataSets()           # Step 1: merge test + training sets
  dataset <- ExtractMeasures(dataset)  # Step 2: keep mean/meanFreq/std columns
  dataset <- ApplyNames(dataset)       # Step 3: attach descriptive activity names
  dataset <- LabelVars(dataset)        # Step 4: descriptive variable names/order
  CreateTidy(dataset)                  # Step 5: average per subject and activity
}
## Step 1. - Merge data sets
MergeDataSets <- function(){
  # Reads the three files of each set (subject ids, activity codes, feature
  # measurements), binds them column-wise per set, then stacks the test set
  # on top of the training set. The column order (subject, activity,
  # measures) is what the later pipeline steps rely on.

  # Test set
  subjects_te <- read.table("./subject_test.txt", header = FALSE)
  activities_te <- read.table("./y_test.txt", header = FALSE)
  measures_te <- read.table("./X_test.txt", header = FALSE)

  # Training set
  subjects_tr <- read.table("./subject_train.txt", header = FALSE)
  activities_tr <- read.table("./y_train.txt", header = FALSE)
  measures_tr <- read.table("./X_train.txt", header = FALSE)

  rbind(
    cbind(subjects_te, activities_te, measures_te, deparse.level = 0),
    cbind(subjects_tr, activities_tr, measures_tr, deparse.level = 0)
  )
}
## Step 2. - Extract mean, mean frequency, standard deviation
ExtractMeasures <- function(datatable) {
  # Keeps subject (1), activity (2) and the 79 feature columns holding mean,
  # mean-frequency or standard-deviation measurements. The positions are
  # fixed by the layout of the X_*.txt feature files.
  keep <- c(
    1, 2,
    3:8, 43:48, 83:88, 123:128, 163:168,
    203, 204, 216, 217, 229, 230, 242, 243, 255, 256,
    268:273, 296:298, 347:352, 375:377, 426:431, 454:456,
    505, 506, 515, 518, 519, 528, 531, 532, 541, 544, 545, 554
  )
  datatable[, keep]
}
## Step 3. - Apply descriptive names to name the activities in the data set
ApplyNames <- function(datatable){
  ## Pass the extract data set from step 2 to this function.
  ## activity_labels.txt: column 1 = activity id, column 2 = descriptive name.
  ## `V1.1` is the activity-id column of `datatable` — that name is produced
  ## by make.unique() when MergeDataSets() cbinds three V1-named data frames.
  ## NOTE(review): `activity_names[V1.1, 2]` indexes rows by activity id,
  ## which assumes the ids are exactly the row positions 1..n of
  ## activity_labels.txt — TODO confirm the file's ordering.
  activity_names <- read.table("./activity_labels.txt",header=FALSE)
  # Appends ACTIVITY_NAME as the last column.
  return(mutate(datatable,ACTIVITY_NAME = activity_names[V1.1,2]))
}
## Step 4. - Label the data set with descriptive variable names
LabelVars <- function(datatable){
  # Renames the 82 columns kept by steps 2-3 and reorders them so related
  # measures sit together. `new_names` must match the column order produced
  # by ExtractMeasures()/ApplyNames(); `new_name_order` lists the same names
  # in the desired final order.
  old_names <- names(datatable)
  new_names <- c("SubjectNumber",
                 "ActivityNumber",
                 "TimeBodyAccMeanX",
                 "TimeBodyAccMeanY",
                 "TimeBodyAccMeanZ",
                 "TimeBodyAccStdX",
                 "TimeBodyAccStdY",
                 "TimeBodyAccStdZ",
                 "TimeGravityAccMeanX",
                 "TimeGravityAccMeanY",
                 "TimeGravityAccMeanZ",
                 "TimeGravityAccStdX",
                 "TimeGravityAccStdY",
                 "TimeGravityAccStdZ",
                 "TimeBodyAccJerkMeanX",
                 "TimeBodyAccJerkMeanY",
                 "TimeBodyAccJerkMeanZ",
                 "TimeBodyAccJerkStdX",
                 "TimeBodyAccJerkStdY",
                 "TimeBodyAccJerkStdZ",
                 "TimeBodyGyroMeanX",
                 "TimeBodyGyroMeanY",
                 "TimeBodyGyroMeanZ",
                 "TimeBodyGyroStdX",
                 "TimeBodyGyroStdY",
                 "TimeBodyGyroStdZ",
                 "TimeBodyGyroJerkMeanX",
                 "TimeBodyGyroJerkMeanY",
                 "TimeBodyGyroJerkMeanZ",
                 "TimeBodyGyroJerkStdX",
                 "TimeBodyGyroJerkStdY",
                 "TimeBodyGyroJerkStdZ",
                 "TimeBodyAccMagMean",
                 "TimeBodyAccMagStd",
                 "TimeGravityAccMagMean",
                 "TimeGravityAccMagStd",
                 "TimeBodyAccJerkMagMean",
                 "TimeBodyAccJerkMagStd",
                 "TimeBodyGyroMagMean",
                 "TimeBodyGyroMagStd",
                 "TimeBodyGyroJerkMagMean",
                 "TimeBodyGyroJerkMagStd",
                 "FreqBodyAccMeanX",
                 "FreqBodyAccMeanY",
                 "FreqBodyAccMeanZ",
                 "FreqBodyAccStdX",
                 "FreqBodyAccStdY",
                 "FreqBodyAccStdZ",
                 "FreqBodyAccMeanFreqX",
                 "FreqBodyAccMeanFreqY",
                 "FreqBodyAccMeanFreqZ",
                 "FreqBodyAccJerkMeanX",
                 "FreqBodyAccJerkMeanY",
                 "FreqBodyAccJerkMeanZ",
                 "FreqBodyAccJerkStdX",
                 "FreqBodyAccJerkStdY",
                 "FreqBodyAccJerkStdZ",
                 "FreqBodyAccJerkMeanFreqX",
                 "FreqBodyAccJerkMeanFreqY",
                 "FreqBodyAccJerkMeanFreqZ",
                 "FreqBodyGyroMeanX",
                 "FreqBodyGyroMeanY",
                 "FreqBodyGyroMeanZ",
                 "FreqBodyGyroStdX",
                 "FreqBodyGyroStdY",
                 "FreqBodyGyroStdZ",
                 "FreqBodyGyroMeanFreqX",
                 "FreqBodyGyroMeanFreqY",
                 "FreqBodyGyroMeanFreqZ",
                 "FreqBodyAccMagMean",
                 "FreqBodyAccMagStd",
                 "FreqBodyAccMagMeanFreq",
                 "FreqBodyAccJerkMagMean",
                 "FreqBodyAccJerkMagStd",
                 "FreqBodyAccJerkMagMeanFreq",
                 "FreqBodyGyroMagMean",
                 "FreqBodyGyroMagStd",
                 "FreqBodyGyroMagMeanFreq",
                 "FreqBodyGyroJerkMagMean",
                 "FreqBodyGyroJerkMagStd",
                 "FreqBodyGyroJerkMagMeanFreq",
                 "ActivityName"
                 )
  # NOTE(review): data.table::setnames is applied while `datatable` is still
  # a data.frame (converted to a data.table only below) — works, but renames
  # by reference.
  setnames(datatable,old=old_names,new=new_names)
  # Desired final column order: the three id columns first, then measures
  # grouped by signal (time-domain body/gravity, then frequency-domain).
  new_name_order <- c("SubjectNumber",
                      "ActivityNumber",
                      "ActivityName",
                      "TimeBodyAccMeanX",
                      "TimeBodyAccMeanY",
                      "TimeBodyAccMeanZ",
                      "TimeBodyAccStdX",
                      "TimeBodyAccStdY",
                      "TimeBodyAccStdZ",
                      "TimeBodyAccMagMean",
                      "TimeBodyAccMagStd",
                      "TimeBodyAccJerkMeanX",
                      "TimeBodyAccJerkMeanY",
                      "TimeBodyAccJerkMeanZ",
                      "TimeBodyAccJerkStdX",
                      "TimeBodyAccJerkStdY",
                      "TimeBodyAccJerkStdZ",
                      "TimeBodyAccJerkMagMean",
                      "TimeBodyAccJerkMagStd",
                      "TimeBodyGyroMeanX",
                      "TimeBodyGyroMeanY",
                      "TimeBodyGyroMeanZ",
                      "TimeBodyGyroStdX",
                      "TimeBodyGyroStdY",
                      "TimeBodyGyroStdZ",
                      "TimeBodyGyroJerkMeanX",
                      "TimeBodyGyroJerkMeanY",
                      "TimeBodyGyroJerkMeanZ",
                      "TimeBodyGyroJerkStdX",
                      "TimeBodyGyroJerkStdY",
                      "TimeBodyGyroJerkStdZ",
                      "TimeBodyGyroMagMean",
                      "TimeBodyGyroMagStd",
                      "TimeBodyGyroJerkMagMean",
                      "TimeBodyGyroJerkMagStd",
                      "TimeGravityAccMeanX",
                      "TimeGravityAccMeanY",
                      "TimeGravityAccMeanZ",
                      "TimeGravityAccStdX",
                      "TimeGravityAccStdY",
                      "TimeGravityAccStdZ",
                      "TimeGravityAccMagMean",
                      "TimeGravityAccMagStd",
                      "FreqBodyAccMeanX",
                      "FreqBodyAccMeanY",
                      "FreqBodyAccMeanZ",
                      "FreqBodyAccStdX",
                      "FreqBodyAccStdY",
                      "FreqBodyAccStdZ",
                      "FreqBodyAccMagMean",
                      "FreqBodyAccMagStd",
                      "FreqBodyAccMagMeanFreq",
                      "FreqBodyAccMeanFreqX",
                      "FreqBodyAccMeanFreqY",
                      "FreqBodyAccMeanFreqZ",
                      "FreqBodyAccJerkMeanX",
                      "FreqBodyAccJerkMeanY",
                      "FreqBodyAccJerkMeanZ",
                      "FreqBodyAccJerkStdX",
                      "FreqBodyAccJerkStdY",
                      "FreqBodyAccJerkStdZ",
                      "FreqBodyAccJerkMagMean",
                      "FreqBodyAccJerkMagStd",
                      "FreqBodyAccJerkMeanFreqX",
                      "FreqBodyAccJerkMeanFreqY",
                      "FreqBodyAccJerkMeanFreqZ",
                      "FreqBodyAccJerkMagMeanFreq",
                      "FreqBodyGyroMeanX",
                      "FreqBodyGyroMeanY",
                      "FreqBodyGyroMeanZ",
                      "FreqBodyGyroMagMean",
                      "FreqBodyGyroStdX",
                      "FreqBodyGyroStdY",
                      "FreqBodyGyroStdZ",
                      "FreqBodyGyroMagStd",
                      "FreqBodyGyroMeanFreqX",
                      "FreqBodyGyroMeanFreqY",
                      "FreqBodyGyroMeanFreqZ",
                      "FreqBodyGyroMagMeanFreq",
                      "FreqBodyGyroJerkMagMean",
                      "FreqBodyGyroJerkMagStd",
                      "FreqBodyGyroJerkMagMeanFreq"
                      )
  # Convert to data.table so CreateTidy() can use data.table grouping.
  datatable <- data.table(datatable)
  ## reorder names for tidier data
  setcolorder(datatable,new_name_order)
  return(datatable)
}
## Step 5. Create Tidy data set containing the average of each variable for each activity and each subject
CreateTidy <- function(datatable){
  # Averages every measurement column per (subject, activity) pair and
  # returns the result as a data.table with SubjectNumber and ActivityName
  # first, followed by the measures in the column order set by LabelVars().
  #
  # BUG FIX: the previous version listed all ~80 mean() calls by hand and
  # computed TimeGravityAccMagStd as mean(TimeBodyAccMeanX) — a copy-paste
  # error. Averaging over .SD removes the duplication and guarantees every
  # output column averages its own input column.
  measure_cols <- setdiff(
    names(datatable),
    c("SubjectNumber", "ActivityNumber", "ActivityName")
  )
  data_tidy <- datatable[
    ,
    lapply(.SD, mean, na.rm = TRUE),
    by = .(SubjectNumber, ActivityName),
    .SDcols = measure_cols
  ]
  return(data_tidy)
}
|
6e1edb3bc117baafdd23897b98cd655062c03156
|
12fa376b23d0fdf46aadbf32c5a883dbff91f6ec
|
/run_analysis.R
|
0417195e66b7ee14651b4ebbb07fc45fa00a0aa5
|
[] |
no_license
|
Steeffan/Getting-and-Cleaning-Data-Project
|
07a3773c8560718eb71697029304703c97bc09a8
|
48e862757e3e5e7236d0d6e8b52372c1d890e3f8
|
refs/heads/master
| 2020-05-01T00:37:15.453231
| 2015-02-21T17:30:20
| 2015-02-21T17:30:20
| 31,068,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,287
|
r
|
run_analysis.R
|
##########################
## Settings and options ##
##########################
## Tidy-data pipeline for the UCI HAR (smartphone accelerometer) dataset:
## merges train/test sets, keeps mean/std features, attaches activity labels,
## cleans variable names, and exports per-activity/per-subject averages.
## Please, set the working directory on the UCI HAR Dataset directory that contains all the data
## NOTE(review): machine-specific setwd(); adjust this path before running.
setwd("~/Coursera/Getting and Cleaning/Project/UCI HAR Dataset")
## Export dataset
## if TRUE export the averageData into the export File
exportData <- TRUE
exportFile <- "averageData.txt"
#####################################################################
## 1.Merges the training and the test sets to create one data set. ##
#####################################################################
## Loads test data
## comment.char = "" speeds up read.table on these large numeric files.
X_test <- read.table("./test/X_test.txt", comment.char = "")
y_test <- read.table("./test/y_test.txt", col.names = "activityid", colClasses="factor", comment.char = "")
subject_test <- read.table("./test/subject_test.txt", col.names = "subject", colClasses="factor", comment.char = "")
## Loads train data
X_train <- read.table("./train/X_train.txt", comment.char = "")
y_train <- read.table("./train/y_train.txt", col.names = "activityid", colClasses="factor", comment.char = "")
subject_train <- read.table("./train/subject_train.txt", col.names = "subject", colClasses="factor", comment.char = "")
## Merges all the training and tests data in one dataset
## (column order: activityid, subject, then the 561 feature columns)
dataSet <- cbind(
rbind(y_test, y_train)
,rbind(subject_test, subject_train)
,rbind(X_test, X_train)
)
###############################################################################################
## 2.Extracts only the measurements on the mean and standard deviation for each measurement. ##
###############################################################################################
## Load features
features <- read.table("features.txt", col.names = c("featureid","feature"), stringsAsFactors=FALSE)
## Extract names with mean, meanFreq and std in them
## (the "()" in the pattern excludes angle-related features such as "angle(...)")
featuresFilter <- features[grep("-(mean|meanFreq|std)\\(\\)", features$feature),]
## Subset columns of the dataSet to keep only mean and standard deviation measures
dataSet <- dataSet[,c(1,2,featuresFilter[,1]+2)] ## Add +2 because two first columns are activityID and subjectID
################################################################################
## 3. Uses descriptive activity names to name the activities in the data set. ##
################################################################################
## Load activities
activities <- read.table("activity_labels.txt", col.names = c("activityid","activity"), colClasses=c("factor","factor"))
## Merge dataSet and activities to get the activity label
## NOTE(review): merge() does not preserve the original row order — fine here
## because the data is aggregated later, but confirm if row order ever matters.
dataSet <- merge(activities,dataSet,by="activityid")
## Check if there are any NA values in the data set (informative only)
if (sum(sapply(dataSet, function(x) sum(is.na(x))))==0) {
message("No NA values in the data set")
} else {
message("There are NA values in the data set, you should clean it")
}
###########################################################################
## 4. Appropriately labels the data set with descriptive variable names. ##
###########################################################################
## lower all letters in the left name part (before symbol "-") and no change in the right name part (after symbol "-")
featuresFilter$featureClean <- tolower(featuresFilter$feature)
## Define the replacement rule names (eg : bodybody -> body, -mean() -> Mean, etc...)
## Each row is a (regex, replacement) pair applied once per feature name.
ruleNames <- data.frame(
searchString=c("bodybody","-mean\\(\\)$","-meanfreq\\(\\)$","-std\\(\\)$"
,"-mean\\(\\)-","-meanfreq\\(\\)-","-std\\(\\)-","x$","y$","z$")
,replaceString=c("body","Mean","Meanfreq","Std","Mean","Meanfreq","Std","X","Y","Z")
)
## Apply the replacement rule names
## sub() (not gsub): each rule is expected to match at most once per name.
for (i in 1:nrow(ruleNames)) {
searchString <- as.character(ruleNames$searchString[[i]])
replaceString <- as.character(ruleNames$replaceString[[i]])
featuresFilter$featureClean <- sub(searchString, replaceString, featuresFilter$featureClean)
}
## Control that all column names are unique else throw an error
if (length(featuresFilter$featureClean)==length(unique(featuresFilter$featureClean))) {
message("Column Names are unique.")
} else {
stop("Column names are not unique!!!")
}
## Label the dataset with descriptive names
names(dataSet)[-c(1,2,3)] <- featuresFilter$featureClean ## 3 first columns are not features
###################################################################################################################
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable ##
## for each activity and each subject. ##
###################################################################################################################
averageData <- aggregate(dataSet[,-c(1,2,3)],list(activity=dataSet$activity,subject=dataSet$subject),mean,na.rm=TRUE)
####################################################################
## Output, export the dataframe averageData in the exportFile ##
####################################################################
if (exportData) {
write.table(averageData,exportFile,row.names=FALSE)
}
|
fe939e9ec489e63b3acc38998f736f2704588c1e
|
710868a96ec9172a3533e46443348addf08846da
|
/R/case.R
|
c6454016ee0fd6f73267e8666a224f95f63fcd05
|
[] |
no_license
|
cran/gdata
|
c12b7e7f59f885d31aec721c9bd5c6b514bcb1c1
|
21146034feeea59ee869df28e04114b3049481f8
|
refs/heads/master
| 2023-05-13T06:46:53.031948
| 2023-05-05T22:00:02
| 2023-05-05T22:00:02
| 17,696,299
| 4
| 3
| null | 2022-10-05T14:49:48
| 2014-03-13T04:49:01
|
Perl
|
UTF-8
|
R
| false
| false
| 415
|
r
|
case.R
|
# Map values of `x` onto labels given as named alternatives.
#
# Usage: case(x, label1 = "value1", label2 = "value2", default = ...)
# Each element of `x` equal to one of the alternative *values* is replaced by
# the corresponding *name*; anything else becomes `default`.
#
# Returns a factor whose levels are the alternative names plus the default.
case <- function(x, ..., default = NA) {
  # Sentinel level that cannot collide with any real alternative value.
  sentinel <- "....default...."
  lookup <- c(..., "....default...." = sentinel)
  chr <- as.character(x)
  # factor(): levels are the target values, labels are the alternative names,
  # so matching values get relabelled in one step.
  out <- factor(chr, levels = lookup, labels = names(lookup))
  # Rename the sentinel level to the (stringified) default.
  levels(out)[length(lookup)] <- as.character(default)
  # Non-missing inputs that matched no alternative fall back to the default.
  out[is.na(out) & !is.na(chr)] <- default
  out
}
|
1b31690adff131b90fa7cae15def4d7ce7ffd2bb
|
c822df0d9fb31f76939e2190149764afb66ae593
|
/gapminder.R
|
2b10baa08ee488d5243df79b88ef2382503a7b35
|
[] |
no_license
|
sjwallace06/SoftwareCarpentryWorkshop
|
dd454c2365797812ffbcfee8fac0aef54922b1be
|
ecb2b27ff9114cd7b9e31c3402545687c8df611f
|
refs/heads/master
| 2021-01-20T16:55:19.675247
| 2017-02-22T20:25:38
| 2017-02-22T20:25:38
| 82,836,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 969
|
r
|
gapminder.R
|
# Exploratory session on the gapminder dataset: structure and summaries,
# NA handling on a small vector, logical/char subsetting, and per-country
# GDP computation. Bare expressions print when run interactively.
library(gapminder)
gm <- gapminder
str(gm)
colnames(gm)
dim(gm)
summary(gm)
mean(gm$gdpPercap)
sum(gm$pop)
sum(as.numeric(gm$pop))  # cast to double before summing the population column
sd(gm$gdpPercap)
# NA propagation and removal on a toy numeric vector
vals <- c(1, 5, 10, 15, 3, 5, 67, NA, NA)
vals
vals >= 10
vals[vals >= 10]
mean(vals)
vals[is.na(vals)]
vals[!is.na(vals)]
mean(vals[!is.na(vals)])
mean(vals, na.rm = TRUE)
# character comparison and subsetting
chars <- c("a", "b", "c", "a")
chars == "a"
chars[chars == "a"]
chars[!chars == "a"]
head(gm)
tail(gm)
gm[1000:1005, ]
gm
gm$country == "Canada"
gm[gm$country == "Canada", ]
gm
gm$continent == "Asia"
asia_rows <- gm[gm$continent == "Asia", ]
unique(asia_rows$continent)
unique(asia_rows$country)
chars %in% c("a", "cheescake")
chars[chars %in% c("a", "cheescake")]
gm
# total GDP (in millions) for three selected countries
picked <- gm[gm$country %in% c("China", "Canada", "Cambodia"), ]
picked$gdp <- picked$gdpPercap * picked$pop/1000000
picked
unique(picked$country)
mean(gm$gdpPercap[(gm$country %in% c("Canada", "China", "Cambodia"))])
|
74b6ab1b7693a89c11d420cb358b7f7606f13e81
|
1be5cfcde0acfe3ac780d0a091c34a736aee502a
|
/Models/Analysis/profile.R
|
5c5d17263fab6d509779b4dd3d0abac1c1b648ff
|
[] |
no_license
|
JusteRaimbault/UrbanGrowth
|
b74e7f722a2b28fbbfdace7c7958755fed7710f8
|
aeff2db8797726f198f1bd09c66c0893f5321eed
|
refs/heads/master
| 2023-08-07T22:04:26.835342
| 2023-07-20T11:01:09
| 2023-07-20T11:01:09
| 128,037,870
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,206
|
r
|
profile.R
|
# Plot a 1-D calibration profile (log-MSE vs gravityDecay) from OpenMOLE
# PROFILE results of the urban growth model.
# Requires the CS_HOME environment variable to point at the project root.
setwd(paste0(Sys.getenv('CS_HOME'),'/UrbanGrowth/Models/urbangrowth/openmole/calibration'))
library(dplyr)
library(ggplot2)
# project helpers: stdtheme (plot theme) comes from plots.R
source(paste0(Sys.getenv('CS_HOME'),'/Organisation/Models/Utils/R/plots.R'))
source(paste0(Sys.getenv('CS_HOME'),'/UrbanGrowth/Models/Analysis/functions.R'))
# parameters : where calibration results are stored and where to store result figures
# (earlier run kept commented out for reference)
#sourcedir = 'PROFILE_GRID_intgib_BR_20181219_150953/'
sourcedir = 'PROFILE_GRID_intgib_BR_20181221_103649/'
resdir = paste0(Sys.getenv('CS_HOME'),'/UrbanGrowth/Results/Calibration/',sourcedir);dir.create(resdir)
# load the profile at a given NSGA-II generation (earlier generation commented out)
#res=as.tbl(read.csv(file=paste0(sourcedir,'population6899.csv')))
res=as.tbl(read.csv(file=paste0(sourcedir,'population20000.csv')))
# optional zoom on gravityDecay <= 1000 kept commented out
# g=ggplot(res[res$gravityDecay<=1000,],aes(x=gravityDecay,y=logmse))
g=ggplot(res,aes(x=gravityDecay,y=logmse))
g+geom_point()+geom_line()+stdtheme
ggsave(file=paste0(resdir,'profile_logmse-gravityDecay_gen20000.png'),width=15,height = 10,units='cm')
# Q :
# - more precise profile in the [0,100] interval ? -> relaunch more precise
# -> 0 : exp(-d/d0) -> 0 : Gibrat model : if is better, model does not improve ?
#
# - difference with grid results ? try a parcimonious grid
|
de61f6d2758e354348ea261640b2c8ce114da673
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/imp4p/examples/gen_cond.Rd.R
|
a84b6200b5abe9cd6c554c42ac9fb4e5417cef8a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
gen_cond.Rd.R
|
# Example from the imp4p package: build a condition-membership vector.
library(imp4p)
### Name: gen.cond
### Title: Function allowing to create a vector indicating the membership
### of each sample to a condition.
### Aliases: gen.cond
### Keywords: Simulated data
### ** Examples
# Two conditions with six samples each -> factor of length 12.
# (was `cond=gen.cond(...)`: use `<-` for assignment and space the arguments)
cond <- gen.cond(nb_cond = 2, nb_sample = 6)
#[1] 1 1 1 1 1 1 2 2 2 2 2 2
#Levels: 1 2
|
014bd044cb47eac13752bff84b1e86a349cca278
|
410a8d842e07781b23fac1912d30cb844ed1ca69
|
/R/trainModels.R
|
b669af316da3ba4ea4ad0f0efe21686d480f79e2
|
[
"BSD-2-Clause"
] |
permissive
|
pbalapra/mlrMBO
|
70313b3661853c581f91239debada7ca060c3bfb
|
5a279271f427170f78f546756115a8b9b732e679
|
refs/heads/master
| 2021-01-19T21:23:51.319804
| 2017-05-07T23:00:08
| 2017-05-07T23:00:08
| 88,652,344
| 0
| 0
| null | 2017-04-18T17:31:22
| 2017-04-18T17:31:22
| null |
UTF-8
|
R
| false
| false
| 469
|
r
|
trainModels.R
|
# Train one model per task with the given learner.
#
# Args:
#   learner: mlr learner used for every task.
#   tasks: list of mlr tasks; one model is fitted per task.
#   control: control object (only used by the commented-out multifid branch).
#
# Returns:
#   list with `models` (fitted models, same length/order as `tasks`; entries
#   stay NULL past the point of failure) and `train.time` (total wall-clock
#   training time in seconds, or NA if training failed).
trainModels <- function(learner, tasks, control) {
  # if (control$multifid)
  #   learner = makeMultiFidWrapper(learner, control)
  models <- vector("list", length(tasks))
  secs <- NA_real_
  tryCatch({
    start.time <- Sys.time()
    for (i in seq_along(models)) {
      models[[i]] <- train(learner, tasks[[i]])
    }
    end.time <- Sys.time()
    # Force seconds explicitly: `end.time - start.time` returns a difftime
    # whose units are chosen automatically (minutes/hours for long runs),
    # which would silently change the scale of `train.time`.
    secs <- as.numeric(difftime(end.time, start.time, units = "secs"))
  }, error = function(e) {
    # Report the failure but return partial results instead of aborting.
    print(e)
  })
  list(models = models, train.time = secs)
}
|
0ced8738cf16da0fdae08c65c3f60794365ff764
|
3c324a39b86bb126f3481477e9e08bdf8c5b1aba
|
/bfacs.R
|
ff324ff49b7bc4d1e07261ec4c6457aa882996c2
|
[] |
no_license
|
Joannavonberg/R-scripts
|
6f6999ba58284f2a8679a181e8753798d01f29b9
|
f69958f55fb1c2d7f6ec76c7483e2f801cca2597
|
refs/heads/master
| 2021-01-09T20:45:44.242180
| 2016-07-28T12:13:33
| 2016-07-28T12:13:33
| 63,954,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,651
|
r
|
bfacs.R
|
# NOTE(review): this header calls AllInOneFile()/RefAndMean()/ColourByChain()/
# PlotB() before their definitions below, so it only works once the functions
# are already in the workspace (interactive use) — confirm intended order.
options(stringsAsFactors=FALSE)  # fixed typo: was "stringsasFactors", which set a nonexistent option and had no effect
# Load per-chain C-alpha B-factors, attach reference + per-residue mean
# columns, and z-score everything column-wise.
d <- AllInOneFile("norottrans_bfac_CA_1.txt")
d <- RefAndMean(d, cryo=FALSE, avg=FALSE)
d <- data.frame(scale(d))
# One colour per chain, plus an appended alpha row (PlotB reads colours[4, ]).
# NB: `c` shadows base::c() here; later code relies on this name, so it is kept.
c <- ColourByChain()
c <- rbind(c, rep(220, 8))
#c <- ColourGradient("red")
PlotB(d, c, normalized=TRUE)
# Build an 8-step colour ramp as a 4 x 8 matrix.
# Rows 1-3 hold the R/G/B channels; row 4 is a brightness ramp from 100 to 255.
# With only `rgb1`, each channel is constant; with `rgb2`, channels 1-3
# interpolate linearly from rgb1 to rgb2 across the 8 steps.
ColourGradient <- function(rgb1, rgb2=NULL){
  ramp <- rbind(
    rep(rgb1[1], 8),
    rep(rgb1[2], 8),
    rep(rgb1[3], 8),
    seq(100, 255, length.out = 8)
  )
  if (!is.null(rgb2)) {
    for (ch in 1:3) {
      ramp[ch, ] <- seq(rgb1[ch], rgb2[ch], length.out = 8)
    }
  }
  ramp
}
# Read a whitespace-separated file of alternating (index, value) pairs and
# return a 129 x 8 data frame of B-factor values, one column per chain A-H.
# The file must contain exactly 2 * 129 * 8 numbers.
AllInOneFile <- function(fn){
  raw_vals <- scan(fn)
  # pair up the stream: row 1 = first of each pair, row 2 = the values we keep
  pairs <- matrix(raw_vals, nrow = 2)
  by_chain <- matrix(pairs[2, ], ncol = 8)
  out <- data.frame(by_chain)
  rownames(out) <- 1:129
  colnames(out) <- LETTERS[1:8]
  out
}
# Read one B-factor file per chain ("A_PC1_bfac_CA.txt" ... "H_PC1_bfac_CA.txt")
# from the working directory and return a 129 x 8 data frame (columns A-H).
# Each file holds alternating (index, value) pairs; only the values are kept.
SeveralFiles <- function(){
  per_chain <- lapply(LETTERS[1:8], function(chain) {
    pairs <- matrix(scan(paste(chain, "_PC1_bfac_CA.txt", sep = "")), nrow = 2)
    pairs[2, ]
  })
  dat <- data.frame(per_chain)
  rownames(dat) <- 1:129
  colnames(dat) <- LETTERS[1:8]
  dat
}
# Attach reference (crystal) B-factors and a per-residue mean column to `dat`.
#
# Args:
#   dat: data frame of per-chain B-factors (all columns numeric).
#   cryo: if TRUE read the cryo reference (2CGI), else the RT reference (4O34).
#   avg: if TRUE use the average-B files ("AvgB"), else C-alpha ("CA").
#
# Returns: `dat` with two extra columns: `mean` (row means over the existing
# columns) and `ref` (the reference B-factors).
#
# NOTE(review): reads from hard-coded absolute /work2 paths — only usable on
# the original cluster.
RefAndMean <- function(dat, cryo, avg){
if(avg){type <- "AvgB"} else{type <- "CA"}
if(cryo){
tmp <- scan(sprintf("/work2/berg/Simulations/2CGI/Bfacs/2CGI_%s.txt", type), what = "numeric")
}
else{
tmp <- scan(sprintf("/work2/berg/Simulations/Unit_Cells/Ref_bfacs/4O34_RT_%s.txt", type), what = "numeric")
}
# The file stream alternates pairs; only the second entry of each pair is kept
# (presumably residue index / B-factor — confirm against the data files).
# what = "numeric" is a character string, so scan() returns character here;
# hence the as.numeric() conversion below.
mat <- matrix(tmp, nrow = 2)
vec <- c(mat[2,])
vec <- as.numeric(vec)
# Row means are computed before `ref` is appended, so `ref` is excluded.
dat$mean <- apply(dat, 1, mean)
dat$ref <- vec
dat
}
# Pick eight "dark" colours, one per monomer chain.
# Returns a 3 x 8 RGB matrix (rows red/green/blue, values 0-255).
# Uses sample(), so the palette differs between calls unless the caller
# fixes the RNG seed first.
ColourByChain <- function(){
  rgb_all <- col2rgb(colors())
  # keep colours where at least one channel is below 150 (not too pale)
  dark_mask <- rgb_all[1, ] < 150 | rgb_all[2, ] < 150 | rgb_all[3, ] < 150
  dark_set <- rgb_all[, dark_mask]
  # thin the candidates to every 40th colour so neighbours differ clearly
  dark_set <- dark_set[, seq(1, length(dark_set[1, ]), 40)]
  chosen <- sample(1:length(dark_set[1, ]), size = 8)
  dark_set[, chosen]
}
# Plot per-chain B-factor profiles plus reference and mean traces.
#
# Args:
#   dat: data frame with one column per chain (A..H in columns 1-8) plus
#        `ref` and `mean` columns (as produced by AllInOneFile + RefAndMean).
#   colours: 4 x 8 matrix — per-chain RGB in rows 1-3 and an alpha value in
#            row 4. NOTE: ColourByChain() returns only 3 rows; the caller
#            must append the alpha row (see the script header).
#   main: plot title.
#   save: if TRUE, write the figure to a hard-coded PDF file.
#   normalized: selects the y-axis range (z-scored vs raw B-factors).
#
# Side effects: draws on the active graphics device (or opens/closes a PDF).
PlotB <- function(dat, colours, main="test", save=FALSE, normalized=TRUE){
if (save){
# NOTE(review): pdf() width/height are in inches; 480*2.4 looks like pixel
# dimensions copied from png() — confirm the intended page size.
pdf("norm_bfactors_300K_NVT_cryodim.pdf", width=480*2.4, height=480*2)
}
if(normalized){
# z-scored B-factors
yl=c(-2, 7)
}
else{
# raw B-factors in A^2
yl=c(0,250)
}
lw=1
# extra right margin + xpd=TRUE so the legend can sit outside the plot region
par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
plot(dat$A, main=main, xlab="residues", ylab="B-factor (A^2)", col=rgb(t(colours[,1]), alpha=colours[4,1], maxColorValue = 255), type="l", pch=19, cex=2, lwd=lw, ylim=yl)
# overlay the remaining seven chains
for (n in 2:8){
lines(dat[,n], col=rgb(t(colours[,n]), alpha=colours[4,n], maxColorValue = 255), pch=19, cex=2, lwd=lw)
}
# reference and mean traces drawn slightly thicker
lines(dat$ref, col="red", lwd=lw+1)
lines(dat$mean, col="blue", lwd=lw+1)
legend("topright", inset=c(-0.1,0), c(paste("chain ", LETTERS[1:8]), "reference", "mean"), col= c(rgb(t(colours[]), alpha=colours[4,], maxColorValue = 255), "red", "blue"), lwd = c(rep(lw, 8), rep(lw+1,2)))
if (save){
dev.off()
}
}
# NOTE(review): this tail section looks like leftover interactive code —
# `dat`, `lw` and `colours` are not defined at the top level (`dat` and `lw`
# exist only inside the functions above; the script header defines `d` and
# `c` instead), so sourcing these lines as-is fails. Confirm whether they
# should read `d`, define a local `lw`, and use `c` before relying on them.
# NOTE(review): pdf() width/height are in inches; 480*2.4 looks like pixel
# dimensions copied from png() — confirm the intended page size.
pdf("bfactors_300K_NVT_cryodim.pdf", width=480*2.4, height=480*2)
par(mar=c(5.1, 8.1, 4.1, 8.1), xpd=TRUE)
plot(dat$A, main="C-alpha B-factors of PCA results \n combined PCA on 8*50 ns of 300 K NVT simulation (cryo-dimensions)", xlab="residues", ylab="B-factor (A^2)", type="l", pch=19, cex=2, lwd=2, ylim=c(0, 90), col=rgb(t(c[,4]), alpha=c[4,4], maxColorValue = 255))
lines(dat$ref, col="red", lwd=lw+1)
legend("topright", inset=c(-0.1,0), c("PC 1 to 5", "reference"), col= c(rgb(t(c[,4]), alpha=colours[4,4], maxColorValue = 255), "red"), lwd = c(rep(lw, 8), rep(lw+1,2)))
dev.off()
|
6fa7872a2caabb130a0b18a3b8293648d0daf979
|
47bb2db5e59f6ba96533f7fd7106caa8c7ef9e5e
|
/man/make_filename.Rd
|
4f352b900dc4e4b1cbf1d182a41c3f34a1e15bfb
|
[] |
no_license
|
MartinPons/fars
|
ce9d5f5f57f275f626068e2512bc8f6498a925fa
|
9c51d782186c81f00cbe60172913e5318efd9d65
|
refs/heads/master
| 2021-01-12T06:33:32.806508
| 2016-12-27T23:49:41
| 2016-12-27T23:49:41
| 77,382,594
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 433
|
rd
|
make_filename.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{make_filename}
\alias{make_filename}
\title{Create a file name for a FARS year}
\usage{
make_filename(year)
}
\arguments{
\item{year}{four digit year either as a number or character string}
}
\value{
a character string matching a FARS filename
}
\description{
Constructs the file name of the FARS data file corresponding to the
supplied year.
}
\examples{
\dontrun{
make_filename(2013)
make_filename(2014)
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.