blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e3d8911025eee98644ad98f0707543cfbd2a548d
|
92e979e0e55cf88078795becc261a20850004acb
|
/man/simulate_sample.Rd
|
e2064228f3cee62a352a14a4967df58a2fe26b4a
|
[] |
no_license
|
rjnell/digitalPCRsimulations
|
c47becb1e19e2a3fb8f5056fc02f78d8c25e3f86
|
b341cf024b262cbc0cb77a6745c05b71247fd70d
|
refs/heads/master
| 2023-02-07T07:47:51.514474
| 2023-02-04T14:00:03
| 2023-02-04T14:00:03
| 295,371,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
simulate_sample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_sample.R
\name{simulate_sample}
\alias{simulate_sample}
\title{Simulate a sample: return a random selection of droplets.}
\usage{
simulate_sample(sample_size = 20000, universe = NULL)
}
\arguments{
\item{sample_size}{Number of droplets to sample. Defaults to 20000.}
\item{universe}{Universe of droplets from which the random selection is drawn.}
}
\value{
List with statistics.
}
\description{
Simulate a sample by returning a random selection of droplets from a universe.
}
\examples{
# Create two default universes simulating 50 ng DNA input.
}
|
815ba166e5200328427c9429d09b82c957920d3d
|
c7b52f3e9dece7611b1577332459157ad0a03aa1
|
/functions/input/read_jh_csse.R
|
a34557a3351706623d0b7fc76c960873166d7699
|
[] |
no_license
|
sueddeutsche/corona_automated_charts
|
e062dd3495ef6071c14de70407db6e42d3f16608
|
3093c9f3d25c39b262773161210cbb8f945c3f19
|
refs/heads/master
| 2021-04-07T19:29:12.734872
| 2020-03-20T08:25:28
| 2020-03-20T08:25:28
| 248,701,903
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,365
|
r
|
read_jh_csse.R
|
# Fetch current Johns Hopkins CSSE data from the dashboard's ArcGIS feature
# service, parse the JSON payload into `df_corona_current`, and derive the
# timestamp / source-credit variables used downstream. `successful` is set to
# TRUE only when the response body carries no error field (the service reports
# failures inside the JSON, not via HTTP status).
#
# NOTE(review): relies on httr, jsonlite, magrittr/dplyr, countrycode,
# `is_empty()` (rlang/purrr), `make_numeric()` and `sz_sum_germany` being
# provided by the calling environment -- confirm before running standalone.
successful <- FALSE
tryCatch(
  {
    # Retry the GET until one of the terminal status codes is returned.
    r <- httr::RETRY("GET", url = "https://services9.arcgis.com/N9p5hsImWXAccRNI/arcgis/rest/services/Z7biAeD8PAkqgmWhxG2A/FeatureServer/2/query?f=json&where=Confirmed%20%3E%200&returnGeometry=false&spatialRel=esriSpatialRelIntersects&outFields=*&orderByFields=Confirmed%20desc&resultOffset=0&resultRecordCount=200&cacheHint=true",
                     config = (httr::add_headers(
                       # The service only answers requests that look like they
                       # come from the public dashboard page.
                       Referer = "https://www.arcgis.com/apps/opsdashboard/index.html",
                       "sec-fetch-dest" = "empty",
                       "sec-fetch-mode" = "cors",
                       "sec-fetch-site" = "same-site",
                       "User-Agent" = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36"
                     )),
                     terminate_on = c(200, 201, 202, 203, 204))
    response <- jsonlite::fromJSON(httr::content(r, encoding = "UTF-8"))
    successful <- is_empty(response$error)
    df_corona_current <- response$features$attributes
    # Last_Update is epoch milliseconds; convert to a German-format timestamp.
    timestamp_jh_csse_max <- format(as.POSIXct(max(df_corona_current$Last_Update) / 1000, origin = "1970-01-01"), format = "%d.%m.%Y %H:%M Uhr")
    source_credit_hopkins_worldometer <- "Johns-Hopkins-Universität CSSE/SZ"
    source_credit_hopkins_worldometer_url <- "https://www.arcgis.com/apps/opsdashboard/index.html#/bda7594740fd40299423467b48e9ecf6"
  },
  error = function(cond) {
    message("There was an error when connecting to JH CSSE Dashboard")
    # Fixed typo in the skip notice ("JD CSSH" -> "JH CSSE").
    message("SKIPPING JH CSSE Dashboard in import!")
    message("Here's the original error message:")
    message(cond)
    # Choose a return value in case of error
    return(NA)
  }
)
# Prepare Johns Hopkins data: coerce the count columns to numeric, translate
# country names to German, and override the German case count with SZ's own
# aggregate (`sz_sum_germany`).
df_corona_current %>%
  mutate_at(c("Confirmed", "Deaths", "Recovered"), make_numeric) %>%
  select(Land = `Country_Region`, Infizierte = Confirmed, "Genesene" = Recovered, "Verstorbene" = Deaths) %>%
  mutate(Land = countrycode::countrycode(Land, origin = "country.name", destination = "country.name.de")) %>%
  mutate(Land = ifelse(Land == "Korea, Republik von", "Südkorea", Land)) %>%
  mutate(Infizierte = ifelse(Land == "Deutschland", sz_sum_germany, Infizierte)) -> df_corona_current
|
1ac00c14eebffaa56a9037c13f75d983b546113b
|
8aa9ba1acd76ee4cdc993b59dd9f72b1b22df2b2
|
/ui.R
|
7f7f9ecdbea402b9e570e5654f69a3d870f84841
|
[] |
no_license
|
tbenschop/AngularJS
|
a7493ec7361715d7fe88ce0bbd325cd62b4d15eb
|
f14aaeb2943482bc7e180ac092e8fd50e73a3b83
|
refs/heads/master
| 2021-01-13T02:51:18.441855
| 2016-12-22T20:52:47
| 2016-12-22T20:52:47
| 77,136,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 175
|
r
|
ui.R
|
# Shiny UI definition: a bootstrap page that embeds a static HTML fragment,
# offers a two-choice select input, and reserves a server-rendered UI slot.
library(shiny)

shinyUI(
  bootstrapPage(
    includeHTML("static2.html"),
    selectInput("abc", "abc", choices = c("a", "b")),
    uiOutput("abcd")
  )
)
|
d51037e11611a056e599ad797aae6dc471e8cd9e
|
e7e0ccce84c80113d7aba41458007dd42127a94c
|
/R/i_rise_gossling_hgdp.R
|
1446039950a93e94570cf398a679569915438629
|
[] |
no_license
|
halasadi/ancient-damage
|
ea10ea94325b66b129c1d4e9e5bf4827e5377ad2
|
51387d59d3436d796a2621d3dd72afbec48f981a
|
refs/heads/master
| 2020-04-12T06:42:24.364980
| 2018-07-28T22:45:30
| 2018-07-28T22:45:30
| 62,754,123
| 2
| 0
| null | 2016-09-22T01:13:50
| 2016-07-06T21:20:43
|
HTML
|
UTF-8
|
R
| false
| false
| 4,414
|
r
|
i_rise_gossling_hgdp.R
|
################## I + RISE + Gossling + HGDP ##############################
# Pool ancient-DNA damage signature counts from four datasets (Gossling, I
# and RISE ancients plus HGDP moderns), drop "C->T" signatures, fit
# grade-of-membership (maptpx topics) models, and visualise memberships.
# NOTE(review): depends on aRchaic helpers (club_signature_counts,
# filter_signatures_wo_location, damageLogo*), maptpx::topics and
# CountClust -- confirm they are attached when sourcing this script.
library(aRchaic)
gossling_data <- get(load("../processed_data/annagosling2016-counts-table.rda"))
system.time(gossling_data_clubbed <- club_signature_counts(gossling_data))
# Row 28 is removed -- presumably a known bad sample; confirm.
gossling_data_clubbed <- gossling_data_clubbed[-28,];
names <- rownames(gossling_data_clubbed);
# Control libraries identified by name: extraction negatives (EXN),
# library negatives and PCR negatives.
control_indices <- c(grep("EXN", names), grep("Libneg", names), grep("PCRneg", names))
labs <- character();
labs <- rep("ancient", dim(gossling_data_clubbed)[1])
labs[control_indices] <- "controls"
indices <- which(labs == "ancient")
gossling_ancients <- gossling_data_clubbed[indices, ]
# Load and club the remaining three datasets.
I_data <- get(load("../processed_data/Idata-counts-table.rda"))
I_data <- club_signature_counts(I_data)
RISE_data <- get(load("../processed_data/RISE-counts-table.rda"))
RISE_data <- club_signature_counts(RISE_data)
hgdp_data <- get(load("../processed_data/HGDPmoderns-counts-table.rda"))
hgdp_data <- club_signature_counts(hgdp_data)
# Keep only the signature columns common to all four datasets, then stack
# their rows into one pooled count matrix.
pooled_names <- intersect(colnames(I_data),
intersect(colnames(RISE_data),
intersect(colnames(gossling_ancients), colnames(hgdp_data))))
filtered_gossling <- gossling_ancients[, match(pooled_names, colnames(gossling_ancients))]
filtered_I <- I_data[, match(pooled_names, colnames(I_data))]
filtered_RISE <- RISE_data[, match(pooled_names, colnames(RISE_data))]
filtered_hgdp <- hgdp_data[, match(pooled_names, colnames(hgdp_data))]
pooled_data <- rbind(filtered_gossling, filtered_I, filtered_RISE, filtered_hgdp)
# Decompose each signature name (first 8 characters) into 5 components:
# flanking bases (1-2, 7-8) and the 4-character mutation code (chars 3-6,
# e.g. "C->T").
signature_set <- colnames(pooled_data)
sig_split <- t(sapply(1:length(signature_set), function(x) return(strsplit(signature_set[x], "")[[1]][1:8])))
new_sig_split <- matrix(0, dim(sig_split)[1], 5);
new_sig_split[,1] <- sig_split[,1]
new_sig_split[,2] <- sig_split[,2]
new_sig_split[,3] <- sapply(1:length(signature_set), function(x) return(paste(sig_split[x,3:6], collapse="")))
new_sig_split[,4] <- sig_split[,7]
new_sig_split[,5] <- sig_split[,8]
# NOTE(review): despite its name, this keeps signatures whose mutation is
# NOT "C->T" (matching the "noCtoT" output filename); "notCtoA" is a
# misnomer.
indices_notCtoA <- which(new_sig_split[,3] != "C->T")
pooled_data <- pooled_data[, indices_notCtoA]
pooled_data <- filter_signatures_wo_location(pooled_data)
# NOTE(review): assigning levels() to a character matrix column has no
# lasting effect on the matrix -- confirm what was intended here.
levels(new_sig_split[,1]) <- c("0", "1", "2", "3", "4")
# Read-position component: everything from character 10 of the name on.
pos <- t(sapply(1:length(signature_set), function(x)
{
y = strsplit(signature_set[x], "")[[1]]
return(paste(y[10:length(y)], collapse=""))
}))
# Recode each signature component to consecutive integer codes (0-based).
mat <- matrix(0, dim(new_sig_split)[1], dim(new_sig_split)[2])
for(k in 1:dim(new_sig_split)[2]){
temp <- as.factor(new_sig_split[,k])
mat[,k] <- as.numeric(as.matrix(plyr::mapvalues(temp, from = levels(temp), to = 0:(length(levels(temp))-1))))
}
pos <- as.numeric(pos)
pos <- pos - min(pos)
pos <- factor(pos, levels = 0:22)
signatures <- mat;
signature_pos <- cbind.data.frame(signatures, pos)
# Fit grade-of-membership models with K = 4 topics.
# NOTE(review): the second topics() call overwrites the "independent"
# model fit before save(), yet the file is named "...-independent-..." --
# confirm which fit was meant to be persisted.
out <- topics(pooled_data, K=4, tol=100, model="independent", signatures = signature_pos)
out <- topics(pooled_data, K=4, tol=10);
save(out, file="../processed_data/maptpx-runs/i-rise-gosling-hgdp-maptpx-independent-K-4-noCtoT.rda")
out <- get(load("../processed_data/maptpx-runs/i-rise-gosling-hgdp-maptpx-independent-K-4-noCtoT.rda"))
# Dataset label for each pooled row; only the first 100 samples are shown
# in the structure plot below.
labs <- c(rep("Gossling", dim(filtered_gossling)[1]), rep("I", dim(filtered_I)[1]),
rep("RISE", dim(filtered_RISE)[1]), rep("HGDP", dim(filtered_hgdp)[1]))
labs <- labs[1:100]
omega <- out$omega[1:100, ]
cols1 <- c("red","blue","darkgoldenrod1","cyan","firebrick", "green",
"hotpink","burlywood","yellow","darkgray","deepskyblue","darkkhaki",
"brown4","darkorchid","magenta","yellow", "azure1","azure4")
annotation <- data.frame(
sample_id = paste0("X", c(1:NROW(omega))),
tissue_label = factor(labs)
)
# Structure plot of topic memberships, coloured by dataset of origin.
CountClust::StructureGGplot(omega = omega,
annotation = annotation,
palette = cols1,
yaxis_label = "Moderns vs Ancients",
order_sample = FALSE,
figure_title = paste0("StructurePlot: K=", dim(omega)[2],""),
axis_tick = list(axis_ticks_length = .1,
axis_ticks_lwd_y = .1,
axis_ticks_lwd_x = .1,
axis_label_size = 7,
axis_label_face = "bold"))
# Damage logo plots of the estimated signature profiles.
damageLogo_pos(out$theta)
damageLogo(out$theta)
|
abdf177b2027955bad79ede729471a000d46711b
|
a5b199836daeef25fceb8ba3f4ae12a915506c89
|
/02_methylation_expression_associations.R
|
6edc1d95354824eaecca766dd2cc17a245833952
|
[] |
no_license
|
WRScottImperial/Human-adipocyte-5mC-obesity
|
37ad159cfbeb1dfcc8c6f7c06421a901717b17a3
|
43b3b9a1fd544330f1f2c1b41cacf84ee8c66d63
|
refs/heads/master
| 2023-04-10T06:03:45.569090
| 2023-03-10T13:36:37
| 2023-03-10T13:36:37
| 179,325,255
| 1
| 0
| null | 2023-03-09T11:42:44
| 2019-04-03T16:06:57
|
R
|
UTF-8
|
R
| false
| false
| 3,208
|
r
|
02_methylation_expression_associations.R
|
#############################################################################################
##### Identification of sentinel DNA methylation sites associated with expression.
##### In combined subcutaneous and visceral adipocyte samples, using mixed effects controlling for sample relatedness.
#############################################################################################
##### load packages and set options
# NOTE(review): the "..." path prefixes below are placeholders to be filled
# in with the real data locations before running -- confirm.
library("DESeq2")
library('biomaRt')
library('variancePartition')
library('edgeR')
library('BiocParallel')
options(stringsAsFactors=F)
# Load methylation betas, control probe PCs and annotation files.
load("...beta_QN_rep.RData")
phe.cg=read.table("...phe.rep.txt", header=T, sep='\t')
load("...ctrlpca_rep.RData")
phe.cg=merge(phe.cg, ctrlprobes.scores, by.y='row.names', by.x='ID')
# Per-sample call rate: fraction of probes with a non-missing beta value;
# keep only samples with >98% call rate.
scr=1-colSums(is.na(beta))/nrow(beta)
phe.cg = phe.cg[phe.cg$Array_ID %in% names(scr[scr>0.98]),]
# Load RNA seq counts, surrogate variables and annotation.
counts = round(read.csv(".../RawCounts.combinedFlowcells.csv",row.names=1)) # raw counts output from featureCounts
phe.gn=read.table("...phe.rep.with.RNA.qc.txt", header=T, sep='\t')
# Drop samples whose library prep failed, then drop known extreme outliers.
phe.gn=phe.gn[which(phe.gn$Lib_Prep_Batch != "Failed"),]
rownames(phe.gn)=phe.gn$Sample.ID
outliers = c("P116B.SA","P176B.VA","P38B.VA","P10B.SA","P76B.SA") #
phe.gn = phe.gn[!phe.gn$Sample.ID %in% outliers,]
SVAs = read.csv(".../vst_count_SVs.csv") # surrogate variables from sva rna seq of variance stabilised transformed counts (generated without failed samples/outliers)
phe.gn=merge(phe.gn, SVAs, by.y='row.names', by.x='ID')
# Merge datasets and remove failed RNA seq samples/extreme outliers
phe.all = merge(phe.gn,phe.cg,by=c('Sample.ID','CaseControl','Group','Age','Sex','Sample.Type','Ethnicity'))
rownames(phe.all)=phe.all$Sample.ID
# Restrict both matrices to the samples present in the merged phenotypes.
beta = beta[,phe.all$Sample.ID]
counts = counts[,phe.all$Sample.ID]
# load sentinels
cpgs=read.csv(".../Obese_lean_SA_Combined_sentinels.csv")
cpgs.assoc=cpgs$CG
# Run models of 5mC ~ gene expression (all gene counts passing filter) using variancePartition: DREAM allows mixed-effects modeling.
# Expression filter: keep genes with >= 5 reads in at least 20% of samples.
filter=rowSums(counts >= 5) >= 0.2*ncol(counts)
filteredCounts = counts[filter,]
ds.all = DGEList(filteredCounts)
ds.all = calcNormFactors(ds.all)
# Loop over each sentinel CpG and test 5mC ~ expression with dream
# (mixed-effects model with a random intercept per participant, which
# controls for relatedness between SA and VA samples of the same person).
#
# NOTE(review): the original indexed an undefined `phe.sel`; the merged
# phenotype table built above is `phe.all`, which is what is used here.
# NOTE(review): `dir` (the output directory) must be defined upstream.
# The parallel backend is registered once, outside the loop.
param = SnowParam(4, "SOCK", progressbar=TRUE)
register(param)
for(cg in seq_along(cpgs.assoc)) {
  cpg = cpgs.assoc[cg]
  # 2. Run with Limma in SA and VA combined using Dream mixed effect model
  # Attach the sentinel's methylation betas and drop samples missing them.
  phe.all$sent = beta[cpg,]
  phe.all.sel = phe.all[!is.na(phe.all$sent),]
  ds.all.sel = ds.all[,phe.all.sel$Sample.ID]
  formula = ~ sent + Age + Sex + Ethnicity + RIN + SV1_SAVA + SV2_SAVA + PC1_cp + PC2_cp + (1|Participant.ID)
  voom.dream.ds.all = voomWithDreamWeights(ds.all.sel, formula, phe.all.sel)
  fit.dream.ds.all = dream(voom.dream.ds.all, formula, phe.all.sel)
  resu = topTable(fit.dream.ds.all, coef="sent", n = Inf)
  # Save results for each sentinel. The original placed `row.names=T`
  # inside file.path(), which appended a literal "TRUE" path component;
  # it belongs to write.csv().
  write.csv(resu, file=file.path(dir, paste0(cpg, "_cisgene_association_dream.csv")), row.names=TRUE)
}
# EOF
|
d65fee65bd1ad6e3e137f139c80c1b9836b1be3b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fastR2/examples/RatPoison.Rd.R
|
02895c1b1fd46c1e7af7dc36757d9fb58377babf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
RatPoison.Rd.R
|
# Extracted example code from fastR2's RatPoison.Rd: plot rat-poison
# consumption against flavor as lines grouped and coloured by location,
# with points overlaid (ggformula `gf_*` interface).
library(fastR2)
### Name: RatPoison
### Title: Rat poison - unfinished documentation
### Aliases: RatPoison
### Keywords: datasets
### ** Examples
data(RatPoison)
gf_line(consumption ~ flavor, group = ~ location, color = ~ location, data = RatPoison) %>%
gf_point()
|
b4d4ec16b06c19e9e8790adc47dd0643aff16e78
|
c770b96645e3792287fad149a97a1a9d0f95e5d6
|
/Exercise14.R
|
32e9471ed5476dbfe8a88ff873c1bb82c7459e52
|
[] |
no_license
|
nickormushev/R-trainings
|
f70aa9900e8f27ed02adb7e24c4033b4fa60db6f
|
efa7df5b5075a505b2f35c50ee3b0d551023f742
|
refs/heads/master
| 2023-05-30T17:53:05.530260
| 2021-06-09T08:11:01
| 2021-06-09T08:11:01
| 367,485,275
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,000
|
r
|
Exercise14.R
|
# Exercise 14: logistic regression on the student admissions data.
# NOTE(review): reads "students.txt" from the working directory -- confirm.
library("tidyverse")
st <- read.csv("students.txt")
# Initial 300/100 train/test split. NOTE(review): `train`/`test` are
# immediately re-assigned inside the leave-one-out loop below; only the
# chisq.test uses this split.
train <- st[101:400, ]
test <- st[1:100, ]
# Chi-squared test of independence between admission and college rank.
chisq.test(train$admit, train$rank)
# Leave-one-out cross-validation (jackknife) of the full logistic model.
n <- nrow(st)
correct_sum <- 0
for (i in 1:n) {
train <- st[-i, ]
test <- st[i, ]
m <- glm(admit ~ ., data = train, family = "binomial")
# Classify the held-out row as admitted when P(admit) > 0.5.
predictions <- predict.glm(m, test, type = "response") > 0.5
correct_sum <- correct_sum + (predictions == test$admit)
}
# Shows how often our model guesses correctly (LOOCV accuracy).
correct_sum / n
# Find the best single predictor of tumour class in the BreastCancer data.
library(mlbench)
data(BreastCancer)
# Random 70/30 train/test split.
train_idx <- sample(1:nrow(BreastCancer), size = nrow(BreastCancer) * 0.7)
train <- BreastCancer[train_idx, ]
test <- BreastCancer[-train_idx, ]
test
# Single-predictor fit; result is unused below (superseded by the loop).
m <- glm("Class ~ Cl.thickness" , train, family = "binomial")
p <- predict.glm(m, newdata = test, type = "response")
# Linear search for the predictor with the most correct classifications.
v <- 0
correctBest <- 0
# NOTE(review): columns 1, 7 and 11 (Id, Bare.nuclei, Class) are excluded
# -- presumably Id/Class are not predictors and Bare.nuclei has NAs; confirm.
for (i in colnames(BreastCancer)[-c(1, 7, 11)]) {
f <- as.formula(paste0("Class ~ ", i))
m <- glm(f, train, family = "binomial")
# Classify as malignant when predicted probability exceeds 0.5.
p <- ifelse(predict.glm(m, newdata = test, type = "response") > 0.5, "malignant", "benign")
correct <- sum(p == test$Class)
if(correct > correctBest) {
correctBest <- correct
v <- i
}
}
# Best single predictor found.
v
# load the package and the data
library("mlbench")
data("BreastCancer")
# split 70% / 30% for training and testing
# 70% to train a model and 30% for testing
# total number of observations
n <- nrow(BreastCancer)
# Inspect the data for missing values
BreastCancer %>%
select_if(function(x) any(is.na(x))) %>%
summarise_each(funs(sum(is.na(.))))
# Bare.nuclei has 16 NA observations
# we could remove them so they do not degrade the model
set.seed(10)
# indices of the observations that take part in training
train_idx <- sample(1:n, size = round(n * 0.7))
# set of observations for training a model
train_df <- BreastCancer[train_idx, ]
# set of observations on which we test how well we predict
test_df <- BreastCancer[-train_idx, ]
# baseline model with no predictors / independent variables (features)
base_model <- glm(Class ~ 1, data = train_df, family = "binomial")
# Forward subset selection (FSS) algorithm:
# start from the baseline model and take its accuracy,
# iterate over all variables,
# check whether any of the variables actually
# improves the baseline model's predictions,
# and pick the variable that gives the best accuracy.
# predicted probabilities of the baseline model
predicted_probs <- predict.glm(base_model, newdata = test_df, type = "response")
# predicted classes of the baseline model
predicted_class <- ifelse(predicted_probs > 0.5, "malignant", "benign")
# the baseline model classifies everything as the more frequent class (not very clever)
# accuracy of the baseline model
base_accuracy <- mean(predicted_class == test_df$Class)
# > base_accuracy
# [1] 0.6333333
# still, it provides some baseline accuracy
# best accuracy and best variable so far
best_accuracy <- base_accuracy
best_variable <- NULL
# iterate over all variables except Id and Class
# NOTE(review): columns 1, 6 and 7 are skipped here, unlike 1, 7 and 11
# in the earlier loop -- confirm which exclusion set is intended.
for (var in colnames(test_df)[-c(1, 6, 7)]) {
# build the formula for the current model
current_formula <- as.formula(paste0("Class ~ ", var))
# current model
current_model <- glm(current_formula, data = train_df, family = "binomial")
# predicted probabilities
predicted_probs <- predict.glm(current_model, newdata = test_df, type = "response")
# predicted classes
predicted_class <- ifelse(predicted_probs > 0.5, "malignant", "benign")
# current prediction accuracy
current_accuracy <- mean(predicted_class == test_df$Class)
# check whether the current accuracy beats the best so far
if (current_accuracy > best_accuracy) {
# if it is better, mark the current one as best (linear search for the maximum)
best_variable <- var
# update the best accuracy
best_accuracy <- current_accuracy
}
}
# result (vector names are intentionally left as written)
c("най-добра променлива" = best_variable, "точност" = best_accuracy)
# We choose to add the variable "Cell.size" since it gives the best prediction accuracy.
# We can repeat the approach, but this time our baseline accuracy is the
# accuracy given by the "Cell.size" variable.
# At each iteration the base model will be Class ~ beta0 + beta1 * Cell.size
# and we look for a variable with better accuracy than it.
# The current formula at each iteration will be paste0("Class ~ Cell.size + ", var).
# We find that the next best variable is "Cl.thickness".
# It raises the prediction accuracy to about 95%.
# We repeat once more with a new formula paste0("Class ~ Cell.size + Cl.thickness + ", var)
# and obtain the next variable, Marg.adhesion.
# Further iterations do not raise the prediction accuracy on the test set.
# The final model we obtain is Class ~ Cell.size + Cl.thickness + Marg.adhesion
# and the final accuracy is 96%.
# We could possibly also add Bl.cromatin, although it barely raises accuracy.
# packages
library(tidyverse)
############################################################
############ Task 1 - Solution #############################
############################################################
# load the data
students_df <- read.csv("./student.txt")
# first five rows
head(students_df, 5)
# table and barplot of admitted students
students_df$admit %>%
table() %>%
prop.table() %>%
barplot()
# visualize the distributions of the two groups (admitted and not admitted)
students_df %>%
ggplot(mapping = aes(x = gre, fill = as.factor(admit))) +
geom_density(alpha = 0.4)
# t.test comparing GRE scores of the two groups (admitted and not admitted)
t.test(
students_df$gre[students_df$admit == 0],
students_df$gre[students_df$admit == 1]
)
# convert numeric variables to factors
students_df <- students_df %>%
mutate(admit = as.factor(admit), rank = as.factor(rank))
# test whether college rank and subsequent university admission are dependent
table(students_df$admit, students_df$rank) %>%
# chi-squared test on the contingency table
chisq.test()
# => rank and admit are dependent
# logistic regression model with all variables
students_model <- glm(admit ~ ., family = "binomial", data = students_df)
# model evaluation:
# Jackknife algorithm
# 1. remove one observation from the data
# 2. train the model on the remaining observations
# 3. see how well we predict the held-out observation
# return accuracy as the percentage of correct guesses
# Example with the first iteration
students_df <- read.csv("./student.txt")
THRESHOLD <- 0.5 # threshold above which we classify as admitted
# NOTE(review): `sum` here shadows base::sum as a variable name.
sum <- 0
for (idx in 1:nrow(students_df)) {
# 1. split into fitting and testing sets for the model
train_df <- students_df[-idx, ]
test_df <- students_df[idx, ]
# 2. fit the model on the fitting data
model <- glm(admit ~ ., train_df, family = "binomial")
# predict for an observation whose admission outcome we already know
predicted_probability <- predict.glm(model, newdata = test_df, type = "response")
predicted <- as.numeric(predicted_probability > THRESHOLD)
# true value
actual <- test_df$admit[1]
# check whether we guessed correctly
sum <- sum + (predicted == actual)
}
# result
sum / nrow(students_df)
# continue over the remaining observations:
# sum the number of correct predictions and divide by the total count (mean)
# so we see the percentage predicted/classified correctly
|
4c8c1289efdfd6f7674d82a86e6c6c5ba6940d77
|
d837c3cca39d2aedcd4cc29772dde5f1eb6d621f
|
/man/holiday.Rd
|
2c02867d04d9c9338c61e33a9d7e852e354ff00e
|
[
"MIT"
] |
permissive
|
anabelberjonsanchez/christmaspackage
|
c06c04a21bea97f5b513f5e7b6d763994d808580
|
3adee6c6fe06b589846f5e9b9cb2cef011e1f67a
|
refs/heads/main
| 2023-01-24T20:57:42.013019
| 2020-12-02T22:12:46
| 2020-12-02T22:12:46
| 317,600,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 379
|
rd
|
holiday.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/holiday.R
\name{holiday}
\alias{holiday}
\title{A Holiday Function}
\usage{
holiday(christmas = TRUE)
}
\arguments{
\item{christmas}{Do you like holidays? Defaults to TRUE.}
}
\description{
This function allows you to decide what to do during Christmas Time
}
\examples{
holiday()
}
\keyword{holiday}
|
353e362fd54a2741352edd9480db00aee153d14f
|
a85a1ef7f643d2d87cf7d58c7ecd0f5bcddfd510
|
/rscripts/figures/fig3.R
|
8b4bf0764216f716ccb51830d9211e13b44c2e3f
|
[] |
no_license
|
welch16/ChIPexo
|
85c06a2d26a37f68006cbbdf03c994b3cd0b70f4
|
cc2de1b1f453c1f6ac73aad0872a41f11a28d4be
|
refs/heads/master
| 2021-09-12T12:57:09.142674
| 2018-04-16T23:06:27
| 2018-04-16T23:06:27
| 20,110,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,475
|
r
|
fig3.R
|
#!/usr/bin/env Rscript
# Build the panels of figure 3 (ARC/URC hexbin, region composition, FSR
# curves and blacklist regression boxplots) from pre-computed TSV files.
# NOTE(review): the option defaults and output filenames say "fig2" while
# the script and input directory say fig3 -- confirm which is intended.
library(optparse,quietly = TRUE)
# NOTE(review): optparse's action = "store_true" is normally for logical
# flags; combined with type = "character"/"numeric" here -- presumably
# action = "store" was intended; confirm.
optList = list(
make_option("--outdr", action = "store_true",type = "character",
default = "figs/figuresV2/fig2",
help = "Directory where all the figures are stored."),
make_option("--fig.width",action = "store_true",type = "numeric",default = 7,
help = "Width of the figure composed with all panels"),
make_option("--line.width",action = "store_true",type = "numeric",default = .8,
help = "Plot line.width")
)
opt = parse_args(OptionParser(option_list = optList))
library(grid)
library(gridExtra)
library(tidyverse)
library(magrittr)
library(viridis)
library(scales)
# Input TSVs produced by earlier pipeline steps.
indr = "data/figures/fig3"
files = list.files(indr,full.names = TRUE,pattern = "tsv")
theme_set(theme_bw())
# Viridis palette for the hexbin fill (variable `r` is reused below).
r = viridis(100,option = "D")
dir.create(opt$outdr,showWarnings = FALSE)
##
# Panel A: ARC vs URC hexbin for replicate 1.
exo = files[grep("ARCv",files)] %>% read_tsv
exo = exo %>% filter(Repl == "Rep-1")
ARC = exo %>% ggplot(aes(ARC,URC))+stat_binhex(bins = 50)+
scale_fill_gradientn(colours = r , trans = "log10",
labels = trans_format("log10",
math_format(10^.x)),
name = "Nr. of islands")+
theme(legend.position = "top") + facet_grid( ~ Repl)+xlim(0,4)
ggsave(file.path(opt$outdr,"fig2A.png"),ARC)
## Region Comp
# Panel B: island composition by minimum read depth (stacked bars).
comp = files[grep("RegionComp",files)] %>% read_tsv
comp = comp %>% filter(Repl == "Rep-1")
# `r` is re-bound here to a brewer palette *function* (scales::brewer_pal).
r = brewer_pal(palette = "Pastel1")
region_comp = comp %>%
ggplot(aes(depth,prob,fill = lab))+geom_bar(stat = "identity")+
facet_grid( Repl ~ .)+
theme(legend.position = "top")+scale_fill_manual(values = r(3),name = "")+
ylab("Proportion of islands")+
xlab("Min. number of reads")
ggsave(file.path(opt$outdr,"fig2B.png"),width = 6 ,height = 4,region_comp)
## FSR dist
# Panel C: forward-strand-ratio quantile curves vs minimum read depth.
FSRdist = files[grep("FSR",files)] %>% read_tsv
FSRdist = FSRdist %>% filter(Repl == "Rep-1")
# NOTE(review): the leading empty argument in geom_line(, size = ...) is a
# stray comma (the mapping default is used); harmless but confusing.
FSR = FSRdist %>% ggplot(aes(depth,FSR,colour =as.factor( quantiles)))+
geom_line(,size =opt$line.width)+
theme(legend.position = "top")+facet_grid( Repl ~ .)+
ylab("Forward Strand Ratio (FSR)")+xlab("Min. number of reads")+
scale_color_brewer(palette = "Set1",name = "Quantiles",
guide = guide_legend(nrow = 2))
ggsave(file.path(opt$outdr,"fig2C.png"),width = 6,height = 4 , FSR)
## Blacklist boxplot
# Panel D: regression coefficient boxplots with/without blacklisting.
blacklists = files[grep("blacklist",files)] %>% read_tsv %>%
mutate(repl = forcats::fct_recode(repl,"Rep-3"="rep-3","Rep-2"="rep-2","Rep-1"="rep-1"))
library(ggthemes)
blacklists = blacklists %>% filter(repl == "Rep-1")
# beta1: coefficient on uniquePos (log10 y scale).
beta1 = blacklists %>% filter(term == "uniquePos") %>%
ggplot(aes(repl,estimate,colour = blackl))+geom_boxplot()+
theme(legend.position = "top",
axis.title.x = element_blank())+
scale_color_brewer(palette = "Set1",name = "")+
scale_y_log10()+ylab(expression(beta[1]))
# beta2: negated coefficient for the remaining term, clipped to [0, 10].
beta2 = blacklists %>% filter(term != "uniquePos") %>%
ggplot(aes(repl,-estimate,colour = blackl))+geom_boxplot()+
scale_color_brewer(palette = "Set1",name = "")+
theme(legend.position = "top",
axis.title.x = element_blank())+
ylab(expression(beta[2]))+ylim(0,10)
# Arrange any number of ggplots in one column that share a single legend,
# extracted from the first plot and drawn above the stacked panels.
grid_arrange_shared_legend <- function(...) {
  plot_list <- list(...)
  # Pull the grobs of the first plot with its legend forced to the top.
  grobs <- ggplotGrob(plot_list[[1]] + theme(legend.position = "top"))$grobs
  grob_names <- sapply(grobs, function(grob) grob$name)
  shared_legend <- grobs[[which(grob_names == "guide-box")]]
  legend_height <- sum(shared_legend$height)
  # Strip the legend from every panel before stacking them.
  stripped <- lapply(plot_list, function(p) p + theme(legend.position = "none"))
  grid.arrange(
    shared_legend,
    do.call(arrangeGrob, stripped),
    ncol = 1,
    heights = unit.c(unit(1, "npc") - legend_height, legend_height)
  )
}
# Assemble panel D and the composite figure.
# NOTE(review): this inlines the same legend-extraction logic as the
# grid_arrange_shared_legend() helper defined above, which is never called.
g = ggplotGrob(beta1 +theme(legend.position = "top"))$grobs
legend = g[[which(sapply(g,function(x)x$name) == "guide-box")]]
lh = sum(legend$height)
# Side-by-side beta1/beta2 boxplots with the shared legend on top.
betascore = arrangeGrob(beta1 + theme(legend.position = "none"),
beta2 + theme(legend.position = "none"),nrow = 1)
betascore = arrangeGrob(legend,betascore,heights = unit.c(lh,unit(1,"npc") - lh),ncol =1)
ggsave(file.path(opt$outdr,"fig2D.png"),width = 6,betascore)
# Full figure: panel A on top, panels B+C in the middle, panel D below.
all = arrangeGrob(arrangeGrob(ARC,nrow = 1,ncol = 1),
arrangeGrob(region_comp,FSR,nrow = 1,ncol = 2),
betascore,
heights = c(1.2,1.3,.85))
ggsave(file = file.path(opt$outdr,"fig2.pdf"),all,
width = 180,
units = "mm")
|
532f2f5af16296c314a0818c404982871976af19
|
dce37174ec552c73c63586000f6b506cbf5a25a6
|
/R/gwfa_uniquenesses_sum.R
|
75e376559fa250419a6c2e4d372a235366c9c330
|
[] |
no_license
|
naru-T/gwefa
|
36a5a298cf488432b06584cb5df9f3aa1e4de789
|
0eb58ae65ad82536d6ca24664d763b4aa78b3860
|
refs/heads/master
| 2020-03-21T10:48:52.336020
| 2019-04-25T04:32:15
| 2019-04-25T04:32:15
| 138,472,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 397
|
r
|
gwfa_uniquenesses_sum.R
|
# Objective function used when tuning the bandwidth (bw) of a
# geographically weighted factor analysis (gwfa) fit: fits gwfa at the
# given bandwidth and returns the negative sum of squared factor
# uniquenesses, so maximising this value favours small uniquenesses.
#
# NOTE(review): the arguments `robust`, `p` and `theta` are accepted but
# never forwarded -- the gwfa call hard-codes p=2 and theta=0 and drops
# `robust` entirely; confirm whether that is intended.
# NOTE(review): `gwfa()` is defined elsewhere in this package.
gwfa_uniquenesses_sum <- function(bw, x ,dp.locat, k, robust, kernel, adaptive, p, theta, longlat, dMat,vars, n.obs, fm, rotate,scores, oblique.scores, timeout, foreach){
ans <- gwfa(data=x,elocat=dp.locat, vars,bw,k, kernel, adaptive, p=2, theta=0, longlat, dMat, n.obs, fm, rotate,scores,oblique.scores=oblique.scores, timeout=timeout, foreach=foreach)
return(-sum(ans$uniquenesses^2))
}
|
363a02c66c7e4de9998fb2c067f6a0f43005733b
|
c2d79fd415b3417a56d7c0a85b453b68eebb7d7a
|
/R_Programming/demonstration.R
|
035f0ae15e023871f81fb7610e4f4c42b1dd626e
|
[] |
no_license
|
noone31173/AI_ANALYTICS
|
71d6907ce940f8818fbc0e23ee8ca9f29fad85b7
|
e0854cb80c55c64c1b4ace54b3a84af573991167
|
refs/heads/master
| 2022-11-11T14:01:21.130217
| 2019-12-08T12:30:13
| 2019-12-08T12:30:13
| 226,520,448
| 0
| 1
| null | 2022-10-26T09:09:16
| 2019-12-07T13:46:16
|
Python
|
UTF-8
|
R
| false
| false
| 632
|
r
|
demonstration.R
|
# Demonstration of core dplyr verbs on the nycflights13 flights data.
library(nycflights13)
library(tidyverse)
nycflights13::flights
?flights
view(flights)
# filter: rows matching conditions (flights on Jan 1, Dec 25, Nov/Dec).
filter(flights, month == 1, day == 1)
dec25 <- filter(flights, month == 12, day == 25)
dec25
filter(flights, month == 11 | month == 12)
# arrange: sort rows (by date, then by descending departure delay).
arrange(flights, year, month, day)
arrange(flights, desc(dep_delay))
# select / rename: pick and rename columns.
select(flights, year, month, day)
rename(flights, tail_num = tailnum)
# Column selection helpers: ranges and suffix matching.
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time
)
# mutate: derive new columns (net time gained and average speed in mph).
mutate(flights_sml,
gain = dep_delay - arr_delay,
speed = distance / air_time * 60
)
|
d06a04ab45383bc1e37c29399b981712772a1974
|
f65a07c8d1c0a270f976071a3fd2f8db823a7ad4
|
/R/k_to_f.R
|
f05cee7236a4bc2bb4995cc3cba443660f32fb13
|
[] |
no_license
|
mt-climate-office/mtdrought
|
4da714fe93d5fd38f48e33745ab1196e2b15214c
|
2001a91e80ec723af931e796ea0f4416342ebfef
|
refs/heads/master
| 2021-06-24T00:37:26.185845
| 2020-10-28T20:40:38
| 2020-10-28T20:40:38
| 123,042,157
| 0
| 0
| null | 2020-06-18T19:27:08
| 2018-02-26T22:59:29
|
HTML
|
UTF-8
|
R
| false
| false
| 110
|
r
|
k_to_f.R
|
# Convert temperatures from Kelvin to degrees Fahrenheit.
#
# @param x Numeric vector of temperatures in Kelvin.
# @return Numeric vector of the same length in degrees Fahrenheit.
k_to_f <- function(x){
  # F = K * 9/5 - 459.67. Plain vectorised arithmetic: the original piped
  # through magrittr verbs, which silently required `%>%` to be attached.
  x * (9 / 5) - 459.67
}
|
6f0ee96038dc849ad8f760188dd21910ce1e0b81
|
b83c7ce220eef7e0ce1c209b937dd8bc48577db2
|
/man/resample_times.Rd
|
15d209cf63fe4cfa72b9376a454a4de43eb51554
|
[] |
no_license
|
mahowak/peekds
|
d35a08d983284223f9ea6d3497757a151866f3e4
|
47550093666a108d31d1edfca867cb80bd147278
|
refs/heads/master
| 2020-09-21T10:56:15.359908
| 2020-07-24T00:43:48
| 2020-07-24T00:43:48
| 224,767,618
| 0
| 0
| null | 2019-11-29T03:09:11
| 2019-11-29T03:09:10
| null |
UTF-8
|
R
| false
| true
| 360
|
rd
|
resample_times.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_aoi.R
\name{resample_times}
\alias{resample_times}
\title{Resample times to be consistent across labs}
\usage{
resample_times(df)
}
\arguments{
\item{df}{A data frame that has subject_id, dataset_id, trial_id and times.}
}
\description{
Resample times to be consistent across labs
}
|
206d566c30aeea08014b30a2e4d0f1a6d24a905f
|
856ab7e8164df470e69d989618207db9c28c666b
|
/christmas_radio_data_assets.R
|
531dd20584df0de9a76dc8a9ba0a762676d3cb0b
|
[] |
no_license
|
walkerkq/christmas-radio
|
2c2c317ec7e02fe5fe4b33943354f83aa708c036
|
768eda86739ecd99768221dba86eb6032886039c
|
refs/heads/master
| 2021-01-12T05:46:21.702619
| 2016-12-23T15:31:15
| 2016-12-23T15:31:15
| 77,192,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,626
|
r
|
christmas_radio_data_assets.R
|
# Analyse US radio stations that switched to a Christmas format:
# per-state counts, market-share changes, and a leaflet coverage map.
library(ggplot2)
fm <- read.csv("christmas_radio.csv", stringsAsFactors=F)
# Non-US state/territory codes to exclude from state-level summaries.
not_us <- c("BC", "BN", "MB", "MP", "NS", "PR", "QC", "SK", "VI", "GU", "AS")
##### States with the most Christmas stations
# Count holiday stations and total stations per state, then merge.
st <- data.frame(table(fm$State[fm$Christmas==TRUE]))
st2 <- data.frame(table(fm$State))
st <- merge(st, st2, by="Var1", all=T); rm(st2)
colnames(st) <- c("State", "HolidayStations", "TotalStations")
# States with no holiday stations get NA from the outer merge; make them 0.
st$HolidayStations[is.na(st$HolidayStations)] <- 0
st$Percent <- round(st$HolidayStations/st$TotalStations,4)
st <- st[!(st$State %in% not_us), ]
st <- st[order(-st$HolidayStations), ]
##### Most & Least Successful Christmas Stations
# Keep Christmas stations with a December 2016 rating, sorted by share change.
# NOTE(review): column selection is positional (c(3,5,6,...)) and depends
# on the CSV's column order -- confirm against the data file.
scs <- fm[fm$Christmas==TRUE & !is.na(fm$DEC.16),]
scs <- scs[order(-scs$Share_Change), c(3,5,6,11,14,15,16,17)]
##### States with the most successful Christmas stations
# Total market-share change per market, plotted as a horizontal bar chart.
sst <- data.frame(aggregate(Share_Change ~ Market, fm[fm$Christmas==TRUE,], sum))
sst$Share_Change <- round(sst$Share_Change,4)
sst <- sst[order(sst$Share_Change), ]
# Lock factor levels to the sorted order so bars plot in rank order.
sst$Market <- factor(sst$Market, levels=sst$Market)
ggplot(sst, aes(Market, Share_Change)) + geom_bar(stat="identity", fill="maroon4") +
xlab("") + ylab("Change in Market Share") + labs(title="Market Share in Stations Switching to Holiday Format") +
theme_bw() + coord_flip()
##### Map
# Leaflet map of Christmas stations, one circle per transmitter scaled by
# average coverage distance.
library(leaflet)
fmc <- fm[fm$Christmas == TRUE, ]
m <- leaflet() %>%
setView(lng = -97.51060, lat = 37.77779 , zoom = 4) %>%
addProviderTiles("CartoDB.Positron") %>%
addCircles(data=fmc, lng=fmc$transmitter_lon, lat=fmc$transmitter_lat, radius = fmc$avg.dist,
stroke=FALSE, popup=fmc$Call, opacity=1, color="red", group="Christmas")
m
|
b19beca2e242348119acbf48974f0aabd38de287
|
9ab25c9161cbb7d3d6f1068fcbc01449ba6baa0b
|
/RVS0.0.0/man/calc_score_test.Rd
|
8293509251c544579ca4686d8bf84c5dcf7ff2de
|
[] |
no_license
|
jiafen/RVS
|
5b9d4a42f4684d6589bbbc4167849e509b3c001a
|
f1c1ba78ec3983dd3f90bc09f1496d75d0f8b7dd
|
refs/heads/master
| 2021-01-10T09:10:58.940526
| 2016-01-27T20:37:48
| 2016-01-27T20:37:48
| 49,983,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
rd
|
calc_score_test.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/help_assoc_test.R
\name{calc_score_test}
\alias{calc_score_test}
\title{calc_score_test calculates the score test for given expected value of genotype for cases and controls separately (M1, M2)}
\usage{
calc_score_test(M1, M2)
}
\arguments{
\item{M1}{the expected value of genotype for case E(G_ij|D_ij) on one snp (dimension: ncase*1)}
\item{M2}{the expected value of genotype for control E(G_ij|D_ij) on one snp (dimension: ncont*1)}
}
\value{
:the score test statistic calculated from M1 and M2
T=S^2/var(S), for a given j, S_j=sum_i((Y_i-bar(Y))E(G_ij|D_ij))=sum_case(1-bar(Y))E(G|D)-sum_cont(bar(Y)E(G|D))
}
\description{
calc_score_test is called in \code{\link{test_regScore_perm}}
}
|
37fb25acd4e7b3c18717914d0fe07be6c1872a24
|
69187c8d9affbadba26f5fd9e0ad9acf632284b8
|
/script/functions/flag_duplicates_by_ids.R
|
2c7a9703c08da9f4f82ae9ba9a98189a8b147b4c
|
[] |
no_license
|
nthun/videogame_psychophys_meta_analysis
|
ba232e421f2f7dfbd732107fb36d978261d7fa13
|
2086df4dd871c2f26563a7e3ad0fa7b0e1795cb6
|
refs/heads/master
| 2021-05-06T10:32:48.387612
| 2018-11-18T21:38:45
| 2018-11-18T21:38:45
| 114,146,708
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,323
|
r
|
flag_duplicates_by_ids.R
|
# Flag duplicates by id(s)
# By using several databases, it is possible to have several duplicates of the same article with the same id. As there can be several ids, removing duplicates is not that straightforward.
# INPUT: df data frame with potential duplicates
# keys a character vector of identifier variables in the data frame that
# OUTPUT: A data frame without duplicate elements
# EXAMPLE: flag_duplicates_by_ids(df, c("doi","pmid","psyid","eid"))
flag_duplicates_by_ids <- function(df, keys){
  # Validate inputs: df must be a data frame and keys a non-empty character
  # vector naming columns that actually exist in df.
  stopifnot(is.data.frame(df),
            is.character(keys),
            length(keys) > 0,
            all(rlang::has_name(df, keys)))
  # Remove whitespace and convert key variables to lowercase for the filtering of duplicates
  # These changes are not present in the duplicate removed dataframe
  # (duplicated(incomparables = NA) keeps rows whose id is NA from being
  # flagged as duplicates of each other).
  # NOTE(review): vars() and syms() are used without a namespace prefix, unlike
  # the other dplyr/stringr calls -- assumes dplyr/rlang are attached; confirm.
  dplyr::filter_at(df, vars(!!keys),
                   dplyr::any_vars(stringr::str_squish(.) %>%
                                     stringr::str_to_lower() %>%
                                     duplicated(incomparables = NA))) %>%
    # Keep only the keys and duplicate info
    dplyr::transmute(!!!syms(keys),
                     duplicate_by_id = 1) %>%
    # Join the duplicate info back to the original df, using all keys
    dplyr::left_join(df, ., by = keys)
}
|
6dcbaa023ab200b637a83ba8c45066280eb08cad
|
3db768da9b99873ec55cdf449feaf64654da1bdf
|
/R/STAR2bSMRT_NRXN.R
|
6fe2a9bca0eb0b5a706f60325fb856969e04e14d
|
[] |
no_license
|
zhushijia/STAR2bSMRT
|
ca8cf30efaca8bed2fda287779b5f705875a4c0d
|
a3081be9455f6073e6ce1be4a229983aab7c6aad
|
refs/heads/master
| 2021-03-27T08:49:39.151251
| 2019-12-12T23:13:13
| 2019-12-12T23:13:13
| 119,112,032
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,900
|
r
|
STAR2bSMRT_NRXN.R
|
#' STAR2bSMRT_NRXN
#' the main function of STAR2bSMRT specially designed for NRXN1 alpha splicing
#' identification
#'
#' @param genomeDir character value indicating the directory of STAR genome
#' index for both STARlong and STARshort read mapping
#' @param genomeFasta character value indicating the fasta file of genome
#' reference
#' @param LRphqv character value indicating the Isoseq polished high QV transcripts
#' in fasta/fastq format, where read counts for each transcript consensus
#' should be saved in transcript names
#' @param LRflnc character value indicating the Isoseq full-length non-chimeric
#' reads in fasta/fastq format
#' @param LRnfl character value indicating the Isoseq non-full-length reads in
#' fasta/fastq format
#' @param SR1 character value indicating the short read file in fastq format:
#' single-end or paired-end R1
#' @param SR2 character value indicating the short read file in fastq format:
#' paired-end R2
#' @param useSJout boolean value indicating whether to use the STARshort
#' generated SJ.out.tab for splicing junction. If FALSE, STAR2bSMRT infers
#' the splicing junction from bam files. By default, TRUE.
#' @param adjustNCjunc boolean value indicating whether to minimize the
#' non-canonical junction sites.
#' @param thresSR a vector of integers indicating the searching range for the
#' number of short reads which support the splicing junction sites.
#' @param thresDis a vector of integers indicating the searching range for the
#' tolerance distance between short read-derived splicing junction and long
#' read-derived junction. STAR2bSMRT will correct the long read-derived
#' junction to the short read-derived junction, if more short reads than
#' defined thresSR support that short read-derived junction, and the distance
#' between long and short read junctions is shorter than the defined thresDis.
#' @param outputDir character value indicating the direcotry where results are
#' saved.
#' @param fixedMatchedLS boolean value indicating how often the distance is
#' calculate betwen long read and short read-derived junction sites. If TRUE,
#' only calculated once at the very beginning, which may save running time;
#' otherwise, calculate repeatly after every long read correction.
#' By default, FALSE.
#' @param fuzzyMatch integer value indicating the distance for fuzzyMatch
#' @param chrom character value indicating the chromosome of interest. By default,
#' STAR2bSMRT works on the whole genome.
#' @param s integeter value indicating the start position of the transcript of
#' interest. This is useful for target Isoseq sequencing.
#' @param e integeter value indicating the end position of the transcript of
#' interest. This is useful for target Isoseq sequencing.
#' @param cores integer value indicating the number of cores for parallel computing
#'
#' @return NULL
#' @export
#'
#'
STAR2bSMRT_NRXN <- function( genomeDir, genomeFasta, LRphqv=NULL, LRflnc=NULL, LRnfl=NULL,
                             SR1, SR2=NULL, useSJout=TRUE, adjustNCjunc=FALSE,
                             thresSR, thresDis, outputDir, fixedMatchedLS=FALSE, fuzzyMatch=100,
                             chrom=NULL , s=0 , e=Inf , cores=10 )
{
  # NOTE(review): LRflnc and LRnfl are accepted but never used in this body --
  # only LRphqv is processed below; confirm whether flnc/nfl handling was planned.
  library(Biostrings)
  library(foreach)
  library(doMC)
  registerDoMC(cores)   # register the parallel backend with `cores` workers
  ############################################################################
  ############ STARshort mapping and junction sites for short reads
  ############################################################################
  SoutputDir = paste0(outputDir,"/SR")
  SRalignment = paste0(SoutputDir,"/alignments.bam")
  system( paste0( "mkdir -p " , SoutputDir ) )
  starShort( genomeDir , SR1 , SR2 , SoutputDir )
  # Short-read junctions: either STAR's SJ.out.tab or parsed from the bam file.
  if( useSJout )
  {
    SRjunc = getJuncBySJout( SJout="SJ.out.tab", SoutputDir, chrom=chrom, s=s, e=e )
  } else {
    SRjunc = getJuncBySam( SRalignment, SoutputDir, chrom=chrom, s=s, e=e )
  }
  ############################################################################
  ############ STARlong mapping and junction sites for long reads
  ############################################################################
  ############ for phqv ############
  if( !is.null(LRphqv) )
  {
    LoutputDir = paste0(outputDir,"/LR")
    LRalignment = paste0(LoutputDir,"/Aligned.out.sam")
    system( paste0( "mkdir -p " , LoutputDir ) )
    starLong( genomeDir=genomeDir , LR=LRphqv , outputDir=LoutputDir , cores=cores , SJ=NULL )
    LRread = getReadByJI( LRalignment , LoutputDir )
    # Hard-coded position cutoff -- presumably the NRXN1 locus boundary on chr2;
    # TODO confirm against the coordinates used in juncToExon() below.
    LRread = subset(LRread , start < 50150000 )
    exp = phqvExp(LRphqv,LoutputDir) # get coverage for all phqv
    LRread = merge( LRread , exp , by="id" )
    LRread$coverage = LRread$full_length_coverage
    LRinfo = getLRinfo( LRread , chrom=chrom , s=s , e=e )
    LRread = LRinfo$LRread
    LRjunc = LRinfo$LRjunc
    LRtag = LRinfo$LRtag
  }
  ############################################################################
  ############ grid searching
  ############################################################################
  # Optionally precompute the long/short junction matching once (saves time at
  # the cost of not refreshing it after each correction round).
  if( fixedMatchedLS )
  {
    matchedLS = matchLSjunc( LRjunc , SRjunc )
  } else {
    matchedLS = NULL
  }
  # Grid-search (thresSR x thresDis) and take the first maximum-score cell.
  score = gridSearch( LRjunc , SRjunc , thresSR , thresDis , adjustNCjunc , matchedLS , fuzzyMatch )
  ij = which( score==max(score) , arr.ind=T )
  ts = thresSR[ ij[1,1] ]
  td = thresDis[ ij[1,2] ]
  cat( ts , td , score[ij] , '\n ')
  correction = generateCorrectedIsoform( LRjunc , SRjunc, LRtag , LRread , ts , td , matchedLS , fuzzyMatch )
  print(correction[[1]][c(2,3)])
  EoutputDir = paste0(outputDir,"/STAR2bSMRT")
  system( paste0( "mkdir -p " , EoutputDir ) )
  # NOTE(review): setwd() changes the caller's working directory and is never
  # restored (no on.exit) -- all outputs below land in EoutputDir.
  setwd( EoutputDir )
  genome = readDNAStringSet(genomeFasta)
  ###############################################################################################################
  # Junction-level agreement between long- and short-read support (scatter + log fold change).
  pdf( paste0( "JuncExp_LR_ts",ts,"_td",td,".pdf") )
  juncExp = do.call( rbind, lapply( correction , function(x) x$LSjuncCount ))
  lrCount = log10(juncExp$lrCount)
  srCount = log10(juncExp$srCount)
  juncCorr = cor.test(srCount,lrCount,method='spearman')$estimate
  cols = sapply( juncExp$motif , function(x) ifelse(x==0,1,2) )
  cols[juncExp$motif==1] = 3
  plot( lrCount , srCount , col=cols , pch=17 , main=paste0("JuncExp by Long and Short Reads: r=",signif(juncCorr,3)) , xlab="Log10 Long Read" , ylab="Log10 Short Read" )
  abline(lm( srCount~lrCount ))
  par(mfrow=c(2,1))
  log10fc = lrCount - srCount
  JuncNames = paste(juncExp$start , juncExp$end)
  barplot( log10fc , cex.names=0.6 , col=cols , ylab="log10(lrCount/srCount)", names=JuncNames , las=3 )
  dev.off()
  ###############################################################################################################
  # Convert junction lists to exon structures; genomic bounds are hard-coded
  # (50149082-51255411) -- presumably the NRXN1 gene span; TODO confirm.
  tag = paste0( "exp", correction[[chrom]]$normalizedIsoformCount )
  exonList = juncToExon( juncList=correction[[chrom]]$isoform , s=50149082 , e=51255411 , tag=tag )
  ###############################################################################################################
  # Emit isoform sequences (fasta) and structures (gff), flagging translatability.
  seq = generateSeq( genome=genome , isoform=exonList )
  fastaName = paste0( "isoform_ts",ts,"_td",td,".fa")
  translated = sapply( seq$translated , function(x) ifelse( x , "translated" , "untranslated" ) )
  names(seq$rna) = paste(names(seq$rna),translated,sep="_")
  writeXStringSet( seq$rna , fastaName )
  gffName = paste0( "isoform_ts",ts,"_td",td,".gff")
  names(exonList) = paste( names(exonList) , translated , sep="_" )
  writeGff( isoform=exonList , file = gffName )
  ###############################################################################################################
  # Quantify isoforms with short reads (kallisto) and correlate with long-read counts.
  # NOTE(review): 'chr2' is hard-coded here while `chrom` is used above -- these
  # diverge if the function is run on another chromosome; confirm intent.
  kallisto = kallistoQuant( fastaName , SR1 , SR2 , EoutputDir )
  Sexp = log10(kallisto$tpm+1)
  Lexp = log10(correction[['chr2']]$normalizedIsoformCount+1)
  LSQuantCorr = cor.test(Lexp,Sexp)$estimate
  LSQuantPval = cor.test(Lexp,Sexp)$p.val
  ###############################################################################################################
  pdf( paste0( "Quant_LR_ts",ts,"_td",td,".pdf") )
  cols = sapply( seq$translated , function(x) ifelse(x,2,1) )
  plot( Lexp , Sexp , pch=16 , col=cols , main=paste0("Quantification by Long and Short Reads: r=",signif(LSQuantCorr,3)) , xlab="Log10 Long Read" , ylab="Log10 Short Read" )
  abline(lm( Sexp~Lexp ))
  dev.off()
  ###############################################################################################################
  # (filename typo "gridSeach" is kept -- renaming it would change output paths)
  pdf( "gridSeach.pdf" )
  heatmap( score , Rowv = NA, Colv = NA, scale='none' )
  dev.off()
  ###############################################################################################################
  # One-row summary of the chosen thresholds and agreement statistics.
  isoformNum = sum(sapply(correction,function(x)x$uniNum))
  isoformFrac = mean(sapply(correction,function(x)x$frac))
  info = data.frame( shortRead=ts , distance=td , isoformNum=isoformNum , isoformFrac=isoformFrac , translated=sum(seq$translated) , juncCorr , LSQuantCorr , LSQuantPval )
  write.table(info,"summary.txt",quote=F,sep="\t",col.names=T,row.names=F)
}
|
100a5c1a1fd0203342f593e6e21c7516c14c2599
|
329a5521af71c51419c7d40c6defd4ba659fa03c
|
/man/rdoublebinom.Rd
|
cb70a756058d1fcf5ca308d19bba593850d55427
|
[
"MIT"
] |
permissive
|
jin-yuyu/DoubleRobGam
|
96bb106e0733e99a43c489516d596a161c3ccd7e
|
2808246aaa29f4538978a2516dcf832837848a22
|
refs/heads/master
| 2023-07-14T18:46:43.361846
| 2021-08-20T08:21:55
| 2021-08-20T08:21:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 618
|
rd
|
rdoublebinom.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/generalFunctions.R
\name{rdoublebinom}
\alias{rdoublebinom}
\title{Random generator of Double Exponential Binomial-type data (counts)}
\usage{
rdoublebinom(size, n, prob, theta)
}
\arguments{
\item{size}{sample size needed}
\item{n}{number of trials (called \code{size} in \code{rbinom})}
\item{prob}{vector of probabilities}
\item{theta}{dispersion parameter/function. \code{theta = 1} (the default) corresponds to the Binomial distribution}
}
\description{
Random generator of Double Exponential Binomial-type data (counts)
}
|
1b6b33a9b84b61e63634e69c0c0844d2c6b17dbe
|
a09711cc3dfa313ed6c95467d8cad1f375503c8b
|
/01_Examples_Scripts.R
|
67d3fc6fe68fde7750af85a7eb207b6e94fcde53
|
[] |
no_license
|
MigueldaCosta/learnxts
|
c466687c75faab661d93129e52bdf0797abf61dc
|
8abed8112203566a0c1ab42c044606d81d1a2e88
|
refs/heads/master
| 2021-07-10T16:01:50.913131
| 2017-10-11T21:12:59
| 2017-10-11T21:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,124
|
r
|
01_Examples_Scripts.R
|
# Studying the xts and zoo packages -- course/tutorial notes, not a runnable
# pipeline. NOTE(review): several objects (flights, temps_1, temps_2,
# temps_xts, temps_xts_2, labels, lty, index, data_1, data_2_xts) are assumed
# to be preloaded in the workspace (e.g. by a DataCamp course); running this
# script top-to-bottom will error without them.
install.packages("xts")
library(xts)
# XTS = MATRIX + INDEX
# creating a matrix:
x <- matrix(1:4, ncol = 2, nrow = 2)
# creating an index
idx <- as.Date(c("2015-01-01", "2015-02-01"))
# creating a time series by using xts function and order by index:
X <- xts(x, order.by = idx)
X
# https://www.rdocumentation.org/packages/xts/versions/0.9-7
# Convert date column to a time-based class
flights$date <- as.Date(flights$date)
# Convert flights to an xts object using as.xts (dropping column 5, the date)
flights_xts <- as.xts(flights [ , -5], order.by = flights$date)
# Check the class of flights_xts
class(flights_xts)
# Examine the first five lines of flights_xts
head(flights_xts, 5)
# Identify the periodicity of flights_xts
periodicity(flights_xts)
# Identify the number of periods in flights_xts
nmonths(flights_xts)
# Find data on flights arriving in BOS in June 2014 (ISO-8601 range subsetting)
flights_xts["2014-06"]
# Use plot.xts() to view total monthly flights into BOS over time
plot.xts(flights_xts$total_flights)
# Use plot.xts() to view monthly delayed flights into BOS over time
plot.xts(flights_xts$delay_flights)
# Use plot.zoo() to view all four columns of data in their own panels
plot.zoo(flights_xts, plot.type = "multiple", ylab = labels)
# Use plot.zoo() to view all four columns of data in one panel
plot.zoo(flights_xts, plot.type = "single", lty = lty)
legend("right", lty = lty, legend = labels)
# Calculate percentage of flights delayed each month: pct_delay
flights_xts$pct_delay <- (flights_xts$delay_flights / flights_xts$total_flights) * 100
# Use plot.xts() to view pct_delay over time
plot.xts(flights_xts$pct_delay)
# Calculate percentage of flights cancelled each month: pct_cancel
flights_xts$pct_cancel <- (flights_xts$cancel_flights/flights_xts$total_flights) * 100
# Calculate percentage of flights diverted each month: pct_divert
flights_xts$pct_divert <- (flights_xts$divert_flights/flights_xts$total_flights) * 100
# Use plot.zoo() to view all three trends over time
plot.zoo(x = flights_xts[ , c("pct_delay", "pct_cancel", "pct_divert")])
# Save your xts object to rds file using saveRDS
saveRDS(object = flights_xts, file = "flights_xts.rds")
# Read your flights_xts data from the rds file
flights_xts2 <- readRDS("flights_xts.rds")
# Check the class of your new flights_xts2 object
class(flights_xts2)
# Examine the first five rows of your new flights_xts2 object
head(flights_xts2, 5)
############## Merging using rbind()
# Confirm that the date column in each object is a time-based class
class(temps_1$date)
class(temps_2$date)
# Encode your two temperature data frames as xts objects
temps_1_xts <- as.xts(temps_1[, -4], order.by = temps_1$date)
temps_2_xts <- as.xts(temps_2[, -4], order.by = temps_2$date)
# View the first few lines of each new xts object to confirm they are properly formatted
head(temps_1_xts)
head(temps_2_xts)
# Use rbind to merge your new xts objects (rows interleave by time index)
temps_xts <- rbind(temps_1_xts, temps_2_xts)
# View data for the first 3 days of the last month of the first year in temps_xts
first(last(first(temps_xts, "1 year"), "1 month"), "3 days")
################# Generating a monthly average
# Split temps_xts_2 into separate lists per month #this generates the list
monthly_split <- split(temps_xts_2$mean , f = "months")
# Use lapply to generate the monthly mean of mean temperatures
mean_of_means <- lapply(monthly_split, FUN = mean)
# Use as.xts to generate an xts object of average monthly temperature data
# NOTE(review): `index` here must be a pre-built vector of month endpoints,
# not the base function index() -- confirm in the course workspace.
temps_monthly <- as.xts(as.numeric(mean_of_means), order.by = index)
# Compare the periodicity and duration of your new temps_monthly and flights_xts
periodicity(temps_monthly)
periodicity(flights_xts)
############## Using merge() and plotting over time
# Use merge to combine your flights and temperature objects (joins on index)
flights_temps <- merge(flights_xts, temps_monthly)
# Examine the first few rows of your combined xts object
head(flights_temps)
# Use plot.zoo to plot these two columns in a single panel
plot.zoo(flights_temps[,c("pct_delay", "temps_monthly")], plot.type = "single", lty = lty)
legend("topright", lty = lty, legend = labels, bg = "white")
############## Plots of periods, zooming plots
# Identify the periodicity of temps_xts
periodicity(temps_xts)
# Generate a plot of mean Boston temperature for the duration of your data
plot.xts(temps_xts$mean)
# Generate a plot of mean Boston temperature from November 2010 through April 2011
plot.xts(temps_xts$mean["2010-11/2011-04"])
# Use plot.zoo to generate a single plot showing mean, max, and min temperatures during the same period
plot.zoo(temps_xts["2010-11/2011-04"], plot.type = "single", lty = lty)
############# WORKFLOW FOR MERGING #############
# 1. Encode all time-series objects to xts
data_1_xts <- as.xts(data_1, order.by = index)
# 2. Examine and adjust periodicity
periodicity(data_1_xts)
to.period(data_1_xts, period = "years")
# 3. merge xts objects
merged_data <- merge(data_1_xts, data_2_xts)
69a5e2717af2594a2c97b04b454b50bf07b75358
|
7ab599bd9ec468a07835e1fd7048a46bfcb8e5e3
|
/Variable Importance Using WOE And IV.R
|
f73295b1daa0f799fec3b71fe9dcba645f28ab79
|
[] |
no_license
|
debasishdutta/R_Programming_Feature_Engineering
|
fed9d69245c10275bb73b29bc40b6caf096a0a23
|
177b3bf8880a9cdad138ea0da8f78f8cf9b4b80a
|
refs/heads/master
| 2022-05-28T14:51:44.030342
| 2020-05-04T06:04:05
| 2020-05-04T06:04:05
| 261,091,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,453
|
r
|
Variable Importance Using WOE And IV.R
|
################################################################################################
######################### Weight of Evidance & Information Value ##########################
######################### Developer: Debasish Dutta #################################
######################### Date: December 2017 ###############################
###### Input: 1. Train Data (Mixed Data Type Allowed) ######
###### 2. Test Data (Mixed Data Type Allowed) ######
###### 3. Dependent Variable As Numeric(Value i.e. 0/1) ######
###### 4. No of Bins To Be Created For Numeric Var ######
###### Output: 1. A List (First Element: IV Table) ######
###### (Second Element: WOE Table) ######
################################################################################################
#' Compute Information Value (IV) and Weight of Evidence (WOE) tables for a
#' binary-outcome dataset using the Information package.
#'
#' @param df_train training data (mixed types allowed)
#' @param df_test  validation data (mixed types allowed)
#' @param dep_var  name of the numeric 0/1 dependent variable
#' @param n_bin    number of bins for numeric predictors
#' @return a list: [[1]] the IV summary table, [[2]] the per-variable WOE table,
#'   both with numeric columns rounded to 3 decimals
woe_iv_function <- function(df_train, df_test, dep_var, n_bin) {
  ###################### Installing & Loading Information Package ########################
  if (!require("Information", character.only = TRUE))
  {
    install.packages("Information", dep = TRUE)
    if (!require("Information", character.only = TRUE))
      stop("Information Package not found")
  }
  # Helper: round every numeric column of a data frame to 3 decimals
  # (was duplicated inline for both the IV and WOE tables).
  round_numeric_cols <- function(tbl) {
    data.frame(lapply(tbl, function(y)
      if (is.numeric(y))
        round(y, 3)
      else
        y))
  }
  ###################### Generating Information Value Table ########################
  obj_iv <-
    create_infotables(
      data = df_train,
      valid = df_test,
      y = dep_var,
      bins = n_bin
    )
  iv_table_final <- round_numeric_cols(obj_iv$Summary)
  ###################### Generating Weight of Evidence Table ########################
  # Preallocate and bind once instead of growing with rbind() inside the loop
  # (avoids O(n^2) copying for many predictors).
  woe_list <- vector("list", length(obj_iv$Tables))
  for (i in seq_along(obj_iv$Tables)) {
    temp_table <- obj_iv$Tables[[i]]
    # First column of each per-variable table is named after the variable;
    # record that name in its own column and normalise the bin column name.
    temp_table$Variable_Name <- names(obj_iv$Tables[[i]])[1]
    names(temp_table)[1] <- "Bins"
    woe_list[[i]] <- temp_table[, c(7, 1:6)]
  }
  woe_table_final <- round_numeric_cols(do.call(rbind, woe_list))
  final_result <- list(iv_table_final, woe_table_final)
  return(final_result)
}
|
598b7caa5b5bf17c126b6d48029a2faf16ffaffe
|
cd6f95463534a1c4fe88b1963ca2809d9961e6ec
|
/process/microarray/annotate.r
|
112c0c2c051085031c29ff27ec41cabf84b5c90d
|
[
"Apache-2.0"
] |
permissive
|
Manikgarg/ebits
|
aeaa5ba957397a6644ccc6769ef5d3e9ffc8477a
|
d70aa56b6d8eb7ac8aa73215f935b117deedc254
|
refs/heads/master
| 2020-03-11T16:12:52.066875
| 2018-04-18T13:44:25
| 2018-04-18T13:44:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,919
|
r
|
annotate.r
|
#' Mapping of annotation identifier to annotation package
#'
#' there should be a mapping between the two available somewhere?
#' List of BioC annots: https://www.bioconductor.org/packages/3.3/data/annotation/
# Chip/platform identifier -> Bioconductor annotation package.
# (Removed a duplicated "pd.hugene.1.0.st.v1" entry; list() allowed the
# duplicate name but [[ ]] only ever returned the first, so this is a no-op
# for lookups while keeping names() unique.)
mapping = list(
    "pd.hg.u95a" = "hgu95a.db",
    "pd.hg.u95b" = "hgu95b.db",
    "pd.hg.u95av2" = "hgu95av2.db", # A-AFFY-1, GPL8300
    "hgu133a" = "hgu133a.db", # A-GEOD-14668
    "pd.hg.u133a" = "hgu133a.db", # A-AFFY-33, GPL96
    "pd.hg.u133b" = "hgu133b.db", # A-AFFY-34, GPL97
    "pd.hg.u133.plus.2" = "hgu133plus2.db", # A-AFFY-44, GPL570
    "pd.hg.u133a.2" = "hgu133a2.db", # A-AFFY-37, GPL571
    "pd.hg.u219" = "hgu219.db", # A-GEOD-13667, GPL13667
    "pd.hugene.1.0.st.v1" = "hugene10sttranscriptcluster.db", # A-AFFY-141, GPL6244
    "pd.hugene.1.1.st.v1" = "hugene11sttranscriptcluster.db",
    "pd.hugene.2.0.st" = "hugene20sttranscriptcluster.db",
    "pd.hugene.2.1.st" = "hugene21sttranscriptcluster.db",
    "pd.huex.1.0.st.v1" = "huex10sttranscriptcluster.db", # A-AFFY-143
    "pd.huex.1.0.st.v2" = "huex10sttranscriptcluster.db",
    "pd.ht.hg.u133a" = "hthgu133a.db", # A-AFFY-76
    # "pd.ht.hg.u133.plus.pm" = ???,
    "pd.hta.2.0" = "hta20sttranscriptcluster.db",
    "u133aaofav2" = "hthgu133a.db" # needs to be manually processed by affy
)
#' Function to annotate expression objects
#'
#' @param normData A normalized data object, or a list thereof
#' @param summarize IDs to annotate with: hgnc_symbol
#' @return The annotated data
# S3 generic: dispatches on the class of `normData` (list, ExpressionSet,
# NChannelSet or matrix methods are defined below in this file).
annotate = function(normData, summarize = "hgnc_symbol", ...) UseMethod("annotate")
# Annotate each element of a list of expression objects.
# Elements whose annotation fails become NA (via the project's %catch%
# operator) and are dropped with a warning; an error is raised only if every
# element fails.
# Fix: forward `summarize` to the per-element call -- it was previously
# accepted but silently ignored, so list inputs always got the default ids.
annotate.list = function(normData, summarize="hgnc_symbol") {
    re = lapply(normData, function(x) annotate(x, summarize = summarize) %catch% NA)
    if (any(is.na(re)))
        warning("dropping ", names(re)[is.na(re)])
    if (all(is.na(re)))
        stop("all annotations failed")
    re[!is.na(re)]
}
# Annotate an ExpressionSet: resolve the platform annotation package from the
# chip id stored in normData@annotation via `mapping`, annotate the expression
# matrix, and rebuild the ExpressionSet so sample metadata (phenoData) is kept.
annotate.ExpressionSet = function(normData, summarize="hgnc_symbol") {
    annotation = mapping[[normData@annotation]]
    if (is.null(annotation))
        stop("No annotation package found for: ", normData@annotation)
    # read metadata and replace matrix by annotated matrix
    # (delegates to annotate.matrix below)
    emat = annotate(as.matrix(exprs(normData)),
                    annotation = annotation,
                    summarize = summarize)
    Biobase::ExpressionSet(assayData = emat,
                           phenoData = phenoData(normData))
}
# Annotate a (single-channel) NChannelSet by mapping probe names to the
# requested identifier type via the project's idmap module, returning an
# ExpressionSet with the original phenoData attached.
annotate.NChannelSet = function(normData, summarize="hgnc_symbol") {
    if (! "E" %in% ls(assayData(normData)))
        stop("only single-channel implemented atm")
    # note: there are agilent annotation packages available, but the
    # array ID is not saved in normData@annotation
    # NOTE(review): the `to` parameter is accepted but the body uses
    # `summarize` captured from the enclosing scope -- `to` is effectively
    # unused; confirm whether it should be passed through instead.
    map_channel = function(expr, ids, from="agilent", to=summarize) {
        rownames(expr) = ids
        idmap$probeset(expr, from=from, to=summarize)
    }
    idmap = import('../idmap')   # project-local module loader, not base R
    ad = as.list(Biobase::assayData(normData))
    mapped = sapply(ad, map_channel, ids=fData(normData)$ProbeName,
                    simplify=FALSE, USE.NAMES=TRUE)
    es = ExpressionSet(mapped$E)
    phenoData(es) = phenoData(normData)
    es
}
# Annotate an expression matrix whose rownames are probe ids: map probes to
# gene symbols or Entrez ids using the given annotation package, drop unmapped
# probes, and average replicate rows per identifier with limma::avereps.
annotate.matrix = function(normData, annotation, summarize="hgnc_symbol") {
    # load annotation package
    # NOTE(review): the biocLite bootstrap over http is deprecated in current
    # Bioconductor (BiocManager::install is the replacement) -- confirm the
    # BioC version this project targets before modernising.
    if (!require(annotation, character.only=TRUE)) {
        source("http://bioconductor.org/biocLite.R")
        biocLite(annotation)
        library(annotation, character.only=TRUE)
    }
    # work on expression matrix, summarize using limma
    if (summarize == "hgnc_symbol")
        rownames(normData) = annotate::getSYMBOL(as.vector(rownames(normData)), annotation)
    else if (summarize == "entrezgene")
        rownames(normData) = annotate::getEG(as.vector(rownames(normData)), annotation)
    else
        stop("Method", summarize, "not supported, only 'hgnc_symbol', 'entrezgene'")
    # drop probes with no mapping, then collapse duplicate ids by averaging
    limma::avereps(normData[!is.na(rownames(normData)),])
}
|
41858d23707d8a163aa98d93aa4ca08afd0b491a
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5813_0/rinput.R
|
9d019cf5f0871743aa2337df0a5e113408b5c615
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read the Newick tree for gene family 5813_0, remove its root, and write the
# unrooted tree back out (as required by downstream codeml processing).
library(ape)
testtree <- read.tree("5813_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5813_0_unrooted.txt")
|
f3251d0fe2b98bdd924189d2ff57947537ebc7d1
|
97c2cfd517cdf2a348a3fcb73e9687003f472201
|
/R/src/SystemDB/R/System.R
|
5d30f076482d27f73b720f78427cdefa3599b125
|
[] |
no_license
|
rsheftel/ratel
|
b1179fcc1ca55255d7b511a870a2b0b05b04b1a0
|
e1876f976c3e26012a5f39707275d52d77f329b8
|
refs/heads/master
| 2016-09-05T21:34:45.510667
| 2015-05-12T03:51:05
| 2015-05-12T03:51:05
| 32,461,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,469
|
r
|
System.R
|
# System Class
#
# Author: rsheftel
###############################################################################
# Construct a System object wrapping a named trading system; fails fast if the
# name is not present in SystemDB's "System" table.
# NOTE(review): constructor()/extend()/constructorNeeds()/failIf()/squish()
# come from the project's in-house OO framework -- semantics assumed from usage.
constructor("System", function(system=NULL){
    this <- extend(RObject(), "System", .system=system)
    constructorNeeds(this, system="character")
    if (inStaticConstructor(this)) return(this)
    failIf(!SystemDB$alreadyExists('System','Name',system),squish('System does not exist: ',system))
    return(this)
})
# Parameter-value (PV) set names registered for this system in SystemDB.
method("pvNames", "System", function(this, ...){
    return(SystemDB$pvNamesForSystem(this$.system))
})
# Names of the parameters defined for this system in SystemDB.
method("parameters", "System", function(this, ...){
    return(SystemDB$parameterNames(this$.system))
})
# Register a new parameter-value (PV) set for this system: validates the PV
# name is unused and every supplied parameter is known, fills missing
# parameters with the string 'NA', then inserts rows into the
# ParameterValues, SystemDetails and (optionally) BloombergTags tables.
# Writes only happen when commitToDB=TRUE; otherwise the SystemDBManager
# appears to run in dry-run mode -- confirm against SystemDBManager.
method("addPV", "System", function(this, pvName, parameterList=NULL, asOfDate, interval, version, bloombergTag=NULL, commitToDB=FALSE, ...){
    needs(pvName='character', parameterList='list(character)?', asOfDate='character', interval='character', version='character', bloombergTag='character?', commitToDB='logical')
    failIf(SystemDB$alreadyExists('ParameterValues', 'Name', pvName), squish('PVName already exists: ',pvName))
    parameters <- this$parameters()
    # Reject any parameter in the supplied list that the system does not define.
    for (parameter in names(parameterList)){
        failIf(!(parameter %in% parameters), squish('Invalid parameter name: ',parameter))
    }
    # Build values in the canonical parameter order; unspecified ones become 'NA'.
    parameterValues <- NULL
    for (parameter in parameters){
        listValue <- parameterList[[parameter]]
        if(is.null(listValue)){
            listValue <- 'NA'
            print(squish('Parameter not defined in parameterList, will be empty in systemDB: ', parameter))
        }
        parameterValues <- c(parameterValues, as.character(listValue))
    }
    sys <- SystemDBManager()
    sys$commitToDB(commitToDB)
    print("Inserting to ParameterValues table...")
    res <- sys$insertParameterValuesTable(system=this$.system, pvName=pvName, parameterNames=parameters, parameterValues=parameterValues, asOfDate=asOfDate)
    # Each insert returns either a logical or a temp-file path; anything else is an error message.
    failIf((!is.logical(res) && !matches(tempDirectory(),res)), res)
    print("Inserting to SystemDetails table...")
    this$.testfile <- sys$insertSystemDetailsTable(systemName=this$.system, version=version, interval=interval, pvName=pvName)
    failIf((!is.logical(this$.testfile) && !matches(tempDirectory(),res)), this$.testfile)
    if(!is.null(bloombergTag)){
        print("Inserting to BloombergTags table...")
        res <- sys$insertBloombergTag(SystemDB$systemID(system=this$.system, interval=interval, version=version, pvName=pvName), bloombergTag)
        failIf((!is.logical(res) && !matches(tempDirectory(),res)), res)
    }
})
|
8f94a8d0c1dba8efd905a9029eba2bb56efeb0e1
|
64bcb6a325629f3af563b5f8de5d208777f1688f
|
/man/visualize_corrections.Rd
|
270a7c5657bccc5eb8dec74b7bedc5561d82d2a2
|
[] |
no_license
|
fdbesanto2/cdynfgeo
|
8a8309357e5bedc6f569479fa9bc6a8fc8b22ee9
|
86fd59e37c731db5b8018895e872d53f985b9313
|
refs/heads/master
| 2022-11-12T09:45:46.465857
| 2020-07-01T14:52:03
| 2020-07-01T14:52:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 613
|
rd
|
visualize_corrections.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize_corrections.R
\name{visualize_corrections}
\alias{visualize_corrections}
\title{Visualize corrections made by the consolidate_data() function}
\usage{
visualize_corrections(df, dbhmin = 500, plotsPerPage = 25)
}
\arguments{
\item{df}{data.table: The output of the function consolidate_data()}
\item{dbhmin}{numeric: the minimum dbh of trees to plot}
\item{plotsPerPage}{numeric: number of plots (stemids) per page}
}
\value{
A list of ggplots
}
\description{
Visualize corrections made by the consolidate_data() function
}
|
5caae7fea221ebbc80ba4da45b4d499f43cb0cd3
|
51fdd67d355df9ed4378bc86e432386edd31d4ca
|
/man/scale_colour_gradient2_tableau.Rd
|
5e0ff19c80c5c6a86ddf968f1b51340fd2f98199
|
[] |
no_license
|
bobbbbbi/Data-Visualisation-with-ggplot2-ggthemes
|
f434c6dede486849236ddc26d6a31a3ee093ffe9
|
6ff7f1589b6199bf4c11ffde12b5fed9ceee4fce
|
refs/heads/master
| 2020-06-19T08:08:40.380067
| 2017-05-01T00:02:28
| 2017-05-01T00:02:28
| 94,182,518
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,760
|
rd
|
scale_colour_gradient2_tableau.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tableau.R
\name{scale_colour_gradient2_tableau}
\alias{scale_colour_gradient2_tableau}
\alias{scale_fill_gradient2_tableau}
\alias{scale_color_gradient2_tableau}
\title{Tableau diverging colour scales (continuous)}
\usage{
scale_colour_gradient2_tableau(palette = "Red-Blue", ..., space = "rgb",
na.value = "grey50", guide = "colourbar")
scale_fill_gradient2_tableau(palette = "Red-Blue", ..., space = "rgb",
na.value = "grey50", guide = "colourbar")
scale_color_gradient2_tableau(palette = "Red-Blue", ..., space = "rgb",
na.value = "grey50", guide = "colourbar")
}
\arguments{
\item{palette}{Palette name. See \code{ggthemes_data$tableau$divergent}.}
\item{...}{Other arguments passed on to \code{\link{discrete_scale}}
to control name, limits, breaks, labels and so forth.}
\item{space}{Colour space in which to calculate gradient.}
\item{na.value}{Colour to use for missing values}
\item{guide}{Type of legend. Use \code{'colourbar'} for continuous
colour bar, or \code{'legend'} for discrete colour legend.}
}
\description{
Tableau diverging colour scales (continuous)
}
\examples{
### scale_color_gradient2_tableau
library("ggplot2")
df <- data.frame(
x = runif(100),
y = runif(100),
z1 = rnorm(100),
z2 = abs(rnorm(100))
)
p <- ggplot(df, aes(x, y)) + geom_point(aes(colour = z2))
p + scale_colour_gradient2_tableau()
p + scale_colour_gradient2_tableau('Orange-Blue')
p + scale_colour_gradient2_tableau('Temperature')
}
\seealso{
Other colour tableau: \code{\link{scale_colour_gradient_tableau}},
\code{\link{scale_colour_tableau}},
\code{\link{tableau_color_pal}},
\code{\link{tableau_div_gradient_pal}},
\code{\link{tableau_seq_gradient_pal}}
}
|
6c127bd42786d9635f2f10057d30283ae2175f69
|
be8f72dfc6406df60603b1e1efc1903daf5351a4
|
/Scripts/enders_chapter6J.R
|
ff3e38e053ba3ec8e4964eb6eef41588d3c6cceb
|
[] |
no_license
|
nigoja97/Econometria
|
492eb2ab917e2f6432d46960b48efc7550fcfa6d
|
8a7ecb47342331a13b9c42bbe67701932794d9ee
|
refs/heads/main
| 2023-06-18T08:26:36.254427
| 2021-07-19T19:59:40
| 2021-07-19T19:59:40
| 343,526,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,233
|
r
|
enders_chapter6J.R
|
#########################################################
#########################################################
###### 6. COINTEGRATION AND ERRORCORRECTION MODELS ######
#########################################################
#########################################################
### PAGE 366
library("gdata")
library("readxl")
library("vars")
library(tseries)
#--------------------------------------------------------------------
# Ejemplo sencillo de cointegración entre 2 variables
set.seed(20210519)
# m random walk
# m(t) = m(t-1) + em
# em ey ez white noise
# y = m + ey
# z = m + ez
n = 25
em = rnorm(n)
ey = rnorm(n)
ez = rnorm(n)
m = em
for (i in 2:n) {
m[i]=m[i-1]+em[i]
}
y = m + ey
z = m + ez
par(mfcol = c(2,1), oma = c(0,0,1,0) + 0.2, mar = c(0,1,0,0) + 1, mgp = c(0, 0.2, 0))
w = y-z
par(mfcol = c(2,1))
plot(y,type="l",xaxs="i",las=1,xlab="",ylab="",tck=.02)
lines(z,lty=2, col="red")
plot(w,type="l",xaxs="i",las=1,xlab="",ylab="",tck=.02)
#--------------------------------------------------------------------
datos = read_excel("C:/Users/NIGOJ/Desktop/Nico/Cosas de la U/Econometria R/Econometria/Bases de Datos/Coint6.xls")
par(mfcol = c(1,1), oma = c(0,0,1,0) + 0.2, mar = c(0,1,0,0) + 1, mgp = c(0, 0.2, 0))
plot(datos$y,type="s",xaxs="i",las=1,xlab="",ylab="",ylim=c(-12,2),tck=.02)
lines(datos$z,lty=2, col="red")
lines(datos$w,lty=3, col="blue")
abline(h=0)
par(mfcol = c(1,1), oma = c(0,0,1,0) + 0.2, mar = c(1,1,0,0) + 1, mgp = c(0, 0.2, 0))
plot(datos$y+datos$z-datos$w,type="l",xaxs="i",las=1,xlab="y+z-w",ylab="",ylim=c(-1,1),tck=.02)
adf.test(datos$y,k=0)
adf.test(datos$z,k=0)
adf.test(datos$w,k=0)
adf.test(diff(datos$y),k=0)
adf.test(diff(datos$z),k=0)
adf.test(diff(datos$w),k=0)
lm1 = summary(lm(datos$y~datos$z+datos$w))
lm1
lm2 = summary(lm(datos$z~datos$y+datos$w))
lm2
lm3 = summary(lm(datos$w~datos$y+datos$z))
lm3
reslm1 = lm1$residuals
reslm2 = lm2$residuals
reslm3 = lm3$residuals
par(mfcol = c(1,1), oma = c(0,0,1,0) + 0.2, mar = c(0,1,0,0) + 1, mgp = c(0, 0.2, 0))
plot(reslm1,type="l",xaxs="i",las=1,xlab="",ylab="",ylim=c(-1,1),tck=.02)
lines(reslm2,lty=1, col="red")
lines(reslm3,lty=1, col="blue")
abline(h=0)
adf.test(reslm1)
adf.test(reslm2)
adf.test(reslm3)
library("urca")
### TABLE 6.2
# Usage
# Augmented-Dickey-Fuller Unit Root Test
#
# ur.df(y,
# type = c("none", "drift", "trend"), lags = 1,
# selectlags = c("Fixed", "AIC", "BIC"))
adf1 = ur.df(datos$y,lag=0)
adf1@testreg
adf1 = ur.df(datos$y,lag=4)
adf1@testreg
adf2 = ur.df(datos$z,lag=0)
adf2@testreg
adf2 = ur.df(datos$z,lag=4)
adf2@testreg
adf3 = ur.df(datos$w,lag=0)
adf3@testreg
adf3 = ur.df(datos$w,lag=4)
adf3@testreg
### TABLE 6.3
adf1 = ur.df(lm1$residuals,lag=0)
adf1@testreg
adf1 = ur.df(lm1$residuals,lag=4)
adf1@testreg
adf2 = ur.df(lm2$residuals,lag=0)
adf2@testreg
adf2 = ur.df(lm2$residuals,lag=4)
adf2@testreg
adf3 = ur.df(lm3$residuals,lag=0)
adf3@testreg
adf3 = ur.df(lm3$residuals,lag=4)
adf3@testreg
DATO = matrix(nrow=nrow(datos)-1,ncol=ncol(datos))
DATO[,1] = diff(datos$y)
DATO[,2] = diff(datos$z)
DATO[,3] = diff(datos$w)
colnames(DATO) = c("dy","dz","dw")
e.w = lm3$residuals
VAR(DATO,p=1,exogen=e.w[-length(e.w)])
# ¿cómo podemos determinar el orden del VAR ?
### PAGE 390
library("urca")
# ca.jo : Johansen Procedure for VAR
# ca.jo(x,
# type = c("eigen", "trace"),
# ecdet = c("none", "const", "trend"),
# K = 2,
# spec=c("longrun", "transitory"),
# season = NULL,
# dumvar = NULL)
#
jo.ci = ca.jo(datos,type="trace",ecdet="none")
summary(jo.ci)
jo.ci@lambda
jo.ci = ca.jo(datos,type="trace",ecdet="const")
summary(jo.ci)
jo.ci@lambda
jo.ci = ca.jo(datos,type="eigen",ecdet="none")
summary(jo.ci)
jo.ci@lambda
jo.ci = ca.jo(datos,type="eigen",ecdet="const")
summary(jo.ci)
jo.ci@lambda
jo.ci = ca.jo(datos,type="trace",ecdet="none")
summary(jo.ci)
jo.ci@lambda
var.ci = vec2var(jo.ci,r=1)
var.ci
plot(irf(var.ci))
## ###########
# jo.ci@V es la matriz de eigenvectors
# normalizados con respecto a la primera variable
#
# en este ejercicio r=1 hay una ecuación de cointegración
# interesa la primera columna de V
ecm = jo.ci@V[,1]%*%t(datos)
plot(c(ecm),type="l",xaxs="i",las=1,xlab="",ylab="",ylim=c(-1,1),tck=.02)
ecm = ecm[1,]
plot(ecm,type="l",xaxs="i",las=1,xlab="",ylab="",ylim=c(-1,1),tck=.02)
# cajorls : OLS regression of VECM
# sistema represenatdo como VECM
vecm1 <- cajorls(jo.ci,r=1)
vecm1
#---------------------------------------------
# --- Quarterly data: cointegration between the 10-year rate and the T-bill ---
datos = read.xls("QUARTERLY.xls")
# Bug fix: the original immediately ran `datos = quarterly`, overwriting the
# data frame just read with an object never defined in this script (a leftover
# from an interactive session). Preserved only as a comment.
# datos = quarterly
datos$DATE=as.yearqtr(datos$DATE)
# Engle-Granger step 1: regress each rate on the other and keep the residuals.
lm1 = summary(lm(datos$r10~datos$Tbill))
res.lm1 = lm1$residuals
# Step 2: ADF test on the residuals (stationarity => cointegration).
adf1 = ur.df(res.lm1,type="drift",lag=1)
adf1@testreg
lm2 = summary(lm(datos$Tbill~datos$r10))
res.lm2 = lm2$residuals
adf2 = ur.df(res.lm2,type="drift",lag=1)
adf2@testreg
# Johansen test on both series jointly.
df = data.frame(datos$r10,datos$Tbill)
jo1 = ca.jo(df)
summary(jo1)
# datos = read.xls("quarterly.xls")
lm1 = summary(lm(datos$IndProd~datos$M1NSA))
plot(lm1$residuals,type="l")
ur.df(lm1$residuals,type="drift",lag=1)
datos = read.xls("real.xls")
lm1 = summary(lm(log(datos$RGDP)~log(datos$RCons)))
lm1
### END
|
7b8db3469db492e890f541fc810381d049a0b24c
|
30e9b3b7fd58c87083c7b02ca7d67a6b13235c2e
|
/exercise/HK80/man/HK80GEO_TO_WGS84GEO.Rd
|
16570d38616abca5bba08e47856ca05ef2bb9298
|
[] |
no_license
|
zhongduowang/programing_in_r_cn
|
76bbb3888b127b8ccf2f23fc7d23d9f5f5ceed2d
|
9a7d994b204b4352d0f5ea3aa76e0fabdb54a678
|
refs/heads/master
| 2020-03-21T14:32:08.947060
| 2018-06-25T15:28:16
| 2018-06-25T15:28:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,316
|
rd
|
HK80GEO_TO_WGS84GEO.Rd
|
\name{HK80GEO_TO_WGS84GEO}
\alias{HK80GEO_TO_WGS84GEO}
\title{
Convert HK80GEO coordinates to WGS84GEO coordinates
}
\description{
Convert HK80GEO coordinates to WGS84GEO coordinates
}
\usage{
HK80GEO_TO_WGS84GEO(latitude, longitude)
}
\arguments{
\item{latitude}{
latitude in decimal degrees
}
\item{longitude}{
longitude in decimal degrees
}
}
\details{
This function utilizes the simplified relationship between HK80GEO and WGS84GEO described on Page B6 to do the transformation.
}
\value{
\item{latitude}{latitude in decimal degrees}
\item{longitude}{longitude in decimal degrees}
}
\references{
Survey & Mapping Office Lands Department, Hong Kong Government (1995).
Explanatory Notes on Geodetic Datums in Hong Kong, available at:
\url{http://www.geodetic.gov.hk/smo/gsi/data/pdf/explanatorynotes.pdf}
}
\author{
Jinlong Zhang
}
\note{
The coordinates should be within the range of Hong Kong.
Providing coordinates outside Hong Kong will lead to wrong results.
}
\seealso{
\code{\link{WGS84GEO_TO_HK80GEO}}
}
\examples{
options(digits = 15)
HK80GEO_TO_WGS84GEO(22.323701767, 114.138734989)
### $latitude
### [1] 22.3221739892222
###
### $longitude
### [1] 114.141179433444
### Answer from the online conversion tool
### 22.322172084
### 114.141187917
}
\keyword{ WGS84GEO }
\keyword{ HK80GEO }
|
6b6d9afa4bc452974db71e5a572468336849e8f3
|
d01ef90f1b72ae02ac5055c7c93c69fcf5a7be5c
|
/scripts/map.QTL.permutedPhenos.R
|
877ce82a4477bad161e5fae9ec9faae236b90d72
|
[] |
no_license
|
ngon/LgSm-DataProcessing
|
3b8dde9e4e75f1e2e6791ec342cda80b6d54070f
|
37c816f5257dcc87802124e87dbd9e445f6d146f
|
refs/heads/master
| 2020-07-04T15:07:14.876067
| 2015-11-21T02:52:33
| 2015-11-21T02:52:33
| 22,928,887
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,318
|
r
|
map.QTL.permutedPhenos.R
|
##### MAP QTL IN F50-56 AIL #####
# This file uses permuted phenotype data (5 permutations for each of 14 traits).
##### GO TO DIRECTORY
setwd("/group/palmer-lab/AIL/LgSm-DataProcessing")
##### CONSTANTS
MAF=0.05
#### READ GENOTYPED SAMPLES AND CHANGE SOME NAMES ----------------------
# 54109_11 changed to 54109 (double ear tag)
# 51717-19 changed to 51717 (double ear tag)
# 51348 changed to 51349 (inferred from ped checks - typo? - double check using sex(genotypes on X).
geno.samples <- read.table("/group/palmer-lab/AIL/GBS/dosage/genotyped.samples.txt", as.is=TRUE)
# geno.samples[which(geno.samples$V1 == "54109_11"), 1] <- "54109"
# geno.samples[which(geno.samples$V1 == "51717-19"), 1] <- "51717"
# geno.samples[which(geno.samples$V1 == "51348"), 1] <- "51349"
names(geno.samples) <- c("id")
ids <- geno.samples$id
# READ TAB DELIMITED FILE WITH PERMUTED PHENOTYPES
# Sex is an indicator variable; M=0 and F=1
pheno <- read.table("./permutedPhenotypes.txt", sep="\t", header=T, as.is=T, nrows=2)
#covars <- read.table("./covariates.orm.txt", sep="\t", header=T, as.is=T)
pheno.names <- names(pheno)
traits.to.perm <- c("act1.t", "act5.t", "startle", "wild.binary", "ppi12.logit", "tail")
#pheno <- cbind(ids[1], pheno)
#### EXTRACT DATA FOR GENOTYPED SAMPLES ------------------------------------
### Generate phenotype data for all samples with genotypes
# permPheno.allgeno <- merge(geno.samples, pheno, all.x=TRUE)
# write.table(permPheno.allgeno, file="permPhenos.allgeno.txt",
# quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE)
### Make covariate file with all ids in genotype file
# get.filenames <- function(trait){
# filenames <- c()
# for (i in 1:100){
# filenames[i] <- paste0("/group/palmer-lab/AIL/qtlmapping/covariates/permCovs/perm",
# i, ".", trait, ".cov.txt")
# }
# return(filenames)
# }
# files <- lapply(traits.to.perm, get.filenames)
# files <- unlist(files)
# merge.covars <- function(covFile, genoFile, ids){
# covariate <- read.table(covFile, as.is=T, header=T)
# covariate <- cbind(ids, covariate)
# covariate <- merge(genoFile, covariate, header=T, all.x=TRUE)
# covariate$one <- 1
# write.table(covariate[-1], file=paste0(covFile,"2"), sep="\t", row.names=F, col.names=F, quote=F)
# }
#
# lapply(files, merge.covars, genoFile=geno.samples, ids=ids)
#
### MAKE SCRIPT TO RUN GEMMA FOR CHOSEN TRAIT ----------------------------------------------
cmds <- c()
for (pheno in pheno.names) {
index.pheno <- which(pheno.names == pheno)
chosen.covars <- paste0("/group/palmer-lab/AIL/qtlmapping/covariates/permCovs/",
pheno, ".cov.txt")
for (chrom in 1:19) {
cmds <- c(cmds, paste0("gemma -g /group/palmer-lab/AIL/GBS/dosage/onlyEmpirical/chr", chrom,
".filtered.dosage -p /group/palmer-lab/AIL/LgSm-DataProcessing/permPhenos.allgeno.txt -k /group/palmer-lab/AIL/qtlmapping/kinship/onlyEmpirical/identity.kinship -a /group/palmer-lab/AIL/GBS/dosage/onlyEmpirical/chr",chrom, ".filtered.snpinfo -c ", chosen.covars," -lmm 2 -maf ", MAF, " -o ", pheno, ".chr", chrom, " -n ", index.pheno))
}
write.table(cmds, file=paste0("/group/palmer-lab/AIL/code/gemma.permPheno.emp.cmds"),
row.names=F, col.names=F, quote=F)
}
|
85ca2006cde6a14f78c89d3220f3f3872a97eaf3
|
77acf64d79612a57014f519d16635624d82ee725
|
/all/compareSpectrum.R
|
5fecce272ad2743391bc283c489475c21d31d9bc
|
[] |
no_license
|
wanghuanwei-gd/cancer
|
9ad01f6f1da805fdcff11caab5938ac7c1251834
|
5983ed246f9ae9e249761e407b521b585609a386
|
refs/heads/master
| 2021-01-24T20:01:34.616622
| 2016-06-04T04:29:26
| 2016-06-04T04:29:26
| 65,798,692
| 1
| 0
| null | 2016-08-16T07:41:55
| 2016-08-16T07:41:54
| null |
UTF-8
|
R
| false
| false
| 72
|
r
|
compareSpectrum.R
|
/Share/BP/zhenglt/02.pipeline/cancer/mutationSignature/compareSpectrum.R
|
774d95f36cedcdce33f846d1290b92beab423a42
|
b5defe24e626043a866d63be9ecaa550822e77b1
|
/zad26.R
|
e4ec15f33c985d12c6b042f0d3e7af0a1c6a7d49
|
[] |
no_license
|
marcin-mulawa/Zadania-R
|
df47e70d1bb39a93115992ec1ae2d50c837a8140
|
3d97df77b900bf4169379c95df6a4c4b60e6d6b6
|
refs/heads/master
| 2022-04-10T15:02:04.431437
| 2020-03-25T11:15:16
| 2020-03-25T11:15:16
| 249,969,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49
|
r
|
zad26.R
|
# Build the index vector, then emit the greeting once per element.
v <- 1:20
invisible(lapply(v, function(i) print('Witaj swiecie')))
|
4f2cf2481a3b6a339f2a77a9ceea392b5cb0450a
|
fc18ec80388622ade08d1ed6af6b64cd2b01b687
|
/man/DB_weightedIdx.Rd
|
048fb521b52ac97141979a93cf6441415a6c9c27
|
[] |
no_license
|
yannabraham/Radviz
|
2942d96e8a2fbe337250c10a592aa17b0e1fe514
|
c9c0d92c6e0acb7b6df6d913f3835156e0acbe68
|
refs/heads/master
| 2022-05-15T06:33:44.126045
| 2022-03-25T09:00:31
| 2022-03-25T09:00:31
| 46,946,711
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 945
|
rd
|
DB_weightedIdx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DB_weightedIdx.R
\name{DB_weightedIdx}
\alias{DB_weightedIdx}
\title{Computation of weighted version of the Davies-Bouldin index. This index serves as a measure of clustering quality of a 2D projection result with known class labels}
\usage{
DB_weightedIdx(x, className = NULL)
}
\arguments{
\item{x}{an object of class Radviz, as returned by \code{\link{do.radviz}}}
\item{className}{the name of the class column to use}
}
\value{
weighted DB index value
}
\description{
Computation of weighted version of the Davies-Bouldin index. This index serves as a measure of clustering quality of a 2D projection result with known class labels
}
\details{
If \code{className} is left \code{NULL} (the default) the function expects a single extra column on top of
the data columns (used to define springs) and the standard \code{Radviz} columns.
}
\author{
Nicolas Sauwen
}
|
669a6d55ea71479e70c169e768d7de0bc15b8ac8
|
56abe0a36be4fea9f4339654b94ebe9c2bcdb41d
|
/code/UniversalFunction2.R
|
1d0b14ba221c58778fafc2194ad2baa55297e5f0
|
[] |
no_license
|
SSoleymani/YieldCurve
|
7f13f58c7479a578f94f447b73adcbd9ee08e215
|
8b3f812e805d8ea2981c4393f9103360403dfa55
|
refs/heads/master
| 2020-04-08T05:30:21.269380
| 2018-11-26T04:58:47
| 2018-11-26T04:58:47
| 159,063,179
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,732
|
r
|
UniversalFunction2.R
|
universal_function_remove_outlier = function(outlier_detection_method_name = "chull-95", input_data)
{
  ################################## Remove Outlier ##############################
  # Remove outlying (Term, yield) observations from `input_data` using the
  # requested detection scheme and return the cleaned data frame.
  #
  # outlier_detection_method_name: one of "Mixture Model", "chull-60" ..
  #   "chull-95", "Interquartile-Static-Bin", "Interquartile-Dynamic-Bin".
  #   Any other value returns the input unchanged.
  # input_data: data frame with numeric columns `Term` and `yield`.
  removed_outlier <- input_data
  if(outlier_detection_method_name == "Mixture Model")
  {
    # Fit a Gaussian mixture and filter on per-point classification uncertainty.
    pacman::p_load(mclust)
    fitted_vals <- Mclust(input_data[ , c("Term", "yield")])
    threshold = 0.05
    # NOTE(review): this KEEPS points whose uncertainty is ABOVE the threshold.
    # If the intent was to discard uncertain points, the comparison may be
    # inverted -- confirm against callers before changing.
    removed_outlier <- input_data[which(fitted_vals$uncertainty > threshold),]
  }
  else if(startsWith(outlier_detection_method_name, "chull"))
  {
    # Convex-hull peeling: repeatedly strip the outermost hull of the point
    # cloud until removing another layer would retain fewer than `limit`
    # (e.g. 95%) of the original rows.
    limit = switch(outlier_detection_method_name, "chull-95" = 0.95, "chull-90" = 0.9, "chull-85" = 0.85, "chull-80" = 0.8, "chull-75" = 0.75, "chull-70" = 0.7, "chull-65" = 0.65, "chull-60" = 0.6 )
    dataLen <- length(input_data[,1])
    final_data = input_data
    ratio = 1.0
    while(TRUE){
      # Indices of the current outermost hull, then the data with it removed.
      chullIdx <- chull(final_data[ , c("Term", "yield")])
      chullData <- final_data[setdiff(1:length(final_data[,1]),chullIdx),]
      ratio = length(chullData[,1])/dataLen
      if(ratio > limit){
        # Still above the retention limit: accept this peel and remember it.
        final_chull = final_data
        final_data = chullData
        final_chullIdx = chullIdx
      }
      else{
        break
      }
    }
    removed_outlier <- final_data
  }
  else if(outlier_detection_method_name == "Interquartile-Static-Bin")
  {
    # Tukey fences (1.5 * IQR) applied within fixed Term windows of width 3,
    # stepping through terms 2..50.
    sorted_input_data = input_data[with(input_data, order(Term)), ]
    # Seed with the first row so rbind() has a frame to grow; dropped at the end.
    clean_input_data = sorted_input_data[1,]
    for(i in seq(2,50, by = 3)){
      box = sorted_input_data[sorted_input_data$Term >=i & sorted_input_data$Term <= i+3, ]
      quant = quantile(box$yield)
      iqr = quant[4] - quant[2]
      # Keep only rows inside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
      box = box[(box$yield <= quant[4] + (1.5 * iqr)) & (box$yield >= (quant[2] - 1.5 * iqr)),]
      clean_input_data = rbind(clean_input_data, box)
    }
    removed_outlier <- clean_input_data[-c(1),]
  }
  else if(outlier_detection_method_name == "Interquartile-Dynamic-Bin")
  {
    # Same Tukey fences, but over bins of 100 consecutive Term-sorted rows
    # rather than fixed Term windows.
    sorted_input_data = input_data[with(input_data, order(Term)), ]
    clean_dynamic = sorted_input_data[1,]
    boxSize = 100
    for(i in seq(boxSize,length(sorted_input_data[,1])-boxSize, by = boxSize)){
      box = sorted_input_data[i:(i+boxSize-1), ]
      quant = quantile(box$yield)
      iqr = quant[4] - quant[2]
      box = box[(box$yield <= quant[4] + (1.5 * iqr)) & (box$yield >= (quant[2] - 1.5 * iqr)),]
      clean_dynamic = rbind(clean_dynamic, box)
    }
    removed_outlier <- clean_dynamic[-c(1),]
  }
  return(removed_outlier)
}
universal_function_regression = function(regression_method_name, input_data)
{
  ################################## Regression ##############################
  # Fit a yield curve through `input_data` (expects numeric columns `Term`
  # and `yield`, plus `Ticker` for the "Capital IQ" method) and return a
  # two-column matrix: column 1 = term, column 2 = fitted yield.
  #
  # regression_method_name: one of "lm", "rlm", "loess", "KernelSmoothing",
  #   "locpoly", "ridge", "Capital IQ". Any other name falls through every
  #   branch and returns NULL.
  if(regression_method_name == "lm")
  {
    # Box-Tidwell estimates a power transform of Term before the linear fit.
    pacman::p_load(car)
    btw <- boxTidwell(yield ~ Term ,data=input_data)
    lmFit <- lm(yield~I(Term ^ btw$result[3]),data = input_data)
    return(cbind2(input_data$Term, fitted(lmFit)))
  }
  else if(regression_method_name == "rlm")
  {
    # Robust variant: boxTidwell() comes from `car`, rlm() from `MASS`.
    # Bug fix: the original loaded only MASS, so boxTidwell() failed unless
    # the "lm" branch had already attached `car` in the same session.
    pacman::p_load(car, MASS)
    btw <- boxTidwell(yield ~ Term ,data=input_data)
    rlmFit <- rlm(yield~I(Term ^ btw$result[3]),data = input_data)
    return(cbind2(input_data$Term, fitted(rlmFit)))
  }
  else if(regression_method_name == "loess")
  {
    # Local quadratic smoother; the symmetric family downweights outliers.
    loess_result <- loess(input_data$yield ~ input_data$Term, span = 0.6, degree = 2, family="symmetric" )
    return(cbind2(loess_result$x, loess_result$fitted))
  }
  else if(regression_method_name == "KernelSmoothing")
  {
    # Bug fix: the original condition was wrapped in a stray cbind2() call
    # (`else if(cbind2(regression_method_name =="KernelSmoothing"))`), which
    # coerced the logical into a 1x1 matrix instead of testing it directly.
    pacman::p_load(KernSmooth)
    ksmooth_result <- ksmooth(x = input_data$Term,y = input_data$yield,
                              bandwidth = 10, range.x = c(min(input_data[,1]),max(input_data[,1])))
    return(cbind2(ksmooth_result$x, ksmooth_result$y))
  }
  else if(regression_method_name == "locpoly")
  {
    # Local cubic polynomial fit evaluated on a 400-point grid.
    pacman::p_load(KernSmooth)
    locpoly_result <- locpoly(x = input_data$Term,y = input_data$yield,
                              bandwidth = 30, range.x = c(min(input_data[,1]),max(input_data[,1])),
                              degree = 3, bwdisc = 25, gridsize =400)
    return(cbind2(locpoly_result$x, locpoly_result$y))
  }
  else if(regression_method_name == "ridge")
  {
    # Ridge regression (alpha = 0) on a cubic polynomial basis of Term, with
    # the penalty chosen by cross-validation.
    pacman::p_load(glmnet)
    ts = cbind(input_data$Term,input_data$Term^2, input_data$Term^3)
    glmfit = cv.glmnet(ts , input_data$yield, alpha = 0, lambda = 10^seq(10, -10, by = -.1))
    opt_lambda = glmfit$lambda.min
    min_term = min(input_data$Term)
    max_term = max(input_data$Term)
    # Predict the whole evaluation grid in one vectorized call instead of
    # growing a matrix with rbind() inside a loop (same values, O(n)).
    grid = seq(min_term, max_term, 0.01)
    ridge_vals = predict(glmfit, s = opt_lambda, newx = cbind(grid, grid^2, grid^3))
    return(cbind2(grid, ridge_vals[,1]))
  }
  else if(regression_method_name == "Capital IQ")
  {
    # Average of per-ticker piecewise-linear yield curves sampled every half
    # year; only tickers with at least 4 observations participate.
    library(sqldf)
    ticker_names <- sqldf("SELECT Ticker FROM input_data GROUP BY Ticker Having COUNT(1) > 3 ")
    DataByTicker <- sqldf("SELECT * FROM input_data WHERE Ticker IN ticker_names ORDER BY Term")
    minValTerm = min(DataByTicker$Term)
    maxValTerm = max(DataByTicker$Term)
    stepTerm = 0.5
    # Seed row so rbind() has a frame to grow; dropped at the end.
    final_value = data.frame(Term = 0, yield = 0)
    for(i in seq(minValTerm, maxValTerm, stepTerm)){
      avg_val = 0
      counter = 0
      for(ticker_name in ticker_names[,1]){
        vals = DataByTicker[DataByTicker$Ticker == ticker_name,]
        for(term in seq(1,length(vals[,1])-1,1)){
          # Linear interpolation of this ticker's curve at term i.
          if(i >= vals[term,1] && i <= vals[term + 1, 1]){
            avg_val = avg_val + ((vals[term + 1, 2] - vals[term, 2])*(i-vals[term, 1])/(vals[term + 1, 1] - vals[term, 1])) + vals[term, 2]
            counter = counter + 1
            break
          }
        }
      }
      final_value = rbind(final_value, c(i, avg_val/counter))
    }
    final_value = final_value[-c(1),]
    return(cbind2(final_value$Term, final_value$yield))
  }
}
double_smoothing = function(data)
{
  # Two-pass loess smoother: fit y ~ x, then smooth the fitted values again.
  # Returns a two-column matrix: column 1 = x, column 2 = doubly smoothed y.
  pass_one <- loess(data[,2] ~ data[,1])
  pass_two <- loess(pass_one$fitted ~ pass_one$x)
  cbind2(pass_two$x, pass_two$fitted)
}
fully_smooth = function(data){
  # Apply double_smoothing() repeatedly until the data reaches a fixed point,
  # but at most 4 update passes.
  pass <- 0
  while (pass < 4) {
    candidate <- double_smoothing(data)
    if (identical(data, candidate)) {
      break
    }
    data <- candidate
    pass <- pass + 1
  }
  data
}
# result = universal_function("Mixture Model", "loess", BBB1Data)
# result = result[order(result[,1]),]
#
#
# pacman::p_load(ggplot2)
# ggplot() +
# geom_point(aes(x = result[,1], y = result[,2], colour = "result point")) +
# geom_line(aes(x = result[,1], y = result[,2], colour = "result")) +
# geom_line(aes(x = Bloomberg_data$Term, y = Bloomberg_data$`BS187 Mid Yld USD US INDUSTRIALS BBB+ BVAL YIELD CURVE 12/30/16 CUSIP`, colour = "Bloomberg BBB+")) +
# geom_line(aes(x = Bloomberg_data$Term, y = Bloomberg_data$`BS188 Mid Yld USD US INDUSTRIALS BBB- BVAL YIELD CURVE 12/30/16 CUSIP`, colour = "Bloomberg BBB-"))
#
|
88e5d457d4c68d55af68d105dad9b57e61ef60e5
|
44bb8677f66af56471e650370441680757479ad3
|
/R/plotgroups.boxplot.R
|
1915c084a71da52d962ff4de3e604105dcfcc5b3
|
[] |
no_license
|
ilia-kats/imisc
|
930ae08eca9055c3d73e9118d291cba0c10f2e56
|
80618a220a986b31295d9029964e18d717f978d0
|
refs/heads/master
| 2021-01-17T02:23:40.848856
| 2018-04-12T11:52:57
| 2018-04-12T11:52:57
| 47,841,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,926
|
r
|
plotgroups.boxplot.R
|
#' @templateVar plottype boxplot (extended with mean, standard deviation, and standard error of the mean)
#' @templateVar additionalparams
#' \item{swarm}{whether to overplot the boxplot with a beeswarm plot}
#' \item{swarmcols}{color of the beeswarm points}
#' \item{beeswarmpars}{additional parameters passed to \code{\link{plotgroups.beeswarm}}}
#' \item{...}{additional parameters passed to \code{\link[graphics]{boxplot}} as \code{...}. Can also
#' contain the following:
#' \describe{
#' \item{meanlty, meanlwd, meancol, meanpch, meancex}{Mean line type, line width,
#' color, point character, and point size expansion. The default
#' \code{meanpch=NA} suppresses the point, and \code{meanlty="blank"}
#' does so for the line. Note that \code{meanlwd} defaults to 3x the
#' default lwd and \code{meancol} defaults to \code{"red"}}
#' \item{sdwhisklty, sdwhisklwd, sdwhiskcol}{Whisker line type(default:
#' \code{"solid"}), width, and color for standard deviation whiskers.}
#' \item{sdstaplelty, sdstaplelwd, sdstaplecol, sdstaplewex}{Staple (end of whisker) line type,
#' width, color (default: black), and length of standard deviation whiskers}
#' \item{semwhisklty, semwhisklwd, semwhiskcol}{Whisker line type(default:
#' \code{"solid"}), width, and color (default: \code{"#EDA217"})
#' of standard error of the mean whiskers.}
#' \item{semstaplelty, semstaplelwd, semstaplecol, semstaplewex}{Staple (end of whisker) line type,
#' width, color (default: \code{"#090E97"}), and length of standard error
#' of the mean whiskers}
#' \item{cistaplelty, cistaplelwd, cistaplecol, cistaplewex}{Staple (end of whisker) line type,
#' width, color (default: \code{"#EDA217"}), and length of confidence interval
#' whiskers}}}
#' @template plotgroups.-
#' @return Same as \code{\link[graphics]{boxplot}}
#' @seealso \code{\link[graphics]{boxplot}}
#' @export
#' @importFrom rlist list.merge
#' @importFrom graphics boxplot segments points
plotgroups.boxplot <- list(
plot=function(data, at, stats, colors, features, barwidth, swarm=FALSE, swarmcols='black', beeswarmpars=NULL, ...)
{
lwd.base <- par("lwd")
dots <- list(...)
pars <- list(notch=TRUE, notch.frac=0.5, outpch=NA,
meanlty=1, meanlwd=3*lwd.base, meancol="red", meanpch=NA, meancex=1,
sdwhisklty=1, sdwhisklwd=lwd.base, sdwhiskcol="black",
sdstaplelty=1, sdstaplelwd=lwd.base, sdstaplecol="black", sdstaplewex=0.5,
semwhisklty=0, semwhisklwd=lwd.base, semwhiskcol="#090E97",
semstaplelty=1, semstaplelwd=lwd.base, semstaplecol="#090E97", semstaplewex=0.5,
ciwhisklty=0, ciwhisklwd=lwd.base, ciwhiskcol="#EDA217",
cistaplelty=1, cistaplelwd=lwd.base, cistaplecol="#EDA217", cistaplewex=1)
pars$boxwex <- barwidth
pars$boxlwd <- par('lwd')
if (!("median" %in% features)) {
pars$medlty <- "blank"
pars$medpch <- NA
}
if (!("box" %in% features)) {
pars$boxlty <- "blank"
pars$boxfill <- "transparent"
}
if (!("iqr" %in% features)) {
pars$whisklty <- "blank"
pars$staplelty <- "blank"
} else {
if (is.null(pars$whisklty))
pars$whisklty <- "22"
if (is.null(pars$staplelty))
pars$staplelty <- "22"
}
if (length(dots) > 0)
pars <- list.merge(pars, dots)
calc_notch_coords <- function(x, y, wex=1, stats=NULL, conf=NULL) {
wex <- wex * pars$boxwex / 2
if (is.null(stats) || is.null(conf))
return(list(x1=x - wex, x2=x + wex, y=y))
n <- ifelse(y < stats[3,], conf[1,], conf[2,])
intersect <- (y - stats[3,]) * (pars$notch.frac * 0.5) / (n - stats[3,]) + 0.5 * (pars$boxwex * pars$notch.frac)
intersect[intersect < 0] <- wex
x1 <- pmax(x - wex, x - intersect)
x2 <- pmin(x + wex, x + intersect)
list(x1=x1, x2=x2, y=y)
}
plotsd <- function(bxpstats=NULL, conf=NULL) {
if ("sd" %in% features) {
uc <- calc_notch_coords(at, stats$means + stats$sds, pars$sdstaplewex, bxpstats, conf)
lc <- calc_notch_coords(at, stats$means - stats$sds, pars$sdstaplewex, bxpstats, conf)
segments(uc$x1, uc$y, uc$x2, uc$y, lend='butt', col=pars$sdstaplecol, lwd=pars$sdstaplelwd, lty=pars$sdstaplelty)
segments(lc$x1, lc$y, lc$x2, lc$y, lend='butt', col=pars$sdstaplecol, lwd=pars$sdstaplelwd, lty=pars$sdstaplelty)
segments(at, stats$means + stats$sds, at, stats$means - stats$sds, lend='butt', col=pars$sdwhiskcol, lty=pars$sdwhisklty, lwd=pars$sdwhisklwd)
}
}
havesd <- FALSE
if (max(stats$means + stats$sds, na.rm=TRUE) > max(stats$boxmax, na.rm=TRUE) && min(stats$means - stats$sds, na.rm=TRUE) < min(stats$boxmin, na.rm=TRUE)) {
plotsd()
havesd <- TRUE
}
bxp.toreturn <- do.call(boxplot, list.merge(pars, list(x=data, at=at, xaxt="n", col=colors, yaxt='n', add=TRUE, range=stats$range)))
if (!havesd) {
plotsd(bxp.toreturn$stats, bxp.toreturn$conf)
havesd <- TRUE
}
if ("mean" %in% features) {
mc <- calc_notch_coords(at, stats$means, 1, bxp.toreturn$stats, bxp.toreturn$conf)
segments(mc$x1, mc$y, mc$x2, mc$y, lend='butt', lty=pars$meanlty, lwd=pars$meanlwd, col=pars$meancol)
points(at, stats$means, pch=pars$meanpch, cex=pars$meancex, col=pars$meancol)
}
if ("sem" %in% features) {
uc <- calc_notch_coords(at, stats$means + stats$sems, pars$semstaplewex, bxp.toreturn$stats, bxp.toreturn$conf)
lc <- calc_notch_coords(at, stats$means - stats$sems, pars$semstaplewex, bxp.toreturn$stats, bxp.toreturn$conf)
segments(uc$x1, uc$y, uc$x2, uc$y, lend='butt', lty=pars$semstaplelty, lwd=pars$semstaplelwd, col=pars$semstaplecol)
segments(lc$x1, lc$y, lc$x2, lc$y, lend='butt', lty=pars$semstaplelty, lwd=pars$semstaplelwd, col=pars$semstaplecol)
segments(at, stats$means +stats$sems, at, stats$means - stats$sems, lend='butt', lty=pars$semwhisklty, lwd=pars$semwhisklwd, col=pars$semwhiskcol)
}
if ("ci" %in% features) {
uc <- calc_notch_coords(at, stats$cimax, pars$cistaplewex, bxp.toreturn$stats, bxp.toreturn$conf)
lc <- calc_notch_coords(at, stats$cimin, pars$cistaplewex, bxp.toreturn$stats, bxp.toreturn$conf)
segments(uc$x1, uc$y, uc$x2, uc$y, lend='butt', lty=pars$cistaplelty, lwd=pars$cistaplelwd, col=pars$cistaplecol)
segments(lc$x1, lc$y, lc$x2, lc$y, lend='butt', lty=pars$cistaplelty, lwd=pars$cistaplelwd, col=pars$cistaplecol)
segments(at, stats$cimax, 1:length(data), stats$cimin, lend='butt', lty=pars$ciwhisklty, lwd=pars$ciwhisklwd, col=pars$ciwhiskcol)
}
if (swarm) {
args <- list(data=data, at=at, stats=stats, colors=swarmcols, features=NA, barwidth=barwidth)
if (!is.null(beeswarmpars) && length(beeswarmpars))
args <- list.merge(beeswarmpars, args)
swarm.toreturn <- do.call(plotgroups.beeswarm$plot, args)
} else {
swarm.toreturn <- NULL
}
invisible(list(boxplot=bxp.toreturn, beeswarm=swarm.toreturn))
},
ylim=function(data, stats, features, ...) {
dots <- list(...)
if (!is.null(dots$swarm)) {
swarm <- dots$swarm
} else {
swarm <- formals(plotgroups.boxplot$plot)$swarm
}
if (swarm)
do.call(plotgroups.beeswarm$ylim, c(list(data=data, stats=stats, features=NA), dots$beeswarmpars))
else {
NULL
}
},
features=allparamcheck
)
|
302d5dbc7b22a604ac050491670722994ef4f6e8
|
00d84ff34c2c3c82e03413c5178bacf11459ad7d
|
/functions/fitscore.R
|
17bd5d5526522a318ed1aaaf5255d553fcb4d285
|
[] |
no_license
|
stokeinfo/DGLoadMiner
|
1641a065ee63e5fd64941b18bcd387a2e506a928
|
ec51e1d753b46da137ebbd394d95586f75f2222d
|
refs/heads/master
| 2020-12-30T09:37:51.996652
| 2014-09-29T18:58:46
| 2014-09-29T18:58:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
fitscore.R
|
# fitscore: correlation between smoothed peak demand and peak generation
# over a calendar window (defaults to June 1 - June 30).
fitscore <- function(df, m1=6, d1=1, m2=6, d2=30){
  # Pull the demand and generation series for the same calendar window.
  demand <- multiday(df, m1, d1, m2, d2,
                     analyzedemand=T)
  generation <- multiday(df, m1, d1, m2, d2,
                         analyzegeneration=T)
  # Time-of-day intervals covered by the generation data.
  gen_intervals <- strftime(generation$datetime, "%H:%M")
  # Restrict demand to those same time-of-day intervals.
  demand_in_window <- demand[strftime(demand$datetime, "%H:%M") %in% gen_intervals, ]
  # Maximum net KW per time interval for each series.
  demand_max <- ddply(demand_in_window, .(ggtime), summarize, maxnetKW=max(netKW))
  gen_max <- ddply(generation, .(ggtime), summarize, maxnetKW=max(netKW))
  # Smooth both series with smoothkfs(), then correlate the smoothed values.
  demand_max$kfs <- smoothkfs(demand_max$maxnetKW)
  gen_max$kfs <- smoothkfs(gen_max$maxnetKW)
  cor(demand_max$kfs, gen_max$kfs)
}
|
03d38b376553ed47b9911848ec7efe0622355fcf
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/rds_delete_db_proxy.Rd
|
c3d4be69d7ecc9c1fd1975f5513922e29e2493fe
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 464
|
rd
|
rds_delete_db_proxy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds_operations.R
\name{rds_delete_db_proxy}
\alias{rds_delete_db_proxy}
\title{Deletes an existing DB proxy}
\usage{
rds_delete_db_proxy(DBProxyName)
}
\arguments{
\item{DBProxyName}{[required] The name of the DB proxy to delete.}
}
\description{
Deletes an existing DB proxy.
See \url{https://www.paws-r-sdk.com/docs/rds_delete_db_proxy/} for full documentation.
}
\keyword{internal}
|
c79ebf1fc472c536d59b7e57283ce51a107a1009
|
e34c6b5b46a16501607a472b78a82cc631fa65a9
|
/Practicas_TareasU2/Practica3.r
|
ea4719108df2e11a4824d3aadfa34a0d5096e8b0
|
[] |
no_license
|
manuelorozcotoro/Mineria_De_Datos
|
379598c8045dbf14aa03141f1ee37b3c8cdebd2f
|
595aedb734c045c1e2f804817d016242d3fd756c
|
refs/heads/development
| 2020-12-30T01:54:25.851040
| 2020-06-17T03:44:40
| 2020-06-17T03:44:40
| 238,821,758
| 0
| 3
| null | 2020-06-17T03:44:41
| 2020-02-07T01:42:13
|
R
|
ISO-8859-1
|
R
| false
| false
| 2,689
|
r
|
Practica3.r
|
getwd()
setwd("/users/Dell/Desktop/DataMining-master/MachineLearning/MultipleLinearRegression/")
getwd()
# Importando el conjunto de datos
dataset <- read.csv('50_Startups.csv')
# Codificación de datos categóricos
dataset$State = factor(dataset$State,
levels = c('New York', 'California', 'Florida'),
labels = c(1,2,3))
dataset
# División del conjunto de datos en el conjunto de entrenamiento y el conjunto de prueba Install.packages ('caTools')
library(caTools)
set.seed(123)
split <- sample.split(dataset$Profit, SplitRatio = 0.8)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Atomation BackwardElimination Function
backwardElimination <- function(x, sl) {
numVars = length(x)
for (i in c(1:numVars)){
# Ajuste de la regresión lineal múltiple al conjunto de entrenamiento regresor = lm (fórmula = Beneficio ~ RDSpend + Administración + Marketing. Gasto + Estado)
regressor = lm(formula = Profit ~ ., data = x)
maxVar = max(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"])
if (maxVar > sl){
j = which(coef(summary(regressor))[c(2:numVars), "Pr(>|t|)"] == maxVar)
x = x[, -j]
}
numVars = numVars - 1
}
return(summary(regressor))
}
SL = 0.05
training_set
backwardElimination(training_set, SL)
par(mfrow=c(2,2))
plot(regressor)
# Vemos que existe la linealidad y la homocedacidad está presente (cuando la varianza del error condicional a las variables explicativas es constante a lo largo de las observaciones).
library(car)
vif(regressor) # VIF for each var<4, no multicollinearity , Thus we conclude that assumptions hold true
# Primera iteración de eliminación hacia atrás (eliminar estado)
regressor = lm(formula = Profit ~ `R.D.Spend` + Administration + `Marketing.Spend`,
data = training_set)
summary(regressor)
# Segunda iteración (Eliminar administración)
regressor = lm(formula = Profit ~ `R.D.Spend` + `Marketing.Spend`,
data = training_set)
summary(regressor)
# Tercera iteración (Eliminar gasto de marketing)
# Final model: profit explained by R&D spend only.
regressor <- lm(formula = Profit ~ `R.D.Spend`,
                data = training_set)
# resumen (regresor)
# Bug fix: the two lines below were free-standing English prose in the
# original script (not commented out), so sourcing the file aborted with a
# syntax error. They are preserved here as comments:
# "We will consider Marketing spend in the model since it is very close to the
#  significance level of 0.05 as well including it increases the R-Squared"
# "Final"
#regresor = lm (fórmula = Profit ~ R.D.Spend+ Marketing.Spend, data = training_set) resumen (regresor)
# Predict profits for the held-out test set and compare with the actual values.
y_pred = predict(regressor, newdata = test_set)
y_pred
test_set$Profit
plot(y_pred, test_set$Profit)
|
2b5cd8438dc08fed065cdc89bed96086d9c9d0c5
|
5e832862b2e36be6ba27e874e98499bc399de699
|
/man/do.states.Rd
|
2a50820d55230709e1522dfd64f7b0338952fd40
|
[] |
no_license
|
dmgatti/DOQTL
|
c5c22306053ddbd03295207702827cf2a715bb70
|
a1a4d170bf5923ca45689a83822febdb46ede215
|
refs/heads/master
| 2021-01-17T02:08:27.831277
| 2019-05-24T19:22:35
| 2019-05-24T19:22:35
| 13,506,518
| 15
| 12
| null | 2019-02-27T13:46:31
| 2013-10-11T18:33:24
|
R
|
UTF-8
|
R
| false
| false
| 760
|
rd
|
do.states.Rd
|
\name{do.states}
\alias{do.states}
\docType{data}
\title{do.states}
\description{
The 36 unphased genotype states for the DO on the autosomes and X chromosome. Also the 8 DO founder letter codes.
}
\usage{do.states}
\format{
A list with 3 elements.
\describe{
\item{\code{auto}}{Character vector with two letter codes for each of the possible DO genotype states.}
\item{\code{X}}{List with two elements, F and M, containing the genotype codes for females and males on the X chromosome.}
\item{\code{founders}}{Character vector containing the founder letter codes.}
}
}
\details{
This contains the letter codes for each of the 36 unphased genotype states in the DO. It also contains the founder letters.
}
\examples{
do.states
}
\keyword{datasets}
|
3206fa4452426d6f7c99437a503cc9abed5ee5d5
|
77f2c4be50c9670b0178fc09c058e520d2450272
|
/tests/testthat/test_compare.R
|
28bbea22ab4f4b99560d2833e79859130b8d13e7
|
[] |
no_license
|
cran/doctr
|
a8c686a76b9c6dde3f3f5545adf68099726b4565
|
ac12a023bd346f75472c8f527a09982c3fb8f996
|
refs/heads/master
| 2021-01-17T22:56:27.013242
| 2017-03-07T14:12:46
| 2017-03-07T14:12:46
| 84,207,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
test_compare.R
|
library(doctr)
library(tidyverse)

context("Compare")

# Setup -----------------------------------------------------------------------
data(mpg)

# Seed the RNG so the sampled comparison data (and therefore the test
# outcomes) are reproducible rather than flaky.
set.seed(42)

mpg2 <- mpg %>%
  mutate(class = as.factor(class))

# Comparison against a random 100-row sample at the default confidence level.
# 'comp_default' (was 'c') avoids shadowing base::c().
comp_default <- mpg2 %>%
  compare(sample_n(mpg2, 100))

# Same comparison at a much stricter confidence level of 0.5.
comp_strict <- mpg2 %>%
  compare(sample_n(mpg2, 100), 0.5)

# FIX: descriptions previously said "comapre()" (typo) and "diagnose()"
# (wrong function) even though all four tests exercise compare().
test_that("compare() returns a valid list", {
  expect_is(comp_default, "list")
})

test_that("compare() result has correct length", {
  expect_length(comp_default, 11)
})

test_that("compare() results have more than two passing checks", {
  suppressWarnings(len <- sum(flatten_lgl(transpose(comp_default)$result)))
  expect_gt(len, 2)
})

test_that("compare() results differ based on ci", {
  suppressWarnings(len <- sum(flatten_lgl(transpose(comp_default)$result)))
  suppressWarnings(len2 <- sum(flatten_lgl(transpose(comp_strict)$result)))
  expect_gt(len, len2)
})
|
ae740d4e148c1185d5fe66dc57df1da5399ab1d0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fbroc/examples/plot.fbroc.perf.paired.Rd.R
|
8f9e7056115933f52875f75d545c7400628f7648
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 448
|
r
|
plot.fbroc.perf.paired.Rd.R
|
library(fbroc)
### Name: plot.fbroc.perf.paired
### Title: Plots the difference between the bootstrapped performance
###   estimate of the first and the second classifier.
### Aliases: plot.fbroc.perf.paired

### ** Examples

# Bootstrap a paired ROC analysis of two classifiers over the same true
# classes, then plot the bootstrapped difference in AUC between them.
data(roc.examples)
paired_roc <- boot.paired.roc(
  roc.examples$Cont.Pred,
  roc.examples$Cont.Pred.Outlier,
  roc.examples$True.Class,
  n.boot = 100
)
auc_difference <- perf(paired_roc, "auc")
plot(auc_difference)
|
999a2b91b79816575e5cd317bd92a81c829df847
|
6cb88733bbbe65abf43cc78f4b2d3e3e51a540f7
|
/man/fitted.metapred.Rd
|
cb996dda6644e69fdf21bfd72ce9b7d858806120
|
[] |
no_license
|
cran/metamisc
|
5ef8449ca492e0855083e619d55d1a24d518ed5d
|
d0bb31976b9999e2b326d2ff5f23619c91b6108a
|
refs/heads/master
| 2022-10-02T06:28:40.286889
| 2022-09-25T11:10:02
| 2022-09-25T11:10:02
| 17,697,425
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,296
|
rd
|
fitted.metapred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metapred.R
\name{fitted.metapred}
\alias{fitted.metapred}
\title{Extract Model Fitted Values}
\usage{
\method{fitted}{metapred}(
object,
select = "cv",
step = NULL,
model = NULL,
as.stratified = TRUE,
type = "response",
...
)
}
\arguments{
\item{object}{object of class metapred}
\item{select}{character. Select fitted values from "cv" (default) or from "global" model.}
\item{step}{character or numeric. Name or number of step to select if \code{select} = "cv". Defaults to best step.}
\item{model}{character or numeric. Name or number of model to select if \code{select} = "cv". Defaults to
best model.}
\item{as.stratified}{logical. If \code{select} = "cv", determines whether returned predictions are stratified in a list
(\code{TRUE}, default) or in their original order (\code{FALSE}).}
\item{type}{character. Type of fitted value.}
\item{...}{For compatibility only.}
}
\description{
Extract the fitted values of a \code{metapred} object. By default returns fitted values of the model in the
cross-validation procedure.
}
\details{
Function still under development, use with caution.
Only returns type = "response".
}
\author{
Valentijn de Jong
}
|
5ea8553a058a2b0be7814bc8ea1ea554324d3ac1
|
fea0a06c07a4f63f076dc375a1eb402497d0fdeb
|
/analyze/analyze.r
|
c6d9ee5b02dd787ae817884b02d6e9bfb0867271
|
[] |
no_license
|
ESS-IA-Eaton/ESS-IA
|
4b649a4b45e7b65636dafa597cf4b36f79b954d4
|
c4ea837795493306523fed44c21e365c44356a20
|
refs/heads/main
| 2023-03-03T04:38:18.063845
| 2021-02-13T17:03:45
| 2021-02-13T17:03:45
| 338,616,140
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
analyze.r
|
# Summarise simulation output: for each data column, write one line of
# "mean,upper95CI,lower95CI" to a user-chosen summary file.

in_data <- read.csv(file.choose(), header = FALSE, sep = ",")  # file with info
out_file <- file.choose()                                      # file to deposit summary
close(file(out_file, open = "w"))  # truncate: erase all existing content

# Hoisted out of the loop: the row count is loop-invariant.
n_runs <- nrow(in_data)

# FIX: the loop variable was named 'var', shadowing stats::var(); also
# replaced the 1:(7*17) pattern with seq_len().
# Assumes the input has exactly 7 * 17 = 119 columns — TODO confirm.
for (col in seq_len(7 * 17)) {
  avg <- mean(in_data[, col])
  ci <- 1.96 * sd(in_data[, col]) / sqrt(n_runs)  # 95% confidence half-width
  cat(paste(avg, avg + ci, avg - ci, sep = ","),
      file = out_file, append = TRUE, eol = "\n")
}
|
d4ab6e5b7367b70bf8442b9e164aec8bfd119c61
|
a5ebc917557b5a8a1a889859befe1d31b20b408c
|
/man/tr2g_fasta.Rd
|
fd82f9576eeb95f60dca03d08ae23536801dac40
|
[
"BSD-2-Clause"
] |
permissive
|
BUStools/BUSpaRse
|
f81e246ca905ace2bd947958f0235222e4a6c10f
|
5b23c9b609ea20259110eb2592720a6019751a90
|
refs/heads/master
| 2022-09-19T15:48:26.420285
| 2022-04-26T15:58:37
| 2022-04-26T15:58:37
| 161,709,230
| 7
| 2
|
BSD-2-Clause
| 2020-04-24T05:45:14
| 2018-12-14T00:04:47
|
R
|
UTF-8
|
R
| false
| true
| 5,338
|
rd
|
tr2g_fasta.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tr2g.R
\name{tr2g_fasta}
\alias{tr2g_fasta}
\title{Get transcript and gene info from names in FASTA files}
\usage{
tr2g_fasta(
file,
out_path = ".",
write_tr2g = TRUE,
use_gene_name = TRUE,
use_transcript_version = TRUE,
use_gene_version = TRUE,
transcript_biotype_use = "all",
gene_biotype_use = "all",
chrs_only = TRUE,
save_filtered = TRUE,
compress_fa = FALSE,
overwrite = FALSE
)
}
\arguments{
\item{file}{Path to the FASTA file to be read. The file can remain gzipped.}
\item{out_path}{Directory to save the outputs written to disk. If this
directory does not exist, then it will be created. Defaults to the current
working directory.}
\item{write_tr2g}{Logical, whether to write tr2g to disk. If \code{TRUE}, then
a file \code{tr2g.tsv} will be written into \code{out_path}.}
\item{use_gene_name}{Logical, whether to get gene names.}
\item{use_transcript_version}{Logical, whether to include version number in
the Ensembl transcript ID. To decide whether to
include transcript version number, check whether version numbers are included
in the \code{transcripts.txt} in the \code{kallisto} output directory. If that file
includes version numbers, then transcript version numbers must be included
here as well. If that file does not include version numbers, then transcript
version numbers must not be included here.}
\item{use_gene_version}{Logical, whether to include version number in the
Ensembl gene ID. Unlike transcript
version number, it's up to you whether to include gene version number.}
\item{transcript_biotype_use}{Character, can be "all" or
a vector of \emph{transcript} biotypes to be used. Transcript biotypes aren't
entirely the same as gene biotypes. For instance, in Ensembl annotation,
\code{retained_intron} is a transcript biotype, but not a gene biotype. If
"cellranger", then a warning will be given. See \code{data("ensembl_tx_biotypes")}
for all available transcript biotypes from Ensembl.}
\item{gene_biotype_use}{Character, can be "all", "cellranger", or
a vector of \emph{gene} biotypes to be used. If "cellranger", then the biotypes
used by Cell Ranger's reference are used. See \code{data("cellranger_biotypes")}
for gene biotypes the Cell Ranger reference uses. See
\code{data("ensembl_gene_biotypes")} for all available gene biotypes from Ensembl.
Note that gene biotypes and transcript biotypes are not always the same.}
\item{chrs_only}{Logical, whether to include chromosomes only, for GTF and
GFF files can contain annotations for scaffolds, which are not incorporated
into chromosomes. This will also exclude haplotypes. Defaults to \code{TRUE}.
Only applicable to species found in \code{genomeStyles()}.}
\item{save_filtered}{If filtering for biotype and chromosomes, whether to
save the filtered fasta file. If \code{TRUE}, the file will be \code{tx_filtered.fa} in
\code{out_path}.}
\item{compress_fa}{Logical, whether to compress the output fasta file. If
\code{TRUE}, then the fasta file will be gzipped.}
\item{overwrite}{Logical, whether to overwrite if files with names of outputs
written to disk already exist.}
}
\value{
A data frame with at least 2 columns: \code{gene} for gene ID,
\code{transcript} for transcript ID, and optionally \code{gene_name} for gene
names.
}
\description{
FASTA files, such as those for cDNA and ncRNA from Ensembl, might have genome
annotation information in the name of each sequence entry. This function
extracts the transcript and gene IDs from such FASTA files.
}
\details{
At present, this function only works with FASTA files from Ensembl, and uses
regex to extract vertebrate Ensembl IDs. Sequence names should be formatted
as follows:\preformatted{ENST00000632684.1 cdna chromosome:GRCh38:7:142786213:142786224:1
gene:ENSG00000282431.1 gene_biotype:TR_D_gene transcript_biotype:TR_D_gene
gene_symbol:TRBD1 description:T cell receptor beta diversity 1
[Source:HGNC Symbol;Acc:HGNC:12158]
}
If your FASTA file sequence names are formatted differently, then you must
extract the transcript and gene IDs by some other means. The Bioconductor
package \code{Biostrings} is recommended; after reading the FASTA file into
R, the sequence names can be accessed by the \code{names} function.
While normally, you should call \code{\link{sort_tr2g}} to sort the
transcript IDs from the output of the \code{tr2g_*} family of functions, If
the FASTA file supplied here is the same as the one used to build the
kallisto index, then the transcript IDs in the output of this function are in
the same order as in the kallisto index, so you can skip \code{\link{sort_tr2g}}
and proceed directly to \code{\link{EC2gene}} with the output of this
function.
}
\examples{
toy_path <- system.file("testdata", package = "BUSpaRse")
file_use <- paste(toy_path, "fasta_test.fasta", sep = "/")
tr2g <- tr2g_fasta(file = file_use, save_filtered = FALSE, write_tr2g = FALSE)
}
\seealso{
ensembl_gene_biotypes ensembl_tx_biotypes cellranger_biotypes
Other functions to retrieve transcript and gene info:
\code{\link{sort_tr2g}()},
\code{\link{tr2g_EnsDb}()},
\code{\link{tr2g_TxDb}()},
\code{\link{tr2g_ensembl}()},
\code{\link{tr2g_gff3}()},
\code{\link{tr2g_gtf}()},
\code{\link{transcript2gene}()}
}
\concept{functions to retrieve transcript and gene info}
|
ed6bbc1b3ea6a99cc9af290124a5d8da3a1ed872
|
625ababd193e2c3ff77081ab0a3640aca26fa7bf
|
/IntroductiontoLinearModels/CalculationofLinearModels.R
|
5c3585dfa570fef747692309c10250f8b35c1291
|
[] |
no_license
|
AndrewS622/Data-Analysis-for-the-Life-Sciences
|
2121db45368efb0e51c31b3688f20ed556c22518
|
7aec64fca87c22b07b86a9002358617c55973917
|
refs/heads/master
| 2022-12-05T18:10:42.885403
| 2020-09-02T00:24:47
| 2020-09-02T00:24:47
| 282,118,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,728
|
r
|
CalculationofLinearModels.R
|
## Collinearity
sex <- factor(rep(c("female", "male"), each = 4))
trt <- factor(c("A", "A", "B", "B", "C", "C", "D", "D"))
X <- model.matrix(~ sex + trt)

# The design matrix is rank-deficient, i.e. its columns are collinear.
c(paste("rank = ", qr(X)$rank), paste("nrow = ", nrow(X)))

# Observed outcome.
Y <- 1:8

# Subtract the contribution of two fixed coefficients from the outcome.
makeYstar <- function(a, b) Y - X[, 2] * a - X[, 5] * b

# With those two coefficients held fixed, solve for the remaining ones by
# least squares and return the sum of squared residuals.
fitTheRest <- function(a, b) {
  Ystar <- makeYstar(a, b)
  Xrest <- X[, -c(2, 5)]
  betarest <- solve(t(Xrest) %*% Xrest) %*% t(Xrest) %*% Ystar
  residuals <- Ystar - Xrest %*% betarest
  sum(residuals^2)
}

fitTheRest(1, 2)

# Evaluate the residual sum of squares over a grid of the two fixed betas.
betas <- expand.grid(-2:8, -2:8)
rss <- mapply(fitTheRest, betas[, 1], betas[, 2])
rss
min(rss)
index <- which(rss == min(rss))
betas[index, ]
# Several beta combinations attain the minimum — a consequence of the
# collinearity.

library(rafalib)
themin <- min(rss)
plot(betas[which(rss == themin), ])
## QR Decomposition
library(downloader)

# Download the spider leg friction dataset if it is not already cached locally.
url <- "https://raw.githubusercontent.com/genomicsclass/dagdata/master/inst/extdata/spider_wolff_gorb_2013.csv"
filename <- "spider_wolff_gorb_2013.csv"
if (!file.exists(filename)) {
  download(url, filename)
}
spider <- read.csv(filename, skip = 1)

# Reference fit via lm() for comparison with the manual solution below.
fit <- lm(friction ~ type + leg, data = spider)
betahat <- coef(fit)

# Solving the least-squares problem through the QR decomposition of the
# design matrix is less prone to numerical instability than forming and
# inverting t(X) %*% X directly.
Y <- matrix(spider$friction, ncol = 1)
X <- model.matrix(~ type + leg, data = spider)
QR <- qr(X)
Q <- qr.Q(QR)
R <- qr.R(QR)
Q[1, 1]
R[1, 1]
(t(Q) %*% Y)[1, 1]

# Instead of beta-hat = (XtX)^-1 * XtY, use beta-hat = R^-1 * (Qt Y);
# it matches the coefficients lm() computed.
solve(R) %*% t(Q) %*% Y
fit$coefficients
|
84314ee10c678fd912ee751863172afbc69255b7
|
27e0500a35d8d3980bfea03363de0a53f4c50314
|
/plotlyflowdataviewer v-2.R
|
388cae017e932e42cace5b176d9f87148dde9627
|
[] |
no_license
|
ecprich/flowing-water
|
991ca3c070b42cb45c1132ad4c334bc5b456e4c2
|
189871150b6bff70dd6474ba5e6a1eea4d3a97d8
|
refs/heads/master
| 2020-04-22T17:07:29.750507
| 2019-02-13T15:25:57
| 2019-02-13T15:25:57
| 170,530,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,638
|
r
|
plotlyflowdataviewer v-2.R
|
# Make sure all of these packages are installed before running.
library(RODBC)
library(BESdata)
library(plotly)
library(dplyr)
library(lubridate)
library(bindrcpp)

# Date range shared by the rain and flow pulls.
rain.start <- as.Date("2018-09-27")  # starting date for flow and rain data
rain.end <- as.Date("2018-11-01")    # end date for flow and rain data

# Pull the rainfall data.  Change 'station' to use a different rain gauge and
# 'daypart'/'interval' to change the reporting interval.
rain <- read.rain(station = 4, start = rain.start, end = rain.end,
                  daypart = "hour", interval = 1)
rain$cumu <- cumsum(na.omit(rain$rainfall.amount.inches))

# Hansen id of the manhole to inspect.
mh.id <- c('ACD362')

# Pull all flow data for the chosen Hansen id.
allflow <- read.flow(mv.manhole_hansen_id = mh.id)

# Restrict the flow data to the same date range as the rain data.
flowdate <- allflow %>%
  filter(reading_datetime >= rain.start & reading_datetime <= rain.end)

# Merge the date-restricted flow data with the rain data.
# BUG FIX: the merge previously used 'allflow', so the 'flowdate' filter was
# computed but never applied; the date filtering now takes effect.
mydata <- merge(flowdate, rain[, c('end.local', 'rainfall.amount.inches', 'cumu')],
                by.x = 'reading_datetime', by.y = 'end.local', all.x = FALSE)

# One plotly trace per measured series; do not change anything below.
p1 <- plot_ly() %>%
  add_trace(data = mydata, x = ~reading_datetime, y = ~depth_inches,
            type = "scatter", mode = "lines", name = "Depth (inches)")
p2 <- plot_ly() %>%
  add_trace(data = mydata, x = ~reading_datetime, y = ~velocity_fps,
            type = "scatter", mode = "lines", name = "Velocity (fps)")
p3 <- plot_ly() %>%
  add_trace(data = mydata, x = ~reading_datetime, y = ~flow_cfs_AxV,
            type = "scatter", mode = "lines", name = "Flow (Cfs)")
# Label typo fixed: "gague" -> "gauge".
p4 <- plot_ly() %>%
  add_trace(data = mydata, x = ~reading_datetime, y = ~rainfall.amount.inches,
            type = "scatter", mode = "lines", name = "Rainfall (inches) at X rain gauge")
p5 <- plot_ly() %>%
  add_trace(data = mydata, x = ~reading_datetime, y = ~cumu,
            type = "scatter", mode = "lines", name = "Cumulative Rainfall")

# Stack the five traces into one linked figure with a shared x axis and a
# date range slider.
z <- subplot(p1, p2, p3, p4, p5, nrows = 5, shareX = TRUE) %>%
  layout(title = "Depth, Velocity, Flow, Rainfall, Cumulative Rainfall",
         xaxis = list(
           rangeslider = list(type = "date"),
           namelength = -1,
           ticks = "inside",
           showspikes = TRUE,
           spikethickness = 1,
           spikemode = "across",
           showgrid = FALSE))
z
|
3f1198d53500e264468b98f3d6f3cc9efb0b1109
|
9e0e911074a02d773632016c1f20be21639e94fa
|
/man/sstr.Rd
|
255fe25812cc4af4aa6309569e7604748bec1bd8
|
[
"MIT"
] |
permissive
|
mmuurr/zzz
|
4ce5b6d7f38eca0c6651f6b8ab7632348ec8fa55
|
9489289d1c1feac374784ff7c806a33f16c26302
|
refs/heads/master
| 2019-08-28T00:32:48.803114
| 2018-05-09T21:52:49
| 2018-05-09T21:52:49
| 57,738,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 697
|
rd
|
sstr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str.R
\name{sstr}
\alias{sstr}
\title{Object structure string.}
\usage{
sstr(obj, ..., .name = FALSE, .newline = FALSE)
}
\arguments{
\item{obj}{Any R object.}
\item{...}{Arguments passed on to \code{\link[utils]{str}}.}
\item{.name}{Provide obj's variable name (as passed when \code{sstr} was called)?
(Adds an additional line to the output.)}
\item{.newline}{Trail the returned string with a newline character?}
}
\value{
A string capturing the results of \code{\link[utils]{str}(obj, ...)}.
}
\description{
Provide internal _str_ucture of an R object as a returned string.
}
\seealso{
\code{\link[utils]{str}}.
}
|
c198feeda861c18d550a75a95681dcc85f17cd7b
|
f160f4532d41201a9ca0849bd1b1527070487eda
|
/Code/figures.R
|
d0746b51446633648993cfc3793018fff79ec27c
|
[] |
no_license
|
rwrandles/freexp
|
f8185a0f115125490c0febb0dd6efefea14cd937
|
570de2e55788000314d8dc6da2350f51988ea0e0
|
refs/heads/master
| 2020-07-04T09:51:58.161133
| 2019-08-14T09:36:29
| 2019-08-14T09:36:29
| 202,247,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,572
|
r
|
figures.R
|
###############################
## Loading required packages ##
###############################
library(ggplot2) ## functions for producing visualizations
library(dplyr) ## functions for data cleaning/manipulation
library(tidyr) ## functions for data cleaning/manipulation
library(maps) ## data and functions for polygons to draw maps

## Set working directory to the location of the master replication folder.
## NOTE(review): the placeholder path " " must be replaced with a real
## directory before running; setwd(" ") as written will raise an error.
setwd(" ")

#########################################################################################################
## Preliminaries -- This section MUST be run in order for other parts of the code to function properly ##
#########################################################################################################

## Load master_table.csv, containing the data frame produced by data_processing.R in the ./Code/ directory.
## Contains all compiled data from the ./Data/ directory, which is used to produce all of the figures in the final paper.
## NOTE(review): the path uses "./data/" here but "./Data/" elsewhere in this
## file -- confirm the intended capitalisation on case-sensitive filesystems.
master_table <- read.csv("./data/master_table.csv",
                         header = TRUE, stringsAsFactors = FALSE)

## Load multivar_model.RData, which contains the multivariate OLS model generated and output by models.R
load(file = "./data/multivar_model.RData")

## Set the default theme for the following figures to theme_minimal(): no background shading, no axis lines, an overall minimalist theme.
theme_set(theme_minimal())
## All of the figures in the paper follow the same color scheme and formatting. The color palette was provideed by ColorBrewer
## The hex codes for the colors used in the palette are:
## #4A1486,
## #6A51A3,
## #807DBA,
## #9E9AC8,
## #BCBDDC,
## #DADAEB,
## #F2F0F7
##
## The URL to the ColorBrewer page for this palette is:
## http://colorbrewer2.org/#type=sequential&scheme=Purples&n=7
##
## General figure theme guidelines:
## Axis titles font size = 18.0
## Axis text font size = 12.0
## Legend title font size = 18.0
## Legend text font size = 12.0
## Histogram bin alpha = 0.6
## CI ribbon alpha = 0.4
##
## Plot fills colored as #9E9AC8
## Plot outlines (color argument) colored as #4A1486
###################################
## Dependent Variable Histograms ##
###################################
## Create ggplot objects for both the normal and log-transformed dependent variables.
## BUG FIX: rows 86 and 122 (countries with unwritten constitutions) are meant
## to be dropped, but 'master_table[-c(86,122)]' dropped COLUMNS 86 and 122;
## the row subscript now includes the required comma, matching the (correct)
## usage later in this file.
vdem_distribution <- ggplot(master_table[-c(86, 122), ], aes(x = freedom_vdem)) + ## normal DV, rows 86 and 122 removed
  geom_histogram(fill = "#9E9AC8", color = "#4A1486", alpha = 0.6) + ## histogram layer with standard fill, outline and alpha
  labs(x = "V-Dem Free Expression", y = "Count (No. of Countries)") + ## X and Y axis labels
  theme(axis.title = element_text(size = 18.0),
        axis.text = element_text(size = 12.0),
        legend.title = element_text(size = 18.0),
        legend.text = element_text(size = 12.0)) ## standard font sizes for axes and legend

vdem_log_distribution <- ggplot(master_table[-c(86, 122), ], aes(x = log_vdem)) + ## log-transformed DV, same row removal
  geom_histogram(fill = "#9E9AC8", color = "#4A1486", alpha = 0.6) + ## histogram layer with standard fill, outline and alpha
  labs(x = "V-Dem Free Expression", y = "Count (No. of Countries)") + ## X and Y axis labels
  theme(axis.title = element_text(size = 18.0),
        axis.text = element_text(size = 12.0),
        legend.title = element_text(size = 18.0),
        legend.text = element_text(size = 12.0)) ## standard font sizes for axes and legend
####################################
## Independent Variable World Map ##
####################################

## Identical copy of master_table with rows 86 and 122 removed, as they are not part of the final analysis (countries with unwritten constitutions).
map_table <- master_table[-c(86,122),]

## Read ./Data/countries_map.csv, which holds the vector of country names as
## they are spelled in the maps package, and overwrite map_table$country with
## it so every spelling matches the polygons in the maps package.
map_table$country <- (read.csv("./Data/countries_map.csv",
                               header = TRUE, stringsAsFactors = FALSE, fileEncoding = "UTF-8-BOM")$x)

## Polygon data from the maps package for drawing the world map, excluding Antarctica and Greenland.
map.world <- map_data("world") %>% filter(!(region %in% c("Antarctica", "Greenland")))

## Join the polygon data with map_table -- this step "applies" the data to the
## polygons to be drawn.
map.world_joined <- left_join(map.world, map_table, by = c("region" = "country"))

## A second, data-free map layer with white borders only, so the filled map
## and the border map can be layered on top of one another.
map.borders <- geom_polygon(data = map.world, aes(x = long, y = lat, group = group), size = 0.15, colour = "white", fill = NA)

map.spi <- ggplot() + ## ggplot object holding both map.world_joined and map.borders
  geom_polygon(data = map.world_joined, aes(x = long, y = lat, group = group, fill = SPI_index)) + ## draw the joined polygons, filled by the SPI_index value from map_table
  scale_fill_gradientn(colors = c("#4A1486", "#6A51A3", "#807DBA", "#9E9AC8", "#BCBDDC", "#DADAEB", "#F2F0F7"),
                       name = "Separate Powers Index",
                       limits = c(0,1),
                       breaks = seq(0,1,0.2)) + ## manual gradient from the standard palette; legend titled, bounded to [0, 1] with ticks every 0.2
  theme_void() + ## empty theme: no axes, labels or background
  map.borders + ## overlay the white country borders
  theme(legend.position = "bottom", legend.title = element_text(size = 16.0)) + ## move the legend below the map; legend title at size 16.0
  guides(fill = guide_colorbar(title.position = "top",
                               barwidth = 20,
                               barheight = 0.8,
                               nbin = 100,
                               title.hjust = 0.5,
                               frame.colour = "black")) + ## colour bar: title on top, manual width/height, finer resolution via nbin, centered title, black frame
  coord_fixed(1.3) ## fix the aspect ratio so width = 1.3 * height
#######################################################
## Hypothesis and Predictive Analysis Visualizations ##
#######################################################

## Hypothetical scenarios: every control held at its mean (dummy_freexp fixed
## at 1) while the main IV sweeps from 0 to 0.5 in increments of 0.02,
## producing a 26 x 7 data frame of cases.
test_cases <- expand.grid(
  SPI_index = seq(0, 0.5, 0.02),
  dummy_freexp = 1,
  mil_spend = mean(master_table$mil_spend),
  nat_inc = mean(master_table$nat_inc),
  mil_events = mean(master_table$mil_events),
  dummy_eu = mean(master_table$dummy_eu),
  dummy_am = mean(master_table$dummy_am)
)

## Run every scenario through the multivariate model; predict() returns the
## fitted DV plus a 95% confidence interval for each of the 26 rows.
pred_results <- predict(multivar_model, newdata = test_cases, interval = "confidence")

## The regression's DV is log-transformed, so invert the transform used in
## models.R to express predictions on the original freedom-index scale.
trans_results <- -1 * exp(pred_results) + 1

## Ribbon plot of predicted free expression across the IV sweep.
pred_graph <- ggplot(
  bind_cols(test_cases, as.data.frame(trans_results)),
  aes(x = SPI_index, y = fit, ymin = lwr, ymax = upr)
) +
  geom_line(color = "#4A1486", size = 1.2) +   ## prediction line, standard colour
  geom_ribbon(alpha = 0.4, fill = "#9E9AC8") + ## 95% CI ribbon, standard fill/alpha
  labs(x = "Separate Powers Index", y = "Predicted level of Free Expression", title = "") +
  theme_minimal() +
  scale_x_continuous(breaks = c(0, 0.2, 0.4)) + ## X-axis breaks at 0, 0.2, 0.4
  lims(y = c(0.5, 1)) +                         ## Y-axis bounded to [0.5, 1]
  theme(axis.title = element_text(size = 18.0), axis.text = element_text(size = 12.0))
## Hypothetical scenarios at the observed minimum and maximum of the main IV,
## with every control held at its mean (a 2 x 7 data frame).
## FIX: renamed from 'predict' to avoid shadowing stats::predict(), which is
## called just below; also corrected the comment, which claimed expand.grid()
## was used when the frame is built with data.frame().
minmax_cases <- data.frame(
  SPI_index = c(min(master_table$SPI_index), max(master_table$SPI_index)),
  dummy_freexp = mean(master_table$dummy_freexp),
  dummy_eu = mean(master_table$dummy_eu),
  dummy_am = mean(master_table$dummy_am),
  mil_spend = mean(master_table$mil_spend),
  nat_inc = mean(master_table$nat_inc),
  mil_events = mean(master_table$mil_events)
)

## Predicted DV with a 95% confidence interval for both scenarios.
results <- predict(multivar_model, newdata = minmax_cases, interval = "confidence")

## Invert the log transform applied to the DV in models.R, returning the
## predictions to the original freedom-index scale.
trans <- -1 * exp(results) + 1

## Bind the scenario inputs to their predictions.
case_table <- bind_cols(minmax_cases, as.data.frame(trans))

## Column chart comparing predicted free expression at the IV's min and max.
## FIX: axis-label typo "Free of Expression" corrected.
case_graph <- ggplot(case_table, aes(x = factor(SPI_index), y = fit, ymin = lwr, ymax = upr)) +
  theme_minimal() +
  geom_col(fill = "#9E9AC8", color = "#4A1486", width = 0.5, alpha = 0.6) + ## one column per case
  geom_point() + ## point at each predicted value
  geom_errorbar(aes(width = 0.3)) + ## 95% CI error bars
  scale_x_discrete(labels = c("Minimum", "Maximum")) + ## label the two cases
  scale_y_continuous(breaks = seq(0, 1, 0.25), limits = c(0, 1)) + ## Y in [0, 1], breaks every 0.25
  labs(x = "Separate Powers Index", y = "Predicted Level of Free Expression") +
  geom_text(size = 6.0, x = c(1, 2), y = c(0.95, 0.95), label = c(paste("[", round(case_table$upr[1], 3), ", ", round(case_table$lwr[1], 3), "]", sep = ""), paste("[", round(case_table$upr[2], 3), ", ", round(case_table$lwr[2], 3), "]", sep = ""))) + ## CI labels formatted "[a, b]" (upr printed first, as before -- TODO confirm intended order)
  theme(axis.title = element_text(size = 18.0), axis.text = element_text(size = 12.0), legend.title = element_text(size = 18.0), legend.text = element_text(size = 12.0))
## Hypothetical scenarios isolating the free-expression provision dummy:
## every other variable held at its mean while dummy_freexp moves from 0 to 1,
## producing a 2 x 7 data frame of cases.
freexp_pred <- expand.grid(
  SPI_index = mean(master_table$SPI_index),
  dummy_freexp = c(0, 1),
  dummy_eu = mean(master_table$dummy_eu),
  dummy_am = mean(master_table$dummy_am),
  mil_spend = mean(master_table$mil_spend),
  nat_inc = mean(master_table$nat_inc),
  mil_events = mean(master_table$mil_events)
)

## Run both scenarios through the multivariate model; predict() returns the
## fitted DV plus a 95% confidence interval for each row.
freexp_results <- predict(multivar_model, newdata = freexp_pred, interval = "confidence")

## Invert the log transform used in models.R so the predictions are expressed
## on the original freedom-index scale.
freexp_trans <- -1 * exp(freexp_results) + 1

## Bind the scenario inputs to their predictions.
freexp_cases <- bind_cols(freexp_pred, as.data.frame(freexp_trans))
freexp_graph <- ggplot(freexp_cases, aes(x = factor(dummy_freexp), y = fit, ymin = lwr, ymax = upr)) + ## create a ggplot object based on the combined dataframes from expand.grid() and predict(), setting the X-axis to the IV, and Y-axis to the predicted value of the DV, along with ymin and ymax values for the 95% CI
theme_minimal() + ## set the theme to theme_minimal()
geom_col(fill = "#9E9AC8", color = "#4A1486", width = 0.5, alpha = 0.6) + ## create a column plot, where the X-axis distinguishes the two cases and the Y-axis displays the level of the DV
geom_point() + ## add points on top of the columns
geom_errorbar(aes(width = 0.35)) + ## create error bars for the 95% CI, and set their width
scale_x_discrete(labels = c("Without Provision", "With Provision")) + ## create a discrete X-axis, with labels for min and max values
scale_y_continuous(breaks = seq(0,1,0.25), limits = c(0,1)) + ## create a continuous Y-axis, with breaks at increments of 0.25, and min and max values of 0 and 1, respectively
labs(x = "Constitutional Provisions for Free Expression", y = "Predicted Level of Free Expression") + ## set axis titles
geom_text(size = 6.0, x = c(1,2), y = c(0.95,0.95), label = c(paste("[", round(freexp_cases$upr[1],3), ", ", round(freexp_cases$lwr[1],3), "]", sep = ""), paste("[", round(freexp_cases$upr[2],3), ", ", round(freexp_cases$lwr[2],3), "]", sep = ""))) + ## create text objects to display the confidence intervals, taking the data from the lwr and upr columns of the dataframe; set their size and manually place them, along with text formatting using paste() in the form of "[lwr, upr]"
theme(axis.title = element_text(size = 18.0), axis.text = element_text(size = 12.0), legend.title = element_text(size = 18.0), legend.text = element_text(size = 12.0)) ## set axis and legend text to their standard font sizes
#################
## Plot Saving ##
#################
## Persist every figure built above into the ./figures/ directory.
## A named list maps each output file name to its plot object, and one loop
## issues the ggsave() calls in the same order as before.
figure_outputs <- list(
  "vdem_distribution.pdf"     = vdem_distribution,
  "vdem_log_distribution.pdf" = vdem_log_distribution,
  "map_fullscale.pdf"         = map.spi,
  "model_results.pdf"         = pred_graph,
  "mean_pred.pdf"             = case_graph,
  "freexp_pred.pdf"           = freexp_graph
)
for (figure_file in names(figure_outputs)) {
  ggsave(figure_file, plot = figure_outputs[[figure_file]], path = "./figures/")
}
|
a9db1043ba706cec4dd96e8d766408cfb4fa023d
|
c7b21b8da05cd2066ac45bb86adbb3266501384c
|
/man/run.nomogram.Rd
|
e1adc8eb6b06b1798c153d2b2e5f81d65f6f639d
|
[
"MIT"
] |
permissive
|
zhangkaicr/doRadiomics
|
1d68a14408773b9e30314d65c097a6e9a11c8cb5
|
458d38aacbebf646cb8588be780ef2c6026fde0c
|
refs/heads/main
| 2023-03-22T00:19:13.636809
| 2021-03-09T04:17:20
| 2021-03-09T04:17:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 346
|
rd
|
run.nomogram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class_definition.R
\name{run.nomogram}
\alias{run.nomogram}
\title{Train nomogram model}
\usage{
run.nomogram(x, dt)
}
\arguments{
\item{x}{a nomogram object}
\item{dt}{the training dataset}
}
\value{
an updated nomogram object
}
\description{
Train nomogram model
}
|
d84d17b74cf1a078cb8cb12c9e75ab1456a52c44
|
d94ded7d1a3a0a89a809962dc3e4239f03ab429b
|
/R/Package_Documentation.R
|
f2caae9e904331ad35d85748c319a803a47b747f
|
[] |
no_license
|
paulstillman/GERGM
|
4a9a7d999ca9fec0cfb26ad82e9cffd34dc37ec0
|
01c3d20249f999b422b7fe04c86f09cf05c8415c
|
refs/heads/master
| 2021-01-12T17:48:24.303688
| 2016-09-19T00:53:56
| 2016-09-19T00:53:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,069
|
r
|
Package_Documentation.R
|
#' GERGM: Generalized Exponential Random Graph Model
#'
#' @section GERGM functions:
#' To use this package, first load in the network you wish to use as a (square)
#' matrix, following the example provided below. You may then use the gergm()
#' function to estimate a model using any combination of the following statistics:
#' "out2stars", "in2stars", "ctriads", "mutual", "ttriads", "edges",
#' "absdiff(covariate)", "edgecov(covariate)", "sender(covariate)",
#' "reciever(covariate)", "nodematch(covariate)", "nodemix(covariate)",
#' "netcov(network_covariate)". The gergm() function provides all of the basic
#' estimation and diagnostic functionality and the parameters of this function
#' can be queried by typing ?gergm into the R console. If you wish to access
#' additional fit and degeneracy diagnostic functionality, the GOF(),
#' Estimate_Plot(), Trace_Plot() and hysteresis() functions can be accessed
#' directly by the user. You may also plot the initial network using
#' plot_network() and simulate networks for given structural parameters using
#' the simulate_networks() function. Experimental support for specifying
#' multiple GERGMs in parallel (allowing for different equations, dependent
#' networks and covariates) is available in the parallel_gergm() function.
#' An experimental feature which seeks to automatically optimize model
#' hyperparameters for best fit and to attempt to deal with degeneracy issues
#' can be turned on by specifying hyperparameter_optimization = TRUE.
#'
#' @docType package
#' @name GERGM
NULL
#> NULL
#' @import methods
NULL
#' @importFrom grDevices dev.off gray pdf rgb colorRampPalette
NULL
#' @importFrom graphics boxplot legend lines par plot text axis plot.new layout abline points
NULL
#' @import plyr
NULL
#' @importFrom stats as.formula dgamma dt lm optim pgamma pnorm pt qgamma qnorm qt rnorm runif sd t.test cor dbeta pbeta qbeta density
NULL
#' @importFrom utils combn write.table
NULL
#' @useDynLib GERGM
#' @importFrom Rcpp sourceCpp
#' @importFrom RcppParallel RcppParallelLibs
NULL
#> NULL
|
d03e6fb98cc21dd3607ca4b20a306dc84685d2ac
|
c900792007ef419990fb6ef194cfd9edf14f7e9c
|
/tests/testthat/test-database_path.R
|
24229f71bf5b85ff4f94323546f0d14fb0818fba
|
[] |
no_license
|
ybkamaleri/boligfinn
|
1e6c01eb00a11ad15e9b5967ebd4ec58987e5cef
|
02833b372448b8268cf49a746508442bc0a4e9a9
|
refs/heads/master
| 2021-02-13T20:06:06.835470
| 2020-06-18T21:10:07
| 2020-06-18T21:10:07
| 244,728,060
| 6
| 0
| null | 2020-04-28T21:44:43
| 2020-03-03T19:52:01
|
R
|
UTF-8
|
R
| false
| false
| 243
|
r
|
test-database_path.R
|
test_that("Path to database", {
  # Expected base directory depends on the operating system.
  base_dir <- if (.Platform$OS.type == "windows") "C:/boligfinn" else "~/boligfinn"
  # path_db() should point at the SQLite file inside that directory.
  expected_path <- paste(base_dir, "boligfinn.sqlite", sep = "/")
  expect_identical(path_db(), expected_path)
})
|
a09d5074c98917a9e544ae83d6955e737bc4f7ef
|
8ffe7592ffce7fe0a36515e23fdf889d09f076a3
|
/R/auc_aupr.R
|
f7e0d3d6e124889dc7212c4105db7ed047595784
|
[] |
no_license
|
Auroraywood/misc
|
880a8522b2d87d893d2d1182696bcf9825f324e0
|
dfc576b534bc1a50f30f20060bea3d489cee249c
|
refs/heads/master
| 2022-01-06T11:16:45.177921
| 2019-05-09T07:38:48
| 2019-05-09T07:38:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,577
|
r
|
auc_aupr.R
|
# compute auc of single file
library(PRROC)
# fg: vector of postive prob; bg: vector of negative prob;
# roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T)
# roc$auc
# pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T)
# pr$auc.davis.goadrich
## function of compute auc
## Compute the AUC (area under the ROC curve) from a 0/1 label vector that has
## been sorted by decreasing prediction score.
##
## x: vector of labels (1 = positive, 0 = negative), ordered from the highest-
##    scored prediction to the lowest.
## Returns the AUC in [0, 1]; NaN when x contains no positives or no negatives
## (the original O(n^2) loop version errored on an empty positive set).
auc <- function(x){
  pos <- x == 1
  neg <- x == 0
  ## For each positive, cumsum(neg) gives the number of negatives ranked above
  ## it; summing over positives counts the misordered (neg-before-pos) pairs.
  ## Vectorized replacement for the original quadratic prefix-scan loop.
  misordered <- sum(cumsum(neg)[pos])
  1 - misordered / (sum(pos) * sum(neg))
}
## function of compute auc
## NOTE(review): this is an exact duplicate of the auc() definition above --
## it silently re-binds the same name; one of the two copies should be deleted.
auc <- function(x){
  l_a <- which(x==1)
  l <- length(l_a)
  sum <- 0
  for(i in 1:l){
    tmp <- x[1:l_a[i]]
    sum <- sum + length(which(tmp==0))
  }
  a <- 1 - sum/(length(which(x==1))*length(which(x==0)))
  return(a)
}
# Score table for a single target; assumes rows are ordered by prediction
# score so auc() can be applied to the label column -- TODO confirm.
dat <- read.csv("/home/zdx/Desktop/score/hsp90/docked_gnina_hsp90n.csv", header = T, stringsAsFactors = F)
auc(dat$label)
# NOTE(review): aupr() is not defined in this file (only auc() is); this call
# errors unless aupr() is provided elsewhere -- verify.
aupr(dat$label)
# Build a combined active/decoy table, labelled 1 (active) / 0 (decoy).
active <- read.table("/home/zdx/Desktop/scoring_smina/active.smina.tsv", header = T, stringsAsFactors = F)
active$label <- 1
decoy <- read.table("/home/zdx/Desktop/scoring_smina/decoy.smina.tsv", header = T, stringsAsFactors = F)
decoy$label <- 0
dat <- rbind(active, decoy)
colnames(dat)
# Sort by docking energy, ascending.
dat <- dat[order(dat$Best_energy, decreasing = F), ]
head(dat)
# Split the CNN scores into positive (active) and negative (decoy) groups.
probs <- dat$cnnscore
fg <- probs[dat$label == 1]
bg <- probs[dat$label == 0]
# ROC and precision-recall curves via PRROC.
roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T)
roc$auc
plot(roc)
pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T)
pr$auc.davis.goadrich
plot(pr)
|
75c850dc582d237c88ed775af199c5c4e14f7ccb
|
9a6ca52a3152fe631597d7d2f44b6e1c91f6aa63
|
/global.R
|
495ca9046f41ff06b43be38300844525565e7a56
|
[] |
no_license
|
dnaxy/Airline-On-Time-Statistics-and-Delay-Causes
|
12460e3372c658d807388d8001b50ac1aee27168
|
1d1cde355bbac829c9ba4646b8e137e6aa8fe78b
|
refs/heads/master
| 2021-01-18T19:10:23.066911
| 2017-04-01T07:01:00
| 2017-04-01T07:01:00
| 86,891,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
r
|
global.R
|
# load required libraries
library(DBI)
library(RPostgreSQL)
library(dplyr)
# DB connection
# NOTE(review): credentials are hard-coded and committed here; consider
# reading them from environment variables or a config file instead.
flights.db <- src_postgres(
  dbname = "joe_db",
  host = "localhost",
  port = 5432,
  user = "joe",
  password = "test"
)
# after test with the data come from 1 month, 3 month, the whole 12 month,
# choose the 3 month data, from Oct, 2015 to Dec, 2015 for demo due to
# the performance issue
flights <- tbl(flights.db, "month10to12")
# Lazy subset keeping only the 'B6' (JetBlue) rows and the delay-related
# columns, to keep the generated SQL query small and fast.
jetblue <-
  flights %>% select(
    Carrier,
    OriginCityName,
    DestCityName,
    DepDelayMinutes,
    ArrDelayMinutes,
    CarrierDelay,
    WeatherDelay,
    NASDelay,
    SecurityDelay,
    LateAircraftDelay
  ) %>% filter(Carrier == 'B6')
# get the unique OriginCityName / DestCityName combinations, materialized
# locally with collect() and sorted for display.
airport.list <-
  jetblue %>% distinct(OriginCityName, DestCityName) %>%
  collect() %>% arrange(OriginCityName, DestCityName)
|
cd61143a9789eba2e87c454dbe56f924b4c31646
|
e667d89c3a081edd7a1284b652d97688ef603a80
|
/R Programming/Excercice 2/plot1.R
|
1ac6563cf3299febaf5f2592f92c36a2f6d8dce6
|
[] |
no_license
|
fabianoal/My-Coursera-Data-Science-Projects
|
2d15b8a1148df478e35bd7c06396bafa64595bc7
|
b118e02ed26bc920e6f68bc7db6513ac812fe531
|
refs/heads/master
| 2021-01-01T05:51:12.904670
| 2016-01-24T15:00:59
| 2016-01-24T15:00:59
| 40,630,138
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,778
|
r
|
plot1.R
|
# For this to work, put the files summarySCC_PM25.rds and
# Source_Classification_Code.rds in the working directory set below, then "source".
# NOTE(review): the original header mentioned household_power_consumption.txt,
# a file this script never reads.
print("Starting script. Cleaning the environment...")
remove(list = ls())
# Hard-coded working directory for the author's machine -- adjust before running.
setwd("/Users/fabianoal/Documents/Coursera/Material/Exploratory Data Analysis/Project2")
fileName <- "summarySCC_PM25.rds"
fileName2 <- "Source_Classification_Code.rds"
if(!file.exists(fileName) | !file.exists(fileName2)){
  print(paste0("Please, put the files [", fileName, "] and [", fileName2 ,"] on the [", getwd() ,"] directory"))
}else{
  # Format numbers compactly for axis labels: divide by 1e3/1e6/1e9 and append
  # '', 'M', 'MM' or 'MMM' respectively (Roman-numeral style thousands marks).
  labelIt <- function(number){
    intervals = c(1000, 1000000, 1000000000)
    sig = c('', 'M', 'MM', 'MMM')
    div = c(1,intervals)
    paste(floor(number / div[findInterval(number,intervals)+1]), sig[findInterval(number,intervals)+1])
  }
  print("Loading packages...")
  # Loading necessary packages
  packages <- c("data.table", "dplyr", "reshape2")
  sapply(packages, require, character.only=TRUE, quietly=TRUE)
  print("Reading files...")
  # Read the PM2.5 emissions summary.
  pm25 <- readRDS(fileName)
  # Since there is only one pollutant in the file, it's ok to sum them all...
  tot_by_year <- tapply(pm25$Emissions, pm25$year, sum)
  print('Generating the plot...')
  png(file="plot1.png", width = 480, height = 480)
  par("mar" = c(4,4,3,2))
  pts <- pretty(c(0,max(as.vector(tot_by_year))))
  barplot(tot_by_year, main="Total US pm25 Emissions", ylim = c(0,max(pts)), axes = FALSE, col="cadetblue4")
  axis(2, at=pts, labels=labelIt(pts), las=1, cex.axis = 0.7)
  title(ylab = "Emissions (tons)")
  title(xlab = "Year", line = 2)
  dev.off()
  print("Done!")
}
|
5ea90fce760be96924385ac38d23bb0ddb172498
|
9f665364a2fd7b211699bb89b55951dd6125a2f8
|
/R/sthdScrapiInput.R
|
fb78c225c37a369cb4fccdf509d8585129e0353e
|
[] |
no_license
|
lawrykatherine/SCOBI
|
b9c0ca3fa44b771ec46456c8a29e94bf6db42618
|
7a3346d2a25df7e5f8eb85c1a1be70250d1b8d41
|
refs/heads/master
| 2020-08-06T07:47:49.988629
| 2019-07-17T23:29:20
| 2019-07-17T23:29:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 387
|
r
|
sthdScrapiInput.R
|
#' @title Steelhead Smolt Data from LGTrappingDB formatted for SCRAPI
#'
#' @description A steelhead smolt data set from MY2015 that has already been formatted for \code{SCRAPI()}.
#' @name sthdScrapiInput
#' @docType data
#' @usage sthdScrapiInput
#' @format Data is formatted and ready for \code{SCRAPI()}; originally saved as a csv file.
#' @keywords steelhead smolt data
NULL
|
b85f93683575a81608177d521a98ebce7e63bb70
|
13ddcf944e06e15de7936dbd68346053e6fc793c
|
/R/calcPredictionError.R
|
5341c30667d803a15801284154e2830db16378e2
|
[] |
no_license
|
cran/mlegp
|
e9049cf9e5080e6b744050bad12c184f94f774c1
|
48bc5f5814bfcdb42b06cd3258f3e5c72607108e
|
refs/heads/master
| 2023-04-14T16:23:34.570025
| 2022-03-10T12:40:02
| 2022-03-10T12:40:02
| 17,697,577
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 238
|
r
|
calcPredictionError.R
|
# Predictive standard deviation of the Gaussian process at one new observation.
# gp: fitted GP object (uses fields X, beta, a, sig2, invVarMatrix);
# newData: the new observation; nugget: optional extra variance term.
# Returns 0 when numerical error drives the computed variance negative.
calcPredictionError <-
function(gp, newData, nugget = 0) {
	cor_row <- calcCorOneObs(gp$X, gp$beta, gp$a, newData)
	pred_var <- gp$sig2 + nugget - gp$sig2 * cor_row %*% gp$invVarMatrix %*% t(cor_row) * gp$sig2
	if (pred_var >= 0) {
		return (sqrt(pred_var))
	}
	return (0)
}
|
f10b7e8f8baea3b128f76b44319d937f48879630
|
7cf99de143882e9013238c053d0503eb96c3bfd2
|
/R/calc_dose.R
|
40fecbfb4731a98ac5c4ab77358a2c66162c0557
|
[] |
no_license
|
jgrevel/BAST1-R-Library
|
f2df669dfe45bc783b7df45afc01b519b48e3a8e
|
636034254f97c7368ba2434d334ed1f84e1aafb3
|
refs/heads/master
| 2020-04-11T04:21:43.244917
| 2018-12-12T15:49:33
| 2018-12-12T15:49:33
| 161,495,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,144
|
r
|
calc_dose.R
|
#' Calculates most recent DOSE size for a NONMEM data file.
#'
#' Calculates size of most recent dose for any NONMEM file that contains dosing records.
#'
#' @author Rupert Austin
#' @param data data frame containing data in NONMEM format.
#' @param ID_col name of column containing patient IDs.
#' @param TIME_col name of column containing TIME.
#' @param DOSE_col name of the new column that will contain DOSE.
#' @param CMT_vals vector of value(s) of CMT for which doses should contribute to the DOSE calculation. If CMT_vals is left as FALSE and a CMT
#' column exists then all doses will contribute to the calculation of DOSE, irrespective of the value of CMT. As an example, you might
#' have oral doses with CMT=1 and IV doses with CMT=2 and you only want the IV doses to register as doses in the DOSE calculation, then
#' set: CMT_vals=2.
#' @return The input data frame with DOSE added in new column with name DOSE_col.
#' @details RULES FOR CALCULATION OF DOSE:
#'
#' DOSE = first non-zero value of AMT (which has CMT within CMT_vals) for all observations before the first dose. Whenever AMT (which has
#' CMT within CMT_vals) changes to a new non-zero value, that becomes the new value of DOSE until AMT (which has CMT within CMT_vals)
#' changes again to a non-zero value.
#' @note If a patient has no qualifying dosing records, then DOSE will be set equal to 0 for all records.
#' @export
#######################
#### calc_dose #############
#######################
# calculates most recent DOSE size for a NONMEM data file.
#
# Function arguments:
#
# data Dataframe containing data in NONMEM format
#
# ID_col (optional, default=ID) Name of column containing patient IDs
#
# TIME_col (optional, default=TIME) Name of column containing TIME
#
# DOSE_col (optional, default=DOSE) Name of the new column that will contain DOSE
#
# CMT_vals (optional, default=FALSE) Vector of value(s) of CMT for which doses should contribute to the DOSE
# calculation. If CMT_vals is left as FALSE and a CMT column exists then all
# doses will contribute to the calculation of DOSE, irrespective of the value of CMT.
# As an example, you might have oral doses with CMT=1 and IV doses with CMT=2 and you only want the
# IV doses to register as doses in the DOSE calculation, then set: CMT_vals=2.
#
# function returns the input data frame with DOSE added in new column with name DOSE_col
#
# RULES FOR CALCULATION OF DOSE ###
# DOSE=first non-zero value of AMT (which has CMT within CMT_vals) for all observations before the first dose.
# Whenever AMT (which has CMT within CMT_vals) changes to a new non-zero value, that becomes the new value
# of DOSE until AMT (which has CMT within CMT_vals) changes again to a non-zero value
#
# NOTE: if a patient has no qualifying dosing records then DOSE will be set equal to 0 for all records
#
############################################################################################
calc_dose <- function(data, ID_col = 'ID', TIME_col = 'TIME', DOSE_col = 'DOSE', CMT_vals = FALSE){
  ### First check for required columns
  nms <- names(data)
  required_names <- c('AMT', 'EVID', TIME_col)
  if(!all(required_names %in% nms)){
    stop("Cannot calculate DOSE as one or more required columns (AMT / EVID / TIME) are missing")
  }
  ### Sort by ascending ID, then ascending TIME, then DESCENDING EVID so that a
  ### dose sharing a time stamp with an observation appears first.
  ### BUG FIX: the original passed data[TIME_col] (a one-column data frame) to
  ### order(), which errors because data frames cannot be xtfrm'd; the column
  ### must be extracted as a vector with data[, TIME_col].
  data <- data[order(data[, ID_col], data[, TIME_col], -data$EVID), ]
  ### Initialise DOSE; subjects without qualifying dose records keep DOSE = 0.
  data[, DOSE_col] <- 0
  subject <- unique(data[, ID_col])
  for(i in seq_along(subject)){
    s <- data[data[, ID_col] == subject[i], ]
    ### Dosing records are EVID 1 or 4, optionally restricted to CMT_vals.
    if(CMT_vals[1] != FALSE){
      doserows <- which(s[, 'EVID'] %in% c(1, 4) & s$CMT %in% CMT_vals)
    }else{
      doserows <- which(s[, 'EVID'] %in% c(1, 4))
    }
    if(length(doserows) > 0){
      ### Rows before/at the first dose get the first qualifying AMT; after
      ### that, DOSE tracks the most recent non-zero qualifying AMT (c_dose).
      first_dose <- s[doserows[1], 'AMT']
      for(k in seq_len(nrow(s))){
        if(k == 1){
          s[k, DOSE_col] <- first_dose
          c_dose <- first_dose
        }
        if(k > 1 && s[k, 'AMT'] == 0){
          s[k, DOSE_col] <- c_dose
        }
        if(CMT_vals[1] != FALSE){
          ### A non-zero AMT on a non-qualifying compartment leaves DOSE unchanged.
          if(k > 1 && s[k, 'AMT'] != 0 && !s[k, 'CMT'] %in% CMT_vals){
            s[k, DOSE_col] <- c_dose
          }
          if(k > 1 && s[k, 'AMT'] != 0 && s[k, 'CMT'] %in% CMT_vals){
            c_dose <- s[k, 'AMT']
            s[k, DOSE_col] <- c_dose
          }
        }else{
          if(k > 1 && s[k, 'AMT'] != 0){
            c_dose <- s[k, 'AMT']
            s[k, DOSE_col] <- c_dose
          }
        }
      }
    } else {
      # no dose records: leave DOSE = 0 for all of this subject's rows
    }
    ### Copy the per-subject DOSE values back into the full data set.
    data[data[, ID_col] == subject[i], DOSE_col] <- s[, DOSE_col]
  } # end of i loop
  return(data)
}
##############################################################
#################### END OF calc_dose function ####################
##############################################################
|
b3b175cf35f17c49e38a25ff6954daf502613f7d
|
624725876409f179d3bab0bf26250b600b760a45
|
/man/turb.iec.plot.Rd
|
89731b4cfb307b17534f8e20878e322fca012fc2
|
[] |
no_license
|
paulponcet/bReeze
|
a05fe65ac651a2e9a8e3f17f7fe275a1cca9e1b2
|
b03c144efbb93c17b7464a760bbf49877c710cb3
|
refs/heads/master
| 2021-01-16T00:22:13.632582
| 2017-01-07T21:08:06
| 2017-01-07T21:08:06
| 78,894,730
| 2
| 0
| null | 2017-01-13T23:25:22
| 2017-01-13T23:25:22
| null |
UTF-8
|
R
| false
| false
| 6,496
|
rd
|
turb.iec.plot.Rd
|
\encoding{UTF-8}
\name{turb.iec.plot}
\alias{turb.iec.plot}
\alias{iec}
\title{Plot turbulence intensity site classification}
\description{Plots the turbulence intensity and site classification after IEC.}
\usage{
turb.iec.plot(mast, set, subset, ...)
iec(mast, set, subset, ...)
}
\arguments{
\item{mast}{Met mast object created by \code{mast}.}
\item{set}{Set used for plotting specified as set number or set name.}
\item{subset}{Optional start and end time stamp for a data subset, as string vector \code{c(start, end)}. The time stamps format shall follow the rules of ISO 8601 international standard, e.g. "2012-08-08 22:55:00".}
\item{\dots}{Optional graphical parameters, see below for details.}
}
\section{Optional graphical parameters}{
The following graphical parameters can optionally be added to customize the plot:
\itemize{
\item \code{border}: Colour, used for the border around the bars -- default is \code{"white"}.
\item \code{bty}: Type of box to be drawn around the plot region. Allowed values are \code{"o"} (the default), \code{"l"}, \code{"7"}, \code{"c"}, \code{"u"}, or \code{"]"}. The resulting box resembles the corresponding upper case letter. A value of \code{"n"} suppresses the box.
\item \code{bty.leg}: Type of box to be drawn around the legend. Allowed values are \code{"n"} (no box, the default) and \code{"o"}.
\item \code{cex}: Amount by which text on the plot should be scaled relative to the default (which is \code{1}), as numeric. To be used for scaling of all texts at once.
\item \code{cex.axis}: Amount by which axis annotations should be scaled, as numeric value.
\item \code{cex.lab}: Amount by which axis labels should be scaled, as numeric value.
\item \code{cex.leg}: Amount by which legend text should be scaled, as numeric value.
\item \code{col}: Colour, used to fill the bars.
\item \code{col.axis}: Colour to be used for axis annotations -- default is \code{"black"}.
\item \code{col.box}: Colour to be used for the box around the plot region (if \code{bty}) -- default is \code{"black"}.
\item \code{col.lab}: Colour to be used for axis labels -- default is \code{"black"}.
\item \code{col.leg}: Colour to be used for legend text -- default is \code{"black"}.
\item \code{col.ticks}: Colours for the axis line and the tick marks respectively -- default is \code{"black"}.
\item \code{las}: Style of axis labels. One of \code{0} (always parallel to the axis, default), \code{1} (always horizontal), \code{2} (always perpendicular to the axis), \code{3} (always vertical).
\item \code{legend}: If \code{TRUE} (the default) a legend is drawn.
\item \code{leg.text}: A character or \code{\link{expression}} vector to appear in the legend.
\item \code{line}: Vector of three colours -- one for each IEC turbulence class.
\item \code{lty}: Vector of three line types -- one for each IEC turbulence class. See \code{\link{par}} for available line types.
\item \code{lwd}: Vector of three line widths -- one for each IEC turbulence class. See \code{\link{par}} for usage.
\item \code{mar}: A numerical vector of the form c(bottom, left, top, right) which gives the number of lines of margin to be specified on the four sides of the plot (only for plots with one dataset) -- default is \code{c(4.5, 4.5, 1, 1)}.
\item \code{mgp}: A numerical vector of the form c(label, annotation, line), which gives the margin line for the axis label, axis annotation and axis line. The default is \code{c(2.2, 0.7, 0)}.
\item \code{pos.leg}: Position of legend -- one of \code{"bottomright"}, \code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"}, \code{"topright"}, \code{"right"} or \code{"center"}. Use \code{NULL} to hide the legend.
\item \code{space}: Numeric value between 0 and 1, giving the space left before each bar. Default space is \code{0.2}.
\item \code{xlab}: Alternative label for the x axis.
\item \code{ylab}: Alternative label for the y axis.
\item \code{xlim}: Limits of the x axis, as vector of two values.
\item \code{ylim}: Limits of the y axis, as vector of two values.
\item \code{x.intersp}: Horizontal interspacing factor for legend text, as numeric -- default is \code{0.4}.
\item \code{y.intersp}: Vertical interspacing factor for legend text, as numeric -- default is \code{0.8}.
}
}
\details{
The IEC defines wind turbine classes by wind speed and turbulence characteristics. In terms of turbulence intensity three reference values (at 15 m/s) are defined:
\tabular{ll}{
\emph{ Turbulence class} \tab \emph{Reference value} \cr
A \tab 0.16 \cr
B \tab 0.14 \cr
C \tab 0.12
}
\code{turb.iec.plot} plots these IEC references together with the site's values to allow for a classification.
See \code{\link{turbulence}} for a definition of turbulence intensity.
}
\references{
International Electrotechnical Commission (2005) IEC 61400-1 Wind Turbines -- Part 1: Design Requirements. IEC Standard
}
\author{Christian Graul}
\seealso{\code{\link{mast}}}
\examples{
\dontrun{
# load and prepare data
data(winddata)
set40 <- set(height=40, v.avg=winddata[,2], v.std=winddata[,5])
set30 <- set(height=30, v.avg=winddata[,6], v.std=winddata[,9])
set20 <- set(height=20, v.avg=winddata[,10], v.std=winddata[,13])
ts <- timestamp(timestamp=winddata[,1])
neubuerg <- mast(timestamp=ts, set40, set30, set20)
neubuerg <- clean(mast=neubuerg)
# plot
turb.iec.plot(mast=neubuerg, set=1)
turb.iec.plot(mast=neubuerg, set="set1") # same as above
# data subsets
turb.iec.plot(mast=neubuerg, set=1,
subset=c("2009-12-01 00:00:00", "2009-12-31 23:50:00"))
turb.iec.plot(mast=neubuerg, set=1,
subset=c("2010-01-01 00:00:00", NA)) # just 'start' time stamp
turb.iec.plot(mast=neubuerg, set=1,
subset=c(NA, "2009-12-31 23:50:00")) # just 'end' time stamp
# customize plot
turb.iec.plot(mast=neubuerg, set=1, bty="l", cex.axis=0.8, cex.lab=0.9,
cex.leg=0.7, col.axis="darkblue", col.box="lightblue", col.lab=
"darkblue", col.leg="darkblue", col.ticks="darkblue", las=0,
leg.text=c("IEC class A", "IEC class B", "IEC class C", "measured"),
mar=c(3,3,0.5,0.5), mgp=c(1.8,0.5,0), pos.leg="top", xlab="v [m/s]",
ylab="ti [-]", xlim=c(0,25), ylim=c(0,0.5), x.intersp=1, y.intersp=1)
# customize bars
turb.iec.plot(mast=neubuerg, set=1, col="gray", border="black", space=0.6)
# customize lines
turb.iec.plot(mast=neubuerg, set=1, line=gray(1:3 / 10), lty=2:4,
lwd=0.5:2.5)
}
}
\keyword{methods}
|
3b2c5e2b7374b89cadb9787262274f0de34ddd81
|
779130a18c03edcca3c3a684146a1644222156b5
|
/R/RcppExports.R
|
47eb7e431d67890ef24d393f1782e87e833fa9db
|
[
"MIT"
] |
permissive
|
washingtonquintero/GarchMidas
|
7aff6fdd0ae045e5947f7ecffccf8ebb8211c098
|
5439b54e133ce88f5613ccfe7e0f591c57223595
|
refs/heads/master
| 2023-03-15T15:31:12.931891
| 2019-10-02T04:27:45
| 2019-10-02T04:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# R wrapper for the compiled routine '_GarchMidas_calculate_g'.
# NOTE(review): this file is auto-generated; change the C++ sources and rerun
# Rcpp::compileAttributes() rather than editing it by hand.
calculate_g <- function(omega, alpha, beta, gamma, returns, g0) {
    .Call('_GarchMidas_calculate_g', PACKAGE = 'GarchMidas', omega, alpha, beta, gamma, returns, g0)
}

# R wrapper for the compiled routine '_GarchMidas_sum_tau'.
sum_tau <- function(m, theta, phivar, covariate, K) {
    .Call('_GarchMidas_sum_tau', PACKAGE = 'GarchMidas', m, theta, phivar, covariate, K)
}
|
b4924a44560ea9ad88c4ea8685ed2010f19997ad
|
f6401021b5655bf14acba25664a389e0a2d024b5
|
/scripts/DataMunging/08_ExtractingPEAnames.R
|
273939eb84ea38dc7647040cdc82aa5b2cf76901
|
[] |
no_license
|
JulietteArchambeau/HeightPinpinClonapin
|
56726076b40f274d1bfc4b4534382907cc41027b
|
8395f9b320665c8610a80ae0a4e396bbcb1e1616
|
refs/heads/master
| 2023-04-19T00:41:01.330853
| 2023-02-03T15:22:55
| 2023-02-03T15:22:55
| 254,827,204
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,191
|
r
|
08_ExtractingPEAnames.R
|
##################################################################################################################"
################## #######################"
################## Extracting rPEAs and gPEAs names #######################"
################## #######################"
##################################################################################################################"
library(readr)     # CRAN v1.3.1
library(tidyverse)

# Read one piMASS output file and index the table by SNP id (column 'rs').
read_pimass <- function(path) {
  beta.snp <- as.data.frame(read_delim(path, "\t", escape_double = FALSE, trim_ws = TRUE))
  row.names(beta.snp) <- beta.snp$rs
  beta.snp$rs <- NULL
  beta.snp
}

# Names of the n SNPs showing the strongest association with height,
# i.e. the largest absolute 'betarb' effect estimates.
top_snps <- function(beta.snp, n = 350) {
  beta.snp.order <- beta.snp[order(abs(beta.snp$betarb), decreasing = TRUE), ]
  row.names(beta.snp.order)[1:n]
}

# Load the genomic data:
geno <- read.csv("~/Documents/Pinpin_Clonapin/HeightPinpinClonapin/data_DRYAD/GenomicData_5165SNPs_523clones.csv", row.names = 1)

# 1/ Identifying the global PEAs ####
# ==================================
# (The four read/sort/select sections below were previously copy-pasted; they
# are now expressed through the two helpers above.)
beta.snp <- read_pimass("data_DRYAD/height_all_sites_res.mcmc.txt")

# Checking that the genotype names in the two files are the same and ranked in
# the same order.
compare::compare(row.names(beta.snp), row.names(geno))

# Global PEAs plus placeholders for the three regional sets filled in below.
DF <- data.frame(gPEAs = top_snps(beta.snp),
                 rPEAs_FrenchAtlantic = NA,
                 rPEAs_IberianAtlantic = NA,
                 rPEAs_Med = NA)

# 2/ Identifying the regional PEAs ####
# =====================================
DF$rPEAs_FrenchAtlantic  <- top_snps(read_pimass("data_DRYAD/height_french_atlantic_res.mcmc.txt"))
DF$rPEAs_IberianAtlantic <- top_snps(read_pimass("data_DRYAD/height_iberian_atlantic_res.mcmc.txt"))
DF$rPEAs_Med             <- top_snps(read_pimass("data_DRYAD/height_mediterranean_res.mcmc.txt"))

write.csv(DF, file = "data/PEAsNames.csv", row.names = FALSE)
saveRDS(DF, file = "data/PEAsNames.RDS")
|
6bebff035d93d887b66e1796c37c7c30379c0465
|
ccdf80dd77137b4193c74270f375a26a9b6bd2cd
|
/Gene_base_analysis.R
|
c739b86a426c0b43290b65609f584e4efcf2ba05
|
[] |
no_license
|
hameddashti/somatic
|
11387192e041ace909d4e42a46da5e537a52c2bc
|
d8718a6fa88cdfabc721cce30601a962b43d1ba1
|
refs/heads/master
| 2021-04-30T15:07:22.436552
| 2018-02-26T17:41:56
| 2018-02-26T17:41:56
| 121,232,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,946
|
r
|
Gene_base_analysis.R
|
{
  library(BSgenome.Hsapiens.UCSC.hg19)
  library(SomaticSignatures)
  library(GenomicRanges)
  library(readr)
  library(ggplot2)
  library(dplyr)
  library(VariantAnnotation)
  library(tidyr)
  library(csv)
  library(readxl)
}
# Cancer type to analyse; used to build the input/output paths below.
b <- 'Brain'
inputfile <- paste0('/home/san/halinejad/Desktop/Masroor Bayati/DeepCancer project/Data/ICGC/', b, '/simple_somatic_mutation.open.tsv')
outputfile <- paste0('/home/san/halinejad/Desktop/Masroor Bayati/DeepCancer project/Dashti/', b, '_barplot.pdf')
# ICGC simple somatic mutations for the selected cancer type.
Cancer <- data.frame(read_tsv(inputfile, col_names = TRUE))
# FANTOM5 gene annotation, reduced to the columns used downstream.
gene_list <- read_excel('/home/san/halinejad/Desktop/Masroor Bayati/DeepCancer project/Data/annotation/FANTOM5_gene_list.xlsx')
annot <- as.data.frame(gene_list[, c(1, 3, 4, 5, 6, 8)])
# BUG FIX: the chromosome column was named 'chromosome2', which broke the
# inner_join(df2, "chromosome") calls further down (no such column in df2).
colnames(annot) <- c('id', 'chromosome', 'start', 'end', 'strand', 'geneClass')
gene <- subset(annot, annot$geneClass == 'coding_mRNA')
lncRNA <- subset(annot, annot$geneClass == 'lncRNA')
# Keep the mutation id / locus / allele / sample columns, then restrict to
# single-base substitutions on non-mitochondrial chromosomes.
tmp <- as.data.frame(Cancer[, c(1, 5, 9, 10, 11, 12, 16, 17)])
tmp1 <- data.frame(subset(tmp, tmp$icgc_mutation_id != "NA"))
tmp3 <- data.frame(subset(tmp1, nchar(tmp1$mutated_from_allele) == 1 & nchar(tmp1$mutated_to_allele) == 1
                          & tmp1$mutated_from_allele != '-' & tmp1$mutated_to_allele != "-"
                          & tmp1$mutated_from_allele != '_' & tmp1$mutated_to_allele != "_"
                          & tmp1$chromosome != "MT" & tmp1$chromosome != "M"))
tmp2 <- tmp3[!duplicated(tmp3), ]
# Recode strand to +/- and prefix chromosomes with "chr" (UCSC naming).
tmp2$chromosome_strand[which(tmp2$chromosome_strand == '1')] <- '+'
tmp2$chromosome_strand[which(tmp2$chromosome_strand == '2')] <- '-'
tmp2$chromosome <- paste0("chr", tmp2$chromosome)
gr <- makeGRangesFromDataFrame(tmp2, keep.extra.columns = TRUE,
                               seqnames.field = 'chromosome',
                               start.field = 'chromosome_start',
                               end.field = 'chromosome_end',
                               strand.field = 'chromosome_strand')
# Reorder metadata so ref/alt/sample come first for makeVRangesFromGRanges.
idx <- match(c('mutated_from_allele', 'mutated_to_allele', 'icgc_sample_id'), names(mcols(gr)))
mcols(gr) <- cbind(mcols(gr)[idx], mcols(gr)[-idx])
vr <- makeVRangesFromGRanges(gr, ref.field = 'mutated_from_allele',
                             alt.field = 'mutated_to_allele',
                             sampleNames.field = 'icgc_sample_id',
                             keep.extra.columns = TRUE)
# Adds 'alteration' and 'context' metadata columns, used for the motif below.
vr <- mutationContext(vr, Hsapiens)
# NOTE(review): 'icgc_samlpe_id' is misspelled but used consistently below.
variations <- data.frame(icgc_samlpe_id = mcols(gr)$icgc_sample_id,
                         chromosome = as.character(seqnames(vr)),
                         position = start(vr),
                         strand = as.character(strand(gr)),
                         motif = paste0(as.character(mcols(vr)$alteration),
                                        '-', as.character(mcols(vr)$context)))
# Drop motifs containing an ambiguous base (N).
clear_var <- variations[!grepl("N", variations$motif), ]
x <- as.data.frame(clear_var[, c(1, 5, 2, 3)], header = TRUE)
# Per-sample counts of each (gene, motif) pair for coding genes.
{
  df1 <- structure(x)
  df2 <- structure(as.data.frame(gene[, c(1, 2, 3, 4)]))
  q <- df1 %>% inner_join(df2, "chromosome") %>%
    mutate(geneID_motif = paste(id, motif, sep = ","),
           n = if_else(position >= start & position <= end, 1, 0)) %>%
    select(icgc_samlpe_id, geneID_motif, n) %>%
    group_by(icgc_samlpe_id, geneID_motif) %>%
    summarise(n = sum(n)) %>%
    spread(key = geneID_motif, value = n, fill = 0)
  q0 <- data.frame(q)
  # NOTE(review): plot.barplots() is not defined in this file -- verify that it
  # is provided elsewhere before sourcing.
  plot.barplots(q0) # create the barplot
  dev.copy2pdf(file = outputfile)
}
# Same counting for lncRNA genes.
# NOTE(review): this writes to the same 'outputfile' as the block above, so the
# coding-gene PDF is overwritten -- probably unintended; use distinct names.
{
  df1 <- structure(x)
  df2 <- structure(as.data.frame(lncRNA[, c(1, 2, 3, 4)]))
  q <- df1 %>% inner_join(df2, "chromosome") %>%
    mutate(geneID_motif = paste(id, motif, sep = ","),
           n = if_else(position >= start & position <= end, 1, 0)) %>%
    select(icgc_samlpe_id, geneID_motif, n) %>%
    group_by(icgc_samlpe_id, geneID_motif) %>%
    summarise(n = sum(n)) %>%
    spread(key = geneID_motif, value = n, fill = 0)
  q0 <- data.frame(q)
  plot.barplots(q0) # create the barplot
  dev.copy2pdf(file = outputfile)
}
|
4ff02b2b373ace20b7d654f2abdf83f8d7379955
|
2e5c75ecabf2785c83da261b6d10b62eef3f21eb
|
/Peer-graded Assignment- Getting and Cleaning Data Course Project/run_analysis.R
|
015d16a0ba9a3ed969b120e4dd29ce547afcff79
|
[] |
no_license
|
chimizo/datasciencecoursera
|
008bd55e3dfa63f8e57f5a572fc4e5286ff0327e
|
1da16d1c040408659666597e20a5d85430367a17
|
refs/heads/master
| 2021-01-21T22:34:41.436691
| 2017-11-05T21:40:14
| 2017-11-05T21:40:14
| 102,160,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,488
|
r
|
run_analysis.R
|
#You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#script
#activating libraries
library(plyr)
library(data.table)
library(lubridate)
library(tidyr)
# 1. Merges the training and the test sets to create one data set
# NOTE(review): hard-coded setwd() makes the script machine-specific.
setwd("C:/Users/Andre/Desktop/UCI HAR Dataset/")
x_train <- read.table("./train/X_train.txt", header = FALSE)
x_test <- read.table("./test/X_test.txt", header = FALSE)
y_train <- read.table("./train/y_train.txt", header = FALSE)
y_test <- read.table("./test/y_test.txt", header = FALSE)
subject_train <- read.table("./train/subject_train.txt", header = FALSE)
subject_test <- read.table("./test/subject_test.txt", header = FALSE)
features <- read.table("./features.txt", header = FALSE)
activity_labels <- read.table("./activity_labels.txt", header = FALSE)
colnames(activity_labels) <- c("activityId", "activityType")  # was colnames(activityLabel): undefined object
#merging: stack train/test row-wise (row order keeps measurements, labels and
#subjects aligned), then combine the three pieces column-wise — one row per
#observation. The original rbind-ed unrelated tables together.
x <- rbind(x_train, x_test)                    # was rbind(x_train, X_test): 'X_test' undefined
colnames(x) <- as.character(features[, 2])     # feature names from features.txt
y <- rbind(y_train, y_test)
colnames(y) <- "activityId"
subject <- rbind(subject_train, subject_test)  # was rbin(): typo
colnames(subject) <- "subjectId"
all_data <- cbind(subject, y, x)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
mean_stdv <- all_data[, grepl("mean|std|subject|activityId", colnames(all_data))]  # was all_data(...): call syntax, not subsetting
# 3. Uses descriptive activity names to name the activities in the data set
mean_stdv <- join(mean_stdv, activity_labels, match = "first", by = "activityId")
mean_stdv$activityId <- NULL  # keep descriptive activityType; was mean_stdv <- data_mean_std[,-1] (undefined object, dropped subjectId)
# 4. Appropriately labels the data set with descriptive variable names.
names(mean_stdv) <- gsub("\\(|\\)", "", names(mean_stdv), perl = TRUE)
names(mean_stdv) <- trimws(names(mean_stdv))  # was trim(mean_stdv): nonexistent function, wrong argument
names(mean_stdv) <- make.names(names(mean_stdv))
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidyData <- aggregate(. ~ subjectId + activityType, data = mean_stdv, FUN = mean)  # was aggregate(all_data, mean): invalid call
write.table(tidyData, file = "Tidy.txt", row.names = FALSE)
|
aebf90c42ae09da07d809d5014053753e12c1851
|
df36258042863fa8de9c563d7ec6e2927ee83ca6
|
/lib/AnimalFeature.R
|
7b05cb800e8d0c0b4f05892ce55e08cf3ee0d02a
|
[] |
no_license
|
bz2290/Spr2017-proj5-grp8
|
ac36ac44776fb39a878a491f424590c39642db55
|
6575ba02db1ac3e0fb0082c6f94c1ea912689db5
|
refs/heads/master
| 2021-06-14T05:08:26.832566
| 2017-04-28T17:15:44
| 2017-04-28T17:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 924
|
r
|
AnimalFeature.R
|
#setwd("../doc")
# Flag listings that mention an animal.
#
# Args:
#   description_vector, title_vector: character vectors of equal length,
#     one listing per element.
# Returns a numeric 0/1 vector: 1 when any whitespace-separated token of the
# listing's description OR title exactly matches an animal name from
# ../data/animals.txt (in lower-case or Capitalized form), else 0.
# NOTE(review): matching is on space-split tokens, so punctuation attached to
# a word ("dog,") prevents a match — unchanged from the original behaviour.
AnimalFeature = function(description_vector, title_vector)
{
  animal_list_filename = "../data/animals.txt"
  animal_df = as.character(read.delim(animal_list_filename, header=FALSE, sep = "\n")[,1])
  animal_df = tolower(animal_df)
  # Base-R replacement for Hmisc::capitalize (removes the Hmisc dependency):
  # add a variant with the first letter upper-cased, as in the original.
  animal_df = append(animal_df, paste0(toupper(substring(animal_df, 1, 1)),
                                       substring(animal_df, 2)))
  word_list_description = strsplit(description_vector, split = " ")
  word_list_title = strsplit(title_vector, split = " ")
  # Vectorised membership test replaces the original index loop
  # (for i in 1:length(...) with sum(is.element(...)) >= 1).
  hit_description = vapply(word_list_description,
                           function(words) any(animal_df %in% words), logical(1))
  hit_title = vapply(word_list_title,
                     function(words) any(animal_df %in% words), logical(1))
  return(as.numeric(hit_description | hit_title))
}
|
9e9727e3642efdb235c29554ced540715c937439
|
7189a65f956a85e74157c37253b087e0cb1835da
|
/R/GENERAL/integral_x^n_exp(ax).R
|
d80bce251b313e0c6bff45af60fbb7da73eccef9
|
[] |
no_license
|
maxbiostat/CODE
|
22b4a16dd55377cfdfbfe5a658a51032ae4722ef
|
bbfa7bde29ccde58f16db5381ec14655afc66032
|
refs/heads/master
| 2023-02-07T13:25:46.533953
| 2023-01-26T15:20:36
| 2023-01-26T15:20:36
| 20,405,024
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 495
|
r
|
integral_x^n_exp(ax).R
|
# Integrand: x^n * exp(a*x).
f1 <- function(x, n, a) x^n * exp(a*x)
# Reduced-order integrand used by the integration-by-parts recursion.
f2 <- function(x, n, a) x^{n-1} * exp(a*x)
# Integration by parts on [0, Inf) for a < 0, n >= 1:
#   int_0^Inf x^n e^{ax} dx = [x^n e^{ax}/a]_0^x - (n/a) int_0^Inf x^{n-1} e^{ax} dx
# The boundary term vanishes as x -> Inf (default x = Inf); the original
# evaluated it at a caller-supplied finite x, mixing an indefinite
# antiderivative into a definite integral. x is kept first for compatibility.
sol1 <- function(x = Inf, n, a) {
  boundary <- if (is.finite(x)) (x^n * exp(a*x))/a else 0
  boundary - (n/a) * integrate(f2, n = n, a = a, 0, Inf)$value
}
# analytic solution given by Gradshteyn & Ryzhik, for every a < 0:
#   int_0^Inf x^n e^{ax} dx = Gamma(n+1) / (-a)^(n+1)
# The original used (1/a)^{n+1}, which flips the sign whenever n is even.
sol2 <- function(n, a) gamma(n+1) / (-a)^{n+1}
# simply recognizing this is the kernel of a gamma pdf
####
a <- -1.5
n <- 3
x <- seq(.1,10,.1)
plot(x, f1(x, n, a), type = "l", lwd = 3)
sol1(x = 10, n = n, a = a)
sol2 (n, a)
|
1bf1211c8de38ebe9b11be35f42a3095ee0ccfc7
|
5ddea1ad62a397deee8f8e348afbce631d5417ac
|
/man/quiz_clean_drive.Rd
|
5e0bd8e34be9e4093f3578c1fab7cffbd7803af0
|
[
"MIT"
] |
permissive
|
martina-starc/peRson
|
da0513dd7f08e8cfba6055b2a7eb3350b3a311ed
|
7376da729836bff767581d5ad566dfd27b655787
|
refs/heads/master
| 2023-04-08T19:09:49.814150
| 2021-04-09T17:04:30
| 2021-04-09T17:04:30
| 336,203,610
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 453
|
rd
|
quiz_clean_drive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/answer_sheets.R
\name{quiz_clean_drive}
\alias{quiz_clean_drive}
\title{Deletes the answer sheets}
\usage{
quiz_clean_drive(quiz = getOption("peRson.quiz"))
}
\arguments{
\item{quiz}{Quiz environment with quiz variables.}
}
\value{
No result, just trashes the quiz sheets.
}
\description{
Puts the participants' answer sheets and the summary answer sheet into drive trash.
}
|
8d49959f1482c5d83ef5be6c17d65a695a0f25d0
|
dea77147a8d60f99afc2740b3184a88f889032b0
|
/R/deblank.R
|
4dc971f275c1b3e061038362a25adae5cc06ce42
|
[] |
no_license
|
cran/ProfessR
|
98834f2043512110249f53996d45ad40d552b8f5
|
ddfb2f35ad46fb4a648ba32ca57f7e524c87e157
|
refs/heads/master
| 2020-04-20T19:45:41.076089
| 2019-09-23T10:20:02
| 2019-09-23T10:20:02
| 17,713,804
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
r
|
deblank.R
|
# Remove every space character from each element of a character vector.
#
# Args:
#   a: character vector (elements may contain spaces).
# Returns a character vector of the same length with all ' ' removed.
#
# Vectorised replacement for the original per-element strsplit/paste loop;
# also fixes the original's broken iteration on zero-length input
# (1:length(a) iterated over c(1, 0)) and keeps NA elements as NA.
deblank<-function(a)
{
  gsub(" ", "", a, fixed = TRUE)
}
|
ad4560157e8621ae46ac1bcd6d67fc6d558b74c5
|
8b61baaf434ac01887c7de451078d4d618db77e2
|
/man/is.url.Rd
|
049119d5059dcd63712d8ec8658ffab60edb8fd3
|
[] |
no_license
|
drmjc/mjcbase
|
d5c6100b6f2586f179ad3fc0acb07e2f26f5f517
|
96f707d07c0a473f97fd70ff1ff8053f34fa6488
|
refs/heads/master
| 2020-05-29T19:36:53.961692
| 2017-01-17T10:54:00
| 2017-01-17T10:54:00
| 12,447,080
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 481
|
rd
|
is.url.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.url.R
\name{is.url}
\alias{is.url}
\title{Which entries are URL's.}
\usage{
is.url(x)
}
\arguments{
\item{x}{a character vector.}
}
\value{
a logical vector of length x. values are TRUE if the entry looks
like a URL. NB the URL's are not checked for 404 (dead links) errors.
}
\description{
x must start with http://, ftp://, or file://
See also download.file
}
\author{
Mark Cowley, 2009-12-11
}
|
212a452f65dc778f9860cb46c52db4584ced920f
|
e05660d01c88dce798659c9e2dc2b47e45545cd4
|
/R/correlate.R
|
0298df81ef5c565b8f8e4d2efd37c18a91f7df5c
|
[] |
no_license
|
nickilott/NGSKit
|
22718b22784cc85060e7a0c490cc09644f3724d9
|
7a22c372685d54c9e61e32bd49007dd263c53839
|
refs/heads/master
| 2021-07-11T23:01:33.764197
| 2020-06-04T21:57:20
| 2020-06-04T21:57:20
| 129,386,003
| 0
| 1
| null | 2020-04-03T10:16:27
| 2018-04-13T10:17:44
|
R
|
UTF-8
|
R
| false
| false
| 409
|
r
|
correlate.R
|
library(dplyr)
# Correlate every row of `mat` against `vector`.
#
# Args:
#   mat:    numeric matrix (or data frame), one test per row;
#           ncol(mat) must equal length(vector).
#   vector: numeric vector to correlate each row against.
#           NOTE(review): the name shadows base::vector(); kept for
#           backward compatibility.
#   method: correlation method passed to cor.test (default "spearman").
# Returns a data.frame with one row per row of `mat` (rownames preserved)
# and columns: cor (estimate), p.value, padj (p.adjust, default "holm").
correlateMatrixWithVector <- function(mat, vector, method="spearman"){
  # Guard the empty case: the original's 1:nrow(mat) iterated over c(1, 0).
  if (nrow(mat) == 0) {
    return(data.frame(cor = numeric(0), p.value = numeric(0), padj = numeric(0)))
  }
  cors <- lapply(seq_len(nrow(mat)), function(i) {
    res <- cor.test(unlist(mat[i, ]), vector, method = method)
    data.frame(cor = res$estimate, p.value = res$p.value)
  })
  # Base replacement for dplyr::bind_rows — the function no longer needs dplyr
  # at run time (rownames are overwritten below either way).
  result <- do.call(rbind, cors)
  rownames(result) <- rownames(mat)
  result$padj <- p.adjust(result$p.value)
  return(result)
}
|
22b36b3991ce45bf636a73297195ad985b639957
|
a8fb113cbe592a059d558a8f7b925a77ca510a0e
|
/R/cannon_get_data.R
|
8d83edd8f52b2b8883de29225e99cc24b9736ee2
|
[] |
no_license
|
CannonCloud/coronavirus
|
7ab9fccfa54d512894bc223f2e899ad6b179bce9
|
01dd19339eb1fafe086174cc746eeca8790799d1
|
refs/heads/master
| 2021-03-26T17:14:06.509780
| 2020-03-16T14:36:32
| 2020-03-16T14:36:32
| 247,724,915
| 0
| 0
| null | 2020-03-16T14:34:19
| 2020-03-16T14:34:18
| null |
UTF-8
|
R
| false
| false
| 9,935
|
r
|
cannon_get_data.R
|
# Download and reshape the Johns Hopkins CSSE COVID-19 time series into a long
# per-region panel, then write daily CSV snapshots and per-date / per-country
# aggregates under data/. Network access is required.
library(tidyverse)
library(readr)
library(dplyr)
library(tidyr)
library(lubridate)
library(ggplot2)
library(countrycode)
library(magrittr)
##########################################################
# Download Johns Hopkins CSSE Data from GitHub
##########################################################
var_list <- list("Confirmed", "Deaths", "Recovered")
#create list of links
# NOTE(review): the time_series_19-covid-* file names were later renamed
# upstream; verify these URLs still resolve before relying on this script.
create_jh_link <- function(x) {
  paste0("https://raw.github.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-", x, ".csv")
}
links <- lapply(var_list, create_jh_link)
#read links to create datasets
jh_data <- lapply(links, read_csv)
names(jh_data) <- var_list
#save data — one dated snapshot CSV per measure
# NOTE(review): 1:length(jh_data) would misbehave on an empty list
# (seq_along is the safe idiom).
lapply(1:length(jh_data), function(i) write_csv(jh_data[[i]],
    path = paste0("data/", Sys.Date(), "_", names(jh_data[i]), ".csv")))
#make data long, and merge variables: columns 1-4 are the region/coordinate
#identifiers, everything after is a date column that becomes 'time'.
long_data <- lapply(jh_data, gather, key = "time", value = "variable", -c(1:4))
#I can't figure out how to lapply this since I can't pass a list of strings to the rename argument...
long_data$Confirmed %<>% rename(confirmed = variable)
long_data$Deaths %<>% rename(deaths = variable)
long_data$Recovered %<>% rename(recovered = variable)
panel_data <-long_data
#merge data on province/state, country/region, time
panel_data %<>% reduce(full_join, by = c("Province/State", "Country/Region", "Lat", "Long", "time" ))
# data cleaning and preparation
# full data set: parse the m/d/y date headers and derive net infections
panel_data %<>%
  rename(province = `Province/State`,
         country = `Country/Region`,
         latitude = Lat,
         longitude = Long) %>%
  mutate(time = mdy(time),
         netinfected = confirmed - deaths - recovered)
write_csv(panel_data, paste0("data/", Sys.Date(), "_total.csv"))
# generate aggregate data set over all regions per single date
# Helper summing the four count columns of an already-grouped data frame.
sum_CDRN <- function(x) {
  summarize(x, confirmed = sum(confirmed),
            deaths = sum(deaths),
            recovered = sum(recovered),
            netinfected = sum(netinfected))
}
panel_data_sum_all <- group_by(panel_data, time) %>%
  sum_CDRN()
write_csv(panel_data_sum_all, paste0("data/", Sys.Date(), "_summed.csv"))
panel_data_sum_country <- group_by(panel_data, country, time) %>%
  sum_CDRN()
write_csv(panel_data_sum_country, paste0("data/", Sys.Date(), "_country_sum.csv"))
##########################################################
# Download and Save Country Lookup Table
##########################################################
country_lookup <- read_csv("https://raw.github.com/lukes/ISO-3166-Countries-with-Regional-Codes/master/all/all.csv")
write_csv(country_lookup, paste0("data/", "country_lookup.csv", sep = ""))
##########################################################
# Generate Prediction Data Set
##########################################################
# health preparedness index from https://www.ghsindex.org/
health <- read.csv("data/health_index.csv", sep = ";")
# import population data (2016 values, keyed by ISO3 country code)
pop <- read_csv("https://raw.github.com/datasets/population/master/data/population.csv")
pop_c <- filter(pop, Year == 2016) %>%
  select(c(`Country Code`, "Value")) %>%
  rename(countrycode = `Country Code`,
         population = Value)
# create countrycode to match data sets (map free-text names to ISO3)
health %<>%
  mutate(countrycode = countrycode(country,
                                   origin = "country.name",
                                   destination = "iso3c"))
# aggregate by country and time
# NOTE(review): 'dfc' is not defined anywhere in this file — presumably it was
# meant to be panel_data (or a copy of it). The script errors here as written;
# confirm against the original repository before running.
dfc %>%
  group_by(country, time) %>%
  summarize(confirmed = sum(confirmed),
            deaths = sum(deaths),
            recovered = sum(recovered),
            netinfected = sum(netinfected),
            lat = mean(latitude),
            lon = mean(longitude)) %>%
  mutate(countrycode = countrycode(country,
                                   origin = "country.name",
                                   destination = "iso3c")) -> dfc_grouped
# get date of first confirmed case (earliest row with confirmed != 0 per country)
dfc_grouped %>%
  group_by(country) %>%
  filter(confirmed != 0) %>%
  arrange(time) %>%
  slice(1) %>%
  ungroup() %>%
  select(countrycode, time) %>%
  rename(first_confirmed = time) -> date_first_occurence
# get date of number of cases larger than 100 (earliest such row per country)
dfc_grouped %>%
  group_by(country) %>%
  filter(confirmed >= 100) %>%
  arrange(time) %>%
  slice(1) %>%
  ungroup() %>%
  select(countrycode, time) %>%
  rename(datelarger100 = time) -> date_larger_100
# add first confirmed date to df and calculate days since outbreak in each
# country; negative day counts (dates before the outbreak) are clamped to 0
dfc_grouped %>%
  left_join(date_first_occurence,
            by = "countrycode") %>%
  left_join(date_larger_100,
            by = "countrycode") %>%
  mutate(days_outbreak = time - first_confirmed,
         days_outbreak = replace(days_outbreak, which(days_outbreak < 0), 0),
         days_larger_100 = time - datelarger100,
         days_larger_100 = replace(days_larger_100, which(days_larger_100 < 0), 0)) -> dfc_grouped_add
# Reshape stock prices to wide format (one column per index).
# NOTE(review): 'stock_df' is not defined anywhere in this file — presumably
# produced by a tidyquant download step that is missing here; verify before
# running.
stock_df %>%
  select(c("symbol", "date", "adjusted")) %>%
  pivot_wider(names_from = symbol, values_from = adjusted) %>%
  rename(time = date,
         SP500 = `^GSPC`,
         DAX30 = `^GDAXI`) -> stocks_wide
# get SARS data from https://www.kaggle.com/imdevskp/sars-outbreak-2003-complete-dataset/data
sars <- read.csv("data/misc/sars.csv")
sars %>%
  rename(time = Date,
         country = Country,
         sars_confirmed = Cumulative.number.of.case.s.,
         sars_deaths = Number.of.deaths,
         sars_recovered = Number.recovered) %>%
  mutate(time = as.Date(time),
         sars_netinfected = sars_confirmed - sars_deaths - sars_recovered,
         countrycode = countrycode(country,
                                   origin = "country.name",
                                   destination = "iso3c")) -> df_sars
# idea: get overall SARS outbreak curve over all countries
# (days are counted by row id, i.e. by calendar day since the first record)
df_sars %>%
  group_by(time) %>%
  summarize(sars_confirmed = sum(sars_confirmed),
            sars_deaths = sum(sars_deaths),
            sars_recovered = sum(sars_recovered),
            sars_netinfected = sum(sars_netinfected)) %>%
  tibble::rowid_to_column("sars_days_since_outbreak") %>%
  rename(days_larger_100 = sars_days_since_outbreak,
         sars_time = time) -> df_sars_grouped # inconsistent naming, but need that for merging later
# Exploratory plot only; note the column was just renamed, so
# sars_days_since_outbreak no longer exists here and this ggplot call fails —
# it references pre-rename names (likely stale exploratory code).
df_sars_grouped %>%
  ggplot(aes(x = sars_days_since_outbreak, y = sars_recovered)) +
  geom_point()
# this uncovers a serious measurement error in the data. Netinfected only started counting at >1000 infections -> distorts netinfected
# solution: approximate function (tried polynomial, which is definitely not suitable)
# next try: gaussian quadrature
# # get date of first confirmed case for sars
# df_sars %>%
#   group_by(countrycode) %>%
#   filter(sars_confirmed != 0) %>%
#   arrange(time) %>%
#   slice(1) %>%
#   ungroup() %>%
#   select(countrycode, time) %>%
#   rename(sars_first_confirmed = time) -> sars_date_first_occurence
#
# # get date of number of cases larger than 100 for sars
# df_sars %>%
#   group_by(countrycode) %>%
#   filter(sars_confirmed >= 100) %>%
#   arrange(time) %>%
#   slice(1) %>%
#   ungroup() %>%
#   select(countrycode, time) %>%
#   rename(sars_datelarger100 = time) -> sars_date_larger_100
#
# # add first confirmed date to df and calculate days since outbreak in each country
# df_sars %>%
#   left_join(sars_date_first_occurence,
#             by = "countrycode") %>%
#   left_join(sars_date_larger_100,
#             by = "countrycode") %>%
#   mutate(sars_days_outbreak = time - sars_first_confirmed,
#          sars_days_outbreak = replace(sars_days_outbreak, which(sars_days_outbreak < 0), 0),
#          sars_days_larger_100 = time - sars_datelarger100,
#          sars_days_larger_100 = replace(sars_days_larger_100, which(sars_days_larger_100 < 0), 0)) %>%
#   rename(sars_time = time)-> dfc_sars_grouped_add
# merge all data sets into the final prediction frame: per-country/day counts
# plus stocks, search index, health index, population, SARS comparison curve,
# per-capita rates and a 14-day lag of confirmed cases.
# NOTE(review): 'gtrend_df' is not defined anywhere in this file (presumably a
# Google-Trends download step is missing); this pipeline fails as written.
dfc_grouped_add %>%
  left_join(stocks_wide, by = "time") %>%
  left_join(gtrend_df %>%
              rename(time = date,
                     searchindex = hits) %>%
              select(c("time", searchindex)),
            by = "time") %>%
  left_join(health[,c("health_index", "countrycode")], by = "countrycode") %>%
  left_join(pop_c, by = "countrycode") %>%
  mutate(days_outbreak = as.numeric(days_outbreak),
         days_larger_100 = as.numeric(days_larger_100)) %>%
  # align each country's epidemic day with the same day of the SARS curve
  left_join(df_sars_grouped, by = "days_larger_100") %>%
  mutate(sars_confirmed = replace_na(sars_confirmed, 0),
         sars_deaths = replace_na(sars_deaths, 0),
         sars_recovered = replace_na(sars_recovered, 0),
         sars_netinfected = replace_na(sars_netinfected, 0),
         confirmed_capita = confirmed/population,
         deaths_capita = deaths/population,
         recovered_capita = recovered/population,
         netinfected_capita = netinfected/population,
         sars_confirmed_capita = sars_confirmed/population,
         sars_deaths_capita = sars_deaths/population,
         sars_recovered_capita = sars_recovered/population,
         sars_netinfected_capita = sars_netinfected/population) %>%
  group_by(country) %>%
  mutate(lag_confirmed = dplyr::lag(confirmed, n = 14, default = NA)) %>%
  tibble::rowid_to_column("id") -> dfc_predict
# check which countries aren't included in health_index/population data
dfc_predict %>%
  filter(is.na(population)) %>%
  pull(country) %>%
  unique()
# move old prediction file to archive, rename it (with its modification date)
# and write new one
file.copy("data\\predict\\predict.csv", "data\\predict\\archive")
file.rename("data\\predict\\archive\\predict.csv",
            paste("data\\predict\\archive\\", as.Date(file.info("data\\predict\\predict.csv")$mtime), "_predict.csv", sep = ""))
write_csv(dfc_predict, paste("data\\predict\\", "predict.csv", sep = ""))
© 2020 GitHub, Inc.
Terms
Privacy
Security
Status
Help
Contact GitHub
Pricing
API
Training
Blog
About
|
5b70eeb3048df50ddf2965e7b9fd7288fa7e3bd5
|
32ea541936d63a63d196ab2c338ca87d2c5ff0fb
|
/q1/plot1.R
|
b0d02ef6734c8dc35907d3d7f10433eb6e58b9d5
|
[] |
no_license
|
mohiiieldin/Coursera-Explatory-Data-Analysis-project
|
d80cbc231a698994fc9b114a89236ebe25c0170e
|
85d97ba2daaacb8dc6e99dae25514a2424564a26
|
refs/heads/master
| 2020-07-14T03:57:43.248575
| 2019-08-29T20:00:22
| 2019-08-29T20:00:22
| 205,232,250
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,078
|
r
|
plot1.R
|
setwd("E:/Coursera data science specialization/4. Explatory data analysis/week4/project2")

# Load the assignment's RDS inputs.
SCC <- readRDS("Source_Classification_Code.rds") # Source Classification Code
NEI <- readRDS("summarySCC_PM25.rds")            # National Emissions Inventory

library(dplyr)

# Total PM2.5 emissions per year, summed over all sources.
totals_by_year <- NEI %>%
  group_by(year) %>%
  summarise(total = sum(Emissions)) %>%
  as.data.frame()

years <- totals_by_year[, 1]
emissions_kt <- totals_by_year[, 2] / 1000  # kilotons, for readable axis values

# Line-and-point plot of the yearly totals.
png(file = "plot1_base_plot.png", width = 600, height = 600, res = 140, type = "cairo")
plot(years, emissions_kt, type = "b", pch = 19,
     xlab = "Years", ylab = "Emmision", main = "Emissions across years")
dev.off()

# just making a barplot for fun:
bar_colours <- c("red", "orangered", "steelblue", "green")
png(file = "plot1_barplot.png", width = 600, height = 600, res = 140, type = "cairo")
barplot(height = emissions_kt,
        names.arg = years,
        xlab = "Years", ylab = "Emmision",
        main = "Emissions across years in US",
        ylim = c(0, 8000),
        col = bar_colours)
dev.off()
|
e223d7e7c61cb2c7972143ab0528fd4b7d3f44c2
|
6d07290565118ab94d32b101a8f0e0c200422cd8
|
/jug-master/man/simple_error_handler_json.Rd
|
7f01ac5b19fdd751a7f672d606551026cf7d7e4d
|
[
"MIT"
] |
permissive
|
XWU-UT/Clustering_D3platform
|
0597294649b5a91bd7dd4b06645fa3182841f166
|
e79daa9a7b7582d168a6673d49d776d7b029a2f0
|
refs/heads/master
| 2020-12-17T07:05:57.731697
| 2020-01-28T13:48:36
| 2020-01-28T13:48:36
| 235,286,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 534
|
rd
|
simple_error_handler_json.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/middleware_error.R
\name{simple_error_handler_json}
\alias{simple_error_handler_json}
\title{An error handler middleware which returns an error description in JSON format}
\usage{
simple_error_handler_json(jug, path = NULL)
}
\arguments{
\item{jug}{the jug instance}
\item{path}{the path to bind to, default = NULL (all paths)}
}
\description{
An error handler middleware which returns an error description in JSON format
}
\seealso{
simple_error_handler
}
|
62fc17b9c0c88cf5c9dabb14e60bdd915909e7eb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PROSPER/examples/sel_herb.Rd.R
|
328fe04bc8b1633793f7eacca5f82456f245c05c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
sel_herb.Rd.R
|
# Auto-extracted example code for PROSPER::sel_herb (herbicide selection step).
library(PROSPER)
### Name: sel_herb
### Title: Surviving the herbicide
### Aliases: sel_herb
### ** Examples
# Prepare population structure and initial allele frequencies / seed bank —
# presumably required state before selection can be applied (see ?sel_herb).
struc_preparation(Rmx=10, n_loci=2, epis=0, dom=1)
gen_freq( af=c(0.01,0.8), n_seeds=10000)
# Apply herbicide selection from the initial seed bank to the "winter" stage;
# parameter semantics (thresh, sdrate, rate, put) are defined by the package.
sel_herb(start="initialSB", result="winter",
        thresh=20, sdrate=0.4, rate=100, put=0.04)
|
72886c6259547a0dd8b61ba0e062d8d17a80ffbe
|
432cc53cc851c9130ec6d0f2ce6c6aee41f6c57e
|
/plot1.R
|
e6fef382c083ce44bd38ecea2021f75db4eb6644
|
[] |
no_license
|
ramyasridharan5/ExData_Plotting1
|
1ae385131671c3c1c7a4481645dec8a8bd9c8928
|
6d8f3f23caea0a4d2d8b85a2b231ad178cee3a39
|
refs/heads/master
| 2020-04-14T00:53:51.439197
| 2018-12-30T07:09:31
| 2018-12-30T07:09:31
| 163,545,588
| 0
| 0
| null | 2018-12-29T22:36:20
| 2018-12-29T22:36:19
| null |
UTF-8
|
R
| false
| false
| 562
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02 from
# the UCI household power consumption data ("?" marks missing values).
df<-read.table("household_power_consumption.txt",
               sep=";",header=TRUE, na.strings="?")
df[,1] <- as.Date(df$Date, "%d/%m/%Y")
d<-df
# Build a proper datetime column. The original parsed with
# "%d/%m/%Y %H:%M:%S", but df[,1] is already a Date that pastes in ISO form
# ("%Y-%m-%d"), so every value came out NA; the format below matches the text.
df[,2]<- strptime(paste(df[,1], d$Time), "%Y-%m-%d %H:%M:%S")
s<- subset(d, Date>="2007-02-01"& Date<="2007-02-02", select = c(names(d)))
png(filename = "plot1.png", width=480, height=480)
h <- hist(as.numeric(s$Global_active_power),
          main="Global Active Power",
          xlab = "Global Active Power (kilowatts)",
          ylab = "Frequency",
          col = "red",
          ylim = c(0,1200))
dev.off()
|
fd651e14adfb72990c259995733e6ffbae56284b
|
e091c1711b087acdfdca9b84b7050d65cd7b8400
|
/man/att.Rd
|
2dfcff59a85ff1751cbfed49430f371f04008e34
|
[
"MIT"
] |
permissive
|
pboesu/GenieR
|
ee1bfd256a1cb26b1b389a6c17f0af48cb5b0876
|
008d3a55cedd1dab6670b1100bbe220dd4d5271f
|
refs/heads/master
| 2020-03-07T10:12:14.443447
| 2018-01-09T13:38:59
| 2018-01-09T13:38:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 871
|
rd
|
att.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/att.R
\name{att}
\alias{att}
\title{Sort out sampling times, coalescent times and sampling lineages from a phylogenetic tree}
\usage{
att(phy, eps = 1e-06)
}
\arguments{
\item{phy}{A phylogenetic tree.}
\item{eps}{Difference parameter to separate coalescent and sampling event.}
}
\value{
Sorted sampling times, coalescent times and sampling lineages.
}
\description{
\code{att} sorts out sampling times, coalescent times and sampling lineages from a phylogenetic tree.
}
\examples{
library(ape)
t1=rcoal(20)
att(t1)
}
\references{
Palacios JA and Minin VN. Integrated nested Laplace approximation for Bayesian nonparametric phylodynamics, in Proceedings of the Twenty-Eighth Conference on Uncertainty in Artificial Intelligence, 2012.
}
\author{
Simon Frost (\email{sdwfrost@gmail.com})
}
|
9878ccdf64d45cb38fd2aa5d4c3e5489009f3771
|
3eefcbaa7faaff48f1335a3a3e4dc56e114c1ab0
|
/revision_probaDiffMatchedPairs.R
|
7b789f5b95e0b61c618526c808f09c02c649b134
|
[] |
no_license
|
marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA
|
9a435c08a9064d127a86d9909042bb4ff59ad82d
|
e33a0683ac7a9afe21cfec06320c82251d3ba0d5
|
refs/heads/master
| 2021-06-16T15:57:30.182879
| 2021-05-18T08:36:44
| 2021-05-18T08:36:44
| 202,159,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,888
|
r
|
revision_probaDiffMatchedPairs.R
|
# Revision analysis: compare inter/intra contact-probability differences of
# matched normal/tumor TAD pairs against TAD significance and fold change.
# Reads precomputed .Rdata inputs; statement order matters (stopifnot guards).
outFolder <- file.path("REVISION_PROBADIFFMATCHEDPAIRS_CORRECTED")
dir.create(outFolder, recursive = TRUE)
all_inter_intra1_dt <- get(load("REVISION_INTER_INTRA_PROBA_CORRECTED/all_inter_intra_dt.Rdata"))
all_inter_intra2_dt <- get(load("REVISION_INTER_INTRA_PROBA2_CORRECTED/all_inter_intra_dt.Rdata"))
# Alternative "_V2_" inputs/outputs (kept commented as a switchable variant):
# outFolder <- file.path("REVISION_PROBADIFFMATCHEDPAIRS_V2_CORRECTED")
# dir.create(outFolder, recursive = TRUE)
# all_inter_intra1_dt <- get(load("REVISION_INTER_INTRA_PROBA_V2_CORRECTED/all_inter_intra_dt.Rdata"))
# all_inter_intra2_dt <- get(load("REVISION_INTER_INTRA_PROBA2_V2_CORRECTED/all_inter_intra_dt.Rdata"))
# Rscript revision_probaDiffMatchedPairs.R
require(ggplot2)
require(ggpubr)
require(ggsci)
require(doMC)
require(foreach)
registerDoMC(40)  # NOTE(review): hard-coded 40 cores; adjust to the host
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
# Plot appearance settings.
plotType <- "png"
myWidth <- 500
myWidthGG <- 7
myHeightGG <- 5
myHeight <- 400
plotCex <- 1.2
# Adjusted p-value threshold below which a TAD is called significant.
tadSignifThresh <- 0.01
# 2nd part => for matching pairs
# refID signif. or not signif.
#
# similar to normFC and tumorFC -> normInterIntraProba tumor
#
# diff. inter/intra vs. signif/not signif. norm tumor
# diff. inter/intra vs. norm tumor FC
source("revision_settings.R")
# Only one probability column is analysed; col_var is the scalar convenience
# alias used by the checks below.
all_col_vars <- c("mean_intraNorm")
col_var = "mean_intraNorm"
###################
### PREPARE SIGNIF DATA
###################
final_table_file <- file.path("CREATE_FINAL_TABLE/all_result_dt.Rdata")
stopifnot(file.exists(final_table_file))
final_table_DT <- get(load(final_table_file))
# regionID = hicds/exprds/region uniquely identifies a TAD in a dataset pair.
final_table_DT$regionID <- file.path(final_table_DT$hicds, final_table_DT$exprds, final_table_DT$region)
stopifnot(!duplicated(final_table_DT$regionID))
# Lookup: regionID -> combined adjusted p-value.
regionID_pvals <- setNames(final_table_DT$adjPvalComb, final_table_DT$regionID)
###################
### PREPARE PROBA DIFF DATA
###################
# The two proba tables must cover disjoint hicds before stacking them.
stopifnot(! all_inter_intra1_dt$hicds %in% all_inter_intra2_dt$hicds)
all_inter_intra_dt <- rbind(all_inter_intra1_dt, all_inter_intra2_dt)
stopifnot(final_table_DT$hicds %in% all_inter_intra_dt$hicds)
stopifnot(col_var %in% colnames(all_inter_intra_dt))
# region_hicdsID = hicds/region (no exprds): key into the proba values.
all_inter_intra_dt$region_hicdsID <- file.path(all_inter_intra_dt$hicds, all_inter_intra_dt$region)
stopifnot(!duplicated(all_inter_intra_dt$region_hicdsID))
# Lookup: region_hicdsID -> value of the analysed proba column.
colVar_values <- setNames(all_inter_intra_dt[,paste0(col_var)], all_inter_intra_dt$region_hicdsID)
###################
### PREPARE THE pairs_data
###################
# Load the normal<->tumor TAD matching produced by the RANKDIFF_ACTIVDIFF step
# and stack the normal-referenced and tumor-referenced matching tables.
pairFolder <- file.path("REVISION_RANKDIFF_ACTIVDIFF/")
outFile <- file.path(pairFolder, "matching_data.Rdata")
matching_data <- get(load(file=outFile))
ds1_matching_dt <- do.call(rbind, lapply(matching_data, function(x)x[["norm_matching_pval_tadRank_dt"]]))
unique(ds1_matching_dt$ref_hicds)
# [1] "LI_40kb"              "LG1_40kb"             "LG2_40kb"             "GSE118514_RWPE1_40kb"
ds2_matching_dt <- do.call(rbind, lapply(matching_data, function(x)x[["tumor_matching_pval_tadRank_dt"]]))
unique(ds2_matching_dt$ref_hicds)
# [1] "GSE105381_HepG2_40kb"      "ENCSR444WCZ_A549_40kb"     "ENCSR489OCU_NCI-H460_40kb"
# [4] "ENCSR346DCU_LNCaP_40kb"    "GSE118514_22Rv1_40kb"
matching_withRank_dt <- rbind(ds1_matching_dt, ds2_matching_dt)
rownames(matching_withRank_dt) <- NULL
# Reference and matched datasets must share the expression dataset.
stopifnot(matching_withRank_dt$matching_exprds == matching_withRank_dt$ref_exprds )
# Build the four lookup keys: full IDs (with exprds) for p-value lookups,
# hicds-only IDs for the proba-value lookups.
matching_withRank_dt$ref_region_ID <- file.path(matching_withRank_dt$ref_hicds,
                                                matching_withRank_dt$ref_exprds,
                                                matching_withRank_dt$refID
)
matching_withRank_dt$ref_region_hicdsID <- file.path(matching_withRank_dt$ref_hicds,
                                                matching_withRank_dt$refID
)
matching_withRank_dt$matching_region_ID <- file.path(matching_withRank_dt$matching_hicds,
                                                     matching_withRank_dt$matching_exprds,
                                                     matching_withRank_dt$matchingID_maxOverlapBp)
matching_withRank_dt$matching_region_hicdsID <- file.path(matching_withRank_dt$matching_hicds,
                                                     matching_withRank_dt$matchingID_maxOverlapBp)
# Attach adjusted p-values for both sides; the ref side must agree with the
# adjPval column already stored in the matching table (sanity check).
stopifnot(matching_withRank_dt$ref_region_ID %in% names(regionID_pvals))
matching_withRank_dt$ref_region_pval <- regionID_pvals[paste0(matching_withRank_dt$ref_region_ID)]
stopifnot(!is.na(matching_withRank_dt$ref_region_pval))
stopifnot(round(matching_withRank_dt$ref_region_pval, 6) == round(matching_withRank_dt$adjPval, 6))
stopifnot(matching_withRank_dt$matching_region_ID %in% names(regionID_pvals))
matching_withRank_dt$matching_region_pval <- regionID_pvals[paste0(matching_withRank_dt$matching_region_ID)]
stopifnot(!is.na(matching_withRank_dt$matching_region_pval))
# Discretise reference-TAD significance for colouring.
matching_withRank_dt$ref_tadSignif <- ifelse(matching_withRank_dt$adjPval <= tadSignifThresh, "signif.", "not signif.")
my_cols <- setNames(pal_jama()(5)[c(3, 2,4)], unique(matching_withRank_dt$ref_tadSignif))
stopifnot(matching_withRank_dt$ref_region_hicdsID %in% names(colVar_values))
stopifnot(matching_withRank_dt$matching_region_hicdsID %in% names(colVar_values))
# Keep a pristine copy: the loop below mutates matching_withRank_dt per col_var.
matching_withRank_dt_s <- matching_withRank_dt
stopifnot(length(all_col_vars) == 1) # will not work with more variables because colVar_values was built to work with 1 variable
# For the (single) proba variable: attach the per-TAD proba values on both
# sides of each matched pair, label which side is normal vs tumor, and plot
# the tumor/normal proba ratio against significance and fold-change difference.
foo <- foreach(col_var=all_col_vars) %dopar% {
  matching_withRank_dt <- matching_withRank_dt_s
  # Proba value of the reference and of the matched TAD (hicds-level lookup).
  matching_withRank_dt[,paste0("ref_", col_var)] <- colVar_values[matching_withRank_dt$ref_region_hicdsID]
  matching_withRank_dt[,paste0("matching_", col_var)] <- colVar_values[matching_withRank_dt$matching_region_hicdsID]
  # Keep only pairs with proba values on both sides.
  matching_withRank_dt <- matching_withRank_dt[
    !is.na( matching_withRank_dt[,paste0("ref_", col_var)] ) & !is.na( matching_withRank_dt[,paste0("matching_", col_var)] ),
  ]
  # Assign the value to the normal side, whichever side that is
  # (all_normal_ds / all_tumor_ds come from revision_settings.R).
  matching_withRank_dt[,paste0("norm_", col_var)] <- ifelse(matching_withRank_dt$ref_hicds %in% all_normal_ds,
                                                            matching_withRank_dt[,paste0("ref_", col_var)],
                                                   ifelse(matching_withRank_dt$matching_hicds %in% all_normal_ds,
                                                          matching_withRank_dt[,paste0("matching_", col_var)],NA))
  stopifnot(!is.na(matching_withRank_dt[,paste0("norm_", col_var)]))
  # Same for the tumor side.
  matching_withRank_dt[,paste0("tumor_", col_var)] <- ifelse(matching_withRank_dt$ref_hicds %in% all_tumor_ds,
                                                            matching_withRank_dt[,paste0("ref_", col_var)],
                                                            ifelse(matching_withRank_dt$matching_hicds %in% all_tumor_ds,
                                                                   matching_withRank_dt[,paste0("matching_", col_var)],NA))
  stopifnot(!is.na(matching_withRank_dt[,paste0("tumor_", col_var)]))
  # Subtitle: number of dataset comparisons and of (signif.) TAD pairs.
  all_cmps <- unique(file.path(matching_withRank_dt$matching_hicds, matching_withRank_dt$matching_exprds,
                               matching_withRank_dt$ref_hicds, matching_withRank_dt$ref_exprds))
  mySub <- paste0("# DS comparisons = ", length(all_cmps), "; # TADs = ", nrow(matching_withRank_dt),
                  " (signif.: ", sum(matching_withRank_dt$adjPval <= tadSignifThresh), ")")
  plotTit <- ""
  # plot: tumor/normal proba ratio vs reference-TAD adjusted p-value
  matching_withRank_dt[,paste0("tumorOverNorm_", col_var)] <- matching_withRank_dt[,paste0("tumor_", col_var)] / matching_withRank_dt[,paste0("norm_", col_var)]
  outFile <- file.path(outFolder, paste0(col_var, "_ratio_vs_refAdjPvalComb_densplot.", plotType))
  do.call(plotType, list(outFile, height=myWidth, width=myWidth))
  densplot(
    x=matching_withRank_dt[,paste0("tumorOverNorm_", col_var)] ,
    xlab=paste0("tumorOverNorm_", col_var),
    y=matching_withRank_dt[,paste0("ref_region_pval")] ,
    ylab=paste0("ref_region_pval"),
    cex.main=plotCex,
    cex.axis=plotCex,
    cex.lab=plotCex,
    main=plotTit
  )
  mtext(side=3, text=mySub)
  foo <- dev.off()
  cat(paste0("... written: ", outFile, "\n"))
  # Second plot: proba ratio vs tumor-minus-normal mean fold-change difference.
  matching_withRank_dt$tumorMinusNormFCdiff <- matching_withRank_dt$tumorMeanFC - matching_withRank_dt$normMeanFC
  outFile <- file.path(outFolder, paste0(col_var, "_ratio_vs_FC_diff_densplot.", plotType))
  do.call(plotType, list(outFile, height=myWidth, width=myWidth))
  densplot(
    x=matching_withRank_dt[,paste0("tumorOverNorm_", col_var)] ,
    xlab=paste0("tumorOverNorm_", col_var),
    y=matching_withRank_dt[,paste0("tumorMinusNormFCdiff")],
    ylab=paste0("tumorMinusNormFCdiff"),
    cex.main=plotCex,
    cex.axis=plotCex,
    cex.lab=plotCex,
    main=plotTit
  )
  mtext(side=3, text=mySub)
  foo <- dev.off()
  cat(paste0("... written: ", outFile, "\n"))
  # Persist the annotated pair table for downstream use.
  outFile <- file.path(outFolder, paste0(col_var, "_matching_withRank_dt.Rdata"))
  save(matching_withRank_dt, file=outFile, version=2)
  cat(paste0("... written: ", outFile, "\n"))
}
|
e5983834ca50f738be5a0de558b995550c14444a
|
f0454bfe7f3cf48ed64ddd3a7dd1b3defdf742ed
|
/man/ukb_gen_samples_to_remove.Rd
|
f65d92dfb2c290a1b4ddbd539cc6190923d62401
|
[] |
no_license
|
kenhanscombe/ukbtools
|
96cd9f42a8d9210979dd0c73080e788af776fd7d
|
d0e3248eae12429b4dbc2c278960367df19a3511
|
refs/heads/master
| 2023-03-12T18:54:43.822541
| 2023-02-22T09:47:39
| 2023-02-22T09:47:39
| 82,282,787
| 82
| 19
| null | 2020-06-22T21:05:18
| 2017-02-17T09:53:22
|
R
|
UTF-8
|
R
| false
| true
| 2,970
|
rd
|
ukb_gen_samples_to_remove.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genetics_qc.R
\name{ukb_gen_samples_to_remove}
\alias{ukb_gen_samples_to_remove}
\title{Related samples (with data on the variable of interest) to remove}
\usage{
ukb_gen_samples_to_remove(data, ukb_with_data, cutoff = 0.0884)
}
\arguments{
\item{data}{The UKB relatedness data as a dataframe (header: ID1, ID2, HetHet, IBS0, Kinship)}
\item{ukb_with_data}{A character vector of ukb eids with data on the phenotype of interest}
\item{cutoff}{KING kinship coefficient cutoff (default 0.0884 includes pairs with greater than 3rd-degree relatedness)}
}
\value{
An integer vector of UKB IDs to remove.
}
\description{
There are many ways to remove related individuals from phenotypic data for genetic analyses. You could simply exclude all individuals indicated as having "excess relatedness" and include those "used in pca calculation" (these variables are included in the sample QC data, ukb_sqc_v2.txt) - see details. This list is based on the complete dataset, and possibly removes more samples than you need to for your phenotype of interest. Ideally, you want a maximum independent set, i.e., to remove the minimum number of individuals with data on the phenotype of interest, so that no pair exceeds some cutoff for relatedness. \code{ukb_gen_samples_to_remove} returns a list of samples to remove in to achieve a maximal set of unrelateds for a given phenotype.
}
\details{
Trims down the UKB relatedness data before selecting individuals to exclude, using the algorithm: step 1. remove pairs below KING kinship coefficient 0.0884 (3rd-degree or less related, by default. Can be set with \code{cutoff} argument), and any pairs if either member does not have data on the phenotype of interest. The user supplies a vector of samples with data. step 2. count the number of "connections" (or relatives) each participant has and add to "samples to exclude" the individual with the most connections. This is the greedy part of the algorithm. step 3. repeat step 2 till all remaining participants only have 1 connection, then add one random member of each remaining pair to "samples to exclude" (adds all those listed under ID2)
\emph{Another approach from the UKB email distribution list:}
To: UKB-GENETICS@JISCMAIL.AC.UK
Date: Wed, 26 Jul 2017 17:06:01 +0100
\strong{Subject: A list of unrelated samples}
(...) you could use the list of samples which we used to calculate the PCs,
which is a (maximal) subset of unrelated participants after applying some QC
filtering. Please read supplementary Section S3.3.2 for details. You can
find the list of samples using the "used.in.pca.calculation" column in the
sample-QC file (ukb_sqc_v2.txt) (...). Note that this set contains diverse
ancestries. If you take the intersection with the white British ancestry
subset you get ~337,500 unrelated samples.
}
\seealso{
\code{\link{ukb_gen_rel_count}}, \code{\link{ukb_gen_related_with_data}}
}
|
ee5fc020b9eb358d27c59a23ed8cc5d22684fbc1
|
e5b89b428ccda56241171ac3cdae9a1dbbb462e4
|
/solve.QP.R
|
1fee897911e12d40227d6bcc729917810bc40d81
|
[] |
no_license
|
hankHeish/TSSCO
|
7dff6a8296cd5a824cbf80be3a1eab8e94b73703
|
9d73611f76dc03377e0c54b11a9322a5fcecbfec
|
refs/heads/master
| 2021-01-19T07:15:18.881522
| 2020-05-01T15:34:57
| 2020-05-01T15:34:57
| 87,529,139
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,551
|
r
|
solve.QP.R
|
#Statistics and Data Analysis for Financial Engineering
#Chapter 16: Portfolio Selection
library(Ecdat)
library(quadprog)
library(lpSolve)
library(linprog)
#16.6 Risk-Efficiency Portfolios with N Risky Asstes
# Traces the mean-variance efficient frontier for three CRSP stocks by
# solving a quadratic program (minimum portfolio variance) for a grid of
# target expected returns, then marks the tangency and minimum-variance
# portfolios on the risk/return plot.
data(CRSPday)
# Daily returns for columns 4:6 of CRSPday, expressed in percent.
R <- 100 * CRSPday[, 4:6]
mean_vect <- apply(R, 2, mean)
cov_vect <- cov(R)
sd_vect <- sqrt(diag(cov_vect))
corr_vect <- cor(R)
#Set the Constraints Matrix
# Two equality constraints: weights sum to 1 and portfolio mean equals the
# current target (meq = 2 below).
Amat <- cbind(rep(1, 3), mean_vect)
#Amat <- cbind(rep(1, 3), mean_vect, diag(1, nrow = 3))
#Target Portfolio mean
muP <- seq(0.05, 0.14, length = 300)
#muP <- seq(min(mean_vect) + 0.0001, max(mean_vect) - 0.0001, length = 300)
#For the Expect Portfolio Return
sdP <- muP
weights <- matrix(0, nrow = 300, ncol = 3)
# solve.QP minimises (1/2) w' Dmat w - dvec' w; with Dmat = 2*Sigma and
# dvec = 0 the optimal value equals the portfolio variance.
for (i in 1:length(muP))
{
#bvec <- c(1, muP[i], rep(0, 3))
bvec <- c(1, muP[i])
result <- solve.QP(Dmat = 2 * cov_vect,
dvec = rep(0, 3),
Amat = Amat,
bvec = bvec,
meq = 2)
sdP[i] <- sqrt(result$value)
weights[i, ] <- result$solution
}
par(mfrow = c(1, 1))
#pdf("quad_prog_plot.pdf", width = 6, height = 5)
#plot(sdP, muP, type = "l", xlim = c(0, 0.25), ylim = c(0, 0.15), lty = 3, main = "Portfolio Selection")
plot(sdP, muP, type = "l", lty = 3, main = "Portfolio Selection")
# Daily risk-free rate (1.3 percent annualised over 252 trading days).
# NOTE(review): `=` used for assignment here; `<-` is the convention used
# elsewhere in this script.
mufree = 1.3 / 252
points(0, mufree, cex = 4, pch = "*")
sharpe <- (muP - mufree) / sdP
#Find maximum sharpe's ratio
# Exact `==` on floating point is safe here only because max() returns one
# of the vector's own elements.
ind <- (sharpe == max(sharpe))
weights[ind, ]
#Show line of Optimal Portfolio
# Capital market line through the risk-free asset and the tangency portfolio.
lines(c(0, 2), mufree + c(0, 2) * (muP[ind] - mufree) / sdP[ind], lwd = 4, lty = 1, col = "blue")
#Tangency Portfolio
points(sdP[ind], muP[ind], cex = 2, pch = "*")
#Find Minimum Variance Portfolio
ind2 <- (sdP == min(sdP))
points(sdP[ind2], muP[ind2], cex = 2, pch = "+")
# Efficient frontier = portion of the frontier above the minimum-variance point.
ind3 <- (muP > muP[ind2])
#lines(sdP[ind3], muP[ind3], type = "l", xlim = c(0, 0.25), ylim = c(0, 0.3), lwd = 3, col = "red")
lines(sdP[ind3], muP[ind3], type = "l", lwd = 3, col = "red")
text(sd_vect[1], mean_vect[1], "GE", cex = 1.15)
text(sd_vect[2], mean_vect[2], "IBM", cex = 1.15)
text(sd_vect[3], mean_vect[3], "Mobile", cex = 1.15)
#graphics.off()
#Utility
# Quadratic-utility portfolio choice over a grid of risk-aversion values.
#data <- read.csv("C:/Users/J1060019/Desktop/datasets/Stock_Bond.csv", header = T)
data <- read.csv("/Users/Heishminghan/Desktop/Statistics and Data Analysis for Financial Engineering/Stock_Bond.csv", header = T)
price <- data[, c(3, 5, 7, 9, 11, 13, 15, 17, 19, 21)]
n <- dim(price)[1]
m <- dim(price)[2] - 1
# Simple net returns. NOTE(review): `return` shadows base::return in this
# script's environment; works, but a different name would be safer.
return <- price[-1, ] / price[-n, ] - 1
mean_vect <- colMeans(return)
cov_mat <- cov(return)
nlambda <- 250
loglambda_vect <- seq(2, 8, length = nlambda)
w_matrix <- matrix(nrow = nlambda, ncol = 10)
mu_vect <- matrix(nrow = nlambda, ncol = 1)
sd_vect <- mu_vect
ExUtil_vect <- mu_vect
conv_vect <- mu_vect
# For each lambda, solve.QP minimises (1/2) w'(lambda^2 Sigma) w -
# lambda*mu'w subject to sum(w) = 1, i.e. maximises a quadratic utility.
for (i in 1:nlambda)
{
lambda <- exp(loglambda_vect[i])
opt <- solve.QP(Dmat = as.matrix(lambda^2 * cov_mat),
dvec = lambda * mean_vect,
Amat = as.matrix(rep(1, 10)),
bvec = 1,
meq = 1)
w <- opt$solution
mu_vect[i] <- w %*% mean_vect
sd_vect[i] <- sqrt(w %*% cov_mat %*% w)
w_matrix[i, ] <- w
ExUtil_vect[i] <- opt$value
}
par(mfrow = c(1, 3))
plot(loglambda_vect, mu_vect, type = "l", col = "darkred")
plot(loglambda_vect, sd_vect, type = "l", col = "darkblue")
plot(sd_vect, mu_vect, type = "l", col = "darkgreen", main = "Efficiency Frontier")
#16.10 R Lab
#16.10.1 Efficiency Equity Portfolio
# Efficient frontier for six stocks with box constraints on the weights
# (each weight between -0.1 and 0.5, enforced via inequality rows in Amat).
data <- read.csv("/Users/Heishminghan/Desktop/Statistics and Data Analysis for Financial Engineering/Stock_Bond.csv", header = T)
prices <- cbind(data$GM_AC, data$F_AC, data$CAT_AC, data$UTX_AC, data$MRK_AC, data$IBM_AC)
n <- dim(prices)[1]
# Percent simple returns.
returns <- (prices[-1, ] / prices[-n, ] - 1) * 100
pairs(returns, pch = 20, col = "darkgreen", main = "Correlation among 6 Stock's Return")
mean_vect <- colMeans(returns)
cov_mat <- cov(returns)
sd_vect <- sqrt(diag(cov_mat))
M <- length(mean_vect)
# Columns: sum-to-one, target mean (equalities), then +I and -I for the
# lower/upper weight bounds (inequalities, fed through bvec below).
Amat <- cbind(rep(1, 6), mean_vect, diag(1, nrow = 6), diag(-1, nrow = 6))
#Amat <- cbind(rep(1, 6), mean_vect)
muP <- seq(0.05, 0.08, length = 300)
sdP <- muP
weight <- matrix(0, nrow = 300, ncol = 6)
for (i in 1:length(muP))
{
# rep(-0.1, 6): w_j >= -0.1; rep(-0.5, 6): -w_j >= -0.5, i.e. w_j <= 0.5.
bvec <- c(1, muP[i], rep(-0.1, 6), rep(-0.5, 6))
#bvec <- c(1, muP[i])
result <- solve.QP(Dmat = 2 * cov_mat,
dvec = rep(0, 6),
Amat = Amat,
bvec = bvec,
meq = 2)
sdP[i] <- sqrt(result$value)
weight[i, ] <- result$solution
}
par(mfrow = c(1, 1))
plot(sdP, muP, type = "l", lty = 1.5, main = "Portfolio Selection")
#mufree <- 1.3 / 252
# Daily risk-free rate (3 percent annual over 365 days).
mufree <- 3 / 365
points(0, mufree, cex = 2.5, pch = "*")
SharpeRatio <- (muP - mufree) / sdP
# Exact `==` works because max() returns an element of the vector itself.
index_max <- (SharpeRatio == max(SharpeRatio))
print(weight[index_max, ])
# Capital market line through the tangency portfolio.
lines(c(0, 4), mufree + c(0, 4) * (muP[index_max] - mufree) / sdP[index_max], lwd = 2, lty = 1, col = "darkblue")
points(sdP[index_max], muP[index_max], cex = 1, pch = "+")
index_min <- (sdP == min(sdP))
points(sdP[index_min], muP[index_min], cex = 1, pch = "#")
# Efficient frontier: targets above the minimum-variance portfolio's mean.
index_effi <- (muP > muP[index_min])
lines(sdP[index_effi], muP[index_effi], cex = 1.5, type = "l", col = "darkred", lwd = 2)
brand <- c("GM", "F", "CAT", "UTX", "MRK", "IBM")
for (i in 1:length(brand))
text(sd_vect[i], mean_vect[i], brand[i])
#16.10.3 Finding the Set of Possible Expected Return
# Linear programs giving the min/max achievable expected return when long
# weights are capped at B1 and short weights at B2 (weights written as
# long-part minus short-part, hence the doubled variable vector).
data <- read.csv("/Users/Heishminghan/Desktop/Statistics and Data Analysis for Financial Engineering/Stock_Bond.csv", header = T)
prices <- cbind(data$GM_AC, data$F_AC, data$CAT_AC, data$UTX_AC, data$MRK_AC, data$IBM_AC)
n <- dim(prices)[1]
returns <- (prices[-1, ] / prices[-n, ] - 1) * 100
mean_vect <- colMeans(returns)
M <- length(mean_vect)
B1 <- 0.3
B2 <- 0.1
AmatLP1 <- cbind(diag(1, nrow = M), matrix(0, nrow = M, ncol = M))
AmatLP2 <- cbind(matrix(0, nrow = M, ncol = M), diag(1, nrow = M))
# Net exposure (long minus short) must equal 1.
AmatLP3 <- c(rep(1, M), rep(-1, M))
AmatLP <- rbind(AmatLP1, AmatLP2, AmatLP3)
bvecLP <- c(rep(B1, M), rep(B2, M), 1)
cLP <- c(mean_vect, -mean_vect)
const.dir <- c(rep("<=", 2 * M), "=")
resultLP_min <- solveLP(cvec = cLP,
bvec = bvecLP,
Amat = AmatLP,
lpSolve = T,
const.dir = const.dir,
maximum = F)
resultLP_max <- solveLP(cvec = cLP,
bvec = bvecLP,
Amat = AmatLP,
lpSolve = T,
const.dir = const.dir,
maximum = T)
|
a4244121aff58828d50629b1faf6fdfcb791ff9c
|
e0dacb4e31dab0928e139ee0904bb87a1c860e3f
|
/ReadProcessData.R
|
8bd2e9be1a3569e394f6351cddc616dbe37d13b0
|
[] |
no_license
|
cuict/Schlegel_et_al_core_analysis
|
47e1d19a43f3a0889584ddf21c5ef5a715558f30
|
d349c8dbb92c4285e699c05b69f38826f0f261bd
|
refs/heads/main
| 2023-05-14T17:06:09.476817
| 2021-05-30T21:12:18
| 2021-05-30T21:12:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,159
|
r
|
ReadProcessData.R
|
library(Seurat)
library(dplyr)
library(SingleR)
setSeurat <- function(dirpath, name, projectName){
# Load a 10x Genomics count matrix from `dirpath` and wrap it in a Seurat
# object tagged with `projectName` (also stored in meta.data$expt), then
# attach the per-cell mitochondrial expression fraction for QC filtering.
#
# NOTE(review): the `name` argument is overwritten on the next line and is
# never actually used; callers pass a not-yet-defined symbol for it, which
# only works because R evaluates arguments lazily. Kept for call
# compatibility -- confirm before removing.
name.data <- Read10X(data.dir = dirpath)
name <- CreateSeuratObject(raw.data = name.data, project = projectName, min.cells = 3, min.genes = 200)
name@meta.data$expt <- projectName
# Mitochondrial genes are identified by the "mt-" prefix in their names.
name.mito <- grep(pattern = "mt-", x=rownames(x = name@data), value=TRUE)
# Fraction of each cell's raw counts coming from mitochondrial genes.
name.percent.mito <- Matrix::colSums(name@raw.data[name.mito, ])/Matrix::colSums(name@raw.data)
name <- AddMetaData(object = name, metadata = name.percent.mito, col.name="percent.mito")
return(name)
}
plotToFilter <- function(seurat){
# Violin plots of gene count, UMI count and mitochondrial fraction per
# cell -- used to eyeball QC cutoffs before calling filterMitoGenes().
VlnPlot(object = seurat, features.plot = c("nGene", "nUMI", "percent.mito"), nCol = 3)
}
filterMitoGenes <- function(seurat, maxmito, mingenes, maxgenes){
  # Drop cells outside the QC window: keep cells with
  # percent.mito <= maxmito and mingenes <= nGene <= maxgenes.
  #
  # FIX: the original assigned the result back to the local `seurat` and
  # relied on R returning the value of that assignment (invisibly) as the
  # function result. Returning the call directly makes the contract
  # explicit; all callers assign the result, so behaviour is unchanged.
  FilterCells(object = seurat, subset.names = c("percent.mito", "nGene"),
              low.thresholds = c(-Inf, mingenes),
              high.thresholds = c(maxmito, maxgenes))
}
topVarGenes <- function(seurat){
  # Return up to the first 1000 gene names from the object's
  # highly-variable-gene table (@hvg.info). Rows are presumably ordered by
  # decreasing variability -- TODO confirm upstream (FindVariableGenes).
  hvg_names <- rownames(seurat@hvg.info)
  head(hvg_names, n = 1000)
}
# Per-sample QC and preprocessing: load each 10x sample, filter cells on
# mito fraction / gene count, normalise, scale (regressing out nUMI and
# percent.mito), find variable genes, and cache the object to disk.
# WT baseline sample.
wt.bl <- setSeurat("../10x_Files/Martin.WT.BL/", wt.bl, "WT.BL")
plotToFilter(wt.bl)
wt.bl <- filterMitoGenes(wt.bl, 0.05, 500, 3000)
wt.bl <- NormalizeData(wt.bl, display.progress = F)
wt.bl <- ScaleData(wt.bl, vars.to.regress = c("nUMI", "percent.mito"))
wt.bl <- FindVariableGenes(wt.bl, do.plot = F)
saveRDS(wt.bl, file = "martin.wt.bl.norm.scale.regressnUMImito.rds")
# KO baseline sample (note tighter maxgenes cutoff of 2750).
ko.bl <- setSeurat("../10x_Files/Martin.KO.BL/", ko.bl, "KO.BL")
plotToFilter(ko.bl)
ko.bl <- filterMitoGenes(ko.bl, 0.05, 500, 2750)
ko.bl <- NormalizeData(ko.bl, display.progress = F)
ko.bl <- ScaleData(ko.bl, vars.to.regress = c("nUMI", "percent.mito"))
ko.bl <- FindVariableGenes(ko.bl, do.plot = F)
saveRDS(ko.bl, file = "martin.ko.bl.norm.scale.regressnUMImito.rds")
# WT regeneration sample (looser mito cutoff, no minimum gene count).
wt.reg <- setSeurat("../10x_Files/Martin.WT.Reg/", wt.reg, "WT.Reg")
plotToFilter(wt.reg)
wt.reg <- filterMitoGenes(wt.reg, 0.075, 0, 2000)
wt.reg <- NormalizeData(wt.reg, display.progress = F)
wt.reg <- ScaleData(wt.reg, vars.to.regress = c("nUMI", "percent.mito"))
wt.reg <- FindVariableGenes(wt.reg, do.plot = F)
saveRDS(wt.reg, file = "martin.wt.reg.norm.scale.regressnUMImito.rds")
# KO regeneration sample.
ko.reg <- setSeurat("../10x_Files/Martin.KO.Reg/", ko.reg, "KO.Reg")
plotToFilter(ko.reg)
ko.reg <- filterMitoGenes(ko.reg, 0.05, 0, 3000)
ko.reg <- NormalizeData(ko.reg, display.progress = F)
ko.reg <- ScaleData(ko.reg, vars.to.regress = c("nUMI", "percent.mito"))
ko.reg <- FindVariableGenes(ko.reg, do.plot = F)
saveRDS(ko.reg, file = "martin.ko.reg.norm.scale.regressnUMImito.rds")
# Union of each sample's top variable genes, restricted to genes present
# in every sample's scaled data -- used as the CCA gene set.
g.wt.bl <- topVarGenes(wt.bl)
g.ko.bl <- topVarGenes(ko.bl)
g.wt.reg <- topVarGenes(wt.reg)
g.ko.reg <- topVarGenes(ko.reg)
genes.use <- unique(c(g.wt.bl, g.ko.bl, g.wt.reg, g.ko.reg))
length(genes.use)
genes.use <- intersect(genes.use, rownames(wt.bl@scale.data))
genes.use <- intersect(genes.use, rownames(ko.bl@scale.data))
genes.use <- intersect(genes.use, rownames(wt.reg@scale.data))
genes.use <- intersect(genes.use, rownames(ko.reg@scale.data))
# Integrate the four samples with multi-set CCA (25 components).
mar.list <- c(wt.bl, ko.bl, wt.reg, ko.reg)
cellids <- c("WT.BL", "KO.BL", "WT.Reg", "KO.Reg")
mar.merge.cca <- RunMultiCCA(object.list = mar.list, genes.use = genes.use,
add.cell.ids = cellids, num.ccs = 25)
# Diagnostic plots of the first two CCA components, grouped by sample.
p1 <- DimPlot(object = mar.merge.cca, reduction.use = "cca", group.by = "expt",
pt.size = 0.5, do.return = T)
p2 <- VlnPlot(mar.merge.cca, features.plot = "CC1", group.by = "expt", do.return = T)
p3 <- VlnPlot(mar.merge.cca, features.plot = "CC2", group.by = "expt", do.return = T)
p1
plot_grid(p2, p3)
# Align the CCA subspaces across samples, then embed and cluster.
mar.merge.cca <- AlignSubspace(mar.merge.cca, reduction.type = "cca",
grouping.var = "expt", dims.align = 1:25)
VlnPlot(mar.merge.cca, features.plot = c("ACC1", "ACC2"), group.by = "expt", nCol=2)
mar.merge.cca <- RunTSNE(mar.merge.cca, reduction.use = "cca.aligned", dims.use = 1:25,
do.fast=T)
mar.merge.cca <- FindClusters(mar.merge.cca, reduction.type = "cca.aligned",
resolution = 0.6, dims.use = 1:25, print.output = F)
saveRDS(mar.merge.cca, file = "martin.merge.CCA.rds")
|
bb6c2b13fac1159423c2ff67978b3d7c7bb12868
|
de8af7e9fc82c2ff647d74a2b2b150ba7db9b7f6
|
/man/download_sra_db.Rd
|
98709d4a2957a7b4ad2902f0ddc8204a16cf0c46
|
[] |
no_license
|
joseah/srametadata
|
ccb5e21cbb5fc5d1708a16c018167ac5df317795
|
c991bb111ef6373cc446dcf05da96a52341f1d15
|
refs/heads/master
| 2021-01-10T09:20:37.679671
| 2015-06-05T04:02:58
| 2015-06-05T04:02:58
| 36,451,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
rd
|
download_sra_db.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/download_sra_db.R
\name{download_sra_db}
\alias{download_sra_db}
\title{Download SRA metadata database}
\usage{
download_sra_db(file.name = "SRAmetadb.sqlite", path = getwd(),
method = "auto")
}
\arguments{
\item{file.name}{File name for uncompressed database. Default: "SRAmetadb.sqlite"}
\item{path}{Path used to save database. If not provided, current directory is used.}
\item{method}{Method for downloading database: "auto", "internal", "libcurl", "wget", "curl", "lynx". See
\code{\link{download.file}} function.}
}
\description{
Downloads and extracts SRA metadata database
}
|
a82954b90cd7464433c51b479e64705cfa7705fc
|
d9a4dce87b2975f3242e722955e69e221057b034
|
/R/cleanup.R
|
0fcd088925b49b9507ceeac881bddca7cd6af431
|
[] |
no_license
|
JoeLugo-zz/Classification
|
63d4e19b91546712b2d7e8155ed4b92ab73b5215
|
4772d055c177c7c6797d5cec50d6df15b7e80000
|
refs/heads/master
| 2022-11-04T08:12:39.271258
| 2017-02-01T04:02:19
| 2017-02-01T04:02:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,593
|
r
|
cleanup.R
|
library(MASS)
library(class)
library(plyr)
library(reshape2)
library(dplyr)
library(tidyr)
library(chron)
library(caret)
library(e1071)
library(gbm)
library(MLmetrics)
library(randomForest)
library(foreach)
library(parallel)
library(doParallel)
library(nnet)
library(RCurl)
# Fix the RNG seed so the train/test split in subset_train() is reproducible.
set.seed(1017)
# Absolute paths to the raw data files.
# NOTE(review): `=` used for top-level assignment of the path constants;
# `<-` is the convention used elsewhere in this file.
TRAIN_DATA_PATH="/Users/josephlugo/Google Drive/School/4B/STAT 441/Group Project/combined.csv"
TEST_DATA_PATH="/Users/josephlugo/Google Drive/School/4B/STAT 441/Group Project/test.csv"
AVARS_DATA_PATH="/Users/josephlugo/Google Drive/School/4B/STAT 441/Group Project/avars1.csv"
SAMPLE_DATA_PATH="/Users/josephlugo/Google Drive/School/4B/STAT 441/Group Project/sample_submission.csv"
sample_data <- read.csv(SAMPLE_DATA_PATH,
header=TRUE,
sep=",",
stringsAsFactors=F)
original_train_data <- read.csv(TRAIN_DATA_PATH,
header=TRUE,
sep=",",
stringsAsFactors=F)
original_test_data <- read.csv(TEST_DATA_PATH,
header=TRUE,
sep=",",
stringsAsFactors=F)
# Respondent background variables; column names assigned below are relied
# on by cleanup().
avars_data <- read.csv(AVARS_DATA_PATH,
header=TRUE,
sep=",",
stringsAsFactors=F)
colnames(avars_data) <- c("id","gender","position","year_birth","age_member","age_cat","age_head","num_members",
"num_children","partner","civil_status","dom_sit","dwell_type","urban_char","occ","gross_monthly_income",
"gross_monthly_income_imputed","net_monthly_income","net_monthly_income_capped","net_monthly_income_imputed",
"gross_monthly_income_cat","net_monthly_income_cat","gross_household_income","net_household_income",
"edu","edu_diploma","edu_cat","is_member","recruitment","origin","have_simPC")
f_measure <- function(data_frame){
  # Micro-averaged F1 score of a square confusion matrix
  # (rows = actual classes, columns = predicted classes).
  #
  # BUG FIX: the original body read every cell from a global object `cm`
  # instead of the `data_frame` argument, so the result silently depended
  # on whatever `cm` happened to be in the calling environment (and errored
  # when no such object existed).
  tp <- 0
  fp <- 0
  fn <- 0
  for (i in seq_len(nrow(data_frame))){
    tp <- tp + data_frame[i, i]
    # Off-diagonal column mass = predicted-as-i but actually something else.
    fp <- fp + sum(data_frame[, i]) - data_frame[i, i]
    # Off-diagonal row mass = actually i but predicted as something else.
    fn <- fn + sum(data_frame[i, ]) - data_frame[i, i]
  }
  precision <- tp/(tp+fp)
  recall <- tp/(tp+fn)
  f <- 2*(precision*recall)/(precision + recall)
  return(f)
}
parse_datetimes <- function(data_frame, delimeter, col){
  # Split every entry of `data_frame[[col]]` on `delimeter` and return the
  # first three fields as a list of three character vectors (e.g. the
  # day / month / year components of a "d-m-y" date string).
  # A missing field becomes the string "NA" via paste(), matching the
  # original behaviour.
  pieces <- strsplit(data_frame[[col]], delimeter)
  nth_field <- function(k) {
    vapply(pieces, function(p) paste(p[k]), character(1))
  }
  list(nth_field(1), nth_field(2), nth_field(3))
}
cleanup <- function(data_frame, test = FALSE){
  # Feature engineering for the survey data:
  #   * parses start/end dates into day/month/year factors and epoch days
  #   * counts surveys per respondent: total, per core topic, and
  #     cumulatively up to each survey (past_*_surveys)
  #   * joins respondent background variables (`avars_data`, global)
  #   * converts categorical columns to factors and drops raw income columns
  # `test = TRUE` skips NA removal and the response-factor conversions,
  # since the test set lacks the response ratings.
  #
  # BUG FIX: the epoch-day conversion used as.POSIXct format "%d-%M-%Y";
  # "%M" means *minutes* in strptime(), so the month field was silently
  # ignored and all epoch-based ordering was computed from day/year only.
  # Corrected to "%d-%m-%Y".
  if (test == FALSE){
    data_frame <- na.omit(data_frame)
    data_frame$interesting <- as.factor(data_frame$interesting)
    data_frame$difficult <- as.factor(data_frame$difficult)
    data_frame$clear <- as.factor(data_frame$clear)
    data_frame$thinking <- as.factor(data_frame$thinking)
    data_frame$enjoy <- as.factor(data_frame$enjoy)
  }
  # year_month_m is YYYYMM; substr(start = 0) behaves like start = 1.
  data_frame$year <- as.factor(substr(data_frame$year_month_m, 0, 4))
  data_frame$month <- as.factor(substr(data_frame$year_month_m, 5, 6))
  # Empty core labels denote the leisure questionnaire.
  data_frame[data_frame$core == "", ]$core <- "leisure"
  data_frame$core <- as.factor(data_frame$core)
  # Two-digit years ("08"/"09") are expanded to four digits.
  expand_year <- function(yy) ifelse(yy == "08", "2008", ifelse(yy == "09", "2009", yy))
  start_dates <- parse_datetimes(data_frame, "-", "startdate")
  start_dates[[3]] <- expand_year(start_dates[[3]])
  data_frame$start_day <- as.factor(as.numeric(start_dates[[1]]))
  data_frame$start_month <- as.factor(as.numeric(start_dates[[2]]))
  data_frame$start_year <- as.factor(as.numeric(start_dates[[3]]))
  end_dates <- parse_datetimes(data_frame, "-", "enddate")
  end_dates[[3]] <- expand_year(end_dates[[3]])
  data_frame$end_day <- as.factor(as.numeric(end_dates[[1]]))
  data_frame$end_month <- as.factor(as.numeric(end_dates[[2]]))
  data_frame$end_year <- as.factor(as.numeric(end_dates[[3]]))
  # Rebuild the date strings with the normalised years, then convert to
  # whole days since the Unix epoch.
  data_frame$startdate <- paste(start_dates[[1]], start_dates[[2]], start_dates[[3]], sep = "-")
  data_frame$enddate <- paste(end_dates[[1]], end_dates[[2]], end_dates[[3]], sep = "-")
  data_frame$startdate_epoch <- floor(as.integer(as.POSIXct(data_frame$startdate, format = "%d-%m-%Y"))/86400)
  data_frame$enddate_epoch <- floor(as.integer(as.POSIXct(data_frame$enddate, format = "%d-%m-%Y"))/86400)
  # Total number of surveys per respondent.
  count_table <- as.data.frame(table(data_frame$id))
  colnames(count_table) <- c("id", "num_surveys")
  data_frame <- merge(data_frame, count_table, all.x = TRUE, by = "id")
  # Number of surveys per respondent for each core topic (wide format).
  core_table <- as.data.frame(table(data_frame$id, data_frame$core))
  core_table_wide <- dcast(core_table, formula = Var1~Var2, value.var = "Freq")
  colnames(core_table_wide) <- c("id", "num_health", "num_income", "num_leisure")
  data_frame <- merge(data_frame, core_table_wide, all.x = TRUE, by = "id")
  # Number of surveys completed before the current one, in start-date order.
  data_frame <- data_frame[with(data_frame, order(id, startdate_epoch)), ]
  data_frame <- data_frame %>% group_by(id) %>% mutate(past_surveys = 0:(n()-1))
  # Cumulative per-core counts: count within each core separately, then
  # forward-fill across the interleaved rows of the other cores.
  data_frame_leisure <- data_frame[data_frame$core == "leisure", ]
  data_frame_leisure$past_health_surveys <- NA
  data_frame_leisure$past_income_surveys <- NA
  data_frame_leisure <- data_frame_leisure %>% group_by(id) %>% mutate(past_leisure_surveys = 0:(n()-1))
  data_frame_health <- data_frame[data_frame$core == "health", ]
  data_frame_health$past_leisure_surveys <- NA
  data_frame_health$past_income_surveys <- NA
  data_frame_health <- data_frame_health %>% group_by(id) %>% mutate(past_health_surveys = 0:(n()-1))
  data_frame_income <- data_frame[data_frame$core == "income", ]
  data_frame_income$past_health_surveys <- NA
  data_frame_income$past_leisure_surveys <- NA
  data_frame_income <- data_frame_income %>% group_by(id) %>% mutate(past_income_surveys = 0:(n()-1))
  data_frame <- rbind(data_frame_health, data_frame_income)
  data_frame <- rbind(data_frame, data_frame_leisure)
  data_frame <- data_frame[with(data_frame, order(id, startdate_epoch)), ]
  data_frame <- data_frame %>% group_by(id) %>% fill(past_leisure_surveys)
  data_frame <- data_frame[with(data_frame, order(id, startdate_epoch)), ]
  data_frame <- data_frame %>% group_by(id) %>% fill(past_health_surveys)
  data_frame <- data_frame[with(data_frame, order(id, startdate_epoch)), ]
  data_frame <- data_frame %>% group_by(id) %>% fill(past_income_surveys)
  # Rows before the first survey of a given core have no value to fill from.
  data_frame[is.na(data_frame$past_income_surveys), ]$past_income_surveys <- 0
  data_frame[is.na(data_frame$past_health_surveys), ]$past_health_surveys <- 0
  data_frame[is.na(data_frame$past_leisure_surveys), ]$past_leisure_surveys <- 0
  # Mean completion time per respondent.
  duration_means <- ddply(data_frame, ~id, summarise, mean = mean(duration))
  colnames(duration_means) <- c("id", "duration_mean")
  data_frame <- merge(data_frame, duration_means, all.x = TRUE, by = "id")
  # Background (demographic) columns to treat as categorical.
  factors <- c("gender","position","year_birth","age_member","age_cat","age_head","num_members",
               "num_children","partner","civil_status",
               "dom_sit","dwell_type","urban_char","occ", "gross_monthly_income_cat",
               "net_monthly_income_cat", "edu","edu_diploma",
               "edu_cat","is_member","recruitment","origin","have_simPC")
  combined <- merge(x = data_frame, y = avars_data, by = "id", all.x = TRUE)
  combined_clean <- combined
  combined_clean$train <- NULL
  combined_clean$year_month_m <- as.factor(combined_clean$year_month_m)
  combined_clean$startdate <- as.factor(combined_clean$startdate)
  combined_clean$enddate <- as.factor(combined_clean$enddate)
  combined_clean$starttime <- NULL
  combined_clean$endtime <- NULL
  combined_clean$core <- as.factor(combined_clean$core)
  for (i in seq_along(factors)) {
    combined_clean[, factors[i]] <- as.factor(combined_clean[, factors[i]])
  }
  # Drop the raw income columns in favour of the imputed/categorical ones.
  combined_clean <- subset(combined_clean, select = -c(gross_monthly_income, net_monthly_income, net_monthly_income_capped))
  return(combined_clean)
}
get_means <- function(data_frame){
  # For each respondent (`id`), compute the rounded mean of every survey
  # evaluation item, attach it as a factor column `<item>_mean`, and drop
  # the raw per-survey ratings -- except `interesting`, which is the
  # response variable downstream.
  # NOTE(review): the rating columns are factors at this point, so
  # as.numeric() yields level codes rather than the printed labels --
  # presumably intentional since levels are ordered 1..5; confirm upstream.
  per_id_means <- list(
    data_frame %>% group_by(id) %>% summarise(enjoy_mean = round(mean(as.numeric(enjoy)))),
    data_frame %>% group_by(id) %>% summarise(difficult_mean = round(mean(as.numeric(difficult)))),
    data_frame %>% group_by(id) %>% summarise(thinking_mean = round(mean(as.numeric(thinking)))),
    data_frame %>% group_by(id) %>% summarise(clear_mean = round(mean(as.numeric(clear)))),
    data_frame %>% group_by(id) %>% summarise(interesting_mean = round(mean(as.numeric(interesting))))
  )
  # Attach the summaries one at a time, in the same order as the original,
  # so the resulting column order is preserved.
  for (summary_tbl in per_id_means) {
    data_frame <- merge(x = data_frame, y = summary_tbl, by = "id")
  }
  for (mean_col in c("enjoy_mean", "difficult_mean", "thinking_mean",
                     "interesting_mean", "clear_mean")) {
    data_frame[[mean_col]] <- as.factor(data_frame[[mean_col]])
  }
  subset(data_frame, select = -c(enjoy, thinking, clear, difficult))
}
subset_train <- function(data_frame){
# TESTING ON OUR KNOWN DATA
# Random 80/20 split for internal validation. The 80% partition gets the
# per-id rating means computed on itself (via get_means); the 20% holdout
# has its raw ratings dropped and the *training* means merged in by id, so
# holdout rows of unseen ids get NA means (all.x = TRUE).
# Returns list(train_data, test_set). Depends on the global RNG seed for
# reproducibility.
train_percentage <- 0.8
train_index <- sample(1:nrow(data_frame), floor(nrow(data_frame)*train_percentage))
train_data <- data_frame[train_index, ]
row.names(train_data) <- NULL
test_set <- data_frame[-train_index, ]
# NOTE(review): test_index is computed but never used afterwards.
test_index <- as.numeric(row.names(test_set))
row.names(test_set) <- NULL
train_data <- get_means(train_data)
train_data$id <- as.factor(train_data$id)
test_set$id <- as.factor(test_set$id)
# Drop raw ratings from the holdout (keeps `interesting`, the response).
test_set <- subset(test_set, select=-c(enjoy,thinking,clear,difficult))
test_set <- merge(x=test_set,
y=unique(train_data[,c("id","thinking_mean","enjoy_mean","difficult_mean","clear_mean","interesting_mean")]),
by="id",
all.x=TRUE)
data <- list(train_data, test_set)
return(data)
}
scale_data <- function(data_frame, test = FALSE)
{
  # Log-transform the skewed duration/income columns, then centre and scale
  # every numeric column, restoring the `id` and `obs` identifier columns
  # untouched afterwards. Returns the transformed data frame.
  #
  # FIX: the original `if (test == TRUE) {...} else {...}` contained two
  # byte-identical branches, so the flag never changed behaviour. The
  # duplicated code is collapsed; `test` is kept for backward compatibility
  # with existing callers.
  data_frame$duration <- log(data_frame$duration)
  # +1 guards against log(0) for zero incomes.
  data_frame$gross_monthly_income_imputed <- log(data_frame$gross_monthly_income_imputed + 1)
  data_frame$net_monthly_income_imputed <- log(data_frame$net_monthly_income_imputed + 1)
  data_frame$gross_household_income <- log(data_frame$gross_household_income + 1)
  data_frame$net_household_income <- log(data_frame$net_household_income + 1)
  # Integer columns are numeric in R, but keep the explicit OR for parity
  # with the original selection logic.
  numeric_cols <- sapply(data_frame, is.numeric) | sapply(data_frame, is.integer)
  # Preserve the identifier columns, which must not be standardised.
  id <- data_frame$id
  obs <- data_frame$obs
  data_frame[, numeric_cols] <- scale(data_frame[, numeric_cols])
  data_frame$id <- id
  data_frame$obs <- obs
  return(data_frame)
}
# CLEANING UP THE TRAIN_DATA
# Build features for the full training data, carve out an internal 80/20
# validation split, then attach per-id rating means and standardise.
train_data <- cleanup(original_train_data, test=FALSE)
trainlist <- subset_train(train_data)
newtrain <- trainlist[[1]]
newtest <- trainlist[[2]]
train_data <- get_means(train_data)
train_data <- scale_data(train_data,test=FALSE)
# CLEANING UP THE TEST_DATA
# The competition test set has no ratings, so the per-id means learned on
# the training data are merged in by id (unseen ids get NA, all.x = TRUE).
test_data <- cleanup(original_test_data, test=TRUE)
test_data <- merge(x=test_data,
y=unique(train_data[,c("id","thinking_mean","enjoy_mean","difficult_mean","clear_mean","interesting_mean")]),
by="id",
all.x=TRUE)
test_data <- scale_data(test_data, test=TRUE)
|
401b10381236dbb0c215c7d526c143d9df599fa3
|
b299b0ab63c24534198ba671b4eb63a23c4e6c60
|
/man/cell_ids.Rd
|
65e46a696c604deb1b2a5fa17ba31754fb0a990d
|
[] |
no_license
|
kueckelj/celltracer
|
a94bfd97075b8128b5b2871b65f6a22a161c68ac
|
1cb3def126ea685f88ed2d785a0f0ab882535af5
|
refs/heads/master
| 2023-05-02T22:53:20.531195
| 2021-05-29T08:58:00
| 2021-05-29T08:58:00
| 303,118,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 289
|
rd
|
cell_ids.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation-dummies.R
\name{cell_ids}
\alias{cell_ids}
\title{cell_ids}
\usage{
cell_ids(cell_ids)
}
\arguments{
\item{cell_ids}{Character vector. Denotes the cell ids of interest.}
}
\description{
cell_ids
}
|
ae9329c3905790ce15d27ef1d925aaf822cd6030
|
1f2473c8b6693efd1edc9cdcb37c967903dacb97
|
/plot3.R
|
66d565fb678ba47b2da5d0f51f246c46cee993c7
|
[] |
no_license
|
jromania-sykes/ExData_Plotting1
|
07f8390e431e8373fa791aac34f94d47963b9401
|
bd9636edf861f65cebf0bffc3017c185cbdc3713
|
refs/heads/master
| 2021-01-18T05:22:35.795252
| 2015-08-08T04:13:55
| 2015-08-08T04:13:55
| 40,377,704
| 0
| 0
| null | 2015-08-07T19:35:18
| 2015-08-07T19:35:17
| null |
UTF-8
|
R
| false
| false
| 1,016
|
r
|
plot3.R
|
plot3<-function(){
# Course-project plot 3: energy sub-metering (three channels) over
# 2007-02-01/02, written to plot3.png (480x480).
# NOTE(review): relies on a Unix `grep` binary via pipe(), on the data file
# being in the working directory, and on a hard-coded output path -- not
# portable; confirm environment before reuse.
# preprocess file with grep to only bring in data for days we will use
df<-read.csv(pipe('grep ^[12]/2/2007 household_power_consumption.txt'),sep=';')
# set column names
# (grep strips the header row, so names are re-assigned manually)
colnames(df)<-c('Date','Time','Global_active_power','Global_reactive_power','Voltage','Global_intensity','Sub_metering_1','Sub_metering_2','Sub_metering_3')
#combine date and time columns into a datetime column
df$DateTime <- as.POSIXct(paste(df$Date, df$Time), format="%d/%m/%Y %H:%M:%S")
#create png device
png('~/datascience/ExData_Plotting1/plot3.png',width=480,height=480)
#draw plot with proper labels
plot(df$DateTime,df$Sub_metering_1,type='l',ylab='Energy sub metering',xlab='')
#add additional lines
lines(df$DateTime,df$Sub_metering_2,col='red')
lines(df$DateTime,df$Sub_metering_3,col='blue')
#setup the legend
legend("topright",40,c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),lwd=c(1,1),col=c("black","red","blue"),cex=.7,lty=1)
# Close the device so the PNG is flushed to disk.
dev.off()
}
|
9e769b8dc7b35c8f935a379e7f242e80bb4c9e13
|
f300f25414e56879874f8540e00382517a44d78f
|
/inst/simulations/simulation_analysis.R
|
e2ff6af19ec74ae36ff81735fd4eb9c01b1ac7c1
|
[] |
no_license
|
robertsy/assimilr
|
c9e266a64dd20a7df55c1cef60e37a860cd3b349
|
0eb77afe8ca79274285c8ae7d659f429155c8041
|
refs/heads/master
| 2021-01-12T05:52:23.288713
| 2017-07-07T13:49:33
| 2017-07-07T13:49:36
| 77,225,544
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,844
|
r
|
simulation_analysis.R
|
## final experiments analysis for paper
## analyse of high frequency and low-frequency experiments for paper
library(RColorBrewer)
library(ggthemes)
# tikz_flag <- TRUE
mainpath <- 'inst/simulations/figures/'
# Display labels for the three assimilation methods (order matched to the
# factor relabelling below via mlatex[c(3,2,1)]).
mlatex <- c('BLOCK-LEnKPF', 'NAIVE-LEnKPF', 'LEnKF')
crps_n <- 'CRPS'
# load simulation results -------------------------------------------------
EXP <- '000'; nsim <- 1000 ## final version
enstype <- 'ensB'
output_name <- paste('inst/simulations/data/high_freq_experiment_nsim',nsim,'_EXP', EXP,'.rds', sep='')
output <- readRDS(output_name)
## compute error FF error:
# 'FF' rows are the free-forecast baseline; its error is used as the
# denominator for the relative error of every other method.
ff_error <- output %>% filter(method == 'FF') %>% rename(ff_error=error) %>% select(-method)
## relative error:
rel_error <- output %>% filter(method != 'FF') %>%
# left_join(ff_error, by=c('field', 'type', 'error_type', 'K','l','ndim', 'time', 'sim')) %>%
left_join(ff_error) %>%
mutate(relative_error=error/ff_error*100)
## reorder methods:
rel_error$method <- factor(rel_error$method, levels = c("LEnKF","naive-LEnKPF", "block-LEnKPF"),
labels=mlatex[c(3,2,1)])
# plotting of CRPS for 1 hour ----------------------------------------------------------------
myblue <- '#116699'
lwd <- .8
leg_coord <- c(.85, .8)
ylims <- c(50, 126)
# ylims <- c(45, 126)
# Mean relative CRPS over simulations, by method/field, for the first hour.
# NOTE(review): the inline comment says "time in hours" but the axis label
# says minutes; time/60*5 presumably converts model steps to minutes --
# confirm against the experiment's step size.
g <-
rel_error %>% filter(error_type %in% c('crps'), type==enstype) %>% ungroup() %>%
mutate(time=time/60*5) %>% ## time in hours
filter(time <= 60) %>%
group_by(method, type, error_type, field, time) %>%
summarise(mean_error=mean(relative_error), sd_error=sd(relative_error)) %>% ## sim average
ggplot(aes(x=time, y=mean_error, color=method)) +
geom_line(size=lwd) +
facet_wrap(~field) +
xlab('time (min)') + ylab(crps_n) + theme_bw() +
ylim(ylims)
g <- g +
scale_y_continuous(breaks=seq(50,125,25), limits=ylims) +
scale_colour_tableau("greenorange6")
g <- g +
theme(
plot.background=element_blank(),
legend.position=leg_coord,
legend.title=element_blank(),
legend.background=element_blank(),
legend.key=element_blank())
g_hf1hr <- g + guides(colour = guide_legend(override.aes = list(size = 6)))
# for 6hrs ----------------------------------------------------------------
# Same plot over the full run, restricted to two fields.
leg_coord <- c(.3, .3)
g <-
rel_error %>%
filter(field %in% c('fluid height', 'rain content')) %>%
filter(error_type %in% c('crps'), type==enstype) %>% ungroup() %>%
mutate(time=time/60*5) %>% ## time in hours
group_by(method, type, error_type, field, time) %>%
summarise(mean_error=mean(relative_error), sd_error=sd(relative_error)) %>% ## sim average
ggplot(aes(x=time, y=mean_error, color=method)) +
geom_line(size=lwd) +
facet_wrap(~field) +
xlab('time (min)') + ylab(crps_n) + theme_bw() +
ylim(ylims)
g <- g +
scale_y_continuous(breaks=seq(50,125,25), limits=ylims) +
scale_colour_tableau("greenorange6") +
theme(
plot.background=element_blank(),
legend.position=leg_coord,
legend.title=element_blank(),
legend.background=element_blank(),
legend.key=element_blank())
g_hf6hr <- g + guides(colour = guide_legend(override.aes = list(size = 6)))
# low-freq exp ------------------------------------------------------------
# Same relative-error preparation as the high-frequency experiment, but for
# the low-frequency runs (nsim = 100).
EXP <- '000'; nsim <- 100 ##
output_name <- paste('inst/simulations/data/low_freq_experiment_nsim',nsim,'_EXP', EXP,'.rds', sep='')
output <- readRDS(output_name)
# output <- output %>% filter(sim <=100)
## compute error FF error:
ff_error <- output %>% filter(method == 'FF') %>% rename(ff_error=error) %>% select(-method)
## relative error:
rel_error <- output %>% filter(method != 'FF') %>%
# left_join(ff_error, by=c('field', 'type', 'error_type', 'K','l','ndim', 'time', 'sim')) %>%
left_join(ff_error) %>%
mutate(relative_error=error/ff_error*100)
## reorder methods:
rel_error$method <- factor(rel_error$method, levels = c("LEnKF","naive-LEnKPF", "block-LEnKPF"),
labels=mlatex[c(3,2,1)])
# boxplot -----------------------------------------------------------------
# Distribution over simulations of the time-averaged relative CRPS.
g <-
rel_error %>% filter(error_type %in% c('crps'), type==enstype) %>%
# ungroup() %>%mutate(time=time/60*5) %>% ## time in hours
# filter(time >= 1000) %>%
# filter(time <= 360*48*1) %>%
group_by(method, type, error_type, field, sim ) %>%
summarise(sim_error=mean(relative_error)) %>% ## time average
ggplot(aes(x=method, y=sim_error, color=method, fill=method)) +
geom_boxplot(alpha=0.6, outlier.size=.8) + # geom_violin(alpha=0.6) + #geom_boxplot() +
facet_wrap(~field) +
ylab(crps_n) +
xlab(NULL) +
theme_bw() + theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
ylim(ylims)
g <- g +
scale_y_continuous(breaks=seq(50,125,25), limits=ylims) +
scale_colour_tableau("greenorange6", guide='none') +
scale_fill_tableau("greenorange6", guide='none') +
theme(
plot.background=element_blank(),
legend.position=leg_coord,
legend.title=element_blank(),
legend.background=element_blank(),
legend.key=element_blank())
g_lfbox <- g
g
# ts plot -----------------------------------------------------------------
## or clearer as a time series?
leg_coord <- c(.85, .8)
# Mean relative CRPS over simulations as a time series; the x aesthetic
# converts time steps to days (time/60*5 minutes, /60 hours, /24 days).
g <-
rel_error %>% filter(error_type %in% c('crps'), type==enstype) %>% ungroup() %>%
# filter(time <= 360*48*3) %>%
group_by(method, type, error_type, field, time) %>%
summarise(mean_error=mean(relative_error, na.rm=T), sd_error=sd(relative_error)) %>% ## sim average
ggplot(aes(x=time/60*5/60/24, y=mean_error, color=method)) +
geom_line(size=lwd) +
facet_wrap(~field) +
xlab('time (days)') + ylab(crps_n) + theme_bw() +
ylim(ylims)
g <- g +
scale_y_continuous(breaks=seq(50,125,25), limits=ylims) +
scale_colour_tableau("greenorange6") +
# guides(colour = guide_legend(override.aes = list(size = 6))) +
theme(
plot.background=element_blank(),
legend.position=leg_coord,
legend.title=element_blank(),
legend.background=element_blank(),
legend.key=element_blank())
g_lfts <- g + guides(colour = guide_legend(override.aes = list(size = 6)))
# calibration -------------------------------------------------------------
# Rank histogram (PIT) for the block-LEnKPF on the high-frequency
# experiment: a flat histogram indicates a calibrated ensemble.
EXP <- '000'; nsim <- 1000
output_name <- paste('inst/simulations/data/pit_high_freq_experiment_nsim',nsim,'_EXP', EXP,'.rds', sep='')
output <- readRDS(output_name)
output %>% filter(field=='rain content' & error_type=='rank')
g <-
output %>%
filter(type=='ensB') %>%
filter(error_type=='rank') %>%
filter(method=='block-LEnKPF') %>%
ggplot(aes(x=error, ..density..)) +
geom_histogram(bins = 51)+
facet_wrap(~field, ncol=3) +
xlab('rank')+ ylab('frequency') +
theme_bw()
g <- g + #theme(panel.margin = unit(0, "lines"))+
scale_x_continuous(expand = c(0,0))
gcal <- g + guides(colour = guide_legend(override.aes = list(size = 6)))
|
f6f0c63f4fa9c2a32e05451102bb8e69e247a999
|
1ae370c92d271056fa8947dcf2ed36adbb88543c
|
/R/blockrandfast.r
|
3b0355f6acb4783f5a925f5fb72c0924ab7eee00
|
[] |
no_license
|
keaven/nphsim
|
ee007d737b24809931410acc226c24446132548d
|
b91a716ada4ddd1aee84bc30ab3b7d84112826bd
|
refs/heads/master
| 2021-01-20T14:47:57.223459
| 2020-05-21T16:07:25
| 2020-05-21T16:07:25
| 90,663,939
| 21
| 10
| null | 2020-05-21T16:07:27
| 2017-05-08T19:16:51
|
R
|
UTF-8
|
R
| false
| false
| 701
|
r
|
blockrandfast.r
|
blockrandfast <- function(n, num.lebvels=2, levels=c("Control","Experimental"),
                          block.size=1)
{
  # Permuted-block randomisation: builds enough complete blocks to cover n
  # assignments (each block holds `block.size` copies of every treatment
  # level) and shuffles the order within each block via a uniform draw.
  # Returns a data frame with columns `treatment` and `block.size`; the
  # number of rows is rounded UP to a whole number of blocks, so it can
  # exceed n.
  # NOTE(review): `num.lebvels` (sic) is accepted but never used; the
  # number of arms comes from length(levels). Kept for call compatibility.
  n.arms <- length(levels)
  blocks.needed <- ceiling(n / block.size / n.arms)
  rows.per.block <- n.arms * block.size
  total.rows <- blocks.needed * rows.per.block
  # One uniform draw per slot decides the within-block ordering.
  schedule <- data.frame(id = seq_len(total.rows),
                         rand = stats::runif(total.rows),
                         trt = rep(rep(levels, block.size), blocks.needed))
  schedule$block <- floor((schedule$id - 1) / rows.per.block) + 1
  shuffled <- schedule[order(schedule$block, schedule$rand), ]
  out <- data.frame(treatment = shuffled$trt)
  out$block.size <- block.size
  return(out)
}
|
badf5dd2be03047fd74a745eba180bc4daea0621
|
1b99784e6090d80356bfa7d7e261078e35281dc3
|
/overall plots zoom.R
|
22dcd49305d8259df99644731c239b7d76541ef5
|
[] |
no_license
|
ChanJeunlam/Last-Glacial-Maximum
|
790cfabef011dfd273896853ec94c51ea48104ee
|
d2ef54a9cb3fad3c87f31309bb693e5d616a75ea
|
refs/heads/main
| 2023-04-19T13:52:50.473483
| 2021-05-11T15:31:36
| 2021-05-11T15:31:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,063
|
r
|
overall plots zoom.R
|
#Conf plotting LGM
library(raster)
library(foreign)
############# plots
library(plotrix)
library(raster)
library(MASS)
library(audio)
library(sp)
library(foreign)
library(rgdal)
library(maptools)
library(rgeos)
library(doParallel)
library(rasterVis)
library(dismo)
library(plotKML)
library(SDMTools)
library(PBSmapping)
library(lme4)
library(blme)
library(mailR)
library(raster)
library(fields)
load("E:/Work for RA/Comps/load data comps 10-1-2014.RData")
list1<-c(1,1,2,2,2,2,2,1,2,2,2,2) #species to choose from
list2<-c(58,30,65,129,140,180,238,233,395,398,455,470)
vers<-4 # this is the species choosing number, 10 is p.lobata, 1 is a hyacinthus
for (vers in 1:12){ ############### WARNING TAKES 3 hours
set<-list1[vers]
bnm<-list2[vers]
setwd("E:/Work for RA/IUCN shapes")
corals2<-read.dbf(paste("CORALS",set,".dbf",sep=""))
corals<-corals2[order(corals2$binomial),]
Species<-paste(corals[bnm,2])
setwd("E:/Paleoreconstruction/plots Oct 11 2019")
SPP<-Species
#time<-"A2"
time<-"LGM"
R<-stack(paste("E:/paleoreconstruction/paleoreconstruction/",SPP,"/",SPP," LGM stack.nc",sep=''))
R<-sum(R)
time<-"initial"
Z<-raster(paste("E:/paleoreconstruction/paleoreconstruction/",SPP,"/",SPP," 1 ",time,' dist.nc',sep=''))
for (i in 2:25){ #2:25
x<-raster(paste("E:/paleoreconstruction/paleoreconstruction/",SPP,"/",SPP," ",i," ",time,' dist.nc',sep=''))
Z<-sum(Z,x,na.rm=T)
print(i)
}
R12<-R>12
Z12<-Z>12
if(vers==1){ R2<-R12; Z2<-Z12}
if(vers!=1){R2<-sum(R2,R12,na.rm=T);Z2<-sum(Z2,Z12,na.rm=T)}
}
compassRose<-function(x,y,rot=0,cex=1,cex.dir=1,llwd=1) {
oldcex<-par(cex=cex)
mheight<-strheight("M")
xylim<-par("usr")
plotdim<-par("pin")
xmult<-(xylim[2]-xylim[1])/(xylim[4]-xylim[3])*plotdim[2]/plotdim[1]
point.angles<-seq(0,7*pi/4,by=pi/4)+pi*rot/180
crspans<-rep(c(mheight*3,mheight/2),4)
xpoints<-cos(point.angles)*crspans*xmult+x
ypoints<-sin(point.angles)*crspans+y
polygon(xpoints,ypoints,lwd=llwd)
txtxpoints<-cos(point.angles[c(1,3,5,7)])*1.33*crspans[1]*xmult+x
txtypoints<-sin(point.angles[c(1,3,5,7)])*1.33*crspans[1]+y
text(txtxpoints,txtypoints,c("E","N","W","S"),cex=cex.dir)
par(oldcex)
}
moll<-"+proj=moll"
plot.map<- function(database,center,transf=T,projectione=newproj,...){
Obj <- map(database,...,plot=F)
coord <- cbind(Obj[[1]],Obj[[2]])
newproj <- "+proj=merc +lon_0=150 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs" #utm
nextproj<-"+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0" #latlong
moll<-"+proj=moll"
# split up the coordinates
id <- rle(!is.na(coord[,1]))
id <- matrix(c(1,cumsum(id$lengths)),ncol=2,byrow=T)
polygons <- apply(id,1,function(i){coord[i[1]:i[2],]})
# split up polygons that differ too much
polygons <- lapply(polygons,function(x){
x[,1] <- x[,1] + center
x[,1] <- ifelse(x[,1]>180,x[,1]-360,x[,1])
if(sum(diff(x[,1])>300,na.rm=T) >0){
id <- x[,1] < 0
x <- rbind(x[id,],c(NA,NA),x[!id,])
}
x
})
# reconstruct the object
polygons <- do.call(rbind,polygons)
colnames(polygons)<-c("x",'y')
polygons<-as.data.frame(polygons)
z<-complete.cases(polygons)
p<-z
z<-cbind(z,z)
polygons<-polygons[complete.cases(polygons),]
coordinates(polygons)<-~x+y
proj4string(polygons)<-CRS(nextproj)
if(transf==T){ polygons<-spTransform(polygons,CRS(projectione))}
z[p==F,]<-c(NA,NA)
z[which(p==T),]<-coordinates(polygons)
Obj[[1]] <- z[,1]
Obj[[2]] <- z[,2]
map(Obj,...)
}
source("E:/Work for RA/R functions/revrotate..R")
source("E:/Work for RA/R functions/rotater.R")
setwd("E:/Paleoreconstruction/plots Oct 11 2019")
# writeRaster(Z2,'Modern12.nc')
# writeRaster(R2,'LGM12.nc')
#
Z2<-raster('Modern12.nc')
R2<-raster('LGM12.nc')
#
# writeRaster(R2,'LGM12.tif')
# writeOGR(R2, "LGM12.kml", layer='R2', driver="KML")
riversBufferLGM<-readShapeSpatial('E:/paleoreconstruction/rivers buffer/riversBufferLGM.shp')
proj4string(riversBufferLGM)<-'+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0'
proj4string(riversBuffer)<-'+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0'
R2<-mask(R2,riversBufferLGM,inverse=T)
palette<-rev(colorRampPalette(c("red3","orange",'yellow','white'))(12))
setwd("E:/Paleoreconstruction/plots Oct 11 2019/Earth/LGM")
LGMKML<-R2
LGMKML[LGMKML==0]<-NA
plotKML(LGMKML,colour_scale=palette,z.lim=c(0,12))
setwd("E:/Paleoreconstruction/plots Oct 11 2019/Earth/Modern")
MODKML<-Z2
MODKML[MODKML==0]<-NA
plotKML(MODKML,colour_scale=palette,z.lim=c(0,12))
setwd("E:/Paleoreconstruction/plots Oct 11 2019/Earth/RiversLGM")
plotKML(riversBufferLGM)
setwd("E:/Paleoreconstruction/plots Oct 11 2019/Earth/Rivers Modern")
( p.df <- data.frame( ID=1:length(riversBuffer)) )
modRivKml<-SpatialPolygonsDataFrame(riversBuffer,data=p.df)
plotKML(modRivKml)
R3<-rotater(R2,center=210+32)
extent(R3)<-c(-180,180,-37,37)
Z3<-rotater(Z2,center=210+32)
extent(Z3)<-c(-180,180,-37,37)
LGMshelf<-raster('E:/paleoreconstruction/sea-level/LGMshelf.nc')
extent(LGMshelf)<-c(c(-180,180,-70,70))
tLGMshelf<-rotater(LGMshelf>0,center=210+32)
extent(tLGMshelf)<-c(-180,180,-70,70)
tLGMshelf<-crop(tLGMshelf,c(-180,180,-45,45))
tLGMshelf[tLGMshelf==1]<-NA
# ################# FIGURES
makeTransparent<-function(someColor, alpha=100)
{
newColor<-col2rgb(someColor)
apply(newColor, 2, function(curcoldata){rgb(red=curcoldata[1], green=curcoldata[2],
blue=curcoldata[3],alpha=alpha, maxColorValue=255)})
}
makeTransparent('red3',1)
redcol<-colorRampPalette(c("#00000000","red3"),alpha=T)(12)
bluecol<-colorRampPalette(c("#00000000","blue"),alpha=T)(12)
#lgmbound<-boundaries(tLGMshelf,type='outer',directions=4)
setwd("E:/Paleoreconstruction/Stats plots feb 2020")
tiff(paste("overall conf zoom zoneA mar2020.tif"),width=3000,height=2000, res = 300)
#plot.map("world", transf=F, center=210+32 , col="burlywood",bg="white",ylim=c(-25,25),fill=TRUE,mar=c(2,5,2,0),add=T) #center is still 0
#,xlim=c(210-62,2210+16.5)
image(tLGMshelf,add=F,col=rev(c('lightsteelblue1','khaki1')),xlim=c(-35,40),ylim=c(-25,25), axes=F,xlab='',ylab='')
#rect(par("usr")[1], par("usr")[3], par("usr")[2], par("usr")[4], col ="lightsteelblue1")
image(tLGMshelf,add=F,col=rev(c('lightsteelblue1','khaki1')),xlim=c(-35,40),ylim=c(-25,25), axes=F,xlab='',ylab='')
plot.map("world", transf=F, center=210+32 , col="burlywood",bg="white",ylim=c(-25,25),fill=TRUE,mar=c(2,5,2,0),add=T) #center is still 0
box(lwd=1.5)
compassRose(-28,-17,cex=.75,cex.dir=1.2,llwd=1.5)
axis(1,at=c(-18,2,22),labels=c("",'',''), tck = .025,mgp=c(3,.3,0),cex.axis=.975,lwd=1.5)
mtext("100", 1, line=-1.5, adj=0.22,cex=.975)
mtext("120", 1, line=-1.5, adj=0.49,cex=.975)
mtext('140',1,line=-1.5,adj=0.77,cex=.975)
axis(2,at=c(-15,0,15),labels=c('','',''), tck = .025,mgp=c(3,.405,0),cex.axis=.975,lwd=1.5)
mtext("15", 2, line=-1.5, adj=0.817,cex=.975)
mtext("0", 2, line=-1.5, adj=0.5,cex=.975)
#mtext('140',2,line=-1.5,adj=0.675,cex=.975)
#image(get(paste('RTDone',scenario,sep='')),maxpixels=3836160,add=T,col = c(color.palette2,color.palette1),breaks=palette.breaks)
image(R3,add=T,maxpixels=3836160,col=redcol,legend=F) ### new habitat
image(Z3,maxpixels=3836160,add=T,col=bluecol,legend=F) ### currently inhabited
#image((losthab2>0),add=T,col=c("red"),maxpixels=3836160)
legend("topright",c('LGM land',"LGM coral","Modern coral"),col=c('khaki1',"red3","blue"),pch=c(16,16,16),box.col=NA,bg="#00000000",cex=1.1)
legend("topright",c('LGM land',"LGM coral","Modern coral"),col=c('black',"black","black"),pch=c(1,NA,NA),box.col=NA,bg="#00000000",cex=1.1)
color.legend(9,19,20,21,legend=c(0,6,12,18), rect.col=redcol,cex=.7)
color.legend(9,15,20,17,legend=c(0,6,12,18), rect.col=bluecol,cex=.7)
#mtext("mm above sea-level", 1, line=-12.15, adj=0.99,cex=.975)
text(-14,-14,'Indian Ocean',cex=1.2)
text(20,3,'Pacific Ocean',cex=1.2)
scalebar(d=500,xy=c(11,-22),label=c(0,'',500),cex=.9,type='bar',divs=4,below="kilometers",adj=c(0.5,-1.1))
dev.off()
##################### zoom 2 ###################
#setwd("E:/Paleoreconstruction/plots Oct 11 2019")
tiff(paste("overall conf zoom seychelles mar2020.tif"),width=2000,height=2000, res = 300)
image(LGMshelf,add=F,col=rev(c('white','khaki1')),legend=F,xlim=c(35,90),ylim=c(-30,25), axes=F,xlab='',ylab='')
plot.map("world", transf=F, center=0 , col="burlywood",bg="white",ylim=c(-25,25),fill=TRUE,mar=c(2,5,2,0),add=T) #center is still 0
box(lwd=1.5)
compassRose(83,-3,cex=.75,cex.dir=1.2,llwd=1.5)
axis(1,at=c(50,65,80),labels=c("",'',''), tck = .025,mgp=c(3,.3,0),cex.axis=.975,lwd=1.5)
mtext("50", 1, line=-1.5, adj=0.26,cex=.975)
mtext("65", 1, line=-1.5, adj=0.545,cex=.975)
mtext('80',1,line=-1.5,adj=0.83,cex=.975)
axis(2,at=c(-15,0,15),labels=c('','',''), tck = .025,mgp=c(3,.405,0),cex.axis=.975,lwd=1.5)
mtext("15", 2, line=-1.5, adj=0.827,cex=.975)
mtext("0", 2, line=-1.5, adj=0.55,cex=.975)
mtext('-15',2,line=-1.5,adj=0.255,cex=.975)
image(R2,add=T,maxpixels=3836160,col=redcol,legend=F) ### new habitat
image(Z2,maxpixels=3836160,add=T,col=bluecol,legend=F) ### currently inhabited
#image((losthab2>0),add=T,col=c("red"),maxpixels=3836160)
legend("topright",c('LGM land',"LGM coral","Modern coral"),col=c('khaki1',"red3","blue"),pch=c(16,16,16),box.col=NA,bg="#00000000",cex=1.1)
legend("topright",c('LGM land',"LGM coral","Modern coral"),col=c('black',"black","black"),pch=c(1,NA,NA),box.col=NA,bg="#00000000",cex=1.1)
color.legend(75,-19,85,-17,legend=c(0,6,12,18), rect.col=redcol,cex=.7)
color.legend(75,-23,85,-21,legend=c(0,6,12,18), rect.col=bluecol,cex=.7)
text(75,-12,'Indian Ocean',cex=1.2)
#text(20,3,'Pacific Ocean',cex=1.2)
scalebar(d=500,xy=c(83,-27),label=c(0,'',500),cex=.9,type='bar',divs=4,below="kilometers",adj=c(0.5,-1.1))
dev.off()
R4<-R2
extent(R4)<-c(0,360,-37,37)
Z4<-(Z2)
extent(Z4)<-c(0,360,-37,37)
LGMshelf<-raster('E:/paleoreconstruction/sea-level/LGMshelf.nc')
extent(LGMshelf)<-c(c(-180,180,-70,70))
tLGMshelf2<-LGMshelf
extent(tLGMshelf2)<-c(0,360,-70,70)
#tLGMshelf2<-crop(tLGMshelf,c(0,360,-45,45))
#tLGMshelf2[tLGMshelf2==1]<-NA
############################### zoom 3 ##############
# setwd("E:/Paleoreconstruction/plots Oct 11 2019")
tiff(paste("overall conf zoom central eastern pacific mar2020.tif"),width=3200,height=2000, res = 300)
image(tLGMshelf2,add=F,col=rev(c('white','khaki1')),legend=F,xlim=c(0,110),ylim=c(-30,25), axes=F,xlab='',ylab='')
plot.map("world", transf=F, center=180 , col="burlywood",bg="white",ylim=c(-25,25),fill=TRUE,mar=c(2,5,2,0),add=T) #center is still 0
box(lwd=1.5)
compassRose(83,-8,cex=.75,cex.dir=1.2,llwd=1.5)
axis(1,at=c(20,50,80),labels=c("",'',''), tck = .025,mgp=c(3,.3,0),cex.axis=.975,lwd=1.5)
mtext("-160", 1, line=-1.5, adj=0.17,cex=.975)
mtext("-130", 1, line=-1.5, adj=0.45,cex=.975)
mtext('-100',1,line=-1.5,adj=0.73,cex=.975)
axis(2,at=c(-15,0,15),labels=c('','',''), tck = .025,mgp=c(3,.405,0),cex.axis=.975,lwd=1.5)
mtext("15", 2, line=-1.5, adj=0.827,cex=.975)
mtext("0", 2, line=-1.5, adj=0.55,cex=.975)
mtext('-15',2,line=-1.5,adj=0.255,cex=.975)
image(R4,add=T,maxpixels=3836160,col=redcol,legend=F) ### new habitat
image(Z4,maxpixels=3836160,add=T,col=bluecol,legend=F) ### currently inhabited
#image((losthab2>0),add=T,col=c("red"),maxpixels=3836160)
legend("topleft",c('LGM land',"LGM coral","Modern coral"),col=c('khaki1',"red3","blue"),pch=c(16,16,16),box.col=NA,bg="#00000000",cex=1.1)
legend("topleft",c('LGM land',"LGM coral","Modern coral"),col=c('black',"black","black"),pch=c(1,NA,NA),box.col=NA,bg="#00000000",cex=1.1)
color.legend(75,-19,90,-17,legend=c(0,6,12,18), rect.col=redcol,cex=.7)
color.legend(75,-23,90,-21,legend=c(0,6,12,18), rect.col=bluecol,cex=.7)
#text(75,-12,'Indian Ocean',cex=1.2)
text(180,3,'Pacific Ocean',cex=1.2)
scalebar(d=500,xy=c(100,-27),label=c(0,'',500),cex=.9,type='bar',divs=4,below="kilometers",adj=c(0.5,-1.1))
dev.off()
|
1021549c92a6a8f2169c1eca1f1b30877a9900dd
|
f36a0e00e552abfe2e8dd82bf3ec67cbafc10b7d
|
/cachematrix.R
|
c8878150dd26c7e4b8ea448b528ce4ff978efc7a
|
[] |
no_license
|
LMT337/ProgrammingAssignment2
|
5b2126056c4dc8a2156dcb558d22143def3b5242
|
1712fa11c82b509e29ef86cbe5cfc90aac567371
|
refs/heads/master
| 2021-01-14T14:28:18.225384
| 2015-11-21T17:03:32
| 2015-11-21T17:03:32
| 46,380,445
| 0
| 0
| null | 2015-11-17T22:48:46
| 2015-11-17T22:48:45
| null |
UTF-8
|
R
| false
| false
| 2,536
|
r
|
cachematrix.R
|
#code uses the framework of the makeVector and cachemean examples to complete the
#Caching the Inverse of a Matrix assignment.
#elements of those examples were changed to complete the assignment otherwise everything is
#exactly the same as original example code.
#makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
#Assign function expects a matrix as input.
#input n x n matrix (will error if matrix is not invertable)
#I tried to add tests to validiate if matrix was invertable or return an error if not,
#but as of right now that was beyond my R skills and it was more important to submit the assignment.
#usage: $variable <- makeCacheMatix(input_matrix)
makeCacheMatix <- function(x = matrix()) {
#local m assigned to Null value
m <- NULL
#set the vector, y values are the function input, y is assigned to x
#cache m is reset to null as new y values are going to generate a new m value
set <- function(y) {
x <<- y
m <<- NULL
}
# get value of matrix assigned to x
get <- function() x
#inverted matrix stored to cache
setm_var <- function(m_var) m <<- m_var
#recall value of matrix variable stored to cache
getm_var <- function() m
#listing the four functions in this function
#lets you make specific calls to these functions as needed
#from the working environment
list(set = set, get = get,
setm_var = setm_var,
getm_var = getm_var)
}
#cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix
# If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
#usage: cacheSolve($variable)
cacheSolve <- function(x, ...) {
#function call to makeCacheMatix get matrix variable stored in cache
m <- x$getm_var()
#if m is not null print "getting cached data, return m
if(!is.null(m)) {
message("Yo dawg, I heard you liked cached matrices:")
return(m)
}
#assign matrix to data from makeCacheMatix get
data <- x$get()
#compute inverse of matrix using solve function
m <- solve(data, ...)
#store inverse of matrix in cache using makeCacheMatix set matrix_variable function
x$setm_var(m)
#return inverse matrix
m
}
#Thank you for taking the time to review my code, I hope you're doing well with this course, best of luck to you with
#the rest of this course and the other courses in the data scientist track.
|
1da834ae2a0dee06aa63da5ee125a2d437df8a32
|
122b7034f73d20fe3eda07a587239b114da49d14
|
/monte_carlo_option_pricing.R
|
57a0690e4c2210560d72f21532b3f1719abd0c71
|
[] |
no_license
|
ozadabuk/monte-carlo-option-pricing
|
d72be0dc4c03630210e4884f697051d2ce68159a
|
d005b599c1bb3b2d4c2feb517cd4519010423b9e
|
refs/heads/master
| 2023-02-13T13:07:44.013176
| 2021-01-09T06:20:15
| 2021-01-09T06:20:15
| 328,087,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,356
|
r
|
monte_carlo_option_pricing.R
|
library(pracma)
tic()
# X ~ N(0, 1)
# r(t) <- r(t-1) + 0.18 * (0.086 - r(t-1)) * delta + 0.02 * sqrt(delta) * X
r_path<-function(n){
X <- matrix(rnorm(n*d, 0, 1),nrow=n)
r_t <- rep(0.07, n)
r <- matrix(r_t)
for(i in 2:d){
r_t <- r_t + 0.18 * (0.086 - r_t) * delta + 0.02 * sqrt(delta) * X[,i]
r <- cbind(r, r_t)
}
return(r)
}
S_path<-function(n, r){
X<-matrix(rnorm(n*d, 0, 1),nrow=n)
RM = t(apply(r, 1, cumsum)) # apply cumsum function to matrix R
BM<-sqrt(delta)*t(apply(X,1,cumsum)) # apply cumsum function to matrix X and scale by sqrt(delta)
S<-S_0*exp(sweep(sigma*BM + RM*delta,MARGIN=2,(k*sigma^2/2)*delta,'-'))
return(S)
}
# Oguz price path
# S_path<-function(n, r){
# Z<-matrix(rnorm(n*(d), 0, 1),nrow=n)
# S_t <- S_0 * exp((r[,1] - (0.5 * sigma^2)) * delta + sigma * Z[,1]* sqrt(delta))
# S<-matrix(S_t)
#
# for(i in 2:(d)){
# S_t <- S_t * exp((r[,i] - (0.5 * sigma^2)) * delta + sigma * Z[,i]* sqrt(delta))
# S<-cbind(S, S_t)
# }
# return(S)
# }
### 12 time steps
S_0 <- 50
K <- 50
r_0 <- 0.07
sigma <- 0.13
epsilon <- 0.05
MT <- 1 # 1 year
d <- 12 #monthly steps
delta <- MT / d
k <- seq(1,d)
# create first sample to find actual sample size
n.sigma <- 10000
r <- r_path(n.sigma) # get an interest rate path
S <- S_path(n.sigma, r) # get a Vasicek stock price path
euro_call_payoffs <- pmax(S[,d] - K, 0) * exp(-delta * rowSums(r)) # Vasicek call payoffs
N_Call <- ceiling((2.58 * 1.1 * sd(euro_call_payoffs) / epsilon)^2) # find sample size required for error tolerance (Vasicek)
r <- r_path(N_Call) # generate new interest rate path with new sample size
S <- S_path(N_Call, r) # generate new Vasicek price path with new sample size
euro_call_payoffs_monthly <- pmax(S[,d] - K, 0) * exp(-delta * rowSums(r)) # Vasicek call payoffs
euro_call_price_monthly <- mean(euro_call_payoffs_monthly) # Vasicek call price
error_call_price_monthly <- 2.58 * sd(euro_call_payoffs_monthly) / sqrt(N_Call) # error for Vasicek call price
### 52 time steps
d <- 52 # weekly time steps
delta <- MT / d
k<-seq(1,d)
n.sigma <- 1000
r <- r_path(n.sigma) # get an interest rate path
S <- S_path(n.sigma, r) # get a Vasicek stock price path
euro_call_payoffs <- pmax(S[,d] - K, 0) * exp(-delta * rowSums(r)) # Vasicek call payoffs
N_Call <- ceiling((2.58 * 1.1 * sd(euro_call_payoffs) / epsilon)^2) # find sample size required for error tolerance (Vasicek)
r <- r_path(N_Call) # generate new interest rate path with new sample size
S <- S_path(N_Call, r) # generate new Vasicek price path with new sample size
euro_call_payoffs_weekly <- pmax(S[,d] - K, 0) * exp(-delta * rowSums(r)) # Vasicek call payoffs
euro_call_price_weekly <- mean(euro_call_payoffs_weekly) # Vasicek call price
error_call_price_weekly <- 2.58 * sd(euro_call_payoffs_weekly) / sqrt(N_Call) # error for Vasicek call price
# true European call price
ExactEuroCall <- S_0*pnorm((log(S_0/K)+(r_0+sigma^2/2)*MT)/(sigma*sqrt(MT))) - K*exp(-r_0*MT)*pnorm((log(S_0/K)+(r_0-sigma^2/2)*MT)/(sigma*sqrt(MT)))
cat("Vasicek call price for monthly steps - price:", euro_call_price_monthly, " error:", error_call_price_monthly,"\n")
cat("Vasicek call price for weekly steps - price:", euro_call_price_weekly, " error:", error_call_price_weekly, "\n")
cat("True European call price from Black Scholes Model:", ExactEuroCall, "\n")
toc()
|
2498af7ae92f9f83c129b7d03a96c21831d7f563
|
522685c66aae28b1579148a9c99fd77b013f2498
|
/Voter Files/NC/NC_EarlyVoteStats.R
|
88f315fa09dc14b9bd7221921607221b210fe953
|
[] |
no_license
|
mcdisc/UF-Election-Sciences
|
eb49220341c1814eb4a5384619d588f144aa10ad
|
dd0a144172dba64dde016a6997bc8f11ea3905b7
|
refs/heads/master
| 2021-04-19T02:51:48.606226
| 2017-10-03T01:32:11
| 2017-10-03T01:32:11
| 50,298,295
| 20
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,087
|
r
|
NC_EarlyVoteStats.R
|
library(dplyr)
library(data.table)
library(magrittr)
library(tidyr)
NC2017hisall <- fread('/Users/Potalora/Downloads/ncvhis_Statewide.txt')
NC2017votfile <- fread('/Users/Potalora/Downloads/ncvoter_Statewide-2.txt')
NC2018hisallless <- subset(NC2017hisall, select = -c(1, 4, 7, 9, 13, 14))
NC2017less <- subset(NC2017votfile, select = -c(1:8, 14:25, 29:68, 70:71))
NC2017test2016 <- subset(NC2018hisallless, election_desc == '11/08/2016 GENERAL')
NC2017test2012 <- subset(NC2018hisallless, election_desc == '11/06/2012 GENERAL')
NC2017_1216 <- full_join(NC2017test2016, NC2017test2012, by = 'ncid')
NC2017_1216 <- subset(NC2017_1216, select = -c(6,8,9,15,16,17))
NCjoined <- left_join(NC2017_1216, NC2017less, by = 'ncid')
NCtable <- NCjoined %>%
filter(race_code == 'B') %>%
group_by(county_desc.x) %>%
count(voting_method.x)
nctable1 <- spread(NCtable, voting_method.x, n)
nctable1 <- subset(nctable1, select = -c(9))
nctable1 <- nctable1[1:100,]
nctable1[is.na(nctable1)] <- 0
nctable1$total <- nctable1$`ABSENTEE BY MAIL` + nctable1$`ABSENTEE CURBSIDE` + nctable1$`ABSENTEE ONESTOP` + nctable1$CURBSIDE + nctable1$`IN-PERSON` + nctable1$PROVISIONAL + nctable1$TRANSFER
nctable1$early <- nctable1$`ABSENTEE BY MAIL` + nctable1$`ABSENTEE CURBSIDE` + nctable1$`ABSENTEE ONESTOP`
nctable1$percent_early <- nctable1$early/nctable1$total
ncfips <- fread("/Users/Potalora/Documents/UF_Elections/NC_Early_Voting/NCFIPS.csv")
nctable1 <- left_join(nctable1, ncfips, by = 'county_desc.x')
write.csv(nctable1, 'Black_Early_NC.csv', row.names = FALSE)
NC2012voters <- filter(NCjoined, election_desc.y != "N/A")
NCtable2 <- NC2012voters %>%
filter(race_code == 'B') %>%
group_by(county_desc.y) %>%
count(voting_method.y)
NCtable2 <- spread(NCtable2, voting_method.y, n)
NCtable2 <- subset(NCtable2, select = -c(9))
NCtable2 <- NCtable2[1:100,]
NCtable2[is.na(NCtable2)] <- 0
NCtable2$total <- NCtable2$`ABSENTEE BY MAIL` + NCtable2$`ABSENTEE` + NCtable2$`ABSENTEE ONESTOP` + NCtable2$CURBSIDE + NCtable2$`IN-PERSON` + NCtable2$PROVISIONAL + NCtable2$TRANSFER
NCtable2$early <- NCtable2$`ABSENTEE BY MAIL` + NCtable2$`ABSENTEE` + NCtable2$`ABSENTEE ONESTOP`
NCtable2$percent_early <- NCtable2$early/NCtable2$total
NCtable3 <- NC2012voters %>%
filter(race_code == 'B') %>%
group_by(county_desc.x) %>%
count(voting_method.x)
NCtable3 <- spread(NCtable3, voting_method.x, n)
NCtable3 <- subset(NCtable3, select = -c(9))
NCtable3 <- NCtable3[1:100,]
NCtable3[is.na(NCtable3)] <- 0
NCtable3$total <- NCtable3$`ABSENTEE BY MAIL` + NCtable3$`ABSENTEE CURBSIDE` + NCtable3$`ABSENTEE ONESTOP` + NCtable3$CURBSIDE + NCtable3$`IN-PERSON` + NCtable3$PROVISIONAL + NCtable3$TRANSFER
NCtable3$early <- NCtable3$`ABSENTEE BY MAIL` + NCtable3$`ABSENTEE CURBSIDE` + NCtable3$`ABSENTEE ONESTOP`
nctable4 <- left_join(NCtable2, NCtable3, by = c('county_desc.y' = 'county_desc.x'))
nctable4$repeatratio <- nctable4$early.y/nctable4$early.x
nctable4 <- left_join(nctable4, ncfips, by = c('county_desc.y'='county_desc.x'))
write.csv(nctable4, 'Black_repeat1.csv', row.names = FALSE)
|
13f8a2cf5acdb2623afa56cfb01aa3f2caae1c7a
|
f42d9d77c06cff51b4a628a49ce3be7a2d0633e9
|
/man/wleRelFromRes.rd
|
8056e617d2b4513a9cb02c48f663a1837cf68b6b
|
[] |
no_license
|
weirichs/eatModel
|
131af527d3ee28d1339e2d2cc2cc1a49a0ecfd33
|
008cb8b51986d8d4c29fe2e474a15a979625ff4f
|
refs/heads/master
| 2023-08-17T15:56:19.829254
| 2023-08-01T21:49:37
| 2023-08-01T21:49:37
| 155,389,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 740
|
rd
|
wleRelFromRes.rd
|
\name{wleRelFromRes}
\alias{wleRelFromRes}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Gives WLE reliability from the object returned by getResults()}
\description{WLE reliability in a data frame. }
\usage{
wleRelFromRes(resultsObj)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{resultsObj}{
%% ~~Describe \code{file} here~~
The object returned by \code{\link{getResults}}.
}
}
\value{
A data frame with three columns, containing model name, dimension name, and
the corresponding WLE reliability.
}
\examples{
### read exemplary results object
file <- system.file("extdata", "results.rda", package = "eatModel")
load(file)
wleRel <- wleRelFromRes(res)
}
|
796ec3338027f18ff3a7be4fffd351da79d0c888
|
c98cabd9038d9129c4d9a6ff194ec9958f247743
|
/proyecto final/facebook.R
|
38d32e4929b835239a97782c4509a62a69107746
|
[] |
no_license
|
davidandresvalles/MCPP_david.valles
|
4fc6d31c7d0af4e6937fbb6de6037a13c1aa434d
|
689e02a9031f2225c3501ba91fa76e82bc33f1ad
|
refs/heads/master
| 2021-07-14T21:30:00.512879
| 2017-05-26T10:00:25
| 2017-05-26T10:00:25
| 80,293,020
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 9,515
|
r
|
facebook.R
|
library(devtools)
install_github("pablobarbera/Rfacebook/Rfacebook")
install.packages("Rfacebook")
library(Rfacebook)
token <- 'EAACEdEose0cBAE1ZAZB293ZAMGEyrMuwOjE3RMGKi4rr4sTR2hhFZBUZBxPk9npWe7XDNXZBZBHrJqmBLU0gVPBqFqcOMvMbleLnQMexfRmE1tGGnkt1P9gakg8uuYczNZAbDwggdRBk3m1KqpGCZAxuaPoVngC7dJx19oNOrKZC7TwG48xbyZBDbzZC'
me <- getUsers("me", token, private_info = TRUE)
me$name # my name
me$hometown
"Post de Uribe, Robledo, Santo, Lopez y Petro desde 2 de enero de 2010 hasta 1 de abril de 2017 "
"posts de Uribe"
user_Uribe <- getUsers(c("AlvaroUribeVel"), token)
fb_pagee <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2010/01/02', until='2011/01/01')
fb_pagee1 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2011/01/02', until='2012/01/01')
fb_pagee2 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2012/01/02', until='2013/01/01')
fb_pagee3 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2013/01/02', until='2014/01/01')
fb_pagee4 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2014/01/02', until='2014/04/01')
fb_pagee5 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2014/04/02', until='2014/07/01')
fb_pagee6 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2014/07/02', until='2015/10/01')
fb_pagee7 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2015/10/02', until='2015/12/31')
fb_pagee8 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2016/01/01', until='2016/03/01')
fb_pagee9 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2016/03/02', until='2016/06/01')
fb_pagee10 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2016/06/02', until='2016/09/01')
fb_pagee11 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2016/09/02', until='2016/12/01')
fb_pagee12 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2016/12/02', until='2017/02/01')
fb_pagee13 <- getPage(page="AlvaroUribeVel", token,n=3000 ,since='2017/02/02', until='2017/04/01')
"Agrupando los posts de uribe"
fburibe <- do.call("rbind", list(fb_pagee, fb_pagee1,fb_pagee2,fb_pagee3,fb_pagee4,fb_pagee5,fb_pagee6,fb_pagee7,fb_pagee8,fb_pagee9,fb_pagee10,fb_pagee11,fb_pagee12,fb_pagee13))
"Santos"
fb_spagee <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2010/01/02', until='2011/01/01')
fb_spagee1 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2011/01/02', until='2012/01/01')
fb_spagee2 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2012/01/02', until='2013/01/01')
fb_spagee3 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2013/01/02', until='2014/01/01')
fb_spagee4 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2014/01/02', until='2014/04/01')
fb_spagee5 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2014/04/02', until='2014/07/01')
fb_spagee6 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2014/07/02', until='2015/10/01')
fb_spagee7 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2015/10/02', until='2015/12/31')
fb_spagee8 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2016/01/01', until='2016/03/01')
fb_spagee9 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2016/03/02', until='2016/06/01')
fb_spagee10 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2016/06/02', until='2016/09/01')
fb_spagee11 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2016/09/02', until='2016/12/01')
fb_spagee12 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2016/12/02', until='2017/02/01')
fb_spagee13 <- getPage(page="JMSantos.Presidente", token,n=3000 ,since='2017/02/02', until='2017/04/01')
fbsantos <- do.call("rbind", list(fb_spagee, fb_spagee1,fb_spagee2,fb_spagee3,fb_spagee4,fb_spagee5,fb_spagee6,fb_spagee7,fb_spagee8,fb_spagee9,fb_spagee10,fb_spagee11,fb_spagee12,fb_spagee13))
"robledo"
fb_rpagee <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2010/01/02', until='2011/01/01')
fb_rpagee1 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2011/01/02', until='2012/01/01')
fb_rpagee2 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2012/01/02', until='2013/01/01')
fb_rpagee3 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2013/01/02', until='2013/07/01')
fb_rpagee3a <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2013/07/02', until='2014/01/01')
fb_rpagee4 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2014/01/02', until='2014/04/01')
fb_rpagee5 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2014/04/02', until='2014/07/01')
fb_rpagee6 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2014/07/02', until='2015/10/01')
fb_rpagee7 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2015/10/02', until='2015/12/31')
fb_rpagee8 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2016/01/01', until='2016/03/01')
fb_rpagee9 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2016/03/02', until='2016/06/01')
fb_rpagee10 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2016/06/02', until='2016/09/01')
fb_rpagee11 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2016/09/02', until='2016/12/01')
fb_rpagee12 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2016/12/02', until='2017/02/01')
fb_rpagee13 <- getPage(page="jorge.robledo.castillo", token,n=3000 ,since='2017/02/02', until='2017/04/01')
fbrobledo <- do.call("rbind", list(fb_rpagee, fb_rpagee1,fb_rpagee2,fb_rpagee3,fb_rpagee3a,fb_rpagee4,fb_rpagee5,fb_rpagee6,fb_rpagee7,fb_rpagee8,fb_rpagee9,fb_rpagee10,fb_rpagee11,fb_rpagee12,fb_rpagee13))
"claudia"
fb_cpagee <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2010/01/02', until='2011/01/01')
fb_cpagee1 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2011/01/02', until='2012/01/01')
fb_cpagee2 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2012/01/02', until='2013/01/01')
fb_cpagee3 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2013/01/02', until='2013/07/01')
fb_cpagee3a <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2013/07/02', until='2014/01/01')
fb_cpagee4 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2014/01/02', until='2014/04/01')
fb_cpagee5 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2014/04/02', until='2014/07/01')
fb_cpagee6 <- getPage(page="ClaudiaLopezCL", token,n=3000 ,since='2014/07/02', until='2015/10/01')
# Claudia López: remaining time windows. Earlier pulls (fb_cpagee..fb_cpagee6)
# and the Rfacebook OAuth `token` are created earlier in this script.
# Windows are kept small so each getPage() call stays under the API limits.
fb_cpagee7 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2015/10/02', until = '2015/12/31')
fb_cpagee8 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2016/01/01', until = '2016/03/01')
fb_cpagee9 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2016/03/02', until = '2016/06/01')
fb_cpagee10 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2016/06/02', until = '2016/09/01')
fb_cpagee11 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2016/09/02', until = '2016/12/01')
fb_cpagee12 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2016/12/02', until = '2017/02/01')
fb_cpagee13 <- getPage(page = "ClaudiaLopezCL", token, n = 3000, since = '2017/02/02', until = '2017/04/01')
fbclaudia <- do.call("rbind", list(fb_cpagee, fb_cpagee1, fb_cpagee2, fb_cpagee3, fb_cpagee3a, fb_cpagee4, fb_cpagee5, fb_cpagee6, fb_cpagee7, fb_cpagee8, fb_cpagee9, fb_cpagee10, fb_cpagee11, fb_cpagee12, fb_cpagee13))

# Gustavo Petro: pull all page posts in the same windowed fashion.
fb_ppagee <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2010/01/02', until = '2011/01/01')
fb_ppagee1 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2011/01/02', until = '2012/01/01')
fb_ppagee2 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2012/01/02', until = '2013/01/01')
fb_ppagee3 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2013/01/02', until = '2013/07/01')
fb_ppagee3a <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2013/07/02', until = '2014/01/01')
fb_ppagee4 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2014/01/02', until = '2014/04/01')
fb_ppagee5 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2014/04/02', until = '2014/07/01')
fb_ppagee6 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2014/07/02', until = '2015/10/01')
fb_ppagee7 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2015/10/02', until = '2015/12/31')
fb_ppagee8 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2016/01/01', until = '2016/03/01')
fb_ppagee9 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2016/03/02', until = '2016/06/01')
fb_ppagee10 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2016/06/02', until = '2016/09/01')
fb_ppagee11 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2016/09/02', until = '2016/12/01')
fb_ppagee12 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2016/12/02', until = '2017/02/01')
fb_ppagee13 <- getPage(page = "GustavoPetroUrrego", token, n = 3000, since = '2017/02/02', until = '2017/04/01')
fbpetro <- do.call("rbind", list(fb_ppagee, fb_ppagee1, fb_ppagee2, fb_ppagee3, fb_ppagee3a, fb_ppagee4, fb_ppagee5, fb_ppagee6, fb_ppagee7, fb_ppagee8, fb_ppagee9, fb_ppagee10, fb_ppagee11, fb_ppagee12, fb_ppagee13))

# Combine the posts of the 5 politicians (fburibe, fbsantos and fbrobledo are
# built earlier in this script).
datafb <- do.call("rbind", list(fbpetro, fbclaudia, fburibe, fbsantos, fbrobledo))

# Export as .csv for downstream analysis in Python.
write.csv(datafb, file = "datosdefacebook.csv")
# BUG FIX: this previously read save(datefb, ...) — `datefb` does not exist
# anywhere in the script ("datefb" is a typo for "datafb") and the call would
# error at runtime. The output filename is kept unchanged so any downstream
# code that load()s "datefb.Rda" keeps working.
save(datafb, file = "datefb.Rda")
|
013ac18867a2a26beb7bab88b7deb8982da4e657
|
b974bb36c70bd4cff617bec91dd667d73b1c940e
|
/R/excel.R
|
96e3c590a354b5a93274ea79ea93438ebd1120e5
|
[] |
no_license
|
crazybilly/muadc
|
6ad29493609f2a787a814467aaf51dd7845d1dfd
|
6be923163d076bb91d4476f5488405255cb6e1ca
|
refs/heads/master
| 2021-05-22T11:24:26.173269
| 2021-03-03T14:58:09
| 2021-03-03T14:58:09
| 24,445,150
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 427
|
r
|
excel.R
|
#' Excel
#'
#' @description Open a file in Excel. This is a convenience wrapper around
#'   `shell.exec(normalizePath(file))`.
#'
#' @param file a character string giving the path of the file to open. If the
#'   file is not associated with Excel in Windows, it will open in whatever
#'   program the OS has registered as the default for that file type.
#'
#' @return returns NULL if successful.
#' @export
#'
excel <- function(file) {
  # Resolve to an absolute path first so shell.exec receives a path the
  # Windows shell can open regardless of the current working directory.
  full_path <- normalizePath(file)
  shell.exec(full_path)
}
|
94cdf993e281ac35ea0dea2136736d280ab70a18
|
705255987191f8df33b8c2a007374f8492634d03
|
/man/NextBestDualEndpoint-class.Rd
|
76edd378174c7f7b24bf426d48bb90c96c4f5519
|
[] |
no_license
|
Roche/crmPack
|
be9fcd9d223194f8f0e211616c8b986c79245062
|
3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4
|
refs/heads/main
| 2023-09-05T09:59:03.781661
| 2023-08-30T09:47:20
| 2023-08-30T09:47:20
| 140,841,087
| 24
| 9
| null | 2023-09-14T16:04:51
| 2018-07-13T11:51:52
|
HTML
|
UTF-8
|
R
| false
| true
| 3,522
|
rd
|
NextBestDualEndpoint-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Rules-class.R
\docType{class}
\name{NextBestDualEndpoint-class}
\alias{NextBestDualEndpoint-class}
\alias{.NextBestDualEndpoint}
\alias{NextBestDualEndpoint}
\alias{.DefaultNextBestDualEndpoint}
\title{\code{NextBestDualEndpoint}}
\usage{
NextBestDualEndpoint(
target,
overdose,
max_overdose_prob,
target_relative = TRUE,
target_thresh = 0.01
)
.DefaultNextBestDualEndpoint()
}
\arguments{
\item{target}{(\code{numeric})\cr see slot definition.}
\item{overdose}{(\code{numeric})\cr see slot definition.}
\item{max_overdose_prob}{(\code{proportion})\cr see slot definition.}
\item{target_relative}{(\code{flag})\cr see slot definition.}
\item{target_thresh}{(\code{proportion})\cr see slot definition.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
\code{\link{NextBestDualEndpoint}} is the class for next best dose that is based on the
dual endpoint model.
}
\details{
Under this rule, at first admissible doses are found, which are those
with toxicity probability to fall in \code{overdose} category and being below
\code{max_overdose_prob}. Next, it picks (from the remaining admissible doses) the
one that maximizes the probability to be in the \code{target} biomarker range. By
default (\code{target_relative = TRUE}) the target is specified as relative to the
maximum biomarker level across the dose grid or relative to the \code{Emax}
parameter in case a parametric model was selected (i.e. \code{\link{DualEndpointBeta}},
\code{\link{DualEndpointEmax}}). However, if \code{target_relative = FALSE}, then the
absolute biomarker range can be used as a target.
}
\section{Slots}{
\describe{
\item{\code{target}}{(\code{numeric})\cr the biomarker target range that needs to be
reached. For example, the target range \eqn{(0.8, 1.0)} and
\code{target_relative = TRUE} means that we target a dose with at least
\eqn{80\%} of maximum biomarker level. As another example,
\eqn{(0.5, 0.8)} would mean that we target a dose between \eqn{50\%} and
\eqn{80\%} of the maximum biomarker level.}
\item{\code{overdose}}{(\code{numeric})\cr the overdose toxicity interval (lower limit
excluded, upper limit included).}
\item{\code{max_overdose_prob}}{(\code{proportion})\cr maximum overdose probability that
is allowed.}
\item{\code{target_relative}}{(\code{flag})\cr is \code{target} specified as relative? If
\code{TRUE}, then the \code{target} is interpreted relative to the maximum, so it
must be a probability range. Otherwise, the \code{target} is interpreted as
absolute biomarker range.}
\item{\code{target_thresh}}{(\code{proportion})\cr a target probability threshold that
needs to be fulfilled before the target probability will be used for
deriving the next best dose (default to 0.01).}
}}
\note{
Typically, end users will not use the \code{.DefaultNextBestDualEndpoint()} function.
}
\examples{
# Target a dose achieving at least 0.9 of maximum biomarker level (efficacy)
# and with a probability below 0.25 that prob(DLT) > 0.35 (safety).
my_next_best <- NextBestDualEndpoint(
target = c(0.9, 1),
overdose = c(0.35, 1),
max_overdose_prob = 0.25
)
# Now, using absolute target on the natural biomarker scale.
my_next_best_absolute <- NextBestDualEndpoint(
target = c(200, 300),
overdose = c(0.35, 1),
max_overdose_prob = 0.25,
target_relative = FALSE
)
}
|
b02308ecb8651084438bc1ea796f991edc19bd3e
|
fd7a1da4ff3ea6730a858287df6e874328cb1787
|
/functions-to-know.R
|
479ae45b6eb3b17f046bf227eb5a6682264f8a01
|
[] |
no_license
|
vanbibn/LearningR
|
842571464f4a9faf780a1612ba88f49fcbd46d56
|
8ad03cbcb9b28331cf940d822771df23fc96239a
|
refs/heads/master
| 2020-12-22T02:44:15.225349
| 2020-09-14T03:24:52
| 2020-09-14T03:24:52
| 236,647,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,840
|
r
|
functions-to-know.R
|
# Functions to Know as Learning R #############################################
## Packages ===================================================================
# BUG FIX: was `strinstalled.packages()`, which is not a real function and
# errors when run; the intended call is utils::installed.packages().
installed.packages() # check what packages are installed
library()            # with no arguments: list available packages
old.packages()       # check what packages need to update
## Session Info and Help ======================================================
version
sessionInfo()
help() # shortcut is to just use ? at start of line
help.search("") # searches through R's documentation (?? is shortcut)
help(package = 'ggplot2')
browseVignettes('ggplot2')
str() # displays the internal structure of any R object
example() # runs code from Examples section of Help
vector('numeric', length = 10) # creates vector (0 is the default value)
### Ways to create a matrix: --------------------------------------------------
m1 <- matrix(1:6, nrow = 2, ncol = 3) # filled column-wise
m2 <- 1:10
dim(m2) <- c(2, 5) # assigning a dim attribute turns a vector into a matrix
x4 <- 1:3
y4 <- 10:12
cbind(x4, y4) # bind as columns
rbind(x4, y4) # bind as rows
### Reading tabular data -----------------------------------------------------
?read.table # read this help page --Memorize!- optimize how read large datasets
# Eg. optimize: comment.char, nrows, colClasses
# how much memory needed to read in = (#row * #col * 8bytes)/(2^30 bytes/GB)
# double to account for overhead in reading data in
# Know your: OS, memory size, 64/32bit, other apps open or users logged in)
## Workspace and Files =======================================================
getwd()
dir.create('testdir')
setwd("testdir")
file.create("mytest.R")
list.files()
file.exists('mytest.R')
# before running a program that loops through a series of files
# you will want to first check to see if each file exists
file.info('mytest.R') # use $ to grab a specific field, e.g. file.info("mytest.R")$mode
# construct file and directory paths that are independent of the operating sys
dir.create(file.path('testdir2','testdir3'), recursive = TRUE)
# It is helpful to save the initial settings before you began an analysis and
# go back to them at the end. -- This trick is often used within functions too.
# "Take nothing but results. Leave nothing but assumptions."
## Sequences of Numbers =======================================================
1:20
?`:` # `backtick` key is above Tab; quoting lets you get help on an operator
seq(1,20)
seq(0,10,0.5)
my_seq <- seq(5,10, length.out = 30)
length(my_seq)
# three equivalent ways to index along my_seq; seq_along is the safest idiom
1:length(my_seq)
seq(along = my_seq)
seq_along(my_seq)
rep(0, times = 40)
rep(c(0,1,2), times = 10) # repeats the whole vector: 0 1 2 0 1 2 ...
rep(c(0,1,2), each = 10)  # repeats each element:     0 0 ... 1 1 ... 2 2 ...
## Vectors ====================================================================
num_vect <- c(0.5,55,-10,6)
tf <- num_vect < 1 # vectorized comparison yields a logical vector
# logicals
(3 > 5) & (4 == 4)
(TRUE == TRUE) | (TRUE == FALSE) # | = OR
((111 >= 111) | !(TRUE)) & ((4 + 1) == 5)
# character vectors
my_char <- c("My","name","is")
paste(my_char, collapse = " ") # join the elements of ONE vector with a separator
my_name <- c(my_char, "Nathan")
paste(my_name, collapse = " ")
paste('Hello','world!', sep = ' ') # sep joins across MULTIPLE arguments
paste(1:3,c('X','Y','Z'), sep = "") # pairwise join; numbers are coerced to text
## Missing Values =============================================================
# NA is used to represent any value that is 'not available' or 'missing'
# NA is not a value, it is placeholder for a quantity that is not available.
# Therefore the logical expressions will not work as expected!!
# R represents TRUE as 1 and FALSE as 0 so...
# sum of a bunch of TRUEs and FALSEs = total number of TRUEs.
x <- c(44, NA, 5, NA)
x * 3 # NA propagates through arithmetic
y <- rnorm(1000)
z <- rep(NA, 1000)
# take a random sample of numbers and NA's
my_data <- sample(c(y,z), 100)
my_na <- is.na(my_data) # is.na() is the ONLY correct way to test for NA
sum(my_na) # counts num of TRUE's ie. # NA values
any(is.na(my_data))
# to quickly replace null values with average value of col
mtcars$mpg[is.na(mtcars$mpg)] <- mean(mtcars$mpg) # but no na in mtcars dataset
# use subsetting to remove missing values from my_data
my_data[!my_na]
# NaN stands for "not a number"
0/0
Inf - Inf
## Subsetting Vectors ========================================================
# to select a subset of elements use an 'index vector' in [] after vname
# 4 diff flavors of index vectors: logical, (+ or -)integers, character strings
x[1:10]
x[!is.na(x)]
x[!is.na(x) & x > 0]
x[c(3,5,7)]
x[-c(2,10)] # all except index 2 and index 10 == x[c(-2,-10)]
# create a vector with named elements
vect <- c(foo = 11, bar = 2, norf = NA)
vect2 <- c(11,2,NA)
names(vect2) <- c("foo", "bar", "norf") # names can be added after the fact
identical(vect,vect2)
vect2["bar"]
vect[c("foo", "bar")]
## Matrices and Data Frames ==================================================
my_vector <- 1:20
dim(my_vector) # NULL: plain vectors have no dim attribute
length(my_vector)
dim(my_vector) <- c(4,5) # adding dims converts the vector into a 4x5 matrix
dim(my_vector)
attributes(my_vector)
class(my_vector)
# NOTE(review): `patients` and `my_matrix` are not defined in this file —
# presumably they come from the swirl lesson these notes follow; confirm.
my_data <- data.frame(patients, my_matrix)
cnames <- c("patient", "age", "weight", "bp", "rating", "test")
colnames(my_data) <- cnames
good <- complete.cases() # placeholder: normally takes a data frame argument
subset()
order()
## Dates and Times ===========================================================
# dates use the Date class
# Times use POSIXct and POSIXlt classes
strptime()   # parse character dates/times with an explicit format string
as.Date()
as.POSIXct()
as.POSIXlt()
## Logicals ===================================================================
identical() # exact equality test (safer than == for whole objects)
isTRUE()
xor() # exclusive OR (T | T --> FALSE)
which() # indices of TRUE elements
any()
all()
## Loop Functions =============================================================
lapply() # list in, list out
apply()  # apply over margins of a matrix/array
mapply() # multivariate version of sapply
tapply() # apply over groups defined by a factor
do.call() # specify a function & a list (each elmt. of list is argument to the fn)
split()
## Debugging ===============================================================
# Questions to ask yourself when something goes wrong
# What was your input?
# How did you call the function?
# What were you expecting?
# Output messages or other results?
# What did you get?
# How does what you get differ from what you were expecting?
# Were your expectations correct in the first place?
# Can you reproduce the problem (exactly)?
|
05d5380a5d66e4b19cf0378fe8893cba02ab6463
|
4606b475270a56b0bba89599e7a57faac89fdc5a
|
/R/dust.R
|
6f4b09c8f77bfbddb93daf2839b87b5bd2d981c3
|
[] |
no_license
|
kateharborne/ProSpect
|
46785f548c15462c1543d348918ca20baf0447ca
|
c9a36c556ab1c97d92cba167d02ed5bf1555c710
|
refs/heads/master
| 2022-04-20T07:08:13.879769
| 2020-04-21T13:34:28
| 2020-04-21T13:34:28
| 257,155,864
| 0
| 0
| null | 2020-04-20T02:56:21
| 2020-04-20T02:56:21
| null |
UTF-8
|
R
| false
| false
| 5,431
|
r
|
dust.R
|
# Power-law dust attenuation curve (Charlot & Fall style).
#
# Args:
#   wave: numeric vector of wavelengths (same units as `pivot`).
#   tau: optical depth at the pivot wavelength (default 0.3).
#   pow: power-law slope (default -0.7).
#   pivot: pivot wavelength (default 5500).
#
# Returns: vector of transmitted flux fractions, exp(-tau*(wave/pivot)^pow).
CF = function(wave, tau = 0.3, pow = -0.7, pivot = 5500){
  # Previously `return = exp(...)`: that created a local variable literally
  # named "return" and returned its value invisibly. Use a plain final
  # expression so the value is returned normally.
  exp(-tau*(wave/pivot)^pow)
}
# Birth-cloud variant of the CF attenuation curve: identical formula to CF()
# but with a deeper default optical depth (tau = 1.0).
#
# Args and return as in CF().
CF_birth = function(wave, tau = 1.0, pow = -0.7, pivot = 5500){
  # Was `return = exp(...)` (invisible assignment to a local named "return");
  # return the value directly instead.
  exp(-tau*(wave/pivot)^pow)
}
# Screen-dust variant of the CF attenuation curve: identical formula to CF()
# with the shallower default optical depth (tau = 0.3).
#
# Args and return as in CF().
CF_screen = function(wave, tau = 0.3, pow = -0.7, pivot = 5500){
  # Was `return = exp(...)` (invisible assignment to a local named "return");
  # return the value directly instead.
  exp(-tau*(wave/pivot)^pow)
}
# Apply the birth-cloud attenuation law to a spectrum and report totals.
#
# Args:
#   wave: ascending wavelength grid.
#   flux: flux density sampled on `wave`.
#   tau, pow, pivot: passed through to CF_birth().
#
# Returns: list(flux = attenuated flux vector,
#               total_atten = integrated flux removed by dust,
#               attenfrac = fraction of the integrated flux that survives).
CF_birth_atten = function(wave, flux, tau = 1.0, pow = -0.7, pivot = 5500){
  flux_atten = CF_birth(wave, tau = tau, pow = pow, pivot = pivot)*flux
  # c(0, diff(wave)) gives left-edge bin widths for a simple Riemann sum.
  binwidth = c(0, diff(wave))
  unatten = sum(flux*binwidth)
  atten = sum(flux_atten*binwidth)
  # Was `return = list(...)` (invisible assignment to a local named "return");
  # return the list directly instead.
  list(flux = flux_atten, total_atten = unatten - atten, attenfrac = atten/unatten)
}
# Apply the screen-dust attenuation law to a spectrum and report totals.
#
# Args:
#   wave: ascending wavelength grid.
#   flux: flux density sampled on `wave`.
#   tau, pow, pivot: passed through to CF_screen().
#
# Returns: list(flux = attenuated flux vector,
#               total_atten = integrated flux removed by dust,
#               attenfrac = fraction of the integrated flux that survives).
CF_screen_atten = function(wave, flux, tau = 0.3, pow = -0.7, pivot = 5500){
  flux_atten = CF_screen(wave, tau = tau, pow = pow, pivot = pivot)*flux
  # c(0, diff(wave)) gives left-edge bin widths for a simple Riemann sum.
  binwidth = c(0, diff(wave))
  unatten = sum(flux*binwidth)
  atten = sum(flux_atten*binwidth)
  # Was `return = list(...)` (invisible assignment to a local named "return");
  # return the list directly instead.
  list(flux = flux_atten, total_atten = unatten - atten, attenfrac = atten/unatten)
}
# Generic attenuation wrapper.
#
# NOTE(review): this is byte-for-byte the same computation as
# CF_screen_atten() (it also calls CF_screen with the same defaults) —
# presumably kept as a generic-named alias; confirm before consolidating.
#
# Args and return as in CF_screen_atten().
CF_atten = function(wave, flux, tau = 0.3, pow = -0.7, pivot = 5500){
  flux_atten = CF_screen(wave, tau = tau, pow = pow, pivot = pivot)*flux
  binwidth = c(0, diff(wave))
  unatten = sum(flux*binwidth)
  atten = sum(flux_atten*binwidth)
  # Was `return = list(...)` (invisible assignment to a local named "return");
  # return the list directly instead.
  list(flux = flux_atten, total_atten = unatten - atten, attenfrac = atten/unatten)
}
# Dust opacity power law relative to 850 micron (8.5e6 Angstrom), i.e.
# kappa(lambda)/kappa(850um) = (lambda/850um)^(-beta). Equals 1 at
# wave = 850e4.
#
# Args:
#   wave: wavelength (Angstroms, to match the 850e4 reference).
#   beta: emissivity index (default 1.5).
.k_lambda = function(wave, beta = 1.5){
  # Was `return = ...` (invisible assignment to a local named "return");
  # return the value directly instead.
  wave^(-beta)/(850e4^(-beta))
}
# Black-body emission evaluated on a wavelength grid, scaled by the dust
# mass absorption coefficient k850.
#
# Args:
#   wave: wavelength, divided by 1e10 before the Planck call (Angstrom ->
#     metre conversion, presumably — confirm against cosplanckLawRadWave).
#   Temp: black-body temperature (default 50).
#   k850: mass absorption coefficient at 850 micron (default 0.077).
#
# NOTE(review): depends on package-internal constants .msol_to_kg and
# .lsol_to_w and on cosplanckLawRadWave(); units inferred from names only.
blackbody = function(wave, Temp = 50, k850 = 0.077){
  A = 4*pi*.msol_to_kg*k850/.lsol_to_w/1e10
  # Was `return = ...` (invisible assignment to a local named "return");
  # return the value directly instead.
  A*cosplanckLawRadWave(wave/1e10, Temp = Temp)
}
# Sum of one or more black-body components, each normalised to unit
# integrated emission and weighted by `norm`, optionally redshifted.
#
# Args:
#   wave: observed-frame wavelength grid (shifted to rest frame via /(1+z)).
#   Temp: temperature(s); a vector gives a multi-component sum.
#   z: redshift (default 0).
#   norm: per-component weight(s); a scalar is recycled across components.
blackbody_norm = function(wave, Temp = 50, z = 0, norm = 1){
  output = rep(0, length(wave))
  # Recycle a scalar norm across multiple temperature components.
  if(length(Temp) > 1){
    if(length(norm) == 1){norm = rep(norm, length(Temp))}
  }
  # seq_along() instead of 1:length(Temp): robust for zero-length input.
  for(i in seq_along(Temp)){
    # Integration limits bracket the Planck peak by +/- 3 dex.
    lims = cosplanckPeakWave(Temp = Temp[i])*c(1e-3, 1e3)*1e10
    scale = integrate(blackbody, lims[1], lims[2], Temp = Temp[i])$value
    output = output + blackbody(wave = wave/(1+z), Temp = Temp[i])*norm[i]/scale
  }
  # Was `return = output` (invisible); return the vector directly instead.
  output
}
# Grey-body emission: black-body modified by the .k_lambda() opacity
# power law with emissivity index `beta`.
#
# Args as in blackbody() plus beta (default 1.5).
greybody = function(wave, Temp = 50, beta = 1.5, k850 = 0.077){
  # Was `return = ...` (invisible assignment to a local named "return");
  # return the value directly instead.
  .k_lambda(wave = wave, beta = beta)*blackbody(wave = wave, Temp = Temp, k850 = k850)
}
# Sum of one or more grey-body components, each normalised to unit
# integrated emission and weighted by `norm`, optionally redshifted.
#
# Args:
#   wave: observed-frame wavelength grid (shifted to rest frame via /(1+z)).
#   Temp: temperature(s); a vector gives a multi-component sum.
#   beta: emissivity index/indices; a scalar is recycled across components.
#   z: redshift (default 0).
#   norm: per-component weight(s); a scalar is recycled across components.
greybody_norm = function(wave, Temp = 50, beta = 1.5, z = 0, norm = 1){
  output = rep(0, length(wave))
  # Recycle scalar beta/norm across multiple temperature components.
  if(length(Temp) > 1){
    if(length(beta) == 1){beta = rep(beta, length(Temp))}
    if(length(norm) == 1){norm = rep(norm, length(Temp))}
  }
  # seq_along() instead of 1:length(Temp): robust for zero-length input.
  for(i in seq_along(Temp)){
    # Integration limits bracket the Planck peak by +/- 3 dex.
    lims = cosplanckPeakWave(Temp = Temp[i])*c(1e-3, 1e3)*1e10
    scale = integrate(greybody, lims[1], lims[2], Temp = Temp[i], beta = beta[i])$value
    output = output + greybody(wave = wave/(1+z), Temp = Temp[i], beta = beta[i])*norm[i]/scale
  }
  # Was `return = output` (invisible); return the vector directly instead.
  output
}
# Interpolate a Dale et al. dust/AGN template spectrum at a given
# star-formation slope (alpha_SF) and AGN fraction.
#
# Args:
#   alpha_SF: heating-intensity slope at which to interpolate the templates.
#   AGNfrac: AGN fraction in [0, 1]; 0 = pure dust, 1 = pure AGN template.
#   type: which packaged template set to lazy-load when `Dale` is NULL
#     ('Orig', 'Msol', 'NormTot', 'NormAGN', 'NormSFR').
#   Dale: optionally a pre-loaded template object, skipping data().
#
# Returns (invisibly): data.frame(Wave, Aspec) — the interpolated spectrum.
Dale_interp=function(alpha_SF=1.5, AGNfrac=0, type='NormTot', Dale=NULL){
  # Lazy-load the requested template set into this function's environment.
  # The `Dale_X=NULL` pre-assignment before each data() call presumably
  # exists to pre-bind the name (e.g. to quiet R CMD check) — confirm.
  if(type=='Orig'){
    if(is.null(Dale)){
      Dale_Orig=NULL
      data('Dale_Orig', envir = environment())
      Dale=Dale_Orig
    }
  }
  if(type=='Msol'){
    if(is.null(Dale)){
      Dale_Msol=NULL
      data('Dale_Msol', envir = environment())
      Dale=Dale_Msol
    }
  }
  if(type=='NormTot'){
    if(is.null(Dale)){
      Dale_NormTot=NULL
      data('Dale_NormTot', envir = environment())
      Dale=Dale_NormTot
    }
  }
  if(type=='NormAGN'){
    # NOTE(review): unlike the other branches this one has no is.null(Dale)
    # guard, so a user-supplied `Dale` is always overwritten — confirm.
    Dale_NormAGN=NULL
    data('Dale_NormAGN', envir = environment())
    Dale=Dale_NormAGN
  }
  if(type=='NormSFR'){
    if(is.null(Dale)){
      Dale_NormSFR=NULL
      data('Dale_NormSFR', envir = environment())
      Dale=Dale_NormSFR
    }
  }
  # interp_param() returns bracketing indices (ID_lo/ID_hi) and linear
  # weights (weight_lo/weight_hi), as used below.
  if(AGNfrac>0 & AGNfrac<1){
    AGNinterp=interp_param(x=AGNfrac, Dale$AGNfrac)
  }
  if(AGNfrac<1){
    SFinterp=interp_param(x=alpha_SF, Dale$alpha_SF)
  }
  if(AGNfrac==0){
    # Pure dust: 1-D interpolation in alpha_SF within the first AGN slice.
    # 1496 is the fixed length of the template wavelength grid.
    output=rep(0,1496)
    output=output+Dale$Aspec[[1]][SFinterp$ID_lo,]*SFinterp$weight_lo
    output=output+Dale$Aspec[[1]][SFinterp$ID_hi,]*SFinterp$weight_hi
    return(invisible(data.frame(Wave=Dale$Wave, Aspec=output)))
  }
  if(AGNfrac>0 & AGNfrac<1){
    # Bilinear interpolation: across AGN fraction slices and alpha_SF rows.
    output=rep(0,1496)
    output=output+Dale$Aspec[[AGNinterp$ID_lo]][SFinterp$ID_lo,]*AGNinterp$weight_lo*SFinterp$weight_lo
    output=output+Dale$Aspec[[AGNinterp$ID_lo]][SFinterp$ID_hi,]*AGNinterp$weight_lo*SFinterp$weight_hi
    output=output+Dale$Aspec[[AGNinterp$ID_hi]][SFinterp$ID_lo,]*AGNinterp$weight_hi*SFinterp$weight_lo
    output=output+Dale$Aspec[[AGNinterp$ID_hi]][SFinterp$ID_hi,]*AGNinterp$weight_hi*SFinterp$weight_hi
    return(invisible(data.frame(Wave=Dale$Wave, Aspec=output)))
  }
  if(AGNfrac==1){
    # Pure AGN: slice 21, first row — presumably the 100% AGN template.
    return(invisible(data.frame(Wave=Dale$Wave, Aspec=Dale$Aspec[[21]][1,])))
  }
}
# Convert a nominal AGN fraction into bolometric dust/AGN fractions by
# rescaling against the 5-20 (micron-scale) integral of the star-forming
# template.
#
# Args:
#   alpha_SF: slope passed to Dale_interp() when Dale_in is missing.
#   AGNfrac: nominal AGN fraction to rescale.
#   Dale_in: optional pre-computed data.frame(Wave, Aspec); when missing,
#     the AGN-free 'NormTot' template at alpha_SF is generated.
#
# Returns: named numeric vector c(Dustfrac_bol, AGNfrac_bol).
Dale_scale=function(alpha_SF=1.5, AGNfrac=0.5, Dale_in){
  if(missing(Dale_in)){
    Dale_NormTot=NULL
    data('Dale_NormTot', envir = environment())
    Dale_in=Dale_interp(alpha_SF=alpha_SF, AGNfrac=0, Dale=Dale_NormTot)
  }
  # Integrate the star-forming template over 5-20 after dividing Wave by
  # 1e4 (presumably an Angstrom -> micron conversion).
  sf_interp_fun = approxfun(Dale_in$Wave/1e4, Dale_in$Aspec)
  sf_integral = integrate(sf_interp_fun, lower=5, upper=20)$value
  agn_integral = 3.39296e-05 # This is always the same, by definition
  # Relative weight of the star-forming integral, then renormalise the
  # requested AGN fraction into a bolometric fraction.
  agn_weight = sf_integral/(agn_integral+sf_integral)
  norm_scale = (AGNfrac*agn_weight+(1-AGNfrac)*(1-agn_weight))
  agn_frac_bol = (AGNfrac*agn_weight)/norm_scale
  return(c(Dustfrac_bol=1-agn_frac_bol, AGNfrac_bol=agn_frac_bol))
}
# Estimate dust mass by energy balance: the stellar luminosity absorbed by
# dust divided by the integrated dust-template luminosity (the latter is
# presumably normalised per unit dust mass — names suggest this; confirm).
#
# Args:
#   wave_star: stellar wavelength grid (ascending).
#   lum_star_nodust: intrinsic (unattenuated) stellar luminosity density.
#   lum_star_dust: dust-attenuated stellar luminosity density.
#   wave_dust: dust-emission wavelength grid (ascending).
#   lum_dust: dust-emission luminosity density on wave_dust.
#
# Returns: c(DustMass, DustLum, M2L) where DustLum is the integrated
#   absorbed luminosity and M2L = 1/(integrated dust template).
dustmass = function(wave_star, lum_star_nodust, lum_star_dust, wave_dust, lum_dust){
  # c(0, diff(wave)) gives left-edge bin widths for a simple Riemann sum.
  DustLum = sum(c(0, diff(wave_star))*(lum_star_nodust - lum_star_dust))
  LtoM = sum(c(0, diff(wave_dust))*lum_dust, na.rm=TRUE)
  DustMass = DustLum/LtoM
  # Was `return = c(...)` (invisible assignment to a local named "return");
  # return the named vector directly instead.
  c(DustMass=DustMass, DustLum=DustLum, M2L=1/LtoM)
}
|
6a448e26d2d0d903f305c0eeb83d065f3f7b5049
|
804dd2645a1b53fefdd4dd8e4674512448c881da
|
/R/mcmc_sampler.R
|
687ebcc8815603a21f5ecd8badf6dea928335f4e
|
[] |
no_license
|
drkowal/fosr
|
03ddb6fae2c7dcefa01286f535d9b0e8e5b5a03c
|
ab225ff3d8ccb4a2b410880dd89b2447e45400f7
|
refs/heads/master
| 2021-06-26T07:14:29.714398
| 2020-09-26T03:05:27
| 2020-09-26T03:05:27
| 129,114,497
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,964
|
r
|
mcmc_sampler.R
|
#' @useDynLib fosr
#' @importFrom Rcpp sourceCpp
NULL
#' MCMC Sampling Algorithm for the Function-on-Scalars Regression Model
#'
#' Runs the MCMC for the function-on-scalars regression model based on
#' an FDLM-type expansion. Here we assume the factor regression has independent errors,
#' which allows for subject-specific random effects,
#' as well as some additional default conditions.
#'
#' @param Y the \code{n x m} data observation matrix, where \code{n} is the number of subjects and \code{m} is the number of observation points (\code{NA}s allowed)
#' @param tau the \code{m x d} matrix of coordinates of observation points
#' @param X the \code{n x p} matrix of predictors; if NULL, only include an intercept
#' @param K the number of factors; if NULL, use SVD-based proportion of variability explained
#' @param nsave number of MCMC iterations to record
#' @param nburn number of MCMC iterations to discard (burin-in)
#' @param nskip number of MCMC iterations to skip between saving iterations,
#' i.e., save every (nskip + 1)th draw
#' @param mcmc_params named list of parameters for which we store the MCMC output;
#' must be one or more of
#' \itemize{
#' \item "beta" (factors)
#' \item "fk" (loading curves)
#' \item "alpha" (regression coefficients)
#' \item "sigma_e" (observation error SD)
#' \item "sigma_g" (random effects SD)
#' \item "Yhat" (fitted values)
#' \item "trsigma" (the sum of trace Sigma_i)
#' \item "sigma_delta_k" (random effects gamma specific SD)
#' }
#' @param computeDIC logical; if TRUE, compute the deviance information criterion \code{DIC}
#' and the effective number of parameters \code{p_d}
#' @return A named list of the \code{nsave} MCMC samples for the parameters named in \code{mcmc_params}
#'
#' @note If \code{nm} is large, then storing all posterior samples for \code{Yhat}, which is \code{nsave x n x M}, may be inefficient
#'
#' @examples
#' # Simulate some data:
#' sim_data = simulate_fosr(n = 100, m = 20, p_0 = 100, p_1 = 5)
#'
#' # Data:
#' Y = sim_data$Y; X = sim_data$X; tau = sim_data$tau
#'
#' # Dimensions:
#' n = nrow(Y); m = ncol(Y); p = ncol(X)
#'
#' # Run the FOSR:
#' out = fosr(Y = Y, tau = tau, X = X, K = 6, mcmc_params = list("fk", "alpha", "Yhat"))
#'
#' # Plot a posterior summary of a regression function, say j = 3:
#' j = 3; post_alpha_tilde_j = get_post_alpha_tilde(out$fk, out$alpha[,j,])
#' plot_curve(post_alpha_tilde_j, tau = tau)
#' # Add the true curve:
#' lines(tau, sim_data$alpha_tilde_true[,j], lwd=6, col='green', lty=6)
#'
#' # Plot the loading curves:
#' plot_flc(out$fk, tau = tau)
#'
#' # Plot the fitted values for a random subject:
#' i = sample(1:n, 1)
#' plot_fitted(y = Y[i,], mu = colMeans(out$Yhat[,i,]),
#' postY = out$Yhat[,i,], y_true = sim_data$Y_true[i,], t01 = tau)
#'
#' @import truncdist
#' @export
fosr = function(Y, tau, X = NULL, K = NULL,
nsave = 1000, nburn = 1000, nskip = 3,
mcmc_params = list("beta", "fk", "alpha", "sigma_e", "sigma_g", "trsigma"),
computeDIC = TRUE){
# Some options (for now):
sample_nu = TRUE # Sample DF parameter, or fix at nu=3?
sample_a1a2 = TRUE # Sample a1, a2, or fix at a1=2, a2=3?
#----------------------------------------------------------------------------
# Assume that we've done checks elsewhere
#----------------------------------------------------------------------------
# Convert tau to matrix, if necessary:
tau = as.matrix(tau)
# Compute the dimensions:
n = nrow(Y); m = ncol(Y); d = ncol(tau)
# Rescale observation points to [0,1]
tau01 = apply(tau, 2, function(x) (x - min(x))/(max(x) - min(x)))
#----------------------------------------------------------------------------
# Initialize the main terms:
# Initialize the FLC coefficients and factors:
inits = fdlm_init_d(Y, tau, K); Beta = inits$Beta; Psi = inits$Psi; splineInfo = inits$splineInfo
K = ncol(Beta) # to be sure we have the right value
# Also use the imputed data values here for initialization:
Yna = Y # The original data, including NAs
any.missing = any(is.na(Yna)) # Any missing obs?
if(any.missing){na.ind = which(is.na(Yna), arr.ind = TRUE); Y = inits$Y0}
BtY = tcrossprod(t(splineInfo$Bmat), Y)
# FLC matrix:
Fmat = splineInfo$Bmat%*%Psi
# Initialize the conditional expectation:
Yhat = tcrossprod(Beta, Fmat)
# Initialize the (time-dependent) observation error SD:
sigma_e = sd(Y - Yhat, na.rm=TRUE); sigma_et = rep(sigma_e, n)
# Initialize the FLC smoothing parameters (conditional MLE):
tau_f_k = apply(Psi, 2, function(x) (ncol(splineInfo$Bmat) - (d+1))/crossprod(x, splineInfo$Omega)%*%x)
#----------------------------------------------------------------------------
# Predictors:
if(!is.null(X)){
# Assuming we have some predictors:
X = as.matrix(X)
# Remove any predictors which are constants/intercepts:
const.pred = apply(X, 2, function(x) all(diff(x) == 0))
if(any(const.pred)) X = as.matrix(X[,!const.pred])
# Center and scale the (non-constant) predictors:
# Note: may not be appropriate for intervention effects!
#X = scale(X)
}
# Include an intercept:
X = cbind(rep(1, n), X); #colnames(X)[1] = paste(intercept_model, "-Intercept", sep='')
# Number of predictors (including the intercept)
p = ncol(X)
#----------------------------------------------------------------------------
# Initialize the regression terms (and the mean term)
alpha_pk = matrix(0, nrow = p, ncol = K) # Regression coefficients
gamma_ik = matrix(0, nrow = n, ncol = K) # Residuals
# Initialize the regression coefficients via sampling (p >= n) or OLS (p < n)
for(k in 1:K) {
if(p >= n){
alpha_pk[,k] = sampleFastGaussian(Phi = X/sigma_et,
Ddiag = rep(.01*sigma_e^2, p),
alpha = tcrossprod(Y, t(Fmat[,k]))/sigma_e)
} else alpha_pk[,k] = lm(Beta[,k] ~ X - 1)$coef
# Residuals:
gamma_ik[,k] = Beta[,k] - X%*%alpha_pk[,k]
}
# Intercept term:
mu_k = as.matrix(alpha_pk[1,])
# SD term for mu_k:
a1_mu = 2; a2_mu = 3
delta_mu_k = sampleMGP(matrix(mu_k, ncol = K), rep(1,K), a1 = a1_mu, a2 = a2_mu)
sigma_mu_k = 1/sqrt(cumprod(delta_mu_k))
#----------------------------------------------------------------------------
# Initialize the corresponding SD term(s):
xi_gamma_ik = 1/gamma_ik^2; # Precision scale
nu = 3 # (initial) degrees of freedom
# MGP term:
a1_gamma = 2; a2_gamma = 3;
delta_gamma_k = rep(1,K); sigma_delta_k = 1/sqrt(cumprod(delta_gamma_k))
# Update the error SD for gamma:
sigma_gamma_ik = rep(sigma_delta_k, each = n)/sqrt(xi_gamma_ik)
#----------------------------------------------------------------------------
if(p > 1){
omega = matrix(alpha_pk[-1,], nrow = p-1) # Not the intercept
# predictor p, factor k:
sigma_omega_pk = abs(omega)
xi_omega_pk = matrix(1, nrow = p-1, ncol = K) # PX term
# predictor p:
lambda_omega_p = rowMeans(sigma_omega_pk)
xi_omega_p = rep(1, (p-1)) # PX term
# global:
lambda_omega_0 = mean(lambda_omega_p)
xi_omega_0 = 1 # PX term
}
#----------------------------------------------------------------------------
# Store the MCMC output in separate arrays (better computation times)
mcmc_output = vector('list', length(mcmc_params)); names(mcmc_output) = mcmc_params
if(!is.na(match('beta', mcmc_params))) post.beta = array(NA, c(nsave, n, K))
if(!is.na(match('fk', mcmc_params))) post.fk = array(NA, c(nsave, m, K))
if(!is.na(match('alpha', mcmc_params))) post.alpha = array(NA, c(nsave, p, K))
if(!is.na(match('sigma_e', mcmc_params)) || computeDIC) post.sigma_e = array(NA, c(nsave, 1))
if(!is.na(match('sigma_g', mcmc_params))) post.sigma_g = array(NA, c(nsave, n, K))
if(!is.na(match('Yhat', mcmc_params)) || computeDIC) post.Yhat = array(NA, c(nsave, n, m))
if(!is.na(match('trsigma', mcmc_params))) post.trsigma = array(NA, c(nsave))
if(!is.na(match('sigma_delta_k', mcmc_params))) post.sigma_delta_k = array(NA, c(nsave, K))
if(!is.na(match('nu', mcmc_params))) post.nu = array(NA, c(nsave))
if(computeDIC) post_loglike = numeric(nsave)
# Total number of MCMC simulations:
nstot = nburn+(nskip+1)*(nsave)
skipcount = 0; isave = 0 # For counting
# Run the MCMC:
timer0 = proc.time()[3] # For timing the sampler
for(nsi in 1:nstot){
#----------------------------------------------------------------------------
# Step 1: Impute the data, Y:
#----------------------------------------------------------------------------
if(any.missing){
Y[na.ind] = Yhat[na.ind] + sigma_et[na.ind[,1]]*rnorm(nrow(na.ind))
BtY = tcrossprod(t(splineInfo$Bmat), Y)
}
#----------------------------------------------------------------------------
# Step 2: Sample the FLCs
#----------------------------------------------------------------------------
# Sample the FLCs
Psi = fdlm_flc(BtY = BtY,
Beta = Beta,
Psi = Psi,
BtB = splineInfo$BtB, #diag(nrow(BtY)),
Omega = splineInfo$Omega,
lambda = tau_f_k,
sigmat2 = sigma_et^2)
# And update the loading curves:
Fmat = splineInfo$Bmat%*%Psi;
# Sample the smoothing parameters:
#tau_f_k = sample_lambda(tau_f_k, Psi, Omega = splineInfo$Omega, d = d, uniformPrior = TRUE, orderLambdas = TRUE)
tau_f_k = sample_lambda(tau_f_k, Psi, Omega = splineInfo$Omega, d = d, uniformPrior = TRUE, orderLambdas = FALSE)
#----------------------------------------------------------------------------
# Step 3: Sample the regression coefficients (and therefore the factors)
#----------------------------------------------------------------------------
# Pseudo-response and pseudo-variance:
Y_tilde = crossprod(BtY, Psi); sigma_tilde = sigma_et
# Draw Separately for each k:
for(k in 1:K){
# Marginalize over gamma_{tk} to sample {alpha_pk}_p for fixed k:
y_tilde_k = Y_tilde[,k]; sigma_tilde_k = sqrt(sigma_tilde^2 + sigma_gamma_ik[,k]^2)
if(p >= n){
# Fast sampler for p >= n (BHATTACHARYA et al., 2016)
alpha_pk[,k] = sampleFastGaussian(Phi = X/sigma_tilde_k,
Ddiag = as.numeric(c(sigma_mu_k[k],sigma_omega_pk[,k])^2),
alpha = y_tilde_k/sigma_tilde_k)
} else {
# Fast sampler for p < n (Rue, 2001?)
if(p > 1){
chQ_k = chol(crossprod(X/sigma_tilde_k) + diag(as.numeric(1/c(sigma_mu_k[k],sigma_omega_pk[,k])^2)))
} else chQ_k = chol(crossprod(X/sigma_tilde_k) + diag(as.numeric(1/c(sigma_mu_k[k])^2), p))
ell_k = crossprod(X, y_tilde_k/sigma_tilde_k^2)
alpha_pk[,k] = backsolve(chQ_k, forwardsolve(t(chQ_k), ell_k) + rnorm(p))
}
}
# And sample the errors gamma_ik:
postSD = 1/sqrt(rep(1/sigma_tilde^2, times = K) + matrix(1/sigma_gamma_ik^2))
postMean = matrix((Y_tilde - X%*%alpha_pk)/rep(sigma_tilde^2, times = K))*postSD^2
gamma_ik = matrix(rnorm(n = n*K, mean = postMean, sd = postSD), nrow = n)
# Update the factors:
Beta = X%*%alpha_pk + gamma_ik
# And the fitted curves:
Yhat = tcrossprod(Beta, Fmat)
#----------------------------------------------------------------------------
# Step 4: Sample the observation error variance
#----------------------------------------------------------------------------
# Or use uniform prior?
sigma_e = 1/sqrt(rgamma(n = 1, shape = sum(!is.na(Y))/2, rate = sum((Y - Yhat)^2, na.rm=TRUE)/2))
sigma_et = rep(sigma_e, n)
#----------------------------------------------------------------------------
# Step 5: Sample the intercept/gamma parameters (Note: could use ASIS)
#----------------------------------------------------------------------------
mu_k = alpha_pk[1,]
# Prior variance: MGP
# Mean Part
delta_mu_k = sampleMGP(theta.jh = matrix(mu_k, ncol = K),
delta.h = delta_mu_k,
a1 = a1_mu, a2 = a2_mu)
sigma_mu_k = 1/sqrt(cumprod(delta_mu_k))
# And hyperparameters:
if(sample_a1a2){
a1_mu = uni.slice(a1_mu, g = function(a){
dgamma(delta_mu_k[1], shape = a, rate = 1, log = TRUE) +
dgamma(a, shape = 2, rate = 1, log = TRUE)}, lower = 0, upper = Inf)
a2_mu = uni.slice(a2_mu,g = function(a){
sum(dgamma(delta_mu_k[-1], shape = a, rate = 1, log = TRUE)) +
dgamma(a, shape = 2, rate = 1, log = TRUE)},lower = 0, upper = Inf)
}
# Variance part:
# Standardize, then reconstruct as matrix of size n x K:
delta_gamma_k = sampleMGP(theta.jh = matrix(gamma_ik*sqrt(xi_gamma_ik), ncol = K),
delta.h = delta_gamma_k,
a1 = a1_gamma, a2 = a2_gamma)
sigma_delta_k = 1/sqrt(cumprod(delta_gamma_k))
# And hyperparameters:
if(sample_a1a2){
a1_gamma = uni.slice(a1_gamma, g = function(a){
dgamma(delta_gamma_k[1], shape = a, rate = 1, log = TRUE) +
dgamma(a, shape = 2, rate = 1, log = TRUE)}, lower = 0, upper = Inf)
a2_gamma = uni.slice(a2_gamma, g = function(a){
sum(dgamma(delta_gamma_k[-1], shape = a, rate = 1, log = TRUE)) +
dgamma(a, shape = 2, rate = 1, log = TRUE)},lower = 0, upper = Inf)
}
# Sample the corresponding prior variance term(s):
xi_gamma_ik = matrix(rgamma(n = n*K,
shape = nu/2 + 1/2,
rate = nu/2 + (gamma_ik/rep(sigma_delta_k, each = n))^2/2), nrow = n)
# Sample degrees of freedom?
if(sample_nu){
nu = uni.slice(nu, g = function(nu){
sum(dgamma(xi_gamma_ik, shape = nu/2, rate = nu/2, log = TRUE)) +
dunif(nu, min = 2, max = 128, log = TRUE)}, lower = 2, upper = 128)
}
# Update the error SD for gamma:
sigma_gamma_ik = rep(sigma_delta_k, each = n)/sqrt(xi_gamma_ik)
#----------------------------------------------------------------------------
# Step 6: Sample the non-intercept parameters:
#----------------------------------------------------------------------------
# Non-intercept term:
if(p > 1){
omega = matrix(alpha_pk[-1,], nrow = p-1) # Not the intercept
#----------------------------------------------------------------------------
# predictor p, factor k:
omega2 = omega^2; omega2 = omega2 + (omega2 < 10^-16)*10^-8
sigma_omega_pk = matrix(1/sqrt(rgamma(n = (p-1)*K,
shape = 1/2 + 1/2,
rate = xi_omega_pk + omega2/2)), nrow = p-1)
xi_omega_pk = matrix(rgamma(n = (p-1)*K,
shape = 1/2 + 1/2,
rate = rep(1/lambda_omega_p^2, times = K) + 1/sigma_omega_pk^2), nrow = p-1)
#----------------------------------------------------------------------------
# predictor p:
lambda_omega_p = 1/sqrt(rgamma(n = p-1,
shape = 1/2 + K/2,
rate = xi_omega_p + rowSums(xi_omega_pk)))
xi_omega_p = rgamma(n = p-1,
shape = 1/2 + 1/2,
rate = rep(1/lambda_omega_0^2, p-1) + 1/lambda_omega_p^2)
#----------------------------------------------------------------------------
# global:
lambda_omega_0 = 1/sqrt(rgamma(n = 1,
shape = 1/2 + (p-1)/2,
rate = xi_omega_0 + sum(xi_omega_p)))
xi_omega_0 = rgamma(n = 1,
shape = 1/2 + 1/2,
rate = 1 + 1/lambda_omega_0^2)
}
#----------------------------------------------------------------------------
# Step 7: Adjust the ordering
#----------------------------------------------------------------------------
#if(nsi == 10 && K > 1){adjOrder = order(tau_f_k, decreasing = TRUE); tau_f_k = tau_f_k[adjOrder]; Psi = Psi[,adjOrder]; Beta = as.matrix(Beta[,adjOrder])}
# Store the MCMC output:
if(nsi > nburn){
# Increment the skip counter:
skipcount = skipcount + 1
# Save the iteration:
if(skipcount > nskip){
# Increment the save index
isave = isave + 1
# Save the MCMC samples:
if(!is.na(match('beta', mcmc_params))) post.beta[isave,,] = Beta
if(!is.na(match('fk', mcmc_params))) post.fk[isave,,] = Fmat
if(!is.na(match('alpha', mcmc_params))) post.alpha[isave,,] = alpha_pk
if(!is.na(match('sigma_e', mcmc_params)) || computeDIC) post.sigma_e[isave,] = sigma_e
if(!is.na(match('sigma_g', mcmc_params))) post.sigma_g[isave,,] = sigma_gamma_ik
if(!is.na(match('Yhat', mcmc_params)) || computeDIC) post.Yhat[isave,,] = Yhat # + sigma_e*rnorm(length(Y))
if(!is.na(match('trsigma', mcmc_params))) post.trsigma[isave] = n*m*sigma_e^2 + sum(sigma_gamma_ik^2)
if(!is.na(match('sigma_delta_k', mcmc_params))) post.sigma_delta_k[isave,] = sigma_delta_k
if(!is.na(match('nu', mcmc_params))) post.nu[isave] = nu
if(computeDIC) post_loglike[isave] = sum(dnorm(matrix(Yna), mean = matrix(Yhat), sd = rep(sigma_et,m), log = TRUE), na.rm = TRUE)
# And reset the skip counter:
skipcount = 0
}
}
computeTimeRemaining(nsi, timer0, nstot, nrep = 1000)
}
if(!is.na(match('beta', mcmc_params))) mcmc_output$beta = post.beta
if(!is.na(match('fk', mcmc_params))) mcmc_output$fk = post.fk
if(!is.na(match('alpha', mcmc_params))) mcmc_output$alpha = post.alpha
if(!is.na(match('sigma_e', mcmc_params))) mcmc_output$sigma_e = post.sigma_e
if(!is.na(match('sigma_g', mcmc_params))) mcmc_output$sigma_g = post.sigma_g
if(!is.na(match('Yhat', mcmc_params))) mcmc_output$Yhat = post.Yhat
if(!is.na(match('trsigma', mcmc_params))) mcmc_output$trsigma = post.trsigma
if(!is.na(match('sigma_delta_k', mcmc_params))) mcmc_output$sigma_delta_k = post.sigma_delta_k
if(!is.na(match('nu', mcmc_params))) mcmc_output$nu = post.nu
if(computeDIC){
# Log-likelihood evaluated at posterior means:
loglike_hat = sum(dnorm(matrix(Yna),
mean = matrix(colMeans(post.Yhat)),
sd = rep(colMeans(post.sigma_e), m*n),
log = TRUE), na.rm=TRUE)
# Effective number of parameters (Note: two options)
p_d = c(2*(loglike_hat - mean(post_loglike)),
2*var(post_loglike))
# DIC:
DIC = -2*loglike_hat + 2*p_d
# Store the DIC and the effective number of parameters (p_d)
mcmc_output$DIC = DIC; mcmc_output$p_d = p_d
}
print(paste('Total time: ', round((proc.time()[3] - timer0)), 'seconds'))
return (mcmc_output);
}
|
ddb7ce3fe5cac3efb5fbcb1f10c8cc42dac44a2f
|
6a3e9b4cded7752b4949ac4281b3ad6850b9e6b3
|
/2019-11-20 New Zealand birds/nz_bird.R
|
ee766d84ae6164aa7b0b1879e115ea3884fa574b
|
[] |
no_license
|
GilHenriques/TidyTuesdays
|
cdffc1256532d01bb7b8f664fccd606a7db4393c
|
9431c7b438838bd781461c1bacf34a4bfc627748
|
refs/heads/master
| 2020-07-12T09:28:08.366365
| 2020-07-01T20:18:31
| 2020-07-01T20:18:31
| 204,779,080
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,701
|
r
|
nz_bird.R
|
# 2019-11-19 TidyTuesday
# Gil J.B. Henriques
#
# Simulates New Zealand's 2019 Bird of the Year instant-runoff election from
# the raw ranked ballots, then plots each bird's vote share per round.
library(tidyverse)
# Read data ---------------------------------------------------------------
# Ballot data: one row per (voter, rank), with vote_rank like "vote_1".."vote_5".
# groupdata2::group() chunks every 5 consecutive rows into one group -- i.e. one
# voter -- stored in a ".groups" grouping column, which select() retains because
# it is a grouping variable. separate() keeps only the numeric part of
# vote_rank as a character column `rank`.
nz_bird <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-11-19/nz_bird.csv") %>%
groupdata2::group(n = 5, method = "greedy") %>% # add voter id
select(vote_rank, bird_breed) %>%
separate(vote_rank, c(NA, "rank"))
# Simulate voting process and save each round's results -------------------
# Instant runoff: each round, tally every voter's highest-ranked remaining
# bird; if no bird holds at least half the votes, eliminate the lowest-voted
# bird(s) and repeat. `election` accumulates one row per (bird, round).
# NOTE(review): the seed row is all-NA and flows into the plot data below.
election <- tibble(bird_breed = NA, percent = NA, round = NA)
majority <- FALSE; round <- 1;
while(!majority){
round_df <- nz_bird %>%
group_by(.groups) %>%
# `rank` is character, so min() compares lexicographically -- fine for the
# single-digit ranks 1-5 used here.
filter(rank == min(rank)) %>% # count each voter's highest preference
ungroup %>%
group_by(bird_breed) %>%
summarize(sum = n()) %>%
ungroup %>%
# `round` on the right-hand side resolves to the loop counter variable.
mutate(percent = sum/sum(sum), round = round) %>%
select(bird_breed, percent, round)
election <- bind_rows(election, round_df)
majority <- (round_df %>% pull(percent) %>% max) >= 0.5
if(!majority) { # if no majority, remove lowest voted bird
bird_to_remove <- round_df %>% filter(percent == min(percent)) %>% pull(bird_breed)
nz_bird <- nz_bird %>% filter(!(bird_breed %in% bird_to_remove))
}
round <- round + 1
print(round) # to keep track of progress
}
# Plot --------------------------------------------------------------------
# Since I can't use nudge_x together with vjust in geom_text, I need to "fake" nudging by
# adding blank spaces at the end of the labels
election <- election %>%
mutate(bird_space = paste0(bird_breed, " "))
# I will highlight top candidates
# (birds still present after round 75 -- presumably the finalists).
top_birds <- election %>% filter(round > 75) %>% pull(bird_breed) %>% unique()
election %>%
ggplot(aes(x = round, y = percent, fill = bird_breed)) +
# Most species will appear in grey
geom_col(position = "stack", color = "white", fill = "gray95", size = 0.1) +
# Top species will be colorful
geom_col(data = filter(election, bird_breed %in% top_birds), position = "stack", color = "white", size = 0.1) +
geom_segment(x = 0, xend = 82, y = 0.5, yend = 0.5, lty = 2, inherit.aes = F) + # Add a line at 50%
scale_fill_brewer(palette = "Set2") +
scale_color_brewer(palette = "Set2") + # Color is needed for text labels
theme_minimal() +
theme(legend.position = "none", legend.title = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
axis.text.y = element_text(margin = margin(r = 0))) +
scale_y_continuous(labels = scales::percent, position = "right",
expand = expand_scale(add = c(0.001, 0.05))) +
xlab("Instant runoff round") + ylab("Percent of vote") +
# Label only the highlighted birds, at round 1, using the padded labels above.
geom_text(data = filter(election, bird_breed %in% top_birds & round == 1), aes(label = bird_space, color = bird_breed), position = position_stack(vjust = 0.5), hjust = 1, size = 3.5) +
annotate(geom="text",x = 1, y = 0.65, label = "Other species ", hjust = 1, size = 3.5, color = "grey") +
labs(title = "The Yellow-eyed penguin is New Zealand's 2019 Bird of the Year",
subtitle = "Voting was based on the instant runoff system: the first preferences of all the votes cast are tallied in a\nfirst round of counting. If no bird has more than half of the votes, new rounds of counting are held until one\nbird has a majority. Columns in the figure show species vote shares per round.",
caption = "Visualization: @_Gil_Henriques for #TidyTuesday. Data: New Zealand Forest & Bird.") +
scale_x_discrete(expand = expand_scale(add = c(20,0)), breaks = seq(0,80,20), labels = seq(0,80,20), limits = seq(0,80,20))
# Save plot
ggsave("nz_birds.pdf", width = 7.5, height = 5)
|
58e71bc3b46e2870f6f8d7804c41837de8130102
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/functions/export_k_rasters.R
|
a7d99765fb3a1d6aeae13217371f450c0f3e4710
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
export_k_rasters.R
|
require(foreach)
export_k_rasters <- function(simulation_result, initial_k, period = 10, summary_function = mean) {
  # Summarise the time-varying carrying-capacity (k) layers of a simulation
  # into one raster per epoch of `period` timesteps, prepending the initial k.
  #
  # Args:
  #   simulation_result: list whose first element is a list of timesteps; each
  #     timestep's third element ([[3]]) is the k raster for that step
  #     (structure assumed from usage here -- confirm against simulation code).
  #   initial_k: initial k raster (or list) prepended unchanged to the output.
  #   period: number of timesteps aggregated into each output raster.
  #   summary_function: cell-wise aggregation over a period (default: mean).
  #
  # Returns:
  #   c(initial_k, <one summarised k raster per epoch>).
  timesteps <- simulation_result[[1]]
  timespan <- length(timesteps)
  # Partition 1..timespan into consecutive epochs of `period` steps.
  # seq_len() (not 1:timespan) so a zero-length simulation yields zero epochs
  # instead of the bogus c(1, 0) sequence produced by 1:0.
  epochs <- split(seq_len(timespan), ceiling(seq_len(timespan) / period))
  # Indices of non-NA cells in the first k raster; assumed stable over time.
  idx <- which(!is.na(timesteps[[1]][[3]][]))
  # One summarised raster per epoch. Sequential lapply replaces foreach %do%
  # (which was also sequential); unname() keeps the original unnamed output.
  k_summaries <- unname(lapply(epochs, function(epoch) {
    # cells x steps matrix of k values for this epoch
    k_mat <- do.call(cbind, lapply(epoch, function(t) timesteps[[t]][[3]][idx]))
    # Aggregate each cell across the epoch's timesteps.
    cell_summary <- apply(k_mat, 1, summary_function)
    # Write the rounded summaries into a copy of the template raster.
    k_summary <- timesteps[[1]][[3]]
    k_summary[idx] <- round(cell_summary)
    k_summary
  }))
  c(initial_k, k_summaries)
}
|
d79d65ee698b5034f1e70c3c7b31053dad456b61
|
8b7b4a096cabade05415edc75f8a5adce62b1576
|
/tests/testthat/helper.R
|
9e723d8532ff47727e2f26a2b370c1f39d674191
|
[
"MIT"
] |
permissive
|
2DegreesInvesting/r2dii.plot.static
|
0350b9fe0952be30d14bf8868ba3816adde6f63b
|
fcb89716c1668774cda5f0bf1ba10af9a0443057
|
refs/heads/master
| 2023-05-05T22:37:08.046611
| 2021-05-28T15:03:22
| 2021-05-28T15:03:22
| 327,862,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,223
|
r
|
helper.R
|
# Expectation that evaluating `...` raises no error.
# Passing NA as testthat's `regexp` argument asserts the absence of an error.
expect_no_error <- function(...) expect_error(..., NA)
# Build a fake of the example dataset used in tests. Every column has a
# plausible scalar default; pass vectors to get multiple rows, and extra
# named columns through `...` (all forwarded straight to tibble()).
fake_example_data <- function(sector = "automotive",
                              technology = "electric",
                              year = 2020L,
                              region = "global",
                              scenario_source = "demo_2020",
                              metric = "projected",
                              production = 1,
                              technology_share = 0.2,
                              ...) {
  tibble(
    sector = sector, technology = technology,
    year = year, region = region,
    scenario_source = scenario_source, metric = metric,
    production = production, technology_share = technology_share,
    ...
  )
}
# Build a fake lines-specification table: machine-readable line names paired
# with their display labels. Extra columns may be supplied through `...`.
# (Deliberately not DRYed; see r2dii.plot.static/pull/91#pullrequestreview-639054150)
fake_lines_specs <- function(...) {
  line_names <- c(
    "projected",
    "corporate_economy",
    "target_demo",
    "adjusted_scenario_demo"
  )
  labels <- c(
    "Projected",
    "Corporate Economy",
    "Target Demo",
    "Adjusted Scenario Demo"
  )
  tibble(line_name = line_names, label = labels, ...)
}
# TRUE when the running R interpreter's major version number is strictly
# less than `major`.
r_version_is_older_than <- function(major) {
  running_major <- as.integer(R.version$major)
  running_major < major
}
|
a750e50bd091f9eb539b7acbc22ca47f84bf0a26
|
0301b1e7f15226b667b7650b44a32d5edea0977f
|
/cachematrix.R
|
8a9efc13286d4f3dc977de0077a56ac1e51da19b
|
[] |
no_license
|
iev1977/RProgAssgnmntW3
|
0a03a6407fc08c78912a75ecf9e488b73d1c043e
|
08fb222d58a5f31da9467c61c4ac6892518b491f
|
refs/heads/master
| 2021-01-10T10:18:03.349474
| 2016-04-01T21:11:36
| 2016-04-01T21:11:36
| 55,262,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
cachematrix.R
|
## makeCacheMatrix creates a special "matrix" object that can cache its inverse.
## cacheSolve computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
## Input: x - NxN matrix must be invertible
makeCacheMatrix <- function(x = matrix()) {
  # Wrap matrix `x` in a list of closures that can also cache its inverse.
  # The cache starts empty and is invalidated whenever the matrix changes.
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL # matrix changed: drop any stale inverse
  }
  get <- function() x
  setinv <- function(value) cached_inverse <<- value
  getinv <- function() cached_inverse
  # Expose the four accessors by name:
  #   set/get    -- replace/read the matrix
  #   setinv/getinv -- store/read the cached inverse (NULL when unset)
  list(set = set, get = get,
       setinv = setinv, getinv = getinv)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" `x` built by makeCacheMatrix.
  # If an inverse is already cached, reuse it; otherwise compute it with
  # solve(), store it in the cache, and return it.
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the underlying matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
|
bda2e354109a45b7f6dc687997153328047fb2c8
|
785264b5af06dfe69e44abd1c147420e240c4ab0
|
/rchive/oldplotscripts/sub_plot_10-16.R
|
773599b22fc84921f2cc4b8d63387ebf83452cf0
|
[] |
no_license
|
yash1223/limiting_advr
|
28665c1725ff56949a494f01bc939ae414274874
|
9747628413129c6fcae2ce3c7a27b4bbab8cf8d1
|
refs/heads/master
| 2020-04-10T23:02:26.147757
| 2018-12-11T14:12:53
| 2018-12-11T14:12:53
| 161,339,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,073
|
r
|
sub_plot_10-16.R
|
# Plot driver: country-level beta estimates vs. average demand, for four model
# variants, saved as wide PNGs. Relies on sub_EDA_preproc.R defining `plotdf`
# and plot_general() (assumed -- confirm in that file).
source("sub_EDA_preproc.R")
temp <- plotdf
target <- "fig"
#10-16 plots
# Keep only non-zero estimates with |beta_fix_all| < 4 (outlier trim).
# NOTE(review): group_by() immediately followed by as.data.frame() drops the
# grouping again, so the group_by(Country) calls in this script look like no-ops.
temp <- temp %>%
subset(beta_fix_all != 0) %>%
subset(beta_fix_all < 4) %>%
subset(beta_fix_all > -4) %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=beta_fix_all, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_fixall")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
# Same plot with betac on the y-axis. NOTE(review): the beta_fix_all filter
# above still applies here, because `temp` is reused rather than rebuilt.
temp <- temp %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_fixB")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
# Same plot with betac_vary on the y-axis.
temp <- temp %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac_vary, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_varyB")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
# Same plot with betac_vary_cor on the y-axis.
temp <- temp %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac_vary_cor, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_varyB_cor")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
# Second pass: the same four plots, restricted to years before 2007 and
# written to a separate subdirectory.
temp <- plotdf %>% subset(year < 2007)
target <- "fig/sub_2007"
# Create the output directory on first run.
if (!dir.exists('fig/sub_2007'))
dir.create('fig/sub_2007')
#10-16 plots
temp <- temp %>%
subset(beta_fix_all != 0) %>%
subset(beta_fix_all < 4) %>%
subset(beta_fix_all > -4) %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=beta_fix_all, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_fixall")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
temp <- temp %>%
# subset(year > 2007) %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_fixB")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
temp <- temp %>%
# subset(year > 2007) %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac_vary, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_varyB")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
#vary intercept fix slope
temp <- temp %>%
group_by(Country) %>% as.data.frame()
pll <- plot_general(data = temp, x=demand_avg, y=betac_vary_cor, country=Country, region=Region)
target_stub <- file.path(target, "Beta_v_avg_varyB_cor")
png(paste0(target_stub, ".png"), height = 480, width = 1000)
for(out in pll) print(out); dev.off()
|
f8c5b6331ab350e0b44f5e742173759af5fe4828
|
361149f27813dc80073279ef561a04be8eb9abc4
|
/quiz/2015-03-11_YidingLyu_yl3248_quiz3.R
|
7dfd40661bd31a5b70bbb5b995df24be84686c41
|
[] |
no_license
|
denistanwh/data-viz
|
492312a84be7bc11aa72efc08effac0b99f9cad6
|
311598bcbde38903b827cd9bb0eb9f0b1ae10303
|
refs/heads/master
| 2021-01-14T08:39:24.600147
| 2015-05-13T17:06:23
| 2015-05-13T17:06:23
| 29,634,530
| 0
| 0
| null | 2015-02-11T17:18:05
| 2015-01-22T00:43:55
|
R
|
UTF-8
|
R
| false
| false
| 1,542
|
r
|
2015-03-11_YidingLyu_yl3248_quiz3.R
|
# Quiz script: prepare the airquality dataset and plot selected variables
# over time with ggplot2.
library(maps)
library(ggplot2)
library(reshape)
# Load the built-in airquality dataset (Ozone, Solar.R, Wind, Temp, Month, Day).
data(airquality)
str(airquality)
# NOTE(review): at this point `data` still refers to the base function
# utils::data, so this prints that function's header -- likely intended to
# come after the assignment on the next line.
head(data)
data=airquality
# Count, then drop, rows containing missing values.
sum(is.na(data))
data=na.omit(data)
###get the time stamp
# Build a Date column from Month/Day. The year is hard-coded to "2014"
# (airquality is from 1973 per ?airquality; the year only affects axis labels).
data$Time=as.character(paste("2014",data$Month, data$Day, sep="-"))
data$Date=as.Date(data$Time, "%Y-%m-%d")
###plot with ggplot2
head(data)
### The function
# Build a ggplot line chart of Ozone over time from the globally-prepared
# `data` (airquality with the Date column added above), optionally including
# Wind and/or Temp as additional series.
#
# Args:
#   Temp: logical; include the Temp column as a series?
#   Wind: logical; include the Wind column as a series?
# Returns: a ggplot object (one line per retained variable, melted on Date).
#
# This replaces four copy-pasted branches that differed only in which column
# indices were dropped; the selections below are identical to the originals.
YidingPlot <- function(Temp, Wind) {
  # Columns always dropped before melting: Solar.R (2), Month (5), Day (6),
  # and the character Time stamp (7). Ozone (1) and Date (8) are always kept.
  drop_cols <- c(2, 5, 6, 7)
  if (!Wind) drop_cols <- c(drop_cols, 3) # also drop the Wind column
  if (!Temp) drop_cols <- c(drop_cols, 4) # also drop the Temp column
  dataplot <- melt(data[, -drop_cols], id.vars = c("Date"))
  p <- ggplot(dataplot, aes(x = Date, y = value, colour = variable)) + geom_line()
  p <- p + scale_y_continuous(name = "Value") + labs(title = "AirQuality")
  return(p)
}
###test the function
# Exercise three of the four Temp/Wind combinations (FALSE/FALSE is not run).
YidingPlot(Wind=TRUE, Temp=TRUE)
YidingPlot(Wind=FALSE, Temp=TRUE)
YidingPlot(Wind=TRUE, Temp=FALSE)
|
7593718b2fbfd55d91cf1378c580f54c717eb08c
|
567e4fac2f8c72e4b3189941cab1ff0ce2462011
|
/cron/update_mystats.R
|
52d3a64e36b4574bf7ef0f34d79ddfe07b01c312
|
[] |
no_license
|
cmm801/r-finance
|
14f42977c16c7407accc97b0f6f6a3f2dc984f77
|
cdbb6e8933240d26490b5d36b7f4a58d2889485f
|
refs/heads/master
| 2023-02-13T12:33:59.772411
| 2018-08-28T00:48:43
| 2018-08-28T00:48:43
| 146,369,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
update_mystats.R
|
# Cron entry point: regenerate portfolio statistics as of the last business
# day and write them to the website output path.
# Assumes BASE_PATH and WEBSITE_URL are defined before this script runs, and
# that LIBRARY.R provides last.biz.date() and my.pf.stats() -- confirm there.
source( sprintf( "%s/finance/LIBRARY.R", BASE_PATH ) );
output.pathname = WEBSITE_URL;
# Compute statistics as of the most recent business date.
asOfDate = last.biz.date();
my.pf.stats( asOfDate = asOfDate, output.pathname = output.pathname );
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.