blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c235bd2324778a816ca12d39dfa40ebcc028b780
|
73514ffb85a520aeded94f6d979c9712f41f32f4
|
/BD/MEX.R
|
a429be94918fe2c51eed4173e8c18a791ac016dc
|
[] |
no_license
|
NicoGaRo/BD
|
71dbe6422f0c0fbd0b744b5a9b86509a98a60a1e
|
8b45528c4a7a3a9b5dcf885c24b51d579a773d4d
|
refs/heads/master
| 2021-01-21T11:23:22.034994
| 2017-05-18T21:57:36
| 2017-05-18T21:57:36
| 91,740,795
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,559
|
r
|
MEX.R
|
.
# MEX.R — sentiment analysis of tweets about the Mexican national team.
# Cleans the tweet text, builds a document-term matrix and fits naive Bayes
# classifiers to predict sentiment (-1 = negative, 0 = neutral, 1 = positive).
library(tm)         # corpus handling and document-term matrices
library(wordcloud)  # wordcloud() — was missing: called below without being loaded
library(e1071)      # naiveBayes() — was missing: called below without being loaded
library(gmodels)    # CrossTable() — was missing: called below without being loaded

mex <- read.csv("m.csv", sep = ";", stringsAsFactors = FALSE)
mex <- mex[1:435, ]                     # drop all trailing empty observations
mex$Sentiment <- factor(mex$Sentiment)  # convert the response variable to a factor
mex$Sentiment
str(mex)

# --- Text cleaning ---------------------------------------------------------
txt <- mex$Tweet.Text
# remove retweet markers ("RT @user", "via @user")
txtclean <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", txt)
# remove @mentions
txtclean <- gsub("@\\w+", "", txtclean)
# remove punctuation
txtclean <- gsub("[[:punct:]]", "", txtclean)
# remove digits
txtclean <- gsub("[[:digit:]]", "", txtclean)
# remove links
txtclean <- gsub("http\\w+", "", txtclean)

# Build the text corpus for analysis
mex_corpus <- VCorpus(VectorSource(txtclean))
print(mex_corpus)

# Lower-case every tweet so duplicated terms collapse in the analysis
mex_corpus_clean <- tm_map(mex_corpus, content_transformer(tolower))
# Remove Spanish stopwords plus a few extra high-frequency words
mex_corpus_clean <- tm_map(mex_corpus_clean, removeWords,
                           c(stopwords("spanish"), "que", "un", "una", "por", "la", "el"))
# File with additional Spanish stopwords
sw <- readLines("C:/Users/NICOLAS GARZON/Downloads/Nueva carpeta (2)/BD/stopwords.es.txt", encoding = "UTF-8")
sw <- iconv(sw, to = "ASCII//TRANSLIT")
mex_corpus_clean <- tm_map(mex_corpus_clean, removeWords, sw)  # remove remaining stopwords
mex_corpus_clean <- tm_map(mex_corpus_clean, stripWhitespace)  # collapse resulting blank runs
as.character(mex_corpus_clean[[3]])

# --- Tokenization ----------------------------------------------------------
mex_dtm <- DocumentTermMatrix(mex_corpus_clean)
mex_dtm

# Train / test split
mex_dtm_train <- mex_dtm[1:261, ]
mex_dtm_test <- mex_dtm[262:435, ]
# Sentiment label vectors
mex_train_labels <- mex[1:261, ]$Sentiment
mex_test_labels <- mex[262:435, ]$Sentiment
prop.table(table(mex_train_labels))
# Training-set class proportions are fairly diverse: positive leads (0.53),
# followed by neutral (0.39); negative tweets appear at a proportion of 0.068.
prop.table(table(mex_test_labels))
# The test set is distributed differently: positive 0.33, neutral 0.42,
# negative 0.23.

wordcloud(mex_corpus_clean, min.freq = 10, random.order = FALSE)  # corpus word cloud

# Split messages by sentiment
pos <- subset(mex, Sentiment == "1")
neg <- subset(mex, Sentiment == "-1")
neu <- subset(mex, Sentiment == "0")
wordcloud(pos$Tweet.Text, scale = c(3, 0.5))  # word cloud of positive tweets
wordcloud(neg$Tweet.Text, scale = c(3, 0.5))  # word cloud of negative tweets
wordcloud(neu$Tweet.Text, scale = c(3, 0.5))  # word cloud of neutral tweets

# --- Frequent terms --------------------------------------------------------
findFreqTerms(mex_dtm_train, 5)
mex_freq_words <- findFreqTerms(mex_dtm_train, 5)
# Drop rare/irrelevant terms: keep only terms seen >= 5 times in training
mex_dtm_freq_train <- mex_dtm_train[, mex_freq_words]
mex_dtm_freq_test <- mex_dtm_test[, mex_freq_words]

# Convert term counts to a Yes/No presence indicator (naiveBayes expects
# categorical features here)
convert_counts <- function(x) {
  ifelse(x > 0, "Yes", "No")
}
mex_train <- apply(mex_dtm_freq_train, MARGIN = 2, convert_counts)
mex_test <- apply(mex_dtm_freq_test, MARGIN = 2, convert_counts)

# --- Model 1: plain naive Bayes --------------------------------------------
mex_classifier <- naiveBayes(mex_train, mex_train_labels)
mex_test_pred <- predict(mex_classifier, mex_test)
CrossTable(mex_test_pred, mex_test_labels, prop.chisq = FALSE, prop.t = FALSE,
           dnn = c('Prediccion', 'Real'))
# Performance is mediocre: neutral reactions are over-predicted (123 vs the
# 74 real ones), negatives drop to 14 of the original 41 and positives to
# 37 of the original 59.

# --- Model 2: naive Bayes with Laplace smoothing ---------------------------
mex_classifier2 <- naiveBayes(mex_train, mex_train_labels, laplace = 1)
mex_test_pred2 <- predict(mex_classifier2, mex_test)
CrossTable(mex_test_pred2, mex_test_labels, prop.chisq = FALSE, prop.t = FALSE,
           dnn = c('Prediccion', 'Real'))
# Adding a Laplace estimator does not improve things: negative predictions
# fall (3 of 41), positives improve slightly but remain off (40 of 59) and
# neutral predictions rise further (131 of 74).
|
dedad97037f9e53c51ed38b1b96b3a0d2bad2ffd
|
f81fef0ba6045f35dd56a1e3f6a295afb44a8f4a
|
/R/harvest_subset.R
|
53ed0ff8998700075e80267f3ddbbba231381913
|
[] |
no_license
|
zhoylman/MCO
|
3ce8fa2077aad0c6265f690537c84ed860a27fa9
|
ed8eb0aa79e60ea2cd4937745afc76fc5668df99
|
refs/heads/master
| 2022-02-23T00:12:42.594171
| 2022-01-27T18:15:11
| 2022-01-27T18:15:11
| 210,397,643
| 0
| 0
| null | 2020-06-18T19:27:35
| 2019-09-23T16:04:36
|
HTML
|
UTF-8
|
R
| false
| false
| 4,113
|
r
|
harvest_subset.R
|
# harvest_subset.R — trends in relative NDVI and NPP on harvested USFS stands,
# stratified into Wet / Moderate / Dry climate terciles, plus a helper section
# that subsets the harvest shapefile to 200 random stands.
library(tidyverse)
library(sf)  # st_read()/st_write() below — was missing from the original script

## NDVI ---------------------------------------------------------------------
response <- read_csv('/home/zhoylman/MCO/data/USFS/harvest_stats_rG.csv')
response$`system:index` <- NULL
# climate column plus the annual NDVI columns 0_NDVI ... 35_NDVI
# (c(0, seq(1:35)) in the original is just 0:35)
response_years <- c('clim', paste0(0:35, '_NDVI'))
ndvi <- response[, response_years] %>%
  as_tibble() %>%                  # as.tibble() is a deprecated alias
  mutate(site = 1:200) %>%
  gather("key", 'value', -clim, -site) %>%
  # assign each site to one of three climate terciles
  mutate(xtile = statar::xtile(clim, 3) %>%
           as.factor()) %>%
  mutate(xtile = plyr::revalue(xtile, c(`1` = "Wet",
                                        `2` = "Moderate",
                                        `3` = "Dry"))) %>%
  group_by(xtile, key) %>%
  summarise(median = median(value)) %>%
  # recover the calendar year from the index embedded in the column name
  mutate(time = gsub("[^0-9.]", "", key) %>%
           as.numeric() + 1984) %>%
  filter(time < 2019)

# Post-1991 linear trend (slope, r^2) per tercile
models <- ndvi %>%
  filter(time > 1991) %>%
  group_by(xtile) %>%
  do(linearFit = lm(median ~ time, data = .)) %>%
  mutate(slope = coef(linearFit)[2],
         r2 = (summary(linearFit)$r.squared))

plots <- ggplot(data = ndvi, aes(x = time, y = median)) +
  geom_smooth(data = ndvi %>% filter(time > 1991), method = 'lm') +
  geom_point() +
  geom_vline(aes(xintercept = 1990)) +
  theme_bw(base_size = 16) +
  geom_text(data = models, aes(x = 1997, y = 1.4, label = paste0('Slope = ', round(slope, 4)))) +
  geom_text(data = models, aes(x = 1995, y = 1.3, label = paste0('r2 = ', round(r2, 3)))) +
  xlab('Year') +
  ylab('Relative NDVI') +  # fixed label typo ("Relavtive")
  # xtile already carries the Wet/Moderate/Dry labels; the original
  # labeller(c("1" = ..., ...)) call was an unnamed vector keyed on factor
  # levels that no longer exist, so it is dropped
  facet_wrap(~xtile) +
  theme(strip.background = element_blank(), strip.placement = "outside")
plots
ggsave(plots, file = '/home/zhoylman/MCO/data/USFS/harvest_plot.png', units = 'in', width = 10, height = 4)

## NPP ----------------------------------------------------------------------
response_npp <- read_csv('/home/zhoylman/MCO/data/USFS/harvest_stats_rNPP.csv')
response_npp$`system:index` <- NULL
response_years <- c('clim', paste0(1986:2019, '_annualNPP'))
npp <- response_npp[, response_years] %>%
  as_tibble() %>%
  mutate(site = 1:200) %>%
  gather("key", 'value', -clim, -site) %>%
  mutate(xtile = statar::xtile(clim, 3) %>%
           as.factor()) %>%
  mutate(xtile = plyr::revalue(xtile, c(`1` = "Wet",
                                        `2` = "Moderate",
                                        `3` = "Dry"))) %>%
  group_by(xtile, key) %>%
  summarise(median = median(value)) %>%
  # NPP column names contain the full year, so no 1984 offset is needed
  mutate(time = gsub("[^0-9.]", "", key) %>%
           as.numeric())

models_npp <- npp %>%
  filter(time > 1992) %>%
  group_by(xtile) %>%
  do(linearFit = lm(median ~ time, data = .)) %>%
  mutate(slope = coef(linearFit)[2],
         r2 = (summary(linearFit)$r.squared))

plots_npp <- ggplot(data = npp, aes(x = time, y = median)) +
  geom_smooth(data = npp %>% filter(time > 1992), method = 'lm') +
  geom_point() +
  geom_vline(aes(xintercept = 1990)) +
  theme_bw(base_size = 16) +
  geom_text(data = models_npp, aes(x = 1997, y = 1.15, label = paste0('Slope = ', round(slope, 4)))) +
  geom_text(data = models_npp, aes(x = 1995, y = 1.10, label = paste0('r2 = ', round(r2, 3)))) +
  xlab('Year') +
  ylab('Relative NPP') +   # fixed label typo ("Relavtive")
  facet_wrap(~xtile) +     # labeller dropped; see the NDVI plot note above
  theme(strip.background = element_blank(), strip.placement = "outside")
plots_npp
ggsave(plots_npp, file = '/home/zhoylman/MCO/data/USFS/harvest_plot_npp.png', units = 'in', width = 10, height = 4)

# NOTE(review): `ndvi_stats` is never defined anywhere in this script, so the
# two lines below error at runtime; commented out to keep the script runnable.
# plot(ndvi_stats)
# abline(v = 7)

# Exploratory spaghetti plot of the transposed NDVI table
ndvi_t <- t(ndvi) %>%
  as_tibble() %>%
  mutate(time = 1984:2019) %>%
  gather("key", 'value', -time)
ggplot(data = ndvi_t, aes(x = time, y = value, color = key)) +
  geom_point() +  # `guide = F` is not a geom_point() argument; dropped —
                  # the legend is already suppressed by the theme() below
  theme(legend.position = 'none')

## Subset the harvest shapefile to 200 random stands -------------------------
test <- st_read('/home/zhoylman/Downloads/R1_timberharvest_dividewest/R1_timberharvest_dividewest.shp')
test$FY_COMPLET <- as.character(test$FY_COMPLET) %>%
  as.numeric()
subset <- test %>%
  filter(FY_COMPLET == 1992)
# NOTE(review): the output name says "1990" but the filter above keeps
# FY_COMPLET == 1992 — confirm which year was intended.
index <- sample(seq_len(nrow(subset)), 200, replace = FALSE)
subset <- subset[index, ]
st_write(subset, "/home/zhoylman/Downloads/R1_timberharvest_dividewest/R1_timberharvest_dividewest_1990_200.shp")
|
ba3273d387e6fb39106df344eceba272606e26a3
|
3c759a7f001e3b08f94f96648e0c8c27244e3dcb
|
/run_analysis.R
|
b00d57f7674a0c1846da6b6c21beb902daad3fc2
|
[] |
no_license
|
davejermy/GettingDataProject
|
a891393d789826cf6d1dd93badc7286cd4e19edc
|
e911cabfaeb865dda2967cca0f52333759f7b022
|
refs/heads/master
| 2016-09-06T11:47:23.060755
| 2014-07-27T15:35:52
| 2014-07-27T15:35:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,589
|
r
|
run_analysis.R
|
##---------------------------------------------------------------------------
## run_analysis.R — tidies the UCI HAR smartphone dataset: merges the test
## and train sets, keeps only the mean()/std() features, and writes the
## average of each variable for each subject/activity pair.
##---------------------------------------------------------------------------
##---------------
## Reference Data
##---------------
## Load "activity labels" and "features" files
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
features <- read.table("./UCI HAR Dataset/features.txt")
##----------
## Test Data
##----------
## Load the "y_test", "subject_test" and "x_test" files
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", sep = " ")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("./UCI HAR Dataset/test/x_test.txt")
## Add activity labels (map each activity id to its name)
y_test$activity <- activity_labels[match(y_test$V1, activity_labels$V1), 2]
## Join Test Data together
test_data <- cbind(y_test[2], x_test)       ## activity label + measurements
test_data <- cbind(subject_test, test_data) ## prepend subject id
##--------------
## Training Data
##--------------
## Load the "y_train", "subject_train" and "x_train" files
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt", sep = " ")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("./UCI HAR Dataset/train/x_train.txt")
## Add activity labels
y_train$activity <- activity_labels[match(y_train$V1, activity_labels$V1), 2]
## Join Train Data together
train_data <- cbind(y_train[2], x_train)       ## activity label + measurements
train_data <- cbind(subject_train, train_data) ## prepend subject id
##---------------
## Merge Datasets
##---------------
## Combine test and train observations into a single data frame
data <- rbind(test_data, train_data)
## Set the column names: subject, activity, then the feature names
columns <- as.character(features[, 2])
columns <- c("subject", "activity", columns)
colnames(data) <- columns
##---------------------
## Create Tidy Data Set
##---------------------
library(reshape2)
## Reshape into long form, averaging each variable per subject/activity
tidydata <- recast(data,
                   subject + activity + variable ~ .,
                   fun.aggregate = mean,
                   id.var = 1:2)
## set the column names
colnames(tidydata) <- c("subject", "activity", "variable", "average")
## Keep only the mean() and std() variables. Both parentheses are escaped;
## the original "std()" pattern contained an empty regex group and therefore
## matched any occurrence of "std" rather than the literal "std()".
tidydata <- tidydata[grepl("mean\\(", tidydata$variable) |
                       grepl("std\\(\\)", tidydata$variable), ]
## Tidy up the variable names
tidydata$variable <- sub("^t", "Time", tidydata$variable)       ## initial "t" -> "Time"
tidydata$variable <- sub("^f", "Freq", tidydata$variable)       ## initial "f" -> "Freq"
tidydata$variable <- sub("BodyBody", "Body", tidydata$variable) ## remove repeated "Body"
tidydata$variable <- gsub("-", "", tidydata$variable)           ## remove "-"
tidydata$variable <- sub("\\(\\)", "", tidydata$variable)       ## remove "()"
tidydata$variable <- sub("mean", "Mean", tidydata$variable)     ## "mean" -> "Mean"
tidydata$variable <- sub("std", "Std", tidydata$variable)       ## "std" -> "Std"
## Tidy up the activity names: e.g. "WALKING_UPSTAIRS" -> "Walking Upstairs"
tidydata$activity <- tolower(tidydata$activity)     ## lower case
tidydata$activity <- sub("_", " ", tidydata$activity) ## replace underscore
tidydata$activity <- gsub("(^|[[:space:]])([[:alpha:]])",
                          "\\1\\U\\2",
                          tidydata$activity,
                          perl = TRUE)              ## capitalise each word
##---------------------
## Export Tidy Data Set
##---------------------
write.csv(tidydata, file = "Tidy Dataset.txt", row.names = FALSE)
|
000df26ecc7d7cb13c367d51224352452993933d
|
f0b3c018aa244a6cf595a836c1cb94591a9777b8
|
/R/style.R
|
1805438f2e885e24ae8869ce7ffdcb1b923286df
|
[] |
no_license
|
krisrs1128/treelapse
|
dc5e3fe4c6cbd90fdaa6146b00a117eaaaf8b59d
|
55bea7e6256ca4e0fb88bbdfd54e04046fb9e0b2
|
refs/heads/master
| 2020-04-09T18:38:47.130805
| 2020-01-02T16:45:45
| 2020-01-02T16:45:45
| 68,158,499
| 7
| 0
| null | 2018-10-04T23:32:17
| 2016-09-14T00:22:51
|
JavaScript
|
UTF-8
|
R
| false
| false
| 1,958
|
r
|
style.R
|
#! /usr/bin/env Rscript
## File description -------------------------------------------------------------
## Functions for adapting display in treelapse views.
#' Merge in default display for timebox trees / treeboxes
#'
#' Completes a partially filled list of display options. Any entry supplied
#' in `opts` overrides the corresponding default; nested option lists
#' (`scent_frac`, `margin`, `tree`, `ts`) are merged element-wise by
#' `modifyList()`, so a partial sub-list only replaces the keys it names.
#'
#' @param opts A (possibly partial) named list of display options.
#' @return A complete named list of display options.
#' @export
merge_timebox_display <- function(opts) {
  # Size of the scented-widget region, as fractions of the display
  scent_frac_defaults <- list(
    "width" = 0.15,
    "height" = 0.2
  )
  # Outer margins around the time-series and tree panels
  margin_defaults <- list(
    "bottom" = 30,
    "top" = 20,
    "ts_right" = 30,
    "ts_left" = 30,
    "tree_right" = 15,
    "tree_left" = 15
  )
  # Tree panel appearance
  tree_defaults <- list(
    "frac" = 0.43,
    "col_unselected" = "#CDCDCD",
    "col_selected" = "#2D869F",
    "col_search" = "#C2571A",
    "layout" = "id"
  )
  # Time-series panel appearance
  ts_defaults <- list(
    "col_unselected" = "#696969",
    "col_selected" = "#2D869F",
    "col_search" = "#C2571A",
    "width_unselected" = 1,
    "width_selected" = 2,
    "width_search" = 3,
    "opacity_unselected" = 0.1,
    "opacity_selected" = 0.9,
    "opacity_search" = 1,
    "max_depth" = Inf,
    "min_depth" = 0,
    "leaves_only" = FALSE
  )
  defaults <- list(
    "size_min" = 1,
    "size_max" = 10,
    "mouseover_font_size" = 15,
    "axis_font_size" = 13,
    "font_family" = "Roboto",
    "n_ticks_x" = 4,
    "n_ticks_y" = 4,
    "x_axis_rotation" = 0,
    "y_axis_rotation" = 0,
    "axis_text_anchor" = "middle",
    "tick_size" = 6,
    "scent_frac" = scent_frac_defaults,
    "margin" = margin_defaults,
    "col_background" = "#F7F7F7",
    "tree" = tree_defaults,
    "ts" = ts_defaults
  )
  modifyList(defaults, opts)
}
#' Merge in default display for doi tree / sankey
#'
#' Completes a partially filled list of display options; entries given in
#' `opts` take precedence over the defaults below, and unknown entries in
#' `opts` are carried through unchanged by `modifyList()`.
#'
#' @param opts A (possibly partial) named list of display options.
#' @return A complete named list of display options.
#' @export
merge_doi_display <- function(opts) {
  defaults <- list(
    "size_min" = 0,
    "size_max" = 20,
    "leaf_width" = 10,
    "leaf_height" = 100,
    "focus_font_size" = 20,
    "font_size" = 10,
    "text_offset" = 0.5,
    "text_display_neighbors" = 1,
    "transition_duration" = 1000
  )
  modifyList(defaults, opts)
}
|
3accac89378dfffd022e2c778bb02c49cbede45b
|
beb988d8d6df6262d261451b72039d947cc4870b
|
/code.r
|
7222c245629211e7f9075c18805b13ad76315202
|
[] |
no_license
|
adalardo/niche_neutral
|
f1ad91776325c698daf5c35edc8442857e3db093
|
731e27ef771a3a16788968d01293a6decd75bc59
|
refs/heads/master
| 2020-03-17T07:03:59.000470
| 2018-07-20T18:45:58
| 2018-07-20T18:45:58
| 133,381,660
| 1
| 1
| null | 2018-05-14T15:20:20
| 2018-05-14T15:20:19
| null |
UTF-8
|
R
| false
| false
| 14,414
|
r
|
code.r
|
######################################################################################
# Partitioning niche and neutral dynamics on community assembly #####################
# Mortara et al ######################################################################
#####################################################################################
## Code for applying the niche-neutral GLMM framework
## Data and analysis from the manuscript
#############################
# PART 1: loading packages #
############################
# required packages
library(bbmle)
library(lme4)
#library(optimx)
library(xtable)
library(piecewiseSEM)
library(dplyr)
source("r2_table.R")
#############################
# PART 2: loading data ######
############################
fern.data <- read.csv("fern_data_Mortaraetal.csv", header=TRUE)
head(fern.data)
fern.data$site <- scale(rep(1:30, length(unique(fern.data$species))))
#####################################################################
# PART 3: building the model to represent our hypothesis ############
# Step by step building models corresponding to general hypothesis #
####################################################################
# Ecological Strategy defined by all the three traits: laminar thickness, life form and indumentum interacting with altitude, drift among species sharing the same ES, local and regional limited dispersal
m.full <- glmer(abundance ~ thickness*alt_std + thickness*I(alt_std^2)
#+ indumentum*alt_std + indumentum*I(alt_std^2)
+ life_form*alt_std + life_form*I(alt_std^2)
+ (1|species) + (1|species:mountain) + (1|species:site) + (1|site),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
head(fern.data)
unique(fern.data$life_form)
fern.data$ep <- ifelse(fern.data$life_form!='ep', 'non.ep', 'ep')
head(fern.data)
unique(fern.data$ep)
m.full.lf <- glmer(abundance ~ #thickness*alt_std + thickness*I(alt_std^2)
#+ indumentum*alt_std + indumentum*I(alt_std^2)
ep*alt_std + ep*I(alt_std^2)
+ (1|species) + (1|species:mountain) + (1|species:site) + (1|site),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
m.full2 <- glmer(abundance ~ alt_std + I(alt_std^2)
+ (1|species) + (1|species:mountain) + (1|species:site) + (1+alt_std|species),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
m.neutral <- glmer(abundance ~ (1|species) + (1|species:mountain) + (1|species:site) + (1 |site),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=1e6)))
m.niche <- glmer(abundance ~ thickness*alt_std + thickness*I(alt_std^2)
#+ indumentum*alt_std + indumentum*I(alt_std^2)
+ life_form*alt_std + life_form*I(alt_std^2)
+ (1|species) + (1|site),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
m.env <- glmer(abundance ~ alt_std + I(alt_std^2)
+ (1|species) + (1|site) + (1+alt_std|species),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
m.null <- glmer(abundance ~ 1 +
(1|species) + (1|mountain) + (1|site),
data=fern.data, family="poisson",
control=glmerControl(optimizer="bobyqa", optCtrl=list(maxfun=5e6)))
m.list <- list(m.full, m.neutral, m.niche, m.null, m.full2, m.env, m.full.lf)
bic.tab <- sapply(m.list, BIC)
mod.names <- c("niche & neutral", "neutral", "niche", "null", "env & neutral", "env", "lifeform & neutral")
names(bic.tab) <- mod.names
sort(bic.tab)
r2.table(m.neutral)
r2.tab <- sapply(m.list, r2.table)
r2.tab <- bind_rows(r2.tab)
row.names(r2.tab) <- mod.names
r2.tab
#####################################################################
# PART 4: Calculating predicted values from best model #############
####################################################################
# First, we create a data frame with all combination of sites, species and traits
comb.table <- data.frame(expand.grid(mountain=levels(fern.data$mountain),
alt_std=unique(fern.data$alt_std),
site=unique(fern.data$site),
species=unique(fern.data$species)),
life_form=fern.data$life_form,
thickness=fern.data$thickness,
indumentum=fern.data$indumentum)
comb.table <- na.omit(comb.table)
# Second, we use the function predict to create a data frame of predicted values for all possible combinations based on the best model m5.4.3
pred.values <- predict(m.full, re.form=NULL, newdata=comb.table,
type='response')
# Third we calculate mean predicted values and standard error for each altitude
## Predicted mean values
pred.table <- aggregate(pred.values, list(altitude=comb.table$alt_std, thickness=comb.table$thickness,
indumentum=comb.table$indumentum,
life_form=comb.table$life_form), mean)
names(pred.table)[5] <- "mean"
## Predicted stardard error
pred.table$se <- aggregate(pred.values, by=list(altitude=comb.table$alt_std, thickness=comb.table$thickness,
indumentum=comb.table$indumentum,
life_form=comb.table$life_form),
function(x)sd(x)/sqrt(length(x)))$x
head(pred.table)
# Finally, we calculate the upper and lower confidence interval based on t distribution
## Confidence Interval (mean +- standard error * t(pdf)
t.prev <- pt(pred.table$mean, df=(nrow(pred.table)-1))
pred.table$lower <- (pred.table$mean - pred.table$se)*t.prev
pred.table$upper <- (pred.table$mean + pred.table$se)*t.prev
# Second we create a data frame with observed mean values and its standard error
obs <- aggregate(fern.data$abundance, by=list(altitude=fern.data$alt_std, thickness=fern.data$thickness,
indumentum=fern.data$indumentum,
life_form=fern.data$life_form), mean)
## Observed standard error
obs$se <- aggregate(fern.data$abundance, by=list(altitude=fern.data$alt_std, thickness=fern.data$thickness,
indumentum=fern.data$indumentum,
life_form=fern.data$life_form),
function(x)sd(x)/sqrt(length(x)))$x
head(obs)
names(obs) <- c("Altitude", "thickness", "indumentum", "life_form", "Abundance", "std")
#############################################
######### Creating figures ##################
############################################
###############################################
######### DAQUI PRA FRENTE AINDA NAO FUNFA ####
###############################################
# all trait combinations
esp.hab <- expand.grid(c('membranacea', 'coriacea'), c('ausente','presente'), c('ter', 'hemi', 'ep'))
esp.hab
#########################################
#### GRAFICO MODELO #####################
#########################################
cor1 <-rgb(140, 1, 28, maxColorValue=255) #rgb(44, 152, 32, maxColorValue=255) # terrestre
cor3 <- rgb(4, 70, 120, maxColorValue=255) #rgb(239, 144, 33, maxColorValue=255) # hemi
cor2 <- rgb(199, 172, 29, maxColorValue=255) # ep
head(obs)
ep.cor.si <- subset(obs, thickness=="coriacea" & indumentum=="ausente" &life_form=="ep")
ep.cor.ci <- subset(obs, thickness=="coriacea" & indumentum=="presente" &life_form=="ep")
ep.mem.si <- subset(obs, thickness=="membranacea" & indumentum=="ausente" &life_form=="ep")
ep.mem.ci <- subset(obs, thickness=="membranacea" & indumentum=="presente" &life_form=="ep")
head(obs)
par(mfrow=c(1,2))
plot(Abundance ~ Altitude, data=ep.cor.si, log='x', ylim=c(0,20), col=cor1, las=1)
segments(x0=ep.cor.si[,1],
y0= ep.cor.si[,5] + ep.cor.si[,6],
y1= ep.cor.si[,5] - ep.cor.si[,6], col=cor1)
points(Abundance ~ Altitude, data=ep.cor.ci, pch=19,col=cor1)
segments(x0=ep.cor.ci[,1],
y0= ep.cor.ci[,5] + ep.cor.ci[,6],
y1= ep.cor.ci[,5] - ep.cor.ci[,6], col=cor1)
plot(Abundance ~ Altitude, data=ep.mem.si, log='x', ylim=c(0,20), col=cor1, las=1)
segments(x0=ep.mem.si[,1],
y0= ep.mem.si[,5] + ep.mem.si[,6],
y1= ep.mem.si[,5] - ep.mem.si[,6], col=cor1)
points(Abundance ~ Altitude, data=ep.mem.ci, pch=19, col=cor1)
segments(x0=ep.mem.ci[,1],
y0= ep.mem.ci[,5] + ep.mem.ci[,6],
y1= ep.mem.ci[,5] - ep.mem.ci[,6], col=cor1)
head(ep.cor.ci)
loadfonts(device = "postscript")
# Final model figure: 3 life forms (rows) x 2 leaf thicknesses (columns);
# observed means +/- SE plus model predictions with +/- 2*SE envelopes.
# NOTE(review): the device opened is PDF but embed_fonts() below targets
# "graf_modelo.eps" — one of the two is stale; confirm the intended output.
pdf("graf_modelo.pdf")
par(mai=c(0.5, 0.5, 0.2, 0.25), oma=c(1, 1, 1, 0.1))
layout(matrix(c(0, 0, 0, 0,
                0, 1, 2, 0,
                0, 3, 4, 0,
                0, 5, 6, 0),4,4, byrow=TRUE),
       widths=c(0.1, 1, 1, 0.1), heights=0.1)
# BUG FIX: the col= and ylim= lines were orphaned OUTSIDE the plot() call (a
# stray ')' was left behind when pt.bg was commented out), which is a syntax
# error; they are reattached as arguments here.  The loop also ran 1:8 even
# though the layout defines 6 panels, letters[1:6] labels 6, and ylim has 6
# rows — it now runs 1:6.
# NOTE(review): esp.hab has 12 rows (thickness x indumentum x life form) but
# only the first 6 are used — confirm the intended combination order.
for(i in 1:6){
  plot(obs[obs$thickness==esp.hab[i,1] & obs$indumentum==esp.hab[i,2] & obs$life_form==esp.hab[i,3], c(1,5)],
       pch=rep(c(21, 19), 3)[i], bty="l", xlab="", ylab="", cex=1.7, yaxt="n", xaxt="n", log='y',
       col=rep(c(cor1, cor2, cor3), each=2)[i],
       ylim=rbind(c(0.1,40), c(0.1,40), c(0.1,120), c(0.1,120), c(0.1,23), c(0.1,23))[i,])
  # axis control: x labels only on the bottom row, y labels only on the left column
  if(i %in% c(5,6)){
    axis(1, at=unique(com.obs$Altitude), labels=unique(com.obs$Altitude), cex.axis=1.3)}
  else{axis(1, at=unique(com.obs$Altitude), labels=FALSE)}
  if(i %in% seq(1,6,2)){
    axis(2, cex.axis=1.3, las=1)}
  else{axis(2, labels=FALSE)}
  # observed standard errors
  segments(x0=com.obs[com.obs$esp==esp.hab[i,1] & com.obs$hab==esp.hab[i,2], 1],
           y0=com.obs[com.obs$esp==esp.hab[i,1] & com.obs$hab==esp.hab[i,2], 4] +
             com.obs[com.obs$esp==esp.hab[i,1] & com.obs$hab==esp.hab[i,2], 5],
           y1=com.obs[com.obs$esp==esp.hab[i,1] & com.obs$hab==esp.hab[i,2], 4] -
             com.obs[com.obs$esp==esp.hab[i,1] & com.obs$hab==esp.hab[i,2], 5],
           col=rep(c(cor1, cor2, cor3), each=2)[i])
  ## mean prediction
  lines(com.prev[com.prev$esp==esp.hab[i,1] & com.prev$hab==esp.hab[i,2], c(1,4)],
        col=rep(c(cor1, cor2, cor3), each=2)[i])
  ## envelope of +/- 2 x SE
  lines(com.prev[com.prev$esp==esp.hab[i,1] & com.prev$hab==esp.hab[i,2], c(1,6)], lty=2,
        col=rep(c(cor1, cor2, cor3), each=2)[i])
  lines(com.prev[com.prev$esp==esp.hab[i,1] & com.prev$hab==esp.hab[i,2], c(1,7)], lty=2,
        col=rep(c(cor1, cor2, cor3), each=2)[i])
  mtext(paste(paste("(", letters[1:6][i], sep=""), ")", sep=""), side=3, adj=0.05, padj=-0.5, cex=1) #font=2
}
mtext("Mean species abundances (log)", side=2, outer=TRUE, padj=1, cex=1.2)
mtext("Altitude (m)", side=1, outer=TRUE, padj=-0.5, cex=1.2)
mtext("Membranaceous", side=3, adj=0.25, padj=1, outer=TRUE, font=2)
mtext("Coriaceous", side=3, outer=TRUE, adj=0.8, padj=1, font=2)
mtext("Terrestrial", side=4, outer=TRUE, padj=-1.7, adj=0.87, font=2)
mtext("Hemiepiphyte", side=4, outer=TRUE, padj=-1.7, font=2)
mtext("Epiphyte", side=4, outer=TRUE, padj=-1.7, adj=0.135, font=2)
dev.off()
embed_fonts("graf_modelo.eps", outfile = "graf_modelo.eps",
            options = "-dEPSCrop")
#########################################
#### SADS FIGURE ########################
#########################################
head(com.rank2)
head(atri)
# Keep only species id plus the two trait columns (life form, leaf thickness).
# NOTE(review): relies on columns 26/27 of `atri` being habitoB/espessuraB —
# confirm against how `atri` was built.
atri.cor <- atri[,c(1, 26, 27)]
head(atri.cor)
atri.cor
# comb  = plotting color per species (by life form)
# comb2 = plotting symbol per species (open = membranaceous, filled = coriaceous)
atri.cor$comb <- NA
atri.cor$comb2 <- NA
head(atri.cor)
atri.cor$comb[atri.cor$habitoB=="ter" & atri.cor$espessuraB=="membranacea"] <- cor1
atri.cor$comb[atri.cor$habitoB=="ep" & atri.cor$espessuraB=="membranacea"] <- cor3
atri.cor$comb[atri.cor$habitoB=="hemi" & atri.cor$espessuraB=="membranacea"] <- cor2
atri.cor$comb[atri.cor$habitoB=="ter" & atri.cor$espessuraB=="coriacea"] <- cor1
atri.cor$comb[atri.cor$habitoB=="ep" & atri.cor$espessuraB=="coriacea"] <- cor3
atri.cor$comb[atri.cor$habitoB=="hemi" & atri.cor$espessuraB=="coriacea"] <- cor2
atri.cor$comb2[atri.cor$habitoB=="ter" & atri.cor$espessuraB=="membranacea"] <- 1
atri.cor$comb2[atri.cor$habitoB=="ep" & atri.cor$espessuraB=="membranacea"] <- 1
atri.cor$comb2[atri.cor$habitoB=="hemi" & atri.cor$espessuraB=="membranacea"] <- 1
atri.cor$comb2[atri.cor$habitoB=="ter" & atri.cor$espessuraB=="coriacea"] <- 19
atri.cor$comb2[atri.cor$habitoB=="ep" & atri.cor$espessuraB=="coriacea"] <- 19
atri.cor$comb2[atri.cor$habitoB=="hemi" & atri.cor$espessuraB=="coriacea"] <- 19
# helper vectors for the SAD plotting function: panels that get y / x axis labels
cont.y <- c(1,4,7,10)
cont.x <- 8:10
# Rank-abundance (SAD) panels, one per altitude, with points styled by life
# form (color) and leaf thickness (open/filled symbol).
# BUG FIX: the body ignored its `com` argument and always read the global
# com.rank2; it now uses the argument.  The default is unchanged, so calls
# with no arguments behave exactly as before.
# NOTE(review): still depends on globals com.cota, riq.cota, cota, cont.x,
# cont.y and the cor1/cor2/cor3 colors — confirm they are in scope when called.
graf.sad <- function(com=com.rank2, cor=atri.cor$comb, ponto=atri.cor$comb2){
  par(mai=c(0.24, 0.6, 0.24, 0.05), oma=c(3, 3, 0.2, 0.1))
  layout(matrix(c(1, 2, 3,
                  4, 5, 6,
                  7, 8, 9,
                  10, 11,0), 4, 3, byrow=TRUE))
  for(i in 1:10){
    # species sorted by abundance at altitude i; colors/symbols follow that order
    plot(com[[i]], log="y", ylim=c(0.0004,0.5), xlim=c(0, 63),
         col=cor[order(com.cota[i,],decreasing=TRUE )][1:riq.cota[i]],
         pch=ponto[order(com.cota[i,],decreasing=TRUE )][1:riq.cota[i]],
         bty="l", cex=1.9, cex.axis=1.5, xlab="", ylab="", las=1,
         yaxt="n", xaxt="n")
    mtext(paste(LETTERS[1:10][i], paste(unique(cota)[i], "m", sep=" "), sep=". "), adj=0.05, padj=-0.5, cex=1.2, font=2)
    # y-axis labels only in the left column, x-axis labels only in the bottom row
    if(i %in% cont.y){
      axis(2, las=1, cex.axis=1.5, at=c(0.0005, 0.002, 0.01, 0.05, 0.2), labels=c("0.0005", "0.002", "0.01", "0.05", "0.2"))
    }
    else{axis(2, at=c(0.0005, 0.002, 0.01, 0.05, 0.2), labels=rep(" ", 5))}
    if(i %in% cont.x){
      axis(1, las=1, cex.axis=1.5) }
    else{axis(1, labels=FALSE)}
  }
  # empty 11th panel used only to host the legend
  plot(0,0, axes=FALSE, xlab="", ylab="", col=0)
  legend(x=-1.155, y=0.7, c("terrestrial and membranaceous", "terrestrial and coriaceous",
                            "hemiepiphyte and membranaceous", "hemiepiphyte and coriaceous",
                            "epiphyte and membranaceous", "epiphyte and coriaceous"),
         pch=rep(c(1, 19), 3), col=rep(c(cor1, cor3, cor2), each=2), cex=1.5, pt.cex=1.6, bty="n")
  mtext("Species Rank", 1, outer=TRUE, cex=1.3, padj=1)
  mtext("Species Relative Abundances (log)", 2, outer=TRUE, cex=1.3, padj=-1)
}
save.image("Mortaraetal.RData")
|
bc9204801429cc231ccf08b73b78304fa08c314f
|
be9f6270df9ce29195f3181556228694475abbde
|
/Cryptography/ECdoubling.R
|
8a555a6d141c97276c96ebea662e86d3a26624ee
|
[] |
no_license
|
Arcsle09/Bitcoin-Economy-and-Blockchain
|
f97c3a2bc78e4e3445a41e2c40611cb8c2818480
|
08fb4cd98d5727df58a349611998c6a047abe795
|
refs/heads/master
| 2020-03-19T14:55:40.192936
| 2018-06-26T08:59:21
| 2018-06-26T08:59:21
| 136,646,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
ECdoubling.R
|
# Point doubling on an elliptic curve y^2 = x^3 + Acurve*x + b over GF(Pcurve).
# `a` is a point c(x, y); returns the doubled point 2*a as c(x, y).
# Relies on the globals Acurve and Pcurve and the modular-inverse helper
# modinv() defined elsewhere in this project.
ECdouble <- function(a){
  # Tangent slope at `a`: (3*x^2 + A) / (2*y) mod P, division via modular inverse.
  slope <- ((3 * a[1] * a[1] + Acurve) * modinv(2 * a[2], Pcurve)) %% Pcurve
  x_new <- (slope * slope - 2 * a[1]) %% Pcurve
  y_new <- (slope * (a[1] - x_new) - a[2]) %% Pcurve
  c(x_new, y_new)
}
|
038d5fd951123b80c4d6e48569a04fb0b4fe530d
|
cee38f51779e94e49885cbc87f74b427184147f6
|
/R/CalcYourTax.R
|
ff27a78f6cb52be7e3d9b78a0fefb02726f9459c
|
[] |
no_license
|
atmdv/CalcYourTax
|
49b25193b6eb4a30af5e2d9ac49223a9e5d6f228
|
b166db00a484b9397b4aac2bce40a12975a03080
|
refs/heads/master
| 2020-04-14T05:28:25.638369
| 2019-01-04T14:21:03
| 2019-01-04T14:21:03
| 163,661,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,036
|
r
|
CalcYourTax.R
|
# Consolidate Nordnet and/or DeGiro transaction exports and compute taxable
# share income per year against the Danish bracket limits.
#
# Args:
#   nordnet: path to a Nordnet CSV export (semicolon-separated), or NULL.
#   degiro:  path to a DeGiro CSV export (comma-separated), or NULL.
# Returns: a data frame with per-year taxable income, the bracket limit, and
#   a suggestion to harvest losses or realize gains plus the distance to the limit.
# NOTE(review): if BOTH arguments are NULL, rbind(NULL, NULL) is NULL and the
# subsequent column accesses fail — confirm callers always supply at least one.
# Depends on helpers calcyourprofits() and calcyourdividends() defined elsewhere.
calcyourtax <- function(nordnet=NULL, degiro=NULL){
  pacman::p_load(dplyr, tidyr, purrr, readr, stringr, lubridate, kableExtra)
  if(!is.null(nordnet)){
    # Import Nordnet transaction data
    nordnet <- read.table(nordnet, sep=";", dec=",",
                          header=T, stringsAsFactors = F)
    nordnet <- nordnet[, c("Valørdag", "Transaktionstype", "ISIN", "Resultat", "Vekslingskurs", "Beløb")]
    # Strip the thousands separator, then convert decimal comma to dot.
    # NOTE(review): sub() only removes the FIRST "." — amounts >= 1,000,000
    # would parse wrong; confirm the export never has two separators.
    nordnet$Resultat <- sub(".", "", nordnet$Resultat, fixed = TRUE)
    nordnet$Resultat <- sub(",", ".", nordnet$Resultat, fixed = TRUE)
    nordnet$Resultat <- as.numeric(nordnet$Resultat)
    nordnet$year <- format(parse_date_time(nordnet$Valørdag,
                                           orders="Ymd", tz="UTC"), "%Y")
    # Exchange rate of exactly 1 is treated as a Danish (DKK) transaction.
    nordnet$country <- ifelse(nordnet$Vekslingskurs==1, "DK", "Other")
    names(nordnet) <- c("value_date", "transaction_type", "ISIN", "result",
                        "exchange_rate", "amount", "year", "country")
  }
  if(!is.null(degiro)){
    # Import DeGiro transaction data
    degiro <- read.table(degiro, sep=",", dec=",", header=TRUE, encoding="UTF-8")
    degiro <- degiro[, c("Valør.dato", "Beskrivelse", "ISIN", "X", "FX")]
    # DeGiro export carries no per-row amount; use 0 so the schemas line up.
    degiro$amount <- 0
    degiro$year <- format(parse_date_time(degiro$Valør.dato,
                                          orders="dmY", tz="UTC"), "%Y")
    degiro$country <- ifelse(degiro$FX==1, "DK", "Other")
    names(degiro) <- c("value_date", "transaction_type", "ISIN", "result",
                       "exchange_rate", "amount", "year", "country")
  }
  consolidated_portfolio <- rbind(nordnet, degiro)
  # Refine "Other" using the ISIN country prefix (DE = Germany, NO = Norway).
  consolidated_portfolio$country <- ifelse(consolidated_portfolio$country=="Other" &
                                             substr(consolidated_portfolio$ISIN, start = 1, stop = 2)=="DE",
                                           "DE", consolidated_portfolio$country)
  consolidated_portfolio$country <- ifelse(consolidated_portfolio$country=="Other" &
                                             substr(consolidated_portfolio$ISIN, start = 1, stop = 2)=="NO",
                                           "NO", consolidated_portfolio$country)
  # Calculate profits
  profits <- calcyourprofits(consolidated_portfolio)
  # Calculate dividends
  dividends <- calcyourdividends(consolidated_portfolio)
  # Tax brackets: Danish share-income progression limit per year (DKK).
  bracket_limits <- data.frame(matrix(ncol=2, nrow=0))
  colnames(bracket_limits) <- c("year", "bracket_limit")
  bracket_limits[1, ] <- c("2016", 50600)
  bracket_limits[2, ] <- c("2017", 51700)
  bracket_limits[3, ] <- c("2018", 52900)
  bracket_limits[4, ] <- c("2019", 54000)
  bracket_limits$bracket_limit <- as.numeric(bracket_limits$bracket_limit)
  # Calculate taxable income by year and how far it is from the bracket limit.
  tax_income <- profits %>%
    left_join(dividends, by="year") %>%
    mutate(tax_income=profit+dividend) %>%
    left_join(bracket_limits, by="year") %>%
    mutate(how_to_optimize=ifelse(tax_income>bracket_limit,
                                  "Harvest Losses", "Realize Gains"),
           amount=abs(tax_income-bracket_limit))
  return(tax_income)
}
|
a8e3b99c58d44b560f973d184778138edefe1241
|
7147fbebcf896692544fc9dc3859aae0cf9270ad
|
/server.R
|
1e10728f10c3cb53d73f16aa91c05ff2d601cf76
|
[] |
no_license
|
joecat6/SpatialTissueLocator
|
2df087405032c59f5fec4287c9e09e43400ca3e4
|
afa9c27b14cc0cf53e615167f9ca7eb65dee9d03
|
refs/heads/master
| 2022-04-20T22:04:20.031605
| 2020-04-20T16:10:27
| 2020-04-20T16:10:27
| 257,335,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,938
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(readr)
library(RColorBrewer)
library(EBImage)
# Two-color ramp used to paint spots below/above the threshold.
pal = colorRampPalette(c("blue", "red"))
# Precomputed per-spot summaries, one column per tissue section.
totalReads = read_rds('../totalReads.rds')
totalGenes = read_rds('../totalGenes.rds')
clusterMat = read_rds('../clusterMat.rds')
spots = read_rds('../spots.rds')
# Per-section plotting extents (x/y padding around the histology image).
xlims = list()
xlims$C1 = c(-5, 5)
xlims$C2 = c(-5, 5)
xlims$D1 = c(-5, 5)
xlims$D2 = c(-5, 5)
xlims$E2 = c(-8, 6)
ylims= list()
ylims$C1 = c(-6, 0)
ylims$C2 = c(-5, 1)
ylims$D1 = c(-4, 2)
ylims$D2 = c(-4, 2)
ylims$E2 = c(-6, 4)
# Align matrix columns with the section names used by the UI selector.
colnames(totalReads) = c("C1", "C2", "D1", "D2", "E2")
colnames(totalGenes) = c("C1", "C2", "D1", "D2", "E2")
colnames(clusterMat) = c("C1", "C2", "D1", "D2", "E2")
# Draw a histology image and overlay the spot grid, colored by `cols`
# (1 = first ramp color, 2 = second).  The y axis is flipped because spot
# column indices grow downward in the image.
makePlot = function(im, xlim, ylim, spots, cols) {
  x_hi <- max(spots$row) + xlim[2]
  y_lo <- -max(spots$col) + ylim[1]
  # Empty canvas sized to the image, then raster + points on top.
  plot(NULL, xlim = c(xlim[1], x_hi), ylim = c(y_lo, ylim[2]))
  rasterImage(im, xlim[1], y_lo, x_hi, ylim[2])
  points(spots$row, -spots$col, pch = 19, col = pal(2)[cols])
}
# Histogram of the per-spot metric with a red vertical line at the current
# threshold, so the user can see where the cut falls in the distribution.
makeHist = function(x, thresh) {
  hist(x)
  abline(v=thresh, col="red")
}
# Define server logic required to draw a histogram
# Server: renders the tissue overlay and the matching metric histogram.
shinyServer(function(input, output, session) {
  output$tissuePlot <- renderPlot({
    imName = input$image
    im = readImage(paste0('../', imName, '_histology_small.jpg'))
    if (input$metric=="K-means clusters (k=2)") {
      # Cluster labels come pre-binarized; disable the log/threshold controls.
      x = clusterMat[,imName]
      cols = x
      p = makePlot(im, xlims[imName][[1]], ylims[imName][[1]], spots, cols)
      updateCheckboxInput(session, "log", value=FALSE)
      updateSliderInput(session, "thresh", value=0, min=0, max=0)
    } else if (input$metric=="Total Reads") {
      x = totalReads[,imName]
      if (input$log) {
        x = log2(x)
      }
      updateSliderInput(session, "thresh", min=min(x), max=max(x))
      cols = ifelse(x > input$thresh, 2, 1)
      p = makePlot(im, xlims[imName][[1]], ylims[imName][[1]], spots, cols)
    } else if (input$metric=="Total Genes") {
      x = totalGenes[,imName]
      if (input$log) {
        x = log2(x)
      }
      updateSliderInput(session, "thresh", min=min(x), max=max(x))
      cols = ifelse(x > input$thresh, 2, 1)
      p = makePlot(im, xlims[imName][[1]], ylims[imName][[1]], spots, cols)
    }
  })
  output$histPlot = renderPlot({
    # BUG FIX: imName was never defined in this reactive — it was local to the
    # tissuePlot expression above, so this renderPlot errored with
    # "object 'imName' not found".
    imName = input$image
    if (input$metric=="K-means clusters (k=2)") {
      x = clusterMat[,imName]
      thresh = 1.5
    } else if (input$metric=="Total Reads") {
      x = totalReads[,imName]
      if (input$log) {
        x = log2(x)
      }
      thresh = input$thresh
    } else if (input$metric=="Total Genes") {
      x = totalGenes[,imName]
      if (input$log) {
        x = log2(x)
      }
      thresh = input$thresh
    }
    h = makeHist(x, thresh)
  })
})
|
b6398e874e53f165aec16e6cc281d290a88f27dc
|
ceadca5b319e44bcc73225b6ec29f05bc660c195
|
/ROracle/src/test/R/oracle-test.R
|
41b7266759b1702624fcc40885a9db007907c32c
|
[
"MIT"
] |
permissive
|
perNyfelt/renjin-dbi
|
d1fbef965c627c425fffff12e4ac758354112588
|
e2177521e6e794525d8ddcd94e4fc525f451e95b
|
refs/heads/master
| 2023-07-20T04:11:40.372505
| 2023-07-10T09:04:03
| 2023-07-10T09:04:03
| 168,132,672
| 0
| 0
|
MIT
| 2023-07-10T09:04:05
| 2019-01-29T10:03:27
|
R
|
UTF-8
|
R
| false
| false
| 182
|
r
|
oracle-test.R
|
library(hamcrest)
library(ROracle)
# Runs the shared Renjin DBI test suite against a local Oracle XE instance.
# NOTE(review): integration test — requires a reachable database at
# localhost/XE with the 'renjintest' account; it is not a self-contained unit test.
test.driver <- function() renjinDBITest(dbConnect(ROracle(), url="jdbc:oracle:thin:@localhost/XE", username="renjintest", password="renjintest"))
|
921e273e5a94d4a644f704658687ad3281c27d18
|
6e7c335030b39054747d88b29844102e9989e245
|
/R/model.matrix.hdlm.R
|
ca9fa369c0a3ed05755d31fe9dc56084c3ab126a
|
[] |
no_license
|
cran/hdlm
|
35011a8dd6576781bd21f4dd5166359e30ac8c75
|
8f6a01e2014b623cc9d2b1e78898358d104c0f1c
|
refs/heads/master
| 2021-06-26T21:50:37.400120
| 2016-09-20T06:44:06
| 2016-09-20T06:44:06
| 17,696,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 297
|
r
|
model.matrix.hdlm.R
|
# S3 model.matrix method for "hdlm" fits: return the stored design matrix when
# the fit kept one (component "x"), otherwise rebuild the model frame and fall
# through to the default method.
model.matrix.hdlm <- function(object, ...) {
  pos <- match("x", names(object), 0L)
  if (pos > 0L) {
    # Design matrix was retained at fit time — hand it straight back.
    object[[pos]]
  } else {
    data <- model.frame(object, xlev = object$xlevels, ...)
    NextMethod("model.matrix", data = data,
               contrasts.arg = object$contrasts)
  }
}
|
bdaa7ac01f2480f8fe2d4aa7555ecf5cb5124e39
|
14ee38b2616f18104d3d9921ff222017543e4a70
|
/tests/testthat/test-rewind.R
|
e483f62821035928b765b60bc829c6a296ca9e71
|
[
"MIT"
] |
permissive
|
stevage/geojsonrewind
|
e88066b5c628381ea05cd3e0c0a72a5957afa89c
|
71cd277d9a8e2f03032781b16fcbda6f591b459d
|
refs/heads/master
| 2020-03-21T21:56:39.986587
| 2018-01-08T15:28:09
| 2018-01-08T15:28:09
| 139,094,647
| 0
| 0
| null | 2018-06-29T03:07:50
| 2018-06-29T03:07:50
| null |
UTF-8
|
R
| false
| false
| 790
|
r
|
test-rewind.R
|
context("rewind")
library("jsonlite")
# Winding-order tests: rewind() should orient the outer ring counter-clockwise
# by default (second vertex x stays 101) and clockwise with outer = FALSE
# (ring reversed, second vertex x becomes 100).
test_that("rewind works with character input", {
  x <- '{"type":"Polygon","coordinates":[[[100.0,0.0],[101.0,0.0],[101.0,1.0],[100.0,1.0],[100.0,0.0]]]}'
  aa <- rewind(x)
  bb <- rewind(x, outer = FALSE)
  expect_is(aa, "json")
  expect_is(unclass(aa), "character")
  expect_match(aa, "Polygon")
  expect_equal(fromJSON(aa, FALSE)$coordinates[[1]][[2]][[1]], 101)
  expect_is(bb, "json")
  expect_is(unclass(bb), "character")
  expect_match(bb, "Polygon")
  expect_equal(fromJSON(bb, FALSE)$coordinates[[1]][[2]][[1]], 100)
})
# Error contract: missing argument and unsupported input classes must fail
# with informative messages.
test_that("rewind fails well", {
  expect_error(rewind(), "argument \"x\" is missing")
  expect_error(rewind(5), "no 'rewind' method for numeric")
  expect_error(rewind(mtcars), "no 'rewind' method for data.frame")
})
|
45328180a0cc281a7c9268d6b6262daeda0f120d
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/chilling/04_make_figures/buggy/Do_Not_use_safe_box_average_over_models.R
|
9f9329a28a08e7cadf386d1f8a48639daeda43a2
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 9,504
|
r
|
Do_Not_use_safe_box_average_over_models.R
|
rm(list=ls())
library(data.table)
library(dplyr)
library(ggmap)
library(ggplot2)
options(digit=9)
options(digits=9)
##########################################################################################
### ###
### Define Functions here ###
### ###
##########################################################################################
# Prepare chill-accumulation data for box plots / maps.
#
# Args:
#   data: observations with chill sums (sum_J1, sum_F1), year, model, scenario,
#         lat/long and climate_type.
#   average_type: "locations" (average over sites), "models" (average over
#         climate models) or "none" (keep both dimensions).
# Returns a list of six elements: the Jan quantile table, its mean- and
# median-over-models versions, then the same three for Feb (the map summaries
# are NA unless average_type == "none").
# NOTE(review): comments below say "90th percentile" but the code computes
# probs = 0.1, i.e. the value exceeded 90% of the time — confirm which is meant.
# NOTE(review): funs() is deprecated in dplyr >= 0.8; list(~ mean(.)) is the
# modern equivalent (left unchanged here).
produce_data_4_plots <- function(data, average_type="none"){
  needed_cols = c("Chill_season", "sum_J1", "sum_F1","year", "model",
                  "scenario", "lat", "long", "climate_type")
  ################### CLEAN DATA
  data = subset(data, select=needed_cols)
  # Drop the 2006-2024 gap between the historical and future windows.
  data = data %>% filter(year<=2005 | year>=2025)
  # time periods are
  time_periods = c("Historical","2025_2050", "2051_2075", "2076_2099")
  data$time_period = 0L
  data$time_period[data$year <= 2005] = time_periods[1]
  data$time_period[data$year >= 2025 & data$year <= 2050] = time_periods[2]
  data$time_period[data$year >= 2051 & data$year <= 2075] = time_periods[3]
  data$time_period[data$year >= 2076] = time_periods[4]
  data$time_period = factor(data$time_period, levels =time_periods, order=T)
  #################################################################
  #
  # Take Average over locations, or models, or none.
  #
  #################################################################
  if (average_type == "locations"){
    data <- data %>%
      group_by(time_period, model, scenario, climate_type) %>%
      summarise_at(.funs = funs(averages = mean), vars(sum_J1:sum_F1)) %>%
      data.table()
  } else if (average_type == "models"){
    data <- data %>%
      group_by(time_period, lat, long, scenario, climate_type) %>%
      summarise_at(.funs = funs(averages = mean), vars(sum_J1:sum_F1)) %>%
      data.table()
  }
  # Duplicate the historical rows under both RCP labels so each future
  # scenario panel carries its own historical baseline.
  data_f <- data %>% filter(time_period != "Historical")
  data_h_rcp85 <- data %>% filter(time_period == "Historical")
  data_h_rcp45 <- data %>% filter(time_period == "Historical")
  data_h_rcp85$scenario = "RCP 8.5"
  data_h_rcp45$scenario = "RCP 4.5"
  # data$scenario[data$scenario=="historical"] = "Historical"
  data_f$scenario[data_f$scenario=="rcp45"] = "RCP 4.5"
  data_f$scenario[data_f$scenario=="rcp85"] = "RCP 8.5"
  data = rbind(data_f, data_h_rcp45, data_h_rcp85)
  rm(data_h_rcp45, data_h_rcp85, data_f)
  ################### GENERATE STATS
  #######################################################################
  ##                                                                   ##
  ##     Find the 90th percentile of the chill units                   ##
  ##     Grouped by location, model, time_period and rcp               ##
  ##     This could be used for box plots, later compute the mean.     ##
  ##     for maps                                                      ##
  ##                                                                   ##
  #######################################################################
  if (average_type == "locations"){
    quan_per_loc_period_model_jan <- data %>%
      group_by(time_period, scenario, model, climate_type) %>%
      summarise(quan_90 = quantile(sum_J1_averages, probs = 0.1)) %>%
      data.table()
    quan_per_loc_period_model_feb <- data %>%
      group_by(time_period, scenario, model, climate_type) %>%
      summarise(quan_90 = quantile(sum_F1_averages, probs = 0.1)) %>%
      data.table()
    # There will be no map for this case
    mean_quan_per_loc_period_model_jan = NA
    mean_quan_per_loc_period_model_feb = NA
    median_quan_per_loc_period_model_jan = NA
    median_quan_per_loc_period_model_feb = NA
  } else if (average_type == "models"){
    quan_per_loc_period_model_jan <- data %>%
      group_by(time_period, lat, long, scenario, climate_type) %>%
      summarise(quan_90 = quantile(sum_J1_averages, probs = 0.1)) %>%
      data.table()
    quan_per_loc_period_model_feb <- data %>%
      group_by(time_period, lat, long, scenario, climate_type) %>%
      summarise(quan_90 = quantile(sum_F1_averages, probs = 0.1)) %>%
      data.table()
    # There will be no map for this case
    mean_quan_per_loc_period_model_jan = NA
    mean_quan_per_loc_period_model_feb = NA
    median_quan_per_loc_period_model_jan = NA
    median_quan_per_loc_period_model_feb = NA
  } else if (average_type == "none") {
    quan_per_loc_period_model_jan <- data %>%
      group_by(time_period, lat, long, scenario, model, climate_type) %>%
      summarise(quan_90 = quantile(sum_J1, probs = 0.1)) %>%
      data.table()
    quan_per_loc_period_model_feb <- data %>%
      group_by(time_period, lat, long, scenario, model, climate_type) %>%
      summarise(quan_90 = quantile(sum_F1, probs = 0.1)) %>%
      data.table()
    # it seems there is a library, perhaps tidyverse, that messes up
    # the above line, so the two variables above are 1-by-1.
    # just close and re-open R Studio
    mean_quan_per_loc_period_model_jan <- quan_per_loc_period_model_jan %>%
      group_by(time_period, lat, long, scenario) %>%
      summarise(mean_over_model = mean(quan_90)) %>%
      data.table()
    mean_quan_per_loc_period_model_feb <- quan_per_loc_period_model_feb %>%
      group_by(time_period, lat, long, scenario) %>%
      summarise(mean_over_model = mean(quan_90)) %>%
      data.table()
    median_quan_per_loc_period_model_jan <- quan_per_loc_period_model_jan %>%
      group_by(time_period, lat, long, scenario) %>%
      summarise(mean_over_model = median(quan_90)) %>%
      data.table()
    median_quan_per_loc_period_model_feb <- quan_per_loc_period_model_feb %>%
      group_by(time_period, lat, long, scenario) %>%
      summarise(mean_over_model = median(quan_90)) %>%
      data.table()
  }
  return(list(quan_per_loc_period_model_jan,
              mean_quan_per_loc_period_model_jan,
              median_quan_per_loc_period_model_jan,
              quan_per_loc_period_model_feb,
              mean_quan_per_loc_period_model_feb,
              median_quan_per_loc_period_model_feb)
  )
}
#######################################################################
## ##
## Driver ##
## ##
#######################################################################
# Driver: for each time window / chill model, read the summary data, build the
# plotting tables and save Jan/Feb box plots.
# NOTE(review): safe_box_plot() is not defined anywhere in this file — it must
# be sourced elsewhere before running; confirm (the filename also says
# "Do_Not_use", so this script may be intentionally retired).
time_types = c("non_overlapping") # , "overlapping"
model_types = c("dynamic_model_stats") # , "utah_model_stats"
main_in = "/Users/hn/Desktop/Desktop/Kirti/check_point/chilling"
file_name = "summary_comp.rds"
avg_type = "models" # locations, models, none
time_type = time_types[1]
model_type = model_types[1]
for (time_type in time_types){
  for (model_type in model_types){
    in_dir = file.path(main_in, time_type, model_type, file_name)
    out_dir = file.path(main_in, time_type, model_type, "/")
    datas = data.table(readRDS(in_dir))
    information = produce_data_4_plots(datas, average_type = avg_type)
    # Elements 1 and 4 are the Jan/Feb quantile tables (see producer above).
    safe_jan <- safe_box_plot(information[[1]], due="Jan.")
    safe_feb <- safe_box_plot(information[[4]], due="Feb.")
    output_name = paste0(time_type, "_", unlist(strsplit(model_type, "_"))[1], "_Jan_", avg_type, ".png")
    ggsave(output_name, safe_jan, path=out_dir, width=4, height=4, unit="in", dpi=400)
    output_name = paste0(time_type, "_", unlist(strsplit(model_type, "_"))[1], "_Feb_", avg_type, ".png")
    ggsave(output_name, safe_feb, path=out_dir, width=4, height=4, unit="in", dpi=400)
    # means over models
    # mean_map_jan = ensemble_map(data=information[[2]], color_col="mean_over_model", due="Jan.")
    # mean_map_feb = ensemble_map(data=information[[5]], color_col="mean_over_model", due="Feb.")
    # output_name = paste0(time_type, "_", unlist(strsplit(model_type, "_"))[1], "_map_jan.png")
    # ggsave(output_name, mean_map_jan, path=out_dir, width=7, height=4.5, unit="in", dpi=400)
    # output_name = paste0(time_type, "_", unlist(strsplit(model_type, "_"))[1], "_map_feb.png")
    # ggsave(output_name, mean_map_feb, path=out_dir, width=7, height=4.5, unit="in", dpi=400)
  }
}
|
9cf9af961ed2b480ba026fa92e948f54049f8e39
|
06b2d058411a9b1e69af1446e19bb422692c7ace
|
/R/LogLikelihood4Mixtures.R
|
8dd71a08e8e21120f7ff8b2b084ed50a67eb204a
|
[] |
no_license
|
Mthrun/AdaptGauss
|
5a9a0460a04e759deb517a9100cbee6e53d5a346
|
d0a5d20d90eb51faf41af16c01a507d6e41d9416
|
refs/heads/master
| 2023-07-21T01:15:55.704721
| 2023-07-14T09:28:26
| 2023-07-14T09:28:26
| 113,838,340
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,936
|
r
|
LogLikelihood4Mixtures.R
|
LogLikelihood4Mixtures <- function(Data, Means, SDs, Weights, IsLogDistribution=Means*0){
  # LogLikelihood <- LogLikelihood4Mixtures(Data, Means, SDs, Weights, IsLogDistribution)
  # Computes the log-likelihood of a mixture model: LogLikelihood = sum(log(PDFmixture)).
  #
  # INPUT
  # Data[1:n]      data whose distribution is to be evaluated
  # Means[1:L]     means of the Gaussians, L == number of Gaussians
  # SDs[1:L]       estimated Gaussian kernels = standard deviations
  # Weights[1:L]   relative number of points in each Gaussian (prior probabilities): sum(Weights) == 1
  #
  # OPTIONAL
  # IsLogDistribution[1:L]  flags whether a component is a (generalized) log-normal:
  #                if IsLogDistribution[i]==0 then Mix(i) = W[i] * N(M[i], S[i])
  #                if IsLogDistribution[i]==1 then Mix(i) = W[i] * LogNormal(M[i], S[i])
  #                Default: IsLogDistribution = Means*0
  #
  # OUTPUT
  # LogLikelihood  log-likelihood of the mixture = sum(log(PDFmixture))
  # LogPDF[1:n]    log(PDFmixture)
  # PDFmixture     probability density at each data point
  # Author: ALU, 2015; ported from Matlab to R by CL 02/2016
  # 1st editor: MT 02/2016 — renamed to LogLikelihood4Mixtures, so LGL models
  # are possible too, analogous to LikelihoodRatio4Mixtures, Chi2testMixtures, KStestMixtures
  # Pattern Recognition and Machine Learning, C.M. Bishop, 2006, ISBN-13: 978-0387-31073-2, p. 433 (9.14)
  PdfForMix = Pdf4Mixtures(Data,Means,SDs,Weights,IsLogDistribution) # evaluate the mixture PDF
  PDFmixture <- PdfForMix$PDFmixture
  PDFmixture[PDFmixture<=0] = NaN # non-positive densities to NaN so log() is defined
  LogPDF = log(PDFmixture) # natural logarithm
  LogLikelihood = sum(LogPDF, na.rm=TRUE) # sum, ignoring the NaN entries
  return(list(LogLikelihood=LogLikelihood, LogPDF = LogPDF, PDFmixture = PDFmixture))
}#end function
|
7d7201b24078768bb7e11b0765562b1dc4f8fbab
|
daa2fa86e4146df7268cd96dc71298f785adfa72
|
/man/cania.sub.ts.Rd
|
a20f9d485142f0328a002f429b7e73dba020c055
|
[] |
no_license
|
HYDauer/FlowScreen
|
e04db01eb256a175a976aa4d95b72f56579ad796
|
ef208c7d529359ab73c18d87ba5bb54093fb9022
|
refs/heads/master
| 2020-04-16T08:04:14.493872
| 2019-01-12T16:31:42
| 2019-01-12T16:31:42
| 165,410,500
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,479
|
rd
|
cania.sub.ts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cania.sub.ts.R
\docType{data}
\name{cania.sub.ts}
\alias{cania.sub.ts}
\title{Subset of the Caniapiscau River Daily Flows}
\format{Formatted as a data.frame with the following columns:
\itemize{
\item ID - Water Survey Canada Station ID
\item Date - Date of observation, formatted as YYYY-mm-dd
\item Flow - Mean daily streamflow, measured in m3/s
\item Code - Data Quality Code
\item Agency - Source Agency (Water Survey Canada)
\item Year - Calendar year
\item month - Calendar month
\item doy - Calendar day of year
\item hyear - Hydrologic year
\item hmonth - Hydrologic month
\item hdoy - Hydrologic day of year
}}
\source{
Environment Canada. 2010. EC Data Explorer V1.2.30. \cr
Water Survey of Canada V1.2.30 https://www.ec.gc.ca/rhc-wsc/
}
\usage{
data(caniapiscau)
}
\description{
This data set includes a subset of the mean daily streamflow
for the Caniapiscau River. It includes observations from 1970-1995
(hydrologic years). The code used to subset and modify the original
data is shown below.
}
\examples{
# Code used to subset and modify original Caniapiscau series:
\dontrun{
data(caniapiscau)
cania.ts <- create.ts(caniapiscau, hyrstart=3)
cania.sub.ts <- subset(cania.ts, cania.ts$hyear \%in\% c(1970:1995))
}
# example use of example subset flow series
data(cania.sub.ts)
head(cania.sub.ts)
str(cania.sub.ts)
}
\keyword{datasets}
|
42d06384171dc51f94a9f736361cc9c56bb61895
|
67fef5dcc51a14471a34097c5074c27449e51856
|
/Project4/code/part2.4-Q13.R
|
0b9f4064fcdc320e6a7f6d184867116242491638
|
[] |
no_license
|
WendyCui1018/Data-Mining-and-Problem-Solving-on-Large-scale-Data-Sets
|
6049020123286ecf1fd01d611c085e8a9bbc3cf2
|
68cea14d6924fe8a32075aafdf3a06d6731c43b8
|
refs/heads/master
| 2020-03-30T22:34:54.204832
| 2019-01-27T22:17:33
| 2019-01-27T22:17:33
| 151,671,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,644
|
r
|
part2.4-Q13.R
|
# Build per-actor average movie ratings from the movie graph and the
# actor/movie and movie/rating maps (igraph is assumed to be loaded).
filepath = "part1_data_original/movie_graph_edge_list.txt"
movie_network<-read.graph(filepath, format = "ncol",directed = FALSE)
fpath1 <- "part1_data_original/actorid_movieid_map.txt"
ori_actor_movie <- read.table(fpath1, header = FALSE, sep = '\t', col.names = c('actor_id','movie_id'))
# Keep only pairs whose movie is a vertex of the network.
valid_actor_movie <- subset(ori_actor_movie, movie_id %in% names(V(movie_network)))
fpath2 <- "part1_data_original/movie_idrating_map.txt"
ori_movie_rating <- read.table(fpath2, header = FALSE, sep = '\t', col.names = c('movie_id','rating'))
valid_movie_rating <- subset(ori_movie_rating, movie_id %in% names(V(movie_network)))
valid_movie_rating <- subset(valid_movie_rating, rating != "NaN") # remove "NaN"
#object movieID: 12596,48391,100856
actor_movie_bipartite_graph <- graph_from_data_frame(valid_actor_movie, directed = FALSE, vertices = NULL)
unique_actors <- unique(valid_actor_movie$actor_id)
ratings <- vector()
avg_ratings <- vector()
actor_rating <- data.frame(actor_id = integer(0), avg_rating = double(0))
# write by row
# NOTE(review): rbind() inside the loop grows the data frame one row at a time
# (quadratic); fine for small data but worth vectorizing if this gets slow.
for (act_id in unique_actors){
  mvs <- valid_actor_movie$movie_id[valid_actor_movie$actor_id == act_id]
  ratings <- valid_movie_rating$rating[valid_movie_rating$movie_id %in% mvs]
  avg_rating <- mean(ratings)
  #avg_ratings <- append(avg_ratings, avg_rating)
  row<- c(act_id, avg_rating)
  actor_rating <- rbind(actor_rating, row)
}
#actor_rating <- data.frame(actor_id = unique_actors, avg_rating = avg_ratings)
colnames(actor_rating) <- c('actor_id','score')
#remove NaN -- some of the actors' score is NaN, since all the movies they involved in are non-rated
actor_rating <- subset(actor_rating, actor_rating$score != 'NaN')
# write to file
f_output <- "part1_data_original/actor_score.csv"
write.csv(actor_rating, f_output)
# Predict a movie's rating as the mean average-score of the actors who appear
# in it (uses the valid_actor_movie and actor_rating tables built above).
prediction <- function(mv_id){
  cast <- valid_actor_movie$actor_id[valid_actor_movie$movie_id == mv_id]
  cast_scores <- actor_rating$score[actor_rating$actor_id %in% cast]
  mean(cast_scores)
}
# Evaluate the actor-average predictor: RMSE over all rated movies, then
# predictions for the three target titles.
gt_ratings <- valid_movie_rating$rating
# FIX: preallocate instead of growing the vector with append() in a loop
# (the original was O(n^2) in the number of movies; result is identical).
pred_ratings <- numeric(length(valid_movie_rating$movie_id))
for (k in seq_along(valid_movie_rating$movie_id)) {
  pred_ratings[k] <- prediction(valid_movie_rating$movie_id[k])
}
cat("RMSE:", sqrt(mean((gt_ratings - pred_ratings)^2)))
# predict for three movies
obj_movies <- c(12596, 48391, 100856)
obj_mv_names <- c("Batman v Superman: Dawn of Justice (2016)", "Mission: Impossible - Rogue Nation (2015)","Minions (2015)")
for (index in 1:3) {
  cat(obj_mv_names[index], '\n')
  cat("Predict Rating is:", prediction(obj_movies[index]), '\n', '\n')
}
|
fb03333d33e1f6e9a5e23282802d3d83d261de71
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FinancialInstrument/examples/make_spread_id.Rd.R
|
7466de8bde501ef019aa3ca1d926170fcfdf8306
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
make_spread_id.Rd.R
|
# Auto-extracted example script for FinancialInstrument::make_spread_id —
# builds a spread primary_id from two contract ids, with and without
# century-year ("CY") formatting, and with an explicit root symbol.
library(FinancialInstrument)
### Name: make_spread_id
### Title: Construct a primary_id for a 'spread' 'instrument' from the
###   primary_ids of its members
### Aliases: make_spread_id
### ** Examples
ids <- c('VX_aug1','VX_U11')
make_spread_id(ids, format='CY')
make_spread_id(ids, format=FALSE)
make_spread_id(c("VIX_JAN11","VIX_FEB11"),root='VX',format='CY')
|
61e52c8753c6171038cb6b0e3bca850f0770f119
|
6049000493ce2555976ae0c4d969762bd010e9cb
|
/R/utility_functions.R
|
47497299dc3e20c09be458f3124a41278ea40646
|
[
"MIT"
] |
permissive
|
uit-hdl/nowaclite
|
6b4ad6c57f4b2b1741b52f9e6b52618faa0bdcc7
|
9df4d60025634ab735fafc316c55a5134ce76c7f
|
refs/heads/master
| 2021-08-07T20:56:23.509228
| 2020-05-15T08:21:07
| 2020-05-15T08:21:07
| 177,790,668
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,114
|
r
|
utility_functions.R
|
#' List the study designs supported by NOWAC
#'
#' @return A character vector of the available design names.
#'
#' @author Bjorn Fjukstad, \email{bjorn@cs.uit.no}
#'
#' @seealso \code{\link{selectDesign}}
#'
#' @keywords design
#'
#' @export
getDesigns <- function() {
  c("case-control", "cross-sectional")
}
#' Get hospital name
#'
#' Translate a hospital code (an integer between 1 and 11) into the hospital
#' name, e.g. Tromso, Bodo or Molde.
#'
#' @param code Integer code of the hospital (1-11), or \code{NA}.
#'
#' @return The hospital name as a string, or \code{NA} when \code{code} is \code{NA}.
#'
#' @author Bjorn Fjukstad, \email{bjorn@cs.uit.no}
#'
#' @seealso \code{\link{nowaclite}}
#'
#' @keywords hospital
#'
#' @examples
#' hospital_name <- getHospital(1)
#'
#' @export
getHospital <- function(code) {
  if (is.na(code)) {
    return(NA)
  }
  hospitals <- c("Tromso", "Bodo", "Buskerud (Drammen)", "Fredrikstad",
                 "Haukeland", "Molde", "Radiumhospitalet", "St. Olavs hospital",
                 "Stavanger", "Tonsberg", "Radium/Ulleval")
  # Out-of-range codes are rejected with the same message the switch()-based
  # version produced when it fell through to NULL.
  if (code < 1 || code > length(hospitals)) {
    stop("Invalid hospital code. Should be between 1 and 11.")
  }
  hospitals[[code]]
}
|
a2b3cb61eef9077dd17a1a0c3f4b2a8ff440ee97
|
0838e88f31f0d102fbb5503668a3c7f4124b4224
|
/plot1.R
|
1a988c2eb48ceffeb8fc183ce692080d12dbad01
|
[] |
no_license
|
LemonCanon/ExploratoryStatsFinal
|
28c103eb2beb848275978ad142bdd5b103b8c563
|
21a41270105817a6c3e70a42c7327cc74145a28e
|
refs/heads/master
| 2016-08-12T18:33:33.036680
| 2016-04-08T13:28:23
| 2016-04-08T13:28:23
| 55,526,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 849
|
r
|
plot1.R
|
# Question 1 RP April 5, 2016
# Compare total PM2.5 between 1999, 2002, 2005, and 2008 using the base plot system.
# Set wd to the directory of this file so the data files resolve.
# NOTE(review): parent.frame(2)$ofile only exists when the file is source()d;
# running the lines interactively will error — confirm intended usage.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
# Test whether pm25 exists in the global environment to save reading time.
if(!exists("pm25", .GlobalEnv)){
  pm25 <- readRDS("summarySCC_PM25.rds")
}
# Total PM2.5 for each year measured (named numeric, names are the years).
tot <- with(pm25, tapply(Emissions, year, sum))
# tapply names are character; convert to numeric for a proper numeric x axis.
years <- as.numeric(names(tot))
# Open the PNG device.
png(filename = "plot1.png", width = 480, height = 480, units = "px")
plot(years, tot, pch=16, xlab = "Year", ylab = "PM2.5 (in tonnes)")
lines(years, tot, lwd = 2)
# BUG FIX: title() draws the title itself; the original wrapped it in par()
# (par(title(...))), which only "worked" because the argument was evaluated
# for its side effect.
title(main = "Total PM2.5 in each year")
# Close the PNG device.
dev.off()
|
ec2e04f9474d1d649286fcdb24cb2467cedd9aeb
|
da6483d1cf0b1c26f8ceb9dd6cf80f186feebff5
|
/man/na_if.Rd
|
45d98fdc52a3a7966a8a4c6c6d85a7c82febc941
|
[] |
no_license
|
mattdowle/expss
|
6a60c8cecd651648359e187789175d76d5efd7e8
|
6ec5696737fd3edc39f5d427f9eb268bd33c1721
|
refs/heads/master
| 2020-04-22T09:20:12.982355
| 2018-12-11T20:13:07
| 2018-12-11T20:13:07
| 170,268,747
| 1
| 0
| null | 2019-02-12T06:59:03
| 2019-02-12T06:59:03
| null |
UTF-8
|
R
| false
| true
| 1,994
|
rd
|
na_if.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/na_if.R
\name{na_if}
\alias{na_if}
\alias{na_if<-}
\alias{\%na_if\%}
\alias{mis_val}
\alias{mis_val<-}
\alias{\%mis_val\%}
\title{Replace certain values with NA}
\usage{
na_if(x, value)
na_if(x) <- value
x \%na_if\% value
mis_val(x, value)
mis_val(x) <- value
x \%mis_val\% value
}
\arguments{
\item{x}{vector/matrix/data.frame/list}
\item{value}{vector/matrix/data.frame/function}
}
\value{
x with NA's instead of \code{value}
}
\description{
There are following options for \code{value}:
\itemize{ \item{\code{vector}}{ Vector of values which should be replaced
with \code{NA} in \code{x}. } \item{\code{logical vector/matrix/data.frame}}{
NA's will be set in places where \code{value} is TRUE. \code{value} will be
recycled if needed.} \item{\code{function}}{ NA's will be set in places where
\code{value(x)} is TRUE. Function will be applied columnwise. Additionally,
there are special functions for common cases of comparison. For example
\code{na_if(my_var, gt(98))} will replace all values which are greater than
98 in \code{my_var} with NA. For detailed description of special functions
see \link{criteria}} } \code{mis_val} is an alias for the \code{na_if} with
absolutely the same functionality.
}
\examples{
a = c(1:5, 99)
# 99 to NA
na_if(a, 99) # c(1:5, NA)
a \%na_if\% 99 # same result
# values which greater than 5 to NA
na_if(a, gt(5)) # c(1:5, NA)
set.seed(123)
dfs = data.frame(
a = c("bad value", "bad value", "good value", "good value", "good value"),
b = runif(5)
)
# rows with 'bad value' will be filled with NA
# logical argument and recycling by columns
na_if(dfs, dfs$a=="bad value")
a = rnorm(50)
# values greater than 1 or less than -1 will be set to NA
# special functions usage
na_if(a, lt(-1) | gt(1))
# values inside [-1, 1] to NA
na_if(a, -1 \%thru\% 1)
}
\seealso{
For reverse operation see \link{if_na}, \link{if_val} for more
general recodings.
}
|
1b0ae540efaf999a21f0485a5c3053d3faafd5db
|
e008acb70ddefa14d4265a3582b36345c5542293
|
/server.R
|
b2cf1ba9f8e0c25a7321088c88becb35d480af4f
|
[] |
no_license
|
mingyaaa/STAI-App
|
459ff58b59a96e7e81edc7e2b8523ce8cfb8e0db
|
a3640833fc67b6a9f41a5fb4dd32829eff4865a7
|
refs/heads/master
| 2020-12-24T19:05:11.790462
| 2016-05-06T00:53:09
| 2016-05-06T00:53:09
| 58,168,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,583
|
r
|
server.R
|
library(shiny)
# Define server logic required
shinyServer(function(input, output){
# We create an output that will be a text with the results of the questionnaire.
output$resultAE <- renderText({
AE <<- as.numeric(input$r1)+as.numeric(input$r2)+
as.numeric(input$r3)+as.numeric(input$r4)+as.numeric(input$r5)+
as.numeric(input$r6)+as.numeric(input$r7)+as.numeric(input$r8)+
as.numeric(input$r9)+as.numeric(input$r10)+as.numeric(input$r11)+
as.numeric(input$r12)+as.numeric(input$r13)+as.numeric(input$r14)+
as.numeric(input$r15)+as.numeric(input$r16)+as.numeric(input$r17)+
as.numeric(input$r18)+as.numeric(input$r19)+as.numeric(input$r20)
AR <<- as.numeric(input$r21)+as.numeric(input$r22)+
as.numeric(input$r23)+as.numeric(input$r24)+as.numeric(input$r25)+
as.numeric(input$r26)+as.numeric(input$r27)+as.numeric(input$r28)+
as.numeric(input$r29)+as.numeric(input$r30)+as.numeric(input$r31)+
as.numeric(input$r32)+as.numeric(input$r33)+as.numeric(input$r34)+
as.numeric(input$r35)+as.numeric(input$r36)+as.numeric(input$r37)+
as.numeric(input$r38)+as.numeric(input$r39)+as.numeric(input$r40)
CAE <<- if (input$sex == "H")
{if (AE >= 29 && AE <=60) { print("Alto")} else if
(AE <= 28 && AE >= 14) {print("Media")} else if
(AE <= 13 && AE >= 0){print("Bajo")}
} else if (input$sex == "M")
{if (AE >= 32 && AE <=60) { print("Alto")} else if
(AE <= 31 && AE >= 15) {print("Media")} else if
(AE <= 14 && AE >= 0){print("Bajo")}
}
CAR <<- if (input$sex == "H")
{if (AR >= 29 && AR <=60) { print("Alto")} else if
(AR <= 28 && AR >= 14) {print("Media")} else if
(AR <= 13 && AR >= 0){print("Bajo")}
} else if (input$sex == "M")
{if (AR >= 33 && AR <=60) {print("Alto")} else if
(AR <= 32 && AR >= 17){print("Media")} else if
(AR <= 16 && AR >= 0){print("Bajo")}
}
print(c("ANSIEDAD ESTADO (puntos):", AE,". Nivel:", (CAE), "."))
})
output$resultAR <- renderText({
print(c("ANSIEDAD RASGO (puntos):", AR,". Nivel", (CAR), "."))})
})
|
510cd0cd0d3927067f4c00b066b027737aaa94d9
|
16e2994bdc209e7309c9876f3ea5c491b0046348
|
/server.R
|
aad07cb4ca101b4a6c0a8c887130c68860750ffd
|
[] |
no_license
|
jh668/Coursera-DS-Capstone
|
e56bfbe8deb40f300edd4ea45e8b342693141fc8
|
5dec6398f16da2d6f4823a86517435ceb86236e3
|
refs/heads/master
| 2022-11-06T22:27:27.003969
| 2020-07-14T04:33:11
| 2020-07-14T04:33:11
| 278,269,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 339
|
r
|
server.R
|
library(shiny)
library(dplyr)
library(tidyr)
library(tm)
source("next_word_model.R")
shinyServer(function(input, output) {
output$out <- reactive({
validate(
need(input$box1, "Please type in your words in the above textbox")
)
next_word <- next_word(input$box1)
})
})
|
429e17012c349a4e6b0ac10748365e4b4cb9181a
|
175ec2deb5ec05b17d6fd1b97a8a00d160fcfa27
|
/Eimeria_Lab_code/P3_112019_Eim_combine.R
|
091ffd3bf0f3c4f9262df1489034b59ec786a7ae
|
[] |
no_license
|
LubomirBednar/PhD
|
d26980dc3b7e755181bae3bdd3e8a236331017c6
|
3271be15b34d2f7e8c4171fae578c46a91955710
|
refs/heads/master
| 2021-11-26T10:01:21.070210
| 2021-10-29T09:04:19
| 2021-10-29T09:04:19
| 204,975,093
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,269
|
r
|
P3_112019_Eim_combine.R
|
# P3 combining script for weight, oocysts, qPCR, RT-qPCR, ELISA and hopefully FACS
library(httr)
library(RCurl)
library(dplyr)
library(Rmisc)
# load in weight and oocysts
P3_oocyst1 <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_oocyst.csv")
P3_oocyst1$X <- NULL
P3_oocyst2 <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_oocysts.csv")
P3_oocyst2$X <- NULL
names(P3_oocyst2)[names(P3_oocyst2) == "oocyst_1"] <- "oocyst_sq1"
names(P3_oocyst2)[names(P3_oocyst2) == "oocyst_2"] <- "oocyst_sq2"
names(P3_oocyst2)[names(P3_oocyst2) == "oocyst_3"] <- "oocyst_sq3"
names(P3_oocyst2)[names(P3_oocyst2) == "oocyst_4"] <- "oocyst_sq4"
names(P3_oocyst2)[names(P3_oocyst2) == "AVG"] <- "oocyst_mean"
P3_oocyst <- merge(P3_oocyst1, P3_oocyst2, all = T)
P3a_record <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3a_112019_Eim_Record.csv")
P3b_record <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3b_112019_Eim_Record.csv")
P3b_record$X <- NULL
P3a_record$labels <- sub("^", "P3a", P3a_record$labels)
P3a_record$batch <- "a"
P3b_record$labels <- sub("^", "P3b", P3b_record$labels)
P3b_record$batch <- "b"
P3_record <- rbind(P3a_record, P3b_record)
P3_para <- merge(P3_record, P3_oocyst)
P3_para <- read.csv("C:/Users/exemp/Documents/P3_para.csv")
P3_design <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experimental_design/P3_112019_Eim_design.csv")
P3_para <- merge(P3_para, P3_design, all.x = T)
P3_para$day_change <- NULL
# load in qPCRs
P3_qPCR <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_qPCR.csv")
P3_qPCR$X <- NULL
P3_qPCR$dpi <- 8
P3_qPCR$batch <- "b"
P3 <- merge(P3_para, P3_qPCR, all.x = T)
# load in RT-qPCRs
# P3_RT <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_RTqPCR.csv"
# P3_RT <- read.csv(text = getURL(P3_RT))
# P3_RT$X <- NULL
# load in CEWE ELISA (important to merge CEWE ELISAs with qPCR and RTqPCR to give them labels)
P3_CEWE_ELISA <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_CEWE_ELISA.csv")
P3_CEWE_ELISA$X <- NULL
colnames(P3_CEWE_ELISA)[2] <- "IFNy_CEWE"
P3 <- merge(P3, P3_CEWE_ELISA, all.x = T)
# # load in FEC ELISA
# P3_FEC_ELISA <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_FEC_ELISAs/P3_112019_Eim_FEC_ELISA1_complete.csv"
# P3_FEC_ELISA <- read.csv(text = getURL(P3_FEC_ELISA))
# P3_FEC_ELISA$X <- NULL
# colnames(P3_FEC_ELISA)[2] <- "IFNy_FEC"
# load in qPCR MCs for Eimeria
P3_MC <- read.csv("https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/E7%26P3_Eim_MCs.csv")
P3_MC$X <- NULL
P3_MC$X.1 <- NULL
P3_MC$batch <- "b"
P3_MC$dpi <- 8
P3 <- merge(P3, P3_MC, all.x = T)
# up to date most complete P3 dataset
write.csv(P3, "./Eimeria_Lab/data/Experiment_results/P3_112019_Eim_COMPLETE.csv")
write.csv(P3, "../GitHub/Eimeria_Lab/data/Experiment_results/P3_112019_Eim_COMPLETE.csv")
|
09b3c5dfabd5e78466fdb2fe8f7271d5db15457b
|
6787f2313745bb168045e0c8c01347914776244d
|
/cachematrix.R
|
94c3777a4bd46266ee0a1d0d541be2ce10744c8f
|
[] |
no_license
|
neelb84/ProgrammingAssignment2
|
4e6cb827ed7b5f9b365ff24696dd53a5a2c1811e
|
35fc6b34aed8dc0b2cfa24e25dc0d1c552be0a35
|
refs/heads/master
| 2021-01-18T03:06:40.857811
| 2015-03-22T20:01:19
| 2015-03-22T20:01:19
| 32,645,193
| 0
| 0
| null | 2015-03-21T18:46:50
| 2015-03-21T18:46:49
| null |
UTF-8
|
R
| false
| false
| 2,287
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##The couple of functions makeCacheMatrix and cacheSolve creates a matrix and cache its inverse, this is to avoid calculating the inverse of the same matrix multiple times
## instead cache the inverse matrix and call when required without re-calculating it
##The first function makeCacheMatrix creates a special matrix that can cache its inverse
##The second function cacheSolve calculates the inverse of the matrix created above, if the inverse is already calculated then it returns the value from the cache
## Write a short comment describing this function
##Firstly the makeCacheMatrix is defined with a Matrix as an argument and the function first initializes the 'Inverse' matrix (yet to calculate it)
## Set function assigns the matrix x from makeCacheMatrix to the cached x and then initializes I to NULL in the makeCacheMatrix environment
##Next 3 set of functions, first returns the cached Matrix defined above, secondly sets cached inverse matrix to 'I' and lastly returns inverse 'I' cached in makeVector environment
##makeCacheMatrix finally returns the list of functions defined in makeCacheMatrix
makeCacheMatrix <- function(x = matrix()) {
I <- NULL
set <- function(x) {
x <<- x
I <<- NULL
}
get <- function() x
setinv <- function(inverse) I <<- inverse
getinv <- function() I
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
##Firstly assigns the inverse defined in makeCacheMatrix to I
## If I (inverse matrix) is already defined for the 'x' above, then the function returns the cached "I" and prints "getting cached data"
##Otherwise it assigns the'x' locally to "data" and use 'solve' to calculate the inverse of matrix 'x' and sets it to the envitonment of 'x'
## Finally returns matrix inverse 'I'
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
I <- x$getinv()
if(!is.null(I)) {
message("getting cached data")
return(I)
}
data <- x$get()
I <- solve(data, ...)
x$setinv(I)
I
}
|
5c781a6692515701711c7c044e1b4c3ae8a52424
|
36e231c2defb96012d7e75269654b43ac859d714
|
/Analysis_after_BAM_Scripts/Fst_SauronPlots.R
|
432424bc0fe37ffc24cc0f110ac17153a9b8c7c3
|
[] |
no_license
|
PaulKnoops/Experimental_Evolution_Sequence_Repo
|
179b9b4124f19b707a604aa20d27a2b822953cc7
|
11f6af2ec5634181b11469f4a7f9cebf4e1ed5fe
|
refs/heads/master
| 2020-03-12T16:01:25.734641
| 2018-05-04T18:51:18
| 2018-05-04T18:51:18
| 130,705,291
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,436
|
r
|
Fst_SauronPlots.R
|
# Sauron Plots and quantiles:
require(tidyverse)
# Read in data:
# 115
# 1:2 = ConR1:ConR2, 1:3 = SelR1:ConR1, 3:4 = SelR1:SelR2, 2:4 = SelR2:ConR2
#CompR1 <- fread('../Data/Fst_combinedComparisons/combined_fst_1:3.csv')
#CompR2 <- fread('../Data/Fst_combinedComparisons/combined_fst_2:4.csv')
#Controls <- fread('../Data/Fst_combinedComparisons/combined_fst_1:2.csv')
#Selections <- fread('../Data/Fst_combinedComparisons/combined_fst_3:4.csv')
#38: 5:6 = ConR1:ConR2, 7:5 = SelR1:ConR1, 7:8 = SelR1:SelR2, 6:8 = SelR2:ConR2
CompR1 <- fread('../Data/Fst_combinedComparisons/combined_fst_5:7.csv')
CompR2 <- fread('../Data/Fst_combinedComparisons/combined_fst_6:8.csv')
Controls <- fread('../Data/Fst_combinedComparisons/combined_fst_5:6.csv')
Selections <- fread('../Data/Fst_combinedComparisons/combined_fst_7:8.csv')
datComp <- merge(CompR1, CompR2,by=c("window","chr"))
datComp$Thing <- "Comparison"
datNoncomp <- merge(Controls, Selections,by=c("window","chr"))
datNoncomp$Thing <- "WithinTreatment"
head(datComp)
head(datNoncomp)
#ggplot(datComp, aes(x=meanFst.x, y=meanFst.y)) + geom_point(size=0.5, alpha=0.5, colour='firebrick3')
#ggplot(datNoncomp, aes(x=meanFst.x, y=meanFst.y)) + geom_point(size=0.5, alpha=0.5, colour='grey30')
ppplt <- ggplot(datComp, aes(x=meanFst.x, y=meanFst.y)) +
geom_point(size=0.5, alpha=0.5, colour='firebrick3') +
geom_point(data=datNoncomp,
aes(x=meanFst.x, y=meanFst.y),
size=0.5, alpha=0.5,
colour='grey30') +
ggtitle("Mean Fst Distribution") +
xlab(expression(atop("ConR1:SelR1[Red]", 'ConR1:ConR2[Grey]'))) +
ylab(expression(atop("ConR2:SelR2[Red]", 'SelR1:SelR2[Grey]')))
print(ppplt)
#Can put as one plot if wanted
#source('multiplotFunction.R')
#ppl_115 <- ppplt
#ppl_38 <- ppplt
#multiplot(ppl_115, ppl_38, cols=1)
#Quantiles for interest sake:
with(datComp, quantile(meanFst.x,
probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.999)))
with(datComp, quantile(meanFst.y,
probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.999)))
with(datNoncomp, quantile(meanFst.x,
probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.999)))
with(datNoncomp, quantile(meanFst.y,
probs = c(0, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.999)))
|
5b9297361230a0ce09404961a5bfa63aca6345f4
|
3282d51ed8f89ead3d9f16af1e843501f5fbe8cb
|
/man/fun.N_1.Rd
|
8c7370f9af144f21375bff4071a5f122047231a7
|
[] |
no_license
|
cran/GMDHreg
|
7d69b110f57df5e1220c007a88b6d3f0c695013b
|
0104cbc52becf0515e3ea6007b77c66b625325ab
|
refs/heads/master
| 2021-07-09T12:34:51.724176
| 2021-07-05T11:30:02
| 2021-07-05T11:30:02
| 174,552,055
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 253
|
rd
|
fun.N_1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun.N_1.R
\name{fun.N_1}
\alias{fun.N_1}
\title{GMDH MIA auxiliar functions}
\usage{
fun.N_1(x, y)
}
\description{
Performs auxiliar tasks to predict.mia
}
\keyword{internal}
|
a62ac82d3ae8c278981fb45ca853bd71be75b69d
|
b11ff6361b702588dfd3ec40d95b6d2cb291f34d
|
/Desktop/Courserarprogrmming/UCI HAR Dataset/run_analysis.R
|
adbd13ad0b0174825ee307d3aa505d39c6d10574
|
[] |
no_license
|
kakelly49/Assignment---Tidy-UCIHAR-dataset
|
e988dd90214e24f60b8f3ed28cf498693d7f17cd
|
88feba43b22d0533887433e6348c3efa6c490e87
|
refs/heads/master
| 2021-01-23T05:14:24.584330
| 2017-03-27T19:16:22
| 2017-03-27T19:16:22
| 86,287,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,136
|
r
|
run_analysis.R
|
## Run_analysis.R - creating a tidy version of a subset of the UCIHAR datasets
## Download and unzip files. Set directory in R to folder where files are saved
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile="UCI_HAR_dataset")
unzip("UCI_HAR_dataset")
library(dplyr)
## read the following files as tables from the test folder:
## - X_test, Y_test, subject_test
## and use cbind to append them together
rawtest <-read.table("test/X_test.txt")
ytest <-read.table("test/Y_test.txt")
subjecttest <-read.table("test/subject_test.txt",skip=1)
testing<-cbind(subjecttest,ytest,rawtest)
## read the following files as tables from the train folder:
## - X_train, Y_train, subject_train
## and use cbind to append them together
rawtrain <-read.table("train/X_train.txt")
ytrain <-read.table("train/Y_train.txt")
subjecttrain <-read.table("train/subject_train.txt")
training<-cbind(subjecttrain,ytrain,rawtrain)
## read activity labels and select the second column(V2)which
## has character values
temp <-read.table("activity_labels.txt")
activity_labels = tolower(as.character(temp$V2))
## Use rbind to merge test and train files
mergedata <-rbind(testing, training)
## Create column names using features.txt file:
## Read file, select a subset and transpose the
## subset so that row values are now column values
measurements<-read.table("features.txt")
measures <-select(measurements,V2)
makecolnames<-t(measures)
##clean makecolnames data
makecolnames<-gsub("\\(","",makecolnames) %>%
{gsub("()","",.)} %>%
{gsub("-","",.)} %>%
{gsub("\\)","",.)}
makecolnames<-gsub("BodyBody","Body",makecolnames)
##create name for the new column with user ID's
newcolumns<-c("subject","activity")
makecolnames<-append(newcolumns,makecolnames)
##Assign column names to the mergedata
colnames(mergedata) <-(makecolnames)
##convert activity numbers to activity names
mergedata$activity = as.factor(mergedata$activity)
levels(mergedata$activity) = activity_labels
## Create a vector of desired column names containing - "mean",
## "std", "subject", "activity" anywhere in the column name
## unselect columns with "angle" in the column name
makecolnames<-grep("angle",makecolnames,invert=TRUE,value=TRUE)
column_names<-grep("mean|subject|std|activity",makecolnames,value=TRUE)
##select the columns I want - contain "mean","subject" or "std" or "activity" in column name
l<-length(column_names)
tidyUCIHAR<-subset(mergedata, select=column_names[1:l])
write.table(tidyUCIHAR, file="tidyUCIHAR.txt",row.name=FALSE)
## Add a new data element by pasting subject and activity
## Split tidy UCIHAR on the new value subjactivity
## Process each matrix formed by the split fuction and calculate the mean
tempfile<-mutate(tidyUCIHAR,subjactivity=paste(tidyUCIHAR$subject,tidyUCIHAR$activity))
splitfile <-split(tempfile,tempfile$subjactivity)
## Process each matrix formed by the split fuction and calculate the mean
## Start by setting up the base file, the max value of the counter and
## the initial value of the counter
lastmatrix <-length(splitfile)
counter<-1
temp_file <-as.data.frame(splitfile[counter])
temp2 <-lapply(temp_file[,3:81],mean)
base_file <-append(temp_file[1,1:2],temp2)
base_file <- data.frame(base_file)
colnames(base_file)<-colnames(tidyUCIHAR)
counter<-counter+1
while (counter<=lastmatrix) {
# read nextmatrix, calculate mean for all variables and
# append result to base_file
temp_file <-as.data.frame(splitfile[counter])
temp2 <-lapply(temp_file[,3:81],mean)
new_file <-append(temp_file[1,1:2],temp2)
new_file <- data.frame(new_file)
colnames(new_file)<-colnames(tidyUCIHAR)
base_file<-rbind(base_file,new_file)
counter<-counter+1
}
tidyUCIHARmeans<-base_file
|
ee17dd203f3d860c482c8b4fc2d5f70656b1e558
|
1aaa4bb83953b88fe67455aac703664c1ce7f7aa
|
/man/with_dataset.Rd
|
687b3b94e66b1c9d95765afb740385e1d41071e7
|
[] |
no_license
|
rstudio/tfdatasets
|
9773255c6ed697d68a42e17403426bfe96c30591
|
90c846b36b02c6ce44b87bf6d379f0dbcaece859
|
refs/heads/main
| 2022-07-07T02:56:17.570781
| 2022-06-30T00:09:11
| 2022-06-30T00:09:11
| 105,773,021
| 32
| 14
| null | 2022-06-30T00:07:12
| 2017-10-04T13:39:34
|
R
|
UTF-8
|
R
| false
| true
| 985
|
rd
|
with_dataset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset_iterators.R
\name{with_dataset}
\alias{with_dataset}
\title{Execute code that traverses a dataset}
\usage{
with_dataset(expr)
}
\arguments{
\item{expr}{Expression to execute}
}
\description{
Execute code that traverses a dataset
}
\details{
When a dataset iterator reaches the end, an out of range runtime error
will occur. You can catch and ignore the error when it occurs by wrapping
your iteration code in a call to \code{with_dataset()} (see the example
below for an illustration).
}
\examples{
\dontrun{
library(tfdatasets)
dataset <- text_line_dataset("mtcars.csv", record_spec = mtcars_spec) \%>\%
dataset_prepare(x = c(mpg, disp), y = cyl) \%>\%
dataset_batch(128) \%>\%
dataset_repeat(10)
iter <- make_iterator_one_shot(dataset)
next_batch <- iterator_get_next(iter)
with_dataset({
while(TRUE) {
batch <- sess$run(next_batch)
# use batch$x and batch$y tensors
}
})
}
}
|
4e7f41f0cfb0e0a56c4d7c6c3e79f82d81f44c7b
|
b3d2bedd9bbb288d7f4e8b9fdf6c2591a98beb72
|
/Dashboard + feedback page (compleet).R
|
69e0ae77b856a87b82ef04171ae67e8eb6dc8b79
|
[] |
no_license
|
Enzovs/Society_de_Waag
|
77a9174e446d09dfea7d052155a8124dfde86509
|
21b241319765847526886bce3503e877c236058f
|
refs/heads/master
| 2020-06-10T23:35:49.762571
| 2017-03-19T11:43:33
| 2017-03-19T11:43:33
| 75,844,964
| 2
| 0
| null | 2016-12-07T15:06:47
| 2016-12-07T14:42:40
| null |
UTF-8
|
R
| false
| false
| 21,383
|
r
|
Dashboard + feedback page (compleet).R
|
setwd("C:/Users/Win7/Desktop/HVA TWK/JAAR 3/Minor/Project de waag")
rm(list = ls())
load("C:/Users/Win7/Desktop/HVA TWK/JAAR 3/Minor/Project de waag/.RData")
library(lubridate)
library(shiny)
library(shinydashboard)
library(tidyr)
library(dplyr)
library(ggplot2)
library(ggmap)
library(dplyr)
library(leaflet)
library(timevis)
library(plotly)
library(htmltools)
library(stringr)
# TotalData <- read.csv("TotalData.csv")
# TotalData$Date <- ymd(TotalData$Date)
# TotalData$id <- as.factor(TotalData$id)
### DASHBOARD UI------
ui <- dashboardPage(
dashboardHeader(title = "Gewaagd Dashboard", titleWidth = 250,
tags$li(a(href = 'https://www.waag.org/nl',
img(src = 'waag-logo2.jpg',
title = "Company Home", height = "30px"),
style = "padding-top:10px; padding-bottom:10px;"),
class = "dropdown")),
dashboardSidebar(sidebarMenu(
menuItem("Overview", tabName = "Overview", icon = icon("dashboard")),
menuItem("Advanced", tabName = "Advanced", icon = icon("th")),
menuItem("MathFact", tabName = "MathFACT", icon = icon("dashboard")),
menuItem("Feedback", tabName = "Feedback", icon = icon("dashboard")),
menuItem("Norm voor luchtkwaliteit:",HTML(paste("Bovengrens is vastgesteld op <br/>
gemiddeld 40 μg/m3 per uur.<br/>
Overschrijding van het <br/> uurgemiddelde van 200 μg/m3 is <br/>
toegestaan op niet meer <br/> dan 18 keer per jaar. <br/>
Volgens EU-norm"))))),
dashboardBody(
#####BASIC PAGE----
tabItems(
tabItem(tabName="Overview",
fluidPage(
div(class="outer",
tags$style(type = "text/css", ".outer {position: fixed; top: 41px; left: 0; right: 0; bottom: 0; overflow: hidden; padding: 0}"),
leafletOutput("leafBA", width="100%", height="100%")),
fluidRow(splitLayout(
valueBoxOutput("WindRBoxBA",width="16.6%"),
valueBoxOutput("WindKBoxBA",width="16.6%"),
valueBoxOutput("TempBoxBA",width="16.6%"),
valueBoxOutput("RainBoxBA",width="16.6%"),
valueBoxOutput("NO2BoxBA",width="16.6%"),
valueBoxOutput("PMBoxBA",width="16.6%"))),
box(title = "Inputs", solidHeader = TRUE, width = 4, background = "black",
collapsible = TRUE,
"Pas hier de input van de kaart aan",
selectInput("stofBA", "Toon stof:", #mogelijkheid tot uitbreiden
choices = list("StikstofDioxide")),
dateInput("anaBA", "Kies datum:", value = as.Date("2016-06-16"), language = "nl",
min = as.Date("2016-06-01"),max = as.Date("2016-08-31")),
sliderInput("timeBA", "Tijdlijn",
min = 0, max = 23,
value = 18, step=1)
)
)
),
####ADVANCED PAGE------
tabItem(
tabName="Advanced",
sidebarLayout(
mainPanel(
verticalLayout(
tabBox(
title = "First tabBox", width = "100%",
# The id lets us use input$tabset1 on the server to find the current tab
id = "tabset1",
tabPanel("Map", leafletOutput("leafAD")),
tabPanel("Plot", plotlyOutput("TR"))),
timevisOutput("timelineAD"))
),
sidebarPanel(
verticalLayout(
checkboxGroupInput("sensID","Sensor:",
choices = c("Februariplein 14",
"Korte Koningsstraat 5",
"Kromme Waal 29-30 a",
"Kromme Waal 29-30 b",
"Nieuwmarkt 113",
"Prins Hendrikkade 82",
"Sarphatistraat 62",
"Sint Antoniesbreestraat 35",
"Valkenburgerstraat 123",
"Valkenburgerstraat 83 a")),
dateRangeInput("ana", "Kies periode:",
start = as.Date("2016-06-20"), end= as.Date("2016-06-28")),
sliderInput("time", "Tijdlijn",
min = 0, max = 23,
value = c(0,23), step=1),
tags$textarea(id="foo", rows=3, cols=40,
"Heeft u iets waargenomen? Plaats het in de tijdlijn!"),
actionButton("readCom",label="Plaats opmerking")
)
)
)),
#### MATHFACTS PAGE-----
tabItem(
tabName="MathFACT",
fluidRow(
box(title="Lineair model",
plotOutput("LinPlot"),
status = "primary", width = 6),
box(title="GAM model",
plotOutput("GAMPlot"),
status = "primary", width = 6)
),
fluidRow(
box(title = "Input", width = 4, solidHeader = TRUE, status = "primary",
selectInput(inputId = "featureInput1",
label = "Selecteer een GGD-Sensor",
choices = c("GGD-Vondelpark", "GGD-OudeSchans")),
selectInput(inputId = "sensorInput1",
label = "Selecteer een Waag-Sensor",
choices = c("Februariplein 14",
"Korte Koningsstraat 5",
"Kromme Waal 29-30 a",
"Kromme Waal 29-30 b",
"Nieuwmarkt 113",
"Prins Hendrikkade 82",
"Sarphatistraat 62",
"Sint Antoniesbreestraat 35",
"Valkenburgerstraat 123",
"Valkenburgerstraat 83 a"))),
tabBox(title="Model Validation", width = 8,
tabPanel("FIT",dataTableOutput('modelfit')),
tabPanel("RMSE",dataTableOutput('performance')),side="right")
)),
#### FEEDBACK PAGE--------
tabItem(
tabName="Feedback",bootstrapPage(
# We'll add some custom CSS styling -- totally optional
includeCSS("shinychat.css"),
# And custom JavaScript -- just to send a message when a user hits "enter"
# and automatically scroll the chat window for us. Totally optional.
includeScript("sendOnEnter.js"),
div(
# Definieer de layout
class = "container-fluid",
div(class = "row-fluid",
# Titel
tags$head(tags$title("ShinyChat")),
# Creeer de header
div(class="span6", style="padding: 10px 0px;",
h1("ShinyChat"),
h4("Feedback is always welcome")
), div(class="span6", id="play-nice",
"IP Addresses are logged... be a decent human being."
)
),
# The main panel
div(
class = "row-fluid",
mainPanel(
# Create a spot for a dynamic UI containing the chat contents.
uiOutput("chat"),
# Create the bottom bar to allow users to chat.
fluidRow(
div(class="span10",
textInput("entry", "")
),
div(class="span2 center",
actionButton("send", "Send")
)
)
),
# The right sidebar
sidebarPanel(
# Let the user define his/her own ID
textInput("user", "Your User ID:", value=""),
tags$hr(),
h5("Connected Users"),
# Create a spot for a dynamic UI containing the list of users.
uiOutput("userList"),
tags$hr(),
helpText(HTML("<p>Built using R & <a href = \"http://rstudio.com/shiny/\">Shiny</a>.<p>Source code available <a href =\"https://github.com/trestletech/ShinyChat\">on GitHub</a>."))
))))))))
server <- function(input, output, session) {
###Chatomgevings variabelen----
# Globally define a place where all users can share some reactive data.
vars <- reactiveValues(chat=NULL, users=NULL)
# Restore the chat log from the last session.
if (file.exists("chat.Rds")){
vars$chat <- readRDS("chat.Rds")
} else {
vars$chat <- "Welcome to Shiny Chat!"
}
#' Get the prefix for the line to be added to the chat window. Usually a newline
#' character unless it's the first line.
linePrefix <- function(){
if (is.null(isolate(vars$chat))){
return("")
}
return("<br />")
}
####FUNCTIONS------
# Rows of TotalData for the single date and hour selected on the basic
# page, restricted to sensors with a known latitude.
selDat <- function(){
  TotalData %>%
    filter(Date == input$anaBA,
           Time == input$timeBA,
           !is.na(lat))
}
# Map the pollutant chosen on the basic page to the matching prediction
# column name in TotalData. Only NO2 ("StikstofDioxide" -> "lm.pred") is
# wired up; switch() returns NULL for any other choice.
selSTOFba <- reactive({switch(input$stofBA,
                              StikstofDioxide="lm.pred")})  # room for extension
# Same mapping, intended for the advanced page.
# NOTE(review): this also reads input$stofBA rather than an advanced-page
# input -- presumably intentional, but confirm against the UI definition.
selSTOFad <- reactive({switch(input$stofBA,
                              StikstofDioxide="lm.pred")})  # room for extension
# Rows of TotalData inside the date range and hour-of-day window chosen
# on the advanced page, restricted to sensors with a known latitude.
selDatAD <- function(){
  TotalData %>%
    filter(Date >= input$ana[1],
           Date <= input$ana[2],
           Time >= input$time[1],
           Time <= input$time[2],
           !is.na(lat))
}
# Choose an x-axis break interval for the time-series plot based on the
# length of the selected period: under a week -> 5-hour ticks, under a
# month -> daily ticks, otherwise weekly ticks.
selBreak <- function(){
  stamps <- selDatAD()$localTime
  span_days <- as.numeric(difftime(min(stamps, na.rm = TRUE),
                                   max(stamps, na.rm = TRUE),
                                   units = "days"))
  # span_days is min - max, hence negative; comparisons are reversed.
  if (span_days <= -30) {
    "1 week"
  } else if (span_days <= -7) {
    "1 day"
  } else {
    "5 hours"
  }
}
# Map the GGD reference-station label chosen in the UI to the matching
# column name in TotalData ("ggd" = Vondelpark, "ggd_os" = Oude Schans).
selSELOmf <- reactive({switch(input$featureInput1,
                              `GGD-Vondelpark`="ggd",
                              `GGD-OudeSchans`="ggd_os")})
# Identity mapping for the Waag sensor picker: returns the selected address
# string itself for every known sensor (and NULL, per switch() semantics,
# for any value not listed). Kept as an explicit whitelist rather than
# passing input$sensorInput1 straight through.
selSESEmf <- reactive({switch(input$sensorInput1,
                              `Februariplein 14`="Februariplein 14",
                              `Korte Koningsstraat 5`="Korte Koningsstraat 5",
                              `Kromme Waal 29-30 a`="Kromme Waal 29-30 a",
                              `Kromme Waal 29-30 b`="Kromme Waal 29-30 b",
                              `Nieuwmarkt 113`="Nieuwmarkt 113",
                              `Prins Hendrikkade 82`="Prins Hendrikkade 82",
                              `Sarphatistraat 62`="Sarphatistraat 62",
                              `Sint Antoniesbreestraat 35`="Sint Antoniesbreestraat 35",
                              `Valkenburgerstraat 123`="Valkenburgerstraat 123",
                              `Valkenburgerstraat 83 a`="Valkenburgerstraat 83 a")})
#####COMMENTTIMEVIS FUNCTIONS----------
# Read the saved timeline comments back from commDat.csv (semicolon
# separated, with at least `start` and `end` columns) and coerce the
# start/end columns to Date so timevis can place the items.
loadData <- function() {
  comments <- read.csv("commDat.csv", header = TRUE, sep = ";",
                       stringsAsFactors = FALSE)
  comments$start <- as.Date(comments$start)
  comments$end <- as.Date(comments$end)
  data.frame(comments)
}
# Text currently in the comment box, captured when the "Plaats opmerking"
# button (input$readCom) is clicked.
comText <- eventReactive(input$readCom, {
  input$foo
})
# Append the new comment plus the selected period to commDat.csv so it can
# be restored by loadData() on the next start.
# NOTE(review): cat() has no `end` argument; `end = ...` matches no formal,
# falls into `...` and is simply printed as the third field, so the name is
# dead weight. Also, cat's default sep = " " puts spaces around the ";"
# delimiters; those spaces survive into the fields loadData() reads back.
obs <- observe({
  cat(comText(),";",as.character(paste(input$ana[1],input$time[1],sep=" ")),";",
      end = as.character(paste(input$ana[2],input$time[2],sep=" ")),
      '\n', file = "commDat.csv", append = TRUE)
})
#####BASIC PAGE-----------
output$leafBA <- renderLeaflet({
leaflet() %>%
addProviderTiles("Stamen.TonerLite",
options = providerTileOptions(noWrap = TRUE)) %>%
fitBounds(4.866167, 52.35968, 4.908988, 52.37665)
})
observe({
data1 <- selDat()
leafletProxy("leafBA",data=data1) %>%
clearPopups() %>% clearShapes() %>%
addCircles(radius=20, fill=TRUE, col=~no2col,
popup=~htmlEscape(paste("Adress:",Adress,"NO2:",
round(lm.pred),
" μg/m3")))
})
###valueboxesBA----------
output$WindRBoxBA <- renderValueBox({
valueBox(
paste0(selDat()$direct), "Windrichting",
icon = icon("location-arrow", lib = "font-awesome"),
color = "blue")
})
output$WindKBoxBA <- renderValueBox({
valueBox(
paste0(selDat()$Windsnelheid," m/s"), "Windkracht",
icon = icon("fa", lib = "font-awesome"),
color = "blue")
})
output$TempBoxBA <- renderValueBox({
valueBox(
paste0(selDat()$Temp/10," °C"), "Temperatuur",
icon = icon("sun-o", lib="font-awesome"), color = 'blue')
})
output$RainBoxBA <- renderValueBox({
valueBox(
paste0(selDat()$Neerslag," mm"), "Regen",
icon = icon("tint", lib = "glyphicon"),
color = "blue")
})
output$NO2BoxBA <- renderValueBox({
valueBox(
paste0(round(selDat()$lm.pred)," μg/m3"), "Stikstof",
icon = icon("cloud", lib = "font-awesome"),
color = names(sort(table(selDat()$no2col),decreasing = T))[1])
})
output$PMBoxBA <- renderValueBox({
valueBox(
paste0(round(selDat()$ggd)," μg/m3"), "Vondel \n GGD",
icon = icon("yelp", lib = "font-awesome"),
color = names(sort(table(selDat()$ggdcol),decreasing = T))[1])
})
##### ADVANCED PAGE--------
#### timevissesAD------
output$timelineAD <- renderTimevis({
timevis(loadData())
})
observeEvent(input$readCom, {
addItem("timelineAD", list(content = comText(),
start = as.character(paste(input$ana[1],input$time[1],sep=" ")),
end = as.character(paste(input$ana[2],input$time[2],sep=" "))))
centerItem("mytime", "item1")
})
### plotlyAD met tabs----
output$tabset1Selected <- renderText({
input$tabset1
})
output$leafAD <- renderLeaflet({
leaflet() %>%
addProviderTiles("Stamen.TonerLite",
options = providerTileOptions(noWrap = TRUE)) %>%
fitBounds(4.866167, 52.35968, 4.908988, 52.37665)
})
observe({
leafletProxy("leafAD",data=selDatAD()[selDatAD()$Adress%in%c(input$sensID),]) %>%
clearShapes() %>% clearPopups() %>%
addCircles(radius=10, fill=TRUE, col="Darkred",
popup=~htmlEscape(paste("Adress:",Adress)))
})
output$TR <- renderPlotly({
TR <- ggplot(data=selDatAD()[selDatAD()$Adress%in%input$sensID,],
aes(x = localTime))+
geom_line(aes(y = lm.pred, col=Adress))+
geom_line(aes(y = ggd), col="Black", linetype = 2)+
labs(list(title = "Sensor vergelijking",x="Tijdlijn",y="NO2-waarde",col="Locatie"))+
scale_x_datetime(date_breaks=selBreak(),date_labels = "%Y-%m-%d %H:%M")+
theme(axis.text.x = element_text(size=10,angle=45,color="Black"))
ggplotly(TR) %>% layout(margin = list(b = 160))
})
### MathFacts----
output$LinPlot <- renderPlot({
OBJ <- TotalData %>% filter(Adress==selSESEmf())
LinPlot <- ggplot(data=OBJ, aes(x=lm.pred, y=OBJ[,selSELOmf()]))+
geom_point(alpha=0.2, color="black")+
geom_smooth(aes(x=lm.pred, y=OBJ[,selSELOmf()]), color="black",method="lm")+
geom_line(aes(x=lm.pred, y=lm.pred), color="blue", linetype=2)+
ggtitle(paste(selSESEmf(),"naar",input$featureInput1))+
labs(list(y = "GGD ground", x="Lineaire voorspelling"))
LinPlot
})
output$GAMPlot <- renderPlot({
OBJ2 <- TotalData %>% filter(Adress==selSESEmf())
GAMPlot <- ggplot(data=OBJ2, aes(x=gam.pred, y=OBJ2[,selSELOmf()]))+
geom_point(alpha=0.2, color="black")+
geom_smooth(aes(x=gam.pred, y=OBJ2[,selSELOmf()]), color="black")+
geom_line(aes(x=gam.pred, y=gam.pred), color="blue", linetype=2)+
ggtitle(paste(selSESEmf(),"naar",input$featureInput1))+
labs(list(y = "GGD ground", x="GAM voorspelling"))
GAMPlot
})
output$modelfit = renderDataTable({
Model.Fit
})
output$performance = renderDataTable({
RMSE
})
### FEEDBACK PAGE-----
# Create a spot for reactive variables specific to this particular session
sessionVars <- reactiveValues(username = "")
# Track whether or not this session has been initialized. We'll use this to
# assign a username to unininitialized sessions.
init <- FALSE
# When a session is ended, remove the user and note that they left the room.
session$onSessionEnded(function() {
isolate({
vars$users <- vars$users[vars$users != sessionVars$username]
vars$chat <- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-exit",
sessionVars$username,
"left the room.")))
})
})
# Observer to handle changes to the username
observe({
# We want a reactive dependency on this variable, so we'll just list it here.
input$user
if (!init){
# Seed initial username
sessionVars$username <- paste0("User", round(runif(1, 10000, 99999)))
isolate({
vars$chat <<- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-enter",
sessionVars$username,
"entered the room.")))
})
init <<- TRUE
} else{
# A previous username was already given
isolate({
if (input$user == sessionVars$username || input$user == ""){
# No change. Just return.
return()
}
# Updating username
# First, remove the old one
vars$users <- vars$users[vars$users != sessionVars$username]
# Note the change in the chat log
vars$chat <<- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-change",
paste0("\"", sessionVars$username, "\""),
" -> ",
paste0("\"", input$user, "\""))))
# Now update with the new one
sessionVars$username <- input$user
})
}
# Add this user to the global list of users
isolate(vars$users <- c(vars$users, sessionVars$username))
})
# Keep the username updated with whatever sanitized/assigned username we have
observe({
updateTextInput(session, "user",
value=sessionVars$username)
})
# Keep the list of connected users updated
output$userList <- renderUI({
tagList(tags$ul( lapply(vars$users, function(user){
return(tags$li(user))
})))
})
# Listen for input$send changes (i.e. when the button is clicked)
observe({
if(input$send < 1){
# The code must be initializing, b/c the button hasn't been clicked yet.
return()
}
isolate({
# Add the current entry to the chat log.
vars$chat <<- c(vars$chat,
paste0(linePrefix(),
tags$span(class="username",
tags$abbr(title=Sys.time(), sessionVars$username)
),
": ",
tagList(input$entry)))
})
# Clear out the text entry field.
updateTextInput(session, "entry", value="")
})
# Dynamically create the UI for the chat window.
output$chat <- renderUI({
if (length(vars$chat) > 500){
# Too long, use only the most recent 500 lines
vars$chat <- vars$chat[(length(vars$chat)-500):(length(vars$chat))]
}
# Save the chat object so we can restore it later if needed.
saveRDS(vars$chat, "chat.Rds")
# Pass the chat log through as HTML
HTML(vars$chat)
})
}
###RUN APP----
shinyApp(ui, server)
# windroos plot:
# output$NOXplot <- renderPlotly({
# p <- plot_ly(plotly::wind, t = ~selDat()$Windrichting, r = ~(selDat()$Windsnelheid/10),
# type = 'area',color=I("Darkred"))
# layout(p, radialaxis = list(ticksuffix="m/s"),orientation = 270)
# })
|
fabafd99f4509bd361fa5d2396000bb52a0aa685
|
3dd54dec09f7d0c3cde2dae19178faf692a2b35e
|
/man/Hartnagel.Rd
|
add1eefadac73fa2794c87c86ef4300000d4d3d1
|
[] |
no_license
|
courtiol/LM2GLMM
|
34c1f63d85996edbd16c3f6518750f805e6965ee
|
6646de11a2309e63f7f3c2dd03483e4f0516b23a
|
refs/heads/master
| 2022-07-22T01:39:49.159906
| 2022-06-29T05:28:23
| 2022-06-29T05:28:23
| 86,590,032
| 3
| 2
| null | 2022-06-28T17:58:13
| 2017-03-29T14:09:26
|
R
|
UTF-8
|
R
| false
| true
| 1,754
|
rd
|
Hartnagel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Hartnagel}
\alias{Hartnagel}
\title{Canadian Crime-Rates Time Series (from package carData)}
\format{
This data frame contains the following columns:
\describe{
\item{year}{
1931--1968.
}
\item{tfr}{
Total fertility rate per 1000 women.
}
\item{partic}{
Women's labor-force participation rate per 1000.
}
\item{degrees}{
Women's post-secondary degree rate per 10,000.
}
\item{fconvict}{
Female indictable-offense conviction rate per 100,000.
}
\item{ftheft}{
Female theft conviction rate per 100,000.
}
\item{mconvict}{
Male indictable-offense conviction rate per 100,000.
}
\item{mtheft}{
Male theft conviction rate per 100,000.
}
}
}
\source{
Personal communication from T. Hartnagel,
Department of Sociology, University of Alberta.
}
\usage{
Hartnagel
}
\description{
This data frame has 38 rows and 8 columns.
The data are an annual time-series from 1931 to 1968. There are
some missing data.
}
\details{
The post-1948 crime rates have been adjusted to account for
a difference in method of recording. Some of your results will differ
in the last decimal place from those in Table 14.1 of Fox (1997) due
to rounding of the data. Missing values for 1950 were interpolated.
}
\references{
Fox, J., and Hartnagel, T. F (1979)
Changing social roles and female crime in Canada:
A time series analysis.
\emph{Canadian Review of Sociology and Anthropology},
\bold{16}, 96--104.
Fox, J. (2016)
\emph{Applied Regression Analysis and Generalized Linear Models},
Third Edition. Sage.
}
\keyword{datasets}
|
b65dcf210e405ff147050a92f9519c4322542724
|
f4f12d4e01d1e31a7797e11544e6f7bfb6e2e6a7
|
/12th assignment/q1.R
|
8b2da982b58b8b504cd54a0797b6b9f889bd335c
|
[] |
no_license
|
inikhil/Monte-Carlo
|
17dc44d5b0003a7c30c6fef34735e940d1850226
|
e964f310d339e101d406dea3cb7e4f01c24efaeb
|
refs/heads/master
| 2016-09-05T09:13:51.269013
| 2015-02-14T14:06:30
| 2015-02-14T14:06:30
| 30,796,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,112
|
r
|
q1.R
|
# Scratch vectors referenced as globals by the functions below. Each
# function that "modifies" one of these actually modifies its own local
# copy (R's copy-on-modify), so the globals remain empty; they exist only
# so the first `v[i] <- ...` inside a function finds something to copy.
a<-vector()
b<-vector()
x<-vector()
y<-vector()
z<-vector()
u<-vector()
# Decompose a non-negative integer into its base-2 digits, least
# significant digit first (e.g. 5 -> c(1, 0, 1)).
#
# Fixes over the original: `bits` is a proper local (the original relied
# on copying the empty global `a`), and n = 0 now returns 0 instead of an
# empty vector.
#
# n: non-negative number to convert.
# Returns: numeric vector of 0/1 digits, LSB first.
convert <- function(n) {
  if (n == 0) {
    return(0)
  }
  bits <- numeric(0)
  i <- 1
  while (n != 0) {
    bits[i] <- n %% 2
    n <- n %/% 2  # integer halving; same as floor(n / 2) for n >= 0
    i <- i + 1
  }
  bits
}
# Base-2 radical inverse of a digit vector (LSB first):
# sum over i of b[i] * (1/2)^i -- the van der Corput mapping.
#
# Vectorized replacement for the original scalar loop. An empty digit
# vector now yields 0 (the original's `for(i in 1:t)` with t = 0 ran
# i = 1, 0 and returned NA); callers never hit this case since convert()
# always returns at least one digit.
#
# b: numeric vector of 0/1 digits, LSB first.
# Returns: the radical inverse in [0, 1).
radical <- function(b) {
  sum(b * 0.5 ^ seq_along(b))
}
# Linear congruential generator: z[i] = (a1 * z[i-1] + b1) mod m1,
# returned as uniforms u[i] = z[i] / m1 with z[1] = seed.
#
# Fixes over the original: z and u are preallocated locals (the original
# grew copies of empty globals), m = 1 no longer errors (the original's
# `2:m` counted backwards to c(2, 1)), and the additive increment b1 is
# now actually applied -- every existing caller passes b1 = 0, so their
# output is unchanged.
#
# a1: multiplier; b1: additive increment; m1: modulus;
# seed: initial state z[1]; m: number of values to generate.
# Returns: numeric vector of m values in [0, 1).
lcg <- function(a1, b1, m1, seed, m) {
  z <- numeric(m)
  u <- numeric(m)
  z[1] <- seed
  u[1] <- z[1] / m1
  for (i in seq_len(m)[-1]) {
    z[i] <- (a1 * z[i - 1] + b1) %% m1
    u[i] <- z[i] / m1
  }
  u
}
# Shift x left by one position: y[i] = x[i+1] for i < m, y[m] = 0.
# Used to build overlapping pairs (x[i], x[i+1]) for the scatter plots.
#
# The original's `for(i in 1:m-1)` parsed as `(1:m)-1`, i.e. 0:(m-1); the
# stray i = 0 iteration was a silent no-op (`y[0] <- ...` does nothing),
# so results were unchanged, but the precedence trap is removed here.
#
# x: numeric vector of length >= m; m: number of output values.
# Returns: numeric vector of length m.
generate <- function(x, m) {
  c(x[seq_len(m)[-1]], 0)
}
# Driver: build the first m terms of the base-2 van der Corput sequence,
# plot its overlapping pairs and histogram, then repeat the same
# diagnostics for an LCG stream using the classic Park-Miller constants
# (a = 16807, modulus = 2^31 - 1) for comparison. Each plot opens a fresh
# x11() device.
# NOTE(review): the titles say "Van Der Corrupt Seq" -- presumably a typo
# for "Van der Corput sequence"; left unchanged since it is a runtime
# string.
main<-function(m){
  # x[i] = radical inverse of i in base 2 (the van der Corput sequence).
  for(i in 1:m){
    b=convert(i)
    x[i]=radical(b)
  }
  # y[i] = x[i+1]: the scatter of (x[i], y[i]) exposes serial correlation.
  y=generate(x,m)
  print(x)
  plot(x,y,cex=0.1,main=paste("Overlapping pairs of Van Der Corrupt Seq",
                              "\n","n=",paste(m)))
  x11()
  hist(x,breaks=99,main=paste("Sample distribution of Van Der Corrupt Seq",
                              "\n","n=",paste(m)))
  x11()
  # Same diagnostics for a multiplicative LCG stream of length m.
  u=lcg(16807,0,2^31-1,1631,m)
  hist(u,breaks=99,main=paste("Sample distribution of LCG","\n",
                              "n=",paste(m)))
  y=generate(u,m)
  x11()
  plot(u,y,cex=0.1,main=paste("Overlapping pairs of LCG","\n",
                              "n=",paste(m)))
}
# Run the comparison for three sample sizes; x11() opens a fresh device
# between runs so earlier figures are not overwritten.
main(1000)
x11()
main(100)
x11()
main(100000)
|
89c7ec05a850d7972592ecc2bed6008858f169a9
|
4e9f6d4fed2a6efab4fd55d5e3ae69fcea01e435
|
/plot4.R
|
6c7399ab05e6c0034fc57532ea74979870303449
|
[] |
no_license
|
JosePenedes/ExData_Plotting1
|
81a56cd3a2b93842873908827ce16ddff216d129
|
c8bd3ee721001bdc38c9843999028ec39487660a
|
refs/heads/master
| 2020-12-11T07:32:11.010418
| 2014-11-06T18:19:16
| 2014-11-06T18:19:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,362
|
r
|
plot4.R
|
### read the data
# NOTE(review): data.table is loaded but never used (read.table does the
# work below); kept so the script's loaded packages are unchanged.
library("data.table")
# Full household power dataset; fields are ";"-separated. header=T is the
# reassignable shorthand for TRUE (works, but TRUE is safer style).
datos_elec <- read.table("household_power_consumption.txt", header=T, sep=";")
# Keep only the two target days (dates stored as d/m/Y strings).
index_vec <- (datos_elec[,"Date"]=="1/2/2007")|(datos_elec[,"Date"]=="2/2/2007")
datos_elec <- datos_elec[index_vec,]
### plot multi-graph and save it in a .png file
png(filename = "plot4.png",width = 480, height = 480,bg = "transparent")
# "C" locale so weekday labels on the time axis come out in English.
Sys.setlocale(category = "LC_ALL", "C")
# 2x2 panel grid, filled column-first.
par(mfcol=c(2,2))
# Combine the Date and Time columns into POSIXlt timestamps for the x axis.
dates_vector<-paste(datos_elec[,1],datos_elec[,2])
dates_vector<-strptime(dates_vector,format="%d/%m/%Y %H:%M:%S")
# Values may have been read as character/factor (e.g. "?" for missing),
# hence the as.numeric(as.character(...)) round-trip on every column.
plot(dates_vector,as.numeric(as.character(datos_elec[,"Global_active_power"])),type="l",ylab="Global Active Power (kilowatts)",xlab="")
plot(dates_vector,as.numeric(as.character(datos_elec[,"Sub_metering_1"])),type="l",ylab="Energy sub metering",xlab="")
lines(dates_vector,as.numeric(as.character(datos_elec[,"Sub_metering_2"])),type="l",col="red")
lines(dates_vector,as.numeric(as.character(datos_elec[,"Sub_metering_3"])),type="l",col="blue")
legend("topright", lty=1,bty="n",col=c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(dates_vector,as.numeric(as.character(datos_elec[,"Voltage"])),type="l",ylab="Voltage",xlab="datetime")
plot(dates_vector,as.numeric(as.character(datos_elec[,"Global_reactive_power"])),type="l",ylab="Global_reactive_power",xlab="datetime")
dev.off()
|
153c04d9eb2e06f854a0962781f15ecf38e170b0
|
eb00ad46af18a572619229b4be25dd4b60fab314
|
/R/population.R
|
82a03569f2128e4300a669d202e769ab97a9de9e
|
[] |
no_license
|
fransilvion/REvolution
|
784894b9b35cf0a8129621f0ffc3629af609967b
|
5b9a8bc2c93047be48a18ec32f6a390167e90751
|
refs/heads/master
| 2020-08-16T05:23:32.657524
| 2019-10-21T03:44:47
| 2019-10-21T03:44:47
| 215,460,214
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
r
|
population.R
|
#' population - class definition
#'
#' For each generation (G_i) in the Game of Evolution there is a
#' collection of `organism` objects which make up a population, competing
#' with one another to survive to the next generation.
#'
#' This is the S4 class definition of a list of organisms together with
#' their associated fitness values and maternal lineage.
#'
#' @param organisms an ordered List of organism-objects
#' @param fitness an ordered Vector of numerical fitness-evaluation values for each organism in population. [NA]
#' @param maternal_line an ordered Vector tracking each organism's maternal lineage; initially NA (see the example below).
#'
#' @return population
#'
#' @examples
#'
#' # The population at g0 is glider and inverse_glider organisms
#' # Fitness and maternal line are initially undefined
#' gliders_G0 <- population( organisms = list( glider, glider_inv), fitness = c(NA,NA) , maternal_line = c(NA,NA) )
#'
#' @export
# NOTE(review): representation() is the older S4 spelling; modern code
# passes slots = c(...) to setClass(). Left unchanged here.
population <- setClass(Class = "population", representation(organisms = "list", fitness = "vector", maternal_line = "vector"))
# # Example of a glider encoded as logical matrix
# glider_logical <- matrix( data = c(F,T,F,
#                                    F,F,T,
#                                    T,T,T), nrow = 3, byrow = T)
#
# # Example of a glider encoded as an organism
# glider <- organism(cells = glider_logical)
#
# # Inverse of the glider above
# glider_inv <- organism(cells = !attr(glider, "cells"))
#
# gliders_G0 <- population( organisms = list( glider, glider_inv), fitness = c(NA,NA) , maternal_line = c(NA,NA) )
#QED
|
db34f6f502070efe088f0a9b01dc7c39421a4184
|
cfe5552ea0ee7f47287aabd1d93ed36ebf7300ee
|
/plot2.R
|
7d9246d3dba09d04123a22eb1aa26cea3cf98476
|
[] |
no_license
|
mohamedaref/Exploratory_Data_Analysis-course_project1
|
28c763283b8a03a979bfd1ce45c230d093e3959a
|
d11ffd4e72ce253d09ba4c83e6d3bbbefb852458
|
refs/heads/master
| 2016-09-05T11:09:02.346316
| 2015-01-10T20:59:57
| 2015-01-10T20:59:57
| 29,070,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,481
|
r
|
plot2.R
|
# Load the full household power dataset (";"-separated text file).
# NOTE(review): absolute Windows path -- the script only runs as-is on the
# author's machine.
electric_data <-
  read.csv("D:/Data Science Track/Exploratory Data Analysis/Week1/exdata-data-household_power_consumption/household_power_consumption.txt",
           sep=";" ,
           stringsAsFactors=FALSE)
# subset the data to select the data of two dates only
target_data <- subset(electric_data ,
                      electric_data$Date == "1/2/2007"
                      | electric_data$Date == "2/2/2007")
# create a date time variable
target_data$DateTime <- as.POSIXct(paste(target_data$Date, target_data$Time), format="%d/%m/%Y %H:%M:%S") # combine data and time
# set the accurate classes
# (columns were read as character; coerce each measurement to numeric --
# non-numeric entries such as missing-value markers become NA)
target_data$Date <- as.Date(target_data$Date , format = "%d/%m/%Y")
target_data$Global_active_power <- as.numeric(target_data$Global_active_power)
target_data$Global_reactive_power <- as.numeric(target_data$Global_reactive_power)
target_data$Voltage <- as.numeric(target_data$Voltage)
target_data$Global_intensity <- as.numeric(target_data$Global_intensity)
target_data$Sub_metering_1 <- as.numeric(target_data$Sub_metering_1)
target_data$Sub_metering_2 <- as.numeric(target_data$Sub_metering_2)
target_data$Sub_metering_3 <- as.numeric(target_data$Sub_metering_3)
# draw a histogram on graphic device
# NOTE(review): despite the comments saying "histogram", this is a line
# plot (type = "l") of active power over time.
plot(target_data$Global_active_power~target_data$DateTime,
     type="l",
     ylab = "Global Active Power (kilowatts)",
     xlab ="")
# copy the hist into a png file device and close it
dev.copy(png, file = "plot2.png")
dev.off()
|
f61234ff4dddc028de2df83bafbb08eb5eb63b05
|
2eda38f279d21957a25ba8e9a7c92986d12dda27
|
/NHSCOVIDResults.R
|
fa09879e7e8f7c44301fd9646487fb63bff07b97
|
[] |
no_license
|
VictimOfMaths/Publications
|
41e5f8599d3ea0fd7d741338ef11b0450059da83
|
ff842e2037cd1a1952485e0e53e2fc8d44ec6ce3
|
refs/heads/master
| 2023-06-23T08:14:04.880755
| 2023-06-14T09:18:48
| 2023-06-14T09:18:48
| 233,639,965
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,623
|
r
|
NHSCOVIDResults.R
|
# NOTE(review): rm(list=ls()) wipes the user's workspace whenever the
# script is sourced -- standard advice is to drop it and rely on a fresh
# R session instead. Left in place to preserve current behaviour.
rm(list=ls())
library(tidyverse)
library(paletteer)
library(ragg)
library(extrafont)
library(scales)
library(ggtext)
library(ggrepel)
library(forcats)
library(readxl)
library(gt)
# Project-wide ggplot theme: theme_classic() with report-specific
# overrides (plot-aligned title/caption, plain bold facet strips, large
# bold title, Calibri everywhere).
theme_custom <- function() {
  overrides <- theme(
    plot.title.position = "plot",
    plot.caption.position = "plot",
    strip.background = element_blank(),
    strip.text = element_text(face = "bold", size = rel(1)),
    plot.title = element_text(face = "bold", size = rel(1.5), hjust = 0,
                              margin = margin(0, 0, 5.5, 0)),
    text = element_text(family = "Calibri")
  )
  theme_classic() %+replace% overrides
}
folder <- "X:/ScHARR/SARG_SAPM_3_5/General/NHS scenairos Dec2021/report/results"
#Outcomes by cause and year
# All five scenario workbooks share the same layout, so load them through
# a single helper instead of five copy-pasted pipelines.
#
# Reads the "Summary by health conditions" sheet for one scenario and
# reshapes it to long format: one row per condition x metric (Deaths /
# Sick / Admissions) x model year (1-20), tagged with the scenario number.
read_condition_year <- function(scenario) {
  read_excel(paste0(folder, "/S", scenario, "/SAPM3_C2HHealth_Results.xlsx"),
             sheet="Summary by health conditions", range="B3:BN48") %>%
    # Drop the unused columns sitting between the three 20-year metric
    # panels (positions 22/23 and 44/45 of the 65-column B:BN range).
    select(-c(22,23,44,45)) %>%
    set_names("Condition", paste0("Deaths_", 1:20), paste0("Sick_", 1:20),
              paste0("Admissions_", 1:20)) %>%
    mutate(Scenario=scenario) %>%
    pivot_longer(c(2:61), names_to=c("Metric", "Year"), names_sep="_",
                 values_to="Count")
}
S1cy <- read_condition_year(1)
S2cy <- read_condition_year(2)
S3cy <- read_condition_year(3)
S4cy <- read_condition_year(4)
S5cy <- read_condition_year(5)
# Scenario 6 was excluded from the report; if needed it is simply
# read_condition_year(6).
#Read in health conditions list
ConditionsList <- read.csv("X:/ScHARR/SARG_SAPM_3_5/General/NHS scenairos Dec2021/SAPM_v4.1_260121/HealthConditionsList.csv")
datacy <- bind_rows(S1cy, S2cy, S3cy, S4cy, S5cy) %>%
mutate(Year=as.numeric(Year), Scenario=as.factor(Scenario),
Condition=as.numeric(substr(Condition, 17,19))) %>%
merge(ConditionsList %>% select(-F4), by.x="Condition", by.y="Key") %>%
mutate(scenarioname=case_when(
Scenario==1 ~ "No rebound",
Scenario==2 ~ "Immediate rebound",
Scenario==3 ~ "Moderate-only rebound",
Scenario==4 ~ "Slower heavier rebound",
Scenario==5 ~ "Increasing consumption"),
scenarioname=factor(scenarioname, levels=c("Immediate rebound",
"Slower heavier rebound",
"No rebound",
"Moderate-only rebound",
"Increasing consumption")),
Year=2019+Year,
Type=if_else(Condition==38, "Dependence-related", Type))
#Calculate totals
datacy_tot <- datacy %>%
group_by(Metric, Year, scenarioname) %>%
summarise(Count=sum(Count))
agg_png("Outputs/NHSATSFig11.png", units="in", width=8, height=6, res=500)
ggplot(datacy_tot %>% filter(Metric!="Sick"),
aes(x=Year, y=Count, colour=scenarioname))+
geom_hline(yintercept=0, colour="Grey70")+
geom_line()+
scale_x_continuous(name="")+
scale_y_continuous(name="Change per year vs. baseline")+
scale_colour_manual(values=c("#e22618", "#eaaf38", "#01ad74", "#08b5d3", "#002e3b", "#8338EC"),
name="Scenario",
guide=guide_legend(reverse=TRUE))+
facet_wrap(~Metric, scales="free_y")+
theme_custom()+
labs(title="Changes in health outcomes under modelled scenarios",
subtitle="Annual changes in alcohol-attributable hospital admissions and deaths compared to baseline")
dev.off()
datacy_grp <- datacy %>%
group_by(Type, Metric, Year, scenarioname) %>%
summarise(Count=sum(Count))
agg_png("Outputs/NHSATSFig11.png", units="in", width=9, height=6, res=500)
ggplot()+
geom_area(data=datacy_grp %>% filter(Metric=="Admissions"),
aes(x=Year, y=Count, fill=Type))+
geom_line(data=datacy_tot %>% filter(Metric=="Admissions"),
aes(x=Year, y=Count), colour="Grey30",
linetype=2)+
geom_hline(yintercept=0)+
facet_grid(~scenarioname)+
scale_fill_paletteer_d("colorBlindness::paletteMartin",
name="Condition type")+
scale_x_continuous(name="")+
scale_y_continuous(name="Admissions per year")+
theme_custom()+
theme(legend.position = "top")+
labs(title="Changes in hospital admissions under modelled scenarios",
subtitle="Annual changes in alcohol-attributable hospital admissions by condition type compared to baseline.\nDashed lines represent the net change.")
dev.off()
agg_png("Outputs/NHSATSFig12.png", units="in", width=9, height=6, res=500)
ggplot()+
geom_area(data=datacy_grp %>% filter(Metric=="Deaths"),
aes(x=Year, y=Count, fill=Type))+
geom_line(data=datacy_tot %>% filter(Metric=="Deaths"),
aes(x=Year, y=Count), colour="Grey30",
linetype=2)+
geom_hline(yintercept=0)+
facet_grid(~scenarioname)+
scale_fill_paletteer_d("colorBlindness::paletteMartin",
name="Condition type")+
scale_x_continuous(name="")+
scale_y_continuous(name="Deaths per year")+
theme_custom()+
theme(legend.position = "top")+
labs(title="Changes in alcohol-attributable deaths under modelled scenarios",
subtitle="Annual changes in alcohol-attributable deaths by condition type compared to baseline.\nDashed lines represent the net change.")
dev.off()
datacy_grp %>%
ungroup() %>%
filter(Metric=="Admissions") %>%
select(-Metric) %>%
spread(Year, Count) %>%
gt(rowname_col="Type", groupname_col="scenarioname") %>%
fmt_number(columns=as.character(c(2020:2039)), use_seps=TRUE, decimals=0) %>%
tab_options(table.font.names="Calibri",
column_labels.font.size = "small",
table.font.size = "small",
row_group.font.size = "small",
data_row.padding = px(3)) %>%
gtsave("Table3.png", path="Outputs/JPEGS", vwidth=1100)
datacy_grp %>%
ungroup() %>%
filter(Metric=="Deaths") %>%
select(-Metric) %>%
spread(Year, Count) %>%
gt(rowname_col="Type", groupname_col="scenarioname") %>%
fmt_number(columns=as.character(c(2020:2039)), use_seps=TRUE, decimals=0) %>%
tab_options(table.font.names="Calibri",
column_labels.font.size = "small",
table.font.size = "small",
row_group.font.size = "small",
data_row.padding = px(3)) %>%
gtsave("Table4.png", path="Outputs/JPEGS", vwidth=1100)
#Cumulative by condition
datacy_cumul <- datacy %>%
group_by(Metric, Name, Scenario) %>%
summarise(Count=sum(Count))
#Cumulative by condition group
###########################
#Cumulative outcomes by subgroup
# The five scenario workbooks also share the "Summary by subgroups"
# layout; load them through one helper instead of five identical
# pipelines.
#
# Reads the subgroup sheet for one scenario, keeps only the cumulative
# rows, labels them by outcome metric, sums within metric and reshapes to
# long format: one row per metric x subgroup, tagged with the scenario.
read_cumulative_subgroups <- function(scenario) {
  read_excel(paste0(folder, "/S", scenario, "/SAPM3_C2HHealth_Results.xlsx"),
             sheet="Summary by subgroups", range="B3:BA81") %>%
    filter(substr(`...1`, 1,5)=="Cumul") %>%
    # The cumulative rows come in fixed-size runs per outcome metric
    # (5 Deaths, 5 Sick, 5 Admissions, 6 QALY, 5 Cost).
    mutate(Metric=c(rep("Deaths", times=5), rep("Sick", times=5),
                    rep("Admissions", times=5), rep("QALY", times=6),
                    rep("Cost", times=5))) %>%
    group_by(Metric) %>%
    summarise(across(c(2:52), sum)) %>%
    gather(Subgroup, Count, c(2:52)) %>%
    mutate(Scenario=scenario)
}
S1sg <- read_cumulative_subgroups(1)
S2sg <- read_cumulative_subgroups(2)
S3sg <- read_cumulative_subgroups(3)
S4sg <- read_cumulative_subgroups(4)
S5sg <- read_cumulative_subgroups(5)
#Bring in extreme scenario
SExtsg <- read_excel(paste0(folder, "/Extreme scenario/SAPM3_C2HHealth_Results.xlsx"),
sheet="Summary by subgroups", range="B3:BA81") %>%
filter(substr(`...1`, 1,5)=="Cumul") %>%
mutate(Metric=c(rep("Deaths", times=5), rep("Sick", times=5),
rep("Admissions", times=5), rep("QALY", times=6),
rep("Cost", times=5))) %>%
group_by(Metric) %>%
summarise(across(c(2:52), sum)) %>%
gather(Subgroup, Extreme, c(2:52)) %>%
mutate(Extreme=-Extreme)
#Bring in populations for rates
SPopssg <- as.data.frame(t(read_excel(paste0(folder, "/Extreme scenario/SAPM3_P2C_Results.xlsx"),
sheet="P2C-Summary", range="C2:BV7", col_names=FALSE))) %>%
select(1, 6) %>%
set_names("Subgroup", "Drinkers") %>%
mutate(Subgroup=case_when(
Subgroup=="Mod" ~ "Moderate", Subgroup=="Haz" ~ "Hazardous",
Subgroup=="Harm" ~ "Harmful", Subgroup=="Male" ~ "Males", Subgroup=="Female" ~ "Females",
TRUE ~ Subgroup),
Drinkers=as.numeric(Drinkers))
datasg <- bind_rows(S1sg, S2sg, S3sg, S4sg, S5sg) %>%
merge(SExtsg) %>%
merge(SPopssg, all.x=TRUE) %>%
mutate(relchange=Count/Extreme,
scenarioname=case_when(
Scenario==1 ~ "No rebound",
Scenario==2 ~ "Immediate rebound",
Scenario==3 ~ "Moderate-only rebound",
Scenario==4 ~ "Slower heavier rebound",
Scenario==5 ~ "Increasing consumption"),
scenarioname=factor(scenarioname,
levels=c("Immediate rebound",
"Slower heavier rebound",
"No rebound",
"Moderate-only rebound",
"Increasing consumption")),
Subgroup=case_when(
Subgroup=="Hazardous" ~ "Increasing risk",
Subgroup=="Harmful" ~ "Higher risk",
TRUE ~ Subgroup),
Rate=Count*100000/Drinkers)
#Outcomes by scenario
datasg %>% filter(Subgroup=="Population" &
Metric %in% c("Admissions", "Deaths")) %>%
arrange(fct_rev(Metric), scenarioname) %>%
select(scenarioname, Extreme, Count, relchange) %>%
set_names("Scenario", "Baseline", "Difference", "% Difference") %>%
gt() %>%
tab_row_group(label="Deaths", rows=c(1:5)) %>%
tab_row_group(label="Admissions", rows=c(6:10)) %>%
fmt_number(columns = c(Baseline,Difference), decimals = 0, use_seps = TRUE) %>%
fmt_percent(columns=`% Difference`, decimals=1) %>%
cols_align(columns="Scenario", align="left") %>%
tab_options(table.font.names="Calibri") %>%
gtsave("Table2.png", path="Outputs/JPEGS")
agg_png("Outputs/NHSATSFig13.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup=="Population" &
Metric %in% c("Admissions", "Deaths")),
aes(y=scenarioname, x=relchange, fill=scenarioname))+
geom_vline(xintercept=0, colour="Grey70")+
geom_col(show.legend=FALSE)+
geom_text(aes(label=paste0("+", round(relchange*100, 1), "%")),
hjust=0, nudge_x=0.002, colour="Grey40", size=rel(3))+
scale_x_continuous(name="Cumulative change over 20 years",
label=label_percent(accuracy=1),
breaks=c(0,0.05,0.1,0.15,0.2),
limits=c(0,0.24))+
scale_y_discrete(name="")+
scale_fill_paletteer_d("fishualize::Scarus_tricolor")+
scale_colour_paletteer_d("fishualize::Scarus_tricolor")+
theme_custom()+
facet_wrap(~Metric)+
labs(title="Modelled changes in health outcomes over 20 years",
subtitle="Cumulative change in alcohol-attributable hospital admisisons and deaths compared to baseline")
dev.off()
#By drinker group
#Figure 14: cumulative change in admission rates per 100,000 drinkers,
#by drinker-risk group (Moderate / Increasing risk / Higher risk),
#faceted by rebound scenario.
agg_png("Outputs/NHSATSFig14.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("Moderate", "Increasing risk",
                                         "Higher risk") &
                           Metric=="Admissions") %>%
         #factor() fixes x-axis ordering from lowest to highest risk
         mutate(Subgroup=factor(Subgroup, levels=c("Moderate", "Increasing risk",
                                                   "Higher risk"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  #green/amber/red risk colours
  scale_fill_manual(values=c("#92d050", "#ffc000", "#c00000"))+
  theme_custom()+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in hospital admissions over 20 years",
       subtitle="Cumulative change in alcohol-attributable hospital admission rates compared to baseline by drinker group")
dev.off()
#Figure 15: same layout as Figure 14, but for alcohol-attributable deaths.
agg_png("Outputs/NHSATSFig15.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("Moderate", "Increasing risk",
                                         "Higher risk") &
                           Metric=="Deaths") %>%
         mutate(Subgroup=factor(Subgroup, levels=c("Moderate", "Increasing risk",
                                                   "Higher risk"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  scale_fill_manual(values=c("#92d050", "#ffc000", "#c00000"))+
  theme_custom()+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in deaths over 20 years",
       subtitle="Cumulative change in alcohol-attributable death rates compared to baseline by drinker group")
dev.off()
#Table 5: admissions by drinker-risk group, one row block per scenario.
datasg %>% filter(Subgroup %in% c("Moderate", "Increasing risk",
                                  "Higher risk") &
                    Metric=="Admissions")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("Moderate", "Increasing risk",
                                            "Higher risk"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table5.png", path="Outputs/JPEGS")
#Table 6: same layout as Table 5, but for deaths.
datasg %>% filter(Subgroup %in% c("Moderate", "Increasing risk",
                                  "Higher risk") &
                    Metric=="Deaths")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("Moderate", "Increasing risk",
                                            "Higher risk"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table6.png", path="Outputs/JPEGS")
#By sex
#Figure 18: cumulative change in admission rates per 100,000 drinkers by sex,
#faceted by rebound scenario.
agg_png("Outputs/NHSATSFig18.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("Males", "Females") &
                           Metric=="Admissions") %>%
         mutate(Subgroup=factor(Subgroup, levels=c("Males", "Females"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  scale_fill_manual(values=c("#6600cc", "#00cc99"))+
  theme_custom()+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in hospital admissions over 20 years",
       subtitle="Cumulative change in alcohol-attributable hospital admission rates compared to baseline by sex")
dev.off()
#Figure 19: same layout as Figure 18, but for deaths.
agg_png("Outputs/NHSATSFig19.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("Males", "Females") &
                           Metric=="Deaths") %>%
         mutate(Subgroup=factor(Subgroup, levels=c("Males", "Females"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  scale_fill_manual(values=c("#6600cc", "#00cc99"))+
  theme_custom()+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in deaths over 20 years",
       subtitle="Cumulative change in alcohol-attributable death rates compared to baseline by sex")
dev.off()
#Table 7: admissions by sex, one row block per scenario.
datasg %>% filter(Subgroup %in% c("Males", "Females") &
                    Metric=="Admissions")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("Males", "Females"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table7.png", path="Outputs/JPEGS")
#Table 8: same layout as Table 7, but for deaths.
datasg %>% filter(Subgroup %in% c("Males", "Females") &
                    Metric=="Deaths")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("Males", "Females"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table8.png", path="Outputs/JPEGS")
#By IMDq
#Figure 20: cumulative change in admission rates per 100,000 drinkers by
#deprivation quintile (IMDQ1 = least deprived), faceted by rebound scenario.
agg_png("Outputs/NHSATSFig20.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("IMDQ1 (least deprived)", "IMDQ2",
                                         "IMDQ3", "IMDQ4",
                                         "IMDQ5 (most deprived)") &
                           Metric=="Admissions") %>%
         #factor() fixes x-axis ordering from least to most deprived
         mutate(Subgroup=factor(Subgroup, levels=c("IMDQ1 (least deprived)", "IMDQ2",
                                                   "IMDQ3", "IMDQ4",
                                                   "IMDQ5 (most deprived)"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  scale_fill_manual(values=c("#fcc5c0", "#fa9fb5", "#f768a1", "#c51b8a",
                             "#7a0177"))+
  theme_custom()+
  #rotate long quintile labels so they don't overlap
  theme(axis.text.x=element_text(angle=80, hjust=1, vjust=1))+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in hospital admissions over 20 years",
       #typo fix in user-facing subtitle: "admisison" -> "admission"
       subtitle="Cumulative change in alcohol-attributable hospital admission rates compared to baseline by deprivation quintile")
dev.off()
#Figure 21: same layout as Figure 20, but for deaths.
agg_png("Outputs/NHSATSFig21.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup %in% c("IMDQ1 (least deprived)", "IMDQ2",
                                         "IMDQ3", "IMDQ4",
                                         "IMDQ5 (most deprived)") &
                           Metric=="Deaths") %>%
         mutate(Subgroup=factor(Subgroup, levels=c("IMDQ1 (least deprived)", "IMDQ2",
                                                   "IMDQ3", "IMDQ4",
                                                   "IMDQ5 (most deprived)"))),
       aes(x=Subgroup, y=Rate,
           fill=Subgroup))+
  geom_hline(yintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  scale_x_discrete(name="")+
  scale_y_continuous(name="Cumulative change over 20 years\nper 100,000 drinkers")+
  scale_fill_manual(values=c("#fcc5c0", "#fa9fb5", "#f768a1", "#c51b8a",
                             "#7a0177"))+
  theme_custom()+
  theme(axis.text.x=element_text(angle=80, hjust=1, vjust=1))+
  facet_wrap(~scenarioname)+
  labs(title="Modelled changes in deaths over 20 years",
       subtitle="Cumulative change in alcohol-attributable death rates compared to baseline by deprivation quintile")
dev.off()
#Table 9: admissions by deprivation quintile, one row block per scenario.
datasg %>% filter(Subgroup %in% c("IMDQ1 (least deprived)", "IMDQ2",
                                  "IMDQ3", "IMDQ4",
                                  "IMDQ5 (most deprived)") &
                    Metric=="Admissions")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("IMDQ1 (least deprived)", "IMDQ2",
                                            "IMDQ3", "IMDQ4",
                                            "IMDQ5 (most deprived)"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table9.png", path="Outputs/JPEGS")
#Table 10: same layout as Table 9, but for deaths.
datasg %>% filter(Subgroup %in% c("IMDQ1 (least deprived)", "IMDQ2",
                                  "IMDQ3", "IMDQ4",
                                  "IMDQ5 (most deprived)") &
                    Metric=="Deaths")%>%
  mutate(Subgroup=factor(Subgroup, levels=c("IMDQ1 (least deprived)", "IMDQ2",
                                            "IMDQ3", "IMDQ4",
                                            "IMDQ5 (most deprived)"))) %>%
  select(scenarioname, Subgroup, Drinkers, Extreme, Count, Rate, relchange) %>%
  gt(rowname_col="Subgroup", groupname_col="scenarioname") %>%
  fmt_number(columns=c(Drinkers, Extreme, Count, Rate), decimals=0, use_seps = TRUE) %>%
  fmt_percent(columns=relchange, decimals=1) %>%
  cols_label(Drinkers="Population", Extreme="Baseline", Count="Difference",
             Rate="Per 100,000", relchange="% Difference") %>%
  tab_spanner(label="Cumulative change vs. baseline",
              columns=c(Count, Rate, relchange)) %>%
  tab_options(table.font.names="Calibri") %>%
  gtsave("Table10.png", path="Outputs/JPEGS")
#Costs
#Figure 22: cumulative change in NHS costs (£bn) vs. baseline, one bar
#per rebound scenario.
agg_png("Outputs/NHSATSFig22.png", units="in", width=9, height=6, res=500)
ggplot(datasg %>% filter(Subgroup=="Population" &
                           Metric=="Cost"),
       aes(y=scenarioname, x=Count/1000000000, fill=scenarioname))+
  geom_vline(xintercept=0, colour="Grey70")+
  geom_col(show.legend=FALSE)+
  #NOTE(review): paste() renders "£ 1.2 bn" with spaces; if "£1.2bn" is
  #intended use paste0() — confirm against the published figure
  geom_text(aes(label=paste("£", round(Count/1000000000, 1), "bn")),
            hjust=0, nudge_x=0.05, colour="Grey40", size=rel(3))+
  scale_x_continuous(name="Cumulative change over 20 years (£bn)", limits=c(0,6))+
  scale_y_discrete(name="")+
  scale_fill_paletteer_d("fishualize::Scarus_tricolor")+
  scale_colour_paletteer_d("fishualize::Scarus_tricolor")+
  theme_custom()+
  labs(title="Modelled changes in NHS costs over 20 years",
       subtitle="Cumulative change in alcohol-attributable NHS costs compared to baseline")
dev.off()
|
2841d760ab9e125a2fdaf2c472431a775a1564ca
|
7e423e2e6a2fe1f9e89db4765ca44e127bcb4b9e
|
/R/kBET-utils.R
|
5d872d929b95effd066cb4e17666dcd3cc2000e5
|
[] |
no_license
|
chitrita/kBET
|
a9a72f8a07e658f9488b4ea432b3c03691c1a67b
|
d872f8fe4e64ea850d829eb4b4836fb81e377a03
|
refs/heads/master
| 2020-04-10T22:52:38.050588
| 2018-09-18T10:16:21
| 2018-09-18T10:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,687
|
r
|
kBET-utils.R
|
#' @importFrom stats pchisq pnorm
#a wrapper for kBET to fix a neighbourhood size
#a wrapper for kBET to fix a neighbourhood size
#x: neighbourhood size (passed to kBET as k0)
#df, batch, knn: forwarded unchanged to kBET
#Returns the observed kBET rejection rate (first row of the summary).
scan_nb <- function(x,df,batch, knn){
  res <- kBET(df=df, batch=batch, k0=x, knn=knn, testSize=NULL,
              heuristic=FALSE, n_repeat=10, alpha=0.05,
              addTest = FALSE, plot=FALSE, verbose=FALSE, adapt=FALSE)
  #return the value directly; the original ended on an assignment,
  #which returns its value invisibly
  res$summary$kBET.observed[1]
}
#the residual score function of kBET
#the residual score function of kBET
#Compares the batch composition observed in a neighbourhood against the
#expected (global) batch frequencies and returns the chi-square-style
#residual score sum((obs - exp)^2 / exp).
#knn.set: indices of the nearest neighbours
#class.freq: data frame / list with $class (batch labels) and $freq (expected frequencies)
#batch: per-observation batch labels
residual_score_batch <- function(knn.set, class.freq, batch)
{
  #observed batch frequencies within the neighbourhood
  observed <- table(batch[knn.set]) / length(knn.set)
  #align observed frequencies to the full set of classes (absent classes -> 0)
  env.freq <- rep(0, length(class.freq$class))
  env.freq[class.freq$class %in% names(observed)] <- observed
  expected <- class.freq$freq
  sum((env.freq - expected)^2 / expected)
}
#which batch has the largest deviance (and is underrepresented)
#which batch has the largest deviance (and is underrepresented)
#Returns the batch label whose observed frequency in the neighbourhood falls
#furthest below its expected frequency (most negative relative deviation).
#knn.set: indices of the nearest neighbours
#class.freq: data frame / list with $class (batch labels) and $freq (expected frequencies)
#batch: per-observation batch labels
max_deviance_batch <- function(knn.set, class.freq, batch)
{
  #empirical frequencies in nn-environment (sample 1)
  freq.env <- table(batch[knn.set])/length(knn.set)
  full.classes <- rep(0, length(class.freq$class))
  full.classes[ class.freq$class %in% names(freq.env)] <- freq.env
  exp.freqs <- class.freq$freq
  #relative deviation per class (negative => under-represented)
  allScores <- (full.classes - exp.freqs)/exp.freqs
  #BUG FIX: index the per-class labels, not the per-observation `batch`
  #vector, with the position of the minimal score; the original
  #`batch[which(allScores==min(allScores))]` returned the batch of an
  #unrelated observation
  maxBatch <- class.freq$class[which.min(allScores)]
  return(maxBatch)
}
#the core function of kBET
#the core function of kBET
#Pearson chi-square goodness-of-fit test of the neighbourhood's batch
#composition against the expected batch frequencies; returns the p-value
#(or 0 when the statistic is undefined / NA).
#knn.set: indices of the nearest neighbours
#class.freq: data frame / list with $class (batch labels) and $freq (expected frequencies)
#batch: per-observation batch labels
#df: degrees of freedom for the chi-square distribution
chi_batch_test <- function(knn.set, class.freq, batch, df)
{
  #observed counts per batch within the neighbourhood,
  #aligned to the full set of classes (absent classes -> 0)
  observed <- table(batch[knn.set])
  counts <- rep(0, length(class.freq$class))
  counts[class.freq$class %in% names(observed)] <- observed
  #expected counts under the global batch composition
  expected <- class.freq$freq * length(knn.set)
  stat <- sum((counts - expected)^2 / expected)
  p.val <- 1 - pchisq(stat, df)
  if (is.na(p.val)) 0 else p.val
}
#Likelihood-ratio-test approximation of the kBET statistic: compares the
#neighbourhood's batch composition against the expected frequencies via
#-2*sum(obs * log(exp/obs)), which is asymptotically chi-square with `df`
#degrees of freedom. Returns the p-value, or 0 when it evaluates to NA.
#knn.set: indices of nearest neighbours
#class.freq: data frame / list with $class (batch labels) and $freq (expected frequencies)
#batch: per-observation batch labels
#df: degrees of freedom for the chi-square approximation
lrt_approximation <- function(knn.set, class.freq, batch, df)
{
  #empirical frequencies in nn-environment (sample 1)
  obs.env <- table(batch[knn.set]) #observed realisations of each category
  freq.env <- obs.env/sum(obs.env) #observed 'probabilities'
  full.classes <- rep(0, length(class.freq$class))
  #NOTE(review): if `batch` is a factor, table() keeps all levels (with zero
  #counts), so obs.classes is all TRUE and the else-branch runs — confirm
  obs.classes <- class.freq$class %in% names(freq.env)
  #for stability issues (to avoid the secret division by 0): introduce
  #another alternative model where the observed probability
  #is either the empirical frequency or 1/(sample size) at minimum
  if (length(full.classes) > sum(obs.classes)){
    #some classes were never observed: shrink observed probabilities and
    #give each unobserved class the minimal probability pmin instead of 0
    dummy.count <- length(full.classes) -sum(obs.classes)
    full.classes[obs.classes] <- obs.env/(sum(obs.env)+ dummy.count)
    pmin <- 1/(sum(obs.env)+ dummy.count)
    full.classes[!obs.classes] <- pmin
  }else{
    full.classes[ obs.classes] <- freq.env
  }
  exp.freqs <- class.freq$freq #expected 'probabilities'
  #compute likelihood ratio of null and alternative hypothesis,
  #test statistics converges to chi-square distribution
  full.obs <- rep(0, length(class.freq$class))
  full.obs[obs.classes] <- obs.env
  lrt.value <- -2*sum(full.obs * log(exp.freqs/full.classes))
  result<- 1- pchisq(lrt.value, df) #p-value for the result
  if(is.na(result)){ #I would actually like to know when 'NA' arises.
    return(0)
  }else{
    return(result)
  }
}
#truncated normal distribution distribution function
#truncated normal distribution distribution function
#Cumulative density of x ~ N(mu, sd^2) conditioned on a <= x <= b.
#x: evaluation point (scalar)
#mu, sd: mean and standard deviation of the untruncated normal
#a, b: truncation bounds (swapped automatically if a > b)
#alpha: fallback value used for sd when sd is non-positive or NA
#verbose: emit a warning when sd is replaced
ptnorm <- function(x,mu,sd, a=0, b=1, alpha=0.05,verbose=FALSE){
  #this is the cumulative density of the truncated normal distribution
  #x ~ N(mu, sd^2), but we condition on a <= x <= b
  if(a>b){
    warning("Lower and upper bound are interchanged.")
    tmp <- a
    a <- b
    b <- tmp
  }
  #FIX: scalar conditions use short-circuit ||; is.na() is checked first so
  #an NA sd never reaches the comparison (the original `sd<=0 | is.na(sd)`
  #used the vectorized operator in a scalar if())
  if(is.na(sd) || sd<=0) {
    if(verbose)
    {
      warning("Standard deviation must be positive.")
    }
    if (alpha<=0)
    {
      stop("False positive rate alpha must be positive.")
    }
    sd <- alpha
  }
  if (x<a || x>b){
    #out-of-bounds x clamps the CDF to 0 (below a) or 1 (above b)
    warning("x out of bounds.")
    cdf <- as.numeric(x>a)
  }else{
    alp <- pnorm((a-mu)/sd)
    bet <- pnorm((b-mu)/sd)
    zet <- pnorm((x-mu)/sd)
    cdf <- (zet-alp)/(bet-alp)
  }
  return(cdf)
}
#wrapper for the multinomial exact test function
#wrapper for the multinomial exact test function
#x: indices of the neighbourhood; y: expected probabilities;
#z: per-observation batch labels. Returns the exact-test p-value.
multiNom <- function(x, y, z) {
  counts <- as.numeric(table(factor(z)[x]))
  multinomial.test(counts, y)$p.value
}
#significance test for pcRegression (two levels)
#significance test for pcRegression (two levels)
#rot.data: numeric vector (e.g. scores of one principal component)
#batch: categorical covariate with two levels
#batch.levels: the two level labels contrasted by the t-test
#Returns c(R^2, p-value of the batch coefficient, Welch t-test p-value).
correlate.fun_two <- function(rot.data, batch, batch.levels){
  a <- lm(rot.data ~ batch)
  #FIX: allocate all three slots up front; the original numeric(2) relied on
  #R silently growing the vector when result[3] was assigned
  result <- numeric(3)
  result[1] <- summary(a)$r.squared #coefficient of determination
  result[2] <- summary(a)$coefficients[2,4] #p-value (significance level)
  t.test.result <- t.test(rot.data[batch==batch.levels[1]],
                          rot.data[batch==batch.levels[2]], paired = FALSE)
  result[3] <- t.test.result$p.value
  return(result)
}
#significance test for pcRegression (more than two levels)
#significance test for pcRegression (more than two levels)
#rot.data: numeric vector (e.g. scores of one principal component)
#batch: categorical covariate with any number of levels
#Returns c(R^2, p-value of the second lm coefficient, one-way ANOVA p-value).
correlate.fun_gen <- function(rot.data, batch){
  a <- lm(rot.data ~ batch)
  #FIX: allocate all three slots up front; the original numeric(2) relied on
  #R silently growing the vector when result[3] was assigned
  result <- numeric(3)
  result[1] <- summary(a)$r.squared #coefficient of determination
  F.test.result <- aov(rot.data ~ batch)
  F.test.summary <- summary(F.test.result)
  result[2] <- summary(a)$coefficients[2,4] #p-value (significance level)
  result[3] <- F.test.summary[[1]]$'Pr(>F)'[1] #p-value of the one-way anova test
  return(result)
}
|
7fc91615ab48afaaeae96521c926cf057e17a8da
|
f4f1cd0987ad6c7a71fc400f41ca3f22e3f6ed53
|
/man/soImport2.Rd
|
9ecbe2c5f74bdee64b2791e9cff7c109d419de5d
|
[] |
no_license
|
jordandeherrera/prioritizationMatrix
|
6ee0e89e12e281c111e2794d5565e7cccd3ab0ba
|
16484b6f3a6a97e90dc3e899b57e9d77004e0f52
|
refs/heads/master
| 2022-01-21T03:50:31.217496
| 2019-06-25T05:55:01
| 2019-06-25T05:55:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 433
|
rd
|
soImport2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\docType{data}
\name{soImport2}
\alias{soImport2}
\title{Sample Strategic Options - Differentiated Importance}
\format{An object of class \code{numeric} of length 6.}
\usage{
soImport2
}
\description{
An example of strategic options importance according to a differentiated competitor.
}
\examples{
\dontrun{
soImport2
}
}
\keyword{datasets}
|
0f642a38ff3688adc2eb30d16aaf078a3438095a
|
5b77d2ffc8d564f95927b099cc021ab9ab6d3477
|
/man/addMaplibreGL.Rd
|
a4418dae0fbc9d30541cb2be4891581ac62495f4
|
[
"MIT"
] |
permissive
|
llongour/leaflet.maplibregl
|
d22ce32a02bf51086ade3db7e03a900f374bbe37
|
5bd8ccf5cd1e46aa2ed22e4fde9672ed73171bd7
|
refs/heads/master
| 2023-06-23T15:59:39.344317
| 2023-06-23T02:11:23
| 2023-06-23T02:11:23
| 563,372,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,377
|
rd
|
addMaplibreGL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maplibre.R
\name{maplibreOptions}
\alias{maplibreOptions}
\alias{addMaplibreGL}
\title{Adds a MapLibre GL layer to a Leaflet map}
\usage{
maplibreOptions(
attribution = "",
layers = NULL,
layerDefs = NULL,
opacity = 1,
position = "front",
maxZoom = NULL,
minZoom = NULL,
dynamicLayers = NULL,
proxy = NULL,
useCors = TRUE,
...
)
addMaplibreGL(
map,
style = "https://maputnik.github.io/osm-liberty/style.json",
layerId = NULL,
group = NULL,
setView = TRUE,
options = maplibreOptions()
)
}
\arguments{
\item{attribution}{Attribution from service metadata copyright text is automatically displayed in Leaflet's default control. This property can be used for customization.}
\item{layers}{An array of Layer IDs like \link{3, 4, 5} to show from the service.}
\item{layerDefs}{A string representing a query to run against the service before the image is rendered. This can be a string like "3:STATE_NAME="Kansas"" or an object mapping different queries to specific layers {3:"STATE_NAME="Kansas"", 2:"POP2007>25000"}.}
\item{opacity}{Opacity of the layer. Should be a value between 0 (completely transparent) and 1 (completely opaque).}
\item{position}{Position of the layer relative to other overlays.}
\item{maxZoom}{Closest zoom level the layer will be displayed on the map.}
\item{minZoom}{Furthest zoom level the layer will be displayed on the map.}
\item{dynamicLayers}{JSON object literal used to manipulate the layer symbology defined in the service itself. Requires a 10.1 (or above) map service which supports dynamicLayers requests.}
\item{useCors}{If this service should use CORS when making GET requests.}
\item{...}{Other options to pass to Maplibre GL JS.}
\item{map}{The Leaflet R object (see \code{\link[leaflet:leaflet]{leaflet::leaflet()}}).}
\item{style}{Tile vector URL; can begin with \verb{http://} or \verb{https://}.}
\item{layerId}{A layer ID; see
\href{https://rstudio.github.io/leaflet/showhide.html}{docs}.}
\item{group}{The name of the group the newly created layer should belong to
(for \code{\link[leaflet:remove]{leaflet::clearGroup()}} and \code{\link[leaflet:addLayersControl]{leaflet::addLayersControl()}} purposes).
(Warning: Due to the way Leaflet and MapLibre GL JS integrate, showing/hiding
a GL layer may give unexpected results.)}
\item{setView}{If \code{TRUE} (the default), drive the map to the center/zoom
specified in the style (if any). Note that this will override any
\code{\link[leaflet:map-methods]{leaflet::setView()}} or \code{\link[leaflet:map-methods]{leaflet::fitBounds()}} calls that occur between
the \code{addMaplibreGL} call and when the style finishes loading; use
\code{setView=FALSE} in those cases.}
\item{options}{A list of Map options. See the
\href{https://maplibre.org/maplibre-gl-js-docs/api/#map}{MapLibre GL JS documentation}
for more details. Not all options may work in the context of Leaflet.}
\item{token}{If you pass a token in your options it will be included in all requests to the service.}
}
\description{
Uses the \href{https://github.com/maplibre/maplibre-gl-leaflet}{MapLibre GL Leaflet plugin}
to add a MapLibre GL layer to a Leaflet map.
}
\examples{
library(leaflet)
\donttest{
leaflet() \%>\%
addMaplibreGL(style = "https://demotiles.maplibre.org/style.json")
}
}
|
5c8ddf1855c6e7f12135d30e3e30add03349091e
|
e76322de5ff72e76fa605847242bfc12fe0e32db
|
/Whatsapp project/Data preparation and reading.R
|
a9189bdd90780eba572d05b4121c12c2ed242371
|
[] |
no_license
|
scozzarro/datascience_capstone
|
0dc11aecd2e59f81c28f82be8e95fde88d47ba41
|
224927b09e876cf6c60d642a06626227e86e62ea
|
refs/heads/master
| 2023-02-14T15:15:21.335951
| 2021-01-11T16:03:42
| 2021-01-11T16:03:42
| 265,833,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,828
|
r
|
Data preparation and reading.R
|
#New Whatsapp project for text analysis
library(rwhatsapp)
library(lubridate)
library(tidyverse)
library(tidytext)
library(kableExtra)
library(knitr)
library(ggimage)
#1. Import chat ----
mychat<- rwa_read('chat_A_G.txt')
#2. Clean Data
#interactive inspection of the parsed chat
summary(mychat)
str(mychat)
mychat<- mychat[-c(1:14),] #drop the first rows (WhatsApp privacy/encryption disclaimer)
#anonymise: every author other than "Andrea Marciano" is relabelled "Gabriel"
#NOTE(review): assumes the chat has exactly two participants — confirm
mychat$author<- as.character(mychat$author)
mychat$author[mychat$author != "Andrea Marciano"] <- "Gabriel"
mychat$author<- as.factor(mychat$author)
#derive the calendar day and a season label from the message timestamp;
#days outside the listed ranges (e.g. 16-20 Dec 2020) get season = NA
mychat<- mychat %>%
  mutate(day = date(time))%>%
  mutate(season = case_when(day >= dmy(24092019) & day <= dmy(20122019) ~ 'Autumn 2019',
                            day >= dmy(21122019) & day <= dmy(31032020) ~ 'Winter 2020',
                            day >= dmy(01042020) & day <= dmy(21062020) ~ 'Spring 2020',
                            day >= dmy(22062020) & day <= dmy(23092020) ~ 'Summer 2020',
                            day >= dmy(24092020) & day <= dmy(15122020) ~ 'Autumn 2020'
  ))
mychat$season<- factor(mychat$season)
#preview the first 10 rows as a styled HTML table
mychat %>% head(10) %>% kable() %>% kable_styling(font_size = 11, bootstrap_options = c("striped", 'condensed'))
#3. EDA ----
#3.1 Messages per seasons ----
#daily message counts as stacked bars coloured by season
mychat %>% group_by(season) %>%
  count(day) %>%
  ggplot(aes(day, n, fill = season)) +
  geom_bar(stat = 'identity') +
  ylab('Numbers of messages') +
  xlab('season') +
  ggtitle('Messages per Seasons') +
  theme_minimal() +
  theme(legend.position = 'bottom')
#3.2 Messages per day of week ----
#horizontal bars per weekday (ordered Mon..Sun via -wday_num), filled by season
mychat %>% mutate(wday_num = wday(day), wday_name = weekdays(day)) %>%
  group_by(season, wday_num, wday_name) %>%
  count() %>%
  ggplot(aes(reorder(wday_name, -wday_num), n, fill = season)) +
  geom_bar(stat = 'identity') +
  xlab('') +
  coord_flip() +
  ggtitle('Messages per day of week', 'Frequency per seasons') +
  theme_minimal() +
  theme(legend.title = element_blank(), legend.position = 'bottom')
#3.3 Message frequency by the time of day ----
#Day-of-week labels for the facet headers: lubridate::wday() with the default
#week_start numbers days 1=Sunday ... 7=Saturday, so exactly 7 labels map to 1:7.
#FIX: the original vector had 8 entries ('Sunday' at both ends) while
#names(wdays)<-1:7 only named the first 7, leaving a dangling unnamed element.
wdays<- c('Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday')
names(wdays)<- 1:7 #Messages per day hours
#hourly message counts, one facet per weekday, filled by season
mychat %>% mutate(hours = hour(time), wday_num = wday(day), wday_name = weekdays(day)) %>%
  count(season, wday_num, wday_name, hours) %>%
  ggplot(aes(hours, n, fill = season)) +
  geom_bar(stat = 'identity') +
  ylab('Number of messages') +
  xlab('Hours') +
  ggtitle('Number of messages per day hours', 'Frequency per seasons') +
  facet_wrap(~wday_num, ncol = 7, labeller = labeller(wday_num = wdays)) +
  theme_minimal() +
  theme(legend.title = element_blank(), legend.position = 'bottom',
        panel.spacing.x = unit(0.0, 'lines'))
#3.4 Who has sent the most messages? ----
#total message count per author, stacked by season
mychat %>% mutate(day = date(time)) %>%
  group_by(season) %>%
  count(author) %>%
  ggplot(aes(reorder(author,n), n, fill = season)) +
  geom_bar(stat = 'identity') +
  ylab('Total number of messages') +
  xlab('User') +
  coord_flip() +
  ggtitle('Total number of messages per user', 'Who has sent the most messages?, Freq per season') +
  theme_minimal() +
  theme(legend.title = element_blank(), legend.position = 'bottom')
#3.5 Length of messages ---- (original header repeated "3.4" and misspelt "Lenght")
#average message length in characters per author
#NOTE(review): the axis/title strings below keep the original "lenght"
#spelling — runtime strings, left untouched here
mychat %>% mutate(text_len = nchar(text)) %>%
  group_by(author) %>%
  summarise(avg_txt_len = mean(text_len)) %>%
  ggplot(aes(author, avg_txt_len, fill = author)) +
  geom_bar(stat = 'identity') +
  xlab('Author') +
  ylab('Average messages lenght') +
  coord_flip() +
  ggtitle('Average messages lenght by author') +
  theme_minimal() +
  theme(legend.title = element_blank(), legend.position = 'bottom')
#3.5 Emojis ----
#What are the most used emojis in chat?
#Emoji PNG images are fetched from https://abs.twimg.com (Twitter's emoji CDN);
#the URL is built from the hex code point of the emoji's first character.
emojiplot<- mychat %>%
  unnest(c(emoji, emoji_name)) %>%
  mutate(emoji = str_sub(emoji, end = 1)) %>%     #keep only the base character (drops skin-tone modifiers)
  mutate(emoji_name = str_remove(emoji_name, ':.*')) %>%
  count(emoji, emoji_name) %>%
  top_n(30, n) %>%
  arrange(desc(n)) %>%
  mutate(emoji_url = map_chr(emoji, ~paste0('https://abs.twimg.com/emoji/v2/72x72/',
                                            as.hexmode(utf8ToInt(.x)),'.png')))
#top-30 emoji overall, bar + point with the emoji image at the bar end
emojiplot %>% ggplot(aes(reorder(emoji_name, n), n)) +
  geom_col(aes(fill = n), show.legend = FALSE, width = .2) +
  geom_point(aes(color = n), show.legend = FALSE, size = 3) +
  geom_image(aes(image = emoji_url), size = .045) +
  ylab('Number of times emoji was used') +
  xlab('Emoji meaning') +
  ggtitle('Most used emoji') +
  coord_flip() +
  theme_minimal() +
  theme()
#What are the most used emojis in chat per user?
#same pipeline, but counted per author and limited to each author's top 8
emojiplot2<- mychat %>%
  unnest(c(emoji, emoji_name)) %>%
  mutate(emoji = str_sub(emoji, end = 1))%>%
  count(author, emoji, emoji_name, sort = TRUE) %>%
  group_by(author) %>%
  top_n(8, n) %>%
  slice(1:8) %>%   #break ties so each author keeps exactly 8 rows
  mutate(emoji_url = map_chr(emoji, ~paste0('https://abs.twimg.com/emoji/v2/72x72/',
                                            as.hexmode(utf8ToInt(.x)),'.png')))
emojiplot2 %>% ggplot(aes(reorder(emoji, -n), n)) +
  geom_col(aes(fill = author, group = author), show.legend = FALSE, width = .20) +
  geom_image(aes(image = emoji_url), size = .08) +
  xlab('Emiji') +
  ylab('Number of time emoji was used') +
  facet_wrap(~author, ncol = 5, scales = 'free') +
  ggtitle('Most used emoji by user') +
  theme_minimal() +
  theme(axis.text.x = element_blank())
#3.6 Most used words ----
#Italian stop words plus chat slang ('nn', 'sn', 'mo', ...) to exclude
#from the word counts
useless_words<-c('il','lo','la','un','uno','una','quello','quella','quelli','nostro','vostro','di','quanto','che','se','sono',
                 'loro','alla','alle','niente','meno','piu','qui','qua','con','voi','chi','mio','tuo','va','ma','è','stata',
                 'per', 'nn','a','le','te','in','e','sto','da','sei','me','ho','ha','mi','we','per','non','sta','o','fra',
                 'su','so','hai','ci','mo','sn','eh','ti','c3','i','fa','al','ne','del')
#top-30 words overall after tokenisation and stop-word removal
mychat %>% unnest_tokens(input = text, output = word) %>%
  filter(!word %in% useless_words) %>%
  count(word) %>%
  top_n(30, n) %>%
  arrange(desc(n)) %>%
  ggplot(aes(reorder(word, n), n, fill = n, color = n)) +
  geom_col(show.legend = FALSE, width = .1) +
  geom_point(show.legend = FALSE, size = 3) +
  ggtitle('Most used words in chat') +
  xlab('Words') +
  ylab('Number of time it was used') +
  coord_flip() +
  theme_minimal()
#Most used words in chat, by user
#per-author top-20 words, one facet per author
mychat %>% unnest_tokens(input = text, output = word) %>%
  filter(!word %in% useless_words) %>%
  count(author, word, sort = TRUE) %>%
  group_by(author) %>%
  top_n(20, n) %>%
  slice(1:20) %>%   #break ties so each author keeps exactly 20 rows
  ungroup() %>%
  arrange(author, desc(n)) %>%
  mutate(order = row_number()) %>%
  ggplot(aes(reorder(word, n), n, fill = author, color = author)) +
  geom_col(show.legend = FALSE, width = .1) +
  geom_point(show.legend = FALSE, size = 3) +
  xlab('Words') +
  ylab('Number of time it was used') +
  coord_flip() +
  facet_wrap(~author, ncol = 3, scales = 'free') +
  ggtitle('Most used words by user') +
  theme_minimal()
|
c22a5a4103a41f198f6cf98f52841701fc9fff0d
|
d51374463a818baaad2ddf0da01e1cca564c3d02
|
/test/test_retrain.r
|
563e20bc857887de486d5427cabe0f183295a1b2
|
[
"MIT"
] |
permissive
|
kumc-bmi/AKI_CDM
|
99ad377c4a12f6e41bbbdc92ff14f7df65cf51a2
|
a82ef4faa7ee491d745b77a991f62f5424d0fd1d
|
refs/heads/master
| 2022-06-02T17:50:19.970746
| 2022-05-10T14:05:39
| 2022-05-10T14:05:39
| 146,350,129
| 8
| 10
| null | 2021-05-19T21:16:12
| 2018-08-27T20:26:51
|
HTML
|
UTF-8
|
R
| false
| false
| 21,377
|
r
|
test_retrain.r
|
#' ---
#' title: "Building and Validating Predictive Models for Acute Kidney Injury (AKI) using PCORnet CDM (Part II.1)"
#' author: "xing song"
#' date: "Feburary 09, 2019"
#' output: html_document
#' ---
#' ### Stage 2.2: Predictive Models Validation (Retrain)
#'
#' In this experiment, we will retrain the benchmark predictive model by quasi-replicating the model by [*Koyner et al*] for AKI risk prediction on the adult inpatients at each GPC site using PCORnet common data model. The model will be trained on 70% of the site's local data and validated on the remaining 30%.
#'
#' [*Koyner et al*] https://www.ncbi.nlm.nih.gov/pubmed/29596073
#source utility functions (project helpers: require_libraries, format_data,
#get_dsurv_temporal, ...)
source("./R/util.R")
source("./R/var_etl_surv.R")
require_libraries(c("tidyr",
                    "dplyr",
                    "magrittr",
                    "stringr",
                    "broom",
                    "Matrix",
                    "xgboost",
                    "ROCR",
                    "PRROC",
                    "ResourceSelection",
                    "knitr",
                    "kableExtra",
                    "ggplot2",
                    "openxlsx"))
# experimental design parameters
#----prediction ending point (days since admission)
pred_end<-7
#-----prediction point (predict AKI 1 or 2 days ahead)
pred_in_d_opt<-c(1,2)
#-----prediction tasks (AKI stage >=1, >=2, =3)
pred_task_lst<-c("stg1up","stg2up","stg3")
#-----feature selection type
fs_type_opt<-c("no_fs","rm_scr_bun")
#lab codes excluded under the "rm_scr_bun" feature-selection option
#NOTE(review): these look like LOINC codes for serum creatinine / BUN /
#eGFR-related labs (plus the derived BUN_SCR ratio) — confirm against the
#study codebook
rm_key<-c('2160-0','38483-4','14682-9','21232-4','35203-9','44784-7','59826-8',
          '16188-5','16189-3','59826-8','35591-7','50380-5','50381-3','35592-5',
          '44784-7','11041-1','51620-3','72271-0','11042-9','51619-5','35203-9','14682-9',
          '12966-8','12965-0','6299-2','59570-2','12964-3','49071-4','72270-2',
          '11065-0','3094-0','35234-4','14937-7',
          '48642-3','48643-1', #eGFR
          '3097-3','44734-2','BUN_SCR')
#' #### Preparation
#'
#' By running `Part I` of "render_report.R", the raw data tables should have already been collected and saved in the local `./data` folder (Note: these data tables are not visiable in the github ./data folder, but should be visible in the corresponding folder locally), that are
#'
#' * `Table1.rda`: AKI patieht cohort table;
#'
#' * `AKI_DEMO.rda`: CDM demographic table cut for AKI cohort;
#'
#' * `AKI_VITAL.rda`: CDM vital table cut for AKI cohort;
#'
#' * `AKI_LAB.rda`: CDM lab table cut for AKI cohort;
#'
#' * `AKI_DX.rda`: CDM diagnosis table cut for AKI cohort;
#'
#' * `AKI_PX.rda`: CDM procedure table cut for AKI cohort;
#'
#' * `AKI_MED.rda`: CDM prescribing medication table cut for AKI cohort;
#' #### Objective 2.1: Data Cleaning and Representation
#' In this section, the raw data tables will be cleaned and transformed to a discrete-survival-like representation, which will be used in the final modeling stage. To reduce the burden on memory requirments, the ETL (extract, transform, load) process will be performed in chunks with respect to **distinct prediction task, encounter years and variable types**. Meanwhile, indices for random paritioning will be assigned to each encounter. The ETL progress will be reported as follows:
# collect and format variables on daily basis
#number of year-chunks the cohort is split into for the ETL loop
n_chunk<-4
#cohort table with admission year extracted from the admit timestamp
#(note: "./data//Table1.rda" has a doubled slash — harmless on all platforms)
tbl1<-readRDS("./data//Table1.rda") %>%
  dplyr::mutate(yr=as.numeric(format(strptime(ADMIT_DATE, "%Y-%m-%d %H:%M:%S"),"%Y")))
#--by chunks: encounter year
#map each post-2009 encounter year to one of n_chunk roughly equal chunks
enc_yr<-tbl1 %>%
  dplyr::select(yr) %>%
  unique %>% arrange(yr) %>%
  filter(yr>2009) %>%
  dplyr::mutate(chunk=ceiling((yr-2009)/(n()/n_chunk)))
#--by variable type (one CDM table cut per type)
var_type<-c("demo","vital","lab","dx","px","med")
#Main ETL loop: for each prediction horizon and each prediction task,
#build a discrete-survival representation of the cohort in year chunks and
#save the results under ./data/preproc/.
for(pred_in_d in pred_in_d_opt){
  #--determine update time window: days 0..pred_end, dropping the first
  #  (pred_in_d - 1) days when predicting more than 1 day ahead
  tw<-as.double(seq(0,pred_end))
  if(pred_in_d>1){
    tw<-tw[-seq_len(pred_in_d-1)]
  }
  #--save results as array
  for(pred_task in pred_task_lst){
    start_tsk<-Sys.time()
    cat("Start variable collection for task",pred_task,".\n")
    #---------------------------------------------------------------------------------------------
    var_by_yr<-list()
    var_bm<-list()
    rsample_idx<-c()
    for(i in seq_len(n_chunk)){
      start_i<-Sys.time()
      cat("...start variable collection for year chunk",i,".\n")
      #--collect end_points: reshape the per-encounter AKI-stage onset days
      #  into long format (y = stage, dsa_y = days since admission)
      yr_i<-enc_yr$yr[enc_yr$chunk==i]
      dat_i<-tbl1 %>% filter(yr %in% yr_i) %>%
        dplyr::select(ENCOUNTERID,yr,
                      NONAKI_SINCE_ADMIT,
                      AKI1_SINCE_ADMIT,
                      AKI2_SINCE_ADMIT,
                      AKI3_SINCE_ADMIT) %>%
        gather(y,dsa_y,-ENCOUNTERID,-yr) %>%
        filter(!is.na(dsa_y)) %>%
        dplyr::mutate(y=recode(y,
                               "NONAKI_SINCE_ADMIT"=0,
                               "AKI1_SINCE_ADMIT"=1,
                               "AKI2_SINCE_ADMIT"=2,
                               "AKI3_SINCE_ADMIT"=3)) %>%
        dplyr::mutate(y=as.numeric(y))
      #binarise the outcome per task and keep each encounter's latest record
      if(pred_task=="stg1up"){
        dat_i %<>%
          dplyr::mutate(y=as.numeric(y>0)) %>%
          group_by(ENCOUNTERID) %>% top_n(n=1L,wt=dsa_y) %>% ungroup
      }else if(pred_task=="stg2up"){
        dat_i %<>%
          # filter(y!=1) %>% # remove stage 1
          dplyr::mutate(y=as.numeric(y>1)) %>%
          group_by(ENCOUNTERID) %>% top_n(n=1L,wt=dsa_y) %>% ungroup
      }else if(pred_task=="stg3"){
        dat_i %<>%
          # filter(!(y %in% c(1,2))) %>% # remove stage 1,2
          dplyr::mutate(y=as.numeric(y>2)) %>%
          group_by(ENCOUNTERID) %>% top_n(n=1L,wt=dsa_y) %>% ungroup
      }else{
        stop("prediction task is not valid!")
      }
      #--random sampling: assign each encounter a 10-fold CV index
      rsample_idx %<>%
        bind_rows(dat_i %>%
                    dplyr::select(ENCOUNTERID,yr) %>%
                    unique %>%
                    dplyr::mutate(cv10_idx=sample(1:10,n(),replace=T)))
      #--ETL variables: one pass per CDM variable type
      X_surv<-c()
      y_surv<-c()
      var_etl_bm<-c()
      for(v in seq_along(var_type)){
        start_v<-Sys.time()
        #extract: load the CDM cut and restrict to this chunk's encounters
        var_v<-readRDS(paste0("./data/AKI_",toupper(var_type[v]),".rda")) %>%
          semi_join(dat_i,by="ENCOUNTERID")
        if(var_type[v] != "demo"){
          if(var_type[v] == "med"){
            #med table stores comma-packed value/dsa lists; explode to rows
            var_v %<>%
              transform(value=strsplit(value,","),
                        dsa=strsplit(dsa,",")) %>%
              unnest(value,dsa) %>%
              dplyr::mutate(value=as.numeric(value),
                            dsa=as.numeric(dsa))
          }
          #keep only observations within the prediction window
          var_v %<>% filter(dsa <= pred_end)
        }
        #transform: project helpers build the discrete-survival design
        var_v<-format_data(dat=var_v,
                           type=var_type[v],
                           pred_end=pred_end)
        Xy_surv<-get_dsurv_temporal(dat=var_v,
                                    censor=dat_i,
                                    tw=tw,
                                    pred_in_d=pred_in_d)
        #load: accumulate features and outcomes across variable types
        X_surv %<>% bind_rows(Xy_surv$X_surv) %>% unique
        y_surv %<>% bind_rows(Xy_surv$y_surv) %>% unique
        lapse_v<-Sys.time()-start_v
        var_etl_bm<-c(var_etl_bm,paste0(lapse_v,units(lapse_v)))
        cat("\n......finished ETL",var_type[v],"for year chunk",i,"in",lapse_v,units(lapse_v),".\n")
      }
      var_by_yr[[i]]<-list(X_surv=X_surv,
                           y_surv=y_surv)
      lapse_i<-Sys.time()-start_i
      var_etl_bm<-c(var_etl_bm,paste0(lapse_i,units(lapse_i)))
      #typo fix in progress message: "variabl" -> "variable"
      cat("\n...finished variable collection for year chunk",i,"in",lapse_i,units(lapse_i),".\n")
      var_bm[[i]]<-data.frame(bm_nm=c(var_type,"overall"),
                              bm_time=var_etl_bm,
                              stringsAsFactors = F)
    }
    #--save preprocessed data
    saveRDS(rsample_idx,file=paste0("./data/preproc/",pred_in_d,"d_rsample_idx_",pred_task,".rda"))
    saveRDS(var_by_yr,file=paste0("./data/preproc/",pred_in_d,"d_var_by_yr_",pred_task,".rda"))
    #FIX: added the missing underscore before the task name so the file is
    #saved as e.g. "1d_var_bm_stg1up.rda", matching the sibling paths above
    #and the file naming documented at the end of this script
    saveRDS(var_bm,file=paste0("./data/preproc/",pred_in_d,"d_var_bm_",pred_task,".rda"))
    #---------------------------------------------------------------------------------------------
    lapse_tsk<-Sys.time()-start_tsk
    cat("\nFinish variable ETL for task:",pred_task,"in",pred_in_d,"days",",in",lapse_tsk,units(lapse_tsk),".\n")
  }
}
# The final preprocessed intermediate tables from this code chunk should be found in the `./data/preproc/...` folder as the following intermediate data tables for different prediction tasks:
#
# * For AKI stage ≥ 1 in 24 hours: `1d_rsample_idx_stg1up.rda`, `1d_var_by_yr_stg1up.rda`, `1d_var_bm_stg1up.rda`
#
# * For AKI stage ≥ 2 in 24 hours: `1d_rsample_idx_stg2up.rda`, `1d_var_by_yr_stg2up.rda`, `1d_var_bm_stg2up.rda`
#
# * For AKI stage = 3 in 24 hours: `1d_rsample_idx_stg3.rda`, `1d_var_by_yr_stg3.rda`, `1d_var_bm_stg3.rda`
#
# * For AKI stage ≥ 1 in 48 hours: `2d_rsample_idx_stg1up.rda`, `2d_var_by_yr_stg1up.rda`, `2d_var_bm_stg1up.rda`
#
# * For AKI stage ≥ 2 in 48 hours: `2d_rsample_idx_stg2up.rda`, `2d_var_by_yr_stg2up.rda`, `2d_var_bm_stg2up.rda`
#
# * For AKI stage = 3 in 48 hours: `2d_rsample_idx_stg3.rda`, `2d_var_by_yr_stg3.rda`, `2d_var_bm_stg3.rda`
#
#' #### Objective 2.2: Benchmark Model Development
#'
#' We will adopt the AKI prediction model by [*Koyner et al*] using all variables from each site's CDM Demographic, Vital, Diagnosis, Procedure and Prescribing Medication tables.The same strategy as in Koyner et al for outlier removal and aggregation of repeated values have been followed. Training/Validation sets are partitioned based on pre-assigned indices in the files "..._rsample_idx_..." from previous part. The model development progress will be reported as follows:
#hyper-parameter grid for xgboost
# AUC under a binary logistic objective, matching the binarized AKI outcomes.
eval_metric<-"auc"
objective<-"binary:logistic"
# Single-row grid (one fixed hyper-parameter set); expand.grid keeps the
# tuning loop below general should candidate values be added later.
grid_params<-expand.grid(
  max_depth=10,
  eta=0.05,
  min_child_weight=10,
  subsample=0.8,
  colsample_bytree=0.8,
  gamma=1
)
# Train the benchmark gradient-boosting model for every combination of
# prediction lead time (pred_in_d) x prediction task x feature-set type.
# For each combination: assemble train/test sets from the preprocessed
# year-chunk files, build aligned sparse matrices, tune xgboost via CV,
# validate on the held-out set, and save model + predictions + importance.
for(pred_in_d in pred_in_d_opt){
  for(pred_task in pred_task_lst){
    bm<-c()
    bm_nm<-c()
    start_tsk<-Sys.time()
    cat("Start build reference model for task",pred_task,"in",pred_in_d,"days",".\n")
    #---------------------------------------------------------------------------------------------
    start_tsk_i<-Sys.time()
    #--prepare training and testing set
    X_tr<-c()
    X_ts<-c()
    y_tr<-c()
    y_ts<-c()
    rsample_idx<-readRDS(paste0("./data/preproc/",pred_in_d,"d_rsample_idx_",pred_task,".rda"))
    var_by_task<-readRDS(paste0("./data/preproc/",pred_in_d,"d_var_by_yr_",pred_task,".rda"))
    # Split: training = CV folds 1-6 before 2017; testing = folds 7-10 or 2017+
    for(i in seq_len(n_chunk)){
      var_by_yr<-var_by_task[[i]]
      X_tr %<>% bind_rows(var_by_yr[["X_surv"]]) %>%
        semi_join(rsample_idx %>% filter(cv10_idx<=6 & yr<2017),
                  by="ENCOUNTERID")
      y_tr %<>% bind_rows(var_by_yr[["y_surv"]] %>%
                            left_join(rsample_idx %>% filter(cv10_idx<=6 & yr<2017),
                                      by="ENCOUNTERID"))
      X_ts %<>% bind_rows(var_by_yr[["X_surv"]]) %>%
        semi_join(rsample_idx %>% filter(cv10_idx>6 | yr>=2017),
                  by="ENCOUNTERID")
      y_ts %<>% bind_rows(var_by_yr[["y_surv"]] %>%
                            left_join(rsample_idx %>% filter(cv10_idx>6 | yr>=2017),
                                      by="ENCOUNTERID"))
    }
    lapse_i<-Sys.time()-start_tsk_i
    bm<-c(bm,paste0(round(lapse_i,1),units(lapse_i)))
    bm_nm<-c(bm_nm,"prepare data")
    #-----------------------
    for(fs_type in fs_type_opt){
      start_tsk_i<-Sys.time()
      #--pre-filter: "rm_scr_bun" drops the rm_key features and their
      #  "_change" derivatives (presumably serum creatinine / BUN, the
      #  AKI-defining labs -- TODO confirm rm_key contents upstream)
      if(fs_type=="rm_scr_bun"){
        X_tr %<>%
          filter(!(key %in% c(rm_key,paste0(rm_key,"_change"))))
        X_ts %<>%
          filter(!(key %in% c(rm_key,paste0(rm_key,"_change"))))
      }
      #--transform training matrix: one row per ENCOUNTERID_dsa_y
      y_tr %<>%
        filter(!is.na(cv10_idx)) %>%
        arrange(ENCOUNTERID,dsa_y) %>%
        unite("ROW_ID",c("ENCOUNTERID","dsa_y")) %>%
        arrange(ROW_ID) %>%
        unique
      X_tr_sp<-X_tr %>%
        arrange(ENCOUNTERID,dsa_y) %>%
        unite("ROW_ID",c("ENCOUNTERID","dsa_y")) %>%
        semi_join(y_tr,by="ROW_ID") %>%
        long_to_sparse_matrix(df=.,
                              id="ROW_ID",
                              variable="key",
                              val="value")
      #--collect variables used in training
      tr_key<-data.frame(key = unique(colnames(X_tr_sp)),
                         stringsAsFactors = F)
      #--transform testing matrix
      y_ts %<>%
        filter(!is.na(cv10_idx)) %>%
        arrange(ENCOUNTERID,dsa_y) %>%
        unite("ROW_ID",c("ENCOUNTERID","dsa_y")) %>%
        arrange(ROW_ID) %>%
        unique
      X_ts_sp<-X_ts %>%
        unite("ROW_ID",c("ENCOUNTERID","dsa_y")) %>%
        semi_join(y_ts,by="ROW_ID") %>%
        semi_join(tr_key,by="key")
      # features seen in training but absent from testing
      x_add<-tr_key %>%
        anti_join(data.frame(key = unique(X_ts$key),
                             stringsAsFactors = F),
                  by="key")
      #align with training: pad the test set with a dummy "0_0" row carrying
      #the missing keys so both sparse matrices share the same column space;
      #the dummy row is removed again right after the matrix is built
      if(nrow(x_add)>0){
        X_ts_sp %<>%
          arrange(ROW_ID) %>%
          bind_rows(data.frame(ROW_ID = rep("0_0",nrow(x_add)),
                               dsa = -99,
                               key = x_add$key,
                               value = 0,
                               stringsAsFactors=F))
      }
      X_ts_sp %<>%
        long_to_sparse_matrix(df=.,
                              id="ROW_ID",
                              variable="key",
                              val="value")
      if(nrow(x_add)>0){
        X_ts_sp<-X_ts_sp[-1,]
      }
      #check alignment between feature rows and label rows
      if(!all(row.names(X_tr_sp)==y_tr$ROW_ID)){
        stop("row ids of traning set don't match!")
      }
      if(!all(row.names(X_ts_sp)==y_ts$ROW_ID)){
        stop("row ids of testing set don't match!")
      }
      if(!all(colnames(X_tr_sp)==colnames(X_ts_sp))){
        stop("feature names don't match!")
      }
      #--covert to xgb data frame
      dtrain<-xgb.DMatrix(data=X_tr_sp,label=y_tr$y)
      dtest<-xgb.DMatrix(data=X_ts_sp,label=y_ts$y)
      lapse_i<-Sys.time()-start_tsk_i
      bm<-c(bm,paste0(round(lapse_i,1),units(lapse_i)))
      bm_nm<-c(bm_nm,"transform data")
      cat(paste0(c(pred_in_d,pred_task,fs_type),collapse = ","),
          "...finish formatting training and testing sets.\n")
      #-----------------------
      start_tsk_i<-Sys.time()
      #--get indices for k folds: reuse the pre-assigned cv10_idx so CV folds
      #  are stable across runs and feature-set types
      y_tr %<>% dplyr::mutate(row_idx = 1:n())
      folds<-list()
      for(fd in seq_len(max(y_tr$cv10_idx))){
        fd_df<-y_tr %>%
          filter(cv10_idx==fd) %>%
          dplyr::select(row_idx)
        folds[[fd]]<-fd_df$row_idx
      }
      #--tune hyperparameter: CV over every row of grid_params, tracking the
      #  best metric value and the boosting round at which it was reached
      verb<-TRUE
      bst_grid<-c()
      bst_grid_cv<-c()
      metric_name<-paste0("test_", eval_metric,"_mean")
      metric_sd_name<-paste0("test_", eval_metric,"_std")
      for(i in seq_len(dim(grid_params)[1])){
        start_i<-Sys.time()
        param<-as.list(grid_params[i,])
        # param$scale_pos_weight=mean(train$y_train$DKD_IND_additive) #inbalance sampling
        param$scale_pos_weight=1 #balance sampling
        bst <- xgb.cv(param,
                      dtrain,
                      objective = objective,
                      metrics = eval_metric,
                      maximize = TRUE,
                      nrounds=1000,
                      # nfold = 5,
                      folds = folds,
                      early_stopping_rounds = 50,
                      print_every_n = 50,
                      prediction = T) #keep cv results
        bst_grid<-rbind(bst_grid, cbind(grid_params[i,],
                                        metric=max(bst$evaluation_log[[metric_name]]),
                                        steps=which(bst$evaluation_log[[metric_name]]==max(bst$evaluation_log[[metric_name]]))[1]))
        bst_grid_cv<-cbind(bst_grid_cv,bst$pred)
        if(verb){
          cat(paste0(c(pred_in_d,pred_task,fs_type),collapse = ","),
              '...finished train case:',paste0(paste0(c(colnames(grid_params),"scale_pos_weight"),"="),param,collapse="; "),
              'in',Sys.time()-start_i,units(Sys.time()-start_i),"\n")
          start_i<-Sys.time()
        }
      }
      # best hyper-parameter row by CV metric
      hyper_param<-bst_grid[which.max(bst_grid$metric),]
      lapse_i<-Sys.time()-start_tsk_i
      bm<-c(bm,paste0(round(lapse_i,1),units(lapse_i)))
      bm_nm<-c(bm_nm,"tune model")
      cat(paste0(c(pred_in_d,pred_task,fs_type),collapse = ","),
          "...finish model tunning.\n")
      #-----------------------
      start_tsk_i<-Sys.time()
      #--validation: refit on the full training set with the tuned settings,
      #  then score the held-out test set
      xgb_tune<-xgb.train(data=dtrain,
                          max_depth=hyper_param$max_depth,
                          maximize = TRUE,
                          eta=hyper_param$eta,
                          nrounds=hyper_param$steps,
                          eval_metric="auc",
                          objective="binary:logistic",
                          print_every_n = 100)
      valid<-data.frame(y_ts,
                        pred = predict(xgb_tune,dtest),
                        stringsAsFactors = F)
      #--feature importance
      feat_imp<-xgb.importance(colnames(X_tr_sp),model=xgb_tune)
      lapse_i<-Sys.time()-start_tsk_i
      bm<-c(bm,paste0(round(lapse_i,1),units(lapse_i)))
      bm_nm<-c(bm_nm,"validate model")
      cat(paste0(c(pred_in_d,pred_task,fs_type),collapse = ","),
          "...finish model validating.\n")
      #-----------------------
      #--save model and other results as a single list per combination
      result<-list(hyper=bst_grid,
                   model=xgb_tune,
                   valid=valid,
                   feat_imp=feat_imp)
      saveRDS(result,file=paste0("./data/model_ref/pred_in_",pred_in_d,"d_",fs_type,"_",pred_task,".rda"))
      #-------------------------------------------------------------------------------------------------------------
      lapse_tsk<-Sys.time()-start_tsk
      bm<-c(bm,paste0(round(lapse_tsk,1),units(lapse_tsk)))
      bm_nm<-c(bm_nm,"complete task")
      cat("\nFinish building reference models for task:",pred_task,"in",pred_in_d,"with",fs_type,",in",lapse_tsk,units(lapse_tsk),
          ".\n--------------------------\n")
      #benchmark: per-step wall-clock times for this combination
      bm<-data.frame(bm_nm=bm_nm,bm_time=bm,
                     stringsAsFactors = F)
      saveRDS(bm,file=paste0("./data/model_ref/pred_in_",pred_in_d,"d_bm_gbm_",fs_type,"_",pred_task,".rda"))
    }
  }
}
#' For each prediction task, defined as "predict AKI stage X in Y days, with/without Scr", two intermediate data files have been generated and saved in `./data/model_ref/...`:
#'
#' * `pred_in_..._...rda`: a list holding the tuned hyper-parameter grid (`hyper`), the final gbm model (`model`), the predicted probabilities on the validation set (`valid`), and the variable-importance table (`feat_imp`);
#'
#' * `pred_in_..._bm_gbm_...rda`: the wall-clock benchmark for each step of the task.
#'
#' #### Objective 2.3: Performance Evaluations for Benchmark Model
# Free memory from the modeling stage, keeping only the objects the
# evaluation below still needs.
# FIX: "pred_task_lst" was missing from the keep-list, so ls() cleanup
# deleted it and the loop below failed at seq_along(pred_task_lst).
rm(list=ls()[!(ls() %in% c("pred_in_d_opt","fs_type_opt","pred_task_lst",
                           "get_perf_summ","get_calibr"))]);
gc() #release some memory
# Collect performance, calibration and variable-importance summaries for each
# (lead time x feature-set) combination across all prediction tasks.
for(pred_in_d in pred_in_d_opt){
  for(fs_type in fs_type_opt){
    perf_tbl_full<-c()
    perf_tbl<-c()
    calib_tbl<-c()
    varimp_tbl<-c()
    for(i in seq_along(pred_task_lst)){
      valid_out<-readRDS(paste0("./data/model_ref/pred_in_",pred_in_d,"d_",fs_type,"_",pred_task_lst[i],".rda"))
      valid<-valid_out$valid
      #overall summary (all cutoffs kept for the full table)
      perf_summ<-get_perf_summ(pred=valid$pred,
                               real=valid$y,
                               keep_all_cutoffs=T)
      perf_tbl_full %<>%
        bind_rows(perf_summ$perf_at %>%
                    dplyr::mutate(pred_task=pred_task_lst[i],pred_in_d=pred_in_d,fs_type=fs_type))
      perf_tbl %<>%
        bind_rows(perf_summ$perf_summ %>%
                    dplyr::mutate(pred_task=pred_task_lst[i],pred_in_d=pred_in_d,fs_type=fs_type))
      #calibration over 20 probability bins
      calib<-get_calibr(pred=valid$pred,
                        real=valid$y,
                        n_bin=20)
      calib_tbl %<>%
        bind_rows(calib %>%
                    dplyr::mutate(pred_task=pred_task_lst[i],pred_in_d=pred_in_d,fs_type=fs_type))
      #variable importance: Gain rescaled to percent of the top feature
      varimp<-valid_out$feat_imp %>%
        dplyr::mutate(rank=1:n(),
                      Gain_rescale=round(Gain/Gain[1]*100)) %>%
        dplyr::select(rank,Feature,Gain_rescale)
      varimp_tbl %<>%
        bind_rows(varimp %>%
                    mutate(pred_task=pred_task_lst[i],pred_in_d=pred_in_d,fs_type=fs_type,tot_feature=nrow(varimp)))
    }
    perf_out<-list(perf_tbl_full=perf_tbl_full,
                   perf_tbl=perf_tbl,
                   calib_tbl=calib_tbl,
                   varimp_tbl=varimp_tbl)
    #save results as r data.frame
    saveRDS(perf_out,file=paste0("./data/model_ref/pred_in_",pred_in_d,"d_",fs_type,"_baseline_model_perf.rda"))
  }
}
|
203232ebb69b2194b127798656f099111e7e3aec
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/rminizinc/man/StringSetDecl.Rd
|
7380c1cfc34c0b828e14c0277db73fdd0ce3b587
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
StringSetDecl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DeclFunctions.R
\name{StringSetDecl}
\alias{StringSetDecl}
\title{set of string declaration}
\usage{
StringSetDecl(name, kind, value = NULL)
}
\arguments{
\item{name}{variable/parameter name}
\item{kind}{"var" or "par"}
\item{value}{value of the set (or NULL)}
}
\description{
declare a new set of string
}
|
b4821938bb036274407890d889c029f0400c6914
|
0a8e1407476a41d1d9a27db0d8d8f22143c6d823
|
/R/prep.score.files.R
|
b7a8bc67fa0e194d1b81481ec9195b8fee474997
|
[] |
no_license
|
cran/sumFREGAT
|
de8ab58682dc81871a6a0f280d3db22d738f8d25
|
f0e227c90a1c8fbadf4834a564a949e5f7ac1d42
|
refs/heads/master
| 2022-06-29T17:49:16.481191
| 2022-06-07T07:10:13
| 2022-06-07T07:10:13
| 117,830,472
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,178
|
r
|
prep.score.files.R
|
# sumFREGAT (2017-2022) Gulnara R. Svishcheva & Nadezhda M. Belonogova, ICG SB RAS
#' Prepare score files for sumFREGAT
#'
#' Converts a table of GWAS summary statistics into a bgzipped, tabix-indexed
#' VCF of Z scores (optionally with SE(beta), allele frequency, user weights
#' and annotations) for downstream sumFREGAT gene-based tests. Map data,
#' allele frequencies and REF/ALT alleles that are missing from the input are
#' looked up in the reference file, matching first by variant ID and then by
#' chromosome:position.
#'
#' FIX: the allele-mismatch report previously printed sum(v) where v is a
#' which() index vector, i.e. the sum of row indices rather than the number
#' of mismatching variants; it now reports length(v).
#'
#' @param data a file name or a data.frame with at least 'ID' and 'P'
#'   columns (case-insensitive synonyms for CHROM/POS/P/ID/EA are remapped).
#' @param reference reference file name (or preloaded reference data.frame).
#' @param output.file.prefix optional prefix for the output '.vcf.gz';
#'   defaults to the input file name.
prep.score.files <- function(data, reference = 'ref1KG.MAC5.EUR_AF.RData', output.file.prefix) {
	# expected canonical columns: 'CHROM', 'POS', 'ID', 'EA', 'P', 'BETA', 'EAF'
	if (length(data) == 1) {
		input.file <- data
		# prefer data.table::fread for speed when available
		if (requireNamespace("data.table", quietly = TRUE)) {
			suppressWarnings(df <- data.table::fread(input.file, header = TRUE, data.table = FALSE))
		} else {
			df <- read.table(input.file, header = TRUE, as.is = TRUE)
		}
	} else if (length(data) > 1) {
		df <- data
		input.file <- 'scores'
	}
	# normalize column names: map common synonyms onto the canonical names
	cn <- toupper(colnames(df))
	v <- which(cn %in% c('CHR', 'CHROMOSOME', 'CHROM'))
	if (length(v) == 1) colnames(df)[v] <- 'CHROM'
	v <- which(cn %in% c('POSITION', 'POSITIONS', 'MAP', 'POS'))
	if (length(v) == 1) colnames(df)[v] <- 'POS'
	v <- which(cn %in% c('PVALUE', 'PV', 'PVAL', 'P.VALUE', 'P_VALUE', 'P'))
	if (length(v) == 1) colnames(df)[v] <- 'P'
	v <- which(cn %in% c('RSID', 'RS.ID', 'RS_ID', 'SNP.ID', 'SNP_ID', 'ID'))
	if (length(v) == 1) colnames(df)[v] <- 'ID'
	v <- which(cn == 'EA')
	if (length(v) == 1) {
		colnames(df)[v] <- 'EFFECT.ALLELE'
		df[, 'EFFECT.ALLELE'] <- toupper(df[, 'EFFECT.ALLELE'])
	}
	# ID and PVAL mandatory
	# others from user file or reference
	ColNames <- c('ID', 'P')
	v <- !ColNames %in% colnames(df)
	if (sum(v)) stop(paste("Mandatory column(s) missing:", paste(ColNames[v], collapse = ', ')))
	df <- df[!is.na(df$P) & !is.na(df$ID), ]
	if (dim(df)[1] == 0) stop("No values assigned for P or ID")
	ColNames <- c('CHROM', 'POS', 'EAF')
	v <- !ColNames %in% colnames(df)
	take <- ColNames[v]
	if (sum(v)) print(paste("Columns that are missing and will be looked for in reference data:", paste(take, collapse = ', ')))
	take[take == 'EAF'] <- 'AF'
	if ('BETA' %in% colnames(df)) {
		# zero betas would zero the Z sign; replace with a tiny positive value
		df$BETA[df$BETA == 0] <- 1e-16
		if ('EFFECT.ALLELE' %in% colnames(df)) {
			# keep user REF/ALT aside; reference alleles will be authoritative
			colnames(df)[which(colnames(df) == 'REF')] <- 'REF0'
			colnames(df)[which(colnames(df) == 'ALT')] <- 'ALT0'
			take <- c(take, 'REF', 'ALT')
		} else {
			print("Effect allele column not found, effect sizes cannot be linked")
		}
	} else {
		print("Effect sizes (beta) column not found")
	}
	if (length(take) > 0) {
		is.ref <- 0
		is.ref.object <- 0
		if (length(reference) == 1) {
			if (!is.na(reference)) {
				if (file.exists(reference)) {
					is.ref <- 1
				} else {
					if (reference != '') print ("Reference file not found! Please download it from https://mga.bionet.nsc.ru/sumFREGAT/ref1KG.MAC5.EUR_AF.RData to use 1000 Genome Reference correlation matrices")
				}
			}
		} else if (length(reference) > 1) is.ref <- is.ref.object <- 1
		if (is.ref) {
			if (is.ref.object) {
				ref <- reference
			} else {
				print('Loading reference file...')
				ref <- get(load(reference))
			}
			colnames(ref) <- toupper(colnames(ref))
			if ('CHROM' %in% take & !'CHROM' %in% colnames(ref)) stop ("No CHROM column in data and reference")
			if ('POS' %in% take & !'POS' %in% colnames(ref)) stop ("No POS column in data and reference")
			# primary match by variant ID; fall back to chrom:pos keys
			v <- match(df$ID, ref$ID)
			if (!sum(v, na.rm = TRUE)) {
				if (all(c('CHROM', 'POS') %in% colnames(df))) {
					df$ind <- paste(df$CHROM, df$POS, sep = ':')
					print('No IDs matching, trying to link through map data...')
					ref$ind <- paste(ref$CHROM, ref$POS, sep = ':')
					v <- match(df$ind, ref$ind)
					if (sum(!is.na(v)) < (length(v) / 2)) {
						print("Too few variants match between input file and reference data")
						v <- NA
					}
				}
			}
			if (sum(v, na.rm = TRUE)) {
				print(paste(sum(!is.na(v)), "of", length(v), "variants found in reference"))
				vv <- take %in% colnames(ref)
				if (sum(!vv)) {
					print(paste("Columns that are missing in reference data:", paste(take[!vv], collapse = ', ')))
					if ('REF' %in% take & !'REF' %in% colnames(ref)) {
						print ("Reference alleles not found, effect sizes cannot be linked")
						df$BETA <- df$EFFECT.ALLELE <- NULL
					}
					if ('AF' %in% take & !'AF' %in% colnames(ref)) print ("Allele frequencies not found, some weighted tests will be unavailable")
				}
				df <- cbind(df, ref[v, take[vv]])
			}
		} else {
			v <- NA
		}
		if (sum(v, na.rm = TRUE) == 0) { # fail to open or link reference data
			if (any(c('CHROM', 'POS') %in% take)) stop ("Cannot find map data (chromosome, position)")
			if ('BETA' %in% colnames(df)) {
				warning ("Reference unavailable, effect sizes not linked")
				df$BETA <- df$EFFECT.ALLELE <- NULL
			}
		}
	}
	if ('REF' %in% colnames(df) & 'EFFECT.ALLELE' %in% colnames(df)) {
		# collect variants whose alleles disagree with the reference
		v <- c()
		if (all(c('REF', 'REF0', 'ALT', 'ALT0') %in% colnames(df))) {
			v <- which((df$REF0 != df$REF & df$REF0 != df$ALT) | (df$ALT0 != df$REF & df$ALT0 != df$ALT))
		}
		if ('ALT' %in% colnames(df)) {
			v <- unique(c(v, which(df$EFFECT.ALLELE != df$REF & df$EFFECT.ALLELE != df$ALT)))
		}
		# FIX: report the number of mismatching variants (length), not sum of indices
		if (length(v) > 0) {
			print(paste("Effect alleles or REF/ALT alleles do not match reference data for", length(v), "variant(s)"))
			df[v, 'BETA'] <- NA
		}
		df[is.na(df$EFFECT.ALLELE) | is.na(df$REF), 'BETA'] <- NA
		# flip sign (and EAF) when the effect allele is the reference allele
		v <- which(df$EFFECT.ALLELE == df$REF)
		#here we go
		df$BETA[v] <- -df$BETA[v]
		if ('EAF' %in% colnames(df)) {
			df$EAF[v] <- 1 - df$EAF[v]
			colnames(df)[colnames(df) == 'EAF'] <- 'AF'
		}
		print(paste('Effect sizes recoded for', length(v), 'variant(s)'))
	}
	if (any(df$P == 0)) {
		print("Some P values equal zero, will be assigned to minimum value in the sample")
		df$P[df$P == 0] <- min(df$P[df$P > 0])
	}
	# two-sided P -> |Z|; sign restored from beta when available
	df$Z <- qnorm(df$P / 2, lower.tail = FALSE)
	if ('BETA' %in% colnames(df)) {
		df$Z <- df$Z * sign(df$BETA)
		df$SE.BETA <- df$BETA / df$Z
	}
	if (!missing(output.file.prefix)) {
		fn <- paste(output.file.prefix, 'vcf', sep = '.')
	} else {
		fn <- paste(input.file, 'vcf', sep = '.')
	}
	# VCF requires sorting by chromosome, then position
	df <- df[order(df[, 'POS']), ]
	df <- df[order(df[, 'CHROM']), ]
	if (!'ALT' %in% colnames(df)) df$ALT <- NA
	if (!'REF' %in% colnames(df)) df$REF <- NA
	vcf <- df[, c('CHROM', 'POS', 'ID', 'REF', 'ALT')]
	colnames(vcf)[1] <- '#CHROM'
	vcf$POS <- format(vcf$POS, scientific = FALSE)
	vcf$POS <- gsub(' ', '', vcf$POS)
	vcf <- cbind(vcf, QUAL = '.', FILTER = '.')
	# assemble the INFO field and matching header lines
	vcf$INFO <- paste0('Z=', df$Z)
	title <- c('##INFO=<ID=Z,Number=1,Type=Float,Description="Z statistics">')
	if ('BETA' %in% colnames(df)) {
		vcf$INFO <- paste0(vcf$INFO, ';SE.Beta=', df$SE.BETA)
		title <- c(title, '##INFO=<ID=SE.Beta,Number=1,Type=Float,Description="SE Beta">')
	}
	if ('EAF' %in% colnames(df)) colnames(df)[colnames(df) == 'EAF'] <- 'AF'
	if ('AF' %in% colnames(df)) {
		vcf$INFO <- paste0(vcf$INFO, ';AF=', df$AF)
		title <- c(title, '##INFO=<ID=AF,Number=1,Type=Float,Description="Frequency of alternative allele">')
		print(paste0('Allele frequencies found and linked'))
	}
	a <- grep('\\bW', colnames(df))
	if (length(a) == 1) {
		vcf$INFO <- paste0(vcf$INFO, ';W=', df[, a])
		title <- c(title, '##INFO=<ID=W,Number=1,Type=Float,Description="Weights">')
		print(paste0("User weights ('", colnames(df)[a], "') found and linked"))
	}
	a <- grep('\\bANNO', colnames(df), value = TRUE)
	if (length(a) == 1) {
		vcf$INFO <- paste0(vcf$INFO, ';ANNO=', df[, a])
		title <- c(title, '##INFO=<ID=ANNO,Number=1,Type=String,Description="Variants annotations">')
		print(paste0("Annotations ('", colnames(df)[a], "') found and linked"))
	}
	a <- grep('\\bPROB', colnames(df), value = TRUE)
	for (an in a) {
		vcf$INFO <- paste0(vcf$INFO, ';', an, '=', df[, as.character(an)])
		title <- c(title, paste0("##INFO=<ID=", an, ",Number=1,Type=Float,Description='", an, "'>"))
		print(paste0("Column '", an, "' linked"))
	}
	# write header then body, compress with bgzip and index with tabix
	# (assumes bgzip/tabix are on the PATH -- TODO confirm in installation docs)
	write.table(title, fn, col.names = FALSE, row.names = FALSE, quote = FALSE, sep = '\t')
	if (requireNamespace("data.table", quietly = TRUE)) {
		suppressWarnings(data.table::fwrite(vcf, fn, row.names = FALSE, quote = FALSE, append = TRUE, col.names = TRUE, sep = '\t', na = 'NA'))
	} else {
		suppressWarnings(write.table(vcf, fn, row.names = FALSE, quote = FALSE, append = TRUE, sep = '\t'))
	}
	fn.gz <- paste(fn, 'gz', sep = '.')
	if (file.exists(fn.gz)) system(paste('rm', fn.gz))
	system(paste('bgzip', fn))
	system(paste('tabix -p vcf', fn.gz))
	print(paste('File', fn.gz, 'has been created'))
}
|
0e98d8651f97d71768d8657a6a0fb42d84db8162
|
c750c1991c8d0ed18b174dc72f3014fd35e5bd8c
|
/pkgs/oce/tests/testthat/test_lisst.R
|
1240f5a2de104b74985cfe2ba903b394026e6da3
|
[] |
no_license
|
vaguiar/EDAV_Project_2017
|
4b190e66fe7a6b4078cfe1b875bccd9b5a594b25
|
288ffaeec1cfdd873fe7439c0fa0c46a90a16a4f
|
refs/heads/base
| 2021-01-23T02:39:36.272851
| 2017-05-01T23:21:03
| 2017-05-01T23:21:03
| 86,010,131
| 1
| 0
| null | 2017-05-01T23:43:04
| 2017-03-24T00:21:20
|
HTML
|
UTF-8
|
R
| false
| false
| 1,421
|
r
|
test_lisst.R
|
## vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
library(oce)
context("LISST")
# Builds a synthetic 42-column LISST instrument data matrix (32 size-class
# columns plus engineering/time/pressure columns) and checks that as.lisst()
# accepts it and that summary() runs without error.
test_that("as.lisst()", {
  set.seed(1333334L)  # make the simulated noise reproducible
  # time grid: 6 hours sampled every 1/15 hour (4 min), anchored at 2012-01-01 UTC
  t <- seq(0, 6, 1/15) * 3600 + as.POSIXct("2012-01-01 00:00:00", tz="UTC")
  n <- length(t)
  # pressure with a tidal-like (12.4 h) oscillation plus noise
  p <- 5 + sin(as.numeric(t - t[1]) / 12.4 / 3600 * 2 * pi) + rnorm(n, sd=0.01)
  dpdt <- c(0, diff(p))
  # temperature: daily cycle plus a random walk
  T <- 10 + 5 * sin(as.numeric(t - t[1]) / 24 / 3600 * 2 * pi) + cumsum(rnorm(n, sd=0.2))
  C <- (dpdt + rnorm(n, sd=0.1) + cumsum(rnorm(n, sd=0.5)))^2 * 2
  sd <- rep(1, length.out=32) + (1:32) / 100
  data <- matrix(nrow=n, ncol=42)
  # columns 1..32: fake scattering/size-class signals derived from C
  for (i in 1:32) {
    fake <- abs(C * (1 + i / 5) + cumsum(rnorm(n, sd=sd[i]))) / 100
    data[,i] <- fake
  }
  data[,33] <- rep(0, n) # lts
  data[,34] <- rep(4, n) + 0.01 * cumsum(rnorm(n, 0.05)) # voltage
  data[,35] <- rep(0.07, n) # aux
  data[,36] <- runif(n, 3.9, 4.1) # lrs
  data[,37] <- p
  data[,38] <- T
  # columns 39-40 encode day/hour and minute/second in the LISST packed format
  tt <- as.POSIXlt(t)
  data[,39] <- 100 * tt$yday + tt$hour
  data[,40] <- 100 * tt$min + tt$sec
  data[,41] <- abs((p - min(p)) / diff(range(p)) + cumsum(rnorm(n, sd=0.05))) # transmission
  data[,42] <- 40 - 20*data[,41] # beam
  lisst <- as.lisst(data, filename="(constructed)", year=2012, "UTC")
  summary(lisst)
})
|
24f50ed5f06755ea3a5f72400a68d68c1504644b
|
c5d2e15e3459939d040db7f64c876dfc29b0fa4b
|
/getdata_project/run_analysis.R
|
acb039156cf713a8af4a973f1520da2c81af826f
|
[] |
no_license
|
heyroman/DSS_Assignments
|
3307c49c1730b8b00d2a1facb829e910e203a2be
|
3e1762b11e9d27b3d47406c645cd93a9a4e875a7
|
refs/heads/master
| 2021-07-14T03:43:37.551772
| 2016-01-07T14:15:10
| 2016-01-07T14:15:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
r
|
run_analysis.R
|
library(plyr)
source('./create_subset.R')
# read train and test data in two datasets created by create_subset() function
trainset <- create_subset('./X_train.txt', './y_train.txt', './subject_train.txt')
testset <- create_subset('./X_test.txt', './y_test.txt', './subject_test.txt')
# create full dataset
fulldata <- rbind(trainset, testset) # merge train and test data
fulldata <- arrange(fulldata, Subject) # sort data just fo convenience
# label activities: replace numeric activity codes with descriptive labels
activities <- read.table('./activity_labels.txt') # read activity labels
activities <- activities[, 2] # take only second column that contains strings
# NOTE(review): cut() works here only because activity codes are the
# consecutive integers 1..length(activities); factor(..., levels=, labels=)
# would express the intent more directly -- verify against the label file.
fulldata$Activity <- cut(fulldata$Activity, length(activities), labels = activities) # assign string values to activities in dataset
# creating dataset of means: average every numeric column per subject/activity
fulldata <- ddply(fulldata, .(Subject, Activity), numcolwise(mean))
# writing data to a file
write.table(fulldata, file = './tidy_data_set.txt', row.names = FALSE)
|
669c4584618181bed4f1dc38b4a5d0c2e95a6c1e
|
2a6b1b93b9388fb6c8f289efec52bb2f50963eb0
|
/Sotkanet/tmp.R
|
77af55de11c37fd29b37e4bc95eb524aa58ba2ff
|
[] |
no_license
|
louhos/takomo
|
be80209cf3ee0f1773648d8b127219ad212ae487
|
7af1752f14821b879f80f052bebcc97ba5ff5804
|
refs/heads/master
| 2021-01-17T12:25:09.186161
| 2016-07-18T12:14:39
| 2016-07-18T12:14:39
| 3,610,040
| 8
| 4
| null | 2015-08-10T19:10:22
| 2012-03-03T10:49:30
|
R
|
UTF-8
|
R
| false
| false
| 2,208
|
r
|
tmp.R
|
# NOTE(review): this appears to be exploratory scratch code -- several objects
# (`dats`, `indicator`, `idx`, `sotkanet.df`, `maakunnat`) are used before or
# without being defined in this file, and `corlist` is consumed (line below)
# before the loop that builds it. Verify intended execution order before reuse.
sotkanet.indicators <- SotkanetIndicators(type = "table")
as.character(sotkanet.indicators[sotkanet.indicators$indicator == indicator, "indicator.title.fi"])
# Sort indicators by median time correlation
s <- names(sort(sapply(corlist, function (x) {median(na.omit(x))})))
#> unname(sapply(s[1:10], function (nam) {as.character(unique(dats[[nam]]$indicator.title.fi))}))
# Pick some indicators for closer inspection
selected.indicators <- c("Väestö, keskiväkiluku",
                         "Yksityisten lääkäripalvelujen kustannukset, 1 000 euroa")
                        #"Korkea-asteen koulutuksen saaneet, % 15 vuotta täyttäneistä",
                        #"16-24 -vuotiaat, % väestöstä",
                        #"Korkea-asteen koulutuksen saaneet, % 15 vuotta täyttäneistä",
                        #"Muu kuin suomi, ruotsi tai saame äidinkielenä / 1000 asukasta")
                        #"Alkoholijuomien myynti asukasta kohti 100 %:n alkoholina, litraa")
# For each indicator,
# Correlate indicators with time in each municipality
corlist <- list()
for (i in names(dats)) {
  dat <- dats[[i]];
  dat <- dat[order(dat$year), ];
  dat <- dat[!duplicated(dat),];
  spl <- split(1:nrow(dat), dat$region.title.fi);
  cors <- sapply(spl, function(inds) {cor(dat$year[inds], dat$primary.value[inds])})
  corlist[[i]] <- cors
}
# Filter out aggregate/irrelevant indicator rows before further analysis
#sotkanet.indicators <- sotkanet.indicators[grep("oppilaista", sotkanet.indicators[, 2]),]
remove <- grep("EU", sotkanet.indicators$indicator.title.fi)
remove <- c(remove, grep("Pohjoismaat", sotkanet.indicators$indicator.title.fi))
remove <- c(remove, grep("ikävakioimaton", sotkanet.indicators$indicator.title.fi))
remove <- c(remove, grep("Vammojen ja myrkytysten", sotkanet.indicators$indicator.title.fi))
sotkanet.indicators <- sotkanet.indicators[-remove,]
#idx <- 1:78
#idx <- 1:42
#idx <- 1:6
#idx <- c(idx, grep("15-24", sotkanet.indicators[,2]))
# NOTE(review): `idx` is only defined in the commented lines above, so this
# line errors if run as-is.
idx <- c(idx, grep("opiskelijoista", sotkanet.indicators[,2]))
inds <- grep("keskiv", sotkanet.df$indicator.title.fi)
inds <- c(inds, grep("opiskelij", sotkanet.df$indicator.title.fi))
sotkanet.df <- sotkanet.df[inds,]
#municipality.info <- GetMunicipalityInfo()
#kunta.maakunta <- FindProvince(as.character(sotkanet.df$region.title.fi), municipality.info)
#regs <- cbind(, Maakunta = maakunnat)
|
5dd62cc62d1f34cd457c8d5c560e085ccf1f53de
|
9f10dda88e199a456430f2141cc099db2807d645
|
/tests/testthat/test_data.R
|
ff400561a6d1f5fb57eae145fd7f3a9bfb3d28bb
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ldecicco-USGS/wateRuse_swuds
|
056e5f6a5e0369af4a44237c92d99b4b00202a15
|
feba0c21728c230e8793439bd7ee914b6ff1bac5
|
refs/heads/master
| 2020-05-26T00:27:27.303813
| 2019-12-16T20:27:58
| 2019-12-16T20:27:58
| 188,052,203
| 0
| 0
| null | 2019-05-22T14:10:17
| 2019-05-22T14:10:17
| null |
UTF-8
|
R
| false
| false
| 785
|
r
|
test_data.R
|
context("Sample Data")
# Smoke tests: load the packaged sample data and check the expected column /
# row counts survive reading, merging, and melting.
test_that("Data", {
  testthat::skip_on_cran()
  # NOTE(review): trivial sanity assertion; could be removed
  expect_equal(1, 1)
  expect_equal(ncol(swuds_sample), 154)
  # Test loading:
  path_to_sample <- system.file("extdata", package = "WUReview")
  # Read in the water quantity table
  dq <- read_swuds_quant(file.path(path_to_sample,
                                   "OH_CTF_SW_monthly_permit_sample_data.xlsx"))
  expect_equal(ncol(dq), 109)
  # Read in the population served table
  dp <- read_swuds_pop(file.path(path_to_sample,
                                 "OHpopserved_output.xlsx"))
  expect_equal(ncol(dp), 51)
  # merge the tables
  df <- merge_dq_dp(dq, dp)
  expect_equal(ncol(df), 157)
  #melt the table
  df_melt <- melt_water_quant_pop(df)
  expect_equal(nrow(df_melt), 11988)
})
|
e7b0124d39bb2a40056c1790ce410fa2d7a28803
|
e23209ff19f5ad5bcc4f94b02dbd1c8c79659ed9
|
/R/companies.R
|
e5a9e34feda1070c1e06f4e76bb9fa12bbedfb5f
|
[] |
no_license
|
Pedromoisescamacho/intrinio.tcg
|
b18942fa7e58d8ea06203831a5dcc3b51eb32881
|
ad0a3457556700a92cb64131068ad1fd958c323a
|
refs/heads/master
| 2020-12-02T08:11:44.414611
| 2017-07-17T16:24:52
| 2017-07-17T16:24:52
| 96,784,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,687
|
r
|
companies.R
|
#' List all companies covered by Intrinio
#'
#' Pages through the Intrinio "companies" endpoint and returns a table of all
#' covered companies. One API credit is consumed per page.
#'
#' NOTE(review): relies on `username` and `password` existing in the calling
#' environment for HTTP basic authentication -- confirm they are set upstream.
#'
#' @param api_credits logical; if TRUE, return a list with the companies table
#'   and the number of API credits consumed (one per page). If FALSE
#'   (default), return just the table.
companies <- function(api_credits = FALSE) {
  library(jsonlite); library(httr); library(reshape)
  base <- "https://api.intrinio.com/companies"
  # First page also tells us how many pages there are in total.
  tp <- GET(base, authenticate(username, password, type = "basic"))
  z <- suppressMessages(unlist(content(tp, as = "text")))
  list <- suppressMessages(fromJSON(z))
  # Build URLs for the remaining pages.
  # FIX: the original used 2:list$total_pages, which counts DOWN to c(2, 1)
  # when total_pages == 1 and re-fetches page 1; guard with an empty sequence.
  pages <- if (list$total_pages > 1) 2:list$total_pages else integer(0)
  calls <- paste0(base, "?page_number=", pages)
  # Fetch each remaining page and keep its companies table.
  table_list <- lapply(calls, function(y) {
    tp2 <- GET(y, authenticate(username, password, type = "basic"))
    z2 <- suppressMessages(unlist(content(tp2, as = "text")))
    suppressMessages(fromJSON(z2))[[1]]
  })
  # Stack page 1 with all remaining pages (do.call(rbind, list()) is NULL,
  # which rbind() safely ignores).
  finaltable <- rbind(list[[1]], do.call(rbind, table_list))
  if (api_credits == TRUE) {
    list(table = finaltable, apicredits = list$total_pages)
  } else {
    finaltable
  }
}
|
4edc3fa3129b04f80e29349c5c8cc08e14ad0b01
|
f387c0a78caadf57b4cc783ad0ef5554fc352fb0
|
/ch2vms/man/load_wc_logbook.Rd
|
4706f22c73c78e3a653ecb5b887a0b6f3b324fd2
|
[] |
no_license
|
peterkuriyama/ch2vms
|
9bb1285d4e6e863186e048b543bb701625254afd
|
6a4b7ebabc9698472b02539ea86cef10abf630ab
|
refs/heads/master
| 2020-07-03T14:25:13.218193
| 2017-07-05T00:09:49
| 2017-07-05T00:09:49
| 74,161,557
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
load_wc_logbook.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_wc_logbook.R
\name{load_wc_logbook}
\alias{load_wc_logbook}
\title{Load West Coast Logbook Data
Function to load West Coast Logbook data}
\usage{
load_wc_logbook()
}
\arguments{
\item{none}{This function takes no arguments.}
}
\description{
Load West Coast Logbook Data
Function to load West Coast Logbook data
}
\examples{
load_wc_logbook()
}
|
e2903616b79ca4d0b5c62b5f56bb89610fa7e84d
|
b537f77912b851a89c5b691311ed42b8ef2d68f0
|
/shinyapp/ui.R
|
36425aec4fb9b55a16992ef8a258585ab80b199c
|
[] |
no_license
|
lahdeaho/devdataprod-015
|
5b33ae0533bccabf50c8bbcc25e7fe696dd11228
|
a90f18563cd3222a62ac5f64f6255dfef069a773
|
refs/heads/master
| 2020-05-31T07:49:17.187786
| 2015-06-16T21:53:35
| 2015-06-16T21:54:19
| 37,555,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 857
|
r
|
ui.R
|
library(shiny)
# UI definition for the "Shiny Love Calculator" app: two name inputs, an
# action button that triggers the calculation, and a text output panel.
# (The score itself is computed server-side and rendered as `match`.)
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Shiny Love Calculator"),
  # Sidebar with the two name inputs and the trigger button
  sidebarPanel(
    p("Love calculator will calculate the love score based on two names."),
    p("You should type two names, e.g yours and your partners and see how much you are in love :)."),
    br(),
    textInput("FirstName", "Give first name:"),
    textInput("SecondName", "Give second name:"),
    br(),
    actionButton("goButton", "Calculate"),
    p("After typing the names, click the button to calculate your love score.")
  ),
  # Main panel: displays the server's `match` output (the computed score)
  mainPanel(
    verbatimTextOutput("match")
  )
))
|
b103dae479996be82ec17856b2d8b767a843417f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/subscore/examples/subscore.Wainer.Rd.R
|
aa9c5ae62472bfaa6bbb2b1e4e0334298ae001c2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
subscore.Wainer.Rd.R
|
library(subscore)
### Name: subscore.Wainer
### Title: Estimating true subscores using Wainer's augmentation method
### Aliases: subscore.Wainer
### ** Examples
# Prepare a 4-subtest dataset (3/15/15/20 items) from the packaged scored data
test.data<-data.prep(scored.data,c(3,15,15,20),
                     c("Algebra","Geometry","Measurement", "Math"))
# Run Wainer's augmentation and inspect the summary and augmented subscores
subscore.Wainer(test.data)
subscore.Wainer(test.data)$summary
subscore.Wainer(test.data)$subscore.augmented
|
fb91a48b6b55f4756d1f483ee7fee98e1e678640
|
447700157cab10a9ed89a364803b0ba9caced38f
|
/run_analysis.R
|
2eabfba312fc45b1efb69a466ef2ac62fac89dfe
|
[] |
no_license
|
hyhy20/Getting-and-Cleaning-data-final-project
|
55b7fc23a8ac675b0328925f178c821b133de361
|
a84dc65945ba8185ef4ef09f81861eaa6eacf261
|
refs/heads/master
| 2022-07-18T03:14:13.182139
| 2020-05-18T07:36:17
| 2020-05-18T07:36:17
| 264,830,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
run_analysis.R
|
library(dplyr)
#Q1:Merges the training and the test sets to create one data set.
#Extract general files
file1 <- ("UCI HAR Dataset/features.txt")
features <- read.table(file1)
file2 <- ("UCI HAR Dataset/activity_labels.txt")
activity_label <- read.table(file2)
#Extract test files
test_file1 <- ("UCI HAR Dataset/test/y_test.txt")
test_y <- read.table(test_file1)
test_file2 <- ("UCI HAR Dataset/test/x_test.txt")
test_x <- read.table(test_file2)
test_file3 <- ("UCI HAR Dataset/test/subject_test.txt")
test_subject <- read.table(test_file3)
#Extract train files
tr_file1 <- ("UCI HAR Dataset/train/subject_train.txt")
tr_subject <- read.table(tr_file1)
tr_file2 <- ("UCI HAR Dataset/train/X_train.txt")
tr_x <- read.table(tr_file2)
tr_file3 <- ("UCI HAR Dataset/train/Y_train.txt")
tr_y <- read.table(tr_file3)
#Make test data dataframe
test_data <- data.frame(test_subject,test_y,test_x)
#Make train data dataframe
train_data <- data.frame(tr_subject,tr_y,tr_x)
#Combine two together: rows = all observations, columns named from features.txt
data <- rbind(test_data,train_data)
colnames(data) <- c("subject","activity",features[,2])
#Q2:Extracts only the measurements on the mean and standard deviation.
# NOTE(review): the result of tolower() is discarded -- column names are NOT
# actually lowercased. Assigning it (names(data) <- tolower(names(data)))
# would change which columns the grep("mean") call below matches (it would
# also pick up the angle(...Mean...) features), so confirm the intended
# column set before "fixing" this.
tolower(names(data))
mean <- data[,grep("mean", names(data))]
std <- data[,grep("std",names(data))]
final_data <- cbind(data[,1:2], mean, std)
#Q3:Uses descriptive activity names
final_data$activity <- factor(
  final_data$activity,levels=1:6,labels = activity_label$V2)
final_data <- final_data[order(final_data$subject,final_data$activity),]
#Q4:Appropriately labels the data set with descriptive variable names.
names(final_data) <- gsub("\\()","",names(final_data))
names(final_data) <- gsub("^t","Time:",names(final_data))
names(final_data) <- gsub("^f","Frequence:",names(final_data))
names(final_data) <- gsub("-mean","'s mean",names(final_data))
names(final_data) <- gsub("-std","'s standard deviation",names(final_data))
#Q5:Creates a second, independent tidy data set with the average of
# each variable for each activity and each subject.
output<-final_data %>%
group_by(subject, activity) %>% summarise_each(funs(mean))
write.table(output,"output.txt", row.names = FALSE)
|
d0ee0683e17942cd866224df4550d445e84479d7
|
9ee0ab61b9d870fa5237a2869b60e9e8877f8403
|
/man/getRuntimePath.Rd
|
746b94bf9f0898dc53ccf1f4b012544aab23fad5
|
[] |
no_license
|
AustralianAntarcticDivision/EPOC
|
79390dc3456a9cbacfb64884f10cdcf5fedad393
|
778be35f146197c571bb1ebfb76eb2a629eaad70
|
refs/heads/master
| 2020-09-09T22:12:49.843987
| 2019-11-14T01:55:36
| 2019-11-14T01:55:36
| 221,583,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 778
|
rd
|
getRuntimePath.Rd
|
\name{getRuntimePath}
\alias{getRuntimePath}
\title{
Universe methods
}
\description{
Return the path to the current scenarios runtime output directory with extPath appended if passed.
}
\usage{
getRuntimePath(.Object, extPath)
}
\arguments{
\item{.Object}{
Universe object
}
\item{extPath}{
optional path extension
}
}
\details{}
\value{
String file path as massaged for platform by file.path()
}
\references{}
\author{
Troy Robertson
}
\note{}
\seealso{
\code{\linkS4class{Universe}, \linkS4class{EPOCObject}, \link{getBasePath}}
}
\examples{
## Return path
# getRuntimePath(universe, "B.MI.Es.KPFM.state.R")
}
% Standard keyword from the 'KEYWORDS' file in the R documentation directory.
\keyword{ methods }
|
45ca2938f62aba931b7355ab784e1845c95d1eb7
|
b34a4c11ca70403a24d7048c42ffdfa61d4b18c9
|
/man/dalmatian.Rd
|
e3b657d692bdfb595aa58e95671ea3c917c1eb07
|
[] |
no_license
|
cran/dalmatian
|
df1365afbafff1b365828eefd3a77a52d7a59289
|
fb89cfafe6c44c4b19c61fd58c11a7d633485705
|
refs/heads/master
| 2021-11-24T17:55:29.208495
| 2021-11-22T18:40:02
| 2021-11-22T18:40:02
| 119,381,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,067
|
rd
|
dalmatian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dalmatian.R, R/dalmatian_doc.R
\docType{package}
\name{dalmatian}
\alias{dalmatian}
\title{Run DGLM in \code{JAGS} via \code{rjags} or in \code{nimble}}
\usage{
dalmatian(
df,
family = "gaussian",
mean.model,
dispersion.model,
joint.model = NULL,
jags.model.args,
coda.samples.args,
response = NULL,
ntrials = NULL,
rounding = FALSE,
lower = NULL,
upper = NULL,
parameters = NULL,
svd = FALSE,
residuals = FALSE,
gencode = NULL,
run.model = TRUE,
engine = "JAGS",
n.cores = 1L,
drop.levels = TRUE,
drop.missing = TRUE,
include.checks = TRUE,
overwrite = FALSE,
debug = FALSE,
saveJAGSinput = NULL
)
}
\arguments{
\item{df}{Data frame containing the response and predictor values for each individual. (data.frame)}
\item{family}{Name of family of response distribution. Currently supported families include normal (\code{gaussian}) and negative binomial (\code{nbinom}). (character)}
\item{mean.model}{Model list specifying the structure of the mean. (list)}
\item{dispersion.model}{Model list specifying the structure of the dispersion. (list)}
\item{joint.model}{Model list specifying structure with parameter shared between linear predictors of the mean and variance. (list)}
\item{jags.model.args}{List containing named arguments of \code{jags.model}. (list)}
\item{coda.samples.args}{List containing named arguments of \code{coda.samples}. (list)}
\item{response}{Name of variable in the data frame representing the response. (character)}
\item{ntrials}{Name of variable in the data frame representing the number of independent trials for each observation of the beta binomial model.}
\item{rounding}{Specifies that response has been rounded if TRUE. (logical)}
\item{lower}{Name of variable in the data frame representing the lower bound on the response if rounded. (character)}
\item{upper}{Name of variable in the data frame representing the upper bound on the response if rounded. (character)}
\item{parameters}{Names of parameters to monitor. If NULL then default values are selected. (character)}
\item{svd}{Compute Singular Variable Decomposition of model matrices to improve convergence. (logical)}
\item{residuals}{If TRUE then compute residuals in output. (logical)}
\item{gencode}{If TRUE then generate code potentially overwriting existing model file. By default generate code if the file does not exist and prompt user if it does. (logical)}
\item{run.model}{If TRUE then run sampler. Otherwise, stop once code and data have been created. (logical)}
\item{engine}{Specifies the sampling software. Packages currently supported include JAGS (the default) and nimble. (character)}
\item{n.cores}{Number of cores to use. If equal to 1 then chains will not be run in parallel. If greater than 1 then chains will be run in parallel using the designated number of cores.}
\item{drop.levels}{If TRUE then drop unused levels from all factors in \code{df}. (logical)}
\item{drop.missing}{If TRUE then remove records with missing response variable. (logical)}
\item{include.checks}{If TRUE (default) then include extra Bernoulli variables in the model to ensure that the mean and dispersion parameters remain within their support. (logical)}
\item{overwrite}{If TRUE then overwrite existing JAGS files (non-interactive sessions only). (logical)}
\item{debug}{If TRUE then enter debug model. (logical)}
\item{saveJAGSinput}{Directory to which jags.model input is saved prior to calling \code{jags.model()}. This is useful for debugging. No files saved if NULL. (character)}
}
\value{
An object of class \code{dalmatian} containing copies of the original data frame, the mean model, the
dispersion model the arguments of \code{jags.model} and \code{coda.samples}. and the output of the MCMC sampler.
}
\description{
The primary function which automates the running of \code{JAGS} and \code{nimble}.
See vignettes included in the package for full documentation. The list
of available vignettes can be generated with
\code{vignette(package="dalmatian")}.
}
\details{
The primary function in the package, dalmatian automates the generation of code, data, and initial values. These are then passed as arguments to function from the \code{rjags} package which automates the generation of samples from the posterior.
}
\examples{
\dontrun{
## Load pied flycatcher data
data(pied_flycatchers_1)
## Create variables bounding the true load
pfdata$lower=ifelse(pfdata$load==0,log(.001),log(pfdata$load-.049))
pfdata$upper=log(pfdata$load+.05)
## Mean model
mymean=list(fixed=list(name="alpha",
formula=~ log(IVI) + broodsize + sex,
priors=list(c("dnorm",0,.001))))
## Dispersion model
myvar=list(fixed=list(name="psi",
link="log",
formula=~broodsize + sex,
priors=list(c("dnorm",0,.001))))
## Set working directory
## By default uses a system temp directory. You probably want to change this.
workingDir <- tempdir()
## Define list of arguments for jags.model()
jm.args <- list(file=file.path(workingDir,"pied_flycatcher_1_jags.R"),n.adapt=1000)
## Define list of arguments for coda.samples()
cs.args <- list(n.iter=5000)
## Run the model using dalmatian
pfresults <- dalmatian(df=pfdata,
mean.model=mymean,
dispersion.model=myvar,
jags.model.args=jm.args,
coda.samples.args=cs.args,
rounding=TRUE,
lower="lower",
upper="upper",
debug=FALSE)
}
}
\references{
Bonner, S., Kim, H., Westneat, D., Mutzel, A.,
Wright, J., and Schofield, M.. (2021). \code{dalmatian}: A Package
for Fitting Double Hierarchical Linear Models in \code{R} via \code{JAGS} and
\code{nimble}. \emph{Journal of Statistical Software}, 100, 10, 1--25.
\doi{10.18637/jss.v100.i10}.
}
\author{
Simon Bonner
}
|
9a87f78bcd60d88908a60c81cb81e60720d4f161
|
1c0aa99df5bfdf044f6da683dad7d3e8b354ce4c
|
/numerical_experiments_gwas/make_figures.R
|
431b5656cf7e6e0aa86ea89d433520c61d40f254
|
[] |
no_license
|
lsn235711/transfer_knockoffs_code
|
442bb4abed05247c23bda9a95af0bb4e864bea1f
|
650631ae025c0ad053299bea009a4f2a359957ae
|
refs/heads/main
| 2023-07-17T10:09:21.768844
| 2021-08-20T00:47:52
| 2021-08-20T00:47:52
| 396,407,145
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,166
|
r
|
make_figures.R
|
## make_figures.R
## Builds the knockoff-transfer figures: FDP/power vs heterogeneity of causal
## variants, for the pooled summary and for the per-population summaries.
library(tidyverse)
library(gridExtra)
# Signal-to-noise ratio of the simulation setting shown in all figures.
snr <- 5
ifile <- "../results/summary.txt"
Summary <- read_delim(ifile, delim=" ")
# Map raw method identifiers to display labels; matched aesthetics below.
methods.values <- c("Vanilla knockoffs", "Transfer knockoffs - Linear", "Transfer knockoffs - Adaptive", "Transfer knockoffs - Lasso")
methods.labels <- c("Vanilla", "Transfer - linearly re-ordered (oracle)", "Transfer - adaptive (gam)", "Transfer - weighted-lasso")
color.scale <- c("#377EB8", "#4DAF4A", "#984EA3", "#FF7F00")
shape.scale <- c(17,15,3,7)
linetype.scale <- c(1,1,1,1)
Summary <- Summary %>%
mutate(Method = factor(Method, methods.values, methods.labels))
# Horizontal reference line at the nominal FDR level, drawn only in the
# "FDP" facet row.
df.dummy <- tibble(Key="FDP", Value=0.1)
## Plot with equal sample sizes
# Keep only the "-small" (down-sampled) populations; error bars are mean +/- 1 SE.
p1 <- Summary %>%
filter(Population!="Everyone", SNR==snr) %>%
mutate(Full = ifelse(endsWith(Population, "-small"), FALSE, TRUE), Population = str_replace(Population, "-small", "")) %>%
filter(!Full) %>%
mutate(Population = sprintf("%s (n = %d)", Population, Samples)) %>%
ggplot(aes(x=Specificity, y=Value.mean, color=Method, linetype=Method, shape=Method)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin=pmax(0,Value.mean-Value.se), ymax=Value.mean+Value.se), width=5) +
geom_hline(data=df.dummy, aes(yintercept=Value), linetype=2) +
facet_grid(Key~Population) +
scale_x_continuous(breaks=c(0,50,100)) +
scale_color_manual(values=color.scale) +
scale_shape_manual(values=shape.scale) +
scale_linetype_manual(values=linetype.scale) +
xlab("Heterogeneity of causal variants (%)") +
ylab("") +
ylim(0,0.4) +
guides(color = guide_legend(nrow = 2)) +
theme_bw() +
theme(legend.position="bottom", legend.key.size = grid::unit(2, "lines"))
ggsave(sprintf("../figures/transfer_snr%s_small.png", snr), p1, height=4, width=6, units="in")
## Plot with different sample sizes
# Keep the full-size populations (plus the pooled data relabelled "Pooled").
# NOTE(review): the second condition in the filter below is redundant --
# `Population!="British"` already removes every British row, so
# `!((Population=="British")*(Method!="Vanilla"))` can never exclude anything.
p2 <- Summary %>%
filter(SNR==snr) %>%
filter(Population!="British", ! ((Population == "British")*(Method!="Vanilla"))) %>%
mutate(Full = ifelse(endsWith(Population, "-small"), FALSE, TRUE), Population = str_replace(Population, "-small", "")) %>%
mutate(Population = ifelse(Population=="Everyone", "Pooled", Population)) %>%
filter(Full) %>%
mutate(Population = sprintf("%s (n = %d)", Population, Samples)) %>%
ggplot(aes(x=Specificity, y=Value.mean, color=Method, linetype=Method, shape=Method)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin=pmax(0,Value.mean-Value.se), ymax=Value.mean+Value.se), width=5) +
geom_hline(data=df.dummy, aes(yintercept=Value), linetype=2) +
facet_grid(Key~Population) +
scale_x_continuous(breaks=c(0,50,100)) +
scale_color_manual(values=color.scale) +
scale_shape_manual(values=shape.scale) +
scale_linetype_manual(values=linetype.scale) +
xlab("Heterogeneity of causal variants (%)") +
ylab("") +
ylim(0,1) +
guides(color = guide_legend(nrow = 2)) +
theme_bw() +
theme(legend.position="bottom", legend.box="vertical", legend.key.size = grid::unit(2, "lines"))
ggsave(sprintf("../figures/transfer_snr%s.png", snr), p2, height=4, width=8, units="in")
#########################
## Plots by population ##
#########################
ifile <- "../results/summary_separate.txt"
Summary <- read_delim(ifile, delim=" ")
# NOTE(review): the value->label alignment below looks suspicious (e.g.
# "Vanilla knockoffs" maps to "Transfer - Linear combination (oracle)");
# the Method column is re-mapped again before each plot, so verify which
# mapping actually takes effect.
methods.values <- c("Pooling", "Vanilla knockoffs", "Transfer knockoffs - Linear", "Transfer knockoffs - Adaptive", "Transfer knockoffs - Lasso")
methods.labels <- c("Vanilla on British population", "Transfer - Linear combination (oracle)", "Vanilla", "Transfer - Adaptive", "Transfer - Weighted lasso")
color.scale <- c("#377EB8", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00")
shape.scale <- c(1,17,15,3,7)
linetype.scale <- c(2,1,1,1,1)
Summary <- Summary %>%
mutate(Method = factor(Method, methods.values, methods.labels))
# NOTE(review): the first df.dummy assignment is immediately overwritten by
# the second; only the "FDR (population-specific)" reference line is used.
df.dummy <- tibble(Key="FDP", Value=0.1)
df.dummy <- tibble(Key="FDR (population-specific)", Value=0.1)
methods.values <- c("Vanilla on British population", "Vanilla", "Transfer - Linear combination (oracle)", "Transfer - Adaptive", "Transfer - Weighted lasso")
methods.labels <- c("Vanilla on British", "Vanilla", "Transfer - linearly-reordered combination (oracle)", "Transfer - adaptive (gam)", "Transfer - weighted-lasso")
# Per-population results, with the British-population vanilla run added as a
# dashed baseline in every panel.
p1 <- Summary %>%
filter(SNR==snr) %>%
filter((Population==Pop)|(Population=="British")) %>%
mutate(Method = ifelse(Population=="British", "Vanilla on British population", as.character(Method))) %>%
mutate(Method = factor(Method, methods.values, methods.labels)) %>%
mutate(Key = ifelse(Key=="FDP", "FDR (population-specific)", Key),
Key = ifelse(Key=="Power", "Power (population-specific)", Key)) %>%
ggplot(aes(x=Specificity, y=Value.mean, color=Method, linetype=Method, shape=Method)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin=pmax(0,Value.mean-Value.se), ymax=Value.mean+Value.se), width=5) +
geom_hline(data=df.dummy, aes(yintercept=Value), linetype=2) +
facet_grid(Key~Pop) +
scale_x_continuous(breaks=c(0,50,100)) +
scale_color_manual(values=color.scale) +
scale_shape_manual(values=shape.scale) +
scale_linetype_manual(values=linetype.scale) +
xlab("Heterogeneity of causal variants (%)") +
ylab("") +
ylim(0,1) +
guides(color = guide_legend(nrow = 2)) +
theme_bw() +
theme(legend.position="bottom", legend.key.size = grid::unit(2, "lines"))
ggsave(sprintf("../figures/transfer_specific_snr%s.png", snr), p1, height=5, width=8, units="in")
# Same per-population comparison, but with the pooled-analysis heuristic as
# the baseline instead of the British-only run.
methods.values <- c("Pooling", "Vanilla", "Transfer - Linear combination (oracle)", "Transfer - Adaptive", "Transfer - Weighted lasso")
methods.labels <- c("Heuristic (pool)", "Vanilla", "Transfer - linearly-reordered combination (oracle)", "Transfer - adaptive (gam)", "Transfer - weighted-lasso")
color.scale <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00")
shape.scale <- c(16,17,15,3,7)
linetype.scale <- c(1,1,1,1,1)
p2 <- Summary %>%
filter(SNR==snr) %>%
filter((Population==Pop)|(Population=="Everyone")) %>%
mutate(Method = ifelse(Population=="Everyone", "Pooling", as.character(Method))) %>%
mutate(Method = factor(Method, methods.values, methods.labels)) %>%
mutate(Key = ifelse(Key=="FDP", "FDR (population-specific)", Key),
Key = ifelse(Key=="Power", "Power (population-specific)", Key)) %>%
ggplot(aes(x=Specificity, y=Value.mean, color=Method, linetype=Method, shape=Method)) +
geom_point() +
geom_line() +
geom_errorbar(aes(ymin=pmax(0,Value.mean-Value.se), ymax=Value.mean+Value.se), width=5) +
geom_hline(data=df.dummy, aes(yintercept=Value), linetype=2) +
facet_grid(Key~Pop) +
scale_x_continuous(breaks=c(0,50,100)) +
scale_color_manual(values=color.scale) +
scale_shape_manual(values=shape.scale) +
scale_linetype_manual(values=linetype.scale) +
xlab("Heterogeneity of causal variants (%)") +
ylab("") +
ylim(0,1) +
guides(color = guide_legend(nrow = 2)) +
theme_bw() +
theme(legend.position="bottom", legend.key.size = grid::unit(2, "lines"))
ggsave(sprintf("../figures/transfer_specific_pooled_snr%s.png", snr), p2, height=5, width=8, units="in")
|
160b946b409efd14422bd5443e116e1c65ce8b9a
|
7b2830cfeda670fb70cb09daed4db370cc393e04
|
/R/snp.bar.R
|
50343d2cb6f51f68645c0d431b6f481d01979bd7
|
[] |
no_license
|
nanshanjin/script
|
579d08e75cd5aa6e4bf4e558cefa581c2dc99f2e
|
c9fbfceca6c23ba21d908eaf91af5ec5fcdbecc3
|
refs/heads/master
| 2022-03-15T19:56:30.958094
| 2019-11-21T05:20:23
| 2019-11-21T05:20:23
| 53,825,691
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
snp.bar.R
|
library(ggplot2)
## snp.bar.R -- grouped bar chart of SNP counts by type, written to a PDF.
# Read the SNP type counts (tab-separated, with a header row).
a <- read.table("snp.type.new.txt", header = TRUE, sep = "\t")
pdf("snp.bar.pdf", width = 14, height = 12)
# NOTE(review): two fixes relative to the original one-liner:
#  * `colour = c("red","yellow")` was passed to ggplot(), where it is silently
#    ignored (it is not an aesthetic mapping); dropped.
#  * theme_bw() is now applied *before* the theme() tweaks -- complete themes
#    replace all prior theme settings, so in the original the axis-text
#    rotation set earlier in the chain was wiped out by theme_bw().
p <- ggplot(a, aes(x = snp, y = number, fill = type)) +
  geom_bar(stat = "identity", position = "dodge") +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 1),
        panel.background = element_rect(fill = "transparent", colour = NA),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        plot.background = element_rect(fill = "transparent", colour = NA))
# Explicit print() so the plot is rendered even when this script is source()'d
# (top-level autoprinting does not happen inside source()).
print(p)
dev.off()
|
978d7d1b7b249505af207516731d07b671dde267
|
666d6532dedaa93d516ae0a25a2a6680595bc33a
|
/R/test.600.VEME.MaC.incremental.R
|
140b7ea99ec5ae346b2a1c156e84497ff0a85b64
|
[] |
no_license
|
wdelva/MiceABC
|
39cdc18763af51c22669e52add212df72f122151
|
382bc3bf1100feae509fc191d34241f6b07ae44b
|
refs/heads/master
| 2021-01-19T13:30:53.973769
| 2018-04-09T08:16:15
| 2018-04-09T08:16:15
| 88,095,477
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,326
|
r
|
test.600.VEME.MaC.incremental.R
|
#setwd("/Users/wimdelva/Documents/MiceABC/R")
#source("simpact.wrapper.R")
#source("VEME.wrapper.R")
#source("dummy.wrapper.R")
#source("simpact.parallel.R")
#source("MaC.incremental.R")
#source("dummy.MaC.incremental.R")
#source("/user/data/gent/vsc400/vsc40070/phylo/scripts/VEME.wrapper.R")
#source("/user/data/gent/vsc400/vsc40070/phylo/scripts/simpact.parallel.R")
#source("/user/data/gent/vsc400/vsc40070/phylo/scripts/dummy.MaC.incremental.R")
#source("/Users/delvaw/Documents/MiceABC/R/VEME.wrapper.R")
source("/Users/delvaw/Documents/MiceABC/R/VEME.wrapper2.R")
source("/Users/delvaw/Documents/MiceABC/R/VEME.wrapper2.medians.R")
source("/Users/delvaw/Documents/MiceABC/R/mice.wrapper.R")
source("/Users/delvaw/Documents/MiceABC/R/00-Functions.R")
source("/Users/delvaw/Documents/MiceABC/R/simpact.parallel.R")
source("/Users/delvaw/Documents/MiceABC/R/mice.parallel.R")
source("/Users/delvaw/Documents/MiceABC/R/dummy.MaC.incremental.R")
source("/Users/delvaw/Documents/MiceABC/R/dummy.MaC.incremental.parallel.mice.R")
library(dplyr)
library(MASS)
library(splines)
library(boot)
#library(haven)
library(ggplot2)
library(GGally)
library(fitdistrplus)
library(lmtest)
library(mclust)
#library(depth)
library(pcaPP)
#library(devtools)
#install_github("wdelva/RSimpactHelp")
library(RSimpactCyan)
library(RSimpactHelper)
library(lmtest)
library(mice)
#library(miceadds)
library(parallel)
library(randtoolbox)
library(EasyABC)
library(dplyr)
library(tidyr)
library(nlme)
library(lme4)
library(boot)
library(data.table)
library(parallel)
# NOTE(review): the original unconditionally ran install.packages() on every
# execution, which is slow and fails without network access.  Install
# adegenet only when it is not already available, then load it.
if (!requireNamespace("adegenet", quietly = TRUE)) {
  install.packages("adegenet", dep = TRUE) # Still to be done on vsc
}
library(adegenet) # Still to be done on vsc
library(ade4) # Still to be done on vsc
source("http://adegenet.r-forge.r-project.org/files/patches/auxil.R")
#install.packages("devtools")
#library(devtools)
#install_github("emvolz/treedater") # Still to be done on vsc
library(treedater) # Still to be done on vsc
# Input parameter vector for the master model (15 parameters).
dummy.input.vector <- c(1.1, 0.25, 0, 3, 0.23, 0.23, # what if 1.1 becomes 1.4
45, 45, #45, 45, # what if 45 becomes 60
-0.5, 2.8, -0.2, -0.2, -2.5, -0.52, -0.05)# c(1000, 2, 3, 4)
x.offset <- length(dummy.input.vector) # number of model input parameters
n.experiments <- 80 # number of replicate master-model runs
# Run the master model n.experiments times in parallel, all with identical
# inputs (only the random seed varies per replicate).
dummy.master2 <- simpact.parallel(model = VEME.wrapper2,
actual.input.matrix = matrix(rep(dummy.input.vector, each = n.experiments), nrow = n.experiments),
seed_count = 0,
n_cluster = 8)
# NOTE(review): the original called save(valid.dummy.master2, ...) here, but
# no object of that name exists at this point, so save() would fail with
# "object not found".  The master-model output is aliased to that name (it is
# also referenced by that name further down, when the calibrated model is
# compared against the master model) before saving.
valid.dummy.master2 <- dummy.master2
save(valid.dummy.master2, file = "/Users/delvaw/Documents/MiceABC/valid.dummy.master2.RData")
#####
# The output of the master model
#####
#inc.master.vector only exists for the validation version of the master model
# inc.master.vector <- dummy.master2[, 17]
# #save(inc.master.vector, file = "/Users/delvaw/Documents/MiceABC/inc.master.vector.RData")
# #save(inc.master.vector.inflated, file = "/Users/delvaw/Documents/MiceABC/inc.master.vector.inflated.RData")
#
# hist(inc.master.vector[!is.na(inc.master.vector)], 14) # The distribution of HIV incidence after 10 years
# mean(inc.master.vector) # The mean HIV incidence after 10 years: 0.01527118
# median(inc.master.vector) # The median HIV incidence after 10 years: 0.01532902
# quantile(inc.master.vector, c(0.025, 0.975)) # 0.002462209 0.026417416
head(dummy.master2)
# Extract summary-statistic columns from the master-model output matrix.
# NOTE(review): column indices 17-19 are presumed to hold incidence, number
# of new infections, and the recent-infection ratio -- confirm against
# VEME.wrapper2's output layout.
inc.master.vector <- dummy.master2[, 17]
new.infect.vector <- dummy.master2[, 18]
recent.ratio.vector <- dummy.master2[, 19]
mean(new.infect.vector, na.rm = TRUE) # The mean HIV incidence after 10 years
median(new.infect.vector, na.rm = TRUE) # The median HIV incidence after 10 years
quantile(new.infect.vector, c(0.025, 0.975), na.rm = TRUE)
# mean.br.len.vector <- dummy.master2[, 19]
# Exploratory plots / correlations between the candidate summary statistics.
hist(new.infect.vector[!is.na(new.infect.vector) & new.infect.vector < Inf], 20)
hist(recent.ratio.vector[!is.na(recent.ratio.vector) & recent.ratio.vector < Inf], 20)
plot(new.infect.vector[!is.na(new.infect.vector) & new.infect.vector < Inf],
inc.master.vector[!is.na(new.infect.vector) & new.infect.vector < Inf])
plot(recent.ratio.vector[inc.master.vector > 0.0142 & inc.master.vector < 0.0162],
inc.master.vector[inc.master.vector > 0.0142 & inc.master.vector < 0.0162])
# NOTE(review): the two cor.test() calls below are identical; the second was
# probably meant to use recent.ratio.vector (or mean.br.len.vector).
cor.test(inc.master.vector[!is.na(new.infect.vector) & new.infect.vector < Inf],
new.infect.vector[!is.na(new.infect.vector) & new.infect.vector < Inf])
cor.test(inc.master.vector[!is.na(new.infect.vector) & new.infect.vector < Inf],
new.infect.vector[!is.na(new.infect.vector) & new.infect.vector < Inf])
#plot(mean.br.len.vector[!is.na(mean.br.len.vector) & mean.br.len.vector < Inf],
# inc.master.vector[!is.na(mean.br.len.vector) & mean.br.len.vector < Inf])
#cor.test(mean.br.len.vector[!is.na(mean.br.len.vector) & mean.br.len.vector < Inf],
# inc.master.vector[!is.na(mean.br.len.vector) & mean.br.len.vector < Inf])
#plot(mean.br.len.vector,
# new.infect.vector)
# Drop incomplete runs, then take the multivariate L1 median of the output
# statistics as the empirical calibration target.
dummy.master2 <- dummy.master2 %>%
as.data.frame() %>%
dplyr::filter(complete.cases(.))
dummy.targets.empirical <- l1median(dummy.master2)
# dummy.targets.empirical (based on 400 repeats):
# 5.79782475 4.16132170 0.63770526 2.69246630 1.21359926 -1.56183331 0.25472579 2.09719359 0.02891330 0.13671790
# 0.06816378 0.34554390 0.31086783 0.37050293 0.41706055 1.01682475 7.07415631
# Hard-coded targets from a previous 400-repeat run; this assignment
# immediately overrides the l1median computed above.
dummy.targets.empirical <- c(5.79782475,
4.16132170,
0.63770526,
2.69246630,
1.21359926,
-1.56183331,
0.25472579,
2.09719359,
0.02891330,
0.13671790,
0.06816378,
0.34554390,
0.31086783,
0.37050293,
0.41706055,
1.01682475,
7.07415631)
# round(colMeans(dummy.master2), 3) # For interest sake, what are the marginal means?
# Predictor matrix for mice: entry [i, j] == 1 means variable j is used to
# predict variable i.  Rows 1..x.offset are the model inputs; columns
# x.offset+1.. are the output statistics.
predictorMatrix <- (1 - diag(1, length(c(dummy.input.vector, dummy.targets.empirical)))) # This is the default matrix.
# # Let's now modify the first 15 rows of this matrix, corresponding to the indicators of predictor variables for the input variables. In brackets the values for the master model.
predictorMatrix[1:x.offset, ] <- 0 # First we "empty" the relevant rows, then we refill them.
# We are currently not allowing input variables to be predicted by other predictor variables. Only via output variables. We could change this at a later stage.
predictorMatrix[1, x.offset + c(10, 11, 17)] <- 1 # relative susceptibility in young women is predicted by HIV prevalence in young men and women, and recent infections (~ incidence)
predictorMatrix[2, x.offset + 3] <- 1 # agescale predicted by slope
predictorMatrix[3, x.offset + c(1, 3, 6)] <- 1 # mean of the person-specific age gap preferences is predicted by slope, intercept and AAD
predictorMatrix[4, x.offset + c(2, 4, 5)] <- 1 # sd of the person-specific age gap preferences is predicted by SD, WSD, BSD
predictorMatrix[5, x.offset + c(7, 8, 9, 13, 16)] <- 1 # man gamma a predicted by gamma shape.male, scale.male, pp.cp, hiv.prev.25.34.men, exp(growthrate)
predictorMatrix[6, x.offset + c(7, 8, 9, 12, 16)] <- 1 # woman gamma a predicted by gamma shape.male, scale.male, pp.cp, hiv.prev.25.34.women, exp(growthrate)
predictorMatrix[7, x.offset + c(7, 8, 9, 13, 16, 17)] <- 1 # man gamma b predicted by gamma shape.male, scale.male, pp.cp, hiv.prev.25.34.men, exp(growthrate), and recent infections (~ incidence)
predictorMatrix[8, x.offset + c(7, 8, 9, 12, 16, 17)] <- 1 # woman gamma b predicted by gamma shape.male, scale.male, pp.cp, hiv.prev.25.34.men, exp(growthrate), and recent infections (~ incidence)
predictorMatrix[9, x.offset + c(2, 4, 5, 7, 8, 14, 15, 16, 17)] <- 1 # formation.hazard.agegapry.gap_factor_x_exp is predicted by population growth, age gap variance, hiv prevalence, and recent infections (~ incidence)
predictorMatrix[10, x.offset + c(7, 8, 9, 12, 13, 16, 17)] <- 1 # baseline formation hazard predicted by HIV prevalence, cp, degree distrib. HIV prevalence, and recent infections (~ incidence)
predictorMatrix[11, x.offset + c(7, 8, 9, 12, 13, 16, 17)] <- 1 # numrel man penalty is predicted by degree distrib, cp, prev, popgrowth, and recent infections (~ incidence)
predictorMatrix[12, x.offset + c(7, 8, 9, 12, 13, 16, 17)] <- 1 # # numrel woman penalty is predicted by degree distrib, cp, prev, popgrowth, and recent infections (~ incidence)
predictorMatrix[13, x.offset + 16] <- 1 # conception.alpha_base is predicted by popgrowth
predictorMatrix[14, x.offset + c(7, 8, 9, 16)] <- 1 # baseline dissolution hazard predicted by degree distrib, cp, popgrowth
predictorMatrix[15, x.offset + c(7, 8, 9, 16)] <- 1 # age effect on dissolution hazard predicted by degree distrib, cp, popgrowth, HIV prev in older people (maybe?)
# NOTE: As it stands, each output statistic is predicted by ALL input and ALL other output statistics. That may not be a great idea, or even possible, if there is collinearity.
# Test dummy.MaC.incremental, and also dummy.MaC.incremental.parallel.mice
# Run the incremental MaC calibration with lower/upper parameter bounds (lls/uls).
test.VEME2.MaC.incremental <- dummy.MaC.incremental.parallel.mice(targets.empirical = dummy.targets.empirical,
RMSD.tol.max = 0.95,
min.givetomice = 80, # 400
n.experiments = 320, # 1000
lls = c(1, 0.12, -0.3, 2.5, 0.1, 0.1, 20, 20, -0.8, 2, -0.35, -0.35, -3.6, -0.8, -0.16),
uls = c(1.2, 0.37, 0.3, 3.5, 0.4, 0.4, 66, 66, -0.25, 3.9, -0.1, -0.1, -1.4, -0.3, -0.001),
model = VEME.wrapper2.medians, # VEME.wrapper2,
strict.positive.params = c(4:8),
predictorMatrix = predictorMatrix,
maxit = 5,
maxwaves = 10,
n_cluster = 8) # 6
#(round(l1median(head(test.MaC.incremental$selected.experiments[[length(test.MaC.incremental$selected.experiments)]]), 1), 99)[5:8] - dummy.targets.empirical[1:4]) / dummy.targets.empirical[1:4]
#round(l1median(head(test.MaC.incremental$selected.experiments[[length(test.MaC.incremental$selected.experiments)]]), 1), 2)
#test.MaC.incremental$secondspassed
# Quick inspection of the calibration result, then persist it.
test.VEME2.MaC.incremental$secondspassed
test.VEME2.MaC.incremental$max.RMSD
test.VEME2.MaC.incremental$n.close.to.targets
head(test.VEME2.MaC.incremental$selected.experiments[[length(test.VEME2.MaC.incremental$selected.experiments)]])
save(dummy.targets.empirical, test.VEME2.MaC.incremental, file = "/Users/delvaw/Documents/MiceABC/test.VEME2.MaC.incremental.RData")
### Now we simulate for the 1 (or 5?) best fitting model(s)
# Take the best-fitting parameter vector from the final calibration wave and
# re-run the model with it n.experiments times.
inputs.calib <- as.numeric(test.VEME2.MaC.incremental$selected.experiments[[length(test.VEME2.MaC.incremental$selected.experiments)]][1, 1:15])
calib.dummy.master2 <- simpact.parallel(model = VEME.wrapper2,
actual.input.matrix = matrix(rep(inputs.calib, each = n.experiments), nrow = n.experiments),
seed_count = 0,
n_cluster = 8)
save(calib.dummy.master2, file = "/Users/delvaw/Documents/MiceABC/calib.dummy.master2.RData")
## AND THE OUTPUT LOOKS LIKE:
# Reshape master-model and calibrated-model incidence trajectories
# (columns 17-31 = presumably yearly incidence for years 1..15 -- confirm
# against VEME.wrapper2's output layout) into one long data frame.
# NOTE(review): valid.dummy.master2 is expected to exist here (saved/loaded
# from valid.dummy.master2.RData earlier in the workflow).
valid.inc.wide <- as.data.frame(valid.dummy.master2[, 17:31])
names(valid.inc.wide) <- 1:15 #paste0("incid.", 1:15)
valid.inc.wide$rep <- 1:nrow(valid.inc.wide)
valid.inc.wide$type <- "Master model"
calib.inc.wide <- as.data.frame(calib.dummy.master2[, 17:31])
names(calib.inc.wide) <- 1:15 #paste0("incid.", 1:15)
calib.inc.wide$rep <- (1 + nrow(valid.inc.wide)):(nrow(valid.inc.wide) + nrow(calib.inc.wide))
calib.inc.wide$type <- "Calibrated model"
inc.both <- rbind(valid.inc.wide, calib.inc.wide)
inc.both.long <- gather(inc.both, year, incidence, 1:15)
inc.both.long$year <- as.numeric(inc.both.long$year)
inc.both.long$smoother <- factor(paste0("smoother.", inc.both.long$type))
###
# Plotting the result
###
library(metafolio)
# Light colours for the individual trajectories, dark ones for the smoothers.
n.colours <- 2
cols <- gg_color_hue(n.colours, l=65)
darkcols <- gg_color_hue(n.colours, l=40)
# Incidence trajectories (years >= 10) per replicate, faceted by model type,
# with a loess smoother overlaid per facet.
incplot <- ggplot(filter(inc.both.long,
year >=10),
aes(year, incidence, group = rep, colour = type)) +
geom_line() +
facet_wrap(~ type) +
stat_smooth(se=FALSE,
method="loess",
span=1,
aes(year, incidence, group = type, colour=smoother)) +
xlab("Time (years)") +
ylab("HIV incidence (cases/person-year)") +
# scale_colour_hue(l = c(rep(65, 5), rep(10, 5))) +
scale_color_manual(values = c("Master model"=cols[1],
"Calibrated model"=cols[2],
"smoother.Master model"=darkcols[1],
"smoother.Calibrated model"=darkcols[2]),
guide = FALSE)
plot(incplot)
|
1fb766b71233d12a0e55719c34d6801e7f149fed
|
139d4617b8d76684364d0daeb2fc6076334bda99
|
/206 project/project.R
|
c1e6f1c9692a4fc6afefe82dd8da4f074eec0aed
|
[] |
no_license
|
robinbing/Some-analysis-work
|
b2ae5bc40b08bcd22d582e671b00f3a5fbe4241d
|
961d6568a0020287810f9a21ba2544ba2c0e2e63
|
refs/heads/master
| 2021-01-10T05:12:51.823064
| 2015-12-07T00:11:03
| 2015-12-07T00:11:03
| 46,022,459
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 6,297
|
r
|
project.R
|
## STA 206 project: modelling abalone ring counts (age) from physical
## measurements.  This section loads the data and makes a train/test split.
setwd('e:\\STA206\\assignment\\project')
# Abalone data: 8 predictors plus the response (rings).
# NOTE(review): "sucked" is presumably the "shucked weight" column of the
# standard abalone data set -- confirm; renaming it would require updating
# any later references.
abalone = read.table("abalone.txt" , sep =',')
colnames(abalone) = c("sex","length","diameter","height","whole","sucked",
"viscera","shell","rings")
a = abalone
a$sex = factor(a$sex)
#
#split data
#
# Reproducible 2/3 train : 1/3 test split.
set.seed(12)
n = nrow(a)
index.s = sample(1:n , size = n*2/3 , replace = FALSE)
a.train = a[index.s,]
a.test = a[-index.s,]
# Side-by-side boxplots of each numeric variable in train vs test, to check
# the split is balanced.
sapply(2:9, function(i) boxplot(a.train[,i],a.test[,i]))
###############################################################
boxplot(rings~sex,data = a.train,
main='side-by-side boxplots',xlab='factor levels',
ylab='observation',col=rainbow(6))
#############################################################################
#############################################################################
#transform response variables
a.train$rings = log(a.train$rings)
a.test$rings = log(a.test$rings)
#
# Model selection
#
#
# Selection of first-order effects
#
#
# Full first-order model on the training set; its MSE serves as the
# error-variance estimate for the null model's Cp below.
model.1st = lm(rings~. , data = a.train)
mse = summary(model.1st)$sigma^2
#
# Best-subsets selection
#
library(leaps)
sub.set = regsubsets(rings~. , data = a.train , nbest = 1,
                     nvmax = 16 , method = "exhaustive")
sum.sub = summary(sub.set)
# Number of parameters in each candidate model (terms + intercept).
num.p = as.numeric(rownames(sum.sub$which)) + 1L
n.train = nrow(a.train)
sse = sum.sub$rss
# AIC / BIC for each candidate subset.
# FIX: the models are fit on a.train, so the sample size in both criteria
# must be n.train; the original used n = nrow(a), inconsistent with the
# null-model values aic0/bic0 computed below.
aic = n.train*log(sse/n.train) + 2*num.p
bic = n.train*log(sse/n.train) + log(n.train)*num.p
sub.table = cbind(sum.sub$which, sse, sum.sub$rsq, sum.sub$adjr2,
                  sum.sub$cp, aic ,bic)
# Null (intercept-only) model, added as the first row of the table.
fit0 = lm(rings~1, data = a.train)
sse0 = sum(fit0$residuals^2)
p0 = 1
c0 = sse0/mse - (n.train-2*p0)          # Mallows' Cp of the null model
aic0 = n.train*log(sse0/n.train) + 2*p0
bic0 = n.train*log(sse0/n.train) +log(n.train)*p0
none = c(p0, rep(0,9), sse0, 0, 0, c0, aic0, bic0)
sub.table = rbind(none, sub.table)
colnames(sub.table) = c(colnames(sum.sub$which), "sse", "R^2", "R^2_a", "cp",
                        "aic", "bic")
#
# Forward stepwise procedure (both directions, AIC penalty k = 2)
#
library(MASS)
step.forward = stepAIC(fit0, scope = list(upper = model.1st, lower = ~1),
                       direction = "both", k=2)
#
#
# Selection of first-order AND second-order effects
# (all pairwise interactions: rings ~ .^2).
#
#
model.2nd = lm(rings~.^2, data = a.train)
mse2 = summary(model.2nd)$sigma^2
#
# Forward stepwise procedure, starting from the null model fit0
# (defined above) and searching up to the full second-order model.
#
step.forward2 = stepAIC(fit0, scope = list(upper = model.2nd, lower = ~1),
                        direction = "both", k=2)
###############################################
###############################################
#
# Model validation
#
#
#
# Internal validation: residual diagnostics and selection statistics
# for both stepwise models.
#
model1 = lm(step.forward , data = a.train)
plot(model1, which = 1)
plot(model1, which = 2)
model2 = lm(step.forward2 , data = a.train)
# FIX: the original plotted the undefined object `model`; the diagnostics
# here belong to the second-order model `model2`.
plot(model2, which = 1)
plot(model2, which = 2)
# FIX: the residual row of an anova() table is named "Residuals"; the
# original indexed "Residual" (no s), which returns NA.
sse.1st = anova(step.forward)["Residuals" , 2]
p.1st = length(step.forward$coefficients)
# FIX: Cp of the first-order model must use its own parameter count p.1st;
# the original referenced p.2nd, which is only defined further below.
cp.1st = sse.1st/mse2 - (n.train-2*p.1st)
press.1st = sum(step.forward$residuals^2/(1-influence(step.forward)$hat)^2)
mse.1st = anova(step.forward)["Residuals",3]
# Cp is too large for the first-order model.
sse.2nd = anova(step.forward2)["Residuals" , 2]
p.2nd = length(step.forward2$coefficients)
cp.2nd = sse.2nd/mse2 - (n.train-2*p.2nd)
press.2nd = sum(step.forward2$residuals^2/(1-influence(step.forward2)$hat)^2)
mse.2nd = anova(step.forward2)["Residuals",3]
# (Cp is 51 vs p = 24 -- the gap suggests some model bias, possibly because
# important variables were already missing at data-collection time.)
# press.2nd/n = 0.00733 , mse.2nd = 0.00706. Little difference between these two variables
# supports the validity of the model. And the mse is small which shows a good ablity of
# the model
#
# External validation
#
# Refit the selected second-order model on the hold-out set and compare
# coefficients and fit statistics against the training fit.
model2.v = lm(step.forward2 , data = a.test)
mspr2 =round (mean((predict.lm(model2, a.test)-a.test$rings)^2),3)
press.2nd/n.train
sse_model2 = round(anova(model2)["Residuals",2],3)
sse_model2.v = round(anova(model2.v)["Residuals",2],3)
mse_model2 = round(anova(model2)["Residuals",3],3)
mse_model2.v = round(anova(model2.v)["Residuals",3],3)
model2_R2_a = round(summary(model2)$adj.r.squared,3)
model2_R2_a.v = round(summary(model2.v)$adj.r.squared,3)
# Side-by-side coefficient comparison for model2 (validation vs training).
mod_sum_2 = cbind(coef(summary(model2.v))[,1], coef(summary(model2.v))[,2],
                  coef(summary(model2))[,1],coef(summary(model2))[,2])
colnames(mod_sum_2) = c('coef validation','coef std.err validation',
                        'coef ','coef std.err')
Training_2 = cbind(sse_model2,mse_model2,model2_R2_a,round(press.2nd,3),
                   round(press.2nd/n.train,3),"--")
Validation_2 = cbind(sse_model2.v,mse_model2.v,model2_R2_a.v,"--","--",
                     mspr2)
con_2 = rbind(Training_2,Validation_2)
rownames(con_2) = c('Training','Validation')
colnames(con_2) = c('sse','mse','R2_2','press','press/n','mspr')
# The two objects below are the summary tables.
mod_sum_2
con_2
#################################################################################
#################################################################################
#
# Outlier diagnostics on the full dataset
#
# Outlying Y (response)
model.final = lm(step.forward2, data = a)
hii = influence(model.final)$hat          # leverage values
mse = anova(model.final)["Residuals",3]
res = model.final$residuals
stu.res = res/sqrt(mse*(1-hii)) #studentized residuals
res.del = res / (1-hii) # deleted residuals
library(MASS)
stu.res.del = studres(model.final) #studentized deleted residuals
# Bonferroni threshold (n = nrow(a), defined at the top of the script;
# model.final is fit on the full data a, so n is the right sample size here).
bon.thre = qt(1-0.1/(2*n),n-model.final$rank-1)
# Residuals vs. fitted values plot with Bonferroni bounds.
plot(model.final$fitted, stu.res.del , xlab="fitted value", ylab="residual",
     cex.lab=1.5, cex.axis = 1.5, pch = 19, cex = 1.5)
abline(h=0, col = grey(0.8), lwd = 2, lty = 2)
abline(h = bon.thre, lwd = 2, lty = 3)
abline(h = -bon.thre, lwd = 2, lty = 3)
# Test for outlying Y (studentized deleted residuals vs Bonferroni cutoff).
sse = sum((summary(model.final)$residuals)^2)
# FIX: the original referenced `fit$rank`, but no object `fit` exists;
# the fitted model here is model.final.
ti = res*sqrt((nrow(a)-model.final$rank-1)/(sse*(1-hii)-res^2))
tt = qt(1-0.1/(2*nrow(a)) , nrow(a)-model.final$rank-1 )
any(abs(ti)>tt)
index_outy = which(abs(ti)>tt)
# Test for outlying X (high leverage: hii > 2p/n rule of thumb).
any(hii>2*model.final$rank/nrow(a))
index_outx = which(hii>2*model.final$rank/nrow(a))
# Cook's distance (influential observations)
Di = stu.res^2*hii/(model.final$rank*(1-hii))
plot(Di,type="h",ylab = "Cook's distance")
Di = c(Di)
dd = pf(Di , model.final$rank, nrow(a)-model.final$rank)
any(dd>0.5)
# DFFITS / DFBETAS influence measures and their usual thresholds.
sta = influence.measures(model.final)
#DFFITS
2*sqrt(model.final$rank/n)
#DFBETAS
2/sqrt(n)
|
05541487fa3372d866c05634202f2a6ab54412b9
|
77dc8c2ecfa4bb7ac0c2ad028a23257a9c8b9aba
|
/cachematrix.R
|
79decaf4d00ebd5487c7a03f57941facb286366d
|
[] |
no_license
|
1nger/ProgrammingAssignment2
|
89962e54a5929ba22e4e27e4019bb6fdca581131
|
f3d4908f3d697a2475eea46469e70359f6e652fb
|
refs/heads/master
| 2021-01-18T05:14:55.510947
| 2014-08-15T23:01:49
| 2014-08-15T23:01:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,577
|
r
|
cachematrix.R
|
# R-Programming Assignment 2:
# This set of two functions can store the values of a matrix and its inverse (if the matrix is invertible)
# in the global environment, and can then later retrieve the inverse from there, rather than calculating it again.
# 1. makeCacheMatrix: This function returns a special list, which contains 4 functions.
# These functions can A) cache a matrix (save it), B) return this cached matrix, C) set the value of a variable that stores the inverse
# of the cached matrix and D) return the inverse of the matrix from that variable.
# Create a caching wrapper around a matrix: a list of four closures that
# store the matrix and (lazily) its inverse in the enclosing environment.
# Replacing the matrix via set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # inverse cache; NULL means "not yet computed"
  set <- function(new_value) {
    # Store a new matrix and drop the now-stale cached inverse.
    x <<- new_value
    cached_inv <<- NULL
  }
  get <- function() x
  set.inv <- function(inverse) cached_inv <<- inverse
  get.inv <- function() cached_inv
  # Expose the four accessors by the names cacheSolve() expects.
  list(set = set, get = get, setinverse = set.inv, getinverse = get.inv)
}
# 2. cacheSolve: This function first checks to see if there is an inverse matrix cached already ("inv.m" is not empty/NULL).
# if a value is stored there (the inverse of our matrix), then it is returned together with a message stating this,
# and otherwise (if inv.m is empty), then it fetches the cached input matrix via the list (get()) and calculates the inverse using
# the solve() function. It then stores the inverse matrix in "inv.m" (with setinverse()) and returns it.
# Return the inverse of the matrix wrapped by `xlist` (a list made by
# makeCacheMatrix). A previously cached inverse is returned directly
# (with a message); otherwise the inverse is computed with solve(),
# cached via setinverse(), and returned.
cacheSolve <- function(xlist) {
  cached <- xlist$getinverse()
  if (!is.null(cached)) {
    # Cache hit: skip the computation entirely.
    message("retrieving cached inverse of matrix")
    return(cached)
  }
  # Cache miss: compute, store for next time, and return.
  inverse <- solve(xlist$get())
  xlist$setinverse(inverse)
  inverse
}
## Thanks for reading my code, I hope it was all clear. Have a nice day! :)
|
bffe0e77e10ece71f62d25b08a86c49e21e620f4
|
1b73390902afc90781b72e7cf49f08878ddd4a08
|
/R/convert.r
|
c2d5001a041e4996e900c044dea47d351843297c
|
[] |
no_license
|
skranz/RTutor2
|
cf018a114a724a666672399e73b8da38c58bd522
|
cb19b69a6d11d2f957dad72df56a52680c2ff353
|
refs/heads/master
| 2021-01-17T13:24:57.440277
| 2017-10-07T01:51:22
| 2017-10-07T01:51:22
| 51,748,016
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,104
|
r
|
convert.r
|
# Interactive example: convert one old-style RTutor solution file in the
# auction example folder. The setwd() path is machine-specific, so this
# is meant to be run by hand, not sourced.
example.convert = function() {
  setwd("D:/libraries/RTutor2/examples/auction")
  source.file = "auction_old_sol.Rmd"
  dest.file = "auction_new_sol.Rmd"
  convert.sol.file(source.file, dest.file)
}
# Read an old-format solution Rmd from source.file, convert its markers
# with convert.sol.rmd(), and write the result to dest.file.
convert.sol.file= function(source.file, dest.file) {
  converted = convert.sol.rmd(readLines(source.file))
  writeLines(converted, dest.file)
}
# Convert the lines of an old-format solution Rmd to the new marker
# syntax: "#< task" becomes "#< show", "## Exercise" becomes "#. section",
# and every "#. section" argument is wrapped in quotes if needed.
convert.sol.rmd = function(txt) {
  restore.point("converst.sol.rmd")
  replacements = list(
    c("#< task", "#< show"),
    c("## Exercise", "#. section")
  )
  for (pair in replacements) {
    txt = gsub(pair[1], pair[2], txt, fixed=TRUE)
  }
  # Ensure each section title is a quoted argument.
  sec.rows = str.starts.with(txt, "#. section ")
  sec.args = quote.single.arg(str.right.of(txt[sec.rows], "#. section "))
  txt[sec.rows] = paste0("#. section ", sec.args)
  txt
}
# Trim each element of arg.str and wrap it in double quotes unless it is
# empty or already starts with a single or double quote.
quote.single.arg = function(arg.str) {
  restore.point("quote.arg")
  trimmed = str.trim(arg.str)
  lead = substring(trimmed, 1, 1)
  # Quote only non-empty, not-yet-quoted entries.
  needs.quote = !(lead == "'" | lead == '"') & nchar(trimmed) > 0
  if (sum(needs.quote) > 0)
    trimmed[needs.quote] = paste0('"', trimmed[needs.quote], '"')
  trimmed
}
|
38128beb7dd31a56ba21167d7099c6c3f8ce1511
|
cb2b7b7c748d197a127306920a48aa6551afe659
|
/R/browse.R
|
a47aca604b7940a1dcf8475af7113f8aa507bcbc
|
[] |
no_license
|
ColinFay/debugin
|
2ee559a23c1714b898a812e77d388aadbd36a590
|
045e168f3f0b26dc7064a799193165fe7a90c87d
|
refs/heads/master
| 2020-03-15T08:19:14.793987
| 2018-06-07T12:37:35
| 2018-06-07T12:37:35
| 132,048,062
| 8
| 0
| null | 2018-05-17T15:51:14
| 2018-05-03T20:50:52
|
R
|
UTF-8
|
R
| false
| false
| 485
|
r
|
browse.R
|
# Toggle browser() breakpoints in the current RStudio editor selection.
# unbrowse() comments every active `browser()` out; browse() re-enables them.
# FIX: the original unbrowse() pattern '[^#]{1}browser\\(\\)' CONSUMED the
# character preceding browser() (dropping e.g. a newline or ';'), and
# browse() tried to compensate by inserting a leading space. A capture
# group now preserves the surrounding text exactly, and the compensating
# space is removed, so the two functions are true inverses.
unbrowse <- function(){
  a <- rstudioapi::getSourceEditorContext()
  # Comment out every browser() call not already preceded by '#'.
  a$selection[[1]]$text <- gsub('(^|[^#])browser\\(\\)', '\\1#browser()', a$selection[[1]]$text)
  rstudioapi::insertText(location = a$selection[[1]]$range, a$selection[[1]]$text)
}
browse <- function(){
  a <- rstudioapi::getSourceEditorContext()
  # Re-activate previously commented-out browser() calls.
  a$selection[[1]]$text <- gsub('#browser\\(\\)', 'browser()', a$selection[[1]]$text)
  rstudioapi::insertText(location = a$selection[[1]]$range, a$selection[[1]]$text)
}
|
74fbb6714f77ac9c52b507e5efed0734a48bb77d
|
7b92bf61c77a88c86c0c42a61d8effa32a155c53
|
/man/cmhc_snapshot_params.Rd
|
4cf1863204947d419fcf563de68fd5f061498221
|
[
"MIT"
] |
permissive
|
cgauvi/cmhc
|
74a473a80682fc1431a24cae17a81602f6c8b8de
|
0d68fe8647f3dfbade6bab0c21ad306a2e83affc
|
refs/heads/master
| 2022-02-24T14:37:12.741330
| 2019-10-04T06:09:07
| 2019-10-04T06:09:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 581
|
rd
|
cmhc_snapshot_params.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cmhc.R
\name{cmhc_snapshot_params}
\alias{cmhc_snapshot_params}
\title{Parameters for time series}
\usage{
cmhc_snapshot_params(table_id = "2.2.12", geography_id = 2410,
geography_type = 3, breakdown_geography_type = "CSD",
filter = list(), region = NA, year = 2017, month = 7,
frequency = NA)
}
\arguments{
\item{table_id}{CMHC table id}
\item{geography_id}{Geography for which to get the data}
\item{geography_type}{type corresponding to geography}
}
\description{
Parameters for time series
}
|
2fea7346d4b51b47f4015c401d4d7cd17e2c4b04
|
374145bc8c431c356e6543bb5965bee672b1c69c
|
/Internet privacy poll.R
|
38c8ad51d00ea7c539db744aa788f0c3aa4592c8
|
[] |
no_license
|
pareekrachit/AnalyticsEdge
|
379bb85803701f08a941b32f5d0bd766ffc20c0e
|
3962efce757bf7dcfeb8583b7e4c77a1a17f05be
|
refs/heads/master
| 2021-01-19T02:24:52.038887
| 2017-07-14T15:26:52
| 2017-07-14T15:26:52
| 87,275,096
| 0
| 0
| null | 2017-04-16T15:14:58
| 2017-04-05T06:38:51
|
R
|
UTF-8
|
R
| false
| false
| 2,577
|
r
|
Internet privacy poll.R
|
# ---------------------------------------------------------------------------
# MITx Analytics Edge: Internet anonymity poll exploration.
# NOTE(review): assumes a data frame `AnonymityPoll` is already loaded in
# the workspace -- confirm it is read in before sourcing this script.
# ---------------------------------------------------------------------------
poll <- AnonymityPoll
#Number of people with smartphones
str(poll)
summary(poll)
sum(poll$Smartphone, na.rm = TRUE)
table(poll$Smartphone)
#States in the Midwest census region
table(poll$State, poll$Region == 'Midwest')
#State in the South census region with the largest number of interviewees
sort(table(poll$State[poll$Region == 'South']))
#Interviewees reported not having used the Internet and not having used a smartphone
table(poll$Internet.Use, poll$Smartphone)
#No interviewees have a missing value for their Internet use
summary(poll)
# Keep only interviewees who reported Internet use OR smartphone use.
Limited = subset(poll, Internet.Use == 1 | Smartphone == 1)
summary(Limited)
#Number of interviewees reported a value of 0 for Info.On.Internet
table(Limited$Info.On.Internet)
#What proportion of interviewees who answered the Worry.About.Info question worry about how much information is available about them on the Internet?
prop.table(table(poll$Worry.About.Info))
#What proportion of interviewees who answered the Anonymity.Possible question think it is possible to be completely anonymous on the Internet?
prop.table(table(poll$Anonymity.Possible))
#What proportion of interviewees who answered the Tried.Masking.Identity question have tried masking their identity on the Internet?
prop.table(table(poll$Tried.Masking.Identity))
#What proportion of interviewees who answered the Privacy.Laws.Effective question find United States privacy laws effective?
prop.table(table(poll$Privacy.Laws.Effective))
#Histogram of the age of interviewees
hist(poll$Age)
plot(Limited$Age, Limited$Info.On.Internet)
#What is the largest number of overlapping points in the plot plot(limited$Age, limited$Info.On.Internet)
sort(table(Limited$Age, Limited$Info.On.Internet))
# jitter() adds small random noise so overlapping points become visible.
jitter(c(1, 2, 3))
plot(jitter(Limited$Age), jitter(Limited$Info.On.Internet))
mean(Limited$Info.On.Internet[Limited$Age <= 30], na.rm = TRUE)
mean(Limited$Info.On.Internet[Limited$Age >= 60 ], na.rm = TRUE)
#What is the average Info.On.Internet value for smartphone users
mean(Limited$Info.On.Internet[Limited$Smartphone == 1], na.rm = TRUE)
#What is the average Info.On.Internet value for non-smartphone users
mean(Limited$Info.On.Internet[Limited$Smartphone == 0], na.rm = TRUE)
#What proportion of smartphone users who answered the Tried.Masking.Identity question have tried masking their identity when using the Internet?
prop.table(table(Limited$Tried.Masking.Identity, Limited$Smartphone),2)
# Clean up the workspace objects created above.
rm(Limited, poll)
|
14e19ac6f053444bf7a99deb411f903c01b715b1
|
37538b0e1aca52e57f5fb7abc489563a70ae6083
|
/Code/E Commerce Dataset R.R
|
739b5906cc0cbfb30a6c2945d21945916c26e98a
|
[] |
no_license
|
pbagchi-DA/CIND_820-Big_Data_Analytics_Project
|
d959adc7bad57402e73273ffed441ef82f784dda
|
35c6c2cb8d333586bb521e3c1793a54e8a9ed553
|
refs/heads/main
| 2023-06-27T23:40:07.673481
| 2021-06-08T04:01:43
| 2021-06-08T04:01:43
| 374,528,538
| 0
| 0
| null | 2021-07-06T00:10:14
| 2021-06-07T03:57:58
|
R
|
UTF-8
|
R
| false
| false
| 3,758
|
r
|
E Commerce Dataset R.R
|
library(readr)
library(tidyr)
library(tidyverse)
library(readxl)
library(reshape2)
# ---------------------------------------------------------------------------
# E-commerce churn dataset: load, explore, clean, and inspect correlations.
# Requires the "E Comm" sheet of the source workbook (machine-specific path).
# ---------------------------------------------------------------------------
Data <- read_xlsx("D:/Data Analytics, Big Data, and Predictive Analytics Certificate/CIND 820 DA0 - Big Data Analytics Project - P2021/Data/E Commerce Dataset.xlsx",
                  sheet = "E Comm",
                  col_names = TRUE)
str(Data)
Data$CustomerID <- as.integer(Data$CustomerID)
str(Data) #Used to visualize the data types of all attributes
summary(Data)
#sum(is.na(df$col))
#Cleaned_Data <- na.omit(Data) #Removing Rows with NAs Using na.omit() Function
library(dplyr)
# Numeric subsets grouped by comparable scale, for side-by-side boxplots.
Numeric_Data1 <- select(Data, Churn, CityTier, HourSpendOnApp, Complain)
Numeric_Data2 <- select(Data, Tenure, WarehouseToHome)
Numeric_Data3 <- select(Data, CouponUsed, OrderCount, SatisfactionScore)
Numeric_Data4 <- select(Data, NumberOfDeviceRegistered, NumberOfAddress, DaySinceLastOrder)
boxplot(Numeric_Data1, horizontal = TRUE)
boxplot(Numeric_Data2, horizontal = TRUE)
boxplot(Numeric_Data3, horizontal = TRUE)
boxplot(Numeric_Data4, horizontal = TRUE)
# Density plot of every (melted) variable.
melt.O_data <- melt(Data)
head(melt.O_data)
ggplot(data = melt.O_data, aes(x = value)) +
  stat_density() +
  facet_wrap(~variable, scales = "free")
# Inspect categorical levels, then harmonize inconsistent labels.
Character_Data <- select(Data, PreferredLoginDevice, PreferredPaymentMode, Gender, PreferedOrderCat, MaritalStatus)
Character_Data %>% count(PreferredLoginDevice)
Character_Data %>% count(PreferredPaymentMode)
Character_Data %>% count(Gender)
Character_Data %>% count(PreferedOrderCat)
Character_Data %>% count(MaritalStatus)
Data$PreferredLoginDevice = str_replace_all(Data$PreferredLoginDevice,"Phone", "Mobile Phone")
Data$PreferredLoginDevice = str_replace_all(Data$PreferredLoginDevice,"Mobile Mobile Phone", "Mobile Phone")
Data$PreferedOrderCat = str_replace_all(Data$PreferedOrderCat,"Mobile", "Mobile Phone")
Data$PreferedOrderCat = str_replace_all(Data$PreferedOrderCat,"Mobile Phone Phone", "Mobile Phone")
Data$PreferredPaymentMode = str_replace_all(Data$PreferredPaymentMode,"CC", "Credit Card")
Data$PreferredPaymentMode = str_replace_all(Data$PreferredPaymentMode,"COD", "Cash on Delivery")
colSums(is.na(Data))
Tenure_Table <- (as.data.frame(Data %>% count(Tenure)))
str(Tenure_Table)
# FIX: the original coerced the whole data frame with as.numeric(), which
# errors ("'list' object cannot be coerced to type 'double'"); plot()
# accepts the two-column data frame directly.
plot(Tenure_Table)
Tenure_Table
Data$Tenure[is.na(Data$Tenure)] <- 0 #Used to replace NA's with 0's
colSums(is.na(Data))
library(dplyr)
# Median-impute the remaining missing values column by column.
Cleaned_Data <- Data %>%
  mutate_all(~ifelse(is.na(.), median(., na.rm = TRUE), .))
Cleaned_Data$Churn <- as.character(Cleaned_Data$Churn)
colSums(is.na(Cleaned_Data))
summary(Cleaned_Data)
Combined_Numeric_data <- select(Cleaned_Data, Churn, CityTier, HourSpendOnApp, Complain, Tenure, WarehouseToHome, CouponUsed, OrderCount, SatisfactionScore, NumberOfDeviceRegistered, NumberOfAddress, DaySinceLastOrder)
melt.CM_data <- melt(Cleaned_Data)
head(melt.CM_data)
ggplot(data = melt.CM_data, aes(x = value)) +
  stat_density() +
  facet_wrap(~variable, scales = "free")
# Round any fractional churn values back to 0/1.
Cleaned_Data$Churn[Cleaned_Data$Churn > 0 & Cleaned_Data$Churn < 0.5] <- 0
Cleaned_Data$Churn[Cleaned_Data$Churn < 1 & Cleaned_Data$Churn >= 0.5] <- 1
summary(Cleaned_Data)
Churn_Table <- (as.data.frame(Cleaned_Data %>% count(Churn)))
str(Churn_Table)
# FIX: the original assigned as.numeric(Churn_Table) to Tenure_Table -- a
# copy/paste slip that also errors on a data frame; the line is dropped.
plot(Churn_Table)
Churn_Table
#write.csv(Cleaned_Data, "D:/Data Analytics, Big Data, and Predictive Analytics Certificate/CIND 820 DA0 - Big Data Analytics Project - P2021/Data/Cleaned E Commerce Dataset.csv")
library(ggcorrplot)
# FIX: data(Combined_Numeric_data) looked up a *packaged* dataset that does
# not exist (the object is already in the workspace); removed. Churn is a
# character column at this point, so coerce all columns to numeric before
# computing the correlation matrix.
corr <- round(cor(sapply(Combined_Numeric_data, as.numeric)), 2)
ggcorrplot(corr, hc.order = TRUE, type = "lower",
           lab = TRUE)
|
6bdda2f37e636f804fbf4338ee67451cf2e8a084
|
a93540a5b40e337da3140dc8f241e3e0632ac641
|
/man/tab2array.Rd
|
bc4ddb9596f4c36accecee18395c194c0dbdc683
|
[] |
no_license
|
Csun1992/algstat
|
3819aecfe3abf775a858a5211e7e3bd4f5999656
|
c4b42c6f1753ae8c936104de1898d2dbeff655f3
|
refs/heads/master
| 2020-04-22T13:43:49.461945
| 2019-02-12T20:38:56
| 2019-02-12T20:38:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 436
|
rd
|
tab2array.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tab2array.R
\name{tab2array}
\alias{tab2array}
\title{Table to array conversion}
\usage{
tab2array(tab)
}
\arguments{
\item{tab}{a table}
}
\value{
an array
}
\description{
Convert a table into an array.
}
\examples{
data(handy)
handy
tab2array(handy)
data(Titanic)
Titanic
tab2array(Titanic)
}
\seealso{
\code{\link{tab2vec}}, \code{\link{array2tab}}
}
|
8b0487ccdcae3a0043231d070ae4002f786728e4
|
05df63a03336e8d7c5738c797f5969ff16f8f8c1
|
/TP3/src/1_Classifieur_Euclidien.R
|
85caaad427ffa280a39af08dd021d8b9328eae30
|
[] |
no_license
|
raphaelhamonnais/UTC_SY09_DataMining
|
a1fead6eb84aa2719a65688c599d0f6a407eee3e
|
3816a16a32c7e5c9be9cc20e940bba65cb82cfa3
|
refs/heads/master
| 2021-03-22T04:20:04.637177
| 2017-06-21T15:46:47
| 2017-06-21T15:46:47
| 84,962,362
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,947
|
r
|
1_Classifieur_Euclidien.R
|
library(mclust)
library(xtable)
source("src/fonctions-tp3/distXY.R")
source("src/fonctions-tp3/front.ceuc.R")
source("src/fonctions-tp3/front.kppv.R")
source("src/fonctions-tp3/separ1.R")
source("src/fonctions-tp3/separ2.R")
source("src/Fonctions_Euclidien.R")
source("src/Fonctions_Utilities.R")
# ---------------------------------------------------------------------------
# SY09 TP3: Euclidean (nearest-centroid) classifier study.
# Comments translated from French for this review.
# ---------------------------------------------------------------------------
appData = read.csv("data/Synth1-40.csv")
Xapp = appData[,1:2]
zapp = factor(appData[,3])
# NOTE(review): the "test" set below reads the SAME file as the training
# set -- confirm whether a separate test file was intended here.
testData = read.csv("data/Synth1-40.csv")
Xtst = testData[,1:2]
ztst = factor(testData[,3])
############## 1.1.3 Testing the functions ##############
mu <- ceuc.app(Xapp, zapp)
front.ceuc(mu, Xtst, ztst, 500)
############## 1.2 Performance evaluation ##############
# NOTE(review): only the LAST of these three assignments takes effect; the
# earlier ones appear to be interactive toggles kept for convenience.
fileNames_CE = c("data/Synth1-40.csv", "data/Synth1-100.csv", "data/Synth1-500.csv", "data/Synth1-1000.csv")
fileNames_CE = c("data/Synth2-1000.csv")
fileNames_CE = c("data/Breastcancer.csv", "data/Pima.csv")
### 1 - Euclidean classifier - parameter estimation
# For each dataset, estimate the parameters mu_k and Sigma_k of the
# class-conditional distributions, as well as the class proportions pi_k.
# pi_k    = class proportion
# mu_k    = class centroid (mean)
# Sigma_k = within-class covariance matrix of the variables
#
# We estimate the model parameters (class centres, covariance matrices,
# proportions) and then check whether the assumptions are reasonably
# satisfied (e.g. covariance matrices need not be exactly diagonal, but
# nearly so -- negligible off-diagonal terms).
#
# The results are interpreted in light of what we know about the method:
# the Euclidean classifier works well when
#   - the proportions are close to 1/g (here 0.5 since g = 2),
#   - the Sigma_k are equal across classes (same dispersion),
#   - the dispersion is spherical, i.e. the Sigma_k are diagonal matrices
#     with zero or negligible diagonal... off-diagonal terms.
#
#
estimatedMu_CE = list() # class centres <=> means
estimatedProportions_CE = list()
estimatedSigma_CE = list() # covariance matrices
for (i in 1:length(fileNames_CE)) {
  file = fileNames_CE[i]
  data = read.csv(file)
  zIndex = 3
  if (file == "data/Breastcancer.csv") {
    print("working with data/Breastcancer.csv")
    zIndex = 10
  }
  if (file == "data/Pima.csv") {
    print("working with data/Pima.csv")
    zIndex = 8
  }
  # NOTE(review): 1:zIndex-1 parses as (1:zIndex)-1 = 0:(zIndex-1); the 0
  # index is silently dropped so this selects columns 1..zIndex-1 -- it
  # works, but 1:(zIndex-1) would be clearer.
  X = data[,1:zIndex-1]
  Z = factor(data[,zIndex])
  g = length(levels(Z))
  p = ncol(X)
  cat("File : ", fileNames_CE[i])
  writeLines("")
  currentFileMu = matrix(nrow = g, ncol = p)
  rownames(currentFileMu) = levels(Z) # class names as row labels
  colnames(currentFileMu) = colnames(X)
  currentFileProportion = matrix(nrow = g, ncol = 1)
  rownames(currentFileProportion) = levels(Z) # class names as row labels
  currentSigma = list()
  for (level in levels(Z)) {
    classData = X[Z == level,]
    currentFileMu[level,] = apply(classData, 2, mean) # per-class mean
    currentFileProportion[level,] = nrow(classData) / nrow(X)
    currentSigma[[level]] = var(classData)
  }
  estimatedMu_CE[[file]] = currentFileMu
  estimatedProportions_CE[[file]] = currentFileProportion
  estimatedSigma_CE[[file]] = currentSigma
}
# Display the estimated parameters.
for (file in fileNames_CE) {
  writeLines("-------------------------")
  writeLines(file)
  writeLines("-------------------------")
  writeLines("")
  writeLines("estimatedMu_CE")
  print(round(estimatedMu_CE[[file]], digits = 2))
  writeLines("")
  writeLines("estimatedProportions_CE")
  print(round(estimatedProportions_CE[[file]], digits = 2))
  writeLines("")
  writeLines("estimatedSigma_CE")
  print("Classe 1")
  print(round(estimatedSigma_CE[[file]]$`1`, digits = 2))
  print("Classe 2")
  print(round(estimatedSigma_CE[[file]]$`2`, digits = 2))
  writeLines("--------------------------------------")
  writeLines("")
  writeLines("")
  writeLines("")
}
# Same display, but as LaTeX tables via xtable (for the report).
for (file in fileNames_CE) {
  writeLines("-------------------------")
  writeLines(file)
  writeLines("-------------------------")
  writeLines("")
  writeLines("estimatedMu_CE")
  print(xtable(round(estimatedMu_CE[[file]], digits = 2)))
  writeLines("")
  writeLines("estimatedProportions_CE")
  print(xtable(round(estimatedProportions_CE[[file]], digits = 2)))
  writeLines("")
  writeLines("estimatedSigma_CE")
  print("Classe 1")
  print(xtable(round(estimatedSigma_CE[[file]]$`1`, digits = 2)))
  print("Classe 2")
  print(xtable(round(estimatedSigma_CE[[file]]$`2`, digits = 2)))
  writeLines("--------------------------------------")
  writeLines("")
  writeLines("")
  writeLines("")
}
# Estimate the error rate over nbTests_CE random train/test splits,
# with a (1 - alpha_CE) confidence interval on the mean error.
nbTests_CE = 20
alpha_CE = 0.05
detailledErrorRates_CE = list()
meanErrorRates_CE = list()
sdErrorRates_CE = list()
errorVariation_CE = list()
confidenceIntervals_CE = list()
for (i in 1:length(fileNames_CE)) {
  file = fileNames_CE[i]
  data = read.csv(file)
  zIndex = 3
  if (file == "data/Breastcancer.csv") {
    print("working with data/Breastcancer.csv")
    zIndex = 10
  }
  if (file == "data/Pima.csv") {
    print("working with data/Pima.csv")
    zIndex = 8
  }
  X = data[,1:zIndex-1]
  Z = data[,zIndex]
  errorRates_CE = matrix(0, nrow = nbTests_CE, ncol = 2)
  colnames(errorRates_CE) = c("Error On App", "Error On Test")
  for (j in 1:nbTests_CE) {
    sample_CE = separ1(X,Z)
    mu = ceuc.app(sample_CE$Xapp, sample_CE$zapp) # estimate the model parameters, i.e. the class centroids
    appPredictedClasses_CE = ceuc.val(mu, sample_CE$Xapp) # predict classes on the training sample
    testPredictedClasses_CE = ceuc.val(mu, sample_CE$Xtst) # predict classes on the test sample
    appErrorRate_CE = 1 - compute.sucess.rate(appPredictedClasses_CE, sample_CE$zapp)
    testErrorRate_CE = 1 - compute.sucess.rate(testPredictedClasses_CE, sample_CE$ztst)
    errorRates_CE[j,1] = appErrorRate_CE
    errorRates_CE[j,2] = testErrorRate_CE
  }
  detailledErrorRates_CE[[file]] = errorRates_CE
  meanErrorRates_CE[[file]] = apply(errorRates_CE, 2, mean)
  sdErrorRates_CE[[file]] = apply(errorRates_CE, 2, sd)
  # Half-width of the Student-t confidence interval on the mean error.
  errorVariation_CE[[file]] = qt(1-alpha_CE/2, df=nbTests_CE-1) * sdErrorRates_CE[[file]] / sqrt(nbTests_CE)
  a = list()
  a[["left"]] = meanErrorRates_CE[[file]] - errorVariation_CE[[file]]
  a[["right"]] = meanErrorRates_CE[[file]] + errorVariation_CE[[file]]
  confidenceIntervals_CE[[file]] = a
  a = NULL
}
# Display the error rates and confidence intervals.
for (file in fileNames_CE) {
  writeLines("-------------------------")
  writeLines(file)
  writeLines("-------------------------")
  writeLines("Estimation de l'erreur")
  print(round(meanErrorRates_CE[[file]], 3))
  writeLines("")
  writeLines("Intervalles de confiance")
  nbCols = length(names(confidenceIntervals_CE[[file]]$left))
  for (i in 1:nbCols) {
    cat(
      "Intervalle pour",
      names(confidenceIntervals_CE[[file]]$left[i]),
      "[",
      round(confidenceIntervals_CE[[file]]$left[i],3),
      ",",
      round(confidenceIntervals_CE[[file]]$right[i],3),
      "]"
    )
    writeLines("")
  }
  writeLines("--------------------------------------")
  writeLines("")
  writeLines("")
}
############# CONFIDENCE INTERVALS #####################
# By the CLT, a sample mean is (approximately) Gaussian, so a confidence
# interval can be built from the 20 error rates. The 20 errors are drawn
# from an unknown distribution but treated as independent, since each run
# uses a different random split produced by separ1().
# The mean of each run follows a Gaussian law; the vector of the 20 mean
# errors is therefore approximately Gaussian by the CLT, which gives the
# confidence interval:
# after centring and scaling the empirical mean,
#   sqrt(n)*(mean(x)-m)/sd(x) ~ N(0,1);
# with unknown variance, sqrt(n)*(mean(x)-m)/sd(x) ~ St(n-1), the Student
# distribution with n-1 degrees of freedom.
file = "data/Synth2-1000.csv"
data = read.csv(file)
X = data[,1:2]
Z = data[,3]
plot(X, col = c("blue", "orange")[Z])
sample = separ1(X,Z)
mu <- ceuc.app(sample$Xapp, sample$zapp)
mu
front.ceuc(mu, sample$Xtst, sample$ztst, 500)
|
a3ac0ed2e40c19b469e66b7a06c6940f32f09e55
|
a2b954f11b63717f7608fb900f31f83309ce4490
|
/man/RSRC_estimators.Rd
|
82d27bfc8cf1eb745fe568a1ddce56df9e514aa3
|
[] |
no_license
|
MeasurementErrorMethods/RRCME
|
69afa65b4d48d50f70dd9c1d0f91de8ca901da8a
|
20896d507f692a2fe4b0707ac2467864e3fa0749
|
refs/heads/master
| 2022-01-16T06:40:21.417064
| 2019-07-04T15:30:56
| 2019-07-04T15:30:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 907
|
rd
|
RSRC_estimators.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RSRC_estimators.R
\name{FitRSRCModel}
\alias{FitRSRCModel}
\title{Calculates regression calibration estimates}
\usage{
FitRSRCModel(valid_dat, full_dat, sampling_type, beta_x_start,
beta_z_start)
}
\arguments{
\item{valid_dat}{Validation subset}
\item{full_dat}{Full dataset}
\item{sampling_type}{String indicating either simple random
sampling or case-cohort sampling}
\item{beta_x_start}{Initial guess for beta_x in optimization}
\item{beta_z_start}{Initial guess for beta_z in optimization}
}
\value{
List of RSRC beta_x and beta_z estimates
}
\description{
These functions implement the risk set
regression calibration estimators.
Moment estimators are fit using least
squares and are used to obtain our best
prediction of the true covariate and
censored event time. This is repeated
at deciles of the failure times.
}
|
18ab9622ab2a33a5536233b1e4ff327500e64006
|
d226838e64a1d55fdaf797893f7468651b725183
|
/R/IlluminaFastqDataFile.R
|
63e5a95f5da9a1756ded867300526b77a4f81f3e
|
[] |
no_license
|
HenrikBengtsson/aroma.seq
|
5fd673cc449d9c3b89daf1125e8cc95556d0641d
|
6464f1e5e929c423978cf7dcb11ac7018d179a6d
|
refs/heads/master
| 2021-06-21T13:53:21.618898
| 2021-02-10T02:57:15
| 2021-02-10T02:57:15
| 20,848,327
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,287
|
r
|
IlluminaFastqDataFile.R
|
###########################################################################/**
# @RdocClass IlluminaFastqDataFile
#
# @title "The abstract IlluminaFastqDataFile class"
#
# \description{
# @classhierarchy
#
# A IlluminaFastqDataFile object represents a FASTQ data file.
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "FastqDataFile".}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author "HB"
#
# \seealso{
# An object of this class is typically part of an
# @see "FastqDataSet".
# }
#*/###########################################################################
# Constructor: an IlluminaFastqDataFile is a FastqDataFile (R.oo class)
# specialized for FASTQ files produced by Illumina instruments.
setConstructorS3("IlluminaFastqDataFile", function(...) {
  extend(FastqDataFile(...), "IlluminaFastqDataFile")
})
# Build a human-readable character summary of the file: the parent-class
# summary plus platform, file-format version and, when the version is
# recognized, metadata parsed from the first FASTQ sequence header.
setMethodS3("as.character", "IlluminaFastqDataFile", function(x, ...) {
  s <- NextMethod("as.character")  # start from the parent-class summary
  fver <- getFileVersion(x)
  s <- c(s, sprintf("Platform: %s", getPlatform(x)))
  s <- c(s, sprintf("File format version: %s", fver))
  if (!is.na(fver)) {
    # Only a recognized file version guarantees the fields below parse.
    s <- c(s, sprintf("Information from the first sequence:"))
    s <- c(s, sprintf("- Sample name: %s", getSampleName(x)))
    s <- c(s, sprintf("- Flowcell ID: %s", getFlowcellId(x)))
    s <- c(s, sprintf("- Lane: %s", getLane(x)))
    s <- c(s, sprintf("- Barcode sequence: %s", getBarcodeSequence(x)))
    s <- c(s, sprintf("- Read direction: %s", getReadDirection(x)))
    s <- c(s, sprintf("- Instrument ID: %s", getInstrumentId(x)))
  }
  s
}, protected=TRUE)
# Infer the FASTQ naming-scheme version from the file's full name by
# matching it against known filename patterns. Returns the version key
# (e.g. "Casava_v1.4") or NA_character_ if no pattern matches.
setMethodS3("getFileVersion", "IlluminaFastqDataFile", function(this, ...) {
  name <- getFullName(this, ...)
  patterns <- c("Casava_v1.4"="^[^_]+_[ACGTN]+_L[0-9]+_R[0-9]")
  hits <- vapply(patterns, function(p) regexpr(p, name) != -1L, logical(1L))
  if (any(hits)) names(patterns)[which(hits)[1L]] else NA_character_
})
# Derive the sample name from the file name. If the parent-class method
# already produced a non-default name, use it; otherwise, for recognized
# Casava v1.4 names, strip the "_<barcode>_L<lane>_R<read>[_<chunk>]"
# suffix from the full name. Throws if the name does not match the
# expected Casava pattern; warns (and returns the full name) for
# unrecognized versions.
setMethodS3("getSampleName", "IlluminaFastqDataFile", function(this, ...) {
  # Get the default sample name
  default <- getFullName(this, ...)
  # Get the "struct-inferred" sample name, if any
  name <- NextMethod("getSampleName")
  # Nothing more to do? (the parent already produced a non-default name)
  if (name != default) {
    return(name)
  }
  # Trim the Casava suffix off the full name?
  ver <- getFileVersion(this)
  if (is.na(ver)) ver <- "<gzipped; unknown>"
  if (ver == "Casava_v1.4") {
    barcode <- getBarcodeSequence(this)
    # AD HOC patch for observing ATGNCA when expected ATGTCA. /HB 2012-10-01
    # (an 'N' no-call in the barcode is treated as a wildcard)
    barcode <- gsub("N", ".", barcode, fixed=TRUE)
    pattern <- sprintf("_%s_L[0-9]+_R[0-9](_[0-9]+)$", barcode)
    if (regexpr(pattern, name) == -1L) {
      throw(sprintf("The fullname (%s) of the %s with version %s does not match the expected pattern (%s): %s", sQuote(name), class(this)[1L], sQuote(ver), sQuote(pattern), getPathname(this)))
    }
    name <- gsub(pattern, "", name)
  } else {
    warning("Unknown Illumina FASTQ file version. Using fullname as sample name: ", name)
  }
  name
})
setMethodS3("getPlatform", "IlluminaFastqDataFile", function(this, ...) {
  # The sequencing platform is always "Illumina" for this class.
  "Illumina"
})
setMethodS3("getLane", "IlluminaFastqDataFile", function(this, ...) {
  # Flowcell lane index, parsed from the first sequence header.
  getFirstSequenceInfo(this)$laneIdx
})
setMethodS3("getInstrumentId", "IlluminaFastqDataFile", function(this, ...) {
  # Sequencer instrument identifier from the first sequence header.
  getFirstSequenceInfo(this)$instrumentId
})
setMethodS3("getFlowcellId", "IlluminaFastqDataFile", function(this, ...) {
  # Flowcell identifier from the first sequence header.
  getFirstSequenceInfo(this)$flowcellId
})
setMethodS3("getBarcodeSequence", "IlluminaFastqDataFile", function(this, ...) {
  # Index (barcode) sequence from the first sequence header.
  getFirstSequenceInfo(this)$indexSequence
})
setMethodS3("getReadDirection", "IlluminaFastqDataFile", function(this, ...) {
  # Read number from the first sequence header.
  getFirstSequenceInfo(this)$read
})
setMethodS3("getPlatformUnit", "IlluminaFastqDataFile", function(this, ...) {
  # SAM "platform unit" (@RG PU): a unique identifier of the run that
  # produced the data.  Illumina convention: the flowcell barcode suffixed
  # with a period and the lane number, further suffixed with a period and
  # the sample name for pooled runs.
  paste(getFlowcellId(this), getLane(this), getSampleName(this), sep=".")
})
setMethodS3("getFirstSequenceInfo", "IlluminaFastqDataFile", function(this, force=FALSE, ...) {
  # Parse the header line of the first sequence in the FASTQ file into its
  # Illumina fields (instrument, run, flowcell, lane, tile, x/y coordinates,
  # read number, filter flag, control number, index sequence).  The parsed
  # result is cached on the object; 'force=TRUE' re-reads the file.
  use("ShortRead")
  info <- this$.info
  if (force || is.null(info)) {
    pathnameFQ <- getPathname(this)
    ## Really inefficient way to find the first sequence information.
    ## /HB 2013-11-19
    ## ff <- FastqFile(pathnameFQ)
    ## on.exit(close(ff))
    ## rfq <- readFastq(ff)
    # Draw a single (first, since ordered=TRUE) record via FastqSampler.
    fqs <- FastqSampler(pathnameFQ, n=1L, ordered=TRUE)
    on.exit(if (!is.null(fqs)) close(fqs))
    rfq <- yield(fqs)
    close(fqs); fqs <- NULL
    id <- id(rfq)[1L]
    info <- as.character(id)
    rfq <- NULL # Not needed anymore
    # Header layout: "<instr>:<run>:<flowcell>:<lane>:<tile>:<x>:<y>"
    # followed by a space and "<read>:<isFiltered>:<control>:<index>".
    patternA <- "^([^:]+):([0-9]+):([^:]+):([0-9]+):([0-9]+):([0-9]+):([0-9]+)"
    patternB <- " ([^:]+):([^:]+):([0-9]+):([^:]+)$"
    pattern <- sprintf("%s%s", patternA, patternB)
    if (regexpr(pattern, info) == -1) {
      throw(sprintf("The (first) sequence of the FASTQ file has an 'info' string (%s) that does not match the expected regular expression (%s): %s", sQuote(info), sQuote(pattern), sQuote(pathnameFQ)))
    }
    # Split the header into its two halves and extract each capture group.
    infoA <- gsub(patternB, "", info)
    infoB <- gsub(patternA, "", info)
    info <- list(
      instrumentId=gsub(patternA, "\\1", infoA),
      runIdx=as.integer(gsub(patternA, "\\2", infoA)),
      flowcellId=gsub(patternA, "\\3", infoA),
      laneIdx=as.integer(gsub(patternA, "\\4", infoA)),
      tileIdx=as.integer(gsub(patternA, "\\5", infoA)),
      x=as.integer(gsub(patternA, "\\6", infoA)),
      y=as.integer(gsub(patternA, "\\7", infoA)),
      read=as.integer(gsub(patternB, "\\1", infoB)),
      isFiltered=gsub(patternB, "\\2", infoB),
      controlNumber=as.integer(gsub(patternB, "\\3", infoB)),
      indexSequence=gsub(patternB, "\\4", infoB)
    )
    # Cache on the object so subsequent accessor calls avoid re-reading.
    this$.info <- info
  }
  info
}, protected=TRUE)
setMethodS3("getDefaultSamReadGroup", "IlluminaFastqDataFile", function(this, ...) {
  # Build the default SAM read group for this file:
  #   SM = sample name, PL = platform, PU = platform unit.
  # (The original comments had the PL/PU descriptions swapped.)
  SamReadGroup(SM=getSampleName(this),
               PL=getPlatform(this),
               PU=getPlatformUnit(this))
})
|
cd6f1d49257e77363c0eaf9318f3985b7a8f4118
|
94b98e593d1f10c964e13fea8eb63fb7a49162aa
|
/010GDP2000_2018/moran.R
|
08d0768cf09eb58abbcd2b208340b709d1088700
|
[] |
no_license
|
DWB1115/RDemo
|
8dda2954baa2ba10047d8b6fc2dc39e6888a274c
|
ef5de115299b54d8061af93a5ad31542980dbb2d
|
refs/heads/master
| 2023-02-03T21:34:50.834402
| 2020-12-18T07:04:35
| 2020-12-18T07:04:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,529
|
r
|
moran.R
|
library(spdep)
library(rgdal)

# Hard-coded Windows input paths; adjust to your local checkout.
w = read.gal("D:\\workspace\\DevWork\\github\\RDemo_gitee\\010GDP2000_2018\\data\\cngdp2000_2018_queen.gal",
             override.id=TRUE)
w2 = read.gal("D:\\workspace\\DevWork\\github\\RDemo_gitee\\010GDP2000_2018\\data\\cngdp2000_2018_area7.gal",
              override.id=TRUE)
cngdp <- readOGR("D:\\workspace\\DevWork\\github\\RDemo_gitee\\010GDP2000_2018\\data\\cngdp2000_2018.shp")
map_crd <- coordinates(cngdp)

year <- 2000:2018

# Moran's I test statistic of the GDP column for every year under a given
# spatial weights list.  Replaces two copy-pasted loops that grew a vector
# with c() on each iteration.
moran_by_year <- function(listw) {
  vapply(year, function(i) {
    as.numeric(moran.test(cngdp@data[paste("GDP", i, sep="")][, 1],
                          listw, zero.policy = TRUE)$statistic)
  }, numeric(1L))
}

# Traditional Queen contiguity weights.
w_T = nb2listw(w, style = "W", zero.policy = TRUE)
queen_M <- moran_by_year(w_T)

# Custom seven-region weights.
w_T2 = nb2listw(w2, style = "W", zero.policy = TRUE)
queen_M2 <- moran_by_year(w_T2)

# Map and Moran's I series for the Queen weights.
plot(cngdp)
title("传统Queen空间权重模式")
points(map_crd, col="red", pch="*")
plot(w, coords=map_crd, pch=19, cex=0.1, col="red", add=TRUE)
title("传统Queen空间权重模式莫兰指数")
plot(queen_M, x = year, col="red")
lines(queen_M, x=year, col="red")

# Map and Moran's I series for the seven-region weights.
plot(cngdp)
title("自定义七分区空间权重模式")
points(map_crd, col="blue", pch="*")
title("自定义七分区空间权重模式莫兰指数")
plot(w2, coords=map_crd, pch=19, cex=0.1, col="blue", add=TRUE)
points(x=year, y=queen_M2, col="blue")
lines(queen_M2, x=year, col="blue")
|
96310156e69780497423fa3a32102289912580de
|
59c349f28498e89ab794f0c0f8e07dcb1e13042e
|
/bin/functions.R
|
043936dc8bf09b14d9a07e50f87f25db6420f5ff
|
[] |
no_license
|
ComunidadBioInfo/minicurso_abr_2021
|
87c4031110848a3b53d5f27492fcc727f1263fec
|
4bfde7be836ce06dd862c84f0d11dd4f1c854943
|
refs/heads/main
| 2023-09-02T04:38:32.173607
| 2021-11-19T04:24:04
| 2021-11-19T04:24:04
| 328,829,455
| 3
| 8
| null | 2021-04-15T13:00:36
| 2021-01-12T00:39:39
|
HTML
|
UTF-8
|
R
| false
| false
| 3,742
|
r
|
functions.R
|
DEGResults <- function(qlf) {
  # Return every differentially expressed gene from a fitted edgeR test
  # object ('qlf', from the generalized linear model) as a data frame;
  # n = Inf keeps all genes.
  as.data.frame(topTags(qlf, n = Inf))
}
volcanoplotR <- function(dge.obj, logfc, p.adj, type) {
  ## Build a volcano plot from a table of differential-expression results.
  ##
  ## A 'condition' column is added labelling each gene "Over-expressed",
  ## "Sub-expressed" or "NS" according to its log fold change and adjusted
  ## p-value; the returned ggplot shows logFC vs -log10(adjusted p) with
  ## dashed threshold lines and per-condition counts in the legend.
  ##
  ## dge.obj: data frame of differential-expression results
  ## logfc:   logFC threshold used for the differential expression test
  ## p.adj:   adjusted p-value (FDR) threshold for significance
  ## type:    format of dge.obj -- "edgeR" (columns logFC/FDR); any other
  ##          value is treated as DESeq2 (columns log2FoldChange/padj)
  ## Updated 5-mar-2021 Rodolfo Chavez
  if(type == "edgeR") {
    volc <- dge.obj %>%
      mutate(condition = ifelse((dge.obj$logFC > logfc) & (dge.obj$FDR < p.adj), "Over-expressed",
      ifelse((dge.obj$logFC < -logfc) & (dge.obj$FDR < p.adj), "Sub-expressed",
      ifelse((dge.obj$logFC > logfc) & (dge.obj$FDR > p.adj), "NS",
      ifelse((dge.obj$logFC < -logfc) & (dge.obj$FDR > p.adj), "NS",
      ifelse((dge.obj$logFC < logfc) & (dge.obj$FDR > p.adj), "NS", "NS"))))))
    # Scatter coloured by condition; legend labels carry the counts.
    volcano_plot <- ggplot(volc)+
      geom_point(aes(x = logFC, y = -log10(FDR), color = condition))+
      scale_color_manual(name = "Condition",
      labels = paste(c("NS", "Over-expressed", "Sub-expressed"), c(sum(volc$condition == "NS"), sum(volc$condition == "Over-expressed"), sum(volc$condition == "Sub-expressed"))),
      values = c("#6e6d6e","#d84b47","#66c343"))+
      geom_vline(aes(xintercept = logfc), linetype = "dashed")+
      geom_vline(aes(xintercept = -logfc), linetype = "dashed")+
      geom_hline(aes(yintercept = -log10(p.adj)), linetype = "dashed")+
      theme_set(theme_bw())+
      theme(plot.title = element_text(face = "bold", size = 18),
      axis.title = element_text(size = 18),
      legend.title = element_text(face = "bold", size = 15),
      legend.text = element_text(size = 15),
      legend.position = "bottom")
  } else {
    # DESeq2 branch: same classification on log2FoldChange/padj; note that
    # rows with NA are dropped here, whereas the edgeR branch keeps them.
    volc <- dge.obj %>%
      mutate(condition = ifelse((dge.obj$log2FoldChange > logfc) & (dge.obj$padj < p.adj), "Over-expressed",
      ifelse((dge.obj$log2FoldChange < -logfc) & (dge.obj$padj < p.adj), "Sub-expressed",
      ifelse((dge.obj$log2FoldChange > logfc) & (dge.obj$padj > p.adj), "NS",
      ifelse((dge.obj$log2FoldChange < -logfc) & (dge.obj$padj > p.adj), "NS",
      ifelse((dge.obj$log2FoldChange < logfc) & (dge.obj$padj > p.adj), "NS", "NS")))))) %>%
      drop_na()
    volcano_plot <- ggplot(volc)+
      geom_point(aes(x = log2FoldChange, y = -log10(padj), color = condition))+
      scale_color_manual(name = "Condition",
      labels = paste(c("NS", "Over-expressed", "Sub-expressed"), c(sum(volc$condition == "NS"), sum(volc$condition == "Over-expressed"), sum(volc$condition == "Sub-expressed"))),
      values = c("#6e6d6e","#d84b47","#66c343"))+
      geom_vline(aes(xintercept = logfc), linetype = "dashed")+
      geom_vline(aes(xintercept = -logfc), linetype = "dashed")+
      geom_hline(aes(yintercept = -log10(p.adj)), linetype = "dashed")+
      theme_set(theme_bw())+
      theme(plot.title = element_text(face = "bold", size = 18),
      axis.title = element_text(size = 18),
      legend.title = element_text(face = "bold", size = 15),
      legend.text = element_text(size = 15),
      legend.position = "bottom")
  }
  return(volcano_plot)
}
|
a8a0e6a5c670469c6034bce95ecd7d7bc79fb63d
|
e6e94a80958cd88d414e821df32b9258a279a7bd
|
/workout02-richard-jin/ui.R
|
3a8b2062c5dbb49b81cade826826cff7df4b88fe
|
[] |
no_license
|
richardjin0823/workout02-richard-jin
|
0ee61bbf9c7b8d23982eaf2b2e373c612a55f8b4
|
edc53f2507f3f49d080333dfa386b6677ae01f4d
|
refs/heads/master
| 2020-05-14T12:14:51.308051
| 2019-04-17T19:38:38
| 2019-04-17T19:38:38
| 181,790,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,053
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(yarrr)
library(DT)
# UI definition for the investment-strategies app: three rows of input
# sliders/selects, followed by a timeline plot and a printed balance table.
ui <- fluidPage(
  # Application title
  titlePanel("Investment Strategies - Richard Jin"),
  # First row of inputs: initial amount, return rate, number of years.
  fluidRow(
    column(4,
       sliderInput("init",
             "Initial Amount",
             min = 0,
             max = 100000,
             value = 1000,
             step = 500)),
    column(4,
       sliderInput("ret",
             "Return Rate (in %)",
             min = 0,
             max = 20,
             value = 5,
             step = 0.1)),
    column(4,
       sliderInput("yrs",
             "Years",
             min = 0,
             max = 50,
             value = 10,
             step = 1))),
  # Second row of inputs: annual contribution, growth rate, facet toggle.
  fluidRow(
    column(4,
       sliderInput("annc",
             "Annual Contribution",
             min = 0,
             max = 50000,
             value = 2000,
             step = 500)),
    column(4,
       sliderInput("grate",
             "Growth Rate (in %)",
             min = 0,
             max = 20,
             value = 2,
             step = 0.1)),
    column(4,
       selectInput("facet",
             "Facet?",
             choices = c("No", "Yes")))),
  # Output: timeline plot ("distPlot" rendered by the server).
  fluidRow(
    column(width = 12),
    mainPanel( "Timelines")),
  fluidRow(
    column(width = 12),
    plotOutput("distPlot")),
  # Output: balance table printed as text ("table" rendered by the server).
  fluidRow(
    column(width = 12),
    mainPanel ("Balance")),
  fluidRow(
    column(width = 12),
    verbatimTextOutput("table")))
|
46e05d1a4036e7b15f68b066b0848ab84d20d65a
|
a8bd4a8d687b7a923bc82763a9d2e84a3235b186
|
/man/glasso.complex.Rd
|
ef5bbe3c5fa0791b25569fd842768658f760f9f6
|
[] |
no_license
|
crbaek/lwglasso
|
08e08c9d6091f5a7839ad50b7df36293c56c264f
|
0be595bb514c498b3cd7028eeaee2a0b195b45b1
|
refs/heads/master
| 2022-12-06T03:51:29.511529
| 2022-12-01T03:46:49
| 2022-12-01T03:46:49
| 266,767,907
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,543
|
rd
|
glasso.complex.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mLRDglasso.R
\name{glasso.complex}
\alias{glasso.complex}
\title{Sparse estimation of inverse G using graphical lasso.}
\usage{
glasso.complex(
Ghat,
Tt,
lambda = "ebic",
type = "soft",
gridTF = TRUE,
gg = 1,
bound = c(0.05, 1),
debiasTF = FALSE
)
}
\arguments{
\item{Ghat}{Estimated (nonsparse) long-run variance matrix to be sparsely estimated using glasso}
\item{Tt}{Data length}
\item{lambda}{"ebic" uses extended BIC criteria to select penalty parameter in graphical lasso.
User also can provide numerical value.}
\item{type}{Types of thresholding in ADMM algorithm. "hard", "soft" and "adaptive" threshold functions are possible.}
\item{gridTF}{If TRUE, penalty parameter is searched over interval provided on bound argument. Otherwise, optim function searches optimal
lambda.}
\item{gg}{The tuning parameter in the extended BIC criteria. Default value is 1. If gg=0, it is a usual BIC.}
\item{bound}{Bound of grid search in extended BIC. Default value is (.05, 1)}
\item{debiasTF}{If TRUE, debiased by applying constrained MLE introduced in the paper.}
\item{approxTF}{If TRUE, univariate LRD parameter is used in the estimation. Otherwise, multivariate LRD parameter estimator is used.}
}
\description{
This function estimates a sparse (complex-valued) long-run variance matrix using the graphical lasso.
}
\details{
glasso.complex
}
\examples{
glasso.complex(Ghat, Tt, lambda="ebic")
}
\keyword{Local}
\keyword{Whittle}
\keyword{estimation}
|
71e05c8b56904539fa7ec4c5f36bd401a12d5d4e
|
a4ef53e8d087ce99848c7c4960af09927e0a56cf
|
/Metodos remuestreo/k-fold cross validation with caret.R
|
0dfad35e2487e648f9f83e26e5756c480dcfd3f6
|
[] |
no_license
|
fhernanb/modelos-predictivos
|
0ba16c309a1f726e9e8ca36d37fe7add3a7d0cb5
|
909d65c40ba666fd473b98cb6610b698eff87aad
|
refs/heads/master
| 2023-02-19T05:57:33.799881
| 2021-01-22T12:46:22
| 2021-01-22T12:46:22
| 296,086,556
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,285
|
r
|
k-fold cross validation with caret.R
|
# -------------------------------------------------------------------------
# k-fold cross validation on the ISLR 'Auto' data, done manually and
# automatically with caret, for a linear model explaining mpg from
# horsepower and horsepower^2.
# Metric: RMSE.
# -------------------------------------------------------------------------
# The data we will use
library(ISLR)
head(Auto)
# Explore the data
library(tidyverse)
Auto %>% glimpse()
# Manually ----------------------------------------------------------------
# Number of folds (was hard-coded as 10/39/390 throughout).
k <- 10
# Use only observations 1 to 390 (NOT the last two) so folds divide evenly.
datos <- Auto[1:390, ]
# Vector identifying the fold of each observation.
folds <- rep(seq_len(k), each = nrow(datos) / k)
# Preallocated vector for the per-fold RMSE values.
rmse <- numeric(k)
# Fit on k-1 folds, predict the held-out fold, record its RMSE.
for (i in seq_len(k)) {
  testIndexes <- which(folds == i)  # arr.ind dropped: 'folds' is a plain vector
  testData <- datos[testIndexes, ]
  trainData <- datos[-testIndexes, ]
  mod <- glm(mpg ~ poly(horsepower, degree=2), data=trainData)
  y_hat <- predict(object=mod, newdata=testData)
  rmse[i] <- sqrt(mean((testData$mpg - y_hat)^2))
}
# The per-fold RMSE values
rmse
# Distribution of the RMSE values
plot(density(rmse))
rug(rmse, col='tomato')
# Mean of the RMSE values
rmse %>% mean()
# Variance of the RMSE values
rmse %>% var()
# Automatically, controlling the folds ------------------------------------
library(caret)
# Matrix of observation indices, one column per fold
x <- matrix(1:390, ncol=k)
# Lists with the test / train indices for each fold
index_to_test <- split(x=x, f=rep(1:ncol(x), each=nrow(x)))
index_to_train <- lapply(index_to_test, function(x) setdiff(1:390, x))
# Inspect the contents of both objects
index_to_test
index_to_train
# Cross-validation setup reusing our explicit fold indices
fitControl <- trainControl(method = "cv",
                           savePredictions=TRUE,
                           index = index_to_train,
                           indexOut = index_to_test)
# Train the model
fit1 <- train(mpg ~ poly(horsepower, degree=2),
              data = datos,
              method = "glm",
              metric = "RMSE",
              trControl = fitControl)
# Show the results
fit1
# Compare with the manual result
mean(rmse)
# Results for each fold
fit1$resample
# Compare with the manual result
rmse
# Extract the individual RMSE values
fit1$resample$RMSE
# Inspect the predictions; here pred=y_hat and obs=y_true
pred <- fit1$pred
pred$pred[1:5]
# Automatically with k folds ----------------------------------------------
fitControl <- trainControl(method = "cv",
                           number = k)
# Train the model
fit2 <- train(mpg ~ poly(horsepower, degree=2),
              data = datos,
              method = "glm",
              metric = "RMSE",
              trControl = fitControl)
# Show the results
fit2
# Results for each fold
fit2$resample
# Mean of the RMSE values
fit2$resample$RMSE %>% mean()
# Variance of the RMSE values
fit2$resample$RMSE %>% var()
# Distribution of the RMSE values
plot(density(fit2$resample$RMSE), main='Densidad', las=1)
rug(fit2$resample$RMSE, col='tomato')
|
26538817af048748908a118ceded2b76d55a51cb
|
1d9fb87476c16af7ff610ae8c753c23e0fcf6c6c
|
/man/readEdges.Rd
|
710de5ab60c44af0e84a3192b50f531374405be4
|
[
"MIT"
] |
permissive
|
hartmado/diveR
|
8b3691f261427d86d0e33816dc9e73a1165f489d
|
1100d0c54914c995d51a0fb3d7ed85240bb1e8f4
|
refs/heads/master
| 2021-01-17T08:33:31.227873
| 2015-09-16T18:44:40
| 2015-09-16T18:44:40
| 42,570,044
| 0
| 0
| null | 2015-09-16T06:49:17
| 2015-09-16T06:49:17
| null |
UTF-8
|
R
| false
| false
| 755
|
rd
|
readEdges.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/diversity.R
\name{readEdges}
\alias{readEdges}
\title{A procedure to read a list of edges from a file}
\usage{
readEdges(path, sepr, we = TRUE)
}
\arguments{
\item{path}{A string representing the path to a file. Rows and columns are described by integers (Ex.: 1 2 1)}
\item{sepr}{Separator field used in the file to separate columns}
\item{we}{It indicates if the list of edges includes weights or not. Default is TRUE}
}
\value{
A matrix with objects as rows and categories as columns
}
\description{
It takes a file and creates a matrix for diversity analysis
}
\examples{
path <- "~/MyDiversity/data/toy.edges"
sepr <- ' '
we <- TRUE
X <- readEdges(path,sepr,we)
}
|
a95c40322687838ff420654482f6f68f3afe067c
|
7e1820d2fee01745b84deadeddcdb1593b035323
|
/R-CapstoneExtra.R
|
584bf6b1b680ebaa8e5048f7d7da17f32f836eda
|
[] |
no_license
|
sagarnil1989/Template
|
68ec90b5cc182faec2ec1e5ff787d02c56daab39
|
546ea809b5b6e7ff3401827b020b5c2b592deab6
|
refs/heads/master
| 2020-05-26T05:28:58.549356
| 2019-06-10T09:10:45
| 2019-06-10T09:10:45
| 188,121,587
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,719
|
r
|
R-CapstoneExtra.R
|
# Row-wise NA count per observation.
# FIX: the original line had an unbalanced "(" which made the file unparseable.
row_na_val <- rowSums(is.na(dataset))
# NOTE(review): 'na_finder_columnwise' and 'sagar' are not defined in this
# file -- presumably defined elsewhere; verify before running.
na_finder_columnwise(sagar)
# Remove the unique identifier column.
dataset <- select(dataset, -Customer_ID)
# Share of missing values per column (variable name typo kept for
# compatibility with any downstream use).
na_value_perecent <- colSums(is.na(dataset)) / prod(dim(dataset)[1])
col_na_val_mean <- colMeans(is.na(dataset))
# Impact of dwelling type on churn rate.
ggplot(data=dataset, aes(x = dwlltype, fill = factor(churn)))+
  geom_bar(stat='count', na.rm = TRUE,position='dodge') +
  labs(x = 'dwlltype')
# But anyways lets drop it; NA value reduced to 8445.
dataset <- select(dataset, -c(dwlltype))
#----check frequency of churn----
table(dataset$churn)
#count(dataset, 'churn')
#----Analysing each categorical IV with respect to churn----
#----Mean number of monthly minutes of use----
# Renamed from 'plot' so base::plot is not masked by the histogram object.
mou_hist <- hist(dataset$mou_Mean, breaks = seq(0,15000,by=100), xlim = c(0,2000))
ggplot(data=dataset, aes(x = income, fill = factor(churn)))+
  scale_x_continuous(limits = c(0,2000))+
  scale_y_continuous(limits = c(0,10))+
  geom_bar(stat='count', na.rm = TRUE,position='dodge',width=100) +
  labs(x = 'Family Size')
#----active subscribers in family----
MAX <- max(dataset$mou_Mean, na.rm=TRUE)
table(dataset$churn, dataset$actvsubs)
#----Logistic Regression on Churn as target variable----
|
9605119aace4b4c789b6e63e717d66a0236c74db
|
217302b0f1e46c37a3d8506c0b4a61e4eae16d1e
|
/tidy_data.r
|
107a2c1e5cbf0efb963643b4cab576d79da3e06d
|
[] |
no_license
|
mattymo18/NFL_DRAFT
|
c472a8dca5fb801b62bee810321b2c81fed723c8
|
f8ade147d9d485ca5aa9adab8b665d53bc8716f0
|
refs/heads/master
| 2023-03-14T08:40:56.839759
| 2021-02-23T17:57:16
| 2021-02-23T17:57:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,808
|
r
|
tidy_data.r
|
library(tidyverse)

# Raw draft and combine results.
draft_raw <- read.csv("source_data/draft.csv")
combine_raw <- read.csv("source_data/combine.csv")

# Keep only data from 2000 onward and the relevant columns: observations
# from the modern era are more complete, and because the game has changed
# so much this analysis is only really relevant for modern football.
combine_2000 <- combine_raw %>%
  filter(combineYear >= 2000) %>%
  select(1:6, 8:18, 28:33)
draft_2000 <- draft_raw %>%
  filter(draft >= 2000) %>%
  select(1:19)

# Drop every row containing an NA, then save the cleaned inputs.
combine_complete <- na.omit(combine_2000)
draft_complete <- na.omit(draft_2000)[, c(1, 3, 4)]
write.csv(draft_complete, "derived_data/draft.csv")
write.csv(combine_complete, "derived_data/combine.csv")

# Join into a single clean data frame keyed on playerId.
df_clean <- na.omit(left_join(combine_complete, draft_complete, by = "playerId")[, -c(1, 3, 4:9, 11:13, 16)])
df_clean$position <- factor(df_clean$position)
write.csv(df_clean, "derived_data/Clean_Data.csv")

# Split into five position groups.
off_skill <- df_clean %>%
  filter(position %in% c("WR", "RB"))
off_strength <- df_clean %>%
  filter(position %in% c("C", "OG", "OT", "OL"))
def_skill <- df_clean %>%
  filter(position %in% c("DB", "S"))
def_strength <- df_clean %>%
  filter(position %in% c("DL", "DT", "DE"))
mixed <- df_clean %>%
  filter(position %in% c("LB", "OLB", "TE"))

# Save the per-group subsets.
write.csv(off_skill, "derived_data/Off.Skill.csv")
write.csv(off_strength, "derived_data/Off.Strength.csv")
write.csv(def_skill, "derived_data/Def.Skill.csv")
write.csv(def_strength, "derived_data/Def.Strength.csv")
write.csv(mixed, "derived_data/Df.Mix.csv")
|
0097fefd73baa0d91791baf89ec453fc12f99a31
|
43f6d2a89e611f49d1bff870e6381fa182184ce2
|
/man/sfunc.Rd
|
a40af396a6301dc0c0ffd4363db625c8ed8e701d
|
[] |
no_license
|
pedroreys/BioGeoBEARS
|
23bab5299c44b4cfa2ab0e9dbe0de4ecf2196f69
|
9aef25ebf57b854e6f02d5a3a2ca420e31833123
|
refs/heads/master
| 2021-01-17T17:12:08.479890
| 2013-08-04T02:35:28
| 2013-08-04T02:35:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,414
|
rd
|
sfunc.Rd
|
\name{sfunc}
\alias{sfunc}
\title{Extract the appropriate probability for a subset speciation event, given text code for rangesize of smaller descendant, and ancestor}
\usage{
sfunc(charcell, relprob_subsets_matrix)
}
\arguments{
\item{charcell}{The text in the cell, indicating the type
of speciation/cladogenesis range inheritance event.}
\item{relprob_subsets_matrix}{A numeric matrix describing
the relative probability of each smaller daughter range,
conditional on the ancestral rangesize.}
}
\value{
\code{prob_of_this_b}, a numeric value giving the
relative probability of that descendent-ancestor
rangesize pair.
}
\description{
Extract the appropriate probability for a subset
speciation event, given text code for rangesize of
smaller descendant, and ancestor
}
\note{
Go BEARS!
}
\examples{
testval=1
# Examples
# Probabilities of different descendant rangesizes, for the smaller descendant,
# under sympatric/subset speciation
# (plus sympatric/range-copying, which is folded in):
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.0001, NA_val=NA)
relprob_subsets_matrix
sfunc(charcell="s1_1", relprob_subsets_matrix)
sfunc(charcell="s1_2", relprob_subsets_matrix)
sfunc(charcell="s1_3", relprob_subsets_matrix)
sfunc(charcell="s2_3", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.5, NA_val=NA)
relprob_subsets_matrix
sfunc(charcell="s1_1", relprob_subsets_matrix)
sfunc(charcell="s1_2", relprob_subsets_matrix)
sfunc(charcell="s1_3", relprob_subsets_matrix)
sfunc(charcell="s2_3", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.9999, NA_val=NA)
relprob_subsets_matrix
sfunc(charcell="s1_1", relprob_subsets_matrix)
sfunc(charcell="s1_2", relprob_subsets_matrix)
sfunc(charcell="s1_3", relprob_subsets_matrix)
sfunc(charcell="s2_3", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.0001, NA_val=NA)
relprob_subsets_matrix
yfunc(charcell="y1", relprob_subsets_matrix)
yfunc(charcell="y2", relprob_subsets_matrix)
yfunc(charcell="y3", relprob_subsets_matrix)
yfunc(charcell="y4", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.5, NA_val=NA)
relprob_subsets_matrix
yfunc(charcell="y1", relprob_subsets_matrix)
yfunc(charcell="y2", relprob_subsets_matrix)
yfunc(charcell="y3", relprob_subsets_matrix)
yfunc(charcell="y4", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_subsets(max_numareas=6,
maxent_constraint_01=0.9999, NA_val=NA)
relprob_subsets_matrix
yfunc(charcell="y1", relprob_subsets_matrix)
yfunc(charcell="y2", relprob_subsets_matrix)
yfunc(charcell="y3", relprob_subsets_matrix)
yfunc(charcell="y4", relprob_subsets_matrix)
# Probabilities of different descendant rangesizes, for the smaller descendant,
# under vicariant speciation
relprob_subsets_matrix = relative_probabilities_of_vicariants(max_numareas=6,
maxent_constraint_01v=0.0001, NA_val=NA)
relprob_subsets_matrix
vfunc(charcell="v1_1", relprob_subsets_matrix)
vfunc(charcell="v1_2", relprob_subsets_matrix)
vfunc(charcell="v1_3", relprob_subsets_matrix)
vfunc(charcell="v1_4", relprob_subsets_matrix)
vfunc(charcell="v2_4", relprob_subsets_matrix)
vfunc(charcell="v2_2", relprob_subsets_matrix)
vfunc(charcell="v1_6", relprob_subsets_matrix)
vfunc(charcell="v2_6", relprob_subsets_matrix)
vfunc(charcell="v3_6", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_vicariants(max_numareas=6,
maxent_constraint_01v=0.5, NA_val=NA)
relprob_subsets_matrix
vfunc(charcell="v1_1", relprob_subsets_matrix)
vfunc(charcell="v1_2", relprob_subsets_matrix)
vfunc(charcell="v1_3", relprob_subsets_matrix)
vfunc(charcell="v1_4", relprob_subsets_matrix)
vfunc(charcell="v2_4", relprob_subsets_matrix)
vfunc(charcell="v2_2", relprob_subsets_matrix)
vfunc(charcell="v1_6", relprob_subsets_matrix)
vfunc(charcell="v2_6", relprob_subsets_matrix)
vfunc(charcell="v3_6", relprob_subsets_matrix)
relprob_subsets_matrix = relative_probabilities_of_vicariants(max_numareas=6,
maxent_constraint_01v=0.9999, NA_val=NA)
relprob_subsets_matrix
vfunc(charcell="v1_1", relprob_subsets_matrix)
vfunc(charcell="v1_2", relprob_subsets_matrix)
vfunc(charcell="v1_3", relprob_subsets_matrix)
vfunc(charcell="v1_4", relprob_subsets_matrix)
vfunc(charcell="v2_4", relprob_subsets_matrix)
vfunc(charcell="v2_2", relprob_subsets_matrix)
vfunc(charcell="v1_6", relprob_subsets_matrix)
vfunc(charcell="v2_6", relprob_subsets_matrix)
vfunc(charcell="v3_6", relprob_subsets_matrix)
}
\author{
Nicholas J. Matzke \email{matzke@berkeley.edu}
}
\references{
\url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
\url{http://en.wikipedia.org/wiki/Maximum_entropy_probability_distribution}
Matzke_2012_IBS
Harte2011
ReeSmith2008
Ronquist1996_DIVA
Ronquist_1997_DIVA
Ronquist_Sanmartin_2011
Landis_Matzke_etal_2013_BayArea
}
\seealso{
\code{\link{yfunc}}, \code{\link{vfunc}},
\code{\link{relative_probabilities_of_subsets}},
\code{\link{symbolic_to_relprob_matrix_sp}},
\code{\link{get_probvals}}, \code{\link[FD]{maxent}},
\code{\link{calcZ_part}}, \code{\link{calcP_n}}
}
|
ea23e1dcc344d953a8df00ed446673960fb9b923
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Sim.DiffProc/R/OU.R
|
980377ad225004e4230fc0cd5a56128839a2d584
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
r
|
OU.R
|
## Fri Mar 07 18:39:01 2014
## Original file Copyright © 2016 A.C. Guidoum, K. Boukhetala
## This file is part of the R package Sim.DiffProc
## Department of Probabilities & Statistics
## Faculty of Mathematics
## University of Science and Technology Houari Boumediene
## BP 32 El-Alia, U.S.T.H.B, Algiers
## Algeria
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## A copy of the GNU General Public License is available at
## http://www.r-project.org/Licenses/
## Unlimited use and distribution (see LICENCE).
###################################################################################################
######
######
OU <- function(N, ...) UseMethod("OU")
OU.default <- function(N =100,M=1,x0=2,t0=0,T=1,Dt,mu=4,sigma=0.2,...)
{
  # Simulate M trajectories of an Ornstein-Uhlenbeck process on [t0, T]
  # by delegating to HWV() with theta = 0.
  #
  # N     : number of time steps (integer > 1)
  # M     : number of simulated trajectories (positive integer)
  # x0    : value of the process at t0
  # t0, T : start and end times (0 <= t0 < T)
  # Dt    : time step, passed through to HWV (no default)
  # mu    : speed of mean reversion (> 0)
  # sigma : diffusion coefficient (> 0)
  #
  # Note: the redundant any() wrappers around scalar '||' conditions in the
  # original were removed; each check is a scalar, so results are identical.
  if (!is.numeric(x0)) stop("'x0' must be numeric")
  if (!is.numeric(t0) || !is.numeric(T)) stop(" 't0' and 'T' must be numeric")
  if (!is.numeric(N) || (N - floor(N) > 0) || N <= 1) stop(" 'N' must be a positive integer ")
  if (!is.numeric(M) || (M - floor(M) > 0) || M <= 0) stop(" 'M' must be a positive integer ")
  if (!is.numeric(sigma) || sigma <= 0) stop(" 'sigma' must be a numeric > 0 ")
  if (!is.numeric(mu) || mu <= 0) stop(" 'mu' must be a numeric > 0 ")
  if (t0 < 0 || T < 0 || T <= t0) stop(" please use positive times! (0 <= t0 < T) ")
  X <- HWV(N, M, x0, t0, T, Dt, mu, theta=0, sigma)
  return(X)
}
|
0ed001d3330cab23ee196d08b28794477b36ec27
|
787726956d728f95d4c2c78e6c502641fab4ccb9
|
/Problem_1.R
|
b2d818af55fee07e5c27022e5d8165d223889d6c
|
[] |
no_license
|
boyko/econometrics2018-de-G2
|
b9a4067d86b80aa2fba17177af0628b37c22bb6a
|
601dfd19778173099e9a5b895a19bbe263da30f4
|
refs/heads/master
| 2021-04-06T01:23:44.363762
| 2018-03-19T08:41:31
| 2018-03-19T08:41:31
| 124,431,051
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,547
|
r
|
Problem_1.R
|
# Install the R packages used in this program (run once if needed).
# install.packages(c('tidyr', 'dplyr', 'ggplot2'))
library(tidyr)
library(dplyr)
library(ggplot2)
# Read in the results (they must be downloaded beforehand).
# If errors occur, check R's current working directory with getwd().
ageGuessesRaw <- read.csv('results.csv')
# Print the first rows of the data set.
head(ageGuessesRaw)
# Determine the number of student groups.
# NOTE(review): exercise blank -- because the right-hand side is empty, this
# assignment chains into the next statement, so nGroups would silently
# receive the gather() result until the blank is filled in by the student.
nGroups <-
# The gather function from tidyr reshapes the data from wide to long format.
ageGuesses <- gather(ageGuessesRaw, key = 'person', value = 'estAge', person1:person12)
ageGuesses$person
# Vector with the true ages of the pictured persons.
trueAgesVector <- c(51, 56, 61, 29, 71, 37, 31, 42, 56, 34, 45, 23)
ageGuesses <- within(ageGuesses, {
  # NOTE(review): the four assignments below are exercise blanks; as written
  # the file does not parse (the last blank is followed directly by "})").
  # New column holding the true age.
  trueAge <-
  # Difference between the true and the estimated age.
  error <-
  # Absolute error.
  absError <-
  # Factor identifying the person (from the photo); used for plotting.
  personNr <-
})
# Distribution of the errors per person (photo).
ggplot(data = ageGuesses, aes(x = personNr, y = error)) +
  geom_point() +
  geom_boxplot() +
  geom_hline(yintercept = 0, linetype=2, color=2) +
  coord_flip()
# Distribution of the errors without differentiating by person (photo).
ggplot(data = ageGuesses, aes(x = 'all', y = error)) +
  geom_point() +
  geom_boxplot() +
  geom_hline(yintercept = 0, linetype=2, color=2) +
  coord_flip()
# Group the data set by person.
ageGuessesGrouped <- group_by(ageGuesses, person)
ageGuessesGrouped
# Mean and standard deviation of the guesses per person.
summarise(ageGuessesGrouped,
          trueAge = trueAge[1],
          estAgeMean = mean(estAge),
          estAgeStd = sd(estAge)
)
# Hypothesis test:
# The expected bias of the guesses for person 1 is 0
# vs. alternative: the expected bias is non-zero.
# NOTE(review): exercise blank -- the bare 'person1Errors' on the next code
# line would error at run time until the blank is filled in.
person1Errors <-
person1Errors
t.test(person1Errors, mu=0, conf.level = 0.95)
# Hypothesis test:
# The expected bias of the guesses for person 11 is 0
# vs. alternative: the expected bias is non-zero.
ageGuesses
person11Errors <- ageGuesses[ageGuesses$personNr == "11", 'error']
person11Errors
t.test(person11Errors, mu=0, conf.level = 0.95)
|
989c356edcdab83e2132307016416b69ec63cc29
|
67035cb9274eb660ac6836e5f13c9167e105255c
|
/man/pil_fb.Rd
|
014a5f8cbd7fc79530376b28bce7f8262b924ec7
|
[
"MIT"
] |
permissive
|
SvenGastauer/KRMr
|
39b941f9e3d3e40d28edadf98c14c168ab266b6a
|
f29ee41a7b55035cd43d0c4e632808e73e348091
|
refs/heads/main
| 2023-08-23T13:30:32.863360
| 2023-07-25T10:07:42
| 2023-07-25T10:07:42
| 602,591,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 578
|
rd
|
pil_fb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pil_fb-data.R
\docType{data}
\name{pil_fb}
\alias{pil_fb}
\title{Sardine Fish body Shape}
\format{
data.frame
}
\source{
https://www.fisheries.noaa.gov/data-tools/krm-model
}
\usage{
data(pil_fb)
}
\description{
Example shape of the fish body (fb) of a sardine (pil)
}
\examples{
data(pil_fb)
fb=pil_fb
par(mfrow=c(1,2))
KRMr::shplot(x_fb = fb$x_fb, w_fb = fb$w_fb,
z_fbU = fb$z_fbU, z_fbL = fb$z_fbL)
}
\references{
NOAA Southwest Fisheries Science Center
}
\keyword{datasets}
|
1ec14dc79fdd78df74e714afbb7d326213635590
|
01e6f98609708ebdfd6d1db5fda9cb443f9f7856
|
/man/date_month_factor.Rd
|
7dc8d36aae782d67002e66884ac6738f59b3efd3
|
[
"MIT"
] |
permissive
|
isabella232/clock-2
|
3258459fe4fc5697ce4fb8b54d773c5d17cd4a71
|
1770a69af374bd654438a1d2fa8bdad3b6a479e4
|
refs/heads/master
| 2023-07-18T16:09:11.571297
| 2021-07-22T19:18:14
| 2021-07-22T19:18:14
| 404,323,315
| 0
| 0
|
NOASSERTION
| 2021-09-08T13:28:17
| 2021-09-08T11:34:49
| null |
UTF-8
|
R
| false
| true
| 1,364
|
rd
|
date_month_factor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/date.R
\name{date_month_factor}
\alias{date_month_factor}
\title{Convert a date or date-time to an ordered factor of month names}
\usage{
date_month_factor(x, ..., labels = "en", abbreviate = FALSE)
}
\arguments{
\item{x}{\verb{[Date / POSIXct / POSIXlt]}
A date or date-time vector.}
\item{...}{These dots are for future extensions and must be empty.}
\item{labels}{\verb{[clock_labels / character(1)]}
Character representations of localized weekday names, month names, and
AM/PM names. Either the language code as string (passed on to
\code{\link[=clock_labels_lookup]{clock_labels_lookup()}}), or an object created by \code{\link[=clock_labels]{clock_labels()}}.}
\item{abbreviate}{\verb{[logical(1)]}
If \code{TRUE}, the abbreviated month names from \code{labels} will be used.
If \code{FALSE}, the full month names from \code{labels} will be used.}
}
\value{
An ordered factor representing the months.
}
\description{
\code{date_month_factor()} extracts the month values from a date or date-time and
converts them to an ordered factor of month names. This can be useful in
combination with ggplot2, or for modeling.
}
\examples{
x <- add_months(as.Date("2019-01-01"), 0:11)
date_month_factor(x)
date_month_factor(x, abbreviate = TRUE)
date_month_factor(x, labels = "fr")
}
|
cfe3aa1ad763925059ad3c599f71e7a44994fb43
|
747b971f1a7e4dc3232fe9671255536b6c03c2a8
|
/differential_analysis_example_scripts/process_E-MTAB-5697_microarray_5-time-points_3-replicates_1-dose.R
|
b072524e0a24ee34be0d5012b37c3ed30ab7c56f
|
[
"MIT"
] |
permissive
|
wsj-7416/MechSpy
|
1b58f9edbd2c7223b461b65d265006a2c2aa7fdb
|
0450321d053ece3e3cbfbd49087799fc9f5a4d00
|
refs/heads/master
| 2022-11-09T23:10:27.777537
| 2020-06-23T17:09:04
| 2020-06-23T17:09:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,730
|
r
|
process_E-MTAB-5697_microarray_5-time-points_3-replicates_1-dose.R
|
# To automate this:
# - Pass targetsdir as variable
# - Read the chip type directly from the Attributes file (and install the library as needed)
# - Come up with the control/treatment variable names from the Attributes file
library(limma)
library(affy)
library(annotate)
library(hgu133plus2.db)
library(stringr)

# Adjusted-p-value cutoff for reporting differentially expressed genes.
significance_threshold <- 0.05

args <- commandArgs(trailingOnly = TRUE)
# targetsdir = "time/series/dir/containing/tsv/descriptions/and/celfiles/subdir"
targetsdir <- args[1]
# outputdir = "./microarray"
outputdir <- args[2]
# metadata examples: "3R4F-7.5-ug-per-l.tsv", "THS2.2-7.5-ug-per-l.tsv",
#                    "THS2.2-37.5-ug-per-l.tsv", "THS2.2-150-ug-per-l.tsv"
metadata <- args[3]

attr_data <- readLines(paste(targetsdir, metadata, sep = "/"))
Sys.setlocale(locale = "C")

# Chemical name/concentration can be parsed from any data row (row 20 here);
# non-alphanumeric characters are replaced with "-" so values are file-name safe.
chem_short_name <- gsub("[^0-9a-zA-Z]", "-", unlist(strsplit(attr_data[20], "\t"))[4])
chem_long_name <- gsub("[^0-9a-zA-Z]", "-", unlist(strsplit(attr_data[20], "\t"))[3])
# Concentration and its unit (unit sanitized for file names).
chem_concentr <- unlist(strsplit(attr_data[20], "\t"))[7]
chem_concentr_unit <- gsub("[^0-9a-zA-Z]", "-", unlist(strsplit(attr_data[20], "\t"))[8])

array_design <- "hgu133plus2"
datadir <- paste(targetsdir, "celfiles", sep = "/")

targets <- readTargets(metadata, path = targetsdir, sep = "\t", row.names = "Sample")
# massage the barcode strings before passing them to ReadAffy to add the leading 0s and the .CEL extension
ab <- ReadAffy(filenames = targets$Sample, celfile.path = datadir)
eset <- rma(ab)

# Map probe IDs to gene symbols via the chip annotation package.
ID <- featureNames(eset)
Symbol <- getSYMBOL(ID, paste(array_design, "db", sep = "."))
fData(eset) <- data.frame(Symbol = Symbol)

# 10 groups of triplicates: controls at 1/2/4/8/12 weeks, then matched
# treatments at the same five time points.
treatments <- factor(c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,
                       6,6,6,7,7,7,8,8,8,9,9,9,10,10,10),
                     labels=c("ctrl_1w", "ctrl_2w", "ctrl_4w", "ctrl_8w", "ctrl_12w",
                              "treatment_1w", "treatment_2w", "treatment_4w", "treatment_8w", "treatment_12w"))
contrasts(treatments) <- cbind(Time=c(0,1,2,3,4,0,1,2,3,4),
                               treatment_1w=c(0,0,0,0,0,1,0,0,0,0),
                               treatment_2w=c(0,0,0,0,0,0,1,0,0,0),
                               treatment_4w=c(0,0,0,0,0,0,0,1,0,0),
                               treatment_8w=c(0,0,0,0,0,0,0,0,1,0),
                               treatment_12w=c(0,0,0,0,0,0,0,0,0,1))
design <- model.matrix(~treatments)
# NOTE: fixed the earlier "treatment_12h" typo — the last time point is 12 weeks.
# The final three auto-completed contrast columns keep the generic name.
colnames(design) <- c("Intercept","Time",
                      "treatment_1w","treatment_2w","treatment_4w","treatment_8w","treatment_12w",
                      "treatments","treatments","treatments")
fit <- lmFit(eset, design)

# Contrast one control group (coefficient index ctrl_idx) against its matched
# treatment group (treat_idx) and write up to the top 10k genes with adjusted
# p < significance_threshold to:
#   CHEMNAME_CONCENTRATION-UNIT_TIMEPOINT_bronchial-epithelial-BEAS-2B_top_10k_genes.txt
process_timepoint <- function(ctrl_idx, treat_idx, timepoint_label) {
  ctrl2 <- replace(rep(0, 10), ctrl_idx, 1)
  treat2 <- replace(rep(0, 10), treat_idx, 1)
  cont.matrix <- cbind(ctrl2 = ctrl2, treat2 = treat2)
  fit2 <- contrasts.fit(fit, cont.matrix)
  fit2 <- eBayes(fit2)
  # Use the AFFX control probes to derive an empirical p-value cutoff.
  i <- grep("AFFX", featureNames(eset))
  options(digits = 10)
  summary_output <- summary(fit2$F.p.value[i])
  p_value_cutoff <- signif(as.numeric(summary_output)[1], digits = 3) * 0.99
  results <- classifyTestsF(fit2, p.value = p_value_cutoff)
  print(summary(results))
  print(table(ctrl2 = results[, 1], treat2 = results[, 2]))
  vennDiagram(results, include = "up")
  vennDiagram(results, include = "down")
  options(digits = 3)
  diff_exp <- topTable(fit2, coef = "treat2", n = 10000, p = significance_threshold)
  diff_exp_brief <- data.frame(diff_exp$Symbol, diff_exp$logFC, diff_exp$adj.P.Val)
  out_file <- paste(outputdir,
                    paste(paste(chem_long_name, chem_short_name, sep = "-"),
                          paste(chem_concentr,
                                paste(chem_concentr_unit, timepoint_label,
                                      "bronchial-epithelial-BEAS-2B_top_10k_genes.txt", sep = "_"),
                                sep = "-"),
                          sep = "_"),
                    sep = "/")
  write.table(diff_exp_brief, file = out_file, quote = FALSE, sep = '\t', col.names = NA)
}

# Control groups are coefficients 1..5; matched treatments are 6..10.
# (Replaces five copy-pasted blocks that differed only in indices and label.)
timepoints <- c("1w", "2w", "4w", "8w", "12w")
for (k in seq_along(timepoints)) {
  process_timepoint(ctrl_idx = k, treat_idx = k + 5, timepoint_label = timepoints[k])
}

sessionInfo()
|
f9a22fc72b84959074d7fe0271db631613132c95
|
e124baf55e52643da84ab6d969b0349ed591248a
|
/chaetog_functions.R
|
fa7e84a2b10e58b6481307cf0d96a2610f7064b4
|
[
"MIT"
] |
permissive
|
RetelC/PDra_Phylogeography
|
9c30b08fb8c251752b7ddccee90c9cdba1931122
|
7393f95035b2ad94d7894f3657d89b6266011413
|
refs/heads/master
| 2016-09-08T01:25:19.364787
| 2015-09-23T09:05:38
| 2015-09-23T09:05:38
| 41,354,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,755
|
r
|
chaetog_functions.R
|
################################################################
### Functions created during phylogeographic analysis of ###
### Pterosagitta draco along a basin-scale Atlantic transect ###
### Cas Retel MSc ###
### casretel@gmail.com ###
### Meant for personal use ###
################################################################
# With the various R packages all using different formats
# to store sequences, it is worthwhile to archive how to extract
# them (and their names) from every object;
# ape::DNAbin: as.character(seqs); labels(seqs)
# t(sapply(seqs, function(x) x))
# seqinr::seqFastadna: getSequence(seqs); labels(seqs)
pasteZeroes <- function(x){
  # Returns a character vector with zeroes pasted at the start of the
  # elements of x, such that every element has the same length.
  # Created to make ordering of 1, 2, 3, 10 correct; as.factor() would
  # order them as c(1, 10, 2, 3), creating inconsistencies in ggplot().
  #
  # Args:
  #   x: vector coercible to character (typically station/sample numbers).
  # Returns:
  #   character vector, all elements left-padded with "0" to equal width.
  #
  # Rewritten: the previous while/sapply loop padded one "0" per pass and
  # errored on zero-length input; this pads each element in one vectorized
  # step via strrep().
  x <- as.character(x)
  if (length(x) == 0) return(x)
  nmax <- max(nchar(x))
  paste0(strrep("0", nmax - nchar(x)), x)
}
# pDist <- function(x1, x2){
# # PDist() returns uncorrected divergence of two
# # nucleotide character vectors: nr of differences per site
# # It is assumed that sequences are aligned, and
# # "N" values are excluded from calculations
# informative <- (x1!="N" & x2!="N")
# return(sum(x1[informative]!=x2[informative])/sum(informative))
# }
# Obsolete: use ape::dist.dna()
# JCDist <- function(x1, x2){
# # JCDist() returns Jukes-Cantor corrected divergence of two
# # nucleotide character vectors
# # It is assumed that sequences are aligned, and
# # "N" values are excluded from calculations
# informative <- (x1!="N" & x2!="N")
# pi <- sum(x1[informative]!=x2[informative])/sum(informative)
# return (-3*log(1-(4*pi/3))/4)
# }
# Obsolete: use ape::dist.dna()
#
# TNDist <- function(x1, x2){
# # TNDist() returns Tajima-Nei corrected divergence of two
# # nucleotide character vectors. Similar to J-C, T-N recognized
# # that unequal nucleotide frequencies will result in an
# # overestimation by this calculation
# # It is assumed that sequences are aligned, and
# # "N" values are excluded from calculations
# nucs <- c("A", "T", "G", "C")
# informative <- (x1!="N" & x2!="N")
# x1 <- x1[informative]
# x2 <- x2[informative]
# n <- sum(informative)
#
# pi <- sum(x1!=x2)/n
# x <- sapply(nucs, function(a1)
# sapply(nucs, function(a2)
# sum((x1%in%a1 & x2%in%a1) +
# (x1%in%a2 & x2%in%a2))/n))
# q <- sapply(nucs, function(x) sum( c(x1, x2)==x)/(2*n))
#
# h <- sum(unlist(lapply(1:3, function(i)
# sapply(i:4, function(j) x[i, j]^2 / (q[i]*q[j]) ))))/2
# b <- (1-sum(q^2) + (pi^2)/h)/2
# return(-1*b*log(1-(pi/b)))
# }
# Obsolete: use ape::dist.dna()
pDist2 <- function(x1, x2){
  # PDist2() returns uncorrected (p-) divergence of two aligned
  # nucleotide character vectors: nr of differences per site.
  # Different from PDist(), it allows not only "ACTGN-", but also the
  # IUPAC ambiguity codes "DHKMRSTWY" (resolved via seqinr::amb()).
  # It is assumed that sequences are aligned; positions where either
  # sequence is "N" are excluded from calculations.
  #
  # Args:
  #   x1, x2: aligned sequences as character vectors of single symbols.
  # Returns:
  #   numeric scalar: differing sites / compared sites.
  require("seqinr")  # provides amb(); NOTE(review): library() would fail loudly instead
  # positions where both sequences carry a real base (no N, no gap)
  informative <- which(x1!="N" & x2!="N" & x1!="-" & x2!="-")
  # 2-row logical matrix flagging gap positions in each sequence
  gaps <- rbind((x1=="-"), (x2=="-"))
  # a site counts as different when the ambiguity sets of the two symbols
  # do not overlap; a gap in exactly one sequence also counts as a difference
  unequal <- sum(!sapply(informative, function(i)
    any(amb(x1[i])%in%amb(x2[i])))) + sum(colSums(gaps)==1)
  # denominator: comparable sites plus sites gapped in at least one sequence
  return(unequal/(length(informative)+sum(colSums(gaps)>0)))
}
# Obsolete: use ape::dist.dna()
distClades <- function(seq, cladeseqs, cladefact){
  # distClades was created to assign new sequences clades,
  # based on an available sequence set that already has clades.
  # seq = matrix of (unassigned sequence) alignment, individuals as rows
  # cladeseqs = matrix of sequences that already were assigned a clade
  # cladefact = factor giving clades corresponding to rows of cladeseqs
  # Returns, for every new sequence (per row), the average uncorrected
  # divergence to all sequences per clade (columns).
  # NOTE(review): despite the original comment, a plain matrix (not a
  # data frame) is returned, with one column per clade level.
  # NOTE(review): ape::dist.dna() normally takes a single DNAbin object;
  # calling it with two row vectors presumes the rows are DNAbin —
  # confirm against the callers.
  if(!is.matrix(seq)) seq <- as.matrix(seq)
  if(!is.matrix(cladeseqs)) cladeseqs <- as.matrix(cladeseqs)
  cladefact <- as.factor(cladefact)
  n <- nrow(seq)
  nc <- length(levels(cladefact))
  out <- matrix(0, nrow=n, ncol=nc)
  for(i in 1:n){
    for(cla in 1:nc){
      # rows of cladeseqs belonging to the current clade
      claind <- which(cladefact==levels(cladefact)[cla])
      # mean raw (uncorrected) distance from new sequence i to that clade
      out[i, cla] <- mean(sapply(claind, function(j)
        dist.dna(seq[i, ], cladeseqs[j, ], model="raw")))
    }
  }
  return(out)
}
revComp <- function(x){
  # Reverse-complement a DNA sequence supplied either as a single string
  # or as a character vector of single bases; the output uses the same
  # representation as the input.
  library(seqinr)
  single_string <- length(x) == 1
  if (single_string) {
    # string in, string out: split to characters, reverse, complement, re-join
    return(c2s(comp(rev(s2c(x)), force=F)))
  }
  # character vector in, character vector out
  comp(rev(x), force=F)
}
seqsToLocus <- function(seqs, mname="Marker1"){
  # From an aligned sequence matrix (individuals as rows), assigns each
  # distinct row (haplotype) an ordinal number and returns them as a
  # one-column data frame named after `mname`.
  #
  # Fixes vs. the previous version:
  #  - `class(seqs) == "DNAbin"` yields a length-2 condition for plain
  #    matrices (class c("matrix","array")), which errors in R >= 4.2;
  #    use inherits() instead.
  #  - the inner comparison indexed `seqs[uncl[i], ]` although `i` already
  #    *is* a row index taken from `uncl`, so haplotypes found after the
  #    first pass were never grouped correctly.
  #  - `all.equal(...) == TRUE` replaced by the idiomatic isTRUE().
  if (inherits(seqs, "DNAbin")) seqs <- as.character(seqs)
  out <- data.frame(V1 = rep(0, nrow(seqs)))
  hnum <- 1
  while (any(out$V1 == 0)) {
    uncl <- which(out$V1 == 0)
    # the first still-unclassified row defines the next haplotype number
    out$V1[uncl[1]] <- hnum
    for (i in uncl[-1]) {
      if (isTRUE(all.equal(seqs[uncl[1], ], seqs[i, ]))) {
        out$V1[i] <- hnum
      }
    }
    hnum <- hnum + 1
  }
  colnames(out) <- mname
  return(out)
}
seqsToDataframe <- function(seqs, ...){
  # From a sequence matrix (individuals as rows), returns a data frame with
  # a `subject` column (row names), one column per factor passed via `...`
  # (which must be named), and one column per polymorphic alignment
  # position (named posN by original column index).
  # ! Only works for the author's own IonTorrent data; cannot handle
  #   ambiguous symbols etc. !
  #
  # Fixes vs. the previous version:
  #  - `class(seqs) == "DNAbin"` errors in R >= 4.2 for plain matrices
  #    (length-2 class vector in `if`); use inherits() instead.
  #  - subsetting to a single polymorphic column dropped the matrix to a
  #    bare vector (losing row and column names); keep drop = FALSE.
  if (inherits(seqs, "DNAbin")) seqs <- as.character(seqs)
  factors <- list(...)
  if (any(sapply(factors, length) != nrow(seqs))) {
    stop("Sequence and factor lengths do not match")
  }
  colnames(seqs) <- paste("pos", seq_len(ncol(seqs)), sep = "")
  # keep only alignment positions with more than one observed symbol
  polymorphic <- apply(seqs, 2, function(x) length(unique(x)) != 1)
  loci <- seqs[, polymorphic, drop = FALSE]
  df.out <- data.frame(subject = rownames(loci))
  if (length(factors) > 0) {
    for (i in seq_along(factors)) {
      df.out <- cbind(df.out, factors[[i]])
    }
  }
  colnames(df.out) <- c("subject", names(factors))
  df.out <- cbind(df.out, loci)
  return(df.out)
}
lengthdet <- function(...){
  # Convenience helper: counts how many arguments were supplied.
  length(list(...))
}
haploToNum <- function(x, diploid=F){
  # From a sequence matrix in seqsToDataframe-format, replaces nucleotide
  # symbols with numeric codes, to be compatible with
  # hierfstat::test.xxx-functions.
  # Compatible with seqsToDataframe(), and with that function only.
  # If diploid=T, haploToNum expects a sequence matrix with two sequences
  # per individual, positioned after each other: rows 1, 2 are one
  # individual, as are rows 205, 206. If homozygous, a diplotype of two
  # identical haplotypes is expected.
  # NOTE(review): requires the magrittr pipe (%>%) to be attached by the
  # caller — this file never loads it.
  # NOTE(review): codes are assigned in order of first appearance of each
  # symbol, so they are 1..n-symbols, not a fixed ACGT = 1:4 mapping.
  if(diploid & (nrow(x)%%2)) stop("Odd number of sequences")
  # keep only the "posN" locus columns produced by seqsToDataframe()
  x <- as.matrix(x[, grep("pos", colnames(x))])
  symbollist <- x %>% as.character %>% unique
  out <- matrix(NA, nrow=nrow(x), ncol=ncol(x))
  for(i in 1:length(symbollist)){
    out[x==symbollist[i]] <- i
  }
  if(diploid){
    # paste the codes of each individual's two rows into one diplotype code
    # (e.g. rows with codes 1 and 2 become "12")
    out <- sapply(2*(1:(nrow(x)/2)), function(i)
      paste(out[i-1, ], out[i, ], sep="")) %>% t
  }
  out <- apply(out, 2, as.numeric)
  return(out)
}
nameToIndiv <- function(x, appendHetState=F){
  # Convenience function: from a vector of sequence labels of the form
  # Pdra_AMT22_<stationnr>_<indivnr>_<heterozygositytag>,
  # returns "<stationnr>_<indivnr>" — the first 5 characters after the
  # "AMT22_" prefix, or the first 7 when appendHetState = TRUE (which also
  # includes the "_<h>" start of the heterozygosity tag).
  #
  # Rewritten in base R: the previous version used the magrittr pipe
  # (%>%), which this file never attaches, and type-unstable sapply().
  after_prefix <- vapply(strsplit(x, split = "AMT22_"), `[[`, character(1), 2)
  substr(after_prefix, start = 1, stop = if (appendHetState) 7 else 5)
}
nameToStation <- function(x){
  # Convenience function: from a vector of sequence labels of the form
  # Pdra_AMT22_<stationnr>_<indivnr>_<heterozygositytag>,
  # returns "<stationnr>" (the two characters after "AMT22_") as a factor.
  #
  # Rewritten in base R: the previous version used the magrittr pipe
  # (%>%), which this file never attaches, and type-unstable sapply().
  after_prefix <- vapply(strsplit(x, split = "AMT22_"), `[[`, character(1), 2)
  as.factor(substr(after_prefix, start = 1, stop = 2))
}
nameToBiome <- function(x){
  # Convenience function: from a vector of sequence labels of the form
  # Pdra_AMT22_<stationnr>_<indivnr>_<heterozygositytag>,
  # returns the sampling biome as a factor.
  # ! Only works for sequences sampled at the thirteen stations used in
  #   the P. draco research, and only when all five biomes occur in `x`
  #   (labels are assigned to the factor levels by position). !
  #
  # Rewritten in base R: the previous version used the magrittr pipe
  # (%>%), which this file never attaches, and type-unstable sapply().
  station <- substr(vapply(strsplit(x, split = "AMT22_"), `[[`, character(1), 2),
                    start = 1, stop = 2)
  # Station numbers are zero-padded 2-char strings, so the lexicographic
  # comparison against "22"/"32"/"48"/"61" matches numeric ordering; the
  # breakpoints split the transect into five latitudinal biomes.
  out <- as.factor(1 + (station > 22) + (station > 32) + (station > 48) + (station > 61))
  levels(out) <- c("N temp", "N gyre", "Equat", "S gyre", "S temp")
  return(out)
}
|
22f179d1043b289ec85a77e702db64d6a1520b03
|
4f7cc757e1fb115e34b3e7b42f7573f9c4e99cfa
|
/scripts/get_discussions.R
|
7744b3e953578ea660777abbb96492b78728dd1f
|
[] |
no_license
|
mcc-apsis/UKParliamentaryCoalDiscourse
|
ff7ee96f0c16164729d8e83fa0688471b4819f9d
|
715e9083be259494e564864666b3271d310e0172
|
refs/heads/master
| 2020-03-10T06:19:49.656066
| 2018-04-12T12:09:17
| 2018-04-12T12:09:17
| 129,237,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,906
|
r
|
get_discussions.R
|
# Get path to the file caching the scraped discussions: use the
# user-supplied path if given, otherwise derive one from the search term.
v_discussions_file <- ifelse(u_discussions_file == "", paste0("../data/data_discussions_", u_searchTerm, ".RData"), u_discussions_file)
if (file.exists(v_discussions_file)) {
  # A cached scrape exists: load `data_discussions` from disk.
  cat("Loading existing discussion file...")
  load(v_discussions_file)
} else {
  cat("Scraping Hansard UK web site...")
  #url = "http://hansard.millbanksystems.com/commons/1871/may/04/ways-and-means-report#S3V0206P0_18710504_HOC_37"
  #== Initialise ===============
  data_discussions <- list()
  #== Loop over filtered session list
  for (kr in 1:length(urls_sessionsToScrape)) {
    # Locate the session in data_sessionList by matching the URL tail
    # (characters 35..end) against the document links; take the first hit.
    id_match <- which(grepl(substr(urls_sessionsToScrape[kr], 35, nchar(urls_sessionsToScrape[kr])), data_sessionList$doc_link, fixed = TRUE))[1]
    cur_title <- data_sessionList$doc_title[id_match]
    cat(paste0("Processing [", kr,"]: ", paste0(substr(cur_title, 1, 50), "[...]"), "\n"))
    # Initialise list for this session
    data_discussions[[kr]] <- list()
    # Get web page
    url <- urls_sessionsToScrape[kr]
    wp <- read_html(url)
    data_discussions[[kr]][["title"]] <- wp %>% html_node(xpath='//div[@id="header"]/h1[@class="title"]') %>% html_text()
    data_discussions[[kr]][["url"]] <- url
    # Counters used to generate unique element names per content type.
    cnt_procedural <- 1
    cnt_memberContribution <- 1
    cnt_division <- 1
    # Most recently seen date/time marker ("time published" node),
    # attached to the content node(s) that follow it.
    cur_DP <- NULL
    # Walk every child node of the page's content div and dispatch on its
    # CSS class.
    tmp = wp %>% html_nodes(xpath='//div[@id="content"]') %>% html_children()
    for (ks in 1:length(tmp)) {
      if (!is.na(tmp[[ks]] %>% html_attr("class"))) {
        # `treated` tracks whether any branch recognised this node; unknown
        # classes are reported at the bottom of the loop.
        treated = FALSE
        if (tmp[[ks]] %>% html_attr("class") == "section") {
          data_discussions[[kr]][["section"]] <- tmp[[ks]] %>% html_text()
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "permalink column-permalink") {
          data_discussions[[kr]][["permalink"]] <- create_permalink(tmp[[ks]])
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "procedural") {
          data_discussions[[kr]][[paste0("procedural_", cnt_procedural)]] <- create_procedural(tmp[[ks]], DP=cur_DP)
          cnt_procedural = cnt_procedural+1
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "hentry member_contribution") {
          data_discussions[[kr]][[paste0("memberContribution_", cnt_memberContribution)]] <- create_memberContribution(tmp[[ks]], DP=cur_DP)
          cnt_memberContribution = cnt_memberContribution+1
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "division") {
          print("TODO: division")
          data_discussions[[kr]][[paste0("division_", cnt_division)]] <- create_division(tmp[[ks]], DP=cur_DP)
          cnt_division <- cnt_division + 1
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "unparsed_division") {
          print("TODO: unparsed_division")
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "time published") {
          # Remember the date/abbr of this time marker for following nodes.
          cur_DP <- list(
            abbr = tmp[[ks]] %>% html_node(xpath="./a/abbr") %>% html_text(),
            date = tmp[[ks]] %>% html_node(xpath="./a/abbr") %>% html_attr("title"))
          treated <- TRUE
        } else {
          # NOTE(review): this else belongs to the "time published" check
          # only, so EVERY non-time node clears cur_DP — i.e. a date marker
          # applies solely to the single node immediately following it.
          # Confirm this is the intended behaviour.
          cur_DP <- NULL
        }
        if (tmp[[ks]] %>% html_attr("class") == "table") {
          print("TODO: table")
          print(tmp[[ks]] %>% as.character())
          treated <- TRUE
        }
        if (tmp[[ks]] %>% html_attr("class") == "xoxo") {
          print("TODO: xoxo")
          if (tmp[[ks]] %>% html_text != "") {
            print(tmp[[ks]] %>% as.character())
          }
          treated <- TRUE
        }
        if (!treated) print(paste0("unknown section : ", tmp[[ks]] %>% as.character()))
      }
    }
  }
  # Save discussions data so subsequent runs can load instead of scraping.
  save(data_discussions, file=v_discussions_file)
}
|
25b8831e09b7ecfcb366fb0101e981fe2c94e63d
|
180e7c14af8ef1e7809cfe95bfe0667ad72b9f02
|
/plot1.R
|
4f5890a5f0d982cdc022652d321b9112457c562f
|
[] |
no_license
|
annrcannon/EDA-course
|
7169903478fda7fdfb184b8990e0829b203b1b66
|
309d96b2daed511e02049b955a9d9453f15fe0df
|
refs/heads/main
| 2023-02-23T23:26:56.825945
| 2021-01-28T02:59:48
| 2021-01-28T02:59:48
| 333,580,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
plot1.R
|
# Read in the data (semicolon-separated; the whole file is loaded).
electric <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
# Create the subset of interest: the two days 2007-02-01 and 2007-02-02
# (dates in this file are in d/m/yyyy format).
elec.sub <- electric[electric$Date == "1/2/2007"| electric$Date == "2/2/2007",]
# Clean data: coerce the power readings to numeric.
# NOTE(review): under R < 4.0 this column would be a factor
# (stringsAsFactors default) and as.numeric() would yield level codes,
# not values — as.numeric(as.character(...)) would be needed there.
# Under R >= 4.0 the column is character and this works (missing-value
# markers become NA with a warning).
elec.sub$Global_active_power <- as.numeric(elec.sub$Global_active_power)
# Make graph: histogram of global active power, written to plot1.png.
png(file="plot1.png", width=480, height=480)
hist(elec.sub$Global_active_power, main="Global Active Power", col="red",
     xlab="Global Active Power (kilowatts)")
dev.off()
|
5e32daeb197f0c23d95423e917059a51c3a5932d
|
4b5cecbbf56fa2704f2d24c9255adf3dd4df6ff6
|
/AppliedDataMining/AppliedDataMining/HW2/2.1/sb2.R
|
ed89d6abc3d93b59ea3e094af5940c0bf18273ec
|
[] |
no_license
|
keithhickman08/IUH
|
fa2a8c50eb4ab86f3ea10081a18620e27dc8f021
|
20bc22bdef9523310e1e1b9b6225e6a3eb039d20
|
refs/heads/master
| 2020-04-02T07:40:22.310044
| 2019-01-04T16:16:45
| 2019-01-04T16:16:45
| 154,208,374
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36
|
r
|
sb2.R
|
# Per-column summary statistics of the data set.
# NOTE(review): `mydata` is not defined in this file — it must already
# exist in the calling environment when this script is sourced.
summary(mydata)
# Total number of missing (NA) values across the whole data set.
sum(is.na(mydata))
|
294ae4c856be19214cd1c71f858afd5cf0da6278
|
14e19c36dba435df0a75158e14f7d00a0fa1096d
|
/man/composite-track.Rd
|
8433dd652491dc56154b950f42bac959153834a5
|
[] |
no_license
|
Marlin-Na/TnT
|
2fc227de7cae7266886104d6009b0b36add6f2b2
|
386cf9fc3c53ab861c954b73b47b3c83be35ea89
|
refs/heads/master
| 2021-01-11T20:36:04.356306
| 2020-01-30T07:08:50
| 2020-01-30T07:08:50
| 79,152,363
| 16
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,183
|
rd
|
composite-track.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tntTracks-compositeTrack.R
\name{composite-track}
\alias{composite-track}
\alias{merge-track}
\alias{merge,TnTTrack,TnTTrack-method}
\alias{merge,TnTTrack,missing-method}
\title{Composite Track}
\usage{
\S4method{merge}{TnTTrack,TnTTrack}(x, y, ...)
\S4method{merge}{TnTTrack,missing}(x, y, ...)
}
\arguments{
\item{x, y, ...}{Track constructed with \link{track-constructors} or composite track.}
}
\value{
Returns a "CompositeTrack" object.
}
\description{
Two or more arbitrary tracks can be used to create a composite track, by which
different features can be shown in the same track.
}
\examples{
gr <- GRanges("chr1", IRanges(c(11000, 20000, 60000), width = 2000))
gpos <- GRanges("chr1", IRanges(c(12000, 21000, 61000), width = 1), value = c(1, 2, 3))
btrack <- BlockTrack(gr, label = "Block Track", tooltip = as.data.frame(gr), color = "lightblue4")
ptrack <- PinTrack(gpos, label = "Pin Track", tooltip = as.data.frame(gpos), background = "beige")
ctrack <- merge(btrack, ptrack)
\dontrun{
TnTBoard(ctrack)
}
}
\seealso{
\url{http://tnt.marlin.pub/articles/examples/track-CompositeTrack.html}
}
|
2e86d71809765675ce12b2b0d9af422eb9e7045d
|
d62421e292d672e6cc430db51211eaa778e5bc07
|
/Forecasting assignment Rcode-2.R
|
00c582db15d5cfed96e2cc59d27856cb8fbfd32d
|
[] |
no_license
|
sainath7276331303/sainath
|
e25aa0d172067bb2aebaff84e7d5652168adac59
|
518ca085bd4f738901d511b1dc4644a583d83059
|
refs/heads/master
| 2021-03-10T20:06:12.529471
| 2020-08-30T08:40:16
| 2020-08-30T08:40:16
| 246,482,240
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,828
|
r
|
Forecasting assignment Rcode-2.R
|
library(readxl)
cocacola <- read_excel("F:/Excelr/Datasets/CocaCola_Sales_Rawdata.xlsx")
View(cocacola)

# Visual inspection of the raw and log-transformed sales series.
plot(cocacola$Sales, type = "o")
plot(log(cocacola$Sales), type = "o")
summary(cocacola)

# Creating 12 dummy variables for months.
# NOTE(review): the CocaCola data are quarterly (42 observations), so
# month.abb recycles over quarters; kept as in the original analysis —
# confirm whether quarter dummies were intended instead.
x <- data.frame(outer(rep(month.abb, length = 42), month.abb, "==") + 0)
View(x)
colnames(x) <- month.abb
View(x)

data <- cbind(cocacola, x)
head(data)
colnames(data)
# Trend index t = 1..42 and its square, for the trend models below.
# (Removed the earlier no-op `colnames(data[2]) <- "sales"`; the rename
# below is the one that takes effect.)
data["t"] <- c(1:42)
View(data)
head(data)
colnames(data)[2] <- "sales"
data["log_sales"] <- log(data["sales"])
data["t_square"] <- data["t"] * data["t"]
head(data)

# Split data into train and test (last 12 observations held out).
# (Removed an unnecessary attach(data); every model call passes data= explicitly.)
train <- data[1:30, ]
test <- data[31:42, ]

####### 1. Linear Model ######
linear_model <- lm(sales ~ t, data = train)
summary(linear_model)
linear_pred <- data.frame(predict(linear_model, interval = 'predict', newdata = test))
View(linear_pred)
linear_pred
linear_model_rmse <- sqrt(mean((test$sales - linear_pred$fit)^2, na.rm = T))
linear_model_rmse

####### 2. Exponential Model ######
expo_model <- lm(log_sales ~ t, data = train)
expo_pred <- data.frame(predict(expo_model, interval = "predict", newdata = test))
summary(expo_model)
expo_model
# back-transform the log-scale predictions before computing the RMSE
rmse_expo <- sqrt(mean((test$sales - exp(expo_pred$fit))^2, na.rm = T))
rmse_expo

####### 3. Quadratic Model ######
quad_model <- lm(sales ~ t + t_square, data = train)
summary(quad_model)
quad_pred <- data.frame(predict(quad_model, interval = "predict", newdata = test))
quad_rmse <- sqrt(mean((test$sales - quad_pred$fit)^2, na.rm = T))
quad_rmse

########## 4. Additive seasonality ######################
add_seas <- lm(sales ~ Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
summary(add_seas)
add_seas_pred <- data.frame(predict(add_seas, interval = "predict", newdata = test))
add_seas_rmse <- sqrt(mean((test$sales - add_seas_pred$fit)^2, na.rm = T))
add_seas_rmse

###### 5. Additive seasonality with linear trend ############
add_seast <- lm(sales ~ t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
summary(add_seast)
add_seast_pred <- data.frame(predict(add_seast, interval = "predict", newdata = test))
add_seast_rmse <- sqrt(mean((test$sales - add_seast_pred$fit)^2, na.rm = T))
add_seast_rmse

##### 6. Additive seasonality with quadratic trend ###########
add_seasq <- lm(sales ~ t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
summary(add_seasq)
add_seasq_pred <- data.frame(predict(add_seasq, interval = "predict", newdata = test))
add_seasq_rmse <- sqrt(mean((test$sales - add_seasq_pred$fit)^2, na.rm = T))
add_seasq_rmse

###### 7. Multiplicative seasonality #########
mul_seas_model <- lm(log_sales ~ Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
summary(mul_seas_model)
mul_seas_pred <- data.frame(predict(mul_seas_model, interval = 'predict', newdata = test))
mul_seas_rmse <- sqrt(mean((test$sales - mul_seas_pred$fit)^2, na.rm = T))
mul_seas_rmse

####### 8. Multiplicative seasonality with linear trend ##########
mul_seast_model <- lm(log_sales ~ t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = train)
summary(mul_seast_model)
mul_seast_pred <- data.frame(predict(mul_seast_model, interval = 'predict', newdata = test))
mul_seast_rmse <- sqrt(mean((test$sales - mul_seast_pred$fit)^2, na.rm = T))
mul_seast_rmse

# Collect all model RMSEs in one table for comparison.
table_formate <- data.frame(
  model = c("linear_model_rmse", "rmse_expo", "quad_rmse", "add_seas_rmse",
            "add_seast_rmse", "add_seasq_rmse", "mul_seas_rmse", "mul_seast_rmse"),
  RMSE = c(linear_model_rmse, rmse_expo, quad_rmse, add_seas_rmse,
           add_seast_rmse, add_seasq_rmse, mul_seas_rmse, mul_seast_rmse))
View(table_formate)
table_formate

# Final model: refit the best specification on the full data set.
finalmodel <- lm(sales ~ t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = data)
finalmodel
summary(finalmodel)

# Auto.arima method
# install.packages(c("tseries", "forecast"))  # install once, outside the script
library(tseries)
library(forecast)  # provides auto.arima()/forecast(); must be loaded BEFORE
                   # auto.arima() is called (it was loaded too late before)

cocacola_ts <- as.ts(cocacola$Sales)
# NOTE(review): the series is quarterly; frequency = 12 over 1986-1996 does
# not match 42 observations. Kept from the original analysis — verify.
cocacola_ts <- ts(cocacola_ts, start = c(1986, 1), end = c(1996, 12), frequency = 12)
class(cocacola_ts)
start(cocacola_ts)
end(cocacola_ts)
sum(is.na(cocacola_ts))
summary(cocacola_ts)

decompdata <- decompose(cocacola_ts, "multiplicative")
plot(decompdata)
cycle(cocacola_ts)
boxplot(cocacola_ts ~ cycle(cocacola_ts))

# Model building (fixed the accidental double assignment
# `newmodel <- newmodel <- auto.arima(...)`).
newmodel <- auto.arima(cocacola_ts, ic = "aic", trace = T)
newmodel
plot.ts(newmodel$residuals)

# Verifying p,d,q values using acf and pacf.
acf(newmodel$residuals)
pacf(newmodel$residuals)
acf(diff(newmodel$residuals))

# Forecast 10 years ahead with a 95% interval.
forecasting <- forecast(newmodel, level = c(95), h = 10 * 12)
plot(forecasting)

# Ljung-Box tests for remaining residual autocorrelation.
Box.test(newmodel$residuals, lag = 5, type = "Ljung-Box")
Box.test(newmodel$residuals, lag = 10, type = "Ljung-Box")
|
d3a3f91b80cb6dd8b05a6a2175a4f14a06007f1b
|
465543cd0db7a47e3a05df3b2062fb34a025d230
|
/climbr/tests/testthat/test-logbooks.R
|
9ca44ce23385bd491a5c849def2e18a1730fa2d3
|
[
"Apache-2.0"
] |
permissive
|
yustiks/climbing_ratings
|
22a4d0febb62de11b6a159f881ee075bf30e430b
|
5768c542cac6b3e1559c78bded751a8c3213c2fb
|
refs/heads/main
| 2023-06-13T08:30:42.122873
| 2021-07-03T02:42:55
| 2021-07-03T02:42:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,184
|
r
|
test-logbooks.R
|
# Unit tests for logbooks.R
# Copyright Contributors to the Climbing Ratings project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
context("Tests for logbooks.R")
# Tests for the internal .GetPitchCount() helper, which derives a pitch count
# from "N:"-style markers in an ascent comment and returns NA_integer_ when no
# pitch information is present. (See the "missing pitches" cases for the exact
# behavior when pitch numbers are sparse.)
describe(".GetPitchCount", {
  it("with no pitch info", {
    # Plain comments -- and a number NOT immediately followed by a colon --
    # carry no pitch markers.
    expect_identical(.GetPitchCount(""), NA_integer_)
    expect_identical(.GetPitchCount("just a comment"), NA_integer_)
    expect_identical(.GetPitchCount("multiline\ncomment"), NA_integer_)
    expect_identical(.GetPitchCount("1 :"), NA_integer_)
  })
  it("with comments after pitch info", {
    # Free text AFTER the pitch list is expected to invalidate the parse.
    expect_identical(
      .GetPitchCount("comment\n1: lead by me\ncomment"), NA_integer_
    )
  })
  it("with pitch info", {
    # A single "1:" marker counts as one pitch, with or without a bracketed
    # grade or trailing annotation, and a leading comment line is allowed.
    expect_identical(.GetPitchCount("1:"), 1L)
    expect_identical(.GetPitchCount("1:[18]"), 1L)
    expect_identical(.GetPitchCount("1:[ABC]"), 1L)
    expect_identical(.GetPitchCount("1: lead by me"), 1L)
    expect_identical(.GetPitchCount("1:[18] lead by me"), 1L)
    expect_identical(.GetPitchCount("comment\n1:"), 1L)
  })
  it("with multiple pitches", {
    expect_identical(.GetPitchCount("1:\n2:"), 2L)
    expect_identical(.GetPitchCount("1: lead by me\n2: lead by me"), 2L)
    expect_identical(.GetPitchCount("comment\n1:\n2:"), 2L)
  })
  it("with missing pitches", {
    # The pitch numbers may be sparse, e.g. if users didn't log linked pitches.
    expect_identical(.GetPitchCount("2:"), 1L)
    expect_identical(.GetPitchCount("1:\n3:"), 3L)
  })
})
# Tests for the internal .ParseLogbook() helper, which converts a raw logbook
# export data.frame into the normalised "raw ascent" format: lower-cased tick
# type, integer grade, epoch timestamp, and a pitch count parsed from the
# comment. Rows with unparseable grades are dropped and output is ordered by
# log date.
describe(".ParseLogbook", {
  it("extracts raw ascents", {
    # This is a subset of the fields in the actual logbook exports.
    df <- data.frame(
      Ascent.ID = "4294967296",
      Ascent.Type = "Onsight",
      Route.ID = "8589934592",
      Route.Grade = "18",
      Comment = "",
      Ascent.Date = "2019-07-21T00:00:00Z",
      Log.Date = "2019-07-22T01:23:45Z",
      stringsAsFactors = FALSE
    )
    # Expected normalised output for the row above.
    raw <- data.frame(
      ascentId = "4294967296",
      route = "8589934592",
      climber = "me",
      tick = "onsight",
      grade = 18L,
      timestamp = 1563667200L,
      style = 1L,
      pitches = NA_integer_,
      stringsAsFactors = FALSE
    )
    expect_equal(.ParseLogbook(df, "me"), raw)
  })
  it("drops bad grades", {
    # "V8" is not a parseable numeric grade, so the whole row is discarded.
    df <- data.frame(
      Ascent.ID = "4294967296",
      Ascent.Type = "Onsight",
      Route.ID = "8589934592",
      Route.Grade = "V8",
      Comment = "",
      Ascent.Date = "2019-07-21T00:00:00Z",
      Log.Date = "2019-07-22T01:23:45Z",
      stringsAsFactors = FALSE
    )
    # A zero-row frame with the full raw-ascent schema is still returned.
    raw <- data.frame(
      ascentId = character(),
      route = character(),
      climber = character(),
      tick = character(),
      grade = integer(),
      timestamp = integer(),
      style = integer(),
      pitches = integer(),
      stringsAsFactors = FALSE
    )
    expect_equal(.ParseLogbook(df, "me"), raw)
  })
  it("with pitches", {
    # The second row's comment encodes two pitches ("1: ...", "2: ...").
    df <- data.frame(
      Ascent.ID = c("4294967296", "4294967297"),
      Ascent.Type = c("Redpoint", "Redpoint"),
      Route.ID = c("8589934592", "8589934592"),
      Route.Grade = c("18", "18"),
      Comment = c("", "1: lead by me\n2: lead by you"),
      Ascent.Date = c("2019-07-21T00:00:00Z", "2019-07-21T00:00:00Z"),
      Log.Date = c("2020-01-01T01:23:45Z", "2020-01-01T01:23:45Z"),
      stringsAsFactors = FALSE
    )
    raw <- data.frame(
      ascentId = c("4294967296", "4294967297"),
      route = c("8589934592", "8589934592"),
      climber = c("me", "me"),
      tick = c("redpoint", "redpoint"),
      grade = c(18L, 18L),
      timestamp = c(1563667200L, 1563667200L),
      style = c(1L, 1L),
      pitches = c(NA, 2L),
      stringsAsFactors = FALSE
    )
    expect_equal(.ParseLogbook(df, "me"), raw)
  })
  it("orders by log date", {
    df <- data.frame(
      Ascent.ID = c("4294967296", "4294967297"),
      Ascent.Type = c("Onsight", "Onsight"),
      Route.ID = c("8589934592", "8589934593"),
      Route.Grade = c("18", "19"),
      Comment = c("", ""),
      Ascent.Date = c("2019-07-21T00:00:00Z", "2019-07-21T00:00:00Z"),
      # Row 1 was logged after row 2.
      Log.Date = c("2020-01-01T01:23:45Z", "2019-07-22T01:23:45Z"),
      stringsAsFactors = FALSE
    )
    # Output should list the earlier log (row 2) first.
    raw <- data.frame(
      ascentId = c("4294967297", "4294967296"),
      route = c("8589934593", "8589934592"),
      climber = c("me", "me"),
      tick = c("onsight", "onsight"),
      grade = c(19L, 18L),
      timestamp = c(1563667200L, 1563667200L),
      style = c(1L, 1L),
      pitches = c(NA_integer_, NA_integer_),
      stringsAsFactors = FALSE
    )
    expect_equal(.ParseLogbook(df, "me"), raw)
    # Reverse the input row-order; output order should be the same.
    df <- df[order(nrow(df):1), ]
    expect_equal(.ParseLogbook(df, "me"), raw)
  })
})
|
a2e9f939bbf60dbc9d931891e183cc8f85a82407
|
76f3689edf32fc688963c27477f91cd8641b312e
|
/man/getPars.Rd
|
f1295fe561e4c92d9a5fda9e1d277a74a46be81d
|
[] |
no_license
|
ppernot/msAnaLib
|
c062dcf8056bab529081d26af531ef0eb65f75dd
|
289a35c10e8570132c489a68e77b0a01c6002dcc
|
refs/heads/main
| 2023-04-17T03:48:06.755794
| 2023-02-16T14:30:46
| 2023-02-16T14:30:46
| 355,917,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 907
|
rd
|
getPars.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getPars.R
\name{getPars}
\alias{getPars}
\title{Get peak fit parameters}
\usage{
getPars(res, dimfit = c(0, 1, 2))
}
\arguments{
\item{res}{(nls-object) a nls fit result}
\item{dimfit}{(integer) dimension of the fit}
}
\value{
A list of best-fit parameters:
\describe{
\item{v}{vector of Gaussian peak best-fit parameters}
\item{u_v}{uncertainty on v elements}
\item{mzopt}{peak center along m/z}
\item{u_mz}{uncertainty on mzopt}
\item{cvopt}{peak center along CV}
\item{u_cv}{uncertainty on cvopt}
\item{fwhm_mz}{peak FWHM along m/z}
\item{u_fwhm_mz}{uncertainty on fwhm_mz}
\item{fwhm_cv}{peak FWHM along CV}
\item{u_fwhm_cv}{uncertainty on fwhm_cv}
\item{area}{peak area}
\item{u_area}{uncertainty on area}
}
Depending on 'dimfit', some values might be NAs.
}
\description{
Get peak fit parameters
}
|
74f1eeff375a6121ae62ccc86e0c1cc8773bbb89
|
7b0a3fd64e5e063cab834bc7f8edd91676955705
|
/cachematrix.R
|
e0d0750c934882422f3dc9fade5db23c3f17e1f0
|
[] |
no_license
|
starknguyen/ProgrammingAssignment2
|
717854deb5aa728bbcf885a68a3e387f40585da3
|
cb2fa1071fc3a5acb0be131974c10e3dbc5af525
|
refs/heads/master
| 2023-08-30T14:34:06.477220
| 2021-11-08T18:39:32
| 2021-11-08T18:59:05
| 425,949,325
| 0
| 0
| null | 2021-11-08T18:25:23
| 2021-11-08T18:25:21
| null |
UTF-8
|
R
| false
| false
| 1,036
|
r
|
cachematrix.R
|
## Caching the inverse of a matrix: matrix inversion is costly, so these two
## functions cache the inverse the first time it is computed and reuse the
## cached value on subsequent requests.
## makeCacheMatrix() builds a special "matrix" object that can cache its inverse.
# Build a special "matrix" wrapper that caches its inverse.
#
# Returns a list of four accessor functions:
#   set(y)              -- replace the stored matrix and drop the cached inverse
#   get()               -- return the stored matrix
#   setInverseMatrix(m) -- store a computed inverse in the cache
#   getInverseMatrix()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # a new matrix invalidates the cache
  }
  get_matrix <- function() {
    x
  }
  store_inverse <- function(inverse_matrix) {
    cached_inverse <<- inverse_matrix
  }
  fetch_inverse <- function() {
    cached_inverse
  }
  list(
    set = set_matrix,
    get = get_matrix,
    setInverseMatrix = store_inverse,
    getInverseMatrix = fetch_inverse
  )
}
## cacheSolve() computes the inverse of the special "matrix" returned by
## makeCacheMatrix(). If the inverse has already been calculated (and the
## matrix has not changed), it is retrieved from the cache instead.
# Return the inverse of the special "matrix" `x` created by makeCacheMatrix().
# A cached inverse is reused when available (with a message); otherwise the
# inverse is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverseMatrix()
  if (is.null(cached)) {
    # Cache miss: compute, store, and return the inverse.
    cached <- solve(x$get(), ...)
    x$setInverseMatrix(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
4661f191444284e9a1428a4132102a3a5283c064
|
ba68b797f283557f3da60016c27382fb3c14b0b4
|
/R/SWORD.R
|
af3d4c02d214fb4461b0cd62a75cc58c0a395eeb
|
[] |
no_license
|
thehyve/dataverse-client-r
|
e6db5a6ee621ac7dd5a0e1dc52c2288f2e669ed3
|
b354b2457be8ea5174d5817b1661eab63e445f55
|
refs/heads/master
| 2021-01-16T22:45:58.038293
| 2016-02-18T14:27:54
| 2016-02-18T14:54:39
| 52,015,956
| 0
| 0
| null | 2016-02-18T15:11:38
| 2016-02-18T15:11:38
| null |
UTF-8
|
R
| false
| false
| 10,013
|
r
|
SWORD.R
|
# print.dataverse_dataset_atom <- function(x, ...) {}
# Parse a SWORD Atom entry (XML string) describing a dataset into a
# "dataverse_dataset_atom" list carrying the entry id, its <link> hrefs
# (named by their second attribute, presumably `rel` -- TODO confirm),
# the bibliographic citation, generator, treatment, and the raw XML.
parse_atom <- function(xml){
    xmllist <- XML::xmlToList(xml)
    # Each <link> element is a vector of attributes; take the first value as
    # the href and the second as the name of that link.
    links <- lapply(xmllist[names(xmllist) == "link"], function(x) as.vector(x[1]))
    links <- setNames(links, sapply(xmllist[names(xmllist) == "link"], `[`, 2))
    xmlout <- list(id = xmllist$id,
                   links = links,
                   bibliographicCitation = xmllist$bibliographicCitation,
                   generator = xmllist$generator,
                   treatment = xmllist$treatment[[1]])
    # Keep the raw XML alongside the parsed fields for downstream use.
    xmlout$xml <- xml
    structure(xmlout, class = "dataverse_dataset_atom")
}
# print.dataverse_sword_collection <- function(x, ...) {}
# print.dataverse_sword_service_document <- function(x, ...) {}
# Retrieve the SWORD service document for `server`: the set of dataverse
# collections the supplied API key can deposit into. Returns a
# "dataverse_sword_service_document" list whose "sword_collection" elements
# each carry a collection's name, terms of use, packaging format, and URL.
service_document <- function(key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/service-document")
    r <- httr::GET(u, httr::authenticate(key, ""), ...)
    httr::stop_for_status(r)
    x <- xml2::as_list(xml2::read_xml(httr::content(r, "text")))
    w <- x$workspace
    out <- list()
    # The workspace title, when present, becomes the first output element.
    if ("title" %in% names(w)) {
        out$title <- w$title[[1]]
    }
    # Convert each <collection> entry into a small S3 record; the positional
    # indexing assumes the fixed child order of the SWORD service document.
    n <- which(names(w) == "collection")
    for (i in n) {
        s <- structure(list(name = w[[i]][[1]][[1]],
                            terms_of_use = w[[i]][[2]][[1]],
                            terms_apply = w[[i]][[3]][[1]],
                            package = w[[i]][[4]][[1]],
                            url = attributes(w[[i]])$href),
                       class = "dataverse_sword_collection")
        out[[length(out) + 1]] <- s
    }
    # Name the collection slots "sword_collection" at the workspace positions.
    out <- setNames(out, `[<-`(names(out), n, "sword_collection"))
    structure(out, class = "dataverse_sword_service_document")
}
# @param
# @param body A list containing one or more metadata fields. Field names must be valid Dublin Core Terms labels (see details, below). The \samp{title} field is required.
#' Allowed fields are:
#' \dQuote{abstract}, \dQuote{accessRights}, \dQuote{accrualMethod},
#' \dQuote{accrualPeriodicity}, \dQuote{accrualPolicy}, \dQuote{alternative},
#' \dQuote{audience}, \dQuote{available}, \dQuote{bibliographicCitation},
#' \dQuote{conformsTo}, \dQuote{contributor}, \dQuote{coverage}, \dQuote{created},
#' \dQuote{creator}, \dQuote{date}, \dQuote{dateAccepted}, \dQuote{dateCopyrighted},
#' \dQuote{dateSubmitted}, \dQuote{description}, \dQuote{educationLevel}, \dQuote{extent},
#' \dQuote{format}, \dQuote{hasFormat}, \dQuote{hasPart}, \dQuote{hasVersion},
#' \dQuote{identifier}, \dQuote{instructionalMethod}, \dQuote{isFormatOf},
#' \dQuote{isPartOf}, \dQuote{isReferencedBy}, \dQuote{isReplacedBy}, \dQuote{isRequiredBy},
#' \dQuote{issued}, \dQuote{isVersionOf}, \dQuote{language}, \dQuote{license},
#' \dQuote{mediator}, \dQuote{medium}, \dQuote{modified}, \dQuote{provenance},
#' \dQuote{publisher}, \dQuote{references}, \dQuote{relation}, \dQuote{replaces},
#' \dQuote{requires}, \dQuote{rights}, \dQuote{rightsHolder}, \dQuote{source},
#' \dQuote{spatial}, \dQuote{subject}, \dQuote{tableOfContents}, \dQuote{temporal},
#' \dQuote{title}, \dQuote{type}, and \dQuote{valid}.
# @references \href{http://dublincore.org/documents/dcmi-terms/}{Dublin Core Metadata Terms}
# @link \href{http://swordapp.github.io/SWORDv2-Profile/SWORDProfile.html\#protocoloperations_creatingresource_entry}{Atom entry specification}
# @examples
# \dontrun{
#
# metadat <- list(title = "My Study",
# creator = "Doe, John",
# creator = "Doe, Jane",
# publisher = "My University",
# date = "2013-09-22",
# description = "An example study",
# subject = "Study",
# subject = "Dataverse",
# subject = "Other",
# coverage = "United States")
# create_dataset("mydataverse", body = metadat)
# }
# note that there are two ways to create dataset: native API (`create_dataset`) and SWORD API (`initiate_dataset`)
# Create (initiate) a dataset in a Dataverse via the SWORD API.
#
# @param dataverse A dataverse alias (character), a "dataverse" object, or a
#   SWORD collection record returned by service_document().
# @param body Either a path to an Atom XML file, or a list of Dublin Core
#   metadata fields (passed to build_metadata(); must include 'title').
# @param key,server Dataverse API key and server host.
# @param ... Passed through to httr::POST().
# @return The parsed Atom response as a list.
initiate_dataset <- function(dataverse, body, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    # A SWORD collection record already carries its own collection URL.
    # (fixed: the original tested only for class "sword_collection", which is
    # never assigned in this file -- service_document() assigns
    # "dataverse_sword_collection")
    if (inherits(dataverse, c("sword_collection", "dataverse_sword_collection"))) {
        u <- dataverse$url
    } else {
        if (inherits(dataverse, "dataverse")) {
            # fixed: the original read `x$alias`, but no `x` exists in scope
            dataverse <- dataverse$alias
        }
        u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/collection/dataverse/", dataverse)
    }
    # An existing file is uploaded as-is; otherwise metadata is serialised to
    # a Dublin Core Atom entry.
    if (is.character(body) && file.exists(body)) {
        b <- httr::upload_file(body)
    } else {
        b <- do.call("build_metadata", c(body, metadata_format = "dcterms", validate = FALSE))
    }
    r <- httr::POST(u, httr::authenticate(key, ""), httr::add_headers("Content-Type" = "application/atom+xml"), body = b, ...)
    httr::stop_for_status(r)
    out <- xml2::as_list(xml2::read_xml(httr::content(r, "text")))
    # clean up response structure
    out
}
# List the datasets in a dataverse collection via the SWORD API.
#
# @param dataverse A dataverse alias (character), a "dataverse" object, or a
#   SWORD collection record returned by service_document().
# @return The parsed Atom feed as a list.
list_datasets <- function(dataverse, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    # (fixed: class check widened to include "dataverse_sword_collection",
    # the class actually assigned by service_document())
    if (inherits(dataverse, c("sword_collection", "dataverse_sword_collection"))) {
        u <- dataverse$url
    } else {
        if (inherits(dataverse, "dataverse")) {
            # fixed: the original read `x$alias`, but no `x` exists in scope
            dataverse <- dataverse$alias
        }
        u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/collection/dataverse/", dataverse)
    }
    r <- httr::GET(u, httr::authenticate(key, ""), ...)
    httr::stop_for_status(r)
    out <- xml2::as_list(xml2::read_xml(httr::content(r, "text")))
    # clean up response structure
    out
}
# Release (publish) a dataverse via the SWORD edit URI.
#
# @param dataverse A dataverse alias (character), a "dataverse" object, or a
#   SWORD collection record (its collection URL is rewritten to the edit URL).
# @return The parsed Atom response as a list.
publish_dataverse <- function(dataverse, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    # (fixed: class check widened to include "dataverse_sword_collection",
    # the class actually assigned by service_document())
    if (inherits(dataverse, c("sword_collection", "dataverse_sword_collection"))) {
        u <- sub("/collection/", "/edit/", dataverse$url, fixed = TRUE)
    } else {
        if (inherits(dataverse, "dataverse")) {
            # fixed: the original read `x$alias`, but no `x` exists in scope
            dataverse <- dataverse$alias
        }
        u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit/dataverse/", dataverse)
    }
    # "In-Progress: false" tells SWORD to publish rather than keep a draft.
    r <- httr::POST(u, httr::authenticate(key, ""), httr::add_headers("In-Progress" = "false"), ...)
    httr::stop_for_status(r)
    out <- xml2::as_list(xml2::read_xml(httr::content(r, "text")))
    # clean up response structure
    out
}
# S3 generic: bundle `x` (file paths, a data.frame, or a list of R objects)
# into a temporary zip archive and return the archive's path.
create_zip <- function(x, ...) {
    UseMethod("create_zip", x)
}
# create_zip() method for character input: `x` is a vector of file paths.
# All files must exist; they are bundled into a temporary zip archive whose
# path is returned. Errors (listing the missing paths) otherwise.
create_zip.character <- function(x, ...) {
    missing_files <- !file.exists(x)
    if (any(missing_files)) {
        # fixed: the original counted/listed the files that DID exist, passed
        # a logical vector to ngettext() (which expects a count), and omitted
        # the space before "exist"
        stop(ngettext(sum(missing_files), "One file does not", paste0(sum(missing_files), " files do not")),
             " exist: ", paste0(x[missing_files], collapse = ", "))
    } else {
        tmp <- tempfile(fileext = ".zip")
        # utils::zip() returns a non-zero status on failure
        stopifnot(!utils::zip(tmp, x))
        return(tmp)
    }
}
# create_zip() method for a data.frame: serialises the data.frame to a
# temporary file with save() and zips it, returning the archive's path.
create_zip.data.frame <- function(x, ...) {
    # NOTE(review): the serialised file gets a ".zip" extension even though it
    # is an RData file written by save() -- presumably only the archive name
    # matters downstream; confirm before changing.
    tmpdf <- tempfile(fileext = ".zip")
    # The intermediate file is removed once this function returns.
    on.exit(file.remove(tmpdf))
    tmp <- tempfile(fileext = ".zip")
    save(x, file = tmpdf)
    # utils::zip() returns a non-zero status on failure.
    stopifnot(!utils::zip(tmp, tmpdf))
    return(tmp)
}
# create_zip() method for a list of R objects: each element is serialised to
# its own temporary file, and all of them are bundled into a single zip
# archive whose path is returned.
create_zip.list <- function(x, ...) {
    # fixed: the original passed tempfile()'s *result* to sapply() as if it
    # were a function, and gave mapply() its data arguments before the
    # function -- both of which error at run time
    tmpdf <- vapply(seq_along(x), function(i) tempfile(fileext = ".zip"), character(1))
    on.exit(file.remove(tmpdf))
    mapply(function(obj, f) save(obj, file = f), x, tmpdf)
    tmp <- tempfile(fileext = ".zip")
    # utils::zip() returns a non-zero status on failure.
    stopifnot(!utils::zip(tmp, tmpdf))
    return(tmp)
}
# Add a file (or data) to an existing dataset via the SWORD edit-media URI.
# `file` may be a character vector of file names, a data.frame, or a list of
# R objects; it is first bundled into a zip archive by create_zip().
# Returns the response body as text.
add_file <- function(dataset, file, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    dataset <- prepend_doi(dataset)
    u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit-media/study/", dataset)
    # file can be: a character vector of file names, a data.frame, or a list of R objects
    file <- create_zip(file)
    h <- httr::add_headers("Content-Disposition" = paste0("filename=", file),
                           "Content-Type" = "application/zip",
                           "Packaging" = "http://purl.org/net/sword/package/SimpleZip")
    # NOTE(review): the zip archive does not appear to be attached to the POST
    # (there is no `body = httr::upload_file(file)` argument) -- verify
    # against the SWORD v2 binary-deposit spec before relying on this.
    r <- httr::POST(u, httr::authenticate(key, ""), h, ...)
    httr::stop_for_status(r)
    httr::content(r, "text")
}
# Delete a single file from a dataset via the SWORD edit-media URI.
#
# @param dataset Dataset identifier; normalised with prepend_doi() (not used
#   in the URL itself, which is keyed by the file id).
# @param id The file id to delete.
# @return The response body as text.
delete_file <- function(dataset, id, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    dataset <- prepend_doi(dataset)
    u <- paste0(api_url(server, prefix="dvn/api/"), "data-deposit/v1.1/swordv2/edit-media/file/", id)
    # fixed: the original passed an undefined object `h` to httr::DELETE(),
    # which would error at run time
    r <- httr::DELETE(u, httr::authenticate(key, ""), ...)
    httr::stop_for_status(r)
    httr::content(r, "text")
}
# Remove a dataset via the SWORD edit URI; returns the response body as text.
delete_dataset <- function(dataset, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    target <- paste0(api_url(server, prefix="dvn/api/"),
                     "data-deposit/v1.1/swordv2/edit/study/",
                     prepend_doi(dataset))
    response <- httr::DELETE(target, httr::authenticate(key, ""), ...)
    httr::stop_for_status(response)
    httr::content(response, "text")
}
# Release (publish) a dataset via the SWORD edit URI. "In-Progress: false"
# tells SWORD to publish rather than keep a draft. Returns the parsed Atom
# response as a list.
publish_dataset <- function(dataset, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    doi <- prepend_doi(dataset)
    edit_url <- paste0(api_url(server, prefix="dvn/api/"),
                       "data-deposit/v1.1/swordv2/edit/study/", doi)
    response <- httr::POST(edit_url, httr::authenticate(key, ""),
                           httr::add_headers("In-Progress" = "false"), ...)
    httr::stop_for_status(response)
    xml2::as_list(xml2::read_xml(httr::content(response, "text")))
}
# Fetch the SWORD Atom entry for a dataset and parse it with parse_atom(),
# returning a "dataverse_dataset_atom" object.
dataset_atom <- function(dataset, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    doi <- prepend_doi(dataset)
    entry_url <- paste0(api_url(server, prefix="dvn/api/"),
                        "data-deposit/v1.1/swordv2/edit/study/", doi)
    response <- httr::GET(entry_url, httr::authenticate(key, ""), ...)
    httr::stop_for_status(response)
    parse_atom(httr::content(response, "text"))
}
# Fetch the SWORD statement for a dataset (its files and deposit state) and
# return the parsed XML as a list.
dataset_statement <- function(dataset, key = Sys.getenv("DATAVERSE_KEY"), server = Sys.getenv("DATAVERSE_SERVER"), ...) {
    doi <- prepend_doi(dataset)
    statement_url <- paste0(api_url(server, prefix="dvn/api/"),
                            "data-deposit/v1.1/swordv2/statement/study/", doi)
    response <- httr::GET(statement_url, httr::authenticate(key, ""), ...)
    httr::stop_for_status(response)
    xml2::as_list(xml2::read_xml(httr::content(response, "text")))
}
|
18abb253677aa2a5dbc8bfa62bc939914a1bef75
|
7520b70d92bf85143fa8b50cbb6bab39a100ada4
|
/util/scripts/Utils.R
|
6e65577b755bb331ca2cb16a491419cadfe1889d
|
[] |
no_license
|
mscrawford/IBC-grass
|
a57138df15a53438de57b4f0a4cd9f72788684b9
|
f57e88b23c551f4c6cf68285bb0662f45edbcc5f
|
refs/heads/master
| 2021-01-17T02:57:20.688048
| 2019-06-18T18:06:42
| 2019-06-18T18:06:42
| 27,534,454
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,317
|
r
|
Utils.R
|
library(data.table)
# Read and row-bind all files of a given type from a directory.
#
# @param data_dir Directory containing the data files.
# @param file_type Pattern identifying which files to read (matched with
#   grepl() against the file names).
# @param verbose Unused; kept for backward compatibility.
# @return A tbl_df of the stacked contents of every matching file
#   (requires dplyr to be attached by the caller, as in the original).
read_data <- function(data_dir, file_type, verbose = FALSE) {
    # Build full paths directly instead of setwd()-ing into the directory:
    # the original changed the working directory and would not restore it if
    # fread() failed part-way through.
    files <- list.files(path = data_dir, full.names = TRUE)
    # Match the pattern against the file names only, so a pattern cannot
    # accidentally match the directory portion of the path.
    files <- files[grepl(file_type, basename(files))]
    d <- rbindlist(lapply(files,
                          FUN = function(f) {
                              fread(f,
                                    header = TRUE,
                                    stringsAsFactors = TRUE,
                                    na.strings = "NA",
                                    strip.white = TRUE,
                                    data.table = FALSE)
                          }))
    return(tbl_df(d))
}
# Merge a list of data frames on a common key by successive left joins.
#
# @param data_frames A list of data frames sharing the key column(s).
# @param key Column name(s) to join by (passed as `by` to left_join()).
# @return A single data frame: the first element left-joined with each of the
#   remaining elements in turn.
# NOTE(review): relies on dplyr's left_join() being attached by the caller;
# this file itself only loads data.table.
combine_data <- function(data_frames, key) {
    d <- Reduce(function(...) left_join(..., by = key), data_frames)
    return(d)
}
# Column-wise unique values: applies unique() down each column of `data`.
# As with the original, apply() coerces `data` to a matrix first, so the
# result simplifies to a vector when every column has one distinct value and
# is a list otherwise.
uv <- function(data) {
    apply(data, MARGIN = 2, FUN = unique)
}
# Geometric mean of a numeric vector.
#
# @param x Numeric vector; any negative value yields NaN.
# @param na.rm Passed to the underlying sum()/mean() of the logs.
# @param zero.propagate If TRUE, any zero in `x` makes the result 0; if
#   FALSE, zeros are excluded from the log-sum but still counted in the
#   denominator, pulling the mean toward zero (same as the original).
# @return A single numeric value (NaN for negative input).
gm_mean <- function(x, na.rm = TRUE, zero.propagate = FALSE) {
    # The geometric mean is undefined for negative numbers.
    if (any(x < 0, na.rm = TRUE)) return(NaN)
    if (!zero.propagate) {
        # Keep only strictly positive values in the log-sum, but divide by
        # the full length of x, exactly as the original did.
        positives <- x[x > 0]
        return(exp(sum(log(positives), na.rm = na.rm) / length(x)))
    }
    if (any(x == 0, na.rm = TRUE)) return(0)
    exp(mean(log(x), na.rm = na.rm))
}
|
7e5caeb90e3369a2038bcd43bea958e763af782c
|
fad36ba43fc642cc4f05a37ff41a2bfdeb13ff6b
|
/R/in_silico_pcr.R
|
1738390b3a86c79c2f0ec4e096e4e9b370164ca1
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
ctb/metacoder
|
fd7f4830189515bacd5101a35621e4087018f576
|
f1203b17a0f90cb928cf176d040344fdfbbd0971
|
refs/heads/master
| 2022-06-04T09:11:45.810079
| 2017-08-22T20:14:49
| 2017-08-22T20:14:49
| 103,031,498
| 1
| 0
| null | 2017-09-10T13:29:41
| 2017-09-10T13:29:41
| null |
UTF-8
|
R
| false
| false
| 12,124
|
r
|
in_silico_pcr.R
|
#===================================================================================================
#' Execute EMBOSS Primerseach
#'
#' @param seq_path A character vector of length 1. The path to the fasta file containing reference
#' sequences to search for primer matches in.
#' @param primer_path A character vector of length 1. The path to the file containing primer pairs
#' to match. The file should be whitespace-delimited with 3 columns: primer name, first primer
#' sequence, and second primer sequence.
#' @param mismatch An integer vector of length 1. The percentage of mismatches allowed.
#' @param output_path A character vector of length 1. Where the output of primersearch is saved.
#' @param program_path A character vector of length 1. The location of the primersearch binary.
#' Ideally, it should be in your system's search path.
#' @param dont_run If TRUE, the command is generated, but not executed. This could be useful if you
#' want to execute the command yourself.
#' @param ... Additional arguments are passed to \code{primersearch}.
#'
#'
#' @return The command generated as a character vector of length 1.
#'
#' @seealso \code{\link{parse_primersearch}}
#'
#' @keywords internal
run_primersearch <- function(seq_path, primer_path, mismatch = 5, output_path = tempfile(),
                             program_path = 'primersearch', dont_run = FALSE, ...) {
    # Check if primersearch is installed...
    primersearch_is_installed()
    # Extra named arguments supplied by the caller are forwarded to the binary.
    extra_args <- as.list(match.call(expand.dots=F))$...
    if (Sys.info()['sysname'] == "Windows") {
        # On Windows, invoke via system2() with an argument vector.
        arguments <- c("-seqall", seq_path,
                       "-infile", primer_path,
                       "-mismatchpercent", mismatch,
                       "-outfile", output_path,
                       as.character(extra_args))
        system2(program_path, arguments, stdout = TRUE, stderr = TRUE)
    } else {
        # Elsewhere, build a single shell command string and run it.
        extra_args_string <- paste(names(extra_args), extra_args, collapse = " ", sep = " ")
        command <- gettextf('%s -seqall %s -infile %s -mismatchpercent %s -outfile %s',
                            program_path, seq_path, primer_path, mismatch, output_path)
        if (nchar(extra_args_string) > 0) command <- paste(command, extra_args_string)
        system(command)
    }
    # NOTE(review): `dont_run` is accepted but never consulted, so the command
    # always executes; and despite the roxygen docs above, the return value is
    # the output file path, not the generated command string.
    return(output_path)
}
#===================================================================================================
#' Parse EMBOSS primersearch output
#'
#' Parses the output file from EMBOSS primersearch into a data.frame with rows corresponding to
#' predicted amplicons and their associated information.
#' @param file_path The path to a primersearch output file.
#' @return A data frame with each row corresponding to amplicon data
#' @seealso \code{\link{run_primersearch}}
#'
#' @keywords internal
parse_primersearch <- function(file_path) {
    # Split output into chunks for each primer--------------------------------------------------------
    raw_output <- readLines(file_path)
    # Each "Primer name" header line starts a new primer chunk.
    primer_indexes <- grep("Primer name ", raw_output, fixed=TRUE, value=FALSE)
    primer_chunk_id <- findInterval(seq_along(raw_output), primer_indexes)
    # Chunk 0 (any lines before the first header) is dropped with [-1].
    primer_chunks <- vapply(split(raw_output, primer_chunk_id)[-1],
                            paste, character(1), collapse = "\n")
    names(primer_chunks) <- stringr::str_match(primer_chunks, "Primer name ([^\n]*)")[,2]
    # Extract amplicon data from each chunk and combine ----------------------------------------------
    # One capture group per field of an "Amplimer" record in the output.
    pattern <- paste("Amplimer ([0-9]+)",
                     "\tSequence: ([^\n]*)",
                     "\t([^\n]*)",
                     "\t([^\n]+) hits forward strand at ([0-9]+) with ([0-9]+) mismatches",
                     "\t([^\n]+) hits reverse strand at \\[([0-9]+)\\] with ([0-9]+) mismatches",
                     "\tAmplimer length: ([0-9]+) bp", sep = '\n')
    primer_data <- stringr::str_match_all(primer_chunks, pattern)
    # Repeat each primer-pair name once per matched amplimer, then stack all
    # match matrices (dropping the full-match column) into one data frame.
    primer_data <- as.data.frame(cbind(rep(names(primer_chunks), vapply(primer_data, nrow, numeric(1))),
                                       do.call(rbind, primer_data)[, -1]), stringsAsFactors = FALSE)
    # Reformat amplicon data -------------------------------------------------------------------------
    colnames(primer_data) <- c("pair_name", "amplimer", "seq_id", "name", "f_primer", "f_index",
                               "f_mismatch", "r_primer", "r_index", "r_mismatch", "length")
    primer_data <- primer_data[, c("seq_id", "pair_name", "amplimer", "length",
                                   "f_primer", "f_index", "f_mismatch",
                                   "r_primer", "r_index", "r_mismatch")]
    # seq_id is numeric because primersearch.character() writes sequences
    # under integer fasta headers.
    numeric_cols <- c("amplimer", "length","f_index", "f_mismatch",
                      "r_index", "r_mismatch", "seq_id")
    for (col in numeric_cols) primer_data[, col] <- as.numeric(primer_data[, col])
    return(primer_data)
}
# S3 generic for in silico PCR; dispatches on the class of `input`
# (a character vector of sequences, or a taxmap object).
#' @rdname primersearch
#' @export
primersearch <- function(input, forward, reverse, mismatch = 5, ...) {
    UseMethod("primersearch")
}
#===================================================================================================
#' Use EMBOSS primersearch for in silico PCR
#'
#' A pair of primers are aligned against a set of sequences.
#' The location of the best hits, quality of match, and predicted amplicons are returned.
#' Requires the EMBOSS tool kit (\url{http://emboss.sourceforge.net/}) to be installed.
#'
#' @param input (\code{character})
#' @param forward (\code{character} of length 1) The forward primer sequence
#' @param reverse (\code{character} of length 1) The reverse primer sequence
#' @param mismatch An integer vector of length 1. The percentage of mismatches allowed.
#' @param ... Unused.
#'
#' @return An object of type \code{\link{taxmap}}
#'
#' @section Installing EMBOSS:
#'
#' The command-line tool "primersearch" from the EMBOSS tool kit is needed to use this function.
#' How you install EMBOSS will depend on your operating system:
#'
#' \strong{Linux:}
#'
#' Open up a terminal and type:
#'
#' \code{sudo apt-get install emboss}
#'
#' \strong{Mac OSX:}
#'
#' The easiest way to install EMBOSS on OSX is to use \href{http://brew.sh/}{homebrew}.
#' After installing homebrew, open up a terminal and type:
#'
#' \code{brew install homebrew/science/emboss}
#'
#' \strong{Windows:}
#'
#' There is an installer for Windows here:
#'
#' ftp://emboss.open-bio.org/pub/EMBOSS/windows/mEMBOSS-6.5.0.0-setup.exe
#'
#' NOTE: This has not been tested by us yet.
#'
#' @examples
#' \dontrun{
#' result <- primersearch(rdp_ex_data,
#' forward = c("U519F" = "CAGYMGCCRCGGKAAHACC"),
#' reverse = c("Arch806R" = "GGACTACNSGGGTMTCTAAT"),
#' mismatch = 10)
#'
#' heat_tree(result,
#' node_size = n_obs,
#' node_label = name,
#' node_color = prop_amplified,
#' node_color_range = c("red", "yellow", "green"),
#' node_color_trans = "linear",
#' node_color_interval = c(0, 1),
#' layout = "fruchterman-reingold")
#' }
#'
#' @method primersearch character
#' @rdname primersearch
#' @export
# primersearch() method for a character vector of sequences: writes the
# sequences and primer pair(s) to temporary files, runs EMBOSS primersearch,
# and returns a data frame of predicted amplicons (including the extracted
# amplicon sequence).
primersearch.character <- function(input, forward, reverse, mismatch = 5, ...) {
    # Write temporary fasta file for primersearch input ----------------------------------------------
    # Sequences are written under integer headers so primersearch's
    # "Sequence:" field maps back to positions in `input`.
    sequence_path <- tempfile("primersearch_sequence_input_", fileext = ".fasta")
    # fixed: successive on.exit() calls without `add = TRUE` replace one
    # another, so only the last temporary file was being cleaned up
    on.exit(file.remove(sequence_path), add = TRUE)
    writeLines(text = paste0(">", seq_along(input), "\n", input),
               con = sequence_path)
    # Write primer file for primersearch input -------------------------------------------------------
    # Unnamed (or empty-named) primers get their index as a name.
    name_primer <- function(primer) {
        if (is.null(names(primer))) {
            to_be_named <- seq_along(primer)
        } else {
            to_be_named <- which(is.na(names(primer)) | names(primer) == "")
        }
        names(primer)[to_be_named] <- seq_along(primer)[to_be_named]
        return(primer)
    }
    forward <- name_primer(forward)
    reverse <- name_primer(reverse)
    pair_name <- paste(names(forward), names(reverse), sep = "_")
    primer_path <- tempfile("primersearch_primer_input_", fileext = ".txt")
    on.exit(file.remove(primer_path), add = TRUE)
    primer_table <- as.data.frame(stringsAsFactors = FALSE,
                                  cbind(pair_name, forward, reverse))
    utils::write.table(primer_table, primer_path,
                       quote = FALSE, sep = '\t', row.names = FALSE, col.names = FALSE)
    # Run and parse primersearch ---------------------------------------------------------------------
    output_path <- run_primersearch(sequence_path, primer_path, mismatch = mismatch)
    on.exit(file.remove(output_path), add = TRUE)
    output <- parse_primersearch(output_path)
    # Extract amplicon input ---------------------------------------------------------------------
    # Replace primersearch's reported hit strings with the actual primer
    # sequences. NOTE(review): with FUN.VALUE = logical(1) this assumes a
    # single forward and a single reverse primer -- verify before passing
    # multiple primer pairs.
    output$f_primer <- ifelse(vapply(output$f_primer, grepl, x = forward, FUN.VALUE = logical(1)), forward, reverse)
    output$r_primer <- ifelse(vapply(output$r_primer, grepl, x = reverse, FUN.VALUE = logical(1)), reverse, forward)
    # Convert the reverse index (reported from the 3' end) to a 5' coordinate,
    # then slice the predicted amplicon out of each input sequence.
    output$r_index <- vapply(input[output$seq_id], nchar, numeric(1)) - output$r_index + 1
    output$amplicon <- unlist(Map(function(seq, start, end) substr(seq, start, end),
                                  input[output$seq_id], output$f_index, output$r_index))
    return(output)
}
#' @method primersearch taxmap
#'
#' @param sequence_col (\code{character} of length 1) The name of the column in \code{obs_data} that has the input sequences.
#' @param result_cols (\code{character}) The names of columns to include in the output.
#' By default, all output columns are included.
#'
#' @rdname primersearch
#' @export
primersearch.taxmap <- function(input, forward, reverse, mismatch = 5,
                                sequence_col = "sequence", result_cols = NULL, ...) {
    # Fail early with a helpful message if the sequence column is absent.
    if (is.null(input$obs_data[[sequence_col]])) {
        stop(paste0('`sequence_col` "', sequence_col, '" does not exist. Check the input or change the value of the `sequence_col` option.'))
    }
    # Delegate the actual in silico PCR to the character method.
    result <- primersearch(input = input$obs_data[[sequence_col]],
                           forward = forward, reverse = reverse, mismatch = mismatch)
    seq_id <- result$seq_id
    result <- result[, colnames(result) != "seq_id", drop = FALSE]
    pair_name <- result$pair_name
    # Optionally keep only the caller-requested result columns.
    if (!is.null(result_cols)) {
        result <- result[, result_cols, drop = FALSE]
    }
    # Warn when primersearch output columns shadow existing obs_data columns.
    overwritten_cols <- colnames(input$obs_data)[colnames(input$obs_data) %in% colnames(result)]
    if (length(overwritten_cols) > 0) {
        warning(paste0('The following obs_data columns will be overwritten by primersearch:\n',
                       paste0(collapse = "\n", "  ", overwritten_cols)))
    }
    # Rows with no amplicon stay NA; rows that amplified receive their results.
    input$obs_data[ , colnames(result)] <- NA
    input$obs_data[seq_id, colnames(result)] <- result
    input$obs_data$amplified <- ! is.na(input$obs_data$length)
    # Register per-taxon summary functions: the count and the proportion of
    # observations under each taxon that amplified.
    input$taxon_funcs <- c(input$taxon_funcs,
                           count_amplified = function(obj, subset = obj$taxon_data$taxon_ids) {
                               vapply(obs(obj, subset), function(x) sum(obj$obs_data$amplified[x]), numeric(1))
                           },
                           prop_amplified = function(obj, subset = obj$taxon_data$taxon_ids) {
                               vapply(obs(obj, subset), function(x) sum(obj$obs_data$amplified[x]) / length(x), numeric(1))
                           })
    output <- input
    return(output)
}
#' Test if primersearch is installed
#'
#' Test if primersearch is installed
#'
#' @param must_be_installed (\code{logical} of length 1)
#' If \code{TRUE}, throw an error if primersearch is not installed.
#'
#' @return \code{logical} of length 1
#'
#' @keywords internal
primersearch_is_installed <- function(must_be_installed = TRUE) {
    # Probe the binary for its version banner; any error (e.g. command not
    # found) is captured as a condition object rather than propagated.
    probe <- tryCatch(system2("primersearch", "--version", stdout = TRUE, stderr = TRUE),
                      error = function(e) e)
    found <- grepl(pattern = "^EMBOSS", probe)
    if (must_be_installed && ! found) {
        stop("'primersearch' could not be found and is required for this function. Check that the EMBOSS tool kit is installed and is in the program search path. Type '?primersearch' for information on installing EMBOSS.")
    }
    invisible(found)
}
|
65b5b3b48f0389bcc674ba9f012910baf48a3885
|
237bcbdc6b09c57b251191471359eeefb8014410
|
/image_analysis/MASTER R CODES/old versions/AUTO analysis Gquad HIST RUN new S6.r
|
a68073facf0ea4257420d47127a6cddebf4bd9ca
|
[] |
no_license
|
achalneupane/rcodes
|
d2055b03ca70fcd687440e6262037507407ec7a5
|
98cbc1b65d85bbb6913eeffad62ad15ab9d2451a
|
refs/heads/master
| 2022-10-02T20:35:18.444003
| 2022-09-09T20:53:03
| 2022-09-09T20:53:03
| 106,714,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64,805
|
r
|
AUTO analysis Gquad HIST RUN new S6.r
|
## siCa2 parameters
## do.trace<-FALSE # tarce for non-linear fit
## robust.shaver<-2 # 2 / 1 is used to shave very close/close Ca screens when standard method fails, 0 to use standard method
## max.cells.per.field<-9000 # ususally 5000
## min.green.cells<-50
## min.red.cells<-10
## min.green.threshold<-50
## max.g1.posn<-27000 #6000 10000-1.1 7000-3.1 for ca3 : 20000 before
## min.g1.posn<-9000 #2750 5900-1.1 for Ca3
## expected.g1.posn<-21000 # 3000 8200-1.1 for Ca3
## max.ObjectTotalIntenCh1<-80000 # used 30000 in past bust is too low sometimes BEST DECIDED after first fit
## double.exposure<-FALSE
## use.high<-TRUE # TRUE for Ca3 FALSE for CA2highest exposure
## two.color<- FALSE # if true red and green channels SIRNA has only one - false
## red.mean.thresh<-100 # cut of for average red signal
## red.yrange.on.plot<-95000
## use.Edu.as.Sphase<-TRUE
## min.red.cells.for.Sphase<-50 ## lower than this and it will model the S-phase
## g2.over.g1.min<-1.99 #1.9 before
## g2.over.g1.max<-2.06 #2.35 before
## g2.over.g1.refit<-0.001 #0.05 before## setwd("/media/Bioinform-D/Research/Cellomics/Hugo screen/")
## ## setwd("/media/Bioinform-D/Data/Cellomics/Leo-screen")
## setwd("/media/scratch/Data/Cellomics/Ca-screen-latest")
## ## wetwd( "/media/Bioinform-D/Research/Cellomics/Ca screen/Latest")
## NOTE(review): this updates every package in a hard-coded private library
## on every run of the script -- slow, network-dependent, and can silently
## change results between runs; consider running it manually instead.
update.packages(lib="/home/pleo/R_latest/library")
############################################## START REQUIRED FUNCTIONs ###################
############################################## START REQUIRED FUNCTIONs ###################
############################################## START REQUIRED FUNCTIONs ###################
## Full DNA-content (DAPI total-intensity) histogram model evaluated at x.
## Components: A- and B-scaled normal densities for the G1 and G2 peaks, an
## S-phase term, plus ">4N" and "<2N" (debris) tails built by gating the
## reference curve 'static' with smoothed pnorm step edges.
## 'static' and 'adenR' are density()-style lists ($x, $y) re-interpolated
## onto x via aspline() (akima package); non-finite interpolants are zeroed.
## k0..k3 shift the step edges in units of the corresponding sd;
## g1.sd.inter / g2.sd.inter set the softness of the S-phase plateau edges.
## If use.den is TRUE the S-phase shape is the rectified, scaleR-scaled
## adenR density (apparently the red/EdU channel -- see later plotting code);
## otherwise S-phase is a plateau carved out of 'static' between the peaks.
a.model<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
## Re-sample the reference curve onto the evaluation grid.
ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
if(use.den){
## Empirical S-phase: interpolated adenR density, rectified and scaled.
S.curve<-aspline(adenR$x, adenR$y,x,method="improved",degree=3)
S.curve$y<-abs(scaleR*S.curve$y)
S.curve$y[!is.finite(S.curve$y)]<-0.0
## G1 + G2 + empirical S + gated >4N tail + gated <2N (debris) tail.
A*dnorm(x,mean=g1.peak.posn,sd=g1.sd, log=FALSE) + B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + S.curve$y + ak.curve$y*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd)-k3), lower.tail=TRUE, log.p=FALSE)) + -ak.curve$y*(pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd)+k0), lower.tail=TRUE, log.p=FALSE)-1)
}else{
## G1 + G2 + modelled S plateau (static gated between the two peaks by the
## k1/k2 edges) + gated >4N tail + gated <2N (debris) tail.
A*dnorm(x,mean=g1.peak.posn,sd=g1.sd, log=FALSE) + B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + abs(ak.curve$y*(( 1*pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd.inter)-k1), lower.tail=TRUE, log.p=FALSE)-1) -(1*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd.inter)+k2), lower.tail=TRUE, log.p=FALSE))-1 ))) + ak.curve$y*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd)-k3), lower.tail=TRUE, log.p=FALSE)) + -ak.curve$y*(pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd)+k0), lower.tail=TRUE, log.p=FALSE)-1)
}
}
######## G2 + S
## Partial model: G2 peak + S-phase only (no G1, no tails).  Same parameter
## conventions as a.model().  Used to integrate the S+G2 portion of a fit.
a.model.SandG2<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
## NOTE: ak.curve is only needed in the else branch but is computed
## unconditionally here (harmless, slightly wasteful when use.den is TRUE).
ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
if(use.den){
## Empirical S-phase from the adenR density, rectified and scaled.
S.curve<-aspline(adenR$x, adenR$y,x,method="improved",degree=3)
S.curve$y<-abs(scaleR*S.curve$y)
S.curve$y[!is.finite(S.curve$y)]<-0.0
B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + S.curve$y
}else{
## G2 peak + modelled S plateau gated between the two peak positions.
B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + abs(ak.curve$y*(( 1*pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd.inter)-k1), lower.tail=TRUE, log.p=FALSE)-1) -(1*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd.inter)+k2), lower.tail=TRUE, log.p=FALSE))-1 )))
}
}
######## G2+S+ gt4n
## Partial model: everything above G1, i.e. G2 peak + S-phase + ">4N" tail
## (no G1 peak, no sub-G1 debris term).  Same parameter conventions as
## a.model().
a.model.aboveG1<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
## Reference curve re-sampled onto x; non-finite interpolants zeroed.
ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
if(use.den){
## Empirical S-phase from the adenR density, rectified and scaled.
S.curve<-aspline(adenR$x, adenR$y,x,method="improved",degree=3)
S.curve$y<-abs(scaleR*S.curve$y)
S.curve$y[!is.finite(S.curve$y)]<-0.0
B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + S.curve$y + ak.curve$y*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd)-k3), lower.tail=TRUE, log.p=FALSE))
}else{
## G2 peak + modelled S plateau + gated >4N tail.
B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + abs(ak.curve$y*(( 1*pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd.inter)-k1), lower.tail=TRUE, log.p=FALSE)-1) -(1*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd.inter)+k2), lower.tail=TRUE, log.p=FALSE))-1 ))) + ak.curve$y*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd)-k3), lower.tail=TRUE, log.p=FALSE))
}
}
############# change regression method based on number of cells
############ could mdify this function to include the actual S-phase
############ use the adenNR to get estimates of G1 and G2 (as some G2 aways there on staining)
## Variant of a.model() that ALWAYS uses the empirical adenR density as the
## S-phase shape (the use.den argument is accepted but ignored -- there is
## no modelled-plateau branch here).  Same parameter conventions otherwise.
a.modelN<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
## Reference curve for the <2N / >4N tails.
ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
## Empirical S-phase from the adenR density, rectified and scaled.
S.curve<-aspline(adenR$x, adenR$y,x,method="improved",degree=3)
S.curve$y<-abs(scaleR*S.curve$y)
S.curve$y[!is.finite(S.curve$y)]<-0.0
## G1 + G2 + empirical S + gated >4N tail + gated <2N (debris) tail.
A*dnorm(x,mean=g1.peak.posn,sd=g1.sd, log=FALSE) + B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE) + S.curve$y + ak.curve$y*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd)-k3), lower.tail=TRUE, log.p=FALSE)) + -ak.curve$y*(pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd)+k0), lower.tail=TRUE, log.p=FALSE)-1)
}
## S-phase component in isolation.  If use.den is TRUE, it is the rectified,
## scaleR-scaled interpolation of the adenR density; otherwise it is the
## modelled plateau: the 'static' curve gated between the G1 and G2 peaks by
## two smoothed pnorm edges (k1/k2 shifts, g1.sd.inter/g2.sd.inter widths).
a.model.S<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
if(use.den){
ak.curve<-aspline(adenR$x, adenR$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
abs(scaleR*ak.curve$y)
}else{
ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
ak.curve$y[!is.finite(ak.curve$y)]<-0.0
abs(ak.curve$y*(( 1*pnorm( sqrt(2)*( ((x-g1.peak.posn)/g1.sd.inter)-k1), lower.tail=TRUE, log.p=FALSE)-1) -(1*(pnorm( sqrt(2)*( ((x-g2.peak.posn)/g2.sd.inter)+k2), lower.tail=TRUE, log.p=FALSE))-1 )))
}}
## ">4N" component in isolation: the 'static' reference curve,
## re-interpolated onto x, gated to the region above the G2 peak by a
## smoothed step edge (k3 shifts the edge in units of g2.sd).  The remaining
## parameters are accepted only for call-compatibility with the other
## a.model.* functions and are ignored.
a.model.gt4 <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  baseline <- aspline(static$x, static$y, x, method = "improved", degree = 3)
  baseline$y[!is.finite(baseline$y)] <- 0.0
  upper.gate <- pnorm(sqrt(2) * (((x - g2.peak.posn) / g2.sd) - k3))
  baseline$y * upper.gate
}
## "<2N" (sub-G1 / debris) component in isolation: the 'static' reference
## curve, re-interpolated onto x, gated to the region below the G1 peak by a
## smoothed step edge (k0 shifts the edge in units of g1.sd).  The remaining
## parameters are accepted only for call-compatibility and are ignored.
a.model.lt2 <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  baseline <- aspline(static$x, static$y, x, method = "improved", degree = 3)
  baseline$y[!is.finite(baseline$y)] <- 0.0
  lower.gate <- 1 - pnorm(sqrt(2) * (((x - g1.peak.posn) / g1.sd) + k0))
  baseline$y * lower.gate
}
## Sum of the two Gaussian peak components: A-scaled normal at the G1 peak
## plus B-scaled normal at the G2 peak.  Parameters beyond those six are
## accepted only for call-compatibility and are ignored.
a.model.G1andG2 <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  g1.part <- A * dnorm(x, mean = g1.peak.posn, sd = g1.sd)
  g2.part <- B * dnorm(x, mean = g2.peak.posn, sd = g2.sd)
  g1.part + g2.part
}
## Raw reference curve: the 'static' density re-interpolated onto x, with
## non-finite interpolants replaced by zero.  All model parameters are
## accepted only for call-compatibility and are ignored.
a.model.true <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  interpolated <- aspline(static$x, static$y, x, method = "improved", degree = 3)
  cleaned <- interpolated$y
  cleaned[!is.finite(cleaned)] <- 0.0
  cleaned
}
## G1 component only: an A-scaled normal density centred on the G1 peak.
## All other parameters are accepted only for call-compatibility with the
## other a.model.* functions and are ignored.
a.model.G1 <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  A * dnorm(x, mean = g1.peak.posn, sd = g1.sd)
}
## G2 component only: a B-scaled normal density centred on the G2 peak.
## All other parameters are accepted only for call-compatibility with the
## other a.model.* functions and are ignored.
a.model.G2 <- function(x, A, B, g1.peak.posn, g2.peak.posn, g1.sd, g2.sd, k0, k1, k2, k3, g1.sd.inter, g2.sd.inter, static) {
  B * dnorm(x, mean = g2.peak.posn, sd = g2.sd)
}
###############################
## Strict local maxima: TRUE at positions where series[i] exceeds every
## neighbour within a centred window of width 'span' (must be odd).
## With do.pad = TRUE the result has the same length as 'series' (the
## half-window at each end is padded with FALSE); otherwise only the
## interior positions are returned.  span == 1 trivially marks everything.
peaks <- function(series, span = 3, do.pad = TRUE) {
  span <- as.integer(span)
  if (span %% 2 != 1) stop("'span' must be odd")
  if (span == 1) return(rep.int(TRUE, length(series)))
  half <- span %/% 2
  windows <- embed(series, span)
  centre <- windows[, half + 1]
  is.peak <- apply(centre > windows[, -(half + 1), drop = FALSE], 1, all)
  if (do.pad) {
    c(rep.int(FALSE, half), is.peak, rep.int(FALSE, half))
  } else {
    is.peak
  }
}
peaksign <- function(series, span = 3, do.pad = TRUE)
{
  ## Classify each point of 'series' within a centred window of odd width
  ## 'span' (>= 3): +1 for a strict local maximum, -1 for a strict local
  ## minimum, 0 otherwise.  With do.pad = TRUE the half-window at each end
  ## is padded with 0 so the result matches length(series).
  ## (Original author: Martin Maechler, 25 Nov 2005.)
  span <- as.integer(span)
  if (span %% 2 != 1 || span == 1)
    stop("'span' must be odd and >= 3")
  half <- span %/% 2
  windows <- embed(series, span)
  ## Differences of the window centre against every other window member;
  ## all positive => peak, all negative => trough.
  diffs <- windows[, half + 1] - windows[, -(half + 1), drop = FALSE]
  sign.vec <- rep.int(0L, nrow(diffs))
  sign.vec[apply(diffs > 0, 1, all)] <- 1L
  sign.vec[apply(diffs < 0, 1, all)] <- -1L
  if (do.pad) {
    c(rep.int(0L, half), sign.vec, rep.int(0L, half))
  } else {
    sign.vec
  }
}
## Consistency check: peaks() and peaksign() must agree -- a point is a
## peak exactly when its sign code is +1, and a peak of -y exactly when
## its sign code is -1.  Errors (via stopifnot) on any disagreement.
check.pks <- function(y, span = 3) {
  sign.code <- peaksign(y, span)
  stopifnot(identical(peaks(y, span), sign.code == 1),
            identical(peaks(-y, span), sign.code == -1))
}
## Inline self-test, run at source time: for a few representative series
## (strictly increasing, constant, and one containing ties), verify that
## peaks()/peaksign() agree at several window widths, and that none of
## these series has a strict peak or trough at the default span
## (peaksign() must be all zero for each of them).
for(y in list(1:10, rep(1,10), c(11,2,2,3,4,4,6,6,6))) {
for(sp in c(3,5,7))
check.pks(y, span = sp)
stopifnot(peaksign(y) == 0)
}
############################################## END REQUIRED FUNCTIONs ###################
############################################## END REQUIRED FUNCTIONs ###################
############################################## END REQUIRED FUNCTIONs ###################
#setwd("/media/Bioinform-D/Data/Cellomics/Inhibitor_data")
## Package setup: robust/robustbase supply the robust regressions used below
## (lmrob/ltsReg), nls2 the non-linear fitting, akima the aspline()
## interpolation used by the a.model.* functions.
library(robust)
library(robustbase)
library(nls2)
library(akima)
## Historical plate selections, kept commented for reference:
## plate.numbers<-c(1:15,17:20,25:34) #20
## file.list=paste("Kiril_Plate",plate.numbers,sep="")
## file.name.list<-file.list
## file.list<-c("plate_1","plate_2","plate_3","plate_4","plate_5","plate_6","plate_7","plate_8","plate_9","plate_10","plate_11","plate_12","plate_13","plate_14","plate_15","plate_16","plate_17","plate_18","plate_21")
## file.name.list<-file.list
## file.list<-c("Ca_1","Ca_2","Ca_3")
## file.name.list<-file.list
## missing first of plate2
## siCa3
## NOTE(review): the next few assignments are superseded by the millian
## section below; they are retained unchanged to preserve the script's
## original behaviour when run top-to-bottom.
file.list<-c("plates_1","plates_2","plates_3","plates_3.1","plates_3.2")
setwd("/media/scratch/Data/Cellomics/Ca-screen-3")
file.list<-c("plates_3","plates_3.1","plates_3.2")
## file.list<-c("plates_2","plates_3","plates_3.1","plates_3.2")
## file.list<-c("plates_3","plates_3.1","plates_3.2")
## file.list<-c("plates_3.1","plates_3.2")
## file.list<-c("plates_3.2")
## file.list<-c("plates_3.1")
## file.list<-c("plates_1")
## file.list<-c("plates_2","plates_3.1")
## file.list<-c("plate_5")
## siCa3=2
### jane2
file.list<-c("plate_jane.1")
####
## file.list<-c("plate_4to6","plate_1to3","plate_1siCA2")
## FIX(review): the block below was console output (a dir() listing) pasted
## directly into the script.  `[1] "..."` is not valid R syntax, so the file
## could not be parsed/sourced at all; it is now commented out.
## [1] "HCSExplorerExport.zip"     "junk"
## [3] "plate_siFB1.1.TXT"         "plate_siFB1.1.well.TXT"
## [5] "plate_siFB1.1.zip"         "plate_siFB1.2.TXT"
## [7] "plate_siFB1.2.well.TXT"    "plate_siFB1.2.zip"
## [9] "plate_siFB1.3.TXT"         "plate_siFB1.3.well.TXT"
## [11] "plate_siFB1.3.zip"        "plate_siFB2_1.12.TXT"
## [13] "plate_siFB2_1.12.well.TXT" "plate_siFB2_1.12.zip"
## [15] "plate_siFB2_1.3.TXT"      "plate_siFB2_1.3.well.TXT"
## [17] "plate_siFB2_1.3.zip"      "plate_siFB2.2.1.TXT"
## [19] "plate_siFB2.2.1.well.TXT" "plate_siFB2.2.1.zip"
## [21] "plate_siFB2_2.23.TXT"     "plate_siFB2_2.23.well.TXT"
## [23] "plate_siFB2_2.23.zip"     "plate_siFB2_3.1.TXT"
## [25] "plate_siFB2_3.1.well.TXT" "plate_siFB2_3.1.zip"
## [27] "plate_siFB3.1.TXT"        "plate_siFB3.1.well.TXT"
## [29] "plate_siFB3.1.zip"        "plate_siFB3.2.TXT"
## [31] "plate_siFB3.2.well.TXT"   "plate_siFB3.2.zip"
## [33] "plate_siFB3.3.TXT"        "plate_siFB3.3.well.TXT"
## [35] "plate_siFB3.3.zip"        "testA1.jpeg"
## Active data directory and plate list (last assignment wins).
setwd("/media/scratch/Data/Cellomics/millian")
the.files<-dir(getwd())
the.files<-the.files[grep(".TXT",the.files)]
file.list<-c("plate_siFB1.1","plate_siFB1.2","plate_siFB1.3")
file.list<-c("plate_siFB1.2","plate_siFB1.3")
file.list<-c("plate_siFB2_3","plate_siFB2_2.23","plate_siFB2_2.1","plate_siFB2_1.12","plate_siFB2_1.3")
file.list<-c("plate_siFB3.1","plate_siFB3.2","plate_siFB3.3")
###,"plate_siFB3.1","plate_siFB3.2","plate_siFB3.3","plate_siFB2_1.12","plate_siFB2_1.3","plate_siFB2.2.1","plate_siFB2_2.23")
file.name.list<-file.list
################ choose plate type #####
## 384-well geometry (overridden just below by the 96-well block actually
## used for this run).
well.type<-384
row.type<-16
col.type<-24
################################
## Active plate geometry: 96-well.
well.type<-96
row.type<-8
col.type<-12
## FIX(review): "siFB2" / "siFB1" below were bare section labels pasted as
## code; evaluating an undefined symbol aborts the script with an
## "object not found" error, so they are now comments.
## siFB2 settings (superseded by the siFB1 block below):
max.g1.posn<-6500 #6000 10000-1.1 7000-3.1 for ca3 : 20000 before
min.g1.posn<-2500 #2750 5900-1.1 for Ca3
expected.g1.posn<-3000
max.ObjectTotalIntenCh1<-17500
## siFB1 settings (active -- these are the values the analysis loop uses):
do.trace<-FALSE # tarce for non-linear fit
robust.shaver<-0 # 2 / 1 is used to shave very close/close Ca screens when standard method fails, 0 to use standard method
max.cells.per.field<-2000 # ususally 5000
min.green.cells<-50
min.red.cells<-30
min.green.threshold<-50
max.g1.posn<-15000 #6000 10000-1.1 7000-3.1 for ca3 : 20000 before max.g1.posn*g2.over.g1.max
min.g1.posn<-5000 #2750 5900-1.1 for Ca3
expected.g1.posn<-9500 # 3000 8200-1.1 for Ca3
max.ObjectTotalIntenCh1<-40000 # used 30000 in past bust is too low sometimes BEST DECIDED after first fit
double.exposure<-FALSE
use.high<-FALSE # TRUE for Ca3 FALSE for CA2 highest exposure
two.color<- FALSE # if true red and green channels SIRNA has only one - false
red.mean.thresh<-30 # cut of for average red signal
red.total.thresh<-7500 # cut of for average red signal
red.yrange.on.plot<-40000
use.Edu.as.Sphase<-TRUE
min.red.cells.for.Sphase<-50 ## lower than this and it will model the S-phase
g2.over.g1.min<-1.99 #1.9 before
g2.over.g1.max<-2.06 #2.35 before
g2.over.g1.refit<-0.001 #0.05 before
############################################# START
##################################################################
#################################################################
for (ifile in 1:length(file.list)) {
# for (ifile in 20:20) {
file<-file.list[ifile]
file.name<-file.name.list[ifile]
print(file.name)
### set file_name and jump here for single file
options(show.error.messages = TRUE)
chromo<-try(read.delim(paste(file,".TXT",sep=""),header=T,nrows=1,sep="\t",fill=TRUE))
num.vars<-dim(chromo)[2]
vars.names<-colnames(chromo)[1:dim(chromo)[2]]
vars.names<-sub("TargetActivationV3Cell.","",vars.names)
########################## dim(chromo)<-c(num.lines,num.vars)
### get the samples in the column names
reads<-100000
if(double.exposure){keep<-c(2,3,5,6,10,11,17:dim(chromo)[2])}else{keep<-c(2,3,5,6,10,11,17:dim(chromo)[2])} # columns to keep
#if(double.exposure){keep<-c(3,5,6,10,17:26)}else{keep<-c(3,5,6,10,17:24)} # for kiril
header.lines<-1
num.lines<-1
cells<-{}
################################### read one plate in one go
chromo<-try(scan(paste(file,".TXT",sep=""),what=character(num.vars),skip=header.lines,sep="\t",fill=TRUE))
num.lines<-length(chromo)/(num.vars)
dim(chromo)<-c(num.vars,num.lines)
chromo<-t(chromo)
cells<-chromo[,keep]
###################################to a read in a lrage file
# counter<- -1
# while (num.lines >0 ){
# counter<-counter+1
# counter
# chromo<-try(scan(#paste(file,"TXT",sep="."),what=character(num.vars),skip=(reads*counter)+header.lines,nlines=reads,sep="\t",fill=TRUE))
# num.lines<-length(chromo)/(num.vars) # -1 cause of ContrilCase0
# dim(chromo)<-c(num.vars,num.lines)
# chromo<-t(chromo)
# cells<-rbind(cells,chromo[,keep])
# } # while rad in one data file
colnames(cells)<-vars.names[keep]
cells<-cells[-dim(cells)[1],] # strip out last blank line redundant see *** just below
######################### remap for Kiril 2 exposure settings
if(double.exposure){
if(use.high){
colnames(cells)[colnames(cells)=="TotalIntenCh2"]<-"TotalIntenCh2b"
colnames(cells)[colnames(cells)=="AvgIntenCh2"]<-"AvgIntenCh2b"
colnames(cells)[colnames(cells)=="VarIntenCh2"]<-"VarIntenCh2b"
colnames(cells)[colnames(cells)=="TotalIntenCh3"]<-"TotalIntenCh2"
colnames(cells)[colnames(cells)=="AvgIntenCh3"]<-"AvgIntenCh2"
colnames(cells)[colnames(cells)=="VarIntenCh3"]<-"VarIntenCh2"
colnames(cells)[colnames(cells)=="TotalIntenCh4"]<-"TotalIntenCh3"
colnames(cells)[colnames(cells)=="AvgIntenCh4"]<-"AvgIntenCh3"
colnames(cells)[colnames(cells)=="VarIntenCh4"]<-"VarIntenCh3"
}else{
colnames(cells)[colnames(cells)=="TotalIntenCh3"]<-"TotalIntenCh2b"
colnames(cells)[colnames(cells)=="AvgIntenCh3"]<-"AvgIntenCh2b"
colnames(cells)[colnames(cells)=="VarIntenCh3"]<-"VarIntenCh2b"
colnames(cells)[colnames(cells)=="TotalIntenCh4"]<-"TotalIntenCh3"
colnames(cells)[colnames(cells)=="AvgIntenCh4"]<-"AvgIntenCh3"
colnames(cells)[colnames(cells)=="VarIntenCh4"]<-"VarIntenCh3"
}}
if(!two.color){
cells[,"TotalIntenCh3"]<-cells[,"TotalIntenCh2"] # is scanned as one exposure TotalIntenCh3 does not exist but assumed to be red in code below
cells[,"AvgIntenCh3"]<-cells[,"AvgIntenCh2"]
cells[,"VarIntenCh3"]<-cells[,"VarIntenCh2"]
}
## for(i in 1:10){if (i==5){next} ; print(i)}
## for(j in 1:5){
## for (i in 1:5){
## if(j==2 & i!=2){next}
## print(paste("j",j,"i",i,sep=" "))
## }
## }
#####################
#loop if have multiple plate in the file
#sizes<-tapply(cells[,"BarCode"],cells[,"BarCode"],length)
barCodes<-unique(cells[,"BarCode"])
## cells[match(barCodes,cells[,"BarCode"]),c("UPD","BarCode")] # test UPD vs Barcode
for (iBarCodes in 1:length(barCodes)){
## if(barCodes[iBarCodes]=="siCA3_1.1" | barCodes[iBarCodes]=="siCA3_1.2" | barCodes[iBarCodes]=="siCA3_3.3"| barCodes[iBarCodes]=="siCA3_3.4" | barCodes[iBarCodes]=="siCA2_3.2"){next}
## for (iBarCodes in 1:1){
print(barCodes[iBarCodes])
the.cells<-cells[cells[,"BarCode"]==barCodes[iBarCodes],]
the.cells<-the.cells[the.cells[,"Row"]!="",] #strip out crap - *** other blank line! ( one at end of each plate
file.plate.name<-paste("plate",barCodes[iBarCodes],sep="_")
## if(barCodes[iBarCodes]=="siCA3_1.1" | barCodes[iBarCodes]=="siCA3_3.1" | barCodes[iBarCodes]=="siCA3_3.4"){
## if(barCodes[iBarCodes]=="siCA3_1.1"){max.g1.posn<-10000;min.g1.posn<-5900;expected.g1.posn<-8200}
## if(barCodes[iBarCodes]=="siCA3_3.1"){max.g1.posn<-7500;min.g1.posn<-3700;expected.g1.posn<-6000}
## if(barCodes[iBarCodes]=="siCA3_3.4"){max.g1.posn<-8000;min.g1.posn<-4500;expected.g1.posn<-6000}
## }else{max.g1.posn<-6500;min.g1.posn<-2750;expected.g1.posn<-3500}
## chk.num.plates<-length(unique(barCodes))
## print(paste("FILE: ",file.name," BARCODE:",the.cells[1,"BarCode"]," UNIQUE:",chk.num.plates,sep=""))
## chk.barcode1<-gsub("OCL1030000","Kiril_Plate",the.cells[1,"BarCode"])
## chk.barcode2<-gsub("OCL103000","Kiril_Plate",the.cells[1,"BarCode"])
## if(chk.barcode1 !=file.name){
## if(chk.barcode2 !=file.name){print (paste("WARNING","ERROR","BARCODE MISMATCH",sep=" "))}}
if(double.exposure){number.cols<-c(2:dim(the.cells)[2])}else{number.cols<-c(3:dim(the.cells)[2])}
cells.num<-as.numeric(the.cells[,number.cols])
dim(cells.num)<-dim(the.cells[,number.cols] )
colnames(cells.num)<-colnames(the.cells)[number.cols]
cells.num<-as.data.frame(cells.num)
rm(the.cells)
dim(cells.num)
rows<-tapply(cells.num[,"Row"],cells.num[,"Row"],length)
cols<-tapply(cells.num[,"Col"],cells.num[,"Col"],length)
# dim(green.c)<-c(length(rows),length(cols))
# dim(red.c)<-c(length(rows),length(cols))
# dim(notGreen.c)<-c(length(rows),length(cols))
# dim(notRed.c)<-c(length(rows),length(cols))
row.index<-c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z")
red.c<-c(rep.int(0,row.type*col.type))
dim(red.c)<-c(row.type,col.type)
rownames(red.c)<-row.index[1:row.type] #
colnames(red.c)<-as.character(1:col.type)
green.c<-red.c
notGreen.c<-red.c
notRed.c <-red.c
redAndGreen <-red.c
redAndGreenLow <-red.c
redAndGreenMid <-red.c
redAndGreenHigh <-red.c
redLow <-red.c
redMid <-red.c
redHigh <-red.c
greenLow <-red.c
greenMid <-red.c
greenHigh <-red.c
notRedAndGreen <-red.c
redAndNotGreen <-red.c
all.c <-red.c
all.using <-red.c
big.c <-red.c
data.store<-c(rep(NA,row.type*col.type))
dim(data.store)<-c(row.type,col.type)
rownames(data.store)<-row.index[1:row.type] # +1 cause index starts at zero
colnames(data.store)<-as.character(1:col.type)
lm.red.slope<-data.store
lm.green.slope<-data.store
lm.red.inter<-data.store
lm.green.inter<-data.store
lm.red.coverg <-data.store
lm.green.coverg <-data.store
cells.P.field<-data.store
R.P.field <-data.store
G.P.field <-data.store
RG.P.field <-data.store
RnG.P.field <-data.store
nG.P.field <-data.store
DNA.G1<-red.c
DNA.G2<-red.c
DNA.fitting<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.G1andG2<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.aboveG1<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.gt4 <-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.lt2<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.S<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.fit.success<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.A<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.B<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.inG1<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
DNA.inG2<-list(adenG=red.c,adenNG=red.c,aden=red.c,adenNR=red.c)
############RUN THE LOOP over all wells but some might be missing
wells.present<-paste(cells.num[,"Row"],cells.num[,"Col"],sep=":")
wells.present<-unique(wells.present)
for(iwells in 1:length(wells.present)){
iandj<-wells.present[iwells]
iandj<-unlist(strsplit(iandj,split=":"))
i<-as.integer(iandj[1])+1 # 1 to n label
j<-as.integer(iandj[2])+1 # 1 to n label
## print(i)
## print(j)
row.label<-i-1 #0 to n label
col.label<-j-1 #0 to n label
row.label.letter<- row.index[i]
col.label.number<- as.character(j)
print( paste("Doing ",row.label.letter," : ",col.label.number," -> iwells :",iwells,sep=""))
#####load data
test<-cells.num[ (cells.num[,"Row"]==row.label & cells.num[,"Col"]==col.label),]
### Keep record or total number of cells
all.c[i,j]<-dim(test)[1]
#### check if have enough cells
if(dim(test)[1]>100){
############# remove high density fields OR FIELDS IN GENERAL
num.per.field<-tapply(test[,"FieldIndex"],test[,"FieldIndex"],length)
fields.to.remove<-names(num.per.field[num.per.field> max.cells.per.field])
posns<-unlist(apply(as.matrix(fields.to.remove),1, function(x) grep(paste("^",x,"$",sep=""),test[,"FieldIndex"])))
if(length(posns)>0){test<-test[-posns,]}
################################
#so have index with respect to wells in HCSView
rownames(test)<-c(1:dim(test)[1])
big<- (test[,"ObjectTotalIntenCh1"] > max.ObjectTotalIntenCh1 )
big.c[i,j]<-sum(big)
all.using[i,j]<-sum(!big)
test<-test[!big,] #exclude big i.e. gt 4N cells from analysis
###############################################################################################################
###################################### begin identification of red cells ######################################
###############################################################################################################
xaxis<-"ObjectTotalIntenCh1"
yaxis<-"TotalIntenCh3"
if(robust.shaver==0){
red<-test[,"TotalIntenCh3"]>=red.total.thresh
names(red)<-rownames(test)
the.model<-try(lmrob(TotalIntenCh3~ObjectTotalIntenCh1,data=test,subset=c(1:dim(test)[1])[!red]),silent=TRUE)
## the.model<-try(lmrob(TotalIntenCh3~ObjectTotalIntenCh1,data=test),silent=TRUE) # lmrob failed badly with fungi contaminated wells
#print(as.character(the.model$converged))
if(inherits(the.model, "try-error")){
print("Failed lmrob")
lm.red.coverg[i,j]<-"FAILED"
the.model<-ltsReg(TotalIntenCh3~ObjectTotalIntenCh1,data=test,subset=c(1:dim(test)[1])[!red])
red.trimmed<- residuals(the.model)>0 & the.model$lts.wt==0 & test[!red,"AvgIntenCh3" ]>= red.mean.thresh
red[rownames(test[!red,])[red.trimmed]]<-TRUE
}else{
red.trimmed<- (abs(weights(the.model)) < 0.1/length(weights(the.model))) & residuals(the.model)>0 & test[!red,"AvgIntenCh3"] >= red.mean.thresh
red[rownames(test[!red,])[red.trimmed]]<-TRUE
lm.red.coverg[i,j]<-the.model$converged
}
lm.red.slope[i,j]<-coef(the.model)[2]
lm.red.inter[i,j]<-coef(the.model)[1]
red.c[i,j]<-sum(red)
}else{
print("no shaver")
}
#######PLOTS
xaxis<-"ObjectTotalIntenCh1"
yaxis<-"TotalIntenCh3"
#c(bottom, left, top, right)
jpeg( paste(paste(file.plate.name,row.label.letter,col.label.number,sep="_"),"jpeg",sep="."), width = 1280, height = 1024, units = "px",quality = 100 )
par(mar=c(3,5.5,2.5,5.1),mgp=c(2,1,0))
layout(matrix(c(1,2,3),3,byrow=TRUE),heights=c(1,1,1))
#layout.show(nf)
plot((test[,xaxis]),(test[,yaxis]),pch=20,cex=0.1,xlab="Total Intensity of DAPI",ylab="EDU Intensity",main=paste(file.plate.name," Well:",row.label.letter,col.label.number,sep=" "),font.lab=2,cex.lab=1.5,cex.main=2.0,ylim=c(0,red.yrange.on.plot))
if(!inherits(the.model, "try-error")){abline(coef(the.model),lty=10,col="red",lwd=2)}
points(test[red,xaxis],test[red,yaxis],col="red",pch=19,cex=0.4)
## plot((test[,xaxis]),(test[,yaxis]),pch=20,cex=0.1,xlab="Total Intensity of DAPI",ylab="EDU Intensity",main=paste(file.plate.name," Well:",row.label.letter,col.label.number,sep=" "),font.lab=2,cex.lab=1.5,cex.main=2.0,ylim=c(0,3000))
## plot((test[,xaxis]),(test[,yaxis]),pch=20,cex=0.1,xlab="Total Intensity of DAPI",ylab="EDU Intensity",main=paste(file.plate.name," Well:",row.label.letter,col.label.number,sep=" "),font.lab=2,cex.lab=1.5,cex.main=2.0)
## points(test[red,xaxis],test[red,yaxis],col="red",pch=19,cex=0.4)
## points(test[green,xaxis],test[green,yaxis],col="green",pch=19,cex=0.4)
## points(test[green & red,xaxis],test[green & red ,yaxis],col="cyan",pch=19,cex=0.4)
## points(test[blue,xaxis],test[blue,yaxis],col="blue",pch=19,cex=0.4)
## red<-residuals(the.model)>0
## green<- the.model$lts.wt==0
## blue<-test[!red,"AvgIntenCh3"] >= red.mean.thresh
## plot((test[red,xaxis]),(test[red,yaxis]),pch=20,cex=0.1,xlab="Total Intensity of DAPI",ylab="EDU Intensity",main=paste(file.plate.name," Well:",row.label.letter,col.label.number,sep=" "),font.lab=2,cex.lab=1.5,cex.main=2.0)
## lmrob(TotalIntenCh3~ObjectTotalIntenCh1,data=test[!red,])
## points(test[red,xaxis],test[red,yaxis],col="blue",pch=19,cex=0.4)
## target<-"2655" # a cell number
## points(test[target,xaxis],test[target,yaxis],col="green",pch=21,cex=1)
## identify(test[,xaxis],test[,yaxis],labels=rownames(test),col="green",pch=19,cex=1)
####checks
## plot((test[,xaxis]),(test[,yaxis]),pch=20,cex=0.1,xlab="Total Intensity of DAPI",ylab="EDU Intensity",main=paste(file.name," Well:",row.label.letter,col.label.number,sep=" "),font.lab=2,cex.lab=1.5,cex.main=2.0,ylim=c(0,20000))
## if(!inherits(the.model, "try-error")){abline(coef(the.model),lty=10,col="red",lwd=2)}
## points(test[red,xaxis],test[red,yaxis],col="blue",pch=19,cex=0.4)
## shit<-test[red,]
## the.order<-order(shit[,"TotalIntenCh3"])
## shit<-shit[the.order,]
## shit[1:5,]
## shit[shit[,"ObjectTotalIntenCh1"]>20000,][1:5,]
##################
#dev.off()
#######$$$$ PLOTS
xaxis<-"AvgIntenCh2"
yaxis<-"VarIntenCh2"
#backgroud OR stauration
well.rows.sub<-c(1:dim(test)[1])[ test[,xaxis]>min.green.threshold & test[,xaxis]<3500 & test[,yaxis]>0 ]
if(length(well.rows.sub) > min.green.cells) {
#print(length(well.rows.sub))
test[,xaxis]<-log2(test[,xaxis])
test[,yaxis]<-log2(test[,yaxis])
the.model <-ltsreg(VarIntenCh2~AvgIntenCh2+I(AvgIntenCh2^2),data=test,subset=well.rows.sub )
resid.quant<-quantile(the.model$residuals,c(0.85))
mostly.green<- ( (test[,yaxis]<= (coef(the.model)[3]* test[,xaxis]^2+coef(the.model)[2]* test[,xaxis]+ coef(the.model)[1]+resid.quant)) & is.finite(test[,xaxis]) & is.finite(test[,yaxis]) & test[,xaxis]>=log2(min.green.threshold) )
# lmrob failed badly with fungi contaminated wells
#print(as.character(the.model$converged))
the.model2 <-try(lmrob(VarIntenCh2~AvgIntenCh2+I(AvgIntenCh2^2),data=test,subset=c(1:dim(test)[1])[mostly.green] ),silent=TRUE)
if(inherits(the.model2, "try-error")){green<-rep(FALSE,dim(test)[1])}else{
#print(as.character(the.model2$converged))
lm.green.slope[i,j]<-toString(signif(coef(the.model),3))
lm.green.coverg[i,j]<-the.model2$converged
green<-(test[,yaxis]<= ((coef(the.model)[3]+1*sqrt(diag(the.model2$cov))[3])* test[,xaxis]^2 + (coef(the.model)[2]+0.15*sqrt(diag(the.model2$cov))[2])* test[,xaxis]+ coef(the.model)[1]+ 1*sqrt(diag(the.model2$cov))[1] ) & is.finite(test[,xaxis]) & is.finite(test[,yaxis]) & test[,xaxis]>=log2(min.green.threshold) )
if( sum(mostly.green) < sum(green) ){green<-mostly.green} ##flipped to < for kiril was > for joseph
############## select green cells in ranges of a third
ghist<-hist(test[green,xaxis],breaks=50,plot=FALSE)
xvals<-ghist$breaks[2:length(ghist$breaks)]
yvals<- cumsum(ghist$counts)
cuts<-quantile(yvals,c(0.333,0.666))
green.cut.low<- max(xvals[yvals<= yvals[length(yvals)]/4 ])
green.cut.mid<- max(xvals[yvals<= 2*yvals[length(yvals)]/4 ])
green.cut.high<- min(xvals[yvals>= 3*yvals[length(yvals)]/4 ])
}
green.c[i,j]<-sum(green)
#######PLOTS
#jpeg( paste(paste(file.name,"GREEN",row.label.letter,col.label.number,sep="_"),"jpeg",sep=".") )
par(mar=c(3,5.5,1.1,5.1),mgp=c(2,1,0)) #c(bottom, left, top, right)
plot((test[,xaxis]),(test[,yaxis]),pch=20,cex=0.1,xlab="log2( Average Green Signal )",ylab="log2( Varience Green Signal )",main="",font.lab=2,cex.lab=1.5)
points(test[green,xaxis],test[green,yaxis],col="green",pch=20,cex=0.1)
points(test[red,xaxis],test[red,yaxis],col="red",pch=20,cex=0.1)
order.in<-order(test[well.rows.sub,xaxis])
lines(test[well.rows.sub[order.in],xaxis],the.model$fit[order.in],col="magenta")
#dev.off()
########PLOTS
}else{green<-rep(FALSE,dim(test)[1])
green.c[i,j]<-0
#jpeg( paste(paste(file.name,"GREEN",row.label.letter,col.label.number,sep="_"),"jpeg",sep=".") )
par(mar=c(3,5.5,1.1,5.1),mgp=c(2.5,1,0))
plot(log2(test[,xaxis]),log2(test[,yaxis]),pch=20,cex=0.1,xlab="log2( Average Green Signal )",ylab="log2( Varience Green Signal )",font.lab=2,cex.lab=1.5 )
# dev.off()
} #less than 20 green objects detected
######################## build DNA-content histograms (kernel densities) #############################
# Kernel-density estimates of total DNA stain intensity (Ch1) for several cell
# subpopulations.  These densities are fitted further below to extract G1/S/G2
# fractions.  `red`/`green` are logical masks over the rows of `test`
# (per-object measurements for the current well — presumably one row per
# segmented cell; TODO confirm against the upstream loader).
use.den<-use.Edu.as.Sphase
xaxis<-"ObjectTotalIntenCh1"
pts<-512
# Fraction of objects flagged red; used later as the starting S-phase scale.
percent.R<-sum(red)/length(red)
aden<-density(test[,xaxis])
adenNG<-density(test[!green,xaxis])
adenNR<-density(test[!red,xaxis])
# Fall back to the complementary density when a subpopulation is too small for
# density() to be meaningful (and, for red, disable density-based S-phase).
if(sum(green)<min.green.cells){adenG<-adenNG}else{adenG<-density(test[green,xaxis])} # avoid error if no green cells
if(sum(red)<min.red.cells){adenR<-adenNR;use.den<-FALSE}else{adenR<-density(test[red,xaxis])} # avoid error if no red cells
if(sum(red)<min.red.cells.for.Sphase){use.den<-FALSE}
# Single-color mode: treat "not green" as "not red" and "green" as everything.
if(!two.color){adenNG<-adenNR;adenG<-aden}
par(mar=c(3.5,5.5,1.1,5.1),mgp=c(2.5,1,0)) #c(bottom, left, top, right)
the.max.range<-max(aden$y,adenG$y,adenNG$y,adenR$y,adenNR$y)
plot(adenNG,lwd=2,col="black",main="",font.lab=2,cex.lab=1.5,ylim=c(0,the.max.range))
if(two.color){
lines(adenG$x,adenG$y,col="green",lwd=2)
lines(aden$x,aden$y,col="black",lwd=2)
## lines(adenR$x,adenR$y,col="red",lwd=2)
}else{
## lines(adenR$x,adenR$y,col="red",lwd=2)
lines(aden$x,aden$y,col="black",lwd=2)
lines(adenNR$x,adenNR$y,col="grey50",lwd=2)
}
a.peak<-peaks(adenNR$y,span=25)
points(adenNR$x[a.peak],adenNR$y[a.peak],pch=23,col="red") ### found peak
potential.peaks<-adenNR$y[a.peak]
highest.place<-order(potential.peaks,decreasing=TRUE)[1] ## incase find g2 peak first
g1.peak.posn<-adenNR$x[a.peak][highest.place]
g1.peak.height<-adenNR$y[a.peak][highest.place]
g1.peak.place<-sum(adenNR$x <= g1.peak.posn)
if(g1.peak.posn>max.g1.posn | g1.peak.posn< min.g1.posn){
potential.peaks<-potential.peaks[-highest.place] # remove that highest peak
highest.place<-order(potential.peaks,decreasing=TRUE)[1]
### highest.place<-order(abs(potential.peaks-expected.g1.posn),decreasing=FALSE)[1]
g1.peak.posn<-adenNR$x[a.peak][highest.place]
g1.peak.height<-adenNR$y[a.peak][highest.place]
g1.peak.place<-sum(adenNR$x <= g1.peak.posn)
if(is.na(g1.peak.posn)){g1.peak.posn<-max.g1.posn}
if(g1.peak.posn>=max.g1.posn | g1.peak.posn< min.g1.posn){ # still not a good start
g1.peak.place<-sum(adenNR$x <= expected.g1.posn)
g1.peak.posn<-adenNR$x[g1.peak.place]
g1.peak.height<-adenNR$y[g1.peak.place]}
}
# aden$x[g1.peak.place]
g1.sd<-g1.peak.posn/3.5 # initial guess
if(g1.peak.posn >= max.g1.posn ){ g1.peak.posn<-max.g1.posn }
g1.peak.posn.ori<-g1.peak.posn # keep in case of wide G2 arrest
if(g1.peak.posn.ori >= max.g1.posn ){ g1.peak.posn.ori<-max.g1.posn }
A<-g1.peak.height/dnorm(g1.peak.posn,mean=g1.peak.posn,sd=g1.sd, log=FALSE) # initial guess
g1.region<-adenNR$x<=(g1.peak.posn+0.15*g1.peak.posn)
to.fit<-data.frame(y=adenNR$y[g1.region], x=adenNR$x[g1.region])
the.fit<-nls(y~A*dnorm(x,mean=g1.peak.posn,sd=g1.sd, log=FALSE),
data=to.fit,
start=list(A=A,g1.peak.posn=g1.peak.posn,g1.sd=g1.sd),
,lower=c(A/10, min.g1.posn, g1.sd/5)
,upper=c(10, max.g1.posn, g1.sd*5),
,trace=do.trace
,algorithm="port"
,control=list(maxiter=1000, minFactor=1/4048,tol=1e-4, warnOnly = TRUE))
the.coefs<-coef(the.fit)
A<-as.numeric(the.coefs["A"])
g1.peak.posn<-as.numeric(the.coefs["g1.peak.posn"])
g1.sd<-as.numeric(the.coefs["g1.sd"])
g1.peak.height<-A*dnorm( g1.peak.posn,mean=g1.peak.posn,sd=g1.sd, log=FALSE)
if(g1.peak.posn >= max.g1.posn ){ g1.peak.posn<-g1.peak.posn.ori } # above fit on G1 only is bollocks but keep g1.sd and A
points(g1.peak.posn,g1.peak.height,pch=23,col="blue")
second.highest.place<-order(potential.peaks,decreasing=TRUE)[2]
g2.peak.posn<-adenNR$x[a.peak][second.highest.place]
g2.peak.height<-adenNR$y[a.peak][second.highest.place]
g2.peak.place<-sum(adenNR$x < g2.peak.posn)
#### for fit range of allowed vales is 2.35 to 1.9 * gi.peak.posn
if((g2.peak.posn>=g2.over.g1.max*g1.peak.posn) | (g2.peak.posn <= g2.over.g1.min*g1.peak.posn) | is.na(adenNR$x[a.peak][second.highest.place]) ){ ## can't get a good peak
g2.peak.place<-sum(adenNR$x < g2.over.g1.max*g1.peak.posn) ## choose a point below the maximum
g2.peak.posn<-adenNR$x[g2.peak.place]
g2.peak.height<-adenNR$y[g2.peak.place]
}
points(g2.peak.posn,g2.peak.height,pch=23,col="blue")
g2.sd<-1*g1.sd # initial guess
B<-g2.peak.height/dnorm(g2.peak.posn,mean=g2.peak.posn,sd=g2.sd, log=FALSE) # initial guess
k0<-2 ## larger than 8 causes problems
k1<-0.7
k2<-0.7
k3<-0.7
g1.sd.inter<-g1.sd
g2.sd.inter<-g2.sd
scaleR<-percent.R
scaleR.min<- scaleR-0.05 # change change 5% down
if(scaleR.min<=0){scaleR.min<-0.01}
scaleR.max<- scaleR+0.05 # change change 5% down
if(scaleR.max>=0.96){scaleR.max<-0.96}
## a.model<-function(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR){
## ak.curve<-aspline(static$x, static$y,x,method="improved",degree=3)
## mid.place<-g1.peak.place+floor(g2.peak.place-g1.peak.place)/2
## mid.height<-aden$y[mid.place]
## mid.posn<-aden$x[mid.place]
######### fit aden which is RED and GREEN and not RED or GREEN:
static<-data.frame(x=aden$x,y=aden$y)
to.fit<-data.frame(y=aden$y, x=aden$x)
###try nls.lm in minpack.lm when k0-> 1 get problems for some reason
if(use.den){
the.fit<-nls(y~(a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR) )
,data=to.fit
,start=list(A=A,B=B,g1.peak.posn=g1.peak.posn,g1.sd=g1.sd,g2.peak.posn=g2.peak.posn,g2.sd=g2.sd,k0=k0,k3=k3,scaleR=scaleR)
,lower=c(A-0.25*A, B-0.5*B, g1.peak.posn-0.5*g1.sd, g1.sd/1.5, g1.peak.posn*g2.over.g1.min, g2.sd/2, 0.5, 0.5, scaleR.min)
,upper=c(A+0.25*A, B+0.5*B, g1.peak.posn+0.5*g1.sd, g1.sd*1.5, g1.peak.posn*g2.over.g1.max, g2.sd*2, 6, 5,scaleR.max)
,trace=do.trace
,algorithm="port"
,control=list(maxiter=1000, minFactor=1/2048,tol=1e-4, warnOnly = TRUE) )
}else{
the.fit<-nls(y~(a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR) )
,data=to.fit
,start=list(A=A,B=B,g1.peak.posn=g1.peak.posn,g1.sd=g1.sd,g2.peak.posn=g2.peak.posn,g2.sd=g2.sd,k0=k0,k1=k1,k2=k2,k3=k3,g1.sd.inter=g1.sd.inter, g2.sd.inter=g2.sd.inter)
,lower=c(A-0.25*A, B-0.5*B, g1.peak.posn-0.15*g1.sd, g1.sd/2, g1.peak.posn*g2.over.g1.min, g2.sd/2, 0.5, 0.5 , -2, 0.5, g1.sd/10,g2.sd/10)
,upper=c(A+0.25*A, B+0.5*B, g1.peak.posn+0.15*g1.sd, g1.sd*2, g1.peak.posn*g2.over.g1.max, g2.sd*2, 6, 5, 5, 5, g1.sd*7, g2.sd*7)
,trace=do.trace
,algorithm="port"
,control=list(maxiter=1000, minFactor=1/2048,tol=1e-4, warnOnly = TRUE) )
}
the.coefs<-coef(the.fit)
## a.model(to.fit$x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,0.75)
## curve( a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR), add=TRUE, col="magenta",lwd=5,lty="dashed")
## curve( a.model.S(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,0.01), add=TRUE, col="red",lwd=2,lty="dashed")
## the.coefs<-c(7.807640e-01, 3.113320e-01 , 5.947625e-06 , 1.553233e+04 , 4.024419e+03, 3.319354e+04 , 5.603872e+03 , 8.992209e+03, -7.230066e+03)
## the.coefs<-c( 0.720911 ,0.173168, 2.86649e-05 , 15621.2 , 4154.37 , 34761.3 , 4168.79, 0.000169886 ,3.96350e-05 )
## names(the.coefs)<-c("A","B","C","g1.peak.posn","g1.sd","g2.peak.posn","g2.sd","g1.sd.inter","g2.sd.inter","k1","k2")
A<-the.coefs["A"]
B<-the.coefs["B"]
g1.peak.posn<-the.coefs["g1.peak.posn"]
g1.sd<-the.coefs["g1.sd"]
g2.peak.posn<-the.coefs["g2.peak.posn"]
g2.sd<-the.coefs["g2.sd"]
g1.sd.inter<-the.coefs["g1.sd.inter"]
g2.sd.inter<-the.coefs["g2.sd.inter"]
k0<-the.coefs["k0"]
k1<-the.coefs["k1"]
k2<-the.coefs["k2"]
k3<-the.coefs["k3"]
scaleR<-the.coefs["scaleR"]
DNA.G1[i,j]<- g1.peak.posn
DNA.G2[i,j]<- g2.peak.posn
curve(A*dnorm(x,mean=g1.peak.posn,sd=g1.sd, log=FALSE),add=TRUE, col="purple",lwd=2,lty="dashed")
curve(B*dnorm(x,mean=g2.peak.posn,sd=g2.sd, log=FALSE),add=TRUE, col="violet",lwd=2,lty="dashed")
curve( a.model.S(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR), add=TRUE, col="red",lwd=2,lty="dashed")
curve( a.model.gt4(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static), add=TRUE, col="turquoise4",lwd=2,lty="dashed")
curve( a.model.lt2(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static), add=TRUE, col="turquoise",lwd=2,lty="dashed")
## curve( a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR), add=TRUE, col="black",lwd=2,lty="dashed")
if(two.color){profiles<-c("adenG","adenNG","aden"); the.colors<-c("green","grey34","grey50")}else{profiles<-c("adenNR","aden");the.colors<-c("grey50","black")} # densities want to analyse
for(iprofile in 1:length(profiles)){
static<-data.frame(x=eval(as.name(profiles[iprofile]))$x,y=eval(as.name(profiles[iprofile]))$y)
## g1ANDg2<-a.model.true(static$x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static)-a.model.S(static$x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR)-a.model.gt4(static$x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static)-a.model.lt2(static$x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static)
## new.to.fit<-data.frame(y=g1ANDg2, x=static$x) ### fit just A and B to the new data
## new.the.fit<-nls(y~(a.model.G1andG2(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static) )
## ,data=new.to.fit
## ,start=list(A=A,B=B)
## ,lower=c(A/10, B/10)
## ,upper=c(1.1, 1.1)
## ,trace=do.trace
## ,algorithm="port"
## ,control=list(maxiter=1000, minFactor=1/2048,tol=1e-4, warnOnly = TRUE) )
new.to.fit<-data.frame(y=static$y, x=static$x)
if(use.den){
### S phase can be very different for green / not green / not R means no s_-phase
if(profiles[iprofile]=="aden"){scaleR.use<-scaleR ;scaleR.min<- scaleR-0.05 ; if(scaleR.min<=0){scaleR.min<-0.01} ; scaleR.max<- scaleR+0.05; if(scaleR.max>=0.96){scaleR.max<-0.96}} # = - 5% for known % red
if(profiles[iprofile]=="adenNR"){scaleR.use<-0.025 ;scaleR.min<- 0 ;scaleR.max<- 0.05} # not red no S-phase within 5 %
if(profiles[iprofile]=="adenG"){ percent.R <-sum(red & green)/sum(green) ; scaleR.min<- percent.R-0.05 ; if(scaleR.min<=0){scaleR.min<-0.01} ; scaleR.max<- percent.R+0.05; if(scaleR.max>=0.96){scaleR.max<-0.96}} # red and green
if(profiles[iprofile]=="adenNG"){ percent.R <-sum(red & !green)/sum(!green) ; scaleR.min<- percent.R-0.05 ; if(scaleR.min<=0){scaleR.min<-0.01} ; scaleR.max<- percent.R+0.05; if(scaleR.max>=0.96){scaleR.max<-0.96}} # red no green
k1<-1; k2<-1; g1.sd.inter<-1; g2.sd.inter<-1 ### set up dummies else get an error in nls - it does not like dummies with an NA
rm(new.the.fit)
try( new.the.fit<-nls(y~(a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR) )
,data=new.to.fit
## ,start=list(A=A,B=B,g1.peak.posn=g1.peak.posn,g1.sd=g1.sd,g2.peak.posn=g2.peak.posn,g2.sd=g2.sd,k0=k0,k3=k3,scaleR=scaleR)
## ,lower=c(A-0.25*A, B-0.5*B, g1.peak.posn-0.5*g1.sd, g1.sd/2, g1.peak.posn*1.9, g2.sd/2, 0, 0, 0.01)
## ,upper=c(A*1.5, B*1.5, g1.peak.posn+0.5*g1.sd, g1.sd*2, g1.peak.posn*2.35, g2.sd*2, 6, 5, 0.95)
,start=list(A=A, B=B, g1.peak.posn=g1.peak.posn, g1.sd=g1.sd, g2.peak.posn=g2.peak.posn, g2.sd=g2.sd, k0=k0, k3=k3, scaleR=scaleR.use)
,lower=c(A*0.1, B*0.1, g1.peak.posn-0.05*g1.sd, g1.sd*0.8, g2.peak.posn-g2.over.g1.refit*g2.sd , g2.sd*0.8, 0.48, 0.49, scaleR.min)
,upper=c(A+0.5*A, B+0.5*B, g1.peak.posn+0.05*g1.sd, g1.sd*1.2, g2.peak.posn+g2.over.g1.refit*g2.sd, g2.sd*1.2, 6.1, 5.1, scaleR.max)
,trace=do.trace
,algorithm="port"
,control=list(maxiter=1000, minFactor=1/2048,tol=1e-4, warnOnly = TRUE) ) ,silent=TRUE)
if(!exists("new.the.fit")){ new.the.fit<-the.fit; DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"FAIL-singular"
}else{
if(inherits(new.the.fit, "try-error") | new.the.fit$message=="initial par violates constraints")
{new.the.fit<-the.fit; DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"FAIL-initial"}
else{DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"PASS"}
}
}else{
rm(new.the.fit)
try(new.the.fit<-nls(y~(a.model(x,A,B,g1.peak.posn,g2.peak.posn,g1.sd,g2.sd,k0,k1,k2,k3,g1.sd.inter,g2.sd.inter,static,use.den,adenR,scaleR) )
,data=new.to.fit
,start=list(A=A,B=B,g1.peak.posn=g1.peak.posn, g1.sd=g1.sd, g2.peak.posn=g2.peak.posn, g2.sd=g2.sd, k0=k0, k1=k1, k2=k2, k3=k3, g1.sd.inter=g1.sd.inter, g2.sd.inter=g2.sd.inter)
,lower=c(A*0.1, B*0.1, g1.peak.posn-0.05*g1.sd, g1.sd*0.8, g2.peak.posn-g2.over.g1.refit*g2.sd , g2.sd*0.8, 0.49, 0.49, -2.1, 0.29, g1.sd.inter*0.8, g2.sd.inter*0.8)
,upper=c(A+0.5*A, B+0.5*B, g1.peak.posn+0.05*g1.sd, g1.sd*1.2, g2.peak.posn+g2.over.g1.refit*g2.sd, g2.sd*1.2, 6.1, 5.1, 5.1 , 5.1, g1.sd.inter*1.2, g2.sd.inter*1.2)
,trace=do.trace
,algorithm="port"
,control=list(maxiter=1000, minFactor=1/2048,tol=1e-4, warnOnly = TRUE) ),silent=TRUE)
if(!exists("new.the.fit")){ new.the.fit<-the.fit; DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"FAIL-singular"
}else{
if(inherits(new.the.fit, "try-error") | new.the.fit$message=="initial par violates constraints")
{new.the.fit<-the.fit; DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"FAIL-initial"}
else{DNA.fitting[[eval(profiles[iprofile])]][i,j]<-"PASS"}
}
}
# g2.peak.posn-g2.over.g1.refit*g2.sd -> g1.peak.posn*g2.over.g1.min
# g2.peak.posn+g2.over.g1.refit*g2.sd -> g1.peak.posn*g2.over.g1.max
new.the.coefs<-coef(new.the.fit)
new.A<-as.numeric(new.the.coefs["A"])
new.B<-as.numeric(new.the.coefs["B"])
if(is.na(new.A) | is.na(new.B) ){new.the.coef<-the.coefs}
new.A<-as.numeric(new.the.coefs["A"])
new.B<-as.numeric(new.the.coefs["B"])
new.g1.peak.posn<- new.the.coefs["g1.peak.posn"]
new.g1.sd<- new.the.coefs["g1.sd"]
new.g2.peak.posn<- new.the.coefs["g2.peak.posn"]
new.g2.sd<- new.the.coefs["g2.sd"]
new.g1.sd.inter<- new.the.coefs["g1.sd.inter"]
new.g2.sd.inter<- new.the.coefs["g2.sd.inter"]
new.k0<- new.the.coefs["k0"]
new.k1<- new.the.coefs["k1"]
new.k2<- new.the.coefs["k2"]
new.k3<- new.the.coefs["k3"]
new.scaleR<-new.the.coefs["scaleR"]
#### failure can occur if outside parameter range; very rarely get A = -1.01, for example
## if(is.na(new.A)){new.A<-A}
## if(is.na(new.B)){new.B<-B}
s.int<-integrate(a.model.S,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static, use.den, adenR, new.scaleR)
gt4.int<-integrate(a.model.gt4,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static)
lt2.int<-integrate(a.model.lt2,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static)
new.g1.int<-integrate(a.model.G1,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static)
new.g2.int<-integrate(a.model.G2,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static)
## new.Sandg2.int<-integrate(a.model.SandG2,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static, use.den, adenR, new.scaleR)
## new.aboveg1.int<-integrate(a.model.aboveG1,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static, use.den, adenR, new.scaleR)
original.int<-integrate(a.model.true,lower=0,upper=max(static$x),subdivisions=length(static$x),new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static)
the.fit.success<- original.int$val-(new.g1.int$val + new.g2.int$val + s.int$val + gt4.int$val + lt2.int$val)
curve( a.model(x,new.A, new.B, new.g1.peak.posn, new.g2.peak.posn, new.g1.sd, new.g2.sd, new.k0, new.k1, new.k2, new.k3, new.g1.sd.inter, new.g2.sd.inter, static, use.den, adenR, new.scaleR), add=TRUE, col=the.colors[iprofile],lwd=2,lty="dotted")
DNA.G1andG2[[eval(profiles[iprofile])]][i,j]<- (new.g2.int$val + s.int$val)/new.g1.int$val
DNA.aboveG1[[eval(profiles[iprofile])]][i,j]<- (new.g2.int$val + s.int$val + gt4.int$val)/new.g1.int$val
DNA.gt4[[eval(profiles[iprofile])]][i,j]<- gt4.int$val
DNA.lt2[[eval(profiles[iprofile])]][i,j]<- lt2.int$val
DNA.S[[eval(profiles[iprofile])]][i,j]<- s.int$val
DNA.fit.success[[eval(profiles[iprofile])]][i,j]<- the.fit.success
DNA.A[[eval(profiles[iprofile])]][i,j]<- new.A
DNA.B[[eval(profiles[iprofile])]][i,j]<- new.B
DNA.inG1[[eval(profiles[iprofile])]][i,j]<- (new.g1.int$val)
DNA.inG2[[eval(profiles[iprofile])]][i,j]<- (new.g2.int$val)
}
if(two.color){
leg.txt<-c(paste("Red=",sum(red),sep=""),
paste("Green=",sum(green),sep=""),
paste("NotGreen=",sum(!green),sep=""),
paste("Red and Green=",sum(green & red),sep=""),
paste("% Red=",round(100*sum(red)/length(red),1),sep=""),
paste("S =",round(( 100*DNA.S$aden)[i,j],1),sep=""),
"--------------------------",
"RATIOs ARE FOR Green/NotGreen",
paste("(S+G2)/G1 =",round((DNA.G1andG2$adenG/DNA.G1andG2$adenNG)[i,j],2),sep=""),
paste("(S+G2+ >4N)/G1 =",round(( DNA.aboveG1$adenG/DNA.aboveG1$adenNG)[i,j],2),sep=""),
paste("S =",round(( DNA.S$adenG/DNA.S$adenNG)[i,j],2),sep=""),
paste("> 4N =",round((DNA.gt4$adenG/DNA.gt4$adenNG)[i,j],2),sep=""),
paste("< 2N =",round(( DNA.lt2$adenG/DNA.lt2$adenNG)[i,j],2),sep=""),
"--------------------------",
paste("Fit Quality: Green[NotGreen] =",round((DNA.fit.success$adenG)[i,j],2),"[",round((DNA.fit.success$adenNG)[i,j],2) ,"]" ," : G2/G1=",round(DNA.G2[i,j]/DNA.G1[i,j],2),sep=""))
legend(x=g1.peak.posn*2.4,the.max.range,legend=leg.txt,text.col=c("black","black","black","black","black","black","black","black","red","orange4","black","black","black"),pch="",bty="n",cex=1.25)
}else{
leg.txt<-c(paste("Red=",sum(red),sep=""),
paste("Not Red=",sum(!red),sep=""),
paste("% Red=",round(100*sum(red)/length(red),1),sep=""),
"--------------------------",
"DNA histogram data (black curve)",
paste("S =",round(( 100*DNA.S$aden)[i,j],1),sep=""),
paste("(S+G2)/G1 =",round((DNA.G1andG2$aden)[i,j],2),sep=""),
paste("(S+G2+ >4N)/G1 =",round(( DNA.aboveG1$aden)[i,j],2),sep=""),
paste("> 4N =",round((DNA.gt4$aden)[i,j],2),sep=""),
paste("< 2N =",round(( DNA.lt2$aden)[i,j],2),sep=""),
paste("S(all)/S(notRed) =",round(( DNA.S$aden/DNA.S$adenNR)[i,j],2),sep=""),
"--------------------------",
paste("Fit Quality:ALL[NotRed] =",round((DNA.fit.success$aden)[i,j],2),"[",round((DNA.fit.success$adenNR)[i,j],2) ,"]",sep=""),
paste("G2/G1=",round(DNA.G2[i,j]/DNA.G1[i,j],2),sep=""))
legend(x=g1.peak.posn*2.4,the.max.range,legend=leg.txt,text.col=c("red","grey35","red","black","black","red","orange4","black","black","black","orange","black","black"),pch="",bty="n",cex=1.25)
## legend(x=0,the.max.range,legend=leg.txt,text.col=c("red","grey35","red","black","black","red","orange4","black","black","black","orange","black","black","black"),pch="",bty="n",cex=1.2)
}
dev.off()
# ---- per-imaging-field object counts -------------------------------------
# tapply(..., length) tabulates object counts by FieldIndex for each
# subpopulation: all cells, red, green, red&green, not-green, red&!green.
cells.P.field.vec <-(tapply(test[,"FieldIndex"],test[,"FieldIndex"],length))
R.P.field.vec <- (tapply(test[red,"FieldIndex"],test[red,"FieldIndex"],length))
G.P.field.vec<- (tapply(test[green,"FieldIndex"],test[green,"FieldIndex"],length))
RG.P.field.vec<- (tapply(test[(red & green),"FieldIndex"],test[(red & green),"FieldIndex"],length))
nG.P.field.vec<- (tapply(test[!green,"FieldIndex"],test[!green,"FieldIndex"],length))
RnG.P.field.vec<- (tapply(test[(red & !green),"FieldIndex"],test[(red & !green),"FieldIndex"],length))
cells.P.field[i,j] <-toString(cells.P.field.vec) # no alignment needed: every field is present here
# Each subpopulation vector is re-indexed by the full field list so all wells
# report the same fields in the same order; fields with no objects become NA
# from the name lookup and are replaced by 0 before serialising with toString().
G.P.field.vec<- G.P.field.vec[names(cells.P.field.vec)]
G.P.field.vec[is.na(G.P.field.vec)]<-0
G.P.field[i,j]<- toString(G.P.field.vec)
R.P.field.vec<- R.P.field.vec[names(cells.P.field.vec)]
R.P.field.vec[is.na(R.P.field.vec)]<-0
R.P.field[i,j]<- toString(R.P.field.vec)
RG.P.field.vec<- RG.P.field.vec[names(cells.P.field.vec)]
RG.P.field.vec[is.na(RG.P.field.vec)]<-0
RG.P.field[i,j]<- toString(RG.P.field.vec)
RnG.P.field.vec<- RnG.P.field.vec[names(cells.P.field.vec)]
RnG.P.field.vec[is.na(RnG.P.field.vec)]<-0
RnG.P.field[i,j]<- toString(RnG.P.field.vec)
nG.P.field.vec<- nG.P.field.vec[names(cells.P.field.vec)]
nG.P.field.vec[is.na(nG.P.field.vec)]<-0
nG.P.field[i,j]<- toString(nG.P.field.vec)
# ---- whole-well summary counts for well (i, j) ---------------------------
notGreen.c[i,j]<-sum(!green)
notRed.c[i,j] <-sum(!red)
redAndGreen[i,j] <-sum(red & green)
# Red-and-green counts stratified by green intensity using the tertile-style
# cut points (green.cut.low/mid/high) computed from the green histogram above.
redAndGreenLow[i,j] <-sum(red & green & (test[,"AvgIntenCh2"] >= green.cut.low) )
redAndGreenMid[i,j] <-sum(red & green & (test[,"AvgIntenCh2"] >= green.cut.mid ))
redAndGreenHigh[i,j] <-sum(red & green & (test[,"AvgIntenCh2"] >= green.cut.high) )
#redLow[i,j] <-sum(red & (test[,"AvgIntenCh2"] >= green.cut.low) )
#redMid[i,j] <-sum(red & (test[,"AvgIntenCh2"] >= green.cut.low) & (test[,"AvgIntenCh2"] <= green.cut.high) )
#redHigh[i,j]<-sum(red & (test[,"AvgIntenCh2"] > green.cut.high) )
# Same stratification for all green cells (note: cumulative ">=", not bands).
greenLow[i,j] <-sum( green & (test[,"AvgIntenCh2"] >= green.cut.low) )
greenMid[i,j] <-sum(green & (test[,"AvgIntenCh2"] >= green.cut.mid) )
greenHigh[i,j]<-sum(green & (test[,"AvgIntenCh2"] >= green.cut.high) )
notRedAndGreen[i,j] <-sum(!red & green)
redAndNotGreen[i,j] <-sum(red & !green)
}
#end if: test big enough
} #loop over rows and columns
# Enrichment score per well: (red-given-green rate) / (red-given-not-green rate).
# Computed element-wise over the well matrices filled in the loop above.
score<-redAndGreen*notGreen.c/(redAndNotGreen*green.c) ###( red AND Green/Green) / (red and NOT Green/ NOT Green)
# Plate-wide mean score (printed when run interactively; value is not stored).
mean(as.numeric(redAndGreen*notGreen.c/(redAndNotGreen*green.c)),na.rm=TRUE)
save(list=c("score","greenLow","greenMid","greenHigh","redAndGreenLow","redAndGreenMid","redAndGreenHigh","cells.P.field","R.P.field","RnG.P.field","nG.P.field","G.P.field","RG.P.field","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg","DNA.G1andG2","DNA.aboveG1","DNA.gt4","DNA.lt2","DNA.S","DNA.inG1","DNA.inG2","DNA.fit.success","DNA.A","DNA.B","DNA.G1","DNA.G2","do.trace","robust.shaver","max.cells.per.field","min.green.cells","min.red.cells","min.green.threshold","max.g1.posn","min.g1.posn","expected.g1.posn","max.ObjectTotalIntenCh1","double.exposure","use.high","two.color","red.mean.thresh","red.yrange.on.plot","file.list","well.type","row.type","col.type","g2.over.g1.min","g2.over.g1.max","g2.over.g1.refit" ),file=paste(file.plate.name,"_SUMMARY",".RData",sep=""))
}## loop over plates in one file
} ## loop over files
#######################################################################################################################
######################################################## END ##########################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
#######################################################################################################################
[1] "Ca_1"
Read 34536510 items
[1] "ARVEC02_14:07:31"
[1] "Ca_2"
Read 78717420 items
[1] "ARVEC02_04:45:25"
[1] "siCa1_3.1"
[1] "siCa1_3.2"
[1] "Ca_3"
Read 166233708 items
[1] "ARVEC02_15:58:29"
[1] "ARVEC02_17:46:35"
[1] "siCa1_2.1"
[1] "ARVEC02_21:24:00"
[1] "ARVEC02_23:13:01"
order run:
DNA.G1andG2,DNA.aboveG1,DNA.gt4,DNA.lt2,DNA.S,DNA.fit.success,DNA.A,DNA.B,
save(list=c("score","greenLow","greenMid","greenHigh","redAndGreenLow","redAndGreenMid","redAndGreenHigh","cells.P.field","R.P.field","G.P.field","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="hugo_004_SUMMARY3.RData")
save(list=c("score","greenLow","greenMid","greenHigh","redAndGreenLow","redAndGreenMid","redAndGreenHigh","cells.P.field","R.P.field","G.P.field","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="hugo_002_SUMMARY3.RData")
save(list=c("score","greenLow","greenMid","greenHigh","redAndGreenLow","redAndGreenMid","redAndGreenHigh","cells.P.field","R.P.field","G.P.field","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="hugo_001_SUMMARY3.RData")
save(list=c("score","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="Plate2_V3.6_RESCAN_FINAL.RData")
save(list=c("score","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="Plate4_V3.6_RESCAN_FINAL.RData")
save(list=c("score","big.c","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="Plate1_V3.6_RESCAN_FINAL.RData")
signif(redAndGreen*notGreen.c/(redAndNotGreen*green.c),2)
signif(score ,2)
signif(red.c ,2)
signif(green.c ,2)
signif(notRed.c ,2)
signif(redAndGreen ,2)
signif(notGreen.c ,2)
signif(notRedAndGreen ,2)
signif(redAndNotGreen ,2)
signif( lm.red.slope ,2)
signif(lm.green.slope ,2)
signif(lm.red.inter ,2)
signif(lm.green.inter ,2)
signif(big.c/all.c ,1)
lm.red.coverg
lm.green.coverg
rm(cells.num)
save(list=c("score","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen","all.c","lm.red.slope","lm.green.slope","lm.red.inter","lm.green.inter","lm.red.coverg","lm.green.coverg"),file="Plate4_V3_ANA.RData")
save(list=c("score","red.c","green.c","notRed.c","redAndGreen","notGreen.c","notRedAndGreen","redAndNotGreen"),file="Plate1_V3_ANA.RData")
score
red.c
green.c
notRed.c
redAndGreen
notGreen.c
notRedAndGreen
redAndNotGreen
lm.red.slope
lm.green.slope
lm.red.inter
lm.green.inter
lm.red.coverg
lm.green.coverg
DE V3
> red.c
8 9
D 0 435
E 2206 0
> notRed.c
8 9
D 0 6672
E 14331 0
> redAndGreen
8 9
D 0 285
E 1057 0
> notGreen.c
8 9
D 0 1671
E 10558 0
> notRedAndGreen
8 9
D 0 5151
E 4922 0
> redAndNotGreen
8 9
D 0 150
E 1149 0
#######DE V4 no cut on Ch1 totalInt
> red.c
8 9
D 0 446
E 2302 0
> notRed.c
8 9
D 0 6740
E 14790 0
> redAndGreen
8 9
D 0 280
E 1067 0
> notGreen.c
8 9
D 0 1870
E 11106 0
> notRedAndGreen
8 9
D 0 5036
E 4919 0
> redAndNotGreen
8 9
D 0 166
E 1235 0
[,1] [,2]
[1,] 0 1671
[2,] 0 0
> notRed.c
[,1] [,2]
[1,] 0 6672
[2,] 0 0
> redAndGreen
[,1] [,2]
[1,] 0 285
[2,] 0 0
> notGreen.c
[,1] [,2] s
[1,] 0 1671
[2,] 0 0
> notRedAndGreen
[,1] [,2]
[1,] 0 5151
[2,] 0 0
> redAndNotGreen
[,1] [,2]
[1,] 0 150
[2,] 0 0
> red.c
8 9
D 0 0
E 2206 0
> notRed.c
8 9
D 0 0
E 14331 0
> redAndGreen
8 9
D 0 0
E 1057 0
> notGreen.c
8 9
D 0 0
E 10558 0
> notRedAndGreen
8 9
D 0 0
E 4922 0
> redAndNotGreen
8 9
D 0 0
E 1149 0
> red.c
1 2 3 4 5 6 7 8 9 10 11 12
A 876 603 715 507 173 323 478 329 417 1240 891 779
B 746 797 835 890 977 1041 1122 1253 866 281 226 156
C 929 702 727 607 674 770 257 171 359 434 668 132
D 423 438 924 149 207 228 755 465 435 1004 905 666
E 312 473 616 271 228 279 2571 2206 1171 421 741 522
F 645 1204 658 665 616 767 694 904 451 351 417 102
G 111 860 750 437 386 482 569 482 789 773 386 239
H 347 801 741 996 809 1413 672 764 786 486 358 71
> notRed.c
1 2 3 4 5 6 7 8 9 10 11 12
A 15568 6712 9328 7892 3607 6593 5269 6166 5607 5992 7334 10345
B 8429 14515 6790 6719 4778 7504 11382 10926 8415 3924 4000 3812
C 8759 6937 6375 7859 5806 9766 20300 24179 17317 5574 8554 3599
D 4687 16671 12422 21855 20063 19810 6119 5576 6672 14833 10395 8591
E 4636 20161 6225 19638 18924 19810 18945 14331 20189 17105 21266 17231
F 9367 17547 6223 9808 8996 6592 7350 8203 6045 4313 6190 2365
G 2649 6790 5725 15395 16474 21815 6853 5450 7246 6859 4679 4071
H 10632 9008 7647 13072 10054 13692 7474 9361 8810 15297 14394 6265
> redAndGreen
1 2 3 4 5 6 7 8 9 10 11 12
A 12 331 247 187 85 145 184 146 193 994 673 518
B 191 99 381 105 89 107 283 300 312 98 96 76
C 215 226 301 301 421 347 5 6 6 169 223 74
D 65 22 266 0 0 0 471 328 285 383 414 355
E 122 0 345 10 6 9 1062 1057 321 6 24 16
F 197 30 355 114 149 127 101 118 65 118 140 72
G 56 429 425 4 1 3 13 7 12 291 135 69
H 4 367 346 4 4 4 305 383 393 0 0 0
> notGreen.c
1 2 3 4 5 6 7 8 9 10 11 12
A 16036 2687 5206 3602 763 2890 2354 2567 2517 1843 2908 5043
B 5818 12595 2969 6282 4568 7090 8521 7950 5077 1525 1241 1097
C 6793 3901 2862 3546 1624 5286 20360 24073 17506 2933 4607 1209
D 3375 15746 9199 1 1 1 1705 1078 1671 9608 5527 3786
E 1934 1 2279 19508 18773 19713 17476 10558 19399 17357 21474 17067
F 5703 18179 2246 8685 7024 5293 6425 7434 4952 1613 2454 421
G 577 2496 1836 15652 16402 22047 7113 5557 7705 3665 2381 1771
H 10400 4657 3487 14018 10816 14998 3434 4555 4082 1 1 1
> notRedAndGreen
1 2 3 4 5 6 7 8 9 10 11 12
A 396 4297 4590 4610 2932 3881 3209 3782 3314 4395 4644 5563
B 3166 2618 4275 1222 1098 1348 3700 3929 3892 2582 2889 2795
C 2680 3512 3939 4619 4435 4903 192 271 164 2906 4392 2448
D 1670 1341 3881 0 0 0 4698 4635 5151 5846 5359 5116
E 2892 0 4217 391 373 367 2978 4922 1640 163 509 670
F 4112 542 4280 1674 2439 1939 1518 1555 1479 2933 4013 1974
G 2127 4725 4214 176 457 247 296 368 318 3676 2549 2470
H 575 4785 4555 46 43 103 4407 5187 5121 0 0 0
> redAndNotGreen
1 2 3 4 5 6 7 8 9 10 11 12
A 864 272 468 320 88 178 294 183 224 246 218 261
B 555 698 454 785 888 934 839 953 554 183 130 80
C 714 476 426 306 253 423 252 165 353 265 445 58
D 358 416 658 149 207 228 284 137 150 621 491 311
E 190 473 271 261 222 270 1509 1149 850 415 717 506
F 448 1174 303 551 467 640 593 786 386 233 277 30
G 55 431 325 433 385 479 556 475 777 482 251 170
H 343 434 395 992 805 1409 367 381 393 486 358 71
>
signif(redAndGreen/redAndNotGreen,2 )
rat<-redAndGreen/redAndNotGreen
m<-mean(as.numeric(rat))
psd<-sd(as.numeric(rat))
signif(((redAndGreen/redAndNotGreen)-m)/psd,2 )
|
29384693a1bf3990b9e3d4408a23e79d4a4a4d6e
|
7781f8cd182574893f0421929eaabf51a1db53dd
|
/man/get_posteriors.Rd
|
bd042c7a7c1a7fc055b1ee563b254cdf0ead1baf
|
[
"MIT"
] |
permissive
|
SWOT-Confluence/geobamdata
|
d154c15c461172de016d9eef3a1f5b3bf5f2fae3
|
51aa72a2f7376504724f6c580077c882d760825e
|
refs/heads/main
| 2023-03-31T22:07:28.111915
| 2021-04-13T12:10:45
| 2021-04-13T12:10:45
| 318,612,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 444
|
rd
|
get_posteriors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geobam_data.R
\name{get_posteriors}
\alias{get_posteriors}
\title{Generate and extract geobam posteriors from reach data.}
\usage{
get_posteriors(reach_data)
}
\arguments{
\item{reach_data}{list of reach data (input observations)}
}
\value{
list of 3 chains with mean and sd posterior values
}
\description{
Generate and extract geobam posteriors from reach data.
}
|
3f0d4758bebcb589eea2014260a06d8547e119da
|
014a09363ab8169a989848d12e1c84992bbfe38f
|
/tests/testthat/test-aov_ss.R
|
e527781f262971994d6a470b65ec10d2d0edf65a
|
[] |
no_license
|
bassam-abulnoor/TestDimorph
|
45b412e5ab0747b51fa6cb68ad3c9a5c19b4958a
|
f5d733b3bf2ee8d684d21fe5888afd5ceb2e60f4
|
refs/heads/master
| 2021-07-20T05:36:15.983925
| 2021-01-24T18:36:44
| 2021-01-24T18:36:44
| 234,365,880
| 1
| 0
| null | 2021-01-02T19:29:37
| 2020-01-16T16:46:23
|
R
|
UTF-8
|
R
| false
| false
| 990
|
r
|
test-aov_ss.R
|
test_that("aov_ss", {
library(TestDimorph)
testthat::expect_true(round(
aov_ss(
baboon.parms_df[1:3, ],
Pop = 2,
digits = 3,
letters = TRUE,
pairwise = TRUE,
)[[1]]$p.value[1],
3
) == 0.325)
testthat::expect_true(aov_ss(
baboon.parms_df[1:3, ],
Pop = 2,
digits = 3,
letters = TRUE,
pairwise = TRUE,
es_anova = "f"
)$`Female model`[[8]][1] == 0.028)
testthat::expect_true(aov_ss(
baboon.parms_df[1:3, ],
Pop = 2,
digits = 3,
letters = TRUE,
pairwise = TRUE,
es_anova = "eta"
)$`Female model`[[8]][1] == 0.027)
testthat::expect_error(aov_ss(
baboon.parms_df[1:3, ],
Pop = 2,
digits = 3,
letters = TRUE,
pairwise = TRUE,
es_anova = "qq"
)$`Female model`[[8]][1] == 0.028)
testthat::expect_error(
aov_ss(
x = matrix(NA),
Pop = 500,
digits = 3,
es = 900,
letters = 900,
pairwise = 900,
sig.level = 900
)
)
})
|
5f73f687021bf3b9d9b4a0e8e87917039da833a1
|
bade93cbfc1f25160dfbe9493bfa83f853326475
|
/doc/mwc/doc/mwc/alphabet/m.r
|
877fead402b5355c643a3be39d058a94036eb2a8
|
[
"BSD-3-Clause"
] |
permissive
|
gspu/Coherent
|
c8a9b956b1126ffc34df3c874554ee2eb7194299
|
299bea1bb52a4dcc42a06eabd5b476fce77013ef
|
refs/heads/master
| 2021-12-01T17:49:53.618512
| 2021-11-25T22:27:12
| 2021-11-25T22:27:12
| 214,182,273
| 26
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24
|
r
|
m.r
|
.sp
.ce 1
\f(CHM\fR
.sp
|
039323543c7871997f965fbb5d78b744d91e136c
|
213cddea3de10c4b51dce3629a8a481cd93f4872
|
/man/filterIndustries.Rd
|
f437dd0a16b138acfff4f89ffad9b59ab7db2f5f
|
[] |
no_license
|
ccwoolfolk/demandmap
|
83186be02cad596a98483eba1452dd82af154017
|
9129aa8c237e6750e38dbf0c4add80aaca1d4568
|
refs/heads/master
| 2020-03-29T14:29:01.666719
| 2018-09-26T16:08:27
| 2018-09-26T16:08:27
| 150,020,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
filterIndustries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{filterIndustries}
\alias{filterIndustries}
\title{Filter a tibble by industry}
\usage{
filterIndustries(dataToFilter, industries)
}
\arguments{
\item{dataToFilter}{tibble}
\item{industries}{character[n]. Industry codes to include}
}
\value{
tibble with only the NAICS codes in industries
}
\description{
Filter a tibble by industry
}
|
77e53477487c38f6cd4fc364b3af6b77fcaf8d81
|
8810a7bb371e125ec24c2978eb646fbf02e2c6ca
|
/man/LOOCV.Rd
|
a308eee1470dc221d3c6b4dcfec2c81414d3dde9
|
[] |
no_license
|
jayverhoef/KrigLinCaution
|
758ad3a3c7c832e5df6a2d236ab560dc733d295e
|
cfee3a616a5256a817aa72046064cb5a6ee309ee
|
refs/heads/master
| 2021-01-20T16:11:11.487098
| 2018-01-20T18:40:48
| 2018-01-20T18:40:48
| 90,825,285
| 10
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
LOOCV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LOOCV.R
\name{LOOCV}
\alias{LOOCV}
\title{Leave-one-out crossvalidation (LOOCV) for a fitted spatial model}
\usage{
LOOCV(SigMat, obsVec)
}
\arguments{
\item{SigMat}{The fitted covariance matrix for the spatial data}
\item{obsvec}{a vector of the observed data}
}
\value{
a data.frame with the predictions in the first column and the
prediction variances in the second column
}
\description{
Leave-one-out crossvalidation (LOOCV) for a fitted spatial model
}
\author{
Jay Ver Hoef
}
|
323ad24ad5f4aee4bd777934f5e6b28af997d0d9
|
b30a7754d83c85a05a9e4d4043f4f86d4b8a146e
|
/R/filter.totcor.R
|
dd351c55bfd49a4b2a2c2941f75f08129f6dc573
|
[] |
no_license
|
cran/MetFns
|
ee916cbc684ecd2c8c8bd71241e87925ede01220
|
b0bc3c7c4bd7724c0dc725c1560df6286f278b97
|
refs/heads/master
| 2021-01-17T10:05:52.743380
| 2018-10-13T21:50:12
| 2018-10-13T21:50:12
| 24,534,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 505
|
r
|
filter.totcor.R
|
filter.totcor<-function(data,shw,r,C)
{
if(!is.numeric(r) || r<1.5 || r>4.5)
stop("Invalid input parameter specification: check value of r")
if(!is.numeric(C) || C<1)
stop("Invalid input parameter specification: check value of C")
if(!(all(c("F","Lmg")%in%names(data))))
stop("Error: data does not contain columns named F and Lmg")
elev.data<-sinh(data,shw)
m<-ncol(elev.data)
elev.data[elev.data$F*r^(6.5-elev.data$Lmg)/elev.data$sine.h<=C,-m]
}
|
726ab6cf8fcd9d101e34be90e04951b89ef83103
|
b513e438cf48c3b2d1e603df9967aa06eba72263
|
/demo/testing_pso.R
|
1c9028d44f27774bd17265d3fece7d31f07599d9
|
[] |
no_license
|
cran/PortfolioAnalytics
|
da65651aacaff23b8b8e9e4d588ce9ed6fdcebf3
|
0a32da92a5caae29e26b2ecbc8107c66ad64dcf7
|
refs/heads/master
| 2023-05-25T06:40:44.201460
| 2018-05-17T21:48:29
| 2018-05-17T21:48:29
| 34,196,423
| 5
| 19
| null | 2023-05-25T18:30:23
| 2015-04-19T06:43:58
|
R
|
UTF-8
|
R
| false
| false
| 2,024
|
r
|
testing_pso.R
|
#' ---
#' title: "pso Demo"
#' date: "7/17/2014"
#' ---
#' This script demonstrates running optimizations using pso as the
#' optimization backend. Note that this script uses the v1 specification
#' previous to version 0.8.3.
#' Load packages
library(PortfolioAnalytics)
library(pso)
#' Load data and set general Parameters for sample code
data(edhec)
N <- 4
R <- edhec[,1:N]
funds <- names(R)
mu.port <- mean(colMeans(R))
#' Define problem with constraints and objectives
gen.constr <- constraint(assets = funds, min=-2, max=2, min_sum=0.99, max_sum=1.01, risk_aversion=1)
gen.constr <- add.objective(constraints=gen.constr, type="return", name="mean", enabled=FALSE, target=mu.port)
gen.constr <- add.objective(constraints=gen.constr, type="risk", name="var", enabled=FALSE, risk_aversion=10)
gen.constr <- add.objective(constraints=gen.constr, type="risk", name="CVaR", enabled=FALSE)
gen.constr <- add.objective(constraints=gen.constr, type="risk", name="sd", enabled=FALSE)
#' Max return under box constraints, fully invested
print('Max return under box constraints, fully invested')
max.port <- gen.constr
max.port$min <- rep(0.01,N)
max.port$max <- rep(0.30,N)
max.port$objectives[[1]]$enabled <- TRUE
max.port$objectives[[1]]$target <- NULL
max.port$objectives[[1]]$multiplier <- -1
max.solution <- optimize.portfolio(R=R, constraints=max.port, optimize_method="pso", trace=TRUE)
#' Mean-variance: Fully invested, Global Minimum Variance Portfolio
print('Mean-variance: Fully invested, Global Minimum Variance Portfolio')
gmv.port <- gen.constr
gmv.port$objectives[[4]]$enabled <- TRUE
gmv.solution <- optimize.portfolio(R=R, constraints=gmv.port, optimize_method="pso", trace=TRUE)
#' Minimize CVaR
print('Min-CVaR')
cvar.port <- gen.constr
cvar.port$min <- rep(0,N)
cvar.port$max <- rep(1,N)
cvar.port$objectives[[3]]$enabled <- TRUE
cvar.port$objectives[[3]]$arguments <- list(p=0.95, clean="boudt")
cvar.solution <- optimize.portfolio(R=R, constraints=cvar.port, optimize_method="pso", trace=TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.