blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
92042192935ddb7bf2ca6476cae0ff020c2585d4 | bd89464c96bec12d4a1bd1b18b6822a0aca69dc3 | /Script/BoD_State_9_Comparison.R | 62339027a916624e6de26b4130b8d83e1a091673 | [] | no_license | raedkm/TTI-StateBurden | c27d99b11993c9a54738d5b0b4929ab433fc0a76 | c89273b5fe62a4b811dd168bb23d246ccf53574f | refs/heads/master | 2022-11-16T11:34:34.172996 | 2022-11-02T06:18:26 | 2022-11-02T06:18:26 | 201,295,135 | 0 | 0 | null | 2020-06-18T19:27:33 | 2019-08-08T16:17:29 | HTML | UTF-8 | R | false | false | 3,901 | r | BoD_State_9_Comparison.R | #---------------------------------------------#
#Project : State specific Burden of childhood asthma due to TRAP - 2019
#Part : (09) Comparison tables
#Purpose : Read in census data, income data, NO2 conc, incidence rate (original study), and prevelance rate (original study)
# Followed by joining the data sets
#Created by Raed Alotaibi
#Date Created: May-07-2019
#Last Modified: Aug-8-2019
#---------------------------------------------#
# Defining function {myspread} --------------------------------------------
# Spread several value columns at once: every value column is gathered,
# combined with the level of `key`, and spread back out so the output has
# one "<key level>_<value name>" column per combination.
myspread <- function(df, key, value) {
  key_quo <- rlang::enquo(key)
  value_quo <- rlang::enquo(value)
  value_quos <- rlang::quos(!!value_quo)
  df %>%
    gather(variable, value, !!!value_quos) %>%
    unite(temp, !!key_quo, variable) %>%
    spread(temp, value)
}
# Burden estimate ---------------------------------------------------------
# Tag each burden data set with its estimation approach, then stack them.
# NOTE(review): `burden_c` and `burden` must already exist in the workspace
# (created by an earlier script in this pipeline) — confirm run order.
burden_c <- burden_c %>%
mutate(type = "origin")
burden <- burden %>%
mutate(type = "state")
burden_join <- rbind(burden_c, burden)
# Quick look: total cases (CASES), attributable cases (AC) and attributable
# fraction (AF = AC/CASES) per estimation type, spread to one wide row.
burden_join %>%
group_by(type) %>%
summarise(CASES = sum(CASES), AC = sum(AC))%>%
mutate(AF = AC/CASES ) %>%
myspread(type, c(CASES, AC, AF))
# Comparing overall --------------------------------------------------
#overall
# For each comparison table: *_D = absolute difference (state - origin),
# *_P = relative difference ((state - origin) / origin).
compare_1 <- burden_join %>%
group_by(type) %>%
summarise(CASES = sum(CASES), AC = sum(AC))%>%
mutate(AF = AC/CASES ) %>%
myspread(type, c(CASES, AC, AF)) %>%
mutate(CASES_D = state_CASES - origin_CASES, CASES_P = (state_CASES - origin_CASES)/ origin_CASES ,
AC_D = state_AC - origin_AC, AC_P = (state_AC - origin_AC) / origin_AC,
AF_D = state_AF - origin_AF, AF_P = (state_AF - origin_AF) / origin_AF)
#Urban
# Same comparison stratified by urbanicity.
compare_2 <- burden_join %>%
group_by(URBAN,type) %>%
summarise(CASES = sum(CASES), AC = sum(AC))%>%
mutate(AF = AC/CASES ) %>%
myspread(type, c(CASES, AC, AF)) %>%
mutate(CASES_D = state_CASES - origin_CASES, CASES_P = (state_CASES - origin_CASES)/ origin_CASES ,
AC_D = state_AC - origin_AC, AC_P = (state_AC - origin_AC) / origin_AC,
AF_D = state_AF - origin_AF, AF_P = (state_AF - origin_AF) / origin_AF)
#Income
# Same comparison stratified by income group.
compare_3 <- burden_join %>%
group_by(INCOME, type) %>%
summarise(CASES = sum(CASES), AC = sum(AC)) %>%
mutate(AF = AC/CASES ) %>%
myspread(type, c(CASES, AC, AF)) %>%
mutate(CASES_D = state_CASES - origin_CASES, CASES_P = (state_CASES - origin_CASES)/ origin_CASES,
AC_D = state_AC - origin_AC, AC_P = (state_AC - origin_AC) / origin_AC,
AF_D = state_AF - origin_AF, AF_P = (state_AF - origin_AF) / origin_AF)
#Joining tables
# Stack overall/urban/income comparisons; full_join matches on the shared
# columns, leaving NA for strata that do not apply to a given row.
compare <- compare_1 %>%
full_join(compare_2) %>%
full_join(compare_3) %>%
select(URBAN,INCOME, state_CASES, origin_CASES, CASES_D, CASES_P,
state_AC, origin_AC, AC_D, AC_P,
state_AF, origin_AF, AF_D, AF_P) %>%
as.data.frame()
# Comparing state ---------------------------------------------------------
#overall
# State-level version of the same comparison.
compare_state <- burden_join %>%
group_by(type, STATE) %>%
summarise(CASES = sum(CASES), AC = sum(AC))%>%
mutate(AF = AC/CASES ) %>%
myspread(type, c(CASES, AC, AF)) %>%
mutate(CASES_D = state_CASES - origin_CASES, CASES_P = (state_CASES - origin_CASES)/ origin_CASES ,
AC_D = state_AC - origin_AC, AC_P = (state_AC - origin_AC) / origin_AC,
AF_D = state_AF - origin_AF, AF_P = (state_AF - origin_AF) / origin_AF) %>%
select(STATE, state_CASES, origin_CASES, CASES_D, CASES_P,
state_AC, origin_AC, AC_D, AC_P,
state_AF, origin_AF, AF_D, AF_P) %>%
as.data.frame()
# Printing comparison tables ----------------------------------------------
# Writes manuscript tables 7 and 8; the Output/Tables/ directory must exist.
xlsx::write.xlsx(compare, "Output/Tables/Table_7.xlsx", row.names = F)
xlsx::write.xlsx(compare_state, "Output/Tables/Table_8.xlsx", row.names = F)
|
8dc680c3fb4ad4469f37db9cbfc6c9f9cf65e077 | b996f6021b7c3498d8260944895129772c27815d | /get_functions.R | 96a1563819371dc3a4551e2a51d4631b11250224 | [] | no_license | tastyCanOfMalk/BCIRA | 1c185089e61af9f32799085bedc755d5aead4a27 | f185ac4045f1e57766d966c9d6198e284aeb0261 | refs/heads/master | 2021-01-23T03:37:34.201051 | 2017-04-04T13:41:00 | 2017-04-04T13:41:00 | 86,106,573 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,755 | r | get_functions.R | ####################
# LOAD PACKAGES
# if (require("packrat") == FALSE) install.packages("packrat")
# require("packrat")
if (require("stringr") == FALSE) install.packages("stringr") # string manipulation
if (require("reshape2") == FALSE) install.packages("reshape2") # melting casting
if (require("ggplot2") == FALSE) install.packages("ggplot2") # graphs
if (require("plyr") == FALSE) install.packages("plyr") # rbind fill
if (require("ggthemes") == FALSE) install.packages("ggthemes") # gdocs theme
if (require("gridExtra") == FALSE) install.packages("gridExtra") #
if (require("grid") == FALSE) install.packages("grid") # textgrobs
if (require("gtable") == FALSE) install.packages("gtable") # gtable
require("stringr") # string manipulation
require("reshape2") # melting casting
require("ggplot2") # graphs
require("plyr") # rbind fill
require("ggthemes") # gdocs theme
require("gridExtra") #
require("grid") # textgrobs
require("gtable") # gtable
####################
# GRAPH VARIABLES
# Plot styling constants, consumed positionally elsewhere (e.g. gg.vars[[2]]).
# NOTE(review): the inline tags start at #2 — presumably an element #1 was
# removed at some point; confirm downstream indices before reordering.
gg.vars <- list(
"red", #2 hline color
21, #3 shape max deflection
10, #4 shape cross baseline
8, #5 shape break
4, #6 key points size
1, #7 stat_smooth size
.5 #8 stat_smooth alpha
)
####################
# DEFINE FUNCTIONS
getCross <- function(x, y) {
  ## Locate the point(s) in column `y` of data frame `x` whose value is
  ## closest to zero, i.e. the point nearest the baseline crossing.
  ##
  ## Arguments:
  ##   x: data frame to search
  ##   y: column (index or name) within the data frame
  ##
  ## Returns a list of (row index, value). When several rows tie for the
  ## minimum absolute value, the mean index and mean value are returned.
  deviation <- abs(x[y] - 0)
  hits <- which(deviation == min(deviation))
  vals <- x[hits, y]
  if (length(hits) > 1) {
    return(list(mean(hits), mean(vals)))
  }
  list(hits, vals)
}
# getCross(A.24, 1)
getBreak <- function(x, y) {
  ## Find the break time: starting from the baseline crossing (getCross),
  ## look at the remainder of the series and take the point with the
  ## largest single-step change (max |diff|).
  ##
  ## Arguments:
  ##   x: data frame to search
  ##   y: column within the data frame
  ##
  ## Returns a list of (time index, value at that index). May need the same
  ## multi-value handling as getCross() if which.max ever ties.
  crossTime <- as.numeric(getCross(x, y)[[1]])
  postCross <- x[crossTime:nrow(x), y]
  breakStep <- which.max(abs(diff(postCross, 1)))
  # Re-align the step index to the full series; -1 lands just before the break.
  breakTime <- breakStep + crossTime - 1
  list(breakTime, x[breakTime, y])
}
# getBreak(A.24,1)
|
354d5b5c396cba484c73a34726f7b1b17d0100f2 | 322737d934a4697320224ab97bdccddf936a7729 | /Text Analytics Assignment/1) Wiki Vandals/preparation.R | f5815653c1027a463668008c11540616cca4c04f | [] | no_license | eleven-yi/Analytics-Edge | 651461b48b374f19391ed56b071989cc0f5582c3 | 8997093e44bd72b5902900038a1faa727eb844d1 | refs/heads/master | 2020-12-11T07:34:22.358890 | 2014-06-05T01:03:34 | 2014-06-05T01:05:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,073 | r | preparation.R | getwd()
setwd("C:\\Users\\Roberto\\Dropbox\\Coursera\\Analytics Edge\\05.2) Twitter Intro to Text Analytics")
dir()
tweets = read.csv("tweets.csv", stringsAsFactors=FALSE)
tweets
str(tweets)
tweets$negative = as.factor(Tweets$Avg <= -1)
tweets$negative = as.factor(tweets$Avg <= -1)
table(tweets$Negative)
str(tweets)
tweets(1:5)
tweets[1:5]
tweets[,1:5]
tweets[1]
tweets[2]
tweets[3]
colnames(tweets)[3]
colnames(tweets)[3] = "Negative"
colnames(tweets)[3]
str(tweets)
table(tweets$Negative)
install.packages("tm")
library(tm)
install.packages("SnowballC")
library(SnowballC)
corpus = Corpus(VectorSource(tweets$Tweet))
corpus
corpus[[1]]
corpus = tm_map(corpus, tolower)
corpus[[1]]
# Remove punctuation
corpus = tm_map(corpus, removePunctuation)
corpus[[1]]
# Look at stop words
stopwords("english")[1:10]
stopwords("portuguese")[1:10]
str(stopwords("portuguese"))
corpus = tm_map(corpus, removeWords, c("apple", stopwords("english")))
corpus[[1]]
corpus = tm_map(corpus, stemDocument)
corpus[[1]]
frequencies = DocumentTermMatrix(corpus)
frequencies
inspect(frequencies[1000:1005,505:515])
findFreqTerms(frequencies, lowfreq=20)
findFreqTerms(frequencies, lowfreq=20)
sparse = removeSparseTerms(frequencies, 0.995)
sparse
tweetsSparse = as.data.frame(as.matrix(sparse))
colnames(tweetsSparse) = make.names(colnames(tweetsSparse))
tweetsSparse$Negative = tweets$Negative
# Split the data
library(caTools)
set.seed(123)
split = sample.split(tweetsSparse$Negative, SplitRatio = 0.7)
trainSparse = subset(tweetsSparse, split==TRUE)
testSparse = subset(tweetsSparse, split==FALSE)
findFreqTerms(frequencies, lowfreq=100)
library(rpart)
library(rpart.plot)
str(trainSparse)
tweetCART = rpart(Negative ~ ., data=trainSparse, method="class")
prp(tweetCART)
predictCART = predict(tweetCART, newdata=testSparse, type="class")
table(testSparse$Negative, predictCART)
# Compute accuracy
(294+18)/(294+6+37+18)
# Baseline accuracy
table(testSparse$Negative)
300/(300+55)
library(randomForest)
set.seed(123)
tweetRF = randomForest(Negative ~ ., data=trainSparse)
predictRF = predict(tweetRF, newdata=testSparse)
table(testSparse$Negative, predictRF)
(293+21)/(293+7+34+21)
tweetLog = glm(Negative ~ ., data=trainSparse, family=binomial)
tweetLog = glm(Negative ~ ., data=trainSparse, family="binomial")
predictions = predict(tweetLog, newdata=testSparse, type="response")
table(testSparse$Negative, predictions >= 0.5)
(253+33)/(253+47+22+33)
str(testSparse)
setwd("C:\\Users\Roberto\\Dropbox\\Coursera\\Analytics Edge\\05.4) Text Analytics in the Courtroom")
setwd("C:\\Users\\Roberto\\Dropbox\\Coursera\\Analytics Edge\\05.4) Text Analytics in the Courtroom")
emails = read.csv("energy_bids.csv", stringsAsFactors=FALSE)
str(emails)
emails$email[1]
strwrap(emails$email[1])
emails$responsive[1]
strwrap(emails$email[2])
emails$responsive[2]
table(emails$responsive)
139/(139+716)
library(tm)
corpus = Corpus(VectorSource(emails$email))
corpus[[1]]
corpus <- tm_map(corpus, tolower)
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, removeWords, stopwords("english"))
corpus <- tm_map(corpus, stemDocument)
# Look at first email
corpus[[1]]
dtm = DocumentTermMatrix(corpus)
dtm
# Remove sparse terms
dtm = removeSparseTerms(dtm, 0.97)
dtm
labeledTerms = as.data.frame(as.matrix(dtm))
# Add in the outcome variable
labeledTerms$responsive = emails$responsive
str(labeledTerms)
library(caTools)
set.seed(144)
spl = sample.split(labeledTerms$responsive, 0.7)
train = subset(labeledTerms, spl == TRUE)
test = subset(labeledTerms, spl == FALSE)
# Build a CART model
library(rpart)
library(rpart.plot)
emailCART = rpart(responsive~., data=train, method="class")
prp(emailCART)
pred = predict(emailCART, newdata=test)
pred[1:10,]
pred.prob = pred[,2]
table(test$responsive, pred.prob >= 0.5)
(195+25)/(195+25+17+20)
table(test$responsive)
215/(215+42)
library(ROCR)
predROCR = prediction(pred.prob, test$responsive)
str(predROCR)
perfROCR = performance(predROCR, "tpr", "fpr")
plot(perfROCR, colorize=TRUE)
performance(predROCR, "auc")@y.values
ls()
rm(c(ls())
)
rm(ls())
?rm
c(ls())
rm(list = ls())
ls()
c(0:10)
c(0:10) - c(3:5)
intersect(c(0:10), c(3:5))
setdiff(c(0:10), c(3:5))
dir
dir()
setwd("..\\05.5) Assignment")
dir()
wiki = read.csv("wiki.csv", stringAsFactors=FALSE)
wiki = read.csv("wiki.csv", stringsAsFactors=FALSE)
str(wiki)
table(wiki$Vandal)
corpusAdded = Corpus(VectorSource(wiki$Added))
corpusAdded = tm_map(corpusAdded, removeWords, stopwords("english))
corpusAdded = tm_map(corpusAdded, removeWords, stopwords("english"))
corpusAdded = tm_map(corpusAdded, stemDocument)
dtmAdded = DocumentTermMatrix(corpusAdded)
length(stopwords("english"))
str(dtmAdded)
length(dtmAdded)
dtmAdded
sparseAdded = removeSparseTerms(dtmAdded, 0.997)
sparseAdded
wordsAdded = as.data.frame(as.matrix(sparseAdded))
colnames(wordsAdded) = paste("A", colnames(wordsAdded))
paste("A", "B")
wordsAdded
|
d5c258b2c7f5fb94049c8427c967b10bafabc17d | 970908488bcf14ed44cc26768a2a5e64114d16cf | /scripts/05_figure_variants.R | d58698a8ba973450806a2809014b779fc60bff62 | [] | no_license | Stijn-A/SARS-CoV-2_Protection_SGTF_Delta_Omicron_BA1_BA2 | 4d67549d3289cb17a453beddd52b870c25e22760 | 713cb47ed51ac50276ab917a6d98e063e529b3f5 | refs/heads/main | 2023-04-16T23:26:04.905439 | 2022-08-10T08:34:09 | 2022-08-10T08:34:09 | 499,418,118 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,076 | r | 05_figure_variants.R | # Figure S1
data_teststraten_lab_c <- data_teststraten_lab %>%
left_join.(data_teststraten_all %>% select(!Afspraak_start), by = "Monsternummer") %>%
mutate(`S result` = `S result` %>% factor(levels = c("Not detected", "Detected"))) %>%
filter(`S result` %in% c("Not detected", "Detected"))
plot_1 <- ggplot() +
geom_bar(data = data_teststraten_lab_c,
aes(x = Afspraak_start_datum,
fill = `S result`), width = 0.9
) +
geom_rect(
mapping = aes(linetype = "Delta - Omicron BA.1"),
xmin = data_teststraten_ve_cohort1$Afspraak_start_datum %>% min - 0.7,
xmax = data_teststraten_ve_cohort1$Afspraak_start_datum %>% max + 0.5 ,
ymin = -20,
ymax = 20000,
col = "black",
fill = NA
) +
geom_rect(
mapping = aes(linetype = "Omicron BA.1 - BA.2"),
xmin = data_teststraten_ve_cohort2$Afspraak_start_datum %>% min - 0.5,
xmax = data_teststraten_ve_cohort2$Afspraak_start_datum %>% max + 0.7 ,
ymin = -20,
ymax = 20000,
col = "black",
fill = NA
) +
scale_colour_brewer(type = "qual") +
scale_fill_brewer(type = "qual") +
scale_linetype_manual(values = c(3,4), name = "Cohort") +
scale_x_date(breaks = seq(data_teststraten_lab$Afspraak_start %>% min %>% as.Date(), data_teststraten_lab$Afspraak_start %>% max %>% as.Date(), 7),
limits = c(data_teststraten_lab_c$Afspraak_start_datum %>% min - 1, data_teststraten_lab_c$Afspraak_start_datum %>% max + 1),
expand = expansion(add = 0.5)) +
#scale_y_continuous(breaks = seq(0, 6000, 1000)) +
theme_minimal() +
theme(
text = element_text(size = 7),
axis.text.x = element_blank(),
axis.title.x = element_blank(),
legend.justification="left",
legend.margin=margin(0,0,0,0),
legend.box.margin=margin(-10,-1,-10,-10),
)
# Figuur kiemsurv
plot_2 <-
ggplot() +
geom_bar(data = data_kiemsurv %>% filter(`Sample date` %in% seq(data_teststraten_lab_c$Afspraak_start_datum %>% min,
data_teststraten_lab_c$Afspraak_start_datum %>% max,1)),
aes(x = `Sample date`, color = `Variant (WGS)`, fill = `Variant (WGS)`), width = 0.53
) +
geom_rect(
mapping = aes(linetype = "Delta - Omicron BA.1"),
xmin = data_teststraten_ve_cohort1$Afspraak_start_datum %>% min - 0.7,
xmax = data_teststraten_ve_cohort1$Afspraak_start_datum %>% max + 0.5 ,
ymin = -3,
ymax = 215,
col = "black",
fill = NA
) +
geom_rect(
mapping = aes(linetype = "Omicron BA.1 - Omicron BA.2"),
xmin = data_teststraten_ve_cohort2$Afspraak_start_datum %>% min - 0.5,
xmax = data_teststraten_ve_cohort2$Afspraak_start_datum %>% max + 0.7 ,
ymin = -3,
ymax = 215,
col = "black",
fill = NA
) +
scale_colour_brewer(type = "qual", palette = 2) +
scale_fill_brewer(type = "qual", palette = 2) +
scale_linetype_manual(values = c(3,4)) +
scale_x_date(breaks = seq(data_teststraten_lab$Afspraak_start %>% min %>% as.Date(), data_teststraten_lab$Afspraak_start %>% max %>% as.Date(), 7),
expand = expansion(add = 0.5),
limits = c(data_teststraten_lab_c$Afspraak_start_datum %>% min - 1, data_teststraten_lab_c$Afspraak_start_datum %>% max + 1)) +
guides(linetype = "none") +
theme_minimal() +
theme(text = element_text(size = 7),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5),
#legend.position="top",
legend.justification="left",
legend.margin=margin(0,0,0,0),
legend.box.margin=margin(-10,-1,-10,-10),)
N_wgs_samples <- data_kiemsurv %>%
filter(`Sample date` %in% seq(data_teststraten_lab_c$Afspraak_start_datum %>% min,
data_teststraten_lab_c$Afspraak_start_datum %>% max,1)) %>%
nrow
figuur_periodes_data_7 <- plot_grid(plot_1, plot_2,
align = "v", labels = c("A", "B"),
ncol = 1,
rel_heights = c(1,1.2))
|
dae95c5cfd3e968edbc618c09e805ab2cc3f82e1 | d85d612424631ad5ba4eae914edca72d2f91fd23 | /2014-mira-plos/code/numerical-comparison/util.r | 3eef57fb7160dfa1742280c2e66fe07398f9dc2e | [] | no_license | hesenp/random-paper | 23638bdd3df5ac6b556605bbc72238f00073fed3 | 4630263cd794d7bbd11f1b3bddb12a92c044dc51 | refs/heads/master | 2021-01-20T02:47:51.353891 | 2015-09-26T22:46:19 | 2015-09-26T22:46:19 | 21,009,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,092 | r | util.r |
## this file contains the programs used to generate the numbers for
## permutation test. we will generate: 1) mutual information 2)
## brownian covariate, 3) mira score, 4) mean distance.
library(FNN)
library(boot)
library(energy)
## this part of the code generates the test using mutual information.
###########################################
## HOW TO USE:
## use compare_test function to generate permutation test p-values
## using one of the four methods: Mira score, mutual information, mean
## observation distance, and brownian covariate.
## for example:
## x and y are matrices of 100*10 and 100*2 dimensions, respectively.
# x <- matrix(rnorm(1000),100)
# y <- matrix(rnorm(200),100)
## to generate the p-values of Mira score, use:
## compare_test(x,y,"mira_score")
## for other methods, substitute "mira_score" with one of the
## following: "mutual_information", "mean_distance", or
## "brownian_covariate"
#################################################
dist_fun <- function(x, y, type) {
  ## Graph-based association statistic on the joint sample cbind(x, y).
  ## `type` (partial matching allowed) selects the statistic:
  ##   "mutual_information" - mean log nearest-neighbour distance (FNN)
  ##   "mira_score"         - mean nearest-neighbour distance (FNN)
  ##   "mean_distance"      - mean pairwise distance between observations
  p <- pmatch(type,
              c("mutual_information",
                "mira_score",
                "mean_distance"))
  # BUGFIX: an unmatched `type` used to fall silently into the mean-distance
  # branch (NA %in% c(1, 2) is FALSE); fail loudly instead.
  if (is.na(p)) {
    stop("unknown 'type': ", type, call. = FALSE)
  }
  if (p == 3) {
    return(mean(as.numeric(dist(cbind(x, y)))))
  }
  nn_dist <- get.knn(cbind(x, y), k = 1)$nn.dist
  if (p == 1) mean(log(nn_dist)) else mean(nn_dist)
}
## Permutation test of association between x and y using the statistic
## selected by `type` (see dist_fun). Rows of x are permuted R times via
## boot(sim = "permutation") while y is held fixed; the p-value is the
## normal approximation pnorm(observed, mean(perm), sd(perm)).
## NOTE(review): this is a lower-tail normal-approximation p-value, not an
## empirical permutation p-value — confirm that direction is intended for
## all statistic types.
perm_test <- function(x,y,type,R=100){
# Statistic for boot(): permute rows of x, recompute the graph statistic
# against the fixed y.
boot_core <- function(original,index,type){
return(dist_fun(original[index,],y,type))
}
out <- boot(data=x,
statistic = boot_core,
R=R,
sim="permutation",
type=type)
# out$t0 = observed statistic; out$t = permutation replicates.
return(pnorm(out$t0,mean(out$t),sd(out$t)))
}
## the function below would generate the distribution of observation
## distance and do the association testing using kolgonorove test.
# All pairwise observation distances for the joint sample (x, y),
# returned as a plain numeric vector.
dist_test_core <- function(x, y) {
  joint <- cbind(x, y)
  as.numeric(dist(joint))
}
dist_test <- function(x, y, R = 50) {
  ## Association test based on the observation-distance distribution:
  ## compare the observed pairwise distances against a permutation null
  ## (rows of y shuffled R times) with a Kolmogorov-Smirnov test.
  ## Returns the KS p-value.
  observed <- dist_test_core(x, y)
  permuted <- as.numeric(replicate(R,
                                   dist_test_core(x,
                                                  y[sample(nrow(y)), ])))
  ks.test(observed, permuted)$p.value
}
## finally, test the performance with one general portal
compare_test <- function(x, y, type, R = 100) {
  ## Single entry point for all association tests:
  ##   "brownian_covariate" -> energy::dcov.test p-value
  ##   "ks_dist"            -> dist_test (KS vs. permutation null)
  ##   anything else        -> perm_test(x, y, type, R), which dispatches on
  ##                           `type` itself (e.g. "mira_score").
  p_select <- pmatch(type, c("brownian_covariate",
                             "ks_dist"))
  # BUGFIX: the original relied on switch(p_select, ...) yielding NULL for an
  # unmatched type, but pmatch returns NA there and switch(NA_integer_, ...)
  # errors — so the perm_test fallback was never reached. Branch explicitly.
  if (is.na(p_select)) {
    return(perm_test(x, y, type, R))
  }
  switch(p_select,
         dcov.test(x, y)$p.value,
         dist_test(x, y))
}
## ## test
## n <- 100
## p <- 5
## a <- matrix(rnorm(n*p),n)
## b <- matrix(rnorm(n*p,sd=abs(a)),n)
## compare_test(a,b,"ks")
|
f8c2d0ae9d01775de130249977ac141e1fe3f041 | 0a97f73c1cf1dafca0206c0a1eec08cdf8e7698b | /Supplementary Phase Transition Script FINAL 011017.R | 978cfd185a537c41aad8eb4ef20d195af8f289f5 | [] | no_license | travisjwiltshire/PhaseTransitionsDuringTeamCollaboration | 1f64466dd3facc4598f1d3de8f23df24e46aaa7e | 52273465217a43d627a3f04d87ed6ff55706499b | refs/heads/master | 2021-01-13T08:20:21.836266 | 2020-07-23T12:19:48 | 2020-07-23T12:19:48 | 71,781,249 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21,526 | r | Supplementary Phase Transition Script FINAL 011017.R | # Loading packages, functions, and data -----------------------------------
require(tseriesChaos)
require(plyr)
require(entropy)
require(zoo)
require(ggplot2)
require(Hmisc)
require(plotrix)
require(dplyr)
require(tidyr)
#data should be in long format with a repeating ID variable for each team and a column for the communications code time series
data<-read.csv(file= "CommCodesData_Example.csv",header=TRUE, sep=",")
# Numeric 1..n team identifier derived from the (possibly character) TeamID.
data$TeamIDvar<-as.numeric(as.factor(data$TeamID))
# Add a within-team utterance counter (Order) by splitting per team and
# re-stacking. NOTE(review): `list` shadows base::list here, and data_c grows
# by rbind on each iteration — works, but consider renaming/preallocating.
list<-unique(data$TeamIDvar)
for (j in 1:length(list)){
case<-list[j]
subdata <- subset(data, data$TeamIDvar==case)
subdata$Order<-1:nrow(subdata)
if(j==1){
data_c<-subdata
} else{
data_c<-rbind(data_c,subdata)
}
}
#create cbind function needed for windowed entropy loop
# Column-bind objects of unequal length: transpose each input, row-bind the
# transposed pieces with plyr::rbind.fill (which pads short rows with NA),
# then transpose the result back so inputs end up side by side as columns.
cbind.fill <- function(...) {
  inputs <- list(...)
  flipped <- lapply(lapply(inputs, t), as.data.frame)
  data.frame(t(rbind.fill(flipped)))
}
#This function converts the input time series of categorical codes into the required observed counts vector, calculates the entropy, and returns the value
CountAndEntropy <- function(x) {
  ## Tally how often each communication code (1..15) occurs in x and return
  ## the entropy of that observed-counts vector (entropy::entropy, natural
  ## log / maximum-likelihood estimator by default).
  ## Replaces 15 copy-pasted sum(x == k) lines with a single vapply; NA in x
  ## still propagates to the counts exactly as before.
  require(entropy)
  counts <- vapply(1:15, function(code) sum(x == code), numeric(1))
  entropy(counts)
}
# Examining AMI for appropriate window size -------------------------------
#This loop calculates average mutual information for each time series
# One AMI profile (lags 0..100, tseriesChaos::mutual) per team; columns of
# mutualdata are teams, padded to equal length by cbind.fill.
mutualdata<- c()
list<-unique(data$TeamIDvar)
for (i in 1:length(list)){
rm(res)
# NOTE(review): rm(res) emits a warning on the first pass (res not yet
# defined); harmless but noisy.
case<-list[i]
subdata <- subset(data, data$TeamIDvar==case)
res = mutual(subdata$Code, lag.max=100)
res = as(res,"numeric")
if(i==1){
mutualdata<-res
} else{
mutualdata<-cbind.fill(mutualdata,res)
}
}
# This loop returns a vector of the lags for first local minima in each of the AMI series and then descriptives can be calculated
# NOTE(review): which.min returns the position of the GLOBAL minimum, not the
# first local minimum the comment describes, and positions are 1-based while
# the AMI profile starts at lag 0 — confirm both before interpreting amiMins.
amiMins<-c(1:NCOL(mutualdata))
for (j in 1:NCOL(mutualdata)){
amiMins[j]<-which.min(mutualdata[,j])
}
# Descriptives of the per-team minimizing lags.
mean(amiMins)
range(amiMins)
sd(amiMins)
# Calculating sliding window entropy --------------------------------------
#here is an example way to calculate and plot the results of sliding window entropy on a single team
#width determines the window size and by determines the iteration interval (i.e., how far to slide the window)
Team1Comm<-subset(data,TeamIDvar==1)
windowentropy25<-rollapply(Team1Comm$Code, width=25,by=1, FUN=CountAndEntropy,align="left") #this one seems to look most appropriate
plot(windowentropy25, type='l', ylab="entropy", xlab="Window Number/Time")
#calculate sliding window entropy for each team
#return results in long data format where all values are in one column
#also adds in grouping variable and order number (Time)
winentropydatalong <- NULL
list<-unique(data$TeamIDvar)
for (j in 1:length(list)){
rm(res)
case<-list[j]
subdata <- subset(data, data$TeamIDvar==case)
# Windowed entropy for this team (width 25, step 1, left-aligned windows).
res = as.data.frame(rollapply(subdata$Code, width=25,by=1, FUN=CountAndEntropy,align="left"))
res$Order<-1:nrow(res)
temp<-cbind(res,rep(subdata$TeamIDvar[1],times=nrow(res)))
if(j==1){
winentropydatalong<-temp
} else{
winentropydatalong<-rbind(winentropydatalong,temp)
}
}
names(winentropydatalong)<-c("entropy","Order","TeamID")
#write out the file
write.csv(winentropydatalong,file="WinEntropyDataLong.csv",row.names=FALSE,na="-999")
# Peak identification -----------------------------------------------------
#This uses the winentropydatalong data frame/file created above
#Generate a lead and a lag and remove rows with missing data
# Lead/lag are built per team (Hmisc::Lag) so values never cross team
# boundaries; na.omit then drops each team's first and last window.
list<-unique(winentropydatalong$TeamID)
for (j in 1:length(list)){
case<-list[j]
subdata <- subset(winentropydatalong, winentropydatalong$TeamID==case)
subdata$entropy_ld1<-Lag(subdata$entropy,-1)
subdata$entropy_lg1<-Lag(subdata$entropy,1)
if(j==1){
data1<-subdata
} else{
data1<-rbind(data1,subdata)
}
}
data1<-na.omit(data1)
# Flag local peaks: peakpoint is 1 when the entropy value is strictly higher
# than both its lead (entropy_ld1) and lag (entropy_lg1) neighbour, else 0.
# Vectorized replacement for the former element-wise loop, which re-indexed
# the data frame on every iteration; data1 was na.omit-ed above, so the
# comparisons contain no NA.
data1$peakpoint <- as.numeric(data1$entropy > data1$entropy_ld1 &
                                data1$entropy > data1$entropy_lg1)
#example plot for a single team showing identified peak points
#The xlim needs to be manually adjusted here to the length of the time series
Team1<-subset(data1,TeamID==1)
ss<-subset(Team1,Team1$peakpoint==1)
# Entropy trace with vertical lines at each identified peak.
# NOTE(review): labs(list(...)) is legacy ggplot2 usage — newer versions
# expect labs(title = ..., x = ..., y = ...) directly; confirm it still works
# with the installed ggplot2.
p<-ggplot(data=Team1, aes(x=Order,y=entropy))+geom_line(linetype=1)+geom_point() + geom_vline(xintercept=ss$Order)+xlim(1,347)+labs(list(title="Entropy Time Series", x= "Time Unit", y = "Entropy Value"))
# Smoothing procedure----------------------------------------------------
# Equal-weight moving-average filter kernels ma2..ma15: each maK is a vector
# of length K with weights 1/K (summing to 1). Replaces 14 copy-pasted
# c(1,...,1)/K definitions. If certain time points should be weighted
# differently, define that kernel by hand, e.g. ma3 <- c(1, 2, 1) / 4.
for (k in 2:15) {
  assign(paste0("ma", k), rep(1, k) / k)
}
#Example smoothing for a single team using the moving average of 5
smEnt<-as.data.frame(na.omit(stats::filter(Team1$entropy, ma5)))
#apply this loop to original windowed entropy data frame
#be sure to change ma# in filter function if a different moving average window is desired
# Per-team moving-average smoothing of the entropy series; filtering
# per team keeps the kernel from bleeding across team boundaries. na.omit
# drops the edge windows the filter cannot fill.
smoothdata <- NULL
list<-unique(winentropydatalong$TeamID)
for (j in 1:length(list)){
rm(res)
case<-list[j]
subdata <- subset(winentropydatalong, winentropydatalong$TeamID==case)
res = as.data.frame(as.numeric(na.omit(stats::filter(subdata$entropy, ma5)))) #modify this line for different moving averages
res$Order<-1:nrow(res)
temp<-as.data.frame(cbind(res,rep(subdata$TeamID[j],times=nrow(res))))
if(j==1){
smoothdata<-temp
} else{
smoothdata<-as.data.frame(rbind(smoothdata,temp))
}
}
names(smoothdata)<-c("ent_smooth","Order","TeamID")
#lead and lag the smoothed data
# Same per-team lead/lag construction as for the raw entropy series.
list<-unique(smoothdata$TeamID)
for (j in 1:length(list)){
case<-list[j]
subdata <- subset(smoothdata, smoothdata$TeamID==case)
subdata$ent_smooth_ld1<-Lag(subdata$ent_smooth,-1)
subdata$ent_smooth_lg1<-Lag(subdata$ent_smooth,1)
if(j==1){
smoothdata1<-subdata
} else{
smoothdata1<-rbind(smoothdata1,subdata)
}
}
smoothdata1<-na.omit(smoothdata1)
# Peak identification for smoothed data----------------------------------------------------
#peak picking on smoothed data
# Flag local peaks in the smoothed entropy series: 1 when a point exceeds
# both its lead and lag neighbour, else 0. Vectorized replacement for the
# former element-wise loop; rows with NA were dropped by na.omit above.
smoothdata1$peakpoint <- as.numeric(smoothdata1$ent_smooth > smoothdata1$ent_smooth_ld1 &
                                      smoothdata1$ent_smooth > smoothdata1$ent_smooth_lg1)
# NOTE(review): this ss assignment is immediately overwritten below with the
# Team-1-only subset — dead store, kept for fidelity.
ss<-subset(smoothdata1,smoothdata1$peakpoint==1)
#example plot for a single team showing identified peak points for smoothed data
#The xlim needs to be manually adjusted here to the length of the time series, but it should be kept at the same value of plot p if a multiplot is desired.
Team1<-subset(smoothdata1,TeamID==1)
ss<-subset(Team1,Team1$peakpoint==1)
# Smoothed entropy trace with vertical lines at each identified peak.
s<-ggplot(data=Team1, aes(x=Order,y=ent_smooth))+geom_line(linetype=1)+geom_point() + geom_vline(xintercept=ss$Order)+xlim(1,347)+labs(list(title="Smoothed Entropy Time Series", x= "Time Unit", y = "Entropy Value"))
# Peak total and proportion measures -----------------------------------------------------------
#Create new variables that take the total number of peak points and proportion of peaks to total length of time series
##For original entropy data frame with peak point variable added
# Per-team peak summary for the raw entropy series: series length, total
# number of peaks, and peak proportion (peaks / series length).
# Fixes: rows are collected in a preallocated list instead of rbind-growing
# the result; the TeamID column now records the actual team id (the original
# wrote the loop index j, which only matches when ids are exactly 1..n in
# order); and base::list is no longer shadowed by a variable named `list`.
team_ids <- unique(data1$TeamID)
peak_rows <- vector("list", length(team_ids))
for (j in seq_along(team_ids)) {
  case <- team_ids[j]
  subdata <- subset(data1, data1$TeamID == case)
  peak_rows[[j]] <- data.frame(
    TeamID = case,
    TsLength = length(subdata$entropy),
    TotalPeaks = sum(subdata$peakpoint),
    PeakProp = sum(subdata$peakpoint) / length(subdata$entropy)
  )
}
PeakRes <- do.call(rbind, peak_rows)
write.csv(PeakRes, file="PeakResOrig.csv",row.names=FALSE)
#Create new variables that takes the total number of peak points and proportion of peaks to total length of time series
##For Smoothed data
# Per-team peak summary for the SMOOTHED entropy series (same fixes as the
# raw-series summary: preallocated collection, actual team id in TeamID
# rather than the loop index, no shadowing of base::list).
team_ids <- unique(smoothdata1$TeamID)
peak_rows <- vector("list", length(team_ids))
for (j in seq_along(team_ids)) {
  case <- team_ids[j]
  subdata <- subset(smoothdata1, smoothdata1$TeamID == case)
  peak_rows[[j]] <- data.frame(
    TeamID = case,
    TsLength = length(subdata$ent_smooth),
    TotalPeaks = sum(subdata$peakpoint),
    PeakProp = sum(subdata$peakpoint) / length(subdata$ent_smooth)
  )
}
PeakResSm <- do.call(rbind, peak_rows)
write.csv(PeakResSm, file="PeakResSmMA5.csv",row.names=FALSE)
# Smoothing Measures ------------------------------------------------------
#This code uses a combined data (perfdata) file that utilizes the peak proportion data from the moving average window sizes 2-15
#calculate confidence intervals on means of each moving average window for peak proportions
#must not have any missing values
# BUGFIX: the original line read `perfdata<--read.csv(...)`, i.e. the data
# frame was negated by the stray unary minus and then "repaired" by abs().
perfdata <- read.csv(file = "PeakResAll.csv", header = TRUE, sep = ",")
perfdata <- abs(perfdata)  # kept: proportions are reported as magnitudes

# Mean, standard error (plotrix::std.error), and 95% CI half-width of the
# peak proportion for the raw series and each moving-average window (2-15).
# This replaces ~85 hand-expanded per-column lines, which contained two
# copy-paste errors: the MA8 standard error was computed from the MA7 column,
# and the MA3 block was duplicated. Note the MA8 CI values therefore differ
# (correctly) from the original script's output.
prop_cols <- c("OrigPeakProp", paste0("MA", 2:15, "PeakProp"))
mean_peak_prop <- unname(vapply(prop_cols,
                                function(col) mean(perfdata[[col]], na.rm = TRUE),
                                numeric(1)))
se_peak_prop <- unname(vapply(prop_cols,
                              function(col) std.error(perfdata[[col]], na.rm = TRUE),
                              numeric(1)))
ci <- se_peak_prop * 1.96          # 95% CI half-width
up_ci_peak <- mean_peak_prop + ci  # upper CI bound per window size
low_ci_peak <- mean_peak_prop - ci # lower CI bound per window size
peak_df <- as.data.frame(cbind(mean_peak_prop, ci))
#Use the results of these plots to determine the appropriate moving average to use
#The window where the confidence intervals overlap is a justifiable choice, but this example has two few data points for a reliable CI
plot(mean_peak_prop,type="b",main="Change in Peak Proportions", xlab="Window Size",ylab="Peak Proportion")
lines(up_ci_peak,lty=2)
lines(low_ci_peak,lty=2)
pd <- position_dodge(0.1)
ggplot(peak_df, aes(x=c(1:15), y=mean_peak_prop)) +
geom_errorbar(aes(ymin=mean_peak_prop-ci, ymax=mean_peak_prop+ci), width=.1) + geom_line() + geom_point()+
labs(x="Window Size", y="Peak Proportion", title="Mean Peak Proportion Across Moving Average Window Sizes with 95% CIs" )
# Creating epoch data for MLM ---------------------------------------------
#Take peak data and then create epoch ID using smooth data
list<-unique(smoothdata1$TeamID)
for (j in 1:length(list)){
case<-list[j]
subdata <- subset(smoothdata1, smoothdata1$TeamID==case)
epoc<-subdata$peakpoint
subdata$epoch_id<-cumsum(epoc)+1
if(j==1){
data2<-subdata
} else{
data2<-rbind(data2,subdata)
}
}
#take original code time series, subset by Team ID and Order 2:length(Order-29)
#add in epoch variable from previous data frame
#also adds in separate variables reprsenting the count of each individual code
#Names need to be modified dependoning on the number of codes
list<-unique(data_c$TeamID)
for (i in 1:length(list)){
case<-list[i]
subdata <- subset(data_c, data_c$TeamID==case)
len<-length(subdata$Order)-29 #This must be modified based on window size and selected moving average
subdata1<-subdata[2:len,]
if(i==1){
data3<-subdata1
} else{
data3<-rbind(data3,subdata1)
}
}
data3$EpochID<-data2$epoch_id
ind<-model.matrix(~factor(data3$Code)-1)
data4<-cbind(data3,ind)
names(data4)<-c("TeamID","Code","TeamIDvar","Order","EpochID","C1","C2","C3","C4","C6","C7","C8","C9","C10","C11","C12","C13","C14","C15")
#No C5 in the current examples so they are ommited in the above names
#This adds the weight variable to the data frame based on the length of the epoch
#The output of this part is what can be used in MLM to test for differences in code distributions between epochs
list1<-unique(data4$TeamIDvar)
for (j in 1:length(list1)){
case1<-list1[j]
subdata1<-subset(data4,data4$TeamIDvar==case1)
list<-unique(subdata1$EpochID)
for (i in 1:length(list)){
case<-list[i]
subdata<-subset(subdata1,subdata1$EpochID==case)
temp<-nrow(subdata)
subdata$weight<-rep(1/temp, time=length(subdata$EpochID))
if(i==1){
data5<-subdata
} else{
data7<-rbind(data5,subdata)
}
}
if(j==1){
data6<-data5
} else{
data6<-rbind(data6,data5)
}
}
write.csv(data6, file="CodeTs_w_EpochId.csv",row.names=FALSE)
# Output Entropy Descriptives from Peak Points Only -----------------------
#code to take the average entropy and variability in entropy from peaks points only
#This uses the data frame that includes the smoothed entropy, peakpoint, and epoc_id
teamvar<-read.csv(file= "teamvariables.csv",header=TRUE, sep=",")
#This extra data file is added in so that different team-level variables could be added into the data file for further analyses
i=1
list<-unique(data2$TeamID)
for (i in 1:length(list)){
case<-list[i]
subdata <- subset(data2, data2$TeamID==case)
peaksubdata<-subset(subdata, subdata$peakpoint==1)
avePent<-mean(peaksubdata$ent_smooth) #These are some examples of entropy properties at the peak points that could be extracted
varPent<-var(peaksubdata$ent_smooth)
minPent<-min(peaksubdata$ent_smooth)
maxPent<-max(peaksubdata$ent_smooth)
perf<-teamvar$Performance_Rescale_NoZeros[i]#This variable and the next two would need to be modified to use different variables
gendercomp<-teamvar$GenderComp[i]
minKnowAss<-teamvar$minKnowAss[i]
temp<-as.data.frame(cbind(i,avePent, varPent, minPent,maxPent,perf, gendercomp,minKnowAss))
if(i==1){
entropyres<-temp
} else{
entropyres<-rbind(entropyres,temp)
}
}
write.csv(entropyres,file="entropyPeakres2.csv",row.names=FALSE,na="-999")
# Random Entropy Generation -----------------------------------------------
#All the analyses can be rerun on the randomly generated data
#The sampling values should be changed based on the number of codes (1:15 in this case)
for (i in 1:2){
randent<-as.data.frame(sample(1:15,size=perfdata$OrigTsLength[i],replace=TRUE))
randent$TeamIDvar<-rep(i, perfdata$OrigTsLength[i])
randent$TeamID<-rep(i, perfdata$OrigTsLength[i])
names(randent)<-c("Code", "TeamIDvar", "TeamID")
if (i==1){
randEnt<-randent
} else {
randEnt<-rbind(randEnt,randent)
}
}
data<-randEnt
# Plotting Code Distributions By Phases ----------------------------------------------------------------
#This part of the code uses the epoch segmented data frame
#For the MLM the code below subsets by each team and for each epoch within a team
#It then provide frequency counts for each of the communication codes
#If a different number of codes are used then those lines summing the codes would need to be modified
epoc_freq<-NULL
list<-unique(data3$TeamIDvar)
for (j in 1:length(list)){
case<-list[j]
subdata <- subset(data3, data3$TeamIDvar==case)
list2<-unique(subdata$EpochID)
for (i in 1:length(list2)) {
case2<-list2[i]
subdata2<-subset(subdata,subdata$EpochID==case2)
TeamIDvar<-subdata$TeamIDvar[j]
EpochID<-i
C1<-sum(subdata2$Code==1)
C2<-sum(subdata2$Code==2)
C3<-sum(subdata2$Code==3)
C4<-sum(subdata2$Code==4)
C5<-sum(subdata2$Code==5)
C6<-sum(subdata2$Code==6)
C7<-sum(subdata2$Code==7)
C8<-sum(subdata2$Code==8)
C9<-sum(subdata2$Code==9)
C10<-sum(subdata2$Code==10)
C11<-sum(subdata2$Code==11)
C12<-sum(subdata2$Code==12)
C13<-sum(subdata2$Code==13)
C14<-sum(subdata2$Code==14)
C15<-sum(subdata2$Code==15)
temp<-as.data.frame(cbind(TeamIDvar,EpochID, C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11,C12,C13,C14,C15))
if (i==1){
epoc.1<-temp
} else {
epoc.1<-rbind(epoc.1,temp)
}
}
if(j==1){
epoc_freq<-epoc.1
} else{
epoc_freq<-rbind(epoc_freq,epoc.1)
}
}
write.csv(epoc_freq, file="Code_Epoch_freq.csv",row.names=FALSE)
#Generate Phase Barplots for an Example Team
Team2<-subset(epoc_freq,epoc_freq$TeamIDvar==2)
Team2<-Team2[3:17]
par(mfrow=c(5, 3))
barplot(unlist(Team2[1,]),main="Phase 1",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[2,]),main="Phase 2",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[3,]),main="Phase 3",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[4,]),main="Phase 4",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[5,]),main="Phase 5",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[6,]),main="Phase 6",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[7,]),main="Phase 7",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[8,]),main="Phase 8",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[9,]),main="Phase 9",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[10,]),main="Phase 10",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[11,]),main="Phase 11",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[12,]),main="Phase 12",ylim = c(0,7),ylab="Code Freq")
barplot(unlist(Team2[13,]),main="Phase 13",ylim = c(0,7),ylab="Code Freq") |
3f0340e246beb17e052ef646dc3c6e9d4490ea98 | dde5026f4e97dc47c951ea9c3695bc1bab9ba7fb | /man/fmriu.io.open_timeseries.Rd | bef6ff2f49b7215e6b30ace81d52e31473bcc357 | [] | no_license | neurodata/mriutils | b7a7114dda0c2ec797864c50cd290f732db4488b | 2dba874c3d8cbad530865de5018bc5ea0a730edb | refs/heads/master | 2021-10-15T22:54:29.103469 | 2019-02-06T10:21:55 | 2019-02-06T10:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,368 | rd | fmriu.io.open_timeseries.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/open_timeseries.R
\name{fmriu.io.open_timeseries}
\alias{fmriu.io.open_timeseries}
\title{Open Timeseries}
\usage{
fmriu.io.open_timeseries(fnames, dataset_id = "", atlas_id = "",
verbose = FALSE, rtype = "list")
}
\arguments{
\item{fnames:}{[n] a vector of filenames, with separation by underscores IE, dataset_subject_run_(other information).rds. Alternatively, a path to a directory containing rds files of an appropriate naming convention. See `fmriutils` vignette for details.}
\item{dataset_id:}{[1] the dataset id in the filenames.}
\item{atlas_id:}{[1] the atlas id in the filenames.}
\item{verbose=FALSE:}{whether to print the id of the scan being loaded.}
\item{rtype='list':}{the type of output to return. Options are 'list' and 'array'.}
}
\value{
ts: [[n]][nt, nroi] the ts loaded from the specified file names. list of n subjects, each with nt timesteps and nroi rois
dataset: [n] a vector of the dataset ids for each subject.
subjects: [n] the subject ids
sessions: [n] the session ids
tasks: [n] the task ids
runs: [n] the run ids
}
\description{
A utility to open timeseries information from a dataset. Assumes data is formatted by the BIDs spec.
This means that derivatives are named as follows: sub-[###]_task-[abcde]_ses-[###]_run-[###]_(generic info).file
}
|
5cd72eb5c6781d6e2db71d6ee4b891ae55af9d5f | 4c1b5492c2cafe43c4937a36f8168534c6663f12 | /R/utilities.R | 992f5f5f3229341e2dc6f16db7f1814c01062a99 | [] | no_license | muschellij2/ari | af1eb3db1e64dba0c27022a2b470c13dd0a55c09 | b1f3b641194db561914a747c562aceaf3bcff71b | refs/heads/master | 2021-06-04T19:15:38.123110 | 2021-04-28T17:09:22 | 2021-04-28T17:09:22 | 101,906,454 | 2 | 0 | null | 2017-08-30T16:52:23 | 2017-08-30T16:52:22 | null | UTF-8 | R | false | false | 2,518 | r | utilities.R | make_same_sample_rate = function(audio, verbose = TRUE) {
if (inherits(audio, "Wave")) return(audio)
sample_rate = sapply(audio, function(r) r@samp.rate)
if (!all(sample_rate == sample_rate[[1]]) && verbose) {
message("enforcing same sample rate, using minimum")
}
sample_rate = min(sample_rate, na.rm = TRUE)
if (verbose) {
message(paste0("Sample rate downsampled to ", sample_rate))
}
audio = lapply(audio, function(x) {
if (x@samp.rate == sample_rate) return(x)
tuneR::downsample(x, samp.rate = sample_rate)
})
sample_rate = sapply(audio, function(r) r@samp.rate)
stopifnot(all(sample_rate == sample_rate[[1]]))
return(audio)
}
is_Wave <- function(x){
identical(suppressWarnings(as.character(class(x))), "Wave")
}
# get random string
grs <- function(){
paste(sample(c(seq(10), letters, LETTERS),
size = 12, replace = TRUE), collapse = "")
}
# how long is a wav?
duration <- function(wav){
stopifnot(is_Wave(wav))
length(wav@left) / wav@samp.rate
}
# get from list
# list, name of element, default
gfl <- function(l, n, d){
if(is.null(l[[n]])){
d
} else {
l[[n]]
}
}
#' @importFrom purrr map_chr compose
string_tirm <- function(s){
str_rev <- function(t){
paste(rev(strsplit(t, NULL)[[1]]), collapse = "")
}
str_trim_right <- function(x){
sub("\\s+$", "", x)
}
str_trim_left <- function(x){
x <- str_rev(x)
x <- str_trim_right(x)
str_rev(x)
}
lr <- compose(str_trim_left, str_trim_right)
map_chr(s, lr)
}
# get text from html comments in an Rmd
parse_html_comments <- function(path){
lines_ <- readLines(path, warn = FALSE)
starts <- grep("<!--", lines_)
ends <- grep("-->", lines_)
if(length(starts) != length(ends)){
stop("There's a comment open/close mismatch.")
}
result <- rep(NA, length(starts))
for(i in seq_along(starts)){
if(starts[i] == ends[i]){ # Single line
result[i] <- lines_[starts[i]]
} else {
# Multiple lines
result[i] <- paste(string_tirm(lines_[starts[i]:ends[i]]),
collapse = " ")
}
result[i] <- sub("<!--", "", result[i])
result[i] <- sub("-->", "", result[i])
}
string_tirm(result)
}
# split a big string into equal-ish sized pieces
#' @importFrom purrr map
split_up_text <- function(text){
pieces <- ceiling(nchar(text)/1500)
words <- strsplit(text, " ")[[1]]
chunks <- split(words, ceiling(seq_along(words)/(length(words)/pieces)))
map(chunks, paste, collapse = " ")
}
|
af5fea99b07b324e83941853715a318d65e2abca | 8af0800ea15c6ff5ce1c30c1e8bc85102667b669 | /R/bc_trafo.R | 9c9577edb3526882ccc41cdff45960f9489ccb06 | [] | no_license | alexanderrobitzsch/mdmb | 34f0be7820987888101162fd263097c4088ab674 | d31faef05736391bee1122c4a728a606d69d791c | refs/heads/master | 2023-03-06T09:58:19.344444 | 2023-03-01T10:23:41 | 2023-03-01T10:23:41 | 95,305,766 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 367 | r | bc_trafo.R | ## File Name: bc_trafo.R
## File Version: 1.05
bc_trafo <- function( y, lambda )
{
eps <- 1E-3
# lambda <- yj_adjust_lambda( lambda=lambda, lambda0=0, eps=eps )
if ( abs(lambda) > eps ){
# yt <- ( y^lambda - 1 ) / lambda
yt <- log(y)
yt <- ( exp(lambda*yt) - 1 ) / lambda
} else {
yt <- log(y)
}
return(yt)
}
|
5c76e54dc35fd66ea3acafdd0ccabdc5172e6fc2 | 04a0b8033af0ac604b0e63e30bb86ebb3ecc84ef | /files/w13/w13.R | 42548490882748e796be0d9aacd6d7823785d0ab | [] | no_license | TheBeachLab/htgaa | 3fab2976f3941afe7c85796fcc97b1a5569fb2a3 | 019ad070c34e428cf789da682b2c48acd12a9403 | refs/heads/main | 2023-02-28T12:12:40.231375 | 2021-02-08T14:31:19 | 2021-02-08T14:31:19 | 337,067,346 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 750 | r | w13.R | # GENERAL
require(tikzDevice)
# CASE 2HJJ
setwd("~/Downloads/w13/homework/structure_prediction/2HJJ/")
case2hjj <- read.table("score.fsc", header = TRUE)
tikz('case2hjj.tex',standAlone = TRUE, width = 5,height = 5)
plot (case2hjj$rms, case2hjj$score, main="Case 2HJJ (100 models)\nEnergy vs rms", xlab="rms (Angstrom)", ylab="Energy", pch=3)
dev.off()
tools::texi2dvi('case2hjj.tex',pdf=T)
# CASE 1WHZ
setwd("~/Downloads/w13/homework/structure_prediction/1WHZ/")
case1whz <- read.table("score.fsc", header = TRUE)
tikz('case1whz.tex',standAlone = TRUE, width = 5,height = 5)
plot (case1whz$rms, case1whz$score, main="Case 1WHZ (265 models)\nEnergy vs rms", xlab="rms (Angstrom)", ylab="Energy", pch=3)
dev.off()
tools::texi2dvi('case1whz.tex',pdf=T)
|
386171663ea4a390dc44869263b85cce2d3b08aa | 531242379329f04dc4c2d43cb386f0af7d0763c7 | /man/plot-methods.Rd | 749340d95a0e2beb8daa11977276f1bb912008ec | [] | no_license | ipa-tys/ROCR | aabaf23da634fd54a373dec37e5cb8352700cfbc | 3e22ae72aef9fcdd5b3dcc8d4eb8d35bb6115df2 | refs/heads/master | 2021-01-10T19:47:46.098387 | 2020-12-04T17:28:07 | 2020-12-04T17:28:07 | 4,235,237 | 31 | 16 | null | 2020-12-04T17:28:08 | 2012-05-05T17:11:18 | R | UTF-8 | R | false | true | 6,350 | rd | plot-methods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\name{plot-methods}
\alias{plot-methods}
\alias{plot,performance,missing-method}
\alias{plot.performance}
\title{Plot method for performance objects}
\usage{
\S4method{plot}{performance,missing}(
x,
y,
...,
avg = "none",
spread.estimate = "none",
spread.scale = 1,
show.spread.at = c(),
colorize = FALSE,
colorize.palette = rev(rainbow(256, start = 0, end = 4/6)),
colorkey = colorize,
colorkey.relwidth = 0.25,
colorkey.pos = "right",
print.cutoffs.at = c(),
cutoff.label.function = function(x) { round(x, 2) },
downsampling = 0,
add = FALSE
)
\method{plot}{performance}(...)
}
\arguments{
\item{x}{an object of class \code{performance}}
\item{y}{not used}
\item{...}{Optional graphical parameters to adjust different components of
the performance plot. Parameters are directed to their target component by
prefixing them with the name of the component (\code{component.parameter},
e.g. \code{text.cex}). The following components are available:
\code{xaxis}, \code{yaxis}, \code{coloraxis}, \code{box} (around the
plotting region), \code{points}, \code{text}, \code{plotCI} (error bars),
\code{boxplot}. The names of these components are influenced by the R
functions that are used to create them. Thus, \code{par(component)} can be
used to see which parameters are available for a given component (with the
expection of the three axes; use \code{par(axis)} here). To adjust the
canvas or the performance curve(s), the standard \code{plot} parameters can
be used without any prefix.}
\item{avg}{If the performance object describes several curves (from
cross-validation runs or bootstrap evaluations of one particular method),
the curves from each of the runs can be averaged. Allowed values are
\code{none} (plot all curves separately), \code{horizontal} (horizontal
averaging), \code{vertical} (vertical averaging), and \code{threshold}
(threshold (=cutoff) averaging). Note that while threshold averaging is
always feasible, vertical and horizontal averaging are not well-defined if
the graph cannot be represented as a function x->y and y->x, respectively.}
\item{spread.estimate}{When curve averaging is enabled, the variation around
the average curve can be visualized as standard error bars
(\code{stderror}), standard deviation bars (\code{stddev}), or by using box
plots (\code{boxplot}). Note that the function \code{plotCI}, which is used
internally by ROCR to draw error bars, might raise a warning if the spread
of the curves at certain positions is 0.}
\item{spread.scale}{For \code{stderror} or \code{stddev}, this is a scalar
factor to be multiplied with the length of the standard error/deviation
bar. For example, under normal assumptions, \code{spread.scale=2} can be
used to get approximate 95\% confidence intervals.}
\item{show.spread.at}{For vertical averaging, this vector determines the x
positions for which the spread estimates should be visualized. In contrast,
for horizontal and threshold averaging, the y positions and cutoffs are
determined, respectively. By default, spread estimates are shown at 11
equally spaced positions.}
\item{colorize}{This logical determines whether the curve(s) should be
colorized according to cutoff.}
\item{colorize.palette}{If curve colorizing is enabled, this determines the
color palette onto which the cutoff range is mapped.}
\item{colorkey}{If true, a color key is drawn into the 4\% border
region (default of \code{par(xaxs)} and \code{par(yaxs)}) of the
plot. The color key visualizes the mapping from cutoffs to colors.}
\item{colorkey.relwidth}{Scalar between 0 and 1 that determines the
fraction of the 4\% border region that is occupied by the colorkey.}
\item{colorkey.pos}{Determines if the colorkey is drawn vertically at
the \code{right} side, or horizontally at the \code{top} of the
plot.}
\item{print.cutoffs.at}{This vector specifies the cutoffs which should
be printed as text along the curve at the corresponding curve positions.}
\item{cutoff.label.function}{By default, cutoff annotations along the curve
or at the color key are rounded to two decimal places before printing.
Using a custom \code{cutoff.label.function}, any other transformation can
be performed on the cutoffs instead (e.g. rounding with different precision
or taking the logarithm).}
\item{downsampling}{ROCR can efficiently compute most performance measures
even for data sets with millions of elements. However, plotting of large
data sets can be slow and lead to PS/PDF documents of considerable size. In
that case, performance curves that are indistinguishable from the original
can be obtained by using only a fraction of the computed performance
values. Values for downsampling between 0 and 1 indicate the fraction of
the original data set size to which the performance object should be
downsampled, integers above 1 are interpreted as the actual number of
performance values to which the curve(s) should be downsampled.}
\item{add}{If \code{TRUE}, the curve(s) is/are added to an already existing
plot; otherwise a new plot is drawn.}
}
\description{
This is the method to plot all objects of class performance.
}
\examples{
# plotting a ROC curve:
library(ROCR)
data(ROCR.simple)
pred <- prediction( ROCR.simple$predictions, ROCR.simple$labels )
pred
perf <- performance( pred, "tpr", "fpr" )
perf
plot( perf )
# To entertain your children, make your plots nicer
# using ROCR's flexible parameter passing mechanisms
# (much cheaper than a finger painting set)
par(bg="lightblue", mai=c(1.2,1.5,1,1))
plot(perf, main="ROCR fingerpainting toolkit", colorize=TRUE,
xlab="Mary's axis", ylab="", box.lty=7, box.lwd=5,
box.col="gold", lwd=17, colorkey.relwidth=0.5, xaxis.cex.axis=2,
xaxis.col='blue', xaxis.col.axis="blue", yaxis.col='green', yaxis.cex.axis=2,
yaxis.at=c(0,0.5,0.8,0.85,0.9,1), yaxis.las=1, xaxis.lwd=2, yaxis.lwd=3,
yaxis.col.axis="orange", cex.lab=2, cex.main=2)
}
\references{
A detailed list of references can be found on the ROCR homepage at
\url{http://rocr.bioinf.mpi-sb.mpg.de}.
}
\seealso{
\code{\link{prediction}},
\code{\link{performance}},
\code{\link{prediction-class}},
\code{\link{performance-class}}
}
\author{
Tobias Sing \email{tobias.sing@gmail.com}, Oliver Sander
\email{osander@gmail.com}
}
|
9ad8a8c96a33061241982507c3e415afc65c32c5 | 9b2aa890ed7f5d87af800c61d7ec5ab8a07ffb66 | /numeros/sociables.R | 1f23779504c985e0eb2b7b0a36f2abeb75ff508a | [] | no_license | jjdeharo/General | 65b1456a5ef849d50a86318d825ac81bdf497a8e | 99a7b4616f029c1b1d14427b1dd0ba5fcf785457 | refs/heads/master | 2021-01-23T08:39:44.975976 | 2015-04-11T11:07:16 | 2015-04-11T11:07:16 | 32,942,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 768 | r | sociables.R | # Halla los terminos de la serie de números sociables partiendo de n
# Devuelve T si la serie es sociable (periódica) y F si no lo es
# max es el número máximo de términos que se hallarán en cada serie
sociables <- function(n, max=5, salida=T) {
num <- n
n0 <- n
i <- 1
if(salida == T) cat(i,num,"\n")
while(T) {
i <- i+1
num <- sum(divisoresP(num))
if(sum(n == num)) {
if(num == n0 && i > 3) {
return(T)
} else {
return(F)
}
}
if(salida == T) cat(i,num,"\n")
n <- c(n,num)
if(i>max) {
if(salida == T) cat("Programa detenido. Excede max =",max,"\n")
return(F)
}
}
}
divisoresP <- function(n) {
if(n == 1 || n == 0) {return(0)}
v <- 1:(n-1)
return(v[n %% v == 0])
} |
15763a5549c187405e202b2149ef731e18e85e4d | 3eb3c7f82edbe3e5dc027443113f93d47fabd3d1 | /tests/testthat/test-combine_words.R | 3d3d295a35d76d30cf273d0a6790596b8f56299c | [
"MIT"
] | permissive | dpc-nickwilliams/djprlabourdash | 3a6a95a2a3307400168a3a394238aa36d81b7c1b | 8be8cba6a550b5fbba469542d194aea5bad87b2d | refs/heads/main | 2023-07-16T02:13:43.236199 | 2021-08-31T05:49:10 | 2021-08-31T05:49:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 411 | r | test-combine_words.R | test_that("combine_words() combines words", {
expect_equal(combine_words("a"), "a")
expect_equal(combine_words(c("a", "b")), "a and b")
expect_equal(combine_words(c("a", "b", "c")), "a, b, and c")
expect_equal(combine_words(c(1, 2, 3)), "1, 2, and 3")
expect_equal(combine_words(c(TRUE, FALSE, TRUE)), "TRUE, FALSE, and TRUE")
expect_error(combine_words())
expect_equal(combine_words(""), "")
})
|
dbbb7979077d74f0c49b5f1f579be84262344f8f | 19b4dc0b477493329b284f2a4503111ae2becafb | /slime_mold.R | e411236ac1ef4da28a86e58eacd0677d042ce0c3 | [] | no_license | cherry8177/Slime-mould | c4e8e4f5b89bebab149c837a0328f5464aebe9f3 | 0a05fa8444fe83d84b8c1797aefc525c67829e73 | refs/heads/master | 2020-05-21T11:21:49.286296 | 2018-11-07T00:50:25 | 2018-11-07T00:50:25 | 45,693,302 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,184 | r | slime_mold.R |
#img <- readJPEG(system.file("img", "Rlogo.jpg", package="jpeg"))
# extract the coordinates from the image by finding the center pixels of each node(crazy for loop to reduce the node to single pixel)
library(jpeg)
im<-readJPEG("copy copy.jpg",FALSE)
nw<-im[,,1]
rotate <- function(x) t(apply(x, 2, rev))
nw<-rotate(nw)
nw1<-(nw<0.7)*1
#image(rotate(nw1))
image(nw)
image(nw1)
#write.csv(rotate(N1), file="network", row.names=F, col.names=F)
# remove the edge line
for (k in 1:3){
N1=matrix(NA,201, 210)
for(i in 1:nrow(nw1) ){
for(j in 1:ncol(nw1)) {
if(i==1 | j==1 | i==nrow(nw1) | j==ncol(nw1)){
N1[i,j]=0 } else {
if( ((nw1[i-1,j]==0) | (nw1[i,j-1]==0) |(nw1[i+1,j]==0) | (nw1[i,j+1]==0))){
N1[i,j]=0 }else{ N1[i,j]=nw1[i,j]}
}
}
}
nw1<-N1
}
image(N1)
# shrink the nodes to single pixel.
for (k in 1:3){
N1=matrix(NA,201, 210)
for(i in 1:nrow(nw1) ){
for(j in 1:ncol(nw1)) {
if(i==1 | j==1 | i==nrow(nw1) | j==ncol(nw1)){
N1[i,j]=0 } else {
if( ((nw1[i-1,j]==0) | (nw1[i,j-1]==0) | (nw1[i+1,j]==0) |
(nw1[i,j+1]==0) | (nw1[i-1,j-1]==0) | (nw1[i+1,j-1]==0) |
(nw1[i-1,j+1]==0) | (nw1[i+1,j+1]==0)) &
!((nw1[i-1,j]==0) & (nw1[i,j-1]==0) & (nw1[i+1,j]==0) &
(nw1[i,j+1]==0) & (nw1[i-1,j-1]==0) & (nw1[i+1,j-1]==0) &
(nw1[i-1,j+1]==0) & (nw1[i+1,j+1]==0))){
N1[i,j]=0 ;nw1[i,j]=0}else{ N1[i,j]=nw1[i,j]}
}
}
}
nw1<-N1
}
image(N1)
# need clean out 3 extract pixel mannually.
# record the coordinates by the posittion of pixel matrix
nodes=matrix(NA,36,2)
k=0
for(i in 1:nrow(nw1) ){
for(j in 1:ncol(nw1)) {
if( N1[i,j]==1){
k=k+1
nodes[k,1]= i
nodes[k,2]= j
}
}
}
# need clean out 3 extract pixel mannually.
plot(nodes[,1],nodes[,2])
nodes2<-nodes[-c(6,20,31),]
#compute the weight of the edges using Euclidean distance
weight=matrix(NA,33,33)
#nodes1<-read.table("nodes.txt")
for(i in 1:nrow(nodes2) ){
for(j in 1:nrow(nodes2) ){
weight[i,j]= sqrt((nodes2[i,1]-nodes2[j,1])^2+(nodes2[i,2]-nodes2[j,2])^2)
}
}
weight1=round(weight,digit=0)
write.csv(nodes2, file="nodes2.csv")
write.csv(weight1, file="Int weight.csv")
plot(sort(as.integer(weight)), pch=".")
points(sort(as.integer(weight[18,])))
#nodes2<-nodes[-c(6,20,31),]
N1[16,55]=0
N1[103,85]=0
N1[151,46]=0
sum(N1)
#divide the groups
kk=20
k=0
n1=sum(N1[1:(100+kk),1:(100+kk)])
nd1=matrix(NA,n1,2)
for(i in 1:(100+kk)){
for(j in 1:(100+kk) ){
if( N1[i,j]==1){
k=k+1
nd1[k,1]= i
nd1[k,2]= j
}
}
}
#kk=20
k=0
n2=sum(N1[(101-kk):201,1:(100+kk)])
nd2=matrix(NA,n2,2)
for(i in (101-kk):201){
for(j in 1:(100+kk) ){
if( N1[i,j]==1){
k=k+1
nd2[k,1]= i
nd2[k,2]= j
}
}
}
#kk=20
k=0
n3=sum(N1[1:(100+kk),(101-kk):210])
nd3=matrix(NA,n3,2)
for(i in 1:(100+kk)){
for(j in (101-kk):210 ){
if( N1[i,j]==1){
k=k+1
nd3[k,1]= i
nd3[k,2]= j
}
}
}
#kk=20
k=0
n4=sum(N1[(101-kk):201,(101-kk):210])
nd4=matrix(NA,n4,2)
for(i in (101-kk):201){
for(j in (101-kk):210 ){
if( N1[i,j]==1){
k=k+1
nd4[k,1]= i
nd4[k,2]= j
}
}
}
w1=matrix(NA,n1,n1)
w2=matrix(NA,n2,n2)
w3=matrix(NA,n3,n3)
w4=matrix(NA,n4,n4)
#nodes1<-read.table("nodes.txt")
for(i in 1:nrow(nd1) ){
for(j in 1:nrow(nd1) ){
w1[i,j]= sqrt((nd1[i,1]-nd1[j,1])^2+(nd1[i,2]-nd1[j,2])^2)
}
}
for(i in 1:nrow(nd2) ){
for(j in 1:nrow(nd2) ){
w2[i,j]= sqrt((nd2[i,1]-nd2[j,1])^2+(nd2[i,2]-nd2[j,2])^2)
}
}
for(i in 1:nrow(nd3) ){
for(j in 1:nrow(nd3) ){
w3[i,j]= sqrt((nd3[i,1]-nd3[j,1])^2+(nd3[i,2]-nd3[j,2])^2)
}
}
for(i in 1:nrow(nd4) ){
for(j in 1:nrow(nd4) ){
w4[i,j]= sqrt((nd4[i,1]-nd4[j,1])^2+(nd4[i,2]-nd4[j,2])^2)
}
}
write.csv(w1, file="w1.csv")
write.csv(w2, file="w2.csv")
write.csv(w3, file="w3.csv")
write.csv(w4, file="w4.csv")
write.csv(nd1, file="nd1.csv")
write.csv(nd2, file="nd2.csv")
write.csv(nd3, file="nd3.csv")
write.csv(nd4, file="nd4.csv")
library(vegan)
t1<-spantree(weight1)
plot(t1)
library(ape)
##mst output is the adjacency matrix of the minimum spaning tree
t2<-mst(weight1)
plot(t2)
#short2= function(x,y) { u = (x-y)/sqrt(sum((x-y)^2)); cbind( x-u*0.2,y+u*0.2)}
#short = function(x,y,a=0.2) c(x + (y-x)*a, y+(x-y)*a)
plot(nodes2[,1],nodes2[,2])
lines(nodes2[1:2,1],nodes2[1:2,2])
#function to draw a mst with the iput as all the coordinates in mst
plot.mst=function(nd){
weight=matrix(NA,nrow(nd),nrow(nd))
for(i in 1:nrow(nd) ){
for(j in 1:nrow(nd) ){
weight[i,j]= sqrt((nodes2[i,1]-nodes2[j,1])^2+(nodes2[i,2]-nodes2[j,2])^2)
}
}
weight1=round(weight,digit=0)
require(ape)
t2=mst(weight1)
plot.mst1=function(t2,nodes2){
# t2 is the minimum spanning tree matrix
# nodes2 is the coordinates of all nodes (2 x n)
pair=matrix(NA,2,sum(t2))
k=0
for(i in 1: nrow(t2)){
for(j in i : ncol(t2)){
if(t2[i,j]==1){
k=k+1
pair[1,(2*k-1)]=nodes2[i,1]
pair[2,(2*k-1)]=nodes2[j,1]
pair[1,(2*k)]=nodes2[i,2]
pair[2,(2*k)]=nodes2[j,2]
}
}
}
plot(nodes2[,1],nodes2[,2], ylim=c(0,210), xlim=c(0,210))
for( i in 1: k){
lines(pair[,(2*i-1)],pair[,(2*i)])
}
}
plot.mst1(t2, nodes2)
}
plot.mst1=function(t2,nodes2){
# t2 is the minimum spanning tree matrix
# nodes2 is the coordinates of all nodes (2 x n)
pair=matrix(NA,2,sum(t2))
k=0
for(i in 1: nrow(t2)){
for(j in i : ncol(t2)){
if(t2[i,j]==1){
k=k+1
pair[1,(2*k-1)]=nodes2[i,1]
pair[2,(2*k-1)]=nodes2[j,1]
pair[1,(2*k)]=nodes2[i,2]
pair[2,(2*k)]=nodes2[j,2]
}
}
}
plot(nodes2[,1],nodes2[,2], ylim=c(0,210), xlim=c(0,210))
for( i in 1: k){
lines(pair[,(2*i-1)],pair[,(2*i)])
}
}
plot.mst1(t2, nodes2)
######## points mst
points.mst=function(t2,nodes2){
# t2 is the minimum spanning tree matrix
# nodes2 is the coordinates of all nodes (2 x n)
pair=matrix(NA,2,sum(t2))
k=0
for(i in 1: nrow(t2)){
for(j in i : ncol(t2)){
if(t2[i,j]!=0){
k=k+1
pair[1,(2*k-1)]=nodes2[i,1]
pair[2,(2*k-1)]=nodes2[j,1]
pair[1,(2*k)]=nodes2[i,2]
pair[2,(2*k)]=nodes2[j,2]
}
}
}
points(nodes2[,1],nodes2[,2])
for( i in 1: k){
lines(pair[,(2*i-1)],pair[,(2*i)])
}
}
##############################divide by four parts with 50% overlap
#just for drawing tree
tt1<-mst(w1)
tt2<-mst(w2)
tt3<-mst(w3)
tt4<-mst(w4)
plot.mst1(tt1,nd1)
points.mst(tt1,nd1)
points.mst(tt2,nd2)
points.mst(tt3,nd3)
points.mst(tt4,nd4)
#########################################Random forest/bootstrap
#random tree generation
sampling.nodes=function(k=10, n.tree=500,seed=1){
set.seed(seed)
# random sampling k iterm
sampling.nodes=matrix(NA,k,n.tree)
for(i in 1:n.tree){
sampling.nodes[,i]=sample(nrow(nodes2), k )
}
sampling.nodes
}
sampling.nodes(10,10,1)
#####################################################
# mean of the sampled tree
average.tree=function(k=10,n.tree=10,seed=1, nodes2){
sampling.nodes=function(k=10, n.tree=500,seed=1){
set.seed(seed)
# random sampling k iterm
sampling.nodes=matrix(NA,k,n.tree)
for(i in 1:n.tree){
sampling.nodes[,i]=sample(nrow(nodes2), k )
}
sampling.nodes
}
sample1=sampling.nodes(k,n.tree,seed)
sample.nd=array(NA,c(k,2,n.tree)) #sample nodes
for (i in 1: n.tree){
sample.nd[,,i]=nodes2[sample1[,i],]
}
sample.weight=array(NA,c(k,k,n.tree))#weight
for(ii in 1:n.tree){
for(i in 1:k){
for(j in 1:k){
sample.weight[i,j,ii]= sqrt((sample.nd[i,1,ii]-sample.nd[j,1,ii])^2
+(sample.nd[i,2,ii]-sample.nd[j,2,ii])^2)
}
}
}
require(ape)# MST
sample.tree=array(NA,c(k,k,n.tree))
for(ii in 1: n.tree){
sample.tree[,,ii]=mst(sample.weight[,,ii])
}
recover.tree=array(NA,c(nrow(nodes2),nrow(nodes2),n.tree))
for(ii in 1:n.tree){
for(i in 1:k){
for(j in 1:k){
if(sample.tree[i,j,ii]==1){ recover.tree[sample1[i,ii],sample1[j,ii],ii]=1
}
}
}
}
recover.tree[is.na(recover.tree)] <- 0
mean.tree=apply(recover.tree,1:2,sum)/n.tree
mean.tree
}
mt1<-average.tree(k=16,n.tree=10,seed=1, nodes2)
color=rep(1,33)
set.seed(1)
for (i in c<-sample(33,16)){
color[i]=2
}
plot(nodes2[,1],nodes2[,2],col=color)
points.mst(mt1,nodes2)
#################################################################
plot.mst2=function(t2,nodes2,cutoff=0.1, kk=1){
# t2 is the minimum spanning tree matrix
# nodes2 is the coordinates of all nodes (2 x n)
pair=matrix(NA,2,sum(t2>0))
k=0
for(i in 1: nrow(t2)){
for(j in i : ncol(t2)){
if(t2[i,j]>cutoff){
k=k+1
pair[1,(2*k-1)]=nodes2[i,1]
pair[2,(2*k-1)]=nodes2[j,1]
pair[1,(2*k)]=nodes2[i,2]
pair[2,(2*k)]=nodes2[j,2]
}
}
}
plot(nodes2[,1],nodes2[,2], ylim=c(0,210), xlim=c(0,210),main=kk+1)
for( i in 1: k){
lines(pair[,(2*i-1)],pair[,(2*i)])
}
}
mt1<-average.tree(k=17,n.tree=500,seed=1, nodes2)
plot.mst2(mt1,nodes2,0.1)
mt1<-average.tree(k=27,n.tree=500,seed=1, nodes2)
plot.mst2(mt1,nodes2,0.13)
k1<-c(5,33,165,500,1000,5000,50000)
mt=array(NA, c(33,33,length(k1)))
for(i in 1:length(k1)){
mt[,,i]=average.tree(k=17,n.tree=k1[i],seed=1, nodes2)
}
diff1=NULL
diff2=NULL
diff3=NULL
for(i in 1: (length(k1)-1)){
diff1[i]=max(abs(mt[,, (i+1)]-mt[,,i]))
diff2[i]=sum(abs(mt[,, (i+1)]-mt[,,i]))
diff3[i]=sum((mt[,,(i+1)]-mt[,,i])^2)/(33*33)
}
diff1;diff2;diff3
par(mfrow=c(2,2))
plot(log(k1),diff1, type="o",xlab="log(n.tree)",ylab="Max absolute differene",main="Probablity covergence")
plot(log(k1),diff2,type="o", xlab="log(n.tree)",ylab="Sum of absolute differene",main="Probablity covergence")
plot(log(k1),diff3, type="o", xlab="log(n.tree)",ylab="Variance",main="Probablity covergence")
# settle the n.tree=500
mt.nd<-array(NA, c(33,33,31))
for(i in 1:31){
mt.nd[,,i]=average.tree(k=i+1,n.tree=5000,seed=1, nodes2)
}
pdf("nodes sampling number screen no cutoff.pdf", width=5, height=10)
par(mfrow=c(5,2))
for(i in 1:31){
plot.mst2(mt.nd[,,i],nodes2, cutoff=0)
}
dev.off()
##################################################################
#3 measurements
# fault tolerence
# bridge functions
library(igraph)
num.bridge=function(t2){
require(igraph)
G<-graph.adjacency(t2,mode=c( "undirected"))
num_comp <- length( decompose.graph(G) )
k=0
for (i in 1:length(E(G))) {
G_sub <- delete.edges(G, i)
if ( length( decompose.graph(G_sub) ) > num_comp )
k=k+1
}
k
}
num.bridge(mt.nd[,,31]>0.01)
#####################################cutoff screen
cutoff=NULL
c.position=NULL
for (i in 1:31){
for(j in c(1:1089)[sort(mt.nd[,,i])>0]){
if(num.bridge(mt.nd[,,i]>sort(mt.nd[,,i])[j])>0) {
cutoff[i]=sort(mt.nd[,,i])[j-1]; c.position[i]=j-1;break}
}
}
#################cutoff functions
#input probability matrix and num of bridges allowed
cutoff1=function(mt.nd,k ){
cutoff=NULL
c.position=NULL
for (i in 1:dim(mt.nd)[3]){
for(j in c(1:1089)[sort(mt.nd[,,i])>0]){
if(num.bridge(mt.nd[,,i]>sort(mt.nd[,,i])[j])>k) {
cutoff[i]=sort(mt.nd[,,i])[j-1]; c.position[i]=j-1;break}
}
}
list(cutoff,c.position)
}
##the probablity distribution of the all the edges and the cutoff for a no bridge graph
pdf("the probablity distribution of the all the edges and the cutoff for a no bridge graph",width=5,height=10)
par(mfrow=c(5,2))
for(i in 1:31){
plot(sort(mt.nd[,,i]), ylim=c(0,1), type="h")
abline(v=c.position[i], col="red")
}
dev.off()
pdf(" cutoffed graph with on bridges",width=5,height=10)
par(mfrow=c(5,2))
for(i in 1:31){
plot.mst2(mt.nd[,,i],nodes2, cutoff=cutoff[i])
}
dev.off()
pdf("total graph edge distri cutoff graph 5000",width=7.5,height=15)
par(mfrow=c(5,3))
for(i in 1:31){
plot.mst2(mt.nd[,,i],nodes2, cutoff=0,kk=i)
plot(sort(mt.nd[,,i]), ylim=c(0,1), type="h",main=i+1)
abline(v=c.position[i], col="red")
plot.mst2(mt.nd[,,i],nodes2, cutoff=cutoff[i],kk=i)
}
dev.off()
###########################################network evaluation
total.cost=NULL
m.dist=NULL
n.edge=NULL
w.mst=sum(weight1[t2>0])
for(i in 1:31){
total.cost[i]=sum(weight1[mt.nd[,,i]>cutoff[i]])
m.dist[i]=sum(weight1[mt.nd[,,i]>cutoff[i]])/sum(mt.nd[,,i]>cutoff[i])
n.edge[i]=sum(mt.nd[,,i]>cutoff[i])
}
TL=total.cost/w.mst
pdf("performance evaluation of sampling number k.pdf", height=12, width=5)
par(mfrow=c(4,1))
plot(c(2:32),total.cost,xlab= "number of sampling nodes", ylab="Total Cost",type="o", main="Total Cost of Sampling Number Screen")
plot(c(2:32),TL,type="o", xlab= "number of sampling nodes",ylab="Normalized Total Cost",main="Normalized Total Cost of Sampling Number Screen")
plot(c(2:32),m.dist,type="o", xlab= "number of sampling nodes",main="Mean Distance of Sampling Number Screen")
plot(c(2:32),n.edge,type="o",xlab= "number of sampling nodes", main="Numbers of Edges of Sampling Number Screen")
dev.off()
w.mst=sum(weight1[t2>0])
########################################################################
plot.mst2(mt.nd[,,31],nodes2, cutoff=0.026)
#########################################################################
#fault tolerance measure for the each model
FT <- vector("list", 31) #number of briges contained
TC <- vector("list", 31) #normalized cost
TW <- vector("list", 31) #total weights of each graph
TE <- vector("list", 31) #total number of edges
w.mst=sum(weight1[t2>0])
for (i in 1:31){
k=0
for(j in c(1:1089)[sort(mt.nd[,,i])>0]){
k=k+1
G<-graph.adjacency(mt.nd[,,i]>sort(mt.nd[,,i])[j],mode=c( "undirected"))
if(length( decompose.graph(G) )>1) break
FT[[i]][k]=num.bridge(mt.nd[,,i]>sort(mt.nd[,,i])[j])
TE[[i]][k]=sum(mt.nd[,,i]>sort(mt.nd[,,i])[j])/2
TW[[i]][k]=sum(weight1[mt.nd[,,i]>sort(mt.nd[,,i])[j]])
TC[[i]][k]=TW[[i]][k]/w.mst
}
}
pdf("fault tolerance screen vs Cost 5000.pdf" ,height=15,width=5)
par(mfrow=c(5,2))
for( i in 1:31){
plot(TC[[i]], 1-FT[[i]]/TE[[i]],type="o", ylab="Fault Tolerance (FT)", xlab="Normalized Cost",main=i+1)
}
dev.off()
pdf("fault tolerance screen vs Cost (clos up) 5000.pdf" ,height=15,width=5)
par(mfrow=c(5,2))
for( i in 1:31){
plot(TC[[i]], 1-FT[[i]]/TE[[i]],type="o", xlim=c(1,3), ylab="Fault Tolerance (FT)", xlab="Normalized Cost",main=i+1)
}
dev.off()
#G<-graph.adjacency(mt.nd[,,i]>sort(mt.nd[,,i])[j],mode=c( "undirected"))
#weight2<-weight1
#weight2[t2<1]=0
#G<-graph.adjacency( weight2,mode=c( "undirected"),weighted=TRUE)
#get.all.shortest.paths(G, from=V(G), to = V(G) , mode = c( "all"),weights=weight2)
#########################################################################
####### experiment for the two-cluster network
#round(cor(circ.sim(nvar=8,nsub=200)),2)
#plot(fa(circ.sim(16,500),2)$loadings,main="Circumplex Structure") #circumplex structure
#
|
6b3500a24d4e4ec3c44199611d3355ee450c9a79 | 6c441df95b644040eb1317c7eb6d775d1e3184d4 | /BKproj1og2/proj2/b1v07faster.R | bdae737017bc3d5d5284cacff78e817e7b592436 | [] | no_license | pra1981/Master-Degree-files | 194e764443111f002f69f76e026e49dfd83ba01a | 8e18dd1647bf8f3ed4fa29d84874e8db28d3a0c0 | refs/heads/master | 2020-04-25T14:22:45.183224 | 2018-06-26T08:00:22 | 2018-06-26T08:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,539 | r | b1v07faster.R |
case <-function(integer,rows,cols){
if(integer==1){#(1,1) first element first column
return (1)
}
if(integer==rows){#(rows,1) last element first column
return (2)
}
if(integer==(rows-1)*cols){#(1,cols) first element last column
return (3)
}
if(integer==(rows*cols)){#(rows,cols) last element last column
return (4)
}
if(integer<rows+1){#(:,1) first column
return (5)
}
if(integer>(cols-1)*rows){#(:,cols) last column
return (6)
}
if((integer-1)%%rows==0){#(1,:) first row
return (7)
}
if(integer%%rows==0){#(rows,:) last row
return (8)
}
return (9) #interior node
}
#todo
#1 Test the function case
#keep = matrix(0,rows,cols)
#for (i in 1:(rows*cols)){
# keep[i]=case(integer = i, rows=rows,cols=cols)
#}
isingModel <- function(rows=10,cols=6,runs=500,beta=0.5,
u=matrix(0, nrow = rows, ncol = cols),colsum=FALSE){
#Generates a matrix where each column is a state of the Markov Chain
#If colsum=true only retrun number of ones
#Decalare Markov Chain vector
if(!colsum){
mc=matrix(NA,nrow=rows*cols,ncol = runs+1) #each column is an observation
mc[,1]=as.vector(u) #plug in initial state
}else{
numberOfones=matrix(NA,nrow=1,ncol = runs+1)
numberOfones[1]=sum(u)
}
r=0 #run number
i=0 #index of considered node
while(r < runs){
# (1) PROPOSE CANDIDATE STATE
r=r+1
#next node to consider
i= ifelse(i==cols*rows, 1, i+1)
val=u[i]
# (2) COMPUTE ACCEPTANCE PROBABILITY
#which type of node is this?
mycase=case(integer=i,rows=rows,cols=cols)
#compute 2d -n
temp=switch(mycase,
-2+2*abs(sum(u[c(i+1,i+rows)]-val)), #(1,1)
-2+2*abs(sum(u[c(i-1,i+rows)]-val)), #(rows,1)
-2+2*abs(sum(u[c(i+1,i-rows)]-val)), #(1,cols)
-2+2*abs(sum(u[c(i-1,i-rows)]-val)), #(rows,cols)
-3+2*abs(sum(u[c(i-1, i+1, i+rows)]-val)), #(:,1)
-3+2*abs(sum(u[c(i-1, i+1, i-rows)]-val)), #(:,cols)
-3+2*abs(sum(u[c(i+1, i-rows, i+rows)]-val)), #(1,:)
-3+2*abs(sum(u[c(i-1, i-rows, i+rows)]-val)), #(rows,:)
-4+2*abs(sum(u[c(i-1, i+1, i-rows, i+rows)]-val))) #interior
#acceptance probability
accept = exp(beta*temp)
# (3) ACCEPT OR REJECT CANDIDATE STATE
random = runif(1)
u[i]=ifelse(random<=accept,1-val,val)
if(!colsum){
mc[,r+1]=as.vector(u)
}else{
numberOfones[r+1]=sum(u)
}
}
if(!colsum){
return(mc)
}else{
return(numberOfones)
}
}
#input variables
rows1=50 #rows
cols1=50 #columns
runs1=100000
beta1=0.87
#Plot functions of state with time
#function values
funValues=matrix(NA,runs1+1,4) #One column for each set of function values i.e. funValues[,1] are for a0
#for test case sum(mc[,1])
#Different initial values
#all zeroes (1)
a0=matrix(0, nrow = rows1, ncol = cols1)
funValues[,1]=isingModel(rows=rows1,cols=cols1,runs=runs1,beta=beta1,
u=a0, colsum = TRUE)
#all ones (2)
a1=matrix(1, nrow = rows1, ncol = cols1)
funValues[,2]=isingModel(rows=rows1,cols=cols1,runs=runs1,beta=beta1,
u=a1, colsum = TRUE)
#random (3)
set.seed(42)
aR=matrix(rbinom(n=rows1*cols1, size=1,prob = 0.5), nrow = rows1, ncol = cols1)
funValues[,3]=isingModel(rows=rows1,cols=cols1,runs=runs1,beta=beta1,
u=aR, colsum = TRUE)
#checkerboard (4)
if(rows1%%2==1){
#odd number of rows
aC=matrix(rep_len(c(1,0),length.out=rows1*cols1),nrow=rows1,ncol=cols1)
}else{
##even number of rows
aC=matrix(rep_len(c(1,0),length.out=(rows1+1)*cols1),nrow=rows1+1,ncol=cols1)[1:rows1,1:cols1]
}
funValues[,4]=isingModel(rows=rows1,cols=cols1,runs=runs1,beta=beta1,
u=aC, colsum = TRUE)
funValues=funValues/(rows1*cols1)
#Plot
plot(x=1:(runs1+1),y=funValues[,1],type='l',col='red',xlim=c(0,runs1+1),ylim=c(0,1),xlab='State number',ylab='Proportion of ones')
lines(x=1:(runs1+1),y=funValues[,2],type='l',col='darkorange')
lines(x=1:(runs1+1),y=funValues[,3],type='l',col='green')
lines(x=1:(runs1+1),y=funValues[,4],type='l',col='blue')
#Do I need to use a fun server or something for the function to run?
#How to make the axis a bit more readable. Is it needed though?
#Assuming it has converged (it hasn't though) this is F(x) (in the different cases):
xvals=sort(as.vector(funValues))
Fvals=(1:length(xvals))/length(xvals)
plot(x=xvals,y=Fvals)
#How to find f(x) from f(x)? I think I saw it somewhere in a function.
|
e69096b80e498e55cf6a98a38dae1de5424797fd | 7b102f9c8f2e3f9240090d1d67af50333a2ba98d | /nonfatal_code/mental_drug_cocaine/age_sex_splitting/age_split_byregion_cleaned.R | 4b4d09d6baa538c6bcc558239dbf73e2652fe9b1 | [] | no_license | Nermin-Ghith/ihme-modeling | 9c8ec56b249cb0c417361102724fef1e6e0bcebd | 746ea5fb76a9c049c37a8c15aa089c041a90a6d5 | refs/heads/main | 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,997 | r | age_split_byregion_cleaned.R |
#Setup
rm(list=ls())
if (Sys.info()["sysname"] == "Linux") {
j_root <- "/home/j/"
h_root <- "/homes/username/"
} else {
j_root <- "J:"
h_root <- "H:"
}
##Packages
library(data.table)
library(openxlsx)
##Set objects for each age split
region_pattern <- T ##if true will use regional pattern for Dismod, if false uses US age pattern
location_pattern_id <- 102 ##location id of the country whose pattern you want to use if not using region pattern
bundle_id <- 157
request_num <- "60776"
acause <- "mental_drug_cocaine"
gbd_id <- 1978 ##put me_id that you are age_splitting for
output_file <- "results_age_split_5thJune" ##name your output file
age_table_code <- paste0(j_root, "FILEPATH\age_table.R") ##wherever you are keeping the age_table code
note_variable <- "note_modeler"
##Other objects
blank_vars <- c("lower", "upper", "effective_sample_size", "uncertainty_type", "uncertainty_type_value", "seq", "standard_error", "cases")
draws <- paste0("draw_", 0:999)
age <- c(2:20, 30:32, 235) ##epi ages
##Set directories
central_function <- paste0(j_root, "FILEPATH")
uploads <- paste0(j_root, "FILEPATH")
downloads <- paste0(j_root, "FILEPATH")
##Get central functions
source(paste0(central_function, "get_draws.R"))
source(paste0(central_function, "get_population.R"))
source(paste0(central_function, "get_location_metadata.R"))
source(age_table_code)
##Load data
all_age <- as.data.table(read.xlsx(paste0(downloads, "request_", request_num, ".xlsx"), sheet = 1)) ##get all age data
epi_order <- names(all_age)
##make sure all necessary columns exist
vars <- names(all_age)
diff <- setdiff(epi_order, vars)
if (!length(diff) == 0) {
all_age[, (diff) := ""]
}
##Get and format data points to split
all_age <- all_age[measure %in% c("prevalence", "incidence"),]
all_age <- all_age[!group_review==0 | is.na(group_review),] ##don't age split excluded data
all_age <- all_age[is_outlier==0,] ##don't age split outliered data
all_age <- all_age[(age_end-age_start)>20,]
all_age <- all_age[!mean ==0, ] ##don't split points with zero prevalence
all_age[, sex_id := sex]
all_age[sex_id=="Both", sex_id :=3]
all_age[sex_id=="Female", sex_id := 2]
all_age[sex_id=="Male", sex_id :=1]
all_age[, sex_id := as.integer(sex_id)]
all_age[measure == "prevalence", measure_id := 5]
all_age[measure == "incidence", measure_id := 6]
all_age[, year_id := year_start]
##Calculate cases and sample size if missing
all_age[measure == "prevalence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
all_age[measure == "prevalence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean(1-mean)/standard_error^2]
all_age[measure == "incidence" & is.na(sample_size) & !is.na(effective_sample_size), sample_size := effective_sample_size]
all_age[measure == "incidence" & is.na(sample_size) & is.na(effective_sample_size), sample_size := mean/standard_error^2]
all_age[, cases := sample_size * mean] #Use "weighted cases"
all_age <- all_age[!cases==0,] ##don't want to split points with zero cases
## Round age groups to the nearest 5-y boundary
all_age_round <- copy(all_age)
all_age_round <- all_age[, age_start := age_start - age_start %%5]
all_age_round[, age_end := age_end - age_end %%5 + 4]
all_age_round <- all_age_round[age_end > 99, age_end := 99]
## Expand for age
all_age_round[, n.age:=(age_end+1 - age_start)/5]
all_age_round[, age_start_floor:=age_start]
all_age_round[, drop := cases/n.age] ##drop the data points if cases/n.age is less than 1
all_age_round <- all_age_round[!drop<1,]
all_age_parents <- copy(all_age_round) ##keep copy of parents to attach on later
expanded <- rep(all_age_round$seq, all_age_round$n.age) %>% data.table("seq" = .)
split <- merge(expanded, all_age_round, by="seq", all=T)
split[,age.rep:= 1:.N - 1, by =.(seq)]
split[,age_start:= age_start+age.rep*5]
split[, age_end := age_start + 4 ]
##Get super region information and merge on
regions <- get_location_metadata(location_set_id = 22)
regions <- regions[, .(location_id, region_id)]
split <- merge(split, regions, by = "location_id")
regions <- unique(split$region_id) ##get super regions for dismod results
## get age group ids
all_age_total <- merge(split, ages, by = c("age_start", "age_end"), all.x = T)
## create age_group_id == 1 for 0-4 age group
all_age_total[age_start == 0 & age_end == 4, age_group_id := 1]
all_age_total <- all_age_total[age_group_id %in% age | age_group_id ==1] ##don't keep where age group id isn't estimated for cause
##get locations and years for population info later
pop_locs <- unique(all_age_total$location_id)
pop_years <- unique(all_age_total$year_id)
######GET AND FORMAT AGE PATTERN DATA###############################################################
if (region_pattern == T) {
locations <- regions
} else {
locations <- location_pattern_id
}
age_pattern <- as.data.table(get_draws(gbd_id_field = "modelable_entity_id", gbd_id = gbd_id,
measure_ids = c(5, 6), location_ids = locations, source = "epi",
status = "best", sex_ids = c(1,2),
age_group_ids = age, year_ids = 2016)) ##imposing age pattern
us_population <- as.data.table(get_population(location_id = locations, year_id = 2016, sex_id = c(1, 2),
age_group_id = age))
us_population <- us_population[, .(age_group_id, sex_id, population, location_id)]
age_pattern[, se_dismod := apply(.SD, 1, sd), .SDcols = draws]
age_pattern[, rate_dis := rowMeans(.SD), .SDcols = draws]
age_pattern[, (draws) := NULL]
age_pattern <- age_pattern[ ,.(sex_id, measure_id, age_group_id, location_id, se_dismod, rate_dis)]
##Create age group id 1 (collapse all age groups by summing population weighted rates)
age_1 <- copy(age_pattern)
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5), ]
se <- copy(age_1)
se <- se[age_group_id==5, .(measure_id, sex_id, se_dismod, location_id)]
age_1 <- merge(age_1, us_population, by = c("age_group_id", "sex_id", "location_id"))
age_1[, total_pop := sum(population), by = c("sex_id", "measure_id", "location_id")]
age_1[, frac_pop := population / total_pop]
age_1[, weight_rate := rate_dis * frac_pop]
age_1[, rate_dis := sum(weight_rate), by = c("sex_id", "measure_id", "location_id")]
age_1 <- unique(age_1, by = c("sex_id", "measure_id", "location_id"))
age_1 <- age_1[, .(age_group_id, sex_id, measure_id, location_id, rate_dis)]
age_1 <- merge(age_1, se, by = c("sex_id", "measure_id", "location_id"))
age_1[, age_group_id := 1]
age_pattern <- age_pattern[!age_group_id %in% c(2,3,4,5)]
age_pattern <- rbind(age_pattern, age_1)
##Get cases and sample size
age_pattern[measure_id == 5, sample_size_us := rate_dis * (1-rate_dis)/se_dismod^2]
age_pattern[measure_id == 6, sample_size_us := rate_dis/se_dismod^2]
age_pattern[, cases_us := sample_size_us * rate_dis]
age_pattern[is.nan(sample_size_us), sample_size_us := 0] ##if all draws are 0 can't calculate cases and sample size b/c se = 0, but should both be 0
age_pattern[is.nan(cases_us), cases_us := 0]
##Get sex_id 3
sex_3 <- copy(age_pattern)
sex_3[, cases_us := sum(cases_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, sample_size_us := sum(sample_size_us), by = c("age_group_id", "measure_id", "location_id")]
sex_3[, rate_dis := cases_us/sample_size_us]
sex_3[measure_id == 5, se_dismod := sqrt(rate_dis*(1-rate_dis)/sample_size_us)] ##back calculate cases and sample size
sex_3[measure_id == 6, se_dismod := sqrt(cases_us)/sample_size_us]
sex_3[is.nan(rate_dis), rate_dis := 0] ##if sample_size is 0 can't calculate rate and standard error, but should both be 0
sex_3[is.nan(se_dismod), se_dismod := 0]
sex_3 <- unique(sex_3, by = c("age_group_id", "measure_id", "location_id"))
sex_3[, sex_id := 3]
age_pattern <- rbind(age_pattern, sex_3)
age_pattern[, region_id := location_id]
age_pattern <- age_pattern[ ,.(age_group_id, sex_id, measure_id, cases_us, sample_size_us, rate_dis, se_dismod, region_id)]
######################################################################################################
##merge on age pattern info
if (region_pattern == T) {
age_pattern1 <- copy(age_pattern)
all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id", "region_id"))
} else {
age_pattern1 <- copy(age_pattern)
all_age_total <- merge(all_age_total, age_pattern1, by = c("sex_id", "age_group_id", "measure_id"))
}
##get population info and merge on
populations <- as.data.table(get_population(location_id = pop_locs, year_id = pop_years,
sex_id = c(1, 2, 3), age_group_id = age))
populations[, process_version_map_id := NULL]
age_1 <- copy(populations) ##create age group id 1 by collapsing lower age groups
age_1 <- age_1[age_group_id %in% c(2, 3, 4, 5)]
age_1[, population := sum(population), by = c("location_id", "year_id", "sex_id")]
age_1 <- unique(age_1, by = c("location_id", "year_id", "sex_id"))
age_1[, age_group_id := 1]
populations <- populations[!age_group_id %in% c(2, 3, 4, 5)]
populations <- rbind(populations, age_1) ##add age group id 1 back on
all_age_total <- merge(all_age_total, populations, by = c("location_id", "sex_id", "year_id", "age_group_id"))
#####CALCULATE AGE SPLIT POINTS#######################################################################
##Create new split data points
all_age_total[, total_pop := sum(population), by = "seq"]
all_age_total[, sample_size := (population / total_pop) * sample_size]
all_age_total[, cases_dis := sample_size * rate_dis]
all_age_total[, total_cases_dis := sum(cases_dis), by = "seq"]
all_age_total[, total_sample_size := sum(sample_size), by = "seq"]
all_age_total[, all_age_rate := total_cases_dis/total_sample_size]
all_age_total[, ratio := mean / all_age_rate]
all_age_total[, mean := ratio * rate_dis]
######################################################################################################
##Epi uploader formatting
all_age_total <- all_age_total[, (blank_vars) := ""] ##these columns need to be blank
all_age_total[!is.na(specificity), specificity := paste0(specificity, ", age-split child")]
all_age_total[is.na(specificity), specificity := paste0(specificity, ",age-split child")]
all_age_total[, group_review := 1]
all_age_total <- all_age_total[,c(epi_order), with=F]
setcolorder(all_age_total, epi_order)
##Add to originals with group review 0
all_age_parents <- all_age_parents[,c(epi_order), with=F]
setcolorder(all_age_parents, epi_order)
invisible(all_age_parents[, group_review := 0])
invisible(all_age_parents[!is.na(specificity), specificity := paste0(specificity, ", age-split parent")])
invisible(all_age_parents[is.na(specificity), specificity := paste0(specificity, "age-split parent")])
total <- rbind(all_age_parents, all_age_total)
setnames(total, note_variable, "note_modeler_info")
if (region_pattern ==T) {
total[group_review==1, note_modeler_info := paste0(note_modeler_info, "| Age split using the region age pattern.")]
total[group_review==0, note_modeler_info := paste0(note_modeler_info, "| GR 0. Age split using the region age pattern in separate rows.")]
} else {
total[, note_modeler := paste0(note_modeler, "| age split using the age pattern from location id ", location_pattern_id)]
}
total[, specificity := gsub("NA", "", specificity)]
total[, note_modeler_info := gsub("NA", "", note_modeler_info)]
setnames(total, "note_modeler_info", note_variable)
total[group_review==0, input_type := "group_review"]
total[is.na(group), group := nid]
total[, unit_value_as_published := 1]
write.xlsx(total, paste0(uploads, acause,"_", bundle_id, "_", output_file, ".xlsx"), sheetName = "extraction")
|
8b13a2b15d823549a799345f65272e407b16c751 | 58d4f0b7d4d221bdb2d38328f8375e139981862f | /Topic2_Debt/Zombie_interest.R | ac71b578783ad56c9490c07823884ec3d8266b2b | [] | no_license | tsuilf/fin20 | 9bc544fbc06b17876d7a76335487a62b6990512a | fc637467601b6e13d9e75f266a4022f2e3b7dab1 | refs/heads/master | 2023-03-21T14:47:06.547701 | 2020-02-10T13:09:18 | 2020-02-10T13:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,149 | r | Zombie_interest.R | library(tidyverse)
library(tibbletime)
library(lubridate)
library(readxl)
CMMPI <- read_excel("data/CMMPI_Irfil.xlsx")
CMMPI <- CMMPI %>%
mutate(
begin_date = Datesgn %>% ymd(),
short_rate = if_else(is.na(Irfil02), Irfil28, Irfil02),
long_rate = if_else(is.na(Irfil04), Irfil29, Irfil04)
) %>%
select(begin_date, short_rate, long_rate)
CMMPI_year <- CMMPI %>%
mutate(
end_date = lead(begin_date, default = ymd('2017-12-31')),
date = map2(begin_date, end_date-1, ~seq(.x, .y, by = "day"))
) %>%
unnest(date) %>%
mutate(
year = year(date)
) %>%
group_by(year) %>%
summarise_at(vars(contains("rate")), mean) %>%
write_csv("Zombie_data/Loan_interest.csv")
#####################################
BND <- read_excel("data/BND_Ccbdinfo.xlsx")
BND <- BND %>%
select(Bndyer, Intrrate)
get_intrrate <- function(year){
BND %>%
filter(Bndyer <= year, Bndyer > year - 5) %>%
summarise(Intrrate = min(Intrrate)) %>%
pull(Intrrate)
}
tibble(
year = 2000:2017
) %>%
mutate(
Bond_rate = year %>% map_dbl(get_intrrate)
) %>%
write_csv("Zombie_data/Bond_interest.csv")
|
12ad108ebcb0705c641ba7b6207fecb5966c1062 | e974c37a74c8e6690c230f269fe6384e8e7fc816 | /R/Lib/downloadSingleTickerPrices.R | 74d518e11529a8b1561d868b7376b399a5bf6478 | [] | no_license | aagret/QuantDataResearch | b78854964fd149e347be044262fc1a5a77b5f7a3 | 9a76c6dcd165fd033dd5b781fe9b87e227a168f6 | refs/heads/master | 2020-04-11T09:04:21.460415 | 2018-12-13T16:52:51 | 2018-12-13T16:52:51 | 63,428,533 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 855 | r | downloadSingleTickerPrices.R |
ticker <- "AD NA Equity"
#### load Init and requested functions ####
source("prod-V-0.9/InitFundData.R")
# get oldest date in database
startDate <- min(database$Date)
# get daily Fields
fields <- scan("ConfigFiles/dailyDataFields.txt", what= "character")
# connect to Bloomberg
connection <- blpConnect()
# download from bloomberg
ndd <- bdh(ticker, fields, startDate,
options= c("CapChg"="Yes",
"CshAdjAbnormal"="No",
"CshAdjNormal"="No"))
ndd$Ticker <- ticker
colnames(ndd)[1] <- "Date"
setDT(ndd, key=c("Ticker", "Date"))
# close Bloomberg Connection
blpDisconnect(connection)
histoPrices <- rbind(histoPrices, ndd)
setkey(histoPrices, NULL)
histoPrices <- unique(histoPrices)
setkey(histoPrices, Ticker, Date)
# save histoPrices.RData
save(histoPrices, file= "histoPrices.RData")
|
655b45d7b4b62ffacf9683c13dc6177682912291 | 281c0b5f629004cdff576c6d43709f4c6d9f2a26 | /run_analysis.R | 5b2b4f01cece822903ab78f49176d79969d2ca4d | [] | no_license | stevodurbaba/Getting-and-Cleaning-Data-Project | f4b956a2c1626704c66913e4637b09a7d38793a4 | e9ae85bbbc5ff087ac97fd8fb5086d7f032a2fdf | refs/heads/master | 2021-01-22T01:06:05.473456 | 2014-05-25T20:49:10 | 2014-05-25T20:49:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,215 | r | run_analysis.R | ## Assumes data file are in the same folder as this R script
## read the data files
subject_test <- read.table("subject_test.txt")
x_test <- read.table("X_test.txt")
y_test <- read.table("y_test.txt")
subject_train <- read.table("subject_train.txt")
x_train <- read.table("x_train.txt")
y_train <- read.table("y_train.txt")
# read the features and activity file
features <- read.table("features.txt")
activity_labels <- read.table("activity_labels.txt")
## renames columns
colnames(subject_test) <- "Subject"
colnames(subject_train) <- "Subject"
colnames(y_test) <- "Activity"
colnames(y_train) <- "Activity"
colnames(features) <- c("Num","Description")
colnames(activity_labels) <- c("Activity","Lables")
# use the feature data to rename columns
colnames(x_test) <- features$Description
colnames(x_train) <- features$Description
# calculate which data is for mean and std readings by using the Description
features$Required <- grepl("mean",features$Description) | grepl("std",features$Description)
Required <- which(features$Required == TRUE)
# subset the x test and training data for mean and std
x_test_sub <-data.frame(x_test[,Required])
x_train_sub <-data.frame(x_train[,Required])
# combine test data
test_set <-cbind(subject_test,y_test)
test_set <-cbind(test_set,x_test_sub)
# combine training data
train_set <-cbind(subject_train,y_train)
train_set <-cbind(train_set,x_train_sub)
#combine test and training set
data_set <- rbind(test_set,train_set)
# aggregate means by Subject and Activity
agg_data <-aggregate(data_set, by=list(data_set$Subject,data_set$Activity),FUN=mean, na.rm=TRUE)
# replace the Activity number with the Activity stringr
agg_data$Activity <- activity_labels[agg_data$Activity,2]
#tidy up the dataframe by removing columns created by the aggregate function
agg_data$Group.1 <- NULL
agg_data$Group.2 <- NULL
# add "mean" to the colunm names
agg_colnames <-colnames(agg_data)
agg_colnames <- paste0("mean.",agg_colnames)
colnames(agg_data) <- agg_colnames
#tidy up the colunm names
colnames(agg_data)[1] <- "Subject"
colnames(agg_data)[2] <- "Activity"
#write the dataframe to the file tidy_data.txt
write.table(agg_data,"tidy_data.txt", row.names = FALSE)
|
672cdc57555e551da7195371dfb83377f79e62ee | cd679c99468a8d78beca6f971c3ccafb25e48bc0 | /Plot6.r | ee76fafff05da1811e52c71f718e6a457acf0e0c | [] | no_license | DamjanStefanovski/Exploratory-Data-Analysis-Project2 | a1888a052507f5512750fa3a2a20c99b52955dff | 8d79b665fd44a32baeb1d11cea7e69f6a55d7fb7 | refs/heads/master | 2021-01-22T11:28:08.436112 | 2017-05-29T04:40:48 | 2017-05-29T04:40:48 | 92,698,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,642 | r | Plot6.r | # Packs
library(plyr)
library(ggplot2)
library(RColorBrewer)
library(ggthemes)
# Setting a theme
theme_set(theme_fivethirtyeight())
# Read Data
NEI <- readRDS("./summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")
## elaborate plotdata:Total PM25 emission from Baltimore City and
## Los Angeles County per year for motor vehicles
# get Baltimore and Los Angeles NEI data
NEIBaLa <- subset(NEI, fips == "24510" | fips == "06037")
# get motor vehicle SCC
VehicleSCC <- SCC[grepl("Vehicle", SCC$EI.Sector),]
# select baltimore data based on vehicle sources
vehicleBaLa <- subset(NEIBaLa, NEIBaLa$SCC %in% VehicleSCC$SCC)
# assign the city name, based on fips code
vehicleBaLa$city <- rep(NA, nrow(vehicleBaLa))
vehicleBaLa[vehicleBaLa$fips == "06037", ][, "city"] <- "Los Angeles County"
vehicleBaLa[vehicleBaLa$fips == "24510", ][, "city"] <- "Baltimore City"
# make plotdata
plotdata <- aggregate(vehicleBaLa[c("Emissions")],
list(city = vehicleBaLa$city,
year = vehicleBaLa$year), sum)
# Create and plot a file
png('plot6.png', width=480, height=480)
ggp <- ggplot(plotdata, aes(x=factor(year), y=Emissions, fill=city)) +
geom_bar(aes(fill=year),stat="identity") +
facet_grid(scales="free", space="free", .~city) +
guides(fill=FALSE) + theme_fivethirtyeight() +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
labs(title=expression("PM"[2.5]*"Emissions in Baltimore & LA, 1999-2008 for Motor Vehicles"))
print(ggp)
dev.off()
|
91397066b9f8b166ae01d12d13518af22ec771f1 | d11d7fe9df513536af20898f7dd8c36beec2ed06 | /AR_Scrapping.R | 914deb00550ea5a7945b2e7f66873419f66970e6 | [] | no_license | urosgodnov/Trademarks | b54a1b3e913825ad95c6563504709e1392d707b8 | 3cd8e9685e54c961b96bd13a591c527355ecaf36 | refs/heads/master | 2021-09-06T10:48:14.471968 | 2018-02-05T19:08:24 | 2018-02-05T19:08:24 | 105,199,426 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,383 | r | AR_Scrapping.R | translateNat = function (api_key, text = "", lang = "")
{
url = "https://translate.yandex.net/api/v1.5/tr.json/translate?"
url = paste(url, "key=", api_key, sep = "")
if (text != "") {
url = paste(url, "&text=", text, sep = "")
}
if (lang != "") {
url = paste(url, "&lang=", lang, sep = "")
}
url = gsub(pattern = " ", replacement = "%20", x = url)
d = RCurl::getURL(url, ssl.verifyhost = 0L, ssl.verifypeer = 0L)
d = jsonlite::fromJSON(d)
d$code = NULL
d
}
getStatus <- function(x) {
if (x == 'Abandonada' ||
x == 'Caducidad' ||
x == 'Denegada' || x == 'Desistida' || x == 'Nulidad') {
m <- 'INACTIVE'
}
else {
m <- 'REGISTERED'
}
return(m)
}
getType <- function(x) {
if (x == 'Denominativa') {
m <- 'WORD'
} else if (x == 'Mixta') {
m <- 'WORD-DEVICE'
} else if (x == 'Figurativa') {
m <- 'DEVICE'
} else if (x == 'Tridimensional') {
m <- '3D'
} else if (x == 'Sonora') {
m <- 'SOUND'
} else {
m <- 'NO TM LIKE THAT'
}
return(m)
}
getDatePattern <- function(text, pattern) {
result <- regmatches(text, regexec(pattern, text))
r <- result[[1]][2]
r <- strapplyc(r, "[0-9/]{8,}", simplify = TRUE)
date <- try(as.Date(r, "%d/%m/%Y"), silent = TRUE)
date <- try(format(date, "%d.%m.%Y"), silent = TRUE)
if (length(date) == 0 ||
is.na(date) || class(date) == 'try-error') {
date <- format(as.Date("1800-01-01", "%Y-%m-%d"), "%d.%m.%Y")
}
return(date)
}
getNamesPattern <- function(text, pattern) {
# pattern<-".*Denominación:(.*?)Tipo.*"
result <- regmatches(text, regexec(pattern, text))
r <- result[[1]][2]
r <- gsub("\r\n", "", r)
r <- trimws(r, 'b')
return(r)
}
# Scrape trademark data from the Argentine INPI portal for one application number.
#
# Args:
#   AppNo: trademark application number; commas, slashes and dots are stripped
#          before it is embedded in the query URL.
# Returns: a one-row data.frame of application/registration/publication/owner
#   details with human-readable column names, or an empty data.frame when the
#   page could not be read.
# Side effects: writes the trademark logo (if any) to ./logos/<AppNo>.jpg and
#   calls an external translation helper (`translateNat`) over the network.
# NOTE(review): relies on helpers defined elsewhere in this file/project:
#   getDatePattern, getNamesPattern, getStatus, getType, translateNat.
ARScrap <- function(AppNo) {
  #AppNo <-1845178
  #Making URL and Reading data
  AppNo <- gsub(",", "", AppNo)
  AppNo <- gsub("/", "", AppNo)
  AppNo <- gsub(".", "", AppNo, fixed = TRUE)
  # SECURITY NOTE(review): hard-coded translation API key committed to source;
  # it should be revoked/rotated and loaded from an environment variable.
  api_key<-"trnsl.1.1.20171210T185538Z.2c5e808ac4cefb09.3126bf9f8e6ea8f45076615d7c497b88aaf45014"
  url <-
    paste(
      "https://portaltramites.inpi.gob.ar/Clasico/Docs/ResultadosConsultas/ResultadoSolicitudMarca2.asp?Va=",
      AppNo,
      sep = ""
    )
  data <- try(url %>% read_html(encoding = "utf-8"), silent = TRUE)
  if (class(data) != "try-error")
  {
    AppNoUpon<-NA
    #going through loops to find newer appno if any
    # Follow up to 5 "Renovada por" (renewed-by) links so that we always scrape
    # the most recent record in a renewal chain.
    for (i in 1:5) {
      AppNoNew <-
        data %>% html_node(xpath = "//tr//div[contains(.,'Direccion de Marcas')]/following::tr[4]//td[text()='Renovada por']/following::td[1]") %>%
        html_text()
      AppNoNew <- try(as.numeric(gsub("\\D+", "", AppNoNew)))
      if (!is.na(AppNoNew)) {
        AppNoUpon <- AppNoNew
        url <-
          paste(
            "https://portaltramites.inpi.gob.ar/Clasico/Docs/ResultadosConsultas/ResultadoSolicitudMarca2.asp?Va=",
            AppNoUpon,
            sep = ""
          )
        data <-
          try(url %>% read_html(encoding = "utf-8"), silent = TRUE)
      } else {
        break
      }
    }
    # Filing (application) date, parsed out of the "Presentacion" block.
    application <-
      data %>% html_node(xpath = "//tr//div[contains(.,'Presentaci')]") %>%
      html_text()
    application <-
      getDatePattern(application, ".*Presentaci(.*?)Denominaci.*")
    # Registration number from the "Resolucion" section.
    registrationNo <-
      data %>% html_node(xpath = "//strong[contains(.,'Resoluci')]/following::tr[1]//span[text()='Nro:']/following::span") %>%
      html_text()
    registrationNo <- try(as.numeric(registrationNo))
    if (length(registrationNo) == 0) {
      registrationNo <- NA
    }
    #Previous registration number
    prevRegNo <-
      data %>% html_node(xpath = "//tr//div[contains(.,'Direccion de Marcas')]/following::tr[4]//td[span/text()='Renov. de']") %>%
      html_text()
    prevRegNo <- try(as.numeric(prevRegNo))
    if (length(prevRegNo) == 0) {
      prevRegNo <- NA
    }
    # Renewal (expiry) date from the "Vence:" cell.
    renewal <-
      try(as.Date(
        data %>% html_nodes(xpath = "//tr/td[text()='Vence:']/following::td[1]") %>%
          html_text(),
        "%d/%m/%Y"
      ))
    ####Acceptance - it has to be in unambiguous format
    acceptance <- try(as.POSIXlt(renewal))
    renewal <- format(renewal, "%d.%m.%Y")
    # Missing/unparseable dates are encoded with the sentinel 01.01.1800.
    if (length(renewal) == 0 || is.na(renewal)) {
      renewal <- format(as.Date("1800-01-01", "%Y-%m-%d"), "%d.%m.%Y")
    }
    TMName <-
      data %>% html_node(xpath = "//tr//div[contains(.,'Presentaci')]") %>%
      html_text()
    TMName <- getNamesPattern(TMName, ".*Denominación:(.*?)Tipo.*")
    # Registration (acceptance) date is derived as renewal date minus 10 years
    # — assumes a 10-year registration term; TODO confirm against INPI rules.
    acceptance$year <- acceptance$year - 10
    acceptance <- format(acceptance, "%d.%m.%Y")
    if (length(acceptance) == 0 || is.na(acceptance)) {
      acceptance <- format(as.Date("1800-01-01", "%Y-%m-%d"), "%d.%m.%Y")
    }
    #Priority date
    priority <-
      as.Date(
        data %>% html_node(xpath = "//strong[contains(.,'Prioridades')]/following::tr[1]//span[text()='Fecha:']/following::td") %>%
          html_text(),
        "%d/%m/%Y"
      )
    priority <- format(priority, "%d.%m.%Y")
    if (length(priority) == 0 || is.na(priority)) {
      priority <- format(as.Date("1800-01-01", "%Y-%m-%d"), "%d.%m.%Y")
    }
    # Priority number and country (whitespace stripped).
    priorityNo <-
      gsub(
        " ",
        "",
        data %>% html_node(xpath = "//strong[contains(.,'Prioridades')]/following::tr[1]//span[text()='Prioridad: ']/following::td") %>%
          html_text()
      )
    if (length(priorityNo) == 0) {
      priorityNo <- NA
    }
    priorityCountry <-
      gsub(
        " ",
        "",
        data %>% html_node(xpath = "//strong[contains(.,'Prioridades')]/following::tr[1]//div[contains(.,'Pais:')]/following::td") %>%
          html_text()
      )
    if (length(priorityCountry) == 0) {
      priorityCountry <- NA
    }
    #PublicationDate
    publication <-
      data %>% html_node(xpath = "//span[contains(.,'PUBLICACI')]/following::tr[1]") %>%
      html_text()
    publication <- getDatePattern(publication, ".*Fecha(.*?)Numero.*")
    publicationNo <-
      data %>% html_nodes(xpath = "//span[contains(.,'PUBLICACI')]/following::tr[1]/td[contains(@class,'titulorojo')]//a") %>%
      html_text()
    # Keep only the leading digits of the publication link text.
    publicationNo <- gsub('([0-9]+).*', '\\1', publicationNo)
    if (length(publicationNo) == 0) {
      publicationNo <- NA
    }
    # Owner name(s); multiple owners are joined with " | ".
    owner <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//p[text()='Nombre']/following::td[1]")
        %>% html_text()
      )
    if (length(owner) == 0) {
      owner <- NA
    }
    owner <- paste(owner, collapse = " | ")
    # Owner address = street address + legal territory, cleaned of dashes.
    ownerAddr <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//td[text()='Domicilio Real']/following::span[1]")
        %>% html_text()
      )
    ownerAddr <- str_trim(ownerAddr)
    OwnerCountry <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//td[text()='Territorio Leg.']/following::td[1]")
        %>% html_text()
      )
    OwnerCountry <- str_trim(OwnerCountry)
    ownerAddr <- paste(ownerAddr, OwnerCountry, sep = " ")
    ownerAddr <- str_trim(gsub("-", "", ownerAddr))
    if (length(ownerAddr) == 0) {
      ownerAddr <- NA
    }
    ownerAddr <- paste(ownerAddr, collapse = " | ")
    # Agent on record: name (digits stripped) plus role in parentheses, then
    # the agent's legal address appended on a new line.
    agentOnRecord1 <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Gestion')]/following::tr[1]//td[text()='Agente:']/following::td[1]")
        %>% html_text()
      )
    agentOnRecord1 <- str_trim(gsub('[[:digit:]]+', '', agentOnRecord1))
    agentOnRecord2 <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Gestion')]/following::tr[1]//span[text()='Caracter:']/following::td[1]")
        %>% html_text()
      )
    if (length(agentOnRecord2) > 0) {
      agentOnRecord <- paste(agentOnRecord1, " (", agentOnRecord2, ")", sep =
                               "")
    } else {
      agentOnRecord <- agentOnRecord1
    }
    if (length(agentOnRecord) == 0) {
      agentOnRecord <- NA
    }
    AgentOnRecordAddr <-
      data %>% html_node(xpath = "//tr//div[contains(.,'Presentaci')]") %>%
      html_text()
    AgentOnRecordAddr <-
      str_trim(getNamesPattern(AgentOnRecordAddr, ".*Domicilo legal:(.*?)Renov. de*"))
    AgentOnRecordAddr <- paste(AgentOnRecordAddr, ' ARGENTINA', sep = ",")
    if (length(AgentOnRecordAddr) == 0) {
      AgentOnRecordAddr <- NA
    }
    agentOnRecord <- paste(agentOnRecord, AgentOnRecordAddr, sep = "\n ")
    #NA
    associatedTMs <- NA
    ###Dealing with images
    # The logo is served as a base64 data URI inside a linked page; decode it
    # and write it to ./logos/<AppNo>.jpg (directory assumed to exist).
    imageUrl <-
      data %>% html_node(xpath = "//tr//td[@class='titulorojo']/a") %>%
      html_attr("href")
    if (length(imageUrl) == 1 && !is.na(imageUrl)) {
      cat(paste("\n", "Downloading image...", sep = ""))
      img <- imageUrl %>% read_html() %>% html_nodes("img")
      img_src <- html_attr(img, "src")
      if (length(img_src) == 1) {
        img_src <- gsub("^data.*base64,", "", img_src)
        imageJPG <- try(image_read(base64_decode(img_src)), silent = TRUE)
        if (class(imageJPG) != "try-error") {
          image_write(imageJPG, path = paste("./logos/", AppNo, ".jpg", sep = ""))
        }
        #python.call("decodeImgData", imageUrl,as.character(AppNo))
      } else {
        imageUrl <- NA
      }
    } else {
      imageUrl <- NA
    }
    #####Classes
    # Pre-build a 1-row frame with class1..class9 / description1..description9,
    # although only class1/description1 are ever filled below.
    tmpDF <- data.frame(matrix(ncol = 18, nrow = 1))
    class <-
      sapply(as.list(1:9), function(x) {
        return(paste("class", x, sep = ""))
      })
    desc <-
      sapply(as.list(1:9), function(x) {
        return(paste("description", x, sep = ""))
      })
    colnames(tmpDF) <- c(class, desc)
    classNo <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//td[contains(.,'Clase')]")
        %>% html_text()
      )
    if (length(classNo) > 0)
    {
      classNo <- getNamesPattern(classNo, ".*Clase(.*?)Proteccion.*")
    } else
    {
      classNo <- NA
    }
    #Using LimDisc to join with description
    LimDis <-
      gsub(
        "\r\n",
        "",
        data %>% html_node(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//td[contains(.,'Limitacion:')]/following::td[1]")
        %>% html_text()
      )
    if (length(LimDis) == 0) {
      LimDis <- ""
    }
    LimDis <- str_trim(gsub("-", "", LimDis))
    classDes <-
      gsub(
        "\r\n",
        "",
        data %>% html_nodes(xpath = "//strong[contains(.,'Datos de titulares')]/following::tr[1]//td[contains(.,'Clase')]")
        %>% html_text()
      )
    if (length(classDes) > 0)
    {
      classDes <- getNamesPattern(classDes, ".*Proteccion.(.*?)Limitacion*")
    } else {
      classDes <- NA
    }
    tmpDF$class1 <- classNo
    # Merge the limitation text into the description, normalise semicolons,
    # and translate the Spanish description to English via the external API.
    classDes <- paste(classDes,LimDis,sep=" ")
    classDes<-gsub(";","\\.",classDes)
    transdata<-translateNat(api_key,text=classDes,lang="en")
    tmpDF$description1 <-transdata[[2]]
    LimDis <- NA
    words <- NA
    image <- NA
    status <-
      data %>% html_node(xpath = "//strong[contains(.,'Resoluci')]/following::tr[1]//strong[text()='Tipo:']/following::span") %>%
      html_text()
    status <- getStatus(status)
    # A renewal date in the past overrides whatever the page says.
    if (as.Date(renewal,"%d.%m.%Y")<today())
    {status<-"INACTIVE"}
    kind <-
      data %>% html_node(xpath = "//tr//div[contains(.,'Direccion de Marcas')]/following::tr[2]//td[span/text()='Tipo Marca :']") %>%
      html_text()
    kind <- str_trim(gsub("Tipo Marca :", "", kind))
    kind <- getType(kind)
    #return DF
    # Bind all scalar fields onto the class/description frame; note TMName is
    # intentionally commented out of the output.
    tmpDF <- cbind(
      data.frame(
        AppNo,
        # TMName,
        registrationNo,
        renewal,
        application,
        acceptance,
        priority,
        priorityNo,
        priorityCountry,
        publication,
        publicationNo,
        agentOnRecord,
        associatedTMs,
        prevRegNo,
        status,
        kind,
        words,
        image,
        imageUrl,
        LimDis,
        owner,
        ownerAddr,
        AppNoUpon,
        stringsAsFactors = FALSE
      ),
      tmpDF
    )
    # Rename to the human-readable column names expected downstream.
    tmpDF <- tmpDF %>% dplyr::rename(
      `Application no.` = AppNo,
      # Trademark=TMName,
      `Application date` = application,
      `Registration no.` = registrationNo,
      `Registration date` = acceptance,
      `Next renewal date` = renewal,
      `Priority date` = priority,
      `Priority no.` = priorityNo,
      `Priority country` = priorityCountry,
      `Publication date` = publication,
      `Publication no.` = publicationNo,
      `Application no. assigned upon renewal`=AppNoUpon,
      `TM Type` = kind,
      Status = status,
      `Limitations & Disclaimers` = LimDis,
      `Agent on record` = agentOnRecord,
      Owner = owner,
      `Owner address` = ownerAddr,
      `Associated TMs` = associatedTMs,
      `Previous registration no.` = prevRegNo,
      `1st Class` = class1,
      `1st Goods & Services` = description1,
      `2nd Class` = class2,
      `2nd Goods & Services` = description2,
      `3rd Class` = class3,
      `3rd Goods & Services` = description3,
      `4th Class` = class4,
      `4th Goods & Services` = description4,
      `5th Class` = class5,
      `5th Goods & Services` = description5,
      `6th Class` = class6,
      `6th Goods & Services` = description6,
      `7th Class` = class7,
      `7th Goods & Services` = description7,
      `8th Class` = class8,
      `8th Goods & Services` = description8,
      `9th Class` = class9,
      `9th Goods & Services` = description9
    )
    #
  } else
  {
    # Page could not be fetched/parsed: return an empty data.frame.
    tmpDF = as.data.frame(NULL)
  }
  return(tmpDF)
}
|
9494877d731e8127f0565c5ef7d1b40b072f603b | 4c4b3775c9519bdd2c6e779c091c48ca14b76f02 | /csse_covid_19_data/csse_covid_19_time_series/covid19_visualization/app.R | 0d3385b2798e8d4e04b4b201762a45bd7b0453f7 | [] | no_license | Schubisu/COVID-19 | 507ec60577b42337a5200346de29154ea74c5780 | e1bee18d96f66d0609e38f59beeec01cf86b6dbf | refs/heads/master | 2021-04-02T23:17:27.381174 | 2020-03-26T10:19:03 | 2020-03-26T10:19:03 | 248,335,163 | 0 | 0 | null | 2020-03-18T20:25:16 | 2020-03-18T20:25:15 | null | UTF-8 | R | false | false | 2,690 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
source('visualization.r')
library(ggpubr)
# Define UI for application that draws a histogram
# Shiny UI: sidebar with country/case selectors, main panel with the
# time-series plot and a date-range slider. Depends on `long_data`, which is
# created by the sourced visualization.r script.
ui <- fluidPage(
  # Application title
  titlePanel("COVID-19 visualization"),
  sidebarLayout(
    sidebarPanel(
      # Multi-select over all countries present in the data.
      selectInput("countries",
                  "Countries:",
                  choices = long_data$Country.Region %>% unique(),
                  multiple = TRUE),
      # Toggle a log10 y-axis.
      checkboxInput("log_scale",
                    "Log scale",
                    value = TRUE),
      # Toggle per-capita normalisation of the case counts.
      checkboxInput("by_population",
                    "By Population",
                    value = FALSE),
      # Which case series to draw; values match column names in `long_data`.
      checkboxGroupInput("cases",
                         "Cases",
                         choices = list("Confirmed" = "confirmed",
                                        "Deaths" = "deaths",
                                        "Recovered" = "recovered",
                                        "Fatality" = "fatality"),
                         selected = "confirmed")
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("linePlot"),
      # Slider bounds come from the date range available in the data.
      sliderInput("date_range",
                  "Date Range",
                  min = min(long_data$date), max = max(long_data$date),
                  value = c(min(long_data$date), max(long_data$date)),
                  width = "100%")
    )
  )
)
# Define server logic required to draw a histogram
# Shiny server: reactively filters/reshapes `long_data` according to the UI
# inputs and renders the line plot.
server <- function(input, output) {
  # Reactive long-format data frame: one row per country/date/case,
  # restricted to the selected countries, cases and date range.
  data <- reactive({
    long_data %>%
      filter(Country.Region %in% input$countries) %>%
      select(c("Country.Region", "date", "population"), input$cases) %>%
      pivot_longer(
        c(-Country.Region, -date, -population),
        names_to = "case",
        values_to = "value") %>%
      # Divide by population only when the checkbox is ticked; rep() expands
      # the scalar flag to a per-row logical for ifelse().
      mutate(value = ifelse(rep(input$by_population, nrow(.)), value / population, value)) %>%
      filter(date >= input$date_range[1], date <= input$date_range[2])
  })
  # One line per country/case combination; optional log10 y-axis.
  output$linePlot <- renderPlot({
    p <- data() %>%
      ggplot(aes(x = date, y = value, color = Country.Region, linetype = case)) +
      geom_line(size = 1.1) +
      theme_pubr()
    if (input$log_scale) {
      p <- p + scale_y_log10()
    }
    p
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
91d547b220ba4590d991d2bf8b9b202ec4eaac3f | 10ea266d27c31046cc938255c4bab83f525f09f8 | /plot_load.r | 2767c7eb03a62ddfb53fbb39f957e0ea454b4051 | [] | no_license | sadiquemohd/ExData_Plotting1 | 0826ec2dd86ccd1efd59117039d16a240029ba15 | 9b57a3903586824d41ac7597d2643a48fa4a194f | refs/heads/master | 2020-03-08T16:40:16.379970 | 2018-04-07T09:24:00 | 2018-04-07T09:24:00 | 128,245,846 | 0 | 0 | null | 2018-04-05T18:13:00 | 2018-04-05T18:12:58 | null | UTF-8 | R | false | false | 884 | r | plot_load.r | #Purpose To plotting
# Function to load the ec consumption file
# Load the household power consumption data set and subset it to two dates.
#
# Args:
#   fileurl: URL of the zipped data set (only used when the txt file is
#     absent from the working directory).
#   date1, date2: "%Y-%m-%d" strings; only rows whose Date equals one of
#     these two dates are kept.
# Returns: a data.frame restricted to the two requested dates.
# NOTE(review): setwd() mutates global session state; kept for backward
# compatibility with the original script.
plot_dataset <- function (fileurl, date1, date2)
{
  fpath <- "F:/c4proj/"
  zipfilename <- "household_power_consumption.zip"
  setwd(fpath)
  fname <- "household_power_consumption.txt"
  if (!file.exists(fname)){
    # BUG FIX: the original called download.file(fileurl, zipfile) with an
    # undefined variable `zipfile`; the intended destination is `zipfilename`.
    download.file(fileurl, zipfilename)
    unzip(zipfile = zipfilename)
  }
  #load the training data sets
  # Restricted to 150K rows due to the capacity of the PC to process
  ec_data <- read.table(fname, header = TRUE, sep = ";", na.strings = "?", nrows = 150000)
  # Build a proper datetime from the Date + Time columns, then parse Date.
  ec_data$Time <- strptime(paste(ec_data$Date, ec_data$Time), "%d/%m/%Y %H:%M:%S")
  ec_data$Date <- as.Date(ec_data$Date, "%d/%m/%Y")
  # only use data from the dates 2007-02-01 and 2007-02-02
  date_range <- as.Date(c(date1, date2), "%Y-%m-%d")
  ec_subset <- subset(ec_data, Date %in% date_range)
  return(ec_subset)
}
|
79526a74a44d9c1589893ffec23503a8e499e5eb | d03c9f06e40d581ac7075376c71ebc008a4a06e1 | /R/install_starred_cran.R | bf467448d18c9e9df5c2aaaa40563202d92b7cbf | [
"MIT"
] | permissive | hadley/batata | 3b90bec20713905aa32a31b70bee82280fef8323 | a606f2448e68bb9e6c798bff548bad2a959bbd19 | refs/heads/master | 2023-03-18T20:57:19.662817 | 2021-03-08T09:55:33 | 2021-03-08T09:55:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,488 | r | install_starred_cran.R |
#' Install Github Starred CRAN Packages
#'
#' @param github_user the Github user name to look for
#' @param n the last 'n' starred repositories. Defaults to 5, in which case
#'   the last 5 starred repositories are fetched, the R ones are kept and
#'   installed from CRAN.
#' @return called for the side effect of installing the Github starred
#'   packages that are available on CRAN
#'
#' @export
#' @description installs the Github starred packages from CRAN
install_starred_cran <- function(github_user, n = 5) {
  if (!is.character(github_user)) {
    stop("'github_user' must be provided as a character string")
  }
  if (!is.numeric(n) || n < 1) {
    stop("the 'n' parameter must be numeric and greater than 1")
  }
  starred <- jsonlite::fromJSON(glue::glue("https://api.github.com/users/{github_user}/starred?per_page={n}"))
  # Keep only repositories flagged as R, dropping NA language entries.
  r_repo_names <- Filter(Negate(is.na), starred[starred$language == "R", ]$name)
  if (length(r_repo_names) == 0) {
    message("I can't find any R starred package, go starr some !")
  } else {
    message(glue::glue("the following repositories, if R packages and availables on CRAN, will be installed:
                       {paste(r_repo_names, collapse = ', ')}"))
    # safely() keeps one failed install from aborting the rest.
    purrr::map(
      r_repo_names,
      purrr::safely(~ utils::install.packages(.x))
    )
  }
}
#' Install Github Starred Packages from Github
#'
#' @param github_user the Github user name to look for
#' @param n the last 'n' starred repositories. Defaults to 5, in which case
#'   the last 5 starred repositories are fetched, the R ones kept and
#'   installed.
#' @param upgrade whether to upgrade out of date packages. You can choose
#'   from 'always' or 'never'. Defaults to 'never'. For more info,
#'   see <install_github()> from the 'remotes' package.
#'
#' @return called for the side effect of installing the Github starred
#'   repositories
#' @export
#' @description installs the Github starred repositories from Github and not
#'   from CRAN.
install_starred_github <- function(github_user, n = 5, upgrade = "never") {
  if (!is.character(github_user)) {
    stop("'github_user' must be provided as a character string")
  }
  if (!is.numeric(n) || n < 1) {
    stop("the 'n' parameter must be numeric and greater than 1")
  }
  if (!(upgrade %in% c("never", "always"))) {
    stop(glue::glue("upgrade takes only the following arguments:
                    - 'never'
                    - 'always'"))
  }
  starred <- jsonlite::fromJSON(glue::glue("https://api.github.com/users/{github_user}/starred?per_page={n}"))
  # Keep only the full names ("owner/repo") of repositories flagged as R.
  r_repos <- Filter(Negate(is.na), starred[starred$language == "R", ]$full_name)
  if (length(r_repos) == 0) {
    message("I can't find any R starred package, go starr some !")
  } else {
    message(glue::glue("the following repositories, if R packages, will be installed:
                       {paste(r_repos, collapse = ', ')}"))
    # safely() keeps one failed install from aborting the rest.
    purrr::map(
      r_repos,
      purrr::safely(~ remotes::install_github(.x, upgrade = upgrade))
    )
  }
}
#' Install the most starred CRAN packages
#'
#' @param n the most starred CRAN packages. Defaults to 10, in which case the
#'   10 most starred R repositories are looked up and installed if available
#'   on CRAN.
#'
#' @return called for the side effect of installing most starred CRAN packages
#' @export
#'
install_most_starred <- function(n = 10) {
  if (!is.numeric(n) || n < 1) {
    stop("the 'n' parameter must be numeric and greater than 1")
  }
  # Github search API: R repositories sorted by stars, descending.
  search_results <- as.data.frame(
    jsonlite::fromJSON(glue::glue("https://api.github.com/search/repositories?q=language:R&sort=stars&order=desc&per_page={n}"))
  )
  repo_names <- Filter(Negate(is.na), search_results$items.name)
  message(glue::glue("the following repositories, if availables on CRAN, will be installed:
                     {paste(repo_names, collapse = ', ')}"))
  # safely() keeps one failed install from aborting the rest.
  purrr::map(
    repo_names,
    purrr::safely(~ utils::install.packages(.x))
  )
}
#' Display User's Github Starred Repositories
#'
#' @param github_user the Github user name to look for
#' @param n the number of the last starred repositories. Defaults to 5 in
#'   which case it will return the last 5 starred repositories. Note that if
#'   the 'onlyR' parameter is set to TRUE, you might get a lower number of
#'   starred repos due to filtering R from all the other languages.
#' @param onlyR Logical, whether to fetch only R repositories, Default to FALSE
#'
#' @return A character vector of starred Github repositories, NA when none
#'   are found, or NULL when the request fails.
#' @export
#'
#'
display_starred <- function(github_user, n = 5, onlyR = FALSE) {
  if (!is.character(github_user)) {
    stop("'github_user' must be provided as a character string")
  }
  if (!is.numeric(n) || n < 1) {
    stop("the 'n' parameter must be numeric and greater than 1")
  }
  tryCatch(
    expr = {
      data <- jsonlite::fromJSON(glue::glue("https://api.github.com/users/{github_user}/starred?per_page={n}"))
      starred_repo <- if (onlyR) {
        data[data$language == "R", ]$full_name
      } else {
        data$full_name
      }
      if (length(starred_repo) == 0) {
        message("I can't find any R starred package, go starr some !")
        return(NA)
      } else {
        return(starred_repo)
      }
    },
    error = function(e) {
      # FIX: use conditionMessage() instead of coercing the condition object
      # itself through grepl()/paste0().
      err_msg <- conditionMessage(e)
      if (grepl("HTTP error 404", err_msg, fixed = TRUE)) {
        message(paste0("Error: ", err_msg, " maybe you've provided a non existing account???"))
      } else {
        # FIX: previously non-404 errors were swallowed silently (the handler
        # returned NULL with no message); surface them to the user instead.
        message(paste0("Error: ", err_msg))
      }
      return(NULL)
    }
  )
}
#' Display the most starred R Github Repositories
#'
#' @param n the number of most starred Github R repositories to fetch.
#'   Defaults to 10.
#' @return a character vector of the most starred R repositories
#' @export
#'
display_most_starred <- function(n = 10) {
  if (!is.numeric(n) || n < 1) {
    stop("the 'n' parameter must be numeric and greater than 1")
  }
  # Github search API: R repositories sorted by stars, descending.
  search_url <- glue::glue("https://api.github.com/search/repositories?q=language:R&sort=stars&order=desc&per_page={n}")
  search_results <- as.data.frame(jsonlite::fromJSON(search_url))
  search_results$items.full_name
}
|
0650b43c35857690b3a73af89b0720392f9e5fa2 | e54648b48eaa75e05ffa13dd4e0f7b20d092e344 | /Linear-Lasso-Classifiers.R | afa6bc208cd282df64f99fe741b3dcc87791cbb7 | [] | no_license | MI-12/Comparison-of-AI-Semantic-features-and-radiomics-features | 5b2a239f000cc733cf506b6f1b8ee4b1b78da1c5 | 48d8f923bccfcb2b05cfb7d70b10587e0e35ab94 | refs/heads/master | 2022-11-05T01:53:28.776173 | 2020-06-18T22:30:09 | 2020-06-18T22:30:09 | 265,733,242 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,229 | r | Linear-Lasso-Classifiers.R |
# Setup: load packages and the four feature matrices (training, internal
# validation, test, external validation) produced by BigBiGAN.
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace;
# kept as in the original.
rm(list = ls())
gc()
library("glmnet")
library("pROC")
library("readxl")
library("nnet")
# The four workbook paths below are placeholders the user must edit before
# running this script.
workbook<-"Feature matrix extracted by BigbiGAN from yout training dataset."
Sta = read_excel(workbook,1)
workbook1<-"Feature matrix extracted by BigbiGAN from yout test dataset."
Sta1 = read_excel(workbook1,1)
workbook2<-"Feature matrix extracted by BigbiGAN from yout validation dataset."
Sta2 = read_excel(workbook2,1)
workbook3<-"Feature matrix extracted by BigbiGAN from yout external validation dataset."
Sta3 = read_excel(workbook3,1)
Train<-data.frame(Sta)
Tval<-data.frame(Sta1)
Test<-data.frame(Sta2)
ExterV<-data.frame(ExterV <- data.frame(Sta3))
FeaNumTrain<-"Dimension of semantic features extracted by BigBiGAN"
FeaNumTest<-"Dimension of semantic features extracted by BigBiGAN"
FeaNumTval<-"Dimension of semantic features extracted by BigBiGAN"
FeaNumExtV<-"Dimension of semantic features extracted by BigBiGAN"
TrainFea<-data.frame(Train)
TvalFea<-data.frame(Tval)
TestFea<-data.frame(Test)
ExterVFea<-data.frame(ExterV)
Trainy<-Train[(ncol(Train)-FeaNumTrain):(ncol(Train))] ##Features are stored at the end of the matrix.
Trainy<-data.frame(Trainy)
##Linear Classifier
# Fit an ordinary least-squares model of the COVID label on all features,
# then evaluate ROC/AUC on the internal validation, test and external
# validation sets.
# FIX: the original used lm("COVID"~., ...) with a character literal as the
# response; a model formula response must reference the column itself.
fm <- lm(COVID ~ ., data = Trainy)
summary(fm)
lmpred_Val <- predict(fm, Tval)
Roc_Val <- roc(Tval$COVID, lmpred_Val)
auc(Roc_Val)
plot(Roc_Val, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
     grid.col=c("green", "red"), max.auc.polygon=TRUE,
     auc.polygon.col="skyblue", print.thres=TRUE)
lmpred_Test <- predict(fm, Test)
Roc_Test <- roc(Test$COVID, lmpred_Test)
auc(Roc_Test)
plot(Roc_Test, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
     grid.col=c("green", "red"), max.auc.polygon=TRUE,
     auc.polygon.col="skyblue", print.thres=TRUE)
lmpred_Ext <- predict(fm, ExterVFea)
Roc_Ext <- roc(ExterVFea$COVID, lmpred_Ext)
auc(Roc_Ext)
plot(Roc_Ext, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
     grid.col=c("green", "red"), max.auc.polygon=TRUE,
     auc.polygon.col="skyblue", print.thres=TRUE)
##Lasso Classifier
# Fit an L1-penalised logistic regression (alpha = 1) with cross-validated
# lambda, then compute AUCs on all four data sets.
y <- Train$COVID
TrainFea_Lasso <- TrainFea
names(TrainFea_Lasso) <- NULL
TrainFea_Lasso <- data.matrix(TrainFea_Lasso)
fit <- glmnet(TrainFea_Lasso, y, alpha = 1, family = 'binomial')
plot(fit, xvar = "lambda", label = TRUE)
cv.fit <- cv.glmnet(TrainFea_Lasso, y, family = "binomial")
# FIX: predict() on a glmnet fit returns a one-column matrix; pROC::roc()
# requires a numeric vector predictor, so coerce with as.numeric() below.
lmpred_Train <- predict(fit, type = "response", newx = TrainFea_Lasso, s = cv.fit$lambda.1se)
Roc_Train <- roc(Train$COVID, as.numeric(lmpred_Train))
auc(Roc_Train)
TvalFea_Lasso <- TvalFea
names(TvalFea_Lasso) <- NULL
TvalFea_Lasso <- data.matrix(TvalFea_Lasso)
lmpred_Tval <- predict(fit, type = "response", newx = TvalFea_Lasso, s = cv.fit$lambda.1se)
Roc_Tval <- roc(Tval$COVID, as.numeric(lmpred_Tval))
auc(Roc_Tval)
TestFea_Lasso <- TestFea
names(TestFea_Lasso) <- NULL
TestFea_Lasso <- data.matrix(TestFea_Lasso)
lmpred_Test <- predict(fit, type = "response", newx = TestFea_Lasso, s = cv.fit$lambda.1se)
Roc_Test <- roc(Test$COVID, as.numeric(lmpred_Test))
auc(Roc_Test)
ExterFea_Lasso <- ExterVFea
names(ExterFea_Lasso) <- NULL
ExterFea_Lasso <- data.matrix(ExterFea_Lasso)
# NOTE(review): the external validation set uses s = cv.fit$lambda.min while
# all other sets use lambda.1se — confirm this asymmetry is intentional.
lmpred_Ext <- predict(fit, type = "response", newx = ExterFea_Lasso, s = cv.fit$lambda.min)
Roc_Ext <- roc(ExterV$COVID, as.numeric(lmpred_Ext))
auc(Roc_Ext)
|
b362300b51c09526e8d624a4873df1f644b2ebef | 668a228704a96a15d892239ae3c60c0c304001c9 | /_run_all_data_transform_model_selection.r | b017151d4841f3455a938da26ceb921c6917c7f3 | [] | no_license | bertozzivill/viral_load_idm | cd10a56417d9e13237d54adc481cdcdc0d1f5aa7 | ddf98e329a96c424b1402ad055fee48aef0d2b99 | refs/heads/master | 2020-04-06T13:13:27.917848 | 2016-11-02T16:56:53 | 2016-11-02T16:56:53 | 44,878,503 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,085 | r | _run_all_data_transform_model_selection.r | ####################################################################################################
## Author: Amelia Bertozzi-Villa
## Description: Survival model coupling nonlinear viral load progression with informative censoring
## based events.
## Input: this_main_dir: Directory in which the file "alldata.rdata" (either original or the training data)
## lives. If not running cross-validation, this will be one of:
## "C:/Users/abertozz/Dropbox (IDM)/viral_load/cascade/data/"
## "C:/Users/cselinger/Dropbox (IDM)/viral_load (1)/cascade/data"
##
## Output: Saves a dataset called "survival.model.output" to this_main_dir, which contains a matrix of
## "lm" elements from which we can later predict and compare to the testing dataset if
## we're validating. Also generates a number of plots for whatever model is deemed best
## by the AIC method of model selection.
##
## Run from within the "vectorize" folder!
####################################################################################################
library(data.table)
library(ggplot2)
library(lme4)
library(reshape2)
library(Amelia)
## is this a validation run? if yes, a variable called "validation" should exist, and it should be TRUE.
## otherwise, set "validation" to F
if (!(exists("validation"))) { validation <- F}
#set main directory
# NOTE(review): ifelse() on a scalar works but plain if/else would be clearer;
# when validation==T, "main_dir", "iteration" and "split" must already exist.
this_main_dir <- ifelse(validation, paste0(main_dir, iteration, "/", split, "/"), # if validation==T, variables called "iteration" and "split" should also exist
                        "C:/Users/abertozzivilla/Dropbox (IDM)/viral_load/cascade/data/")
#set age quintile cutoffs
age_quints <- c(15.4, 28.2, 41, 53.8, 66.6, Inf)
#age_quarts <- c(0, 30.4, 40.3, 50.2, Inf)
#load data
# Loads the `surv` data.table, then renames the debiased event-time column.
load(paste0(this_main_dir, "prepped_data.rdata"))
setnames(surv, "event_timeNew", "event_time_debiased")
############################################################################################################
## Run data transformations
##############################################################################################################
## generate index of imputations we want to run
# Each row of index.data.transform is one data-transformation configuration
# (viral-load upper bound, debiasing, era restriction, observed-only, age
# coding) passed to TransformData below.
imputation_count <- 10
index.data.transform<-expand.grid(upper_bound=c(2.9, 3.0, 3.1),
                                  debias=c(F,T),
                                  pre_1996_only=c(F,T),
                                  observed_only=c(F),
                                  age_type=c("cont", "bin_10"))
# Observed-only runs are only crossed with a single upper bound (2.9).
observed.only.index <- expand.grid(upper_bound=c(2.9),
                                   debias=c(F,T),
                                   pre_1996_only=c(F,T),
                                   observed_only=c(T),
                                   age_type=c("cont", "bin_10"))
index.data.transform <- rbind(index.data.transform, observed.only.index)
## run imputations based on inputs from index.data.transform
run_imputation <- T
if (run_imputation){ # we only ever want to impute on validation datasets now
  print("Running imputation")
  source("TransformData.R")
  run=apply(index.data.transform,1,function(x) paste(x,collapse='-'))
  # One column of results per transformation configuration; TransformData is
  # defined in the sourced TransformData.R.
  data.for.survival<-mapply(TransformData,
                            upper_bound=index.data.transform$upper_bound,
                            debias=index.data.transform$debias,
                            pre_1996_only=index.data.transform$pre_1996_only,
                            observed_only=index.data.transform$observed_only,
                            age_type=index.data.transform$age_type,
                            MoreArgs=list(surv=surv)
  )
  rownames(data.for.survival)<-paste0("imputation_number=",c(1:imputation_count))
  # Column names encode the configuration, with logicals mapped to 0/1.
  colnames(data.for.survival)<-apply(index.data.transform,1,function(x){
    name <- paste(x,collapse="-")
    name <- gsub("FALSE", "0", name)
    name <- gsub(" TRUE", "1", name)
    return(name)
  })
  save(data.for.survival, index.data.transform, file=paste0(this_main_dir, "imputed_survival_data.rdata"))
}else{
  # Reuse previously saved imputations instead of regenerating them.
  load(file=paste0(this_main_dir,"imputed_survival_data.rdata"))
}
############################################################################################################
## Run Models
##############################################################################################################
## generate index of models we want to run
# Each row is one survival-model specification: which SPVL estimate to use,
# which interaction structure, and whether age is included.
index.survival.models<-expand.grid(
  spvl_method=paste0('spvl_',c('model','fraser')),
  interaction_type=c("none", "two_way", "three_way"),
  include.age=T)
index.survival.models$spvl_method<-as.character(index.survival.models$spvl_method)
age.only <- expand.grid(
  spvl_method="none",
  interaction_type="none",
  include.age=T)
spvl.only <- expand.grid(
  spvl_method=paste0('spvl_',c('model','fraser')),
  interaction_type="none",
  include.age=F)
# NOTE(review): null.model has 4 elements but index.survival.models has only
# 3 columns, so the rbind below looks like it would fail — confirm.
null.model <- list("none", "none", F, "none")
index.survival.models <- rbind(index.survival.models, age.only, spvl.only, null.model)
save(index.data.transform, index.survival.models, file=paste0(this_main_dir, "indexes.rdata"))
## run models based on inputs from index.survival.models
# LinearSurvivalModel is defined in the sourced file below; one matrix of
# model fits per imputed data set.
source("LinearSurvivalModel.R")
survival.model.output<-list()
print("running survival models")
for (k in 1:length(data.for.survival)){
  orig_data=data.table(data.for.survival[[k]])
  survival.model.output[[k]]<-mapply(LinearSurvivalModel,
                                     spvl_method=index.survival.models$spvl_method,
                                     interaction_type=index.survival.models$interaction_type,
                                     include.age=index.survival.models$include.age,
                                     MoreArgs=list(orig_data=orig_data))
  colnames(survival.model.output[[k]]) <- apply(index.survival.models,1, function(x) paste(x, collapse="-"))
}
#generate names for this list
# Names encode "<transform-config>-imp_count_<i>" for each of the 10
# imputations of each transformation.
data_transform_names <- apply(index.data.transform,1, function(x) paste(x, collapse="-"))
data_transform_names <- data.table(expand.grid(1:10, data_transform_names))
data_transform_names <- data_transform_names[, list(Var2, imp=paste0("imp_count_",Var1))]
data_transform_names <- apply(data_transform_names, 1, function(x) paste(x, collapse="-"))
names(survival.model.output) <- data_transform_names
if (validation){
  # On validation runs, score the fits against held-out data.
  print("calculating rmse")
  source("calculate_rmse.r")
}else{
  #save regression outputs for cross-validation, as well as the index values telling you what each list element means
  print("saving modeled data")
  save(survival.model.output, index.survival.models, index.data.transform, file=paste0(this_main_dir, "survival_model_output.rdata"))
}
|
268fc3a5ee8926c174550dde920bdada972a0ce1 | 140965c9a052f9acb869104592de931ba493ca41 | /05 Data transformation.R | 0edec65ca9e379d8ac8a5c2c3a0fa375d0e0b313 | [] | no_license | christopherhadley/textook-R4DS | 61c15bcd4347affbdbd3f4f07d1d34ad676b442a | 0ea33cd68e1f2ea7140d80011ce97081dc574968 | refs/heads/master | 2021-01-21T21:15:42.442055 | 2017-06-19T18:57:06 | 2017-06-19T18:57:06 | 94,804,725 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,346 | r | 05 Data transformation.R | # 5 Data Transformation with dplyr
# Pick observations by their values (filter()).
# Reorder the rows (arrange()).
# Pick variables by their names (select()).
# Create new variables with functions of existing variables (mutate()).
# Collapse many values down to a single summary (summarise()).
jan1 <- filter(flights, month == 1, day == 1)
# Can use logical operations:
filter(flights, month == 11 | month == 12)
# short hand:
nov_dec <- filter(flights, month %in% c(11, 12))
& and
| or
! not
# De Morgan’s law: !(x & y) is the same as !x | !y, and !(x | y) is the same as !x & !y.
#Don't use && and || for this
is.na()
# Exercises 5.2.4 - put View() around these to view the whole lot
# 1.1
filter(flights, arr_delay >= 120)
# 1.2
filter(flights, dest == "IAH" | dest == "HOU")
or
filter(flights, dest %in% c("IAH","HOU"))
# 1.3
filter(flights, carrier %in% c("AA","DL","UA"))
# 1.4
filter(flights, carrier %in% c(7,8,9))
# 1.5
filter(flights, dep_delay == 0 & arr_delay >= 120)
# 1.6
filter(flights, dep_delay >= 60 & (dep_delay - arr_delay >= 30))
# 1.7
filter(flights, dep_time >= 0 & dep_time <= 0600 )
filter(flights, between(dep_time,0,600))
filter(flights, is.na(dep_time)
# 5.3 Arrange
arrange(flights, year, month, day) # select * from flights order by year, month, day asc
arrange(flights, desc(arr_delay)) # select * from flights order by arr_delay desc
arrange(flights, is.na())
#5.3.1 Exercises
# 1 - order NAs to the start:
arrange(flights, desc(is.na(dep_time)))
# 2
arrange(flights, desc(arr_delay))
# 3
arrange(flights, air_time/distance)
# 4
arrange(flights, distance)
# Select
select(flights, year, month, day) # select year, month, day from flights
# Various helper functions: starts_with, ends_with, contains, matches, num_range
rename(flights, tail_num = tailnum)
# Mutate - add new cols
flights_sml <- select(flights,
year:day,
ends_with("delay"),
distance,
air_time
)
mutate(flights_sml,
gain = arr_delay - dep_delay,
speed = distance / air_time * 60
)
# can use transmute() to only keep new columns
transmute(flights,
dep_time,
hour = dep_time %/% 100, # integer division by 100
minute = dep_time %% 100 # integer division and keep remainder
)
# Ex 5.5.2
# 1
transmute(flights,
dep_time,
hour = dep_time %/% 100, # integer division by 100
minute = dep_time %% 100, # integer division and keep remainder
minutes_since_midnight = hour * 60 + minute
)
select(flights,air_time,arr_time - dep_time)
transmute(flights,
dep_time,
arr_time,
air_time,
arr_time - dep_time,
dep_time_minutes = (dep_time %/% 100) * 60 + (dep_time %% 100),
arr_time_minutes = (arr_time %/% 100) * 60 + (arr_time %% 100),
minutes_in_air = arr_time_minutes - dep_time_minutes,
minutes_in_air %/% 60,
minutes_in_air %% 60
)
# 4 most delayed flights:
flights_ranked <- mutate(flights,
rank = min_rank(desc(dep_delay)))
View(arrange(flights_ranked,rank))
# SQL-like operations:
flights %>% distinct(carrier) # select distinct carrier from flights
# 5.6 Summarise and group by
by_day <- group_by(flights, year, month, day)
summarise(by_day, delay = mean(dep_delay, na.rm = TRUE))
# equivalent to: select year, month, day, average(delay) from flights group by 1,2,3
# Pipes - pass data frames from one operation to another:
delays <- flights %>%
group_by(dest) %>%
summarise(
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
) %>%
filter(count > 20, dest != "HNL")
# we need na.rm so that it ignores NAs; otherwise we get NA in the final result
# This is similar to using IFERROR in Excel
# Alternative without pipes is the following (messy):
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
count = n(),
dist = mean(distance, na.rm = TRUE),
delay = mean(arr_delay, na.rm = TRUE)
)
delay <- filter(delay, count > 20, dest != "HNL")
# Another example of summarise:
not_cancelled %>%
group_by(year, month, day) %>%
summarise(
first = min(dep_time),
last = max(dep_time)
)
# counts
n() # count
sum(!is.na(x)) # counts number of non-NAs
# The following do the same:
not_cancelled %>% count(dest)
not_cancelled %>% count(dest, sort = TRUE) # This orders by the count rather than dest
not_cancelled %>% group_by(dest) %>% summarise(n())
not_cancelled %>% count(dest,origin)
not_cancelled %>% group_by(dest,origin) %>% summarise(n())
# Ex 5.6.7.
# 3 - missing data; better to use:
cancelled <- flights %>%
filter(is.na(dep_time) | is.na(arr_time))
# Cancelled flights by day:
cancelled %>%
group_by(year,month,day) %>%
summarise(
n()
)
# Summary of flights by day:
flights %>%
group_by(year,month,day) %>%
summarise(
number_flights = n(),
number_cancelled_flights = sum(is.na(dep_time) | is.na(arr_time)),
prop_flights_cancelled = number_cancelled_flights / number_flights,
avg_delay2 = mean(arr_delay[arr_delay > 0])
) %>%
ggplot(mapping = aes(x = prop_flights_cancelled, y = avg_delay2)) +
geom_point(alpha=1/10)
ggplot(mapping = aes(x = day, y = prop_flights_cancelled)) +
geom_point(alpha = 1/10)
# Trying to find the pattern of delays by day ... need to concatenate the date variables into a single field to plot on x axis
# Be careful with aggregate functions and grouping - just like in SQL. Window functions work naturally with grouped data.
|
eebc5c811d82fc7ce1947b12ef23580b8408aec6 | 35bace0a2490d9e15f021f752c854748c9ff378c | /man/od_fatal_any_opioid.Rd | 80884e8d2f50417d50262401eb6b567369c6792f | [] | no_license | doh-FXX0303/overdoser | 077c0c1eebdf8a3f3a32a0ef2439aee61e546243 | d0314c87545b5fd66026f3bd3d956afc7ce6de38 | refs/heads/master | 2023-03-15T20:48:32.451747 | 2018-10-29T22:47:03 | 2018-10-29T22:47:03 | 569,497,723 | 1 | 0 | null | 2022-11-23T00:46:34 | 2022-11-23T00:46:33 | null | UTF-8 | R | false | true | 500 | rd | od_fatal_any_opioid.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od_fatal_any_opioid.R
\name{od_fatal_any_opioid}
\alias{od_fatal_any_opioid}
\title{Find any opioid.}
\usage{
od_fatal_any_opioid(data, underly_col, mult_col)
}
\arguments{
\item{data}{input data}
\item{underly_col}{underlying column index}
\item{mult_col}{multiple causes column indices}
}
\value{
cdc_any_drugs, cdc_any_opioid,
}
\description{
Using CDC definitions.
}
\examples{
to be added
}
|
e8f916f3ef43d1caf7acec4f0d11f74f3ae39f6b | 176422a095eaf12515a9055dcc1b643f1d9e4910 | /site_nutrients/data_setup.R | 2f65afd8171a7c4f2f2cff493add06f1ecda8a41 | [] | no_license | wesbick/Root_paper | 43710a08af0bd5faa882d6c259a455dd4f8a67a6 | 21a8917de875b1c54fc86f0f01cea3ad5b00ed84 | refs/heads/master | 2021-06-30T04:12:25.228319 | 2019-05-29T20:06:44 | 2019-05-29T20:06:44 | 109,291,448 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 371 | r | data_setup.R |
# Read the December 2016 LECO elemental-analysis export (no header row).
# NOTE(review): column order is assumed to be Study, SiteID, mass, %N, %C,
# Date — confirm against the raw instrument export.
leco16 <- read.csv("site_nutrients/LECO/Bickford_Dec16.csv", header = F, sep = ",", stringsAsFactors = F)
colnames(leco16) <- c("Study", "SiteID", "mass", "PerN", "PerC", "Date")
# Split the combined export into its three constituent studies.
# The "BIckford" capitalisation matches the labels present in the raw file.
leco_soils_15 <- leco16[leco16$Study == "BIckford_15_Soils", ]
leco_soils_16 <- leco16[leco16$Study == "BIckford_16_Soils", ]
leco_plant_16 <- leco16[leco16$Study == "BIckford_16_Plant", ]
|
584704faf1d2738dc805955c0199c5aa69a2dae4 | 6e2ae783067a77b41b1b4c395ddcd8bbac469b31 | /man/nm_description.Rd | 6f46500430c3f0ad30a5433f5ce743617466e424 | [] | no_license | Eliford/PKPDmisc | 5ff36f8f626a116262bdd550cae5d8401755bf97 | d99608fc1247b855992e18e9bec039394c2c2de9 | refs/heads/master | 2023-03-28T10:46:36.041890 | 2020-04-07T14:40:16 | 2020-04-07T14:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 413 | rd | nm_description.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nm_comments.R
\name{nm_description}
\alias{nm_description}
\title{show description output from .mod file}
\usage{
nm_description(filename)
}
\arguments{
\item{filename}{name of .mod file}
}
\value{
string
}
\description{
show description output from .mod file
}
\details{
return line containing description file
}
\seealso{
nm_notes
}
|
4394e8fd0f89260a16580541a87cf55ab54f97fb | 29585dff702209dd446c0ab52ceea046c58e384e | /CRAC/R/distance.luminosity.R | b439dba771d1d3cb934ec3efa3cf2e2091754910 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 578 | r | distance.luminosity.R | #' Compute the luminosity distance [Mpc/h]
#'
#' @param z redshift upper limit
#' @param cosmo cosmological parameter list
#' @param z0 redshift lower limit
#' @param ... pass to integrate() to control integration accuracy.
#' @return Luminosity distance from \eqn{z_0(=0)} to \eqn{z} [Mpc/h]
#' @references Equation (20) in [H99]
#' @seealso \code{\link{distance.angular}},\code{\link{distance.comoving}}
#'
#' @examples
#'distance.luminosity(0.1,parameter.fidcosmo)
distance.luminosity <- function(z, cosmo, z0 = 0, ...) {
  # Hogg (1999), eq. 20: D_L = (1 + z)^2 * D_A
  angular <- distance.angular(z, cosmo, z0 = z0, ...)
  angular * (1 + z)^2
}
|
# Mean of one pollutant across a set of monitor CSV files.
#
# directory: folder holding monitor files named 001.csv, 002.csv, ...
# pollutant: name of the column ("sulfate"/"nitrate") to average
# id:        integer vector of monitor IDs to read (default all 332)
# Returns the mean over all selected monitors' rows, with NA values ignored.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  paths <- file.path(paste0(directory, .Platform$file.sep,
                            sprintf('%03d', id), '.csv'))
  monitors <- lapply(paths, read.csv)
  combined <- do.call(rbind, monitors)
  mean(combined[, pollutant], na.rm = TRUE)
}
|
6eb381bc7fc11a17e3e1bb0c4c48b57f93bbc673 | 53fbeea7ca52f3b474e79efe34b1bee3f9a52572 | /tests/testthat/test-coerceS4ToList.R | d6df7e3f0db8cf7af42eaf0bb0bb9a727529ff1a | [
"MIT"
] | permissive | csu-xiao-an/transformer | 575aa7a59094e02006d44672c3fa5f79c085c757 | 04658a7d99973ac0d3d9b4484b0e599ecedcf936 | refs/heads/master | 2020-07-26T17:56:40.024193 | 2019-09-09T16:55:50 | 2019-09-09T16:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 361 | r | test-coerceS4ToList.R | context("coerceS4ToList")
test_that("coerceS4ToList", {
    # Coercing the example object must produce a plain list whose element
    # names mirror the S4 slots, in slot order.
    coerced <- coerceS4ToList(rse)
    expect_is(coerced, "list")
    slot_names <- c(
        "rowRanges",
        "colData",
        "assays",
        "NAMES",
        "elementMetadata",
        "metadata"
    )
    expect_identical(object = names(coerced), expected = slot_names)
})
|
59ae879b388e596d1b3a272547ea19e4c841a655 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /ggforce/inst/testfiles/enclose_points/libFuzzer_enclose_points/enclose_points_valgrind_files/1609955763-test.R | b611e11b1ad63da73ed695a071c04f13d7a55278 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 195 | r | 1609955763-test.R | testlist <- list(id = 16777215L, x = c(2.83989925879564e-29, NaN, 9.43975066195912e+281, 2.40281901634502e-306, NaN, 0), y = NaN)
# Replay the fuzzer-captured argument list against the internal entry point.
result <- do.call(ggforce:::enclose_points,testlist)
str(result) |
fe0a28d8fe1871d35a4617ff570aee4a6fc54697 | 0aa3452bf58964faad6f59faf53340a7f9b79625 | /man/NetFeature.Rd | 310b35593ccf72c1a547bb706c1990a409beb227 | [] | no_license | tanlabcode/arvin | f27495c55aa802d859cb96b4ca3a28f0bec70ca0 | 00101cc93e7999f3a84908ce2d3580063a26eb75 | refs/heads/master | 2020-04-04T12:09:47.084270 | 2018-11-02T20:11:20 | 2018-11-02T20:11:20 | 155,916,312 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 534 | rd | NetFeature.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NetworkFeatureCalculation.R
\name{NetFeature}
\alias{NetFeature}
\title{Compute network-based features needed for risk variant prediction}
\usage{
NetFeature(Net, nodeFile, edgeFile, snpFile)
}
\arguments{
\item{Net}{a graph object representing the input network}
\item{nodeFile}{the path for the node file}
\item{edgeFile}{the path for the network edge file}
\item{snpFile}{the path for the SNP file}
}
\value{
a matrix containing the different type network features
}
\description{
This function ...
}
|
1886a9e0188e7ee30539043abe332c741e777366 | 6fa5c9434ad36808fecf6db902e515f71e87e5fc | /R code/National analysis/mergeEBTOthers.R | f74c6914cfb60e011502983ee308d68a721af2de | [] | no_license | ttvand/Master-thesis-Statistics-UEFA2016 | 1cca430e7c9687473a7f8364bb9ddf96e7fad330 | bf662d167fd6c8c6a6e2df6965ee3eee7568bfb3 | refs/heads/master | 2021-01-12T04:25:47.606388 | 2016-12-29T11:36:51 | 2016-12-29T11:36:51 | 77,608,020 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 574 | r | mergeEBTOthers.R | # Clear workspace
# Clear all objects so the merge starts from a clean workspace
rm(list=ls())
# Set working directory (machine-specific absolute path — edit before reuse)
basePath <- "C:/Users/Tom/Documents/Sta dat Tom/Thesis/R/National analysis"
setwd(basePath)
# Load model and summary data. Each .RData file is assumed to define an
# object named `summary` (TODO confirm); it is captured under a unique name
# and removed so the second load() cannot be confused with the first.
model1File <- "predictProbsNat4FIFAWeights.RData"
model2File <- "predictProbsNatEBT4FIFAWeightsS.RData"
# modelFile <- "predictProbsNat4FIFAWeightsPoisson.RData"
load(model1File)
summary1 <- summary
rm(summary)
load(model2File)
summary2 <- summary
rm(summary)
# Combine data frames: all of summary1 plus summary2 minus its first 13
# columns (presumably the shared identifier columns — verify)
summary <- cbind(summary1,summary2[,-(1:13)])
# Write combined file to memory — NOTE(review): this overwrites model1File
save(summary, file = model1File)
e82699576d938ead53dc5de89c3d1c8ab6ac5307 | b6e18b1f022ff87ab51ccfb55164b928f11c1254 | /plot6.R | bd93d946ff56897e70f3abd7ff78c8b06e2b206c | [] | no_license | jpatrickbennett/ExploratoryProject2 | bb7b07043ae63abcbd55b8dc8ed550c6ae20b043 | 84a35d97161f9887ac4a67070c89059965596397 | refs/heads/master | 2016-09-05T10:31:16.221875 | 2015-01-24T21:08:41 | 2015-01-24T21:08:41 | 29,791,731 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 774 | r | plot6.R | ## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Keep source codes whose Level-Two description mentions "vehicle"/"Vehicle"
theseSCC <- SCC[grep("[Vv]ehicle", SCC$SCC.Level.Two),]
vehicleNEI <- NEI[NEI$SCC %in% theseSCC$SCC,]
# Restrict to Baltimore City (fips 24510) and Los Angeles County (fips 06037)
balt_LA_Vehicle <- vehicleNEI[vehicleNEI$fips == "24510" | vehicleNEI$fips == "06037",]
# Total emissions per year within each county
yearlytotals <- aggregate(Emissions ~ year + fips, data = balt_LA_Vehicle, sum)
# Label rows; relies on aggregate() ordering fips "06037" before "24510",
# with exactly 4 survey years per county — TODO confirm this holds
yearlytotals$Location <- as.factor(rep(c("LA","Baltimore"), each=4))
# NOTE(review): qplot/labs assume ggplot2 is already attached — confirm
g <- qplot(year, Emissions, data = yearlytotals, geom = "line", facets = .~Location)
g <- g + labs(title = "Baltimore & LA PM2.5 Emissions from Vehicles ('99-'08)")
g <- g + labs(y = "Emissions (tons)")
png(filename="plot6.png", width = 600, height = 300, units = "px")
# printing the ggplot object renders it into the open png device
g
dev.off()
3dfe306b5db15e5be803cb59fe0822224e996d67 | 2f31813b70d1fd1e343f34ab14645f26313cbc7d | /R/fitSignatures.R | 64100dd01a8260f5435ce38355f747e2d2c15661 | [
"MIT"
] | permissive | yuanjingnan/MesKit | 3a6ce6133a0e013902a4af6c61eb23604dd66d9a | 97113d1f87ff7b3e1ea090d95a6a5d09a5a08bb2 | refs/heads/master | 2023-01-30T13:40:17.214036 | 2020-11-23T13:08:50 | 2020-11-23T13:08:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,101 | r | fitSignatures.R | #' fitSignatures
#' @description Find nonnegative linear combination of mutation signatures to reconstruct matrix and
#' calculate cosine similarity based on somatic SNVs.
#'
#' @param tri_matrix A matrix or a list of matrix generated by \code{\link{triMatrix}} function.
#' @param patient.id Select the specific patients. Default NULL, all patients are included
#' @param signaturesRef Signature reference,Users can upload their own reference.
#' Default "cosmic_v2". Option: "exome_cosmic_v3","nature2013".
#' @param associated Associated Vector of associated signatures.
#' If given, will narrow the signatures reference to only the ones listed. Default NULL.
#' @param min.mut.count The threshold for the variants in a branch. Default 15.
#' @param signature.cutoff Discard any signature contributions with a weight less than this amount. Default 0.1.
#' @return A list of data frames, each one contains treeMSOutput,
#' containing information about each set/branch's mutational signature.
#'
#' @importFrom pracma lsqnonneg
#' @examples
#' maf.File <- system.file("extdata/", "HCC_LDC.maf", package = "MesKit")
#' clin.File <- system.file("extdata/", "HCC_LDC.clin.txt", package = "MesKit")
#' ccf.File <- system.file("extdata/", "HCC_LDC.ccf.tsv", package = "MesKit")
#' maf <- readMaf(mafFile=maf.File, clinicalFile = clin.File, ccfFile=ccf.File, refBuild="hg19")
#'
#' ## Load a reference genome.
#' library(BSgenome.Hsapiens.UCSC.hg19)
#'
#' phyloTree <- getPhyloTree(maf, patient.id = 'HCC8257')
#' tri_matrix <- triMatrix(phyloTree)
#' fitSignatures(tri_matrix)
#' @export fitSignatures
fitSignatures <- function(tri_matrix = NULL,
patient.id = NULL,
signaturesRef = "cosmic_v2",
associated = NULL,
min.mut.count = 15,
signature.cutoff = 0.1){
if(!is.null(patient.id)){
patient.setdiff <- setdiff(patient.id, names(tri_matrix))
if(length(patient.setdiff) > 0){
stop(paste0("Patient ", patient.setdiff, " can not be found in your data"))
}
tri_matrix <- tri_matrix[names(tri_matrix) %in% patient.id]
}
## get signatrue reference
df.etiology <- NULL
if(is(signaturesRef,'character')){
signaturesRef <- match.arg(signaturesRef,
choices = c("cosmic_v2","nature2013","exome_cosmic_v3"),
several.ok = FALSE)
signatures.etiology <- readRDS(file = system.file("extdata", "signatures.etiology.rds", package = "MesKit"))
# signatures.etiology <- readRDS(file = "D:/renlab/MesKit/inst/extdata/signatures.etiology.rds")
if (signaturesRef == "cosmic_v2"){
sigsRef <- readRDS(file = system.file("extdata", "signatures.cosmic.rds", package = "MesKit"))
rownames(sigsRef) <- gsub("Signature.", "Signature ", rownames(sigsRef))
df.etiology <- data.frame(aeti = signatures.etiology$cosmic_v2$etiology,
sig = rownames(signatures.etiology$cosmic_v2))
}else if(signaturesRef == "nature2013"){
sigsRef <- readRDS(file = system.file("extdata", "signatures.nature2013.rds", package = "MesKit"))
## rename signature
rownames(sigsRef) <- gsub("Signature.", "Signature ", rownames(sigsRef))
df.etiology <- data.frame(aeti = signatures.etiology$nature2013$etiology,
sig = rownames(signatures.etiology$nature2013))
}else if(signaturesRef == "exome_cosmic_v3"){
sigsRef <- readRDS(file = system.file("extdata", "signatures.exome.cosmic.v3.may2019.rds", package = "MesKit"))
df.etiology <- data.frame(aeti = signatures.etiology$cosmic_v3$etiology,
sig = rownames(signatures.etiology$cosmic_v3))
}
}else if(!is(signaturesRef, 'data.frame')){
stop('Input signature reference should be a data frame.')
}else{
sigsRef <- signaturesRef
}
if(!is.null(associated)){
signature.setdiff <- setdiff(associated, rownames(sigsRef))
if(length(signature.setdiff) > 0){
stop(paste0(signature.setdiff, " were not found in signature reference."))
}
sigsRef <- sigsRef[rownames(sigsRef) %in% associated, ]
}
tri_matrix_list <- tri_matrix
processFitSig <- function(i){
if(typeof(tri_matrix_list[[i]]) == "list"){
tri_matrix <- tri_matrix_list[[i]]$tri_matrix
tsb.label <- tri_matrix_list[[i]]$tsb.label
}else{
tri_matrix <- tri_matrix_list[[i]]
}
patient <- names(tri_matrix_list)[i]
## Remove branches whose mutation number is less than min.mut.count
branch_remove_idx <- which(rowSums(tri_matrix) <= min.mut.count)
branch_remove <- rownames(tri_matrix)[branch_remove_idx]
if(length(branch_remove) > 0){
message("Warning: mutation number of ",
paste(branch_remove, collapse = ", "),
" in ",patient, " is less than min.mut.count")
branch_left <- setdiff(rownames(tri_matrix),branch_remove)
if(length(branch_left) == 0){
return(NA)
}
tri_matrix <- tri_matrix[branch_left,]
## rebuild matrix if there is only one branch left
if(is(tri_matrix, "numeric")){
type_name <- names(tri_matrix)
tri_matrix <- matrix(tri_matrix, ncol = length(tri_matrix))
rownames(tri_matrix) <- branch_left
colnames(tri_matrix) <- type_name
}
}
## convert mutation number to proportion
origin_matrix <- t(apply(tri_matrix,1,function(x)x/sum(x)))
## calculate cosine similarity
branch_num <- nrow(origin_matrix)
refsig_num <- nrow(sigsRef)
cos_sim_matrix <- matrix(nrow = branch_num, ncol = refsig_num)
rownames(cos_sim_matrix) <- rownames(origin_matrix)
colnames(cos_sim_matrix) <- rownames(sigsRef)
cos_sim_matrix <- apply(origin_matrix, 1, function(x){
apply(sigsRef, 1, function(y){
y <- as.numeric(y)
s <- as.numeric(x %*% y / (sqrt(x %*% x) * sqrt(y %*% y)))
return(s)
})
}) %>% t()
## calculate signature contribution by solving nonnegative least-squares constraints problem(weight)
type_num <- ncol(origin_matrix)
sigsRef_t <- t(as.matrix(sigsRef))
## contribution matrix
con_matrix <- matrix(1, nrow = branch_num, ncol = refsig_num)
## reconstruted matrix
recon_matrix <- matrix(1, nrow = branch_num, ncol = type_num)
## solve nonnegative least-squares constraints.
con_matrix <- apply(origin_matrix, 1, function(m){
lsq <- pracma::lsqnonneg(sigsRef_t, m)
return(lsq$x)
}) %>% t()
recon_matrix <- apply(origin_matrix, 1, function(m){
lsq <- pracma::lsqnonneg(sigsRef_t, m)
l <- sigsRef_t %*% as.matrix(lsq$x)
return(l)
}) %>% t()
rownames(con_matrix) <- rownames(origin_matrix)
colnames(con_matrix) <- rownames(sigsRef)
rownames(recon_matrix) <- rownames(origin_matrix)
colnames(recon_matrix) <- colnames(origin_matrix)
## calculate RSS of reconstructed matrix and origin matrix
RSS <- vapply(seq_len(branch_num), function(i){
r <- recon_matrix[i,]
o <- origin_matrix[i,]
rss <- sum((r-o)^2)
return(rss)
},FUN.VALUE = numeric(1))
names(RSS) <- rownames(origin_matrix)
## summary mutation signatures of branches
if(!is.null(df.etiology)){
etiology_ref <- as.character(df.etiology$aeti)
names(etiology_ref) <- as.character(df.etiology$sig)
}
etiology_ref <- as.character(df.etiology$aeti)
names(etiology_ref) <- as.character(df.etiology$sig)
signatures_etiology <- data.frame()
signatures_etiology_list <- lapply(seq_len(branch_num), function(i){
branch_name <- rownames(tri_matrix)[i]
contribution <- con_matrix[i,]
sig_cut <- names(contribution[contribution > signature.cutoff])
if(length(sig_cut) == 0){
return(NA)
}
sig_con <- as.numeric(contribution[contribution > signature.cutoff])
# mut_sum <- sum(tri_matrix[i,])
sub <- data.frame(Level_ID = branch_name,
Signature = sig_cut,
# Mutation_number = mut_sum,
Contribution = sig_con)
if(!is.null(df.etiology)){
aet <- etiology_ref[sig_cut]
sub$Etiology <- as.character(aet)
}
return(sub)
})
signatures_etiology_list <- signatures_etiology_list[!is.na(signatures_etiology_list)]
signatures_etiology <- dplyr::bind_rows(signatures_etiology_list)
## order data frame by contribution of each branch
signatures_etiology <- dplyr::arrange(signatures_etiology,
dplyr::desc(.data$Level_ID),
dplyr::desc(.data$Contribution))
recon_df <- as.data.frame(recon_matrix)
recon_df$Branch <- as.character(row.names(recon_df))
rownames(recon_df) <- NULL
recon_spectrum <- tidyr::pivot_longer(recon_df,
-"Branch",
names_to = "Type",
values_to = "Reconstructed")
## convert origin matrix to data frame
origin_df <- as.data.frame(origin_matrix)
origin_df$Branch <- as.character(row.names(origin_df))
rownames(origin_df) <- NULL
origin_spectrum <- tidyr::pivot_longer(origin_df,
-"Branch",
names_to = "Type",
values_to = "Original")
mut_spectrum <- dplyr::left_join(recon_spectrum, origin_spectrum, by = c("Branch", "Type"))
total_cosine_similarity <- mut_spectrum %>%
dplyr::group_by(Branch) %>%
dplyr::summarise(
cosine_similarity = sum(Original*Reconstructed)/(sqrt(sum(Original^2))*sqrt(sum(Reconstructed^2)))
) %>%
dplyr::rename("Level_ID" = "Branch") %>%
as.data.frame()
## calculate cosine similarity between origin matrix and reconstructed matrix
if(typeof(tri_matrix_list[[i]]) == "list"){
f <- list(
reconstructed.mat = recon_matrix,
original.mat = origin_matrix,
cosine.similarity = cos_sim_matrix,
total.cosine.similarity = total_cosine_similarity,
RSS = RSS,
signatures.etiology = signatures_etiology,
tsb.label = tsb.label
)
return(f)
}else{
f <- list(
reconstructed.mat = recon_matrix,
original.mat = origin_matrix,
cosine.similarity = cos_sim_matrix,
total.cosine.similarity = total_cosine_similarity,
RSS = RSS,
signatures.etiology = signatures_etiology
)
return(f)
}
}
result <- lapply(seq_len(length(tri_matrix_list)) ,processFitSig)
names(result) <- names(tri_matrix_list)
result <- result[!is.na(result)]
if(length(result) == 0){
return(NA)
}else{
return(result)
}
}
|
c00c90d512e8e69b36f30ddbf34e6bb3ec5955da | 1803cf018cefcd9b7ae2c980b2e4a294f3cff396 | /man/pairwise.fstb.Rd | a45b39a27c42fc531783f0f8f90a42e11f9151fc | [] | no_license | tomatebio/PopGenReport | 4aa15f90385703d9acc57ced895355f682571a2a | bef87caa305ec8060193828470ebfb5a0afea0d3 | refs/heads/master | 2020-04-15T11:52:27.117366 | 2015-09-03T10:37:29 | 2015-09-03T10:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 518 | rd | pairwise.fstb.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/popdynfun.r
\name{pairwise.fstb}
\alias{pairwise.fstb}
\title{Calculates pairwise fsts using a genind object (very fast)}
\usage{
pairwise.fstb(gsp)
}
\arguments{
\item{gsp}{a genind object}
}
\value{
a pairwise fst matrix (same as hierfstat pairwise.fst)
}
\description{
For simulations, the original pairwise.fst was much too slow. This fast version works only with diploid data and no NAs (so do not use it on non-simulated data).
}
|
622aa268b6d280bc696e2882796f60f1b60d2793 | f8ce1034cef41685ab2387fa42084ac1ee5c96cf | /chapter14/proc.R | 7db8cace542c17e5ba9e0daa1b68306e8b00740f | [] | no_license | AnguillaJaponica/RProgramming | ab7e15d99153ebe585745289490e6e3df118a35c | ae1401920b5a52795cffc95bd83922f15761e531 | refs/heads/master | 2020-11-28T10:07:02.579925 | 2019-12-23T15:28:23 | 2019-12-23T15:28:23 | 229,777,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 656 | r | proc.R | library(pROC)
# Predicted probabilities and true class labels (factor level order: 1, 0).
prob.label <- data.frame(prob = c(0.98, 0.95, 0.9, 0.87, 0.85, 0.8, 0.75,
    0.71, 0.63, 0.55, 0.51, 0.47, 0.43, 0.38, 0.35, 0.31, 0.28, 0.24, 0.22,
    0.19, 0.15, 0.12, 0.08, 0.04, 0.01), label = factor(c(1, 1, 0, 1, 0,
    0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0), levels = c(1,
    0)))

# Plot the ROC curve
roc.curve <- roc(response = prob.label$label, predictor = prob.label$prob,
    levels = c(0, 1))
plot(roc.curve, legacy.axes = TRUE)

# Compute the AUC: auc() takes the object produced by roc().
# (Fixed: the original file contained a pasted console transcript —
# "> auc(roc.curve)" plus its "Area under the curve: 0.6558" output — and an
# uncommented prose sentence, all of which made the script unparsable.)
auc(roc.curve)
7e5033a9864c943106adce4999e1a60f70b1d278 | 1352befe44c4df0aff15c8561deede8d45711e6c | /Functions/PowerFlow.R | 3ce8f4afd2a3a490f78f9706ad30ec993d35f171 | [] | no_license | JonnoB/Create_ETYS_network | 5cadbaa76804184bbfcf1380ab464608da463cce | 6efd198527ef1d1d39f85741ac714ee345b53e4d | refs/heads/master | 2021-01-21T19:58:02.014521 | 2018-02-22T08:44:02 | 2018-02-22T08:44:02 | 92,177,917 | 0 | 0 | null | 2018-02-22T08:44:03 | 2017-05-23T13:44:41 | HTML | UTF-8 | R | false | false | 574 | r | PowerFlow.R | PowerFlow <- function(g, SlackRef){
# Calculates the DC power flow on a graph carrying the attributes below.
# Vertex attrs: "name" and "BalencedPower" (balanced generation/demand, sic);
# edge attr "PowerFlow" is overwritten with the computed line flows.
# g: an igraph object
# SlackRef: name of the slack bus, removed so the system solved inside
#           ImpPTDF() is not singular.
keep <- get.vertex.attribute(g, "name") != SlackRef
InjectionVector <- get.vertex.attribute(g, "BalencedPower")[keep]
# Line flows are the PTDF matrix applied to the nodal injection vector
Power <- ImpPTDF(g, SlackRef)$PTDF %*% InjectionVector
g <- set_edge_attr(g, "PowerFlow", value = Power)
return(g)
}
|
be64edd88df7f7ed4e06af00dfcb5dd750845152 | 4288957ae81853c5c96f41edb3b889d94c77cb01 | /plot4.R | cc15d86b5230d870b6913043dd2554be792464b8 | [] | no_license | sergiotosa/ExData_Plotting1 | 7374eb846f607235bb162e16e85e383474b488dd | 7a0e9b0be841d7dd2ffd8c09735fa7b275ee869e | refs/heads/master | 2020-12-25T05:09:15.795234 | 2014-08-07T14:47:27 | 2014-08-07T14:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,035 | r | plot4.R | plot4 <- function(){
## Read the file: the SQL filter loads only the observations for
## 1-2 Feb 2007, so the full multi-million-row file never enters memory.
power <- read.csv.sql("household_power_consumption.txt", sep=";", sql='select * from file where Date="1/2/2007" OR Date="2/2/2007"')
## Build POSIX timestamps from the separate Date and Time columns
stamp <- strptime(paste(power$Date, power$Time), format="%d/%m/%Y %H:%M:%S")
png(filename="plot4.png", height=480, width=480)
par(mfcol=c(2,2))
## Top-left: global active power over time
plot(stamp, power$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
## Bottom-left: the three sub-metering series plus a legend
plot(stamp, power$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(stamp, power$Sub_metering_2, type="l", col="red", xlab="", ylab="Energy sub metering")
lines(stamp, power$Sub_metering_3, type="l", col="blue", xlab="", ylab="Energy sub metering")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"))
## Right column: voltage, then global reactive power
plot(stamp, power$Voltage, type="l", xlab="datetime", ylab="Voltage")
plot(stamp, power$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
}
b2076dad772a2a470bd9c2955f149f6b7d2704b6 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.machine.learning/man/lexmodelsv2_list_bot_recommendations.Rd | 46e9d536516048f38f90a0e79cf18caca3d094fb | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,368 | rd | lexmodelsv2_list_bot_recommendations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelsv2_operations.R
\name{lexmodelsv2_list_bot_recommendations}
\alias{lexmodelsv2_list_bot_recommendations}
\title{Get a list of bot recommendations that meet the specified criteria}
\usage{
lexmodelsv2_list_bot_recommendations(
botId,
botVersion,
localeId,
maxResults = NULL,
nextToken = NULL
)
}
\arguments{
\item{botId}{[required] The unique identifier of the bot that contains the bot recommendation
list.}
\item{botVersion}{[required] The version of the bot that contains the bot recommendation list.}
\item{localeId}{[required] The identifier of the language and locale of the bot recommendation
list.}
\item{maxResults}{The maximum number of bot recommendations to return in each page of
results. If there are fewer results than the max page size, only the
actual number of results are returned.}
\item{nextToken}{If the response from the ListBotRecommendation operation contains more
results than specified in the maxResults parameter, a token is returned
in the response. Use that token in the nextToken parameter to return the
next page of results.}
}
\description{
Get a list of bot recommendations that meet the specified criteria.
See \url{https://www.paws-r-sdk.com/docs/lexmodelsv2_list_bot_recommendations/} for full documentation.
}
\keyword{internal}
|
cfe22817e749e11bd12d12f0412198f468ff2392 | e9a2500087f7d3428b9c42e6e1aa2ced6938f268 | /SizeBiasedMCMCWorking.R | 5ed2f3563b5ac22e4cb1a151c8885c96ae8a044f | [] | no_license | PM520-Spring-2020/Week6-SizeBiasedMCMC | 819a9c37c55f6ef4d7fa3ba3c291d1ea17857c7f | 2be28f3136ed3581ea69e7e5e10a17c8ddd05fc2 | refs/heads/master | 2021-02-05T19:55:24.906232 | 2020-03-02T18:01:11 | 2020-03-02T18:01:11 | 243,825,637 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,241 | r | SizeBiasedMCMCWorking.R | # Size-biased sampling via MCMC
# Exponential density f(x) = rate * exp(-rate * x).
# Note: evaluates the formula for any x (no truncation at zero); the MCMC
# sampler in this file only calls it on [LowRange, HighRange] with LowRange = 0.
Exponential <- function(dRate, x) {
  dRate * exp(-dRate * x)
}
# Density of a Normal(dMean, sd = 1) truncated to [0, Inf).
#
# Fixed: the original body was `y <- -1; while (y < 0) y <- dnorm(x, dMean, 1)`,
# a no-op loop (densities are never negative) that returned the *untruncated*
# density for every x, including x < 0, contradicting its own comment.
# This version implements the documented intent:
#   f(x) = dnorm(x; dMean, 1) / P(X >= 0)  for x >= 0, and 0 otherwise.
# Vectorised over x.
TruncatedNormal <- function(dMean, x) {
  dens <- ifelse(x >= 0, dnorm(x, dMean, 1), 0)
  dens / pnorm(0, mean = dMean, sd = 1, lower.tail = FALSE)
}
# Metropolis-Hastings sampler for the size-biased version of `Density`:
# target(x) is proportional to x * Density(Param1, x) on [LowRange, HighRange].
#
# Density:  function(Param1, x) returning a (possibly un-normalised) density
# Param1:   parameter forwarded unchanged to Density
# NoOfIts:  number of iterations (and length of the returned chain)
# StepSize: half-width of the symmetric uniform random-walk proposal
# LowRange/HighRange: support boundaries, treated as reflecting
# Returns the vector of sampled states, one per iteration (repeats when a
# proposal is rejected). No burn-in is discarded here.
SizedBiasedMCMC<-function(Density, Param1,NoOfIts,StepSize,LowRange,HighRange){
#start somewhere: initial state drawn uniformly over the support
x<-runif(1,LowRange,HighRange)
# preallocate the chain (avoids growing a vector inside the loop)
Accepteds<-vector(mode="double",length=NoOfIts)
# do the MCMC
for (i in 1:NoOfIts){
# propose new x by a symmetric uniform random walk around the current state
xprime<-x+runif(1,-StepSize,StepSize)
# NOTE(review): a single reflection assumes StepSize <= HighRange - LowRange;
# a larger step could still land outside the support — confirm if StepSize grows.
if (xprime> HighRange)
xprime<-HighRange-(xprime-HighRange) # this treats the edge of the range as a 'reflecting boundary'.
if (xprime< LowRange)
xprime<-LowRange-(xprime-LowRange) # this treats the edge of the range as a 'reflecting boundary'.
# Calculate Hastings Ratio - the Q term will disappear (symmetric proposal);
# the extra factor of x is what makes the target size-biased.
Paccept<-min(1,(xprime*Density(Param1,xprime))/(x*Density(Param1,x)))
#Paccept<-min(1,(Density(Param1,xprime))/(Density(Param1,x)))
#cat("\n",paste(xprime," ",x," ",Density(Param1,xprime)," ",Density(Param1,x))," ",Paccept)
#cat(Paccept)
#cat("\n")
# move or not? accept the proposal with probability Paccept, else stay put
p<-runif(1)
if (p<Paccept)
{
x<-xprime
}
# update the vector of states (the current state is recorded every iteration)
Accepteds[i]<-x
}
return (Accepteds)
}
# Demo: two independent chains targeting the size-biased Exponential(1),
# whose normalised density is x * exp(-x) (a Gamma(2, 1)).
SB<-SizedBiasedMCMC(Exponential,1,200000,1,0,10)
HSB<-hist(SB,breaks=50)
plot(HSB,main="size-biased expo: run 1")
plot(HSB$mids,HSB$density,pch='.',cex=3,main="run 1 as density")
curve(x*exp(-x),add=TRUE,col="blue") # analytic target for comparison
# Second, independent run (used later for the Gelman-Rubin diagnostic)
SB2<-SizedBiasedMCMC(Exponential,1,200000,1,0,10)
HSB2<-hist(SB2,breaks=50)
plot(HSB2,main="size-biased expo: run 2")
# (fixed: the original run-2 density call contained stray empty arguments —
# a double comma and a trailing comma — now matching the run-1 call)
plot(HSB2$mids,HSB2$density,pch='.',cex=3,main="run 2 as density")
curve(x*exp(-x),add=TRUE,col="blue")
# Gelman code: convergence diagnostics for the two chains via the coda package
library("coda")
# trace plot of the first chain
plot(SB,type="l")
# convert to mcmc objects, with a burn-in - the Gelman routine needs them as column vectors (each variable in a column)
# so we need to transpose them
#MCMC1<-mcmc(t(SB),start=1000)
#MCMC2<-mcmc(t(SB2),start=1000)
# (the chains are already plain vectors, so no transpose is needed;
# start=1000 discards the first 999 iterations as burn-in)
MCMC1<-mcmc(SB,start=1000)
MCMC2<-mcmc(SB2,start=1000)
# combine different mcmc chain objects to an mcmc list.
Combined<-mcmc.list(list(MCMC1,MCMC2))
# gelman functions are
gelman.plot(Combined) # for plots
gelman.diag(Combined) # for diagnostic values
|
59fb3058b2e9d4970453b1e22449919a0e7d6766 | 8928cca80877279f0c6b3c6d288706cc379847d5 | /rpkg/grocaptss/pkg/R/classify.R | 9335ad38b20308e2350c202b2442355cbabc71c6 | [
"BSD-2-Clause"
] | permissive | andrelmartins/grocap.tsshmm | 5f4ef11df41abdab6e85577d6f9b1acafbc50525 | 88d4d1b02ab41716d708b19dfc4a0275bb96faa3 | refs/heads/master | 2021-01-21T05:16:38.156417 | 2017-02-25T21:11:58 | 2017-02-25T21:11:58 | 83,163,284 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,381 | r | classify.R |
#' Classify pairs (in order, [-, +]) as SS, SU, US, UU
#'
#' Pairs are first restricted to those with high GRO-cap signal on both
#' strands (top 80% of the log10 signal on each strand); the survivors are
#' then labelled stable (S) or unstable (U) on each side from their CAGE
#' signal: CAGE >= cage.thresh => stable, CAGE == 0 => unstable. Values
#' strictly between 0 and cage.thresh are a buffer and fall in no class.
#' Class names follow the (-, +) order, e.g. "SU" = stable on the minus
#' strand, unstable on the plus strand.
#'
#' @param pair.plus plus-strand intervals of the pairs (BED-like table)
#' @param pair.minus minus-strand intervals, one row per row of pair.plus
#' @param bwSet list of signal tracks (GROcap.* and CAGENuc.bp.* elements)
#' @param cage.thresh minimum CAGE value for a side to be called stable
#'
#' @export
stable.unstable.classify <- function(pair.plus, pair.minus, bwSet, cage.thresh = 8) {
  # values.preds() is defined elsewhere in the package; it appears to return
  # a matrix with column 1 = GRO-cap signal and column 2 = CAGE signal
  # -- TODO confirm against its definition.
  pair.plus.values = values.preds(pair.plus, bwSet$GROcap.plus, bwSet$CAGENuc.bp.plus)
  pair.minus.values = values.preds(pair.minus, bwSet$GROcap.minus, bwSet$CAGENuc.bp.minus)
  # normaly high.values == median(log10(...))
  #
  # here, we'll use the top 80% ...
  tmp.plus = log10(pair.plus.values[,1])
  tmp.minus = log10(pair.minus.values[,1])
  # 20th percentile of the log10 signal on each strand: everything above it
  # counts as "high" on that strand.
  thresh.plus = quantile(tmp.plus, prob = 0.2)
  thresh.minus = quantile(tmp.minus, prob = 0.2)
  #high.values = log10(pair.plus.values[,1]) > median(log10(pair.plus.values[,1])) & log10(pair.minus.values[,1]) > median(log10(pair.minus.values[,1]))
  high.values = tmp.plus > thresh.plus & tmp.minus > thresh.minus
  # stability thresholds
  #
  # redef stable/unstable: with CAGE support (>= cage.thresh) => stable
  w.cage.plus = pair.plus.values[high.values, 2] >= cage.thresh
  w.cage.minus = pair.minus.values[high.values, 2] >= cage.thresh
  # leave buffer: only CAGE == 0 counts as unstable, so rows with
  # 0 < CAGE < cage.thresh belong to neither class.
  wo.cage.plus = pair.plus.values[high.values, 2] == 0
  wo.cage.minus = pair.minus.values[high.values, 2] == 0
  # define classes
  #
  # order of pairs is (-, +)
  #
  stable.stable = w.cage.plus & w.cage.minus
  stable.unstable = wo.cage.plus & w.cage.minus
  unstable.stable = w.cage.plus & wo.cage.minus
  unstable.unstable = wo.cage.plus & wo.cage.minus
  # get actual coordinates: subset to high-signal pairs first, then by class
  ss.plus = pair.plus[high.values,][stable.stable,]
  ss.minus = pair.minus[high.values,][stable.stable,]
  su.plus = pair.plus[high.values,][stable.unstable,]
  su.minus = pair.minus[high.values,][stable.unstable,]
  us.plus = pair.plus[high.values,][unstable.stable,]
  us.minus = pair.minus[high.values,][unstable.stable,]
  uu.plus = pair.plus[high.values,][unstable.unstable,]
  uu.minus = pair.minus[high.values,][unstable.unstable,]
  # The per-strand signal thresholds are returned as well so callers
  # (e.g. single.filter) can reuse them.
  return(list(ss.plus = ss.plus, ss.minus = ss.minus,
              su.plus = su.plus, su.minus = su.minus,
              us.plus = us.plus, us.minus = us.minus,
              uu.plus = uu.plus, uu.minus = uu.minus,
              thresh.plus = thresh.plus, thresh.minus = thresh.minus))
}
#' Write stable-unstable BED files
#'
#' Writes one tab-separated BED file per class/strand combination of
#' `classified.lst` (SS/SU/US/UU x plus/minus), each named
#' "<prefix>.<TAG>.bed" (e.g. "<prefix>.SS_plus.bed").
#'
#' @export
stable.unstable.write <- function(prefix, classified.lst) {
  # Map each list element name to the tag used in the output file name.
  tags <- c(ss.plus = "SS_plus", ss.minus = "SS_minus",
            su.plus = "SU_plus", su.minus = "SU_minus",
            us.plus = "US_plus", us.minus = "US_minus",
            uu.plus = "UU_plus", uu.minus = "UU_minus")
  for (elem in names(tags)) {
    out.path <- paste(prefix, tags[[elem]], "bed", sep = ".")
    # Plain BED output: tab-separated, no quoting, no headers or row names.
    write.table(classified.lst[[elem]], file = out.path, sep = "\t",
                quote = FALSE, col.names = FALSE, row.names = FALSE)
  }
}
#' Filter singletons
#'
#' Keeps single (unpaired) intervals whose log10 GRO-cap signal exceeds the
#' per-strand thresholds (as returned by stable.unstable.classify) and
#' splits them into stable / unstable subsets from their CAGE signal, using
#' the same rules as for pairs (CAGE >= cage.thresh => stable,
#' CAGE == 0 => unstable, values in between fall in neither subset).
#'
#' @param singles BED-like table; column 6 holds the strand ('+' or '-')
#' @param bwSet list of signal tracks (GROcap.* and CAGENuc.bp.* elements)
#' @param thresh.plus log10 GRO-cap signal cutoff for the plus strand
#' @param thresh.minus log10 GRO-cap signal cutoff for the minus strand
#' @param cage.thresh minimum CAGE value for a call of "stable"
#'
#' @export
single.filter <- function(singles, bwSet, thresh.plus, thresh.minus, cage.thresh = 8) {
  # Split by strand (column 6 of the BED table).
  single.plus = singles[singles[,6] == '+',]
  single.minus = singles[singles[,6] == '-',]
  # get scores; values.preds appears to return column 1 = GRO-cap signal,
  # column 2 = CAGE signal -- TODO confirm against its definition.
  single.plus.values = values.preds(single.plus, bwSet$GROcap.plus, bwSet$CAGENuc.bp.plus)
  single.minus.values = values.preds(single.minus, bwSet$GROcap.minus, bwSet$CAGENuc.bp.minus)
  # filter by thresholds (log10 scale, matching the pair classification)
  tmp.plus = log10(single.plus.values[,1])
  tmp.minus = log10(single.minus.values[,1])
  #
  singletons = rbind(
    single.plus[tmp.plus > thresh.plus,],
    single.minus[tmp.minus > thresh.minus,])
  # classify
  # redef stable/unstable: with CAGE support (>= cage.thresh) => stable
  w.cage.plus = single.plus.values[, 2] >= cage.thresh
  w.cage.minus = single.minus.values[, 2] >= cage.thresh
  # leave buffer: only CAGE == 0 counts as unstable
  wo.cage.plus = single.plus.values[, 2] == 0
  wo.cage.minus = single.minus.values[, 2] == 0
  # Stable/unstable calls are additionally gated on the signal threshold.
  stable.plus = tmp.plus > thresh.plus & w.cage.plus
  unstable.plus = tmp.plus > thresh.plus & wo.cage.plus
  stable.minus = tmp.minus > thresh.minus & w.cage.minus
  unstable.minus = tmp.minus > thresh.minus & wo.cage.minus
  stable.singletons = rbind(
    single.plus[stable.plus,],
    single.minus[stable.minus,])
  unstable.singletons = rbind(
    single.plus[unstable.plus,],
    single.minus[unstable.minus,])
  return(list(all = singletons, stable = stable.singletons, unstable = unstable.singletons))
}
|
d92d89a6290ebc8e706306c342185e93c620d0c0 | 9e8936a8cc7beae524251c8660fa755609de9ce5 | /man/details_multinom_reg_brulee.Rd | 8066cfffb68fa377fd8b6868ae36cdb56bd1f72d | [
"MIT"
] | permissive | tidymodels/parsnip | bfca10e2b58485e5b21db64517dadd4d3c924648 | 907d2164a093f10cbbc1921e4b73264ca4053f6b | refs/heads/main | 2023-09-05T18:33:59.301116 | 2023-08-17T23:45:42 | 2023-08-17T23:45:42 | 113,789,613 | 451 | 93 | NOASSERTION | 2023-08-17T23:43:21 | 2017-12-10T22:48:42 | R | UTF-8 | R | false | true | 3,039 | rd | details_multinom_reg_brulee.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multinom_reg_brulee.R
\name{details_multinom_reg_brulee}
\alias{details_multinom_reg_brulee}
\title{Multinomial regression via brulee}
\description{
\code{\link[brulee:brulee_multinomial_reg]{brulee::brulee_multinomial_reg()}} fits a model that uses linear predictors
to predict multiclass data using the multinomial distribution.
}
\details{
For this engine, there is a single mode: classification
\subsection{Tuning Parameters}{
This model has 2 tuning parameters:
\itemize{
\item \code{penalty}: Amount of Regularization (type: double, default: 0.001)
\item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 0.0)
}
The use of the L1 penalty (a.k.a. the lasso penalty) does \emph{not} force
parameters to be strictly zero (as it does in packages such as glmnet).
The zeroing out of parameters is a specific feature the optimization
method used in those packages.
Other engine arguments of interest:
\itemize{
\item \code{optimizer()}: The optimization method. See
\code{\link[brulee:brulee_linear_reg]{brulee::brulee_linear_reg()}}.
\item \code{epochs()}: An integer for the number of passes through the training
set.
\item \code{learn_rate()}: A number used to accelerate the gradient descent
process.
\item \code{momentum()}: A number used to use historical gradient information
during optimization (\code{optimizer = "SGD"} only).
\item \code{batch_size()}: An integer for the number of training set points in
each batch.
\item \code{stop_iter()}: A non-negative integer for how many iterations with no
improvement before stopping. (default: 5L).
\item \code{class_weights()}: Numeric class weights. See
\code{\link[brulee:brulee_multinomial_reg]{brulee::brulee_multinomial_reg()}}.
}
}
\subsection{Translation from parsnip to the original package (classification)}{
\if{html}{\out{<div class="sourceCode r">}}\preformatted{multinom_reg(penalty = double(1)) \%>\%
set_engine("brulee") \%>\%
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Multinomial Regression Model Specification (classification)
##
## Main Arguments:
## penalty = double(1)
##
## Computational engine: brulee
##
## Model fit template:
## brulee::brulee_multinomial_reg(x = missing_arg(), y = missing_arg(),
## penalty = double(1))
}\if{html}{\out{</div>}}
Factor/categorical predictors need to be converted to numeric values
(e.g., dummy or indicator variables) for this engine. When using the
formula method via \code{\link[=fit.model_spec]{fit()}}, parsnip will
convert factor columns to indicators.
Predictors should have the same scale. One way to achieve this is to
center and scale each so that each predictor has mean zero and a
variance of one.
}
\subsection{Case weights}{
The underlying model implementation does not allow for case weights.
}
\subsection{References}{
\itemize{
\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
}
}
}
\keyword{internal}
|
7bcb5c73e9291795a2c2e81e32064a60df1bce74 | 31928567719ec1812a049e743c1136d39118e73a | /R/internals.R | a5a21d5bf00a00af023df1578e0235c595fb064a | [] | no_license | cran/ICSNP | c94fff63b2616b615fbc1a4c0e0f51b8fb0909b2 | 5efc7e82dc8a2414fd8bea63ed173d81e8c2a105 | refs/heads/master | 2020-02-26T14:58:37.751662 | 2018-03-10T22:03:38 | 2018-03-10T22:03:38 | 17,679,935 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 273 | r | internals.R | norm<-function(X)
.C("norming", as.double(X), as.integer(dim(X)), res=double(dim(X)[1]),PACKAGE="ICSNP")$res
# Accumulate the outer products of the signs of the rows of X via the
# compiled ICSNP routine "sum_of_sign_outers" (exact semantics live in the
# package's C source -- confirm there). Returns a p x p matrix, p = ncol(X).
sumsignout<-function(X)
{
  d<-dim(X)
  # The C routine fills `res` as a length p^2 vector row-wise, hence byrow=T.
  matrix(.C("sum_of_sign_outers", as.double(X),as.integer(d), res=double(d[2]^2),PACKAGE="ICSNP")$res,ncol=d[2],byrow=T)
}
|
ffb29da6dfbfe3f0076b63d9db0d78a0eaf9e2a5 | 6ea95cada38629b3aeffe6d5a3e24c4f3bd948a4 | /man/normalize.Rd | 81e02313927957c336044cab4e8abacec9ffa78a | [] | no_license | nickharrigan/Nick | 1c9c2b409c72650a7ceaabeb4f91201d099908f3 | 406450e4ec64e8c8e17ff7ebcf9197947c93ec66 | refs/heads/master | 2020-03-26T21:22:56.104225 | 2018-08-20T07:59:57 | 2018-08-20T07:59:57 | 145,384,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 509 | rd | normalize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalize.R
\name{normalize}
\alias{normalize}
\title{Normalize}
\usage{
normalize(x)
}
\arguments{
\item{x}{A variable}
}
\description{
This function allows you to normalise a variable. The function takes one variable as an input, and then subtracts the mean and divides by the standard deviation of the variable.
}
\examples{
normalize()
}
\keyword{Centering}
\keyword{Normalisation,}
\keyword{Standardisation,}
|
1de9df9cd4120be6846343b0d39c3390bce80ff6 | 809f1752af7fea7029573b732a8b01a2e558442e | /weight.R | 73705393dbe164948bd06b51bbdf1a73ccbcf770 | [] | no_license | krojtous/weights | 3c02928cc4b7b9996863f301d19c344aac6ae746 | f86b9eab219897faaeb34f8d5898a4d15da0cbe9 | refs/heads/master | 2021-01-22T03:43:53.050501 | 2017-02-09T13:30:06 | 2017-02-09T13:30:06 | 81,452,432 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,032 | r | weight.R | #Name: weight.R
#Date: 31.1.2017
#Author: Matouš Pilnáček - Public Opinion Research Centre, Czech Academy of Science
#E-mail: matous.pilnacek@soc.cas.cz
#Description: Weighting of CVVM survey of crossed age, education and sex
# 'foreign' provides read.spss() for importing the SPSS .sav survey file.
library(foreign)
#functions for basic CVVM model
source("model.R")
#CVVM survey data
cvvm = read.spss(file = "./../../../Data/Nase_spolecnost/1701/Data/NS_1701_DataFile-FINAL.sav",
                 to.data.frame = TRUE,
                 use.value.labels = FALSE,
                 use.missings = FALSE,
                 reencode = 'utf-8')
#Czech Statistical Office Data from census 2011
CSU = read.csv("CSU.csv", sep = ";", dec = ",")

#-----------------Set 0 to system missing----------------------------------
# 0 is treated as a missing-value code here; recode to NA.
# IDE.2 = age, IDE.8 = sex, t_VZD = education (see the missing analysis below).
cvvm[cvvm$IDE.2 == 0 | is.na(cvvm$IDE.2), "IDE.2"] = NA
cvvm[cvvm$IDE.8 == 0 | is.na(cvvm$IDE.8), "IDE.8"] = NA
cvvm[cvvm$t_VZD == 0 | is.na(cvvm$t_VZD), "t_VZD"] = NA

#----------------Missing analysis-------------------------------------------
sum(is.na(cvvm$IDE.2)) #number of missing in age
sum(is.na(cvvm$IDE.8)) #number of missing in sex
sum(is.na(cvvm$t_VZD)) #number of missing in edu
sum(is.na(cvvm$t_VZD) | is.na(cvvm$IDE.8) | is.na(cvvm$IDE.2)) #number of missing in all vars

#------------------Recode age to three categories------------------------
# Equivalent SPSS syntax kept for reference:
# RECODE IDE.2 (15 thru 39=1) (40 thru 59=2) (60 thru Highest=3) INTO t_VEK_3.
cvvm$t_VEK_3 = cvvm$IDE.2
cvvm[cvvm$IDE.2 > 14 & cvvm$IDE.2 < 40, "t_VEK_3"] = 1
cvvm[cvvm$IDE.2 > 39 & cvvm$IDE.2 < 60, "t_VEK_3"] = 2
cvvm[cvvm$IDE.2 > 59, "t_VEK_3"] = 3

#------------------Make relative count of weighting categorie in survey-------
# Cross-tabulate sex x education x age and convert counts to shares.
tab = as.data.frame(table(cvvm$IDE.8,cvvm$t_VZD,cvvm$t_VEK_3))
tab$relSurvey = tab$Freq/sum(tab$Freq)
names(tab) = c("IDE.8","t_VZD","t_VEK_3","absSurvey", "relSurvey")

#------------------Compute weights----------------------------------------
# Post-stratification weight = population share (census) / sample share.
tab = merge(tab, CSU)
tab$weight = tab$relCSU / tab$relSurvey
View(tab)

# NOTE(review): the lines below look like interactive leftovers (printing
# two columns, then viewing the model table); harmless but not needed
# for the weighting itself.
cvvm$IDE.8
cvvm$t_VZD
View(tableRel(cvvmModel(cvvm), labels = TRUE))
7b9be142492de669940eb81d21dacc7357ab0930 | 744a39f1517cc98d5b91be460031d04a3db375e6 | /03_variableSelections.R | 823203be235a5931772e408ebe0a8486dff97121 | [] | no_license | jennybc/Stat-545a-2013-HW | ae6071f91b30c2e40796787cccb2f04d9982f869 | 7b3f3a1ca0231a524872c7ed5adbbdcf64483609 | refs/heads/master | 2021-01-18T03:00:56.615003 | 2013-12-08T03:10:45 | 2013-12-08T03:10:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 675 | r | 03_variableSelections.R | # As we did before, we create an environment to hold the data.
# Environment used as a private workspace so the script's objects do not
# pollute the global environment.
main.variable.en <- new.env()

# We load the cleaned binned data.
evalq({
  adult.data <- read.table("adult_clean_binned.txt", header=TRUE)
}, main.variable.en)

# We keep certain variables in our data and remove the redundant ones as follows:
evalq({
  adult.data <- data.frame(adult.data[,c("age", "workclass", "education", "educationNum", "maritalStatus", "occupation", "relationship", "race", "sex", "nativeCountry")])
},main.variable.en)

# We save the resulting data into a different (tab-separated) text file.
evalq({
  write.table(adult.data, "adult_clean_selected.txt", sep = "\t", row.names = FALSE)
},main.variable.en)
fb6b902ace5dae1ad75fb0b2a6e7b47af8f9c08e | 5a0eb1aed082d69c290070fb2e03c5ecfdc445a9 | /R/Inherit_functions/int_exact_copy.R | 6fd8c533e32f847c278854495460ae71c94b1756 | [] | no_license | gvdr/2016_Forward_Simulation | db82ae66617e549bf2dd66ee984f69990edd2678 | 9ac6494b89544bb07cb289cbea8a7007e4c2b3c8 | refs/heads/master | 2020-12-03T12:55:44.636154 | 2016-08-28T00:11:00 | 2016-08-28T00:11:00 | 66,307,310 | 0 | 1 | null | 2016-08-26T16:00:34 | 2016-08-22T20:53:30 | HTML | UTF-8 | R | false | false | 588 | r | int_exact_copy.R | int_exact_copy <- function(current_interactions, mom_number, child_l_number, child_r_number, new_time, ...){
mom_traits <- current_interactions[which(current_interactions$from == mom_number), ]
child_l_traits <- mom_traits
child_l_traits$from <- child_l_number
child_r_traits <- mom_traits
child_r_traits$from <- child_r_number
children_interactions <- as.data.frame(mapply(c, child_l_traits, child_r_traits, child_l_traits[, c(2,1,3)], child_r_traits[, c(2,1,3)]))
res <- new_network(mom_traits, mom_number, new_time, children_interactions)
return(res)
}
|
57cfcf02e0d9f0904abbc070fb052949bca5ff28 | 4a45693543a4d842912a42539b474af3d1d49684 | /man/sp_arrange.Rd | 94f5655dc110306b52b0c7b21c1dc8b1ad3dae12 | [] | no_license | LukasK13/sharepointr | bb23920efe59cce4c58b9f5f1f058704e3e3af5e | 2f955e9ef0db90dc65f0195182ee69dd78f340d8 | refs/heads/master | 2022-12-13T09:22:15.153264 | 2022-12-09T20:58:07 | 2022-12-09T20:58:07 | 179,996,847 | 48 | 15 | null | 2022-12-09T20:58:08 | 2019-04-07T17:05:56 | R | UTF-8 | R | false | true | 664 | rd | sp_arrange.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sharepoint_list_get_pipeline.R
\name{sp_arrange}
\alias{sp_arrange}
\title{Arrange a SharePoint list}
\usage{
sp_arrange(table, ..., .arrange = NULL)
}
\arguments{
\item{table}{A SharePoint list connection
as returned by sp_list()}
\item{...}{Comma separated arrange commands}
}
\value{
Modified SharePoint list connection
}
\description{
This method allows to arrange a SharePoint list
}
\examples{
sp_con = sp_connection("https://yourdomain.sharepoint.com", "YourUsername", "YourPassword", Office365 = T)
sp_list = sp_list(sp_con, "yourList") \%>\% sp_arrange(Title, desc(column2))
}
|
6c95e0573376bb2e6441ff4208a8d8c2b2b722ce | 04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612 | /R/replace-vecs-with-vecs.R | 279609d120928468aa27340cc8ac6bd1b2ebe478 | [
"MIT"
] | permissive | pbs-assess/csasdown | 796ac3b6d30396a10ba482dfd67ec157d7deadba | 85cc4dda03d6513c11350f7f607cce1cacb6bf6a | refs/heads/main | 2023-08-16T17:22:18.050497 | 2023-08-16T00:35:31 | 2023-08-16T00:35:31 | 136,674,837 | 47 | 18 | NOASSERTION | 2023-06-20T01:45:07 | 2018-06-08T23:31:16 | R | UTF-8 | R | false | false | 2,975 | r | replace-vecs-with-vecs.R | #' Inject vectors of strings into another vector replacing the elements
#' given the start and end indices in the main vector
#'
#' @details
#' Algorithm - Make a list of vector slices of the `main_vec` that are not
#' being replaced, than interleave the vectors in `repl_vecs` with those.
#' If the `start_inds` gives a value of 1 for the initial vector to replace,
#' that is a special case, and there is code to capture that and do it
#' correctly. The resulting list is flattened and returned.
#'
#' @param main_vec The vector to insert into
#' @param repl_vecs A list of vectors to be inserted in one or more places
#' @param start_inds A vector of indices at which to insert the vectors.
#' This argument will have `sort(unique())` run on it
#' @param end_inds A vector of indices to mark the end of insertion.
#' This argument will have `sort(unique())` run on it
#'
#' @keywords internal
#'
#' @return A vector of strings
replace_vecs_with_vecs <- function(main_vec = NULL,
                                   repl_vecs = NULL,
                                   start_inds = NULL,
                                   end_inds = NULL){

  # Argument validation: all four arguments are required and must be NA-free.
  # bail()/csas_color() are csasdown helpers that abort with a styled message.
  if(is.null(main_vec)){
    bail(csas_color("main_vec"), " cannot be ", csas_color("NULL"))
  }
  if(is.null(repl_vecs)){
    bail(csas_color("repl_vecs"), " cannot be ", csas_color("NULL"))
  }
  if(is.null(start_inds)){
    bail(csas_color("start_inds"), " cannot be ", csas_color("NULL"))
  }
  if(is.null(end_inds)){
    bail(csas_color("end_inds"), " cannot be ", csas_color("NULL"))
  }
  if(any(is.na(main_vec))){
    bail(csas_color("main_vec"), " cannot contain ", csas_color("NA"))
  }
  if(any(is.na(repl_vecs))){
    bail(csas_color("repl_vecs"), " cannot contain ", csas_color("NA"))
  }
  if(any(is.na(start_inds))){
    bail(csas_color("start_inds"), " cannot contain ", csas_color("NA"))
  }
  if(any(is.na(end_inds))){
    bail(csas_color("end_inds"), " cannot contain ", csas_color("NA"))
  }

  # Per the roxygen docs above, indices are deduplicated and sorted before
  # use; start_inds[i]..end_inds[i] is the i-th span to be replaced.
  start_inds <- sort(unique(start_inds))
  end_inds <- sort(unique(end_inds))

  # `keep_lst` contains the parts of the vector not spanned by the range of
  # `start_inds` to `end_inds`
  keep_lst <- map(seq_along(start_inds), ~{
    if(.x == 1){
      # Lead-in before the first replaced span (empty when it starts at 1).
      if(start_inds[1] != 1){
        main_vec[1:(start_inds[1] - 1)]
      }else{
        NULL
      }
    }else{
      # Gap between the previous span's end and this span's start, which
      # may be empty when the spans are adjacent.
      if((end_inds[.x - 1] + 1) <= (start_inds[.x] - 1)){
        main_vec[(end_inds[.x - 1] + 1):(start_inds[.x] - 1)]
      }else{
        NULL
      }
    }
  })
  # Tail after the last replaced span, if any.
  if(end_inds[length(end_inds)] < length(main_vec)){
    keep_lst <- c(keep_lst,
                  list(main_vec[(end_inds[length(end_inds)] + 1):length(main_vec)]))
  }
  # Interleave the `repl_lst` vectors: each replacement follows its
  # preceding kept chunk, except when the first span starts at index 1
  # (then the first replacement comes first).
  out_lst <- imap(keep_lst, ~{
    if(.y == 1 && start_inds[1] == 1){
      c(repl_vecs[[.y]], .x)
    }else{
      if(.y <= length(repl_vecs)){
        c(.x, repl_vecs[[.y]])
      }else{
        .x
      }
    }
  })

  # Drop the NULL/empty chunks and flatten to a single character vector.
  out_lst <- out_lst[lengths(out_lst) > 0]
  unlist(out_lst)
}
9c0cd9fb665011923a8f635c3c3f8d22ca39c560 | 227fe346470dbdc79fe8014c0a4f5d5ba5e72fdf | /src/2-Individual View/Manhattan_plot.R | ad90203c68de04c9ae4209ac36e9999364d4a9e8 | [
"MIT",
"BSD-3-Clause"
] | permissive | InfOmics/MUVIG | 022098e745c7900e60ca7b3d4730ed6c8160d850 | 8288a8d1c29c3691849621516414bd10b7dad7dd | refs/heads/main | 2023-07-14T14:52:29.517003 | 2021-08-24T09:24:20 | 2021-08-24T09:24:20 | 308,043,408 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,291 | r | Manhattan_plot.R | # function to create the Manhattan Plot
# R packages required to run the script:
# - tidyverse
# - ggplot2
# - ggrepel
# Install (if needed) and attach the required packages, aborting with a
# clear message when a package cannot be made available. This replaces
# three identical copy-pasted if(!require(...)) blocks with one loop.
for (pkg in c("tidyverse", "ggplot2", "ggrepel")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    if (!require(pkg, character.only = TRUE)) {
      stop(paste(pkg, "package not found"))
    }
  }
}

# Re-attach quietly so any startup messages are suppressed (kept from the
# original script; the packages are already on the search path by now).
suppressPackageStartupMessages( c( library(tidyverse),
                                   library(ggplot2),
                                   library(ggrepel) ))
# Build and save a Manhattan plot for a GWAS result table.
#
# gwasResults - data frame with at least the columns CHR (chromosome),
#               BP (base-pair position), P (p-value) and SNP (marker name)
# path        - output file path for the PNG written by ggsave()
# pheno       - phenotype name used in the plot title
#
# Points with -log10(p) >= 5 are highlighted orange and labelled; points
# with -log10(p) >= 7.3 (i.e. p <= 5e-8) are highlighted red. The function
# is called for its side effect (the saved PNG).
#
# Fix vs. original: the plot title said "Manhanttan"; now "Manhattan".
ManhattanGenerator <- function(gwasResults,path,pheno){
  df <- gwasResults %>%
    # Compute chromosome size
    group_by(CHR) %>%
    summarise(chr_len=max(BP)) %>%
    # Calculate cumulative position of each chromosome
    mutate(tot=cumsum(as.numeric(chr_len))-chr_len) %>%
    select(-chr_len) %>%
    # Add this info to the initial dataset
    left_join(gwasResults, ., by=c("CHR"="CHR")) %>%
    # Add a cumulative position of each SNP
    arrange(CHR, BP) %>%
    mutate( BPcum=BP+tot) %>%
    # Add highlight and annotation information
    mutate( is_annotate = ifelse(-log10(P)>=5, "yes", "no")) %>%
    mutate( is_highlight = ifelse(is_annotate == "yes", "yes", "no")) %>%
    mutate( is_super_highlight = ifelse(-log10(P)>=7.3, "yes", "no"))

  # Midpoint of each chromosome on the cumulative axis, used for x labels.
  axisdf <- df %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )

  p = ggplot(df, aes(x=BPcum, y=-log10(P))) +
    # Show all points, alternating two colours by chromosome
    geom_point( aes(color=as.factor(CHR)), alpha=0.8, size=1.3) +
    scale_color_manual(values = rep(c("#28286F", "#758FC5"), 22 )) +
    # custom X axis: one label per chromosome, centred on it
    scale_x_continuous( label = axisdf$CHR, breaks= axisdf$center ) +
    scale_y_continuous(limits=c(0,10),breaks=c(0,2.5,5,7.3,10))+
    # Add highlighted points (orange = suggestive, red = genome-wide)
    geom_point(data=subset(df, is_highlight=="yes"), color="orange", size=2) +
    geom_point(data=subset(df, is_super_highlight=="yes"), color="red", size=3) +
    # Add label using ggrepel to avoid overlapping
    geom_label_repel( data=subset(df, is_annotate=="yes"), aes(label=SNP), size=2) +
    # Custom the theme:
    theme_bw() +
    theme(
      legend.position="none",
      panel.border = element_blank(),
      panel.grid.major.x = element_blank(),
      panel.grid.minor.x = element_blank(),
      axis.title.x = element_text(size=13),
      axis.title.y = element_text(size=13, hjust = 0.5),
      axis.text = element_text(size=13, hjust = 0.5),
      plot.title = element_text(hjust = 0.5)
    ) + ggtitle(sprintf("%s - Manhattan plot", pheno)) +
    xlab("Chromosome") + ylab(expression(-log[1*0](p)))

  # Dashed reference lines: blue = suggestive (1e-5), red = genome-wide (5e-8).
  p2 = p + geom_hline(yintercept=-log10(1e-5), linetype="dashed",
                      color = "blue4", size=0.3) +
    geom_hline(yintercept=-log10(5e-8), linetype="dashed",
               color = "red", size=0.3)

  ggsave(
    filename=path,
    plot=p2,
    device="png",
    dpi=500,
    width = 14,
    height = 8,
    units = "in"
  )
}
|
f9c4ea6a2fcdde41c60452fb4b98273cec4ac2cc | 369df18b3ee9b666e118a8adde0a0494f2257b20 | /method.R | 0b6d06015e0a45ddf73c7eb422f95badcfc387e1 | [] | no_license | jarichardsonccbf/edi_look | c421afe208261ada86e3ee8b25ae8f5db9927b82 | 77296f01c360c109df756c61008f285737060b23 | refs/heads/master | 2022-04-20T03:51:48.945932 | 2020-04-17T15:44:44 | 2020-04-17T15:44:44 | 255,673,963 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,733 | r | method.R | KA <- data.frame(KeyAccount = c("WALMART DISCOUNT STO",
"WALMART NEIGHBORHOOD",
"WALMART SUPER CENTER",
"PUBLIX (JACKSONVILLE",
"PUBLIX (LAKELAND)",
"PUBLIX (MIAMI)",
"SPEEDWAY",
"7-ELEVEN",
"RACETRAC FRANCHISE",
"RACETRAC PETROLEUM",
"PUBLIX PHARMACYS",
"WALGREEN",
"DOLLAR GENERAL #1016",
"CVS DRUGS",
"FAMILY DOLLAR STORES",
"DOLLAR TREE STORES",
"BIG LOTS",
"FIVE BELOW"),
`Ordering Method` = c("Walmart EDID",
"Walmart EDID",
"Walmart EDID",
"EDI",
"EDI",
"EDI",
"EDI",
"EDI",
"EDID",
"EDID",
"EDID",
"Future EDID",
"Future EDID",
"Future EDID",
"Future EDID",
"Future EDID",
"Future EDID",
"Future EDID"))
# Single-row lookup table: ordering method for the Convenience channel.
# NOTE(review): data.frame() with the default check.names = TRUE renames
# `Ordering Method` to `Ordering.Method`; confirm downstream code expects
# the dotted name (the KA table above has the same behaviour).
channel <- data.frame(Channel = c("Convenience Store/Pe"),
                      `Ordering Method` = c("EDID"))
9b3ca80e2e3b67be6f9eb39491616f8d4f7add13 | abfcaf9a17cf0fa5221d16a880d212b767e08f3c | /VANESSA-DAM-SA/vanessadamsa/inst/vanessadamsa/ui.r | c04044f666f8dd31a1730736bc6619e0d5263b5b | [
"MIT"
] | permissive | orijitghosh/VANESSA-DAM | 9c91276b5bd6d4d44fd31ca68847c545380af8e4 | 79912224ca1d55c2039b6570a058aafa0ad8e95c | refs/heads/main | 2023-04-17T01:09:27.242862 | 2023-02-09T12:07:41 | 2023-02-09T12:07:41 | 326,950,938 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 61,995 | r | ui.r | library(shiny)
library(shinythemes)
library(WaveletComp)
library(dplyr)
library(shinydashboard)
library(shinycustomloader)
library(shinyalert)
library(colourpicker)
library(shinyWidgets)
library(shinyhelper)
library(ggplot2)
source("helpers.R")
shinyUI <-
(
navbarPage(
"VANESSA-DAM-Sleep",
theme = shinytheme("sandstone"),
collapsible = TRUE,
fluid = TRUE,
position = c("static-top"),
id = "tabs",
tabPanel(
"Data input",
icon = icon("table"),
useShinyalert(),
sidebarPanel(
width = 2,
fileInput(
"data",
"Choose Monitor Files",
multiple = TRUE,
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv"
)
),
# tags$hr(),
fileInput(
"meta",
"Choose Metadata File",
multiple = FALSE,
accept = c(
"text/csv",
"text/comma-separated-values,text/plain",
".csv"
)
) %>%
helper(
type = "inline",
title = "Metadata file",
content = c(
"Your Metadata file should be a comma separated file and have these following <b>six</b> columns:",
"<i>file</i>, <i>start_datetime</i>, <i>stop_datetime</i>, <i>region_id</i>, <i>genotype</i>, <i>replicate</i>",
"The Metadata file can be generated from within the app from the <b>DATA FORMATTING</b> tab"
),
size = "m",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"modtau",
"Modulo tau for wrapping",
24, 0, 24
) %>%
helper(
type = "inline",
title = "Modulo tao for wrapping",
content = c(
"Use modulo tao for the particular genotype, this will be used for averaging over subjective days in case your experiment was in DD.",
"<b>REMEMBER: the same modulo tao will be used for ALL genotypes and monitors. So change this wisely.</b>"
),
size = "m",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"ldperiod",
"LD cycle period",
24, 0, 24
) %>%
helper(
type = "inline",
title = "LD cycle period",
content = c(
"This value will be used to determine light dark shading in the plots."
),
size = "m",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"min",
"Summary time window in minutes",
15, 0, 240
) %>%
helper(
type = "inline",
title = "Summary window",
content = c("The app will detect the binning in your data, this bin value will be used to plot graphs"),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"light",
"Duration of light in hours",
12, 0, 24
) %>%
helper(
type = "inline",
title = "Duration of light in hours",
content = c(
"This value will be used for light dark shading of the plots.",
"The light part will be determined by the starting of your <i>start_datetime</i> value in metadata file."
),
size = "m",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"genotype",
"Number of monitors",
1, 1, 12
) %>%
helper(
type = "inline",
title = "Number of Monitors",
content = c("Enter the total number of Monitor files you want to analyze."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"replicate",
"Number of replicates",
1, 1, 12
),
numericInput(
"remove",
"How many minimum days flies should be alive to be counted?",
2,
0,
30
),
numericInput(
"start",
"Starting day",
1, 1, 30
) %>%
helper(
type = "inline",
title = "Starting day",
content = c("Subset your data, Starting day 1 will be 1. <i>leave out transition days</i>"),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
numericInput(
"end",
"Ending day",
3, 1, 30
) %>%
helper(
type = "inline",
title = "Ending day",
content = c("Subset your data, if your data is 8 days long, the 8th day will be ending day 7. <i>leave out transition days</i>"),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
# submitButton("Update Values on all fields", icon("refresh"), width = "200px"),
withBusyIndicatorUI(
actionBttn(
inputId = "cal",
label = "Start calculations!",
style = "minimal",
color = "primary",
icon = icon("calculator")
)
) %>%
helper(
type = "inline",
title = "Calculations done by pressing this button",
content = c("By pressing this button you will curate your data (remove data when an individual is dead), remove individuals which were dead before the number of days you specified in the <i>How many minimum days flies should be alive to be counted?</i> box. All calculations will also be done in this period."),
size = "m",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
)
),
sidebarPanel(
colourInput("col1", "Select colour", "#005900", allowTransparent = TRUE, returnName = TRUE),
colourInput("col2", "Select colour", "#2796A3", allowTransparent = TRUE, returnName = TRUE),
colourInput("col3", "Select colour", "#DF7000", allowTransparent = TRUE, returnName = TRUE),
colourInput("col4", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col5", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col6", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col7", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col8", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col9", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col10", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col11", "Select colour", allowTransparent = TRUE, returnName = TRUE),
colourInput("col12", "Select colour", allowTransparent = TRUE, returnName = TRUE),
width = 2
),
# submitButton("Update Values on all fields", icon("refresh"), width = "200px"),
mainPanel(box(
width = 12,
div(
style = "overflow-x: scroll",
DT::dataTableOutput("contents") %>% withLoader(type = "html", loader = "pacman")
),
tags$hr(),
downloadBttn(
outputId = "report",
label = "Generate report",
style = "minimal",
color = "primary"
)
))
),
tabPanel(
"Sleep Profiles",
icon = icon("chart-area"),
mainPanel(
tags$hr(),
navlistPanel(
" Figures",
tabPanel(
"All ethograms"
%>%
helper(
type = "inline",
title = "",
content = c("All sleep ethograms for all individuals for the chosen days will be shown."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("alletho_height", "height", 2500, 500, 10000, 50),
numericInput("alletho_width", "width", 1500, 500, 10000, 50),
actionBttn(
inputId = "plotalletho",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"alletho"
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Curated ethograms"
%>%
helper(
type = "inline",
title = "",
content = c("All curated sleep ethograms for all individuals for the chosen days will be shown."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("curatedetho_height", "height", 2500, 500, 10000, 50),
numericInput("curatedetho_width", "width", 1500, 500, 10000, 50),
actionBttn(
inputId = "plotcuratedetho",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"curatedetho"
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Curated ethograms wrapped"
%>%
helper(
type = "inline",
title = "",
content = c("All curated sleep ethograms for all individuals for the chosen days averaged across days will be shown."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("curatedetho_wrap_height", "height", 2500, 500, 10000, 50),
numericInput("curatedetho_wrap_width", "width", 1000, 500, 10000, 50),
actionBttn(
inputId = "plotcuratedetho_wrap",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"curatedetho_wrap"
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Plots over days individual"
%>%
helper(
type = "inline",
title = "",
content = c("Sleep will be plotted for each individual over chosen days."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplot_height", "height", 6000, 3000, 10000, 50),
numericInput("popplot_width", "width", 1500, 500, 10000, 50),
actionBttn(
inputId = "plotpopplot",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"popplot",
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Average plot wrapped individual"
%>%
helper(
type = "inline",
title = "",
content = c("Sleep will be plotted for each individual, averaged over chosen days."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplotwrap_height", "height", 1400, 800, 3000, 50),
numericInput("popplotwrap_width", "width", 1500, 500, 10000, 50),
actionBttn(
inputId = "plotpopplotwrap",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"popplotwrap",
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Average plots over days"
%>%
helper(
type = "inline",
title = "",
content = c("For each genotype sleep will be plotted over chosen days after averaging over all inidividuals."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplot1_height", "height", 300, 100, 1000, 50),
numericInput("popplot1_width", "width", 1200, 500, 10000, 50),
actionBttn(
inputId = "plotpopplot1",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"popplot1",
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Average plot wrapped"
%>%
helper(
type = "inline",
title = "",
content = c("For each genotype sleep will be plotted averaged over chosen days after averaging over all inidividuals."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplotwrap1_height", "height", 300, 100, 1000, 50),
numericInput("popplotwrap1_width", "width", 1000, 500, 10000, 50),
actionBttn(
inputId = "plotpopplotwrap1",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"popplotwrap1",
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Average plot wrapped polar"
%>%
helper(
type = "inline",
title = "",
content = c("For each genotype sleep will be plotted averaged over chosen days after averaging over all inidividuals in a circular scale."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplotwrap1polar_height", "height", 1000, 300, 1500, 50),
numericInput("popplotwrap1polar_width", "width", 1000, 300, 1500, 50),
actionBttn(
inputId = "plotpopplotwrap1polar",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput(
"popplotwrap1polar",
) %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Download data"
%>%
helper(
type = "inline",
title = "",
content = c("All sleep data of each individual will be downloaded as a <b>.csv</b> file. The details of the content and how to navigate through it is available in the tutorial file."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
downloadBttn(
outputId = "downloadData_chi_sq",
label = "Download bout analysis data",
style = "minimal",
color = "primary"
),
downloadBttn(
outputId = "downloadData_chi_sq_new",
label = "Download bout analysis data with phase details",
style = "minimal",
color = "primary"
),
downloadBttn(
outputId = "downloadData_ind_sleep_pro",
label = "Download individual sleep profiles",
style = "minimal",
color = "primary"
),
downloadBttn(
outputId = "downloadData_avg_sleep_pro",
label = "Download average sleep profiles",
style = "minimal",
color = "primary"
),
downloadBttn(
outputId = "downloadData_sleep_phase",
label = "Download sleep time in different phases",
style = "minimal",
color = "primary"
),
downloadBttn(
outputId = "downloadData_awake_phase",
label = "Download awake time in different phases",
style = "minimal",
color = "primary"
),
tags$hr(),
DT::dataTableOutput("periodpower_new") %>% withLoader(type = "html", loader = "pacman")
)
)
)
)
),
tabPanel("Sleep fractions",
icon = icon("chart-bar"),
navlistPanel(
"Data overview",
tabPanel(
"Sleep fraction"
%>%
helper(
type = "inline",
title = "",
content = c("Sleep fraction of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplotwrapbox_height", "height", 700, 300, 2000, 50),
numericInput("popplotwrapbox_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "popplotwrapbox_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotpopplotwrapbox",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("popplotwrapbox") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Sleep fraction summary"
%>%
helper(
type = "inline",
title = "",
content = c("Sleep fractions in light and dark part of the day of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("popplotwrapboxmelt_height", "height", 700, 300, 2000, 50),
numericInput("popplotwrapboxmelt_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "popplotwrapboxmelt_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotpopplotwrapboxmelt",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("popplotwrapboxmelt") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Time spent sleeping"
%>%
helper(
type = "inline",
title = "",
content = c("Total sleep in different days of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("total_sleep_height", "height", 700, 300, 2000, 50),
numericInput("total_sleep_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "total_sleep_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plottotal_sleep",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("total_sleep") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Time spent awake"
%>%
helper(
type = "inline",
title = "",
content = c("Total awake time in different days of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("total_awake_height", "height", 700, 300, 2000, 50),
numericInput("total_awake_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "total_awake_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plottotal_awake",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("total_awake") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Time spent sleeping in day and night"
%>%
helper(
type = "inline",
title = "",
content = c("Total sleep time in light and day parts of different days of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("total_sleep_phase_height", "height", 700, 300, 2000, 50),
numericInput("total_sleep_phase_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "total_sleep_phase_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plottotal_sleep_phase",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("total_sleep_phase") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Time spent awake in day and night"
%>%
helper(
type = "inline",
title = "",
content = c("Total awake time in light and day parts of different days of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("total_awake_phase_height", "height", 700, 300, 2000, 50),
numericInput("total_awake_phase_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "total_awake_phase_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plottotal_awake_phase",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("total_awake_phase") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Activity Index"
%>%
helper(
type = "inline",
title = "",
content = c("Activity index (total awake time activity counts/total awake minutes) of different days of different genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("act_index_height", "height", 700, 300, 2000, 50),
numericInput("act_index_width", "width", 1000, 500, 10000, 50),
awesomeCheckbox(
inputId = "act_index_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotact_index",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("act_index") %>% withLoader(type = "html", loader = "pacman")
)
),
widths = c(3, 9)
# )
)
),
tabPanel("Bout analysis",
icon = icon("chart-bar"),
navlistPanel(
"Data overview",
tabPanel(
"Bouts"
%>%
helper(
type = "inline",
title = "",
content = c("All bouts of different genotypes will be plotted averaged over days and individuals."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("bout_height", "height", 300, 100, 1000, 50),
numericInput("bout_width", "width", 1000, 500, 10000, 50),
actionBttn(
inputId = "plotbout",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("bout") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Number of bouts"
%>%
helper(
type = "inline",
title = "",
content = c("Number of bouts of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("numbouts_height", "height", 500, 300, 1000, 50),
numericInput("numbouts_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "numbouts_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotnumbouts",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("numbouts") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Number of awake bouts"
%>%
helper(
type = "inline",
title = "",
content = c("Number of awake bouts of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("numbouts_awake_height", "height", 500, 300, 1000, 50),
numericInput("numbouts_awake_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "numbouts_awake_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotnumbouts_awake",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("numbouts_awake") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Mean bout length"
%>%
helper(
type = "inline",
title = "",
content = c("Mean lengths of bouts of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("meanboutlength_height", "height", 500, 300, 2000, 50),
numericInput("meanboutlength_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "meanboutlength_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotmeanboutlength",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("meanboutlength") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Mean awake bout length"
%>%
helper(
type = "inline",
title = "",
content = c("Mean lengths of awake bouts of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("meanboutlength_awake_height", "height", 500, 300, 2000, 50),
numericInput("meanboutlength_awake_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "meanboutlength_awake_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotmeanboutlength_awake",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("meanboutlength_awake") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Number of bouts in light and dark phase"
%>%
helper(
type = "inline",
title = "",
content = c("Number of bouts separately in the light and dark part of the day of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("numbouts_ld_height", "height", 500, 300, 2000, 50),
numericInput("numbouts_ld_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "numbouts_ld_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotnumbouts_ld",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("numbouts_ld") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Mean bout length in light and dark phase"
%>%
helper(
type = "inline",
title = "",
content = c("Mean lengths of bouts separately in the light and dark part of the day of differemt genotypes will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("meanboutlength_ld_height", "height", 500, 300, 2000, 50),
numericInput("meanboutlength_ld_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "meanboutlength_ld_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotmeanboutlength_ld",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("meanboutlength_ld") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Mean bout length distribution"
%>%
helper(
type = "inline",
title = "",
content = c("Distriubtion of mean lengths of bouts of differemt genotypes will be plotted as <i>density ridge plots</i>."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("meanboutlength_distrib_height", "height", 500, 300, 2000, 50),
numericInput("meanboutlength_distrib_width", "width", 700, 500, 10000, 50),
actionBttn(
inputId = "plotmeanboutlength_distrib",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("meanboutlength_distrib") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Latency to first bout"
%>%
helper(
type = "inline",
title = "",
content = c("Latency to first bout of differemt genotypes will be plotted as violin plots. Latency is calculated as minutes from ZT0. So, naturally the dark phase latency will be much larger, in case you want to calculate dark phase latency, substract total light hours in minutes from the reported dark phase latency value"),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("latency_height", "height", 500, 300, 1000, 50),
numericInput("latency_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "latency_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotlatency",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("latency") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Latency to first bout in light and dark"
%>%
helper(
type = "inline",
title = "",
content = c("Latency to first bout of differemt genotypes in light and dark phases will be plotted as violin plots."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("latency_ld_height", "height", 500, 300, 1000, 50),
numericInput("latency_ld_width", "width", 700, 500, 10000, 50),
awesomeCheckbox(
inputId = "latency_ld_text",
label = "Print mean values on plot",
value = TRUE
),
actionBttn(
inputId = "plotlatency_ld",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("latency_ld") %>% withLoader(type = "html", loader = "pacman")
)
),
tabPanel(
"Bout summary"
%>%
helper(
type = "inline",
title = "",
content = c("Number of bouts vs mean lengths of bouts will be plotted as a scattered plot."),
size = "s",
buttonLabel = "Okay!",
easyClose = TRUE,
fade = TRUE
),
mainPanel(
splitLayout(
numericInput("boutsummary_height", "height", 300, 100, 1000, 50),
numericInput("boutsummary_width", "width", 1000, 500, 10000, 50),
actionBttn(
inputId = "plotboutsummary",
label = "Plot",
style = "minimal",
color = "primary",
icon = icon("forward")
)
),
tags$hr(),
plotOutput("boutsummary") %>% withLoader(type = "html", loader = "pacman")
)
),
widths = c(3, 9)
)
),
tabPanel(
"Data formatting",
icon = icon("database"),
sidebarPanel(
width = 3,
textInput("monitorname1", label = "Monitor#1 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime1",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
airDatepickerInput(
inputId = "enddatetime1",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype1_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate1_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype1_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate1_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype1_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate1_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype1_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate1_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname2", label = "Monitor#2 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime2",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime2",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype2_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate2_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype2_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate2_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype2_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate2_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype2_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate2_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname3", label = "Monitor#3 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime3",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime3",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype3_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate3_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype3_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate3_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype3_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate3_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype3_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate3_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname4", label = "Monitor#4 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime4",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime4",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype4_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate4_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype4_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate4_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype4_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate4_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype4_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate4_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname5", label = "Monitor#5 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime5",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
airDatepickerInput(
inputId = "enddatetime5",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype5_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate5_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype5_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate5_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype5_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate5_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype5_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate5_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname6", label = "Monitor#6 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime6",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime6",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype6_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate6_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype6_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate6_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype6_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate6_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype6_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate6_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname7", label = "Monitor#7 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime7",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime7",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype7_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate7_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype7_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate7_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype7_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate7_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype7_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate7_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname8", label = "Monitor#8 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime8",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime8",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype8_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate8_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype8_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate8_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype8_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate8_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype8_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate8_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname9", label = "Monitor#9 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime9",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
airDatepickerInput(
inputId = "enddatetime9",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype9_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate9_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype9_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate9_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype9_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate9_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype9_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate9_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname10", label = "Monitor#10 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime10",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime10",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype10_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate10_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype10_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate10_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype10_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate10_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype10_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate10_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname11", label = "Monitor#11 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime11",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime11",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype11_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate11_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype11_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate11_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype11_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate11_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype11_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate11_4", label = "Replicate for channels 25-32", value = ""),
hr(),
textInput("monitorname12", label = "Monitor#12 name", value = "", ),
airDatepickerInput(
inputId = "startdatetime12",
value = Sys.Date(),
label = "Pick start date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
hr(),
airDatepickerInput(
inputId = "enddatetime12",
value = Sys.Date(),
label = "Pick end date and time:",
timepicker = TRUE,
timepickerOpts = timepickerOptions(timeFormat = "hh:ii:00"),
update_on = "close",
addon = "right"
),
textInput("genotype12_1", label = "Genotype for channels 1-8", value = ""),
textInput("replicate12_1", label = "Replicate for channels 1-8", value = ""),
textInput("genotype12_2", label = "Genotype for channels 9-16", value = ""),
textInput("replicate12_2", label = "Replicate for channels 9-16", value = ""),
textInput("genotype12_3", label = "Genotype for channels 17-24", value = ""),
textInput("replicate12_3", label = "Replicate for channels 17-24", value = ""),
textInput("genotype12_4", label = "Genotype for channels 25-32", value = ""),
textInput("replicate12_4", label = "Replicate for channels 25-32", value = ""),
hr(),
downloadBttn(
outputId = "downloadmetadata",
label = "Download metadata",
style = "minimal",
color = "primary"
),
),
mainPanel(box(
width = 12,
div(
style = "overflow-x: scroll",
actionBttn(
inputId = "updatemeta",
label = "Update metadata",
style = "minimal",
color = "primary",
icon = icon("forward")
),
tableOutput("userdata") %>% withLoader(type = "html", loader = "pacman")
)
))
),
  tabPanel(
    "Documentation",
    icon = icon("book-open"),
    # Render the documentation UI built in a separate script.  source()
    # returns list(value, visible); "[1]" keeps only the $value element
    # (the UI object) so the logical "visible" flag is not rendered.
    source("Documentation.R", local = TRUE)[1]
  )
)
)
|
c026af2012f984b319eae9a44c7970f4910bdaf0 | edf2d3864db8751074133b2c66a7e7995a960c6b | /R/EMLeastSquaresClassifier.R | ac1e1ce36631d6132b02b8dd06efcb40af311583 | [] | no_license | jkrijthe/RSSL | 78a565b587388941ba1c8ad8af3179bfb18091bb | 344e91fce7a1e209e57d4d7f2e35438015f1d08a | refs/heads/master | 2023-04-03T12:12:26.960320 | 2023-03-13T19:21:31 | 2023-03-13T19:21:31 | 7,248,018 | 65 | 24 | null | 2023-03-28T06:46:23 | 2012-12-19T21:55:39 | R | UTF-8 | R | false | false | 9,998 | r | EMLeastSquaresClassifier.R | #' @include LeastSquaresClassifier.R
# S4 class for the EM-style semi-supervised least squares classifier.
# Extends LeastSquaresClassifier with three extra slots:
#   responsibilities - imputed (soft or hard) labels of the unlabeled objects
#   opt_res          - optimization result (optim() output or iteration count)
#   intermediate     - intermediate solutions saved when save_all=TRUE
setClass("EMLeastSquaresClassifier",
         representation(responsibilities="ANY",opt_res="ANY",intermediate="ANY"),
         prototype(name="Expectation Maximization Least Squares Classifier"),
         contains="LeastSquaresClassifier")
#' An Expectation Maximization like approach to Semi-Supervised Least Squares Classification
#'
#' As studied in Krijthe & Loog (2016), minimizes the total loss of the labeled and unlabeled objects by finding the weight vector and labels that minimize the total loss. The algorithm proceeds similar to EM, by subsequently applying a weight update and a soft labeling of the unlabeled objects. This is repeated until convergence.
#'
#' By default (method="block") the weights of the classifier are updated, after which the unknown labels are updated. method="simple" uses LBFGS to do this update simultaneously. Objective="responsibility" corresponds to the responsibility based, instead of the label based, objective function in Krijthe & Loog (2016), which is equivalent to hard-label self-learning.
#'
#' @param scale Should the features be normalized? (default: FALSE)
#' @param eps Stopping criterion for the minimization
#' @param verbose logical; Controls the verbosity of the output
#' @param alpha numeric; the mixture of the new responsibilities and the old in each iteration of the algorithm (default: 1)
#' @param method character; one of "block", for block gradient descent or "simple" for LBFGS optimization (default="block")
#' @param objective character; "responsibility" for hard label self-learning or "label" for soft-label self-learning
#' @param init objective character; "random" for random initialization of labels, "supervised" to use supervised solution as initialization or a numeric vector with a coefficient vector to use to calculate the initialization
#' @param max_iter integer; maximum number of iterations
#' @param beta numeric; value between 0 and 1 that determines how much to move to the new solution from the old solution at each step of the block gradient descent
#' @param save_all logical; saves all classifiers trained during block gradient descent
#' @references Krijthe, J.H. & Loog, M., 2016. Optimistic Semi-supervised Least Squares Classification. In International Conference on Pattern Recognition (To Appear).
#' @inheritParams BaseClassifier
#' @family RSSL classifiers
#' @examples
#' library(dplyr)
#' library(ggplot2)
#'
#' set.seed(1)
#'
#'df <- generate2ClassGaussian(200,d=2,var=0.2) %>%
#' add_missinglabels_mar(Class~.,prob = 0.96)
#'
#'# Soft-label vs. hard-label self-learning
#'classifiers <- list(
#' "Supervised"=LeastSquaresClassifier(Class~.,df),
#' "EM-Soft"=EMLeastSquaresClassifier(Class~.,df,objective="label"),
#' "EM-Hard"=EMLeastSquaresClassifier(Class~.,df,objective="responsibility")
#')
#'
#'df %>%
#' ggplot(aes(x=X1,y=X2,color=Class)) +
#' geom_point() +
#' coord_equal() +
#' scale_y_continuous(limits=c(-2,2)) +
#' stat_classifier(aes(linetype=..classifier..),
#' classifiers=classifiers)
#'
#' @export
EMLeastSquaresClassifier <- function(X, y, X_u, x_center=FALSE, scale=FALSE, verbose=FALSE, intercept=TRUE, lambda=0, eps=10e-10, y_scale=FALSE, alpha=1, beta=1, init="supervised", method="block", objective="label", save_all=FALSE, max_iter=1000) {
  ## Preprocessing to correct datastructures and scaling.
  ## BUG FIX: x_center used to be hard-coded to FALSE in this call, silently
  ## ignoring the user-supplied x_center argument; it is now passed through.
  ModelVariables <- PreProcessing(X=X, y=y, X_u=X_u, scale=scale, intercept=intercept, x_center=x_center)
  X <- ModelVariables$X
  X_u <- ModelVariables$X_u
  Y <- ModelVariables$Y[, 1, drop=FALSE]  # binary problem: keep first indicator column
  scaling <- ModelVariables$scaling
  classnames <- ModelVariables$classnames
  modelform <- ModelVariables$modelform

  if (length(classnames) != 2) { stop("EMLeastSquaresClassifier requires 2 classes.") }

  n <- nrow(X)   # number of labeled objects
  m <- ncol(X)   # number of features (incl. intercept column, if any)
  k <- ncol(Y)   # number of target columns (1 for the binary case)

  Xe <- rbind(X, X_u)  # stacked labeled + unlabeled design matrix

  opt_res <- NULL
  intermediate <- NULL

  # Optionally center the targets; y_scale is reused to hold the offset,
  # which is stored on the returned object so predictions can be shifted back.
  if (y_scale) {
    y_scale <- colMeans(Y)
  } else {
    y_scale <- rep(0, ncol(Y))
  }
  Y <- sweep(Y, 2, y_scale)

  # Pseudo-inverse used to solve the (regularized) normal equations.
  # FIX: the original `if (nrow(X) < ncol(X))` had two IDENTICAL branches,
  # making the conditional dead code; collapsed to a single definition.
  # (Presumably solve() was once intended for the well-posed case — TODO confirm.)
  inv <- function(X) { ginv(X) }

  # Supervised least squares solution (intercept column is not penalized);
  # used for initialization and for the contrastive objective.
  w_sup <- inv(t(X) %*% X + n*lambda*diag(c(0, rep(1, (m-1))))) %*% (t(X) %*% t(t(Y)))

  if (intercept) {
    XeInv <- inv(t(Xe) %*% Xe + n*lambda*diag(c(0, rep(1, (m-1)))))
  } else {
    XeInv <- inv(t(Xe) %*% Xe + n*lambda*diag(rep(1, m)))
  }

  if (method == "block") {
    # Block coordinate descent: alternate a (beta-damped) weight update with
    # an (alpha-damped) update of the unlabeled objects' responsibilities.
    if (intercept) {
      theta <- inv(t(X) %*% X + n*lambda*diag(c(0, rep(1, (m-1))))) %*% (t(X) %*% t(t(Y)))
    } else {
      theta <- inv(t(X) %*% X + n*lambda*diag(rep(1, m))) %*% (t(X) %*% t(t(Y)))
    }

    # Initialize the responsibilities of the unlabeled objects.
    if (init == "random") {
      resp <- runif(nrow(X_u))
    } else if (init == "supervised") {
      resp <- X_u %*% theta
    } else if (is.numeric(init)) {
      theta <- init               # a numeric init is interpreted as a coefficient vector
      resp <- X_u %*% theta
    }
    resp_old <- rep(Inf, nrow(X_u))

    if (save_all) {
      intermediate_resp <- list(list(resp))
      intermediate_theta <- list(list(theta))
    }

    iterations <- 0
    # Iterate until the responsibilities stop changing or max_iter is reached.
    while (sum(abs(resp - resp_old)) > eps && iterations < max_iter) {
      Ye <- rbind(Y, matrix(resp, ncol=1))

      # Weight update given the current responsibilities, damped by beta.
      theta_old <- theta
      theta <- XeInv %*% (t(Xe) %*% t(t(Ye)))
      theta <- beta*theta + (1-beta)*theta_old

      # Responsibility update under the selected objective.
      resp_old <- resp
      resp <- X_u %*% theta
      if (objective == "responsibility" || objective == "hard") {
        resp <- as.integer(resp > 0.5)        # hard-label self-learning
      } else if (objective == "label" || objective == "soft") {
        resp <- pmin(1, pmax(0, resp))        # soft labels clipped to [0, 1]
      } else if (objective == "contrastive") {
        resp <- as.integer(X_u %*% theta > X_u %*% w_sup)
      } else {
        stop("Objective not known.")
      }
      resp <- alpha*resp + (1-alpha)*resp_old  # damped by alpha

      if (save_all) {
        intermediate_resp <- c(intermediate_resp, list(resp))
        intermediate_theta <- c(intermediate_theta, list(theta))
      }

      iterations <- iterations + 1
      if (verbose) print(sum(abs(resp - resp_old)))
    }
    if (save_all) { intermediate <- list(intermediate_resp, intermediate_theta) }
    if (verbose) { cat("Number of iterations: ", iterations, "\n") }
    opt_res <- list(counts=iterations)

  } else if (method == "simple") {
    # Joint minimization over weights AND responsibilities with L-BFGS-B,
    # using box constraints to keep the responsibilities inside [0, 1].
    if (init == "random") {
      theta <- c(rnorm(ncol(Xe)), runif(nrow(X_u)))
    } else if (init == "supervised") {
      theta <- c(w_sup, X_u %*% w_sup)
    }

    if (objective == "label" || objective == "soft") {
      opt_res <- optim(theta,
                       loss_minmin_lsy,
                       gr=gradient_minmin_lsy,
                       Xe=Xe, Y=Y, X_u=X_u,
                       method="L-BFGS-B",
                       control=list(maxit=1000),
                       lower=c(rep(-Inf, ncol(X)),
                               rep(0.0 - y_scale, nrow(X_u))),
                       upper=c(rep(Inf, ncol(X)),
                               rep(1.0 - y_scale, nrow(X_u))))
      theta <- opt_res$par
      resp <- theta[-c(1:ncol(Xe))]
      theta <- matrix(theta[1:ncol(Xe)])
    } else if (objective == "responsibility" || objective == "hard") {
      opt_res <- optim(theta,
                       loss_minmin_lsq,
                       gr=gradient_minmin_lsq,
                       Xe=Xe, Y=Y, X_u=X_u, X=X,
                       method="L-BFGS-B",
                       control=list(maxit=1000),
                       lower=c(rep(-Inf, ncol(X)),
                               rep(0.0, nrow(X_u))),
                       upper=c(rep(Inf, ncol(X)),
                               rep(1.0, nrow(X_u))))
      theta <- opt_res$par
      resp <- theta[-c(1:ncol(Xe))]
      theta <- matrix(theta[1:ncol(Xe)])
    } else if (objective == "contrastive") {
      opt_res <- optim(theta,
                       loss_minmin_contrastive_ls,
                       gr=gradient_minmin_contrastive_ls,
                       Xe=Xe, Y=Y, X_u=X_u, X=X, w_sup=w_sup,
                       method="L-BFGS-B",
                       control=list(maxit=1000),
                       lower=c(rep(-Inf, ncol(X)),
                               rep(0.0, nrow(X_u))),
                       upper=c(rep(Inf, ncol(X)),
                               rep(1.0, nrow(X_u))))
      theta <- opt_res$par
      resp <- theta[-c(1:ncol(Xe))]
      theta <- matrix(theta[1:ncol(Xe)])
    }
  } else {
    stop("Unknown method")
  }

  new("EMLeastSquaresClassifier",
      classnames=classnames,
      scaling=scaling,
      theta=theta,
      modelform=modelform,
      intercept=intercept,
      responsibilities=as.numeric(resp),
      y_scale=y_scale,
      opt_res=opt_res,
      intermediate=intermediate)
}
loss_minmin_lsy <- function(theta, Xe, Y, X_u) {
  # Squared-error loss over the joint parameter vector
  # theta = c(weights, responsibilities). X_u is unused but kept so the
  # signature matches the optim() call in EMLeastSquaresClassifier.
  n_w <- ncol(Xe)
  weights <- theta[seq_len(n_w)]
  labels_u <- matrix(theta[-seq_len(n_w)], ncol = 1)
  targets <- rbind(Y, labels_u)
  residuals <- Xe %*% weights - targets
  sum(residuals^2)
}
gradient_minmin_lsy <- function(theta, Xe, Y, X_u) {
  # Analytic gradient of loss_minmin_lsy with respect to
  # c(weights, responsibilities).
  n_w <- ncol(Xe)
  weights <- theta[seq_len(n_w)]
  resp <- theta[-seq_len(n_w)]
  targets <- rbind(Y, matrix(resp, ncol = 1))
  grad_weights <- 2 * t(Xe) %*% (Xe %*% weights) - 2 * t(Xe) %*% targets
  grad_resp <- -2 * (X_u %*% weights - resp)
  c(grad_weights, grad_resp)
}
# Only for 0,1 encoding
loss_minmin_lsq <- function(theta, Xe, Y, X_u, X) {
  # Responsibility-based least-squares loss for 0/1 label encoding.
  # theta = c(weights w, responsibilities q); q weighs each unlabeled object
  # between target 1 and target 0. Xe is only used to size the weight part.
  # FIX: removed a dead computation of Ye (built but never used) and hoisted
  # the repeated X_u %*% w product.
  w <- theta[1:ncol(Xe)]
  q <- theta[-c(1:ncol(Xe))]
  pred_u <- X_u %*% w  # unlabeled predictions, used in both weighted terms
  sum((X %*% w - Y)^2) + sum(q * (pred_u - 1)^2) + sum((1 - q) * pred_u^2)
}
#Only for 0,1 encoding
gradient_minmin_lsq <- function(theta, Xe, Y, X_u, X) {
  # Gradient of the responsibility-based least-squares loss (0/1 encoding)
  # w.r.t. c(weights, responsibilities). X is unused here: the weight part of
  # the gradient is expressed through the stacked design matrix Xe.
  n_w <- ncol(Xe)
  weights <- theta[seq_len(n_w)]
  resp <- theta[-seq_len(n_w)]
  stacked_targets <- rbind(Y, matrix(resp, ncol = 1))
  grad_weights <- 2 * t(Xe) %*% (Xe %*% weights - stacked_targets)
  grad_resp <- -2 * (X_u %*% weights - 0.5)
  c(grad_weights, grad_resp)
}
loss_minmin_contrastive_ls <- function(theta, Xe, Y, X_u, X, w_sup) {
  # Contrastive least-squares loss: loss of the candidate weights minus the
  # loss of the supervised solution w_sup on the same imputed labeling.
  # X_u and X are unused but kept for the optim() call signature.
  n_w <- ncol(Xe)
  weights <- theta[seq_len(n_w)]
  resp <- theta[-seq_len(n_w)]
  targets <- rbind(Y, matrix(resp, ncol = 1))
  sum((Xe %*% weights - targets)^2) - sum((Xe %*% w_sup - targets)^2)
}
gradient_minmin_contrastive_ls <- function(theta,Xe,Y,X_u,X,w_sup) {
w <- theta[1:ncol(Xe)]
u <- theta[-c(1:ncol(Xe))]
Ye <- rbind(Y,matrix(u,ncol=1))
c(2 * t(Xe) %*% Xe %*% w - 2 * t(Xe) %*% Ye,
-2 * (X_u %*% w - X_u %*% w_sup))
} |
6b09a27313f6d47410d5d7390160d617519095dd | 2c5a3c1b0ca9b746ca3657e811466ab9017be59f | /Gotham_Cabs/code/garbage_code/Script1_Regression_tree_lib_tree.R | 68f45a4014b3d392b2511c3c5bd70a96195aeab9 | [] | no_license | ccirelli2/ML_Final_Project_2019 | 5982263cdd2f7ef4818ae9b976dd7525f7dcdc0d | 02b3df31f6a253ac0270ef27545eb46cfac51792 | refs/heads/master | 2020-05-15T13:28:49.872082 | 2019-05-02T20:02:34 | 2019-05-02T20:02:34 | 182,301,101 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,719 | r | Script1_Regression_tree_lib_tree.R | # READINGS_______________________________________________________________________________
'Train Regression Tree using rpart:
- https://www.statmethods.net/advstats/cart.html
Regression Trees using Tree
- http://www.di.fc.ul.pt/~jpn/r/tree/tree.html
Regression Trees
- https://www.datacamp.com/community/tutorials/decision-trees-R
R Documentation "tree"
- https://www.rdocumentation.org/packages/tree/versions/1.0-39/topics/tree
Tree Control
- https://rdrr.io/cran/tree/man/tree.control.html
'
'Regression Tree Tuning Parameters
- tree() : A tree is grown by binary recursive partitioning using the response in the specified formula
and choosing splits from the terms of the right-hand-side.
Tree growth is limited to a depth of 31 by the use of integers to label nodes. (source R documentation)
- formula: target ~ features
- weights: ?
- subset: ? an expression specifying the subset of cases to be used.
- control: control paramaters that you can pass to your model.
- method:
- prune.tree(): Determines a nested sequence of subtrees of the supplied tree by recursively “snipping” off the
least important splits. If k is supplied, the optimal subtree for that value is returned.
- best: max number of terminal nodes
- dataframe: data frame upon which the sequance of cost-complexity subtrees is evaluated.
- method: for regression trees only "deviance" is accepted. For classification an alternative
"misclassification" is allowed.
- tree.control()
- nobs: The number of observations in a training set
- mincut: The minimum number of observations to include in either child node. Defaul = 5.
- minsize: The smallest allowed node size. The default is 10.
- mindev:
- NOTE: This function produces default values of mincut and minsize, and ensures that mincut is
at most half minsize.
To produce a tree that fits the data perfectly, set mindev = 0 and minsize = 2,
if the limit on tree depth allows such a tree.
'
## CLEAR NAMESPACE________________________________________________________________________
rm(list = ls())
## IMPORT LIBRARIES_______________________________________________________________________
library(rpart)
library(tree)
library(ggplot2)
## CREATE DATASET_________________________________________________________________________
setwd('/home/ccirelli2/Desktop/Repositories/ML_Final_Project_2019/Gotham_Cabs/data')
s1.50k.nolimits = read.csv('sample1_50k.csv')[2:12] #[2:12] drop datetime col.
s2.100k.nolimits = read.csv('sample1_100k.csv')[2:12]
s3.250k.nolimits = read.csv('sample1_250k.csv')[2:12]
s4.50k.wlimits = read.csv('sample2_wlimits_50k.csv')[2:12]
s5.100k.wlimits = read.csv('sample2_wlimits_100k.csv')[2:12]
s6.250k.wlimits = read.csv('sample2_wlimits_250k.csv')[2:12]
# RANDOMIZE DATA__________________________________________________________________________
s1.50k.nolimits_ran = s1.50k.nolimits[sample(nrow(s1.50k.nolimits)),]
s2.100k.nolimits_ran = s2.100k.nolimits[sample(nrow(s2.100k.nolimits)),]
s3.250k.nolimits_ran = s3.250k.nolimits[sample(nrow(s3.250k.nolimits)),]
s4.50k.wlimits_ran = s4.50k.wlimits[sample(nrow(s4.50k.wlimits)), ]
s5.100k.wlimits_ran = s5.100k.wlimits[sample(nrow(s5.100k.wlimits)), ]
s6.250k.wlimits_ran = s6.250k.wlimits[sample(nrow(s6.250k.wlimits)), ]
# TRAIN / TEST SPLIT______________________________________________________________________
# Calculate Number of Training Observations
train_nrows_50k = (nrow(s1.50k.nolimits) * .7)
train_nrows_100k = (nrow(s2.100k.nolimits) * .7)
train_nrows_250k = (nrow(s3.250k.nolimits) * .7)
# Train
s1.train = s1.50k.nolimits_ran[1: train_nrows_50k, ]
s2.train = s2.100k.nolimits_ran[1: train_nrows_100k, ]
s3.train = s3.250k.nolimits_ran[1: train_nrows_250k, ]
s4.train = s4.50k.wlimits_ran[1: train_nrows_50k, ]
s5.train = s5.100k.wlimits_ran[1: train_nrows_100k, ]
s6.train = s6.250k.wlimits_ran[1: train_nrows_250k, ]
# Test
s1.test = s1.50k.nolimits_ran[train_nrows_50k: nrow(s1.50k.nolimits_ran), ] # Index from training to total
s2.test = s2.100k.nolimits_ran[train_nrows_100k: nrow(s2.100k.nolimits_ran), ]
s3.test = s3.250k.nolimits_ran[train_nrows_250k: nrow(s3.250k.nolimits_ran), ]
s4.test = s4.50k.wlimits_ran[train_nrows_50k: nrow(s4.50k.wlimits_ran), ]
s5.test = s5.100k.wlimits_ran[train_nrows_100k: nrow(s5.100k.wlimits_ran), ]
s6.test = s6.250k.wlimits_ran[train_nrows_250k: nrow(s6.250k.wlimits_ran), ]
# TRAIN REGRESSION TREE - TREE___________________________________________________
# Train Model 1 -----------------------------------------------------------------
m1.train = tree(duration ~ ., data = s6.train)
# Plot Tree
plot(m1.train)
text(m1.train, cex = .75) # Note how all fo the first splits are distance or speed.
# Get Summary Statistics
m1.summary = summary(m1.train)
m1.summary # Note 15 nodes
# Calculate RSE
m1.train.rse = sqrt(sum((m1.summary$residuals^2)) / (length(m1.summary$residuals) -2) )
print(paste('Model-1 train rse =>', m1.train.rse))
# Make a Prediction
m1.predict = predict(m1.train, s6.test)
m1.test.rse = sqrt(sum((s6.test$duration - m1.predict)^2) / (length(s6.test$duration) -2) )
print(paste('Model-1 test rse =>', m1.test.rse))
# Train Model 2 -----------------------------------------------------------------
m2.train = tree(duration ~ ., data = s6.train, mindev = 0.001) # Controls the number of nodes. Default = 0.01
# Plot Tree
plot(m2.train)
text(m2.train, cex = .75) # Note how all fo the first splits are distance or speed.
# Get Summary Statistics
m2.summary = summary(m2.train)
m2.summary # Note 15 nodes
# Calculate RSE
m2.train.rse = sqrt(sum((m2.summary$residuals^2)) / (length(m2.summary$residuals) -2) )
print(paste('Model-1 train rse =>', m2.train.rse))
# Make a Prediction
m2.predict = predict(m2.train, s6.test)
m2.test.rse = sqrt(sum((s6.test$duration - m2.predict)^2) / (length(s6.test$duration) -2) )
print(paste('Model-1 test rse =>', m2.test.rse))
# Create Lists to Capture Values & A DataFrame to House the Columns
index.mindev = c()
list.train.rse = c()
list.test.rse = c()
list.test.unknowndata.rse = c()
index.count = 1
# Iterate Over Range of Values for mindev
for (i in seq(0.01, 0.001, -0.005)){
# Train Model
index.mindev[index.count] = i
m0.train = tree(duration ~ ., data = s6.train, mindev = i) # Controls the number of nodes. Default = 0.01
# Plot Tree
#plot(m0.train)
#text(m0.train, cex = .75) # Note how all fo the first splits are distance or speed.
# Get Summary Statistics
m0.summary = summary(m0.train)
m0.summary # Note 15 nodes
# Calculate RSE
m0.train.rse = sqrt(sum((m0.summary$residuals^2)) / (length(m0.summary$residuals) -2) )
list.train.rse[index.count] = m0.train.rse
print(paste('Model-1 ', 'mindev => ', i, 'train rse =>', m0.train.rse))
# Make a Prediction
m0.predict = predict(m0.train, s6.test)
m0.test.rse = sqrt(sum((s6.test$duration - m0.predict)^2) / (length(s6.test$duration) -2) )
list.test.rse[index.count] = m0.test.rse
print(paste('Model-1 ', 'mindev => ', i , 'test rse =>', m0.test.rse))
# Make Prediction - Unseen Dataset
m.unknown.predict = predict(m0.train, s4.50k.wlimits_ran)
m.unknowndata.test.rse = sqrt(sum((s4.50k.wlimits_ran$duration - m.unknown.predict)^2) / (length(s6.test$duration) -2) )
list.test.unknowndata.rse[index.count] = m.unknowndata.test.rse
print(paste('Model-1 ', 'mindev => ', i , 'unknown data test rse =>', m.unknowndata.test.rse))
print('-------------------------------------------------------------')
index.count = index.count + 1
}
# Create DataFrame
df.0 = data.frame(row.names = index.mindev)
df.0$train.rse = list.train.rse
df.0$test.rse = list.test.rse
df.0$unknowndata.test.rse = list.test.unknowndata.rse
# Plot Results
p = ggplot() +
geom_line(data = df.0, aes(x = index.mindev, y = df.0$train.rse, color = 'Train RSE')) +
geom_line(data = df.0, aes(x = index.mindev, y = df.0$test.rse, color = 'Test RSE')) +
xlab('Range Values Min Dev') +
ylab('RSE')
print(p)
|
97bd6470f4ea924b2250f034c8ff9c16d915c71f | 0a5386794de4a9af32d80465ee3c790e3361ac5a | /covid-19/cov19_make_bed.r | 37412df20c23c7cceab7788b7cb988d0be826a63 | [] | no_license | JonMarten/RNAseq | 66e62260c7494ca0fd109fd1f3b94855b3a7d5d8 | 582e12c8b5ce92341e8f652dfb84754261e497ba | refs/heads/master | 2021-07-09T18:20:32.719827 | 2020-08-07T09:15:39 | 2020-08-07T09:15:39 | 175,254,662 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,172 | r | cov19_make_bed.r | # make bed format phenotypes for tensorQTL for COVID-19
#Chr start end ID UNR1 UNR2 UNR3 UNR4
#chr1 173863 173864 ENSG123 -0.50 0.82 -0.71 0.83
#chr1 685395 685396 ENSG456 -1.13 1.18 -0.03 0.11
#chr1 700304 700305 ENSG789 -1.18 1.32 -0.36 1.26
library(data.table)
library(dplyr)
#setwd("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/phenotypes")
setwd("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/phenotypes")
# Create mapping file to match phenotype to genotype
omictable <- fread("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/analysis/04_phase2_full_analysis/covariates/processed/INTERVAL_omics_table_02APR2020.csv", data.table = F)
idmap <- omictable %>%
select(genotype_individual_id = affymetrix_ID, phenotype_individual_id = RNA_ID) %>%
filter(!is.na(phenotype_individual_id))
#phe <- fread("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/analysis/01_cis_eqtl_mapping/phenotype/INTERVAL_RNAseq_phase1_filteredSamplesGenes_TMMNormalised_FPKM_#ounts_foranalysis.txt", data.table = F)
phe <- fread("UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_Phase1-2_initialcalling.csv", data.table = F)
anno <- fread("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/analysis/04_phase2_full_analysis/annotation_file/Feature_Annotation_Ensembl_gene_ids_autosomesPlusChrX_b38.txt", data.table = F)
bed <- left_join(phe, anno[,1:4]) %>%
select(Chr = chromosome, start, end, ID = feature_id, INT_RNA7427205:INT_RNA7960548) %>%
arrange(Chr, start) %>%
filter(!is.na(Chr)) %>%
rename("#Chr" = Chr)
# Rename IDs to match genotype file
namevec <- base::match(names(bed)[5:ncol(bed)], idmap$phenotype_individual_id)
names(bed)[5:ncol(bed)] <- as.character(idmap$genotype_individual_id[namevec])
missvec <- which(is.na(names(bed)))
bed <- bed[,-missvec]
# Remove IDs not in covariate file/genotype file
covariates <- fread("/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/analysis/04_phase2_full_analysis/covariates/INTERVAL_RNAseq_phase1-2_age_sex_rin_batch_PC10.txt", data.table = F)
covarids <- covariates[1,-1] %>% as.character()
bedids <- names(bed[5:ncol(bed)])
genoids <- fread("../genotypes/INTERVAL_chrX_merged_cleaned_RNAseq_phase1-2.fam", data.table = F)
genoids <- genoids$V1 %>% as.character
keepids <- intersect(covarids, bedids)
keepids <- intersect(keepids, genoids)
bed2 <- bed %>%
select("#Chr", start, end, ID, keepids)
covkeep <- c(1,which(covariates[1,] %in% keepids))
cov2 <- covariates[,covkeep]
cov19 <- fread("/rds/user/jm2294/rds-jmmh2-projects/interval_rna_seq/scripts/RNAseq/covid-19/covid_genes_b37.csv", data.table = F)
bedcov19 <- bed2 %>% filter(ID %in% cov19$ensembl_id[-5])
fwrite(bedcov19, sep = "\t", file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/phenotypes/INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_COVID19.bed")
write.table(quote = F, row.names = F, col.names = F, cov2, sep = "\t", file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/covariates/INTERVAL_RNAseq_COVID19_covariates.txt")
###########################################
## NOTE: The bed file must be compressed and indexed with the commands below:
# module load ceuadmin/tabix/0.2.6
# bgzip INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_COVID19.bed && tabix -p bed INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_COVID19.bed.gz
############################################
# Remove all individuals with no gene counts for ACE2
ace2 <- bedcov19 %>%
filter(ID == "ENSG00000130234")
ace2.t <- t(ace2[,-(1:4)])
zerovec <- which(ace2.t == min(ace2.t)) + 4
ace2.no0 <- ace2[,-zerovec]
ace2ids <- names(ace2.no0)[-(1:4)]
covkeep.ace2 <- c(1,which(covariates[1,] %in% ace2ids))
cov.ace2 <- covariates[,covkeep.ace2]
fwrite(ace2.no0 , sep = "\t", file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/phenotypes/INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_ACE2_no_zeros.bed")
write.table(cov.ace2, quote = F, row.names = F, col.names = F, sep = "\t", file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/covariates/INTERVAL_RNAseq_COVID19_covariates_ACE2_no_zeros.txt")
# module load ceuadmin/tabix/0.2.6
# bgzip INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_ACE2_no_zeros.bed && tabix -p bed INTERVAL_RNAseq_phase1-2_UNfilteredSamplesGenes_TMMNormalised_FPKM_Counts_foranalysis_ACE2_no_zeros.bed.gz
# Output list of individuals with non-zero ACE2 to filter plink genotypes
plinkout <- data.frame("FID" = ace2ids, "IID" = ace2ids)
write.table(plinkout, sep = "\t", quote = F, col.names = F, row.names = F, file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/genotypes/ace2_nonzero_ids.txt")
# GxE
gxe <- fread("../covariates/INTERVAL_RNAseq_phase1_GxE_neutPCT.txt", data.table = F)
gxe2 <- gxe[,covkeep]
write.table(quote = F, row.names = F, col.names = F, gxe2, sep = "\t", file = "/rds/project/jmmh2/rds-jmmh2-projects/interval_rna_seq/covid19/INTERVAL_RNAseq_COVID19_neutPCT_GxE.txt")
|
a4cbb7ecfe9db4253c47f2426483268fd5ecab6a | acfc4a18c41c8bcd76ff898ec3899b9e59737474 | /R/Max.Profit.PC.R | e8be7959ffe634c54999d62561d460e84ed67669 | [] | no_license | tomvar/bundling | 8a2d135b69973df75320d2a78ba2a7457147af71 | e8fc6e6a1f7b006a3d9ff59a33bb795bbf677a15 | refs/heads/master | 2021-01-10T21:54:43.301831 | 2018-03-14T16:22:51 | 2018-03-14T16:22:51 | 39,305,990 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,982 | r | Max.Profit.PC.R | #' Find maximum profit for Pure componenets strategy
#'
#' @param r1.r2 NX2 reservation prices of two goods []
#' @param p.1.min.max vector giving the minimum and maximum price considered for good 1
#' @param p.2.min.max vector giving the minimum and maximum price considered for good 2
#' @param c.1 good 1 parameter of production cost
#' @param c.2 good 2 parameter of production cost
#' @param alfa parameter of scale economics alfa = 0 --> CRS, alfa < 0 --> IRS, alfa < 0 --> DRS
#' @param beta parameter of scope economics: beta = 0 --> neutral, beta > 0 --> complementary, beta < 0 --> substitution
#' @param teta parameter of complementarity and substitution of goods: teta = 0 --> neutral, teta > 0 --> complementary, teta < 0 --> substitution
#' @param FC fixed Cost of production
#'
#' @return max profit
#'
#' @export
Max.Profit.PC <- function(r1.r2, p.1.min.max, p.2.min.max, c.1, c.2, alfa, beta, teta, FC) {
  # Two-stage parallel grid search for the profit-maximizing price pair under
  # the pure-components (PC) strategy: a coarse pass (step 0.01) over the full
  # admissible range, then a fine pass (step 0.005) around the coarse optimum.
  # Requires a registered foreach parallel backend (%dopar%).
  #
  # All monetary quantities are first normalized by the largest admissible
  # price; the normalization is undone before returning.
  numerate <- max(p.1.min.max, p.2.min.max)
  FC <- FC/numerate
  c.1 <- c.1/numerate
  c.2 <- c.2/numerate
  r1.r2 <- data.frame(r1.r2)/numerate
  p.1.min.max <- p.1.min.max/numerate
  p.2.min.max <- p.2.min.max/numerate
  # Coarse pass: evaluate profit on a 0.01-spaced grid of (p.1, p.2) pairs.
  step <- 0.01
  prices.pc <- Prices.PC(p.1.min.max, p.2.min.max, step)
  output.i <-foreach(i = prices.pc[,1], j = prices.pc[,2], .combine="rbind", .packages = "bundling",.multicombine=TRUE) %dopar% {
    p.1 <- i
    p.2 <- j
    prices.pc.i <- cbind(p.1, p.2)
    output <- Profit.PC(r1.r2, prices.pc.i, c.1, c.2, alfa, beta, teta, FC)
    # Result columns: profit, consumer surplus, total cost, price 1, price 2.
    list(output$profit,output$c.s,output$t.c,output$p.1,output$p.2)
  }
  output <- matrix(unlist(output.i), ncol = 5, byrow = FALSE)
  # NOTE(review): despite the "ind." prefix this is the maximum profit VALUE
  # (column-wise maxima, first column), not an index.
  ind.max.profit <- apply(output, 2, max)[1]
  # Keep rows attaining maximal profit. The single-index subscript works
  # because the logical mask (length nrow) is recycled column-wise, selecting
  # exactly the matching rows' elements in column-major order.
  max.profit <- matrix((output[output[,1] == ind.max.profit]), ncol = 5, byrow = FALSE)
  # Tie-break among equal-profit rows on maximal consumer surplus.
  ind.max.c.s <- apply(max.profit, 2, max)[2]
  max.profit <- matrix((max.profit[max.profit[,2] == ind.max.c.s]),ncol = 5, byrow = FALSE)
  # Fine pass: step 0.005 in a +/-0.1 window around the coarse-pass optimum.
  step <- 0.005
  p.1.min.max <- c(max.profit[1,4]-0.1,max.profit[1,4]+0.1)
  p.2.min.max <- c(max.profit[1,5]-0.1,max.profit[1,5]+0.1)
  prices.pc <- Prices.PC(p.1.min.max, p.2.min.max, step)
  output.i <-foreach(i = prices.pc[,1], j = prices.pc[,2], .combine="rbind", .packages = "bundling",.multicombine=TRUE) %dopar% {
    p.1 <- i
    p.2 <- j
    prices.pc.i <- cbind(p.1, p.2)
    output <- Profit.PC(r1.r2, prices.pc.i, c.1, c.2, alfa, beta, teta, FC)
    list(output$profit,output$c.s,output$t.c,output$p.1,output$p.2)
  }
  output <- matrix(unlist(output.i), ncol = 5, byrow = FALSE)
  ind.max.profit <- apply(output, 2, max)[1]
  max.profit <- matrix((output[output[,1] == ind.max.profit]), ncol = 5, byrow = FALSE)
  ind.max.c.s <- apply(max.profit, 2, max)[2]
  max.profit <- matrix((max.profit[max.profit[,2] == ind.max.c.s]),ncol = 5, byrow = FALSE)
  remove("output")
  # Undo the initial price normalization before returning.
  output.max.PC <- list(
    max.profit = max.profit[1,1]*numerate,
    max.profit.c.s = max.profit[1,2]*numerate,
    max.profit.t.c = max.profit[1,3]*numerate,
    max.profit.p.1 = max.profit[1,4]*numerate,
    max.profit.p.2 = max.profit[1,5]*numerate)
  return(output.max.PC)}
|
10a1bb628de98e0b268263e2e602a5380536ca54 | f186b57cf6e8f1d67055001dbc55a7d6e6d0681e | /man/climater_dap.Rd | 16485c9370097a8bcc07dd182cef4fa3ef8bf2ee | [
"MIT"
] | permissive | mikejohnson51/climateR | 7f005e7ba5e8eb59245cc899b96362d8ed1256a2 | 4d02cd9ccc73f1fd7ad8760b50895a0daaa2f058 | refs/heads/master | 2023-09-04T08:57:09.854817 | 2023-08-21T19:53:33 | 2023-08-21T19:53:33 | 158,620,263 | 138 | 35 | MIT | 2023-08-14T22:52:00 | 2018-11-22T00:07:16 | R | UTF-8 | R | false | true | 1,323 | rd | climater_dap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shortcuts.R
\name{climater_dap}
\alias{climater_dap}
\title{ClimateR dry run}
\usage{
climater_dap(id, args, verbose, dryrun, print.arg = FALSE)
}
\arguments{
\item{id}{The resource name, agency, or catalog identifier}
\item{args}{The parent function arguments}
\item{verbose}{Should messages be emitted?}
\item{dryrun}{Return summary of data prior to retrieving it}
\item{print.arg}{Should arguments be printed? Useful for debugging}
}
\value{
data.frame
}
\description{
ClimateR dry run
}
\seealso{
Other dap:
\code{\link{.resource_grid}()},
\code{\link{.resource_time}()},
\code{\link{climater_filter}()},
\code{\link{dap_crop}()},
\code{\link{dap_get}()},
\code{\link{dap_meta}()},
\code{\link{dap_summary}()},
\code{\link{dap_to_local}()},
\code{\link{dap_xyzv}()},
\code{\link{dap}()},
\code{\link{extract_sites}()},
\code{\link{get_data}()},
\code{\link{go_get_dap_data}()},
\code{\link{grid_meta}()},
\code{\link{make_ext}()},
\code{\link{make_vect}()},
\code{\link{merge_across_time}()},
\code{\link{parse_date}()},
\code{\link{read_dap_file}()},
\code{\link{read_ftp}()},
\code{\link{time_meta}()},
\code{\link{try_att}()},
\code{\link{var_to_terra}()},
\code{\link{variable_meta}()},
\code{\link{vrt_crop_get}()}
}
\concept{dap}
|
0f2a4a16f7a667b18abcde5d7fea1b15ed302a0a | aef733f76ffd42db3b47d70326fff01e1003f3d1 | /Nonlinear-Models/ch7-polynomial-regression-and-step-functions.r | 1e9f8864305e48e5b65d4e09f8fd4653508722a9 | [] | no_license | colson1111/ISLR | 03766237bd8ad859a988bfe3906aefc00ebcdec3 | 8d889e602ba1c1b09fb39107ff1e0e2b781ff1a9 | refs/heads/master | 2021-01-10T21:17:48.493009 | 2015-08-10T17:49:07 | 2015-08-10T17:49:07 | 40,409,428 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,313 | r | ch7-polynomial-regression-and-step-functions.r |
# POLYNOMIAL REGRESSION
library(ISLR)
attach(Wage)
fit <- lm(wage~poly(age, 4), data = Wage)
coef(summary(fit))
fit2 <- lm(wage~poly(age, 4, raw = TRUE), data = Wage)
coef(summary(fit2))
fit2a <- lm(wage~age + I(age^2) + I(age^3) + I(age^4), data = Wage)
coef(fit2a)
fit2b <- lm(wage~cbind(age, age^2, age^3, age^4), data = Wage)
coef(fit2b)
agelims <- range(age)
age.grid <- seq(from = agelims[1], to = agelims[2])
preds <- predict(fit, newdata = list(age = age.grid), se = TRUE)
se.bands <- cbind(preds$fit + 2 * preds$se.fit, preds$fit - 2 * preds$se.fit)
#plot it
par(mfrow = c(1,2), mar = c(4.5, 4.5, 1, 1), oma = c(0, 0, 4, 0))
plot(age, wage, xlim = agelims, cex = 0.5, col = "darkgrey")
title("Degree-4 Polynomial", outer = T)
lines(age.grid, preds$fit, lwd = 2, col = "blue")
matlines(age.grid, se.bands, lwd = 1, col = "blue", lty = 3)
preds2 <- predict(fit2, newdata = list(age = age.grid), se = TRUE)
max(abs(preds$fit - preds2$fit))
# comparing multiple polynomial regression models to determine best one
fit.1 <- lm(wage ~ age, data = Wage)
fit.2 <- lm(wage ~ poly(age, 2), data = Wage)
fit.3 <- lm(wage ~ poly(age, 3), data = Wage)
fit.4 <- lm(wage ~ poly(age, 4), data = Wage)
fit.5 <- lm(wage ~ poly(age, 5), data = Wage)
anova(fit.1, fit.2, fit.3, fit.4, fit.5)
coef(summary(fit.5))
fit.1 <- lm(wage ~ education + age, data = Wage)
fit.2 <- lm(wage ~ education + poly(age, 2), data = Wage)
fit.3 <- lm(wage ~ education + poly(age, 3), data = Wage)
anova(fit.1, fit.2, fit.3)
# POLYNOMIAL LOGISTIC REGRESSION
fit <- glm(I(wage > 250)~poly(age, 4), data = Wage, family = binomial)
preds <- predict(fit, newdata = list(age = age.grid), se = T)
pfit <- exp(preds$fit)/(1 + exp(preds$fit))
se.bands.logit <- cbind(preds$fit + 2 * preds$se.fit, preds$fit - 2 * preds$se.fit)
se.bands <- exp(se.bands.logit)/(1 + exp(se.bands.logit))
# could use: type = "response" in the predict function, but the SEs wouldn't have been right
plot(age, I(wage > 250), xlim = agelims, type = "n", ylim = c(0, 0.2))
points(jitter(age), I((wage>250)/5), cex = 0.5, pch = "|", col = "darkgrey")
lines(age.grid, pfit, lwd = 2, col = "blue")
matlines(age.grid, se.bands, lwd = 1, col = "blue", lty = 3)
# STEP FUNCTIONS
table(cut(age, 4))
fit <- lm(wage ~ cut(age, 4), data = Wage)
coef(summary(fit))
|
0050ec8fc86f00e91b6d3117bf9c661d124acf23 | 44ac6d8f221845534e825eca609ed9d74f342b5e | /CreateNGramFiles/CreateDictionary.R | 37687f4b4495dda77f13dc4d8d475c87b340fe65 | [] | no_license | brianfrancis/CapstoneProject | a66a1619c6a28c4414264807e46ecaeae404fdc5 | 6253c75e930e7366c0a10925e5caba6b829c5d01 | refs/heads/master | 2021-01-12T14:52:56.955536 | 2016-10-09T21:25:20 | 2016-10-09T21:25:20 | 68,934,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,693 | r | CreateDictionary.R |
appendToDictionary <- function(corp,
                               dict.filename, dict.foldername,
                               initialfile){
  # Build or extend the word -> wordID dictionary from a tokenized corpus.
  #
  # Args:
  #   corp:            tokenized corpus (as consumed by stylo::make.frequency.list)
  #   dict.filename:   name of the dictionary CSV file
  #   dict.foldername: folder containing the dictionary CSV file
  #   initialfile:     TRUE when this corpus chunk starts a new dictionary;
  #                    FALSE to append previously-unseen words to an existing one
  #
  # Returns: the full dictionary (data.table with columns word, wordID), which
  #          is also written back to <dict.foldername>/<dict.filename>.
  library(data.table)
  library(stylo)

  # One row per distinct word observed in this corpus chunk.
  words <- data.table(make.frequency.list(corp))
  setnames(words, "V1", "word")
  setkey(words, word)

  # Portable path construction (was paste(..., sep="/")).
  dict.path <- file.path(dict.foldername, dict.filename)

  if (initialfile == FALSE){
    # Load the existing dictionary and continue numbering after its last ID.
    dictionary <- fread(dict.path)
    setkey(dictionary, word)
    nextindex <- max(dictionary$wordID) + 1
    # Right join keeps every observed word; rows with NA wordID are unseen.
    new.words <- dictionary[words, on = c(word = "word")]
    new.words <- new.words[is.na(wordID), ]
    # BUG FIX: seq(nextindex, nextindex - 1 + nrow(new.words)) counted DOWN
    # (and broke the 0-row assignment) when no new words were found;
    # seq_len() yields integer(0) in that case.
    if (nrow(new.words) > 0) {
      new.words[, wordID := nextindex - 1 + seq_len(.N)]
    }
    dictionary <- rbind(dictionary, new.words)
  } else {
    nextindex <- 1
    # Fresh dictionary: also reserve an entry for the unknown-word token.
    words <- rbind(words, list("<unk>"))
    words <- words[order(word)]
    words[, wordID := seq_len(nrow(words))]
    dictionary <- words
  }

  write.csv(dictionary, dict.path, row.names = FALSE)
  dictionary
}
replaceWordsWithIDs <- function(corp, dictionary){
wordlookup <- as.vector(dictionary$wordID)
names(wordlookup) <- dictionary$word
#turn the list into a big vector and replace the word with the word ID
v <- unlist(corp)
v <- wordlookup[v]
names(v) <- NULL
#get indices to recreate list from big vector
f <- rep(1:length(corp),sapply(corp,length))
#turn the vector back into a list
newCorp <- split(v,f)
names(newCorp) <- NULL
newCorp
}
appendToNgram <- function(corp, filename, foldername,
initialfile, ngramsize){
library(data.table)
library(stylo)
x <- sapply(corp, length)
ngrams <- lapply(corp[x>(ngramsize-1)], txt.to.features, ngram.size=ngramsize)
new.ngramfreq <- data.table(make.frequency.list(ngrams, value=TRUE, relative=FALSE))
rm(ngrams)
gc()
setnames(new.ngramfreq,"data", "wordID")
setnames(new.ngramfreq, "N", "freq")
#split the ngram into a column per wordID
ids <- new.ngramfreq[, tstrsplit(wordID, " ", fixed=TRUE)]
#rename the columns
for(i in 1:ncol(ids)){
if (i < ncol(ids)){
setnames(ids,i,paste("cond",i, sep=""))
} else
{setnames(ids,i,"prediction")}
}
#change the wordIDs from character to integer (huge memory savings)
cols <- names(ids)
ids[,(cols):=lapply(.SD,as.integer),.SDcols=cols]
#link it all back together
new.ngramfreq <- cbind(ids, new.ngramfreq)
rm(ids, cols)
new.ngramfreq[,wordID:=NULL]
##check if this is the first file for the dictionary
##if not bring in the existing info and get the next word ID
##otherwise next word ID = 1
if (initialfile == FALSE){
repo.ngramfreq <- readRDS(paste(foldername, filename, sep="/"))
combined <- rbind(repo.ngramfreq, new.ngramfreq)
rm(repo.ngramfreq,new.ngramfreq)
#get the columns we'll group by (all expect freq)
bycols <- names(combined)[names(combined)!="freq"]
setkeyv(combined, bycols)
repo.ngramfreq <- combined[, .(sum(freq)), by=bycols]
rm(combined)
setnames(repo.ngramfreq,"V1","freq")
} else
{
repo.ngramfreq <- new.ngramfreq
}
saveRDS(repo.ngramfreq, paste(foldername, filename, sep="/"))
}
replaceFirstWordWithUNK <- function(olddictionary, dictionary,corp){
setkey(dictionary,word)
#check if old dictionary has anything and get new words if so
#otherwise everything in the dictionary is new
if (nrow(olddictionary) > 0) {
setkey(olddictionary,word)
setkey(dictionary,word)
x <- olddictionary[dictionary]
newwords <- x[is.na(x$wordID)]$word
} else {
newwords <- dictionary$word
}
v <- unlist(corp)
#get indices to recreate list from big vector
f <- rep(1:length(corp),sapply(corp,length))
counter <- as.integer(ave(v, v, FUN=seq_along))
v[counter==1 & v %in% newwords] <- "<unk>"
#turn the vector back into a list
newCorp <- split(v,f)
names(newCorp) <- NULL
newCorp
}
replaceOOVWords <- function(corp, dictionary) {
setkey(dictionary,word)
#vector
v <-(unlist(corp))
#get indices to recreate list from big vector
f <- rep(1:length(corp),sapply(corp,length))
dt <- data.table(v)
setnames(dt,"v","word")
#setkey(dt,word)
x <- dictionary[dt, on=c(word="word")]
x[is.na(x$wordID)]$word <- "<unk>"
v <- x$word
newCorp <- split(v,f)
names(newCorp) <- NULL
newCorp
} |
2fb8e643b9457406127f4f0361c31ac7dde99589 | 94849a599e008fd402255b7cd1b0b90a3434a182 | /transportation.R | f25c4af80f3dd92a9dd90bbd82abca3176cbaf39 | [] | no_license | zhaoqiao0120/Sus_Qiao | 6c8a729f5aa0b155fc72fba87b20e7976fab2ee1 | 17eaebe6f8dfa410a4bfd87c06199c8e73b20258 | refs/heads/master | 2023-01-11T13:00:28.878082 | 2023-01-10T05:38:49 | 2023-01-10T05:38:49 | 209,385,054 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,977 | r | transportation.R | install.packages("sf")
install.packages("raster")
install.packages("spData")
install.packages("dplyr")
install.packages("stplanr")
install.packages("tmap")
devtools::install_github("Nowosad/spDataLarge")
library(sf) # classes and functions for vector data
library(raster) # classes and functions for raster data
library(spData) # load geographic data
library(dplyr)
library(stringr) # for working with strings (pattern matching)
library(spDataLarge) # load larger geographic data
library(stplanr) # geographic transport data package
library(tmap) # visualization package (see Chapter 8)
vignette(package = "sf") # see which vignettes are available
vignette("sf1") # an introduction to the package
############12.3#####
data(bristol_zones)
names(bristol_zones)
data(bristol_od) #travel data
names(bristol_od)
zones_attr = bristol_od %>%
group_by(o) %>%
summarize_if(is.numeric, sum) %>% #the total number of people living in each zone
dplyr::rename(geo_code = o)
zones_joined = left_join(bristol_zones, zones_attr, by = "geo_code")
zones_od = bristol_od %>%
group_by(d) %>%
summarize_if(is.numeric, sum) %>%
dplyr::select(geo_code = d, all_dest = all) %>%
inner_join(zones_joined, ., by = "geo_code")
qtm(zones_od, c("all", "all_dest")) +
tm_layout(panel.labels = c("Origin", "Destination"))
############12.4#####
od_top5 = bristol_od %>%
arrange(desc(all)) %>%
top_n(5, wt = all)
od_intra = filter(bristol_od, o == d)
od_inter = filter(bristol_od, o != d)
desire_lines = od2line(od_inter, zones_od)
#> Creating centroids representing desire line start and end points.
qtm(desire_lines, lines.lwd = "all")
############12.5#####
desire_lines$distance = as.numeric(st_length(desire_lines))
desire_carshort = dplyr::filter(desire_lines, car_driver > 300 & distance < 5000)
route_carshort = line2route(desire_carshort, route_fun = route_osrm)
desire_carshort$geom_car = st_geometry(route_carshort)
############12.6#####
desire_rail = top_n(desire_lines, n = 3, wt = train)
ncol(desire_rail)
#> [1] 10
desire_rail = line_via(desire_rail, bristol_stations)
ncol(desire_rail)
#> [1] 13
############12.7#####
summary(bristol_ways)
ways_freeway = bristol_ways %>% filter(maxspeed == "70 mph")
ways_sln = SpatialLinesNetwork(ways_freeway)#represent route networks simultaneously as graphs and a set of geographic lines
slotNames(ways_sln)
#> [1] "sl" "g" "nb" "weightfield"
weightfield(ways_sln)
#> [1] "length"
class(ways_sln@g)
e = igraph::edge_betweenness(ways_sln@g)#the number of shortest paths passing through each edge
plot(ways_sln@sl$geometry, lwd = e / 500)
############12.8#####
route_rail = desire_rail %>%
st_set_geometry("leg_orig") %>%
line2route(route_fun = route_osrm) %>%
st_set_crs(4326)
route_cycleway = rbind(route_rail, route_carshort)
route_cycleway$all = c(desire_rail$all, desire_carshort$all)
qtm(route_cycleway, lines.lwd = "all")
|
4b32809193b52927fcece472cd200e95f12faea3 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/2438_1/rinput.R | f5dfea7daa3f8519e95007d742eac62421acbe5b | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("2438_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2438_1_unrooted.txt") |
4fd88fb2efd1178ff7b533262d4ff16bcc8ec795 | 355c358f22e2b72b2b4489c0c1b15e93e21bd064 | /tests/testthat.R | 5e1d655aec4afb4cd0f3e727cdc08479993b61a4 | [] | no_license | cran/POV | d517e188413cb5d999fcb9a5fb26bc0f00e3021a | c7cbd5f5ec500adc8dc51d30ee116ffb053524f8 | refs/heads/master | 2023-01-15T05:39:37.693859 | 2020-11-16T15:20:05 | 2020-11-16T15:20:05 | 315,988,928 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50 | r | testthat.R | library(testthat)
library(POV)
test_check("POV")
|
9bbcb392d0eddc72e6e070101af84a3253ee39b8 | 5c42dcf6618e465256a42bb58946eb414d601cd0 | /initialize_country.R | 5dcb254567b9e14b54e07a5081a36bd6514c7d5b | [] | no_license | Ecological-Complexity-Lab/COVID19_vaccine_model | d5091ca7a0ebec96f7ecc1c4698fa8368263c28f | 170605af9cd798d0cb57288cbdcc9cd640ae9c75 | refs/heads/main | 2023-07-27T01:01:13.498480 | 2021-08-20T08:02:28 | 2021-08-20T08:02:28 | 324,159,405 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,917 | r | initialize_country.R | print(paste('Initializing for',current_country))
# Define population size
population_sizes <- tibble(country=c('Israel', 'Belgium', 'Finland', 'Germany', 'Italy', 'Luxembourg', 'Netherlands', 'Poland'),
N=c(8712900,11539.326*1000,5532.159*1000,83517.05*1000,60550.09*1000,615.73*1000,17097.12*1000,37887.77*1000))
N <- subset(population_sizes, country==current_country)$N # Population size
# Load age structure
age_structure <- read_csv('country_age_structure.csv')
age_structure$Proportion <- age_structure[[current_country]]
# Re-estimate contact rates
contacts <- as_tibble(polymod$contacts)
participants <- as_tibble(polymod$participants)
country_contacts <- contacts %>%
left_join(participants, by='part_id') %>%
filter(country==ifelse(current_country=='Israel','Italy',current_country)) %>% # For Israel use Italy's contact matrix. For other countries use the country's matrix
# Define which contacts are relevant for COVID19
mutate(COVID19_contact=ifelse((phys_contact==2 & duration_multi>=3) | phys_contact==1, T, F)) %>%
filter(COVID19_contact==T)
contact_matrix <-
country_contacts %>%
dplyr::select(part_id, cont_id, cnt_age_exact, part_age) %>%
drop_na() %>% # Remove rows with no exact ages.
distinct(part_id, cont_id, part_age, cnt_age=cnt_age_exact) %>%
# Categorize participants into age groups
mutate(part_age_group=case_when(
(part_age>=0 & part_age<=9) ~ "0-9",
(part_age>=10 & part_age<=19) ~ "10-19",
(part_age>=20 & part_age<=29) ~ "20-29",
(part_age>=30 & part_age<=39) ~ "30-39",
(part_age>=40 & part_age<=49) ~ "40-49",
(part_age>=50 & part_age<=59) ~ "50-59",
(part_age>=60 & part_age<=69) ~ "60-69",
(part_age>=70 & part_age<=79) ~ "70-79",
(part_age>=80) ~ "80+")
) %>%
# Categorize contacts into age groups
mutate(cnt_age_group=case_when(
(cnt_age>=0 & cnt_age<=9) ~ "0-9",
(cnt_age>=10 & cnt_age<=19) ~ "10-19",
(cnt_age>=20 & cnt_age<=29) ~ "20-29",
(cnt_age>=30 & cnt_age<=39) ~ "30-39",
(cnt_age>=40 & cnt_age<=49) ~ "40-49",
(cnt_age>=50 & cnt_age<=59) ~ "50-59",
(cnt_age>=60 & cnt_age<=69) ~ "60-69",
(cnt_age>=70 & cnt_age<=79) ~ "70-79",
(cnt_age>=80) ~ "80+")
) %>%
# Count number of participants and contacts
group_by(part_age_group, cnt_age_group) %>%
summarise(num_contacts=length(cont_id), num_participants=n_distinct(part_id)) %>%
# Calculate mean
mutate(mean_contacts=num_contacts/num_participants) %>%
# Make a matrix
dplyr::select(part_age_group, cnt_age_group, mean_contacts) %>%
spread(cnt_age_group, mean_contacts)
contact_matrix <- data.matrix(as.data.frame(contact_matrix)[,-1])
rownames(contact_matrix) <- colnames(contact_matrix)
# At least 1 contact per combination
contact_matrix[is.na(contact_matrix)] <- 1
# Make the matrix symmetric
contact_matrix_sym <- (contact_matrix+t(contact_matrix))/2
|
c8ee58f9b923b9896a71ac82a2d9e5d71ab7b631 | 84c27ec545e7a5e9448d95c0676b882317fafd7c | /tests/testthat/test-268.R | 6399300026a9591ad8fe262f7db4a558e41c0d95 | [] | no_license | OpenMx/OpenMx | ac58c848b4ce63079c79ccad13f972d81c90d348 | cbe1c3207453b92efc96b4fc37205cbe231dda27 | refs/heads/master | 2023-08-24T11:01:53.655345 | 2023-08-20T20:30:35 | 2023-08-20T20:30:35 | 4,393,940 | 86 | 50 | null | 2023-09-01T01:57:08 | 2012-05-21T13:38:34 | R | UTF-8 | R | false | false | 741 | r | test-268.R | library(testthat)
library(OpenMx)
data(demoOneFactor)
context("268")
body <- function() {
E <- mxAlgebra(U%x%1,name="E")
factorModel <- mxModel(
"One Factor",
mxMatrix("Full", 5, 1, values=0.8,
free=TRUE, name="A"),
mxMatrix("Symm", 1, 1, values=1,
free=FALSE, name="L"),
mxMatrix("Diag", 5, 5, values=1,
free=TRUE, name="U"),
mxAlgebra(A %*% L %*% t(A) + E, name="R"),
mxExpectationNormal(covariance = "R",
dimnames = names(demoOneFactor)),
mxFitFunctionML(),
mxData(cov(demoOneFactor), type="cov", numObs=500)
)
expect_error(mxCheckIdentification(factorModel,details=T),
"'E' not found")
}
test_that("268", body)
|
17a3300e707b861134519cf19a6a3132872dcd30 | 96cf6b7c28944616697b5efb2a0cf06ec00dcc3c | /aerolineaJerarquicoCluste.R | ff80708efe5dc06dc0641d5d4f5faff69fc78721 | [] | no_license | alondraSanchezM/clustering | 82301cff806548e6ca35a7722a9838286bc4b06a | 3f257249cb04083fd29816af8b97d9821332f1ea | refs/heads/main | 2023-03-18T17:43:28.222916 | 2021-03-04T19:28:44 | 2021-03-04T19:28:44 | 344,296,061 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,039 | r | aerolineaJerarquicoCluste.R | library(cluster)
library(ggplot2)
library(factoextra)
library(clustertend)
library(dendextend)
library(corrplot)
library(NbClust)
# B. Comercialización para viajeros frecuentes.
aerolinea<-read.csv("RFiles/EastWestAirlinesCluster.csv",header = TRUE, sep = ",")
#---Exploración inicial de los datos
str(aerolinea)
View(aerolinea)
set.seed(124)
hopkins(aerolinea,n=nrow(aerolinea)-1) #si es cercano a 0 el dataset es agrupable
#----Preprocesamiento de los datos
sum(is.na(aerolinea))
aerolinea <- aerolinea[, -c(1)] #Quitamos la columna id
View(aerolinea)
#---------Con datos normalizados
aerolinea <- scale(aerolinea)
#Determinamos el mejor k
nb<-NbClust(aerolinea,distance="euclidean",min.nc=2,max.nc=10,method="ward.D")
fviz_nbclust(nb) #3
#Calculo distancia Euclidiana
dist <- dist(aerolinea, method = "euclidean")
#Agrupamiento jerarquico
hc_ward <- hclust(dist, method = "ward.D")
#Cortar el arbor
grp_ward <- cutree(hc_ward, k = 3)
# Numero de observaciones en cada cluste
table(grp_ward)
fviz_dend(hc_ward, k=3, cex = 0.5,
k_colors = c("#D81159", "#8F2D56","#218380"),
color_labels_by_k = TRUE, rect = TRUE)
#-------------Con datos sin normalizar
#Calculo distancia Euclidiana
dist <- dist(aerolinea, method = "euclidean")
#Agrupamiento jerarquico
hc_ward <- hclust(dist, method = "ward.D")
#Cortar el arbor
grp_ward <- cutree(hc_ward, k = 2)
fviz_dend(hc_ward, k=2, cex = 0.5,
k_colors = c("#D81159", "#8F2D56"),
color_labels_by_k = TRUE, rect = TRUE)
#---------------------------
#--------Comparación de centroides y etiquetado
# Función para encontrar los centroides del grupo
centroid = function(i, dat, groups)
{
ind = (groups == i)
colMeans(dat[ind,])
}
sapply(unique(grp_ward), centroid, aerolinea[,1:11], grp_ward)
table(grp_ward)
dataCluster = aggregate(aerolinea[,1:11],list(grp_ward),median) #Se elimina la columna id dataset normal
data.frame(Cluster=dataCluster[,1],Freq=as.vector(table(grp_ward)),dataCluster[,-1])
#Inciso d muestra del 95%
muestra = aerolinea[sample(nrow(aerolinea), nrow(aerolinea)*0.95,replace=FALSE),]
dist_muestra <- dist(muestra, method = "euclidean")
hc_ward_muestra <- hclust(dist_muestra, method = "ward.D")
grp_ward_m <- cutree(hc_ward_muestra, k = 3)
fviz_dend(hc_ward_muestra, k=3, cex = 0.5,
k_colors = c("#D81159", "#8F2D56","#218380"),
color_labels_by_k = TRUE, rect = TRUE)
#---------------------K-MEANS
aeroMeans <- kmeans(aerolinea, 3, nstart = 25)
print(aeroMeans)
# Cluster size
aeroMeans$size
# Cluster means
aeroMeans$centers
fviz_cluster(aeroMeans, data = aerolinea,
palette = c("#D81159", "#8F2D56","#218380"),
ellipse.type = "euclid", # Concentration ellipse
star.plot = TRUE, # Add segments from centroids to items
repel = TRUE, # Avoid label overplotting (slow)
ggtheme = theme_minimal()
)
kmeansAero <- eclust(aeroMeans, "kmeans", k = 3, nstart = 25, graph = TRUE) |
9db903f0a5778070f9f5a8ebc8c7daa386bd67f1 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sparseFLMM/examples/sparseFLMM.Rd.R | 1a318dfa46d3d016d9c998696ceb4b597a6e703c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,640 | r | sparseFLMM.Rd.R | library(sparseFLMM)
### Name: sparseFLMM
### Title: Functional Linear Mixed Models for Irregularly or Sparsely
### Sampled Data
### Aliases: sparseFLMM
### Keywords: FPCA models,
### ** Examples
## Not run:
##D # subset of acoustic data (very small subset, no meaningful results can be expected and
##D # FAMM estimation does not work for this subset example. For FAMM estimation, see below.)
##D data("acoustic_subset")
##D
##D acoustic_results <- sparseFLMM(curve_info = acoustic_subset, use_RI = FALSE, use_simple = FALSE,
##D method = "fREML", use_bam = TRUE, bs = "ps", d_grid = 100, min_grid = 0,
##D max_grid = 1, my_grid = NULL, bf_mean = 8, bf_covariates = 8, m_mean = c(2,3),
##D covariate = TRUE, num_covariates = 4, covariate_form = rep("by", 4),
##D interaction = TRUE,
##D which_interaction = matrix(c(FALSE, TRUE, TRUE, TRUE, TRUE,
##D FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE,
##D FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE),
##D byrow = TRUE, nrow = 4, ncol = 4),
##D save_model_mean = FALSE, para_estim_mean = FALSE, para_estim_mean_nc = 0,
##D bf_covs = c(5, 5, 5), m_covs = list(c(2, 3), c(2, 3), c(2, 3)),
##D use_whole = FALSE, use_tri = FALSE, use_tri_constr = TRUE,
##D use_tri_constr_weights = FALSE, np = TRUE, mp = TRUE,
##D use_discrete_cov = FALSE,
##D para_estim_cov = FALSE, para_estim_cov_nc = 5,
##D var_level = 0.95, N_B = NA, N_C = NA, N_E = NA,
##D use_famm = FALSE, use_bam_famm = TRUE,
##D bs_int_famm = list(bs = "ps", k = 8, m = c(2, 3)),
##D bs_y_famm = list(bs = "ps", k = 8, m = c(2, 3)),
##D save_model_famm = FALSE, use_discrete_famm = FALSE,
##D para_estim_famm = FALSE, para_estim_famm_nc = 0)
## End(Not run)
## Not run:
##D # whole data set with estimation in the FAMM framework
##D
##D data("acoustic")
##D acoustic_results <- sparseFLMM(curve_info = acoustic, use_RI = FALSE, use_simple = FALSE,
##D method = "fREML", use_bam = TRUE, bs = "ps", d_grid = 100, min_grid = 0,
##D max_grid = 1, my_grid = NULL, bf_mean = 8, bf_covariates = 8, m_mean = c(2,3),
##D covariate = TRUE, num_covariates = 4, covariate_form = rep("by", 4),
##D interaction = TRUE,
##D which_interaction = matrix(c(FALSE, TRUE, TRUE, TRUE, TRUE,
##D FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE,
##D FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE),
##D byrow = TRUE, nrow = 4, ncol = 4),
##D save_model_mean = FALSE, para_estim_mean = FALSE, para_estim_mean_nc = 0,
##D bf_covs = c(5, 5, 5), m_covs = list(c(2, 3), c(2, 3), c(2, 3)),
##D use_whole = FALSE, use_tri = FALSE, use_tri_constr = TRUE,
##D use_tri_constr_weights = FALSE, np = TRUE, mp = TRUE,
##D use_discrete_cov = FALSE,
##D para_estim_cov = TRUE, para_estim_cov_nc = 5,
##D var_level = 0.95, N_B = NA, N_C = NA, N_E = NA,
##D use_famm = TRUE, use_bam_famm = TRUE,
##D bs_int_famm = list(bs = "ps", k = 8, m = c(2, 3)),
##D bs_y_famm = list(bs = "ps", k = 8, m = c(2, 3)),
##D save_model_famm = FALSE, use_discrete_famm = FALSE,
##D para_estim_famm = TRUE, para_estim_famm_nc = 5)
## End(Not run)
|
366d46e62191fa841eb651027f986f93bb6bda3e | 8244d6782e753ba56fb9e755aedb7c9466b5ec38 | /job_calc_comp_files_panther.r | c317971d4ab53191bbea5b33a2774df5eaa3cbe8 | [] | no_license | inbal-tz/shen-orr | 8317edacdc58bebe19b86b153731f65b842ab27b | cd25dce56e79f710d181591c3e0b066a4df8a7b2 | refs/heads/master | 2020-09-21T00:39:58.156494 | 2020-04-07T14:52:53 | 2020-04-07T14:52:53 | 224,630,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,030 | r | job_calc_comp_files_panther.r | #!/Local/md_shenorr/R-3.2.0/bin/Rscript
library(readr)
library(dplyr)
panther_orthologs <- select(read_delim("/storage/md_shenorr/inbaltz/panther_orthologs.csv", delim=','),c(2,3,4))
disease = Sys.getenv("disease")
RFST = Sys.getenv("RFST")
setwd(paste0("/storage/md_shenorr/inbaltz/microarrays/",RFST,"/",disease,"/"))
AllCompPairs_GS <- read_delim("AllCompPairs_GS", delim = '\t',col_names = c('index', 'human_file','mouse_file'), skip=1)
for (comp_file in 1:nrow(AllCompPairs_GS)){
mousedf = readRDS(paste0(AllCompPairs_GS[comp_file,]$mouse_file,"_all_orthologs.rds"))
humandf = readRDS(paste0(AllCompPairs_GS[comp_file,]$human_file,"_all_orthologs.rds"))
colnames(mousedf) <- paste0("MM.",colnames(mousedf))
colnames(humandf) <- paste0("HS.",colnames(humandf))
merged <- merge(merge(panther_orthologs,mousedf, by.x = "mouse_entrez_ID", by.y = 0),humandf,by.x = "human_entrez_ID",by.y = 0)
saveRDS(merged,paste0("comp_",AllCompPairs_GS[comp_file,]$mouse_file,"_",AllCompPairs_GS[comp_file,]$human_file,".rds"))
}
|
32ede84397ed7aa4acacf75a26cdb5488875dff1 | 62b7ecdde00f9019fb59862845d8a2f968e85a11 | /iris_notebook.R | c6f8af5c1489ea8ebba4759d7d281758aa0eb388 | [] | no_license | erikbostrom/Iris-data | ecaf423caf5b775fe60e04c7403ae2abdd2fec47 | a6f17da36ad2028cf043f81d4495f12aaceedf6b | refs/heads/master | 2021-05-09T13:50:51.498207 | 2018-01-26T12:14:47 | 2018-01-26T12:14:47 | 119,046,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,318 | r | iris_notebook.R | ## ------------------------------------------------------------------------
str(iris)
## ------------------------------------------------------------------------
#iris$Species <- as.character(iris$Species)
table(iris$Species)
## ------------------------------------------------------------------------
summary(iris[iris$Species=="setosa",])
summary(iris[iris$Species=="versicolor",])
summary(iris[iris$Species=="virginica",])
## ------------------------------------------------------------------------
## ----fig.height = 6, fig.width = 6---------------------------------------
library(ggplot2)
p <- "Set2"
sfb <- scale_fill_brewer(palette=p)
t <- theme_light()
p1 <- ggplot(iris, aes(x=Sepal.Length, fill=Species)) + geom_histogram(color="black",size=0.3,binwidth=0.05) + sfb + t
p2 <- ggplot(iris, aes(x=Petal.Length, fill=Species)) + geom_histogram(color="black",size=0.3,binwidth=0.05) + sfb + t
grid.arrange(p1, p2, ncol=1)
## ----fig.height = 6, fig.width = 6---------------------------------------
require(gridExtra)
p1 <- ggplot(iris, aes(x=Species,y=Sepal.Length,fill=Species)) + geom_boxplot(color="black",size=0.3,show.legend = FALSE) + sfb + t
p2 <- ggplot(iris, aes(x=Species,y=Sepal.Width, fill=Species)) + geom_boxplot(color="black",size=0.3,show.legend = FALSE) + sfb + t
p3 <- ggplot(iris, aes(x=Species,y=Petal.Length, fill=Species)) + geom_boxplot(color="black",size=0.3,show.legend = FALSE) + sfb + t
p4 <- ggplot(iris, aes(x=Species,y=Petal.Width, fill=Species)) + geom_boxplot(color="black",size=0.3,show.legend = FALSE) + sfb + t
grid.arrange(p1, p2, p3, p4, ncol=2)
## ----fig.height = 8, fig.width = 6---------------------------------------
scb = scale_color_brewer(palette=pal)
p1 <- ggplot(iris,aes(x=Sepal.Length,y=Sepal.Width)) + geom_point(aes(color=Species),size=2) + scb + t
p2 <- ggplot(iris,aes(x=Petal.Length,y=Petal.Width)) + geom_point(aes(color=Species),size=2) + scb + t
grid.arrange(p1, p2, ncol=1)
## ------------------------------------------------------------------------
set.seed(1234)
## ------------------------------------------------------------------------
I <- sample(c(0,1), nrow(iris), replace=TRUE, prob=c(2/3, 1/3))
prop.table(table(I))
## ------------------------------------------------------------------------
iris.train <- iris[I==0,1:4]
iris.train_full <- iris[I==0,]
iris.train_labels <- iris[I==0,5]
iris.test <- iris[I==1,1:4]
iris.test_full <- iris[I==1,]
iris.test_labels <- iris[I==1,5]
## ------------------------------------------------------------------------
library(class)
iris_knn <- knn(train=iris.train, test=iris.test, cl=iris.train_labels, k=3)
iris_knn
## ------------------------------------------------------------------------
comp <- data.frame(iris_knn,data.frame(iris.test_labels))
names(comp) <- c("kNN predicted Species","Correct Species")
count <- 0
j <- 1
err <- 0
for(i in 1:length(comp[,1])){
if (comp[i,1]==comp[i,2]){
count = count + 1
}else{
err[j] <- i
j <- j+1
}
}
print(paste(count,"out of",length(comp[,1]),"predicted correctly! This is",100*count/length(comp[,1]), "percent correct."))
## ------------------------------------------------------------------------
comp[err,]
## ------------------------------------------------------------------------
library(gmodels)
CrossTable(x=iris.test_labels, y=iris_knn, prop.chisq = FALSE)
## ------------------------------------------------------------------------
# Import the random forest package
library(randomForest)
# Compute the training
iris_randomforest <- randomForest(Species~ . , data=iris.train_full, ntree=100, mtry=2, importance=TRUE,proximity=TRUE)
iris_randomforest
## ----fig.height = 2.5, fig.width = 6-------------------------------------
imp <- varImpPlot(iris_randomforest)
graphics.off()
imp <- data.frame(imp)
p1 <- ggplot(imp,aes(y=row.names(imp),x=MeanDecreaseAccuracy)) + geom_point() + t + xlab("Mean accuracy loss [%]") + ylab(NULL)
p2 <- ggplot(imp,aes(y=row.names(imp),x=MeanDecreaseGini)) + geom_point() + t + xlab("Mean Gini impurity decrease [%]") + ylab(NULL)
grid.arrange(p1, p2, ncol=2)
## ------------------------------------------------------------------------
prediction <- predict(iris_randomforest,iris.test_full)
table(observed=iris.test_labels, predicted=prediction)
|
610817ffddf8192fc76a7a32ae2bf5dcf7608172 | 863c5853c0040a5423ae77d940a602687b0502db | /update.R | 4590398a90df99b2baa65d623f5eb1a77a1f21e5 | [] | no_license | BarryHaworth/IMDB | 23da91ce3ef07a0db0c2934a99091f70a90eb312 | d07100c4bf0319b80d37d95d0dc3d892e94bf4ec | refs/heads/master | 2022-07-12T11:15:54.344575 | 2022-06-25T12:09:03 | 2022-06-25T12:09:03 | 144,531,213 | 0 | 0 | null | 2018-12-03T08:42:51 | 2018-08-13T04:56:03 | R | UTF-8 | R | false | false | 15,758 | r | update.R | # Data Download
# Download and save IMDB files
# Data file info https://www.imdb.com/interfaces/
# Data files https://datasets.imdbws.com/
# This program combines the loading of IMDB data files
# and the ripping of vote details and metascores.
# It is intended as the daily automatic data update
# and replaces the programs download.R and metascore.R
library(rvest)
library(dplyr)
library(rmutil)
options(timeout= 4000000)  # very long download timeout: the IMDB dump files are large

print("Program started")
timestamp()

# Working directories (Windows paths). PROJECT_DIR is kept for reference;
# DATA_DIR holds saved .RData state, FILE_DIR holds the raw .tsv.gz dumps.
PROJECT_DIR <- "c:/R/IMDB"
DATA_DIR <- "c:/R/IMDB/data"
FILE_DIR <- "c:/R/IMDB/data/tsv"

# Restore saved state from previous runs. These are expected to create the
# objects `votes`, `metascores` and `rating.history` in the global
# environment -- `votes` and `rating.history` are used below.
load(file=paste0(DATA_DIR,"/votes.RData"))
load(file=paste0(DATA_DIR,"/metascores.RData"))
load(file=paste0(DATA_DIR,"/rating.history.RData"))

# Keep one row per (tconst, Date): slice(1) retains the first duplicate.
votes <- votes %>% group_by(tconst,Date) %>% slice(1) %>% ungroup # Filter for unique date within movie

# Get Ratings file and stamp each file with the date: one dated copy of the
# ratings dump is kept per day, so the download is skipped on re-runs.
if (!file.exists(paste(FILE_DIR,"/ratings-",Sys.Date(),".tsv.gz",sep=""))){
  download.file("https://datasets.imdbws.com/title.ratings.tsv.gz",
                paste0(FILE_DIR,"/ratings-",Sys.Date(),".tsv.gz"))
}
# Get latest version of other files but only keep the latest version
# Download the latest IMDB "title.<file>" dataset (e.g. file = "basics" ->
# title.basics.tsv.gz), keeping a single local copy under FILE_DIR (global).
# The fetch is skipped only when a local copy exists whose modification date
# is today, so at most one download per file per day is performed.
# Prints the local and remote paths as progress output.
# Returns the local file path, invisibly.
get_title <- function(file){
  local_file <- paste0(FILE_DIR, "/title.", file, ".tsv.gz")
  print(paste("Local file:", local_file))
  remote_file <- paste0("https://datasets.imdbws.com/title.", file, ".tsv.gz")
  print(paste("Remote File:", remote_file))
  # `||` short-circuits: when the file is missing, file.info()$mtime is NA and
  # the date comparison would be NA. The original `|` evaluated both sides and
  # only worked because TRUE | NA happens to be TRUE.
  if (!file.exists(local_file) ||
      as.Date(file.info(local_file)$mtime) != Sys.Date()) {
    download.file(remote_file, local_file)
  }
  invisible(local_file)
}
# Download the latest IMDB "name.<file>" dataset (e.g. file = "basics" ->
# name.basics.tsv.gz), keeping a single local copy under FILE_DIR (global).
# The fetch is skipped only when a local copy exists whose modification date
# is today, so at most one download per file per day is performed.
# Prints the local and remote paths as progress output.
# Returns the local file path, invisibly.
get_name <- function(file){
  local_file <- paste0(FILE_DIR, "/name.", file, ".tsv.gz")
  print(paste("Local file:", local_file))
  remote_file <- paste0("https://datasets.imdbws.com/name.", file, ".tsv.gz")
  print(paste("Remote File:", remote_file))
  # `||` short-circuits: when the file is missing, file.info()$mtime is NA and
  # the date comparison would be NA. The original `|` evaluated both sides and
  # only worked because TRUE | NA happens to be TRUE.
  if (!file.exists(local_file) ||
      as.Date(file.info(local_file)$mtime) != Sys.Date()) {
    download.file(remote_file, local_file)
  }
  invisible(local_file)
}
# Refresh today's copies of the remaining dump files. Only basics and
# episode are read further down in this script; crew, principals and
# name.basics are presumably consumed elsewhere -- TODO confirm.
get_title("basics")
get_title("crew")
get_title("episode")
get_title("principals")
get_name("basics")
# Read the ratings snapshot that was downloaded on `date` (from FILE_DIR)
# and stamp every row with that snapshot date, so snapshots from different
# days can be stacked into a history.
read_rat <- function(date){
  snapshot_path <- paste0(FILE_DIR, "/ratings-", date, ".tsv.gz")
  snapshot <- read.delim(snapshot_path, stringsAsFactors = FALSE, quote = "")
  snapshot$Date <- as.Date(date)
  snapshot
}
# Read new data to Ratings data frame (today's snapshot, stamped with today's date)
ratings <- read_rat(Sys.Date())

# Add ratings to Ratings History: append today's snapshot, keep only titles
# with at least 100 votes, and distinct() drops rows duplicated by re-running
# the script on the same day. (The 30-day window filter is disabled.)
rating.history <- rbind(rating.history,ratings) %>%
  filter(numVotes >= 100) %>%
#  filter(Date >= Sys.Date() - 30) %>%
  distinct()
# Replace old Basics data frame with new data (full reload from today's dump)
basics <- read.delim(paste0(FILE_DIR,"/title.basics.tsv.gz") ,stringsAsFactors = FALSE, quote="")
# Clean Basics
# basics <- basics[basics$titleType=="movie",]  # Only keep movies
# basics <- basics[basics$titleType %in% c("movie","tvSeries","video","tvMovie"),]  # Only keep selected types
basics <- basics[is.na(basics$titleType) == FALSE,]  # Drop unknown type
basics <- basics[is.na(basics$runtimeMinutes)==FALSE,]  # Drop unknown runtime
# NOTE(review): startYear is still character at this point (it is converted to
# numeric below), so this compares lexicographically against the current year.
# It appears to work for 4-digit years and drops non-numeric placeholders, but
# converting to numeric before filtering would be safer -- confirm before changing.
basics <- basics[basics$startYear <=
                   as.numeric(substr(Sys.Date(),1,4)),]  # drop release date after this year
# Set types for columns (non-numeric placeholders become NA with a warning)
basics$titleType <- as.factor(basics$titleType)
basics$isAdult <- as.numeric(basics$isAdult)
basics$startYear <- as.numeric(basics$startYear)
basics$endYear <- as.numeric(basics$endYear)
basics$runtimeMinutes <- as.numeric(basics$runtimeMinutes)
# Persist the cleaned table for other scripts / the next run.
save(basics,file=paste0(DATA_DIR,"/basics.RData"))
# Replace old Episode data frame with new data (full reload, saved unmodified)
episode <- read.delim(paste0(FILE_DIR,"/title.episode.tsv.gz") ,stringsAsFactors = FALSE, quote="")
save(episode,file=paste0(DATA_DIR,"/episode.RData"))
# Scrape the per-star vote breakdown for one title from its IMDB ratings page.
#
# tconst: IMDB title identifier, e.g. "tt0111161".
# Returns a one-row data frame with columns tconst, Date (today),
# Vote_01..Vote_10 (votes cast at each star rating, 1 through 10) and
# Vote_sum (their total).
# Requires rvest (read_html/html_nodes/html_text), loaded at the top of file.
vote_rip <- function(tconst){
  url <- paste0('https://www.imdb.com/title/',tconst,'/ratings?ref_=tt_ov_rt')
  webpage <- read_html(url)
  rank_data <- html_text(html_nodes(webpage,'.leftAligned'))
  # Assumes elements 2..11 of '.leftAligned' hold the counts for 10 stars down
  # to 1 star -- TODO confirm if IMDB changes its page layout. rev() puts them
  # in ascending star order so ranks[i] = votes cast at i stars.
  ranks <- rev(as.numeric(gsub(',','',rank_data[2:11])))
  # Build the ten Vote_XX columns in one step instead of ten scalar assignments.
  data.frame(tconst,
             Date = Sys.Date(),
             setNames(as.list(ranks), sprintf("Vote_%02d", 1:10)),
             Vote_sum = sum(ranks))
}
# Scrape the Metacritic score shown on a title's IMDB critic-reviews page.
# Returns a data frame with columns tconst, Date (today) and metascore;
# metascore is NA when the page carries no '.metascore_wrap' element.
meta_rip <- function(tconst){
  review_url <- paste0('https://www.imdb.com/title/',tconst,'/criticreviews?ref_=tt_ov_rt')
  review_page <- read_html(review_url)
  score_nodes <- html_nodes(review_page,'.metascore_wrap')
  metascore <- if (length(score_nodes) > 0) as.numeric(html_text(score_nodes)) else NA
  data.frame(tconst, Date = Sys.Date(), metascore)
}
# Identify growth in votes.
# The logic here is that the votes in the ratings file is usually a day
# behind the votes on the web site.
# (Fix: dropped a dead assignment that seeded votes.today from `ratings`;
# it was unconditionally overwritten a few lines below. Also replaced the
# reassignable T shorthand with TRUE.)
votes.dates <- sort(unique(rating.history$Date))
today <- votes.dates[length(votes.dates)]
yesterday <- votes.dates[length(votes.dates)-1]
# Per-title vote counts on the two most recent harvest dates.
votes.today <- rating.history %>%
  filter(Date == today) %>%
  select(c("tconst","numVotes")) %>%
  rename(today.rat=numVotes)
votes.yesterday <- rating.history %>%
  filter(Date == yesterday) %>%
  select(c("tconst","numVotes")) %>%
  rename(yesterday.rat=numVotes)
# Full outer join so titles present on only one of the two dates survive.
votes.growth <- merge(votes.today,votes.yesterday,by="tconst",all.x=TRUE,all.y=TRUE)
# Titles missing yesterday start from zero votes ...
votes.growth$yesterday.rat[is.na(votes.growth$yesterday.rat)] <- 0
# ... and titles missing today are extrapolated to double yesterday's count
# (the RHS is evaluated before the assignment, so both masks use the
# original NA positions).
votes.growth$today.rat[is.na(votes.growth$today.rat)] <- 2*votes.growth$yesterday.rat[is.na(votes.growth$today.rat)]
votes.growth <- votes.growth %>%
  mutate(delta = today.rat - yesterday.rat,
         votes = today.rat+delta)
# Date each title's vote distribution was last harvested.
max.votes.Date <- aggregate(Date~tconst, data=votes, max)
votes.growth <- merge(votes.growth,max.votes.Date,by="tconst",all.x=TRUE)
# IDS with votes > 1000. This gives ~15,000 movies to check
min.votes <- 1000 # Minimum votes to be included
#ids <- data.frame(unique(votes.growth$tconst[votes.growth$votes > min.votes]),stringsAsFactors = FALSE)
#names(ids) <- "tconst"
# Filter for IDS of movies in basics list
movie_ids <- data.frame(unique(basics$tconst),stringsAsFactors = FALSE)
names(movie_ids) <- "tconst"
#ids <- merge(ids,movie_ids,by="tconst")
#ids <- merge(basics[,c("tconst","titleType","primaryTitle")],ids,by="tconst")
votes.growth <- merge(votes.growth,movie_ids,by="tconst")
votes.growth <- merge(basics[,c("tconst","titleType","primaryTitle")],votes.growth,by="tconst")
votes.growth <- votes.growth[order(-votes.growth$votes),] # Order by descending number of votes
# Identify movies to update
# Compile information. For each tconst:
#   Latest number of votes from IMDB (today votes)
#   Previous number of votes from IMDB (yesterday votes)
#   Date of most recent Vote counting.
# Update votes for:
#   movies where number of votes has increased by 500+ or 10% (biggest increase first)
#   Up to 1000 movies where votes not yet saved (largest votes first)
#   100 movies least recently updated (oldest first)
# max.votes.Vote_sum <- aggregate(Vote_sum~tconst, data=votes, max)
# max.votes.Date <- aggregate(Date~tconst, data=votes, max)
# max.ratings.numVotes <- aggregate(numVotes~tconst, data=ratings, max)
# Combine with vote counts
# ids <- merge(ids,max.ratings.numVotes,by="tconst",all.x=TRUE)
# ids <- merge(ids,max.votes.Vote_sum,by="tconst",all.x=TRUE)
# ids <- merge(ids,max.votes.Date,by="tconst",all.x=TRUE)
# ids <- ids[order(-ids$numVotes),] # Order by descending number of votes
# ids.novotes <- ids[(is.na(ids$Vote_sum)==TRUE),] # No Previous Vote Count
# ids.votes <- ids[(is.na(ids$Vote_sum)==FALSE),] # With previous Vote count
# Popular titles (>1000 votes) never harvested: Date is NA after the merge.
ids.novotes <- votes.growth[(is.na(votes.growth$Date)&(votes.growth$votes>1000)),]
ids.votes <- votes.growth[(!is.na(votes.growth$Date)&(votes.growth$votes>1000)),]
# Votes have increased by > 500 or by 10% or more
#ids.votes.plus <- ids.votes[((ids.votes$numVotes >= ids.votes$Vote_sum+500)|(ids.votes$numVotes/ids.votes$Vote_sum>1.1)),]
ids.votes.plus <- votes.growth %>% filter(votes>1000) %>% filter(Date != Sys.Date()) %>% filter( (delta > 500)|(today.rat / yesterday.rat > 1.1) )
# ids.votes.plus
# Votes have not increased by > 500
#ids.votes.minus <- ids.votes[!((ids.votes$numVotes >= ids.votes$Vote_sum+500)|(ids.votes$numVotes/ids.votes$Vote_sum>1.1)),]
ids.votes.minus <- votes.growth %>% filter(!is.na(Date)) %>% filter(Date != Sys.Date()) %>% filter( !(tconst %in% unique(ids.votes.plus$tconst)))
# head(ids.votes.minus)
#ids.votes.minus <- ids[(is.na(ids$Vote_sum)==FALSE)&(ids$numVotes < ids$Vote_sum+500),]
n.plus=length(ids.votes.plus$tconst)
#n.novotes=length(ids.novotes$tconst)
# --- Pass 1: re-harvest titles whose vote count grew meaningfully. ---
update.ids <- ids.votes.plus$tconst
label <- "Increased Votes"
n.ids <- length(update.ids)
count <- 0
start.time <- Sys.time()
for(id in update.ids){
  count <- count + 1
  # Simple linear ETA from the average per-title scrape time so far.
  ETA <- Sys.time() + (n.ids-count) * (Sys.time() - start.time)/count
  print(paste(label,
              "ID:",id,"number",count,"of",n.ids,
              "Started at",format(start.time,"%H:%M:%S"),
              "ETA:",format(ETA,"%H:%M:%S")))
  # NOTE(review): rbind-in-a-loop grows votes/metascores quadratically;
  # acceptable for a few hundred titles per run, slow for many thousands.
  votes <- rbind(votes,vote_rip(id))
  metascores <- rbind(metascores,meta_rip(id))
}
save(votes,file=paste0(DATA_DIR,"/votes.RData")) # Save Votes data after each step
save(metascores,file=paste0(DATA_DIR,"/metascores.RData"))
# --- Pass 2: first-time harvest for up to 1000 popular, never-scraped titles. ---
update.ids <- head(ids.novotes,1000)$tconst
label <- "New Votes"
n.ids <- length(update.ids)
count <- 0
start.time <- Sys.time()
for(id in update.ids){
  count <- count + 1
  ETA <- Sys.time() + (n.ids-count) * (Sys.time() - start.time)/count
  print(paste(label,
              "ID:",id,"number",count,"of",n.ids,
              "Started at",format(start.time,"%H:%M:%S"),
              "ETA:",format(ETA,"%H:%M:%S")))
  votes <- rbind(votes,vote_rip(id))
  metascores <- rbind(metascores,meta_rip(id))
}
save(votes,file=paste0(DATA_DIR,"/votes.RData")) # Save Votes data after each step
save(metascores,file=paste0(DATA_DIR,"/metascores.RData"))
# --- Pass 3: refresh the least-recently-harvested titles, oldest first. ---
# NOTE(review): the plan comment at the top of this section says 100 titles;
# the code actually refreshes 250.
ids.votes.minus <- ids.votes.minus[order(ids.votes.minus$Date),]
print(paste("Updating 250 of the oldest voted movies. Oldest Date is",head(ids.votes.minus$Date,1)))
update.ids <- head(ids.votes.minus,250)$tconst
label <- "Old Movie Update"
n.ids <- length(update.ids)
count <- 0
start.time <- Sys.time()
for(id in update.ids){
  count <- count + 1
  ETA <- Sys.time() + (n.ids-count) * (Sys.time() - start.time)/count
  print(paste(label,
              "ID:",id,"number",count,"of",n.ids,
              "Started at",format(start.time,"%H:%M:%S"),
              "ETA:",format(ETA,"%H:%M:%S")))
  votes <- rbind(votes,vote_rip(id))
  metascores <- rbind(metascores,meta_rip(id))
}
# Order votes & metascores by tconst
votes <- votes[order(as.character(votes$tconst)),]
votes <- unique(votes)  # drop exact-duplicate harvest rows
metascores <- metascores[order(as.character(metascores$tconst)),]
metascores <- unique(metascores)
# Filter records with missing votes
votes <- votes %>% filter(!is.na(Vote_sum))
# Fit the model
# Reload the persisted model so fits from previous runs are reused.
load(file=paste0(DATA_DIR,"/vote.model.RData"))
vote.model <- vote.model %>% filter(!is.na(Vote_sum))
vote.model <- vote.model %>% group_by(tconst,Date) %>% slice(1) %>% ungroup # Filter for unique date within movie
# Goodness-of-fit for a scaled beta-binomial model of one movie's 1-10 vote
# histogram. par = c(m, s, scale): m and s are the arguments passed to
# dbetabinom(), scale multiplies the movie's total vote count.
# NOTE(review): reads the globals `vote.model` (data frame) and `movie`
# (current row index) from the calling environment -- only meaningful
# inside the optim() fitting loop below. dbetabinom() comes from a package
# loaded elsewhere in this file (not visible here) -- confirm which one.
bb_gof <-function(par){ # Beta Binomial model Goodness of Fit
  m <- par[1]
  s <- par[2]
  scale <- par[3]
  # Observed 1..10 star counts for the current movie (one-row data frame).
  votes <- vote.model[movie,c("Vote_01","Vote_02","Vote_03","Vote_04",
                              "Vote_05","Vote_06","Vote_07","Vote_08",
                              "Vote_09","Vote_10")]
  # Expected counts under the scaled beta-binomial.
  model <- vote.model$Vote_sum[movie]*scale*dbetabinom(0:9,9,m,s)
  pct_delta <- votes - model
  # In the 1-star and 10-star tails only model OVERSHOOT is penalised;
  # observed excess there is attributed to polarised voting and handled
  # separately after the fit (polarity / binary.ppn below).
  pct_delta[1] <- min(0,pct_delta$Vote_01)
  pct_delta[10] <- min(0,pct_delta$Vote_10)
  # Sum of squared residuals across the ten buckets.
  delta <- sum(pct_delta^2)
  return(delta)
}
# Update the vote model
# Add newly harvested Votes to vote.model:
new.votes <- votes %>% anti_join(vote.model,by=c("tconst","Date"))
vote.model <- full_join(vote.model,new.votes)
# Fit the model
# NOTE(review): prefer TRUE/FALSE over the reassignable T/F shorthand;
# `modelled` is computed but never used below.
modelled <- sum(is.na(vote.model$m)==F) # Count of modelled
unmodelled <- sum(is.na(vote.model$m)==T) # Count of Unmodelled
count <- 0
start.time <- Sys.time()
# Fit beta-binomial parameters for every row that has no fit yet (m is NA).
# bb_gof() reads `vote.model` and the loop variable `movie` from here.
for (movie in 1:length(vote.model$tconst)){
  # for (movie in 1:2){
  # if (vote.model$tconst[movie] == 'tt5870084') next # this record creates problems - or it used to
  if (is.na(vote.model$m[movie])==T){
    count <- count + 1
    ETA <- Sys.time() + (unmodelled -count) * (Sys.time() - start.time)/count
    # Box-constrained optimisation: m in (0,1), s > 0, scale in (0,1].
    fit <- optim(c(.5,2,0.1),bb_gof,method="L-BFGS-B",lower=c(0.001,0.001,0.001),upper=c(0.999,Inf,1) )
    # NOTE(review): the "of N" denominator uses length(votes$tconst) while
    # the loop runs over vote.model -- the progress figure can be off.
    print(paste("Fit Model ID:", vote.model$tconst[movie],
                "number",movie,"of",length(votes$tconst),
                "at time",format(Sys.time(),"%H:%M:%S"),
                "Start:",format(start.time,"%H:%M:%S"),
                "ETA:",format(ETA,"%H:%M:%S")))
    m <- fit$par[1]
    s <- fit$par[2]
    scale <- fit$par[3]
    vote.model$m[movie] <- m
    vote.model$s[movie] <- s
    vote.model$scale[movie] <- scale
    # Residual tail mass the fitted curve could not explain: excess 1-star
    # and 10-star votes define the "polarity" share and its 10-star split.
    modelled.votes <- vote.model$Vote_sum[movie]*scale*dbetabinom(0:9,9,m,s)
    resid.1 <- max(0,vote.model$Vote_01[movie] - modelled.votes[1])
    resid.10 <- max(0,vote.model$Vote_10[movie] - modelled.votes[10])
    vote.model$polarity[movie] <- (resid.1 + resid.10)/vote.model$Vote_sum[movie]
    vote.model$binary.ppn[movie] <- resid.10/(resid.1 + resid.10)
  }
}
# Calculate Means
# Weighted average rating: sum_k k * Vote_k over the ten rating buckets,
# divided by the title's total vote count. Matrix product form of the
# elementwise column sum.
vote.model$mean <- as.vector(
  as.matrix(vote.model[, sprintf("Vote_%02d", 1:10)]) %*% (1:10)
) / vote.model$Vote_sum
# Median
# vote.model$median <- as.numeric(NA)
# Walk the cumulative 1..10 vote counts until half the total is passed.
# NOTE(review): the loop bound uses length(votes$tconst) but indexes
# vote.model -- confirm the two stay the same length, otherwise trailing
# rows of vote.model are silently skipped (or over-indexed).
for (movie in 1:length(votes$tconst)){
  if(is.na(vote.model$median[movie])){ # Only update unknown medians
    cume <-0
    v <- vote.model[movie,c("Vote_01","Vote_02","Vote_03","Vote_04","Vote_05","Vote_06","Vote_07","Vote_08","Vote_09","Vote_10")]
    # NOTE(review): v[j] is a one-column data frame, so `cume` becomes a
    # data frame after the first addition; the comparison still yields a
    # single logical, but plain numeric extraction (v[[j]]) would be sturdier.
    for (j in 1:10){
      cume <- cume+v[j]
      if (cume>vote.model$Vote_sum[movie]/2){
        vote.model$median[movie] <- j
        break
      }
    }
  }
}
#Beta Mean
# vote.model$beta.mean <- as.numeric(NA)
# Expected rating implied by the fitted beta-binomial alone (1..10 scale).
# NOTE(review): same loop-bound caveat as the median loop -- the bound is
# length(votes$tconst) but the body indexes vote.model.
for (movie in 1:length(votes$tconst)){
  if(is.na(vote.model$beta.mean[movie])){ # Only update unknown Beta Means
    vote.model$beta.mean[movie] <- sum(dbetabinom(0:9,9,vote.model$m[movie],vote.model$s[movie])*(1:10))
  }
}
# Binary mean
# Impute missing binary proportions
# Fall back to the scaled beta mean when no polarity split was computed
# (e.g. both tail residuals were zero, making binary.ppn 0/0 = NaN).
vote.model$binary.ppn[is.na(vote.model$binary.ppn)] <- vote.model$beta.mean[is.na(vote.model$binary.ppn)]/10
# Map the 10-star share of polarity votes onto the 1..10 rating scale.
vote.model$binary.mean <- 1+9*vote.model$binary.ppn
# Save the Results
# Keep one row per (movie, harvest date) before persisting.
votes <- votes %>% group_by(tconst,Date) %>% slice(1) %>% ungroup # Filter for unique date within movie
save(votes,file=paste0(DATA_DIR,"/votes.RData"))
save(metascores,file=paste0(DATA_DIR,"/metascores.RData"))
save(basics,file=paste0(DATA_DIR,"/basics.RData"))
save(episode,file=paste0(DATA_DIR,"/episode.RData"))
save(ratings,file=paste0(DATA_DIR,"/ratings.RData")) # (fix: was saved twice)
save(vote.model,file=paste0(DATA_DIR,"/vote.model.RData"))
save(rating.history,file=paste0(DATA_DIR,"/rating.history.RData"))
print("Program finished")
timestamp()
|
2c8a613f85f75293f3fbc55627e908f64c20fed9 | eca810397cfa067c4c7f8ced66c4b748b8a1e8c9 | /GamAnalyses_withRace.R | fbbcd028fb854f89e1d13278a17b4ee1eebc3c59 | [] | no_license | PennBBL/pncPreterm | c149319dfdbb801eabf0e0acf15f9db5dc138cec | 936cb62f63f652b2adb393dbafe6bf31891c313b | refs/heads/master | 2022-06-20T18:22:41.423267 | 2020-05-06T13:12:30 | 2020-05-06T13:12:30 | 116,976,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,576 | r | GamAnalyses_withRace.R | ##########################################
#### GAM MODELS FOR PREMATURITY STUDY ####
##########################################
#Load data
data.NMF <- read.csv("/data/joy/BBL/projects/pncPreterm/subjectData/n278_Prematurity_allData.csv", header=TRUE, na.strings = "NA")
#Make race2 a factor with three levels (White, African American, and Other)
data.NMF$race2 <- as.factor(data.NMF$race2)
#Load library
library(mgcv)
#Get NMF variable names
nmfComponents <- names(data.NMF)[grep("Nmf26",names(data.NMF))]
#Run gam models with race 2 (white, african american, other)
#NmfModels <- lapply(nmfComponents, function(x) {
# gam(substitute(i ~ s(age) + sex + race2 + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
#})
#OR Run gam models with white (white vs nonwhite)
# One GAM per NMF component: smooth age term plus linear covariates;
# substitute() builds each formula with the component as the response.
NmfModels <- lapply(nmfComponents, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models <- lapply(NmfModels, summary)
#Pull p-values
# p.table row 5, column 4 = Wald p-value of the `ga` (gestational age)
# term. NOTE(review): this assumes the parametric-term order
# intercept/sex/white/medu1/ga -- reordering the formula silently breaks it.
p <- sapply(NmfModels, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p <- as.data.frame(p)
#Print original p-values to three decimal places
p_round <- round(p,3)
#FDR correct p-values
pfdr <- p.adjust(p[,1],method="fdr")
#Convert to data frame
pfdr <- as.data.frame(pfdr)
#To print fdr-corrected p-values to three decimal places
pfdr_round <- round(pfdr,3)
#List the NMF components that survive FDR correction
Nmf_fdr <- row.names(pfdr)[pfdr<0.05]
##Only look at the 11 significant components
nmfComponents11 <- c("Nmf26C1","Nmf26C2","Nmf26C4","Nmf26C7","Nmf26C8","Nmf26C10","Nmf26C18","Nmf26C19","Nmf26C22","Nmf26C23","Nmf26C26")
#Run gam models with white (white vs nonwhite)
# Same model specification as above, refit on the reduced component set so
# the FDR correction is applied within these 11 tests only.
NmfModels11 <- lapply(nmfComponents11, function(x) {
  gam(substitute(i ~ s(age) + sex + white + medu1 + ga, list(i = as.name(x))), method="REML", data = data.NMF)
})
#Look at model summaries
models11 <- lapply(NmfModels11, summary)
#Pull p-values
# Row 5, column 4 of the parametric table = Wald p-value of the `ga` term;
# depends on the covariate order in the formula above.
p11 <- sapply(NmfModels11, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p11 <- as.data.frame(p11)
#Print original p-values to three decimal places
p11_round <- round(p11,3)
#FDR correct p-values
pfdr11 <- p.adjust(p11[,1],method="fdr")
#Convert to data frame
pfdr11 <- as.data.frame(pfdr11)
#To print fdr-corrected p-values to three decimal places
pfdr11_round <- round(pfdr11,3)
#Add row names
# Component numbers of the 11 retained NMF components, in the same order
# as nmfComponents11 above.
rownames(pfdr11_round) <- c(1, 2, 4, 7, 8, 10, 18, 19, 22, 23, 26)
#List the NMF components that survive FDR correction
Nmf_fdr11 <- row.names(pfdr11_round)[pfdr11_round<0.05]
|
6a6498ed2a6de2943faaac610b3d2ed1c6664716 | ae38a89ef3f46fcd1d84bb99fbe0a10d1b0f5a69 | /ui.R | 10625ff3759889c5b32896ac9dec8a36142a26f9 | [] | no_license | juancho182/shiny | 3e1a36e5e8c07abdebd808cd19369187e50345d8 | ba8226b8a5c3153b30f75b01dd3561fb642594e1 | refs/heads/main | 2023-07-07T03:10:59.120900 | 2021-08-07T00:11:11 | 2021-08-07T00:11:11 | 392,521,232 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,145 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# Define UI for application that draws a barplot
# The UI exposes one input ("deaths", a two-ended range slider) and one
# output slot ("distPlot") that the server must fill via renderPlot().
shinyUI(fluidPage(

    # Application title
    titlePanel("Covid Deaths by Country"),

    # Sidebar with a slider input for number of deaths
    sidebarLayout(
        sidebarPanel(
            # value = c(lo, hi) makes this a range slider, so input$deaths
            # is a length-2 numeric vector.
            sliderInput("deaths",
                        "Number of deaths:",
                        min = 0,
                        max = 650000,
                        value = c(20000,50000)),
            h5("Help: The chart on the right will show one bar per country with the total covid 19 deaths up to July 31th, 2021. Use the slider to filter the countries with the number of deaths you are interested in. If there are too many bars, country names may not fit the plot. Source code: https://github.com/juancho182/shiny")
        ),

        # Show a plot of the generated distribution
        mainPanel(
            plotOutput("distPlot")
        )
    )
))
|
0ce5f251eecae3df85d1e94762c284186dbd60d0 | 7dfd2b5a31ac723907f4888c98782084eac920fc | /man/associate_components.Rd | ac29616931f10a4e5d7b87ecc359f6a5f9e6c6e7 | [] | no_license | ehenrion/ChIAnalysis | c9ac7ec9412d841dc7c1ac21706c055800b5600c | e53c25d142d294e26989e2e801d2b8633cf94b2f | refs/heads/master | 2020-03-31T13:30:14.430350 | 2017-01-18T19:51:49 | 2017-01-18T19:51:49 | 152,258,410 | 0 | 0 | null | 2018-10-09T13:44:07 | 2018-10-09T13:44:23 | null | UTF-8 | R | false | true | 744 | rd | associate_components.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AnnotateChIA.R
\name{associate_components}
\alias{associate_components}
\title{Associates components ids and sizes to chia data, as returned by \code{\link{load_chia}}.}
\usage{
associate_components(chia.obj)
}
\arguments{
\item{chia.obj}{ChIA-PET data, as returned by \code{\link{annotate_chia}}.}
\item{split}{Should the data be divided into communities?}
\item{oneByOne}{Should the networks be treated one by one or as a whole?}

\item{method}{What method should be used to split the data (ignored if split = \code{FALSE}).}
}
\value{
The annotated chia.obj.
}
\description{
Associates components ids and sizes to chia data, as returned by \code{\link{load_chia}}.
}
|
a0f19d75d8b5fa71ca7c954839d209c05681e8d4 | 313282460809ca3380cfa5153497d943faef5a3a | /FunFunctions/root_log_function.R | f932a4a236ac8839cb7eb883710e725d0496891e | [] | no_license | chipmonkey/Kaggle | a17fd0f963bb217a09ca4861637ed47b17c3b902 | e304773a3f22b1a32b75b87329dede42799009df | refs/heads/master | 2021-01-10T13:04:15.900860 | 2017-04-16T17:09:16 | 2017-04-16T17:09:16 | 43,185,271 | 1 | 0 | null | 2016-08-28T19:03:20 | 2015-09-26T01:36:23 | R | UTF-8 | R | false | false | 794 | r | root_log_function.R |
#' Inverse of y^y ("root-log"): find y such that y^y == x, by bisection.
#'
#' y^y is real for y > 0 and attains its global minimum exp(-1/e) ~= 0.6922
#' at y = 1/e, so no solution exists for smaller x. On [1/e, Inf) the
#' function is strictly increasing, which makes bisection valid there.
#'
#' @param x         target value; must satisfy x >= exp(-1/e).
#' @param threshold convergence tolerance on |guess^guess - x|.
#' @return the solution y (numeric scalar).
rootlog <- function (x, threshold = 0.00001) {
  if(x < 0) { stop("can't take the root-log of a negative number")}
  if (x < exp(-exp(-1))) {
    # Also covers x = 0: the original code looped forever there because the
    # search interval collapsed to [0, 0] while 0^0 == 1 in R.
    stop("no solution: y^y >= exp(-1/e) ~= 0.6922 for every y > 0")
  }
  # Bisect on the increasing branch of y^y:
  #   x >= 1 -> solution lies in [1, x]   (since y^y >= y for y >= 1)
  #   x <  1 -> solution lies in [1/e, 1] (the increasing part below 1)
  # Fix: the original always searched [0, x], which is wrong for x < 1
  # because y^y is decreasing on (0, 1/e).
  mymin <- if (x >= 1) 1 else exp(-1)
  mymax <- max(x, 1)
  myguess <- (mymin + mymax) / 2
  myerror <- myguess^myguess - x
  while (abs(myerror) > threshold) {
    if (myerror > 0) {
      mymax <- myguess  # overshot: shrink the interval from above
    } else {
      mymin <- myguess  # undershot: shrink the interval from below
    }
    myguess <- (mymin + mymax) / 2
    myerror <- myguess^myguess - x
  }
  return(myguess)
}
# Usage Examples / tests:
x <- rootlog(1000)
x^x  # ~1000 (within the default threshold)
# rootlog(-5) signals an error by design; wrap it so this demo script keeps
# running instead of aborting here (the previous bare call stopped the file).
tryCatch(rootlog(-5), error = function(e) conditionMessage(e))
y <- data.frame(invalues = c(4,5,6,7,8,9))
# vapply returns a plain numeric column; the original lapply() silently
# produced a list-column, which breaks most downstream data.frame code.
y$rootlog <- vapply(y$invalues, rootlog, numeric(1))
head(y)
|
8b428af5596ea5a0d3a7ca2b51d8d2c251d84e4e | ae9f99925fa4116d7ab96d34bd81314eb3887e79 | /kod/package/cr07/R/aml.R | 1803d6de7c17f451d03287388f75ed4298dfda83 | [] | no_license | MagdaMlynarczyk/mgr | d7f9d229db21a85e67aab0178f96de6d452000f5 | da77d32438a9d2ec2c155bbebfdde6d4178fb7fd | refs/heads/master | 2020-12-30T12:23:39.231409 | 2017-06-10T11:30:19 | 2017-06-10T11:30:19 | 91,432,067 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 74 | r | aml.R | #' @title eqeqw
#' @description Placeholder documentation for this dataset; replace with a description of its variables and source.
#' @name data
#' @docType data
NULL
|
477d9efca67e3e535e684f2b6ecd1868f98755b9 | 13b38ec5ab726ef97da01751a704ebe9a626952c | /pmcexplore/man/clean_DAS.Rd | ceeb3e2dc2c91fd7069d63fdb8ae2dab785b5abb | [] | no_license | natgable/pmcexplore_package | d6c31b923fecef5a8e91259e3f986e87b5b44633 | b92812c864ca18c7973677e955eef2c97efbb019 | refs/heads/master | 2022-12-18T10:00:34.133134 | 2020-09-18T14:51:29 | 2020-09-18T14:51:29 | 295,450,743 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 647 | rd | clean_DAS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_DAS.R
\name{clean_DAS}
\alias{clean_DAS}
\title{Clean DAS information created by the \code{get_DAS} function}
\usage{
clean_DAS(das_df)
}
\arguments{
\item{das_df}{A tibble generated by \code{get_DAS}}
}
\value{
A tibble with the columns PMCID, tagging_method, content,
has_das, and attached_media. This resulting tibble is ready to be passed
into the function \code{label_DAS}.
}
\description{
This function cleans and prepares data sharing information so that
the PMCIDs can later be labeled by data sharing type.
}
\examples{
get_DAS("PMC3257301") \%>\%
clean_DAS()
}
|
d3922eba5734b5383e1f030b759c24446d659b7f | d86dc6266f527a14c8705a775267e26490b32708 | /man/WGCNA_cytoscape_each_module.Rd | 2a57a54339d3063d69c7688ddaca9f25043eb045 | [
"MIT"
] | permissive | Tong-Chen/YSX | a3c221ea891d9ac5136e35a231606440484e981c | 4bb1d21f3d4fa5703937207fce4301ad5ac3aa16 | refs/heads/master | 2021-06-26T03:19:10.277663 | 2021-01-12T13:14:15 | 2021-01-12T13:14:15 | 172,993,901 | 7 | 8 | null | null | null | null | UTF-8 | R | false | true | 599 | rd | WGCNA_cytoscape_each_module.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WGCNA.R
\name{WGCNA_cytoscape_each_module}
\alias{WGCNA_cytoscape_each_module}
\title{Output nodes and edges data for each module only. (deprecated)}
\usage{
WGCNA_cytoscape_each_module(edgeData, nodeData, module, prefix)
}
\arguments{
\item{edgeData}{generated by \code{WGCNA_cytoscape}}
\item{nodeData}{generated by \code{WGCNA_cytoscape}}
\item{module}{A module color name}
\item{prefix}{prefix for output files.}
}
\description{
Output nodes and edges data for each module only. (deprecated)
}
\examples{
None
}
|
b81b74022664eca1a2dea5c6f430cec99c0007f3 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610386820-test.R | bed010db2239329b7249de2798affc8f33a0d826 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 560 | r | 1610386820-test.R | testlist <- list(a = -1L, b = -1L, x = c(-15269884L, NA, -16253030L, -436207617L, -16777216L, -16252920L, -16218627L, -33686019L, -1125833062L, -33686019L, -33686019L, -33686019L, -33686019L, 235407972L, 751971372L, -771883523L, -33686019L, -33686019L, -33686017L, -1L, -1703937L, -131587L, -33686019L, -33686019L, -33686019L, -33686019L, -33686019L, -33686019L, -33686019L, -33686019L, -33747448L, 174337234L, 741134803L, -738257152L, 71319807L, 150931455L, 65535L, 16777215L, -49665L, -1L))
# Replay the captured fuzzer input (testlist, defined above) against the
# internal target; machine-generated regression case -- do not hand-edit.
result <- do.call(grattan:::anyOutside,testlist)
str(result)  # record the result structure for the regression log
5a86b15e22da078a75c2833b0f05a4058cc109f8 | 254173d2e111c66e40fd40d3c931b4e75e31999c | /Functions/tree.resolution.R | b73fb447a9511de953503ee07611a64d82437a4a | [] | no_license | TGuillerme/CharactersCorrelation | 9a4275752222c5a07502f46727fa13a5484b1c88 | 062ba7d9372da1ef3666710756599663a5a76721 | refs/heads/master | 2023-04-13T01:29:04.482403 | 2023-03-30T15:25:42 | 2023-03-30T15:25:42 | 71,147,759 | 0 | 1 | null | 2019-05-30T06:08:57 | 2016-10-17T14:36:21 | HTML | UTF-8 | R | false | false | 3,710 | r | tree.resolution.R | #' @title Tree resolution
#'
#' @description Get tree resolution
#'
#' @param
#'
#' @examples
#'
#' @seealso
#'
#' @author Thomas Guillerme
#' @export
# path = "../Data/Trees_out/Consensus_trees/Bayesian/Trees/25t_100c/"
# suffix = "con.tre"
#' Collect tree resolution (node counts) for every consensus tree in a
#' directory, grouped by the category token embedded in the file names.
#'
#' @param path   directory containing the tree files (must end with "/").
#' @param suffix file-name suffix identifying tree files (e.g. "con.tre").
#' @return a named list of integer vectors of node counts per category,
#'   reordered norm/max/min/rand when exactly four categories are found.
tree.resolution <- function(path, suffix) {
    ## Read the tree files
    trees <- list.files(path = path, pattern = suffix)
    ## Fix: fail loudly instead of crashing inside scan() on trees[1] when
    ## the directory holds no matching files.
    if (length(trees) == 0) {
        stop("No files matching suffix '", suffix, "' found in ", path)
    }
    ## Get the tree format (peek at the first token of the first file).
    type <- scan(paste0(path,trees[1]), what = "character", quiet = TRUE, n = 1)
    if(type == "#NEXUS") {
        read.tree.fun <- ape::read.nexus
    } else {
        read.tree.fun <- ape::read.tree
    }
    ## Categories: 4th "_"-separated token of each file name, with the
    ## suffix stripped. Fix: strip with fixed = TRUE -- previously the
    ## leading "." in the split pattern was treated as a regex wildcard.
    categories <- unique(unlist(lapply(
        lapply(strsplit(trees, split = "_"), function(X) return(X[4])),
        function(X) return(strsplit(X, split = paste0(".", suffix), fixed = TRUE)[[1]]))))
    ## Read the trees per category and record each tree's node count.
    read.one.category <- function(category, path, suffix, read.tree.fun) {
        return(unlist(lapply(as.list(list.files(path = path, pattern = paste0(category, ".", suffix))), function(X, read.tree.fun, path) return(ape::Nnode(read.tree.fun(paste0(path, X)))), read.tree.fun, path)))
    }
    ## Get the resolution per categories
    resolutions <- lapply(as.list(categories), read.one.category, path, suffix, read.tree.fun)
    ## Reorder the categories (norm, max, min, rand); the hard-coded index
    ## is only meaningful when exactly the expected four categories exist,
    ## otherwise keep the natural order instead of producing NA entries.
    if (length(categories) == 4) {
        resolutions <- resolutions[c(3, 1, 2, 4)]
        names(resolutions) <- categories[c(3, 1, 2, 4)]
    } else {
        names(resolutions) <- categories
    }
    return(resolutions)
}
# col <- c("darkgrey", "red", "green3", "blue", "grey", "orange", "lightgreen", "lightblue")
# plot.tree.resolution("../Data/Trees_out/Consensus_trees/", ntaxa = 25, col = c("darkgrey", "red", "green3", "blue", "grey", "orange", "lightgreen", "lightblue"))
# Plot node-count distributions for Bayesian vs parsimony consensus trees
# at three character counts (100/350/1000) for one taxon count. `col`
# supplies 8 colours: 4 categories x {Bayesian, parsimony}.
# NOTE(review): despite the dots in the name this is a plain function, not
# an S3 `plot` method; it relies on a project helper plot.CI() defined
# elsewhere.
plot.tree.resolution <- function(path, ntaxa, col, type = "line") {
    ## placeholder lists
    bayesian_path <- parsimon_path <- list()
    ## Get the full path for both methods
    bayesian_path[[1]] <- paste0(path, "Bayesian/Trees/", ntaxa, "t_100c/")
    parsimon_path[[1]] <- paste0(path, "Parsimony/Trees/", ntaxa, "t_100c/")
    bayesian_path[[2]] <- paste0(path, "Bayesian/Trees/", ntaxa, "t_350c/")
    parsimon_path[[2]] <- paste0(path, "Parsimony/Trees/", ntaxa, "t_350c/")
    bayesian_path[[3]] <- paste0(path, "Bayesian/Trees/", ntaxa, "t_1000c/")
    parsimon_path[[3]] <- paste0(path, "Parsimony/Trees/", ntaxa, "t_1000c/")
    ## Get the trees
    suffix = "con.tre"
    bayesian_nodes <- lapply(bayesian_path, tree.resolution, suffix)
    parsimon_nodes <- lapply(parsimon_path, tree.resolution, suffix)
    ## Plot the results: empty canvas, then one cluster of four category
    ## columns per character count, with parsimony offset by 0.3.
    plot(NULL, xaxt = "n", xlim = c(1, 12), ylim = c(1, ntaxa-1), xlab = "", ylab = "Nodes")
    for(character in 1:length(bayesian_nodes)) {
        plot.CI(bayesian_nodes[[character]], type = type, col = col[1:4], shift = 4*(character-1) + 0, point.col = "black", width = 0.1, cent.tend = median, lwd = 3)
        plot.CI(parsimon_nodes[[character]], type = type, col = col[5:8], shift = 4*(character-1) + 0.3, point.col = "black", width = 0.1, cent.tend = median, lwd = 3)
    }
    ## X axis
    axislab <- c("100c", "350c", "1000c")
    axis(1, 1:12, labels = FALSE, tick = FALSE)
    axis(1, c(2.5, 6.6, 10.5), tick = FALSE, labels = axislab)
    ## Vertical lines separating the three character-count panels
    abline(v = 4.66) ; abline(v = 8.66)
    ## Legend: one row per category, Bayesian/parsimony colour pair
    y_positions <- seq(from = 1, to = 0.2, length.out = 4)
    legend_lab <- paste(names(bayesian_nodes[[1]]), "(bay/par)")
    for(legend in 1:4) {
        ## Get the y position
        y_pos <- (ntaxa/5) * y_positions[legend]
        ## Plot the legend
        points(y = rep(y_pos, 2), x = c(0.7,0.85), pch = 19, col = col[c(legend,legend+4)])
        text(y = y_pos, x = 0.85, legend_lab[legend], cex = 0.6, pos = 4)
    }
}
f746dfa31f5d0c987ed7a0348dbed40c1b74e132 | e0bcd3a0bfa23c1d445c2c738b8e37323a0c3b71 | /script/zzz_archive/unwind_portfolio_level_by_delta.R | 4e729abcbd9b10e0472ed3da29f827f0e98f46ce | [] | no_license | active-analytics/pqam_2018 | 87a017a55c130412e4d090518f2f47a6adb67c9a | cbc0437bb9da5460939641ec39af24070b733e24 | refs/heads/master | 2021-10-20T11:22:43.513713 | 2019-02-27T14:07:28 | 2019-02-27T14:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,856 | r | unwind_portfolio_level_by_delta.R | # clearing shit out
# NOTE(review): removed `rm(list=ls())` and `cat("\014")` -- wiping the
# caller's workspace/console from inside a script is a destructive side
# effect the script never needs.
# load packages
library(tidyverse)
library(tictoc)
###################################
## loading parallax 2018 dataset ##
###################################
# Root of the exported parallax 2018 dataset (machine-specific path).
chr_path_root <- "/Users/Pritam/files/ods/plx_2018/data_output/"
# chain description
df_chain_desc <-
    read_csv(paste0(chr_path_root, "spy_weekly_chain_desc_5yr.csv"))
# chain history
df_chain_hist <-
    read_csv(paste0(chr_path_root, "spy_weekly_chain_hist_5yr.csv"))
# option history
df_opt_hist <-
    read_csv(paste0(chr_path_root, "spy_weekly_opt_hist_5yr.csv"))
# position scaling
df_position_scaling <-
    read_csv(paste0(chr_path_root, "spy_weekly_position_scaling_5yr.csv"))
# scaled put pnl
df_pnl_put <-
    read_csv(paste0(chr_path_root, "spy_weekly_pnl_put_5yr.csv"))
# scaled strangle pnl
df_strangle_pnl_all <-
    read_csv(paste0(chr_path_root, "spy_weekly_pnl_strangle_5yr.csv"))
# scaled call pnl
df_call_pnl <-
    read_csv(paste0(chr_path_root, "spy_weekly_pnl_call_5yr.csv"))
## isolating only the 10-delta
## Fix: tolerance-based near() instead of exact == on a floating-point column.
df_strangle_pnl <-
    df_strangle_pnl_all %>% filter(near(variation, 0.1))
## adding signed-delta
## Vectorized replacement for the original row-by-row loop: puts carry a
## negative delta, everything else keeps its sign. (Also tolerates NA in
## `type`, which made the old scalar `if (chr_type == "put")` error out.)
df_strangle_pnl$signed_delta <-
    ifelse(df_strangle_pnl$type == "put",
           -df_strangle_pnl$delta,
           df_strangle_pnl$delta)
# Grid of net-delta unwind thresholds to test: 0.01 to 1.00 in 0.01 steps.
dbl_thresh <- seq(0.01, 1, 0.01)
lst_portfolio <- list()
# looping through all thresholds I want to test
for(ix_thresh in 1:length(dbl_thresh)){
    tic()
    # setting threshhold
    dbl_threshold <- dbl_thresh[ix_thresh]
    # loop through all the chains and perform the calculations
    for (ix_chn in 1:nrow(df_chain_desc)){
        # grabbing the expiration date for the current chain
        dt_expiration <- df_chain_desc$expiration[ix_chn]
        # grabbing all the trades for the current chain
        df_curr_chn_trades <-
            df_strangle_pnl %>%
            filter(expiration == dt_expiration) %>%
            arrange(data_date)
        # grouping together the data needed for determing management action
        # basically this just combines the PNLs and greeks of the put/calls
        df_chain_pnl <-
            df_curr_chn_trades %>%
            group_by(data_date) %>%
            summarize(
                strangle_mult = mean(strangle_mult)
                , upx = mean(underlying_price)
                , bid = sum(bid)
                , ask = sum(ask)
                , dly_opt_pnl = sum(dly_opt_pnl)
                , dly_tot_pnl = sum(dly_tot_pnl)
                , net_delta = -sum(signed_delta)
            )
        #df_chain_pnl
        df_chain_pnl$breach <- NA
        df_chain_pnl$manage_mult <- NA_real_
        bln_breach <- FALSE  # reset per chain; latches TRUE once breached
        df_chain_pnl$breach[1] <- FALSE # breach not allowed on execution date
        df_chain_pnl$manage_mult[1] <- 1 # breach not allowed on execution date
        # looping through all the days of the chain and determining
        # when to unwind the positions
        # NOTE(review): 2:nrow(df_chain_pnl) assumes every chain has at
        # least two daily rows; a single-row chain would iterate 2:1.
        for (ix_dt in 2:nrow(df_chain_pnl)){
            # grabbing the current net-delta and previous net delta
            dbl_net_delta <- df_chain_pnl$net_delta[ix_dt]
            dbl_prev_net_delta <- df_chain_pnl$net_delta[ix_dt - 1]
            if(ix_dt != nrow(df_chain_pnl)){
                # if there hasn't been a breach yet, check for a breach
                # if there has already been a breach, then stays a breach
                # NOTE(review): elementwise & on scalar conditions; && is
                # the idiomatic scalar operator here.
                if ((!bln_breach) &
                    (abs(dbl_net_delta) > dbl_threshold)) {
                    bln_breach <- TRUE
                }
            } else {
                # I don't allow for a breach on expiration date
                if((!bln_breach) &
                   # if it wasn't a breach yesterday, it's not a breach today
                   (abs(dbl_prev_net_delta) < dbl_threshold)){
                    bln_breach <- FALSE
                }
            }
            # updating the breach column
            df_chain_pnl$breach[ix_dt] <- bln_breach
            # this is the logic for how it's going to affect subsequent PNLs
            # the position is unwound the day of the breach, so all subsequent
            # pnls after the breach are zeroed out
            df_chain_pnl$manage_mult[ix_dt] <-
                as.integer(!df_chain_pnl$breach[ix_dt - 1])
        }
        ## adding expiration column and moving to the beginning of the dataframe
        df_chain_pnl <-
            df_chain_pnl %>%
            mutate(
                expiration = dt_expiration
            )
        df_chain_pnl <-
            df_chain_pnl %>%
            select(expiration) %>%
            bind_cols(df_chain_pnl %>% select(-expiration))
        ## adding threshhold number
        df_chain_pnl <-
            df_chain_pnl %>%
            mutate(
                threshold = dbl_threshold
            )
        df_chain_pnl <-
            df_chain_pnl %>%
            select(threshold) %>%
            bind_cols(df_chain_pnl %>% select(-threshold))
        # Accumulate one data frame per (threshold, chain) combination.
        lst_portfolio[[length(lst_portfolio) + 1]] <- df_chain_pnl
    }
    toc()
    print(paste0(dbl_threshold, ": complete."))
}
# Stack every (threshold, chain) result into one long data frame.
df_portfolio <- bind_rows(lst_portfolio)
#df_portfolio
print("DONE!")
#write_csv(df_portfolio, "df_portfolio_unwind_by_delta_portfolio.csv")
###################
## ANALYSIS CODE ##
###################
# Scale each chain's daily PNL by its strangle multiplier and zero out days
# after an unwind: manage_mult is 0 once a delta breach occurred the prior day.
df_portfolio <-
  df_portfolio %>%
  mutate(
    managed_naked = strangle_mult * dly_opt_pnl * manage_mult
    , managed_dh = strangle_mult * dly_tot_pnl * manage_mult
  )
# Aggregate managed PNL across all open chains for each (threshold, day)
# so we can compute per-threshold Sharpe ratios from daily series.
df_daily_pnl <-
  df_portfolio %>%
  group_by(threshold, data_date) %>%
  summarize(
    dly_managed_naked = sum(managed_naked)
    , dly_managed_dh = sum(managed_dh)
  )
#########################
## calculating metrics ##
#########################
## 1) annualized mean of daily pnl
## 2) annualized standard-deviation of daily pnl
## 3) annualized sharpe-ratio
# naked (un-hedged) options; annualization assumes 252 trading days
df_metrics_naked <-
  df_daily_pnl %>%
  group_by(threshold) %>%
  summarize(
    avg_ann = (mean(dly_managed_naked) * 252) %>% round(2)
    , std_ann = (sd(dly_managed_naked) * sqrt(252)) %>% round(2)
    , sharpe =
      ((mean(dly_managed_naked) / sd(dly_managed_naked)) * sqrt(252)) %>% round(2)
  )
# delta-hedged options: same metrics on the hedged PNL series
df_metrics_dh <-
  df_daily_pnl %>%
  group_by(threshold) %>%
  summarize(
    avg_ann = (mean(dly_managed_dh) * 252) %>% round(2)
    , std_ann = (sd(dly_managed_dh) * sqrt(252)) %>% round(2)
    , sharpe =
      ((mean(dly_managed_dh) / sd(dly_managed_dh)) * sqrt(252)) %>% round(2)
  )
## plotting ## (TODO: combine these into a single figure with gridExtra)
# naked
df_metrics_naked %>%
ggplot(aes(x=threshold, y=sharpe)) +
geom_point() +
geom_smooth(se = FALSE)
df_metrics_naked %>%
ggplot(aes(x=threshold, y=avg_ann)) +
geom_point() +
geom_smooth(se = FALSE)
df_metrics_naked %>%
ggplot(aes(x=threshold, y=std_ann)) +
geom_point() +
geom_smooth(se = FALSE)
# delta-hedged
df_metrics_dh %>%
ggplot(aes(x=threshold, y=sharpe)) +
geom_point() +
geom_smooth(se = FALSE)
df_metrics_dh %>%
ggplot(aes(x=threshold, y=avg_ann)) +
geom_point() +
geom_smooth(se = FALSE)
df_metrics_dh %>%
ggplot(aes(x=threshold, y=std_ann)) +
geom_point() +
geom_smooth(se = FALSE)
|
e93c1205fe79df59c9bd7f63864e81427883ad2c | 89979be00febc21cff5c31d3ce774f0cbfce17b8 | /run.R | a44e217b23a4c792b6c84ec9d8cb29975619bced | [] | no_license | joe-nas/genePhenotypePrediction | f5d37d88215d83ca860a2ab8cd626e72b71f97dd | 4f79b624d6a59e2b6d75392d8c1e44148ece3597 | refs/heads/master | 2020-12-24T17:26:57.561550 | 2015-06-17T20:53:06 | 2015-06-17T20:53:06 | 37,200,968 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,947 | r | run.R | library(GEOmetadb)
# library(GEOquery)
library(plyr)
library(data.table)
library(caret)
library(doMC)
library(exprt)
registerDoMC(18)
# getSQLiteFile(destdir ="../genePhenotypePrediction/" )
con <- dbConnect(SQLite(),'../genePhenotypePrediction/GEOmetadb.sqlite')
gdsquery <- dbGetQuery(con,'SELECT sample_organism, gds, sample_count, gpl, value_type from gds WHERE (sample_organism="Homo sapiens" OR sample_organism="Mus musculus") AND (value_type="count" OR value_type="transformed count")')
dbDisconnect(con)
#
# GAD <- fread("/scratch/jfalck/usr/rprojects/exprt/data/GADCDC/GADCDC_data.tsv",
# sep = "\t", header = T, )
#
gdsquery <- data.frame(lapply(gdsquery,function(x) gsub(" ","_",x)))
gdsquery$sample_count <- as.integer(gdsquery$sample_count)
gds <- Filter(function(x){sum(x$sample_count) >= 50},split(gdsquery, list(gdsquery$sample_organism, gdsquery$gpl, gdsquery$value_type),))
# Build one S4 "Expr" object per GDS metadata group.
#
# `gds` is a list of data frames, each holding the GDS rows for a single
# organism / platform / value-type combination. Returns a list of "Expr"
# objects, one per group, in the same order as `gds`.
initializeExpr <- function(gds = "list"){
  build_one <- function(grp) {
    # Collapse each metadata column to its unique value; spaces become
    # underscores so the identifiers are safe for file names.
    organism_id <- gsub(" ", "_", as.character(unique(grp$sample_organism)))
    value_type_id <- gsub(" ", "_", as.character(unique(grp$value_type)))
    new("Expr",
        organism = organism_id,
        valueType = value_type_id,
        gpl = as.character(unique(grp$gpl)),
        gds = as.character(unique(grp$gds)),
        sampleCounts = as.integer(grp$sample_count))
  }
  lapply(gds, build_one)
}
# cleft_lip_genes <- toupper(GAD[DISEASE %like% "cleft lip",unique(GENE)])
test <- initializeExpr(gds)
# Quick diagnostic plot for one SVD result: the per-component
# variance-explained curve on a [0, 1] scale, with a vertical line at the
# number of retained principal components (i$n_pcs).
analysis <- function(i){
  explained <- i$var_explained
  plot(explained,
       ylim = c(0, 1))
  abline(v = i$n_pcs)
}
#
# library(pROC)
#
#
# ho <- sample(cleft_lip_genes,80,replace = F)
# train <- cleft_lip_genes[!cleft_lip_genes %in% ho]
#
# testl <- c("Homo_sapiens.GPL97.transformed_count","Homo_sapiens.GPL2895.count",
# "Homo_sapiens.GPL571.count","Homo_sapiens.GPL571.transformed_count",
# "Homo_sapiens.GPL96.transformed_count","Homo_sapiens.GPL97.count")
tbd <- file.exists(paste(gsub("[.]","_",names(test)),"rds",sep="."))
test <- test[!tbd]
e <- new.env()
e$dat <- ""
# Run the processing pipeline for one dataset in the global `test` list,
# indexed by position `ds`: load raw data, merge it, reduce dimensionality,
# and persist the SVD result. Relies on reference semantics of the Expr
# objects, so each step mutates test[[ds]] in place; statement order matters.
# NOTE(review): assumes `test` exists in the calling environment and that
# load/merge/reduce/exprSvd$save are Expr methods -- confirm against class def.
doforall <- function(ds){
  test[[ds]]$load()            # read raw expression data from disk
  test[[ds]]$merge(fbind = F)  # merge samples (fbind disabled)
  #  test[[ds]]$impute()
  test[[ds]]$reduce()          # dimensionality reduction (SVD)
  test[[ds]]$exprSvd$save()    # persist the reduced representation
  #   e$dat <<- readRDS(paste(names(test)[ds],"rds.gz",sep = "."),compress="gzip")
  #   e$dat$reduce(.6)
  # #  test[[ds]]$save(names(test)[ds])
  #   e$dat$pred(train,666)
  #   e$dat$prediction[,.(IDENTIFIER,
  #                       "prediction" = prediction,
  #                       "pcs" = e$dat$n_pcs)][]
}
res <- l_ply(seq_along(test)[95:length(test)],doforall)
#
# rocfun <- function(i){
# plot(roc(res[[i]]$IDENTIFIER %in% ho, res[[i]]$prediction))
# }
#
# total <- 20
# for(i in 1:total){
# Sys.sleep(0.1)
# cat(i)
# # update GUI console
# flush.console()
# }
#
#
#
#
# tdt <- data.table::rbindlist(res)
# tdt <- tdt[,.("prediction" = mean(prediction*pcs)/sum(pcs)),by=IDENTIFIER]
#
# plot(roc(tdt$IDENTIFIER %in% ho, tdt[,(prediction)]))
mdat <- test[[4]]$gdsMissing[,3:ncol(test[[4]]$gdsMissing),with=F]
|
c4e17ddd532f893c3f6f57c4a69bea7e4038b21f | a3af6f3f22f37375f2e73f933d7e3a189c2f4e65 | /HW3.R | fadc7dd8d7cd98c6e9468fd4af65416d85ef1df6 | [] | no_license | stevespangler/MSA8190 | 22aae359ac7da271f43b9b89e7862b4c182a7999 | 061f9dcbabf73488674440f5007608990de995ef | refs/heads/master | 2021-01-19T09:14:32.420892 | 2017-02-15T16:01:51 | 2017-02-15T16:01:51 | 82,078,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,278 | r | HW3.R | # Problem 1
############
# CDF of the Uniform(-2, 2) distribution, evaluated element-wise:
#   F(x) = 0            for x < -2
#   F(x) = 0.25*x + 0.5 for -2 <= x < 2
#   F(x) = 1            for x >= 2
#
# @param x numeric vector of evaluation points.
# @return numeric vector of the same length as `x`.
#
# Fixes two bugs in the original: `for (i in pts)` iterated only over the
# single value length(x) (so all but the last element were left equal to
# x[i]), and the middle branch used `0.25*x` (the whole vector) instead of
# the i-th element. The vectorized form handles vectors of any length,
# including length zero, and matches the original for scalar inputs.
F <- function(x){
  y <- ifelse(x < -2, 0,
              ifelse(x < 2, 0.25 * x + 0.5, 1))
  return(y)
}
# a
F(1.8)
# b
1-F(-1.4)
# c
F(-2)
# d
F(1) - F(-1)
# Problem 2
############
# a
D(expression(1-exp(-2*x)), 'x')
# b
D(expression(0.2*x),'x')
D(expression(0.04*x + 0.64),'x')
# Problem 3
############
# Probability density f(x) = 1.5 * x^2 (density of X on [-1, 1]).
# Vectorized: returns one value per element of `x`.
f <- function(x) {
  1.5 * x ^ 2
}
# CDF corresponding to the density `f` supported on [-1, 1], computed by
# numerical integration:
#   F(x) = 0                          for x < -1
#   F(x) = integral of f from -1 to x for -1 <= x < 1
#   F(x) = 1                          for x >= 1
#
# @param x numeric vector of evaluation points.
# @return numeric vector of the same length as `x`.
#
# Improvements over the original: `seq_along(x)` instead of `1:pts` (which
# produced c(1, 0) and iterated incorrectly for empty input), an explicit
# if/else chain instead of abusing ifelse() for its side effects, and
# `$value` spelled out instead of relying on `$val` partial matching.
F <- function(x){
  y <- numeric(length(x))
  for (i in seq_along(x)) {
    if (x[i] < -1) {
      y[i] <- 0
    } else if (x[i] < 1) {
      # Accumulate the density mass from the lower support bound up to x[i].
      y[i] <- integrate(f, lower = -1, upper = x[i])$value
    } else {
      y[i] <- 1
    }
  }
  return(y)
}
# Plot of CDF
curve(F, from = -1, to = 1, n=1001)
# Mean or Expected Value of X
integrate(function(x) x*f(x), lower = -1, upper = 1)
# Variance of X
integrate(function(x) x^2*f(x), lower = -1, upper = 1)
# Problem 4
############
a <- 1.5
b <- 2.2
# Density of the Uniform(a, b) distribution, evaluated element-wise:
# 1/(b-a) inside [a, b], 0 outside. Relies on the globals `a` and `b`
# defined just above this function.
#
# @param x numeric vector of evaluation points.
# @return numeric vector of the same length as `x`.
#
# Improvements over the original: the support bounds now use the globals
# `a`/`b` instead of duplicating their current values (1.5 and 2.2) as
# literals, the loop/`1:pts` pattern (broken for empty input) is replaced
# with a vectorized ifelse(), and ifelse() is no longer abused for its
# assignment side effects. Boundary behavior is unchanged: f(a) and f(b)
# both evaluate to 1/(b-a), exactly as before.
f <- function(x){
  y <- ifelse(x >= a & x <= b, 1 / (b - a), 0)
  return(y)
}
# a
# mean
(a+b)/2
# variance
(b-a)^2/12
# b
integrate(f,lower = -Inf,upper = 2)
# Problem 5
############
# a
qnorm(0.9)
# b
qnorm(0.5)
# c
qnorm(0.1, lower.tail = FALSE)
# d
qnorm(0.9, lower.tail = FALSE)
# e
# Problem 6
############
# a
p <- 0.9
qnorm((1+p)/2)
qnorm((1-p)/2)
# b
p <- 0.99
qnorm((1+p)/2)
qnorm((1-p)/2)
# c
p <- 0.68
qnorm((1+p)/2)
qnorm((1-p)/2)
# d
p <- 0.9973
qnorm((1+p)/2)
qnorm((1-p)/2)
# Problem 7
############
# a
pnorm(12,mean=12.4,sd=0.1)
# b
pnorm(12.1,mean = 12.4,sd = 0.1) + pnorm(12.6,mean = 12.4,sd = 0.1,lower.tail = FALSE)
# c
p <- 0.99
qnorm((1+p)/2, mean = 12.4,sd = 0.1)
qnorm((1-p)/2, mean = 12.4,sd = 0.1)
# Problem 8
############
n <- 5000
p <- 1-0.999
mu <- n*p
var1 <- n*p*(1-p)
z <- (10-mu)/sqrt(var1)
pnorm(z,lower.tail = FALSE)
# Problem 9
############
# a
lam <- 1000*0.4
# b
z <- (350-lam)/sqrt(lam)
pnorm(z,lower.tail = FALSE)
# Problem 10
############
lam <- 0.0003
# a
pexp(10000,rate = lam,lower.tail = FALSE)
# b
pexp(7000,rate = lam)
# Problem 11
############
mu <- 15
lam <- 1/mu
# a
dexp(30, rate = lam)
# b
pexp(10,rate = lam)
# c
pexp(10,rate = lam) - pexp(5, rate = lam)
# d
qexp(0.9, rate = lam)
# Problem 12
############
lam <- 30/60
r <- 5
# a
mu <- r/lam
std <- sqrt(r/lam^2)
# b
pgamma(10,shape = r,rate = lam)
# c
pgamma(5,shape = r,rate = lam)
# Problem 13
############
lam <- 20
r <- 100
# a
mu <- r/lam
# b
r <- 30
mu <- r/lam
# c
lam <- 5 # calls in 15 sec interval
ppois(3, lam, lower.tail = FALSE)
# Problem 14
############
# a
mu <- 700*gamma(1+0.5)
# b
var1 <- 700^2*gamma(1+(2/2)) - 700^2*(gamma(1+(1/2)))^2
# c
pweibull(mu, 2,scale = 700, lower.tail = FALSE)
# Problem 15
############
# a
plnorm(10, meanlog = 0.5, sdlog = 1, lower.tail = FALSE)
# b
qlnorm(0.5,meanlog = 0.5, sdlog= 1)
# c
mu <- exp(0.5+(1/2))
std <- sqrt(exp(2*0.5+1)*(exp(1)-1))
|
c6fda5ab8b913c8c52581d853b218b1e45b4452f | 700c56e663c1fcec3cbd9cd0645c61cbd6fc4072 | /OnlineSuperUnif/man/pvalues_simulation.Rd | d08e9831b0e2a4c27387e159a34c6930635da6b7 | [] | no_license | iqm15/SUREOMT | 6d6c63a5132652386cc97793848f4cebec07cc48 | 0019960eea18095ea6dc147a3314f00217ee8ef0 | refs/heads/main | 2023-08-16T06:22:44.073300 | 2021-10-25T05:28:47 | 2021-10-25T05:28:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,452 | rd | pvalues_simulation.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_pvalues_simulation.R
\name{pvalues_simulation}
\alias{pvalues_simulation}
\title{pvalues-simulation}
\usage{
pvalues_simulation(dataframe, alternative = "two.sided", input = "marginal")
}
\arguments{
\item{alternative}{A string informing if we are doing two.sided test or not.
The argument can either be "two.sided", "less", or "greater".
Here the default is to "two.sided".}
\item{input}{The format of the input data frame, see the references below.
Here the default is to "marginal".}
\item{dataframe}{A data frame of 4 columns and any number of rows (the number of rows
is the number of hypotheses that the user is testing).}
}
\value{
A list containing the raw p-values and their CDF support.
}
\description{
Function that performs Fisher's exact two-sided test.
This function uses the 'fisher.pvalues.support' from
the DiscreteFDR package of Junge. F et al (2019).
}
\examples{
test_data <- data.frame(c(1, 0, 2, 5, 2), rep(25, 5), c(0, 0, 2, 14, 11), rep(25, 5))
or test_data <- data_simulation(25, 100, 0.3, 0.4, "end")$data
pvalues_simulation(test_data).
}
\references{
Junge. F, Durand. G, Döhler. S, and Roquain. E (2019)
DiscreteFDR: An R package for controlling the false
discovery rate for discrete test statistics.
\url{https://arxiv.org/pdf/1904.02054.pdf}
\url{https://cran.r-project.org/web/packages/DiscreteFDR/index.html}.
}
|
5f2711c91b7793fbcb3158b85dfe1f6cafbdc162 | 6779754ebb67b19261b164d015c596e1ec9dd251 | /man/eval_code.Rd | 85520ba42dd3e4e7da73faaa426041ca92e392d8 | [
"MIT"
] | permissive | dirkschumacher/rcompilertools | df06d2975177a7fc3c4985383aa311aa9882b6aa | 97950299056412fbbe294a505c7d2756116b00de | refs/heads/master | 2021-06-25T13:52:19.688390 | 2021-01-24T21:22:39 | 2021-01-24T21:22:39 | 205,582,293 | 16 | 0 | null | null | null | null | UTF-8 | R | false | true | 272 | rd | eval_code.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compiler-tools.R
\name{eval_code}
\alias{eval_code}
\title{Eval tidy bytecode}
\usage{
eval_code(x)
}
\arguments{
\item{x}{an object of class tidy_bytecode}
}
\description{
Eval tidy bytecode
}
|
660655ea15a5969770543541ed14b85538f29970 | efd84b42aa36f00b1c73d153c0f3c17d67481020 | /titanic/Titanic20.R | a9e6561e47de043946500c846fc9551a9a202a4e | [] | no_license | beaunorgeot/datasciencecoursera | e940c1e5a020817e7ff335f9c27a256305f21f54 | bb8a4dece6918cb5a990645658d21f7f5e6e9945 | refs/heads/master | 2021-04-12T05:16:44.925606 | 2015-09-18T20:58:52 | 2015-09-18T20:58:52 | 31,728,932 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,831 | r | Titanic20.R |
library(caret)
library(randomForest)
library(dplyr)
setwd("/Users/bnorgeot/datasciencecoursera/titanic")
inTraining <- read.table("train.csv", sep= ",", header=TRUE)
testSet <- read.table("test.csv", sep= ",", header=TRUE) #these are unlabeled and used to upload to kaggle.
# CHECK THE PROPORTIONS of survived died.If 1 outcome is far more common than the other you need to change your approach
#This is an important step because if the proportion was smaller than 15%,
# it would be considered a rare event and would be more challenging to model.
prop.table(table(inTraining$Survived)) #survived is outcome 1 = .38
set.seed(42)
#split the training set into a train/test set (which I'm calling validate), so that testSet is a true hold out set
inTrain <- createDataPartition(inTraining$Survived, p = 0.8, list = F)
trainSet <- inTraining[inTrain,]
validSet <- inTraining[-inTrain,]
#remove features that I don't want to use for prediction. I only do this on trainSet, I want validSet to look exactly like testSet, except that outcome is labeled
library(dplyr)
#trainSet <- trainSet %>% select(-c(PassengerId, Name,Ticket,Cabin))
#glmnet can't deal w/factors, it can only handle numerics. Solve this problem by turning factor variables into dummyVars
#make dummies
#titanicDummy <- dummyVars("~.", data=trainSet, fullRank=F)
#trainSet <- as.data.frame(predict(titanicDummy,trainSet))
#trainSet$Survived <- as.factor(trainSet$Survived)
#validSet <- as.data.frame(predict(titanicDummy,validSet))
# TRAIN SOME MODELs
myControl <- trainControl(method = "repeatedcv", #use cross-validation
number = 10, repeats = 5 )
#glmnetFit <- train(Survived ~., data = trainSet, method = "glmnet", preProcess = "knnImpute", trControl = control, metric ="Accuracy") #.80
#formula <- c(Survived ~ Pclass + Sex + Fare + SibSp + Embarked + Parch)
#form. <- list("Pclass"+"Sex"+"Fare"+"SibSp"+"Embarked"+"Parch")
#modRF1 <- train(form = formula, method = "rf", data = trainSet, trControl = myControl)
trainSet$Survived <- as.factor(trainSet$Survived) # outcome should be factor for classification
modRF <- train(Survived ~Pclass + Sex + Fare + SibSp + Embarked + Parch, method = "rf", data = trainSet, trControl = myControl)
modGBM <- train(Survived ~Pclass + Sex + Fare + SibSp + Embarked + Parch, method = "gbm", data = trainSet, trControl = myControl, verbose = F)
modLDA <- train(Survived ~Pclass + Sex + Fare + SibSp + Embarked + Parch, method = "lda", data = trainSet, preProcess=c("center","scale"), trControl = myControl)
modRF #.815
modGBM #.81
modLDA #.78
# STACK THE PREDICTIONS
# make predictions
predRF <- predict(modRF,validSet)
predGBM <- predict(modGBM, validSet)
predLDA <- predict(modLDA, validSet)
# Fit a model that combines all (both of the predictors)
predDF <- data.frame(predRF,predGBM,predLDA,Survived=validSet$Survived)
predDF$Survived <- as.factor(predDF$Survived)
#train a new model on the predictions
validSet$Survived <- as.factor(validSet$Survived)
combModFit <- train(Survived ~.,method="rf",data=predDF, trControl = myControl)
predComb <- predict(combModFit,validSet)
# Get/compare the accuracies for the 3 singular models and the 1 combined model (4 models)
c1 <- confusionMatrix(predRF, validSet$Survived)$overall[1]
c2 <- confusionMatrix(predGBM, validSet$Survived)$overall[1]
c3 <- confusionMatrix(predLDA, validSet$Survived)$overall[1]
c4 <- confusionMatrix(predComb, validSet$Survived)$overall[1]
print(paste(c1, c2, c3, c4)) #"0.786516853932584 0.786516853932584 0.775280898876405 0.820224719101124"
# Combined did 4-5% better than any individual model
#Compare the predictions from each model to eachother, and color by the true answer to compare how close they were
qplot(predRF,predGBM,colour=Survived,data=validSet) #this just produced 2 data points wtf? duh, there's only 2 outcomes.
# This diagnostic would be useful for regression, but isn't all that useful for classification
#check correlation modelCor
predDF1 <- predDF
predDF1$predRF <- as.numeric(predDF1$predRF)
predDF1$predGBM <- as.numeric(predDF1$predGBM)
predDF1$predLDA <- as.numeric(predDF1$predLDA)
predDF1$Survived <- as.numeric(predDF1$Survived)
cor(predDF1) # this works, and it checks the correlation of the predictions LDA and GBM are highly correlated, the others are not
#Here is the caret method for checking the correlation of the models
modCor <- modelCor(resamples(list(RF = modRF, GBM = modGBM, LIN = modLDA))) # we see no correlation between models
#Try some other models for poops
modGLMNET <- train(Survived ~Pclass + Sex + Fare + SibSp + Embarked + Parch, method = "glmnet", data = trainSet, preProcess=c("center","scale"), trControl = myControl)
# see what models are available
names(getModelInfo())
modSVM <- train(Survived ~Pclass + Sex + Fare + SibSp + Embarked + Parch, method = "svmRadial", data = trainSet, preProcess=c("center","scale"), trControl = myControl)
modGLMNET #.79
modSVM #.79
modCor2 <- modelCor(resamples(list(GLMNET = modGLMNET, SVM = modSVM))) #no correlation b/tween models
predGLMNET <- predict(modGLMNET, validSet)
predSVM <- predict(modSVM, validSet)
#join these predictions to the old combined set to create a super set built w/5 models, all w/similar accuracy
predDF2 <- data.frame(predDF,predSVM,predGLMNET)
#check correlation across all models
modCor3 <- modelCor(resamples(list(RF = modRF, GBM = modGBM, LIN = modLDA,GLMNET = modGLMNET, SVM = modSVM))) #No High Correlations
#New Stack, Use GBM to bring all pieces together
combModFit2 <- train(Survived ~.,method="gbm",data=predDF2, trControl = myControl)
predComb2 <- predict(combModFit2,validSet)
# Check accuracy of everything
c1 <- confusionMatrix(predRF, validSet$Survived)$overall[1]
c2 <- confusionMatrix(predGBM, validSet$Survived)$overall[1]
c3 <- confusionMatrix(predLDA, validSet$Survived)$overall[1]
c4 <- confusionMatrix(predComb, validSet$Survived)$overall[1]
c5 <- confusionMatrix(predGLMNET, validSet$Survived)$overall[1]
c6 <- confusionMatrix(predSVM, validSet$Survived)$overall[1]
c7 <- confusionMatrix(predComb2, validSet$Survived)$overall[1]
print(paste(c1, c2, c3, c4, c5, c6, c7))
# Accuracy for comb2 was LOWER than comb1, overfitting or does gbm perform worse than rf for this task?
# This print/paste shit is lame, create a proper table
# Does combining in an RF change anything? YES
combModFit3 <- train(Survived ~.,method="rf",data=predDF2, trControl = myControl)
predComb3 <- predict(combModFit3,validSet)
c8 <- confusionMatrix(predComb3, validSet$Survived)$overall[1] #.831 Got and extra percent!
# Change testSet$Survived to factor!! Way to go retartd! testSet is unlabeled
#HERE. STUCK. CAN'T PREDICT ON TESTSET!!!!
#Make new predictions on the unlabeled testSet and submit to Kaggle
testSet$Survived <- predict(combModFit3, newdata = testSet)
## Error in `$<-.data.frame`(`*tmp*`, "Survived", value = structure(c(1L, : replacement has 417 rows, data has 418
#Uh, oh! There is an error here! When you get this type of error in R, it means that you are trying to assign a vector
#of one length to a vector of a different length, so the two vectors don't line up. We have to find the missing data ourself
summary(testSet) #The variable Fare has 1 NA
#replace w/ifelse:
# if an entry in the column “Fare” is NA, then replace it with the mean of the column (also removing the NA's when you take the mean). Otherwise, leave it the same.
testSet$Fare <- ifelse(is.na(testSet$Fare), mean(testSet$Fare, na.rm= TRUE), testSet$Fare)
#Now try training the model again
testSet$Survived <- predict(model, newdata = testSet)
# TRY THIS AS SUBMISSION
submission2 <- testSet %>% select(PassengerId,Survived)
#write resulting predictions w/only the two columns to csv
write.table(submission,file = "submission3.csv", col.names = TRUE, row.names = FALSE, sep = ",")
|
198460c9832a56f85766f6eee2a5d98920637b0d | 7a95abd73d1ab9826e7f2bd7762f31c98bd0274f | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842256-test.R | 7d23be3eadd1c2de09c2ceb6464afdb1b9901f93 | [] | no_license | akhikolla/updatedatatype-list3 | 536d4e126d14ffb84bb655b8551ed5bc9b16d2c5 | d1505cabc5bea8badb599bf1ed44efad5306636c | refs/heads/master | 2023-03-25T09:44:15.112369 | 2021-03-20T15:57:10 | 2021-03-20T15:57:10 | 349,770,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | 1615842256-test.R | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = numeric(0), rs = numeric(0), temp = c(4.69559353453699e-306, 2.64939413085906e-158, -10930438.4735499, 1.81037721950714e+87, -1.39621374675821e-18, 9.03412513167857e-46, 4.56997008477242e+255, -1.92295507924653e-68, 2.08343441298214e-168, 7.18523245988025e-298, 4.74403783644579e-53, 2.21250606168974e+76, 3.72141460118873e+137, -5.61830321090795e-205, -1.4259446736964e+282, -8.04135102968176e-61, 5.68302231331421e+225, 1.28924107955207e+99, -1.38209255127074e+56, -1.53732826639188e+173, -1.53732818170537e+173, 5.95835080989286e-136, 2.07507571253324e-322, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
45cb133faaeab5f48aefb6286fe845555280f8e2 | 3b6fd9148a6c86cd05fc3b9db05cee8b7cbb96fa | /man/freq.Rd | 7e767d133dc7244226e6593f5d720cc31244c4c0 | [
"MIT"
] | permissive | markusdumke/fluid | 604faa7023539c843d44ff6d970663e507c18068 | e7c4c566495e9f76d6b4bacec475ab8a1bb04686 | refs/heads/master | 2020-04-18T09:41:31.133937 | 2019-09-23T18:48:40 | 2019-09-23T18:48:40 | 167,443,315 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 650 | rd | freq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freq.R
\name{freq}
\alias{freq}
\alias{infreq}
\title{Show most/least frequent values.}
\usage{
freq(.x, .n = 10, .prop = FALSE)
infreq(.x, .n = 10, .prop = FALSE)
}
\arguments{
\item{.x}{A vector passed on to \link{table}.}
\item{.n}{Integer scalar. Number of classes to show.}
\item{.prop}{Logical value. Show proportions?}
}
\value{
Table object.
}
\description{
These are convenient functions to show the most frequent or least frequent
values in a factor or character vector.
}
\examples{
iris[1:79, "Species"] \%>\% freq()
iris[1:79, "Species"] \%>\% infreq(2)
}
|
5a540c65686fd8c4c142bc7b8d3c62024efa23a4 | 7967838192a759a9de5603b011226ca8cba2a05a | /getSPX.R | a194a1d00402262fb79ea3883b814749cc7d8af7 | [] | no_license | dxcv/Asset-Allocation-Model | 8457db8c66b0d2a30927cc261d3e0e74f6b64106 | 87a5674fd57fc33b4beac7c23368c013811f9dc8 | refs/heads/master | 2020-12-03T23:20:13.263414 | 2017-11-14T21:20:06 | 2017-11-14T21:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 700 | r | getSPX.R | getSPX = function() {
require(lubridate)
# get data in the "SPX.csv" file and log transform the prices
#get data
spx = read.csv("SPX.csv",colClasses = "character")
clean = function(df) {
colnames(df) = as.character(df[1,])
rownames(df) = df[,1]
df = df[-1, ,drop=FALSE]
df=df[,-1,drop=FALSE]
df = df[1:(nrow(df)-1),,drop=FALSE]
rownames(df)[1] <- "1/4/2002"
dates = mdy(rownames(df))
rownames(df) = dates
#print(head(df))
df
}
numerify = function(df) {
for (c in 1:ncol(df)) {
df[,c] = as.numeric(df[,c])
}
df
}
res = list(
spx = log(numerify(clean(spx)))
)
}
spx = getSPX() |
477c17b022622ef49c3180d22eeb7481877a9524 | 5b1f7332090d122e754a2f21e56d752d34edae96 | /app.R | 8d1fbd6f66240c07df5b94e421b3a716f9d5ee5f | [] | no_license | Gedevan-Aleksizde/karaokestudio | 83b46f7c9adcaa4d78fc512f5c01f0e804366f42 | f8aa11ccbfd2402530258f96dc10d18c785707ca | refs/heads/master | 2023-01-28T22:43:34.299495 | 2020-04-04T16:20:43 | 2020-04-04T16:20:43 | 219,550,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,368 | r | app.R | source(file = "global_funcs.R", encoding = "utf-8", local = T)
video_id <- "XSLhsjepelI" # miku
video_id <- "iH_YJde1yps" # dorohedoro
lang = "ja-JP"
lyrics <- read_rds("test_lyrics_dorohedoro.rds")
# Shiny UI: header with the app title/icon, a sidebar for entering a YouTube
# video ID, and a main panel hosting the embedded YouTube player.
ui <- fluidPage(
  useShinyjs(),  # enables the JS-driven player-status callbacks below
  titlePanel(
    fluidRow(
      column(width = 3, "KaʀaokeStudio"),
      # App icon, right-aligned in the header row.
      column(width = 9, img(src = "icon.svg", height = 80, width = 80), align = "right"),
      id = "title-header"
    ),
    windowTitle = "KaʀaokeStudio"
  ),
  # Video title, fetched once from the YouTube Data API at app start-up.
  h3(strong("タイトル:"), get_video_details(video_id)$items[[1]]$snippet$title),
  sidebarLayout(
    sidebarPanel(
      div(
        div(
          # div(htmlOutput("out1")),
          textInput(inputId = "videoId", label = "video ID", value = video_id)
        ), id = "sidebar-panel"
      ),
      width = 2
    ),
    mainPanel(
      # Placeholder div that the YouTube IFrame API replaces with the player;
      # the inline script injects the video id and loads js/youtube_api.js.
      div(id = "player", frameborder = "0"),
      tags$script(HTML(paste("var video_id =", sQuote(video_id, F), ";")),
                  HTML(read_file("js/youtube_api.js"))),
      id = "video-panel",
      width = 9
    )
  ), theme = "css/karaokestudio.css"
)
# Shiny server: when the client-side player reports status 1 (playing),
# record the user's voice for the video's duration, transcribe it with
# Google Cloud Speech, align the transcript against the lyrics, and show
# the resulting karaoke score as a notification.
server <- function(input, output, session){
  observeEvent(input$videoStatus,
               {
                 if(input$videoStatus == 1){
                   # Record for exactly as long as the video plays (blocking).
                   duration <- get_video_duration(id = video_id)
                   fpath <- record_voice(file = "tmp_shinytest.flac", seconds = duration)
                   print("recoding converted")
                   showNotification("scoring started...", type = "message")
                   # Speech-to-text on the recorded file via Google Cloud Speech.
                   voice <- get_voice_text_gls(get_gl_speech(fpath, lang = lang))
                   print("voice recognized")
                   # Fall back to the video's own captions when no lyrics were
                   # preloaded at the top of the file.
                   # NOTE(review): this assigns a local copy of `lyrics`; the
                   # global object is not updated -- confirm that is intended.
                   if(is.null(lyrics)){
                     lyrics <- convert_pronounce(get_video_caption(video_id = video_id))
                   }
                   print("lyrics downloaded")
                   score <- compute_karaoke_score(join_lyrics_voice(lyrics, voice))
                   print(score)
                   showNotification(
                     paste("SCORE: ", round(score), "/ 100"), type = "message", closeButton = T, duration = NULL
                   )
                 }
               })
  # Debug hook: pops a notification when an input named "do" fires
  # (no such control exists in the current UI).
  observeEvent(input$do, {
    showNotification(p("test"),
                     type = "message", closeButton = T, duration = NULL)
  })
  # output$out1 <- renderText(input$videoStatus)
}
shinyApp(ui = ui, server = server) |
5a76314d1fa04f84a298536a29d20de091809d10 | 10e7160c116b33c1c6ef4b919176089cb5177d10 | /man/carto.pal.Rd | 5ce011fcb549102bf309ab1a0ae0fa5449027703 | [] | no_license | riatelab/cartography | 556008a18a06b024791f7d1b1c7e0e2711b4c6f4 | 188a92e7cdc6e51d82ca8cacd970c7bb426f5d84 | refs/heads/master | 2023-08-06T02:16:00.746142 | 2023-01-18T09:30:27 | 2023-01-18T09:30:27 | 37,248,864 | 353 | 46 | null | 2021-02-16T13:03:24 | 2015-06-11T08:27:11 | R | UTF-8 | R | false | true | 2,866 | rd | carto.pal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/palettes.R
\name{carto.pal}
\alias{carto.pal}
\alias{carto.pal.info}
\alias{display.carto.pal}
\alias{display.carto.all}
\title{Build Cartographic Palettes}
\usage{
carto.pal(
pal1,
n1,
pal2 = NULL,
n2 = NULL,
middle = FALSE,
transparency = FALSE
)
carto.pal.info()
display.carto.pal(name)
display.carto.all(n = 10)
}
\arguments{
\item{pal1}{name of the color gradient (see Details).}
\item{n1}{number of colors (up to 20).}
\item{pal2}{name of the color gradient (see Details).}
\item{n2}{number of colors (up to 20).}
\item{middle}{a logical value. If TRUE, a neutral color ("#F6F6F6", light
grey) between two gradients is added.}
\item{transparency}{a logical value. If TRUE, contrasts are enhanced by
adding an opacity variation.}
\item{name}{name of the palette available in the package (see Details).}
\item{n}{number of colors in the gradient (up to 20).}
}
\value{
\code{carto.pal} returns a vector of colors.
\code{carto.pal.info} returns a vector of color palettes names.
}
\description{
\code{carto.pal} builds sequential, diverging and qualitative color palettes.
Diverging color palettes can be dissymmetric (different number of colors in
each of the two gradients).
\code{carto.pal.info} displays the names of all color palettes.
\code{display.carto.pal} displays one color palette.
\code{display.carto.all} displays all the available color palettes.
}
\details{
Sequential palettes: "blue.pal", "orange.pal", "red.pal",
"brown.pal", "green.pal", "purple.pal", "pink.pal", "wine.pal", "grey.pal",
"turquoise.pal", "sand.pal", "taupe.pal", "kaki.pal" or "harmo.pal". \cr\cr
Qualitative palettes: "pastel.pal" or "multi.pal".
}
\examples{
# Simple gradient: blue
carto.pal(pal1 = "blue.pal" ,n1 = 20)
# Double gradient: blue & red
carto.pal(pal1 = "blue.pal", n1 = 10, pal2 = "red.pal", n2 = 10)
# Adding a neutral color
carto.pal(pal1 = "blue.pal", n1 = 10, pal2 = "red.pal", n2 = 10, middle = TRUE)
# Enhancing contrasts with transparency
carto.pal(pal1="blue.pal", n1 = 10, pal2 = "red.pal", n2 = 10, middle = TRUE,
transparency = TRUE)
# The double gradient can be asymetric
carto.pal(pal1 = "blue.pal", n1 = 5, pal2 = "red.pal", n2 = 15, middle = TRUE,
transparency = TRUE)
# Build and display a palette
mypal <- carto.pal(pal1 = "blue.pal", n1 = 5, pal2 = "red.pal", n2 = 15,
middle = TRUE, transparency = TRUE)
k <- length(mypal)
image(1:k, 1, as.matrix(1:k), col =mypal, xlab = paste(k," classes",sep=""),
ylab = "", xaxt = "n", yaxt = "n",bty = "n")
carto.pal.info()
display.carto.pal("orange.pal")
display.carto.all(8)
}
\references{
Qualitative palettes were generated with "i want hue"
(\url{https://medialab.github.io/iwanthue/}) by Mathieu Jacomy at
the Sciences-Po Medialab.
}
|
bcaaff369d9f675ea08716bbb935b0543b37bc8c | dd3791e85564a7e4ff51b5701b069235ab8d3531 | /man/switchDirection.Rd | 4ae105000172f709f71a31ebb4b8f3ed2b5c1daa | [] | no_license | iamciera/DiffLogo | f0bb4b062a4ed680d36ebce61e568f1dbc030177 | e862f0a10d2a5397ad3f0baa51da22ce158e3b12 | refs/heads/master | 2021-01-22T04:05:19.913562 | 2017-05-19T06:54:39 | 2017-05-19T06:54:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 321 | rd | switchDirection.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwmAlignment.R
\name{switchDirection}
\alias{switchDirection}
\title{If 'forward' return 'reverse'
If 'reverse' return 'forward'}
\usage{
switchDirection(direction)
}
\description{
If 'forward' return 'reverse'
If 'reverse' return 'forward'
}
|
7dfbbf1ecb5f271ddeb53ac1ab537830f3c97f24 | e1d4cceb6474ac0d600136ffb4fcbdad59762236 | /tests/testthat/test_merge_bed.R | 559c2da533c7a654937caa2615c55b39bb7c2693 | [] | no_license | sgrote/ABAEnrichment | 64a3f24cdefa2b39d2c365e57ea83d81328146c6 | 6f9fb17b300e831900a252c78f3f3efa7b39582d | refs/heads/master | 2022-01-05T16:55:38.319400 | 2019-07-14T14:14:59 | 2019-07-14T14:14:59 | 107,267,872 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 758 | r | test_merge_bed.R |
# context("merge_bed.R")
# Fixture: overlapping and duplicated genomic intervals on chromosomes 1-5.
# input
chroms = c(1,1,1,2,2,2,2,2,2,3,3,4,4,4,5,5,5,5)
starts = c(2,4,10,2,2,2,7,7,7,1,3,8,2,5,1,7,2,3)
stops = c(3,10,12,5,5,5,9,8,10,4,5,10,7,6,2,9,4,5)
bed = data.frame(chroms,starts,stops)
# Expected result: per chromosome, all overlapping intervals collapsed into
# disjoint merged regions.
# expected output
chroms = c(1,1,2,2,3,4,4,5,5)
starts = c(2,4,2,7,1,2,8,1,7)
stops = c(3,12,5,10,5,7,10,5,9)
expected = data.frame(chroms,starts,stops)
test_that("candidate and background regions get correctly merged",{
    expect_that(merge_bed(bed), equals(expected))
})
# A region with start > stop is malformed; merge_bed() must reject it with
# the exact error message below.
# error-input
chroms = c(1,3,3,4)
starts = c(2,4,10,2)
stops = c(3,10,7,5)
bed = data.frame(chroms,starts,stops)
test_that("error is thrown when start > stop",{
    expect_that(merge_bed(bed), throws_error("Genomic regions must be defined with start < stop."))
})
|
32902af573deb84174adfad1300d2ea13e6c1124 | 4da90c8651b69da45f45abac575e6ef0e13f3ce9 | /Simulation.R | 3d74d4ea64b4e2b1ae214259ba58d0cfea72ac53 | [] | no_license | qqwjq/BoostedVCR | 16e43aaabeb54e940ea820fea178632c96cf6cd6 | ea6dba95fba284135618a4616331e19681914d85 | refs/heads/master | 2021-01-10T04:53:07.371807 | 2015-11-05T00:27:12 | 2015-11-05T00:27:12 | 45,576,502 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,270 | r | Simulation.R | ##############################################################################
# Simulation driver for the boosted varying-coefficient model in BoostTree.R.
# NOTE(review): restyling is avoided on purpose — save.image() below persists
# the workspace under these exact global names, so renaming would change the
# saved .RData content.
sub.set=T
M=1e+6
# Machine-specific working directory -- assumes the author's Windows layout.
setwd("F:\\Research\\MPO\\RPackage")
source("BoostTree.R")
# Wall-clock start time, used at the bottom to report total runtime.
begintime=strptime(date(), "%a %b %d %H:%M:%S %Y")
sum.tree=c()
# Boosting hyper-parameters: learning rate nu and number of boosting rounds.
nu.param=0.01
nBoost.param=5
# Simulated data: n observations, one covariate x, p index variables in s.mat.
n=2000
x.vec=rnorm(n)
p=10
s.mat=matrix(runif(p*n),ncol=p,byrow=T)
# True varying coefficients: functions of s.mat[,1] and s.mat[,2] only.
bet=cbind(2*(sin(2*pi*s.mat[,1]))^2+exp(2*s.mat[,2]-1),2*(cos(2*pi*s.mat[,1]))^2+8*s.mat[,2]*(1-s.mat[,2]))
# Response: intercept + slope model with Gaussian noise (sd = 0.5).
y.vec=apply(cbind(1,x.vec)*bet,1,sum)+rnorm(n,0,0.5)
plot(x.vec,y.vec,type='p',pch='.',cex=2)
################################################################################
# Name the index variables var1..varp and split 80/20 into train/prediction.
names.part=paste("var",1:p,sep='')
colnames(s.mat)=names.part
index.train=(1:n) %in% sample(1:n,round(n*0.8))
split.nd="1"
Leaf=rep("1",length(y.vec))
# Root-node criterion: residual sum of squares of a plain linear fit.
# regression() is defined in BoostTree.R -- TODO confirm its interface.
crit.root=sum((regression(y.vec[index.train], cbind(1,x.vec[index.train]),method="linear")$residuals)^2)
# Fit the boosted tree on the training subset.
res=Boost.Tree(s.mat[Leaf==split.nd &index.train,],as.matrix(cbind(1,x.vec)[Leaf==split.nd & index.train,]),y.vec[Leaf==split.nd & index.train],names.part,nBoost=nBoost.param,nu=nu.param,mini.size=20,fit.method="linear")
#endtime=strptime(date(), "%a %b %d %H:%M:%S %Y")
#difftime(endtime, begintime, units='secs')
# Tune the number of boosting rounds on the held-out 20%.
index.pred=!index.train
res.pred=pred.Boost(s.mat[index.pred,],as.matrix(cbind(1,x.vec)[index.pred,]),y.vec[index.pred],res,3:4,nBoost.param,nu=nu.param)
# Round with minimal L2 prediction error wins.
nBoost.tune=(1:nBoost.param)[res.pred$L2.error==min(res.pred$L2.error)]
# Refit with the tuned number of rounds.
res.tune=Boost.Tree(s.mat[Leaf==split.nd &index.train,],as.matrix(cbind(1,x.vec)[Leaf==split.nd & index.train,]),y.vec[Leaf==split.nd & index.train],names.part,nBoost=nBoost.tune,nu=nu.param,mini.size=20,fit.method="linear",ncatelow=0,ncatehigh=1)
# Persist the full workspace and report elapsed time.
save.image(paste("Sim1Boost",nBoost.param,".RData",sep=''))
endtime=strptime(date(), "%a %b %d %H:%M:%S %Y")
difftime(endtime, begintime, units='secs')
## Summarize the boosting results
summary.Boost(res,names.part)
## Out-sample prediction
index.pred=!index.train
res.pred=pred.Boost(s.mat[index.pred,],as.matrix(cbind(1,x.vec)[index.pred,]),y.vec[index.pred],res,3:4,nBoost.param,nu=nu.param)
################################################################################
|
049e10f297a76644ebf2be6dab4575fe2e6babfd | f6c1ee3cefbec9c3e493ef32389672b5f72b9dd0 | /data/exprDataCleaning.R | 035297da3a93fa259e7ce1935d656de20324f580 | [
"MIT"
] | permissive | isadorafranca/2017_project_5 | 0162257c978aebddfb8480307122bf79cac01f1e | bc6cfb0a2e54cd51363c3bdd77e2dedeecffc702 | refs/heads/master | 2020-08-06T09:00:37.616684 | 2019-01-26T08:34:30 | 2019-01-26T08:34:30 | 212,916,059 | 1 | 0 | MIT | 2019-10-04T23:08:31 | 2019-10-04T23:08:31 | null | UTF-8 | R | false | false | 3,176 | r | exprDataCleaning.R | ### Load my packages
library(magrittr)
library(bigsnpr)
# NOTE(review): this script also calls fread (data.table), dplyr/tidyr verbs,
# tibble's column_to_rownames and readr's write_csv without attaching those
# packages here -- presumably they are loaded by a calling script; verify.
#### First, we will create the gene expression matrix from all people...
# Assumption: All probes are identical for each dataset, and arranging the datasets by Normalization REF will out the DFs in the same order.
exprData <- Sys.glob("./data/E-MTAB-264.processed.1/*.txt") %>%
purrr::map(fread) %>%
purrr::map(dplyr::arrange, `Normalization REF`) %>%
dplyr::bind_cols()
### Remove the duplicate columns, which is the Normalization REF...
exprData %<>% dplyr::select(-dplyr::matches("Normalization REF[0-9]+"))
### Now lets read in the SNP data....
# Download the HapMap3 PLINK files (.bed/.bim/.fam) into a temp location.
tmpfile <- tempfile()
base <- paste0(
"https://github.com/gabraham/flashpca/raw/master/HapMap3/",
"1kg.ref.phase1_release_v3.20101123_thinned_autosomal_overlap")
exts <- c(".bed", ".bim", ".fam")
purrr::map2_int(paste0(base, exts), paste0(tmpfile, exts), ~download.file(.x, .y))
# Read the PLINK .bed into a bigsnpr backing file and attach it.
rdsfile <- snp_readBed(paste0(tmpfile, ".bed"), tempfile())
bigsnp <- snp_attach(rdsfile)
(G <- bigsnp$genotypes)
object.size(G)
# Materialize the file-backed genotype matrix in memory as a data frame.
G.matrix <- G[]
G.matrix <- as.data.frame(G.matrix)
format(object.size(G.matrix), units = "MB")
# Rows = individuals (sample IDs), columns = SNP marker IDs.
row.names(G.matrix) <- bigsnp$fam$sample.ID
colnames(G.matrix) <- bigsnp$map$marker.ID
# write.csv(G.matrix,"./data/snpData.csv")
# write.csv(bigsnp$map, "./data/snpMetaData.csv")
# write.csv(bigsnp$fam, "./data/metaData.csv")
snpData <- G.matrix
snpMetaData <- bigsnp$map
metaData <- bigsnp$fam
rm(G.matrix, bigsnp, tmpfile)
### I have snp data for 1092 people, but expression data for 619. Lets find the intersect there.
exprData %<>% dplyr::select(one_of(c("Normalization REF",intersect(colnames(exprData), rownames(snpData)))))
# Looks like the probes have Illumina codes, which is not really useful. Lets convert it to gene format. As indicated here (https://www.ebi.ac.uk/arrayexpress/experiments/E-MTAB-264/), the expression data was collected using A-MEXP-930 - Illumina Human-6 v2 Expression BeadChip. If you follow the link, there is a txt file with the code annotations, which is what we need.
# Similar to what we did above, lets read in this Illumina data temporarily.
tmpfile <- tempfile()
base <- "https://www.ebi.ac.uk/arrayexpress/files/A-MEXP-930/A-MEXP-930.adf"
exts <- ".txt"
download.file(paste0(base, exts),paste0(tmpfile, exts))
rdsfile <- fread(paste0(tmpfile, exts), header= TRUE, sep= "\t", skip= "Reporter")
# since there is a header we do not want, we are starting to read the file where the word REPORTER is.
# Map Illumina probe IDs to HUGO gene symbols.
rdsfile %<>% rename(`Normalization REF`= `Reporter Name`) %>% select(`Normalization REF`, `Reporter Database Entry[hugo]` )
exprData %<>% left_join(rdsfile)
exprData %<>% select(`Reporter Database Entry[hugo]`, everything())
# Drop probes with no gene symbol, then average duplicate probes per gene.
exprData %<>% filter(`Reporter Database Entry[hugo]` != "") %>% select(-`Normalization REF`)
exprData %<>% mutate_at(colnames(exprData)[2:length(exprData)], funs(as.numeric)) %>% group_by(`Reporter Database Entry[hugo]`) %>% summarise_all(mean)
exprData %<>% rename(GeneId = `Reporter Database Entry[hugo]`)
# Round expression values to 3 significant digits (signif needs a matrix-like,
# hence the rownames round-trip).
exprData %<>% column_to_rownames(var= "GeneId") %>% signif(digits= 3) %>% rownames_to_column(var= "GeneId")
exprData %>% write_csv("./data/exprData.csv.gz")
|
a3a7d7bca36e139a253ee922460ac8dcd5b9f6c9 | 3ea22cd02d20b7b33dc03f5f82aaad973905d355 | /Scripts/Places.R | 7cf9e0700a027bfbe5502f8ac5d11592e014f904 | [] | no_license | Redefine-Studio/Google-Maps-JSON-Visualization-with-R | 702c84ed76b86bb0c22027b7dbe34fa9abadbfd8 | 0194c292d90fe279253f5687d1df8a7e10bb12f8 | refs/heads/master | 2020-03-23T04:41:43.082473 | 2018-07-16T07:50:09 | 2018-07-16T07:50:09 | 141,099,699 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,140 | r | Places.R | # ANALYSE GOOGLE JSON FILES
# © 2018 Schule für Gestaltung Bern und Biel
# Philipp Lehmann, philipp@followscontent
#
#
# Load libraries
# --------------
library(jsonlite)
library(maps)
library(maptools)
library(rgdal)
# =============
# LOAD DATA
# =============
# Global variables
# ----------------
# Path of the currently loaded Google location JSON (updated by loadData()).
datafile <- "./Data/Demo.json"
# Full data frame of tracked locations (filled by loadData()).
all_places <- data.frame()
# Working subset written by the draw*/filter helpers below.
subset_places <- data.frame()
# Default plotting colour for points.
dot_color <- "#000000"
# Default init function
# ---------------------
loadData <- function(filepath = "./Data/Demo.json") {
  # Load a Google location-history JSON export into the global `all_places`.
  # Columns are renamed, lat/lon are rescaled from E7 integers to degrees and
  # the epoch-millisecond timestamp is converted to POSIXct (GMT).
  datafile <<- filepath
  places <- as.data.frame(fromJSON(datafile, flatten = TRUE))
  # One rename for the first eight columns instead of eight assignments.
  colnames(places)[1:8] <- c("Timestamp", "Latitude", "Longitude", "Accuracy",
                             "Altitude", "VertAccuracy", "Velocity", "Heading")
  # E7 fixed-point coordinates -> decimal degrees.
  places$Latitude <- places$Latitude * 0.0000001
  places$Longitude <- places$Longitude * 0.0000001
  # Epoch milliseconds -> POSIXct.
  places$Timestamp <- as.POSIXct(
    as.numeric(as.character(places$Timestamp)) / 1000,
    origin = "1970-01-01", tz = "GMT")
  all_places <<- places
  return(paste("You have loaded", nrow(all_places), "places."))
}
# ========
# MAPS
# ========
# DrawZurichMap
# -------------
# Plot a base map cropped to the Zurich area.
drawZurichGeoMap <- function () {
drawLocalGeoMap(8.45, 8.65, 47.27, 47.47)
}
# DrawBerneMap
# -------------
# Plot a base map cropped to the Berne area.
drawBerneGeoMap <- function () {
# drawLocalGeoMap(7.24, 7.29, 46.84, 47.04)
drawLocalGeoMap(7.2, 7.32, 46.8, 47.1)
}
# DrawSwissMap
# ------------
# Plot a base map covering all of Switzerland.
drawSwissGeoMap <- function () {
drawLocalGeoMap(6, 10.5, 45.5, 48)
}
# DrawLocalMap
# ------------
# Plot the Swiss canton + lakes GeoJSON layers, cropped to the given
# longitude (long1..long2) and latitude (lat1..lat2) window.
# NOTE: the GeoJSON files are re-read from ./Geo on every call.
drawLocalGeoMap <- function (long1 = 6, long2 = 10.5, lat1 = 45.5, lat2 = 48.0) {
# Assign coordinates
long = c(long1, long2)
lat = c(lat1, lat2)
# Load GeoJSON files
map_lakes = readOGR("./Geo/switzerland-lakes.geo.json", "OGRGeoJSON")
map_switzerland = readOGR("./Geo/switzerland-canton.geo.json", "OGRGeoJSON")
# Plot maps
plot(map_switzerland, xlim=long, ylim=lat, fg="white", col="grey", border="black", lwd=1)
plot(map_lakes, col="#4a4a4a", border=NA, add=TRUE)
}
# DrawEuropeMap
# ------------
# Plot a base map of Europe from the bundled GeoJSON.
drawEuropeGeoMap <- function () {
map_europe = readOGR("./Geo/europe.geo.json", "OGRGeoJSON")
plot(map_europe, xlim=c(-10,30), ylim=c(32, 70), fg="white", col="grey", border="black", lwd=1)
}
# DrawWorldMap
# ------------
# Plot a base map of the whole world from the bundled GeoJSON.
drawWorldGeoMap <- function () {
map_world = readOGR("./Geo/world.geo.json", "OGRGeoJSON")
plot(map_world, fg="white", col="grey", border="black", lwd=1)
}
# ===============
# DRAW PLACES
# ===============
# Draw all Places
# ---------------
drawAllPlaces <- function(c = "#000000", ch="•") {
  # Draw the full track on the current map (plot a base map first):
  # a faint connecting line plus one marker per recorded position.
  lon <- all_places$Longitude
  lat <- all_places$Latitude
  lines(lon, lat, col = adjustcolor(c, alpha.f = 0.3), lwd = 1)
  points(lon, lat, col = c, lwd = 3, pch = ch)
  return(paste("Plotted", nrow(all_places), "places"))
}
# Generic draw subset Places
# --------------------------
drawSubsetPlaces <- function(c = "#000000", ch="•", l=FALSE) {
  # Plot the global `subset_places` on the current map (plot a base map first).
  # c = point colour, ch = plotting symbol, l = also connect the points with a
  # semi-transparent track line.
  lon <- subset_places$Longitude
  lat <- subset_places$Latitude
  if (l) {
    lines(lon, lat, col = adjustcolor(c, alpha.f = 0.3), lwd = 1)
  }
  points(lon, lat, col = c, lwd = 3, pch = ch)
  return(paste("Plotted", nrow(subset_places), "places"))
}
# ===============
# FILTER DATA
# ===============
# Draw recent Places (Specify a number if you like)
# -------------------------------------------------
drawRecentPlaces <- function(n = 10, c = "#000000", ch="•") {
  # Draw the n most recent points (the file is assumed ordered
  # most-recent-first -- TODO confirm against the exported JSON).
  # FIX: head() clamps n to the row count; the original `all_places[1:n, ]`
  # produced NA rows whenever n > nrow(all_places).
  subset_places <<- head(all_places, n)
  # Draw recent places on map (plot a base map first).
  drawSubsetPlaces(c, ch, TRUE)
  # Comment in console
  return(paste("Displaying the", nrow(subset_places),
               "latest points that have been tracked"))
}
# Draw moving Places
# -------------------------------------------------
drawMovingPlaces <- function(c = "#ff0000", ch="•") {
  # A point counts as "moving" when its recorded velocity is positive.
  moving <- which(all_places$Velocity > 0)
  subset_places <<- all_places[moving, ]
  drawSubsetPlaces(c, ch)
  still.count <- nrow(all_places) - nrow(subset_places)
  return(paste("Moving points:", nrow(subset_places),
               " - Not moving points", still.count))
}
# Geographic points of interest
# -----------------------------
drawMostNorthernPoint <- function(c = "#ff0000", ch="•") {
  # Northernmost point = maximum latitude. which.max() replaces the original
  # order(...)[1] -- a single O(n) scan instead of a full sort, same row on ties.
  subset_places <<- all_places[which.max(all_places$Latitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Most northern point at:", subset_places$Latitude[1]))
}
drawMostSouthernPoint <- function(c = "#ff0000", ch="•") {
  # Southernmost point = minimum latitude.
  subset_places <<- all_places[which.min(all_places$Latitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Most southern point at:", subset_places$Latitude[1]))
}
drawMostEasternPoint <- function(c = "#ff0000", ch="•") {
  # Easternmost point = maximum longitude.
  subset_places <<- all_places[which.max(all_places$Longitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Most eastern point at:", subset_places$Longitude[1]))
}
drawMostWesternPoint <- function(c = "#ff0000", ch="•") {
  # Westernmost point = minimum longitude. The redundant
  # `subset_places <<- data.frame()` reset that only this function had was
  # dropped for consistency with its siblings.
  subset_places <<- all_places[which.min(all_places$Longitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Most western point at:", subset_places$Longitude[1]))
}
drawHighestPoint <- function(c = "#ff0000", ch="•") {
  # Point with the maximum recorded altitude.
  subset_places <<- all_places[which.max(all_places$Altitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Highest altitude:", subset_places$Altitude[1]))
}
drawLowestPoint <- function(c = "#ff0000", ch="•") {
  # Point with the minimum recorded altitude.
  subset_places <<- all_places[which.min(all_places$Altitude), ]
  drawSubsetPlaces(c, ch)
  return(paste("Lowest altitude:", subset_places$Altitude[1]))
}
drawHighestVelocity <- function(c = "#ff0000", ch="•") {
  # Point with the maximum recorded velocity.
  subset_places <<- all_places[which.max(all_places$Velocity), ]
  drawSubsetPlaces(c, ch)
  return(paste("Highest velocity:", subset_places$Velocity[1]))
}
# Filter by Time
# --------------
drawWeekendPoints <- function(c = "#00ff00", ch="•") {
  # POSIXlt $wday runs 0 (Sunday) .. 6 (Saturday).
  # FIX: the original test `wday > 5` matched Saturdays only and silently
  # dropped every Sunday; weekends are wday 6 (Sat) and wday 0 (Sun).
  wd <- as.POSIXlt(all_places$Timestamp)$wday
  subset_places <<- all_places[wd == 0 | wd == 6, ]
  drawSubsetPlaces(c, ch)
  return(paste("You have tracked", nrow(subset_places) ,"places on weekends."))
}
drawWorkdayPoints <- function(c = "#0000ff", ch="•") {
  # FIX: the original test `wday < 6` wrongly counted Sundays (wday 0) as
  # workdays; Monday..Friday are wday 1..5.
  wd <- as.POSIXlt(all_places$Timestamp)$wday
  subset_places <<- all_places[wd >= 1 & wd <= 5, ]
  drawSubsetPlaces(c, ch)
  return(paste("You have tracked", nrow(subset_places) ,"places on workdays."))
}
drawDaytimePoints <- function(c = "#00ff00", ch="•") {
  # Points whose local hour falls in 8..19 (i.e. 08:00 up to 19:59).
  # Hoist the repeated as.POSIXlt() conversion into one variable.
  hr <- as.POSIXlt(all_places$Timestamp)$hour
  subset_places <<- all_places[hr > 7 & hr < 20, ]
  drawSubsetPlaces(c, ch)
  return(paste("You have tracked", nrow(subset_places) ,"between 08:00 and 20:00"))
}
drawNighttimePoints <- function(c = "#0000ff", ch="•") {
  # Complement of the daytime window: 20:00 .. 07:59.
  hr <- as.POSIXlt(all_places$Timestamp)$hour
  subset_places <<- all_places[!(hr > 7 & hr < 20), ]
  drawSubsetPlaces(c, ch)
  return(paste("You have tracked", nrow(subset_places) ,"places between 20:00 and 08:00"))
}
drawDayPoints <- function(day = "2018-01-27", c = "#000000", ch="•") {
  # Keep only points whose timestamp starts with the given "YYYY-MM-DD" day,
  # then draw them connected by a track line.
  day.of.point <- substr(all_places$Timestamp, 1, 10)
  subset_places <<- all_places[day.of.point == day, ]
  drawSubsetPlaces(c, ch, TRUE)
  return(paste("You have tracked", nrow(subset_places) ,"places on", day))
}
drawTimerangePoints <- function(startdate = "2018-01-01", enddate = "2018-07-01", c ="#000000", ch="•") {
  # Draw every tracked point inside [startdate 00:00:00, enddate 23:59:59].
  start <- as.POSIXct(startdate)
  end <- as.POSIXct(paste(enddate, "23:59:59"))
  ts <- as.numeric(all_places$Timestamp)
  # FIX: the start bound is now inclusive (>=); the original used `>` and
  # dropped points logged exactly at startdate midnight while the end bound
  # was already inclusive. Dead commented-out code removed.
  subset_places <<- all_places[ts >= as.numeric(start) & ts <= as.numeric(end), ]
  drawSubsetPlaces(c, ch, TRUE)
  return(paste("You have tracked", nrow(subset_places) ,"places between", startdate, "and", end))
}
# ================
# OTHER CHARTS
# ================
drawTimerangeAltitudeChart <- function(startdate = "2018-01-01", enddate = "2018-07-01", c ="#000000") {
  # Line chart of altitude over time for all points in the given date range.
  start <- as.POSIXct(startdate)
  end <- as.POSIXct(paste(enddate, "23:59:59"))
  ts <- as.numeric(all_places$Timestamp)
  subset_places <<- all_places[ts > as.numeric(start) & ts <= as.numeric(end), ]
  # FIX: the original called drawSubsetPlaces(c, TRUE), which passed TRUE
  # positionally as the plotting symbol `ch`; the intent was the line flag `l`.
  drawSubsetPlaces(c, l = TRUE)
  plot.new()
  plot(subset_places$Timestamp, subset_places$Altitude,
       xlab = "Timerange", ylab = "Altitude", type = "l")
}
# ===========
# HELPERS
# ===========
# Clear workspace (resets R)
# --------------------------
clear <- function() {
  # Remove every object from the global environment except this function, so
  # that clear() can be called again.
  # FIX: the original excluded an object named "clr", which does not exist
  # here, so clear() deleted itself along with everything else.
  ENV <- globalenv()
  ll <- ls(envir = ENV)
  ll <- ll[ll != "clear"]
  rm(list = ll, envir = ENV)
}
# Check
# ----
check <- function(x) {
  # Smoke test: returns a fixed message so the user can confirm the script has
  # been sourced. The argument is accepted but unused.
  "Script loaded, everything ok"
}
|
a71ef3d978a391a01dbdb54e6d2833d95989264d | 584f29c4c847e307a1be1fb6df360b1ecce4e39a | /wines-2-data-preparation-outliers.R | 64b67f6be45327b21e0970af2239cb5d26241eaa | [] | no_license | fperes/r-wine-in | 1093316143d8245fdf933c061971a3a3ec309d03 | acbe97b0788a14ac3abaef77d688bf3b72423364 | refs/heads/master | 2021-01-21T22:05:42.476011 | 2017-06-22T22:50:37 | 2017-06-22T22:50:37 | 95,162,727 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,071 | r | wines-2-data-preparation-outliers.R | #
# INFNET MIT Big Data - Bloco A - Trabalho de R
#
# Título do Trabalho : Qualidade do vinhos
# Fonte de dados : UCI - base wines
# Autor : Fernando A J Peres
# Data : 2017-06-09
# Arquivo : Análise de outliers
#
#*******************************************************************************
# Análise entre variável dependente e independentes
# A T R I B U T O S:
# 01. fixed.acidity = Acidez fixa
# 02. volatile.acidity = Acidez volátil
# 03. citric.acid = Ácido cítrico
# 04. residual.sugar = Açucar residual
# 05. chlorides = Cloretos
# 06. free.sulfur.dioxide = Livre de dióxido de enxofre
# 07. total.sulfur.dioxide = Total de dióxido de enxofre
# 08. density = Densidade
# 09. pH = pH
# 10. sulphates = Sulfatos
# 11. alcohol = Teor alcólico
# 12. quality = Qualidade
# 13. color
# 14. col.color
# 15. taste
# 16. taste.color
# ******************************************************************************
# #### SETUP ####
# ******************************************************************************
## work directory path ##
# NOTE(review): machine-specific absolute path -- adjust when running elsewhere.
wine.path = "/Users/fernandoperes/dev/r/r-wine/" # to be reused as needed
setwd(wine.path)
## Sources
# Shared helpers: wine.sno.lno, wine.mark.outlier, wine.distribution.plot,
# wine.outliers.summary.plot and the wine.fields.* label constants.
source(file = paste(wine.path, "wines-0-utils.R", sep = ""))
## Libraries
library(dplyr)
library(ggplot2)
# ******************************************************************************
# #### Load prepared data files ####
# ******************************************************************************
# Loads the prepared `all.wine` data frame produced by the earlier scripts.
load(file="all-wine.Rda") # load(file="red-wine.Rda") # load(file="white-wine.Rda")
# ******************************************************************************
# #### 1. FIXED ACIDITY ####
# ******************************************************************************
# <<< Acidez fixa >>>
## initialization (decrease rework)
x = all.wine$fixed.acidity
field.label = wine.fields.fixed.acidity
field.name = "fixed.acidity"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$fixed.acidity
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# ******************************************************************************
# #### 2. VOLATILE ACIDITY ####
# ******************************************************************************
# The amount of acetic acid in wine, which at too high of levels can lead to an unpleasant, vinegar taste
# <<< Volatile acidity >>>
## initialization (decrease rework)
x = all.wine$volatile.acidity
field.label = wine.fields.volatile.acidity
field.name = "volatile.acidity"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$volatile.acidity
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Ácido cítrico
# ******************************************************************************
# #### 3. CITRIC ACID ####
# ******************************************************************************
# Found in small quantities, citric acid can add ‘freshness’ and flavor to wines
# <<< Ácido cítrico >>>
## initialization (decrease rework)
x = all.wine$citric.acid
field.label = wine.fields.citric.acid
field.name = "citric.acid"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$citric.acid
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# ******************************************************************************
# #### 4. RESIDUAL SUGAR ####
# ******************************************************************************
# The amount of sugar remaining after fermentation stops, it’s rare to find wines with less than 1 gram/liter and wines with greater than 45 grams/liter are considered sweet
# <<< Açucar residual >>>
## initialization (decrease rework)
x = all.wine$residual.sugar
field.label = wine.fields.residual.sugar
field.name = "residual.sugar"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$residual.sugar
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Cloretos
# ******************************************************************************
# #### 5. CHLORIDES ####
# ******************************************************************************
# The amount of salt in the wine
# <<< Cloretos >>>
## initialization (decrease rework)
x = all.wine$chlorides
field.label = wine.fields.chlorides
field.name = "chlorides"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$chlorides
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Livre de dióxido de enxofre
# ******************************************************************************
# #### 6. FREE SULFUR DIOXIDE ####
# ******************************************************************************
# The free form of SO2 exists in equilibrium between molecular SO2 (as a dissolved gas) and bisulfite ion; it prevents microbial growth and the oxidation of wine
# <<< Livre de dioxido de enxofre >>>
## initialization (decrease rework)
x = all.wine$free.sulfur.dioxide
field.label = wine.fields.free.sulfur.dioxide
field.name = "free.sulfur.dioxide"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$free.sulfur.dioxide
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Total de dióxido de enxofre
# ******************************************************************************
# #### 7. TOTAL SULFUR DIOXIDE ####
# ******************************************************************************
# Amount of free and bound forms of S02; in low concentrations, SO2 is mostly undetectable in wine, but at free SO2 concentrations over 50 ppm, SO2 becomes evident in the nose and taste of wine
# <<< Total de dioxido de enxofre >>>
## initialization (decrease rework)
x = all.wine$total.sulfur.dioxide
field.label = wine.fields.total.sulfur.dioxide
field.name = "total.sulfur.dioxide"
# calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$total.sulfur.dioxide
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Densidade
# ******************************************************************************
# #### 8. DENSITY ####
# ******************************************************************************
# Density of water is close to that of water depending on the percent alcohol and sugar content
# <<< Densidade >>>
## initialization (decrease rework)
x = all.wine$density
field.label = wine.fields.density
field.name = "density"
#calc limits
sno.lno = wine.sno.lno(x)
# mark ouliers
table(all.wine$outlier) # before mark outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # after marked outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$density
# Preparing to plot charts
## distribution plot resuls
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the difference all X witout outliers
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# pH
# ******************************************************************************
# #### 9. pH ####
# ******************************************************************************
# Describes how acidic or basic a wine is on a scale from 0 (very acidic) to 14 (very basic); most wines are between 3-4 on the pH scale
# <<< pH >>>
## initialization (reduce rework): cache the column, its display label, and its name
# BUG FIX: the original read all.wine$fixed.pH, a column that does not exist in
# this data set (the field is "pH", consistent with field.name and x2 below).
# That returned NULL, so min(x)/max(x) in the plot calls would fail.
x = all.wine$pH
field.label = wine.fields.pH
field.name = "pH"
# calc outlier cutoff limits (sno.lno — presumably lower/upper fence values; TODO confirm in wine.sno.lno())
sno.lno = wine.sno.lno(x)
# mark outliers for this field on the shared all.wine$outlier flag column
table(all.wine$outlier) # counts before marking outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # counts after marking outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$pH
# Preparing to plot charts side by side (with vs. without outliers)
## distribution plot results
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL observations (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers (same xlim as above so both panels are comparable)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the outlier vs. non-outlier counts for this field
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Sulphates
# ******************************************************************************
# #### 10. SULPHATES ####
# ******************************************************************************
# A wine additive which can contribute to sulfur dioxide gas (S02) levels, which acts as an antimicrobial and antioxidant
# <<< Sulphates >>>
## initialization (reduce rework): cache the column, its display label, and its name
x = all.wine$sulphates
field.label = wine.fields.sulphates
field.name = "sulphates"
# calc outlier cutoff limits (sno.lno — presumably lower/upper fence values; TODO confirm in wine.sno.lno())
sno.lno = wine.sno.lno(x)
# mark outliers for this field on the shared all.wine$outlier flag column
table(all.wine$outlier) # counts before marking outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # counts after marking outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$sulphates
# Preparing to plot charts side by side (with vs. without outliers)
## distribution plot results
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL observations (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers (same xlim as above so both panels are comparable)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the outlier vs. non-outlier counts for this field
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# Alcohol content
# ******************************************************************************
# #### 11. ALCOHOL ####
# ******************************************************************************
# The percent of alcohol
# <<< Alcohol content >>>
## initialization (reduce rework): cache the column, its display label, and its name
x = all.wine$alcohol
field.label = wine.fields.alcohol
field.name = "alcohol"
# calc outlier cutoff limits (sno.lno — presumably lower/upper fence values; TODO confirm in wine.sno.lno())
sno.lno = wine.sno.lno(x)
# mark outliers for this field on the shared all.wine$outlier flag column
table(all.wine$outlier) # counts before marking outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # counts after marking outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$alcohol
# Preparing to plot charts side by side (with vs. without outliers)
## distribution plot results
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL observations (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers (same xlim as above so both panels are comparable)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the outlier vs. non-outlier counts for this field
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
# #### 12. QUALITY ####
# ******************************************************************************
# The quality of the wine based on perception (rated 0..10)
# <<< Quality >>>
## initialization (reduce rework): cache the column, its display label, and its name
x = all.wine$quality
field.label = wine.fields.quality
field.name = "quality"
# calc outlier cutoff limits (sno.lno — presumably lower/upper fence values; TODO confirm in wine.sno.lno())
sno.lno = wine.sno.lno(x)
# mark outliers for this field on the shared all.wine$outlier flag column
table(all.wine$outlier) # counts before marking outliers
all.wine$outlier <- wine.mark.outlier(start = TRUE, df = all.wine,
field = field.name, sno.lno = sno.lno)
table(all.wine$outlier) # counts after marking outliers
# Get the subset of ALL.WINE excluding marked outliers
all.wine.non.outliers <- all.wine %>% filter(all.wine$outlier == FALSE)
x2 <- all.wine.non.outliers$quality
# Preparing to plot charts side by side (with vs. without outliers)
## distribution plot results
par.customized <- par(mfrow = c(1, 2))
# Plot field distribution with ALL observations (including outliers)
title = paste(field.label, " - com outliers presentes", sep = "")
wine.distribution.plot(title = title, x = x, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red")
# Plot field distribution excluding outliers (same xlim as above so both panels are comparable)
title = paste(field.label, " - com outliers removidos", sep = "")
wine.distribution.plot(title = title, x = x2, field.label = field.label,
color = wine.color.all, xlim = c(min(x), max(x)),
sno.lno = sno.lno, line.color = "red" )
## plot the outlier vs. non-outlier counts for this field
par.customized <- par(mfrow = c(1, 2))
title = paste(field.label, " - status de outliers", sep = "")
wine.outliers.summary.plot(title = title, t = table(all.wine$outlier),
colors = c("green", "dark red"))
|
6cd5810e37a70ce2ef4832bcf86883764da8a759 | 1a83ac47bb1ffe39b416dfce1964051fa77d5b7c | /man/crudeMonteCarlo-class.Rd | 82cf6f6c299c935956940f58c2cb298a51a24ea5 | [] | no_license | cran/sampSurf | 9052ab60378e3295ecade04e573e6770c145cf74 | 9388a099e8cef6109c544bcc95770bc9a60670e6 | refs/heads/master | 2021-06-05T21:01:53.864724 | 2021-03-05T14:50:02 | 2021-03-05T14:50:02 | 17,699,448 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,900 | rd | crudeMonteCarlo-class.Rd | \name{crudeMonteCarlo-class}
\Rdversion{1.1}
\docType{class}
\alias{crudeMonteCarlo-class}
%\alias{plot,crudeMonteCarlo,missing-method}
\alias{summary,crudeMonteCarlo-method}
\title{Class \code{"crudeMonteCarlo"}}
\description{ This is the class definition that allows for the
application of crude Monte Carlo sampling to \code{downLog} or
\code{standingTree} objects. Examples of the class usage can be found
in the Monte Carlo sampling vignette noted below. }
\section{Objects from the Class}{ Objects can be created by calls of the
form \code{new("crudeMonteCarlo", ...)}. However, a constructor function
of the same name, \code{\link{crudeMonteCarlo}}, has been provided and
is the preferred method for creating objects that are guaranteed to be
valid. }
\section{Slots}{
Please note that diameters below are presumed to be in the
\emph{same} units as length, i.e., meters for \dQuote{metric}, and
feet for \dQuote{English} \code{units}. Cross-sectional areas are in
compatible units.
In addition to the slots provided by the virtual superclass
\dQuote{\code{\linkS4class{MonteCarloSampling}}}, the following slots
are represented\ldots
\describe{
\item{\code{proxy}:}{Object of class \code{"character"}: The name of
the proxy function used. This should normally be the built-in
\dQuote{\code{cmcProxy}}, which is the default and the only proxy
allowed by the constructor.}
\item{\code{diam.s}:}{Object of class \code{"numeric"}: A vector of
\code{n.s} diameters corresponding to the sampled heights
\code{hgt.s} (see below). }
\item{\code{rho.s}:}{Object of class \code{"numeric"}: A vector of
cross-sectional areas corresponding to the diameters in
\code{diam.s}. }
\item{\code{hgt.s}:}{Object of class \code{"numeric"}: A vector of
sampled heights at which the \code{diam.s} are taken. }
\item{\code{vol.s}:}{Object of class \code{"numeric"}: A vector of
volume estimates associated with the sampled \code{hgt.s} and
associated diameters and cross-sectional areas. }
\item{\code{volEst}:}{Object of class \code{"numeric"}: The sample
mean volume estimate of the \code{vol.s} for the bole segment. }
\item{\code{volVar}:}{Object of class \code{"numeric"}: The within-bole
variance estimate of \code{volEst}. }
\item{\code{ci.lo}:}{Object of class \code{"numeric"}: The lower
1-\code{alphaLevel} confidence interval on the bole volume
estimate. }
\item{\code{ci.up}:}{Object of class \code{"numeric"}: The upper
1-\code{alphaLevel} confidence interval on the bole volume
estimate. }
\item{\code{alphaLevel}:}{Object of class \code{"numeric"}: The
two-tailed alpha-level for confidence interval construction. }
\item{\code{trueVol}:}{Object of class \code{"numeric"}: The true
volume for the stem segment being estimated (see \code{segBnds} in
the base class definition). }
\item{\code{relErrPct}:}{Object of class \code{"numeric"}: The
relative error in volume in percent. }
}
}
\section{Extends}{
Class \code{"\linkS4class{MonteCarloSampling}"}, directly.
}
\section{Methods}{
\describe{
\item{plot}{\code{signature(x = "crudeMonteCarlo", y = "missing")}:
Displays a plot of the stem object and sampled points. }
\item{summary}{\code{signature(object = "crudeMonteCarlo")}: Prints
a summary of the object. }
}
}
\references{
\describe{
%
\item{}{Gove, J. H. 2013. Monte Carlo sampling methods in
\pkg{sampSurf}. Package vignette.}
}
}
\author{
Jeffrey H. Gove %, \email{jhgove@unh.edu}
}
\seealso{
\code{\linkS4class{MonteCarloSampling}},
\code{\linkS4class{importanceSampling}},
\code{\linkS4class{controlVariate}},
\code{\linkS4class{antitheticSampling}}.
}
\examples{
showClass("crudeMonteCarlo")
}
\keyword{classes}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.