blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e41cb125fc0050e0e5f313b58d1a6b08b9d175f8
|
fa1cac5372cac6deda38a026e6ffe296a22fd186
|
/demo.R
|
3f508a68cf7bca009b1e6b105e90ee8013fe5a98
|
[] |
no_license
|
sVujke/inequality_measures_r_py
|
2752322e5a0c6142a61f6a5727eea88b520a8860
|
59c51603571c531b35362b60a3eee914d4c63526
|
refs/heads/master
| 2021-01-20T09:45:56.281558
| 2017-05-11T16:22:53
| 2017-05-11T16:22:53
| 90,283,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
demo.R
|
# Demo: compute a set of income-inequality measures (Gini, Atkinson family,
# Theil, coefficient of variation, Kolm) on a users dataset via the `ineq` package.
library(ineq)

data_set <- read.csv(file = 'users_dataset.csv', header = TRUE, sep = ",")
head(data_set)

income <- data_set$income

# Quick one-off print of the Gini index.
paste("gini:", ineq(income, type = "Gini"), sep = " ")
#print("gini: ",ineq(data_set$income,type = "Gini")
#plot(Lc(AirPassengers))

# Inequality indices; NA values are dropped where the function supports it.
gini <- ineq(income, type = "Gini")
atkinson0 <- Atkinson(income, parameter = 0, na.rm = TRUE)
atkinson05 <- Atkinson(income, parameter = 0.5, na.rm = TRUE)
atkinson1 <- Atkinson(income, parameter = 1, na.rm = TRUE)
thiel <- Theil(income, parameter = 0, na.rm = TRUE)
var_coef <- var.coeff(income)
kolm <- Kolm(income, parameter = 1, na.rm = TRUE)  # NOTE(review): computed but never printed below -- confirm whether a paste("Kolm: ", kolm) was intended

# Echo the results (auto-printed when run interactively / via Rscript).
paste("Gini: ", gini)
paste("Atkinson epsilon=0: ", atkinson0)
paste("Atkinson epsilon=0.5: ", atkinson05)
paste("Atkinson epsilon=1: ", atkinson1)
paste("Thiel: ", thiel)
paste("Coef of variation: ", var_coef)

# Console output from a previous run, kept for reference:
# [1] "Gini: 0.444510730810279"
# > paste("Atkinson epsilon=0: ", atkinson0)
# [1] "Atkinson epsilon=0: 0"
# > paste("Atkinson epsilon=0.5: ", atkinson05)
# [1] "Atkinson epsilon=0.5: 0.170146049026273"
# > paste("Atkinson epsilon=1: ", atkinson1)
# [1] "Atkinson epsilon=1: 0.350512160004805"
# > paste("Thiel: ", thiel)
# [1] "Thiel: 0.324566254000937"
# > paste("Coef of variation: ", var_coef)
# [1] "Coef of variation: 0.819013822648111"
|
153e5dc647c8702b5ace082ffe2af85e23f050f2
|
5f89f3a68e52f8cfc59d001efce81e2357aedb6b
|
/code_dtwcp/3-split-aki.R
|
626b05d3bed8674b2daa67d3708e7c820c0eb1c7
|
[] |
no_license
|
andreaczhang/DTW-CP
|
b399aa601e96956114a0443d0ab7d0f4a068450c
|
5c908643f62f7a0662cb1ded40ce787f79c45191
|
refs/heads/master
| 2022-11-14T23:05:46.832435
| 2020-07-14T13:16:03
| 2020-07-14T13:16:03
| 278,315,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,342
|
r
|
3-split-aki.R
|
# keep splitting, now AKI cohort
# the difference is only that I sample 350/150 control/cases
# 50 random sets for each day
#
# Refactored: the original file repeated the same three statements seven times
# (once per day). The copies are collapsed into loops over the day index d,
# with cutoff = 24*d hours. IMPORTANT: inside the seeded loop the RNG call
# order is kept identical to the original (day1..day7 within each split s),
# so the generated splits are bit-for-bit the same as before.
library(magrittr)
library(purrr)
library(dplyr)
library(tictoc)
# library(caret)

#### data
dataPath_rev_aki <- 'path/to/data/aki/'
aki_dinfo <- read.csv(paste0(dataPath_rev_aki, 'cohortInfo_akiAll.csv'))
aki_processed7days <- readRDS(file = paste0(dataPath_rev_aki, 'processed7day_aki.RData'))

# ------ select the proper window ------ #
# keep only stays discharged within 7 days (168 hours)
below7dayindex <- which(aki_dinfo$dischtime_hours <= 168)
dinfo7day <- aki_dinfo[below7dayindex, ]
# dinfo7day %>% nrow # 3705

# match the ID, double check outcomes
all.equal(paste0('icustay_', dinfo7day$icustay_id),
          names(aki_processed7days))

# --------- partition the static data ----------- #
# dinfo_by_day[[d]]: patients discharged within the first d*24 hours
n_days <- 7
n_splits <- 50
dinfo_by_day <- map(seq_len(n_days), function(d) {
  dinfo7day[which(dinfo7day$dischtime_hours <= 24 * d), ]
})

# ----- split into 50 ------ #
# for each day, query 50 random sets (different from sepsis!)
# use controled outcome (70/30%, 350 alive 150 dead)
# then save the train, test IDs into its own folder
# this will be used for creating the baseline data too
# trteid_by_day[[d]][[s]]: train/test index pair for day d, random split s
trteid_by_day <- replicate(n_days, vector("list", n_splits), simplify = FALSE)
set.seed(1)
for (s in seq_len(n_splits)) {
  for (d in seq_len(n_days)) {
    # the function here is different from sepsis.
    # (inner loop over d preserves the original RNG consumption order)
    trteid_by_day[[d]][[s]] <- trteIDgenerator_aki(outcometable = dinfo_by_day[[d]])
  }
}

# now link back to the icustay id and outcomes
# note that the IDs names are slightly different from sepsis
# then name the splits and save one RDS file per day (same paths as before)
for (d in seq_len(n_days)) {
  day_split <- map(seq_len(n_splits), function(x) {
    matchID_eachsplit(trainID = trteid_by_day[[d]][[x]]$randindex_tr,
                      testID = trteid_by_day[[d]][[x]]$randindex_te,
                      dinfoDF = dinfo_by_day[[d]])
  })
  # ======= sanity check ====== #
  names(day_split) <- paste0('split', seq_len(n_splits))
  saveRDS(day_split,
          paste0(dataPath_rev_aki,
                 sprintf('day%d_aki_50splits/alloutcomes_aki_day%d.RData', d, d)))
}
|
5260ad1a17362010859690a3b9fed244db569631
|
07f566e07ed138e6d1db5dfbbfaf977be87c91ad
|
/01_hello_world.R
|
bcc17ebffe99daf49cf0ca94059eb1c1c3fd367f
|
[
"MIT"
] |
permissive
|
eshanmherath/r-in-data-science
|
3bef91450f2041ce09cca5ce1faf105afb02032c
|
7aa4bdff396c7325413190cc437cd834a0516b4c
|
refs/heads/master
| 2021-09-03T21:54:35.242628
| 2018-01-12T09:37:37
| 2018-01-12T09:37:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80
|
r
|
01_hello_world.R
|
# R Programming Hello World
# Minimal demo script: bind a greeting string and echo it to the console.
hello.world <- "Hello World"
print(hello.world)
|
251faaae16c6ae0668c55fd513c36d93e0da987d
|
f54a2ac4d0cf19f638b700d87f3c42ddf9807f86
|
/Xinyu Leng homework3-3.R
|
ab5839e774200c652a30955d486dbe4a0c4acb14
|
[] |
no_license
|
XinyuLeng/Datamining_Homework3
|
ad79662d81843fa974741cacc04db49068fbab29
|
f6e2d40c8006bd0684bc8d2c9f06b767d9c1dbbb
|
refs/heads/main
| 2023-04-03T22:28:13.103887
| 2021-04-12T08:36:59
| 2021-04-12T08:36:59
| 357,117,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,590
|
r
|
Xinyu Leng homework3-3.R
|
# \section{Problem 3:Predictive model building: California housing}
# Fits an all-subsets linear regression for medianHouseValue on the CA housing
# data and draws three maps: original values, predictions, and residuals.
# NOTE(review): installing packages at the top of a script runs on every
# execution; consider guarding with requireNamespace() -- TODO confirm intent.
install.packages("ggmap")
install.packages("leaps")
library(ggplot2)
library(tidyverse)
library(ggmap)
library(leaps)
library(scales)
CAhousing <- read.csv("C:/Users/Administrator/Desktop/CAhousing.csv", stringsAsFactors=TRUE)
summary(CAhousing)
# sampling
# 70/30 train/test split, seeded for reproducibility
set.seed(1234)
train <- sample(nrow(CAhousing),0.7*nrow(CAhousing))
CAhousingtrain <- CAhousing[train,]
CAhousingtest <- CAhousing[-train,]
# Q1 build the best predictive model for medianHouseValue
# model selection
### Stepwise regression cannot evaluate every candidate model, so all-subsets
### regression is used here instead.
### This is a fairly concise approach; if it differs from my teammate's best
### model, the plotting code below can be adapted to match his model.
# using all-subsets regression to choose the best model
leaps <-regsubsets( medianHouseValue ~ longitude + latitude + housingMedianAge + totalRooms + totalBedrooms + population + households + medianIncome, data=CAhousingtrain, nbest=8)
plot(leaps, scale="adjr2")
# As we can see in the plots, the model with all 8 variables has the best R^2, which is equal to 0.65. So the best predictive model is longitude + latitude + housingMedianAge + totalRooms + totalBedrooms + population + households + medianIncome
# final model
fit <- lm( medianHouseValue ~ longitude + latitude + housingMedianAge + totalRooms + totalBedrooms + population + households + medianIncome, data=CAhousingtrain)
summary(fit)
coef(fit)
coef(fit) %>% round (3)
# predict the model
test_actual = CAhousingtest$medianHouseValue
test_predictions = predict(fit, CAhousingtest)
# get maps
# bounding box for California (sic: variable name "califonia")
califonia <- c(left = -125, bottom = 32, right = -113, top = 42)
# NOTE(review): the Stamen tile service has been retired; get_stamenmap() may
# no longer work -- consider migrating to get_stadiamap(). TODO confirm.
map <- get_stamenmap(califonia, zoom = 9, maptype = "watercolor")
ggmap(map)
# figure 1:a plot of the original data, using a color scale to show medianHouseValue versus longitude (x) and latitude (y).
head(CAhousing[c('longitude','latitude','medianHouseValue')])
### Figure 1, variant 1: additionally map population to point size -- may look nicer.
plot_map11 = ggmap(map, base_layer = ggplot(CAhousing,
aes(x = longitude, y = latitude, color = medianHouseValue
))) +
geom_point(aes(size = population), alpha = 0.4) +
xlab("Longitude") +
ylab("Latitude") +
ggtitle("medianHouseValue versus longitude and latitude(original data)") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_distiller(palette = "Paired",labels=comma) +
labs(color = "Median House Value", size = "Population")
plot_map11
# Figure 1, variant 2: as the assignment asks, only median house value versus
# longitude/latitude -- more direct.
plot_map12 = ggmap(map, base_layer = ggplot(CAhousing,
aes(x = longitude, y = latitude, color = medianHouseValue
))) +
geom_point(alpha = 0.4) +
xlab("Longitude") +
ylab("Latitude") +
ggtitle("medianHouseValue versus longitude and latitude(original data)") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_distiller(palette = "Paired",labels=comma) +
labs(color = "Median House Value")
plot_map12
### Figure 1: pick either of the two variants above.
# figure 2:a plot of your model's predictions of medianHouseValue versus longitude (x) and latitude (y).
plot_map2 = ggmap(map, base_layer = ggplot(CAhousingtest,
aes(x = longitude, y = latitude, color = test_predictions))) +
geom_point(alpha = 0.4) +
xlab("Longitude") +
ylab("Latitude") +
ggtitle("medianHouseValue versus longitude and latitude(prediction)") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_distiller(palette = "Paired",labels=comma) +
labs(color = "test_prediction")
plot_map2
# figure 3:a plot of your model's errors/residuals versus longitude (x) and latitude (y)
plot_map3 = ggmap(map, base_layer = ggplot(CAhousingtest,
aes(x = longitude, y = latitude, color = test_predictions - test_actual ))) +
geom_point(aes(size = abs(test_predictions - test_actual)),alpha = 0.4) +
xlab("Longitude") +
ylab("Latitude") +
ggtitle("residuals versus longitude and latitude") +
theme(plot.title = element_text(hjust = 0.5)) +
scale_color_distiller(palette = "Paired",labels=comma) +
labs(color = "residuals", size = "Magnitude of Price Difference")
plot_map3
|
da66dc8dcbb7645c966f5e21d4cf801d7d43e231
|
04825f1f6b860bb08287c13dda6d5f8b7587f987
|
/plot2.R
|
5bec83b88dbe5469c6600ebb443a808fafd57bfa
|
[] |
no_license
|
ClauMorgado/ExData_Plotting1
|
4520d053df38176b9f46ea577ff477c9f4d924ad
|
fbc9a3daf927321c8e90ffe18346f7dbd2e98e87
|
refs/heads/master
| 2021-01-14T13:57:38.171620
| 2015-04-11T11:10:21
| 2015-04-11T11:10:21
| 33,732,437
| 0
| 0
| null | 2015-04-10T14:22:06
| 2015-04-10T14:22:05
| null |
UTF-8
|
R
| false
| false
| 449
|
r
|
plot2.R
|
# plot2.R -- line chart of Global Active Power over 1-2 Feb 2007, saved as plot2.png.
power_data <- read.table("household_power_consumption.txt",
                         sep = ";", header = TRUE, stringsAsFactors = FALSE)

# Restrict to the two target days (dates stored as d/m/Y strings).
feb_power <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]

# Combine date and time columns into a single timestamp for the x axis.
feb_power$DateTime <- strptime(paste(feb_power$Date, feb_power$Time, sep = " "),
                               "%d/%m/%Y %H:%M:%S")

png("plot2.png", width = 480, height = 480)
Sys.setlocale("LC_ALL", "C")  # English weekday labels regardless of system locale
plot(feb_power$DateTime, as.numeric(feb_power$Global_active_power),
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
3fb3ea9bcf8570bfd1bc37fe5fbbf802895ed9ef
|
f7e9d03142cf38c239eed63b2e7cc41a45456398
|
/RESULTS_tableS4.R
|
21f6978ebd7db845290db2d30428f90f952d8f0f
|
[] |
no_license
|
bozenne/Article-lvm-small-sample-inference
|
1e67c5f7de9e00c29b74c9b2c0e6b50d06004e3d
|
8290c2452a247bd5d58f7636363e1e9e7b68e436
|
refs/heads/master
| 2022-01-29T03:37:23.136079
| 2022-01-18T09:20:27
| 2022-01-18T09:20:27
| 166,855,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,844
|
r
|
RESULTS_tableS4.R
|
## RESULTS_tableS4.R -- builds Table S4: empirical vs modeled Student
## distribution of (corrected) Wald statistics at n = 20.
## Depends on fitStudent() from the project file FCT.R (not shown here).
path.data2 <- "./data2/"
## * package
library(data.table)
source("FCT.R") ## get function fitStudent
## Never-executed sanity check of fitStudent() on known reference
## distributions (Student t3, normal, uniform); expected output kept below.
if(FALSE){ ## sanity check
set.seed(10)
dt.test <- rbind(data.table(name = "rt", estimate.MLcorrected = rt(1e3, df = 3), se.MLcorrected = 1, df.MLcorrected = 3),
data.table(name = "rnorm", estimate.MLcorrected = rnorm(1e3), se.MLcorrected = 1, df.MLcorrected = Inf),
data.table(name = "runif", estimate.MLcorrected = runif(1e3), se.MLcorrected = 1, df.MLcorrected = NA))
fitStudent(dt.test, robust = FALSE)
## se.empirical df.empirical mean.se mean.df Etype1 zdist.ksD zdist.ksP tdist.ksD tdist.ksP empirical.cv
## rt 1.0243040 3.061981e+00 1 3 0.05157778 0.07855195 8.739392e-06 0.03930024 9.108476e-02 1
## rnorm 0.9802015 4.408587e+01 1 Inf 0.05173461 0.02132348 7.534688e-01 0.01818344 8.955425e-01 1
## runif 0.2918500 2.455879e+07 1 NaN NaN 0.07207078 6.157547e-05 0.07218601 5.956193e-05 1
}
## * Import
## ** simulation results
## Load the simulation results from disk; the dataset is not in the repository
## because of its size, so fail with an explanatory message if it is missing.
if(dir.exists(path.data2)){
dist.simulation <- readRDS(file.path("data2","dist-simulation.rds"))
## saveRDS(lapply(dist.simulation, function(x){x[n==20]}), file = file.path("data2","dist-simulation.rds"))
}else{
stop("Data is too large to be stored on Github (30Mb). Contact the corresponding author to obtain the dataset. \n")
}
## * fit distributions
## One fitStudent() call per scenario (MM / factor / lvm), with model-based
## (robust = FALSE) and robust (robust = TRUE) standard errors, restricted to
## the simulations with sample size n = 20.
checkStud.MM <- fitStudent(dist.simulation$MM[n==20],
robust = FALSE,
seqLink = c("Y2","eta~Gene1Y"))
checkStud.robustMM <- fitStudent(dist.simulation$MM[n==20],
robust = TRUE,
seqLink = c("Y2","eta~Gene1Y"))
checkStud.factor <- fitStudent(dist.simulation$factor[n==20],
robust = FALSE,
seqLink = c("Y2","Y1~Gene2Y","eta~Gene1Y","Y4~eta"))
checkStud.robustfactor <- fitStudent(dist.simulation$factor[n==20],
robust = TRUE,
seqLink = c("Y2","Y1~Gene2Y","eta~Gene1Y","Y4~eta"))
checkStud.lvm <- fitStudent(dist.simulation$lvm[n==20],
robust = FALSE, value.max = 10,
seqLink = c("Y2","Y1~Gene2Y","eta1~Gene1Y","Y4~eta1","eta1~eta2","Y1~~Y2"))
checkStud.robustlvm <- fitStudent(dist.simulation$lvm[n==20],
robust = TRUE, value.max = 10,
seqLink = c("Y2","Y1~Gene2Y","eta1~Gene1Y","Y4~eta1","eta1~eta2","Y1~~Y2"))
## Wald statistics for two lvm links, kept for the (commented) histogram below.
dt_gghist <- dist.simulation$lvm[n==20 & name %in% c("eta1~eta2","Y1~~Y2"), .(wald = na.omit(estimate.MLcorrected/se.MLcorrected)), by = "name"]
## gg_hist <- ggplot(dt_gghist, aes(x = wald))
## gg_hist <- gg_hist + geom_histogram(bins = 100, aes(y=..density..), color = "red") + facet_wrap(~name)
## gg_hist <- gg_hist + stat_function(fun = dnorm, n = 101, args = list(mean = 0, sd = 1.1), size = 2)
## Columns dropped from the printed table.
rm.col <- c("empirical.cv","zdist.ksD","zdist.ksP")
keep.col <- setdiff(colnames(checkStud.MM),rm.col)
## LaTeX parameter labels per scenario.
## NOTE(review): `greek` labels must stay positionally aligned with the link
## order in `row` below -- verify both together when editing.
greek <- list(a = c("$\\nu_2$","$\\gamma_1$"),
b = c("$\\nu_2$","$\\lambda_4$","$\\gamma_1$","$k_1$"),
c = c("$\\nu_2$","$k_1$","$\\lambda_4$","$\\gamma_1$","$b_1$","$\\sigma_{12}$"))
## Row (link) selection and display order per scenario.
row <- list(a = c("Y2","eta~Gene1Y"),
b = c("Y2","Y4~eta","eta~Gene1Y","Y1~Gene2Y"),
c = c("Y2","Y1~Gene2Y","Y4~eta1","eta1~Gene1Y","eta1~eta2","Y1~~Y2"))
## * Table S4
## Stack the three scenarios (model-based fits) into one data.frame for xtable.
df.table1 <- rbind(cbind(scenario = "A", parameter = greek$a, as.data.frame(checkStud.MM[row$a,keep.col])),
cbind(scenario = "B", parameter = greek$b, as.data.frame(checkStud.factor[row$b,keep.col])),
cbind(scenario = "C", parameter = greek$c, as.data.frame(checkStud.lvm[row$c,keep.col]))
)
df.table1$scenario <- as.character(df.table1$scenario)
rownames(df.table1) <- NULL
## Pre-format the numeric columns as fixed-digit strings.
df.table1$se.empirical <- formatC(round(df.table1$se.empirical, digits = 3), format = "f", digits = 3)
df.table1$df.empirical <- formatC(round(df.table1$df.empirical, digits = 1), format = "f", digits = 1)
## NOTE(review): digits = 0 here while the rounding above (and the robust
## table below) uses 3 -- possibly intentional because mean.se is exactly 1,
## but confirm.
df.table1$mean.se <- formatC(round(df.table1$mean.se, digits = 3), format = "f", digits = 0)
df.table1$mean.df <- formatC(round(df.table1$mean.df, digits = 1), format = "f", digits = 1)
df.table1$Etype1 <- formatC(round(df.table1$Etype1, digits = 3), format = "f", digits = 3)
df.table1$tdist.ksD <- formatC(round(df.table1$tdist.ksD, digits = 3), format = "f", digits = 3)
df.table1$tdist.ksP <- format.pval(df.table1$tdist.ksP, digits = 3, eps = 1e-3)
## Blank repeated scenario letters so each appears once per group.
df.table1[duplicated(df.table1$scenario),"scenario"] <- ""
## Custom LaTeX header rows and extra vertical spacing between scenarios.
addtorow <- list()
addtorow$pos <- list(0,0,2,6)
addtorow$command <- c("&&\\multicolumn{2}{c}{Empirical Student} & \\multicolumn{2}{c}{Modeled Student} & Expected
& \\multicolumn{2}{c}{KS-test} \\\\ \\cmidrule(lr){3-4} \\cmidrule(lr){5-6} \\cmidrule(lr){8-9} \n",
"Scenario & parameter & dispersion & df & dispersion & df & type 1 error & statistic & p-value \\\\\n",
"[4mm] ","[4mm] ")
## NOTE(review): "cumluative" in the caption string is a typo carried into the
## article output; left unchanged here since it is runtime text.
print(xtable::xtable(df.table1,
label = "tab:validation",
caption = paste("Comparison between the empirical distribution of the Wald statistic vs. the modeled distribution (after correction) for n=20. ",
"Empirical Student: standard error and degrees of freedom of a non-standardized Student's t-distribution fitted to the empirical distribution.",
"Modeled Student: average estimated degrees of freedom over the simulations.",
"Expected type 1 error: rejection rate under the empirical Student when using the 2.5 and 97.5\\% quantile of the modeled Student.",
"Kolmogorov Smirnov test: comparison between the empirical cumluative distribution function (cdf) and the cdf of the empirical Student."),
),
add.to.row = addtorow,
include.colnames = FALSE,
include.rownames=FALSE,
sanitize.text.function = function(x){x},
booktabs = TRUE)
## Expected LaTeX output from a previous run, kept for reference:
## \begin{table}[ht]
## \centering
## \begin{tabular}{lllllllll}
## \toprule
## &&\multicolumn{2}{c}{Empirical Student} & \multicolumn{2}{c}{Modeled Student} & Expected
## & \multicolumn{2}{c}{KS-test} \\ \cmidrule(lr){3-4} \cmidrule(lr){5-6} \cmidrule(lr){8-9}
## Scenario & parameter & dispersion & df & dispersion & df & type 1 error & statistic & p-value \\
## \midrule
## A & $\nu_2$ & 1.006 & 40.8 & 1 & 36.7 & 0.051 & 0.005 & 0.659 \\
## & $\gamma_1$ & 1.004 & 17.9 & 1 & 18.3 & 0.051 & 0.004 & 0.961 \\
## [4mm] B & $\nu_2$ & 0.998 & 39.2 & 1 & 9.1 & 0.029 & 0.006 & 0.513 \\
## & $\lambda_4$ & 1.067 & 117.2 & 1 & 5.9 & 0.023 & 0.004 & 0.885 \\
## & $\gamma_1$ & 1.088 & 311.8 & 1 & 10.5 & 0.043 & 0.006 & 0.455 \\
## & $k_1$ & 1.063 & 14.8 & 1 & 17.1 & 0.066 & 0.005 & 0.792 \\
## [4mm] C & $\nu_2$ & 1.026 & 45.5 & 1 & 10.1 & 0.035 & 0.006 & 0.536 \\
## & $k_1$ & 1.106 & 12.6 & 1 & 17.1 & 0.080 & 0.004 & 0.936 \\
## & $\lambda_4$ & 1.080 & 145.9 & 1 & 6.0 & 0.025 & 0.008 & 0.237 \\
## & $\gamma_1$ & 1.152 & 49.6 & 1 & 10.1 & 0.059 & 0.009 & 0.135 \\
## & $b_1$ & 1.127 & 783296.7 & 1 & 3.6 & 0.010 & 0.017 & <0.001 \\
## & $\sigma_{12}$ & 1.074 & 934003.5 & 1 & 7.8 & 0.031 & 0.038 & <0.001 \\
## \bottomrule
## \end{tabular}
## \caption{Comparison between the empirical distribution of the Wald statistic vs. the modeled distribution (after correction) for n=20. Empirical Student: standard error and degrees of freedom of a non-standardized Student's t-distribution fitted to the empirical distribution. Modeled Student: average estimated degrees of freedom over the simulations. Expected type 1 error: rejection rate under the empirical Student when using the 2.5 and 97.5\% quantile of the modeled Student. Kolmogorov Smirnov test: comparison between the empirical cumluative distribution function (cdf) and the cdf of the empirical Student.}
## \label{tab:validation}
## \end{table}
## * Additional table: robust (not in the article)
## Same construction as Table S4 but with the robust fits; wrapped in
## if(FALSE) so it never runs automatically -- kept for manual use.
if(FALSE){
df.table2 <- rbind(cbind(scenario = "A", parameter = greek$a, as.data.frame(checkStud.robustMM[row$a,keep.col])),
cbind(scenario = "B", parameter = greek$b, as.data.frame(checkStud.robustfactor[row$b,keep.col])),
cbind(scenario = "C", parameter = greek$c, as.data.frame(checkStud.robustlvm[row$c,keep.col]))
)
df.table2$scenario <- as.character(df.table2$scenario)
rownames(df.table2) <- NULL
## Formatting mirrors Table S4 but via as.character(round(...)).
df.table2$se.empirical <- as.character(round(df.table2$se.empirical, digits = 3))
df.table2$df.empirical <- as.character(round(df.table2$df.empirical, digits = 1))
df.table2$mean.se <- as.character(round(df.table2$mean.se, digits = 3))
df.table2$mean.df <- as.character(round(df.table2$mean.df, digits = 1))
df.table2$Etype1 <- as.character(round(df.table2$Etype1, digits = 3))
df.table2$tdist.ksD <- as.character(round(df.table2$tdist.ksD, digits = 3))
df.table2$tdist.ksP <- format.pval(df.table2$tdist.ksP, digits = 3, eps = 1e-3)
df.table2[duplicated(df.table2$scenario),"scenario"] <- ""
addtorow <- list()
addtorow$pos <- list(0,0,2,6)
addtorow$command <- c("&&\\multicolumn{2}{c}{Empirical Student} & \\multicolumn{2}{c}{Modeled Student} & Expected
& \\multicolumn{2}{c}{KS-test} \\\\ \\cmidrule(lr){3-4} \\cmidrule(lr){5-6} \\cmidrule(lr){8-9} \n",
"Scenario & parameter & se & df & se & df & type 1 error & statistic & p-value \\\\\n",
"[4mm] ","[4mm] ")
print(xtable::xtable(df.table2,
label = "tab:robustvalidation",
caption = paste("Comparison between the empirical distribution of the robust Wald statistic vs. the modeled distribution (after correction) for n=20. ",
"Empirical Student: standard error and degrees of freedom of a Student's t-distribution fitted on the empirical distribution.",
"Modeled Student: average estimated degrees of freedom over the simulations.",
"Expected type 1 error: rejection rate under the empirical Student when using the 2.5 and 97.5\\% quantile of the modeled Student.",
"Kolmogorov Smirnov test: comparison between the empirical cumluative distribution function (cdf) and the cdf of the empirical Student."),
),
add.to.row = addtorow,
include.colnames = FALSE,
include.rownames=FALSE,
sanitize.text.function = function(x){x},
booktabs = TRUE)
}
|
29a7d08075b18af4d2cccb4a67ac955607e790e2
|
53a0e1eebcf10e6d661865624227e06774bc30f4
|
/man/readControls.Rd
|
8f6c9da10e98830fae29b846f260cc1496c18250
|
[] |
no_license
|
graumannlab/readat
|
447416ce15be432e1349db57896edca165b973d1
|
34de431d478dac37d91bd87d1efdab7d43362da8
|
refs/heads/master
| 2020-09-10T01:02:53.251425
| 2020-05-14T12:24:44
| 2020-05-14T12:24:44
| 67,595,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,382
|
rd
|
readControls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-sample-submision-input-files.R
\name{readControls}
\alias{readControls}
\title{Read SomaLogic Sample Submission Controls File}
\usage{
readControls(file = "controls.csv")
}
\arguments{
\item{file}{A string denoting the path to an input CSV file. See Input file
specification section.}
}
\value{
A \code{data.table} with 96 rows and 2 columns.
\describe{
\item{PlatePosition}{A letter followed by a number, constructed from the
Subarray ("A" for 1, "B" for 2, etc.) and the slide number from 1 to 12.}
\item{BarCode}{Sample barcode for QC, Calibrator, and Buffer samples, in the
form "I" followed by 6 digits.}
}
}
\description{
Read SomaLogic Sample Submission Controls File
}
\section{Input file specification}{
A CSV file without a header line containing up to 96 rows and two
columns as follows.
\enumerate{
\item{Plate positions from A1, A2, through to H12.}
\item{Barcodes in the form "I" followed by 6 digits.}
}
}
\examples{
# See ?writeSampleSubmissionForm for a more complete example
withr::with_dir(
system.file("extdata", package = "readat"),
{
(controls <- readControls())
}
)
}
\seealso{
\code{\link{readSlides}}, \code{\link{readComments}}, and
\code{\link{readSamples}} for reading other submission forms and
\code{\link{writeSampleSubmissionForm}} for usage examples.
}
|
e55ebe23df06ac0f88cd5fb59c91ace529d37192
|
de3f9765bc5085aa11c508b32a1a8565cf2a8739
|
/DAM1_reader.R
|
cafcc0695bacf77ee92f2abad9160485cc75665c
|
[] |
no_license
|
nli8888/Circadian_Rhythm
|
4922bdc87369a36a517738a3c5ad2eb8bd2b5ef2
|
922e6e5248ccd4a44d5c3a23f7a8b85b457e08bf
|
refs/heads/master
| 2021-01-19T22:52:42.547952
| 2017-06-27T07:43:24
| 2017-06-27T07:43:24
| 88,878,910
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,314
|
r
|
DAM1_reader.R
|
library(rethomics)
# Read one DAM1 activity-monitor text file into a keyed data.table
# (one row per recorded activity sample).
#
# Header parsing (first line, first whitespace-separated token), as implied by
# the indexing below -- TODO confirm against the DAM1 export spec:
#   chars 1-6      raw date (DDMMYY)
#   chars 7-11     condition code
#   4 chars ending 3 before the end: monitor name
#   last 2 chars   channel number
# Numeric payload (lines 2 onward): element 2 is the sampling period expressed
# in `time_format` units; elements 4..end are the activity counts.
#
# Args:
#   file             path to a single DAM1 .txt file.
#   time_format      unit of the sampling period in the file: "hr"/"min"/"sec".
#   time_to_round_to bin width in seconds (default: one hour); retained for
#                    backward compatibility -- only the commented-out actogram
#                    prototyping (bottom of file) used it.
#   ref_hour         reference hour / time zone -- not supported yet.
# Returns: data.table keyed by experiment_id, region_id, date, machine_name.
DAM1_single_reader = function(file,
                              time_format = "min",
                              time_to_round_to = rethomics::hours(1), # one hour, in seconds
                              ref_hour = NULL){
  # ---- parse the one-line header ----
  header = scan(file, what="", nmax= 1, sep="\n")
  token = strsplit(header, " ")[[1]][1]   # hoisted: was recomputed per field
  n_tok = nchar(token)
  channel = as.numeric(substr(token, n_tok-1, n_tok))
  print(channel)
  condition = substr(token, 7, 11)
  print(condition)
  raw_date = substr(token, 1, 6)
  print(raw_date)
  monitor = substr(token, n_tok-6, n_tok-3)
  print(monitor)
  expID = paste(raw_date, monitor, sep="")

  # ---- numeric payload ----
  infile = scan(file, what=1, skip=1, sep="\n")
  sample_freq = infile[2]
  # normalise the sampling period to seconds
  if (time_format == "hr") {
    sample_freq = sample_freq*60*60
  } else if (time_format == "min"){
    sample_freq = sample_freq*60
  } else if (time_format == "sec"){
    # already in seconds
  } else {
    # BUG FIX: the original issued warning() followed by `break`, which is
    # invalid outside a loop and aborted with an unrelated error message;
    # fail fast with a proper error instead.
    stop('argument for time_format not acceptable; time_format can either be "hr"/"min"/"sec"')
  }

  activity = infile[4:length(infile)]
  # Elapsed seconds of each sample: 0, f, 2f, ...
  # Vectorised replacement for the original grow-by-c() loop
  # (same values, O(n) instead of O(n^2)).
  t_list = (seq_along(activity) - 1) * sample_freq

  # (t_round/hour/day were computed but never used -- the actogram
  # prototyping that consumed them survives, commented out, at the bottom
  # of this file.)
  dt = data.table(experiment_id=expID,
                  condition=condition,
                  machine_name=monitor,
                  region_id=channel,
                  date=raw_date,
                  activity=activity,
                  t=t_list
                  )
  setkeyv(dt, c("experiment_id", "region_id", "date", "machine_name"))
  return(dt)
}
# Read every DAM1 .txt file found under PATH and stack the results into one
# keyed data.table. Extra arguments (...) are forwarded to DAM1_single_reader().
DAM1_multi_reader = function(PATH,
                             ...){ # TODO: use rethomics:::checkDirExists to validate PATH first
  txt_files <- list.files(PATH, pattern=".*\\.txt", full.names=TRUE)
  per_file <- lapply(txt_files, DAM1_single_reader, ...)
  combined <- rbindlist(per_file)
  # propagate the key set by the single-file reader
  setkeyv(combined, key(per_file[[1]]))
  combined
}
###MULTIFILE###
#PATH="/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115C5M"
#DT = DAM1_multi_reader(PATH, time_format = "min", time_to_round_to = 60*60)
## filelist = list.files(PATH, pattern=".*\\.txt", full.names=TRUE)
## filelist
## x = lapply(filelist, DAM1_reader, time_format="min")
## DT = rbindlist(x)
## setkeyv(DT, key(x[[1]]))
###SINGLEFILE###
# DT = DAM1_single_reader("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C01.txt")
# actod = copy(DT)
# actod = actod[,.(sum_activity = sum(activity), hour = hour, day = day), by = t_round]
# actod = unique(actod)
# actod2 = copy(actod)
# actod2 = actod2[,day := day-1]
# actod2 = actod2[,hour := hour + 24]
# actod = actod[day<max(day)]
# actodd = rbind(actod, actod2)
# actodd = actodd[day>-1]
# actodd = actodd[, day_str := sprintf("day\n%03d",day)]
#
#
# p = ggplot(actodd, aes(hour,ymax=sum_activity, ymin=min(sum_activity))) +
# geom_ribbon() +
# facet_grid(day_str ~ .) + scale_x_continuous(name="time (h)",breaks = 0:8 * 6)+
# scale_y_continuous(name="activity")
# p = ggplot(actodd, aes(x=hour, y=sum_activity)) +
# geom_col() +
# facet_grid(day_str ~ .) + scale_x_continuous(name="time (h)",breaks = 0:8 * 6)+
# scale_y_continuous(name="activity")
# p
#
# d = DT[region_id == 1 & machine_name == "M007"]
# e = copy(d)
# e = e[, day := day-1]
#
# d = d[, .(mean_activity = mean (activity), hour=hour), by = t_round]
#dplot = ggplot(d, aes(x=t_round, y=mean_activity)) + geom_line()
#dplot
####PLOTS THAT Q SHOWED ME####
#overviewPlot(activity, DT, machine_name)
#overviewPlot(activity, DT[region_id==1], machine_name)
#ethogramPlot(activity, DT, machine_name, error="sem")
#ethogramPlot(activity, DT, facet_var=machine_name, error="sem")
# x = DAM1_read("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C01.txt", "min")
# x2 = DAM1_read("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C02.txt", "min")
# x3 = DAM1_read("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C03.txt", "min")
# x4 = DAM1_read("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C04.txt", "min")
# x5 = DAM1_read("/media/nick/Data/Users/N/Documents/MSc_Bioinfo/2016/Data_Analysis_Project/Circadian_Rhythm/per_rescue_v2/120115A5M/120115A5mCtM007C05.txt", "min")
# dt = x[, .(mean_activity = mean (activity)), by = t_round]
# dt2 = x2[, .(mean_activity = mean (activity)), by = t_round]
# dt3 = x3[, .(mean_activity = mean (activity)), by = t_round]
# dt4 = x4[, .(mean_activity = mean (activity)), by = t_round]
# dt5 = x5[, .(mean_activity = mean (activity)), by = t_round]
#plot1 = ggplot(x, aes(x=t , y=activity)) + geom_line()
# plot1 = ggplot(dt, aes(x=t_round, y=mean_activity)) + geom_line()
# plot2 = ggplot(dt2, aes(x=t_round, y=mean_activity)) + geom_line()
# plot3 = ggplot(dt3, aes(x=t_round, y=mean_activity)) + geom_line()
# plot4 = ggplot(dt4, aes(x=t_round, y=mean_activity)) + geom_line()
# plot5 = ggplot(dt5, aes(x=t_round, y=mean_activity)) + geom_line()
#
# plot1
# plot2
# plot3
# plot4
# plot5
#DT = rbind(x, x2, x3, x4, x5)
#data.table rolling join -> for subsampling
# t = c(1,1,1,1:50) * 60
# t
# t_round = floor(t/(5*60))*5*60
# t_round
|
78e4c9749370936e5f490e8a0db883163202873c
|
6f59eb1098f964b0611ffa60127759eb11f8d968
|
/src/R/PlotDiscriminateErrorFunctions.R
|
f2d5d10c245c23c88b67927c1cc9c52c3dbbdd6a
|
[] |
no_license
|
evolvedmicrobe/cafe-quality
|
f1403725ea11f771a70b5dea27cf3febaad09c0f
|
c54e2eb629cb89bf44acd48a497745ee4b04a4a7
|
refs/heads/master
| 2021-01-15T13:34:25.551557
| 2015-04-02T16:37:56
| 2015-04-02T16:40:53
| 25,172,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
PlotDiscriminateErrorFunctions.R
|
# Compare three per-mutation scoring functions over a grid of score differences.
x <- seq(-13.8, 4.5, .01)

# Log-sigmoid (CCS) objective: -log(1 + exp(x)); quick standalone plot.
y <- -log(1 + exp(x))
plot(x, y)

# Logistic (Quiver/MCE) scoring function.
y2 <- 1 / (1 + exp(-x))
plot(x, y2)

# Empirical error rate: step function, 0 for x < 0 and 1 for x >= 0.
n <- length(x)
y3 <- as.numeric(x >= 0)

# Stack the three curves in long form for a single ggplot figure.
# Note the CCS curve is negated (-y = log(1 + exp(x))) for display.
xs <- c(x, x, x)
ys <- c(-y, y2, y3)
group <- rep(c("Log-Sigmoid (CCS)", "Logistic (Quiver/MCE)", "Empirical Error Rate"), each = n)
d <- data.frame(xs, ys, group)
ggplot(d, aes(x=xs, y = ys, colour=group))+geom_line(lwd=1.2)+ theme_bw(base_size=16)+labs(x="Mutations Score Difference", y = "Function Value", title="Scoring Schemes")+scale_colour_discrete(name="Scoring Function\n(per mutation)")
|
c3b944c8e33467e89b38de9f6fa323815d63e240
|
e41839aeffcce1a9c8be1983f47633eabdce4d76
|
/P1/Codigo/ParteB.R
|
118af9421461256a54cc2d180c4993d913107de9
|
[] |
no_license
|
byKakayo/analise_reco_padroes
|
ca491e0550fe985bb57f6ed47f15ffea23e921cd
|
3b921e102de10a8726a19e69be6b4ab9881fb796
|
refs/heads/master
| 2022-12-18T23:36:48.780445
| 2020-09-18T20:48:48
| 2020-09-18T20:48:48
| 261,283,263
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,531
|
r
|
ParteB.R
|
# PROBABILISTIC AUTOMATON (exercise 6(d) and 6(e))
# Load the R file with the helper functions used below (generate_pattern).
source("functions.R")
# Nodes (states) of the automaton.
nos <-c("0", "1", "2", "3", "4", "5")
# Transition probabilities, listed row by row (each row sums to 1).
dt <-c(0.9, 0.882, 0 , 0 , 0 , 0.01 ,
0.1, 0.098, 0 , 0 , 0 , 0 ,
0 , 0.02 , 0.2, 0.194, 0 , 0 ,
0 , 0 , 0.8, 0.776, 0 , 0 ,
0 , 0 , 0 , 0.03 , 0.5, 0.495,
0 , 0 , 0 , 0 , 0.5, 0.495)
# Turn the probability vector into the labelled transition matrix.
mt <-matrix(data = dt, byrow = T, ncol = 6, dimnames = list(nos, nos))
# Number of iterations (pattern length).
n = 500
# Initial state.
e = "0"
# The transition matrices for the automata of items 6(d) and 6(e) are
# identical, so the same matrix is used to generate both patterns.
# Generate the pattern and store it in a matrix (col 1 = step, col 2 = state).
ve1 = generate_pattern(nos, mt, n, e)
ve2 = generate_pattern(nos, mt, n, e)
# Square-wave plot of automaton 6(d).
plot(x = ve1[,1],
y = ve1[,2],
type='s',
axes=F,
xlab="iteração",
ylab="valor",
xlim=c(0,n),
ylim=c(0,5),
main = "Visualização do padrão gerado - Autômato 6(d)")
axis(1, pos = -0.1)
axis(2, pos = -5)
# Collapse the pattern to the two values assumed by automaton 6(e):
# states {2, 4} map to "0" and states {3, 5} map to "1".
ve2[ve2[,2] == "2",2] <- "0"
ve2[ve2[,2] == "4",2] <- "0"
ve2[ve2[,2] == "3",2] <- "1"
ve2[ve2[,2] == "5",2] <- "1"
plot(x = ve2[,1],
y = ve2[,2],
type='s',
axes=F,
xlab="iteração",
ylab="valor",
xlim=c(0,n),
ylim=c(0,1),
main = "Visualização do padrão gerado - Autômato 6(e)")
axis(1, pos = -0.02)
axis(2, pos = -5)
|
556dbf4e92599b571c8a3e68ebc1baf5bbd20d52
|
21cf09a1aad63021ca2788fea19b83d26c389ef3
|
/marathon/marathon_dashboard/server.R
|
10efb49700ca963424366731cdb081ffad6c7083
|
[] |
no_license
|
AntoniaLovjer/NYC-Marathon-EDA
|
8d51e8a7fd655c2d841e3b4cee3ff7bbfd3db8f9
|
3d9f7a4f53ea572357750d0753daa0e357f32be8
|
refs/heads/master
| 2020-04-15T00:37:55.337738
| 2018-12-13T16:30:52
| 2018-12-13T16:30:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,040
|
r
|
server.R
|
# Shiny server for the NYC Marathon dashboard: loads the finisher data once,
# then renders headline counts and four plots, each switching between a
# US-state view and a country view based on the "location" input.
server <- function(input, output) {
# Read Data (one row per finisher; columns used below: year, state_name,
# country, type, Date-related fields consumed by the aux plotting helpers).
marathon <- read_csv('marathon.csv')
source('aux_functions.R')
# Read inputs as reactives so downstream renderers re-run on change.
location <- reactive({input$loc})
state <- reactive({input$state})
input_year <- reactive({input$input_year})
country <- reactive({input$country})
# Header text: "<place> - <year>" (with ", USA" appended for states).
output$cityyear <- renderText({
if (location() == 'State'){
text <- paste(state(), ', USA - ', input_year())
} else{
text <- paste(country(), ' - ', input_year())
}
return(text)
})
# Total finisher count for the selected place/year.
# NOTE(review): inside filter(), `country == country()` relies on dplyr
# resolving the bare name to the data column and the call to the reactive —
# it works, but renaming either would silently change meaning; verify if
# touched.
output$total <- renderText({
if (location() == 'State'){
total <- marathon %>%
filter(year == input_year() & state_name == state()) %>%
nrow()
} else {
total <- marathon %>%
filter(year == input_year() & country == country()) %>%
nrow()
}
paste0(prettyNum(total, big.mark = ',')," Total Finishers")
})
# Value box: finishers of type 'R' (runners).
output$runners <- renderValueBox({
if (location() == 'State'){
num_runners <- marathon %>%
filter(year == input_year() & state_name == state() & type == 'R') %>% nrow()
} else {
num_runners <- marathon %>%
filter(year == input_year() & country == country() & type == 'R') %>% nrow()
}
valueBox(prettyNum(num_runners, big.mark = ','), "Runners", color = "purple")
})
# Value box: finishers of type 'W' (wheelchairs).
output$wheelchairs <- renderValueBox({
if (location() == 'State'){
num_wheelchairs <- marathon %>%
filter(year == input_year() & state_name == state() & type == 'W') %>% nrow()
} else {
num_wheelchairs <- marathon %>%
filter(year == input_year() & country == country() & type == 'W') %>% nrow()
}
valueBox(prettyNum(num_wheelchairs, big.mark = ','), "Wheelchairs", color = "aqua")
})
# Value box: finishers of type 'H' (handcycles).
output$handcycles <- renderValueBox({
if (location() == 'State'){
num_handcycles <- marathon %>%
filter(year == input_year() & state_name == state() & type == 'H') %>% nrow()
} else {
num_handcycles <- marathon %>%
filter(year == input_year() & country == country() & type == 'H') %>% nrow()
}
valueBox(num_handcycles, "Handcycles", color = "orange")
})
# Plots: each delegates to a helper from aux_functions.R; the second
# argument (1/0) tells the helper whether to filter by state or country.
output$genderRatio <- renderPlot({
if (location() == 'State'){
plot_genderRatio(marathon, 1, state(), input_year())
}
else {
plot_genderRatio(marathon, 0, country(), input_year())
}
})
output$densityplot <- renderPlot({
if (location() == 'State'){
density_plot_geo(marathon, 1, state(), input_year())
}
else {
density_plot_geo(marathon, 0, country(), input_year())
}
})
output$pyramidplot <- renderPlot({
if (location() == 'State'){
pyramid_plot(marathon, 1, state(), input_year())
}
else {
pyramid_plot(marathon, 0, country(), input_year())
}
})
output$boxplot <- renderPlot({
if (location() == 'State'){
splint_boxplot(marathon, 1, state(), input_year())
}
else {
splint_boxplot(marathon, 0, country(), input_year())
}
})
}
|
d97c2d4a6f4820528b788eb48fc6461f82807be7
|
90832012541b9c048a75ca29ebc88b806370f097
|
/data-raw/populate.R
|
5cf36f547192a7a4fb1e8cb6f33752846cbea019
|
[] |
no_license
|
beanumber/etllahman
|
b00197630d20887f1a8dc8a65a95fac5bcaeb8a0
|
c79a0be77fc44600c6d7c98304bccd0c2ad8f85f
|
refs/heads/master
| 2020-03-11T14:44:30.408132
| 2018-04-18T20:04:52
| 2018-04-18T20:04:52
| 130,063,432
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 167
|
r
|
populate.R
|
# Populate the Lahman 2016 baseball database using the etllahman ETL pipeline.
library(etllahman)

# MySQL connection (credentials resolved from the local my.cnf file).
db <- src_mysql_cnf(dbname = "lahman2016")
# ETL object; downloaded source files are staged under ~/dumps/lahman.
bdb <- etl("etllahman", db = db, dir = "~/dumps/lahman")

# Create the schema, then extract/transform/load the data.
etl_init(bdb)
etl_update(bdb)
|
3a9f38998b9e125102db6953d7de5ad59250ad0f
|
8f8d61d286054a9b4ea299216ddf2e6f0ffe4221
|
/lDopaEI/man/convertUnits.Rd
|
ae20a1a3098f24cd0ab84f26721c69a7a5eb85a4
|
[
"MIT"
] |
permissive
|
MonteShaffer/L-Dopa_EI
|
fcf9e6c703694bac4c4da4ea4fae1d60db30e644
|
d5ceca2be18efe8313ee2b08f8659fcb0e81d26c
|
refs/heads/master
| 2021-07-11T01:20:08.645760
| 2017-10-07T20:37:54
| 2017-10-07T20:37:54
| 105,927,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 530
|
rd
|
convertUnits.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/integrateMe.R
\name{convertUnits}
\alias{convertUnits}
\title{Convert Units (from gravity to m/s^2)}
\usage{
convertUnits(x, from = "g", to = "m/s^2")
}
\arguments{
\item{x}{numeric vector}
\item{from}{units to change from}
\item{to}{units to change to}
}
\value{
numeric vector of converted result
}
\description{
Constants must be loaded with: \code{setup = loadSetup();}
}
\examples{
convertUnits(1); # 9.8
convertUnits(seq(-2,2,by=0.25));
}
|
15eed6a99f8b0eb7358ac38e6f18567efd7b286f
|
2b2908fb7a492b3f801f23c780a35cef8d5b0d59
|
/WangJun/apriorTest3.R
|
d630967cf2f411c1062fa98a967d806a532ab401
|
[] |
no_license
|
yaoran2000/MyWork
|
957129dfa4d587d0ad32f99d9f5c0ace1c36282a
|
c952e97654a9086cf53ab7dcaef56260540ffdd2
|
refs/heads/master
| 2020-04-06T04:44:31.844899
| 2017-04-25T06:15:52
| 2017-04-25T06:15:52
| 82,897,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 912
|
r
|
apriorTest3.R
|
# Association-rule mining (Apriori) over bill/product baskets pulled from a
# PostgreSQL database, followed by an interactive graph plot of the rules.
library(Matrix)
library(arules)
library(arulesViz)
library(grid)
# NOTE(review): arulesViz is loaded a second time here — harmless but redundant.
library(arulesViz)
require('RPostgreSQL')
# WARNING(review): database password is hardcoded in source. Move it to an
# environment variable or a config file outside version control.
pw <- {"Admin1234$"}
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv, dbname = "mht",host = "localhost", port = 5432,user = "etl", password = pw)
rm(pw)
# One row per bill: bill id plus a comma-separated list of its product names.
df_bill <- dbGetQuery(con,"select cast(t1.billid as text) billid,string_agg(t1.billproductname,',') from billproduct t1 group by t1.billid")
#print(df_bill)
#itemFrequencyPlot(Groceries,topN=20,type="absolute")
#df_bill[] <- lapply(df_bill,factor)
# Coerce every column to factor, as required by the "transactions" coercion.
for (column in names(df_bill)){
df_bill[column] <- as.factor(df_bill[[column]])
}
lapply(df_bill,class)
df_bill_matrix <- as(df_bill, "transactions")
#print(results_matrix)
# Mine rules with low support but high confidence.
rules <- apriori(df_bill_matrix, parameter = list(supp = 0.001, conf = 0.9))
options(digits=2)
inspect(rules[1:5])
rules<-sort(rules, by="confidence", decreasing=TRUE)
# Interactive rule graph (requires a graphics device / user interaction).
plot(rules,method="graph",interactive=TRUE,shading=NA)
|
5bf49489b86276cf89e66e2744ebb7368a73f31a
|
07f9ba53c35091bb55094a0244a2701056bd3323
|
/man/player_profile.Rd
|
9ed9018cbd1bca1e8245a3f04646f10d29423c33
|
[] |
no_license
|
MrDAndersen/mfl2R
|
cd1b9e4ed479c4cc7595a4cb5f8a5fd01deed4e7
|
fa3496dbffaf7a55e2b990709e75a9122f8ae5a9
|
refs/heads/master
| 2022-12-12T15:53:21.890433
| 2022-11-30T15:08:19
| 2022-11-30T15:08:19
| 249,209,957
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 421
|
rd
|
player_profile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoint.R
\name{player_profile}
\alias{player_profile}
\title{Player Profile}
\usage{
player_profile(player_id)
}
\arguments{
\item{player_id}{A single player ID or vector of multiple player IDs to get information on}
}
\description{
Returns a list with summary of information regarding a player, including DOB,
ADP ranking, height/weight.
}
|
a9e5db5fa28d456ce9371fd639f8666a983c7f9f
|
a17cf22be2304c96d267fc1b68db7b7279c4a293
|
/man/getCurrentTarget.Rd
|
873fb6233b146a5675fefb9a51310fbbd166939d
|
[] |
no_license
|
robertdouglasmorrison/DuffyTools
|
25fea20c17b4025e204f6adf56c29b5c0bcdf58f
|
35a16dfc3894f6bc69525f60647594c3028eaf93
|
refs/heads/master
| 2023-06-23T10:09:25.713117
| 2023-06-15T18:09:21
| 2023-06-15T18:09:21
| 156,292,164
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
rd
|
getCurrentTarget.Rd
|
\name{getCurrentTarget}
\alias{getAllTargets}
\alias{getCurrentTarget}
\alias{getCurrentTargetSpecies}
\alias{getCurrentTargetFilePrefix}
\title{
Get Current Target
}
\description{
Get details about the current target of organism(s)
}
\usage{
getAllTargets()
getCurrentTarget()
getCurrentTargetSpecies()
getCurrentTargetFilePrefix()
}
\details{
These functions retrieve details about the currently defined target organism(s).
See \code{\link{Targets}} for a more detailed overview.
}
\value{
For 'getAllTargets', a table of TargetIDs, SpeciesIDs, FilePrefixes, and Species Text annotation details.
For 'getCurrentTarget', a table with one row, of TargetID, SpeciesIDs, FilePrefixes, and Species Text annotation details.
For 'getCurrentTargetSpecies', a vector of this target's SpeciesIDs.
For 'getCurrentTargetFilePrefix', a vector of this target's file prefixes.
}
\seealso{
\code{\link{exportTargets}}, for writing the targets table to a disk file.
\code{\link{setCurrentTarget}}, to change the current target.
}
|
7caba2bfffba12282210d4c66a72dd13db52b395
|
23b6db906b59256e600320113aba459a5123fe05
|
/man/launch_app.Rd
|
7688e9afdac1582710eb4211ea077a1aacceef0d
|
[] |
no_license
|
uk-gov-mirror/datasciencecampus.gRoot
|
d00486a737239ac1314ac8474b66a232da77316c
|
353d2437bb13d3e48cd9c67e9bf0072616dd22c5
|
refs/heads/master
| 2021-09-23T09:59:54.027508
| 2018-09-21T13:00:42
| 2018-09-21T13:00:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 327
|
rd
|
launch_app.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/launch_app.R
\name{launch_app}
\alias{launch_app}
\title{launch_app}
\usage{
launch_app()
}
\value{
launch the gReeneRy app in internet browser window
}
\description{
Visualise and interact with urban forest data of Cardiff
}
\author{
Joe Peskett
}
|
6a25bd401e115b35acb8ee0f15f6f6b81305b4cc
|
d1e988ee94288a547f32063d500fdecd4e6b0bc1
|
/Assignment3/E3.R
|
ac2ba5acd5046756e455844bb5831e9792523699
|
[] |
no_license
|
HaraldBrolin/Big_data_exercises
|
1859c4dd1615e4c01fa4d0b3cbf0e412d0c6e33e
|
d4b884b394c5daf96a53b602b4889dbb7d53f9ab
|
refs/heads/master
| 2021-10-09T13:04:05.233130
| 2017-12-14T08:43:02
| 2017-12-14T08:43:02
| 112,359,555
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
r
|
E3.R
|
#-- Like perceptrons; can be tested without training.
rm(list = ls())
library(caret); library(mlbench); library(adabag); library(plyr); library(fastAdaboost)
# Loads `data_train` (handwritten-digit data) into the workspace.
load("HWD_train_data.RData")
set.seed(2017)
# NOTE(review): sample() on a data.frame permutes its COLUMNS, not rows —
# presumably a row shuffle was intended; confirm before relying on order.
df_numbers <- sample(data_train)
rm(data_train)
# 80/20 train/test split by row index.
id_train <- sample(1:dim(df_numbers)[1], 0.8*dim(df_numbers)[1])
train_data <- df_numbers[id_train, ]; rownames(train_data) <- 1:dim(train_data)[1]
test_data <- df_numbers[-id_train, ]; rownames(test_data) <- 1:dim(test_data)[1]
rm(id_train)
#------------------ One-vs-rest response: digit "1" vs everything else
train_data_x1 <- train_data
train_data_x1$V1 <- as.numeric(train_data$V1 == "1")
#------------------ Train an AdaBoost model with 10-fold CV (1 repeat)
ctrl <- trainControl(method = 'repeatedcv',
repeats = 1,
number = 10,
classProbs = T)
M_adaboost <- train(make.names(V1) ~ ., train_data_x1,
method = 'adaboost',
metric = "Accuracy",
trControl = ctrl)
|
42ed82ce698c086f78fd3c8251c0995e3d71ce57
|
564b61f29747117c49fe061e8225e0cee485208a
|
/run_analysis.R
|
426f659317278fb59524d9f9b15f3aa9f41c402d
|
[] |
no_license
|
rajeshtwn/getandcleandata
|
2d78898b526dcf3f50a258ce4f12008496bce3ef
|
30ce783f98590c95e892fbe78749db03e05ee3b2
|
refs/heads/master
| 2021-01-10T05:39:19.447149
| 2016-03-10T09:35:27
| 2016-03-10T09:35:27
| 52,954,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
run_analysis.R
|
# Build a tidy summary of the UCI HAR dataset: merge the train/test splits,
# keep only mean()/std() features, label activities, and write per-activity
# means to ./tidy_dataset.txt. Expects the "UCI HAR Dataset" folder in the
# working directory.
run_analysis <- function() {
# Feature names (col 2) and activity id->label lookup.
features = read.table(file="./UCI HAR Dataset/features.txt")
activity_labels = read.table(file="./UCI HAR Dataset/activity_labels.txt")
# Measurements for both splits, stacked train-then-test.
train_data <- read.table(file="./UCI HAR Dataset/train/X_train.txt")
test_data <- read.table(file="./UCI HAR Dataset/test/X_test.txt")
tcombined_data <- rbind(train_data, test_data)
combined_data <- setNames(data.frame(tcombined_data), features[,2])
# Activity ids for both splits, stacked in the same order as the data.
train_output <- read.table(file="./UCI HAR Dataset/train/y_train.txt")
test_output <- read.table(file="./UCI HAR Dataset/test/y_test.txt")
combined_output <- rbind(train_output, test_output)
#new_features <- features[grep("mean|std", features[,2]),2]
#meanandstd_features <- new_features[-grep("meanFreq", new_features)]
# Keep only exact mean()/std() features (deliberately excludes meanFreq).
meanandstd_features <- features[grep("mean\\(\\)|std\\(\\)", features[,2]),2]
meanandstd_data <- combined_data[ , which(names(combined_data) %in% meanandstd_features)]
combined_data_output <- cbind(meanandstd_data, combined_output)
# Attach human-readable activity names (both frames use column V1 as the id).
actual_data <- merge(combined_data_output, activity_labels, by.x = "V1", by.y = "V1")
# NOTE(review): 2:67 hard-codes the 66 mean/std measurement columns; it
# would break if the grep above matched a different number of features.
means_by_activity <- aggregate(actual_data[,2:67], by=list(Category=actual_data$V2), FUN=mean)
write.table(means_by_activity, file = "./tidy_dataset.txt", row.name = FALSE)
}
|
ef5fd14883ab222d4635f75d1549f36fea325ad2
|
9d68fb722ba7a4fab7f959d3bf145b2f4ff44a8c
|
/tests/testthat/test-rsample-time_series_cv.R
|
e35ad3ecec9a8619fcf3ee17f367adb04c768e23
|
[] |
no_license
|
business-science/timetk
|
29d2e7251082522d7d0683dae635402c532cba0b
|
086f46824f8c53bfde83f709f964ae5d6b544a5a
|
refs/heads/master
| 2023-08-04T19:50:20.594218
| 2023-03-30T12:32:01
| 2023-03-30T12:32:01
| 87,742,539
| 518
| 91
| null | 2023-05-11T15:54:47
| 2017-04-09T22:11:02
|
R
|
UTF-8
|
R
| false
| false
| 4,695
|
r
|
test-rsample-time_series_cv.R
|
context("TEST TIME SERIES CV")
# SINGLE TIME SERIES ----
# Deliberately sort descending to check time_series_cv() handles unsorted input.
m750 <- m4_monthly %>% filter(id == "M750") %>% arrange(desc(date))
# Fixed-origin (cumulative = FALSE) CV: 6-year training windows, 24-month
# assessment windows, advancing 24 months per slice, at most 3 slices.
resample_spec <- time_series_cv(data = m750,
initial = "6 years",
assess = "24 months",
skip = "24 months",
cumulative = FALSE,
slice_limit = 3)
resamples_unnested <- resample_spec %>% tk_time_series_cv_plan()
resample_groups <- resamples_unnested %>%
select(.id, .key, date) %>%
group_by(.id, .key)
test_that("Check Structure: time_series_cv()", {
# Structure
expect_s3_class(resample_spec, "time_series_cv")
expect_s3_class(resample_spec, "rset")
expect_equal(nrow(resample_spec), 3)
expect_equal(ncol(resample_spec), 2)
expect_equal(names(resample_spec), c("splits", "id"))
})
test_that("Check Structure: tk_time_series_cv_plan()", {
# Structure
expect_equal(names(resamples_unnested), c(".id", ".key", "id", "date", "value"))
})
test_that("Inspect Results: time_series_cv()", {
# Check Max Dates: each slice's last training/testing dates (expected
# values are fixed because the m750 dataset is static).
dates_tbl <- resample_groups %>%
slice_max(date)
dates_vec <- dates_tbl %>% filter(.key == "training") %>% pull(date) %>% unique()
expect_equal(
c("2013-06-01", "2011-06-01", "2009-06-01") %>% as.Date(),
dates_vec
)
dates_vec <- dates_tbl %>% filter(.key == "testing") %>% pull(date) %>% unique()
expect_equal(
c("2015-06-01", "2013-06-01", "2011-06-01") %>% as.Date(),
dates_vec
)
# Check Min Dates: each slice's first training/testing dates.
dates_tbl <- resample_groups %>%
slice_min(date)
dates_vec <- dates_tbl %>% filter(.key == "training") %>% pull(date) %>% unique()
expect_equal(
c("2007-07-01", "2005-07-01", "2003-07-01") %>% as.Date(),
dates_vec
)
dates_vec <- dates_tbl %>% filter(.key == "testing") %>% pull(date) %>% unique()
expect_equal(
c("2013-07-01", "2011-07-01", "2009-07-01") %>% as.Date(),
dates_vec
)
})
# PANEL DATA ----
# Same CV scheme on a grouped (7-store) weekly panel; all groups share the
# same calendar so each slice spans identical dates across groups.
walmart_tscv <- walmart_sales_weekly %>%
time_series_cv(
date_var = Date,
initial = "12 months",
assess = "3 months",
skip = "3 months",
slice_limit = 4
)
resamples_unnested <- walmart_tscv %>% tk_time_series_cv_plan()
resample_groups <- resamples_unnested %>%
select(.id, .key, Date) %>%
group_by(.id, .key)
test_that("Inspect Results: time_series_cv()", {
# Check Max Dates
dates_tbl <- resample_groups %>%
slice_max(Date) %>%
ungroup()
dates_vec <- dates_tbl %>% filter(.key == "training") %>% pull(Date) %>% unique()
expect_equal(
c("2012-08-03", "2012-05-11", "2012-02-17", "2011-11-25") %>% as.Date(),
dates_vec
)
dates_vec <- dates_tbl %>% filter(.key == "testing") %>% pull(Date) %>% unique()
expect_equal(
c("2012-10-26", "2012-08-03", "2012-05-11", "2012-02-17") %>% as.Date(),
dates_vec
)
# Check Min Dates
dates_tbl <- resample_groups %>%
ungroup() %>%
group_by(.id, .key) %>%
filter(Date == min(Date)) %>%
ungroup()
dates_vec <- dates_tbl %>% filter(.key == "training") %>% pull(Date) %>% unique()
expect_equal(
c("2011-08-12", "2011-05-20", "2011-02-25", "2010-12-03") %>% as.Date(),
dates_vec
)
dates_vec <- dates_tbl %>% filter(.key == "testing") %>% pull(Date) %>% unique()
expect_equal(
c("2012-08-10", "2012-05-18", "2012-02-24", "2011-12-02") %>% as.Date(),
dates_vec
)
})
# Numeric `assess = 2` means 2 observations per group (not a time period).
walmart_tscv <- walmart_sales_weekly %>%
time_series_cv(
date_var = Date,
initial = "12 months",
assess = 2,
skip = "3 months",
slice_limit = 4
)
# NOTE(review): this plot call is a side effect inside a test file; it draws
# during `R CMD check` but asserts nothing.
walmart_tscv %>% tk_time_series_cv_plan() %>% plot_time_series_cv_plan(Date, Weekly_Sales)
resamples_unnested <- walmart_tscv %>% tk_time_series_cv_plan()
resample_groups <- resamples_unnested %>%
select(.id, .key, Date) %>%
group_by(.id, .key)
resample_count <- resample_groups %>%
group_by(.id, .key) %>%
count()
test_that("Inspect Results: time_series_cv()", {
# Training: All should be 364 (52 * 7 Groups)
expect_true({
all({
resample_count %>%
filter(.key == "training") %>%
pull(n) == 52*7
})
})
# Testing: All should be 14 (2 * 7 Groups)
expect_true({
all({
resample_count %>%
filter(.key == "testing") %>%
pull(n) == 2*7
})
})
})
|
4f7e9a047b96ee32017f51ab177704d014600f32
|
5788453c664275919c952748a3f232359c5ea363
|
/R/eurusds.R
|
5647112f42f59e50f82be568ea719447df796ccc
|
[] |
no_license
|
anb133/EurUsd
|
b857e9f624b689f4dce0686d470fb55335b5e645
|
bdaec6f069327e0545ed148926c06706aa4d524e
|
refs/heads/master
| 2020-05-29T16:43:39.416886
| 2019-05-29T16:24:41
| 2019-05-29T16:24:41
| 189,256,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,046
|
r
|
eurusds.R
|
#' Get EUR-USD exchange rates
#'
#' The function gets EUR-USD exchange rates for the provided time period
#' from the exchangeratesapi.io history endpoint.
#'
#' @param date_from date. Formatted as yyyy-mm-dd
#' @param date_to date. Formatted as yyyy-mm-dd
#' @return data.table with one row per available day: \code{date} and the
#' \code{eurusd} rate (EUR per 1 USD), ordered chronologically.
#' @export
#'
#' @importFrom httr GET content
#' @importFrom logger log_debug
#' @importFrom data.table data.table
#'
#' @examples
#' \dontrun{
#' eurusds('2019-05-20', '2019-05-27')
#' }
eurusds <- function(date_from, date_to) {
# Send request to API
response <- GET(
'https://api.exchangeratesapi.io/history',
query = list(
start_at = date_from,
end_at = date_to,
base = 'USD',
symbols = 'EUR'
)
)
# Fail fast with an informative error on a non-2xx HTTP status; previously
# a failed request fell through and produced a confusing empty result.
httr::stop_for_status(response)
# Extract Rates (a named list keyed by ISO date string)
exchange_rates <- content(response)$rates
if (length(exchange_rates) == 0) {
warning("No exchange rates returned for ", date_from, " - ", date_to,
call. = FALSE)
}
# Create a data table, one row per day
eurusds <- data.table(
date = as.Date(names(exchange_rates)),
eurusd = as.numeric(unlist(exchange_rates))
)
eurusds <- eurusds[order(date)]
log_debug('Loaded exchange rates for {paste(min(eurusds$date), max(eurusds$date), sep = "-")}')
return(eurusds)
}
|
9e1c93ab51d7c19758c40cd6991ac0a274627497
|
8bfee4afaa66bfbe790484ba48b3fe1f16ac2865
|
/Code/bayesmpp_aux.R
|
8bada7c8152ac5292cfec87a07c3e387859b9f7b
|
[] |
no_license
|
AdrianaPAS90/Tesis_APAS
|
71fecb250419d6607a2ad0b361cf51ae18f007ad
|
3b135e5c4cd19f87c22e242c7d5bb68788737017
|
refs/heads/master
| 2021-01-12T09:51:24.772547
| 2018-06-29T18:11:39
| 2018-06-29T18:11:39
| 76,280,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,009
|
r
|
bayesmpp_aux.R
|
###----------------------------------------------------------------------
# Gibbs sampler for a Bayesian marked point process model: alternately draws
# hyperparameters and the per-individual latent variables theta/gamma, storing
# every draw.
bayesmpp <- function(alpha_0 =2,beta_0 = 0.3, d, c, n, M){
# d, c - durations and costs; originally an n x 3 array
#       (col1 = individual, col2 = durations, col3 = costs)
# n - number of individuals
# M - number of Gibbs sampler iterations
#
# NOTE(review): `x0` passed to several bayesmpp_* helpers below is never
# defined in this function nor passed as an argument — this will error at
# run time unless it exists in the calling environment; confirm intent.
#
# Storage for the draws
# Hyperparameters
alpha_d_rep <- array(NaN,M)
alpha_theta_rep <- array(NaN,M)
beta_theta_rep <- array(NaN,M)
alpha_gamma_rep <- array(NaN,M)
beta_gamma_rep <- array(NaN,M)
# Latent variables (one column per individual)
theta_rep <- matrix(NaN,M,n)
gamma_rep <- matrix(NaN,M,n)
# Initial values (arbitrary starting points for the chain)
alpha_d_sim <- 1 # (or any value)
alpha_theta_sim <- 1 # (or any value)
beta_theta_sim <- 1
alpha_gamma_sim <- 1
beta_gamma_sim <- 1
theta_sim <- 1
gamma_sim <- 1
# Gibbs sampler, per se
m <- 1
for(m in 1:M){
# Draw from the full conditionals of the parameters that depend only on
# other parameters.
alpha_gamma_sim <- bayesmpp_alpha_gamma(x0)
beta_gamma_sim <- bayesmpp_beta_gamma(gamma_sim, alpha_0, beta_0)
# Draw from the full conditionals of the parameters and latent variables
# that depend on the observed costs or durations.
j <- 1
for (j in 1:n){
alpha_d_sim <- bayesmpp_alpha_d(x0, d)
alpha_theta_sim <- bayesmpp_alpha_theta(x0, alpha_d_sim, d)
beta_theta_sim <- bayesmpp_beta_theta(x0, alpha_d_sim, alpha_theta_sim, d)
theta_sim <- bayesmpp_theta(d, alpha_d_sim, alpha_theta_sim, beta_theta_sim)
theta_rep[m,j] <- theta_sim
gamma_sim <- bayesmpp_gamma(d, c, alpha_gamma_sim, beta_gamma_sim)
gamma_rep[m,j] <- gamma_sim
}
# Store this iteration's hyperparameter draws.
alpha_d_rep[m] <- alpha_d_sim
alpha_theta_rep[m] <- alpha_theta_sim
beta_theta_rep[m] <- beta_theta_sim
alpha_gamma_rep[m] <- alpha_gamma_sim
beta_gamma_rep[m] <- beta_gamma_sim
}
# Output: list of all stored draws.
# NOTE(review): the final assignment returns the list invisibly (last
# expression is an assignment); an explicit return would be clearer.
bayesmpp_out <- list(alpha_d_rep,alpha_theta_rep,beta_theta_rep,alpha_gamma_rep,beta_gamma_rep,theta_rep,gamma_rep)
}
|
7d1146009a085f2b029e7e6f80cffe99fcdb09f8
|
83676931d003fc9bb6821509279deb057d669ba3
|
/data-raw/all_family_tree-3funds.R
|
83f9475d9c8407866f92735f97edca5a34ad3d7d
|
[] |
no_license
|
LunaSare/phunding
|
c70a6fd65314650cfd52471347df1aa7415e0a5d
|
4ea97eebea39c820d0ad0c0e24700c86ab2182d2
|
refs/heads/master
| 2020-04-02T05:30:26.245639
| 2019-02-21T16:18:41
| 2019-02-21T16:18:41
| 154,083,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 695
|
r
|
all_family_tree-3funds.R
|
# Map funding records (fam_funds, keyed by OTT id) onto the tips of the dated
# family tree (fam_tree_brlen), producing `all_funds`: a named vector of
# funding values, one per tip label, with 0 for unfunded tips.
gg <- match(tolower(fam_tree_brlen$ott_ids), tolower(names(fam_funds$funds)))
# just to check which ott_ids are not in the output dated tree
gg <- gg[!is.na(gg)]
funds <- fam_funds$funds[gg] # the tips that have funding
length(funds) == length(fam_funds$funds) # if TRUE all funded taxa are in the tips of the tree
# Tips without a funding record get an explicit 0.
no_funds_names <- fam_tree_brlen$tip.label[!fam_tree_brlen$ott_ids %in% names(funds)]
no_funds <- rep(0, length(no_funds_names))
names(no_funds) <- no_funds_names
# Re-key the funded entries by tip label instead of OTT id.
funds_names <- fam_tree_brlen$tip.label[match(names(funds), fam_tree_brlen$ott_ids)]
names(funds) <- funds_names
all_funds <- c(funds, no_funds)
length(all_funds) == length(fam_tree_brlen$tip.label) #should be TRUE
|
49c4014b014c53e4132160e74445684870cf04c8
|
ba6e98c2d1a1b4d7de382624632b5b442101898f
|
/Startup.R
|
9c75a46edf5274e112b83b7491bfd77741103df0
|
[] |
no_license
|
amart90-UI/CriteriaScripts
|
41d74534a30b63249ddc717c30c3aba83e87b184
|
36c2a8d6cbe24386c942c15c4c6b34957c859437
|
refs/heads/master
| 2020-03-23T01:08:25.057385
| 2018-07-19T19:06:31
| 2018-07-19T19:06:31
| 140,902,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,685
|
r
|
Startup.R
|
# Startup script: loads spatial data for the Table Mountain fire, prepares the
# unburned-island (UI) polygons, and sets up the plotting devices used by the
# downstream criteria scripts.
# Set up working directory.
# NOTE(review): machine-specific setwd() in a script is fragile — consider a
# project-relative path or the here package.
setwd("C:/Users/PyroGeo/Refugia/Ranking") # school computer
#setwd("D:/Refugia/Ranking") # home computer
#setwd("C:/Users/Anthony/Refugia/Ranking") # laptop
# Load spatial/analysis libraries.
library(sp)
library(rgeos)
library(raster)
library(rgdal)
library(matrixStats)
library(plyr)
library(gdistance)
library(dismo)
# Load Fire Perimeter
fire.perim <- readOGR("Datasets/Table Mountain/TableMountainComplex.shp")
#fire.perim <- readOGR("C:/Users/PyroGeo/Refugia/Working/Maloney.shp")
# Load UI (unburned island polygons), reproject to the fire perimeter's CRS,
# and apply a zero-width buffer to repair invalid geometries.
ui <- readOGR("Datasets/Table Mountain/TableUI.shp")
#ui <- readOGR("C:/Users/PyroGeo/Refugia/Working/UI_Maloney.shp")
ui <- spTransform(ui, projection(fire.perim))
ui <- gBuffer(ui, byid=T, width=0)
# Order UI by ID (numeric sort of the ID attribute).
ui <- ui[order(match(as.numeric(as.character(ui@data$ID)),sort(as.numeric(as.character(ui@data$ID))))),]
# Load US state boundaries (downloads GADM data on first use).
us <- getData("GADM", country="USA", level=1)
# Extract the Pacific Northwest states and reproject to match the fire data.
pnwstates <- c("Washington", "Oregon", "Idaho")
pnw <- us[match(toupper(pnwstates),toupper(us$NAME_1)),]
pnw <- spTransform(pnw, projection(fire.perim))
# Create score dataframe (one row per UI polygon, filled in later scripts).
scores.df <- data.frame(ID = ui@data$ID)
# Setup plot layout: device 2 shows the fire location; a second X11 device
# holds a 3x3 matrix of maps with a shared legend.
plot(pnw, main = "Fire location")
plot(fire.perim, add = T, col = 'red', border = 'red')
X11()
map.matrix = dev.cur()
layout(matrix(c(1:9), 3, 3, byrow = TRUE), widths = 1, heights = 1)
plot(1, type="n", axes=FALSE, xlab="", ylab="")
legend("center", title= "Refugia value", legend = c("Low", "", "", "High"),
fill = c("green", "yellow", "orange", "red"))
dev.set(which = 2)
# Cleanup intermediates
rm(pnwstates, us)
# Write PNW
#writeOGR(pnw, dsn= paste0(getwd(), "/Output"), layer = "pnw", driver = "ESRI Shapefile", overwrite_layer = T)
|
ce836bea139274ce606136639e5211703fbd6efa
|
dd116ab1aa141a5c6128dd3c94eeed549c4ca983
|
/man/remove_unused_jamie.Rd
|
cd1e921516f48a8638eac78d062553ec7002ce24
|
[
"MIT"
] |
permissive
|
pahanc/malariasimulation
|
56aecf1f7f3057d6cb631a32aeda226e25f45946
|
48aea12df28d519bec16e2791d175b70ad8c7ecc
|
refs/heads/master
| 2023-06-30T10:35:59.220798
| 2021-08-02T11:14:52
| 2021-08-02T11:14:52
| 351,858,799
| 0
| 0
|
NOASSERTION
| 2021-08-02T11:56:14
| 2021-03-26T17:19:21
|
C++
|
UTF-8
|
R
| false
| true
| 416
|
rd
|
remove_unused_jamie.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compatibility.R
\name{remove_unused_jamie}
\alias{remove_unused_jamie}
\title{remove parameter keys from jamie's format that are not used
in this IBM}
\usage{
remove_unused_jamie(params)
}
\arguments{
\item{params}{with keys in the jamie's format}
}
\description{
remove parameter keys from jamie's format that are not used
in this IBM
}
|
7e6eea37650c8739a76556f27678e2a849adbf94
|
5390aac150a93ba4762983b3eaaf1ec58c3e78d8
|
/source/asm_Functions/asm_fragConsistencyChecker.R
|
f5a73283d80d4bd0625b7422b399ced1357de4fc
|
[
"NIST-PD"
] |
permissive
|
asm3-nist/DART-MS-DBB
|
144a44de34e71d3dcf4c19a2317d4f9766cbba41
|
6189fd3b894f69de49113ed13afd6a7c650f74d2
|
refs/heads/master
| 2023-04-10T22:33:33.820668
| 2021-05-03T20:10:16
| 2021-05-03T20:10:16
| 292,061,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 622
|
r
|
asm_fragConsistencyChecker.R
|
# Rank fragmentation spectra by their abundance-weighted mean m/z
# ("center of mass"), which should decrease across increasing energies.
#
# x: list of spectra; each element is a two-column matrix with m/z in
#    column 1 and abundance in column 2.
# Returns: a list whose single element is the integer permutation ordering
#    the spectra from highest to lowest center of mass (empty input yields
#    list(integer(0)) instead of erroring as the 1:n loop previously did).
asm_fragConsistencyChecker <- function(x){
  center_of_mass <- vapply(x, function(spectrum) {
    mz <- spectrum[, 1]
    ab <- spectrum[, 2]
    # Normalised weighted mean: sum(mz * ab/sum(ab)).
    sum(mz * ab) / sum(ab)
  }, numeric(1))
  # (A max-m/z metric was previously computed as a second check, but spectral
  # noise made it unreliable, so only the center-of-mass ordering is kept.)
  Results <- list(order(-center_of_mass))
  return(Results)
}
|
dcd43544a09f7cbad2df6fb6263f162dfc401668
|
d5331ec752b979e7c0b7edb81536875f6400a97f
|
/man/ISRaD.extra.Cstocks.Rd
|
2965e4838dd3290d63162d9fc5f4a7a7f2dde67d
|
[] |
no_license
|
AuHau/ISRaD
|
81ef5005951ad1dfb0bd72d34d2aea0ebd257f9b
|
4e69adfdfadd521d932898a0b01f436bd2328e8e
|
refs/heads/master
| 2020-04-18T01:23:27.646220
| 2019-04-04T16:25:29
| 2019-04-04T16:25:29
| 167,116,873
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 641
|
rd
|
ISRaD.extra.Cstocks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ISRaD.extra.Cstocks.R
\name{ISRaD.extra.Cstocks}
\alias{ISRaD.extra.Cstocks}
\title{ISRaD.extra.Cstocks}
\usage{
ISRaD.extra.Cstocks(database)
}
\arguments{
\item{database}{ISRaD dataset object.}
}
\value{
returns ISRaD_data object with filled columns
}
\description{
Calculates soil organic carbon stock
}
\details{
Function first fills lyr_bd_samp and lyr_c_org. SOC stocks can only be calculated if organic carbon concentration and bulk density data are available. SOC stocks are then calculated for the fine earth fraction (<2mm).
}
\author{
J. Beem-Miller
}
|
65eb9a7ff66ef9bc4675dc38e8aed2aaac8f6af1
|
112638177c6bf7820395552772b9f606b457b4c8
|
/R/ICP-fitters.R
|
6c2a5b186320e52db8d3fc56ae8b598cc1d9caa4
|
[] |
no_license
|
JoaoSantinha/ICPSurv
|
36abb23bd9c605e6faf0fa46181f1bd4f2d02738
|
9b1b369e5ef70f88490608bb5f642cb659a2d22d
|
refs/heads/master
| 2022-04-14T08:23:26.401308
| 2020-02-22T16:02:38
| 2020-02-22T16:02:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,567
|
r
|
ICP-fitters.R
|
#' Internal regression functions
#'
#' The \code{fit_model} function is a generic function meant for internal use in
#' the \code{ICPSurv} package, and as such it is not exported to the namespace.
#'
#' The \code{fit_model.X} and \code{fit_nonparam_model.X} functions are internal
#' fitter functions. They are usually all quite simple as their main
#' functionality apart from fitting the model is ensuring that the output
#' is compatible with the \code{\link{plausible_predictor_test}}s.
#'
#' All \code{fit_model} methods must return the following:
#' \itemize{
#' \item{\code{coefficients} }{ The estimated regression coefficients.}
#' \item{\code{deviance} }{ The deviance of the fitted model, i.e. \eqn{-2 log(likelihood)}}
#' \item{\code{df} }{ Degrees of freedom in fitted model.}
#' \item{\code{scale} }{ Scale.}
#' }
#'
#' All \code{fit_nonparam_model} methods must return the following:
#' \itemize{
#' \item{\code{cum} }{ Cumulative regression coefficient.}
#' \item{\code{cum.var} }{ Cumulative variance of regression effects.}
#' }
#' Moreover if \code{n.sim} in the \code{method} is non-zero then the
#' \code{fit_nonparam_model} also returnes the following
#' \itemize{
#' \item{\code{sup} }{ Kolmogorov–Smirnov test.}
#' \item{\code{int} }{ Cramér–von Mises test.}
#' }
#'
#'
#' @param method a \strong{method object} created by the
#' \code{\link{method_obj}} function.
#' @param Y a vector or \code{\link[survival]{Surv}} object describing the
#' target variable.
#' @param X a matrix, vector or data frame describing the covariates.
#' @param subset an optional vector specifying a subset of observations to be
#' used in the fitting process.
#' @param id for timevarying covariates the variable must associate each record
#' with the id of a subject.
#' @param ... additional arguments to be passed to lower level functions.
#'
#' @return Both the \code{fit_model} and \code{fit_nonparam_model} methods return a list.
#'
#' \code{fit_model} methods must return the following:
#' \item{\code{coefficients} }{ The estimated regression coefficients.}
#' \item{\code{deviance} }{ The deviance of the fitted model, i.e. \eqn{-2 log(likelihood)}}
#' \item{\code{df} }{ Degrees of freedom in fitted model.}
#' \item{\code{scale} }{ Scale.}
#'
#'
#' \code{fit_nonparam_model} methods return the following:
#' \item{\code{cum} }{ Cumulative regression coefficient.}
#' \item{\code{cum.var} }{ Cumulative variance of regression effects.}
#' \item{\code{sup} }{ Kolmogorov–Smirnov test (only returned if \code{n.sim} not zero).}
#' \item{\code{int} }{ Cramér–von Mises test (only returned if \code{n.sim} not zero).}
#'
#'
#'
#' @seealso \code{\link{ICP}}, \code{\link{plausible_predictor_test}} for the
#' full wrapper functions.
#' @keywords internal
fit_model <- function(method, Y, X, subset, ...) {
  # S3 generic: dispatch on the class of `method` (the first argument),
  # e.g. "lm", "glm", "ph" or "ah".
  UseMethod("fit_model")
}
#' @rdname fit_model
fit_model.default <- function(method, Y, X, subset = NULL, ...) {
  # Fallback for unrecognised model types: warn (with the offending model
  # name) and return the sentinel value 1 instead of fitting anything.
  # Fixed: missing space after the model name and "recocnised" typo.
  warning(method$model,
          " is not a recognised statistical model in the ICPSurv framework",
          " - the recognised models are linear models 'lm', generalized",
          " linear models 'glm' (distribution family must be specified),",
          " proportional hazard models 'ph' and",
          " additive hazard models 'ah'.")
  return(1)
}
#' @rdname fit_model
fit_model.lm <- function(method, Y, X, subset = NULL, ...) {
mf <- stats::model.frame(Y ~ ., data = data.frame(Y, X), subset = subset)
fit <- stats::lm(mf)
coef <- fit$coefficients
var <- stats::vcov(fit)
if (any(is.na(coef))) {
index <- which(is.na(coef))
coef[index] <- 0
var[,index] <- var[index,] <- 0
diag(var)[index] <- Inf
}
return(list("coefficients" = coef,
"covariance" = var,
"deviance" = stats::deviance(fit),
"df" = fit$df.residual,
"scale" = stats::deviance(fit) / fit$df.residual))
}
#' @rdname fit_model
fit_model.glm <- function(method, Y, X, subset = NULL, ...) {
  # Generalized linear model fit; `method$family` is mandatory and
  # `method$dispersion` (may be NULL) is forwarded to summary() for scale.
  if (is.null(method$family)) {
    stop("For model = 'glm' there must be specified a family in the method object")
  }
  frame <- stats::model.frame(Y ~ ., data = data.frame(Y, X), subset = subset)
  model <- suppressWarnings(stats::glm(frame, family = method$family))
  beta <- model$coefficients
  sigma <- stats::vcov(model)
  # Zero out aliased coefficients and flag them with infinite variance.
  if (any(is.na(beta))) {
    dropped <- which(is.na(beta))
    beta[dropped] <- 0
    sigma[dropped, ] <- 0
    sigma[, dropped] <- 0
    diag(sigma)[dropped] <- Inf
  }
  list("coefficients" = beta,
       "covariance" = sigma,
       "deviance" = model$deviance,
       "df" = model$df.residual,
       "scale" = summary(model, dispersion = method$dispersion)$dispersion)
}
#' @rdname fit_model
fit_model.ph <- function(method, Y, X, subset = NULL, ...) {
  # Proportional hazards model fitted parametrically as an exponential
  # survreg model (intercept-only when no covariates are supplied).
  if (! survival::is.Surv(Y)) {
    Y <- survival::Surv(Y)
  }
  if (length(X) == 0) {
    fit <- survival::survreg(Y ~ 1, subset = subset, dist = "exponential")
  } else {
    fit <- survival::survreg(Y ~ ., data = data.frame(X), subset = subset,
                             dist = "exponential")
  }
  # Degenerate coefficients (zero estimated variance) are flagged with
  # infinite variance so downstream tests treat them as uninformative.
  if (any(diag(fit$var) == 0)) {
    ind <- which(diag(fit$var) == 0)
    diag(fit$var)[ind] <- Inf
  }
  # Deviance = -2 * log-likelihood of the fitted model; scale is fixed at 1
  # for the exponential distribution.
  return(list(
    "coefficients" = fit$coefficients,
    "covariance" = fit$var,
    "deviance" = - 2 * fit$loglik[2],
    "df" = fit$df.residual,
    "scale" = 1
  ))
}
#' @rdname fit_model
fit_model.ah <- function(method, Y, X, subset = NULL, ...) {
  # Additive hazards model: an exponential survreg fit where the default
  # log transform is replaced by the identity so that covariate effects
  # act additively on the hazard scale.
  if (! survival::is.Surv(Y)) {
    Y <- survival::Surv(Y)
  }
  dist <- survival::survreg.distributions$exponential
  # Identity transform with unit derivative replaces the default log link.
  dist$trans <- dist$itrans <- function(y) y
  dist$dtrans <- function(y) rep(1,length(y))
  if (length(X) == 0) {
    fit <- survival::survreg(Y ~ 1, subset = subset, dist = dist)
  } else {
    fit <- survival::survreg(Y ~ ., data = data.frame(X), subset = subset,
                             dist = dist)
  }
  # Flag zero-variance coefficients with infinite variance, as in fit_model.ph.
  if (any(diag(fit$var) == 0)) {
    ind <- which(diag(fit$var) == 0)
    diag(fit$var)[ind] <- Inf
  }
  return(list(
    "coefficients" = fit$coefficients,
    "covariance" = fit$var,
    "deviance" = - 2 * fit$loglik[2],
    "df" = fit$df.residual,
    "scale" = 1))
}
#' @rdname fit_model
fit_model.hazard <- function(method, Y, X, subset = NULL, ...) {
  # Parametric hazard model with a user-chosen survreg distribution
  # (`method$dist`); intercept-only when no covariates are supplied.
  if (! survival::is.Surv(Y)) {
    Y <- survival::Surv(Y)
  }
  if (length(X) == 0) {
    fit <- survival::survreg(Y ~ 1, subset = subset, dist = method$dist)
  } else {
    fit <- survival::survreg(Y ~ ., data = data.frame(X), subset = subset,
                             dist = method$dist)
  }
  # Consistency fix: fit_model.ph and fit_model.ah both flag degenerate
  # (zero-variance) coefficients with infinite variance; this method was
  # missing the same guard.
  if (any(diag(fit$var) == 0)) {
    ind <- which(diag(fit$var) == 0)
    diag(fit$var)[ind] <- Inf
  }
  return(list(
    "coefficients" = fit$coefficients,
    "covariance" = fit$var,
    "deviance" = - 2 * fit$loglik[2],
    "df" = fit$df.residual,
    "scale" = 1
  ))
}
#' @rdname fit_model
fit_nonparam_model <- function(method, Y, X, subset, ...) {
  # S3 generic for the nonparametric (time-varying coefficient) fitters;
  # implemented for the "ph" and "ah" method classes below.
  UseMethod("fit_nonparam_model", method)
}
#' @rdname fit_model
fit_nonparam_model.default <- function(method, Y, X, subset = NULL, ...) {
  # Fallback: only the "ph" and "ah" classes have nonparametric fitters.
  # Fix: the original message had no space after the colon, producing
  # e.g. "model class:foo".
  stop("A time varying fitter function is not defined for this model class: ",
       class(method))
}
#' @rdname fit_model
fit_nonparam_model.ph <- function(method, Y, X, subset = NULL, id = NULL, ...) {
  # Time-varying Cox regression via timereg::timecox.  Returns the
  # cumulative coefficients and, when n.sim > 0, the p-values of the
  # constant-effect tests.
  if (! survival::is.Surv(Y)) {
    Y <- survival::Surv(Y)
  }
  # Number of resampling simulations; defaults to 50 when unspecified.
  if (is.null(method$n.sim)) {
    n.sim <- 50
  } else {
    n.sim <- method$n.sim
  }
  # NOTE(review): the empty-covariate branch fits an Aalen model rather
  # than timecox — presumably intentional for the intercept-only case;
  # confirm with the package authors.
  if (length(X) == 0) {
    fit <- timereg::aalen(Y ~ 1, data = data.frame(X), id = id, n.sim = n.sim)
  } else {
    fit <- quiet(timereg::timecox(Y ~ ., data = data.frame(X),
                                  id = id, n.sim = n.sim))
  }
  # timereg occasionally returns all-NA cumulative estimates; bail out
  # explicitly rather than propagating NAs downstream.
  if (sum(is.na(fit$cum[2,])) > 0) {
    stop("The model can not be fitted due to bugs in the dependencies.\n ",
         "The error has been reported to the maintainers of the 'timereg' package.")
  }
  # sup/int: Kolmogorov-Smirnov and Cramer-von Mises style tests of
  # time-constant effects, only available when simulations were run.
  if (n.sim != 0) {
    res <- list(
      "cum" = fit$cum,
      "cum.var" = fit$var.cum,
      "sup" = unname(fit$pval.testBeqC),
      "int" = unname(fit$pval.testBeqC.is))
  } else {
    res <- list(
      "cum" = fit$cum,
      "cum.var" = fit$var.cum)
  }
  return(res)
}
#' @rdname fit_model
fit_nonparam_model.ah <- function(method, Y, X, subset = NULL, id = NULL, ...) {
  # Aalen additive hazards model with time-varying coefficients
  # (timereg::aalen); returns the same structure as fit_nonparam_model.ph.
  if (! survival::is.Surv(Y)) {
    Y <- survival::Surv(Y)
  }
  # Number of resampling simulations; defaults to 50 when unspecified.
  if (is.null(method$n.sim)) {
    n.sim <- 50
  } else {
    n.sim <- method$n.sim
  }
  # Robust variance estimation is only requested when simulations are run.
  robust <- ifelse(n.sim == 0, 0, 1)
  if (length(X) == 0) {
    fit <- timereg::aalen(Y ~ 1, data = data.frame(X), id = id,
                          n.sim = n.sim, robust = robust)
  } else {
    fit <- timereg::aalen(Y ~ ., data = data.frame(X), id = id,
                          n.sim = n.sim, robust = robust)
  }
  # Bail out explicitly when timereg returns all-NA cumulative estimates.
  if (sum(is.na(fit$cum[2,])) > 0) {
    stop("The model can not be fitted due to bugs in the dependencies.\n ",
         "The error has been reported to the maintainers of the 'timereg' package.")
  }
  # sup/int test p-values exist only when simulations were run.
  if (n.sim != 0) {
    res <- list(
      "cum" = fit$cum,
      "cum.var" = fit$var.cum,
      "sup" = unname(fit$pval.testBeqC),
      "int" = unname(fit$pval.testBeqC.is))
  } else {
    res <- list(
      "cum" = fit$cum,
      "cum.var" = fit$var.cum)
  }
  return(res)
}
# @rdname fit_model
#fit_nonparam_model.hazard <- function(method, Y, X, id = NULL, ...) {
# if (! survival::is.Surv(Y)) {
# Y <- survival::Surv(Y)
# }
# if (is.null(method$n.sim)) {
# n.sim <- 50
# } else {
# n.sim <- method$n.sim
# }
# robust <- ifelse(n.sim == 0, 0, 1)
# if (length(X) == 0) {
# fit <- timereg::aalen(Y ~ 1, id = id, n.sim = n.sim, robust = robust)
# } else if (method$link %in% c("proportional", "log")) {
# fit <- quiet(timereg::timecox(Y ~ ., data = data.frame(X),
# id = id, n.sim = n.sim, robust = robust))
# } else if (method$link %in% c("additive", "identity")) {
# fit <- timereg::aalen(Y ~ ., data = data.frame(X), id = id,
# n.sim = n.sim, robust = robust)
# }
# if (sum(is.na(fit$cum[2,])) > 0) {
# stop("The model can not be fitted due to bugs in the dependencies.\n ",
# "The error has been reported to the maintainers of the 'timereg' package.")
# }
# if (n.sim != 0) {
# res <- list(
# "cum" = fit$cum,
# "cum.var" = fit$var.cum,
# "sup" = unname(fit$pval.testBeqC),
# "int" = unname(fit$pval.testBeqC.is))
# } else {
# res <- list(
# "cum" = fit$cum,
# "cum.var" = fit$var.cum)
# }
# return(res)
#
# stop("The 'nonparam' method is only implementer for the cox and aalen hazard",
# "models so far.")
#}
quiet <- function(x) {
  # Evaluate `x` while diverting printed output to a throwaway temp file so
  # that noisy fitters produce no console output; the sink is restored even
  # if evaluation errors.  The value is returned invisibly.
  scratch <- tempfile()
  sink(scratch)
  on.exit(sink())
  result <- force(x)
  invisible(result)
}
|
58d333abaa71b5737a671b506524d4fd787eb103
|
5e3f359317825ac9034f50c21746b1cab14bb554
|
/Z_otherScripts/econJobforum.R
|
8e3293fc239bd8d0c221008320fa96914c6ff109
|
[
"MIT"
] |
permissive
|
rpplayground/GSERM_TextMining
|
961239625a027d6329a960e5eb7bbf3c8d5c154a
|
49fccfe1877db8171f8cd357e06be49a0640b817
|
refs/heads/master
| 2020-09-08T13:13:54.360494
| 2019-09-09T15:25:11
| 2019-09-09T15:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,589
|
r
|
econJobforum.R
|
#' Title: Obtain text from econonics job forum
#' Purpose: Learn some basic scraping
#' Author: Ted Kwartler
#' email: ehk116@gmail.com
#' License: GPL>=3
#' Date: 2019-5-12
# Lib
library(rvest)
library(purrr)
# Init
# Read the forum front page and parse the pagination widget to recover the
# highest listing-page number (e.g. "... 12345 Next »" -> 12345).
pg <- read_html('https://www.econjobrumors.com/') #https://www.econjobrumors.com/page/2
maxPg <- pg %>% html_node('.nav') %>% html_text()
maxPg <- as.numeric(gsub('Next|,| »','', tail(strsplit(maxPg, '…')[[1]],1)))
# Construct all pages
# One URL per listing page: https://www.econjobrumors.com/page/<k>
allURLS <- paste0('https://www.econjobrumors.com/page/', 1:maxPg)
# Get all possible threads among all pages
allLinks <- list()
for (i in 1:5){ #length(allURLS)
print(paste('getting page ', allURLS[i]))
# Get all links
tmp <- pg %>% html_nodes("a") %>% html_attr("href")
tmp <- tmp [grep('https://www.econjobrumors.com/topic/', tmp)]
# Try to avoid appearing like DDOS
Sys.sleep(1)
allLinks[[allURLS[i]]] <- tmp
print('finished')
}
# Organize into comprehensive thread vector
allThreads <- unlist(allLinks)
# Identify & Remove dupes
# BUG FIX: the original `allThreads[-grep('#post', allThreads)]` drops
# EVERY element when grep() finds no match, because x[-integer(0)] selects
# nothing; grepl() handles the no-match case correctly.
allThreads <- allThreads[!grepl('#post', allThreads)]
allThreads <- unique(allThreads)
saveRDS(allThreads, 'C:/Users/Edward/Desktop/StGallen2019_student/Z_otherData/econForum/allThreads6_6_2019.rds')
# Extract elements from each page
# For each of the first 5 thread URLs, scrape author, post timestamp and
# post text, and collect one data.frame of posts per thread.
threadScrape <- list()
for (i in 1:5){
  print(paste('scraping page', allThreads[i]))
  threadPg <- read_html(allThreads[i])
  # NOTE(review): the next two pipelines discard their results — they look
  # like exploratory leftovers; confirm before removing.
  threadPg %>% html_nodes(xpath = '//*[@id="position-1"]/div[1]')
  threadPg %>% html_nodes(xpath = '//*[@id="thread"]') %>% html_text()
  # Author
  author <- threadPg %>% html_node(xpath = '//*[@id="thread"]') %>% html_nodes("[class='threadauthor']") %>% html_text()
  author <- trimws(gsub("\n|\t", '', author))
  # One "poststuff" node per post; its text carries the post timestamp.
  postMeta <- threadPg %>% html_node(xpath = '//*[@id="thread"]') %>% html_nodes("[class='poststuff']") %>% html_text()
  # Post bodies, addressed positionally via the "position-<j>" anchors.
  allTxt <- vector()
  for(j in 1:length(postMeta)){
    postTxt <- threadPg %>% html_node(xpath = paste0('//*[@id="position-',j,'"]/div[2]/div[1]')) %>%
      html_text()
    postTxt <- gsub('[\n]','', postTxt)
    allTxt[j] <- postTxt
  }
  # Get time
  # Record the scrape time alongside the timestamp shown on the page.
  postTime <- vector()
  for(j in 1:length(postMeta)){
    postTime[j] <- paste('SysTime:', Sys.time(), 'WebpageTime:',trimws(head(strsplit(postMeta, "[#]")[[j]],1)))
  }
  # NOTE(review): data.frame() recycling assumes author, postTime and
  # allTxt all have the same length as postMeta — verify on threads with
  # deleted posts.
  resp <- data.frame(author,
                     postTime,
                     raw_postMeta = postMeta,
                     url = allThreads[i],
                     text = allTxt )
  threadScrape[[i]] <- resp
  Sys.sleep(1)
}
# Quick manual inspection of the first scraped thread.
threadScrape[[1]]$raw_postMeta
threadScrape[[1]]$text
# End
|
4f2fe3ea570effc0d85fb039466b400fbecdf940
|
d2c892e59bb876e2205ad6ca9acb3e904aaeab5b
|
/code/R/SIMLR_Estimate_Number_of_Clusters.R
|
5fd26f5096a64aac31a2956b1fd4731909f899f7
|
[] |
no_license
|
yuqimiao/multiomics-SIMLR
|
088738b77a7e0441a41e0d6b14137c2e1aefa8a7
|
bedd32e5e5ddafad2844803d901e5075c716203a
|
refs/heads/master
| 2023-03-09T10:18:04.289011
| 2021-02-20T04:40:39
| 2021-02-20T04:40:39
| 293,400,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,526
|
r
|
SIMLR_Estimate_Number_of_Clusters.R
|
# Estimates the number of clusters by means of two heuristics
SIMLR_Estimate_Number_of_Clusters <- function(X, NUMC = 2:5, cores.ratio = 1) {
  # Estimate the number of clusters with the two heuristics from the SIMLR
  # paper: average the multi-kernel distances, diffuse them into a
  # similarity graph, then score every candidate cluster count.
  kernels <- multiple.kernel.numc(t(X), cores.ratio)
  avg_dist <- array(0, dim(kernels[[1]]))
  for (k in 1:length(kernels)) {
    avg_dist <- avg_dist + kernels[[k]]
  }
  avg_dist <- avg_dist / length(kernels)
  # Similarity = (max distance) - distance, followed by network diffusion.
  W <- max(max(avg_dist)) - avg_dist
  W <- network.diffusion.numc(W, max(ceiling(ncol(X) / 20), 10))
  # Quality at each candidate count and at its two neighbours.
  q_at <- Estimate_Number_of_Clusters_given_graph(W, NUMC)
  q_above <- Estimate_Number_of_Clusters_given_graph(W, NUMC + 1)
  q_below <- Estimate_Number_of_Clusters_given_graph(W, NUMC - 1)
  # Heuristic 1: discrete second difference of the quality curve.
  K1 <- 2 * (1 + q_at) - (2 + q_above + q_below)
  # Heuristic 2: the same score rescaled by (k + 1) / k.
  K2 <- K1 * (NUMC + 1) / (NUMC)
  return(list(K1 = K1, K2 = K2))
}
# This function estimates the number of clusters given the two heuristics
# given in the supplementary materials of our nature method paper.
# W is the similarity graph; NUMC is a vector which contains the possible choices
# of number of clusters
"Estimate_Number_of_Clusters_given_graph" = function( W, NUMC = 2:5 ) {
  # Score each candidate cluster count in NUMC on the similarity graph W
  # using a rotation-cost / eigengap criterion on the normalized Laplacian.
  quality = NULL
  if (min(NUMC)<1) {
    stop('Note that we always assume a minimum of at least 2 clusters: please change values for NUMC.')
  }
  # Symmetrize the similarity matrix.
  W = (W + t(W)) / 2
  if(!is.null(NUMC)) {
    degs = rowSums(W)
    D = Matrix(0,nrow=length(degs),ncol=length(degs),sparse=TRUE)
    diag(D) = degs
    L = D - W
    # Guard against isolated nodes (zero degree) before inverting.
    degs[which(degs==0)] = .Machine$double.eps
    # calculate D^(-1/2)
    diag(D) = 1/(degs^0.5)
    # calculate normalized Laplacian
    L = D %*% L %*% D
    # compute the eigenvectors corresponding to the k smallest eigenvalues
    res = eigen(L)
    eigenvalue = res$values
    U = res$vectors
    res = sort(eigenvalue,decreasing=FALSE,index.return=TRUE)
    eigenvalue = res$x
    b = res$ix
    U = U[,b]
    for (ck in NUMC) {
      if(ck==1) {
        # Degenerate single-cluster case.
        tmp = array(0,dim(U))
        diag(tmp) = 1/(U[,1]+.Machine$double.eps)
        res = sum(sum(tmp%*%U[,1]))
      }
      else {
        # Row-normalize the first ck eigenvectors, discretize them, and
        # measure how concentrated each row's mass is; scale by the
        # eigengap ratio at position ck.
        UU = U[,1:ck]
        tmp = sqrt(rowSums(UU^2))+.Machine$double.eps
        tmp = matrix(rep(tmp,ck),nrow=length(tmp),ncol=ck)
        UU = UU / tmp
        res = discretisation(UU)
        EigenVectors = res$EigenVectors^2
        temp1 = t(apply(EigenVectors,1,function(x) return(sort(x,decreasing=TRUE))))
        tmp = 1 / (temp1[,1]+.Machine$double.eps)
        tmp1 = Matrix(0,nrow=length(tmp),ncol=length(tmp),sparse=TRUE)
        diag(tmp1) = tmp
        tmp = tmp1%*%temp1[,1:max(2,ck-1)]
        tmp = sum(sum(tmp))
        res = (1-eigenvalue[ck+1])/(1-eigenvalue[ck])*tmp
      }
      quality = c(quality,res)
    }
  }
  # One quality score per entry of NUMC, in order.
  return(quality)
}
"discretisation" = function( EigenVectors ) {
  # Iteratively rotate the (row-normalized) eigenvectors towards a discrete
  # cluster-indicator matrix (Yu & Shi style multiclass spectral rotation).
  n = nrow(EigenVectors)
  k = ncol(EigenVectors)
  # Row-normalize the eigenvectors.
  vm = sqrt(rowSums(EigenVectors*EigenVectors,2))
  tmp = matrix(rep(vm+.Machine$double.eps,k),nrow=length(vm+.Machine$double.eps),ncol=k)
  EigenVectors = EigenVectors/tmp
  # Initialize the rotation R from k maximally-dissimilar rows.
  R = array(0,c(k,k))
  R[,1] = t(EigenVectors[round(n/2),])
  c = array(0,c(n,1))
  for(j in 2:k) {
    c = c + abs(EigenVectors%*%R[,(j-1)])
    R[,j] = t(EigenVectors[which.min(c),])
  }
  # Alternate between discretizing the rotated eigenvectors and refitting
  # the rotation via SVD until the Ncut objective stops improving (or the
  # iteration cap is hit).
  lastObjectiveValue = 0
  exitLoop = 0
  nbIterationsDiscretisation = 0
  nbIterationsDiscretisationMax = 20
  while(exitLoop==0) {
    nbIterationsDiscretisation = nbIterationsDiscretisation + 1
    EigenvectorsDiscrete = discretisationEigenVectorData(EigenVectors%*%R)
    res_svd = svd(t(EigenvectorsDiscrete)%*%EigenVectors+.Machine$double.eps)
    U = res_svd$u
    S = res_svd$d
    V = res_svd$v
    NcutValue=2*(n-sum(S))
    if(abs(NcutValue-lastObjectiveValue) < .Machine$double.eps || nbIterationsDiscretisation > nbIterationsDiscretisationMax) {
      exitLoop = 1
    }
    else {
      lastObjectiveValue = NcutValue
      R = V%*%t(U)
    }
  }
  res = list(EigenvectorsDiscrete=EigenvectorsDiscrete,EigenVectors=EigenVectors)
  return(res)
}
# Discretizes previously rotated eigenvectors in discretisation
# Timothee Cour, Stella Yu, Jianbo Shi, 2004
discretisationEigenVectorData <- function(EigenVector) {
  # Turn each row into a one-hot indicator selecting its largest entry
  # (first entry wins on ties), returned as a sparse 0/1 matrix.
  n_rows <- nrow(EigenVector)
  n_cols <- ncol(EigenVector)
  # Column index of the per-row maximum.
  winner <- apply(t(EigenVector), 2, function(row) which.max(row))
  sparseMatrix(i = 1:n_rows, j = t(winner), x = 1, dims = c(n_rows, n_cols))
}
# compute and returns the multiple kernel
"multiple.kernel.numc" = function( x, cores.ratio = 1 ) {
  # Build a family of Gaussian kernels over the rows of x (one per
  # neighbourhood size in allk x bandwidth in sigma), in parallel, and
  # convert each into a kernel-induced distance matrix.
  # set the parameters
  kernel.type = list()
  kernel.type[1] = list("poly")
  kernel.params = list()
  kernel.params[1] = list(0)
  # compute some parameters from the kernels
  N = dim(x)[1]
  KK = 0
  sigma = seq(2,1,-0.25)
  # compute and sort Diff
  # NOTE(review): dist2() is defined elsewhere in the SIMLR codebase;
  # presumably a pairwise squared-distance matrix — confirm.
  Diff = dist2(x)
  Diff_sort = t(apply(Diff,MARGIN=2,FUN=sort))
  # compute the combined kernels
  m = dim(Diff)[1]
  n = dim(Diff)[2]
  allk = seq(10,30,2)
  # setup a parallelized estimation of the kernels
  cores = as.integer(cores.ratio * (detectCores() - 1))
  if (cores < 1 || is.na(cores) || is.null(cores)) {
    cores = 1
  }
  cl = makeCluster(cores)
  clusterEvalQ(cl, {library(Matrix)})
  D_Kernels = list()
  # Each worker handles one neighbourhood size and emits one kernel per
  # sigma value; unlist() compacts the sparse per-worker lists.
  D_Kernels = unlist(parLapply(cl,1:length(allk),fun=function(l,x_fun=x,Diff_sort_fun=Diff_sort,allk_fun=allk,
        Diff_fun=Diff,sigma_fun=sigma,KK_fun=KK) {
    if(allk_fun[l]<(nrow(x_fun)-1)) {
      # Local bandwidth: mean distance to the allk[l] nearest neighbours,
      # symmetrized across each pair of points.
      TT = apply(Diff_sort_fun[,2:(allk_fun[l]+1)],MARGIN=1,FUN=mean) + .Machine$double.eps
      TT = matrix(data = TT, nrow = length(TT), ncol = 1)
      Sig = apply(array(0,c(nrow(TT),ncol(TT))),MARGIN=1,FUN=function(x) {x=TT[,1]})
      Sig = Sig + t(Sig)
      Sig = Sig / 2
      Sig_valid = array(0,c(nrow(Sig),ncol(Sig)))
      Sig_valid[which(Sig > .Machine$double.eps,arr.ind=TRUE)] = 1
      Sig = Sig * Sig_valid + .Machine$double.eps
      for (j in 1:length(sigma_fun)) {
        # Symmetrized Gaussian density kernel at bandwidth sigma[j]*Sig.
        W = dnorm(Diff_fun,0,sigma_fun[j]*Sig)
        D_Kernels[[KK_fun+l+j]] = Matrix((W + t(W)) / 2, sparse=TRUE, doDiag=FALSE)
      }
      return(D_Kernels)
    }
  }))
  stopCluster(cl)
  # compute D_Kernels
  # Convert each similarity kernel K into a distance via the normalized
  # kernel trick: d(i,j) = (k(i,i) + k(j,j) - 2 k(i,j)) / 2, zero diagonal.
  for (i in 1:length(D_Kernels)) {
    K = D_Kernels[[i]]
    k = 1/sqrt(diag(K)+.Machine$double.eps)
    G = K * (k %*% t(k))
    G1 = apply(array(0,c(length(diag(G)),length(diag(G)))),MARGIN=2,FUN=function(x) {x=diag(G)})
    G2 = t(G1)
    D_Kernels_tmp = (G1 + G2 - 2*G)/2
    D_Kernels_tmp = D_Kernels_tmp - diag(diag(D_Kernels_tmp))
    D_Kernels[[i]] = Matrix(D_Kernels_tmp, sparse=TRUE, doDiag=FALSE)
  }
  return(D_Kernels)
}
# perform network diffusion of K steps over the network A
# perform network diffusion of K steps over the network A
"network.diffusion.numc" <- function( A, K ) {
  # Smooth the similarity network A by keeping each node's K dominant
  # neighbours and applying an eigen-decomposition-based diffusion filter.
  # NOTE(review): dominate.set() and transition.fields() are defined
  # elsewhere in the SIMLR codebase and are not visible here.
  # set the values of the diagonal of A to 0
  diag(A) = 0
  # compute the sign matrix of A
  sign_A = A
  sign_A[which(A>0,arr.ind=TRUE)] = 1
  sign_A[which(A<0,arr.ind=TRUE)] = -1
  # compute the dominate set for A and K
  P = dominate.set(abs(A),min(K,nrow(A)-1)) * sign_A
  # sum the absolute value of each row of P
  DD = apply(abs(P),MARGIN=1,FUN=sum)
  # set DD+1 to the diagonal of P
  diag(P) = DD + 1
  # compute the transition field of P
  P = transition.fields(P)
  # compute the eigenvalues and eigenvectors of P
  eigen_P = eigen(P)
  U = eigen_P$vectors
  D = eigen_P$values
  # set to d the real part of the diagonal of D
  d = Re(D + .Machine$double.eps)
  # perform the diffusion
  # Spectral filter d -> (1-alpha)d / (1 - alpha d^beta) dampens small
  # eigenvalues while preserving the dominant structure.
  alpha = 0.8
  beta = 2
  d = ((1-alpha)*d)/(1-alpha*d^beta)
  # set to D the real part of the diagonal of d
  D = array(0,c(length(Re(d)),length(Re(d))))
  diag(D) = Re(d)
  # finally compute W
  W = U %*% D %*% t(U)
  # Rescale off-diagonal entries, symmetrize and clip negatives to zero.
  diagonal_matrix = array(0,c(nrow(W),ncol(W)))
  diag(diagonal_matrix) = 1
  W = (W * (1-diagonal_matrix)) / apply(array(0,c(nrow(W),ncol(W))),MARGIN=2,FUN=function(x) {x=(1-diag(W))})
  diag(D) = diag(D)[length(diag(D)):1]
  W = (W + t(W)) / 2
  W[which(W<0,arr.ind=TRUE)] = 0
  return(W)
}
|
825f85bd8efb54459c50bc272bc8380dd1c391ce
|
05d95ea2a571a5d502bbfe78f0c8f7e24cb5db80
|
/inst/scripts/make-metadata_19Q3.R
|
8808a98cb9ee10a091873b2b62645bdd82b03c4f
|
[] |
no_license
|
back-kom/depmap
|
aa365a3e3e7da2f0efa9998d65ca9cbcdf013b45
|
548dac61536dae83a97a7e41032d36232713338e
|
refs/heads/master
| 2022-07-14T14:40:41.075830
| 2020-04-17T15:40:09
| 2020-04-17T15:40:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,145
|
r
|
make-metadata_19Q3.R
|
### =========================================================================
### depmap metadata 19Q3 data
### -------------------------------------------------------------------------
### this script generates the metadata_19Q3.csv
## note: this script assumes that your current directory is depmap/inst/scripts/
# Assemble one metadata row per 19Q3 dataset and write it out as the
# ExperimentHub metadata CSV.
#
# Fixes relative to the original:
#  - Tags: the paste0() fragment ending "ProstateCancerData" was missing a
#    ", " separator, producing the fused tag
#    "ProstateCancerDataOrganismData" in the output CSV.
#  - Description typos: "knockdout" -> "knockout", "in scale scale" ->
#    "in log scale".
meta_19Q3 <- data.frame(
    Title = c(paste0("rnai_19Q3"),
              paste0("crispr_19Q3"),
              paste0("copyNumber_19Q3"),
              paste0("RPPA_19Q3"),
              paste0("TPM_19Q3"),
              paste0("mutationCalls_19Q3"),
              paste0("metadata_19Q3"),
              paste0("drug_sensitivity_19Q3")
              ),
    Description = c(paste0("(DEMETER2) Batch and off-target corrected RNAi ",
                           "gene knockdown dependency data for 17309 genes, ",
                           "712 cancer cell lines, 31 primary Diseases and ",
                           "31 lineages."),
                    paste0("(CERES) Batch and off-target corrected CRISPR-Cas9 ",
                           "gene knockout dependency data for 18333 genes, ",
                           "625 cell lines, 28 primary diseases and 29 ",
                           "lineages."),
                    paste0("WES log copy number data for 27562 genes, 1657 ",
                           "cell lines, 36 primary diseases and 34 lineages."),
                    paste0("Reverse Phase Protein Array (RPPA) Western Blot ",
                           "expression data from 214 genes, 899 cancer cell ",
                           "lines, 28 primary diseases and 28 lineages."),
                    paste0("CCLE 'Transcript Per Million' (TPM) RNAseq gene ",
                           "expression data (in log scale (log2(TPM+1))) ",
                           "for protein coding genes from 19144 genes, 1210 ",
                           "cancer cell lines, 32 primary diseases and 33 ",
                           "lineages."),
                    paste0("Merged mutation calls (for coding region, germline ",
                           "filtered) and includes data from 18798 genes, 1656 ",
                           "cell lines, 36 primary diseases and 34 lineages. "),
                    paste0("Metadata for cell lines in the 19Q3 DepMap release, ",
                           "for 0 genes, 1736 cell lines, 37 primary diseases ",
                           "and 34 lineages."),
                    paste0("Drug sensitivity data for cancer cell lines ",
                           "derived from replicate collapsed logfold change ",
                           "values relative to DMSO, corrected for experimental ",
                           "confounders using ComBat representing 4686 compounds, ",
                           "578 cell lines, 23 primary diseases and 25 lineages")
                    ),
    BiocVersion = "3.10",
    Genome = "",
    SourceType = "CSV",
    SourceUrl = c(paste0("https://ndownloader.figshare.com/files/13515395"),
                  paste0("https://ndownloader.figshare.com/files/16757666"),
                  paste0("https://ndownloader.figshare.com/files/16757699"),
                  paste0("https://depmap.org/portal/download/api/download/external?file_name=ccle%2Fccle_2019%2FCCLE_RPPA_20181003.csv"),
                  paste0("https://ndownloader.figshare.com/files/16757690"),
                  paste0("https://ndownloader.figshare.com/files/16757702"),
                  paste0("https://ndownloader.figshare.com/files/16757723"),
                  paste0("https://ndownloader.figshare.com/files/17008628")
                  ),
    SourceVersion = "Aug 7 2019",
    Species = "Homo sapiens",
    TaxonomyId = 9606,
    Coordinate_1_based = TRUE,
    DataProvider = "Broad Institute",
    Maintainer = "Theo Killian <theodore.killian@uclouvain.be>",
    RDataClass = "tibble",
    DispatchClass = "Rda",
    RDataPath = c(paste0("depmap/rnai_19Q3.rda"),
                  paste0("depmap/crispr_19Q3.rda"),
                  paste0("depmap/copyNumber_19Q3.rda"),
                  paste0("depmap/RPPA_19Q3.rda"),
                  paste0("depmap/TPM_19Q3.rda"),
                  paste0("depmap/mutationCalls_19Q3.rda"),
                  paste0("depmap/metadata_19Q3.rda"),
                  paste0("depmap/drug_sensitivity_19Q3.rda")
                  ),
    Tags=paste0("ExperimentHub, ExperimentData, ReproducibleResearch, RepositoryData, AssayDomainData, ",
                "CopyNumberVariationData, DiseaseModel, CancerData, BreastCancerData, ColonCancerData, ",
                "KidneyCancerData, LeukemiaCancerData, LungCancerData, OvarianCancerData, ProstateCancerData, ",
                "OrganismData, Homo_sapiens_Data, PackageTypeData, SpecimenSource, CellCulture, Genome, ",
                "Proteome, StemCell, Tissue"),
    Notes = "This dataset is from the 19Q3 release")
write.csv(meta_19Q3, file="../extdata/metadata_19Q3.csv", row.names=FALSE)
## to upload this metadata to EH, enter the following into the CL
## ExperimentHubData::makeExperimentHubMetadata("~/tmp/depmap/", fileName = "metadata_19Q3.csv")
|
cf844a8fa4fd9d97995b27b5dda82fec13a429a7
|
a0108f3ebcea5379c09976db1d7c22cf426aa738
|
/Chapter 11 - Introduction to Moderation/chap11 - analyzing data.R
|
8ec3825d9591e6751827b103f59f58a5f5009b7d
|
[
"MIT"
] |
permissive
|
ajb254/BehavioralDataAnalysis
|
8562f1a4d4da8c9a26960d031da003a490f16e1a
|
accc4b523f868d0d74ab003a1a9e698d9abe1449
|
refs/heads/master
| 2023-04-26T21:38:45.541798
| 2021-05-30T08:14:22
| 2021-05-30T08:14:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,985
|
r
|
chap11 - analyzing data.R
|
#################################
##### This script analyzes the data used in chapter 11,
##### Introduction to Moderation
#################################
##### Setup #####
# Common libraries
suppressMessages(suppressWarnings(library(tidyverse)))
library(boot) #Required for Bootstrap simulations
library(rstudioapi) #To load data from local folder
library(ggpubr) #To generate multi-plots
# Libraries for high-performance Bootstrap
library(mltools) #For function one_hot
library(data.table) #For function as.data.table
library(Rfast) #For function lmfit
library(parallel)
library(doParallel)
### Setting the working directory to the parent folder of this script (Rstudio only)
# NOTE(review): setwd() inside a script is an anti-pattern but intentional
# here so the CSV path resolves relative to this script in RStudio.
sourceDir <- rstudioapi::getActiveDocumentContext()$path %>% str_extract("^.+/")
setwd(sourceDir)
set.seed(1234)
options(scipen=10)
#Reading the data
hist_data <- read_csv("chap11-historical_data.csv")
#Formatting the data
# Categorical columns are converted to factors for the regressions below.
hist_data <- hist_data %>%
  mutate(store_id = factor(store_id)) %>%
  mutate(day = factor(day)) %>%
  mutate(children = factor(children)) %>%
  mutate(play_area = factor(play_area))
##### Section 1: Varieties of Moderation #####
#### Segmentation ####
# Moderated regression: duration on play_area, children and their interaction.
summary(lm(duration~play_area * children, data=hist_data))
### Figure 11-3. Visual representation of moderation ###
### Figure 11-3. Visual representation of moderation ###
# Plot the mean visit duration by `children`, one line per `play_area` level.
viz_fun_11.3 <- function(dat){
  #Representing visually moderation
  summary_dat0 <- dat %>%
    group_by(children,play_area) %>%
    summarize(avg_duration = mean(duration)) %>%
    data.frame()
  mod_p0 <- ggplot(summary_dat0, aes(x=children, y=avg_duration,
                                     group=play_area)) + ylim(c(0,65)) +
    geom_point() + geom_line(aes(lty=play_area)) + ylab("Average visit duration") +
    scale_linetype_manual(values=c("dotted", "solid"))
  mod_p0
}
viz_fun_11.3(hist_data)
#### Non-linearities ####
#Toy dataset for this subsection
# Synthetic logarithmic (Purchases~Emails) and quadratic
# (Customers~Properties) relationships with small Gaussian noise.
nonlin_data <- tibble(
  Emails = seq(from = 1, to = 10, by = 0.5),
  Purchases = log(Emails) + rnorm(19, 0, 0.1),
  Properties = seq(from = 1, to = 10, by = 0.5),
  Customers = Properties^2 + rnorm(19, 0, 2))
# Fitted values from a linear and a quadratic (self-moderated) model,
# kept for the comparison plot in Figure 11-15.
nonlin_data <- nonlin_data %>%
  mutate(pred_PP_lin = lm(data=nonlin_data, Purchases~Emails)$fitted.values,
         pred_PP_quad = lm(data=nonlin_data,
                           Purchases~Emails+I(Emails^2))$fitted.values)
### Figure 11-14. Non-linear relationships between variables ###
viz_fun_11.14 <- function(dat){
  p1 <- ggplot(dat, aes(Emails, Purchases)) + geom_point() + xlab("avg. monthly marketing emails") + ylab("avg. monthly purchases")
  p2 <- ggplot(dat, aes(Properties, Customers)) + geom_point() + xlab("number of properties (1000s)") + ylab("number of customers (1000s)")
  ggarrange(p1, p2, ncol=2, nrow=1)
}
viz_fun_11.14(nonlin_data)
### Figure 11-15. Linear (dashed) and quadratic (solid) lines of best fit ###
viz_fun_11.15 <- function(dat){
  ggplot(dat, aes(Emails, Purchases)) + geom_point(shape=16) +
    xlab("avg. monthly marketing emails") +
    ylab("avg. monthly purchases") +
    geom_line(aes(y=pred_PP_lin, col='red'), lty="dashed", show.legend=FALSE) +
    geom_line(aes(y=pred_PP_quad, col='blue'), show.legend=FALSE)
}
viz_fun_11.15(nonlin_data)
# Syntax for self-moderation
summary(lm(Purchases ~ Emails + I(Emails^2), data=nonlin_data))
##### Section 2: How To Apply Moderation #####
#### When to look for moderation ####
#hist_data <- hist_data %>% mutate(age_quart = ntile(age, 4))
#### Multiple Moderators ####
### Figure 11-23. Moderated moderation across different age groups ###
# Same moderation plot as Figure 11-3, faceted by decade-of-age bucket.
viz_fun_11.23 <- function(dat){
  summary_dat <- dat %>%
    mutate(age_grp = factor(round(age/10)*10, ordered = TRUE,
                            levels = c("20","30","40","50","60","70","80"))) %>%
    group_by(children,play_area, age_grp) %>%
    summarize(avg_duration = mean(duration)) %>%
    data.frame()
  mod_p1 <- ggplot(summary_dat, aes(x=children, y=avg_duration, group=play_area)) +
    ylim(c(0,65)) + geom_point() + geom_line(aes(lty=play_area)) +
    labs(y="Average visit duration") + scale_linetype_manual(values=c("dotted",
                                                                      "solid")) +
    facet_grid(.~age_grp)
  mod_p1
}
viz_fun_11.23(hist_data)
#Regression summary
# Three-way (moderated moderation) interaction model.
summary(lm(duration~play_area * children * age, data=hist_data))
#### Validating Moderation With Bootstrap ####
# Function for bootstrap
# Draw B bootstrap samples of size N from `dat` and refit the moderated
# regression duration ~ play_area + children + play_area:children + age on
# each, in parallel.  Returns one row of coefficients per bootstrap sample.
mod_boot_fun <- function(dat, B, N){
  set.seed(1)
  #One-hot encoding factors in data
  dat <- dat %>%
    dplyr::select(-day,-store_id,-prop_children) %>%
    as.data.table() %>%
    mltools::one_hot(dropUnusedLevels = TRUE) %>%
    mutate(const = 1) %>%
    #Adding moderation variable
    mutate(inter = children_1 * play_area_1)
  #Converting the original data to a matrix
  # A plain numeric matrix lets Rfast::lmfit avoid data-frame overhead.
  mat <- data.matrix(dat)
  #Preallocating memory to result vector
  boot_list <- list()
  #Generating the random numbers for the bootstrap
  # Column k of rng holds the N row indices for bootstrap sample k.
  rng <- matrix(data = sample(1:N, size = N*B, replace = TRUE),
                nrow = N, ncol = B)
  # One bootstrap iteration: resample rows, fit OLS, extract coefficients.
  loop_fun <- function(k){
    if(k %% 10 == 0){
      cat("starting iteration", k, "\n")
    }
    boot_mat <- mat[rng[,k],]
    #Coefficients for moderated effect
    X <- boot_mat[,c('play_area_1', 'children_1', 'inter', 'age', 'const')]
    Y <- boot_mat[,'duration']
    coeffs <- (Rfast::lmfit(X, Y)$be)
    res <- c(
      sample_size = N,
      coeff_0 = as.numeric(coeffs['const',]),
      coeff_p = as.numeric(coeffs['play_area_1',]),
      coeff_c = as.numeric(coeffs['children_1',]),
      coeff_i = as.numeric(coeffs['inter',])
    )
    return(res)
  }
  ## Parallelized bootstrap
  #Detecting the number of cores for parallel processing
  numCores <- detectCores()
  registerDoParallel(numCores)
  boot_list <- foreach(i=1:B, .packages = 'Rfast') %dopar% loop_fun(i)
  stopImplicitCluster()
  boot_summary <- bind_rows(boot_list)
  return(boot_summary)
}
#Running the bootstrap simulation with 1k samples of 10k rows
# NOTE(review): the comment says 10k rows but N = 1e5 (100k) — confirm.
mod_summ <- mod_boot_fun(hist_data, B = 1e3, N = 1e5)
### Figure 11-25. Distribution of bootstrapped values for interaction coefficient (1k samples of 10k rows) ###
# Histogram of the bootstrapped interaction coefficient, with the point
# estimate marked in red.
viz_fun_11.25 <- function(dat){
  ggplot(dat, aes(x=coeff_i)) + geom_histogram() + xlim(c(19.5,22.5)) +
    geom_vline(xintercept = 20.985, col='red') + xlab("beta_pc")
}
viz_fun_11.25(mod_summ)
#Running the bootstrap simulation with 1k samples of 200k rows
mod_summ <- mod_boot_fun(hist_data, B = 1e3, N = 2e5)
### Figure 11-26. Distribution of bootstrapped values for interaction coefficient (1k samples of 200k rows) ###
# Histogram of the bootstrapped interaction coefficient.
viz_fun_11.26 <- function(dat){
  # Fix: the original body referenced the global `mod_summ` instead of the
  # `dat` argument, silently ignoring whatever the caller passed in.
  ggplot(dat, aes(x=coeff_i)) + geom_histogram() + xlim(c(19.5,22.5)) +
    geom_vline(xintercept = 20.985, col='red') + xlab("beta_pc")
}
viz_fun_11.26(mod_summ)
#### Interpreting individual coefficients ####
# Creating smaller sample for readability of figures
set.seed(1)
extract_dat <- hist_data %>% slice_sample(n=1e3)
### Figure 11-27. Sample of 1,000 data points and regression lines with a play area (full dots, solid line) and without (crosses, dashed line), without moderation term ###
viz_fun_11.27 <- function(dat){
  # Fix: the original body referenced the global `extract_dat` instead of
  # the `dat` argument, silently ignoring whatever the caller passed in.
  ggplot(dat, aes(x=age, y=duration, col=play_area)) +
    geom_point(aes(shape = play_area), alpha = 0.8) +
    scale_shape_manual(values=c(4, 16)) +
    geom_abline(intercept = 25, slope = -0.024, col = 'red', lty='dashed', size = 1.5) +
    geom_abline(intercept = 25+12.5568, slope = -0.024, col = 'blue')
}
viz_fun_11.27(extract_dat)
### Figure 11-28. Sample of 1,000 data points and regression lines with a play area (full dots, solid line) and without (crosses, dashed line), with moderation term ###
viz_fun_11.28 <- function(dat){
  # Fix: the original body referenced the global `extract_dat` instead of
  # the `dat` argument, silently ignoring whatever the caller passed in.
  ggplot(dat, aes(x=age, y=duration, col=play_area)) +
    geom_point(aes(shape = play_area), alpha = 0.8) +
    scale_shape_manual(values=c(4, 16)) +
    geom_abline(intercept = 23.82, slope = 0, col = 'red', lty='dashed', size = 1.5) +
    geom_abline(intercept = 23.82+15.852, slope = -0.0659, col = 'blue')
}
viz_fun_11.28(extract_dat)
### Setting meaningful reference points
#Centering age
centered_data <- hist_data %>%
  mutate(age = age - mean(age))
#Resetting default for play_area
# NOTE(review): this second assignment starts from `hist_data` again, so
# it discards the age-centering applied just above. If both transformations
# are meant to combine, the pipe should start from `centered_data` — confirm
# against the book's intent before changing.
centered_data <- hist_data %>%
  mutate(play_area = factor(play_area, levels=c('1','0')))
### Calculating effects at the level of business decisions
# Estimates, per store, the predicted change in visit duration if the stores
# currently WITHOUT a play area (play_area == 0) were given one, using a
# regression in which play_area moderates the children and age effects.
# Returns one row per store_id with the mean and total per-visit uplift.
business_metric_fun <- function(dat){
  # Moderated regression: play_area interacts with both children and age.
  mod_model <- lm(duration~play_area * (children + age), data=dat)
  # Restrict to the stores where the action (adding a play area) applies.
  action_dat <- dat %>%
    filter(play_area == 0)
  # Predict duration as-is, then flip play_area to '1' ...
  action_dat <- action_dat %>%
    mutate(pred_dur0 = predict(mod_model, action_dat)) %>%
    mutate(play_area = factor('1', levels=c('0', '1')))
  # ... and predict again to get the counterfactual duration with a play
  # area; summarize the per-visit uplift by store.
  action_dat <- action_dat %>%
    mutate(pred_dur1 = predict(mod_model, action_dat)) %>%
    mutate(pred_dur_diff = pred_dur1 - pred_dur0) %>%
    dplyr::group_by(store_id) %>%
    summarise(mean_d = mean(pred_dur_diff), sum_d = sum(pred_dur_diff))
  return(action_dat)
}
action_summ_dat <- business_metric_fun(hist_data)
summary(action_summ_dat)
|
968443c8d8866bee97c3da186789a19650f06b80
|
a84a1f8417a7076e07d279eb91f436819802ad86
|
/clustering/routine_data_preparation.r
|
0df5c61cd074d459b56b82e46bd7a002ec3b1a12
|
[] |
no_license
|
mlukasik/spines
|
1fbb6648663459f29be60529ec88fb56069c82cc
|
a2897ee35b5498a456bc53224e86dec0c7aa03ed
|
refs/heads/master
| 2021-01-10T20:44:33.523480
| 2016-03-30T08:44:03
| 2016-03-30T08:44:03
| 40,179,987
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,153
|
r
|
routine_data_preparation.r
|
# Routine that loads and prepares data for global typology generation.
source(file="loading.r")
source(file="pca.r")
print("DATA LOADING AND (PCA) TRANSFORMING")
# Default the test file to the training file when none was supplied.
if (!(exists("test.data.file"))){
  test.data.file = train.data.file
}
# Print parameters
print(paste("train.data.file =", train.data.file))
print(paste("test.data.file =", test.data.file))
print(paste("groups.ids =", groups.ids))
print(paste("features.names.t0 =", paste(features.names.t0, collapse=" ")))
print(paste("features.names.t1 =", paste(features.names.t1, collapse=" ")))
print(paste("features.names.nice =", paste( features.names.nice, collapse=" ")))
print(paste("pca.feature.groups =", paste(pca.feature.groups, collapse=" ")))
print(paste("pca.num.features =", pca.num.features))
print(paste("normalization =", normalization))
if (exists("images.base.dir"))
  print(paste("images.base.dir =", images.base.dir))
# Bug fix: the three exists() checks below originally tested
# "iadditional.features.names*" (a stray leading "i"), so these parameters
# were never printed even when they were defined.
if (exists("additional.features.names"))
  print(paste("additional.features.names =", paste(additional.features.names, collapse=" ")))
if (exists("additional.features.names.t0"))
  print(paste("additional.features.names.t0 =", paste(additional.features.names.t0, collapse=" ")))
if (exists("additional.features.names.t1"))
  print(paste("additional.features.names.t1 =", paste(additional.features.names.t1, collapse=" ")))
# Loads input data.
train = DataLoadingRoutine(train.data.file, groups.ids, features.names.t0, features.names.t1,
                           features.names.nice, additional.features.names,
                           additional.features.names.t0, additional.features.names.t1,
                           spine.id.field.separator)
test = DataLoadingRoutine(test.data.file, groups.ids, features.names.t0, features.names.t1,
                          features.names.nice, additional.features.names,
                          additional.features.names.t0, additional.features.names.t1,
                          spine.id.field.separator)
# Short description string of the training set (N, dimensionality, per-group
# counts). NOTE(review): `group.id` is assumed to be defined by loading.r.
description.str = paste('N=',dim(train)[1],',D=',pca.num.features,",#",
                        groups.ids[1],'=',sum(train[,group.id]==groups.ids[1]),',#',
                        groups.ids[2],'=',sum(train[,group.id]==groups.ids[2]), sep='')
print(description.str)
print("summary(train):")
print(summary(train))
print("summary(test):")
print(summary(test))
# since now all features all called features.names.nice and time is stored in time.id
#######################################################################################
#######################################################################################
source(file="routine_contours.r")
#######################################################################################
#######################################################################################
# Changes representation using PCA
pcs = PCAAnalysisRoutine(train, features.names.nice, pca.feature.groups, output.dir)
train.features = PCAPredictMany(train, pcs, pca.num.features)
test.features = PCAPredictMany(test, pcs, pca.num.features)
if (normalization) {
  train.features = scale(train.features)
  test.features = scale(test.features)
}
|
4bebb4e0abbbbbaadb14143a3d84f04a565287f0
|
948eacbfad766b6345859b18fd9fcf969d57efe5
|
/Discovering Behaviors with Unsupervised Learning/kmeans_steps.R
|
e52da8c5aed9d850e7e5f327605a2b7d330e325c
|
[] |
no_license
|
enriquegit/behavior-code
|
5d795de076f0adb4d17ab5cc8219e2c799d5b133
|
1c5063c223653a4d1375e992082d95c76751a731
|
refs/heads/master
| 2023-05-04T21:19:45.758564
| 2021-05-13T13:47:45
| 2021-05-13T13:47:45
| 297,452,697
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,318
|
r
|
kmeans_steps.R
|
# This script demonstrates k-means implementation.
# It loads a students' mental-health dataset, normalizes two variables
# (ToAS, ToSC), then runs a hand-written k-means loop, plotting the first
# few iterations.
source(file.path("..","auxiliary_functions","globals.R"))
source(file.path("..","auxiliary_functions","functions.R"))
# Prepare students mental health dataset.
# Load students mental health behavior dataset.
dataset <- read.csv(file.path(datasets_path,"students_mental_health",
                              "data.csv"),
                    stringsAsFactors = F)
# Replace empty strings with NAs so the following methods will work properly.
library(naniar)
dataset <- replace_with_na_all(dataset, ~.x %in% common_na_strings)
# Since the last rows starting at 269 are full of missing values we will discard them.
dataset <- dataset[1:268,]
# Discard internet column since it has many missing values.
dataset <- dataset[, -39]
# Remove rows with missing values in the variable Intimate.
dataset <- dataset[-which(is.na(dataset$Intimate)), ]
# Select which variables are going to be used for clustering.
selvars <- c("ToAS","ToSC")
# Reduced dataset after dropping unused columns.
reddataset <- dataset[, selvars]
# Normalize variables from 0 to 1.
# Since we are not dividing into train and test sets we pass the same dataset
# to the normalize() function defined in auxiliary_functions/functions.R
res <- normalize(reddataset, reddataset)
# Extract normalized data frame from result.
data <- res$train
# Compute centroid of all data points
centroid <- colMeans(data[,c(1,2)])
# Plot points
plot(data$ToAS, data$ToSC)
# Plot centroid
points(centroid[1], centroid[2], col="red", pch=18, cex = 2)
############################
#### Implement k-means. ####
############################
dataset <- as.data.frame(data)
k <- 3 # number of clusters.
maxiter <- 10 # maximum number of iterations.
plotit <- 4 # plot first plotit iterations.
# Generate random centroids by picking k random points from dataset.
set.seed(124)
centroids <- dataset[sample(nrow(dataset), size = k, replace = F),]
# Plot dimensions
pwidth <- 5; pheight <- 4
#png("clust_it0.png", width = pwidth, height = pheight, units = "in", res = 200)
plot(data$ToAS, data$ToSC, xlab = "ToAS", ylab = "ToSC", main = "Initial random centroids")
points(centroids$ToAS, centroids$ToSC, col=2:4, pch=18, cex = 1.5)
#dev.off()
# Keep track to which group each point is assigned to.
assignments <- rep(-1, nrow(dataset))
#png(paste0("clust_its.png"), width = pwidth*2, height = pheight*2, units = "in", res = 200)
par(mfrow=c(2,2))
# NOTE(review): the loop always runs maxiter iterations — there is no
# convergence check. It also assumes every cluster keeps at least one
# member; an empty cluster would make colMeans() produce NaN centroids.
for(iter in 1:maxiter){
  print(paste0("Iteration ",iter))
  # compute distance to closest centroid
  # Assignment step: for each point, find the nearest centroid (Euclidean
  # distance via dist() on a 2-row matrix).
  for(i in 1:nrow(dataset)){
    p <- dataset[i,]
    mind <- Inf # min distance
    minc <- -1 # min cluster
    for(j in 1:k){
      d <- dist(rbind(centroids[j,],p))
      if(d < mind){
        mind <- d
        minc <- j
      }
    }
    assignments[i] <- minc
  }
  # Plot the current assignments for the first `plotit` iterations only.
  if(iter <= plotit){
    #png(paste0("clust_it",iter,".png"), width = pwidth, height = pheight)
    plot(dataset$ToAS, dataset$ToSC, xlab = "ToAS", ylab = "ToSC", col=assignments+1, cex=1, main=paste0("Iteration ",iter))
    points(centroids$ToAS, centroids$ToSC, col=2:4, pch=18, cex = 2.5)
    #dev.off()
  }
  # update centroids.
  # Update step: move each centroid to the mean of its assigned points.
  for(i in 1:k){
    idxs <- which(assignments == i)
    centroids[i,] <- colMeans(dataset[idxs,])
  }
} # end of maxiter
#dev.off()
|
2c9a68df1c85bc40a72e7c9761215be31d9953fb
|
645ba07f53773e9e4fa2b1462dd46293d3405d76
|
/plot4.R
|
fb8171b77ca777276c2c81abfe9f85e05e895313
|
[] |
no_license
|
PanBartosz/ExData_Plotting1
|
98ba59423a52ae64e4296d1d3845362072ca0bb7
|
1b80a464af7cce8041e48c641222365b2c1cbff9
|
refs/heads/master
| 2021-01-24T22:36:41.308242
| 2015-06-07T19:15:21
| 2015-06-07T19:15:21
| 37,027,941
| 0
| 0
| null | 2015-06-07T18:49:15
| 2015-06-07T18:49:14
| null |
UTF-8
|
R
| false
| false
| 1,072
|
r
|
plot4.R
|
# Panel of four plots (global active power, sub-metering, voltage, reactive
# power) for Feb 1-2, 2007, copied to plot4.png.
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
feb <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
# Combine date and time into a single POSIXlt timestamp.
feb$Date <- strptime(paste(feb$Date, feb$Time), format = "%d/%m/%Y %H:%M:%S")
# 2x2 panel layout, filled column by column.
par(mfcol = c(2, 2))
# Columns may have been read as factors; convert via character to numeric.
num <- function(v) as.numeric(as.vector(v))
plot(feb$Date, num(feb$Global_active_power), type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
plot(feb$Date, num(feb$Sub_metering_1), type = "n",
     ylab = "Energy sub metering", xlab = "")
lines(feb$Date, num(feb$Sub_metering_1))
lines(feb$Date, num(feb$Sub_metering_2), col = "red")
lines(feb$Date, num(feb$Sub_metering_3), col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), cex = 0.25)
plot(feb$Date, num(feb$Voltage), type = "l", ylab = "Voltage", xlab = "datetime")
plot(feb$Date, num(feb$Global_reactive_power), type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")
# Copy the on-screen device to a PNG file.
dev.copy(png, file = "plot4.png")
dev.off()
|
846769ac8b0f2eeb6b24fca2e0006465a0fc10f6
|
a2170e32571a703e4acf2f2dcc0880044866b20c
|
/man/Timezone.Rd
|
1b0d6611f9b48eb08230e16737d206f0cc209ee4
|
[
"MIT"
] |
permissive
|
grepinsight/lookr
|
de7cfd7e5870fc2f2296771be5c6ba64d1994f39
|
c8a5e2f0a55bbfc3ce056974f09ade9187359951
|
refs/heads/master
| 2020-10-01T03:55:06.264726
| 2020-09-09T22:07:10
| 2020-09-09T22:07:10
| 227,449,023
| 1
| 0
|
MIT
| 2019-12-11T19:57:46
| 2019-12-11T19:57:45
| null |
UTF-8
|
R
| false
| true
| 2,676
|
rd
|
Timezone.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timezone.r
\name{Timezone}
\alias{Timezone}
\title{Timezone Class}
\description{
Timezone Class
Timezone Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{value}}{}
\item{\code{label}}{}
\item{\code{group}}{}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{Timezone$new()}}
\item \href{#method-toJSON}{\code{Timezone$toJSON()}}
\item \href{#method-fromJSON}{\code{Timezone$fromJSON()}}
\item \href{#method-toJSONString}{\code{Timezone$toJSONString()}}
\item \href{#method-fromJSONString}{\code{Timezone$fromJSONString()}}
\item \href{#method-clone}{\code{Timezone$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$new(value, label, group)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$fromJSON(TimezoneJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$fromJSONString(TimezoneJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{Timezone$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
03be97a9efd50b9a4e49cdae6b2071b11e12d4f9
|
c7ea655b5fa1a26c34beb3449a86f3aa32dd5e1f
|
/simple.R
|
a51be7050cae3aa2de7679e77379e3a1dac98e93
|
[] |
no_license
|
tdhock/datatable-foverlaps
|
0255f2be539020abe52fac2c9a4136a238150574
|
fd202ea93a1cb67c11c783d40f287f8b8041b36b
|
refs/heads/master
| 2021-01-22T04:48:17.153576
| 2015-02-12T19:00:54
| 2015-02-12T19:00:54
| 30,076,054
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,174
|
r
|
simple.R
|
# Compares interval-overlap results from bedtools intersectBed,
# data.table::foverlaps and GenomicRanges::findOverlaps (with and without
# a +1 start-coordinate shift) on a tiny bedGraph, and records whether each
# method matches the expected coverage values.
works_with_R("3.1.2",
             GenomicRanges="1.18.4",
             data.table="1.9.4")
bedGraph <- data.table(chrom="chr1",
                       chromStart=as.integer(c(0, 200)),
                       chromEnd=as.integer(c(200, 300)),
                       coverage=as.integer(c(1, 2)))
bg.gr <- with(bedGraph, {
  GRanges(chrom, IRanges(chromStart, chromEnd), coverage=coverage)
})
# 1-based variant of the same ranges (BED is 0-based half-open).
bg.gr.1 <- with(bedGraph, {
  GRanges(chrom, IRanges(chromStart+1L, chromEnd), coverage=coverage)
})
bg.1 <- with(bedGraph, {
  data.table(chrom,
             chromStart=chromStart+1L,
             chromEnd,
             coverage)
})
# Builds one test case: a query window plus the coverage values expected
# to overlap it.
test <- function(chromStart, chromEnd, expected){
  list(window=data.table(chrom="chr1",
                         chromStart=as.integer(chromStart),
                         chromEnd=as.integer(chromEnd)),
       expected=as.integer(expected))
}
tests <-
  list(test(200, 1000, 2),
       test(199, 1000, c(1, 2)),
       test(0, 200, 1),
       test(0, 201, c(1, 2)))
# Each method takes a one-row window data.table and returns the coverage
# values of the bedGraph rows it overlaps.
methods <-
  list("intersectBed-2.22.1"=function(win){
    write.table(win, "win.bed",
                quote=FALSE, row.names=FALSE, sep="\t", col.names=FALSE)
    write.table(bedGraph, "bg.bedGraph",
                quote=FALSE, row.names=FALSE, sep="\t", col.names=FALSE)
    cmd <-
      paste("bedtools2/bin/intersectBed",
            "-sorted",
            "-wa -wb",
            "-a win.bed",
            "-b bg.bedGraph",
            "> overlap.bedGraph")
    system(cmd)
    overlap.df <- read.table("overlap.bedGraph")
    names(overlap.df) <-
      c("chrom.win", "chromStart.win", "chromEnd.win",
        "chrom.bg", "chromStart.bg", "chromEnd.bg", "coverage.bg")
    overlap.df$coverage.bg
  }, foverlaps=function(win){
    setkey(win, chrom, chromStart, chromEnd)
    overlap.dt <- foverlaps(bedGraph, win, nomatch=0L)
    overlap.dt$coverage
  }, findOverlaps=function(win){
    win.gr <- with(win, GRanges(chrom, IRanges(chromStart, chromEnd)))
    hits.gr <- findOverlaps(bg.gr, win.gr)
    overlap.df <- as.data.frame(bg.gr[queryHits(hits.gr), ])
    overlap.df$coverage
  }, "foverlaps+1"=function(win){
    win$chromStart <- win$chromStart+1L
    setkey(win, chrom, chromStart, chromEnd)
    overlap.dt <- foverlaps(bg.1, win, nomatch=0L)
    overlap.dt$coverage
  }, "findOverlaps+1"=function(win){
    win.gr <- with(win, GRanges(chrom, IRanges(chromStart+1L, chromEnd)))
    hits.gr <- findOverlaps(bg.gr.1, win.gr)
    overlap.df <- as.data.frame(bg.gr[queryHits(hits.gr), ])
    overlap.df$coverage
  })
# Run every method on every test case and collect pass/fail status.
result.list <- list()
for(test.i in seq_along(tests)){
  test.list <- tests[[test.i]]
  for(method in names(methods)){
    fun <- methods[[method]]
    computed <- fun(test.list$window)
    result <- all.equal(computed, test.list$expected)
    # Bug fix: the original used ifelse(isTRUE(result), "correct", result).
    # ifelse() is vectorized and returned only result[1], silently dropping
    # the rest of all.equal()'s mismatch messages when there were several.
    status <- if (isTRUE(result)) "correct" else paste(result, collapse="; ")
    result.list[[paste(test.i, method)]] <-
      data.table(test.i, method, status,
                 expected=paste(test.list$expected, collapse=","),
                 computed=paste(computed, collapse=","),
                 chromStart=test.list$window$chromStart,
                 chromEnd=test.list$window$chromEnd)
  }
}
simple <- do.call(rbind, result.list)
save(simple, file="simple.RData")
|
bca8e8ca1b38a12f0767a8cce636217863e3f6fc
|
234a13f1c58ed3e724b54943c1a2ed6e12d77c50
|
/Course Project 1 Setup.R
|
860e903b92640541e55e8d5d8d4abf0690044b51
|
[] |
no_license
|
kward34/RepData_PeerAssessment1
|
f01a2e1c9356f2ae9514eeb1e84b15772a4b4320
|
9b466be6bd98ae61426f0bd597617f822d4a0747
|
refs/heads/master
| 2020-12-02T07:46:31.961414
| 2017-07-10T03:26:54
| 2017-07-10T03:26:54
| 96,724,492
| 0
| 0
| null | 2017-07-10T01:43:49
| 2017-07-10T01:43:48
| null |
UTF-8
|
R
| false
| false
| 383
|
r
|
Course Project 1 Setup.R
|
# Download and unpack the activity-monitoring dataset for the assignment.
setwd("~/Documents/Documents/Important Files/Hopkins/Data Science/Reproducable Research/Week 2")
data_dir <- "./projectdata"
if (!file.exists(data_dir)) {
  dir.create(data_dir)
}
zip_path <- file.path(data_dir, "projectDataset.zip")
download.file("https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip",
              destfile = zip_path)
unzip(zipfile = zip_path, exdir = data_dir)
|
dd9554df0899327983275540a9adac71eea0b328
|
cc28ab14a36ec63a828b840ea2f99a32c9dc5fdc
|
/man/smoking.Rd
|
5fc9baed271d166521ad9a028fec8f408b1eb198
|
[] |
no_license
|
cran/HSAUR3
|
7dfea10c9c3c91784ed0644bcfdda0ae91adef8b
|
02c2e7664afeb6d33c3b4bf3e28748b4a5b885aa
|
refs/heads/master
| 2023-04-28T01:39:02.583604
| 2023-04-15T06:10:02
| 2023-04-15T06:10:02
| 18,895,724
| 5
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,118
|
rd
|
smoking.Rd
|
\name{smoking}
\alias{smoking}
\docType{data}
\title{ Nicotine Gum and Smoking Cessation }
\description{
Data from a meta-analysis on nicotine gum and smoking cessation
}
\usage{data("smoking")}
\format{
A data frame with 26 observations (studies) on the following 4 variables.
\describe{
\item{\code{qt}}{the number of treated subjects who stopped
smoking.}
\item{\code{tt}}{the total number of treated subjects.}
\item{\code{qc}}{the number of subjects who stopped
smoking without being treated.}
\item{\code{tc}}{the total number of subjects not being treated.}
}
}
\details{
Cigarette smoking is the leading cause of preventable death in
the United States and kills more Americans than AIDS, alcohol,
illegal drug use, car accidents, fires, murders and suicides
combined. It has been estimated that 430,000 Americans die from
smoking every year. Fighting tobacco use is, consequently, one
of the major public health goals of our time and there are now
many programs available designed to help smokers quit. One of
the major aids used in these programs is nicotine chewing gum,
which acts as a substitute oral activity and provides a source
of nicotine that reduces the withdrawal symptoms experienced
when smoking is stopped. But separate randomized clinical trials
of nicotine gum have been largely inconclusive, leading
Silagy (2003) to consider combining the results of studies
found from an extensive literature search. The results of these
trials in terms of numbers of people in the treatment arm and
the control arm who stopped smoking for at least 6 months after
treatment are given here.
}
\source{
C. Silagy (2003), Nicotine replacement therapy for smoking
cessation (Cochrane Review). \emph{The Cochrane Library},
\bold{4}, John Wiley & Sons, Chichester.
}
\examples{
data("smoking", package = "HSAUR3")
boxplot(smoking$qt/smoking$tt,
smoking$qc/smoking$tc,
names = c("Treated", "Control"), ylab = "Percent Quitters")
}
\keyword{datasets}
|
b4672fce52031c9bee7df9ca6d7713b92289a966
|
38915da347869e164d9485f9bbd394fe56d2fcb0
|
/1_1MFScondist_jp/ui_Chi.R
|
76c63f28d34a1d518ae299c4d0e38d5206c02551
|
[
"MIT"
] |
permissive
|
mephas/mephas_web
|
24df65c5bdbf1e65c91523f4bfd120abae03e409
|
197e99828d6b9a6a3c1d11b2fc404c9631103ec0
|
refs/heads/master
| 2023-07-21T14:29:37.597163
| 2023-07-13T03:32:32
| 2023-07-13T03:32:32
| 161,571,944
| 8
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,516
|
r
|
ui_Chi.R
|
#****************************************************************************************************************************************************1.6. chi
# Shiny UI for the chi-squared distribution page: a sidebar to pick the data
# source (formula-based / simulation-based / user data) and set parameters,
# and a main panel with the matching plots and summary tables.
sidebarLayout(
  sidebarPanel(
    h4(tags$b("Step 1. データソースを選ぶ")),
    p("数式ベース、シミュレーションベース、又は、ユーザのデータベース"),
    # Data-source selector; its value drives the conditionalPanel()s below.
    selectInput(
      "InputSrc_x", "選択肢",
      c("数式ベース" = "MathDist",
        "シミュレーションベース" = "SimuDist",
        "ユーザデータベース" = "DataDist")),
    hr(),
    # end of data-source selector
    h4(tags$b("Step 2. パラメータの設定")),
    # Conditional panel 1: formula-based parameters (degrees of freedom, x range).
    conditionalPanel(
      condition = "input.InputSrc_x == 'MathDist'",
      HTML("<b>1. パラメータの設定(Chi(v))</b>"),
      #HTML("<h4><b>Step 1. Set Parameters for Chi(v)</b></h4>"),
      numericInput("x.df", HTML("v > 0:自由度 = 平均 = 分散/2"), value = 4, min = 0),
      hr(),
      numericInput("x.xlim", "x軸の範囲(> 0)を変更します。", value = 8, min = 1)
    ),
    # end of conditional panel 1
    # Conditional panel 2: simulation-based parameters (sample size, bins).
    conditionalPanel(
      condition = "input.InputSrc_x == 'SimuDist'",
      numericInput("x.size", "シミュレートした数の標本サイズ", value = 100, min = 1, step = 1),
      sliderInput("x.bin", "ヒストグラムのビンの数", min = 0, max = 100, value = 0),
      p("ビンの数が0の場合はプロットでデフォルトのビンの数が使用されます。")
    ),
    # end of conditional panel 2
    # Conditional panel 3: user data, entered manually or uploaded as a file.
    conditionalPanel(
      condition = "input.InputSrc_x == 'DataDist'",
      tabsetPanel(
        tabPanel("手入力",p(br()),
          p("データポイントは「,」「;」「Enter」「Tab」で区切ることができます。"),
          p(tags$b("データはCSV(1列)からコピーされ、ボックスに貼り付けられます")),
          # Free-text input area pre-filled with example data points.
          tags$textarea(
            id = "x.x", #p
            rows = 10, "11.92\n1.42\n5.56\n5.31\n1.28\n3.87\n1.31\n2.32\n3.75\n6.41\n3.04\n3.96\n1.09\n5.28\n7.88\n4.48\n1.22\n1.2\n9.06\n2.27"
          ),
          p("欠損値はNAとして入力されます")
        ), #tab1 end
        # File-upload tab; tabPanel.upload.num() is presumably a project
        # helper defined elsewhere — TODO confirm.
        tabPanel.upload.num(file ="x.file", header="x.header", col="x.col", sep="x.sep")
      ),
      sliderInput("bin.x","ヒストグラムのビンの数", min = 0, max = 100, value = 0),
      p("ビンの数が0の場合はプロットでデフォルトのビンの数が使用されます。")
    ),
    # end of conditional panel 3
    hr(),
    h4(tags$b("Step 2. 確率を表示する")),
    numericInput("x.pr", HTML("赤線の左側の面積の割合 = Pr(X < x<sub>0</sub>)で、赤線の位置が x<sub>0</sub> です。"), value = 0.05, min = 0, max = 1, step = 0.05),
    hr()
  ), # end of sidebarPanel
  # Main panel: outputs for whichever data source is selected.
  mainPanel(
    h4(tags$b("Outputs")),
    # Formula-based outputs: density plot with click info, plus CDF plot.
    conditionalPanel(
      condition = "input.InputSrc_x == 'MathDist'",
      h4("数式ベースプロット"),
      tags$b("カイ二乗分布プロット"),
      plotOutput("x.plot", click = "plot_click5"),
      verbatimTextOutput("x.info"),
      #HTML("<p><b>The position of Red-line, x<sub>0</sub></b></p>"),
      #p(tags$b("The position of Red-line, x<sub>0</sub>")),
      #tableOutput("x")
      hr(),
      # plotly::plotlyOutput("x.plot.cdf")
      plotOutput("x.plot.cdf")
    ),
    # Simulation-based outputs: histogram, download button, summary table.
    conditionalPanel(
      condition = "input.InputSrc_x == 'SimuDist'",
      h4("シミュレーションベースプロット"),
      tags$b("乱数から作成したヒストグラム"),
      plotly::plotlyOutput("x.plot2"),#click = "plot_click6",
      p("ビンの数が0の場合はプロットでデフォルトのビンの数が使用されます。"),
      #verbatimTextOutput("x.info2"),
      downloadButton("download6", "乱数をダウンロードする"),
      p(tags$b("サンプルの記述統計")),
      tableOutput("x.sum"),
      HTML(
        "
        <b> 説明 </b>
        <ul>
        <li> Mean = v </li>
        <li> SD = sqrt(2v) </li>
        </ul>
        "
      )
    ),
    # User-data outputs: data table, density/histogram/CDF plots, summary.
    conditionalPanel(
      condition = "input.InputSrc_x == 'DataDist'",
      tags$b("データの確認"),
      DT::DTOutput("XX"),
      h4("データの分布"),
      tags$b("アプロードされたデータの密度プロット"),
      plotly::plotlyOutput("makeplot.x2"),
      tags$b("アプロードされたデータのヒストグラム"),
      plotly::plotlyOutput("makeplot.x1"),
      tags$b("アプロードされた累積密度プロット(CDF)"),
      plotly::plotlyOutput("makeplot.x3"),
      p(tags$b("データの記述統計")),
      tableOutput("x.sum2")
    )
  ) # end of mainPanel
)
|
466cef28cb282dd124d1203542c0c7bcd0b7afbf
|
e15b2830959991a75a8635d6958c691c2bd52974
|
/R/single.R
|
3defbcdd9d9db55e690d145c61daf55661e1e3ea
|
[] |
no_license
|
CHOIJUNGWON/big_data_web
|
93a5afb46b473ecf2c3fa952d57986b882efc4b5
|
f41cd7db738ccfa724eceff3b02da304ff61713b
|
refs/heads/master
| 2021-05-08T17:30:00.879909
| 2018-03-02T04:12:54
| 2018-03-02T04:12:54
| 119,476,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 768
|
r
|
single.R
|
# Exploration of the ggplot2::mpg fuel-economy data.
mpg <- as.data.frame(ggplot2::mpg)
head(mpg)
library(ggplot2)
# Bug fix: the script uses %>%, filter(), select(), group_by() and
# summarise(), all of which come from dplyr; only ggplot2 was loaded, so
# every pipeline below failed with "could not find function".
library(dplyr)
# Compare highway mpg for small (displ <= 4) vs large (displ >= 5) engines.
displ1 <- mpg %>% filter(displ <= 4)
displ2 <- mpg %>% filter(displ >= 5)
mean(displ1$hwy)
mean(displ2$hwy)
# City mpg: audi vs toyota.
cty1 <- mpg %>% filter(manufacturer == 'audi')
cty2 <- mpg %>% filter(manufacturer == 'toyota')
mean(cty1$cty)
mean(cty2$cty)
# Mean highway mpg across chevrolet, ford and honda combined.
hwy1 <- mpg %>% filter(manufacturer %in% c('chevrolet', 'ford', 'honda'))
hwy1
mean(hwy1$hwy)
# Reload a fresh copy of the data for the next exercises.
mpg <- as.data.frame(ggplot2::mpg)
head(mpg)
library(ggplot2)
# City mpg: suv vs compact class.
a <- mpg %>% select(class, cty)
suv1 <- a %>% filter(class == 'suv')
compact1 <- a %>% filter(class == 'compact')
mean(suv1$cty)
mean(compact1$cty)
# Inject NAs into hwy and check missing-value counts.
mpg <- as.data.frame(ggplot2::mpg)
mpg[c(65,124,131,153,212), 'hwy'] <- NA
table(is.na(mpg$hwy))
table(is.na(mpg$drv))
# Mean hwy by drive train, excluding the injected NAs.
a <- mpg %>% filter(!is.na(hwy)) %>% group_by(drv) %>% summarise(mean_hwy = mean(hwy))
a
|
127c24934078d47bfbeabb743d3495da402ec79e
|
eb03c1bd9aeb0d0d9fc76dd8b4faec4d94c9a97c
|
/run_analysis.R
|
27b9c54caf28f477930b50b7b0ead232170c7907
|
[] |
no_license
|
RobinGeurts3007/gettingandcleaningdata
|
c9567f8f7613e23f346ae2976965ba72e50802de
|
e7bd48aa5daa3bf97e902f142ce34ffc686f826f
|
refs/heads/master
| 2020-04-05T04:41:01.476646
| 2018-11-16T14:09:29
| 2018-11-16T14:09:29
| 156,561,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,115
|
r
|
run_analysis.R
|
run_analysis <- function() {
  # Builds the tidy dataset for the Getting & Cleaning Data project:
  # merges the UCI HAR train/test sets, keeps only mean()/std() features,
  # attaches descriptive activity labels, and writes the average of each
  # variable per subject and activity to "tidydata.txt"
  # (inside "UCI HAR Dataset").
  library(tidyr)
  library(dplyr)
  # Restore the caller's working directory when done; the original left the
  # session inside "UCI HAR Dataset" as a side effect.
  old_wd <- setwd("UCI HAR Dataset")
  on.exit(setwd(old_wd), add = TRUE)
  ## 1.1 read train data and labels
  setwd("train")
  xtrain <- read.table("X_train.txt") ##data
  # Bug fix: the label files ship with a lower-case "y" (y_train.txt /
  # y_test.txt); "Y_train.txt" fails on case-sensitive file systems.
  ytrain <- read.table("y_train.txt") ##labels
  subtrain <- read.table("subject_train.txt") ##subject id's
  setwd('..')
  ## 1.2 read test data and labels
  setwd("test")
  xtest <- read.table("X_test.txt")##data
  ytest <- read.table("y_test.txt") ##labels
  subtest <- read.table("subject_test.txt") ##subject id's
  setwd('..')
  ## 1.3 read features and activity labels
  features <- read.table("features.txt")
  activitylabels <- read.table("activity_labels.txt")
  ## 1.4 merge data sets
  xtotal <- rbind(xtest, xtrain) ## data sets
  ytotal <- rbind(ytest, ytrain) ## label sets
  subtotal <- rbind(subtest, subtrain) ## subject id's
  ## 2.1 extract means and standard deviations and apply on data set
  extracted_features <- features[grep("mean\\(\\)|std\\(\\)",features[,2]),] ##define pattern for extraction means and sd's
  xtotal <- xtotal[,extracted_features[,1]] ##subset data set following extracted features (mean and sd)
  ## 3.1 expand table to name activities in data set
  colnames(ytotal) <- "activity"
  ytotal$activitylabel <- factor(ytotal$activity, labels = as.character(activitylabels[,2]))
  activitylabel <- ytotal[, -1]
  ## 4.1 Label data set with descriptive names
  colnames(xtotal) <- extracted_features[ ,2]
  ## 5.1 Create data set with average of each variable over activity and subject
  colnames(subtotal) <- "subject"
  total <- cbind(subtotal, activitylabel, xtotal)
  # funs() is deprecated since dplyr 0.8; passing the function directly is
  # equivalent and works on both old and new dplyr versions.
  total_average <- total %>% group_by(subject, activitylabel) %>% summarise_all(mean)
  write.table(total_average, file = "tidydata.txt", row.names = FALSE, col.names = TRUE)
}
|
8698040a4e915c2682452277e3fe1e9657cb4ab1
|
6b769ade12829c97f7aa9930265418bede361967
|
/man/Table7_12.Rd
|
f494b4eb395fe5bd7536aa4b71443d5b367f9654
|
[] |
no_license
|
brunoruas2/gujarati
|
67b579a4fde34ae1d57f4a4fd44e5142285663c1
|
a532f2735f9fa8d7cd6958e0fc54e14720052fd4
|
refs/heads/master
| 2023-04-07T01:15:45.919702
| 2021-04-04T15:10:58
| 2021-04-04T15:10:58
| 284,496,240
| 14
| 6
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,195
|
rd
|
Table7_12.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table7_12.R
\docType{data}
\name{Table7_12}
\alias{Table7_12}
\title{Table 7_12}
\format{
\itemize{
\item \strong{Year}
\item \strong{C: }real consumption expenditures in billions of chained 1996 dollars.
\item \strong{Yd: } real personal disposable income in billions of chained 1996 dollars
\item \strong{wealth: } real wealth in billions of chained 1996 dollars
\item \strong{interest: }nominal annual yield on 3-month Treasury securities–inflation rate (measured by the annual % change in annual chained price index).
}
}
\usage{
data('Table7_12')
}
\description{
Real Consumption Expenditure, Real Income, Real Wealth, and Real Interest Rates for the U.S., 1947 to 2000
Sources: C, Yd, and quarterly and annual chain-type price indexes (1996 = 100): Bureau of Economic Analysis, U.S. Department of Commerce (http://www.bea.doc.gov/bea/dn1.htm). Nominal annual yield on 3-month Treasury securities: Economic Report of the President, 2002. Nominal wealth = end-of-year nominal net worth of households and nonprofits (from Federal Reserve flow of funds data: http://www.federalreserve.gov).
}
\keyword{datasets}
|
8af663f77614f891a89d981b6bc1b3928a904c6f
|
4beebbe7c247266d9f182f8a6d05a6e51b319376
|
/45.MakeKeysLN.r
|
edcdefa3021270278bc54a7c060891b4dcdfe75f
|
[] |
no_license
|
IRRDDv45/Scripts
|
471708895fe7af7c0f206bdc54512e300ebc20c3
|
d82bd200bc2bc25a605211240bcd7251f3f9fc32
|
refs/heads/master
| 2016-08-05T02:39:17.858253
| 2013-09-12T14:41:47
| 2013-09-12T14:41:47
| 12,785,755
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,938
|
r
|
45.MakeKeysLN.r
|
# 45.MakeKeys
#
# source("45.MakeKeys.r",echo=TRUE)
#
# Builds record-linkage keys of decreasing specificity (key9 .. key1) on
# the mihow and birth data sets and saves the keyed copies.
#
# NOTE(review): the original file had editor line numbers embedded at the
# start of every line (" 1 ", " 2 ", ...), which made it invalid R; they
# have been stripped here.
rm(list=ls())

#library(knitr)
library(gdata)
library(rms)
library(gdata)   # NOTE(review): gdata loaded twice in the original; harmless.
library(gmodels)
library(RMySQL)
library(Hmisc)

load("../Data/mihow.RData")
load('../Data/birth.RData')

options('digits'=10)
options('scipen'=10)
# Bug fix: the original spelled the option 'stringAsFactors' (missing "s"),
# so the setting never had any effect.
options('stringsAsFactors'=FALSE)
#
# create keys
#
# Reorder momdob from mm-dd-yyyy to yyyy-mm-dd.
mihow$momdob<-paste(substr(mihow$momdob,7,10),'-',substr(mihow$momdob,1,5),sep='')

# key9: last/first name, mom dob (y,m), child dob (y,m), zip, age,
# marital status, race.
mihow$key9 <-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                   mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                   mihow$mommailingzip,mihow$momage,mihow$mommarried,mihow$momwhite,sep=":")

birth$key9 <-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                   birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                   birth$mommailingzip,birth$momage,birth$mommarried,birth$momwhite,sep=":")

# key8: drops race.
mihow$key8<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                  mihow$mommailingzip,mihow$momage,mihow$mommarried,sep=":")

birth$key8<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                  birth$mommailingzip,birth$momage,birth$mommarried,sep=":")

# key7: drops marital status.
mihow$key7<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                  mihow$mommailingzip,mihow$momage,sep=":")

birth$key7<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                  birth$mommailingzip,birth$momage,sep=":")

# key6: drops age.
mihow$key6<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                  mihow$mommailingzip,sep=":")

birth$key6<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                  birth$mommailingzip,sep=":")

# NOTE(review): key5 is built from exactly the same fields as key6 in the
# original file — possibly a copy/paste slip (perhaps zip was meant to be
# dropped here). Preserved as-is; TODO confirm intent.
mihow$key5<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                  mihow$mommailingzip,sep=":")

birth$key5<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                  birth$mommailingzip,sep=":")

# key4: names plus mom and child birth year/month.
mihow$key4<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth, mihow$childdobyear, mihow$childdobmonth,
                  sep=":")

birth$key4<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth, birth$childdobyear, birth$childdobmonth,
                  sep=":")

# key3: names plus mom birth year/month.
mihow$key3<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),
                  mihow$momdobyear, mihow$momdobmonth,
                  sep=":")
birth$key3<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),
                  birth$momdobyear, birth$momdobmonth,
                  sep=":")

# key2: full name; key1: last name only.
mihow$key2<-paste(trim(mihow$momnamelast),trim(mihow$momnamefirst),sep=":")
birth$key2<-paste(trim(birth$momnamelast),trim(birth$momnamefirst),sep=":")
mihow$key1<-paste(trim(mihow$momnamelast),sep=":")
birth$key1<-paste(trim(birth$momnamelast),sep=":")
#
save(mihow,file="../Data/mihowK.RData") #with keys
save(birth,file="../Data/birthK.RData")
f3ad3fa832a7bc2309b022233ecbaae830d0eb44
|
dc5cb16b7ca29ba8a214b92c4af47521dbe4a81b
|
/figures/Rplot2.r
|
01042184e3e3de55e915937b060771343d4a6d67
|
[] |
no_license
|
d2i2k/RepData_PeerAssessment1
|
160286ff2371bd32067bdad95f4babfc4362f8a6
|
b103f7c67993ee7530c0f9eaec845ffb1b045241
|
refs/heads/master
| 2021-01-20T23:16:43.376919
| 2015-04-06T22:19:27
| 2015-04-06T22:19:27
| 22,814,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
Rplot2.r
|
# Time-series plot of the mean number of steps per 5-minute interval,
# averaged across all days of the activity dataset.
# NOTE(review): the hard-coded setwd() ties this script to one machine --
# consider removing it and running from the project root instead.
setwd("C:/Users/d2i2k/tmp/RepData_PeerAssessment1")
ActivityData <- read.csv("activity.csv", header=TRUE)
# Mean steps within each interval, averaged over days (NAs removed).
y <- tapply(ActivityData$steps,INDEX=ActivityData$interval,FUN=mean,na.rm=TRUE)
# The distinct interval values themselves (mean of a constant per group).
x <- tapply(ActivityData$interval,INDEX=ActivityData$interval,FUN=mean,na.rm=TRUE)
xy <- cbind(x,y)
plot(xy,type="l",main="Time Series of Mean Steps",xlab="Daily Interval (in minutes)",ylab="Average Number of Steps")
# Index (interval label) of the highest average step count.
which.max(y)
|
bcf64acf079550dfd01be92188a2bcb8e6564296
|
eea1603cbc55b36ebb1093c9fc96e8adf5652f88
|
/man/bindTables.Rd
|
a0a31e352ae291c5a7964b07dfa0d073a515e839
|
[] |
no_license
|
lcmercado/AutoAnalysis
|
954bcda9a7de59e73f99dd0edbcbd2dd59ecca9a
|
41f40a6817ce29844fc80e1082ee9cab2eea7288
|
refs/heads/master
| 2021-04-03T01:46:57.624551
| 2018-09-10T10:42:38
| 2018-09-10T10:42:38
| 124,944,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 868
|
rd
|
bindTables.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bindTables.R
\name{bindTables}
\alias{bindTables}
\title{Bind compatible data tables that have a similar name}
\usage{
bindTables(x)
}
\arguments{
\item{x}{a string pattern that is also included in the names of all the data frames that are to be bound together.}
}
\value{
All data frames whose names include the given string pattern, bound together into one data frame.
}
\description{
The \code{bindTables} function will take a string pattern as input and look for all variables in the Global Environment that include that pattern. Then it will try to \code{rbind} all those variables.
}
\details{
It's important to have all data frames following the same naming pattern and avoid including that naming pattern in the names of other variables.
}
\examples{
\dontrun{
bindedDF <- bindTables("stringPattern")
}
}
|
534cd9093c1807ecf6529de07f9ea66447408f5d
|
732913cad1b98cfa04839d4bd46aa2b6f7272a33
|
/R/view_section_spreadsheet.R
|
df152c4f6fd6a75f3a8ddbd1da2e0fb2f4833705
|
[] |
no_license
|
KerrLab/introbiocure
|
8c3029bf010c070ca119708cad448752591d3dad
|
db9ad5039bfb3a7690acb065aa04d89c83d01569
|
refs/heads/master
| 2020-06-24T06:13:10.351793
| 2017-09-22T22:54:50
| 2017-09-22T22:54:50
| 96,922,212
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,037
|
r
|
view_section_spreadsheet.R
|
#' Open the Google Sheet for a BIO 180/200 section
#'
#' @inheritParams build_section_spreadsheet_title
#'
#' @export
#'
#' @examples
#' \dontrun{
#' view_section_spreadsheet(course = 180, year = 2017, quarter = "AU", section = "C")
#' }
view_section_spreadsheet <- function(course, year, quarter, section) {
  # Resolve the spreadsheet's title for this course/term/section, look the
  # sheet up by that title, and open it in the browser.
  title <- build_section_spreadsheet_title(
    course = course,
    year = year,
    quarter = quarter,
    section = section
  )
  sheet <- googlesheets::gs_title(title)
  googlesheets::gs_browse(sheet)
}
#' @rdname view_section_spreadsheet
#' @export
view_section_spreadsheet_180 <- function(year, quarter, section) {
  # Convenience wrapper: fixes the course number at 180.
  view_section_spreadsheet(course = 180, year = year,
                           quarter = quarter, section = section)
}
#' @rdname view_section_spreadsheet
#' @export
view_section_spreadsheet_200 <- function(year, quarter, section) {
  # Convenience wrapper: fixes the course number at 200.
  view_section_spreadsheet(course = 200, year = year,
                           quarter = quarter, section = section)
}
|
45918a64b56c416d27987aac001be8da6aa54ae9
|
1712ed440489db168071b533ff8e79a1ede57df7
|
/man/occupancyData.Rd
|
64e7528a29f3fcfdaa14e996c4d423dab1be7ff9
|
[] |
no_license
|
dsjohnson/stocc
|
2947fc1f52e54e6e1747485fc3d905e7078bebc2
|
584c24792bb3c6083de274fab839a3f713c4adf0
|
refs/heads/master
| 2022-10-21T01:42:42.469014
| 2022-10-05T18:52:47
| 2022-10-05T18:52:47
| 14,853,161
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,247
|
rd
|
occupancyData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stocc-package.R
\docType{data}
\name{occupancyData}
\alias{occupancyData}
\title{Simulated occupancy for the 40 x 40 study area.}
\format{
A data frame with 1600 observations on the following 6 variables.
\describe{
\item{site}{Site labels}
\item{x}{Longitude coordinate}
\item{y}{Latitude coordinate}
\item{psi}{True probability of occupancy}
\item{psi.fix}{The fixed effects portion of the occupancy process map}
\item{occ}{True realized occupancy}
}
}
\description{
This data represents the truth with regard to occupancy in the simulated study
area. The probability of occupancy was simulated as \code{pnorm(0, X%*%gamma
+ K alpha, 1, lower=FALSE)}, where \code{K} and \code{alpha} were constructed
from a reduced rank is an ICAR process with precision
(\code{tau}) = 0.3 and \code{gamma = c(-1, 0, 0, 1)}
}
\examples{
data(occupancyData)
##
## Blue points represent realized occupancy.
##
image(x=seq(0.5,39.5,1), y=seq(0.5,39.5,1), z=t(matrix(occupancyData$psi,40)),
xlab="x", ylab="y", main="Occupancy process with realized occupancy")
points(occupancyData$x[occupancyData$occ==1], occupancyData$y[occupancyData$occ==1],
pch=20, cex=0.25, col="blue")
}
|
cbcdaa4f6b70c9f7653af35550ab1126c61ff08d
|
9477487b28fcc311b706d51943b609baf0f9d589
|
/SynthData/decode_DDC.R
|
2d71773ab2ecd3d580585f1b5501139c9bf7b68f
|
[] |
no_license
|
bbujfalussy/tSeq
|
2a7cf19c0df97b1bc3499579e14039b11e68c67a
|
6d131540ba45e56b46f07aba1d0cc211114d0a6d
|
refs/heads/main
| 2022-10-30T15:57:48.113938
| 2022-10-18T16:43:16
| 2022-10-18T16:43:16
| 553,709,039
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,652
|
r
|
decode_DDC.R
|
# DDC decoding using standard ratemaps
# ---------------------------------------------------------------------------
# For the early/mid/late portion of every theta cycle, fits the parameters
# (mean1, mean2, sd) of DDC_likelihood via optim() over a spatial grid; the
# fits plus the spike count ("nsp") are cached to an .RData file under
# ./SimFigs/Fig5/ and reloaded on later runs.
# NOTE(review): this script expects Tmax, N.cells, code, spPth, reg,
# theta.starts, N.theta, spt, txyvd, i_run, ncells, posmap, ratemaps,
# get_theta_seqs() and DDC_likelihood() to exist in the calling environment
# (it appears intended to be source()d from a driver script) -- confirm.
# ---------------------------------------------------------------------------
dir.create('./SimFigs/Fig5/', showW=F)
## decoding the spikes
outfile <- paste('decode_estR_Tmax', Tmax, '_Ncells', N.cells, '_', code, '_spPth', spPth, reg, '_DDC.RData', sep='')
# Reuse the cached fit if the output file already exists on disk.
if (outfile %in% list.files('./SimFigs/Fig5/')) {
ddcfile <- paste('./SimFigs/Fig5/', outfile, sep='')
load(file=ddcfile)
opars <- decode_DDC$opars
} else {
# Theta-cycle boundaries as (start, next-start) pairs.
thetas <- list(cbind(theta.starts[1:N.theta], theta.starts[2:(N.theta+1)]))
chunks <- get_theta_seqs(sp=spt[,1:2], txy=txyvd, i_data=i_run, cellids=1:ncells, thetas=thetas)
# Spike rasters and positions for the early/mid/late part of each cycle.
rast_early <- chunks$past[[1]]$rast
rast_mid <- chunks$present[[1]]$rast
rast_late <- chunks$future[[1]]$rast
txy_early <- chunks$past[[1]]$pos
txy_mid <- chunks$present[[1]]$pos
txy_late <- chunks$future[[1]]$pos
# Spatial grid built from the ratemap bin centers (scaled by 100).
bincenters <- attr(posmap, 'xcenters')
Nbins <- length(bincenters)
g.x <- matrix(rep(bincenters, Nbins), Nbins) * 100
g.y <- matrix(rep(bincenters, each=Nbins), Nbins) * 100
g.x.vec <- as.vector(g.x)
g.y.vec <- as.vector(g.y)
# One column per cell: the 2-D ratemap flattened over the grid, floored at
# rmin (presumably to keep the likelihood finite -- confirm).
rates.vec <- matrix(0, Nbins*Nbins, ncells)
for (i in 1:ncells){
rates.vec[,i] <- as.vector(ratemaps[i,,])
}
rmin <- 0.01 # min firing rate
rates.vec[rates.vec < rmin] <- rmin
# i.cycle <- 1
# t_theta <- txy_early[i.cycle,5]
# sp <- rast_early[i.cycle,]
initpars <- c(mu1=100, mu2=100, sigma=5)
# DDC_likelihood(initpars, sp, rates.vec, g.x.vec, g.y.vec, t_theta)
# opars[k, , cycle]: fitted (mean1, mean2, sd) and spike count for the
# early (k=1), mid (k=2) and late (k=3) part of each theta cycle.
opars <- array(NA, dim=c(3, 4, N.theta), dimnames=list(c('early', 'mid', 'late'), c('mean1', 'mean2', 'sd', 'nsp'), NULL))
for (i.cycle in 1:N.theta){
sp <- rast_early[i.cycle,]
t_theta <- txy_early[i.cycle,5]
opars[1,, i.cycle] <- c(optim(initpars, DDC_likelihood, gr=NULL, sp=sp, rates_vec=rates.vec, g.x.vec=g.x.vec, g.y.vec=g.y.vec, deltaT=t_theta, method='L-BFGS-B', lower=c(0, 0, 3), upper=c(200, 200, 200), control=list(fnscale=-1))$par, sum(sp))
sp <- rast_mid[i.cycle,]
t_theta <- txy_mid[i.cycle,5]
opars[2,, i.cycle] <- c(optim(initpars, DDC_likelihood, gr=NULL, sp=sp, rates_vec=rates.vec, g.x.vec=g.x.vec, g.y.vec=g.y.vec, deltaT=t_theta, method='L-BFGS-B', lower=c(0, 0, 3), upper=c(200, 200, 200), control=list(fnscale=-1))$par, sum(sp))
sp <- rast_late[i.cycle,]
t_theta <- txy_late[i.cycle,5]
opars[3,,i.cycle] <- c(optim(initpars, DDC_likelihood, gr=NULL, sp=sp, rates_vec=rates.vec, g.x.vec=g.x.vec, g.y.vec=g.y.vec, deltaT=t_theta, method='L-BFGS-B', lower=c(0, 0, 3), upper=c(200, 200, 200), control=list(fnscale=-1))$par, sum(sp))
if (i.cycle%%100 == 0) cat(i.cycle, '\n')
}
decode_DDC <- list(opars=opars)
ddcfile <- paste('./SimFigs/Fig5/', outfile, sep='')
save(decode_DDC, file=ddcfile)
}
|
52cc2f1796bd07de6c25a6c70183f6e09547b50b
|
4d92414b5f6ac4461ece23e8ac14c792190596e6
|
/challenge_6_data no 8/intPrev_bootstrap.R
|
9c613f5fcbc4019eeafbce340ab971eac6bf329e
|
[] |
no_license
|
vmolchan/CodeDataScience
|
6f0a28e89332926bdcd7c0a09f412cb815d46bf5
|
3ba3e9e0d27869f14e6ea7a16261e3939ce89402
|
refs/heads/master
| 2022-02-19T11:32:36.840168
| 2019-07-24T21:02:07
| 2019-07-24T21:02:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,545
|
r
|
intPrev_bootstrap.R
|
# Prediction intervals for a simple linear model: exact intervals from
# predict() versus bootstrap intervals (resampling cases, then adding
# parametric or resampled residuals to mimic the predictive distribution).
n <- 20
sigma <- 0.05
x <- seq(0, 1, length.out = n)
X <- cbind(rep(1,n), x)
beta <- matrix(c(1, 0.5), ncol = 1)
set.seed(0)
# Simulate responses from the true line plus Gaussian noise.
y <- X%*%beta + rnorm(n, mean = 0, sd = sigma)
plot(x, y)
abline(a = beta[1], b = beta[2], lty = "dotted")
# NOTE(review): data.frame(x = X) spreads the design matrix into columns
# x.1/x.2, so lm(y ~ x) actually resolves `x` from the global environment,
# not from df -- it works here by accident; confirm before reusing.
df <- data.frame(x = X, y = y)
m <- lm(y~x, data = df)
abline(m, col = "blue")
nNew <- 20
xnew = x; #runif(nNew)
# Exact 95% prediction intervals at the new points.
p <- predict(m, newdata = data.frame(x = xnew), interval = "pred", level = 0.95)
arrows(x0 = xnew, y0 = p[, 2], y1 = p[, 3], col = "red", code = 3, angle = 90, length = 0.05)
points(xnew, p[, 1], pch = 19)
# bootstrap on the data (resampling cases)
nboot <- 1000
Mboot <- matrix(NA, nrow = n, ncol = nboot)
option <- 'non parametric' # 'parametric' or 'non parametric'
for (i in 1:nboot){
index <- sample(n, replace = TRUE)
mboot <- lm(y~x, data.frame(x = x[index], y = y[index]))
Mboot[, i] <- predict(mboot, newdata = data.frame(x = x))
# Add noise so each column is a draw from the *predictive* distribution.
if (option=="parametric"){
Mboot[, i] <- Mboot[, i] + rnorm(n, 0, sd(mboot$residuals))
} else if (option == "non parametric"){
Mboot[, i] <- Mboot[, i] + sample(mboot$residuals, size = n, replace = TRUE)
} else stop()
}
# Bootstrap mean and 95% percentile interval at each point.
pboot <- matrix(NA, nrow = n, ncol = 3)
pboot[, 1] <- rowMeans(Mboot)
pboot[, 2] <- apply(Mboot, 1, quantile, 0.025)
pboot[, 3] <- apply(Mboot, 1, quantile, 0.975)
arrows(x0 = xnew, y0 = pboot[, 2], y1 = pboot[, 3], col = "blue", code = 3, angle = 90, length = 0.05)
points(xnew, pboot[, 1], pch = 19)
legend('topleft', legend = c("Exact", paste("Bootstrap (", option, ")", sep = "")),
col = c("grey", "blue"), lty = c(1,1))
|
19140356f7bf7273e0f5b6c4bd0f253dd0791a97
|
1c92699b1f59e73440756844f52c9728f49d6e11
|
/R/draw.barplot2D.R
|
1dbe41a2d288f91022feb4d93f25cd35da1485ab
|
[] |
no_license
|
marchtaylor/mapplots
|
4dfca74282facc317f37f0b2d59d56a24ff6a98e
|
0cf8125ccb7d0cb966ebee73c04912880677d96d
|
refs/heads/master
| 2020-06-03T21:36:15.016473
| 2018-05-22T10:25:28
| 2018-05-22T10:25:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,395
|
r
|
draw.barplot2D.R
|
#' Draw 2-D barplots (one per (x, y) location) on an existing plot.
#'
#' For each location i, row z[i, ] is drawn as a small 2-D barplot via
#' barplot2D(). Optionally tile size is scaled by sqrt(row total) relative
#' to the largest row total, so tile area tracks the total.
#'
#' Fixed here: T/F abbreviations replaced with TRUE/FALSE, scalar `&` in
#' `if` conditions replaced with `&&`, `1:nx` replaced with seq_len(nx),
#' and col.frame/lwd.frame are now passed by name instead of positionally.
#'
#' @param x,y Coordinates of the barplot centres (equal-length vectors).
#' @param z Numeric matrix; one row per location, one column per category.
#' @param width,height Tile size: scalars or per-location vectors.
#' @param scale Scale tiles by sqrt(row total)? (scalar width/height only)
#' @param col Category colours; defaults to an 8-colour palette, recycled.
#' @param col.frame,lwd.frame Frame colour and line width for barplot2D().
#' @param silent If FALSE, report progress via progressMsg().
#' @param ... Further arguments passed on to barplot2D().
draw.barplot2D <-
  function(x,y,z,width,height,scale=FALSE,col=NULL,col.frame='black',lwd.frame=1,silent=TRUE,...){
    nx <- length(x)
    nz <- dim(z)[2]
    # Default palette, recycled to one colour per category.
    if (is.null(col))
      col <- c("#737373", "#F15A60", "#7BC36A", "#599BD3", "#F9A75B", "#9E67AB", "#CE7058", "#D77FB4")
    col <- rep(col, length.out = nz)
    # Input validation (messages kept identical to the original).
    if(length(y)!=nx) stop('x and y should be vectors of the same length')
    if(length(dim(z))!=2) stop('z should be a 2-dimensional array')
    if(dim(z)[1]!=nx) stop('the number of rows in of z should match as the length of x and y')
    if(length(width)!=length(height)) stop('width and height should have the same length')
    if(length(width)>1 && length(width)!=length(x)) stop('width and height should have the same length as x and y')
    maxsumz <- max(rowSums(z,na.rm=TRUE),na.rm=TRUE)
    pm <- setProgressMsg(1,nx)
    for(i in seq_len(nx)){
      xi <- x[i]
      yi <- y[i]
      zi <- z[i,]
      # Per-location size if vectors were supplied, otherwise the scalar.
      if(length(width)>1) widthi <- width[i] else widthi <- width
      if(length(height)>1) heighti <- height[i] else heighti <- height
      # Area-proportional scaling (scalar sizes only).
      if(scale && length(width)==1) {
        widthi <- width * sqrt(sum(zi,na.rm=TRUE))/sqrt(maxsumz)
        heighti <- height * sqrt(sum(zi,na.rm=TRUE))/sqrt(maxsumz)
      }
      # Only draw the positive categories; skip rows with no positive total.
      j <- which(zi>0)
      if(sum(zi,na.rm=TRUE)>0) barplot2D(z=zi[j],colour=col[j],x=xi,y=yi,width=widthi,height=heighti,add=TRUE,col.frame=col.frame,lwd.frame=lwd.frame,...)
      if(!silent) pm <- progressMsg(pm,i)
    }
  }
|
e252f1d3b88fbcda00badae24b9c8ddd8f60bb67
|
e87785dc6f078afb4445d50af697c827b81df38d
|
/Modulo_III/R_Modulo_III.R
|
0ced88293f5f7bb220b22deb0960bde75c0f8711
|
[
"MIT"
] |
permissive
|
rodrigosantana/Curso_R_Intro_UNIVALI_2015
|
aa9f6d9145e6878d29c7314eeec0b585678a4ac3
|
029265058a3544a8ca8fba9dd3f6bbe5ae3d98b1
|
refs/heads/master
| 2020-12-24T15:04:42.676811
| 2015-07-16T00:45:03
| 2015-07-16T00:45:03
| 39,032,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,535
|
r
|
R_Modulo_III.R
|
## @knitr setup
# knitr chunk defaults for the slide deck: smaller font, cached chunks,
# centered 5x5-inch figures.
# smaller font size for chunks
opts_chunk$set(size = "small",
prompt = TRUE,
comment = NA,
tidy = FALSE,
cache = TRUE,
fig.align = "center",
fig.width = 5,
fig.height = 5)
thm <- knit_theme$get("beamer2")
knit_theme$set(thm)
options(width = 65, digits = 5, continue = "  ")
## @knitr unnamed-chunk-1
# critical z values with alpha = 0.05 (two-sided)
qnorm(0.025)
qnorm(0.975)
# critical t values for different degrees of freedom
qt(0.025, df = 9)
qt(0.025,df = 900)
## @knitr unnamed-chunk-2
# Crab morphometry data: semicolon-separated with comma decimal mark.
dados <- read.table("crabs.csv", header = T, sep = ";",
dec = ",")
str(dados)
## @knitr unnamed-chunk-3
hist(dados$CL, main = "", ylab = "Frequência absoluta",
xlab = "Comprimento da carapaça (mm)", col = "grey")
## @knitr unnamed-chunk-4
# One-sample t-test: is the mean carapace length different from 30 mm?
t.test(dados$CL, mu = 30, alternative = "two.sided",
conf.level = 0.95)
## @knitr unnamed-chunk-5
teste <- t.test(dados$CL, mu = 30, alternative = "two.sided",
conf.level = 0.95)
names(teste)
teste$statistic
teste$p.value
## @knitr unnamed-chunk-6
t.test(dados$CL, mu = 30, alternative = "greater",
conf.level = 0.95)
## @knitr unnamed-chunk-7
t.test(dados$CL, mu = 30, alternative = "less",
conf.level = 0.95)
## @knitr unnamed-chunk-8
require(lattice) # package for advanced graphics
histogram(~CL | especie, data = dados)
## @knitr unnamed-chunk-9
with(dados, tapply(CL, especie, summary))
## @knitr unnamed-chunk-10
# Two-sample t-test: carapace length by species.
t.test(CL ~ especie, data = dados, mu = 0,
alternative = "two.sided", conf.level = 0.95)
## @knitr unnamed-chunk-11
t.test(CL ~ especie, data = dados, mu = 0,
alternative = "greater", conf.level = 0.95)
## @knitr unnamed-chunk-12
plot(CW ~ CL, data = dados)
## @knitr unnamed-chunk-13
# Illustrations of perfect positive, zero, and perfect negative correlation.
plot(1:10, 1:10, type = "l", xlab = "", ylab = "", main = "r = 1")
plot(1:10, rep(5,10), type = "l", xlab = "", ylab = "", main = "r = 0")
plot(1:10, -1:-10, type = "l", xlab = "", ylab = "", main = "r = -1")
## @knitr unnamed-chunk-14
cor(dados$CL, dados$CW)
cor.test(dados$CL, dados$CW)
## @knitr unnamed-chunk-15
# Simple linear regression of carapace width on carapace length.
mod <- lm(CW ~ CL, data = dados)
mod
## @knitr unnamed-chunk-16
summary(mod)
## @knitr unnamed-chunk-17
plot(CW ~ CL, data = dados)
abline(mod)
plot(CW ~ CL, data = dados, xlim = c(0,50), ylim = c(0,55))
abline(mod)
## @knitr unnamed-chunk-18
# Standard diagnostic plots for the fitted model.
par(mfrow = c(2,2))
plot(mod)
par(mfrow = c(1,1))
## @knitr unnamed-chunk-19
names(mod)
names(summary(mod))
## @knitr unnamed-chunk-20
with(dados, tapply(CL, especie, summary))
## @knitr unnamed-chunk-21
mean(dados$CL)
## @knitr unnamed-chunk-22
boxplot(CL ~ especie, data = dados)
abline(h = mean(dados$CL), lty = 2, col = "red", lwd = 2)
## @knitr unnamed-chunk-23
# Species means plotted against the overall mean (dashed red line).
plot(CL ~ as.numeric(especie), data = dados, axes = FALSE,
xlim = c(0,3), xlab = "Espécie", ylab = "CL")
axis(1, at = seq(0,3,1), labels = c("", "Azul", "Laranja", ""), tick = FALSE)
axis(2); box()
points(1, mean(dados$CL[dados$especie == "azul"]), pch = 15,
cex = 2, col = "blue")
points(2, mean(dados$CL[dados$especie == "laranja"]), pch = 15,
cex = 2, col = "orange")
abline(h = mean(dados$CL), lty = 2, col = "red", lwd = 2)
## @knitr unnamed-chunk-24
# Regression with a factor predictor (equivalent to the two-sample t-test).
mod <- lm(CL ~ especie, data = dados)
summary(mod)
## @knitr unnamed-chunk-25
plot(CL ~ as.numeric(especie), data = dados, axes = FALSE,
xlim = c(0,3), xlab = "Espécie", ylab = "CL")
axis(1, at = seq(0,3,1), labels = c("", "Azul", "Laranja", ""), tick = FALSE)
axis(2); box()
points(1, mean(dados$CL[dados$especie == "azul"]), pch = 15,
cex = 2, col = "blue")
points(2, mean(dados$CL[dados$especie == "laranja"]), pch = 15,
cex = 2, col = "orange")
abline(h = mean(dados$CL), lty = 2, col = "red", lwd = 2)
segments(1, mean(dados$CL[dados$especie=="azul"]),
2, mean(dados$CL[dados$especie=="laranja"]))
# abline(mod)
## @knitr unnamed-chunk-26
teste <- t.test(CL ~ especie, data = dados, mu = 0,
alternative = "two.sided", conf.level = 0.95)
teste
## @knitr unnamed-chunk-27
# Compare the regression coefficient table with the t-test output.
sci <- getOption("scipen")
options(scipen = -1)
summary(mod)$coefficients
teste$p.value
teste$estimate
diff(teste$estimate)
options(scipen = sci)
## @knitr unnamed-chunk-28
anova(mod)
## @knitr unnamed-chunk-29
# One-way ANOVA with Tukey's HSD post-hoc comparisons.
mod.anova <- aov(CL ~ especie, data = dados)
TukeyHSD(mod.anova)
## @knitr unnamed-chunk-30
# Same model fitted as a Gaussian GLM with identity link.
mod.glm <- glm(CL ~ especie, data = dados,
family = gaussian(link = "identity"))
summary(mod.glm)
|
05e5f0971d5819fdba85260af753c7ef7b8c443b
|
e8d24ceceb6c57171a61d0ee14e83d9b910266cb
|
/utils.R
|
56e40e99116a19cfb56fde553e701698ac0c74f2
|
[] |
no_license
|
adamsqi/srd
|
a706fb7abb8773636f4c925eba6e8b530b227026
|
e53dfbd67826c8ee8f64f40621f0f7f6bf68bbf8
|
refs/heads/master
| 2022-01-05T00:36:40.857360
| 2019-04-29T16:20:51
| 2019-04-29T16:20:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 108
|
r
|
utils.R
|
#' Convert one column of a data frame (referenced by name) to a factor.
#'
#' Bug fix: the original used `data[colname]`, which returns a one-column
#' data.frame, so as.factor() was applied to a data.frame rather than to
#' the column vector. `[[` extracts the underlying vector, which is what
#' as.factor() expects.
#'
#' @param data A data frame.
#' @param colname Name of the column to convert (character scalar).
#' @return `data` with the named column converted to a factor.
col_to_factor <- function(data, colname){
  data[[colname]] <- as.factor(data[[colname]])
  return(data)
}
|
d944defad96c78ecce2846b808e1d83be74d2f46
|
a498124c89ff88c7a587f180be8c632a79530246
|
/man/cache-management.Rd
|
e74d1662d809012b1963a7afe5224a8c24737c29
|
[] |
no_license
|
billdenney/httpcache
|
550d7509ddc1e7c5d205282f0b73bb5b6d8ab539
|
1f7148d9f85cd335f3a4fe4c35646c82682d2de2
|
refs/heads/master
| 2020-03-28T04:35:40.151144
| 2018-08-27T05:03:51
| 2018-08-27T05:03:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 461
|
rd
|
cache-management.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cache.R
\name{cache-management}
\alias{cache-management}
\alias{cacheOn}
\alias{cacheOff}
\alias{clearCache}
\title{Manage the HTTP cache}
\usage{
cacheOn()
cacheOff()
clearCache()
}
\value{
Nothing. Functions are run for their side effects.
}
\description{
These functions turn the cache on and off and clear the contents of the
query cache.
}
|
20e6b1d73ca16aa6ac9be30e873da34197c4fdd8
|
35416468434ede48f9886955b7d0ddb0e9c089a5
|
/1.Import&PreprocessingGALAXY_v3.R
|
a33546aa2bc9adb472e87725981f2e9c4d00f4fc
|
[] |
no_license
|
francescaprata/Sensitivity-Analysis
|
9dcdf890be1ffa349a7e2c43f19fc4b69feb8dd6
|
9d6212313fe38e1a4189c459994c2405cfe8f033
|
refs/heads/master
| 2020-05-03T02:08:01.755003
| 2019-04-03T07:31:43
| 2019-04-03T07:31:43
| 178,359,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
1.Import&PreprocessingGALAXY_v3.R
|
###############################
# Name: Francesca Prata #
# Project: Sentiment Analysis #
# Script: Galaxy set-up #
# Date: 30 March 2019 #
# Version: 3 #
###############################
#Loading required packages
# pacman::p_load installs (if needed) and attaches everything in one call.
if(!require(pacman))install.packages("pacman")
pacman::p_load('doParallel', 'plotly', 'caret', 'corrplot', 'dplyr',
"e1071", "kknn", 'ROSE')
#Importing dataset
galaxymatrix <- read.csv("galaxy_smallmatrix_labeled_8d.csv")
#Subsetting matrix so that it only contains samsung and galaxy
galaxymatrix <- galaxymatrix %>%
select(starts_with("samsung"), starts_with("galaxy"))
#Making sure that samsung galaxy is mentioned at least once in the website
galaxymatrix <- galaxymatrix %>% filter(samsunggalaxy > 0)
#Recoding galaxysentiment from 6 to only 2 levels
# (values 0-2 collapse to 1; values 3-5 collapse to 5)
galaxymatrix$galaxysentiment <- recode(galaxymatrix$galaxysentiment,
'0' = 1, '1' = 1, '2' = 1,
'3' = 5, '4' = 5, '5' = 5)
#Balancing the dataset
# ROSE::ovun.sample with method="both" over- and under-samples towards a
# roughly 50/50 class split (p = 0.5), keeping the original row count.
galaxymatrix <- ovun.sample(galaxysentiment~., data=galaxymatrix,
N=nrow(galaxymatrix), p=0.5,
seed=1, method="both")$data
#Checking that galaxysentiment is a factor
galaxymatrix$galaxysentiment <- as.factor(galaxymatrix$galaxysentiment)
|
d06198f7578ab69f745ea3d04ba5a06f6af53b7b
|
cd3bad7fb562ad5f0333728c261d23ed0661e8bf
|
/modelo_transporte/07_capacity_into_merged.R
|
59e7f24033c94bb75033fe7d1308d2e835a14925
|
[] |
no_license
|
Joaobazzo/Master-thesis-scripts
|
6bfeff05d6567ad5a11a6f8047229943d7b61aea
|
effe21e025993844209bf4af3cbdb7eeadbe34b1
|
refs/heads/master
| 2021-01-02T12:57:01.458385
| 2020-04-01T21:55:54
| 2020-04-01T21:55:54
| 196,636,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,531
|
r
|
07_capacity_into_merged.R
|
#
#
# CAPACITY FIELD ADDED TO INTERSECTION
#
# some ideas for calibrating the results
# https://stats.stackexchange.com/questions/171154/fitting-known-equation-to-data/171191?newreg=00e24e0321294bc0aeefda68c3ff1fe3i
#
# -----------------------------------
# NOTE(review): rm(list=ls()) and the machine-specific setwd() make this
# script destructive to the running session and non-portable -- consider
# removing both.
rm(list=ls())
setwd("L:/# DIRUR #/ASMEQ/bosistas/joaobazzo/master-thesis-repo1/")
library(mapview)
library(sf)
library(geobr)
library(data.table)
library(future.apply)
# Network shapefile, reprojected to EPSG:31982 and clipped to the buffered
# municipality boundary (geobr code 4106902, 150 m buffer).
merged <- sf::read_sf("dados/Percursos/estendido/intersection_new-centroid.shp")
merged <- sf::st_set_crs(merged,4326) %>% sf::st_transform(31982)
# intersection
muni <- geobr::read_municipality(4106902) %>% sf::st_transform(31982) %>% sf::st_buffer(150)
merged <- sf::st_intersection(merged,muni)
mapview(merged$geometry) + mapview(muni$geom)
# -
# ippuc data
trips <- sf::read_sf("dados/Pesquisa_OD_IPPUC/PVT_VISUM/shapefile/Modelo OD_Pico Manha_20180530_link.SHP") %>%
sf::st_transform(31982) %>% sf::st_intersection(muni)
# --
# st_cast_merged
# --
# Explode every feature into simple LINESTRINGs, row by row.
merged1 <- lapply(1:nrow(merged),function(i){
sf::st_cast(merged[i,],"LINESTRING")
}) %>% data.table::rbindlist() %>% sf::st_as_sf()
# ---
#
# adding capacity into merged data.table
#
# ---
merged1$cap <- 0
# For each segment index in `ind`, take the capacity (CAPPRT) of the longest
# overlapping IPPUC link found within a 20 m buffer of the segment.
# NOTE(review): `ind` is not defined anywhere in this script -- presumably
# created interactively before running this section; confirm.
system.time({
cap_street <- sapply(ind,function(i){ #
message(i)
output <- merged1[i,"geometry"] %>%
sf::st_buffer(20) %>%
sf::st_intersection(trips)
#if(nrow(output)!=0){
output <- output[output$geometry %>%
sf::st_length() %>%
which.max(),"CAPPRT"]
cap <- output$CAPPRT
return(cap)
# }
})
})
# Clean the raw capacities: empty results, NAs, zeros and the 9999 sentinel
# all become NA.
aux1 <- cap_street
gvec <- c()
for(i in 1:length(aux1)){
#message(i)
if(length(aux1[[i]]) == 0){
aux1[[i]] <- NA}else{
if(is.na(aux1[[i]])){
aux1[[i]] <- NA}else{
if(aux1[[i]] == 0 | aux1[[i]] == 9999){aux1[[i]] <- NA}}}
gvec[i] <- aux1[[i]]
}
# Scatter the cleaned values back to their segment positions.
# NOTE(review): `aux` is also undefined in this script -- presumably a
# pre-existing list/vector from the interactive session; confirm.
for(i in 1:length(ind)){
aux[ind[i]] <- gvec[i]
}
aux2 <- c()
for(i in 1:length(aux)){
aux2[i] <- aux[[i]]
}
aux2[which(aux2 == 0)] <- NA
# Forward-fill NAs with the previous segment's capacity.
aux3 <- c(aux2[1])
for(i in 2:length(aux2)){
if(is.na(aux2[i])){aux3[i] <- aux3[i-1]}else{
aux3[i] <- aux2[i]
}
}
merged1$cap <- aux3
# merged1 <- readr::read_rds("dados/Percursos/estendido/intersection-new_centroids_cap.rds")
# merged1$trips <- merged1$trips * 2.655
readr::write_rds(merged1,"dados/Percursos/estendido/intersection-new_centroids_cap_adj.rds")
# sf::write_sf(merged,"dados/Percursos/estendido/intersection-new_centroids_cap.shp")
|
04d293306f9519abc9e05df39fa3b8118792b5d2
|
46a23d8ffb23dd4cd845864e8183cba216bc8d68
|
/app.R
|
cebf8af519f25a99413210f7afa624c1f9ae3645
|
[] |
no_license
|
DavidBarke/weibulltools-app
|
84099f30c1027ed808f9905ec90d0529aeb46565
|
2fe0a3a793231da1539767d81d9e22773598f386
|
refs/heads/main
| 2023-04-12T06:34:38.306466
| 2021-04-20T14:01:54
| 2021-04-20T14:01:54
| 329,618,724
| 6
| 0
| null | 2021-02-09T09:22:14
| 2021-01-14T13:08:39
|
R
|
UTF-8
|
R
| false
| false
| 4,662
|
r
|
app.R
|
library(shiny)
library(shinymeta)
library(shinyjs)
library(shinycssloaders)
library(bs4Dash)
library(dplyr)
library(purrr)
library(DT)
library(R.utils)
library(weibulltools)
library(xml2)
# Build the Shiny UI and server for the weibulltools app.
#
# Sources every file under ./modules (into this function's environment by
# default, or into globalenv() when source_to_globalenv = TRUE), then
# assembles the page (CSS, highlight.js, js-cookie, clipboard.js, the
# container module) and the server logic (shared `.values` environment,
# dark-mode cookie handling, reference modals).
#
# @param source_to_globalenv Source module files into globalenv()?
# @return A list with elements `ui` and `server`, ready for shiny::shinyApp().
ui_server <- function(source_to_globalenv = FALSE) {
# If source_to_global_env all sourced functions get added to the global
# environment which takes some time after the app has stopped
source("init/source_directory.R")
source_directory(
# chdir enables use of relative paths in source statements inside
# these sourced files
path = "./modules",
encoding = "UTF-8",
modifiedOnly = FALSE,
chdir = TRUE,
recursive = TRUE,
envir = if (source_to_globalenv) globalenv() else environment()
)
# Globals ------------------------------------------------------------------
# Allow bigger file inputs
options(shiny.maxRequestSize = 100*1024^2)
# modules/dt_options.R
dt_options()
# UI -----------------------------------------------------------------------
ui <- htmltools::div(
waiter::use_waiter(),
waiter::waiter_show_on_load(waiter::spin_wave()),
#htmltools::includeScript("www/js/dark-mode.js"),
htmltools::includeCSS("www/css/styles.css"),
htmltools::includeCSS("www/css/dark.css"),
htmltools::includeCSS("www/css/dt-dark.css"),
htmltools::tags$link(
rel = "stylesheet",
href = "//cdnjs.cloudflare.com/ajax/libs/highlight.js/10.6.0/styles/default.min.css"
),
htmltools::tags$script(
src = "//cdnjs.cloudflare.com/ajax/libs/highlight.js/10.6.0/highlight.min.js"
),
htmltools::tags$script(
src="https://cdn.jsdelivr.net/npm/js-cookie@rc/dist/js.cookie.min.js"
),
htmltools::tags$script("hljs.highlightAll();"),
container_ui(
id = "container"
),
# Enable shinyjs
useShinyjs(),
# Enable rclipboard
rclipboard::rclipboardSetup(),
htmltools::tags$script(
src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/1.7.1/clipboard.min.js",
integrity="sha384-cV+rhyOuRHc9Ub/91rihWcGmMmCXDeksTtCihMupQHSsi8GIIRDG0ThDc3HGQFJ3",
crossorigin="anonymous"
),
# Extend shinyjs with custom JavaScript
shinyjs::extendShinyjs(
"min-js/cookies.js",
functions = c("getCookie", "setCookie", "rmCookie")
),
# Include minified script
htmltools::includeScript("www/min-js/weibulltools-app.js")
)
# SERVER -------------------------------------------------------------------
server <- function(input, output, session) {
# .VALUES ENVIRONMENT ------------------------------------------------
# The .values environment is available to all modules so that arbitrary information
# can be shared via this environment. Elements that underly reactive changes can be
# stored as reactiveValues or reactiveVal
.values <- new.env()
.values$code_header <- quote(library(weibulltools))
.values$is_dark_mode_rv <- shiny::reactiveVal(FALSE)
.values$toast_options <- function(...) {
dots <- list(...)
default <- list(
position = "bottomRight",
autohide = TRUE,
delay = 3000,
class = "bg-primary"
)
# NOTE(review): %<-% is not exported by any package attached at the top of
# this file; presumably a custom "merge dots over defaults" operator
# sourced from modules/ -- confirm it is defined there.
default %<-% dots
}
container_server(
id = "container",
.values = .values
)
# Hide waiter when initialisation is done
waiter::waiter_hide()
shiny::observeEvent(input$dark_mode, {
.values$is_dark_mode_rv(input$dark_mode)
})
# Handle dark mode cookie
shiny::observeEvent(TRUE, {
js$getCookie(
cookie = "dark-mode",
id = "cookie_dark_mode"
)
}, once = TRUE)
shiny::observeEvent(input$dark_mode, {
js$setCookie(
cookie = "dark-mode",
value = input$dark_mode,
id = "cookie_dark_mode"
)
})
# Helper function defined in modules/ui/modal_reference.R
.values$open_modal_reference <- open_modal_reference
shiny::observeEvent(input$open_modal, {
.values$open_modal_reference(input$open_modal$value)
})
}
return(list(ui = ui, server = server))
}
# Build the app once at source time. Note: this reassigns `ui_server` from
# the factory function to its returned list, shadowing the factory.
ui_server <- ui_server(source_to_globalenv = FALSE)
ui <- ui_server$ui
server <- ui_server$server
shiny::shinyApp(ui, server)
|
f572a51f5c31b8672d6461244e84652ee1e9c9fa
|
8bef64009c1256ace384188118ec8611429256ed
|
/R/RcppExports.R
|
ac6b7ac3050214eccdd538577805fd560008467e
|
[] |
no_license
|
NathanWycoff/iPLSV
|
3f78cde543aff868dca4303dc9bf19c5055906b3
|
cfc4ffe6884f005fc076312b45cf0f3d4cc211a5
|
refs/heads/master
| 2020-03-13T00:03:06.642527
| 2018-06-28T22:04:05
| 2018-06-28T22:04:05
| 130,879,295
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 784
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R wrappers around the package's compiled
# `_iplsv_*` C++ routines; any edits here are overwritten the next time
# Rcpp::compileAttributes() runs.
softmaxC <- function(x) {
    .Call(`_iplsv_softmaxC`, x)
}
exp_nlpostC <- function(Z_exp, PHI, THETA, PSI, docs, Ns, eta, gamma, beta) {
    .Call(`_iplsv_exp_nlpostC`, Z_exp, PHI, THETA, PSI, docs, Ns, eta, gamma, beta)
}
g_enlpC <- function(Z_exp, PHI, THETA, PSI, docs, Ns, eta, gamma, beta) {
    .Call(`_iplsv_g_enlpC`, Z_exp, PHI, THETA, PSI, docs, Ns, eta, gamma, beta)
}
nlipC <- function(PHI, THETA, PSI, docs, eta, gamma, beta) {
    .Call(`_iplsv_nlipC`, PHI, THETA, PSI, docs, eta, gamma, beta)
}
g_nlipC <- function(PHI, THETA, PSI, Ns, docs, eta, gamma, beta) {
    .Call(`_iplsv_g_nlipC`, PHI, THETA, PSI, Ns, docs, eta, gamma, beta)
}
|
ba51be5b34f37fb95ba0ab7bcce8e7fce6ba200e
|
8035a3d05fc5ab5c38c753f4e96d3e50e35915ed
|
/man/imputeBLOQ.Rd
|
a2b5f9df739f208ada97f86db96844d71a7a99c3
|
[] |
no_license
|
cran/BLOQ
|
95de339375c26aae6ae5fedf075f4aa2dc7cab02
|
bacaf5984daa7972b3f5f32918354242a3d14aa7
|
refs/heads/master
| 2021-07-12T00:15:04.413710
| 2020-06-07T17:30:06
| 2020-06-07T17:30:06
| 145,910,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,994
|
rd
|
imputeBLOQ.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/imputeBLOQ.R
\encoding{UTF-8}
\name{imputeBLOQ}
\alias{imputeBLOQ}
\title{impute BLOQ's with various methods}
\usage{
imputeBLOQ(inputData, LOQ, imputationMethod, progressPrint = FALSE, ...)
}
\arguments{
\item{inputData}{numeric matrix or data frame of the size
n by J (n the sample size and J the number of time points)
the input dataset}
\item{LOQ}{scalar, limit of quantification value}
\item{imputationMethod}{could be a single string or a vector of strings with the same length as
the number of time points (ncol(inputData)). If it is left blank, then the imputation is done using
kernel density estimation method for the columns with at least one non-BLOQ component. For all the
rest (only BLOQ) the constant imputation is used. The allowed values are
"constant", "ros", "kernel", "cml" corresponding to constant imputation,
imputing using regression on order statistics, imputing using kernel density estimator, and
imputing using censored maximum likelihood, respectively.}
\item{progressPrint}{logical variable indicating whether the imputation progress should be printed or not.}
\item{...}{any other argument which should be changed according to the input arguments regarding
the functions corresponding to different imputation methods.}
}
\value{
a list with two components: imputed dataset, and the methods used to impute each column.
}
\description{
function to impute BLOQ's. The user can define column-specific methods to impute the BLOQ's.
}
\examples{
set.seed(111)
inputData <- simulateBealModelFixedEffects(10, 0.693,1, 1, seq(0.5,3,0.5))
LOQ = 0.125
imputeBLOQ(inputData, LOQ,
imputationMethod = c("cml", "ros", "kernel","constant", "constant", "constant"),
maxIter = 500, isMultiplicative = TRUE, constantValue = LOQ)
imputeBLOQ(inputData, LOQ, maxIter = 500, isMultiplicative = TRUE,
constantValue = LOQ/5, epsilon = 1e-04)
}
\author{
Vahid Nassiri, Helen Yvette Barnett
}
|
dcf0039b5fbfabccda10d70a52887ffbe5661a55
|
a76e8d681bb9e4480b89a4136ddc9661c016536d
|
/R/utils.R
|
001d3d883c1a59cb7c55ac2fecae427eb6cd0e37
|
[] |
no_license
|
voronoys/fairsplit
|
1f85adbb4c6f56ad8d03bb848fb654bd373ad42b
|
5e9ef665efae1503501c7d03a5c1ed1e646ccd97
|
refs/heads/master
| 2023-05-02T02:14:31.027989
| 2021-05-13T22:32:12
| 2021-05-13T22:32:12
| 354,666,400
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,738
|
r
|
utils.R
|
#' @title Weighted cosine similarity between two rows
#'
#' @param X_mat numeric matrix
#' @param i index of the first row
#' @param j index of the second row
#' @param w numeric vector of per-column weights
#'
#' @return Weighted cosine similarity between rows `i` and `j` of `X_mat`
cosine_sim <- function(X_mat, i, j, w) {
  row_i <- X_mat[i, ]
  row_j <- X_mat[j, ]
  weighted_dot <- sum(w * row_i * row_j)
  norm_product <- sqrt(sum(w * row_i^2) * sum(w * row_j^2))
  weighted_dot / norm_product
}
#' Weighted cosine distance matrix over the rows of `x`.
#' Pairwise similarities come from `cosine_sim()`; the result is a `dist`
#' object holding 1 - similarity for every unordered pair of rows.
cosine <- function(x, weights) {
  n_rows <- nrow(x)
  pair_idx <- t(combn(x = n_rows:1, m = 2))
  pair_sims <- apply(
    X = pair_idx, MARGIN = 1,
    FUN = function(p) cosine_sim(X_mat = x, i = p[1], j = p[2], w = weights)
  )
  sim_mat <- matrix(NA, nrow = n_rows, ncol = n_rows)
  diag(sim_mat) <- 0
  # pair_idx rows are (i, j) with i > j, so this fills the lower triangle,
  # which is all as.dist() reads.
  sim_mat[pair_idx] <- pair_sims
  colnames(sim_mat) <- rownames(sim_mat) <- rownames(x)
  as.dist(1 - sim_mat)
}
#' @title Euclidean weighted distance matrix
#'
#' @param x numeric matrix; distances are computed between its rows
#' @param weights Weights applied to each column before differencing
#'
#' @return Euclidean distance matrix as a `dist` object
euclidean <- function(x, weights) {
  n_rows <- nrow(x)
  pair_idx <- expand.grid(seq_len(n_rows), seq_len(n_rows))
  pair_dist <- function(a, b) sqrt(sum((weights * (x[a, ] - x[b, ]))^2))
  dist_values <- mapply(pair_dist, pair_idx[, 1], pair_idx[, 2])
  as.dist(matrix(dist_values, n_rows, n_rows))
}
#' @title Objective function to minimize
#'
#' @description Computes per-team attribute means, appends the overall mean
#' and the mean of unassigned individuals, then summarises how far each team
#' lies from the overall profile (sum plus standard deviation of the
#' distances to the "Overall" row).
#'
#' @param data data.frame with ids in the first column and numeric
#'   attributes in the remaining columns
#' @param groups n_ids by n_teams matrix composed of zeros and ones
#' @param weights Weights for each attribute in the dataset
#' @param dist_metric Distance metric, "cosine" or "euclidean"
#'   (the historical misspelling "euclidian" is still accepted)
#'
#' @return metric: the summary metric
#' @return means: attribute means by team (plus "Overall" and "Out" rows)
#' @return dist: distance matrix between teams and the overall mean
f_obj <- function(data, groups, weights = 1, dist_metric = "cosine") {
  means <- t(apply(X = groups, MARGIN = 2, FUN = function(x) colMeans(data[x == 1, ][-1])))
  means <- rbind(means, colMeans(means))
  # Mean profile of the individuals not assigned to any team.
  means <- rbind(means, colMeans(data[rowSums(groups) == 0, -1]))
  n_teams <- (nrow(means) - 2)
  rownames(means) <- c(paste0("Team ", 1:n_teams), "Overall", "Out")
  dist_metric <- as.character(dist_metric)
  if (dist_metric == "cosine") {
    dist_mat <- cosine(x = means[1:(n_teams + 1), ], weights = weights)
  } else if (dist_metric %in% c("euclidean", "euclidian")) {
    # Previously only the misspelling "euclidian" matched, so requesting
    # "euclidean" silently left `dist_mat` undefined and crashed below.
    dist_mat <- euclidean(x = means[1:(n_teams + 1), ], weights = weights)
  } else {
    stop("Unknown dist_metric: '", dist_metric,
         "'. Use 'cosine' or 'euclidean'.", call. = FALSE)
  }
  dist_mat <- as.matrix(dist_mat)
  # The last row of dist_mat holds each team's distance to "Overall".
  out <- sum(x = dist_mat[nrow(dist_mat), ]) + sd(dist_mat[nrow(dist_mat), ])
  return(list(metric = out, means = means, dist = dist_mat))
}
#' @title Split individuals into fair teams
#'
#' @description Random-search optimiser: repeatedly samples candidate team
#' assignments — after a burn-in of `buffer` uniform draws, sampling is
#' biased towards ids that appeared in recent accepted configurations —
#' and keeps the configuration minimising `f_obj()`.
#'
#' @param data data.frame with ids in the first column and attributes in the remaining columns
#' @param n_teams number of teams to form
#' @param team_size number of individuals per team
#' @param weights Weights for each attribute in the dataset
#' @param n_it Number of iterations
#' @param buffer Number of initial iterations using purely uniform random samples
#' @param dist_metric Distance metric to compute the dissimilarities
#' @param seed To make the results reproducible
#'
#' @return best_setting: Best team configuration found
#' @return groups: Array containing all configurations
#' @return out: Individuals not included in any team (just if n_ids != n_teams*team_size)
#' @return probs: Array containing all probabilities during the optimization
split_team <- function(data, n_teams, team_size, weights, n_it, buffer, dist_metric, seed = 4815162342) {
  # Seed
  set.seed(seed)
  # Preparing for the loop
  n_ids <- nrow(data)
  # Small constant keeps zero-frequency ids at a nonzero sampling weight.
  tol <- 10e-10
  groups <- probs <- array(NA, dim = c(n_ids, n_teams, n_it))
  ids <- 1:n_ids
  metric <- Inf
  metrics <- rep(NA, n_it)
  for(i in 1:n_it) {
    group_aux <- matrix(0, nrow = n_ids, ncol = n_teams)
    probs_aux <- matrix(1/n_ids, nrow = n_ids, ncol = n_teams)
    ids_aux <- ids
    # Teams are filled in random order so no team systematically picks first.
    for(j in sample(1:n_teams)) {
      if(i > buffer) {
        # Bias towards ids frequently assigned to team j in the last
        # `buffer` accepted configurations.
        total <- rowSums(groups[, j, (i-buffer):(i-1)])
        probs_aux[, j] <- total/sum(total)
      }
      samp <- sample(x = ids_aux, size = team_size, prob = probs_aux[ids_aux, j] + tol)
      group_aux[samp, j] <- 1
      # Remove the sampled ids so no one lands in two teams.
      ids_aux <- ids_aux[!ids_aux %in% samp]
    }
    metric_i <- f_obj(data = data, groups = group_aux, weights = weights, dist_metric = dist_metric)$metric
    probs[, , i] <- probs_aux
    # Greedy accept/reject: keep the candidate only if it improves the metric,
    # otherwise carry the previous best configuration forward.
    if(metric_i < metric) {
      metric <- metric_i
      metrics[i] <- metric_i
      groups[, , i] <- group_aux
    } else {
      metrics[i] <- metric
      groups[, , i] <- groups[, , (i-1)]
    }
  }
  # Recompute the full f_obj output for the final (best) configuration.
  out <- f_obj(data = data, groups = groups[, , i], weights = weights, dist_metric = dist_metric)
  out$groups <- apply(groups[, , i], 2, function(x) data[which(x == 1), 1])
  colnames(out$groups) <- paste("Team", 1:n_teams)
  # NOTE(review): `subset(data, !id %in% ...)` assumes the id column is
  # literally named `id`; `ids_aux` leaks out of the final loop iteration
  # (valid R, but worth confirming it is intentional).
  return(
    list(
      best_setting = out,
      without_team = subset(data, !id %in% out$groups)[, 1],
      groups = groups,
      out = ids_aux,
      probs = probs,
      metrics = metrics,
      dist_mat = out$dist
    )
  )
}
#' @title Bar chart to display individuals attributes
#'
#' @description Builds a small flexbox HTML bar: the label on the left and
#' a filled bar of the given width on the right.
#'
#' @param label Label to place before the bar
#' @param width Bar width
#' @param height Bar height
#' @param fill Color to fill the bar
#' @param background Bar's background color
#'
#' @return barplot (an HTML tag structure)
bar_chart <- function(label, width = "100%", height = "12px", fill = "#fc5185", background = "#e1e1e1") {
  filled_bar <- shiny::div(style = list(background = fill, width = width, height = height))
  bar_track <- shiny::div(style = list(flexGrow = 1, marginLeft = "8px", background = background), filled_bar)
  shiny::div(style = list(display = "flex", alignItems = "center"), label, bar_track)
}
#' @title Stars to rate each individual
#'
#' @description Renders `rating` as a row of `max_rating` star icons:
#' one full star per whole unit, one half star for any fractional
#' remainder, and greyed-out stars for the rest.
#'
#' @param rating Value between 0 and max_rating
#' @param max_rating Maximum value
#'
#' @return max_rating stars (an HTML tag structure)
rating_stars <- function(rating, max_rating = 5) {
  # `half` picks the half-star glyph; `empty` greys the icon out.
  star_icon <- function(half = FALSE, empty = FALSE) {
    if(half) {
      htmltools::tagAppendAttributes(
        shiny::icon("star-half-alt"), style = paste("color:", if (empty) "#edf0f2" else "#ffd17c"), "aria-hidden" = "true"
      )
    } else {
      htmltools::tagAppendAttributes(
        shiny::icon("star"), style = paste("color:", if (empty) "#edf0f2" else "#ffd17c"), "aria-hidden" = "true"
      )
    }
  }
  # floor() so full stars never exceed the rating. The previous round()
  # could make `rating - full_star` negative (e.g. 3.7 -> 4 full stars,
  # no half star), so half stars only appeared when the rating happened
  # to round down.
  full_star <- floor(rating)
  half_star <- as.integer((rating - full_star) > 0)
  empty_star <- max_rating - full_star - half_star
  stars <- lapply(
    seq_len(max_rating),
    function(i) {
      if(i <= full_star) {
        star_icon()
      } else if(i <= full_star + half_star) {
        star_icon(half = TRUE)
      } else {
        star_icon(empty = TRUE)
      }
    }
  )
  label <- sprintf("%s out of %s stars", rating, max_rating)
  shiny::div(title = label, role = "img", stars)
}
#' @title Attributes column definition
#'
#' @description Builds a reactable column definition that renders each
#' numeric value as a horizontal bar whose width is the value's share
#' of `max`.
#'
#' @param x Attribute (column) name
#' @param max Value that maps to a full-width bar
#'
#' @return colDef for reactable
attr_column_def <- function(x, max) {
  render_bar <- function(value) {
    bar_width <- paste0(round((value/max) * 100, 2), "%")
    bar_chart(formatter(value), width = bar_width, fill = "#764567", background = "#e1e1e1")
  }
  reactable::colDef(
    name = x,
    align = "center",
    minWidth = 130,
    maxWidth = 500,
    aggregate = "mean",
    format = reactable::colFormat(digits = 2),
    cell = render_bar,
    html = TRUE,
    style = "display: 'flex'; flexDirection: 'column'; justifyContent: 'center'"
  )
}
#' @title Find the next beauty number (under my definition of beauty)
#'
#' @description Rounds `num` up to the next "nice" value: below 100 it
#' snaps to a fixed list (1, 3, 5, 10, 15, 25, 50, 75, 100); from 100
#' upwards it keeps only the two leading digits, forcing the second one
#' to 0 or 5.
#'
#' @param x A number
#'
#' @return Next beauty number
next_beauty_number <- function(num) {
  target <- round(num) - 1
  magnitude <- 10^(nchar(target) - 1)
  lead_digit <- as.integer(target/magnitude)
  second_digit <- as.integer((target - lead_digit*magnitude)/(magnitude/10))
  if (magnitude < 100) {
    nice_values <- c(1, 3, 5, 10, 15, 25, 50, 75, 100)
    candidate <- nice_values[target + 1 <= nice_values][1]
    lead_digit <- as.integer(candidate/magnitude)
    second_digit <- as.integer((candidate - lead_digit*magnitude)/(magnitude/10))
  } else if (second_digit >= 5) {
    second_digit <- 0
    lead_digit <- lead_digit + 1
  } else {
    second_digit <- 5
  }
  lead_digit*magnitude + second_digit*(magnitude/10)
}
#' @title Format numbers to display
#'
#' @description Abbreviates numbers with K/M/B suffixes, rounding to one
#' decimal. Values of 1e12 or more (and NA) fall back to `as.character()`.
#'
#' @param x A numeric vector (e.g. a data.frame column)
#'
#' @return Character vector of formatted numbers, same length as `x`
formatter <- function(x) {
  fmt_one <- function(v) {
    if (is.na(v)) return(NA_character_)
    # The original M and B branches passed `1` as an ignored extra
    # argument to as.character() instead of round(v/1e6, 1), so e.g.
    # 1234567 rendered as "1.234567M".
    if (v < 1e3) {
      as.character(round(v))
    } else if (v < 1e6) {
      paste0(round(v/1e3, 1), "K")
    } else if (v < 1e9) {
      paste0(round(v/1e6, 1), "M")
    } else if (v < 1e12) {
      paste0(round(v/1e9, 1), "B")
    } else {
      as.character(v)
    }
  }
  vapply(x, fmt_one, character(1), USE.NAMES = FALSE)
}
#' @title Radar plot
#'
#' @param data data.frame with the first column storing the attributes, and the other columns with values to be visualized.
#' @param name_col name of the attribute column. NOTE(review): the chart is
#'   initialised with the hard-coded column `name` below, so this argument
#'   only works when `name_col == "name"` — confirm before generalising.
#' @param cols column names used to plot the radar
#' @param theme see \code{\link[echarts4r]{e_theme}}
#' @param max_vec optional vector of per-axis maxima; when NULL each axis
#'   maximum is derived from the data via `next_beauty_number()`
#'
#' @import echarts4r
#' @return an echarts4r radar chart
plot_radar <- function(data, name_col = "name", cols, theme = "roma", max_vec = NULL) {
  data_plot <- data %>%
    dplyr::select(!!name_col, !!cols)
  p_base <- data_plot %>%
    echarts4r::e_charts(name)
  # One rounded-up maximum per attribute row so all series share axes.
  if(is.null(max_vec)) max_vec <- apply(X = data[, -1], MARGIN = 1, FUN = function(x) next_beauty_number(max(x)))
  # Add one radar series per selected value column.
  for(i in seq_along(cols)) {
    p_base <- p_base %>%
      e_radar_vec(
        serie = names(data_plot)[-1][i],
        max = max_vec,
        name = names(data_plot)[-1][i]
      )
  }
  p_final <- p_base %>%
    echarts4r::e_tooltip(trigger = "item") %>%
    echarts4r::e_labels(show = FALSE, position = "top") %>%
    echarts4r::e_theme(theme)
  return(p_final)
}
#' @title Making .add_indicators work for vectors of maximum values
#'
#' @description Internal helper adapted from echarts4r's `.add_indicators()`,
#' extended so `max` may be a vector (one maximum per radar axis); a scalar
#' `max` is recycled across all axes.
#' NOTE(review): depends on the unexported `echarts4r:::.get_data()`, so it
#' may break on echarts4r upgrades.
.add_indicators_vec <- function(e, r.index, max, radar = list()) {
  if (!length(e$x$opts$radar)) e$x$opts$radar <- list(list())
  x <- echarts4r:::.get_data(e, e$x$mapping$x)
  # Recycle a scalar maximum so every axis gets one.
  if(length(max) == 1) max <- rep(max, length(x))
  # One {name, max} entry per radar axis, as the echarts JSON expects.
  indicators <- data.frame(name = x, max = max)
  indicators <- apply(indicators, 1, as.list)
  e$x$opts$radar[[r.index + 1]] <- radar
  e$x$opts$radar[[r.index + 1]]$indicator <- indicators
  e
}
#' @title Making e_radar_ work for vectors of maximum values
#'
#' @description Variant of `echarts4r::e_radar_()` that delegates axis
#' construction to `.add_indicators_vec()`, so `max` may be a vector of
#' per-axis maxima. The first radar series creates the radar option block;
#' later calls only append their data to the existing series.
#' NOTE(review): uses unexported echarts4r internals (`.rm_axis`,
#' `.get_data`) — fragile across echarts4r versions.
e_radar_vec <- function (e, serie, max = 100, name = NULL, legend = TRUE, rm_x = TRUE, rm_y = TRUE, ..., radar = list()) {
  r.index <- 0
  if (missing(e)) {
    stop("must pass e", call. = FALSE)
  }
  if (missing(serie)) {
    stop("must pass serie", call. = FALSE)
  }
  # Radar charts have no cartesian axes; strip them from the options.
  e <- echarts4r:::.rm_axis(e, rm_x, "x")
  e <- echarts4r:::.rm_axis(e, rm_y, "y")
  if (is.null(name)) name <- serie
  vector <- echarts4r:::.get_data(e, serie)
  series <- purrr::map(e$x$opts$series, "type") %>% unlist()
  if (!"radar" %in% series) {
    # First radar series: create it and build the per-axis indicators.
    serie <- list(type = "radar", data = list(list(value = vector, name = name)), radarIndex = r.index, ...)
    e <- .add_indicators_vec(e, r.index, max, radar = radar)
    e$x$opts$series <- append(e$x$opts$series, list(serie))
  }
  else {
    # Subsequent series: append the data to the existing radar series.
    e$x$opts$series[[grep("radar", series)]]$data <- append(e$x$opts$series[[grep("radar", series)]]$data, list(list(value = vector, name = name)))
  }
  if (isTRUE(legend)) {
    e$x$opts$legend$data <- append(e$x$opts$legend$data, list(name))
  }
  return(e)
}
#' @title Scale a number to 0-1 scale
#'
#' @description Linearly maps values from [min, max] onto [0, 1]; errors
#' when any value falls outside the range.
#'
#' @param x a number (or numeric vector) between min and max
#' @param min minimum value in the scale
#' @param max maximum value in the scale
#'
#' @return the number in the 0-1 scale
to_01 <- function(x, min = 0, max = 1) {
  out_of_range <- any(x < min) | any(x > max)
  if (out_of_range) stop("The number must be something between min and max")
  (x - min)/(max - min)
}
#' @title Rescale a number to min-max scale
#'
#' @description Inverse of `to_01()`: linearly maps values from [0, 1]
#' onto [min, max].
#'
#' @param x a number (or numeric vector) between 0 and 1
#' @param min minimum value in the scale
#' @param max maximum value in the scale
#'
#' @return the number in the min-max scale
to_minmax <- function(x, min = 0, max = 1) {
  # any() keeps the condition scalar for vector input; the previous
  # `if (x < 0 | x > 1)` raised a length-error on vectors (R >= 4.2)
  # and was inconsistent with the vector-safe check in to_01().
  if (any(x < 0) || any(x > 1)) stop("The number must be something between 0 and 1")
  out <- (max - min) * x + min
  return(out)
}
#' @title Theme for reactable (copy from https://glin.github.io/reactable/articles/spotify-charts/spotify-charts.html)
#'
#' @description Dark Spotify-style theme: light text on a near-black
#' background, with a custom inline-SVG magnifier icon for the search box.
#'
#' @return Theme for reactable (a `reactableTheme` object)
reactable_theme <- function() {
  # Returns a data-URI CSS value embedding a magnifier SVG in the given color.
  search_icon <- function(fill = "none") {
    svg <- sprintf('<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24"><path fill="%s" d="M10 18c1.85 0 3.54-.64 4.9-1.69l4.4 4.4 1.4-1.42-4.39-4.4A8 8 0 102 10a8 8 0 008 8.01zm0-14a6 6 0 11-.01 12.01A6 6 0 0110 4z"/></svg>', fill)
    sprintf("url('data:image/svg+xml;base64,%s')", jsonlite::base64_enc(svg))
  }
  # Shared palette: three grey text shades on a near-black background.
  text_color <- "hsl(0, 0%, 95%)"
  text_color_light <- "hsl(0, 0%, 70%)"
  text_color_lighter <- "hsl(0, 0%, 55%)"
  bg_color <- "hsl(0, 0%, 10%)"
  reactable::reactableTheme(
    color = text_color,
    backgroundColor = bg_color,
    borderColor = "hsl(0, 0%, 16%)",
    borderWidth = "1px",
    highlightColor = "rgba(255, 255, 255, 0.1)",
    cellPadding = "10px 8px",
    style = list(
      fontFamily = "Work Sans, Helvetica Neue, Helvetica, Arial, sans-serif",
      fontSize = "14px",
      "a" = list(
        color = text_color,
        "&:hover, &:focus" = list(
          textDecoration = "none",
          borderBottom = "1px solid currentColor"
        )
      ),
      ".number" = list(
        color = text_color_light,
        fontFamily = "Source Code Pro, Consolas, Monaco, monospace"
      ),
      ".tag" = list(
        padding = "2px 4px",
        color = "hsl(0, 0%, 40%)",
        fontSize = "12px",
        border = "1px solid hsl(0, 0%, 24%)",
        borderRadius = "2px"
      )
    ),
    headerStyle = list(
      color = text_color_light,
      fontWeight = 400,
      fontSize = "12px",
      letterSpacing = "1px",
      textTransform = "uppercase",
      "&:hover, &:focus" = list(color = text_color)
    ),
    rowHighlightStyle = list(
      ".tag" = list(color = text_color, borderColor = text_color_lighter)
    ),
    # Search input: embeds the SVG icon and swaps its color on hover/focus.
    searchInputStyle = list(
      paddingLeft = "30px",
      paddingTop = "8px",
      paddingBottom = "8px",
      width = "100%",
      border = "none",
      backgroundColor = bg_color,
      backgroundImage = search_icon(text_color_light),
      backgroundSize = "16px",
      backgroundPosition = "left 8px center",
      backgroundRepeat = "no-repeat",
      "&:focus" = list(backgroundColor = "#3c4e82", border = "none"),
      "&:hover, &:focus" = list(backgroundImage = search_icon(text_color)),
      "::placeholder" = list(color = text_color_lighter),
      "&:hover::placeholder, &:focus::placeholder" = list(color = text_color)
    ),
    paginationStyle = list(color = text_color_light),
    pageButtonHoverStyle = list(backgroundColor = "hsl(0, 0%, 20%)"),
    pageButtonActiveStyle = list(backgroundColor = "hsl(0, 0%, 24%)")
  )
}
#' @title Box for the examples
#'
#' @description Builds a clickable shiny action-button block (an `<a>` with
#' class "action-button") containing a colored tile with a label and an icon
#' loaded from `img/<icon>`.
#'
#' @param text label shown inside the tile
#' @param text_color CSS color for the label
#' @param background_color CSS color for the tile background
#' @param icon file name (under `img/`) of the icon image
#' @param id shiny input id assigned to the action button
#'
#' @return Box in HTML format
tab_voronoys <- function(text, text_color, background_color, icon, id) {
  shiny::HTML(
    paste0(
      '<a id="', id, '" href="#" class="action-button" style="margin-left: 5px; margin-right: 5px">
      <div class = "voronoys-block" style = "background-color:', background_color, ';">
        <span class = "name" style = "color:', text_color, '">', text, '</span>
        <div class="img_block">
            <div class="img_block_conteiner">
              <img src="img/', icon,'" style="max-width: 8vh; margin: 10px 10px 10px 20px; opacity: 0.9;">
            </div>
        </div>
      </div></a>'
    )
  )
}
#' @title Removing button from fileInput
#'
#' @description Variant of `shiny::fileInput()` that hides the browse button:
#' the real `<input type="file">` is moved off-screen and wrapped in a label,
#' leaving only a read-only text box and the upload progress bar.
#' NOTE(review): `restoreInput`, `toJSON`, `div`, `span` and `tags` are used
#' unqualified, so shiny (and its `toJSON`) must be attached — confirm this
#' file is only sourced inside a shiny app.
#'
#' @param inputId shiny input id
#' @param label unused here; kept for signature compatibility with fileInput
#' @param accept vector of accepted MIME types / extensions
#' @param width unused here; kept for signature compatibility with fileInput
#' @param buttonLabel unused here; kept for signature compatibility
#' @param placeholder text shown in the read-only box before a file is chosen
#'
#' @return fileInput HTML code
fileInput2 <- function(inputId, label = NULL, accept = NULL, width = NULL,
                       buttonLabel = "Browse...", placeholder = "No file selected") {
  # Restore a previously bookmarked value, if any.
  restoredValue <- restoreInput(id = inputId, default = NULL)
  if (!is.null(restoredValue) && !is.data.frame(restoredValue)) {
    warning("Restored value for ", inputId, " has incorrect format.")
    restoredValue <- NULL
  }
  if (!is.null(restoredValue)) {
    restoredValue <- toJSON(restoredValue, strict_atomic = FALSE)
  }
  # The actual file input, positioned far off-screen so no button shows.
  inputTag <- tags$input(
    id = inputId,
    name = inputId,
    type = "file",
    style = "position: absolute !important; top: -99999px !important; left: -99999px !important;",
    `data-restore` = restoredValue
  )
  if (length(accept) > 0)
    inputTag$attribs$accept <- paste(accept, collapse = ",")
  div(
    class = "form-group shiny-input-container",
    div(
      class = "input-group",
      # Empty label wrapping the hidden input: clicking it opens the dialog.
      tags$label(
        class = "input-group-btn input-group-prepend",
        span(
          "",
          inputTag
        )
      ),
      # Visible read-only text box showing the placeholder / chosen file.
      tags$input(
        type = "text",
        class = "form-control",
        style = "border: 1px dotted rgba(0,0,0,0.42) !important; padding: 10px !important;",
        placeholder = placeholder,
        readonly = "readonly"
      )
    ),
    # Progress bar element that shiny's file-upload JS updates.
    tags$div(
      id = paste(inputId, "_progress", sep = ""),
      class = "progress active shiny-file-input-progress",
      tags$div(class = "progress-bar")
    )
  )
}
# Adapted from shinymaterial's update_material_dropdown():
# https://github.com/ericrayanderson/shinymaterial/blob/0254f96c7dbb9374b8a2fd9ec7b24bcae73ae68d/R/update-shiny-material-dropdown.R
# Extended to support selecting MULTIPLE values at once. Works by sending
# custom JS messages that rebuild the <select> options and mark each value
# as selected, then re-initialises materialize's formSelect.
# NOTE(review): errors are reported via message() + NULL return rather than
# stop(), matching the upstream function — confirm callers check for NULL.
update_material_dropdown_multiple <- function(session, input_id, value = NULL, choices = NULL){
  if(is.null(value)) {
    message("ERROR: Must include 'value' with update_material_dropdown")
    return(NULL)
  }
  if(!is.null(choices)){
    if(is.null(names(choices)))
      names(choices) <- choices
    # Every requested value must exist among the new choices.
    if(!all(value %in% choices)) {
      for(ele in value[!value %in% choices])
        message("ERROR: value '", ele, "' not found in choices")
      return(NULL)
    }
    # Spaces are encoded with a sentinel token (shinymaterial convention).
    choices <- gsub(pattern = " ", replacement = "_shinymaterialdropdownspace_", x = choices, fixed = TRUE)
    # Rebuild the hidden <select>'s <option> list.
    choices_value_js_code <- paste0(
      paste0("$('#", input_id, "').empty(); $('#", input_id, "')"),
      paste0('.append(\'<option value="', choices, '"> ', names(choices), "</option>')", collapse = "")
    )
    session$sendCustomMessage(type = "shinymaterialJS", choices_value_js_code)
    # Rebuild the visible materialize dropdown list items.
    choices_label_js_code <- paste0(
      "$('#shiny-material-dropdown-", input_id, "').find('ul').empty();",
      "$('#shiny-material-dropdown-", input_id, "').find('ul')",
      paste0(".append('<li><span> ", names(choices), "</span></li>')", collapse = "")
    )
    session$sendCustomMessage(type = "shinymaterialJS", choices_label_js_code)
  }
  valueShow <- gsub(pattern = " ", replacement = "_shinymaterialdropdownspace_", x = value, fixed = TRUE)
  # Select every requested option, refresh the widget and push the new
  # (multi-element) value back to the shiny input.
  value_js_code <- paste0(
    paste0("$('#", input_id, "').find('option[value=\"", valueShow, "\"]').prop('selected', true);", collapse = ""),
    "$('#", input_id, "').formSelect();",
    "Shiny.onInputChange('", input_id, "', ['", paste0(value, collapse = "','"), "']);"
  )
  session$sendCustomMessage(type = "shinymaterialJS", value_js_code)
}
|
63b177e9c475fa65e4bb973b6595f0d4e955167a
|
ac362ad0dc04a7014c064d5d652128f6a3b954fe
|
/man/zscore.Rd
|
e63b817ebf5f8b5659c8a96fbbd822093f8d306b
|
[
"MIT"
] |
permissive
|
hauselin/hausekeep
|
4078ed0d25d22da83fee6b3dfc15cd6c7bae752c
|
3e1560814e953f67fd407701868eb833c30e0a1d
|
refs/heads/master
| 2023-01-20T15:23:23.711015
| 2023-01-19T02:16:08
| 2023-01-19T02:16:08
| 168,783,741
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 577
|
rd
|
zscore.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zscore.R
\name{zscore}
\alias{zscore}
\title{Compute z-score}
\usage{
zscore(x, n = 0)
}
\arguments{
\item{x}{A vector}
\item{n}{use n or n-1 in the denominator when computing SD (default 0 for n; use -1 for n - 1)}
}
\value{
A vector
}
\description{
Computes z-score (standardized scores)
}
\note{
Formula: (x - mean(x)) / SD(x)
}
\examples{
zscore(1:10)
zscore(1:10, n = 0) # default n = 0 (SD is computed using n)
zscore(1:10, n = -1) # n = -1 (SD is computed using n-1)
}
\author{
Hause Lin
}
|
cc6a6a135abcbfe8b4524eaefdc9869036b68c6b
|
ca33b1708cdf025a6579fa7c3d4241e0c78052ef
|
/app/health_analysis.R
|
c03ef45a64cbdb589bedce9ece6e722a43cb9e48
|
[] |
no_license
|
matthgray/health_care_spending
|
0497a0ff709d58e979d851df400a1f74554ee861
|
2cbf252d1d76970dd2d27979dd93bb4e22fa7fba
|
refs/heads/master
| 2023-02-26T22:44:34.471633
| 2021-01-27T16:12:04
| 2021-01-27T16:12:04
| 323,480,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,582
|
r
|
health_analysis.R
|
# Exploratory analysis of health care spending by disease category.
# Expects CleanData.csv with (at least) the columns Disease, Year and
# Expenditure.

if (!require("pacman")) install.packages("pacman")
library("tidyverse")
# Load contributed packages with pacman
pacman::p_load(pacman, party, rio, tidyverse)
library(ggplot2)
library(dplyr)
library("growthrates")
health_spending <- read_csv('CleanData.csv')
if (!require("pacman")) install.packages("pacman")
# pacman must already be installed; then load contributed
# packages (including pacman) with pacman
pacman::p_load(GGally, magrittr, pacman, rio, tidyverse)
# health_spending %>% ggpairs()

# Overall mean expenditure across all diseases and years.
mean(health_spending$Expenditure)

# Correlation between expenditure and year.
# (The original `health_spending %>% cor(Expenditure, Year)` and
# `cor(health_spending, Expenditure, Year)` calls were malformed: cor()
# does not resolve bare column names from a piped data frame.)
cor(health_spending$Expenditure, health_spending$Year)

# Mean expenditure for one disease group. (A dangling pipeline ending in
# the undefined symbol `growth.` was removed here; the growth-rate
# computation below replaces it.)
health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>%
  summarize(mean(Expenditure))

# Total change in expenditure over the observed period.
health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>%
  summarize(max(Expenditure) - min(Expenditure))

# Year-over-year growth rate (percent) for one disease group.
test <- health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>%
  arrange(Year) %>%
  mutate(Diff_year = Year - lag(Year), # Difference in time (just in case there are gaps)
         Diff_growth = Expenditure - lag(Expenditure), # Difference in expenditure between years
         Rate_percent = (Diff_growth / Diff_year)/Expenditure * 100) # growth rate in percent
test

# Expenditure trend with a linear fit overlay.
health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>%
  ggplot(aes(Year, Expenditure)) +
  geom_point(size = 3) +
  geom_smooth(method = lm)

# Simple linear model and diagnostics.
fit1 <- health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>% # Save as "fit1"
  select(Year, Expenditure) %>% # y, then x
  lm()
fit1 %>% summary()
fit1 %>% confint()
fit1 %>% predict()
fit1 %>% predict(interval = "prediction")
fit1 %>% lm.influence()
fit1 %>% influence.measures()
fit1 %>% plot()

# Average cost per disease, highest first.
h_avg_costs <- health_spending %>%
  group_by(Disease) %>%
  summarize(average_cost = mean(Expenditure)) %>%
  arrange(desc(average_cost))

# Rows ordered by expenditure, lowest / highest first.
# (high_costs previously referenced the undefined objects `h_data`/`Costs`.)
low_costs <- health_spending %>%
  arrange(Expenditure)
high_costs <- health_spending %>%
  arrange(desc(Expenditure))

# Spread (max - min) of expenditure per disease.
different_costs <- health_spending %>%
  group_by(Disease) %>%
  summarize(difference = max(Expenditure) - min(Expenditure)) %>%
  arrange(desc(difference))

health_spending %>%
  filter(Disease == "Infectious and parasitic diseases")

# Standard deviation of expenditure per disease.
h_std_costs <- health_spending %>%
  group_by(Disease) %>%
  summarize(std = sd(Expenditure)) %>%
  arrange(desc(std))

# Bar chart of expenditure by year for one disease.
health_spending %>%
  filter(Disease == "Infectious and parasitic diseases") %>%
  ggplot(aes(x=Year, y=Expenditure)) +
  ggtitle("Health") +
  geom_bar(stat="identity", fill="steelblue") +
  geom_text(aes(label=Expenditure), vjust=1.6, color="black", size=3.5)

# Step chart of expenditure by year.
health_spending %>%
  filter(Disease =="Symptoms; signs; and ill-defined conditions")%>%
  ggplot(aes(x=Year, y=Expenditure, group=1)) +
  geom_step()+
  geom_point()

# Boxplot of expenditure.
health_spending %>%
  filter(Disease == "Symptoms; signs; and ill-defined conditions") %>%
  ggplot(aes( y=Expenditure))+
  geom_boxplot(fill='steelblue',color="green")+
  theme_dark()
#geom_dotplot(binaxis = 'y',stackdir = 'center', dotsize =1)

# Interactive boxplot; plotly maps columns with formulas, so `~Expenditure`
# (the original bare `Expenditure` was not visible outside the data frame).
library(plotly)
fig <- health_spending %>%
  plot_ly(y = ~Expenditure, type = "box", quartilemethod="exclusive") # or "inclusive", or "linear" by default
fig

# All disease categories in the data (was referencing the undefined `h_data`;
# a stray `m_health` symbol was removed).
sort(unique(health_spending$Disease))
|
579455575e62065f0ccfa7e96ae80a6b77ebea16
|
ba2b092b91f207ae91c269587b8210ede808ecce
|
/R/matchChromosomes.R
|
c6a9cf370e8f435b7caf50ec52fc8a8a264978dc
|
[
"Apache-2.0"
] |
permissive
|
fursham-h/factR
|
cf12691985dfad6bb67e58e09b602895d1a5012e
|
99ef8f40bf109b8a718bc51a3699578c2b3b6c4a
|
refs/heads/master
| 2023-08-18T13:46:27.265640
| 2023-08-02T16:38:24
| 2023-08-02T16:38:24
| 230,322,841
| 2
| 0
|
Apache-2.0
| 2021-04-30T12:01:08
| 2019-12-26T20:14:29
|
R
|
UTF-8
|
R
| false
| false
| 2,106
|
r
|
matchChromosomes.R
|
#' Match seqlevels of input GRanges to reference GRanges or BioString objects
#'
#' @description
#' A convenient wrapper to match seqlevels of a query GRanges object to a
#' reference object that contain seqlevels information. Reference can be a
#' GRanges, GRangesList, BioString or DNAString object. Seqlevels which fail
#' to match will be dropped.
#'
#' @param x GRanges object with seqnames to change
#' @param to GRanges object from which seqnames is referenced
#'
#' @return Corrected input GRanges
#' @export
#' @author Fursham Hamid
#'
#' @examples
#' ## ---------------------------------------------------------------------
#' ## EXAMPLE USING TOY DATASET
#' ## ---------------------------------------------------------------------
#' require(GenomicRanges)
#'
#' ## Create toy GRanges objects
#' gr1 <- GRanges("1", IRanges(start = c(1, 101), width = c(20, 20)), "+")
#' gr2 <- GRanges("chr1", IRanges(start = c(1, 101), width = c(20, 20)), "+")
#'
#' ## Match Ensembl-style chromosomes from gr1 to UCSC-style gr2
#' matchChromosomes(gr1, gr2)
#'
#' ## Possible to match chromosomes from GRanges object to a Biostrings
#' # object containing seqlevels
#' x0 <- c("chr2" = "CTCACCAGTAT", "chr3" = "TGTCAGTCGA")
#' dna <- Biostrings::DNAStringSet(x0)
#'
#' ## Match gr1 to dna
#' matchChromosomes(gr1, dna)
matchChromosomes <- function(x, to) {
  # Remember the seqlevel count so we can report any dropped levels below.
  nseqlevelsbefore <- length(GenomeInfoDb::seqlevels(x))
  suppressWarnings(
    if (!has_consistentSeqlevels(x, to)) {
      # attempt to match style first
      # (map e.g. Ensembl "1" to UCSC "chr1"; unmapped levels become NA
      # and are excluded from the rename)
      newStyle <- GenomeInfoDb::mapSeqlevels(GenomeInfoDb::seqlevels(x), (GenomeInfoDb::seqlevelsStyle(to)[1]))
      newStyle <- newStyle[!is.na(newStyle)]
      x <- GenomeInfoDb::renameSeqlevels(x, newStyle)
    }
  )
  # Warn when seqlevels disappeared during matching.
  # NOTE(review): `nseqlevelsafter` actually holds the NUMBER REMOVED, not
  # the count after matching — the message is correct, the name misleading.
  if (length(GenomeInfoDb::seqlevels(x)) < nseqlevelsbefore) {
    nseqlevelsafter <- nseqlevelsbefore - length(GenomeInfoDb::seqlevels(x))
    rlang::warn(sprintf("%s seqlevels were removed after matching",
                        nseqlevelsafter))
  }
  # NOTE(review): `msg` is never used; presumably this call is kept for the
  # warning side effect of has_consistentSeqlevels() — confirm.
  msg <- has_consistentSeqlevels(x, to)
  return(x)
}
|
6bf7e3b241f2902b70470d42a022897e070b6077
|
44143d0c480e1cabf87f2c44909afe2aa85bd67c
|
/man/compute.QDiD.Rd
|
e4cb7ca8ccea19747dc48ddcbf79fe23f4ca8f14
|
[] |
no_license
|
bcallaway11/qte
|
c383e991a3969e9e50e30477e701ee11c500d574
|
09830e766b7f9e28643928e9a170f73d9c4c0bcf
|
refs/heads/master
| 2023-08-30T21:43:34.342973
| 2023-08-15T21:37:43
| 2023-08-15T21:37:43
| 19,584,525
| 8
| 5
| null | null | null | null |
UTF-8
|
R
| false
| true
| 413
|
rd
|
compute.QDiD.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QDiD.R
\name{compute.QDiD}
\alias{compute.QDiD}
\title{Quantile Difference in Differences}
\usage{
compute.QDiD(qp)
}
\arguments{
\item{qp}{QTEparams object containing the parameters passed to QDiD}
}
\value{
QTE object
}
\description{
\code{compute.QDiD} computes the Quantile Difference in
Differences estimator
}
\keyword{internal}
|
d200e0512643c7011c751305ea23c6bdb13cd802
|
288da8178b59a864653cf71d3aab544484d55ff9
|
/R/ap_parse_page.R
|
a46feebefc12ef588da33b9f8e153cbb11f3ca02
|
[
"MIT"
] |
permissive
|
patperu/albopop
|
430ab153ace929b8ced90a7cea2706194c70029a
|
012d447a03307882fb466269cd8494da1fabdea5
|
refs/heads/master
| 2021-01-10T05:24:01.723465
| 2016-02-25T18:59:35
| 2016-02-25T18:59:35
| 52,151,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,192
|
r
|
ap_parse_page.R
|
#' Fetch the data.
#'
#' This scrapes the 'Albo Pretorio' data, dispatching to the site-specific
#' parser selected by `site`.
#'
#' @param url A URL
#' @param link_url The URL for the links, if NULL same as `url`
#' @param site character, sets the input, currently four sites are supported: "JCityGov", "mapweb", "saga", "studiok"
#' @return A dataframe
#' @export
ap_parse_page <- function(url, link_url = NULL, site = 'studiok') {
  switch(site,
         JCityGov = get_JCityGov(url, link_url),
         mapweb = get_mapweb(url),
         saga = get_saga(url),
         studiok = get_studiok(url, link_url),
         # Previously an unknown `site` fell through switch() and silently
         # returned NULL; fail loudly instead.
         stop("Unsupported site: '", site,
              "'. Use one of 'JCityGov', 'mapweb', 'saga', 'studiok'.",
              call. = FALSE)
  )
}
#' Parse one or more 'studiok' albo pretorio pages.
#'
#' For each URL: reads the main results table, cleans it, resolves the
#' per-record detail links against `link_url`, sorts by start date
#' (dd/mm/yyyy, newest first) and row-binds everything into a single
#' data.frame of class "albopop_srs".
#' NOTE(review): relies on the file-local helpers `clear_unicode()` and
#' `wrap_nr_ogg()` and on rvest/xml2/lubridate being attached.
get_studiok <- function(url, link_url) {
  if (is.null(link_url)) link_url <- url
  x <- lapply(url, function(url) {
    pg <- xml2::read_html(url)
    # Fetch items
    pgt <- pg %>% html_node("#main > table")
    # Drop the two header rows kept by fill = TRUE.
    pgt <- html_table(pgt, fill = TRUE)[-c(1:2), ]
    pgt$Importo <- NULL
    names(pgt) <- c("APNumero", "Descrizione", "Tipo", "Oggetto", "Inizio", "Fino")
    pgt$APNumero <- clear_unicode(pgt$APNumero)
    pgt$Oggetto <- clear_unicode(pgt$Oggetto)
    pgt$Oggetto <- wrap_nr_ogg(pgt$APNumero, pgt$Oggetto)
    # Fetch links
    link <- pg %>% html_node("tbody")
    link <- html_nodes(link, "tr span a") %>% html_attr("href")
    link <- file.path(link_url, link)
    #link <- data.frame(link = urls, stringsAsFactors = FALSE)
    fin <- data.frame(pgt, link, stringsAsFactors = FALSE)
    # Sort by date
    fin <- fin[order(lubridate::dmy(fin$Inizio), decreasing = TRUE), ]
    rownames(fin) <- NULL
    fin
  })
  # Stack the per-URL data frames and tag the result with the package class.
  fin <- do.call("rbind", x)
  class(fin) <- c("albopop_srs", class(fin))
  fin
}
# Scrape the 'Albo Pretorio' listing of a JCityGov-powered site, paging
# through via the "Avanti" (next) link until none remains.
# `link_url` is accepted for interface parity with get_studiok() but is
# currently unused -- TODO confirm whether detail links need rebasing.
# Returns a data.frame of class "albopop_srs".
get_JCityGov <- function(url, link_url) {
  # href of the "Avanti" pagination link, or character(0) on the last page.
  next_page <- function(res) {
    x <- html_nodes(res, "div")
    x <- html_nodes(x, '[class="pagination pagination-centered"]') %>% html_nodes("a")
    idx <- which(x %>% html_text() == "Avanti")
    html_attr(x[idx], "href")
  }
  cn <- c("Anno e Numero Registro", "Tipo Atto",
          "Oggetto", "Periodo Pubblicazioneda - a")
  fin <- list()
  i <- 1
  repeat {
    s <- html_session(url)
    stop_for_status(s)
    res <- content(s$response, encoding = "UTF-8")
    pgt <- html_table(res)[[1]]
    pgt <- pgt[, cn]
    per_pub <- pgt[, 4]
    # Split "<start> <end>" row by row. The previous code flattened the whole
    # column and took elements [1]/[2], which recycled the FIRST row's dates
    # onto every row of the page.
    per_split <- strsplit(per_pub, " ", fixed = TRUE)
    link <- html_nodes(res, xpath = "//*[@title='Apri Dettaglio']") %>% html_attr("href")
    fin[[i]] <- data.frame(Numero = pgt[, 1],
                           Tipo = pgt[, 2],
                           Oggetto = wrap_nr_ogg(pgt[, 1], pgt[, 3]),
                           Inizio = vapply(per_split, `[`, character(1), 1),
                           Fino = vapply(per_split, `[`, character(1), 2),
                           link,
                           stringsAsFactors = FALSE)
    url <- next_page(res)
    # `||` short-circuits and the length check handles character(0): with the
    # old `is.null(url) | is.na(url)`, an absent next link produced a
    # zero-length condition and if() errored instead of breaking.
    if (is.null(url) || length(url) == 0 || is.na(url)) break
    i <- i + 1
  }
  fin <- do.call("rbind", fin)
  class(fin) <- c("albopop_srs", class(fin))
  fin
}
# Scrape the 'Albo Pretorio' listing of a mapweb.it-hosted site, following
# the "Successiva" (next) link from page to page.
# Returns a data.frame of class "albopop_srs".
get_mapweb <- function(url) {
base_url <- "http://www.mapweb.it"
cn <- c("Numero", "Oggetto", "Atto", "Inizio", "Fino")
# Build the absolute URL of the next page from the 'Successiva' link href.
next_page <- function(res) {
x <- html_nodes(res, xpath="//*[@title='Successiva']") %>% html_attr("href")
x <- paste0(base_url, x)
x
}
fin <- list()
i <- 1
repeat{
pg <- read_html(url)
pgt <- html_nodes(pg, xpath="//*[@id='pageBox']/div[4]/table") %>% html_table() %>% .[[1]]
pgt$Oggetto <- wrap_nr_ogg(pgt$Numero, pgt$Oggetto)
colnames(pgt) <- cn
# Detail links live in the first cell of each listing row.
link <- html_nodes(pg, xpath="//*[@id='tabella_albo']/tr/td[1]/a") %>% html_attr("href")
link <- paste0(base_url, link)
fin[[i]] <- data.frame(pgt, link, stringsAsFactors = FALSE)
url <- next_page(pg)
# Stops when the 'next' href is empty, i.e. the pasted URL equals base_url.
# NOTE(review): if the href node is absent, paste0 yields character(0) and
# this condition never matches -- verify termination on the last page.
if(identical(base_url, url)) break
i <- i + 1
}
fin <- do.call("rbind", fin)
class(fin) <- c("albopop_srs", class(fin))
fin
}
# Scrape the 'Albo Pretorio' listing of a SAGA-hosted site, following the
# "Successivo" link for at most 20 pages (safety cap).
# Returns a data.frame of class "albopop_srs".
get_saga <- function(url) {
cn <- c( "N. reg.", "Inizio", "N. atto", "Tipo", "Oggetto", "Pubblicazione")
fin <- list()
i <- 1
s <- html_session(url)
repeat{
z2 <- html_nodes(s, xpath="//*[@id='documentList']") %>% html_table() %>% .[[1]]
# Keep the first five columns plus the publication period (column 7).
z2 <- z2[, c(1:5,7)]
colnames(z2) <- cn
z2$Oggetto <- wrap_nr_ogg(z2[, 1], z2[, "Oggetto"])
# Strip whitespace noise around the "start - end" period string.
z2$Pubblicazione <- gsub("\\r\\n\t", "", z2$Pubblicazione)
z2$Pubblicazione <- gsub("\\t", "", z2$Pubblicazione)
# The second half of "start - end" is the expiry date.
z2$Fino <- strsplit(z2$Pubblicazione, "-") %>%
purrr::map(2L) %>%
purrr::flatten_chr()
# autsch
# NOTE(review): this blanket "16" -> "2016" rewrite corrupts any date that
# contains "16" elsewhere (e.g. day 16) and hard-codes the year -- needs a
# proper two-digit-year parse.
z2$Fino <- gsub("16", "2016", z2$Fino)
z2$Pubblicazione <- NULL
# Detail links carry a 5-digit document id at the end of the href.
link <- html_nodes(s, xpath="//*[@id='documentList']/tbody/tr/td[8]/a/@href") %>% html_text()
link <- regmatches(link, regexpr("[0-9]{5}$", link))
link <- paste0("http://pubblicazioni1.saga.it/publishing/AP/docDetail.do?docId=", link)
fin[[i]] <- data.frame(z2, link, stringsAsFactors = FALSE)
# Stop when there is no "Successivo" link or after 20 pages.
s <- tryCatch(follow_link(s, "Successivo"), error=function(e)(return(NULL)))
if(is.null(s) || i == 20) break
i <- i + 1
}
fin <- do.call("rbind", fin)
class(fin) <- c("albopop_srs", class(fin))
fin
}
|
44a02c7800ad238b4669781f5e9652da089cc83e
|
222ddcb4176c06aa122588179cb5395652653d2d
|
/archive/weighted_update.R
|
36a1c778b1e7a929ec0ed1cabab129a6e0e1aa96
|
[
"MIT"
] |
permissive
|
ck37/acic-tmle-2018
|
f0b1bc9732b10edfa4809f6d5e1729b2e8c338b8
|
471bcdf1e46bea804d62a1c4e1a1d92ef47ff32d
|
refs/heads/master
| 2022-01-08T14:37:52.497376
| 2019-01-23T20:09:11
| 2019-01-23T20:09:11
| 98,351,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,573
|
r
|
weighted_update.R
|
# Takes initdata, dataframe or list containing elements: A, Y, Q, g
# TMLE-style fluctuation ("targeting") step for a treatment-specific mean.
# Expects initdata to carry: A (binary treatment indicator), Y (outcome;
# presumably bounded in [0, 1] for the logistic fluctuation -- TODO confirm),
# g (estimated propensity score P(A=1|W)), and initial outcome predictions
# Q.QAW, Q.Q0W, Q.Q1W on the probability scale.
# Returns c(psi, var.psi): the point estimate and an influence-curve-based
# variance estimate.
# NOTE(review): this masks base R's S3 generic update() while loaded.
update <- function(initdata) {
n = length(initdata$A)
# Create ``clever covariate'', which is 0 for treated units. This will only be used to update Q01.
H0W = with(initdata, - g / (1 - g))
HAW = with(initdata, ifelse(A == 1, 0, H0W))
# Fit a glm with the ``clever covariate'' moved to the weight.
# TODO: catch error and recover if fluctuation fails.
# Suppress any warnings here.
# Intercept-only logistic regression with the initial fit as an offset:
# the intercept is the fluctuation parameter epsilon.
fit = suppressWarnings(glm(Y ~ offset(qlogis(Q.QAW)) + 1,
weight = -HAW,
data = initdata,
family = "binomial"))
# Extract epsilon from the MLE.
epsilon_hat = fit$coefficients[1]
if (epsilon_hat == 0) {
warning("Epsilon hat estimated to be zero.")
}
if (is.na(epsilon_hat)) {
warning("Epsilon hat is NA.")
}
# Update estimated Q(0, W)
Q0Wstar = with(initdata, plogis(qlogis(Q.Q0W) + epsilon_hat))
# Update for Q(1, W) merely ensures mean(Q1W*A) = mean(Y*A), i.e., same mean as Y among treated.
# This means we can replace with Ystar in estimator, though update may improve variance estimator
Q1Wstar = with(initdata, Q.Q1W + mean((Y - Q.Q1W) * A)/mean(A))
QAWstar = with(initdata, ifelse(A == 1, Q1Wstar, Q0Wstar))
# Diagnostics: NAs here usually mean Q predictions at 0/1 blew up qlogis().
num_nas0 = sum(is.na(Q0Wstar))
num_nas1 = sum(is.na(Q1Wstar))
if (num_nas0 + num_nas1 > 0) {
cat("Updated Q0Wstar num NAs is:", num_nas0, "\n","Updated Q1Wstar num NAs is:", num_nas1, "\n")
print(summary(initdata$Q.Q0W))
print(summary(initdata$Q.Q1W))
print(summary(qlogis(initdata$Q.Q0W)))
print(summary(qlogis(initdata$Q.Q1W)))
cat("Epsilon hat:", epsilon_hat, "\n")
}
# Calculate percentage of treated units that changed after fluctuation update.
pct_changed = mean(initdata$Q.Q0W[initdata$A == 1] != Q0Wstar[initdata$A == 1] |
initdata$Q.Q1W[initdata$A == 1] != Q1Wstar[initdata$A == 1])
# If the treated units didn't change there is some sort of issue.
if (pct_changed == 0) {
warning("Fluctuation did not change any potential outcomes for treated units.")
}
# Point estimate: mean of Y - Q0Wstar among the treated.
psi = with(initdata, sum((A == 1) * (Y - Q0Wstar)) / sum(A))
# Estimated efficient influence curve, used for the variance estimate below.
Dstar = with(initdata, ((A == 1) - (A == 0) * g / (1 - g)) / mean(A) * (Y - QAWstar) +
(A == 1) / mean(A) * (Q1Wstar - Q0Wstar - psi))
num_nas = sum(is.na(Dstar))
if (num_nas > 0) {
cat("Dstar num NAs is:", num_nas, "\n")
}
# Compile results.
results = c(psi = psi,
var.psi = (n-1)*var(Dstar)/n^2)
return(results)
}
|
74fa7da9cdcf56736392a86eb3c8514024b0c2a3
|
b0cad7c3a4ca1a0be8462e31ce0ccce61dbcdfff
|
/cachematrix.R
|
c88a63d591cae4c59bafa62fb6fa64b81afed11d
|
[] |
no_license
|
JeremiShane/ProgrammingAssignment2
|
4be257e4fd2b6b13247949cdc7c95900aebfa407
|
92e9f6524d599ff861873beab0ec4f1c3de2fbaf
|
refs/heads/master
| 2021-01-02T08:30:46.459008
| 2017-08-02T20:52:17
| 2017-08-02T20:52:17
| 99,015,060
| 0
| 0
| null | 2017-08-01T15:17:11
| 2017-08-01T15:17:11
| null |
UTF-8
|
R
| false
| false
| 1,270
|
r
|
cachematrix.R
|
# Caching the Inverse of a Matrix
## assignment is to write a pair of functions that cache the inverse of a matrix
## 1. makeCacheMatrix creates a special "matrix" object that can cache its inverse
## 2. cacheSolve computes the inverse of the special "matrix" object, or retrieves the cached inverse
##
### the inverse of A is A^-1 only when
### A * A^-1 = A^-1 * A = I (the identity matrix)
## Construct a cache-aware "matrix" object: a list of four closures that
## share the matrix `x` and its lazily-cached inverse via their enclosing
## environment. Any set() call invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix (rebuilt as d x d from y) and drop the cache.
  # For the inverse to exist the matrix must be square and nonsingular
  # (determinant != 0).
  set <- function(y = c(1, 1, 1, 0), d = 2) {
    x <<- matrix(y, d, d)
    cached_inverse <<- NULL
  }
  # Accessors for the matrix and its cached inverse (NULL until computed).
  get <- function() x
  setinverse <- function(solve) cached_inverse <<- solve
  getinverse <- function() cached_inverse
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Return the inverse of the special "matrix" object `x` (as built by
## makeCacheMatrix). A previously cached inverse is reused; otherwise the
## inverse is computed with solve(), stored back on the object, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  underlying <- x$get()
  inverse <- solve(underlying, ...)
  x$setinverse(inverse)
  inverse
}
|
8f944d23ef597bba7bd63ac72be724f4074eb31d
|
955a3a610c2586acd6aebef7d962ad5add461e86
|
/example05.R
|
7323a254de17a8e5f18a2bc2ec8f903aba70f160
|
[] |
no_license
|
aboyd20/Social-Network-Analysis
|
e073df4abfe0401479bdbae76f8fa6646a355325
|
c821086b421650f5f0cd4f5258a72e8dfc6c54b4
|
refs/heads/master
| 2022-12-13T19:09:51.039921
| 2020-09-12T21:42:17
| 2020-09-12T21:42:17
| 295,028,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,058
|
r
|
example05.R
|
# Example 5
## Working with CUG testing
# Conditional Uniform Graph (CUG) tests on two Lord of the Rings
# character-interaction networks (lotr1, lotr3).
# ---- C1 ----
# Load plotting/clustering/network packages; sand attaches igraph last.
library("ggplot2")
library("GGally")
library("mcclust")
library("sna")
# Must load other packages first
library("sand")
# ---- C2 ----
# NOTE(review): hard-coded local path -- adjust before running elsewhere.
#path <- "C:/Users/rburke/Dropbox/2016_cFall/csc495DB/example/ex5"
path <- "/Volumes/BOYDDEPAUL/Fall Quarter 2018/DSC 480/Week 6/ex5"
setwd(path)
lotr1 <- read.graph("lotr1.graphml", format="graphml")
lotr3 <- read.graph("lotr3.graphml", format="graphml")
summary(lotr1)
summary(lotr3)
# ---- C3 ----
# Weighted-degree distributions for both networks, as frequency tables.
wtdeg1 <- graph.strength(lotr1)
wbin1 <-tabulate(wtdeg1)
#make a dataframe
bindf1 <- data.frame(bin=seq(1, length(wbin1)), count=wbin1, movie = "LOTR1")
bindf1 <- bindf1[bindf1$count>0, ]
bindf1$freq <- bindf1$count /vcount(lotr1)
wtdeg3 <- graph.strength(lotr3)
wbin3 <-tabulate(wtdeg3)
#make a dataframe
bindf3 <- data.frame(bin=seq(1, length(wbin3)), count=wbin3, movie = "LOTR3")
bindf3 <- bindf3[bindf3$count>0, ]
bindf3$freq <- bindf3$count /vcount(lotr3)
wdeg.df <-rbind(bindf1, bindf3)
wdeg.df
# ---- C4 ----
# Log-log plot of the two weighted-degree distributions.
g <- ggplot(data = wdeg.df, aes(x=bin, y=freq, color=movie))
g <- g + geom_point()
g <- g + geom_smooth(se=F)
g <- g + scale_y_log10("Frequency")
g <- g + scale_x_log10("Wt. Degree")
print(g)
# ---- C5 ----
# Power-law fits of the weighted-degree distributions (alpha, KS statistics).
plf1 <-fit_power_law(wtdeg1)
plf1$alpha
plf1$KS.stat
plf1$KS.p
plf1$xmin
plf3 <-fit_power_law(wtdeg3)
plf3$alpha
plf3$KS.stat
plf3$KS.p
plf3$xmin
# ---- C6 ----
# Reference: preferential-attachment graph whose in-degree should be power-law.
g <- barabasi.game(1000)
deg <- degree(g, mode = "in")
pltba <- fit_power_law(deg)
pltba$alpha
pltba$KS.stat
pltba$KS.p
# ---- C7 ----
# Overlay the fitted power law on the LOTR3 empirical distribution.
xmin <- plf3$xmin
alpha <- plf3$alpha
xmax <- max(wdeg.df[wdeg.df$movie=="LOTR3", ]$bin)
xvals <- seq(from = xmin, to=xmax, by=1)
yval = xvals^(-alpha)
fit.df <-data.frame(bin=xvals, freq= yval, movie="LOTR3-FIT",count=0)
wdeg.df <- rbind(wdeg.df, fit.df)
# ---- C8 ----
g <- ggplot(data = wdeg.df[wdeg.df$movie !="LOTR1",], aes(x=bin, y=freq, color=movie))
g <- g + geom_point()
g <- g + geom_smooth(se=F)
g <- g + scale_y_log10("Frequency")
g <- g + scale_x_log10("Wt. Degree")
print(g)
# ---- C9 ----
# mycugtest() (and its print/plot methods) come from this helper script.
source("mycugtest.R")
# ---- C10 ----
# CUG test: is observed transitivity extreme relative to random graphs
# conditioned on the same number of edges?
transitivity(lotr1, type = "global")
lotr1.cug <- mycugtest(lotr1, transitivity, cmode="edges", directed=FALSE,
type="global")
# ---- C11 ----
print.cug.test(lotr1.cug)
plot.cug.test(lotr1.cug)
# ---- C12 ----
transitivity(lotr3, type = "global")
lotr3.cug <- mycugtest(lotr3, transitivity, cmode="edges", directed=FALSE,
type="global")
print.cug.test(lotr3.cug)
plot.cug.test(lotr3.cug)
# ---- C13 ----
# CUG test of assortative mixing by character Race.
assortativity_nominal(lotr1, types=factor(V(lotr1)$Race))
lotr1.cug <- mycugtest(lotr1, assortativity_nominal, cmode = "edges",
directed = FALSE, types=factor(V(lotr1)$Race))
print.cug.test(lotr1.cug)
plot.cug.test(lotr1.cug)
# ---- C14 ----
assortativity_nominal(lotr3, types=factor(V(lotr3)$Race))
lotr3.cug <- mycugtest(lotr3, assortativity_nominal, cmode = "edges",
directed = FALSE, types=factor(V(lotr3)$Race))
print.cug.test(lotr3.cug)
plot.cug.test(lotr3.cug)
|
f7cb6c0a2f70c65f3edbff0165bc3abd10b81677
|
f12112c0024d3c5c84f9d7ecad8fbf36aec48a73
|
/man/generateTwoCircles.Rd
|
45253484e5c4964b06022b6d9babbbcbc933d01f
|
[] |
no_license
|
biocq/RSSL
|
edb8d33f2f35ab36a935f91884e3413761c5dad3
|
ca33dbfe5bda775be2ed6e6cac59d18fb063d6d5
|
refs/heads/master
| 2020-12-28T19:08:02.797890
| 2016-08-10T22:03:19
| 2016-08-10T22:03:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 416
|
rd
|
generateTwoCircles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenerateSSLData.R
\name{generateTwoCircles}
\alias{generateTwoCircles}
\title{Generate data from 2 circles}
\usage{
generateTwoCircles(n = 100, noise_var = 0.2)
}
\arguments{
\item{n}{integer; Number of examples to generate}
\item{noise_var}{numeric; size of the variance parameter}
}
\description{
One circle circumscribes the other
}
|
455a2687a2eac2c466cdbdc34c02e19e13f6d204
|
eb67b6ceb2458478bc7b1e45b42a8112297eb3f7
|
/Hypothesis Tests Scripts/New_AGG_REGION_EDA_FOR_MATLAB.R
|
a8db02f8b5c930ed925c44b27c8fdeba9c3384e2
|
[] |
no_license
|
JKG114/2020-Final-Project
|
c526758a644d687113f2a9ded90a69414ead5347
|
f6392ab3aa0f5faf56e8d0cd40595663aab65f6d
|
refs/heads/master
| 2020-03-15T03:36:24.589191
| 2018-05-19T18:11:30
| 2018-05-19T18:11:30
| 131,946,438
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,593
|
r
|
New_AGG_REGION_EDA_FOR_MATLAB.R
|
library(ggplot2)
library(ggcorrplot)
library(corrplot)
library(dplyr)
#This is very similar to code where we exported csvs for matlab that contained census tract level
#demographic statistics and the races of those killed by police at the census tract level, only now
#we export 8 csvs where each csv has the same census level tract info except it corresponds to one of
#4 regions: Northeast, South, Midwest, West.
#setwd('Users/stuartgeman/Desktop/data2020/Final Project')
police = read.csv("police_killings_cleaned.csv")
police$X = NULL
police = na.omit(police)
police = police[ ! police$raceethnicity %in% "Unknown", ]
acs = read.csv("acs2015_census_tract_data.csv")
names(acs)[names(acs) == 'CensusTract'] <- 'geo_id'
#We merge the acs dataframe and police dataframe
total <- merge(acs,police,by="geo_id")
total <-na.omit(total)
rownames(total) <- 1:nrow(total)
stat = subset(total, select=c("State","geo_id", "TotalPop","raceethnicity", "Hispanic","White", "Black", "Native","Asian",
"Pacific"))
stat$raceethnicity <- as.character(stat$raceethnicity)
stat$raceethnicity[stat$raceethnicity== "Hispanic/Latino"] <- "Hispanic"
stat$raceethnicity[stat$raceethnicity== "Native American"] <- "Native"
stat$raceethnicity[stat$raceethnicity== "Asian/Pacific Islander"] <- "Asian_Pacific"
#stat$raceethnicity <- as.factor(stat$raceethnicity)
#create new raical category that is a combination of Asian and Pacific
stat$Asian_Pacific = stat$Asian + stat$Pacific
#stat$NonWhite = stat$Asian + stat$Pacific + stat$Native + stat$Black + stat$Hispanic
#Get the raicl demographic data for each county
Races = subset(stat, select = c("State","geo_id","TotalPop","Hispanic", "White", "Black", "Native", "Asian_Pacific"))
Races = Races[!duplicated(Races$geo_id),]
rownames(Races) <- 1:nrow(Races)
Shootings <- matrix(0, ncol = 7, nrow = nrow(Races))
Shootings <- data.frame(Shootings)
names(Shootings)[names(Shootings) == 'X1'] <- 'State'
Shootings$State = Races$State
names(Shootings)[names(Shootings) == 'X2'] <- 'geo_id'
Shootings$geo_id = Races$geo_id
names(Shootings)[names(Shootings) == 'X3'] <- 'Hispanic'
names(Shootings)[names(Shootings) == 'X4'] <- 'White'
names(Shootings)[names(Shootings) == 'X5'] <- 'Black'
names(Shootings)[names(Shootings) == 'X6'] <- 'Native'
names(Shootings)[names(Shootings) == 'X7'] <- 'Asian_Pacific'
#County the number of police killings for eachrace in each county(using geo_id)
for(i in 1:nrow(stat)) {
if(stat$raceethnicity[i] == "Hispanic"){
index <- Shootings$geo_id == stat$geo_id[i]
Shootings$Hispanic[index] <- Shootings$Hispanic[index] + 1
}
if(stat$raceethnicity[i] == "White"){
index <- Shootings$geo_id == stat$geo_id[i]
Shootings$White[index] <- Shootings$White[index] + 1
}
if(stat$raceethnicity[i] == "Black"){
index <- Shootings$geo_id == stat$geo_id[i]
Shootings$Black[index] <- Shootings$Black[index] + 1
}
if(stat$raceethnicity[i] == "Native"){
index <- Shootings$geo_id == stat$geo_id[i]
Shootings$Native[index] <- Shootings$Native[index] + 1
}
if(stat$raceethnicity[i] == "Asian_Pacific"){
index <- Shootings$geo_id == stat$geo_id[i]
Shootings$Asian_Pacific[index] <- Shootings$Asian_Pacific[index] + 1
}
}
Races$geo_id = NULL
#Races$NonWhite = Races$NonWhite/100
#Races$White = Races$White/100
Shootings$geo_id = NULL
#Create a dataframe sub_state with just the information we want
sub_state = subset(acs, select=c("State", "TotalPop","Hispanic", "White", "Black",
"Native","Asian","Pacific" ))
#Remove rows with na's
sub_state <- na.omit(sub_state)
sub_state$Asian_Pacific = sub_state$Asian + sub_state$Pacific
#We now get the Racial demographic data for each census tract and aggregate it at the state level
Races = sub_state %>%
group_by(State) %>%
summarise(TotalState = sum(TotalPop),
Hispanic = (sum(TotalPop*(Hispanic*.01)))/TotalState,
White = (sum(TotalPop*(White*.01)))/TotalState,
Black = (sum(TotalPop*(Black*.01)))/TotalState,
Native = (sum(TotalPop*(Native*.01)))/TotalState,
Asian_Pacific = sum(TotalPop*(Asian_Pacific*.01))/TotalState)
#I will now subset the states into regions Northeast, South, West, Midwest (I had a fifth, Mountain,
#but there weren't enough observations for it so I distributed the mountain states into the others)
NorthEast = c("Connecticut", "Maine", "Massachusetts", "New Hampshire", "Rhode Island", "Vermont",
"New Jersey", "New York","Delaware","District of Columbia", "Maryland",
"Pennsylvania")
South = c("Alabama", "Florida", "Georgia", "Kentucky",
"Mississippi", "North Carolina", "South Carolina", "Tennessee","Virginia",
"West Virginia","Arkansas", "Louisiana","Oklahoma", "Texas")
Midwest = c("Illinois", "Indiana", "Michigan", "Minnesota", "Ohio", "Wisconsin",
"Iowa", "Kansas", "Missouri", "Nebraska","North Dakota", "South Dakota")
West = c("Arizona", "California", "Hawaii", "Nevada","Alaska",
"Idaho", "Oregon", "Washington","Colorado","New Mexico","Utah","Montana","Wyoming")
Demographics_NorthEast = subset(Races, (Races$State %in% NorthEast))
Shooting_NorthEast = subset(Shootings, (Shootings$State %in% NorthEast))
Demographics_South = subset(Races, (Races$State %in% South))
Shooting_South = subset(Shootings, (Shootings$State %in% South))
Demographics_Midwest = subset(Races, (Races$State %in% Midwest))
Shooting_Midwest = subset(Shootings, (Shootings$State %in% Midwest))
Demographics_West = subset(Races, (Races$State %in% West))
Shooting_West = subset(Shootings, (Shootings$State %in% West))
#For Each Demographic Region We aggregate the demographic data
Demographics_NorthEast = Demographics_NorthEast %>%
summarise(TotalPop = sum(TotalState),
Hispanic = (sum(TotalState*(Hispanic)))/TotalPop,
White = (sum(TotalState*(White)))/TotalPop,
Black = (sum(TotalState*(Black)))/TotalPop,
Native = (sum(TotalState*(Native)))/TotalPop,
Asian_Pacific = (sum(TotalState*(Asian_Pacific)))/TotalPop)
Demographics_NorthEast$TotalPop = NULL
Demographics_South = Demographics_South %>%
summarise(TotalPop = sum(TotalState),
Hispanic = (sum(TotalState*(Hispanic)))/TotalPop,
White = (sum(TotalState*(White)))/TotalPop,
Black = (sum(TotalState*(Black)))/TotalPop,
Native = (sum(TotalState*(Native)))/TotalPop,
Asian_Pacific = (sum(TotalState*(Asian_Pacific)))/TotalPop)
Demographics_South$TotalPop = NULL
Demographics_Midwest = Demographics_Midwest %>%
summarise(TotalPop = sum(TotalState),
Hispanic = (sum(TotalState*(Hispanic)))/TotalPop,
White = (sum(TotalState*(White)))/TotalPop,
Black = (sum(TotalState*(Black)))/TotalPop,
Native = (sum(TotalState*(Native)))/TotalPop,
Asian_Pacific = (sum(TotalState*(Asian_Pacific)))/TotalPop)
Demographics_Midwest$TotalPop = NULL
Demographics_West = Demographics_West %>%
summarise(TotalPop = sum(TotalState),
Hispanic = (sum(TotalState*(Hispanic)))/TotalPop,
White = (sum(TotalState*(White)))/TotalPop,
Black = (sum(TotalState*(Black)))/TotalPop,
Native = (sum(TotalState*(Native)))/TotalPop,
Asian_Pacific = (sum(TotalState*(Asian_Pacific)))/TotalPop)
Demographics_West$TotalPop = NULL
#Get Rid of non-numeric values and sum shooting stats
Demographics_NorthEast$State = NULL
#Demographics_NorthEast$geo_id = NULL
Shooting_NorthEast$State = NULL
#Shooting_NorthEast$geo_id = NULL
#Shooting_NorthEast = colSums(Filter(is.numeric, Shooting_NorthEast))
Demographics_NorthEast = Demographics_NorthEast %>% slice(rep(1:n(), each = nrow(Shooting_NorthEast)))
Demographics_South$State = NULL
#Demographics_South$geo_id = NULL
Shooting_South$State = NULL
#Shooting_South$geo_id = NULL
#Shooting_South = colSums(Filter(is.numeric, Shooting_South))
Demographics_South = Demographics_South %>% slice(rep(1:n(),
each = nrow(Shooting_South)))
Demographics_Midwest$State = NULL
#Demographics_Midwest$geo_id = NULL
Shooting_Midwest$State = NULL
#Shooting_Midwest$geo_id = NULL
#Shooting_Midwest = colSums(Filter(is.numeric, Shooting_Midwest))
Demographics_Midwest = Demographics_Midwest %>% slice(rep(1:n(),
each = nrow(Shooting_Midwest)))
Demographics_West$State = NULL
#Demographics_West$geo_id = NULL
Shooting_West$State = NULL
#Shooting_West$geo_id = NULL
#Shooting_West = colSums(Filter(is.numeric, Shooting_West))
Demographics_West = Demographics_West %>% slice(rep(1:n(),
each = nrow(Shooting_West)))
#CSVs for NORTHEAST
write.csv(Shooting_NorthEast, file = "csvs/Northeeast_RacesOfVictims.csv",row.names=FALSE)
write.csv(Demographics_NorthEast, file = "csvs/RacesOfNortheast.csv",row.names=FALSE)
#CSVs for South
write.csv(Shooting_South, file = "csvs/South_RacesOfVictims.csv",row.names=FALSE)
write.csv(Demographics_South, file = "csvs/RacesOfSouth.csv",row.names=FALSE)
#CSVs for Midwest
write.csv(Shooting_Midwest, file = "csvs/Midwest_RacesOfVictims.csv",row.names=FALSE)
write.csv(Demographics_Midwest, file = "csvs/RacesOfMidwest.csv",row.names=FALSE)
#CSVs for West
write.csv(Shooting_West, file = "csvs/West_RacesOfVictims.csv",row.names=FALSE)
write.csv(Demographics_West, file = "csvs/RacesOfWest.csv",row.names=FALSE)
|
6c6436742593209b6c81aea177987d79acb135ea
|
5c350a667a10e0b0fa13e8af82b4d269471a95e1
|
/RProgramming.R
|
588f769782466f1ed2e1aa2aac359c08a457a145
|
[] |
no_license
|
mpitman85/courseraRProgramming
|
fa84c71f712e342d52ea84bb659e5eb16b6bc8a4
|
2833fce2fae8a525761aec3caae932ebd3b5af25
|
refs/heads/master
| 2021-01-20T07:19:14.567411
| 2017-03-29T14:40:08
| 2017-03-29T14:40:08
| 82,600,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,475
|
r
|
RProgramming.R
|
#File for testing out and learning code for R Programming Coursera course in the Data Science Specialization
##Quiz Week 1
setwd("~/Dropbox/Projects/Classes/Coursera/DataScienceSpecialization/RProgramming/courseraRProgramming")
data<-read.csv("hw1_data.csv")
names(data)
data[1:2,]
dim(data)
tail(data)
data[152:153,]
data[47,]
is.na(data[,1])
missingOzone<-is.na(data[,1])
countmissing<-as.numeric(missingOzone)
countmissing
sum(countmissing)
ozone<-data[,1]
ozone
bad<-is.na(ozone)
goodozone<-ozone[!bad]
goodozone
mean(goodozone)
part<-data[data$Ozone>31 & data$Temp>90,]
part
good<-complete.cases(part)
goodpart<-part[good,]
goodpart
mean(goodpart$Solar.R)
Solar.R<-part$Solar.R
Solar.R
mean(Solar.R)
bad<-is.na(Solar.R)
goodSR<-Solar.R[!bad]
mean(goodSR)
june<-data[data$Month==6,]
june
mean(june$Temp)
may<-data[data$Month==5,]
may
##Week 1 SWIRL Assignments
install.packages("swirl")
install.packages(c("curl","httr")) ##recommended by Mentor in forum to solve swirl issues
library(swirl)
install_from_swirl("R Programming")
swirl()
##Week2##
x<-c("a", "b", "c", "d")
for(i in 1:4){
print(x[i])
}
for(i in seq_along(x)){
print(x[i])
}
for(letter in x){
print(letter)
}
for(i in 1:4) print(x[i])
x<-matrix(1:6,2,3)
for(i in seq_len(nrow(x))) {
for(j in seq_len(ncol(x))) {
print(x[i,j])
}
}
count<-0
while(count < 10) {
print(count)
count <- count + 1
}
z<-5
while(z>=3 && z<=10) {
print(z)
coin <- rbinom(1, 1, 0.5)
if(coin == 1) {
z <- z+1
} else {
z <- z-1
}
}
##Functions##
# Sum of two numbers (vectorized, like `+`).
add2 <- function(x, y) x + y
add2(5,3)
# Keep only the elements of x strictly greater than 10.
above10 <- function(x) {
  x[x > 10]
}
# Keep only the elements of x strictly greater than the threshold n
# (default 10).
above <- function(x, n = 10) {
  x[x > n]
}
# Compute the mean of each column of a matrix or data.frame.
#
# y: a matrix or data.frame with numeric columns.
# removeNA: drop NA values before averaging each column? Default TRUE.
# Returns a numeric vector with one mean per column.
#
# Fix: the original looped over 1:nc, which iterates c(1, 0) when the
# input has zero columns; seq_len() returns an empty index instead.
columnmean <- function(y, removeNA = TRUE) {
  vapply(seq_len(ncol(y)),
         function(i) mean(y[, i], na.rm = removeNA),
         numeric(1))
}
mydata <- rnorm(100)
sd(mydata)
sd(x=mydata)
sd(x=mydata, na.rm=FALSE)
sd(na.rm=FALSE, x=mydata)
f <- function(a,b) {
a^2
}
myplot <- function(x,y,type="l", ...) {
plot (x,y,type=type, ...)
}
lm <- function(x) {x*x}
lm
search()
f <- function(x,y){
x^2 +y / z
}
# Function factory: returns a closure that raises its argument to the
# power n (captured from this call's environment).
make.power <- function(n) {
  function(x) {
    x^n
  }
}
cube <- make.power(3)
square <- make.power(2)
cube(3)
square(3)
##Date & Time
x <- Sys.time()
x
p <- as.POSIXlt(x)
names(unclass(p))
p$sec
unclass(x)
x$sec
datestring <- c("January 10, 2012 10:40", "December 9, 2011 9:10")
x <- strptime(datestring, "%B %d, %Y %H:%M")
x
class(x)
x <- as.Date("2012-01-01")
y <- strptime("9 Jan 2011 11:34:21", "%d %b %Y %H:%M:%S")
x-y
x <- as.POSIXlt(x)
x-y
##SWIRL exercises
# Build a one-sentence news blurb from named arguments passed via `...`
# (expects `place`, `adjective`, and `noun`).
mad_libs <- function(...) {
  pieces <- list(...)
  paste("News from", pieces[["place"]],
        "today where", pieces[["adjective"]],
        "students took to the streets in protest of the new", pieces[["noun"]],
        "being installed on campus.")
}
##Quiz - Week 2
cube <- function(x,n) {
x^3
}
x <- 1:10
if(x>5){
x <- 0
}
f <- function(x) {
g <- function(y) {
y + z
}
z <- 4
x + g(x)
}
x <- 5
y <- if(x<3) {
NA
} else {
10
}
##Week 3
x <- list(a=1:5, b=rnorm(10))
lapply(x,mean)
#anonymous functions
x <- list(a=matrix(1:4,2,2), b=matrix(1:6,3,2))
x
lapply(x, function(elt) elt[,1])
#apply
x <- matrix(rnorm(200), 20, 10)
x
apply(x, 2, mean)
apply(x,1,sum)
colMeans(x)
rowSums(x)
#mapply
# Draw n normal deviates with the given mean and sd (thin rnorm wrapper).
noise <- function(n, mean, sd) rnorm(n, mean, sd)
noise(5,1,2)
#split
library(datasets)
head(airquality)
s <- split(airquality, airquality$Month)
s
lapply(s, function(x) colMeans(x[,c("Ozone", "Solar.R", "Wind")]))
sapply(s, function(x) colMeans(x[,c("Ozone", "Solar.R", "Wind")]))
sapply(s, function(x) colMeans(x[,c("Ozone", "Solar.R", "Wind")], na.rm=TRUE))
#debugging
# Print whether x is greater than zero and invisibly return x.
#
# Fix: the original ended with `invisible(X)` (capital X), which raised
# "object 'X' not found" at runtime after printing. An NA input still
# errors inside if(), as before -- see printmessage2 for the NA-safe
# variant.
printmessage <- function(x) {
  if (x > 0) {
    print("x is greater than zero")
  } else {
    print("x is less than or equal to zero")
  }
  invisible(x)
}
printmessage(x=1)
printmessage(NA)
# NA-safe variant: report the sign of x and invisibly return it.
# Note: x == 0 intentionally matches no branch and prints nothing,
# mirroring the original behavior exactly.
printmessage2 <- function(x) {
  if (is.na(x)) {
    print("x is a missing value!")
  } else if (x > 0) {
    print("x is greater than zero")
  } else if (x < 0) {
    print("x is less than or equal to zero")
  }
  invisible(x)
}
x <- log(-1)
printmessage2(x)
##Week 4: Simulating a Linear Model
set.seed(20)
x <- rnorm(100)
e <- rnorm(100, 0, 2)
y <- 0.5 + 2 * x + e
summary(y)
plot(x,y)
set.seed(10)
x <- rbinom(100, 1, 0.5)
e <- rnorm(100, 0, 2)
y <- 0.5 + 2 * x + e
summary(y)
plot(x,y)
set.seed(1)
x <- rnorm(100)
log.mu <- 0.5 + 0.3 * x
y <- rpois(100, exp(log.mu))
summary(y)
plot(x, y)
##Simulating Random Sampling
set.seed(1)
sample(1:10, 4)
sample(1:10, 4)
sample(letters, 5)
sample(1:10)
sample(1:10)
sample(1:10, replace=TRUE)
##R Profiler
system.time(readLines("http://www.jhsph.edu"))
# n x n Hilbert matrix: H[i, j] = 1 / (i + j - 1).
#
# Fix: uses seq_len(n) instead of 1:n, so n = 0 yields an empty 0 x 0
# matrix rather than the garbage 2 x 2 result produced by 1:0 == c(1, 0).
hilbert <- function(n) {
  i <- seq_len(n)
  1 / outer(i - 1, i, "+")
}
x <- hilbert(1000)
system.time(svd(x))
system.time(x)
x
system.time({
n <- 1000
r <- numeric(n)
for (i in 1:n) {
x <- rnorm(n)
r[i] <- mean(x)
}
})
##Quiz Week 4
set.seed(1)
rpois(5,2)
set.seed(10)
x <- rep(0:1, each=5)
e <- rnorm(10,0,20)
y <- 0.5 + 2*x +e
plot(x,y)
|
ff67f2f2b0bd13bd4155e5c7e3f9f61d7667d8a7
|
f96c492e9ac0db5dcd30a2ac3455ff6cd83ac0f1
|
/man/plotBg.Rd
|
07971f71366796016cbbe80fe41f8b68abbfdba1
|
[] |
no_license
|
openanalytics/poissontris
|
66421066a42007caaedca882ca67860f6e07b9f7
|
cf83ec1e09d3f47407faaafac6aa6e35e58ea932
|
refs/heads/master
| 2020-12-02T19:36:25.519219
| 2017-09-18T11:20:51
| 2017-09-18T11:20:51
| 96,363,523
| 11
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
rd
|
plotBg.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plottingFunctions.R
\name{plotBg}
\alias{plotBg}
\title{Plot the background list object}
\usage{
plotBg(bg)
}
\arguments{
\item{bg}{list containing x, y and color values}
}
\value{
none (objects plotted to current device)
}
\description{
Plot the background list object
}
\author{
Jason Waddell
}
|
617c8ed5bdad13d4b1d367b5f91fcddbbb7960b9
|
a027e58d29261e3069442596451966e736eff115
|
/man/Mutation.Rd
|
d267462978dc6b25e9bfff471965930f9c01c820
|
[] |
no_license
|
weitinging/MinNetRank
|
b2e24e6bc583df3aa51a6c7e6e2e9334d1e36a6e
|
553fc83ab28fd2ad4f8d166b89a178c49a811dff
|
refs/heads/master
| 2020-09-25T11:19:43.374851
| 2019-12-28T08:34:01
| 2019-12-28T08:34:01
| 225,994,650
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
rd
|
Mutation.Rd
|
\name{Mutation}
\alias{Mutation}
\title{
Getting the gene mutation score for each individual sample
}
\description{
SNP is the mutation matrix: if sample k has a mutation in gene i, the corresponding value is 1. We calculate the column-wise normalized matrix of SNP.
}
\usage{
Mutation(SNP, Network)
}
\arguments{
\item{SNP}{the mutation matrix}
\item{Network}{the interaction network}
}
\value{
%% ~Describe the value returned
the normalized matrix of SNP by column.
}
\author{
Ting Wei <weitinging@sjtu.edu.cn>; Zhangsheng Yu
Maintainer: Ting Wei <weitinging@sjtu.edu.cn>
}
|
7f9fde88b679aab139406528c81dc9c600695ddc
|
f9554c2ce8d6ebe6dd7bb135c1420a7ca142886d
|
/explore_data/plotting_week_1/plot3.R
|
bb75ed27d133a4426f0751d3bb92a9bd3007f2f4
|
[] |
no_license
|
tomMulholland/datasciencecoursera
|
0294ad17ef72595e2f31b60668adb0b09dfac3c1
|
6001eebffb2747e96a214f7e78fd9a81ad88757b
|
refs/heads/master
| 2020-05-17T00:08:54.898804
| 2014-08-07T18:45:20
| 2014-08-07T18:45:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,140
|
r
|
plot3.R
|
# A plot of global active power in different areas of the house
# Open the file
png(filename = "plot3.png", width = 480, height = 480)
# Column names for the data
col_names = c("Date", "Time", "Global_active_power", "Global_reactive_power",
"Voltage", "Global_intensity", "Sub_metering_1",
"Sub_metering_2", "Sub_metering_3")
# Read in the relevant data (Feb 1st and 2nd, 2007)
power <- read.table("../../data/household_power_consumption.txt", header=FALSE,
skip=66636, sep=";", nrows=2880, quote="",
col.names=col_names)
# Format the date data
dates <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
# Make the plot
with(power, plot(dates, power$Sub_metering_1,
ylab = "Energy sub metering", xlab = "",
type="n"))
lines(dates, power$Sub_metering_1, col="black")
lines(dates, power$Sub_metering_2, col="red")
lines(dates, power$Sub_metering_3, col="blue")
legend("topright", lty=1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Close the file
dev.off()
|
8b8525f24ee200d9938dd3588e0dd0a544195c0f
|
f431e82a9f18a6875df9260b4d141b58b4247cea
|
/Listas/lista_1.R
|
71d44690f242b4df60ae6fcc4e1397d0820519b1
|
[] |
no_license
|
taisbellini/analise-multivariada
|
fb0ab81d786791a107dbce56d967ae24d7ea4246
|
b84a342ef1b8fcc3dc87f0bf4a72f28d8d4fe03e
|
refs/heads/main
| 2023-02-19T07:03:33.827886
| 2021-01-20T12:36:48
| 2021-01-20T12:36:48
| 303,694,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,264
|
r
|
lista_1.R
|
### Lista 01 ###
#### Cap 1 ####
#### 1.12 ####
if (!require(mvtnorm)) install.packages("mvtnorm")
library(mvtnorm)
# Chebyshev (L-infinity) distance of a 2-D point from the origin.
# NOTE(review): this masks stats::dist() while the script is loaded.
dist <- function(P) {
  max(abs(P[c(1, 2)]))
}
#a)
P = c(-3, 4)
P_dist = dist(P)
paste("Distance of P to the origin: " , P_dist)
#b)
# Os pontos que terao distancia 1 da origem sao os que tem +-1 em uma das coordenadas
#e um numero entre 0 e 1 (+-) na outra
min = seq(from= -0.9, to = 0.9, by=0.1)
max = c(rep(1,19), rep(-1,19))
#Todas as combinacoes que geram dist = 1
x1 = c(min, max)
x2 = c(max, min)
points = as.matrix(expand.grid(x1 = x1, x2 = x2))
points = points[((abs(points[,"x1"])) == 1 | (abs(points[, "x2"]) == 1)),]
plot(points)
points_dist = apply(points, 1, dist)
plot(points_dist)
#c)
# Generalized Chebyshev distance: the largest absolute coordinate of P,
# for a point of any dimension.
dist_gen <- function(P) {
  max(vapply(P, abs, numeric(1)))
}
P_gen = c(2, -3, 5, -8)
dist_gen(P_gen)
#### 1.17 ####
data = read.table("../Wichern_data/T1-9.dat", sep = "\t")
head(data)
colnames(data) = c("Country", "100m", "200m", "400m", "800m", "1500m", "3000m", "Marathon")
x_barra = sapply(data[,2:8], mean)
x_barra
Sn = cov(data[2:8])
Sn
R = cor(data[2:8])
R
# Observa-se que entre distancias com metragem masi proxima, a correlacao eh maior
# enquanto grandes diferencas de distancia tem uma correlacao menor
# Max: 1500m - 3000m: 0.97
# Min: 100m - Marathon: 0.66
#### 1.18 ####
#Convertendo os tempos para metros/segundo
data2 = data
data2[,2] = sapply(data2[,2], function(x){100/x})
data2[,3] = sapply(data2[,3], function(x){200/x})
data2[,4] = sapply(data2[,4], function(x){400/x})
data2[,5] = sapply(data2[,5], function(x){800/(x*60)})
data2[,6] = sapply(data2[,6], function(x){1500/(x*60)})
data2[,7] = sapply(data2[,7], function(x){3000/(x*60)})
data2[,8] = sapply(data2[,8], function(x){42195/(x*60)})
data2
x_barra2 = sapply(data2[,2:8], mean)
x_barra2
Sn2 = cov(data2[2:8])
Sn2
R2 = cor(data2[2:8])
R2
#Observa-se o mesmo resultados para a variavel convertida para metros por segundo
#### Cap 2 ####
#### 2.30 ####
remove(list = ls())
mu = c(4,3,2,1)
sigma = rbind(c(3,0,2,2), c(0,1,1,0), c(2,1,9,-2), c(2,0,-2,4))
X = rmvnorm(100, mean = mu, sigma = sigma)
# X1 100x2
X1 = X[,1:2]
# X2 100x2
X2 = X[,3:4]
A = c(1,2)
B = rbind(c(1,-2), c(2,-1))
# AX1 1x100
AX1 = A%*%t(X1)
AX1
# BX2 2x100
BX2 = B%*%t(X2)
BX2
## a) E(X1)
# 1x2
eX1 = apply(X1, 2, mean)
eX1
## b) E(AX1)
# escalar
eAX1 = apply(AX1, 1, mean)
eAX1
## c) Cov(X1)
# 2x2
covX1 = cov(X1)
covX1
## d) Cov(AX1)
# escalar
covAX1 = cov(t(AX1))
covAX1
## e) E(X2)
# 1x2
eX2 = apply(X2, 2, mean)
eX2
## f) E(BX2)
# 1x2
eBX2 = apply(BX2, 1, mean)
eBX2
## g) Cov(X2)
# 2x2
covX2 = cov(X2)
covX2
## h) Cov(BX2)
# 2x2
covBX2 = cov(t(BX2))
covBX2
## i) Cov(X1, X2)
# 2x2
covX1X2 = cov(X1,X2)
covX1X2
## j) Cov(AX1, BX2)
# 1x2
covAX1BX2 = cov(t(AX1), t(BX2))
covAX1BX2
#### 2.31 ####
remove(list = ls())
mu = c(4,3,2,1)
sigma = rbind(c(3,0,2,2), c(0,1,1,0), c(2,1,9,-2), c(2,0,-2,4))
X = rmvnorm(100, mean = mu, sigma = sigma)
# X1 100x2
X1 = X[,1:2]
# X2 100x2
X2 = X[,3:4]
A = c(1,-1)
B = rbind(c(2,-1), c(0,1))
# AX1 1x100
AX1 = A%*%t(X1)
AX1
# BX2 2x100
BX2 = B%*%t(X2)
BX2
## a) E(X1)
# 1x2
eX1 = apply(X1, 2, mean)
eX1
## b) E(AX1)
# escalar
eAX1 = apply(AX1, 1, mean)
eAX1
## c) Cov(X1)
# 2x2
covX1 = cov(X1)
covX1
## d) Cov(AX1)
# escalar
covAX1 = cov(t(AX1))
covAX1
## e) E(X2)
# 1x2
eX2 = apply(X2, 2, mean)
eX2
## f) E(BX2)
# 1x2
eBX2 = apply(BX2, 1, mean)
eBX2
## g) Cov(X2)
# 2x2
covX2 = cov(X2)
covX2
## h) Cov(BX2)
# 2x2
covBX2 = cov(t(BX2))
covBX2
## i) Cov(X1, X2)
# 2x2
covX1X2 = cov(X1,X2)
covX1X2
## j) Cov(AX1, BX2)
# 1x2
covAX1BX2 = cov(t(AX1), t(BX2))
covAX1BX2
#### Cap 3 ####
#### 3.2 ####
# a)
X = cbind(c(3,6,3), c(4,-2,1))
colnames(X) = c("x1", "x2")
x_barra = apply(X, 2, mean)
x_barra
plot(X, xlim=c(0,6), ylim = c(-4,4), xlab = "x1", ylab="x2")
points(x_barra[1], x_barra[2], col = "blue")
text(x_barra[1],x_barra[2],"media amostral", pos = 3)
# c)
d1 = c(-1,2,-1)
d2 = c(3,-3,0)
d1td2 = t(d1)%*%d2
Ld1 = sqrt(6)
Ld2 = sqrt(18)
cos = d1td2/(Ld1*Ld2)
cos
X = cbind(c(3,6,3), c(4,-2,1))
var(X)
cor(X)
#### 3.5 ####
X1 = cbind(c(9,5,1), c(1,3,2))
var(X1)
X2 = cbind(c(3,6,3), c(4,-2,1))
var(X2)
#### 3.9 ####
remove(list=ls())
X = cbind(c(12,18,14,20,16), c(17,20,16,18,19), c(29,38,30,38,35))
x_barra = apply(X, 2, mean)
one = rep(1,5)
x = one%*%t(x_barra)
X - x
S = cov(X)
det(S)
#### 3.11 ####
# definindo S e D ^ -1/2
S = cbind(c(252.04, -68.43), c(-68.43, 123.67))
minDsqr = cbind(c(1/sqrt(252.04), 0), c(0, 1/sqrt(123.67)))
s11 = 252.04
s12 = s21 = -68.43
s22 = 123.67
# Definindo R
r11 = s11/(sqrt(s11)*sqrt(s11))
r12 = s12/(sqrt(s11)*sqrt(s22))
r21 = s21/(sqrt(s22)*sqrt(s11))
r22 = s22/(sqrt(s22)*sqrt(s22))
R = cbind(c(r11, r12), c(r12, r22))
# Calculando R pela definicao do resultado 3-29
R_calc = minDsqr%*%S%*%minDsqr
R
R_calc
#Observa-se que ambos sao iguais, portanto, R = D^-1/2 S D^-1/2
#Definindo D^1/2
Dsqr = cbind(c(sqrt(252.04), 0), c(0, sqrt(123.67)))
# Calculando S pela definicao do resultado 3-30
S_calc = Dsqr%*%R%*%Dsqr
S
S_calc
#Observa-se que ambos sao iguais, portanto, S = D^1/2 R D^1/2
#### 3.14 ####
remove(list = ls())
X = cbind(c(9,5,1), c(1,3,2))
x_barra = as.matrix(apply(X, 2, mean))
S = cov(X)
c = as.matrix(c(-1,2))
b = as.matrix(c(2,3))
sample_mean_c = t(c)%*%x_barra
sample_mean_c
sample_mean_b = t(b)%*%x_barra
sample_mean_b
sample_var_c = t(c)%*%S%*%c
sample_var_c
sample_var_b = t(b)%*%S%*%b
sample_var_b
sample_cov = t(b)%*%S%*%c
sample_cov
#### 3.19 ####
remove(list = ls())
#Definindo S e R
S = cbind(c(0.856,0.635,0.173), c(0.635, 0.568, 0.127), c(0.173,0.128, 0.171))
r11 = S[1,1]/(sqrt(S[1,1])*sqrt(S[1,1]))
r12 = S[1,2]/(sqrt(S[1,1])*sqrt(S[2,2]))
r13 = S[1,3]/(sqrt(S[1,1])*sqrt(S[3,3]))
r21 = S[2,1]/(sqrt(S[2,2])*sqrt(S[1,1]))
r22 = S[2,2]/(sqrt(S[2,2])*sqrt(S[2,2]))
r23 = S[2,3]/(sqrt(S[2,2])*sqrt(S[3,3]))
r31 = S[3,1]/(sqrt(S[3,3])*sqrt(S[1,1]))
r32 = S[3,2]/(sqrt(S[3,3])*sqrt(S[2,2]))
r33 = S[3,3]/(sqrt(S[3,3])*sqrt(S[3,3]))
R = cbind(c(r11, r12, r13), c(r12, r22, r23), c(r31, r32, r33))
# det(S) pelo calculo de determinante:
det_S = det(S)
# det(S) pelo calculo do exercicio:
det_S_calc = S[1,1]*S[2,2]*S[3,3]*det(R)
#Observa-se que os dois sao iguais
det_S
det_S_calc
#### 3.20 ####
remove(list = ls())
# Carrega e limpa os dados
# Tokenize one whitespace-padded record on single spaces and drop the
# second token (an unused column of T3-2.dat).
# NOTE: masks base::split() within this script.
split <- function(x) {
  tokens <- unlist(strsplit(trimws(x), " ", fixed = TRUE))
  tokens[-2]
}
data = read.table("../Wichern_data/T3-2.dat", sep = "\t", stringsAsFactors = F)
data = lapply(data[,1], split)
data = lapply(data, function(z){ z[!is.na(z) & z != ""]})
data = do.call(rbind, data)
colnames(data) = c("x1", "x2")
X = apply(data, 2, function(x){as.numeric(x)})
# a)
# Obtendo as estatisticas resumo
x_barra = apply(X, 2, mean)
S = cov(X)
# Vamos definir a combinacao a'X
a = as.matrix(c(-1,1))
sample_mean = t(a)%*%x_barra
sample_var = t(a)%*%S%*%a
# b)
X_diff = apply(X, 1, function(x){x[2] - x[1]})
sample_mean_first = mean(X_diff)
sample_var_first = var(X_diff)
sample_mean
sample_mean_first
sample_var
sample_var_first
#### Cap 4 ####
#### 4.2 ####
#b)
remove(list = ls())
require(mvtnorm)
require(plot3D)
mu = c(0,2)
sigma = matrix(c(2, 0.5*sqrt(2), 0.5*sqrt(2),1), ncol = 2)
autovv = eigen(sigma)
#### 4.3 ####
remove(list = ls())
# d)
sigma = cbind(c(1,-2, 0), c(-2,5, 0), c(0,0,2))
A = rbind(c(1/2, 1/2, 0), c(0,0,1))
A%*%sigma%*%t(A)
# e)
Ae = rbind(c(0, 1, 0), c(-5/2,1,-1))
Ae%*%sigma%*%t(Ae)
#### 4.5 ####
# b)
remove(list = ls())
sig11 = 5
sig12 = c(-2, 0)
sig21 = matrix(c(-2,0))
sig22 = matrix(c(1,0,0,2), ncol = 2)
sig = sig11 - sig12%*%solve(sig22)%*%sig21
# c)
remove(list = ls())
sig11 = 2
sig12 = c(1, 2)
sig21 = matrix(c(1,2))
sig22 = matrix(c(1,1,1,3), ncol = 2)
sig = sig11 - sig12%*%solve(sig22)%*%sig21
#### 4.23 ####
require(car)
# a)
remove(list = ls())
data = c(-0.6, 3.1, 25.3, -16.8, -7.1, -6.2, 25.2, 22.6, 26)
qqPlot(data, distribution = "norm")
qq = qqnorm(data)
qqline(data)
# b) Coeficiente
cor(qq$x, qq$y)
#### 4.24 ####
remove(list = ls())
# Carrega e limpa os dados
# Tokenize one whitespace-padded record on single spaces, discarding the
# empty strings produced by runs of blanks.
# NOTE: masks base::split() within this script.
split <- function(x) {
  tokens <- unlist(strsplit(trimws(x), " ", fixed = TRUE))
  tokens[tokens != ""]
}
data = read.table("../Wichern_data/P1-4.DAT", header = F, sep = "\t", stringsAsFactors = F)
data = lapply(data[,1], split)
data = do.call(rbind, data)
colnames(data) = c("x1", "x2", "x3")
X = apply(data, 2, function(x){as.numeric(x)})
# a)
#qqplot x1
qqx1 = qqnorm(X[,1])
qqx1
qqline(X[,1])
qqPlot(X[,1])
# Dentro da banda de confianca, mas uma linha nao muito reta
# qqplot x2
qqx2 = qqnorm(X[,2])
qqx2
qqline(X[,2])
qqPlot(X[,2])
# parece mais proximo da normal que x1, pontos mais em cima da linha
# b) Correlation-coefficient test for univariate normality of each column.
n = length(X[,1])
rqx1 = cor(qqx1$x, qqx1$y)
# [1] 0.9402035
# table value: 0.9351
# Do not reject the normality hypothesis, since rq > table value
n = length(X[,2])
# BUG FIX: the original computed cor(qqx2$x, qqx1$y), pairing the
# theoretical quantiles of x2 with the ordered data of x1 — which is why
# it reproduced rqx1's value (0.9402035) exactly. The statistic for x2
# must pair qqx2$x with qqx2$y.
rqx2 = cor(qqx2$x, qqx2$y)
# table value: 0.6818
# Reject the normality hypothesis if rq < table value
#### 4.25 ####
# Primeiro calcula a distancia estatistica ao quadrado
# Esta distancia, se X eh Normal, tem dist chi-quadrado
# Verificamos entao o qqplot para ver se d se aproxima da chi-quadrado
d2<-mahalanobis(X, colMeans(X), cov(X), inverted = FALSE)
qqPlot(d2, dist="chisq", df=ncol(X), main=paste("Chi-dist"), ylab=paste("d2"))
# Observa-se que os pontos estão en sua maioria dentro da banda de confiança
# e que estão relativamente proximos da reta
#### 4.26 ####
remove(list = ls())
require(ggplot2)
age = c(1, 2, 3, 3, 4, 5, 6, 8,9,11)
price = c(18.95, 19, 17.95,15.54,14,12.95,8.94, 7.49, 6, 3.99)
data = cbind(age, price)
colnames(data) = c("x1", "x2")
# a)
d2<-mahalanobis(data, colMeans(data), cov(data), inverted = FALSE)
d2
# b)
qch = qchisq(0.5, 2)
in50 = (d2 <= qch)
sum(in50)
data_contour = data.frame(cbind(data, in50))
pl <- ggplot(data_contour, aes(x1, x2))
pl + geom_point(aes(color = in50))
#c)
qqPlot(d2, dist="chisq", df=ncol(data), main=paste("Chi-dist"), ylab=paste("d2"))
qq = qqnorm(data[,2])
cor(qq$x, qq$y)
#### 4.28 ####
remove(list = ls())
# Carrega e limpa os dados
# Tokenize one whitespace-padded record on single spaces, discarding the
# empty strings produced by runs of blanks.
# NOTE: masks base::split() within this script.
split <- function(x) {
  tokens <- unlist(strsplit(trimws(x), " ", fixed = TRUE))
  tokens[tokens != ""]
}
# pag 60 pdf
data = read.table("../Wichern_data/T1-5.dat", header = F, sep = "\t", stringsAsFactors = F)
data = lapply(data[,1], split)
data = do.call(rbind, data)
colnames(data) = paste("x", seq(1:7), sep = "")
X = apply(data, 2, function(x){as.numeric(x)})
solar_rad = X[,2]
qq = qqnorm(solar_rad)
qq
qqline(solar_rad)
qqPlot(solar_rad)
# Observa-se bastante pontos fora da banda nos quantis mais inferiores
cor(qq$x, qq$y)
#[1] 0.9693258
#### 4.29 ####
#a)
d2<-mahalanobis(X[,5:6], colMeans(X[,5:6]), cov(X[,5:6]), inverted = FALSE)
d2
#b)
qch = qchisq(0.5, 2)
in50 = (d2 <= qch)
sum(in50)
paste("Proportion:", sum(in50)/length(d2))
#c)
qqPlot(d2, dist="chisq", df=ncol(X[,5:6]), main=paste("Chi-dist"), ylab=paste("d2"))
#### 4.34 ####
remove(list = ls())
# Carrega e limpa os dados
# Tokenize one whitespace-padded record on single spaces, discarding the
# empty strings produced by runs of blanks.
# NOTE: masks base::split() within this script.
split <- function(x) {
  tokens <- unlist(strsplit(trimws(x), " ", fixed = TRUE))
  tokens[tokens != ""]
}
# pag 64 pdf
data = read.table("../Wichern_data/T1-8.DAT", header = F, sep = "\t", stringsAsFactors = F)
data = lapply(data[,1], split)
data = do.call(rbind, data)
colnames(data) = paste("x", seq(1:6), sep = "")
X = apply(data, 2, function(x){as.numeric(x)})
colnames(X) = paste("x", seq(1:6), sep ="")
###Verificando Normalidade univariada###
##qqplot
par(mfrow=c(3,2))
for( i in 1:ncol(X)){
qqPlot(X[,i], dist="norm", main=paste("x_",i), ylab=paste("empirical"))}
###shapiro univariado###
W=rep(0,ncol(X))
for(k in ncol(X)){
W[k]=shapiro.test(X[,k])$p.value }
W
### coef correlacao ###
## ref value 0.5: 0.9591
Q=rep(0,ncol(X))
for(k in ncol(X)){
qq = qqnorm(X[,k])
Q[k] = cor(qq$x, qq$y)
}
Q
sum(Q >= 0.9591)
# Tanto o teste shapiro quanto o teste do coeficiente de correlacao rq
# Indicam que apenas para a variavel x6 aceitariamos a hipotese de normalidade
### Verificando a normalidade bivariada ###
# Fazendo pelo teste da distancia estatistica e contorno de 50%
biv_test = rbind(
c(1, 2),
c(1, 3),
c(1, 4),
c(1, 5),
c(1, 6),
c(2, 3),
c(2, 4),
c(2, 5),
c(2, 6),
c(3, 4),
c(3, 5),
c(3, 6),
c(4, 5),
c(4, 6),
c(5, 6)
)
biv_test = cbind(biv_test, rep(0, length(biv_test[,1])))
colnames(biv_test) = c("var1", "var2", "proportion 50%")
for(row in length(biv_test[,1])){
vars = biv_test[row,-3]
X_biv = X[,vars]
d2<-mahalanobis(X_biv, colMeans(X_biv), cov(X_biv), inverted = FALSE) ##dist multvariada ~~ QUi-q
qch = qchisq(0.5, 2)
in50 = (d2 <= qch)
biv_test[row, 3] = sum(in50)/length(d2)
}
biv_test
# Apenas 5 e 6 apresentam valor proximo de 50%, mas ainda nao exatamente
#### 4.36 ####
remove(list = ls())
data = read.table("../Wichern_data/T1-9.dat", header = F, sep = "\t", stringsAsFactors = F)
head(data)
###Verificando Normalidade univariada###
##qqplot
par(mfrow=c(3,3))
for( i in 2:ncol(data)){
qqPlot(data[,i], dist="norm", main=paste("x_",i), ylab=paste("empirical"))}
###shapiro univariado###
W=rep(0,ncol(data))
for(k in ncol(data[,-1])){
W[k]=shapiro.test(data[,k])$p.value }
W
### coef correlacao ###
## ref value 0.5: 0.9787
Q=rep(0,ncol(data))
for(k in ncol(data)){
qq = qqnorm(data[,k])
Q[k] = cor(qq$x, qq$y)
}
Q
sum(Q >= 0.9787)
# Nao ha evidencia que nenhuma das variaveis tenha distribuicao normal
###Teste de Normalidade Multivariada###
x=as.matrix(data[,-1])
mvShapiro.Test(x) ## teste Shapiro
#p-value = 1.201e-15
# Nao ha evidencia de normalidade multivariada
#calcula a distancia estatistica
#depois validamos se o d2 tem dist quiquadrado (que eh o caso da normal multivariada)
d2<-mahalanobis(x, colMeans(x), cov(x), inverted = FALSE) ##dist multvariada ~~ QUi-q
d2
qqPlot(d2, dist="chisq", df=ncol(x), main=paste("Chi-dist"), ylab=paste("d2"))
#### Cap 5 ####
#### 5.1 ####
remove(list = ls())
#b)
X = cbind(c(2,8,6,8), c(12,9,9,10))
n = nrow(X)
p = ncol(X)
x_bar = apply(X, 2, mean)
mu = c(7,11)
S = cov(X)
alpha = 0.05
T2_cal<-n*mahalanobis(x_bar, mu, S, inverted = FALSE)
T2_cal
q=qf(1-alpha,p,n-p)*((n-1)*p)/(n-p)
q
|
94eb0db35bc23facfaa192d85eb392cc82ab092a
|
cc2ee7cda1080631699c80372640a15eb7ffd03c
|
/man/crossBoundCumProb.Rd
|
fd30e48a2dae4088ecdbd87b9582c640d2dae7d2
|
[] |
no_license
|
mjuraska/seqDesign
|
2d11a6ea8e1dbabbbdf57aaf8dd4485f462fddc1
|
23965224e807e5512f3a6c75c008ab02f490e9e5
|
refs/heads/master
| 2022-12-11T22:06:58.018005
| 2022-12-08T20:18:27
| 2022-12-08T20:18:27
| 176,831,673
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,392
|
rd
|
crossBoundCumProb.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postMonitoring.R
\name{crossBoundCumProb}
\alias{crossBoundCumProb}
\title{Estimate cumulative probabilities of crossing an efficacy or non-efficacy boundary in an event-driven 2-arm trial design}
\usage{
crossBoundCumProb(
boundType = c("eff", "nonEff"),
nAnalyses,
monitorTrialFile,
monitorTrialDir = NULL
)
}
\arguments{
\item{boundType}{a character string specifying if the one-sided null hypothesis is of the form \eqn{H_0: \theta \geq \theta_0} (\code{"eff"}, default) or \eqn{H_0: \theta \leq \theta_0} (\code{"nonEff"}), where \eqn{\theta} and \eqn{\theta_0} are the true hazard ratio and its value specifying the null hypothesis, respectively}
\item{nAnalyses}{a numeric value specifying the number of analyses}
\item{monitorTrialFile}{either a character string specifying an \code{.RData} file or a list outputted by \code{\link{monitorTrial}}}
\item{monitorTrialDir}{a character string specifying a path to \code{monitorTrialFile} if \code{monitorTrialFile} specifies a file name}
}
\value{
A numeric vector of estimated cumulative probabilities of crossing the specified boundary by analysis \eqn{1,\ldots,\code{nAnalyses}}.
}
\description{
Computes proportions of simulated trials that crossed either an efficacy or a non-efficacy stopping boundary by analysis \eqn{1,\ldots,\code{nAnalyses}} using an \code{.RData} output file from \code{\link{monitorTrial}}. An event-driven 2-arm trial design is assumed.
}
\examples{
simData <- simTrial(N=c(1000, 1000), aveVE=c(0, 0.4),
VEmodel="half", vePeriods=c(1, 27, 79), enrollPeriod=78,
enrollPartial=13, enrollPartialRelRate=0.5, dropoutRate=0.05,
infecRate=0.06, fuTime=156, visitSchedule=seq(0, 156, by=4),
missVaccProb=0.05, VEcutoffWeek=26, nTrials=5,
stage1=78, randomSeed=300)
monitorData <- monitorTrial(dataFile=simData, stage1=78, stage2=156,
harmMonitorRange=c(10,75), harmMonitorAlpha=0.05,
effCohort=list(timingCohort=list(lagTime=0),
times=c(75, 150),
timeUnit="counts",
lagTime=0,
estimand="cox",
nullVE=0,
nominalAlphas=c(0.001525, 0.024501)),
nonEffCohorts=list(timingCohort=list(lagTime=0),
times=c(75, 150),
timeUnit="counts",
cohort1=list(lagTime=0,
estimand="cox",
nullVE=0.4,
nominalAlphas=c(0.001525, 0.024501))),
lowerVEnoneff=0, highVE=1, lowerVEuncPower=0,
alphaHigh=0.05, alphaUncPower=0.05,
verbose=FALSE)
crossBoundCumProb(boundType="eff", nAnalyses=2, monitorTrialFile=monitorData)
crossBoundCumProb(boundType="nonEff", nAnalyses=2, monitorTrialFile=monitorData)
}
|
bc580249a3e76b36a902c77459ee973bc7a25ea1
|
42abe0fef0d12287d170fd2445864f9fb9aec74b
|
/R/dump.R
|
1d69a2e049ff5a1737287408d6c5314ac3a27ab1
|
[] |
no_license
|
natverse/neuprintr
|
45bf13ea5f351c7088ad0e9de36a737342e070f3
|
1bf392fb896710d64e1b03cdddb38ea05a54f5d1
|
refs/heads/master
| 2023-08-26T15:19:05.332544
| 2023-08-16T03:32:48
| 2023-08-16T03:32:48
| 176,496,271
| 3
| 2
| null | 2023-09-07T19:59:04
| 2019-03-19T11:22:52
|
R
|
UTF-8
|
R
| false
| false
| 5,481
|
r
|
dump.R
|
#' @title Download data from neuprint for specified bodyids / a specified ROI
#'
#' @description Download neuron morphology and connectivity data to a specified directory as .csv and .rda files
#' @inheritParams neuprint_read_neurons
#' @inheritParams neuprint_bodies_in_ROI
#' @inheritParams neuprint_connection_table
#' @inheritParams drvid::dv_get_voxels
#' @param dir the directory to which to save the dump
#' @param preprocess a function that can be applied to a nat::neuronlist object, to be called on neurons once they are read from
#' the neuprint server using \code{neuprint_read_neurons}
#' @param connectivity whether or not to include connectivity information in the dump, i.e. an adjacency matrix between bodyids
#' @param volumes whether or not to include neuron volume information in the dump, i.e. voxels. Currently only works by talking to a DVID server using
#' the package drvid
#' @param voxel.thresh the size threshold, in number of voxels, a neuron/segment must exceed, to be included in the dump, if read from an ROI
#' @seealso \code{\link{neuprint_get_synapses}}, \code{\link{neuprint_read_neurons}}
#' @export
#' @rdname neuprint_dump
#' @importFrom nat write.neurons
neuprint_dump <- function(dir, bodyids = NULL, roi = NULL, preprocess = NULL, connectivity = TRUE, volumes = TRUE,
                          meta = TRUE, nat = TRUE, drvid = FALSE, soma = TRUE,
                          heal = TRUE, connectors = TRUE, all_segments = FALSE, resample = FALSE,
                          scale = 4, voxel.thresh = 1e+07,
                          dataset = NULL, conn=NULL, OmitFailures = TRUE, ...){
  message("making data dump in directory ", dir)
  # Resolve/validate the neuprint connection (uses stored credentials if NULL).
  conn = neuprint_login(conn)
  # At least one neuron-selection mechanism is required.
  if(is.null(roi) && is.null(bodyids)){
    stop("You must provide either a vector of bodyids or an ROI for your dataset, in order to select neurons to dump at location ", dir,
         " If both are provided, extra bodyids from within the ROI will be added to those in argument bodyids")
  }
  if(!is.null(roi)){ # get bodids in ROI
    message("fetching bodyids in ROI ", roi, " in ", dataset)
    inroi = neuprint_bodies_in_ROI( roi = roi,
                                    dataset = dataset, conn=conn, ...)
    # Drop small bodies/segments at or below the voxel-count threshold.
    inroi = subset(inroi, voxels>voxel.thresh)
    # Union of the ROI hits and any explicitly requested bodyids.
    bodyids = unique(c(id2char(inroi$bodyid),
                       neuprint_ids(bodyids, conn=conn, dataset = dataset)))
  }
  # Fetch neuron data (morphology, plus metadata/connectors per the flags).
  message("Reading neurons from ", conn$server, " for dataset: ", dataset)
  neurons = neuprint_read_neurons(bodyids = bodyids, meta = meta, nat = nat, drvid=drvid, soma = soma, heal = heal, connectors = connectors,
                                  all_segments = all_segments, dataset = dataset, resample = resample,
                                  conn = conn, OmitFailures = OmitFailures, ...)
  # pre-process data: apply the caller-supplied transformation, if any, to the
  # whole neuronlist before anything is written to disk.
  if(!is.null(preprocess)){
    message("processing neuronlist data")
    neurons = preprocess(neurons)
  }
  # save neuronlist data: the serialized neuronlist plus its metadata as CSV.
  dir.create(file.path(dir, "neuronlist"), showWarnings = FALSE)
  save(neurons,file=paste0(dir,"/neuronlist/neuronlist.rda"))
  utils::write.csv(neurons[,], file = paste0(dir,"/neuronlist/neuronlist_meta_data.csv"))
  # save SWC files, one skeleton per bodyid.
  # NOTE(review): files are named from `bodyids` while neurons are iterated in
  # neuronlist order — presumably the two stay aligned after OmitFailures;
  # verify, since a dropped neuron would shift the pairing.
  message("saving SWC files")
  dir.create(file.path(dir, "swc"), showWarnings = FALSE)
  write.neurons(neurons,dir = file.path(dir, "swc"), format = "swc", files = paste0(bodyids,".swc"), Force = TRUE)
  # save synapse locations, one file of connector coordinates per neuron.
  if(connectors){
    message("saving synapse locations")
    dir.create(file.path(dir, "connectors"), showWarnings = FALSE)
    # NOTE(review): 1:length(neurons) misbehaves for an empty neuronlist
    # (iterates 1, 0); seq_along() would be safer — confirm empty case is
    # possible before changing.
    pb <- utils::txtProgressBar(min = 0, max = length(neurons), style = 3)
    for(n in 1:length(neurons)){
      # NOTE(review): CSV content is written to a file with a ".swc"
      # extension; "_connectors.csv" looks intended — confirm no downstream
      # consumer depends on the current name before renaming.
      utils::write.csv(neurons[[n]]$connectors,file=paste0(dir,"/connectors/",names(neurons)[n],"_connectors.swc"))
      utils::setTxtProgressBar(pb, n)
    }
    close(pb)
  }
  # save connectivity between the body ids: adjacency matrix plus upstream
  # (PRE) and downstream (POST) partner tables.
  if(connectivity){
    message("saving adjacency matrix")
    dir.create(file.path(dir, "connectivity"), showWarnings = FALSE)
    adjm = neuprint_get_adjacency_matrix(bodyids = bodyids, dataset = dataset, all_segments = all_segments,
                                         conn = conn, ...)
    message("saving upstream connection table")
    pre = neuprint_connection_table(bodyids = bodyids, prepost = "PRE", progress = TRUE, roi = NULL,
                                    dataset = dataset, conn = conn, all_segments = all_segments, ... )
    message("saving downstream connection table")
    post = neuprint_connection_table(bodyids = bodyids, prepost = "POST", progress = TRUE, roi = NULL,
                                     dataset = dataset, conn = conn, all_segments = all_segments, ... )
    utils::write.csv(adjm,paste0(dir,"/connectivity/adjacency_matrix.csv"))
    utils::write.csv(pre,paste0(dir,"/connectivity/pre_connection_table.csv"))
    utils::write.csv(post,paste0(dir,"/connectivity/post_connection_table.csv"))
  }
  # save volumes: per-body voxel data fetched via drvid, only when both the
  # `volumes` and `drvid` flags are set.
  if(volumes&drvid){
    message("saving voxels")
    dir.create(file.path(dir, "voxels", scale), showWarnings = FALSE, recursive = TRUE)
    # NOTE(review): conn = NULL is passed here rather than the neuprint `conn`
    # above — presumably deliberate, so drvid uses its own default DVID
    # connection; confirm.
    voxels = pbapply::pblapply(bodyids, drvid::dv_get_voxels, scale = scale, conn = NULL, ...)
    pb <- utils::txtProgressBar(min = 0, max = length(bodyids), style = 3)
    for(v in 1:length(voxels)){
      utils::write.csv(voxels[[v]],file=file.path(dir, "voxels", scale, paste0(bodyids[v],"_voxels.csv")))
      utils::setTxtProgressBar(pb, v)
    }
    close(pb)
  }
  message("done!")
}
|
948d8fe9f605a66ad8c436c99daee3165b10ff19
|
d104de65c0df3546de0eb0952e1344fa59245a2d
|
/R/pace_converter_functions.R
|
06254d4c13d1f0d1d3357c08a7ca78b65fad14d8
|
[] |
no_license
|
alegerosa/runnR
|
6e515f64be2ec2ff5e5526cb4504602423d4d2ac
|
9972834d390f5666c4eb9b2b1ae560f794061af5
|
refs/heads/master
| 2020-12-20T14:58:50.656950
| 2020-02-17T04:19:08
| 2020-02-17T04:19:33
| 236,115,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,100
|
r
|
pace_converter_functions.R
|
#' Pace km to pace mi, text
#'
#' This function converts paces from minutes per kilometer to minutes per
#' mile, taking strings as inputs and returning strings as outputs.
#'
#' @param pace_km_text A string or vector of strings with pace in minutes per
#'   kilometer, where the minutes are separated from the seconds by a colon.
#' @return A string or vector of strings with the pace in minutes per mile,
#'   where the minutes are separated from the seconds by a colon.
#' @examples
#' pace_km_to_mi_text("6:03")
#' pace_km_to_mi_text(c("7:30", "5:27", "6:42"))
#'
#' @export
pace_km_to_mi_text <- function(pace_km_text) {
  # Validate the whole vector at once. BUG FIX: the original applied `&&` to
  # the vector returned by str_detect(), which errors for length > 1 input on
  # R >= 4.3 (and silently inspected only the first element before that),
  # even though the documented interface accepts vectors. `||` still
  # short-circuits, so str_detect() is never reached for non-character input;
  # na.rm = TRUE lets NA entries through, matching previous behavior
  # (NA in -> NA out).
  if (!is.character(pace_km_text) ||
      !all(stringr::str_detect(pace_km_text, pattern = ":"), na.rm = TRUE)) {
    stop('This function only works with the paces inputed as strings of minutes and seconds separated by a \':\', check that all your inputs meet this format: \'6:45\'')
  }
  # Parse "M:SS" into a per-kilometer duration.
  dur_km <- lubridate::duration(
    mins = as.numeric(stringr::word(pace_km_text, 1, sep = stringr::fixed(":"))),
    seconds = as.numeric(stringr::word(pace_km_text, 2, sep = stringr::fixed(":")))
  )
  # 1 mile = 1.609344 km, so the per-mile duration is proportionally longer.
  dur_mi <- dur_km * 1.609344
  pace_mi_period <- lubridate::as.period(dur_mi)
  # Re-assemble as "M:SS", zero-padding the seconds.
  # NOTE(review): lubridate::minute() reports only the minutes component, so
  # a pace of an hour or more per mile would silently drop the hours —
  # irrelevant for realistic running paces, but worth knowing.
  pace_mi_text <- paste(
    lubridate::minute(pace_mi_period),
    ":",
    stringr::str_pad(floor(lubridate::second(pace_mi_period)), 2, "left", pad = "0"),
    sep = ""
  )
  return(pace_mi_text)
}
pace_km_to_mi_text(c("2:59", NA))
#' Pace mi to pace km, text
#'
#' This function converts paces from minutes per mile to minutes per
#' kilometer, taking strings as inputs and returning strings as outputs.
#'
#' @param pace_mi_text A string or vector of strings with pace in minutes per
#'   mile, where the minutes are separated from the seconds by a colon.
#' @return A string or vector of strings with the pace in minutes per
#'   kilometer, where the minutes are separated from the seconds by a colon.
#' @examples
#' pace_mi_to_km_text("7:45")
#' pace_mi_to_km_text(c("11:02", "9:00", "10:34"))
#'
#' @export
pace_mi_to_km_text <- function(pace_mi_text) {
  # Validate the whole vector at once. BUG FIX: the original applied `&&` to
  # the vector returned by str_detect(), which errors for length > 1 input on
  # R >= 4.3 even though the documented interface accepts vectors. `||`
  # short-circuits, so str_detect() is never reached for non-character input;
  # na.rm = TRUE lets NA entries through (NA in -> NA out).
  if (!is.character(pace_mi_text) ||
      !all(stringr::str_detect(pace_mi_text, pattern = ":"), na.rm = TRUE)) {
    stop('This function only works with the paces inputed as strings of minutes and seconds separated by a \':\', check that all your inputs meet this format: \'6:45\'')
  }
  # Parse "M:SS" into a per-mile duration.
  dur_mi <- lubridate::duration(
    mins = as.numeric(stringr::word(pace_mi_text, 1, sep = stringr::fixed(":"))),
    seconds = as.numeric(stringr::word(pace_mi_text, 2, sep = stringr::fixed(":")))
  )
  # 1 mile = 1.609344 km, so the per-kilometer duration is proportionally
  # shorter.
  dur_km <- dur_mi / 1.609344
  pace_km_period <- lubridate::as.period(dur_km)
  # Re-assemble as "M:SS", zero-padding the seconds.
  pace_km_text <- paste(
    lubridate::minute(pace_km_period),
    ":",
    stringr::str_pad(floor(lubridate::second(pace_km_period)), 2, "left", pad = "0"),
    sep = ""
  )
  return(pace_km_text)
}
pace_mi_to_km_text(c("8:40", "4:50"))
#' Pace km to pace mi, number input
#'
#' Converts a pace from minutes per kilometer to minutes per mile, taking two
#' numbers as inputs (minutes and seconds) and returning a string as output.
#' Only works with one observation at a time for now.
#'
#' @param pace_km_min A number with the whole minutes in the pace in minute per kilometer
#' @param pace_km_sec A number with the seconds in the pace in minute per kilometer
#' @return A string with the pace in minutes per mile, where the minutes are separated from the seconds by a colon.
#' @examples
#' pace_km_to_mi_num_input(7,30)
#' pace_km_to_mi_num_input(5,27)
#'
#' @export
pace_km_to_mi_num_input <- function(pace_km_min, pace_km_sec) {
  # Build the per-km duration, scale by km-per-mile, then format as "M:SS".
  per_km <- lubridate::duration(mins = pace_km_min, seconds = pace_km_sec)
  per_mi <- lubridate::as.period(per_km * 1.609344)
  padded_secs <- stringr::str_pad(floor(lubridate::second(per_mi)), 2, "left", pad = "0")
  paste(lubridate::minute(per_mi), ":", padded_secs, sep = "")
}
pace_km_to_mi_num_input(7,30)
pace_km_to_mi_num_input(5,27)
#' Pace mi to pace km, number input
#'
#' Converts a pace from minutes per mile to minutes per kilometer, taking two
#' numbers as inputs (minutes and seconds) and returning a string as output.
#' Only works with one observation at a time for now.
#'
#' @param pace_mi_min A number with the whole minutes in the pace in minute per mi
#' @param pace_mi_sec A number with the seconds in the pace in minute per mi
#' @return A string with the pace in minutes per kilometer, where the minutes are separated from the seconds by a colon.
#' @examples
#' pace_mi_to_km_num_input(7,30)
#' pace_mi_to_km_num_input(5,27)
#'
#' @export
pace_mi_to_km_num_input <- function(pace_mi_min, pace_mi_sec) {
  # Build the per-mile duration, divide by km-per-mile, then format as "M:SS".
  per_mi <- lubridate::duration(mins = pace_mi_min, seconds = pace_mi_sec)
  per_km <- lubridate::as.period(per_mi / 1.609344)
  padded_secs <- stringr::str_pad(floor(lubridate::second(per_km)), 2, "left", pad = "0")
  paste(lubridate::minute(per_km), ":", padded_secs, sep = "")
}
pace_mi_to_km_num_input(7,30)
pace_mi_to_km_num_input(5,27)
|
6d5d03bdea61bbe1ffd03e5e911275dc0e2732d3
|
fcaaf7ba8ec7e21883394ad57f3fb544f4dd63dc
|
/Cap09/13-Distr-Normal.R
|
f0eafa4bef01e5796c09cb162069a5f9f9d543ed
|
[] |
no_license
|
GasparPSousa/BigDataAnalytics-R-Azure
|
f3226150461496c0d78781bfd8fe3b5bb5237199
|
aeeb060f32f8846ea80f6bc4631d0f07d21cbf1e
|
refs/heads/main
| 2023-05-14T23:57:15.302363
| 2021-06-06T14:04:48
| 2021-06-06T14:04:48
| 357,303,863
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,943
|
r
|
13-Distr-Normal.R
|
# Normal distribution demo script
# Set the working directory.
# NOTE(review): setwd() with a machine-specific path makes the script
# non-portable; prefer project-relative paths.
setwd("~/Cursos/DSA/FCD/BigDataRAzure/Cap09")
# Show the current working directory.
getwd()
# List the currently attached base packages (the search path).
search()
# The Normal (Gaussian) distribution is the most important continuous
# distribution, largely because of the central limit theorem, an essential
# result in both theory and practice: even when the data themselves are not
# normally distributed, the sample mean converges to a normal distribution
# as the sample size grows.
# R ships with functionality for working with probability distributions.
# For each distribution there are 4 basic operations, indicated by a prefix:
# d: density f(x) at a point
# p: cumulative distribution function F(x) at a point
# q: quantile corresponding to a given probability
# r: draw a random sample from the distribution
# Combine one of the letters above with the distribution's abbreviation.
# For example, to compute probabilities use: pnorm for the normal, pexp for
# the exponential, pbinom for the binomial, ppois for the Poisson, and so on.
# x <- rnorm(n, mean, sd)
# where n is the sample size and mean/sd are optional parameters for the
# mean and standard deviation, respectively.
# Normal distribution: draw a sample and inspect its histogram.
?rnorm
x <- rnorm(100)
hist(x)
# Density
# The resulting curve resembles a Gaussian and shows no skew. When a
# distribution's plot has this shape, there is a good chance it is
# approximately normal.
x <- seq(-6, 6, by=0.01)
y <- dnorm(x)
plot(x, y, type="l")
# Quit the R session.
# NOTE(review): q() terminates R entirely — surprising if this script is
# sourced from an interactive session.
q()
|
99ce2f7a3c70d6e0c5a0062a6f2223c777f9e9b6
|
f33bc23a2cf454b908ec5129cd91f202cfaa8893
|
/tests/testthat/test-query.R
|
4f7513b15a55b6385ba8fc3c8984114a80d34258
|
[
"MIT"
] |
permissive
|
ibartomeus/traitbaser
|
7e86da6ec2fbee813e57fb3fcf51cc243be849e5
|
cae4a35e6ab8c7a3cb45e24f0ecfc7b1fe46b73f
|
refs/heads/master
| 2021-04-29T03:18:35.979292
| 2020-04-14T08:20:32
| 2020-04-14T08:20:32
| 78,012,880
| 0
| 0
| null | 2017-01-04T12:17:13
| 2017-01-04T12:17:12
| null |
UTF-8
|
R
| false
| false
| 1,338
|
r
|
test-query.R
|
# testthat tests for the query() verb of the traitbase API client.
# NOTE(review): every test connects to the live demo server at
# http://www.traitbase.info with hard-coded demo credentials, so these are
# integration tests and will fail without network access to that host.
context("query")
test_that("query species", {
  cnx <- connect("http://www.traitbase.info", "demo", "1234")
  resource <- resource(cnx, "species")
  out <- query(resource)
  # NOTE(review): length(out) > -1 is always TRUE (length() is never
  # negative), so this expectation is vacuous; length(out) > 0 was
  # probably intended.
  expect_equal(TRUE, length(out) > -1)
  expect_equal(8, ncol(out))
})
test_that("query species limit=1", {
  cnx <- connect("http://www.traitbase.info", "demo", "1234")
  resource <- resource(cnx, "species")
  # limit caps the number of returned rows.
  out <- query(resource, limit=1)
  expect_equal(1, nrow(out))
  expect_equal(8, ncol(out))
})
test_that("query species limit=2 skip=1", {
  cnx <- connect("http://www.traitbase.info", "demo", "1234")
  resource <- resource(cnx, "species")
  # skip offsets the result window; pagination = limit + skip.
  out <- query(resource, limit=2, skip=1)
  expect_equal(2, nrow(out))
  expect_equal(8, ncol(out))
})
test_that("query species limit=1 names", {
  cnx <- connect("http://www.traitbase.info", "demo", "1234")
  resource <- resource(cnx, "species")
  out <- query(resource, limit=1)
  # The first three columns of the species resource are expected to be the
  # Mongo-style id followed by the taxonomic names.
  n <- names(out)
  expect_equal(1, nrow(out))
  expect_equal("_id", n[[1]])
  expect_equal("genus", n[[2]])
  expect_equal("species", n[[3]])
})
test_that("query species limit=1 with condition", {
  cnx <- connect("http://www.traitbase.info", "demo", "1234")
  resource <- resource(cnx, "species")
  # A condition that matches nothing yields an empty result; nrow() on that
  # result is NULL rather than 0.
  out <- query(resource, limit=1, conditions=buildCondition("genus", "==", "unit-test-sample" ))
  expect_equal(NULL, nrow(out))
})
|
1195ea09f1de5896dc09391cd7d10676d6ab8e53
|
b8a44a643675919a6ce53e077c1e6ed84e4ade99
|
/code/settings_to_formula.R
|
10d366f99c5b6d7c4e6d6c4054561d40afc0b72a
|
[] |
no_license
|
bernard-liew/2020_stairs_biomech
|
30f17ac50c0b8e0fedd1d3c7b6e6684e723c2dc9
|
5e478f092385b4455faafbe904d19bb39c9742b4
|
refs/heads/master
| 2023-04-09T18:14:08.766217
| 2021-02-25T11:23:40
| 2021-02-25T11:23:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,298
|
r
|
settings_to_formula.R
|
# Build the right-hand side of a mgcv GAM formula, returned as a string.
#
# Each k_* argument is the requested basis dimension for the corresponding
# ti() tensor-interaction smooth. mgcv requires k >= 3 for a 'cr' basis, so
# any value below 3 is rounded down to 0 by roundBelow3(), and an
# interaction term is emitted only when every one of its basis dimensions
# is positive. The marginal smooths of cycle, age, speed, ht and strlen,
# plus the parametric sex effect, are always included. Callers convert the
# returned string with as.formula().
settings_to_formula <- function(
    k_cycle,
    k_age,
    k_speed,
    k_cycle_age_1,
    k_cycle_age_2,
    k_cycle_speed_1,
    k_cycle_speed_2,
    k_age_speed_1,
    k_age_speed_2,
    k_cycle_age_speed_1,
    k_cycle_age_speed_2,
    k_cycle_age_speed_3,
    k_cycle_ht_1,
    k_cycle_ht_2,
    k_ht,
    k_strlen,
    k_cycle_strlen_1,
    k_cycle_strlen_2,
    # k_cycle_re,
    cycle_age  # NOTE(review): unused inside the body; kept for interface compatibility
) {
  # funargs <- as.list(match.call())[-1]
  # if(any(unlist(funargs)<3 & unlist(funargs)>0))
  #   return(do.call("settings_to_formula", lapply(funargs, function(x) pmax(3,x))))
  # Basis dimensions below mgcv's minimum of 3 disable the term (become 0).
  k_cycle <- roundBelow3(k_cycle)
  k_age <- roundBelow3(k_age)
  k_speed <- roundBelow3(k_speed)
  k_cycle_age_1 <- roundBelow3(k_cycle_age_1)
  k_cycle_age_2 <- roundBelow3(k_cycle_age_2)
  k_cycle_speed_1 <- roundBelow3(k_cycle_speed_1)
  k_cycle_speed_2 <- roundBelow3(k_cycle_speed_2)
  k_age_speed_1 <- roundBelow3(k_age_speed_1)
  k_age_speed_2 <- roundBelow3(k_age_speed_2)
  k_cycle_age_speed_1 <- roundBelow3(k_cycle_age_speed_1)
  k_cycle_age_speed_2 <- roundBelow3(k_cycle_age_speed_2)
  k_cycle_age_speed_3 <- roundBelow3(k_cycle_age_speed_3)
  k_cycle_ht_1 <- roundBelow3(k_cycle_ht_1)
  k_cycle_ht_2 <- roundBelow3(k_cycle_ht_2)
  k_ht <- roundBelow3(k_ht)
  k_strlen <- roundBelow3(k_strlen)
  k_cycle_strlen_1 <- roundBelow3(k_cycle_strlen_1)
  k_cycle_strlen_2 <- roundBelow3(k_cycle_strlen_2)
  # k_cycle_re <- roundBelow3(k_cycle_re)
  # Marginal smooths and the parametric sex term are always present.
  form <- paste0("~ ",
                 "ti (cycle, k = ", k_cycle, ", bs = 'cr') + ",
                 "ti (age, k = ", k_age, ", bs = 'cr') + ",
                 "ti (speed, k = ", k_speed, ", bs = 'cr') + ",
                 "ti(ht, k = ", k_ht, ", bs = 'cr') + ",
                 "ti(strlen, k = ", k_strlen, ", bs = 'cr') + ",
                 "sex ")
  # Two-way interactions: emitted only when both basis dimensions survived
  # the rounding above.
  if (all(c(k_cycle_age_1,
            k_cycle_age_2) > 0))
    form <- paste0(form,
                   "+ ti (cycle, age, k = c(", k_cycle_age_1, ",", k_cycle_age_2, "), bs = 'cr')")
  if (all(c(k_cycle_speed_1,
            k_cycle_speed_2) > 0))
    form <- paste0(form,
                   "+ ti (cycle, speed, k = c(", k_cycle_speed_1, ",", k_cycle_speed_2, "), bs = 'cr')")
  if (all(c(k_age_speed_1,
            k_age_speed_2) > 0))
    form <- paste0(form,
                   "+ ti (age, speed, k = c(", k_age_speed_1, ",", k_age_speed_2, "), bs = 'cr')")
  # Three-way interaction: requires all pairwise AND three-way basis
  # dimensions to be positive (so it is only fitted alongside its marginals).
  if (all(c(
    k_cycle_age_1,
    k_cycle_age_2,
    k_cycle_speed_1,
    k_cycle_speed_2,
    k_age_speed_1,
    k_age_speed_2,
    k_cycle_age_speed_1,
    k_cycle_age_speed_2,
    k_cycle_age_speed_3) > 0))
    form <- paste0(form,
                   "+ ti (cycle, speed, age, k = c(", k_cycle_age_speed_1, ",", k_cycle_age_speed_2, ",",
                   k_cycle_age_speed_3, "), bs = 'cr')"
    )
  if (all(c(k_cycle_ht_1,
            k_cycle_ht_2) > 0))
    form <- paste0(form,
                   "+ ti (cycle, ht, k = c(", k_cycle_ht_1, ",", k_cycle_ht_2, "), bs = 'cr')")
  # BUG FIX: the original pasted k_cycle_strlen_1 twice here, so the second
  # basis dimension of ti(cycle, strlen) could never be set independently.
  if (all(c(k_cycle_strlen_1,
            k_cycle_strlen_2) > 0))
    form <- paste0(form,
                   "+ ti (cycle, strlen, k = c(", k_cycle_strlen_1, ",", k_cycle_strlen_2, "), bs = 'cr')")
  # if(k_cycle_re>0)
  #   form <- paste0(form,
  #                  "+ ti (cycle, k = ", k_cycle_re,", by = study, bs = 're')")
  return(form)
}
# Map basis dimensions below mgcv's minimum (3) to 0, meaning "term disabled".
roundBelow3 <- function(x) ifelse(x < 3, 0, x)
|
892979ae6fb5b58cf0cccb72d260819691bb8aea
|
784526035645204d053fe5e911b94df76cdd76ef
|
/git.R
|
06ff586f2f09b870bc8ecdb174aae95484c13926
|
[] |
no_license
|
parinfuture/nlp
|
3b798082ec362d3713c6f62b70497ff8510f642a
|
f873138f73cf3c684eb80a6a6d6d0ece51bb8656
|
refs/heads/master
| 2021-01-22T23:16:49.064871
| 2017-03-20T20:46:55
| 2017-03-20T20:46:55
| 85,624,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,196
|
r
|
git.R
|
# Kaggle kernel (kaggle/rstats docker image): Quora question-pair duplicate
# detection. Reads train/test CSVs, engineers first/last-word-match and NRC
# sentiment features, then fits a single-hidden-layer neural net.
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
# Input data files are available in the "../input/" directory.
system("ls ../input")
# Any results you write to the current directory are saved as output.
library(readr)
train <- read_csv('../input/train.csv')
head(train)
test <- read_csv('../input/test.csv')
library(stringi)
names(train)
dim(train)
names(test)
dim(test)
# Stack train and test question pairs so features are built consistently.
library(stringi)
all <- rbind(train[,c('question1', 'question2')], test[, c('question1', 'question2')])
library(syuzhet)
# Binary features: do the two questions share the same first / last word?
all_first <- stri_extract_first_words(all$question1)
all_second <- stri_extract_first_words(all$question2)
all_first1 <- stri_extract_last_words(all$question1)
all_second1 <- stri_extract_last_words(all$question2)
all$first = ifelse(all_first == all_second, 1, 0)
all$second = ifelse(all_first1 == all_second1, 1, 0)
# NRC sentiment scores (10 columns each) for both questions.
all_sentiment1 = get_nrc_sentiment(all$question1)
all_sentiment2 = get_nrc_sentiment(all$question2)
all = cbind(all, all_sentiment1, all_sentiment2)
dim(all)
names(all)
dim(train)
dim(test)
# Split the feature matrix back into train (404290 rows) and test partitions.
newtrain <- all[1:404290, ]
newtest <- all[404291:2750086, ]
newtrain[,c(1:23)] = lapply(newtrain[,c(1:23)],as.factor)
newtest[, c(1:23)] = lapply(newtest[, c(1:23)], as.factor)
names(train)
newtrain$is_duplicate <- train$is_duplicate
newtrain <- na.omit(newtrain)
newtest <- na.omit(newtest)
str(newtrain)
# NOTE(review): `negative` was converted to a factor above, so as.numeric()
# yields factor level codes, not the original sentiment counts — confirm
# this is intended.
newtrain$negative <- as.numeric(newtrain$negative)
str(newtrain$negative)
names(newtrain)
names(newtest)
# Building model
library(nnet)
summary(newtrain)
ideal <- class.ind(newtrain$is_duplicate)
model1 <- nnet(newtrain[,-25], ideal, size = 10, softmax = TRUE)
pred <- predict(model1, newdata = newtest, type = 'class')
# BUG FIX: the original line `data.frame(newtest$id = test$id, ...)` is a
# syntax error (an extraction expression cannot be used as an argument
# name), and a stray "show less" scrape artifact followed it.
# NOTE(review): after na.omit(newtest), length(pred) may no longer match
# length(test$id); the test ids should be carried through newtest instead.
dat1 <- data.frame(id = test$id, is_duplicate = pred)
|
811b4e93962f0fb0261d1fa604f58467195151ae
|
43d60aeb722f2803c028273e3b142a4a2f7b9d54
|
/女子100米预测.R
|
12d4fa1cbde862de4ce947c4cae5354a2bf16dc2
|
[] |
no_license
|
jianchongsu/R
|
cc531346b27200365762a1fe6b38f97eef9a3928
|
6377e27d828e075157c6b1ade687c4265c2c5433
|
refs/heads/master
| 2016-09-06T10:04:29.651118
| 2012-08-09T16:49:48
| 2012-08-09T16:49:48
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,254
|
r
|
女子100米预测.R
|
# Predict the women's 100 m Olympic winning time from results scraped from
# databaseolympics.com, fitting a 4-parameter logistic model and a log-linear
# model to the post-1900 gold-medal times, then plotting both.
library(XML)
library(drc)
surl="http://www.databaseolympics.com/sport/sportevent.htm?sp=ATH&enum=700"
#url <- "http://www.databaseolympics.com/sport/sportevent.htm?enum=110&sp=ATH"
sdata <- readHTMLTable(readLines(surl), which=2, header=TRUE)
#golddata <- subset(data, Medal %in% "GOLD")
# Keep only the gold-medal rows.
gold=sdata[which(sdata$Medal=="GOLD"),]
# Coerce the factor columns to numeric before fitting.
gold$Year <- as.numeric(as.character(gold$Year))
gold$Result <- as.numeric(as.character(gold$Result))
tail(gold,10)
# 4-parameter logistic fit and a log-linear fit on the post-1900 results.
logistic <- drm(Result~Year, data=subset(gold, Year>=1900), fct = L.4())
log.linear <- lm(log(Result)~Year, data=subset(gold, Year>=1900))
years <- seq(1896,2012, 4)
predictions <- exp(predict(log.linear, newdata=data.frame(Year=years)))
plot(logistic, xlim=c(1896,2012),
ylim=c(9.5,12),
xlab="Year", main="Olympic 100 metre",
ylab="Winning time for the 100m women final (s)")
# FIX: original referenced an undefined object `data`
# (points(sdata$Year, data$Result)); the numeric winning times live in `gold`.
points(gold$Year, gold$Result)
lines(years, predictions, col="red")
# Mark and label the 2012 prediction on the plot.
points(2012, predictions[length(years)], pch=19, col="red")
text(2011, 10.55, round(predictions[length(years)],2))
u="http://www.basketball-reference.com/players/b/bryanko01.html"
kobe <- readHTMLTable(readLines(u), which=2, header=TRUE)
d1c8663a7736e53cac48b9c3937bfec729fe3ea0
|
59209c2327ffc2e64514a81cc0c2565facdbb780
|
/R/spacetime_bisquare.R
|
6db8427b771a21b063daee03e8d1af1a5cfcd82f
|
[] |
no_license
|
holans/ST-COS
|
22db522b5a53b90b0bcd2661458239eb0226615a
|
a20c61cc19fb03919a3e6273c275af199018f60c
|
refs/heads/master
| 2021-03-27T08:33:24.973767
| 2020-06-03T14:36:03
| 2020-06-03T14:36:03
| 70,738,620
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,386
|
r
|
spacetime_bisquare.R
|
#' Space-Time Bisquare Basis
#'
#' @description
#' Space-time bisquare basis on point data.
#'
#' @param dom Space-time points \eqn{(\bm{u}_1,v_1), \ldots, (\bm{u}_n,v_n)}
#' to evaluate. See "Details".
#' @param knots Spatio-temporal knots
#' \eqn{(\bm{c}_1,g_1), \ldots, (\bm{c}_r,g_r)}
#' for the basis. See "Details".
#' @param w_s Spatial radius for the basis.
#' @param w_t Temporal radius for the basis.
#'
#' @return A sparse \eqn{n \times r} matrix whose \eqn{i}th row
#' is
#' \deqn{
#' \bm{s}_i^\top =
#' \Big(
#' \psi_1(\bm{u}_i,v_i), \ldots, \psi_r(\bm{u}_i,v_i)
#' \Big).
#' }
#'
#' @details
#' Notes about arguments:
#' \itemize{
#' \item Both \code{dom} and \code{knots} may be provided as either \code{sf} or
#' \code{sfc} objects, or as matrices of points.
#' \item If an \code{sf} or \code{sfc} object is provided for \code{dom}, \eqn{n}
#' three-dimensional \code{POINT} entries are expected in \code{st_geometry(dom)}.
#' Otherwise, \code{dom} will be interpreted as an \eqn{n \times 3} numeric matrix.
#' \item If an \code{sf} or \code{sfc} object is provided for \code{knots}, \eqn{r}
#' three-dimensional \code{POINT} entries are expected in \code{st_geometry(knots)}.
#' Otherwise, \code{knots} will be interpreted as an \eqn{r \times 3} numeric matrix.
#' \item If both \code{dom} and \code{knots_s} are given as \code{sf} or \code{sfc} objects,
#' they will be checked to ensure a common coordinate system.
#' }
#'
#' For each \eqn{(\bm{u}_i,v_i)}, compute the basis functions
#' \deqn{
#' \psi_j(\bm{u},v) =
#' \left[ 2 - \frac{\Vert \bm{u} - \bm{c}_j \Vert^2}{w_s^2}- \frac{|v - g_j|^2}{w_t^2} \right]^2 \cdot
#' I(\Vert \bm{u} - \bm{c}_j \Vert \leq w_s) \cdot
#' I(|v - g_j| \leq w_t)
#' }
#' for \eqn{j = 1, \ldots, r}.
#'
#' Due to the treatment of \eqn{\bm{u}_i} and \eqn{\bm{c}_j} as points in a
#' Euclidean space, this basis is more suitable for coordinates from a map
#' projection than coordinates based on a globe representation.
#'
#' @examples
#' set.seed(1234)
#'
#' # Create knot points
#' seq_x = seq(0, 1, length.out = 3)
#' seq_y = seq(0, 1, length.out = 3)
#' seq_t = seq(0, 1, length.out = 3)
#' knots = expand.grid(x = seq_x, y = seq_y, t = seq_t)
#' knots_sf = st_as_sf(knots, coords = c("x","y","t"), crs = NA, dim = "XYM", agr = "constant")
#'
#' # Points to evaluate
#' x = runif(50)
#' y = runif(50)
#' t = sample(1:3, size = 50, replace = TRUE)
#' pts = data.frame(x = x, y = y, t = t)
#' dom = st_as_sf(pts, coords = c("x","y","t"), crs = NA, dim = "XYM", agr = "constant")
#'
#' rad = 0.5
#' spacetime_bisquare(cbind(x,y,t), knots, w_s = rad, w_t = 1)
#' spacetime_bisquare(dom, knots_sf, w_s = rad, w_t = 1)
#'
#' # Plot the (spatial) knots and the points at which we evaluated the basis
#' plot(knots[,1], knots[,2], pch = 4, cex = 1.5, col = "red")
#' text(x, y, labels = t, cex = 0.75)
#'
#' # Draw a circle representing the basis' radius around one of the knot points
#' tseq = seq(0, 2*pi, length=100)
#' coords = cbind(rad * cos(tseq) + seq_x[2], rad * sin(tseq) + seq_y[2])
#' lines(coords, col = "red")
#'
#' @family bisquare
#' @export
spacetime_bisquare = function(dom, knots, w_s, w_t)
{
	# Normalize the inputs (sf / sfc / matrix) to plain coordinate matrices,
	# then evaluate the space-time bisquare basis at every point.
	prepared = prepare_bisquare(dom, knots, type = "point")
	basis = compute_basis_spt(prepared$X, prepared$knot_mat, w_s, w_t)

	# Assemble the n x r sparse result; the computed indices are 0-based,
	# hence the +1 when building the Matrix object.
	sparseMatrix(i = basis$ind_row + 1, j = basis$ind_col + 1,
		x = basis$values, dims = basis$dim)
}
|
286d5dce1f948734cf01db187387aed6fb96c3d4
|
59abcf8d840cb4681de35fc5342184e5f0302c95
|
/examples/acetest.r
|
9ecae728f0eaafd240b473e8c07c196dc4b67d93
|
[
"MIT"
] |
permissive
|
skranz/shinyAce2
|
9863c8ce102e96f6b13e53d3f586c754fb099629
|
80cffdb254e8526c30563a7957605e6b94a33ee8
|
refs/heads/master
| 2016-09-05T16:26:48.552943
| 2014-08-13T14:48:02
| 2014-08-13T14:48:02
| 22,919,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
r
|
acetest.r
|
# Minimal shinyAce demo: two Ace editors sharing custom key bindings, with a
# server that logs cursor positions and key presses to the console.
library(shiny)
library(shinyAce)
library(restorepoint)
# Key bindings passed to each editor via keyId; pressing one updates the
# same-named reactive input observed in the server below.
keys = list(runLineKey="Ctrl-Enter", helpKey="F1", runKey="Ctrl-R|Ctrl-Alt-R", hintKey="Ctrl-H")
ui = bootstrapPage(
aceEditor("ace",value="text1\nline2", height=100,
keyId=keys, showLineNumbers=FALSE,highlightActiveLine=FALSE)
,aceEditor("ace2",value="text2", height=100,keyId=keys)
)
server = function(input, output, session) {
# Log cursor movements from both editors.
observe({
print(input$cursorId)
print(input$cursorId2)
})
observe({
cat("\nrunLineKey pressed:")
print(input$runLineKey)
})
observe({
cat("\nhelpKey pressed:")
print(input$helpKey)
})
}
# NOTE(review): rstudio::viewer comes from the legacy `rstudio` package and
# only works inside RStudio; rstudioapi::viewer is the modern equivalent --
# confirm the intended environment.
runApp(list(ui=ui, server=server), launch.browser=rstudio::viewer)
|
870ba52e2b06745c0b2f174cc078b50c6a0b8782
|
1318b29d7b0f212ebe1a87145a13ee563ea094d8
|
/man/ANOVA.Repeat.Measure.Rd
|
91e9e434151de7f713875978bd04e25e0082963a
|
[] |
no_license
|
cran/TrialSize
|
73c3ff9086760334fa83d4608c111bb0ea32765b
|
314e951e9d33786b6a2883f7cd483984cb611243
|
refs/heads/master
| 2021-06-04T14:24:45.564491
| 2020-07-06T20:40:03
| 2020-07-06T20:40:03
| 17,693,970
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,033
|
rd
|
ANOVA.Repeat.Measure.Rd
|
\name{ANOVA.Repeat.Measure}
\alias{ANOVA.Repeat.Measure}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
ANOVA with Repeat Measures
}
\description{
The study has multiple assessments in a parallel-group clinical trial. \eqn{\alpha_i} is the fixed effect for the ith treatment, subject to the constraint \eqn{\sum \alpha_i = 0}.
H0: \eqn{\alpha_{i} = \alpha_{i'}} (the two treatment effects are equal)
Ha: \eqn{\alpha_{i} \neq \alpha_{i'}} (the two treatment effects differ)
}
\usage{
ANOVA.Repeat.Measure(alpha, beta, sigma, delta, m)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{alpha}{
significance level
}
\item{beta}{
power = 1-beta
}
\item{sigma}{
sigma^2 is the sum of the variance components.
}
\item{delta}{
a clinically meaningful difference
}
\item{m}{
Bonferroni adjustment applied to alpha; m pairwise comparisons are made in total.
}
}
\references{
Chow SC, Shao J, Wang H. Sample Size Calculation in Clinical Research. New York: Marcel Dekker, 2003
}
\examples{
Example.15.3.4<-ANOVA.Repeat.Measure(0.05,0.2,1.25,1.5,3)
Example.15.3.4
# 15
}
|
a9f90dd3acf41f00476ab2e12bc5f3ddea6f2c31
|
f90115826c0234fbdfe7f580e6654925aa98e6b2
|
/R-source/corr.R
|
37683625829d5dba262a588e89e01b26db74b283
|
[] |
no_license
|
mizma2k/FX-Git
|
9272485edc4f7dfb4f12d3a4fbcaa74d9e2896d9
|
b9629452419f30c4cade872928c2dcf119096b64
|
refs/heads/master
| 2022-12-30T23:11:03.398272
| 2020-09-09T03:40:11
| 2020-09-09T03:40:11
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 7,285
|
r
|
corr.R
|
#|------------------------------------------------------------------------------------------|
#| corr.R |
#| Copyright © 2012, Dennis Lee |
#| Assert Question |
#| Write a function that takes a directory of data files and a threshold for complete |
#| cases and calculates the correlation between sulfate and nitrate for monitor locations |
#| where the number of completely observed cases (on all variables) is greater than the |
#| threshold. The function should return a vector of correlations for the monitors that |
#| meet the threshold requirement. If no monitors meet the threshold requirement, then |
#| the function should return a numeric vector of length 0. |
#| |
#| For this function you will need to use the 'cor' function in R which calculates the |
#| correlation between two vectors. Please read the help page for this function via '?cor' |
#| and make sure that you know how to use it. |
#| |
#| Please save your code to a file named corr.R. To run the test script for this part, |
#| make sure your working directory has the file corr.R in it and then run: |
#| |
#| > source("http://spark-public.s3.amazonaws.com/compdata/scripts/corr-test.R") |
#| > corr.testscript() |
#| |
#| Assert History |
#| 1.0.0 Coursera Computing in Data Analysis (Roger Peng) Assignment 1 Part 3 Week 2: |
#| Input: THREE HUNDRED AND TWENTY TWO (322) data files (.csv) in 'specdata' |
#| folder. |
#| Output: ONE (1) correlations vector as threshold = 150. |
#| ONE (1) correlations vector as threshold = 400. |
#| ONE (1) correlations vector as threshold = 5000. |
#| ONE (1) correlations vector as threshold = 0. |
#|------------------------------------------------------------------------------------------|
#|------------------------------------------------------------------------------------------|
#| I N I T I A L I Z A T I O N |
#|------------------------------------------------------------------------------------------|
# Point the R session at the folder holding the analysis sources.
# Returns (invisibly) the previous working directory, exactly as setwd() does.
Init <- function(workDirStr="C:/Users/denbrige/100 FxOption/103 FxOptionVerBack/080 Fx Git/R-source") setwd(workDirStr)
#|------------------------------------------------------------------------------------------|
#| I N T E R N A L F U N C T I O N S |
#|------------------------------------------------------------------------------------------|
corr <- function(directory, threshold = 0)
{
  #--- Correlation between sulfate and nitrate for every monitor whose number
  # of completely observed cases is strictly greater than `threshold`.
  #
  # 'directory' is a character vector of length 1 indicating the location of
  #             the CSV files.
  # 'threshold' is a numeric vector of length 1 indicating the number of
  #             completely observed observations (on all variables) required
  #             to compute the correlation; the default is 0.
  # Returns a numeric vector of correlations (length 0 if no monitor
  # qualifies).

  #--- FIX: the directory argument was ignored here (complete("specdata")
  # was hard-coded), so callers could not point at a different data folder.
  nobsDfr <- complete(directory)
  #--- Keep only monitors strictly above the threshold.
  nobsDfr <- nobsDfr[nobsDfr$nobs > threshold, ]
  #--- vapply preallocates the result instead of growing it with c();
  # on zero qualifying monitors it returns numeric(0), as required.
  vapply(nobsDfr$id, function(cid) {
    monDfr <- getmonitor(cid, directory)
    cor(monDfr$sulfate, monDfr$nitrate, use = "pairwise.complete.obs")
  }, numeric(1))
}
complete <- function(directory, id = 1:332)
{
  #--- Count the complete cases (rows with no NA in any column) for each
  # requested monitor.
  #
  # 'directory' is a character vector of length 1 indicating the location of
  #             the CSV files.
  # 'id' is an integer vector indicating the monitor ID numbers to be used.
  # Returns a data frame with columns `id` and `nobs`, e.g.:
  #   id nobs
  #    1  117
  #    2 1041

  #--- vapply preallocates the result; the original grew nobsNum with c()
  # inside a loop (quadratic copying).
  nobsNum <- vapply(id, function(cid) {
    cDfr <- getmonitor(cid, directory)
    # sum(complete.cases(.)) counts NA-free rows without materialising the
    # filtered copy that nrow(na.omit(.)) would build.
    sum(complete.cases(cDfr))
  }, numeric(1))
  data.frame(id = id, nobs = nobsNum)
}
getmonitor <- function(id, directory, summarize = FALSE)
{
  # Read the pollution data for a single monitor.
  #
  # 'id' is a vector of length 1 with the monitor ID (integer, numeric, or
  #      character); it is zero-padded to three digits to build the file
  #      name, e.g. id = 1 -> "<directory>/001.csv".
  # 'directory' is a character vector of length 1 with the CSV folder.
  # 'summarize' if TRUE, prints summary() of the data to the console.
  # Returns the monitor's data as a data frame.
  csvName <- sprintf("%03d.csv", as.numeric(id))
  monitorPath <- paste0(directory, "/", csvName)
  monitorDfr <- read.csv(monitorPath)
  if (summarize) print(summary(monitorDfr))
  monitorDfr
}
#|------------------------------------------------------------------------------------------|
#| M A I N P R O C E D U R E |
#|------------------------------------------------------------------------------------------|
# Driver: set the working directory, then run corr() at several thresholds
# against the 'specdata' folder and inspect the resulting correlation vectors.
# NOTE(review): the results are bound to a variable named `data`, which
# shadows the base function utils::data for the rest of the session.
#--- Init set working directory
Init()
#--- Get correlations vector as threshold = 150
data <- corr("specdata", 150)
head(data)
summary(data)
#--- Get correlations vector as threshold = 400
data <- corr("specdata", 400)
head(data)
summary(data)
#--- Get correlations vector as threshold = 5000
data <- corr("specdata", 5000)
summary(data)
length(data)
#--- Get correlations vector as threshold = 0
data <- corr("specdata", 0)
summary(data)
length(data)
#|------------------------------------------------------------------------------------------|
#| E N D O F S C R I P T |
#|------------------------------------------------------------------------------------------|
|
5cbda6d34e360652ddd4a8a64497e6f45585618b
|
4e149b593bac0ab3d59b0663f3f7464aa3f71d41
|
/Commute_RouteExploring_Public.R
|
f4d25b318062ff3327361fd3a68888692d94381c
|
[] |
no_license
|
ZacharyST/CommuteTimes
|
856713b6c745c7efe30bc0ad4ccc206ca22b4b3a
|
a561ce3401a454feeeca3b908c2fd75e0c68eeee
|
refs/heads/master
| 2021-01-19T17:07:26.293221
| 2017-04-14T22:44:38
| 2017-04-14T22:44:38
| 88,305,502
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,818
|
r
|
Commute_RouteExploring_Public.R
|
#!usr/bin/Rscript
############
# LIBRARIES
############
library(googleway)
library(ggplot2)
############
# FUNCTIONS
############
processDirections <- function(origin, destination, mode){
# Query the Google Directions API for one origin/destination/mode and return
# a one-row data frame: commute distance (miles), duration (minutes),
# departure timestamp, departure time as HH:MM, and the travel mode.
#
# origin/destination: addresses or place names understood by Google Maps.
# mode: "driving" (uses duration_in_traffic) or "transit" (uses duration).
current_time <- Sys.time() + 1  # departure_time must lie in the future
api_key <- 'oops_almost_put_it_in_here'
result <- google_directions(origin=origin, destination=destination, mode=mode, key=api_key, departure_time = current_time)
# FIX: the original divided by 1600, overstating distances by ~0.6%; a
# statute mile is 1609.344 meters.
meters_per_mile <- 1609.344
if(mode=='driving'){
time <- result$routes$legs[[1]]$duration_in_traffic$value/60 # API reports seconds
miles <- result$routes$legs[[1]]$distance$value/meters_per_mile # API reports meters
}
if(mode=='transit'){
time <- result$routes$legs[[1]]$duration$value/60
miles <- result$routes$legs[[1]]$distance$value/meters_per_mile
}
# FIX: substr(current_time, 12, 16) returns "" at exactly midnight because
# as.character() on a POSIXct drops a 00:00:00 time component; format() with
# an explicit pattern always yields "HH:MM".
time_of_day <- format(current_time, "%H:%M")
return(data.frame(commute_distance=miles, commute_length=time, commute_start=current_time, commute_start_hhmm = time_of_day, mode=mode))
}
############
# GET DATA
############
# Collect one driving and one transit commute estimate, append them to a CSV
# keyed by the origin/destination pair, then plot commute time by time of day.
origin <- "your_start_location_or_address"
destination <- "your_end_location_or_address"
car <- processDirections(origin=origin, destination=destination, mode="driving")
bus <- processDirections(origin=origin, destination=destination, mode="transit")
############
# SAVE DATA
############
# Append-only CSV so repeated runs accumulate observations over the day.
fileout <- paste0('/path/out/', gsub(" ", "", origin), '_', gsub(" ", "", destination))
write.table(rbind(car,bus), file=paste0(fileout, '.csv'), quote=FALSE, sep=',', append=TRUE, row.names=FALSE, col.names=FALSE)
############
# PLOT DATA
############
columns <- c('Commute Distance', 'Commute Time', 'Commute Start', 'Time of Day', 'Mode')
data <- read.csv(paste0(fileout,'.csv'), col.names=columns, header=FALSE)
# NOTE(review): since R 4.0 read.csv no longer converts strings to factors by
# default, so these levels()<- assignments assume Mode is a factor -- confirm
# the R version this runs under.
levels(data$Mode)[1] <- 'Driving'
levels(data$Mode)[2] <- 'Transit'
# Show roughly one x-axis label per hour (every 12th 5-minute slot).
xlabels <- sort(unique(data$Time.of.Day))
xbreaks <- NULL
for(i in 1:length(xlabels)){
if(i %% 12 == 0){
xbreaks[i] <- i
}
else{
xbreaks[i] <- NA
}
}
xbreaks[1] <- 1
xlabels <- xlabels[xbreaks]
pdf(paste0(fileout,'_Plotted.pdf'))
ggplot(data, aes(x=as.integer(Time.of.Day), y=Commute.Time, colour=Mode)) + geom_point(size=.5) + theme_bw() + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(), legend.key = element_blank(), axis.text.x = element_text(angle=90, vjust=0.5, size=16), axis.title.x = element_text(size=16), axis.text.y = element_text(size=16), axis.title.y = element_text(size=16), legend.text = element_text(size=16), legend.title = element_text(size=16)) + xlab("") + ylab("Commute Time") + stat_smooth(aes(group=Mode), method=loess, alpha=.25, size=.5, n=length(unique(data$Time.of.Day))) + scale_x_continuous(breaks=xbreaks, labels=xlabels) + scale_y_continuous(limits=c(10,120), breaks=seq(10,120,10), labels=seq(10, 120, 10))
dev.off()
|
79ba26cb91d50a56af2c5b941557ae016c0031fb
|
f90fec7b801f45ff649ceb7262d8a1058339a26b
|
/Megadetector accuracy/ena24_analysis.R
|
ae511b67b685b50545354f314a0d94d009423290
|
[] |
no_license
|
NaomiMcWilliam/ZSL-Internship
|
f27de445051f042afe2caed9f7c3b45dbede890d
|
394140045529288eccba39c0e8f5e76da2d8f14b
|
refs/heads/main
| 2023-07-16T08:35:21.131212
| 2021-09-02T00:58:22
| 2021-09-02T00:58:22
| 389,102,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,900
|
r
|
ena24_analysis.R
|
# ---- ENA24 ground-truth (manual) annotations -------------------------------
# NOTE(review): both jsonlite and rjson are attached; rjson (attached last)
# masks jsonlite::fromJSON, and the `file =` argument below matches rjson's
# signature.
library(jsonlite)
library(rjson)
library(tidyverse)
library(glue)
data <- fromJSON(file = "../ENA24/ena24_metadata.json")
#there are 9676 images in total in the dataset
#some of these have humans in, and have been emitted from the images
#so the metadata does not match up with the megadetector, as there are less images
#analysed by the megadetector, ones with humans in have been omitted
#MANUAL CLASSIFICATIONS
# (re-reads the same metadata file as above; the first read is redundant)
data <- fromJSON( file = "../ENA24/ena24_metadata.json")
#DATA ANNOTATIONS
#image id, category id, bounding box
data_annotations <- as.data.frame(do.call(rbind, data$annotations))
data_annotations$id <- unlist(data_annotations$id)
data_annotations$image_id <- unlist(data_annotations$image_id)
data_annotations$category_id <- unlist(data_annotations$category_id)
#data_annotations$bbox <- unlist(data_annotations$bbox)
#DATA IMAGES
#image id, image file name, image width, image height
data_images <- as.data.frame(do.call(rbind, data$images))
#DATA CATEGORIES
#name of animal, image id
data_categories <- as.data.frame(do.call(rbind, data$categories))
colnames(data_categories)[2] <- "category_id"
colnames(data_categories)[1] <-"Classification_manual"
data_categories$Classification_manual <- unlist(data_categories$Classification_manual)
data_categories$category_id <- unlist(data_categories$category_id)
# One row per ground-truth bounding box; an image can appear several times.
total_df <- merge(data_annotations, data_categories, by="category_id")
manual_df <- total_df[,c("image_id", "Classification_manual", "bbox")]
#issue is that it has a few rows for the same image, so in comparisons they may not be comparing the same thing ..?
#need to look at every bounding box for each image, and see if megadetector also predicts something in roughly that box, and if it is the same thing
# ---- MegaDetector output parsing -------------------------------------------
# Flattens the MegaDetector JSON into one row per detection (or one "empty"
# row for images with no detections), then attaches category names.
# NOTE(review): the vectors below are grown element-by-element inside loops
# (quadratic copying) -- works, but slow on large result sets.
#MEGADETECTOR CLASSIFICATIONS
md_data <- fromJSON(file = "../ENA24/ena24.json")
#md_data_images <- as.data.frame(do.call(rbind, md_data$images))
#finding the images which have humans in so were not included in the images
# Image IDs are sliced out of the file path string; the offsets 14 and -4
# assume a fixed "ena24_images/<id>.jpg"-style prefix/suffix -- TODO confirm.
md_images <- vector()
for (i in 1:length(md_data$images)){
md_images[i] <- str_sub(md_data$images[[i]]$file, 14, str_length(md_data$images[[i]]$file)-4)
}
# IDs present in the metadata (1..9676) but absent from the MegaDetector run
# (human images were excluded upstream).
notinc <- vector()
m <- 1
for (j in 1:9676){
if (!(j %in% strtoi(md_images))){
notinc[m] <- j
m <- m+1
}
}
#makes a vector containing image id's, confidence levels, category id, bounding box
#need to make sure it includes all the detections
image_id <- vector()
conf <- vector()
Category_id_md <- vector()
bbox_md <- list()
#i is index for original lists
j<-1 #index for new lists (larger than i as sometimes there are 2 birds in one image etc.)
#k is index for detections in one image
#using json file rather than the data frames
for (i in 1:length(md_images)){
#empty image, no detections
if (length(md_data$images[[i]]$detections)==0){
image_id[j] <- md_images[i]
conf[j] <- md_data$images[[i]]$max_detection_conf #this will be 0, no confidence in any detections
Category_id_md[j] <- "0" #empty
bbox_md[[j]] <- c(0,0,0,0) #empty
j<-j+1 #increment, not going in for loop
}else{
#increment the next detection (there is 1,2,3 etc.)
for (k in 1:length(md_data$images[[i]]$detections)) {
image_id[j] <- md_images[i]
#detections
conf[j] <- md_data$images[[i]]$detections[[k]]$conf
Category_id_md[j]<-md_data$images[[i]]$detections[[k]]$category
bbox_md[[j]] <- c(md_data$images[[i]]$detections[[k]]$bbox)
j <- j+1 #increment to next index in new lists for new detections
}
}
}
Category_id_md_initial <- Category_id_md #includes category before confidence has been considered
#data frame for the images, includes confidence column and the bounding box
md_image_df_conf <- data.frame(image_id, Category_id_md_initial, conf, Category_id_md)
#add the bounding box column list
md_image_df_conf$bbox_md <- bbox_md
#only for above a certain confidence level
# Detections at or below conf_level are relabelled as category "0" (empty).
md_image_df <- md_image_df_conf
conf_level <- 0
for (i in 1:length(md_image_df_conf$conf)){
if (md_image_df_conf$conf[i] <= conf_level){
md_image_df$Category_id_md[i] <- "0" #empty
}
}
#data frame for megadetector classification name and id
md_data_categories <- as.data.frame(do.call(rbind, md_data$detection_categories))
colnames(md_data_categories)[1] <- "Classification_Megadetector"
md_data_categories$Category_id_md <- rownames(md_data_categories)
md_data_categories <- rbind(md_data_categories, c("empty", "0"))
md_data_info <- md_data$info
md_animal_df <- merge(md_image_df, md_data_categories, by="Category_id_md")
md_animal_df <- md_animal_df [,c("image_id", "Classification_Megadetector", "bbox_md")]
# ---- Align manual and MegaDetector detections per image --------------------
# For each image, pad the side with fewer detections with "empty" rows so the
# two tables have equal row counts, then bind them side by side.
# NOTE(review): rbind-ing inside the per-image loop grows all_animal_df
# quadratically; binding a list of per-image frames at the end would be
# far faster.
#TOTAL DATA FRAMES, MANUAL & MEGADETECTOR
#the bounding boxes are formatted differently, hard to compare them.
#check bounding boxes
#for (i in data_images$id){
# image <- all_df[all_df$image_id==i , ]
#}
###comparing images from manual vs megadetector
#can't just merge them as images have a different number of detections
#need to check each image and compare what is inside, using loop and creating the combined data frame manually
#if megadetector more detections than manual, create a manual 'empty' row for that image
#if manual has more detections than megadetector, create a megadetector 'empty' row for that image
#loop through every image id
all_animal_df <- data.frame() #create an empty dataframe to add information to
for (i in md_images){
#progress checker
if (strtoi(i)%%1000==0){
print(i)
}
#corresponding database of the current image in megadetector dataframe
md_rows <- md_animal_df[md_animal_df$image_id==i,]
#corresponding database of the current image in manual dataframe
manual_rows <- manual_df[manual_df$image_id==i,]
#megadetector more detections than manual
while (nrow(md_rows)>nrow(manual_rows)){
manual_rows <- rbind(manual_rows, list(i, "empty", list(c(0,0,0,0))))
}
#manual more detections than megadetector
while (nrow(manual_rows)>nrow(md_rows)){
md_rows <- rbind(md_rows, list(i, "empty", list(c(0,0,0,0))))
}
#cannot use merge by image_id as both rows have same image_id, so use cbind
combined_df <- cbind(md_rows, manual_rows)
all_animal_df <- rbind(all_animal_df, combined_df)
}
#with image id
animal_df_images <- all_animal_df[,c("image_id", "Classification_Megadetector", "Classification_manual")]
#just the classifications
animal_df <- all_animal_df[,c("Classification_Megadetector", "Classification_manual")]
# Contingency table of MegaDetector label vs manual label per detection.
table_animal <- table(animal_df)
# ---- Binary confusion matrix (animal present vs absent) --------------------
# Manual side: any real animal category counts as "positive". MegaDetector
# side: "animal" is positive; "empty"/"person"/"vehicle" negative; anything
# else bucketed as "other".
#CONFUSION MATRIX
#get names of animals, and remove humans and vehicle category name, also does not include the empty category
# NOTE(review): -c(9,10) assumes the human and vehicle categories sit at
# rows 9 and 10 of data_categories -- confirm against the metadata ordering.
animal_names <- data_categories[1][-c(9,10),]
cm_manual_class <- vector()
cm_md_class <- vector()
for (i in 1:length(all_animal_df$Classification_manual)){
if (all_animal_df$Classification_manual[i] %in% animal_names){
cm_manual_class[i] <- "positive"
} else{
cm_manual_class[i] <- "negative"
}
}
for (i in 1:length(all_animal_df$Classification_Megadetector)){
if (all_animal_df$Classification_Megadetector[i] %in% c("animal")){
cm_md_class[i] <- "positive"
} else if (all_animal_df$Classification_Megadetector[i] %in% c("empty", "person", "vehicle")){
cm_md_class[i] <- "negative"
} else {
cm_md_class[i] <- "other"
}
}
cm_df <- data.frame(cm_manual_class, cm_md_class)
#actual positive, predicted positive (actual=manual, predicted=megadetector)
apos_ppos <- cm_df[cm_df$cm_manual_class=="positive" & cm_df$cm_md_class=="positive", ]
apos_pneg <- cm_df[cm_df$cm_manual_class=="positive" & cm_df$cm_md_class=="negative", ]
aneg_ppos <- cm_df[cm_df$cm_manual_class=="negative" & cm_df$cm_md_class=="positive", ]
aneg_pneg <- cm_df[cm_df$cm_manual_class=="negative" & cm_df$cm_md_class=="negative", ]
confusion_matrix <- matrix(nrow=2, ncol=2)
rownames(confusion_matrix) <- c("Predicted Positive", "Predicted Negative")
colnames(confusion_matrix) <- c("Actual Positive", "Actual Negative")
confusion_matrix[1,1] <- nrow(apos_ppos)
confusion_matrix[1,2] <- nrow(aneg_ppos)
confusion_matrix[2,1] <- nrow(apos_pneg)
confusion_matrix[2,2] <- nrow(aneg_pneg)
#OUTPUTS
table_animal
confusion_matrix
# Copy the false-negative images (manual says animal, MegaDetector says
# empty/person/vehicle) into a folder for visual inspection.
#make a folder with these results, with the images inside
#e.g. 0.6/Camelus_ferus/positive
apos_pneg_id<- animal_df_images[animal_df_images$Classification_Megadetector %in% c("empty", "person", "vehicle") & animal_df_images$Classification_manual %in% animal_names, "image_id"]
#warning if it already exists so can just put showWarnings as false, as it does not crash just has a warning
#path to create the folder
path <- "C:/Users/n1638/Documents/ZSL/ENA24/conf_matrix_vis/conf_0"
dir.create(path , recursive=TRUE, showWarnings = FALSE)
#original image paths
images_in_folder <- paste("C:/Users/n1638/Documents/ZSL/ENA24/ena24_output/anno_ena24_images~",apos_pneg_id ,".jpg",sep="")
#copy the files over
file.copy(images_in_folder, path)
#look at the images
#all_animal_df[all_animal_df$image_id=="8496",]
|
8710d59ac6370090b17eef379a2765dcbac2e73e
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoContextGetMatrix.Rd
|
0b4b296a8bd1cc9f8964fc4852faee5ea5f31124
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 589
|
rd
|
pangoContextGetMatrix.Rd
|
\alias{pangoContextGetMatrix}
\name{pangoContextGetMatrix}
\title{pangoContextGetMatrix}
\description{Gets the transformation matrix that will be applied when
rendering with this context. See \code{\link{pangoContextSetMatrix}}.}
\usage{pangoContextGetMatrix(object)}
\arguments{\item{\verb{object}}{[\code{\link{PangoContext}}] a \code{\link{PangoContext}}}}
\details{ Since 1.6}
\value{[\code{\link{PangoMatrix}}] the matrix, or \code{NULL} if no matrix has been set
(which is the same as the identity matrix).}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
46898ac55c27dc8fb9689265e0568e6e09b2fb40
|
aeec49bf59ba42b433054c48e7dfd0f935fcf5cb
|
/R/S3_predict.R
|
7e50dc914f844ca736976b269dd93685730c7075
|
[] |
no_license
|
bowers-illinois-edu/estimatr
|
60a214f1a1466a41a59f80f77fbe1c16a229988d
|
63d29278c38aebc8147b2f62191c407025fe0993
|
refs/heads/master
| 2020-03-18T21:56:49.436686
| 2018-05-29T15:17:26
| 2018-05-29T15:17:26
| 135,314,710
| 0
| 0
| null | 2018-05-29T15:17:27
| 2018-05-29T15:15:17
|
R
|
UTF-8
|
R
| false
| false
| 7,611
|
r
|
S3_predict.R
|
#' Predict method for \code{lm_robust} object
#'
#' @param object an object of class 'lm_robust'
#' @param newdata a data frame in which to look for variables with which to predict
#' @param se.fit logical. Whether standard errors are required, default = FALSE
#' @param interval type of interval calculation. Can be abbreviated, default = none
#' @param alpha numeric denoting the test size for confidence intervals
#' @param na.action function determining what should be done with missing
#' values in newdata. The default is to predict NA.
#' @param pred.var the variance(s) for future observations to be assumed for
#' prediction intervals.
#' @param weights variance weights for prediction. This can be a numeric
#' vector or a bare (unquoted) name of the weights variable in the supplied
#' newdata.
#' @param ... other arguments, unused
#'
#' @details Produces predicted values, obtained by evaluating the regression
#' function in the frame `newdata`` for fits from \code{lm_robust} and
#' \code{lm_lin}. If the logical se.fit is TRUE, standard errors of the
#' predictions are calculated. Setting intervals specifies computation of
#' confidence or prediction (tolerance) intervals at the specified level,
#' sometimes referred to as narrow vs. wide intervals.
#'
#' The equation used for the standard error of a prediction given a row of
#' data \eqn{x} is:
#'
#' \eqn{\sqrt(x \Sigma x')},
#'
#' where \eqn{\Sigma} is the estimated variance-covariance matrix from
#' \code{lm_robust}.
#'
#' The prediction intervals are for a single observation at each case in
#' `newdata` with error variance(s) `pred.var`. The default is to assume
#' that future observations have the same error variance as those used for
#' fitting, which is gotten from the fit \code{\link{lm_robust}} object. If
#' weights is supplied, the inverse of this is used as a scale factor. If the
#' fit was weighted, the default is to assume constant prediction variance,
#' with a warning.
#'
#' @examples
#'
#' # Set seed
#' set.seed(42)
#'
#' # Simulate data
#' n <- 10
#' dat <- data.frame(y = rnorm(n), x = rnorm(n))
#'
#' # Fit lm
#' lm_out <- lm_robust(y ~ x, data = dat)
#' # Get predicted fits
#' fits <- predict(lm_out, newdata = dat)
#' # With standard errors and confidence intervals
#' fits <- predict(lm_out, newdata = dat, se.fit = TRUE, interval = "confidence")
#'
#' # Use new data as well
#' new_dat <- data.frame(x = runif(n, 5, 8))
#' predict(lm_out, newdata = new_dat)
#'
#' # You can also supply custom variance weights for prediction intervals
#' new_dat$w <- runif(n)
#' predict(lm_out, newdata = new_dat, weights = w, interval = "prediction")
#'
#' @export
predict.lm_robust <- function(object,
                              newdata,
                              se.fit = FALSE,
                              interval = c("none", "confidence", "prediction"),
                              alpha = 0.05,
                              na.action = na.pass,
                              pred.var = NULL,
                              weights,
                              ...) {
  # Build the design matrix for `newdata` from the fit's stored terms.
  X <- get_X(object, newdata, na.action)
  # lm_lin scaling: lm_lin fits store the covariate means used at fit time
  # (`scaled_center`); rebuild the centered covariates and their treatment
  # interactions here so the columns of X line up with the coefficient vector.
  if (!is.null(object$scaled_center)) {
    demeaned_covars <-
      scale(
        X[
          ,
          names(object$scaled_center),
          drop = FALSE
        ],
        center = object$scaled_center,
        scale = FALSE
      )
    # Interacted with treatment (the first term label is the treatment)
    treat_name <- attr(object$terms, "term.labels")[1]
    interacted_covars <- X[, treat_name] * demeaned_covars
    # Keep intercept + treatment columns, then append the centered covariates
    # and their interactions, matching lm_lin's fitted column order.
    X <- cbind(
      X[, attr(X, "assign") <= 1, drop = FALSE],
      demeaned_covars,
      interacted_covars
    )
  }
  # Get coefs
  coefs <- as.matrix(coef(object))
  # NA coefficients (dropped/collinear terms) are excluded from the
  # linear predictor below.
  beta_na <- is.na(coefs[, 1])
  # Get predicted values
  preds <- X[, !beta_na, drop = FALSE] %*% coefs[!beta_na, ]
  # Fixed-effects fits store the absorbed effects separately; add them back.
  if (object[["fes"]]) {
    preds <- add_fes(preds, object, newdata)
  }
  predictor <- drop(preds)
  df_resid <- object$df.residual
  interval <- match.arg(interval)
  if (se.fit || interval != "none") {
    # Standard errors / intervals are only defined for a single outcome
    # and are not supported for fixed-effects fits.
    if (ncol(coefs) > 1) {
      stop("Can't set `se.fit` == TRUE with multivariate outcome")
    }
    if (object[["fes"]]) {
      stop("Can't set `se.fit` == TRUE with `fixed_effects`")
    }
    ret <- list()
    # Per-row variance of the fitted value: x' %*% vcov %*% x.
    var_fit <-
      apply(X[, !beta_na, drop = FALSE], 1, function(x) tcrossprod(crossprod(x, object$vcov), x))
    if (interval != "none") {
      # Two-sided critical value at level `alpha` on the residual df.
      tval <- qt(alpha / 2, df_resid, lower.tail = FALSE)
      if (interval == "prediction") {
        # Prediction intervals add the future-observation variance
        # `pred.var` (default: residual variance scaled by 1/weights).
        if (missing(weights)) {
          if (object$weighted && is.null(pred.var)) {
            warning("Assuming constant prediction variance even though model fit is weighted\\n")
          }
          weights <- 1
        } else {
          # `weights` may be a bare column name evaluated in `newdata`.
          weights <- eval(substitute(weights), newdata)
        }
        if (is.null(pred.var)) {
          pred.var <- object$res_var / weights
        }
        hwid <- tval * sqrt(var_fit + pred.var)
      } else if (interval == "confidence") {
        hwid <- tval * sqrt(var_fit)
      }
      # Return fit with lower/upper bounds, mirroring stats::predict.lm.
      predictor <-
        matrix(
          c(
            predictor,
            predictor - hwid,
            predictor + hwid
          ),
          ncol = 3,
          dimnames = list(NULL, c("fit", "lwr", "upr"))
        )
    }
    ret[["fit"]] <- predictor
    if (se.fit) {
      ret[["se.fit"]] <- sqrt(var_fit)
    }
    return(ret)
  } else {
    return(predictor)
  }
}
#' @export
predict.iv_robust <- function(object,
                              newdata,
                              na.action = na.pass,
                              ...) {
  # Design matrix for `newdata` built from the fit's regressor terms.
  design <- get_X(object, newdata, na.action)
  est <- as.matrix(coef(object))
  # Columns with NA estimates (dropped terms) contribute nothing.
  keep <- !is.na(est[, 1])
  yhat <- design[, keep, drop = FALSE] %*% est[keep, ]
  # Fixed-effects fits absorb effects at fit time; add them back in.
  if (object[["fes"]]) {
    yhat <- add_fes(yhat, object, newdata)
  }
  drop(yhat)
}
# Build the model matrix for `newdata` from a fitted object's stored terms.
# IV fits keep a separate regressor-only terms object (`terms_regressors`);
# fall back to the full `terms` otherwise. Fixed-effects fits drop the
# intercept because it is absorbed by the factor dummies.
get_X <- function(object, newdata, na.action) {
  tt <- object[["terms_regressors"]]
  if (is.null(tt)) {
    tt <- object[["terms"]]
  }
  rhs_terms <- delete.response(tt)
  mf <- model.frame(rhs_terms, newdata, na.action = na.action, xlev = object[["xlevels"]])
  # Verify `newdata` column classes match the classes seen at fit time.
  cl <- attr(rhs_terms, "dataClasses")
  if (!is.null(cl)) {
    .checkMFClasses(cl, mf)
  }
  if (object[["fes"]]) {
    attr(rhs_terms, "intercept") <- 0
  }
  model.matrix(rhs_terms, mf, contrasts.arg = object$contrasts)
}
# Add the fitted fixed-effect levels back onto raw predictions.
# Only a single set of fixed effects is supported; multiple sets would need
# the fits stored in `fitted.values` on the model object instead.
add_fes <- function(preds, object, newdata) {
  call_args <- as.list(object[["call"]])
  fe_formula <- call_args[["fixed_effects"]]
  if (length(all.vars(rlang::f_rhs(fe_formula))) > 1) {
    stop(
      "Can't use `predict.lm_robust` with more than one set of ",
      "`fixed_effects`. Can recover fits in `fitted.values` in the model ",
      "object."
    )
  }
  # Coerce the fixed-effect variable(s) in `newdata` to factors, then build
  # a dummy matrix with no intercept (one column per level).
  fe_frame <- stats::model.frame.default(
    fe_formula,
    data = newdata,
    na.action = NULL
  )
  fe_factors <- as.data.frame(lapply(fe_frame, FUN = as.factor))
  femat <- model.matrix(~ 0 + ., data = fe_factors)
  fitted_levels <- names(object[["fixed_effects"]])
  keep_facs <- intersect(fitted_levels, colnames(femat))
  # Levels present in `newdata` but unseen at fit time have no estimate.
  extra_facs <- setdiff(colnames(femat), fitted_levels)
  if (length(extra_facs)) {
    stop(
      "Can't have new levels in `newdata` `fixed_effects` variable, such as: ",
      paste0(extra_facs, collapse = ", ")
    )
  }
  preds +
    femat[, keep_facs] %*%
    object[["fixed_effects"]][keep_facs]
}
|
53d74f5b02c85ac6d20af7ecc334242bf71d536c
|
4846b5b3748b6724d7c379dae7572e9fa90a798d
|
/R/processCapRout.R
|
0c1810d69edb77b361ff8fdd95af1ae23f811700
|
[] |
no_license
|
vbusa1/nearBynding
|
d225bcbdb1541b65c3f01604a1affd8ff51b068a
|
9ccf2b0e7fec87c426cf37fe45077d67abef210a
|
refs/heads/master
| 2023-04-07T19:01:47.323219
| 2021-07-30T17:39:58
| 2021-07-30T17:39:58
| 278,680,217
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,970
|
r
|
processCapRout.R
|
#' @title processCapRout
#'
#' @description Creates context-separated bedGraph files of CapR output for
#' genome and transcriptome alignments.
#'
#' @param CapR_outfile Name of CapR output file. Required
#' @param output_prefix Prefix string to be appended to all output files.
#' Required.
#' @param chrom_size Name of chromosome size file. File must be in two-column
#' format without a header where first column is chromosome name and second
#' column is chromosome length, as from getChainChrSize. Required.
#' @param genome_gtf The name of the GTF/GFF file that contains all exome
#' annotations. Required
#' @param RNA_fragment RNA component of interest. Options depend on the gtf
#' file but often include "gene", "transcript", "exon", "CDS", "five_prime_utr",
#' and/or "three_prime_utr". Default "exon" for the whole exome.
#' @param chain The name of the chain file to be used. Format should be like
#' chain files derived from GRangesMappingToChainFile. Required
#'
#' @return writes bedGraph files of structure signal for each of the six
#' CapR contexts 1) mapped to the genome and 2) lifted-over to the transcriptome
#'
#' @examples
#' ## make chain file
#' load(system.file("extdata/transcript_list.Rda", package="nearBynding"))
#' gtf<-system.file("extdata/Homo_sapiens.GRCh38.chr4&5.gtf",
#' package="nearBynding")
#' GenomeMappingToChainFile(genome_gtf = gtf,
#' out_chain_name = "test.chain",
#' RNA_fragment = "three_prime_utr",
#' transcript_list = transcript_list,
#' alignment = "hg38")
#' ## get chromosome size file
#' getChainChrSize(chain = "test.chain",
#' out_chr = "chr4and5_3UTR.size")
#'
#' processCapRout(CapR_outfile = system.file("extdata/chr4and5_3UTR.out",
#' package="nearBynding"),
#' chain = "test.chain",
#' output_prefix = "chr4and5_3UTR",
#' chrom_size = "chr4and5_3UTR.size",
#' genome_gtf = gtf,
#' RNA_fragment = "three_prime_utr")
#'
#' @importFrom magrittr '%>%'
#' @importFrom utils read.delim
#' @importFrom S4Vectors elementMetadata
#' @importFrom GenomicRanges makeGRangesFromDataFrame
#' @importFrom BiocGenerics strand type
#'
#' @export
processCapRout <- function(CapR_outfile,
                            output_prefix,
                            chrom_size,
                            genome_gtf,
                            RNA_fragment,
                            chain) {
    folded_transcripts_appended <- readLines(CapR_outfile)
    # Transcript headers are FASTA-style ">name" lines; strip the ">".
    transcript_names <- substring(
        folded_transcripts_appended[
            startsWith(folded_transcripts_appended, ">")],
        2
    )
    # Pull the space-separated per-nucleotide scores for one structural
    # context. CapR score lines look like "Bulge 0.01 0.02 ...", so drop the
    # leading tag and keep the rest.
    context_scores <- function(tag) {
        tagged <- folded_transcripts_appended[
            startsWith(folded_transcripts_appended, tag)]
        unlist(lapply(tagged, function(line) {
            fields <- unlist(strsplit(line, " "))
            fields[2:length(fields)]
        }))
    }
    bulge_cut <- context_scores("Bulge")
    exterior_cut <- context_scores("Exterior")
    hairpin_cut <- context_scores("Hairpin")
    internal_cut <- context_scores("Internal")
    multibranch_cut <- context_scores("Multibranch")
    stem_cut <- context_scores("Stem")
    # double check that transcriptome length total (chr size file)
    # equals number of folded transcript positions
    chr_size <- read.delim(chrom_size, header = FALSE)
    if (sum(chr_size$V2) - length(bulge_cut) != 0) {
        stop("The length of the CapR output does not equal the length of
            chromosome size. Make sure that the input chromosome size
            corresponds to the folded CapR transcripts.")
    }
    gtf <- import(genome_gtf)
    gtf <- with(gtf, plyranges::filter(gtf, type == RNA_fragment))
    # Build a shared coordinate "shell": one row per transcriptomic position,
    # holding genomic chr/start/end; scores are filled in per context below.
    bg_shell <- as.data.frame(matrix(NA, ncol = 4, nrow = sum(chr_size$V2)))
    colnames(bg_shell) <- c("chr", "start", "end", "score")
    n <- 1
    for (t in transcript_names) {
        gtf_t <- gtf[(elementMetadata(gtf)[, "transcript_id"] == t)]
        bg_shell[n:(n + sum(gtf_t@ranges@width) - 1), "chr"] <-
            gtf_t@seqnames@values %>% as.character()
        if (gtf_t@strand@values[1] == "+") {
            for (unit in seq_len(length(gtf_t))) {
                gtf_t_unit <- gtf_t[unit]
                bg_shell[n:(n + gtf_t_unit@ranges@width - 1), "start"] <-
                    gtf_t_unit@ranges@start:(gtf_t_unit@ranges@start +
                        gtf_t_unit@ranges@width - 1)
                n <- n + gtf_t_unit@ranges@width
            }
        } else {
            # Minus strand: walk the ranges last-to-first and reverse the
            # coordinates so positions follow transcript orientation.
            for (unit in length(gtf_t):1) {
                gtf_t_unit <- gtf_t[unit]
                bg_shell[n:(n + gtf_t_unit@ranges@width - 1), "start"] <-
                    rev(gtf_t_unit@ranges@start:(gtf_t_unit@ranges@start +
                        gtf_t_unit@ranges@width - 1))
                n <- n + gtf_t_unit@ranges@width
            }
        }
    }
    bg_shell$end <- bg_shell$start
    # Prefix bare chromosome names (e.g. "4") with "chr" for UCSC-style names.
    if (nchar(bg_shell[1, "chr"]) < 3) {
        bg_shell$chr <- paste0("chr", bg_shell$chr)
    }
    # One writer for all six contexts: fill the shared shell with this
    # context's scores, write the genomic bedGraph, then lift it over to
    # transcriptomic coordinates. Replaces six copy-pasted blocks.
    write_context <- function(scores, context) {
        df <- bg_shell
        df$score <- as.numeric(scores)
        gr <- makeGRangesFromDataFrame(df, keep.extra.columns = TRUE)
        bg_file <- paste0(output_prefix, "_", context, ".bedGraph")
        export.bedGraph(gr, bg_file)
        liftOverToExomicBG(
            input = bg_file,
            chrom_size = chrom_size,
            chain = chain,
            output_bg = paste0(output_prefix, "_", context,
                "_liftOver.bedGraph")
        )
    }
    write_context(bulge_cut, "bulge")
    write_context(exterior_cut, "exterior")
    write_context(hairpin_cut, "hairpin")
    write_context(internal_cut, "internal")
    write_context(multibranch_cut, "multibranch")
    write_context(stem_cut, "stem")
}
|
9a0d9fbd46d60f2b68bb37abf30896bf9ca8a8c1
|
99b5eff4ec20e62f531f7f1aa9429ce311fbac6b
|
/R/mod_optimization.R
|
db384edb7c90c6e2035ca7c3fe0aeb6abadc6ad3
|
[] |
no_license
|
Binary117736/RNAdecay
|
1e84079da8b54be04224b0ded528a7bb6edfbf72
|
5eef364514cb76f59ac8af1718b3bd0fd9b125ec
|
refs/heads/master
| 2023-07-10T22:43:08.887802
| 2020-04-20T15:54:52
| 2020-04-20T15:54:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,552
|
r
|
mod_optimization.R
|
################################################################################
#' model optimization for fitting exponential decay models to normalized data
#'
#' The mod_optimization function finds the estimates of model parameters by
#' maximum likelihood, for a single gene on a specified list of models, and
#' saves a tab delimited text file of the results named,'
#' [geneID]_results.txt'.
#' The function does the following for each gene:
#' (1) it calculates log likelihood for each point in a 2 dimensional grid of
#' evenly spaced alpha and beta values within the alpha and beta bounds
#' specified using the null model (in which all treatment alphas are
#' equivalent and all betas are equivalent).
#' (2) it calculates log likelihood for each point in a 1 dimensional range of
#' evenly spaced alpha values within the alpha bounds using the single
#' exponential null model (in which all treatment alphas are equivalent).
#' (3) For each of the grid points with the highest log likelihood from steps
#' (1) and (2) 25 starting parameter value sets that are normally
#' distributed around these points are generated.
#' (4) Parameter values are optimized for maximum likelihood using each of
#' these 50 starting parameter sets using pre-compiled C++ functions loaded
#' from dynamically linked libraries stored in the package on all models
#' specified in the models argument.
#' (5) evaluates parameter estimates of all 50 optimizations based on the
#' reported maximum liklihood upon convergence. Only parameter estimates
#' that converged on the same and highest maximum likelihood are returned.
#' (6) returns the optimized parameter estimates, with model selection
#' statistics.
#'
#' @param gene geneID from \code{data} to be modeled
#' @param data decay data data.frame with columns named 'geneID', 'treatment',
#' 't.decay', 'rep', 'value.'
#' @param models vector specifying which models to run optimization on (e.g.,
#' c('mod1', 'mod239'))
#' @param alpha_bounds vector of length 2 with lower and upper bounds for
#' alpha
#' @param beta_bounds vector of length 2 with lower and upper bounds for beta
#' @param group grouping matrix for alphas or betas
#' @param mod data.frame specifying alpha and beta group pairs for each model
#' @param file_only logical; should output only be written to file (TRUE) or
#' also return a data.frame of the results (FALSE)
#' @param path specify folder for output to be written
#'
#' @useDynLib general_dExp_4sse
#' @useDynLib general_Exp_4sse
#' @useDynLib general_dExp_3sse
#' @useDynLib general_Exp_3sse
#' @useDynLib general_dExp_2sse
#' @useDynLib general_Exp_2sse
#' @useDynLib general_dExp_1sse
#' @useDynLib general_Exp_1sse
#'
#' @export
#'
#' @return returns (if \code{file_only = FALSE}) and writes to \code{path} a
#' data frame of model optimization results for \code{models} one row for
#' each for \code{gene} using values for it found in \code{data}, the columns
#' of the data frame are:
#' geneID, mod (model), model estimates [alpha_treatment1, ...,
#' alpha_treatmentn, beta_treatment1, ..., beta_treatmentn, sigma2],
#' logLik (maximum log likelihood), nPar (number of parameters in the model),
#' nStarts (number of parameter starting value sets (of 50) that converged on
#' a maximum likelihood peak), J (number of parameter starting value sets that
#' converged on the highest - within 1e-4 - maximum likelihood of all
#' parameter starting value sets), range.LL (range of maximum likelihoods
#' values reached by algorithm convergence from all parameter starting value
#' sets), nUnique.LL (number of unique maximum likelihoods values reached by
#' algorithm convergence from all parameter starting value sets), C.alpha (sum
#' of all coefficients of variation for each column of alpha estimates),
#' C.beta (sum of all coefficients of variation for each column of beta
#' estimates), C.tot (C.alpha+C.beta), AICc (calculated from the single
#' highest maximum likelihood of all parameter starting value sets), AICc_est
#' (calculated from the log likelihood value computed by using the mean of
#' each parameter from all optimizations that converged on the highest
#' maximum likelihood of all starting parameter value sets.)
#'
#' @examples
#'
#' mod_optimization(gene = 'Gene_BooFu',
#' data = data.frame(geneID=rep('Gene_BooFu',30),
#' treatment=c(rep('WT',15),rep('mut',15)),
#' t.decay=rep(c(0,7.5,15,30,60),6),
#' rep=rep(paste0('rep',c(rep(1,5),rep(2,5),rep(3,5))),2),
#' value= c(0.9173587, 0.4798672, 0.3327807, 0.1990708, 0.1656554,
#' 0.9407511, 0.7062988, 0.3450886, 0.3176824, 0.2749946,
#' 1.1026497, 0.6156978, 0.4563346, 0.2865779, 0.1680075,
#' 0.8679866, 0.6798788, 0.2683555, 0.5120951, 0.2593122,
#' 1.1348219, 0.8535835, 0.6423996, 0.5308946, 0.4592902,
#' 1.1104068, 0.5966838, 0.3949790, 0.3742632, 0.2613560)),
#' alpha_bounds = c(1e-4,0.75),
#' beta_bounds = c(1e-3,0.075),
#' models = 'mod1',
#' group = t(matrix(c(1,2,1,1,NA,NA),nrow=2,
#' dimnames=list(c('treat1','treat2'),c('mod1','mod2','mod3')))),
#' mod = as.data.frame(t(matrix(c(1,1,1,2,1,3,2,1,2,2,2,3),nrow=2,
#' dimnames=list(c('a','b'),paste0('mod',1:6))))),
#' file_only = FALSE,
#' path = paste0(tempdir(),"/modeling results"))
#'
mod_optimization <- function(gene,
                             data,
                             alpha_bounds,
                             beta_bounds,
                             models,
                             group,
                             mod,
                             file_only = TRUE,
                             path = "modeling_results") {
  if (!file.exists(path)) {
    dir.create(path)
  }
  # genoSet indexes the observations for one treatment (reps x time points);
  # nSet is the total number of observations across all treatments.
  genoSet <- 1:(length(unique(data$rep)) * length(unique(data$t.decay)))
  nTreat <- length(unique(data$treatment))
  nSet <- length(genoSet) * nTreat
  # Only 1-4 treatments are supported (one compiled DLL per treatment count).
  if (nTreat > 4) stop("mod_optimization can only handle up to 4 treatments.")
  gene <- as.character(gene)
  # pull out the data the specific gene data
  gdata <- data[data$geneID == as.character(gene),]
  t <- gdata$t.decay
  m <- gdata$value
  # Equality-constraint functions, one per model, tying parameter groups.
  const <- constraint_fun_list_maker(mods = mod, groups = group)
  eps <- 1e-04
  #### Determine starting points. ####
  ### Define grid for evaluating model 240 while determining starting points.
  A <- seq(0, alpha_bounds[2], by = 0.001)
  ### Define grid for evaluating model 239 while determining starting points.
  a <- seq(alpha_bounds[1], alpha_bounds[2], by = 0.001)
  b <- seq(beta_bounds[1], beta_bounds[2], by = 0.001)
  ### Pick 25 points near the 'peak' of Model null double exponential model. Find the
  ### SSE surface for the double exponential model...
  sse.nulldExp <- sapply(b, function(x) {
    sapply(
      a,
      FUN = sse_null_decaying_decay,
      b = x,
      m = m,
      t = t
    )
  })
  ### Find the location of the minimum.
  loc239 <-
    which(sse.nulldExp == min(sse.nulldExp), arr.ind = TRUE)
  ### Pick 25 points near the 'peak' of Model 240. Find the SSE surface for model
  ### 240...
  sse.nullExp <- sapply(A, FUN = sse_null_const_decay, m = m, t = t)
  ### Find the location of the minimum.
  loc240 <- which(sse.nullExp == min(sse.nullExp))
  ### Create the matrix of starting points. Pick the alpha values.
  # 25 starts scattered around each of the two null-model optima (50 total).
  aX0 <-
    matrix(c(
      stats::rnorm(25 * nTreat, mean = a[loc239[1, "row"]], sd = 0.01),
      stats::rnorm(25 * nTreat, mean = A[loc240], sd = 0.01)
    ), ncol = nTreat)
  ### Fix the alpha values outside of [1e-4,0.75]
  aX0 <- ifelse(aX0 < alpha_bounds[1], alpha_bounds[1], aX0)
  aX0 <- ifelse(aX0 > alpha_bounds[2], alpha_bounds[2], aX0)
  ### Pick the beta values.
  bX0 <-
    matrix(c(
      stats::rnorm(25 * nTreat, mean = b[loc239[1, "col"]], sd = 0.01),
      stats::runif(25 * nTreat, beta_bounds[1], beta_bounds[2])
    ), ncol = nTreat)
  ### Fix the beta values outside of [1e-3,0.075]
  bX0 <- ifelse(bX0 < beta_bounds[1], beta_bounds[1], bX0)
  bX0 <- ifelse(bX0 > beta_bounds[2], beta_bounds[2], bX0)
  ### Combine alpha and beta values into one matrix.
  X0 <- cbind(aX0, bX0)
  colnames(X0) <-
    c(paste0("alpha.int", 1:nTreat), paste0("beta.int", 1:nTreat))
  # set default parameters
  par.default <- c(rep(0, nTreat), rep(1, nTreat))
  names(par.default) <-
    c(paste0("a", 1:nTreat), paste0("b", 1:nTreat))
  # OPTIMIZATION OF THE DOUBLE EXPONENTIAL MODELS
  #### Create the objective function in R. ### This has to be done for each gene! ####
  obj.dExp <- TMB::MakeADFun(
    data = list(t = t, m = m),
    parameters = par.default,
    silent = TRUE,
    DLL = paste0("general_dExp_", nTreat, "sse")
  )
  # Models with a non-maximal beta group are double-exponential models;
  # each is optimized from all 50 starting points.
  results4ab <-
    sapply(models[models %in% rownames(mod)[mod$b != max(mod$b)]], function(x,
                                                                            eps,
                                                                            nSet,
                                                                            md,
                                                                            grp,
                                                                            alpha_bounds,
                                                                            beta_bounds) {
      fits <- apply(X0, 1, function(starts, y) {
        fit <-
          nloptr::slsqp(
            x0 = as.numeric(starts[1:ncol(X0)]),
            fn = obj.dExp$fn,
            gr = obj.dExp$gr,
            heq = const[[x]],
            lower = c(rep(alpha_bounds[1],
                          nTreat), rep(beta_bounds[1], nTreat)),
            upper = c(rep(alpha_bounds[2],
                          nTreat), rep(beta_bounds[2], nTreat))
          )
        unlist(c(
          geneID = gene,
          mod = x,
          as.numeric(fit$par),
          fit$value,
          fit$convergence
        ))
      }, y = x)
      fits <- data.frame(t(fits))
      fits[,-c(1:2)] <- sapply(fits[,-c(1, 2)], function(x)
        as.numeric(as.character(x)))
      fits <- as.data.frame(fits)
      colnames(fits) <- c("geneID", "mod", c(
        paste0("alpha_", as.character(unique(
          gdata$treatment
        ))),
        paste0("beta_", as.character(unique(
          gdata$treatment
        )))
      ), "SSE", "conv.code")
      # Keep only starts that reached nloptr convergence code 4.
      fits <- fits[fits$conv.code == 4,]
      fits$sigma2 <- fit_var(sse = fits$SSE, n = nSet)
      fits$logLik <- log_lik(x = fits$SSE,
                             y = fits$sigma2,
                             n = nSet)
      max.LL <- max(fits$logLik)
      range.LL <- max.LL - min(fits$logLik)
      n.LL <- length(unique(round(fits$logLik, 4)))
      # tmp: the starts that converged on the (same) highest likelihood.
      tmp <- fits[fits$logLik > (max.LL - eps),]
      C.alpha <- comb_cv(tmp[, grep("alpha", colnames(fits))])
      C.beta <- comb_cv(tmp[, grep("beta", colnames(fits))])
      C.tot <- C.alpha + C.beta
      par.est <- colMeans(tmp[, c(grep("alpha", colnames(fits)), grep("beta", colnames(fits)))])
      sigma2 <- mean(tmp$sigma2)
      nPar <- n_par(x, mod = md, group = grp)
      AICc <- aicc(max.LL, nPar, nSet)
      # AICc_est re-evaluates the likelihood at the averaged parameters.
      AICc_est <- aicc(log_lik(
        x = obj.dExp$fn(par.est),
        y = fit_var(sse = obj.dExp$fn(par.est),
                    n = nSet),
        n = nSet
      ),
      p = nPar,
      n = nSet)
      fit <- c(
        as.character(fits$geneID[1]),
        as.character(fits$mod[1]),
        par.est,
        sigma2,
        max.LL,
        nPar,
        nrow(fits),
        nrow(tmp),
        range.LL,
        n.LL,
        C.alpha,
        C.beta,
        C.tot,
        AICc,
        AICc_est
      )
      names(fit) <- c(
        "geneID",
        "mod",
        c(
          paste0("alpha_", as.character(unique(
            gdata$treatment
          ))),
          paste0("beta_", as.character(unique(
            gdata$treatment
          )))
        ),
        "sigma2",
        "logLik",
        "nPar",
        "nStarts",
        "J",
        "range.LL",
        "nUnique.LL",
        "C.alpha",
        "C.beta",
        "C.tot",
        "AICc",
        "AICc_est"
      )
      return(fit)
    }, eps = eps, nSet = nSet, grp = group, md = mod, alpha_bounds = alpha_bounds,
    beta_bounds = beta_bounds)
  results4ab <- as.data.frame(t(results4ab))
  results4ab[,-c(1, 2)] <- sapply(results4ab[,-c(1, 2)], function(x)
    as.numeric(as.character(x)))
  # OPTIMIZATION OF THE SINGLE EXPONENTIAL MODELS
  # Single-exponential models only use alpha starting values; betas are 0.
  X0 <- aX0
  colnames(X0) <- c(paste0("alpha.int", 1:nTreat))
  obj.Exp <- TMB::MakeADFun(
    data = list(t = t, m = m),
    parameters = par.default[1:nTreat],
    silent = TRUE,
    DLL = paste0("general_Exp_", nTreat, "sse")
  )
  # Models in the maximal beta group are single-exponential models.
  results4a <-
    sapply(models[models %in% rownames(mod)[mod$b == max(mod$b)]], function(x,
                                                                            eps, nSet, md, grp, alpha_bounds) {
      fits <- apply(X0, 1, function(starts, y) {
        fit <-
          nloptr::slsqp(
            x0 = as.numeric(starts[1:ncol(X0)]),
            fn = obj.Exp$fn,
            gr = obj.Exp$gr,
            heq = const[[x]],
            lower = rep(alpha_bounds[1], nTreat),
            upper = rep(alpha_bounds[2], nTreat)
          )
        unlist(c(
          geneID = gene,
          mod = x,
          as.numeric(fit$par),
          rep(0, nTreat),
          fit$value,
          fit$convergence
        ))
      }, y = x)
      fits <- data.frame(t(fits))
      fits[,-c(1:2)] <- sapply(fits[,-c(1, 2)], function(x)
        as.numeric(as.character(x)))
      fits <- as.data.frame(fits)
      colnames(fits) <- c("geneID", "mod", c(
        paste0("alpha_", as.character(unique(
          gdata$treatment
        ))),
        paste0("beta_", as.character(unique(
          gdata$treatment
        )))
      ), "SSE", "conv.code")
      # Keep only starts that reached nloptr convergence code 4.
      fits <- fits[fits$conv.code == 4,]
      fits$sigma2 <- fit_var(sse = fits$SSE, n = nSet)
      fits$logLik <- log_lik(x = fits$SSE,
                             y = fits$sigma2,
                             n = nSet)
      max.LL <- max(fits$logLik)
      range.LL <- max.LL - min(fits$logLik)
      n.LL <- length(unique(round(fits$logLik, 4)))
      tmp <- fits[fits$logLik > (max.LL - eps),]
      C.alpha <- comb_cv(tmp[, grep("alpha", colnames(fits))])
      C.beta <- comb_cv(tmp[, grep("beta", colnames(fits))])
      C.tot <- C.alpha + C.beta
      par.est <- colMeans(tmp[, c(grep("alpha", colnames(fits)),
                                  grep("beta", colnames(fits)))])
      sigma2 <- mean(tmp$sigma2)
      nPar <- n_par(x, mod = md, group = grp)
      AICc <- aicc(max.LL, nPar, nSet)
      # Only the alpha parameters feed the single-exponential objective.
      AICc_est <- aicc(log_lik(
        x = obj.Exp$fn(par.est[1:nTreat]),
        y = fit_var(sse = obj.Exp$fn(par.est[1:nTreat]),
                    n = nSet),
        n = nSet
      ),
      p = nPar,
      n = nSet)
      fit <- c(
        as.character(fits$geneID[1]),
        as.character(fits$mod[1]),
        par.est,
        sigma2,
        max.LL,
        nPar,
        nrow(fits),
        nrow(tmp),
        range.LL,
        n.LL,
        C.alpha,
        C.beta,
        C.tot,
        AICc,
        AICc_est
      )
      names(fit) <- c(
        "geneID",
        "mod",
        c(
          paste0("alpha_", as.character(unique(
            gdata$treatment
          ))),
          paste0("beta_", as.character(unique(
            gdata$treatment
          )))
        ),
        "sigma2",
        "logLik",
        "nPar",
        "nStarts",
        "J",
        "range.LL",
        "nUnique.LL",
        "C.alpha",
        "C.beta",
        "C.tot",
        "AICc",
        "AICc_est"
      )
      return(fit)
    }, eps = eps, nSet = nSet, grp = group, md = mod, alpha_bounds = alpha_bounds)
  results4a <- as.data.frame(t(results4a))
  results4a[,-c(1, 2)] <- sapply(results4a[,-c(1, 2)], function(x)
    as.numeric(as.character(x)))
  # Combine both families and order rows by model number before writing.
  results <- rbind(results4a, results4ab)
  results <- results[order(as.numeric(gsub("mod", "", results$mod))),]
  utils::write.table(results, paste0(path, "/", gene, "_results.txt"), sep = "\t")
  cat(gene, "done \n") #; utils::timestamp()
  return(if (file_only)
    invisible(NULL)
  else
    results)
}
|
60222a309319540ad799347aa6f1527465f213c2
|
febb10d22db400a4a9e48d17933759623f4fd7c1
|
/DM/lengthen.R
|
c978fdf3537369de61bc6135029a8db4abe98f90
|
[] |
no_license
|
RCanDo/RPacks
|
8f2692c1622afdc64e5e70297efc8ce726fb89ca
|
05643971c3cea56f9eb076bbfac442f1f2480a92
|
refs/heads/master
| 2021-05-16T16:18:23.881625
| 2021-02-06T07:49:15
| 2021-02-06T07:49:15
| 119,801,561
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 3,592
|
r
|
lengthen.R
|
## ---------------------------------------------------------------------------------------------------------------------•°
## FUNCTIONS HERE
## lengthen()
## trim()
## adjust()
##
## DEPENDENCIES
## none
## ---------------------------------------------------------------------------------------------------------------------•°
## ---------------------------------------------------------------------------------------------------------------------•°
## Pad `x` on the right up to length `n`. Symmetric to trim().
##
## x    vector to extend
## n    target length
## fill value(s) recycled into the new positions; defaults to the last
##      element of `x` (lazily evaluated, so it tracks the actual input)
## adj  if TRUE, also truncate `x` down to `n` when it is already longer
##
## Fixes vs. the previous version: `length.out` is spelled in full (partial
## matching via `length=` is fragile), and truncation uses seq_len(n) so that
## n == 0 correctly yields an empty vector (x[1:0] wrongly kept one element).
lengthen <- function(x, n, fill = rev(x)[1], adj = FALSE) {
  m <- length(x)
  if (m < n) {
    x <- c(x, rep(fill, length.out = n - m))
  } else if (adj) {
    x <- x[seq_len(n)]
  }
  x
} ##----END----##
## ---------------------------------------------------------------------------------------------------------------------•°
## ---------------------------------------------------------------------------------------------------------------------•°
## Truncate `x` to length `n`. Symmetric to lengthen().
##
## x    vector to truncate
## n    target length
## fill value(s) recycled when padding is needed (only used with adj = TRUE);
##      defaults to the last element of `x`
## adj  if TRUE, also pad `x` up to `n` when it is shorter
##
## Fixes vs. the previous version: `length.out` is spelled in full (partial
## matching via `length=` is fragile), and truncation uses seq_len(n) so that
## n == 0 correctly yields an empty vector (x[1:0] wrongly kept one element).
trim <- function(x, n, fill = rev(x)[1], adj = FALSE) {
  if (length(x) >= n) {
    x <- x[seq_len(n)]
  } else if (adj) {
    x <- c(x, rep(fill, length.out = n - length(x)))
  }
  x
} ##----END----##
## ---------------------------------------------------------------------------------------------------------------------•°
## ---------------------------------------------------------------------------------------------------------------------•°
## Force `x` to exactly length `n`: pad with `fill` when shorter, truncate
## when longer. Thin wrapper around lengthen() with adj = TRUE.
adjust <- function(x, n, fill = rev(x)[1]) {
  lengthen(x, n, fill, adj = TRUE)
} ##----END----##
## ---------------------------------------------------------------------------------------------------------------------•°
## ---------------------------------------------------------------------------------------------------------------------•°
## EXAMPLES ############################################################################################################•°
## ---------------------------------------------------------------------------------------------------------------------•°
## ---------------------------------------------------------------------------------------------------------------------•°
dummy = function(){
## This is a dummy function - it is not meant to be run as a whole.
## It wraps a series of example commands exercising the functions defined
## above in this file, so that they need not be (un)commented every session.
## Run the lines interactively, one by one.
## ------------------------------------------------------------------------------------------------•°
( x = sample(4) )
lengthen(x,10)
lengthen(x,10,fill=rev(x)[1]) ## the same; this is the default for 'fill'
lengthen(x,10,fill=x) ## recycles the whole vector, same as
rep(x,length=10)
lengthen(x,10,x) ## 'fill' is the third argument so it may be passed positionally
#
lengthen(x,10,NA)
lengthen(x,10,0)
##
adjust(x,10)
adjust(x,10,x)
## ...
trim(x,10) ## default for 'adj' is FALSE -- then x is not lengthened when n > length(x)
trim(x,10,adj=TRUE)
trim(x,10,NA,adj=TRUE)
trim(x,10,0,adj=TRUE)
trim(x,10,0)
(x = sample(10))
lengthen(x,4) ## default for 'adj' is FALSE -- then x is not trimmed when n < length(x)
lengthen(x,4,adj=TRUE)
adjust(x,4)
trim(x,4)
rep(x,length=4) ## base-R equivalent of trimming to length 4
## --------------------------------------------------------•°
## standard functions
}
## ---------------------------------------------------------------------------------------------------------------------•°
rm(dummy)
|
5caac502f83cb8cdd73776c9a19da440effdca20
|
72d7aa9ad93c5a8400a2ed6ab88afda72acdefd3
|
/Chip_diff/step9_DEG_chip_gene.R
|
12a69192a0365bd719dbecaa54eb5261d6bf8ef3
|
[] |
no_license
|
ZhaoChen96/colorectal-cancer
|
256f2a1c918c038aa68abf3602641632b2386a50
|
9eed8c37298afa4b4b5c124253158950b525afaf
|
refs/heads/master
| 2022-11-21T20:08:15.752283
| 2020-07-21T01:50:09
| 2020-07-21T01:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,005
|
r
|
step9_DEG_chip_gene.R
|
#--------------install package-----------------------------------
# One-time setup helper: ensure BiocManager is available, then install
# biomaRt pinned to Bioconductor 3.8 (the version used by this analysis).
install_biomaRt <- function() {
  have_bioc <- requireNamespace("BiocManager", quietly = TRUE)
  if (!have_bioc) {
    install.packages("BiocManager")
  }
  BiocManager::install("biomaRt", version = "3.8")
}
#--------------analysis-------------------------------------------
library("biomaRt")
library("org.Mm.eg.db")
library("org.Hs.eg.db")
library("DT")
# Preloaded biomart exports used by the mapping helpers below:
# mouse transcript id -> gene id (columns incl. Transcript.stable.ID)
mouse_gene_transcript = read.delim("/home/zhihl/Project/CRC/RNA_analysis/mart_export_mouse_gene_transcript.txt", sep="\t", header=T)
# human gene id <-> mouse gene id ortholog pairs
orthologs_table = read.delim("/home/zhihl/Project/CRC/RNA_analysis/mart_export_humanGene_mouseGene.txt", sep="\t", header=T)
# mouse gene id -> gene symbol
gene_detail = read.delim("/home/zhihl/Project/CRC/RNA_analysis/mart_export_mouse_geneid_symbol.txt", sep="\t", header=T)
# human gene id -> gene symbol
human_gene_detail = read.delim("/home/zhihl/Project/CRC/RNA_analysis/mart_export_human_geneid_symbol.txt", sep="\t", header=T)
#1. export biomart data
# Download the Ensembl gene ID -> gene symbol mapping for a species from
# BioMart and write it to the project's RNA_analysis directory.
#   species: binomial name, e.g. "Mus musculus" or "Homo sapiens"
#   name:    short label used in the output file name, e.g. "mouse", "human"
export_biomart_geneid_symbol = function(species, name){
  # Build the BioMart dataset name, e.g. "mmusculus_gene_ensembl":
  # lower-cased genus initial + species epithet + "_gene_ensembl".
  name_parts = strsplit(species, " ")[[1]]
  genus_initial = tolower(substring(name_parts[1], 1, 1))
  select_dataset = paste(genus_initial, name_parts[2], "_gene_ensembl", sep = "")
  mart = useMart("ensembl", dataset = select_dataset)
  id_symbol_table = getBM(attributes = c("ensembl_gene_id", "external_gene_name"), mart = mart)
  out_file = paste("/home/zhihl/Project/CRC/RNA_analysis/mart_export_", name, "_geneid_symbol.txt", sep = "")
  write.table(id_symbol_table, file = out_file, row.names = T, sep = "\t", col.names = T)
}
#export_biomart_geneid_symbol("Homo sapiens", "human")
#2. convert essemble to entrez
# Translate mouse Ensembl gene IDs to Entrez IDs via org.Mm.eg.db.
# (Name "essemble" is a historical typo kept for callers.)
# Unmatched IDs yield NA, and duplicated Ensembl->Entrez mappings keep
# whichever row match() finds first.
essemble_to_entrez = function (deg_list){
  # Flatten the Bioconductor ENSEMBL->Entrez map into a one-column data
  # frame keyed by Ensembl gene ID (row names).
  entrez_by_ensembl = as.data.frame(unlist(as.list(org.Mm.egENSEMBL2EG)))
  hit_rows = match(deg_list, rownames(entrez_by_ensembl))
  entrez_by_ensembl[hit_rows, ]
}
#3. convert ensembl to symbol
# Map Ensembl gene IDs to gene symbols using the species lookup tables
# loaded at the top of this script (gene_detail / human_gene_detail).
#   name: "mouse" or "human"; any other value errors (no table selected).
# Unmatched IDs come back as NA; result is a plain character vector.
ensembl_to_symbol = function(ensemble_list, name){
  if (name == "mouse") {
    species_table = gene_detail
  }
  if (name == "human") {
    species_table = human_gene_detail
  }
  matched_rows = match(ensemble_list, species_table$ensembl_gene_id)
  as.vector(species_table[matched_rows, "external_gene_name"])
}
#4. convert essemble transcript to gene
# Convert mouse Ensembl transcript IDs to their gene stable IDs using the
# global mouse_gene_transcript table (column 1 = gene ID).
trans2gene = function(deg_list){
  # Re-key the table by transcript ID so transcripts can be looked up
  # directly by row name; unknown transcripts produce NA rows.
  keyed_table = mouse_gene_transcript
  rownames(keyed_table) = keyed_table$Transcript.stable.ID
  keyed_table[deg_list, ][, 1]
}
#5. orthologs of mouse and human
# Map mouse Ensembl gene IDs to their human orthologs via the global
# orthologs_table (column 1 = human gene ID). Returns unique, non-NA IDs.
mouse2human = function(deg_list){
  ortholog_rows = orthologs_table[match(deg_list, orthologs_table$Mouse.gene.stable.ID), ]
  unique(na.omit(ortholog_rows[, 1]))
}
# Inverse of mouse2human(): map human Ensembl gene IDs to mouse orthologs
# via the global orthologs_table (column 3 = mouse gene ID).
human2mouse = function(deg_list){
  ortholog_rows = orthologs_table[match(deg_list, orthologs_table$Gene.stable.ID), ]
  unique(na.omit(ortholog_rows[, 3]))
}
#------------------------------------------------------------
# Translate ChIP peak IDs into the unique, non-missing gene IDs they are
# annotated to in a peak->gene mapping file (tab-separated, with a header
# containing at least columns "ID" and "gene_id").
peak2gene = function(peak_list, peak_gene_path){
  mapping = read.delim(peak_gene_path, sep="\t", header=T)
  matched_rows = mapping[match(peak_list, mapping$ID), ]
  unique(na.omit(matched_rows$gene_id))
}
#6. for Chip
# Identify differential ChIP peaks and map them to genes.
#   df_all:      data frame of differential results, row names = peak IDs
#   col_1:       column name holding the log2 fold change
#   col_2:       column name holding the adjusted p-value
#   fold_change: |log2 fold change| threshold (default 1)
#   id_type:     "ENSEMBL" (default), "SYMBOL" or "ENTREZID"
#   peak_gene:   path to the peak->gene mapping file (passed to peak2gene)
#   padj_cutoff: adjusted p-value threshold; default 0.01 keeps the value
#                that was previously hard-coded, so existing calls behave
#                the same
# Returns list(up_genes, down_genes) in the requested ID type.
diff_gene_for_chip = function(df_all, col_1, col_2, fold_change=1, id_type="ENSEMBL", peak_gene, padj_cutoff=0.01){
  week_df = df_all[, c(col_1, col_2)]
  # na.omit() drops the all-NA rows produced by NA values in the logical index.
  diff_week_up = rownames(na.omit(week_df[week_df[,1] > fold_change & week_df[,2] < padj_cutoff, ]))
  diff_week_down = rownames(na.omit(week_df[week_df[,1] < -(fold_change) & week_df[,2] < padj_cutoff, ]))
  # Map significant peak IDs to gene IDs.
  down_gene = peak2gene(diff_week_down, peak_gene)
  up_gene = peak2gene(diff_week_up, peak_gene)
  if (id_type == "ENSEMBL"){
    return(list(unique(up_gene), unique(down_gene)))
  }
  # Otherwise translate Ensembl IDs to the requested identifier via org.Mm.eg.db.
  cols <- c("SYMBOL", "ENTREZID", "ENSEMBL")
  up_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(up_gene), columns=cols, keytype="ENSEMBL")
  up_list = unique(up_symbol[, id_type])
  down_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(down_gene), columns=cols, keytype="ENSEMBL")
  down_list = unique(down_symbol[, id_type])
  return (list(up_list, down_list))
}
#7. for RNA
# Identify differentially expressed genes from an RNA-seq results table.
#   df_all:      data frame of differential results, row names = Ensembl IDs
#   col_1:       column name holding the log2 fold change
#   col_2:       column name holding the adjusted p-value
#   fold_change: |log2 fold change| threshold (default 1)
#   id_type:     "ENSEMBL" (default), "SYMBOL" or "ENTREZID"
#   padj_cutoff: adjusted p-value threshold; default 0.01 keeps the value
#                that was previously hard-coded, so existing calls behave
#                the same
# Returns list(up_genes, down_genes) in the requested ID type.
diff_gene_in_mouse = function(df_all, col_1, col_2, fold_change=1, id_type="ENSEMBL", padj_cutoff=0.01){
  week_df = df_all[, c(col_1, col_2)]
  # na.omit() drops the all-NA rows produced by NA values in the logical index.
  diff_week_up = rownames(na.omit(week_df[week_df[,1] > fold_change & week_df[,2] < padj_cutoff, ]))
  diff_week_down = rownames(na.omit(week_df[week_df[,1] < -(fold_change) & week_df[,2] < padj_cutoff, ]))
  if (id_type == "ENSEMBL"){
    return(list(unique(diff_week_up), unique(diff_week_down)))
  }
  # Otherwise translate Ensembl IDs to the requested identifier via org.Mm.eg.db.
  cols <- c("SYMBOL", "ENTREZID", "ENSEMBL")
  up_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(diff_week_up), columns=cols, keytype="ENSEMBL")
  up_list = unique(up_symbol[, id_type])
  down_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(diff_week_down), columns=cols, keytype="ENSEMBL")
  down_list = unique(down_symbol[, id_type])
  return (list(up_list, down_list))
}
#8. statistic
# Build a per-mark summary of differential genes across the DSS time course.
#   mark:           histone mark name, e.g. "H3K27ac" (used only for Chip paths)
#   state:          chromatin state directory, e.g. "enhancer" (Chip paths only)
#   diff_gene_func: either diff_gene_in_mouse (RNA) or diff_gene_for_chip (Chip)
#   data_type:      "RNA" or "Chip" -- selects which input files are read
# Returns a list with:
#   $total_table        counts of up/down genes per week and per combination
#   $commom_cancer_list list(up, down) genes shared by weeks 7 and 10
#   $commom_colits_list list(up, down) genes shared by weeks 2 and 4
#   $commom_all_list    list(up, down) genes shared by all four weeks
#   $commom_can_list    cancer-only genes (cancer minus all-week)
#   $commom_col_list    colitis-only genes (colitis minus all-week)
# NOTE(review): input paths are hard-coded absolute paths on the analysis host.
Summary_data = function(mark, state, diff_gene_func, data_type){
summary = list()
if (data_type == "RNA")
{
df_all = read.delim("/home/zhihl/Project/CRC/RNA_analysis/all_diff_data.txt",sep = "\t", header=T)
# One (up, down) gene-list pair per time point vs. control.
diff_genes_10w = diff_gene_func(df_all, "log2FoldChange.10wkVSctrl", "padj.10wkVSctrl", 1, "ENSEMBL")
diff_genes_7w = diff_gene_func(df_all, "log2FoldChange.7wkVSctrl", "padj.7wkVSctrl", 1, "ENSEMBL")
diff_genes_4w = diff_gene_func(df_all, "log2FoldChange.4wkVSctrl", "padj.4wkVSctrl", 1, "ENSEMBL")
diff_genes_2w = diff_gene_func(df_all, "log2FoldChange.2wkVSctrl", "padj.2wkVSctrl", 1, "ENSEMBL")
}
if (data_type == "Chip")
{
df_all = read.delim(paste("/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/all_diff_data_", mark, ".txt", sep=""),sep = "\t", header=T)
#peak gene mapping
peak_gene = paste("/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/", state, "/", state, ".peak.unique.ID.gene_name.bed", sep="")
# Same per-week extraction, but peaks are translated to genes via peak_gene.
diff_genes_10w = diff_gene_func(df_all, "log2FoldChange.10wkVSctrl", "padj.10wkVSctrl", 1, "ENSEMBL", peak_gene)
diff_genes_7w = diff_gene_func(df_all, "log2FoldChange.7wkVSctrl", "padj.7wkVSctrl", 1, "ENSEMBL", peak_gene)
diff_genes_4w = diff_gene_func(df_all, "log2FoldChange.4wkVSctrl", "padj.4wkVSctrl", 1, "ENSEMBL", peak_gene)
diff_genes_2w = diff_gene_func(df_all, "log2FoldChange.2wkVSctrl", "padj.2wkVSctrl", 1, "ENSEMBL", peak_gene)
}
#tummor_vs_control = diff_gene_in_mouse(df_rna, "log2FoldChange.tumorVSctrl", "padj.tumorVSctrl", 1, "ENSEMBL")
#7week and 10week
# "Cancer" signature: genes changed in the same direction at weeks 7 and 10.
commom_cancer_up = intersect(diff_genes_10w[[1]], diff_genes_7w[[1]])
commom_cancer_down = intersect(diff_genes_10w[[2]], diff_genes_7w[[2]])
#2week and 4week
# "Colitis" signature: genes changed in the same direction at weeks 2 and 4.
commom_colits_up = intersect(diff_genes_4w[[1]], diff_genes_2w[[1]])
commom_colits_down = intersect(diff_genes_4w[[2]], diff_genes_2w[[2]])
#2,4,7,10 week
commom_all_up = intersect(commom_cancer_up, commom_colits_up)
commom_all_down = intersect(commom_cancer_down, commom_colits_down)
#only cancer
only_can_up = setdiff(commom_cancer_up,commom_all_up)
only_can_down = setdiff(commom_cancer_down, commom_all_down)
#only colits
only_col_up = setdiff(commom_colits_up,commom_all_up)
only_col_down = setdiff(commom_colits_down, commom_all_down)
#statistic table
# The up=1:9 / down=1:9 values are placeholders; every row is overwritten below.
diff_genes = data.frame(up=1:9, down=1:9, row.names=c("2week", "4week","7week", "10week","Colits_2and4", "Cancer_7and10", "all_week", "only_cancer", "only_colits"))
diff_genes["2week",] = c(length(diff_genes_2w[[1]]), length(diff_genes_2w[[2]]))
diff_genes["4week",] = c(length(diff_genes_4w[[1]]), length(diff_genes_4w[[2]]))
diff_genes["7week",] = c(length(diff_genes_7w[[1]]), length(diff_genes_7w[[2]]))
diff_genes["10week",] = c(length(diff_genes_10w[[1]]), length(diff_genes_10w[[2]]))
diff_genes["Colits_2and4",] = c(length(commom_colits_up), length(commom_colits_down))
diff_genes["Cancer_7and10",] = c(length(commom_cancer_up), length(commom_cancer_down))
diff_genes["all_week",] = c(length(commom_all_up), length(commom_all_down))
diff_genes["only_cancer",] = c(length(only_can_up), length(only_can_down))
diff_genes["only_colits",] = c(length(only_col_up), length(only_col_down))
summary$total_table = diff_genes
summary$commom_colits_list = list(commom_colits_up, commom_colits_down)
summary$commom_cancer_list = list(commom_cancer_up, commom_cancer_down)
summary$commom_col_list = list(only_col_up, only_col_down)
summary$commom_can_list = list(only_can_up, only_can_down)
summary$commom_all_list = list(commom_all_up, commom_all_down)
return(summary)
}
#9.enrichment RNA and Chip
# Build one row of the enrichment table: overlap between an RNA gene list and
# a ChIP gene list, with a hypergeometric enrichment p-value assuming a
# universe of total_genes (20000) genes.
# Returns c(mark, shared, n_rna, universe - n_rna, n_chip, p), coerced to
# character by c() exactly as the original row-wise code did.
.enrich_row = function(rna_genes, chip_genes, mark, total_genes = 20000) {
  shared_n = length(intersect(rna_genes, chip_genes))
  n_rna = length(rna_genes)
  n_chip = length(chip_genes)
  # P(X > shared_n) drawing n_chip genes from n_rna "successes" among total_genes.
  p_value = phyper(shared_n, n_rna, total_genes - n_rna, n_chip, lower.tail = F)
  c(mark, shared_n, n_rna, total_genes - n_rna, n_chip, p_value)
}

# Enrichment of RNA differential genes (list1, from Summary_data) in ChIP
# differential genes (list2) for each state/direction combination.
#   mark: label stored in the "marker" column (e.g. the histone mark name)
# Returns a 6x6 character data frame with rows cancer_up/down, colits_up/down,
# all_up/down. (Refactor of six copy-pasted blocks into .enrich_row.)
enrichment = function (list1, list2, mark){
  diff_genes = data.frame(matrix(NA,6,6), row.names=c("cancer_up", "cancer_down","colits_up", "colits_down","all_up", "all_down"))
  colnames(diff_genes) = c("marker", "shared_number", "RNA_number", "left_gene_number", "chip_gene_number", "P_value")
  diff_genes["cancer_up",]   = .enrich_row(list1$commom_cancer_list[[1]], list2$commom_cancer_list[[1]], mark)
  diff_genes["cancer_down",] = .enrich_row(list1$commom_cancer_list[[2]], list2$commom_cancer_list[[2]], mark)
  diff_genes["colits_up",]   = .enrich_row(list1$commom_colits_list[[1]], list2$commom_colits_list[[1]], mark)
  diff_genes["colits_down",] = .enrich_row(list1$commom_colits_list[[2]], list2$commom_colits_list[[2]], mark)
  diff_genes["all_up",]      = .enrich_row(list1$commom_all_list[[1]], list2$commom_all_list[[1]], mark)
  diff_genes["all_down",]    = .enrich_row(list1$commom_all_list[[2]], list2$commom_all_list[[2]], mark)
  return(diff_genes)
}
#10. two mark result
# Build one section of the two-mark table: genes called in the same direction
# by both marks for one disease state, flagged with whether the RNA-seq list
# agrees. Symbols come from the global gene_detail lookup table.
.two_mark_section = function(marks, state, direction, mark1_genes, mark2_genes, rna_genes){
  shared_genes = intersect(mark1_genes, mark2_genes)
  with_rna = intersect(shared_genes, rna_genes)
  data.frame(two_mark = rep(marks, length(shared_genes)),
             state = rep(state, length(shared_genes)),
             up_down = rep(direction, length(shared_genes)),
             geneID = shared_genes,
             symbol = gene_detail[match(shared_genes, gene_detail$ensembl_gene_id), "external_gene_name"],
             withRNA = ifelse(shared_genes %in% with_rna, "true", "false"))
}

# Genes supported by two ChIP marks (e.g. enhancer + promoter summaries from
# Summary_data) per state (cancer / colits / all) and direction (up / down),
# annotated with RNA-seq agreement.
#   marks: label for the "two_mark" column, e.g. "H3K27ac_H3K4me3"
# Returns rows in the fixed order: cancer up, cancer down, colits up,
# colits down, all up, all down.
# (Refactor of six copy-pasted blocks; the original's unused 18x6 placeholder
# data frame was dropped.)
two_mark = function(summary_enhancer, summary_promoter, summary_rna, marks){
  sections = list(
    list(state = "cancer", slot = "commom_cancer_list"),
    list(state = "colits", slot = "commom_colits_list"),
    list(state = "all",    slot = "commom_all_list"))
  new_df = NULL
  for (sec in sections) {
    for (dir_index in 1:2) {        # [[1]] = up genes, [[2]] = down genes
      part = .two_mark_section(marks, sec$state, c("up", "down")[dir_index],
                               summary_enhancer[[sec$slot]][[dir_index]],
                               summary_promoter[[sec$slot]][[dir_index]],
                               summary_rna[[sec$slot]][[dir_index]])
      new_df = if (is.null(new_df)) part else rbind(new_df, part)
    }
  }
  return(new_df)
}
#11. reverse list
# Swap the up ([[1]]) and down ([[2]]) gene lists of every *_list slot in a
# Summary_data() result; other slots (e.g. $total_table) are left untouched.
# BUG FIX: the original filled commom_colits_list[[1]] from
# commom_cancer_list[[2]] instead of commom_colits_list[[2]].
reverse_list = function(summary_list){
  summary_reversed = summary_list
  list_slots = c("commom_cancer_list", "commom_colits_list", "commom_all_list",
                 "commom_can_list", "commom_col_list")
  for (slot in list_slots) {
    # rev() on a two-element list swaps the up/down entries.
    summary_reversed[[slot]] = rev(summary_list[[slot]])
  }
  return(summary_reversed)
}
#12. gwas gene
# Build one section of the GWAS-overlap table for a given state/direction:
# mouse genes are lifted to human orthologs, intersected with the CRC GWAS
# gene list, and annotated with both species' IDs and symbols.
# NOTE(review): as in the original, mouseGeneID/mouseSymbol come from
# human2mouse() on the hits; if the orthology is not 1:1 their length can
# differ from the human columns and data.frame() will error -- confirm the
# orthology table is 1:1 for the GWAS genes.
.gwas_section = function(gwas_genes, mouse_genes, name, state, direction){
  human_hits = intersect(gwas_genes, mouse2human(mouse_genes))
  length_num = length(human_hits)
  data.frame(GWAS = rep("GWAS", length_num), mark = rep(name, length_num),
             state = rep(state, length_num), up_down = rep(direction, length_num),
             mouseGeneID = human2mouse(human_hits),
             mouseSymbol = ensembl_to_symbol(human2mouse(human_hits), "mouse"),
             humanGeneID = human_hits,
             humanSymbol = ensembl_to_symbol(human_hits, "human"))
}

# Overlap of a Summary_data() result with the CRC GWAS gene list.
#   summary_rna: any Summary_data() result (RNA or ChIP marks)
#   name:        label for the "mark" column, e.g. "rna", "enhancer"
# Returns rows in the fixed order: cancer up, cancer down, colits up,
# colits down, all up, all down.
# (Refactor of six copy-pasted blocks into .gwas_section.)
gwas_gene = function(summary_rna, name){
  path_gwas = "/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/gwas_gene/gene_list.txt"
  CRCgwasList = as.vector(read.delim(path_gwas, sep = "\t", header=F)$V1)
  new_df = NULL
  for (spec in list(c("cancer", "commom_cancer_list"),
                    c("colits", "commom_colits_list"),
                    c("all",    "commom_all_list"))) {
    for (dir_index in 1:2) {        # [[1]] = up genes, [[2]] = down genes
      part = .gwas_section(CRCgwasList,
                           summary_rna[[spec[2]]][[dir_index]],
                           name, spec[1], c("up", "down")[dir_index])
      new_df = if (is.null(new_df)) part else rbind(new_df, part)
    }
  }
  return(new_df)
}
#13. add gene symbol to source data
# Prepend gene symbol (and ID) columns to a differential-results table and
# write it out as TSV.
#   table_path:     input table; row names are Ensembl gene IDs (RNA) or
#                   peak IDs (Chip)
#   type:           "RNA" or "Chip"
#   out_path:       output TSV path
#   peak_gene_path: peak->gene mapping file; only read when type == "Chip"
# Note: the local variable names symbol / ensemblID / peak_id become the new
# column names via cbind.data.frame, so they must not be renamed.
add_symbol = function(table_path, type, out_path, peak_gene_path){
  df_all = read.delim(table_path, sep = "\t", header=T)
  if (type == "RNA"){
    # Rows are already keyed by Ensembl gene ID.
    ensemblID = rownames(df_all)
    symbol = ensembl_to_symbol(ensemblID, "mouse")
    new_df = cbind.data.frame(symbol, cbind.data.frame(ensemblID, df_all))
  }
  if (type == "Chip"){
    # Rows are keyed by peak ID; translate peak -> gene via the mapping file.
    mapping = read.delim(peak_gene_path, sep="\t", header=T)
    peak_id = rownames(df_all)
    ensemblID = mapping[match(peak_id, mapping$ID), "gene_id"]
    symbol = ensembl_to_symbol(ensemblID, "mouse")
    new_df = cbind.data.frame(symbol, cbind.data.frame(ensemblID, cbind.data.frame(peak_id, df_all)))
  }
  write.table(new_df, out_path, sep="\t", quote=F, row.names=F, col.names = T, na="NA", eol="\n")
}
# Annotate each differential table with gene symbols and write the final
# result files. The RNA table needs no peak->gene mapping (empty 4th arg);
# each ChIP mark uses the mapping for its chromatin state.
add_symbol("/home/zhihl/Project/CRC/RNA_analysis/all_diff_data.txt", "RNA", "/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/final_result/all_diff_data_add_symbol.txt", "")
# H3K27ac -> enhancer state.
add_symbol("/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/all_diff_data_H3K27ac.txt", "Chip",
"/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/final_result/all_diff_data_H3K27ac_add_symbol.txt",
"/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/enhancer/enhancer.peak.unique.ID.gene_name.bed")
# H3K4me3 -> promoter state.
add_symbol("/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/all_diff_data_H3K4me3.txt", "Chip",
"/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/final_result/all_diff_data_H3K4me3_add_symbol.txt",
"/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/promoter/promoter.peak.unique.ID.gene_name.bed")
# H3K27me3 -> repressed state.
add_symbol("/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/all_diff_data_H3K27me3.txt", "Chip",
"/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/final_result/all_diff_data_H3K27me3_add_symbol.txt",
"/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/repressed/repressed.peak.unique.ID.gene_name.bed")
# H3K9me3 -> heterochromatin state.
add_symbol("/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/all_diff_data_H3K9me3.txt", "Chip",
"/home/zhihl/Project/CRC/Chip_analysis/dff/version0821/final_result/all_diff_data_H3K9me3_add_symbol.txt",
"/home/zhihl/Project/CRC/Chip_analysis/peak_dir_0821/heterochromatin/heterochromatin.peak.unique.ID.gene_name.bed")
#-------------------------other GWAS data-------------------------------------------------------------------------------------
#human_orthologs = mouse2human(commom_ccan_up)
#cols <- c("SYMBOL", "ENTREZID", "ENSEMBL")
#up_symbol = AnnotationDbi::select(org.Hs.eg.db, keys=as.vector(human_orthologs), columns=cols, keytype="ENSEMBL")
#up_list = unique(up_symbol[, "SYMBOL"])
#CRCgwasList = read.delim("/home/zhihl/Project/CRC/Chip_analysis/peak_gene_mapping/CRCgwasGene.txt",sep = "\t", header=F)
#CRCgwasList = read.delim("/home/zhihl/Project/CRC/Chip_analysis/peak_gene_mapping/CRCmappedGenes.txt",sep = "\t", header=F)
#CRCgwasList = AnnotationDbi::select(org.Hs.eg.db, keys=as.vector(CRCgwasList$V1), columns=cols, keytype="ENSEMBL")
#CRCgwasList = unique(CRCgwasList[, "SYMBOL"])
#intersect(CRCgwasList, up_list)
#--------------------------translate-------------------------------------------------------------------------------------------
# translate to symbol
#cols <- c("SYMBOL", "ENTREZID", "ENSEMBL")
#up_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(identy5), columns=cols, keytype="ENSEMBL")
#up_symbol = AnnotationDbi::select(org.Mm.eg.db, keys=as.vector(identy6), columns=cols, keytype="ENSEMBL")
|
d48505ed397c04f1344cffcad31fb0c4a30fa86f
|
4f49d7b50d6b3f1e6bb9abc964630bd40641fa82
|
/import_data.R
|
23570d5f1a9b5899520013ed4b8ec117ed8c6cd7
|
[] |
no_license
|
cfia-data-science/OGP-Summit-Hackathon-Sommet-PGO
|
ff15ba84c3ccff6229afbe3cc91dade4c1f5cadf
|
ab4a92f549c6c74e5f982df4614fc228400632ac
|
refs/heads/master
| 2020-05-28T05:10:25.796017
| 2019-05-29T19:39:44
| 2019-05-29T19:39:44
| 188,889,726
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 269
|
r
|
import_data.R
|
# Load US total-merchandise-trade export data (absolute values and shares);
# variable names suggest a UN trade-statistics source -- TODO confirm.
# check.names = FALSE keeps column headers (e.g. numeric years) exactly as
# they appear in the CSV instead of prefixing them with "X".
dataUN_export <- read.csv("./ogp_data/us_trademerchtotal_export.csv", head = TRUE, check.names = FALSE, sep = ",", quote = "\"")
dataUN_export_share <- read.csv("./ogp_data/us_trademerchtotal_export_share.csv", head = TRUE, check.names = FALSE, sep = ",", quote = "\"")
|
f3860cf6d052bba989dd6fb107b5e0b114375b9d
|
c55294b3f89a774f7600a7c594c796fe059fc99e
|
/Older scripts for models/n_prox_1m.R
|
0a106c594c1d9a1d6454082aa83eac348346627f
|
[] |
no_license
|
ardenice916/Spider_monkey_stats
|
1e571f1bd83b35f24f76c9475740766996afb98a
|
c6674ad820a045c2224c6cead389ba5de1dc7d53
|
refs/heads/master
| 2020-03-21T10:35:11.686763
| 2019-08-10T18:08:19
| 2019-08-10T18:08:19
| 138,459,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,806
|
r
|
n_prox_1m.R
|
#COMPLETE ANALYSIS OF 2016 WILDTRACKS DATA, FEBRUARY 2018
#set working directory
# NOTE(review): setwd() plus unconditional install.packages() calls re-install
# every package on every run and make the script non-portable; consider
# requireNamespace() guards (or an renv lockfile) instead.
setwd("~/Spider_monkey_stats")
#install packages
install.packages("readxl")
install.packages("readr")
install.packages("dplyr")
install.packages("tidyr")
install.packages("nortest")
install.packages("lattice")
install.packages("nlme")
install.packages("lme4")
install.packages("lmerTest")
install.packages("multcomp")
install.packages("igraph")
install.packages("ggplot2")
install.packages("MASS")
install.packages("pscl")
install.packages("GLMMadaptive")
#load useful packages
library(readxl)
library(readr)
library(dplyr)
library(tidyr)
library(nortest)
library(lattice)
library(nlme)
library(lme4)
library(lmerTest)
library(multcomp)
library(igraph)
library(ggplot2)
library(MASS)
library(pscl)
library(GLMMadaptive)
#import code
# Read the scan-sample data; categorical columns get explicit factor levels so
# the first level is the model's reference category in the GLMMs below.
scans <- read.table(file="scans_Wildtracks.csv", sep=",", header=T)#alternate import code
scans$n_encl<-factor(scans$n_encl, levels=c("1","2"))
scans$location_focal<-factor(scans$location_focal, levels=c("satellite","center"))
scans$month<-factor(scans$month, levels=c("jun_jul","jul_aug","aug_sep"))
scans$time_meal<-factor(scans$time_meal, levels=c("none","before","after"))
scans$observer_id<-factor(scans$observer_id, levels=c("916","1231"))
# NOTE(review): this compares against the literal string "NA"; if missing
# values are real NA, filter(!is.na(n_prox_1m)) is the idiomatic form --
# confirm how the CSV encodes missing values.
scans <- scans %>% filter(n_prox_1m!="NA")
#view and summarize
View(scans)
summary(scans)
# Nesting factor: focal individual within group (used for random effects).
scans$nest <- with(scans, factor(paste(focal_group,focal_id)))
#check n_prox_1m variable
# Exploratory look at the response (count of neighbors within 1 m).
str(scans)
str(scans$n_prox_1m)
summary(scans$n_prox_1m)
print(scans$n_prox_1m)
histogram(scans$n_prox_1m)
plot(scans$n_prox_1m)
ggplot(scans, aes(x = n_prox_1m)) + geom_bar() + facet_wrap(location_focal ~ n_encl)
#begin to fit models
# M0: Poisson GLMM, individual nested within group as random intercepts.
M0_n_prox_1m<-glmer(n_prox_1m ~ n_encl + location_focal + time_meal + focal_sex + focal_age + month +
(1|focal_group/focal_id), na.action=na.omit, data=scans, family = poisson)
# M1: same fixed effects, group-level random intercept only.
M1_n_prox_1m<-glmer(n_prox_1m ~ n_encl + location_focal + time_meal + focal_sex + focal_age + month +
(1|focal_group),
na.action=na.omit, data=scans, family = poisson)
# NOTE(review): in pscl::zeroinfl the "|" separates the count model from the
# zero-inflation model -- "+ 1|focal_group" is not a random effect here.
# The author's own comment below suggests this was noticed.
M2_n_prox_1m<-zeroinfl(n_prox_1m ~ n_encl + location_focal + time_meal +
focal_sex + focal_age + month + 1|focal_group,
data=scans, na.action=na.omit)
M3_n_prox_1m<-zeroinfl(n_prox_1m ~ n_encl + location_focal + time_meal +
focal_sex + focal_age + month + 1|focal_id,
data=scans, na.action=na.omit)
anova(M0_n_prox_1m,M1_n_prox_1m,M2_n_prox_1m)
#can't compare across packages?
summary(M2_n_prox_1m,M3_n_prox_1m)
#M2_n_prox_1m<-zeroinfl(n_prox_1m ~ n_encl + location_focal +
# time_meal + focal_sex + focal_age +
# month +(1|focal_group/focal_id), data=scans, na.action=na.omit)
#zeroinfl() doesn't accept random factors?
#try GLMMadaptive package:
# Refit M2/M3 as mixed zero-inflated models (ZIP and ZINB) with nested random
# intercepts; zero-inflation part depends on n_encl. Note this overwrites the
# pscl fits above.
M2_n_prox_1m<- mixed_model(n_prox_1m ~ n_encl + location_focal + time_meal + focal_sex +
focal_age + month, random = ~ 1 | focal_group/focal_id,
zi_fixed = ~ n_encl, data = scans, family = zi.poisson())
M3_n_prox_1m<- mixed_model(n_prox_1m ~ n_encl + location_focal + time_meal + focal_sex +
focal_age + month, random = ~ 1 | focal_group/focal_id,
zi_fixed = ~ n_encl, data = scans, family = zi.negative.binomial())
#anova
# Short aliases for the candidate models ("p1" = proximity-within-1m response).
M1_p1<-M1_n_prox_1m
M2_p1<-M2_n_prox_1m
M3_p1<-M3_n_prox_1m
M0_p1<-M0_n_prox_1m
anova(M0_p1,M1_p1)
#M0 better than M1 as expected
anova(M2_p1,M3_p1)
#ZINB M3 looks better than ZIP M2
anova(M0_p1, M3_p1)
#anova function not comparing M0 to M3? glmer and GLMMadaptive not compatible?
#M0 and M3 have lowest AIc
# Residual diagnostics for the Poisson GLMM (M0): Pearson residuals against
# each covariate, plus normality checks.
E0<-residuals(M0_p1, type = "pearson")
str(E0)
E0
summary(E0)
plot(scans$n_encl, E0, xlab="# Enclosures", ylab="Residuals")
plot(scans$location_focal, E0, xlab="Location", ylab="Residuals")
plot(scans$month, E0, xlab="Month", ylab="Residuals")
plot(scans$time_meal, E0, xlab="Meal Status", ylab="Residuals")
plot(scans$focal_sex, E0, xlab="Focal Sex", ylab="Residuals")
plot(scans$focal_age, E0, xlab="Focal Age", ylab="Residuals")
qqnorm(E0)
qqline(E0)
ad.test(E0)
plot(M0_p1)
plot(scans$n_prox_1m, E0)
#those residuals look atrocious... that model clearly did not work haha
# ZERO-INFLATED...
# Same diagnostic sequence for the ZINB mixed model (M3); residuals() here
# uses the GLMMadaptive default type.
E3<-residuals(M3_p1)
str(E3)
E3
summary(E3)
plot(scans$n_encl, E3, xlab="# Enclosures", ylab="Residuals")
plot(scans$location_focal, E3, xlab="Location", ylab="Residuals")
plot(scans$month, E3, xlab="Month", ylab="Residuals")
plot(scans$time_meal, E3, xlab="Meal Status", ylab="Residuals")
plot(scans$focal_sex, E3, xlab="Focal Sex", ylab="Residuals")
plot(scans$focal_age, E3, xlab="Focal Age", ylab="Residuals")
qqnorm(E3)
qqline(E3)
ad.test(E3)
plot(M3_p1)
plot(scans$n_prox_1m, E3)
#what is going on?
# Overdispersion check for a fitted model: Pearson chi-square statistic,
# dispersion ratio (chisq / residual df, > 1 suggests overdispersion),
# residual degrees of freedom, and the chi-square p-value.
# BUG FIX: the original body ignored its argument and always used the global
# M0_n_prox_1m; it now evaluates the model actually passed in.
# (The argument keeps its original name so existing named calls still work.)
overdisp_fun <- function(M3_p1) {
  model <- M3_p1
  rdf <- df.residual(model)
  rp <- residuals(model, type="pearson")
  Pearson.chisq <- sum(rp^2)
  prat <- Pearson.chisq/rdf
  pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE)
  c(chisq=Pearson.chisq, ratio=prat, rdf=rdf, p=pval)
}
overdisp_fun(M3_p1)
#drop factors
# Refit the full ZINB model, then a reduced model without time_meal, and
# compare them by likelihood ratio / information criteria.
M3_p1<- mixed_model(n_prox_1m ~ n_encl + location_focal + time_meal + focal_sex +
focal_age + month, random = ~ 1 | focal_group/focal_id,
zi_fixed = ~ n_encl, data = scans, family = zi.negative.binomial())
summary(M3_p1)
M3a_p1<-mixed_model(n_prox_1m ~ n_encl + location_focal + focal_sex + focal_age + month,
random = ~ 1 | focal_group/focal_id, zi_fixed = ~ n_encl,
data = scans, family = zi.negative.binomial())
summary(M3a_p1)
anova(M3_p1,M3a_p1)
|
53c317e4fd385e6e323be846f4586ab93b0fbab7
|
0fe99bf08805179f25232de3cfacd28ee2a12ed4
|
/rmd/functions/cv_da.R
|
3f326afe36bd20b6d7b9aebd6939391a36de2f4a
|
[] |
no_license
|
jerry-ye-xu/stat3014_nutm3004_major_project
|
7f716fe3ca946aae25d6e1c344a55708b0adf31e
|
a4c8ac1c250e42786088368882e3b653e9187878
|
refs/heads/master
| 2020-03-28T10:09:58.611234
| 2018-10-26T12:57:58
| 2018-10-26T12:57:58
| 148,087,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,186
|
r
|
cv_da.R
|
library(MASS) # Discriminant analysis
source("./functions/cv.R")
# V-fold cross-validated misclassification rate for discriminant analysis.
#   X:      data frame of predictors
#   y:      class-label response, length nrow(X)
#   method: "lda" or "qda"
#   V:      number of folds
#   seed:   optional RNG seed for reproducible fold assignment
# Uses cvFolds() from ./functions/cv.R and lda()/qda() from MASS.
# Returns total misclassified observations across folds divided by n.
cv_da = function(X,y,method=c("lda","qda"),V,seed=NA)
{
  # Resolve the method argument. The default is a length-2 vector, which the
  # original compared with "==" -- a length-2 if() condition (warning before
  # R 4.2, an error from R 4.2 on). match.arg() picks "lda" by default and
  # rejects invalid choices with a clear message.
  method <- match.arg(method)
  # Set the seed
  if (!is.na(seed)) {
    set.seed(seed)
  }
  n = length(y)
  # Split the data up into V folds
  cvSets <- cvFolds(n, V)
  # Misclassification count per fold (preallocated instead of grown with c()).
  test.error.da <- numeric(V)
  for (i in seq_len(V))
  {
    # Indices of the test and training sets for this fold.
    testInds <- cvSets$subsets[which(cvSets$which==i)]
    trainInds <- (1:n)[-testInds]
    y.test <- y[testInds]
    X.test <- X[testInds, ]
    # Fit on the training rows only (subset=); y is taken from this scope.
    if (method == "lda") {
      res <- lda(y~., data=X, subset=trainInds)
    } else {
      res <- qda(y~., data=X, subset=trainInds)
    }
    results.da = predict(res, X.test)$class
    # Number of test-set misclassifications in this fold.
    test.error.da[i] <- sum(results.da != y.test)
  }
  # Overall error rate: misclassified observations over all n.
  cv.error = sum(test.error.da)/n
  return(cv.error)
}
|
42efe7f4f10e76d2a57bd2b18e32b07a61f5d368
|
b5a685ab79de211822dc5216d4ce63bd040f2b48
|
/man/redisHGet.Rd
|
9bec12afa88a979cef2d1b00b8b2756a75de745a
|
[
"Apache-2.0"
] |
permissive
|
mcroiban/rredis
|
c0e99712b1478821cda8bc3b3dd5029982bcf8f6
|
51ac7c4f9c065b262751f55851cb30344388e6b2
|
refs/heads/master
| 2020-12-25T13:51:07.129996
| 2013-03-19T12:01:19
| 2013-03-19T12:01:19
| 10,570,879
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 793
|
rd
|
redisHGet.Rd
|
\name{redisHGet}
\alias{redisHGet}
\title{Retrieve a hased value from Redis.}
\description{Retrieve a value identified by a key and field
from the Redis database.
}
\usage{
redisHGet(key, field, raw = FALSE)
}
\arguments{
\item{key}{
A key name.
}
\item{field}{
A field name.
}
\item{raw}{
Set \code{raw=TRUE} to skip de-serialization of the data.
}
}
\details{
Redis hash values store values in one or more fields associated with a single
key name.
}
\value{
The value corresponding to the specified key/field,
or NULL if the matching key/field hash contained no value
or if no matching key or field was found.
}
\references{
http://redis.io/commands
}
\author{
B. W. Lewis
}
\seealso{
\code{\link{redisHSet}}
}
\examples{
\dontrun{
redisHSet('A','x',runif(5))
redisHGet('A','x')
}
}
|
12525e4abdd4e595b5d29f08181bbb27ecd1a807
|
176930f8e95c1571446fb8031b92c913a81ae020
|
/2020/week_25/week25_black_population.R
|
6c5b5825e856169201a5b085365bc6cfaef6b070
|
[] |
no_license
|
inkyscope/TidyTuesday-1
|
656489c99d7d0f8f4e609ea6fc8ab73838e772da
|
7e86f5f795882c64a1d7ab0b357c34abc50fd4dc
|
refs/heads/master
| 2023-02-26T11:51:11.313834
| 2021-02-03T21:43:42
| 2021-02-03T21:43:42
| 193,203,574
| 1
| 0
| null | 2019-06-22T07:10:23
| 2019-06-22T07:10:23
| null |
UTF-8
|
R
| false
| false
| 1,539
|
r
|
week25_black_population.R
|
library(tidyverse)
# TidyTuesday 2020, week 25: stacked bar chart of the free vs. enslaved
# Black population recorded by the US Census, 1790-1870.
census <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-06-16/census.csv')

# Keep only the national totals and reshape the two population columns
# into long format: one row per (year, status) pair.
census_long <-
  census %>%
  filter(region == "USA Total") %>%
  pivot_longer(
    cols = c("black_free", "black_slaves"),
    names_to = "black_",
    values_to = "population"
  )

# Build the stacked column chart. NOTE(review): theme_ybn() is not defined
# in this script — it is assumed to come from the author's environment.
pop_chart <-
  census_long %>%
  ggplot(aes(year, population, fill = black_)) +
  geom_col(width = 5) +
  scale_x_continuous(breaks = seq(1790, 1870, by = 10)) +
  scale_y_continuous(
    name = "Population",
    labels = scales::comma_format(),
    n.breaks = 10,
    expand = c(0, 1),
    position = "right"
  ) +
  # Dark/light teal palette with human-readable legend labels
  scale_fill_manual(
    values = c("#008F77", "#072220"),
    labels = c("Free", "Slaves")
  ) +
  labs(
    title = "Black Population in the USA",
    subtitle = "1790 - 1870",
    caption = "Source: US Census's Archives | Graphic: @ysamano28"
  ) +
  theme_ybn(
    title_size = 23,
    title_face = "bold",
    title_hjust = 0.5,
    caption_hjust = 0.5
  ) +
  theme(
    axis.title.x = element_blank(),
    axis.title.y = element_text(hjust = 0),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    legend.spacing.x = unit(0.5, 'cm'),
    legend.text = element_text(size = 12),
    legend.title = element_blank(),
    legend.position = "top"
  )

# Write the finished chart to disk at letter-page proportions
ggsave("2020/week_25/black_population.png", pop_chart, height = 11, width = 8.5, units = "in", type = "cairo")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.