blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b2bb914fa8398bfac1bab4e3425d200e663c90cd
|
a19663c25470f4b4165fea8313f880f44481e887
|
/edu_stats_report2021/Params_reports56/render.R
|
125b4f289d5eaaeb65a256e0e79f07fdd74ac22f
|
[] |
no_license
|
perlatex/tidy_easyuse
|
e9322e75969c3f5802236afa6159aa355c6a42d2
|
00a90fcd68b6d121b184a507fd457e83e477185d
|
refs/heads/master
| 2022-01-03T09:25:53.661346
| 2021-12-28T10:01:55
| 2021-12-28T10:01:55
| 231,519,749
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,207
|
r
|
render.R
|
# Build one parameterized report per school: load the grade-5 data and give
# each school a zero-padded sequence number used as a filename prefix.
library(tidyverse)

# df5_start is expected to be defined inside this .Rdata file -- TODO confirm
load("../data/myData_df5.Rdata")

df <- df5_start %>%
  distinct(school) %>%
  # Zero-pad the row number to width 2 ("01", "02", ...) so output files sort
  # naturally. (The superseded mutate_at()/vars() idiom is replaced by
  # computing the column directly.)
  mutate(title = str_pad(row_number(), 2, pad = "0"))

# Start from a clean output directory on every run.
if (fs::dir_exists("output_56")) {
  fs::dir_delete("output_56")
}
fs::dir_create("output_56")
# "่ๅฐๅญ็พๅๆ ก" ๅชๆ5ๅนด็บง๏ผๆฒกๆ6ๅนด็บง็ๆฐๆฎ
df_a <- df %>%
filter(school != "่ๅฐๅญ็พๅๆ ก")
df_b <- df %>%
filter(school == "่ๅฐๅญ็พๅๆ ก")
#######################################################
# Knit the "sequential" report for a single school.
# The parameter names `school` and `title` must be kept: pmap() matches them
# to the identically named columns of df_a.
render_report_a <- function(school, title) {
  out_path <- paste0("./output_56/", title, "-", school, ".docx")
  rmarkdown::render(
    "main_reports_sequential.Rmd",
    params = list(set_school = school),
    output_file = out_path
  )
}

# One rendered .docx per row (school) of df_a.
pmap(df_a, render_report_a)
#######################################################
#######################################################
# Knit the "diverging" report for the special-case school (grade 5 only).
# Parameter names `school` and `title` must match the columns of df_b.
render_report_b <- function(school, title) {
  target <- paste0("./output_56/", title, "-", school, ".docx")
  rmarkdown::render(
    "main_reports_diverging.Rmd",
    params = list(set_school = school),
    output_file = target
  )
}

# One rendered .docx per row (school) of df_b.
pmap(df_b, render_report_b)
#######################################################
|
3c91db970ec3ba6842b88ca3943bfde44ffce568
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/caret/examples/twoClassSim.Rd.R
|
40b28916b690d4fbf5204d865c10a4b76b9c1c70
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
twoClassSim.Rd.R
|
# Extracted example from the caret help page covering the simulation
# functions (SLC14_1, SLC14_2, LPH07_1, LPH07_2, twoClassSim).
library(caret)
### Name: SLC14_1
### Title: Simulation Functions
### Aliases: SLC14_1 SLC14_2 LPH07_1 LPH07_2 twoClassSim
### Keywords: models
### ** Examples
# Simulate a 100-row two-class data set with one extra linear predictor.
example <- twoClassSim(100, linearVars = 1)
# Scatter-plot matrix of the first six columns, colored by class.
# NOTE(review): splom() comes from lattice, presumably attached via caret --
# confirm it is on the search path when run standalone.
splom(~example[, 1:6], groups = example$Class)
|
03d4000184182c25d00e87fab384be0abb0f3165
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rhli/examples/cfmini.Rd.R
|
b32542fc269f620d15fb532400c37d104ec0ed1d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 133
|
r
|
cfmini.Rd.R
|
# Extracted example from the rhli help page for cfmini.
library(rhli)
### Name: cfmini
### Title: 'cfmini'
### Aliases: cfmini
### ** Examples
# NOTE(review): Integer() appears to construct a mutable integer wrapper that
# cfmini() fills as an out-parameter (initialized to -1L) -- confirm against
# the rhli documentation.
status <- Integer(-1L)
cfmini(status)
|
be62c96da7f3cded2016e6514ebbb6fa24570ba3
|
3ae1cafa31be5daef34d0793ec55811ae78c3785
|
/plot4.R
|
141b25e1b7f5f6766a3028c18b3cfb08ddc4ec71
|
[] |
no_license
|
Leandres/ExData_Plotting1
|
41bfff0d8303f257faf8de9de4662db4b0f12d94
|
b398b1d1cd40f1ea446db658c5ee19f1bc86cf55
|
refs/heads/master
| 2021-01-17T11:20:51.684986
| 2015-01-11T20:49:40
| 2015-01-11T20:49:40
| 29,100,850
| 0
| 0
| null | 2015-01-11T18:14:29
| 2015-01-11T18:14:29
| null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
plot4.R
|
setwd("C:/.")
# For code on loading data and creating new data set see plot1.R
# Create plot 4
png("plot4.png", width=480, height=480)
par(mfrow = c(2,2))
# Plot 4.1
plot(NewHHpowcon$Time, NewHHpowcon$Global_active_power, type="l",
xlab="", ylab="Global Active Power")
# Plot 4.2
plot(NewHHpowcon$Time, NewHHpowcon$Voltage, type = "l",
xlab = "datetime", ylab = "Voltage")
# Plot 4.3
plot(NewHHpowcon$Time, NewHHpowcon$Sub_metering_1, type = "l", xlab="",
ylab="Energy sub metering")
lines(NewHHpowcon$Time, NewHHpowcon$Sub_metering_1, col="black")
lines(NewHHpowcon$Time, NewHHpowcon$Sub_metering_2, col="red")
lines(NewHHpowcon$Time, NewHHpowcon$Sub_metering_3, col="blue")
legend("topright", lwd=1, pt.cex = 0.6, cex = 0.6, bty = "n",
col=c("black","blue","red"),
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Plot 4.4
plot(NewHHpowcon$Time, NewHHpowcon$Global_reactive_power, type = "l",
xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
8d5a8f2822051265877fcc0f15dc1bc2fd2925ef
|
9b5dedcbde7f4d0a5ec40439198e60551e68d6dc
|
/code.R
|
fe4297dbbed14470f82ee16e72e6fc70e9169f5a
|
[] |
no_license
|
conjell/gitTEM
|
4e5e995c47a4cc085174ac868efcfa2412490c65
|
aba48fe345629ad9de199a0bef9822bd98ffbb0e
|
refs/heads/master
| 2020-08-29T04:39:55.948382
| 2019-10-28T23:42:59
| 2019-10-28T23:42:59
| 217,930,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,565
|
r
|
code.R
|
setwd("~/Desktop/")
#install.packages('brnn')
library('brnn')
raw_data <- read.csv("samplew.csv", header=T)
raw_data <- read.csv("samplec.csv", header=T)
raw_data <- read.csv("samplewx.csv", header=T)
raw_data <- read.csv("samplecx.csv", header=T)
###temperature prediction for all sensors in future 72 hours
##without heating period (May to August)
# Each sensor has four pre-trained brnn models (nn<sensor>.4.<segment>), one
# per six-hour segment of the day. The models are expected to already exist
# in the workspace (trained elsewhere) -- TODO confirm.
# raw_data$X.1 holds the time-of-day label used to select the rows belonging
# to each segment; the remaining raw_data columns are the model inputs.
{#sensor1
SEN1.2 <- predict(nn1.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
# Y2..Y5 are the hour indices (1..72 across three days) covered by each
# six-hour segment; they are reused by the heating-period block below.
Y2 <- c(1:6,25:30,49:54)
aa <- cbind(Y2,SEN1.2)
SEN1.3 <- predict(nn1.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
Y3 <- c(7:12,31:36,55:60)
bb <- cbind(Y3,SEN1.3)
SEN1.4 <- predict(nn1.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
Y4 <- c(13:18,37:42,61:66)
cc <- cbind(Y4,SEN1.4)
SEN1.5 <- predict(nn1.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Y5 <- c(19:24,43:48,67:72)
dd <- cbind(Y5,SEN1.5)
# Sensor 1 keeps the hour-index column; the other sensors below contribute
# a single prediction column each, stacked in the same segment order.
Total1 <- rbind(aa,bb,cc,dd)
#sensor2
SEN2.2 <- predict(nn2.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN2.3 <- predict(nn2.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN2.4 <- predict(nn2.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN2.5 <- predict(nn2.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total2 <- t(cbind(t(SEN2.2),t(SEN2.3),t(SEN2.4),t(SEN2.5)))
#sensor3
SEN3.2 <- predict(nn3.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN3.3 <- predict(nn3.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN3.4 <- predict(nn3.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN3.5 <- predict(nn3.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total3 <- t(cbind(t(SEN3.2),t(SEN3.3),t(SEN3.4),t(SEN3.5)))
#sensor5
SEN5.2 <- predict(nn5.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN5.3 <- predict(nn5.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN5.4 <- predict(nn5.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN5.5 <- predict(nn5.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total5 <- t(cbind(t(SEN5.2),t(SEN5.3),t(SEN5.4),t(SEN5.5)))
#sensor15
SEN15.2 <- predict(nn15.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN15.3 <- predict(nn15.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN15.4 <- predict(nn15.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN15.5 <- predict(nn15.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total15 <- t(cbind(t(SEN15.2),t(SEN15.3),t(SEN15.4),t(SEN15.5)))
#sensor16
SEN16.2 <- predict(nn16.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN16.3 <- predict(nn16.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN16.4 <- predict(nn16.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN16.5 <- predict(nn16.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total16 <- t(cbind(t(SEN16.2),t(SEN16.3),t(SEN16.4),t(SEN16.5)))
#sensor18
SEN18.2 <- predict(nn18.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN18.3 <- predict(nn18.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN18.4 <- predict(nn18.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN18.5 <- predict(nn18.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total18 <- t(cbind(t(SEN18.2),t(SEN18.3),t(SEN18.4),t(SEN18.5)))
#sensor19
SEN19.2 <- predict(nn19.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN19.3 <- predict(nn19.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN19.4 <- predict(nn19.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN19.5 <- predict(nn19.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total19 <- t(cbind(t(SEN19.2),t(SEN19.3),t(SEN19.4),t(SEN19.5)))
#sensor20
SEN20.2 <- predict(nn20.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN20.3 <- predict(nn20.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN20.4 <- predict(nn20.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN20.5 <- predict(nn20.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total20 <- t(cbind(t(SEN20.2),t(SEN20.3),t(SEN20.4),t(SEN20.5)))
#sensor21
SEN21.2 <- predict(nn21.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN21.3 <- predict(nn21.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN21.4 <- predict(nn21.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN21.5 <- predict(nn21.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total21 <- t(cbind(t(SEN21.2),t(SEN21.3),t(SEN21.4),t(SEN21.5)))
#sensor22
SEN22.2 <- predict(nn22.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN22.3 <- predict(nn22.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN22.4 <- predict(nn22.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN22.5 <- predict(nn22.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total22 <- t(cbind(t(SEN22.2),t(SEN22.3),t(SEN22.4),t(SEN22.5)))
#sensor23
SEN23.2 <- predict(nn23.4.2,raw_data[raw_data$X.1%in% c('1:00:00','2:00:00','3:00:00','4:00:00','5:00:00','6:00:00'), ])
SEN23.3 <- predict(nn23.4.3,raw_data[raw_data$X.1%in% c('7:00:00','8:00:00','9:00:00','10:00:00','11:00:00','12:00:00'), ])
SEN23.4 <- predict(nn23.4.4,raw_data[raw_data$X.1%in% c('13:00:00','14:00:00','15:00:00','16:00:00','17:00:00','18:00:00'), ])
SEN23.5 <- predict(nn23.4.5,raw_data[raw_data$X.1%in% c('19:00:00','20:00:00','21:00:00','22:00:00','23:00:00','0:00:00'), ])
Total23 <- t(cbind(t(SEN23.2),t(SEN23.3),t(SEN23.4),t(SEN23.5)))
}
# Assemble the forecast table: column 1 = hour index (from Total1), then one
# prediction column per sensor; finally sort rows into chronological order.
FinalT <- cbind(Total1,Total2,Total3,Total5,Total15,Total16,Total18,Total19,Total20,Total21,Total22,Total23)
FinalT <- FinalT[order(FinalT[,1]),]
colnames(FinalT) <- c("TIME","SEN1","SEN2","SEN3","SEN5","SEN15","SEN16","SEN18","SEN19","SEN20","SEN21","SEN22","SEN23")
#with heating period (September to April)
# For every forecast hour whose building-average temperature falls below the
# heater set-point `th`, overwrite each sensor's prediction with the output of
# its heating-season model nw<sensor>.8.<segment>, where segments 2..5 match
# the Y2..Y5 hour groups defined above. raw_data[i, 3:10] are the model inputs.
# The nw* models are assumed to exist in the workspace -- TODO confirm.
{
# Column 1 = hour index, column 2 = mean of the 12 sensor predictions.
average <- cbind(FinalT[, 1], rowMeans(FinalT[, 2:13]))
# th: the critical temperature at which the heater is turned on
th <- 16
for (i in Y2) {
  if (average[i, 2] < th) {
    FinalT[i, 2] <- predict(nw1.8.2, raw_data[i, 3:10])
    FinalT[i, 3] <- predict(nw2.8.2, raw_data[i, 3:10])
    FinalT[i, 4] <- predict(nw3.8.2, raw_data[i, 3:10])
    FinalT[i, 5] <- predict(nw5.8.2, raw_data[i, 3:10])
    FinalT[i, 6] <- predict(nw15.8.2, raw_data[i, 3:10])
    FinalT[i, 7] <- predict(nw16.8.2, raw_data[i, 3:10])
    FinalT[i, 8] <- predict(nw18.8.2, raw_data[i, 3:10])
    FinalT[i, 9] <- predict(nw19.8.2, raw_data[i, 3:10])
    FinalT[i, 10] <- predict(nw20.8.2, raw_data[i, 3:10])
    FinalT[i, 11] <- predict(nw21.8.2, raw_data[i, 3:10])
    FinalT[i, 12] <- predict(nw22.8.2, raw_data[i, 3:10])
    FinalT[i, 13] <- predict(nw23.8.2, raw_data[i, 3:10])
  }
}
for (i in Y3) {
  if (average[i, 2] < th) {
    FinalT[i, 2] <- predict(nw1.8.3, raw_data[i, 3:10])
    FinalT[i, 3] <- predict(nw2.8.3, raw_data[i, 3:10])
    FinalT[i, 4] <- predict(nw3.8.3, raw_data[i, 3:10])
    FinalT[i, 5] <- predict(nw5.8.3, raw_data[i, 3:10])
    FinalT[i, 6] <- predict(nw15.8.3, raw_data[i, 3:10])
    FinalT[i, 7] <- predict(nw16.8.3, raw_data[i, 3:10])
    FinalT[i, 8] <- predict(nw18.8.3, raw_data[i, 3:10])
    FinalT[i, 9] <- predict(nw19.8.3, raw_data[i, 3:10])
    FinalT[i, 10] <- predict(nw20.8.3, raw_data[i, 3:10])
    FinalT[i, 11] <- predict(nw21.8.3, raw_data[i, 3:10])
    # BUG FIX: the original called nw22.8.2/nw23.8.2 here -- a copy-paste
    # remnant from the Y2 loop; every other sensor in this loop uses the
    # .8.3 segment model. Assumes nw22.8.3/nw23.8.3 exist in the workspace
    # like their siblings -- TODO confirm before running.
    FinalT[i, 12] <- predict(nw22.8.3, raw_data[i, 3:10])
    FinalT[i, 13] <- predict(nw23.8.3, raw_data[i, 3:10])
  }
}
for (i in Y4) {
  if (average[i, 2] < th) {
    FinalT[i, 2] <- predict(nw1.8.4, raw_data[i, 3:10])
    FinalT[i, 3] <- predict(nw2.8.4, raw_data[i, 3:10])
    FinalT[i, 4] <- predict(nw3.8.4, raw_data[i, 3:10])
    FinalT[i, 5] <- predict(nw5.8.4, raw_data[i, 3:10])
    FinalT[i, 6] <- predict(nw15.8.4, raw_data[i, 3:10])
    FinalT[i, 7] <- predict(nw16.8.4, raw_data[i, 3:10])
    FinalT[i, 8] <- predict(nw18.8.4, raw_data[i, 3:10])
    FinalT[i, 9] <- predict(nw19.8.4, raw_data[i, 3:10])
    FinalT[i, 10] <- predict(nw20.8.4, raw_data[i, 3:10])
    FinalT[i, 11] <- predict(nw21.8.4, raw_data[i, 3:10])
    FinalT[i, 12] <- predict(nw22.8.4, raw_data[i, 3:10])
    FinalT[i, 13] <- predict(nw23.8.4, raw_data[i, 3:10])
  }
}
for (i in Y5) {
  if (average[i, 2] < th) {
    FinalT[i, 2] <- predict(nw1.8.5, raw_data[i, 3:10])
    FinalT[i, 3] <- predict(nw2.8.5, raw_data[i, 3:10])
    FinalT[i, 4] <- predict(nw3.8.5, raw_data[i, 3:10])
    FinalT[i, 5] <- predict(nw5.8.5, raw_data[i, 3:10])
    FinalT[i, 6] <- predict(nw15.8.5, raw_data[i, 3:10])
    FinalT[i, 7] <- predict(nw16.8.5, raw_data[i, 3:10])
    FinalT[i, 8] <- predict(nw18.8.5, raw_data[i, 3:10])
    FinalT[i, 9] <- predict(nw19.8.5, raw_data[i, 3:10])
    FinalT[i, 10] <- predict(nw20.8.5, raw_data[i, 3:10])
    FinalT[i, 11] <- predict(nw21.8.5, raw_data[i, 3:10])
    FinalT[i, 12] <- predict(nw22.8.5, raw_data[i, 3:10])
    FinalT[i, 13] <- predict(nw23.8.5, raw_data[i, 3:10])
  }
}
}
###Product information
##Pharma
# Cost-of-loss for a pharmaceutical shipment stored from hour t1 to hour t2.
#
# Reads the global forecast matrix FinalT (col 1 = TIME, cols 2:13 = sensors).
# t1, t2   : first/last hour index of storage (rows of FinalT)
# maxtem   : product's maximum allowed temperature; only excess above it ages
#            the product
# value    : shipment value used to scale the quadratic cost
# Returns a vector with one cost per candidate location (sensor column).
pharma <- function(t1, t2, maxtem, value) {
  # Degrees above maxtem per hour/location; hours at or below maxtem count 0.
  # BUG FIX: the original clamping loop ran j in 2:ncol(temd) and therefore
  # never clamped the FIRST sensor column (temd already excludes the TIME
  # column). pmax() clamps every cell.
  temd <- pmax(FinalT[, 2:13] - maxtem, 0)
  # Q10=2: optimistic evaluation; Q10=3: stable evaluation; Q10=4: pessimistic evaluation.
  Q10 <- 4
  shelflife <- 4320  # nominal shelf life in hours (presumably 180 days)
  # Q10 temperature-acceleration rule: each 10 degrees of excess divides the
  # remaining shelf life by Q10.
  shelflife1 <- shelflife / (Q10^(temd / 10))
  shelflifeloss <- shelflife - shelflife1
  hourlyloss <- shelflifeloss / shelflife
  totalloss <- colSums(hourlyloss[t1:t2, ])
  # Quadratic cost-of-loss scaled to the shipment value.
  (value / (4320^2)) * (totalloss^2)
}
##Floral
# Cost-of-loss for a floral shipment stored from hour t1 to hour t2.
#
# Reads the global forecast matrix FinalT (col 1 = TIME, cols 2:13 = sensors).
# t1, t2   : first/last hour index of storage (rows of FinalT)
# maxtem   : optimal maximum temperature; excess above it shortens vase life
# value    : shipment value used to scale the quadratic cost
# Returns a vector with one cost per candidate location (sensor column).
floral <- function(t1, t2, maxtem, value) {
  # Excess temperature clamped to [0, 15] degrees -- the vase-life model
  # below is only calibrated on that range.
  # BUG FIX: the original clamping loop ran j in 2:ncol(temd) and therefore
  # never clamped the FIRST sensor column (temd already excludes the TIME
  # column). pmin(pmax(...)) clamps every cell.
  temd <- pmin(pmax(FinalT[, 2:13] - maxtem, 0), 15)
  # Empirical vase-life curve; at temd == 0 it equals 304.22 hours, so the
  # loss term below is exactly zero when there is no temperature excess.
  vaselife <- 370.13 - 65.91 * exp(0.1092 * temd)
  vaselifeloss <- 304.22 - vaselife
  hourlyfloss <- vaselifeloss / vaselife
  totalloss <- colSums(hourlyfloss[t1:t2, ])
  # Quadratic cost-of-loss scaled to the shipment value.
  (value / (304.22^2)) * (totalloss^2)
}
##location choice based on storage duration
# Monte-Carlo experiment: 10,000 replications of 100 random shipments.
# Shipments are served in descending storage-duration order (V8) and each is
# greedily assigned to the cheapest feasible location (a FinalT sensor column)
# with remaining capacity. Depends on pharma(), floral() and FinalT above,
# plus a shipment.csv template with at least 7 columns (an 8th, V8, is
# created by assignment below) -- TODO confirm the file's shape.
shipment<-read.csv("shipment.csv", header=T)
finallosst<-c()
# rrab[p, k]: location label chosen for product p in replication k.
rrab<-matrix(nrow=100,ncol=10000)
for (k in 1:10000){
set.seed(k)
range=1:70
pro <- c("Flora","Pharma")
opp <- c("COL","CRT","PIL")
opf<-c("COL","PIL")
# Draw shipment p: col 1 type, col 2 arrival hour, col 3 departure hour,
# col 4 optimal max temperature, col 5 value, col 6 amount, col 7 packaging
# category, col 8 storage duration.
for (p in 1:100){
shipment[p,1]=sample(pro,1,replace = TRUE,prob = c(0.576,0.424))
shipment[p,2]=sample(range,1,replace = TRUE)
to<-as.numeric(shipment[p,2])+1
too<-as.numeric(shipment[p,2])+9
if (shipment[p,1] == "Pharma"){
shipment[p,7]=sample(opp,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "CRT"){
shipment[p,4]=25
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if (shipment[p,1] == "Flora"){
shipment[p,7]=sample(opf,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
shipment[p,6]=sample(1:5,1,replace = TRUE)
shipment[p,8]=shipment[p,3]-shipment[p,2]
# NOTE(review): sample(a:b, 1) on these non-integer endpoints draws from a
# unit-step sequence starting at `a`, not a continuous uniform -- confirm
# that is the intended value distribution.
if(shipment[p,1] == 'Pharma'){
shipment[p,5]=sample(40624.8759:136355.5350,1,replace = TRUE)
}else{
shipment[p,5]=sample(3118.9354:7188.9507,1,replace = TRUE)
}
}
colnames(shipment) <- c("Type","Time.in","Time.out","Optimal","Value","Amount","Category","V8")
# Serve longest-staying shipments first; ties broken by arrival time.
shipment <- shipment[order(-shipment$V8,shipment$Time.in),]
# a(i)/b(i): per-location cost vectors for shipment i under the pharma or
# floral loss model respectively.
a<-function(i){
pharma(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
b<-function(i){
floral(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
# Indicator masks so each shipment is costed by exactly one model.
y<-ifelse(shipment$Type != 'Pharma',0,1)
y1<-ifelse(shipment$Type != 'Flora',0,1)
y1
c<-c()
for (i in 1:100){
x<-a(i)*y[i]
c<-rbind(c,x)
}
m<-c()
for (i in 1:100){
x<-b(i)*y1[i]
m<-rbind(m,x)
}
capacity<-c(0,120,120,120,0,0,0,0,0,0,0,0)
capacity
amount<-as.numeric(shipment$Amount)
# n: 12 locations x 100 products cost matrix, with capacity appended as a
# final column. (The local `colnames` vector shadows base::colnames as a
# variable, but the function call on the next line still resolves.)
n<-m+c
n<-t(n)
colnames <- c()
for (i in 1:100){
name<- paste('product',i)
colnames<-cbind(colnames,name)
}
colnames(n)<-colnames
n<-cbind(n,capacity)
# Greedy assignment: for product j, sort locations by its cost and take the
# cheapest one whose remaining capacity covers the amount; the chosen row is
# recovered as the one whose capacity changed (nnn == max diff).
ab<-c()
for(j in 1:100){
n<-n[order(n[,j]),]
n1<-n[order(n[,j]),]
for (i in 1:12){
if(n[i,ncol(n)] >= amount[j]){
n[i,ncol(n)]<-n[i,ncol(n)]-amount[j]
break;}
}
nnn<-n1-n
x<-which(nnn==max(nnn),arr.ind=T)
ab<-rbind(ab,x)
}
rab<-rownames(ab)
rab<-matrix(rab)
rrab[1:100,k]<-rab
locationchoice <- cbind(shipment,rab)
# NOTE(review): this renames column 7 ("Category"), not the appended `rab`
# column -- looks like it should target the last column; confirm intent.
names(locationchoice)[7]<-c("location")
locationchoice
finallosst[k]<-0
for(i in 1:100){
loss<-n[which(rownames(n)==rab[i]),i]
finallosst[k]<-finallosst[k]+loss
}
}
# Summaries: distribution of total loss, and the share of assignments that
# each location (FinalT sensor column) received across all replications.
hist(finallosst)
finallosst<-matrix(finallosst)
choice<-t(rrab)
percentage<-matrix(nrow=12,ncol=1)
rr<-1:12
for (r in rr){
name<-colnames(FinalT)
name<-name[r+1]
percentage[r,]=sum(choice==name)/(10000*100)
}
rrr<-colnames(FinalT)
rrr<-matrix(rrr)
rrr<-rrr[-1,]
percentage<-cbind(rrr,percentage)
write.table (finallosst, file ="finalloss-w1.csv",sep =",",row.names =FALSE)
write.table (percentage, file ="choice-w1.csv",sep =",",row.names =FALSE)
##location choice based on time in
# Same Monte-Carlo experiment as above, but shipments are served in arrival-
# time order (earliest Time.in first, longer stays first on ties).
# NOTE(review): the output filenames below are IDENTICAL to the previous
# section's, so this run overwrites those results -- confirm intent.
shipment<-read.csv("shipment.csv", header=T)
finallosst<-c()
rrab<-matrix(nrow=100,ncol=10000)
for (k in 1:10000){
set.seed(k)
range=1:70
pro <- c("Flora","Pharma")
opp <- c("COL","CRT","PIL")
opf<-c("COL","PIL")
# Shipment drawing is identical to the storage-duration section above.
for (p in 1:100){
shipment[p,1]=sample(pro,1,replace = TRUE,prob = c(0.576,0.424))
shipment[p,2]=sample(range,1,replace = TRUE)
to<-as.numeric(shipment[p,2])+1
too<-as.numeric(shipment[p,2])+9
if (shipment[p,1] == "Pharma"){
shipment[p,7]=sample(opp,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "CRT"){
shipment[p,4]=25
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if (shipment[p,1] == "Flora"){
shipment[p,7]=sample(opf,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
shipment[p,6]=sample(1:5,1,replace = TRUE)
shipment[p,8]=shipment[p,3]-shipment[p,2]
if(shipment[p,1] == 'Pharma'){
shipment[p,5]=sample(40624.8759:136355.5350,1,replace = TRUE)
}else{
shipment[p,5]=sample(3118.9354:7188.9507,1,replace = TRUE)
}
}
# NOTE(review): only 7 names are assigned although column 8 (duration) was
# just written; the 8th column is left unnamed -- confirm this is harmless
# for the ordering below.
colnames(shipment) <- c("Type","Time.in","Time.out","Optimal","Value","Amount","Category")
shipment <- shipment[order(shipment$Time.in,-shipment$Time.out),]
# Per-location cost vectors under the pharma / floral loss models.
a<-function(i){
pharma(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
b<-function(i){
floral(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
y<-ifelse(shipment$Type != 'Pharma',0,1)
y1<-ifelse(shipment$Type != 'Flora',0,1)
y1
c<-c()
for (i in 1:100){
x<-a(i)*y[i]
c<-rbind(c,x)
}
m<-c()
for (i in 1:100){
x<-b(i)*y1[i]
m<-rbind(m,x)
}
capacity<-c(0,120,120,120,0,0,0,0,0,0,0,0)
capacity
amount<-as.numeric(shipment$Amount)
n<-m+c
n<-t(n)
colnames <- c()
for (i in 1:100){
name<- paste('product',i)
colnames<-cbind(colnames,name)
}
colnames(n)<-colnames
n<-cbind(n,capacity)
# Greedy cheapest-feasible-location assignment (same scheme as above).
ab<-c()
for(j in 1:100){
n<-n[order(n[,j]),]
n1<-n[order(n[,j]),]
for (i in 1:12){
if(n[i,ncol(n)] >= amount[j]){
n[i,ncol(n)]<-n[i,ncol(n)]-amount[j]
break;}
}
nnn<-n1-n
x<-which(nnn==max(nnn),arr.ind=T)
ab<-rbind(ab,x)
}
rab<-rownames(ab)
rab<-matrix(rab)
rrab[1:100,k]<-rab
locationchoice <- cbind(shipment,rab)
# NOTE(review): renames column 7, not the appended `rab` column -- confirm.
names(locationchoice)[7]<-c("location")
locationchoice
finallosst[k]<-0
for(i in 1:100){
loss<-n[which(rownames(n)==rab[i]),i]
finallosst[k]<-finallosst[k]+loss
}
}
hist(finallosst)
finallosst<-matrix(finallosst)
choice<-t(rrab)
percentage<-matrix(nrow=12,ncol=1)
rr<-1:12
for (r in rr){
name<-colnames(FinalT)
name<-name[r+1]
percentage[r,]=sum(choice==name)/(10000*100)
}
rrr<-colnames(FinalT)
rrr<-matrix(rrr)
rrr<-rrr[-1,]
percentage<-cbind(rrr,percentage)
write.table (finallosst, file ="finalloss-w1.csv",sep =",",row.names =FALSE)
write.table (percentage, file ="choice-w1.csv",sep =",",row.names =FALSE)
##location choice based on average loss
# Third variant: shipments (columns of n) are reordered by descending average
# per-location loss before the greedy assignment. Reuses the `shipment` data
# frame left over from the previous section (it is NOT re-read here).
finallossa<-c()
rrab2<-matrix(nrow=100,ncol=10000)
for (k in 1:10000){
set.seed(k)
range=1:70
pro <- c("Flora","Pharma")
opp <- c("COL","CRT","PIL")
opf<-c("COL","PIL")
# Shipment drawing as before, except no duration column is written here.
for (p in 1:100){
shipment[p,1]=sample(pro,1,replace = TRUE,prob = c(0.576,0.424))
shipment[p,2]=sample(range,1,replace = TRUE)
to<-as.numeric(shipment[p,2])+1
too<-as.numeric(shipment[p,2])+9
if (shipment[p,1] == "Pharma"){
shipment[p,7]=sample(opp,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "CRT"){
shipment[p,4]=25
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if (shipment[p,1] == "Flora"){
shipment[p,7]=sample(opf,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
shipment[p,6]=sample(1:5,1,replace = TRUE)
if(shipment[p,1] == 'Pharma'){
shipment[p,5]=sample(40624.8759:136355.5350,1,replace = TRUE)
}else{
shipment[p,5]=sample(3118.9354:7188.9507,1,replace = TRUE)
}
}
colnames(shipment) <- c("Type","Time.in","Time.out","Optimal","Value","Amount","Category")
a<-function(i){
pharma(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
b<-function(i){
floral(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
y<-ifelse(shipment$Type != 'Pharma',0,1)
y1<-ifelse(shipment$Type != 'Flora',0,1)
y1
c<-c()
for (i in 1:100){
x<-a(i)*y[i]
c<-rbind(c,x)
}
m<-c()
for (i in 1:100){
x<-b(i)*y1[i]
m<-rbind(m,x)
}
capacity<-c(0,120,120,120,0,0,0,0,0,0,0,0)
capacity
amount<-as.numeric(shipment$Amount)
n<-m+c
n<-t(n)
colnames <- c()
for (i in 1:100){
name<- paste('product',i)
colnames<-cbind(colnames,name)
}
colnames(n)<-colnames
# Reorder products by descending mean loss across locations.
# NOTE(review): after this column permutation, amount[j] in the loop below
# still indexes the ORIGINAL shipment order while column j is a different
# product -- confirm this mismatch is intended.
mean<-colMeans(n)
meann<-rev(order(mean))
meannn<-rev(sort(mean))
n<-n[,meann]
n<-cbind(n,capacity)
# Greedy cheapest-feasible-location assignment (same scheme as above).
ab<-c()
for(j in 1:100){
n<-n[order(n[,j]),]
n1<-n[order(n[,j]),]
for (i in 1:12){
if(n[i,ncol(n)] >= amount[j]){
n[i,ncol(n)]<-n[i,ncol(n)]-amount[j]
break;}
}
nnn<-n1-n
x<-which(nnn==max(nnn),arr.ind=T)
ab<-rbind(ab,x)
}
rab<-rownames(ab)
rab<-matrix(rab)
rrab2[1:100,k]=rab
locationchoice <- cbind(meannn,rownames(ab))
locationchoice <- locationchoice[,-1]
finallossa[k]<-0
for(i in 1:100){
loss<-n[which(rownames(n)==rab[i]),i]
finallossa[k]<-finallossa[k]+loss
}
}
hist(finallossa)
finallossa<-matrix(finallossa)
choice2<-t(rrab2)
percentage2<-matrix(nrow=12,ncol=1)
rr<-1:12
for (r in rr){
name<-colnames(FinalT)
name<-name[r+1]
percentage2[r,]=sum(choice2==name)/(10000*100)
}
write.table (finallossa, file ="212.csv",sep =",",row.names =FALSE)
write.table (percentage2, file ="schoice-w2.csv",sep =",",row.names =FALSE)
##location choice based on std
# Fourth variant: products are reordered by descending standard deviation of
# their per-location losses before the greedy assignment, followed by a final
# comparison of the three strategies.
finallosss<-c()
rrab3<-matrix(nrow=100,ncol=10000)
for (k in 1:10000){
set.seed(k)
range=1:70
pro <- c("Flora","Pharma")
opp <- c("COL","CRT","PIL")
opf<-c("COL","PIL")
# Shipment drawing as in the average-loss section (no duration column).
for (p in 1:100){
shipment[p,1]=sample(pro,1,replace = TRUE,prob = c(0.576,0.424))
shipment[p,2]=sample(range,1,replace = TRUE)
to<-as.numeric(shipment[p,2])+1
too<-as.numeric(shipment[p,2])+9
if (shipment[p,1] == "Pharma"){
shipment[p,7]=sample(opp,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "CRT"){
shipment[p,4]=25
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if (shipment[p,1] == "Flora"){
shipment[p,7]=sample(opf,1,replace = TRUE)
if (shipment[p,7] == "COL"){
shipment[p,4]=8
if(shipment[p,2]<63){
shipment[p,3]=sample(to:too,1,replace = TRUE)}
else{shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
if(shipment[p,7] == "PIL"){
shipment[p,4]=25
shipment[p,3]=sample(to:72,1,replace = TRUE)}
}
shipment[p,6]=sample(1:5,1,replace = TRUE)
if(shipment[p,1] == 'Pharma'){
shipment[p,5]=sample(40624.8759:136355.5350,1,replace = TRUE)
}else{
shipment[p,5]=sample(3118.9354:7188.9507,1,replace = TRUE)
}
}
colnames(shipment) <- c("Type","Time.in","Time.out","Optimal","Value","Amount","Category")
a<-function(i){
pharma(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
b<-function(i){
floral(as.numeric(shipment[i,2]),as.numeric(shipment[i,3]),
as.numeric(shipment[i,4]),as.numeric(shipment[i,5]))
}
y<-ifelse(shipment$Type != 'Pharma',0,1)
y1<-ifelse(shipment$Type != 'Flora',0,1)
y1
c<-c()
for (i in 1:100){
x<-a(i)*y[i]
c<-rbind(c,x)
}
m<-c()
for (i in 1:100){
x<-b(i)*y1[i]
m<-rbind(m,x)
}
capacity<-c(0,120,120,120,0,0,0,0,0,0,0,0)
capacity
amount<-as.numeric(shipment$Amount)
n<-m+c
n<-t(n)
colnames <- c()
for (i in 1:100){
name<- paste('product',i)
colnames<-cbind(colnames,name)
}
colnames(n)<-colnames
# Standard deviation of each product's losses across the 12 locations.
std<-c()
for (i in 1:100){
namee<- sd(n[1:12,i])
std<-cbind(std,namee)
}
# Reorder products by descending loss spread.
# NOTE(review): as in the average-loss section, amount[j] below still uses
# the original product order after this permutation -- confirm intent.
stdd<-rev(order(std))
stddd<-rev(sort(std))
n<-n[,stdd]
n<-cbind(n,capacity)
# Greedy cheapest-feasible-location assignment (same scheme as above).
ab<-c()
for(j in 1:100){
n<-n[order(n[,j]),]
n1<-n[order(n[,j]),]
for (i in 1:12){
if(n[i,ncol(n)] >= amount[j]){
n[i,ncol(n)]<-n[i,ncol(n)]-amount[j]
break;}
}
nnn<-n1-n
x<-which(nnn==max(nnn),arr.ind=T)
ab<-rbind(ab,x)
}
rab<-rownames(ab)
rab<-matrix(rab)
rrab3[1:100,k]=rab
locationchoice <- cbind(stddd,rownames(ab))
locationchoice <- locationchoice[,-1]
finallosss[k]<-0
for(i in 1:100){
loss<-n[which(rownames(n)==rab[i]),i]
finallosss[k]<-finallosss[k]+loss
}
}
hist(finallosss)
finallosss<-matrix(finallosss)
choice3<-t(rrab3)
percentage3<-matrix(nrow=12,ncol=1)
rr<-1:12
for (r in rr){
name<-colnames(FinalT)
name<-name[r+1]
percentage3[r,]=sum(choice3==name)/(10000*100)
}
write.table (finallosss, file ="finalloss-wx3.csv",sep =",",row.names =FALSE)
write.table (percentage3, file ="choice-wx3.csv",sep =",",row.names =FALSE)
# Final comparison of the three ordering strategies (duration/avg/std).
finalloss<-cbind(finallosst,finallossa,finallosss)
percentagef<-cbind(percentage,percentage2,percentage3)
mean(finallosst)
mean(finallossa)
mean(finallosss)
write.table (finallossa, file ="finallossabc.csv",sep =",",row.names =FALSE)
write.table (percentage3, file ="choice-wx3.csv",sep =",",row.names =FALSE)
|
2f25ab8b60a6ad8c556dbc69db64926a0c02c1e9
|
fe17217bf85ed660a1fa3173f6078133c5bc49e0
|
/man/p.s.strat.Rd
|
24116b310c884dbe66463382651d374081dc8ecc
|
[] |
no_license
|
rgcstats/ODS
|
5a4ba2107328175174b4874e10d8e47733c33397
|
0290071546fdd8dff1c8e9e6d8bc5920d1c04491
|
refs/heads/master
| 2020-12-10T10:30:36.232624
| 2020-01-13T10:20:29
| 2020-01-13T10:20:29
| 77,803,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 960
|
rd
|
p.s.strat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p_s_strat.R
\name{p.s.strat}
\alias{p.s.strat}
\title{Calculates p(s) for the design stratified by y}
\usage{
p.s.strat(ys, yr, log = F, specs)
}
\arguments{
\item{ys}{vector of the sample values of the dependent variable}
\item{yr}{vector of the non-sample values of the dependent variable}
\item{log}{If FALSE (the default), returns p(s). If TRUE, log(p(s)) is returned.}
\item{specs}{An object containing detailed specifications of the design. Should be vector of
cutoffs (for H strata there should be a vector of H-1 cutoffs)}
}
\value{
The probability that s was selected (or the log thereof) multiplied by a constant.
}
\description{
This function returns the probability p(s) of an entire sample being selected
for stratified simple random sampling without replacement where strata are
defined by dividing y into intervals.
}
\details{
Add some details later.
}
|
6c27610b1e4e53b855597c09a18a151b385e99ea
|
f9d80df2d3108c24255c4c2ba0fbce25a3acfad5
|
/code/fisher_exact_test.R
|
f73b77a500f88253cbe2eae49cd1e033917c1ea7
|
[] |
no_license
|
certifiedwaif/phd
|
12ed3c3113a5f662cf14538776f02acecd11338b
|
0f6f7b48917961f6821ff7aaf27357c1be1c6d5a
|
refs/heads/master
| 2021-10-24T22:07:44.383587
| 2019-03-29T03:12:55
| 2019-03-29T03:12:55
| 11,830,694
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 354
|
r
|
fisher_exact_test.R
|
# fisher_exact_test.R
#
# p-value of Fisher's exact test on the table of *expected* counts for a
# two-arm study of total size n: arms of proportion p_group / (1 - p_group),
# with assumed positive rates p_non_effect and p_effect respectively.
#
# Args:
#   n: total sample size. Non-integer expected counts are handled by
#      fisher.test() itself (current R rounds them with a warning — see
#      ?fisher.test).
# Returns: the two-sided Fisher exact test p-value.
pvalue <- function(n) {
  p_non_effect <- 0.05  # assumed positive rate in the no-effect arm
  p_effect <- 0.1       # assumed positive rate in the effect arm
  p_group <- 0.5        # fraction of subjects allocated to the first arm
  # Expected 2x2 counts: one column per arm, rows = positive / negative.
  # Fixed: the original mixed p_group and (1 - p_group) inconsistently
  # between cells; that was harmless only because p_group == 0.5. This is
  # the consistent table (numerically identical for p_group = 0.5).
  mat <- matrix(c(n * p_group * p_non_effect,
                  n * p_group * (1 - p_non_effect),
                  n * (1 - p_group) * p_effect,
                  n * (1 - p_group) * (1 - p_effect)), ncol = 2)
  fisher.test(mat)$p.value
}
# Print n and pvalue(n) for every total sample size from 1 to 1000,
# one "n p-value" pair per line.
for (n in 1:1000) {
cat(n, pvalue(n), "\n")
}
|
78ebb8a4dc9c826cda1933584d6ae39f82421666
|
ea805d721a3cdc2db7a75e38a9b212e4e1885778
|
/ribiosUtils/R/factor.R
|
045ac0fc72bb884724dc533de6bf0e27fca2a3f1
|
[] |
no_license
|
grst/ribios
|
28c02c1f89180f79f71f21a00ba8ad8c22be3251
|
430056c85f3365e1bcb5e565153a68489c1dc7b3
|
refs/heads/master
| 2023-06-01T04:48:20.792749
| 2017-04-10T14:28:23
| 2017-04-10T14:28:23
| 68,606,477
| 0
| 0
| null | 2016-09-19T13:04:00
| 2016-09-19T13:04:00
| null |
UTF-8
|
R
| false
| false
| 2,758
|
r
|
factor.R
|
# Rename the levels of a factor.
#
# Args:
#   factor: the factor whose levels are to be renamed.
#   levels: named character vector; names are the (ordered) existing levels,
#           values are the replacement names. Must cover every current level
#           exactly (both directions are checked).
# Returns: a factor with the same values as 'factor' but renamed levels.
refactor <- function(factor, levels) {
  if (!is.factor(factor))
    stop("'factor' must be factor\n")
  if (nlevels(factor) != length(levels))
    stop("Level number of factor' must be of the same length of 'levels'\n")
  if (is.null(names(levels)))
    stop("'levels' must be a named vector: names are (ordered) old levels, values are new levels")
  existing <- levels(factor)
  from <- names(levels)
  to <- unname(levels)
  # Every mapped-from level must exist, and every existing level must be mapped.
  if (!all(from %in% existing)) {
    absent <- setdiff(from, existing)
    stop(paste("Following old levels are not found:\n",
               paste(absent, collapse=" "), "\n"))
  }
  if (!all(existing %in% from)) {
    absent <- setdiff(existing, from)
    stop(paste("Following current levels are not included in 'levels':\n",
               paste(absent, collapse=" "), "\n"))
  }
  # Re-order the levels to match the mapping, then substitute the new labels.
  remapped <- factor(factor, levels=from)
  levels(remapped) <- to
  remapped
}
# Move the levels listed in 'refs' to the front of x's level order,
# preserving the order given in 'refs'; the remaining levels keep their
# relative order afterwards.
#
# Args:
#   x:    a factor.
#   refs: character vector of levels to promote; every entry must already be
#         a level of x.
# Returns: x with its levels reordered.
relevels <- function(x, refs) {
  if (!all(refs %in% levels(x))) {
    missing <- which(!(refs %in% levels(x)))
    # Fixed: the original used paste(..., sep = ","), which does not join a
    # vector; collapse = "," produces the intended comma-separated list.
    stop("The following levels are not found in x:\n",
         paste(refs[missing], collapse = ","))
  }
  # relevel() moves a single level to the front, so apply refs back-to-front
  # so that the first element of refs ends up first.
  refs <- rev(refs)
  for (i in refs) {
    x <- relevel(x, ref = i)
  }
  return(x)
}
# Factor whose level order follows first appearance in x (instead of the
# default alphabetical sort); extra arguments are forwarded to factor().
ofactor <- function(x, ...) {
  appearance_order <- unique(as.character(x))
  factor(x, levels = appearance_order, ...)
}
##test.relevels <- function() {
## cup <- c("HSV","FCBayern","KSC","VfB")
## teams <- factor(cup)
## orderTeams <- relevels(teams, cup)
##
## checkEquals(levels(orderTeams), cup)
## checkException(relvels(teams, c(cup, "SF")))
##}
# Cut a numeric vector into fixed-width intervals of size 'step', extending
# the break points outward so the whole range of x is covered.
#
# Args:
#   x:           numeric vector to bin.
#   step:        interval width.
#   labelOption: "cut.default" keeps cut()'s "(a,b]" labels; "left"/"right"
#                label each bin with its left/right break point instead.
#   include.lowest, right, dig.lab, ordered_result, ...: forwarded to cut().
# Returns: a factor of the same length as x (see cut()).
cutInterval <- function(x, step=1,
labelOption=c("cut.default", "left", "right"),
include.lowest=FALSE, right=TRUE, dig.lab=3, ordered_result=FALSE,...) {
labelOption <- match.arg(labelOption,
c("left", "right", "cut.default"))
x.max <- max(x, na.rm=TRUE)
x.min <- min(x, na.rm=TRUE)
# Upper break: x.max rounded up to the next multiple of step.
cut.up <- ifelse(x.max %% step==0,
x.max %/% step, x.max %/%step+1)*step
# Lower break: x.min rounded down to a multiple of step (0 when x.min lies
# in the first interval, i.e. its integer quotient by step is 0).
cut.low <- ifelse(x.min %/% step==0,
0, step * (x.min %/% step))
cut.scale <- seq(from=cut.low, to=cut.up, by=step)
# NULL labels make cut() fall back to its default "(a,b]" style.
labels <- NULL
if(labelOption=="left") {
labels <- cut.scale[-length(cut.scale)]
} else if (labelOption=="right") {
labels <- cut.scale[-1]
}
# NOTE(review): when x.min is an exact multiple of step and the defaults
# include.lowest=FALSE, right=TRUE are used, the minimum value falls on the
# open left boundary of the first interval and becomes NA — confirm intended.
x.cut <- cut(x, cut.scale,labels=labels,
include.lowest=include.lowest, right=right, dig.lab=dig.lab, ordered_result=ordered_result, ## default in cut
...)
return(x.cut)
}
# Re-order a factor's levels numerically (ascending by default) instead of
# the lexicographic order factor() would use; x may be a factor or a vector
# coercible to character.
refactorNum <- function(x, decreasing=FALSE) {
  as_fac <- factor(as.character(x))
  numeric_order <- sort(as.numeric(levels(as_fac)), decreasing=decreasing)
  factor(as_fac, levels=numeric_order)
}
|
7f31a98f723c597a69dd692e035a0d476c733b4d
|
85d27a2aa01ea5cd3259547605474eeee3e32d2f
|
/man/b2_authorize_account.Rd
|
a8e6c9fefbc900980cbe2aa5654e00677b8c7db9
|
[] |
no_license
|
mvanhala/B2R
|
cff1228bc231c837e7bbf199a4ae82cd6b963b2d
|
f7d99652d00cfeef971eeb7c2da73251df556f2f
|
refs/heads/master
| 2020-12-13T04:28:34.337097
| 2017-07-11T06:56:29
| 2017-07-11T06:56:29
| 95,411,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 605
|
rd
|
b2_authorize_account.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/b2_authorize_account.R
\name{b2_authorize_account}
\alias{b2_authorize_account}
\title{Authorize Backblaze B2}
\usage{
b2_authorize_account(account_id = Sys.getenv("B2_ACCOUNT_ID"),
application_key = Sys.getenv("B2_APPLICATION_KEY"))
}
\arguments{
\item{account_id}{B2 account ID}
\item{application_key}{B2 application key for the account}
}
\value{
Invisibly, a list with the values from the JSON response
}
\description{
Log in to B2 API
}
\details{
See \url{https://www.backblaze.com/b2/docs/b2_authorize_account.html}
}
|
3794e2ddc80ef4591a84bd408ab94d6b2f27fa9b
|
c203822ecd4c4c41a82bf2a768e74dd9b4b1af98
|
/run_analysis.R
|
6c4baba3922c2e52de838fa3625d63fee5db6d6c
|
[] |
no_license
|
amagoo/Gettingcleaningdata_courseproject
|
e7dd3f0e7a5482f64507d74ed71ede7417025520
|
c12050e1ce79b31fd7e3d9ed0a147b36de1183e6
|
refs/heads/master
| 2021-01-10T19:59:13.834011
| 2015-05-20T16:41:34
| 2015-05-20T16:41:34
| 35,958,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
run_analysis.R
|
# run_analysis.R — Getting & Cleaning Data course project.
# Merges the UCI HAR train/test sets, extracts the mean()/std() features,
# attaches descriptive activity names, and writes per-subject/per-activity
# averages to project.data.txt.
# NOTE(review): the hard-coded setwd() ties this script to one machine; the
# script assumes the UCI HAR Dataset files are in the working directory.
setwd("C:/Users/andrew/Documents/Gettingcleaningdata_coursera/project_data/UCI HAR Dataset")
###submit code after here:
# Raw measurement matrices, activity codes and subject ids for both splits.
X_train<-read.table("X_train.txt")
y_train<-read.table("y_train.txt")
X_test<-read.table("X_test.txt")
y_test<-read.table("y_test.txt")
subject_test<-read.table("subject_test.txt")
subject_train<-read.table("subject_train.txt")
feature_labs<-read.table("features.txt")
activity_labels<-read.table("activity_labels.txt")
#################### 1. Merge the test and training datasets ####################
# Sanity check: feature_labs row order must match the default V1..Vn column
# names of the measurement tables (both sums should print 0).
feature_labs$V3<-paste0("V",feature_labs$V1)
sum(!(names(X_train)==feature_labs$V3))
sum(!(names(X_test)==feature_labs$V3))
# Replace default column names with the feature names.
names(X_train)<-feature_labs$V2
names(X_test)<-feature_labs$V2
# Prepend activity code and subject id columns to each split.
X_train2<-cbind(activitycode=y_train$V1,X_train)
X_train2<-cbind(subject=subject_train$V1,X_train2)
X_test2<-cbind(activitycode=y_test$V1,X_test)
X_test2<-cbind(subject=subject_test$V1,X_test2)
# Sanity check: column names of the two splits must be identical (prints 0).
sum(!(names(X_train2)==names(X_test2)))
# Stack test and training rows into one data set.
complete<-rbind(X_test2,X_train2)
############ 2. Extract only mean and standard deviation measurements ############
# Note: features_info.txt explains that each signal had estimated mean() and
# std(); meanFreq() and the angle() means are deliberately not extracted.
# Keep only columns whose names contain mean() or std().
complete2<-complete[,grepl("mean\\(\\)|std\\(\\)",names(complete))]
# Re-attach the subject and activitycode columns dropped by the grepl subset.
complete2<-cbind(complete[,1:2],complete2)
##################### 3. Name the activities with descriptive names #####################
# Clean up the activity labels: drop underscores and lower-case them.
activity_labels$V2<-gsub("_","",activity_labels$V2)
activity_labels$V2<-tolower(activity_labels$V2)
names(activity_labels)<-c("activitycode","activity")
# Merge on activitycode to add a descriptive "activity" column.
complete3<-merge(activity_labels,complete2,all=TRUE)
#################### 4. Label the dataset with descriptive variable names ####################
# Clean up column names: dashes become underscores, parentheses are removed.
new_names<-gsub("-","_",names(complete3))
new_names<-gsub("\\(|\\)","",new_names)
names(complete3)<-new_names
######################### 5. Create a second tidy data set with averages #########################
# Sanity check for missing values (prints 0 if the data are complete).
sum(is.na(complete3))
# Average every measurement column per (subject, activity) pair; the first
# three columns (activitycode, activity, subject) are excluded from the mean.
project.data<-aggregate(complete3[,-c(1:3)],list(subject=complete3$subject,activity=complete3$activity),FUN=mean)
# Write the tidy table of means to a text file.
write.table(project.data,file="project.data.txt",row.name=FALSE)
# Read the file back in and open it in the viewer (interactive check).
data <- read.table("project.data.txt", header = TRUE)
View(data)
|
220bd9a5269e7896024d3cfff6a48fa0f3ae5e1c
|
ee816a81ef6fbb360e679bb7b3930010c165610a
|
/man/reset_method.Rd
|
d0c0bacb52a226ed5979684d007aa66e40992a76
|
[] |
no_license
|
stephens999/dscr
|
266c215e969bd1bdf27ee51b9d2e30d49baa87bc
|
9bcecf5e0848b384a9e58331c890316ea9335df2
|
refs/heads/master
| 2021-01-19T02:10:48.104730
| 2018-06-29T19:26:07
| 2018-06-29T19:26:07
| 26,604,817
| 13
| 14
| null | 2018-06-29T17:12:24
| 2014-11-13T19:46:43
|
R
|
UTF-8
|
R
| false
| true
| 619
|
rd
|
reset_method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsc.R
\name{reset_method}
\alias{reset_method}
\title{Removes all output and scores for a method.}
\usage{
reset_method(dsc, methodname, force = FALSE)
}
\arguments{
\item{dsc}{A dsc object.}
\item{methodname}{String indicating name of methods to remove output.}
\item{force}{Boolean, indicates whether to proceed without
prompting user (prompt is to be implemented).}
}
\value{
Nothing; simply deletes files.
}
\description{
Removes all output and scores for a method; primary
intended purpose is to force re-running of that method.
}
|
914860bfb7b5d1ecfdfacda52a95a4e5d33e2f99
|
2bf497fe5f5d1e5baaba86d7934885106c6c6554
|
/Scripts/CategoryEnrichment.R
|
612647bac6e0e808a4b23246dee85fe3ef2dbdbf
|
[] |
no_license
|
ericfournier2/EMAP
|
25b33cc001f6f5cb96e5009c879729d295ee7251
|
3bcfe1adbf1c7f618332856016362c89d4b72210
|
refs/heads/master
| 2022-09-30T20:14:33.518223
| 2020-06-04T18:42:35
| 2020-06-04T18:42:35
| 269,433,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,397
|
r
|
CategoryEnrichment.R
|
library(ggplot2)
library(reshape2)
library(png)
library(proto)
library(gridExtra)
# Add a "horizontal bar" type to ggplots, based on geom_bar. This is needed for the
# combined graph. Briefly, you cannot combine facets with space="free" and scale="free"
# with the coord_flip() object. However, geom_bar only has vertical bars, so you can't
# have horizontal bars without coord_flip(). This makes it impossible to create the
# combined graph without having all categories show up in all facets.
# By creating a new type which is equivalent to geom_bar, but with x and y reversed,
# we remove the need for the coord_flip(), and thus we can use free space/scale in the facets.
# Constructor for horizontal bars: same interface as ggplot2's geom_bar(),
# but delegates to the GeomHBar proto object defined below, which draws the
# bar along x instead of y. stat and position are fixed to "identity".
geom_hbar <- function (mapping = NULL, data = NULL, ...) {
GeomHBar$new(mapping = mapping, data = data, stat = "identity", position = "identity", ...)
}
# proto-based geom backing geom_hbar(): a copy of ggplot2's bar geom with
# the x/y roles swapped so bars extend horizontally.
# NOTE(review): relies on internals of the old proto-based ggplot2 object
# system (ggplot2:::Geom, StatIdentity, GeomRect, %||%); it will not work
# with modern ggproto-based ggplot2 releases — pin the ggplot2 version.
GeomHBar <- proto(ggplot2:::Geom, {
objname <- "bar"
default_stat <- function(.) StatIdentity
default_pos <- function(.) PositionIdentity
default_aes <- function(.) aes(colour=NA, fill="grey20", size=0.5, linetype=1, weight = 1, alpha = NA)
# y (not x) is the mandatory aesthetic because the bar extends along x.
required_aes <- c("y")
reparameterise <- function(., df, params) {
# Default bar thickness: 90% of the resolution of the y values.
df$width <- df$width %||%
params$width %||% (resolution(df$y, FALSE) * 0.9)
# Bars span from 0 to the x value; y +/- half a width gives the thickness.
transform(df,
xmin = pmin(x, 0), xmax = pmax(x, 0),
ymin = y - width / 2, ymax = y + width / 2, width = NULL
)
}
# Rendering is delegated to the rectangle geom.
draw_groups <- function(., data, scales, coordinates, ...) {
GeomRect$draw_groups(data, scales, coordinates, ...)
}
guide_geom <- function(.) "polygon"
})
# Given a list of character vectors, builds a matrix representing which elements
# are part of which element of the list. For example, given the following list:
# [[1]] ""
# [[2]] "A B C"
# [[3]] "D"
# [[4]] "B D"
# The function will return the following matrix:
# A B C D
# [1]
# [2] 1 1 1
# [3] 1
# [4] 1 1
#
# Parameters:
# listOfValues - A list of character vectors
# fillPresent - The value to put in the matrix when an element is part of the vector.
# fillAbsent - The value to put in the matrix when an element is not part of the vector.
#
# Returns:
# The matrix indicating the presence of each element in the character vectors.
#
# Notes
# Obtained from Stack Overflow when looking for a way to optimize my own very slow function:
# http://stackoverflow.com/questions/19594253/optimizing-a-set-in-a-string-list-to-a-set-as-a-matrix-operation/19594838#19594838
# Original author says he will eventually add it to his "splitstackshape" package on CRAN.
charBinaryMat <- function(listOfValues, fillPresent=TRUE, fillAbsent = FALSE) {
  # Column levels: every distinct string across all vectors, sorted.
  # as.character() keeps this well-defined (character(0)) for an empty list,
  # where unlist() returns NULL.
  lev <- sort(unique(as.character(unlist(listOfValues, use.names = FALSE))))
  m <- matrix(fillAbsent, nrow = length(listOfValues), ncol = length(lev))
  colnames(m) <- lev
  # Fixed: seq_len() instead of 1:nrow(m) — the original evaluated 1:0 for
  # an empty input list and raised a subscript error.
  for (i in seq_len(nrow(m))) {
    # Name-based column assignment: mark this row's categories as present.
    m[i, listOfValues[[i]]] <- fillPresent
  }
  m
}
# Wrapper for charBinaryMat which splits the set of individual strings into
# a list of character vectors and removes leading/trailing spaces.
# Split each space-separated category string into tokens (after stripping
# leading spaces) and delegate to charBinaryMat() to build the membership
# matrix: one row per input string, one column per distinct category.
buildCategoryMatrix <- function(str) {
  trimmed <- sub("^ +", "", str)
  tokens <- strsplit(trimmed, " ", fixed = TRUE)
  charBinaryMat(tokens)
}
# Makes all category enrichment calculations for a single category.
# Parameters:
# allCategoryMembership
# A vector of size nrow(annotations) indicating whether each probe within
# the annotation fits within the given category.
# annotations
# The annotation data-frame for the whole chip.
# diffExpr
# The DiffExpr object returned by the doLimmaAnalysis function.
# Returns:
# A vector containing the following information regarding the category enrichment:
# # of successes - Number DE/DM probes in the given category
# # of drawings - Total number of DE/DM probes
# # of possible successes - Number of probes in the given category
# # of cases - The total number of probes
# % of successes - Proportion of DE/DR probes in the given category within all DE/DR probes.
# % of possible successes - Proportion of DE/DR probes in the given category within all probes.
# p-value-low - Significance of the lower tail hypergeometric test
# p-value-high - Significance of the "high" tail hypergeometric test
# # Hyper - The number of hypermethylated probes within this category.
# % Hyper - The proportion of hypermethylated DE/DM probes within this category.
# # HyperAll - The total number of hypermethylated probes.
# % HyperAll - The percentage of hypermethylated DE/DM probes within all probes.
evaluateCategory <- function(allCategoryMembership, annotations, diffExpr) {
  # Flag, for every array probe, whether it is in the DE/DM (selected) list.
  isSelected <- annotations$Probe %in% diffExpr$ID
  # Hypergeometric quantities: a "success" is a selected probe that also
  # belongs to the category.
  nSuccess <- sum(allCategoryMembership & isSelected)
  nCategory <- sum(allCategoryMembership)
  nTotal <- length(allCategoryMembership)
  nSelected <- sum(isSelected)
  fracSuccess <- nSuccess / nSelected
  fracCategory <- nCategory / nTotal
  # Hypermethylation split within this category's selected probes; column 2
  # of diffExpr holds the signed statistic (positive = hypermethylated).
  inCategory <- diffExpr$ID %in% annotations$Probe[allCategoryMembership]
  nHyper <- sum(diffExpr[inCategory, 2] > 0)
  fracHyper <- nHyper / sum(inCategory)
  nHyperAll <- sum(diffExpr[, 2] > 0)
  nHyperRef <- nSuccess - nHyper
  # Both tails of the hypergeometric enrichment test.
  pLow <- phyper(nSuccess, nCategory, nTotal - nCategory, nSelected, lower.tail=TRUE)
  pHigh <- phyper(nSuccess, nCategory, nTotal - nCategory, nSelected, lower.tail=FALSE)
  # Mixing numbers and strings in c() coerces the whole vector to character;
  # evaluateCategories() converts the numeric columns back afterwards.
  c("# of successes" = nSuccess,
    "# of drawings" = nSelected,
    "# of possible successes" = nCategory,
    "# of cases" = nTotal,
    "% of successes" = fracSuccess,
    "% of possible successes" = nCategory / nTotal,
    "p-value-low" = pLow,
    "p-value-high" = pHigh,
    "# Hyper Other" = nHyper,
    "# Hyper Ref" = nHyperRef,
    "% Hyper Other" = fracHyper,
    "% Hyper Ref" = 1 - fracHyper,
    "# HyperAll" = nHyperAll,
    "% HyperAll" = nHyperAll / nrow(diffExpr),
    "Relative DMR" = log2(fracSuccess / (fracCategory)),
    "Relative DMR Count" = paste(nSuccess, "\n(", sprintf("%2.1f", fracSuccess * 100), "%)", sep=""),
    "Relative Hyper" = log2(fracHyper / (1 - fracHyper)),
    "Relative Hyper Count" = paste(nSuccess - nHyper, ":", nHyper, sep=""),
    "Enrich Hyper Other" = log2((nHyper / nHyperAll) / fracCategory),
    "Enrich Hyper Ref" = log2((nHyperRef / (nSelected - nHyperAll)) / fracCategory)
  )
}
# Given a vector of strings describing which categories all probes fit in, perform category
# enrichment for all categories present in the vector, minus those in "invCategories", which
# are completely removed prior to the analysis.
#
# Parameters:
# categories
# A vector of strings, of the same size as nrow(annotations), describing which categories
# each probe fits in. If multiple categories are present, they should be split using
# spaces. Accordingly, category names should not contain spaces.
# invCategories
# A list of categories which should be removed from the analysis. Used for "Unknown"
# categories.
# annotations
# The annotation data-frame for the whole chip.
# diffExpr
# The DiffExpr object returned by the doLimmaAnalysis function.
#
# Returns
# A matrix containing the enrichment analysis results.
evaluateCategories <- function(categories, invCategories, annotations, diffExpr) {
  membership <- buildCategoryMatrix(categories)
  # Drop the excluded ("Unknown"-style) categories and every probe that
  # belongs to one of them, from both the annotation and the DE/DM list,
  # before computing the enrichment statistics.
  if (length(invCategories) != 0) {
    # as.matrix() keeps the row-wise any() working when only one excluded
    # category column is selected (plain [, ] would drop to a vector).
    excluded <- apply(as.matrix(membership[, invCategories]), 1, any)
    membership <- membership[!excluded, !(colnames(membership) %in% invCategories)]
    diffExpr <- diffExpr[!(diffExpr$ID %in% annotations$Probe[excluded]), ]
    annotations <- annotations[!excluded, ]
  }
  # One row of statistics per remaining category.
  raw <- t(apply(membership, 2, evaluateCategory, annotations, diffExpr))
  # evaluateCategory() returns character vectors; restore numeric type for
  # every column except the textual "... Count" ones.
  isText <- grepl("Count", colnames(raw))
  numericPart <- raw[, !isText]
  textPart <- raw[, isText]
  mode(numericPart) <- "numeric"
  data.frame(numericPart, textPart, check.names = FALSE)
}
# Simple enrichment analysis for various categories of genes/probes, namely:
# - Distance from a CpG island
# - Length of the associated CpG island
# - Density of the associated CpG island
# - Type of repeated elements present in the fragment
# Parameters:
# diffExpr: The list of differentially methylated genes in an experiment
# annotations: The annotation of all probes
# Returns:
# A list with the results of the enrichment analysis.
enrichmentAnalysis <- function(diffExpr, annotations) {
# Categorize lengths of CpG islands: thresholds are the 20th/80th percentiles
# of the non-zero, non-missing island lengths.
thresholds <- quantile(annotations$CpG_Length[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
lengthCategory <- vector(length=nrow(annotations))
lengthCategory[annotations$CpG_Length<thresholds[1]] <- "Small"
lengthCategory[annotations$CpG_Length>thresholds[2]] <- "Long"
lengthCategory[annotations$CpG_Length==0] <- "No-CpG-Island"
lengthCategory[annotations$CpG_Length>=thresholds[1] & annotations$CpG_Length<=thresholds[2]] <- "Intermediate"
lengthCategory[is.na(annotations$CpG_Length)] <- "Unknown"
# Categorize CpG densities (same 20%/80% quantile scheme).
# NOTE(review): the subset below filters on CpG_Length (not CpG_Density) to
# pick the probes used for the density quantiles — presumably intentional
# (restrict to probes with an island), but worth confirming.
thresholds <- quantile(annotations$CpG_Density[annotations$CpG_Length!=0 & !is.na(annotations$CpG_Length)], c(0.20, 0.80))
densityCategory <- vector(length=nrow(annotations))
densityCategory[annotations$CpG_Density<thresholds[1]] <- "Low-Density"
densityCategory[annotations$CpG_Density>thresholds[2]] <- "High-Density"
densityCategory[annotations$CpG_Density==0] <- "No-CpG-Island"
densityCategory[annotations$CpG_Density>=thresholds[1] & annotations$CpG_Density<=thresholds[2]] <- "Intermediate-Density"
densityCategory[is.na(annotations$CpG_Density)] <- "Unknown"
# Categorize by type of genic region. Later assignments override earlier
# ones, so the effective priority (low to high) is: distal promoter <
# promoter < proximal promoter < intronic < exonic; an empty Chromosome
# annotation overrides everything with "Unknown".
geneRegionTypeCategory <- rep("No-nearby-gene", length=nrow(annotations))
geneRegionTypeCategory[annotations$Distal_Promoter != ""] <- "Distal-Promoter"
geneRegionTypeCategory[annotations$Promoter != ""] <- "Promoter"
geneRegionTypeCategory[annotations$Proximal_Promoter != ""] <- "Proximal-Promoter"
geneRegionTypeCategory[annotations$Intron != ""] <- "Intronic"
geneRegionTypeCategory[annotations$Exon != ""] <- "Exonic"
geneRegionTypeCategory[annotations$Chromosome==""] <- "Unknown"
# Categorize by proximity to CpG islands.
proximityCategory <- as.character(annotations$UCSC_CpG_Proximity)
proximityCategory[proximityCategory=="Shore"] <- "CpG Shore"
proximityCategory[proximityCategory=="Shelf"] <- "CpG Shelf"
proximityCategory[proximityCategory=="Island"] <- "CpG Islands"
proximityCategory[proximityCategory==""] <- "Unknown"
# Category tokens must not contain spaces (evaluateCategories splits on
# spaces), so the first space is turned into a dash.
proximityCategory <- sub(" ", "-", proximityCategory, fixed=TRUE)
# Categorize repeat classes: keep only the class part (before "/") of each
# space-separated class/family token.
repeatClasses <- sub("/.*$", "", gsub("/.*? ", " ", annotations$Fragment_RepeatClass))
repeatClasses[repeatClasses==""] <- "No_Repeats"
# Transcription factor binding sites (TFBS) classes.
tfClasses <- as.character(annotations$TFBS)
tfClasses[tfClasses==""] <- "None"
# Uncategorized ("Unknown"/"None") probes are excluded inside
# evaluateCategories() to remove the bias they induce.
result <- list(
GeneRegion=evaluateCategories(geneRegionTypeCategory, "Unknown", annotations, diffExpr),
Proximity=evaluateCategories(proximityCategory, "Unknown", annotations, diffExpr),
Length=evaluateCategories(lengthCategory, "Unknown", annotations, diffExpr),
Density=evaluateCategories(densityCategory, "Unknown", annotations, diffExpr),
RepeatClasses=evaluateCategories(repeatClasses, c(), annotations, diffExpr))
# Only evaluate TFBS enrichment if there are annotations for it.
if(length(unique(tfClasses)) > 1) {
result[["TFBS"]] <- evaluateCategories(tfClasses, "None", annotations, diffExpr)
}
# Display orders for the plots (TFBS is intentionally left untouched).
relativeOrders=list(Proximity=c("Open Sea", "CpG Shelf", "CpG Shore", "CpG Islands"),
Length=c("No CpG Island","Small","Intermediate","Long"),
Density=c("No CpG Island","Low Density","Intermediate Density","High Density"),
GeneRegion=c("No nearby gene", "Distal Promoter", "Promoter", "Proximal Promoter", "Exonic", "Intronic"),
RepeatClasses=c("No Repeats", "SINE", "LINE", "Simple repeat", "LTR", "Low complexity", "DNA"))
# Convert the dash/underscore tokens back to spaces and reorder the rows of
# every table (except TFBS) for graphical representation.
for(i in names(relativeOrders)) {
rownames(result[[i]]) <- gsub("[-_]", " ", rownames(result[[i]]))
result[[i]] <- result[[i]][relativeOrders[[i]],]
}
return(result)
}
# Named two-colour palette for reference-vs-other plots: red is mapped to
# the reference condition and blue to the other condition.
getTwoColorVector <- function(refCondition, othercondition) {
  palette_pair <- c("#FF1100", "#3399FF")
  names(palette_pair) <- c(refCondition, othercondition)
  palette_pair
}
# Colour palette for multi-category graphs.
# Arguments:
#   colorNames:  categories the colours are assigned to (in order).
#   appendWhite: if TRUE, the last assigned colour is replaced with white.
#   singleColor: if TRUE, returns the same blue repeated once per category
#                (unnamed vector).
getColorVector <- function(colorNames, appendWhite=FALSE, singleColor=FALSE) {
  if (singleColor) {
    return(rep("#3399FF", length(colorNames)))
  }
  cols <- c("#3399FF", "#55CC00", "#FFCC00", "#FF1100", "#FFFF00", "#999999", "#CC0099")
  cols <- cols[1:length(colorNames)]
  # Optionally blank out the last category (used for the "All" bar).
  if (appendWhite) {
    cols[length(cols)] <- "#FFFFFF"
  }
  names(cols) <- colorNames
  cols
}
# Takes a data frame returned by the enrichmentAnalysis function and converts it to
# a data frame appropriate for use with our ggplot plots.
# 1. Row names are converted into an ordered factor and put in the "Category" column.
# 2. Items in the columnMap vector are mapped in the data frame. For example, if one
# element of the columnMap vector is EnrichPercent="% of possible successes", then
# the "% of possible successes" column of the original data frame is mapped to the
# "EnrichPercent" column of the returned data frame.
# Parameters:
# enrichData : The raw enrichment data to be converted.
# columnMap : The column mapping.
# insertLineBreak : If true, spaces in the category names are converted to line-breaks.
# reverseOrder : If true, category levels are set in the reverse order of the row order.
# Returns:
# A data frame appropriate for use with ggplot.
mapToDataFrame <- function(enrichData, columnMap, insertLineBreak=FALSE, reverseOrder=FALSE) {
  # Row names become an ordered "Category" factor (optionally with spaces
  # replaced by line breaks and/or the level order reversed).
  labels <- rownames(enrichData)
  if (insertLineBreak) {
    labels <- gsub(" ", "\n", labels)
  }
  levelOrder <- if (reverseOrder) rev(labels) else labels
  out <- data.frame(Category = factor(labels, levels = levelOrder))
  # Map each requested source column into the named target column.
  for (target in names(columnMap)) {
    out[[target]] <- enrichData[, columnMap[target]]
  }
  rownames(out) <- out$Category
  out
}
# Adds a row at the end of a data frame returned by enrichmentAnalysis which creates an
# "All" category which serves as a summary of all other categories.
appendAllRow <- function(enrichData) {
  # Start from the first row so all other columns carry sensible values,
  # then overwrite the fields that summarise every category combined.
  summary_row <- enrichData[1, ]
  hyper_all <- summary_row[["% HyperAll"]]
  summary_row[["# of successes"]] <- summary_row[["# of drawings"]]
  summary_row[["% of successes"]] <- 1
  summary_row[["p-value-low"]] <- 1
  summary_row[["p-value-high"]] <- 1
  summary_row[["# Hyper Other"]] <- summary_row[["# HyperAll"]]
  summary_row[["% Hyper Other"]] <- hyper_all
  summary_row[["% Hyper Ref"]] <- 1 - hyper_all
  summary_row[["Relative DMR"]] <- 0
  summary_row[["Relative Hyper"]] <- log2(hyper_all / (1 - hyper_all))
  summary_row[["Relative Hyper Count"]] <- paste(
    summary_row[["# of successes"]] - summary_row[["# Hyper Other"]],
    ":", summary_row[["# Hyper Other"]], sep = "")
  rbind(enrichData, "All" = summary_row)
}
# Produces a stacked bar plot comparing the distribution of probes on the whole array with that
# of those within the list of differentially methylated probes.
# Parameters:
# enData: A matrix, with as many rows as there are categories, and two columns.
# Column 1 should contain the proportion of probes in this category for the whole array,
# column 2 should contain the proportion of probes in this category for differentially expressed probes.
# categoryNames: The ordered display names of the categories.
# legendName: The name to give to the plot and the categories' legend.
# Stacked bar plot comparing each category's share among all array probes
# vs. among the selected (DE/DM) probes; saves the plot as a PNG file named
# after legendName.
# NOTE(review): the ggplot object is never printed nor passed to ggsave();
# ggsave() falls back to last_plot() — confirm this saves the intended plot
# when called non-interactively.
doStackedBarPlot <- function(enrichData, legendName) {
# Two stacked columns: array-wide proportions and selected-probe proportions.
dataDF <- mapToDataFrame(enrichData,
c("Proportion within\nall EDMA probes"="% of possible successes",
"Proportion within\nselected probes"="% of successes"))
mData <- melt(dataDF, id.vars="Category", variable.name="Type", value.name="Value")
colorVector <- getColorVector(rownames(enrichData))
# Generate and save the plot.
ggplot(mData, aes(x=Type, y=Value, fill=Category)) + # Set plot data.
geom_bar(stat="identity", colour="black") + labs(x="", y="") + # Set plot type (stacked bars) and axis labels (none).
theme( panel.grid.major.x = element_blank(), # Remove X grid lines.
axis.text = element_text(colour="black")) + # Set the axis text to black rather than grey.
scale_fill_manual(name=legendName, breaks=rev(rownames(enrichData)), # Set legend order so it corresponds to the stacked block order.
values=colorVector) # Set legend colors.
ggsave(filename=paste(legendName, " - Absolute proportions of selected probes.png"),
width=7, height=7, units="in") # Save plot to file.
}
# Generates a plot showing the percentage of DMRs which are methylated in otherCondition.
# Each bar is accompanied by a percentage showing the ratio the percentage of hypermethylaed
# DMRs in the other condition in this category and the percentage of hypermethylaed DMRs in
# the other condition for all DMRs.
# Horizontal bar plot: per category, the proportion of DMRs that are
# hyper-methylated in otherCondition, plus an "All" reference bar; saves
# "<legendName> - Hypermethylation.png".
doHyperPlot <- function(enrichData, categoryNames, legendName, otherCondition) {
# Get value of the "All" bar: the overall hyper-methylated proportion.
hyperAll <- enrichData[,"% HyperAll"][1]
# Replace spaces with line-breaks since category names will be written horizontally.
rownames(enrichData) <- gsub(" ", "\n", categoryNames, fixed=TRUE)
# Reorder category names so that the first will be up on top.
categoryNames <- factor(c("All", rownames(enrichData)), levels=c(rownames(enrichData), "All"))
# Build vector of values, which are the values in the input argument prepended with the value
# of the HyperAll column.
hyperValues <- c(hyperAll, enrichData[,"% Hyper Other"])
# Build labels, which are the proportions of the bar length compared to the "All" bar,
# formatted as a percentage.
hyperLabels <- sprintf("%2.1f%%", hyperValues/hyperAll*100)
# If we're above the "All" line, but there isn't enough space to put the label inside of the
# bar without overlapping said line, switch the label to outside of the bar.
hyperLabelPos <- ifelse((hyperValues > hyperAll) & (hyperValues - 0.15 < hyperAll), hyperAll - 0.01, hyperValues - 0.01)
# Is the previously chosen position too close to the left edge of the graph?
tooCloseToLeft <- hyperLabelPos < 0.20
# If so, move the label to outside of the bar.
hyperLabelPos <- ifelse(tooCloseToLeft, hyperValues + 0.01 , hyperLabelPos)
# Finally, if the label is outside of the bar but overlaps the "All" line, move it right of the "All" line.
# NOTE(review): "hyperValues - 20" is almost certainly a typo for
# "hyperValues - 0.20" (values here are proportions in [0, 1]); as written
# this sub-condition is always TRUE — confirm before changing plot output.
hyperLabelPos <- ifelse(tooCloseToLeft & (hyperValues < hyperAll) & ((hyperValues - 20) < hyperAll), hyperAll + 0.01, hyperLabelPos)
# Label justification: right-justified (1), unless there's no space to the left, in which case it will be left-justified (0).
hyperLabelJust <- ifelse(tooCloseToLeft, 0, 1)
# Build data-frame for ggplot.
hyperDF <- data.frame(Category=categoryNames, Hyper=hyperValues, Label=hyperLabels,
LabelPos=hyperLabelPos, Just=hyperLabelJust)
# Match colors; the "All" bar gets white (appendWhite).
colorVector <- getColorVector(hyperDF$Category, appendWhite=TRUE)
# Build the plot. NOTE(review): relies on ggsave()'s last_plot() default;
# the plot is never explicitly printed — confirm the intended plot is saved.
ggplot(hyperDF, aes(x=Category, y=Hyper, fill=Category)) + # Set data
geom_bar(stat="identity", colour="black") + # Set type (bars)
geom_text(aes(x=Category, y=LabelPos, label=Label, hjust=Just)) + # Text labels
geom_hline(yintercept=enrichData[,"% HyperAll"][1], linetype="dotted") + # Dotted line on "All" level.
ylim(c(0,1)) + # Always go from 0% to 100%
labs(x="", y=paste("Proportion of DMRs which are hyper-methylated in", otherCondition)) + # Set axis labels
theme( panel.grid.major.x = element_blank(), # Remove x grid lines
axis.text = element_text(colour="black", size=14), # Set axis text to black
legend.position="none") + # Remove legend
scale_fill_manual(values=colorVector) + # Set colors
coord_flip() # Turn graphic sideways.
ggsave(filename=paste(legendName, " - Hypermethylation.png"),
width=7, height=7, units="in")
}
# Generates a plot showing the percentage of DMRs which are methylated in each conditions,
# as a stacked bar plot.
# Stacked bar plot of the hyper-methylated proportion per condition and
# category (including a synthetic "All" row); saves the plot as a PNG named
# after legendName.
doStackedHyperPlot <- function(enrichData, legendName, refCondition, otherCondition) {
# Add the summary "All" row so it appears as an extra bar.
enrichData <- appendAllRow(enrichData)
# Long format: one row per (category, condition), line-broken labels.
hyperDF <- rbind(mapToDataFrame(enrichData, c(Hyper="% Hyper Other"), TRUE),
mapToDataFrame(enrichData, c(Hyper="% Hyper Ref"), TRUE))
hyperDF <- cbind(hyperDF, Tissue=c(rep(otherCondition, nrow(enrichData)), rep(refCondition, nrow(enrichData))))
# Re-level so the bars follow the original category order.
hyperDF$Category <- factor(hyperDF$Category, levels=hyperDF$Category[1:nrow(enrichData)])
# Build the plot. NOTE(review): relies on ggsave()'s last_plot() default;
# the plot is never explicitly printed — confirm the intended plot is saved.
ggplot(hyperDF, aes(x=Category, y=Hyper, fill=Tissue)) + # Set data
geom_bar(stat="identity", colour="black") + # Set type (bars)
geom_hline(yintercept=enrichData[,"% HyperAll"][1], linetype="dotted") + # Dotted line on "All" level.
ylim(c(0,1.0000001)) + # Always go from 0% to 100%. Add a tiny bit for imprecisions due to rounding.
labs(x="", y="Proportion of selected probes which are hyper-methylated") + # Set axis labels
theme( panel.grid.major.x = element_blank(), # Remove x grid lines
axis.text = element_text(colour="black", size=14)) + # Set axis text to black
scale_fill_manual(values=getTwoColorVector(refCondition, otherCondition)) + # Set colors
coord_flip() # Turn graphic sideways.
ggsave(filename=paste(legendName, " - Absolute proportions of hypermethylated elements within selected probes.png"),
width=7, height=7, units="in")
}
# Dodged (side-by-side) bar plot of the per-condition log2 enrichment ratios
# of hyper-methylated elements for each category; saves the plot as a PNG
# named after legendName.
doDodgedRelativeHyperPlot <- function(enrichData, legendName, refCondition, otherCondition) {
# Long format: one row per (category, condition), line-broken labels.
hyperDF <- rbind(mapToDataFrame(enrichData, c(Hyper="Enrich Hyper Other"), TRUE),
mapToDataFrame(enrichData, c(Hyper="Enrich Hyper Ref"), TRUE))
hyperDF <- cbind(hyperDF, Tissue=c(rep(otherCondition, nrow(enrichData)), rep(refCondition, nrow(enrichData))))
# Re-level so the bars follow the original category order.
hyperDF$Category <- factor(hyperDF$Category, levels=hyperDF$Category[1:nrow(enrichData)])
# Build the plot. NOTE(review): relies on ggsave()'s last_plot() default;
# the plot is never explicitly printed — confirm the intended plot is saved.
ggplot(hyperDF, aes(x=Category, y=Hyper, fill=Tissue)) + # Set data
geom_bar(stat="identity", colour="black", position="dodge") + # Set type (bars)
geom_hline(yintercept=0, linetype="solid", size=1) + # Zero line (no enrichment)
labs(x="", y="log2(Enrichment ratio)") + # Set axis labels
theme( panel.grid.major.x = element_blank(), # Remove x grid lines
axis.text = element_text(colour="black", size=14)) + # Set axis text to black
scale_fill_manual(values=getTwoColorVector(refCondition, otherCondition)) + # Set colors
coord_flip() # Turn graphic sideways.
ggsave(filename=paste(legendName, " - Per-tissue enrichment ratios of hypermethylated elements within selected probes.png"),
width=7, height=7, units="in")
}
# Draws a horizontal bar chart of per-category log2 enrichment ratios, with a
# divergence-arrow scale (and its three captions) stacked above it, then
# writes the assembled figure to a PNG file.
#
# enrichPercent: data.frame with at least Category, EnrichPercent and Count columns.
# topLabels:     character(3) captions for the left/middle/right of the arrow scale.
# plotName:      output PNG file name.
# baseline:      x position of the dashed reference line (0 coincides with the
#                solid zero line and is therefore invisible).
# appendWhite, singleColor: forwarded to getColorVector() to pick the palette.
# combined:      facet the bars by the Categorization column (used when several
#                enrichment tables have been concatenated).
# colorColumn:   currently unused; bars are always colored by Category.
# showCount:     annotate each bar tip with its Count value.
doRelativePlot <- function(enrichPercent, topLabels, plotName, baseline=0, appendWhite=FALSE, singleColor=FALSE, combined=FALSE, colorColumn="", showCount=TRUE) {
  # Cap infinite enrichment scores at +/-5. Infinities arise from division by
  # zero, which cannot occur when comparing against the proportion of probes
  # in the array; it only occurs when comparing proportions of hypermethylated
  # probes, where capping the ratio is a reasonable representation of Inf.
  enrichPercent$EnrichPercent[enrichPercent$EnrichPercent == -Inf] <- -5
  enrichPercent$EnrichPercent[enrichPercent$EnrichPercent == Inf] <- 5
  # NaN arises from 0/0 (both sides have 0 hypermethylated probes): neither
  # side can be considered enriched, so a ratio of 0 makes sense.
  enrichPercent$EnrichPercent[is.nan(enrichPercent$EnrichPercent)] <- 0
  # Use a taller canvas and a wider left margin when there are many categories.
  graphHeight <- 7
  leftMargin <- 5
  if(nrow(enrichPercent) > 10) {
    graphHeight <- 10
    leftMargin <- 10
  }
  # Offset of the count labels: a small fraction of the x-span (2.5% for bars
  # pointing right, 5% for bars pointing left), pushed past the bar tip.
  labelOffsets <- (max(enrichPercent$EnrichPercent) - min(enrichPercent$EnrichPercent)) * # x-span
                  ifelse(sign(enrichPercent$EnrichPercent) == 1, 0.025, 0.05) *           # bar orientation
                  sign(enrichPercent$EnrichPercent)
  labelJust <- ifelse(sign(labelOffsets) == 1, 0, 1)
  enrichPercent <- cbind(enrichPercent, Offset=enrichPercent$EnrichPercent + labelOffsets, Just=labelJust)
  # Named color vector so colors match those of the stacked-bar plots.
  colorVector <- getColorVector(enrichPercent$Category, appendWhite, singleColor)
  # Span of the x axis: the largest absolute enrichment value rounded up to
  # the closest 0.5 increment, or 1.5, whichever is larger.
  ratioEdge <- max(c(abs(min(enrichPercent$EnrichPercent)), max(enrichPercent$EnrichPercent))) + 0.3
  ratioRounded <- ceiling(ratioEdge/0.5)*0.5
  ratioLimit <- max(1.5, ratioRounded)
  # colorColumn is currently unused: bars are always colored by Category.
  enrichPercent <- cbind(enrichPercent, ColorInfo=enrichPercent$Category)
  # Generate the main plot. ggplot2 has no geom_hbar(); geom_bar(stat="identity")
  # with the discrete Category on the y aesthetic draws the horizontal bars.
  gPlot <- ggplot(enrichPercent, aes(y=Category, x=EnrichPercent, fill=ColorInfo)) +  # Set data
    geom_bar(stat="identity", colour="black") +                 # Horizontal bars
    geom_vline(xintercept=0, linetype="solid", size=1) +        # Delineate both sides
    geom_vline(xintercept=baseline, linetype="dashed", size=0.25) + # Baseline; invisible when 0
    labs(y="", x="log2(Enrichment ratio)") +                    # Axis labels
    xlim(c(-ratioLimit, ratioLimit)) +                          # Symmetric axis limits
    theme( panel.grid.major.y = element_blank(),                # Remove y grid lines
           axis.text = element_text(colour="black", size=14),   # Black axis text
           plot.margin = unit(c(0,1,1,1), "lines"),
           legend.position="none") +                            # Remove legend
    scale_fill_manual(values=colorVector)                       # Set colors
  if(showCount) {
    gPlot <- gPlot + geom_text(aes(x=Offset, label=Count, hjust=Just)) # Per-bar count labels
  }
  heightSplit <- c(0.2, 0.8)
  if(combined) {
    gPlot <- gPlot + facet_grid(Categorization~., scale="free", space="free")
    heightSplit <- c(0.12, 0.88)
  }
  # Disable clipping in the main grob so that labels can overflow the panel.
  enrichGrob <- ggplot_gtable(ggplot_build(gPlot))
  enrichGrob$layout$clip[enrichGrob$layout$name == "panel"] <- "off"
  # Fall back to the bundled arrow bitmap when no path has been configured.
  # exists() takes the variable *name* as a string; the previous unnegated,
  # unquoted exists(divergentScalePath) errored whenever the variable was
  # undefined and skipped the default assignment when it was defined.
  if(!exists("divergentScalePath")) {
    divergentScalePath <- "DivergenceScaleNoLabel.png"
  }
  divergenceScale <- readPNG(divergentScalePath)
  gScale <- rasterGrob(divergenceScale, interpolate=TRUE)
  # Top annotation: the arrow scale plus its three captions.
  labelDF <- data.frame(Label=topLabels, Pos=c(-1,0,1), Y=c(2.5,2.5,2.5)) # Label positions
  annot <- ggplot(labelDF, aes(x=Pos, label=Label, y=Y)) +       # Set data
    geom_text(size=4.3) +                                        # Text geometry
    labs(y="", x="") + xlim(c(-1.2, 1.2)) + ylim(c(0,4)) +       # Blank labels, fixed limits
    theme( panel.grid = element_blank(),                         # Remove grid lines
           axis.text = element_blank(),                          # Remove axis text
           plot.margin = unit(c(0,1,0,leftMargin), "lines"),     # Keep only the left margin
           legend.position="none",                               # Remove the legend
           panel.background=element_blank(),                     # Remove the background
           axis.ticks=element_blank()) +                         # Remove the ticks
    annotation_custom(gScale, ymin=-1, ymax=1, xmin=-1.2, xmax=1.2) # Add the arrow scale
  # Disable clipping so the arrow scale draws close enough to the actual plot.
  annotGrob <- ggplot_gtable(ggplot_build(annot))
  annotGrob$layout$clip[annotGrob$layout$name == "panel"] <- "off"
  # Stack the annotation over the bars and write the figure out. ggsave()
  # cannot be used for grid-drawn output, so open a png device explicitly.
  allPlots <- arrangeGrob(annotGrob, enrichGrob, nrow=2, heights=heightSplit)
  png(plotName, width = 7, height = graphHeight, units = "in", res=300)
  grid.draw(allPlots)
  dev.off()
}
# Three arrow-scale captions (left / centre / right) used above the relative
# enrichment plots of differentially methylated regions.
getDMREnrichmentLabels <- function() {
  c("Higher\nconservation\nof methylation",
    "Average\ndivergence\nof methylation",
    "Higher\ndivergence\nof methylation")
}
# Produces a bar plot comparing the distribution of probes on the whole array
# with that of the probes in the differentially methylated list, and returns
# the enrichment data.frame that was plotted.
doRelativeBarPlot <- function(enData, plotName, relativeOnly="", singleColor=FALSE) {
  # Pull the relative-enrichment value/count column pair into long format.
  enrichPercent <- mapToDataFrame(enData, c(EnrichPercent="Relative DMR", Count="Relative DMR Count"), TRUE, FALSE)
  # Default captions describe divergence of methylation; when a condition name
  # is supplied, phrase the captions relative to that condition instead.
  if(relativeOnly == "") {
    topLabels <- getDMREnrichmentLabels()
  } else {
    topLabels <- c(paste("Lower odds\nof methylation\nin", relativeOnly),
                   paste("Average odds\nof methylation\nin", relativeOnly),
                   paste("Higher odds\nof methylation\nin", relativeOnly))
  }
  doRelativePlot(enrichPercent, topLabels, paste(plotName, " - Enrichment ratios of selected probes.png"), 0, FALSE, singleColor)
  return(enrichPercent)
}
# Arrow-scale captions indicating which of the two conditions shows more
# hypermethylation (left = reference, right = other).
getHyperMethylationLabels <- function(refCondition, otherCondition) {
  c(paste("More\nhypermethylation\nin", refCondition),
    "Hypermethylation\nevenly spread",
    paste("More\nhypermethylation\nin", otherCondition))
}
# Produces a bar plot comparing the distribution of probes on the whole array
# with that of the hypermethylated probes in the selected list. Returns the
# enrichment data.frame that was plotted.
doRelativeHyperRatioPlot <- function(enData, plotName, refCondition, otherCondition, singleColor=FALSE) {
  # Append the aggregate "All" row; its ratio serves as the dashed baseline.
  withAll <- appendAllRow(enData)
  ratios <- mapToDataFrame(withAll, c(EnrichPercent="Relative Hyper", Count="Relative Hyper Count"), TRUE, FALSE)
  captions <- getHyperMethylationLabels(refCondition, otherCondition)
  outFile <- paste(plotName, " - Enrichment ratios of hypermethylated elements within selected probes.png")
  doRelativePlot(ratios, captions, outFile,
                 ratios$EnrichPercent[ratios$Category == "All"], appendWhite=TRUE, singleColor)
  return(ratios)
}
# Concatenates several enrichment tables (one per categorization) and draws a
# single faceted relative bar plot covering all of them.
#
# enrichDFList: named list of enrichment tables; names become facet labels.
# columnNames:  character(2): the value column and the count column to plot.
# plotName:     prefix of the output PNG file name.
# topLabels:    character(3) captions for the divergence-arrow scale.
# addBaseline:  draw the aggregate "All" value as a dashed reference line.
# colorColumn:  currently unused (kept for interface compatibility).
# showCount:    annotate each bar with its count.
doCombinedRelativeBarPlot <- function(enrichDFList, columnNames, plotName, topLabels, addBaseline=FALSE, colorColumn="", showCount=TRUE) {
  # Add a "Categorization" column for facetting.
  for(i in seq_along(enrichDFList)) {
    enrichDFList[[i]] <- cbind(enrichDFList[[i]], Categorization=names(enrichDFList)[i])
  }
  # Concatenate all separate enrichment data. do.call() handles lists of any
  # length; the previous explicit 3:length() loop evaluated to c(3, 2) and
  # indexed out of bounds whenever the list held fewer than three tables.
  finalDF <- do.call(rbind, enrichDFList)
  enrichPercent <- mapToDataFrame(finalDF, c(EnrichPercent=columnNames[1], Count=columnNames[2], Categorization="Categorization"))
  # Optional dashed reference line at the aggregate "All" value.
  baseline <- 0
  if(addBaseline) {
    baselineDF <- appendAllRow(finalDF)
    baseline <- baselineDF[rownames(baselineDF)=="All", columnNames[1]]
  }
  doRelativePlot(enrichPercent, topLabels, paste(plotName, " - Combined enrichment.png"),
                 baseline, appendWhite=FALSE, singleColor=TRUE, combined=TRUE, colorColumn="", showCount=showCount)
}
# Generate both a stacked bar plot and a relative bar plot for a set of data.
# enrichData:   enrichment table for one categorization (rows = categories).
# legendName:   category name; used as the output sub-directory and file prefix.
# refCondition / otherCondition: condition labels for the hypermethylation plots.
# relativeOnly: accepted for call-site symmetry but not used in this function.
# NOTE(review): if any plotting call errors, the working directory is left
# inside legendName (setwd("..") is not protected by on.exit()).
doPlots <- function(enrichData, legendName, refCondition, otherCondition, relativeOnly) {
  # Create output directory for this enrichment category, and move into it.
  dir.create(legendName, showWarnings=FALSE, recursive=TRUE)
  setwd(legendName)
  # Generate the plots. Each call writes its own PNG into the current directory.
  doStackedBarPlot(enrichData, legendName)
  doStackedHyperPlot(enrichData, legendName, refCondition, otherCondition)
  doRelativeBarPlot(enrichData, legendName)
  doDodgedRelativeHyperPlot(enrichData, legendName, refCondition, otherCondition)
  doRelativeHyperRatioPlot(enrichData, legendName, refCondition, otherCondition)
  # Move back to the enrichment directory.
  setwd("..")
}
# Plot all enrichment categories using stacked and side-by-side bars.
# enrich: list of enrichment tables keyed by categorization (Proximity, Length,
#         Density, GeneRegion, RepeatClasses, TFBS).
# refCondition / otherCondition: condition labels for hypermethylation plots.
# relativeOnly: when non-empty, only the relative plots are produced and their
#         captions are phrased relative to that condition.
plotEnrichmentData <- function(enrich, refCondition, otherCondition, relativeOnly="") {
  # Full plot set for the CpG-island and genic categorizations.
  if(relativeOnly == "") {
    doPlots(enrich$Proximity, "Distance from CpG Island", refCondition, otherCondition, relativeOnly)
    doPlots(enrich$Length, "CpG Island Length", refCondition, otherCondition, relativeOnly)
    doPlots(enrich$Density, "CpG Island Density", refCondition, otherCondition, relativeOnly)
    doPlots(enrich$GeneRegion, "Genic region", refCondition, otherCondition, relativeOnly)
  }
  # Repeats support only a subset of the plots, so handle them separately.
  dir.create("Repeat", showWarnings=FALSE, recursive=TRUE)
  setwd("Repeat")
  doRelativeBarPlot(enrich$RepeatClasses, "Repeat", relativeOnly)
  if(relativeOnly == "") {
    doStackedHyperPlot(enrich$RepeatClasses, "Repeat", refCondition, otherCondition)
    doRelativeHyperRatioPlot(enrich$RepeatClasses, "Repeat", refCondition, otherCondition)
    # Instead of a stacked bar graph, draw a dodged bar graph of the absolute
    # proportions, sorted by array-wide proportion.
    enData <- enrich$RepeatClasses[order(enrich$RepeatClasses[,"% of possible successes"], decreasing=TRUE),]
    enDataSubset <- enData[,c("% of possible successes", "% of successes")]
    colnames(enDataSubset) <- c("Proportion within\nall EDMA probes", "Proportion within\ndifferentially methylated probes")
    mData <- melt(as.matrix(enDataSubset))
    colnames(mData) <- c("Type", "Category", "value")
    mData$Type <- factor(mData$Type, levels = rownames(enDataSubset))
    ggplot(mData, aes(x=Type, y=value, fill=Category)) +
      geom_bar(stat="identity", colour="black", position="dodge") +
      labs(x="", y="") +
      theme( panel.grid.major.x = element_blank(),
             axis.text = element_text(colour="black")) +
      scale_fill_manual(values=c("#FFCC00", "#3399FF"))
    ggsave(filename="Repeat enrichment - Absolute bars.png", width=par("din")*1.5)
  }
  setwd("..")
  # TFBS enrichment is excluded from the combined figures.
  enrich[["TFBS"]] <- NULL
  # Tag every row with the condition its bar points toward. seq_along() is
  # safe even for an empty list (1:length() would yield c(1, 0)).
  for(i in seq_along(enrich)) {
    enrich[[i]] <- cbind(enrich[[i]], ColorInfo=ifelse(enrich[[i]][,"Relative Hyper"] < 0, refCondition, otherCondition))
  }
  doCombinedRelativeBarPlot(enrich, c("Relative Hyper", "Relative Hyper Count"), "Selected probes", getHyperMethylationLabels(refCondition, otherCondition), TRUE, "ColorInfo")
  doCombinedRelativeBarPlot(enrich, c("Relative DMR", "Relative DMR Count"), "Hypermethylation within selected probes", getDMREnrichmentLabels(), showCount=FALSE)
}
|
cc76ec88c746847ae5c9b20474a29edc9cc3d0ac
|
a5aad7fcca1e37d64f2b8af9e6eec1c66e5ab969
|
/man/totData.Rd
|
874d237041f97e2b4f567442e82ce9fd93b44300
|
[] |
no_license
|
slepape/EasyqpcR
|
cb4780e00b3cc65fcd12ddf9af8a6d2c52d8ff51
|
ebee75cef19b1ae8c7d8f818c413a8c53800a58c
|
refs/heads/master
| 2020-03-10T03:05:04.987845
| 2018-06-01T11:22:34
| 2018-06-01T11:22:34
| 129,154,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,326
|
rd
|
totData.Rd
|
\name{totData}
\alias{totData}
\title{
Aggregation of qPCR biological replicates and data transformation
}
\description{
This function aggregates qPCR biological replicates and calculates the main
parameters : mean (arithmetic or geometric), the standard deviation and the
standard error from your biological replicates of your experience. This
function has an algorithm published by Willems et al. (2008) which performs a
standardization procedure that can be applied to data sets that display high
variation between biological replicates. This enables proper statistical
analysis and drawing relevant conclusions. The procedure is not new, it has
been used in microarray data analysis and is based on a series of sequential
corrections, including log transformation, mean centering, and autoscaling.
}
\usage{
totData(data, r, geo = TRUE, logarithm = TRUE, base, transformation = TRUE,
nSpl, linear = TRUE, na.rm = na.rm)
}
\arguments{
\item{data}{
data.frame containing row datas (genes in columns, samples in rows, Cq values).
}
\item{r}{
numeric, number of qPCR replicates.
}
\item{geo}{
logical, the function will use the geometrical mean of your biological
replicates if TRUE or the arithmetic mean if FALSE.
}
\item{logarithm}{
logical, the NRQs will be log-transformed.
}
\item{base}{
numeric, the logarithmic base (2 or 10).
}
\item{transformation}{
logical, if TRUE, the transformation procedure for highly variable biological
replicates (but with the same tendency) will be done.
}
\item{nSpl}{
numeric, the number of samples.
}
\item{linear}{
logical, after the transformation procedure done, your raw data will be
normalized (anti-log-transformed).
}
\item{na.rm}{
logical, indicating whether NA values should be stripped before the computation
proceeds.
}
}
\details{
The standardization procedure used in this function (if TRUE for the
transformation argument) is based on the article of Willems et al. (2008).
This function performs successively these operations: log-transformation of
your raw data, mean of your log-transformed data for each biological replicate,
mean centering for each biological replicate, standard deviation of each
mean-centered biological replicate, autoscaling of your data, i.e., your
mean-centered data for each biological replicate will be divided by the
standard deviation of the mean-centered biological replicate and then
multiplicated by the mean of the standard deviation of all the biological
replicates.
For more information for the way to use this function, please see the vignette.
}
\value{
\item{Mean of your qPCR runs}{The geometric (if TRUE for geo) or arithmetic
mean of your biological replicates.}
\item{Standard deviations of your qPCR runs}{The standard deviation of your
biological replicates.}
\item{Standard errors of your qPCR runs}{The standard error of your biological
replicates.}
\item{Transformed data}{If TRUE for transformation, your raw data will be
transformed by the algorithm of Willems et al. (2008).}
\item{Reordered transformed data}{The transformed data reordered by rowname.}
}
\references{
Erik Willems, Luc Leyns, Jo Vandesompele. Standardization of real-time PCR gene
expression data from independent biological replicates. Analytical Biochemistry
379 (2008) 127-129 (doi:10.1016/j.ab.2008.04.036).
<url:http://www.sciencedirect.com/science/article/pii/S0003269708002649>
}
\author{
Sylvain Le pape <sylvain.le.pape@univ-poitiers.fr>
}
\examples{
data(qPCR_run1,qPCR_run2,qPCR_run3)
nrmData(data = qPCR_run1 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
nrmData(data = qPCR_run2 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
nrmData(data = qPCR_run3 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
## Isolate the calibrator NRQ values of the first biological replicate
a <- nrmData(data = qPCR_run1 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[[3]]
## Isolate the calibrator NRQ values of the first biological replicate
b <- nrmData(data = qPCR_run2 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[[3]]
## Isolate the calibrator NRQ values of the first biological replicate
c <- nrmData(data = qPCR_run3 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=c(1, 1, 1, 1), CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[[3]]
## Regrouping the calibrator NRQ values of all the biological replicates
d <- rbind(a, b, c)
## Calibration factor calculation
e <- calData(d)
## Attenuation of inter-run variation thanks to the calibration factor for the
## first biological replicate
nrmData(data = qPCR_run1 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
## Attenuation of inter-run variation thanks to the calibration factor for the
## second biological replicate
nrmData(data = qPCR_run2 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
## Attenuation of inter-run variation thanks to the calibration factor for the
## third biological replicate
nrmData(data = qPCR_run3 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)
## Isolate the NRQs scaled to control of the first biological replicate
a1 <- nrmData(data = qPCR_run1 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[1]
## Isolate the NRQs scaled to control of the second biological replicate
b1 <- nrmData(data = qPCR_run2 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[1]
## Isolate the NRQs scaled to control of the third biological replicate
c1 <- nrmData(data = qPCR_run3 , r=3, E=c(2, 2, 2, 2),
Eerror=c(0.02, 0.02, 0.02, 0.02), nSpl=5,
nbRef=2, Refposcol=1:2, nCTL=2,
CF=e, CalPos=5, trace=TRUE, geo=TRUE, na.rm=TRUE)[1]
## Data frame transformation
a2 <- as.data.frame(a1)
b2 <- as.data.frame(b1)
c2 <- as.data.frame(c1)
## Aggregation of the three biological replicates
d2 <- rbind(a2, b2, c2)
totData(data=d2, r=3, geo=TRUE, logarithm=TRUE, base=2,
transformation=TRUE, nSpl=5, linear=TRUE,
na.rm=TRUE)
}
\keyword{Biological replicates}
\keyword{Standardization procedure}
|
9902eb5124d8508bcf40097963ac645955ea5e60
|
25e8d08c92a5fcd67f7f36a11002ec90b9bb75b3
|
/R/datasets.R
|
f47646d8aa4b15cc3820d06177ef3ec3fd0f8128
|
[] |
no_license
|
FBartos/RoBMA
|
335f5dda8b3ebf263addda409ad01f8a66a70b99
|
8de6009b50670276efcaf65f4a9dd69b5f432fa3
|
refs/heads/master
| 2023-07-23T12:49:05.073261
| 2023-07-19T21:12:13
| 2023-07-19T21:12:13
| 259,885,114
| 7
| 3
| null | 2023-07-19T21:12:15
| 2020-04-29T09:43:18
|
R
|
UTF-8
|
R
| false
| false
| 3,525
|
r
|
datasets.R
|
#' @title 27 experimental studies from
#' \insertCite{anderson2010violent;textual}{RoBMA} that meet the best practice criteria
#'
#' @description The data set contains correlation coefficients, sample
#' sizes, and labels for 27 experimental studies focusing on the effect of
#' violent video games on aggressive behavior. The full original data can be
#' found at https://github.com/Joe-Hilgard/Anderson-meta.
#'
#'
#' @format A data.frame with 3 columns and 23 observations.
#'
#' @return a data.frame.
#'
#' @references
#' \insertAllCited{}
"Anderson2010"
#' @title 9 experimental studies from
#' \insertCite{bem2011feeling;textual}{RoBMA} as described in
#' \insertCite{bem2011must;textual}{RoBMA}
#'
#' @description The data set contains Cohen's d effect sizes, standard errors,
#' and labels for 9 experimental studies of precognition from the infamous
#' \insertCite{bem2011feeling;textual}{RoBMA} as analyzed in his later meta-analysis
#' \insertCite{bem2011must}{RoBMA}.
#'
#' @format A data.frame with 3 columns and 9 observations.
#'
#' @return a data.frame.
#'
#' @references
#' \insertAllCited{}
"Bem2011"
#' @title 5 studies with a tactile outcome assessment from
#' \insertCite{poulsen2006potassium;textual}{RoBMA} of the effect of potassium-containing toothpaste
#' on dentine hypersensitivity
#'
#' @description The data set contains Cohen's d effect sizes, standard errors,
#' and labels for 5 studies assessing the tactile outcome from a meta-analysis of
#' the effect of potassium-containing toothpaste on dentine hypersensitivity
#' \insertCite{poulsen2006potassium}{RoBMA} which was used as an example in
#' \insertCite{bartos2021bayesian;textual}{RoBMA}.
#'
#' @format A data.frame with 3 columns and 5 observations.
#'
#' @return a data.frame.
#'
#' @references
#' \insertAllCited{}
"Poulsen2006"
#' @title 881 estimates from 69 studies of a relationship between employment and
#' educational outcomes collected by \insertCite{kroupova2021student;textual}{RoBMA}
#'
#' @description The data set contains partial correlation coefficients, standard errors,
#' study labels, samples sizes, type of the educational outcome, intensity of the
#' employment, gender of the student population, study location, study design, whether
#' the study controlled for endogenity, and whether the study controlled for motivation.
#' The original data set including additional variables and the publication can be found
#' at http://meta-analysis.cz/students.
#' (Note that some standard errors and employment intensities are missing.)
#'
#' @format A data.frame with 11 columns and 881 observations.
#'
#' @return a data.frame.
#'
#' @references
#' \insertAllCited{}
"Kroupova2021"
#' @title 18 studies of a relationship between acculturation mismatch and
#' intergenerational cultural conflict collected by
#' \insertCite{lui2015intergenerational;textual}{RoBMA}
#'
#' @description The data set contains correlation coefficients r,
#' sample sizes n, and labels for each study assessing the
#' relationship between acculturation mismatch (that is the result of the contrast
#' between the collectivist cultures of Asian and Latin immigrant groups
#' and the individualist culture in the United States) and intergenerational cultural
#' conflict \insertCite{lui2015intergenerational}{RoBMA} which was used as an
#' example in \insertCite{bartos2020adjusting;textual}{RoBMA}.
#'
#' @format A data.frame with 3 columns and 18 observations.
#'
#' @return a data.frame.
#'
#' @references
#' \insertAllCited{}
"Lui2015"
|
803ddb3e79b93e610cb530c3bf2819a117512d0e
|
ce8d13de6aa47617809c5fc4d83ccd961b310104
|
/man/xgb.max_sensitivity.Rd
|
d86a2bb3d5e462cb6f2d6f20a9271677d9cec97a
|
[] |
no_license
|
BruceZhaoR/Laurae
|
2c701c1ac4812406f09b50e1d80dd33a3ff35327
|
460ae3ad637f53fbde6d87b7b9b04ac05719a169
|
refs/heads/master
| 2021-01-22T12:24:50.084103
| 2017-03-24T19:35:47
| 2017-03-24T19:35:47
| 92,722,642
| 0
| 1
| null | 2017-05-29T08:51:26
| 2017-05-29T08:51:26
| null |
UTF-8
|
R
| false
| true
| 715
|
rd
|
xgb.max_sensitivity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgb.max_sensitivity.R
\name{xgb.max_sensitivity}
\alias{xgb.max_sensitivity}
\title{xgboost evaluation metric for maximum Sensitivity (True Positive Rate)}
\usage{
xgb.max_sensitivity(pred, dtrain)
}
\arguments{
\item{pred}{Type: numeric. The predictions.}
\item{dtrain}{Type: xgb.DMatrix. The training data.}
}
\value{
The maximum Sensitivity (True Positive Rate) for binary data.
}
\description{
This function allows xgboost to use a custom thresholding method to maximize the Sensitivity (True Positive Rate). You can use this function via \code{eval_metric}. It leaks memory over time, but it can be reclaimed using \code{gc()}.
}
|
71000425f57ea3c2e2c391a0ba733a14f0440516
|
04bd0b46324eb01410c9323d55fec99772127ea4
|
/plot1.R
|
24c43f0a5aaf1460715b903d225cb456e10ccfc7
|
[] |
no_license
|
cybernurl/ExData_Plotting1
|
55770fe7e2083f659ecf3377b67a4dbc2edc943b
|
3c1c0c110029bf85c7049c53d8c3d4d6f05f6c49
|
refs/heads/master
| 2021-01-24T14:32:51.947702
| 2015-04-11T01:50:45
| 2015-04-11T01:50:45
| 33,520,194
| 0
| 0
| null | 2015-04-07T03:37:03
| 2015-04-07T03:37:02
| null |
UTF-8
|
R
| false
| false
| 382
|
r
|
plot1.R
|
## Plot 1 -- histogram of Global Active Power for 2007-02-01 / 2007-02-02.

# Read the full data set; the raw file is ";"-separated and uses "?" for
# missing values, so map those to NA on read.
fulldata <- read.csv("household_power_consumption.txt",
                     header = TRUE, sep = ";", na.strings = "?")

# Keep only the two target days (dates are stored as d/m/yyyy strings).
data <- fulldata[fulldata$Date %in% c("1/2/2007", "2/2/2007"), ]

# Render the histogram straight to a 480x480 PNG device.
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power,
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     col = "Red")
dev.off()
|
d3957f07f243b3b41d14e69cbacd8a63f4d3974a
|
9b9344923dad577a25eab4354d03b8a406137b32
|
/scripts/structural/structural_table.R
|
b0985da73938920e2225ee3beb429d13559f9272
|
[] |
no_license
|
mnvandyke/water_competition
|
5572d3bbace8f6c4bc474202ce4d06bbc61be93b
|
82ce2c965609f25ea22dce0c7abd6005316861d4
|
refs/heads/main
| 2023-04-14T15:38:14.983700
| 2022-09-15T19:00:24
| 2022-09-15T19:00:24
| 519,023,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,996
|
r
|
structural_table.R
|
### Analysis for "Small rainfall changes drive substantial changes in plant coexistence"
### Mary Van Dyke, mnvandyke@ucla.edu
### Last edit: 25 July 2022
### this script makes the structural analysis tables (Extended data tables 5,6,7)

library(gt)
library(tidyverse)
library(dplyr)

# Structural analysis results: one row per species combination x treatment,
# with the structural niche difference (omega), the fitness difference (theta)
# and the predicted outcome (1 = coexistence).
struc <- read.csv("./output/structural_analysis_output.csv") %>% arrange(species)
struc$X <- NULL  # drop the row-index column written by write.csv
struc$coexist <- ifelse(struc$outcome == 1, "yes", "no")
struc$outcome <- NULL

## Triplets table: Extended Data Table 6 --------------------------------------
trip <- struc %>%
  filter(no_sp == 3)
trip$match <- NULL
trip$no_sp <- NULL

# One row per triplet; treatment-specific theta/omega/coexist columns side by side.
trip_wide <- pivot_wider(data = trip,
                         names_from = treatment,
                         values_from = c(theta, omega, coexist))

# (A former "trip_tab <- gt(trip_wide)" assignment here was dead code: it was
# immediately overwritten by the pipeline below, so it has been removed.)
trip_tab <- trip_wide %>%
  gt(rowname_col = "species") %>%
  tab_stubhead(label = "Species") %>%
  tab_spanner(
    label = "Ambient Rainfall",
    columns = c(omega_1, theta_1, coexist_1)) %>%
  tab_spanner(
    label = "Reduced Rain",
    columns = c(omega_2, theta_2, coexist_2)) %>%
  tab_style(
    style = cell_text(weight = "bold"),
    locations = list(cells_column_spanners(), cells_stubhead())) %>%
  cols_label(
    omega_1 = html("Ω"),
    theta_1 = html("θ"),
    coexist_1 = html("Predicted <br> Coexistence?"),
    omega_2 = html("Ω"),
    theta_2 = html("θ"),
    coexist_2 = html("Predicted <br> Coexistence?")
  ) %>%
  cols_align(align = "center") %>%
  fmt_number(c(omega_1, omega_2), decimals = 3) %>%
  fmt_number(c(theta_1, theta_2), decimals = 2) %>%
  tab_style(
    style = cell_text(align = "center", indent = px(100)),
    locations = cells_stubhead()) %>%
  tab_style(
    style = cell_text(align = "center"),
    locations = cells_stub()) %>%
  # Group rows by the qualitative change in the coexistence prediction.
  tab_row_group(
    label = html("Coexist in <br> ambient but not <br> reduced rainfall"),
    rows = (coexist_1 == "yes" & coexist_2 == "no"),
    id = "aa") %>%
  tab_row_group(
    label = html("Coexist in <br> reduced rainfall <br> but not ambient"),
    rows = (coexist_1 == "no" & coexist_2 == "yes"),
    id = "bb") %>%
  tab_row_group(
    label = html("Coexist in <br> neither"),
    rows = (coexist_1 == "no" & coexist_2 == "no"),
    id = "cc") %>%
  row_group_order(
    groups = c("aa", "bb", "cc")) %>%
  tab_options(row_group.as_column = TRUE) %>%
  tab_style(
    style = cell_text(align = "left"),
    locations = cells_row_groups())
trip_tab
gtsave(trip_tab, "structural_table_triplets.png", "./figures/")

## Pairs table: Extended Data Table 5 ------------------------------------------
pairs <- struc %>%
  filter(no_sp == 2)
pairs$no_sp <- NULL
# NOTE(review): pair_labels is defined in final_figures.R; running this script
# standalone before that object exists will fail on the next line.
pairs <- pairs %>% filter(species %in% pair_labels)

pairs_wide <- pivot_wider(data = pairs,
                          names_from = treatment,
                          values_from = c(theta, omega, coexist))

# (A former "pairs_tab <- gt(pairs_wide)" assignment here was dead code and
# has been removed; the pipeline below builds the table.)
pairs_tab <- pairs_wide %>%
  gt(rowname_col = "species") %>%
  tab_stubhead(label = "Species") %>%
  tab_spanner(
    label = "Ambient Rainfall",
    columns = c(omega_1, theta_1, coexist_1)) %>%
  tab_spanner(
    label = "Reduced Rain",
    columns = c(omega_2, theta_2, coexist_2)) %>%
  cols_move_to_start(
    columns = c(omega_1, theta_1, coexist_1)) %>%
  tab_style(
    style = cell_text(weight = "bold"),
    locations = list(cells_column_spanners(), cells_stubhead())) %>%
  cols_label(
    omega_1 = html("Ω"),
    theta_1 = html("θ"),
    coexist_1 = html("Predicted <br> Coexistence?"),
    omega_2 = html("Ω"),
    theta_2 = html("θ"),
    coexist_2 = html("Predicted <br> Coexistence?")) %>%
  cols_align(align = "center") %>%
  fmt_number(c(omega_1, omega_2), decimals = 3) %>%
  fmt_number(c(theta_1, theta_2), decimals = 2) %>%
  tab_style(
    style = cell_text(align = "center"),
    locations = cells_stub()) %>%
  tab_style(
    style = cell_text(align = "center", indent = px(125)),
    locations = cells_stubhead()) %>%
  # Group rows by the qualitative change in the coexistence prediction.
  tab_row_group(
    label = html("Coexist in <br> ambient but not <br> reduced rainfall"),
    rows = (coexist_1 == "yes" & coexist_2 == "no"),
    id = "aa") %>%
  tab_row_group(
    label = html("Coexist in<br>reduced rainfall<br>but not ambient"),
    rows = (coexist_1 == "no" & coexist_2 == "yes"),
    id = "bb") %>%
  tab_row_group(
    label = html("Coexist in <br> both"),
    rows = (coexist_1 == "yes" & coexist_2 == "yes"),
    id = "cc") %>%
  tab_row_group(
    label = html("Coexist in <br> neither"),
    rows = (coexist_1 == "no" & coexist_2 == "no"),
    id = "dd") %>%
  row_group_order(
    groups = c("aa", "bb", "cc", "dd")) %>%
  tab_options(row_group.as_column = TRUE) %>%
  tab_style(
    style = cell_text(align = "left"),
    locations = cells_row_groups()
  )
pairs_tab
gtsave(pairs_tab, "structural_table_pairs.png", "./figures/")

## Quadruplets, quintuplets, sextuplet: Extended Data Table 7 ------------------
quads <- struc %>%
  filter(no_sp >= 4)

quads_wide <- pivot_wider(data = quads,
                          names_from = treatment,
                          values_from = c(theta, omega, coexist))

quads_tab <- quads_wide %>%
  gt(rowname_col = "species") %>%
  tab_stubhead(label = "Species") %>%
  tab_spanner(
    label = "Ambient Rainfall",
    columns = c(omega_1, theta_1, coexist_1)) %>%
  tab_spanner(
    label = "Reduced Rain",
    columns = c(omega_2, theta_2, coexist_2)) %>%
  cols_move_to_start(
    columns = c(omega_1, theta_1, coexist_1)) %>%
  tab_style(
    style = cell_text(weight = "bold"),
    locations = list(cells_column_spanners(), cells_stubhead())) %>%
  cols_label(
    omega_1 = html("Ω"),
    theta_1 = html("θ"),
    coexist_1 = html("Predicted <br> Coexistence?"),
    omega_2 = html("Ω"),
    theta_2 = html("θ"),
    coexist_2 = html("Predicted <br> Coexistence?")
  ) %>%
  cols_align(align = "center") %>%
  fmt_number(c(omega_1, omega_2), decimals = 4) %>%
  fmt_number(c(theta_1, theta_2), decimals = 2) %>%
  tab_style(
    style = cell_text(align = "center"),
    locations = cells_stub()) %>%
  tab_style(
    style = cell_text(align = "center"),
    locations = cells_stubhead()) %>%
  # Group by the number of species in the combination.
  tab_row_group(
    label = "Quadruplets",
    rows = no_sp == 4,
    id = "quads") %>%
  tab_row_group(
    label = "Quintuplets",
    rows = no_sp == 5,
    id = "quints") %>%
  tab_row_group(
    label = "Sextuplet",
    rows = no_sp == 6,
    id = "sext") %>%
  row_group_order(c("quads", "quints", "sext")) %>%
  cols_hide(no_sp) %>%
  tab_options(row_group.as_column = TRUE) %>%
  # (A duplicate left-alignment tab_style that appeared twice in this chain
  # has been collapsed into this single call; the styling is idempotent.)
  tab_style(
    style = cell_text(align = "left"),
    locations = cells_row_groups())
quads_tab
gtsave(quads_tab, "structural_table_quads.png", "./figures/")
|
d3a541aa04c948ec2f05266e656d53c74b25a127
|
c2333607c201f51f0af4f57a780c4e200c47c523
|
/Ejemplos Treemap.R
|
0baca07e0366774c7284fb0ddd09d82d25628be9
|
[] |
no_license
|
ramirocadavid/reportes_contenidos
|
6ba4f587923f63a6a6e9745502140ccb050ce089
|
bb28463608eceff35df035ce5a89413144669265
|
refs/heads/master
| 2022-09-23T11:07:08.314222
| 2022-04-22T14:23:24
| 2022-04-22T14:23:24
| 94,578,086
| 0
| 1
| null | 2017-08-08T22:00:06
| 2017-06-16T20:16:59
|
R
|
UTF-8
|
R
| false
| false
| 680
|
r
|
Ejemplos Treemap.R
|
library(treemap)
# Demonstrations of the main entry points of the `treemap` package, run
# against its bundled example datasets. All calls are side-effect only
# (plots / interactive widgets); nothing is stored for reuse.

# itreemap: Interactive user interface for treemap
data(business)
itreemap(business)
# treecolors: Interactive tool to experiment with Tree Colors
treecolors()
# treegraph Create a tree graph
# Full NACE hierarchy without labels, then a labelled subtree restricted to
# the construction sector (labels truncated to 2/4/6 characters per level).
treegraph(business, index=c("NACE1", "NACE2", "NACE3", "NACE4"), show.labels=FALSE)
treegraph(business[business$NACE1=="F - Construction",],
index=c("NACE2", "NACE3", "NACE4"), show.labels=TRUE, truncate.labels=c(2,4,6))
# treemap: Create a treemap
data(GNI2014)
# Rectangle size = population, colour = GNI, nested by continent then country.
treemap(GNI2014,
index=c("continent", "iso3"),
vSize="population",
vColor="GNI",
type="value",
format.legend = list(scientific = FALSE, big.mark = " "))
|
c1e0b553e7e861911d060aaa2823991cf51bd671
|
0ca5af9a178b368386e495f62bc3616e8debc27c
|
/credentials.r
|
e797a684a245354a3b641fa33a67b3fe4a987d39
|
[] |
no_license
|
mlshipman/dataProdProj
|
ae4418e7a03ef266467ee94e4e0cf314e2f3b054
|
3e105d36d1def548135be273d795b1d3f4c6843a
|
refs/heads/master
| 2020-12-24T11:25:26.889144
| 2014-08-24T07:21:26
| 2014-08-24T07:21:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
credentials.r
|
# Placeholder OAuth credentials (fill in before use).
# NOTE(review): this writes the four values to a human-readable plain-text
# file named "credentials" in the working directory (ascii=TRUE).
# Keep that file out of version control.
consumer_key<-""
consumer_secret<-""
access_token<-""
access_secret<-""
save(list=c("consumer_key", "consumer_secret", "access_token", "access_secret"), file="credentials", ascii=TRUE)
|
df4e780c6d77369d585314de356e64e4b13e338f
|
839de41c2aa412e9f9c8b8b1fcb9313caa72efe2
|
/man/date2day.Rd
|
2705f6f5179f2ff66344df9b4c900231d68ae73b
|
[] |
no_license
|
Zhoushu/ETAS-model-for-location-transformed-
|
b98a93158edb96fa6bf595d4931ab642fe590002
|
88347245c210dc2bdc52377349fbc96ff7eb6a39
|
refs/heads/master
| 2021-01-23T07:03:17.328820
| 2014-10-14T13:30:07
| 2014-10-14T13:30:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,402
|
rd
|
date2day.Rd
|
\name{date2day}
\alias{date2day}
\title{
Convert date-time data to numeric data in days
}
\description{
A function to convert date-time data to days with respect to a date-time origin.
}
\usage{
date2day(dates, start, tz = "", ...)
}
\arguments{
\item{dates}{A date-time or date object. Typically, it is a character
vector containing date-time information.}
\item{start}{A date-time or date object. Determines the origin of the conversion.}
\item{tz}{Optional. Timezone specification to be used for the conversion.}
\item{\dots}{Arguments to be passed to \code{as.POSIXlt}.}
}
\value{
A numeric vector of the same length as \code{dates}.
}
\details{
The arguments \code{dates} and \code{start} must be of
appropriate format to be passed to \code{as.POSIXlt} function.
}
\seealso{
\code{\link{as.POSIXlt}} and \code{\link{difftime}} for appropriate format of the data
to be converted.
}
\author{
Abdollah Jalilian
}
\examples{
## Days elapsed between dates and an origin:
## date2day(c("2014-01-01", "2014-01-15"), start = "2014-01-01")
## returns c(0, 14).
## The function is currently defined as
function (dates, start, tz = "", ...)
{
start <- as.POSIXlt(start, tz = tz, ...)
dates <- as.POSIXlt(dates, tz = tz, ...)
out <- as.numeric(difftime(dates, start, units = "days"))
return(out)
}
}
\keyword{spatial}
\keyword{math}
\keyword{date time}
|
c235ac6d596164f8085b41558caf607f5771d427
|
411eba8cfd9fc5f5753e92be1f72672fc286fbe5
|
/02_gmail.automation.R
|
13b012ed667be7d02c2a2bdc9d16442cdbd0f9e5
|
[] |
no_license
|
Remxy/Google-Trend-
|
2a5d76f2acfe962ee4adaaf2b5fc7c3752211222
|
086fa9c916d6e8f22f2521ce2ce96000207f7152
|
refs/heads/master
| 2021-02-14T10:36:08.338504
| 2020-03-04T03:23:32
| 2020-03-04T03:23:32
| 244,797,295
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,411
|
r
|
02_gmail.automation.R
|
##-GOOGLE TRENDS AUTOMATION--
# Scheduled (cron) job: renders the Google Trends report to PDF and emails it.
#
# One-time setup (run manually, NOT on every scheduled execution; the old
# script re-ran install.packages()/devtools installs on each invocation):
#   install.packages(c("gmailr", "rmarkdown", "tinytex", "tidyverse", "fs"))

# 1.0 LIBRARIES ----
library(gmailr)     # Gmail API client
library(rmarkdown)  # report rendering
library(tidyverse)  # data wrangling + stringr/str_glue
library(lubridate)  # now()
library(fs)         # file-system helpers

# 2.0 KEY PARAMETERS ----

# 2.1 Report parameters: keywords handed to the report template.
search_terms <- c("aws", "azure", "google cloud")
#search_terms <- c("docker", "git")

# 2.2 Email parameters
to      <- "aremif03@gmail.com"
subject <- "Google Trends"
body <- str_glue("
Hey Remi,
Find below detailed report on Google Trends Keywords: {str_c(search_terms, collapse = ', ')}
Best Regards
Ade")

# 3.0 REPORT AUTOMATION ----

# Timestamped output name, e.g. "2020_03_04T03_23_32_trends_report.pdf".
file_path <- now() %>%
  str_replace_all("[[:punct:]]", "_") %>%
  str_replace(" ", "T") %>%
  str_c("_trends_report.pdf")

rmarkdown::render(
  input = "C:/Users/Remi_Adefioye/Documents/google_trends/google_trends_report_template.Rmd",
  output_format = "pdf_document",
  output_file = file_path,
  output_dir = "reports",
  knit_root_dir = NULL,
  # BUG FIX: pass the vector itself; the old code passed the literal string
  # "search_terms", so the template never saw the configured keywords.
  params = list(search_terms = search_terms),
  envir = parent.frame(),
  run_pandoc = TRUE,
  quiet = FALSE,
  encoding = getOption("encoding")
)

# 4.0 GMAIL API AUTOMATION ----
# Must register an app with the Google Developers console.
# gmailr instructions: https://github.com/r-lib/gmailr
#  - Make an app:  https://developers.google.com/gmail/api/quickstart/python
#  - Run remotely: https://gargle.r-lib.org/articles/non-interactive-auth.html

# Download Gmail App Credentials & Configure App
gm_auth_configure(path = "C:/Users/Remi_Adefioye/google_trends/credentials.json") # Replace path to app credentials
# Authorize your gmail account
gm_auth(email = "aremif03@gmail.com") # Replace email account

# Create email.
# BUG FIX: the attachment path must be "reports/<file>"; the old call
# gm_attach_file(str_c("reports/"), file_path) attached the bare directory
# string and passed file_path as a stray extra argument.
email <- gm_mime() %>%
  gm_to(to) %>%
  gm_from("aremif03@gmail.com") %>%
  gm_cc("") %>%
  gm_subject(subject) %>%
  gm_text_body(body) %>%
  gm_attach_file(str_c("reports/", file_path))

gm_send_message(email, user_id = "me")
|
44f6919ce54f8f637bc5f0f1b5ed20375a524af2
|
b81fb3d7e665ab4795491b4825f38ee79cee9f06
|
/R/plot_factor_returns_cum.R
|
4efcaf516a9ffcfb84e6d0e5a14983bc6e8d4594
|
[] |
no_license
|
olaoritsland/FaktorModell
|
8d03a2b55c94c4b83e6e6f4cb42fe0f35a80706a
|
06f84a6764ac1b3898797a8a0a523d604a0afedb
|
refs/heads/master
| 2020-09-30T05:17:32.985320
| 2019-12-21T21:52:33
| 2019-12-21T21:52:33
| 227,213,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
plot_factor_returns_cum.R
|
#' Plot cumulative factor returns over time.
#'
#' Cumulatively sums every numeric column of `.data` (non-numeric columns
#' such as `date` are left untouched), reshapes to long format and draws
#' one line per factor.
#'
#' @param .data A data frame with a `date` column and one numeric return
#'   column per factor.
#' @return A ggplot object.
plot_factor_returns_cum <- function(.data) {
  .data %>%
    # across(where(...)) replaces the superseded mutate_if(); behavior is
    # identical: cumsum applied to every numeric column.
    mutate(across(where(is.numeric), cumsum)) %>%
    pivot_longer(-date) %>%
    ggplot(aes(x = date, y = value, color = name)) +
    geom_line()
}
|
416d0028ac0c90b1b5d623eb4ca387a9f8fde538
|
f09ad286fcd020a8219d0803ddbef7e5817ee2a2
|
/PUCR-Assignment-9/4.R
|
992c45eaf3173e339d1bd7c733e2549385952f64
|
[] |
no_license
|
isabellafischer/Brasil_2019
|
17874ebfc4dbf3a77c98b1defb4575e1a1a3ed96
|
544b71a52a976b2f743977b754029a891e6525ee
|
refs/heads/master
| 2020-07-07T04:17:00.787358
| 2019-10-31T16:34:59
| 2019-10-31T16:34:59
| 203,246,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
r
|
4.R
|
# Isabella Fischer
# K-means clustering (k = 3, Forgy initialisation) of the brambles point
# pattern; plots each point coloured by its assigned cluster.
library(tidyverse)
set.seed(3)
# Bramble cane coordinates (boot-package dataset, fetched over HTTP).
coord = as_tibble(read.csv("http://vincentarelbundock.github.io/Rdatasets/csv/boot/brambles.csv"))
coordinates = as.matrix(select(coord, x, y))
# NOTE: `coordinates` is reused -- first the raw x/y matrix, then the kmeans fit.
coordinates = kmeans(coordinates, centers = 3, algorithm = "Forgy")
# Per-observation cluster labels extracted from the fitted object.
cluster2 = fitted(coordinates, method = "classes")
coordinates3 = cbind(coord, cluster2)
# Factor so ggplot treats clusters as discrete colours.
coordinates3$cluster2 = factor(coordinates3$cluster2)
ggplot(coordinates3)+
geom_point(mapping = aes(x = x, y = y, colour = cluster2))+
labs(col = "Cluster")+
theme_bw()
# Saves the most recently displayed plot.
ggsave(filename = "4.png")
|
979f295cd6bb753623ee344de4e6698f7ea80e6f
|
3ab4931dd17eef3f0ba3153e86e06a00e147809a
|
/R/hess.lo.R
|
1d53e006bdc7f6bdbd328a0d68fd648e1382bec9
|
[] |
no_license
|
cran/gim
|
a5a8ffddbb5b59ed1bb152ec24845227bf9ff390
|
2182d9a6cee5dd3455f36ad743018ec5f840fbc5
|
refs/heads/master
| 2022-11-11T15:16:17.130648
| 2020-06-12T08:10:24
| 2020-06-12T08:10:24
| 125,206,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 260
|
r
|
hess.lo.R
|
# Numerical Hessian of the log-odds objective: Jacobian of the score
# function score.lo(), evaluated at `para`, with both dimensions labelled
# by the parameter names. All extra arguments are forwarded to score.lo().
hess.lo <- function(para, map, data, ref, inv.V, bet0, outcome){
  jac <- numDeriv::jacobian(
    score.lo, para,
    map = map, data = data, ref = ref,
    inv.V = inv.V, bet0 = bet0, outcome = outcome
  )
  # Name rows and columns after the parameters in one step.
  dimnames(jac) <- list(names(para), names(para))
  jac
}
|
cd7894be16ccbddc3e8c26c1343f60fde000d928
|
fead3b0586a4c258cb2c4964cf4a3ad7691cd8b7
|
/inst/unitTests/test_SimpleCompoundDb.R
|
732ea281097f609680a768769c7d4d474da0fb55
|
[
"MIT"
] |
permissive
|
jorainer/xcmsExtensions
|
518ba5445a44a8989fb0c6129ff1c1468b55da0a
|
6e7cb0d04e6547d6295eb1b54d324b82d141c641
|
refs/heads/master
| 2021-05-31T07:04:56.889256
| 2016-05-03T06:30:58
| 2016-05-03T06:30:58
| 54,638,373
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,098
|
r
|
test_SimpleCompoundDb.R
|
####============================================================
## Testing some of the (basic) functionality of the SimpleCompoundDb.
##
####------------------------------------------------------------
## NOTE(review): force-reloading the package here is interactive-development
## residue; a test runner would normally handle package loading.
detach("package:xcmsExtensions", unload=TRUE)
library(xcmsExtensions)
## RUnit-style smoke test. `scDb` is assumed to be a pre-built
## SimpleCompoundDb provided by the test environment -- TODO confirm where
## it is created.
test_SimpleCompoundDb <- function(){
    ## Just a silly check
    checkException(SimpleCompoundDb())
    ## Just running the methods to see whether we would get any error.
    tmp <- columns(scDb)
    tmp <- dbconn(scDb)
    tmp <- as.data.frame(scDb)
    tmp <- listTables(scDb)
}
## mzmatch() dispatched on the database must agree with mzmatch() on a plain
## numeric vector of the compounds' average molecular weights.
test_mzmatch_db <- function(){
    realMz <- c(169.2870, 169.5650, 346.4605)
    ## Shift each m/z down by ~10 ppm so the default window still matches.
    queryMz <- realMz - (floor(realMz) / 1000000) * 10
    comps <- c(300.1898, 298.1508, 491.2000, 169.13481, 169.1348, queryMz)
    Res <- mzmatch(comps, scDb, column="avg_molecular_weight")
    ## Compare that to the mzmatch on integer, integer.
    masses <- compounds(scDb, columns=c("accession", "avg_molecular_weight"))
    Res2 <- mzmatch(comps, masses$avg_molecular_weight)
    ## Get the accessions for those
    Res2 <- lapply(Res2, function(z){
        if(!any(is.na(z[, 1]))){
            ## Translate row indices into HMDB accessions.
            return(data.frame(idx=masses[z[, 1], "accession"],
                              deltaMz=z[, 2], stringsAsFactors=FALSE)
                   )
        }else{
            return(data.frame(idx=NA, deltaMz=NA))
        }
    })
    tmp1 <- do.call(rbind, Res)
    tmp2 <- do.call(rbind, Res2)
    ## The db result carries an "adduct" column; add it to the plain result
    ## before comparing.
    tmp2 <- cbind(tmp2, adduct=rep("M", nrow(tmp2)), stringsAsFactors=FALSE)
    rownames(tmp1) <- NULL
    rownames(tmp2) <- NULL
    checkEquals(tmp1, tmp2)
    ## Error checking
    checkException(mzmatch(comps, scDb, ionAdduct="sdfdkjf"))
}
## Match a (mzmin, mzmax) range matrix of M+H ion m/z values against the
## database and cross-check the hits: (1) mzmed hits must be a subset of the
## range hits, (2) every matched compound mass lies in the ppm-widened mass
## window, (3) the db route agrees with matching against a plain mass vector.
test_mzmatch_matrix <- function(){
    ## Real life example: 5, 7, 9
    mzs <- cbind(mzmed=c(324.1584, 327.1989, 329.2000),
                 mzmin=c(324.1238, 327.1683, 329.1970),
                 mzmax=c(324.1632, 327.2000, 329.2000))
    ## Do the M+H search based on mzmed:
    mzmedRes <- mzmatch(mzs[, "mzmed"], scDb, ppm=10, ionAdduct="M+H")
    mzmedMat <- mzmatch(mzs[, c("mzmin", "mzmax")], scDb, ppm=10, ionAdduct="M+H")
    ## We require that all compounds identified by mzmed are also found in the matrix version
    checkTrue(all(do.call(rbind, mzmedRes)$idx %in% do.call(rbind, mzmedMat)$idx))
    ## Check it
    for(i in seq_len(nrow(mzs))){
        ## Masses of the compounds matched for row i.
        cmps <- compounds(scDb, filter=CompoundidFilter(mzmedMat[[i]]$idx),
                          columns="monoisotopic_molecular_weight")
        ## Convert the m/z range boundaries into masses.
        massMin <- adductmz2mass(mzs[i, "mzmin"], ionAdduct="M+H")[[1]]
        massMax <- adductmz2mass(mzs[i, "mzmax"], ionAdduct="M+H")[[1]]
        ## Widen by 10 ppm and check all matched masses fall inside.
        massMin <- massMin - (massMin * 10/1000000)
        massMax <- massMax + (massMax * 10/1000000)
        checkTrue(all(cmps$monoisotopic_molecular_weight > massMin &
                      cmps$monoisotopic_molecular_weight < massMax))
    }
    ## And now compare that to the matrix,numeric version.
    compmasses <- compounds(scDb, columns=c("accession", "monoisotopic_molecular_weight"))
    for(i in seq_len(nrow(mzs))){
        minmass <- adductmz2mass(mzs[i, "mzmin"], ionAdduct="M+H")[[1]]
        maxmass <- adductmz2mass(mzs[i, "mzmax"], ionAdduct="M+H")[[1]]
        ## BUG FIX: was `nro=1`, relying on silent partial argument matching;
        ## spelled out as `nrow`.
        massmat <- matrix(c(minmass, maxmass), nrow=1)
        SingleRes <- mzmatch(massmat, mz=compmasses$monoisotopic_molecular_weight, ppm=10)[[1]]
        SingleRes <- data.frame(id=compmasses[SingleRes[, "idx"], "accession"], SingleRes,
                                stringsAsFactors=FALSE)
        dbRes <- mzmedMat[[i]]
        ## Both routes sorted by mass delta before comparison.
        dbRes <- dbRes[order(dbRes$deltaMz), ]
        checkEquals(dbRes$idx, SingleRes$id)
        ## Also the distance?
        checkEquals(dbRes$deltaMz, SingleRes$deltaMz)
    }
}
## Ion-adduct aware matching: the public mzmatch() front end and the internal
## SQL implementations must agree with each other.
test_mzmatch_db_new <- function(){
    ## This uses now the ion adducts.
    realMz <- c(169.2870, 169.5650, 346.4605)
    queryMz <- realMz - (floor(realMz) / 1000000) * 10
    comps <- c(300.1898, 298.1508, 491.2000, 169.13481, 169.1348, queryMz)
    ###########
    ## The "front-end" methods.
    Res <- mzmatch(comps, scDb, ppm=10, ionAdduct="M+H")
    ###########
    ## The internal functions.
    Res <- xcmsExtensions:::.mzmatchCompoundDbSQL(comps, scDb)
    ## Test the new one.
    Res2 <- xcmsExtensions:::.mzmatchMassCompoundDbSql(comps, mz=scDb, ionAdduct=NULL)
    Res <- do.call(rbind, Res)
    Res2 <- do.call(rbind, Res2)
    rownames(Res) <- NULL
    rownames(Res2) <- NULL
    ## Only the first two columns of Res2 are comparable to Res.
    checkEquals(Res, Res2[, 1:2])
    ## Test the other new one.
    Res3 <- xcmsExtensions:::.mzmatchMassPpmCompoundDbSql(comps, mz=scDb, ionAdduct=NULL)
    Res3 <- do.call(rbind, Res3)
    rownames(Res3) <- NULL
    checkEquals(Res, Res3[, 1:2])
    ## The full version with ppm on the MZ
    Res4 <- xcmsExtensions:::.mzmatchMassCompoundDbSql(comps, mz=scDb,
                                                       ionAdduct=supportedIonAdducts())
    ## and ppm on the mass
    Res5 <- xcmsExtensions:::.mzmatchMassPpmCompoundDbSql(comps, mz=scDb,
                                                          ionAdduct=supportedIonAdducts())
    tmp1 <- Res4[[1]]
    rownames(tmp1) <- NULL
    tmp2 <- Res5[[1]]
    rownames(tmp2) <- NULL
    checkEquals(tmp1, tmp2)
}
## Benchmark scratchpad ("notrun_" prefix: not picked up by the test suite):
## one big SQL query vs. per-element queries plus post-processing in R.
notrun_mzmatch_performance <- function(){
    ## Just testing the performance of x SQL queries against one SQL query
    ## and doing the rest in R...
    realMz <- c(169.2870, 169.5650, 346.4605)
    ## Should get them with a 10 ppm:
    queryMz <- realMz - (floor(realMz) / 1000000) * 10
    sqlRes <- xcmsExtensions:::.mzmatchCompoundDbSQL(queryMz, scDb)
    sqlRes2 <- xcmsExtensions:::.mzmatchCompoundDbSQL(realMz, scDb)
    checkEquals(sqlRes, sqlRes2)
    comps <- c(300.1898, 298.1508, 491.2000, 169.13481, 169.1348, queryMz)
    bigComps <- rep(comps, 1000)
    system.time(
        sqlRes <- xcmsExtensions:::.mzmatchCompoundDbSQL(bigComps, scDb)
    )
    ## Takes 4 secs for 8000 compounds; 7 seconds including the distance calc.
    system.time(
        rRes <- xcmsExtensions:::.mzmatchCompoundDbPlain(bigComps, scDb)
    )
    ## Incredible! takes 7.8 secs!!! with accession retrieval: 8.7
}
## as.data.frame() must match a direct accession-ordered SQL dump.
notrun_test_as.data.frame <- function(){
    full <- as.data.frame(scDb)
    other <- RSQLite::dbGetQuery(dbconn(scDb), "select * from compound_basic order by accession")
    checkEquals(full, other)
}
## compounds(): filtering by compound id and column selection.
test_compounds <- function(){
    cf <- CompoundidFilter(c("HMDB00010", "HMDB00002", "HMDB00011"))
    res <- compounds(scDb, filter=cf)
    ## Results are expected ordered by accession.
    checkEquals(res$accession, sort(value(cf)))
    ## Just selected columns
    res <- compounds(scDb, filter=cf, columns=c("name", "inchi"))
    checkEquals(res$accession, sort(value(cf)))
    ## The filter column ("accession") is added to the requested columns.
    checkEquals(colnames(res), c("accession", "name", "inchi"))
    ## Optional arguments
    res <- compounds(scDb, filter=cf, columns=c("name", "inchi"), return.all.columns=FALSE)
    checkEquals(colnames(res), c("name", "inchi"))
}
## cleanColumns() drops column names unknown to the database.
test_cleanColumns <- function(){
    res <- xcmsExtensions:::cleanColumns(scDb, c("accession", "gene_id", "bla"))
    checkEquals(res, "accession")
}
## prefixColumns() maps column names to "table.column", dropping unknown
## names (unless clean=FALSE) and grouping the result by table.
test_prefixColumns <- function(){
    ## with and without clean.
    res <- xcmsExtensions:::prefixColumns(scDb, columns=c("accession", "gene_id", "name"))
    checkEquals(res[[1]], c("compound_basic.accession", "compound_basic.name"))
    res <- xcmsExtensions:::prefixColumns(scDb, columns=c("value", "gene_id", "name"),
                                          clean=FALSE)
    checkEquals(names(res), "metadata")
    checkEquals(res[[1]], c("metadata.name", "metadata.value"))
}
## cleanTables() keeps only table names that exist in the database.
test_cleanTables <- function(){
    res <- xcmsExtensions:::cleanTables(scDb, tables=c("agfkg", "asdfdfd"))
    checkEquals(res, NULL)
    res <- xcmsExtensions:::cleanTables(scDb, tables=c("metadata", "compound_basic"))
    checkEquals(res, c("metadata", "compound_basic"))
}
## sortTablesByDegree() drops invalid names and orders the valid tables
## (compound_basic before metadata).
test_sortTablesByDegree <- function(){
    res <- xcmsExtensions:::sortTablesByDegree(scDb, tables=c("adsds", "metadata", "compound_basic"))
    checkEquals(res, c("compound_basic", "metadata"))
}
## addRequiredJoinTables(): a single valid table needs no join partners.
## NOTE(review): the second call's result is never asserted -- TODO add a check.
test_addRequiredJoinTables <- function(){
    res <- xcmsExtensions:::addRequiredJoinTables(scDb, "metadata")
    checkEquals(res, "metadata")
    res <- xcmsExtensions:::addRequiredJoinTables(scDb, "asfkdf")
}
## buildFilterQuery() renders filter objects into a SQL where clause;
## filters not applicable to this database (GeneidFilter) are ignored and
## multiple applicable filters are combined with "and".
test_buildFilterQuery <- function(){
    res <- xcmsExtensions:::buildFilterQuery(scDb)
    cf <- CompoundidFilter("adffdf")
    gf <- ensembldb::GeneidFilter("asdasdfd")
    res <- xcmsExtensions:::buildFilterQuery(scDb, filter=cf)
    checkEquals(res, " where compound_basic.accession = 'adffdf'")
    res <- xcmsExtensions:::buildFilterQuery(scDb, filter=list(cf, gf))
    checkEquals(res, " where compound_basic.accession = 'adffdf'")
    res <- xcmsExtensions:::buildFilterQuery(scDb, filter=list(cf, cf))
    checkEquals(res, " where compound_basic.accession = 'adffdf' and compound_basic.accession = 'adffdf'")
}
## buildJoinQuery() resolves the table(s) required for the given columns;
## inputs with no known column yield NULL.
test_buildJoinQuery <- function(){
    res <- xcmsExtensions:::buildJoinQuery(scDb, "name")
    checkEquals(res, "compound_basic")
    res <- xcmsExtensions:::buildJoinQuery(scDb, c("name", "adff"))
    checkEquals(res, "compound_basic")
    res <- xcmsExtensions:::buildJoinQuery(scDb, c("metadata", "asdds"))
    checkEquals(res, NULL)
}
## buildQuery() assembles the full select statement; order-by columns are
## added to the selection, unknown order-by names are dropped, and the
## where clause from the filters is appended.
test_buildQuery <- function(){
    res <- xcmsExtensions:::buildQuery(scDb, columns=c("accession", "name"))
    checkEquals(res, "select distinct compound_basic.accession,compound_basic.name from compound_basic")
    res <- xcmsExtensions:::buildQuery(scDb, columns=c("accession", "name"), order.by="smiles")
    checkEquals(res, "select distinct compound_basic.accession,compound_basic.name,compound_basic.smiles from compound_basic order by compound_basic.smiles asc")
    res <- xcmsExtensions:::buildQuery(scDb, columns=c("accession", "name"), order.by=c("smiles,dfadskfd"))
    checkEquals(res, "select distinct compound_basic.accession,compound_basic.name,compound_basic.smiles from compound_basic order by compound_basic.smiles asc")
    ## And with a filter.
    cf <- CompoundidFilter("abc")
    res <- xcmsExtensions:::buildQuery(scDb, columns=c("accession", "name"), filter=cf)
    checkEquals(res, "select distinct compound_basic.accession,compound_basic.name from compound_basic where compound_basic.accession = 'abc'")
}
## getWhat(): the low-level data retrieval workhorse.
test_getWhat <- function(){
    ## Get all the data
    cf <- CompoundidFilter("HMDB00002")
    res <- xcmsExtensions:::getWhat(scDb, filter=cf)
    checkTrue(nrow(res) == 1)
    ## The filter column is added to the requested columns ...
    res <- xcmsExtensions:::getWhat(scDb, filter=cf, columns=c("name", "inchi"))
    checkEquals(colnames(res), c("accession", "name", "inchi"))
    ## ... unless return.all.columns=FALSE.
    res <- xcmsExtensions:::getWhat(scDb, filter=cf, columns=c("name", "inchi"), return.all.columns=FALSE)
    checkEquals(colnames(res), c("name", "inchi"))
}
## MassrangeFilter: inclusive "[]" vs exclusive "()" bounds, and combining
## with a CompoundidFilter (filters combined restrictively).
test_compounds_MassrangeFilter <- function(){
    mrf <- MassrangeFilter(c(300, 310))
    cmps <- compounds(scDb, filter=mrf, columns=c("accession", "avg_molecular_weight",
                                                  "monoisotopic_molecular_weight"))
    ## Default condition: boundaries included.
    checkTrue(all(cmps$monoisotopic_molecular_weight >= 300 &
                  cmps$monoisotopic_molecular_weight <= 310))
    condition(mrf) <- "()"
    cmps <- compounds(scDb, filter=mrf, columns=c("accession", "avg_molecular_weight",
                                                  "monoisotopic_molecular_weight"))
    ## "()" excludes the boundaries.
    checkTrue(all(cmps$monoisotopic_molecular_weight > 300 &
                  cmps$monoisotopic_molecular_weight < 310))
    ## Changing the column to avg
    ## mrf@column <- "avg_molecular_weight"
    ## Combine filters.
    cmps <- compounds(scDb, filter=list(mrf, CompoundidFilter("HMDB60116")),
                      columns=c("accession", "avg_molecular_weight",
                                "monoisotopic_molecular_weight"))
    ## Shift the range so the named compound no longer matches -> empty result.
    value(mrf) <- c(304, 310)
    cmps <- compounds(scDb, filter=list(mrf, CompoundidFilter("HMDB60116")),
                      columns=c("accession", "avg_molecular_weight",
                                "monoisotopic_molecular_weight"))
    checkTrue(nrow(cmps)==0)
}
|
528ef8b10f56c15f4ea57d56847a2d5209a5fd20
|
54c36870b35b7504fdfb1d4c7f9fb48258485fdb
|
/R/SimulateData.R
|
4699d33736c863ad8e1d714e2785174ceb8299a3
|
[] |
no_license
|
jackiemauro/OptSortCausal
|
0e48bd1bdcc4a87bb334621d978d3266bb63084c
|
134c538a7a767f255c7fec6b7c7fc592504b6f93
|
refs/heads/master
| 2020-03-16T17:32:25.952535
| 2019-02-21T01:51:39
| 2019-02-21T01:51:39
| 132,836,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
r
|
SimulateData.R
|
#' Simulate a dataset
#'
#' Draws `N` observations: covariate `x ~ U(0, 3)`, assignment `A` uniform on
#' {1, 2, 3}, and binary outcome `y` with success probability
#' `k - (1 - k)` when the assignment falls in the matching covariate bin
#' (A == 1 with x < 1, A == 2 with 1 <= x < 2, A == 3 with x >= 2) and `k`
#' otherwise.
#'
#' @param N number of observations
#' @param k optional parameter setting degree to which prison/x is deterministic
#' @return data.frame with columns `y`, `A`, `x`
simsort <- function(N, k = .75){
  # RNG call order (runif, sample, rbinom) is kept stable so seeded runs
  # reproduce the original implementation exactly.
  X <- runif(N, 0, 3)
  A <- sample(c(1:3), size = N, replace = TRUE)  # was `T`; TRUE is unambiguous
  matched <- (A == 1 & X < 1) | (A == 2 & X >= 1 & X < 2) | (A == 3 & X >= 2)
  mu <- k - (1 - k) * as.numeric(matched)
  Y <- rbinom(N, 1, mu)
  data.frame(y = Y, A = A, x = X)
}
#' Like simsort(), but with unequal assignment probabilities (.5, .3, .2)
#' and fixed outcome probabilities: .25 in the matching covariate bin,
#' .75 otherwise.
#'
#' @param N number of observations
#' @return data.frame with columns `y`, `A`, `x`
simsort2 <- function(N){
  # Same RNG call order as the original for seeded reproducibility.
  X <- runif(N, 0, 3)
  A <- sample(c(1:3), size = N, replace = TRUE, prob = c(.5, .3, .2))  # was `T`
  matched <- (A == 1 & X < 1) | (A == 2 & X >= 1 & X < 2) | (A == 3 & X >= 2)
  mu <- .75 - .5 * as.numeric(matched)
  Y <- rbinom(N, 1, mu)
  data.frame(y = Y, A = A, x = X)
}
# Three-arm simulation with two covariates and covariate-dependent assignment
# probabilities. NOTE(review): relies on `expit()`, which is not defined in
# this file (presumably an inverse-logit such as plogis) -- confirm its source.
simsort3 <- function(N){
  X1 = runif(N, 0, 3); X2 = rnorm(N)
  # Per-row assignment probabilities; third column is the complement so each
  # row sums to 1.
  pi = cbind(.5*expit(X2), .5*expit(X1), 1 - .5*expit(X2) - .5*expit(X1))
  A = apply(pi, 1, function(x) sample(c(1:3), size = 1, prob = x))
  # Baseline success probability ~ U(.5, 1), reduced by .5 when the
  # assignment falls in the matching X1 bin.
  mu = runif(N,.5,1) - .5*as.numeric( (A==1 & X1 < 1) | (A==2 & X1 >=1 & X1 < 2) | (A==3 & X1 >=2) )
  Y = rbinom(N, 1, mu)
  data.frame(y = Y, A= A, x1 = X1, x2 = X2)
}
# Binary-treatment simulation: y = 1 exactly when A + x > 1.5.
# NOTE(review): `rbinom(1, 1, prob = c(y/2, 1 - y/2))` passes a length-2 prob
# vector with n = 1, so only the first element (y/2) is used per draw; the
# second element looks like leftover intent -- confirm before relying on it.
simsort4 <- function(N){
  x <- runif(N,0,1)
  A <- c(sapply(x, function(y) rbinom(1,1,prob=c(y/2,1-y/2))))
  #Y <- A
  Y <- as.numeric(A + x > 1.5)
  data.frame(y = Y, A = A, x = x)
}
|
25cd9a84181db9b2065606902b0e0dc45e865792
|
b3a5c21adf890f0b66790f23332f0082e7f1b40a
|
/man/format_inline.Rd
|
673f82d1dccd3940dcdc48b87b98bb9b081abb8b
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
r-lib/cli
|
96886f849fe69f8435f2d22fccf5d00dee7a5ce4
|
c36066ca6a208edbeb37ab13467a4dc6f5b5bbe2
|
refs/heads/main
| 2023-08-29T14:19:41.629395
| 2023-08-18T13:18:33
| 2023-08-18T13:18:33
| 89,723,016
| 560
| 69
|
NOASSERTION
| 2023-09-13T11:46:10
| 2017-04-28T16:10:28
|
R
|
UTF-8
|
R
| false
| true
| 1,780
|
rd
|
format_inline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cli.R
\name{format_inline}
\alias{format_inline}
\title{Format and returns a line of text}
\usage{
format_inline(
...,
.envir = parent.frame(),
collapse = TRUE,
keep_whitespace = TRUE
)
}
\arguments{
\item{...}{Passed to \code{\link[=cli_text]{cli_text()}}.}
\item{.envir}{Environment to evaluate the expressions in.}
\item{collapse}{Whether to collapse the result if it has multiple
lines, e.g. because of \verb{\\f} characters.}
\item{keep_whitespace}{Whether to keep all whitespace (spaces, newlines
and form feeds) as is in the input.}
}
\value{
Character scalar, the formatted string.
}
\description{
You can use this function to format a line of cli text, without emitting
it to the screen. It uses \code{\link[=cli_text]{cli_text()}} internally.
}
\details{
\code{format_inline()} performs no width-wrapping.
}
\examples{
format_inline("A message for {.emph later}, thanks {.fn format_inline}.")
}
\seealso{
This function supports \link[=inline-markup]{inline markup}.
Other functions supporting inline markup:
\code{\link{cli_abort}()},
\code{\link{cli_alert}()},
\code{\link{cli_blockquote}()},
\code{\link{cli_bullets_raw}()},
\code{\link{cli_bullets}()},
\code{\link{cli_dl}()},
\code{\link{cli_h1}()},
\code{\link{cli_li}()},
\code{\link{cli_ol}()},
\code{\link{cli_process_start}()},
\code{\link{cli_progress_along}()},
\code{\link{cli_progress_bar}()},
\code{\link{cli_progress_message}()},
\code{\link{cli_progress_output}()},
\code{\link{cli_progress_step}()},
\code{\link{cli_rule}},
\code{\link{cli_status_update}()},
\code{\link{cli_status}()},
\code{\link{cli_text}()},
\code{\link{cli_ul}()},
\code{\link{format_error}()}
}
\concept{functions supporting inline markup}
|
ca15f4f823c9f6bfcac1765eda21b248160638c8
|
430e757a1e6dae14ddd80ec37268adc410ba5793
|
/man/fp_save_inspection_plot.Rd
|
2f466bce9b78b069520265f11d2c6d6f32ef2fb2
|
[] |
no_license
|
alb202/rfret
|
bf6496460320886f445f40f4c18e27e77a399ac8
|
9b444e238cd89f829e278e6d9505c11d7e4d450c
|
refs/heads/master
| 2021-01-21T18:53:37.742723
| 2017-11-30T22:05:20
| 2017-11-30T22:05:20
| 92,093,777
| 0
| 0
| null | 2017-05-22T19:54:13
| 2017-05-22T19:54:13
| null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
fp_save_inspection_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fp_inspect_raw_data.R
\name{fp_save_inspection_plot}
\alias{fp_save_inspection_plot}
\title{Save raw data plots from a single FP binding dataset}
\usage{
fp_save_inspection_plot(input_plot, dataset_name, output_directory, plot_format)
}
\arguments{
\item{input_plot}{The output of \code{\link{fp_inspect_one_dataset}}.}
\item{dataset_name}{The name of the corresponding dataset.}
\item{output_directory}{The name of the output directory where plots will be
saved.}
\item{plot_format}{A character string indicating the file format to use to
save plots. Possible values are \code{"png"} (default value),
\code{"pdf"} and \code{"svg"}.}
}
\value{
Writes plots in files on disk.
}
\description{
This internal function saves the total fluorescence intensity
plots from an FP dataset to PNG, PDF or SVG files.
}
|
df8e696158f2bc500f60c01a4fa1181eeac67061
|
fe1887fbc9bc4ed192c24109ad05f1575935dc1c
|
/Chapter_4/tables_4.5_B3_cumulative_gas_emissions_lab.R
|
08e472e9a5ca68d19bc996e0be47304801ea3c9d
|
[] |
no_license
|
marenwestermann/PhD-thesis
|
6efb3cb9b68b56d380920dc27daa0de77ddadefc
|
3942cc2099eb5edaccbef08f8866dc33da28bf9a
|
refs/heads/master
| 2020-03-20T20:43:06.078371
| 2018-06-18T07:52:08
| 2018-06-18T07:52:08
| 137,702,409
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 826
|
r
|
tables_4.5_B3_cumulative_gas_emissions_lab.R
|
# One-way ANOVAs, unadjusted pairwise t-tests and descriptive statistics for
# cumulative N2O, CO2 and CH4 emissions (kg) by treatment.
# NOTE(review): describeBy() comes from the psych package, which is never
# loaded in this script -- it is assumed to be attached already; confirm.
gases_total <- read.csv("C:/Users/Maren/Dropbox/UQ PhD/PhD work/experiments/(4) GHG monitoring experiment Toowoomba/gases_total.csv")
##N2O
anova_N2O_kg <- aov(gases_total$N2O_N_kg ~ gases_total$treatment)
summary(anova_N2O_kg)
# p.adjust = "none": raw (unadjusted) pairwise p-values.
pairwise.t.test(gases_total$N2O_N_kg, gases_total$treatment, p.adjust = "none")
describeBy(gases_total$N2O_N_kg, gases_total$treatment)
##CO2
anova_CO2_kg <- aov(gases_total$CO2_C_kg ~ gases_total$treatment)
summary(anova_CO2_kg)
pairwise.t.test(gases_total$CO2_C_kg, gases_total$treatment, p.adjust = "none")
describeBy(gases_total$CO2_C_kg, gases_total$treatment)
##CH4
anova_CH4_kg <- aov(gases_total$CH4_C_kg ~ gases_total$treatment)
summary(anova_CH4_kg)
pairwise.t.test(gases_total$CH4_C_kg, gases_total$treatment, p.adjust = "none")
describeBy(gases_total$CH4_C_kg, gases_total$treatment)
|
b096741275a43291dfa56d08b70eeccfc73dfa57
|
629e5242206d8e9f62fb7b1ca6a158789dc9f03f
|
/data/make_plots.R
|
28704428c3dc331f5bcb1720f974ed319d6ac2e9
|
[] |
no_license
|
davidwhogg/CensoredData
|
e4c63805a3bef615b0ba25aac9522addb4d1e14d
|
2fcdfbdba74a5b901eaff0d40902280794380400
|
refs/heads/master
| 2016-09-05T10:25:35.249568
| 2013-12-06T23:52:37
| 2013-12-06T23:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,494
|
r
|
make_plots.R
|
# Summary plots for Mira-variable period/amplitude estimates, comparing plain
# Lomb-Scargle (LS) fits (columns 2-3) with censored-data LS fits (columns 4-5).
path = '/Users/jwrichar/Documents/CDI/CensoredData/'
miras = read.table(paste(path,"data/new_periods.dat",sep=""))
# Fraction of stars whose doubled/tripled LS period is within 5% of the
# censored period.
sum( abs((miras[,2] * 2 - miras[,4]) / miras[,4]) < 0.05) / dim(miras)[1]
# 0.1264775 of Miras are 'double' the LS period
sum( abs((miras[,2] * 3 - miras[,4]) / miras[,4]) < 0.05) / dim(miras)[1]
# 0.005122143 are triple
# Side-by-side period-amplitude relation: LS vs censored LS (log-x axis).
pdf(paste(path,"plots/mira_period_amplitude_relation.pdf",sep=""),height=6,width=10)
par(mfrow=c(1,2),mar=c(5,5,2,2))
plot(miras[,2],miras[,3],pch=19,col="#00000015",log='x',xlim=c(50,1000),ylim=c(0.5,7),xlab="LS Period", ylab="LS Amplitude",cex=0.75)
plot(miras[,4],miras[,5],pch=19,col="#00000015",log='x',xlim=c(50,1000),ylim=c(0.5,7),xlab="Censored LS Period", ylab="Censored LS Amplitude",cex=0.75)
dev.off()
# Period-amplitude correlations, computed for reference (values noted inline).
rho1 = cor(miras[,2],miras[,3]) # 0.01
rho2 = cor(miras[,4],miras[,5]) # 0.07
# Scatter of censored vs plain LS periods with reference lines at slopes
# 1/2, 1, 2 and 3, labelled half/same/double/triple.
pdf(paste(path,"plots/mira_periods_LS_vs_Censored.pdf",sep=""),height=8,width=8)
par(mfrow=c(1,1),mar=c(6,6,1,1))
plot(miras[,2],miras[,4],pch=19,xlab="Lomb-Scargle Period",ylab="Censored LS Period",col="#00000050",cex.lab=1.5)
abline(0,0.5,col=2,lty=2,lwd=2); text(800,400,"half",pos=NULL,cex=1.5)
abline(0,1,col=2,lty=2,lwd=2); text(800,800,"same",pos=NULL,cex=1.5)
abline(0,2,col=2,lty=2,lwd=2); text(400,800,"double",pos=NULL,cex=1.5)
abline(0,3,col=2,lty=2,lwd=2); text(300,900,"triple",pos=NULL,cex=1.5)
dev.off()
# Row indices of the "double" and "triple" cases (computed but unused here).
double = which(abs((miras[,2] * 2 - miras[,4]) / miras[,4]) < 0.05)
triple = which(abs((miras[,2] * 3 - miras[,4]) / miras[,4]) < 0.05)
|
665c4377c325ae99521babf3925df0b4d85a68da
|
016dbf82abbca9deda0d2f9a3504f526cfde9050
|
/R/mlregressionrandomforest.R
|
eda6e62620e9693f23b618806d2c9457b3284046
|
[] |
no_license
|
JorisGoosen/JASP-Machine-Learning
|
d96ad7df3fe529807582d27b81b850df1955b6bf
|
b9b24d2f158fe8d83f78f161c183ea02db90e5f5
|
refs/heads/master
| 2020-05-26T23:59:26.924187
| 2019-07-23T10:07:09
| 2019-07-23T10:07:09
| 188,417,261
| 0
| 0
| null | 2019-05-24T12:21:33
| 2019-05-24T12:21:32
| null |
UTF-8
|
R
| false
| false
| 12,121
|
r
|
mlregressionrandomforest.R
|
#
# Copyright (C) 2019 University of Amsterdam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Entry point for the JASP "Random Forest Regression" analysis.
# Reads/validates the data, then populates `jaspResults` (a mutable results
# container) with every requested table and plot. The order of the calls
# below fixes the on-screen order of the outputs (together with `position`).
MLRegressionRandomForest <- function(jaspResults, dataset, options, ...) {
  # Preparatory work: load and error-check the dataset
  dataset <- .readDataRegressionAnalyses(dataset, options)
  .errorHandlingRegressionAnalyses(dataset, options)
  # Check if analysis is ready to run (all required options set)
  ready <- .regressionAnalysesReady(options, type = "randomForest")
  # create the results table (fits the model as a side effect)
  .regressionMachineLearningTable(dataset, options, jaspResults, ready, type = "randomForest")
  # Create the evaluation metrics table
  .regressionEvaluationMetrics(dataset, options, jaspResults, ready)
  # Create the variable importance table
  .randomForestVariableImportance(options, jaspResults, ready, purpose = "regression")
  # Create the trees vs model error plot
  .randomForestTreesErrorPlot(options, jaspResults, ready, position = 5, purpose = "regression")
  # Create the predicted performance plot
  .regressionPredictedPerformancePlot(options, jaspResults, ready, position = 6)
  # Create the mean decrease in accuracy plot
  .randomForestPlotDecreaseAccuracy(options, jaspResults, ready, position = 7, purpose = "regression")
  # Create the total increase in node purity plot
  .randomForestPlotIncreasePurity(options, jaspResults, ready, position = 8, purpose = "regression")
}
# Fit a random forest regression on a random train/test split of `dataset`
# and return a list with the fitted models, split data, and fit statistics.
.randomForestRegression <- function(dataset, options, jaspResults){
  dataset <- na.omit(dataset)
  # Each row goes to the training set with probability `trainingDataManual`.
  train.index <- sample(c(TRUE,FALSE), nrow(dataset), replace = TRUE,
                        prob = c(options[["trainingDataManual"]], 1 - options[["trainingDataManual"]]))
  train <- dataset[train.index, ]
  test <- dataset[!train.index, ]
  predictors <- train[, .v(options[["predictors"]])]
  target <- train[, .v(options[["target"]])]
  test_predictors <- test[, .v(options[["predictors"]])]
  test_target <- test[, .v(options[["target"]])]
  if (options[["noOfPredictors"]] == "manual") {
    noOfPredictors <- options[["numberOfPredictors"]]
  } else {
    # BUG FIX: the automatic mtry should be floor(sqrt(p)) with p = number of
    # predictor variables. The original took
    # length(options[["numberOfPredictors"]]) -- the length of a single option
    # value, which is always 1 -- so mtry was silently always 1.
    noOfPredictors <- floor(sqrt(length(options[["predictors"]])))
  }
  # NOTE(review): sampsize is based on nrow(dataset) (the full data), not
  # nrow(train) -- confirm this is intended.
  if (options[["modelOpt"]] == "optimizationManual") {
    # Fixed number of trees chosen by the user.
    rfit <- randomForest::randomForest(x = predictors, y = target, xtest = test_predictors, ytest = test_target,
                                       ntree = options[["noOfTrees"]], mtry = noOfPredictors,
                                       sampsize = ceiling(options[["bagFrac"]]*nrow(dataset)),
                                       importance = TRUE, keep.forest = TRUE)
    noOfTrees <- options[["noOfTrees"]]
  } else if (options[["modelOpt"]] == "optimizationError") {
    # Grow up to maxTrees, pick the tree count minimising the OOB MSE, refit.
    rfit <- randomForest::randomForest(x = predictors, y = target, xtest = test_predictors, ytest = test_target,
                                       ntree = options[["maxTrees"]], mtry = noOfPredictors,
                                       sampsize = ceiling(options[["bagFrac"]]*nrow(dataset)),
                                       importance = TRUE, keep.forest = TRUE)
    oobError <- rfit$mse
    optimTrees <- which.min(oobError)[length(which.min(oobError))]
    rfit <- randomForest::randomForest(x = predictors, y = target, xtest = test_predictors, ytest = test_target,
                                       ntree = optimTrees, mtry = noOfPredictors,
                                       sampsize = ceiling(options[["bagFrac"]]*nrow(dataset)),
                                       importance = TRUE, keep.forest = TRUE)
    noOfTrees <- optimTrees
  }
  # Refit with the training data as the "test" set to get training-set error
  # curves for the OOB error plot.
  trainingFit <- randomForest::randomForest(x = predictors, y = target, xtest = predictors, ytest = target,
                                            ntree = noOfTrees, mtry = noOfPredictors,
                                            sampsize = ceiling(options[["bagFrac"]]*nrow(dataset)),
                                            importance = TRUE, keep.forest = TRUE)
  # Assemble everything downstream tables/plots need.
  regressionResult <- list()
  regressionResult[["rfit"]]         <- rfit
  regressionResult[["trainingFit"]]  <- trainingFit
  regressionResult[["train"]]        <- train
  regressionResult[["test"]]         <- test
  regressionResult[["noOfTrees"]]    <- noOfTrees
  regressionResult[["predPerSplit"]] <- noOfPredictors
  regressionResult[["bagFrac"]]      <- ceiling(options[["bagFrac"]]*nrow(dataset))
  regressionResult[["y"]]            <- rfit$test[["predicted"]]       # test-set predictions
  regressionResult[["x"]]            <- test[,.v(options[["target"]])] # observed test values
  regressionResult[["mse"]]          <- mean((rfit$test[["predicted"]] - test[,.v(options[["target"]])])^2)
  regressionResult[["ntrain"]]       <- nrow(train)
  regressionResult[["ntest"]]        <- nrow(test)
  regressionResult[["oobError"]]     <- rfit$mse[length(rfit$mse)]     # OOB MSE at final tree
  regressionResult[["varImp"]] <- plyr::arrange(data.frame(
    Variable = .unv(as.factor(names(rfit$importance[,1]))),
    MeanIncrMSE = rfit$importance[, 1],
    TotalDecrNodeImp = rfit$importance[, 2]
  ), -TotalDecrNodeImp)
  return(regressionResult)
}
# Build the "Variable Importance" table (mean decrease in accuracy and total
# increase in node purity per predictor). Skipped when the table already
# exists or was not requested.
.randomForestVariableImportance <- function(options, jaspResults, ready, purpose){
  if(!is.null(jaspResults[["tableVariableImportance"]]) || !options[["tableVariableImportance"]]) return()
  tableVariableImportance <- createJaspTable(title = "Variable Importance")
  tableVariableImportance$position <- 4
  tableVariableImportance$dependOn(options = c("tableVariableImportance", "scaleEqualSD", "target", "predictors", "modelOpt", "maxTrees",
                                               "noOfTrees", "bagFrac", "noOfPredictors", "numberOfPredictors", "seed", "seedBox"))
  tableVariableImportance$addColumnInfo(name = "predictor",  title = " ", type = "string")
  tableVariableImportance$addColumnInfo(name = "MDiA",  title = "Mean decrease in accuracy", type = "number")
  tableVariableImportance$addColumnInfo(name = "MDiNI",  title = "Total increase in node purity", type = "number")
  jaspResults[["tableVariableImportance"]] <- tableVariableImportance
  if(!ready) return()
  result <- base::switch(purpose,
                         "classification" = jaspResults[["classificationResult"]]$object,
                         "regression" = jaspResults[["regressionResult"]]$object)
  # Removed dead code: an unused `varImpOrder <- sort(...)$ix` line -- the rows
  # come pre-sorted in result[["varImp"]] (arranged by node-purity decrease).
  tableVariableImportance[["predictor"]] <- .unv(.v(result[["varImp"]]$Variable))
  tableVariableImportance[["MDiA"]]      <- result[["varImp"]]$MeanIncrMSE
  tableVariableImportance[["MDiNI"]]     <- result[["varImp"]]$TotalDecrNodeImp
}
# Render the "Out-of-bag Error Plot": model error as a function of the number
# of trees, as two curves (labelled "Test set" and "Training set").
# Skipped when the plot already exists or was not requested.
.randomForestTreesErrorPlot <- function(options, jaspResults, ready, position, purpose){
  if(!is.null(jaspResults[["plotTreesVsModelError"]]) || !options[["plotTreesVsModelError"]]) return()
  plotTreesVsModelError <- createJaspPlot(plot = NULL, title = "Out-of-bag Error Plot", width = 500, height = 300)
  plotTreesVsModelError$position <- position
  plotTreesVsModelError$dependOn(options = c("plotTreesVsModelError", "trainingDataManual", "scaleEqualSD", "modelOpt", "maxTrees",
                                             "target", "predictors", "seed", "seedBox", "noOfTrees", "bagFrac", "noOfPredictors", "numberOfPredictors"))
  jaspResults[["plotTreesVsModelError"]] <- plotTreesVsModelError
  if(!ready) return()
  # Fitted-model container produced by the corresponding analysis function.
  result <- base::switch(purpose,
                         "classification" = jaspResults[["classificationResult"]]$object,
                         "regression" = jaspResults[["regressionResult"]]$object)
  # y-axis label depends on the analysis type (despite the variable name xTitle).
  xTitle <- base::switch(purpose,
                         "classification" = "Out-of-bag \nClassification Error",
                         "regression" = "Out-of-bag \nMean Squared Error")
  # Per-tree error from the main fit ("Test set" curve).
  values <- base::switch(purpose,
                         "classification" = result[["rfit"]]$err.rate[,1],
                         "regression" = result[["rfit"]]$mse)
  # Per-tree error from the refit on the training data ("Training set" curve).
  values2 <- base::switch(purpose,
                          "classification" = result[["trainingFit"]]$err.rate[,1],
                          "regression" = result[["trainingFit"]]$mse)
  values <- c(values, values2)
  # Long-format frame: one row per (tree count, curve).
  # NOTE(review): assumes both error vectors have the same length (same ntree)
  # -- confirm, otherwise rep(1:length(values2), 2) misaligns the curves.
  treesMSE <- data.frame(
    trees = rep(1:length(values2), 2),
    error = values,
    type = rep(c("Test set", "Training set"), each = length(values2))
  )
  xBreaks <- JASPgraphs::getPrettyAxisBreaks(treesMSE[["trees"]], min.n = 4)
  yBreaks <- JASPgraphs::getPrettyAxisBreaks(treesMSE[["error"]], min.n = 4)
  p <- ggplot2::ggplot(data = treesMSE, mapping = ggplot2::aes(x = trees, y = error, linetype = type)) +
    JASPgraphs::geom_line()
  # Only draw point markers when there are few trees; otherwise they clutter.
  if(max(treesMSE[["trees"]]) <= 25)
    p <- p + JASPgraphs::geom_point()
  p <- p + ggplot2::scale_x_continuous(name = "Number of Trees", labels = xBreaks, breaks = xBreaks) +
    ggplot2::scale_y_continuous(name = xTitle, labels = yBreaks, breaks = yBreaks) +
    ggplot2::labs(linetype = "")
  p <- JASPgraphs::themeJasp(p, legend.position = "top")
  plotTreesVsModelError$plotObject <- p
}
# Horizontal bar chart of the per-variable mean decrease in accuracy.
# Skipped when the plot already exists or was not requested by the user.
.randomForestPlotDecreaseAccuracy <- function(options, jaspResults, ready, position, purpose){
  if (!is.null(jaspResults[["plotDecreaseAccuracy"]]) || !options[["plotDecreaseAccuracy"]]) {
    return()
  }
  accuracyPlot <- createJaspPlot(plot = NULL, title = "Mean Decrease in Accuracy", width = 500, height = 300)
  accuracyPlot$position <- position
  accuracyPlot$dependOn(options = c("plotDecreaseAccuracy", "trainingDataManual", "scaleEqualSD", "modelOpt", "maxTrees",
                                    "target", "predictors", "seed", "seedBox", "noOfTrees", "bagFrac", "noOfPredictors", "numberOfPredictors"))
  jaspResults[["plotDecreaseAccuracy"]] <- accuracyPlot
  if (!ready) {
    return()
  }
  result <- switch(purpose,
                   "classification" = jaspResults[["classificationResult"]]$object,
                   "regression"     = jaspResults[["regressionResult"]]$object)
  # Bars ordered by importance; flipped to horizontal by themeJasp.
  importanceBars <- ggplot2::ggplot(result[["varImp"]], ggplot2::aes(x = reorder(Variable, MeanIncrMSE), y = MeanIncrMSE)) +
    ggplot2::geom_bar(stat = "identity", fill = "grey", col = "black", size = .3) +
    ggplot2::labs(x = "", y = "Mean Decrease in Accuracy")
  accuracyPlot$plotObject <- JASPgraphs::themeJasp(importanceBars, horizontal = TRUE)
}
# Horizontal bar chart of the per-variable total increase in node purity.
# Skipped when the plot already exists or was not requested by the user.
.randomForestPlotIncreasePurity <- function(options, jaspResults, ready, position, purpose){
  if (!is.null(jaspResults[["plotIncreasePurity"]]) || !options[["plotIncreasePurity"]]) {
    return()
  }
  purityPlot <- createJaspPlot(plot = NULL, title = "Total Increase in Node Purity", width = 500, height = 300)
  purityPlot$position <- position
  purityPlot$dependOn(options = c("plotIncreasePurity", "trainingDataManual", "scaleEqualSD", "modelOpt", "maxTrees",
                                  "target", "predictors", "seed", "seedBox", "noOfTrees", "bagFrac", "noOfPredictors", "numberOfPredictors"))
  jaspResults[["plotIncreasePurity"]] <- purityPlot
  if (!ready) {
    return()
  }
  result <- switch(purpose,
                   "classification" = jaspResults[["classificationResult"]]$object,
                   "regression"     = jaspResults[["regressionResult"]]$object)
  # Bars ordered by importance; flipped to horizontal by themeJasp.
  purityBars <- ggplot2::ggplot(result[["varImp"]], ggplot2::aes(x = reorder(Variable, TotalDecrNodeImp), y = TotalDecrNodeImp)) +
    ggplot2::geom_bar(stat = "identity", fill = "grey", col = "black", size = .3) +
    ggplot2::labs(x = "", y = "Total Increase in Node Purity")
  purityPlot$plotObject <- JASPgraphs::themeJasp(purityBars, horizontal = TRUE)
}
|
5ab991de920f104b497cb10e90c13dc0ce10b68c
|
73d4b03a270781f7db515c129fb5d01244385541
|
/functions.R
|
7bc428445114e19a52cf86fe7a9d85b8b5661d9f
|
[] |
no_license
|
fentontaylor/DataScienceCapstone
|
480bdc31f8dde6721069b8fa2bfc3439f5bf0160
|
7caaedb52c5c35b015fa6f98bc602a4f11c63067
|
refs/heads/master
| 2021-01-23T02:10:12.581668
| 2019-11-25T20:49:03
| 2019-11-25T20:49:03
| 85,970,058
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,792
|
r
|
functions.R
|
# Fetch and unpack the SwiftKey text corpus into the working directory,
# unless the zip archive has already been downloaded.
downloadTextDataset <- function(){
  zip_url <- "https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/Coursera-SwiftKey.zip"
  zip_name <- basename(zip_url)
  if (!file.exists(zip_name)) {
    download.file(zip_url, zip_name)
    unzip(zip_name)
  }
}
# Randomly subsample a fraction of the lines of a text file.
#
# x        : path to the source text file
# percent  : fraction of lines to keep, strictly between 0 and 1
# destfile : output path (required when save=TRUE)
# encoding : text encoding for reading and writing
# seed     : RNG seed for a reproducible subsample (no default -- must be given)
# save     : if TRUE write the subsample to destfile, else return it
subsetTextLines <- function(x, percent, destfile=NULL, encoding="UTF-8", seed, save=TRUE){
  # BUG FIX: scalar conditions now use `||`/`&&` instead of the vectorized
  # `|`/`&` (which error on length > 1 conditions in R >= 4.3); the doubled
  # "be be" typo in the error message is also fixed.
  if (percent <= 0 || percent >= 1) stop("percent must be a value between 0 and 1")
  if (save && is.null(destfile)) stop("must specify destfile if save=TRUE")
  set.seed(seed)
  con <- file(x)
  # Close the connection even if readLines() fails (the original leaked it).
  on.exit(close(con), add = TRUE)
  txt <- readLines(con, encoding = encoding, skipNul = TRUE)
  n <- length(txt)
  # floor() makes the truncation explicit (sample() truncated implicitly).
  subtxt <- txt[sample(seq_len(n), floor(n * percent))]
  if (!save) {
    return(subtxt)
  }
  write.table(subtxt, file = destfile, quote = FALSE, sep = "\n",
              row.names = FALSE, col.names = FALSE,
              fileEncoding = encoding)
}
# Count the total number of whitespace-separated words across all elements
# of a character vector. Reimplemented in base R (gregexpr) so the function
# no longer requires the stringr/magrittr packages; results are unchanged.
nWords <- function(x) {
  sum(vapply(gregexpr("\\S+", x),
             function(m) sum(m > 0),  # gregexpr returns -1 when there is no match
             integer(1)))
}
myChars <- function(x, n=seq(x)) {
  # x: a Corpus
  # n: the elements of x for which characters will be returned
  # Returns the unique characters appearing in the selected documents, sorted
  # in decreasing order -- useful for spotting stray/garbled characters.
  require(stringr)
  t <- character()
  for(i in n){
    # Each corpus element stores its text in the first component.
    t <- c(t, x[[i]][[1]])
  }
  t %>%
    str_split("") %>%
    # Drops the first character of every line.
    # NOTE(review): unclear why the first character is discarded (leading
    # marker? BOM?) -- confirm against the corpus format.
    sapply(function(x) x[-1]) %>%
    unlist %>%
    unique %>%
    # NOTE(review): `dec=T` relies on partial matching and `T`; prefer
    # `decreasing = TRUE` (left unchanged here).
    sort(dec=T)
}
cleanPCorpus <- function(x) {
  # x: a path to a directory containing the raw txt files for the Corpus
  # Builds a disk-backed (permanent) corpus from the raw files, applies a long
  # sequence of text-normalisation transformations, and writes the cleaned
  # documents to <x>/clean. The regex substitutions below are ORDER-SENSITIVE:
  # later steps assume the markers/cleanups of earlier steps.
  suppressMessages(library(tm))
  suppressMessages(library(filehash))
  files <- dir(x)
  # Directory layout: <x>/db for filehash databases, <x>/clean for output.
  dbDir <- file.path(x, "db")
  if(!dir.exists(dbDir)) (dir.create(dbDir))
  cleanDir <- file.path(x,"clean")
  if(!dir.exists(cleanDir)) (dir.create(cleanDir))
  dbFile <- file.path(x,"db", paste0(basename(x),".db"))
  # NOTE(review): if dbFile already exists, `corp` is never assigned here and
  # the sapply() below will fail with "object 'corp' not found" -- confirm
  # whether an existing db should be re-opened instead.
  if(!file.exists(dbFile)){
    corp <- PCorpus(DirSource(x), dbControl=list(dbName=dbFile, dbType="DB1"))
  }
  print("CONVERTING CHARACTERS...")
  # Strip non-ASCII characters up front so later regexes behave predictably.
  dat <- sapply(corp, function(row) iconv(row, "latin1", "ASCII", sub=""))
  print("CREATING TEMP FILES...")
  # Round-trip through temp files to rebuild the corpus from the ASCII text.
  tempDir <- file.path(x,"temp")
  if(!dir.exists(tempDir)) dir.create(tempDir)
  for(i in seq(files)){
    write(dat[[i]], file.path(tempDir, files[i]))
  }
  rm(dat)  # free memory before building the second corpus
  dbCleanFile <- file.path(x,"db",paste0(basename(x),"Clean.db"))
  corp <- PCorpus(DirSource(tempDir),
                  dbControl=list(dbName=dbCleanFile,
                                 dbType="DB1"))
  print("BEGINNING TRANSFORMATIONS...")
  # Generic substitution transformer used throughout.
  swap <- content_transformer(function(x, from, to) gsub(from, to, x))
  corp <- tm_map(corp, content_transformer(tolower))
  # Remove profanity words
  profanityWords <- readLines(con="data/profanityWords.txt", skipNul = T)
  corp <- tm_map(corp, removeWords, profanityWords)
  print("PROFANITY REMOVAL COMPLETE...")
  # Replace all foreign unicode character codes with a space
  corp <- tm_map(corp, swap, "<.*>", " ")
  # Delete all twitter-style hashtag references
  corp <- tm_map(corp, swap, "#[a-z]+", " ")
  # Delete website names
  corp <- tm_map(corp, swap, "[[:alnum:][:punct:]]+\\.(?:com|org|net|gov|co\\.uk|aws|fr|de)([\\/[:alnum:][:punct:]]+)?", "webURL")
  # Replace all punctuation except EOS punctuation and apostrophe with a space
  print("WEB-BASED TEXT REMOVAL COMPLETE...")
  corp <- tm_map(corp, swap, "[^[:alnum:][:space:]\'\\.\\?!]", " ")
  # Convert numbers with decimal places to <NUM> marker
  corp <- tm_map(corp, swap, "[0-9]+\\.[0-9]+", "<NUM>")
  # Convert all other numbers to <NUM> marker
  corp <- tm_map(corp, swap, "[0-9]+(\\w*)?", "<NUM>")
  # Replace all instances of multiple EOS punctuation with one instance
  corp <- tm_map(corp, swap, "([\\.\\?!]){2,}", ". ")
  # Replace . ? ! with <EOS> tag
  corp <- tm_map(corp, swap, "\\. |\\.$", " <EOS> ")
  corp <- tm_map(corp, swap, "\\? |\\?$|\\b\\?\\b", " <EOS> ")
  corp <- tm_map(corp, swap, "! |!$|\\b!\\b", " <EOS> ")
  print("<EOS> AND <NUM> TAGGING COMPLETE...")
  # Remove any extra ? !
  corp <- tm_map(corp, swap, "!", " ")
  corp <- tm_map(corp, swap, "\\?", " ")
  # Convert very common occurence of u.s to US
  corp <- tm_map(corp, swap, "u\\.s", "US")
  corp <- tm_map(corp, swap, "\\.", "")
  # Remove single letters except for "a" and "i"
  corp <- tm_map(corp, swap, " [b-hj-z] ", " ")
  # Clean up leftover punctuation artifacts
  corp <- tm_map(corp, swap, " 's", " ")
  corp <- tm_map(corp, swap, " ' ", " ")
  corp <- tm_map(corp, swap, "\\\\", " ")
  corp <- tm_map(corp, stripWhitespace)
  print("ALL TRANSFORMATIONS COMPLETE")
  print("WRITING CORPUS TEXT TO DISK...")
  writeCorpus(corp, cleanDir, filenames = paste0("clean_",files))
  print("PROCESSING SUCCESSFULLY FINISHED")
}
cleanTextFull <- function(x) {
  # In-memory version of the cleanPCorpus() transformation pipeline: applies
  # the same ORDER-SENSITIVE normalisation steps to a character vector and
  # returns the cleaned text of the first document.
  require(tm)
  require(stringi)
  # Strip non-ASCII characters, then wrap in a volatile corpus.
  x <- iconv(x, "latin1", "ASCII", sub="")
  x <- VCorpus(VectorSource(x))
  swap <- content_transformer(function(x, from, to) gsub(from, to, x))
  x <- tm_map(x, content_transformer(tolower))
  # Remove profanity (word list shipped with the project data).
  profanityWords <- readLines(con="data/profanityWords.txt", skipNul = T)
  x <- tm_map(x, removeWords, profanityWords)
  # Drop unicode escapes, hashtags; tag URLs.
  x <- tm_map(x, swap, "<.*>", " ")
  x <- tm_map(x, swap, "#[a-z]+", " ")
  x <- tm_map(x, swap, "[[:alnum:][:punct:]]+\\.(?:com|org|net|gov|co\\.uk|aws|fr|de)([\\/[:alnum:][:punct:]]+)?", "webURL")
  # Keep only alphanumerics, whitespace, apostrophe and EOS punctuation.
  x <- tm_map(x, swap, "[^[:alnum:][:space:]\'\\.\\?!]", " ")
  # Remove numbers (note: unlike cleanPCorpus, numbers are deleted here rather
  # than replaced with a <NUM> marker).
  x <- tm_map(x, swap, "[0-9]+\\.[0-9]+", "")
  x <- tm_map(x, swap, "[0-9]+(\\w*)?", "")
  # Collapse repeated EOS punctuation, then tag sentence ends with <EOS>.
  x <- tm_map(x, swap, "([\\.\\?!]){2,}", ". ")
  x <- tm_map(x, swap, "\\. |\\.$", " <EOS> ")
  x <- tm_map(x, swap, "\\? |\\?$|\\b\\?\\b", " <EOS> ")
  x <- tm_map(x, swap, "! |!$|\\b!\\b", " <EOS> ")
  # Remove leftover punctuation artefacts and stray single letters.
  x <- tm_map(x, swap, "!", " ")
  x <- tm_map(x, swap, "\\?", " ")
  x <- tm_map(x, swap, "u\\.s", "US")
  x <- tm_map(x, swap, "\\.", "")
  x <- tm_map(x, swap, " [b-hj-z] ", " ")
  x <- tm_map(x, swap, " 's", " ")
  x <- tm_map(x, swap, " ' ", " ")
  x <- tm_map(x, swap, "\\\\", " ")
  x <- tm_map(x, stripWhitespace)
  # Return the cleaned text of the (single) document.
  x[[1]]$content
}
# Lightweight text normaliser used at prediction time (see nextWord()):
# lowercases, tags URLs, strips numbers/punctuation, marks sentence ends
# with <EOS>, and collapses repeated spaces. The regex steps mirror (a subset
# of) the corpus-building pipeline and are ORDER-SENSITIVE.
cleanTextQuick <- function(x) {
  suppressMessages(require(stringi))
  x <- tolower(x)
  x <- stri_replace_all_regex(x, "[[:alnum:][:punct:]]+\\.(?:com|org|net|gov|co\\.uk|aws|fr|de)([\\/[:alnum:][:punct:]]+)?", "webURL")
  x <- stri_replace_all_regex(x, "[^[:alnum:][:space:]\'\\.\\?!]", "")
  x <- stri_replace_all_regex(x, "[0-9]+\\.[0-9]+", "")
  x <- stri_replace_all_regex(x, "[0-9]+(\\w*)?", "")
  x <- stri_replace_all_regex(x, "([\\.\\?!]){2,}", ". ")
  x <- stri_replace_all_regex(x, "\\. |\\.$", " <EOS> ")
  x <- stri_replace_all_regex(x, "\\? |\\?$|\\b\\?\\b", " <EOS> ")
  x <- stri_replace_all_regex(x, "! |!$|\\b!\\b", " <EOS> ")
  x <- stri_replace_all_regex(x, "[ ]{2,}", " ")
  # BUG FIX: the function previously ended in an assignment, whose value is
  # returned *invisibly*; return the cleaned text visibly instead.
  x
}
# helper function for create_ngrams
# toks: quanteda tokens object of unigrams
# ng: n-gram order to build (1 = keep the unigram tokens as-is)
# name: file-name prefix for the saved artefacts (e.g. "bi")
# saveDir: directory where artefacts are written
# saveAll: should all the intermediary files be saved? (tokens, dfm, word/freq)
#          if FALSE, only the marker-stripped word/freq table is saved
n_toks <- function(toks, ng, name, saveDir, saveAll){
  if(ng != 1) {
    toks <- tokens_ngrams(toks, n=ng, concatenator=" ")
  }
  if(saveAll) {saveRDS(toks, paste0(saveDir,"/",name,"_toks.rds"))}
  dfm <- dfm(toks, tolower=FALSE)
  if(saveAll) {saveRDS(dfm, paste0(saveDir,"/",name,"_dfm.rds"))}
  n_freq <- freq_df(dfm)
  rm(dfm)  # free the (potentially large) matrix before saving
  if(saveAll) {saveRDS(n_freq, paste0(saveDir,"/",name,"_freq.rds"))}
  # Drop n-grams containing the sentence/number markers before saving the
  # stripped ("_s") table.
  # BUG FIX: the original used n_freq[-grep(...),] unconditionally; when NO
  # row matched, -integer(0) selected ZERO rows and the whole table was
  # silently emptied. Only drop rows when there is something to drop.
  marker_rows <- grep("EOS|NUM", n_freq$words)
  if (length(marker_rows) > 0) {
    n_freq <- n_freq[-marker_rows,]
  }
  saveRDS(n_freq, paste0(saveDir,"/",name,"_freq_s.rds"))
  rm(n_freq)
}
create_ngrams <- function(x, modelName, type, saveAll=TRUE) {
  # x: directory containing clean text files
  # modelName: sub-directory of x to save ngram files
  # type: character vector specifying which ngrams to create
  #       options are c("uni","bi","tri","quad","five","six")
  # saveAll: should all the intermediary files be saved? (tokens, dfm, word/freq)
  #          if FALSE, only the word/freq data.frame is saved
  suppressMessages(require(tm))
  suppressMessages(require(quanteda))
  print("Creating Corpus...")
  myCorp <- VCorpus(DirSource(x))
  myCorp <- corpus(myCorp)
  mod_dir <- file.path(x, modelName)
  if (!dir.exists(mod_dir)) dir.create(mod_dir)
  print("Creating Tokens...")
  toks <- tokens(myCorp, removeSymbols = TRUE)
  # Table-driven replacement for six near-identical if-blocks: each entry maps
  # a `type` keyword to its n-gram order and progress-message label. Processing
  # order and printed messages are unchanged.
  specs <- list(
    uni  = list(ng = 1, label = "Unigrams"),
    bi   = list(ng = 2, label = "Bigrams"),
    tri  = list(ng = 3, label = "Trigrams"),
    quad = list(ng = 4, label = "Quadgrams"),
    five = list(ng = 5, label = "Fivegrams"),
    six  = list(ng = 6, label = "Sixgrams")
  )
  for (nm in names(specs)) {
    if (nm %in% type) {
      print(paste0("Creating ", specs[[nm]]$label, "..."))
      n_toks(toks = toks, ng = specs[[nm]]$ng, name = nm, saveDir = mod_dir, saveAll = saveAll)
      print("Complete")
    }
  }
}
combine_tables <- function(files, saveNew=NULL){
  # files: character vector of .rds word/freq tables to be combined
  # saveNew: optional file.path to save the combined table
  # Returns the combined table with per-word frequencies summed across files.
  suppressMessages(require(dplyr))
  suppressMessages(require(data.table))
  out <- data.frame(words=character(),freq=numeric())
  # full_join accumulates one freq.* column per input file; rowSums below
  # collapses them into a single total.
  for( i in seq_along(files) ){
    temp <- as.data.table(readRDS(files[i]))
    out <- full_join(out, temp, by="words")
  }
  out <- out %>%
    mutate(freq=rowSums(.[,-1],na.rm=TRUE)) %>%
    select(c(words,freq)) %>%
    arrange(desc(freq)) %>%
    as.data.table()
  if( !is.null(saveNew) ) { saveRDS(out, saveNew) }
  # BUG FIX: the function previously ended with the `if` statement, so when
  # saveNew was NULL it returned NULL and the combined table was lost.
  out
}
freq_df <- function(x){
  suppressMessages(require(data.table))
  # Collapse a document-feature matrix into a term/frequency table,
  # sorted from most to least frequent.
  term_totals <- sort(colSums(as.matrix(x)), decreasing = TRUE)
  data.table(words = as.character(names(term_totals)), freq = term_totals)
}
freqMat <- function(df){
  suppressMessages(require(dplyr))
  # Frequency-of-frequencies table: for each distinct count r, the number of
  # terms (Nr) occurring exactly r times.
  fof <- df %>% group_by(freq) %>% summarise(n())
  colnames(fof) <- c("r", "Nr")
  fof
}
makeZr <- function(df){
  # Step 1 in simple Good-Turing smoothing: average each Nr over the gap
  # between its neighbouring observed counts, producing the Zr column.
  # df: frequency-of-frequencies table (count r in column 1, Nr in column 2);
  #     columns are renamed to r/Nr if necessary.
  suppressMessages(require(dplyr))
  if (!identical(names(df)[1:2], c("r", "Nr"))) names(df) <- c("r", "Nr")
  m <- nrow(df)
  averaged <- numeric(m)
  for (i in seq_len(m)) {
    # Previous observed count (0 before the first row) and next observed
    # count (extrapolated as 2*r - prev for the last row).
    prev_r <- if (i == 1) 0 else df[[i - 1, 1]]
    next_r <- if (i == m) 2 * df[[i, 1]] - prev_r else df[[i + 1, 1]]
    averaged[i] <- df[[i, 2]] / (0.5 * (next_r - prev_r))
  }
  df$Zr <- averaged
  df
}
do_lgt_r <- function(df) {
  # Step 2 performs linear Good-Turing smoothing: fit log10(Zr) ~ log10(r)
  # and derive the smoothed count lgt_r = r * (1 + 1/r)^(b + 1) from the
  # slope b. The method requires b <= -1, otherwise we stop.
  # df: a data.frame output from the makeZr() function
  fit <- lm(log10(df$Zr) ~ log10(df$r))
  slope <- coef(fit)[2]
  if (slope > -1) stop("Slope of regression line is greater than -1")
  df$lgt_r <- df$r * (1 + 1 / df$r)^(slope + 1)
  df
}
do_gt_r <- function(df, threshold){
  # Step 3 performs regular Good-Turing smoothing for small counts:
  # gt_r = (r + 1) * Nr[r+1] / Nr[r] for the first `threshold` rows,
  # NA thereafter (those rows use the linear estimate instead).
  # df: a data.frame output from do_lgt_r() (count r in column 1, Nr in column 2)
  # threshold: number of leading rows to smooth (must be < nrow(df))
  turing <- rep(NA_real_, nrow(df))
  for (k in seq_len(threshold)) {
    next_ratio <- df[[k + 1, 2]] / df[[k, 2]]
    turing[k] <- (df[[k, 1]] + 1) * next_ratio
  }
  df$gt_r <- turing
  df
}
sgt_smooth <- function(df, threshold){
  # wrapper function for all components of simple Good-Turing smoothing
  # df: word/freq table (columns `words`, `freq`)
  # threshold: counts up to which the ordinary Good-Turing estimate is used;
  #            above it the linear (regression-based) estimate takes over
  suppressMessages(require(dplyr))
  # Build the frequency-of-frequencies table and both smoothed estimates.
  fm <- df %>%
    freqMat() %>%
    makeZr() %>%
    do_lgt_r() %>%
    do_gt_r(threshold=threshold)
  # Stitch the two estimates together at `threshold`.
  fm$sgt <- c(fm$gt_r[1:threshold], fm$lgt_r[(threshold+1):nrow(fm)])
  # Map each smoothed count back onto the individual words.
  # NOTE(review): rev(rep(...)) pairs the largest smoothed counts with the
  # first rows of df, i.e. df must be sorted by decreasing freq (as produced
  # by freq_df) -- confirm this invariant holds for all callers.
  df$r_smooth <- rev(rep(fm$sgt,fm$Nr))
  # Convert to probabilities and renormalise so they sum to 1.
  N <- sum(fm$r*fm$Nr)
  df$pr <- df$r_smooth/N
  tot <- sum(df$pr)
  df$pr <- df$pr/tot
  df
}
# Split each named text file in `directory` into `chunkSize` consecutive
# pieces. Piece j of file F is written to <directory>/<j>/<j>.F .
splitText <- function(directory, files, chunkSize){
  for( i in files ){
    num <- length(readLines(file.path(directory, i)))
    chunk <- ceiling(num/chunkSize)  # lines per piece
    con <- file(file.path(directory, i), open = "r")
    # BUG FIX: the loop was hard-coded to `1:8` regardless of chunkSize; it
    # now runs once per piece, so the function works for any chunk count
    # (behaviour is unchanged for the previous chunkSize = 8 usage).
    for( j in seq_len(chunkSize) ){
      if( !dir.exists( file.path(directory, j) ) ){
        dir.create( file.path(directory, j) )
      }
      lines <- readLines(con, n=chunk)
      writeLines(lines, file.path(directory, j, paste0(j,".",i)))
    }
    close(con)
  }
}
# Load the bi- through six-gram frequency tables from disk into a named list,
# optionally dropping low-frequency rows and saving the assembled list.
#
# files : paths to the .rds tables; files[2..6] are bi/tri/quad/five/six-grams
#         (files[1], the unigram table, is intentionally not loaded)
# trim  : if non-NULL, keep only rows with freq > trim
# save  : optional file.path to save the list via saveRDS
ngram_list <- function(files, trim=NULL, save=NULL){
  # Loop replaces five duplicated readRDS/trim stanzas; order and names
  # of the result are unchanged.
  sizes <- c("bi", "tri", "quad", "five", "six")
  nl <- list()
  for (k in seq_along(sizes)) {
    tbl <- readRDS(files[k + 1])
    if (!is.null(trim)) tbl <- tbl[tbl$freq > trim, ]
    nl[[sizes[k]]] <- tbl
  }
  if (!is.null(save)) { saveRDS(nl, save) }
  nl
}
nextWord <- function(x, ngrams, num=1) {
  # x: a character string
  # ngrams: list of n-grams (bi..six, as produced by ngram_list(); ngrams[[i]]
  #         holds the (i+1)-grams, i.e. an i-word prefix plus the predicted word)
  # num: number of candidate words to return
  # Stupid-backoff-style prediction: try the longest available prefix first
  # and fall back to shorter prefixes until a match is found.
  require(stringi)
  require(dplyr)
  # Clean the text with the same process that generated n-gram lists
  x <- cleanTextQuick(x)
  # Delete text before EOS punctuation since it will skew prediction.
  x <- gsub(".*<EOS>", "", x)
  x <- gsub(" $", "", x)
  # Get length of string for loop iterations (at most a 5-word prefix).
  m <- length(stri_split_fixed(str=x, pattern=" ")[[1]])
  m <- ifelse(m < 5, m, 5)
  for( i in m:1 ){
    x <- stri_split_fixed(str=x, pattern=" ")[[1]]
    n <- length(x)
    # As i decreases, length of x is shortened to search smaller n-grams
    x <- paste(x[(n-i+1):n], collapse=" ")
    # Rows of the (i+1)-gram table that start with the current prefix.
    search <- grep(paste0("^", x, " "), ngrams[[i]]$words)
    if( length(search) == 0 ) { next }
    break
  }
  # Rank matches by frequency and strip the prefix, leaving candidate words.
  choices <- ngrams[[i]][search,]
  choices <- arrange(choices, desc(freq))
  words <- gsub(paste0(x," "), "", choices$words)
  # Fallback: with no match at any order, return the most common English words.
  if (length(words)==0) { words <- c("the", "to", "and", "a", "of") }
  words[1:num]
}
# Keep only the first n space-separated words of a single string.
# Reimplemented with base strsplit/paste, dropping the stringi dependency;
# behaviour is unchanged (including producing "NA" tokens when n exceeds
# the word count, as before).
trimString <- function(x, n) {
  words <- strsplit(x, " ", fixed = TRUE)[[1]]
  paste(words[1:n], collapse = " ")
}
# Return the last space-separated word of a single string.
# Reimplemented with base strsplit, dropping the stringi dependency;
# behaviour is unchanged.
getLastWord <- function(x){
  words <- strsplit(x, " ", fixed = TRUE)[[1]]
  words[length(words)]
}
pruneNgrams <- function(x, n, save = NULL, keep = 5L) {
  # x    : ngram table with columns `words` and `pr`
  # n    : number of leading words used as the grouping prefix
  #        (the original comment claimed n was the keep-count -- it is not)
  # save : optional file.path to save the pruned ngram table
  # keep : number of top entries (by pr) retained per prefix group.
  #        Previously hard-coded to 5L; parameterised with the same default,
  #        so existing callers are unaffected.
  suppressMessages(require(data.table))
  x <- data.table(x)
  # Group key = first n words of each n-gram.
  x <- x[ , group := sapply(words, function(z) trimString(z, n))]
  # Rank within each group by descending probability and keep the top `keep`.
  x <- setorder(setDT(x), group, -pr)[, index := seq_len(.N), group][index <= keep]
  x <- x[, c("group", "index") := NULL]
  if( !is.null(save) ) saveRDS(x, save)
  x
}
nextWord2 <- function(x, ngrams, num=1) {
  # x: a character string
  # ngrams: list of n-grams (bi..six, as produced by ngram_list())
  # num: number of words to return
  # NOTE(review): despite the name, this variant returns the ORDER of the
  # n-gram model that produced a match (i + 1, or 1 when no match at any
  # order) rather than a predicted word; `num` and the computed `words` are
  # unused in the return value. Presumably used for model diagnostics --
  # confirm against callers.
  require(stringi)
  require(dplyr)
  # Clean the text with the same process that generated n-gram lists
  x <- cleanTextQuick(x)
  # Delete text before EOS punctuation since it will skew prediction.
  x <- gsub(".*<EOS>", "", x)
  x <- gsub(" $", "", x)
  # Get length of string for loop iterations (at most a 5-word prefix).
  m <- length(stri_split_fixed(str=x, pattern=" ")[[1]])
  m <- ifelse(m < 5, m, 5)
  for( i in m:1 ){
    x <- stri_split_fixed(str=x, pattern=" ")[[1]]
    n <- length(x)
    # As i decreases, length of x is shortened to search smaller n-grams
    x <- paste(x[(n-i+1):n], collapse=" ")
    search <- grep(paste0("^", x, " "), ngrams[[i]]$words)
    if( length(search) == 0 ) { next }
    break
  }
  choices <- ngrams[[i]][search,]
  choices <- arrange(choices, desc(freq))
  words <- gsub(paste0(x," "), "", choices$words)
  # 1 = no match at any n-gram order; otherwise the matched order (i + 1).
  if (length(words)==0) { ng_ret = 1 }
  else{ ng_ret = i+1 }
  ng_ret
}
|
5995cbd94e3817403a70a702c854696b840af285
|
babb638e58b5b5937f0e85751e15c63d23e1bcd9
|
/R/LambsWeight.R
|
dc8c4a0987c8fa5a88a911e0ee80a0ef06b05862
|
[
"MIT"
] |
permissive
|
EfratVil/MultiNav
|
dd982b4a8805707cbb25fc3bd3c1c43b5ea55a5a
|
53f7729f0d62c6a83ace0772da7e3c01a115d7e4
|
refs/heads/master
| 2021-06-04T10:54:07.111510
| 2020-04-11T15:23:48
| 2020-04-11T15:23:48
| 95,772,643
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
LambsWeight.R
|
#' LambsWeight
#'
#' Daily weight estimates of individual lambs in a small herd.
#' The dataset contains 80 daily weight estimates for 39 lambs.
#'
#' @source Volcani center, ARO PLF lab. Data was collected from a novel system which contains electronic scales and a drinking-behavior sensor, designed for automatic small-ruminant monitoring.
#' @format A data.frame with columns
#' \describe{
#'   \item{column}{Each column contains weight measurements of a different lamb.}
#'   \item{row}{Time of recording (in hours)}
#' }
"LambsWeight"
|
063d790691c29da6c466864e4dca9909ae8b3958
|
a27b79fc527614f1ae9ab192bec123f7ad55ff36
|
/R/utils-io.R
|
b40d977c23c1eb187612656f52cb767eb9a3e93a
|
[
"MIT"
] |
permissive
|
r-lib/pkgdown
|
59528c00deab7466f678c48ed6e26227eecf1e6c
|
c9206802f2888992de92aa41f517ba7812f05331
|
refs/heads/main
| 2023-08-29T05:25:38.049588
| 2023-07-19T14:26:10
| 2023-07-19T14:26:10
| 3,723,845
| 443
| 330
|
NOASSERTION
| 2023-09-06T09:08:11
| 2012-03-15T00:36:24
|
R
|
UTF-8
|
R
| false
| false
| 888
|
r
|
utils-io.R
|
# Reading -----------------------------------------------------------------
# Slurp an entire file into a single string, with every line (including the
# last) terminated by "\n".
read_file <- function(path) {
  content <- read_lines(path)
  paste0(paste(content, collapse = "\n"), "\n")
}
# Inspired by roxygen2 utils-io.R (https://github.com/klutometis/roxygen/) -----------
# Shadow base::readLines so accidental use inside the package fails fast.
readLines <- function(...) {
  stop("Use read_lines!")
}
# Shadow base::writeLines so accidental use inside the package fails fast.
writeLines <- function(...) {
  stop("Use write_lines!")
}
# Read up to n lines of a file as UTF-8, suppressing the "incomplete final
# line" warning. n = -1L (the default) reads the whole file.
read_lines <- function(path, n = -1L) {
  base::readLines(path, warn = FALSE, encoding = "UTF-8", n = n)
}
# Write lines as UTF-8; useBytes = TRUE writes the encoded bytes as-is,
# avoiding platform re-encoding (notably on Windows).
write_lines <- function(text, path) {
  base::writeLines(enc2utf8(text), con = path, useBytes = TRUE)
}
# Other -------------------------------------------------------------------
# Two files are considered equal when the destination exists and both hash
# to the same xxhash64 digest.
file_equal <- function(src, dst) {
  if (!file_exists(dst)) {
    return(FALSE)
  }
  identical(
    digest::digest(file = src, algo = "xxhash64"),
    digest::digest(file = dst, algo = "xxhash64")
  )
}
|
6ebf6b5c71181ad96c2851b4bb9a8995e720b3f3
|
3dce7cc8a4a3d729d4794211b545b14baa325419
|
/R/nd_centrality.R
|
6edcaa19be1a3ea5998bfb3b850d30cdc045471c
|
[] |
no_license
|
cran/NetworkDistance
|
674e58e0b9001ee37a651f2ade7346af86c5b481
|
76ac6589bc954845dc7ab3bbf2f1a925e0dcc70d
|
refs/heads/master
| 2023-07-12T02:38:46.365762
| 2021-08-21T14:00:08
| 2021-08-21T14:00:08
| 113,559,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,806
|
r
|
nd_centrality.R
|
#' Centrality Distance
#'
#' Centrality is a core concept in studying the topological structure of
#' complex networks, which can be either defined for each node or edge.
#' \code{nd.centrality} offers 3 distance measures on node-defined centralities.
#' See this \href{https://en.wikipedia.org/wiki/Centrality}{Wikipedia page} for more
#' on network/graph centrality.
#'
#' @param A a list of length \eqn{N} containing \eqn{(M\times M)} adjacency matrices.
#' @param out.dist a logical; \code{TRUE} for computed distance matrix as a \code{dist} object.
#' @param mode type of node centrality definitions to be used.
#' @param directed a logical; \code{FALSE} as symmetric, undirected graph.
#'
#'
#' @return a named list containing \describe{
#' \item{D}{an \eqn{(N\times N)} matrix or \code{dist} object containing pairwise distance measures.}
#' \item{features}{an \eqn{(N\times M)} matrix where rows are node centralities for each graph.}
#' }
#'
#'
#' @examples
#' \donttest{
#' ## load example data
#' data(graph20)
#'
#' ## use 3 types of centrality measures
#' out1 <- nd.centrality(graph20, out.dist=FALSE,mode="Degree")
#' out2 <- nd.centrality(graph20, out.dist=FALSE,mode="Close")
#' out3 <- nd.centrality(graph20, out.dist=FALSE,mode="Between")
#'
#' ## visualize
#' opar = par(no.readonly=TRUE)
#' par(mfrow=c(1,3), pty="s")
#' image(out1$D[,20:1], main="Degree", col=gray(0:32/32), axes=FALSE)
#' image(out2$D[,20:1], main="Close", col=gray(0:32/32), axes=FALSE)
#' image(out3$D[,20:1], main="Between", col=gray(0:32/32), axes=FALSE)
#' par(opar)
#' }
#'
#' @references
#' \insertRef{roy_modeling_2014}{NetworkDistance}
#'
#' @rdname nd_centrality
#' @export
nd.centrality <- function(A, out.dist=TRUE,
                          mode=c("Degree","Close","Between"),
                          directed=FALSE){
  #-------------------------------------------------------
  ## PREPROCESSING
  # 1. input must be a list of adjacency matrices, length > 1
  if ((!is.list(A))||(length(A)<=1)){
    # BUG FIX: the message previously named the wrong function ("nd.csd").
    stop("* nd.centrality : input 'A' should be a list of length larger than 1.")
  }
  # 2. validate/standardise the adjacency matrices
  listA = list_transform(A, NIflag="not")
  N = length(listA)      # number of graphs
  M = nrow(listA[[1]])   # number of nodes per graph
  # 3. out.dist & directed must be logical flags
  if ((!is.logical(out.dist))||(!is.logical(directed))){
    stop("* nd.centrality : 'out.dist' and 'directed' should be logical variables.")
  }
  # 4. centrality mode; defaults to "degree" when missing
  allmodes = c("degree","close","between")
  if (missing(mode)){
    finmode = "degree"
  } else {
    finmode = match.arg(tolower(mode), allmodes)
  }
  #-------------------------------------------------------
  ## MAIN COMPUTATION
  # 1. result containers: per-graph node-centrality rows and pairwise distances
  mat_features = array(0,c(N,M))
  mat_dist     = array(0,c(N,N))
  # 2. per-graph node centralities via igraph
  for (i in 1:N){
    if (directed==FALSE){
      tgt = graph_from_adjacency_matrix(listA[[i]], mode="undirected")
    } else {
      tgt = graph_from_adjacency_matrix(listA[[i]], mode="directed")
    }
    # record the chosen centrality. (A redundant `all()` wrapper around the
    # scalar "degree" comparison was removed.)
    if (finmode=="degree"){
      mat_features[i,] = as.vector(igraph::degree(tgt))
    } else if (finmode=="close"){
      mat_features[i,] = as.vector(igraph::closeness(tgt))
    } else if (finmode=="between"){
      mat_features[i,] = as.vector(igraph::betweenness(tgt))
    }
  }
  # 3. pairwise L1 distances between centrality profiles (symmetric)
  for (i in 1:(N-1)){
    vec1 = mat_features[i,]
    for (j in (i+1):N){
      vec2 = mat_features[j,]
      solution = sum(abs(vec1-vec2))
      mat_dist[i,j] = solution
      mat_dist[j,i] = solution
    }
  }
  #-------------------------------------------------------
  ## RETURN RESULTS
  if (out.dist){
    mat_dist = as.dist(mat_dist)
  }
  result = list()
  result$D = mat_dist
  result$features = mat_features
  return(result)
}
|
bc921399358eb801e31cf5a0a9bd8b06f33574c8
|
ab52fc262fdbfc834469e001a654f7aba23d4cb0
|
/employment_rates/NV/compile_NV.R
|
19b841735c9975a5b4855d6b5544f30991e2bbb0
|
[] |
no_license
|
andyzwang/coronavirus-unemployment
|
89f8eb9f91b033c0e3bb97b5cea0126a343056db
|
be5f8f2583dbe61f79ff6032f4e8beb9bf3f210c
|
refs/heads/master
| 2022-11-19T22:33:12.782097
| 2020-07-22T21:37:02
| 2020-07-22T21:37:02
| 271,094,644
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
compile_NV.R
|
# Compile Nevada (NV) local-area unemployment data into the common
# cross-state schema: adds state identifiers, classifies each area row,
# joins county FIPS codes from maps::county.fips, and writes the result.
# imports
library(tidyverse)
library(janitor)
library(lubridate)
library(maps)
library(stringr)
# import
raw <- read_csv("NV_data.csv")
output <- raw %>%
  clean_names("snake") %>%
  mutate(
    state_fips = "32",
    state_short = "NV",
    state = "Nevada",
    # Classify rows as the statewide total, a county, or a city/town.
    # FIX: the statewide row is labeled "Nevada" — the previous check for
    # "Montana" (copy-pasted from the MT script) misclassified it as "city".
    area_type = case_when(
      area == "Nevada" ~ "state",
      str_detect(area, "County") ~ "county",
      TRUE ~ "city"
    ),
    area = str_remove(area, ", Nevada"),
    # Build the maps::county.fips lookup key ("nevada,countyname");
    # non-county rows get NA and therefore no FIPS match.
    polyname = case_when(
      area_type == "county" ~ paste("nevada,", tolower(str_remove(str_remove(area, "[[:punct:]]"), " County")), sep = "")
    )
  ) %>%
  # Join with FIPS
  left_join(county.fips, by = "polyname") %>%
  rename(unemployment = unemployed,
         employment = employed) %>%
  select(
    state_fips, state_short, state, area, area_type, fips, period, year,
    labor_force, employment, unemployment
  )
write.csv(output, file = "NV_compiled.csv", row.names = FALSE)
|
e3efba5f396e87ab7b42ca238dbdbb0954c86728
|
8fa4a8b0292469cde0f5308e1c774ea8c10d4341
|
/Codes/A0.R
|
f048cc56077c5ce04a71cb561279a619c2d87c8b
|
[] |
no_license
|
Minam7/DA_Project
|
255bdb04d8d48d3b8a96a60d8df8b4783d4c432b
|
22a16dd9cc6881522a5a2a95246c64ef52d07bbf
|
refs/heads/master
| 2020-07-31T19:06:05.449683
| 2020-07-07T19:30:45
| 2020-07-07T19:30:45
| 210,721,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
A0.R
|
library(dplyr)
library(readr)
library(stringr)

# Load the aviation-safety (ASN) crash table and derive occupant/survivor
# counts, a military-operator flag, and a stable per-row id.
asn_raw <- read_csv("Downloads/R/DA_Project/Data/asn_c.csv")
casn <- as.data.frame(asn_raw, stringsAsFactors = F)
casn <- casn %>%
  mutate(
    # When occupants were recorded as 0 but fatalities exist, fall back to
    # the fatality count as the occupant count.
    Total_occupants = ifelse(Total_occupants == 0 & Total_fatalities != 0,
                             Total_fatalities, Total_occupants),
    Total_survivors = abs(Total_occupants - Total_fatalities)
  ) %>%
  mutate(
    # Clamp survivors so they can never exceed the occupant count.
    Total_survivors = ifelse(Total_survivors > Total_occupants,
                             Total_occupants,
                             abs(Total_occupants - Total_fatalities))
  ) %>%
  # Operators containing "Force"/"Navy" (case-insensitive) are military.
  mutate(is_army = str_detect(Operator, regex("Force|Navy", ignore_case = T))) %>%
  # Stable row number, used downstream as an occurrence id.
  mutate(occ_no = row_number())
|
19f4c0097f24a72d036e5da4a0a8662b8dced1ce
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610046809-test.R
|
d7dcf27a7f63cd0b9b1ce1753d16185f035020f9
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 193
|
r
|
1610046809-test.R
|
# Fuzzer-generated regression input for gjam's internal truncated-normal
# sampler tnormRcpp. Note the degenerate interval: 'hi' appears to be
# below 'lo' (both huge negative magnitudes) and 'sig' is subnormal —
# this case was surfaced by libFuzzer/valgrind, not hand-written.
testlist <- list(hi = -3.1638862116397e+134, lo = -3.16388621163964e+134, mu = -3.16388619810127e+134, sig = 9.00092879516474e-316)
# Invoke the unexported C++ routine with the fuzzed argument list.
result <- do.call(gjam:::tnormRcpp,testlist)
# Inspect whatever comes back; this is a crash/UB check, not a value check.
str(result)
|
4f89adac8bd7d8e952f5bdbdc05bfc850815a331
|
7b0529c5616c586602f1c060a563b1d3503593b7
|
/cachematrix.R
|
55b2791abb5a30e1c4da55a55293131824e2cd17
|
[] |
no_license
|
ckghosted/ProgrammingAssignment2
|
e13916defcd89b49ab1c14af461ee9e8e69388f8
|
0494f23e4d1a06aca3fea68399b6d7f25b975188
|
refs/heads/master
| 2021-01-14T11:19:51.438105
| 2015-05-24T09:35:58
| 2015-05-24T09:35:58
| 36,113,223
| 0
| 0
| null | 2015-05-23T08:00:46
| 2015-05-23T08:00:45
| null |
UTF-8
|
R
| false
| false
| 1,805
|
r
|
cachematrix.R
|
## The following two functions makeCacheMatrix() and cacheSolve() can be
## used to cache the inverse of a matrix. As long as the concerned matrix
## is not changed, its inverse is computed only once and can be retrived
## for later usage.
## The makeCacheMatrix() function takes a matrix as its argument, returns
## a special matrix object that:
## (1) holds the matrix and its inverse (originally being null), and
## (2) generates a list of setters and getters that can be used to cache
## the matrix and its inverse.
## Although the inverse is null at the first time, we can then compute it
## using the following function cacheSolve(), in which the sub-function
## setinverse() will also be invoked to set the inverse into the special
## matrix object for later usage.
# Create a special "matrix" object able to cache its own inverse.
# Returns a list of four accessors closing over the matrix `x` and the
# cached inverse: set/get for the matrix, setinverse/getinverse for the
# cache. Replacing the matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Store a new matrix and drop the now-stale cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinverse = function(inverse) {
      cached_inverse <<- inverse
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## The cacheSolve() function takes a special matrix object produced by
## the above makeCacheMatrix() function and returns the inverse. If the
## inverse has already been calculated, it just retrives it from the
## special matrix object by the getinverse() function; Otherwise, if the
## inverse is null, the function will compute the inverse using the "solve"
## function.
## In this assignment, it is assumed that the matrix supplied is always
## invertible.
# Return the inverse of the special "matrix" produced by makeCacheMatrix().
# If the inverse is already cached it is returned directly (with a
# "getting cached data" message); otherwise it is computed with solve(),
# stored back into the cache, and returned. The matrix is assumed to be
# invertible; extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse once and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
bf3360fe925e409dceff6897476e812a1676b60b
|
d936b0cc6a234e67d2022ee2565874402f5ae09b
|
/sandbox/create_geojson.R
|
e5effa6777ccff6ddab85744e4a232ba429bd1f5
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
crazycapivara/openlayers
|
b70de41605e5c60132fe879d41b95189f1824cb4
|
5d8af098d4b0e44c07e9a9da3db9325ee7995142
|
refs/heads/master
| 2021-01-22T12:36:24.506413
| 2019-05-20T06:52:27
| 2019-05-20T06:52:27
| 82,209,616
| 18
| 2
|
NOASSERTION
| 2019-07-06T13:35:49
| 2017-02-16T17:56:20
|
R
|
UTF-8
|
R
| false
| false
| 2,262
|
r
|
create_geojson.R
|
# A GeoJSON-style Feature, expressed as a nested R list, describing the
# Ballard neighborhood of Seattle as a MultiPolygon — suitable for passing
# to a web-mapping htmlwidget that serializes it to JSON.
seattle_geojson = list(
  type = "Feature",
  geometry = list(
    type = "MultiPolygon",
    # MultiPolygon nesting: list of polygons -> list of rings -> ring as a
    # sequence of c(longitude, latitude) points; the first and last points
    # are identical so the ring is closed.
    coordinates = list(list(list(
      c(-122.36075812146, 47.6759920119894),
      c(-122.360781646764, 47.6668890126755),
      c(-122.360782108665, 47.6614990696722),
      c(-122.366199035722, 47.6614990696722),
      c(-122.366199035722, 47.6592874248973),
      c(-122.364582509469, 47.6576254522105),
      c(-122.363887331445, 47.6569107302038),
      c(-122.360865528129, 47.6538418253251),
      c(-122.360866157644, 47.6535254473167),
      c(-122.360866581103, 47.6533126275176),
      c(-122.362526540691, 47.6541872926348),
      c(-122.364442114483, 47.6551892850798),
      c(-122.366077719797, 47.6560733960606),
      c(-122.368818463838, 47.6579742346694),
      c(-122.370115159943, 47.6588730808334),
      c(-122.372295967029, 47.6604350102328),
      c(-122.37381369088, 47.660582362063),
      c(-122.375522972109, 47.6606413027949),
      c(-122.376079703095, 47.6608793094619),
      c(-122.376206315662, 47.6609242364243),
      c(-122.377610811371, 47.6606160735197),
      c(-122.379857378879, 47.6610306942278),
      c(-122.382454873022, 47.6627496239169),
      c(-122.385357955057, 47.6638573778241),
      c(-122.386007328104, 47.6640865692306),
      c(-122.387186331506, 47.6654326177161),
      c(-122.387802656231, 47.6661492860294),
      c(-122.388108244121, 47.6664548739202),
      c(-122.389177800763, 47.6663784774359),
      c(-122.390582858689, 47.6665072251861),
      c(-122.390793942299, 47.6659699214511),
      c(-122.391507906234, 47.6659200946229),
      c(-122.392883050767, 47.6664166747017),
      c(-122.392847210144, 47.6678696739431),
      c(-122.392904778401, 47.6709016021624),
      c(-122.39296705153, 47.6732047491624),
      c(-122.393000803496, 47.6759322346303),
      c(-122.37666945305, 47.6759896300663),
      c(-122.376486363943, 47.6759891899754),
      c(-122.366078869215, 47.6759641734893),
      c(-122.36075812146, 47.6759920119894)
    )))
  ),
  # Arbitrary feature properties; population is a demo value.
  properties = list(
    name = "Ballard",
    population = 48000,
    # You can inline styles if you want
    style = list(
      fillColor = "yellow",
      weight = 2,
      color = "#000000"
    )
  ),
  id = "ballard"
)
|
dcf007f75ceeb5388152cae0a66d35ab668e70c2
|
62a5ff47bc82332003d28a2c17bc8e94d17f4b69
|
/Documents/MA684/Work/hw10/hw10.R
|
a6e5bfb79111a8f5a1f652bc2057b2eb983784a1
|
[] |
no_license
|
jiayuans/Applied-Multiple-Regression-and-Multivariable-Methods
|
13ce5039c174ae3a4c0228b0a1b345d95e8c42c1
|
dae2fe6d2a1ccb2a7c529bb210b330ea69399efe
|
refs/heads/master
| 2021-01-10T09:08:43.260841
| 2016-03-29T18:40:38
| 2016-03-29T18:40:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 885
|
r
|
hw10.R
|
# Factor analysis of 20 drinking-motive survey items (D1-D20), followed by
# simple regressions of the resulting sum-score subscales on age at first
# drink (AgeDrink). Data: drinkingmotives2007.csv.
library(psych)
library(GPArotation)
dm <- read.csv("drinkingmotives2007.csv",header=T)
attach(dm)
head(dm)
# Principal components on the correlation matrix of all 20 items.
compout <- princomp(~D1+D2+D3+D4+D5+D6+D7+D8+D9+D10+
                      D11+D12+D13+D14+D15+D16+D17+D18+D19+D20,
                    cor=TRUE)
summary(compout)
# Varimax-rotated 3-factor solution to group the items into subscales.
vars <- data.frame(D1, D2, D3, D4, D5, D6, D7, D8, D9, D10,
                   D11, D12, D13, D14, D15, D16, D17, D18, D19, D20)
principal(vars,nfactors=3,rotate="varimax")
# Sum-score subscales built from the factor solution.
havafun <- D3+D5+D7+D9+D10+D11+D13+D14+D15+D16+D18
besocial <- D2+D8+D12+D19+D20
cheerup <- D1+D4+D6+D17
# Descriptives: mean, SD, and range of each subscale.
mean(havafun)
mean(besocial)
mean(cheerup)
sd(havafun)
sd(besocial)
sd(cheerup)
max(havafun)-min(havafun)
max(besocial)-min(besocial)
# FIX: range of 'cheerup' previously subtracted min(besocial) (copy-paste bug)
max(cheerup)-min(cheerup)
# Regress each subscale on age at first drink.
lm.havefun <- lm(havafun~AgeDrink)
summary(lm.havefun)
lm.besocial <- lm(besocial~AgeDrink)
summary(lm.besocial)
lm.cheerup <- lm(cheerup~AgeDrink)
summary(lm.cheerup)
|
4de8b0b82ab560134f0b8e97fbd8cbc7578e7628
|
3b62ffa02efef29b8bbaa9041d74a1ee72b4807a
|
/inst/examples/ex-rhrOU.R
|
f58f876a9f79a74f7770b16c8526b6cbfa70b5f6
|
[] |
no_license
|
jmsigner/rhr
|
52bdb94af6a02c7b10408a1dce549aff4d100709
|
7b8d1b2dbf984082aa543fe54b1fef31a7853995
|
refs/heads/master
| 2021-01-17T09:42:32.243262
| 2020-06-22T14:24:00
| 2020-06-22T14:24:00
| 24,332,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 544
|
r
|
ex-rhrOU.R
|
# Example usage of rhrOU(): simulate Ornstein-Uhlenbeck movement paths,
# illustrating how the attraction matrix A, the noise scale sigma, and
# the center mu change the simulated trajectory.
set.seed(123)
## Standard Walk
walk <- rhrOU(n = 5000)
plot(walk)
## Adjust pull back
# A larger diagonal in A strengthens the pull toward the center.
walk <- rhrOU(n = 5000, A = matrix(c(0.01, 0, 0, 0.01), 2))
plot(walk)
## Effect of sigma: not only the scale of x and y changes
# Same seed for both runs so only sigma differs between them.
set.seed(123)
walk <- rhrOU(n = 5000, A = matrix(c(0.01, 0, 0, 0.01), 2), sigma = 1)
plot(walk)
set.seed(123)
walk <- rhrOU(n = 5000, A = matrix(c(0.01, 0, 0, 0.01), 2), sigma = 100)
plot(walk)
## Effect of xy0
# NOTE(review): the heading says "xy0" but the call sets 'mu' —
# presumably this shifts the attraction center to (50, 50); confirm
# against the rhrOU() documentation.
set.seed(123)
walk <- rhrOU(n = 5000, A = matrix(c(0.01, 0, 0, 0.01), 2), sigma = 1, mu = c(50, 50))
plot(walk)
|
bc736ca193c9476ccb48f8dcc248ba29c4138946
|
e75696bb83eaef340a5643e15688e9ad75a22b1f
|
/R/keras_classification.R
|
d0ca9c1c075a594d116f70876b49049112e3ec21
|
[
"MIT"
] |
permissive
|
RJ333/phyloseq2ML
|
e18981dcbc6adc057cfe41e9fcbd1a66b04458ed
|
391ce779d9cb816ca109ab6f89bd7246c27c784c
|
refs/heads/master
| 2022-08-23T10:25:52.853275
| 2020-05-28T16:03:24
| 2020-05-28T16:03:24
| 264,425,119
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,940
|
r
|
keras_classification.R
|
#' Build and compile a two-hidden-layer keras sequential model
#'
#' Assembles a dense feed-forward network — two hidden layers, each
#' followed by a dropout layer — whose input width is taken from the
#' number of columns of \code{train_data}, then compiles it with the
#' requested optimizer, loss and metrics. See the keras documentation for
#' [keras_model_sequential](https://keras.rstudio.com/reference/keras_model_sequential.html),
#' [layer_dense](https://keras.rstudio.com/reference/layer_dense.html) and
#' [compile](https://keras.rstudio.com/reference/compile.html) for details.
#'
#' @param train_data a table of training data (only its column count is used)
#' @param Layer1_units integer, number of units in the first hidden layer
#' @param Layer2_units integer, number of units in the second hidden layer
#' @param classes integer, number of units in the output layer, 1 for regression
#' @param Dropout_layer1 numeric, dropout ratio for layer 1, between 0 and 1
#' @param Dropout_layer2 numeric, dropout ratio for layer 2, between 0 and 1
#' @param Dense_activation_function char, activation function for the hidden layers
#' @param Output_activation_function char, activation function for the output layer,
#'   (default: NULL, used for regression)
#' @param Optimizer_function char, the optimizer function
#' @param Loss_function char, the loss function
#' @param Metric char vector, which metrics to monitor
#' @param ... further arguments
#'
#' @return a compiled keras sequential model with two hidden layers
#'
#' @export
build_the_model <- function(train_data, Layer1_units, Layer2_units, classes,
  Dropout_layer1, Dropout_layer2, Dense_activation_function,
  Output_activation_function = NULL, Optimizer_function, Loss_function, Metric, ...) {
  # Input layer width comes from the feature count of the training table.
  n_input_features <- dim(train_data)[[2]]
  if (n_input_features < 1) {
    stop("Provided training data has no columns, can't determine input layer shape")
  }
  # Network architecture: dense -> dropout -> dense -> dropout -> output.
  # keras layer functions mutate the model object in place and return it,
  # so these sequential calls are equivalent to the piped form.
  model <- keras::keras_model_sequential()
  model <- keras::layer_dense(model,
    units = Layer1_units,
    activation = Dense_activation_function,
    input_shape = n_input_features)
  model <- keras::layer_dropout(model, rate = Dropout_layer1)
  model <- keras::layer_dense(model,
    units = Layer2_units,
    activation = Dense_activation_function)
  model <- keras::layer_dropout(model, rate = Dropout_layer2)
  model <- keras::layer_dense(model,
    units = classes,
    activation = Output_activation_function)
  # Compile in place with the requested optimizer/loss/metrics, then return.
  keras::compile(model,
    optimizer = Optimizer_function,
    loss = Loss_function,
    metrics = Metric)
  model
}
#' Run keras tensorflow classification.
#'
#' This functions calls keras tensorflow using the parameter values in each row
#' of the provided master_grid, using the data of the list elements. Please have
#' a look at the keras [fit doc](https://keras.rstudio.com/reference/fit.html)
#' for explanation on the keras related variables, the arguments are beginning
#' with "keras" in the description. Except for `the list`, `master_grid` and `.row`
#' all arguments need to be column names of `master_grid`
#'
#' @param Target factor, the response variable
#' @param ML_object factor or char, the name of the corresponding `the_list` item
#' @param Cycle integer, the current repetition
#' @param Epochs keras, integer, how many times should the whole data set be
#' passed through the network?
#' @param Batch_size keras, integer, how many samples before updating the weights?
#' @param k_fold integer, the total number of k_folds for cross validation
#' @param current_k_fold integer, the current k_fold in range 1 : k_fold
#' @param Early_callback keras, string, a callback metric
#' @param Delay keras, integer, wait for how many epochs before callback happens?
#' @param step character declaring `training` or `prediction`
#' @param the_list The input tables list
#' @param master_grid the data frame containing all parameter combinations
#' @param .row current row of master_grid
#' @param ... additional features passed by pmap call
#'
#' @return a compiled keras sequential model with two hidden layers
#'
#' @export
keras_classification <- function(Target, ML_object, Cycle, Epochs, Batch_size, k_fold,
  current_k_fold, Early_callback, Delay, step, the_list, master_grid, .row, ...) {
  # --- validate parameter grid and input list ------------------------------
  if(!all(c("Target", "ML_object", "Cycle", "Epochs", "Batch_size", "k_fold",
    "current_k_fold", "Early_callback", "Delay", "step") %in% colnames(master_grid))) {
    stop("Keras parameters do not match column names in master_grid")
  }
  if(is.null(the_list[[ML_object]])) {
    stop("Names of items in the_list and ML_object in master_grid do not match")
  }
  # NOTE(review): exists() given a character vector only tests the FIRST
  # name, and this inspects the_list[[1]] rather than the_list[[ML_object]] —
  # confirm whether all four elements of the selected item should be checked.
  if(!exists(c("trainset_labels", "trainset_data", "testset_labels",
    "testset_data"), where = the_list[[1]])) {
    stop("Item in the_list does not have all required elements:
      trainset_labels, trainset_data, testset_labels, testset_data")
  }
  stopifnot(step == "training" | step == "prediction")
  # --- progress logging and data extraction --------------------------------
  state <- paste("Row", .row, "of", nrow(master_grid))
  futile.logger::flog.info(state)
  community_table <- the_list[[ML_object]]
  training_data <- community_table[["trainset_data"]]
  training_labels <- community_table[["trainset_labels"]]
  # training_labels is a one-hot matrix; its column count is the class count
  classes <- ncol(training_labels)
  if(classes < 2) {
    stop("Less then 2 classes found, response variable setup seems incorrect")
  }
  # lookup to translate between factor levels and class labels
  lookup <- stats::setNames(c(colnames(training_labels)), c(0:(classes - 1)))
  if (step == "prediction" & (k_fold != 1 | current_k_fold != 1)) {
    stop("k_fold and current_k_fold need to be 1 for prediction")
  } else if (step == "training") {
    # shuffle row order, then assign each row to one of k_fold CV folds
    indices <- sample(1:nrow(training_data))
    folds <- cut(1:length(indices), breaks = k_fold, labels = FALSE)
  }
  if (step == "training") {
    kfold_msg <- paste("k_fold", current_k_fold, "of", k_fold)
    futile.logger::flog.info(kfold_msg)
    # split training data into train and validation, by number of folds
    validation_indices <- which(folds == current_k_fold, arr.ind = TRUE)
    validation_data <- training_data[validation_indices, ]
    validation_targets <- training_labels[validation_indices, ]
    partial_train_data <- training_data[-validation_indices, ]
    partial_train_targets <- training_labels[-validation_indices, ]
    # build and compile model
    model <- build_the_model(train_data = training_data, classes = classes, ...)
    # train model with early stopping on the held-out fold
    history <- model %>% keras::fit(
      partial_train_data,
      partial_train_targets,
      epochs = Epochs,
      batch_size = Batch_size,
      callbacks = keras::callback_early_stopping(
        monitor = Early_callback,
        patience = Delay,
        verbose = 0),
      validation_data = list(validation_data, validation_targets),
      verbose = 0)
  } else if (step == "prediction") {
    # prediction step: train on the full training set, evaluate on the test set
    validation_data <- community_table[["testset_data"]]
    validation_targets <- community_table[["testset_labels"]]
    partial_train_data <- training_data
    partial_train_targets <- training_labels
    # build and compile model
    model <- build_the_model(train_data = training_data, classes = classes, ...)
    # train model
    # NOTE(review): `test_split` is not a documented keras::fit() argument
    # (validation_split?) — confirm it is not silently swallowed by `...`.
    history <- model %>% keras::fit(
      partial_train_data,
      partial_train_targets,
      epochs = Epochs,
      batch_size = Batch_size,
      callbacks = keras::callback_early_stopping(
        monitor = Early_callback,
        patience = Delay,
        verbose = 0),
      test_split = 0.0,
      verbose = 0)
  }
  # predict classes (integer class indices 0..classes-1)
  val_predictions <- model %>% keras::predict_classes(validation_data)
  # prepare results: map integer levels back to class label strings
  factor_targets <- categoric_to_factor(validation_targets)
  predicted <- data.frame(factor_targets, val_predictions)
  predicted_labels <- data.frame(lapply(predicted, function(i)
    lookup[as.character(i)]))
  if (nrow(predicted_labels) != nrow(validation_data)) {
    stop("Length of predictions and data to be predicted differs")
  }
  # provide all classes as factor levels, otherwise confusion matrix breaks if
  # a class is not predicted or present at all
  predicted_labels$val_predictions <- factor(predicted_labels$val_predictions,
    levels = colnames(training_labels))
  predicted_labels$factor_targets <- factor(predicted_labels$factor_targets,
    levels = colnames(training_labels))
  # calculate confusion matrix (rows = truth, columns = prediction)
  confusion_matrix <- table(
    true = predicted_labels$factor_targets,
    predicted = predicted_labels$val_predictions)
  # return results data.frame
  store_classification_results(hist = history,
    prediction_table = predicted_labels, confusion_matrix = confusion_matrix,
    train_data = training_data, n_classes = classes)
}
#' Reverse keras::to_categorical
#'
#' This function takes a binary matrix and returns one column representing
#' the factor levels. That way, `keras::to_categorical` can be reversed after
#' the machine learning step and compared to the predictions
#'
#' @param matrix the binary matrix which needs to be converted
#'
#' @return An integer vector with numeric factor levels
#'
categoric_to_factor <- function(matrix) {
  # Guard: this reversal only makes sense for a binary (one-hot) matrix.
  if(!is.matrix(matrix)) {
    stop("Provided data is not a matrix")
  }
  # For each row, the 0-based position of its maximum entry is the
  # original factor level encoded by keras::to_categorical.
  row_to_level <- function(one_hot_row) {
    which(one_hot_row == max(one_hot_row)) - 1
  }
  apply(matrix, 1, row_to_level)
}
#' Store results from keras tf classification training and prediction
#'
#' This function extracts per-class confusion counts (TP/FP/TN/FN) from the
#' confusion matrix plus sample-size bookkeeping from the keras history
#' object, then appends per-class performance metrics by calling
#' `classification_metrics`. One output row is produced per class.
#'
#' @param hist the keras history object
#' @param prediction_table the data.frame comparing predictions and true values
#' @param n_classes the number of classes for classification
#' @param confusion_matrix the confusion matrix generated from `prediction_table`
#' (rows = truth, columns = prediction)
#' @param train_data the training set data.frame
#'
#' @return A data frame with one row per keras run and class
#'
#' @export
store_classification_results <- function(hist, prediction_table, n_classes,
  confusion_matrix, train_data) {
  if(!is.data.frame(prediction_table)) {
    stop("prediction table is not a data frame")
  } else if(nrow(prediction_table) == 0) {
    stop("prediction table is empty")
  }
  # Vectorized per-class confusion counts (replaces the previous
  # row-by-row growing loop):
  #   TP = diagonal entry
  #   FP = column total minus TP (predicted this class, was another)
  #   FN = row total minus TP (was this class, predicted another)
  #   TN = everything outside this class's row and column
  cm <- as.matrix(confusion_matrix)
  idx <- seq_len(n_classes)
  true_pos <- diag(cm)[idx]
  false_pos <- (colSums(cm) - diag(cm))[idx]
  false_neg <- (rowSums(cm) - diag(cm))[idx]
  true_neg <- sum(cm) - true_pos - false_pos - false_neg
  results <- data.frame(
    Class = row.names(confusion_matrix)[idx],
    True_positive = unname(true_pos),
    False_positive = unname(false_pos),
    True_negative = unname(true_neg),
    False_negative = unname(false_neg),
    stringsAsFactors = FALSE
  )
  # Sample-size bookkeeping for downstream metric normalization.
  results$Number_of_samples_train <- hist$params$samples
  results$Number_of_samples_validate <- nrow(prediction_table)
  results$Number_independent_vars <- ncol(train_data)
  results <- classification_metrics(results, results$Number_of_samples_validate)
  results
}
|
03fcf03149ccc1bf9b300a54e43bf7596db342a5
|
ec3933d1684eab22d34452eb6991aa9d8e8021ab
|
/algorithm/SVM.R
|
3241aa4ff97883b3ee41642b8e68374d1c614db7
|
[] |
no_license
|
ssh352/R_PractiseCode
|
55e992f4734dcc08253d1316986976d977acd700
|
3762a2d37bd237a3f1e25659c08ced7d01d3a8f1
|
refs/heads/master
| 2021-06-01T00:58:54.448799
| 2016-06-15T08:52:41
| 2016-06-15T08:52:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,933
|
r
|
SVM.R
|
#ๆฏๆๅ้ๆบๆฏ็ฐๅจ่ขซๅนฟๆณ็จไบ่งฃๅณๅค็ฑป้็บฟๆงๅ็ฑป้ฎ้ขๅๅๅฝ้ฎ้ขใ
#ไผ ้็ปๅฝๆฐsvm()็ๅ
ณ้ฎๅๆฐๆฏkernelใcostๅgammaใ
#Kernelๆ็ๆฏๆฏๆๅ้ๆบ็็ฑปๅ๏ผๅฎๅฏ่ฝๆฏ็บฟๆงSVMใๅค้กนๅผSVMใๅพๅSVMๆSigmoid SVMใ
#Costๆฏ่ฟๅ็บฆๆๆถ็ๆๆฌๅฝๆฐ๏ผgammaๆฏ้ค็บฟๆงSVMๅคๅ
ถไฝๆๆSVM้ฝไฝฟ็จ็ไธไธชๅๆฐใ
#่ฟๆไธไธช็ฑปๅๅๆฐ๏ผ็จไบๆๅฎ่ฏฅๆจกๅๆฏ็จไบๅๅฝใๅ็ฑป่ฟๆฏๅผๅธธๆฃๆตใ
#ไฝๆฏ่ฟไธชๅๆฐไธ้่ฆๆพๅผๅฐ่ฎพ็ฝฎ๏ผๅ ไธบๆฏๆๅ้ๆบไผๅบไบๅๅบๅ้็็ฑปๅซ่ชๅจๆฃๆต่ฟไธชๅๆฐ๏ผๅๅบๅ้็็ฑปๅซๅฏ่ฝๆฏไธไธชๅ ๅญๆไธไธช่ฟ็ปญๅ้ใๆไปฅๅฏนไบๅ็ฑป้ฎ้ข๏ผไธๅฎ่ฆๆไฝ ็ๅๅบๅ้ไฝไธบไธไธชๅ ๅญใ
#ไพๅญไธๅ่๏ผhttp://blog.jobbole.com/84714/
#ไพๅญไธ๏ผไฝฟ็จๆฏๆๅ้ๆบๅฎ็ฐไบๅ
ๅ็ฑปๅจ๏ผไฝฟ็จ็ๆฐๆฎๆฏๆฅ่ชMASSๅ
็catsๆฐๆฎ้ใ
#ๅจๆฌไพไธญไฝ ๅฐๅฐ่ฏไฝฟ็จไฝ้ๅๅฟ่้้ๆฅ้ขๆตไธๅช็ซ็ๆงๅซใๆไปฌๆฟๆฐๆฎ้ไธญ20%็ๆฐๆฎ็น๏ผ็จไบๆต่ฏๆจกๅ็ๅ็กฎๆง๏ผๅจๅ
ถไฝ็80%็ๆฐๆฎไธๅปบ็ซๆจกๅ๏ผใ
library(e1071)
data(cats, package="MASS")
inputData <- data.frame(cats[, c (2,3)], response = as.factor(cats$Sex)) # response as factor
# linear SVM ็บฟๆงSVM
svmfit <- svm(response ~ ., data = inputData, kernel = "linear", cost = 10, scale = FALSE) # linear svm, scaling turned OFF
print(svmfit)
plot(svmfit, inputData)
compareTable <- table (inputData$response, predict(svmfit)) # tabulate
mean(inputData$response != predict(svmfit)) # 19.44% misclassification error
# radial SVM
#ๆณจ๏ผๅพๅๅบๅฝๆฐไฝไธบไธไธชๅๆฌข่ฟ็ๅ
ๆ ธๅฝๆฐ๏ผๅฏไปฅ้่ฟ่ฎพ็ฝฎๅ
ๆ ธๅๆฐไฝไธบโradialโๆฅไฝฟ็จใๅฝไฝฟ็จไธไธชๅธฆๆโradialโ็ๅ
ๆ ธๆถ๏ผ็ปๆไธญ็่ถ
ๅนณ้ขๅฐฑไธ้่ฆๆฏไธไธช็บฟๆง็ไบใ
#้ๅธธๅฎไนไธไธชๅผฏๆฒ็ๅบๅๆฅ็ๅฎ็ฑปๅซไน้ด็ๅ้๏ผ่ฟไนๅพๅพๅฏผ่ด็ธๅ็่ฎญ็ปๆฐๆฎ๏ผๆด้ซ็ๅ็กฎๅบฆใ
svmfit <- svm(response ~ ., data = inputData, kernel = "radial", cost = 10, scale = FALSE) # radial svm, scaling turned OFF
print(svmfit)
plot(svmfit, inputData)
compareTable <- table (inputData$response, predict(svmfit)) # tabulate
mean(inputData$response != predict(svmfit)) # 18.75% misclassification error
#ๅฏไปฅไฝฟ็จtune.svm()ๅฝๆฐ๏ผๆฅๅฏปๆพsvm()ๅฝๆฐ็ๆไผๅๆฐใ
### Tuning
# Prepare training and test data
set.seed(100) # for reproducing results
rowIndices <- 1 : nrow(inputData) # prepare row indices
sampleSize <- 0.8 * length(rowIndices) # training sample size
trainingRows <- sample (rowIndices, sampleSize) # random sampling
trainingData <- inputData[trainingRows, ] # training data
testData <- inputData[-trainingRows, ] # test data
tuned <- tune.svm(response ~., data = trainingData, gamma = 10^(-6:-1), cost = 10^(1:2)) # tune
summary (tuned) # to select best gamma and costๅฝcostไธบ100๏ผgammaไธบ0.001ๆถไบง็ๆๅฐ็้่ฏฏ็
#cost=100,gamma=0.00,kernal=radial
svmfit <- svm (response ~ ., data = trainingData, kernel = "radial", cost = 100, gamma=0.001, scale = FALSE) # radial svm, scaling turned OFF
print(svmfit)
plot(svmfit, trainingData)
compareTable <- table (testData$response, predict(svmfit, testData)) # comparison table
mean(testData$response != predict(svmfit, testData)) # 13.79% misclassification error
#็ฝๆ ผๅพ
# Grid Plot
n_points_in_grid = 60 # num grid points in a line
x_axis_range <- range (inputData[, 2]) # range of X axis
y_axis_range <- range (inputData[, 1]) # range of Y axis
X_grid_points <- seq (from=x_axis_range[1], to=x_axis_range[2], length=n_points_in_grid) # grid points along x-axis
Y_grid_points <- seq (from=y_axis_range[1], to=y_axis_range[2], length=n_points_in_grid) # grid points along y-axis
all_grid_points <- expand.grid (X_grid_points, Y_grid_points) # generate all grid points
names (all_grid_points) <- c("Hwt", "Bwt") # rename
all_points_predited <- predict(svmfit, all_grid_points) # predict for all points in grid
color_array <- c("red", "blue")[as.numeric(all_points_predited)] # colors for all points based on predictions
plot (all_grid_points, col=color_array, pch=20, cex=0.25) # plot all grid points
points (x=trainingData$Hwt, y=trainingData$Bwt, col=c("red", "blue")[as.numeric(trainingData$response)], pch=19) # plot data points
points (trainingData[svmfit$index, c (2, 1)], pch=5, cex=2) # plot support vectors
#ไพๅญไบ ๅ่ใๆฐๆฎๆๆ R่ฏญ่จๅฎๆใ ้็จ้ธขๅฐพ่ฑไฝไธบๆฐๆฎ้
library(e1071)
data(iris) # ่ทๅๆฐๆฎ้iris
###็ฌฌไธ็งๆ ผๅผๅปบ็ซๆจกๅ
model <- svm(Species~.,data=iris) # ๅปบ็ซsvmๆจกๅ
###็ฌฌไบ็งๆ ผๅผๅปบ็ซๆจกๅ
x <- iris[,-5] # ๆๅirisๆฐๆฎไธญ้ค็ฌฌ5ๅไปฅๅค็ๆฐๆฎไฝไธบ็นๅพๅ้
y <- iris[,5] # ๆๅirisๆฐๆฎไธญ็็ฌฌ5ๅๆฐๆฎไฝไธบ็ปๆๅ้(ๅณๅๅบๅ้)
model <- svm(x,y,kernel ="radial",gamma =if(is.vector(x)) 1 else 1/ncol(x)) # ๅปบ็ซsvmๆจกๅ
###ๅฏนๆจกๅ่ฟ่ก้ขๆต
x <- iris[,1:4] # ็กฎ่ฎค้่ฆ่ฟ่ก้ขๆต็ๆ ทๆฌ็นๅพ็ฉ้ต
pred <- predict(model,x) # ๆ นๆฎๆจกๅmodelๅฏนxๆฐๆฎ่ฟ่ก้ขๆต
pred[sample(1:150,8)] # ้ๆบๆ้8ไธช้ขๆต็ปๆ่ฟ่กๅฑ็คบ
table(pred,y) # ๆจกๅ้ขๆต็ฒพๅบฆๅฑ็คบ
###ๅฎ้
ๅปบๆจก่ฟ็จไธญๅฎๆดๆไฝ
attach(iris) # ๅฐๆฐๆฎirisๆๅๅ็ฌ็กฎ่ฎคไธบๅ้
x <- subset(iris,select = -Species) # ็กฎๅฎ็นๅพๅ้ไธบๆฐๆฎirisไธญ้คๅปSpecies็ๅ
ถไป้กน
y <- Species # ็กฎๅฎ็ปๆๅ้ไธบๆฐๆฎirisไธญ็Species้กน
type <- c("C-classification","nu-classification","one-classification")# ็กฎๅฎๅฐ่ฆ้็จ็ๅ็ฑปๆนๅผ
kernel <- c("linear","polynomial","radial","sigmoid") #็กฎๅฎๅฐ่ฆ้็จ็ๆ ธๅฝๆฐ
pred <- array(0,dim=c(150,3,4)) #ๅๅงๅ้ขๆต็ปๆ็ฉ้ต็ไธ็ปด้ฟๅบฆๅๅซไธบ150๏ผ3๏ผ4
accuracy <- matrix(0,3,4) #ๅๅงๅๆจกๅ็ฒพๅๅบฆ็ฉ้ต็ไธค็ปดๅๅซไธบ3๏ผ4
yy <- as.integer(y) #ไธบๆนไพฟๆจกๅ็ฒพๅบฆ่ฎก็ฎ๏ผๅฐ็ปๆๅ้ๆฐ้ๅไธบ1๏ผ2๏ผ3
for(i in 1:3) #็กฎ่ฎคiๅฝฑๅ็็ปดๅบฆไปฃ่กจๅ็ฑปๆนๅผ
{
for(j in 1:4) #็กฎ่ฎคjๅฝฑๅ็็ปดๅบฆไปฃ่กจๆ ธๅฝๆฐ
{
pred[,i,j]=predict(svm(x,y,type=type[i],kernel=kernel[j]),x) #ๅฏนๆฏไธๆจกๅ่ฟ่ก้ขๆต
if(i>2)
{
accuracy[i,j]=sum(pred[,i,j]!=1)
}
else
{
accuracy[i,j]=sum(pred[,i,j]!=yy)
}
}
}
dimnames(accuracy)=list(type,kernel) #็กฎๅฎๆจกๅ็ฒพๅบฆๅ้็ๅๅๅ่กๅ
table(pred[,1,3],y) # ๆจกๅ้ขๆต็ฒพๅบฆๅฑ็คบ
###ๆจกๅๅฏ่งๅ
plot(cmdscale(dist(iris[,-5])),col=c("lightgray","black","gray")[as.integer(iris[,5])],pch= c("o","+")[1:150 %in% model$index + 1]) # ็ปๅถๆจกๅๅ็ฑปๆฃ็นๅพ
legend(2,-0.8,c("setosa","versicolor","virginica"),col=c("lightgray","black","gray"),lty=1) # ๆ ่ฎฐๅพไพ
data(iris)                              # load the iris data set
model <- svm(Species ~ ., data = iris)  # fit an SVM via the formula interface
# Plot the fitted class regions in the Petal.Width ~ Petal.Length plane;
# support vectors are drawn with the "+" symbol.
# (Rejoined comment fragments that had been split onto bare lines.)
plot(model, iris, Petal.Width ~ Petal.Length, fill = FALSE,
     symbolPalette = c("lightgray", "black", "grey"), svSymbol = "+")
# Legend mapping the palette to the three species.
legend(1, 2.5, c("setosa", "versicolor", "virginica"),
       col = c("lightgray", "black", "gray"), lty = 1)
###ๆจกๅ่ฟไธๆญฅไผๅ
wts=c(1,1,1) # ็กฎๅฎๆจกๅๅไธช็ฑปๅซ็ๆฏ้ไธบ1๏ผ1๏ผ1
names(wts)=c("setosa","versicolor","virginica") #็กฎๅฎๅไธชๆฏ้ๅฏนๅบ็็ฑปๅซ
model1=svm(x,y,class.weights=wts) #ๅปบ็ซๆจกๅ
wts=c(1,100,100) # ็กฎๅฎๆจกๅๅไธช็ฑปๅซ็ๆฏ้ไธบ1๏ผ100๏ผ100
names(wts)=c("setosa","versicolor","virginica") #็กฎๅฎๅไธชๆฏ้ๅฏนๅบ็็ฑปๅซ
model2=svm(x,y,class.weights=wts) #ๅปบ็ซๆจกๅ
pred2=predict(model2,x) #ๆ นๆฎๆจกๅ่ฟ่ก้ขๆต
table(pred2,y) #ๅฑ็คบ้ขๆต็ปๆ
wts=c(1,500,500) # ็กฎๅฎๆจกๅๅไธช็ฑปๅซ็ๆฏ้ไธบ1๏ผ500๏ผ500
names(wts)=c("setosa","versicolor","virginica") #็กฎๅฎๅไธชๆฏ้ๅฏนๅบ็็ฑปๅซ
model3=svm(x,y,class.weights=wts) #ๅปบ็ซๆจกๅ
pred3=predict(model3,x) #ๆ นๆฎๆจกๅ่ฟ่ก้ขๆต
table(pred3,y) #ๅฑ็คบ้ขๆต็ปๆ
|
deb9570d843bd5aa376f808ccb910bd350a4147b
|
57cc9fbbc9a9d34c1e3719b36fd14ad0198078b8
|
/R/plots.R
|
6880ab5fc753705de2a4cbf02740904087508aad
|
[] |
no_license
|
cihga39871/iteremoval
|
d1a20eb34ecc96dbe9cc71674d627062093c45e5
|
0995da54f51d050c3e83c7e7b933f6a8ef5d4139
|
refs/heads/master
| 2021-05-16T15:00:40.317409
| 2018-06-27T10:28:33
| 2018-06-27T10:28:33
| 118,550,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,928
|
r
|
plots.R
|
#' @title Iteration trace of removed scores
#' @description Plot the score of the removed feature in each iteration.
#' @family plot
#' @return ggplot2 object.
#' @param li the list result of \code{feature_removal}.
#' @import utils
#' @import ggplot2
#' @export
#' @examples
#' g1 <- SWRG1; g0 <- SWRG0
#'
#' result.complex <- feature_removal(g1, g0,
#'                                   cutoff1=0.95, cutoff0=0.925,
#'                                   offset=c(0.5, 1, 2))
#'
#' # it is a ggplot2 object, so plus sign is available
#' ggiteration_trace(result.complex) + theme_bw()
ggiteration_trace <- function(li) {
    # Guard: `li` must be the result list produced by `feature_removal()`.
    if (is.null(li$removed.scores))
        stop("`li` do not contain 'removed.scores'. Generate `li` with function `feature_removal`.")
    # stack() turns the wide score table (one column per offset) into long
    # format with columns `values`/`ind`; column 1 (Index) is excluded and
    # re-attached so every long row keeps its iteration index.
    stacked.removed.scores <- stack(li$removed.scores, select = -1)
    stacked.removed.scores$Index <- li$removed.scores$Index
    # FIX: refer to columns by name inside aes() instead of reaching into the
    # data frame with `$`. Using `$` bypasses ggplot2's data masking, which
    # breaks facetting/layer reuse and can desynchronize the plot data.
    ggplot(stacked.removed.scores) +
        geom_line(aes(x = Index, y = values, color = ind)) +
        labs(x = "Index", y = "Minimum Prediction Value", color = "Offset")
}
#' @import graphics
NULL
#' @title Feature prevalence
#' @family prevalencestat plot
#' @description Compute the feature prevalence after removing the features of
#' the first \code{index} iterations.
#' @param li the list result of \code{feature_removal}.
#' @param index removing the features of the first \code{index} iterations. It
#' allows a positive integer or a proper fraction in [0, 1). A non-integer
#' value >= 1 is truncated via \code{as.integer(index)}.
#' @param hist.plot bool. A switch to plot the histogram of the remaining
#' features.
#' @export
#' @return A sorted \code{table}: prevalence count per remaining feature.
#' @examples
#' g1 <- SWRG1; g0 <- SWRG0
#'
#' result.complex <- feature_removal(g1, g0,
#' cutoff1=0.95, cutoff0=0.925,
#' offset=c(0.5, 1, 2))
#'
#' # index is a proportion in 0-1
#' prevalence.result <- feature_prevalence(result.complex, 0.5, hist.plot=TRUE)
#'
#' # index is a positive integer
#' prevalence.result <- feature_prevalence(result.complex, 233, hist.plot=TRUE)
feature_prevalence <- function(li, index, hist.plot=TRUE) {
# check li is a valid result list produced by feature_removal()
if (is.null(li$removed.feature_names))
stop("`li` do not contain 'removed.feature_names'. Generate `li` with function `feature_removal`.")
nfeature <- nrow(li$removed.feature_names)
# index in [0, 1) is read as a fraction of the features and converted to a
# 1-based row index; values >= 1 are used directly (truncated below).
if (0 <= index && index < 1) {
index <- as.integer(nfeature * index) + 1L
} else if (index < 0) {
stop("`index` < 0. `index` is either a positive integer or a decimal in [0,1) as a quantile.")
} else if (index > nfeature)
stop("`index` > the feature number.")
# Keep rows index..nfeature and drop column 1.
# NOTE(review): column 1 appears to be the iteration index, with one
# feature-name column per offset -- confirm against feature_removal().
features.mt <- li$removed.feature_names[as.integer(index):nfeature,
2:ncol(li$removed.feature_names)]
# Flatten all offset columns into one vector and count how many times each
# feature appears across offsets -- its "prevalence".
features.all <- features.mt %>% as.vector %>% as.matrix(ncol=1) %>% sort
Features <- table(unlist(features.all)) %>% sort
if (hist.plot) hist(Features)
return(Features)
}
#' @title Plot histogram of feature prevalence
#' @family plot prevalencestat
#' @description Compute the feature prevalence (presence across different
#' offsets) after removing the features of the first \code{index} iterations,
#' and then plot the histogram of the remaining features. Internally it calls
#' \code{feature_prevalence(..., hist.plot=FALSE)} and draws the histogram
#' itself.
#' @param li the list result of \code{feature_removal}.
#' @param index removing the features of the first \code{index} iterations. It
#' allows a positive integer or a proper fraction in [0, 1); a non-integer
#' value >= 1 is truncated via \code{as.integer(index)}.
#' @export
#' @return The object returned by \code{hist()}.
#' @examples
#' g1 <- SWRG1; g0 <- SWRG0
#'
#' result.complex <- feature_removal(g1, g0,
#' cutoff1=0.95, cutoff0=0.925,
#' offset=c(0.5, 1, 2))
#'
#' # index is a proportion in 0-1
#' feature_hist(result.complex, 0.5)
#'
#' # index is a positive integer
#' feature_hist(result.complex, 233)
feature_hist <- function(li, index) {
# Compute prevalence without the side-effect plot, then draw it here; the
# local name `Features` is deliberate -- hist() deparses it for the title.
Features <- feature_prevalence(li, index, hist.plot=FALSE)
hist(Features)
}
#' @title Screening feature using prevalence
#' @family prevalencestat
#' @description Return the screened feature names.
#' @param features result of \code{feature_prevalence(...)} (a named count
#'   vector/table: feature name -> prevalence).
#' @param prevalence the prevalence cutoff of features. The features with
#'   prevalence less than \code{prevalence} are removed.
#' @export
#' @return Character vector of feature names meeting the cutoff.
#' @examples
#' g1 <- SWRG1; g0 <- SWRG0
#'
#' result.complex <- feature_removal(g1, g0,
#'                                   cutoff1=0.95, cutoff0=0.925,
#'                                   offset=c(0.5, 1, 2))
#'
#' prevalence.result <- feature_prevalence(result.complex, 233, hist.plot=TRUE)
#'
#' feature.list <- feature_screen(prevalence.result, 3)
feature_screen <- function(features, prevalence) {
    # Base R only: the original used `%>%`, but magrittr is not among the
    # package's declared imports, so the pipe could be undefined at run time.
    names(which(features >= prevalence))
}
|
fda1b0e6ea4b7da9a994d40c8ff6078d612e065e
|
a34687bd7f646ed793ef65b94136f1e52e1e0abc
|
/scripts/DGE_analysis/lib/modOptparse.r
|
f45d30cdf26d2f67a8fd94d9acabb50d37fb7a28
|
[] |
no_license
|
almaan/breast_dge
|
a1722cd7b660a843e313f922d441cd67f24ed13b
|
30582543435733ac3e0ddaecdd25c80cde1f49b9
|
refs/heads/master
| 2020-04-06T12:35:02.979021
| 2019-04-04T09:37:36
| 2019-04-04T09:37:36
| 157,461,458
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,382
|
r
|
modOptparse.r
|
allowMultipleArgs <- function(){
#' Modify trailing arguments passed such that space
#' separated arguments to same flag becomes joined by
#' commas; a format supported by optparse, and which later
#' easily can be split into separate parts again (see
#' splitMultipleArgs). Returns the rewritten argument vector.
oriArgs <- commandArgs(trailingOnly = TRUE)
# Positions of flag tokens, i.e. arguments whose first character is '-'.
flags.pos <- which(sapply(oriArgs, function(x) '-' == substr(x,1,1)))
newArgs <- c()
# Walk consecutive pairs of flag positions; everything strictly between
# two flags is treated as values of the first flag and comma-joined.
if (length(flags.pos) > 1) {
for (i in 1:(length(flags.pos)-1))
{
if ((flags.pos[i] + 1) != flags.pos[i+1]) {
# One or more values follow flag i: join them with commas.
pos <- c((flags.pos[i]+1):(flags.pos[i+1]-1))
newArgs <- c(newArgs,oriArgs[flags.pos[i]], paste(oriArgs[pos],collapse=','))
} else {
# Flag i is immediately followed by another flag: it has no values.
newArgs <- c(newArgs,oriArgs[flags.pos[i]])
}
}
}
# The last flag is handled separately: it may own all trailing values.
if (length(oriArgs) > tail(flags.pos,n=1)) {
pos <- c((flags.pos[length(flags.pos)]+1):length(oriArgs))
newArgs <- c(newArgs, oriArgs[tail(flags.pos,n=1)],paste(oriArgs[pos],collapse=','))
} else {
newArgs <- c(newArgs, oriArgs[tail(flags.pos,n=1)])
}
# NOTE(review): if no argument starts with '-', flags.pos is empty and the
# tail(flags.pos, n = 1) expressions above yield length-0 results; callers
# appear to assume at least one flag is always present -- confirm upstream.
return(newArgs)
}
splitMultipleArgs <- function(optArgs) {
    #' Companion to allowMultipleArgs: expand every comma-joined
    #' argument string in the list back into a character vector
    #' of its individual parts. Elements without a comma are
    #' returned unchanged; list names are preserved.
    lapply(optArgs, function(arg) {
        if (grepl(",", arg)) {
            unlist(strsplit(arg, ","))
        } else {
            arg
        }
    })
}
|
85c87fda550b99bc3eb6539d100ae1c97476bff7
|
7944b44a3e0b62fc2e66a2f7687b8d947dd900b4
|
/man/yadirGetCampaign.Rd
|
48e5e0337c78a5b204d08df179f3f9ad77c37434
|
[] |
no_license
|
selesnow/ryandexdirect
|
3316d3ba092b299b411d69b935d37c4313550482
|
04f8b84d5513d430e2f01cf5ce61c2810264f928
|
refs/heads/master
| 2023-04-03T08:38:45.572545
| 2023-03-17T14:57:18
| 2023-03-17T14:57:18
| 64,333,780
| 59
| 40
| null | 2019-05-02T05:56:51
| 2016-07-27T18:50:53
|
R
|
UTF-8
|
R
| false
| false
| 2,976
|
rd
|
yadirGetCampaign.Rd
|
\name{yadirGetCampaign}
\alias{yadirGetCampaignList}
\alias{yadirGetCampaign}
\title{Get Campaigns List}
\description{Returns the parameters of campaigns that meet the specified criteria.}
\usage{
yadirGetCampaignList(Logins = getOption("ryandexdirect.user"),
States = c("OFF", "ON", "SUSPENDED",
"ENDED", "CONVERTED", "ARCHIVED"), Types =
c("TEXT_CAMPAIGN", "MOBILE_APP_CAMPAIGN",
"DYNAMIC_TEXT_CAMPAIGN", "CPM_BANNER_CAMPAIGN"),
Statuses = c("ACCEPTED", "DRAFT", "MODERATION",
"REJECTED"), StatusesPayment = c("DISALLOWED",
"ALLOWED"), Token = NULL,
AgencyAccount = getOption("ryandexdirect.agency_account"),
TokenPath = yadirTokenPath())
yadirGetCampaign(Logins = getOption("ryandexdirect.user"),
States = c("OFF", "ON", "SUSPENDED",
"ENDED", "CONVERTED", "ARCHIVED"), Types =
c("TEXT_CAMPAIGN", "MOBILE_APP_CAMPAIGN", "DYNAMIC_TEXT_CAMPAIGN",
"CPM_BANNER_CAMPAIGN", "SMART_CAMPAIGN"),
Statuses = c("ACCEPTED", "DRAFT", "MODERATION",
"REJECTED"), StatusesPayment = c("DISALLOWED",
"ALLOWED"), Token = NULL,
AgencyAccount = getOption("ryandexdirect.agency_account"),
TokenPath = yadirTokenPath())
}
\arguments{
\item{Logins}{Your Yandex Login}
\item{AgencyAccount}{Your agency account login, if you get statistic from client account}
\item{TokenPath}{Path to directory where you save credential data}
\item{Token}{character or list object, your Yandex API Token, you can get this by function yadirGetToken or yadirAuth}
\item{States}{character vector, filter by campaign states, for example c("OFF", "ON", "SUSPENDED", "ENDED", "CONVERTED", "ARCHIVED")}
\item{Types}{character vector with campaign types, example c("TEXT_CAMPAIGN", "MOBILE_APP_CAMPAIGN", "DYNAMIC_TEXT_CAMPAIGN")}
\item{Statuses}{character vector, filter campaign list by status, for example c("ACCEPTED", "DRAFT", "MODERATION", "REJECTED")}
\item{StatusesPayment}{character vector, filter campaign list by payment status, for example c("DISALLOWED", "ALLOWED")}
}
\value{data frame with campaign names and parameters}
\author{Alexey Seleznev}
\examples{
\dontrun{
### Please choose another TokenPath to save the Login permanently.
#Get data from client accounts
my_ad_group <- yadirGetCampaign(Login = "login",
TokenPath = tempdir())
#Get data from agency account
# Auth
aut <- yadirAuth(Login = "agency_login",
NewUser = TRUE,
TokenPath = tempdir())
# Load Ad Group List
my_ad_group <- yadirGetCampaign(Login = "client_login",
Token = aut,
TokenPath = tempdir())
}
}
|
3f477114c174ea3a5421624c88a1d6fd89e1ba7b
|
27b1e9936a96dd1b9f8cd0a399f6bbfb1019de47
|
/live_tweet_code.R
|
ddba4365b3cb53b39826f71491c71ba4cb865ddc
|
[] |
no_license
|
muralidatascience/Rcode
|
4c7d5a98e56106a7f0b0b19588962677c7155b9c
|
43676264a899f7c366f66f0b4f408c89400927ab
|
refs/heads/master
| 2020-04-06T06:47:24.324611
| 2016-08-26T12:33:30
| 2016-08-26T12:33:30
| 55,692,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,344
|
r
|
live_tweet_code.R
|
# --- Twitter Streaming API setup (streamR + ROAuth) ---
# NOTE(review): installing a package at run time is fragile; consider making
# ROAuth a documented prerequisite instead of calling install.packages() here.
install.packages("ROAuth",dependencies = TRUE)
library(streamR)
library(ROAuth)
library(RCurl)
library(bitops)
library(rjson)
library(tm)
# Twitter OAuth 1.0a endpoints.
requestURL <- "https://api.twitter.com/oauth/request_token"
accessURL <- "https://api.twitter.com/oauth/access_token"
authURL <- "https://api.twitter.com/oauth/authorize"
# SECURITY NOTE(review): API credentials are hardcoded in source. These keys
# are exposed and should be revoked/rotated; load secrets from environment
# variables or an untracked local config file instead.
consumerKey <- "PqNhaShuF97DQliRpp7xf6xeT"
consumerSecret <- "DQ69l0yR6cbcZ4B41xFfZL1LdhkdWdGdh8GOSh9cdnBw0zSTeb"
# Perform the (interactive) OAuth handshake and cache the credential object.
my_oauth <- OAuthFactory$new(consumerKey = consumerKey, consumerSecret = consumerSecret,
requestURL = requestURL, accessURL = accessURL, authURL = authURL)
my_oauth$handshake(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))
save(my_oauth, file = "myoauth.Rdata")
load("myoauth.Rdata")
# Capture 500 English tweets mentioning "Pepsi" into tweets_pepsi.json.
filterStream("tweets_pepsi.json", track = c("Pepsi"),
oauth = my_oauth,language = 'en' , tweets = 500)
# Build and clean a tm corpus from a character vector of raw tweet texts.
# Steps: strip non-ASCII characters, remove RT/via markers, @mentions, URLs,
# numbers, punctuation, redundant whitespace and English stopwords; lowercase.
makeCorpus <- function(text){ # returns a cleaned tm Corpus built from `text`
#twitterdf <- do.call("rbind", lapply(text, as.data.frame)) # (disabled) alternative: store the fetched tweets as a data frame
twitterdf <- sapply(text,function(row) iconv(row, "latin1", "ASCII", sub=""))# drop non-ASCII characters (emoticons/emoji)
twitterCorpus <- Corpus(VectorSource(twitterdf)) # wrap the cleaned strings in a tm corpus
toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x)) # helper: replace every regex match with a space
twitterCorpus <- tm_map(twitterCorpus, toSpace, "(RT|via)((?:\\b\\W*@\\w+)+)") # strip retweet markers: RT/via followed by @user(s)
twitterCorpus <- tm_map(twitterCorpus, toSpace, "@\\w+") # strip remaining @mentions
twitterCorpus <- tm_map(twitterCorpus, toSpace, "[ \t]{2,}") # collapse runs of spaces/tabs
twitterCorpus <- tm_map(twitterCorpus, toSpace, "[ |\n]{1,}") # collapse spaces, pipes and newlines
twitterCorpus <- tm_map(twitterCorpus, toSpace, "^ ") # trim a leading space
twitterCorpus <- tm_map(twitterCorpus, toSpace, " $") # trim a trailing space
twitterCorpus <- tm_map(twitterCorpus, PlainTextDocument)
twitterCorpus <- tm_map(twitterCorpus, removeNumbers)
twitterCorpus <- tm_map(twitterCorpus, removePunctuation)
twitterCorpus <- tm_map(twitterCorpus, toSpace, "http[[:alnum:]]*") # strip URLs
twitterCorpus <- tm_map(twitterCorpus,removeWords,stopwords("en"))
twitterCorpus <- tm_map(twitterCorpus, content_transformer(tolower))
return(twitterCorpus)
}
# NOTE(review): `tweets_pepsi` is not defined in this script -- presumably the
# captured JSON is parsed elsewhere (e.g. streamR::parseTweets); confirm.
makeCorpus(tweets_pepsi$V4)
|
7978fde1dcda0c827566bfdd5afb75133136867a
|
9efcf1ceb614f121cb4eea7e0e8f03e57279b2f7
|
/man/setXtrain.VB1FitObj.Rd
|
a0a797a84dbbf990384ad4ef25df1a56059d5fb0
|
[] |
no_license
|
cran/survivalsvm
|
51600fd51c86fb30330d6ad909eb25c2f384f521
|
c06c917e5c26c33f88236f48750a4a0c1d389cf3
|
refs/heads/master
| 2021-03-19T07:22:44.380026
| 2018-02-05T06:01:13
| 2018-02-05T06:01:13
| 94,378,235
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 555
|
rd
|
setXtrain.VB1FitObj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vanbelle1.R
\name{setXtrain.VB1FitObj}
\alias{setXtrain.VB1FitObj}
\title{\code{VB1FitObj} (ranking approach)}
\usage{
\method{setXtrain}{VB1FitObj}(vb1o, sv)
}
\arguments{
\item{vb1o}{[\code{VB1FitObj}]\cr
Object of class \code{RegFitObj} taken in argument.}
\item{sv}{new value}
}
\value{
[\code{VB1FitObj}]
Modified version of the object taken in argument.
}
\description{
Default mutator of the field \code{Xtrain} of the object taken in an argument.
}
\keyword{internal}
|
d7528060da93a1927d7d2611636f063ce9f35dc2
|
a4fc7101c68b3acf8528367d498781172529b5c3
|
/man/makeContent.labelrepelgrob.Rd
|
4e31f0f32817b7f103789a60c638c3f91367ec39
|
[] |
no_license
|
Gofer51/ggrepel
|
5d01805192145df3e12ac33e0e6c54bfe94ab540
|
9805a334d841bc369d18db3fc3baabb985f4697c
|
refs/heads/master
| 2020-12-11T03:56:45.269256
| 2016-01-07T21:45:50
| 2016-01-07T21:45:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 325
|
rd
|
makeContent.labelrepelgrob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-label-repel.R
\name{makeContent.labelrepelgrob}
\alias{makeContent.labelrepelgrob}
\title{grid::makeContent function for labelRepelGrob.}
\usage{
makeContent.labelrepelgrob(x)
}
\description{
grid::makeContent function for labelRepelGrob.
}
|
26777b8f71046a85947bb41b6910899a3a68ac28
|
413a1c3aef6dcc562405e6afee97abf6a441bdec
|
/lvr_bernoulli/resi-data.R
|
21311cc233712a32a3e24e58c2bb4b89e9dc45e7
|
[] |
no_license
|
danielechermisi/mcmc-jags-samples
|
68d98a8b5d10888e2cc58db5d1d2eedc67559b41
|
594f6d27f4f37d3c45b5083f04f4772adc385e59
|
refs/heads/master
| 2021-05-06T00:04:47.042835
| 2018-01-19T14:14:01
| 2018-01-19T14:14:01
| 116,985,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 183
|
r
|
resi-data.R
|
"n" <- 10
"Y" <- c(NA,NA,1,1,1,1,0,0,0,0)
"fatturato" <- c(100,230,123,120,1231,100,230,1230,1200,123010)
"missing" <- rep(0, n)
missing[which(is.na(Y))] <- fatturato[which(is.na(Y))]
|
ea40d4b513246d9258bde24068bb4c98dceec399
|
2341115e5bdcd85530538ce53a52efc7ff506af9
|
/์ฝํผ์ค์ธ์ดํ_2์ฐจ๊ณผ์ .R
|
a9965d471988f5038fd732bc15af933de717cd06
|
[] |
no_license
|
heewonham/DataAnalysis-Basic
|
54cbfa5178caebe2875bb62c6c3f6447dcf94a32
|
09c82d8bf107a8ae0b5004f692eb603ee8a119c7
|
refs/heads/master
| 2023-03-04T18:38:08.021502
| 2021-02-11T08:15:49
| 2021-02-11T08:15:49
| 337,966,132
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 7,647
|
r
|
์ฝํผ์ค์ธ์ดํ_2์ฐจ๊ณผ์ .R
|
# 1900๋
๋ ์ดํ ๊ณตํ๋น, ๋ฏผ์ฃผ๋น ๋ํต๋ น ์ฐ์ค๋ฌธ
repub <- vector()
democ <- vector()
re <- c("1909","1921","1925","1929","1953","1957","1969","1973","1981","1985","1989","2001","2005")
de <- c("1913","1917","1933","1937","1941","1945","1949","1961","1965","1977","1993","1997","2009")
for(i in list.files(path='.',pattern='[.]txt$')){
text <- substring(i,1,4)
file <- scan(file=i,what="char",quote = NULL)
if(text %in% re){
repub <- c(repub,file)
}
else if(text %in% de){
democ <- c(democ,file)
}
}
# ๊ณตํ๋น, ๋ฏผ์ฃผ๋น์ dataframe&์๋๋น๋, ๋ธ๋ผ์ด์ฝํผ์ค dataframe 30๊ฐ๊น์ง ๋น๋๋ถ์
repub.t <- sort(table(repub),decreasing = T)
democ.t <- sort(table(democ),decreasing = T)
repub.freq <- data.frame(row.names = names(repub.t), Freq = as.vector(repub.t))
democ.freq <- data.frame(row.names = names(democ.t), Freq = as.vector(democ.t))
brown <- read.delim(file="12_BrownCorpus_frequency.txt",
sep="\t",header=T,row.names=1,quote=NULL)
repub.freq['rel.freq'] = round(repub.freq$Freq/sum(repub.freq$Freq),3)
democ.freq['rel.freq'] = round(democ.freq$Freq/sum(democ.freq$Freq),3)
head(repub.freq,30)
head(democ.freq,30)
head(brown,30)
# ์๋ํด๋ผ์ฐ๋
library(wordcloud)
wordcloud(rownames(repub.freq), repub.freq$Freq, scale=c(3, 0.8), min.freq=2,
max.words=200, random.order=F, rot.per=0.4,colors = brewer.pal(8, "Dark2"))
wordcloud(rownames(democ.freq), democ.freq$Freq, scale=c(3, 0.8), min.freq=2,
max.words=200, random.order=F, rot.per=0.4,colors = brewer.pal(8, "Dark2"))
#n-gram ์ถ์ถ :bi, tri (we, in) de : -- new thier /re : which
list <- c('(^we$|^We$)','(^in$)','(^--$)','(^new$)','(^their$)','(^which$)')
for(i in list){
de.idx <- grep(i,democ)
de.pre.tri <- paste(democ[de.idx-2],democ[de.idx-1],democ[de.idx])
de.pos.tri <- paste(democ[de.idx],democ[de.idx+1],democ[de.idx+2])
de.pre.bi <- paste(democ[de.idx-1],democ[de.idx])
de.pos.bi <- paste(democ[de.idx],democ[de.idx+1])
de.pre.f1 <- data.frame(sort(table(de.pre.bi),decreasing = T))
de.pos.f1 <- data.frame(sort(table(de.pos.bi),decreasing = T))
de.pre.f2 <- data.frame(sort(table(de.pre.tri),decreasing = T))
de.pos.f2 <- data.frame(sort(table(de.pos.tri),decreasing = T))
re.idx <- grep(i,repub)
re.pre.bi <- paste(repub[re.idx-1],repub[re.idx])
re.pos.bi <- paste(repub[re.idx],repub[re.idx+1])
re.pre.tri <- paste(repub[re.idx-2],repub[re.idx-1],repub[re.idx])
re.pos.tri <- paste(repub[re.idx],repub[re.idx+1],repub[re.idx+2])
re.pre.f1 <- data.frame(sort(table(re.pre.bi),decreasing = T))
re.pos.f1 <- data.frame(sort(table(re.pos.bi),decreasing = T))
re.pre.f2 <- data.frame(sort(table(re.pre.tri),decreasing = T))
re.pos.f2 <- data.frame(sort(table(re.pos.tri),decreasing = T))
print(head(de.pre.f1,10))
print(head(de.pos.f1,10))
print(head(de.pre.f2,10))
print(head(de.pos.f2,10))
print(head(re.pre.f1,10))
print(head(re.pos.f1,10))
print(head(re.pre.f2,10))
print(head(re.pos.f2,10))
}
# ํค์๋ ๋ถ์
data <- data.frame(words=vector())
data <- merge(data,data.frame(repub.t),by.x = "words",by.y="repub",all=T)
data <- merge(data,data.frame(democ.t),by.x = "words",by.y="democ",all=T)
colnames(data)[c(2,3)] <- c("repub","democ")
data[is.na(data)] <- 0
data <- data.frame(row.names=data$words, data[2:length(data)])
# comparison cloud
library(wordcloud)
comparison.cloud(data[c(1,2)],random.order=FALSE,scale=c(2,0.9),rot.per=0.4,
max.words=200,colors=brewer.pal(8,"Dark2"),title.size=1.1)
# ๋ฏผ์ฃผ๋น ๊ณตํ๋น์ ์นด์ด์คํ์ด
chi <- chisq.test(data[c(1,2)])$residuals
chi <- as.data.frame(chi)
head(chi[order(chi$repub, decreasing = T),], 30)
head(chi[order(chi$democ, decreasing = T),], 30)
# ๋ฏผ์ฃผ๋น, ๋ธ๋ผ์ด / ๊ณตํ๋น, ๋ธ๋ผ์ด ์นด์ด์คํ์ด
re.br.df <- merge(brown,data.frame(repub.t),by.x="Word",by.y="repub",all=T)
colnames(re.br.df)[c(3)]<-c("repub")
re.br.df[is.na(re.br.df)] <- 0
re.br.df <- data.frame(row.names = re.br.df$Word, re.br.df[c(2,3)])
re.chi <- chisq.test(re.br.df)$residuals
re.chi <- as.data.frame(re.chi)
head(re.chi[order(re.chi$repub, decreasing = T),], 30)
de.br.df <- merge(brown,data.frame(democ.t),by.x="Word",by.y="democ",all=T)
colnames(de.br.df)[c(3)]<-c("democ")
de.br.df[is.na(de.br.df)] <- 0
de.br.df <- data.frame(row.names = de.br.df$Word, de.br.df[c(2,3)])
de.chi <- chisq.test(de.br.df)$residuals
de.chi <- as.data.frame(de.chi)
head(de.chi[order(de.chi$democ, decreasing = T),], 30)
# ์ฐ์ด
#1
node <- c('(^will$)','(^can$)')
d.index <- grep(node,democ)
r.index <- grep(node,repub)
d.span <- vector()
r.span <- vector()
for(i in d.index)
{
d.span <- c(d.span,c((i-4):(i-1),(i+1):(i+4)))
}
d.span <- d.span[d.span>0&d.span<=length(democ)]
d.crc <- democ[d.span]
for(i in r.index)
{
r.span <- c(r.span,c((i-4):(i-1),(i+1):(i+4)))
}
r.span <- r.span[r.span>0&r.span<=length(repub)]
r.crc <- repub[r.span]
#2
dfreq.span<-sort(table(d.crc),decreasing=T)
dfreq.all <- table(democ)
dfreq.co <- data.frame(w1=vector(), w2=vector(),w1w2=vector(),n=vector())
n<-1
for(i in (1:length(dfreq.span)))
{
dfreq.co[n,] <- c(length(d.index),
dfreq.all[names(dfreq.all)==names(dfreq.span)[i]],
dfreq.span[i], length(democ))
rownames(dfreq.co)[n] <- names(dfreq.span)[i]
n <- n+1
}
rfreq.span<-sort(table(r.crc),decreasing=T)
rfreq.all <- table(repub)
rfreq.co <- data.frame(w1=vector(), w2=vector(),w1w2=vector(),n=vector())
n<-1
for(i in (1:length(rfreq.span)))
{
rfreq.co[n,] <- c(length(r.index),
rfreq.all[names(rfreq.all)==names(rfreq.span)[i]],
rfreq.span[i], length(repub))
rownames(rfreq.co)[n] <- names(rfreq.span)[i]
n <- n+1
}
#3
d.coll <- data.frame(dfreq.co, t.score=(dfreq.co$w1w2 -
((dfreq.co$w1*dfreq.co$w2)/dfreq.co$n))/sqrt(dfreq.co$w1w2),
MI = log2((dfreq.co$w1w2*dfreq.co$n)/(dfreq.co$w1*dfreq.co$w2)))
dt.sort <- d.coll[order(d.coll$t.score,decreasing=T),]
dm.sort <- d.coll[order(d.coll$MI,decreasing=T),]
head(dm.sort[dm.sort$w1w2>2,],20)
head(dt.sort[dt.sort$w1w2>2,],20)
r.coll <- data.frame(rfreq.co, t.score=(rfreq.co$w1w2 -
((rfreq.co$w1*rfreq.co$w2)/rfreq.co$n))/sqrt(rfreq.co$w1w2),
MI = log2((rfreq.co$w1w2*rfreq.co$n)/(rfreq.co$w1*rfreq.co$w2)))
rt.sort <- r.coll[order(r.coll$t.score,decreasing=T),]
rm.sort <- r.coll[order(r.coll$MI,decreasing=T),]
head(rm.sort[rm.sort$w1w2>2,],20)
head(rt.sort[rt.sort$w1w2>2,],20)
# Frequency-based analysis of the presidential speeches.
# Hierarchical clustering after removing stopwords.
tdm <- data.frame(words=vector())
re <- c("1909","1921","1925","1929","1953","1957","1969","1973","1981","1985","1989","2001","2005")
de <- c("1913","1917","1933","1937","1941","1945","1949","1961","1965","1977","1993","1997","2009")
# Build a term-document table: one row per word, one column per speech year.
for(i in list.files(path='.',pattern='[.]txt$')){
text <- substring(i,1,4)
file <- scan(file=i,what="char",quote = NULL)
if(text %in% re | text %in% de){
tdm <- merge(tdm, data.frame(table(file)),by.x="words",
by.y="file",all=T)
colnames(tdm)[length(tdm)]<-substring(i,1,4)
}
}
tdm <-data.frame(row.names=tdm$words,tdm[2:length(tdm)])
tdm[is.na(tdm)] <- 0
tdm['rowsum'] <- rowSums(tdm)
# Remove English stopwords, then keep the 30 most frequent remaining words.
# NOTE(review): `stop` shadows base::stop for the rest of this script.
stop <- scan(file="13_EnglishStopwords.txt",what="char",quote=NULL)
NEW <- tdm[!(rownames(tdm) %in% stop),]
NEW <- head(NEW[order(NEW$rowsum,decreasing =T),],30)
# Ward (ward.D2) hierarchical clustering of the speeches over the top-20
# words, using Minkowski distance on scaled counts.
plot(hclust(dist(scale(t(NEW[1:20,-length(NEW)])),method='minkowski'),
method='ward.D2'))
# Clustering on the words most strongly associated with each party according
# to the chi-square residuals (`chi` must exist from the earlier section).
r.ch = head(chi[order(chi$repub, decreasing = T),], 10)
d.ch = head(chi[order(chi$democ, decreasing = T),], 10)
rownames(r.ch)
rownames(d.ch)
chis<- union(rownames(d.ch),rownames(r.ch))
NEW2 <- tdm[(rownames(tdm) %in% chis),]
NEW2 <- head(NEW2[order(NEW2$rowsum,decreasing =T),],30)
plot(hclust(dist(scale(t(NEW2[1:20,-length(NEW2)])),method='minkowski'),
method='ward.D2'))
|
c92c5ab6c1a623390713f7a8acc34be3361d0baa
|
ee622c585242ce9133d0a085ee9d953e2f173654
|
/R/app.R
|
e235e3e360cec27c1d165f82b60890f0ad161c19
|
[
"MIT"
] |
permissive
|
ctesta01/covid19_icu
|
75aa0d686653cdee320a7bb56d571cdaf64913b1
|
171c95bace1c7f69ca4168328895590e9bab3e4c
|
refs/heads/master
| 2021-05-18T04:36:17.850884
| 2020-03-29T19:31:03
| 2020-03-29T19:31:03
| 251,110,472
| 1
| 0
| null | 2020-03-29T19:02:49
| 2020-03-29T19:02:49
| null |
UTF-8
|
R
| false
| false
| 5,481
|
r
|
app.R
|
####################
# Shiny server: keeps dependent sliders consistent and renders the 2x2 grid
# of hospital-capacity plots whenever any control changes.
server <- function(input, output, session) {
observe({
# Ramp windows cannot extend past the simulation time horizon.
updateSliderInput(session, "floorcapramp", max=input$time)
updateSliderInput(session, "icucapramp", max=input$time)
# Expansion targets may never fall below current capacity.
if(input$floorcaptarget < input$floorcap) {
updateSliderInput(session, "floorcaptarget", value=input$floorcap)
}
if (input$icucaptarget < input$icucap) {
updateSliderInput(session, "icucaptarget", value=input$icucap)
}
})
output$hospitalPlot <- renderPlot({
# Forward every control value to the model. plot_hospital() is defined
# elsewhere in the package; it appears to return a list of at least four
# plots -- confirm in its definition. Note the discharge rates chi_C/chi_L
# are reciprocals of the average-stay sliders, and growth_rate is derived
# from the doubling time (log(2)/T).
plots<- plot_hospital(initial_report=input$initrep,
final_report=input$finalrep,
L=input$floorcap,
M=input$icucap,
distribution=input$distrib,
t= input$time,
chi_C=1/input$avgicudischargetime,
chi_L=1/input$avgfloordischargetime,
growth_rate=log(2)/(input$doubling_time),
mu_C1 = input$ICUdeath_young,
mu_C2 = input$ICUdeath_medium,
mu_C3 = input$ICUdeath_old,
rampslope = input$rampslope,
Cinit = input$Cinit,
Finit = input$Finit,
Lfinal=input$floorcaptarget,
Lramp=input$floorcapramp,
Mfinal=input$icucaptarget,
Mramp=input$icucapramp,
doprotocols=input$doprotocols
)
# Arrange the four panels in a labelled 2x2 grid (presumably cowplot::plot_grid).
plot_grid(plots[[1]], plots[[2]],plots[[3]],plots[[4]], nrow=2, ncol=2, labels=c('A', 'B', 'C', 'D'), align="hv")
})
}
####################
generate_ui <- function() {
fluidPage(theme=shinytheme("simplex"),
titlePanel("COVID-19 Hospital Capacity Model"),
sidebarLayout(
sidebarPanel(
tabsetPanel(
tabPanel("Scenario", fluid=TRUE,
includeMarkdown(system.file("content/instructions.md", package='covid19icu')),
h4("Scenario:"),
sliderInput("time", "Time Horizon (days)", min=1, max=60, value=30),
radioButtons("distrib", "Infection curve",
c("Exponential"="exponential",
"Linear"="ramp",
"Saturated"="logistic",
"Flat"="uniform"),
inline=TRUE,
selected="exponential"),
sliderInput("initrep", "Initial cases per day", min=1, max=1e3, value=50),
conditionalPanel(
condition = "input.distrib=='geometric'||input.distrib=='logistic'",
sliderInput("finalrep", "Peak number of cases", min=1, max=3000, value=1000)
),
conditionalPanel(
condition = "input.distrib=='ramp'",
sliderInput("rampslope", "Rate of increase in new cases per day", min=0, max=5, value=1.2, step = .1)
),
conditionalPanel(
condition = "input.distrib == 'exponential'",
sliderInput("doubling_time", "Doubling time (days)", min=2, max=28, value=14)
),
),
tabPanel("Capacity", fluid=TRUE,
includeMarkdown(system.file("content/capacity.md", package='covid19icu')),
sliderInput("icucap", "ICU capacity", min=0, max=3000, value=50),
sliderInput("floorcap", "Initial floor capacity", min=0, max=15000, value=100),
sliderInput("Cinit", "% of ICU capacity occupied at time 0", min=0, max=100, value=12),
sliderInput("Finit", "% of floor capacity occupied at time 0", min=0, max=100, value=56)),
tabPanel("Strategy", fluid=TRUE,
includeMarkdown(system.file("content/protocols.md", package='covid19icu')),
radioButtons("doprotocols", "Capacity expansion strategy",
c("Off"=0, "On"=1),
inline=TRUE,
selected=0),
conditionalPanel(
condition = "input.doprotocols==1",
sliderInput("icucaptarget", "Target ICU capacity", min=0, max=3000, value=50),
sliderInput("icucapramp", "ICU capacity scale-up (days)", min=0, max=30, value=c(10,20)),
sliderInput("floorcaptarget", "Target floor capacity", min=0, max=15000, value=100),
sliderInput("floorcapramp", "Floor capacity scale-up (days)", min=0, max=30, value=c(10,20))
)),
tabPanel("Parameters", fluid=TRUE,
includeMarkdown(system.file("content/parameters.md", package='covid19icu')),
sliderInput("avgfloordischargetime", "Average time on floor", min=0, max=25, value=7),
sliderInput("avgicudischargetime", "Average time in ICU", min=0, max=25, value=10),
sliderInput("ICUdeath_young", "Death rate in ICU (<18 years)", min=0, max=1, value=.1),
sliderInput("ICUdeath_medium", "Death rate in ICU (18-64 years)", min=0, max=1, value=.1),
sliderInput("ICUdeath_old", "Death rate in ICU (65+ years)", min=0, max=1, value=.1),
)),width=4),
mainPanel(
tabsetPanel(
tabPanel("Plots", fluid=TRUE,
plotOutput("hospitalPlot",height="700px")
),
tabPanel("About", fluid=TRUE,
# CHANGE THIS
includeMarkdown(system.file("content/queue_graphic.md", package='covid19icu'))
)
)
)),
hr(),
includeMarkdown(system.file("content/footer.md", package='covid19icu'))
)
}
#' Launch the COVID-19 Hospital Capacity Shiny app
#'
#' Combines the UI built by \code{generate_ui()} with the module-level
#' \code{server} function and returns the resulting Shiny app object.
#' NOTE(review): this name masks \code{shiny::runApp} when the package is
#' attached -- consider renaming or always calling it with a namespace prefix.
#' @return A Shiny app object.
#' @export
runApp <- function() {
shinyApp(ui = generate_ui(), server = server)
}
|
9e20b9eaa5157d9c5ef59b8f67e1d8a78e147fc9
|
4344aa4529953e5261e834af33fdf17d229cc844
|
/input/gcamdata/man/add_title.Rd
|
a80a6eebaed5bb4fed0a8661979e2a1a5c59b5f7
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
JGCRI/gcam-core
|
a20c01106fd40847ed0a803969633861795c00b7
|
912f1b00086be6c18224e2777f1b4bf1c8a1dc5d
|
refs/heads/master
| 2023-08-07T18:28:19.251044
| 2023-06-05T20:22:04
| 2023-06-05T20:22:04
| 50,672,978
| 238
| 145
|
NOASSERTION
| 2023-07-31T16:39:21
| 2016-01-29T15:57:28
|
R
|
UTF-8
|
R
| false
| true
| 509
|
rd
|
add_title.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-data.R
\name{add_title}
\alias{add_title}
\title{add_title}
\usage{
add_title(x, title, overwrite = FALSE)
}
\arguments{
\item{x}{An object}
\item{title}{Title of object (character)}
\item{overwrite}{Allow overwrite of title? Logical}
}
\value{
\code{x} with units appended to any existing comments.
}
\description{
Add character units to a data system object. Units are written out
with the data when the file is saved.
}
|
e02586b9d3dab7ff34b8206d5665758f158ec4b7
|
7b8478fa05b32da12634bbbe313ef78173a4004f
|
/man/sm_desc_update.Rd
|
a35faa9160e333c1f30c9122b36338409f5ebe96
|
[] |
no_license
|
jeblundell/multiplyr
|
92d41b3679184cf1c3a637014846a92b2db5b8e2
|
079ece826fcb94425330f3bfb1edce125f7ee7d1
|
refs/heads/develop
| 2020-12-25T18:02:10.156393
| 2017-11-07T12:48:41
| 2017-11-07T12:48:41
| 58,939,162
| 4
| 1
| null | 2017-11-07T12:01:35
| 2016-05-16T14:30:38
|
R
|
UTF-8
|
R
| false
| true
| 705
|
rd
|
sm_desc_update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal.R
\name{sm_desc_update}
\alias{sm_desc_update}
\title{Update description of a big.matrix after a row subset (internal)}
\usage{
sm_desc_update(desc, first, last)
}
\arguments{
\item{desc}{Existing big.matrix.descriptor}
\item{first}{First relative row of that matrix}
\item{last}{Last relative row of that matrix}
}
\value{
New descriptor
}
\description{
Generating a new big.matrix.descriptor or doing sub.big.matrix on something
that's not a descriptor is slow. This method exists to effectively create
the descriptor that describe(new.sub.big.matrix) would do, but in a fraction
of the time.
}
\keyword{internal}
|
eb41438677f90b2991985f257071fd9f2869a056
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/RcppEigen/doc/RcppEigen-Introduction.R
|
fc4e77892367f7154d70bd45a9ce908157562355
|
[
"MIT",
"GPL-2.0-or-later"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 471
|
r
|
RcppEigen-Introduction.R
|
### R code from vignette source 'RcppEigen-Introduction.Rnw'
###################################################
### code chunk number 1: RcppEigen-Introduction.Rnw:8-13
###################################################
pkgVersion <- packageDescription("RcppEigen")$Version
pkgDate <- packageDescription("RcppEigen")$Date
prettyDate <- format(Sys.Date(), "%B %e, %Y")
#require("RcppEigen")
#eigenVersion <- paste(unlist(.Call("eigen_version", FALSE)), collapse=".")
|
06ea8cd49d91151b072732ef6fdcb5173923ef71
|
b0868dac45ab597d13436209103e90540e9746a3
|
/plot2.R
|
082e14facc0d399e19ab8fa03abc0538762e74fe
|
[] |
no_license
|
xtrios/ExData_Plotting1
|
d24a17626049477f49d30e3f22a7b191f4ad12fe
|
ae17287b0a3c315b71a26c7a6168b6b639606202
|
refs/heads/master
| 2021-01-12T20:32:48.520628
| 2014-11-08T18:57:22
| 2014-11-08T18:57:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
plot2.R
|
# Read data set
tabAll <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Convert date and time format
tabAll[,1]<-as.Date(strptime(tabAll[,1], format="%d/%m/%Y"))
tabAll[,2]<-strftime(strptime(tabAll[,2],"%H:%M:%S"), "%H:%M:%S")
# Select relevant data
tabSel <- subset(tabAll, tabAll$Date=="2007-02-01"|tabAll$Date=="2007-02-02")
library(datasets)
tabSel[,3]<-as.numeric(paste(tabSel[,3]))
# Combine date & time into single variable
datetime<-as.POSIXct(paste(tabSel$Date, tabSel$Time), format="%Y-%m-%d %H:%M:%S")
# Plot data
png("plot2.png",width=480,height = 480, units = "px", pointsize = 12)
plot(datetime,tabSel[,3],"l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off()
|
e91fbd1870b9178c1561b6c1dc78c1ae2607337d
|
d99c9f5e8bb94460ca1aad9a1aeb8c5379e76c5a
|
/02_Single_Factor/Intervals.R
|
40ea47dc5dcebced523fceeceddeb3e5da5d2732
|
[] |
no_license
|
tdeswaef/AquaCrop-Identifiability
|
d24534c031257413023da8ed3688e71ed7fdd2d2
|
199f79990859a3e79c482a4876dcbe338e691947
|
refs/heads/main
| 2023-08-16T09:28:06.483134
| 2021-10-06T09:05:52
| 2021-10-06T09:05:52
| 352,927,804
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,368
|
r
|
Intervals.R
|
graphics.off()
rm(list = ls())
options(error=stop)
library(FME)
library(plyr)
#######
#PATHS
#######
output.collin.path <- "C:/Doctoraat/Irrigatie2.0/collin/Morris/Capture_Variation/Measurement_Int95MAX/Collin_18pars/" #location where collinearity results are saved
scaled_path <- "C:/Doctoraat/Irrigatie2.0/collin/Morris/MorrisPotatoS95max/" #location where scaled sensitivity functions are saved
files <- list.files(path=scaled_path, pattern="*.rds", full.names=TRUE) #files with the rescaled sensitivity functions
chrs <- nchar(scaled_path)
###########
#parameters
###########
#order of parset: baseT1, upT2, pexup3, pexlw4, psto5, psen6,rtx7 ,rtm8, rtshp9, rtexup10, rtexlw11,cgc12, ccx13, cdc14, hi15,
#ccs16, kc17,kcdcl18, wp19, anaer20, evardc21, hipsflo22, hingsto23, hinc24, stbio25,plan26, yld27, root28, sen29, mat30,
#hilen31, pexhp32, pstoshp33, psenshp34
parset <- c(7,26,12,27,16,28,9,29,32,19,17,14,4,15,31,3,6,8) #parameters to select from Morris output (18 most important)
#####################################################
#Calculate collinearities and save to different files
#####################################################
for (fr in c(1:21)){
whatdays <- seq(1,115, by = fr) #Select measurement days
out <- NULL
for (i in 1:length(files)){#length(files)
out <- NULL
morris <- readRDS(files[[i]]) #read in file by file
for(r in 1:100){ #calculate collinearity for every replicate (trajectory)
#select different slices from the 4 dimensional morris elementary effects output array to calculate collinearity from
Yield <- morris[r,parset,whatdays,4]
CC <- morris[r,parset,whatdays,2]
Biomass <- morris[r,parset,whatdays,3]
WC030 <- morris[r,parset,whatdays,5]
WC3060 <- morris[r,parset,whatdays,6]
Stage <- morris[r,parset,whatdays,1]
#merge the different arrays
vars <- rbind(t(Yield),t(CC),t(Biomass),t(WC030),t(WC3060),t(Stage))
#remove columns without effects
name = "noeff"
assign(x = name, value=vars[,apply(vars,2,function(x) !all(x==0))])
#if only one parameter has an effect no collinearity can be calculated so skip
if (ncol(as.data.frame(noeff)) < 2) next
#calculate collinearity and put in a dataframe
Coll <- collin(noeff, N = 18)
Coll$soil <- gsub('[[:digit:]]+', '', substr(substring(files[i],chrs+1),1,nchar(substring(files[i],chrs+1))-4))
Coll$year <- substring(substring(files[i],chrs+1),1,4)
Coll$replicate <- r
out <- rbind.fill(Coll, out)
if (r == 100){
filename <- paste(output.collin.path,fr,substring(substring(files[i],chrs+1),1,4),gsub('[[:digit:]]+', '', substr(substring(files[i],chrs+1),1,nchar(substring(files[i],chrs+1))-4)),".rds", sep = "")
saveRDS(out, filename)
}
}
}
}
#################################################################
#Combine collinearities of different conditions in one dataframe
#################################################################
library(plyr)
Noll <- NULL
outlist <- c("Yield","Biomass","CC","Stage","WC030","WC3060")
for (fr in c(1:21)){
for (sol in c("Clayloam","Loam","Loamysand")){
for (i in c(1975:2018)){
filename <- paste(output.collin.path,fr,i,sol,".rds", sep = "")
if(file.exists(filename)) {
file <- readRDS(filename)
dat <- file[which(file$N == 18),]
dat$freq <- as.character(fr)
Noll <- rbind.fill(Noll, dat)
}
}
}
}
###################
#Make plot
###################
library(ggplot2)
library(ggthemes)
theme_set(theme_few(base_size=14))
x1 = factor(Noll$freq, levels=c("1", "2", "3", "4", "5", "6", "7", "8", "9","10", "11", "12","13", "14", "15", "16", "17","18","19","20","21"))
p2 <- ggplot(Noll, aes(x=x1, y=1-1/collinearity)) + geom_boxplot() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5)) +
geom_hline(yintercept = 0.933333, linetype="dashed", color = "red")+ ylim(0.7,1) + xlab("Measurement interval (d)")
p2
library(export)
graph2ppt(aspectr = 1.5, width = 6.2)
|
87c71e269e87f6319ab0a27399c3664280482150
|
cd628e1c910d766d54b4fa8417a82890e16cdca3
|
/R/pbp.R
|
98133dd11ca21bd47c07056506a8426e67e2f2df
|
[] |
no_license
|
dashee87/deuce
|
33f34ddaef4a8943adf5d704225eaaf06ec3a1ba
|
de7015ec55e5470ca497472171e50b6c822487dd
|
refs/heads/master
| 2021-01-13T10:16:44.665211
| 2016-06-28T01:03:35
| 2016-06-28T01:03:35
| 69,028,710
| 0
| 0
| null | 2016-09-23T13:44:27
| 2016-09-23T13:44:26
| null |
UTF-8
|
R
| false
| false
| 4,958
|
r
|
pbp.R
|
#' Create Point-by-point Data
#'
#' Creates an expanded point-level version from the flat representation in \code{point_by_point}
#'
#' @param obj a row of the \code{point_by_point} data frame
#'
#' @export
pbp <- function(obj){
set_score <- function(x){
games <- strsplit(x, split = ";")[[1]]
ngames <- sapply(games, nchar)
output <- do.call("rbind", lapply(games, game_score))
output$Game <- rep(1:length(games), ngames)
output
}
game_score <- function(x){
points <- strsplit(x, split = "")[[1]]
ace <- grepl("A", points)
df <- grepl("D", points)
points <- sub("D", "R", sub("A", "S", points))
x <- points == "S"
data.frame(
serve_won = x > 1 - x,
serve_points = cumsum(x),
return_points = cumsum(1-x),
serve_score = as.character(point_score(x)),
return_score = as.character(point_score(1 - x)),
ace = ace,
df = df,
stringsAsFactors = FALSE
)
}
point_score <- function(x){
s1 <- cumsum(x)
s2 <- cumsum(1 - x)
score <- ifelse(s1 == 0, "0",
ifelse(s1 == 1, "15",
ifelse(s1 == 2, "30",
ifelse(s1 == 3, "40",
ifelse(s1 > 3 & s1 > s2, "Ad", "40")))))
if(s1[length(s1)] > s2[length(s2)])
score[length(score)] <- "GM"
score
}
tiebreak <- function(x){
points <- strsplit(x, split = "/")[[1]]
points <- unlist(sapply(points, function(x) strsplit(x, split = "")[[1]]))
ace <- grepl("A", points)
df <- grepl("D", points)
points <- sub("D", "R", sub("A", "S", points))
x <- points == "S"
s1 <- cumsum(x)
s2 <- cumsum(1 - x)
player1 <- c(TRUE, rep(c(FALSE, FALSE, TRUE, TRUE), length = length(points)-1))
player1_points <- x # Points won on serve
player1_points[!player1] <- (1-x)[!player1] # Not serving
player2_points <- (1-x)
player2_points[!player1] <- x[!player1] # Serving
player1_points <- cumsum(player1_points)
player2_points <- cumsum(player2_points)
output <- data.frame(
serve_won = x > 1 - x,
serve_points = player1_points,
return_points = player2_points,
ace = ace,
df = df,
stringsAsFactors = FALSE
)
output$serve_points[!player1] <- player2_points[!player1]
output$return_points[!player1] <- player1_points[!player1]
output$serve_score <- as.character(output$serve_points)
output$return_score <- as.character(output$return_points)
if(output$serve_points[nrow(output)] > output$return_points[nrow(output)] )
output$serve_score[nrow(output)] <- "GM"
else
output$return_score[nrow(output)] <- "GM"
output
}
s1 <- obj$server1
s2 <- obj$server2
date <- obj$tny_date
tb_obj <- obj[,c("TB1","TB2","TB3","TB4","TB5")]
tb_index <- sapply(tb_obj, is.na)
obj <- obj[,c("Set1","Set2","Set3","Set4","Set5")]
obj <- obj[,!sapply(obj, is.na)]
if(all(tb_index)){
obj <- obj[,!sapply(obj, is.na)]
result <- lapply(obj, set_score)
max_game <- sapply(result, function(x) max(x$Game))
max_game <- c(0, max_game)
for(i in 1:length(result)){
result[[i]]$Set <- i
result[[i]]$CumGame <- sum(max_game[1:i]) + result[[i]]$Game
}
result <- do.call("rbind", lapply(result, function(x) x))
result$serve <- ifelse(result$CumGame %% 2 != 0, s1, s2)
result$return <- ifelse(result$CumGame %% 2 != 0, s2, s1)
result$tiebreak <- FALSE
}
else{
tb_obj <- tb_obj[,!tb_index, drop = FALSE]
obj <- obj[,!sapply(obj, is.na)]
result <- lapply(obj, set_score)
max_game <- sapply(result, function(x) max(x$Game))
max_game <- c(0, max_game)
for(i in 1:length(tb_index))
if(!tb_index[i]) max_game[(i+1)] <- max_game[(i+1)] + 1
tiebreaks <- lapply(tb_obj, tiebreak)
tb_sets <- which(!tb_index)
for(i in 1:length(tiebreaks)){
serve_index <- c(TRUE,
rep(c(FALSE, FALSE,TRUE, TRUE), length = nrow(tiebreaks[[i]])-1))
tiebreaks[[i]]$Game <- 13
tiebreaks[[i]]$Set <- tb_sets[i]
tiebreaks[[i]]$CumGame <- sum(max_game[1:(i+1)])
if(sum(max_game[1:(tb_sets[i]+1)]) %% 2 != 0){
tiebreaks[[i]]$serve <- s2
tiebreaks[[i]]$return <- s1
tiebreaks[[i]]$serve[serve_index] <- s1 # even game + tb goes to first server
tiebreaks[[i]]$return[serve_index] <- s2
}
else{
tiebreaks[[i]]$serve <- s1
tiebreaks[[i]]$serve[serve_index] <- s2
tiebreaks[[i]]$return <- s2
tiebreaks[[i]]$return[serve_index] <- s1
}
tiebreaks[[i]]$tiebreak <- TRUE
}
for(i in 1:length(result)){
result[[i]]$Set <- i
result[[i]]$CumGame <- sum(max_game[1:i]) + result[[i]]$Game
result[[i]]$serve <- ifelse(result[[i]]$CumGame %% 2 != 0, s1, s2)
result[[i]]$return<- ifelse(result[[i]]$CumGame %% 2 != 0, s2, s1)
result[[i]]$tiebreak <- FALSE
if(any(tb_sets == i))
result[[i]] <- rbind(result[[i]], tiebreaks[[which(tb_sets == i)]])
}
result <- do.call("rbind", lapply(result, function(x) x))
}
result$tourney_start_date <- date
result$breakpoint <- (result$return_score == "40" & !(result$serve_score %in% c("40","Ad"))) | result$return_score == "Ad"
result
}
|
a8391e99a37cb20d95fd560058cfef5e4a00ccdf
|
b32f8bbf08fc23033cfc046922a681db312d8580
|
/c4_exploratory_data_analysis/week2/swirl_ggplot2_extras.R
|
7262874cace57506e8772d38b4d5b6c3e7417553
|
[] |
no_license
|
hieudtrinh/datascience
|
6926afd134b6bbd948cd756e94dae87cb92a010f
|
5bb7af11355a168ab326332031d6205b64b90164
|
refs/heads/master
| 2022-11-21T13:37:28.448172
| 2020-07-22T21:03:06
| 2020-07-22T21:03:06
| 279,670,268
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,650
|
r
|
swirl_ggplot2_extras.R
|
# | Run the R command str with the argument diamonds to see what the data looks
# | like.
#
str(diamonds)
# tibble [53,940 ร 10] (S3: tbl_df/tbl/data.frame)
# $ carat : num [1:53940] 0.23 0.21 0.23 0.29 0.31 0.24 0.24 0.26 0.22 0.23 ...
# $ cut : Ord.factor w/ 5 levels "Fair"<"Good"<..: 5 4 2 4 2 3 3 3 1 3 ...
# $ color : Ord.factor w/ 7 levels "D"<"E"<"F"<"G"<..: 2 2 2 6 7 7 6 5 2 5 ...
# $ clarity: Ord.factor w/ 8 levels "I1"<"SI2"<"SI1"<..: 2 3 5 4 2 6 7 3 4 5 ...
# $ depth : num [1:53940] 61.5 59.8 56.9 62.4 63.3 62.8 62.3 61.9 65.1 59.4 ...
# $ table : num [1:53940] 55 61 65 58 58 57 57 55 61 61 ...
# $ price : int [1:53940] 326 326 327 334 335 336 336 337 337 338 ...
# $ x : num [1:53940] 3.95 3.89 4.05 4.2 4.34 3.94 3.95 4.07 3.87 4 ...
# $ y : num [1:53940] 3.98 3.84 4.07 4.23 4.35 3.96 3.98 4.11 3.78 4.05 ...
# $ z : num [1:53940] 2.43 2.31 2.31 2.63 2.75 2.48 2.47 2.53 2.49 2.39 ...
# | Now let's plot a histogram of the price of the 53940 diamonds in this
# | dataset. Recall that a histogram requires only one variable of the data, so
# | run the R command qplot with the first argument price and the argument data
# | set equal to diamonds. This will show the frequency of different diamond
# | prices.
qplot(price, data = diamonds)
# `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# | Not only do you get a histogram, but you also get a message about the
# | binwidth defaulting to range/30. Recall that range refers to the spread or
# | dispersion of the data, in this case price of diamonds. Run the R command
# | range now with diamonds$price as its argument.
range(diamonds$price)
# [1] 326 18823
# | Rerun qplot now with 3 arguments. The first is price, the second is data set
# | equal to diamonds, and the third is binwidth set equal to 18497/30). (Use the
# | up arrow to save yourself some typing.) See if the plot looks familiar.
qplot(price, data = diamonds, binwidth = 18497/30)
# | You're probably sick of it but rerun qplot again, this time with 4 arguments.
# | The first 3 are the same as the last qplot command you just ran (price, data
# | set equal to diamonds, and binwidth set equal to 18497/30). (Use the up arrow
# | to save yourself some typing.) The fourth argument is fill set equal to cut.
# | The shape of the histogram will be familiar, but it will be more colorful.
qplot(price, data = diamonds, binwidth = 18497/30, fill = cut)
# | Now we'll replot the histogram as a density function which will show the
# | proportion of diamonds in each bin. This means that the shape will be similar
# | but the scale on the y-axis will be different since, by definition, the
# | density function is nonnegative everywhere, and the area under the curve is
# | one. To do this, simply call qplot with 3 arguments. The first 2 are price
# | and data (set equal to diamonds). The third is geom which should be set equal
# | to the string "density". Try this now.
qplot(price, data = diamonds, geom = "density")
# | Rerun qplot, this time with 4 arguments. The first 2 are the usual, and the
# | third is geom set equal to "density". The fourth is color set equal to cut.
# | Try this now.
qplot(price, data = diamonds, geom = "density", color = cut)
# | See how easily qplot did this? Four of the five cuts have 2 peaks, one at
# | price $1000 and the other between $4000 and $5000. The exception is the Fair
# | cut which has a single peak at $2500. This gives us a little more
# | understanding of the histogram we saw before.
# | Let's start with carat and price. Use these as the first 2 arguments of
# | qplot. The third should be data set equal to the dataset. Try this now.
qplot(carat, price, data = diamonds)
# | Now rerun the same command, except add a fourth parameter, shape, set equal
# | to cut.
qplot(carat, price, data = diamonds, shape = cut)
# Warning message:
# Using shapes for an ordinal variable is not advised
# | The same scatterplot appears, except the cuts of the diamonds are
# | distinguished by different symbols. The legend at the right tells you which
# | symbol is associated with each cut. These are small and hard to read, so
# | rerun the same command, except this time instead of setting the argument
# | shape equal to cut, set the argument color equal to cut.
qplot(carat, price, data = diamonds, color = cut)
# | We'll rerun the plot you just did (carat,price,data=diamonds and color=cut)
# | but add an additional parameter. Use geom_smooth with the method set equal to
# | the string "lm".
qplot(carat,price,data=diamonds, color=cut) + geom_smooth(method="lm")
# `geom_smooth()` using formula 'y ~ x'
# | Again, we see the same scatterplot, but slightly more compressed and showing
# | 5 regression lines, one for each cut of diamonds. It might be hard to see,
# | but around each line is a shadow showing the 95% confidence interval. We see,
# | unsurprisingly, that the better the cut, the steeper (more positive) the
# | slope of the lines.
# | Finally, let's rerun that plot you just did qplot(carat,price,data=diamonds,
# | color=cut) + geom_smooth(method="lm") but add one (just one) more argument to
# | qplot. The new argument is facets and it should be set equal to the formula
# | .~cut. Recall that the facets argument indicates we want a multi-panel plot.
# | The symbol to the left of the tilde indicates rows (in this case just one)
# | and the symbol to the right of the tilde indicates columns (in this five, the
# | number of cuts). Try this now.
qplot(carat,price,data=diamonds, color=cut, facets = . ~ cut) + geom_smooth(method="lm")
# `geom_smooth()` using formula 'y ~ x'
# Which types of plot does qplot plot?
#
# 1: all of the others
# 2: histograms
# 3: box and whisker plots
# 4: scatterplots
#
# Selection: 1
# | Any and all of the above choices work; qplot is just that good. What does the
# | gg in ggplot2 stand for?
#
# 1: goto graphics
# 2: grammar of graphics
# 3: good grief
# 4: good graphics
#
# Selection: 2
# True or False? The geom argument takes a string for a value.
#
# 1: True
# 2: False
#
# Selection: 1
# True or False? The method argument takes a string for a value.
#
# 1: False
# 2: True
#
# Selection: 2
# True or False? The binwidth argument takes a string for a value.
#
# 1: True
# 2: False
#
# Selection: 2
# True or False? The user must specify x- and y-axis labels when using qplot.
#
# 1: False
# 2: True
#
# Selection: 1
# | Now for some ggplots.
# | First create a graphical object g by assigning to it the output of a call to
# | the function ggplot with 2 arguments. The first is the dataset diamonds and
# | the second is a call to the function aes with 2 arguments, depth and price.
# | Remember you won't see any result.
g <- ggplot(diamonds, aes(depth, price))
# | Does g exist? Yes! Type summary with g as an argument to see what it holds.
summary(g)
# data: carat, cut, color, clarity, depth, table, price, x, y, z
# [53940x10]
# mapping: x = ~depth, y = ~price
# faceting: <ggproto object: Class FacetNull, Facet, gg>
# compute_layout: function
# draw_back: function
# draw_front: function
# draw_labels: function
# draw_panels: function
# finish_data: function
# init_scales: function
# map_data: function
# params: list
# setup_data: function
# setup_params: function
# shrink: TRUE
# train_scales: function
# vars: function
# super: <ggproto object: Class FacetNull, Facet, gg>
# | We see that g holds the entire dataset. Now suppose we want to see a
# | scatterplot of the relationship. Add to g a call to the function geom_point
# | with 1 argument, alpha set equal to 1/3.
g+geom_point(alpha = 1/3)
# | That's somewhat interesting. We see that depth ranges from 43 to 79, but the
# | densest distribution is around 60 to 65. Suppose we want to see if this
# | relationship (between depth and price) is affected by cut or carat. We know
# | cut is a factor with 5 levels (Fair, Good, Very Good, Premium, and Ideal).
# | But carat is numeric and not a discrete factor. Can we do this?
# | Of course! That's why we asked. R has a handy command, cut, which allows you
# | to divide your data into sets and label each entry as belonging to one of the
# | sets, in effect creating a new factor. First, we'll have to decide where to
# | cut the data.
# | Let's divide the data into 3 pockets, so 1/3 of the data falls into each.
# | We'll use the R command quantile to do this. Create the variable cutpoints
# | and assign to it the output of a call to the function quantile with 3
# | arguments. The first is the data to cut, namely diamonds$carat; the second is
# | a call to the R function seq. This is also called with 3 arguments, (0, 1,
# | and length set equal to 4). The third argument to the call to quantile is the
# | boolean na.rm set equal to TRUE.
cutpoints <- quantile(diamonds$carat, seq(0,1,length = 4), na.rm = TRUE)
cutpoints
# 0% 33.33333% 66.66667% 100%
# 0.20 0.50 1.00 5.01
range(diamonds$carat)
# [1] 0.20 5.01
# | We see a 4-long vector (explaining why length was set equal to 4). We also
# | see that .2 is the smallest carat size in the dataset and 5.01 is the
# | largest. One third of the diamonds are between .2 and .5 carats and another
# | third are between .5 and 1 carat in size. The remaining third are between 1
# | and 5.01 carats. Now we can use the R command cut to label each of the 53940
# | diamonds in the dataset as belonging to one of these 3 factors. Create a new
# | name in diamonds, diamonds$car2 by assigning it the output of the call to
# | cut. This command takes 2 arguments, diamonds$carat, which is what we want to
# | cut, and cutpoints, the places where we'll cut.
diamonds$car2 <- cut(diamonds$carat, cutpoints)
# | Now we can continue with our multi-facet plot. First we have to reset g since
# | we changed the dataset (diamonds) it contained (by adding a new column).
# | Assign to g the output of a call to ggplot with 2 arguments. The dataset
# | diamonds is the first, and a call to the function aes with 2 arguments
# | (depth,price) is the second.
g <- ggplot(diamonds, aes(depth,price))
summary(g)
# data: carat, cut, color, clarity, depth, table, price, x, y, z, car2
# [53940x11]
# mapping: x = ~depth, y = ~price
# faceting: <ggproto object: Class FacetNull, Facet, gg>
# compute_layout: function
# draw_back: function
# draw_front: function
# draw_labels: function
# draw_panels: function
# finish_data: function
# init_scales: function
# map_data: function
# params: list
# setup_data: function
# setup_params: function
# shrink: TRUE
# train_scales: function
# vars: function
# super: <ggproto object: Class FacetNull, Facet, gg>
# | Now add to g calls to 2 functions. This first is a call to geom_point with
# | the argument alpha set equal to 1/3. The second is a call to the function
# | facet_grid using the formula cut ~ car2 as its argument.
g+geom_point(alpha=1/3)+facet_grid(cut ~ car2)
# | We see a multi-facet plot with 5 rows, each corresponding to a cut factor.
# | Not surprising. What is surprising is the number of columns. We were
# | expecting 3 and got 4. Why?
# | The first 3 columns are labeled with the cutpoint boundaries. The fourth is
# | labeled NA and shows us where the data points with missing data (NA or Not
# | Available) occurred. We see that there were only a handful (12 in fact) and
# | they occurred in Very Good, Premium, and Ideal cuts. We created a vector,
# | myd, containing the indices of these datapoints. Look at these entries in
# | diamonds by typing the expression diamonds[myd,]. The myd tells R what rows
# | to show and the empty column entry says to print all the columns.
diamonds[myd,]
# # A tibble: 12 x 11
# carat cut color clarity depth table price x y z car2
# <dbl> <ord> <ord> <ord> <dbl> <dbl> <int> <dbl> <dbl> <dbl> <fct>
# 1 0.2 Premium E SI2 60.2 62 345 3.79 3.75 2.27 NA
# 2 0.2 Premium E VS2 59.8 62 367 3.79 3.77 2.26 NA
# 3 0.2 Premium E VS2 59 60 367 3.81 3.78 2.24 NA
# 4 0.2 Premium E VS2 61.1 59 367 3.81 3.78 2.32 NA
# 5 0.2 Premium E VS2 59.7 62 367 3.84 3.8 2.28 NA
# 6 0.2 Ideal E VS2 59.7 55 367 3.86 3.84 2.3 NA
# 7 0.2 Premium F VS2 62.6 59 367 3.73 3.71 2.33 NA
# 8 0.2 Ideal D VS2 61.5 57 367 3.81 3.77 2.33 NA
# 9 0.2 Very Good E VS2 63.4 59 367 3.74 3.71 2.36 NA
# 10 0.2 Ideal E VS2 62.2 57 367 3.76 3.73 2.33 NA
# 11 0.2 Premium D VS2 62.3 60 367 3.73 3.68 2.31 NA
# 12 0.2 Premium D VS2 61.7 60 367 3.77 3.72 2.31 NA
# | We see these entries match the plots. Whew - that's a relief. The car2 field
# | is, in fact, NA for these entries, but the carat field shows they each had a
# | carat size of .2. What's going on here?
# | Actually our plot answers this question. The boundaries for each column
# | appear in the gray labels at the top of each column, and we see that the
# | first column is labeled (0.2,0.5]. This indicates that this column contains
# | data greater than .2 and less than or equal to .5. So diamonds with carat
# | size .2 were excluded from the car2 field.
# | Finally, recall the last plotting command
# | (g+geom_point(alpha=1/3)+facet_grid(cut~car2)) or retype it if you like and
# | add another call. This one to the function geom_smooth. Pass it 3 arguments,
# | method set equal to the string "lm", size set equal to 3, and color equal to
# | the string "pink".
g+geom_point(alpha=1/3)+facet_grid(cut ~ car2)+geom_smooth(method="lm", size=3, color="pink")
# `geom_smooth()` using formula 'y ~ x'
# | Nice thick regression lines which are somewhat interesting. You can add
# | labels to the plot if you want but we'll let you experiment on your own.
# | Lastly, ggplot2 can, of course, produce boxplots. This final exercise is the
# | sum of 3 function calls. The first call is to ggplot with 2 arguments,
# | diamonds and a call to aes with carat and price as arguments. The second call
# | is to geom_boxplot with no arguments. The third is to facet_grid with one
# | argument, the formula . ~ cut. Try this now.
ggplot(diamonds, aes(carat, price))+geom_boxplot()+facet_grid(. ~ cut)
# Warning message:
# Continuous y aesthetic -- did you forget aes(group=...)?
# | Yes! A boxplot looking like marshmallows about to be roasted. Well done and
# | congratulations! You've finished this jewel of a lesson. Hope it paid off!
|
c542e7a164ccaed76c5fbbee5ea8b27e02087f21
|
9dcb9c2fa85c2f10dab34b1de1baabad8f957c78
|
/Original/Graphs.R
|
dc138ef210fd304ef67d6f9db00ac226e8032c46
|
[] |
no_license
|
mattgonnerman/WinterSelection
|
26aefdb14831f9b59ae99b4a4e591ccdc27281cb
|
0afc68b3d80dd9ce4e81d761ff2f95c00f7ae64e
|
refs/heads/master
| 2023-04-14T01:11:29.375417
| 2022-07-15T14:27:25
| 2022-07-15T14:27:25
| 334,658,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,987
|
r
|
Graphs.R
|
require(dplyr)
require(ggplot2)
require(forcats)
setwd("E:/Maine Drive/Analysis/Kaj Thesis")
### Comparison of Magnitude of Interactions
interactions.raw <- read.csv("InteractionResults.csv")
interactions.raw$Beh_State <- factor(interactions.raw$Beh_State,
levels = c("Roost", "Stationary", "Mobile"))
interactions.raw$LC_Cov <- factor(interactions.raw$LC_Cov,
levels = c("Wind Exposure", "Distance to Edge", "Proportion Ag",
"Proportion Dev", "Proportion SW", "% Softwood",
"Mean Tree Height", "Basal Area"))
int.Snow <- interactions.raw %>% filter(Weath_Cov == "Snow Depth")
ggplot(data = int.Snow, aes(y = LC_Cov, x = Interaction, shape = Beh_State, color = Beh_State)) +
geom_point(size = 1.5,
position = position_dodge(width = .4)) +
geom_errorbar(aes(xmin = Interaction - (1.96*SD), xmax = Interaction + (1.96*SD)),
width = .2,
position = position_dodge(width = .4)) +
geom_vline(xintercept = 0, color = "grey60", linetype = 2) +
theme_bw() +
xlab("Coefficient Estimate") +
ylab("") +
ggtitle("Snow Depth") +
labs(color = "Behavioral\nState") +
theme(legend.title.align=0.5) +
scale_colour_manual(name = "Behavioral\nState",
labels = c("Roost", "Stationary", "Mobile"),
values = c("yellow4", "violetred4", "royalblue4")) +
scale_shape_manual(name = "Behavioral\nState",
labels = c("Roost", "Stationary", "Mobile"),
values = c(15, 19, 17))
ggsave("SnowDepth_InteractionComp.jpeg", width = 8, height = 7, units = "in")
int.Wind <- interactions.raw %>% filter(Weath_Cov == "Wind Chill")
ggplot(data = int.Wind, aes(y = LC_Cov, x = Interaction, shape = Beh_State, color = Beh_State)) +
geom_point(size = 1.5,
position = position_dodge(width = .4)) +
geom_errorbar(aes(xmin = Interaction - (1.96*SD), xmax = Interaction + (1.96*SD)),
width = .2,
position = position_dodge(width = .4)) +
geom_vline(xintercept = 0, color = "grey60", linetype = 2) +
theme_bw() +
xlab("Coefficient Estimate") +
ylab("") +
ggtitle("Wind Chill") +
labs(color = "Behavioral\nState") +
theme(legend.title.align=0.5) +
scale_colour_manual(name = "Behavioral\nState",
labels = c("Roost", "Stationary", "Mobile"),
values = c("yellow4", "violetred4", "royalblue4")) +
scale_shape_manual(name = "Behavioral\nState",
labels = c("Roost", "Stationary", "Mobile"),
values = c(15, 19, 17))
ggsave("WindChill_InteractionComp.jpeg", width = 8, height = 7, units = "in")
#make big points
#remove endcaps on error bars
#thicker error lines
################################################################################################
### Plot Matrix showing selection at Poor, Average, and Good Weather
require(cowplot)
interactions.raw <- read.csv("InteractionResults.csv") %>%
mutate(Beh_State = factor(Beh_State, levels = c("Roost", "Stationary", "Mobile"))) %>%
mutate(LC_Cov = factor(LC_Cov,
levels = c("Distance to Edge", "Wind Exposure", "Proportion Ag",
"Proportion Dev", "Proportion SW",
"Mean Tree Height", "Basal Area", "% Softwood"))) %>%
arrange(Beh_State, LC_Cov)
int.Snow <- interactions.raw %>% filter(Weath_Cov == "Snow Depth")
int.Wind <- interactions.raw %>% filter(Weath_Cov == "Wind Chill")
# Condition Thresholds (Used summary on raw data and chose near 1st/3rd Quantile and Mean)
# Wind Chill/Roost = 4, 15, 27
# Snow Depth/Roost = 0, 4, 8
i = 1
snow.list <- list()
snow.plots <- list()
for(i in 1:length(int.Snow$LC_Cov)){
snow.df <- data.frame(Behavior = int.Snow$Beh_State[i],
LC = int.Snow$LC_Cov[i],
LC.Coef = int.Snow$LC_Coef[i],
W.Coef = int.Snow$Weath_Coef[i],
Int.Coef = int.Snow$Interaction[i],
LC.Val = rep(seq(-2, 2,.2),3),
W.Val = rep(c(0,4,8), each = 21),
W.Condition = rep(c("Good","Average","Poor"), each = 21))
snow.list[[i]] <- snow.df %>%
mutate(Est = exp((LC.Coef*LC.Val) + (W.Coef*W.Val) + (Int.Coef*LC.Val*W.Val)))
snow.plot <- ggplot(data = snow.list[[i]], aes(x = LC.Val, y = Est, group = W.Condition)) +
geom_line(aes(linetype = W.Condition)) +
theme_classic() +
xlab(snow.df$LC[i]) + ylab("")
snow.plots[[i]] <- snow.plot + theme(legend.position="none")
}
legend <- get_legend(snow.plot + theme(legend.position = "bottom"))
plot_grid(plotlist = snow.plots,
legend,
labels = "auto",
nrow = 3,
align = "hv",
axis = "lb")
|
2b205b879dcbfcb00555f4e54bf59b115923a381
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RSurveillance/examples/pfree.calc.Rd.R
|
41720f977894d905a704acee361d25f3a3873e2f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
pfree.calc.Rd.R
|
library(RSurveillance)
### Name: pfree.calc
### Title: Probability of freedom over time
### Aliases: pfree.calc
### Keywords: methods
### ** Examples
# examples for pfree.calc
pfree.calc(0.8, 0.01, 0.5)
pfree.calc(rep(0.6,24), 0.01, 0.5)
pfree.calc(runif(10, 0.4, 0.6), 0.01, 0.5)
pfree.calc(runif(10, 0.4, 0.6), runif(10, 0.005, 0.015), 0.5)
|
00ce39b9b88b1b148e82ac13454f91850f95d82d
|
431860954259d02f7768dd02e6554badbf6faacc
|
/man/wcPCA.Rd
|
72994cf7478557d6f51848ed14028ae78f3c1863
|
[] |
no_license
|
nicolas-robette/GDAtools
|
5e6a7d4454d5edac3fab9bfa202f96ceddadfc66
|
4708925717cb4d0cd957faa46fd813dfcd860c41
|
refs/heads/master
| 2023-07-07T02:54:15.110104
| 2023-06-29T18:58:32
| 2023-06-29T18:58:32
| 214,293,710
| 5
| 3
| null | 2021-06-11T08:41:34
| 2019-10-10T22:04:12
|
R
|
UTF-8
|
R
| false
| false
| 2,560
|
rd
|
wcPCA.Rd
|
\name{wcPCA}
\alias{wcPCA}
\title{Within-class Principal Component Analysis}
\description{
Within-class Principal Component Analysis
}
\usage{
wcPCA(X, class, scale.unit = TRUE, ncp = 5, ind.sup = NULL, quanti.sup = NULL,
quali.sup = NULL, row.w = NULL, col.w = NULL, graph = FALSE,
axes = c(1, 2))
}
\arguments{
\item{X}{a data frame with \emph{n} rows (individuals) and \emph{p} columns (numeric variables)}
\item{class}{factor specifying the class}
\item{scale.unit}{a boolean, if TRUE (default) then data are scaled to unit variance}
\item{ncp}{number of dimensions kept in the results (by default 5)}
\item{ind.sup}{a vector indicating the indexes of the supplementary individuals}
\item{quanti.sup}{a vector indicating the indexes of the quantitative supplementary variables}
\item{quali.sup}{a vector indicating the indexes of the categorical supplementary variables}
\item{row.w}{an optional row weights (by default, a vector of 1 for uniform row weights); the weights are given only for the active individuals}
\item{col.w}{an optional column weights (by default, uniform column weights); the weights are given only for the active variables}
\item{graph}{boolean, if TRUE a graph is displayed. Default is FALSE.}
\item{axes}{a length 2 vector specifying the components to plot}
}
\details{
Within-class Principal Component Analysis is a PCA where the active variables are centered on the mean of their class instead of the overall mean.
It is a "conditional" PCA and can be seen as a special case of PCA with orthogonal instrumental variables, with only one (categorical) instrumental variable.
}
\value{
An object of class \code{PCA} from \code{FactoMineR} package, with an additional item :
\item{ratio}{the within-class inertia percentage}.
}
\note{
The code is adapted from \code{PCA} function from \code{FactoMineR} package.
}
\references{
Escofier B., 1990, Analyse des correspondances multiples conditionnelle, \emph{La revue de Modulad}, 5, 13-28.
Lebart L., Morineau A. et Warwick K., 1984, \emph{Multivariate Descriptive Statistical Analysis}, John Wiley and sons, New-York.)
}
\author{Nicolas Robette}
\seealso{
\code{\link{PCAoiv}}, \code{\link{wcMCA}}, \code{\link{MCAoiv}}
}
\examples{
# within-class analysis of decathlon data
# with quatiles of points as class
library(FactoMineR)
data(decathlon)
points <- cut(decathlon$Points, c(7300, 7800, 8000, 8120, 8900), c("Q1","Q2","Q3","Q4"))
res <- wcPCA(decathlon[,1:10], points)
plot(res, choix = "var")
}
\keyword{ multivariate }
|
8b4c8721e7e0f2b1db95297e16be924afb6fa614
|
8839c2feadff327fcbc8573bbbb42c008a4f4406
|
/assignment.R
|
32ad6b44491332372ede16207cd9b8a8199fc757
|
[] |
no_license
|
lachyrussell/datascience
|
d69366d6708fb79292823aa487dd98ba0a6db4c1
|
fa81f5a4e8f0669ce1726defa5993ebbe96fdb58
|
refs/heads/master
| 2021-01-21T13:49:01.530672
| 2016-05-18T09:08:32
| 2016-05-18T09:08:32
| 54,311,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69
|
r
|
assignment.R
|
x <- 1
print(x)
x
msg <- "hello"
msg
x <- ## Incomplete expression
x
|
0e5d4af39e5da09180fab9f0430002ee21e30741
|
e08714d390500a2d701adbade1e297f8138454d4
|
/Regresiรณn y correlaciรณn/matcor1.R
|
1161f3e1963831a488242723af445f7c2707f826
|
[] |
no_license
|
carlosardon/Mis-ejercicios-en-R
|
e2c0525903d7f3950cb66cc1f8fe59bcd2f4c567
|
2d321c74087d1a8dd25f6eb3759462f716d49167
|
refs/heads/master
| 2020-04-29T10:46:01.374403
| 2019-03-17T08:21:39
| 2019-03-17T08:21:39
| 176,073,827
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,804
|
r
|
matcor1.R
|
# R script for Pearson linear correlation and distance correlation.
# First install the following packages:
library(energy)
library(ggplot2)
library(magrittr)
library(ggpubr)
library(dplyr)
library(Hmisc)
library(corrplot)
library("PerformanceAnalytics")
library(psych)
library(ppcor)
# Then they must be loaded (attached).
# Import the data set.
matrizcor1 <- read.csv("matcor1.csv", header = T)
print(head(matrizcor1)) # show the first 6 rows
attach(matrizcor1) # attach the variables (columns become accessible by name)
names(matrizcor1) # column names
str(matrizcor1) # object types (numeric, integer, character, etc.)
# Shapiro-Wilk normality test for each variable.
shapiro.test(TF)
shapiro.test(AP)
shapiro.test(CS)
shapiro.test(PF)
shapiro.test(RE)
multi.hist(matrizcor1,ncol =NULL,nrow =NULL, breaks="sturges", bcol="lightblue", dcol = c("blue", "red"), dlty = c("dotted", "solid"), lwd=2, main = "")# use one of "sturges", "freedman-diaconis" ("fd"), "scott"
# Quantile-quantile plots: graphical check of normality.
ggqqplot(TF, xlab ="Cuantiles teóricos",ylab="TF(cantidad de frutos)")
ggqqplot(AP, xlab ="Cuantiles teóricos", ylab="AP(altura de planta)")
ggqqplot(CS, xlab ="Cuantiles teóricos", ylab="CS(concentración de sólidos)")
ggqqplot(PF, xlab ="Cuantiles teóricos", ylab="PF(peso del fruto)")
ggqqplot(RE, xlab ="Cuantiles teóricos", ylab="RE(rendimiento)")
# Variance-covariance matrix.
cov(matrizcor1)
# Correlation coefficients and significance tests.
cor.test(TF, RE, method="pearson")
cor.test(RE, CS, method="pearson")
cor.test(PF, CS, method="pearson")
cor.test(TF, PF, method="pearson")
cor.test(AP, TF, method="pearson")
# Partial correlation.
pcor.test(x = TF, y = RE, z =CS, method = "pearson")# corr. between TF and RE controlling for CS
pcor.test(x = TF, y = CS, z =RE, method = "pearson")# corr. between TF and CS controlling for RE
pcor.test(x = TF, y = PF, z = RE, method = "pearson")# corr. between TF and PF controlling for RE
# Multiple correlation coefficient.
(R <- cor(RE, fitted(lm(RE ~ TF +PF))))# yield as a function of total fruits and fruit weight
R^2# multiple coefficient of determination
# Correlation diagrams (correlograms).
round(cor(matrizcor1),2)
rcorr(as.matrix(matrizcor1))
corrplot(cor(matrizcor1),method="circle")
corrplot(cor(matrizcor1),method="ellipse")
corrplot(cor(matrizcor1),method="pie")
corrplot(cor(matrizcor1),method="number")
corrplot(cor(matrizcor1),type="upper")
corrplot(cor(matrizcor1),type="lower")
corrplot(cor(matrizcor1),order="hclust")
corrplot(cor(matrizcor1),type="upper", order="hclust")
corrplot(cor(matrizcor1),type="upper", order="hclust", method = "number", number.digits=3)
chart.Correlation(matrizcor1)
pairs.panels(matrizcor1, pch=21,main="matriz de correlaciones")
# Best-fitting line/curve per pair of variables.
ggscatter(matrizcor1,x = "TF", y = "RE", add = "loess", conf.int = TRUE, add.params = list(color = "blue", fill = "lightgray"),color="red",shape =10, size = 3, cor.coef = TRUE, cor.coeff.args = list(method = "pearson", label.x.npc = "left", label.y.npc = "top"), xlab = "Total de frutos", ylab = "Rendimiento (t)")
ggscatter(matrizcor1,x = "RE", y = "CS", add = "reg.line", conf.int = TRUE, add.params = list(color = "blue", fill = "lightgray"),color="red",shape =10, size = 3, cor.coef = TRUE, cor.coeff.args = list(method = "pearson", label.x.npc = "left", label.y.npc = "top"), xlab = "Rendimiento (t)", ylab = "Concentración de sólidos (Brix)")
ggscatter(matrizcor1,x = "PF", y = "CS", add = "loess", conf.int = TRUE, add.params = list(color = "blue", fill = "lightgray"),color="red",shape =10, size = 3, cor.coef = TRUE, cor.coeff.args = list(method = "pearson", label.x.npc = "left", label.y.npc = "top"), xlab = "Peso del fruto (g)", ylab = "Concentración de sólidos (Brix)")
ggscatter(matrizcor1,x = "TF", y = "PF", add = "reg.line", conf.int = TRUE,add.params = list(color = "blue", fill = "lightgray"),color="red",shape =10, size = 3, cor.coef = TRUE, cor.coeff.args = list(method = "pearson", label.x.npc = "left", label.y.npc = "top"), xlab = "Total de frutos", ylab = "Peso del fruto (g)")
# Distance correlation: detects linear or non-linear relationships.
# Between total fruits (TF) and yield (RE).
dcor(TF,RE) # distance correlation coefficient
unlist(DCOR(TF,RE))# covariance, dcor and variances of the variables
bcdcor(TF,RE)# bias-corrected version (bias grows with dimension)
dcor.test(TF, RE, R=61)# significance test without correction (R = 2n-1)
dcor.ttest(TF, RE, distance=FALSE)# significance test with correction
# Between yield (RE) and solids concentration (CS).
dcor(RE,CS) # distance correlation coefficient
unlist(DCOR(RE,CS))# covariance, dcor and variances of the variables
bcdcor(RE,CS)# bias-corrected version (bias grows with dimension)
dcor.test(RE,CS, R=61)# significance test without correction (R = 2n-1)
dcor.ttest(RE,CS, distance=FALSE)# significance test with correction
# Between fruit weight (PF) and solids concentration (CS).
dcor(PF,CS) # distance correlation coefficient
unlist(DCOR(PF,CS))# covariance, dcor and variances of the variables
bcdcor(PF,CS)# bias-corrected version (bias grows with dimension)
dcor.test(PF,CS, R=61)# significance test without correction (R = 2n-1)
dcor.ttest(PF,CS, distance=FALSE)# significance test with correction
# Between total fruits (TF) and fruit weight (PF).
dcor(TF,PF) # distance correlation coefficient
unlist(DCOR(TF,PF))# covariance, dcor and variances of the variables
bcdcor(TF,PF)# bias-corrected version (bias grows with dimension)
dcor.test(TF,PF, R=61)# significance test without correction (R = 2n-1)
dcor.ttest(TF,PF, distance=FALSE)# significance test with correction
|
50bb850be2cea5ac96dcb56bb0386388f8b11839
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/trg/d2a-fmssrcc.R
|
883077ff2c9fdcb6c5f154a58bda6b4ac7449e0c
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,837
|
r
|
d2a-fmssrcc.R
|
# Read Data into R Environment
# Demo script: reading CSV/text/Google-Sheets data plus basic plots.
#CSV Files----
#Read from CSV file in PC: round-trip iris through a CSV on disk
head(iris)
write.csv(iris, "./data/iris.csv", row.names=F)
read1 = read.csv(file="./data/iris.csv", header = TRUE,sep = ",")
read1
read1 = read.csv(file="./data/dhiraj.csv", header = TRUE,sep = ",")
head(read1)
str(read1)
class(read1)
head(read1)
# read.table requires the separator to be given explicitly
read2 = read.table(file="./data/iris.csv", header = TRUE,sep = ",")
str(read2); class(read2)
head(read2)
read3 = read.delim(file="./data/iris.csv", header = TRUE,sep = ",")
str(read3) ; class(read3)
head(read3)
#difference is use of specify delimeter(read.csv takes default as comma)
#or location is different from Project Folders, or want to search for the file
# file.choose() opens an interactive file picker
read4 = read.csv(file=file.choose())
str(read4)
head(read4)
# From URL : Read CSV from Web----
read_web1 = read.csv('http://www.stats.ox.ac.uk/pub/datasets/csb/ch11b.dat')
head(read_web1)
library(data.table)
# fread() is data.table's fast reader; it returns a data.table
read_web2 = fread("http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv")
head(read_web2)
class(read_web2)
#Text file from Web-----
read_txt = read.table("https://s3.amazonaws.com/assets.datacamp.com/blog_assets/test.txt", header = FALSE)
head(read_txt)
#Google Sheets-----
library(gsheet) #install it#
#install.packages('gsheet')
library(gsheet)
url_gsheet = "https://docs.google.com/spreadsheets/d/1QogGSuEab5SZyZIw1Q8h-0yrBNs1Z_eEBJG7oRESW5k/edit#gid=107865534"
df_gsheet = as.data.frame(gsheet2tbl(url_gsheet))
head(df_gsheet)
#graphs
mtcars
names(mtcars)
table(mtcars$cyl)
table(mtcars$cyl, mtcars$am)
mtcars$mpg
#continuous data - histogram, boxplot
hist(mtcars$mpg)
boxplot(mtcars$mpg, horizontal = T)
boxplot( mpg ~ gear, data=mtcars, col=1:3)
# categorical data - frequency table and bar plot
t1 = table(mtcars$gear)
t1
barplot(t1, col=1:3)
# NOTE(review): `students` is not defined anywhere in this file; it is
# assumed to already exist in the workspace -- confirm before running.
students
t2 = table(students$college)
barplot(t2)
t3 = table(students$gender)
barplot(t3)
title('This is bar plot', sub = 'Subtitle')
pie(t3)
|
4249ce5507b773ea78ada67efe8a476678b3284f
|
39834735c33714c011da3c2ab438db4525f4ac9d
|
/man/wahlperioden.Rd
|
6e7ad73fc56d23ff8aa2a031a7c9c61d15dad284
|
[] |
no_license
|
petermeissner/dip21
|
2e37ab5ede145d11b92bfff78202273424b2df34
|
f26206409af7c89fdda80fcb3bb3f46862e435c0
|
refs/heads/master
| 2020-12-24T20:33:12.438958
| 2016-05-03T22:15:25
| 2016-05-03T22:15:25
| 57,194,506
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
wahlperioden.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dip21_search.R
\name{wahlperioden}
\alias{wahlperioden}
\title{function returning options for legislative terms}
\usage{
wahlperioden(regex = NULL)
}
\arguments{
\item{regex}{a regular expression used to look up labels and return their values}
}
\description{
function returning options for legislative terms
}
|
c53ab2deff27f2318a02909db0e85ffe55d4ad96
|
941ef2fdda9fdfa5085db448ce4728fb3a8c629c
|
/run_analysis.R
|
d615aa796dbdd7640b6168a2a93e2af1235ac5b7
|
[] |
no_license
|
PepijnDG/GetCleanProject
|
5f100167239483a84c6fcde495c151713ea52b3a
|
fe26e5109baf22339dcce1afd155f1aca457e785
|
refs/heads/master
| 2021-04-09T16:40:21.857010
| 2015-02-22T11:35:46
| 2015-02-22T11:35:46
| 31,124,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,433
|
r
|
run_analysis.R
|
# Merge the UCI HAR train/test data sets and write a tidy summary
# (mean of every mean/std variable per subject and activity) to
# "analysis.txt" in the working directory.
library (plyr)
library (reshape2)

# Load training data; file.path() avoids mutating the working
# directory (the original used a setwd() dance).
xtrain <- read.table(file.path("train", "x_train.txt"))
ytrain <- read.table(file.path("train", "y_train.txt"))
strain <- read.table(file.path("train", "subject_train.txt"))

# Load test data.
xtest <- read.table(file.path("test", "x_test.txt"))
ytest <- read.table(file.path("test", "y_test.txt"))
stest <- read.table(file.path("test", "subject_test.txt"))

# Merge test and training measurements (test rows first, matching the
# order used for the label/subject merges below).
xmerged <- rbind(xtest, xtrain)

# Apply the feature names as column names.
feat <- read.table("features.txt")
colnames(xmerged) <- feat$V2

# Merge activity labels and map the integer codes to descriptive names.
ymerged <- rbind(ytest, ytrain)
act <- read.table("activity_labels.txt")
ymerged$V1 <- as.factor(ymerged$V1)
levels(ymerged$V1) <- act$V2
xmerged[,"activity"] <- ymerged

# Merge subject identifiers.
smerged <- rbind(stest, strain)
xmerged[,"subject"] <- smerged

# Keep only the mean/std measurement columns plus the two id columns,
# then compute the mean of each variable per subject/activity pair.
colstd <- xmerged[, grep('std', names(xmerged))]
colmean <- xmerged[, grep('mean', names(xmerged))]
activities <- xmerged[, grep('activity', names(xmerged))]
subjects <- xmerged[, grep('subject', names(xmerged))]
DFsub <- cbind(colmean, colstd, activities, subjects)
DFmelt <- melt(DFsub, id.vars = c("subjects", "activities"), value.name = "value")
SubActVarMean <- dcast(DFmelt, subjects + activities ~ variable, mean)
# `row.names` spelled out in full; the original relied on partial
# argument matching ("row.name").
write.table(SubActVarMean, "analysis.txt", row.names = FALSE, sep = "\t")

# NOTE(review): clearing the global environment is a side effect on
# whoever sources this script; kept for compatibility with the original.
rm(list = ls())
|
bf712959c52eac6ef3ce20b4fda9ab68ea31075a
|
fe4ff59b4a915bf021abb4d976045b7cd97b11ab
|
/man/compareTemporal.Rd
|
47df285eb7cf8f445c601e33bbd75de8ee570f6e
|
[] |
no_license
|
cran/crimelinkage
|
4908a8b35d27e03e4b44f0c2f902834cec7f863f
|
872ad1c7ec9c6445063a56a753657e1c8ab9fd4d
|
refs/heads/master
| 2021-01-19T06:46:19.974002
| 2015-09-19T19:50:30
| 2015-09-19T19:50:30
| 34,173,828
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
rd
|
compareTemporal.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/compareCrimes.R
\name{compareTemporal}
\alias{compareTemporal}
\title{Make temporal evidence variable from (possibly uncertain) temporal info}
\usage{
compareTemporal(DT1, DT2, show.pb = FALSE, ...)
}
\arguments{
\item{DT1}{(n x 2) data.frame of (DT.FROM,DT.TO) for the first set of crimes}
\item{DT2}{(n x 2) data.frame of (DT.FROM,DT.TO) for the second set of crimes}
\item{show.pb}{(logical) show the progress bar}
\item{\ldots}{other arguments passed to \code{\link{expAbsDiff.circ}}}
}
\value{
data.frame of expected absolute differences:
\itemize{
\item temporal - overall difference (in days) [0,max]
\item tod - time of day difference (in hours) [0,12]
\item dow - fractional day of week difference (in days) [0,3.5]
}
}
\description{
Calculates the temporal distance between crimes
}
\keyword{internal}
|
8f0b7fb9c8470a376415c9348b87d486db3a5b30
|
1491e182e255ac4f3b2bb2cc8da24f4619da5138
|
/plot6.R
|
fc6c56625f60ec85514bdf1b9ca02f925c29205c
|
[] |
no_license
|
lenazun/ExDataProject
|
c9696dfb645a3666b7149e3552254d34b698a83c
|
6608273a06dca182987190a7f1f1fe3c4a83d13f
|
refs/heads/master
| 2020-06-06T16:19:59.933101
| 2014-05-23T22:39:11
| 2014-05-23T22:39:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,269
|
r
|
plot6.R
|
# Plot 6: compare the year-over-year change in motor-vehicle (ON-ROAD)
# PM2.5 emissions between Baltimore City, MD (fips 24510) and
# Los Angeles County, CA (fips 06037), and save the plot to plot6.png.
# NOTE(review): `NEI` must already be loaded (uncomment the readRDS
# lines on first run).
#NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")

library(quantmod)  # Delt() for period-over-period relative change
library(ggplot2)   # qplot(); was missing in the original script

# Convert year and type to factors.
NEI$year <- as.factor(NEI$year)
NEI$type <- as.factor(NEI$type)

# Subset motor-vehicle sources for each city.
balmotor <- subset(NEI, NEI$type == "ON-ROAD" & NEI$fips == "24510")
lamotor <- subset(NEI, NEI$type == "ON-ROAD" & NEI$fips == "06037")

# Sum emissions per year for motor vehicles in both cities.
balmotoremissions <- tapply(balmotor$Emissions, balmotor$year, sum)
lamotoremissions <- tapply(lamotor$Emissions, lamotor$year, sum)

# Build a data frame with the year-by-year relative change.
diff <- as.data.frame(matrix(ncol = 3, nrow = 8))
names(diff) <- c("year", "city", "emissionschange")
diff$emissionschange[1:4] <- Delt(balmotoremissions)
diff$emissionschange[5:8] <- Delt(lamotoremissions)
diff$city[1:4] <- "Baltimore City"
diff$city[5:8] <- "Los Angeles"
diff$year <- c("1999", "2002", "2005", "2008")
diff$year <- as.factor(diff$year)
diff$city <- as.factor(diff$city)

# Plot emissions changes side by side.  print() is required so the
# plot is actually drawn when this script is run via source().
print(qplot(x = year, y = emissionschange, fill = city,
            data = diff, geom = "bar", stat = "identity",
            position = "dodge"))

# Copy the screen device to a PNG file.
dev.copy(png, file = "plot6.png")
dev.off()
|
639b3508565bda6ffaee0566fd24025abc3e5981
|
6df0d7054556eee6127ee99378a623303ba56ecc
|
/R/ggstance.R
|
5782c87379adc73b0db87f9bb7802d6f1252423e
|
[] |
no_license
|
miguelyad26/ggstance
|
c9088c2735836d4171b08f78f99e4f81d20d8cd3
|
6eca6d13ea9078f63e0de439bdc473cd9977f9a1
|
refs/heads/master
| 2021-01-24T09:57:06.629959
| 2016-08-12T07:39:34
| 2016-08-12T07:55:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
ggstance.R
|
#' @importFrom purrr map map_if walk2 keep map_at splice invoke some
#'   walk partial set_names %||%
NULL
#' Base ggproto classes for ggstance
#'
#' @seealso \code{\link[ggplot2]{ggproto}}
#' @keywords internal
#' @name ggstance-ggproto
NULL
|
8df142ceba5565fb4ee7f86d619ab6521602dc99
|
a44b2e10db0aa9b7f3fde27d9840ab474d10ab57
|
/modal/glm_modal.R
|
04b3c269afb00c911b6605cfcd54cb662b26e0a3
|
[] |
no_license
|
felipeamorimbr/inla-interface
|
2ebae4821b37609bdb72bc92c99434079a6c2937
|
cc87a7e173d86014c15fac063b4ab2c40967f102
|
refs/heads/master
| 2021-09-20T08:00:06.291441
| 2021-09-06T20:37:04
| 2021-09-06T20:37:04
| 201,983,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,187
|
r
|
glm_modal.R
|
# GLM Data
# (Re)initialise this module's shared state whenever a new data set is
# loaded.  `glm_data` is assigned with `<<-` so the other observers in
# this file see the same object.
observeEvent(data_input(), {
  glm_data <<- list()
  # Formula state: response defaults to the first covariate, no
  # covariates selected, intercept on, Gaussian family.
  glm_data$formula <<- list(
    resp_var = reactive({
      data_input()$covariates[1]
    }),
    cov_var = reactive({
      NULL
    }),
    not_selected = reactive({
      data_input()$covariates
    }),
    intercept = reactive({
      TRUE
    }),
    family = reactive({
      "Gaussian"
    })
  )
  # INLA defaults for the fixed-effect and hyperparameter priors.
  glm_data$fixed_priors <<- inla.set.control.fixed.default()
  glm_data$hyper <<- inla.set.control.family.default()
  # Flags recording whether the user ever opened the priors tabs; the
  # INLA defaults are used when a tab was never visited.
  glm_data$fixed_priors_tab <<- FALSE
  glm_data$hyper_tab <<- FALSE
})
# GLM access buttons
# Sidebar action link and dashboard box; either one opens the modal.
model_buttons$glm <- smAction("glm_action_btn", translate("Hierarchical Linear Regression", language = language_selected, words_one))
model_boxes$glm <- actionButton(
  inputId = "glm_box_btn",
  box_model_ui(id = "glm_box", name = translate("Hierarchical Linear Models", language = language_selected, words_one), author = "Felipe Amorim", icon = "fa-chart-area", color = "#12a19b"),
  # Strip default button styling so the box itself is the click target.
  style = "all:unset; color:black; cursor:pointer; outline:none;"
)
# Modal UI
# Build and show the GLM configuration modal when either access button
# is clicked.
observeEvent(c(input$glm_action_btn, input$glm_box_btn), {
  # Ignore the initial (zero-click) trigger at startup.
  validate(need(sum(input$glm_action_btn, input$glm_box_btn) > 0, ""))
  # Module servers for the three configuration tabs; their returns
  # replace the stored state so reopening the modal keeps selections.
  glm_data$formula <<- new_chooser(
    id = "glm_formula",
    selected_right = glm_data$formula$cov_var(),
    selected_left = glm_data$formula$not_selected(),
    resp_var = glm_data$formula$resp_var(),
    rightLabel = translate("Covariates Selected", language = language_selected, words_one),
    leftLabel = translate("Covariates", language = language_selected, words_one)
  )
  glm_data$fixed_priors <<- fixed_effects_priors(
    id = "glm_fixed",
    formula_data = glm_data$formula
  )
  glm_data$hyper <<- sel_hyper(
    id = "glm_hyper",
    Link = TRUE,
    formula_data = glm_data$formula,
    linkLabel = translate("Select the Link Function", language = language_selected, words_one)
  )
  # Modal with one tab per configuration step.
  showModal(modalDialog(fluidPage(
    includeCSS(path = "modal/style_lm.css"),
    shinyjs::useShinyjs(),
    tabsetPanel(
      id = "glm_tabs", type = "tabs",
      tabPanel(
        title = translate("Select Variables", language = language_selected, words_one),
        tags$br(),
        new_chooser_UI(
          id = "glm_formula",
          respLabel = translate("Response", language = language_selected, words_one),
          resp_var = glm_data$formula$resp_var(),
          selected_right = glm_data$formula$cov_var(),
          selected_left = glm_data$formula$not_selected(),
          familyLabel = translate("Family", language = language_selected, words_one),
          familyChoices = glm_family
        )
      ),
      tabPanel(
        title = translate("Fixed Effects", language = language_selected, words_one),
        tags$br(),
        fixed_effects_priors_ui(id = "glm_fixed")
      ),
      tabPanel(
        title = translate("Hyperparameter Prior", language = language_selected, words_one),
        sel_hyper_ui(
          id = "glm_hyper"
        )
      )
    ),
    # Module accent colour on the modal header.
    tags$head(
      tags$style(HTML(
        "
        .modal-header{
        border-bottom-color: #12a19b;
        }
        "
      ))
    )
  ),
  title = translate("Hierarchical Linear Regression", language = language_selected, words_one),
  size = "l",
  fade = FALSE,
  footer = tagList(actionButton(inputId = "glm_ok", label = "Ok"), modalButton(label = translate("Cancel", language = language_selected, words_one)))
  ))
})
# observeEvent(input$glm_tabs, {
# glm_data$fixed_priors <<- fixed_effects_priors(
# id = "glm_fixed",
# cov_var = glm_data$formula$cov_var(),
# intercept = glm_data$formula$intercept()
# )
#
# glm_data$hyper <<- sel_hyper(
# id = "glm_hyper",
# Link = TRUE,
# sel_family = glm_data$formula$family(),
# linkLabel = translate("Select the Link Function", language = language_selected, words_one)
# )
# })
# Record that the user opened a priors tab.  Once set, the flag stays
# TRUE; the Ok handler uses these flags to decide between the INLA
# defaults and the user-configured priors.
observeEvent(input$glm_tabs, {
  glm_data$fixed_priors_tab <<- ifelse(input$glm_tabs == translate("Fixed Effects", language = language_selected, words_one), TRUE, glm_data$fixed_priors_tab)
  glm_data$hyper_tab <<- ifelse(input$glm_tabs == translate("Hyperparameter Prior", language = language_selected, words_one), TRUE, glm_data$hyper_tab)
})
# What happens after the user clicks Ok: fit the model with INLA and
# append a result tab with the call, timings and summaries.
glm_tabindex <- reactiveVal(1) # counter giving every result tab unique ids
observeEvent(input$glm_ok, {
  useShinyjs()
  # Build the formula passed to inla() from the user's selections.
  glm_inla.formula <- as.formula(paste0(glm_data$formula$resp_var(), " ~ ",
                                        paste0(glm_data$formula$cov_var(), collapse = " + "),
                                        ifelse(glm_data$formula$intercept(),
                                               " + 1",
                                               " - 1")))
  # Unique slot name for this run's outputs.
  glm_output_name <- paste("output_tab", glm_tabindex(), sep = "_")
  # Use INLA defaults unless the user visited the corresponding tab.
  if (glm_data$fixed_priors_tab == FALSE) {
    glm_control_fixed <- inla.set.control.fixed.default()
  } else {
    glm_control_fixed <- control_fixed_input(
      prioris = glm_data$fixed_priors(),
      v.names = glm_data$formula$cov_var(),
      intercept = glm_data$formula$intercept()
    )
  }
  if (glm_data$hyper_tab == FALSE) {
    glm_control_family <- inla.set.control.family.default()
  } else {
    glm_control_family <- glm_data$hyper$control_family_input()
  }
  # Holders for the fitted model and its pretty-printed call.
  glm_inla <- list()
  glm_inla_call_print <- list()
  # Fit the model; try() so a crash inside INLA is reported to the
  # user instead of taking down the session.
  glm_inla[[glm_output_name]] <- try(inla(
    formula = glm_inla.formula,
    data = hot_to_r(input$data),
    family = glm_data$formula$family(),
    control.fixed = glm_control_fixed,
    control.compute = control_compute_input,
    control.inla = control_inla_input,
    control.family = glm_control_family
  ), silent = TRUE)
  # inherits() instead of `class(x) == "try-error"`: objects may carry
  # more than one class.
  if (inherits(glm_inla[[glm_output_name]], "try-error")) {
    sendSweetAlert(
      session = session,
      title = translate("Error in inla", language = language_selected, words_one),
      text = tags$span(
        translate("INLA has crashed. INLA try to run and failed.", language = language_selected, words_one)
      ),
      html = TRUE,
      type = "error",
      closeOnClickOutside = TRUE
    )
  } else {
    # Close the modal with the GLM options.
    removeModal()
    # Reconstruct a reproducible inla() call to display to the user.
    glm_inla_call_print[[glm_output_name]] <- paste0(
      "inla(data = ", "dat",
      ", formula = ", '"', glm_data$formula$resp_var(),
      " ~ ", ifelse(glm_data$formula$intercept(), ifelse(is.null(glm_data$formula$cov_var()), "+1", ""), "-1 + "), paste0(glm_data$formula$cov_var(), collapse = " + "), '"',
      paste0(", family = ", '"', glm_data$formula$family(), '"'),
      ifelse(glm_data$fixed_priors_tab == FALSE, "", paste0(
        ", control.fixed = ",
        list_call(glm_control_fixed)
      )),
      ifelse(identical(paste0(input$ok_btn_options_modal), character(0)), "",
        paste0(", control.compute = ", list_call(control_compute_input), ", control.inla = ", list_call(control_inla_input))
      ),
      # BUG FIX: this previously read lm_data$hyper_tab (the linear
      # model module's state) instead of glm_data$hyper_tab.
      ifelse(glm_data$hyper_tab == FALSE, "", paste0(", control.family = ", list_call(glm_control_family))),
      ")"
    )
    # Result tab: call + timings on top, fixed effects, then the
    # hyperparameter / neffp / DIC-WAIC boxes.
    appendTab(
      inputId = "mytabs", select = TRUE,
      tabPanel(
        title = paste0(translate("Hierarchical Linear Model", language = language_selected, words_one), " ", glm_tabindex()),
        useShinydashboard(),
        useShinyjs(),
        fluidRow(
          column(
            width = 6,
            box(
              id = paste0("glm_box_call_", glm_tabindex()),
              title = translate("Call", language = language_selected, words_one),
              status = "primary",
              solidHeader = TRUE,
              width = 12,
              textOutput(outputId = paste0("glm_call", glm_tabindex())),
              tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_call", glm_tabindex()))),
              tags$div(
                class = "collapse", id = paste0("showcode_call", glm_tabindex()),
                tags$code(
                  class = "language-r",
                  paste0("dat <- ", '"', input$file$name, '"'),
                  tags$br(),
                  paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                  tags$br(),
                  paste0("glm_inla_", glm_tabindex(), "$call")
                )
              )
            )
          ),
          column(
            width = 6,
            box(
              id = paste0("glm_box_time_used", glm_tabindex()),
              title = translate("Time Used", language = language_selected, words_one),
              status = "primary",
              solidHeader = TRUE,
              width = 12,
              dataTableOutput(outputId = paste0("glm_time_used_", glm_tabindex())),
              tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_time", glm_tabindex()))),
              tags$div(
                class = "collapse", id = paste0("showcode_time", glm_tabindex()),
                tags$code(
                  class = "language-r",
                  paste0("dat <- ", '"', input$file$name, '"'),
                  tags$br(),
                  paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                  tags$br(),
                  # FIX: displayed accessor was misspelled "$cpu.sued".
                  paste0("glm_inla_", glm_tabindex(), "$cpu.used")
                )
              )
            )
          )
        ), # fluidRow ends here
        fluidRow(
          column(
            width = 12,
            box(
              id = paste0("glm_box_fix_effects_", glm_tabindex()),
              title = translate("Fixed Effects", language = language_selected, words_one),
              status = "primary",
              solidHeader = TRUE,
              width = 12,
              dataTableOutput(outputId = paste0("glm_fix_effects_", glm_tabindex())),
              tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_fix_effects_", glm_tabindex()))),
              tags$div(
                class = "collapse", id = paste0("showcode_fix_effects_", glm_tabindex()),
                tags$code(
                  class = "language-r",
                  paste0("dat <- ", '"', input$file$name, '"'),
                  tags$br(),
                  paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                  tags$br(),
                  paste0("glm_inla_", glm_tabindex(), "$summary.fixed")
                )
              )
            )
          ),
          column(
            width = 12,
            useShinyjs(),
            fluidRow(
              conditionalPanel(
                condition = "(input.ccompute_input_2 != '') || (input.ccompute_input_2 == '' && input.ccompute_input_2 == true)",
                box(
                  id = paste0("glm_box_model_hyper_", glm_tabindex()),
                  title = translate("Model Hyperparameters", language = language_selected, words_one),
                  status = "primary",
                  solidHeader = TRUE,
                  width = 6,
                  dataTableOutput(outputId = paste0("glm_model_hyper_", glm_tabindex())),
                  tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_model_hyper_", glm_tabindex()))),
                  tags$div(
                    class = "collapse", id = paste0("showcode_model_hyper_", glm_tabindex()),
                    tags$code(
                      class = "language-r",
                      paste0("dat <- ", '"', input$file$name, '"'),
                      tags$br(),
                      paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                      tags$br(),
                      paste0("glm_inla_", glm_tabindex(), "$summary.hyperpar")
                    )
                  )
                )
              ),
              box(
                id = paste0("glm_box_neffp_", glm_tabindex()),
                title = translate("Expected Effective Number of Parameters in the Model", language = language_selected, words_one),
                status = "primary",
                solidHeader = TRUE,
                width = 6,
                dataTableOutput(outputId = paste0("glm_neffp_", glm_tabindex())),
                tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_neffp_", glm_tabindex()))),
                tags$div(
                  class = "collapse", id = paste0("showcode_neffp_", glm_tabindex()),
                  tags$code(
                    class = "language-r",
                    paste0("dat <- ", '"', input$file$name, '"'),
                    tags$br(),
                    paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                    tags$br(),
                    paste0("glm_inla_", glm_tabindex(), "$neffp")
                  )
                )
              ),
              conditionalPanel(
                condition = "(input.ccompute_input_4 != '' && input.ccompute_input_4 == true)",
                box(
                  id = paste0("glm_box_dic_waic_", glm_tabindex()),
                  title = translate("DIC and WAIC", language = language_selected, words_one),
                  status = "primary",
                  solidHeader = TRUE,
                  width = 6,
                  dataTableOutput(outputId = paste0("glm_dic_waic_", glm_tabindex())),
                  tags$b(tags$a(icon("code"), translate("Show code", language = language_selected, words_one), `data-toggle` = "collapse", href = paste0("#showcode_dic_waic_", glm_tabindex()))),
                  tags$div(
                    class = "collapse", id = paste0("showcode_dic_waic_", glm_tabindex()),
                    tags$code(
                      class = "language-r",
                      paste0("dat <- ", '"', input$file$name, '"'),
                      tags$br(),
                      paste0("glm_inla_", glm_tabindex()), " <- ", glm_inla_call_print[[glm_output_name]],
                      tags$br(),
                      paste0("glm_inla_", glm_tabindex(), "$dic$dic"),
                      tags$br(),
                      # FIX: the displayed variable name was missing the
                      # trailing underscore used everywhere else.
                      paste0("glm_inla_", glm_tabindex(), "$dic$dic.sat"),
                      tags$br(),
                      paste0("glm_inla_", glm_tabindex(), "$dic$p.eff")
                    )
                  )
                )
              )
            )
          )
        )
      )
    )
    # "Server" side of the result tab.
    # Call
    output[[paste0("glm_call", glm_tabindex())]] <- renderText({
      glm_inla_call_print[[glm_output_name]]
    })
    # Time Used
    output[[paste0("glm_time_used_", glm_tabindex())]] <- renderDataTable({
      data_time_used <- glm_inla[[glm_output_name]][["cpu.used"]] %>%
        t() %>%
        as.data.frame(row.names = c("Time")) %>%
        round(digits = 5)
      DT::datatable(
        data = data_time_used,
        options = list(
          dom = "t",
          pageLength = 5
        )
      )
    })
    # Fixed Effects
    output[[paste0("glm_fix_effects_", glm_tabindex())]] <- renderDataTable(
      {
        glm_inla[[glm_output_name]][["summary.fixed"]] %>%
          round(digits = 5)
      },
      options = list(
        paging = FALSE,
        dom = "t"
      )
    )
    # Model Hyperparameters
    output[[paste0("glm_model_hyper_", glm_tabindex())]] <- renderDataTable(
      {
        glm_inla[[glm_output_name]][["summary.hyperpar"]] %>%
          round(digits = 5)
      },
      options = list(
        dom = "t",
        paging = FALSE
      )
    )
    # Expected effective number of parameters (neffp)
    output[[paste0("glm_neffp_", glm_tabindex())]] <- renderDataTable(
      {
        glm_neffp_dataframe <- glm_inla[[glm_output_name]][["neffp"]] %>%
          round(digits = 5)
        colnames(glm_neffp_dataframe) <- "Expected Value"
        glm_neffp_dataframe
      },
      options = list(
        dom = "t",
        paging = FALSE
      )
    )
    # Deviance Information Criterion (DIC) and WAIC
    output[[paste0("glm_dic_waic_", glm_tabindex())]] <- renderDataTable(
      {
        data.frame(
          "DIC" = glm_inla[[glm_output_name]][["dic"]][["dic"]],
          "DIC Saturated" = glm_inla[[glm_output_name]][["dic"]][["dic.sat"]],
          "Effective number of parameters (DIC)" = glm_inla[[glm_output_name]][["dic"]][["p.eff"]],
          "WAIC" = glm_inla[[glm_output_name]][["waic"]][["waic"]],
          "Effective number of parameters (WAIC)" = glm_inla[[glm_output_name]][["waic"]][["p.eff"]],
          row.names = "Expected Value"
        ) %>%
          round(digits = 5) %>%
          t()
      },
      options = list(
        dom = "t",
        paging = FALSE
      )
    )
    # Next result tab gets the next index.
    glm_tabindex(glm_tabindex() + 1)
  }
})
|
7a1cb89684265e3d02edd53f29fa6d6303c9c1e4
|
1d4e4acf0bd82f6f65bfcc0fe4588a0aabec0e73
|
/assigkcluster.R
|
3115daecf1fd147170869a78c63a282c2a2650e0
|
[] |
no_license
|
rosetk/datascience-course
|
68516a011c3496dbb8779843819cee5a3c5d3179
|
12561c2ea6702245bcd8fe8941cbe6f6041914d1
|
refs/heads/main
| 2022-12-18T20:37:58.041575
| 2020-10-02T05:12:32
| 2020-10-02T05:12:32
| 300,483,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,960
|
r
|
assigkcluster.R
|
# Demo: k-means clustering basics plus two clustering assignments.
install.packages("plyr")
library(plyr)
# Toy data: 50 random (x, y) points.
x<-runif(50)
x
y<-runif(50)
y
data<-cbind(x,y)
data
plot(data)
plot (data, type ="n")
text(data, rownames(data))
# k-means with 4 centers on the toy data.
km<-kmeans(data,4)
str(km)
install.packages("animation")
library(animation)
# Animated view of the k-means iterations.
km1<-kmeans.ani(data,4)
str(km1)
km$cluster
km$centers
# assignment 1
input<-read.csv("D:/datascience/assignment/clustering/crime_data.csv",1)
normalised_data<-scale(input[,2:5])
# assignment 2
# NOTE(review): this overwrites `input` and `normalised_data` from
# assignment 1; only the assignment-2 data is clustered below.
install.packages("xlsx")
library(xlsx)
input<-read.xlsx("D:/datascience/assignment/clustering/EastWestAirlines.xlsx",2)
normalised_data<-scale(input[,2:11])
# k means clustering for assignment
#elbow curve and k ~ sqrt(n/2) to decide the k value
wss=(nrow(normalised_data)-1)*sum(apply(normalised_data,2,var))
for(i in 2:8)wss[i] = sum(kmeans(normalised_data,centers=i)$withinss)
plot(1:8 , wss, type="b",xlab="number of clusters", ylab = "within groups sum of squares")
title(sub="k-Means Clustering Scree Plot")
fit<-kmeans (normalised_data,5)
str(fit)
final2<-data.frame (input, fit$cluster)
final2
# Move the cluster column to the front.
final3<-final2[,c(ncol(final2),1:(ncol(final2)-1))]
# Cluster-wise means of the original variables.
t=aggregate(input[,2:11],by=list(fit$cluster),FUN=mean)
# selecting k for kmeans clustering using kselection
install.packages("kselection")
library(kselection)
k<-kselection (iris[,-5],parallel=TRUE,k_threshold=0.9,max_centers=12)
?kselection
?iris
# using parallel processing
install.packages("doParallel")
library(doParallel)
registerDoParallel(cores=2)
k<-kselection (iris[,-5],parallel=TRUE,k_threshold=0.9,max_centers=12)
# k clustering alternative for large data sets - Clustering Large Applications (CLARA)
install.packages("cluster")
library(cluster)
xds<-rbind(cbind(rnorm(5000,0,8),rnorm(5000,0,8)), cbind(rnorm(5000, 50 ,8), rnorm(5000,50,8)))
xcl<-clara(xds,2,sample=100)
clusplot(xcl)
# Partitioning Around Medoids (PAM)
xpm<-pam(xds,2)
clusplot(xpm)
|
ba791002a4a562dbb3a311d1f51031b9a82abfa4
|
0d0f71b0b9eca2896cf24a6dac799a1637f5fee5
|
/code/helpers/plotting_helpers.R
|
593e14592f3dad392eab194f80648f444d610326
|
[] |
no_license
|
mkiang/beiwe_missing_data
|
eb1a037d238d2e6be7615eb96078315a7c86d38b
|
ef65f4379ced4009427814a5f6aaa7882bb80207
|
refs/heads/master
| 2023-06-20T12:57:43.452236
| 2021-07-21T15:47:11
| 2021-07-21T15:47:11
| 325,392,811
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,881
|
r
|
plotting_helpers.R
|
## Plotting helpers
## Misc helpers ----
mkdir_p <- function(dir_name) {
  ## Create `dir_name` together with any missing parent directories, in the
  ## spirit of the shell's `mkdir -p`. Existing directories are left alone
  ## and no warning is raised for them.
  dir.create(dir_name, recursive = TRUE, showWarnings = FALSE)
}
## Themes ----
mk_classic <- function(...) {
  ## Serif-font classic theme with the legend anchored in the upper left on a
  ## semi-transparent white panel.
  ##
  ## Extra `theme()` settings may be supplied via `...`; they are applied last
  ## so they override the defaults below. (Previously `...` was accepted but
  ## silently ignored, unlike mk_x90() and mk_nyt() in this file.)
  theme_classic(base_size = 10, base_family = "Times") +
    theme(title = element_text(family = "Times"),
          legend.key = element_rect(fill = NA, color = NA),
          legend.position = c(0.01, 1.01),
          legend.justification = c(0, 1),
          legend.background = element_rect(fill = alpha("white", .75),
                                           color = NA)) +
    theme(...)
}
mk_x90 <- function(...) {
  ## Rotate x-axis tick labels 90 degrees (useful for long category names).
  ## Any extra `theme()` settings are passed straight through via `...`.
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5),
        ...)
}
mk_legend_ur <- function(...) {
  ## Move the legend to the upper-right corner of the plot panel.
  ## Extra `theme()` settings are forwarded via `...` (previously they were
  ## accepted but silently dropped), matching mk_x90() in this file.
  theme(legend.position = c(0.98, 0.98),
        legend.justification = c(1, 1),
        ...)
}
mk_nyt <- function(...) {
  ## Minimal "NYT-style" theme built on theme_bw(). Sources/inspiration:
  ##   http://minimaxir.com/2015/02/ggplot-tutorial/
  ##   https://timogrossenbacher.ch/2016/12/beautiful-thematic-maps-with-ggplot2-only/
  ##   https://github.com/hrbrmstr/hrbrthemes/blob/master/R/theme-ipsum.r
  ## Colors -- stick with the ggplot2() greys
  c_bg <- "white"
  c_grid <- "grey80"
  c_btext <- "grey5"   # bold/title text
  c_mtext <- "grey30"  # muted/axis text
  # Begin construction of chart
  theme_bw(base_size = 11, base_family = "Arial Narrow") +
    # Region
    theme(panel.background = element_rect(fill = c_bg, color = c_bg),
          plot.background = element_rect(fill = c_bg, color = c_bg),
          panel.border = element_blank()) +
    # Grid: drop grid lines entirely, keep subtle axis ticks
    theme(panel.grid.major.y = element_blank(),
          panel.grid.major.x = element_blank(),
          panel.grid.minor = element_blank(),
          axis.ticks = element_line(color = c_grid, size = .15,
                                    linetype = "solid"),
          axis.ticks.length = unit(.15, "cm")) +
    # Legend: upper-left, vertical, transparent background
    theme(legend.position = c(0, 1),
          legend.justification = c(0, 1),
          legend.direction = "vertical",
          legend.key = element_rect(fill = NA, color = NA),
          legend.background = element_rect(fill = "transparent", color = NA),
          legend.text = element_text(color = c_mtext)) +
    # Titles, labels, etc.
    theme(plot.title = element_text(color = c_btext, vjust = 1.25,
                                    face = "bold", size = 11),
          axis.text = element_text(size = 8, color = c_mtext),
          axis.line.x = element_line(color = c_grid, linetype = "solid"),
          axis.text.x = element_text(size = 8, color = c_mtext,
                                     hjust = .5),
          axis.title.x = element_text(size = 9, color = c_mtext,
                                      hjust = 1),
          axis.title.y = element_text(size = 9, color = c_mtext,
                                      hjust = 1)) +
    # Facets
    theme(strip.background = element_rect(fill = c_grid, color = c_btext),
          strip.text = element_text(size = 8, color = c_btext)) +
    # Plot margins
    theme(plot.margin = unit(c(0.35, 0.2, 0.3, 0.35), "cm")) +
    # Caller-supplied additions applied last so they win over defaults
    theme(...)
}
turn_off_clipping <- function(ggplot_grob, draw = FALSE) {
  ## Disable grid clipping on a ggplot so grobs (labels, annotations) may
  ## extend beyond the panel and plot boundaries.
  ##
  ## ggplot_grob: a ggplot object (built internally via ggplot_build()).
  ## draw:        if TRUE, also render the modified gtable with grid.draw().
  ## Returns the modified gtable; render it later with grid.draw() if needed.
  x <- ggplot_gtable(ggplot_build(ggplot_grob))
  ## Turn clipping off for every layout element. (The original code first set
  ## clip = "off" for the "panel" element only, then immediately overwrote the
  ## whole column with "off" — the panel-only assignment was redundant.)
  x$layout$clip <- "off"
  if (draw) {
    grid.draw(x)
  }
  return(x)
}
|
d207a5c84b1f3e6ef5513561673343c32cbee42d
|
a176626eb55b6525d5a41e2079537f2ef51d4dc7
|
/Uni/Projects/code/$Rsnips/generate_exposures.r
|
ba9067a1c82f96e6cea35b1ef2191c3dbad8db91
|
[] |
no_license
|
zeltak/org
|
82d696b30c7013e95262ad55f839998d0280b72b
|
d279a80198a1dbf7758c9dd56339e8a5b5555ff2
|
refs/heads/master
| 2021-01-21T04:27:34.752197
| 2016-04-16T04:27:57
| 2016-04-16T04:27:57
| 18,008,592
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,092
|
r
|
generate_exposures.r
|
# script to join geolocated BI lat/lon/dates with Itai's PM and temperature predictions
# started 3/24/2016
# Pipeline overview: load participant info, load gridded PM2.5 predictions,
# range-join them with foverlaps() over a 395-day window per pregnancy
# (60 days before LMP through LMP+335), then write out summary, long, and
# wide exposure tables.
library(data.table)
library(pryr) # keep track of memory usage
library(ggmap) # to plot participant locations
#### participant info
load("data/FromHeather/BIdeID_2016-03-22.RData")
bideid <- BIdeID
rm(BIdeID)
# This file contains dob and PHI and sits w/ Heather
biid<-fread("data/FromHeather/deid2.csv")
bideid<-merge(bideid, biid[,.(ID,dob)],by='ID')
rm(biid)
#### PM exposures
# import the data.table of exposure series for each grid ID
# we use a relative path (relative to our R project/git repo)
pm <- readRDS("data/FromItai/pmmatrix_2016-03-18.rds")
# import table linking subjectID with gridID
sidlinkpm <- readRDS("data/FromItai/cases_aodguid_2016-03-18.rds")
# #### Temperature exposures
# # import the data.table of exposure series for each grid ID
# # we use a relative path (relative to our R project/git repo)
# tmp <- readRDS("data/FromItai/tmpmatrix_2016-03-18.rds")
# # import table linking subjectID with gridID
# sidlinktmp <- readRDS("data/FromItai/cases_tempguid_2016-03-18.rds")
# looking at PM estimates
class(pm)
dim(pm)
sapply(pm, class)
# fix some variable classes
pm[, GUID := as.integer(GUID)]
pm[, day := as.IDate(day)] # for data.table purposes
pm[1:2,]
# look at exposure link table
class(sidlinkpm)
dim(sidlinkpm)
sidlinkpm[1:2,]
# look at participant info table
bideid[1:2,]
sapply(bideid, class)
bideid[, LMP := as.IDate(LMP)] # for data.table merging purposes
bideid[, dob:=as.IDate(strptime(dob,"%m/%d/%Y"))]
dim(bideid)
# to generate exposure estimates, we need to join tables and extract date ranges
bideid <- merge(bideid, sidlinkpm[, .(ID, GUID)], by = "ID", all.x = T)
# first - are there kids not in the link table?
bideid[is.na(GUID), .N] # 64 kids
# where are these kids?
backgroundmap <- get_map("Boston, MA", zoom = 5)
ggmap(backgroundmap, extent = "normal", darken = c(0.5, "white")) +
  geom_point(aes(x = X, y = Y), alpha = 1, data = bideid[is.na(GUID),]) +
  theme_bw()
# they are outside of the exposure model region
# where are the 100 most frequent GUIDs (where are most people coming from)
ggmap(backgroundmap, extent = "normal", darken = c(0.5, "white")) +
  geom_point(aes(x = X, y = Y), alpha = 1,
             data = bideid[GUID %in% bideid[, .N, by = GUID][order(N, decreasing = T)][1:100, GUID]]) +
  theme_bw()
# for kids with a GUID - make a long data.table with their daily exposure time series
# (395 days starting 60 days before LMP)
# computing range join with foverlaps
# since we need a start and end for both datasets, we say that each PM exposure ends on the nextday
pm[, nextday := day + 1]
# set the period we are interested in (395 days starting 60 days before LMP)
bideid[, start := LMP-59]
bideid[, end := LMP+335]
# key up both DTs
setkey(pm, GUID, day, nextday)
setkey(bideid, GUID, start, end)
# let's see how long this takes
ptm <- proc.time(); Sys.time() # took 12 minutes on Allan's iMac; 25 min on Heather's BI laptop
bipmlong <- foverlaps(pm[, .(GUID, day, nextday, pm25 = pm25_final)], bideid, by.x = c("GUID", "day", "nextday"), by.y = c("GUID", "start", "end"), type="any", nomatch = 0)
ptm <- proc.time() - ptm; paste(round(ptm[["elapsed"]]/60, 1), "minutes")
dim(bipmlong)
bipmlong
# are the NA only people who didn't have a GUID?
identical(bideid[is.na(GUID), ID], bipmlong[is.na(pm25), ID])
bipmlong[is.na(pm25), .N] # same 64 people
# we drop them here
bipmlong <- bipmlong[!is.na(pm25)]
# create summary variables (trimester specific averages and recent exposures)
# restrict to those who have an LMP more than 335 days before 2013-12-31 (to allow a dlm series that has a 60 day lag after birth)
setkey(bipmlong, ID)
setkey(bideid, ID)
bideid[LMP + 335 <= as.Date("2013-12-31"), .N] # 47971 babies
bideid[,range(LMP)] # All LMPs - 60 days are after start of PM model
#merge in dob from Heather's file
bipmlong[bideid[LMP + 335 <= as.Date("2013-12-31"), .(ID,dob)],dob:=dob]
# Per-ID summaries: whole-pregnancy and trimester means, plus recent-window
# means relative to date of birth.
bipmsummary <- bipmlong[bideid[LMP + 335 <= as.Date("2013-12-31"), .(ID)],
                        list(pmpreg = mean(.SD[day >= LMP & day <= dob, pm25]),
                             pmtri1 = mean(.SD[day >= LMP & day < LMP + 7*14, pm25], na.rm = T),
                             pmtri2 = mean(.SD[day >= LMP + 7*14 & day < LMP + 7*28 & day <= dob, pm25], na.rm = T),
                             pmtri3 = mean(.SD[day >= LMP + 7*28 & day <= dob, pm25], na.rm = T), # consider the days you did have
                             pmlast02days = mean(.SD[day >= dob - 2 & day <= dob, pm25]), #these variables need to be run by Heather with the real dob
                             pmlast07days = mean(.SD[day >= dob - 7 & day <= dob, pm25]),
                             pmlast14days = mean(.SD[day >= dob - 14 & day <= dob, pm25]),
                             pmlast28days = mean(.SD[day >= dob - 28 & day <= dob, pm25]),
                             dobdow = format(dob, "%a"),
                             LMP = LMP[1], edd = LMP[1] + GA_days[1]),by=ID]
dim(bipmsummary)
# Remove dob for sharing back to Allan/Margherita/Itai
bipmlong[,dob:=NULL]
names(bideid)
bideid[,dob:=NULL]
names(bipmlong)
names(bipmsummary)
# check for missingness
bipmsummary[is.na(pmtri3)]
problemid <- bipmsummary[is.na(pmtri3), ID]
setkey(bideid, ID)
bideid[.(problemid)]
# well some of these may not have a third trimester (delivered too soon)
# <7*28 = 196 days after LMP
bideid[.(problemid), summary(GA_days)]
bideid[.(problemid),][order(GA_days)]
# let's track down someone missing their pmtri3
bideid[.(problemid)][485]# ID 46537
# NOTE(review): the comment above says ID 46537 but the lookup below uses
# 46570 — confirm which ID was intended.
bipmsummary[ID == 46570]
# drop one all missing row created by our summary somehow
bipmsummary <- bipmsummary[!is.na(ID)]
# #check one observation with separate code: ID 1000
# #We calculated thee summary using edd (but actual averages based on dob)
# bideid[ID == 1000]
# pm[GUID == 1202432 & day >= as.Date("2003-06-02") & day <= as.Date("2003-06-02") + 283.5, mean(pm25_final)]
# bipmsummary[ID == 1000]
# save out the derived exposure summary
write.csv(bipmsummary, file = paste0("data/bipmsummary_", Sys.Date(), ".csv"), row.names = F)
# generate the wide exposure dataset for DLMs
setkey(bipmlong, ID, day)
# subsetting to individuals within the model time period
# create a dayindex
# note that LMP will always be dayindex061
bipmlong[bideid[LMP + 335 <= as.Date("2013-12-31"), .(ID)], dayindex := paste0("dayindex", sprintf("%0.3i", 1:.N)), by = "ID"]
# save out the derived long exposure time series
write.csv(bipmlong, file = paste0("data/bipmlongdaily_", Sys.Date(), ".csv"), row.names = F)
bipmwide <- dcast.data.table(bipmlong[bideid[LMP + 335 <= as.Date("2013-12-31"), .(ID)], list(ID, dayindex, pm25)], ID ~ dayindex)
dim(bipmwide)
bipmwide[1:5,1:5,with=FALSE]
# drop the column of NA (not sure why it is there)
bipmwide[, "NA" := NULL]
# save this out
# NOTE(review): `file =` below is inside paste0() rather than write.csv();
# it happens to work because paste0 ignores the name and the result is passed
# positionally as write.csv's `file` argument — confirm and tidy if edited.
write.csv(bipmwide, paste0(file ="data/bipm_wide_396days_", Sys.Date(), ".csv"), row.names = F)
#cleanup
# NOTE(review): `i` is never created in this script, so rm() will warn that
# object 'i' is not found — confirm whether it was meant to be removed here.
mem_used()
rm(i, ptm, backgroundmap, pm, problemid, sidlinkpm)
# end of file
|
16ce57e7311c2b8af96473dc248cab9127cbc8ab
|
5d9a9aeba6dd447f04709a03a1d1584f8328a6a8
|
/9_FinalCode.R
|
77a539a8caaaed629be12ffb4443371fc928fd64
|
[] |
no_license
|
edrake07/Machine-Learning-Project
|
bad70ddadc4ce1ea1f6889830763356eea308341
|
bbdf78aea8c9476b92e1f139336b805fbcdff64d
|
refs/heads/master
| 2022-04-26T19:58:09.727451
| 2020-04-14T16:41:18
| 2020-04-14T16:41:18
| 255,450,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,283
|
r
|
9_FinalCode.R
|
#### Final Project Code
# Trains decision-tree (rpart) and random-forest models on the weight-lifting
# dataset with several cross-validation settings, compares their accuracy on a
# held-out split, and predicts on the validation set with the chosen model.
# load packages
library(dplyr)
library(caret)
library(randomForest)
library(rattle)
library(parallel)
library(doParallel)
library(ggplot2)
# set seed for replicability
set.seed(1234567)
## Basic Data Load and Cleaning
# read in data from csv files
training_full <- read.csv("train.csv")
validation <- read.csv("test.csv")
## Initial cleaning for training data set
# get data headers for training
headers <- names(training_full)
data_types <- sapply(training_full, class)
# convert true numeric data from factor
# (columns 1,2,3,37 are excluded: they are genuinely non-numeric factors)
factors <- grep("factor", data_types)
factors <- factors[-c(1,2,3,37)]
asNumeric <- function(x) as.numeric(as.character(x))
training_full <- modifyList(training_full, lapply(training_full[, factors],
                                                  asNumeric))
## Initial cleaning for validation data set
# get data headers for validation (same procedure as training)
headers <- names(validation)
data_types <- sapply(validation, class)
# convert true numeric data from factor
factors <- grep("factor", data_types)
factors <- factors[-c(1,2,3,37)]
validation <- modifyList(validation, lapply(validation[, factors],
                                            asNumeric))
## Initial Column Selection and Test/Train Data Creation
# identify the 8 calculated features from data (summary statistics columns
# that are dropped before modelling)
feature_keys <- c("avg","var","stddev","max","min","amplitude","kurtosis","skewness")
toMatch <- paste0("^",feature_keys)
toMatch <- paste(toMatch, collapse = "|")
feature_names <- grep(toMatch, names(training_full), value = T)
feature_index <- grep(toMatch, names(training_full))
training_full_limited_cols <- training_full[,-c(feature_index)]
training_full_limited_cols <- training_full_limited_cols[,-c(1,3:7)]
# create testing/training samples within full testing data (75/25 split)
inTraining <- createDataPartition(training_full_limited_cols$classe, p = .75, list=FALSE)
training <- training_full_limited_cols[inTraining,]
testing <- training_full_limited_cols[-inTraining,]
# save datasets for future use
saveRDS(training, file="training.RDS")
saveRDS(testing, file="testing.RDS")
saveRDS(validation, file="validation.RDS")
## Run Decision Tree and Random Forest Analysis
# configure training runs using various resampling methods:
# no resampling, and 2-, 5-, and 10-fold cross-validation
fitControl_none <- trainControl(method = "none",
                                allowParallel = TRUE)
fitControl_cv2 <- trainControl(method = "cv",
                               number = 2,
                               allowParallel = TRUE)
fitControl_cv5 <- trainControl(method = "cv",
                               number = 5,
                               allowParallel = TRUE)
fitControl_cv10 <- trainControl(method = "cv",
                                number = 10,
                                allowParallel = TRUE)
# configure parallel processing
cluster <- makeCluster(detectCores())
registerDoParallel(cluster)
# train tree models (each fit is saved to disk and wall-clock timed)
start_tree_none <- Sys.time()
print(Sys.time())
modFit_tree_none <- train(classe ~ ., data=training, method="rpart", trControl=fitControl_none)
saveRDS(modFit_tree_none, "modFit_tree_none.RDS")
end_tree_none <- Sys.time()
print(Sys.time())
start_tree_cv2 <- Sys.time()
print(Sys.time())
modFit_tree_cv2 <- train(classe ~ ., data=training, method="rpart", trControl=fitControl_cv2)
saveRDS(modFit_tree_cv2, "modFit_tree_cv2.RDS")
end_tree_cv2 <- Sys.time()
print(Sys.time())
start_tree_cv5 <- Sys.time()
print(Sys.time())
modFit_tree_cv5 <- train(classe ~ ., data=training, method="rpart", trControl=fitControl_cv5)
saveRDS(modFit_tree_cv5, "modFit_tree_cv5.RDS")
end_tree_cv5 <- Sys.time()
print(Sys.time())
start_tree_cv10 <- Sys.time()
print(Sys.time())
modFit_tree_cv10 <- train(classe ~ ., data=training, method="rpart", trControl=fitControl_cv10)
saveRDS(modFit_tree_cv10, "modFit_tree_cv10.RDS")
end_tree_cv10 <- Sys.time()
print(Sys.time())
# train random forest models (same four resampling configurations)
start_rf_none <- Sys.time()
print(Sys.time())
modFit_rf_none <- train(classe ~ ., data=training, method="rf", trControl=fitControl_none)
saveRDS(modFit_rf_none, "modFit_rf_none.RDS")
end_rf_none <- Sys.time()
print(Sys.time())
start_rf_cv2 <- Sys.time()
print(Sys.time())
modFit_rf_cv2 <- train(classe ~ ., data=training, method="rf", trControl=fitControl_cv2)
saveRDS(modFit_rf_cv2, "modFit_rf_cv2.RDS")
end_rf_cv2 <- Sys.time()
print(Sys.time())
start_rf_cv5 <- Sys.time()
print(Sys.time())
modFit_rf_cv5 <- train(classe ~ ., data=training, method="rf", trControl=fitControl_cv5)
saveRDS(modFit_rf_cv5, "modFit_rf_cv5.RDS")
end_rf_cv5 <- Sys.time()
print(Sys.time())
start_rf_cv10 <- Sys.time()
print(Sys.time())
modFit_rf_cv10 <- train(classe ~ ., data=training, method="rf", trControl=fitControl_cv10)
saveRDS(modFit_rf_cv10, "modFit_rf_cv10.RDS")
end_rf_cv10 <- Sys.time()
print(Sys.time())
# de-register parallel processing cluster
stopCluster(cluster)
registerDoSEQ()
## Test model accuracy and predict
# predict with tree models
pred_tree_none <- predict(modFit_tree_none, testing)
pred_tree_cv2 <- predict(modFit_tree_cv2, testing)
pred_tree_cv5 <- predict(modFit_tree_cv5, testing)
pred_tree_cv10 <- predict(modFit_tree_cv10, testing)
# predict with random forest models
pred_rf_none <- predict(modFit_rf_none, testing)
pred_rf_cv2 <- predict(modFit_rf_cv2, testing)
pred_rf_cv5 <- predict(modFit_rf_cv5, testing)
pred_rf_cv10 <- predict(modFit_rf_cv10, testing)
# calculate prediction accuracy on testing data set
accuracy_tree_none <- sum(pred_tree_none == testing$classe) / nrow(testing)
accuracy_tree_cv2 <- sum(pred_tree_cv2 == testing$classe) / nrow(testing)
accuracy_tree_cv5 <- sum(pred_tree_cv5 == testing$classe) / nrow(testing)
accuracy_tree_cv10 <- sum(pred_tree_cv10 == testing$classe) / nrow(testing)
accuracy_rf_none <- sum(pred_rf_none == testing$classe) / nrow(testing)
accuracy_rf_cv2 <- sum(pred_rf_cv2 == testing$classe) / nrow(testing)
accuracy_rf_cv5 <- sum(pred_rf_cv5 == testing$classe) / nrow(testing)
accuracy_rf_cv10 <- sum(pred_rf_cv10 == testing$classe) / nrow(testing)
# collect accuracies into a long data frame for comparison/plotting
accuracy_nums <- c(accuracy_tree_none, accuracy_tree_cv2, accuracy_tree_cv5, accuracy_tree_cv10,
                   accuracy_rf_none, accuracy_rf_cv2, accuracy_rf_cv5, accuracy_rf_cv10)
run_names <- c("tree_none", "tree_cv2", "tree_cv5", "tree_cv10", "rf_none", "rf_cv2", "rf_cv5", "rf_cv10")
accuracy <- as.data.frame(cbind(run_names, accuracy_nums))
accuracy$model <- c("tree", "tree", "tree", "tree", "rf", "rf", "rf", "rf")
names(accuracy) <- c("run", "accuracy", "model")
accuracy$accuracy <- as.numeric(as.character(accuracy$accuracy))
#g <- ggplot(data=accuracy, aes(x=run, y=accuracy, fill=model)) + geom_bar(stat="identity") + geom_text(aes(label=round(accuracy,4)), angle = 90, position = position_stack(vjust = 0.5))
#g + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + scale_x_discrete(limits=c(run_names)) + scale_y_continuous(labels = percent)
# Final Prediction Using Validation Data Set
# select relevant columns of the validation data set
final_cols <- names(training)
strMatch <- final_cols
strMatch <- paste0("^", strMatch, "$", collapse="|")
final_cols_index <- grep(strMatch, names(validation))
validation_final <- validation[,final_cols_index]
# perform prediction on the validation data set
# (modFit_rf_cv5 is used as the final model here)
pred_final <- predict(modFit_rf_cv5, validation_final)
|
1caca0e2cba041456155841927c886ca850fd10a
|
86d44314862fc8f10e0c8d590ef54089b932b69c
|
/packrat/lib/x86_64-apple-darwin19.4.0/4.0.4/effectsize/doc/convert.R
|
119b7e3f30f6f4553e8b2d7dff69a49cb5c24ea4
|
[] |
no_license
|
marilotte/Pregancy_Relapse_Count_Simulation
|
6f50670198174e01705ad3bf107cabad03c81b23
|
c4ccd9419cf1e63520caddd1e6c89f42013b24a2
|
refs/heads/main
| 2023-04-06T07:20:39.965543
| 2021-03-31T21:18:29
| 2021-03-31T21:18:29
| 349,435,570
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,250
|
r
|
convert.R
|
## Purled R code extracted from the effectsize package "convert" vignette.
## Each "## ----" line marks a knitr chunk boundary; do not edit by hand if
## the vignette source (.Rmd) is available.
## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
library(knitr)
options(knitr.kable.NA = "")
knitr::opts_chunk$set(comment = ">")
options(digits = 3)
pkgs <- c("effectsize", "ggplot2", "correlation", "parameters", "bayestestR")
if (!all(sapply(pkgs, require, quietly = TRUE, character.only = TRUE))) {
  knitr::opts_chunk$set(eval = FALSE)
}
## -----------------------------------------------------------------------------
set.seed(1)
data <- bayestestR::simulate_difference(
  n = 10,
  d = 0.2,
  names = c("Group", "Outcome")
)
## ---- echo=FALSE--------------------------------------------------------------
print(data, digits = 3)
## -----------------------------------------------------------------------------
cohens_d(Outcome ~ Group, data = data)
## ---- warning=FALSE-----------------------------------------------------------
correlation::correlation(data)[2, ]
## -----------------------------------------------------------------------------
d_to_r(-0.31)
## -----------------------------------------------------------------------------
fit <- lm(mpg ~ am + hp, data = mtcars)
parameters::model_parameters(fit)
# A couple of ways to get partial-d:
5.28 / sigma(fit)
t_to_d(4.89, df_error = 29)[[1]]
## -----------------------------------------------------------------------------
t_to_r(4.89, df_error = 29)
correlation::correlation(mtcars[, c("mpg", "am", "hp")], partial = TRUE)[1, ]
# all close to:
d_to_r(1.81)
## -----------------------------------------------------------------------------
# 1. Set a threshold
thresh <- 0
# 2. dichotomize the outcome
data$Outcome_binom <- data$Outcome < thresh
# 3. Fit a logistic regression:
fit <- glm(Outcome_binom ~ Group,
           data = data,
           family = binomial()
)
parameters::model_parameters(fit)
# Convert log(OR) (the coefficient) to d
oddsratio_to_d(-0.81, log = TRUE)
## -----------------------------------------------------------------------------
OR <- 3.5
baserate <- 0.85
oddsratio_to_riskratio(OR, baserate)
## -----------------------------------------------------------------------------
OR <- 3.5
baserate <- 0.04
oddsratio_to_riskratio(OR, baserate)
|
badd94ee5ff1ed5a6ff011b8206bad1c6e48b864
|
552d910aabda4755d6d831e1063e503015037603
|
/R/net.edges.R
|
9d305c6d0daa2b04310ef0ad5576556e60faf607
|
[] |
no_license
|
cran/JGL
|
da88bdca245751169d574e27c0fd846b70594c4c
|
bca5ba36a8070f7b391bbd7a9d69c5de6e9d6007
|
refs/heads/master
| 2021-05-16T03:11:20.349474
| 2018-11-30T22:40:15
| 2018-11-30T22:40:15
| 17,680,067
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
net.edges.R
|
net.edges <-
function(theta)
{
  ## Extract the edge set of each of the K estimated networks.
  ##
  ## theta: a list of K precision/parameter matrices (one per class).
  ## Returns a list of K igraph edge sequences, one per network.
  ##
  ## Fixes vs. original: `=` replaced with `<-`, the result list is
  ## preallocated, and `seq_along()` replaces `1:K` (which would iterate
  ## over c(1, 0) when `theta` is an empty list).
  adj <- make.adj.matrix(theta, separate = TRUE)
  edges <- vector("list", length(theta))
  for (k in seq_along(theta))
  {
    ## Self-loops are not edges; zero the diagonal before building the graph.
    diag(adj[[k]]) <- 0
    gadj <- graph.adjacency(adj[[k]], mode = "upper")
    edges[[k]] <- E(gadj)
  }
  return(edges)
}
|
44f6811fca8b95bc63e64358e438c7ab8bf8648d
|
4b89672c6ff0667897a3fea3819980b7868d64f7
|
/man/dot.Rd
|
c5627052d382e238bb940f75e80491180adc695d
|
[] |
no_license
|
cran/RWeka
|
04abd774e061bb54dcb2dd5269d4cd48ebdb8d4f
|
cadb071065d055b127d265c1eae16c0f6c217a96
|
refs/heads/master
| 2023-03-15T23:22:07.324627
| 2023-03-07T13:18:59
| 2023-03-07T13:18:59
| 17,693,014
| 5
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,297
|
rd
|
dot.Rd
|
\name{dot}
\alias{write_to_dot}
\alias{write_to_dot.Weka_classifier}
\title{Create DOT Representations}
\description{
Write a DOT language representation of an object for processing via
Graphviz.
}
\usage{
write_to_dot(x, con = stdout(), \dots)
\method{write_to_dot}{Weka_classifier}(x, con = stdout(), \dots)
}
\arguments{
\item{x}{an \R object.}
\item{con}{a \link{connection} for writing the representation to.}
\item{\dots}{additional arguments to be passed from or to methods.}
}
\details{
Graphviz (\url{https://www.graphviz.org}) is open source graph
visualization software providing several main graph layout programs,
of which \code{dot} makes \dQuote{hierarchical} or layered drawings of
directed graphs, and hence is typically most suitable for visualizing
classification trees.
Using \code{dot}, the representation in file \file{foo.dot} can be
transformed to PostScript or other displayable graphical formats using
(a variant of) \code{dot -Tps foo.dot >foo.ps}.
Some Weka classifiers (e.g., tree learners such as J48 and M5P)
implement a \dQuote{Drawable} interface providing DOT representations
of the fitted models. For such classifiers, the \code{write_to_dot}
method writes the representation to the specified connection.
}
\keyword{graphs}
|
36ab6c478cb66f8b39aa08787672152957731947
|
2fe4c16e0377a99e198ab04d5c378ca247ae4329
|
/Rscript/Library/mzkit_app/pipeline/mrm_quantify.R
|
9a1889e3a8d26d1d452e8d63b23f6654e7d5ece7
|
[
"MIT"
] |
permissive
|
xieguigang/mzkit
|
1964d28b0fad5f6d44950fdccdd4a70877f75c29
|
6391304b550f7e4b8bb6097a6fb1c0d3b6785ef1
|
refs/heads/master
| 2023-08-31T06:51:55.354166
| 2023-08-30T08:56:32
| 2023-08-30T08:56:32
| 86,005,665
| 37
| 11
|
MIT
| 2023-03-14T14:18:44
| 2017-03-23T23:03:07
|
Visual Basic .NET
|
UTF-8
|
R
| false
| false
| 15,341
|
r
|
mrm_quantify.R
|
#' title: MRM quantification
#' author: xieguigang <gg.xie@bionovogene.com>
#'
#' description: do LC-MS/MS targeted metabolomics quantitative analysis
#' base on the MRM ion pairs data. This script will take a set of
#' *.mzML raw data files, and then create linear models based on the
#' linear reference raw data files, do sample quantitative evaluation
#' and QC assertion if the QC file is exists in your sample files.
require(mzkit);
# imports mzkit library modules
imports ["Linears", "MRMLinear", "visualPlots"] from "mzkit.quantify";
imports "assembly" from "mzkit";
# includes external helper script
imports "plot_ionRaws.R";
#region "pipeline script configuration"
# config of the standard curve data files
[@info "The folder path of the reference lines. you can set the reference name pattern via '--patternOfRef' parameter for matched the raw data files in this folder."]
[@type "folder, *.mzML"]
let wiff as string = ?"--Cal" || stop("No standard curve data provides!");
[@info "The folder path of the sample data files."]
[@type "folder, *.mzML"]
let sample as string = ?"--data" || stop("No sample data files provided!");
[@info "MRM ion information xlsx table file. This table file must contains the linear reference content data of each targeted metabolite for create linear reference models."]
[@type "*.xlsx"]
let MRM.info as string = ?"--MRM" || stop("Missing MRM information table file!");
# use external MSL data file if there is no
# ion pair data in the MRM table file.
[@info "The *.MSL ion file for specific the MRM ion pairs data if there is no ion pair data in the MRM table."]
[@type "*.MSL"]
let ions as string = ?"--ions";
[@info "The *.MSL ion file for specific the MRM ion pairs data if the ions data in sample is different with reference samples(mainly address of the RT shift problems.)."]
[@type "*.MSL"]
let ions2 as string = ?"--ions-sample";
[@info "folder location for save quantification result output."]
[@type "folder"]
let dir as string = ?"--export" || `${wiff :> trim([" ", "/"])}-result/`;
print("reference lines:");
print(wiff);
print("sample data:");
print(sample);
print("result will be export to folder location:");
print(dir);
# The regexp pattern of the file name for match
# the reference point data.
[@info "the regexp expression pattern for match of the reference lines raw data file."]
[@type "regexp"]
const patternOf.ref as string = ?"--patternOfRef" || '[-]?LM[-]?\d+';
[@info "the regexp expression pattern for match of the QC sample raw data files."]
[@type "regexp"]
const patternOf.QC as string = ?"--patternOfQC" || "QC[-]?\d+";
[@info "the regexp expression pattern for match of the blank sample raw data files."]
[@type "regexp"]
const patternOf.Blank as string = ?"--patternOfBLK" || "BLK(\s*\(\d+\))?";
[@info "Do linear fitting of the given ions in different parameters?"]
const individualFit as boolean = ?"--individual-fit";
# let Methods as integer = {
# NetPeakSum = 0;
# Integrator = 1;
# SumAll = 2;
# MaxPeakHeight = 3;
# }
# peak area intergration calculation method
# these api functions that required of the integrator parameter
#
# 1. sample.quantify
# 2. wiff.scans
# 3. MRM.peaks
# 4. extract.peakROI
#
[@info "the peak area integrator algorithm name."]
[@type "term"]
let integrator as string = ?"--integrator" || "NetPeakSum";
[@info "Create of the linear reference in work curve mode?"]
let isWorkCurve as boolean = ?"--workMode";
[@info "the window size for match the RT value in MSL ion data with the RT value that detected by the peak in samples. The data unit of this parameter should be in 'second', not 'minute'."]
[@type "time window in seconds"]
let rt_winSize as double = as.numeric(?"--rt.winsize" || 5);
[@info "The m/z tolerance value for match the MRM ion pair in format of mzkit tolerance syntax. Value of this mass tolerance can be da:xxx (delta mass) or ppm:xxx (ppm precision)."]
[@type "mzError"]
let tolerance as string = ?"--mz.diff" || "ppm:15";
[@info "the time range of a peak, this parameter is consist with two number for speicifc the upper bound and lower bound of the peak width which is represented with RT dimension."]
[@type "doublerange"]
let peakwidth as string = ?"--peakwidth" || "8,30";
[@info "the threshold value for determine that a detected peak is noise data or not. ZERO or negative value means not measure s/n cutoff."]
let sn_threshold as double = ?"--sn_threshold" || "3";
# Max number of points for removes in
# linear modelling
#
# + negative value for auto detects: n.points / 2 - 1
# + ZERO for no points is removed
# + positive value for specific a number for the deletion.
[@info "Max number of reference points for removes in linear modelling. The default value '-1' means auto detects."]
let maxNumOfPoint.delets as integer = ?"--max.deletes" || -1;
[@info "The angle threshold for detect a peak via the calculation of sin(x)."]
let angle.threshold as double = ?"--angle.threshold" || 8;
[@info "quantile threshold value for detected baseline noise in the peak finding."]
let baseline.quantile as double = ?"--baseline.quantile" || 0.5;
#end region
if (isWorkCurve) {
print("Linear Modelling will running in work curve mode!");
}
print("View parameter configurations:");
print("RT window size:");
print(rt_winSize);
print("m/z tolerance for find MRM ion:");
print(tolerance);
print("Integrator that we used for calculate the Peak Area:");
print(integrator);
print("Max number of points that allowes removes automatically in the process of linear modelling:");
# --- Echo run configuration and load ion-pair / reference metadata ---------
# NOTE(review): this is R# (sciBASIC/mzkit) syntax, not base R. Variables such
# as peakwidth, sn_threshold, maxNumOfPoint.delets, ions, MRM.info, dir and the
# patternOf.* values are defined earlier in the script (outside this chunk) --
# presumably from command-line arguments; confirm against the script header.
print("peak width range(unit in second):");
print(peakwidth);
print("signal/noise ratio threshold is:");
print(sn_threshold);
if (maxNumOfPoint.delets < 0) {
print("It's depends on the number of reference sample");
} else {
if (maxNumOfPoint.delets == 0) {
print("Is not allowed for removes any points!");
} else {
print(`Removes less than ${maxNumOfPoint.delets} bad reference points.`);
}
}
print(`MRM ion peak is populated from raw data with angle threshold ${angle.threshold}.`);
print(`All of the data ticks that its intensity value less than ${baseline.quantile} quantile level will be treated as background noise`);
let reference = NULL;
let is = NULL;
# read MRM, standard curve and IS (internal standard) information from the given file
if (file.exists(ions)) {
# ion pairs data comes from an external MSL file; reference levels and IS
# metadata are still read from the MRM.info workbook sheets below
[reference, is] = MRM.info |> [
read.reference("coordinates"),
read.IS("IS")
];
print("Use external msl data as ion pairs.");
# the time unit is minute by default
# required convert to second by
# specific that the time unit is Minute
# at here
ions = mzkit::ionPairsFromMsl(ions, unit = "Minute");
} else {
# ion pairs data from the MRM data table file.
# read data from a data sheet which is named ``ion pairs``.
[ions, reference, is] = MRM.info :> [
read.ion_pairs("ion pairs"),
read.reference("coordinates"),
read.IS("IS")
];
}
# print debug message so the run log records the parsed metadata
print("View reference standard levels data:");
print(reference);
print("Internal standards:");
if (length(is) == 0) {
print("No internal standards...");
} else {
print(is);
}
print("Ion pairs for each required metabolites:");
print(ions);
print("Previews of the isomerism ion pairs:");
print(ions :> isomerism.ion_pairs);
print(`The reference data raw files will be matches by name pattern: [${patternOf.ref}]`);
# Partition the raw files into reference standards / samples / blanks by
# file-name pattern; as.object exposes the fields ($standards, $samples, ...)
wiff <- list(samples = sample, reference = wiff)
# :> wiff.rawfiles("[-]?LM[-]?\d+")
:> wiff.rawfiles(patternOf.ref, patternOfBlank = patternOf.Blank)
:> as.object
;
print("Reference standards:");
print(basename(wiff$standards));
print("Sample data files:");
print(basename(wiff$samples));
let blanks <- NULL;
# logical mask over sample files: which ones are QC injections
let QC_samples = basename(wiff$samples) like regexp(patternOf.QC);
if (sum(QC_samples) > 0) {
print(`Find ${sum(QC_samples)} in raw data:`);
print(basename(wiff$samples[QC_samples]));
}
# Shared peak detection / integration arguments reused by every raw-file scan
# in this script (retention-time alignment, reference scan, sample quantify).
const args = MRM.arguments(
tolerance = tolerance,
timeWindowSize = rt_winSize,
angleThreshold = angle.threshold,
baselineQuantile = baseline.quantile,
peakAreaMethod = integrator,
TPAFactors = NULL,
peakwidth = peakwidth,
sn_threshold = sn_threshold
);
if (wiff$hasBlankControls) {
print(`There are ${length(wiff$blanks)} blank controls in wiff raw data!`);
print(wiff$blanks);
# scan the blank control raw files; the result is later passed to linears()
# as blankControls so blank signal can be accounted for in the fitting
blanks = wiff$blanks :> wiff.scans(
ions = ions,
peakAreaMethod = integrator,
TPAFactors = NULL,
tolerance = tolerance,
timeWindowSize = rt_winSize,
removesWiffName = TRUE,
angleThreshold = angle.threshold,
baselineQuantile = baseline.quantile,
peakwidth = peakwidth,
sn_threshold = sn_threshold
);
} else {
print("Target reference data have no blank controls.");
}
#' Create linear models (standard curves) from the reference raw files
#'
#' Performs retention-time alignment over the reference files, extracts the
#' reference peak areas, fits one linear model per metabolite, writes the
#' rt-shift table, reference point table, per-model plots/CSV (via printModel)
#' and a linears.csv summary, plus one peak table per reference raw file.
#'
#' @param wiff_standards A file path collection of ``*.mzML`` files, which should be the reference points.
#' @param subdir A sub-directory name (under the global ``dir``) where the
#'    result tables are saved.
#'
#' @return the collection of fitted linear models (last expression ``ref;``).
#'
const linears.standard_curve as function(wiff_standards, subdir) {
# retention-time shift summary across the reference files
const rt.shifts = wiff_standards :> MRM.rt_alignments(ions, args);
print("Previews of the rt shifts summary in your sample reference points:");
rt.shifts
|> as.data.frame
|> print
;
rt.shifts
|> as.data.frame
|> write.csv(file = `${dir}/${subdir}/rt_shifts.csv`)
;
# Get raw scan data for given ions
const CAL <- wiff_standards
# list.files(wiff, pattern = "*.mzML")
:> wiff.scan2(
ions = ions,
removesWiffName = TRUE,
rtshifts = NULL, # rt.shifts alignment deliberately disabled here
args = args
);
# fit one standard curve per metabolite from the reference peak areas
const ref <- linears(
rawScan = CAL,
calibrates = reference,
ISvector = is,
autoWeighted = TRUE,
blankControls = blanks,
maxDeletions = maxNumOfPoint.delets,
isWorkCurveMode = isWorkCurve,
args = args
);
CAL :> write.csv(file = `${dir}/${subdir}/referencePoints(peakarea).csv`);
# plot and export only the models that fitted successfully
for(line in ref) {
if (line :> as.object :> do.call("isValid")) {
line :> printModel(subdir);
}
}
# save linear models summary
ref
|> lines.table
|> write.csv(file = `${dir}/${subdir}/linears.csv`)
;
# export a per-file peak table for each reference raw file
for(mzML in wiff_standards) {
const filepath <- `${dir}/${subdir}/peaktables/${basename(mzML)}.csv`;
const peaks <- MRM.peak2(mzML = mzML, ions = ions, args = args);
# save peaktable for given rawfile
write.csv(peaks, file = filepath);
}
# return the fitted models to the caller
ref;
}
#' Print a model summary and render its standard curve plot
#'
#' Writes a PNG of the standard curve and a CSV of the reference points for a
#' single fitted metabolite model.
#'
#' @param line the linear fitting object model from the reference dataset
#' @param subdir the sub directory name (under the global ``dir``) for saving
#'    the standard-curve image and reference-point table.
#'
const printModel as function(line, subdir) {
# get compound id name from the model object
const id as string = line
|> as.object
|> do.call("name");
# view summary result in the run log
print(line);
bitmap(file = `${dir}/${subdir}/standard_curves/${id}.png`) {
line
|> standard_curve(title = `Standard Curve Of ${id}`)
;
}
# save the reference points used for this curve
line
|> points(nameRef = id)
|> write.points(file = `${dir}/${subdir}/standard_curves/${id}.csv`)
;
}
#' Run linear quantification over the user sample files
#'
#' Scans every sample raw file (the reference standards are re-quantified too,
#' for quality control), quantifies each metabolite against the fitted models,
#' and writes: samples.csv, per-sample peak tables, quantify.csv, rawX.csv and
#' an HTML report (plus a QC report when QC samples were detected).
#'
#' @param wiff_standards a list of filepath of the reference standards.
#' @param ref the fitted linear models returned by ``linears.standard_curve``.
#' @param ions the ion pairs dataset
#' @param subdir the sub directory name (under the global ``dir``) for saving
#'    the result tables and reports.
#'
const doLinears as function(wiff_standards, ref, ions, subdir = "") {
let scans = [];
let ref_raws = ions
# get ion chromatograms raw data for
# TIC data plots in the HTML report
|> getIonsSampleRaw(wiff_standards, tolerance)
|> lapply(ion => ion$chromatograms)
;
# calculate standards points as well for quality controls
# and result data verification (<< appends the standards to the sample list)
const sample.files = wiff$samples << wiff_standards;
# Write raw scan data of the user sample data
sample.files
# list.files(wiff, pattern = "*.mzML")
|> wiff.scans(
ions = ions,
peakAreaMethod = integrator,
TPAFactors = NULL,
tolerance = tolerance,
removesWiffName = TRUE,
timeWindowSize = rt_winSize,
angleThreshold = angle.threshold,
baselineQuantile = baseline.quantile,
peakwidth = peakwidth,
sn_threshold = sn_threshold
)
|> write.csv(file = `${dir}/${subdir}/samples.csv`)
;
# create ion quantify result for each metabolites
# that defined in ion pairs data
for(sample.mzML in sample.files) {
const peakfile as string = `${dir}/${subdir}/samples_peaktable/${basename(sample.mzML)}.csv`;
const result = ref |> sample.quantify(
sample.mzML, ions,
peakAreaMethod = integrator,
tolerance = tolerance,
timeWindowSize = rt_winSize,
TPAFactors = NULL,
angleThreshold = angle.threshold,
baselineQuantile = baseline.quantile,
peakwidth = peakwidth,
sn_threshold = sn_threshold
);
print(basename(sample.mzML));
# QuantifyScan: export the detected ion peaks for this sample
result
|> as.object
|> do.call("ionPeaks")
|> write.ionPeaks(file = peakfile);
# accumulate quantify results across all sample files
scans <- scans << result;
}
print("Sample raw files that we scans:");
print(length(scans));
# save the MRM quantify result
# base on the linear fitting
result(scans) :> write.csv(file = `${dir}/${subdir}/quantify.csv`);
scans.X(scans) :> write.csv(file = `${dir}/${subdir}/rawX.csv`);
print("Creating linear model report....");
# save linear regression html report
ref
|> report.dataset(scans, ionsRaw = ref_raws)
|> html
|> writeLines(con = `${dir}/${subdir}/index.html`)
;
# QC_samples is the file-level QC mask computed at script top level
if (sum(QC_samples) > 0) {
print("Creating QC report....");
ref
|> report.dataset(scans, QC_dataset = patternOf.QC)
|> html
|> writeLines(con = `${dir}/${subdir}/QC.html`)
;
} else {
print("QC report will not created due to the reason of no QC samples...");
}
}
# --- Main dispatch: one linear group per reference batch, or a single run ---
if (wiff$numberOfStandardReference > 1) {
# multiple standard-curve batches detected in the raw data
const groups = wiff$GetLinearGroups() |> as.list;
print("We get linear groups:");
print(groups);
for(linear_groupKey in names(groups)) {
print(`Run linear profiles for '${linear_groupKey}'`);
print(groups[[linear_groupKey]]);
# create linear reference data
# NOTE(review): ``subdir`` here is not defined inside this loop -- the
# later doLinears call uses ``subdir = linear_groupKey``, so this looks
# like it should also be ``linear_groupKey``. Confirm whether ``subdir``
# is defined earlier in the script (before this chunk) before changing.
const ref = linears.standard_curve(groups[[linear_groupKey]], subdir);
const samples_MSL = (
if (file.exists(ions2)) {
print("Use external msl data as ion pairs for the sample data!");
# the time unit is minute by default
# required convert to second by
# specific that the time unit is Minute
# at here
mzkit::ionPairsFromMsl(ions2, unit = "Minute");
} else {
ions;
}
);
# doLinears as function(wiff_standards, ref, ions, subdir = "") {
groups[[linear_groupKey]]
|> doLinears(
ref = ref,
ions = samples_MSL,
subdir = linear_groupKey
);
}
} else {
# single reference batch: results go to the output root (subdir = "")
const ref = linears.standard_curve(wiff$standards, "");
const samples_MSL = (
if (file.exists(ions2)) {
print("Use external msl data as ion pairs for the sample data!");
# the time unit is minute by default
# required convert to second by
# specific that the time unit is Minute
# at here
mzkit::ionPairsFromMsl(ions2, unit = "Minute");
} else {
ions;
}
);
print("run LC-MS/MS mrm quantification for single data group!");
wiff$standards :> doLinears(
ref = ref,
ions = samples_MSL,
subdir = ""
);
}
print("MRM quantify [JOB DONE!]");
|
b8ac10af17a5024e54ba28d18b40156df05391c9
|
994555fa87be0fd25686cf1faa17f0675f6b3a97
|
/plot1.R
|
9e46f3763f50d3dfb0a6535d283fb7fe5735cac2
|
[] |
no_license
|
hcschneider30/ExData_Plotting1
|
261989352d441646a71bcfd818784c2c4b67d7c7
|
34bab8ae515ad236bb83069afc64b6dfcf60d277
|
refs/heads/master
| 2020-12-11T04:19:00.585114
| 2014-05-10T08:22:38
| 2014-05-10T08:22:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 934
|
r
|
plot1.R
|
# plot1.R: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Read the ';'-separated household power data; missing values are coded '?'.
# Column classes are declared up front so dates stay character until parsed.
power <- read.table(
  "household_power_consumption.txt",
  sep = ";",
  header = TRUE,
  colClasses = c("character", "character", "numeric", "numeric",
                 "numeric", "numeric", "numeric", "numeric", "numeric"),
  na.strings = "?"
)

# Convert the day/month/year date strings into Date objects.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")

# Restrict to the two days of interest.
target_days <- as.Date(c('2007-02-01', '2007-02-02'))
two_days <- power[power$Date %in% target_days, ]

# Open the png device for the plot (480 x 480 pixels).
png('plot1.png', width = 480, height = 480, units = 'px')

# Draw the histogram.
hist(two_days$Global_active_power, col = 'red', breaks = 12,
     main = 'Global Active Power',
     xlab = 'Global Active Power (kilowatts)', ylab = 'Frequency')

# Close the device so the file is flushed and readable.
dev.off()
|
a3b77b3ce8b6805dbfec90817e785509bb74b539
|
946ee68030584a153c39c6c99c5101b8d2cae2dc
|
/ModelConstruction/FAO56_recycle.R
|
99e35cc653411a716b60d70e3d258bb73cfbe381
|
[] |
no_license
|
dsidavis/GreenWater
|
613a645fe52619185477659a0c4c8af208692f98
|
0be339d86a29af676a08e148ccfd94f61d454f75
|
refs/heads/master
| 2021-07-18T02:07:02.672488
| 2017-10-26T20:32:50
| 2017-10-26T20:32:50
| 107,836,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,996
|
r
|
FAO56_recycle.R
|
#alternative approach using means for different periods of the Kc curve. This was used in the FAO56 spreadsheet program.
#calculate U2 and minRH by year for mid to late period for each cell of interest; this is used by Kcb function. Should just use average of years for 2017
#' Mean mid-season 2 m wind speed (U2) per year.
#'
#' Keeps rows with Jmid <= DOY <= Jlate, then averages the selected column
#' within each year.
#'
#' @param U2.df data frame with at least `DOY` and `year` plus data columns.
#' @param col_index index of the column to average.
#' @param Jmid,Jlate day-of-year bounds of the mid-season window (inclusive).
#' @return one-column data frame of yearly means; row names are the years.
U2_mid <- function(U2.df, col_index, Jmid, Jlate) {
  mid_window <- which(U2.df$DOY >= Jmid & U2.df$DOY <= Jlate)
  mid_season <- U2.df[mid_window, ]
  yearly_mean <- tapply(mid_season[, col_index], mid_season$year, mean)
  out <- as.data.frame(yearly_mean)
  colnames(out) <- colnames(U2.df)[col_index]
  out
}
# check function: mid-season mean U2 (column 6) for every simulated year
U2_mid_allyrs <- U2_mid(U2.df, 6, almond_parameters$Jmid, almond_parameters$Jlate)
#' Mean mid-season minimum relative humidity per year.
#'
#' Same window logic as `U2_mid()`: filter rows to Jmid <= DOY <= Jlate and
#' average the chosen column by year.
#'
#' @param RHmin data frame with `DOY`, `year` and RHmin data columns.
#' @param col_index index of the column to average.
#' @param Jmid,Jlate inclusive day-of-year window bounds.
#' @return one-column data frame of yearly means; row names are the years.
RHmin_mid <- function(RHmin, col_index, Jmid, Jlate) {
  window_rows <- which(RHmin$DOY >= Jmid & RHmin$DOY <= Jlate)
  mid_rh <- RHmin[window_rows, ]
  per_year <- tapply(mid_rh[, col_index], mid_rh$year, mean)
  out <- as.data.frame(per_year)
  colnames(out) <- colnames(RHmin)[col_index]
  out
}
# mid-season mean RHmin (column 6) for every simulated year
RHmin_mid_allyrs <- RHmin_mid(RHmin.df, 6, almond_parameters$Jmid, almond_parameters$Jlate)
#' Mean late-season 2 m wind speed (U2) per year.
#'
#' Filters rows to the late-season window Jlate <= DOY <= Jharv and averages
#' the selected column within each year.
#'
#' @param U2.df data frame with `DOY`, `year` and data columns.
#' @param col_index index of the column to average.
#' @param Jlate,Jharv inclusive day-of-year window bounds (late season to harvest).
#' @return one-column data frame of yearly means; row names are the years.
U2_end <- function(U2.df, col_index, Jlate, Jharv) {
  late_window <- which(U2.df$DOY >= Jlate & U2.df$DOY <= Jharv)
  late_season <- U2.df[late_window, ]
  yearly_mean <- tapply(late_season[, col_index], late_season$year, mean)
  out <- as.data.frame(yearly_mean)
  colnames(out) <- colnames(U2.df)[col_index]
  out
}
# check function: late-season mean U2 (column 6) for every simulated year
U2_end_allyrs <- U2_end(U2.df, 6, almond_parameters$Jlate, almond_parameters$Jharv)
#' Mean late-season minimum relative humidity per year.
#'
#' Same shape as `RHmin_mid()` but over the late-season window
#' Jlate <= DOY <= Jharv.
#'
#' @param RHmin.df data frame with `DOY`, `year` and RHmin data columns.
#' @param col_index index of the column to average.
#' @param Jlate,Jharv inclusive day-of-year window bounds.
#' @return one-column data frame of yearly means; row names are the years.
RHmin_end <- function(RHmin.df, col_index, Jlate, Jharv) {
  window_rows <- which(RHmin.df$DOY >= Jlate & RHmin.df$DOY <= Jharv)
  late_rh <- RHmin.df[window_rows, ]
  per_year <- tapply(late_rh[, col_index], late_rh$year, mean)
  out <- as.data.frame(per_year)
  colnames(out) <- colnames(RHmin.df)[col_index]
  out
}
# late-season mean RHmin (column 6) for every simulated year.
# BUG FIX: this previously called RHmin_mid() -- the late-season summary
# should use RHmin_end(). (Both helpers filter identically, so the numbers
# are unchanged; the call now matches the intent and survives future edits
# to either helper.)
RHmin_end_allyrs <- RHmin_end(RHmin.df, 6, almond_parameters$Jlate, almond_parameters$Jharv)
#' Climate-adjusted mid-season basal crop coefficient.
#'
#' Equation 5 from Allen et al. 2005 (FAO-56 framework): corrects the tabled
#' standard Kcb_mid for the year's mean wind speed and minimum humidity.
#'
#' @param Kcb_mid_std standard (tabled) mid-season Kcb.
#' @param U2_summary data frame of yearly mean U2; row names are years.
#' @param RHmin_summary data frame of yearly mean RHmin; row names are years.
#' @param h_mid mean crop height (m) during mid-season.
#' @param yr year to look up in the summaries.
#' @return the adjusted Kcb_mid value for year `yr`.
Kcb_mid <- function(Kcb_mid_std, U2_summary, RHmin_summary, h_mid, yr) {
  wind <- U2_summary[which(rownames(U2_summary) == yr), ]
  humidity <- RHmin_summary[which(rownames(RHmin_summary) == yr), ]
  climate_adj <- 0.04 * (wind - 2) - 0.004 * (humidity - 45)
  Kcb_mid_std + climate_adj * (h_mid / 3)^0.3
}
# test the function: adjusted mid-season Kcb for the first and last full year
Kcb_mid(almond_parameters$Kcb_mid, U2_mid_allyrs, RHmin_mid_allyrs, almond_parameters$height, 2004)
Kcb_mid(almond_parameters$Kcb_mid, U2_mid_allyrs, RHmin_mid_allyrs, almond_parameters$height, 2016)
#' Climate-adjusted end-of-season basal crop coefficient.
#'
#' Equation 5 from Allen et al. 2005, applied to the late-season period:
#' corrects the standard Kcb_end for the year's wind speed and humidity.
#'
#' @param Kcb_end_std standard (tabled) end-of-season Kcb.
#' @param U2_summary data frame of yearly mean late-season U2; row names are years.
#' @param RHmin_summary data frame of yearly mean late-season RHmin; row names are years.
#' @param h_end mean crop height (m) during the late season.
#' @param yr year to look up in the summaries.
#' @return the adjusted Kcb_end value for year `yr`.
Kcb_end <- function(Kcb_end_std, U2_summary, RHmin_summary, h_end, yr) {
  wind <- U2_summary[which(rownames(U2_summary) == yr), ]
  humidity <- RHmin_summary[which(rownames(RHmin_summary) == yr), ]
  climate_adj <- 0.04 * (wind - 2) - 0.004 * (humidity - 45)
  Kcb_end_std + climate_adj * (h_end / 3)^0.3
}
# check the late-season adjustment for the first and last full year
Kcb_end(almond_parameters$Kcb_end, U2_end_allyrs, RHmin_end_allyrs, almond_parameters$height, 2004)
Kcb_end(almond_parameters$Kcb_end, U2_end_allyrs, RHmin_end_allyrs, almond_parameters$height, 2016)
#trial results functions and experimenting with tapply and aggregate
#
# IrDates: record the dates of the first five irrigations and the last
# irrigation for each complete water year, then write them into the matching
# row of df.output.
#
# NOTE(review): depends on the global `model.code` to pick the output row --
# confirm it is defined before this function is called.
# NOTE(review): the year condition below means the first iteration takes the
# `else` branch (creates Ir.dates) and later iterations rbind onto it; the
# accumulated Ir.dates leaks as a function-local only, not a global.
IrDates <- function(df, irr.n, df.output) { #only works for irr.n=5
years <- (min(df$year)+1):(max(df$year)-1) #could add if statment to handle partial years
for (i in 1:length(years)) {
df.temp <- df[which(df$year==years[i]), ]
# j: row indices within this year where an irrigation occurred
j <- which(df.temp$Ir > 0)
if (length(j) >= irr.n) {
if (years[i] > min(df$year)+1) {
# subsequent years: append this year's dates to the accumulator
Ir.dates.add <- data.frame(Irr.1=df.temp$dates[j[1]], Irr.2=df.temp$dates[j[2]], Irr.3=df.temp$dates[j[3]], Irr.4=df.temp$dates[j[4]], Irr.5=df.temp$dates[j[5]], Irr.Last=df.temp$dates[j[length(j)]])
#Ir.dates.add$year <- years[i]
Ir.dates <- rbind(Ir.dates, Ir.dates.add)
next
} else {
# first year: initialise the accumulator
Ir.dates <- data.frame(Irr.1=df.temp$dates[j[1]], Irr.2=df.temp$dates[j[2]], Irr.3=df.temp$dates[j[3]], Irr.4=df.temp$dates[j[4]], Irr.5=df.temp$dates[j[5]], Irr.Last=df.temp$dates[j[length(j)]])
#Ir.dates$year <- years[i]
next
}
} else {
stop(print('There is a problem with the IrDates function. Cannot handle a water year 2004-2016 with less than 5 irrigations'))
}
}
# place the accumulated dates into the Irr.1..Irr.Last columns of the
# output row whose unique_model_code matches the (global) model.code
col.start <- which(colnames(df.output)=='Irr.1')
col.end <- which(colnames(df.output)=='Irr.Last')
df.output[which(df.output$unique_model_code==model.code), col.start:col.end] <- Ir.dates
#print(class(Ir.dates$Irr.1))
return(df.output)
#print(Ir.dates)
}
#' Indices of irrigation events.
#'
#' Returns the positions where `x > 0`, or `NA` when there are none.
#' Side effect: prints `names(x)` before returning the indices.
#'
#' @param x numeric vector of irrigation amounts.
#' @return integer vector of event positions, or `NA` if no events.
IrrigationTimes <- function(x) {
  event_idx <- which(x > 0)
  if (length(event_idx) == 0) {
    return(NA)
  }
  print(names(x))
  event_idx
}
#' Debug helper: print (and invisibly return) the 'dates' element of `x`.
#' @param x a named vector (or similar) containing a 'dates' element.
testfunction <- function(x) {
  print(x['dates'])
}
#' Dates of the first irrigation events.
#'
#' `x` must have 'Ir' and 'dates' columns. Returns `NA` when there are no
#' irrigations, the dates of the first five events when there are at least
#' five, otherwise the dates of all events.
#'
#' @param x data frame (or matrix) with columns 'Ir' and 'dates'.
#' @return vector of dates, or `NA`.
IrrigationDates <- function(x) {
  event_rows <- which(x[, 'Ir'] > 0)
  n_events <- length(event_rows)
  if (n_events == 0) {
    return(NA)
  }
  if (n_events >= 5) {
    return(x[, 'dates'][event_rows[1:5]])
  }
  x[, 'dates'][event_rows]
}
|
384203baacc886120e671e42b2e05e7bb8d2642c
|
64a1e9180b778b767c49666c353e57063d96b3cb
|
/Script_Aunts.R
|
cc31bf6d255a02407645cc683e3fb80800081453
|
[] |
no_license
|
JuanPablo-RamirezLoza/Wolf-Kinship-Demography
|
4f9336f56a48794f103410584792cbc296e157af
|
bba6ab478a665b4b0031b1884a7914fd2033111b
|
refs/heads/main
| 2023-04-27T10:49:39.235088
| 2021-05-13T08:55:16
| 2021-05-13T08:55:16
| 362,403,593
| 0
| 0
| null | 2021-04-28T09:06:58
| 2021-04-28T09:01:55
| null |
UTF-8
|
R
| false
| false
| 18,588
|
r
|
Script_Aunts.R
|
# kinship relationships at equilibrium
# Packages used to build the sub-models
library(NetLogoR)
library(testthat)
library(kinship2)
library(SciViews)
library(DescTools)
# popSim: list of agent tables, one per simulated year (loaded into the
# global environment; used by everything below)
load(file="popSim.Rdata")
# all indiv ever lived in the pop
# calculate relatedness for all indiv alive at the end, and for females alive only
#withParentID <- NLwith(agents = popSim[[(nYearSim + 1)]], var = "motherID", val = 0:1000000) # extract individuals with parent ID
#FemwithParentID <- NLwith(agents = withParentID, var = "sex", val = "F") # females with parent ID
#allRelatedness <- relatedness(listAllInd = popSim, whoInd = of(agents = withParentID, var = "who"))
#FemRelatedness <- relatedness(listAllInd = popSim, whoInd = of(agents = FemwithParentID, var = "who"))
#dim(allRelatedness)
#dim(FemRelatedness)
##############################################################################
# Kinship matrix for mothers ################################################
##############################################################################
# For individuals ever alive in the pop (dead or not at last time step)
dataall <- unique(do.call(rbind, popSim) ) # all indiv ever lived in population
allwithparents <- dataall[!is.na(dataall$motherID),] #use data for individuals with known parents only
#### What does the comma do?
#### comma => take all columns (index of lines to keep before comma, index of columns after comma). Here we want to keep all columns, just the line without NA for motherID
# for now, keep females only, and female-related kin only
AllFem <- allwithparents[allwithparents$sex=="F",] # all females with known mother
# ID of all mothers and grandmothers ever lived in pop
IDMothers <- unique(AllFem[,'motherID'] ) # mothers of at least one female
IDGrandMothers <- unique( AllFem[which(AllFem$who %in% IDMothers),'motherID'] )# grand mother of at least one female
# sisters (same mother ID)
siblings <- tapply(X=AllFem$who,INDEX=AllFem$motherID) # associate per brood based on motherID
sistersdup <- split(x=AllFem$who,f=siblings) # list per brood, with duplicates
sisters <- lapply(sistersdup,unique) # list per brood without duplicates
# For individuals alive at last time step (time step 26)
# NOTE(review): `$` extraction is used on the result of as.matrix() below;
# this only works if popSim[[26]] yields a data-frame-like object (e.g. a
# NetLogoR agentMatrix) rather than a plain matrix -- confirm.
datalast <- as.matrix(popSim[[26]])
datalastF <- datalast[ (datalast$sex=="F" & !is.na(datalast$motherID) ) ,] # keep only females with known mother
#ego IDs
datalastF$who # ID of females alive with known mothers the last time step
nego <- nrow(datalastF) # 90 here
# Mothers alive at last time step
IDMothersAlive <- datalastF$who[datalastF$who %in% IDMothers]
# Ages of mothers alive at last time step
AgeMothersAlive <- datalastF$age[datalastF$who %in% IDMothers]
# Grand Mothers alive at last time step
IDGrandMothersAlive <- datalastF$who[datalastF$who %in% IDGrandMothers]
# Age of Grand Mothers alive at last time step
AgeGrandMothersAlive <- datalastF$age[datalastF$who %in% IDGrandMothers]
# sisters at last time step
siblingsAlive <- tapply(X=datalastF$who,INDEX=datalastF$motherID) # all females arranged in a list in same component with their sisters
sistersdupAlive <- split(x=datalastF$who,f=siblingsAlive)
sistersAlive <- lapply(sistersdupAlive,unique)
# check this is correct
# for sisters #1940 and #2783 should be 12 and 7 yo
datalastF[datalastF$who=='1940',]
datalastF[datalastF$who=='2783',]
table(datalastF$age)
ages<- 1:11
# LOOP FOR PR MOTHER ALIVE
# Builds, for each ego age i, the probability that the mother (kinship_MA)
# and grandmother (kinship_GMA) are alive and of age j at the last time step.
nbMAalive <- kinship_MA <- matrix(NA, nrow=max(ages),ncol=max(ages))
nbGMAalive <- kinship_GMA <- matrix(NA, nrow=max(ages),ncol=max(ages))
n_ego_agei <- n_MA <- n_GMA <-Pr_MA_egoagei <- Pr_GMA_egoagei <-NULL
mums <- list()
for(i in ages){ #loop over ego age
ego_agei <- datalastF[datalastF$age==i,] # ego age i alive at last time step
n_ego_agei[i] <-nrow(ego_agei) # nb of ego age i
if(n_ego_agei[i]==0){
nbMAalive[i,] <- rep(NA,length(ages))
nbGMAalive[i,] <-rep(NA,length(ages))
}else if(n_ego_agei[i]>0){
# for mothers
Mothers.ID <- ego_agei$motherID # mothers ID of ego age i
Mothers.Alive.ID <- ego_agei$motherID[which(Mothers.ID %in% IDMothersAlive)] # alive mothers ID of ego age i
mums[[i]] <- Mothers.Alive.ID
n_MA[i] <- length(Mothers.Alive.ID) # nb of ego age i with mother alive at last time step
Pr_MA_egoagei[i] <- n_MA[i]/ n_ego_agei[i] # proba mother alive for ego age i
# for grand mothers
GM.ID <- AllFem$motherID[match(Mothers.ID, AllFem$who )] # look up the ID of the Grand Mother via the mother's record
GM.Alive.ID <- GM.ID[GM.ID %in% IDGrandMothersAlive]
n_GMA[i] <- length(GM.Alive.ID) # nb of ego age i with grand mother alive at last time step
Pr_GMA_egoagei[i] <- n_GMA[i]/ n_ego_agei[i] # proba grand mother alive for ego age i
# age of mother AND GRAND MOTHER if alive at last time step
Mage <- datalastF$age[match(Mothers.Alive.ID, datalastF$who )] # match ID of mothers alive at last time step in datalast (which includes all females alive at last time step)
GMage <- datalastF$age[match(GM.Alive.ID, datalastF$who )] # match ID of grand mothers alive at last time step in datalast (which includes all females alive at last time step)
for(j in ages){ # loop over mothers ages
# FOR MOTHERS
nbMAalive[i,j] <- sum(Mage==j)
kinship_MA[i,j] <- sum(Mage==j) / n_ego_agei[i] # kinship matrix Pr of mother alive
# FOR GRAND MOTHERS
nbGMAalive[i,j] <- sum(GMage==j)
kinship_GMA[i,j] <- sum(GMage==j) / n_ego_agei[i] # kinship matrix Pr of grand mother alive
} # end loop over mother age
} # end if loop
} # end loop over ego age
Res_summary <- data.frame(ego.age=ages,
nb.ego=n_ego_agei, # number of ego aged i at last time step
nb.ego.MA=n_MA, # number of ego aged i with mother alive at last time step
Pr_MA =Pr_MA_egoagei, # proba mother alive for ego age i
nb.ego.GMA=n_GMA, # number of ego aged i with grand mother alive at last time step
Pr_GMA =Pr_GMA_egoagei) # proba grand mother alive for ego age i
round(Res_summary,2) # summary per ego age
kinship_MA # kinship matrix for mothers
kinship_GMA # kinship matrix for grand mothers
##############################################################################################
### LOOP FOR SISTERS ####
# Expected number of sisters of age j alive for an ego of age i.
##############################################################################################
AvgSisAlive <- matrix(NA, nrow=max(ages),ncol=max(ages))
n_ego_agei <-NULL
for(i in ages){ #loop over ego age
ego_agei <- datalastF[datalastF$age==i,] # ego age i alive at last time step
n_ego_agei[i] <-nrow(ego_agei) # nb of ego age i
if(n_ego_agei[i]==0){
AvgSisAlive[i,] <- rep(NA,length(ages))
}else if(n_ego_agei[i]>0){
Mothers.ID.ego <- ego_agei$motherID # mothers ID of ego age i
for(j in ages){ # loop over sisters ages
# NOTE(review): missing comma -- for a data frame `datalastF[cond]` selects
# COLUMNS, not rows; this likely should be `datalastF[datalastF$age==j,]`
# (works only if datalastF is an agentMatrix-like object) -- confirm.
sis_agej <- datalastF[datalastF$age==j] #individuals of age j alive at last time step
if (j==i){ #same cohort sisters
siblingsAlivei <- tapply(X=sis_agej$who,INDEX=sis_agej$motherID)
sistersdupAlivei <- split(x=sis_agej$who,f=siblingsAlivei)
sistersAlivei <- lapply(sistersdupAlivei,unique)
AvgSisAlive[i,j]<-sum(lengths(sistersAlivei)*(lengths(sistersAlivei)-1))/sum(lengths(sistersAlivei))
} else if (j!=i){ #sisters from different cohorts
Mothers.ID.sis <- sis_agej$motherID
AvgSisAlive[i,j]<- sum(table(Mothers.ID.ego[Mothers.ID.ego %in% Mothers.ID.sis]))/n_ego_agei[i]
} # close "if" sisters from different cohorts
} #close loop over sisters ages
} #close "if" there are more than zero ego age i
} #close loop over age i
AvgSisAlive # kinship matrix for sisters
#########################################################################################
### LOOP FOR CHILDREN & GRANDCHILDREN #####################################
# Expected number of daughters / granddaughters of age j for an ego of age i.
#########################################################################################
nbChAlive <- kinship_Ch <- matrix(NA, nrow=max(ages),ncol=max(ages))
nbGChAlive <- kinship_GCh <- matrix(NA, nrow=max(ages),ncol=max(ages))
n_ego_agei <- n_Ch <- n_Gch <-Avg_Ch_egoagei <- Avg_GCh_egoagei <-NULL
children <- list()
for(i in ages){ #loop over ego age
ego_agei <- datalastF[datalastF$age==i,] # ego age i alive at last time step
n_ego_agei[i] <-nrow(ego_agei) # nb of ego age i
if(n_ego_agei[i]==0){
nbChAlive[i,] <- rep(NA,length(ages))
nbGChAlive[i,] <- rep(NA,length(ages))
}else if(n_ego_agei[i]>0){
# for Children
Children.ID.i <- datalastF$who[which(datalastF$motherID %in% ego_agei$who)] # ID of children of ego age i
n_Ch[i] <- length(Children.ID.i) # nb of children of ego age i alive at last time step
Avg_Ch_egoagei[i] <- n_Ch[i]/ n_ego_agei[i] # average number of children of ego age i
# for grand children: go through all daughters ever born (dead or alive)
DaughtersDAi <- unique(AllFem$who[which(AllFem$motherID %in% ego_agei$who)]) #dead or alive daughters of ego age i
GChildren.ID.i <- datalastF$who[which(datalastF$motherID %in% DaughtersDAi)] #ID of alive grandchildren of ego age i
n_Gch[i] <- length(GChildren.ID.i)
Avg_GCh_egoagei[i] <- n_Gch[i]/ n_ego_agei[i] # average number of grandchildren of ego age i
# age of alive children and grandchildren at last time step
Chage <- datalastF$age[match(Children.ID.i, datalastF$who )] #match ID of children with datalastF to get their age
GChage <- datalastF$age[match(GChildren.ID.i, datalastF$who )] # match ID of grandchildren with datalastF to get their age
for(j in ages){ # loop over children and grandchildren ages
# FOR CHILDREN
nbChAlive[i,j] <- sum(Chage==j)
kinship_Ch[i,j] <- sum(Chage==j) / n_ego_agei[i] # kinship matrix for expected number of children
# FOR GRANDCHILDREN
nbGChAlive[i,j] <- sum(GChage==j)
kinship_GCh[i,j] <- sum(GChage==j) / n_ego_agei[i] # kinship matrix for expected number of grandchildren
} # end loop over children & grand children age
} # end if loop
} # end loop over ego age
Res_summary.desc <- data.frame(ego.age=ages,
nb.ego=n_ego_agei, # number of ego aged i at last time step
nb.Ch=n_Ch, # number of children of ego age i
Avg_Ch =Avg_Ch_egoagei, # expected number of children for ego age i
nb.GCh=n_Gch, # number of grandchildren of ego age i
Avg_GCh =Avg_GCh_egoagei) # expected number of grandchildren for ego age i
round(Res_summary.desc,2) # summary per ego age
kinship_Ch # kinship matrix for children
kinship_GCh # kinship matrix for grandchildren
#########################################################################################
### LOOP FOR AUNTS #######################################################
# NOTE(review): this section is unfinished work in progress. The `for(i ...)`
# loop and its `else if` branch opened below are never closed (unbalanced
# braces), so this script does not parse past this point as written. The body
# also duplicates the sisters computation and writes into AvgSisAlive rather
# than kinship_Aunts. Keep as a draft; do not source this file blindly.
#########################################################################################
#Aunts
kinship_Aunts <- matrix(NA, nrow=max(ages),ncol=max(ages))
n_ego_agei <- n_Aunts <-NULL
for(i in ages){ #loop over ego age
ego_agei <- datalastF[datalastF$age==i,] # ego age i alive at last time step
n_ego_agei[i] <-nrow(ego_agei) # nb of ego age i
if(n_ego_agei[i]==0){
kinship_Aunts[i,] <- rep(NA,length(ages))
}else if(n_ego_agei[i]>0){
# find mothers' & grandmothers' IDs
Mothers.ID.ego <- ego_agei$motherID # mothers ID of ego age i
# GM.ID <- AllFem$motherID[match(Mothers.ID, AllFem$who )] # all GM (dead and alive) of ego age i alive at last time step
for(j in ages){ # loop over sisters ages
sis_agej <- datalastF[datalastF$age==j] #individuals of age j alive at last time step
if (j==i){ #same cohort sisters
siblingsAlivei <- tapply(X=sis_agej$who,INDEX=sis_agej$motherID)
sistersdupAlivei <- split(x=sis_agej$who,f=siblingsAlivei)
sistersAlivei <- lapply(sistersdupAlivei,unique)
AvgSisAlive[i,j]<-sum(lengths(sistersAlivei)*(lengths(sistersAlivei)-1))/sum(lengths(sistersAlivei))
} else if (j!=i){ #sisters from different cohorts
Mothers.ID.sis <- sis_agej$motherID
AvgSisAlive[i,j]<- sum(table(Mothers.ID.ego[Mothers.ID.ego %in% Mothers.ID.sis]))/n_ego_agei[i]
#find grandmother's daughters
# AllPosAuntsi <- AllFem[which(AllFem$motherID %in% GM.ID),] #all possible aunts (i.e. daughters of grandmother: either mother or aunt of ego)
# PosAuntsAlivei <- datalastF[which(AllPosAuntsi$who %in% datalastF$who)] #all possible aunts alive at last time step
#potential aunts split by their mother ID
# AuntsAlivei <- tapply(X=PosAuntsAlivei$who,INDEX=PosAuntsAlivei$motherID)
# AuntsdupAlivei <- split(x=PosAuntsAlivei$who,f=AuntsAlivei)
# AuntsAlivei <- lapply(AuntsdupAlivei,unique)
}
}
###AUNTS
# NOTE(review): `dataF` below is never defined in this file (presumably an
# alive-females table analogous to datalastF) -- confirm before running.
Egoage1 <- dataF[dataF$age==1,]
negoage1 <-nrow(Egoage1)
Egoage1.MID <- Egoage1[!is.na(Egoage1$motherID),]
IDMothersDeadOrAlive <- unique(dataall[which(dataall$who %in% Egoage1.MID$motherID),'who'])
IDGMDeadOrAlive <- unique(dataall[which(dataall$who %in% IDMothersDeadOrAlive),'motherID'])
AllAunts <- dataF[which(dataF$motherID %in% IDGMDeadOrAlive)] #All alive daughters of ego's grandmother, including her mother
#Split Aunts by their mother ID
GroupAunts <- tapply(X=AllAunts$who,INDEX=AllAunts$motherID)
AuntsGrouped <- split(x=AllAunts$who,f=GroupAunts)
AuntsGroupedF <- lapply(AuntsGrouped,unique)
#After we group AllAunts$who by their mother ID, to get the number of aunts we need to subtract 1 from each group of the list
# EXCEPT if the group includes more than one mother, in which case they are all aunts.
#They have to be mothers to an alive individual of the focal age class, and they have to be alive themselves.
MothersAlive.egoage1 <- dataF[which(dataF$who %in% Egoage1$motherID),]
#Tried with the loop below but couldn't get it to work.
# NOTE(review): `n[i]` is assigned from AuntsGroupedF[[1]] (always the first
# group) instead of AuntsGroupedF[[i]] -- looks like an indexing typo.
n <- c(rep(NA,length(AuntsGroupedF)))
for (i in 1:length(AuntsGroupedF)){
if (length(which(MothersAlive.egoage1$who %in% AuntsGroupedF[[i]]) > 1)) {
n[i] <- length(AuntsGroupedF[[1]])
} else if (length(IDMothersDeadOrAlive[[which(IDMothersDeadOrAlive %in% AuntsGroupedF[[i]])]]) == 1){
n[i] <- length(AuntsGroupedF[[1]])-1
}
}
# This loop would only give us the expected number of aunts for individuals of each class.
# We would still need to find a way to split the expected number by age of the aunts too.
# This is tricky because after splitting them by mother ID we are left only with the ID of the aunts,
# but we don't keep their age.
# If we split them by age first, and then by mother ID, for each individual in a group of sisters of a given age,
# we also need to check if their sisters of other ages have had children.
# I haven't figured out how to do this, since the split function
# gives the groups new numbers starting from one, it doesn't keep track of mother ID.
#Split by age
# NOTE(review): the 14 copy-pasted blocks below are candidates for a single
# loop / lapply over ages (e.g. split(AllAunts, AllAunts$age)); kept verbatim
# since this section is draft code.
Aunts1 <-AllAunts[AllAunts$age==1,]
Aunts2 <-AllAunts[AllAunts$age==2,]
Aunts3 <-AllAunts[AllAunts$age==3,]
Aunts4 <-AllAunts[AllAunts$age==4,]
Aunts5 <-AllAunts[AllAunts$age==5,]
Aunts6 <-AllAunts[AllAunts$age==6,]
Aunts7 <-AllAunts[AllAunts$age==7,]
Aunts8 <-AllAunts[AllAunts$age==8,]
Aunts9 <-AllAunts[AllAunts$age==9,]
Aunts10 <-AllAunts[AllAunts$age==10,]
Aunts11 <-AllAunts[AllAunts$age==11,]
Aunts12 <-AllAunts[AllAunts$age==12,]
Aunts13 <-AllAunts[AllAunts$age==13,]
Aunts14 <-AllAunts[AllAunts$age==14,]
#Split Aunts by their age and mother ID
GroupAunts1 <- tapply(X=Aunts1$who,INDEX=Aunts1$motherID)
AuntsGrouped1 <- split(x=Aunts1$who,f=GroupAunts1)
AuntsGroupedF1 <- lapply(AuntsGrouped1,unique)
GroupAunts2 <- tapply(X=Aunts2$who,INDEX=Aunts2$motherID)
AuntsGrouped2 <- split(x=Aunts2$who,f=GroupAunts2)
AuntsGroupedF2 <- lapply(AuntsGrouped2,unique)
GroupAunts3 <- tapply(X=Aunts3$who,INDEX=Aunts3$motherID)
AuntsGrouped3 <- split(x=Aunts3$who,f=GroupAunts3)
AuntsGroupedF3 <- lapply(AuntsGrouped3,unique)
GroupAunts4 <- tapply(X=Aunts4$who,INDEX=Aunts4$motherID)
AuntsGrouped4 <- split(x=Aunts4$who,f=GroupAunts4)
AuntsGroupedF4 <- lapply(AuntsGrouped4,unique)
GroupAunts5 <- tapply(X=Aunts5$who,INDEX=Aunts5$motherID)
AuntsGrouped5 <- split(x=Aunts5$who,f=GroupAunts5)
AuntsGroupedF5 <- lapply(AuntsGrouped5,unique)
GroupAunts6 <- tapply(X=Aunts6$who,INDEX=Aunts6$motherID)
AuntsGrouped6 <- split(x=Aunts6$who,f=GroupAunts6)
AuntsGroupedF6 <- lapply(AuntsGrouped6,unique)
GroupAunts7 <- tapply(X=Aunts7$who,INDEX=Aunts7$motherID)
AuntsGrouped7 <- split(x=Aunts7$who,f=GroupAunts7)
AuntsGroupedF7 <- lapply(AuntsGrouped7,unique)
GroupAunts8 <- tapply(X=Aunts8$who,INDEX=Aunts8$motherID)
AuntsGrouped8 <- split(x=Aunts8$who,f=GroupAunts8)
AuntsGroupedF8 <- lapply(AuntsGrouped8,unique)
GroupAunts9 <- tapply(X=Aunts9$who,INDEX=Aunts9$motherID)
AuntsGrouped9 <- split(x=Aunts9$who,f=GroupAunts9)
AuntsGroupedF9 <- lapply(AuntsGrouped9,unique)
GroupAunts10 <- tapply(X=Aunts10$who,INDEX=Aunts10$motherID)
AuntsGrouped10 <- split(x=Aunts10$who,f=GroupAunts10)
AuntsGroupedF10 <- lapply(AuntsGrouped10,unique)
GroupAunts11 <- tapply(X=Aunts11$who,INDEX=Aunts11$motherID)
AuntsGrouped11 <- split(x=Aunts11$who,f=GroupAunts11)
AuntsGroupedF11 <- lapply(AuntsGrouped11,unique)
GroupAunts12 <- tapply(X=Aunts12$who,INDEX=Aunts12$motherID)
AuntsGrouped12 <- split(x=Aunts12$who,f=GroupAunts12)
AuntsGroupedF12 <- lapply(AuntsGrouped12,unique)
GroupAunts13 <- tapply(X=Aunts13$who,INDEX=Aunts13$motherID)
AuntsGrouped13 <- split(x=Aunts13$who,f=GroupAunts13)
AuntsGroupedF13 <- lapply(AuntsGrouped13,unique)
GroupAunts14 <- tapply(X=Aunts14$who,INDEX=Aunts14$motherID)
AuntsGrouped14 <- split(x=Aunts14$who,f=GroupAunts14)
AuntsGroupedF14 <- lapply(AuntsGrouped14,unique)
# mother IDs of the aunts in each age class
mothersAunts1<-c(Aunts1$motherID)
mothersAunts2<-c(Aunts2$motherID)
mothersAunts3<-c(Aunts3$motherID)
mothersAunts4<-c(Aunts4$motherID)
mothersAunts5<-c(Aunts5$motherID)
mothersAunts6<-c(Aunts6$motherID)
mothersAunts7<-c(Aunts7$motherID)
mothersAunts8<-c(Aunts8$motherID)
mothersAunts9<-c(Aunts9$motherID)
mothersAunts10<-c(Aunts10$motherID)
mothersAunts11<-c(Aunts11$motherID)
mothersAunts12<-c(Aunts12$motherID)
mothersAunts13<-c(Aunts13$motherID)
mothersAunts14<-c(Aunts14$motherID)
#########################################################################################
### KINSHIP MATRIX FOR COUSINS #####################################################
#########################################################################################
|
cb17a7a4110dd4b66c85dcae8dec66d39e84a785
|
2a747f76dca45608ddfea0348230e30555bb865c
|
/ese/WS1920/demo.R
|
38a847e21d2311a3a3c5b5a82cad9223074e2d6c
|
[] |
no_license
|
feigensp/lehre
|
a8efffa20be7f3b93378c7a8c51ab0102b81b903
|
8fbc86e2dcce32b427a56c6d3ffbfc34da97bd2a
|
refs/heads/master
| 2020-08-03T18:42:47.228666
| 2020-01-20T08:13:04
| 2020-01-20T08:13:04
| 211,848,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,145
|
r
|
demo.R
|
# Demo analysis script for response-time (rt) data.

# Read the ';'-separated file; decimal separator is ',' (German locale).
input <- read.csv("C:/../rt.csv", sep=";", dec=",")
rt <- input[,'time..task']

# Minimum/Maximum
minValue <- min(rt)
maxValue <- max(rt)

# Arithmetic mean and median
meanValue <- mean(rt)
medianValue <- median(rt)

# Histogram
rtNum <- as.numeric(unlist(rt))
hist(rtNum)

# Boxplot
boxplot(rtNum)

# Violin plots; install and load package
install.packages("vioplot")
library(vioplot)

# Show violin plot
vioplot(rtNum)

# Write plots to pdf
# open pdf-device
pdf("plots.pdf")
# create plot
# BUG FIX: `boxplot(data)` referenced an undefined object `data`;
# plot the response times instead.
boxplot(rtNum)
# close pdf-device (file might not be readable otherwise)
dev.off()

# Select all rows, in which the programming language is Haskell; from this,
# select the column with the response time.
# BUG FIX: subset() must operate on the full data frame `input` -- the
# extracted vector `rt` has neither a `pl` column nor a 'time..task' column.
rt1 <- subset(input, pl=='haskell')[,'time..task']
rt2 <- subset(input, pl=='Java')[,'time..task']

# T-Test for independent samples
t.test(rt1, rt2)

# Shapiro-Wilk test for normal distribution
shapiro.test(rt)

# Mann-Whitney U test (independent samples)
wilcox.test(rt1, rt2, paired=FALSE)

# Correlation between the two tasks' response times
rtTask2 <- input[,'time2']
plot(rt, rtTask2)
cor.test(rt, rtTask2, method="pearson")
cor.test(rt, rtTask2, method="spearman")

# more at: http://rtutorialseries.blogspot.de/
|
1a0505e8d8953b966a6df5e7fb43529afdcf7121
|
589bf3773cd27c8d5f022747a7344596e7bd8067
|
/R/convert_dates.R
|
637b65177f4380a433b924a268d96e6d2895cb2f
|
[] |
no_license
|
MikkoVihtakari/MarineDatabase
|
974b67991ee2636028f5060e93eae875bd79e800
|
73879eecbedb92e126ce469504b8b18f635ff952
|
refs/heads/master
| 2022-11-06T18:17:31.395171
| 2020-06-29T14:23:09
| 2020-06-29T14:23:09
| 112,598,897
| 1
| 1
| null | 2017-12-08T09:22:11
| 2017-11-30T10:36:22
|
R
|
UTF-8
|
R
| false
| false
| 6,870
|
r
|
convert_dates.R
|
#' @title Convert Excel dates to consistent date formatting
#' @description Converts messy nonstandardized Excel dates to consistent date formatting.
#' @param dt Data.frame for which the date conversions should be made. The date column has to be named as \code{date}.
#' @param date_col Not implemented yet. Name of the column, which contains dates to be converted.
#' @param excel_file path to the Excel file where the dates originate from. Can be left empty, if the date conversion should be done for other type of files (for example .csv or .txt).
#' @param file_ext Extension of the data file. Can be left empty, if \code{dt} originates from another type file than Excel sheet.
#' @param add_time Hours to be added to the ISO 8601 \code{date}s. See Details.
#' @param date_origin The origin for recorded dates in the Excel sheet in "YYYY-MM-DD" format. See Details.
#' @param output_format Character string specifying in which format the date information should be returned. Options: \code{"iso8601"} (default) returns the date column as a character string in ISO 8601 standard, \code{"POSIXct"} returns the column in \code{\link[base]{as.POSIXct}} UTC time format, and \code{"as.Date"} returns the date column in \code{\link[base]{as.Date}} format ignoring hours, minutes and seconds.
#' @return Returns a data.frame equal to \code{dt} with \code{date_col} as character representing ISO 8601 dates.
#' @details Large (biological) datasets are often recorded on Excel sheets with the file going around several computers using different operating systems and locales. This often leads to dates being recorded in multiple formats from text strings, to various Excel date formats and numeric date codes for which the origin date may vary. This function attempts to fix such inconsistencies in date formats and returns the dates as a character column representing ISO 8601 dates. The function is still experimental and due to the many ways of recording dates in Excel, the outcome might differ from the desired outcome. Please check each date returned by the function and report any inconsistencies so that the function can be improved.
#'
#' The \code{add_time} argument can be used to add or subtract hours from the output, if the times do not match with those in the Excel sheet. This can be helpful if your locale or operating system causes an offset between recorded dates.
#'
#' The function also works for other types of messy dates than those recorded in Excel sheets.
#' @import openxlsx
#' @author Mikko Vihtakari
#' @export
convert_dates <- function(dt, excel_file = NULL, file_ext = NULL, add_time = 0, date_origin = "1899-12-30", output_format = "iso8601") {
  # Derive the extension from the file name when one is supplied.
  if(!is.null(excel_file)) file_ext <- MarineDatabase::select(strsplit(excel_file, "\\."), 2)
  # Fix: guard against file_ext being NULL -- the original
  # `is.numeric(...) & file_ext %in% ...` produced logical(0) and made
  # if() error whenever neither excel_file nor file_ext was given.
  if(is.numeric(dt$date) && !is.null(file_ext) && file_ext %in% c("xlsx", "xls")) {
    # Numeric Excel serial dates: let openxlsx resolve the origin.
    dt$temp_date <- openxlsx::convertToDateTime(dt$date, tz = "UTC")
    dt$temp_date <- dt$temp_date + add_time*3600
    dt$date <- strftime(as.POSIXct(dt$temp_date, "UTC"), "%Y-%m-%dT%H:%M:%S%z", tz = "UTC")
  } else {
    if(is.numeric(dt$date)) {
      # Numeric day counts from a non-Excel source: days since date_origin.
      dt$temp_date <- as.POSIXct(as.numeric(dt$date) * (60*60*24), tz = "UTC", origin = date_origin)
      dt$temp_date <- dt$temp_date + add_time*3600
      dt$date <- strftime(as.POSIXct(dt$temp_date, "UTC"), "%Y-%m-%dT%H:%M:%S%z", tz = "UTC")
    } else {
      if(inherits(dt$date, "Date")) {
        # Fix: Date arithmetic adds *days*, so `date + add_time*3600`
        # shifted by add_time*3600 days; convert to POSIXct first so
        # add_time is applied in hours as documented.
        dt$temp_date <- as.POSIXct(dt$date, tz = "UTC") + add_time*3600
        dt$date <- strftime(as.POSIXct(dt$temp_date, "UTC"), "%Y-%m-%dT%H:%M:%S%z", tz = "UTC")
      } else {
        ## If date is character (meaning there are typos), try to fix them
        if(is.character(dt$date)) {
          # TRUE for entries that are not plain numbers, i.e. real text dates.
          temp_date <- suppressWarnings(is.na(as.numeric(dt$date)))
          if(any(temp_date)) {
            # Try a cascade of common formats for each entry.
            temp_date <- lapply(dt$date, function(k) {
              if(grepl("UTC", k)) {
                out <- strptime(k, format = "%Y-%m-%d %H:%M", tz = "UTC")
                out <- out + add_time*3600
              } else {
                out <- strptime(k, format = "%d.%m.%Y %H:%M", tz = "UTC")
                out <- out + add_time*3600
              }
              if(is.na(out) && grepl("\\.", k)) {
                out <- strptime(k, format = "%d.%m.%Y", tz = "UTC")
                out <- out + add_time*3600
              }
              if(is.na(out) && grepl("\\-", k)) {
                out <- strptime(k, format = "%Y-%m-%d", tz = "UTC")
                out <- out + add_time*3600
              }
              if(is.na(out)) { # last resort: treat as numeric day count
                out <- as.POSIXct(as.numeric(k) * (60*60*24), tz = "UTC", origin = date_origin)
                out <- out + add_time*3600
              }
              strftime(as.POSIXct(out, "UTC"), "%Y-%m-%dT%H:%M:%S%z", tz = "UTC")
            })
            temp_date <- unlist(temp_date)
            if(any(is.na(temp_date))) {
              warning("Typo in date format for records ", paste(unique(dt$date[is.na(temp_date)]), collapse = ", "), " on rows ", paste(which(is.na(temp_date)), collapse = ", "), ". NAs produced.")
              dt$date <- temp_date
            } else {
              dt$date <- temp_date
            }} else {
              # All entries were numbers stored as text: numeric day counts.
              dt$temp_date <- as.POSIXct(as.numeric(dt$date) * (60*60*24), tz = "UTC", origin = date_origin)
              dt$temp_date <- dt$temp_date + add_time*3600
              dt$date <- strftime(as.POSIXct(dt$temp_date, "UTC"), "%Y-%m-%dT%H:%M:%S%z", tz = "UTC")
            }
        } else {
          stop("Implement new date conversion. Does not work for these data.")
        }}}}
  # Drop the scratch column before returning.
  dt <- dt[!names(dt) %in% "temp_date"]
  dt$date <- switch(output_format,
                    iso8601 = dt$date,
                    POSIXct = as.POSIXct(dt$date, "UTC"),
                    as.Date = as.Date(dt$date),
                    stop("Output date format is not implemented."))
  return(dt)
}
|
8f2f0a690bc06a2e225eab4ba64aee0616d29a47
|
a7c4723087d16f75add07061726058060008c74e
|
/filepath.r
|
cb977a41bad24d32102604873f8f87cca89223c7
|
[] |
no_license
|
c-zhong/TriageBehaviorGraphAnalysis
|
29c37aff35af2b9518c53e384c0c1188d58f6f4f
|
25a492e5ab6253e1559946835f4a9f03255ba60c
|
refs/heads/master
| 2021-05-28T21:01:06.651241
| 2015-02-25T19:16:21
| 2015-02-25T19:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,752
|
r
|
filepath.r
|
# Build the path of a subject's output file: "output//<subID>.csv".
getFilename <- function(subID) {
  paste0("output//", subID, ".csv")
}
# Build the path of a subject's node file: "data//<subID>.csv".
getNodeFilename <- function(subID) {
  paste0("data//", subID, ".csv")
}
# Build the path of a subject's undirected, unweighted edge file:
# "output1//<subID>.csv".
getEdgeFilename_Undirected_Unweighted <- function(subID) {
  paste0("output1//", subID, ".csv")
}
# Fully directed edges with edge weights. Relations encoded as edges:
#   A is equal to B       (A <----> B)
#   A is comp with B      (A <----> B)
#   A is subed by B       (A  -->  B)
#   A corresponds to B    (A <-->  B)
getEdgeFilename_Definition0 <- function(subID) {
  paste0("output_definition0//", subID, ".csv")
}
# Definition 1 uses the same edge files as definition 0.
getEdgeFilename_Definition1 <- function(subID) {
  paste0("output_definition0//", subID, ".csv")
}
# Definition 2 uses the same edge files as definition 0.
getEdgeFilename_Definition2 <- function(subID) {
  paste0("output_definition0//", subID, ".csv")
}
# Build the path of a plot image: "<dir>//<title>.jpg".
getPlotPath <- function(dir, title) {
  paste0(dir, "//", title, ".jpg")
}
# Build the path of a plot image under a subdirectory:
# "<dir>//<subdir>//<title>.jpg".
getMyPath <- function(dir, subdir, title) {
  paste0(dir, "//", subdir, "//", title, ".jpg")
}
# Build the directory path "plot//<id>" for a given plot id.
getPlotPath_plot <- function(id) {
  paste0("plot//", id)
}
|
8bd3af430bbd8869d153bdd886338c615158678d
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Letombe/Abduction/aim-50-3_4-yes1-4-50/aim-50-3_4-yes1-4-50.R
|
88063bbf65b85db4d03c5aa209b0298b12575040
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 69
|
r
|
aim-50-3_4-yes1-4-50.R
|
2762f1584c7536654c135a7099cf4d4c aim-50-3_4-yes1-4-50.qdimacs 320 754
|
5c27b70b9bd445d9088757b79ffff75bb7563951
|
7164d4515036f3ebce26f3a0b7f0b3031683aaa1
|
/man/table.contvar.Rd
|
5d8859cc4e568bd83f18c8d3fe3e12c2df3d7cb4
|
[] |
no_license
|
kcha193/simarioV2
|
0a96193cf09f32acea2287d76687a9e1ee2eb237
|
66c7bfbb3dfd3dbd7d9b95d7d9b84632e6aa5aca
|
refs/heads/master
| 2023-03-15T14:34:46.222056
| 2023-02-26T01:59:06
| 2023-02-26T01:59:06
| 57,259,491
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 834
|
rd
|
table.contvar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate.r
\name{table.contvar}
\alias{table.contvar}
\title{Display a vector of continuous values in a table using the
breaks supplied.
Attaches a meta attribute with varname}
\usage{
table.contvar(x, breaks, varname)
}
\arguments{
\item{x}{vector of continous values}
\item{breaks}{a numeric vector of two or more cut points
NB: note that the cut point value is not included in the bin
(ie: include.lowest = FALSE)
Therefore the very first cut point must be less than min(x)}
\item{varname}{added as a tag on the meta attribute}
}
\value{
a table (proportions) with names specified by breaks
}
\description{
Display a vector of continuous values in a table using the
breaks supplied.
Attaches a meta attribute with varname
}
|
6e9ce3c0d35ab0ec9b5b4306478f2cd409619422
|
7a6be82ecc13a6deafa51b207915b08336486d91
|
/gganimate.R
|
4dd4c585f87eb62385d2d9e0c8668faec184499c
|
[] |
no_license
|
anerigarg/MSc_data
|
e60ab5293b382a3280b7688bddddbb399bd578d3
|
37b52f80204d8a02d605b3380003e2fc40cee6ab
|
refs/heads/master
| 2023-01-24T11:16:27.387328
| 2020-12-08T19:37:44
| 2020-12-08T19:37:44
| 285,697,250
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,288
|
r
|
gganimate.R
|
# Trophic Group playing with animation ------------------------------------
# Animated, faceted line plot (gganimate) of mean fish density per trophic
# group across visits, faceted by complexity x treatment.
#
# NOTE(review): install.packages() runs on every execution of this script;
# consider removing the calls once the packages are installed.
install.packages("gganimate")
library(gganimate)
install.packages("hrbrthemes")
library(hrbrthemes)
library(ggplot2)
# Summarise mean and sd of density per treatment/complexity/visit/trophic
# group, restricted to the "start" time block.
# NOTE(review): ARD_3, the dplyr verbs/%>%, and scale_color_viridis (viridis
# package) are assumed to be available from earlier in the session -- confirm.
ARD_func_sum3 <- ARD_3 %>%
mutate(treatment = factor(treatment, levels = c("0%", "30%", "50%", "70%", "100%", "control"))) %>%
filter(time_block == "start") %>%
group_by(treatment,complexity, visit, MajorTrophicGroup) %>%
summarize(density.mean = mean(density), density.sd = sd(density))
# Build the animated plot; transition_reveal(visit) draws the lines
# progressively along the visit axis.
troph_anim <- ARD_func_sum3 %>%
ggplot(aes(x = visit,
y = density.mean,
group = MajorTrophicGroup,
colour = MajorTrophicGroup))+
geom_line(size = 1.5,
alpha = 0.8)+
geom_point(size = 2)+
scale_color_viridis(discrete = TRUE, name = "Trophic Group") +
ggtitle("Trophic Group Density over Time") +
facet_grid(complexity~treatment) +
ylab("mean fish density(fish/0.79m^2") +
# theme_ipsum_pub()+
theme(
axis.title = element_text(size = 11),
axis.text = element_text(size = 11),
legend.title = element_text(size = 11),
legend.text = element_text(size = 11),
strip.text = element_text(size = 11)
) +
transition_reveal(visit)
# Render at a fixed size, then save the last rendered animation to gif.
animate(troph_anim, height = 780, width = 1135)
anim_save("troph_anim.gif", animation = last_animation())
print(troph_anim)
anim_save("trophic_test.gif", animation = last_animation(), path = NULL)
# anim_save("trophic_test.gif", anim)
# NOTE(review): the pipeline below duplicates troph_anim verbatim (built
# without assignment) and re-saves the same animation.
ARD_func_sum3 %>%
ggplot(aes(x = visit,
y = density.mean,
group = MajorTrophicGroup,
colour = MajorTrophicGroup))+
geom_line(size = 1.5,
alpha = 0.8)+
geom_point(size = 2)+
scale_color_viridis(discrete = TRUE, name = "Trophic Group") +
ggtitle("Trophic Group Density over Time") +
facet_grid(complexity~treatment) +
ylab("mean fish density(fish/0.79m^2") +
# theme_ipsum_pub()+
theme(
axis.title = element_text(size = 11),
axis.text = element_text(size = 11),
legend.title = element_text(size = 11),
legend.text = element_text(size = 11),
strip.text = element_text(size = 11)
) +
transition_reveal(visit)
anim_save("trophic_test.gif", animation = last_animation(), path = NULL)
|
b664ced9677928e2545867a08803b7d3e8031610
|
89f8118060345bd9ed48d42f75bfcf9ba5c7be46
|
/r_scripts/mysql_read.R
|
956ea0d521310c091faadd4b6967bdbed305d020
|
[] |
no_license
|
DerrickStuckey/gwu-cloud-workshop
|
bcbd52f2a784d5d08751949467c41457acbc6fe7
|
a64cc4f5b0e509c6fa2143f64a0db269fce5e714
|
refs/heads/master
| 2021-01-10T18:04:54.551639
| 2020-11-20T18:10:50
| 2020-11-20T18:10:50
| 44,842,127
| 1
| 8
| null | 2019-09-28T00:32:30
| 2015-10-23T22:37:57
|
Python
|
UTF-8
|
R
| false
| false
| 862
|
r
|
mysql_read.R
|
## Read some data from an AWS MySQL Instance ##
library(RMySQL)
# Connection settings.
# SECURITY NOTE(review): credentials are hard-coded in the script; prefer
# environment variables (Sys.getenv) or a config file kept out of version
# control.
mysql_host <- "gwu-workshop-mysql.cegeieiv8hqw.us-west-2.rds.amazonaws.com"
mysql_user <- "derrick"
mysql_pass <- ""
mysql_dbname <- "mydb"
mysql_port <- 3306
# Open the connection (closed at the bottom with dbDisconnect).
mydb = dbConnect(MySQL(),
user=mysql_user,
password=mysql_pass,
dbname=mysql_dbname,
host=mysql_host,
port=mysql_port)
# Inspect the schema.
dbListTables(mydb)
dbListFields(mydb, 'us_interest_rates')
# read the first row of the table
results = dbSendQuery(mydb, "select * from us_interest_rates")
first_row = fetch(results, n=1)   # n = 1: fetch a single row
first_row
dbClearResult(results)
# read the whole table
results2 = dbSendQuery(mydb, "select * from us_interest_rates")
full_table <- fetch(results2, n=-1)   # n = -1: fetch all remaining rows
nrow(full_table)
head(full_table)
dbClearResult(results2)
dbDisconnect(mydb)
|
635343988a61118d0d2fd1aad5ab2bea09925792
|
94f841e4275860a5b9cf45b4f081888e21659498
|
/tests/temporal_discounting.R
|
4150febafcc4fa458e0c25ffe982f7c2d3a462bd
|
[] |
no_license
|
SVA-SE/freedom
|
33d170c45a9a5639ecbc0ea5714a6ab9b1024f88
|
f6145fe5aa3b1b3db42b07d42ea41a897030a0a2
|
refs/heads/master
| 2023-02-06T10:13:48.392626
| 2023-01-24T10:28:48
| 2023-01-24T10:28:48
| 69,856,790
| 1
| 3
| null | 2022-09-01T09:30:20
| 2016-10-03T09:23:39
|
R
|
UTF-8
|
R
| false
| false
| 1,261
|
r
|
temporal_discounting.R
|
# Input-validation tests for freedom::post_fr() and freedom::prior_fr().
library(freedom)
# post_fr(prior, SSe): posterior probability of freedom must exceed the
# prior when surveillance adds evidence.
ob <- post_fr(0.5, 0.5)
stopifnot(ob > 0.5)
# A prior outside [0, 1] must raise the expected error message.
ob1 <- tools::assertError(post_fr(-0.1, 0.5))[[1]]$message
ob2 <- tools::assertError(post_fr(1.1, 0.5))[[1]]$message
ex <- "The prior probability of freedom cannot be greater than 1 or less than 0"
stopifnot(length(grep(ex, ob1)) == 1L)
stopifnot(length(grep(ex, ob2)) == 1L)
# A system sensitivity outside [0, 1] must raise the expected error message.
ob1 <- tools::assertError(post_fr(0.1, -0.1))[[1]]$message
ob2 <- tools::assertError(post_fr(0.1, 1.1))[[1]]$message
ex <- "System sensitivity cannot be greater than 1 or less than 0"
stopifnot(length(grep(ex, ob1)) == 1L)
stopifnot(length(grep(ex, ob2)) == 1L)
# prior_fr(posterior, p_intro): temporal discounting must lower the prior
# below the previous posterior.
ob <- prior_fr(0.9, 0.01)
stopifnot(ob < 0.9)
# Probability of introduction outside [0, 1] must raise the expected error.
ob1 <- tools::assertError(prior_fr(0.1, -0.1))[[1]]$message
ob2 <- tools::assertError(prior_fr(0.1, 1.1))[[1]]$message
ex <- paste("The annual probability of introduction cannot",
"be greater than 1 or less than 0")
stopifnot(length(grep(ex, ob1)) == 1L)
stopifnot(length(grep(ex, ob2)) == 1L)
# Posterior outside [0, 1] must raise the expected error.
ob1 <- tools::assertError(prior_fr(-0.1, 0.1))[[1]]$message
ob2 <- tools::assertError(prior_fr(1.1, 0.1))[[1]]$message
ex <- paste("The posterior probability of freedom cannot",
"be greater than 1 or less than 0")
stopifnot(length(grep(ex, ob1)) == 1L)
stopifnot(length(grep(ex, ob2)) == 1L)
|
ada9bbc84100726983768512e533e38fa2951c1b
|
da3b39f21e00611f3ef44ed0d4009bc0299e934d
|
/HW1.R
|
117cc434c0d2ac4e1ba7a1120e07aa0b16903ff4
|
[] |
no_license
|
ky2171/ML-HW1
|
c510e0c96388b3c51cf2c14ba67af09834984ea3
|
71280613df1094eef4d4f14120d0a3ba5d483269
|
refs/heads/master
| 2022-11-29T11:43:20.499781
| 2020-08-13T14:48:58
| 2020-08-13T14:48:58
| 287,303,032
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
HW1.R
|
# Question 1
#
# Recurrence: f(1) = f(2) = 1,  f(n) = f(n-1) + f(n-2) + 2*(n-2) for n > 2.
# Computed iteratively in O(n). The original doubly-recursive version made
# an exponential number of calls (recursion(36) needed tens of millions),
# and it returns the same values.
recursion <- function (n) {
  if (n <= 2) {
    return(1)
  }
  prev2 <- 1  # f(k-2)
  prev1 <- 1  # f(k-1)
  for (k in 3:n) {
    current <- prev1 + prev2 + 2 * (k - 2)
    prev2 <- prev1
    prev1 <- current
  }
  prev1
}
print(recursion(36))
# Question 2
#
# binomial(n, m) = C(n, m) via Pascal's rule:
#   C(n, 0) = C(n, n) = 1,  C(n, m) = C(n-1, m) + C(n-1, m-1).
# The original doubly-recursive version re-solved identical subproblems an
# exponential number of times, so binomial(88, 44) could never finish.
# This version builds row n of Pascal's triangle iteratively, O(n^2).
binomial <- function (n, m) {
  if (m == 0 || m == n) {
    return(1)
  }
  row <- 1                      # row for n = 0: c(1)
  for (i in seq_len(n)) {
    row <- c(row, 0) + c(0, row)  # shift-and-add produces the next row
  }
  row[m + 1]
}
print(binomial(88,44))
# Question 3
#
# Greatest common divisor via the Euclidean algorithm.
# The original trial-division loop never initialised its result variable,
# so it errored for coprime inputs (e.g. gcd(3, 7)); Euclid's method is
# correct for all positive integers and runs in O(log min(x, y)).
gcd <- function (x,y) {
  while (y != 0) {
    remainder <- x %% y
    x <- y
    y <- remainder
  }
  x
}
# Apply gcd to the assignment's inputs; LCM(x, y) = x * y / gcd(x, y).
x <- 12306
y <- 32148
cat("The Greatest Common Divisor is",gcd(x,y))
cat("The Smallest Common Multiple is", x*y/gcd(x,y))
# Question 4 (a)
# NOTE(review): user-specific path -- adjust to your local copy of the file.
WHO <- read.csv("/Users/yankeyu/Desktop/WHO copy.csv")
str(WHO)
summary(WHO)
# Flag columns containing three or more missing values.
colSums(is.na(WHO)) >=3
# Question 4 (b)
# Countries with the highest and lowest fertility rate.
WHO$Country[which.max(WHO$FertilityRate)]
WHO$Country[which.min(WHO$FertilityRate)]
# Question 4 (c)
# Region with the smallest standard deviation of GNI.
GNI_sd <-tapply(WHO$GNI, WHO$Region, sd, na.rm = TRUE)
cat (names(GNI_sd[which.min(GNI_sd)]), min(GNI_sd))
# Question 4 (d)
# Mean child mortality among countries with GNI above 20000.
RichCountry = subset (WHO, GNI>20000)
Mean_CM <- mean(RichCountry$ChildMortality, na.rm = TRUE)
cat("The mean child mortality of the rich countries is",Mean_CM)
# Question 4 (e)
# Scatterplot of income level against life expectancy.
plot(WHO$GNI,WHO$LifeExpectancy,xlab = "Income Level",ylab = "Life Expectancy")
|
2fcb2d34f9e667c2dacc60d1fb08bc9ea9ddcf4e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/maxLik/examples/bread.maxLik.Rd.R
|
ad6e11f31796037fe51d758af95d6110bc034c74
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
bread.maxLik.Rd.R
|
# Example script for maxLik's bread() method (sandwich-estimator "bread").
library(maxLik)
### Name: bread.maxLik
### Title: Bread for Sandwich Estimator
### Aliases: bread.maxLik
### Keywords: methods
### ** Examples
## ML estimation of exponential duration model:
# Simulate 100 exponential durations with rate 2.
t <- rexp(100, 2)
# Per-observation log-likelihood for rate parameter theta.
loglik <- function(theta) log(theta) - theta*t
## Estimate with numeric gradient and hessian
a <- maxLik(loglik, start=1 )
# Extract the "bread"
library( sandwich )
bread( a )
# Sanity check: bread equals the variance-covariance matrix scaled by the
# number of observations.
all.equal( bread( a ), vcov( a ) * nObs( a ) )
|
4c335fb433404d7c9ff22ea3051f9ccd07da419e
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5785_0/rinput.R
|
29e945cda610e29ab9dfea586ba08a00d3ec571a
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree back out.
library(ape)
testtree <- read.tree("5785_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5785_0_unrooted.txt")
|
925e41e29dfc661132079c0731655f29d2f9e5c8
|
8af125a9516b1b783da83e7e34eb1a6eda71e0b7
|
/Association Rules/Solution (Movies).R
|
f6e9cec704bc695ddca02045ab1e6f9eda33d830
|
[] |
no_license
|
Abhishek012345/Data-Science-Assignment
|
0b2fa3f1173331703c1f27cd50f880159b121b17
|
2c0dc7f772db4f8868b4801e5cdcaf1388582a08
|
refs/heads/master
| 2022-12-31T15:43:52.865230
| 2020-10-18T06:11:22
| 2020-10-18T06:11:22
| 259,623,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 939
|
r
|
Solution (Movies).R
|
library("arules")
library("readxl")
movie_data <- read.csv(file.choose())
View(movie_data)
library("arulesViz")
arule <- apriori(as.matrix(movie_data[6:15]), parameter = list(support = 0.2, confidence = 0.7))
arule1 <- apriori(as.matrix(movie_data[6:15]), parameter = list(support = 0.06, confidence = 0.8))
arule2 <- apriori(as.matrix(movie_data[6:15]), parameter = list(support = 0.03, confidence = 0.6))
inspect(sort(arule, by="lift"))
plot(arule, jitter=0)
plot(arule, method = "grouped")
plot(arule, method = "graph")
######### Updated diffrent types of plots #########
library(colorspace)
plot(arule, control=list(col=sequential_hcl(100)), jitter = 0)
plot(arule, shading="order", control=list(main = "Two-key plot",
col=rainbow(5)), jitter = 0)
plot(arule, method="matrix", measure=c("lift", "confidence"))
plot(arules, method="paracoord")
|
420c1d55a4573d889ebcfcddcf9f3699b8cea292
|
1cb4d4f9f11ab04500a006d7dfe81530d07c578d
|
/PIBIC 2018-2019-20200413T151926Z-001/PIBIC 2018-2019/Mapa interativo/Rascunho.R
|
67c5df4b45dc242bec059283589b27f73d5059ed
|
[] |
no_license
|
rodolfo-oliveira/tcc
|
86ee2a48f9759713608d0e80039bc6e2fae866f1
|
0340cfa20021438be7e1d4557007bba5aa4e29e4
|
refs/heads/master
| 2023-05-13T18:33:30.288043
| 2021-05-20T22:26:33
| 2021-05-20T22:26:33
| 256,174,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 313
|
r
|
Rascunho.R
|
# Draft: tile grid of travel times per district (dummy data).
library(tidyverse)
library(ggplot2)
a=read.csv(file = "distritos_dummy.csv")
# One 3x3 tile grid per district id (9 tiles x 9 districts = 81 rows).
expand.grid(x =as.factor(1:3),y= as.factor(1:3), id = 1:9) ->df
# Travel time ("Tempo de viagem"), rounded to 2 decimals.
# NOTE(review): assumes a$Tempo.de.viagem has exactly 81 values matching
# expand.grid's row order -- confirm against the CSV.
df$tempo<-round(a$Tempo.de.viagem,2)
# Facet one labelled tile grid per district.
df %>%
ggplot(aes(x,y))+
geom_tile(col = "white")+
facet_wrap(~id,ncol= 3,scales = "free")+
geom_text(aes(label = tempo))
|
7cf37d3915d1e07b420f81d2b7987857af7c0d0f
|
536ffbc0b339513609bbe5c5187ac0a693b7b326
|
/R/get_species_docs.R
|
f85f9e982558163d8ba9e3184a1a7acbeeb9576a
|
[] |
no_license
|
jacob-ogre/nmfsscraper
|
69633e69fb0725a6b6b8e7f81e0b2c22fb75b3d1
|
6dc44f5a41e1d9dfc2553555b803670860f6323b
|
refs/heads/master
| 2020-01-23T21:42:45.786260
| 2016-11-26T13:57:06
| 2016-11-26T13:57:06
| 74,686,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,580
|
r
|
get_species_docs.R
|
#' Get all PDF URLs from query URL
#'
#' Scrapes a page and returns the unique URLs of all linked PDF documents,
#' resolving relative links against the page's domain.
#'
#' @param url The URL to be queried for PDF links
#' @return A character vector of unique absolute PDF URLs, or \code{NULL}
#'   when the page links to no PDFs.
#' @importFrom xml2 read_html
#' @importFrom rvest html_nodes html_attr
#' @export
#' @examples
#' \dontrun{
#' url <- "http://www.nmfs.noaa.gov/pr/species/turtles/green.html"
#' chemyd_pdfs <- get_species_pdf_urls(url)
#' }
get_species_pdf_urls <- function(url) {
  # Derive "scheme://host" from the URL; fall back to the NMFS domain if
  # the URL cannot be split.
  domain <- try(paste(strsplit(url, "/")[[1]][1:3], collapse = "/"),
                silent = TRUE)
  # Fix: use inherits() rather than class(...) == "try-error" (class() can
  # return a vector, making == fragile).
  if(inherits(domain, "try-error")) domain <- "http://www.nmfs.noaa.gov"
  url <- URLencode(url)
  page <- xml2::read_html(url)
  # Collect every href and keep those ending in .pdf/.PDF.
  atag <- rvest::html_nodes(page, "a")
  href <- rvest::html_attr(atag, "href")
  pdfs <- href[grep(href, pattern = "pdf$|PDF$")]
  if(length(pdfs) > 0) {
    # Prefix relative links with the domain; absolute links pass through.
    pdfs <- ifelse(grepl(pdfs, pattern = "^http"),
                   pdfs,
                   paste0(domain, pdfs))
    return(unique(pdfs))
  } else {
    return(NULL)
  }
}
#' Download all PDF documents from NMFS for a species
#'
#' @details Uses \link{get_species_pdf_urls} to fetch a vector of PDF URLs for
#' species documents maintained by the National Marine Fisheries Service (NMFS).
#' Filenames are the \link{basename} of the URL with spaces replaced by "_".
#' Uses \link[pdfdown]{pdfdown}, which returns a data.frame of results, to
#' do the scraping.
#'
#' @param url The URL to query for PDF links
#' @param subd The directory (subdirectory) to which the PDFs are downloaded
#' @return An augmented data.frame from \link[pdfdown]{pdfdown} with:
#' \describe{
#'   \item{url}{Document URL}
#'   \item{dest}{Path to document}
#'   \item{success}{One of Success, Failed, Pre-exist}
#'   \item{pdfCheck}{TRUE if a real PDF, else FALSE}
#'   \item{taxon}{The taxon represented, from the URL}
#' }
#' @importFrom dplyr bind_rows
#' @export
#' @examples
#' \dontrun{
#' url <- "http://www.nmfs.noaa.gov/pr/species/turtles/green.html"
#' dl_res <- get_species_pdfs(url, "~/Downloads/NMFS_rec")
#' }
download_species_pdfs <- function(url, subd = "") {
message(paste("\t\tProcessing:", url))
all_species_pdfs <- get_species_pdf_urls(url)
if(!is.null(all_species_pdfs)) {
# Download each PDF; pdfdown returns one data.frame per document.
res <- lapply(all_species_pdfs, pdfdown::download_pdf, subd = subd)
res <- dplyr::bind_rows(res)
# Derive a taxon label from the last two path segments of the page URL,
# e.g. ".../turtles/green.html" -> "turtles:green".
spp_pt <- strsplit(gsub(url,
pattern = "\\.htm$|\\.html$",
replacement = ""),
split="/")
idx <- length(spp_pt[[1]])
spp <- paste(spp_pt[[1]][(idx-1):idx], collapse=":")
# length(res[[1]]) is the number of rows in res (length of its first column).
res$taxon <- rep(spp, length(res[[1]]))
return(res)
} else {
# No PDFs linked from the page: report a single failed record.
# NOTE(review): this branch omits the taxon column; dplyr::bind_rows in
# callers fills it with NA.
return(data.frame(url = url,
dest = NA,
success = "Failed",
pdfCheck = NA,
stringsAsFactors = FALSE))
}
}
#' Download PDFs of documents linked on NMFS species pages
#'
#' @note This function will only get documents linked from pages linked to
#' NMFS's Protected Resources ESA-listed species page,
#' \url{http://www.nmfs.noaa.gov/pr/species/esa/listed.htm}. In general this
#' means recovery plans and many \emph{Federal Register} documents will not
#' be gathered.
#' @param subd The directory (subdirectory) to which the PDFs are downloaded
#' @return A data.frame combining the per-page results of
#'   \link{download_species_pdfs}.
#' @importFrom dplyr bind_rows
#' @export
#' @examples
#' \dontrun{
#' download_all_species_pdfs()
#' }
download_all_species_pdfs <- function(subd) {
  page_urls <- get_species_pages_links()
  downloads <- lapply(page_urls, download_species_pdfs, subd = subd)
  dplyr::bind_rows(downloads)
}
|
359e3b84de243ff25851952073c64f80d5253d03
|
3b2dab7452682a8ebf0be7b81494dae3e83a8872
|
/man/gemeindegebiet.Rd
|
c3fff82a68a79bf1ac55abb10f777c838014df13
|
[] |
no_license
|
swissgeodata4r/swissvector4r
|
0ac63a0e58a56d246306716c7b39a5fec48e6551
|
bbf78218f39c2f4742e9b16bd1899b8b98cda766
|
refs/heads/master
| 2020-04-14T21:23:50.542502
| 2019-01-26T00:04:27
| 2019-01-26T00:04:27
| 164,127,052
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,046
|
rd
|
gemeindegebiet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{gemeindegebiet}
\alias{gemeindegebiet}
\title{Municipality border (Polygons)}
\format{An \code{sf} object containing polygon data with four features.
\describe{
\item{NAME}{Name of the country}
\item{BEZIRKSNUM}{Official district number}
\item{KANTONSNUM}{Official number of canton}
\item{BFS_NUMMER}{Number of the Swiss official commune register}
\item{EINWOHNERZ}{Number of inhabitants}
\item{GEM_FLAECH}{Area}
\item{geometry}{sfc_POLYGON data in EPSG 2056}
}}
\source{
\url{https://shop.swisstopo.admin.ch/de/products/landscape/boundaries3D}
}
\usage{
gemeindegebiet
}
\description{
Manipulated data from the swissBOUNDARIES3D dataset by \href{http://swisstopo.ch}{swisstopo}.
}
\details{
Manipulation:
\enumerate{
\item Removed z-Values
\item Dropped some columns
\item Singlepart to multipart (\code{\link[sf]{summarise.sf}} corresponds to
\code{\link[sf]{st_union}}?)
}
}
\keyword{datasets}
|
07f411fde1f64a17261ff576a6fa5469144bcf80
|
ea181fde79f86982f610a6e44cc528fe7bb9d220
|
/exercise.R
|
9ad9a3a8d03b214907cef67737523a06945c881f
|
[] |
no_license
|
jenjong/Accounting
|
74f3ce129f7f7408d246503a1ed341350a4a92ae
|
a3690ba862317b4a2d35ee597ece7b2593b3e7c4
|
refs/heads/master
| 2021-01-01T15:19:46.489129
| 2017-07-18T13:13:11
| 2017-07-18T13:13:11
| 97,592,280
| 1
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 16,083
|
r
|
exercise.R
|
rm(list = ls())  # NOTE(review): rm(list = ls()) in scripts is discouraged; kept as-is
gc()
#####################################################
# Section 1: the accounting equation (assets == liabilities + equity)
#####################################################
์์ฐ <- 300000
๋ถ์ฑ <- 200000
์๋ณธ <- 100000
์์ฐ                  # assets (autoprinted)
๋ถ์ฑ + ์๋ณธ           # liabilities + equity
์์ฐ == ๋ถ์ฑ + ์๋ณธ   # the equation holds: TRUE
print(์์ฐ)
options('scipen' = 10)  # prefer fixed over scientific notation when printing
print(์์ฐ)
๋ถ์ฑ + ์๋ณธ
์์ฐ == ๋ถ์ฑ + ์๋ณธ
#####################################################
# Section 2: the extended accounting equation
# (assets + expenses == liabilities + equity + revenue)
#####################################################
์์ฐ <- 500
๋น์ฉ <- 200
์ฐจ๋ณ <- ์์ฐ + ๋น์ฉ   # debit side: assets + expenses
์ฐจ๋ณ
๋ถ์ฑ <- 400
์๋ณธ <- 100
์์ต <- 200
๋๋ณ <- ๋ถ์ฑ + ์๋ณธ + ์์ต   # credit side: liabilities + equity + revenue
๋๋ณ
์ฐจ๋ณ == ๋๋ณ   # both sides balance: TRUE
# The same computation repeated (verbatim duplicate of the block above).
์ฐจ๋ณ <- ์์ฐ + ๋น์ฉ
์ฐจ๋ณ
๋ถ์ฑ <- 400
์๋ณธ <- 100
์์ต <- 200
๋๋ณ <- ๋ถ์ฑ + ์๋ณธ + ์์ต
๋๋ณ
์ฐจ๋ณ == ๋๋ณ
#####################################################
# ์ 3์ ํ๊ณ์ ๊ฑฐ๋ ํ๋ณ
#####################################################
์ฌ๊ฑด1 <- "๋ก๋ณถ์ด ํ๋งค"
์ฌ๊ฑด2 <- "์๋์ค๊ธฐ ์ ์ ์ ๋ฌธ๋ณด๊ธฐ"
์ฌ๊ฑด3 <- "๋์ ์ผ๋น ์ฃผ๊ธฐ"
์ฌ๊ฑด <- c("๋ก๋ณถ์ด ํ๋งค", "์๋์ค๊ธฐ ์ ์ ์ ๋ฌธ๋ณด๊ธฐ", "๋์ ์ผ๋น ์ฃผ๊ธฐ")
์ฌ๊ฑด_ํ๋จ = c(TRUE, FALSE, TRUE)
์ฌ๊ฑด[์ฌ๊ฑด_ํ๋จ]
๊ฒฝ์ ์ ์ฌ๊ฑด <- ์ฌ๊ฑด[์ฌ๊ฑด_ํ๋จ]
๊ฒฝ์ ์ ์ฌ๊ฑด
a <- c(10, 20, 40, 80)
a
b <- c(TRUE, FALSE, TRUE)
b
์ฌ๊ฑด[1]
์ฌ๊ฑด[c(1,2)]
#####################################################
# ์ 4์ ๊ฑฐ๋๋ถ์
#####################################################
์ฐจ๋ณ <- c(0,0)
names(์ฐจ๋ณ) <- c('์์ฐ','๋น์ฉ')
ํ๊ธ <- 15000
์ฐจ๋ณ["์์ฐ"] <- ํ๊ธ
์ฐจ๋ณ
๋๋ณ <- rep(0,3)
๋๋ณ
๋๋ณ <- rep(0,3)
๋๋ณ
names(๋๋ณ) <- c('๋ถ์ฑ', '์๋ณธ', '์์ต')
์๋ณธ๊ธ <- 15000
๋๋ณ['์๋ณธ'] <- ์๋ณธ๊ธ
๋๋ณ
sum(์ฐจ๋ณ)
sum(๋๋ณ)
sum(๋๋ณ) == sum(์ฐจ๋ณ)
#####################################################
# Section 5: journal entries (분개)
#####################################################
# Reconstructed from an encoding-mangled original: Hangul identifiers had
# been split across lines by a UTF-8/latin-1 round trip, and the seven
# near-identical journal-entry blocks are collapsed into a helper.

# Chart of accounts: code, account name, classification.
계정코드 <- 101:113
계정과목명 <- c('현금', '매출채권', '상품', '차량운반구',
           '기계', '매입채무', '미지급금', '장기차입금',
           '자본금', '매출', '매출원가', '급여', '임차료')
계정분류 <- c(rep('자산', 5), rep('부채', 3), '자본', '수익', rep('비용', 3))
계정과목표 <- data.frame(계정코드, 계정과목명, 계정분류)
계정과목표

# Helper: build one journal entry (its debit/credit rows) for a date,
# looking the account codes up in the chart of accounts via match().
분개기록작성 <- function(일자, 계정과목명, 금액, 위치) {
  data.frame(일자 = 일자,
             계정코드 = 계정과목표$계정코드[match(계정과목명, 계정과목표$계정과목명)],
             계정과목명 = 계정과목명,
             금액 = 금액,
             위치 = 위치)
}

# Journal (분개장): one two-row entry per transaction date.
분개장 <- rbind(
  분개기록작성(rep('1월1일', 2), c('현금', '자본금'),       c(100000000, 100000000), c('차변', '대변')),
  분개기록작성(rep('1월2일', 2), c('임차료', '현금'),       c(1000000, 1000000),     c('차변', '대변')),
  분개기록작성(rep('1월3일', 2), c('차량운반구', '현금'),   c(1500000, 1500000),     c('차변', '대변')),
  분개기록작성(rep('1월4일', 2), c('급여', '현금'),         c(700000, 700000),       c('차변', '대변')),
  분개기록작성(rep('1월5일', 2), c('기계', '미지급금'),     c(5000000, 5000000),     c('차변', '대변')),
  분개기록작성(rep('1월6일', 2), c('매출채권', '매출'),     c(4000000, 4000000),     c('차변', '대변')),
  분개기록작성(rep('1월7일', 2), c('현금', '장기차입금'),   c(40000000, 40000000),   c('차변', '대변'))
)
분개장

# Posting to the cash ledger: all journal rows touching '현금'.
가분개 <- 분개장[분개장$계정과목명 == '현금', ]
가분개$금액
# Total credited to cash (original used typographic quotes here — fixed).
sum(가분개$금액[가분개$위치 == '대변'])
#####################################################
# Problem 6: which items count as cash-and-cash-equivalents
#####################################################
# Rebuilt as a single named-vector literal (the original grew the vector
# one element at a time) and with TRUE/FALSE instead of the unsafe T/F.
자산 <- c(통화 = 425000,
        우표 = 10000,
        타인발행당좌수표 = 100000,
        기일이경과한이자표 = 40000,
        배당금통지지급표 = 20000,
        전일자수표 = 120000,
        우편환증서 = 3000,
        직원에대한가불증 = 500000,
        양도성예금증서_취득당시만기4개월 = 4000000)
자산
# TRUE marks the items that qualify as cash equivalents
# (same pattern as the original c(T, F, rep(T, 3), F, T, rep(F, 2))).
자산기록 <- c(TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE)
sum(자산[자산기록])
#####################################################
# Section 7: purchase discount (매입할인)
#####################################################
# Reconstructed from encoding-mangled identifiers ('매입', '매입채무',
# '매입할인(매입)' were split across lines in the original).

# Entry 1: record the payable at the full invoice amount.
일자 <- as.Date(rep('2016-5-1', 2))
계정과목명 <- c('매입', '매입채무')
금액 <- rep(2000, 2)
위치 <- c('차변', '대변')
분개장1 <- data.frame(일자, 계정과목명, 금액, 위치)

# Entry 2: settlement inside the discount window — 2% (40) is recorded
# as a purchase discount against the payable.
일자 <- as.Date(rep('2016-5-1', 3))
계정과목명 <- c('매입', '매입채무', '매입할인(매입)')
금액 <- c(2000, 1960, 40)
위치 <- c('차변', '대변', '대변')
분개장2 <- data.frame(일자, 계정과목명, 금액, 위치)

# The discounted entry applies only when payment occurs within 10 days.
지급일자 <- as.Date('2016-05-09')
if (지급일자 - 일자[1] < 10) print(분개장2) else print(분개장1)

#####################################################
# Section 8: cost of goods sold (매출원가)
#####################################################
기초재고액 <- 30
당기매입액 <- 70
기말재고액 <- 40
# COGS = opening inventory + purchases - closing inventory.
매출원가액 <- 기초재고액 + 당기매입액 - 기말재고액
매출원가액

# Same computation as a reusable function.
매출원가계산함수 <- function(기초재고액, 당기매입액, 기말재고액) {
  기초재고액 + 당기매입액 - 기말재고액
}
매출원가계산함수(기초재고액 = 30, 당기매입액 = 70, 기말재고액 = 40)
#####################################################
# Section 9: inventory costing — FIFO (선입선출법)
#####################################################
# Reconstructed from encoding-mangled identifiers; also fixes a missing
# closing parenthesis in the original `which(cumsum(...) >= ...` demo and
# factors the twice-repeated "issue" step into a helper.
입고 <- data.frame(일자 = as.Date(c('2016-03-01', '2016-03-09', '2016-03-24')),
                 적요 = c('전월이월', '매입', '매입'),
                 수량 = c(5, 15, 20),
                 단가 = c(20000, 18000, 22000))
출고 <- data.frame(일자 = as.Date(c('2016-03-16', '2016-03-29')),
                 적요 = c('매출', '매출'),
                 수량 = c(10, 12),
                 단가 = c(NA, NA))
# Merge receipts and issues, then sort chronologically (order() replaces
# the original sort.int(..., index.return = TRUE) + hard-coded reorder).
입출고 <- rbind(입고, 출고)
입출고 <- 입출고[order(입출고$일자), ]

# FIFO issue: consume the oldest cost layers first; the partially used
# layer keeps whatever quantity the cumulative sum leaves over.
선입선출출고 <- function(재고, 판매수량) {
  누적수량 <- cumsum(재고$수량)
  잔여물품위치 <- min(which(누적수량 >= 판매수량))
  재고 <- 재고[잔여물품위치:nrow(재고), ]
  재고$수량[1] <- 누적수량[잔여물품위치] - 판매수량
  재고
}

# Replay the March movements.
재고 <- data.frame(수량 = 5, 단가 = 20000)             # opening balance
재고 <- rbind(재고, data.frame(수량 = 15, 단가 = 18000)) # purchase 3/09
재고 <- 선입선출출고(재고, 10)                           # sale 3/16
재고 <- rbind(재고, data.frame(수량 = 20, 단가 = 22000)) # purchase 3/24
재고 <- 선입선출출고(재고, 12)                           # sale 3/29
재고

기말재고액 <- sum(재고$수량 * 재고$단가)
기말재고액
기초재고액 <- 입고$수량[1] * 입고$단가[1]
당기매입액 <- sum(입고$수량[-1] * 입고$단가[-1])
판매재고자산원가 <- 기초재고액 + 당기매입액 - 기말재고액
판매재고자산원가

#####################################################
# Section 10: weighted-average unit cost (총평균법)
#####################################################
평균단가 <- sum(입고$단가 * 입고$수량) / sum(입고$수량)
평균단가
#####################################################
# Section 11: inventory costing — moving average (이동평균법)
#####################################################
# Replays every movement in `입출고` (built in Section 9) while keeping a
# single running (quantity, unit-cost) state. The loop below replaces
# both the original step-by-step walkthrough and its hand-rolled
# `for (i in 1:5)` version, generalised to any number of rows.
for (i in seq_len(nrow(입출고))) {
  if (입출고$적요[i] == '전월이월') {
    # Opening balance initialises the stock record.
    재고 <- data.frame(수량 = 입출고$수량[i], 단가 = 입출고$단가[i])
  } else if (입출고$적요[i] == '매입') {
    # Purchase: pool the cost and recompute the average unit price.
    금액 <- 재고$단가 * 재고$수량 + 입출고$수량[i] * 입출고$단가[i]
    수량 <- 재고$수량 + 입출고$수량[i]
    재고$수량 <- 수량
    재고$단가 <- 금액 / 수량
  } else if (입출고$적요[i] == '매출') {
    # Sale: only the quantity changes; the average unit cost is kept.
    재고$수량 <- 재고$수량 - 입출고$수량[i]
  }
}
재고

#####################################################
# Section 12: allowance for doubtful receivables (매출채권의 대손)
#####################################################
대손정보 <- data.frame(연체기간 = c('2<', '2>=&3<', '3>=&6<', '6>='),
                     매출채권잔액 = c(75000, 10000, 5000, 500),
                     과거대손율 = c(0, 0.04, 0.10, 0.25))
# Expected loss: balance in each ageing bucket times its historic loss rate.
대손액 <- sum(대손정보$매출채권잔액 * 대손정보$과거대손율)

항목정보 <- data.frame(구분 = c('지급금액', '부가세', '이자', '수입관세',
                             '설치비용', '복구비용', '유지비용'),
                     금액 = c(100000, 10000, 2000, 5000, 1000, 3000, 1500))

#####################################################
# Section 13: items capitalised into a tangible asset's cost
#####################################################
# VAT and maintenance are excluded; everything else is capitalised.
계상구분 <- c('지급금액', '이자', '수입관세', '설치비용', '복구비용')
sum(항목정보$금액[항목정보$구분 %in% 계상구분])

#####################################################
# Section 14: straight-line depreciation (감가상각)
#####################################################
취득원가 <- 100000
잔존가치 <- 취득원가 * 0.05
내용연수 <- 4
# Equal expense each year; generalised: rep over 내용연수 instead of a
# hard-coded 4.
감가상각비 <- rep((취득원가 - 잔존가치) / 내용연수, 내용연수)
장부가액 <- 취득원가 - cumsum(감가상각비)
장부가액
#####################################################
# Section 15: fair value of a financial liability —
# effective-interest amortisation of a bond
#####################################################
액면금액 <- 400000
발행금액 <- 380000
유효이자율 <- 0.075
액면이자율 <- 0.06

# The original repeated the same five statements four times; a loop
# builds the identical four-period schedule.
사채상각표 <- NULL
기초 <- 발행금액
for (i in 1:4) {
  유효이자 <- 기초 * 유효이자율          # interest expense (effective rate)
  액면이자 <- 액면금액 * 액면이자율      # cash coupon (nominal rate)
  상각 <- 유효이자 - 액면이자            # discount amortised this period
  기말 <- 기초 + 상각                    # carrying amount rolls forward
  사채상각표 <- rbind(사채상각표,
                  data.frame(기초, 유효이자, 액면이자, 상각, 기말))
  기초 <- 기말
}
사채상각표

#####################################################
# Section 16: bond issue price = PV(principal) + PV(coupons)
#####################################################
유효이자율 <- 0.12
기간 <- 3
원금현가계수 <- 1 / (1 + 유효이자율)^기간
원금 <- 100000
원금현재가치 <- 원금 * 원금현가계수
원금현재가치
# Annuity factor: sum of the discount factors for periods 1..기간.
복리 <- (1 + 유효이자율)^(1:기간)
복리
연금현가계수 <- sum(1 / 복리)
표시이자율 <- 0.10
이자현재가치 <- 원금 * 표시이자율 * 연금현가계수
발행금액 <- 원금현재가치 + 이자현재가치
#####################################################
# Section 16 (continued): bond issued off par — rounded PV factors
# and the resulting amortisation schedule
#####################################################
유효이자율 <- 0.08
액면가액 <- 100000
기간 <- 3
표시이자율 <- 0.1
# PV factors rounded to 4 decimals, as in the textbook tables.
원금현가계수 <- round(1 / (1 + 유효이자율)^기간, 4)
원금현재가치 <- 액면가액 * 원금현가계수
복리 <- (1 + 유효이자율)^(1:기간)
연금현가계수 <- round(sum(1 / 복리), 4)
# BUG FIX: the original multiplied the stale `원금` left over from the
# previous example (it only worked because both happened to be 100000);
# the coupon PV must be based on this bond's face value.
이자현재가치 <- 액면가액 * 표시이자율 * 연금현가계수
사채발행금액 <- 원금현재가치 + 이자현재가치
사채발행금액

# Effective-interest schedule; coupon exceeds effective interest here,
# so the carrying amount is written down toward face value each period.
상각표 <- NULL
기초 <- 사채발행금액
for (i in 1:기간) {
  유효이자 <- round(기초 * 유효이자율)
  액면이자 <- 액면가액 * 표시이자율
  사채발행차금상각 <- 액면이자 - 유효이자
  기말 <- 기초 - 사채발행차금상각
  상각표 <- rbind(상각표,
              data.frame(기초 = 기초,
                         유효이자 = 유효이자,
                         액면이자 = 액면이자,
                         사채발행차금상각 = 사채발행차금상각,
                         기말 = 기말))
  기초 <- 기말
}
상각표
|
fb520cfd6e8e3fb1b54c629fd409d884a60e5aa8
|
214216cefc96120cd1fbd484f43e4c1c86904ecd
|
/R/blr-plots.R
|
8e520a0303219410bd1d97994febaedba28a26a2
|
[
"MIT"
] |
permissive
|
rsquaredacademy/blorr
|
9fa8a0fc18b4bbbe91d04124aa4a64f3a47051b3
|
073f672bb830080dd666c7cac4ff5d342b3ce0ac
|
refs/heads/master
| 2023-08-30T17:01:49.220745
| 2021-07-08T11:25:36
| 2021-07-08T11:25:36
| 91,309,738
| 18
| 3
|
NOASSERTION
| 2021-07-08T11:25:37
| 2017-05-15T07:49:49
|
R
|
UTF-8
|
R
| false
| false
| 18,323
|
r
|
blr-plots.R
|
#' Residual vs fitted values plot
#'
#' Plots standardized Pearson residuals against the fitted values of the
#' model, with a horizontal reference line at zero.
#'
#' @inheritParams blr_plot_pearson_residual
#' @param line_color Color of the horizontal line.
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_residual_fitted(model)
#'
#' @importFrom ggplot2 geom_hline
#' @importFrom stats residuals rstandard hatvalues
#'
#' @export
#'
blr_plot_residual_fitted <- function(model, point_color = "blue", line_color = "red",
                                     title = "Standardized Pearson Residual vs Fitted Values",
                                     xaxis_title = "Fitted Values",
                                     yaxis_title = "Standardized Pearson Residual") {

  blr_check_model(model)

  # Standardized Pearson residuals against fitted probabilities.
  pearson_res <- rstandard(model, type = "pearson")
  base_plot <- create_plot(fitted(model), pearson_res, point_color,
                           title, xaxis_title, yaxis_title)

  base_plot + geom_hline(yintercept = 0, color = line_color)
}
#' Residual values plot
#'
#' Plots the standardized Pearson residuals of the model against the
#' observation index.
#'
#' @param model An object of class \code{glm}.
#' @param point_color Color of the points.
#' @param title Title of the plot.
#' @param xaxis_title X axis label.
#' @param yaxis_title Y axis label.
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_pearson_residual(model)
#'
#' @export
#'
blr_plot_pearson_residual <- function(model, point_color = "blue",
                                      title = "Standardized Pearson Residuals",
                                      xaxis_title = "id",
                                      yaxis_title = "Standardized Pearson Residuals") {

  blr_check_model(model)

  # One point per observation, indexed 1..n on the x axis.
  pearson_res <- rstandard(model, type = "pearson")
  create_plot(plot_id(pearson_res), pearson_res, point_color,
              title, xaxis_title, yaxis_title)
}
#' Deviance vs fitted values plot
#'
#' Plots standardized deviance residuals against fitted values, with a
#' horizontal reference line at zero.
#'
#' @inheritParams blr_plot_pearson_residual
#' @param line_color Color of the horizontal line.
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_deviance_fitted(model)
#'
#' @export
#'
blr_plot_deviance_fitted <- function(model, point_color = "blue", line_color = "red",
                                     title = "Deviance Residual vs Fitted Values",
                                     xaxis_title = "Fitted Values",
                                     yaxis_title = "Deviance Residual") {

  blr_check_model(model)

  # rstandard() on a glm defaults to standardized deviance residuals.
  dev_res <- rstandard(model)
  create_plot(fitted(model), dev_res, point_color,
              title, xaxis_title, yaxis_title) +
    geom_hline(yintercept = 0, color = line_color)
}
#' Deviance residual values
#'
#' Plots the standardized deviance residuals against the observation index.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_deviance_residual(model)
#'
#' @export
#'
blr_plot_deviance_residual <- function(model, point_color = "blue",
                                       title = "Deviance Residuals Plot",
                                       xaxis_title = "id",
                                       yaxis_title = "Deviance Residuals") {

  blr_check_model(model)

  dev_res <- rstandard(model)
  create_plot(plot_id(dev_res), dev_res, point_color,
              title, xaxis_title, yaxis_title)
}
#' Leverage vs fitted values plot
#'
#' Plots the hat values (leverage) of each observation against the
#' fitted values.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_leverage_fitted(model)
#'
#' @export
#'
blr_plot_leverage_fitted <- function(model, point_color = "blue",
                                     title = "Leverage vs Fitted Values",
                                     xaxis_title = "Fitted Values",
                                     yaxis_title = "Leverage") {

  blr_check_model(model)

  leverage <- hatvalues(model)
  create_plot(fitted(model), leverage, point_color,
              title, xaxis_title, yaxis_title)
}
#' Leverage plot
#'
#' Plots the hat values (leverage) against the observation index.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_leverage(model)
#'
#' @export
#'
blr_plot_leverage <- function(model, point_color = "blue",
                              title = "Leverage Plot",
                              xaxis_title = "id",
                              yaxis_title = "Leverage") {

  blr_check_model(model)

  leverage <- hatvalues(model)
  create_plot(plot_id(leverage), leverage, point_color,
              title, xaxis_title, yaxis_title)
}
#' Residual diagnostics
#'
#' @description
#' Confidence interval displacement diagnostics (C, CBAR) and measures
#' for detecting ill fitted observations (DIFDEV, DIFCHISQ).
#'
#' @param model An object of class \code{glm}.
#'
#' @return C, CBAR, DIFDEV and DIFCHISQ.
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_residual_diagnostics(model)
#'
#' @export
#'
blr_residual_diagnostics <- function(model) {

  blr_check_model(model)

  # Building blocks: squared Pearson residuals and leverages.
  pearson_sq <- residuals(model, type = "pearson") ^ 2
  leverage   <- hatvalues(model)
  complement <- 1 - leverage

  # cbar = h * r^2 / (1 - h); c = cbar / (1 - h).
  cbar <- (pearson_sq * leverage) / complement

  data.frame(
    c        = cbar / complement,
    cbar     = cbar,
    difdev   = rstandard(model) ^ 2 + cbar,
    difchisq = cbar / leverage
  )
}
#' CI Displacement C plot
#'
#' Plots the confidence interval displacement diagnostic C against the
#' observation index.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_diag_c(model)
#'
#' @export
#'
blr_plot_diag_c <- function(model, point_color = "blue",
                            title = "CI Displacement C Plot",
                            xaxis_title = "id",
                            yaxis_title = "CI Displacement C") {

  blr_check_model(model)

  # `c` is passed unevaluated; extract_diag() deparses it to a column name.
  diag_c <- extract_diag(model, c)
  create_plot(plot_id(diag_c), diag_c, point_color,
              title, xaxis_title, yaxis_title)
}
#' CI Displacement CBAR plot
#'
#' Plots the confidence interval displacement diagnostic CBAR against
#' the observation index.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_diag_cbar(model)
#'
#' @export
#'
blr_plot_diag_cbar <- function(model, point_color = "blue",
                               title = "CI Displacement CBAR Plot",
                               xaxis_title = "id",
                               yaxis_title = "CI Displacement CBAR") {

  blr_check_model(model)

  diag_cbar <- extract_diag(model, cbar)
  create_plot(plot_id(diag_cbar), diag_cbar, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta chisquare plot
#'
#' Plots the change in Pearson chi square (DIFCHISQ) against the
#' observation index to flag ill fitted observations.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_diag_difchisq(model)
#'
#' @export
#'
blr_plot_diag_difchisq <- function(model, point_color = "blue",
                                   title = "Delta Chisquare Plot",
                                   xaxis_title = "id",
                                   yaxis_title = "Delta Chisquare") {

  blr_check_model(model)

  delta_chisq <- extract_diag(model, difchisq)
  create_plot(plot_id(delta_chisq), delta_chisq, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta deviance plot
#'
#' Plots the change in deviance (DIFDEV) against the observation index
#' to flag ill fitted observations.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_diag_difdev(model)
#'
#' @export
#'
blr_plot_diag_difdev <- function(model, point_color = "blue",
                                 title = "Delta Deviance Plot",
                                 xaxis_title = "id",
                                 yaxis_title = "Delta Deviance") {

  blr_check_model(model)

  delta_dev <- extract_diag(model, difdev)
  create_plot(plot_id(delta_dev), delta_dev, point_color,
              title, xaxis_title, yaxis_title)
}
#' DFBETAs panel
#'
#' Panel of plots to detect influential observations using DFBETAs.
#'
#' @param model An object of class \code{glm}.
#' @param print_plot logical; if \code{TRUE}, prints the plot else returns a plot object.
#'
#' @details
#' DFBETA measures the difference in each parameter estimate with and without
#' the influential point. There is a DFBETA for each data point i.e if there
#' are n observations and k variables, there will be \eqn{n * k} DFBETAs. In
#' general, large values of DFBETAS indicate observations that are influential
#' in estimating a given parameter. Belsley, Kuh, and Welsch recommend 2 as a
#' general cutoff value to indicate influential observations and
#' \eqn{2/\sqrt(n)} as a size-adjusted cutoff.
#'
#' @return list; \code{blr_dfbetas_panel} returns a list of tibbles (for
#' intercept and each predictor) with the observation number and DFBETA of
#' observations that exceed the threshold for classifying an observation as an
#' outlier/influential observation.
#'
#' @references
#' Belsley, David A.; Kuh, Edwin; Welsh, Roy E. (1980). Regression
#' Diagnostics: Identifying Influential Data and Sources of Collinearity.
#' Wiley Series in Probability and Mathematical Statistics.
#' New York: John Wiley & Sons. pp. ISBN 0-471-05856-4.
#'
#' @examples
#' \dontrun{
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_dfbetas_panel(model)
#' }
#'
#' @importFrom stats dfbetas
#' @importFrom ggplot2 geom_linerange geom_text annotate
#'
#' @export
#'
blr_plot_dfbetas_panel <- function(model, print_plot = TRUE) {

  blr_check_model(model)

  # One DFBETA series (column) per model parameter, one row per observation.
  dfb <- dfbetas(model)
  n <- nrow(dfb)
  np <- ncol(dfb)
  # Size-adjusted cutoff recommended by Belsley, Kuh & Welsch.
  threshold <- 2 / sqrt(n)

  myplots <- list()
  outliers <- list()

  # Build one plot + one outlier table per parameter.
  for (i in seq_len(np)) {
    d <- dfbetas_data_prep(dfb, n, threshold, i)
    f <- dfbetas_outlier_data(d)
    # eval(substitute(...)) forces the CURRENT value of `i` into the plot
    # call; without it, ggplot's lazy evaluation would render every panel
    # with the loop variable's final value.
    p <- eval(substitute(dfbetas_plot(d, threshold, dfb, i),list(i = i)))
    myplots[[i]] <- p
    outliers[[i]] <- f
  }

  if (print_plot) {
    # grid.arrange lays the per-parameter plots out in two columns.
    # NOTE(review): grid.arrange (gridExtra) is presumably imported
    # elsewhere in the package — not visible in this file chunk.
    suppressWarnings(do.call(grid.arrange, c(myplots, list(ncol = 2))))
  }

  # Label the outlier tables with the corresponding coefficient names.
  names(outliers) <- model_coeff_names(model)
  result <- list(outliers = outliers, plots = myplots)
  invisible(result)
}
#' CI Displacement C vs fitted values plot
#'
#' Plots the confidence interval displacement diagnostic C against the
#' fitted values.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_c_fitted(model)
#'
#' @export
#'
blr_plot_c_fitted <- function(model, point_color = "blue",
                              title = "CI Displacement C vs Fitted Values Plot",
                              xaxis_title = "Fitted Values",
                              yaxis_title = "CI Displacement C") {

  blr_check_model(model)

  diag_c <- extract_diag(model, c)
  create_plot(fitted(model), diag_c, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta chi square vs fitted values plot
#'
#' Plots the change in Pearson chi square (DIFCHISQ) against the fitted
#' values to flag ill fitted observations.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_difchisq_fitted(model)
#'
#' @export
#'
blr_plot_difchisq_fitted <- function(model, point_color = "blue",
                                     title = "Delta Chi Square vs Fitted Values Plot",
                                     xaxis_title = "Fitted Values",
                                     yaxis_title = "Delta Chi Square") {

  blr_check_model(model)

  delta_chisq <- extract_diag(model, difchisq)
  create_plot(fitted(model), delta_chisq, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta deviance vs fitted values plot
#'
#' Plots the change in deviance (DIFDEV) against the fitted values to
#' flag ill fitted observations.
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_difdev_fitted(model)
#'
#' @export
#'
blr_plot_difdev_fitted <- function(model, point_color = "blue",
                                   title = "Delta Deviance vs Fitted Values Plot",
                                   xaxis_title = "Fitted Values",
                                   yaxis_title = "Delta Deviance") {

  blr_check_model(model)

  delta_dev <- extract_diag(model, difdev)
  create_plot(fitted(model), delta_dev, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta deviance vs leverage plot
#'
#' Plots the change in deviance (DIFDEV) against the hat values
#' (leverage).
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_difdev_leverage(model)
#'
#' @export
#'
blr_plot_difdev_leverage <- function(model, point_color = "blue",
                                     title = "Delta Deviance vs Leverage Plot",
                                     xaxis_title = "Leverage",
                                     yaxis_title = "Delta Deviance") {

  blr_check_model(model)

  delta_dev <- extract_diag(model, difdev)
  create_plot(hatvalues(model), delta_dev, point_color,
              title, xaxis_title, yaxis_title)
}
#' Delta chi square vs leverage plot
#'
#' Plots the change in Pearson chi square (DIFCHISQ) against the hat
#' values (leverage).
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_difchisq_leverage(model)
#'
#' @export
#'
blr_plot_difchisq_leverage <- function(model, point_color = "blue",
                                       title = "Delta Chi Square vs Leverage Plot",
                                       xaxis_title = "Leverage",
                                       yaxis_title = "Delta Chi Square") {

  blr_check_model(model)

  delta_chisq <- extract_diag(model, difchisq)
  create_plot(hatvalues(model), delta_chisq, point_color,
              title, xaxis_title, yaxis_title)
}
#' CI Displacement C vs leverage plot
#'
#' Plots the confidence interval displacement diagnostic C against the
#' hat values (leverage).
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_c_leverage(model)
#'
#' @export
#'
blr_plot_c_leverage <- function(model, point_color = "blue",
                                title = "CI Displacement C vs Leverage Plot",
                                xaxis_title = "Leverage",
                                yaxis_title = "CI Displacement C") {

  blr_check_model(model)

  diag_c <- extract_diag(model, c)
  create_plot(hatvalues(model), diag_c, point_color,
              title, xaxis_title, yaxis_title)
}
#' Fitted values vs leverage plot
#'
#' Plots the fitted values against the hat values (leverage).
#'
#' @inheritParams blr_plot_pearson_residual
#'
#' @examples
#' model <- glm(honcomp ~ female + read + science, data = hsb2,
#' family = binomial(link = 'logit'))
#'
#' blr_plot_fitted_leverage(model)
#'
#' @export
#'
blr_plot_fitted_leverage <- function(model, point_color = "blue",
                                     title = "Fitted Values vs Leverage Plot",
                                     xaxis_title = "Leverage",
                                     yaxis_title = "Fitted Values") {

  blr_check_model(model)

  create_plot(hatvalues(model), fitted(model), point_color,
              title, xaxis_title, yaxis_title)
}
# Observation index for the index-based diagnostic plots: 1..n for a
# vector of n values (empty integer vector for empty input).
plot_id <- function(res_val) {
  # seq_along() is the idiomatic, zero-length-safe spelling of
  # seq_len(length(res_val)); behavior is identical.
  seq_along(res_val)
}
# Pull one diagnostic column (c, cbar, difdev or difchisq) from
# blr_residual_diagnostics() by UNQUOTED name, e.g. extract_diag(model, c).
# NSE: `value` is never evaluated; deparse(substitute(value)) captures the
# symbol's text and uses it as the column name.
extract_diag <- function(model, value) {
  vals <- deparse(substitute(value))
  blr_residual_diagnostics(model)[[vals]]
}
# Per-parameter DFBETAS table: observation number, DFBETA value, an
# outlier/normal flag (|dfbeta| >= threshold), an ordered factor version
# of the flag, and a text label (the obs number) for outliers only.
#
# Bug fixes vs the original base-R port of the commented-out tidyverse
# code: `factor(color)` and `ifelse(..., obs, NA)` referenced objects
# that do not exist (must be d$color / d$obs), and the function ended on
# the `d$txt <- ...` assignment, returning the txt vector instead of the
# prepared data frame that dfbetas_plot()/dfbetas_outlier_data() expect.
dfbetas_data_prep <- function(dfb, n, threshold, i) {
  dbetas <- dfb[, i]
  d <- data.frame(obs = seq_len(n), dbetas = dbetas)
  d$color <- ifelse((d$dbetas >= threshold) | (d$dbetas <= -threshold),
                    "outlier", "normal")
  d$fct_color <- ordered(factor(d$color), levels = c("normal", "outlier"))
  # Label only the outliers; NA suppresses the label in geom_text().
  d$txt <- ifelse(d$color == "outlier", d$obs, NA)
  d
}
# Influence plot for one parameter's DFBETAS: needle (linerange) per
# observation, reference lines at 0 and +/- threshold, open-circle points,
# obs-number labels on outliers, and the threshold value annotated in the
# top-right corner. `d` comes from dfbetas_data_prep(); column i of `dfb`
# supplies the parameter name for the title.
dfbetas_plot <- function(d, threshold, dfb, i) {
  ggplot(d, aes(x = obs, y = dbetas, label = txt, ymin = 0, ymax = dbetas)) +
    geom_linerange(colour = "blue") +
    geom_hline(yintercept = c(0, threshold, -threshold), colour = "red") +
    geom_point(colour = "blue", shape = 1) +
    xlab("Observation") + ylab("DFBETAS") +
    ggtitle(paste("Influence Diagnostics for", colnames(dfb)[i])) +
    # na.rm = TRUE silently drops the NA labels on non-outliers.
    geom_text(hjust = -0.2, nudge_x = 0.15, size = 2, family = "serif",
              fontface = "italic", colour = "darkred", na.rm = TRUE) +
    annotate(
      "text", x = Inf, y = Inf, hjust = 1.5, vjust = 2,
      family = "serif", fontface = "italic", colour = "darkred",
      label = paste("Threshold:", round(threshold, 2))
    )
}
# Rows of the prepared DFBETAS table flagged as outliers, keeping the
# observation number and its DFBETA value.
dfbetas_outlier_data <- function(d) {
  # Bug fix: the column is named `dbetas`; selecting the nonexistent
  # 'betas' column raised "undefined columns selected" for every panel.
  d[d$color == "outlier", c("obs", "dbetas")]
}
# Coefficient names of the fitted model (intercept first); used to label
# the per-parameter DFBETAS outlier tables.
model_coeff_names <- function(model) {
  names(coef(model))
}
# Shared scatter-plot builder for all the diagnostic plots above: x/y
# vectors, a point colour, and the three text labels.
create_plot <- function(x, y, point_color, title, xaxis_title, yaxis_title) {
  plot_data <- data.frame(x = x, y = y)
  ggplot(plot_data) +
    geom_point(aes(x = x, y = y), color = point_color) +
    labs(title = title, x = xaxis_title, y = yaxis_title)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.