blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdb99142b9e92eb28602ecad1300dd5a028353fd
|
53a7f0c821ca1735957c0367cbbfbc3621e496ea
|
/corr.R
|
8702d0c6ab1f1d84d2926a9fa8555c3843bdbf6f
|
[] |
no_license
|
basmaNasser/Programming-Assignment-1-Air-Pollution
|
370eb51b7a40d6ffaa21022c4501ce567dd64111
|
3e9911f3f9e1dcafa7c231ae70dd4c9640b56f95
|
refs/heads/master
| 2016-09-06T06:16:26.987974
| 2015-08-22T06:33:37
| 2015-08-22T06:33:37
| 41,194,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,636
|
r
|
corr.R
|
## Compute sulfate/nitrate correlations for monitors with enough complete data.
##
## 'directory' is a character vector of length 1 indicating the location
## of the CSV files.
## 'threshold' is a numeric vector of length 1 indicating the number of
## completely observed observations (on all variables) required to
## compute the correlation between nitrate and sulfate; the default is 0.
## Returns a numeric vector of correlations (length 0 when no monitor
## exceeds the threshold).
corr <- function(directory, threshold = 0) {
  ## complete() (defined in complete.R) reports the number of complete
  ## cases (nobs) per monitor id.
  source('complete.R')
  d <- complete(directory)
  ids_above <- d$id[d$nobs > threshold]
  files <- list.files(directory, full.names = TRUE)
  ## Monitor id i corresponds to the i-th file in the sorted listing, so
  ## index the file list directly instead of scanning it in a loop.
  read_monitor <- function(id) {
    na.omit(read.csv(files[id]))
  }
  ## Preallocate the result instead of growing it with c() in the loop.
  output <- numeric(length(ids_above))
  for (k in seq_along(ids_above)) {
    data <- read_monitor(ids_above[k])
    output[k] <- cor(x = data$sulfate, y = data$nitrate)
  }
  output
}
|
c9566c0bf62e23d53d8c9b2117a553fec767778f
|
ab7049740645bf0a4dadcdae993151506a49dc69
|
/compare estim with true.R
|
ea8e85e898c156e2d28a68c0d1a27cdef2023577
|
[] |
no_license
|
drvalle1/cluster_tsegments_loc
|
52f9f5dc7651430435d4fe8014eb887e4f6476cd
|
3f2d66436b86d70766c755104d14da9c05364b4b
|
refs/heads/master
| 2020-07-29T11:19:46.329862
| 2019-11-19T16:25:16
| 2019-11-19T16:25:16
| 209,779,237
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
compare estim with true.R
|
# Visual comparison of estimated vs. true cluster assignments/parameters.
plot(res$phi[ngibbs, ], type = 'h')

# Cross-tabulate the estimated cluster labels against the true labels.
fim <- data.frame(zestim = res$z[ngibbs, ], ztrue = z.true)
fim1 <- table(fim)
fim1

# For each true cluster (column), pick the estimated label(s) with the
# highest count — this gives the reordering of estimated clusters.
ordem <- numeric()
for (col_idx in 1:ncol(fim1)) {
  best <- which(fim1[, col_idx] == max(fim1[, col_idx]))
  ordem <- c(ordem, best)
}
fim1[ordem, ]

# Scatter estimates against the truth with a red y = x reference line.
compare <- function(true1, estim1) {
  axis_range <- range(c(true1, estim1))
  plot(true1, estim1, ylim = axis_range, xlim = axis_range)
  lines(axis_range, axis_range, col = 'red')
}

theta1 <- matrix(res$theta[ngibbs, ], nrow = nclustmax)
compare(theta.true, theta1[ordem, ])
|
692932b2705dcd6f22146f314855a0bbbfcb0b08
|
40ffcc2188952550ea8db4b0fc940abd9e01e5d4
|
/R/07_AUC_Ranks.R
|
77f124924cfbd9bf65b64cc7e2a38ffe23cc991b
|
[] |
no_license
|
chantalhari/BioScen1.5_SDM
|
7d2d5f7b998dafa0779daeabec6c8a697b9e0981
|
7aa9ed086a0a3e6c03216dfc6aa05e7b0550ddc1
|
refs/heads/master
| 2023-03-16T00:45:45.769026
| 2020-07-29T09:48:22
| 2020-07-29T09:48:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,528
|
r
|
07_AUC_Ranks.R
|
#-#-# Stacked barchart 400 species data #-#-#
# Purpose: for each taxon, read the per-species AUC rank table, find the
# ten climate-variable combinations most often ranked in the top 3, and
# plot how frequently each rank occurs for those combinations.
rm(list=ls())
library(data.table)
library(plyr)
library(ggplot2)
library(lattice)
library(fields)
library(maptools)
library(colorRamps)
library(RColorBrewer)
library(grid)
library(gridExtra)
library(reshape2)
# Specify file dir
# NOTE(review): hard-coded Windows path — adjust per machine.
filedir <- "C:/ProcessedData" # Desktop
# Set taxa
taxa <- c("Amphibian", "Ter_Mammal", "Ter_Bird")
for(i in c(1,2,3)){
#-#-# Summarize frequenzies and plot as stacked bar chart #-#-#
AUCall <- read.csv(paste0(filedir, "/FinalRank_", taxa[i], ".csv"))
head(AUCall)
nrow(AUCall)
#-#-# Select the ten best variable combinations to display in graph #-#-#
AUCallTop <- AUCall ## Add data here
# Ranks of 4 and above are zeroed so only ranks 1-3 count as "best".
AUCallTop$rank[AUCallTop$rank >= 4] <- 0
head(AUCallTop)
AUCallTop <- AUCallTop[,c("Species","Models","rank")]
head(AUCallTop)
AUCTopTable<-data.frame(table(AUCallTop$Models, AUCallTop$rank))
colnames(AUCTopTable) <- c("ClimateVariable","Rank","Frequency")
head(AUCTopTable)
AUCTopTable$Rank <- as.numeric(as.character(AUCTopTable$Rank))
AUCTopTable <- subset(AUCTopTable,Rank >= 1)
head(AUCTopTable)
#View(AUCTopTable)
AUCTopTable$Frequency <- as.numeric(as.character(AUCTopTable$Frequency))
# Sum top-3 frequency per variable combination, then sort descending and
# keep the ten most frequent combinations.
AUCTopTable <- aggregate(Frequency ~ ClimateVariable, AUCTopTable, sum)
AUCTopTable <- AUCTopTable[order(-AUCTopTable$Frequency),]
head(AUCTopTable)
AUCTopVariables <- AUCTopTable[1:10,]
TopVariableList <- as.vector(AUCTopVariables$ClimateVariable)
#-#-# Subset the entire results data frame choosing only the best variables #-#-#
AUCall <- AUCall
# NOTE(review): the assignment below coerces 'rank' to character; ranks
# of 10 and above are pooled into an "Other" category.
AUCall$rank[AUCall$rank >= 10] <- "Other"
head(AUCall)
nrow(AUCall)
AUCSub <- AUCall[,c("Species","Models","rank")]
head(AUCSub)
AUCFreqTable<-data.frame(table(AUCSub$Models, AUCSub$rank))
colnames(AUCFreqTable) <- c("ClimateVariable","Rank","Frequency")
head(AUCFreqTable)
AUCallFinal <- AUCFreqTable[AUCFreqTable$ClimateVariable %in% TopVariableList, ]
library(dplyr)
AUCallFinal %>% arrange(Rank, desc(Frequency)) %>% head()
nrow(AUCallFinal)
#View(AUCallFinal)
AUCallFinal <- subset(AUCallFinal, Frequency > 0)
#-#-# Set colour scheme #-#-#
PaletteBlue2 <-c('blue4','dodgerblue4','deepskyblue','gray20','gray28','gray39','gray49','gray53','gray63','gray73')
#-#-# Extract all label names #-#-#
#labellist <- as.vector(AUCFreqTable$ClimateVariable)
#labellist <- unique(labellist)
# NOTE(review): testMax is computed but not used later in this script.
testMax <- (nrow(AUCall))/23
#-#-# Plot the variables #-#-#
# Stacked bar chart: one bar per top-10 climate variable, filled by rank.
p1s <- ggplot(AUCallFinal, aes(x = ClimateVariable, y = Frequency)) +
geom_bar(aes(fill = Rank), stat="identity") +
scale_fill_manual(values=PaletteBlue2)+
scale_y_continuous() +
guides(fill = guide_legend(ncol = 1))+
theme(panel.background=element_rect(fill="white",colour="white"),
panel.grid=element_blank(),
plot.title = element_text(lineheight=2, face="bold",hjust = 0),
axis.text=element_text(size=8, colour="black"),
axis.title=element_text(size=8),
axis.line=element_line(colour="black"))+
theme(axis.text.x=element_text(angle = -90, hjust = 0))
print(p1s)
grobframe <- arrangeGrob(p1s, ncol = 1, nrow=1)
plot(grobframe)
# Write the figure to a high-resolution TIFF (one file per taxon).
tiff(file = paste0("figures/StackedBar_", taxa[i], "_Top10.tiff"), width = 9000,
height = 4000, units = "px", compression = "lzw", res = 800)
plot(grobframe)
dev.off()
#grobframe <- arrangeGrob(p1,p2,p3, ncol = 3, nrow=1)
#-#-#
boxplot(AUCall$AUC~AUCall$Models,data=AUCall)
}
|
a14c2b1d392371f0d7b8f63a3e3f0417f8af7ab8
|
63d97198709f3368d1c6d36739442efa699fe61d
|
/advanced algorithm/round3/k-server-analysis-master/data/tests/case039.rd
|
4c3205db4975e25996f5ffa93843bb96ee4b56c5
|
[] |
no_license
|
tawlas/master_2_school_projects
|
f6138d5ade91e924454b93dd8f4902ca5db6fd3c
|
03ce4847155432053d7883f3b5c2debe9fbe1f5f
|
refs/heads/master
| 2023-04-16T15:25:09.640859
| 2021-04-21T03:11:04
| 2021-04-21T03:11:04
| 360,009,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,139
|
rd
|
case039.rd
|
20
1 [18, 12, 8] 3 3 3 10 10
2 [18, 12, 9] 1 2 5 2 12
3 [18, 12, 8] 1 2 7 2 14
4 [18, 9, 8] 3 1 8 2 16
5 [3, 9, 8] 5 5 13 10 26
6 [4, 9, 8] 1 2 15 2 28
7 [3, 9, 8] 1 2 17 2 30
8 [4, 9, 8] 1 2 19 2 32
9 [3, 9, 8] 1 2 21 2 34
10 [4, 9, 8] 1 2 23 2 36
11 [3, 9, 8] 1 2 25 2 38
12 [3, 9, 4] 4 1 26 2 40
13 [3, 16, 4] 7 12 38 12 52
14 [3, 17, 4] 1 2 40 2 54
15 [3, 16, 4] 1 2 42 2 56
16 [3, 17, 4] 1 2 44 2 58
17 [3, 16, 4] 1 2 46 2 60
18 [3, 17, 4] 1 2 48 2 62
19 [3, 16, 4] 1 2 50 2 64
20 [3, 17, 4] 1 2 52 2 66
21 [3, 16, 4] 1 2 54 2 68
22 [3, 17, 4] 1 2 56 2 70
23 [3, 16, 4] 1 2 58 2 72
24 [17, 16, 4] 6 2 60 2 74
25 [17, 16, 10] 6 10 70 10 84
26 [17, 16, 11] 1 2 72 2 86
27 [17, 16, 10] 1 2 74 2 88
28 [17, 16, 11] 1 2 76 2 90
29 [17, 16, 10] 1 2 78 2 92
30 [17, 16, 11] 1 2 80 2 94
31 [17, 16, 10] 1 2 82 2 96
32 [17, 16, 11] 1 2 84 2 98
33 [17, 16, 10] 1 2 86 2 100
34 [17, 11, 10] 5 2 88 2 102
35 [3, 11, 10] 6 10 98 10 112
36 [4, 11, 10] 1 2 100 2 114
37 [3, 11, 10] 1 2 102 2 116
38 [4, 11, 10] 1 2 104 2 118
39 [3, 11, 10] 1 2 106 2 120
40 [4, 11, 10] 1 2 108 2 122
41 [3, 11, 10] 1 2 110 2 124
42 [4, 11, 10] 1 2 112 2 126
43 [3, 11, 10] 1 2 114 2 128
44 [4, 11, 10] 1 2 116 2 130
45 [3, 11, 10] 1 2 118 2 132
46 [4, 11, 10] 1 2 120 2 134
47 [3, 11, 10] 1 2 122 2 136
48 [3, 11, 4] 6 0 122 0 136
49 [3, 17, 4] 6 10 132 10 146
50 [3, 18, 4] 1 2 134 2 148
51 [3, 17, 4] 1 2 136 2 150
52 [3, 18, 4] 1 2 138 2 152
53 [3, 17, 4] 1 2 140 2 154
54 [3, 18, 4] 1 2 142 2 156
55 [3, 17, 4] 1 2 144 2 158
56 [3, 18, 4] 1 2 146 2 160
57 [3, 17, 4] 1 2 148 2 162
58 [18, 17, 4] 5 2 150 2 164
59 [18, 17, 10] 6 10 160 10 174
60 [18, 17, 11] 1 2 162 2 176
61 [18, 17, 10] 1 2 164 2 178
62 [18, 17, 11] 1 2 166 2 180
63 [18, 17, 10] 1 2 168 2 182
64 [18, 17, 11] 1 2 170 2 184
65 [18, 17, 10] 1 2 172 2 186
66 [18, 17, 11] 1 2 174 2 188
67 [18, 17, 10] 1 2 176 2 190
68 [18, 17, 11] 1 2 178 2 192
69 [18, 17, 10] 1 2 180 2 194
70 [18, 11, 10] 6 2 182 2 196
71 [4, 11, 10] 6 10 192 10 206
72 [5, 11, 10] 1 2 194 2 208
73 [4, 11, 10] 1 2 196 2 210
74 [5, 11, 10] 1 2 198 2 212
75 [4, 11, 10] 1 2 200 2 214
76 [5, 11, 10] 1 2 202 2 216
77 [4, 11, 10] 1 2 204 2 218
78 [5, 11, 10] 1 2 206 2 220
79 [4, 11, 10] 1 2 208 2 222
80 [5, 11, 10] 1 2 210 2 224
81 [4, 11, 10] 1 2 212 2 226
82 [4, 11, 5] 5 0 212 0 226
83 [4, 17, 5] 6 10 222 10 236
84 [4, 18, 5] 1 2 224 2 238
85 [4, 17, 5] 1 2 226 2 240
86 [4, 18, 5] 1 2 228 2 242
87 [4, 17, 5] 1 2 230 2 244
88 [4, 18, 5] 1 2 232 2 246
89 [4, 17, 5] 1 2 234 2 248
90 [4, 18, 5] 1 2 236 2 250
91 [4, 17, 5] 1 2 238 2 252
92 [4, 18, 5] 1 2 240 2 254
93 [4, 17, 5] 1 2 242 2 256
94 [18, 17, 5] 6 2 244 2 258
95 [18, 17, 11] 6 10 254 10 268
96 [18, 17, 12] 1 2 256 2 270
97 [18, 17, 11] 1 2 258 2 272
98 [18, 17, 12] 1 2 260 2 274
99 [18, 17, 11] 1 2 262 2 276
100 [18, 17, 12] 1 2 264 2 278
101 [18, 17, 11] 1 2 266 2 280
102 [18, 17, 12] 1 2 268 2 282
103 [18, 17, 11] 1 2 270 2 284
104 [18, 12, 11] 5 2 272 2 286
105 [4, 12, 11] 6 10 282 10 296
106 [5, 12, 11] 1 2 284 2 298
107 [4, 12, 11] 1 2 286 2 300
108 [5, 12, 11] 1 2 288 2 302
109 [4, 12, 11] 1 2 290 2 304
110 [5, 12, 11] 1 2 292 2 306
111 [4, 12, 11] 1 2 294 2 308
112 [5, 12, 11] 1 2 296 2 310
113 [4, 12, 11] 1 2 298 2 312
114 [5, 12, 11] 1 2 300 2 314
115 [4, 12, 11] 1 2 302 2 316
116 [5, 12, 11] 1 2 304 2 318
117 [4, 12, 11] 1 2 306 2 320
118 [4, 12, 5] 6 0 306 0 320
119 [4, 18, 5] 6 10 316 10 330
120 [4, 19, 5] 1 2 318 2 332
121 [4, 18, 5] 1 2 320 2 334
122 [4, 19, 5] 1 2 322 2 336
123 [4, 18, 5] 1 2 324 2 338
124 [4, 19, 5] 1 2 326 2 340
125 [4, 18, 5] 1 2 328 2 342
126 [4, 19, 5] 1 2 330 2 344
127 [4, 18, 5] 1 2 332 2 346
128 [19, 18, 5] 5 2 334 2 348
129 [19, 18, 11] 6 10 344 10 358
130 [19, 18, 12] 1 2 346 2 360
131 [19, 18, 11] 1 2 348 2 362
132 [19, 18, 12] 1 2 350 2 364
133 [19, 18, 11] 1 2 352 2 366
134 [19, 18, 12] 1 2 354 2 368
135 [19, 18, 11] 1 2 356 2 370
136 [19, 18, 12] 1 2 358 2 372
137 [19, 18, 11] 1 2 360 2 374
138 [19, 18, 12] 1 2 362 2 376
139 [19, 18, 11] 1 2 364 2 378
140 [19, 12, 11] 6 2 366 2 380
141 [5, 12, 11] 6 10 376 10 390
142 [6, 12, 11] 1 2 378 2 392
143 [5, 12, 11] 1 2 380 2 394
144 [6, 12, 11] 1 2 382 2 396
145 [5, 12, 11] 1 2 384 2 398
146 [6, 12, 11] 1 2 386 2 400
147 [5, 12, 11] 1 2 388 2 402
148 [6, 12, 11] 1 2 390 2 404
149 [5, 12, 11] 1 2 392 2 406
150 [6, 12, 11] 1 2 394 2 408
151 [5, 12, 11] 1 2 396 2 410
152 [5, 12, 6] 5 0 396 0 410
153 [5, 18, 6] 6 10 406 10 420
154 [5, 19, 6] 1 2 408 2 422
155 [5, 18, 6] 1 2 410 2 424
156 [5, 19, 6] 1 2 412 2 426
157 [5, 18, 6] 1 2 414 2 428
158 [5, 19, 6] 1 2 416 2 430
159 [5, 18, 6] 1 2 418 2 432
160 [5, 19, 6] 1 2 420 2 434
161 [5, 18, 6] 1 2 422 2 436
162 [5, 19, 6] 1 2 424 2 438
163 [5, 18, 6] 1 2 426 2 440
164 [19, 18, 6] 6 2 428 2 442
165 [19, 18, 12] 6 10 438 10 452
166 [19, 18, 13] 1 2 440 2 454
167 [19, 18, 12] 1 2 442 2 456
168 [19, 18, 13] 1 2 444 2 458
169 [19, 18, 12] 1 2 446 2 460
170 [19, 18, 13] 1 2 448 2 462
171 [19, 18, 12] 1 2 450 2 464
172 [19, 18, 13] 1 2 452 2 466
173 [19, 18, 12] 1 2 454 2 468
174 [19, 13, 12] 5 2 456 2 470
175 [5, 13, 12] 6 10 466 10 480
176 [6, 13, 12] 1 2 468 2 482
177 [5, 13, 12] 1 2 470 2 484
178 [6, 13, 12] 1 2 472 2 486
179 [5, 13, 12] 1 2 474 2 488
180 [6, 13, 12] 1 2 476 2 490
181 [5, 13, 12] 1 2 478 2 492
182 [6, 13, 12] 1 2 480 2 494
183 [5, 13, 12] 1 2 482 2 496
184 [6, 13, 12] 1 2 484 2 498
185 [5, 13, 12] 1 2 486 2 500
186 [6, 13, 12] 1 2 488 2 502
187 [5, 13, 12] 1 2 490 2 504
188 [5, 13, 6] 6 0 490 0 504
189 [5, 19, 6] 6 10 500 10 514
190 [5, 0, 6] 1 2 502 2 516
191 [5, 19, 6] 1 2 504 2 518
192 [5, 0, 6] 1 2 506 2 520
193 [5, 19, 6] 1 2 508 2 522
194 [5, 0, 6] 1 2 510 2 524
195 [5, 19, 6] 1 2 512 2 526
196 [5, 0, 6] 1 2 514 2 528
197 [5, 19, 6] 1 2 516 2 530
198 [0, 19, 6] 5 2 518 2 532
199 [0, 19, 12] 6 10 528 10 542
200 [0, 19, 13] 1 2 530 2 544
201 [0, 19, 12] 1 2 532 2 546
202 [0, 19, 13] 1 2 534 2 548
203 [0, 19, 12] 1 2 536 2 550
204 [0, 19, 13] 1 2 538 2 552
205 [0, 19, 12] 1 2 540 2 554
206 [0, 19, 13] 1 2 542 2 556
207 [0, 19, 12] 1 2 544 2 558
208 [0, 19, 13] 1 2 546 2 560
209 [0, 19, 12] 1 2 548 2 562
210 [0, 13, 12] 6 2 550 2 564
550 564 492
|
e6622bbfc931e434827afaece7913094cb9eaa36
|
dcd3f8ee8db533ef7cee8fd92ace5072a7c62aca
|
/lib/helper_func.R
|
cd8638beaaec7eb60d152cfa85a3b077be200f74
|
[] |
no_license
|
TZstatsADS/Spr2017-proj5-grp3
|
3ad32c056c3732b75298c0f96d51927ede356ba8
|
1360caf6f9b1e9832950f83ba62f9e3215bb548a
|
refs/heads/master
| 2021-01-19T06:27:45.974270
| 2017-04-28T20:06:56
| 2017-04-28T20:06:56
| 87,463,288
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
helper_func.R
|
## Build train/test sets for outcome 'f' and run model selection.
##
## 'f' is the name of an outcome column in ../data/train.csv (e.g. "gpa");
## it also selects the matching feature list in ../data/Updated_Features/.
## Returns the result of model_selection_con() (defined in
## ../lib/modelFunc.R).
get.error<-function(f){
  ## FIX: a leftover debug line `f="gpa"` used to overwrite the argument
  ## here, so every call silently analyzed "gpa" regardless of input; it
  ## has been removed so the requested outcome is actually used.
  source("../lib/modelFunc.R")
  data.filtered <- read.csv('../data/NAreplaced.csv') #4242 1388
  ## Feature list chosen for this outcome.
  select <- read.csv(paste0('../data/Updated_Features/',f,'_features.csv'),stringsAsFactors = FALSE)
  data.filtered <- subset(data.filtered,select=c("challengeID",select$Codes)) # 4242*64
  label <- read.csv('../data/train.csv')
  label<-na.omit(label)
  label<-subset(label,select=c("challengeID",f))
  ## Keep only rows that have a (non-missing) label.
  Index<-data.filtered$challengeID %in% label$challengeID
  data.train<-data.filtered[Index,]
  data.train<-as.data.frame(data.train)
  data.train<-cbind(label[,-1], data.train[,-1])
  colnames(data.train)[1]<-f
  ## Loads the `categorical` vector of categorical feature codes.
  load("../data/categorical.RData")
  cat=select$Codes[select$Codes %in% categorical]
  data.train[,cat]=lapply(data.train[,cat],factor)
  # create training and test data set
  #set.seed(123)
  train.index <- sample(1:nrow(data.train),800,replace = F)
  train <- data.train[train.index,] #800*64
  test <- data.train[-train.index,] #214*64
  ## Replace factor levels that appear only in the test set with a random
  ## level seen in training, so model predict() does not fail on unseen levels.
  for(i in cat){
    for(j in 1:nrow(test)){
      t=unique(train[,i])
      if(!test[j,i] %in% t){
        test[j,i]=t[sample(1:length(t),1)]
      }
    }
  }
  y<-train[,1]
  model_selection_con(train[,-1], test, y)
}
|
4d150b6d869624c7c7b1da790a8d199848afa505
|
e4258fda44b6eeab74cad978d42a17b4cfb4b6d8
|
/plot4.R
|
a81168ea578eae70d522cbdb644e7763d1b37a3f
|
[] |
no_license
|
Anshupriya2694/ExData_Plotting1
|
781f6b3422662aa48f7c5046f4ab2824fc13ccc7
|
a26b503dbff1f62ffc7a642169eecb694771dcb4
|
refs/heads/master
| 2020-05-26T21:51:32.022248
| 2019-05-24T10:29:29
| 2019-05-24T10:29:29
| 188,387,383
| 0
| 0
| null | 2019-05-24T08:50:47
| 2019-05-24T08:50:47
| null |
UTF-8
|
R
| false
| false
| 1,546
|
r
|
plot4.R
|
## plot4.R — draws a 2x2 panel of household power-consumption plots for
## 2007-02-01 and 2007-02-02 and saves it to plot4.png.
library(lubridate)
library(dplyr)
#Reading Dataset ("?" marks missing values in the source file)
household_power_consumption = read.delim("household_power_consumption.txt", header = T,
                                         sep = ";", na.strings = "?")
#Converting "Date" column using lubridate
household_power_consumption$Date = dmy(household_power_consumption$Date)
#Subsetting to the two days of interest
household_power_consumption = household_power_consumption%>%
  filter(Date == "2007-02-01" | Date == "2007-02-02")
#Converting Time column using lubridate
# FIX: was `household_power_consumption$hms(Time)`, which attempts to call
# a (non-existent) `hms` element of the data frame and errors; call
# lubridate::hms() on the Time column instead.
household_power_consumption$Time = hms(household_power_consumption$Time)
#Pasting Date-Time into a single POSIXct timestamp for the x axis
date_time = as.POSIXct(paste(household_power_consumption$Date,
                             household_power_consumption$Time))
#Plotting
png("plot4.png")
par(mfrow = c(2, 2))
plot(date_time, household_power_consumption$Global_active_power,
     type="l", xlab="", ylab="Global Active Power (kilowatts)")
plot(date_time, household_power_consumption$Voltage,
     type="l", xlab="datetime", ylab="Voltage")
plot(date_time, household_power_consumption$Sub_metering_1,
     type="l", xlab="", ylab="Energy sub metering")
lines(date_time, household_power_consumption$Sub_metering_2, col = "red")
lines(date_time, household_power_consumption$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.5)
plot(date_time, household_power_consumption$Global_reactive_power,
     type = "l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
7c7eff407c5d0a913b990a8421b38be493ffd062
|
6006a87e51de807cae9fe5c6dd054b1c26a035fd
|
/20165164_지현한_실습#13.R
|
7f09b45f79be1f523f1ee1ae22a43f3e031b14f9
|
[] |
no_license
|
HyunHan-Ji/2-1_Data_Science_Bagic
|
af35289af66a31944816db0480ebf2d43fdc85c9
|
0f4ff399eff9158905520d38a23b80f66bd853b2
|
refs/heads/main
| 2023-06-14T14:59:25.611330
| 2021-07-08T12:49:17
| 2021-07-08T12:49:17
| 384,121,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,600
|
r
|
20165164_지현한_실습#13.R
|
# Inspect descriptive statistics using the iris data set
data(iris)
#(1) Per-species min, max, mean, median, standard deviation, mode for iris
# NOTE(review): the original comment also lists the mode, but no mode is
# computed in this step (it appears in step (4) instead).
aggregate(iris[c(1:4)],list(Species=iris$Species),min)
aggregate(iris[c(1:4)],list(Species=iris$Species),max)
aggregate(iris[c(1:4)],list(Species=iris$Species),mean)
aggregate(iris[c(1:4)],list(Species=iris$Species),median)
aggregate(iris[c(1:4)],list(Species=iris$Species),sd)
#(2) Density function of Petal.Length for each species ("setosa", "versicolor", "virginica")
a=split(iris,iris$Species)
plot(density(a$setosa$Petal.Length))
plot(density(a$versicolor$Petal.Length))
plot(density(a$virginica$Petal.Length))
#(3) Box plot of Petal.Length for each species ("setosa", "versicolor", "virginica")
boxplot(a$setosa$Petal.Length)
boxplot(a$versicolor$Petal.Length)
boxplot(a$virginica$Petal.Length)
#(4) For setosa Petal.Length: 1st quartile, 3rd quartile, IQR, lower whisker, upper whisker, mode
# (outer parentheses around each assignment print the value)
x=a$setosa$Petal.Length
(q1=quantile(x,probs=0.25))
(q3=quantile(x,3/4))
(IQR=q3-q1)
(LW=q1-1.5*IQR)
(UW=q3+1.5*IQR)
# Mode: the most frequent value of x
(table(x)[which.max(table(x))])
#(5) Boxplot of setosa Petal.Length; confirm outliers via boxplot.stats
boxplot(x)
boxplot.stats(x)$out
#(6) Identify outliers of setosa Petal.Length using the lower/upper whiskers
(outL=x[which(x<LW)])
(outU=x[which(x>UW)])
#(7) Mark the setosa Petal.Length outliers on a scatter plot
plot(x)
(outiL=which(x %in% outL))
(outiU=which(x %in% outU))
points(outiL,outL,pch="V",col="red")
points(outiU,outU,pch="V",col="red")
|
5e6ddaab70ea183ecb506b53b33fbce13ddce428
|
a51f14302c8e4a2a0a48dc636d035c4e6669f686
|
/man/showNA.Rd
|
1b6dfe3168a948e2dbaf98b942518f042f59f9e2
|
[] |
no_license
|
holgerman/toolboxH
|
b135c6033c015ac0c4906392f613945f1d2763ad
|
fb8a98ee4629dc5fef14b88f2272d559d5d40f30
|
refs/heads/master
| 2022-07-07T22:26:39.857012
| 2022-06-23T14:56:23
| 2022-06-23T14:56:23
| 100,366,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 506
|
rd
|
showNA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/showNA.R
\name{showNA}
\alias{showNA}
\title{FUNCTION_TITLE}
\usage{
showNA(x, showAllNoNA = T, returnAsDataTable = F)
}
\arguments{
\item{x}{PARAM_DESCRIPTION}
\item{showAllNoNA}{PARAM_DESCRIPTION, Default: T}
\item{returnAsDataTable}{PARAM_DESCRIPTION, Default: F}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
\seealso{
}
|
f31f5fdee211239b363fb5c5e24e71c18361ad31
|
ab6da726f9c00cef3fc0fa05a62bab780e5a773a
|
/man/grief_mods.Rd
|
f9e0b8985d5b189206a8ce1ca50541d5312df8a6
|
[] |
no_license
|
cran/RcmdrPlugin.MA
|
ad670acc924cc9677d742844e9cf1c63ebbfa0a4
|
139dd4967917038b6323700a8278f31a1d739491
|
refs/heads/master
| 2021-01-17T17:07:20.976556
| 2014-09-21T00:00:00
| 2014-09-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
rd
|
grief_mods.Rd
|
\name{grief_mods}
\alias{grief_mods}
\title{grief intervention data for meta-analysis (moderator data)}
\description{This file should be merged with the 'grief' dataset by 'id' column.}
\keyword{data}
|
46b98bdea6515ca61d3e0a39dbcf06c99fa122c3
|
25a70bb0cd477731470ecfbbcd306316d05871a1
|
/DEDA_Class_2018WS_Quantlet_Scraper/code_output/SFEvar_pot_backtesting.R
|
75526a6cf157dfb0562a847b957fd1401d153773
|
[] |
no_license
|
QuantLet/DEDA_Class_2018WS
|
1b1f7d1a699f88af8da5727c5a3bc1935e4eda5b
|
8d9831201c9cdc72ab9f456ed56dae81ebcab07c
|
refs/heads/master
| 2020-04-20T20:10:50.696342
| 2019-03-02T15:25:58
| 2019-03-02T15:25:58
| 169,069,574
| 0
| 5
| null | 2019-03-14T15:14:02
| 2019-02-04T11:38:21
|
HTML
|
UTF-8
|
R
| false
| false
| 1,442
|
r
|
SFEvar_pot_backtesting.R
|
# clear variables and close windows
rm(list = ls(all = TRUE))
graphics.off()
# load data
# v: Value-at-Risk estimates (peaks-over-threshold model) for the portfolio
v = t(read.table("VaR0012_pot_Portf.dat"))
x1 = read.table("BAYER_close_0012.dat")
x2 = read.table("BMW_close_0012.dat")
x3 = read.table("SIEMENS_close_0012.dat")
x4 = read.table("VW_close_0012.dat")
# Size of window
h = 250
# VaR estimates are flipped to the loss side for comparison with P/L.
v = -v
# Portfolio value = sum of the four closing-price series
V = x1 + x2 + x3 + x4
d = dim(V)
# L: daily profit/loss (first differences of the portfolio value)
L = V[-1, ] - V[1:(d[1] - 1), ]
# NOTE(review): T here shadows base R's TRUE shorthand within this script.
T = length(L)
outlier = matrix(, 1, T - h)
exceedVaR = matrix(, , )
exceedVaR = (L[(1 + h):(d[1] - 1)] < v[1:(T - h)]) # Check for exceedances
# Record the P/L value at each exceedance position; non-exceedances stay NA.
for (j in 1:(T - h)) {
if (exceedVaR[j] == TRUE)
{
outlier[j] = L[j + h]
} # Find exceedances
}
# K: indices of actual exceedances (the non-NA entries of outlier)
K = which(is.finite(outlier))
outlier = outlier[K]
p = round(sum(exceedVaR)/(T - h), 4) # Calculate the exceedance ratio
# Plot the values, VaR estimation and the exceedances
plot(L[(h + 1):(d[1] - 1)], pch = 18, col = "blue", ylim = c(-415, 415), xlab = c(""),
ylab = c(""), axes = FALSE)
box()
axis(1, seq(0, length = 8, by = 500), seq(2000, 2014, by = 2))
axis(2)
title("Peaks Over Threshold Model")
points(K, outlier, pch = 18, col = "magenta")
lines(v, col = "red", lwd = 2)
# yplus: constant baseline just below the minimum P/L, used to mark
# exceedance positions along the bottom edge of the plot
yplus = K * 0 + min(L[(h + 1):(d[1] - 1)]) - 2
points(K, yplus, pch = 3, col = "dark green")
legend("topright", c("Profit/Loss", "VaR", "Exceedances"), pch = c(18, 15, 18),
col = c("blue", "red", "magenta"))
# Print the exceedances ratio
print(paste("Exceedances ratio:", "", p))
|
7e6414fe623cd1b5e5365aa3ed3717b80119ce89
|
89aac7aaa9d1a8bd95c5ca63cc03147fb545cb24
|
/run_analysis.R
|
6248acf88d2094ded329cda1af72823831792d4b
|
[] |
no_license
|
SheanD/TidyData
|
da0a87f3c6fc764a60a586fe0a79ed7245173cc8
|
35e658d3492f20cd212ddcf5f9ac4b585073ed03
|
refs/heads/master
| 2021-01-20T10:46:06.657769
| 2015-01-26T00:28:44
| 2015-01-26T00:28:44
| 29,837,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,245
|
r
|
run_analysis.R
|
## run_analysis.R :
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
## NOTE(review): machine-specific working directory — adjust per machine.
setwd("C:/Users/Shean Dalton/Documents/R/WorkingDirectory")
if (!require("data.table")) {
  install.packages("data.table")
}
if (!require("microbenchmark")) {
  install.packages("microbenchmark")
}
if (!require("dplyr")) {
  install.packages("dplyr")
}
require("data.table")
require("microbenchmark")
require("dplyr")
library("data.table")
library("microbenchmark")
library("dplyr")
## PRESTEPS
## DOWNLOAD FILE DOWNLOAD STEPS
## URL
URLFILE <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
## DEFINE Zipped Data File
dataFileZIP <- "./getdata-projectfiles-UCI-HAR-Dataset.zip"
## Directory
dirFile <- "./UCI HAR Dataset"
## TIDY DATA FILES: Directory and filename
tidyDataFile <- "./tidy-UCI-HAR-dataset.txt"
tidyDataFileAVGtxt <- "./tidy-UCI-HAR-dataset-AVG.txt"
## Download (. ZIP) only if not already present
if (file.exists(dataFileZIP) == FALSE) {
  download.file(URLFILE, destfile = dataFileZIP)
}
## Uncompress data file only if not already unpacked
if (file.exists(dirFile) == FALSE) {
  unzip(dataFileZIP)
}
## 1. Merges the training and the test sets to create one data set.
## READ FILES, SET DATATABLES, STANDARDIZE COLUMN NAMES IN PREP TO USE dplyr
## FIX: the activity_labels path was "UCI Har Dataset" (wrong case), which
## fails on case-sensitive file systems; corrected to "UCI HAR Dataset".
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", stringsAsFactors = FALSE, col.names = c("activity_id", "activity_label"))
measurement_labels <- read.table("UCI HAR Dataset/features.txt", row.names = 1, stringsAsFactors = FALSE, col.names = c("id", "fnames"))
measurement_labels <- measurement_labels$fnames
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = c("subject_id"))
measurements_train <- read.table("UCI HAR Dataset/train/X_train.txt")
names(measurements_train) <- measurement_labels
activity_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = c("activity_id"))
train <- cbind(subject_train, measurements_train, activity_train)
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = c("subject_id"))
measurements_test <- read.table("UCI HAR Dataset/test/X_test.txt")
names(measurements_test) <- measurement_labels
activity_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = c("activity_id"))
test <- cbind(subject_test, measurements_test, activity_test)
## THE ONE DATASET
mydata <- rbind(train, test)
##2 Extracts only the measurements on the mean and standard deviation for each measurement.
measurments <- grep("mean\\(|std\\(", names(mydata), value = TRUE)
mydata <- mydata[, c("subject_id", "activity_id", measurments)]
## 3. Uses descriptive activity names to name the activities in the data set
mydata <- inner_join(mydata, activity_labels, by = "activity_id")
## FIX: this line previously referenced an undefined variable `meas`,
## which aborted the script; the intended variable is `measurments`.
mydata <- mydata[, c("subject_id", "activity_label", measurments)]
## 4. Appropriately label the data set with descriptive activity names.
cnames <- names(mydata)[-(1:2)]
cnames <- gsub("\\-|\\(|\\)", "", cnames)
cnames <- gsub("^t", "time_", cnames)
cnames <- gsub("^f", "freq_", cnames)
cnames <- gsub("Gyro", "Gyro_", cnames)
cnames <- gsub("Acc", "accel_", cnames)
cnames <- gsub("Mag", "mag_", cnames)
cnames <- gsub("mean", "_mean", cnames)
cnames <- gsub("BodyBody", "body", cnames)
cnames <- gsub("(.+)(std|mean)(X$|Y$|Z$)", "\\1\\3\\2", cnames)
cnames <- gsub("mean", "_mean", cnames)
cnames <- gsub("std", "_std", cnames)
cnames <- gsub("__", "_", cnames)
cnames <- tolower(cnames)
names(mydata)[-(1:2)] <- cnames
## 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
## FIX: the pipeline previously piped straight into write.table(), so
## `tidydata` captured write.table()'s NULL return instead of the summary
## data; the write step is now separate.
tidydata <- mydata %>%
  group_by(subject_id, activity_label) %>%
  summarise_each(funs(mean))
## Generate the output text file from the tidy dataset
write.table(tidydata, "./output.txt", row.names = FALSE)
|
d633ed4954cbb5c8575be27aabd51146ac612a38
|
5a049e922aaa0abe891c657db31d48b7fa699457
|
/R/eapr_export.R
|
c826e8057b46987d03b94604bbe54ef9c48e5d4d
|
[] |
no_license
|
GregoryBrownson/EAPR
|
be24cb725a6d11501c4cfab909cb08875ac66a99
|
b6cd2f20451c396e9734821fac0af92064e3d99a
|
refs/heads/master
| 2020-04-28T13:36:28.559368
| 2019-10-28T12:19:33
| 2019-10-28T12:19:33
| 175,310,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 163
|
r
|
eapr_export.R
|
#' Write operations for eapr
#'
#' This function exports an eapr object to a file
#'
#' NOTE(review): the body is currently an empty stub — nothing is written
#' yet; the implementation is pending.
#'
#' @param x An eapr object.
#'
#' @export
write.eapr <- function(x) {
}
|
41bede536b4922b597fb6ac85735f6f45a4e3fc1
|
d59430497b1fab82c62f09e7fc01c49cec73644b
|
/R/listQTL.r
|
c468f1f4b321a5c865c31698a6024947a78463ae
|
[] |
no_license
|
liuyufong/AnimalGene2QTL
|
5b4734e177cab8fcd1a1351b5d575dba0af6df0a
|
cfaf0205e21ec4ab3aa27e94aabda485f433e7a1
|
refs/heads/master
| 2021-01-01T10:41:03.593993
| 2017-08-17T13:43:28
| 2017-08-17T13:43:28
| 97,571,834
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
listQTL.r
|
#' list of QTL database
#'
#' Queries the bundled animalqtldb.db SQLite database for the QTL version
#' table and returns it with readable column names.
#'
#' @return result A data frame with columns "QTL" and "version".
#' @export
#' @importFrom RSQLite dbConnect
#' @importFrom RSQLite dbDisconnect
#' @importFrom RSQLite dbGetQuery
#' @importFrom RSQLite SQLite
#' @import knitr
#' @examples
#' listQTL()
listQTL <- function() {
  con <- dbConnect(SQLite(), system.file("extdata", "animalqtldb.db", package = "AnimalQTLDB"))
  ## FIX: close the connection even if the query fails (the original
  ## leaked it on error) and fail loudly when the connection is invalid
  ## (the original then referenced an undefined `result`).
  on.exit(dbDisconnect(con), add = TRUE)
  if (!isS4(con)) {
    stop("failed to open connection to animalqtldb.db", call. = FALSE)
  }
  result <- dbGetQuery(con, "SELECT * from QTL_Version")
  colnames(result) <- c("QTL", "version")
  return(result)
}
|
6b1d43a853969ba4d4f5e7395f1e503dc101783f
|
0b80621e1bbe82153c38fb7446c5fcd5ea65f35b
|
/R/table_annotated_features.R
|
0e1b8b8728dfd3e93d29ffc26ddffbe6abd6a770
|
[
"CC-BY-4.0"
] |
permissive
|
brgordon17/fvfm-prediction
|
ec56920efee4b6b0256103ca12c6012720894585
|
e140b9a5c65c26bc1676b0afa0c00184d0d503fb
|
refs/heads/master
| 2020-06-22T23:20:08.825421
| 2020-06-17T11:01:28
| 2020-06-17T11:01:28
| 198,428,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,924
|
r
|
table_annotated_features.R
|
# Code to construct a table of annotated features matching literature (coralmz)
# Author: Benjamin R. Gordon
# Date: 2019-03-17
library(tidyverse)
library(caret)
# Load and prep data -----------------------------------------------------------
mzrf_fvfm <- readRDS("./dev/mzrf_model_fvfm.rds")
coralmz <- coralmz::coralmz
mzdata_raw <- readr::read_csv("./data-raw/mzdata-raw.csv", na = "0")
colnames(mzdata_raw)[1] <- "mz_raw"
# cross reference 20 impvars ---------------------------------------------------
# Take the 20 most important m/z features from the random-forest model.
fvfm_impvars <- varImp(mzrf_fvfm, scale = FALSE)
fvfm_impvars <- as_tibble(fvfm_impvars$importance, rownames = "mz")
fvfm_impvars <-
fvfm_impvars %>%
mutate(mz = gsub("mz_", "", mz)) %>%
rowwise() %>%
transmute(
mz = as.numeric(mz),# warnings arise from features with two decimal points
importance = max(Overall)) %>%
arrange(desc(importance)) %>%
slice(1:20)
# Add variables for 50 ppm error ranges
# mz_neutral subtracts the proton mass (1.007276) to get the neutral mass.
ppm <- 50
mz_matches <-
fvfm_impvars %>%
rowwise() %>%
mutate(adduct = NA,
mz_neutral = mz - 1.007276,
mz_low = mz_neutral - (mz * ppm/10^6),
mz_high = mz_neutral + (mz * ppm/10^6)) %>%
ungroup()
# Cross reference with coralmz
# The dummy = TRUE columns force a full cross join, which is then pruned
# to rows whose monoisotopic mass falls inside the ppm window.
mz_matches <-
mz_matches %>%
mutate(dummy = TRUE) %>%
left_join(coralmz %>% mutate(dummy = TRUE)) %>%
filter(monoiso_mass <= mz_high, monoiso_mass >= mz_low) %>%
select(-dummy,
-mz_neutral,
-mz_low,
-mz_high,
-importance)
# cross reference any adducts --------------------------------------------------
# identify adducts from impvars
adduct_matches <-
fvfm_impvars %>%
mutate(dummy = TRUE) %>%
left_join(mzdata_raw %>% mutate(dummy = TRUE)) %>%
filter(near(mz, mz_raw, tol = .0001)) %>%
select(-dummy,
-5:-182,
-pcgroup,
-mz_raw) %>%
mutate(mz_neutral = as.numeric(str_sub(adduct, str_length(adduct)-6, -1))) %>%
mutate(adduct = str_sub(adduct, 1, str_length(adduct)-8)) %>%
filter(!is.na(mz_neutral))
# add variables for 50ppm error
ppm <- 50
adduct_matches <-
adduct_matches %>%
rowwise() %>%
mutate(mz_low = mz_neutral - (mz_neutral * ppm/10^6),
mz_high = mz_neutral + (mz_neutral * ppm/10^6)) %>%
ungroup()
# Cross reference with coralmz
# Same dummy-join / mass-window filter as above, now for adduct masses.
adduct_matches <-
adduct_matches %>%
mutate(dummy = TRUE) %>%
left_join(coralmz %>% mutate(dummy = TRUE)) %>%
filter(monoiso_mass <= mz_high, monoiso_mass >= mz_low) %>%
select(-dummy,
-mz_neutral,
-mz_low,
-mz_high,
-isotopes,
-importance)
# Construct table --------------------------------------------------------------
matches <- bind_rows(mz_matches, adduct_matches)
# remove generic referencing and replace commas
matches <-
matches %>%
mutate(endnote_ref = stringr::str_replace(endnote_ref, ",", ";")) %>%
select(-ref)
# Save csv
readr::write_csv(matches, "./tables/important_variable_matches.txt")
|
9472c2f4fcce70f066533513b87cf49ab2db004f
|
710289d08c4809f3e17d35788c6de449a6cc211a
|
/man/mc_pr_plot.Rd
|
e704df457097cd885acaee1528055e59748b8366
|
[] |
no_license
|
gweissman/gmish
|
2e2c4b3f35aaca1b05c03ef208cb082b20b66f4c
|
c0dbd66de0039712cd1a2241e75477f70405f8b0
|
refs/heads/master
| 2023-07-31T22:00:11.274740
| 2023-07-11T16:54:12
| 2023-07-11T16:54:12
| 177,597,987
| 5
| 1
| null | 2022-11-10T03:40:46
| 2019-03-25T14:02:48
|
R
|
UTF-8
|
R
| false
| true
| 1,438
|
rd
|
mc_pr_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mc_pr_plot.R
\name{mc_pr_plot}
\alias{mc_pr_plot}
\title{Produce a precision-recall plot for a set of predicted probabilities for a single model across multiple classes.}
\usage{
mc_pr_plot(form, data, max_intervals = 1000)
}
\arguments{
\item{form}{A formula where the left-hand side is the set variable representing the observed outcomes for each class, 0 or 1. The right-hand side represents the column names of the different class probabilities. The names of the columns don't matter to the model, but the order of the observed (on the left) and predicted (on the right) should align.}
\item{data}{A data frame that contains one observed and one predicted column for each class.}
\item{max_intervals}{The maximum number of thresholds to evaluate. Default = 1000.}
}
\description{
Produce a precision-recall plot for a set of predicted probabilities for a single model across multiple classes.
}
\examples{
library(ranger)
library(palmerpenguins)
pp <- penguins[complete.cases(penguins),]
m1 <- ranger(species ~ island + bill_length_mm + flipper_length_mm + body_mass_g + sex,
data = pp, probability = TRUE)
p_obj <- predict(m1, data = pp)
results <- data.frame(p_obj$predictions, ohe(pp$species, drop_ref = FALSE))
mc_pr_plot(pp.species_Adelie + pp.species_Chinstrap + pp.species_Gentoo ~
Adelie + Chinstrap + Gentoo,
data = results)
}
|
527aa866a39ee4e1491ca05529f54a1ef377e1ba
|
7b9eea03ffcca6c104e23f56f839281c1286b16b
|
/flixbus_crawler.R
|
10ed7ed6da9cb2a2901c7f2694997840466020bb
|
[] |
no_license
|
Compscjournalism/midnight_bus_train_comparison
|
98a5c096ad97ba5cc26e2d2fc36f9844a703497c
|
c89d421d693b36920be6f92c97e93a1ee0ed2841
|
refs/heads/master
| 2022-11-03T02:14:39.190415
| 2020-06-19T07:33:49
| 2020-06-19T07:33:49
| 273,432,405
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,489
|
r
|
flixbus_crawler.R
|
library(RCurl)
library(XML)
library(stringr)
#################################################################################################
#<- This script contains the methods used to scrape the Flixbus.de website for both the list of
#<- cities and popular connections as well as the actually scheduled connections for a particular day
#################################################################################################
#store popular connections in a global variable
popular_connections_database <<- data.frame()
#' getting all the cities involved in the network
#'
#' @return dataframe of all cities in Germany connected to Flixbus network
#' Get all cities in the Flixbus network
#'
#' Scrapes the Flixbus Germany overview page and collects every city linked
#' from it, together with the URL of its city-specific page
#' (e.g. https://www.flixbus.de/fernbus/aichach).
#'
#' @return A data.frame with two columns: V1 (city name) and V2 (link to the
#'   city-specific page). The default V1/V2 names are kept deliberately:
#'   downstream code (adding_city_data) indexes the frame via test$V2.
get_all_cities <- function(){
  url <- "https://www.flixbus.de/fernbus/deutschland"
  page <- getURL(url)
  tpage <- htmlParse(page)
  # Every anchor in the network section: the anchor text is the city name,
  # the attribute set (href) is the link to the city page.
  # (The original code also ran two single-node probe queries here whose
  # results were discarded; that dead code has been removed.)
  cities <- cbind(xpathSApply(tpage, "/html/body/section[2]/div[2]/div/div[2]/div/div//a", xmlValue),
                  xpathSApply(tpage, "/html/body/section[2]/div[2]/div/div[2]/div/div//a", xmlAttrs))
  cities <- as.data.frame(cities)
  return(cities)
}
#' City-level data
#' Method scrapes the city-specific page of a city and generates data on the city ID, geo-reference and popular connections
#' of that city (city link e.g. https://www.flixbus.de/fernbus/bochum)
#' @param url_bayreuth
#'
#' @return
#' City-level data
#'
#' Scrapes the city-specific page of a city (e.g.
#' https://www.flixbus.de/fernbus/bochum) and extracts the internal Flixbus
#' city ID, the geo-coordinates and the "popular connections" listed on the
#' page. Popular connections are appended to the global
#' popular_connections_database as a side effect.
#'
#' @param url_bayreuth URL of the city-specific Flixbus page.
#'
#' @return A list of length 2: [[1]] the numeric city ID, [[2]] a character
#'   vector c(latitude, longitude).
get_city_level_data <- function(url_bayreuth){
  mycurl <- getCurlHandle()
  curlSetOpt(cookiejar= "~/Rcookies", curl = mycurl)
  rawpage <- getURL(url_bayreuth,
                    curl= mycurl,
                    useragent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
                    timeout = 60,
                    followlocation = TRUE)
  tpage_bayreuth <- htmlParse(rawpage)
  # The city ID and coordinates live inside an inline javascript block.
  # The relevant script is reliably the 9th <script> element on the page,
  # so we index it directly instead of scanning all scripts.
  tmp <- xpathSApply(tpage_bayreuth, "//script", xmlValue)[[9]]
  city_id_row <- str_extract(tmp, "city1Id: \"[0-9]{1,3}")
  city_id <- as.numeric(str_extract(city_id_row, "[0-9]{2,3}"))
  # geo locations
  city_lat <- gsub(x = str_extract(tmp, "lat: [0-9]{1,2}\\.[0-9]{1,9}"), pattern = "lat: ", replacement = "" )
  city_long <-gsub(x = str_extract(tmp, "lon: [0-9]{1,2}\\.[0-9]{1,9}"), pattern = "lon: ", replacement = "")
  # the node storing popular connections
  popular_connections <- xpathSApply(tpage_bayreuth, "//a[@class='popular-connection__links__item__connection']")
  # If popular connections exist, store each one as a (Start, End) tuple.
  if(length(popular_connections) > 0){
    travel_tupel_basic <- c("Start", "End")
    for(index in seq_along(popular_connections)){
      travel_tupel <- xpathSApply(popular_connections[[index]], "./div/div", xmlValue)
      travel_tupel_basic <- rbind(travel_tupel_basic, travel_tupel)
    }
    city_favorable_connections <- as.data.frame(travel_tupel_basic)
    city_favorable_connections$generated_from <- city_id
    # BUGFIX: use <<- so the global database (declared at the top of the
    # file) is actually updated. The original used a plain <-, which created
    # a local binding that was discarded when the function returned.
    popular_connections_database <<- dplyr::bind_rows(popular_connections_database,
                                                      city_favorable_connections)
  }
  return(list(city_id, c(city_lat, city_long)))
}
# adding city data to the overall frame
#' Add city-level data
#'
#'Method uses the list of city names create in the function get_all_cities and applies the
#'get_city_level data to merge both city names and data for the cities
#' @param test
#'
#' @return
#' @export
#'
#' @examples
#' Add city-level data
#'
#' Takes the city list created by get_all_cities() and, for every city,
#' applies get_city_level_data() to merge city names with latitude,
#' longitude and the Flixbus city ID.
#'
#' @param test data.frame as returned by get_all_cities(); column V2 must
#'   contain the URL of the city-specific page.
#'
#' @return The input data.frame extended by the columns lat, long and city_id.
adding_city_data <- function(test){
  pb <- txtProgressBar(min = 0, max = nrow(test), initial = 0)
  for(i in seq_len(nrow(test))){
    setTxtProgressBar(pb, i)
    scraped_info <- get_city_level_data(test$V2[i])
    # Unpack the list returned by get_city_level_data:
    # [[2]] = c(lat, long), [[1]] = city ID.
    test$lat[i] <- scraped_info[[2]][1]
    test$long[i] <- scraped_info[[2]][2]
    test$city_id[i] <- scraped_info[[1]]
  }
  close(pb)
  # BUGFIX: the original returned the undefined name `test_entire_complete`,
  # which raised an error after the (long) scraping loop finished.
  # Also removed the unused local `favorite_connections`.
  return(test)
}
#' Scraping bus connection data
#' For a defined connection (from, destination and date) this method generates a dataframe holding all
#' scheduled bus connections from the flixbus website
#'
#' @param from
#' @param destination
#' @param date
#'
#' @examples https://shop.flixbus.de/search?departureCity=309&arrivalCity=88&rideDate=27.04.2020
#' Scraping bus connection data
#'
#' For a defined connection (from, destination, date) this scrapes the
#' Flixbus shop search page and builds dataframes of all scheduled bus
#' connections, split into direct and indirect (change-of-line) connections.
#' If no connection is scheduled on the searched day, the crawler follows
#' the "alternative connections" link Flixbus displays and scrapes those
#' instead (possibly with a different departure/arrival city and date).
#'
#' @param from numeric Flixbus city ID of the departure city
#' @param destination numeric Flixbus city ID of the arrival city
#' @param date travel date string, format dd.mm.yyyy
#'
#' @return "Bug" when the website blocked the request; otherwise a list of
#'   two elements: direct connections (data.frame, or the string
#'   "no direct lines found") and indirect connections (data.frame, or the
#'   string "no indrect lines found"). When neither scheduled nor
#'   alternative connections are found, a diagnostic message is printed.
#'
#' @examples https://shop.flixbus.de/search?departureCity=309&arrivalCity=88&rideDate=27.04.2020
travel_options_crawl <- function(from, destination, date){
  #create the URL structure where to find the scheduled connection data
  travel_page_string <- paste("https://shop.flixbus.de/search?departureCity=",
                              from,
                              "&arrivalCity=",
                              destination,
                              "&rideDate=",
                              date,
                              sep = "")
  #section to get the HTML doc
  mycurl <- getCurlHandle()
  curlSetOpt(cookiejar= "~/Rcookies", curl = mycurl)
  rawpage <- getURL(travel_page_string,
                    curl= mycurl,
                    useragent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36",
                    timeout = 60,
                    followlocation = TRUE)
  travel_page <- htmlParse(rawpage)
  # all direct-search result nodes of transport type "bus"
  tmp <- xpathSApply(travel_page,
                     "//div[@id='search-result-direct']//div[@class='b col-xs-12 rides-list currency-eur']//div[@data-transport-type='bus']")
  #create the dataframe layout for direct and indirect connections (which are stored separately)
  direct_connections_dataframe <- c("departure", "arrival","price_edited", "duration")
  indirect_connections_dataframe <- c("departure", "arrival","price_edited", "duration", "shift_duration", "shift_location")
  #if the page does not contain any connection information, sth. went wrong (occurred in ~0.5% of all scraped connections)
  website_bug <- xpathSApply(travel_page, "/html/body/div[1]/section[2]/div/div/div[1]/div[2]/div/p", xmlValue)
  if(typeof(website_bug) == "character"){
    if(grepl("Etwas ist schief gelaufen", website_bug)){ #"Etwas ist schief gelaufen" is the error message displayed by flixbus
      #usually returned when the website blocked the scraping process (thus see city_data_crawler why crawling is made in batches)
      return("Bug")
    }
  }
  #we use these booleans later, for default we set them to FALSE
  direct_line_present <- FALSE
  indirect_line_present <- FALSE
  #if we observe bus connections on the website (length(tmp) > 0) then go through each listed connection stored in tmp and derive the data
  if(length(tmp) > 0){
    for(index in seq(1, length(tmp))){
      travel_data_table <- xpathSApply(tmp[[index]], ".//div[@class='col-xs-12 ride-stations']//tbody")[[1]]
      #get departure time
      departure_raw <- xpathSApply(travel_data_table, "./tr/td/div[@class='flix-connection__time departure-time']", xmlValue)
      departure <- gsub(x = departure_raw, pattern = "\n", "")
      departure <- gsub(x = departure, pattern = " ", "")
      #get arrival time
      arrival_raw <- xpathSApply(travel_data_table, "./tr/td/div[@class='flix-connection__time']", xmlValue)
      arrival <- gsub(x = arrival_raw, pattern = "\n", "")
      arrival <- gsub(x = arrival, pattern = " ", "")
      #get price
      price_raw <- xpathSApply(tmp[[index]], ".//span[@class='num currency-small-cents']", xmlValue)[[1]]
      price_edited <- as.double(gsub(x = str_extract(price_raw, "[0-9]{1,3},[0-9]{1,3}"), pattern = ",", replacement = "."))
      #duration
      #BUGFIX: was tmp[[1]], which always copied the duration of the FIRST
      #listed connection onto every row instead of the current connection's
      duration_raw <- xpathSApply(tmp[[index]], ".//div[@class='col-xs-12 duration ride__duration ride__duration-messages']",
                                  xmlValue)[[1]]
      duration <- gsub(x = duration_raw, pattern = "\n", "")
      duration <- gsub(x = duration, pattern = " ", "")
      duration <- (gsub(x = duration, pattern = "Std.", ""))
      #check if change of bus line is required
      # if this element exists, then there is a change of line
      change_of_lines <- ifelse(length(xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]")) > 0,
                                TRUE, FALSE)
      #depending on the binary value of change_of_lines we then either check for the change of line or not
      if(!change_of_lines){
        direct_line_present <- TRUE
        direct_connections_dataframe <- rbind(direct_connections_dataframe, c(departure, arrival,price_edited, duration))
      }
      if(change_of_lines){
        indirect_line_present <- TRUE
        waiting_time_raw <- xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]/span//div[@class='light-gray']", xmlValue)[[1]]
        waiting_time <- as.numeric(str_extract(waiting_time_raw, "[0-9]{1,3}"))
        location_change <- xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]/span/div", xmlValue)[[1]]
        indirect_connections_dataframe <- rbind(indirect_connections_dataframe, c(departure, arrival,price_edited, duration,
                                                                                  waiting_time, location_change))
      }
    }
    #bind the results from the loop scraping data together to one dataframe, again differentiating direct and indirect lines
    direct_connections <- direct_connections_dataframe
    if(direct_line_present){
      #edit the direct connections saved
      direct_connections <- as.data.frame(direct_connections,
                                          col.names = c("departure", "arrival","price_edited", "duration"),
                                          row.names = NULL)
      direct_connections <- direct_connections[-1,]
      direct_connections$from <- from
      direct_connections$destination <- destination
      direct_connections$date <- date
      colnames(direct_connections) <- c("Departure", "Arrival", "Price", "Duration", "From", "Destination",
                                        "Date")
    }
    if(!direct_line_present){
      direct_connections <- "no direct lines found"
    }
    indirect_connections <- indirect_connections_dataframe
    if(indirect_line_present){
      indirect_connections <- as.data.frame(indirect_connections)
      indirect_connections <- indirect_connections[-1,]
      indirect_connections$from <- from
      indirect_connections$destination <- destination
      indirect_connections$date <- date
      colnames(indirect_connections) <- c("Departure", "Arrival", "Price", "Duration",
                                          "shift_time", "shift_location",
                                          "From", "Destination",
                                          "Date")
    }
    if(!indirect_line_present){
      indirect_connections <- "no indrect lines found"
    }
    #we return dataframes for direct and indirect bus connections seperately
    return(list(direct_connections, indirect_connections))
  }
  #################################################################################################
  #<- this section handles when there are no connections on the searched day
  #<- Flixbus displays a link to alternative connections which the crawler follows and in a similar way
  #<- as displayed above scrapes the alternative connections
  #################################################################################################
  new_link <- xpathSApply(travel_page, '/html/body/div[1]/section[2]/div/div/div[1]/div[2]/div/div[2]/div[1]/div/div[3]/div/div/div/div[2]/ul/li/a', xmlAttrs)[2]
  new_link <- paste0("https://shop.flixbus.de/", new_link)
  page <- getURL(new_link)
  travel_page <- htmlParse(page)
  #################################################################################################
  #<- basically now the preceding script can be insert to handle the new page in the exact same way
  #################################################################################################
  tmp <- xpathSApply(travel_page,
                     "//div[@id='search-result-direct']//div[@class='b col-xs-12 rides-list currency-eur']//div[@data-transport-type='bus']")
  #FIX: header vectors now match the 7/9-element rows appended below; the
  #original 4/6-name headers triggered rbind recycling warnings (the header
  #row is discarded before the dataframe is returned either way)
  direct_connections_dataframe <- c("departure", "arrival","price_edited", "duration", "departure_city", "arrival_city", "proposed_date")
  indirect_connections_dataframe <- c("departure", "arrival","price_edited", "duration", "departure_city", "arrival_city", "proposed_date", "shift_duration", "shift_location")
  direct_line_present <- FALSE
  indirect_line_present <- FALSE
  if(length(tmp) > 0){
    for(index in seq(1, length(tmp))){
      travel_data_table <- xpathSApply(tmp[[index]], ".//div[@class='col-xs-12 ride-stations']//tbody")[[1]]
      #get departure time
      departure_raw <- xpathSApply(travel_data_table, "./tr/td/div[@class='flix-connection__time departure-time']", xmlValue)
      departure <- gsub(x = departure_raw, pattern = "\n", "")
      departure <- gsub(x = departure, pattern = " ", "")
      #get arrival time
      arrival_raw <- xpathSApply(travel_data_table, "./tr/td/div[@class='flix-connection__time']", xmlValue)
      arrival <- gsub(x = arrival_raw, pattern = "\n", "")
      arrival <- gsub(x = arrival, pattern = " ", "")
      #get price
      price_raw <- xpathSApply(tmp[[index]], ".//span[@class='num currency-small-cents']", xmlValue)[[1]]
      price_edited <- as.double(gsub(x = str_extract(price_raw, "[0-9]{1,3},[0-9]{1,3}"), pattern = ",", replacement = "."))
      #duration
      #BUGFIX: was tmp[[1]] (same defect as in the loop above)
      duration_raw <- xpathSApply(tmp[[index]], ".//div[@class='col-xs-12 duration ride__duration ride__duration-messages']",
                                  xmlValue)[[1]]
      duration <- gsub(x = duration_raw, pattern = "\n", "")
      duration <- gsub(x = duration, pattern = " ", "")
      duration <- (gsub(x = duration, pattern = "Std.", ""))
      #for some alternatively proposed connections the departure city and/or arrival city might vary (as it suggests a
      #city which is spatially close to the destination or departure city)
      departure_city <- xpathSApply(travel_data_table, ".//div[@class='station-name-label']", xmlValue)[1]
      arrival_city <- xpathSApply(travel_data_table, ".//div[@class='station-name-label']", xmlValue)[2]
      #check if the proposed ones are on a new day
      proposed_date <- str_extract(new_link, "[0-9]{2}\\.[0-9]{2}\\.2020")
      #check if change of bus line is required
      change_of_lines <- ifelse(length(xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]")) > 0,
                                TRUE, FALSE)
      if(!change_of_lines){
        direct_line_present <- TRUE
        direct_connections_dataframe <- rbind(direct_connections_dataframe, c(departure, arrival,price_edited, duration,
                                                                              departure_city, arrival_city, proposed_date))
      }
      if(change_of_lines){
        indirect_line_present <- TRUE
        waiting_time_raw <- xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]/span//div[@class='light-gray']", xmlValue)[[1]]
        waiting_time <- as.numeric(str_extract(waiting_time_raw, "[0-9]{1,3}"))
        location_change <- xpathSApply(tmp[[index]], ".//div[contains(@id, 'transf-num-popup-interconnection-')]/span/div", xmlValue)[[1]]
        indirect_connections_dataframe <- rbind(indirect_connections_dataframe, c(departure, arrival,price_edited, duration,
                                                                                  departure_city, arrival_city,
                                                                                  proposed_date,
                                                                                  waiting_time, location_change))
      }
    }
    direct_connections <- direct_connections_dataframe
    if(direct_line_present){
      #edit the direct connections saved
      direct_connections <- as.data.frame(direct_connections,
                                          col.names = c("departure", "arrival","price_edited", "duration",
                                                        "departure_city", "arrival_city", "proposed_date"),
                                          row.names = NULL)
      direct_connections <- direct_connections[-1,]
      direct_connections$from <- from
      direct_connections$destination <- destination
      direct_connections$date <- date
      colnames(direct_connections) <- c("Departure", "Arrival", "Price", "Duration",
                                        "departure_city", "arrival_city",
                                        "proposed_date",
                                        "From", "Destination",
                                        "Date")
    }
    if(!direct_line_present){
      direct_connections <- "no direct lines found"
    }
    indirect_connections <- indirect_connections_dataframe
    if(indirect_line_present){
      indirect_connections <- as.data.frame(indirect_connections)
      indirect_connections <- indirect_connections[-1,]
      indirect_connections$from <- from
      indirect_connections$destination <- destination
      indirect_connections$date <- date
      colnames(indirect_connections) <- c("Departure", "Arrival", "Price", "Duration",
                                          "departure_city", "arrival_city",
                                          "proposed_date",
                                          "shift_time", "shift_location",
                                          "From", "Destination",
                                          "Date")
    }
    if(!indirect_line_present){
      indirect_connections <- "no indrect lines found"
    }
    return(list(direct_connections, indirect_connections))
  }
  #################################################################################################
  #<- if no alternative connections are found, this is where we end up. Approximately, the data collection
  #<- process showed, around 2% of connections requested are "a mess" and thus no scheduled bus lines found
  #################################################################################################
  print("It´s a mess")
}
|
500a8526ce0e35fa96c6eafd298af7c2ad8e3572
|
ccec896e6871a4cb25131df6a8375d5a00bf1aee
|
/inst/examples/01_presentation/app.R
|
922be22341538039deec4928c94b5596923248c1
|
[] |
no_license
|
juba/shinyglide
|
de536410706b7b3644858888992d5c9fd8185220
|
a88e054c8b3de2f6d8a651129f470b286271827f
|
refs/heads/master
| 2023-04-07T19:49:55.204992
| 2023-03-15T09:03:33
| 2023-03-15T09:03:33
| 187,533,479
| 93
| 8
| null | 2022-07-20T09:07:11
| 2019-05-19T22:14:20
|
R
|
UTF-8
|
R
| false
| false
| 6,313
|
r
|
app.R
|
## shinyglide presentation app
## Live version at : https://data.nozav.org/app/shinyglide/01_presentation/
library(shiny)
library(shinyglide)
# Navigation controls shared by all screens of the glide: a "Back" button on
# the left and, on the right, a "Next" button plus a final call-to-action
# button linking to the project website (shown on the last screen only).
controls <- glideControls(
  prevButton(),
  list(
    nextButton(),
    lastButton(
      class="btn btn-success",
      href="https://juba.github.io/shinyglide",
      "Go to project website"
    )
  )
)
# Inline CSS injected via tags$style(): grey page background, a centered
# 700px-max wrapper with border and drop shadow around the glide, image
# sizing inside slides, and colour classes (str/kwa/num/kwd/com) used by
# syntax-highlighted code snippets.
css <- "
body {
  background-color: #E0E0E0;
}
.container-fluid {
  max-width: 700px;
  padding: 20px;
}
.glide-wrapper {
  border: 1px solid #888;
  box-shadow: 0px 0px 20px #888;
  background-color: #FFF;
  padding: 1em 2em 2em 2em;
}
.glide__slide img {
  max-width: 100%;
}
.bg-info {
  margin-top: 2em;
  padding: 5px;
  text-align: center;
}
span.hl.str { color: #d14;}
span.hl.kwa { color: #099;}
span.hl.num { color: #099;}
span.hl.kwd { color: #333; font-weight: bold;}
span.hl.com { color: #888; font-style: italic;}
"
# UI definition: a single glide() component whose screen() children form the
# slides of the presentation. screenOutput() slides are rendered dynamically
# by the server (gif_screen / gif_loading_screen) and shown conditionally.
ui <- fluidPage(
  tags$head(
    tags$style(css)
  ),
  fluidRow(
    div(class="glide-wrapper",
      glide(
        custom_controls = controls,
        # intro slide
        screen(
          h3("shinyglide presentation app"),
          p(HTML("This is an presentation app of <code>shinyglide</code>, an R package to make carousel-like or assistant-like or younameit-like <a href='https://shiny.rstudio.com/'>shiny apps</a>.")),
          p("Did you already install a program on Windows ? Yup, this is the same thing."),
          p(HTML("To continue, click <em>Next</em>, use keyboard arrows, or swipe if you're on mobile."))
        ),
        # slides demonstrating inputs and outputs inside screens
        screen(
          h3("Screens"),
          p(HTML("A <code>glide</code> component can be integrated into any shiny application. It is divided in screens. Each screen can contain anything you want, such as shiny inputs :")),
          numericInput("n", "n value :", value = 100)
        ),
        screen(
          h3("Screens"),
          p(HTML("Or outputs :")),
          plotOutput("plot")
        ),
        # slide demonstrating a conditional "Next" control
        screen(
          next_condition = "input.val > 0",
          h3("Conditional controls"),
          p(HTML("Sometimes you don't want your user to be able to go to the next screen if a certain condition is not met. You can provide such a condition for the <em>Back</em> or the <em>Next</em> control.")),
          p(HTML("Here, the <em>Next</em> control gets a <em>disabled</em> status while the following input is 0 (you can choose to hide the control instead of disabling it).")),
          numericInput("val", "Value", value = 0, min = 0)
        ),
        screen(
          h3("Screen output"),
          p(HTML("Sometimes you want to generate a screen and to show it depending of user inputs. This is possible in `shinyglide` thanks to the `screenOutput` function."),
          p(HTML("For example, check the following checkbox to get a next screen with a GIF :")),
          div(class="bg-info",
              checkboxInput("gif", "A GIF please !", value = FALSE)
          )
        ),
        # dynamically rendered slide (see output$gif_screen in the server)
        screenOutput("gif_screen"),
        screen(
          h3("Screen output"),
          p(HTML("When screen output computations takes long, the <em>Next</em> control is disabled and can show a loading label and a spinner. It comes back to its normal state when all the following screen outputs are updated.")),
          p(HTML("For example, when you check the following checkbox, an artificial two seconds loading time is taken to display the next screen (yes, with another GIF).")),
          div(class="bg-info",
              checkboxInput("gif_loading", "Show me the spinner !", value = FALSE)
          )
        ),
        # dynamically rendered slide (see output$gif_loading_screen in the server)
        screenOutput("gif_loading_screen"),
        # slides demonstrating in-app navigation controls (data-glide-* attrs)
        screen(
          h3("In app controls"),
          p(HTML("You can add links or buttons inside your app that will act as a control to navigate between screens.")),
          p(HTML("For example, the following link will go back one screen :")),
          p(`data-glide-el`="controls",
            a(`data-glide-dir`="<", href="#", "Go back")
          ),
          p(HTML("And this link will go back all the way to the first screen :")),
          p(`data-glide-el`="controls",
            a(`data-glide-dir`="<<", href="#", "Let's start again")
          )
        ),
        # slide with per-screen custom control labels
        screen(
          next_label = paste("Next one please !", icon("arrow-right", lib="glyphicon")),
          previous_label = "Let\'s go back",
          h3("Custom controls"),
          p(HTML("Default controls are created when you setup a <code>glide</code>. You can customize them by changing their label, either at glide or screen level.")),
          p(HTML("Here, this screen has different labels than the other screens."))
        ),
        screen(
          h3("Custom controls"),
          p(HTML("You can also provide a completely custom code for your controls, either manually or with some helpers functions.")),
          p(HTML("The following app gives you an example of custom controls :")),
          tags$a(href="https://data.nozav.org/app/shinyglide/04_custom_controls/", "Sample custom controls app")
        ),
        # closing slide
        screen(
          h3("That's about it !"),
          p(HTML("Thanks for your attention. If you want to learn more and see other example apps, you can go to the "), a(href="https://juba.github.io/shinyglide", "project website.")),
          p(HTML("You can also take a look at "), a(href="https://github.com/juba/shinyglide/blob/master/inst/examples/01_presentation/app.R", "this application source code."))
        )
      )
    )
  )
  )
)
# Server logic: renders the demo histogram and the two conditionally shown
# GIF slides. Both screen outputs must keep suspendWhenHidden = FALSE so
# shinyglide can evaluate them while their slide is not yet visible.
server <- function(input, output, session) {

  # Histogram of input$n draws from a standard normal distribution.
  output$plot <- renderPlot({
    hist(rnorm(input$n), main = paste("n =", input$n), xlab = "x")
  })

  # Slide content shown only once the "gif" checkbox is ticked;
  # returning NULL hides the screen entirely.
  output$gif_screen <- renderUI({
    if (input$gif) {
      list(
        h3("Here is your GIF"),
        div(class = "text-center",
            tags$img(src="https://media.giphy.com/media/5wWf7GW1AzV6pF3MaVW/giphy.gif")
        )
      )
    } else {
      NULL
    }
  })
  outputOptions(output, "gif_screen", suspendWhenHidden = FALSE)

  # Second conditional slide. The sleep runs before the condition check on
  # purpose: it simulates a slow computation so the "Next" control shows
  # its loading spinner while this output recomputes.
  output$gif_loading_screen <- renderUI({
    Sys.sleep(2)
    if (input$gif_loading) {
      list(
        h3("Here is another GIF"),
        div(class = "text-center",
            tags$img(src="https://media.giphy.com/media/rq6c5xD7leHW8/giphy.gif")
        )
      )
    } else {
      NULL
    }
  })
  outputOptions(output, "gif_loading_screen", suspendWhenHidden = FALSE)
}
shinyApp(ui, server)
|
51be30907c8e7d482a292d2b042e49d00489f821
|
05ba1ac8a4ad8695d7d9cf72cbf1e068ae46dd7a
|
/scripts/R/plotLoess.R
|
0f1a0a5ea2b025e0e1beae944d94aa47a1735f4d
|
[
"Apache-2.0",
"Artistic-2.0"
] |
permissive
|
sergpolly/cworld-dekker
|
2985788a5e14df9d8be26e2b58ecf4944cd77a95
|
7557bbe873e623e9059482722922faca4e784ad0
|
refs/heads/master
| 2020-04-21T13:39:34.622698
| 2019-06-07T16:37:37
| 2019-06-07T16:37:37
| 169,606,468
| 0
| 0
|
Apache-2.0
| 2019-02-07T16:50:59
| 2019-02-07T16:50:58
| null |
UTF-8
|
R
| false
| false
| 3,070
|
r
|
plotLoess.R
|
# Plot loess-expected-value diagnostics for C (chromatin interaction) data:
# a two-panel PNG with observed counts vs genomic distance, overlaid with the
# loess weighted average and a +/- 1 stdev envelope. Panel 1 trims the top &
# bottom 0.5% of y values, panel 2 trims 2.5% for a zoomed view.
# Usage: Rscript plotLoess.R <working-dir> <input-file>
options(bitmapType='cairo')
args <- commandArgs(TRUE)
dir<-args[1]
inputFile<-args[2]
# Basename of the input file.
# NOTE(review): inputFileName is overwritten with the full path a few lines
# below, so this basename computation is effectively dead — confirm which
# value the output PNG name is meant to use.
inputFileArray<-unlist(strsplit(inputFile, "\\/"))
inputFileName<-inputFileArray[length(inputFileArray)]
wd<-paste(dir,sep='')
setwd(wd)
data<-read.table(inputFile,header=T,sep="\t")
# drop rows without a loess estimate/stdev
data<-subset(data,data$loessExpectedValue!="NA")
data<-subset(data,data$loessExpectedStdev!="NA")
# pool every plotted y value (observed + envelope bounds) so the trimming
# quantiles below are computed over all series at once
allData.y<-c(data$observedData,data$loessExpectedValue,data$loessExpectedValue+data$loessExpectedStdev,data$loessExpectedValue-data$loessExpectedStdev)
allData.y<-sort(allData.y)
allData.y.size<-length(allData.y)
# output PNG is named after the full input path (with any .gz suffix removed)
inputFileName<-inputFile
inputFileName<-gsub(".gz", "", inputFileName)
pngfile<-paste(inputFileName,".png",sep='')
png(pngfile,height=800,width=800)
par(mfrow=c(2,1))
# first plot - all data
allData.y.topIndex<-floor(allData.y.size*0.995)-1
allData.y.bottomIndex<-ceiling(allData.y.size*0.005)+1
allData.y.lim.top<-allData.y[allData.y.topIndex]
allData.y.lim.bottom<-allData.y[allData.y.bottomIndex]
# point transparency scaled to the number of rows, with a floor of 0.1
alpha<-(100/nrow(data))
if(alpha < 0.01) {
	alpha<-0.1
}
plot(data$realInteractionDistance,data$observedSignal,ylim=c(allData.y.lim.bottom,allData.y.lim.top),main=paste(inputFileName,"1% top/bottom trim","C Scatter Plot - All Distances",sep="\n"),xlab="Genomic Distance (bp)",ylab="C counts",type="n")
points(data$realInteractionDistance,data$observedSignal,col=rgb(0,0,0,alpha))
lines(data$realInteractionDistance,data$loessExpectedValue,col="red",lwd=3)
lines(data$realInteractionDistance,data$loessExpectedValue+data$loessExpectedStdev,col="red",lwd=1,lty=2) # plot data$loessExpectedValue + data$loessExpectedStdev
lines(data$realInteractionDistance,data$loessExpectedValue-data$loessExpectedStdev,col="red",lwd=1,lty=2) # plot data$loessExpectedValue - data$loessExpectedStdev
legend("topright", legend = c("loess weighted average", "loess weighted stdev"),lty=1:2,lwd=3:1,xjust=1,col=c("red","red"),yjust=1)
# second plot - zoom in
allData.y.topIndex<-floor(allData.y.size*0.975)-1
allData.y.bottomIndex<-ceiling(allData.y.size*0.025)+1
allData.y.lim.top<-allData.y[allData.y.topIndex]
allData.y.lim.bottom<-allData.y[allData.y.bottomIndex]
plot(data$realInteractionDistance,data$observedSignal,ylim=c(allData.y.lim.bottom,allData.y.lim.top),main=paste(inputFileName,"5% top/bottom trim","C Scatter Plot - All Distances",sep="\n"),xlab="Genomic Distance (bp)",ylab="C counts",type="n")
points(data$realInteractionDistance,data$observedSignal,col=rgb(0,0,0,alpha))
lines(data$realInteractionDistance,data$loessExpectedValue,col="red",lwd=3)
lines(data$realInteractionDistance,data$loessExpectedValue+data$loessExpectedStdev,col="red",lwd=1,lty=2) # plot data$loessExpectedValue + data$loessExpectedStdev
lines(data$realInteractionDistance,data$loessExpectedValue-data$loessExpectedStdev,col="red",lwd=1,lty=2) # plot data$loessExpectedValue - data$loessExpectedStdev
legend("topright", legend = c("loess weighted average", "loess weighted stdev"),lty=1:2,lwd=3:1,xjust=1,col=c("red","red"),yjust=1)
dev.off()
|
3ae508608396392c0474962234fc5a33f515ffe4
|
e76df58842a0adc91594d4e5983b47eace894da5
|
/ui.R
|
2a05766da9d2608088437f2ef47f21761f8b7018
|
[] |
no_license
|
violet468118034/LA-311-call-track
|
81cfc65d2ecfe6f22df8d526740757c745f84f94
|
8deaff10708c6936b453853b6f0fd52bcb290e3f
|
refs/heads/master
| 2021-01-23T03:48:13.750702
| 2017-03-25T05:26:02
| 2017-03-25T05:26:02
| 86,125,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,157
|
r
|
ui.R
|
# Shiny UI for the LA 311 call analysis dashboard: three tabs —
# data overview (2016 and 2011-2015), call resolution by request type,
# and processing efficiency.
library(shiny)
library(plotly)
library(ggplot2)
library(dplyr)
library(lubridate)
library(ggmap)
library(viridis)
# Define UI for application that draws a histogram
ui <- fluidPage(

  # Application title
  titlePanel("Visualiztion of 311 Call Analysis"),

  # Tabset
  tabsetPanel(
    # Tab 1: spatial + temporal overview, one sub-tab per period,
    # each with its own date-range filter
    tabPanel(title = "Data Overview",
             navbarPage(title = "Year",
               tabPanel( title = "2016 Overview",
                        dateRangeInput("dtr", "Date Range:", start = "2016-01-01", end = "2016-11-27"),
                        plotOutput("space_overview"),
                        br(),
                        plotlyOutput("time_overview"),
                        helpText("As we can see in the plot")),
               tabPanel(title = "2011-2015 Overview",
                        dateRangeInput("dtr2", "Date Range:", start = "2011-01-01", end = "2015-05-31"),
                        plotOutput("space_overview2"),
                        br(),
                        plotlyOutput("time_overview2"),
                        helpText("As we can see in the plot"))
             )
    ),
    # Tab 2: map + heatmap filtered by call resolution type
    tabPanel(title = "Call Resolution",
             sidebarLayout(
               # Define the sidebar with one input
               sidebarPanel(
                 selectInput("call_type", "Request Type:",
                             choices=c("Processed Info", "Transferred Info", "Referred Info", "Invalid Info", "Given Info")),
                 hr(),
                 helpText("Invalid Info concludes Caller Hanger up, Got Voicemail, Info  NotAvailable, Line Busy and N/A")
               ),

               # Create a spot for the barplot
               mainPanel(
                 plotOutput("map"),
                 plotlyOutput("heatmap")
               )
             )
    ),
    # Tab 3: processing efficiency plot
    tabPanel(title = "Process Efficiency",
             plotlyOutput("eff"),
             helpText("As we can see in the plot"))
  )
)
|
89e2abf5ff33266761cbd339eb706908699e7386
|
a22849c0548d88c3435025a5d3e34fb53195ed12
|
/covidProject/coronaModel/man/gibbs_SIR.Rd
|
bc848c0b928ae5f22cccf8c5adaf180bed56355f
|
[] |
no_license
|
YvetteLi/COVID19-Progression-Prediction
|
6237f7b853dabb45a286d169153b6a0eb622be2f
|
657579bff48c30c7d13c75584e73638f3f813acd
|
refs/heads/main
| 2023-07-04T22:46:25.027360
| 2021-08-19T00:34:10
| 2021-08-19T00:34:10
| 397,768,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 595
|
rd
|
gibbs_SIR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SIR_utils.R
\name{gibbs_SIR}
\alias{gibbs_SIR}
\title{Using a Gibbs sampler to update parameters from the full conditional distributions for the SIR model}
\usage{
gibbs_SIR(epimodel)
}
\arguments{
\item{epimodel}{the epidemic model object}
}
\value{
updated epimodel
}
\description{
Using a Gibbs sampler to update parameters from the full conditional distributions for the SIR model
}
\references{
\href{https://github.com/fintzij/BDAepimodel/blob/f73daebff0d46bbd7e68af1429f37b4665fae92b/R/gibbs_template.R}{Gibbs template}
}
|
c99333ae94ac721ad56b172dbcafa08711a2bc50
|
734405d4e0d6f941c50e5b4862943ffc1cab0aa6
|
/script/0701plot기본.R
|
603e8a682ee511778000adcc0f4e8d1c3cee13a6
|
[] |
no_license
|
valborgs/RStudy
|
e98a079a5d0ea7a48f6bf19555630d117df44983
|
ac1808fcc1c3362876e8f2d8823d304496f29231
|
refs/heads/master
| 2020-07-04T14:15:30.309572
| 2019-08-14T08:32:00
| 2019-08-14T08:32:00
| 202,308,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,704
|
r
|
0701plot기본.R
|
# plot() function
# the basic plotting function
# plot(y-axis data, options)
# plot(x-axis data, y-axis data, options)
#
# par() function
# configures the environment of the plotting screen
# par(options)
# plot windows: the RStudio internal pane vs a stand-alone device window
plot.new() # create the RStudio plot pane
dev.new() # open a stand-alone device window
# while a stand-alone device window is open it takes priority
# basic graph with only x and y axes, no options
# plot() : scatter plot
plot(c(1:15))
plot(1:5,c(5,3,2,6,10))
plot(1:5,c(5,3,2,6,10))
# main options of plot()
# xlab, ylab: x/y axis labels
# xlim, ylim: x/y axis value ranges
# main: plot title
# pch: point symbol
# cex: point size
# col: colour
# type: plot type
x<-c(1,2,3)
y<-c(4,5,6)
# basic options
# plot(x,y, xlab=, ylab=, main=)
plot(x,y,xlab="x값",ylab="y.랩",main="테스트용 그래프")
# point size / colour
plot(x,y,cex=3, col='red')
# suppress the axes: axes=FALSE
# suppress the labels: ann=FALSE
v1<-c(100,130,120,160,150)
plot(v1,type='o',col='red', ylim=c(0,200),axes=FALSE,ann=FALSE)
# add an axis: the axis() function
# axis side values: 1 -> x axis, 2 -> y axis
axis(1,at=1:5,
     lab=c("mon","tue","wed","thu","fri"))
axis(2,ylim=c(0,200))
# plot title: main option
# axis titles: xlab, ylab
# titles can also be attached with the title() function
# title options: col.main, font.main
# the size option is unstable
# axis titles
title(xlab="Day",col.lab="black")
title(ylab="PRICE",col.lab="blue")
# plot title
title(main="FRUIT", col.lab='red')
# splitting the plotting screen (device settings)**
# par()
# mfrow option: split into rows x columns
par(mfrow=c(1,3)) # split into 1 row, 3 columns
v1
plot(v1, type='o') # points and lines overlaid
plot(v1, type='s') # step plot anchored on the left value
plot(v1, type='l') # line plot
par(mfrow=c(2,3)) # split into 2 rows, 3 columns
plot(v1, type='o') # points and lines overlaid
plot(v1, type='s') # step plot anchored on the left value
plot(v1, type='l') # line plot
pie(v1)
plot(v1, type='o')
barplot(v1)
par(mfrow=c(1,1)) # back to a single plot per page
# positions of axis title, tick labels and tick marks
v1
# mgp=c(axis title position, tick label position, tick line position)
par(mgp=c(1,1,1))
plot(v1,xlab='aaa')
par(mgp=c(3,2,1))
plot(v1,xlab='aaa')
par(mgp=c(3,2,0))
plot(v1,xlab='aaa')
par(mgp=c(1,2,3))
plot(v1,xlab='aaa')
# adjusting the outer plot margins
# margins are set with the oma option
# par(oma=c(bottom, left, top, right))
par(oma=c(5,5,0,0))
plot(v1,xlab='aaa')
par(oma=c(0,0,5,5))
plot(v1,xlab='aaa')
# par() configures the plotting environment
# it sets graphical parameters:
# screen splitting, margins, option positions,
# and also text size, colours and more
par(mfrow=c(1,1)) # reset the split plot area to a single region
# drawing graphs on top of each other
par(new=T) # allow overplotting
# must be set again before each overlaid plot
v1<-c(1:5)
v2<-c(5:1)
v3<-c(3:7)
# axis values and axis titles end up overlapping
plot(v1,type="s",col="red",ylim=c(1,5))
par(new=T)
plot(v2,type="o",col="green",ylim=c(1,5))
par(new=T)
plot(v3,type="l",col="blue",ylim=c(3,7))
# use qplot and the lines() function for this instead
plot(v1,type="s",col="red",ylim=c(1,10))
lines(v2,type="o",col="green",ylim=c(1,5))
lines(v3,type="l",col="blue",ylim=c(1,15))
# lines() function
# draws a line graph on top of a graph produced by plot()
# points() adds points to an already created plot
x<-c(0:10)
y<-c(0:10)
plot(x,y)
points(5,5,cex=5, pch='+', col="red")
# drawing a diagonal line
# abline(a=intercept, b=slope, lty=line type, col=colour value)
abline(a=0,b=1, col="black",lty=6)
# add a horizontal line: h = y-axis value
# add a vertical line: v = x-axis value
abline(h=3,col="green",lty=3)
abline(v=5,col="red",lty=2)
# adding a legend
# legend(x position, y position, contents, cex, lty, ...) - position and contents are required
plot(v1,type="s",col="red",ylim=c(1,10))
lines(v2,type="o",col="green",ylim=c(1,5))
lines(v3,type="l",col="blue",ylim=c(1,15))
legend(4,10,c("v1","v2","v3"), cex=0.5,
       col=c("red","green","blue"),
       lty=1)
legend(4,10,c("v1","v2","v3"), cex=0.2,
       col=c("red","green","blue"),
       lty=1)
# usually keep cex at 1 or below
#
x<-c(0:30)
y<-c(0:30)
plot(x,y)
arrows(10,20,3,24,col='red') # arrow from (10,20) to (3,24)
rect(3,10,10,30,density=5) # rectangle from (3,10) to (10,30)
text(3,27,"키포인트",srt=25) # draw the text at (3,27), rotated 25 degrees
mtext("우측에 출력", side=4,adj=0.1)
# side = 1 (bottom), 2 (left), 3 (top), 4 (right)
# bar charts
# barplot()
x<-c(1:5)
barplot(x) # basic bar chart
barplot(x,names=c(1:5)) # set x axis values
barplot(x,horiz=T) # horizontal bar chart # must set xlim instead of ylim
x<-matrix(c(5,4,3,2),2,2)
# stacked bar chart
barplot(x,names=c(5,3),
        col=c("green","blue"),
        ylim=c(0,12))
# grouped bar chart
barplot(x,names=c(5,3),
        beside=T, # set grouped bars
        col=c("green","blue"),
        ylim=c(0,12))
# horizontal stacked bar chart
barplot(x,names=c(5,3),
        col=c("green","blue"),
        horiz=T,
        xlim=c(0,12))
# horizontal grouped bar chart
barplot(x,names=c(5,3),
        horiz=T,
        beside=T,
        col=c("green","blue"),
        xlim=c(0,12))
# horizontal bar charts treat the x axis as the base axis
v1<-c(100,120,140,160,180)
v2<-c(120,130,150,140,170)
v3<-c(140,170,120,110,160)
qty<-data.frame(BANANA=v1,
                CHERRY=v2,
                ORANGE=v3)
barplot(as.matrix(qty),main="Fruit's sales,QTY",
        beside=T,
        col=rainbow(nrow(qty)),
        ylim=c(0,300))
legend(14,300,
       c("MON","TUE","WED","THU","FRI"),
       fill=rainbow(nrow(qty)))
# when using hexadecimal colour values, use the fill option
# rainbow
# colour value generator (produces hexadecimal RGB values)
# creates a vector of consecutive colour values
a<-rainbow(7)
b<-rainbow(7)
c<-rainbow(7)
a;b;c
d<-rainbow(4)
e<-rainbow(100)
f<-rainbow(20)
d;e;f
# stacked bar chart (2)
barplot(as.matrix(qty),main="Fruit's sales,QTY",
        # beside=T,
        col=rainbow(nrow(qty)),
        ylim=c(0,800))
View(qty)
t(qty) # swaps rows and columns -> transpose function
barplot(t(qty),
        main="Fruit's sales,QTY",
        # beside=T,
        col=rainbow(nrow(qty)),
        ylim=c(0,800),
        names.arg=c('mon',"tue","wed","thu","fri"))
legend(4,800,
       c("BANANA","CHERRY","ORANGE"),
       fill=rainbow(nrow(qty)))
# build colour values from a condition, then draw the chart
peach<-c(180,200,250,198,176)
peach
colors=c()
for(i in 1:length(peach)){
  if(peach[i]>=200){
    colors<-c(colors,"red")
  }else if(peach[i]>=180){
    colors<-c(colors,"yellow")
  }else{
    colors<-c(colors,"green")
  }
}
colors
barplot(peach,
        main="Peach Sales QTY",
        names.arg=c('월','화','수','목','금'),
        col=colors)
# pie charts
# basic pie chart (built-in function)
p1<-c(10,20,30,40)
# pie(data, radius = radius value (circle size))
pie(p1, radius=1)
pie(p1, radius=0.5)
pie(p1, radius=1.5)
# pie() options
# radius: circle size
# init.angle: starting angle
# col: colour values
# label: slice labels
pie(p1,
    radius=1,
    init.angle=90,
    col=rainbow(length(p1)),
    label=c("1주","2주","3주","4주"))
# print the numeric values as well
# put the computed values into the labels
pct<-p1/sum(p1)*100
pct
lab<-paste(pct,"%")
lab
pie(p1,
    radius=1,
    init.angle=90,
    col=rainbow(length(p1)),
    label=paste(c("1주","2주","3주","4주"),"\n", lab))
legend('topright',
       c("1주","2주","3주","4주"),
       cex=0.9,
       fill=rainbow(length(p1)))
legend('topleft',
c("1주","2주","3주","4주"),
cex=0.9,
fill=rainbow(length(p1)))
legend('bottomright',
c("1주","2주","3주","4주"),
cex=0.9,
fill=rainbow(length(p1)))
legend('bottomleft',
c("1주","2주","3주","4주"),
cex=0.9,
fill=rainbow(length(p1)))
# 3D 그래프
# plotrix 패키지 pie3D()함수 이용
install.packages('plotrix')
library(plotfix)
pie3D(p1,
radius=1,
col=rainbow(length(p1)))
# boxplot 데이터의 분포를 확인하기 위한 용도
v1;v2;v3
boxplot(v1,v2,v3,
col=c("blue","yellow","pink"),
names=c("v1","v2","v3"),
horizontal=T)
# 상자 허리를 가늘게 처리
boxplot(v1,v2,v3,
col=c("blue","yellow","pink"),
names=c("v1","v2","v3"),
horizontal=T,
notch=T) # 중간값에 포인트를 줄때
# 소수점 자리수 정리
round(p1/sum(p1)*100,1) #소수점첫째짜리까지
|
3f44643b1bb5e8e64d56193a9ac1b69daf16331b
|
a31a46ad3878865aefb3bf9b1b4e7b4368a59634
|
/AM+/Plots_for_Paper/threshold_plots.R
|
f9a858646df3430784fcb6652a683ff32fdffad5
|
[] |
no_license
|
geo047/AM-Paper
|
8f2558a2d629e087270ed8fc7f1697d9d53c88f4
|
57309a8542e7aea281098f8ae7207553906d4642
|
refs/heads/master
| 2021-08-08T10:29:20.698858
| 2021-01-22T05:22:04
| 2021-01-22T05:22:04
| 50,554,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,505
|
r
|
threshold_plots.R
|
## plotting threshold and FDR for multi-locus mehtods and single-locus methods (fast, fastALL, gemma)
## Last revised: 21/12/2016
##
## input data resx_y.RData ftp'ed from home directory on bragg in
## /home/geo047/MWAM/SimStudy/Timing/Results/res$FAM_$indx.RData
## where $FAM is W S L A HS HL and $indx is the index 1, 2, 3, etc over the thresholds used in
## results.R on bragg
## contains columns
## c("n_am", "nQTL_am",
## "n_amGPU", "nQTL_amGPU",
## "n_mlmm", "nQTL_mlmm",
## "n_glmnet", "nQTL_glmnet",
## "n_bigRR", "nQTL_bigRR",
## "n_ranger", "nQTL_ranger",
## "n_lasso", "nQTL_lasso",
## n_gemma, nQTL_gemma,
## n_fast, nQTL_fast,
## n_fastALL, nQTL_fastALL,
## "nQTL_true")
## Note
## moved from smoothed power curves to lines based on mean. Smoothed curves weird.
require(ggplot2)
library(GGally)
library(ggthemes)
library(gridExtra)
library(RColorBrewer)
library(extrafont)
DIR <- paste(getwd(),"/", sep="")
sizefn <- 16
thresh_indx <- 1:500
## vector initialisation
fam <- c("W", "S", "L","HS","A","HL")
names.of.methods <- c("am", "mlmm","glmnet","lasso","r2VIM","bigRR", "gemma", "fastALL", "fast")
## list initialisation
FDR <- list()
dfres <- list()
recall <- list()
## threshold information is not actually contained in the files obtained from
## bragg. It sits in the results.R file on /home/geo047/MWAM/SimStudy/Timing/RScripts/
pthresh <- seq(0.01,0.99, length.out=500)
pthresh_r2VIM <- seq(0.05, 100, length.out=500)
#alpha <- 10**-seq(-log10(1e-35), -log10(1e-2), length.out=500)
alpha <- seq(-log10(1e-35), -log10(1e-2), length.out=500)
##----------------------------------------
## Forming list with FDR and recall(power)
## results over families and threshold indexes
##------------------------------------------
cat(" Forming FDR and power(recall) results ... \n")
## looping over families
for(ff in fam){
FDR[[ff]] <- list()
recall[[ff]] <- list()
## looing over threshold indexes
for(indx in thresh_indx){
FDR[[ff]][[indx]] <- list()
recall[[ff]][[indx]] <- list()
## Load Data RData Objects
filename <- paste(DIR,"res",ff,"_",indx, ".RData", sep="")
load(filename) # # loads res_mat
for(ii in names.of.methods){
n_method <- paste("n_",ii, sep="")
nQTL_method <- paste("nQTL_",ii, sep="")
## ---- Set Power (recall) and FDR
tmp <- 1 - (mat[, eval(nQTL_method)]/
mat[, eval(n_method)])
tmp[is.nan(tmp)] <- 0
FDR[[ff]][[indx]][[ii]] <- mean(tmp, na.rm=TRUE)
# capturing case where there may not be any results
if(is.nan(FDR[[ff]][[indx]][[ii]]))
FDR[[ff]][[indx]][[ii]] <- NA
recall[[ff]][[indx]][[ii]] <- mean(mat[,eval(nQTL_method)] / mat[, "nQTL_true"],
na.rm=TRUE)
if(is.nan(recall[[ff]][[indx]][[ii]]))
recall[[ff]][[indx]][[ii]] <- NA
} ## end for ii
} ## end for indx
} ## end for family
## Form the results structure: one row per (method, family, rep, threshold index)
## with columns method, fam, rep, FDR, threshold.
##
## NOTE(review): the original only built `df` inside per-method if() branches
## ("glmnet", "LMM-Lasso", "r2VIM", "bigRR", "gemma", "fast", "fastALL"), but
## the method names actually stored in FDR are those of names.of.methods, so
## "am", "mlmm" and "lasso" had no matching branch: the very first iteration
## ("am") failed with "object 'df' not found", and later unmatched methods
## re-appended the stale `df` from a previous method.  Each method is now
## mapped explicitly to its threshold scale, defaulting to the proportion
## scale used by the penalised-regression methods.
threshold_of <- function(mm, indx) {
  switch(mm,
         r2VIM   = pthresh_r2VIM[indx],  # r2VIM importance-score scale
         gemma   = ,
         fast    = ,
         fastALL = alpha[indx],          # -log10(p) significance scale
         pthresh[indx])                  # proportion scale: glmnet, lasso, bigRR, am, mlmm
}
dfres <- data.frame()
## looping over families
for(ff in names(FDR))
{
  cat(" Reading in family", ff, "\n")
  rows <- list()  # collect rows per family, bind once (avoids O(n^2) rbind)
  ## looping over threshold indexes
  for(indx in thresh_indx)
  {
    ## looping over methods
    for(mm in names(FDR[[1]][[1]])){
      rows[[length(rows) + 1L]] <-
        data.frame(method = mm, fam = ff,
                   rep = 1:length(FDR[[1]][[1]][[1]]),
                   FDR = FDR[[ff]][[indx]][[mm]],
                   threshold = threshold_of(mm, indx))
    } ## end for mm
  } ## end for indx
  dfres <- rbind.data.frame(dfres, do.call(rbind, rows))
} ## end for ff
## checking if all values are 0 for a curve
for(mm in levels(dfres$method)){
for(ff in levels(dfres$fam)){
indx <- which(dfres$method==mm & dfres$fam==ff)
if(sum(dfres$FDR[indx], na.rm=T)==0){
print("in here")
dfres$FDR[indx] <- NA
}
}
}
## change ordering of factor levels to change order of facet_wrap
dfres$method <- factor(dfres$method, levels=c("glmnet", "lasso", "r2VIM", "bigRR", "gemma",
"fastALL", "fast"))
levels(dfres$method) <- c("glmnet", "LMM-Lasso", "r2VIM", "bigRR", "GEMMA", "FaST-LMM^all",
"FaST-LMM^few")
dfres$fam <- factor(dfres$fam, levels=c("W","L","S","HS","A","HL"))
levels(dfres$fam) <- c("150 x 5K", "350 x 500K", "1500 x 50K", "2000 x 500K",
"4000 x 1.5M", "10000 x 1.5M")
## change family labels to simulation labels
# W 150 x 5 K 750
# S 350 x 400K 140,000
# L 1500 x 50K 75,000
# HS 2000 x 500K 1,000,000
# A 4000 x 1.5M 600,000,000
# HL 10000 x 1.5M 1.5*e10
# "am" "mlmm" "glmnet" "lasso" "r2VIM" "bigRR" "gemma" "fastALL" "fast"
p <- ggplot(data=dfres, aes(threshold, FDR, color=fam)) + geom_line(size=1) +
facet_wrap(~method, ncol=3,scales="free", labeller=label_parsed) +
theme(aspect.ratio = 1) # try with and without
## set theme
p <- p + theme_hc()
## increase spacing between facet plots
p <- p + theme(panel.spacing = unit(3, "lines"))
## specify xlab and ylab
p <- p + ylab(bquote("False discovery rate")) +
xlab(bquote('Significance threshold'))
## change x and y labels size and bold
p <- p + theme(axis.title.x = element_text(angle=0, vjust=1, size=14))
p <- p + theme(axis.title.y = element_text(angle=90, vjust=1, size=14))
# alter x and y axis labels
p <- p +
theme(axis.text.x = element_text(size=11, angle=0)) +
theme(axis.text.y=element_text(size=11, hjust=0.5)) +
theme(strip.text = element_text(size=14))
## increase font of lengend + remove legend title
p <- p + theme(legend.text=element_text(size=12))
p <- p + theme(legend.title=element_blank())
p <- p+ theme(legend.key.width=grid:::unit(1.5,"cm"))
#p + theme_base()
#p + theme_economist_white()
#p + theme_few()
postscript("~/Papers/AM-Paper/threshold.eps", width=10, height=10, fonts=c("sans", fonts()),
horizontal=FALSE)
p
dev.off()
|
25fb975b157465a622ca50947f11d8c637fec45f
|
ca90669ad709f52b46580b121bf85923638b7c3c
|
/man/MODISSummaries.Rd
|
513092bd2c62d5ddb4d5b3ee19391669a480bfa9
|
[] |
no_license
|
huananbei/MODISTools
|
8797f4e102c063726cc54b2946a507935ea33fb7
|
2113d8076272f76157cc96efe7715f53c9bf14ef
|
refs/heads/master
| 2021-10-20T21:05:34.266614
| 2019-03-01T21:53:09
| 2019-03-01T21:53:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,363
|
rd
|
MODISSummaries.Rd
|
\name{MODISSummaries}
\alias{MODISSummaries}
\title{MODIS subset processing & organisation tool
}
\description{A function to run time-series analysis and compute summary statistics for a downloaded MODIS subset, writing a summary file and another file with processed MODIS data tagged onto the original file inputted to MODISSubsets. This function allows the user to easily explore the characteristics of the downloaded data, and then process them into a form that is ready for use in modelling.
}
\usage{MODISSummaries(LoadDat, FileSep = NULL, Dir = ".", Product, Bands, ValidRange, NoDataFill,
ScaleFactor, StartDate = FALSE, QualityScreen = FALSE, QualityBand = NULL,
QualityThreshold = NULL, Mean = TRUE, SD = TRUE, Min = TRUE, Max = TRUE, Yield = FALSE,
Interpolate = FALSE, InterpolateN = NULL, DiagnosticPlot = FALSE)
}
\arguments{
\item{LoadDat}{Input dataset: either the name of an object already in the workspace, or a file to be read in by specifying its file path as a character string, that has location data, dates (end date, and optionally start date) and study ID for each location. If IDs are found in LoadDat that provide a primary key for unique time series then these IDs will be used. Otherwise a set of unique IDs will be generated and used to identify, and file name, each time series.
}
\item{FileSep}{If LoadDat is a character string that corresponds to a file path, choose the delimiter character for that file (e.g. "," for comma separated).
}
\item{Dir}{Character string; an optional argument to specify a file path to the subdirectory where downloaded ASCII files to be processed are located and the output is written: default Dir = "." extracts files from the working directory.
}
\item{Product}{Character; The product shortname code, that the data band input belongs to. The MODIS product table shows all available products and their respective product shortname codes (see references).
}
\item{Bands}{Character; the code that identifies from which band types are the data to be processed. Multiple bands can be specified as a character vector, including the quality control data bands, providing they all come from the same product. With the exception of BRDF Reflectance data products (MCD43A4) that have quality information stored as a separate product (MCD43A2).
}
\item{ValidRange}{Numeric vector of two elements; states the lower (ValidRange[1]) and upper (ValidRange[2]) bounds within which the data to be processed should be found.
}
\item{NoDataFill}{Numeric; the missing data fill value that is used for Bands.
}
\item{ScaleFactor}{Numeric; The specified scaling for the given band type, which the data is to be multiplied by. If a scale factor does not exist for the data band, ScaleFactor should be set to 1.
}
\item{StartDate}{Logical; indicate whether the input dataset contains information on the time-series start date. If StartDate = TRUE, start dates will be taken from the input data and will expect the data frame to have a column named start.date. Default is StartDate = FALSE, whereby the input data is assumed to have only time-series end date. This should be the same as that used in the relevant call to MODISSubsets.
}
\item{QualityScreen}{Logical; optional argument for screening the band data for unreliable pixels. If QualityScreen = TRUE, band data must be downloaded from MODISSubsets with the quality control data corresponding to the same product included. Therefore, both band data and reliability data will be in the same ASCII files for each time-series downloaded. Quality screening is completed by the QualityCheck function, and the arguments for this function need to be included in a MODISSummaries call, if QualityScreen = TRUE. The default is QualityScreen = FALSE, meaning the function will omit data equal to NoDataFill, but will not omit poor quality data.
}
\item{QualityBand}{Character; if QualityScreen = TRUE, the shortname code for the quality data band that you are using to screen Band for poor quality data.
}
\item{QualityThreshold}{Numeric integer; if QualityScreen = TRUE, set the threshold between acceptable and unacceptable quality. Any pixels of lower quality than the class set by QualityThreshold will be removed, and those equal to or of higher quality will be kept. QualityThreshold should be a number within the range of possible QualityScores for the given Product QA data.
}
\item{Mean,
SD,
Min,
Max,
Yield}{Logical; optional arguments that allow selecting which summaries will be included in the summary file that gets written - see value. Selecting Yield requires Interpolate to also be set as TRUE.
}
\item{Interpolate}{Logical; determines whether, after poor quality data is removed, to linearly interpolate between high quality data before calculating the summary statistics. Must be TRUE if Yield = TRUE. The interpolation function used is stats::approx. See ?stats::approx for more details.
}
\item{InterpolateN}{Numeric; if Interpolate = TRUE, optionally set the number interpolated data points to be requested from the time-series interpolation. The default is set to a daily interpolation of the data.
}
\item{DiagnosticPlot}{Logical; if TRUE will produce an additional folder in the specified directory to which plots of the time series data for each site will be saved. Will add the interpolation line, mean, min and max values if specified in the function call.
}
}
\details{If QualityScreen = TRUE, subsets to be processed should include a pixel reliability layer, so the data can be screened for poor quality data, removing them and using linear interpolation to refill data between high quality values.
}
\value{Two CSV files:
One file (MODIS_Summary...) contains summary statistics and computed values for each data. The information this file contains is partly defined by the optional arguments settings: Mean is arithmetic mean; SD is standard deviation; Min and Max are minimum and maximum band values; Yield is the average annual yield (designed for vegetation indices, may not be sensible for all band types); NoFill and PoorQuality show the percentage of values in each time-series that were NoDataFill and omitted by QualityCheck (if QualityScreen = TRUE) respectively. All summary statistics, except yield, are included by default.
The second file (MODIS_Data...) that has the information from the original file inputted (which should have been used in MODISSubsets too) with computed means of the MODIS data tagged on, coupling the input with the output in one form ready for use, such as modelling. In the second file, each nth column of MODIS data, if more than one, will be for each pixel within the whole tile of n pixels collected for the time-series on that row.
}
\references{
\url{https://daacmodis.ornl.gov/cgi-bin/MODIS/GLBVIZ_1_Glb/modis_subset_order_global_col5.pl}
}
\author{Sean Tuck}
\seealso{ \code{\link[MODISTools:MODISSubsets]{MODISSubsets}}
\code{\link[MODISTools:QualityCheck]{QualityCheck}}
}
\examples{
\dontrun{
# dontrun() used because running the example requires internet access,
# and takes over a minute to run.
data(SubsetExample)
MODISSubsets(LoadDat = SubsetExample, Products = "MOD13Q1",
Bands = c("250m_16_days_EVI", "250m_16_days_NDVI", "250m_16_days_pixel_reliability"),
Size = c(0,0), StartDate = TRUE)
# Without quality checking
MODISSummaries(LoadDat = SubsetExample, Product = "MOD13Q1", Bands = "250m_16_days_EVI",
ValidRange = c(-2000,10000), NoDataFill = -3000, ScaleFactor = 0.0001,
StartDate = TRUE)
# With quality checking
MODISSummaries(LoadDat = SubsetExample, Product = "MOD13Q1", Bands = "250m_16_days_EVI",
ValidRange = c(-2000,10000), NoDataFill = -3000, ScaleFactor = 0.0001,
StartDate = TRUE, QualityScreen = TRUE, QualityThreshold = 0,
QualityBand = "250m_16_days_pixel_reliability")
# For both EVI and NDVI
MODISSummaries(LoadDat = SubsetExample, Product = "MOD13Q1",
Bands = c("250m_16_days_EVI","250m_16_days_NDVI"),
ValidRange = c(-2000,10000), NoDataFill = -3000, ScaleFactor = 0.0001,
StartDate = TRUE, QualityScreen = TRUE, QualityThreshold = 0,
QualityBand = "250m_16_days_pixel_reliability")
}
}
|
7200bbc51adb0f1e01266ae95dba2de4cdf25eab
|
0a960783f9c27ba59c1603e97bfb74c20664a9a8
|
/ModelFitting/Bayesian/rstant_testBEST.R
|
10afeef6099e10dd6ad996635e7aada39c49bbc9
|
[] |
no_license
|
Pakillo/Miscellaneous-R-Code
|
f22e1e1ebdbb450c514eaf9246fa4159d7e62fa6
|
11bb22363ad9c57f6c63943597a1cb04fd09d48d
|
refs/heads/master
| 2020-04-01T16:53:22.763412
| 2018-06-07T18:52:44
| 2018-06-07T18:52:44
| 35,029,073
| 2
| 0
| null | 2018-06-07T18:52:45
| 2015-05-04T10:39:56
|
R
|
UTF-8
|
R
| false
| false
| 6,052
|
r
|
rstant_testBEST.R
|
#-------------------------------------------------------------------------------#
# The following is based on Kruschke's 2012 JEP article 'Bayesian estimation #
# supersedes the t-test (BEST)' with only minor changes to the Stan model. It #
# uses the JAGS/BUGS code in the paper's Appendix B as the reference. #
#-------------------------------------------------------------------------------#
#######################
### Create the Data ###
#######################
### play around with the specs if you like
set.seed(1234)
N1 = 50
N2 = 50
mu1 = 1
mu2 = -.5
sig1 = 1
sig2 = 1
Ng = 2
y1 = rnorm(N1, mu1, sig1)
y2 = rnorm(N2, mu2, sig2)
y = c(y1, y2)
groupID = as.numeric(gl(2, N1))
## if unbalanced
# group = 1:2
# groupID = rep(group, c(N1,N2))
tapply(y, groupID, psych:::describe)
##################
### Stan Setup ###
##################
standat= list(N=length(y), Ng=Ng, groupID=groupID, y=y)
stanmodelcode = '
data {
int<lower=1> N; // sample size (note:putting bounds provides simple data check)
int<lower=2> Ng; // number of groups
vector[N] y; // response
int<lower=1, upper=Ng> groupID[N]; // group ID
}
transformed data{
real meany; // mean of y; see mu prior
meany = mean(y);
}
parameters {
vector[2] mu; // estimated group means and sd
vector<lower=0>[2] sigma; // Kruschke puts upper bound as well; ignored here
real<lower=0, upper=100> nu; // df for t distribution
}
transformed parameters { // none needed
}
model {
// priors
mu ~ normal(meany, 10); // note that there is a faster implementation of this for stan; sd here is more informative than in Kruschke paper
sigma ~ cauchy(0, 5);
nu ~ exponential(1.0/29); // Based on Kruschke; makes mean nu 29 (might consider upper bound, too large and might as well switch to normal)
// likelihood
for (n in 1:N){
y[n] ~ student_t(nu, mu[groupID[n]], sigma[groupID[n]]);
//y[n] ~ normal(mu[groupID[n]], sigma[groupID[n]]); // for comparison, remove all nu specifications if you do this
}
}
generated quantities {
vector[N] yRep; // posterior predictive distribution
real muDiff; // mean difference
real CohensD; // effect size; see footnote 1 in Kruschke paper
real CLES; // common language effect size
real CLES2; // a more explicit approach; the mean should roughly equal CLES
for (n in 1:N){
yRep[n] = student_t_rng(nu, mu[groupID[n]], sigma[groupID[n]]);
}
muDiff = mu[1] - mu[2];
CohensD = muDiff / sqrt(sum(sigma)/2);
CLES = normal_cdf(muDiff / sqrt(sum(sigma)), 0, 1);
CLES2 = student_t_rng(nu, mu[1], sigma[1]) - student_t_rng(nu, mu[2], sigma[2]) > 0;
}
'
#############################
### Run and inspect model ###
#############################
### Run model/examine basic diagnostic plots
library(rstan)
# you can ignore the informational message
fit = stan(model_code=stanmodelcode, data=standat, iter=12000, warmup=2000, cores=4, thin=10)
shinystan::launch_shinystan(fit)
### Print summary of model
print(fit, digits=3, pars=c('mu', 'sigma', 'muDiff', 'CohensD', 'CLES', 'CLES2','nu','lp__'))
### Extract quantities of interest for more processing/visualization.
yRep = extract(fit, par='yRep')$yRep
# compare population and observed data values to estimates in summary print
# mean difference
muDiff = extract(fit, par='muDiff')$muDiff
means = tapply(y, groupID, mean)
sds = tapply(y, groupID, sd)
mu1-mu2 # based on population values
abs(diff(means)) # observed in data
# Cohen's d
CohensD = extract(fit, par='CohensD')$CohensD
(mu1-mu2) / sqrt((sig1^2+sig2^2)/2) # population
(means[1]-means[2]) / sqrt(sum(sds^2)/2) # observed
# common language effect size is the probability that a randomly selected score from one
# population will be greater than a randomly sampled score from the other
CLES = extract(fit, par='CLES')$CLES
pnorm((mu1-mu2) / sqrt(sig1^2+sig2^2)) # population
pnorm((means[1]-means[2]) / sqrt(sum(sds^2))) # observed
########################
### Model Comparison ###
########################
### Compare to Welch's t-test
t.test(y1,y2)
### Compare to BEST; note that it requires coda, whose traceplot function will overwrite rstan's
library(BEST)
BESTout = BESTmcmc(y1, y2, numSavedSteps=12000, thinSteps=10, burnInSteps=2000)
summary(BESTout)
#####################
### Visualization ###
#####################
library(ggplot2); library(reshape2);
### plot posterior predictive distribution vs. observed data density
gdat = melt(yRep)
str(gdat)
colnames(gdat) = c('iteration', 'observation', 'value' )
gdat$groupID = factor(rep(groupID, e=2000)) # change this to match your sample size/chain length
gdat$observation = factor(gdat$observation)
ggplot(aes(x=value), data=gdat) +
geom_density(aes(group=groupID, fill=groupID), color=NA, alpha=.25) +
geom_line(aes(group=observation, color=groupID), stat='density', alpha=.05) +
geom_point(aes(x=y, y=0, color=factor(groupID)), alpha=.15, size=5, data=data.frame(y, groupID)) +
xlim(c(-8,8)) + # might get a warning if extreme values are cut out
geom_density(aes(group=groupID, color=groupID, x=y), alpha=.05, data.frame(groupID=factor(groupID),y))
### plot mean difference or other values of interest
ggplot(aes(x=muDiff), data=data.frame(muDiff=muDiff)) +
geom_density(alpha=.25) +
xlim(c(0,3.5)) +
geom_point(x=muDiff, y=0, alpha=.01, size=3) +
geom_path(aes(x=quantile(muDiff, c(.025, .975)), y=c(.2,.2)), size=2, alpha=.5, color='darkred', data=data.frame())
### BEST plots
par(mfrow=c(2,2))
sapply(c("mean", "sd", "effect", "nu"), function(p) plot(BESTout, which=p))
layout(1)
|
ab34b7d2ff581bcc0c80d51113808f816c6d8df6
|
ba5d5554bf6630bae7c648032d11c2f6532787a3
|
/R/svymnlogit.R
|
eefb97166c9c8418bf1a004c806434ee8b55e105
|
[] |
no_license
|
deepfriar/gilez
|
0194d631d4670a04fa054b2c800ca7f028e549d2
|
a2b58d85765a1b06ce4ec8e9cae3a3225fce931a
|
refs/heads/master
| 2023-04-04T09:24:32.545309
| 2021-04-09T15:22:28
| 2021-04-09T16:11:53
| 356,327,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,614
|
r
|
svymnlogit.R
|
#' @describeIn draw svymnlogit
#' @export
draw.svymnlogit <- function(m, x=stats::model.frame(m), B, ...) {
  # Simulate categorical outcomes for every observation in `x` from the
  # coefficient draws in `B` (as produced by consider.svymnlogit, which tags
  # `B` with a "bnames" attribute naming the non-reference outcome categories).
  if(!requireNamespace("svymnlogit")) {stop("You must install the svymnlogit package.")}
  f <- stats::formula(m)
  X <- stats::model.matrix(f, x)
  y <- all.vars(stats::formula(m))[1]
  # All outcome categories observed in the data.  BUG FIX: the original used
  # ifelse(is.factor(...), levels, unique), but ifelse() subsets its yes/no
  # arguments and cannot select between functions, so it errored at run time.
  Y <- if (is.factor(x[[y]])) levels(x[[y]]) else unique(x[[y]])
  bnames <- attr(B, "bnames")
  # Split the draw matrix into one coefficient matrix per non-reference outcome,
  # matching columns of B by outcome name.
  B <- plyr::llply(plyr::llply(bnames, stringr::str_detect, string=colnames(B)), function(u, B) {B[, u]}, B = B)
  # Linear predictor for each outcome: observations x simulations.
  M <- plyr::llply(B, function(u) {as.matrix(X) %*% t(u)})
  # BUG FIX: the original assigned names(M) <- rownames(b), but `b` does not
  # exist in this function; the outcome names are carried in `bnames`.
  names(M) <- bnames
  M <- plyr::llply(M, reshape2::melt, varnames=c("id", "sim"), value.name="value")
  M <- plyr::ldply(M, .id="outcome")
  M$value <- exp(M$value)                      # odds relative to the reference category
  M <- reshape2::dcast(M, id + sim ~ outcome)  # wide: one column per outcome
  M[[setdiff(Y, bnames)]] <- 1                 # reference category has odds 1
  # Draw one outcome per (id, sim) with probability proportional to its odds
  # (the row of odds is passed to sample() as `prob`).
  M$Y <- apply(M[, Y], 1, sample, x=Y, size=1, replace=FALSE)
  M$Y <- factor(M$Y, levels=Y)
  # Wide table of simulated outcomes: rows = observations, columns = simulations.
  W <- reshape2::dcast(M, id ~ sim, value.var = "Y")
  W
}
#' @describeIn consider svymnlogit
#' @export
consider.svymnlogit <- function(m, x, n=1, ...) {
  # Draw `n` simulated coefficient vectors from a multivariate t distribution
  # centred on the fitted coefficients, with scale vcov(m) and the model's
  # residual degrees of freedom.
  coef_mat <- stats::coef(m)
  draws <- mvtnorm::rmvt(n, stats::vcov(m), stats::df.residual(m), as.numeric(coef_mat))
  # Column names repeat the outcome-category names, one run per coefficient.
  colnames(draws) <- rep(rownames(coef_mat), ncol(coef_mat))
  # Tag the outcome names so draw.svymnlogit can split the columns back up.
  attr(draws, "bnames") <- rownames(coef_mat)
  draws
}
#' @describeIn getweights svymnlogit
#' @export
getweights.svymnlogit <- function(m, ...) {stats::weights(m)} # survey weights stored on the fit
#' @describeIn termz svymnlogit has no \code{terms} element
#' @export
termz.svymnlogit <- function(m) {stats::terms(stats::formula(m))} # rebuild terms from the formula instead
|
cbe16e36d95fb31e3fd3549b86ece851c2971938
|
2de5faf818c17fc625f0091f73604e25451b0f75
|
/R_functions/3_order_data.R
|
0a375ae3bdf2b9470fba3f468f582e90bb172e23
|
[] |
no_license
|
mcmventura/Tool_QC_StationsLocation
|
5595c6e59fc7a3037dcfd39dfc0075affa5b550b
|
279cf1f1a9f4d576d0daed02a622ba7e7c6b1cc0
|
refs/heads/master
| 2021-08-31T13:34:54.396211
| 2017-12-21T13:58:28
| 2017-12-21T13:58:28
| 114,001,466
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,629
|
r
|
3_order_data.R
|
#'
#' Reorder the data by the initial order of the coordinate pairs
#'
#' Concatenate all the files with country names and/or marine region names, i.e.,
#' the output of \code{\link{get_country}}, \code{\link{get_marine}} and
#' \code{\link{get_country_sea}}, and reorder the data by the initial order of
#' the coordinate pairs.
#'
order_data <- function() {
  # Read whichever of the three geocoding output files exist in the working
  # directory, standardise the 4th column name to "geo_name", concatenate
  # them, and restore the original coordinate-pair order via the `id` column
  # (column 1).  Writes geographic_names.txt and geographic_names.csv.
  #
  # BUG FIX: the original only assigned `final_df` inside the per-file if()
  # blocks, so when none of the input files existed write.table() failed with
  # "object 'final_df' not found"; it also re-sorted the accumulated data
  # after every file.  We now collect the pieces, fail early with a clear
  # message, and sort exactly once.
  inputs <- c("countries.txt", "marine_regions.txt", "countries_seas.txt")
  pieces <- list()
  for (f in inputs) {
    if (file.exists(f)) {
      d <- read.table(f, header = TRUE, sep = "\t", quote = "")
      # Last column holds the geographic name under a file-specific header.
      names(d)[4] <- "geo_name"
      pieces[[f]] <- d
    }
  }
  if (length(pieces) == 0) {
    stop("None of the expected input files (", paste(inputs, collapse = ", "),
         ") were found in the working directory.", call. = FALSE)
  }
  # unname() keeps the default integer row names, matching the original rbind().
  df <- do.call(rbind, unname(pieces))
  # Restore the initial coordinate-pair order (first column is the id).
  final_df <- df[order(df[, 1]), ]
  write.table(final_df, file = "geographic_names.txt", row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
  write.csv(final_df, file = "geographic_names.csv")
  print("Check the output files < geographic_names.txt > and < geographic_names.csv >.")
}
|
8ae864f80dcbf2862b848693d64ce03a049131af
|
e4e07501cd6da34beb76abc3e82b5784db1fe011
|
/man/plot.JZSMed.Rd
|
88bb781ded6ecde4ffea3861f430b94de959772e
|
[] |
no_license
|
MicheleNuijten/BayesMed
|
ff239c8b598e95546fd5bed5d79963c8489192e1
|
2a156ed80e81bb0065b6cee017d86e15d87ff9d1
|
refs/heads/master
| 2021-06-02T20:09:07.919564
| 2020-01-29T13:13:28
| 2020-01-29T13:13:28
| 7,775,559
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 969
|
rd
|
plot.JZSMed.Rd
|
\name{plot.JZSMed}
\alias{plot.JZSMed}
\title{
Display the results of \code{\link{jzs_med}} in a figure.
}
\description{
This function displays the estimates and posterior probabilities of path alpha, beta, and tau' in a mediation schema and thus renders a clear view of the structure in the data.
}
\usage{
\method{plot}{JZSMed}(x,\dots)
}
\arguments{
\item{x}{
the output of the jzs_med function.
}
\item{\dots}{
additional arguments to be passed on to the plot method, such as graphical parameters (see \code{par}).
}
}
\author{
Michele B. Nuijten <m.b.nuijten@uvt.nl>, Ruud Wetzels, Dora Matzke, Conor V. Dolan, and Eric-Jan Wagenmakers.
}
\seealso{
\code{\link{jzs_med}}
}
\examples{
\dontrun{
# simulate mediational data
a <- .5
b <- .6
t_prime <- .3
X <- rnorm(50,0,1)
M <- a*X + rnorm(50,0,1)
Y <- t_prime*X + b*M + rnorm(50,0,1)
# save jzs_med output
res <- jzs_med(independent=X,dependent=Y,mediator=M)
# plot results
plot(res$main_result)
}
}
|
7ff7b07a852755c0a7e3f0262483be1d7a986fa0
|
76261a184e7aef8de40a3aa41a469148fe6d02f6
|
/R-demos/old notes/单因素方差分析.R
|
006c14bd10872ae3ec8e495492caa9a041efa385
|
[] |
no_license
|
AngelinaBao/Rnotes
|
e3b7dbd80df24fd7f0a3c2f10588f07f37e22659
|
cb9864738d776a19c3cf4d95d37cefcac46374c6
|
refs/heads/master
| 2020-03-24T21:16:08.090091
| 2019-02-09T01:41:17
| 2019-02-09T01:41:17
| 143,022,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
单因素方差分析.R
|
-rm(list = ls())  # NOTE(review): stray leading "-" before rm() looks like a typo -- confirm
gc()
# One-way ANOVA: effect of treatment on cholesterol response (multcomp example data)
library(multcomp)
data(cholesterol, package = "multcomp")
attach(cholesterol)
head(cholesterol)
table(cholesterol)
class(cholesterol)
table(trt)  # group sizes per treatment level
aggregate(response, by = list(trt), FUN = mean)  # group means
aggregate(response, by = list(trt), FUN = sd)    # group standard deviations
fit <- aov(response ~ trt)  # fit the one-way ANOVA
summary(fit)
library(gplots)
# Group means with 95% confidence intervals
plotmeans(response ~ trt, xlab = "Treatment", ylab = "Response",
main = "Mean Plot \nwith 95% CI")
detach(cholesterol)
# Multiple comparisons between treatment groups
#Method 1: Tukey honest significant differences
TukeyHSD(fit)
par(las = 2) # rotate axis labels perpendicular to the axes
par(mar = c(5,8,4,2)) # widen the left margin to fit the pairwise-comparison labels
plot(TukeyHSD(fit))
#Method 2: multcomp::glht with Tukey contrasts
library(multcomp)
par(mar = c(5,4,6,2))
tuk <- glht(fit, linfct = mcp(trt = "Tukey"))
plot(cld(tuk, .05), col = "lightgrey") # cld(): compact letter display of significance groupings
# Groups sharing a letter in the display are not significantly different
# at the 5% level.
# Checking model assumptions
# Normality of residuals
library(car)
qqPlot(lm(response ~ trt, data = cholesterol), simulate = T, main = "QQ Plot")
# With simulate = TRUE a 95% point-wise confidence envelope is drawn;
# qqPlot() here takes an lm() fit.
# Points falling inside the 95% envelope are consistent with normality.
# Homogeneity of variances
bartlett.test(response ~ trt, data = cholesterol)
# A large p-value (p = 0.9653) gives no evidence against equal group variances.
# Outlier detection (Bonferroni-adjusted test on the fitted model)
outlierTest(fit)
# No significant outliers reported (adjusted p > 1 is shown as NA).
# Together the Q-Q plot, Bartlett test and outlier test suggest the
# one-way ANOVA assumptions are reasonable for these data.
|
09c566329ddf3b469ce293bbf531088427fabde5
|
15e79da5728a9d3aea403753a19487e5fc05ca67
|
/others/Dice/RSS/r/analysis.R
|
b14628d6319ebde938575e0ba6b130236ce9efc2
|
[] |
no_license
|
jose-dev/coursera
|
d690289e77c00d218461af64626cf047f5038c6e
|
a644badb9a56b4293affad936fcb906c64de31fe
|
refs/heads/master
| 2021-01-17T15:02:33.592741
| 2017-03-28T16:46:55
| 2017-03-28T16:46:55
| 23,797,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,147
|
r
|
analysis.R
|
#################################################################
#################################################################
#################################################################
##
## load files
## Two exports of estate-agent review data: one review per row, and a
## pre-aggregated file of monthly average ratings.
##
mydata <- read.csv("rss_data_r.csv")
mydata_avg <- read.csv("rss_data_r_avrg.csv")
##
## create field with shorter agent names
## Default everything to 'choices', then override the three known agents.
##
mydata$agent_name <- 'choices'
mydata$agent_name[mydata$agent == 'bradleys-estate-agents'] <- 'bradleys'
mydata$agent_name[mydata$agent == 'manning-stainton'] <- 'manning'
mydata$agent_name[mydata$agent == 'reeds-rains'] <- 'reeds'
mydata_avg$agent_name <- 'choices'
mydata_avg$agent_name[mydata_avg$agent == 'bradleys-estate-agents'] <- 'bradleys'
mydata_avg$agent_name[mydata_avg$agent == 'manning-stainton'] <- 'manning'
mydata_avg$agent_name[mydata_avg$agent == 'reeds-rains'] <- 'reeds'
#################################################################
#################################################################
#################################################################
## First figure: two panels side by side
par(mfrow=c(1,2))
##
## distribution of ratings (one bar per score 1-5)
##
barplot(table(mydata$rating), xlab="Rating Score", ylab="Number of Reviews", col=c("red", "orange", "yellow", "cyan", "green"), main="Distribution Of Rating Scores")
##
## reviews per agent, stacked by rating score
##
barplot(table(mydata$rating, mydata$agent_name), xlab="Agent", ylab="Number Of Reviews", col=c("red", "orange", "yellow", "cyan", "green"), main="Rating Scores By Agent")
#################################################################
#################################################################
#################################################################
## Second figure: two panels side by side
par(mfrow=c(1,2))
##
## distribution of rating over time (stacked by score, one bar per year)
##
barplot(table(mydata$rating, mydata$yyyy), xlab="year", ylab="Number of Reviews", col=c("red", "orange", "yellow", "cyan", "green"), main="Rating Scores per Year")
##
## distribution of monthly average rating per year (from 2012)
##
boxplot(avg_rating~yyyy, data=mydata_avg, col="grey", xlab="Year", ylab="Monthly Average Rating", main="Monthly Average Ratings per Year" )
|
3293ee65d5d4cd0796359d09bd9f286fa98cfb5b
|
24605325877de4199fd101b9eb2c7fc090b5cd52
|
/R/pathrow_num.R
|
8a10ac279c738c6e6763489c5d04a1311f4a2272
|
[] |
no_license
|
azvoleff/wrspathrow
|
aa9d7419c3e50889cab304557395e28386ba091b
|
8acade80b96f0dbedb9a7308e17dfba1d74b677b
|
refs/heads/master
| 2020-08-27T03:23:47.506823
| 2015-12-04T13:20:21
| 2015-12-04T13:20:21
| 15,712,548
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,761
|
r
|
pathrow_num.R
|
#' Load the WRS polygons matching the requested WRS system and mode(s)
#'
#' Internal helper: selects the WRS-1 or WRS-2 polygon dataset shipped in
#' the wrspathrowData package and subsets it to the requested orbital
#' mode(s).
#'
#' @param wrs_type 1 (WRS-1) or 2 (WRS-2); numeric or character.
#' @param wrs_mode 'D' (descending), 'A' (ascending), or c('D', 'A').
#' @return The WRS polygons restricted to rows whose MODE is in wrs_mode.
load_wrs_data <- function(wrs_type, wrs_mode) {
    # Validate the mode first so a bad call fails fast.  all() is required:
    # wrs_mode may legitimately be c('D', 'A') (as the error message says),
    # and the previous scalar test errored on vector input in modern R.
    if (!all(wrs_mode %in% c('D', 'A'))) {
        stop('wrs_mode must be "D", "A" or c("D", "A")')
    }
    # == (not identical) so both numeric 2 and character "2" are accepted;
    # the generic's default is the character '2'.
    if (wrs_type == 2) {
        wrs_polys <- wrs2_asc_desc
    } else if (wrs_type == 1) {
        wrs_polys <- wrs1_asc_desc
    } else {
        stop('wrs_type must be 1 or 2')
    }
    return(wrs_polys[wrs_polys@data$MODE %in% wrs_mode, ])
}
#' Subset WRS polygons to those intersecting a geometry
#'
#' Internal helper: dissolves x to a single geometry, keeps only the WRS
#' path/row polygons that intersect it, and orders the result by PATH then
#' ROW.  Returns either the polygons themselves or a plain data.frame of
#' PATH/ROW numbers.
#'
#' @importFrom rgeos gIntersects gUnaryUnion
intersect_wrs_polys <- function(wrs_polys, x, as_polys) {
    # One logical per WRS polygon: does it touch the dissolved geometry of x?
    hits <- as.logical(gIntersects(wrs_polys, gUnaryUnion(x), byid=TRUE))
    if (!any(hits)) {
        stop('no intersecting pathrows found')
    }
    matched <- wrs_polys[hits, ]
    matched <- matched[order(matched$PATH, matched$ROW), ]
    if (as_polys) {
        return(matched)
    }
    # Default: collapse to a data.frame of the path/row identifiers only
    return(data.frame(PATH=matched@data$PATH, ROW=matched@data$ROW))
}
#' Get WRS-2 path/row numbers for a given spatial object
#'
#' Dispatches on the class of \code{x} (Raster or Spatial); each method
#' reprojects \code{x} to the CRS of the WRS polygons and intersects.
#'
#' @export pathrow_num
#' @import methods
#' @import wrspathrowData
#' @param x a spatial object
#' @param wrs_type 1 (for WRS-1) or 2 (for WRS-2)
#' @param wrs_mode either 'D' for descending (daytime) or 'A' for ascending
#' @param as_polys if FALSE (default) return a data.frame. If TRUE, return a
#' \code{SpatialPolygonsDataFrame}.
#' @return data.frame with path and row as integers, or, if as_polys=TRUE, a
#' \code{SpatialPolygonsDataFrame}
#' @examples
#' \dontrun{
#' library(sp)
#'
#' pathrow_num(test_poly)
#'
#' x <- pathrow_num(test_poly, as_polys=TRUE)
#' plot(x)
#' plot(test_poly, add=TRUE, lty=2, col="#00ff0050")
#' text(coordinates(x), labels=paste(x$PATH, x$ROW, sep=', '))
#' }
# Note wrs_type defaults to the character '2'; load_wrs_data compares with
# ==, so numeric 2 works as well.
setGeneric("pathrow_num", function(x, wrs_type='2', wrs_mode='D',
as_polys=FALSE) {
standardGeneric("pathrow_num")
})
#' @importFrom raster extent projectExtent crs
#' @importFrom rgeos gIntersects
#' @aliases pathrow_num,Raster-method
# Raster method: only the raster's extent matters, so project the extent to
# the WRS polygons' CRS and intersect the resulting rectangle.
setMethod("pathrow_num", signature(x="Raster"),
function(x, wrs_type, wrs_mode, as_polys) {
wrs_polys <- load_wrs_data(wrs_type, wrs_mode)
x_wgs84 <- projectExtent(x, crs=crs(wrs_polys))
# Convert the (projected) extent rectangle into a SpatialPolygons object
x_wgs84_sp <- as(extent(x_wgs84), 'SpatialPolygons')
return(intersect_wrs_polys(wrs_polys, x_wgs84_sp, as_polys))
}
)
#' @importFrom rgeos gIntersects
#' @importFrom sp CRS proj4string spTransform
#' @import rgdal
#' @aliases pathrow_num,Spatial-method
# Spatial method: reproject the actual geometry (not just its extent) to the
# WRS polygons' CRS before intersecting.
setMethod("pathrow_num", signature(x="Spatial"),
function(x, wrs_type, wrs_mode, as_polys) {
wrs_polys <- load_wrs_data(wrs_type, wrs_mode)
x_wgs84 <- spTransform(x, CRS(proj4string(wrs_polys)))
return(intersect_wrs_polys(wrs_polys, x_wgs84, as_polys))
}
)
|
6269db3d2f7072393f55cc59efb349d43537ead6
|
a7f65d74dfba2a8482c24d3b0dc370030e555cc9
|
/funcirr.R
|
433b23703bbc4a0fd4cddf44093b2bea95513133
|
[] |
no_license
|
meenasirisha145/meenar
|
ca0e51ba6c21be1cd572b1ce680f4d7b697985d6
|
d5ab04c1893f7a2a0570536a22981723491baee5
|
refs/heads/master
| 2021-01-19T06:04:18.663790
| 2018-05-08T14:22:04
| 2018-05-08T14:22:04
| 100,589,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
funcirr.R
|
# Internal rate of return for an instalment schedule.
#
# Duration     - total number of instalments
# inst_lft     - instalment amount while "lifted" (first lft_time - 1 periods)
# inst_nonlft  - instalment amount for the remaining periods
# lft_time     - period at which the lifted phase ends
#
# yield    = Duration * inst_lft plus a fixed 1000 per lifted period
# totalamt = total actually paid across both phases
# IRR is the relative difference (totalamt - yield) / totalamt.
IRR <- function(Duration, inst_lft, inst_nonlft, lft_time) {
  lifted_periods <- lft_time - 1
  remaining_periods <- Duration - lft_time + 1
  yield <- Duration * inst_lft + lifted_periods * 1000
  totalamt <- lifted_periods * inst_lft + remaining_periods * inst_nonlft
  (totalamt - yield) / totalamt
}
# Early exit from the lifted phase (period 4 of 40)
ii <- IRR(40, 5000, 7000, 4)
ii
# Late exit (period 39 of 40) - yields a negative rate
ii2 <- IRR(40, 5000, 7000, 39)
ii2
|
6637770f9033cab5210f9682db845d6a940dddb2
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/intcensROC/inst/doc/intcensROC.R
|
3007415c908d50c4e312958c1e1da27f562c60a1
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,705
|
r
|
intcensROC.R
|
# Generated by knitr::purl() from the intcensROC vignette - chunk markers
# (## ---- ...) delimit the original Rmd chunks; edit the vignette, not this.
## ----setup1, include=FALSE, echo=FALSE----------------------------------------
require(knitr)
## ----setup2, include=FALSE,echo=FALSE-----------------------------------------
old_ops <- options(width=80) # make the printing fit on the page
set.seed(1121)   # make the results repeatable
stdt<-date()
## ----beta, eval=FALSE---------------------------------------------------------
#  res <- intcensROC(U, V, Marker, Delta, PredictTime, gridNumber = 500)
## ----var, eval=FALSE----------------------------------------------------------
#  auc <- intcensAUC(ROCdata)
## ----startOffExample----------------------------------------------------------
library(intcensROC)
## example interval censored data
## U/V are the censoring interval bounds, Marker the biomarker value and
## Delta the censoring indicator (1 = left, 2 = interval, 3 = right).
U <- runif(100, min = 0.1, max = 5)
V <- runif(100, min = 0.1, max = 5) + U
Marker <- runif(100, min = 5, max = 10)
Delta <- sample.int(3, size = 100, replace = TRUE)
pTime <- 4
## compute the ROC curve
res <- intcensROC(U, V, Marker, Delta, pTime, gridNumber = 500)
head(res)
##compute the AUC
auc <- intcensAUC(res)
print(auc)
## ----inputPara, eval=TRUE-----------------------------------------------------
library(copula)
# f: root of this function (in x) gives the upper bound of the censoring
# window that achieves the target right-censoring probability 'censor'.
f<-function(x,L0,rate,censor){
1/((x-L0)*rate)*exp(-L0*rate)-1/((x-L0)*rate)*exp(-x*rate)-censor}
# dataSim: simulate interval-censored data where event time (exponential)
# and marker (scaled beta) are linked through a Clayton copula with the
# requested Kendall's tau.
dataSim <- function(kendall_tau = 0.3, n = 100, rho = 0.3, lambda = log(2)/6)
{
b_alpha <- 2.35
b_beta <- 1.87
scale <- 10
# Convert Kendall's tau to the Clayton copula parameter
kendall_tau <- iTau( claytonCopula(), kendall_tau)
Int_cop <- claytonCopula(param = kendall_tau, dim = 2)
Int_mvdc <- mvdc(Int_cop, c("exp","beta"), paramMargins =
list(list(rate = lambda), list(shape1=b_alpha,shape2=b_beta)))
Int_obs_data <- rMvdc(n, Int_mvdc)
colnames(Int_obs_data) <- c("event_time", "marker")
Int_obs_data[,"marker"] <- Int_obs_data[,"marker"]*scale
# Build the observation windows [U, V]
L0 <-0.1; size <-n; U <-rep(0,size)
L <-uniroot(f, lower = 10^(-6), upper = 500, tol=0.000001,
L0=L0, rate=lambda, censor=rho)
V <-runif(size,L0,L$root)
for (i in 1:size)
U[i] <-runif(1,0,(V[i]-L0))
# Censoring indicators: 1 = event before U, 2 = inside [U, V], 3 = after V
delta_1 <- Int_obs_data[ ,"event_time"] < U
delta_2 <- Int_obs_data[ ,"event_time"] >= U&
Int_obs_data[ ,"event_time"] <= V
delta_3 <- Int_obs_data[ ,"event_time"] > V
data <- data.frame(U = U, V = V, delta = delta_1+2*delta_2+3*delta_3,
marker=Int_obs_data[,"marker"])
}
## ----inputPara_1, eval=FALSE, echo=TRUE---------------------------------------
# library(copula)
# f<-function(x,L0,rate,censor){
# 1/((x-L0)*rate)*exp(-L0*rate)-1/((x-L0)*rate)*exp(-x*rate)-censor}
# dataSim <- function(kendall_tau = 0.3, n = 100, rho = 0.3, lambda = log(2)/6)
# {
# b_alpha <- 2.35
# b_beta <- 1.87
# scale <- 10
# kendall_tau <- iTau( claytonCopula(), kendall_tau)
# Int_cop <- claytonCopula(param = kendall_tau, dim = 2)
# Int_mvdc <- mvdc(Int_cop, c("exp","beta"), paramMargins =
# list(list(rate = lambda),
# list(shape1=b_alpha,shape2=b_beta)))
# Int_obs_data <- rMvdc(n, Int_mvdc)
# colnames(Int_obs_data) <- c("event_time", "marker")
## ----inputPara_2, eval=FALSE, echo=TRUE---------------------------------------
# Int_obs_data[,"marker"] <- Int_obs_data[,"marker"]*scale
# L0 <-0.1; size <-n; U <-rep(0,size)
# L <-uniroot(f, lower = 10^(-6), upper = 500, tol=0.000001,
# L0=L0, rate=lambda, censor=rho)
# V <-runif(size,L0,L$root)
# for (i in 1:size)
# U[i] <-runif(1,0,(V[i]-L0))
# delta_1 <- Int_obs_data[ ,"event_time"] < U
# delta_2 <- Int_obs_data[ ,"event_time"] >= U&
# Int_obs_data[ ,"event_time"] <= V
# delta_3 <- Int_obs_data[ ,"event_time"] > V
# data <- data.frame(U = U, V = V, delta =
# delta_1+2*delta_2+3*delta_3,
# marker=Int_obs_data[,"marker"])
# }
## ----loadpkg,out.width='2.5in', fig.width=5, fig.height=4---------------------
# Worked example: simulate correlated data, estimate and plot the ROC curve
# at prediction time 12.
mydata <- dataSim(kendall_tau = 0.7, n = 300, rho = 0.3, lambda = log(2)/24)
roc <- intcensROC(U=mydata[,"U"],V=mydata[,"V"], Marker=mydata[,"marker"],
Delta=mydata[,"delta"], PredictTime=12)
print(intcensAUC(roc))
plot(roc$fp, roc$tp, type = "l", lwd = 1.2, col="blue", main = "Example ROC",
xlab = "False Positive Rate", ylab = "True Positive Rate" )
## ----sessinfo, echo=FALSE, include=TRUE, results='asis'-----------------------
toLatex(sessionInfo(), locale=FALSE)
## ----times, echo=FALSE, include=TRUE------------------------------------------
print(paste("Start Time",stdt))
print(paste("End Time  ",date()))
### reset options
options(old_ops)
|
256361732182c8d779a17b9d40bfae75f799dd5f
|
5a05dd4f8fbc318b05595a003ecd13cd0c917fb4
|
/plot4.R
|
502e9596b0a4bc31db9eaad19b6060c4bd3817e6
|
[] |
no_license
|
RitCh123/ExploratoryDataAnalysis1
|
a78a48a9ab37ea592a590dc57a8e6ad5eb13020a
|
0632e14538e2e7c907dca218c876acdb445da21d
|
refs/heads/main
| 2023-06-07T15:18:36.894922
| 2021-06-23T08:02:43
| 2021-06-23T08:02:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,120
|
r
|
plot4.R
|
# Exploratory Data Analysis, plot 4: 2x2 panel of household power-consumption
# measurements for 1-2 Feb 2007, written to plot4.png.
unzip("C:/Users/antop/Documents/R/ExploratoryDataAnalysis1/exdata_data_household_power_consumption.zip")
# '?' marks missing values in this dataset
dt <- read.table("C:/Users/antop/Documents/R/ExploratoryDataAnalysis1/household_power_consumption.txt", header =TRUE, sep=";", na.strings="?")
# Keep only the two target days (dates are d/m/Y strings at this point)
data <- subset(dt, dt$Date %in% c("1/2/2007", "2/2/2007"))
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Combine date + time into a single POSIXct timestamp for the x-axis
datetime <- paste(data$Date, data$Time)
data$datetime <- as.POSIXct(datetime)
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
# Top-left: global active power over time
plot(data$Global_active_power ~ data$datetime, type="l", xlab="", ylab="Global Active Power (kilowatts)")
# Top-right: voltage over time
plot(data$Voltage ~ data$datetime, type="l", xlab="datetime", ylab="Voltage")
# Bottom-left: the three sub-metering series on one panel
# NOTE(review): the y-label repeats "Global Active Power" here - presumably
# "Energy sub metering" was intended; confirm against the assignment spec.
with(data, {plot(Sub_metering_1 ~ datetime, type ="l", ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2 ~datetime, col="red")
lines(Sub_metering_3 ~datetime, col="blue")})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, legend =c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power over time
plot(data$Global_reactive_power~data$datetime, type="l", xlab="datetime", ylab="Global_Reactive_Power")
dev.off()
|
894813b18ab34c11df87560daeb14087258c531d
|
9f755220afb902a6179b1723a9716b74887a62ef
|
/R/get_sigmabeta_from_h.R
|
403b015f584eeadb1eecda2dc7af39655dcacfa2
|
[] |
no_license
|
CreRecombinase/MCMCArmadillo
|
36c5e452b194c23bf025cae02826c14ee7be41d5
|
e961cf8c57ccab0a3928b3c0538df15dbb08a85f
|
refs/heads/master
| 2021-08-31T15:48:32.630163
| 2017-12-21T23:24:22
| 2017-12-21T23:24:22
| 115,055,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 180
|
r
|
get_sigmabeta_from_h.R
|
# Convert a heritability value h into the implied prior effect-size variance.
#
# h     - heritability (fraction of variance explained), in (0, 1)
# gamma - 0/1 inclusion indicators, same length as diag(Sigma)
# Sigma - covariance matrix; only its diagonal is used
# X     - predictor values; only the mean of X^2 enters the formula
# T     - accepted for signature compatibility but unused in the body
#         (NOTE(review): presumably a leftover argument - confirm with callers)
#
# Returns h * sum(diag(Sigma)) divided by
# (1 - h) * sum(diag(Sigma)[gamma == 1]) * mean(X^2).
get_sigmabeta_from_h = function(h, gamma, Sigma, X, T){
  diag_sigma <- diag(Sigma)
  mean_sq_x <- sum(X^2) / length(X)
  numerator <- h * sum(diag_sigma)
  denominator <- (1 - h) * sum(diag_sigma[gamma == 1]) * mean_sq_x
  numerator / denominator
}
|
2430bf4e2f783da22047b2caf432fac816e574bb
|
db4ca3e4072b39f69d84cd28e8e6b0370e62e1b0
|
/R Nuts and Bolts/RNutsandBolts.R
|
327f52405e635c8757e48fbca622cb3ba9ee493c
|
[] |
no_license
|
hannahnavarro9827/Prelim_Repo
|
59bf9b4c8297cfd5ae2cd95db2b30577505fa94d
|
4ad18666cf7ca8c20f387e240d3dae7efff1f713
|
refs/heads/master
| 2020-03-22T05:08:26.991237
| 2018-07-24T07:56:59
| 2018-07-24T07:56:59
| 139,544,850
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,205
|
r
|
RNutsandBolts.R
|
## R Nuts and Bolts - course practice notes.
## Fixed in review: two lines that did not parse at all (x{"bar"} and
## x[(1, 3)]), the read.cvs("foot.txt") typo, C() for c(), s$foo for the
## undefined object s, "food" for "foo", and a dangling "x<-" assignment.

## Entering Input
x <- 1
print(x)
x
msg <- "hello"

## Printing
x <- 1:20
x

## Creating Vectors
x <- c(0.5, 0.6)
x <- c(TRUE, FALSE)
x <- c(T, F)            # T/F shorthand for TRUE/FALSE (avoid in real code)
x <- 9:29
x <- c(1+0i, 2+4i)

## Missing Values
x <- c(1, 2, NA, 10, 3)
is.na(x)
is.nan(x)
x <- c(1, 2, NaN, NA, 4)
is.na(x)                # NaN counts as NA ...
is.nan(x)               # ... but NA is not NaN

## Data Frames
x <- data.frame(foo = 1:4, bar = c(T, T, F, F))
x
nrow(x)
ncol(x)

## Names
x <- 1:3
names(x)
names(x) <- c("foo", "bar", "norf")
x
names(x)
m <- matrix(1:4, nrow = 2, ncol = 2)
dimnames(m) <- list(c("a", "b"), c("c", "d"))
m

## dput-ting R Objects
y <- data.frame(a = 1, b = "a")
dput(y)
dput(y, file = "y.R")
new.y <- dget("y.R")
new.y

## Dumping R Objects
x <- "foo"
y <- data.frame(a = 1, b = "a")
dump(c("x", "y"), file = "data.R")
rm(x, y)
source("data.R")
y
x

## File Connections
str(file)
# str(file) echoes roughly:
#   function(description = "", open = "", blocking = TRUE,
#            encoding = getOption("encoding"))

## Connections
con <- file("foo.txt", "r")
data <- read.csv(con)
close(con)
data <- read.csv("foo.txt")   # was read.cvs("foot.txt"): both typos fixed

## Reading lines of a text file (gzfile opens gzip-compressed connections)
con <- gzfile("words.gz")
x <- readLines(con, 10)
x

## Subsetting
x <- c("a", "b", "c", "c", "d", "a")
x[1]
x[2]
x[1:4]
x[x > "a"]
u <- x > "a"
x[u]

## Subsetting Lists
x <- list(foo = 1:4, bar = 0.6)
x[1]
x[[1]]
x$bar
x[["bar"]]      # was x[{"bar"}]
x["bar"]        # was x{"bar"} (syntax error)
x <- list(foo = 1:4, bar = 0.6, baz = "hello")
x[c(1, 3)]      # was x[(1, 3)] (syntax error)
x <- list(foo = 1:4, bar = 0.6, baz = "hello")
name <- "foo"   # was "food", which matches no element
x[[name]]       # [[ accepts a computed index
x$name          # NULL: $ looks for a literal element called "name"
x$foo           # was s$foo (s is undefined)

## Subsetting Nested Elements
x <- list(a = list(10, 12, 14), b = c(3.14, 2.81))
x[[c(1, 3)]]
x[[1]][[3]]
x[[c(2, 1)]]

## Subsetting a Matrix
x <- matrix(1:6, 2, 3)
x[1, 2]
x[2, 1]
x[1, ]
x[, 2]
x <- matrix(1:6, 2, 3)
x[1, 2]
x[1, 2, drop = FALSE]   # keep the matrix class instead of dropping to vector
x <- matrix(1:6, 2, 3)
x[1, ]
x[1, , drop = FALSE]

## Partial Matching
x <- list(aardvark = 1:5)
x$a                     # $ partially matches "aardvark"
x[["a"]]                # [[ does not, by default
x[["a", exact = FALSE]]

## Removing NA Values
x <- c(1, 2, NA, 4, NA, 5)
bad <- is.na(x)
x[!bad]
x <- c(1, 2, NA, 4, NA, 5)
y <- c("a", "b", NA, "d", NA, "F")   # was C(...): wrong function
good <- complete.cases(x, y)
good
x[good]
y[good]
airquality[1:6, ]
good <- complete.cases(airquality)
airquality[good, ][1:6, ]

## Vectorized Operations
x <- 1:4; y <- 6:9
x + y
x > 2
x >= 2
y == 8
x * y
x / y
x <- matrix(1:4, 2, 2); y <- matrix(rep(10, 4), 2, 2)
x / y      # element-wise division
x %*% y    # true matrix multiplication
|
45801a24af634178565087fd833f3192fd8f0a8c
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/inst/examples/PlantCounts.R
|
6b86e90b8e509ee3ec0497b157e5e901ec668b97
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| false
| 697
|
r
|
PlantCounts.R
|
# Base-graphics LOWESS curves of species counts against altitude:
# left panel varies the smoother span, right panel shows bootstrap curves.
data(PlantCounts, package = "MSG")
par(mar = c(4.5, 4.5, .1, 0.2), mfrow = c(1, 2), pch = 20)
with(PlantCounts, {
plot(altitude, counts,
panel.first = grid(), col = rgb(0, 0, 0, 0.3))
for (i in seq(0.01, 1, length = 70)) {
lines(lowess(altitude, counts, f = i), col = rgb(
0.4,
i, 0.4
), lwd = 1.5) # vary the LOWESS span parameter f (greener = larger span)
}
plot(altitude, counts, col = rgb(0, 0, 0, 0.3))
for (i in 1:200) {
# draw 300 row indices with replacement (bootstrap resample)
idx = sample(nrow(PlantCounts), 300, TRUE)
lines(lowess(altitude[idx], counts[idx]), col = rgb(0, 0, 0, 0.1),
lwd = 1.5)
}
})
|
a904910f9a955cbb309998a6ec08bfd8e699992a
|
f8eeee94ea3526cffb51d78bc8273bc66fd764a1
|
/sim_lim_truth.R
|
b27eee481f8465c23a37e8d1af25b8c6cf5f1572
|
[
"MIT"
] |
permissive
|
waterknows/regression-adjustments
|
cf351f4c6ef592aa34c281a4c98b3013ce4645c2
|
b3e03e5f12e2a52f910296503404d4730a69dbb7
|
refs/heads/master
| 2023-03-17T21:00:53.779072
| 2019-06-13T18:59:12
| 2019-06-13T18:59:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,610
|
r
|
sim_lim_truth.R
|
library(igraph)
library(foreach)
library(doParallel)
library(dplyr)
library(tidyr)
library(broom)
source('functions/data_generators.R')
source('functions/covariate_functions.R')
source('functions/response_functions.R')
source('functions/existing_estimators.R')
source('functions/proposed_estimators.R')
source('functions/variance_estimators.R')
source('functions/precompute_matrices.R')
# One Monte-Carlo draw of the true average treatment effect (ATE) on graph g:
# simulate responses under full treatment (w = 1 for every node) and full
# control (w = 0), and return the difference in means tagged with the
# parameter-set id pid.
calculate_true_ATE <- function(param, g, pid) {
  n_nodes <- vcount(g)
  treated <- dynamic_time_response(w = rep(1, n_nodes), g, param)
  control <- dynamic_time_response(w = rep(0, n_nodes), g, param)
  c(pid = pid, ATE = mean(treated) - mean(control))
}
#load('data/caltech.Rdata')
# Loads g_sm_large, the small-world graph used below
load('data/smallworld_large.Rdata')
set.seed(2018)
n_reps = 5000
n_cores = 50
registerDoParallel(cores=n_cores)
# params = purrr::cross(list(
#   b_intercept = 0,
#   b_direct = 1,
#   b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
#   max_t = c(2, 4),
#   is_probit = TRUE,
#   noise_sd = c(1)
# ))
# Full factorial grid over spillover strength and time horizon (10 settings)
params = purrr::cross(list(
b_intercept = -0.5,
b_direct = 1,
b_spill = c(0, 0.25, 0.5, 0.75, 1),#0.1, 0.2, 0.3, 0.4, 0.5),
max_t = c(2, 4),
is_probit = TRUE,
noise_sd = 1 # c(1, 3)
))
print('Calculating true ATE...')
# Calculate true ATE by simulation: outer loop (sequential, %do%) walks the
# parameter grid; inner loop (parallel, %dopar%) draws n_reps Monte-Carlo
# replicates, then the draws are averaged per parameter set (pid).
true_ATE = foreach(i = 1:length(params), .combine=rbind) %do% {
param = params[[i]]
print(unlist(param))
foreach(rep = 1:n_reps, .combine=rbind, .inorder=FALSE) %dopar% {
calculate_true_ATE(param, g_sm_large, pid=i)
}
} %>%
data.frame %>%
group_by(pid) %>%
summarise(ATE=mean(ATE))
print(proc.time())
write.csv(true_ATE, file='results/sim_lim/true_ATE_probit.csv', row.names=FALSE)
|
afef3f028e0a3cbfbdb33fac05783dc84a474527
|
6ad95fc7067d6ab32bee6f1a6ad730b984d12579
|
/ashiq/R codes/chapter 2/example1_sec2_5.R
|
37bd022a99e7dd2e022a8bbd52b51548f0ff9d89
|
[] |
no_license
|
sahridhaya/BitPlease
|
d881f95c404ae9a433c79a8f6519ea7eaba9950a
|
04e1c38944f0ee24eb1377ced87e6550253fbb2e
|
refs/heads/master
| 2020-04-21T11:36:10.914922
| 2020-04-06T09:02:47
| 2020-04-06T09:02:47
| 169,531,342
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
example1_sec2_5.R
|
#Example 1, section 2.5, page 150
#Suppose that the weather in a certain city is either rainy or dry. As a result of extensive record keeping it has been determined that the probability of a rainy day following a dry day is 1/3, and the probability of a rainy day following a rainy day is 1/2. Let state D be a dry day and state R be a rainy day. Then the transition matrix of this Markov chain is:
# Rows are the current state (D, R), columns the next state (R, D), so each
# row must sum to 1: row D is (P(R|D), P(D|D)) = (1/3, 2/3) and row R is
# (P(R|R), P(D|R)) = (1/2, 1/2).
# Fix: the original filled the matrix column-wise with c(1/2,1/2, 2/3,1/3),
# giving row sums 7/6 and 5/6 - not a stochastic matrix and inconsistent
# with the stated probabilities.
# NOTE(review): the variable name T masks TRUE; kept for continuity with the
# original script, but avoid in new code.
T <- matrix(c(1/3, 2/3,
              1/2, 1/2),
            nrow = 2, ncol = 2, byrow = TRUE,
            dimnames = list(c("D", "R"), c("R", "D")))
print(T)
|
a7a32d961e6bb22589d7eb49b9345fd66f1633d1
|
bbf9b58e4ea5023f585324ef2a6db068d50d8756
|
/Second-Year/Statistics-and-Probability/cnrv/R/cerinta11.R
|
95b70c3d3cfd0558c07c1a8f96d5c6285925bcd8
|
[] |
no_license
|
DeliaDumitrescu/University-Courses
|
52f3e64e554d57fd41231dfa1299205ac281df2e
|
9dd0b8d808c5b70599b8eeb0e632abca1a0ac75f
|
refs/heads/master
| 2023-06-04T15:07:48.702751
| 2021-06-30T14:14:25
| 2021-06-30T14:14:25
| 338,532,537
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
cerinta11.R
|
#' Marginal density of X from a joint density
#'
#' Returns a function of x that integrates the joint density pdfcom(x, y)
#' over the Y-range [c, d].  The X-range [a, b] is accepted so the signature
#' mirrors pdfmY, but it is not needed for this marginal.
#' @export
pdfmX <- function(pdfcom, a, b, c, d) {
  function(x) {
    marginal_at_x <- integrate(function(y) pdfcom(x, y), lower = c, upper = d)
    marginal_at_x$value
  }
}
#' Marginal density of Y from a joint density
#'
#' Returns a function of y that integrates the joint density pdfcom(x, y)
#' over the X-range [a, b].  The Y-range [c, d] is accepted so the signature
#' mirrors pdfmX, but it is not needed for this marginal.
#' @export
pdfmY <- function(pdfcom, a, b, c, d) {
  function(y) {
    marginal_at_y <- integrate(function(x) pdfcom(x, y), lower = a, upper = b)
    marginal_at_y$value
  }
}
|
5d8fb0acfa50815ddbe3cd224dea016b9c283272
|
5cf83c6dca8734ccd75ba777fc1bfbf436b1bc50
|
/R/get_server_photos.R
|
f1300a1da8d16ab21c28a3237275a546aa0f4110
|
[
"MIT"
] |
permissive
|
martonelab/ptm
|
0165f41f3815f44a548ba305d296fc56ab52c5f9
|
95751c3f5ee20954dc7fcd19eda4ffd264081ec9
|
refs/heads/master
| 2021-04-08T19:01:52.411386
| 2020-07-08T03:24:12
| 2020-07-08T03:24:12
| 248,802,414
| 0
| 0
|
NOASSERTION
| 2020-03-20T16:28:39
| 2020-03-20T16:28:38
| null |
UTF-8
|
R
| false
| false
| 880
|
r
|
get_server_photos.R
|
#' Take some ptm numbers and find the photos
#'
#' @param ptm int a ptm number
#' @param results_folder chr destination folder
#'
#' @return
#' @export
#'
#' @examples
#' ### Not run:
#' #get_server_photos(5)
#' ## End(Not run)
get_server_photos <- function(ptm, results_folder = tempdir()){
path <- "/Volumes/martonelab/Photos"
stopifnot(dir.exists(path))
get_ptm <- dplyr::select(ptm, `PTM#`)
find_photo <- dplyr::mutate(get_ptm, ptm = paste0("PTM",`PTM#`))
my_files <- purrr::map(find_photo$ptm,
~list.files(path = path,
pattern = .,
all.files = T,
full.names = T))
# identify the folders
new_folder <- paste0(here::here(), results_folder)
# copy the files to the new folder
my_files <- purrr::map(my_files,
~file.copy(., new_folder))
}
|
ee976fda9c9434403bfcb7cdec43dc1611109d0d
|
474eb4da4ad0f1f5face04049f933d4f911fe5d0
|
/materials/Day_2_7_practical_binomial.R
|
2dbabd685c6eec53e2aebbe00952cf00ba6402b2
|
[] |
no_license
|
bowlerbear/bayesian-intro
|
6cc7905369bd67e3fa839018fa5e367916d3064e
|
b1805ef6f0f22078af4378514276689924e6335c
|
refs/heads/master
| 2022-12-29T16:18:06.715299
| 2020-10-14T09:01:34
| 2020-10-14T09:01:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,697
|
r
|
Day_2_7_practical_binomial.R
|
# Bayesian binomial GLM practical: fit logit(p) = a + b*x with rstan and
# compare credible/prediction intervals against the frequentist glm() fit.
rm(list=ls())
library(rstan)
library(coda)
library(BayesianTools)
library(boot) # for logit function
rstan_options(auto_write = TRUE)
options(mc.cores = 3)
#------------------------------------------------------------------------------
# statistical model
#------------------------------------------------------------------------------
# Suppose we want to measure the effect of a continuous variable on presence / absence data.
# E.g. presence of a species in different locations, x = temperature in location.
# Now we have k trials per observation and y counts the number of successful trials, so y\in{0,1,2,...,k}
# Statistical model:
# y_i ~ binomial(k,p_i)
# logit(p_i) = a+b*x_i
# or, equivalently: p_i = inverse.logit(a+b*x_i)
# p_i is the probability of presence of the species, the distribution of y_i is binomial with k trials.
# We assume a linear relationship of logit(p) with temperature. (Realistically, a hump-shaped relationship would make more sense.)
# inverse.logit transforms the values of the whole axis to the interval (0,1)
# Research question: is there a positive effect of the predictor to the presence of the species?
#------------------------------------------------------------------------------
# generate data
#------------------------------------------------------------------------------
set.seed(123) # initiate random number generator for reproducability
n = 50
x = sort(runif(n, 0, 1))
# True parameter values used for the simulation (to be recovered by the fit)
a = -2
b = 5
p = inv.logit(a+b*x)
y = rbinom(n=n, size=4, prob=p)
plot(x,y)
lines(x,4*p)
#------------------------------------------------------------------------------
# straightforward model
#------------------------------------------------------------------------------
data = list(n=n,
x=x,
y=y)
# Stan model: weakly informative normal(0, 10) priors on both coefficients,
# binomial likelihood with k = 4 trials per observation.
stan_code = '
data {
int n;
vector[n] x;
int y[n];
}
parameters {
real a;
real b;
}
model {
// priors
a ~ normal(0, 10);
b ~ normal(0, 10);
// likelihood
y ~ binomial(4,inv_logit(a+b*x));
}
'
stan_model = stan_model(model_code=stan_code)
# save(stan_model, file="stan_model.RData")
# load("stan_model.RData")
fit = sampling(stan_model,
data=data,
chains=3,
iter=2000,
warmup=1000
)
print(fit, digits=3, probs=c(0.025, 0.975))
plot(fit)
plot(As.mcmc.list(fit)) # from coda package
# Posterior draws as a matrix: one row per draw, columns "a", "b", "lp__"
posterior=as.matrix(fit)
str(posterior)
#------------------------------------------------------------------------------
# predictions
#------------------------------------------------------------------------------
# First, we generate credible intervals for the deterministic model. (90%, but choose as you like)
# The deterministic model is p = inv.logit(a+b*x), the probability of a successful trial.
# But there are k=4 trials, so the expected value of successful trials is 4*p.
# Later, we generate prediction intervals for the data (for y) using also the stochastic part.
x.pred = seq(from=0, to=1, by=0.01)
y.cred = matrix(0, nrow=nrow(posterior), ncol=length(x.pred))
# One fitted curve per posterior draw
for(i in 1:nrow(posterior)){
y.cred[i, ] = 4 * inv.logit(posterior[i,"a"] + posterior[i,"b"]*x.pred)
}
plot(x,y)
for(i in 1:100){
lines(x.pred, y.cred[i, ], col=adjustcolor("red", alpha.f=0.3))
}
plot(x,y)
# Posterior mean curve and pointwise 90% credible band (red)
y.cred.mean = apply(y.cred, 2, function(x) mean(x))
lines(x.pred, y.cred.mean, col="red", lwd=2)
y.cred.q05 = apply(y.cred, 2, function(x) quantile(x, probs=0.05))
lines(x.pred, y.cred.q05, col="red", lwd=2, lty=2)
y.cred.q95 = apply(y.cred, 2, function(x) quantile(x, probs=0.95))
lines(x.pred, y.cred.q95, col="red", lwd=2, lty=2)
# true relationship with true parameters a and b
# lines(x.pred, 4*inv.logit(a+b*x.pred), lwd=2, lty=1, col="blue")
# Now, we draw predicted data from the binomial distribution (k=4 trials) in the statistical model.
y.pred = matrix(0, nrow=nrow(posterior), ncol=length(x.pred))
for(i in 1:nrow(posterior)){
y.pred[i, ] = rbinom(n=length(x.pred), size=4, p=inv.logit(posterior[i,"a"] + posterior[i,"b"]*x.pred))
}
# Posterior predictive mean and 90% prediction band (blue)
y.pred.mean = apply(y.pred, 2, function(x) mean(x))
lines(x.pred, y.pred.mean, col="blue", lwd=2)
y.pred.q05 = apply(y.pred, 2, function(x) quantile(x, probs=0.05))
lines(x.pred, y.pred.q05, col="blue", lwd=2, lty=2)
y.pred.q95 = apply(y.pred, 2, function(x) quantile(x, probs=0.95))
lines(x.pred, y.pred.q95, col="blue", lwd=2, lty=2)
#------------------------------------------------------------------------------
# frequentist solution
#------------------------------------------------------------------------------
y.succ.fail = cbind(y, 4-y) # 1st column successes, 2nd column fails (if there are y successes, there must be k-y fails)
summary(glm(y.succ.fail~x, family=binomial(link="logit")))
|
cc6ab7a98df86956635dac3488b601129bc6a830
|
bad132f51935944a52a00e20e90395990afd378a
|
/tests/testthat/test_GMLGridFunction.R
|
87e44d8136e3bac27b91bca3bea144256bba660e
|
[] |
no_license
|
cran/geometa
|
9612ad75b72956cfd4225b764ed8f048804deff1
|
b87c8291df8ddd6d526aa27d78211e1b8bd0bb9f
|
refs/heads/master
| 2022-11-10T21:10:25.899335
| 2022-10-27T22:45:13
| 2022-10-27T22:45:13
| 92,486,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
test_GMLGridFunction.R
|
# test_GMLGridFunction.R
# Author: Emmanuel Blondel <emmanuel.blondel1@gmail.com>
#
# Description: Unit tests for classes inheriting GMLGridFunction.R
# Round-trip test: encode an object to XML, decode it back, and assert the
# two objects compare equal.
#=======================
require(geometa, quietly = TRUE)
require(sf)
require(testthat)
context("GMLGridFunction")
test_that("GMLGridFunction",{
testthat::skip_on_cran()
#encoding
md <- GMLGridFunction$new()
md$setSequenceRule("Linear")
md$setStartPoint(0,0)
xml <- md$encode()
expect_is(xml, "XMLInternalNode")
#decoding: rebuild the object from the XML just produced
md2 <- GMLGridFunction$new(xml = xml)
xml2 <- md2$encode()
#assert object identity (structural comparison, not XML string equality)
expect_true(ISOAbstractObject$compare(md, md2))
})
|
1a7e9d10af05ca5151fa79d6f84777b479eb499c
|
2a143733412aa21b6c095ec8510ac7311d0d0db8
|
/R/bare_thresholding.R
|
9f1ee23b743893cb522732fafeabb946dc7c2545
|
[] |
no_license
|
uk-gov-mirror/jncc.bare-peat-mapping-pilot
|
6fb7c60507178f668bd2f51f898b4e9e24bd0f0f
|
0fae6e6d7ce060cdc28a0aa8bee6c01f034ccb7a
|
refs/heads/master
| 2023-01-06T08:42:25.827443
| 2020-10-30T11:43:26
| 2020-10-30T11:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,483
|
r
|
bare_thresholding.R
|
## ############################## ##
##
## Script Name: Thresholding bare peat based on indices
##
## Author: JNCC
##
## Date Created: 2019-11-14
##
## Date Modified: 2020-10-30
##
## Licence: MIT Licence
##
##
## Abstract: function for thresholding based on indices. This will generate the specified indices and threshold based on a supplied function using these layers. The function must use the indices in alphabetical order in order to read in correctly.
##
##
##
## R version 3.6.0 (2019-04-26)
## Dependencies:
## dplyr_0.8.1 rgeos_0.4-3 rgdal_1.4-4
## raster_3.0-7 sp_1.3-1
##
## ############################## ##
#
#' thresholding bare peat based on indices
#'
#' For each image, generates the requested spectral indices (via
#' runIndices()), stacks them together with any requested raw spectral
#' bands, applies a user-supplied thresholding function across the stack,
#' and writes the classified raster to <out.path>bare/BARE_<image name>.
#' The thresholding function must take the stack layers in order: requested
#' spectral bands first (in the order given), then indices in alphabetical
#' order.
#'
#' Fixes in this revision: the output name is now computed for every image,
#' so runs with ind.name = NA no longer fail at writeRaster() with 'name'
#' undefined; and the nir/r/g/b arguments are now forwarded to runIndices()
#' (previously 4/1/2/3 were hard-coded, silently ignoring the parameters).
#'
#' @param Img.path path to the folder with imagery (assumed to end in a path
#'   separator) or a named list of image file paths / raster objects
#' @param out.path output folder to write output to (assumed to end in a
#'   path separator)
#' @param spec.bands integer vector of raw band numbers to add to the
#'   thresholding stack, or NA for indices only
#' @param ind.name name of indices; output files are named BARE_<image name>
#' @param c.fun function with which to calculate across the stack
#' @param nir near infrared band in the multispectral imagery
#' @param r red band in the multispectral imagery
#' @param g green band in the multispectral imagery
#' @param b blue band in the multispectral imagery
#' @param swir short wave infrared band in the multispectral imagery
#' @param start starting image to process, defaults to the first image
#' @param end ending image to process, if NA will default to the last image
#'
#' @return invisible NULL; called for its side effect of writing rasters
#' @export
#'
#' @examples
barethresh <- function(Img.path, out.path, spec.bands = NA, ind.name = NA,
                       c.fun, nir = 4, r = 1, g = 2, b = 3, swir = NA,
                       start = 1, end = NA) {
  # load dependencies (attached deliberately; downstream raster method
  # dispatch relies on these being on the search path)
  library(raster)
  library(rgdal)
  require(rgeos)
  require(dplyr)
  # create output folder structure <out.path>bare and <out.path>bare/temp
  # (previously the existence check and dir.create used different paths)
  bare.dir <- paste0(out.path, "bare")
  if (!file.exists(bare.dir)) {
    dir.create(bare.dir)
  }
  temp.dir <- paste0(bare.dir, "/temp")
  if (!file.exists(temp.dir)) {
    dir.create(temp.dir)
  }
  # list all images: a character Img.path is a folder to scan, anything else
  # is already a list of images
  if (inherits(Img.path, "character")) {
    Img.list <- list.files(Img.path)
  } else {
    Img.list <- Img.path
  }
  # default to processing through to the last image
  if (is.na(end)) {
    end <- length(Img.list)
  }
  # load up the runIndices function only if indices are requested
  if (!is.na(ind.name[1])) {
    source("./Functions/run_Indices.R")
  }
  # loop through images
  for (i in start:end) {
    # output name for this image, computed unconditionally (bug fix: it was
    # previously only set when indices were requested)
    if (inherits(Img.path, "character")) {
      name <- basename(paste0(Img.path, Img.list[i]))
    } else {
      name <- paste0(names(Img.list[i]), ".tif")
    }
    # gather any raw spectral bands requested for the thresholding rule
    all.band <- list()
    if (!is.na(spec.bands[1])) {
      if (inherits(Img.path, "character")) {
        spec.1 <- raster::stack(paste0(Img.path, Img.list[i]))
      } else {
        spec.1 <- raster::stack(Img.list[[i]])
      }
      # loop through spectral bands required, naming each layer "sb<band>"
      for (j in spec.bands) {
        spec.band <- list(spec.1[[j]])
        names(spec.band) <- paste0("sb", j)
        all.band <- append(all.band, spec.band)
      }
    }
    # generate and collect the requested indices
    indat.list <- list()
    if (!is.na(ind.name[1])) {
      if (inherits(Img.path, "character")) {
        runIndices(image.path = paste0(Img.path, Img.list[i]),
                   out.path = paste0(temp.dir, "/"), nir = nir, r = r,
                   g = g, b = b, swir = swir, indices = ind.name, nf = FALSE)
      } else {
        runIndices(image.path = Img.list[[i]],
                   out.path = paste0(temp.dir, "/"), nir = nir, r = r,
                   g = g, b = b, swir = swir, indices = ind.name, nf = FALSE)
      }
      # runIndices writes one sub-folder per index; folders list in
      # alphabetical order, which is why c.fun must take the indices
      # alphabetically
      Indices.list <- list.files(paste0(temp.dir, "/Indices/"))
      for (n in seq_along(ind.name)) {
        ind.1 <- raster::raster(paste0(temp.dir, "/Indices/", Indices.list[n],
                                       "/", Indices.list[n], "_", name))
        item <- list(ind.1)
        names(item) <- Indices.list[n]
        indat.list <- append(indat.list, item)
      }
    }
    # join the lists, stack, and apply the thresholding function cell-wise
    all.list <- append(all.band, indat.list)
    all.stack <- raster::stack(all.list)
    r.class <- raster::overlay(all.stack, fun = c.fun)
    # save the classified raster
    raster::writeRaster(r.class, paste0(bare.dir, "/BARE_", name),
                        overwrite = TRUE)
    print(paste(i, "of", length(Img.list), "done"))
    # remove the per-image temporary outputs
    unlink(temp.dir, recursive = TRUE)
  }
  invisible(NULL)
}
|
5dd7044f7079a036353610c2566be4cba557bde1
|
e7e99d3ad56dc57e4c7b787345608a7b4b960065
|
/Orderedlogit_Tobit.R
|
cff0b3d7d3281d7c1e1fcf01b2bcec9229fc2df0
|
[] |
no_license
|
TomoyaOzawa-DA/Mitaron
|
eb802a48444da7746f482d82672066021cf47bcd
|
19e008b8aed2b9fc393f7eb2cd2b06f8e0449629
|
refs/heads/main
| 2023-02-08T22:41:03.985911
| 2020-12-26T13:34:24
| 2020-12-26T13:34:24
| 323,225,632
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,522
|
r
|
Orderedlogit_Tobit.R
|
# Entry-count models for LAWSON / Familymart convenience-store entry decisions:
# binary logit/probit, ordered logit, and tobit on mesh-level yearly data.
library(haven)
# Read the Stata mesh-level panel data (working directory must contain it).
df <- read_dta("Mesh_data_year_new.dta")
# descriptive statistics
library(stargazer)
# Drop ID/auxiliary columns by position before summarising.
# NOTE(review): positional drops are fragile if the .dta column order changes.
df_ds <- df[, -c(1, 2, 4, 5, 10, 11, 12, 13, 14,15, 16, 17, 18, 19, 20, 21, 26, 27, 28, 29, 30, 31, 32, 33, 34, 38, 39, 40, 41, 42, 43)]
# stargazer needs a plain data.frame, not a haven tibble.
df_ds <- as.data.frame(df_ds)
stargazer(df_ds, out="stargazer.descript.tex", title="descriptive statistics", align=F, style="qje")
# Logit Model
# Entry of one chain modelled on both chains' existing-store counts,
# relative-presence dummies, monopoly dummies, and mesh controls.
out_logit_L <- glm(LAWSON_Entry ~ LAWSON_existed + LAWSON_existed_2 + Familymart_existed +
F_than_L + L_mono + pop + n_employee + super_dummy, data = df, family = binomial("logit"))
out_logit_F <- glm(Familymart_Entry ~ Familymart_existed + Familymart_existed_2 + LAWSON_existed +
L_than_F + F_mono + pop + n_employee + super_dummy, data = df, family = binomial("logit"))
# Probit Model
# Same specifications as the logit models, with a probit link.
out_probit_L <- glm(LAWSON_Entry ~ LAWSON_existed + LAWSON_existed_2 + Familymart_existed +
F_than_L + L_mono + pop + n_employee + super_dummy, data = df, family = binomial("probit"))
out_probit_F <- glm(Familymart_Entry ~ Familymart_existed + Familymart_existed_2 + LAWSON_existed +
L_than_F + F_mono + pop + n_employee + super_dummy, data = df, family = binomial("probit"))
# Ordered logit
# polr() requires the response to be an ordered factor.
df$L_Entry_new <- as.ordered(df$L_Entry_new)
df$F_Entry_new <- as.ordered(df$F_Entry_new)
library(MASS)
out_orderd_L <- polr(L_Entry_new ~ LAWSON_existed + LAWSON_existed_2 + Familymart_existed +
F_than_L + L_mono + pop + n_employee + super_dummy, data = df, Hess = TRUE, method = "logistic")
out_orderd_F <- polr(F_Entry_new ~ Familymart_existed + Familymart_existed_2 + LAWSON_existed +
L_than_F + F_mono + pop + n_employee + super_dummy, data = df, Hess = TRUE, method = "logistic")
# marginal effect / Odds ratio
# Tobit Model
library(AER)
# NOTE(review): as.numeric() on an ordered factor returns integer level CODES
# (1, 2, ...), not the original values -- if the raw counts started at 0 this
# shifts every observation by one; confirm this is intended before trusting
# the tobit intercept/censoring point.
df$L_Entry_new <- as.numeric(df$L_Entry_new)
df$F_Entry_new <- as.numeric(df$F_Entry_new)
out_tobit_L <- tobit(L_Entry_new ~ LAWSON_existed + LAWSON_existed_2 + Familymart_existed +
F_than_L + L_mono + pop + n_employee + super_dummy, data = df)
out_tobit_F <- tobit(F_Entry_new ~ Familymart_existed + Familymart_existed_2 + LAWSON_existed +
L_than_F + F_mono + pop + n_employee + super_dummy, data = df)
# marginal effect / Odds ratio
# Stargazer
# LaTeX regression table for the ordered-logit and tobit results.
stargazer(out_orderd_L, out_orderd_F, out_tobit_L, out_tobit_F, title="stargazer による回帰分析の結果")
|
8c66f91e3d5cf3392cd366400d4710086440faaa
|
6d64ceecb5a4c1464daf5b1624612febfbdcd360
|
/plot-base.R
|
de2640fc462c124b8d4d1d53a7234ec516428ccf
|
[] |
no_license
|
bizmarkdev/RCode
|
bbadb38bea8539280a90bf6106c627091f8c7b97
|
54231e6ee155233db0ff532f693111318649f79a
|
refs/heads/master
| 2021-01-10T14:13:36.210852
| 2016-03-16T20:40:00
| 2016-03-16T20:40:00
| 51,869,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,300
|
r
|
plot-base.R
|
# Base Plotting System
# Lecture-notes scratch file: meant to be run interactively, line by line
# (several early lines reference x/y before they are defined further down).
# initialize the plot
plot(x,y)
hist(x)
# reset global parameters
dev.off()
plot.new()
# Hierarchical clustering - example
# hclust is deterministic
set.seed(1234)
par(mar = c(0, 0, 0, 0))
x <- rnorm(12, mean = rep(1:3, each = 4), sd = 0.2)
y <- rnorm(12, mean = rep(c(1, 2, 1), each = 4), sd = 0.2)
plot(x, y, col = "blue", pch = 19, cex = 2)
text(x + 0.05, y + 0.05, labels = as.character(1:12))
library(datasets)
hist(airquality$Ozone) ## Draw a new plot
library(datasets)  # fixed typo: was "libary(datasets)", which errors
with(airquality,plot(Wind,Ozone))
library(datasets)
airquality <- transform(airquality, Month = factor(Month))
boxplot(Ozone ~ Month, airquality, xlab = "Month", ylab = "Ozone (ppb)")
# display two boxplots of pm25, one for each region:
boxplot(pm25 ~ region, data = pollution, col = "red")
# Useful Base Graphic Parameters
# pch: plotting symbol
# lty: line type
# lwd: line width
# col: plotting color
# xlab: x-axis label
# ylab: y-axis label
?par
# set global graphics parameters that affect plots in an R session.
# overridden when used as arguments in specific plotting functions
# las: orientation of the axis labels
# bg: background color
# mar: margin size
# oma: outer margin size
# mfrow: number of plots per row, column (plots are filled row-wise)
# mfcol: number of plots per row, column (plots are filled column-wise)
par(mfrow = c(1,2), mar= c(5,4,3,2), lwd=2, lty=2)
par("lty") # line type = [1] "dashed"
par("lwd") # line width
par("mar") # bottom,left,top,right
par("mfrow") # [1] 1 2 means one row, two columns
# Base plotting functions
# plot
# lines: add lines to a plot
# points: add points to a plot
# text: add text labels to a plot
# title: add annotations to x,y axis
# mtext: add arbirary text to the margins
# axis: adding axis ticks/labels
library(datasets)
with(airquality,plot(Wind,Ozone))
title(main="Ozone and Wind in New York City") # Add a title
title(main="Ozone and Wind in New York City.") # Change a title (messes things up)
with(airquality,plot(Wind,Ozone, main="Ozone and Wind in New York City."))
with(subset(airquality,Month==5),points(Wind,Ozone,col="blue"))
with(airquality,plot(Wind,Ozone,main="Ozone and Wind in New York City",type="n"))
with(subset(airquality,Month==5), points(Wind,Ozone,col="blue"))
with(subset(airquality,Month!=5), points(Wind,Ozone,col="red"))
legend("topright",pch=1,col=c("blue","red"),legend = c("May","Other Months"))
with(airquality,plot(Wind,Ozone,main="Ozone and Wind in New York City",pch=20))
model <-lm(Ozone ~ Wind, airquality)
abline(model,lwd=2)
par(mfrow = c(1,2))
with(airquality,{
  plot(Wind,Ozone,main="Ozone and Wind")
  plot(Solar.R,Ozone,main="Ozone and Solar Radiation")
})
par(mfrow = c(1, 3), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(airquality, {
  plot(Wind, Ozone, main = "Ozone and Wind")
  plot(Solar.R, Ozone, main = "Ozone and Solar Radiation")
  plot(Temp, Ozone, main = "Ozone and Temperature")
  mtext("Ozone and Weather in New York City",outer = TRUE)
})
# Base Plotting Demonstration
# margins
x <- rnorm(100)
hist(x)
# defaults are: x,Frequency for x,y labels
y<-rnorm(100)
plot(x,y)
par("mar") # margins are = [1] 5.1 4.1 4.1 2.1 (bottom,left,top,right)
par(mar=c(4,4,2,2))
plot(x,y)
# points
example(points)
plot(x,y,pch=20)
plot(x,y,pch=19)
plot(x,y,pch=2)
plot(x,y,pch=3)
plot(x,y,pch=4)
x <- rnorm(100)
y<-rnorm(100)
plot(x,y)
title("Scatterplot")
text(-2,-2,"Label")
legend("topleft",legend="Data")
legend("topleft",legend="Data",pch=20)
fit <- lm(y ~x)
abline(fit, lwd=3, col = "blue")
plot(x,y,xlab="Weight", ylab = "Height", main="Scatterplot",pch=20)
legend("topright",legend="Data",pch=20)
fit <- lm(y~x)
abline(fit,lwd=3,col="red")
z <- rpois(100,2)
par(mfrow=c(2,1)) # two rows, one column
plot(x,y,pch=20)
plot(x,z,pch=19)
par("mar")
par(mar = c(2,2,1,1))
plot(x,y,pch=20)
plot(x,z,pch=20)
# Using subsets to display different colors
# quantitative variables
x <- rnorm(100)
y <- x + rnorm(100)
# grouping (factor) variables
?gl # see data-types.R for info on factor variables
g <- gl(2,50,labels = c("Male","Female"))
# subsetting based on grouping variables
x #100 quantitative values (no gender)
str(g)
g #100 values, 2 levels, 1-50 are Male, 51-100 are Female
x[g=="Female"] #returns 51-100 values for x
# Display subsets with different colors
par(mfrow = c(1,1))
plot(x,y,type = "n")
?points
points(x[g=="Male"], y[g=="Male"], col="green")
points(x[g=="Female"], y[g=="Female"], col="blue", pch=19)
# Lesson 4: What is a Graphics Device?
dev.cur() # current plotting device
?Devices
library(datasets)
pdf(file="myplot.pdf") # Open PDF device
with(faithful,plot(eruptions,waiting))
title(main="Old Faithful Geyser data")
dev.off() # Close PDF device
# vector devices (pdf, svg, win.metafile, postscript)
# bitmap devices (png, jpeg, tiff, bmp)
# may open multiple graphical devices, but plotting can occur on one graphics at a time
# dev.set(<integer>) to set the current device.
# Copy a plot (dev.copy, dev.copy2pdf)
library(datasets)
with(faithful,plot(eruptions,waiting))
title(main="Old Faithful Geyser data")
dev.copy(png, file="geyserplot.png")
dev.off()
|
7478b7112b8805884363c9c5066cb891e29f37e6
|
9458df97270a3f5984062509b1bfa761a3ed0bdd
|
/R/palette_plot.R
|
decee299073f6e3bf01214a250a04f7ee09d04aa
|
[
"MIT"
] |
permissive
|
Nowosad/colorblindcheck
|
a3845e619e6da8e362b004ad0ef0a7c77420b018
|
07fc7e7a1d67da26f40b5be077d32c8e17eae19e
|
refs/heads/master
| 2023-05-27T05:51:38.841711
| 2023-05-17T14:05:18
| 2023-05-17T14:05:18
| 182,271,287
| 26
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,091
|
r
|
palette_plot.R
|
#' Plot Palette And Its Color Vision Deficiencies
#'
#' Draws the original input palette together with simulations of the three main
#' color vision deficiencies - deuteranopia, protanopia, and tritanopia - as
#' four horizontal strips of color swatches.
#'
#' @param x A vector of hexadecimal color descriptions
#' @param severity Severity of the color vision defect, a number between 0 and 1
#'
#' @importFrom graphics par plot rect text
#'
#' @seealso palette_bivariate_plot
#'
#' @return A plot with the original input palette and simulations of color vision deficiencies - deuteranopia, protanopia, and tritanopia
#' @export
#'
#' @examples
#' rainbow_pal = rainbow(n = 7)
#' rainbow_pal
#' palette_plot(rainbow_pal)
#'
#' palette_plot(x = rcartocolor::carto_pal(7, "Sunset"))
#' palette_plot(x = rcartocolor::carto_pal(11, "Safe"))
#' palette_plot(x = rcartocolor::carto_pal(7, "Earth"))
#' palette_plot(x = rcartocolor::carto_pal(11, "Vivid"))
palette_plot = function(x, severity = 1){
  # Rows are drawn bottom-to-top: tritanopia, protanopia, deuteranopia, original.
  sims = list(
    colorspace::tritan(x, severity = severity),
    colorspace::protan(x, severity = severity),
    colorspace::deutan(x, severity = severity),
    x
  )
  row_labels = c("Tritanopia", "Protanopia", "Deuteranopia", "Normal")
  n_cols = length(x)
  n_rows = length(sims)
  # Tighten axis-title spacing; restore the user's settings on exit.
  old_par = par(mgp = c(2, 0.25, 0))
  on.exit(par(old_par))
  # Empty canvas: one column per color, one row per palette version.
  plot(
    1,
    1,
    xlim = c(0, n_cols),
    ylim = c(0, n_rows),
    type = "n",
    axes = FALSE,
    bty = "n",
    xlab = "",
    ylab = ""
  )
  # One strip of swatches per row; the 0.2 gap separates the rows visually.
  for (row in seq_len(n_rows)) {
    rect(
      xleft = seq_len(n_cols) - 1,
      ybottom = row - 1,
      xright = seq_len(n_cols),
      ytop = row - 0.2,
      col = sims[[row]],
      border = "light grey"
    )
  }
  # Right-aligned row labels in the left margin (xpd lets them bleed outside).
  text(
    rep(0, n_rows),
    seq_len(n_rows) - 0.6,
    labels = row_labels,
    xpd = TRUE,
    adj = 1
  )
}
#' Plot Bivariate Palette And Its Color Vision Deficiencies
#'
#' Plot of the original input bivariate palette and simulations of color vision deficiencies - deuteranopia, protanopia, and tritanopia.
#'
#' @param x A vector of hexadecimal color descriptions. Its length should be a
#'   perfect square (e.g., 4, 9, 16) so the palette can be laid out as a grid.
#' @param severity Severity of the color vision defect, a number between 0 and 1
#'
#' @seealso palette_plot
#'
#' @return A plot with the original input palette and simulations of color vision deficiencies - deuteranopia, protanopia, and tritanopia
#'
#' @export
#'
#' @examples
#' palette_bivariate_plot(x = rcartocolor::carto_pal(4, "Sunset"))
palette_bivariate_plot = function(x, severity = 1){
  # Simulated palettes, reversed so panel order is: Normal, Deuteranopia,
  # Protanopia, Tritanopia (matching `labels`).
  deu = colorspace::deutan(x, severity = severity)
  pro = colorspace::protan(x, severity = severity)
  tri = colorspace::tritan(x, severity = severity)
  y = rev(list(tri, pro, deu, x))
  labels = rev(c("Tritanopia", "Protanopia", "Deuteranopia", "Normal"))
  ncol = length(x)
  # Grid dimension: assumes a square bivariate palette (length(x) == ny * ny).
  ny = sqrt(ncol)
  # 2x2 panel layout with slim margins; restore the user's settings on exit.
  oldpar = par(mfrow = c(2, 2), mar = rep(1, 4))
  on.exit(par(oldpar))
  # One image panel per palette version; each cell shows one palette color.
  for (i in seq_along(y)) {
    graphics::image(
      matrix(1:ncol, nrow = ny),
      axes = FALSE,
      col = y[[i]],
      main = labels[[i]],
      asp = 1
    )
  }
}
|
1dbe3d690a4136d9120b13f90735e931621fceea
|
53bb0e4d2bbbda8fd491d811a70b7f0b6e676fe0
|
/server.R
|
1422e315054f0bcdcd81b70396c9d0865947f246
|
[] |
no_license
|
jpbarddal/DataAnalysisR
|
4c1bb84e84b23aab21d0235be17af238001847b8
|
98cabe7377f7cc109e3f95ff343285658a90cf4f
|
refs/heads/master
| 2020-03-13T06:36:49.970787
| 2017-03-24T08:21:42
| 2017-03-24T08:21:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,345
|
r
|
server.R
|
library(shiny)
library(shinyjs)
# Project-local modules: data reading, distribution comparison, drift-timeline
# generation, and result-visualisation helpers.
source("core/visualisation.R")
source("core/verification/read_data.R")
source("core/verification/compare_distributions.R")
source("core/drift_timeline/timeline.R")
source("core/visualise_results/result_table.R")
source("core/visualise_results/heatmaps.R")
source("core/visualise_results/detailed_likelihood.R")
source("core/visualise_results/detailed_posterior.R")
# Increase file size limit
# (100 MB cap for uploaded ARFF files.)
options(shiny.maxRequestSize=100*1024^2)
# Variables persistent throughout application lifetime -------------------------
# NOTE(review): "foler" in data.out.foler is a typo for "folder", but the name
# is referenced throughout the server function below, so it is kept as-is.
# NOTE(review): measurement, folder_name, data.all and data.paths are not used
# by code visible in this file -- presumably consumed by the source()d modules;
# verify before removing.
measurement = "StepSize"
folder_name <- "./data"
data.out.foler <- "./data_out"
data.all <- data.frame()
data.paths <- ""
selectWavelengths <- function(spectra, wavelength.range = c(0, Inf)) {
  # Subset spectral columns to the requested wavelength window.
  # Column names are assumed to be consecutive integer wavelengths, so a
  # wavelength maps to a column index by subtracting the first wavelength.
  wl <- as.numeric(names(spectra))
  offset <- min(wl) - 1
  first.col <- max(wavelength.range[1] - offset, 1)
  last.col <- min(wavelength.range[2] - offset, length(wl))
  spectra[, first.col:last.col]
}
selectTests <- function(spectra, test.ids) {
  # Pick the rows (individual test spectra) identified by `test.ids`.
  spectra[test.ids, ]
}
# Shiny server: backend for the drift-analysis UI. Loads uploaded ARFF files,
# drives the external MarTVarD.jar analysis tool via system2(), and renders
# drift-timeline and window-comparison plots.
shinyServer(function(input, output, session) {
  # Reactive store for the currently loaded data set, its display name, and
  # the on-disk paths of the uploaded file(s) (space-separated string).
  data.table <- reactiveValues(data = NULL, name = NULL, paths = NULL)
  # Show/hide controls depending on whether data is loaded and which timeline
  # type / analysis type is selected.
  observe({
    output$chunk.attribute.num.vals <- renderText("0")
    shinyjs::toggle("timeline.type", condition = !is.null(data.table$data))
    shinyjs::toggle("timeline.panel", condition = !is.null(data.table$data))
    shinyjs::toggle("analyse.plot", condition = !is.null(data.table$data))
    shinyjs::toggle("chunk.attribute.num.vals", condition = grepl("_chunk", input$timeline.type))
    shinyjs::toggle("analyse.drift.type", condition = input$analyse.type == "analysis")
    # shinyjs::toggle("size.confirm", condition = grepl("_chunk", input$timeline.type))
    # shinyjs::toggle("size.reset", condition = grepl("_chunk", input$timeline.type))
    # shinyjs::toggle("size.recorded", condition = grepl("_chunk", input$timeline.type))
  })
  # ------- Data file chooser
  # Load the uploaded ARFF file(s) into the reactive store and show them.
  observeEvent(input$file.arff, {
    file.input <- input$file.arff
    data.table$data <- ReadArffFiles(file.input$datapath)
    data.table$paths <- paste0(file.input$datapath, collapse = " ")
    data.table$name <- gsub(".arff", "", paste0(file.input$name, collapse = "_", sep = ""))
    output$result.files <- renderDataTable({data.table$data})
  })
  # ------ Timeline Generator
  # Slider for how many attributes go into each analysed subset.
  output$attribute.subset.length <- renderUI({
    min <- 1
    # one less for class
    max <- ncol(data.table$data) - 1
    if (is.null(data.table$data)) {
      return()
    }
    else if (grepl("_chunk", input$timeline.type)) {
      # 1 less for group attribute
      max <- max - 1
    }
    sliderInput("slider.attribute.subset.length",
                "Attribute Subset Length",
                min = min,
                max = max,
                value = min,
                step = 1)
  })
  # Chunk mode only: pick which attribute encodes time.
  output$chunk.attribute <- renderUI({
    if (is.null(data.table$data) || !grepl("_chunk", input$timeline.type)) {
      return()
    }
    # All attributes except the class (last column). The original spelled this
    # as [1:ncol(x) - 1], i.e. indices 0:(n-1), which only worked because R
    # drops index 0; made explicit here.
    selectInput("select.chunk.attribute",
                "Time Attribute:",
                names(data.table$data)[seq_len(ncol(data.table$data) - 1)])
  })
  output$chunk.input <- renderUI({
    if (is.null(data.table$data) || !grepl("_chunk", input$timeline.type)) {
      return()
    }
    numericInput("num.chunk.input", label = "Window Duration", min = 1, value = 1)
  })
  output$window.input <- renderUI({
    if (is.null(data.table$data) || input$timeline.type != "stream") {
      return()
    }
    # NOTE(review): max = ncol(...) looks suspicious for a window over
    # instances -- the commented-out slider below used nrow; confirm intent.
    numericInput("num.window.input", label = "Window Size to Add", min = 1, max = ncol(data.table$data), value = 1)
    #sliderInput("num.window.input", label = "Window Size to Add", min = 1, max = nrow(data.table$data), value = 1, step = 1)
  })
  # Accumulated configuration: subset lengths and window sizes/durations,
  # kept separately for stream mode and chunk mode.
  subset.lengths <- reactiveValues(window.lengths = c(), chunk.lengths = c())
  sizes <- reactiveValues(chunk.sizes = c(), window.sizes = c())
  # Refresh the recorded-values captions when the timeline type changes.
  observeEvent(input$timeline.type, {
    if (input$timeline.type == "stream") {
      output$size.recorded <- renderText(paste("Window Sizes:", paste(sizes$window.sizes, collapse = ",")))
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$window.lengths, collapse = ",")))
    }
    else {
      output$size.recorded <- renderText(paste("Window Durations:", paste(sizes$chunk.sizes, collapse = ",")))
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$chunk.lengths, collapse = ",")))
    }
  })
  # Add the current subset length to the mode-appropriate list (deduplicated).
  observeEvent(input$length.confirm, {
    if (input$timeline.type == "stream") {
      subset.lengths$window.lengths <- unique(c(subset.lengths$window.lengths, input$slider.attribute.subset.length))
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$window.lengths, collapse = ",")))
    }
    else {
      subset.lengths$chunk.lengths <- unique(c(subset.lengths$chunk.lengths, input$slider.attribute.subset.length))
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$chunk.lengths, collapse = ",")))
    }
  })
  # Clear the recorded subset lengths for the current mode.
  observeEvent(input$length.reset, {
    if (input$timeline.type == "stream") {
      subset.lengths$window.lengths <- c()
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$window.lengths, collapse = ",")))
    }
    else {
      subset.lengths$chunk.lengths <- c()
      output$subset.length.recorded <- renderText(paste("Subset Lengths:", paste(subset.lengths$chunk.lengths, collapse = ",")))
    }
  })
  # Add the current window size (stream) or duration (chunk), deduplicated.
  observeEvent(input$size.confirm, {
    if (input$timeline.type == "stream") {
      sizes$window.sizes <- unique(c(sizes$window.sizes, input$num.window.input))
      output$size.recorded <- renderText(paste("Window Sizes:", paste(sizes$window.sizes, collapse = ",")))
    }
    else {
      sizes$chunk.sizes <- unique(c(sizes$chunk.sizes, input$num.chunk.input))
      output$size.recorded <- renderText(paste("Window Durations:", paste(sizes$chunk.sizes, collapse = ",")))
    }
  })
  # Clear the recorded sizes/durations for the current mode.
  observeEvent(input$size.reset, {
    if (input$timeline.type == "stream") {
      sizes$window.sizes <- c()
      output$size.recorded <- renderText(paste("Window Sizes:", paste(sizes$window.sizes, collapse = ",")))
    }
    else {
      sizes$chunk.sizes <- c()
      output$size.recorded <- renderText(paste("Window Durations:", paste(sizes$chunk.sizes, collapse = ",")))
    }
  })
  # Launch the external MarTVarD analysis with the accumulated configuration.
  observeEvent(input$run.timeline, {
    # Chunk mode prepends the (0-based) time-attribute index to the sizes arg.
    sizes.current <- if (input$timeline.type == "stream") paste0(sizes$window.sizes, collapse = ',') else paste((match(input$select.chunk.attribute, names(data.table$data)) - 1), paste0(sizes$chunk.sizes, collapse = ','), sep = ",")
    # Local variable shadows the reactiveValues of the same name; intentional
    # here since only the collapsed string is needed below.
    subset.lengths <- if (input$timeline.type == "stream") paste0(subset.lengths$window.lengths, collapse = ",") else paste0(subset.lengths$chunk.lengths, collapse = ",")
    withProgress(message = "Running Analysis...", {
      cmd <- paste("java", "-jar", "./MarTVarD.jar", input$timeline.type,
                   paste(subset.lengths, collapse = ","), sizes.current,
                   paste(data.out.foler, data.table$name, sep = "/"), data.table$paths)
      print(cmd)
      ret <- system2("java", c("-jar", "./MarTVarD.jar", input$timeline.type,
                               paste(subset.lengths, collapse = ","), sizes.current,
                               paste(data.out.foler, data.table$name, sep = "/"), data.table$paths),
                     stdout = "stdout.txt", stderr = "stderr.txt")
      print("done")
    })
    updateSelectInput(session, "select.timeline.plot.data", choices = list.files(path = data.out.foler))
  })
  # --------- Timeline plotter
  # Pick which previously generated result set to plot.
  output$timeline.plot.data <- renderUI({
    selectInput("select.timeline.plot.data", "Data Timeline Result", list.files(path = data.out.foler))
  })
  output$timeline.plot.type <- renderUI({
    if (input$select.timeline.plot.data == "") {
      return()
    }
    # "analyse" holds window-comparison output, not timeline output.
    types <- list.files(path = paste(data.out.foler, input$select.timeline.plot.data, sep = "/"))
    types <- types[! types %in% "analyse"]
    selectInput("select.timeline.plot.type", "Timeline Type", types)
  })
  # Result file names follow "<driftType>_<size>_<subsetLength>.csv"; the
  # three selectors below each parse one underscore-separated component.
  output$timeline.plot.drift.type <- renderUI({
    if (input$select.timeline.plot.type == "") {
      return()
    }
    selectInput("select.timeline.plot.drift.type", "Drift Measurement Type",
                unique(sapply(
                  list.files(paste(data.out.foler,
                                   input$select.timeline.plot.data,
                                   input$select.timeline.plot.type, sep = "/"
                  )),
                  function(x) strsplit(x, split = "_")[[1]][1])))
  })
  output$timeline.plot.sizes <- renderUI({
    if (input$select.timeline.plot.type == "") {
      return()
    }
    selectInput("select.timeline.plot.sizes", "Window Size/Duration",
                unique(sapply(
                  list.files(paste(data.out.foler,
                                   input$select.timeline.plot.data,
                                   input$select.timeline.plot.type, sep = "/"
                  )),
                  function(x) strsplit(x, split = "_")[[1]][2])))
  })
  output$timeline.plot.subset.length <- renderUI({
    if (input$select.timeline.plot.type == "") {
      return()
    }
    selectInput("select.timeline.plot.subset.length", "Attribute Subset Length",
                unique(sapply(
                  list.files(paste(data.out.foler,
                                   input$select.timeline.plot.data,
                                   input$select.timeline.plot.type, sep = "/"
                  )),
                  function(x) strsplit(x, split = "[_.]")[[1]][3])),
                multiple = TRUE)
  })
  # Placeholder: reload handler intentionally empty (UI re-renders reactively).
  observeEvent(input$timeline.plot.reload, {
  })
  # Render the timeline plot for the selected drift type / size / lengths.
  observeEvent(input$timeline.plot.run, {
    output$timeline.plot.plot <- renderPlotly(PlotWindowSize(input$select.timeline.plot.drift.type,
                                                             input$select.timeline.plot.sizes,
                                                             input$select.timeline.plot.subset.length,
                                                             directory = paste(data.out.foler,
                                                                               input$select.timeline.plot.data,
                                                                               input$select.timeline.plot.type,
                                                                               sep = "/")))
  })
  # --------- Compare / Analyse
  # Two windows over the instance index; defaults split the data in half.
  output$start.index.1 <- renderUI({
    if (is.null(data.table)) {
      return()
    }
    numericInput("numeric.start.index.1", "Window 1 Start Index", value = 1, min = 1, max = nrow(data.table$data), step = 1)
  })
  output$end.index.1 <- renderUI({
    if (is.null(data.table)) {
      return()
    }
    numericInput("numeric.end.index.1", "Window 1 End Index", value = nrow(data.table$data) / 2, min = input$numeric.start.index.1, max = nrow(data.table$data), step = 1)
  })
  output$start.index.2 <- renderUI({
    if (is.null(data.table)) {
      return()
    }
    numericInput("numeric.start.index.2", "Window 2 Start Index", value = (nrow(data.table$data) / 2) + 1, min = 1, max = nrow(data.table$data), step = 1)
  })
  output$end.index.2 <- renderUI({
    if (is.null(data.table)) {
      return()
    }
    numericInput("numeric.end.index.2", "Window 2 End Index", value = nrow(data.table$data), min = input$numeric.start.index.2, max = nrow(data.table$data), step = 1)
  })
  # Either compare the two windows' raw distributions, or run (and cache) the
  # MarTVarD window analysis and plot the requested drift-type results.
  observeEvent(input$analyse.plot.run, {
    if (is.null(data.table)) {
      return()
    }
    else if (input$analyse.type == "compare") {
      shinyjs::show("analysis.plot")
      shinyjs::hide("analysis.detailed.plot")
      shinyjs::hide("analysis.2att.plot")
      output$analysis.plot <- renderPlot(Histogram(data.table$data[seq(input$numeric.start.index.1, (input$numeric.end.index.1)), ],
                                                   data.table$data[seq(input$numeric.start.index.2, input$numeric.end.index.2), ]))
    }
    else {
      shinyjs::hide("analysis.plot")
      # Results are cached per window-index combination; rerun only if absent.
      window.folder <- paste(input$numeric.start.index.1, input$numeric.end.index.1, input$numeric.start.index.2, input$numeric.end.index.2, sep = "_")
      analyse.folder <- paste(data.out.foler, data.table$name, "analyse", window.folder, sep = "/")
      if (length(list.files(analyse.folder)) == 0) {
        withProgress(message = "Running Analysis...", {
          ret <- system2("java", c("-jar", "./MarTVarD.jar", "analyse",
                                   "1,2", paste(input$numeric.start.index.1, input$numeric.end.index.1, input$numeric.start.index.2, input$numeric.end.index.2, sep = ","),
                                   paste(data.out.foler, data.table$name, sep = "/"), data.table$paths))
        })
      }
      drift.type <- input$analyse.drift.type
      if (drift.type == "COVARIATE") {
        shinyjs::hide("analysis.detailed.plot")
        shinyjs::show("analysis.2att.plot")
        results.cov.1 <- ResultTable(paste(analyse.folder, "1-attributes_covariate.csv", sep = "/"))
        results.cov.2 <- ResultTable(paste(analyse.folder, "2-attributes_covariate.csv", sep = "/"))
        output$analysis.2att.plot <- renderPlotly(VisualPairAttributes(results.cov.1, results.cov.2, drift.type = "Covariate"))
      }
      else if (drift.type == "JOINT") {
        shinyjs::hide("analysis.detailed.plot")
        shinyjs::show("analysis.2att.plot")
        results.joint.1 <- ResultTable(paste(analyse.folder, "1-attributes_joint.csv", sep = "/"))
        results.joint.2 <- ResultTable(paste(analyse.folder, "2-attributes_joint.csv", sep = "/"))
        output$analysis.2att.plot <- renderPlotly(VisualPairAttributes(results.joint.1, results.joint.2, drift.type = "Joint"))
      }
      else if (drift.type == "POSTERIOR") {
        shinyjs::show("analysis.detailed.plot")
        shinyjs::show("analysis.2att.plot")
        results.pos.d.1 <- ResultTable(paste(analyse.folder, "1-attributes_posterior_detailed.csv", sep = "/"))
        results.pos.1 <- ResultTable(paste(analyse.folder, "1-attributes_posterior.csv", sep = "/"))
        results.pos.2 <- ResultTable(paste(analyse.folder, "2-attributes_posterior.csv", sep = "/"))
        # Widen the bottom margin so long attribute labels are not clipped.
        tmp_plot <- plotly_build(VisualPairAttributes(results.pos.1, results.pos.2, drift.type = "Posterior"))
        tmp_plot$layout$margin$b <- 150
        output$analysis.2att.plot <- renderPlotly(tmp_plot)
        output$analysis.detailed.plot <- renderPlotly(VisualSingleAttributeStructure(results.pos.d.1, "Posterior"))
      }
      else if (drift.type == "LIKELIHOOD") {
        shinyjs::show("analysis.detailed.plot")
        shinyjs::show("analysis.2att.plot")
        results.like.d.1 <- ResultTable(paste(analyse.folder, "1-attributes_likelihood_detailed.csv", sep = "/"))
        results.like.1 <- ResultTable(paste(analyse.folder, "1-attributes_likelihood.csv", sep = "/"))
        results.like.2 <- ResultTable(paste(analyse.folder, "2-attributes_likelihood.csv", sep = "/"))
        output$analysis.2att.plot <- renderPlotly(VisualPairAttributes(results.like.1, results.like.2, drift.type = "Likelihood"))
        output$analysis.detailed.plot <- renderPlotly(VisualSingleLikelihoodAttribute(results.like.d.1))
      }
    }
  })
})
|
e1ea871705968506a19623645af32a44eeec4fa6
|
2089ab20f1a6f77e22c1ebab973346a36b4f780f
|
/man/unique.Rd
|
6a8d4ec9df4212c2e2ab5cbdadae83cfcef1344e
|
[] |
no_license
|
gitlongor/uniqueAtomMat
|
ad092850638fd69b0f53157a12f37acf94dde949
|
61c529dfff609dfc0edc5a9b9ad327d6dbcd4620
|
refs/heads/master
| 2021-01-10T10:47:24.967298
| 2017-07-09T00:32:47
| 2017-07-09T00:32:47
| 36,911,254
| 0
| 0
| null | 2015-07-04T20:32:17
| 2015-06-05T04:10:45
|
C++
|
UTF-8
|
R
| false
| false
| 7,879
|
rd
|
unique.Rd
|
\name{unique.matrix}
\alias{unique.matrix}
\alias{duplicated.matrix}
\alias{anyDuplicated.matrix}
\title{Finding Unique or Duplicated Rows or Columns for Atomic Matrices}
\description{
These S3 methods are alternative (typically much faster) implementations of counterparts in the \code{base} package for atomic matrices.
\code{unique.matrix} returns a matrix with duplicated rows (or columns) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (or columns) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first duplicate row (or column) if any, and \code{0L} otherwise.
}
\usage{
\method{unique}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf, \dots)
\method{duplicated}{matrix}(x, incomparables = FALSE, MARGIN = 1,
fromLast = FALSE, signif=Inf,\dots)
\method{anyDuplicated}{matrix}(x, incomparables = FALSE,
MARGIN = 1, fromLast = FALSE, signif=Inf,\dots)
}
\arguments{
\item{x}{an atomic matrix of mode \code{"numeric"}, \code{"integer"}, \code{"logical"}, \code{"complex"}, \code{"character"} or \code{"raw"}. When \code{x} is not atomic or when it is not a matrix, the \code{\link[base:unique]{base::unique.matrix}} in the \code{base} package will be called. }
\item{incomparables}{a vector of values that cannot be compared, as in \code{\link[base:unique]{base::unique.matrix}}. Only when \code{incomparables=FALSE} will the code in \code{uniqueAtomMat} package be used; otherwise, the \code{base} version will be called. }
\item{fromLast}{a logical scalar indicating if duplication should be considered
from the last, as in \code{\link[base:unique]{base::unique.matrix}}. }
\item{\dots}{arguments for particular methods.}
\item{MARGIN}{a numeric scalar, the matrix margin to be held fixed, as in \code{\link{apply}}. For \code{unique.matrix}, only \code{MARGIN=1} and \code{MARGIN=2} are allowed; for \code{duplicated.matrix} and \code{anyDuplicated.matrix}, \code{MARGIN=0} is also allowed. For all other cases, the implementation in the \code{base} package will be called.}
\item{signif}{a numerical scalar only applicable to numeric or complex \code{x}. If \code{signif=NULL}, then \code{x} will first be passed to \code{\link{signif}} function with the number of significant digits being the \code{C} constant \code{DBL_DIG}, as explained in \code{\link{as.character}}. If \code{signif=Inf} (which is the default value), then \code{x} is untouched before finding duplicates. If \code{signif} is any other number, it specifies the required number of significant digits for \code{\link{signif}} function. }
}
\details{
These S3 methods are alternative implementations of counterparts in the \code{base} package for atomic matrices (i.e., double, integer, logical, character, complex and raw) directly based on C++98 Standard Template Library (STL) \code{std::set}, or C++11 STL \code{std::unordered_set}. The implementation treats the whole row (or column) \emph{vector} as the key, without the intermediate steps of converting the mode to \code{character} nor collapsing them into a \emph{scalar} as done in \code{base}. On systems with empty \code{`R CMD config CXX11`}, the C++98 STL \code{std::set} is used, which is typically implemented as a self-balancing tree (usually a red-black tree) that takes \eqn{O[n\log{(n)}]}{O[n log(n)]} to find all duplicates, where \code{n=dim(x)[MARGIN]}. On systems with non-empty \code{`R CMD config CXX11`}, the C++11 STL \code{std::unordered_set} is used, with average \eqn{O(n)}{O(n)} performance and worst case \eqn{O(n^2)}{O(n^2)} performance.
Missing values are regarded as equal, but \code{NaN} is not equal to
\code{NA_real_}.
Further, in contrast to the \code{base} counterparts, characters are compared directly based on their internal representations; i.e., no encoding issues for characters. Complex values are compared by their real and imaginary parts separately.
}
\value{
\code{unique.matrix} returns a matrix with duplicated rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) removed.
\code{duplicated.matrix} returns a logical vector indicating which rows (if \code{MARGIN=1}) or columns (if \code{MARGIN=2}) are duplicated.
\code{anyDuplicated.matrix} returns an integer indicating the index of the first (if \code{fromLast=FALSE}) or last (if \code{fromLast=TRUE}) duplicate row (if \code{MARGIN=1}) or column (if \code{MARGIN=2}) if any, and \code{0L} otherwise.
}
\section{Warning}{
In contrast to the \code{base} counterparts,
characters are compared directly based on their internal representations without considering encoding issues; for numeric and complex matrices, the default \code{signif} is \code{Inf}, i.e. comparing floating point values directly without rounding; and \link{long vectors} are not supported yet.
}
\seealso{
\code{\link[base:duplicated]{base::duplicated}}, \code{\link[base:unique]{base::unique}}, \code{\link{signif}}, \code{\link{grpDuplicated}}
}
\examples{
## prepare test data:
set.seed(9992722L, kind="Mersenne-Twister")
x.double=model.matrix(~gl(5,8))[sample(40), ]
## typical uses
unique(x.double)
unique(x.double, fromLast=TRUE)
unique(t(x.double), MARGIN=2)
unique(t(x.double), MARGIN=2, fromLast=TRUE)
anyDuplicated(x.double)
anyDuplicated(x.double, fromLast = TRUE)
## additional atomic test data
x.integer=as.integer(x.double); attributes(x.integer)=attributes(x.double)
x.factor=as.factor(x.integer); dim(x.factor)=dim(x.integer); dimnames(x.factor)=dimnames(x.integer)
x.logical=as.logical(x.double); attributes(x.logical)=attributes(x.double)
x.character=as.character(x.double); attributes(x.character)=attributes(x.double)
x.complex=as.complex(x.double); attributes(x.complex)=attributes(x.double)
x.raw=as.raw(x.double); attributes(x.raw)=attributes(x.double)
## compare results with base:
stopifnot(identical(base::duplicated.matrix(x.double),
uniqueAtomMat::duplicated.matrix(x.double)
))
stopifnot(identical(base::duplicated.matrix(x.integer, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(x.integer, fromLast=TRUE)
))
stopifnot(identical(base::duplicated.matrix(t(x.logical), MARGIN=2L),
uniqueAtomMat::duplicated.matrix(t(x.logical), MARGIN=2L)
))
stopifnot(identical(base::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE),
uniqueAtomMat::duplicated.matrix(t(x.character), MARGIN=2L, fromLast=TRUE)
))
stopifnot(identical(base::unique.matrix(x.complex),
uniqueAtomMat::unique.matrix(x.complex)
))
stopifnot(identical(base::unique.matrix(x.raw),
uniqueAtomMat::unique.matrix(x.raw)
))
stopifnot(identical(base::unique.matrix(x.factor),
uniqueAtomMat::unique.matrix(x.factor)
))
stopifnot(identical(base::duplicated.matrix(x.double, MARGIN=0),
uniqueAtomMat::duplicated.matrix(x.double, MARGIN=0)
))
stopifnot(identical(base::anyDuplicated.matrix(x.integer, MARGIN=0),
uniqueAtomMat::anyDuplicated.matrix(x.integer, MARGIN=0)
))
## benchmarking
if (require(microbenchmark)){
print(microbenchmark(base::duplicated.matrix(x.double)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.double)))
print(microbenchmark(base::duplicated.matrix(x.character)))
print(microbenchmark(uniqueAtomMat::duplicated.matrix(x.character)))
}else{
print(system.time(replicate(5e3L, base::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.double))))
print(system.time(replicate(5e3L, base::duplicated.matrix(x.character))))
print(system.time(replicate(5e3L, uniqueAtomMat::duplicated.matrix(x.character))))
}
}
\keyword{manip}
\keyword{logic}
|
5c9933d9296bf378d86dc5bd7126bd9890651df9
|
bc53027ff6efe71afdc5ac25bc7a1d88eaf61263
|
/tests/testthat/test_with_buffs.R
|
35b7caad9dd67b4b67a9da169851bafe298db344
|
[
"MIT"
] |
permissive
|
cphaarmeyer/warlockr
|
4e2a9b9cfbf2c1a1bcfe6661760b645b3a017af5
|
1966421a83ffa93ae70bb8f0a717e964f3497dd0
|
refs/heads/master
| 2021-07-09T12:51:37.863458
| 2021-03-31T21:38:21
| 2021-03-31T21:38:21
| 236,865,129
| 0
| 0
|
NOASSERTION
| 2020-09-15T16:03:02
| 2020-01-28T23:28:20
|
R
|
UTF-8
|
R
| false
| false
| 1,015
|
r
|
test_with_buffs.R
|
context("with_buffs")
test_that("with_buffs works as expected", {
stats <- list(int = 275, sp = 581, crit = 5, hit = 9)
out_id <- with_buffs(stats, buffs = character())
out_buffs <- with_buffs(stats)
out_consumables <- with_buffs(stats, consumables = c("gae", "eosp"))
out_wordbuffs <- with_buffs(stats, worldbuffs = c("ony", "zg"))
out_mult <- with_buffs(stats,
buffs = c("ai", "motw"), worldbuffs = c("ony", "zg")
)
expect_identical(out_id, clean_stats(stats))
is_greater_somewhere <- function(out) {
do.call(any, do_call_stats(list(out, stats), `>`))
}
expect_true(is_greater_somewhere(out_buffs))
expect_true(is_greater_somewhere(out_consumables))
expect_true(is_greater_somewhere(out_wordbuffs))
expect_true(is_greater_somewhere(out_mult))
expect_error(with_buffs(stats, worldbuffs = c("dm", "wb")))
expect_gt(out_wordbuffs$int, stats$int)
expect_error(with_buffs(stats, consumables = c("gae", "eogf")))
expect_error(with_buffs(stats, buffs = c("ai", "gae")))
})
|
32fde05985b1b1e692c3a2da2e51d3dec84a6e0f
|
d419516e83a780a44dbdcb3156dc5d192e8a5ca2
|
/man/copulahmmdata.Rd
|
992bbd98090214e8a0aadb67a9e2208f28a7d540
|
[] |
no_license
|
cran/hmmhdd
|
c975effc6749ce53a59263d0eb70b4d68b10e0c0
|
b78d722d6c2de97793c3c30a4442e849b21f406c
|
refs/heads/master
| 2020-07-19T02:39:08.657481
| 2019-09-04T13:20:02
| 2019-09-04T13:20:02
| 206,360,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
copulahmmdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset.R
\docType{data}
\name{copulahmmdata}
\alias{copulahmmdata}
\title{Simulated copula dataset with underlying Markov Model}
\format{An object of class \code{matrix}.}
\usage{
data(copulahmmdata)
}
\description{
Simulated bivariate dataset with a gaussian component
and an exponential one, with a certain dependency structure
and an underlying 2-state Markov Model.
}
\examples{
data(copulahmmdata)
}
\keyword{datasets}
|
5e63b05bc9c769161415be5f66dd5001fad2345b
|
8b7b10dbc39d9b7adae8c8ee7b25c634e12af91b
|
/PMGT 634 class examples/Transformations.R
|
38b8b85d4b1be61792f7a3f7879a5c2b9af8213a
|
[
"MIT"
] |
permissive
|
tpopenfoose/R-magic
|
001f257ee9d15a0b0ffac7bbab419b0203d820da
|
4a35172c013f2c2c1b128fb9e6c3f0f778ef333a
|
refs/heads/master
| 2020-12-28T12:04:21.959404
| 2012-05-16T02:29:27
| 2012-05-16T02:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,079
|
r
|
Transformations.R
|
# Stine and Foster Chapter 20 Problem 37
# Author: Ray Nelson
###############################################################################
# Data and R libraries
load("~/Documents/BYU 2011-2012/Winter 2012/PMGT 634/R magic/PMGT 634 datasets/stineFoster20.RData")
# load the Chapter 20 data frames from the class web site
library(car)
library(forecast)
# scale subscribers by converting to one million subscribers
cellularUS <- transform(cellularUS, subscribers = subscribers / 1000000, period.reciprocal = 1 / period)
# b) Trend in the data
plot(subscribers ~ date, data = cellularUS, type = "l",
xlab = "", ylab = "Million Subscribers")
# c) linear model
cellularUS.lm <- lm(subscribers ~ date, data = cellularUS)
scatterplot(residuals(cellularUS.lm) ~ cellularUS$date, id.n = 5)
residualPlot(cellularUS.lm)
rm(cellularUS.lm)
# d) Logarithmic transformation
log.lm <- lm(log(subscribers) ~ date, data = cellularUS)
summary(log.lm)
plot(log(subscribers) ~ date, data = cellularUS, type = "l")
abline(log.lm, col = "red")
residualPlot(log.lm)
# Forecast with logarithmic transformation model
futureDate <- seq(2007, 2009, .5) # explanatory values to generate forecasts
predictions.log <- predict(log.lm, data.frame(date = futureDate),
interval = "prediction")
predictions.log <- exp(predictions.log[,1]) # reverse the logarithm with exp
# values for comparison forecast plot
(time <- c(cellularUS$date, futureDate))
subscribers.log <- c(cellularUS$subscribers, predictions.log)
plot(subscribers.log ~ time, type = "l")
rm(log.lm, predictions.log)
# e) and f) percentage change in subscribers
plot(pctGrowth ~ period, data = cellularUS, type = "l")
recip.lm <- lm(pctGrowth ~ period.reciprocal, data = cellularUS)
summary(recip.lm)
plot(pctGrowth ~ period.reciprocal, data = cellularUS, type = "l",
xlab = "Reciprocal of Time Since 1984", ylab = "Percentage Change",
main = "Down the Ladder of Powers for X")
abline(recip.lm, col = 'red')
rm(recip.lm)
# Optimal lambda for Box-Cox transformation
BoxCox.lambda(cellularUS$subscribers) # optimal lambda
lambda <- .31
response <- BoxCox(cellularUS$subscribers, lambda)
explanatory <- cellularUS$date
transformed.lm <- lm(response ~ explanatory)
plot(response ~ explanatory, type = "l")
abline(transformed.lm, col = "red")
# Box-Cox forecasts
predictions.boxcox <- predict(transformed.lm,
data.frame(explanatory = futureDate), interval = "prediction")
predictions.boxcox <- InvBoxCox(predictions.boxcox, lambda) # reverse Box-Cox
subscribers.boxcox <- c(cellularUS$subscribers, predictions.boxcox[,1])
plot(subscribers.boxcox ~ time, type = "l")
rm(lambda, response, explanatory, transformed.lm, predictions.boxcox)
# Comparison plot of logarithm and Box-Cox Transformations
plot(subscribers.log ~ time, type = "l", ylab = "Millions of Subscribers",
xlab = "", main = "Comparison of Log and Box-Cox Transformations")
lines(subscribers.boxcox ~ time, type = "l", col = 'red')
# cleanup
rm(time, futureDate, subscribers.log, subscribers.boxcox)
|
2ef574f7e48bb285efc51d38c5be5cf36ffed93e
|
86dd3de51b810aebdb19c16930acb589decbf7fd
|
/step_1.3/final fig3 DepthvsSNPs_181204.R
|
2a0d3de21dbbcaa2a2c334b6da43de8a925d606d
|
[] |
no_license
|
qq1042032751/Aphid_AL4_chromosome_assembly
|
a106c2565a53d1969a72d4670457c15c22185c6e
|
9e023b9946e5c6e79c78d88a9564cf2f8355fe1a
|
refs/heads/master
| 2022-03-30T11:58:11.044369
| 2020-02-03T22:06:54
| 2020-02-03T22:06:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,365
|
r
|
final fig3 DepthvsSNPs_181204.R
|
#library(phytools)
library(ggplot2)
#library(ggExtra)
#library(plotly)
#library(ggrepel)
library(cowplot)
library(ggpubr)
library(RColorBrewer)
library(ggsci)
setwd("/Users/yy/Work/local/Projects/aphid/r_scripts/")
##
# Ref: http://www.sthda.com/english/articles/24-ggpubr-publication-ready-plots/78-perfect-scatter-plots-with-correlation-and-marginal-histograms/
###
# 12 colors
all <- read.table("depthHeteroSNPcombined.table.sorted.filtered.500k", header = F)
all <- read.table("depthHeteroSNPcombined.table.sorted.filtered.500k_plus3", header = F)
all <- read.table("depthHeteroSNPcombined.table.sorted.filtered", header = F)
tb <- table(all$V1)
all$V1 <- factor(all$V1,
levels = names(tb[order(tb, decreasing = TRUE)]))
sp <- ggscatter(all, x = "V20", y = "V23",
color = "V1", palette = "jco",
size = 2, alpha = 0.3, ggtheme = theme_bw(), xlab = "Male/Female Sequencing Depth Ratio", ylab = "Difference of Number Heterozygous SNPs in Females - in Males")
# + scale_fill_manual(values = colorRampPalette(brewer.pal(12, "Accent"))(colourCount))
# Marginal boxplot of x (top panel) and y (right panel)
xplot <- ggviolin(all, x = "V1", y = "V20",
color = "V1", fill = "V1", palette = "jco",
alpha = 0.5, ggtheme = theme_bw(), trim = T) + rotate()
yplot <- ggviolin(all, x = "V1", y = "V23",
color = "V1", fill = "V1", palette = "jco",
# ylim = c(-10, 10),
# add = "boxplot",
alpha = 0.5, ggtheme = theme_bw(), trim = T)
# Cleaning the plots
sp <- sp + rremove("legend")
yplot <- yplot + clean_theme() + rremove("legend")
xplot <- xplot + clean_theme() + rremove("legend")
# Arranging the plot using cowplot
plot_grid(xplot, NULL, sp, yplot, ncol = 2, align = "hv",
rel_widths = c(2, 1), rel_heights = c(1, 2))
# estimate median and SD
chr1 = subset(all, V1 == "Scaffold_20849;HRSCAF=22316") # chr 1
chr2 = subset(all, V1 == "Scaffold_21967;HRSCAF=25451") # chr 2
chr3 = subset(all, V1 == "Scaffold_21646;HRSCAF=24477") # chr 3
chrx = subset(all, V1 == "Scaffold_21773;HRSCAF=24826") # chr X
others = subset(all, V1 != "Scaffold_21773;HRSCAF=24826" &
V1 != "Scaffold_21646;HRSCAF=24477"&
V1 != "Scaffold_21967;HRSCAF=25451"&
V1 != "Scaffold_20849;HRSCAF=22316") # other scaffolds
others$V1 <- "others"
chr_123 = rbind(chr1, chr2, chr3)
chr_123$V1 <- "autosomes"
chrx$V1 <- "chrX"
chr_all = rbind(chr_123, chrx, others)
ggplot(chr_all, aes(chr_all$V20)) + geom_histogram(binwidth=0.01) +
xlim(0,2)
ggplot(chrx, aes(chrx$V20)) + geom_histogram(binwidth=0.01) + xlim(0,2)
ggplot(chr_all, aes(V20, color = V1, fill = V1)) +
# xlim(0,1500)+
scale_x_continuous(name="Male Depth / Female Depth",lim = c(0, 1.5), breaks = seq(0, 1.5, by = 0.1)) +
scale_y_continuous(name="Number of Windows")+
# geom_histogram(binwidth=1) +
geom_histogram(alpha=0.55, binwidth=0.011, alpha = 0.5, position = 'identity') +
# geom_histogram(alpha=0.55, binwidth=10) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"))
ggplot(chr_all, aes(chr_all$V20)) + geom_histogram(binwidth=0.01) + xlim(0,2)
ggplot(chrx, aes(chrx$V20)) + geom_histogram(binwidth=0.01) + xlim(0,2)
ggplot(others, aes(V20)) + geom_histogram(binwidth=0.01) + xlim(0,2)
# plot depth only
ggviolin(others, x = "V1", y = "V20", xlim=c(0,1),
color = "V1", fill = "V1", palette = "jco",
alpha = 0.5, ggtheme = theme_bw(), trim = T) + rotate()
ggplot(others, aes(x=others$V1, y=others$V20)) + geom_violin() +
scale_y_continuous(limits=c(0, 3)) + coord_flip()
median(chr_123$V20)
sd(chr_123$V20)
###
# customer color with 50 colors
###
# https://quantdev.ssri.psu.edu/sites/qdev/files/Tutorial_ColorR_2.html
library(rstudioapi)
library(fBasics)
library(grDevices)
current_path <- getSourceEditorContext()$path
setwd(dirname(current_path))
pal <- topo.colors(n = 50)
par(mar = rep(0, 4))
pie(rep(1, length(pal)), col = pal)
r_orig <- col2rgb(pal)[1,]
r_orig
g_orig <- col2rgb(pal)[2]
g_orig
b_orig <- col2rgb(pal)[3,]
b_orig
r <- c(76, 0, 0, 0, 0, 255, 255, 255, 255, 230, 209,
189, 168, 148, 128, 107, 87, 66, 46, 26)
g <- c(0, 67, 158, 211, 229, 237, 243, 249, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255)
b <- c(255, 255, 255, 255, 255, 36, 24, 12, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0)
beach <- function (n, name = c("beach.colors"))
{
beach.colors = rgb(r,g,b,maxColorValue = 255)
name = match.arg(name)
orig = eval(parse(text = name))
rgb = t(col2rgb(orig))
temp = matrix(NA, ncol = 3, nrow = n)
x = seq(0, 1, , length(orig))
xg = seq(0, 1, , n)
for (k in 1:3) {
hold = spline(x, rgb[, k], n = n)$y
hold[hold < 0] = 0
hold[hold > 255] = 255
temp[, k] = round(hold)
}
palette = rgb(temp[, 1], temp[, 2], temp[, 3], maxColorValue = 255)
palette
}
pal2 <- beach(n=50)
par(mar = rep(0, 4))
pie(rep(1, length(pal2)), col = pal2)
all <- read.table("depthHeteroSNPcombined.table.sorted.filtered.100k", header = F)
#all$V1 <- reorder(all$V1,all$V1,FUN=length)
#levels(all$V1)
tb <- table(all$V1)
all$V1 <- factor(all$V1,
levels = names(tb[order(tb, decreasing = TRUE)]))
sp <- ggscatter(all, x = "V20", y = "V22",
color = "V1", palette = "pal2",
size = 3, alpha = 0.6, ggtheme = theme_bw(), xlab = "Male/Female Seq. Depth Ratio", ylab = "Max Number of SNPs in males")
# Marginal boxplot of x (top panel) and y (right panel)
xplot <- ggviolin(all, x = "V1", y = "V20",
color = "V1", fill = "V1", palette = "pal2",
alpha = 0.5, ggtheme = theme_bw(), trim = T)+
rotate()
yplot <- ggviolin(all, x = "V1", y = "V22",
color = "V1", fill = "V1", palette = "pal2",
alpha = 0.5, ggtheme = theme_bw(), trim = T)
# Cleaning the plots
sp <- sp + rremove("legend")
yplot <- yplot + clean_theme() + rremove("legend")
xplot <- xplot + clean_theme() + rremove("legend")
# Arranging the plot using cowplot
plot_grid(xplot, NULL, sp, yplot, ncol = 2, align = "hv",
rel_widths = c(2, 1), rel_heights = c(1, 2))
|
1578f647a9572fcc3d91cc73f18c2188db611907
|
e1b45c5941f53eea0f2c99615bd45ab3e89275c7
|
/load_reference_data_2.R
|
9c1b743f3af3eb6cb059fbe7db8af8fe05b81a78
|
[
"MIT"
] |
permissive
|
instigatorofawe/clustering_manuscript
|
32f727cb703c5dd6fed43776fc86a67fdfebedf5
|
a75c437b45a07d7e6e0781012a92ce18179e47b0
|
refs/heads/master
| 2022-05-24T19:23:18.770560
| 2020-04-27T14:19:00
| 2020-04-27T14:19:00
| 259,345,244
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,587
|
r
|
load_reference_data_2.R
|
library(pracma)
library(tictoc)
library(glmnet)
library(ROCR)
library(matrixStats)
library(parallel)
library(ggplot2)
library(xgboost)
tic("Total time to run")
clinical.data = readRDS("data/eicu/clinical_data_icd9_sofa_vent.rds")
patient.result = readRDS("data/eicu/patient_data.rds")
sofa.scores = readRDS("data/eicu/sofa_scores.rds")
comorbidities = readRDS("data/eicu/comorbidities.rds")
broad.spectrum = readRDS("data/eicu/has.broad.spectrum.rds")
significant.rx = readRDS("data/eicu/has.significant.rx.combined.rds")
comorbidities = rbind(comorbidities, t(as.matrix(broad.spectrum)), t(significant.rx))
source("src/R/eicu/functions/generate_sampling_rate_table.R")
source("src/R/eicu/functions/eval_carry_forward.R")
source("src/R/eicu/functions/eval_interval.R")
source("src/R/eicu/functions/eval_max_in_past_2.R")
source("src/R/eicu/functions/eval_sum_in_past.R")
source("src/R/eicu/functions/eval_early_prediction_timestamps_combined_rf_comorbidities.R")
source("src/R/eicu/functions/eval_table_with_sofa_2.R")
source("src/R/eicu/functions/eval_table_with_sofa_comorbidities.R")
source("src/R/eicu/functions/generate_table_with_sofa_timestamps.R")
lengths = sapply(sofa.scores, function(x) length(x$timestamps))
sepsis.labels = sapply(sofa.scores[lengths>0], function(x) rowSums(x[2:7])>=2)
has.sepsis = sapply(sepsis.labels, any)
sepsis.timestamps = mapply(function(x,y) x$timestamps[y], sofa.scores[lengths>0][has.sepsis],sepsis.labels[has.sepsis])
# Determine shock onsets
shock.labels = mapply(function(x,y) x&y$lactate&y$vasopressors, sepsis.labels, sofa.scores[lengths>0])
has.shock = sapply(shock.labels, function(x) any(x,na.rm=T)) #& has.dx[lengths>0]
sepsis.label.lengths = sapply(sepsis.labels,length)
shock.lengths=sapply(shock.labels,length)
sofa.timestamps.lengths = sapply(sofa.scores[lengths>0],function(x)length(x$timestamps))
shock.onsets = mapply(function(x,y) min(x$timestamps[y],na.rm=T),sofa.scores[lengths>0][has.shock],shock.labels[has.shock])
load("data/eicu/reference_dataset_rx_combined_2.rdata")
mortality.patients = patient.result$patientunitstayid[patient.result$hospitaldischargestatus=="Expired"]
has.mortality = sapply(clinical.data[lengths>0], function(x) is.element(x$subject.id,mortality.patients))
discharge.times = sapply(clinical.data[lengths>0][!has.mortality], function(x) patient.result$hospitaldischargeoffset[patient.result$patientunitstayid==x$subject.id])
icu.discharge.times = sapply(clinical.data[lengths>0][!has.mortality], function(x) patient.result$unitdischargeoffset[patient.result$patientunitstayid==x$subject.id])
|
8f6c233847ef2ca68508aebf5804e031cb133358
|
5a7f7ebee0e458863e1da9d2a0fcc93b600d1786
|
/man/as.character.Package.Rd
|
5e508415ff0a2a00914a1929aa8e39e34ea2d888
|
[] |
no_license
|
HenrikBengtsson/R.oo
|
68071bacb43afe2a46201aea0350a3597ee19e6c
|
4101a141b2fa49a43a10df99f56c180ba2c662e6
|
refs/heads/master
| 2023-01-06T23:48:54.872999
| 2022-06-12T18:04:23
| 2022-06-12T18:04:23
| 19,437,907
| 20
| 1
| null | 2018-05-02T04:51:57
| 2014-05-04T22:47:54
|
R
|
UTF-8
|
R
| false
| false
| 830
|
rd
|
as.character.Package.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% Package.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{as.character.Package}
\alias{as.character.Package}
\alias{Package.as.character}
\alias{as.character,Package-method}
\title{Gets a string representation of this package}
\description{
Gets a string representation of this package.
}
\usage{
\method{as.character}{Package}(x, ...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{character}} string.
}
\examples{print(R.oo)}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{Package}}.
}
\keyword{internal}
\keyword{methods}
|
9bfc941d1e0e56e1e8631cf9d73f31cc420359af
|
da8dae69e597072bc616936d1d72a96f65e4efa0
|
/code/oldversions/v2_20180112/model/B2SetPath.R
|
ddbdfa2f1c904d158b7ab5439ac0fad302093c00
|
[] |
no_license
|
UCL/provis
|
71e82c383cd9414840e57c2a2867826d6b4ee3e6
|
86a287c7bc705d4aeffb9bbcf96747e97e6d688b
|
refs/heads/master
| 2020-08-01T04:08:20.198284
| 2019-11-08T12:09:43
| 2019-11-08T12:09:43
| 210,310,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,038
|
r
|
B2SetPath.R
|
B2SetPath<-function(RootDir,CodeDir,region_id=0) {
# Assumptions
# datadir = RootDir/data/region_"region_id"
# newdatadir = datadir/settlements
# mapdir = RootDir/data/maps
# traveldir = RootDir/data/TravelTime/results
# outdir = CodeDir/output
# wwwdir = CodeDir/shiny/www
# farmdir = RootDir/data/RICS
#
if (missing(region_id) | is.null(region_id)) {
region_id=0
}
if (region_id>0) {
DataDir<-paste0(RootDir,"/data1/region",as.character(region_id))
} else if (region_id==0) {
DataDir<-paste0(RootDir,"/data1")
}
NewDataDir<-paste0(RootDir,"/newdata1/region",as.character(region_id))
MapDir<-paste0(RootDir,"/data/maps")
TravelDir<-paste0(RootDir,"/data/TravelTime/results")
if (region_id>0) {
OutDir<-paste0(CodeDir,"/output/region",as.character(region_id))
} else if (region_id==0) {
OutDir<-paste0(CodeDir,"/output")
}
if (!file.exists(paste0(CodeDir,"/output"))) {
dir.create(paste0(CodeDir,"/output"))
}
if (!file.exists(OutDir)){
dir.create(OutDir)
}
if (!file.exists(NewDataDir)){
dir.create(NewDataDir)
}
# directory for pdf's for use by shiny
wwwdir <- paste0(CodeDir,"/shiny/www")
# RICS data direcory
farmdir<-paste0(RootDir,"/data/RICS")
roaddir<-paste0(RootDir,"/data/roads")
riverdir<-paste0(RootDir,"/data/rivers")
# AONB = "Area of Outstanding Natural Beauty"
AONBdir <-paste0(RootDir,"/data/AONB.gdb")
# Put all directories in a single list
dirs<-list(RootDir,DataDir,NewDataDir,MapDir,TravelDir,OutDir,
wwwdir,farmdir,roaddir,riverdir,AONBdir)
names(dirs)<-c("rootdir",
"datadir",
"newdatadir",
"mapdir",
"traveldir",
"outdir",
"wwwdir",
"farmdir",
"roaddir",
"riverdir",
"AONBdir")
return(dirs)
}
|
4b811feef2a20dea31e76913368b44f9f550c217
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052121-test.R
|
f75bd18c7a3c5f94bd02bb25766765650f7f24b8
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,434
|
r
|
1610052121-test.R
|
testlist <- list(rates = numeric(0), thresholds = c(6.95346565065353e-310, 2.72974914124656e-312, -2.30331099970595e-156, -2.30331110816477e-156, -2.30331110816477e-156, -2.30331110816476e-156, 1.09015244729545e+217, -3.0784026009623e-288, NaN, NaN, 8.6713227971991e-310, -2.1599506916756e-277, -5.69758255001719e-306, 0), x = c(-Inf, 2.87903833031139e-306, -1.04544791084451e-308, NaN, 7.73248132190929e-12, 6.98327594525623e-310, -6.05898824717554e-65, -2.25769487008466e-289, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -3.07800333305831e-288, NaN, NaN, -5.96890832411666e+306, -5.82900230650667e+303, NaN, -3.18471904527818e-248, 6.68177101606839e-310, 1.390671161567e-309, 0, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, -1.26841746137343e-30, NaN, -5.48681744862531e+303, 7.2911220195564e-304, 1.55587395324171e-59, -1.26836479922652e-30, 3.78576961503532e-270, 2.8925960194786e-307, NaN, -Inf, -5.55059957471347e+303, -1.24267941904771e-267, Inf, -5.46354690059085e-108, -5.46354690059085e-108, NaN, -3.10503618460142e+231, -5.96890832411666e+306, 1.06103027407474e-314, -2.30331110816307e-156, -2.30331110816477e-156, -6.05390363808932e-157, -2.30331110816477e-156, -5.48612733108437e+303, 2.96439387504748e-323, 3.91612999627774e-306, -3.56047810653759e-306, 2.22507386265735e-308, 2.71615461308772e-312, 7.55920438137107e-322, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
82b923d3c8a3a1dc533e023cf6c0b1ec27954eed
|
c053cc97c204c6af25664cf337d6dd94d984c591
|
/R/prob-roc_aunp.R
|
4874181f6aa32700be43f28966aa5b4c10ec650b
|
[
"MIT"
] |
permissive
|
tidymodels/yardstick
|
1b2454ae37da76b6c5c2b36682d573c7044767a7
|
e5c36f206fb737fc54b1a6161c09bc0d63b79beb
|
refs/heads/main
| 2023-08-19T03:29:20.953918
| 2023-08-08T21:32:57
| 2023-08-08T21:32:57
| 108,898,402
| 294
| 55
|
NOASSERTION
| 2023-08-08T21:32:59
| 2017-10-30T19:26:54
|
R
|
UTF-8
|
R
| false
| false
| 4,141
|
r
|
prob-roc_aunp.R
|
#' Area under the ROC curve of each class against the rest, using the a priori
#' class distribution
#'
#' `roc_aunp()` is a multiclass metric that computes the area under the ROC
#' curve of each class against the rest, using the a priori class distribution.
#' This is equivalent to `roc_auc(estimator = "macro_weighted")`.
#'
#' @family class probability metrics
#' @templateVar fn roc_aunp
#' @template return
#' @template event_first
#'
#' @section Multiclass:
#' This multiclass method for computing the area under the ROC curve uses the
#' a priori class distribution and is equivalent to
#' `roc_auc(estimator = "macro_weighted")`.
#'
#' @inheritParams roc_auc
#'
#' @param ... A set of unquoted column names or one or more `dplyr` selector
#' functions to choose which variables contain the class probabilities. There
#' should be as many columns as factor levels of `truth`.
#'
#' @param estimate A matrix with as many
#' columns as factor levels of `truth`. _It is assumed that these are in the
#' same order as the levels of `truth`._
#'
#' @references
#'
#' Ferri, C., Hernández-Orallo, J., & Modroiu, R. (2009). "An experimental
#' comparison of performance measures for classification". _Pattern Recognition
#' Letters_. 30 (1), pp 27-38.
#'
#' @seealso
#'
#' [roc_aunu()] for computing the area under the ROC curve of each class against
#' the rest, using the uniform class distribution.
#'
#' @author Julia Silge
#'
#' @examples
#' # Multiclass example
#'
#' # `obs` is a 4 level factor. The first level is `"VF"`, which is the
#' # "event of interest" by default in yardstick. See the Relevant Level
#' # section above.
#' data(hpc_cv)
#'
#' # You can use the col1:colN tidyselect syntax
#' library(dplyr)
#' hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' roc_aunp(obs, VF:L)
#'
#' # Change the first level of `obs` from `"VF"` to `"M"` to alter the
#' # event of interest. The class probability columns should be supplied
#' # in the same order as the levels.
#' hpc_cv %>%
#' filter(Resample == "Fold01") %>%
#' mutate(obs = relevel(obs, "M")) %>%
#' roc_aunp(obs, M, VF:L)
#'
#' # Groups are respected
#' hpc_cv %>%
#' group_by(Resample) %>%
#' roc_aunp(obs, VF:L)
#'
#' # Vector version
#' # Supply a matrix of class probabilities
#' fold1 <- hpc_cv %>%
#' filter(Resample == "Fold01")
#'
#' roc_aunp_vec(
#' truth = fold1$obs,
#' matrix(
#' c(fold1$VF, fold1$F, fold1$M, fold1$L),
#' ncol = 4
#' )
#' )
#' @export
roc_aunp <- function(data, ...) {
UseMethod("roc_aunp")
}
roc_aunp <- new_prob_metric(
roc_aunp,
direction = "maximize"
)
#' @export
#' @rdname roc_aunp
roc_aunp.data.frame <- function(data,
truth,
...,
na_rm = TRUE,
case_weights = NULL,
options = list()) {
check_roc_options_deprecated("roc_aunp", options)
prob_metric_summarizer(
name = "roc_aunp",
fn = roc_aunp_vec,
data = data,
truth = !!enquo(truth),
...,
estimator = NULL,
na_rm = na_rm,
event_level = NULL,
case_weights = !!enquo(case_weights)
)
}
#' @rdname roc_aunp
#' @export
roc_aunp_vec <- function(truth,
estimate,
na_rm = TRUE,
case_weights = NULL,
options = list(),
...) {
abort_if_class_pred(truth)
check_roc_options_deprecated("roc_aunp_vec", options)
estimator <- "macro_weighted"
check_prob_metric(truth, estimate, case_weights, estimator)
if (na_rm) {
result <- yardstick_remove_missing(truth, estimate, case_weights)
truth <- result$truth
estimate <- result$estimate
case_weights <- result$case_weights
} else if (yardstick_any_missing(truth, estimate, case_weights)) {
return(NA_real_)
}
# `event_level` doesn't really matter, but we set it anyways
roc_auc_vec(
truth = truth,
estimate = estimate,
estimator = estimator,
na_rm = FALSE,
event_level = "first",
case_weights = case_weights
)
}
|
d3d665b6974ebb40dfda48d751a51926b62254ff
|
dd513a24fd1635f461f54d0246ea70e39b84ba1d
|
/man/export_gifs.Rd
|
935e71d4f67b7d6c8ef1fa4978d6d41a9ac373cd
|
[] |
no_license
|
ekmixon/datamations
|
27b11bd53ca68209218703b5babb4dda4ee26a44
|
638eb203c68b897d2ab1ef1069f911a1254d1ac6
|
refs/heads/main
| 2023-03-07T06:27:23.356785
| 2021-02-19T21:37:41
| 2021-02-19T21:37:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 224
|
rd
|
export_gifs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util-sanddance.R
\name{export_gifs}
\alias{export_gifs}
\title{export all the gifs}
\usage{
export_gifs(df)
}
\description{
export all the gifs
}
|
32596700cd2725d104a4c4df16ac3d878856f73a
|
c4a59b3431c71dd2a8c2bceab81b412fcd88b5a1
|
/WGCNA/master_scripts/wgcna/wgcna.kme.primary.R
|
a082434f5cd8367b64cf57f175a4d30534c057fc
|
[] |
no_license
|
Vectorborne1980/Example_Scripts_2018
|
192a9237d7ac5a58cdca6944033a46ffb767d7a9
|
6270ddaeabec043da3e36f9d720e0671e6175948
|
refs/heads/master
| 2022-01-13T18:37:02.109348
| 2019-07-19T13:15:52
| 2019-07-19T13:15:52
| 108,558,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,077
|
r
|
wgcna.kme.primary.R
|
###############################################################################################
# DS1 KMEs
# Split the interleaved KME matrix: odd columns are correlations, even columns
# their p-values. substr(.., 5, ..) strips the first 4 characters of each
# column name, leaving the bare module colour.
KME.corr.DS1 <- KME.MEs.DS1[,seq(1, ncol(KME.MEs.DS1), 2)]
colnames(KME.corr.DS1) <- substr( colnames(KME.corr.DS1), 5, nchar(colnames(KME.corr.DS1)))
KME.corr.p.DS1 <- KME.MEs.DS1[,seq(2, ncol(KME.MEs.DS1), 2)]
colnames(KME.corr.p.DS1) <- colnames(KME.corr.DS1)
KME.primary.DS1 <- matrix(nrow=nrow(KME.MEs.DS1),ncol=3)
print(paste("Retrieving primary KME values for DS1..."))
# Vectorized replacement for the former gene-by-module double loop: for each
# gene, look up the column matching its assigned module colour (colour names
# are assumed unique) and pull that gene's KME and p-value in one shot.
# Genes whose colour has no matching column keep their NA row, as before.
col.DS1 <- match(geneList.DS1[, "moduleColors.DS1"], colnames(KME.corr.DS1))
hit.DS1 <- which(!is.na(col.DS1))
KME.primary.DS1[hit.DS1, 1] <- geneList.DS1[hit.DS1, "moduleColors.DS1"]
KME.primary.DS1[hit.DS1, 2] <- as.matrix(KME.corr.DS1)[cbind(hit.DS1, col.DS1[hit.DS1])]
KME.primary.DS1[hit.DS1, 3] <- as.matrix(KME.corr.p.DS1)[cbind(hit.DS1, col.DS1[hit.DS1])]
colnames(KME.primary.DS1) <- c("KME.primary","KME.color","KME.p.color")
rm(KME.corr.DS1,KME.corr.p.DS1,col.DS1,hit.DS1)
###############################################################################################
# DS2 KMEs
# Split the interleaved KME matrix: odd columns are correlations, even columns
# their p-values; strip the 4-character prefix from the column names.
KME.corr.DS2 <- KME.MEs.DS2[,seq(1, ncol(KME.MEs.DS2), 2)]
colnames(KME.corr.DS2) <- substr( colnames(KME.corr.DS2), 5, nchar(colnames(KME.corr.DS2)))
KME.corr.p.DS2 <- KME.MEs.DS2[,seq(2, ncol(KME.MEs.DS2), 2)]
colnames(KME.corr.p.DS2) <- colnames(KME.corr.DS2)
KME.primary.DS2 <- matrix(nrow=nrow(KME.MEs.DS2),ncol=3)
print(paste("Retrieving primary KME values for DS2..."))
# Vectorized replacement for the former gene-by-module double loop (see DS1).
col.DS2 <- match(geneList.DS2[, "moduleColors.DS2"], colnames(KME.corr.DS2))
hit.DS2 <- which(!is.na(col.DS2))
KME.primary.DS2[hit.DS2, 1] <- geneList.DS2[hit.DS2, "moduleColors.DS2"]
KME.primary.DS2[hit.DS2, 2] <- as.matrix(KME.corr.DS2)[cbind(hit.DS2, col.DS2[hit.DS2])]
KME.primary.DS2[hit.DS2, 3] <- as.matrix(KME.corr.p.DS2)[cbind(hit.DS2, col.DS2[hit.DS2])]
colnames(KME.primary.DS2) <- c("KME.primary","KME.color","KME.p.color")
rm(KME.corr.DS2,KME.corr.p.DS2,col.DS2,hit.DS2)
###############################################################################################
# DS3 KMEs
# Split the interleaved KME matrix: odd columns are correlations, even columns
# their p-values; strip the 4-character prefix from the column names.
KME.corr.DS3 <- KME.MEs.DS3[,seq(1, ncol(KME.MEs.DS3), 2)]
colnames(KME.corr.DS3) <- substr( colnames(KME.corr.DS3), 5, nchar(colnames(KME.corr.DS3)))
KME.corr.p.DS3 <- KME.MEs.DS3[,seq(2, ncol(KME.MEs.DS3), 2)]
colnames(KME.corr.p.DS3) <- colnames(KME.corr.DS3)
KME.primary.DS3 <- matrix(nrow=nrow(KME.MEs.DS3),ncol=3)
print(paste("Retrieving primary KME values for DS3..."))
# Vectorized replacement for the former gene-by-module double loop (see DS1).
col.DS3 <- match(geneList.DS3[, "moduleColors.DS3"], colnames(KME.corr.DS3))
hit.DS3 <- which(!is.na(col.DS3))
KME.primary.DS3[hit.DS3, 1] <- geneList.DS3[hit.DS3, "moduleColors.DS3"]
KME.primary.DS3[hit.DS3, 2] <- as.matrix(KME.corr.DS3)[cbind(hit.DS3, col.DS3[hit.DS3])]
KME.primary.DS3[hit.DS3, 3] <- as.matrix(KME.corr.p.DS3)[cbind(hit.DS3, col.DS3[hit.DS3])]
colnames(KME.primary.DS3) <- c("KME.primary","KME.color","KME.p.color")
rm(KME.corr.DS3,KME.corr.p.DS3,col.DS3,hit.DS3)
###############################################################################################
# DS4 KMEs
# Split the interleaved KME matrix: odd columns are correlations, even columns
# their p-values; strip the 4-character prefix from the column names.
KME.corr.DS4 <- KME.MEs.DS4[,seq(1, ncol(KME.MEs.DS4), 2)]
colnames(KME.corr.DS4) <- substr( colnames(KME.corr.DS4), 5, nchar(colnames(KME.corr.DS4)))
KME.corr.p.DS4 <- KME.MEs.DS4[,seq(2, ncol(KME.MEs.DS4), 2)]
colnames(KME.corr.p.DS4) <- colnames(KME.corr.DS4)
KME.primary.DS4 <- matrix(nrow=nrow(KME.MEs.DS4),ncol=3)
print(paste("Retrieving primary KME values for DS4..."))
# Vectorized replacement for the former gene-by-module double loop (see DS1).
col.DS4 <- match(geneList.DS4[, "moduleColors.DS4"], colnames(KME.corr.DS4))
hit.DS4 <- which(!is.na(col.DS4))
KME.primary.DS4[hit.DS4, 1] <- geneList.DS4[hit.DS4, "moduleColors.DS4"]
KME.primary.DS4[hit.DS4, 2] <- as.matrix(KME.corr.DS4)[cbind(hit.DS4, col.DS4[hit.DS4])]
KME.primary.DS4[hit.DS4, 3] <- as.matrix(KME.corr.p.DS4)[cbind(hit.DS4, col.DS4[hit.DS4])]
colnames(KME.primary.DS4) <- c("KME.primary","KME.color","KME.p.color")
rm(KME.corr.DS4,KME.corr.p.DS4,col.DS4,hit.DS4)
###############################################################################################
save(KME.primary.DS1,KME.primary.DS2,KME.primary.DS3,KME.primary.DS4,file="gene_KMEs.RData")
|
df6b247c1509b00bd4754a0aed3aa4c06c6e4661
|
4ae32cabb0fd268f313c4c2a54cecf9daffc9797
|
/man/extract_files.Rd
|
23edd9096bbca0f961c19222003a81b031398289
|
[
"Apache-2.0"
] |
permissive
|
InteragencyEcologicalProgram/smonitr
|
40d9201f5284100aa34015014eeb94229e722fc2
|
6bb40251473dac065587dc1b40f4f747963d1b73
|
refs/heads/master
| 2021-02-13T12:33:04.458773
| 2020-10-28T17:10:49
| 2020-10-28T17:10:49
| 244,696,632
| 2
| 2
|
NOASSERTION
| 2020-10-01T20:54:30
| 2020-03-03T17:16:42
|
R
|
UTF-8
|
R
| false
| true
| 746
|
rd
|
extract_files.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tools.R
\name{extract_files}
\alias{extract_files}
\title{Extract Files From Zip}
\usage{
extract_files(path, fnames = ".*", ..., verbose = TRUE)
}
\arguments{
\item{path}{The path to the zipfile.}
\item{fnames}{A vector of file names in the archive to extract.
Supports regex.}
\item{...}{Other arguments passed to \code{\link[utils:unzip]{utils::unzip()}}.}
\item{verbose}{If \code{TRUE}, display descriptive message.}
}
\value{
A vector of extracted file paths.
}
\description{
Extract files from a zip archive.
}
\examples{
\dontrun{
f = download_file("ftp://ftp.wildlife.ca.gov/Delta\%20Smelt/SKT.zip")
extract_files(f, "SKT", exdir = tempdir())
}
}
|
ffe5c026041ad6864447d3659da96d972d04bf68
|
8ffd0b0e8be507033d5ccafaec5e23965ce5c80b
|
/scripts/get_shrunk_filtered_results.R
|
e253731d6155b1e6b34d52cc7da21cd076bce62c
|
[] |
no_license
|
Lieker/Gpallida_RNAseq2
|
b1bc71c2fa3573825ecc51e3f0474f092cb01734
|
2c9786daf9ae77916955de611c1c4ebd95def7a4
|
refs/heads/master
| 2023-04-14T14:30:32.807694
| 2022-02-24T08:48:13
| 2022-02-24T08:48:13
| 387,459,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
r
|
get_shrunk_filtered_results.R
|
# Helper that builds the unfiltered DESeq2 results object for the contrast.
source("scripts/get_unfiltered_res_dds.R")

#' Shrunken, filtered differential-expression results.
#'
#' Applies ashr log2-fold-change shrinkage to the contrast between the two
#' treatment groups at time point `th`, then keeps genes passing both the
#' adjusted-p and |log2FC| thresholds.
#'
#' @param d DESeqDataSet; defaults to a global `dds` — TODO confirm it is
#'   defined by the sourced helper before this is called.
#' @param counts_csv_file,xp_design_csv_file input file paths forwarded to
#'   get_unfiltered_res_dds().
#' @param th time point to contrast.
#' @param trtm character vector of the two treatments to contrast.
#' @param log2FC_threshold absolute log2 fold-change cutoff (exclusive).
#' @param padj_threshold adjusted p-value cutoff (exclusive).
#'
#' @return data.frame of shrunken results restricted to significant genes.
get_shrunk_filtered_results <- function(d = dds,
                                        counts_csv_file = "input/counts.csv",
                                        xp_design_csv_file = "input/xp_design.csv",
                                        th = 8,
                                        trtm = c("water", "solA"),
                                        log2FC_threshold = 0,
                                        padj_threshold = 0.05) {
  res <- get_unfiltered_res_dds(counts_csv_file,
                                xp_design_csv_file,
                                th,
                                trtm)
  # NOTE(review): `xp_design` is read from the enclosing environment, not
  # returned by the helper call above — verify it exists before calling.
  target <- xp_design %>% dplyr::filter(xp_design$tp %in% th)
  target <- target %>% dplyr::filter(target$treatment %in% trtm)
  target <- unique(as.character(target$group))
  # Shrink LFCs with ashr for the group contrast target[2] vs target[1].
  res_shr <- lfcShrink(dds = d,
                       res = res,
                       type = "ashr",
                       contrast = c("group", target[2], target[1])) %>% as.data.frame()
  # Genes with NA adjusted p-values are treated as non-significant (0.99
  # keeps them below no threshold) rather than dropped.
  res_shr$padj[is.na(res_shr$padj)] <- 0.99
  res_shr_filtered <- res_shr %>% as.data.frame() %>% dplyr::filter(., padj < padj_threshold) %>%
    dplyr::filter(., log2FoldChange > log2FC_threshold | log2FoldChange < -log2FC_threshold)
  return(res_shr_filtered)
}
|
8cdcd0fefa343f2f6fbce7ec64f0cb92e6f7a96b
|
05db2c9ff605a07702fdd73d711e838eb8c54efc
|
/R/stats.R
|
62572a446196a5ad2b1bb310ad1fb88618fc46a9
|
[] |
no_license
|
laurabiggins/lipidomics_plasma
|
b5af440bf9f099e3fe2bc712eb6be0c4ab1446ba
|
c8a05a6e1dd05ae2eeaad53f43ea552bfe09d876
|
refs/heads/main
| 2023-06-21T14:03:17.280451
| 2021-07-16T10:49:04
| 2021-07-16T10:49:04
| 369,544,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,075
|
r
|
stats.R
|
# We have various permutations of samples so need appropriate stats.
# Assume data is normally distributed - there's no point doing a test for
# normality as there are so few values per sample.

#' Do stats
#'
#' Wrapper around decide_test() and do_test(): per lipid, counts the values
#' observed in each condition, picks the appropriate statistical test from
#' those counts, runs it, and attaches the resulting p-value.
#'
#' @param tidy_dataset long-format data with columns `lipid_name`,
#'   `condition`, `value` (and `log2_value`, used by the t-tests — TODO
#'   confirm it is present in callers).
#' @param paired whether the samples are paired/matched.
#'
#' @return `tidy_dataset` with `test_type` and `p_val` columns added.
#' @export
#'
#' @examples
#do_stats <- function(tidy_dataset, stats_summary, paired = FALSE){
do_stats <- function(tidy_dataset, paired = FALSE){
  # Per lipid x condition: total number of values, and how many are
  # non-zero / zero. These counts drive the choice of test.
  n_counts <- tidy_dataset %>%
    dplyr::group_by(lipid_name, condition) %>%
    summarise(n_values = n(), n_non0 = sum(value > 0), n0 = sum(value == 0)) %>%
    ungroup()

  # Choose a test per lipid from the counts, join that choice back onto the
  # raw data, then compute the p-value per lipid with do_test().
  n_counts %>%
    group_by(lipid_name) %>%
    mutate(test_type = decide_test(cur_data(), paired)) %>%
    ungroup() %>%
    select(lipid_name, test_type) %>%
    distinct() %>%
    right_join(tidy_dataset) %>%
    dplyr::group_by(lipid_name) %>%
    mutate(p_val = do_test(cur_data())) %>%
    #mutate(adj_pval = p.adjust(p_val, method = "BH")) %>%
    ungroup()
}
#' get_fold_change
#'
#' Calculate the per-lipid fold change between exactly two conditions: the
#' mean of the first condition divided by the mean of the second. The output
#' column is named "FC_<cond1>_<cond2>".
#'
#' @param df data frame with columns `lipid_name`, `condition`, `mean`
#'
#' @return tibble containing `lipid_name` and one fold-change column
#' @export
#'
#' @examples
get_fold_change <- function(df){
  conditions <- unique(df$condition)

  # Refuse anything other than exactly 2 conditions. (The previous check only
  # caught exactly 3, so 1 or 4+ conditions slipped past it and either errored
  # downstream or silently used only the first two.)
  if(length(conditions) != 2){
    stop("populate this for > 2 conditions")
  }

  fc_name <- paste0("FC_", conditions[1], "_", conditions[2])

  # One column per condition, then ratio of the two means.
  # (A trailing empty argument after values_from was removed.)
  fc_data <- pivot_wider(
    df,
    id_cols = c(lipid_name, condition, mean),
    names_from = condition,
    values_from = mean
  ) %>%
    mutate(fold_change = .data[[conditions[1]]]/.data[[conditions[2]]]) %>%
    rename(!!fc_name := fold_change)

  fc_data %>%
    select(-all_of(conditions))
}
#' Fold change between exactly 2 conditions. Another function would be needed
#' for more than 2 conditions.
#'
#' Per lipid: mean of the first condition divided by mean of the second
#' (order of appearance in `df$condition`).
#'
#' @param df data frame with columns `lipid_name`, `condition`, `mean`
#'
#' @return tibble containing columns `lipid_name`, `fold_change`
#' @export
#'
#' @examples
get_fold_change_2_conditions <- function(df){
  conditions <- unique(df$condition)

  assertthat::assert_that(
    length(conditions) == 2,
    msg = paste0("expected to find 2 conditions, actually found ", conditions)
  )

  # One column per condition, then ratio of the two means.
  # (A trailing empty argument after values_from was removed — R treats a
  # trailing comma in a call as an empty argument.)
  pivot_wider(
    df,
    id_cols = c(lipid_name, condition, mean),
    names_from = condition,
    values_from = mean
  ) %>%
    mutate(fold_change = .data[[conditions[1]]]/.data[[conditions[2]]]) %>%
    select(lipid_name, fold_change)
}
#' Do statistical test
#'
#' Dispatches on the (single) test label stored in `df$test_type` and runs the
#' corresponding test on one lipid's block of data.
#'
#' @param df data frame with columns `condition`, `value`, `log2_value` and a
#'   `test_type` column whose single value is one of "independent_t_test",
#'   "paired_t_test", "one_sample_t_test", "none", "paired_none".
#'
#' @return p-value, or `NA` when no test is applicable.
#' @export
#'
#' @examples
do_test <- function(df){
  n_types <- length(unique(df$test_type))
  assertthat::assert_that(n_types == 1,
                          msg = paste0(
                            "no of unique test types per lipid name should have been 1, but it was",
                            n_types
                          )
  )

  chosen <- df$test_type[1]
  switch(chosen,
         independent_t_test = rstatix::t_test(df, log2_value ~ condition, paired = FALSE)$p,
         paired_t_test = rstatix::t_test(df, log2_value ~ condition, paired = TRUE)$p,
         one_sample_t_test = one_sample_wrapper(df),
         none = NA,
         paired_none = NA
  )
}
# One-sample fallback for do_test(): used when one condition is (almost) all
# zeros. Tests whether the fully non-zero condition's values differ from the
# maximum value observed in the other condition.
one_sample_wrapper <- function(df){
  #browser()
  # Per-condition counts of values / non-zeros / zeros.
  n_counts <- df %>%
    group_by(condition) %>%
    summarise(n_values = n(), n_non0 = sum(value > 0), n0 = sum(value == 0))

  # The condition with no zero values at all is the one being tested.
  # NOTE(review): if 0 or 2 conditions qualify, the filters below misbehave —
  # the commented-out stopifnot was meant to guard this.
  non0_condition <- n_counts %>%
    filter(n0 == 0) %>%
    pull(condition)

  #stopifnot(length(non0_condition) == 1, msg = "one sample t-test going wrong")

  # mu = largest value seen in the (near-zero) other condition.
  mu_value <- df %>%
    filter(condition != non0_condition) %>%
    pull(value) %>%
    max()

  res <- df %>%
    filter(condition == non0_condition) %>%
    rstatix::t_test(value ~ 1, mu = mu_value)

  res$p
}
#' decide_test
#'
#' Decide which statistical test is appropriate for one lipid, based on the
#' per-condition counts of zero and non-zero values.
#'
#' @param df data frame with one row per condition and columns `n_non0`
#'   (number of non-zero values) and `n0` (number of zero values).
#' @param paired TRUE or FALSE — whether the samples are paired.
#' @param threshold integer; standard-deviation ratio threshold.
#'   NOTE(review): currently unused — kept for interface compatibility.
#'
#' @return one of "independent_t_test", "paired_t_test", "one_sample_t_test",
#'   "paired_none", "none".
#' @export
#'
#' @examples
decide_test <- function(df, paired, threshold = 2) {
  # A two-sample decision only makes sense with exactly two conditions.
  if (nrow(df) != 2) {
    return("none")
  }

  # Both conditions need at least 2 non-zero values and at most 1 zero.
  has_enough <- df$n_non0 >= 2 & df$n0 <= 1
  if (all(has_enough)) {
    if (paired) {
      return("paired_t_test")
    }
    return("independent_t_test")
  }

  # One condition nearly empty while the other has no zeros at all:
  # fall back to a one-sample test on the non-zero condition.
  if (any(df$n_non0 <= 1) && any(df$n0 == 0)) {
    return("one_sample_t_test")
  }

  if (paired) {
    return("paired_none")
  }
  "none"
}
#' Statistical test information
#'
#' Return a human-readable description of the statistical test that was
#' performed (or why none was).
#'
#' @param stat_test string; one of the test labels produced by decide_test().
#'
#' @return string describing the test; NULL (invisibly) for unknown labels.
#' @export
#'
#' @examples
#' stat_test_info("independent_t_test")
stat_test_info <- function(stat_test){
  # "none" and "paired_none" share a message via switch fall-through.
  switch(stat_test,
         independent_t_test = "Independent t-test performed on log2 transformed data. The samples were not paired/matched.",
         paired_t_test = "Paired t-test performed on log2 transformed data. The samples were paired/matched.",
         one_sample_t_test = "The data did not meet the criteria for performing a two sample t-test. One of the conditions had almost all 0 values, so a one-sample t-test was performed for the other condition. The results of this test may not be very robust and the plot should be viewed carefully to understand the data.",
         none = ,
         paired_none = "The data did not meet the criteria for performing a statistical test. There may have been too few non-zero values or the variance of a condition may have been too high."
  )
}
|
417053072f130dc7a91fcb7232e1aff142db81c7
|
0969a8a76b2361bd1301b61503392debe2e454e3
|
/Rproject/Rcode/project_package/proj.GR.CHC.package/R/function_getCharge_eventID.R
|
7f06cf39fad63d6c1a32d6c9c96ba3388eb4bc67
|
[] |
no_license
|
daliagachc/GR_chc
|
9a85b133919863769e9d655c2e7c63fa3676177a
|
c40d7a28e18f1cea4c940af44151e48c8926bf55
|
refs/heads/master
| 2020-06-01T04:00:06.653784
| 2019-06-13T05:33:04
| 2019-06-13T05:33:04
| 190,622,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
r
|
function_getCharge_eventID.R
|
#' Get charge from event ID
#'
#' Event IDs label positive and negative ion growing events. For example,
#' "Pos101" is growing event 101 of a positive ion. This function extracts
#' the charge — "positive" in that case.
#'
#' @param eventID Event ID(s) of the form "Pos123", "Neg42", etc. (vectorised)
#'
#' @return The charge of each event: "positive" or "negative".
#' @export
#'
#' @examples getCharge_eventID("Pos101")
getCharge_eventID = function(eventID){
  # Collapse everything from the first "Neg"/"Pos" onward to the bare tag,
  # then map the tag to its full label. IDs matching neither tag pass
  # through with only the gsub applied (same as the original behaviour).
  charge <- gsub("Pos.*", "Pos", gsub("Neg.*", "Neg", eventID))
  charge[charge == "Pos"] <- "positive"
  charge[charge == "Neg"] <- "negative"
  charge
}
|
db21b3ac4b4161aac589e3d4721b2e2c5af9bd56
|
14b4098e27706f1cbae4e558391c7c8f2a1ba61e
|
/man/predTime.Rd
|
d15f383b8c1250bee856cf5af23a34c03fb8202f
|
[] |
no_license
|
cran/qualypsoss
|
b0f1a247ae39f21c21d6537e320d97522afd8c98
|
f69214faa637b89596081521c3fb135e17c5440d
|
refs/heads/master
| 2022-09-19T01:29:12.112426
| 2022-08-31T10:50:02
| 2022-08-31T10:50:02
| 236,877,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 421
|
rd
|
predTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/description-data.r
\docType{data}
\name{predTime}
\alias{predTime}
\title{Years 1971-2099 repeated for the 20 scenarios}
\format{
matrix 129 years x 20 scenarios
}
\usage{
data(predTime)
}
\description{
Years 1971-2099 repeated for the 20 scenarios
}
\author{
Guillaume Evin \email{guillaume.evin@inrae.fr}
}
\keyword{data}
|
fd1cfbd2df0454375d9e686ebc0afbad915223ea
|
af1c7110503a26d6da1b72a282c4bd059ad201da
|
/notebooks/self/r/retro-18.MLE.R
|
39c04e032e4f0c525acecc6b2a690280e75f6501
|
[] |
no_license
|
ygzheng9/easypy
|
e0df0d17bc9bb267a3fe43af324c0cf1692fe27d
|
f7eff5be5cdd6412b706532677b71fb4ecb0b0b7
|
refs/heads/master
| 2022-04-15T17:05:06.865656
| 2020-03-28T01:59:43
| 2020-03-28T01:59:43
| 125,134,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,838
|
r
|
retro-18.MLE.R
|
###############################
# Model known, parameters known: probability of specified events.
# Binomial(size = 20, p = 0.7): plot the whole probability distribution,
# i.e. the probability of 0 successes, 1 success, 2 successes, ...
# x is the number of successes.
barplot(
  dbinom(
    x = c(0:20),
    size = 20,
    prob = 0.7
  ),
  names.arg = 0:20,
  ylab = "p(x)",
  xlab = "x"
)

###############################
# MLE: maximum likelihood estimation.
# Model known, data observed — which parameter value is most likely?
# Model is binomial; successes were observed out of 20 trials — what is the
# most likely theta? (Original comment said 17 successes, but the code below
# uses 12 — TODO confirm which was intended.)
theta <- seq(from = 0, to = 1, by = 0.01)
plot(theta,
     dbinom(x = 12, size = 20, prob = theta),
     type = "l",
     ylab = "likelihood")

####### Optimization approach
# Define the cost function to minimise:
# the negative log likelihood.
# `data` here is a single count; `par` corresponds to theta.
nll.binom <- function(data, par) {
  return(-log(dbinom(
    data, size = 20, prob = par
  )))
}

# Run the optimizer (starting value 0.5).
optim(par = 0.5, fn = nll.binom, data = 12)

#########
# Normal distribution
data <- read.csv("https://git.io/v58i8")
head(data)

# Inspect the data to choose a model; the shape suggests a normal
# distribution. "Inspect" means looking at the density: the pdf for a
# continuous variable, a histogram for a discrete one. Since the
# observations are known, plot the histogram / density curve directly.
hist(data$x)
plot(density(data$x))

# Define the cost function.
# `data` is a vector, so dnorm returns a vector, as does log; sum() collapses
# the per-observation log-likelihoods into a single number.
nll.normal <- function(data, par) {
  return(-sum(log(dnorm(
    data, mean = par[1], sd = par[2]
  ))))
}

# `par` is the optimizer's starting point, chosen by eyeballing the plot.
optim(par = c(2.8, 0.5),
      fn = nll.normal,
      data = data$x)

# With the fitted parameters, compare the model's density curve against the
# data's density.
x <- seq(-3, 4, 0.01)
plot(density(data$x))
lines(x, dnorm(x, mean = 2.85, sd = 0.12), lty = 2)

#########
# Skewed data: the ex-Gaussian (exGaussian) distribution.
data2 <- read.csv("https://git.io/v58yI")
plot(density(data2$rt))

# No ready-made density function exists, so define one.
dexg <- function(x, mu, sigma, tau) {
  return((1/tau) * exp((sigma^2 / (2 * tau^2)) - (x - mu) / tau) *
           pnorm((x - mu)/sigma - (sigma/tau)))
}

nll.exg <- function(data, par) {
  return(-sum(log(dexg(x = data,
                       mu = par[1],
                       sigma = par[2],
                       tau = par[3]))))
}

optim(par=c(0, 0.1, 0.1), fn = nll.exg, data = data2$rt)

# Compare the fitted model against the observed data.
x <- seq(0, 4.5, 0.01)
plot(density(data2$rt))
lines(x, dexg(x, mu = 0.715, sigma = 0.336, tau = 0.465), lty = 2)
|
eb85fce3880cc711e6e495d2f070c5ed3a47039f
|
77fb580e7f65262d3552b17817ba3447746e8edc
|
/tests/testthat.R
|
6c5775a9e2e4ee8c610e8b76dbededce9b1a84cb
|
[] |
no_license
|
paytonjjones/networktools
|
b0cc9fd477b2785a353407f3bbd0564179c70aef
|
ead23972ebe455576d7cbd3eb581af1dcf2b5c94
|
refs/heads/master
| 2022-06-14T18:32:20.425882
| 2022-06-03T21:55:19
| 2022-06-03T21:55:19
| 87,588,729
| 10
| 4
| null | 2022-06-03T21:55:20
| 2017-04-07T21:34:21
|
R
|
UTF-8
|
R
| false
| false
| 68
|
r
|
testthat.R
|
# Test-suite entry point: runs all testthat tests for the networktools
# package (tests/testthat/).
library(testthat)
library(networktools)
test_check("networktools")
|
730d82bf34d3a7c99a5f4e121ff55b7fb995da9f
|
dae30ccd57faedafedbeb7fd6f7171664f20c9e7
|
/test-code/turf print.R
|
c8e7a3207d4549d5da3dca0c68409b9f14790551
|
[] |
no_license
|
ttrodrigz/onezero
|
6a530e166a5e56eb7c4b727b09f0c5a9b966c397
|
8c2d355f0620039de6d0c8fb0bb1ad32c9dfe55f
|
refs/heads/main
| 2023-05-12T15:05:06.643798
| 2023-05-01T13:18:45
| 2023-05-01T13:18:45
| 239,914,766
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,355
|
r
|
turf print.R
|
# Begin turf message ------------------------------------------------------
# NOTE(review): this fragment reads `k`, `item.mat`, `n.items`, `depth`,
# `greedy_begin`, `greedy_entry` and the do.force.* / force.*.names objects
# from the enclosing scope (presumably a TURF print method) — confirm
# against the full function before reusing.

k.range <- range(k)

# nicer output for displaying k range: show "a-b" when k is a contiguous
# run of integers, otherwise list the sizes individually.
if (identical(seq(k.range[1], k.range[2], by = 1), k) & length(k) > 1) {
  k.string <- glue("{k.range[1]}-{k.range[2]}")
} else {
  k.string <- paste(k, collapse = ", ")
}

# Header panel with the basic run parameters.
cat_line(rule("TURF", line = 2))
cat_line(style_italic(" Sample size: "), scales::comma(nrow(item.mat)))
cat_line(style_italic(" # of items: "), n.items)
cat_line(style_italic(" Set sizes: "), k.string)
cat_line(style_italic(" Depth: "), depth)

# Only describe the greedy algorithm when it applies to some requested size.
if (greedy_begin <= max(k)) {
  cat_line()
  cat_line(rule("Greedy Algorithm", line = 1))
  cat_line(style_italic("Begins at set size: "), greedy_begin)
  cat_line(style_italic(" Entry method: "), greedy_entry)
}

cat_line()

# Inclusion/exclusion messages --------------------------------------------
if (any(do.force.in, do.force.in.together, do.force.out, do.force.out.together)) {
  cat_line(rule("Constraints"))

  # Items forced into every combination (single bulleted line).
  if (do.force.in) {
    msg <- paste(force.in.names, collapse = ", ")
    cat_line("Items included in every combination")
    cat_line(paste("\U2022", msg))
    cat_line()
  }

  # Groups of items that must all appear together (one bullet per group).
  if (do.force.in.together) {
    num <- length(force.in.together.names)
    msg <-
      force.in.together.names %>%
      map(paste, collapse = ", ") %>%
      map2(
        .x = .,
        .y = 1:num,
        .f = ~glue("\U2022 {.x}")
      ) %>%
      paste(collapse = "\n")
    cat_line("Items that must all appear together within a combination")
    cat_line(msg)
    cat_line()
  }

  # Items excluded from every combination (single bulleted line).
  if (do.force.out) {
    msg <- paste(force.out.names, collapse = ", ")
    cat_line("Items excluded from every combination")
    cat_line(paste("\U2022", msg))
    cat_line()
  }

  # Groups of items that may not all appear together (one bullet per group).
  if (do.force.out.together) {
    num <- length(force.out.together.names)
    msg <-
      force.out.together.names %>%
      map(paste, collapse = ", ") %>%
      map2(
        .x = .,
        .y = 1:num,
        .f = ~glue("\U2022 {.x}")
      ) %>%
      paste(collapse = "\n")
    cat_line("Items that cannot all appear together within any combination")
    cat_line(msg)
    cat_line()
  }
}
|
2c2f5b98bb8a20b56a298a8d6602f38cd7caf61a
|
1220f5f54a88af7b6b833a51f1264effa69cdf24
|
/oryza_API/oryza_scripts/01_load_inputs_setting_oryza.R
|
c8066bc0c0b9898d1476dc23dd05792be3715f2c
|
[] |
no_license
|
CIAT-DAPA/usaid_procesos_interfaz
|
57dc6ea42f97ec57c3496eec58fae44463be6d04
|
7e073187c9db9074748c04dedd3f9b07ca12c2c9
|
refs/heads/master
| 2023-09-03T10:44:13.768665
| 2023-04-18T13:37:07
| 2023-04-18T13:37:07
| 83,475,773
| 2
| 3
| null | 2023-01-26T16:04:13
| 2017-02-28T20:24:50
|
Python
|
UTF-8
|
R
| false
| false
| 4,813
|
r
|
01_load_inputs_setting_oryza.R
|
### Set Model - ORYZA linux --->> Control + Reruns
# Author: Rodriguez-Espinoza J.
# https://github.com/jrodriguez88/
# 2022
# Arguments
# dir_inputs = dir of input oryza files c(".sol", ".crp", ".exp") and coordinates of weather station
# dir_run = dir of specific simulation setups
# EMD = Emergence day - julian date
# STTIME = Start time simulation - julian date
# IYEAR = Initial Year - numerical XXXX
# EMYR = Emergence Year - numerical XXXX
# ISTN = Station code - numerical - number of scenario (1:99)
# Build (and create, if absent) the run directory for a given day label.
# "ñ" is normalised to "n" so the path stays ASCII-safe. Returns the path.
make_dir_run <- function(dir_run_main, days) {
  # require(stringr)
  run_dir <- stringr::str_replace(paste0(dir_run_main, days, "/"), "ñ", "n")
  if (!dir.exists(run_dir)) {
    dir.create(run_dir, showWarnings = FALSE, recursive = TRUE, mode = "777")
    # system('chmod 777 *.*')
  }
  run_dir
}
# Copy the base input files into the run directory, then rename the soil,
# crop and experiment files to the fixed "standard.*" names ORYZA expects.
copy_inputs <- function(dir_inputs, dir_run) {
  file.copy(list.files(dir_inputs, full.names = TRUE), dir_run)
  extensions <- c(".sol", ".crp", ".exp")
  for (ext in extensions) {
    file.rename(
      from = list.files(dir_run, pattern = ext, full.names = TRUE),
      to = paste0(dir_run, "standard", ext)
    )
  }
  invisible(extensions)
}
# Reads the geolocation info (coordenadas.csv) from the run inputs directory
# and returns it as a named list: names from column 1, values from column 2.
load_coordinates <- function(dir_inputs_run){
  # Turn a two-column data.frame into a named list (one entry per row).
  frame_list <- function(data){
    setNames(split(data[,2], seq(nrow(data))), data[,1])
  }

  print(paste0("dir_inputs_run: ", dir_inputs_run))

  # require(readr)
  # NOTE(review): relies on readr's read_csv being attached by the caller.
  coordenadas <- read_csv(paste0(dir_inputs_run,'coordenadas.csv')) %>%
    as.data.frame() %>%
    frame_list()
}
#load_wth_data <- function( )

# Creates the ORYZA control file (control.dat) in the specific simulation's
# run directory, pointing at the renamed standard.* input files and the
# reruns file written by write_reruns_aclimate().
# NOTE(review): sink() redirects all console output; if an error occurred
# between the paired sink() calls the redirection would stay open — consider
# on.exit(sink(), add = TRUE) when next touching this function.
write_control <- function(dir_run){
  file_name <- paste0(dir_run, 'control.dat')
  # print(file_name)

  # Rewrite the file from scratch on every call.
  if(file.exists(file_name)){
    file.remove(file_name)
  }

  sink(file = file_name, append = T)
  cat(paste0("CONTROLFILE = 'control.dat'"), sep = '\n')
  cat(paste0("STRUN = 1"), sep = '\n')
  cat(paste0("ENDRUN = 100"), sep = '\n')
  cat(sep = '\n')
  cat(paste0("FILEON = 'res.dat'"), sep = '\n')
  cat(paste0("FILEOL = 'model.log'"), sep = '\n')
  cat(paste0("FILEIR = 'reruns.rer'"), sep = '\n')
  cat(paste0("FILEIT = 'standard.exp'"), sep = '\n')
  cat(paste0("FILEI1 = 'standard.crp'"), sep = '\n')
  cat(paste0("FILEI2 = 'standard.sol'"), sep = '\n')
  cat(sep = '\n')
  cat(paste0("MULTIY = 'YES'"), sep = '\n')
  cat(sep = '\n')
  cat(paste0("PRDEL = 1."), sep = '\n')
  cat(paste0("IPFORM = 5 "), sep = '\n')
  cat(paste0("COPINF = 'N'"), sep = '\n')
  cat(paste0("DELTMP = 'N'"), sep = '\n')
  cat(paste0("IFLAG = 1100 "), sep = '\n')
  sink()
}
# Creates the reruns file (reruns.rer) in the specific simulation's run
# directory: one rerun set per climate scenario (ISTN 1:100), all sharing the
# same crop/soil/experiment files and emergence/start dates.
write_reruns_aclimate <- function(dir_run, EMD, STTIME, IYEAR, EMYR){
  # require(rebus)
  CNTR <- 'ciat'
  ISTN <- 1:100

  # Weather directory path separator depends on the OS.
  # NOTE(review): BACKSLASH comes from the rebus package (see commented
  # require above); the non-Windows branch's gsub('/', '/', ...) is a no-op.
  if (Sys.info()['sysname'] == 'Windows'){
    WTRDIR = paste0("'", gsub('/', BACKSLASH, " wth/"), "'")
  }
  else{
    WTRDIR = paste0("'", gsub('/', '/', " wth/"), "'")
  }

  # ISTN <- 1:length(EMD) ## to control which climate scenario is simulated
  # IYEAR <- reruns_params$IYEAR
  # STTIME <- reruns_params$STTIME
  # EMD <- reruns_params$EMD
  #
  # One row per rerun; scalar arguments are recycled across the 100 ISTNs.
  data <- data.frame(FILEIT = paste0('standard', '.exp'),
                     FILEI2 = paste0('standard', '.sol'),
                     FILEI1 = paste0('standard', '.crp'),
                     CNTR,
                     ISTN,
                     IYEAR,
                     STTIME,
                     EMYR,
                     EMD,
                     WTRDIR = WTRDIR)

  file_name <- paste0(dir_run, 'reruns.rer')
  # print(file_name)

  # Rewrite the file from scratch on every call.
  if(file.exists(file_name)){
    file.remove(file_name)
  }

  # Emit one '****'-separated block of key = value lines per rerun.
  # NOTE(review): as with write_control, consider on.exit(sink(), add = TRUE)
  # so an error between the sink() calls cannot leave output redirected.
  sink(file = file_name, append = T)
  for(i in 1:nrow(data)){
    cat('********************', sep = '\n')
    cat(paste("FILEIT = ", "'", data[i, 'FILEIT'], "'", sep = ""), sep = '\n')
    cat(paste("FILEI2 = ", "'", data[i, 'FILEI2'], "'", sep = ""), sep = '\n')
    cat(paste("FILEI1 = ", "'", data[i, 'FILEI1'], "'", sep = ""), sep = '\n')
    cat(paste("CNTR = ", "'", data[i, 'CNTR'], "'", sep = ""), sep = '\n')
    cat(paste("ISTN = ", data[i, 'ISTN'], sep = ""), sep = '\n')
    cat(paste("IYEAR = ", data[i, 'IYEAR'], sep = ""), sep = '\n')
    cat(paste("STTIME = ", data[i, 'STTIME'], ".", sep = ""), sep = '\n')
    cat(paste("EMYR = ", data[i, 'EMYR'], sep = ""), sep = '\n')
    cat(paste("EMD = ", data[i, 'EMD'], sep = ""), sep = '\n')
    cat(paste("WTRDIR = ", as.character(data[i, 'WTRDIR']), sep = ""), sep = '\n')
  }
  sink()
}
|
0dc1bf3ddfa10050db37255aaec76035f2c1b2e1
|
cf2f939398a5890945b415456e0d77393e3e14ed
|
/test.R
|
8e99039f34b682ab3d0e10b809fa3140c3a59b38
|
[] |
no_license
|
dwd-dfge/test
|
445559d19b505279d001bfb626022bd9da0d6bfa
|
26396043fcc6a5e810b15fa68d44063471bc8f45
|
refs/heads/master
| 2020-04-10T04:46:32.695422
| 2018-12-07T10:28:59
| 2018-12-07T10:28:59
| 160,808,611
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
test.R
|
library(topicmodels)
library(magrittr)

# NOTE(review): hard-coded absolute path + setwd() makes this script
# machine-specific; consider relative paths or here::here().
setwd("C:/Users/dwiegand/Desktop/KI/NLP/Korpus/")

#### Import a two-line ("zweizeiliges") PDF ####
# NOTE(review): pdf_text() needs pdftools; str_replace_all()/str_squish()
# need stringr; rownames_to_column()/data_frame() need tibble; unnest(),
# mutate() etc. need tidyr/dplyr — none of these are loaded above, so they
# must be attached elsewhere before this runs. TODO confirm.
import <- pdf_text("CSR_EN.pdf") %>%
  str_replace_all(" +", "\r\n") %>% #"+" = The preceding item will be matched one or more times. Replace runs of spaces with "\r\n"
  strsplit("\r\n")

# Remove superfluous whitespace from every page's lines.
for (i in 1:length(import)) {
  import[[i]] <- str_squish(import[[i]])
}

text_df<-rownames_to_column(data_frame(import)) # page numbers as a column
text_df<-unnest(text_df)
colnames(text_df) <- c("pagenum", "text")

# Tag alternating lines (odd flag) and order lines within each page.
text_df %<>%
  mutate(odd = seq_len(nrow(text_df)) %% 2) %>%
  mutate(pagenum = as.numeric(pagenum)) %>%
  group_by(pagenum) %>%
  arrange(pagenum, odd) %>%
  ungroup()
|
84fae1fba65cb4b8b0926d702ab0de420d010d82
|
d2129c74ed601e23dc34ab451f2f9337af3dc588
|
/man/dimple.Rd
|
93dd77a738ce08fb2c9f7dcc6c7b2c6448829e68
|
[
"MIT"
] |
permissive
|
kferris10/rcdimple
|
9aa69f530c0958043c34f57b281478f0b1d0604e
|
9dfc80320c745d45b522528432e78e013eb055de
|
refs/heads/master
| 2021-01-23T20:55:51.743348
| 2015-10-02T16:35:51
| 2015-10-02T16:35:51
| 32,642,581
| 1
| 0
| null | 2015-03-21T17:37:58
| 2015-03-21T17:37:57
| null |
UTF-8
|
R
| false
| false
| 2,595
|
rd
|
dimple.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/dimple.R
\name{dimple}
\alias{dimple}
\title{Plot R data with dimple}
\usage{
dimple(data, x, type = "bubble", y = NULL, z = NULL, groups = NULL,
group = NULL, storyboard = NULL, ..., pipe = F, width = NULL,
height = NULL, elementId = NULL)
}
\arguments{
\item{data}{a \code{\link[base]{data.frame}} with the data to plot}
\item{x}{formula \code{y ~ x} or column name(s) of \code{x} variable. Dimple can accept either a single or multiple column name(s)
as \code{x} or \code{y}. If multiple column names for either \code{x} or \code{y}, then the formula method cannot be used.}
\item{type}{a string indicating plot type ( \href{http://github.com/PMSI-AlignAlytics/dimple/wiki/dimple.plot}{bar, line, area, or bubble} )}
\item{y}{column name(s) of \code{y} variable if \code{x} is not a \code{formula}. Dimple can accept either a single or multiple column name(s)
as \code{y} or \code{y}. If multiple column names for either \code{x} or \code{y}, then the formula method cannot be used.}
\item{groups}{see \code{group}}
\item{group}{column name(s) of \code{groups}. Dimple differs from other \code{rCharts2} libraries
in its ability to handle multiple fields or column names as \code{groups}.}
\item{storyboard}{column name(s) with data for \href{https://github.com/PMSI-AlignAlytics/dimple/wiki/dimple.storyboard}{dimple.storyboard}}
\item{...}{various additional arguments to future-proof and provide
undocumented functionality.}
\item{pipe}{\code{logical} to return a \code{\link[pipeR]{Pipe}}.}
\item{width}{width in pixels of the plot container. Plot \code{width} can be set separately (see \code{\link{setBounds}}).}
\item{height}{height in pixels of the plot container. Plot \code{height} can be set separately (see \code{\link{setBounds}}).}
\item{elementId}{\code{string} css selector id for the chart. If the \code{elementId} is \code{NULL},
a semi-random id will be assigned.}
}
\description{
Use \href{http://dimplejs.org}{Dimple} with this \code{htmlwidget}. Dimple is a powerful javascript library
based on \href{http://d3js.org}{d3.js}. Dimple supports numerous chart \href{http://dimplejs.org/examples_index.html}{types} and is
well \href{https://github.com/PMSI-AlignAlytics/dimple/wiki}{documented}.
}
\seealso{
Other dimple: \code{\link{add_legend}};
\code{\link{add_title}}; \code{\link{default_colors}};
\code{\link{facet.dimple}}; \code{\link{set_bounds}};
\code{\link{xAxis.dimple}}, \code{\link{yAxis.dimple}},
\code{\link{zAxis.dimple}}
}
|
aab55a1aec0b4110112099844668c578aba30c9f
|
948dc17401eb812276c36df79c695cf14d91ae06
|
/docs/book/void.R
|
a2943de213eb03403cb793384949bc91664dcf69
|
[
"CC-BY-4.0",
"CC-BY-3.0",
"CC0-1.0"
] |
permissive
|
r-spatial/asdar-book.org
|
bc5d177add0128affb61b8c2ea84dd7ed47869c3
|
366af3b26c2083e585daf2faab6b261e3c622a9b
|
refs/heads/master
| 2023-05-24T19:33:13.224914
| 2023-05-23T10:52:17
| 2023-05-23T10:52:17
| 153,242,511
| 50
| 35
| null | 2020-10-15T08:24:57
| 2018-10-16T07:35:34
|
PHP
|
UTF-8
|
R
| false
| false
| 38
|
r
|
void.R
|
This image was not generated using R.
|
2be1d012d9614d28e958dc70d1347a6691c41fb7
|
5a3bc31268f44c48452a5129f0229e0d91dea5e5
|
/man/cytoBand.Rd
|
40605888dac3a1f51d9e1e2a85fbfc9145969ed1
|
[] |
no_license
|
genome-vendor/r-bioc-dnacopy
|
e644d084267a820ed9f8641bb4e5802cd2e45168
|
14c3c1ba3320b1e035a12993aa3ca181d3fba7b1
|
refs/heads/master
| 2016-09-05T22:19:45.383822
| 2014-01-27T23:25:57
| 2014-01-27T23:25:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
rd
|
cytoBand.Rd
|
\name{cytoBand}
\alias{cytoBand}
\title{Cytogenetic band data}
\description{
Cytogenetic band data from the goldenPath repository
}
\usage{data(cytoBand)}
\format{A data frame containing five variables: chromosome, start and
end positions, band name and Giemsa stain.
}
\source{
http://genome.cse.ucsc.edu/goldenPath/hg16/database/cytoBand.txt.gz
}
\keyword{datasets}
|
3c81809a2cbab661146b67d914125ec870d7ffbd
|
2a17b74ba93eb44409a84b1f8913cc98304afe5c
|
/R/modelfit.mg.r
|
3bef3f4df4cd5b855ac656f7c6341df282c8be54
|
[] |
no_license
|
cran/gesca
|
a22d5e23f9629e8d97e721d80c4f5c1cb2dd43b4
|
42c8e51087f274a4c37e40e5a6463b5e85e26c5e
|
refs/heads/master
| 2021-01-15T15:25:21.308665
| 2017-09-28T19:15:10
| 2017-09-28T19:15:10
| 52,947,758
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,444
|
r
|
modelfit.mg.r
|
modelfit.mg <- function (Z0, W, T, nvar, nlv, ng, case_index)
{
  # Model fit measures (GFI-ULS and SRMR) for a multi-group GSCA model.
  #
  # Z0         stacked (rows = cases of all groups) indicator data
  # W          stacked weight matrices, one nvar x nlv block per group
  # T          stacked loading/path matrices, one nlv-row block per group
  # nvar, nlv  number of indicators / latent variables per group
  # ng         number of groups
  # case_index ng x 2 matrix of first/last row of Z0 for each group
  #
  # Returns a list with GFI, SRMR, and the stacked correlation residual
  # matrices (COR_RES), one nvar x nvar block per group.
  gfi_num <- 0       # accumulated sum of squared diagonal covariance residuals
  gfi_den <- 0       # accumulated sum of squared diagonal sample covariances
  srmr_num <- 0      # accumulated squared correlation residuals (upper triangle)
  col_end <- 0
  lv_end <- 0
  cor_resid_all <- c()  # stacked correlation residual matrices
  for (g in 1:ng) {
    # Column and latent-variable index ranges for group g.
    col_start <- col_end + 1
    col_end <- col_end + nvar
    lv_start <- lv_end + 1
    lv_end <- lv_end + nlv
    zz <- Z0[case_index[g, 1]:case_index[g, 2], col_start:col_end]
    w_g <- W[col_start:col_end, lv_start:lv_end]
    v_g <- cbind(diag(nvar), w_g)
    # Renamed from `t` in the original to avoid shadowing base::t().
    t_g <- T[lv_start:lv_end, ]
    omega <- v_g - w_g %*% t_g
    resid_scores <- zz %*% omega
    sam_cov <- cov(zz)   # sample covariances for this group
    sam_cor <- cor(zz)
    # Model-implied covariance matrix:
    # (omega omega')^-1 omega D omega' (omega omega')^-1, D = diag of
    # residual-score variances, computed via two linear solves.
    half <- solve(omega %*% t(omega),
                  omega %*% diag(apply(resid_scores, 2, var)) %*% t(omega))
    implied_cov <- t(solve(t(omega %*% t(omega)), t(half)))
    cov_resid <- sam_cov - implied_cov
    # Model-implied correlations: c_ij / sqrt(c_ii * c_jj), elementwise.
    implied_cor <- implied_cov / sqrt(outer(diag(implied_cov), diag(implied_cov)))
    cor_resid <- sam_cor - implied_cor
    # Squared correlation residuals over the strict upper triangle.
    srmr_num <- srmr_num + sum(cor_resid[upper.tri(cor_resid)]^2)
    gfi_num <- gfi_num + sum(diag(cov_resid)^2)
    gfi_den <- gfi_den + sum(diag(sam_cov)^2)
    cor_resid_all <- rbind(cor_resid_all, cor_resid)
  }
  nvar_tot <- ng * nvar
  # Standardized root mean square residual and GFI-ULS.
  SRMR <- sqrt(srmr_num / (nvar_tot * (nvar_tot + 1) / 2))
  GFI <- 1 - (gfi_num / gfi_den)
  list(GFI = GFI, SRMR = SRMR, COR_RES = cor_resid_all)
}
|
5c36f3e69af986d736cef2af23447fc73bc211ea
|
753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed
|
/service/paws.cloud9/R/paws.cloud9_operations.R
|
84d3bc56a58d982e1a73a8a0041b611e9ea8c5a2
|
[
"Apache-2.0"
] |
permissive
|
CR-Mercado/paws
|
9b3902370f752fe84d818c1cda9f4344d9e06a48
|
cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983
|
refs/heads/master
| 2020-04-24T06:52:44.839393
| 2019-02-17T18:18:20
| 2019-02-17T18:18:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,176
|
r
|
paws.cloud9_operations.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_operation new_request send_request
NULL
#' Creates an AWS Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment
#'
#' Creates an AWS Cloud9 development environment, launches an Amazon Elastic Compute Cloud (Amazon EC2) instance, and then connects from the instance to the environment.
#'
#' @section Accepted Parameters:
#' ```
#' create_environment_ec2(
#' name = "string",
#' description = "string",
#' clientRequestToken = "string",
#' instanceType = "string",
#' subnetId = "string",
#' automaticStopTimeMinutes = 123,
#' ownerArn = "string"
#' )
#' ```
#'
#' @param name [required] The name of the environment to create.
#'
#' This name is visible to other AWS IAM users in the same AWS account.
#' @param description The description of the environment to create.
#' @param clientRequestToken A unique, case-sensitive string that helps AWS Cloud9 to ensure this operation completes no more than one time.
#'
#' For more information, see [Client Tokens](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) in the *Amazon EC2 API Reference*.
#' @param instanceType [required] The type of instance to connect to the environment (for example, `t2.micro`).
#' @param subnetId The ID of the subnet in Amazon VPC that AWS Cloud9 will use to communicate with the Amazon EC2 instance.
#' @param automaticStopTimeMinutes The number of minutes until the running instance is shut down after the environment has last been used.
#' @param ownerArn The Amazon Resource Name (ARN) of the environment owner. This ARN can be the ARN of any AWS IAM principal. If this value is not specified, the ARN defaults to this environment\'s creator.
#'
#' @examples
#' #
#' \donttest{create_environment_ec2(
#' name = "my-demo-environment",
#' automaticStopTimeMinutes = 60L,
#' description = "This is my demonstration environment.",
#' instanceType = "t2.micro",
#' ownerArn = "arn:aws:iam::123456789012:user/MyDemoUser",
#' subnetId = "subnet-1fab8aEX"
#' )}
#'
#' @export
create_environment_ec2 <- function(name, description = NULL,
                                   clientRequestToken = NULL, instanceType,
                                   subnetId = NULL,
                                   automaticStopTimeMinutes = NULL,
                                   ownerArn = NULL) {
  # Operation descriptor for the Cloud9 CreateEnvironmentEC2 API call.
  op_spec <- new_operation(name = "CreateEnvironmentEC2", http_method = "POST",
                           http_path = "/", paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- create_environment_ec2_input(name = name,
                                          description = description,
                                          clientRequestToken = clientRequestToken,
                                          instanceType = instanceType,
                                          subnetId = subnetId,
                                          automaticStopTimeMinutes = automaticStopTimeMinutes,
                                          ownerArn = ownerArn)
  out_shape <- create_environment_ec2_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Adds an environment member to an AWS Cloud9 development environment
#'
#' Adds an environment member to an AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' create_environment_membership(
#' environmentId = "string",
#' userArn = "string",
#' permissions = "read-write"|"read-only"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment that contains the environment member you want to add.
#' @param userArn [required] The Amazon Resource Name (ARN) of the environment member you want to add.
#' @param permissions [required] The type of environment member permissions you want to associate with this environment member. Available values include:
#'
#' - `read-only`: Has read-only access to the environment.
#'
#' - `read-write`: Has read-write access to the environment.
#'
#' @examples
#' #
#' \donttest{create_environment_membership(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX",
#' permissions = "read-write",
#' userArn = "arn:aws:iam::123456789012:user/AnotherDemoUser"
#' )}
#'
#' @export
create_environment_membership <- function(environmentId, userArn, permissions) {
  # Operation descriptor for the Cloud9 CreateEnvironmentMembership API call.
  op_spec <- new_operation(name = "CreateEnvironmentMembership",
                           http_method = "POST", http_path = "/",
                           paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- create_environment_membership_input(environmentId = environmentId,
                                                 userArn = userArn,
                                                 permissions = permissions)
  out_shape <- create_environment_membership_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Deletes an AWS Cloud9 development environment
#'
#' Deletes an AWS Cloud9 development environment. If an Amazon EC2 instance is connected to the environment, also terminates the instance.
#'
#' @section Accepted Parameters:
#' ```
#' delete_environment(
#' environmentId = "string"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment to delete.
#'
#' @examples
#' #
#' \donttest{delete_environment(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX"
#' )}
#'
#' @export
delete_environment <- function(environmentId) {
  # Operation descriptor for the Cloud9 DeleteEnvironment API call.
  op_spec <- new_operation(name = "DeleteEnvironment", http_method = "POST",
                           http_path = "/", paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- delete_environment_input(environmentId = environmentId)
  out_shape <- delete_environment_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Deletes an environment member from an AWS Cloud9 development environment
#'
#' Deletes an environment member from an AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' delete_environment_membership(
#' environmentId = "string",
#' userArn = "string"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment to delete the environment member from.
#' @param userArn [required] The Amazon Resource Name (ARN) of the environment member to delete from the environment.
#'
#' @examples
#' #
#' \donttest{delete_environment_membership(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX",
#' userArn = "arn:aws:iam::123456789012:user/AnotherDemoUser"
#' )}
#'
#' @export
delete_environment_membership <- function(environmentId, userArn) {
  # Operation descriptor for the Cloud9 DeleteEnvironmentMembership API call.
  op_spec <- new_operation(name = "DeleteEnvironmentMembership",
                           http_method = "POST", http_path = "/",
                           paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- delete_environment_membership_input(environmentId = environmentId,
                                                 userArn = userArn)
  out_shape <- delete_environment_membership_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Gets information about environment members for an AWS Cloud9 development environment
#'
#' Gets information about environment members for an AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' describe_environment_memberships(
#' userArn = "string",
#' environmentId = "string",
#' permissions = list(
#' "owner"|"read-write"|"read-only"
#' ),
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @param userArn The Amazon Resource Name (ARN) of an individual environment member to get information about. If no value is specified, information about all environment members are returned.
#' @param environmentId The ID of the environment to get environment member information about.
#' @param permissions The type of environment member permissions to get information about. Available values include:
#'
#' - `owner`: Owns the environment.
#'
#' - `read-only`: Has read-only access to the environment.
#'
#' - `read-write`: Has read-write access to the environment.
#'
#' If no value is specified, information about all environment members are returned.
#' @param nextToken During a previous call, if there are more than 25 items in the list, only the first 25 items are returned, along with a unique string called a *next token*. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
#' @param maxResults The maximum number of environment members to get information about.
#'
#' @examples
#' # The following example gets information about all of the environment
#' # members for the specified AWS Cloud9 development environment.
#' \donttest{describe_environment_memberships(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX"
#' )}
#'
#' # The following example gets information about the owner of the specified
#' # AWS Cloud9 development environment.
#' \donttest{describe_environment_memberships(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX",
#' permissions = list(
#' "owner"
#' )
#' )}
#'
#' # The following example gets AWS Cloud9 development environment membership
#' # information for the specified user.
#' \donttest{describe_environment_memberships(
#' userArn = "arn:aws:iam::123456789012:user/MyDemoUser"
#' )}
#'
#' @export
describe_environment_memberships <- function(userArn = NULL,
                                             environmentId = NULL,
                                             permissions = NULL,
                                             nextToken = NULL,
                                             maxResults = NULL) {
  # Operation descriptor for the Cloud9 DescribeEnvironmentMemberships call.
  op_spec <- new_operation(name = "DescribeEnvironmentMemberships",
                           http_method = "POST", http_path = "/",
                           paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- describe_environment_memberships_input(userArn = userArn,
                                                    environmentId = environmentId,
                                                    permissions = permissions,
                                                    nextToken = nextToken,
                                                    maxResults = maxResults)
  out_shape <- describe_environment_memberships_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Gets status information for an AWS Cloud9 development environment
#'
#' Gets status information for an AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' describe_environment_status(
#' environmentId = "string"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment to get status information about.
#'
#' @examples
#' #
#' \donttest{describe_environment_status(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX"
#' )}
#'
#' @export
describe_environment_status <- function(environmentId) {
  # Operation descriptor for the Cloud9 DescribeEnvironmentStatus API call.
  op_spec <- new_operation(name = "DescribeEnvironmentStatus",
                           http_method = "POST", http_path = "/",
                           paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- describe_environment_status_input(environmentId = environmentId)
  out_shape <- describe_environment_status_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Gets information about AWS Cloud9 development environments
#'
#' Gets information about AWS Cloud9 development environments.
#'
#' @section Accepted Parameters:
#' ```
#' describe_environments(
#' environmentIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @param environmentIds [required] The IDs of individual environments to get information about.
#'
#' @examples
#' #
#' \donttest{describe_environments(
#' environmentIds = list(
#' "8d9967e2f0624182b74e7690ad69ebEX",
#' "349c86d4579e4e7298d500ff57a6b2EX"
#' )
#' )}
#'
#' @export
describe_environments <- function(environmentIds) {
  # Operation descriptor for the Cloud9 DescribeEnvironments API call.
  op_spec <- new_operation(name = "DescribeEnvironments", http_method = "POST",
                           http_path = "/", paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- describe_environments_input(environmentIds = environmentIds)
  out_shape <- describe_environments_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Gets a list of AWS Cloud9 development environment identifiers
#'
#' Gets a list of AWS Cloud9 development environment identifiers.
#'
#' @section Accepted Parameters:
#' ```
#' list_environments(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @param nextToken During a previous call, if there are more than 25 items in the list, only the first 25 items are returned, along with a unique string called a *next token*. To get the next batch of items in the list, call this operation again, adding the next token to the call. To get all of the items in the list, keep calling this operation with each subsequent next token that is returned, until no more next tokens are returned.
#' @param maxResults The maximum number of environments to get identifiers for.
#'
#' @examples
#' #
#' \donttest{list_environments()}
#'
#' @export
list_environments <- function(nextToken = NULL, maxResults = NULL) {
  # Operation descriptor for the Cloud9 ListEnvironments API call.
  op_spec <- new_operation(name = "ListEnvironments", http_method = "POST",
                           http_path = "/", paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- list_environments_input(nextToken = nextToken,
                                     maxResults = maxResults)
  out_shape <- list_environments_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Changes the settings of an existing AWS Cloud9 development environment
#'
#' Changes the settings of an existing AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' update_environment(
#' environmentId = "string",
#' name = "string",
#' description = "string"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment to change settings.
#' @param name A replacement name for the environment.
#' @param description Any new or replacement description for the environment.
#'
#' @examples
#' #
#' \donttest{update_environment(
#' name = "my-changed-demo-environment",
#' description = "This is my changed demonstration environment.",
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX"
#' )}
#'
#' @export
update_environment <- function(environmentId, name = NULL, description = NULL) {
  # Operation descriptor for the Cloud9 UpdateEnvironment API call.
  op_spec <- new_operation(name = "UpdateEnvironment", http_method = "POST",
                           http_path = "/", paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- update_environment_input(environmentId = environmentId,
                                      name = name, description = description)
  out_shape <- update_environment_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
#' Changes the settings of an existing environment member for an AWS Cloud9 development environment
#'
#' Changes the settings of an existing environment member for an AWS Cloud9 development environment.
#'
#' @section Accepted Parameters:
#' ```
#' update_environment_membership(
#' environmentId = "string",
#' userArn = "string",
#' permissions = "read-write"|"read-only"
#' )
#' ```
#'
#' @param environmentId [required] The ID of the environment for the environment member whose settings you want to change.
#' @param userArn [required] The Amazon Resource Name (ARN) of the environment member whose settings you want to change.
#' @param permissions [required] The replacement type of environment member permissions you want to associate with this environment member. Available values include:
#'
#' - `read-only`: Has read-only access to the environment.
#'
#' - `read-write`: Has read-write access to the environment.
#'
#' @examples
#' #
#' \donttest{update_environment_membership(
#' environmentId = "8d9967e2f0624182b74e7690ad69ebEX",
#' permissions = "read-only",
#' userArn = "arn:aws:iam::123456789012:user/AnotherDemoUser"
#' )}
#'
#' @export
update_environment_membership <- function(environmentId, userArn, permissions) {
  # Operation descriptor for the Cloud9 UpdateEnvironmentMembership API call.
  op_spec <- new_operation(name = "UpdateEnvironmentMembership",
                           http_method = "POST", http_path = "/",
                           paginator = list())
  # Marshal the caller's arguments into the request payload.
  payload <- update_environment_membership_input(environmentId = environmentId,
                                                 userArn = userArn,
                                                 permissions = permissions)
  out_shape <- update_environment_membership_output()
  # Dispatch through the shared paws request pipeline.
  client <- service()
  send_request(new_request(client, op_spec, payload, out_shape))
}
|
8f23d3582f262f216509dd4d059cc8f4341382da
|
6a7e2b8c481f831309aaad5bc0f12f05dd213521
|
/results/analysis.R
|
3d4ab0918f48086cc9d28da30ff81a081927b3d1
|
[] |
no_license
|
david-shilei/latency-test
|
bd5a9ff75f8a3c1a7e57e20a7c4a97da17616051
|
411761f9e65d603a53fac7613b261ab1c4d99145
|
refs/heads/master
| 2021-01-20T22:20:04.114332
| 2016-07-02T06:19:59
| 2016-07-02T06:19:59
| 62,436,787
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
analysis.R
|
# Plot ping latency from the CSV file named by the first command-line
# argument. Writes "<csvfile>.png" containing a scatter plot and a boxplot
# of the latency column.
cli_args <- commandArgs(trailingOnly = TRUE)
print(cli_args)
input_csv <- cli_args[1]
samples <- read.csv(file = input_csv, sep = ",")
# Normalize the two columns to the names used below.
metrics <- setNames(samples, c("Time", "Latency"))
png(paste(input_csv, "png", sep = "."))
par(mfrow = c(1, 2))  # two panels side by side
plot(metrics$Latency, main = "Ping Analysis")
boxplot(metrics$Latency)
dev.off()
|
09e7404f67b272d5eff48b293832a20149e13eb7
|
c9c2b64749e8d25fc8667446857ef3f396f1e381
|
/main.R
|
2ef2681ab166ea2af7f7c2d9dc9fb7d6b1f152fa
|
[] |
no_license
|
ruizhima/ABCV
|
f55b62b462d4268a61146421c7e8cb14696f4ecd
|
cb6f2aeb0edca6004e5f1c0a31aebe579be8e2bf
|
refs/heads/master
| 2021-01-20T08:01:58.237824
| 2017-05-03T15:01:46
| 2017-05-03T15:01:46
| 90,088,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,502
|
r
|
main.R
|
###### Econ 722, PS6
#### Question 3
# Monte Carlo study comparing lag-order selection by AIC, BIC, and h-block
# cross-validation (crv) for two data-generating processes (dgp1, dgp2 from
# the ABCV package). For each simulation the criteria are evaluated at lag
# orders 1..k_max and the selected order is tallied across simulations.
library("ABCV")
## DGP
# Simulation settings.
nsim = 100;
T = 100;
k_max = 6; # max number of lags
# Criterion values: one row per simulation, one column per candidate lag.
aic_criterion_values_y1 <- matrix(NaN,nrow = nsim,ncol = k_max )
bic_criterion_values_y1 <- matrix(NaN,nrow = nsim,ncol = k_max )
crv_criterion_values_y1 <- matrix(NaN,nrow = nsim,ncol = k_max )
aic_criterion_values_y2 <- matrix(NaN,nrow = nsim,ncol = k_max )
bic_criterion_values_y2 <- matrix(NaN,nrow = nsim,ncol = k_max )
crv_criterion_values_y2 <- matrix(NaN,nrow = nsim,ncol = k_max )
for (ii in 1:nsim) {
  for (jj in 1:k_max) {
    # NOTE(review): fresh series are drawn inside the lag (jj) loop, so the
    # criteria for different lag orders are computed on different samples;
    # presumably the draw was meant to happen once per simulation (ii) —
    # confirm against the assignment.
    y1 = dgp1(T,0.7,0)
    y2 = dgp2(T,0.6,0)
    # NOTE(review): the four *_effective_* vectors below are never used.
    y1_effective_AIC = y1[(k_max+1):T,1]
    y1_effective_BIC = y1[(k_max+1):T,1]
    y2_effective_AIC = y2[(jj+1):T,1]
    y2_effective_BIC = y2[(jj+1):T,1]
    # NOTE(review): resi*_aic and resi*_bic are identical correct_fit()
    # calls; one fit per series would suffice.
    resi1_aic <- correct_fit(y1,jj,k_max)
    resi2_aic <- correct_fit(y2,jj,k_max)
    resi1_bic <- correct_fit(y1,jj,k_max)
    resi2_bic <- correct_fit(y2,jj,k_max)
    # AIC/BIC: log residual variance plus the respective penalty term;
    # crv: h-block cross-validation score from cv_hv().
    aic_criterion_values_y1[ii,jj] = log(t(resi1_aic)%*%resi1_aic/(T-k_max-jj)) + 2*jj/(T-k_max-jj)
    bic_criterion_values_y1[ii,jj] = log(t(resi1_bic)%*%resi1_bic/(T-k_max-jj)) + jj*log(T-k_max-jj)/(T-k_max-jj)
    crv_criterion_values_y1[ii,jj] = cv_hv(y1,jj,10,20)
    aic_criterion_values_y2[ii,jj] = log(t(resi2_aic)%*%resi2_aic/(T)) + 2*jj/(T)
    bic_criterion_values_y2[ii,jj] = log(t(resi2_bic)%*%resi2_bic/(T-2*jj)) + jj*log(T-jj)/(T-jj)
    crv_criterion_values_y2[ii,jj] = cv_hv(y2,jj,10,20)
  }
}
# Selected lag order per simulation; defaults to lag 1.
aic_choice_y1 <- matrix(1,nrow = nsim, ncol = 1)
bic_choice_y1 <- matrix(1,nrow = nsim, ncol = 1)
crv_choice_y1 <- matrix(1,nrow = nsim, ncol = 1)
aic_choice_y2 <- matrix(1,nrow = nsim, ncol = 1)
bic_choice_y2 <- matrix(1,nrow = nsim, ncol = 1)
crv_choice_y2 <- matrix(1,nrow = nsim, ncol = 1)
# NOTE(review): this picks lag jj whenever the criterion drops relative to
# the immediately preceding lag only, which is NOT the argmin over all lags
# (which.min() per row would give that). Confirm whether the
# adjacent-comparison rule is intended.
for (ii in 1:nsim) {
  for (jj in 2:k_max) {
    if (aic_criterion_values_y1[ii,jj] < aic_criterion_values_y1[ii,(jj-1)]) {
      aic_choice_y1[ii,1] = jj
    }
    if (bic_criterion_values_y1[ii,jj] < bic_criterion_values_y1[ii,(jj-1)]) {
      bic_choice_y1[ii,1] = jj
    }
    if (crv_criterion_values_y1[ii,jj] < crv_criterion_values_y1[ii,(jj-1)]) {
      crv_choice_y1[ii,1] = jj
    }
    if (aic_criterion_values_y2[ii,jj] < aic_criterion_values_y2[ii,(jj-1)]) {
      aic_choice_y2[ii,1] = jj
    }
    if (bic_criterion_values_y2[ii,jj] < bic_criterion_values_y2[ii,(jj-1)]) {
      bic_choice_y2[ii,1] = jj
    }
    if (crv_criterion_values_y2[ii,jj] < crv_criterion_values_y2[ii,(jj-1)]) {
      crv_choice_y2[ii,1] = jj
    }
  }
}
# Convert choices to data frames for the counting step below.
aic_choice_y1 <- data.frame(aic_choice_y1)
bic_choice_y1 <- data.frame(bic_choice_y1)
crv_choice_y1 <- data.frame(crv_choice_y1)
aic_choice_y2 <- data.frame(aic_choice_y2)
bic_choice_y2 <- data.frame(bic_choice_y2)
crv_choice_y2 <- data.frame(crv_choice_y2)
# Count how often each lag order was selected, per criterion and DGP.
aic_summary_y1 <- matrix(NaN,nrow = k_max, ncol = 1)
bic_summary_y1 <- matrix(NaN,nrow = k_max, ncol = 1)
crv_summary_y1 <- matrix(NaN,nrow = k_max, ncol = 1)
aic_summary_y2 <- matrix(NaN,nrow = k_max, ncol = 1)
bic_summary_y2 <- matrix(NaN,nrow = k_max, ncol = 1)
crv_summary_y2 <- matrix(NaN,nrow = k_max, ncol = 1)
for (ii in 1:k_max) {
  aic_summary_y1[ii,1] = sum(with(aic_choice_y1,aic_choice_y1==ii))
  bic_summary_y1[ii,1] = sum(with(bic_choice_y1,bic_choice_y1==ii))
  crv_summary_y1[ii,1] = sum(with(crv_choice_y1,crv_choice_y1==ii))
  aic_summary_y2[ii,1] = sum(with(aic_choice_y2,aic_choice_y2==ii))
  bic_summary_y2[ii,1] = sum(with(bic_choice_y2,bic_choice_y2==ii))
  crv_summary_y2[ii,1] = sum(with(crv_choice_y2,crv_choice_y2==ii))
}
|
b4cedb1fece9263e46b13fbb551a8ce4686c0e2b
|
d8d005d99a54a8401628179b03e24f95190b7fba
|
/man/multi_rnorm.Rd
|
833d055a5fa583bc1f5420b4e07de4663fd707c5
|
[] |
no_license
|
japilo/colorednoise
|
add6c6f49e1c76a43175d328f0c922de84831926
|
d3bcfdaf7663cba49efc7d7442af660351e35eea
|
refs/heads/master
| 2021-06-03T23:07:54.516260
| 2020-10-08T00:16:36
| 2020-10-08T00:16:36
| 104,096,384
| 7
| 1
| null | 2020-08-27T07:47:21
| 2017-09-19T15:58:36
|
R
|
UTF-8
|
R
| false
| true
| 742
|
rd
|
multi_rnorm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{multi_rnorm}
\alias{multi_rnorm}
\title{Generate Correlated Normal Random Numbers}
\usage{
multi_rnorm(n, mean, sd)
}
\arguments{
\item{n}{The number of samples desired for each variable.}
\item{mean}{A vector giving the mean of each variable.}
\item{sd}{A valid covariance matrix.}
}
\value{
A matrix with n rows and as many columns as mean values.
}
\description{
Generate random numbers from a multivariate normal distribution.
It can be used to create correlated random numbers.
}
\examples{
mus <- c(0, 3, 5)
sigmas <- matrix(c(1, 0.265, 2.19, 0.265, 0.25, 0.66, 2.19, 0.66, 9), ncol = 3)
mat <- multi_rnorm(100, mus, sigmas)
var(mat)
}
|
b30ab13d1ff0daf875316f4960cb34b85056c0db
|
82b36879156715c0f9df44f5136e32f51fab88c0
|
/cachematrix.R
|
2d57425e794fd6b8ad4adf28721a92df331419fa
|
[] |
no_license
|
andrewdhayes/Programming-Assignment-2
|
3acc093f8b9b56b6ea32907de99a4a9b673301f0
|
b51fb3a670ebd14cebf3eb369b953b0322e93abb
|
refs/heads/master
| 2020-04-26T02:11:54.978674
| 2019-03-01T03:42:22
| 2019-03-01T03:42:22
| 173,227,451
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,876
|
r
|
cachematrix.R
|
## Helpers for caching the inverse of a matrix so that repeated inversions
## of the same matrix can be served from a cache instead of recomputed.
# makeCacheMatrix builds a special "matrix" object: a named list of accessor
# functions closing over the matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() (or a caller) stores one.
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any previously cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  # Return the stored matrix from the enclosing environment.
  get <- function() x
  # Store a computed inverse in the cache.
  setinverse <- function(inverse) cached_inverse <<- inverse
  # Return the cached inverse (NULL if not yet computed).
  getinverse <- function() cached_inverse
  # Named list exposing the accessors via the `$` operator.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve returns the inverse of the special "matrix" created by
# makeCacheMatrix, computing it at most once: the first call solves and
# caches the inverse, later calls return the cached value.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  # Serve from the cache when an inverse was already computed.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: fetch the matrix, compute the inverse, store and return it.
  mat <- x$get()
  fresh <- solve(mat, ...)
  x$setinverse(fresh)
  fresh
}
|
5d87af6af91fa66f6aebe8fb687f9ab54fb2d5af
|
e01871cf6a2d1497b96abd91de8069a2b792a9f3
|
/R/findMethylatedCytosines.R
|
eb3d4a00b6c8d79b504579b52924c330906df8c4
|
[
"MIT"
] |
permissive
|
diannamcallister/MethylExpress
|
2bc0f8e875641b6c0d1f8c0abc93a6399fb53b7e
|
4adf41221e236f4eee1ace077b1977fc1bf1e47c
|
refs/heads/main
| 2023-01-30T05:17:35.380998
| 2020-12-09T17:03:09
| 2020-12-09T17:03:09
| 307,599,310
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,201
|
r
|
findMethylatedCytosines.R
|
#' Highlighting all Methylated Cytosines
#'
#' A function that returns a visual output highlighting all methylated cytosines.
#'
#' @param orig A string of nucleotides of the original DNA sequence with no
#' modifications, meaning before bisulfite conversion
#' @param bisulfite A string of nucleotides of the DNA sequence after
#' bisulfite conversion
#'
#' @returns A visual output of all nucleotides given, where the methylated
#' cytosines are highlighted.
#'
#' @examples
#' \dontrun{
#' findMethylatedCytosines(MethylationObservation$originalDNA,
#' MethylationObservation$bisulfite)
#' }
#'
#' @references
#' Kevin Ushey, JJ Allaire, Hadley Wickham and Gary Ritchie (2020). rstudioapi:
#' Safely Access the RStudio API. R package version 0.13.
#' https://CRAN.R-project.org/package=rstudioapi
#'
#' @export
#' @import rstudioapi
findMethylatedCytosines <- function(orig, bisulfite) {
  # Validate inputs: both sequences must be character strings.
  if (!is.character(orig)){
    stop("This is not valid input; it requires a string to be given.")
  }
  if (!is.character(bisulfite)){
    stop("This is not valid input; it requires a string to be given.")
  }
  # Position-wise comparison only makes sense for equal-length sequences.
  # Fixed: the previous message wrongly repeated the "requires a string"
  # wording for this case instead of explaining the length requirement.
  if (nchar(orig) != nchar(bisulfite)) {
    stop("This is not valid input; both sequences must have the same length.")
  }
  toHighlight <- findMatchingSections(orig, bisulfite)
  # Inside a Shiny app, return the HTML file path for the app to render;
  # otherwise open it in the RStudio viewer pane.
  if (shiny::isRunning()) {
    return(toHighlight)
  } else {
    rstudioapi::viewer(toHighlight)
  }
}
#' Generate a markdown file with all methylated cytosines highlighted
#'
#' A helper function that returns a markdown file of the two input DNA strands,
#' where each cytosine that was methylated is highlighted
#'
#' This function is an internal function and therefore cannot be used directly
#' when installing this package - it is instead a helper function for
#' findMethylatedCytosines
#'
#' @param orig A string of nucleotides of the original DNA sequence with no
#' modifications, meaning before bisulfite conversion
#' @param bisulfite A string of nucleotides of the DNA sequence after bisulfite
#' conversion
#'
#' @returns A file containing the two input DNA strands, where the methylated
#' cytosines are highlighted.
#'
#'
# Writes the two-sequence comparison to a temporary HTML file via the
# sibling helper highlight() (defined elsewhere in this package): runs of
# unmethylated bases are emitted unhighlighted, and each methylated
# cytosine (a "c" preserved in both sequences) is emitted highlighted.
# Returns the path to the generated HTML file.
findMatchingSections <- function(orig, bisulfite) {
  #setup temp file to save the html changes in (locally)
  dir <- tempfile()
  dir.create(dir)
  htmlFile <- file.path(dir, "index.html")
  # NOTE(review): `matching` is assigned but never used.
  matching <- c()
  start <- 1
  numChar <- nchar(orig)
  # NOTE(review): 1:numChar misbehaves for an empty string (numChar == 0
  # yields c(1, 0)); seq_len(numChar) would be safe.
  for (i in 1:numChar) {
    orig_char = substr(orig, i, i)
    bisulfite_char = substr(bisulfite, i, i)
    # Only lowercase "c" is recognized here — presumably sequences are
    # normalized to lowercase upstream; confirm with callers.
    if ((identical(orig_char, "c")) && (identical(bisulfite_char, "c"))) {
      # this means this nucleotide is methylated!
      # Flush the unhighlighted run preceding this position (if any),
      # then emit the methylated cytosine highlighted.
      if (!identical(substr(orig, start, i-1), "")){
        highlight(htmlFile, substr(orig, start, i-1), FALSE)
        highlight(htmlFile, substr(orig, i, i), TRUE)
        start <- i + 1
      } else {
        highlight(htmlFile, substr(orig, i, i), TRUE)
        start <- i + 1
      }
    }
  }
  # NOTE(review): this relies on `bisulfite_char` and `i` leaking out of
  # the for-loop (last iteration's values) to flush the trailing
  # unhighlighted run; it errors if the input is empty.
  if (!identical(bisulfite_char, "c")) {
    # the last nucleotide in the string is not methylated
    highlight(htmlFile, substr(orig, start, i), FALSE)
  }
  return(htmlFile)
}
|
03df4678a6d1035d03da5f7972a8d61b5a18970e
|
604f064ac46806c8aaba71afc42fc6f8f6d2a1e0
|
/man-roxygen/medianMetric.R
|
19b092a44918d1e724817d9c58910e19a5523994
|
[
"MIT"
] |
permissive
|
johndharrison/webpagetestr
|
d2278dcf271dbe7f800e95569829fff045689d43
|
48ba992bfa4ebf3edf6b8767160c1a2e92f65595
|
refs/heads/master
| 2021-01-13T10:26:49.749120
| 2016-11-08T16:34:32
| 2016-11-08T16:34:32
| 72,283,586
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 272
|
r
|
medianMetric.R
|
#' @param medianMetric A string. Set the metric used to calculate median
#' for multiple runs tests. Defaults to "loadTime" = NULL. Other options
#' include "visualComplete", "SpeedIndex", "titleTime", "domTime",
#' "docTime", "fullyLoaded", "render", "TTFB"
|
c8b933822b3d9c2950f42330917eaa785f8a73c7
|
407350d22d3a58e93dd9b02bdb5e2213d8c55cc8
|
/Other/custom-logistic-regression.R
|
00e1e1707b277921e94bf81736540af5341aaf7e
|
[] |
no_license
|
vonElfvin/Machine-Learning-Practice
|
e679fa2b111c8178273c9b3e375abe528ceac213
|
65334ae79d29673c7ae9e5e4833728cf309be22e
|
refs/heads/master
| 2021-05-14T16:23:43.328273
| 2018-11-23T19:46:05
| 2018-11-23T19:46:05
| 116,019,701
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,811
|
r
|
custom-logistic-regression.R
|
#### Custom Logistic Regression
### Setup
set.seed(12345)
data.iris = as.data.frame(iris)
ids = data.iris$Species %in% c("virginica", "versicolor")
y.iris = ifelse(data.iris[ids,]$Species=="virginica", 1, 0)
X.iris = data.iris[ids,c(1,3)] # Select only Lengths to be able to plot
X.iris = as.matrix(X.iris / max(X.iris)) # Rescale for faster convergence
### Functions
## Returns the sigmoid value of given number z
sigmoid = function(z){
return(1 / (1 + exp(-z)))
}
## Returns the logistic regression β.hat
logistic_reg = function(X, y, epochs, learning.rate=5){
X = cbind(1, X)
n = dim(X)[1]
k = dim(X)[2]
β.hat = rep(1, k)
for(i in 1:epochs){
# Calculate the error: yfit - y, yfit = sigmoid(Xβ)
residuals = sigmoid(X %*% β.hat) - y
#residuals = X %*% β.hat - Y # linear regression
# Calculate the gradient at that point
delta = (t(X) %*% residuals) / n
# Move β.hat in opposite direction of gradient, to find local error minima
β.hat = β.hat - learning.rate * delta
}
rownames(β.hat)[1] = "Intercept"
return(β.hat)
}
## Returns predictions of given logistical regresion
pred = function(β.hat, X){
X = cbind(1, X)
return(X %*% β.hat)
}
### Implementation
# Plot sigmoid curve
curve(sigmoid, -10, 10)
# Logistical regression
β.logreg = logistic_reg(X.iris, y.iris, 300000, 5)
yfit = pred(β.logreg, X.iris) > 0
# Plot original data and decision boundry
plot(X.iris[,1], X.iris[,2], col=rgb(y.iris, 1-y.iris, 0, 0.5), pch=19,
xlab="Sepal.Length", ylab="Petal.Length")
#points(X.iris[,1], X.iris[,2], col=rgb(yfit, 1-yfit, 0, 0.5), pch=19) # optional to show predictions
# Calculate boundry, add to plot through abline
intercept = -β.logreg[1]/β.logreg[3]
slope = -β.logreg[2]/β.logreg[3]
abline(intercept, slope, col=1)
|
1d45e916f144934974857ef1ee5d386c7840c91e
|
3ea9a43dbbaa88bfb0fad2bc187cae3902c56e0c
|
/man/MNNpair.Rd
|
69b97700c4d4037db33ffcd9b4e46edab6cda056
|
[] |
no_license
|
KChen-lab/SCMarker
|
99a5b15d8999e95113dbb41e089e0038ed4cd4b3
|
03b8edfaa2b1eb896965bfe9f609da02e91ef03b
|
refs/heads/master
| 2021-06-28T08:47:27.931320
| 2020-09-15T03:40:47
| 2020-09-15T03:40:47
| 138,614,038
| 18
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,939
|
rd
|
MNNpair.Rd
|
\name{MNNpair}
\alias{MNNpair}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Gene pairs
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
MNNpair(k, MNNgene, geneName)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{k}{
%% ~~Describe \code{k} here~~
}
\item{MNNgene}{
%% ~~Describe \code{MNNgene} here~~
}
\item{geneName}{
%% ~~Describe \code{geneName} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (k, MNNgene, geneName)
{
subgene = MNNgene[[k]]
index = match(subgene, geneName)
PP <- function(i, index, MNNgene, k, geneName) {
if (geneName[k] \%in\% MNNgene[[index[i]]])
return(geneName[index[i]])
}
if (length(index[!is.na(index)]) > 0) {
a = do.call(cbind, lapply(1:length(index), PP, index = index,
MNNgene = MNNgene, k = k, geneName = geneName))
if (!is.null(a)) {
return(a)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
dd60d6f76dddeda112c15c3fd2abb3848cd0d2d0
|
a7def3c7c33c7db7c8b529592111b8aa65ec28ae
|
/man/html.Rd
|
480fe9261365028579176c6d5c700f9a85ab0ff1
|
[] |
no_license
|
Arrendi/reactablefmtr
|
92386ec0cca53656175779a08d13be646bc2b819
|
8dcdcfd469fb9a862023280a28ba309c8e6ea395
|
refs/heads/main
| 2023-08-12T21:35:39.678483
| 2021-10-07T13:25:11
| 2021-10-07T13:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 818
|
rd
|
html.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_elements.R
\name{html}
\alias{html}
\title{Apply HTML attributes to title, subtitle, and source text.}
\usage{
html(text, ...)
}
\arguments{
\item{text, ...}{The text provided within the title, subtitle or source with HTML attributes applied.}
}
\value{
an object of class HTML.
}
\description{
Use `html()` to apply HTML attributes to text within `add_title()`, `add_subtitle()`, and `add_source()`.
}
\examples{
\dontrun{
## Change the title color to blue
data <- iris[10:29, ]
reactable(data) \%>\%
add_title(html("Normal title. <span style='color:DodgerBlue;'>Blue title.</span>"))
## Add emojis to the source
data <- iris[10:100, ]
reactable(data) \%>\%
add_source(html("<p>Made with 💗 by: John Doe 😀</p>"))
}
}
|
14589194bf793624ebbee8b2ba5614e8c670d4c7
|
d692ef9a915e8d901d6c17567a632f555c70d5c9
|
/YATAWebAdmin/R/ui/modProvidersServer.R
|
682483b50af0dca978d81eb814e8de774f557644
|
[
"CC0-1.0"
] |
permissive
|
cryptobuks1/YATA2
|
386f0cc47e6bfeda8c626d6d44e033708f06c404
|
04d7c4fefea3babc0d0f30ee15c9a52602623055
|
refs/heads/main
| 2023-04-26T16:05:54.430701
| 2021-05-27T15:00:58
| 2021-05-27T15:00:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 424
|
r
|
modProvidersServer.R
|
modpProvidersServer = function(id, full) {
ns = NS(id)
PNLProviders = R6::R6Class("PNL.PROVIDERS"
,inherit = YATAPanel
,cloneable = FALSE
,lock_class = TRUE
,public = list(
initialize = function(id) {
super$initialize(id)
}
)
,private = list(
)
)
moduleServer(id, function(input, output, session) {
})
}
|
9c6e9ace6ea1bb39f2f75dcb5495067f5ecce6f2
|
ffe0f89ba49fcdb3f190051d35cc6556adcfa278
|
/dataPrep.R
|
69cf89ce41a59274256bbadc98f5ec387ca31645
|
[] |
no_license
|
5hri/CapstoneProject
|
2c2a7b062c45289919ce5519f07cc955f0e25499
|
34c04032bfff090d16700b67b2f79f3ca9c2c2ea
|
refs/heads/master
| 2021-01-12T20:31:54.480742
| 2016-10-08T01:03:38
| 2016-10-08T01:03:38
| 68,489,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,219
|
r
|
dataPrep.R
|
# Refer to the Milestone for the source code, here I have modified the data for the App
AppngramTokenizer <- function(theCorpus, ngramCount) {
ngramFunction <- NGramTokenizer(theCorpus,
Weka_control(min = ngramCount, max = ngramCount,
delimiters = " \\r\\n\\t.,;:\"()?!"))
ngramFunction <- data.frame(table(ngramFunction))
ngramFunction <- ngramFunction[order(ngramFunction$Freq,
decreasing = TRUE),]
colnames(ngramFunction) <- c("String","Count")
ngramFunction
}
#Profanity words
profanityfile <- file("http://www.bannedwordlist.com/lists/swearWords.txt", open = "rb")
profanityWords<-readLines(profanityfile, encoding = "UTF-8", warn=TRUE, skipNul=TRUE)
close(profanityfile)
saveRDS(profanityWords, file = "./profanityWords.RDS")
rm(profanityfile)
# Create ngram and save
Appunigram <- AppngramTokenizer(finalCorpusDF, 1)
Appunigram<-cbind(as.data.frame(str_split_fixed(Appunigram$String, fixed(" "), 1)),Appunigram$Count)
colnames(Appunigram)<-c("word1","Freq")
saveRDS(Appunigram, file = "./Appunigram.RDS")
Appbigram <- AppngramTokenizer(finalCorpusDF, 2)
Appbigram<-cbind(as.data.frame(str_split_fixed(Appbigram$String, fixed(" "), 2)),Appbigram$Count)
colnames(Appbigram)<-c("word1","word2","Freq")
saveRDS(Appbigram, file = "./Appbigram.RDS")
Apptrigram <- AppngramTokenizer(finalCorpusDF, 3)
Apptrigram<-cbind(as.data.frame(str_split_fixed(Apptrigram$String, fixed(" "), 3)),Apptrigram$Count)
colnames(Apptrigram)<-c("word1","word2","word3","Freq")
saveRDS(Apptrigram, file = "./Apptrigram.RDS")
Appquadgram <- AppngramTokenizer(finalCorpusDF, 4)
Appquadgram<-cbind(as.data.frame(str_split_fixed(Appquadgram$String, fixed(" "), 4)),Appquadgram$Count)
colnames(Appquadgram)<-c("word1","word2","word3","word4","Freq")
saveRDS(Appquadgram, file = "./Appquadgram.RDS")
Apppentagram <- AppngramTokenizer(finalCorpusDF, 5)
Apppentagram<-cbind(as.data.frame(str_split_fixed(Apppentagram$String, fixed(" "), 5)),Apppentagram$Count)
colnames(Apppentagram)<-c("word1","word2","word3","word4","word5","Freq")
saveRDS(Apppentagram, file = "./Apppentagram.RDS")
|
486f88fc8777984bbe25a7e7a1ad02859d5f8fb3
|
1ac6ed9fe71f59fbf0230ff6c5a3c94650d8b44d
|
/R/overwrite_column.R
|
58edb1e1b167976b6c03d7786d138bdfc6bc642d
|
[
"MIT"
] |
permissive
|
daattali/rsalad
|
58c505511b0d44416a06cf7f99926bae034898ee
|
51d3a190574da341e7942c1b3b1265ac3572f918
|
refs/heads/master
| 2021-01-09T20:48:12.066521
| 2019-10-25T03:21:23
| 2019-10-25T03:21:23
| 26,629,036
| 26
| 9
| null | 2015-10-23T08:56:10
| 2014-11-14T08:18:03
|
R
|
UTF-8
|
R
| false
| false
| 1,010
|
r
|
overwrite_column.R
|
#' Overwrite a column in a data.frame based on a matching column in another df
#'
#' @export
overwrite_column <- function(olddf, newdf, cols, bycol = "well") {
result <- dplyr::left_join(olddf, newdf, by = bycol)
if (missing(cols)) {
cols <- setdiff(colnames(olddf), bycol)
}
# yes yes, looks are horrible in R, but I couldn't find a better solution
# to make sure this works on multiple columns at a time
for (colname in cols) {
colname_x <- sprintf("%s.x", colname)
colname_y <- sprintf("%s.y", colname)
if (all(c(colname_x, colname_y) %in% colnames(result))) {
result %<>%
dplyr::mutate_(.dots = setNames(
list(lazyeval::interp(
~ ifelse(is.na(coly), colx, coly),
colx = as.name(colname_x),
coly = as.name(colname_y))),
colname_x)) %>%
dplyr::rename_(.dots = setNames(colname_x, colname)) %>%
dplyr::select_(lazyeval::interp(~ -colname, colname = as.name(colname_y)))
}
}
result
}
|
64761a0aa69e3dfb930003caa1033fd968ff4e3a
|
f56df8d08b3ec0e4e85ed0b0dbe74da3b63587ee
|
/JFJFJ33.R
|
cf46820ce9340ec5f1614cf43cd8308000fbad31
|
[] |
no_license
|
UCSDWayneTang/2016-primary-election
|
6bea9e0a2ec89b02d2d609d80d64c822c7cd13c4
|
20010c7642593ede780e4a7d48df6f305b5d884e
|
refs/heads/master
| 2020-04-09T14:53:24.826547
| 2018-12-04T19:50:02
| 2018-12-04T19:50:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,047
|
r
|
JFJFJ33.R
|
library(tidyverse)
Clean_data <- read_csv('./data/GaofengCleaning.csv')
all_state_wnh <- Clean_data %>%
group_by(state_abbreviation) %>%
summarise(white_nohisp = sum(white_nohisp*pop14)/sum(pop14)) %>%
ggplot(aes(x=state_abbreviation, y = white_nohisp)) +
geom_histogram(stat = "identity")
East_Coast <- c('FL',
'GA',
'SC',
'NC',
'VA',
'MD',
'DE',
'NJ',
'NY',
'CT',
'RI',
'MA',
'NH',
'ME')
#
list <- c()
list2 <- c()
for (col in colnames(county_results)){
list <- append(list,col)
list2 <- append(list2,sum(county_results[col] == 0))
}
df <- data.frame(column = list,
nzero = list2)
write.csv(county_results, file = "county_results.csv",row.names=FALSE)
county_results$housing_multi <- ifelse(county_results$housing_multi == 0, mean(county_results$housing_multi),county_results$housing_multi)
collist = c('SBO001207',
'AFN120207',
'BPS030214,SBO015207',
'WTN220207',
'hawaii',
'MAN450207',
'SBO315207',
'SBO415207',
'SBO215207',
'SBO115207',
'SBO515207',
'SBO015207',
'BPS030214')
filllist = c('BZA115213',
'RTN130207',
'RTN131207',
'popchange',
'votes',
'fraction_votes',
'BZA110213',
'asian',
'foreign',
'AfAm',
'language',
'tworaces',
'AmInd',
'BZA010213',
'NES010213')
for (col in collist) {
county_results[col] = NULL
}
for (col in filllist) {
for( i in c(1:nrow(county_results))){
if(county_results[i,col] == 0){
county_results[i,col] <- sum(county_results[col])/nrow(county_results[col])
}
}
}
county_winner <- county_results %>%
group_by(state_abbreviation.x, fips,party) %>%
summarise(votes = sum(votes))
county_winner <- county_winner %>%
group_by(fips) %>%
summarise(votes = max(votes))
a <- county_winner %>% inner_join(Clean_data, by = 'fips')
county_winner <- county_winner %>%
inner_join(county, by = c('county.x' = 'county'))
Age18 <- Clean_data %>%
mutate(iage18 = cut(age18,breaks = c(0,5,10,15,20,25,30,35,40,45)), Coast = ifelse(pop14 > mean(pop14),'urban', 'rural')) %>%
ggplot(aes(x=iage18,fill=as.factor(Coast))) + geom_bar(position = 'dodge')
# No_farm -- for democract
Clean_data %>%
filter(Private_nonfarm_establishments_per_person >= 0.04) %>%
mutate(no_farm= cut(Private_nonfarm_establishments_per_person, breaks=c(0,0.02,0.04,0.06,0.08,10))) %>%
ggplot(aes(x=no_farm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('Percent non_farm_establishments_per_person') + ylab('Number of county win')
# White
Clean_data %>%
mutate(White = cut(white, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=White,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of white people') + ylab('Number of county win')
Clean_data %>%
mutate(afAm = cut(AfAm, breaks=c(0,5,10,15,20,100))) %>%
ggplot(aes(x=afAm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of AfAm people') + ylab('Number of county win')
a %>%
filter(votes/(white*pop14) >= 0.0025) %>%
mutate(white = cut(votes/(white*pop14), breaks=c(0.0025,0.003,0.004,0.005,0.01))) %>%
ggplot(aes(x=white,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of white people') + ylab('Number of county win')
# age 18
Clean_data %>%
mutate(age18 = cut(age18, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age18,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# age 5
Clean_data %>%
mutate(age5 = cut(age5, breaks=c(10,15,20,21,22,23,24,25,26,27,28,29,30,35,40,50))) %>%
ggplot(aes(x=age5,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('percentage of people under 18') + ylab('Number of county win')
# income house
Clean_data %>%
mutate(income_house = cut(income_house, breaks=c(0,30000,60000,90000,120000,200000))) %>%
ggplot(aes(x=income_house, fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('income per house') + ylab('Number of county win')
# margin benefit of no_farm_establishments_per_person
a %>%
filter(votes/(Private_nonfarm_establishments_per_person*pop14) >= 20) %>%
mutate(marginb_no_farm= cut(votes/(Private_nonfarm_establishments_per_person*pop14), breaks = c(20,25,30,35,40,55))) %>%
ggplot(aes(x=marginb_no_farm,fill=as.factor(partywin))) + geom_bar(position='dodge') + xlab('Percent non_farm_establishments_per_person') + ylab('Number of county win')
# Check
AL <- Clean_data %>%
filter(state_abbreviation == 'AL')
sum(AL$white_nohisp*AL$pop14)/sum(AL$pop14)
# Clean
|
a652e17be1765c1fedf25caa422205af2abfa4c0
|
bd74455b71f2d1d64ea199372dea5cc19b767b9f
|
/code/09_plot_figure3.R
|
d58b32227c567a207a5b792871d43310e24c9260
|
[] |
no_license
|
yxlaoban118/opioid_trends
|
e943a98e9b845b6e18dc8c4241a8f76ae4f804b7
|
5e988f348b5cfc02d90e0dd11d4b4aa7e7219e8b
|
refs/heads/master
| 2022-08-04T16:08:51.054398
| 2019-11-25T15:01:46
| 2019-11-25T15:01:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,616
|
r
|
09_plot_figure3.R
|
## Plot figure 2, rates for opioids by race and opioid type, 1979-2015
## Imports
library(tidyverse)
source("./code/mk_nytimes.R")
output_dir <- config::get()$output_folder
## Helper functions
import_results_data <- function(filename) {
df <- readr::read_delim(filename, delim = ";", escape_double = FALSE,
trim_ws = TRUE)
names(df) <- tolower(names(df))
names(df) <- gsub(" ", "", names(df))
names(df) <- gsub(",", "_", names(df))
return(df)
}
## Import data
opioid_types_jp <- import_results_data(
paste0("./joinpoint_analysis/",
"04_opioid_rates_icd10type/",
"04_opioid_rates_icd10type.data.txt"))
plot3_df <- opioid_types_jp %>%
filter(race != "total",
opioid_type %in% c("heroin", "methadone", "natural",
"synth", "other_op")) %>%
ungroup() %>%
mutate(race_cat = factor(race,
levels = c("total", "white", "black"),
labels = c("Total", "White", "Black"),
ordered = TRUE),
opioid_cat = factor(opioid_type,
levels = c("heroin", "methadone",
"natural", "synth", "other_op"),
labels = c("Heroin", "Methadone",
"Natural/Semi-natural",
"Synthetic", "Unspecified"),
ordered = TRUE))
plot3 <- ggplot(plot3_df,
aes(x = year, group = opioid_cat,
color = opioid_cat, shape = opioid_cat)) +
geom_errorbar(aes(ymin = std_rate - 1.96 * standarderror,
ymax = std_rate + 1.96 * standarderror),
width = .1, alpha = .5) +
geom_line(aes(y = model), alpha = .95) +
geom_point(aes(y = std_rate), alpha = .95, size = 1) +
mk_nytimes(axis.line = element_line(color = 'black', linetype = 'solid')) +
labs(x = NULL, y = "Rate (per 100,000)") +
scale_color_brewer(NULL, palette = "Dark2") +
scale_shape_ordinal(NULL) +
facet_grid(race_cat ~ .) +
scale_x_continuous(expand = c(0, .25)) +
scale_y_continuous(breaks = c(0, 2.5, 5))
ggsave(sprintf('%s/fig3_opioid_icd10types.pdf', output_dir), plot3,
width = 8.5, height = 5.5, units = "cm", scale = 2,
device = cairo_pdf)
ggsave(sprintf('%s/fig3_opioid_icd10types.png', output_dir), plot3,
width = 8.5, height = 5.5, units = "cm", scale = 2,
dpi = 300)
|
a0bc59071685fdb79f12940b2170b4fecf35a519
|
4f04adf47ca5f37fb561703f1aa757e0bf116fe2
|
/data-raw/ncm_base.R
|
990a3bd8bf08e310a5e8f11f51c52314b358e78b
|
[] |
no_license
|
transformauy/codigueras
|
28b609b0d9c6152e58f0600232065068b35ad75e
|
0c777b3654d8d4f150b36db09071b3c39050bfdd
|
refs/heads/master
| 2021-11-24T11:47:23.327327
| 2021-10-25T21:29:51
| 2021-10-25T21:29:51
| 163,195,211
| 2
| 1
| null | 2019-07-12T17:00:53
| 2018-12-26T15:50:19
|
R
|
UTF-8
|
R
| false
| false
| 1,292
|
r
|
ncm_base.R
|
# Codiguera completa (Sección, Capítulo, Partida, Subpartida) - archivo de la web en formato .xls que coincide con NCM adaptado a Uruguay.
# ncm_base <- file.path('data-raw', 'ncm_5.xls') %>%
# read_excel(skip = 2) %>%
ncm_base <- file.path('data-raw', 'ncm_aec-10-2018_vi-enmienda_ene2019.xlsx') %>%
read_excel(skip = 11) %>%
rename_at(1:2, ~c('NCM', 'descripcion')) %>%
select(1:2) %>%
filter(is.na(descripcion) != TRUE) %>%
mutate(codigo = str_replace_all(NCM, pattern = '\\.', '')) %>%
mutate(n = nchar(codigo)) %>%
mutate(variable =
if_else(NCM %in% as.character(as.roman(1:21)), 'seccion',
if_else(n == 2, 'capitulo',
if_else(n == 4, 'ncm_4',
if_else(n == 5, 'ncm_5',
if_else(n == 6, 'ncm_6',
if_else(n == 7, 'ncm_7',
if_else(n == 8, 'ncm_8',
if_else(n == 9, 'ncm_9',
'ncm_10')))))))))
# save(ncm_base, file = 'data/ncm_base.rda')
devtools::use_data(ncm_base, overwrite = TRUE)
|
0867b2c3008da93c9da87131a8ae9b7c4b72f215
|
633e5d42e27a335926f57056f8097464ff2dfc61
|
/sentiment.r
|
9af8b3172b418dd09ed514056cd20409d042780e
|
[] |
no_license
|
prokulski/twitter_graph
|
d464cad4c9ac0d0ca7e9b7d0013e1232f7525b67
|
50c5154b94003ff7c3f192f1de7b98c5402887a8
|
refs/heads/master
| 2020-09-28T11:11:20.170662
| 2017-01-22T14:58:18
| 2017-01-22T14:58:18
| 66,703,061
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
sentiment.r
|
library(twitteR)
library(ggplot2)
library(dplyr)
# dostęp do API
source('twitter_api_access.r')
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
clinton_tweets = searchTwitter("Hillary Clinton+@HillaryClinton", n=200, lang="en")
trump_tweets = searchTwitter("Donald Trump+@realDonaldTrump", n=200, lang="en")
trump_tweets_df = do.call("rbind", lapply(trump_tweets, as.data.frame))
trump_tweets_df = subset(trump_tweets_df, select = c(text))
clinton_tweets_df = do.call("rbind", lapply(clinton_tweets, as.data.frame))
clinton_tweets_df = subset(clinton_tweets_df, select = c(text))
|
a75cbb949bcb89664cd8f126b21e32c1f7aa1ce8
|
b90fd1fbb011eac4a9ce6174bf1ca0225349077c
|
/cachematrix.R
|
f047354bced7371e4780bfc9a71ac4026e17d935
|
[] |
no_license
|
liangk7/ProgrammingAssignment2
|
cd1e9c1a684f2f08e3d5de57575596665b907a2f
|
77762cf4484dd038a01aec40936d8b44b61b18c9
|
refs/heads/master
| 2021-05-12T05:27:28.513777
| 2018-01-13T01:36:48
| 2018-01-13T01:36:48
| 117,194,232
| 0
| 0
| null | 2018-01-12T04:56:46
| 2018-01-12T04:56:45
| null |
UTF-8
|
R
| false
| false
| 1,229
|
r
|
cachematrix.R
|
## [Put comments here that give an overall description of what your
## functions do]
## [Write a short comment describing this function]
## The following function creates a special matrix that can retain
## its inverse in cache
## Variables
## 'x' as the matrix, 'xinv' as the matrix inverse
## Functions
## 'set', 'get' to set and get value of x
## 'setinv', 'getinv' to set and get value of xinv
makeCacheMatrix <- function(x = matrix()) {
xinv <- NULL
set <- function(y) {
x <<- y
xinv <<- NULL
}
get <- function() x
setinv <- function(inv) xinv <<- inv
getinv <- function() xinv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## [Write a short comment describing this function]
## The following function allows one to pull the inverse of a
## matrix (whether it already exists in cache or not)
## Variables
## 'x' as the matrix object, 'xinv' as the matrix inverse
## 'data' as the matrix
cacheinv <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xinv <- x$getinv()
if(!is.null(xinv)){
message("getting cached data")
return(xinv)
}
data <- x$get()
xinv <- solve(data,...)
x$setinv(xinv)
xinv
}
|
fd6d5f6c24267e69fe8e2be93434eec93718c4de
|
100ed9652d75f9889d1bff2be0fd82e18240b257
|
/Scripts/Soclim/exploration.R
|
a7ba0a6cdc2fa215f535175bd878f5f853217850
|
[] |
no_license
|
Flavi1P/Phytofloat
|
877129b0fa30111460c75b7239d12de7cc3a3ae9
|
bf47dd5d6100010e13e6f8ce4832c85f67735c4d
|
refs/heads/master
| 2020-12-23T14:06:45.975191
| 2020-03-27T16:12:22
| 2020-03-27T16:12:22
| 237,174,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 869
|
r
|
exploration.R
|
library(tidyverse)
library(janitor)
map <- read_csv("map_vec")
btl <- read_delim("Soclim/data/data_btl_JULIA.csv",
"\t", escape_double = FALSE, trim_ws = TRUE)
btl <- clean_names(btl)
names(btl) <- c('station', 'date', 'lon', 'lat', 'depth', 'temp', 'sal', 'ox_mml', 'chl_fluo', 'bbp', 'cp', 'fluo_bbp', 'fluo_cp', 'bbp_cp', 'poc_mmol_m3', 'pon_mmol_m3', 'station_bis')
ggplot(btl)+
geom_polygon(aes( x = long, y = lat, group = group), data = map)+
geom_point(aes(x = lon, y = lat, colour = chl_fluo))+
coord_quickmap()+
scale_color_viridis_c()+
xlim(50,75)+
ylim(-65, -25)+
theme_bw()
cyto <- read_delim("Soclim/data/data_cyto.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
cyto <- clean_names(cyto)
micro <- read_delim("Soclim/data/data_micro.csv", "\t", escape_double = FALSE, trim_ws = TRUE)
micro <- clean_names(micro)
|
f083088889e1c809c1d3f340007f3fe686088e09
|
a62921e670cc120b80f61471e16c871f79dfabe2
|
/_shiny-app/data-raw/clean-locations.R
|
b927804b5e896e43179588552f5d60711e38d7ac
|
[
"CC-BY-4.0"
] |
permissive
|
charliejhadley/idn_germanmigrantletters
|
137e0fc7fa8bb5f62b0c8980248ec8bf6fd89185
|
daca23448545c5bca5a504928871328c9601a3e2
|
refs/heads/master
| 2021-09-09T23:50:57.856905
| 2018-03-20T08:52:42
| 2018-03-20T08:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
clean-locations.R
|
library("tidyverse")
library("readxl")
all_locations <- read_xlsx("data-raw/unique-locations.xlsx")
colnames(all_locations) <- tolower(make.names(colnames(all_locations)))
all_locations %>%
mutate(new.location.string = location.string) %>%
filter(grepl(",", location.string)) %>%
separate(new.location.string, into = c("country", "location")) %>%
select(-location) %>%
filter(!is.na(latitude)) %>%
write_csv("data/all_locations.csv")
|
0069cd2272bb9372b69e410a9cb5b89f0dfd865e
|
3e14540a1ad52f1a26b2a6102c6d6e478bf2065a
|
/map_test_2.r
|
c55d4613d69cdc44dc4c5710b383d31fd91acbd6
|
[] |
no_license
|
vedapragna/germany_map
|
d4aeae9693317f48e933115d22161c9d02ba680c
|
cf3afbd3ce4dd7d9d9566e3a1abc89a8c214dfa1
|
refs/heads/master
| 2020-12-02T15:24:32.974788
| 2018-02-04T17:38:09
| 2018-02-04T17:38:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,559
|
r
|
map_test_2.r
|
rm(list=ls(all=TRUE))
#shapefile from https://datahub.io/de/dataset/postal-codes-de
#http://www.suche-postleitzahl.org/downloads?download=zuordnung_plz_ort.csv
#post questions here: http://gis.stackexchange.com/
library(choroplethr)
library(dplyr)
library(ggplot2)
library(rgdal)
library(maptools)
library(gpclib)
library(readr)
library(R6)
setwd("~/Documents/entwickeln/statistik_r/mapping_r/geo/plz-gebiete.shp/")
ger_plz <- readOGR(dsn = ".", layer = "plz-gebiete")
gpclibPermit()
ger_plz@data$id <- rownames(ger_plz@data)
ger_plz.point <- fortify(ger_plz, region="id")
ger_plz.df <- inner_join(ger_plz.point,ger_plz@data, by="id")
head(ger_plz.df)
ggplot(ger_plz.df, aes(long, lat, group=group )) + geom_polygon()
#data file
df <- read_csv("~/Documents/entwickeln/statistik_r/mapping_r/de_plz_einwohner.csv")
ger_plz.df$region <- as.integer(ger_plz.df$plz)
ger_plz.df$region <- ger_plz.df$plz
head(ger_plz.df)
GERPLZChoropleth <- R6Class("GERPLZChoropleth",
inherit = choroplethr:::Choropleth,
public = list(
initialize = function(user.df) {
super$initialize(ger_plz.df, user.df)
}
)
)
colnames(df) = c("region", "value")
c <- GERPLZChoropleth$new(df)
c$ggplot_polygon = geom_polygon(aes(fill = value), color = NA)
c$title = "Comparison of number of Inhabitants per Zipcode in Germany"
c$legend= "Number of Inhabitants per Zipcode"
c$set_num_colors(9)
c$render()
c$render_with_reference_map()
|
0902704ca7c2ab11165fb3763339d304c6c25ce0
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/unitconversion.carat.to.milligram.R
|
9450572997cb21a9dad047a3473272458a0338fb
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 635
|
r
|
unitconversion.carat.to.milligram.R
|
#' Unit Conversion - Mass/Weight - Carat to Milligram
#'
#' Performs a conversion of weights from carats to milligrams.
#'
#' @param x Vector - Values in units of carats
#'
#' @return x, but converted to milligrams
#'
#' @references
#' NIST. Handbook 133 - Checking the Net Contents of Packaged Goods - Appendix E - General Tables of Units of Measurement. 2020.
#' https://www.nist.gov/system/files/documents/2019/12/03/00-20-h133-apdxE_final-17.pdf
unitconversion.carat.to.milligram <- function(
x = 1
) {
x * 200
}
#' @rdname unitconversion.carat.to.milligram
unitconversion.ct.to.mg <- unitconversion.carat.to.milligram
|
38c32367b9afed3a00d5b4747fba62ff553e7264
|
c75ec355d2a0543884e808f03ed2bddaaf323888
|
/examples/exo_1.R
|
ac29bb1d944435fad8a58d53c64ff0fcecab30ca
|
[] |
no_license
|
pedroguarderas/KRIG
|
208520db0bb32ac87db21bdbeadf42a332462bc6
|
c8d8d896fc41b79db7a818baf6879303faeb2c37
|
refs/heads/master
| 2022-10-26T17:46:54.065846
| 2022-10-02T21:07:19
| 2022-10-02T21:07:19
| 21,835,411
| 2
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,421
|
r
|
exo_1.R
|
# Reading data -------------------------------------------------------------------------------------
# Loading libraries --------------------------------------------------------------------------------
library( data.table )
path<-'/home/aju/Documents/Lectures/spatial_statistics/data/'
# Simulated example
data_file<-paste0( path, 'samples.dat' )
SG<-read.csv( file = data_file, header = FALSE, skip = 3, sep = ' ', dec = '.',
colClasses = c( 'numeric', 'numeric', 'numeric' ) )
SG<-as.data.table( SG )
SG[ , V1 := NULL ]
setnames( SG, c( 'x1', 'x2', 'Z' ) )
# Brenda mines
data_file<-paste0( path, 'BrendaMines.dat' )
BM<-read.csv( file = data_file, header = FALSE, skip = 4, sep = ',', dec = '.' )
BM<-as.data.table( BM )
BM[ , V1 := NULL ]
setnames( BM, c( 'a', 's', 'x1', 'x2', 'x3', 'Z1', 'Z2', 'C1', 'C2', 'C3' ) )
save( SG, BM, file = 'RData/mining_data.RData' )
# Copper mine
data_file<-paste0( path, 'Copper.dat' )
Copper<-read.csv( file = data_file, header = FALSE, skip = 2, sep = '\t', dec = '.',
colClasses = c( 'numeric', 'character', 'numeric', 'numeric', 'numeric',
'numeric', 'numeric', 'numeric' ) )
Copper<-as.data.table( Copper )
Copper[ , V1 := NULL ]
setnames( Copper, c( 'a', 's', 'x1', 'x2', 'x3', 'Z', 'C' ) )
save( SG, BM, Copper, file = 'RData/mining_data.RData' )
rm( list = ls() )
gc()
|
a57deeeb25159ffceb1b606283d08d753e059642
|
b6311d0df0b150d3c01bc9e5257ca4ca8430eca4
|
/man/rdevel.Rd
|
bca14dd2f4102aa124eb1605e9381af295657063
|
[] |
no_license
|
JoFrhwld/SQAwR
|
5daa2702ccd14daa057b32e0d78c57e2f62586cb
|
35606adb3bdb55a8db4f344ee71c246c1ca3e534
|
refs/heads/master
| 2020-05-18T11:54:29.391415
| 2012-05-02T23:26:37
| 2012-05-02T23:26:37
| 2,018,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 591
|
rd
|
rdevel.Rd
|
\name{rdevel}
\alias{rdevel}
\docType{data}
\title{
Contributed Packages to \acronym{CRAN} over Time
}
\description{
The number of contributed packages to \acronym{CRAN} over time.
}
\usage{data(rdevel)}
\format{
A data frame with 20 observations on the following 3 variables.
\describe{
\item{\code{vers}}{R version}
\item{\code{count}}{Number of contributed packages}
\item{\code{date}}{Date of version / package count}
}
}
\source{
\url{blog.revolutionanalytics.com/2010/01/rs-exponential-package- growth-ctd.html}
}
\examples{
data(rdevel)
}
\keyword{datasets}
|
dcb88865b7495007544a18da08f061ffd8e22591
|
33b1b3ceed523f43a3b3c8cedcba35982c19e96d
|
/man/models.Rd
|
74b0290b8b74465074bd7a60b95d33cd3c8d8029
|
[] |
no_license
|
mcrucifix/iceages
|
61d300f2394f66e9e05d7a2f4b07e57f29618c69
|
05404a1ecf64af72a08dbe84f786d4a07c664734
|
refs/heads/master
| 2022-12-22T03:26:40.889155
| 2022-12-20T15:04:15
| 2022-12-20T15:04:15
| 7,424,021
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,740
|
rd
|
models.Rd
|
\name{models}
\alias{models}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
List of deterministic and stochastic ice age models
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
data(models)
}
\details{
List of models, as follows:
\itemize{
\item{\code{i80_d} : }{Imbrie and Imbrie (1980) seminal 1-D model.}
\item{\code{vdp_d} : }{Van der pol oscillator with constant bias.}
\item{\code{cr12_d} : }{Generalisation of the van der Pol, with two stable states on the slow manifold}
\item{\code{vdp_s} : }{Stochastic version of the van der Pol oscillator}
\item{\code{cr12_s} : }{Stochastic version of the generalised van der Pol}
\item{\code{sm90_d} : }{Saltzman and Maasch (1990) 3-D model of ice ages}
\item{\code{sm91_d} : }{Saltsman and Maasch (1991) 3-D model of ice ages}
\item{\code{t06_d} : }{Tziperman et al. (2006) hybrid (1.5-D) model of ice ages}
\item{\code{pp04_d} : }{Paillard and Parrenin (2004) 3-D model of ice ages}
\item{\code{vcv18_d} : }{Verbitsky - Crucifix - Volubeev (2018) 3-D model of ice ages}
\item{\code{pp12_d} : }{Parrenin and Paillard (2012) 1.5 D model of ice ages}
\item{\code{i11_d} : }{Imbrie et al. (2011) 2-D model of ice ages}
}
}
\value{
A list of models, where each model is a list with
\itemize{
 \item {\code{func}} {Reference to \code{fortran} or \code{C} function. See \code{src} directory for examples of deterministic (ending as \code{_d_f.f90}) and stochastic (ending as \code{_s_f.f90}) models}
\item{\code{name }} {String giving model name}
\item {\code{spar }} {Named vector of standard model parameters. Should contain \code{omega} as the time-scale factor}
 \item {\code{initgrid}} {List of \code{n} items, where \code{n} is the dimension of the state of the system. Each item is a vector of distinct values that will be used to construct a grid of initial conditions when estimating pullback sections and attractors (see \code{\link{basin}})}
}
}
\references{
\enumerate{
\item{J. Imbrie and J. Z. Imbrie, Modelling the climatic response to orbital variations, Science, 207, 943-953 1980}
\item{B. Saltzman and K. A. Maasch, A first-order global model of late Cenozoic climate, Transactions of the Royal Society of Edinburgh Earth Sciences, 81, 315-325 1990}
\item{B. Saltzman and K. A. Maasch, A first-order global model of late Cenozoic climate. II Further analysis based on a simplification of the CO_2 dynamics, Climate Dynamics, 5, 201-210 1991}
\item{E. Tziperman et al., Consequences of pacing the Pleistocene 100 kyr ice ages by nonlinear phase locking to Milankovitch forcing, Paleoceanography, 21, PA4206 2006}
\item{D. Paillard and F. Parrenin, The Antarctic ice sheet and the triggering of deglaciations, Earth Planet. Sc. Lett., 227, 263-271 2004}
\item{John Z. Imbrie, Annabel Imbrie-Moore, and Lorraine E. Lisiecki, A phase-space model for Pleistocene ice volume, Earth and Planetary Science Letters, 307, 94--102 2011}
\item{M. Crucifix, Why glacial-interglacial cycles could be unpredictable, in prep.}
\item{F. Parrenin and D. Paillard, Terminations VI and VIII (∼ 530 and ∼ 720 kyr BP) tell us the importance of obliquity and precession in the triggering of deglaciations, Climate of the Past Discussions, 8, 3143--3157 2012}
}}
\author{
M. Crucifix
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{pullback_d}}, \code{\link{propagate_s}}
}
\examples{
data(models)
print(models$vdp_d)
# see pullback_d for an example of use of deterministic models,
# and propagate_s for a use of stochastic models.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
ab54bb5b7ee93eb0d675c35ca0a85c2f33a08352
|
66105ec95e4c2c59214dd96a85b62143855da002
|
/Rscript/mann_whitney_u_test_fun.R
|
2a8d32264aaae6d47784f94cd11dc07cf6455d64
|
[] |
no_license
|
zhuangaabc/metda
|
87354368a15fd7ca1d67299df9af6eb6899df455
|
0942ef0f69990c54f4990959fce89aea24249298
|
refs/heads/master
| 2020-05-24T07:07:27.980616
| 2019-04-22T22:10:25
| 2019-04-22T22:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,601
|
r
|
mann_whitney_u_test_fun.R
|
pacman::p_load(data.table)

# Debugging helpers kept from the original, commented out.
# fwrite(data.table(p),"p.csv")
# fwrite(data.table(f),"f.csv")
# fwrite(data.table(e),"e.csv")
# return(p)
# dataset_input = fread("dataset_input.csv")
# dataset_input = as.list(dataset_input)
# p = fread("p.csv")
# f = fread("f.csv")
# e = fread("e.csv"); e = data.matrix(e)

# Fetch the compound-by-sample expression matrix for this project.
# NOTE(review): depends on `project_id`, `p`, `f`, `column`, `alternative`
# and `FDR` being supplied by the caller -- confirm against the enclosing app.
e <- data.matrix(fread(URLencode(paste0(
  "http://metda.fiehnlab.ucdavis.edu/db/project/", project_id, "/temp_e.csv"
))))

# Impute missing values row-wise: each NA becomes half the mean of the
# observed values in that row.
e <- t(apply(e, 1, function(x) {
  x[is.na(x)] <- 0.5 * mean(x[!is.na(x)])
  x
}))

groups <- factor(p[[column]])
levels <- levels(groups)

# Mann-Whitney U (Wilcoxon rank-sum) test per compound (matrix row).
# A failing test falls back to p = 1. The handler argument is named `err`
# so it no longer shadows the data matrix `e`.
p_values <- apply(e, 1, function(x) {
  tryCatch(
    wilcox.test(x ~ groups, alternative = alternative)$p.value,
    error = function(err) 1
  )
})

# seq_len() is safe when f has zero rows (the original 1:nrow(f) would
# have produced c(1, 0) and a malformed table).
result <- data.table(
  index = seq_len(nrow(f)),
  label = f$label,
  p_values = p_values,
  p_values_adjusted = p.adjust(p_values, method = FDR)
)

# HTML summary for the report. Fixed: "disvoery" -> "discovery" and the
# missing verb in "is different from / is greater than / is less than".
report_html <- paste0(
  "<h4>Mann-Whitney U test was performed on each compound to test if the median average of <code>",
  levels[1], "</code> ",
  ifelse(alternative == 'two.sided', "is different from",
         ifelse(alternative == 'greater', "is greater than", "is less than")),
  " <code>", levels[2], "</code>. Out of <code>", nrow(f),
  "</code> compounds, <code>", sum(result$p_values < 0.05, na.rm = TRUE),
  "</code> are significant with p_value < 0.05. To control the false discovery rate (FDR), the <code>",
  FDR, "</code> procedure was used and <code>",
  sum(result$p_values_adjusted < 0.05, na.rm = TRUE),
  "</code> compounds are significant after FDR correction.</h4>"
)

result <- list(
  result = result,
  report_html = report_html
)
|
8d3dc4564e3c9df09cec57bdace7d17e36e9abc4
|
7449d885c822e956deed1e6949477330a968348f
|
/Housing Correlations.R
|
9a4831b0e67560416efad2ad8ba15217c034b56b
|
[] |
no_license
|
ychennay/Data-Visualization
|
049c4da4351bde00d03e8d9951ad6d2863db6abd
|
61fb67a318f189ca39502fcba1241f739576d5d0
|
refs/heads/master
| 2020-06-28T16:04:06.942596
| 2017-12-29T19:26:24
| 2017-12-29T19:26:24
| 74,490,445
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,150
|
r
|
Housing Correlations.R
|
# Open a file-picker dialog; choose the CSV file to load into R.
df <- read.csv(file.choose())

# General summary statistics for every column.
summary(df)

# Subset the data to numeric columns only (correlation is undefined for
# categorical columns).
numerical.df <- df[, sapply(df, is.numeric)]

# Compute the correlation matrix ONCE and reuse it below; the original
# recomputed it separately for printing, plotting and export.
cor.matrix <- cor(numerical.df, use = "pairwise.complete.obs")
cor.matrix

# Visualize the correlation matrix.
# Install corrplot only when it is missing -- the original unconditionally
# reinstalled it on every run, which fails without an internet connection.
if (!requireNamespace("corrplot", quietly = TRUE)) {
  install.packages("corrplot")
}
library("corrplot")
corrplot(cor.matrix, method = "circle")

# Write the correlation matrix to a CSV in the current working directory.
# If you can't find the file, search for "correlation.csv" on your computer.
write.csv(cor.matrix, "correlation.csv")

# Run a regression of willingness-to-pay on the survey factors and capture
# the summary (significant variables are flagged in the output file).
summary <- summary(lm(df$Additional.dollars.....montheach.month.your.clients.would.pay.for.LEED.or.Energy.Star.certification ~
                        df$Property.Choice.Factors..Comfort +
                        df$Property.Choice.Factors.Location +
                        df$Property.Choice.Factors..Reputation +
                        df$Property.Choice.Factors..Energy.efficiency +
                        df$Property.Choice.Factors..Appearance.and.aesthetics +
                        df$factor.ranking.Brand.reputation +
                        df$factor.ranking.Ease.of.managing.their.properties +
                        df$factor.ranking..Cost.of.managing.their.properties +
                        df$factor.ranking.Tax.subsidies..rebates +
                        df$factor.ranking..Future.financing.options.from.banks..private.financing.companies..etc. +
                        df$factor.ranking.5..increase.to.the.property.value +
                        df$factor.ranking..20..increase.to.the.property.value +
                        df$factor.ranking..Decrease.in.their.building.s.CO2.and.other.greenhouse.gas.emissions.or.environmental.footprint +
                        df$City.of.Los.Angeles.website +
                        df$UCLA.website +
                        df$Aggregate.real.estate.site.e.g..Zillow.com +
                        df$Private.real.estate.firm.s.website,
                      data = df))
capture.output(summary, file = "regression summary.txt")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.