blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a61465e81ae3231c5a2a089152ca56338f09b1c6
|
8547aaa8ce45122e12ecf8862e52b331f6d54dcd
|
/math_modeling/hw/hw1/lakebedr.R
|
0d6abb9a167be5739a021c0689792cd99ff2d9d4
|
[] |
no_license
|
sjyn/LahTech
|
fad9f3356afa18678900a742c749a829a21dac9f
|
adb5130e210c76e9371d34c2a4cb2a255b429d39
|
refs/heads/master
| 2021-01-15T15:43:50.843145
| 2016-12-07T14:55:09
| 2016-12-07T14:55:09
| 43,650,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
lakebedr.R
|
#!/usr/bin/env Rscript
# Fit a multiple linear regression on the lakebed data set:
# column 3 (response) regressed on columns 2 and 1 (explanatory),
# then print the fitted coefficients.
# NOTE: expects 'lakebed.txt' in the current working directory.
# (Shebang fixed: the front end is `Rscript`, not `RScript`; `env` makes
# the script portable across install locations.)
data <- read.table('lakebed.txt')
ml <- lm(data[,3] ~ data[,2] + data[,1])
print(ml)
|
e98e776b245d3b20d4f2d130ce59a1de42f1e9d0
|
707291bd32b30b00ffb1bc935913dc08aef5616f
|
/lecturenote/pcor-bcondo.r
|
5dc657d1b3324e2fe9661a961a7d649db287d754
|
[] |
no_license
|
weininghu1012/STAT306
|
b7e2e7e6e0d9362338c65a33e0fdd9619c31acea
|
a2a35eea51b520e1f050d9daad6eb9fc0551f593
|
refs/heads/master
| 2021-01-01T18:18:12.987529
| 2015-04-08T01:18:17
| 2015-04-08T01:18:17
| 29,931,606
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,033
|
r
|
pcor-bcondo.r
|
# Partial correlation of two variables given all remaining ones.
#
# Assume s is a sample covariance or sample correlation matrix with
# (possible) row and column names.  The conditional covariance of the two
# chosen variables given the rest is the Schur complement
#     s11 - s12 %*% solve(s22) %*% s21,
# which is then standardized to a correlation.
#
# Args:
#   s:    square covariance/correlation matrix with at least 3 variables.
#   i, j: indices of the two variables of interest.  The defaults (1, 2)
#         reproduce the original behavior: partial correlation of the
#         first two variables given all the others.
# Returns:
#   The scalar partial correlation r_{ij ; rest}.
pcor <- function(s, i = 1, j = 2)
{
  keep <- c(i, j)
  rest <- setdiff(seq_len(nrow(s)), keep)
  # drop = FALSE keeps every sub-block a matrix even when only one
  # conditioning variable remains, so the %*% products below never depend
  # on R guessing the orientation of a dropped-to-vector slice.
  s11 <- s[keep, keep, drop = FALSE]
  s12 <- s[keep, rest, drop = FALSE]
  s21 <- s[rest, keep, drop = FALSE]
  s22 <- s[rest, rest, drop = FALSE]
  condcov <- s11 - s12 %*% solve(s22) %*% s21
  condcov[1, 2] / sqrt(condcov[1, 1] * condcov[2, 2])
}
# Load the Burnaby condominium data (first two description rows skipped)
# and rescale columns so regression coefficients have comparable magnitude.
# NOTE(review): absolute path ties this script to one machine.
b=read.table("/Users/apple/Desktop/STAT306/lecturenote/burnabycondo.txt",header=T,skip=2)
b=b[,2:9]
b$askprice=b$askprice/10000  # asking price in $10k units
b$ffarea=b$ffarea/100        # finished floor area in 100 sqft units
b$mfee=b$mfee/10             # monthly fee in $10 units
b$sqfl=sqrt(b$floor)         # square root of floor number (derived variable)
attach(b)  # attach() is discouraged style, but kept: later lm() calls use bare column names
options(digits=3)
rmat=cor(b); print(rmat) # sample correlation matrix
# askprice ffarea beds baths floor view age mfee sqfl
#askprice 1.000 0.479 0.5756 0.6234 0.5101 0.3830 -0.4927 0.3774 0.5183
#ffarea 0.479 1.000 0.7954 0.7068 0.0330 0.1329 0.2883 0.8390 0.1063
#beds 0.576 0.795 1.0000 0.8308 0.0405 0.0786 0.0455 0.6916 0.0921
#baths 0.623 0.707 0.8308 1.0000 0.0730 0.0869 -0.1546 0.7186 0.1195
#floor 0.510 0.033 0.0405 0.0730 1.0000 0.5475 -0.1744 0.0272 0.9641
#view 0.383 0.133 0.0786 0.0869 0.5475 1.0000 -0.1854 0.0638 0.5994
#age -0.493 0.288 0.0455 -0.1546 -0.1744 -0.1854 1.0000 0.2426 -0.1261
#mfee 0.377 0.839 0.6916 0.7186 0.0272 0.0638 0.2426 1.0000 0.0650
#sqfl 0.518 0.106 0.0921 0.1195 0.9641 0.5994 -0.1261 0.0650 1.0000
# Full regression of asking price on seven explanatory variables.
attach(b)  # NOTE(review): b is already attached above; this second attach is redundant
bur7=lm(askprice~ ffarea+beds+baths+sqfl+view+age+mfee)
print(summary(bur7))
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 8.2064 3.3535 2.45 0.01730 *
#ffarea 2.9516 0.7802 3.78 0.00036 ***
#beds 4.4306 2.3992 1.85 0.06964 .
#baths 0.5278 2.6764 0.20 0.84433
#sqfl 2.7095 0.4606 5.88 1.8e-07 ***
#view -0.9676 1.5504 -0.62 0.53489
#age -0.5361 0.0665 -8.06 3.4e-11 ***
#mfee -0.1391 0.1830 -0.76 0.45013
options(digits=7)  # restore default print precision for the partial correlations
# Part 1: partial correlations : equivalent definitions
str1.order=c('askprice','mfee','ffarea')
r.ymfee.ffarea=pcor(rmat[str1.order,str1.order])
print(r.ymfee.ffarea) # -0.05153492 partial corr y and mfee given ffarea
str2.order=c('askprice','mfee','ffarea','sqfl')
r.ymfee.ffareasqfl=pcor(rmat[str2.order,str2.order])
print(r.ymfee.ffareasqfl) # -0.03275904 partial corr y, mfee given ffarea,sqfl
str3.order=c('askprice','mfee','ffarea','sqfl','age')
r.ymfee.ffareasqflage=pcor(rmat[str3.order,str3.order])
print(r.ymfee.ffareasqflage) # 0.06201165
# some regressions
# Each variable is regressed on ffarea; correlating pairs of residual
# vectors gives the partial correlation given ffarea (second, equivalent
# definition of a partial correlation).
fit.y.ffarea=lm(askprice~ffarea)
fit.mfee.ffarea=lm(mfee~ffarea)
fit.sqfl.ffarea=lm(sqfl~ffarea)
fit.age.ffarea=lm(age~ffarea)
fit.beds.ffarea=lm(beds~ffarea)
cor.res=cor(fit.y.ffarea$residuals,fit.mfee.ffarea$residuals)
print(cor.res) # -0.05153492, same as r.ymfee.ffarea
# exercise: get r.ymfee.ffareasqfl in a similar way.
par(mfrow=c(2,2))  # 2x2 grid of residual-vs-residual scatterplots
plot(fit.y.ffarea$residuals,fit.mfee.ffarea$residuals) # little assoc
plot(fit.y.ffarea$residuals,fit.sqfl.ffarea$residuals) # positive assoc
plot(fit.y.ffarea$residuals,fit.age.ffarea$residuals) # negative assoc
plot(fit.y.ffarea$residuals,fit.beds.ffarea$residuals) # weak positive assoc
# Part 2: R2 : equivalent identities
# formula for R2 from 2 explanatory
bur2=lm(askprice~ ffarea+sqfl)
print(summary(bur2))
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 7.4413 4.9246 1.511 0.136
#ffarea 2.5880 0.5538 4.674 1.51e-05 ***
#sqfl 3.1819 0.6177 5.151 2.53e-06 ***
#Residual standard error: 8.248 on 66 degrees of freedom
#Multiple R-squared: 0.4505, Adjusted R-squared: 0.4338
str4.order=c('askprice','sqfl','ffarea')
r.ysqfl.ffarea=pcor(rmat[str4.order,str4.order])
print(r.ysqfl.ffarea) # 0.535509 partial corr y,sqfl given ffarea
print(cor(fit.y.ffarea$residuals,fit.sqfl.ffarea$residuals)) # 0.535509
# Verify the R^2 decomposition: one simple correlation plus one partial
# correlation determine the two-variable R^2.
tem=(1-rmat['askprice','ffarea']^2)*(1-r.ysqfl.ffarea^2)
cat(tem,1-tem,"\n")
# 0.5495139 0.4504861 so R^2 = 1- (1-r_{yx_1}^2)(1-r_{yx_2;x_1}^2)
# Part 3: sign of betahat and matching partial correlation
# sign(betahat)=sign(partialcorrelation)
bur2m=lm(askprice~ ffarea+mfee)
print(summary(bur2m))
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 14.4973 5.5977 2.590 0.01180 *
#ffarea 3.3121 1.1966 2.768 0.00732 **
#mfee -0.1375 0.3279 -0.419 0.67641
#Residual standard error: 9.753 on 66 degrees of freedom
#Multiple R-squared: 0.2316, Adjusted R-squared: 0.2083
print(r.ymfee.ffarea) # -0.05153492 partial corr y and mfee given ffarea
# the sign of beta2hat matches that of r_{yx_2;x_1} and not r_{yx_2}
# here sign of betahat(mfee) matches that of r_{y,mfee;ffarea}, not r_{y,mfee}
# see lecture slides for the identity in the case of p=2 explanatory
|
4b25de6c8ca31e2b4fb959d55c31f3e533cf8e8b
|
653aedf1b27c27d421dc36278a71058dfdead905
|
/inst/extdata/GO/set5-1/treemap.R
|
665d04620ee970276cc22116b29789a57d1b475b
|
[] |
no_license
|
18853857973/rnaseq.mcf10a
|
44dcc4d5e99ad8ec5c0ae41b273be3041a9e528a
|
b14c3fb67eb9d85051f77daf9418cf5531950097
|
refs/heads/master
| 2020-03-14T15:38:35.022209
| 2015-10-13T15:11:26
| 2015-10-13T15:11:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,436
|
r
|
treemap.R
|
# A treemap R script produced by the REVIGO server at http://revigo.irb.hr/
# If you found REVIGO useful in your work, please cite the following reference:
# Supek F et al. "REVIGO summarizes and visualizes long lists of Gene Ontology
# terms" PLoS ONE 2011. doi:10.1371/journal.pone.0021800
# author: Anton Kratz <anton.kratz@gmail.com>, RIKEN Omics Science Center, Functional Genomics Technology Team, Japan
# created: Fri, Nov 02, 2012 7:25:52 PM
# last change: Fri, Nov 09, 2012 3:20:01 PM
# -----------------------------------------------------------------------------
# If you don't have the treemap package installed, uncomment the following line:
# install.packages( "treemap" );
library(treemap) # treemap package by Martijn Tennekes
# Set the working directory if necessary
# setwd("C:/Users/username/workingdir");
# --------------------------------------------------------------------------
# Here is your data from REVIGO. Scroll down for plot configuration options.
revigo.names <- c("term_ID","description","freqInDbPercent","abslog10pvalue","uniqueness","dispensability","representative");
revigo.data <- rbind(c("GO:0032008","positive regulation of TOR signaling",0.027,2.7696,0.771,0.000,"positive regulation of TOR signaling"),
c("GO:0032916","positive regulation of transforming growth factor beta3 production",0.008,2.3468,0.834,0.101,"positive regulation of TOR signaling"),
c("GO:1901699","cellular response to nitrogen compound",1.214,1.7986,0.766,0.212,"positive regulation of TOR signaling"),
c("GO:0038092","nodal signaling pathway",0.028,1.5784,0.814,0.213,"positive regulation of TOR signaling"),
c("GO:0050668","positive regulation of homocysteine metabolic process",0.005,2.3468,0.675,0.247,"positive regulation of TOR signaling"),
c("GO:0023019","signal transduction involved in regulation of gene expression",0.169,1.3605,0.753,0.248,"positive regulation of TOR signaling"),
c("GO:1901313","positive regulation of gene expression involved in extracellular matrix organization",0.007,1.8761,0.761,0.282,"positive regulation of TOR signaling"),
c("GO:0001938","positive regulation of endothelial cell proliferation",0.107,1.7167,0.790,0.299,"positive regulation of TOR signaling"),
c("GO:0097296","activation of cysteine-type endopeptidase activity involved in apoptotic signaling pathway",0.015,1.8761,0.709,0.313,"positive regulation of TOR signaling"),
c("GO:0031929","TOR signaling",0.133,1.7520,0.804,0.340,"positive regulation of TOR signaling"),
c("GO:0043162","ubiquitin-dependent protein catabolic process via the multivesicular body sorting pathway",0.032,1.3605,0.838,0.345,"positive regulation of TOR signaling"),
c("GO:0045945","positive regulation of transcription from RNA polymerase III promoter",0.014,1.5784,0.767,0.368,"positive regulation of TOR signaling"),
c("GO:0097284","hepatocyte apoptotic process",0.043,1.5129,0.907,0.371,"positive regulation of TOR signaling"),
c("GO:0050921","positive regulation of chemotaxis",0.168,1.6364,0.745,0.413,"positive regulation of TOR signaling"),
c("GO:0010694","positive regulation of alkaline phosphatase activity",0.025,1.7520,0.787,0.432,"positive regulation of TOR signaling"),
c("GO:0033689","negative regulation of osteoblast proliferation",0.021,1.6556,0.841,0.437,"positive regulation of TOR signaling"),
c("GO:0032069","regulation of nuclease activity",0.083,1.4724,0.771,0.439,"positive regulation of TOR signaling"),
c("GO:0001932","regulation of protein phosphorylation",2.262,1.6440,0.689,0.480,"positive regulation of TOR signaling"),
c("GO:1901148","gene expression involved in extracellular matrix organization",0.008,1.8761,0.846,0.517,"positive regulation of TOR signaling"),
c("GO:0031053","primary miRNA processing",0.007,1.5784,0.714,0.541,"positive regulation of TOR signaling"),
c("GO:0042762","regulation of sulfur metabolic process",0.036,1.8761,0.794,0.543,"positive regulation of TOR signaling"),
c("GO:0006600","creatine metabolic process",0.016,1.4045,0.823,0.546,"positive regulation of TOR signaling"),
c("GO:0019082","viral protein processing",0.014,1.3605,0.799,0.549,"positive regulation of TOR signaling"),
c("GO:0002052","positive regulation of neuroblast proliferation",0.047,1.3605,0.749,0.556,"positive regulation of TOR signaling"),
c("GO:0061045","negative regulation of wound healing",0.018,1.5129,0.839,0.576,"positive regulation of TOR signaling"),
c("GO:0007183","SMAD protein complex assembly",0.028,1.4045,0.768,0.588,"positive regulation of TOR signaling"),
c("GO:0006987","activation of signaling protein activity involved in unfolded protein response",0.074,1.5638,0.582,0.634,"positive regulation of TOR signaling"),
c("GO:0032870","cellular response to hormone stimulus",1.400,1.3429,0.769,0.635,"positive regulation of TOR signaling"),
c("GO:0019068","virion assembly",0.024,1.3197,0.829,0.638,"positive regulation of TOR signaling"),
c("GO:2001267","regulation of cysteine-type endopeptidase activity involved in apoptotic signaling pathway",0.022,1.5784,0.716,0.650,"positive regulation of TOR signaling"),
c("GO:0048870","cell motility",2.570,1.5186,0.847,0.657,"positive regulation of TOR signaling"),
c("GO:0045963","negative regulation of dopamine metabolic process",0.009,2.0506,0.685,0.685,"positive regulation of TOR signaling"),
c("GO:0032259","methylation",1.044,1.6925,0.944,0.000,"methylation"),
c("GO:0051674","localization of cell",2.570,1.5186,0.952,0.000,"localization of cell"),
c("GO:0019049","evasion or tolerance of host defenses by virus",0.003,1.8761,0.804,0.043,"evasion or tolerance of host defenses by virus"),
c("GO:0052173","response to defenses of other organism involved in symbiotic interaction",0.043,1.5129,0.868,0.494,"evasion or tolerance of host defenses by virus"),
c("GO:0006669","sphinganine-1-phosphate biosynthetic process",0.001,2.3468,0.837,0.044,"sphinganine-1-phosphate biosynthesis"),
c("GO:0051568","histone H3-K4 methylation",0.085,2.1079,0.763,0.106,"sphinganine-1-phosphate biosynthesis"),
c("GO:0009404","toxin metabolic process",0.038,1.5784,0.885,0.131,"sphinganine-1-phosphate biosynthesis"),
c("GO:0030388","fructose 1,6-bisphosphate metabolic process",0.009,2.0506,0.896,0.197,"sphinganine-1-phosphate biosynthesis"),
c("GO:0043000","Golgi to plasma membrane CFTR protein transport",0.002,2.0506,0.883,0.206,"sphinganine-1-phosphate biosynthesis"),
c("GO:0036289","peptidyl-serine autophosphorylation",0.006,1.8761,0.852,0.249,"sphinganine-1-phosphate biosynthesis"),
c("GO:0010388","cullin deneddylation",0.010,1.6556,0.854,0.258,"sphinganine-1-phosphate biosynthesis"),
c("GO:0006491","N-glycan processing",0.023,1.5784,0.826,0.271,"sphinganine-1-phosphate biosynthesis"),
c("GO:0015804","neutral amino acid transport",0.080,1.3605,0.939,0.301,"sphinganine-1-phosphate biosynthesis"),
c("GO:0048308","organelle inheritance",0.009,1.6556,0.894,0.302,"sphinganine-1-phosphate biosynthesis"),
c("GO:2000641","regulation of early endosome to late endosome transport",0.014,1.6556,0.849,0.367,"sphinganine-1-phosphate biosynthesis"),
c("GO:0090170","regulation of Golgi inheritance",0.005,1.8761,0.818,0.472,"sphinganine-1-phosphate biosynthesis"),
c("GO:0006000","fructose metabolic process",0.039,1.5784,0.898,0.474,"sphinganine-1-phosphate biosynthesis"),
c("GO:0006668","sphinganine-1-phosphate metabolic process",0.002,2.0506,0.836,0.521,"sphinganine-1-phosphate biosynthesis"),
c("GO:0071816","tail-anchored membrane protein insertion into ER membrane",0.011,1.8761,0.846,0.531,"sphinganine-1-phosphate biosynthesis"),
c("GO:0033523","histone H2B ubiquitination",0.010,1.4547,0.789,0.533,"sphinganine-1-phosphate biosynthesis"),
c("GO:0035413","positive regulation of catenin import into nucleus",0.014,1.5129,0.810,0.539,"sphinganine-1-phosphate biosynthesis"),
c("GO:0006646","phosphatidylethanolamine biosynthetic process",0.021,1.3197,0.851,0.541,"sphinganine-1-phosphate biosynthesis"),
c("GO:0006670","sphingosine metabolic process",0.017,1.3605,0.785,0.594,"sphinganine-1-phosphate biosynthesis"),
c("GO:0019336","phenol-containing compound catabolic process",0.023,1.8761,0.815,0.051,"phenol-containing compound catabolism"),
c("GO:0042135","neurotransmitter catabolic process",0.021,1.5129,0.843,0.296,"phenol-containing compound catabolism"),
c("GO:0030011","maintenance of cell polarity",0.014,1.7520,0.927,0.051,"maintenance of cell polarity"),
c("GO:0060290","transdifferentiation",0.014,2.0506,0.899,0.051,"transdifferentiation"),
c("GO:0048617","embryonic foregut morphogenesis",0.048,1.5129,0.905,0.215,"transdifferentiation"),
c("GO:0048265","response to pain",0.089,1.3605,0.880,0.219,"transdifferentiation"),
c("GO:0048340","paraxial mesoderm morphogenesis",0.039,1.4547,0.912,0.341,"transdifferentiation"),
c("GO:0001889","liver development",0.340,1.3915,0.900,0.357,"transdifferentiation"),
c("GO:0021895","cerebral cortex neuron differentiation",0.049,1.3605,0.864,0.363,"transdifferentiation"),
c("GO:0061008","hepaticobiliary system development",0.344,1.3799,0.907,0.368,"transdifferentiation"),
c("GO:0030878","thyroid gland development",0.051,1.3605,0.909,0.642,"transdifferentiation"));
# Convert the character matrix built by rbind() into a data frame and
# coerce the numeric columns.  The as.numeric(as.character(...)) round
# trip guards against factor columns (the default on R < 4.0).
stuff <- data.frame(revigo.data);
names(stuff) <- revigo.names;
stuff$abslog10pvalue <- as.numeric( as.character(stuff$abslog10pvalue) );
stuff$freqInDbPercent <- as.numeric( as.character(stuff$freqInDbPercent) );
stuff$uniqueness <- as.numeric( as.character(stuff$uniqueness) );
stuff$dispensability <- as.numeric( as.character(stuff$dispensability) );
# by default, outputs to a PDF file
pdf( file="revigo_treemap.pdf", width=16, height=9 ) # width and height are in inches
# check the tmPlot command documentation for all possible parameters - there are a lot more
# NOTE(review): tmPlot() is the old entry point of the treemap package;
# recent versions expose treemap() instead - confirm against the installed version.
tmPlot(
stuff,
index = c("representative","description"), # hierarchy: group rectangle, then individual GO term
vSize = "abslog10pvalue",                  # rectangle area ~ significance
type = "categorical",
vColor = "representative",                 # one color per representative group
title = "REVIGO Gene Ontology treemap",
inflate.labels = FALSE, # set this to TRUE for space-filling group labels - good for posters
lowerbound.cex.labels = 0, # try to draw as many labels as possible (still, some small squares may not get a label)
bg.labels = "#CCCCCCAA", # define background color of group labels
# "#CCCCCC00" is fully transparent, "#CCCCCCAA" is semi-transparent grey, NA is opaque
position.legend = "none"
)
dev.off()  # close the PDF device so the file is flushed to disk
|
b951ab6b7a255a180d90023c489fdf2cc45c5aca
|
e6d16cc0cedbf0994ed6bc4dd2ad1bf8c97d4dda
|
/data-raw/spec/vignettes.R
|
75ac72349dc0f3a1527cbe361ed92a4410279c8a
|
[] |
no_license
|
Musaab-Farooqui/qss-package
|
b7e695f3258bdccddae8f28f8d90921eb9544f8d
|
753787f2263e002c4b9d0ef39d1d01d84f737e5b
|
refs/heads/master
| 2023-05-26T05:14:42.643740
| 2021-06-05T14:11:05
| 2021-06-05T14:11:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 154
|
r
|
vignettes.R
|
# readr column specification: parse each listed column as integer.
# NOTE(review): presumably supplied as the `col_types` argument to a
# readr::read_csv()/read_tsv() call by whoever sources this file - this
# fragment on its own performs no reading; confirm against the caller.
cols(
self = col_integer(),
alison = col_integer(),
jane = col_integer(),
moses = col_integer(),
china = col_integer(),
age = col_integer()
)
|
721cb9c3cf5e1395af52262c0f6bed3a0a8f3649
|
d7ff71e8ffb07419aad458fb2114a752c5bf562c
|
/tests/testthat/roxygen-examples-complete/16-dont-warn-empty-out.R
|
64118f44082bb74351068bb3a98a1fb55a0603b5
|
[
"MIT"
] |
permissive
|
r-lib/styler
|
50dcfe2a0039bae686518959d14fa2d8a3c2a50b
|
ca400ad869c6bc69aacb2f18ec0ffae8a195f811
|
refs/heads/main
| 2023-08-24T20:27:37.511727
| 2023-08-22T13:27:51
| 2023-08-22T13:27:51
| 81,366,413
| 634
| 79
|
NOASSERTION
| 2023-09-11T08:24:43
| 2017-02-08T19:16:37
|
R
|
UTF-8
|
R
| false
| false
| 100
|
r
|
16-dont-warn-empty-out.R
|
#' Do stuff
#'
#' Some things we do
#' @examples
#' g()
#' \dontrun{
#' f(x)
#' }
#'
#' @export
g()
|
4f7e440bfe7ccf8e687fffb9bef888aca3c4a8de
|
97c2cfd517cdf2a348a3fcb73e9687003f472201
|
/R/src/QFPairsTrading/tests/testModifiedFuturesPair.r
|
5728bba98120814e2e0a89c657bfc7fdd25646ee
|
[] |
no_license
|
rsheftel/ratel
|
b1179fcc1ca55255d7b511a870a2b0b05b04b1a0
|
e1876f976c3e26012a5f39707275d52d77f329b8
|
refs/heads/master
| 2016-09-05T21:34:45.510667
| 2015-05-12T03:51:05
| 2015-05-12T03:51:05
| 32,461,975
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
testModifiedFuturesPair.r
|
library(QFPairsTrading)
# Directory of ModifiedFuturesPair test fixtures shipped with the package.
testdataPath <- squish(system.file("testdata", package="QFPairsTrading"),'/ModifiedFuturesPair/')
# Scratch area for files uploaded during the tests (removed by the tests themselves).
tempDir <- squish(dataDirectory(),'temp_TSDB/')
# Load a comma-separated, headered time-series file as a zoo object,
# parsing the index column as ISO (%Y-%m-%d) dates.
readZooFile <- function(filename){
  read.zoo(filename, format = '%Y-%m-%d', sep = ",", header = TRUE)
}
# Smoke test: the ModifiedFuturesPair constructor returns an object of the
# expected class for a base/hedge market pair.
testConstructor <- function() {
mc <- ModifiedFuturesPair(market.base = 'RE.TEST.TY.1C', market.hedge = 'RE.TEST.TU.1C')
checkInherits(mc, "ModifiedFuturesPair")
}
# TRI generation should tolerate missing dates in the underlying series:
# build a pair from a raw CSV fixture, generate the total-return index,
# and compare against hard-coded expected values - unlagged, then with a
# one-period hedge-ratio lag.
test.missingDates <- function(){
fp <- ModifiedFuturesPair(market.base = 'AA.1C', market.hedge = 'BB.1C')
fp$setUnderlyingTRIs(container=squish(testdataPath,'AABB_rawData.csv'))
fp$setHedgeRatio(specificNames=list(base='aa',hedge='bb'), hedgeRatio.name = 'price_value_basis_point',
hedgeRatio.source='test',
container=squish(testdataPath,'AABB_rawData.csv'))
fp$generateTRI()
# expected values were hand-computed for the AABB fixture (every other day)
expected <- zoo(c(100,98.81947,87.77605,98.03786,97.96266),as.POSIXct(c('2000-01-02','2000-01-04','2000-01-06','2000-01-08','2000-01-10')))
checkSameLooking(round(fp$getTRI(),5), expected)
expected <- zoo(c(100,101.46876,93.76005,103.61511,105.82907),as.POSIXct(c('2000-01-02','2000-01-04','2000-01-06','2000-01-08','2000-01-10')))
fp$generateTRI(hedgeRatio.lag = 1)
checkSameLooking(round(fp$getTRI(),5), expected)
}
# Regression test against frozen output: generate a TRI from systemdb
# underlyings, upload it to a temp file, and compare (to 6 decimals) with
# the benchmark CSV checked into the test data.  Cleans up the uploaded file.
test.frozenTRIs <- function(){
fp <- ModifiedFuturesPair(market.base = 'RE.TEST.TY.1C', market.hedge = 'RE.TEST.TU.1C')
fp$setUnderlyingTRIs(container='systemdb')
fp$setHedgeRatio(specificNames=list(base='ty',hedge='tu'), hedgeRatio.name = 'pvbp', hedgeRatio.source='test',
container=squish(testdataPath,'RE.TEST.pvbp.csv'))
fp$generateTRI(hedgeRatio.offset = 0)
fp$uploadTRI(tsdbSource='internal',uploadPath=tempDir,uploadMethod='file')
filename <- 'RE.TEST.TY.1C_RE.TEST.TU.1C_pvbp_test.csv'
benchFile <- squish(testdataPath,filename)
uploadFile <- squish(tempDir,filename)
checkSame(round(readZooFile(benchFile),6),round(readZooFile(uploadFile),6))
file.remove(uploadFile)  # leave the temp upload area clean
}
# Hedge ratios can be set directly by name from a CSV container; spot-check
# the first/last values loaded into the pair's .hedgeRatio fields.
test.dataDirect <- function(){
fp <- ModifiedFuturesPair(market.base = 'RE.TEST.TY.1C', market.hedge = 'RE.TEST.TU.1C')
fp$setHedgeRatioByName('base','tu_pvbp','test',container=squish(testdataPath,'RE.TEST.pvbp.csv'))
fp$setHedgeRatioByName('hedge','ty_pvbp','test',container=squish(testdataPath,'RE.TEST.pvbp.csv'))
checkSame(0.06408, first(fp$.hedgeRatio$hedge))
checkSame(0.02034, last(fp$.hedgeRatio$base))
}
|
caef93e79b199e32574f0c44cc645d57231fd5b6
|
14c2f47364f72cec737aed9a6294d2e6954ecb3e
|
/man/isAnnotated.Rd
|
adde62df947d03af4a6296bd2f70f1bb23d7cb74
|
[] |
no_license
|
bedapub/ribiosNGS
|
ae7bac0e30eb0662c511cfe791e6d10b167969b0
|
a6e1b12a91068f4774a125c539ea2d5ae04b6d7d
|
refs/heads/master
| 2023-08-31T08:22:17.503110
| 2023-08-29T15:26:02
| 2023-08-29T15:26:02
| 253,536,346
| 2
| 3
| null | 2022-04-11T09:36:23
| 2020-04-06T15:18:41
|
R
|
UTF-8
|
R
| false
| true
| 474
|
rd
|
isAnnotated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/AllMethods.R
\name{isAnnotated}
\alias{isAnnotated}
\alias{isAnnotated,EdgeObject-method}
\title{Is the object annotated}
\usage{
isAnnotated(object)
\S4method{isAnnotated}{EdgeObject}(object)
}
\arguments{
\item{object}{An object}
}
\description{
Is the object annotated
}
\section{Methods (by class)}{
\itemize{
\item \code{isAnnotated(EdgeObject)}: Method for EdgeObject
}}
|
4276eabe50067b0200b842503c2631f8f45c3f1f
|
8bdd8e6f050b118f661d357f626d43feeb383de4
|
/man/OutputPlotForPaper.Rd
|
2a5211e49432b1894f4ea656c4973084e5e88d98
|
[] |
no_license
|
StatsResearch/RobsRUtils
|
34a545bfa2eada90b805ce638541840fcad2f9c1
|
8d4ff91b17b40837475302e8a04d31d3f08d04db
|
refs/heads/master
| 2021-01-09T06:40:13.639503
| 2018-01-15T21:05:59
| 2018-01-15T21:05:59
| 81,020,969
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,128
|
rd
|
OutputPlotForPaper.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RobsRUtils.R
\name{OutputPlotForPaper}
\alias{OutputPlotForPaper}
\title{Wrapper function for plotting ggplot2 in a more general output format}
\usage{
OutputPlotForPaper(plot.dir, plot.folder, filename, plot.obj = NULL,
plot.width = NULL, plot.height = NULL, units = "in", res = 600)
}
\arguments{
\item{plot.dir}{- base output directory for the plot}
\item{plot.folder}{- output folder for the plot}
\item{filename}{- the filename for the plot}
\item{plot.obj}{- a ggplot2 object}
\item{plot.width}{- used to specify page size}
\item{plot.height}{- used to specify page size}
\item{units}{- unit for width/height, defaults to inches ("in")}
\item{res}{- resolution, defaults to 600; used by png output}
}
\description{
Wrapper function for plotting ggplot2 in a more general output format
}
\examples{
\dontrun{
x.vals<-c(1,2,3)
y.vals<-c(2,4,6)
plot.data<-data_frame(x.vals,y.vals)
p<-ggplot(data = plot.data,aes(x=x.vals,y=y.vals))
p<-p+geom_line(size=0.5)
p<-p+geom_point(size=2)
titleStr<-'A simple plot'
p<-p+labs(title = titleStr)
print(p)
plot.dir <- './'
file.name<-'A good file name.pdf'
OutputPlotForPaper(plot.dir, plot.folder = 'figures', filename = file.name, plot.obj = p)
}
}
|
9ef86b9f0c5fb3ff6a342ebb478b3b294adb48e8
|
45aebfdd9d491ce87ed4121737f6a5d892bc7646
|
/tests/testthat/test_ossvm.R
|
1ca54a8651b30f939097a7ae67a4f421cae711f1
|
[
"BSD-3-Clause"
] |
permissive
|
schiffner/locClass
|
3698168da43af5802e5391c3b416a3ca3eb90cbe
|
9b7444bc0556e3aafae6661b534727cd8c8818df
|
refs/heads/master
| 2021-01-19T05:21:57.704770
| 2016-08-21T19:25:12
| 2016-08-21T19:25:12
| 42,644,102
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,109
|
r
|
test_ossvm.R
|
context("ossvm")
# Argument validation: ossvm should fail fast on malformed formula/data
# combinations instead of fitting a nonsensical model.
test_that("ossvm: misspecified arguments", {
data(iris)
# wrong variable names
expect_error(ossvm(formula = Species ~ V1, data = iris, wf = "gaussian", bw = 10))
# wrong class
expect_error(ossvm(formula = iris, data = iris, wf = "gaussian", bw = 10))
expect_error(ossvm(iris, data = iris, wf = "gaussian", bw = 10))
# target variable also in x
expect_error(ossvm(y = iris$Species, x = iris, wf = "gaussian", bw = 10))
expect_warning(ossvm(Species ~ Species + Petal.Width, data = iris, wf = "gaussian", bw = 10)) ## warning, Species on RHS removed
# missing x
expect_error(ossvm(y = iris$Species, wf = "gaussian", bw = 10))
})
# test_that("ossvm throws a warning if y variable is numeric", {
# data(iris)
# formula, data
# expect_that(ossvm(formula = as.numeric(Species) ~ ., data = iris, wf = "gaussian", bw = 10), gives_warning("'y' was coerced to a factor"))
# y, x
# expect_that(ossvm(y = iris[,1], x = iris[,-1], wf = "gaussian", bw = 10), gives_warning("'y' was coerced to a factor"))
# })
# ossvm should handle a single-predictor model end to end (fit + predict).
test_that("ossvm works if only one predictor variable is given", {
  data(iris)
  fit <- ossvm(Species ~ Petal.Width, data = iris, wf = "gaussian", bw = 5)
  # The original called predict(fit) without any expectation, so a failure
  # would only surface as a hard error; expect_error(..., NA) asserts that
  # prediction completes without error, giving the test a real expectation.
  expect_error(predict(fit), NA)
})
# Training data containing a single class must be rejected.
# (expect_error() with a message pattern replaces the deprecated
# expect_that(..., throws_error()) idiom; behavior asserted is unchanged.)
test_that("ossvm: training data from only one class", {
  data(iris)
  expect_error(ossvm(Species ~ ., data = iris, bw = 2, subset = 1:50), "training data from only one class")
  expect_error(ossvm(Species ~ ., data = iris, bw = 2, subset = 1))
  expect_error(ossvm(y = iris$Species, x = iris[,-5], bw = 2, subset = 1:50), "training data from only one class")
  expect_error(ossvm(y = iris$Species, x = iris[,-5], bw = 2, subset = 1))
})
# Fitting with `subset` must be equivalent to fitting on the pre-subsetted
# data, for both the formula and the x/y interfaces; malformed `subset`
# arguments must error.  (expect_warning() replaces the deprecated
# expect_that(..., gives_warning()) idiom; rows 1:80 of iris contain no
# virginica observations, hence the empty-group warning.)
test_that("ossvm: subsetting works", {
  data(iris)
  # formula, data
  expect_warning(fit1 <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 2, subset = 1:80), "group virginica is empty")
  expect_warning(fit2 <- ossvm(Species ~ ., data = iris[1:80,], wf = "gaussian", bw = 2), "group virginica is empty")
  expect_equal(fit1[-1],fit2[-1])
  expect_equal(nrow(fit1$x), 80)
  expect_equal(length(fit1$y), 80)
  # x, y
  expect_warning(fit1 <- ossvm(y = iris$Species, x = iris[,-5], wf = "gaussian", bw = 2, subset = 1:80), "group virginica is empty")
  expect_warning(fit2 <- ossvm(y = iris$Species[1:80], x = iris[1:80,-5], wf = "gaussian", bw = 2), "group virginica is empty")
  expect_equal(fit1[-1],fit2[-1])
  expect_equal(nrow(fit1$x), 80)
  expect_equal(length(fit1$y), 80)
  # wrong specification of subset argument
  expect_error(ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = iris[1:10,]))
  expect_error(ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = FALSE)) #???
  expect_error(ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = 0)) #???
  expect_error(ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = -10:50))
})
# NA handling: na.fail must error, and na.omit must produce the same fit
# as manually dropping the incomplete rows, for NAs appearing in x, in y
# and in `subset`, under both the formula and the x/y interfaces.
# (expect_error()/expect_warning() replace the deprecated
# expect_that(..., throws_error()/gives_warning()) idiom; the assertions
# are otherwise unchanged.)
test_that("ossvm: NA handling works correctly", {
  ### NA in x
  data(iris)
  irisna <- iris
  irisna[1:10, c(1,3)] <- NA
  ## formula, data
  # na.fail
  expect_error(ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 6:60, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 6:60, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
  ## x, y
  # na.fail
  expect_error(ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 6:60, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 6:60, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
  ### NA in y
  irisna <- iris
  irisna$Species[1:10] <- NA
  ## formula, data
  # na.fail
  expect_error(ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 6:60, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 6:60, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(Species ~ ., data = irisna, wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
  ## x, y
  # na.fail
  expect_error(ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 6:60, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 6:60, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(y = irisna$Species, x = irisna[,-5], wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
  ### NA in subset
  subset <- 6:60
  subset[1:5] <- NA
  ## formula, data
  # na.fail
  expect_error(ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = subset, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = subset, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
  ## x, y
  # na.fail
  expect_error(ossvm(y = iris$Species, x = iris[,-5], wf = "gaussian", bw = 10, subset = subset, na.action = na.fail), "missing values in object")
  # check if na.omit works correctly
  expect_warning(fit1 <- ossvm(y = iris$Species, x = iris[,-5], wf = "gaussian", bw = 10, subset = subset, na.action = na.omit), "group virginica is empty")
  expect_warning(fit2 <- ossvm(y = iris$Species, x = iris[,-5], wf = "gaussian", bw = 10, subset = 11:60), "group virginica is empty")
  expect_equal(fit1[-c(1:2,31)], fit2[-c(1:2,31)])
  expect_equivalent(fit1[2], fit2[2])
})
test_that("ossvm: try all weight functions", {
# For every weight function the same four fits are built:
#   fit1: formula interface + wf as a character string
#   fit2: formula interface + wf as a generator call
#   fit3: x/y interface     + wf as a character string
#   fit4: x/y interface     + wf as a generator call
# They must agree up to interface-specific components: index 1 is the call,
# index 4 the wf (string vs. closure), and the formula fits carry one extra
# component at index 32 (presumably the terms object - TODO confirm) that
# the x/y fits lack, hence the -c(1,2,32) vs. -c(1,2) exclusions.
# Predictions are taken under a common seed because fitting/predicting with
# probability = TRUE is not deterministic otherwise (presumably the internal
# probability-model cross-validation - confirm against the svm docs).
# Generator calls deliberately mix named and positional arguments.
## gaussian, fixed bandwidth bw = 2
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 2, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(2), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "gaussian", bw = 2, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = gaussian(2), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## gaussian, bw = 5 restricted to the k = 30 nearest neighbours
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(bw = 5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "gaussian", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = gaussian(bw = 5, k = 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
# Disabled newdata variants kept for reference:
# pred1 <- predict(fit1, newdata = iris[sample(1:150),], probability = TRUE, decision.values = TRUE)
# pred2 <- predict(fit2, newdata = iris[1:10,], probability = TRUE, decision.values = TRUE)
# pred3 <- predict(fit3, newdata = iris[1,-5], probability = TRUE, decision.values = TRUE)
# pred4 <- predict(fit4, newdata = iris[1,-5], probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## epanechnikov, bw = 5, k = 30
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "epanechnikov", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = epanechnikov(bw = 5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "epanechnikov", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = epanechnikov(5, 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## rectangular, bw = 5, k = 30
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "rectangular", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(bw = 5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "rectangular", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = rectangular(5, 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## triangular, bw = 5, k = 30
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "triangular", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = triangular(5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "triangular", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = triangular(5, 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## biweight, fixed bandwidth only (biweight has compact support, no k used here)
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "biweight", bw = 5, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = biweight(5), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "biweight", bw = 5, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = biweight(5), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## optcosine, bw = 5, k = 30
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "optcosine", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = optcosine(5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "optcosine", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = optcosine(5, 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
## cosine, bw = 5, k = 30
fit1 <- ossvm(formula = Species ~ ., data = iris, wf = "cosine", bw = 5, k = 30, probability = TRUE)
fit2 <- ossvm(formula = Species ~ ., data = iris, wf = cosine(5, k = 30), probability = TRUE)
fit3 <- ossvm(x = iris[,-5], y = iris$Species, wf = "cosine", bw = 5, k = 30, probability = TRUE)
fit4 <- ossvm(x = iris[,-5], y = iris$Species, wf = cosine(5, 30), probability = TRUE)
expect_equal(fit1[-c(1,4)], fit2[-c(1,4)])
expect_equal(fit3[-c(1,4)], fit4[-c(1,4)])
expect_equal(fit2[-c(1,2,32)], fit4[-c(1,2)])
expect_equivalent(fit2[2], fit4[2])
set.seed(120)
pred1 <- predict(fit1, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred2 <- predict(fit2, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred3 <- predict(fit3, probability = TRUE, decision.values = TRUE)
set.seed(120)
pred4 <- predict(fit4, probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2)
expect_equal(pred3, pred4)
expect_equal(pred2, pred4)
})
test_that("ossvm: local solution with rectangular window function and large bw and global solution coincide", {
  data(iris)
  library(e1071)

  ## without newdata: a rectangular window with a bandwidth large enough to
  ## cover all training points makes the observation-specific fit coincide
  ## with the unweighted (global) SVM
  fit_wsvm  <- wsvm(formula = Species ~ ., data = iris)
  fit_ossvm <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(20))
  fit_svm   <- svm(Species ~ ., data = iris)
  p_wsvm  <- predict(fit_wsvm)
  p_ossvm <- predict(fit_ossvm)
  p_svm   <- predict(fit_svm)
  expect_equal(p_wsvm, p_ossvm)
  expect_equal(p_wsvm, p_svm)

  ## with newdata, including probabilities and decision values
  fit_wsvm  <- wsvm(formula = Species ~ ., data = iris, probability = TRUE)
  fit_ossvm <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(8), probability = TRUE)
  fit_svm   <- svm(Species ~ ., data = iris, probability = TRUE)
  p_wsvm  <- predict(fit_wsvm, newdata = iris, probability = TRUE, decision.values = TRUE)
  p_ossvm <- predict(fit_ossvm, newdata = iris, probability = TRUE, decision.values = TRUE)
  p_svm   <- predict(fit_svm, newdata = iris, probability = TRUE, decision.values = TRUE)
  expect_equal(p_wsvm, p_ossvm)
  expect_equal(p_wsvm, p_svm)
  expect_equal(p_ossvm, p_svm)
})
test_that("ossvm: labels vector set correctly",{
# ossvm at a query point must reproduce a wsvm fit that uses the same
# observation weights, regardless of the class order in the training data.
# For permuted data the probability columns and decision values of the
# ossvm fit have to be reordered (and decision values sign-flipped when the
# pairwise class order is reversed) before comparison.
## all classes, correct order
data(iris)
iris[,1:4] <- scale(iris[,1:4])
k <- 100
n <- 90
x <- as.matrix(iris[,-5])
# Euclidean distances from observation n to all observations
dist <- sqrt(colSums((t(x) - x[n,])^2))
# rectangular kNN weights around observation n
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equal(pred1, pred2) # not exactly equal
## all classes, but different order 1
data(iris)
iris[,1:4] <- scale(iris[,1:4])
perm <- 150:1 ## 3, 2, 1
iris <- iris[perm,]
k = 100
n <- 90
x <- as.matrix(iris[,-5])
dist <- sqrt(colSums((t(x) - x[n,])^2))
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equivalent(pred1, pred2)
# class order fully reversed -> probability columns reversed, decision
# values reversed and sign-flipped
expect_equal(attr(pred1, "probabilities"), attr(pred2, "probabilities")[,3:1, drop = FALSE]) # not exactly equal
expect_equal(as.vector(attr(pred1, "decision.values")), -as.vector(attr(pred2, "decision.values")[,3:1]))
## all classes, but different order 2
data(iris)
iris[,1:4] <- scale(iris[,1:4])
perm <- c(150:101,1:100) ## 3, 1, 2
iris <- iris[perm,]
k = 100
n <- 90
x <- as.matrix(iris[,-5])
dist <- sqrt(colSums((t(x) - x[n,])^2))
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equivalent(pred1, pred2)
# cyclic class order (3,1,2): permute columns; two of the three pairwise
# decision values flip sign
expect_equal(attr(pred1, "probabilities"), attr(pred2, "probabilities")[,c(3,1,2), drop = FALSE])
expect_equivalent(c(1,-1,-1)*attr(pred1, "decision.values")[c(3,1,2)], as.vector(attr(pred2, "decision.values")))
## 2 classes, correct order
# with k = 50 only two classes receive positive weight around observation n
data(iris)
iris[,1:4] <- scale(iris[,1:4])
k <- 50
n <- 90
x <- as.matrix(iris[,-5])
dist <- sqrt(colSums((t(x) - x[n,])^2))
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equivalent(pred1, pred2)
# match probability columns by name; drop the NA decision values of the
# pair involving the locally absent class
expect_equal(attr(pred1, "probabilities"), attr(pred2, "probabilities")[,colnames(attr(pred1, "probabilities")), drop = FALSE])
expect_equal(as.numeric(attr(pred1, "decision.values")), attr(pred2, "decision.values")[,!is.na(attr(pred2, "decision.values"))])
## 2 classes, but different order 1
data(iris)
iris[,1:4] <- scale(iris[,1:4])
perm <- 150:1
iris <- iris[perm,]
k <- 40
n <- 99
x <- as.matrix(iris[,-5])
dist <- sqrt(colSums((t(x) - x[n,])^2))
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equivalent(pred1, pred2)
expect_equal(attr(pred1, "probabilities"), attr(pred2, "probabilities")[,colnames(attr(pred1, "probabilities")), drop = FALSE])
# reversed order flips the sign of the remaining pairwise decision value
expect_equal(as.numeric(attr(pred1, "decision.values")), -attr(pred2, "decision.values")[!is.na(attr(pred2, "decision.values"))])
## 2 classes, but different order 2
data(iris)
iris[,1:4] <- scale(iris[,1:4])
perm <- c(150:101,1:100)
iris <- iris[perm,]
k <- 50
n <- 90
x <- as.matrix(iris[,-5])
dist <- sqrt(colSums((t(x) - x[n,])^2))
w <- rectangular(k = k)(dist)
fit1 <- wsvm(Species ~ ., data = iris, case.weights = w/sum(w) * 150, probability = TRUE, scale = FALSE)
set.seed(120)
pred1 <- predict(fit1, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
fit2 <- ossvm(Species ~ ., data = iris, probability = TRUE, wf = "rectangular", k = k, scale = FALSE)
set.seed(120)
pred2 <- predict(fit2, newdata = iris[n,], probability = TRUE, decision.values = TRUE)
expect_equivalent(pred1, pred2)
expect_equal(attr(pred1, "probabilities"), attr(pred2, "probabilities")[,colnames(attr(pred1, "probabilities")), drop = FALSE])
expect_equal(as.numeric(attr(pred1, "decision.values")), attr(pred2, "decision.values")[!is.na(attr(pred2, "decision.values"))])
})
test_that("ossvm: arguments related to weighting misspecified", {
  ## bw and k alongside a ready-made generator are redundant and only warned about
  expect_that(m1 <- ossvm(Species ~ ., data = iris, wf = gaussian(0.5), k = 30, bw = 0.5),
    gives_warning(c("argument 'k' is ignored", "argument 'bw' is ignored")))
  m2 <- ossvm(Species ~ ., data = iris, wf = gaussian(0.5))
  expect_equal(m1[-1], m2[-1])   # identical up to the stored call
  expect_that(m1 <- ossvm(Species ~ ., data = iris, wf = gaussian(0.5), bw = 0.5),
    gives_warning("argument 'bw' is ignored"))
  m2 <- ossvm(Species ~ ., data = iris, wf = gaussian(0.5))
  expect_equal(m1[-1], m2[-1])
  expect_equal(m1$k, NULL)
  expect_equal(m1$nn.only, NULL)
  expect_equal(m1$bw, 0.5)
  expect_equal(m1$adaptive, FALSE)

  ## a plain user-supplied weight function carries no bw/k metadata at all
  expect_that(m1 <- ossvm(Species ~ ., data = iris, wf = function(x) exp(-x), bw = 0.5, k = 30),
    gives_warning(c("argument 'k' is ignored", "argument 'bw' is ignored")))
  expect_that(m2 <- ossvm(Species ~ ., data = iris, wf = function(x) exp(-x), k = 30),
    gives_warning("argument 'k' is ignored"))
  expect_equal(m1[-1], m2[-1])
  expect_equal(m1$k, NULL)
  expect_equal(m1$nn.only, NULL)
  expect_equal(m1$bw, NULL)
  expect_equal(m1$adaptive, NULL)
  expect_that(m1 <- ossvm(Species ~ ., data = iris, wf = function(x) exp(-x), bw = 0.5),
    gives_warning("argument 'bw' is ignored"))
  m2 <- ossvm(Species ~ ., data = iris, wf = function(x) exp(-x))
  expect_equal(m1[-1], m2[-1])
  expect_equal(m1$k, NULL)
  expect_equal(m1$nn.only, NULL)
  expect_equal(m1$bw, NULL)
  expect_equal(m1$adaptive, NULL)

  ## forgotten quotes: wf = gaussian passes the generator itself; the fit is
  ## built but prediction fails because length(weights) != nrow(x)
  m <- ossvm(formula = Species ~ ., data = iris, wf = gaussian)
  expect_error(predict(m))

  ## neither bw nor k supplied
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = gaussian()),
    throws_error("either 'bw' or 'k' have to be specified"))
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = gaussian(), k = 10),
    throws_error("either 'bw' or 'k' have to be specified"))
  expect_that(ossvm(Species ~ ., data = iris),
    throws_error("either 'bw' or 'k' have to be specified"))

  ## negative bandwidth
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = -5),
    throws_error("'bw' must be positive"))
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "cosine", k = 10, bw = -50),
    throws_error("'bw' must be positive"))

  ## vector-valued bw: only the first element is used, with a warning
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = rep(1, nrow(iris))),
    gives_warning("only first element of 'bw' used"))

  ## k <= 0
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "gaussian", k = -7, bw = 50),
    throws_error("'k' must be positive"))
  ## k too small: disabled check (kept for reference)
  #fit <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", k = 5, bw = 0.005)
  #expect_equal(length(is.na(predict(fit)$class)), 150)
  ## k larger than the number of observations
  expect_that(ossvm(formula = Species ~ ., data = iris, k = 250, wf = "gaussian", bw = 50),
    throws_error("'k' is larger than 'n'"))
  ## vector-valued k: only the first element is used, with a warning
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "gaussian", k = rep(50, nrow(iris))),
    gives_warning("only first element of 'k' used"))
})
test_that("ossvm: weighting schemes work", {
  ## weight function with finite support (rectangular)
  # fixed bandwidth only
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "rectangular", bw = 5)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(bw = 5))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])   # equal up to call and wf
  expect_equal(m_str$bw, 5)
  expect_equal(m_str$k, NULL)
  expect_equal(m_str$nn.only, NULL)
  expect_true(!m_str$adaptive)
  # adaptive bandwidth from the k nearest neighbours
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "rectangular", k = 50)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(k = 50))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$k, 50)
  expect_equal(m_str$bw, NULL)
  expect_true(m_str$nn.only)
  expect_true(m_str$adaptive)
  # fixed bandwidth restricted to the k nearest neighbours
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "rectangular", bw = 5, k = 50)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = rectangular(bw = 5, k = 50))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$bw, 5)
  expect_equal(m_str$k, 50)
  expect_true(m_str$nn.only)
  expect_true(!m_str$adaptive)
  # nn.only is meaningless without k
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "rectangular", bw = 5, nn.only = TRUE),
    gives_warning("argument 'nn.only' is ignored"))
  # with both bw and k given, nn.only = FALSE is contradictory
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "rectangular", bw = 5, k = 50, nn.only = FALSE),
    throws_error("if 'bw' and 'k' are given argument 'nn.only' must be TRUE"))

  ## weight function with infinite support (gaussian)
  # fixed bandwidth only
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 0.5)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(bw = 0.5))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$bw, 0.5)
  expect_equal(m_str$k, NULL)
  expect_equal(m_str$nn.only, NULL)
  expect_true(!m_str$adaptive)
  # adaptive bandwidth, weights restricted to the k nearest neighbours
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", k = 50)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(k = 50))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$bw, NULL)
  expect_equal(m_str$k, 50)
  expect_equal(m_str$nn.only, TRUE)
  expect_true(m_str$adaptive)
  # adaptive bandwidth, weights over all observations
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", k = 50, nn.only = FALSE)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(k = 50, nn.only = FALSE))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$bw, NULL)
  expect_equal(m_str$k, 50)
  expect_equal(m_str$nn.only, FALSE)
  expect_true(m_str$adaptive)
  # fixed bandwidth restricted to the k nearest neighbours
  m_str <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 1, k = 50)
  m_gen <- ossvm(formula = Species ~ ., data = iris, wf = gaussian(bw = 1, k = 50))
  expect_equal(m_str[-c(1,4)], m_gen[-c(1,4)])
  expect_equal(m_str$bw, 1)
  expect_equal(m_str$k, 50)
  expect_equal(m_str$nn.only, TRUE)
  expect_true(!m_str$adaptive)
  # with both bw and k given, nn.only = FALSE is contradictory
  expect_that(ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 1, k = 50, nn.only = FALSE),
    throws_error("if 'bw' and 'k' are given argument 'nn.only' must be TRUE"))
})
#=================================================================================================================
# Tests for the predict() method of ossvm fits.
context("predict.ossvm")
test_that("predict.ossvm works correctly with formula and data.frame interface and with missing newdata", {
  data(iris)
  ran <- sample(1:150, 100)
  ## formula, data
  fit <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 2, subset = ran, probability = TRUE)
  pred <- predict(fit)
  # predicting explicitly on the training rows must match predict() without
  # newdata (fix: pred2 was previously computed but never asserted)
  pred2 <- predict(fit, newdata = iris[ran,])
  expect_equal(pred, pred2)
  expect_equal(names(pred), rownames(iris)[ran])
  ## formula, data, newdata
  pred <- predict(fit, newdata = iris[-ran,], probability = TRUE, decision.values = TRUE)
  expect_equal(names(pred), rownames(iris)[-ran])
  expect_equal(rownames(attr(pred, "probabilities")), rownames(iris)[-ran])
  expect_equal(rownames(attr(pred, "decision.values")), rownames(iris)[-ran])
  ## y, x
  fit <- ossvm(x = iris[ran,-5], y = iris$Species[ran], wf = "gaussian", bw = 2, probability = TRUE)
  pred <- predict(fit)
  expect_equal(names(pred), rownames(iris)[ran])
  ## y, x, newdata
  pred <- predict(fit, newdata = iris[-ran,-5], probability = TRUE, decision.values = TRUE)
  expect_equal(names(pred), rownames(iris)[-ran])
  expect_equal(rownames(attr(pred, "probabilities")), rownames(iris)[-ran])
  expect_equal(rownames(attr(pred, "decision.values")), rownames(iris)[-ran])
})
test_that("predict.ossvm: retrieving training data works", {
  data(iris)

  ## no subset argument
  # formula interface
  model <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 2)
  from_stored  <- predict(model)
  from_newdata <- predict(model, newdata = iris)
  expect_equal(from_stored, from_newdata)
  # x/y interface
  model <- ossvm(x = iris[, -5], y = iris$Species, wf = "gaussian", bw = 2)
  from_stored  <- predict(model)
  from_newdata <- predict(model, newdata = iris[, -5])
  expect_equal(from_stored, from_newdata)

  ## with a subset of the rows
  train_idx <- sample(1:150, 100)
  # formula interface
  model <- ossvm(formula = Species ~ ., data = iris, wf = "gaussian", bw = 2, subset = train_idx)
  from_stored  <- predict(model)
  from_newdata <- predict(model, newdata = iris[train_idx, ])
  expect_equal(from_stored, from_newdata)
  # x/y interface
  model <- ossvm(x = iris[train_idx, -5], y = iris$Species[train_idx], wf = "gaussian", bw = 2)
  from_stored  <- predict(model)
  from_newdata <- predict(model, newdata = iris[train_idx, -5])
  expect_equal(from_stored, from_newdata)
})
test_that("predict.ossvm works with missing classes in the training data", {
data(iris)
ran <- sample(1:150,100)
# Training on rows 1:100 drops virginica entirely; this must only warn
expect_that(fit <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 10, subset = 1:100, probability = TRUE), gives_warning("group virginica is empty"))
expect_equal(length(fit$y), 100)
expect_equal(nrow(fit$x), 100)
# only two classes observed during fitting
expect_equal(fit$nclass, 2)
# NOTE(review): the fit uses subset = 1:100 but prediction uses iris[-ran,]
# (50 random rows, possibly overlapping the training rows); presumably
# intentional since only output dimensions are checked here - confirm.
pred <- predict(fit, newdata = iris[-ran,], probability = TRUE, decision.values = TRUE)
# predictions keep all three factor levels, but probabilities / decision
# values only cover the two classes present at training time
expect_equal(nlevels(pred), 3)
expect_equal(ncol(attr(pred, "probabilities")), 2)
expect_equal(ncol(attr(pred, "decision.values")), 1)
})
test_that("predict.ossvm works with one single predictor variable", {
  data(iris)
  train_idx <- sample(1:150, 100)
  # model built on a single feature column
  model <- ossvm(Species ~ Petal.Width, data = iris, wf = "gaussian", bw = 2,
    subset = train_idx, probability = TRUE)
  expect_equal(ncol(model$x), 1)
  # smoke test: prediction on the held-out rows must not error
  predict(model, newdata = iris[-train_idx, ], probability = TRUE, decision.values = TRUE)
})
test_that("predict.ossvm works with one single test observation", {
  data(iris)
  train_idx <- sample(1:150, 100)
  model <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 2,
    subset = train_idx, probability = TRUE)

  # helper building the expected one-element, named factor prediction
  expected_factor <- function(label, row) {
    out <- factor(label, levels = c("setosa", "versicolor", "virginica"))
    names(out) <- as.character(row)
    out
  }

  ## observation 5 (a setosa)
  pred <- predict(model, newdata = iris[5, ])
  expect_equal(length(pred), 1)
  expect_equal(pred, expected_factor("setosa", 5))
  pred <- predict(model, newdata = iris[5, ], probability = TRUE, decision.values = TRUE)
  expect_equal(length(pred), 1)
  expect_equal(dim(attr(pred, "probabilities")), c(1, 3))
  expect_equal(dim(attr(pred, "decision.values")), c(1, 3))

  ## observation 58 (a versicolor)
  pred <- predict(model, newdata = iris[58, ])
  expect_equal(length(pred), 1)
  expect_equal(pred, expected_factor("versicolor", 58))
  pred <- predict(model, newdata = iris[58, ], probability = TRUE, decision.values = TRUE)
  expect_equal(length(pred), 1)
  expect_equal(dim(attr(pred, "probabilities")), c(1, 3))
  expect_equal(dim(attr(pred, "decision.values")), c(1, 3))
})
test_that("predict.ossvm works with one single predictor variable and one single test observation", {
  data(iris)
  train_idx <- sample(1:150, 100)
  model <- ossvm(Species ~ Petal.Width, data = iris, wf = "gaussian", bw = 2,
    subset = train_idx, probability = TRUE)
  expect_equal(ncol(model$x), 1)
  # a one-row newdata must yield one prediction with 1 x 3 attribute matrices
  single <- predict(model, newdata = iris[5, ], probability = TRUE, decision.values = TRUE)
  expect_equal(length(single), 1)
  expect_equal(dim(attr(single, "probabilities")), c(1, 3))
  expect_equal(dim(attr(single, "decision.values")), c(1, 3))
})
test_that("predict.ossvm: NA handling in newdata works", {
  data(iris)
  train_idx <- sample(1:150, 100)
  iris_missing <- iris
  iris_missing[1:17, c(1, 3)] <- NA   # knock out two predictors in rows 1-17
  model <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 50,
    subset = train_idx, probability = TRUE)

  ## na.omit drops the 17 incomplete rows, preserving names of the rest
  pred <- predict(model, newdata = iris_missing, na.action = na.omit,
    probability = TRUE, decision.values = TRUE)
  expect_equal(length(pred), 133)
  expect_equal(names(pred), as.character(18:150))
  expect_equal(nrow(attr(pred, "probabilities")), 133)
  expect_equal(rownames(attr(pred, "probabilities")), as.character(18:150))
  expect_equal(nrow(attr(pred, "decision.values")), 133)
  expect_equal(rownames(attr(pred, "decision.values")), as.character(18:150))

  ## na.fail turns missing values into an error
  expect_that(predict(model, newdata = iris_missing, na.action = na.fail,
    probability = TRUE, decision.values = TRUE), throws_error("missing values in object"))
})
test_that("predict.ossvm: misspecified arguments", {
  data(iris)
  train_idx <- sample(1:150, 100)
  model <- ossvm(Species ~ ., data = iris, wf = "gaussian", bw = 2, subset = train_idx)
  # newdata that cannot be interpreted as observations must raise an error
  expect_error(predict(model, newdata = TRUE))
  expect_error(predict(model, newdata = -50:50))
})
|
873fad83dec87f8855fa8317c0bfc3029f20d826
|
454a2e5c87a170b9bcfe0fd2b11516b90dcc1b05
|
/tests/testthat/testrun.R
|
baecea7e86af6c081ed4fb1565090f449ded9b5b
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
test-mass-forker-org-1/CausalGrid
|
1bec395e2bb68d12cf3c1e4f87d15650b1adec15
|
1aba80502457c211dbfa2099fcef91f97a4fb74f
|
refs/heads/main
| 2023-06-03T04:20:03.314112
| 2021-06-23T18:43:50
| 2021-06-23T18:43:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,523
|
r
|
testrun.R
|
# Smoke/regression test script for CausalGrid::fit_estimate_partition.
# To run from the command line with load_all(): execute the code inside the
# first `if (FALSE)` block once; on subsequent runs just re-run its last line.
# NOTE(review): an earlier header also mentioned `do_load_all=T`, which no
# longer appears in this script - the flag seems obsolete.
library(testthat)
library(rprojroot)
testthat_root_dir <- rprojroot::find_testthat_root_file() # R CMD check doesn't copy over git and RStudio proj file
if (FALSE) { # Run manually to debug
  library(rprojroot)
  testthat_root_dir <- rprojroot::find_testthat_root_file()
  debugSource(paste0(testthat_root_dir, "/testrun.R"))
}
library(CausalGrid)

set.seed(1337)

context("Test Run")

source(paste0(testthat_root_dir, "/../dgps.R"))

data <- mix_data_d(n=1000)
breaks_per_dim <- list(c(0.5), c(0))

# Does Bumping work -------------------
# Smoke tests: all four doCV / incl_comp_in_pick combinations must run without error.
ret_bmp1 <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, bump_samples=2, bump_complexity=list(doCV=FALSE, incl_comp_in_pick=FALSE))
ret_bmp2 <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, bump_samples=2, bump_complexity=list(doCV=TRUE, incl_comp_in_pick=FALSE))
ret_bmp3 <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, bump_samples=2, bump_complexity=list(doCV=FALSE, incl_comp_in_pick=TRUE))
ret_bmp4 <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, bump_samples=2, bump_complexity=list(doCV=TRUE, incl_comp_in_pick=TRUE))

# Make sure partition is fine with 0 obs ----------------
X_range <- get_X_range(data$X)
ex_part <- add_partition_split(grid_partition(X_range), partition_split(1, 0.5))
ex_fact <- predict(ex_part, matrix(0.1, ncol=2, nrow=2))
test_that("# of levels will be full even if they don't appear in data.", {
  expect_equal(length(levels(ex_fact)), 2)
})

# Just y ---------------
ret1 <- fit_estimate_partition(data$y, data$X, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim)
print(ret1$partition)
test_that("We get OK results (OOS)", {
  expect_equal(ret1$partition$nsplits_by_dim, c(1,1))
})

# Include d ---------------
ret1d <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim)
print(ret1d$partition)
test_that("We get OK results (OOS)", {
  expect_equal(ret1d$partition$nsplits_by_dim, c(1,1))
})
test_any_sign_effect(ret1d, check_negative=TRUE, method="fdr")
#test_any_sign_effect(ret1d, check_negative=TRUE, method="sim_mom_ineq") #the sim produces treatment effect with 0 std err, so causes problems

# Alternative control methods (smoke tests; split counts not asserted yet)
ret2d <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, ctrl_method="all")
print(ret2d$partition)
#TODO: Should I check this?
#test_that("We get OK results (OOS)", {
#  expect_equal(ret2d$partition$nsplits_by_dim, c(1,1))
#})
ret3d <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=3, verbosity=0, breaks_per_dim=breaks_per_dim, ctrl_method="LassoCV")
print(ret3d$partition)
#TODO: Should I check this?
#test_that("We get OK results (OOS)", {
#  expect_equal(ret3d$partition$nsplits_by_dim, c(1,1))
#})
ret4d <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, ctrl_method="RF")
print(ret4d$partition)
#TODO: Should I check this?
#test_that("We get OK results (OOS)", {
#  expect_equal(ret4d$partition$nsplits_by_dim, c(1,1))
#})
ret1db <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, bump_samples=2)
ret1dc <- fit_estimate_partition(data$y, data$X, data$d, cv_folds=2, verbosity=0, breaks_per_dim=breaks_per_dim, importance_type="single")

# Test the output/verbosity ------------------------------------
# Duplicate X2 as a third dimension and re-fit at verbosity levels 2, 1, 0.
X_3 <- data$X
X_3$X3 <- data$X$X2
pot_break_points_3 <- breaks_per_dim
pot_break_points_3[[3]] <- breaks_per_dim[[2]]
print("---------------")
ret1dd <- fit_estimate_partition(data$y, X_3, data$d, cv_folds=2, verbosity=2, breaks_per_dim=pot_break_points_3, importance_type="interaction", bump_samples=3)
print("---------------")
ret1dd <- fit_estimate_partition(data$y, X_3, data$d, cv_folds=2, verbosity=1, breaks_per_dim=pot_break_points_3, importance_type="interaction", bump_samples=3)
print("---------------")
ret1dd <- fit_estimate_partition(data$y, X_3, data$d, cv_folds=2, verbosity=0, breaks_per_dim=pot_break_points_3, importance_type="interaction", bump_samples=3)
|
9513925bc9de0aa6e38b30560bf632f7dd67a263
|
6b32948c7241e204753cb88999c76cee323b4736
|
/TransMetaRare/R/SKAT_2Kernel_Ortho_Optimal_Each_Q_GridRho1.R
|
b9964a2e8fedc5b1112a1c32091ad7cb24d4975c
|
[] |
no_license
|
shijingc/TransMetaRare
|
e9a0e74fef68bdfb59f35741b1e777afa23b1e44
|
5206b4a880c07b2d5df3b8d26a4bf0d6f88d77a6
|
refs/heads/master
| 2020-03-19T10:24:07.058000
| 2018-06-06T18:05:12
| 2018-06-06T18:05:12
| 136,367,016
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,566
|
r
|
SKAT_2Kernel_Ortho_Optimal_Each_Q_GridRho1.R
|
# Compute p-values for the orthogonalized two-kernel SKAT statistics on a
# grid of rho2 values, for rho1 fixed at 0 and at 1, plus the correlation
# matrix of the copula-resampling statistics.
#
# Args:
#   Q.all: statistic matrix; row 1 = observed statistics, rows 2..n.q =
#     bootstrap replicates, last n.Resampling.Copula rows = copula-resampling
#     replicates. Columns 1..n.r correspond to the rho2 grid with rho1 = 0,
#     columns (n.r+1)..(2*n.r) with rho1 = 1.
#   rho2: grid of kernel-mixing weights (`r.corr` below).
#   rho1: rho1 grid. NOTE(review): the argument is not otherwise used; the
#     column layout of Q.all hard-codes the grid c(0, 1) - confirm callers.
#   z1.*/z2.*: projection vectors for the two kernels (rho1 = 0 / rho1 = 1).
#   Z1.*/Z2.*: corresponding kernel matrices.
#   Phi.tld: covariance matrix; Phi.tld.hf: its symmetric square root.
#   n.Resampling.Copula: number of copula-resampling rows at the end of Q.all.
#
# Returns: list(pmin = row-wise minimum p-value,
#               pval = matrix of p-values (one column per (rho1, rho2) pair),
#               Sigma = Spearman correlation of the copula-resampling rows).
SKAT_2Kernel_Ortho_Optimal_Each_Q_GridRho1 <-
function( Q.all, rho2, rho1, z1.rho1is0, z2.rho1is0, Z1.rho1is0, Z2.rho1is0, z1.rho1is1, z2.rho1is1, Z1.rho1is1, Z2.rho1is1, Phi.tld, Phi.tld.hf, n.Resampling.Copula){
  n.r <- length(rho2)
  n.q <- nrow(Q.all) - n.Resampling.Copula
  n.total <- nrow(Q.all)
  # 2 * n.r columns: the rho1 grid has length 2 (rho1 = 0, then rho1 = 1)
  pval.davis <- matrix(0, nrow = n.q, ncol = 2 * n.r)

  for (i in seq_len(n.r)) {
    Q.rho1is0 <- Q.all[1, i]
    Q.rho1is1 <- Q.all[1, (i + n.r)]
    # BUG FIX: the original `if (n.q > 1) a; b` guarded only the first
    # assignment, so Q.all[2:1, ] was evaluated unconditionally when
    # n.q == 1 (2:n.q counts down). Both bootstrap vectors are needed,
    # and used, only when bootstrap rows exist.
    if (n.q > 1) {
      Q.boot.rho1is0 <- Q.all[2:n.q, i]
      Q.boot.rho1is1 <- Q.all[2:n.q, (i + n.r)]
    }
    r.corr <- rho2[i]
    if (r.corr == 0) {
      # pure first-kernel statistic
      Phi.rho1is0 <- t(z1.rho1is0) %*% Phi.tld %*% z1.rho1is0
      Phi.rho1is1 <- t(z1.rho1is1) %*% Phi.tld %*% z1.rho1is1
      if (n.q > 1) {
        pval.out.rho1is1 <- SKAT:::Get_Davies_PVal(Q.rho1is1/2, Phi.rho1is1, Q.boot.rho1is1/2)
        pval.davis[, (i + n.r)] <- c(pval.out.rho1is1$p.value, pval.out.rho1is1$p.value.resampling)
      } else {
        pval.davis[, (i + n.r)] <- SKAT:::Get_Davies_PVal(Q.rho1is1/2, Phi.rho1is1)$p.value
      }
    } else if (r.corr == 1) {
      # pure second-kernel (burden-type) statistic: use the Liu approximation
      Phi.rho1is0 <- t(z2.rho1is0) %*% Phi.tld %*% z2.rho1is0
      Phi.rho1is1 <- t(z2.rho1is1) %*% Phi.tld %*% z2.rho1is1
      a <- as.matrix(sum(Phi.rho1is1))
      if (n.q > 1) {
        pval.out.rho1is1 <- SKAT:::Get_Liu_PVal(Q.rho1is1/2, a, Q.boot.rho1is1/2)
        pval.davis[, (i + n.r)] <- c(pval.out.rho1is1$p.value, pval.out.rho1is1$p.value.resampling)
      } else {
        pval.davis[, (i + n.r)] <- SKAT:::Get_Liu_PVal(Q.rho1is1/2, a)$p.value
      }
    } else {
      # mixture of the two kernels
      R.M.rho1is0 <- r.corr * Z2.rho1is0 + (1 - r.corr) * Z1.rho1is0
      L.rho1is0 <- chol(R.M.rho1is0)
      Phi.rho1is0 <- L.rho1is0 %*% (Phi.tld %*% t(L.rho1is0))
      R.M.rho1is1 <- r.corr * Z2.rho1is1 + (1 - r.corr) * Z1.rho1is1
      Phi.rho1is1 <- Phi.tld.hf %*% R.M.rho1is1 %*% Phi.tld.hf
      if (n.q > 1) {
        pval.out.rho1is1 <- SKAT:::Get_Davies_PVal(Q.rho1is1/2, Phi.rho1is1, Q.boot.rho1is1/2)
        pval.davis[, (i + n.r)] <- c(pval.out.rho1is1$p.value, pval.out.rho1is1$p.value.resampling)
      } else {
        pval.davis[, (i + n.r)] <- SKAT:::Get_Davies_PVal(Q.rho1is1/2, Phi.rho1is1)$p.value
      }
    }
    # the rho1 = 0 columns always use the Davies approximation
    if (n.q > 1) {
      pval.out.rho1is0 <- SKAT:::Get_Davies_PVal(Q.rho1is0/2, Phi.rho1is0, Q.boot.rho1is0/2)
      pval.davis[, i] <- c(pval.out.rho1is0$p.value, pval.out.rho1is0$p.value.resampling)
    } else {
      pval.davis[, i] <- SKAT:::Get_Davies_PVal(Q.rho1is0/2, Phi.rho1is0)$p.value
    }
  }

  pmin <- apply(pval.davis, 1, min)
  # Rank correlation of the copula-resampling statistics. A Kendall variant
  # was tried previously and left disabled in favour of Spearman.
  # NOTE(review): if n.Resampling.Copula == 0 the index (n.q+1):n.total is a
  # decreasing sequence and Sigma is meaningless - callers presumably always
  # resample; confirm.
  Sigma <- cor(Q.all[(n.q + 1):n.total, ], method = "spearman")
  list(pmin = pmin, pval = pval.davis, Sigma = Sigma)
}
|
16c4ccc96245e9886290613430d5943e59b2811e
|
40c65fff3847662ce46d2afd73acf8b68b785107
|
/tests/testthat/test-check_timestep_by_date.R
|
7df6e15763cd50dbd48c7f1934545da1596cd6ca
|
[
"MIT"
] |
permissive
|
epinowcast/epinowcast
|
b4d4562603938e9a184d3450d9387f92908cd6bc
|
98ec6dbe3c84ecbe3d55ce988e30f8e7cc6b776d
|
refs/heads/main
| 2023-09-05T18:19:10.985900
| 2023-09-05T12:13:49
| 2023-09-05T12:13:49
| 422,611,952
| 23
| 5
|
NOASSERTION
| 2023-09-14T09:57:09
| 2021-10-29T14:47:06
|
R
|
UTF-8
|
R
| false
| false
| 2,714
|
r
|
test-check_timestep_by_date.R
|
test_that("check_timestep_by_date() handles dates and groups correctly", {
  # Build a balanced panel: two groups (A and B), five consecutive report
  # dates, and ten consecutive reference dates per report date -- everything
  # on a clean daily (1-day) timestep.
  obs <- data.table::data.table(
    .group = c(rep("A", 50), rep("B", 50)),
    report_date = as.Date(rep(
      rep(seq(as.Date("2020-01-01"), by = "day", length.out = 5), each = 10), 2
    ), origin = "1970-01-01"),
    reference_date = as.Date(
      rep(
        replicate(5, seq(as.Date("2020-01-01"), by = "day", length.out = 10)),
        2
      ), origin = "1970-01-01"
    )
  )
  # A clean daily timestep should pass silently.
  expect_silent(
    check_timestep_by_date(obs)
  )
  # Append a duplicate (group, report_date, reference_date) row; the check
  # should flag the duplicated date.
  obs <- rbind(
    obs,
    data.table::data.table(
      .group = "A",
      report_date = as.Date("2020-01-01"),
      reference_date = as.Date("2020-01-01")
    )
  )
  expect_error(
    check_timestep_by_date(obs),
    "report_date has a duplicate date. Please remove duplicate dates."
  )
  # Drop the duplicate row again, then move one report_date a whole year out
  # of sequence so the 1-day timestep is broken.
  obs <- obs[-nrow(obs)]
  obs[1, report_date := as.Date("2019-01-02")]
  expect_error(
    check_timestep_by_date(obs),
    "report_date does not have the specified timestep of 1 day\\(s\\)"
  )
  # Restore the date, then set rows 1 and 6 to the same report_date so a
  # duplicate appears within a single group.
  # NOTE(review): this raises the duplicate-date error rather than a
  # group-specific one -- confirm that is the intended message.
  obs[1, report_date := as.Date("2020-01-01")]
  obs[c(1, 6), report_date := as.Date("2020-01-02")]
  expect_error(
    check_timestep_by_date(obs),
    "report_date has a duplicate date. Please remove duplicate dates."
  )
  # The same data without the group column should fail in the same way.
  # NOTE(review): `:=` modifies `obs` by reference, so `obs_no_group` and
  # `obs` are the same data.table object after this line.
  obs_no_group <- obs[, .group := NULL]
  expect_error(
    check_timestep_by_date(obs_no_group),
    "report_date has a duplicate date. Please remove duplicate dates."
  )
})
test_that("check_timestep_by_date() handles insufficient data correctly", {
  # A single observation cannot define a timestep at all.
  obs_single_date <- data.table::data.table(
    .group = "A",
    report_date = as.Date("2020-01-01"),
    reference_date = as.Date("2020-01-01")
  )
  # Expect an explicit "not enough data" error, not a silent pass.
  expect_error(
    check_timestep_by_date(obs_single_date),
    "There must be at least two observations"
  )
  # Two rows with identical dates: treated as duplicates rather than as
  # insufficient data.
  obs_identical_dates <- data.table::data.table(
    .group = c("A", "A"),
    report_date = c(as.Date("2020-01-01"), as.Date("2020-01-01")),
    reference_date = c(as.Date("2020-01-01"), as.Date("2020-01-01"))
  )
  expect_error(
    check_timestep_by_date(obs_identical_dates),
    "report_date has a duplicate date. Please remove duplicate dates."
  )
})
|
17dc5ea6d5260c0c694c53ea6b658750684ad0d0
|
c459dd32d88158cb064c3af2bc2ea8c7ab77c667
|
/tumor_subcluster/calculate_scores/calculate_Epithelial_scores_EMTmoduledown_wPT.R
|
b0e73beabd47246feb991e3918e0a1f2d1f0c606
|
[] |
no_license
|
ding-lab/ccRCC_snRNA_analysis
|
d06b8af60717779671debe3632cad744467a9668
|
ac852b3209d2479a199aa96eed3096db0b5c66f4
|
refs/heads/master
| 2023-06-21T15:57:54.088257
| 2023-06-09T20:41:56
| 2023-06-09T20:41:56
| 203,657,413
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,259
|
r
|
calculate_Epithelial_scores_EMTmoduledown_wPT.R
|
# Compute a per-subcluster "epithelial" score: the mean of row-scaled average
# expression over a filtered EMT-module-down / epithelial-PT DEG set, for
# tumor and PT subclusters with enough cells, written out as a TSV.
# set up libraries and output directory -----------------------------------
## set working directory
# NOTE(review): hard-coded user path + setwd() makes the script machine
# specific; consider a project-relative path.
dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/"
setwd(dir_base)
source("./ccRCC_snRNA_analysis/load_pkgs.R")
source("./ccRCC_snRNA_analysis/functions.R")
source("./ccRCC_snRNA_analysis/variables.R")
## set run id (date-stamped, versioned output folder name)
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory (makeOutDir() is a project helper from functions.R)
dir_out <- paste0(makeOutDir(), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input the average expression calculated (SCT); genes in column V1,
## one column per subcluster
avgexp_df <- fread(input = "./Resources/Analysis_Results/average_expression/avgeexp_tumorPTLOH_sct_data_bycluster_rm_doublets_on_katmai/20210903.v1/AverageExpression_ByTumorPTLOHSubcluster.20210903.v1.tsv", data.table = F)
## input the genes to score
genes4score_df <- fread(data.table = F, input = "./Resources/Analysis_Results/findmarkers/tumor_subclusters/filter_degs/filter_tumormanualcluster_EMT_degs/20210908.v1/EMTModuleDown.EpithelialPT_DEGs.Filtered.tsv")
## input cell number per cluster (used to drop under-populated clusters)
cellnumber_percluster_df <- fread(data.table = F, input = "./Resources/Analysis_Results/tumor_subcluster/count/count_cellnumber_per_manual_cluster_rm_doublet/20210805.v1/CellNumberPerTumorManualCluster.20210805.v1.tsv")
# preprocess --------------------------------------------------------------
## keep clusters with at least 50 cells; translate cluster ids into the
## column-name form used by the expression table ("-" becomes ".")
cluster_pass_df <- cellnumber_percluster_df %>%
  filter(Freq >= 50)%>%
  mutate(colname_exp = gsub(x = id_cluster_uniq,pattern = "\\-", replacement = "."))
genes4score <- genes4score_df$genesymbol_deg
# format expression data --------------------------------------------------
## long format: one row per (gene, subcluster); keep passing clusters plus
## all PT clusters. NOTE(review): melt() is called without id.vars, so it
## relies on the default id-column detection -- confirm V1 is the only
## non-measure column.
plot_data_long_df <- avgexp_df %>%
  filter(V1 %in% genes4score) %>%
  melt() %>%
  mutate(id_bycluster_byaliquot = gsub(x = variable, pattern = "SCT.", replacement = "")) %>%
  dplyr::filter((id_bycluster_byaliquot %in% cluster_pass_df$colname_exp) | (grepl(x = id_bycluster_byaliquot, pattern = "PT"))) %>%
  mutate(easyid_column = str_split_fixed(string = id_bycluster_byaliquot, pattern = "_", n = 2)[,1]) %>%
  mutate(cluster_name = str_split_fixed(string = id_bycluster_byaliquot, pattern = "_", n = 2)[,2])
## filter out non-tumor and NA tumor cluster
plot_data_long_df <- plot_data_long_df %>%
  filter(!(cluster_name %in% c("", "CNA")))
## back to wide: genes x subclusters
plot_data_wide_df <- dcast(data = plot_data_long_df, formula = V1 ~ id_bycluster_byaliquot, value.var = "value")
plot_data_raw_mat <- as.matrix(plot_data_wide_df[,-1])
## add row names
rownames(plot_data_raw_mat) <- plot_data_wide_df$V1
## scale by row (z-score each gene across subclusters)
plot_data_mat <- t(apply(plot_data_raw_mat, 1, scale))
rownames(plot_data_mat) <- rownames(plot_data_raw_mat)
colnames(plot_data_mat) <- colnames(plot_data_raw_mat)
# calculate geneset score -------------------------------------------------
## mean scaled expression over the gene set, x100 for readability.
## NOTE(review): genes4score entries absent from the matrix would index NA
## rows here -- confirm all filtered DEGs are present in the expression table.
score_vec <- colMeans(plot_data_mat[genes4score,])*100
colanno_df <- data.frame(cluster_name = colnames(plot_data_mat), score = score_vec)
# write output ------------------------------------------------------------
file2write <- paste0(dir_out, "EpithelialScore.tsv")
write.table(x = colanno_df, file = file2write, quote = F, sep = "\t", row.names = F)
|
732082a0201dd741211c0e229e1f1b16776e7159
|
7e323ebc12c514729ff4df23ff7fe6d8d2c3e395
|
/R/na.mean.R
|
7084c3b54a358c23e142fba369166eba034aca17
|
[] |
no_license
|
einarhjorleifsson/fjolst2
|
05fc78df588d4d749983dde53123e28effaad9f6
|
a7385f789086e1e8c8e00452aa001e3dbc0259a2
|
refs/heads/master
| 2021-01-19T09:44:53.857406
| 2015-07-15T11:46:04
| 2015-07-15T11:48:54
| 39,079,259
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
na.mean.R
|
#' Element-wise mean of two vectors, falling back to the non-missing value
#'
#' Returns \code{(v1 + v2) / 2} where both inputs are present. Where exactly
#' one of the two values is \code{NA}, the other value is returned unchanged;
#' where both are \code{NA}, the result stays \code{NA}.
#'
#' @param v1 A numeric vector.
#' @param v2 A numeric vector of the same length as \code{v1}.
#' @return A vector the same length as the inputs.
na.mean <-
function(v1, v2)
{
	# Positions where exactly one of the two inputs is missing. Using logical
	# masks directly replaces the original 1:length() index bookkeeping and
	# behaves correctly for zero-length input.
	only.v1.na <- is.na(v1) & !is.na(v2)
	only.v2.na <- is.na(v2) & !is.na(v1)
	# Plain mean; NA wherever either input is NA, patched up below.
	v <- (v1 + v2)/2
	v[only.v1.na] <- v2[only.v1.na]
	v[only.v2.na] <- v1[only.v2.na]
	return(v)
}
|
a1ef3bedfebb982edc6bfc18d4968e9e07eb2266
|
d28025f79c4ec3bbf4e73c350b71ae99441dfbe7
|
/R/showData.R
|
c19e934ab0e9a47f855e9b0a6484ba00a47d6d75
|
[] |
no_license
|
BioversityCostaRica/ClimMob
|
1dcdbe99623290eb55a13129e34531577eae5787
|
dcf4a4405ca2eaaadd183481fa9899e86c29f734
|
refs/heads/master
| 2020-04-06T03:34:27.177664
| 2015-08-13T17:18:31
| 2015-08-13T17:18:31
| 40,671,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 886
|
r
|
showData.R
|
# Open a read-only browser window for `datalocal` using gWidgets widgets
# (gwindow/ggroup/gtable/gbutton -- presumably from gWidgets; confirm).
# `la` is the language index used to pick UI strings from the
# MultilanguageShowData.txt table shipped with the ClimMob package.
.showData <- function(la,datalocal)
{
# Read the texts messages from the file MultilanguageShowData.txt
tt <- as.matrix(read.delim(system.file("external/MultilanguageShowData.txt", package="ClimMob"), header=FALSE, encoding="UTF-8"))
colnames(tt) <- NULL
# Guard: show an error dialog and bail out when no data is available.
# NOTE(review): exists("datalocal") tests whether the name is bound, which is
# TRUE even when the argument was supplied but missing a value -- consider
# missing(datalocal) instead; verify intended behavior.
if(!exists("datalocal"))
{
gmessage(tt[4,la], title="Error", icon="error")
return()
}
# Create window (hidden until fully built, then shown at the end).
w7 <- gwindow(tt[1,la], visible=FALSE, width=900, height=500, parent=c(0,0))
g10 <- ggroup(horizontal=FALSE, spacing= 0, container=w7)
size(g10)<-c(890,490)
# Table widget displaying the data; first column is the selection key.
gt1 <- gtable(datalocal, chosencol = 1, multiple=TRUE, container=g10, index=TRUE)
size(gt1) <- c(880,450)
# Bottom row: spring pushes the close button to the right edge.
g11<-ggroup(horizontal=TRUE,spacing=0,container=g10)
addSpring(g11)
# Close button disposes of the window.
gb1 <- gbutton(tt[2,la], handler = function(h, ...){dispose(w7)}, container=g11)
visible(w7)<-TRUE
}
|
e299551ae25d9036e89e8cf4b4bd06bdd97a96d4
|
684d0b2e106b2284eaebd194ee3a692f4e379b0a
|
/lab1_special_plot.R
|
d3838f7699eb4aeebee39a9d88007b01e8db7f3b
|
[] |
no_license
|
snakepowerpoint/Multivariate_Statistical_Analysis
|
39dd3f87a88187983ba1759f280081488bcc13fa
|
816c855c6fad5dd934e6a702ff90ef9a25109df6
|
refs/heads/master
| 2021-04-26T22:25:06.221341
| 2018-03-06T14:21:39
| 2018-03-06T14:21:39
| 124,088,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
lab1_special_plot.R
|
# Lab 1 multivariate-visualization exercises: smoothed scatterplots of the
# iris data, a 3-D scatterplot with interactive point identification, Chernoff
# faces for US air-pollution data, and Andrews/parallel/star/radviz plots of
# the flea data.
# NOTE(review): setwd() with an absolute path makes this script machine
# specific; consider relative paths.
setwd('D:/MyGitHub/Multivariate_Statistical_Analysis/data')
t16<-read.table("T1-6.dat")
library(lattice)
# Iris: V2 vs V4 grouped by species (V6), with loess smoothers.
xyplot(V2 ~ V4, data = t16,
       groups = V6,
       type = c("p", "smooth"), span=.75,
       auto.key =list(title = "Iris Data",
                      x = .15, y=.85, corner = c(0,1),
                      border = TRUE, lines = TRUE))
# Iris: V3 vs V5, same grouping and smoothing.
xyplot(V3 ~ V5, data = t16,
       groups = V6,
       type = c("p", "smooth"), span=.75,
       auto.key =list(title = "Iris Data",
                      x = .15, y=.85, corner = c(0,1),
                      border = TRUE, lines = TRUE))
t19<-read.table("T1-9.dat")
library(rgl)
open3d()
# Columns V6-V8 as 3-D coordinates.
x <- t19$V6
y <- t19$V7
z <- t19$V8
plot3d(x, y, z, col=rainbow(1000))
with(t19, plot3d(V6,V7,V8,type='s', size=2))
# Interactive: click points in the rgl window to label them.
identify3d(t19$V6,t19$V7,t19$V8,row.names(t19)) #find 46,11,40
# NOTE(review): this passes three whole data-frame rows as the x, y, z
# arguments -- confirm the intent was to highlight rows 46/40/11.
points3d(t19[46,],t19[40,],t19[11,], col="blue")
us<-read.csv("USairpollution.csv")
library(TeachingDemos)
# Chernoff faces, one per city.
faces2(us[,2:8],labels=as.character(us$X))
library(tourr)
library(andrews)
# Local helper scripts (must be present in the working directory).
source("starcoord.R")
source("mmnorm.R")
source("circledraw.R")
source("radviz2d.R")
andrews(flea, type = 4, clr = 5, ymax = 2, main = "Type = 4")
parallelplot(~flea[1:6], flea, groups = species,
             horizontal.axis = FALSE, scales = list(x = list(rot = 90)))
starcoord(flea,class = T)
radviz2d(flea)
|
b81967a137566169c29c9e52adf420fddfa927d2
|
04236ab1161ec860ab3b1d0c3225fcbdc54923a3
|
/2020.d/2.racine-carre.d/racine-carre.R
|
50a4c30365c01822774cf968f105f04bfdb46e62
|
[
"MIT"
] |
permissive
|
homeostasie/petits-pedestres
|
957695cdb8a7823ed2e3fe79f7b441410928cba9
|
557c810e26412bc34ebe063dcd904affe5a27855
|
refs/heads/master
| 2023-05-31T03:35:13.365160
| 2023-04-26T21:47:56
| 2023-04-26T21:47:56
| 163,504,589
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
r
|
racine-carre.R
|
# Test the Zettaleaf rational square-root approximation on the integers
# 0..NombreMax. For each n, decompose n into (floor(sqrt(n)))^2 + remainder.
NombreMax = 10000000
nombreTest = c(0:NombreMax)
# Reference values from the exact square root.
racineCarre = sqrt(nombreTest)
# Integer part of the root and its square.
partieEntiere = floor(racineCarre)
partieEntiereCarre = partieEntiere^2
# Remainder: nombreTest = partieEntiere^2 + resteCarre.
resteCarre = nombreTest - partieEntiereCarre
Zettaleaf = function(n,a){n + a*(2*n+1)/(4*n^2+2*n+a)}
# Approximate roots and their error relative to sqrt().
testRacineCarre = Zettaleaf(partieEntiere, resteCarre)
ecart = racineCarre - testRacineCarre
# Error curve, clipped to a +/- 1e-7 band.
plot(nombreTest, ecart, type="l", ylim=c(-1*10^(-7),1*10^(-7)))
# Exact root (blue) with the approximation (red) overlaid.
plot(nombreTest, racineCarre, col="blue",type="l")
lines(nombreTest,testRacineCarre, col="red",type="l")
legend("topleft",
       c("sqrt(x)","Zettaleaf(x)"),
       fill=c("blue","red"))
|
139a08e310b2e58fadf5542dea740dc169c5f564
|
cafb54d209345a987d5f090dcc88c5a0bbd02757
|
/man/ECLDedUp-class.Rd
|
ae79eaa4096bfd729c14be8bfc388b684c5e3b53
|
[] |
no_license
|
cran/rHpcc
|
70290f02bfbd0e079cb7e7ebefb88bc08c6a9f9d
|
7c0e3fb4fa2e87de5b1a3676e4e9bd8949602304
|
refs/heads/master
| 2016-09-05T17:37:48.534715
| 2012-08-13T00:00:00
| 2012-08-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,424
|
rd
|
ECLDedUp-class.Rd
|
\name{ECLDedUp-class}
\Rdversion{1.1}
\docType{class}
\alias{ECLDedUp-class}
\title{Class \code{"ECLDedUp"}}
\description{
Creates an ECL "DEDUP" definition.
The DEDUP function evaluates the recordset for duplicate records, as defined by the condition parameter, and returns
a unique return set. This is similar to the DISTINCT statement in SQL. The recordset should be sorted, unless ALL is specified
}
\examples{
\dontrun{
ecl1 <- ECL$new(hostName="127.0.0.1", port="8008")
recPerson <- ECLRecord$new(name="rec_person")
recPerson$addField("STRING", "code")
recPerson$addField("STRING", "firstName")
recPerson$addField("STRING", "lastName")
recPerson$addField("STRING", "address")
recPerson$addField("STRING", "stateCode")
recPerson$addField("STRING", "city")
recPerson$addField("STRING", "zip")
ecl1$add(recPerson)
dsPerson <- ECLDataset$new(name="ds_person", datasetType = recPerson,
logicalFileName ="~ds::person", fileType="CSV")
ecl1$add(dsPerson)
recPersonTable <- ECLRecord$new(name="personNewTableFormat")
recPersonTable$addField(dsPerson$getName(), "code", seperator=".")
recPersonTable$addField(dsPerson$getName(), "firstName", seperator=".")
recPersonTable$addField(dsPerson$getName(), "lastName", seperator=".")
ecl1$add(recPersonTable)
tblPerson <- ECLTable$new(name="PersonNewTable", inDataset = dsPerson,
format= recPersonTable)
ecl1$add(tblPerson)
PersonNewTableSorted <- ECLSort$new(name="PersonNewTableSorted",
inDataset = tblPerson)
PersonNewTableSorted$addField("lastName")
ecl1$add(PersonNewTableSorted)
mySets <- ECLDedUp$new(name="mySets", inDataset = PersonNewTableSorted)
mySets$addField("lastName")
ecl1$add(mySets)
ecl1$print()
}
}
\keyword{classes}
\section{Fields}{
\describe{
\item{\code{name}:}{Object of class \code{character} Class name. }
\item{\code{inDataset}:}{Object of class \code{ECLDataset} Input record name. }
    \item{\code{def}:}{Object of class \code{character} ECL definition/code }
}
}
\section{Methods}{
\describe{
\item{\code{print()}:}{ Prints the ECL code. }
\item{\code{addField(value)}:}{ Used to add ECL definitions. }
\item{\code{getName()}:}{ Returns class name. }
}
}
|
892db2952631392ad8bd01a569a393de0d55579a
|
1ca988fbe3bc59eb676996102941b94207ca0885
|
/R/SBML.R
|
ee4eee0125e4ea6c81e6c0128e185b538c093fe4
|
[] |
no_license
|
cran/rsbml
|
d281dd9ff82ac212d0e4ef1e16461ae2563e580b
|
acc1b1366d3d54aa68db5d591deb249278b15202
|
refs/heads/master
| 2021-01-16T18:42:11.273477
| 2007-04-11T00:00:00
| 2007-04-11T00:00:00
| 17,719,367
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
SBML.R
|
# S4 class for an SBML document: the specification level/version pair plus
# the contained Model. Validity checking is delegated to rsbml_check()
# (defined elsewhere in the package).
setClass("SBML", representation(level = "integer", ver = "integer", model = "Model"),
         contains = "SBase", prototype = list(level = as.integer(2), ver = as.integer(1)),
         validity = function(object) rsbml_check(object))
# Accessor for the SBML specification level.
setGeneric("level", function(object) standardGeneric("level"))
setMethod("level", "SBML", function(object) object@level)
# Accessor for the SBML specification version.
setGeneric("ver", function(object) standardGeneric("ver"))
setMethod("ver", "SBML", function(object) object@ver)
# Replacement method for the level; coerces to integer to satisfy the slot.
setGeneric("level<-", function(object, value) standardGeneric("level<-"))
setReplaceMethod("level", "SBML", function(object, value) {
  object@level <- as.integer(value)
  object
})
# Replacement method for the version; coerces to integer to satisfy the slot.
setGeneric("ver<-", function(object, value) standardGeneric("ver<-"))
setReplaceMethod("ver", "SBML", function(object, value) {
  object@ver <- as.integer(value)
  object
})
# Accessor and replacement method for the contained Model object.
setGeneric("model", function(object) standardGeneric("model"))
setMethod("model", "SBML", function(object) object@model)
setGeneric("model<-", function(object, value) standardGeneric("model<-"))
setReplaceMethod("model", "SBML", function(object, value) {
  object@model <- value
  object
})
|
b0c9cfb330d0f4100f37e338eda1f5b10d03a28b
|
c592a22262174d6c671fb48a82c0d19be5ef7e20
|
/man/ajv.errorsText.Rd
|
356a296451a1ef53566467f78d24b30ad18bb081
|
[] |
no_license
|
cran/ajv
|
4596a33934c1fc28a52eefb4140632e6c6613e2f
|
cf385ce8af33f2e0470856f8050850076de27edc
|
refs/heads/master
| 2021-01-20T01:22:26.654576
| 2017-04-24T15:23:22
| 2017-04-24T15:23:22
| 89,262,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
ajv.errorsText.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ajv_methods.R
\name{ajv.errorsText}
\alias{ajv.errorsText}
\title{A wrapper for the Ajv.errorsText method}
\usage{
ajv.errorsText(this)
}
\arguments{
\item{this}{An AJV instance, provided implicitly when called via \code{my_instance$errorsText(...)}}
}
\value{
JSON encoded object containing the error message (if any), with
class "AJV-errorsText" for pretty printing via \code{print.errorsText}
}
\description{
Extracts the text of any validation errors from the Ajv instance.
}
\examples{
\dontrun{
my_ajv_instance = Ajv()
my_ajv_instance$errorsText
}
}
\seealso{
Other AJV.Instance.Methods: \code{\link{ajv.addFormat}},
\code{\link{ajv.addKeyword}},
\code{\link{ajv.addSchema}}, \code{\link{ajv.keyword}},
\code{\link{ajv.removeSchema}},
\code{\link{ajv.validateSchema}},
\code{\link{ajv.validate}}
}
|
beb904249fcc50d713c472e148ea195dd7ee01de
|
e55a20ae844186b0ae773da1f94fe4425133caf9
|
/gmaps/gmaps.R
|
d2ecacb16c2b3f9eac7d5d1a8a7ee8571bc29627
|
[] |
no_license
|
Teebusch/datathon2018
|
1c2ef90500344b3aa1ebed6ea43bc16fd0ef3ffd
|
d09a4e3c22664fab3f4a0f33027e7a725e194a5a
|
refs/heads/master
| 2021-03-24T09:13:07.571475
| 2018-02-24T22:05:19
| 2018-02-24T22:05:19
| 121,769,729
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
gmaps.R
|
#Adding Google Maps
# Center the map on the midpoint of the bounding box of all coordinates.
# NOTE(review): assumes a global `df` with numeric latitude/longitude columns
# and a `qid` track identifier -- confirm against the calling script. get_map
# with source = "google" requires a registered API key in current ggmap.
center = paste(min(df$latitude)+(max(df$latitude)-min(df$latitude))/2,
               min(df$longitude)+(max(df$longitude)-min(df$longitude))/2, sep=" ")
map <- get_map(location = center, zoom = 10, maptype = "terrain", source = "google")
# One facet per track (qid), each drawn as a path over the base map.
ggmap(map) +
  geom_path(data = df, aes(x = longitude, y = latitude, color = qid),
            show.legend = FALSE) +
  facet_wrap(~qid)
|
713a9ab64531b4184d00990ea6fcbd92abd3cebf
|
afcda04b51a9dc6c91442cde4095d88aa6736f1d
|
/man/mat_list_dir.Rd
|
4fc53121a2021c8f55e56230508a5fd3815b4a8d
|
[
"MIT"
] |
permissive
|
MatthieuStigler/matPkg
|
0f3cca09842d9d8bca40470ac7ca6fdd976c8d0d
|
545df24b8a730d63d674945aef321e60a102b016
|
refs/heads/master
| 2023-07-07T03:31:46.306634
| 2023-06-26T14:05:03
| 2023-06-26T14:05:03
| 168,240,066
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 546
|
rd
|
mat_list_dir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list_directories.R
\name{mat_list_dir}
\alias{mat_list_dir}
\title{List files in a directory}
\usage{
mat_list_dir(path, pattern = ".R", recursive = TRUE, add_ext = FALSE, ...)
}
\arguments{
\item{path}{dir path}
\item{pattern, recursive}{passed to list.files}
\item{add_ext}{Add column with extension?}
\item{\ldots}{Additional arguments to \code{\link{list.files}}}
}
\description{
List files in a directory
}
\examples{
mat_list_dir(path = ".", pattern = "R")
}
|
ee9cabd5adf7158424e109fa39910db2dfd848ce
|
1e659affda4579642682050ba86d440e4724ac15
|
/R/mypackage.R
|
9c8a3d75df5bc5db90925fbf2b52ab593ac85cea
|
[] |
no_license
|
Chengwei94/llrRcpp
|
b19762de68e0d4039dfc13baaf6cadc441c0b22c
|
58e970184298e62014152663ae2e24da73d162ca
|
refs/heads/main
| 2023-04-30T03:05:57.232335
| 2021-05-11T10:50:37
| 2021-05-11T10:50:37
| 307,415,631
| 0
| 0
| null | 2021-05-05T06:48:25
| 2020-10-26T15:20:40
|
C++
|
UTF-8
|
R
| false
| false
| 124
|
r
|
mypackage.R
|
#' @useDynLib llrRcpp, .registration=TRUE
#' @import Rcpp
#' @importFrom graphics lines
#' @import metaheuristicOpt
NULL
|
47b44cabce6d69ec79ca615db344850d3d0b1b7d
|
01d151bf3c209dcb7aa83e2ed1222fbf47de6e3b
|
/Tau/exploratory.R
|
3f9a785afb5b74e2acdb07afa875a351acd5f816
|
[] |
no_license
|
alexsanjoseph/Kaggle
|
d303a545cae45cf9cd49d003e7dcad213a051452
|
bb2c7a92ad44dc9ff6db8d1ef19d4e73dd24c350
|
refs/heads/master
| 2021-01-21T17:03:00.114934
| 2017-12-19T04:01:11
| 2017-12-19T04:01:11
| 38,832,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,783
|
r
|
exploratory.R
|
## Exploratory modelling for the Kaggle "tau" (flavours of physics)
## competition: load the competition data, fit a quick xgboost baseline,
## score the correlation check set, and write a submission file.
source("bootstrap.R")
library(xgboost)
source("Tau/evaluation.R")

dir_name = "../Kaggle-Data/tau/"
train_file = paste0(dir_name, "training.csv")
test_file = paste0(dir_name, "test.csv")
check_agreement_file = paste0(dir_name, "check_agreement.csv")
check_cor_file = paste0(dir_name, "check_correlation.csv")

sample_subm = read.csv("../Kaggle-Data/tau/sample_submission.csv")
train = read.csv(train_file)
test = read.csv(test_file)
check_cor = read.csv(check_cor_file)
check_agr = read.csv(check_agreement_file)

## Quick look at the column sets; training carries label/weight columns
## that are absent from the test set.
names(train)
names(check_cor)
names(check_agr)
names(test)
setdiff(names(train), names(test))

## Hold out 10% of the training rows for validation.
train_ratio = 0.9
train_indices = sample(1:nrow(train), floor(train_ratio * nrow(train)))
traindata = train[train_indices,]
# Fixed: original `train[-train_indices]` was missing the comma and so
# dropped COLUMNS rather than holding out rows.
cv_data = train[-train_indices,]

## Baseline model on the first 40 shared feature columns.
bst <- xgboost(data = traindata %>% select(one_of(names(test))[1:40]) %>% as.matrix, label = traindata$signal, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic")
# Removed: a stray `?lm` and a call to caret-style train() -- `train` is the
# data frame loaded above, so that call could not have worked.

## check cvm (correlation-with-mass metric from Tau/evaluation.R)
cvm_pred = predict(bst, check_cor %>% as.matrix)
compute_cvm(cvm_pred, check_cor$mass, n_neighbours = 200, step = 50)

## Score the test set and write a submission.
pred = predict(bst, test %>% as.matrix)
submission = data.frame(id = test$id, prediction = pred)
head(submission)
write.csv(submission, "../Kaggle-Data/tau/submission1.csv", row.names = F)

pred %>% head
cv_data$signal %>% head(10000)

## Sanity-check the xgboost workflow against the package's demo data.
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train1 <- agaricus.train
test1 <- agaricus.test
# Fixed: the original fit/predicted on `train`/`test` (the competition data
# frames, which have no $data/$label elements) instead of the demo objects.
bst <- xgboost(data = train1$data, label = train1$label, max.depth = 2,
               eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
pred <- predict(bst, test1$data)
|
d172080ee8981e6cb45d4c60730bc8d15709a863
|
818081fbffe4388a449d3510895294a220212eca
|
/ciudad-real/histograms.R
|
8fb17f07995aedc92541edf8c751b96d10cd519c
|
[] |
no_license
|
RubenCantareroNavarro/covid19-routes-survey
|
86a709342f5762a8b3f8aa5b6df207221a4e7bf1
|
bc110bf062c1b829ed8f8ad94fece201a70f0ea8
|
refs/heads/main
| 2023-04-20T10:25:05.462218
| 2021-05-05T15:07:09
| 2021-05-05T15:07:09
| 305,677,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 819
|
r
|
histograms.R
|
# Relative-frequency histogram of the number of danger points reported in the
# case-1 survey, with a kernel density estimate (red) and a fitted normal
# curve (green) overlaid.
# References:
# https://bookdown.org/jboscomendoza/r-principiantes4/histogramas.html
# https://estadistica-dma.ulpgc.es/cursoR4ULPGC/9c-grafHistograma.html
# http://matepedia-estadistica.blogspot.com/2016/05/histograma-con-frecuencia-relativa.html
library(readxl)
case_1_summary <- read_excel("/home/ruben/Escritorio/caso_1.xlsx")
#View(case_1_summary)
x = as.numeric(case_1_summary$survey_number_danger_points)
# Fixed axis-label typo: "Frecuency" -> "Frequency".
# NOTE(review): with freq=FALSE the y axis actually shows density, not
# frequency -- consider relabelling as "Density".
# hist(x, freq=FALSE, col="lightcyan", main = NULL, xlab = "Danger points", ylab = "Frequency", breaks = 14)
hist(x, freq=FALSE, col="lightcyan", main = NULL, xlab = "Danger points", ylab = "Frequency", breaks = c(0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80), ylim=c(0,0.05))
# Empirical kernel density estimate.
lines(density(x),col="red",lwd=2)
# Normal reference curve using the sample mean and standard deviation.
curve(dnorm(x,
            mean=mean(x),
            sd=sd(x)),
      add=TRUE, col="green")
|
7e1c0c3a779cb16ba62c53fce7a8540631160550
|
53b7280e5902c81e8e23b2eb7c842e269c776e9d
|
/plot3.R
|
aaa84de888f82fb208594d9869b113cc94945ddf
|
[] |
no_license
|
mikeburba/ExData_Plotting1
|
b1a3e402d817e62cd612275f0c2de23cf0ca0053
|
3b311d1a9f0639c113bbb932d6adddae5a0e664e
|
refs/heads/master
| 2021-01-15T11:02:08.151732
| 2015-07-12T21:53:22
| 2015-07-12T21:53:22
| 38,962,541
| 0
| 0
| null | 2015-07-12T13:16:21
| 2015-07-12T13:16:21
| null |
UTF-8
|
R
| false
| false
| 398
|
r
|
plot3.R
|
# Draw course plot 3: the three energy sub-metering series on a shared time
# axis, with a legend in the top-right corner. `data` must contain a
# `DateTime` column plus `Sub_metering_1`..`Sub_metering_3` columns.
plot3 <- function(data) {
    # Set up the axes without drawing points, then overlay each series.
    plot(data$DateTime, data$Sub_metering_1, type = "n",
         xlab = "", ylab = "Energy sub metering")
    lines(data$DateTime, data$Sub_metering_1, col = "black")
    lines(data$DateTime, data$Sub_metering_2, col = "red")
    lines(data$DateTime, data$Sub_metering_3, col = "blue")
    legend("topright", lty = 1, col = c("black", "red", "blue"),
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
}
|
0b42829878f2208abd7665025569edf0ed03a9aa
|
7a766f8e81afb68f686a1c37e9bfcc7bea6a4948
|
/application/test3x3.rd
|
9c361e1d083621cd8b646954ce79bb7cffc56184
|
[] |
no_license
|
guidocalvano/ThoughtWeave
|
8dc82828d849c634ecb61e6149249acb69dca746
|
d426a920fbddec39604da4126b04313406ce562e
|
refs/heads/master
| 2021-01-01T18:08:02.752481
| 2010-11-16T00:14:06
| 2010-11-16T00:14:06
| 1,083,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 174
|
rd
|
test3x3.rd
|
0.15862346878971767 0.5519442537113229 0.10169057318091146 0.21594283109872103 0.982064880271643 0.2351744985160451 0.5999709168135455 0.1746795603860024 0.03724900146367904
|
0f8569a9af65384ff68db9bae8e835e04615ad25
|
ad345ec40bc18b2a7685b0c4d127aab6e8963ff8
|
/3.r
|
8b77fa21d036555ccd8202631bd914d4d06aa4b2
|
[] |
no_license
|
izeh/i
|
d3b01469e763e15e39d4e08de12d8c4dd911aaba
|
c5f3c96a268843c7fc92f331b587361bba327170
|
refs/heads/master
| 2021-06-19T04:47:41.461105
| 2019-07-14T04:30:52
| 2019-07-14T04:30:52
| 102,161,352
| 2
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
3.r
|
# Plot a beta prior elicited from a 60%-80% interval (via the beta.id()
# helper sourced below), shading the interval (blue arrows) and its tails
# (red arrows) under the density curve.
source("https://raw.githubusercontent.com/rnorouzian/i/master/i.r")
# NOTE(review): options(warn = -1) silences warnings for the whole session,
# not just this script.
C = beta.id(.6, .8, .6) ; options(warn = -1)
# Beta density over the preference proportion; axes drawn manually below.
curve(dbeta(x, C$a, C$b), n = 1e4, axes = FALSE, lwd = 2, yaxs = "i", xpd = TRUE,
      xlab = "Proportion of preference for (B)", ylab = NA, font.lab = 2)
# Percentage labels on the x axis; the 60%-80% ticks emphasised in bold.
axis(1, at = axTicks(1), labels = paste0(axTicks(1)*1e2, "%") )
axis(1, at = axTicks(1)[4:5], labels = paste0(axTicks(1)[4:5]*1e2, "%"), font = 2)
# Blue arrows fill the elicited interval [.6, .8] under the curve.
x = seq(.6, .8, l = 10)
y = dbeta(x, C$a, C$b)
arrows(x, 0, x, y, code = 2, length = .13, angle = 10, col = 4)
# Red arrows fill the two tails outside the interval.
x = c(seq(0, .577, l = 28), seq(.822, 1, l = 10))
y = dbeta(x, C$a, C$b)
arrows(x, 0, x, y, code = 1, length = .12, angle = 10, col = 2)
|
0439d7d7bbc6a1349a53bead27b213f3c6c07e4b
|
75928a1e86e09c05d0baf89cf13cca389a67aa1e
|
/man/metR.Rd
|
31979754551ee996a49e119a3c58ee365b382ab8
|
[] |
no_license
|
eliocamp/metR
|
da2e478190a27e08d6f534a508558ccd0dc0975e
|
b26ff96a4b0d3c94e880ae13f68f87f7a06a78fd
|
refs/heads/master
| 2023-08-31T18:03:26.430340
| 2023-03-25T14:14:33
| 2023-03-25T14:14:33
| 96,357,263
| 146
| 27
| null | 2023-07-02T11:36:32
| 2017-07-05T20:09:40
|
R
|
UTF-8
|
R
| false
| true
| 1,560
|
rd
|
metR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metR-package.R
\docType{package}
\name{metR}
\alias{metR}
\alias{metR-package}
\title{metR: Tools for Easier Analysis of Meteorological Fields}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}
Many useful functions and extensions for dealing with meteorological data in the tidy data framework. Extends 'ggplot2' for better plotting of scalar and vector fields and provides commonly used analysis methods in the atmospheric sciences.
}
\section{Overview}{
Conceptually it's divided into \emph{visualization tools} and \emph{data tools}.
The former are geoms, stats and scales that help with plotting using
\link{ggplot2}, such as \link{stat_contour_fill} or \link{scale_y_level}, while the
later are functions for common data processing tools in the atmospheric
sciences, such as \link{Derivate} or \link{EOF}; these are implemented to work in the
\link{data.table} paradigm, but also work with regular data frames.
To get started, check the vignettes:
\itemize{
\item Visualization Tools: \code{vignette("Visualization-tools", package = "metR")}
\item Working with Data: \code{vignette("Working-with-data", package = "metR")}
}
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/eliocamp/metR}
\item Report bugs at \url{https://github.com/eliocamp/metR/issues}
}
}
\author{
\strong{Maintainer}: Elio Campitelli \email{elio.campitelli@cima.fcen.uba.ar} (\href{https://orcid.org/0000-0002-7742-9230}{ORCID})
}
|
30262ae0b505c98f20a4c0461ded7185cb029b82
|
bcfb6ac4dcc3feb9f4761a272c39bb22ef58afec
|
/BrexitFrame.R
|
9f01a07828accb281794a68b91a9c63d5f20b9dc
|
[] |
no_license
|
PolPsychCam/Twitter-Brexit-MoralFoundations
|
dc70b825e95f864b92f843ad486072c8ee04a21c
|
fc472ffb83413a90b89f040892a97b42800d5b7c
|
refs/heads/master
| 2022-12-31T01:38:19.164791
| 2020-10-20T12:50:52
| 2020-10-20T12:50:52
| 279,547,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,576
|
r
|
BrexitFrame.R
|
#### Denotes code segment
## Explains process below
# Provides note
#### Directory, Files, Libraries####
##Clears the environment, sets the working directory, calls the libraries
# NOTE(review): rm(list = ls()) and an absolute setwd() are script
# anti-patterns -- they wipe the caller's session and tie the analysis to
# one machine; consider removing both.
rm(list = ls())
setwd("C:/Users/jaack/OneDrive - University Of Cambridge/Summer Political Psychology/Brexit Twitter/Data")
library('psych') #statistical tools
library('tidyr') #makes things tidyr
library('dplyr') #general data handling
library('XML') #xml handling
library('methods')#
library('pdftools') #pdf handling
library('tm') #textmining package
library('SnowballC') #required for some Corpus cleaning functions
library('car') #bonus regression tools
library('ResourceSelection') #Hosmer Lemeshow test
library('stargazer') #nice regression tables
library('tesseract') #OCR
library('magick') #Image refinement
#### Hansard XMLs in ####
##Creates a list of strings for each xml file name in the folder
filenames<-list.files("C:/Users/jaack/OneDrive - University Of Cambridge/Summer Political Psychology/Brexit Twitter/Data/Hansard",full.names=T)
##Parses those files into an R-readable format.
#Note this format holds only references to the external file, not the data itself
allxmls<-lapply(filenames,xmlParse)
##Get segments from all politicians of interest (1st line creates an entry of matches from each xml, 2nd deletes null entries)
#No data for non-MPS, Iain Duncan Smith, Boris Johnson, or Sadiq Khan
# XPath union: one <p> node set per politician of interest.
Parlspeeches<-lapply(allxmls,getNodeSet,path="/publicwhip/speech[@speakername=\"Michael Gove\"]/p |
                                              /publicwhip/speech[@speakername=\"David Cameron\"]/p |
                                              /publicwhip/speech[@speakername=\"Boris Johnson\"]/p |
                                              /publicwhip/speech[@speakername=\"Liam Fox\"]/p |
                                              /publicwhip/speech[@speakername=\"Penny Mordaunt\"]/p |
                                              /publicwhip/speech[@speakername=\"Andrea Leadsom\"]/p |
                                              /publicwhip/speech[@speakername=\"Tim Farron\"]/p |
                                              /publicwhip/speech[@speakername=\"David Davis\"]/p |
                                              /publicwhip/speech[@speakername=\"Yvette Cooper\"]/p |
                                              /publicwhip/speech[@speakername=\"Chris Grayling\"]/p |
                                              /publicwhip/speech[@speakername=\"Gisela Stuart\"]/p |
                                              /publicwhip/speech[@speakername=\"George Osborne\"]/p |
                                              /publicwhip/speech[@speakername=\"Iain Duncan Smith\"]/p |
                                              /publicwhip/speech[@speakername=\"Priti Patel\"]/p |
                                              /publicwhip/speech[@speakername=\"Sadiq Khan\"]/p |
                                              /publicwhip/speech[@speakername=\"Yvette Cooper\"]/p |
                                              /publicwhip/speech[@speakername=\"Tim Farron\"]/p")
# NOTE(review): x[-which(cond)] returns an EMPTY result when no element
# matches (which() gives integer(0)); if no file ever returned NULL this
# would silently drop everything -- confirm, or use Filter(Negate(is.null), ...).
Parlspeeches<-Parlspeeches[-which(sapply(Parlspeeches, is.null))]
rm(allxmls)
##Extract metadata on speakername and timestamp (in id) for each segment and place into a df. Essentially creates a skeleton which knows how many segments are coming but
##doesn't have data for what they are
Parlmeta<-lapply(Parlspeeches, function(x) lapply(x, xmlParent))
Parlmeta<-lapply(Parlmeta, function(x) lapply(x, xmlAttrs))
Parlmeta<-lapply(Parlmeta, function(x) lapply(x, unlist))
allParl<-data.frame(speaker=unlist(sapply(Parlmeta, function(x) lapply(x, function(y) y[["speakername"]]))),
                    days_to_go=unlist(sapply(Parlmeta, function(x) lapply(x, function(y) y[["id"]]))))
rm(Parlmeta)
##Add the text segments to their metadata in the dataframe
Parlspeeches<-unlist(Parlspeeches)
allParl$text<-as.character(lapply(Parlspeeches, xmlValue, recursive = TRUE, encoding = "UTF-8"))
rm(Parlspeeches)
##In each entry, convert timestamp into days until referendum
# Pull a YYYY-MM-DD date out of the speech id, then count days to 2016-06-23.
allParl$days_to_go<-regmatches(allParl$days_to_go, gregexpr(pattern = "[[:digit:]][[:digit:]][[:digit:]][[:digit:]]-[[:digit:]][[:digit:]]-[[:digit:]][[:digit:]]", text = allParl$days_to_go))
allParl$days_to_go<-substring(allParl$days_to_go,1,10)
allParl$days_to_go<-as.Date(allParl$days_to_go, format = "%Y-%m-%d")
allParl$days_to_go<-as.integer(as.Date("2016-06-23", format = "%Y-%m-%d")-allParl$days_to_go)
#There is now a dataframe with entries for every time a politician of interest spoke in parliament during the campaign period, with the text segment
#(period of uninterrupted speech), speaker's name, and number of days until the referendum attached
# Split each segment into sentences, carrying speaker/date onto each sentence.
parlsentences <- strsplit(allParl$text, split = "(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s", perl=T) #finds pattern of .or?, followed by space, followed by capital.
parlsentences <- data.frame(speaker = rep(allParl$speaker, sapply(parlsentences, length)), text = unlist(parlsentences), days_to_go = rep(allParl$days_to_go, sapply(parlsentences, length)))
#### Marr PDFs in ####
# Reads Andrew Marr Show interview transcripts (PDFs), splits them into
# per-speaker speech fragments, and sentence-tokenises them into `marrsentences`.
##Creates a list of strings for each xml file name in the folder
filenames<-list.files("C:/Users/jaack/OneDrive - University Of Cambridge/Summer Political Psychology/Brexit Twitter/Data/Marr",full.names=T)
##Processes all files into a single corpus
MarrCorpus<-Corpus(URISource(filenames), readerControl = list(reader = readPDF))
##Extracts dates metadata into a list (possible regressor)
MarrDates<-lapply(MarrCorpus, function(dates) dates[["meta"]][["datetimestamp"]])
MarrDates<-lapply(MarrDates, grep, pattern = "[[:digit:]][[:digit:]][[:digit:]][[:digit:]]-[[:digit:]][[:digit:]]-[[:digit:]][[:digit:]]", value = TRUE)
# Keep just the yyyy-mm-dd prefix and convert to days until the referendum
MarrDates<-substring(MarrDates,1,10)
MarrDates<-as.Date(MarrDates, format = "%Y-%m-%d")
MarrDates<-as.integer(as.Date("2016-06-23", format = "%Y-%m-%d")-MarrDates)
##Remove white space
MarrCorpus<-tm_map(MarrCorpus, stripWhitespace)
##def and use new function to remove control character \r\n pattern
stripControlChar<-content_transformer(function(x) gsub("\r\n", "", x))
MarrCorpus<-tm_map(MarrCorpus, stripControlChar) #tm_map is a bit like sapply for Corpus
##Collapses each file in the corpus into a long string object (previously one object per page, now one per file)
# NOTE(review): growing allMarr inside the loop is the grow-in-a-loop anti-pattern,
# but bind_rows(.id) below depends on the resulting unnamed list, so it is left as-is.
allMarr<-list()
for (doc in 1:length(MarrCorpus)) {
  allMarr[doc]<-paste(MarrCorpus[[doc]][[1]], sep = '', collapse = '')
}
rm(MarrCorpus)
##Splits each text block back into a list, now one object per uninterrupted speech fragment.
# Lookahead splits immediately before a two-capital speaker tag such as " AM: "
breakInitials<-function(x) strsplit(x, "(?<=.)(?= [[:upper:]][[:upper:]]: )", perl = TRUE)
allMarr<-sapply(allMarr, breakInitials)
##Turns these lists into a dataframe
allMarr<-lapply(allMarr, as.data.frame, stringsAsFactors = FALSE)
allMarr<-bind_rows(allMarr, .id = "speaker")
##Clarify column name
allMarr<-rename(allMarr,"text" = "X[[i]]")
##Select only those cases where a study politician is speaking
allMarr<-allMarr[grepl(" DC: | YC: | DD: | NF: | TF: | AF: | LF: | MG: | CG: | BJ: | SK: | AL: | PM: | NS: | LW: ",allMarr$text)==TRUE,] #note this loses a few cases where initials have typos
##Add in dates (using speaker number as a proxy index before they are named
allMarr$days_to_go<-MarrDates[match(allMarr$speaker, c(1:17))]
##Match names to speaker numbers (in alphabetical order - the order of files read in)
# NOTE(review): this mapping assumes list.files() returned exactly these 17 files in
# alphabetical order -- re-verify whenever the source folder changes.
allMarr$speaker<-recode_factor(allMarr$speaker, "1" = "David Cameron", "2" = "Yvette Cooper", "3" = "David Davis",
                               "4" = "Nigel Farage", "5" = "Nigel Farage", "6" = "Tim Farron",
                               "7" = "Arlene Foster", "8" = "Liam Fox", "9" = "Michael Gove", "10" = "Michael Gove",
                               "11" = "Chris Grayling", "12" = "Boris Johnson", "13" = "Sadiq Khan", "14" = "Andrea Leadsom",
                               "15" = "Penny Mordaunt", "16" = "Nicola Sturgeon", "17" = "Leanne Wood", .default="NA", .ordered=FALSE)
# Strip the leading " XX: " speaker tag from each fragment
allMarr$text<-substring(allMarr$text, 5)
# Sentence-tokenise, avoiding splits at common abbreviation patterns
marrsentences <- strsplit(allMarr$text, split = "(?<!\\w\\.\\w.)(?<![A-Z][a-z]\\.)(?<=\\.|\\?)\\s", perl=T) #finds pattern of .or?, followed by space, followed by capital.
marrsentences <- data.frame(speaker = rep(allMarr$speaker, sapply(marrsentences, length)), text = unlist(marrsentences), days_to_go = rep(allMarr$days_to_go, sapply(marrsentences, length)))
#### Twitter CSVs in ####
# LEGACY pipeline (kept for reference): tweets were previously read from
# per-account CSV exports and labelled by alphabetical file order.
##Creates a list of strings for each csv file name in the folder
# filenames<-list.files("C:/Users/jaack/OneDrive - University Of Cambridge/Summer Political Psychology/Brexit Twitter/Data/Full Tweets",full.names=T)
#
# ##Reads in those files, removes replies, and one useless and misbehaving variable, and turns them into a dataframe
# alltweets<-lapply(filenames,read.csv,stringsAsFactors = FALSE)
# alltweets<-lapply(alltweets,select,-is_retweet)
# alltweets<-bind_rows(alltweets, .id = "speaker")
# row.names(alltweets) <- 1:nrow(alltweets)
##Label speakers - they should default to being read in alphabetical order of twitter handle (note e.g. Arlene Foster = "dupleader"), but its good to check
# alltweets$speaker<-recode_factor(alltweets$speaker, "1" = "Andrea Leadsom", "2" = "Boris Johnson", "3" = "David Cameron",
#                                  "4" = "David Davis", "5" = "Arlene Foster", "6" = "Frances O'Grady", "7" = "George Osborne", "8" = "Gisela Stuart",
#                                  "9" = "Leanne Wood", "10" = "Leave.EU", "11" = "Liam Fox", "12" = "Nicola Sturgeon", "13" = "Nigel Farage", "14" = "Penny Mordaunt",
#                                  "15" = "Remain", "16" = "Ruth Davidson", "17" = "Sadiq Khan", "18" = "Tim Farron", "19" = "Vote Leave", .default="NA", .ordered=FALSE)
# CURRENT pipeline: one pre-combined CSV of all tweets
alltweets <- read.csv("all4.csv", encoding = "UTF-8")
# Placeholder for text OCR'd from attached images (filled in by the loop below)
alltweets$imagetext <- NA
ocrengine = tesseract(language = "eng") #creates the OCR engine with default params
for (i in which(!is.na(alltweets$imagelink))) {
print(alltweets$imagelink[i])
try({ #Try loop used in case of URL request failure #1485
inputimg <- image_read(alltweets$imagelink[i], strip = TRUE)
image_write(inputimg,
path = paste("images/", substring(alltweets$imagelink[i],29,nchar(alltweets$imagelink[i])-4), ".png", sep = ""),
format = "png",
quality = 100,
flatten = TRUE)
processedimg <- inputimg %>%
image_resize("2000x") %>%
image_convert(type = "Grayscale")
imgtext <- tesseract::ocr_data(processedimg, engine = ocrengine) #Run OCR on the URL at hand with the English engine
likelywords <- imgtext$word[imgtext$confidence>60] #strip out low-confidence guesses. Change this value if too few/many words are being picked up.
likelywords <- unlist(lapply(likelywords, tolower)) #lowercase everything
likelywords <- likelywords[nchar(likelywords)>1|likelywords=="a"|likelywords=="i"] #strip out non-word 1-letter signals (these are very common, e.g. | for straight lines)
likelysentence <- paste(likelywords, collapse = " ") #collapse the list of found words into a single string
alltweets$imagetext[i] <- if (length(likelysentence) == 0) NA else likelysentence #if there were any words found in the image, record them, else act as if there was no image at all
print(likelysentence)
})
}
#Delete OCR objects
rm(inputimg,likelysentence,likelywords,ocrengine,processedimg,imgtext)
##Rationalise creation date into days until referendum, replyname to a logical, and rename columns
# created_at arrives as e.g. "Wed Jun 23 10:00:00 +0000 2016"; characters 5-11 are "Jun 23"
alltweets$created_at<-substring(alltweets$created_at,5,11)
# BUG FIX: as.Date(x, "%b %d") fills in the year the script happens to RUN in, not 2020,
# silently shifting every date in non-2020 runs. Pin the stand-in year explicitly.
# (2020 is a deliberate leap-year stand-in for 2016; the campaign fell after February anyway.)
# NOTE(review): "%b" month parsing is locale-dependent -- assumes an English locale.
alltweets$created_at<-as.Date(paste("2020", alltweets$created_at), format = "%Y %b %d")
alltweets$created_at<-as.integer(as.Date("2020-06-23", format = "%Y-%m-%d")-alltweets$created_at)
# Rename to analysis-friendly column names
alltweets<-select(alltweets, "author", "favourites" = "favorite_count", "retweets" = "retweet_count", "text", "imagetext", "reply" = "in_reply_to_screen_name", "days_to_go" = "created_at", "imagelink")
# reply becomes TRUE when the tweet replied to someone (non-NA, non-empty screen name)
alltweets$reply<-!is.na(alltweets$reply) & alltweets$reply != ""
##Clarify NAs: empty or literal "NA" image text means no readable image content
alltweets$imagetext[which(alltweets$imagetext=="")] <- NA
alltweets$imagetext[which(alltweets$imagetext=="NA")] <- NA
##remove tweets not from politicians for study (handles compared case-insensitively via tolower)
alltweets$author <- tolower(alltweets$author)
alltweets <- filter(alltweets, author %in% c("andrealeadsom", "borisjohnson", "david_cameron",
                                             "daviddavismp", "dupleader", "francesogrady", "george_osborne",
                                             "giselastuart", "leannewood", "leaveeuofficial", "liamfox",
                                             "nicolasturgeon", "nigel_farage", "pennymordaunt", "peoplesvote_hq",
                                             "ruthdavidsonmsp", "sadiqkhan", "timfarron", "vote_leave"))
# Map handles to display names (campaign accounts keep their campaign name)
alltweets$speaker = recode_factor(alltweets$author, andrealeadsom = "Andrea Leadsom", borisjohnson = "Boris Johnson",
                                  david_cameron = "David Cameron", daviddavismp = "David Davis", dupleader = "Arlene Foster",
                                  francesogrady = "Frances O'Grady", george_osborne = "George Osborne", giselastuart = "Gisela Stuart",
                                  leannewood = "Leanne Wood", leaveeuofficial = "Leave.EU", liamfox = "Liam Fox",
                                  nicolasturgeon = "Nicola Sturgeon", nigel_farage = "Nigel Farage", pennymordaunt = "Penny Mordaunt",
                                  peoplesvote_hq = "Remain", ruthdavidsonmsp = "Ruth Davidson", sadiqkhan = "Sadiq Khan",
                                  timfarron = "Tim Farron", vote_leave = "Vote Leave")
alltweets = select(alltweets, -author)
#### Create central df ####
#Standardisation: make every text column character before row-binding
marrsentences$text<-as.character(marrsentences$text)
alltweets$text<-as.character(alltweets$text)
alltweets$imagetext<-as.character(alltweets$imagetext)
parlsentences$text<-as.character(parlsentences$text)
#Binding: .id records which source each row came from
# alltext<-bind_rows(allMarr, allParl, alltweets, .id = "medium")
alltext<-bind_rows(marrsentences,parlsentences,alltweets, .id = "medium")
alltext$medium<-recode_factor(alltext$medium, "1" = "Marr Interview", "2" = "Parliament", "3" = "Twitter")
#Remove duplicated segments (some exist due to errors in the base transcipt. Where two speakers have used the same text, or the same speaker has used it twice, this is retained and treated as a double-endorsement)
# BUG FIX: the original `alltext[-which(duplicated(alltext)),]` silently drops EVERY
# row when there are no duplicates (which() -> integer(0), x[-integer(0),] is empty).
#this doesn't refer to the $text column, so looks for completely duplicated entries including imagetext
alltext<-alltext[!duplicated(alltext),]
#Create a variable to document OCR presence or absence
alltext$hasimage = !is.na(alltext$imagetext)
#create a new variable for the functional text of each segment by concatenating the shortened image text to the plain text,
#and creating an exception for when no image text exists. Then split that text into individual words
alltext$countingtext <- paste(alltext$text, ifelse(is.na(alltext$imagetext), "", alltext$imagetext), sep = " ")
alltext$countingtext <- strsplit(alltext$countingtext, split = "\\s+")
#Drop web links. Almost certainly only affects Twitter but apply universally just in case
alltext$countingtext<-lapply(alltext$countingtext, gsub, pattern = "(?:(?:https?|ftp):\\/\\/)?[\\w/\\-?=%.]+\\.[\\w/\\-?=%.]+", replacement = "", perl = TRUE) #Remove URLs
#Remove any segments not referring to the EU in either text *or* image text
brexhitwords = "\\bEU(?!\\w)|Brexit|Euro|EU *Ref|Leav|Remain|23 *June|June *23|stronger *in|take *back *control|breaking *point|single *market|350 *m|In *Campaign|Out *Campaign|Single Currency|Schengen|Juncker|Believe *in *Britain|We *Want *Our *Country *Back|Stronger *Safer *and *Better *Off|membership|EU *member|member *state|Brussels|common *market|lexit|stay in|in *or *out|BBC *Debate"
brexhitsegments = unique(c(grep(brexhitwords,alltext$text,ignore.case = TRUE, perl = TRUE), grep(brexhitwords,alltext$imagetext,ignore.case = TRUE, perl = TRUE)))
alltext<-alltext[brexhitsegments,]
#Remove old objects
rm(doc,filenames,MarrDates,stripControlChar,breakInitials,brexhitwords,brexhitsegments)
#### Adjust word counts ####
#Count the pre-split 'counting text' column to get word counts
alltext$wordcount <- lengths(alltext$countingtext)
#Check distributions of word count. Twitter has clear outliers, caused by extensive OCR entries.
summary(alltext$wordcount)
summary(filter(alltext,medium == "Twitter")$wordcount)
summary(filter(alltext,medium == "Marr Interview")$wordcount)
summary(filter(alltext,medium == "Parliament")$wordcount)
#Crop excessive OCR entries by ensuring no text has more than 100 words.
#head() truncates without padding, so the original `[`(x, 1:100) + na.omit() round-trip
#(which injected NAs into shorter segments and left na.action attributes behind) is unneeded.
alltext$countingtext <- lapply(alltext$countingtext, head, 100)
#re-run wordcount after cropping
alltext$wordcount <- lengths(alltext$countingtext)
#Check distributions again, now Twitter is more in line. They don't need to be identical, just close enough to be 'fair analogues'.
summary(filter(alltext,medium == "Twitter")$wordcount)
summary(filter(alltext,medium == "Marr Interview")$wordcount)
summary(filter(alltext,medium == "Parliament")$wordcount)
#### Input person regressors####
# Per-speaker indicators, aligned with the alphabetical order of speaker levels
index <- names(table(alltext$speaker))
#The labels below are contingent on the order of the speakers read in above THIS MUST BE DOUBLE CHECKED WHEN RUN BECAUSE IT CAN EASILY WARP RESULTS!
#TRUE/FALSE written out in full: T/F are ordinary variables that can be reassigned.
Brexiteer <- c(TRUE,TRUE,TRUE,TRUE,FALSE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE,FALSE,TRUE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE)
Conservative <- c(TRUE,FALSE,TRUE,TRUE,TRUE,TRUE,FALSE,TRUE,FALSE,FALSE,FALSE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,FALSE,TRUE,FALSE,FALSE,FALSE,FALSE)
Labour <- c(FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE,FALSE,FALSE,TRUE)
# Attach the person-level indicator matching each segment's speaker
alltext$brexiteer <- Brexiteer[match(alltext$speaker, index)]
alltext$conservative <- Conservative[match(alltext$speaker, index)]
alltext$labour <- Labour[match(alltext$speaker, index)]
rm(Brexiteer,Conservative,Labour,index)
#### Data verification ####
##Verify sample manually: print 50 random segments for hand-checking relevance
indices<-as.list(sample(1:length(alltext$text),50,replace = FALSE))
for (j in 1:length(indices)) {
  print(alltext[indices[[j]],])
}
#RESULT:50 out of 50 are correctly identified. Noteworthy that many tweets are quotes from other campaigners
##Check remain tags: campaign hashtags should only appear on the matching side
# NOTE(review): grep on the list-column countingtext coerces each word vector to a
# single string via as.character(), which is what makes whole-segment search work here.
Remaintagged<-alltext[grep("StrongerIN|Stronger In", alltext$countingtext, ignore.case = TRUE),]
table(Remaintagged$brexiteer)
print(Remaintagged$countingtext[which(Remaintagged$brexiteer)])
#RESULT: 0/387 are written by leavers
##Check Leave tags
Leavetagged<-alltext[grep("VoteLeave|TakeControl|TakeBackControl", alltext$countingtext, ignore.case = TRUE),]
table(Leavetagged$brexiteer)
print(Leavetagged$countingtext[-which(Leavetagged$brexiteer)])
#RESULT: 0/775 are written by remainers
rm(Leavetagged,Remaintagged,indices,j)
#### Calculate MFT Scores ####
# Regex dictionaries, one per moral foundation. Alternatives are word stems;
# the (?!\w) negative lookahead marks stems that must NOT be followed by another
# word character (i.e. exact-word matches), while bare stems match any continuation.
# NOTE(review): these appear adapted from the Moral Foundations Dictionary -- confirm
# provenance and version before citing.
Care <- c("safe|peace|compassion|empath|sympath|care(?!\\w)|caring(?!\\w)|protect|shield(?!\\w)|shelter(?!\\w)|amity(?!\\w)|secur|benefit|defen|guard|preserve(?!\\w)|harm|suffer|war(?!\\w)|wars(?!\\w)|warl|warring(?!\\w)|fight|violen|hurt|kill(?!\\w)|kills(?!\\w)|killer|killed(?!\\w)|killing(?!\\w)|endanger|cruel|brutal|abuse|damag|ruin|ravage(?!\\w)|detriment|crush|attack|annihilate|destroy(?!\\w)|stomp(?!\\w)|abandon|spurn(?!\\w)|impair(?!\\w)|exploit(?!\\w)|exploits(?!\\w)|exploited(?!\\w)|exploiting(?!\\w)|wound")
Fair <- c("fair(?!\\w)|fairly(?!\\w)|fairness(?!\\w)|fair-|fairmind|fairplay(?!\\w)|equal|justice(?!\\w)|justness(?!\\w)|justifi|reciproc|impartial|egalitar|rights(?!\\w)|equity(?!\\w)|evenness(?!\\w)|equivalent(?!\\w)|unbias|tolerant(?!\\w)|equable(?!\\w)|balance|homologous(?!\\w)|unprejudice|reasonable(?!\\w)|constant(?!\\w)|honest|unfair|unequal|bias|unjust|injust|bigot|disciminat|disproportion|inequitable(?!\\w)|prejud|dishonest(?!\\w)|unscrupulous(?!\\w)|dissociate(?!\\w)|preference(?!\\w)|favoritism(?!\\w)|segregat|exclusion(?!\\w)|exclud")
Loyal <-c("abandon|segregat|together(?!\\w)|nation|homeland|family(?!\\w)|families(?!\\w)|familial(?!\\w)|group(?!\\w)|loyal|patriot|communal(?!\\w)|commune|communit|communis|comrad|cadre(?!\\w)|collectiv|joint(?!\\w)|unison(?!\\w)|unite|fellow|guild(?!\\w)|solidarity(?!\\w)|devot|member(?!\\w)|cliqu|cohort(?!\\w)|ally(?!\\w)|insider(?!\\w)|foreign|enem|betray|treason|traitor|treacher|disloyal|individual|apostasy(?!\\w)|apostate(?!\\w)|deserted(?!\\w)|deserter|deserting(?!\\w)|deceiv|jilt|imposter(?!\\w)|miscreant(?!\\w)|spy(?!\\w)|sequester(?!\\w)|renegade(?!\\w)|terroris|immigra")
Auth <- c("preserve(?!\\w)|loyal|betray|treason|traitor|treacher|disloyal|apostasy(?!\\w)|apostate(?!\\w)|deserted|deserter|deserting|obey|obedien|duty(?!\\w)|law(?!\\w)|legal|duti|honor|respect(?!\\w)|respectful|respected(?!\\w)|respects(?!\\w)|order|father|mother(?!\\w)|motherl|mothering(?!\\w)|mothers(?!\\w)|tradition|hierarch|authorit|permit(?!\\w)|permission(?!\\w)|status|rank|leader|class(?!\\w)|bourgeoisie(?!\\w)|caste|position(?!\\w)|complian|command(?!\\w)|supremacy(?!\\w)|control(?!\\w)|submi|allegian|serve(?!\\w)|abide(?!\\w)|defere|defer(?!\\w)|revere|venerat|comply(?!\\w)|defian|rebel|dissent|subver|disrespect|disobe|sediti|agitat|insubordinat|illegal|insurgent(?!\\w)|mutinous(?!\\w)|defy|dissident(?!\\w)|unfaithful(?!\\w)|alienate(?!\\w)|defector(?!\\w)|heretic|nonconformist(?!\\w)|oppose(?!\\w)|protest(?!\\w)|refuse(?!\\w)|denounce(?!\\w)|remonstrate(?!\\w)|riot|obstruct(?!\\w)")
Pure <- c("preserve(?!\\w)|ruin|exploit(?!\\w)|exploits(?!\\w)|exploited(?!\\w)|exploiting(?!\\w)|apostasy(?!\\w)|apostate(?!\\w)|heretic|piety(?!\\w)|pious(?!\\w)|purity(?!\\w)|pure|clean|steril|sacred|chast|holy(?!\\w)|holiness(?!\\w)|saint|wholesome|celiba|abstention(?!\\w)|virgin(?!\\w)|virgins(?!\\w)|virginity(?!\\w)|virginal(?!\\w)|austerity(?!\\w)|integrity(?!\\w)|modesty(?!\\w)|abstinen|abstemiousness(?!\\w)|upright(?!\\w)|limpid(?!\\w)|unadulterated(?!\\w)|maiden(?!\\w)|virtuous(?!\\w)|refined(?!\\w)|decen|immaculate(?!\\w)|innocent(?!\\w)|pristine(?!\\w)|church|disgust|deprav|disease|unclean|contagio|indecen|sin(?!\\w)|sinful|sinner|sins(?!\\w)|sinned(?!\\w)|sinning(?!\\w)|slut|whore(?!\\w)|dirt|impiety(?!\\w)|impious(?!\\w)|profan|gross(?!\\w)|repuls|sick|promiscu|lewd|adulter|debauche|defile|tramp(?!\\w)|prostitut|unchaste(?!\\w)|intemperate(?!\\w)|wanton(?!\\w)|profligate(?!\\w)|filth|trashy(?!\\w)|obscen|lax(?!\\w)|taint|stain|tarnish|debase|desecrat|wicked|blemish(?!\\w)|exploitat|pervert(?!\\w)|wretched")
Lib <-c("exploit(?!\\w)|exploits(?!\\w)|exploited(?!\\w)|exploting(?!\\w)|rights(?!\\w)|obey|obedient|duti|order|supremacy(?!\\w)|control(?!\\w)|submi|serve(?!\\w)|abide(?!\\w)|defere|defer(?!\\w)|defian|rebel|dissent|subver|disobe|defy|defector(?!\\w)|nonconformist(?!\\w)|protest(?!\\w)|free(?!\\w)|freedom(?!\\w)|liberty(?!\\w)|autonom|choice(?!\\w)|choose(?!\\w)|liberate(?!\\w)|liberation(?!\\w)|sovereign|independent(?!\\w)|independence(?!\\w)|dictat|totalitar|coerc|authoritarian|tyran")
## Flag each segment for the presence of each moral foundation.
# Helper: TRUE when any word in a segment matches the foundation's dictionary.
# vapply() replaces the fragile as.logical(lapply(lapply(...))) double-wrap and
# guarantees a plain unnamed logical vector.
matches_foundation <- function(segments, dictionary) {
  unname(vapply(segments,
                function(words) any(grepl(dictionary, words, ignore.case = TRUE, perl = TRUE)),
                logical(1)))
}
alltext$care <- matches_foundation(alltext$countingtext, Care)
alltext$fair <- matches_foundation(alltext$countingtext, Fair)
alltext$loyal <- matches_foundation(alltext$countingtext, Loyal)
alltext$auth <- matches_foundation(alltext$countingtext, Auth)
alltext$pure <- matches_foundation(alltext$countingtext, Pure)
alltext$lib <- matches_foundation(alltext$countingtext, Lib)
# A segment is 'moral' when it triggers any foundation. Direct logical OR replaces
# the original round-trip through ifelse(..., "TRUE", "FALSE") + as.logical().
alltext$anymoral <- alltext$care | alltext$fair | alltext$loyal |
  alltext$auth | alltext$pure | alltext$lib
rm(Auth,Care,Fair,Lib,Loyal,Pure,matches_foundation)
#### Export for Gorilla Context Analysis ####
# One row per (segment, foundation-present) pair, for human context review in Gorilla
gorillabase <- pivot_longer(alltext, cols = c("care","fair","loyal","auth","pure","lib","anymoral"), names_to = "Foundation", values_to = "ispresent")
gorillabase <- filter(gorillabase, ispresent == TRUE)
gorillabase <- select(gorillabase, -c(ispresent, countingtext))
gorillabase$Foundation <- recode_factor(gorillabase$Foundation, "care" = "Care", "fair" = "Fairness", "loyal" = "Loyalty", "pure" = "Purity", "auth" = "Authority", "lib" = "Liberty")
gorillabase <- filter(gorillabase, Foundation !="anymoral")
# Convert Twitter media URLs to local .png filenames; video links and missing links
# become NA. A single vectorised ifelse() replaces the original row-by-row for loop
# (grepl() returns FALSE for NA, so NA links are handled by the !is.na() guard).
hasusableimage <- !is.na(gorillabase$imagelink) & !grepl("video", gorillabase$imagelink)
gorillabase$imagelink <- ifelse(hasusableimage,
                                paste0(substring(gorillabase$imagelink, 29, nchar(gorillabase$imagelink) - 4), ".png"),
                                NA)
rm(hasusableimage)
write.csv(gorillabase, "gorillaexport.csv")
#### Gorilla Context Analysis ####
# Reads the two human reviewers' context judgements and computes accuracy and
# inter-rater reliability (Cohen's kappa, psych package) into `metadata`.
contexts <- read.csv("contexts.csv")
#Remove non-data events: keep only the response zones of interest
contexts <- dplyr::filter(contexts, Zone.Name == "Otherexplain" | Zone.Name == "Endorsement" | Zone.Name == "IsSlogan" | Zone.Name == "Outofcontext" | Zone.Name == "Rejection")
#Rename columns (the "ï.." prefix is a UTF-8 BOM artefact from read.csv)
contexts <- rename(contexts, ReviewerNumber = ï..Participant.Private.ID, Excerpt = Spreadsheet.Row)
#Recode data: anonymise reviewer ids; a recorded zone event with empty response means "pressed"
contexts[contexts$Response == "",]$Response <- NA
contexts[contexts$ReviewerNumber == 1880680,]$ReviewerNumber <- 1
contexts[contexts$ReviewerNumber == 1924705,]$ReviewerNumber <- 2
contexts[is.na(contexts$Response),]$Response <- TRUE
#Pivot to one row per excerpt/reviewer event, one column per zone
contexts <- pivot_wider(contexts, names_from = Zone.Name, values_from = Response)
contexts$Endorsement <- as.logical(contexts$Endorsement)
contexts$Outofcontext <- as.logical(contexts$Outofcontext)
contexts$Rejection <- as.logical(contexts$Rejection)
#where a button wasn't pressed, implicitly false
contexts$IsSlogan <- ifelse(is.na(contexts$IsSlogan), FALSE, TRUE)
contexts[is.na(contexts$Otherexplain),]$Otherexplain <- FALSE
contexts[is.na(contexts$Endorsement),]$Endorsement <- FALSE
contexts[is.na(contexts$Outofcontext),]$Outofcontext <- FALSE
contexts[is.na(contexts$Rejection),]$Rejection <- FALSE
#Merge rows referring to the same excerpt
contexts <- contexts %>%
  group_by(Excerpt, ReviewerNumber) %>%
  fill(IsSlogan,Otherexplain,Endorsement,Outofcontext,Rejection, .direction = "downup")
# BUG FIX: the next two lines previously referenced an undefined object `contexts2`,
# which both crashed a clean run and discarded the result of unique().
contexts <- unique(contexts)
contexts <- ungroup(contexts)
# Accuracy summaries: proportion of excerpts judged as genuine endorsements
metadata <- data.frame(Totalacc = 1)
metadata$Totalacc <- 100 * table(contexts$Endorsement)["TRUE"]/length(contexts$Endorsement)
metadata$R1acc <- 100 * table(filter(contexts, ReviewerNumber == 1)$Endorsement)["TRUE"]/nrow(filter(contexts, ReviewerNumber == 1))
metadata$R2acc <- 100 * table(filter(contexts, ReviewerNumber == 2)$Endorsement)["TRUE"]/nrow(filter(contexts, ReviewerNumber == 2))
# (A bare no-op `metadata$twotickacc` line was removed; twotickacc is computed below.)
R1 <- select(filter(contexts, ReviewerNumber == 1), Excerpt, Outofcontext, Endorsement, Rejection)
R2 <- select(filter(contexts, ReviewerNumber == 2), Excerpt, Outofcontext, Endorsement, Rejection)
R1 <- transmute(R1, response = ifelse(Endorsement == TRUE, 1, ifelse(Outofcontext == TRUE, 2, 3)), Excerpt = Excerpt) #1 = Endorse, 2 = OOC, 3 = Reject
R2 <- transmute(R2, response = ifelse(Endorsement == TRUE, 1, ifelse(Outofcontext == TRUE, 2, 3)), Excerpt = Excerpt)
Agreement <- left_join(R1, R2, by = "Excerpt", suffix = c("_R1", "_R2"))
# Fix factor levels so the agreement table is always 3x3 even if a category is unused
Agreement$response_R1 <- factor(Agreement$response_R1, levels = c("1", "2", "3"))
Agreement$response_R2 <- factor(Agreement$response_R2, levels = c("1", "2", "3"))
Agreementtable <- table(Agreement$response_R1, Agreement$response_R2)
# Accuracy where both reviewers independently endorsed, plus Cohen's kappa
metadata$twotickacc <- 100 * Agreementtable[1,1]/sum(Agreementtable)
metadata$InterRaterReliability <- cohen.kappa(Agreementtable, alpha=.05)$kappa
#### Models ####
LogReg <- function(DV, model) {
##ASSUMPTION CHECKS
#Independent variable is binary
if (typeof(DV)!="logical") {
return("ERROR: Non-logical response variable")
}
##TEST
#Univariate
if (model == "univariate") {
model <- glm(family = binomial(link = "logit"), DV ~ brexiteer, data = alltext)
return(model)
}
#Multivariate - person predictors
if (model == "person") {
model <- glm(family = binomial(link = "logit"), DV ~ brexiteer + conservative + labour, data = alltext)
if (any(vif(model)>5)) {
print(vif(model))
return("MODEL NOT RUN - Multicollinearity violation")
} else {
return(model)
}
}
#Multivariate - context and person predictors
if (model == "context") {
model <- glm(family = binomial(link = "logit"), DV ~ brexiteer + conservative + labour + medium + days_to_go + hasimage, data = alltext)
if (any(vif(model)>5)) {
print(vif(model))
return("MODEL NOT RUN - Multicollinearity violation")
} else {
return(model)
}
}
#Multivariate - leavecamps
if (model == "leavecamps") {
model <- glm(family = binomial(link = "logit"), DV ~ care + fair + auth + loyal + pure + lib + days_to_go, data = leavecamps)
if (any(vif(model)>5)) {
print(vif(model))
return("MODEL NOT RUN - Multicollinearity violation")
} else {
return(model)
}
}
}
LinReg <- function(DV, model) {
  # Fit a linear regression of tweet popularity on one of two predefined
  # predictor sets (moral-foundation framings, optionally plus tweet context).
  #
  # Args:
  #   DV:    numeric vector, the response (z-scored favourites or retweets).
  #   model: "twitterframes" or "twitterall", selecting the predictor set.
  # Returns:
  #   A fitted lm object, or a character string explaining why no model was run.
  # NOTE(review): predictors resolve against the global `allTwitter` data frame,
  # and vif() comes from the car package -- both must be in scope.
  ##ASSUMPTION CHECKS
  #Response variable must be numeric.
  # BUG FIX: the original `class(DV) != "numeric"` wrongly rejected integer
  # vectors (class "integer"); is.numeric() accepts both integer and double.
  if (!is.numeric(DV)) {
    return("ERROR: Non-numeric response variable")
  }
  ##TEST
  #Multivariate - framings only; refuse to report if any VIF > 5 (multicollinearity)
  if (model == "twitterframes") {
    model <- lm(DV ~ care + fair + loyal + auth + lib + pure, data = allTwitter)
    if (any(vif(model)>5)) {
      print(vif(model))
      return("MODEL NOT RUN - Multicollinearity violation")
    } else {
      return(model)
    }
  }
  #Multivariate - framings plus tweet context
  if (model == "twitterall") {
    model <- lm(DV ~ care + fair + loyal + auth + lib + pure + days_to_go + reply + hasimage, data = allTwitter)
    if (any(vif(model)>5)) {
      print(vif(model))
      return("MODEL NOT RUN - Multicollinearity violation")
    } else {
      return(model)
    }
  }
  #Unknown model keyword: fail loudly instead of silently returning NULL
  return("ERROR: Unknown model specification")
}
##Run univariate regressions (campaign side as sole predictor; see LogReg above)
unicare <- LogReg(alltext$care, "univariate")
unifair <- LogReg(alltext$fair, "univariate")
uniloyal <- LogReg(alltext$loyal, "univariate")
uniauth <- LogReg(alltext$auth, "univariate")
unipure <- LogReg(alltext$pure, "univariate")
unilib <- LogReg(alltext$lib, "univariate")
##Run person-level regressions (campaign side + party affiliations)
personcare <- LogReg(alltext$care, "person")
personfair <- LogReg(alltext$fair, "person")
personloyal <- LogReg(alltext$loyal, "person")
personauth <- LogReg(alltext$auth, "person")
personpure <- LogReg(alltext$pure, "person")
personlib <- LogReg(alltext$lib, "person")
##Run context-level regressions (person predictors + medium, timing, image presence)
contextcare <- LogReg(alltext$care, "context")
contextfair <- LogReg(alltext$fair, "context")
contextloyal <- LogReg(alltext$loyal, "context")
contextauth <- LogReg(alltext$auth, "context")
contextpure <- LogReg(alltext$pure, "context")
contextlib <- LogReg(alltext$lib, "context")
# Round the AIC of every fitted model in place so the stargazer tables below
# display whole numbers. Same order as the original 18 statements: uni, context, person.
for (model_name in c("unicare", "unifair", "uniloyal", "uniauth", "unipure", "unilib",
                     "contextcare", "contextfair", "contextloyal", "contextauth", "contextpure", "contextlib",
                     "personcare", "personfair", "personloyal", "personauth", "personpure", "personlib")) {
  fitted_model <- get(model_name)
  fitted_model$aic <- round(fitted_model$aic)
  assign(model_name, fitted_model)
}
# Summary statistics table for the numeric columns of the central dataframe
stargazer(alltext, type = "html", keep = c("days_to_go","favourites","retweets"), omit.summary.stat = c("min","max"), out = "tables/summary.html", digits = 0)
##Get Univariate logistic regression table
stargazer(unicare,unifair,uniloyal,uniauth,unipure,unilib,
          type = "html", title = "Fig. 1 Univariate Models", align = TRUE, out = "tables/model1.html", omit.stat = c("ll"),
          dep.var.labels = "Moral Foundation",
          column.labels = c("Care","Fairness","Loyalty","Authority","Purity","Liberty"))
rm(unicare,unifair,uniloyal,uniauth,unipure,unilib)
##Get Person-level Multivariate logistic regression table
stargazer(personcare,personfair,personloyal,personauth,personpure,personlib,
          type = "html", title = "Fig. 2 Person-level multivariate Models", align = TRUE, out = "tables/model2.html", omit.stat = c("ll"),
          dep.var.labels = "Moral Foundation",
          column.labels = c("Care","Fairness","Loyalty","Authority","Purity","Liberty"))
rm(personcare,personfair,personloyal,personauth,personpure,personlib)
##Get Context-level Multivariate logistic regression table
stargazer(contextcare,contextfair,contextloyal,contextauth,contextpure,contextlib,
          type = "html", title = "Fig. 3 Context-level multivariate Models", align = TRUE, out = "tables/model3.html", omit.stat = c("ll"),
          dep.var.labels = "Moral Foundation",
          column.labels = c("Care","Fairness","Loyalty","Authority","Purity","Liberty"))
##Make exponent-coefficient tables (exp() turns log-odds coefficients into odds ratios)
stargazer(contextcare,contextfair,contextloyal,contextauth,contextpure,contextlib,
          type = "html", title = "Fig. 4 Final model coefficient exponents", align = TRUE, out = "tables/table4.html",
          omit.stat = c("ll","aic"), dep.var.labels = "Moral Foundation", apply.coef = function(x) exp(x), report = "vc",
          omit.table.layout = "sn", column.labels = c("Care","Fairness","Loyalty","Authority","Purity","Liberty"), omit = "Constant")
rm(contextcare,contextfair,contextloyal,contextauth,contextpure,contextlib)
##Compare Leave.EU and Vote Leave framing profiles
leavecamps<-filter(alltext, speaker == "Vote Leave"|speaker == "Leave.EU")
# TRUE when the tweet came from Leave.EU: the factor comparison already yields a
# logical, so the original ifelse(..., TRUE, FALSE) wrapper was redundant.
leavecamps$leaveeu<-leavecamps$speaker == "Leave.EU"
leavecampsmodel<-LogReg(leavecamps$leaveeu, model = "leavecamps")
stargazer(leavecampsmodel,
          type = "html", title = "Fig. 5 Leave camps differentiated", align = TRUE, out = "tables/model4.html",
          omit.stat = c("ll"), dep.var.labels.include = FALSE,
          covariate.labels = c("Care","Fairness","Loyalty","Authority","Purity","Liberty"),
          column.labels = c("Tweet is from Leave.EU rather than Vote Leave"))
##Z score favourites and retweets within-person, on a log(x+1) scale (guards against zero counts)
#Get person means
allTwitter = filter(alltext, medium == "Twitter")
personmeans <- group_by(allTwitter,speaker) %>%
  summarise(meanfavs = mean(favourites), meanretweets = mean(retweets), sdfavs = sd(favourites), sdretweets = sd(retweets))
allTwitter = left_join(allTwitter,personmeans)
#Get z-scores
allTwitter$zfavs = (log(allTwitter$favourites+1) - log(allTwitter$meanfavs+1))/log(allTwitter$sdfavs+1)
# BUG FIX: this previously read `$sdretweet` and only worked through data.frame `$`
# partial matching; on a tibble (or with another sdretweet* column) it would
# silently return NULL. Use the full column name.
allTwitter$zrts = (log(allTwitter$retweets+1) - log(allTwitter$meanretweets+1))/log(allTwitter$sdretweets+1)
##Model tweet popularity
stargazer(LinReg(allTwitter$zfavs, model = "twitterframes"), LinReg(allTwitter$zfavs, model = "twitterall"), LinReg(allTwitter$zrts, model = "twitterframes"),
          LinReg(allTwitter$zrts, model = "twitterall"), type = "html", title = "Fig. 6 Multivariate Linear Models", align = TRUE, out = "tables/model5.html",
          table.placement = "h", column.labels = c("Favourites","Favourites","Retweets","Retweets"))
rm(allTwitter,leavecamps,leavecampsmodel,personmeans)
#### Graphics ####
# Proportion of segments triggering each foundation, by campaign side
plotdata = alltext %>%
  group_by(brexiteer) %>%
  summarise(Care = sum(care == TRUE) / n(),
            Fairness = sum(fair == TRUE) / n(),
            Loyalty = sum(loyal == TRUE) / n(),
            Authority = sum(auth == TRUE) / n(),
            Purity = sum(pure == TRUE) / n(),
            Liberty = sum(lib == TRUE) / n())
# Transpose so foundations become rows. as.matrix() yields a numeric matrix here
# because every column is logical/numeric (logical brexiteer coerces to 0/1).
plotdata = as.data.frame(t(as.matrix(plotdata, ncol = 6)))
# Column 1 is brexiteer == FALSE (Remain), column 2 TRUE (Leave): group_by sorts FALSE first
colnames(plotdata) <- c("Remain", "Leave")
# Drop the transposed brexiteer indicator row, then express proportions as percentages
plotdata <- plotdata[-1,]
plotdata = plotdata * 100
plotdata$foundation = rownames(plotdata)
plotdata = pivot_longer(plotdata, cols = c("Remain", "Leave"), names_to = "Campaign")
library(ggplot2)
# Dodged bar chart comparing framing frequency between the two campaigns
foundplot = ggplot(plotdata, aes(x = foundation, y = value, fill = Campaign)) +
  geom_bar(position = "dodge", stat = "identity") +
  labs(title = "Comparative Frequency of Framings", x = "Foundation", y = "Frequency (% of segments)") +
  scale_fill_manual(values = c("firebrick", "steelblue1")) +
  scale_y_continuous(breaks = c(0,5,10,15,20,25)) +
  theme_classic()
print(foundplot)
ggsave("foundplot.jpg", plot = foundplot, device = "jpeg")
|
0e9c5e6137aa25259a2d76d57d8d7fe9c1c4de67
|
4a558dc177db28e3e2b1dd9f1ccf3426e6eb79c9
|
/sunday 2014_08_03/stats.R
|
4ebf2e8211d757ede362fbde7fa14392d777f215
|
[] |
no_license
|
cmcoffman/rlgl
|
3dc46562c027f69bfd89d6b9be6aa517e780529e
|
d7649d1703e8cd9f1cdc38b20afceecb0418c5d3
|
refs/heads/master
| 2016-09-05T15:22:29.613025
| 2014-08-05T15:57:59
| 2014-08-05T15:57:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
stats.R
|
# Fit GFP as a function of green intensity, OD600, plate, and plate position
# (row/column as factors, with interaction), then report the fit summary.
# NOTE(review): `cshl.all` must already be loaded in the workspace.
# Idiom fixes only: `<-` instead of `=` for assignment, conventional spacing;
# the model formula is unchanged.
lm0 <- lm(GFP ~ green.intensity * OD.600 + plate + factor(row) * factor(column), data = cshl.all)
summary(lm0)
|
0815b6492d152ff96577f247371353c5a7f20ddd
|
cf75f57c49e44070bfb93e04b28e9386a2d2783a
|
/man/itree.Rd
|
3e4d23ba63ce3e1db44672f429333bce0febe36c
|
[] |
no_license
|
ficol/ZUM
|
f7975b259f2991a56fe4fd14cff5278051ed9953
|
7e9d3ce484ff47e6246f2c5e6719668c66385114
|
refs/heads/main
| 2023-08-17T08:12:40.055435
| 2021-08-08T03:48:17
| 2021-08-08T03:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 675
|
rd
|
itree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iforest.R
\name{itree}
\alias{itree}
\title{Builds isolation tree}
\usage{
itree(X, max_depth, curr_depth = 0)
}
\arguments{
\item{X}{data to create tree}
\item{max_depth}{maximal depth of tree}
\item{curr_depth}{current depth of tree}
}
\value{
Node of isolation tree
}
\description{
This builds an isolation tree from the given parameters. If the chosen variable is continuous, the split value is a random value between its minimum and maximum.
If the chosen variable is discrete, the split value is a subset of its possible values.
A node becomes a leaf if the sample size is <= 1, every sample is identical, or the node's depth equals the maximum depth.
}
|
e115eae608dc774cf1734ef4430c415ea56fc92b
|
e766292e77e01e5fb3fcb1ae8bdbab7562a1f113
|
/src/resources/datacleaner.r
|
39d3f6d462acfb0fc954ab3d9b8d5541b824f16c
|
[] |
no_license
|
kiskacsa08/DiplomaServerSide
|
af8f7580a165110166265655c7899299599e2a27
|
e3a16b9ee5bbec7fbcc39b5fd5d4342b5ebd3504
|
refs/heads/master
| 2021-01-10T08:25:17.374465
| 2016-04-27T15:57:44
| 2016-04-27T15:57:44
| 49,012,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,607
|
r
|
datacleaner.r
|
library(RJDBC)
TEAMIDS <- 0:22
# Convert result string to result class (1=home win, 0=draw, -1=away win)
# Convert a result string such as "2–1" (home–away goals, en-dash separated)
# to a result class: 1 = home win, 0 = draw, -1 = away win.
# NA input yields NA. Malformed strings now also yield NA (as.numeric gives
# NA, which propagates through sign) instead of erroring in the comparison.
result2class <- function(goals){
  if (is.na(goals)) {
    return(NA)
  }
  # Split "home–away" on the en dash and coerce both parts to numbers.
  g <- as.numeric(strsplit(as.character(goals), "–")[[1]])
  # sign() of the goal difference maps directly onto the 1/0/-1 class coding.
  sign(g[1] - g[2])
}
# Negate a number
negatenumber <- function(number){
-number
}
# Extract results per team from match data
extractteams <- function(matches){
# Create a list
teams <- list()
for (t in TEAMIDS) {
# Select the team's home matches
df1 <- subset(matches, matches[,"HOME_TEAM_ID"] == t)
# Convert the result to class
results <- df1[,'RESULT']
CLASS <- sapply(results, FUN = result2class)
# Append the CLASS column
df1 <- cbind(df1, CLASS)
# Do the same with the team's away matches
df2 <- subset(matches, matches[,"AWAY_TEAM_ID"] == t)
results <- df2[,'RESULT']
CLASS <- sapply(results, FUN = result2class)
CLASS <- sapply(CLASS, FUN = negatenumber)
df2 <- cbind(df2, CLASS)
# Join the two dataframes
df <- rbind(df1, df2)
# # We don't want two matches with the same round number
# df <- df[order(df$ROUND, df$MATCH_DATE),]
# # If there is more than 38 (one season) matches for a team
# if (nrow(df) > 38) {
# for (j in 1:38) {
# # Get the matches with the same round number
# rows <- which(df$ROUND == j)
# if (length(rows) > 1) {
# for (k in 2:length(rows)) {
# # Assign a new round number to the latest matches
# df[rows[k],"ROUND"] <- df[rows[k],"ROUND"] + (k-1)*38
# }
# }
# }
# }
# Order the dataframe by ROUND
df <- df[order(df$ROUND),]
# Add the dataframe to the list
teams[[t+1]] <- df
}
return(teams)
}
#----------------------------------------------------------------------
# Load the database driver (Apache Derby over JDBC via RJDBC)
print('init')
jdbcDriver <- JDBC(driverClass = "org.apache.derby.jdbc.ClientDriver", classPath = "./derbyclient.jar")
print('driver loaded')
# Connect to database
# NOTE(review): credentials are hard-coded in plain text here.
jdbcConnection <- dbConnect(jdbcDriver, "jdbc:derby://localhost:1527//home/server/.netbeans-derby/DiplomaOddsDatabase", "diploma", "diploma")
print('connected')
# Read odds data
odds <- dbReadTable(jdbcConnection, "ODDS")
# odds <- read.csv("odds.csv")
# Remove the REFRESH_DATE column
odds <- odds[,!(names(odds) %in% c("REFRESH_DATE"))]
# Order by MATCH_ID
odds <- odds[order(odds$MATCH_ID),]
# Change the missing values (-1) to NAs
odds[odds == -1] <- NA
# Remove the rows with all NAs: temporarily split off MATCH_ID (column 1)
# so the all-NA test only looks at the odds columns, then reattach it.
MATCH_ID <- odds$MATCH_ID
odds <- odds[,-1]
MATCH_ID <- MATCH_ID[apply(odds,1,function(x)any(!is.na(x)))]
odds <- odds[apply(odds,1,function(x)any(!is.na(x))),]
odds <- cbind(MATCH_ID, odds)
# Remove false data: drop rows where any bookmaker odd is >= 20
odds <- odds[apply(odds[,2:ncol(odds)] < 20 | is.na(odds[,2:ncol(odds)]), 1, all),]
# Reset the row indexes
rownames(odds) <- NULL
# Save the column names (used later when copying odds into the feature frame)
ocolumns <- names(odds)
# Read matches data
matches <- dbReadTable(jdbcConnection, "MATCHES")
# matches <- read.csv("matches.csv")
# Change the missing values (N/A) to NAs
matches[matches == "N/A"] <- NA
# Sentinel date used for matches that have no real date yet.
initial_date <- as.POSIXct("1970-01-01 00:00:00.0")
# We don't want two matches with the same round number
matches <- matches[order(matches$ROUND, matches$MATCH_DATE),]
# If there is more than 38 (one season) matches for a team:
# renumber later seasons so round j of season k becomes j + (k-1)*38.
for (j in 1:38) {
# If there are more than 10 matches in one round (10 matches = one round)
if (length(which(matches$ROUND == j)) > 10) {
# Get the number of seasons
seasons <- length(which(matches$ROUND == j))/10
# Save the actual round
round <- matches[which(matches$ROUND == j),]
# Move sentinel-dated matches to the end so they land in the last season.
initialMatches <- round[round$MATCH_DATE == initial_date,]
round <- round[!round$ID %in% initialMatches$ID,]
round <- rbind(round, initialMatches)
for (k in 1:seasons) {
# get the actual seasons matches
act_round <- round[((k-1)*10+1):(k*10),]
for (l in 1:10) {
act_round[l,"ROUND"] <- act_round[l,"ROUND"] + (k-1)*38
}
round[((k-1)*10+1):(k*10),] <- act_round
}
matches[which(matches$ROUND == j),] <- round
}
}
# Remove the matches without result or not in one week time
matches$MATCH_DATE <- as.POSIXct(matches$MATCH_DATE)
one_week_later <- Sys.time() + 7*24*60*60
matches <- matches[(complete.cases(matches) | matches$MATCH_DATE < one_week_later),]
matches <- matches[(matches$MATCH_DATE != initial_date),]
matches <- matches[order(matches$ROUND),]
# Extract results by team (list indexed by team id + 1)
teams <- extractteams(matches)
# Extract features: one row per match that has odds data, combining the
# result class, the latest odds snapshot, and the last four results of each
# side's team.
# Create new dataframe
# NOTE(review): `df` is grown row-by-row inside the loop, which is O(n^2)
# copying; acceptable for this data size but slow for large tables.
df <- data.frame()
# Variable for tracking the complete cases
j <- 1
# Iterate through the matches
for (i in 1:nrow(matches)) {
row <- matches[i,]
# If there is odds data for the match
if (row$ID %in% odds$MATCH_ID) {
# Save the odds for the match
o1 <- odds[odds$MATCH_ID == row$ID,]
# Save the last odds data for the match
o2 <- o1[nrow(o1),]
# Add the result class to the dataframe
df[j,"CLASS"] <- result2class(row$RESULT)
# Add the odds to the dataframe
for (column in ocolumns) {
df[j,column] <- o2[,column]
}
# Get the home team's matches (teams list is indexed by team id + 1)
hteam <- teams[row$HOME_TEAM_ID + 1]
# I need only the date and the result
if (length(hteam) > 0) {
hteam2 <- hteam[[1]][,c("MATCH_DATE", "CLASS")]
}
# Keep only matches before this one, newest first, at most four
hteam2 <- hteam2[hteam2$MATCH_DATE < row$MATCH_DATE,]
hteam2 <- hteam2[order(hteam2$MATCH_DATE, decreasing = TRUE),]
hteam2 <- head(hteam2, n = 4)
# Get the home team's previous matches' result
if (nrow(hteam2) >= 1) {
df[j,"HPREV1"] <- hteam2[1, "CLASS"]
}
if (nrow(hteam2) >= 2) {
df[j,"HPREV2"] <- hteam2[2, "CLASS"]
}
if (nrow(hteam2) >= 3) {
df[j,"HPREV3"] <- hteam2[3, "CLASS"]
}
if (nrow(hteam2) >= 4) {
df[j,"HPREV4"] <- hteam2[4, "CLASS"]
}
# Do the same to the away team
ateam <- teams[row$AWAY_TEAM_ID + 1]
if (length(ateam) > 0) {
ateam2 <- ateam[[1]][,c("MATCH_DATE", "CLASS")]
}
ateam2 <- ateam2[ateam2$MATCH_DATE < row$MATCH_DATE,]
ateam2 <- ateam2[order(ateam2$MATCH_DATE, decreasing = TRUE),]
ateam2 <- head(ateam2, n = 4)
if (nrow(ateam2) >= 1) {
df[j,"APREV1"] <- ateam2[1, "CLASS"]
}
if (nrow(ateam2) >= 2) {
df[j,"APREV2"] <- ateam2[2, "CLASS"]
}
if (nrow(ateam2) >= 3) {
df[j,"APREV3"] <- ateam2[3, "CLASS"]
}
if (nrow(ateam2) >= 4) {
df[j,"APREV4"] <- ateam2[4, "CLASS"]
}
# Increment the tracking variable
j <- j + 1
}
}
# Remove the MATCH_ID column from the dataframe
# df <- df[,!(names(df) %in% c("MATCH_ID"))]
# Get the bookmakers' name (columns containing HOME/DRAW/AWAY)
home_bookmakers <- sapply(names(df), function(text) {
grep("HOME", names(df), value = TRUE)
})[,1]
draw_bookmakers <- sapply(names(df), function(text) {
grep("DRAW", names(df), value = TRUE)
})[,1]
away_bookmakers <- sapply(names(df), function(text) {
grep("AWAY", names(df), value = TRUE)
})[,1]
# Create 'average' bookmaker: row-wise mean odds across bookmakers
df[,"HOME_AVG"] <- apply(df[,home_bookmakers], 1, mean, na.rm=TRUE)
df[,"DRAW_AVG"] <- apply(df[,draw_bookmakers], 1, mean, na.rm=TRUE)
df[,"AWAY_AVG"] <- apply(df[,away_bookmakers], 1, mean, na.rm=TRUE)
df$MATCH_ID <- as.integer(df$MATCH_ID)
# Write out the table
dbWriteTable(jdbcConnection, "CLEANEDDATA", df)
# Close the database connection
dbDisconnect(jdbcConnection)
# write.csv(df, "./cleaned.csv")
# NOTE(review): wiping the workspace and quitting R inside a script is
# hostile to interactive use; presumably intended for batch runs only.
rm(list=ls())
gc()
q("no")
|
efa6bf3620fd3c837201ee491c6c30c58860d60b
|
e5ef3cde1e45a34dfb6388d9b934231bb4ea929a
|
/Assignment 2 code.R
|
5a0dc672c0e323587987bc6867a8fa6c5e01ddf8
|
[
"MIT"
] |
permissive
|
gopala-goyal/music-records-project
|
25935a9ae9b1c4500ffee95fd016996d41742f16
|
990dc8c3539d09c37f2c4a3b48793bdc66ff6724
|
refs/heads/main
| 2023-07-14T23:34:59.773819
| 2021-09-09T19:45:13
| 2021-09-09T19:45:13
| 402,879,513
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,743
|
r
|
Assignment 2 code.R
|
# Source of data and code: Dimitris Bertsimas @ MIT
# Logistic-regression exercise: predict whether a song reaches the Top 10.
# NOTE(review): absolute, machine-specific CSV path — will only run locally.
MusicRecord<-read.csv("Documents/Study/MMA 2022S/867 - Predictive Modelling/Music-records-project/MusicData.csv") #load data
# How many songs does the dataset include for which the artist name is "Michael Jackson"?
table(MusicRecord$artistname == "Michael Jackson")
# Alternatively, use the pipe %>% function in "dplyr" package
library(dplyr)
MusicRecord %>% filter(artistname == "Michael Jackson") %>% summarize(count = n())
# first use the filter function to split the data into a training set "SongsTrain"
# consisting of all the observations up to and including 2009 song releases, and a testing set "SongsTest",
# consisting of the 2010 song releases.
SongsTrain = MusicRecord %>% filter(year <= 2009)
SongsTest = MusicRecord %>% filter(year == 2010)
# we want to exclude some of the variables in our dataset from being used as independent variables
# ("year", "songtitle", "artistname", "songID", and "artistID"). To do this, we can use the following trick.
# First define a vector of variable names called nonvars - these are the variables that we won't use in our model.
nonvars = c("year", "songtitle", "artistname", "songID", "artistID")
# To remove these variables from your training and testing sets:
SongsTrain = SongsTrain[ , !(names(SongsTrain) %in% nonvars) ]
SongsTest = SongsTest[ , !(names(SongsTest) %in% nonvars) ]
# build a logistic regression model to predict Top10 using the training data.
# We can now use "." in place of enumerating all the remaining independent variables in the following way:
SongsLog1 = glm(Top10 ~ ., data=SongsTrain, family=binomial)
summary(SongsLog1)
# True or False?
# 1. The higher our confidence about time signature, key and tempo, the more likely the song is to be in the Top 10
# 2. In general, if the confidence is low for the time signature, tempo, and key, then the song is more likely to be complex. What does our model suggest in terms of complexity?
# You can make predictions on the test set by using the command:
testPredict = predict(SongsLog1, newdata=SongsTest, type="response")
# Then, you can create a confusion matrix with a threshold of 0.15 by using the table command:
confusion.matrix<-table(SongsTest$Top10, testPredict >= 0.15)
# The accuracy of the model is?
Count.correct<-confusion.matrix[1,1]+confusion.matrix[2,2]
Count.wrong<-confusion.matrix[1,2]+confusion.matrix[2,1]
Accuracy.rate<-Count.correct/(Count.correct+Count.wrong)
# What is the prediction accuracy of the model?
# To generate the ROC curve
# NOTE(review): install.packages() inside a script re-installs on every run;
# better to install once interactively and only library() here.
install.packages("pROC")
library(pROC)
test_prob = predict(SongsLog1, newdata = SongsTest, type = "response")
test_roc = roc(SongsTest$Top10 ~ test_prob, plot = TRUE, print.auc = TRUE)
|
edc7aaead48779da348375a357e1ebd0dcf2848a
|
0045a18d56af8eb1d47d935da6c4dbede9ac9d72
|
/Lyon-Cours_2017-11-09 Methodes allocation.R
|
b93ed6d0dc8bf95bf8a85f67208c9e6e15ce4972
|
[] |
no_license
|
emarceau/TheoRisque2018
|
040a82c3f5179a7fde97dae751804251ae1c6400
|
89867759f9edb309317415d4d239a24b254fb7c3
|
refs/heads/master
| 2020-04-11T15:56:47.194147
| 2019-11-11T12:18:30
| 2019-11-11T12:18:30
| 161,908,259
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 938
|
r
|
Lyon-Cours_2017-11-09 Methodes allocation.R
|
# Lyon, fall 2017
# Thursday 9 November 2017
#
# Euler capital-allocation method
# Monte Carlo simulation of VaR and TVaR
# X1,...,Xn = independent risks
set.seed(2017)
nsim<-100000
nrisk<-3
# One column of uniforms per risk; inverse-CDF sampling below.
matU<-matrix(runif(nrisk*nsim),nsim,nrisk)
X1<-qgamma(matU[,1],2,1/10)
X2<-qlnorm(matU[,2],log(20)-0.5,1)
X3<-qgamma(matU[,3],0.5,1/40)
# Aggregate loss
S<-X1+X2+X3
#cbind(1:nsim,X1,X2,X3,S)
kap<-0.99
# Empirical VaR at level kap (type = 1: inverse of the empirical CDF)
VaRS<-quantile(S,probs=kap,type=1)
VaRS
# Monte Carlo TVaR: mean of aggregate losses exceeding the VaR
TVaRS<-sum(S*(1*(S>VaRS)))/nsim/(1-kap)
TVaRS
# TVaR-based (Euler) allocation: each risk's contribution on the event S > VaR
C1<-sum(X1*(1*(S>VaRS)))/nsim/(1-kap)
C2<-sum(X2*(1*(S>VaRS)))/nsim/(1-kap)
C3<-sum(X3*(1*(S>VaRS)))/nsim/(1-kap)
# Stand-alone VaR/TVaR of each individual risk, for comparison
VaRX1<-quantile(X1,probs=kap,type=1)
TVaRX1<-sum(X1*(1*(X1>VaRX1)))/nsim/(1-kap)
VaRX2<-quantile(X2,probs=kap,type=1)
TVaRX2<-sum(X2*(1*(X2>VaRX2)))/nsim/(1-kap)
VaRX3<-quantile(X3,probs=kap,type=1)
TVaRX3<-sum(X3*(1*(X3>VaRX3)))/nsim/(1-kap)
# Contributions sum to the aggregate TVaR (full-allocation property)
c(C1,C2,C3,C1+C2+C3,TVaRS)
c(TVaRX1,TVaRX2,TVaRX3)
mean(X1)
mean(X2)
mean(X3)
TVaRX1
|
b8b213f0f62b9638c424d068f63989e9dc7bf1a8
|
f1556a59213e9dafb25db0d01760a1443c55b6b2
|
/models_old/LGBM_01/functions.R
|
b1ae77738c2c721df934c4d1af690d946f94d380
|
[] |
no_license
|
you1025/probspace_youtube_view_count
|
0e53b0e6931a97b39f04d50a989a1c59522d56a7
|
f53d3acd6c4e5e6537f8236ad545d251278decaa
|
refs/heads/master
| 2022-11-13T13:22:51.736741
| 2020-07-12T04:14:35
| 2020-07-12T04:14:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,287
|
r
|
functions.R
|
# NOTE(review): per repo metadata this file IS functions.R, so this line
# appears to source the file from within itself (infinite recursion when
# sourced). Confirm whether a different functions.R is intended.
source("functions.R", encoding = "utf-8")
# Build the preprocessing recipe: drop identifier/text columns, then
# log-transform the skewed count features and the response.
create_recipe <- function(data) {
recipe <- create_feature_engineerging_recipe(data)
recipe %>%
# Remove columns not used for modelling (ids, titles, raw text, dates)
recipes::step_rm(
id,
video_id,
title,
publishedAt,
channelId,
channelTitle,
collection_date,
# tags,
thumbnail_link,
description
) %>%
# Log transform (offset 1 to handle zeros)
recipes::step_log(
likes,
dislikes,
sum_likes_dislikes,
tag_characters,
tag_count,
comment_count,
description_length,
url_count,
offset = 1
) %>%
# Signed log transform: preserves the sign of the likes-dislikes gap
recipes::step_mutate(
diff_likes_dislikes = sign(diff_likes_dislikes) * log(abs(diff_likes_dislikes) + 1)
) %>%
# Log-transform the view count response; skip = T means this step is not
# applied at bake time (test `y` must be logged manually by the caller).
recipes::step_log(y, offset = 1, skip = T)
}
# LightGBM-only training helper.
# Fits a LightGBM regressor on the training part of `split`, preprocessing
# with `recipe`; `formula`'s term labels select the base feature columns.
# `hyper_params` is a list of LightGBM hyper-parameters.
# Returns a one-row tibble with train_rmse and test_rmse (on the log scale).
train_and_eval_LGBM <- function(split, recipe, formula, hyper_params) {
options("dplyr.summarise.inform" = F)
# Column-selection helper: keep the response, the formula terms, and the
# per-category aggregate columns.
filter_columns <- function(data) {
# Columns named explicitly by the model formula (plus the response)
target_columns <- c("y", attr(terms(formula), "term.labels"))
data %>%
dplyr::select(
dplyr::all_of(target_columns),
# keep all "_mean_" aggregate columns
dplyr::matches("_mean_"),
# keep all "_median_" aggregate columns
dplyr::matches("_median_"),
# keep all "_min_" aggregate columns
dplyr::matches("_min_"),
# # "_max_" aggregates (currently disabled)
# dplyr::matches("_max_"),
# # "_sd_" aggregates (currently disabled)
# dplyr::matches("_sd_"),
-dplyr::matches("flg_no_tags_"),
-dplyr::matches("flg_no_description_"),
)
}
# Build the preprocessed train/validation/test matrices
lst.train_valid_test <- recipe %>%
{
recipe <- (.)
# Recipe trained (prepped) on the training split only
trained_recipe <- recipes::prep(recipe, training = rsample::training(split))
# train data
df.train.baked <- recipes::juice(trained_recipe)
df.train <- df.train.baked %>%
# attach per-category summary features (computed from train itself)
add_features_per_category(., .) %>%
# hand-rolled label encoding
transform_categories() %>%
# keep only the modelling columns
filter_columns()
x.train <- df.train %>%
dplyr::select(-y) %>%
as.matrix()
y.train <- df.train$y
# secondary split of the training data, used for early stopping
train_valid_split <- rsample::initial_split(df.train, prop = 4/5, strata = "categoryId")
x.train.train <- rsample::training(train_valid_split) %>%
dplyr::select(-y) %>%
as.matrix()
y.train.train <- rsample::training(train_valid_split)$y
x.train.valid <- rsample::testing(train_valid_split) %>%
dplyr::select(-y) %>%
as.matrix()
y.train.valid <- rsample::testing(train_valid_split)$y
# wrap in LightGBM Dataset objects
dtrain <- lightgbm::lgb.Dataset(
data = x.train.train,
label = y.train.train
)
dvalid <- lightgbm::lgb.Dataset(
data = x.train.valid,
label = y.train.valid,
reference = dtrain
)
# test data: category features come from the TRAIN data to avoid leakage
df.test <- recipes::bake(trained_recipe, new_data = rsample::testing(split)) %>%
add_features_per_category(df.train.baked) %>%
# hand-rolled label encoding
transform_categories() %>%
# keep only the modelling columns
#dplyr::select(dplyr::all_of(target_columns))
filter_columns()
x.test <- df.test %>%
dplyr::select(-y) %>%
as.matrix()
# The recipe's step_log(y, skip = T) is not applied at bake time, so the
# log(y + 1) transform is applied to the test response manually here.
y.test <- df.test$y %>% { (.) + 1 } %>% log
list(
## for model fitting
train.dtrain = dtrain,
train.dvalid = dvalid,
# for computing train RMSE
x.train = x.train,
y.train = y.train,
## for computing test RMSE
x.test = x.test,
y.test = y.test
)
}
# Fit the model
model.fitted <- lightgbm::lgb.train(
# learning parameters
params = list(
boosting_type = "gbdt",
objective = "regression",
metric = "rmse",
# user defined
max_depth = hyper_params$max_depth,
num_leaves = hyper_params$num_leaves,
min_data_in_leaf = hyper_params$min_data_in_leaf,
feature_fraction = hyper_params$feature_fraction,
bagging_freq = hyper_params$bagging_freq,
bagging_fraction = hyper_params$bagging_fraction,
lambda_l1 = hyper_params$lambda_l1,
lambda_l2 = hyper_params$lambda_l2,
seed = 1234
),
# training and early-stopping validation data
data = lst.train_valid_test$train.dtrain,
valids = list(valid = lst.train_valid_test$train.dvalid),
# learning rate / number of boosting rounds
learning_rate = hyper_params$learning_rate,
nrounds = 20000,
early_stopping_rounds = 200,
verbose = -1,
# columns LightGBM should treat as categorical
categorical_feature = c(
"categoryId"
#,"comments_disabled"
#,"ratings_disabled"
)
)
# Compute RMSE (the original comment said "MAE", but yardstick::rmse is used)
train_rmse <- tibble::tibble(
actual = lst.train_valid_test$y.train,
pred = predict(model.fitted, lst.train_valid_test$x.train)
) %>%
yardstick::rmse(truth = actual, estimate = pred) %>%
dplyr::pull(.estimate)
test_rmse <- tibble::tibble(
actual = lst.train_valid_test$y.test,
pred = predict(model.fitted, lst.train_valid_test$x.test)
) %>%
yardstick::rmse(truth = actual, estimate = pred) %>%
dplyr::pull(.estimate)
tibble::tibble(
train_rmse = train_rmse,
test_rmse = test_rmse
)
}
# Convert factor/logical columns to the integer codes LightGBM expects.
# Factor-backed categorical columns become 0-based integer labels
# (as.integer(factor) - 1L); logical flag columns become plain 0/1 integers.
transform_categories <- function(data) {
  # Categorical columns: factor level index shifted to start at 0.
  label_encoded_cols <- c(
    "comments_disabled", "ratings_disabled",
    "published_month", "published_day", "published_term_in_month",
    "published_dow", "published_hour", "published_hour2",
    "comments_ratings_disabled_japanese"
  )
  # Logical flag columns: TRUE/FALSE mapped to 1/0.
  flag_cols <- c(
    "flg_categoryId_low", "flg_categoryId_high",
    "flg_no_tags", "flg_no_description", "flg_url",
    "flg_japanese", "flg_emoji", "flg_official", "flg_movie_number",
    "flg_comments_ratings_disabled_japanese_high",
    "flg_comments_ratings_disabled_japanese_very_high",
    "flg_comments_ratings_disabled_japanese_low",
    "flg_comments_ratings_disabled_japanese_very_low"
  )
  data %>%
    dplyr::mutate(dplyr::across(dplyr::all_of(label_encoded_cols), ~ as.integer(.x) - 1L)) %>%
    dplyr::mutate(dplyr::across(dplyr::all_of(flag_cols), as.integer))
}
|
4952cb5c8a86075aefe0234d33d6ca5838e6d71b
|
e59a11834b12ffc260d068b8478416beac8adb5d
|
/R/sa_functions.R
|
f98d79fcc71f171d8f6a9151be9eea54966e38ce
|
[] |
no_license
|
slevu/garel
|
c9f3020a35f4653695cfd9af1b421aca9a21f758
|
50ef9d6234cc1d0627c2ff1cea55d40af0270094
|
refs/heads/master
| 2020-03-20T02:02:40.739987
| 2019-02-28T11:58:11
| 2019-02-28T11:58:11
| 137,097,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,242
|
r
|
sa_functions.R
|
#' Compute infector probabilities on clades
#' @param tr phylo tree
#' @param parms_SA Parameters from \code{\link{get_sa_parms_range}}
#' @param p proportion of subtype represented (useless)
#' @return dataframe(donor, recip, ip)
#' @details \code{phydynR::phylo.source.attribution.hiv.msm} wraps \code{phydynR::phylo.source.attribution.multiDeme.fgy} that runs \code{phydynR::sourceAttribMultiDemeCpp} (mode = 1) or \code{phydynR::sourceAttribMultiDemeCpp2} (mode = 2)
#' @importFrom ape reorder.phylo
#' @export
# Run phylodynamic source attribution on one clade (phylo tree) and return
# the pairwise infector probabilities as a data frame (donor, recip, ip).
# `p` scales the epidemic denominators by the proportion of the subtype.
sa_by_clades <- function(tr, parms_SA = get_sa_parms_range(), p = 1){
require(ape) ## temp fix for ape::reorder.phylo in phylo.source.attribution.multiDeme.fgy ## importFrom is not enough ?
# Per-tip covariates, aligned to the tree's tip labels by name
CD4s <- parms_SA[['CD4s']][tr$tip.label]
STs <- parms_SA[['STs']][tr$tip.label]
EHIs <- parms_SA[['EHIs']][tr$tip.label]
MH <- parms_SA[['MH']]
# apply prop of subtype p to the epidemic-size parameters
PLWHIV <- parms_SA[['PLWHIV']] * p
NEWINF <- parms_SA[['NEWINF']] * p
##- run source attribution
W <- phydynR::phylo.source.attribution.hiv.msm(
tree = tr,
sampleTimes = STs, # years
cd4s = CD4s,
ehi = EHIs,
numberPeopleLivingWithHIV = PLWHIV,
numberNewInfectionsPerYear = NEWINF,
maxHeight = MH,
res = 1e3,
treeErrorTol = Inf,
minEdgeLength = 1/52,
mode = 2
)
##- return a DF: one row per (donor, recipient) pair with its probability
return(data.frame(donor = W[[1]],
recip = W[[2]],
ip = as.numeric(W[[3]]),
stringsAsFactors = FALSE))
}
#' Compute infector probabilities and bind all clade results from tree
#' @param pathclade Path to list of clades
#' @param parms_SA Parameters from \code{\link{get_sa_parms_range}}
#' @param outdir Create directory in which to save results
#' @param verbose print
#' @return List of [(inc, prev), dataframe(donor, recip, ip)]
#' @export
# Compute infector probabilities for every clade in `pathclade` and save the
# combined result (parameters + pooled donor/recip/ip table) as an RDS file
# in `outdir`. Returns the output file path. Skips work if the file exists.
get_ip <- function(pathclade, parms_SA = get_sa_parms_range(), outdir = "data/IPS", verbose = TRUE){
##- build the output file name; the PID suffix keeps parallel runs distinct
PID <- Sys.getpid()
dir.create(outdir, showWarnings = FALSE)
bn <- sub("\\.rds", paste0("_", PID, "\\.rds"), sub("clades_", "ip_", basename(pathclade)) )
IP <- paste(outdir, bn, sep = "/")
##- only compute if the output does not already exist
if(!file.exists(IP)){
subtype <- get_subtype(pathclade)
# proportion of this subtype among MSM, used to scale epidemic sizes
p <- parms_SA[['p_msm']][subtype]
clades <- readRDS(pathclade)
# run source attribution per clade, then pool all pairwise results
w.clades <- lapply(clades, function(x){
sa_by_clades(tr = x,
parms_SA = parms_SA,
p = p)
})
w.cl <- do.call(rbind, w.clades)
lst <- list(parms = parms_SA[c("MH","NEWINF", "PLWHIV")], W = w.cl)
##- save
if(verbose) print(paste('Save', IP))
saveRDS(lst, file = IP)
} else {
if(verbose) print(paste('File exists:', IP))
}
return(IP)
}
##---- get_sa_parms_range ----
#' Default parameters for source attribution in MSM.
#'
#' Surveillance data taken from Brown, 2017 and Yin, 2014.
#' \itemize{
#' \item For incidence in MSM:
#' "Following this adjustment, the estimated number of infections acquired per year rose from around 2,200 infections (95\% credible interval (CrI)1,800 to 2,500) in 2007 to a peak of 2,800 (CrI 2,300 to 3,200) in 2012 before falling to 1,700 (CrI 900 to 2,700) in 2016"
#' \item Let's assume in the period preceding last sequence (2013) an incidence of 2800 [2300 - 3200].
#' \item For prevalence, from Yin et al. 2014, MSM living with HIV in 2013 = 43,500 [40,200 - 48,200]
#' \item Let's further assume that incidence and prevalence are normally distributed and credible interval bounds are equal to 95\% confidence interval bounds, so that \code{sd = (up - lo)/(2 * qnorm(.975))}
#' \item Hence, values of incidence / prevalence can be drawn from normal distributions
#' }
#' @param filename Whether and where to save file
#' @references
#' 1. Brown AE, Kirwan P, Chau C, Khawam J, Gill ON, Delpech VC. Towards elimination of HIV transmission AIDS and HIV related deaths in the UK - 2017 report. Public Health England; 2017.
#' \url{https://www.gov.uk/government/uploads/system/uploads/attachment_data/file/662306/Towards_elimination_of_HIV_transmission_AIDS_and_HIV_related_deaths_in_the_UK.pdf}
#' 2. Yin Z, Brown AE, Hughes G, Nardone A, Gill ON, Delpech VC, et al. HIV in the United Kingdom 2014 Report: data to end 2013. London: Public Health England; 2014.
#' \url{https://www.gov.uk/government/uploads/system/uploads/attachment_data/file/401662/2014_PHE_HIV_annual_report_draft_Final_07-01-2015.pdf}
#' @export
# Assemble the source-attribution parameter list for MSM, drawing incidence
# and prevalence from normal distributions whose spread matches published
# credible intervals (see roxygen above). Optionally saves the list as RDS.
get_sa_parms_range <- function(filename = NA){
data(list = "df", package = 'tenbrit', envir = environment())
## restrict to MSM
sa_df <- df[df$tran2 == "MSM",]
##- Maximum height (years) considered in the source attribution
MH <- 20
##- incidence, prevalence: (lo, mu, up) triples, then draw from normal
prev <- setNames(c(40200, 43500, 48200), c('lo', 'mu', 'up'))
inc <- setNames(c(2300, 2800, 3200), c('lo', 'mu', 'up'))
# Draw n values from N(mu, sd) where sd is chosen so that (lo, up) matches
# a 95% interval: sd = (up - lo) / (2 * qnorm(.975)).
draw_norm_ci <- function(n, var, plots = FALSE){
lo <- sort(var)[1]
mu <- sort(var)[2]
up <- sort(var)[3]
sd <- (up - lo)/(2 * stats::qnorm(0.975))
x <- rnorm(n = n, mean = mu, sd = sd)
if(plots){
graphics::hist(x, breaks = 30)
graphics::abline(v = mu, col = 'red')
graphics::abline(v = lo, col = 'blue')
graphics::abline(v = up, col = 'blue')
}
return(x)
}
NEWINF <- draw_norm_ci(n = 1, var = inc)
PLWHIV <- draw_norm_ci(n = 1, var = prev)
# Per-patient covariates, named by test index
CD4s <- setNames(sa_df$cd4_365, sa_df$testindex)
EHIs <- setNames(sa_df$stage == 1, sa_df$testindex)
# Sample times as decimal years (+14 days offset before conversion)
STs <- setNames(as.numeric(sa_df$dateres + 14)/365.25 + 1970, sa_df$testindex)
# proportion of each subtype within MSM (first row of the margin table)
tt <- table(df$tran2, df$subtype, useNA='ifany')
p_msm <- as.matrix(prop.table(tt,1))[1,]
# head(CD4s); head(EHIs); table(EHIs); head(STs)
parms_SA <- list(MH = MH, NEWINF = NEWINF,
PLWHIV = PLWHIV, STs = STs,
CD4s = CD4s, EHIs = EHIs,
p_msm = p_msm)
if(!is.na(filename)){
##- save to disk if a filename was supplied
print(paste("save", filename))
saveRDS(parms_SA, file = filename)
}
return(parms_SA)
}
|
f96aeec4613104adf3d03e9cf118bf13261cb9d7
|
9564d47ab4bd212cc73a35432f4d780e2c227873
|
/Ngram Builder.R
|
429400b4c7ce7ae0c88cfa11978949517563fee0
|
[] |
no_license
|
ronaldyeo/Data-Science-Capstone-Project
|
c2f831838a6cc6cbbcb4a22c47567d0706dc4346
|
58287879da66ea745bfe8b24c544f99398b72c81
|
refs/heads/master
| 2023-03-17T18:01:49.983659
| 2021-03-07T11:36:03
| 2021-03-07T11:36:03
| 345,294,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,171
|
r
|
Ngram Builder.R
|
# Build bigram and trigram frequency tables from 10% samples of the
# news / blogs / twitter corpora and export them as gzipped CSVs.
library(tidyverse)
library(quanteda)
library(lexicon)
library(tidytext)
library(data.table)
# Set seed for reproducibility
set.seed(2)
# Read the three raw (gzipped) text files
en_US.news.txt <- file("./en_US.news.txt.gz", open = "r")
en_US.news <- readLines(en_US.news.txt, encoding = "UTF-8", skipNul = TRUE)
en_US.blogs.txt <- file("./en_US.blogs.txt.gz", open = "r")
en_US.blogs <- readLines(en_US.blogs.txt, encoding = "UTF-8", skipNul = TRUE)
en_US.twitter.txt <- file("./en_US.twitter.txt.gz", open = "r")
en_US.twitter <- readLines(en_US.twitter.txt, encoding = "UTF-8", skipNul = TRUE)
# BUG FIX: close() only closes its first argument; the extra connections
# previously passed to close.connection(a, b, c) were silently ignored,
# leaking the blogs and twitter connections. Close each one explicitly.
close(en_US.news.txt)
close(en_US.blogs.txt)
close(en_US.twitter.txt)
rm(en_US.news.txt, en_US.blogs.txt, en_US.twitter.txt)
# Sample 10% of each corpus to keep the n-gram build tractable
n <- 0.1
en_US.news.sample <- sample(en_US.news, length(en_US.news) * n)
en_US.blogs.sample <- sample(en_US.blogs, length(en_US.blogs) * n)
en_US.twitter.sample <- sample(en_US.twitter, length(en_US.twitter) * n)
rm(n, en_US.news, en_US.blogs, en_US.twitter)
# Combine all samples, lower-case, and convert to a corpus object
vector <- tolower(corpus(c(en_US.news.sample, en_US.blogs.sample, en_US.twitter.sample)))
# vector <- c(en_US.news.sample, en_US.blogs.sample, en_US.twitter.sample)
rm(en_US.news.sample, en_US.blogs.sample, en_US.twitter.sample)
# Tokenise, stripping punctuation, symbols, numbers, URLs and hyphens
tokenised.vector <- tokens(
  vector,
  remove_punct = TRUE,
  remove_symbols = TRUE,
  remove_numbers = TRUE,
  remove_url = TRUE,
  remove_separators = TRUE,
  split_hyphens = TRUE,
  include_docvars = TRUE,
  padding = TRUE,
  verbose = quanteda_options("verbose"),
)
rm(vector)
# Build n-gram frequency tables; quanteda joins grams with "_", so split
# that separator back out into word columns before counting.
bigram <- tokens_ngrams(tokenised.vector, n = 2) %>% unlist() %>% data.table()
bigram <- separate(bigram, col = ., into = c("word1", "word2"), sep = "_") %>%
  count(word1, word2, sort = TRUE)
trigram <- tokens_ngrams(tokenised.vector, n = 3) %>% unlist() %>% data.table()
trigram <- separate(trigram, col = ., into = c("word1", "word2", "word3"), sep = "_") %>%
  count(word1, word2, word3, sort = TRUE)
rm(tokenised.vector)
# Export to gzipped CSV
write.csv(bigram, file = gzfile("bigram.csv.gz"), row.names = FALSE)
write.csv(trigram, file = gzfile("trigram.csv.gz"), row.names = FALSE)
|
c5c8245c94c782f0833a2b9f13519f3177ca5559
|
0eb25213e0641b6502707aaa5a0006be777aaea5
|
/man/colorschemer.Rd
|
02bcc7de7251569dca620b0f1feba73daac5525e
|
[] |
no_license
|
tlcaputi/gtrendR
|
5a720dbd82a4d2c045300ef7316a3101308ec413
|
01b3db0fb35b9add4cebdc8eb7170d9494d5d0ef
|
refs/heads/master
| 2022-11-10T00:54:56.121774
| 2022-10-30T21:33:06
| 2022-10-30T21:33:06
| 249,570,129
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 310
|
rd
|
colorschemer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{colorschemer}
\alias{colorschemer}
\title{Colorscheme}
\usage{
colorschemer(col)
}
\arguments{
\item{col}{A colour specification, e.g. a colour name such as \code{"red"} (see the example).}
}
\description{
Colorscheme
}
\examples{
colorschemer("red")
}
|
24799d7f3a1974cf0b7a58e7ee5758470dada7bf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HIV.LifeTables/examples/mortmod.45q15.Rd.R
|
d4c32b86e16f79afd48255e0e8130ef91a273d70
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
mortmod.45q15.Rd.R
|
# Auto-extracted example script from the HIV.LifeTables package documentation.
library(HIV.LifeTables)
### Name: mortmod.45q15
### Title: Age-specific mortality rate model as a function of HIV
###   prevalence, child mortality (5q0), and adult mortality (45q15)
### Aliases: mortmod.45q15
### Keywords: models misc
### ** Examples
# Example inputs: 5q0 = 0.06, 45q15 = 0.20, HIV prevalence = 2.5 (percent).
mortmod.45q15(child.mort=0.06, adult.mort=0.20, prev=2.5)
|
b03206d5ad5a27de8c82b6f0f92ba4e4e792c163
|
ba1edf30bca6e023562e4aed21c0ca009d22f431
|
/models/biocro/R/get.model.output.BIOCRO.R
|
851c69224a2ac638f227de51878d698125c99438
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rgknox/pecan
|
79f080e77637dfb974ebb29313b5c63d9a53228e
|
5b608849dccb4f9c3a3fb8804e8f95d7bf1e4d4e
|
refs/heads/master
| 2020-12-27T20:38:35.429777
| 2014-05-06T13:42:52
| 2014-05-06T13:42:52
| 19,548,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,756
|
r
|
get.model.output.BIOCRO.R
|
#-------------------------------------------------------------------------------
# Copyright (c) 2012 University of Illinois, NCSA.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the
# University of Illinois/NCSA Open Source License
# which accompanies this distribution, and is available at
# http://opensource.ncsa.illinois.edu/license.html
#-------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#
##' Function to retrieve model output from local server
##'
##' @name get.model.output.BIOCRO
##' @title Retrieve model output from local server
##' @param settings list generated from \code{\link{read.settings}} function applied to settings file
##' @import PEcAn.utils
##' @export
##' @author Mike Dietze, David LeBauer
get.model.output.BIOCRO <- function(settings){
  ### Retrieve BioCro model output; only implemented when the run host is
  ### the local machine.
  on_localhost <- settings$run$host$name == 'localhost'
  if (!on_localhost) {
    ### Remote retrieval is not implemented for BioCro yet; report and bail.
    return(print(paste("biocro model specific get.model.output not implemented for\n",
                       "use on remote host; generic get.model.output under development")))
  }
  ### Delegate to the generic PEcAn results collector on the localhost.
  get.results(settings = settings)
}
#==================================================================================================#
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
63480c4f5e1453e814f7c04ac75e9a6ddb0fe1b0
|
c6ad1a79050cc22a8bae99b5517dcf78b03a08cf
|
/R/02-perturbations/00_wrangle-envision.R
|
fd9dacd75f5e3444f003cab360a65b5d8414bb4d
|
[] |
no_license
|
evertbosdriesz/cnr-selective-combos
|
0235c1e74ed23f56c115e1f92c98944046cee0d7
|
ebbc365bf17f9304ec98c394327d4ba806bff2f9
|
refs/heads/master
| 2023-04-18T03:36:04.072694
| 2022-05-30T14:23:34
| 2022-05-30T14:23:34
| 321,024,454
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,791
|
r
|
00_wrangle-envision.R
|
# Tidy and Normalize the Envision data
#
# Normalization: Treatment - POA/(DMSO - POA)
library(tidyverse)
library(readxl)
library(here)
library(stringr)
# Get the well annotations
annot <- read_tsv(here("results", "perturbations", "well-annotations.tsv"))
mapping <- read_tsv(here("results", "perturbations", "treatment-mapping.tsv"))
# Scale viability so PAO 10uM (dead control) maps to 0 and DMSO (vehicle
# control) maps to 1.
# NOTE(review): `dsmo_mean` is presumably a typo for "dmso_mean" (name only;
# the value is correct).
normalize_viability <- function(df) {
pao_mean <- mean(dplyr::pull(dplyr::filter(df, Treatment == "PAO 10uM"), Viability))
dsmo_mean <- mean(dplyr::pull(dplyr::filter(df, Treatment == "DMSO"), Viability))
df %>% mutate(
NormalizedViability = (Viability - pao_mean) / (dsmo_mean - pao_mean)
)
}
# Wild-type plate: read the 16x24 block, reshape to one row per well,
# attach well annotations, and normalize.
wt <- read_csv(
file.path(here("data", "EnVision", "2017-12-11", "CTB_MCF10A_WT_allcombos_20171211.csv")),
skip = 8, n_max = 16, col_names = as.character(seq(24)),
col_types = str_c(rep('i', 24), collapse = "")
) %>%
mutate(Row = LETTERS[1:16]) %>%
gather(Column, Viability, -Row) %>%
unite(Well, Row, Column, sep = "") %>%
left_join(annot, by = "Well") %>%
select(Well, Treatment = TreatmentLuminex, Viability) %>%
filter(!is.na(Treatment)) %>%
normalize_viability()
write_tsv(wt, file.path(here("results", "perturbations", "wt-envision.tsv")))
# PI3K-mutant plate: identical processing pipeline.
mut <- read_csv(
file.path(here("data", "EnVision", "2017-12-11", "CTB_MCF10A_PI3K_allcombos_20171211.csv")),
skip = 8, n_max = 16, col_names = as.character(seq(24)),
col_types = str_c(rep('i', 24), collapse = "")
) %>%
mutate(Row = LETTERS[1:16]) %>%
gather(Column, Viability, -Row) %>%
unite(Well, Row, Column, sep = "") %>%
left_join(annot, by = "Well") %>%
select(Well, Treatment = TreatmentLuminex, Viability) %>%
filter(!is.na(Treatment)) %>%
normalize_viability()
write_tsv(mut, file.path(here("results", "perturbations", "pi3k-envision.tsv")))
|
7e75e2b03f01abb2df200771d49d907d75676d7b
|
97ad56b218663daeff7cfe2518c4b60139d3050a
|
/preprocessing_code/retrosplits_preprocess.R
|
a8805401ebcb668e4a89557833983d0ac89b0c78
|
[] |
no_license
|
katieshan/baseball
|
37b900f425fef9e69997c11141c1cb17d8f5f614
|
1584872ceb6a68be13d391e059f557f142b93c89
|
refs/heads/master
| 2020-04-26T07:04:45.657187
| 2020-04-21T20:18:42
| 2020-04-21T20:18:42
| 173,383,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,563
|
r
|
retrosplits_preprocess.R
|
# NOTE(review): two machine-specific working directories; setwd() errors on
# the machine where the path does not exist, so comment out the one that
# does not apply before running.
setwd("C:/Users/580377/Documents/Personal/GitHub/baseball")
setwd("C:/Users/Katie/Documents/GitHub/baseball")
source("preprocessing_code/calcpoints.R")
library(tidyverse)
library(lubridate)
# Statistical mode of a vector: the most frequent value. On ties the value
# that appears first in the vector wins (which.max returns the first maximum).
getmode <- function(v) {
  distinct_vals <- unique(v)
  counts <- tabulate(match(v, distinct_vals))
  distinct_vals[which.max(counts)]
}
# Clean one season of Retrosheet "retrosplits" day-by-day data.
#
# Reads raw_data/.../playing-<year>.csv, splits player-game rows into pitching
# and batting logs, reconstructs game scores, imputes holds for middle
# relievers, scores every appearance with calcpitch()/calcbat() (sourced from
# calcpoints.R), and aggregates per-player season summaries (sum/mean/sd/mode
# plus games above a replacement-level threshold) for model training.
#
# Arguments:
#   year - season to process; used to build every input and output file name.
#
# Value: called for its side effects (CSV/RDS files under clean_data/ plus a
# few printed diagnostics); the last expression is the final saveRDS().
retrorun <- function(year){
  # BUG FIX: the original body started with `year=2019`, a leftover from
  # interactive debugging that overwrote the argument, so every call below
  # silently reprocessed the 2019 season regardless of the year requested.
  infile = paste0("raw_data/retrosplits-master/retrosplits-master/daybyday/playing-", year ,".csv")
  outpitch = paste0("clean_data/retropitches", year ,".csv")
  outbat = paste0("clean_data/retrobats", year ,".csv")
  outpitchrds = paste0("clean_data/retropitches", year ,".rds")
  outbatrds = paste0("clean_data/retrobats", year ,".rds")
  outbattrain = paste0("clean_data/battrain", year ,".rds")
  outpitchtrain = paste0("clean_data/pitchtrain", year ,".rds")
  # nominal season start date; used to compute age / years since debut below
  start <- paste(year, 04, 01, sep="-") %>% ymd() %>% as.Date()
  people <- readRDS("clean_data/databank_people.rds")
  #bring in retrosplit data
  retrosplits0 <- read_csv(infile)
  #bring in variable info and pull preprocessing, pitching, and batting fields
  retrovars <- read_csv("raw_data/retrosplits_vars.csv")
  retrocols <- retrovars$varname[retrovars$in_preprocess==1]
  pitchcols <- retrovars$varname[retrovars$pitch==1]
  batcols <- retrovars$varname[retrovars$bat==1]
  #filter to preprocessing fields (regular-season games only)
  retro1 <- retrosplits0 %>% select(c(retrocols)) %>%
    filter(season.phase=="R") %>%
    mutate(game.date = as.Date(game.date), team.key = as.factor(team.key), opponent.key = as.factor(opponent.key))
  summary(retro1)
  #create game win and loss info (total runs per team per game)
  retrogames <- retro1 %>% select(game.key, team.key, opponent.key, team.alignment, B_R)
  retrogames_h <- retrogames %>% filter(team.alignment==1) %>%
    group_by(game.key, team.key) %>%
    summarise(homescore=sum(B_R)) %>%
    rename(team.key.home=team.key)
  retrogames_a <- retrogames %>% filter(team.alignment==0) %>%
    group_by(game.key, team.key) %>%
    summarise(awayscore=sum(B_R)) %>%
    rename(team.key.away=team.key)
  retrogames <- merge(retrogames_h, retrogames_a, by="game.key")
  #pull out pitches and merge on games
  retropitches <- retro1 %>%
    select(pitchcols) %>%
    filter(P_G==1) %>%
    merge(., retrogames, by="game.key")
  #get game scores into pitch data
  retropitches <- retropitches %>%
    mutate(teamwon = if_else((team.alignment==1 & homescore>awayscore) | (team.alignment==0 & awayscore > homescore), 1, 0),
           teamscore = if_else (team.alignment==1, homescore, awayscore),
           oppscore = if_else(team.alignment==1, awayscore, homescore)) %>%
    select(-homescore, -awayscore, -team.alignment, -team.key.away, -team.key.home)
  #add up to impute holds; look at runs scored before and after each pitcher,
  #and if previous or subsequent pitcher got the win or loss
  retropitches <- retropitches %>%
    arrange(game.key, team.key, seq) %>%
    group_by(game.key, team.key) %>%
    mutate(prevruns = cumsum(P_R) - P_R,
           prevwin = cumsum(P_W) - P_W,
           prevloss = cumsum(P_L) - P_L) %>%
    arrange(game.key, team.key, -seq) %>%
    group_by(game.key, team.key) %>%
    mutate(subruns = cumsum(P_R) - P_R,
           subwin = cumsum(P_W) - P_W,
           subloss = cumsum(P_L) - P_L) %>%
    arrange(game.key, team.key, seq) %>%
    ungroup()
  #pull out possible holds--neither wins nor losses, middle relievers, at least one out
  holdcands <- retropitches %>%
    filter(P_GS==0 & P_W==0 & P_L==0 & P_SV==0 & P_GF==0 & P_OUT>=1) %>%
    #calculate holds
    #if a previous pitcher got the win, it's a hold
    #if a subsequent pitcher got the loss, and the opponent's score at the end
    #of the the pitcher's turn was less than the home team's final score,
    #it might have been a hold. give it to them and test later.
    mutate(holdwins= if_else(prevwin==1, 1, 0),
           # holdloss=if_else(subloss==1 & (oppscore - subruns < teamscore),1,0)) %>%
           holdloss=0) %>%
    select(game.key, person.key, holdwins, holdloss)
  #merge the holds back on and calculate final holds and innings pitched
  retropitches <- merge(retropitches, holdcands, by=c("game.key", "person.key"), all.x=TRUE)
  retropitches$holdsest <- retropitches$holdloss + retropitches$holdwins
  retropitches$holdsest[is.na(retropitches$holdsest)]<- 0
  retropitches$IP <- retropitches$P_OUT/3
  #run the point calculation function
  retropitches$points <- calcpitch(df=retropitches, GS="P_GS", IP="IP",ER="P_ER",PBB="P_BB",K="P_SO",SV="P_SV",HLD="holdsest")
  summary(retropitches)
  rm(holdcands, retrogames_a, retrogames_h)
  #now batters
  #pull out batters and merge on games
  retrobats <- retro1 %>%
    filter(P_G==0) %>%
    select(batcols) %>%
    merge(., retrogames, by="game.key")
  retrobats <- retrobats %>%
    mutate(B_1B = B_H - B_2B - B_3B - B_HR,
           NSB = B_SB - B_CS)
  retrobats$points <- calcbat(df=retrobats, R="B_R", S="B_1B", D="B_2B", Tr="B_3B", HR="B_HR", RBI="B_RBI", BBB="B_BB", NSB = "NSB")
  retropitches <- retropitches %>%
    select(-slot, -P_G, -season.phase, -prevruns, -prevwin, -prevloss, -subruns, -subwin, -subloss, -holdwins, -holdloss)
  nrow(retropitches)
  # retropitches <- merge(retropitches, people, by.x = "person.key", by.y="retroID", all.x = TRUE)
  nrow(retropitches)
  sum(is.na(retropitches$playerID))
  write.csv(retropitches, outpitch)
  saveRDS(retropitches, outpitchrds)
  retrobats <- retrobats %>%
    select(-season.phase)
  # nrow(retrobats)
  # retrobats <- merge(retrobats, people, by.x = "person.key", by.y="retroID", all.x = TRUE)
  # nrow(retrobats)
  # sum(is.na(retrobats$playerID))
  write.csv(retrobats, outbat)
  saveRDS(retrobats, outbatrds)
  #Now aggregate to per-player season summaries
  # NOTE(review): in the three batter selects below, `team.key.home` (and
  # `F_C_POS` in the mean/sd variants) lack the leading `-` that every sibling
  # column has -- they are probably meant to be dropped as well. Left
  # unchanged because summarise_if(is.numeric, ...) ignores the factor column
  # anyway; confirm intent before changing.
  bats_sum <- retrobats %>%
    select(-game.key, -game.date, -team.alignment, -team.key, -opponent.key, -slot, -seq,
           team.key.home, -team.key.away, -homescore, -awayscore) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(sum=sum), na.rm=TRUE)
  bats_mean <- retrobats %>%
    select(-game.key, -game.date, -team.alignment, -team.key, -opponent.key, -slot, -seq,
           team.key.home, -team.key.away, -homescore, -awayscore, -B_G_DH, -B_G_PH, -B_G_PR, -F_1B_POS,
           -F_2B_POS, -F_3B_POS, -F_SS_POS, -F_OF_POS, -F_LF_POS, -F_CF_POS, -F_RF_POS, F_C_POS, -F_P_POS) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(mean=mean), na.rm=TRUE)
  bats_sd <- retrobats %>%
    select(-game.key, -game.date, -team.alignment, -team.key, -opponent.key, -slot, -seq,
           team.key.home, -team.key.away, -homescore, -awayscore, -B_G_DH, -B_G_PH, -B_G_PR, -F_1B_POS,
           -F_2B_POS, -F_3B_POS, -F_SS_POS, -F_OF_POS, -F_LF_POS, -F_CF_POS, -F_RF_POS, F_C_POS, -F_P_POS) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(sd=sd), na.rm=TRUE)
  #most common team and batting-order slot for each player
  bats_mode <- retrobats %>%
    select(person.key, team.key, slot) %>%
    group_by(person.key) %>%
    summarise_all(funs(mode=getmode))
  bats_all <- merge(bats_sum, bats_mean, by="person.key")
  bats_all <- merge(bats_all, bats_sd, by="person.key")
  bats_all <- merge(bats_all, bats_mode, by="person.key")
  #replacement-level threshold: mean per-game points of the top 90 scorers
  mean_points <- arrange(bats_all, desc(points_sum)) %>%
    select(points_sum, points_mean) %>%
    top_n(90, points_sum) %>%
    summarise(mean(points_mean))
  games_above <- retrobats %>%
    select(person.key, points) %>%
    mutate(games=1, games_above=if_else(points>as.numeric(mean_points), 1, 0)) %>%
    select(-points) %>%
    group_by(person.key) %>%
    summarise_all(funs(count=sum), na.rm=TRUE)
  # NOTE(review): `nameFirst` below is missing the leading `-` that the
  # pitcher version of this select has; it is almost certainly meant to be
  # dropped too. Left as-is to preserve the existing output columns.
  bats_all <- merge(bats_all, games_above, by="person.key") %>%
    merge(., people, by.x="person.key", by.y="retroID", all.x=TRUE)%>%
    mutate(team.key_mode=as.factor(team.key_mode),
           slot_mode = as.factor(slot_mode),
           bats=as.factor(bats),
           throws=as.factor(throws),
           debut=as.Date(debut),
           birthdate=as.Date(birthdate),
           since_debut = (start-debut)/365.25,
           age=(start-birthdate)/365.25) %>%
    select(-playerID, -bbrefID, nameFirst, -nameLast, -nameGiven,-debut, -birthdate)
  rm(retrobats, bats_mean, bats_mode, bats_sd, bats_sum, games_above, mean_points)
  saveRDS(bats_all, outbattrain)
  #now pitchers
  # NOTE(review): `teamwon` in the three selects below is likewise missing its
  # leading `-`; left unchanged for the same reason as above.
  pitches_sum <- retropitches %>%
    select(-game.key, -game.date, -team.key, -opponent.key, -seq, teamwon, -teamscore, -oppscore) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(sum=sum), na.rm=TRUE)
  pitches_mean <- retropitches %>%
    select(-game.key, -game.date, -team.key, -opponent.key, -seq, teamwon, -teamscore, -oppscore) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(mean=mean), na.rm=TRUE)
  pitches_sd <- retropitches %>%
    select(-game.key, -game.date, -team.key, -opponent.key, -seq, teamwon, -teamscore, -oppscore) %>%
    group_by(person.key) %>%
    summarise_if(is.numeric, funs(sd=sd), na.rm=TRUE)
  pitches_mode <- retropitches %>%
    select(person.key, team.key) %>%
    group_by(person.key) %>%
    summarise_all(funs(team.key_mode=getmode))
  pitches_all <- merge(pitches_sum, pitches_mean, by="person.key")
  pitches_all <- merge(pitches_all, pitches_sd, by="person.key")
  pitches_all <- merge(pitches_all, pitches_mode, by="person.key")
  mean_points <- arrange(pitches_all, desc(points_sum)) %>%
    select(points_sum, points_mean) %>%
    top_n(90, points_sum) %>%
    summarise(mean(points_mean))
  games_above <- retropitches %>%
    select(person.key, points) %>%
    mutate(games=1, games_above=if_else(points>as.numeric(mean_points), 1, 0)) %>%
    select(-points) %>%
    group_by(person.key) %>%
    summarise_all(funs(count=sum), na.rm=TRUE)
  pitches_all <- merge(pitches_all, games_above, by="person.key") %>%
    merge(., people, by.x="person.key", by.y="retroID", all.x=TRUE)%>%
    mutate(team.key_mode=as.factor(team.key_mode),
           bats=as.factor(bats),
           throws=as.factor(throws),
           debut=as.Date(debut),
           birthdate=as.Date(birthdate),
           since_debut = (start-debut)/365.25,
           age=(start-birthdate)/365.25) %>%
    select(-playerID, -bbrefID, -nameFirst, -nameLast, -nameGiven, -debut, -birthdate)
  saveRDS(pitches_all, outpitchtrain)
}
# Process each regular season from 2010 through 2019, in order.
for (season in 2010:2019) {
  retrorun(season)
}
|
97a7c1722080537adab4d51d3a2a5117bed7b5f3
|
7152e85ab884aff7001b56c6dd927e14a2811ff1
|
/data-science/data-science-with-r/Dataframe_operations/Data_Frames_Operations.R
|
352e40bc22af3347f54586bc4a4476dbfc640f81
|
[] |
no_license
|
gautam-kumar-22/Data-Science-With-Python-R
|
29685878abc3501803637216dd225f5d6f1f33ff
|
3c0e4e26ec0886623f904dcbb1d3e7883393a497
|
refs/heads/master
| 2023-04-16T11:50:39.686462
| 2021-04-21T09:37:40
| 2021-04-21T09:37:40
| 360,201,702
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,574
|
r
|
Data_Frames_Operations.R
|
############################
#    Data frame operations #
############################
## Teaching transcript: each statement is meant to be run interactively and
## its printed output inspected; the exact statement order (including the
## deliberate re-reads and re-prints) is part of the lesson, so the code is
## preserved verbatim and only comments are added.

## Extracting data from a table data
## Working directory .
getwd()
setwd("/home/labsuser/2021/Jan_16/Dataframe_operations")
getwd()
## Extracting data from an excel
library(readxl)
#install.packages("readxl")
my_df <- read_excel("Demo 1_Identifying_Data_Structures.xlsm")
View(my_df)
### Assignment # import data from different sheets in excel.
## Extracting data from a table file .
table_data <- read.table("table_data.txt",header = TRUE)
table_data
## same file without header = TRUE: first row becomes data, columns get V1, V2, ...
table_data <- read.table("table_data.txt")
table_data
## Extracting data from a CSV file ( comma separated value ) .
US_Car_df <- read.csv("USA_cars_datasets.csv")
View(US_Car_df)
str(US_Car_df)
## NOTE(review): since R 4.0 stringsAsFactors already defaults to FALSE, so
## this re-read demonstrates the option but may not change str() output.
US_Car_df <- read.csv("USA_cars_datasets.csv",stringsAsFactors = FALSE)
str(US_Car_df)
print(head(US_Car_df,10))
## column access: by position, by name
US_Car_df[,4]
US_Car_df$model
?head()
head(US_Car_df$model,10)
## row access: single row, all columns
US_Car_df[5,]
# list of column names :
names(US_Car_df)
class ( names(US_Car_df) )
# rename the first column
names(US_Car_df)[1]
names(US_Car_df)[1] <- "Id"
names(US_Car_df)
# newheader <- c('col1','col2'........)
# names(US_Car_df) <- newheader
# select columns from a dataframe :
US_Car_df[,1:2]
## Select data from a dataframe
US_Car_df[1:5, c('Id','price','model')]
# Get the count of NAs .
sum(is.na(US_Car_df$model))
sum(is.na(US_Car_df))
#### Assignment : print the count of NAs in individual column.
# run a for loop on names(dataframe)
US_Car_df
## most expensive car brand .
which.max(US_Car_df$price)
expensive_car <- US_Car_df[which.max(US_Car_df$price),c('brand','price')]
expensive_car
# second most expensive car (sort descending by price, take row 2)
sorted_car <- US_Car_df[order(-US_Car_df$price),]
sorted_car
sorted_car[2,c('brand','price')]
## unique values of a particular colums
unique(US_Car_df$brand)
length(unique(US_Car_df$brand))
### Filter records .
merc <- subset(US_Car_df, brand == 'mercedes-benz')
merc
length(unique(merc$brand))
## FIlter with multiple conditions
merc1 <- subset(US_Car_df, brand == 'mercedes-benz' & model == 'vans')
merc1
## toupper() makes the comparison case-insensitive
toyota <- subset ( US_Car_df , toupper(brand) == 'TOYOTA')
toyota
dim(subset(US_Car_df, brand == 'mercedes-benz'))
nrow(subset(US_Car_df, brand == 'mercedes-benz'))
ncol(subset(US_Car_df, brand == 'mercedes-benz'))
### grouping
aggregate(US_Car_df$price , by = list(US_Car_df$brand) , FUN = mean )
grouped <- aggregate(x = US_Car_df$price , by = list(US_Car_df$brand,US_Car_df$model) , FUN = sum )
grouped
## aggregate() names grouping columns Group.1/Group.2; give readable names
names(grouped) <- c("brand","model","Sum")
grouped
grouped[order(grouped$brand,grouped$model),]
## sapply on a dataframe
sapply( list(US_Car_df$price), max )
class ( sapply( list(US_Car_df$price), max ) )
sapply( list(US_Car_df$price), summary )
class (sapply( list(US_Car_df$price), summary ))
#### dplyr ####
library(dplyr)
#library(tidyr)
#install.packages("dplyr")
# 1. filter()
# 2. arrange()
# 3. select()
# 4. distinct()
# 5. mutate()
# 6. summarise()
# 7. group_by()
# 1. filter
?filter
chevrolet <- filter(US_Car_df,brand == 'chevrolet')
chevrolet
unique(chevrolet$brand)
unique(chevrolet$model)
names(chevrolet)
## multiple filter arguments are combined with AND
chevrolet_1 <- filter(US_Car_df,brand == 'chevrolet',model == 'colorado')
chevrolet_1
nrow(chevrolet_1)
unique(chevrolet_1$model)
## %in% filters against a set of values
chevrolet_2 <- filter(US_Car_df, brand %in% c('chevrolet','toyota') )
unique(chevrolet_2$brand)
###Assignment - Filter those records which doesnt contain
# chevrolet and toyota .
# 2. arrange()
?arrange
View(US_Car_df)
newdata_arranged <- arrange(US_Car_df, price) # by default its ascending .
View(newdata_arranged)
tail(newdata_arranged,15)
newdata_arranged <- arrange(US_Car_df, desc(price))
head(newdata_arranged,15)
# sort by two variables
newdata_arranged <- arrange(US_Car_df, desc(price), mileage)
head(newdata_arranged,15)
View(newdata_arranged)
newdata_arranged <- arrange(US_Car_df, desc(price), desc(mileage))
head(newdata_arranged)
View(newdata_arranged)
# 3. select()
select(US_Car_df,brand,model)
## verbs compose: filter first, then keep only the wanted columns
select(filter(US_Car_df,US_Car_df$brand == 'chevrolet'),brand,price,model)
unique(select(filter(US_Car_df,US_Car_df$brand == 'chevrolet'),brand,price,model)$brand)
# 4. distinct() # unique in R base packages
distinct(US_Car_df, brand)
dim(distinct(US_Car_df, brand))
class(distinct(US_Car_df, brand))
is.vector(distinct(US_Car_df, brand))
#View(US_Car_df)
#select(df,colnames)
# 5. mutate() # creates new column based on the condition provided in the code.
?mutate
US_Car_df_mutate <- mutate(US_Car_df, brand_model = paste(brand," ",model))
head(US_Car_df_mutate)
View(US_Car_df_mutate)
## NOTE(review): mutate() returns a new data frame; this reassignment drops
## the brand_model column created above, so the head() call below will show
## it as absent unless the previous line is re-run.
US_Car_df_mutate <- mutate(US_Car_df, mileage_new = mileage + 5 )
head(US_Car_df_mutate$brand_model)
#US_Car_df_transmutate <- transmute(US_Car_df, brand_model = paste(brand," ",model))
#head(US_Car_df_transmutate)
#View(US_Car_df_transmutate)
### Assignment : Donot perform the paste(brand," ",model) operation in case of chevrolet .
# Do it for the rest.
# 6. summarise()
?summarise
summarise(US_Car_df, brand_mean = mean(price, na.rm = T))
mean(US_Car_df$price)
# 7. grouping
?group_by
# na.rm means ignore NAs if any .
brand_groups <- group_by(US_Car_df, brand)
brand_groups
summarise(brand_groups, mean(price, na.rm = T))
summarise(brand_groups, sum(price, na.rm = T))
# Average based on price for brand and model (group by multiple columns)
brand_groups <- group_by(US_Car_df, brand,model)
summarise(brand_groups, mean(price, na.rm = T))
summarise(brand_groups, sum(price, na.rm = T))
|
da1872822e16dcef88fb113971214e0e07fd0d0a
|
b01b94db7226001798b33fe627eac033b2be0773
|
/group3project/man/loglik.pom.Rd
|
3d4c23b0dd85a1bca2ba17ee418d6cda67f526ec
|
[] |
no_license
|
kennyitang/BIOS735-Group3
|
f793f22a827f3efcefa0c26f4a90c38bd12e15ad
|
10e298447f4b938c7d3c9e1f5cbc2ffb8bcee8e2
|
refs/heads/master
| 2021-01-02T11:27:49.567359
| 2020-04-27T14:55:02
| 2020-04-27T14:55:02
| 239,602,553
| 1
| 0
| null | 2020-04-23T21:34:36
| 2020-02-10T20:07:47
|
HTML
|
UTF-8
|
R
| false
| true
| 942
|
rd
|
loglik.pom.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pom_loglik.R
\name{loglik.pom}
\alias{loglik.pom}
\title{Log likelihood of proportional odds model}
\usage{
loglik.pom(y, X, param)
}
\arguments{
\item{y}{a vector of ordered factor responses with J levels.}
\item{param}{current values of the alpha and beta parameters of length (J-1+P).}
\item{X}{an N x P data matrix, with no intercept column, where categorical variables must be coded as indicator variables.}
}
\value{
The log likelihood of proportional odds model.
}
\description{
This function returns the log likelihood of the proportional odds model, allowing
an arbitrary number of J ordered response categories (J>2), referencing Woodridge (2002, p656).
}
\examples{
# Given y, X, and the current values of alpha and beta, the log likelihood of a
# proportional odds model at a specific iteration can be calculated as:
loglik.pom(y, X, c(alpha, beta))
}
|
21f7ce609d07140c1f47a1ebd1aba543c0b74b8c
|
3aac8d9944540359b72123e8dd7ad6bf9b05b707
|
/plot2.R
|
b538aa4148ffafc0e0d23f3c04216dbb204f1aeb
|
[] |
no_license
|
jose-barrera/ExData_Plotting1
|
7a1e1393dcdf0245ef7fa6d698cc059289f6a4a0
|
4b4517c7e85674baef7b34bdbf376e6dd4094e18
|
refs/heads/master
| 2021-01-15T17:28:21.035838
| 2015-02-08T22:19:08
| 2015-02-08T22:19:08
| 30,501,177
| 0
| 0
| null | 2015-02-08T18:40:51
| 2015-02-08T18:40:51
| null |
UTF-8
|
R
| false
| false
| 623
|
r
|
plot2.R
|
## Reading data
## "?" marks missing measurements in the source file, so map it to NA.
data <- read.table("household_power_consumption.txt", sep=";", header=TRUE,
                   na.strings = c("?"))
data$Time <- strptime(paste(data$Date,data$Time),"%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date,"%d/%m/%Y")
## Subsetting plot data: keep only the two days of interest, then free the
## full dataset (it is large).
plotdata <- data[data$Date >= "2007-02-01" & data$Date <= "2007-02-02",]
rm(data)
## Opening the device
## BUG FIX: the original call was `png(png, file="plot2.png", ...)`, which
## passes the `png` function object itself as the device's first argument
## (`filename`) and fails at runtime; the device takes the file name directly.
png(filename="plot2.png", width=480, height=480, units="px")
## Creating the plot: Global Active Power over time as a line chart.
plot(plotdata$Time, plotdata$Global_active_power, type="l", xlab="",
     ylab="Global Active Power (kilowatts)")
## Closing the device
dev.off()
## [EOF]
|
6fc073ffd19442c3107f109b74fe0ed1b9290191
|
0f1920a21f21514e3cf993b3e244d5b4011aaf8e
|
/cachematrix.R
|
e0db7afd0f001ce0e23b23a090a18941ba9e34a0
|
[] |
no_license
|
KaiBerlin/ProgrammingAssignment2
|
2a4f7f8f945d5b7b4fd66eac45f399823619f1f1
|
b600086ae86c99632cb546ed77d48b5701121aac
|
refs/heads/master
| 2020-03-12T02:07:11.936325
| 2018-04-21T09:19:02
| 2018-04-21T09:19:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,058
|
r
|
cachematrix.R
|
## makeCacheMatrix - wraps a matrix together with a cache slot for its
## inverse. Returns a list of four accessor closures that all share one
## enclosing environment:
##   set(y)            replace the stored matrix and drop any cached inverse
##   get()             return the stored matrix
##   setinverse(inv)   store a computed inverse in the cache
##   getinverse()      return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    # rebind in the enclosing environment so all closures see the change;
    # a new matrix invalidates the old cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  fetch_matrix <- function() {
    x
  }
  store_inverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  fetch_inverse <- function() {
    cached_inverse
  }
  list(set = replace_matrix,
       get = fetch_matrix,
       setinverse = store_inverse,
       getinverse = fetch_inverse)
}
## cacheSolve - returns the inverse of a matrix wrapped by makeCacheMatrix,
## computing it at most once: the first call solves and stores the result,
## later calls return the cached copy (announced with a message).
## Usage: cacheSolve(x, ...)
##  - x   a cache-aware matrix produced by makeCacheMatrix
##  - ... further arguments forwarded to solve()
## returns the inverted matrix
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
|
02d5e2848b7b9338260acc95bff447cd4854e50b
|
7e72d93a908bfc5781f0d95daa2a213f4c38dc58
|
/collection/tests/testthat/test.general.r
|
ca34b5067ecc2f9c6639e846734c5a07c1ea1ef7
|
[] |
no_license
|
lbartnik/varia
|
349ea81e92198daf680a2d022f8b9157913a78f6
|
3ca8179880a55b3c1104a972aa482c190c6adfed
|
refs/heads/master
| 2021-01-25T04:08:05.628355
| 2015-09-16T21:06:27
| 2015-09-16T21:06:27
| 24,222,581
| 0
| 0
| null | 2014-11-26T10:36:41
| 2014-09-19T08:39:09
|
R
|
UTF-8
|
R
| false
| false
| 248
|
r
|
test.general.r
|
context("general tests")

# Smoke test of the full collection pipeline: build a collection, filter it,
# apply summary() to every element, and persist the result. Currently
# disabled via skip(), and additionally guarded on dplyr being installed.
# NOTE(review): collection(), ccply() and save_to() come from the package
# under test; their exact contracts are not visible here.
test_that("full flow", {
  skip('turned off for now')
  if (!require(dplyr, quietly = T)) skip('dplyr not found')
  collection("col") %>%
    select(flag == 1) %>%
    ccply(summary) %>%
    save_to('col_result')
})
|
172bde28f33fc4d71d7849464326316071006c68
|
b01f3ca15d81dc03e0f1657b0fa7c1a77747e63f
|
/MAPS2_2_filter_ASV_table.R
|
1b0f0dcc85b2921c4c23b793bacd53ce599c7f9d
|
[] |
no_license
|
FlorianProdinger/MAPS2
|
f8abc1e4f26a573bf4784eecaf3a9ceb62e5bc3a
|
9cb4121945457ff03c8c5cc3f029a5d504349e4e
|
refs/heads/main
| 2022-12-31T14:52:15.767399
| 2020-10-22T05:23:42
| 2020-10-22T05:23:42
| 301,575,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,858
|
r
|
MAPS2_2_filter_ASV_table.R
|
#!/bin/R/3.6.1
# Command-line handling. Expects four positional arguments:
#   1) output directory (must exist; becomes the working directory)
#   2) pplacer .jplace tree file
#   3) ASV count table (TSV) from the previous pipeline step
#   4) BLASTX output for the ASV DNA sequences
#check user input
#read out user input
print("[R] script for reading jplace and the blastx output")
if (length(commandArgs(trailingOnly = T)) == 0){
print("please enter a vaild directory after the R script name")
quit()
} else if ( dir.exists( commandArgs(trailingOnly = T)[1] )){
input_u <- commandArgs(trailingOnly = T)
out_dir <-input_u[1]
#directory needs to be set to write tables
setwd(out_dir)
tree_file <- input_u[2] #"test_20200526_5_samples_DNA_sequences.fasta_AA.fasta_pplacer.jplace"
ASV_table_file <- input_u[3] #"test_20200526_5_samples_ASV_table.tsv"
fasta_output_file <- input_u[4] #"test_20200526_5_samples_DNA_sequences.fasta_BLASTX_out.txt"
} else {
print("[Rscript] directory not found")
quit()}
require(ggtree, quietly = T)
require(treeio, quietly = T)
#tutorial to jplace trees in R
#https://bioc.ism.ac.jp/packages/3.2/bioc/vignettes/ggtree/inst/doc/treeImport.html#s4-classes
tree_MP <- read.jplace(tree_file)
#makes a dataframe with all the tree data (no placement)
tree_MP_df <- fortify(tree_MP)
#define a clade which is considred Mimiviridae from two tips:
MIMI_node1 <- "MIMI_POV"
MIMI_node2 <- "MIMI_megavirus_bus"
# most recent common ancestor of the two reference tips spans the clade
MIMI_MRCA_node <- MRCA(get.tree(tree_MP), c( MIMI_node1, MIMI_node2))
#get all the "offspring" nodes of the most recent ancestor of Mimiviridae
MIMI_nodes <- offspring(get.tree(tree_MP), MIMI_MRCA_node)
#returns a data frame with all the placed ASVs and where they were placed
tree_MP_placement_df <- as.data.frame(get.placements(tree_MP, by="best"))
#add a column to the dataframe showing if ASV was placed in Mimiviridae (bool)
tree_MP_placement_df$IS_MIMI <- tree_MP_placement_df$node %in% MIMI_nodes
########################
#load the OTU table from previous R script
ASV_tab <- read.table( ASV_table_file , header=1, row.names=1, sep = "\t" )
samples <- colnames( ASV_tab )
#was OTU placed by pplacer??
ASV_tab$pplaced_T_F <- rownames(ASV_tab) %in% tree_MP_placement_df$name
##############################
# best hit to none MIMI ASVs #
##############################
tab_fasta <- read.table(fasta_output_file, header=F, sep=",")
colnames(tab_fasta) <- c("ASVid", "AA_seq", "e_value", "percent_identity", "reading_frame", "best_hit", "other_hit")
# ASVs whose best BLASTX hit is not a "MEGA" reference sequence
not_MIMI_ASV <- tab_fasta$ASVid[ !grepl( "MEGA", tab_fasta$best_hit)]
# keep only ASVs that pplacer actually placed on the tree
ASV_tab_2 <- ASV_tab[ASV_tab$pplaced_T_F,]
##############################
###################################################
# check ASVs that were assigned to multiple nodes #
##################################################
asvID_to_number <-function( ASV_ID ){ as.numeric(gsub("ASV_","", ASV_ID))}
all_T <- function( T_F_vector){ length(T_F_vector)==sum(T_F_vector) }
# Per-ASV verdict: TRUE only if *every* placement of that ASV falls inside
# the Mimiviridae clade; then re-sort by numeric ASV index so rows line up
# with ASV_tab_2 below.
MIMI_ASV_aggre <- aggregate(tree_MP_placement_df$IS_MIMI , by=list( tree_MP_placement_df$name), FUN=all_T)
MIMI_ASV_aggre <- MIMI_ASV_aggre[ order(unlist(lapply( MIMI_ASV_aggre$Group.1, FUN=asvID_to_number))) , ]
#print( head(tree_MP_placement_df) )
#print( head( MIMI_ASV_aggre ))
#print( head( ASV_tab_2 ))
#print( sum( MIMI_ASV_aggre$Group.1 == rownames(ASV_tab_2) ))
# sanity check: the aggregated verdicts must align row-for-row with ASV_tab_2
if( !(nrow(ASV_tab_2) == sum( MIMI_ASV_aggre$Group.1 == rownames(ASV_tab_2) ))){
print("ERROR with the ASV IDs")
quit()}
ASV_tab_2$IS_MIMI <- MIMI_ASV_aggre$x
# keep only Mimiviridae ASVs, and only the sample count columns
ASV_tab_final <- ASV_tab_2[ ASV_tab_2$IS_MIMI , samples ]
#print( head( ASV_tab_final ))
#####################
# singelton removal #
#####################
# NOTE(review): colSums() != 1 drops *sample columns* whose total read count
# is exactly 1, not singleton ASVs (which would be rowSums() on rows).
# Confirm which was intended before changing.
ASV_tab_final_no_singelton <- ASV_tab_final[ colSums( ASV_tab_final ) != 1 ]
#output the loss in reads, filter statistics
# read counts surviving each stage: dada2 output -> placed by pplacer ->
# placed inside the Mimiviridae clade
read_loss_df <- data.frame( "dada2" = colSums(ASV_tab[,samples] ),
"pplaced_ASVs" = colSums(ASV_tab_2[,samples] ),
"MIMI_ASVs" = colSums(ASV_tab_final[,samples] ))
read_loss_df$sample_name <- rownames(read_loss_df)
print("[R] created statistics file...")
print( read_loss_df )
#####################
# outputting tables #
#####################
ASV_table_file_new <- gsub( "ASV_table.tsv", "final_ASV_table_noS.tsv", ASV_table_file, )
write.table( file= ASV_table_file_new, ASV_tab_final_no_singelton, sep="\t", quote = F )
#########################
# outputting statistics #
#########################
previous_stat_file_name <- gsub("ASV_table.tsv", "import_merge_statititics_table.tsv", ASV_table_file)
ASV_table_stats_file <- gsub( "ASV_table.tsv", "filter_statistics.tsv", ASV_table_file )
#read stats of previous R script output (qual filter, merging, chimera checking)
filter_stats_1 <- read.table(previous_stat_file_name , header=1, row.names=1,sep="\t")
#combine the two stats tables:
ASV_table_stats <- cbind( filter_stats_1, read_loss_df)
write.table( file= ASV_table_stats_file , ASV_table_stats , sep="\t", quote = F )
save.image( file="dada2_pipeline_Rscript_tree")
#save.image( file=".dada2_pipeline_Rscript_tree")
print("[R] finished ASV_table filtering.")
|
8fddf234073c71a28f9d3dc007c32451560e3277
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/introduction/citations/ESEUR-cites.R
|
0408dec2319d5f96915bd01592da92a8c8b2a133
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,333
|
r
|
ESEUR-cites.R
|
#
# ESEUR-cites.R, 26 Jun 20
# Data from:
# This books BibTex file
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG citations
source("ESEUR_config.r")

library("plyr")

pal_col=rainbow(2)

# NA entries, there is no data
# 'available' entries are cited works whose data is not used.
Ed=read.csv(paste0(ESEUR_dir, "introduction/citations/ESEUR-cites.csv.xz"), as.is=TRUE, sep=";")
# table(Ed$data)

Ed$year=as.numeric(Ed$year)

# citations per publication year: all cited works vs. those whose data is used
c_year=count(Ed$year)
d_year=count(subset(Ed, !is.na(data))$year)

# log-scale scatter of citation counts by year
plot(c_year, log="y", col=pal_col[2],
xaxs="i",
xlim=c(1920, 2021),
xlab="Year", ylab="Work cited\n")
points(d_year, col=pal_col[1])

legend(x="topleft", legend=c("Research", "Data"), bty="n", fill=rev(pal_col), cex=1.2)

# exponential growth fit: log-linear GLM over 1941-2016, drawn back onto the plot
c_mod=glm(log(freq) ~ x, data=c_year, subset=(x > 1940) & (x < 2017))
# summary(c_mod)
years=1940:2017
pred=predict(c_mod, newdata=data.frame(x=years))
lines(years, exp(pred), col=pal_col[2])

# same fit for the data-backed subset
d_mod=glm(log(freq) ~ x, data=d_year, subset=(x > 1940) & (x < 2017))
# summary(d_mod)
pred=predict(d_mod, newdata=data.frame(x=years))
lines(years, exp(pred), col=pal_col[1])

# lines(loess.smooth(d_year$x, d_year$freq, span=0.3), col=pal_col[3])
# legend(x="topleft", legend=c("Research", "Data"), bty="n", fill=rev(pal_col[-3]), cex=1.4)
|
f7f5f2f419faafa38841f4c11434872a0e19e0c8
|
179356a2a9b6a3159610238f6452e7a143badeaa
|
/Analysis-Pipeline-Core Signaling Pathway-Radiogenomics.R
|
0f25b67320e6dd03a8691f0fd72ea68f3bcbe92d
|
[] |
no_license
|
jieunp/radiogenomics
|
ae03f3b02bf73e5f384beb93e5e124a6e419df09
|
b1b0274311b0290e34ddbe006a014ff5d715fe56
|
refs/heads/master
| 2020-07-02T03:00:56.041156
| 2019-08-20T01:04:21
| 2019-08-20T01:04:21
| 201,394,997
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,085
|
r
|
Analysis-Pipeline-Core Signaling Pathway-Radiogenomics.R
|
# The R code is written in collaborative work of Dr. Ji Eun Park and Prof. Seo Young Park
# please contact jieunp@gmail.com if you have further question.
# These are codes for radiogenomics analysis "prediction of core signaling pathway in IDH-wildtype glioblastoma"
## Line 5-129: Feature selection via Student's t-test with false discovery rate correction
## Line 134-233: Feature selection via LASSO penalization and calculate AUC for each genetic mutation
## Line 234-329: Feature selection via Random Forest and find top 5 important features.
## Line 330- : Calculate diagnostic performance
#### First step is univariate analysis
rm(list=ls())
setwd("C:/Radiogenomics/") #write down where the feature exists
rm(list=ls())
require(plyr)
require(dplyr)
#### Read the feature data in
# Radiomic feature matrices: one file per MR sequence (rows = features,
# columns = patients), training and test cohorts read separately.
t1<-read.csv("Feature_T1_Z.csv",header = FALSE)
flair<-read.csv("Feature_FLAIR_Z.csv",header = FALSE)
adc<-read.csv("Feature_ADC.csv",header = FALSE)
cbv<-read.csv("Feature_DSC.csv",header = FALSE)
t1_t<-read.csv("Test_Feature_T1_Z.csv",header = FALSE)
flair_t<-read.csv("Test_Feature_FLAIR_Z.csv",header = FALSE)
adc_t<-read.csv("Test_Feature_ADC.csv",header = FALSE)
cbv_t<-read.csv("Test_Feature_DSC.csv",header = FALSE)
# concatenate train + test patients per sequence, then stack all sequences
t1.n<-cbind(t1,t1_t)
flair.n<-cbind(flair,flair_t)
adc.n<-cbind(adc,adc_t)
cbv.n<-cbind(cbv,cbv_t)
all<-rbind(t1.n,flair.n,adc.n,cbv.n)
# transpose so rows = patients, columns = features, labelled by sequence
xall<-t(all)
colnames(xall)<-c(paste("t1.", 1:nrow(t1.n), sep=""),
paste("flair.", 1:nrow(flair.n), sep=""),
paste("adc.", 1:nrow(adc.n), sep=""),
paste("cbv.", 1:nrow(cbv.n), sep="") )
#summary(xall)
#### Read the genomic data in
g.data1<-read.csv("genomic_train.csv",header=TRUE)
g.data2<-read.csv("genomic_test.csv",header=TRUE)
g.data<-rbind(g.data1,g.data2)
#### Read the genomic data in
summary(g.data)
# number of genes
no.g<-16 #IDH, 1p19q include 16,17
# Student's t-test with false discovery rate to select features
selected.f<-as.list(rep(NA, no.g))
names(selected.f)<-colnames(g.data)[2:17] #IDH, 16, 1p19q 17
# number of selected features
no.s.f<-rep(NA, no.g)
# Instead, we decided to use p-value to screen the features
# For each gene: FDR-adjust the per-feature p-values and keep features with
# adjusted p < 0.5.
# NOTE(review): `t.fun` is not defined in this file -- presumably a helper
# returning a t-test p-value for one feature split by group `gg`; confirm
# where it is sourced from.
for(i in 1:no.g){
temp<-p.adjust(apply(xall, 2, t.fun, gg=g.data[, i+1]), method="fdr")
selected.f[[i]]<-which(temp<0.5)
no.s.f[i]<-length(selected.f[[i]])
}
no.s.f
# indices of selected features per gene
egfr<-c(selected.f[["EGFR"]])
pdgf<-c(selected.f[["PDGFRA"]])
pi3k<-c(selected.f[["PI3K"]])
pten<-c(selected.f[["PTEN"]])
nf1<-c(selected.f[["NF1"]])
mdm2<-c(selected.f[["MDM2"]])
p53<-c(selected.f[["P53"]])
cdk4<-c(selected.f[["CDK4"]])
cdkn<-c(selected.f[["CDKN2AB"]])
rb<-c(selected.f[["Rb1"]])
ccnd<-c(selected.f[["CCND2"]])
IDH<-selected.f[["IDHm"]]
# how many features survived screening for each gene
length(egfr)
length(pdgf)
length(pi3k)
length(pten)
length(nf1)
length(mdm2)
length(p53)
length(cdk4)
length(cdkn)
length(rb)
length(ccnd)
length(IDH)
### data of the selected features: one patient-by-feature matrix per gene,
### restricted to the features that survived the FDR screen above, then
### written out for the LASSO step.
#### center and scale?
#x.data.rtk<-scale(xall[, RTK.related])
x.data.egfr<-xall[,egfr]
x.data.pdgf<-xall[,pdgf]
x.data.pi3k<-xall[,pi3k]
x.data.pten<-xall[,pten]
x.data.nf1<-xall[,nf1]
x.data.mdm2<-xall[,mdm2]
x.data.p53<-xall[,p53]
# BUG FIX: the original line read `x.data.cdk4<--xall[,cdk4]`, which parses
# as `<- -xall[, cdk4]` and silently negated every CDK4 feature value,
# unlike all of its sibling assignments.
x.data.cdk4<-xall[,cdk4]
x.data.cdkn<-xall[,cdkn]
x.data.rb<-xall[,rb]
x.data.ccnd<-xall[,ccnd]
x.data.idh<-xall[,IDH]
setwd("C:/Radiogenomics/result_t_test")
# (the "pdfg" filename typo is kept: the LASSO section below reads this
# exact name back in)
write.table(x.data.egfr, file="egfr_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.pdgf, file="pdfg_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.pi3k, file="pi3k_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.pten, file="pten_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.nf1, file="nf1_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.mdm2, file="mdm2_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.p53, file="p53_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.cdk4, file="cdk4_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.cdkn, file="cdkn_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.rb, file="rb_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.ccnd, file="ccnd_selected.csv",sep=",",row.names=F, col.names=TRUE)
write.table(x.data.idh,file="idh_selected.csv",sep=",",row.names=F, col.names=TRUE)
####### The second step is LASSO based feature selection for each gene
# Load ONE gene's selected-feature CSV (written in Step 1) and the matched
# genomic outcome, then split into train (rows 1-85) / test (rows 86-120).
# The split is fixed by row order -- TODO confirm rows are pre-randomized.
# change xdat at each time by removing "#"
xdat<-read.csv("egfr_selected.csv")
#xdat<-read.csv("pdfg_selected.csv")
#xdat<-read.csv("pi3k_selected.csv")
#xdat<-read.csv("pten_selected.csv")
#xdat<-read.csv("nf1_selected.csv")
#xdat<-read.csv("mdm2_selected.csv")
#xdat<-read.csv("p53_selected.csv")
#xdat<-read.csv("cdk4_selected.csv")
#xdat<-read.csv("cdkn_selected_n.csv")
#xdat<-read.csv("rb_selected.csv")
#xdat<-read.csv("idh_selected.csv")
dim(xdat)
#### Read the genomic data in
g.data<-read.csv("genomic_train_and_test_order_120.csv",header=T)
summary(g.data)
# Outcome: the genomic status column matching the gene chosen above.
y<-g.data$EGFR
#y<-g.data$PDGFRA
#y<-g.data$PI3K
#y<-g.data$PTEN
#y<-g.data$NF1
#y<-g.data$MDM2
#y<-g.data$P53
#y<-g.data$CDK4
#y<-g.data$CDKN2AB
#y<-g.data$Rb1
#y<-g.data$IDHm
y<-as.factor(y)
require(dplyr)
## Additional analysis) xdat was further separated as anatomic, diffusion, and perfusion imaging during the revision
#x.ana<-xdat %>% select(starts_with("t1."), starts_with("flair."))
#x.ana<-xdat %>% select(starts_with("adc."))
#x.ana<-xdat %>% select(starts_with("cbv."))
# Train/test design matrices and labels.
# NOTE(review): data.matrix() on the factor y stores its integer codes --
# glmnet's binomial family accepts this two-level coding; confirm if levels
# ever change.
x.train <-data.matrix(xdat[1:85,]) # change xdat as x.ana for additional analysis
x.test <- data.matrix(xdat[86:120,])
yy.train <-data.matrix(y[1:85])
yy.test <-data.matrix(y[86:120])
# LASSO-based selection and diagnostic performance using 3-fold cross
# validation: fit a penalized logistic model, report train/test ROC, then
# export the data for the nonzero-coefficient (LASSO-selected) features.
set.seed(600)
require(glmnet)
require(pROC)
glmnet.obj <- cv.glmnet(x.train,yy.train, family=c("binomial"),nfolds=3)
# Predicted probabilities at the CV-optimal lambda.
train.p<-predict(glmnet.obj,newx=x.train,s="lambda.min",type="response")
auc.tr<-roc(yy.train~as.vector(train.p))
test.p<-predict(glmnet.obj,
                newx=x.test, s="lambda.min", type="response")
auc.tst<-roc(yy.test~as.vector(test.p))
# Training performance: AUC, 95% CI, and metrics at the Youden-optimal cutoff.
auc.tr
ci(auc.tr)
coords(auc.tr, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))
# Test performance.
auc.tst
ci(auc.tst)
coords(auc.tst, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))
# BUG FIX: the coefficient table must be sized and named from 'xdat', the data
# the model was actually fitted on. The original used 'x.ana', which only
# exists when one of the optional anatomic/diffusion/perfusion subset lines
# above is uncommented (substitute x.ana for xdat here for that analysis).
coef.mat<-matrix(NA, 1+ncol(xdat), 1)
coef.mat[1:nrow(coef.mat),1]<-as.vector(coef(glmnet.obj, s="lambda.min"))
rownames(coef.mat)<-c(("intercept"),colnames(xdat))
# Keep only the nonzero (LASSO-selected) coefficients.
selected.mat<-coef.mat[(coef.mat!=0)]
names(selected.mat)<-rownames(coef.mat)[coef.mat!=0]
# Feature data for the selected features; [-1] drops the intercept.
xx <-xdat[,names(selected.mat)[-1]]
# Save the selected-feature data; uncomment the line matching the gene loaded
# above so the Step-3 pathway files are assembled from the right CSVs.
setwd("C:/Radiogenomics/lasso_results")
write.table(xx,file="coeff_egfr.csv",sep=",")
#write.table(xx,file="coeff_pdfg.csv",sep=",")
#write.table(xx,file="coeff_pi3k.csv",sep=",")
#write.table(xx,file="coeff_pten.csv",sep=",")
#write.table(xx,file="coeff_nf1.csv",sep=",")
#write.table(xx,file="coeff_mdm2.csv",sep=",")
#write.table(xx ,file="coeff_p53.csv",sep=",")
#write.table(xx,file="coeff_cdk4.csv",sep=",")
#write.table(xx,file="coeff_cdkn.csv",sep=",")
#write.table(xx,file="coeff_rb.csv",sep=",")
#write.table(xx,file="coeff_ccnd.csv",sep=",")
#write.table(xx,file="coeff_idh.csv",sep=",")
#### Third Step: Random Forest Classifier for Feature Selection
## Before this step, you need to make a combined feature file for each pathway.
## NOTE(review): rm(list=ls()) wipes the workspace by design -- this script is
## run section by section and everything below comes from the CSVs saved above.
rm(list=ls())
setwd("C:/Radiogenomics/lasso_results")
# LASSO-selected feature data per gene ("pdfg" file name is a kept typo).
egfr<- read.csv("coeff_egfr.csv",header=T)
pdgf<- read.csv("coeff_pdfg.csv",header=T)
pi3k<- read.csv("coeff_pi3k.csv",header=T)
pten<- read.csv("coeff_pten.csv",header=T)
mdm2<- read.csv("coeff_mdm2.csv",header=T)
p53<- read.csv("coeff_p53.csv",header=T)
cdk4<- read.csv("coeff_cdk4.csv",header=T)
cdkn<- read.csv("coeff_cdkn.csv",header=T)
rb<- read.csv("coeff_rb.csv",header=T)
ccnd<- read.csv("coeff_ccnd.csv",header=T)
# Column-bind the member genes of each signalling pathway.
# BUG FIX: the object read above is 'pdgf'; the original cbind referenced an
# undefined 'pdfg' and stopped the script with an "object not found" error.
path_rtk <-cbind (egfr,pdgf,pi3k,pten)
path_p53 <-cbind (mdm2,p53)
path_rb <-cbind (cdk4,cdkn,rb,ccnd)
# check and remove the overlapped features in the path_rtk, path_p53, and path_rb then save them
setwd("C:/Radiogenomics/pathway")
write.table(path_rtk,file="coeff_path_RTK.csv",sep=",")
write.table(path_p53,file="coeff_path_P53.csv",sep=",")
write.table(path_rb,file="coeff_path_Rb.csv",sep=",")
##
# Fit a Random Forest per pathway to rank features by importance; the top-5
# features (by MeanDecreaseGini / MeanDecreaseAccuracy) feed the logistic
# models in the next section. Workspace is cleared; inputs come from the
# pathway CSVs written above.
rm(list=ls())
setwd("C:/Radiogenomics/pathway")
# change the xdat at each time
xdat <- read.csv("coeff_path_RTK.csv",header=T)
#xdat <- read.csv("coeff_path_P53.csv",header=T)
#xdat <- read.csv("coeff_path_Rb.csv",header=T)
summary(xdat)
setwd("C:/Radiogenomics/")
g.data<-read.csv("genomic_train_and_test_order_120.csv",header=T)
summary(g.data)
# Outcome: pathway alteration status matching the xdat chosen above.
y<-g.data$pathway_RTK
#y<-g.data$pathway_p53
#y<-g.data$pathway_Rb
#y<-g.data$IDHm
# Same fixed split as Step 2: rows 1-85 train, 86-120 test.
xtr <-data.matrix(xdat[1:85,])
xtst <- data.matrix(xdat[86:120,])
ytr<-data.matrix(y[1:85])
ytst<-as.factor(y[86:120])
#########random Forest
require(randomForest)
set.seed(100)
#train data
train.data <-data.frame(ytr,xtr)
train.data$ytr<-as.factor(train.data$ytr)
#test data
test.data <-data.frame(ytst,xtst)
test.data$ytst<-as.factor(test.data$ytst)
# Classification forest; importance=TRUE records both permutation accuracy
# and Gini importance measures.
fitrf <-randomForest(ytr ~.,train.data, importance=TRUE, ntree =100)
plot(fitrf)
print(fitrf)
summary(fitrf)
round(importance(fitrf),2)
varImpPlot(fitrf)
importance(fitrf)
# Rank features by each importance measure (arrange() is from dplyr, loaded
# earlier -- TODO confirm it survives the rm(list=ls()) workflow; rm() removes
# objects, not attached packages).
measure<-data.frame(importance(fitrf))
measure$Vars<-row.names(measure)
arrange(measure,desc(measure$MeanDecreaseGini))
arrange(measure,desc(measure$MeanDecreaseAccuracy))
# Diagnostic performance: logistic model on the top-5 Random-Forest features.
# Run ONE pathway pair at a time -- each pair overwrites 'fit'/'fit.tst', so
# comment out the pathways you are not evaluating.
# RTK
fit<-glm(ytr~t1.xx+t1.xx+flair.xx+adc.xx+cbv.xx, family = binomial(link = "logit"),data=train.data) # this is an example. Write down the selected top 5 features from the Random Forest
fit.tst<-glm(ytst~t1.xx+t1.xx+flair.xx+adc.xx+cbv.xx, family = binomial(link = "logit"),data=test.data) # this is an example.
#P53
fit<-glm(ytr~t1.930+t1.412+t1.439+t1.540+t1.951, family = binomial(link = "logit"),data=train.data) # this is an example.
fit.tst<-glm(ytst~t1.930+t1.412+t1.439+t1.540+t1.951, family = binomial(link = "logit"),data=test.data) # this is an example.
#Rb
fit<-glm(ytr~t1.743+t1.1443+t1.307+t1.1427+flair.910, family = binomial(link = "logit"),data=train.data)# this is an example.
fit.tst<-glm(ytst~t1.743+t1.1443+t1.307+t1.1427+flair.910, family = binomial(link = "logit"),data=test.data)# this is an example.
# Training-set ROC, optimal cutoff, and confusion matrix.
# predict() on a glm defaults to the link (logit) scale; ROC and the cutoff
# are computed consistently on that scale.
# NOTE(review): confusionMatrix() is from the caret package -- confirm caret
# is attached before running this section.
pred=predict(fit)
roc1=roc (ytr ~ pred)
plot (roc1)
roc1
ci(roc1)
coords(roc1, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))
cutoff.value<-coords(roc1, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))[["threshold"]]
predicted.outcome<-as.factor(ifelse(pred>=cutoff.value, 1, 0))
confusionMatrix(table(predicted.outcome, ytr))
# Test-set ROC, optimal cutoff, and confusion matrix.
pred.tst=predict(fit.tst)
roc2=roc (ytst ~ pred.tst)
plot (roc2)
roc2
ci(roc2)
coords(roc2, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))
cutoff.value.tst<-coords(roc2, "best", ret=c("threshold", "sens", "spec", "ppv", "npv", "accuracy"))[["threshold"]]
# BUG FIX: classify the TEST predictions ('pred.tst'); the original thresholded
# the 85 training predictions ('pred') against the 35 test labels, producing a
# length mismatch and a meaningless confusion matrix.
predicted.outcome.tst<-as.factor(ifelse(pred.tst>=cutoff.value.tst, 1, 0))
confusionMatrix(table(predicted.outcome.tst, ytst))
|
28822dfb35f859d61052a5a30416f4c0389c2142
|
d82a996f50f6b553f645af24a6dd1600b19084cf
|
/MicroPEM_Data_Analysis/MicroPEM_data_cleaning.r
|
e32ef9ff7414133ccccc6727a9bf66caa42333b7
|
[] |
no_license
|
ashlinn/GRAPHS_exposure_data
|
99f3035d2746b318f42113b3759a33543e83d91a
|
9f5923734d00f5a63f66fbc30d318537bd235da9
|
refs/heads/master
| 2021-01-21T11:18:34.561236
| 2018-04-06T20:03:51
| 2018-04-06T20:03:51
| 91,735,083
| 0
| 0
| null | 2017-05-18T20:30:11
| 2017-05-18T20:30:11
| null |
UTF-8
|
R
| false
| false
| 55,601
|
r
|
MicroPEM_data_cleaning.r
|
################################# FIND THE DATA FILES ############################################
# Directory of raw MicroPEM nephelometer files: both files as downloaded in
# Ghana and RTI-reprocessed files that correct an RH issue.
filedirectory <- "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPem Raw Data/Nephelometer_processed_correct"
Datafiles = list.files(filedirectory,full.names = TRUE) # grab all MicroPEM files in the directory
# Drop files known to contain no nephelometer reading.
excludelist <- c("KHC0234") # bad file, contains no data.
# IDIOM FIX: vectorized grepl() replaces the original
# sapply(excludelist, grep)/unlist/%in% round trip; fixed = TRUE treats each
# ID as a literal substring, and a pattern that matches nothing is a no-op.
for (bad_id in excludelist) {
  Datafiles <- Datafiles[!grepl(bad_id, Datafiles, fixed = TRUE)]
}
MicroPEMfiles <- as.data.frame(Datafiles) # one row per raw data file
######################## DEFINE A FUNCTION TO READ AND STORE MICROPEM RAW FILE####################
# for each microPEM file, creates 2 matrices: (i) setting parameters; (ii) time varying measurements
# NOTE(review): require() fails silently if a package is missing; library()
# would be the safer loader here.
require(dplyr)
require(akima)
require(lubridate)
require(readr)
require(namespace)
# Read one raw MicroPEM CSV file and return a list with two tbl_df parts:
#   $control  - one row of instrument settings / calibration header values
#   $measures - time-varying measurements, one row per device log record
# path: full path to a raw MicroPEM file. The raw layout is fixed-position:
# header fields in the first ~28 rows (read individually by 'skip='), column
# names on row 25 (skip = 24), and the data table starting at row 29 (skip = 28).
convertOutput <- function(path) {
###########################################
# READ THE DATA (time-varying table)
###########################################
dataPEM <- read.csv(path, skip = 28, header = FALSE, fill = TRUE)
# Drop rows the device flagged as unreadable.
dataPEM <- dataPEM[dataPEM[, 1] != "Errored Line", ]
dataPEM[, 1] <- as.character(dataPEM[, 1])
dataPEM[, 2] <- as.character(dataPEM[, 2])
dataPEM <- dplyr::tbl_df(dataPEM)
dataPEM <- dataPEM[,1:14] #get rid of empty column
# isolate names and erase spaces and hyphens
namesPEM <- read.csv(path, skip = 24, header = FALSE, nrow = 1)
namesPEM <- unlist(lapply(as.list(namesPEM), toString))
namesPEM <- gsub(" ", "", namesPEM)
namesPEM <- sub("-", "", namesPEM)
namesPEM <- namesPEM[1:14] #get rid of extra column names
names(dataPEM) <- namesPEM
# Convert abbreviated month names to two-digit numbers so both date layouts
# can be parsed numerically below.
dataPEM$Date <- tolower(dataPEM$Date)
dataPEM$Date <- gsub("jan", "01", dataPEM$Date)
dataPEM$Date <- gsub("feb", "02", dataPEM$Date)
dataPEM$Date <- gsub("mar", "03", dataPEM$Date)
dataPEM$Date <- gsub("apr", "04", dataPEM$Date)
dataPEM$Date <- gsub("may", "05", dataPEM$Date)
dataPEM$Date <- gsub("jun", "06", dataPEM$Date)
dataPEM$Date <- gsub("jul", "07", dataPEM$Date)
dataPEM$Date <- gsub("aug", "08", dataPEM$Date)
dataPEM$Date <- gsub("sep", "09", dataPEM$Date)
dataPEM$Date <- gsub("oct", "10", dataPEM$Date)
dataPEM$Date <- gsub("nov", "11", dataPEM$Date)
dataPEM$Date <- gsub("dec", "12", dataPEM$Date)
dataPEM <- dataPEM[dataPEM$Date!="",] #get rid of empty rows
# get original date time
originalDateTime <- paste(dataPEM$Date, dataPEM$Time, sep = " ")
# Warning: Time does not have time zone
# create a variable with date and time together
# Default parse is day-month-year; rows containing "/" are re-parsed as
# month/day/year. NOTE(review): the second line indexes with the full-length
# mdy_hms() result -- correct only when a file uses one format throughout;
# confirm for mixed-format files.
timeDate <- dmy_hms(originalDateTime, tz="GMT")
timeDate[grep("/",originalDateTime)] = mdy_hms(originalDateTime, tz="GMT")
# Coerce each measurement column to numeric; the orifice-pressure column name
# differs between firmware versions, hence the fallback.
nephelometer <- as.numeric(dataPEM$RHCorrectedNephelometer)
temperature <- as.numeric(dataPEM$Temp)
relativeHumidity <- as.numeric(dataPEM$RH)
battery <- as.numeric(dataPEM$Battery)
inletPressure <- as.numeric(dataPEM$InletPress)
orificePressure <- as.numeric(dataPEM$FlowOrificePress)
if(length(orificePressure)==0) orificePressure <- as.numeric(dataPEM$OrificePress)
flow <- as.numeric(dataPEM$Flow)
xAxis <- as.numeric(dataPEM$Xaxis)
yAxis <- as.numeric(dataPEM$Yaxis)
zAxis <- as.numeric(dataPEM$Zaxis)
vectorSum <- as.numeric(dataPEM$VectorSumComposite)
names(dataPEM)[14] <- "shutDownReason"
shutDownReason <- as.character(dataPEM$shutDownReason)
# Wearing-compliance columns are placeholders (filled elsewhere, if at all).
wearingCompliance <- rep(NA, length(flow))
validityWearingComplianceValidation <- rep(0, length(flow))
###########################################
# READ THE TOP OF THE FILE (header fields at fixed row offsets)
###########################################
participantID <- read.csv(path, skip = 7, header = FALSE, nrow = 1)[1, 2]
# Fall back to the file path when the ID field is blank.
if (is.na(participantID)) {
participantID <- path
}
downloadDate <- read.csv(path, skip = 1,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
totalDownloadTime <- read.csv(path, skip = 2,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
deviceSerial <- read.csv(path, skip = 4,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
dateTimeHardware <- read.csv(path,skip = 5,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
dateTimeSoftware <- read.csv(path, skip = 6,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
version <- read.csv(path, skip = 6, header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 3]
filterID <- as.character(read.csv(path, skip = 8,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2])
participantWeight <- read.csv(path, skip = 9,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
inletAerosolSize <- read.csv(path, skip = 10,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
laserCyclingVariablesDelay <- read.csv(path, skip = 11,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2]
laserCyclingVariablesSamplingTime <- read.csv(path, skip = 11,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 3]
laserCyclingVariablesOffTime <- read.csv(path, skip = 11,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 4]
SystemTimes <- paste0(read.csv(path, skip = 12,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 2],
read.csv(path, skip = 12,
header = FALSE,
nrow = 1, stringsAsFactors=FALSE)[1, 3])
# Calibration table (rows 15-24): one row per sensor with slope/offset/log
# interval/targets. Some firmware emits a 6-column variant; the fix-up below
# shifts the extra flow-rate value into a 7th column and drops the spill row.
tempTable <- as.data.frame(read.csv(path, skip = 14,
header = FALSE, nrow = 10))
if(ncol(tempTable)==6){
tempTable[6,7] = tempTable[7,1]
tempTable = tempTable[-7,]
}
tempTable <- cbind(tempTable[, 2:ncol(tempTable)],
rep(NA, nrow(tempTable)))
nephelometerSlope <- tempTable[1, 1]
nephelometerOffset <- tempTable[1, 2]
nephelometerLogInterval <- tempTable[1, 3]
temperatureSlope <- tempTable[2, 1]
temperatureOffset <- tempTable[2, 2]
temperatureLog <- tempTable[2, 3]
humiditySlope <- tempTable[3, 1]
humidityOffset <- tempTable[3, 2]
humidityLog <- tempTable[3, 3]
inletPressureSlope <- tempTable[4, 1]
inletPressureOffset <- tempTable[4, 2]
inletPressureLog <- tempTable[4, 3]
inletPressureHighTarget <- tempTable[4, 4]
inletPressureLowTarget <- tempTable[4, 5]
orificePressureSlope <- tempTable[5, 1]
orificePressureOffset <- tempTable[5, 2]
orificePressureLog <- tempTable[5, 3]
orificePressureHighTarget <- tempTable[5, 4]
orificePressureLowTarget <- tempTable[5, 5]
flowLog <- tempTable[6, 3]
flowHighTarget <- tempTable[6, 4]
flowLowTarget <- tempTable[6, 5]
flowRate <- tempTable[6, 6]
accelerometerLog <- tempTable[7, 3]
batteryLog <- tempTable[8, 3]
ventilationSlope <- tempTable[9, 1]
ventilationOffset <- tempTable[9, 2]
###########################################
# control table: one row holding every header/setting value read above
###########################################
control <- data.frame(downloadDate = downloadDate,
totalDownloadTime = totalDownloadTime,
deviceSerial = deviceSerial,
dateTimeHardware = dateTimeHardware,
dateTimeSoftware = dateTimeSoftware,
version = version,
participantID = participantID,
filterID = filterID,
participantWeight = participantWeight,
inletAerosolSize = inletAerosolSize,
laserCyclingVariablesDelay =
laserCyclingVariablesDelay,
laserCyclingVariablesSamplingTime =
laserCyclingVariablesSamplingTime,
laserCyclingVariablesOffTime =
laserCyclingVariablesOffTime,
SystemTimes = SystemTimes,
nephelometerSlope = nephelometerSlope,
nephelometerOffset = nephelometerOffset,
nephelometerLogInterval =
nephelometerLogInterval,
temperatureSlope = temperatureSlope,
temperatureOffset = temperatureOffset,
temperatureLog = temperatureLog,
humiditySlope = humiditySlope,
humidityOffset = humidityOffset,
humidityLog = humidityLog,
inletPressureSlope =
inletPressureSlope,
inletPressureOffset =
inletPressureOffset,
inletPressureLog = inletPressureLog,
inletPressureHighTarget =
inletPressureHighTarget,
inletPressureLowTarget =
inletPressureLowTarget,
orificePressureSlope =
orificePressureSlope,
orificePressureOffset =
orificePressureOffset,
orificePressureLog =
orificePressureLog,
orificePressureHighTarget =
orificePressureHighTarget,
orificePressureLowTarget =
orificePressureLowTarget,
flowLog = flowLog,
flowHighTarget = flowHighTarget,
flowLowTarget = flowLowTarget,
flowRate = flowRate,
accelerometerLog = accelerometerLog,
batteryLog = batteryLog,
ventilationSlope = ventilationSlope,
ventilationOffset = ventilationOffset)
control <- dplyr::tbl_df(control)
###########################################
# CREATE THE OBJECT (measures table + return value)
###########################################
measures <- data.frame(timeDate = timeDate,
nephelometer = nephelometer,
temperature = temperature,
relativeHumidity = relativeHumidity,
battery = battery,
orificePressure = orificePressure,
inletPressure = inletPressure,
flow = flow,
xAxis = xAxis,
yAxis = yAxis,
zAxis = zAxis,
vectorSum = vectorSum,
shutDownReason = shutDownReason,
wearingCompliance = wearingCompliance,
validityWearingComplianceValidation =
validityWearingComplianceValidation,
originalDateTime = originalDateTime)
measures <- dplyr::tbl_df(measures)
# Ensure the text columns stay character (mutate_ is the deprecated
# standard-evaluation form of mutate, kept as-is).
measures <- measures %>%
mutate_(shutDownReason = quote(as.character(shutDownReason))) %>%
mutate_(originalDateTime = quote(as.character(originalDateTime)))
#microPEMObject <- MicroPEM$new(control = control,
#         calibration = list(NA),
#        measures = measures,
#       original = TRUE)
# Return a plain list (the commented R6/RefClass constructor above was the
# original design; calibration is a placeholder).
microPEMObject <-list(control = control,
calibration = list(NA),
measures = measures,
original = TRUE)
return(microPEMObject)
}
###################################CHECK MICROPEM SETTINGS################################
# Build one row of device settings + summary statistics per MicroPEM session.
# PERF/IDIOM FIX: per-file rows are collected in a preallocated list and bound
# once at the end, instead of growing QualityControl with rbind() inside the
# loop (which copies the whole table every iteration).
qc_rows <- vector("list", nrow(MicroPEMfiles))
for(k in seq_len(nrow(MicroPEMfiles))){
  Data1 = convertOutput(as.character(MicroPEMfiles$Datafiles[k])) # read one raw MicroPEM file
  Data2 = Data1$measures                      # all time-varying variables
  Data3 = Data2[!is.na(Data2$nephelometer),]  # rows with a nephelometer reading
  Data4 = as.data.frame(Data1$control, stringsAsFactors=FALSE) # device settings for this session
  # Start/end/min/max timestamps and mean/min/max nephelometer reading.
  Data4$starttime = Data3$timeDate[1]
  Data4$endtime = Data3$timeDate[nrow(Data3)]
  Data4$mintime = sort(Data3$timeDate, decreasing =F, na.rm=T)[1]
  Data4$maxtime = sort(Data3$timeDate, decreasing =T, na.rm=T)[1]
  Data4$mean = mean(Data3$nephelometer)
  Data4$min = min(Data3$nephelometer)
  Data4$max = max(Data3$nephelometer)
  # Count shutdown/warning messages between start and (end - 200 seconds).
  # IDIOM FIX: direct equality counts replace sum(sapply(..., match, nomatch=0));
  # NA reasons contribute 0 in both forms.
  Data5 = Data2[Data2$timeDate>=Data4$starttime & Data2$timeDate<=(Data4$endtime-200),]
  Data4$startbutton = if (nrow(Data5) == 0) NA else sum(Data5$shutDownReason == "Start button", na.rm = TRUE)
  Data4$button1 = if (nrow(Data5) == 0) NA else sum(Data5$shutDownReason == "Button 1 pressed", na.rm = TRUE)
  Data4$button2 = if (nrow(Data5) == 0) NA else sum(Data5$shutDownReason == "Button 2 pressed", na.rm = TRUE)
  Data4$lowbattery = sum(Data2$shutDownReason == "Low Battery Stop", na.rm = TRUE)
  Data4$deadbattery = sum(Data2$shutDownReason == "Battery dead", na.rm = TRUE)
  # Number of backward jumps in the timestamp sequence (device clock errors).
  # diff() replaces the original append-a-sentinel/POSIXlt construction; the
  # sentinel's final +1-second difference could never be negative, so the
  # counts are identical.
  Data4$timeerror = sum(diff(Data3$timeDate) < 0, na.rm = TRUE)
  qc_rows[[k]] = Data4
  # print loop progress
  if (k %% 50 == 0)
    print(k)
}
QualityControl = do.call(rbind, qc_rows)
## Correct filter-ID typos recorded in the field logs ------------------------
## (lookup vector: observed typo -> corrected ID)
QualityControl$filterID <- as.character(QualityControl$filterID)
filter_id_fixes <- c(
  "KH00123" = "KHC0123",
  "KHC3392" = "KHC0392",
  "LHC0232" = "KHC0232"
)
fix_these <- QualityControl$filterID %in% names(filter_id_fixes)
QualityControl$filterID[fix_these] <- filter_id_fixes[QualityControl$filterID[fix_these]]
unique(sort(QualityControl$filterID)) # check unique filterID
## Correct MicroPEM serial-number typos --------------------------------------
QualityControl$deviceSerial <- as.character(QualityControl$deviceSerial)
serial_fixes <- c(
  "UGF32/2012" = "UGF320415N",
  "UGF320"     = "UGF320429N",
  "UGF320429"  = "UGF320429N",
  "UFF320401N" = "UGF320401N",
  "UGF220414N" = "UGF320414N",
  "UGF220486N" = "UGF320486N",
  "UGF300422N" = "UGF320422N",
  "UGF300444N" = "UGF320444N",
  "UGF300599N" = "UGF320599N",
  "UG3320463N" = "UGF320463N"
)
fix_these <- QualityControl$deviceSerial %in% names(serial_fixes)
QualityControl$deviceSerial[fix_these] <- serial_fixes[QualityControl$deviceSerial[fix_these]]
unique(sort(QualityControl$deviceSerial)) # check unique serials
## Make sure the file-path column is character -------------------------------
QualityControl$participantID <- as.character(QualityControl$participantID)
## Drop duplicated files downloaded with wrong MicroPEM settings (those files
## were reprocessed via the MicroPEM Docking Station). Expected settings:
## nephelometerSlope = 3, humiditySlope = 1, humidityOffset within (-8, 10).
QualityControl <- QualityControl[as.numeric(QualityControl$nephelometerSlope) >= 3, ]
QualityControl <- QualityControl[as.numeric(QualityControl$humiditySlope) <= 1 &
                                   as.numeric(QualityControl$humiditySlope) > 0, ]
QualityControl <- QualityControl[as.numeric(QualityControl$humidityOffset) > -8 &
                                   as.numeric(QualityControl$humidityOffset) < 10, ]
##################################CORRECT WRONG DATETIME##########################
# correcting errors that Zheng identified manually (by inspection)
# Start from the automatically detected session start/end times, then
# overwrite the sessions whose device clocks were known to be wrong.
# All timestamps are GMT; each assignment targets one session by filter ID.
# These values were hand-verified -- do not reformat or "clean up" this list.
QualityControl$starttime_new = QualityControl$starttime
QualityControl$endtime_new = QualityControl$endtime
QualityControl$endtime_new[QualityControl$filterID=="KHC0221"] = mdy_hms("12/6/2013 9:24:00", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0245"] = mdy_hms("12/12/2013 22:35:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0268"] = mdy_hms("12/20/2013 8:35:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0514"] = mdy_hms("5/22/2014 14:57:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0679"] = mdy_hms("6/25/2014 23:15:11", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0867"] = mdy_hms("9/18/2014 16:32:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0936"] = mdy_hms("10/9/2014 15:20:00", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1042"] = mdy_hms("12/18/2014 13:24:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1222"] = mdy_hms("2/15/2015 7:52:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1274"] = mdy_hms("2/21/2015 20:27:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1499"] = mdy_hms("4/21/2015 16:20:15", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1976"] = mdy_hms("1/26/2016 18:01:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD111B"] = mdy_hms("6/16/2015 8:04:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD68C"] = mdy_hms("11/8/2014 9:20:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD86A"] = mdy_hms("11/11/2014 11:22:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="GN058"] = mdy_hms("4/1/2014 11:10:10", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="GN084"] = mdy_hms("4/11/2014 10:31:10", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="GN106"] = mdy_hms("4/15/2014 11:14:10", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0095"] = mdy_hms("10/30/2013 5:52:19", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0181"] = mdy_hms("11/21/2013 8:59:15", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0271"] = mdy_hms("2/16/2013 16:15:05", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0365"] = mdy_hms("2/21/2014 11:49:40", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0566"] = mdy_hms("5/29/2014 12:02:40", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0575"] = mdy_hms("6/5/2014 6:07:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0661"] = mdy_hms("6/19/2014 14:51:15", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1018"] = mdy_hms("12/14/2014 9:13:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1021"] = mdy_hms("12/14/2014 6:51:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1054"] = mdy_hms("12/20/2014 8:46:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1080"] = mdy_hms("12/25/2014 8:06:15", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1092"] = mdy_hms("12/31/2014 13:46:27", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1141"] = mdy_hms("1/30/2015 7:56:00", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1335"] = mdy_hms("3/6/2015 10:36:09", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1821"] = mdy_hms("9/28/2015 11:40:50", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1938"] = mdy_hms("12/31/2015 11:00:00", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD04A"] = mdy_hms("8/3/2014 10:34:15", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD11C"] = mdy_hms("8/22/2014 6:30:19", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHCD77B"] = mdy_hms("11/14/2014 12:28:10", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHCD78B"] = mdy_hms("11/14/2014 11:30:10", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD93C"] = mdy_hms("12/18/2014 16:37:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0915"] = mdy_hms("10/3/2014 6:52:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1144"] = mdy_hms("1/31/2015 9:04:40", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1542"] = mdy_hms("5/4/2015 10:07:50", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1555"] = mdy_hms("5/5/2015 8:57:50", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1674"] = mdy_hms("7/23/2015 8:44:20", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1794"] = mdy_hms("9/17/2015 6:00:00", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0123"] = mdy_hms("11/11/2013 7:01:30", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC0266"] = mdy_hms("12/20/13 8:13:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0356"] = mdy_hms("2/19/2014 10:10:25", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0429"] = mdy_hms("4/23/2014 10:45:05", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC0526"] = mdy_hms("5/21/2014 9:43:35", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1273"] = mdy_hms("2/22/15 10:06:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1540"] = mdy_hms("6/24/2015 7:57:10", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC1566"] = mdy_hms("5/8/2015 7:38:09", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHC1964"] = mdy_hms("1/22/2016 14:23:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHC2022"] = mdy_hms("2/24/2016 14:22:47", tz="GMT")
QualityControl$endtime_new[QualityControl$filterID=="KHCD07A"] = mdy_hms("8/3/14 10:05:30", tz="GMT")
QualityControl$starttime_new[QualityControl$filterID=="KHCD44A"] = mdy_hms("10/16/2014 10:30:00", tz="GMT")
#######################################READ IN MICROPEM LOG DATA##################################
# Load the field logsheet (Stata .dta) that records, per session: setup times,
# HEPA-filter on/off times, and pickup times. These are later compared to the
# device-derived times above.
require(readstata13)
require(stringr)
require(lubridate)
# make sure the filepath is correct
MicroPEM = read.dta13("/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/Data/Survey_Data/MicroPem.dta")
# Times are stored as HMM/HHMM integers-as-text; left-pad to 4 digits so
# "825" becomes "0825" and parses as a valid clock time.
#add leading 0 to time variables if hour is a single digit (e.g. 825 to 0825)
MicroPEM$labsetdtt = str_pad(MicroPEM$labsetdtt, 4, pad = "0")
MicroPEM$fieldsetdt = str_pad(MicroPEM$fieldsetdt, 4, pad = "0")
MicroPEM$thepaon1 = str_pad(MicroPEM$thepaon1, 4, pad = "0")
MicroPEM$pickupdtt = str_pad(MicroPEM$pickupdtt, 4, pad = "0")
MicroPEM$thepaon2 = str_pad(MicroPEM$thepaon2, 4, pad = "0")
MicroPEM$thepaoff2 = str_pad(MicroPEM$thepaoff2, 4, pad = "0")
MicroPEM$tupemoff = str_pad(MicroPEM$tupemoff, 4, pad = "0")
# "0000"/"9999" are logsheet sentinel codes meaning the MicroPEM was not
# running at retrieval; treat those end-of-session HEPA times as missing.
#assign NA to HEPA end times if Micorpem was not running when retrieving
MicroPEM$thepaon2[MicroPEM$thepaon2=="0000"|MicroPEM$thepaon2=="9999"]=NA
MicroPEM$thepaoff2[MicroPEM$thepaoff2=="0000"|MicroPEM$thepaoff2=="9999"]=NA
# HEPA period 1 (setup day): combine visit date + on-time into a GMT datetime.
#HEPA start Datetime in logsheet
MicroPEM$HEPA1St = paste(MicroPEM$datevisit, MicroPEM$thepaon1)
MicroPEM$HEPA1St = dmy_hm(as.character(MicroPEM$HEPA1St), tz="GMT")
range(MicroPEM$HEPA1St)  # interactive sanity check on the parsed range
# HEPA period 2 (pickup day): on/off datetimes; NAs expected where the
# sentinel codes above applied.
#HEPA end Datetime in logsheet
MicroPEM$HEPA2St = paste(MicroPEM$pickupdtd, MicroPEM$thepaon2)
MicroPEM$HEPA2St = dmy_hm(as.character(MicroPEM$HEPA2St), tz="GMT")
which(is.na(MicroPEM$HEPA2St))   # interactive check: rows missing HEPA2 start
range(MicroPEM$HEPA2St, na.rm=T)
MicroPEM$HEPA2End = paste(MicroPEM$pickupdtd, MicroPEM$thepaoff2)
MicroPEM$HEPA2End = dmy_hm(as.character(MicroPEM$HEPA2End), tz="GMT")
which(is.na(MicroPEM$HEPA2End))  # interactive check: rows missing HEPA2 end
range(MicroPEM$HEPA2End, na.rm=T)
# One known logsheet typo: KHC031B should be KHCD31B (the two which()/[] calls
# are interactive checks before and after the fix).
#correct a filterid typo
MicroPEM$filterid[which(is.na(MicroPEM$mstudyid))]
which(MicroPEM$filterid=="KHC031B")
MicroPEM$filterid[MicroPEM$filterid=="KHC031B"] = "KHCD31B"
##################################IDENTIFY HEPA CHANGEPOINT###################################
# For each sample, detect the HEPA-filter purge periods at the start and end of the
# nephelometer time series (used later for baseline correction), and plot the first
# 100 and last 100 observations for visual inspection.
# Uses binary-segmentation changepoint detection from package "changepoint".
library(changepoint)  # library() errors immediately if the package is missing (require() only warns)
QualityControl1 = QualityControl[order(QualityControl$filterID),] # sort observations by filterID
HEPAdata = NULL # create an empty data frame to store HEPA information
# Define a directory to output figures pdf
plotdirectory <- "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/"
pdf(file = paste0(plotdirectory, "HEPAplot",".pdf"), height = 8, width = 8)
par(mfrow = c(2,2))
par(mar=c(3,3,3,1))
for(k in seq_len(nrow(QualityControl1))){ # seq_len() is safe for an empty table (1:nrow would give c(1, 0))
  Data1 = convertOutput(QualityControl1$participantID[k])
  Data2 = Data1$measures
  Data3 = Data2[!is.na(Data2$nephelometer),]
  Data4 = QualityControl1[k,]
  Data5 = Data3[Data3$timeDate>=(Data4$starttime_new)&Data3$timeDate<=(Data4$endtime_new),] # extract readings between starttime and endtime
  Data5 = Data5[Data5$relativeHumidity>0 & Data5$relativeHumidity<100,] # drop measurements with RH <=0 or RH>=100
  # find the starttime in the logsheet
  Data4$startlog = ifelse(length(MicroPEM$thepaon1[MicroPEM$filterid==as.character(Data4$filterID)])==0,
                          NA, MicroPEM$thepaon1[MicroPEM$filterid==as.character(Data4$filterID)])
  # find the endtime in the logsheet
  # BUG FIX: the value lookup previously indexed by "MicroPEM_Compound$filterid", an
  # object that does not exist in this script; it must use MicroPEM, exactly as the
  # length() guard on the line above does.
  Data4$endlog = ifelse(length(MicroPEM$tupemoff[MicroPEM$filterid==as.character(Data4$filterID)])==0,
                        NA, MicroPEM$tupemoff[MicroPEM$filterid==as.character(Data4$filterID)])
  Data4$nephelometer_avg = mean(Data5$nephelometer) # add mean nephelometer reading
  # find the point when the nephelometer reading changed significantly at the start
  Data6 = Data5[7:100,] # exclude the first 6 readings (about 1 minute)
  HEPASt = cpt.meanvar(Data6$nephelometer,method="BinSeg", Q=3, minseglen=8)
  Data4$HEPAstnumber = HEPASt@cpts[1] # identify the place where changepoint is
  if(Data4$HEPAstnumber==94){ # 94 = length(Data6): no changepoint in the first 100 readings, so no start HEPA
    Data4$HEPAsttime1 = NA
    Data4$HEPAsttime2 = NA
    Data4$HEPAstvalue1 = NA
    Data4$HEPAstvalue2 = NA
  } else {
    Data4$HEPAsttime1 = Data5$timeDate[7] # the starttime of start HEPA
    Data4$HEPAsttime2 = Data5$timeDate[Data4$HEPAstnumber+6] # the endtime of start HEPA (+6 restores the excluded offset)
    # the mean nephelometer reading in start HEPA, excluding max and min readings
    Data4$HEPAstvalue1 = mean(Data6$nephelometer[1:Data4$HEPAstnumber], trim=1/Data4$HEPAstnumber)
    # the mean nephelometer reading after start HEPA period (within the first 100 readings)
    Data4$HEPAstvalue2 = mean(Data6$nephelometer[(Data4$HEPAstnumber+1):94])
  }
  # plot the first 100 readings and show the changepoint
  title <- paste(Data4$filterID, Data4$deviceSerial, format(Data4$starttime_new, format = "%b %d %Y"), "StHEPA=", Data4$HEPAstnumber,
                 "\n", Data4$starttime_new, "startlog=", Data4$startlog)
  plot(HEPASt,cpt.width=3)
  title(main = title, cex.main = 0.7, col.main = "black")
  # find the point when the nephelometer reading changed significantly at the end
  Data7 = Data5[(nrow(Data5)-3):(nrow(Data5)-99),] # exclude the last 3 readings (about 1 minute); note reversed order
  HEPAEnd = cpt.meanvar(Data7$nephelometer,method="BinSeg", Q=3, minseglen=8)
  Data4$HEPAendnumber = HEPAEnd@cpts[1] # identify the place where changepoint is
  if(Data4$HEPAendnumber==97){ # 97 = length(Data7): no changepoint in the last 100 readings, so no end HEPA
    Data4$HEPAendtime1 = NA
    Data4$HEPAendtime2 = NA
    Data4$HEPAendvalue1 = NA
    Data4$HEPAendvalue2 = NA
  } else {
    Data4$HEPAendtime1 = Data5$timeDate[nrow(Data5)-2-Data4$HEPAendnumber] # the starttime of end HEPA
    Data4$HEPAendtime2 = Data5$timeDate[nrow(Data5)-3] # the endtime of end HEPA
    # the mean nephelometer reading in end HEPA, excluding max and min readings
    Data4$HEPAendvalue1 = mean(Data7$nephelometer[1:Data4$HEPAendnumber], trim=1/Data4$HEPAendnumber)
    # the mean nephelometer reading before end HEPA period (within the last 100 readings)
    Data4$HEPAendvalue2 = mean(Data7$nephelometer[(Data4$HEPAendnumber+1):97])
  }
  # plot the last 100 readings and show the changepoint
  # BUG FIX: the title previously referenced Data4$Endtime_new (capital E); the column
  # is named endtime_new, so the end time was silently dropped (NULL) from the title.
  title <- paste(Data4$filterID, Data4$deviceSerial, format(Data4$starttime_new, format = "%b %d %Y"), "EndHEPA=", Data4$HEPAendnumber,
                 "\n", Data4$endtime_new,"endlog=", Data4$endlog, "lowbat=", Data4$lowbattery, "deadbat=", Data4$deadbattery)
  plot(HEPAEnd,cpt.width=3) #plot end HEPA
  title(main = title, cex.main = 0.7, col.main = "black")
  HEPAdata = rbind(HEPAdata, Data4) # update HEPAdata data frame
  if(k %% 50 == 0) # progress indicator every 50 samples
    print(k)
}
dev.off()
##################################HEPA PERIOD IDENTIFICATION#########################
# HEPAtime.rds holds the manually reviewed HEPA changepoints: Zheng visually inspected
# the changepoint plots produced above, corrected any detection errors in HEPAdata by
# hand, and saved the result as HEPAtime.rds.
# Make sure the filepath is correct; this data file is also available in the github directory.
HEPAtime = readRDS("/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/HEPAtime.rds")
# Parse the four HEPA window timestamps from character to POSIXct (GMT)
for (tcol in c("HEPAsttime1", "HEPAsttime2", "HEPAendtime1", "HEPAendtime2")) {
  HEPAtime[[tcol]] <- mdy_hms(HEPAtime[[tcol]], tz = "GMT")
}
#################################NEPHELOMETER SAMPLE SUMMARY######################################
# For each sample: apply the HEPA baseline correction to the nephelometer series,
# summarize flow rate and total sampled air volume, compute the share of negative
# readings, and plot every corrected series to a PDF for visual validation.
QualityControl2 = merge(QualityControl1, HEPAtime, by="filterID", all=T) # merge MicroPEM settings with correct HEPA datetimes
Nephelometer = NULL # create an empty data frame to store Nephelometer summary statistics
# Define a directory to output figures pdf
plotdirectory <- "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/"
pdf(file = paste0(plotdirectory, "ValidationPlot",".pdf"), height = 8, width = 8)
par(mfrow = c(3,3))
par(mar=c(2,2,3,1))
# loop over microPEM files to apply HEPA correction
for(k in seq_len(nrow(QualityControl2))){ # seq_len() is safe for an empty table (1:nrow would give c(1, 0))
  Data1 = convertOutput(QualityControl2$participantID[k])
  Data2 = Data1$measures
  Data4 = QualityControl2[k,]
  # create a variable for startdate of sampling and correct some MicroPEM system time errors
  Data4$Startdate = as.Date(Data4$starttime_new)
  Data4$Startdate[Data4$filterID=="KHC0271"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0272"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0273"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0274"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0277"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0278"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0279"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0280"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0281"] = as.Date("2014-01-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0398"] = as.Date("2014-03-17", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC0967"] = as.Date("2014-11-14", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC1269"] = as.Date("2015-02-19", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC1867"] = as.Date("2015-10-20", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHC1881"] = as.Date("2015-10-28", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHCD103A"] = as.Date("2015-06-01", TZ="GMT")
  Data4$Startdate[Data4$filterID=="KHCD103B"] = as.Date("2015-06-11", TZ="GMT")
  Data3 = Data2[Data2$timeDate>=(Data4$starttime_new)&Data2$timeDate<=(Data4$endtime_new),] #data between start and end time of measurement
  Data5 = Data3[!is.na(Data3$nephelometer),] #drop rows without nephelometer readings
  Data6 = Data5[Data5$relativeHumidity>0 & Data5$relativeHumidity<100,] #drop measurement with RH <=0 or RH>=100
  if(!is.na(Data4$HEPAsttime1) & !is.na(Data4$HEPAendtime1)){ #both HEPA windows present: linear drift correction
    Start = Data6[Data6$timeDate>=(Data4$HEPAsttime1)&Data6$timeDate<=(Data4$HEPAsttime2),] #start HEPA readings
    End = Data6[Data6$timeDate>=(Data4$HEPAendtime1)&Data6$timeDate<=(Data4$HEPAendtime2),] #end HEPA readings
    Data4$HEPAstnumber = nrow(Start) #number of readings in start HEPA
    Data4$HEPAendnumber = nrow(End) #number of readings in end HEPA
    Data4$HEPASt = mean(Start$nephelometer, trim=1/Data4$HEPAstnumber) #average reading in start HEPA excluding max and min readings
    Data4$HEPAEnd = mean(End$nephelometer, trim=1/Data4$HEPAendnumber) #average reading in end HEPA excluding max and min readings
    # Apply HEPA correction to all nephelometer readings (baseline interpolated linearly
    # from the start-HEPA level to the end-HEPA level across the whole series)
    Data6$nephelometer_corr = Data6$nephelometer - seq(Data4$HEPASt, Data4$HEPAEnd, (Data4$HEPAEnd-Data4$HEPASt)/(length(Data6$nephelometer)-1))
  } else if (!is.na(Data4$HEPAsttime1) & is.na(Data4$HEPAendtime1)) { #if no end HEPA, then only start HEPA is used for correction
    # (NOTE: the original comment here had start/end swapped; this branch runs when the
    # start window exists and the end window is missing)
    Start = Data6[Data6$timeDate>=(Data4$HEPAsttime1)&Data6$timeDate<=(Data4$HEPAsttime2),]
    End = NA
    Data4$HEPAstnumber = nrow(Start)
    Data4$HEPAendnumber = NA
    Data4$HEPASt = mean(Start$nephelometer, trim=1/Data4$HEPAstnumber)
    Data4$HEPAEnd = NA
    Data6$nephelometer_corr = Data6$nephelometer - Data4$HEPASt
  } else if (is.na(Data4$HEPAsttime1) & !is.na(Data4$HEPAendtime1)) { #if no start HEPA, then only end HEPA is used for correction
    Start = NA
    End = Data6[Data6$timeDate>=(Data4$HEPAendtime1)&Data6$timeDate<=(Data4$HEPAendtime2),]
    Data4$HEPAstnumber = NA
    Data4$HEPAendnumber = nrow(End)
    Data4$HEPASt = NA
    Data4$HEPAEnd = mean(End$nephelometer, trim=1/Data4$HEPAendnumber)
    Data6$nephelometer_corr = Data6$nephelometer - Data4$HEPAEnd
  } else if (is.na(Data4$HEPAsttime1) & is.na(Data4$HEPAendtime1)) { #if no start and end HEPA, then no correction
    Data4$HEPAstnumber = NA
    Data4$HEPAendnumber = NA
    Data4$HEPASt = NA
    Data4$HEPAEnd = NA
    Data6$nephelometer_corr = Data6$nephelometer
  }
  Data6$unique_min <- floor_date(Data6$timeDate, unit = "minute") #get minute from datetime
  Data4$Duration = length(unique(Data6$unique_min))/60 #duration of measurement in hours
  #average of raw and HEPA-corrected nephelometer readings
  Data7 = ddply(Data6, .(unique_min), summarise,
                nephelometer_min = mean(nephelometer),
                nephelometer_corr_min = mean(nephelometer_corr))
  Data4$nephelometer_avg = mean(Data7$nephelometer_min)
  Data4$nephelometer_corr_avg = mean(Data7$nephelometer_corr_min)
  Data8 = Data3[!is.na(Data3$flow),] #flow rate data
  Data8$flow_cor = Data8$flow
  if(Data4$Startdate < as.Date(mdy("04/03/2014"))) { #adjust the data with flow meter backward issue
    Data8$flow_cor = Data8$flow*0.8211 - 0.0139 # manual correction from Steve Chillrud
  }
  Data8 = Data8[Data8$shutDownReason!="Flow blocked 1",] #drop rows with flow blocked note
  Data8$unique_min <- floor_date(Data8$timeDate, unit = "minute")
  Data9 = ddply(Data8, .(unique_min), summarise, #get minute flow rate
                Flow = round(max(as.numeric(flow_cor), na.rm=TRUE), digits = 3))
  Data4$vol = sum(Data9$Flow, na.rm=T)/2 #calculate total air volume of sampling
  # BUG FIX: this correction previously indexed with levels(Data4$filterID)=="KHC0100";
  # for a character filterID, levels() is NULL and the assignment was a silent no-op,
  # so the intended full-volume correction for KHC0100 never applied.
  Data4$vol[Data4$filterID=="KHC0100"] = sum(Data9$Flow, na.rm=T) #correct volume for KHC0100 which is always on
  Data4$flow.avg = mean(Data9$Flow, na.rm=T)
  Data4$flow.sd = sd(Data9$Flow, na.rm=T)
  Data4$flow.min = min(Data9$Flow, na.rm=T)
  Data4$flow.max = max(Data9$Flow, na.rm=T)
  # percent of time that flow rate is between 0.28 and 0.55 LPM
  Data4$flow28.good = sum(ifelse(Data9$Flow<= 0.55 & Data9$Flow >= 0.28, 1, 0), na.rm=T)/nrow(Data9)
  # percent of time that flow rate is between 0.30 and 0.55 LPM
  Data4$flow30.good = sum(ifelse(Data9$Flow<= 0.55 & Data9$Flow >= 0.30, 1, 0), na.rm=T)/nrow(Data9)
  Data4$Negative1 = length(which(Data6$nephelometer< (-10)))/nrow(Data6) #calculate percent of negative readings in raw data
  Data4$Negative2 = length(which(Data6$nephelometer_corr< (-10)))/nrow(Data6) #calculate percent of negative readings in HEPA-corrected data
  Nephelometer = rbind(Nephelometer, Data4) # update Nephelometer data frame
  #plots of nephelometer readings which are used for visual validation
  # (samples with >= 20% negative corrected readings are flagged in red)
  title <- paste(Data4$filterID, Data4$deviceSerial, format(Data4$Startdate, format = "%b %d %Y"), "Duration=", round(Data4$Duration),
                 "\n StHEPA=", round(Data4$HEPASt), "EndHEPA=",round(Data4$HEPAEnd), "mean=", round(Data4$nephelometer_avg), "Adj_m=", round(Data4$nephelometer_corr_avg),
                 "\n Neg1=", round(Data4$Negative1, digits=3), "Neg2=", round(Data4$Negative2, digits=3))
  plot(Data6$timeDate, Data6$nephelometer, type = "l", main = "" , xlab="", ylab = "", ylim=c(-100,500), lwd = 2, col = "blue")
  abline(h=0, col="grey")
  if (Data4$Negative2>= 0.2) lines(Data6$timeDate, Data6$nephelometer_corr, col = alpha("red",0.6), lwd = 2)
  if (Data4$Negative2< 0.2) lines(Data6$timeDate, Data6$nephelometer_corr, col = alpha("black", 0.6), lwd = 2)
  if (Data4$Negative2>= 0.2) title(main = title, cex.main = 0.7, col.main = "red")
  if (Data4$Negative2< 0.2) title(main = title, cex.main = 0.7, col.main = "black")
  if(k %% 50 == 0) # progress indicator every 50 samples
    print(k)
}
dev.off()
##################################NEPHELOMETER VALIDATION#########################
# read in validation index of nephelometer data; also available in the github directory
Validation = read.csv("/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/VisualizedValidation.csv", header=TRUE)
# this contains the results of Zheng's visual inspection of the data.
table(Validation$Validity) # frequency of visual validity
# merge nephelometer summary statistics with visual validation index
Nephelometer1 = merge(Nephelometer, Validation , by="filterID", all=T)
# create an indicator for Harmattan (dust-season samples with elevated baseline)
# NOTE(review): if Note ever contains NA, the logical index below is NA and this
# assignment errors ("NAs are not allowed in subscripted assignments") -- presumably
# Note has no NAs in this data; verify.
Nephelometer1$Harmattan = 0
Nephelometer1$Harmattan[Nephelometer1$Note=="elevated baseline"] = 1
# drop samples with invalid nephelometer data (Validity code 4)
# NOTE(review): "!=" subsetting would keep NA-Validity rows as all-NA rows -- confirm Validity has no NAs.
Nephelometer2 = Nephelometer1[Nephelometer1$Validity!=4,]
#################################GRAVIMETRIC SAMPLE DATA ########################################
# read in gravimetric PM data, make sure filepath is correct; also in github
GravimetricPM = read.csv("/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/FilterWeight.csv", header=TRUE,stringsAsFactors=FALSE)
# Filter IDs excluded from the gravimetric analysis, grouped by apparent reason:
# damaged filters, statistical outliers, and weights that could not be matched to a
# deployment. (Reasons inferred from the variable names -- confirm against study records.)
Damagedfilter = c("GN012", "GN016", "GN019", "GN020", "GN021", "GN022", "GN025", "GN028", "GN030", "GN031", "GN032", "GN033",
"GN034", "GN036", "GN044", "GN046", "GN052", "GN053", "GN054", "GN055", "GN057", "GN059", "GN060", "GN065", "GN067", "GN068",
"GN069", "GN076", "GN078", "GN091", "KHC0304", "KHC0305", "KHC0306", "KHC0307", "KHC0308", "KHC0310", "KHC0311", "KHC0312", "KHC0313", "KHC0325",
"KHC0332", "KHC0333", "KHC0365", "KHC0366", "KHC0367", "KHC0368", "KHC0369", "KHC0373", "KHC0375", "KHC0376", "KHC0377", "KHC0378", "KHC0379",
"KHC0380", "KHC0381", "KHC0382","KHC0383","KHC0384","KHC0385","KHC0386","KHC0387", "KHC0389", "KHC0390","KHC0391", "KHC0392", "KHC0393", "KHC0394",
"KHC0395", "KHC0396", "KHC0397", "KHC0398", "KHC0399", "KHC0463", "KHC1025", "KHC1026", "KHC1251","KHC1567")
Outlier = c("KHC0353", "KHC0783", "KHC1000", "KHC1325", "KHC1864")
Unmatched = c("KHC0133", "KHC0163", "KHC0168", "KHC0203", "KHC0275", "KHC0276", "KHC0421", "KHC0452", "KHC0459",
"KHC0536", "KHC0625", "KHC0673", "KHC0679", "KHC0686", "KHC0816", "KHC0947", "KHC1005", "KHC1160", "KHC1173", "KHC1347",
"KHC1348", "KHC1373", "KHC1406", "KHC1432", "KHC1622", "KHC1623", "KHC1624", "KHC1625", "KHC1626", "KHC1627", "KHC1628",
"KHC1629", "KHC1630", "KHC1631", "KHC1652", "KHC1696", "KHC1720", "KHC1769", "KHC1771", "KHC1783", "KHC1833")
# create an index to categorize Gravimetric Sample
# note: assignment order matters -- a filter appearing in more than one list ends up
# with the last matching category ("Damaged" takes precedence over the others)
GravimetricPM$index = "GOOD"
GravimetricPM$index[GravimetricPM$filterID %in% Unmatched] = "Unmatched"
GravimetricPM$index[GravimetricPM$filterID %in% Outlier] = "Outlier"
GravimetricPM$index[GravimetricPM$filterID %in% Damagedfilter] = "Damaged"
################################Merge Nephelometer and Gravimetric Data###################################
PM_Data = merge(Nephelometer2, GravimetricPM , by="filterID", all.x=T) # merge valid nephelometer data with Gravimetric PM data
#create an index for gravimetric PM >22 hrs
PM_Data$duration_index = 1
PM_Data$duration_index[PM_Data$Duration<22] = 0
#create an index for normal flow rate > 85% of time
PM_Data$flow_index = 1
PM_Data$flow_index[PM_Data$flow28.good<0.85] = 0
# gravimetric PM concentration: 0.005 appears to be a blank/offset correction on netmass
# and the two *1000/(…/1000) factors unit conversions (presumably mg -> ug and L -> m3);
# TODO confirm units against the filter-weighing protocol.
PM_Data$PM = ((PM_Data$netmass-0.005)*1000)/(as.numeric(PM_Data$vol)/(1000)) # gravimetric concentration
PM_Data$CF = PM_Data$PM/PM_Data$nephelometer_corr_avg # gravimetric correction factor
# create an index for gravimetric correction factor, 0 if no or problematic gravimetric PM, short duration, or out-of-range flow rate
PM_Data$CF_index = 0
PM_Data$CF_index[!is.na(PM_Data$index) & PM_Data$index=="GOOD" & PM_Data$duration_index == 1 & PM_Data$flow_index == 1] = 1
####################################PLOT GRAVIMETRIC VS NEPHELOMETER#######################################
# One scatter plot per MicroPEM device: gravimetric PM vs mean corrected nephelometer
# reading, with a zero-intercept regression fit through the non-Harmattan points.
#subset the dataset for CF estimation for each MicroPEM
PM_Data1 = PM_Data[!is.na(PM_Data$index) & PM_Data$index=="GOOD" & PM_Data$duration_index == 1 & PM_Data$flow_index == 1,]
# interactive summaries of the correction factor outside/inside Harmattan season
summary(PM_Data1$CF[PM_Data1$Harmattan==0])
summary(PM_Data1$CF[PM_Data1$Harmattan==1])
plotdirectory <- "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/"
pdf(file = paste0(plotdirectory, "PMcomparison.pdf"), height = 8, width = 8)
par(mfrow = c(3,3))
par(mar=c(4,4,3,1))
for (i in 1:length(unique(as.character(PM_Data1$deviceSerial.x)))) {
# all samples measured with this device, split by Harmattan status
McPEMID = unique(as.character(PM_Data1$deviceSerial.x))[i]
filesbySN <- PM_Data1[PM_Data1$deviceSerial.x==McPEMID,]
filesbySN0 = filesbySN[filesbySN$Harmattan==0,]
filesbySN1 = filesbySN[filesbySN$Harmattan==1,]
# zero-intercept fit (the "+0") through the non-Harmattan points only
reg = lm(filesbySN0$PM~filesbySN0$nephelometer_corr_avg+0)
plot(filesbySN0$PM~filesbySN0$nephelometer_corr_avg, xlim=c(0,350), ylim=c(0,350), xlab=expression(paste("Nephelometer (", mu,"g/", m^{3},')')),
ylab=expression(paste("Gravimetric (", mu,"g/", m^{3},')')), col="blue")
points(filesbySN1$PM~filesbySN1$nephelometer_corr_avg, col="red")
abline(reg)
title(main = McPEMID, cex.main = 1)
# NOTE(review): legend(x, y, ...) is called with x='right' and y='bottom'; because x is a
# position keyword, the 'bottom' argument is ignored and the legend lands at "right".
# The intent was presumably legend("bottomright", ...) -- confirm before changing the plots.
legend('right', 'bottom', c(paste("H", "(", nrow(filesbySN1), ")"), paste("Non-H", "(", nrow(filesbySN0), ")")), col=c("red", "blue"), pch=1)
}
dev.off()
##################################OBTAIN GRAVIMETRIC CORRECTION FACTOR#####################################
# read in the per-device gravimetric correction factor; make sure the filepath is correct
GravimeticCF = read.csv("/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/GravimetricFactor.csv", header=TRUE)
# If a sample's own gravimetric CF is unusable (CF_index == 0: missing/problematic filter,
# short duration, or out-of-range flow), substitute the device-level correction factor.
PM_Data$CF_new = PM_Data$CF
for (i in seq_len(nrow(PM_Data))) { # seq_len() is safe for zero rows (1:nrow would give c(1, 0))
  if (PM_Data$CF_index[i] == 0) {
    PM_Data$CF_new[i] = GravimeticCF$Ratio[GravimeticCF$MicroPEMID == as.character(PM_Data$deviceSerial.x[i])]
  }
}
####################################DAILY PM AVERAGE DATA ###############################################
# Build one row per sample holding: day-by-day (24-h block) nephelometer PM averages
# (raw, HEPA-corrected, and gravimetric-corrected via CF_new), accelerometer-based
# wearing-compliance minute counts, and cumulative 24/48/72-h PM means.
DailyPM_Data = NULL # create an empty data frame to store Daily PM data
for (k in 1:nrow(PM_Data)) {
Data1 = convertOutput(PM_Data$participantID[k])
Data2 = Data1$measures
Data4 = PM_Data[k,] # Get PM summary statistics from PM_Data
Data3 = Data2[Data2$timeDate>=(Data4$starttime_new)&Data2$timeDate<=(Data4$endtime_new),] #extract data during the measurement time
Data5 = Data3[!is.na(Data3$nephelometer),] #drop rows without nephelometer reading
Data6 = Data5[Data5$relativeHumidity>0 & Data5$relativeHumidity<100,] #drop measurement with RH <=0 or RH>=100
# HEPA correction (same four cases as in the summary loop above: linear drift when both
# windows exist, constant offset when only one exists, no correction when neither)
if(!is.na(Data4$HEPASt) & !is.na(Data4$HEPAEnd)) Data6$nephelometer_corr = Data6$nephelometer - seq(Data4$HEPASt, Data4$HEPAEnd, (Data4$HEPAEnd-Data4$HEPASt)/(length(Data6$nephelometer)-1))
if(!is.na(Data4$HEPASt) & is.na(Data4$HEPAEnd)) Data6$nephelometer_corr = Data6$nephelometer - Data4$HEPASt
if(is.na(Data4$HEPASt) & !is.na(Data4$HEPAEnd)) Data6$nephelometer_corr = Data6$nephelometer - Data4$HEPAEnd
if(is.na(Data4$HEPASt) & is.na(Data4$HEPAEnd)) Data6$nephelometer_corr = Data6$nephelometer
#minute nephelometer data
Data6$unique_min <- floor_date(Data6$timeDate, unit = "minute")
Data7 = ddply(Data6, .(unique_min), summarise,
nephelometer_min = mean(nephelometer),
nephelometer_corr_min = mean(nephelometer_corr))
# apply the gravimetric correction factor to the HEPA-corrected minute means
Data7$nephelometer_final_min = Data7$nephelometer_corr_min*Data4$CF_new
Data4$nephelometer_avg = mean(Data7$nephelometer_min)
Data4$nephelometer_corr_avg = mean(Data7$nephelometer_corr_min)
Data4$nephelometer_final_avg = mean(Data7$nephelometer_final_min)
#minute accelerometer data
Data3$unique_min <- floor_date(Data3$timeDate, unit = "minute")
Data3$hours <- hour(Data3$timeDate)
Data8 = ddply(Data3, .(unique_min), summarise,
X.axis_mean = round(mean(as.numeric(xAxis), na.rm=TRUE), digits = 4),
Y.axis_mean = round(mean(as.numeric(yAxis), na.rm=TRUE), digits = 4),
Z.axis_mean = round(mean(as.numeric(zAxis), na.rm=TRUE), digits = 4),
Vector.Sum.Composite_mean = round(mean(as.numeric(vectorSum), na.rm=TRUE), digits = 4),
X.axis_SD = round(sd(xAxis, na.rm=TRUE), digits = 3),
Y.axis_SD = round(sd(yAxis, na.rm=TRUE), digits = 3),
Z.axis_SD = round(sd(zAxis, na.rm=TRUE), digits = 3),
Vector.Sum.Composite_SD = round(sd(vectorSum, na.rm=TRUE), digits = 3),
hours = mean(hours))
# define threshold and moving window for accelerometer data
compliance_threshold = 0.01
window_width = 10
# if no movement during the window (x minute centered in current minute), then the compliance in current minute is 0.
# (rollapply with align = "right": each minute's SD covers the preceding window_width minutes)
Data8$sd_composite <- round(rollapply(Data8$Vector.Sum.Composite_mean, width=window_width, FUN = sd, align = "right", na.rm = TRUE, fill = NA), digits=3)
Data8$sd_X.axis <- round(rollapply(Data8$X.axis_mean, width=window_width, FUN = sd, align = "right", na.rm = TRUE, fill = NA), digits=3)
Data8$sd_Y.axis <- round(rollapply(Data8$Y.axis_mean, width=window_width, FUN = sd, align = "right", na.rm = TRUE, fill = NA), digits=3)
Data8$sd_Z.axis <- round(rollapply(Data8$Z.axis_mean, width=window_width, FUN = sd, align = "right", na.rm = TRUE, fill = NA), digits=3)
# worn this minute if any single axis moved above the threshold
Data8$sd_above_threshold = ifelse(Data8$sd_X.axis > compliance_threshold|Data8$sd_Y.axis > compliance_threshold|Data8$sd_Z.axis > compliance_threshold, 1, 0)
Data8$sd_X_above_threshold = ifelse(Data8$sd_X.axis > compliance_threshold, 1, 0)
Data8$sd_Y_above_threshold = ifelse(Data8$sd_Y.axis > compliance_threshold, 1, 0)
Data8$sd_Z_above_threshold = ifelse(Data8$sd_Z.axis > compliance_threshold, 1, 0)
wakehour = c(7:22) # define wake hour is between 7 am and 10 pm
# divide the sampling period into 24 hour blocks
# (PMday4 intentionally has no underscore -- see the reshape() below, whose
# grep("PMday_") must not treat this upper boundary as a per-day variable)
Data4$PMday_1 = Data7$unique_min[1]
Data4$PMday_2 = Data4$PMday_1 + 24*60*60
Data4$PMday_3 = Data4$PMday_2 + 24*60*60
Data4$PMday4 = Data4$PMday_3 + 24*60*60
Data4$Day_1 = 1
Data4$Day_2 = 2
Data4$Day_3 = 3
# average of HEPA-corrected (but not gravimetric-corrected) nephelometer readings in each 24 hour
Data4$OldPM_1 = round(mean(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday_2,]$nephelometer_corr_min), digits=3)
Data4$OldPM_2 = round(mean(Data7[Data7$unique_min>=Data4$PMday_2&Data7$unique_min<Data4$PMday_3,]$nephelometer_corr_min), digits=3)
Data4$OldPM_3 = round(mean(Data7[Data7$unique_min>=Data4$PMday_3&Data7$unique_min<Data4$PMday4,]$nephelometer_corr_min), digits=3)
# average of fully corrected (HEPA + gravimetric CF) nephelometer readings in each 24 hour
Data4$CorPM_1 = round(mean(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday_2,]$nephelometer_final_min), digits=3)
Data4$CorPM_2 = round(mean(Data7[Data7$unique_min>=Data4$PMday_2&Data7$unique_min<Data4$PMday_3,]$nephelometer_final_min), digits=3)
Data4$CorPM_3 = round(mean(Data7[Data7$unique_min>=Data4$PMday_3&Data7$unique_min<Data4$PMday4,]$nephelometer_final_min), digits=3)
# number of nephelometer reading in each 24 hour
Data4$PMn_1 = nrow(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday_2,])
Data4$PMn_2 = nrow(Data7[Data7$unique_min>=Data4$PMday_2&Data7$unique_min<Data4$PMday_3,])
Data4$PMn_3 = nrow(Data7[Data7$unique_min>=Data4$PMday_3&Data7$unique_min<Data4$PMday4,])
# wearing compliance (count of worn minutes) in each 24 hour
Data4$compliance_1 = sum(Data8[Data8$unique_min>=Data4$PMday_1 & Data8$unique_min<Data4$PMday_2,]$sd_above_threshold, na.rm=T)
Data4$compliance_2 = sum(Data8[Data8$unique_min>=Data4$PMday_2 & Data8$unique_min<Data4$PMday_3,]$sd_above_threshold, na.rm=T)
Data4$compliance_3 = sum(Data8[Data8$unique_min>=Data4$PMday_3 & Data8$unique_min<Data4$PMday4,]$sd_above_threshold, na.rm=T)
# wearing compliance (count of worn minutes during wake hours) in each 24 hour
Data4$complianceWake_1 = sum(Data8[Data8$unique_min>=Data4$PMday_1 & Data8$unique_min<Data4$PMday_2 & Data8$hours %in% wakehour,]$sd_above_threshold, na.rm=T)
Data4$complianceWake_2 = sum(Data8[Data8$unique_min>=Data4$PMday_2 & Data8$unique_min<Data4$PMday_3 & Data8$hours %in% wakehour,]$sd_above_threshold, na.rm=T)
Data4$complianceWake_3 = sum(Data8[Data8$unique_min>=Data4$PMday_3 & Data8$unique_min<Data4$PMday4 & Data8$hours %in% wakehour,]$sd_above_threshold, na.rm=T)
# accumulative PM averages for 24, 48, and 72 hour
Data4$PMAverage24=round(mean(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday_2,]$nephelometer_final_min), digits=3)
Data4$PMAverage48=round(mean(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday_3,]$nephelometer_final_min), digits=3)
Data4$PMAverage72=round(mean(Data7[Data7$unique_min>=Data4$PMday_1&Data7$unique_min<Data4$PMday4,]$nephelometer_final_min), digits=3)
DailyPM_Data = rbind(DailyPM_Data, Data4) # update dailyPM data frame
if(round(k/50)*50==k)
print(k)
}
# save DailyPM dataset, please check the output directory
saveRDS(DailyPM_Data, file = "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/DailyPM.rds")
##############################MERGE DAILYPM WITH MICROPEM LOG DATA#############################
# Join the per-sample daily PM summary to the MicroPEM logsheet records by filter ID.
# BUG FIX: the left-hand object was misspelled "DailyPM_Date", which is never defined;
# the data frame built above is DailyPM_Data, so this merge errored at runtime.
DailyPM = merge(DailyPM_Data, MicroPEM, by.x="filterID", by.y="filterid", all.x=T)
# interactive check: rows without a matching logsheet record
DailyPM[which(is.na(DailyPM$mstudyid)),] # missing three MicroPEM logsheets: KHC0729, KHC1015, KHCD48C
#####################################READ IN CO DATA############################################
# Load the processed carbon-monoxide measurements (already validated upstream).
COdata = readRDS("/Users/zhengzhou/Dropbox/Ghana_exposure_data_SHARED_2014/CO_files_processed/FINAL_CO_parameters_withvalidation_2016Jun14.rds")
COdata$Startdate = as.Date(COdata$firstdate) # start date of the CO measurement
COdata1 = COdata[is.na(COdata$cstudyid),] # keep mother samples only (exclude child CO measurements)
##########################MERGE PM AND CO DATA##########################################
# Align PM and CO by participant (mstudyid) and measurement start date.
PMCO = merge(DailyPM, COdata1, by=c("mstudyid", "Startdate"), all.x=T)
# Rename the CO day-mean columns to the OldCO_*/CorCO_* convention used by the
# wide-to-long reshape below (old name -> new name).
co_renames <- c(co_day1_mean = "OldCO_1", co_day2_mean = "OldCO_2", co_day3_mean = "OldCO_3",
                co_day1_mean_corr = "CorCO_1", co_day2_mean_corr = "CorCO_2", co_day3_mean_corr = "CorCO_3")
for (old_name in names(co_renames)) {
  colnames(PMCO)[colnames(PMCO) == old_name] <- co_renames[[old_name]]
}
##############################WIDE TO LONG FORMAT#########################################
# Reshape from one row per sample (with _1/_2/_3 day-suffixed columns) to one row per
# sample-day. The PMday4 boundary column has no underscore, so grep("PMday_") leaves it
# out of the varying set (presumably intentional -- it is only an upper time boundary).
PMCO1 <-reshape(PMCO,
varying=c(grep("PMday_",colnames(PMCO)),
grep("OldPM_",colnames(PMCO)),
grep("CorPM_",colnames(PMCO)),
grep("compliance_",colnames(PMCO)),
grep("complianceWake_",colnames(PMCO)),
grep("PMn_",colnames(PMCO)),
grep("OldCO_",colnames(PMCO)),
grep("CorCO_",colnames(PMCO)),
grep("Day_",colnames(PMCO))),
idvar="id",
direction="long", sep="_")
# NOTE(review): "!=" and ">" subsetting keeps NA rows as all-NA rows; the !is.na(CorCO)
# filter below removes most of them, but confirm visually_valid/PMn/CorPM have no NAs.
PMCO2 = PMCO1[PMCO1$visually_valid!=3,] # exclude samples with invalid CO readings
PMCO2 = PMCO2[PMCO2$PMn>1320,] #exclude PM sample-day < 22hrs (1320 = 22 h * 60 min)
PMCO2 = PMCO2[!is.na(PMCO2$CorCO),] #exclude CO sample-day < 24hrs
PMCO2 = PMCO2[PMCO2$CorPM>0,] #exclude PM <0
# calculate compliance measure and categorize the measure into 7 buckets
# (fraction of valid minutes flagged as worn during wake hours, binned [0,0.1), [0.1,0.2), ...)
PMCO2$complianceWakePct = PMCO2$complianceWake/PMCO2$PMn
PMCO2$complianceWakePctGP = cut(PMCO2$complianceWakePct, seq(0, 0.7, 0.1), labels=c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7), right=FALSE)
# save PMCO data frame as a Rdata
saveRDS(PMCO2, file = "/Volumes/My Passport for Mac/WD passport/Columbia-Ghana Project/MicroPEM_Data/PMCO.rds")
|
ed4eb096534ef6ea16775b55742f819f575bc8d5
|
2c324eeb4da26a3aa54e781f4f78cb5083cb009b
|
/test.r
|
8038bb65af5f7caa32c4190a42b86b3d4d9d60c5
|
[] |
no_license
|
ravipurama/ggplot-on-4th-day
|
eaf51eb5c6b0a95990196ec8430e3f8113ab76fd
|
ddad92f600c408fb55aa0b414d5dd27b1204c3a9
|
refs/heads/master
| 2020-04-10T03:14:59.512394
| 2018-12-07T03:52:12
| 2018-12-07T03:52:12
| 160,765,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45
|
r
|
test.r
|
# this is my second git
# Load the built-in iris data set and preview its first rows.
data("iris")
head(iris, n = 6L)  # 6 is head()'s default; made explicit
|
19cea5fd3a7454e303577b1f171a7c2b6b5a35b3
|
71cc5cf6f154f6195da4423f847faf650d140d6d
|
/man/jockes.Rd
|
7fc47afddb62060d2e897a9ee085733082258094
|
[] |
no_license
|
c0reyes/TextMiningGUI
|
0ece005b8b7d9360d38b23664dfedc1b138856a2
|
3c7acfbd2288d5a3c671b5a0c6ab67c60f68955b
|
refs/heads/master
| 2023-04-01T20:27:07.006420
| 2021-04-18T17:18:21
| 2021-04-18T17:18:21
| 273,744,535
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 110
|
rd
|
jockes.Rd
|
\name{jockes}
\alias{jockes}
\title{jockes}
\description{
A data set of jokes, sourced from: https://github.com/taivop/joke-dataset
}
|
48aec9064858a148f522b6f2f0ff395b4fc69e4a
|
501f7fe8d182b3c2c9c1290088a1d0765af1f728
|
/6/6-1.R
|
7275f1a520230b50d271e44cdc4a3eb4a2348a31
|
[] |
no_license
|
N-Hirahara/R_kadai
|
c6418765af81cc6f6b68a5b9d752c066f76c1f05
|
91c13679dc5910b9c71b3f685587de070f06db0f
|
refs/heads/master
| 2020-08-16T02:28:19.083525
| 2019-12-13T04:38:40
| 2019-12-13T04:38:40
| 215,442,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 600
|
r
|
6-1.R
|
# Per-prefecture weather summary: share of "Fine" days, mean daily high/low
# temperature, and total precipitation. One output row per prefecture, with the
# city names (in order of first appearance) as row names.
wd <- read.csv("6/weather_all.csv", header = TRUE, sep = ",")
# Derive the prefecture ids from the data instead of hard-coding 47, so the script
# also works if the input covers a different set of prefectures. Assumes (as the
# original did) that city appearance order matches ascending prefid order.
ids <- sort(unique(wd$prefid))
n <- length(ids)
# Preallocate the result vectors (one slot per prefecture)
fr <- numeric(n) # proportion of records with weather == "Fine"
hm <- numeric(n) # mean of daily high temperatures
lo <- numeric(n) # mean of daily low temperatures (renamed from "lm" to avoid shadowing stats::lm)
sm <- numeric(n) # total precipitation
for (i in seq_len(n)) {
  wi <- wd[wd$prefid == ids[i], ]
  # count only records explicitly marked "Fine"; the previous nrow(wi[wi$weather=="Fine",])
  # form would have counted NA weather values as matches (NA logical index keeps the row)
  fr[i] <- sum(wi$weather == "Fine", na.rm = TRUE) / nrow(wi)
  hm[i] <- mean(wi$temphigh)
  lo[i] <- mean(wi$templow)
  sm[i] <- sum(wi$precipitation)
}
wstat <- data.frame(fineratio = fr, highmean = hm, lowmean = lo, sumprecipitation = sm, row.names = unique(wd$city))
write.table(wstat, file = "6/wstat.csv", row.names = TRUE, col.names = TRUE, sep = ",")
|
2a30ccb52ecb13f86e7d778bb17c412a7fb51d27
|
b35d8d930b0fd5255bf6ade8e05070badddaadf0
|
/man/eurusd.Rd
|
cf0f22f62ff72353098b12281b81d25121afe571
|
[] |
no_license
|
ilda-kacerja/foRex
|
2e8af71faf33c4708ab86fb527bb04676c3d7b1a
|
1b642a1096e5f051c12c54f1c41070e71aa6e9cb
|
refs/heads/master
| 2020-05-29T16:56:26.164542
| 2019-05-29T16:45:04
| 2019-05-29T16:45:04
| 189,263,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 266
|
rd
|
eurusd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eurusd.R
\name{eurusd}
\alias{eurusd}
\title{Exchange rate for the current day}
\usage{
eurusd()
}
\value{
number
}
\description{
Exchange rate for the current day
}
\examples{
eurusd()
}
|
ccef719e990b8f58fc91735303bdcab310d32b22
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/paws/R/cloudwatchrum_operations.R
|
1f9973a98afb57931c914d1299f033e2d977a769
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 47,651
|
r
|
cloudwatchrum_operations.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include cloudwatchrum_service.R
NULL
#' Specifies the extended metrics and custom metrics that you want a
#' CloudWatch RUM app monitor to send to a destination
#'
#' @description
#' Specifies the extended metrics and custom metrics that you want a
#' CloudWatch RUM app monitor to send to a destination. Valid destinations
#' include CloudWatch and Evidently.
#'
#' By default, RUM app monitors send some metrics to CloudWatch. These
#' default metrics are listed in [CloudWatch metrics that you can collect
#' with CloudWatch
#' RUM](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-metrics.html).
#'
#' In addition to these default metrics, you can choose to send extended
#' metrics or custom metrics or both.
#'
#' - Extended metrics enable you to send metrics with additional
#' dimensions not included in the default metrics. You can also send
#' extended metrics to Evidently as well as CloudWatch. The valid
#' dimension names for the additional dimensions for extended metrics
#' are `BrowserName`, `CountryCode`, `DeviceType`, `FileType`,
#' `OSName`, and `PageId`. For more information, see [Extended metrics
#' that you can send to CloudWatch and CloudWatch
#' Evidently](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/).
#'
#' - Custom metrics are metrics that you define. You can send custom
#' metrics to CloudWatch or to CloudWatch Evidently or to both. With
#' custom metrics, you can use any metric name and namespace, and to
#' derive the metrics you can use any custom events, built-in events,
#' custom attributes, or default attributes.
#'
#' You can't send custom metrics to the `AWS/RUM` namespace. You must
#' send custom metrics to a custom namespace that you define. The
#' namespace that you use can't start with `AWS/`. CloudWatch RUM
#' prepends `RUM/CustomMetrics/` to the custom namespace that you
#' define, so the final namespace for your metrics in CloudWatch is
#' `RUM/CustomMetrics/your-custom-namespace `.
#'
#' The maximum number of metric definitions that you can specify in one
#' [`batch_create_rum_metric_definitions`][cloudwatchrum_batch_create_rum_metric_definitions]
#' operation is 200.
#'
#' The maximum number of metric definitions that one destination can
#' contain is 2000.
#'
#' Extended metrics sent to CloudWatch and RUM custom metrics are charged
#' as CloudWatch custom metrics. Each combination of additional dimension
#' name and dimension value counts as a custom metric. For more
#' information, see [Amazon CloudWatch
#' Pricing](https://aws.amazon.com/cloudwatch/pricing/).
#'
#' You must have already created a destination for the metrics before you
#' send them. For more information, see
#' [`put_rum_metrics_destination`][cloudwatchrum_put_rum_metrics_destination].
#'
#' If some metric definitions specified in a
#' [`batch_create_rum_metric_definitions`][cloudwatchrum_batch_create_rum_metric_definitions]
#' operations are not valid, those metric definitions fail and return
#' errors, but all valid metric definitions in the same operation still
#' succeed.
#'
#' @usage
#' cloudwatchrum_batch_create_rum_metric_definitions(AppMonitorName,
#' Destination, DestinationArn, MetricDefinitions)
#'
#' @param AppMonitorName [required] The name of the CloudWatch RUM app monitor that is to send the metrics.
#' @param Destination [required] The destination to send the metrics to. Valid values are `CloudWatch`
#' and `Evidently`. If you specify `Evidently`, you must also specify the
#' ARN of the CloudWatchEvidently experiment that will receive the metrics
#' and an IAM role that has permission to write to the experiment.
#' @param DestinationArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter.
#'
#' This parameter specifies the ARN of the Evidently experiment that is to
#' receive the metrics. You must have already defined this experiment as a
#' valid destination. For more information, see
#' [`put_rum_metrics_destination`][cloudwatchrum_put_rum_metrics_destination].
#' @param MetricDefinitions [required] An array of structures which define the metrics that you want to send.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Errors = list(
#' list(
#' ErrorCode = "string",
#' ErrorMessage = "string",
#' MetricDefinition = list(
#' DimensionKeys = list(
#' "string"
#' ),
#' EventPattern = "string",
#' Name = "string",
#' Namespace = "string",
#' UnitLabel = "string",
#' ValueKey = "string"
#' )
#' )
#' ),
#' MetricDefinitions = list(
#' list(
#' DimensionKeys = list(
#' "string"
#' ),
#' EventPattern = "string",
#' MetricDefinitionId = "string",
#' Name = "string",
#' Namespace = "string",
#' UnitLabel = "string",
#' ValueKey = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_create_rum_metric_definitions(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' MetricDefinitions = list(
#' list(
#' DimensionKeys = list(
#' "string"
#' ),
#' EventPattern = "string",
#' Name = "string",
#' Namespace = "string",
#' UnitLabel = "string",
#' ValueKey = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_batch_create_rum_metric_definitions
#'
#' @aliases cloudwatchrum_batch_create_rum_metric_definitions
cloudwatchrum_batch_create_rum_metric_definitions <- function(AppMonitorName, Destination, DestinationArn = NULL, MetricDefinitions) {
  # Wire-level description of the BatchCreateRumMetricDefinitions API call.
  operation <- new_operation(
    name = "BatchCreateRumMetricDefinitions",
    http_method = "POST",
    http_path = "/rummetrics/{AppMonitorName}/metrics",
    paginator = list()
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$batch_create_rum_metric_definitions_input(AppMonitorName = AppMonitorName, Destination = Destination, DestinationArn = DestinationArn, MetricDefinitions = MetricDefinitions)
  response_shape <- .cloudwatchrum$batch_create_rum_metric_definitions_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$batch_create_rum_metric_definitions <- cloudwatchrum_batch_create_rum_metric_definitions
#' Removes the specified metrics from being sent to an extended metrics
#' destination
#'
#' @description
#' Removes the specified metrics from being sent to an extended metrics
#' destination.
#'
#' If some metric definition IDs specified in a
#' [`batch_delete_rum_metric_definitions`][cloudwatchrum_batch_delete_rum_metric_definitions]
#' operations are not valid, those metric definitions fail and return
#' errors, but all valid metric definition IDs in the same operation are
#' still deleted.
#'
#' The maximum number of metric definitions that you can specify in one
#' [`batch_delete_rum_metric_definitions`][cloudwatchrum_batch_delete_rum_metric_definitions]
#' operation is 200.
#'
#' @usage
#' cloudwatchrum_batch_delete_rum_metric_definitions(AppMonitorName,
#' Destination, DestinationArn, MetricDefinitionIds)
#'
#' @param AppMonitorName [required] The name of the CloudWatch RUM app monitor that is sending these
#' metrics.
#' @param Destination [required] Defines the destination where you want to stop sending the specified
#' metrics. Valid values are `CloudWatch` and `Evidently`. If you specify
#' `Evidently`, you must also specify the ARN of the CloudWatchEvidently
#' experiment that is to be the destination and an IAM role that has
#' permission to write to the experiment.
#' @param DestinationArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter.
#'
#' This parameter specifies the ARN of the Evidently experiment that was
#' receiving the metrics that are being deleted.
#' @param MetricDefinitionIds [required] An array of structures which define the metrics that you want to stop
#' sending.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Errors = list(
#' list(
#' ErrorCode = "string",
#' ErrorMessage = "string",
#' MetricDefinitionId = "string"
#' )
#' ),
#' MetricDefinitionIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_delete_rum_metric_definitions(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' MetricDefinitionIds = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_batch_delete_rum_metric_definitions
#'
#' @aliases cloudwatchrum_batch_delete_rum_metric_definitions
cloudwatchrum_batch_delete_rum_metric_definitions <- function(AppMonitorName, Destination, DestinationArn = NULL, MetricDefinitionIds) {
  # Wire-level description of the BatchDeleteRumMetricDefinitions API call.
  operation <- new_operation(
    name = "BatchDeleteRumMetricDefinitions",
    http_method = "DELETE",
    http_path = "/rummetrics/{AppMonitorName}/metrics",
    paginator = list()
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$batch_delete_rum_metric_definitions_input(AppMonitorName = AppMonitorName, Destination = Destination, DestinationArn = DestinationArn, MetricDefinitionIds = MetricDefinitionIds)
  response_shape <- .cloudwatchrum$batch_delete_rum_metric_definitions_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$batch_delete_rum_metric_definitions <- cloudwatchrum_batch_delete_rum_metric_definitions
#' Retrieves the list of metrics and dimensions that a RUM app monitor is
#' sending to a single destination
#'
#' @description
#' Retrieves the list of metrics and dimensions that a RUM app monitor is
#' sending to a single destination.
#'
#' @usage
#' cloudwatchrum_batch_get_rum_metric_definitions(AppMonitorName,
#' Destination, DestinationArn, MaxResults, NextToken)
#'
#' @param AppMonitorName [required] The name of the CloudWatch RUM app monitor that is sending the metrics.
#' @param Destination [required] The type of destination that you want to view metrics for. Valid values
#' are `CloudWatch` and `Evidently`.
#' @param DestinationArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter.
#'
#' This parameter specifies the ARN of the Evidently experiment that
#' corresponds to the destination.
#' @param MaxResults The maximum number of results to return in one operation. The default is
#' 50. The maximum that you can specify is 100.
#'
#' To retrieve the remaining results, make another call with the returned
#' `NextToken` value.
#' @param NextToken Use the token returned by the previous operation to request the next
#' page of results.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' MetricDefinitions = list(
#' list(
#' DimensionKeys = list(
#' "string"
#' ),
#' EventPattern = "string",
#' MetricDefinitionId = "string",
#' Name = "string",
#' Namespace = "string",
#' UnitLabel = "string",
#' ValueKey = "string"
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$batch_get_rum_metric_definitions(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_batch_get_rum_metric_definitions
#'
#' @aliases cloudwatchrum_batch_get_rum_metric_definitions
cloudwatchrum_batch_get_rum_metric_definitions <- function(AppMonitorName, Destination, DestinationArn = NULL, MaxResults = NULL, NextToken = NULL) {
  # Wire-level description of the (paginated) BatchGetRumMetricDefinitions call.
  operation <- new_operation(
    name = "BatchGetRumMetricDefinitions",
    http_method = "GET",
    http_path = "/rummetrics/{AppMonitorName}/metrics",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "MetricDefinitions")
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$batch_get_rum_metric_definitions_input(AppMonitorName = AppMonitorName, Destination = Destination, DestinationArn = DestinationArn, MaxResults = MaxResults, NextToken = NextToken)
  response_shape <- .cloudwatchrum$batch_get_rum_metric_definitions_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$batch_get_rum_metric_definitions <- cloudwatchrum_batch_get_rum_metric_definitions
#' Creates a Amazon CloudWatch RUM app monitor, which collects telemetry
#' data from your application and sends that data to RUM
#'
#' @description
#' Creates a Amazon CloudWatch RUM app monitor, which collects telemetry
#' data from your application and sends that data to RUM. The data includes
#' performance and reliability information such as page load time,
#' client-side errors, and user behavior.
#'
#' You use this operation only to create a new app monitor. To update an
#' existing app monitor, use
#' [`update_app_monitor`][cloudwatchrum_update_app_monitor] instead.
#'
#' After you create an app monitor, sign in to the CloudWatch RUM console
#' to get the JavaScript code snippet to add to your web application. For
#' more information, see [How do I find a code snippet that I've already
#' generated?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-find-code-snippet.html)
#'
#' @usage
#' cloudwatchrum_create_app_monitor(AppMonitorConfiguration, CustomEvents,
#' CwLogEnabled, Domain, Name, Tags)
#'
#' @param AppMonitorConfiguration A structure that contains much of the configuration data for the app
#' monitor. If you are using Amazon Cognito for authorization, you must
#' include this structure in your request, and it must include the ID of
#' the Amazon Cognito identity pool to use for authorization. If you don't
#' include `AppMonitorConfiguration`, you must set up your own
#' authorization method. For more information, see Authorize your
#' application to send data to Amazon Web Services.
#'
#' If you omit this argument, the sample rate used for RUM is set to 10% of
#' the user sessions.
#' @param CustomEvents Specifies whether this app monitor allows the web client to define and
#' send custom events. If you omit this parameter, custom events are
#' `DISABLED`.
#'
#' For more information about custom events, see [Send custom
#' events](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-custom-events.html).
#' @param CwLogEnabled Data collected by RUM is kept by RUM for 30 days and then deleted. This
#' parameter specifies whether RUM sends a copy of this telemetry data to
#' Amazon CloudWatch Logs in your account. This enables you to keep the
#' telemetry data for more than 30 days, but it does incur Amazon
#' CloudWatch Logs charges.
#'
#' If you omit this parameter, the default is `false`.
#' @param Domain [required] The top-level internet domain name for which your application has
#' administrative authority.
#' @param Name [required] A name for the app monitor.
#' @param Tags Assigns one or more tags (key-value pairs) to the app monitor.
#'
#' Tags can help you organize and categorize your resources. You can also
#' use them to scope user permissions by granting a user permission to
#' access or change only resources with certain tag values.
#'
#' Tags don't have any semantic meaning to Amazon Web Services and are
#' interpreted strictly as strings of characters.
#'
#' You can associate as many as 50 tags with an app monitor.
#'
#' For more information, see [Tagging Amazon Web Services
#' resources](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Id = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_app_monitor(
#' AppMonitorConfiguration = list(
#' AllowCookies = TRUE|FALSE,
#' EnableXRay = TRUE|FALSE,
#' ExcludedPages = list(
#' "string"
#' ),
#' FavoritePages = list(
#' "string"
#' ),
#' GuestRoleArn = "string",
#' IdentityPoolId = "string",
#' IncludedPages = list(
#' "string"
#' ),
#' SessionSampleRate = 123.0,
#' Telemetries = list(
#' "errors"|"performance"|"http"
#' )
#' ),
#' CustomEvents = list(
#' Status = "ENABLED"|"DISABLED"
#' ),
#' CwLogEnabled = TRUE|FALSE,
#' Domain = "string",
#' Name = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_create_app_monitor
#'
#' @aliases cloudwatchrum_create_app_monitor
cloudwatchrum_create_app_monitor <- function(AppMonitorConfiguration = NULL, CustomEvents = NULL, CwLogEnabled = NULL, Domain, Name, Tags = NULL) {
  # Wire-level description of the CreateAppMonitor API call.
  operation <- new_operation(
    name = "CreateAppMonitor",
    http_method = "POST",
    http_path = "/appmonitor",
    paginator = list()
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$create_app_monitor_input(AppMonitorConfiguration = AppMonitorConfiguration, CustomEvents = CustomEvents, CwLogEnabled = CwLogEnabled, Domain = Domain, Name = Name, Tags = Tags)
  response_shape <- .cloudwatchrum$create_app_monitor_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$create_app_monitor <- cloudwatchrum_create_app_monitor
#' Deletes an existing app monitor
#'
#' @description
#' Deletes an existing app monitor. This immediately stops the collection
#' of data.
#'
#' @usage
#' cloudwatchrum_delete_app_monitor(Name)
#'
#' @param Name [required] The name of the app monitor to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_app_monitor(
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_delete_app_monitor
#'
#' @aliases cloudwatchrum_delete_app_monitor
cloudwatchrum_delete_app_monitor <- function(Name) {
  # Wire-level description of the DeleteAppMonitor API call.
  operation <- new_operation(
    name = "DeleteAppMonitor",
    http_method = "DELETE",
    http_path = "/appmonitor/{Name}",
    paginator = list()
  )
  # Marshal the monitor name and declare the expected (empty) response shape.
  request_body <- .cloudwatchrum$delete_app_monitor_input(Name = Name)
  response_shape <- .cloudwatchrum$delete_app_monitor_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$delete_app_monitor <- cloudwatchrum_delete_app_monitor
#' Deletes a destination for CloudWatch RUM extended metrics, so that the
#' specified app monitor stops sending extended metrics to that destination
#'
#' @description
#' Deletes a destination for CloudWatch RUM extended metrics, so that the
#' specified app monitor stops sending extended metrics to that
#' destination.
#'
#' @usage
#' cloudwatchrum_delete_rum_metrics_destination(AppMonitorName,
#' Destination, DestinationArn)
#'
#' @param AppMonitorName [required] The name of the app monitor that is sending metrics to the destination
#' that you want to delete.
#' @param Destination [required] The type of destination to delete. Valid values are `CloudWatch` and
#' `Evidently`.
#' @param DestinationArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter. This parameter
#' specifies the ARN of the Evidently experiment that corresponds to the
#' destination to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_rum_metrics_destination(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_delete_rum_metrics_destination
#'
#' @aliases cloudwatchrum_delete_rum_metrics_destination
cloudwatchrum_delete_rum_metrics_destination <- function(AppMonitorName, Destination, DestinationArn = NULL) {
  # Wire-level description of the DeleteRumMetricsDestination API call.
  operation <- new_operation(
    name = "DeleteRumMetricsDestination",
    http_method = "DELETE",
    http_path = "/rummetrics/{AppMonitorName}/metricsdestination",
    paginator = list()
  )
  # Marshal caller arguments and declare the expected (empty) response shape.
  request_body <- .cloudwatchrum$delete_rum_metrics_destination_input(AppMonitorName = AppMonitorName, Destination = Destination, DestinationArn = DestinationArn)
  response_shape <- .cloudwatchrum$delete_rum_metrics_destination_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$delete_rum_metrics_destination <- cloudwatchrum_delete_rum_metrics_destination
#' Retrieves the complete configuration information for one app monitor
#'
#' @description
#' Retrieves the complete configuration information for one app monitor.
#'
#' @usage
#' cloudwatchrum_get_app_monitor(Name)
#'
#' @param Name [required] The app monitor to retrieve information for.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' AppMonitor = list(
#' AppMonitorConfiguration = list(
#' AllowCookies = TRUE|FALSE,
#' EnableXRay = TRUE|FALSE,
#' ExcludedPages = list(
#' "string"
#' ),
#' FavoritePages = list(
#' "string"
#' ),
#' GuestRoleArn = "string",
#' IdentityPoolId = "string",
#' IncludedPages = list(
#' "string"
#' ),
#' SessionSampleRate = 123.0,
#' Telemetries = list(
#' "errors"|"performance"|"http"
#' )
#' ),
#' Created = "string",
#' CustomEvents = list(
#' Status = "ENABLED"|"DISABLED"
#' ),
#' DataStorage = list(
#' CwLog = list(
#' CwLogEnabled = TRUE|FALSE,
#' CwLogGroup = "string"
#' )
#' ),
#' Domain = "string",
#' Id = "string",
#' LastModified = "string",
#' Name = "string",
#' State = "CREATED"|"DELETING"|"ACTIVE",
#' Tags = list(
#' "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_app_monitor(
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_get_app_monitor
#'
#' @aliases cloudwatchrum_get_app_monitor
cloudwatchrum_get_app_monitor <- function(Name) {
  # Wire-level description of the GetAppMonitor API call.
  operation <- new_operation(
    name = "GetAppMonitor",
    http_method = "GET",
    http_path = "/appmonitor/{Name}",
    paginator = list()
  )
  # Marshal the monitor name and declare the expected response shape.
  request_body <- .cloudwatchrum$get_app_monitor_input(Name = Name)
  response_shape <- .cloudwatchrum$get_app_monitor_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$get_app_monitor <- cloudwatchrum_get_app_monitor
#' Retrieves the raw performance events that RUM has collected from your
#' web application, so that you can do your own processing or analysis of
#' this data
#'
#' @description
#' Retrieves the raw performance events that RUM has collected from your
#' web application, so that you can do your own processing or analysis of
#' this data.
#'
#' @usage
#' cloudwatchrum_get_app_monitor_data(Filters, MaxResults, Name, NextToken,
#' TimeRange)
#'
#' @param Filters An array of structures that you can use to filter the results to those
#' that match one or more sets of key-value pairs that you specify.
#' @param MaxResults The maximum number of results to return in one operation.
#' @param Name [required] The name of the app monitor that collected the data that you want to
#' retrieve.
#' @param NextToken Use the token returned by the previous operation to request the next
#' page of results.
#' @param TimeRange [required] A structure that defines the time range that you want to retrieve
#' results from.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Events = list(
#' "string"
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_app_monitor_data(
#' Filters = list(
#' list(
#' Name = "string",
#' Values = list(
#' "string"
#' )
#' )
#' ),
#' MaxResults = 123,
#' Name = "string",
#' NextToken = "string",
#' TimeRange = list(
#' After = 123,
#' Before = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_get_app_monitor_data
#'
#' @aliases cloudwatchrum_get_app_monitor_data
cloudwatchrum_get_app_monitor_data <- function(Filters = NULL, MaxResults = NULL, Name, NextToken = NULL, TimeRange) {
  # Wire-level description of the (paginated) GetAppMonitorData call.
  operation <- new_operation(
    name = "GetAppMonitorData",
    http_method = "POST",
    http_path = "/appmonitor/{Name}/data",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Events")
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$get_app_monitor_data_input(Filters = Filters, MaxResults = MaxResults, Name = Name, NextToken = NextToken, TimeRange = TimeRange)
  response_shape <- .cloudwatchrum$get_app_monitor_data_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$get_app_monitor_data <- cloudwatchrum_get_app_monitor_data
#' Returns a list of the Amazon CloudWatch RUM app monitors in the account
#'
#' @description
#' Returns a list of the Amazon CloudWatch RUM app monitors in the account.
#'
#' @usage
#' cloudwatchrum_list_app_monitors(MaxResults, NextToken)
#'
#' @param MaxResults The maximum number of results to return in one operation. The default is
#' 50. The maximum that you can specify is 100.
#' @param NextToken Use the token returned by the previous operation to request the next
#' page of results.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' AppMonitorSummaries = list(
#' list(
#' Created = "string",
#' Id = "string",
#' LastModified = "string",
#' Name = "string",
#' State = "CREATED"|"DELETING"|"ACTIVE"
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_app_monitors(
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_list_app_monitors
#'
#' @aliases cloudwatchrum_list_app_monitors
cloudwatchrum_list_app_monitors <- function(MaxResults = NULL, NextToken = NULL) {
  # Wire-level description of the (paginated) ListAppMonitors call.
  operation <- new_operation(
    name = "ListAppMonitors",
    http_method = "POST",
    http_path = "/appmonitors",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "AppMonitorSummaries")
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$list_app_monitors_input(MaxResults = MaxResults, NextToken = NextToken)
  response_shape <- .cloudwatchrum$list_app_monitors_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$list_app_monitors <- cloudwatchrum_list_app_monitors
#' Returns a list of destinations that you have created to receive RUM
#' extended metrics, for the specified app monitor
#'
#' @description
#' Returns a list of destinations that you have created to receive RUM
#' extended metrics, for the specified app monitor.
#'
#' For more information about extended metrics, see
#' [AddRumMetrics](https://docs.aws.amazon.com/cloudwatchrum/latest/APIReference/).
#'
#' @usage
#' cloudwatchrum_list_rum_metrics_destinations(AppMonitorName, MaxResults,
#' NextToken)
#'
#' @param AppMonitorName [required] The name of the app monitor associated with the destinations that you
#' want to retrieve.
#' @param MaxResults The maximum number of results to return in one operation. The default is
#' 50. The maximum that you can specify is 100.
#'
#' To retrieve the remaining results, make another call with the returned
#' `NextToken` value.
#' @param NextToken Use the token returned by the previous operation to request the next
#' page of results.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Destinations = list(
#' list(
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' IamRoleArn = "string"
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_rum_metrics_destinations(
#' AppMonitorName = "string",
#' MaxResults = 123,
#' NextToken = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_list_rum_metrics_destinations
#'
#' @aliases cloudwatchrum_list_rum_metrics_destinations
cloudwatchrum_list_rum_metrics_destinations <- function(AppMonitorName, MaxResults = NULL, NextToken = NULL) {
  # Wire-level description of the (paginated) ListRumMetricsDestinations call.
  operation <- new_operation(
    name = "ListRumMetricsDestinations",
    http_method = "GET",
    http_path = "/rummetrics/{AppMonitorName}/metricsdestination",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults", result_key = "Destinations")
  )
  # Marshal caller arguments and declare the expected response shape.
  request_body <- .cloudwatchrum$list_rum_metrics_destinations_input(AppMonitorName = AppMonitorName, MaxResults = MaxResults, NextToken = NextToken)
  response_shape <- .cloudwatchrum$list_rum_metrics_destinations_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$list_rum_metrics_destinations <- cloudwatchrum_list_rum_metrics_destinations
#' Displays the tags associated with a CloudWatch RUM resource
#'
#' @description
#' Displays the tags associated with a CloudWatch RUM resource.
#'
#' @usage
#' cloudwatchrum_list_tags_for_resource(ResourceArn)
#'
#' @param ResourceArn [required] The ARN of the resource that you want to see the tags of.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' ResourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' ResourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_list_tags_for_resource
#'
#' @aliases cloudwatchrum_list_tags_for_resource
cloudwatchrum_list_tags_for_resource <- function(ResourceArn) {
  # Wire-level description of the ListTagsForResource API call.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Marshal the resource ARN and declare the expected response shape.
  request_body <- .cloudwatchrum$list_tags_for_resource_input(ResourceArn = ResourceArn)
  response_shape <- .cloudwatchrum$list_tags_for_resource_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$list_tags_for_resource <- cloudwatchrum_list_tags_for_resource
#' Sends telemetry events about your application performance and user
#' behavior to CloudWatch RUM
#'
#' @description
#' Sends telemetry events about your application performance and user
#' behavior to CloudWatch RUM. The code snippet that RUM generates for you
#' to add to your application includes
#' [`put_rum_events`][cloudwatchrum_put_rum_events] operations to send this
#' data to RUM.
#'
#' Each [`put_rum_events`][cloudwatchrum_put_rum_events] operation can send
#' a batch of events from one user session.
#'
#' @usage
#' cloudwatchrum_put_rum_events(AppMonitorDetails, BatchId, Id, RumEvents,
#' UserDetails)
#'
#' @param AppMonitorDetails [required] A structure that contains information about the app monitor that
#' collected this telemetry information.
#' @param BatchId [required] A unique identifier for this batch of RUM event data.
#' @param Id [required] The ID of the app monitor that is sending this data.
#' @param RumEvents [required] An array of structures that contain the telemetry event data.
#' @param UserDetails [required] A structure that contains information about the user session that this
#' batch of events was collected from.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_rum_events(
#' AppMonitorDetails = list(
#' id = "string",
#' name = "string",
#' version = "string"
#' ),
#' BatchId = "string",
#' Id = "string",
#' RumEvents = list(
#' list(
#' details = "string",
#' id = "string",
#' metadata = "string",
#' timestamp = as.POSIXct(
#' "2015-01-01"
#' ),
#' type = "string"
#' )
#' ),
#' UserDetails = list(
#' sessionId = "string",
#' userId = "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_put_rum_events
#'
#' @aliases cloudwatchrum_put_rum_events
cloudwatchrum_put_rum_events <- function(AppMonitorDetails, BatchId, Id, RumEvents, UserDetails) {
  # Wire-level description of the PutRumEvents API call.
  operation <- new_operation(
    name = "PutRumEvents",
    http_method = "POST",
    http_path = "/appmonitors/{Id}/",
    paginator = list()
  )
  # Marshal caller arguments and declare the expected (empty) response shape.
  request_body <- .cloudwatchrum$put_rum_events_input(AppMonitorDetails = AppMonitorDetails, BatchId = BatchId, Id = Id, RumEvents = RumEvents, UserDetails = UserDetails)
  response_shape <- .cloudwatchrum$put_rum_events_output()
  # Build a client from the active configuration, then issue the request.
  service <- .cloudwatchrum$service(get_config())
  req <- new_request(service, operation, request_body, response_shape)
  send_request(req)
}
.cloudwatchrum$operations$put_rum_events <- cloudwatchrum_put_rum_events
#' Creates or updates a destination to receive extended metrics from
#' CloudWatch RUM
#'
#' @description
#' Creates or updates a destination to receive extended metrics from
#' CloudWatch RUM. You can send extended metrics to CloudWatch or to a
#' CloudWatch Evidently experiment.
#'
#' For more information about extended metrics, see
#' [`batch_create_rum_metric_definitions`][cloudwatchrum_batch_create_rum_metric_definitions].
#'
#' @usage
#' cloudwatchrum_put_rum_metrics_destination(AppMonitorName, Destination,
#' DestinationArn, IamRoleArn)
#'
#' @param AppMonitorName [required] The name of the CloudWatch RUM app monitor that will send the metrics.
#' @param Destination [required] Defines the destination to send the metrics to. Valid values are
#' `CloudWatch` and `Evidently`. If you specify `Evidently`, you must also
#' specify the ARN of the CloudWatchEvidently experiment that is to be the
#' destination and an IAM role that has permission to write to the
#' experiment.
#' @param DestinationArn Use this parameter only if `Destination` is `Evidently`. This parameter
#' specifies the ARN of the Evidently experiment that will receive the
#' extended metrics.
#' @param IamRoleArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter.
#'
#' This parameter specifies the ARN of an IAM role that RUM will assume to
#' write to the Evidently experiment that you are sending metrics to. This
#' role must have permission to write to that experiment.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_rum_metrics_destination(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' IamRoleArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_put_rum_metrics_destination
#'
#' @aliases cloudwatchrum_put_rum_metrics_destination
cloudwatchrum_put_rum_metrics_destination <- function(AppMonitorName, Destination, DestinationArn = NULL, IamRoleArn = NULL) {
  # Endpoint descriptor for PutRumMetricsDestination.
  operation <- new_operation(
    name = "PutRumMetricsDestination",
    http_method = "POST",
    http_path = "/rummetrics/{AppMonitorName}/metricsdestination",
    paginator = list()
  )
  # Marshal the request body; DestinationArn / IamRoleArn are only relevant
  # for Evidently destinations and may be NULL.
  request_input <- .cloudwatchrum$put_rum_metrics_destination_input(
    AppMonitorName = AppMonitorName,
    Destination = Destination,
    DestinationArn = DestinationArn,
    IamRoleArn = IamRoleArn
  )
  request_output <- .cloudwatchrum$put_rum_metrics_destination_output()
  # Build the client from the active configuration and send the request.
  client <- .cloudwatchrum$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.cloudwatchrum$operations$put_rum_metrics_destination <- cloudwatchrum_put_rum_metrics_destination
#' Assigns one or more tags (key-value pairs) to the specified CloudWatch
#' RUM resource
#'
#' @description
#' Assigns one or more tags (key-value pairs) to the specified CloudWatch
#' RUM resource. Currently, the only resources that can be tagged app
#' monitors.
#'
#' Tags can help you organize and categorize your resources. You can also
#' use them to scope user permissions by granting a user permission to
#' access or change only resources with certain tag values.
#'
#' Tags don't have any semantic meaning to Amazon Web Services and are
#' interpreted strictly as strings of characters.
#'
#' You can use the [`tag_resource`][cloudwatchrum_tag_resource] action with
#' a resource that already has tags. If you specify a new tag key for the
#' resource, this tag is appended to the list of tags associated with the
#' alarm. If you specify a tag key that is already associated with the
#' resource, the new tag value that you specify replaces the previous value
#' for that tag.
#'
#' You can associate as many as 50 tags with a resource.
#'
#' For more information, see [Tagging Amazon Web Services
#' resources](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html).
#'
#' @usage
#' cloudwatchrum_tag_resource(ResourceArn, Tags)
#'
#' @param ResourceArn [required] The ARN of the CloudWatch RUM resource that you're adding tags to.
#' @param Tags [required] The list of key-value pairs to associate with the resource.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' ResourceArn = "string",
#' Tags = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_tag_resource
#'
#' @aliases cloudwatchrum_tag_resource
cloudwatchrum_tag_resource <- function(ResourceArn, Tags) {
  # Endpoint descriptor for TagResource.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Marshal the ARN and key-value tag list into the request shape.
  request_input <- .cloudwatchrum$tag_resource_input(
    ResourceArn = ResourceArn,
    Tags = Tags
  )
  request_output <- .cloudwatchrum$tag_resource_output()
  # Build the service client and dispatch the call.
  client <- .cloudwatchrum$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.cloudwatchrum$operations$tag_resource <- cloudwatchrum_tag_resource
#' Removes one or more tags from the specified resource
#'
#' @description
#' Removes one or more tags from the specified resource.
#'
#' @usage
#' cloudwatchrum_untag_resource(ResourceArn, TagKeys)
#'
#' @param ResourceArn [required] The ARN of the CloudWatch RUM resource that you're removing tags from.
#' @param TagKeys [required] The list of tag keys to remove from the resource.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' ResourceArn = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_untag_resource
#'
#' @aliases cloudwatchrum_untag_resource
cloudwatchrum_untag_resource <- function(ResourceArn, TagKeys) {
  # Endpoint descriptor for UntagResource (HTTP DELETE).
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags/{ResourceArn}",
    paginator = list()
  )
  # Marshal the ARN and the keys of the tags to remove.
  request_input <- .cloudwatchrum$untag_resource_input(
    ResourceArn = ResourceArn,
    TagKeys = TagKeys
  )
  request_output <- .cloudwatchrum$untag_resource_output()
  # Build the service client and dispatch the call.
  client <- .cloudwatchrum$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.cloudwatchrum$operations$untag_resource <- cloudwatchrum_untag_resource
#' Updates the configuration of an existing app monitor
#'
#' @description
#' Updates the configuration of an existing app monitor. When you use this
#' operation, only the parts of the app monitor configuration that you
#' specify in this operation are changed. For any parameters that you omit,
#' the existing values are kept.
#'
#' You can't use this operation to change the tags of an existing app
#' monitor. To change the tags of an existing app monitor, use
#' [`tag_resource`][cloudwatchrum_tag_resource].
#'
#' To create a new app monitor, use
#' [`create_app_monitor`][cloudwatchrum_create_app_monitor].
#'
#' After you update an app monitor, sign in to the CloudWatch RUM console
#' to get the updated JavaScript code snippet to add to your web
#' application. For more information, see [How do I find a code snippet
#' that I've already
#' generated?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-find-code-snippet.html)
#'
#' @usage
#' cloudwatchrum_update_app_monitor(AppMonitorConfiguration, CustomEvents,
#' CwLogEnabled, Domain, Name)
#'
#' @param AppMonitorConfiguration A structure that contains much of the configuration data for the app
#' monitor. If you are using Amazon Cognito for authorization, you must
#' include this structure in your request, and it must include the ID of
#' the Amazon Cognito identity pool to use for authorization. If you don't
#' include `AppMonitorConfiguration`, you must set up your own
#' authorization method. For more information, see Authorize your
#' application to send data to Amazon Web Services.
#' @param CustomEvents Specifies whether this app monitor allows the web client to define and
#' send custom events. The default is for custom events to be `DISABLED`.
#'
#' For more information about custom events, see [Send custom
#' events](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-RUM-custom-events.html).
#' @param CwLogEnabled Data collected by RUM is kept by RUM for 30 days and then deleted. This
#' parameter specifies whether RUM sends a copy of this telemetry data to
#' Amazon CloudWatch Logs in your account. This enables you to keep the
#' telemetry data for more than 30 days, but it does incur Amazon
#' CloudWatch Logs charges.
#' @param Domain The top-level internet domain name for which your application has
#' administrative authority.
#' @param Name [required] The name of the app monitor to update.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$update_app_monitor(
#' AppMonitorConfiguration = list(
#' AllowCookies = TRUE|FALSE,
#' EnableXRay = TRUE|FALSE,
#' ExcludedPages = list(
#' "string"
#' ),
#' FavoritePages = list(
#' "string"
#' ),
#' GuestRoleArn = "string",
#' IdentityPoolId = "string",
#' IncludedPages = list(
#' "string"
#' ),
#' SessionSampleRate = 123.0,
#' Telemetries = list(
#' "errors"|"performance"|"http"
#' )
#' ),
#' CustomEvents = list(
#' Status = "ENABLED"|"DISABLED"
#' ),
#' CwLogEnabled = TRUE|FALSE,
#' Domain = "string",
#' Name = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_update_app_monitor
#'
#' @aliases cloudwatchrum_update_app_monitor
cloudwatchrum_update_app_monitor <- function(AppMonitorConfiguration = NULL, CustomEvents = NULL, CwLogEnabled = NULL, Domain = NULL, Name) {
  # Endpoint descriptor for UpdateAppMonitor (HTTP PATCH: partial update —
  # omitted parameters keep their existing values server-side).
  operation <- new_operation(
    name = "UpdateAppMonitor",
    http_method = "PATCH",
    http_path = "/appmonitor/{Name}",
    paginator = list()
  )
  # Marshal the (mostly optional) update fields; only Name is required.
  request_input <- .cloudwatchrum$update_app_monitor_input(
    AppMonitorConfiguration = AppMonitorConfiguration,
    CustomEvents = CustomEvents,
    CwLogEnabled = CwLogEnabled,
    Domain = Domain,
    Name = Name
  )
  request_output <- .cloudwatchrum$update_app_monitor_output()
  # Build the service client and dispatch the call.
  client <- .cloudwatchrum$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.cloudwatchrum$operations$update_app_monitor <- cloudwatchrum_update_app_monitor
#' Modifies one existing metric definition for CloudWatch RUM extended
#' metrics
#'
#' @description
#' Modifies one existing metric definition for CloudWatch RUM extended
#' metrics. For more information about extended metrics, see
#' [BatchCreateRumMetricsDefinitions](https://docs.aws.amazon.com/cloudwatchrum/latest/APIReference/).
#'
#' @usage
#' cloudwatchrum_update_rum_metric_definition(AppMonitorName, Destination,
#' DestinationArn, MetricDefinition, MetricDefinitionId)
#'
#' @param AppMonitorName [required] The name of the CloudWatch RUM app monitor that sends these metrics.
#' @param Destination [required] The destination to send the metrics to. Valid values are `CloudWatch`
#' and `Evidently`. If you specify `Evidently`, you must also specify the
#' ARN of the CloudWatchEvidently experiment that will receive the metrics
#' and an IAM role that has permission to write to the experiment.
#' @param DestinationArn This parameter is required if `Destination` is `Evidently`. If
#' `Destination` is `CloudWatch`, do not use this parameter.
#'
#' This parameter specifies the ARN of the Evidently experiment that is to
#' receive the metrics. You must have already defined this experiment as a
#' valid destination. For more information, see
#' [`put_rum_metrics_destination`][cloudwatchrum_put_rum_metrics_destination].
#' @param MetricDefinition [required] A structure that contains the new definition that you want to use for
#' this metric.
#' @param MetricDefinitionId [required] The ID of the metric definition to update.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$update_rum_metric_definition(
#' AppMonitorName = "string",
#' Destination = "CloudWatch"|"Evidently",
#' DestinationArn = "string",
#' MetricDefinition = list(
#' DimensionKeys = list(
#' "string"
#' ),
#' EventPattern = "string",
#' Name = "string",
#' Namespace = "string",
#' UnitLabel = "string",
#' ValueKey = "string"
#' ),
#' MetricDefinitionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname cloudwatchrum_update_rum_metric_definition
#'
#' @aliases cloudwatchrum_update_rum_metric_definition
cloudwatchrum_update_rum_metric_definition <- function(AppMonitorName, Destination, DestinationArn = NULL, MetricDefinition, MetricDefinitionId) {
  # Endpoint descriptor for UpdateRumMetricDefinition.
  operation <- new_operation(
    name = "UpdateRumMetricDefinition",
    http_method = "PATCH",
    http_path = "/rummetrics/{AppMonitorName}/metrics",
    paginator = list()
  )
  # Marshal the identifying fields plus the replacement metric definition;
  # DestinationArn is only needed for Evidently destinations.
  request_input <- .cloudwatchrum$update_rum_metric_definition_input(
    AppMonitorName = AppMonitorName,
    Destination = Destination,
    DestinationArn = DestinationArn,
    MetricDefinition = MetricDefinition,
    MetricDefinitionId = MetricDefinitionId
  )
  request_output <- .cloudwatchrum$update_rum_metric_definition_output()
  # Build the service client and dispatch the call.
  client <- .cloudwatchrum$service(get_config())
  send_request(new_request(client, operation, request_input, request_output))
}
.cloudwatchrum$operations$update_rum_metric_definition <- cloudwatchrum_update_rum_metric_definition
|
342ce94c87ed009c225da23fdec0377a2be298bc
|
7cb9ac0c1f2f6f9c916ab6c67dcc55d328385c92
|
/R/derived_variables.R
|
6e382c594d41fc08842d173440c7587099a2c04f
|
[] |
no_license
|
Westat-Transportation/surveysummarize
|
3c4d5b63fcd4eef891b4ff48ea2e88f421552213
|
aebb3032507fda319fdcd5484658a2825aa4c7d5
|
refs/heads/master
| 2023-08-17T05:09:38.018122
| 2023-08-08T21:36:31
| 2023-08-08T21:36:31
| 186,884,470
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,294
|
r
|
derived_variables.R
|
#' @title Add derived Variables from a configuration worksheet (csv).
#'
#' @description Add custom variable to dataset and codebook.
#'
#' @param data Object returned by \link[summarizeNHTS]{read_data}.
#' @param config_csv File path to a csv with fields "NAME", "TABLE", "TYPE", "DOMAIN", "VALUE", "LABEL".
#'
#' @export
#' @import data.table
derived_variables <- function(data, config_csv = NULL) {
  # `data` must come from read_data(); everything else is meaningless here.
  if (!inherits(data, 'HTS.data')) {
    stop('data is not an "HTS.data" object (returned by the read_data function).')
  }
  # Fail early with a clear message instead of letting fread(NULL) error
  # cryptically further down.
  if (is.null(config_csv)) {
    stop('config_csv must be the path to a derived-variable configuration csv.')
  }
  config_table <- fread(config_csv)
  # The configuration header is a fixed contract; reject anything else.
  if (!identical(colnames(config_table), c('NAME','TABLE','TYPE','DOMAIN','VALUE','LABEL'))) {
    logerror('Derived variable CSV must be a table with header: "NAME", "TABLE", "TYPE", "DOMAIN", "VALUE", "LABEL"')
    stop('Derived variable CSV must be a table with header: "NAME", "TABLE", "TYPE", "DOMAIN", "VALUE", "LABEL"')
  }
  # One sub-table per derived variable (one row per VALUE/LABEL mapping).
  config_table_list <- split(config_table, by = 'NAME')
  message('Adding derived variables:\n', rep('-', 25))
  for (var in names(config_table_list)) {
    tbl <- config_table_list[[var]]
    # A failure on one variable must not abort the rest: log, warn, continue.
    tryCatch({
      append_config_data(tbl, data)   # add/overwrite the column in the data table
      add_config_codebook(tbl, data)  # register it in the codebook
      message(var)
      loginfo(paste('Reading derived variable:', var))
    }, error = function(e) {
      logwarn(sprintf('Failed to add %s\n%s', var, e))
      warning('Failed to add ', var, '\n', e)
    })
  }
}
check_config <- function(tbl) {
  # Validate one derived-variable configuration (all rows share one NAME).
  # Returns TRUE when TABLE and TYPE are each a single, recognized value;
  # otherwise emits a warning describing the problem and returns FALSE.
  derived_name <- unique(tbl$NAME)
  target_table <- unique(tbl$TABLE)
  value_type <- unique(tbl$TYPE)
  known_tables <- c('household','person','vehicle','trip','tour')
  known_types <- c('character','numeric')
  valid <- TRUE

  # TABLE: exactly one value, drawn from the known dataset tables.
  if (length(target_table) != 1) {
    warning('More than 1 unique TABLE values specified for custom variable: ', derived_name)
    valid <- FALSE
  } else if (!target_table %in% known_tables) {
    warning('In custom_variable: ', derived_name, ', "', target_table, '" is an invalid TABLE value. ',
            'Use "household", "person", "vehicle", "trip", or "tour"')
    valid <- FALSE
  }

  # TYPE: exactly one value, either "character" or "numeric".
  if (length(value_type) != 1) {
    warning('More than 1 unique TYPE values specified for custom variable: ', derived_name)
    valid <- FALSE
  } else if (!value_type %in% known_types) {
    warning('In custom_variable: ', derived_name, ', "', value_type, '" is an invalid TYPE value. ',
            'Use "character" or "numeric".')
    valid <- FALSE
  }

  valid
}
append_config_data <- function(tbl, data) {
  # Create/update the derived column NAME in the target data table, binning
  # rows by each row's DOMAIN filter expression.
  # `tbl` holds all config rows for one variable, so NAME/TABLE are scalars.
  NAME <- unique(tbl$NAME)
  TABLE <- unique(tbl$TABLE)
  # seq_len() is safe for a zero-row config (1:nrow would yield c(1, 0)).
  for (i in seq_len(nrow(tbl))) {
    # Explicit scalar extraction: the old `tbl[i, 'DOMAIN']` returned a
    # one-cell data.table and relied on parse()'s implicit coercion.
    domain_expr <- parse(text = tbl$DOMAIN[[i]])
    value <- tbl$VALUE[[i]]
    # data.table modify-in-place: rows matching the DOMAIN expression
    # (evaluated against the table's columns) get this VALUE as character.
    data$data[[TABLE]][eval(domain_expr), (NAME) := as.character(value)]
  }
}
add_config_codebook <- function(tbl, data) {
  # Register a derived variable in the dataset's codebook (data$documentation):
  # one record in documentation$variables describing the variable, and one
  # record per config row in documentation$values mapping VALUE codes to labels.
  # `tbl` holds all config rows for one variable, so NAME/TABLE/TYPE are scalars.
  DERIVED_NAME <- unique(tbl$NAME)
  DERIVED_TABLE <- unique(tbl$TABLE)
  DERIVED_TYPE <- unique(tbl$TYPE)
  # new label for the codebook: one row per VALUE/LABEL pair
  new_codebook_values <- data.table(
    NAME = DERIVED_NAME,
    TABLE = DERIVED_TABLE,
    VALUE = as.character(tbl$VALUE),
    LABEL = tbl$LABEL
  )
  # Variable-level record; the variable's own name doubles as its LABEL.
  new_codebook_variable <- data.table(
    NAME = DERIVED_NAME,
    TABLE = DERIVED_TABLE,
    TYPE = DERIVED_TYPE,
    LABEL = DERIVED_NAME
  )
  if(DERIVED_NAME %in% data$documentation$variables$NAME | DERIVED_NAME %in% data$documentation$values$NAME) {
    # Variable already registered: warn, then replace the matching rows.
    logwarn(sprintf('Derived variable %s already exists. Overwriting existing data and codebook records.', DERIVED_NAME))
    warning(DERIVED_NAME, ' already exists. Overwriting existing data and codebook records.')
    # NOTE(review): data.table sub-assignment `dt[cond] <- value` appears to
    # assume the replacement has as many rows as the matched subset — confirm
    # behavior when the number of VALUE rows changes between runs.
    data$documentation$variables[NAME == DERIVED_NAME] <- new_codebook_variable
    data$documentation$values[NAME == DERIVED_NAME] <- new_codebook_values
  } else {
    # First registration: append to both codebook tables.
    data$documentation$variables <- rbind(data$documentation$variables, new_codebook_variable)
    data$documentation$values <- rbind(data$documentation$values, new_codebook_values)
  }
}
|
5292ef35750dbc1d05359685ea039d00a7d3335c
|
9b407ebd913589c2f7d383d7a21bb29ede5c632f
|
/Plot3.R
|
7d3da95294323139163e51daf664d280ea4ef942
|
[] |
no_license
|
sfavors3/ExData_Plotting1
|
a720c1cd542efc9fd858de6f4175c7f0fd93e82c
|
a722fa259926047735bc58f0d01d78789f222953
|
refs/heads/master
| 2021-01-22T14:40:16.443844
| 2014-10-10T08:45:16
| 2014-10-10T08:45:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,134
|
r
|
Plot3.R
|
# This code creates a line graph named plot3.png from household_power_consumption
# data for 2/1/2007 through 2/2/2007.

## open a PNG graphics device (480x480 px) to save the graph
## (the original comment said "pdf" — the output is a PNG)
png("plot3.png", width=480, height=480)

## declare variables
## NOTE(review): `title` is defined but never used below — candidate for removal.
title<- "Global Active Power"

## import the data and keep only the two target dates;
## "?" marks missing values in the raw file
data <- read.table("household_power_consumption.txt", sep=";",na.strings="?", header=TRUE)
globalPower <- data[data$Date=="2/2/2007" | data$Date=="1/2/2007",]
## numeric copy of Global_active_power, used only to position the x-axis ticks
gap <- as.numeric(globalPower$Global_active_power)

## create a line graph with a custom weekday axis: draw an empty frame first
## (type="n"), then overlay one line per sub-meter
plot(globalPower$Sub_metering_1,type="n", ylab="Energy sub metering", xaxt="n", xlab="")
axis(1,at=c(0,length(gap)/2,length(gap)),labels=c("Thu","Fri","Sat"))
points(globalPower$Sub_metering_1, col = "black",type="l")
points(globalPower$Sub_metering_2, col = "red",type="l")
points(globalPower$Sub_metering_3, col = "blue",type="l")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),pch="_", col=c("black","red","blue"),cex=0.5, seg.len=0.5, bty="n", lwd=2)

#closes the PNG device (flushes the plot to disk)
dev.off()
|
c86d46089b0d249ec65810e561dd77a97ab07a31
|
f96c243bd74c91e32037c17fc1fdd522de6b1dd7
|
/man/formFilters.Rd
|
41c9e1660778253f56f845554c3f306774ce44dd
|
[] |
no_license
|
cran/insiderTrades
|
eed4ad8d8be58fcae1dc463c84da394d139ff544
|
de306e71e705e44d327d2d618377ac18069ad0c3
|
refs/heads/master
| 2023-07-30T17:47:50.472428
| 2021-10-04T19:20:05
| 2021-10-04T19:20:05
| 413,661,687
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,034
|
rd
|
formFilters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formFilters.R
\name{formFilters}
\alias{formFilters}
\alias{formFilterNonderivativeTransactions}
\alias{formFilterDerivativeTransactions}
\alias{formFilterNonderivativeHoldings}
\alias{formFilterDerivativeHoldings}
\title{formFilters}
\usage{
formFilterNonderivativeTransactions(
filing,
footnoteKeywords,
issuerKeywords,
issuerTradingSymbol,
rptOwnerKeywords,
transactionType
)
formFilterDerivativeTransactions(
filing,
footnoteKeywords,
issuerKeywords,
issuerTradingSymbol,
rptOwnerKeywords,
transactionType
)
formFilterNonderivativeHoldings(
filing,
footnoteKeywords,
issuerKeywords,
issuerTradingSymbol,
rptOwnerKeywords
)
formFilterDerivativeHoldings(
filing,
footnoteKeywords,
issuerKeywords,
issuerTradingSymbol,
rptOwnerKeywords
)
}
\arguments{
\item{filing}{The object containing the Form 4 text}
\item{footnoteKeywords}{The character vector containing any specified key
words to be searched in the form's footnotes. Default is NA.}
\item{issuerKeywords}{The character vector containing any specified key
words to be searched within the issuer block. Default is NA.}
\item{issuerTradingSymbol}{The character vector containing any specified
stock tickers. Default is NA.}
\item{rptOwnerKeywords}{The character vector containing any specified key
words to be searched within the rptOwner block. Default is NA.}
\item{transactionType}{The character vector containing any specified
transaction codes. Default is NA.}
}
\value{
An integer value that, if greater than 0, reflects that at least one of
the criteria parameters listed above has been met. The form will then be
parsed further. Otherwise, if the integer value is 0, the function pulls in
the next Form 4 to evaluate.
}
\description{
formFilters
}
\note{
\code{formFilterNonderivativeTransactions} parses the form and
returns an integer value greater than 0 if one of the key word
criteria is met. This function is specifically for the
\code{\link{nonderivativeTransactionsScrape}} and
\code{\link{nonderivativeTransactionsPullAndScrape}} functions.
\code{formFilterDerivativeTransactions} parses the form and
returns an integer value greater than 0 if one of the key word criteras is
met. This function is specifically for the
\code{\link{derivativeTransactionsScrape}} and
\code{\link{derivativeTransactionsPullAndScrape}} functions. The function
shares the same parameters as
\code{\link{formFilterNonderivativeTransactions}} (filing,
footnoteKeywords, issuerKeywords, issuerTradingSymbol, rptOwnerKeywords,
transactionType). The only difference is within the parsing commands which
use the key word derivative rather than nonderivative.
\code{formFilterNonderivativeHoldings} parses the form and
returns an integer value greater than 0 if one of the key word criteria is
met. This function is specifically for the
\code{\link{nonderivativeHoldingsScrape}} and
\code{\link{nonderivativeHoldingsPullAndScrape}} functions. The function
shares the same parameters as
\code{\link{formFilterNonderivativeTransactions}} (filing,
footnoteKeywords, issuerKeywords, issuerTradingSymbol, rptOwnerKeywords)
except for transactionType since that isn't a criteria for a nonderivative
holdings. Additionally, parsing criteria is different due to targeting
holding information rather than transaction information.
\code{formFilterDerivativeHoldings} parses the form and
returns an integer value greater than 0 if one of the key word criteria is
met. This function is specifically for the
\code{\link{derivativeHoldingsScrape}} and
\code{\link{derivativeHoldingsPullAndScrape}} functions. The function
shares the same parameters as
\code{\link{formFilterNonderivativeHoldings}} (filing,
footnoteKeywords, issuerKeywords, issuerTradingSymbol, rptOwnerKeywords)
except for transactionType since that isn't a criteria for a derivative
holdings. The only difference is within the parsing commands which
use the key word derivative rather than nonderivative.
}
|
5967258845e38d341c3e3f9497b1b304a5a0e876
|
c016dda10b08538e7a9d894c1b19e7d05defe172
|
/R/mod_data_import.R
|
f85b02c0386ec5e25d56aba829360e87efe12e13
|
[
"Artistic-2.0"
] |
permissive
|
HelBor/wpm
|
37c3fb1fa358fe8ed817781fd310ffbb34a7c35b
|
62002a8d64da032454bf3686158c37585843b332
|
refs/heads/master
| 2021-06-25T13:19:01.187012
| 2021-06-15T11:53:24
| 2021-06-15T11:53:24
| 226,340,423
| 3
| 0
| null | 2020-11-30T16:48:39
| 2019-12-06T13:52:50
|
R
|
UTF-8
|
R
| false
| false
| 8,153
|
r
|
mod_data_import.R
|
# UI for the data-import shiny module.
# Left column: a tabbed box to either upload a csv file or load the bundled
# demo dataset. Right column: previews of the raw file and of the
# WPM-formatted table, plus value boxes showing the number of samples and
# of distinct groups.
# `id` is the module id used to namespace every input/output id via NS().
mod_data_import_ui <- function(id){
    ns <- shiny::NS(id)
    shiny::fluidRow(
        # inputs part
        shiny::column(width = 6,
            shinydashboard::box(
                status = "warning",
                width = 12,
                collapsible = TRUE,
                solidHeader = FALSE,
                title = shiny::h3("Upload the dataset"),
                # Two mutually exclusive import paths; the selected tab id
                # ("upload_tabset") tells the server which sub-module to read.
                shinydashboard::tabBox(
                    title = "",
                    id = ns("upload_tabset"),
                    side = "left",
                    width = 12,
                    selected = "user_csv",
                    shiny::tabPanel(
                        title = "Upload a file",
                        value = "user_csv",
                        icon = shiny::icon("file-upload"),
                        shiny::column(width = 12,
                            # Sub-module handling the csv file upload.
                            mod_data_import_file_ui(ns("csv_file"))
                        )
                    ),
                    shiny::tabPanel(
                        title = "Load the demo dataset",
                        value = "demo",
                        icon = shiny::icon("database"),
                        shiny::column(width = 12,
                            # Sub-module loading the demo dataset.
                            mod_data_import_demo_ui(ns("demo"))
                        )
                    )
                )
            )
        ),
        # output part
        shiny::column(width = 6,
            shiny::fluidRow(
                shiny::column(
                    width = 7,
                    style='padding:0px;margin:0px;',
                    # Preview of the file exactly as WPM parsed it.
                    shinydashboard::box(style = "overflow-x: scroll;",
                        title = shiny::h3("Check that your file is correctly read by WPM"),
                        solidHeader = FALSE, collapsible = TRUE,
                        width = 12, status = "warning",
                        DT::dataTableOutput(ns("default_table")),
                        # Import errors (returned as character) are shown here.
                        shiny::textOutput(ns("default_text"))
                    )),
                shiny::column(
                    width = 5,
                    style = 'padding:0px;margin:0px;',
                    # Preview of the data converted to the WPM output template.
                    shinydashboard::box(style = "overflow-x: scroll;",
                        title = shiny::h3("Preview output template"),
                        solidHeader = FALSE, collapsible = TRUE,
                        width = 12, status = "warning",
                        DT::dataTableOutput(ns("wpm_table"))
                    ),
                    # Summary value boxes (sample count, distinct group count).
                    shinydashboard::valueBoxOutput(ns("nb_ech"), width = 6),
                    shinydashboard::valueBoxOutput(ns("nb_gp"), width = 6)
                )
            )
        )
    )
}
# Server for the data-import shiny module.
# Reads the active import sub-module (file upload or demo dataset), renders
# previews and summary value boxes, and exposes the imported data plus group
# statistics to the caller through a reactiveValues object.
mod_data_import_server <- function(id){
    shiny::moduleServer(
        id,
        function(input, output, session) {
            ns <- session$ns
            # Values exposed to the parent module; updated in the observe()
            # at the bottom of this function.
            toReturn <- shiny::reactiveValues(
                df = NULL,
                distinct_gps = NULL,
                gp_levels = NULL,
                nb_samples = 0
            )
            # Each import path runs as its own sub-module.
            demo_mod <- mod_data_import_demo_server("demo")
            file_mod <- mod_data_import_file_server("csv_file")
            # complete here if new module of import
            # Raw data (data.frame, or character error message) from whichever
            # import tab is currently selected.
            df <- shiny::reactive({
                if(input$upload_tabset == "demo" ){
                    return(demo_mod$df)
                }else{
                    return(file_mod$df)
                }
                # complete here if new module of import
            })
            # Same data converted to the WPM template by the sub-module.
            df_wpm <- shiny::reactive({
                if(input$upload_tabset == "demo" ){
                    return(demo_mod$df_wpm)
                }else{
                    return(file_mod$df_wpm)
                }
                # complete here if new module of import
            })
            # Value box: total number of samples (0 placeholder when no data).
            output$nb_ech <- shinydashboard::renderValueBox({
                if (is.null(df_wpm())) {
                    shinydashboard::valueBox(
                        value = 0 ,
                        subtitle = "Total number of samples to place",
                        color = "teal")
                }else{
                    shinydashboard::valueBox(
                        value = nrow(df_wpm()) ,
                        subtitle = "Total number of samples to place",
                        icon = shiny::icon("list"),
                        color = "teal")
                }
            })
            ## Vector containing the different group names
            ## (returns 0 when no data is loaded or no Group column exists)
            gp_levels <- shiny::reactive({
                nb <- NULL
                if (is.null(df_wpm())) {
                    nb <- 0
                }else if ("Group" %in% colnames(df_wpm())) {
                    nb <- unique(df_wpm()$Group)
                }
                return(nb)
            })
            ## The number of distinct groups in the file
            ## (validate() blocks downstream outputs when the cap is exceeded)
            distinct_gps <- shiny::reactive({
                d_gp <- NULL
                if (is.null(df_wpm())) {
                    d_gp <- 0
                }else if ("Group" %in% colnames(df_wpm())) {
                    d_gp <- length(unique(df_wpm()$Group))
                }
                shiny::validate(
                    shiny::need(d_gp <= 12,
                                message = "The number of separate groups must not
                         exceed 12.")
                )
                return(d_gp)
            })
            # the number of samples in the dataset
            nb_s <- shiny::reactive({
                if (is.null(df_wpm())) {
                    nb <- 0
                }else{
                    nb <- nrow(df_wpm())
                }
                return(nb)
            })
            # Value box: number of distinct groups.
            output$nb_gp <- shinydashboard::renderValueBox({
                shinydashboard::valueBox(
                    value = distinct_gps(),
                    subtitle = "Total number of distinct groups",
                    icon = shiny::icon("layer-group"),
                    color = "teal")
            })
            # Keep nb_gp evaluated even while its tab is hidden, so dependent
            # logic stays up to date.
            shiny::outputOptions(output, "nb_gp", suspendWhenHidden = FALSE)
            # Preview of the raw imported data (only when import succeeded
            # and yielded a data.frame).
            output$default_table <- DT::renderDataTable(
                if(!is.null(df())){
                    if(methods::is(df(), "data.frame")){
                        DT::datatable({df()},
                                      rownames = FALSE,
                                      options = list(columnDefs = list(list(className = 'dt-center', targets ="_all")),
                                                     pageLength = 5)
                        )
                    }
                }
            )
            # Preview of the WPM-formatted data.
            output$wpm_table <- DT::renderDataTable(
                if(!is.null(df_wpm())){
                    if(methods::is(df_wpm(), "data.frame")){
                        DT::datatable({df_wpm()},
                                      rownames = FALSE,
                                      options = list(columnDefs = list(list(className = 'dt-center', targets ="_all")),
                                                     pageLength = 5)
                        )
                    }
                }
            )
            # Import failures arrive as a character message; display it.
            output$default_text <- shiny::renderText({
                if(methods::is(df(), "character")){
                    df()
                }
            })
            # Push the current state into toReturn so the parent module can
            # react to newly imported data.
            shiny::observe({
                #
                # print("data import : on est dans le observe du toReturn")
                # logging::loginfo("nb_s: %s", nb_s())
                # logging::loginfo("distinct_gps: %s", distinct_gps())
                # logging::loginfo("gp_levels: %s", gp_levels())
                #
                # print("-------------------------------------------")
                toReturn$df <- df_wpm()
                toReturn$distinct_gps <- distinct_gps()
                toReturn$gp_levels <- gp_levels()
                toReturn$nb_samples <- nb_s()
            })
            return(toReturn)
        }
    )
}
|
cbcab7090deec0e08768c69bb96e44017deb27b4
|
8ce08913391970b7f6b3e4183310e0e384b026ad
|
/code/figures/diet_variance.R
|
707de14182c236009361b02705c7ce0ba9bfbef8
|
[] |
no_license
|
cfree14/forage_fish
|
dbf636b2429db458477229510b3549120beb63dd
|
3779bd43b4a8a8ec50a6e322bbe4de6bab83be1b
|
refs/heads/master
| 2021-11-11T06:50:18.878151
| 2021-11-01T15:08:12
| 2021-11-01T15:08:12
| 136,339,563
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 938
|
r
|
diet_variance.R
|
# Setup
################################################################################
# NOTE(review): removed `rm(list = ls())` — wiping the caller's global
# environment is a side effect a script should not have; run in a fresh
# session instead.

# Packages
library(reshape2)
library(tidyverse)

# Directories
datadir <- "data/hilborn_etal_2017/"
plotdir <- "figures"

# Read data
diets <- read.csv(paste(datadir, "hilborn_etal_2017_diet_information.csv", sep="/"), as.is=TRUE)


# Calculate diet summary statistics
################################################################################

# For each ocean/predator/prey combination, pool all available diet-proportion
# measures (by energy, weight, count, occurrence, index) and summarize them.
diet_stats <- diets %>%
  select(ocean, pred_comm_name, prey_comm_name, prop_diet_by_energy, prop_diet_by_wt, prop_diet_by_n, prop_diet_by_occur, prop_diet_by_index) %>%
  # Long format: one row per (combination, proportion measure); columns 4+ hold the measures
  gather(key="prop_type", value="prop", 4:ncol(.)) %>%
  filter(!is.na(prop)) %>%
  group_by(ocean, pred_comm_name, prey_comm_name) %>%
  summarize(prop_avg=mean(prop),
            prop_med=median(prop),  # fixed typo: was "prod_med"
            prop_min=min(prop),
            prop_max=max(prop)) %>%
  ungroup()
|
eda562abf5c6fd1dd7e2afff4214cc6ef9ef78bd
|
3c234755377b4637654507f44b0d6e4bfa63a367
|
/ui.R
|
979c080d572fb4316264c3e1959b2866df58571d
|
[] |
no_license
|
vfulco/china_pollution
|
ca3eca11a24486b98f6142f6a5d3d61e0a565681
|
ce3ce991e7e70ede4e5a5e0765df54cac97a965e
|
refs/heads/master
| 2020-03-22T11:56:09.571443
| 2015-12-26T14:43:24
| 2015-12-26T14:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,696
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#

library(shiny)

# Layout: header + sidebar (city selector and a description of the data) +
# main panel (daily-average PM2.5 plot and a monthly summary table). The
# output ids "plot" and "table" must be rendered by the matching server.R.
shinyUI(pageWithSidebar(

    # Application title
    headerPanel("Air pollution in China"),

    # Sidebar with the city selector and background on the data source
    # (the old comment mentioned a slider for bins, which does not exist here)
    sidebarPanel(
        selectInput("city",
                    "Select a city:",
                    choices = c("Beijing", "Guangzhou", "Chengdu", "Shanghai",
                                "Shenyang")),
        h4("Description"),
        p("Air pollution in China is very high compared with most Western countries.
      The extent of this air pollution is difficult to track, as official
      government statistics are often unreliable."),
        p("The US state department has recorded and published hourly",
          a(href = "http://www3.epa.gov/pmdesignations/faq.htm#0", "PM2.5"),
          "(a measure of air pollution) data for Beijing since 2008 and for Chengdu,
      Guangzhou, Shanghai and Shenyang since 2011."),
        p("This app allows you to look at trends in pollution levels in China for
      each year that there is available data. It includes a graph of daily
      averages and a table showing various monthly summary statistics."),
        p("Original data and disclaimers can be found at the ",
          a(href = "http://www.stateair.net/web/historical/1/1.html",
            "US state department website.")
        )
    ),

    # Main panel: pollution trend plot and monthly summary data table
    mainPanel(
        h3("Trends in air pollution levels as measured by PM2.5"),
        plotOutput(outputId = "plot"),
        h3("Monthly PM2.5 Summary Statistics"),
        dataTableOutput(outputId = "table")
    )
))
|
a47b013cac29bca5383c27da9f57fbae7337b8ec
|
d42229401a0acbf19b28dcef2c521b6236b638b6
|
/atividades/day_5/explorando.R
|
42897cb05b4c069317e8ecbb92c45743c208dc29
|
[] |
no_license
|
LABHDUFBA/2021-SICSS
|
4fbab9bdf1ad5ea9cae7e3dafbff1c836f318251
|
1fc011d29ba60a46f0a48b06a9c16bc4627aac73
|
refs/heads/master
| 2023-05-30T21:11:53.163451
| 2021-06-21T18:25:07
| 2021-06-21T18:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,337
|
r
|
explorando.R
|
# Exploratory analysis of the Fragile Families data (SICSS 2021, day 5).
# Install the fragile families metadata package (run once, interactively):
# devtools::install_github("fragilefamilieschallenge/ffmetadata")
# ffmetadata::search_metadata(background_raw)
background_raw <- haven::read_dta("atividades/day_5/ffchallenge_SICSS_2021/FFChallenge_v5/background.dta")
train <- readr::read_csv("atividades/day_5/ffchallenge_SICSS_2021/FFChallenge_v5/train.csv")
# Summarise missingness in the training outcomes.
naniar::miss_var_summary(train)
# Console output kept for reference. Bug fix: the bare "> ..." prompt line
# that was pasted here previously was a syntax error when sourcing the script.
# naniar::miss_var_summary(train)
# # A tibble: 7 x 3
#   variable         n_miss pct_miss
#   <chr>             <int>    <dbl>
# 1 gpa                 956     45.1
# 2 layoff              844     39.8
# 3 grit                703     33.1
# 4 materialHardship    662     31.2
# 5 eviction            662     31.2
# 6 jobTraining         660     31.1
# 7 challengeID           0      0
# Attach the raw background predictors to the training outcomes.
dados_unidos <- dplyr::left_join(train, background_raw, by = "challengeID")
library(magrittr)
# correlation ------------
# NOTE(review): `expl` is not defined anywhere in this script — presumably a
# data frame of selected explanatory variables; define it before this line.
corr <- round(cor(expl, use = "complete.obs"), 2)
# NOTE(review): ggcorrplot::cor_pmat() expects the raw data, not a correlation
# matrix — confirm this should not be cor_pmat(expl).
p.mat <- ggcorrplot::cor_pmat(corr)
corr.plot <- ggcorrplot::ggcorrplot(
  corr, hc.order = TRUE, type = "lower", outline.col = "white",
  p.mat = p.mat
)
corr.plot
# Which outcome (Y) do we want to predict? -> gpa!
# Which predictors (X) do we want? Select variables below.
dados_unidos %>%
  dplyr::select(challengeID, gpa) %>%
  tidyr::drop_na(gpa)
|
48202d4a9a7a19da9e66bf3d356ed40894e43bc5
|
1dc0ab4e2b05001a5c9b81efde2487f161f800b0
|
/experiments/train_noisy/an_nn_20.R
|
b8259634c9da79b36162be4a20ce853819e0a04d
|
[] |
no_license
|
noeliarico/knnrr
|
efd09c779a53e72fc87dc8c0f222c0679b028964
|
9f6592d1bbc1626b2ea152fbd539acfe9f9a5ab3
|
refs/heads/master
| 2020-06-01T02:44:34.201881
| 2020-03-13T13:30:52
| 2020-03-13T13:30:52
| 190,601,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,323
|
r
|
an_nn_20.R
|
# Fit models on each 20%-attribute-noise ("20an_nn") dataset and log a
# completion timestamp to the "executed" file after each one.
#
# Refactors five identical copy-pasted sections into a single loop. For each
# dataset <name>_20an_nn it creates the same variables the original did:
# fit_<name>_20an_nn_d (out$fitd) and fit_<name>_20an_nn_r (out$fitr), plus
# `out` holding the last fit. `fitNum()` and `now()` (lubridate) are expected
# to be in scope from earlier in the experiment pipeline.
for (ds in c("ecoli", "glass", "ionosphere", "sonar", "wine")) {
  ds_name <- paste0(ds, "_20an_nn")
  out <- fitNum(get(ds_name))
  # Preserve the original per-dataset result variable names.
  assign(paste0("fit_", ds_name, "_d"), out$fitd)
  assign(paste0("fit_", ds_name, "_r"), out$fitr)
  # Append a progress line to the shared log file.
  sink("executed", append = TRUE)
  cat(paste0("- ", ds_name, " ---> ", now(), "\n"))
  sink()
}
|
e49b6d590747aea66fb3f20192de95c86bc60b07
|
aff4230bba7510f6fccdccd32646641e50573244
|
/basic_r/code/01basic/script02(데이터타입).R
|
cff8d3592cab0e58c54052cc75d0c367bb81f535
|
[] |
no_license
|
jhr1494/R
|
b6e7bcf71081673cf0ac4bbd2fed11baa1125bf7
|
3fcb607fa582c2f874ed404e8c2b27c21ae1f30f
|
refs/heads/master
| 2023-02-14T18:45:42.929548
| 2021-01-11T14:29:13
| 2021-01-11T14:29:13
| 326,626,594
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,010
|
r
|
script02(데이터타입).R
|
# Data types
# Numeric variables
a <- 1
class(a) # numeric
a <- 3.14
class(a) # numeric
# Character variables
b <- "1"
class(b) # character
# NOTE(review): this shadows base::c with a vector; calls like c(...) still
# work because R skips non-function objects when looking up a function name.
c <- c(1:10)
class(c) # integer(=numeric)
d <- TRUE
class(d) # logical
# Categorical variables are created with factor()
# Suitable for clearly nominal values such as grades or classifications
e <- factor( c(1,2,3,2,2,2,1,3,4) ) # categories 1, 2, 3, 4
class(e) # factor
levels(e) # 1, 2, 3, 4 --- check the categories
# Type-conversion functions
# as.numeric() (to numeric), as.character() (to character), as.factor() (to factor)
as.character(a)
as.character(c)
as.numeric(b)
as.factor(c)
# as.Date("2020") # string -> date
class( as.Date("2020-01-01") ) # the date format must be respected exactly
# Container (object) data types
# vector - a variable built from one single type
c(1,2,3,4,5) # numeric and also a vector
c(1:10)
c(1,2,3,"park", "hong") # still a vector: everything is stored as character
# data frame - a 2-D structure combining several types
data.frame(a = c(1, 2, 3),
           b = c("park", "hong", "kim") ) # each variable becomes a column
data.frame(a = c(1:3),
           b = c("park", "hong") ) # error - data.frame columns must have equal lengths
# matrix - a 2-D structure of one single type
# the dimensions must be specified! (nrow = rows, ncol = cols)
matrix( c(1:6), nrow = 3, ncol = 2 ) # 3 rows, 2 columns
matrix( c(1:6), nrow = 2, ncol = 3 ) # 2 rows, 3 columns
matrix( c(1:6), nrow = 3, ncol = 3 ) # too few values: earlier elements are recycled
as.data.frame( matrix( c(1:6), nrow = 2, ncol = 3) ) # matrix -> data.frame
# array - a multi-dimensional matrix (matrices stacked in layers)
# array( data, dim = c(rows, cols, layers) )
array( c(1:15), dim = c(3, 4, 3) ) # 3 rows, 4 columns, 3 layers
# list - a multi-dimensional container that can hold any kind of element
list(a = c(1:5),
     b = data.frame(a = c(1,2,3), b = c("kim", "lee", "choi")),
     c = matrix(c(1:6), nrow = 3, ncol = 2) )
|
5b1e64ef11459af121750d557e1bd768ffabb00a
|
6ed3b6bea49ac58324852cf2679f2497284ad1ed
|
/run_analysis.R
|
a8eb88a60f734ca8a894cf4ee1d8243a56fb8cfb
|
[] |
no_license
|
fndfernando/Project-Getting-and-Cleaning-Data
|
81c461c0e37bdc0a38c153c0e579e9ca678386ee
|
1721b631f00898c8929077561133bea2fedae088
|
refs/heads/master
| 2021-01-21T14:01:22.417369
| 2016-05-24T14:17:40
| 2016-05-24T14:17:40
| 50,536,585
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,636
|
r
|
run_analysis.R
|
# Getting and Cleaning Data course project: builds a tidy summary of the
# UCI HAR (Samsung accelerometer) data set.
library(dplyr)
## Feature (column) names for the measurement files.
## file.path() keeps the script portable; the original "Dataset\\..." paths
## only worked on Windows.
colname <- read.table(file.path("Dataset", "features.txt"))
## Activity labels - question 3
activitylavel <- read.table(file.path("Dataset", "activity_labels.txt"))
names(activitylavel) <- c("id", "activity.label")
## Measurement data for the test and train splits, with feature names applied.
x_test <- read.table(file.path("Dataset", "test", "X_test.txt"), col.names = colname$V2)
x_train <- read.table(file.path("Dataset", "train", "X_train.txt"), col.names = colname$V2)
## Activity ids: one per row of the corresponding X file.
y_test <- read.table(file.path("Dataset", "test", "Y_test.txt"))
y_train <- read.table(file.path("Dataset", "train", "Y_train.txt"))
names(y_test) <- c("activity")
names(y_train) <- c("activity")
## Attach descriptive activity labels with a positional lookup.
## Bug fix: the original used merge(..., all = TRUE), which re-sorts the rows
## by the join key, so the labels no longer lined up with x_test/x_train in
## the cbind() below.
y_test$activity.label <- activitylavel$activity.label[match(y_test$activity, activitylavel$id)]
y_train$activity.label <- activitylavel$activity.label[match(y_train$activity, activitylavel$id)]
## Combine labels with measurements, then merge test and train - question 1
d_test <- cbind(y_test, x_test)
d_train <- cbind(y_train, x_train)
d <- rbind(d_test, d_train)
## Question 2: keep activity columns plus mean/std measurements only.
d2 <- select(d, 1, 2, grep("[Mm]ean|[Ss][Tt][Dd]", names(d)))
## Question 4: expand abbreviated feature names into descriptive ones.
namesd3 <- names(d2)
namesd3 <- gsub("Acc", "Acceleration", namesd3)
namesd3 <- gsub("Gyro", "Gyroscope", namesd3)
namesd3 <- gsub("\\.", "", namesd3)
namesd3 <- gsub("Mag", "Magnitude", namesd3)
namesd3 <- gsub("^f", "Frequency", namesd3)
namesd3 <- gsub("^t", "Time", namesd3)
namesd3 <- gsub("std", "StandardDeviation", namesd3)
namesd3 <- gsub("angle", "Angle", namesd3)
namesd3 <- gsub("activity", "Activity", namesd3)
d4 <- d2
names(d4) <- namesd3
## Question 5: average of every measurement per activity.
## NOTE(review): summarise_each()/funs() are deprecated in modern dplyr;
## kept here because the column-index semantics (3:88) are position-sensitive.
d5 <- d4 %>%
  group_by(Activitylabel) %>%
  summarise_each(funs(mean(.)), 3:88)
## Write the tidy data file. Bug fix: the argument is row.names —
## "row.name" in the original relied on partial argument matching.
write.table(d5, "tidy_data.txt", row.names = FALSE)
|
bef67985fa75fee8c35e626115c3c7a62a954dad
|
9f5ab4936f557c3903cfa0582004e9fd60fda797
|
/R/plot_spaghetti.R
|
355d9534f3ad6271b9ac80b2d6c9892dd49347d1
|
[] |
no_license
|
mstei4176/ct_real_assess
|
3b423ad2b437a38e5354b7fa141c0ad725766e4b
|
a354036b08d3379178bc71ccc8bbf9be1b388c82
|
refs/heads/master
| 2022-11-21T13:14:10.892600
| 2020-07-22T14:38:23
| 2020-07-22T14:38:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,515
|
r
|
plot_spaghetti.R
|
# Spaghetti plot of average annual sale price for every CT town, with the
# selected town highlighted in red. Used in the Shiny app;
# town_name and property type are specified from the Shiny ui.
#
# Args:
#   dt        - data.table of sales with sale_price, town, year, property_type
#   town_name - name of the town to highlight
#   type      - property type to keep
# Returns a ggplot object (log10 y-axis; town/year means below $20k dropped).
plot_spaghetti <- function(dt, town_name, type){
  # Mean sale price per town/year/property type, filtered to the requested
  # property type; very low means (< $20k) are treated as noise and dropped.
  dt <-
    dt[,
     mean(as.numeric(sale_price), na.rm = TRUE),
     .(town, year, property_type)][, .(
       `Average Price` = V1,
       `Year` = year,
       `Town` = town,
       property_type
     )][(property_type == type &
           `Average Price` > 20000)]
  # Rows for the highlighted town, drawn on top in red.
  town <- dt[Town == town_name]
  # Axis label formatter: dollars in thousands, e.g. 250000 -> "250k".
  ks <- function (x) { number_format(accuracy = 1,
                                     scale = 1/1000,
                                     suffix = "k",
                                     big.mark = ",")(x) }
  # Grey line per town, red line for the selection, log10 y scale.
  dt[Town != town_name,
     ggplot(.SD,
            aes(`Year`,
                `Average Price`,
                group = `Town`)) +
       geom_line() +
       theme_bw() +
       geom_line(data = town,
                 aes(`Year`,
                     `Average Price`,
                     col = "red"),
                 size = 0.5) +
       scale_y_continuous(trans = "log10",
                          labels = ks) +
       labs(title = "CT Average Annual Sales Price by Town since 1999",
            subtitle = "Selected municipality shown in red",
            caption = "Public data via CT data",
            y = "Average Price - Log Scale ($K)") +
       hrbrthemes::theme_ipsum_rc(grid = "XY",
                                  strip_text_face = "bold") +
       theme(legend.position = "none")]
}
|
29459b8c7bf7e87942f901a7e88363f838a33ad3
|
f43868b912d70d3ed9c7fe484fc3ea9ce1ec6d97
|
/main.R
|
051dfc5ecb0173f6f0b0bfc3a1594f2be753f705
|
[] |
no_license
|
IgorTkaczyk/labs-R-advanced
|
142f550d59ad65422d774f950672e351a2560eaa
|
7b03121ed52996a687070b7f5e2e23e52854831d
|
refs/heads/master
| 2020-09-12T18:58:58.398807
| 2019-11-18T18:53:07
| 2019-11-18T18:53:07
| 222,518,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 86
|
r
|
main.R
|
### zajecia 1
print('Hello')
# Numerical derivative of f at x via the forward-difference quotient
# (f(x + h) - f(x)) / h.
#
# Args:
#   f - a function of one numeric argument
#   x - point at which to approximate f'(x)
#   h - step size; now defaults to 1e-6 (backward-compatible: callers that
#       pass h explicitly behave exactly as before)
# Returns the forward-difference approximation of f'(x).
dum_der <- function(f, x, h = 1e-6) {
  (f(x + h) - f(x)) / h
}
|
5d7d6d7e0c4f409e293c4dfcfd9729ad4096dc89
|
503d06adc134f2a41547a782efe1b08ccd20cce5
|
/R/imports.R
|
79a88fcbcc8cf1e0edefb540805032cb625c36b5
|
[] |
no_license
|
StuartWheater/dsMiceClient
|
1f03293a5475fdaf92b28c76760081c63b5e9d63
|
929006a6d71ab02eb9075eeff76eb87063c127a6
|
refs/heads/master
| 2022-01-26T05:26:15.897569
| 2019-06-25T18:07:06
| 2019-06-25T18:07:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
imports.R
|
#'@import mice
#'@importFrom stats lm.fit rchisq rgamma rnorm
#'@importFrom opal datashield.login
#'@importFrom dsBaseClient ds.mean
NULL
|
5cfd4e36a1869515ca3b317ac60463797cb91b34
|
b58ef6361161adfad9bdc7cc1b23c4988030fbe3
|
/stuff/MeetingDataMerge.R
|
10f8ca15dd95067a9765f9af671fa9d40309cc33
|
[
"MIT"
] |
permissive
|
DSPG-ISU/DSPG
|
01b9ec9a3dd02cd2ee7e52a28ba22f6d312ad2f8
|
c20b50c1dd28eedd879a9226b5f6511a0471c870
|
refs/heads/master
| 2023-02-26T12:54:52.616558
| 2021-02-05T05:44:04
| 2021-02-05T05:44:04
| 277,006,430
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,404
|
r
|
MeetingDataMerge.R
|
# Iowa Recovery Meetings Data Cleaning and Merging
#
# Data Sources:
# https://www.aa-iowa.org/meetings/
# https://iowa-na.org/na-meetings/
# https://adultchildren.org/mtsearch
# https://al-anon.org/al-anon-meetings/find-an-alateen-meeting/
# http://draonline.qwknetllc.com/meetings_dra/usa/iowa.html
# https://www.nar-anon.org/find-a-meeting#groupspublic/?view_7_filters=%5B%7B%22field%22%3A%22field_1%22%2C%22operator%22%3A%22near%22%2C%22value%22%3A%22ames%22%2C%22units%22%3A%22miles%22%2C%22range%22%3A%22100000%22%7D%5D&view_7_page=1
# https://www.smartrecoverytest.org/local/full-meeting-list-download/
# https://locator.crgroups.info/
# https://www.facebook.com/crushofiowa/
# https://refugerecovery.org/meetings?tsml-day=any&tsml-region=iowa
#
#
# Authors: Jessie Bustin
# Dr. Heike Hoffman - Code to Create Schedule Column
# Load Libraries
# Load Libraries
library(tidyverse)
library(DSPG)
library(ggmap)
library(lubridate)
library(naniar)
# Load Datasets (one raw CSV per recovery-meeting organization)
adultChildren <- read.csv("Raw/Adult_childern_of_alcoholic.csv", stringsAsFactors = FALSE)
alanon <- read.csv("Raw/al-anon.csv")
idra <- read.csv("Raw/IDRA.csv")
narAnon <- read.csv("Raw/Nar_Anon_Dataset.csv")
smart <- read.csv("Raw/Recovery_Celebrate_SMART_IA_Meetings.csv")
# AdultChildren Change column names and drop unneeded columns
adultChildren <- adultChildren %>%
  mutate(Notes = paste0(Note, " Code: ", code, " Format: ", Types)) %>%
  select(-c(X, time.zone, zip, code, Note, Types)) %>%
  rename(Meeting = name, Location = Address, Phone = Tel, Contact.Person = contact.person)
# AdultChildren Clean City Column (strip parenthesised suffixes) and Create
# Full Address Column ("Location, City, State")
adultChildren <- adultChildren %>%
  mutate(City = gsub("\\(.*", "", City)) %>%
  mutate(Address = paste0(Location, ", ", City, ", ", State))
# Alanon Separate Time Column into Day / Time / AmPm
alanon <- alanon %>%
  separate(MEETING_TIME, c("Day", "Time", "AmPm"), sep = "\\ ") %>%
  select(-c(ID, SHOW_MEETINGS_WITH, GROUP_ID, DESCRIPTION)) %>%
  separate(ADDRESS, c("Location", "City", "State"), sep = "\\,", remove = FALSE) %>%
  rename(Meeting = NAME, Address = ADDRESS, Notes = LOCATION_INSTRUCTION,
         Phone = PHONE, Website = WEBSITE, Email = EMAIL) %>%
  mutate(Type = "Al-anon")
# narAnon Split Time Column, Select Columns, and Fix Location/Address Columns
narAnon <- narAnon %>%
  mutate(Location = Street) %>%
  # NOTE(review): "ï.." is a UTF-8 BOM artifact in the first CSV column name;
  # reading with fileEncoding = "UTF-8-BOM" would avoid it.
  select(-c("ï..", "Zip", "Street")) %>%
  mutate(Time = substr(Time, 0, nchar(Time)-2)) %>%
  mutate(AmPm = "pm") %>%
  mutate(Address = paste0(Location, ", ", City, ", ", State))
# IDRA
idra <- idra %>%
  rename(Notes = Types, Meeting = Name, Location = Address, Phone = Tel, Email = email) %>%
  mutate(Address = paste0(Location, ", ", City, ", ", State))
# Drop Smart Columns and Separate Address
smart <- smart %>%
  select(-Code) %>%
  separate(Address, c("Location"), ", ", remove = FALSE)
# Join Data: stack all non-AA/NA sources into one frame
non_AANA <- full_join(adultChildren, alanon)
non_AANA <- full_join(non_AANA, idra)
non_AANA <- full_join(non_AANA, narAnon)
non_AANA <- full_join(non_AANA, smart)
# Dropping Website Column
non_AANA <- non_AANA %>%
  select(-Website)
# Geocoding All Data (hits the Google API via ggmap)
# -Do not rerun geocoding unless necessary-
#non_AANA <- non_AANA %>%
#  mutate_geocode(location = Address)
#write.csv(non_AANA, "NonAANA_Geocoded.csv")
# Read Geocoded Data Back In
non_AANA <- read.csv("DataMerge/NonAANA_Geocoded.csv")
non_AANA <- non_AANA %>%
  select(-X)
# convert meeting times into times
# NOTE(review): `meetings` is not defined at this point in the script —
# these two lines presumably meant non_AANA$Time / non_AANA$AmPm, and they
# duplicate the block below; confirm before running top-to-bottom.
non_AANA$time <- ymd_hm(paste0("2020/07/04", meetings$Time, meetings$AmPm))
# Edit Day That Wasn't in Factor Levels Because of Typo
non_AANA[2, 2] <- "Wednesday"
### introduce schedule variable
#meetings$schedule <- with(meetings, lubridate::hm(paste0(Time, AmPm)))
# hm does not respect am or pm
# ---This section of code Was Contributed by Dr. Heike Hoffman---
#
# convert meeting times into times
# NOTE(review): duplicate of the lines above, with the same undefined
# `meetings` reference.
non_AANA$time <- ymd_hm(paste0("2020/07/04", meetings$Time, meetings$AmPm))
# Edit Day That Wasn't in Factor Levels Because of Typo
non_AANA[2, 2] <- "Wednesday"
# Parse "H:MMam/pm" strings, then build a duration offset from week start.
times <- with(non_AANA, strptime(paste0(Time, AmPm), format = "%I:%M%p"))
non_AANA$schedule <- with(non_AANA, lubridate::hm(paste(hour(times), minute(times), sep=":")))
non_AANA$Day <- factor(non_AANA$Day,
                       levels = c("Monday", "Tuesday", "Wednesday",
                                  "Thursday", "Friday", "Saturday",
                                  "Sunday"))
# schedule = time-of-day plus whole days elapsed since Monday.
helper <- as.numeric(non_AANA$Day) - 1
non_AANA$schedule <- non_AANA$schedule + days(helper)
#Rename and add columns to match new meeting data columns
non_AANA <- non_AANA %>%
  rename(street = Location) %>%
  mutate(classification = "substance abuse treatment meetings") %>%
  select(-time)
#Data to
# Merge with AA/NA Data
# NOTE(review): `meetings` still undefined here — the AA/NA frame must be
# loaded earlier for this join to work.
all_meetings <- full_join(meetings, non_AANA)
# lowerCase AmPm column
all_meetings <- all_meetings %>%
  mutate(AmPm = tolower(AmPm))
# Fill in Blanks and NAs in Meeting Column with the meeting Type
all_meetings <- all_meetings %>%
  mutate(Meeting = case_when(Meeting == "" ~ Type, is.na(Meeting) ~ Type, TRUE ~ Meeting))
# Fill in blanks in the rest of the Data
#all_meetings <- all_meetings %>%
#  replace_with_na_all(condition = ~.x == "")
# Write to csv
# write.csv(all_meetings, "DataMerge/All_Meetings_Geocoded.csv")
#
# meetings <- all_meetings
#
# meetings$type <- factor(meetings$type)
# levels(meetings$type)[6] <- "Iowa Dual Recovery Anonymous (IDRA)"
# meetings$type <- as.character(meetings$type)
# use_data(meetings, overwrite = TRUE)
#
# idx <- which(str_detect( meetings$address, "\xa0"))
# meetings$address <- str_replace_all(meetings$address, "\xa0", "")
# use_data(meetings, overwrite = TRUE)
#
# meetings$city <- str_replace_all(meetings$city, "\xa0", "")
# use_data(meetings, overwrite = TRUE)
#
# meetings$meeting <- str_replace_all(meetings$meeting, "\xa0", " ")
# use_data(meetings, overwrite = TRUE)
#read in geocoded meeting data
full_meetings <- read.csv("raw/All_Meetings_Geocoded.csv")
full_meetings <- full_meetings %>%
  select(-c("X")) %>%
  rename(street = location, latitude = lat, longitude = lon) %>%
  mutate(classification = "substance abuse treatment meetings") %>%
  mutate(time = substr(time, 1, 5))
#full_meetings$time <- ymd_hm(paste0("2020/07/04", full_meetings$time, full_meetings$ampm))
# Same schedule construction as above, applied to the geocoded file.
times <- with(full_meetings, strptime(paste0(time, ampm), format = "%I:%M%p"))
full_meetings$schedule <- with(full_meetings, lubridate::hm(paste(hour(times), minute(times), sep=":")))
full_meetings$day <- factor(full_meetings$day,
                            levels = c("Monday", "Tuesday", "Wednesday",
                                       "Thursday", "Friday", "Saturday",
                                       "Sunday"))
helper <- as.numeric(full_meetings$day) - 1
full_meetings$schedule <- full_meetings$schedule + days(helper)
meetings <- full_meetings
usethis::use_data(meetings, overwrite = TRUE)
# Give IDRA its full descriptive name in the type column.
meetings$type <- factor(meetings$type)
levels(meetings$type)[6] <- "Iowa Dual Recovery Anonymous (IDRA)"
meetings$type <- as.character(meetings$type)
usethis::use_data(meetings, overwrite = TRUE)
# Strip non-breaking spaces (\xa0) that survived the CSV round-trip.
idx <- which(str_detect( meetings$address, "\xa0"))
meetings$address <- str_replace_all(meetings$address, "\xa0", "")
usethis::use_data(meetings, overwrite = TRUE)
meetings$city <- str_replace_all(meetings$city, "\xa0", "")
usethis::use_data(meetings, overwrite = TRUE)
meetings$meeting <- str_replace_all(meetings$meeting, "\xa0", " ")
usethis::use_data(meetings, overwrite = TRUE)
|
0e438c28aaa5692cec97883e74436ca178db7f80
|
fae3b5d5d08abef91629ab8d0b087ac36e46ea06
|
/NBAtools/R/grandpa.R
|
e9f2e51bbab6fdab896fcaffea105bfced12f512
|
[] |
no_license
|
gustavonovoa/NBAtools
|
130d8c5e72f01553a962752969f38df57a2c5f79
|
1a139fb7922d233c3718968138b113fdf3980089
|
refs/heads/master
| 2022-02-02T03:23:02.227165
| 2019-06-24T02:13:11
| 2019-06-24T02:13:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
grandpa.R
|
#' Find the oldest player
#'
#' Returns the oldest player (first alphabetically if tied) for a given year.
#' Fixes: roxygen markers were written as "#`" (backtick) and were therefore
#' ignored; ties are now actually broken alphabetically as documented; the
#' previously-discarded as.data.frame() result is now assigned.
#'
#' @param year Season year to look up.
#' @return A one-row data frame with Player and Age.
#' @keywords old age nba
#' @export
#' @examples
#' grandpa(2017)
# NOTE(review): reading the CSV at package top level runs at load time;
# consider moving this into a data-loading helper.
NBAdata <- readr::read_csv("Seasons_Stats.csv")
grandpa <- function(year){
  yearData <- dplyr::filter(NBAdata, Year == year)
  ages <- dplyr::select(yearData, Player, Age)
  # Oldest first; Player name breaks age ties alphabetically.
  ages <- dplyr::arrange(ages, dplyr::desc(Age), Player)
  ages <- as.data.frame(ages)
  ages[1, ]
}
|
1956784ae3c2e48ab1bd97c94f7c5f1b86a19b0e
|
ce139910de57ee90a2f14a5f9dc6707d6558a71e
|
/mogp_emulator/demos/gp_demo.R
|
3d2d5b1fa3ec4ba18aed47dcb887f3c6b2bbc5c5
|
[] |
no_license
|
homerdurand/HistoryMatching_ExeterUQ
|
0d30a35ca3099c39466b875c21f12f700a6c08bd
|
7944785eaa3b789669a1c3f779755cdf6c65c5e5
|
refs/heads/main
| 2023-07-23T13:58:09.844857
| 2021-09-03T12:36:24
| 2021-09-03T12:36:24
| 390,157,314
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,109
|
r
|
gp_demo.R
|
# Short demo of how to fit and use the GP class to predict unseen values based on a
# mean function and prior distributions.
# Before loading reticulate, you will need to configure your Python Path to
# use the correct Python version where mogp_emulator is installed.
# mogp_emulator requires Python 3, but some OSs still have Python 2 as the
# default, so you may not get the right one unless you explicitly configure
# it in reticulate. I use the Python that I installed on my Mac with homebrew,
# though on Linux the Python installed via a package manager may have a
# different path.
# The environment variable is RETICULATE_PYTHON, and I set it to
# "/usr/local/bin/python" as this is the Python where mogp_emulator is installed.
# This is set automatically in my .Renviron startup file in my home directory,
# but you may want to configure it some other way. No matter how you decide
# to configure it, you have to set it prior to loading the reticulate library.
library(reticulate)
mogp_emulator <- import("mogp_emulator")
mogp_priors <- import("mogp_emulator.Priors")
# create some data
n_train <- 10
x_scale <- 2.
x1 <- runif(n_train)*x_scale
x2 <- runif(n_train)*x_scale
y <- exp(-x1**2 - x2**2)
x <- data.frame(x1, x2, y)
# GaussianProcess requires data as a matrix, but often you may want to do some
# regression using a data frame in R. To do this, we can split this data frame
# into inputs, targets, and a dictionary mapping column names to integer indices
# using the function below
extract_targets <- function(df, target_cols = list("y")) {
  # Split a data frame into an inputs matrix, a targets matrix/vector, and an
  # inputdict (reticulate dict mapping input column names to 0-based column
  # indices) for use with the mogp_emulator GaussianProcess class.
  #
  # Bug fixes vs. the original:
  #  - the body referenced the global `x` instead of the `df` argument, so the
  #    function only worked by accident when a global `x` existed;
  #  - target_count was never incremented, so with multiple target columns
  #    every target overwrote column 1.
  for (t in target_cols) {
    stopifnot(t %in% names(df))
  }
  n_targets <- length(target_cols)
  inputs <- matrix(NA, ncol = ncol(df) - n_targets, nrow = nrow(df))
  targets <- matrix(NA, ncol = n_targets, nrow = nrow(df))
  inputdict <- dict()  # reticulate dict; Python side expects 0-based indices
  input_count <- 1
  target_count <- 1
  for (n in names(df)) {
    if (n %in% target_cols) {
      targets[, target_count] <- as.matrix(df[n])
      target_count <- target_count + 1
    } else {
      inputs[, input_count] <- as.matrix(df[n])
      inputdict[n] <- as.integer(input_count - 1)
      input_count <- input_count + 1
    }
  }
  # A single target is returned as a plain vector, matching mogp's API.
  if (n_targets == 1) {
    targets <- c(targets)
  }
  return(list(inputs, targets, inputdict))
}
target_list <- extract_targets(x)
inputs <- target_list[[1]]
targets <- target_list[[2]]
inputdict <- target_list[[3]]
# Create the mean function formula as a string (or you could extract from the
# formula found via regression). If you want correct expansion of your formula
# in the Python code, you will need to install the patsy package (it is pip
# installable) as it is used internally in mogp_emulator to parse formulas.
# Additionally, you will need to convert the column names from the data frame
# to integer indices in the inputs matrix. This is done with a dict object as
# illustrated below.
mean_func <- "y ~ x1 + x2 + I(x1*x2)"
# Priors are specified by giving a list of prior objects (or NULL if you
# wish to use weak prior information). Each distribution has some parameters
# to set -- NormalPrior is (mean, std), Gamma is (shape, scale), and
# InvGammaPrior is (shape, scale). See the documentation or code for the exact
# functional format of the PDF.
# If you don't know how many parameters you need to specify, it depends on
# the mean function and the number of input dimensions. Mean functions
# have a fixed number of parameters (though in some cases this can depend
# on the dimension of the inputs as well), and then covariance functions have
# one correlation length per input dimension plus a covariance scale and
# a nugget parameter.
# If in doubt, you can create the GP instance with no priors, use gp$n_params
# to get the number, and then set the priors manually using gp$priors <- priors
# In this case, we have 4 mean function parameters (normal distribution on a
# linear scale), 2 correlations lengths (normal distribution on a log scale,
# so lognormal), a sigma^2 covariance parameter (inverse gamma) and a nugget
# (Gamma). If you choose an adaptive or fixed nugget, the nugget prior is ignored.
priors <- list(mogp_priors$NormalPrior(0., 1.),
mogp_priors$NormalPrior(0., 1.),
mogp_priors$NormalPrior(0., 1.),
mogp_priors$NormalPrior(0., 1.),
mogp_priors$NormalPrior(0., 1.),
mogp_priors$NormalPrior(0., 1.),
mogp_priors$InvGammaPrior(2., 1.),
mogp_priors$GammaPrior(1., 0.2))
# Finally, create the GP instance. If we had multiple outputs, we would
# create a MultiOutputGP class in a similar way, but would have the option
# of giving a single mean and list of priors (assumes it is the same for
# each emulator), or a list of mean functions and a list of lists of
# prior distributions. nugget can also be set with a single value or a list.
gp <- mogp_emulator$GaussianProcess(inputs, targets,
mean=mean_func,
priors=priors,
nugget="fit",
inputdict=inputdict)
# gp is fit using the fit_GP_MAP function. It accepts a GaussianProcess or
# MultiOutputGP object and returns the same type of object with the
# hyperparameters fit via MAP estimation, with some options for how to perform
# the minimization routine. You can also pass the arguments to create a GP/MOGP
# to this function and it will return the object with estimated hyperparameters
gp <- mogp_emulator$fit_GP_MAP(gp)
print(gp$current_logpost)
print(gp$theta)
# now create some test data to make predictions and compare with known values
n_test <- 10000
x1_test <- runif(n_test)*x_scale
x2_test <- runif(n_test)*x_scale
x_test <- cbind(x1_test, x2_test)
y_actual <- exp(-x1_test**2 - x2_test**2)
y_predict <- gp$predict(x_test)
# y_predict is an object holding the mean, variance and derivatives (if computed)
# access the values via y_predict$mean, y_predict$unc, and y_predict$deriv
print(sum((y_actual - y_predict$mean)**2)/n_test)
|
ca5cc6ec4e42480561daeda23143eb072657c8e4
|
bc0424df42c5083b060c9e403c5319f49bbaca51
|
/cachematrix.R
|
6dc1cb0a7e906e981ee1f0b992728ede92af4abc
|
[] |
no_license
|
realkenlee/ProgrammingAssignment2
|
c69c30f656c2611f6851626c62bfce38f50d6aa1
|
a558ecf91cf2b09c58488579666f873798f724af
|
refs/heads/master
| 2021-01-20T23:02:41.398825
| 2015-09-15T22:41:23
| 2015-09-15T22:41:23
| 42,483,238
| 0
| 0
| null | 2015-09-14T23:38:23
| 2015-09-14T23:38:22
| null |
UTF-8
|
R
| false
| false
| 1,534
|
r
|
cachematrix.R
|
## The following is submission to Coursera's R Programming Assignment #2
## offered by Johns Hopkins University
## Below are implementation of two functions
## makeCacheMatrix: This function creates a special "matrix" object that can cache
## its inverse.
## cacheSolve: This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated (and
## the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
## makeCacheMatrix stores a matrix and its inverse. It also has set and get
## functions.
## Creates a special "matrix" object that caches its inverse.
## Returns a list of accessors: set/get for the matrix, setinverse/getinverse
## for the cached inverse. The cache lives in the closure variable `i`.
makeCacheMatrix <- function(x = matrix()) {
        i <- NULL  # cached inverse; NULL means "not yet computed"
        set <- function(y) {
                x <<- y
                # Bug fix: was `inverse <<- NULL`, which assigned an unrelated
                # variable and never invalidated the cached inverse `i` when
                # the underlying matrix changed.
                i <<- NULL
        }
        get <- function() x
        setinverse <- function(inverse) i <<- inverse
        getinverse <- function() i
        list(set = set, get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## cacheSolve will return inverse of a given makeCacheMatrix object
## it first checks if it's inverse has been calculated, if not it
## uses base function solve. cacheSolve only works for invertible matrix
## Returns the inverse of a makeCacheMatrix object. The inverse is computed
## (with base solve(); the matrix must be invertible) and cached on first
## use; later calls reuse the cached value and print a message.
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                ## Cache miss: compute, store, and return the inverse.
                inv <- solve(x$get())
                x$setinverse(inv)
                return(inv)
        }
        message("getting cached data")
        cached
}
|
bf3809889bd9af6874c6004732d7574eefc9f2a5
|
c91b227c26552d207765aee509d896d1daa424a3
|
/R/plots.R
|
16287121c3ffe58b02e52898c3648509a8a40096
|
[] |
no_license
|
cran/amei
|
c3827194ce5c6fdd2f3ef4b13be5cc7a2ee9964e
|
6ec4dc1abc827534b1920db6d4911e38cd16224c
|
refs/heads/master
| 2016-09-06T07:55:28.224132
| 2013-12-13T00:00:00
| 2013-12-13T00:00:00
| 17,694,356
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,606
|
r
|
plots.R
|
## S3 plot method for "epiman" objects. Dispatches on `type`:
##   "epi"    - epidemic trajectory (PlotEpi)
##   "costs"  - cost evolution (PlotCosts; requires a vaccination strategy)
##   "params" - MCMC parameter inference (PlotParams; `prior` overlays priors,
##              `tp` passes true parameter values)
##   "fracs"  - density of vaccination fractions over time
##   "stops"  - density of stop thresholds over time
`plot.epiman` <-
function (x, type = c("epi", "costs", "params", "fracs", "stops"),
    showd = FALSE, showv=FALSE, prior=FALSE, main = NULL, ylim=NULL, tp = NULL, ...)
{
    type <- match.arg(type)
    if (type == "epi") {
        if (is.null(main))
            main <- "Evolution of Epidemic"
        PlotEpi(x$soln, showd = showd, showv=showv, main = main)
    }
    else if (type == "costs") {
        # Costs only exist when a vaccination strategy was used.
        if(is.null(x$pols)) stop("no vaccination strategy used")
        if (is.null(main))
            main <- "Evolution of Costs"
        PlotCosts(x$soln, main = main,ylim=ylim)
    }
    else if (type == "params") {
        if (is.null(main))
            main <- "MCMC Inference"
        # Only pass hyperparameters through when prior overlays are requested.
        if(prior) hyper <- x$hyper
        else hyper <- NULL
        PlotParams(x$samp, NULL, tp, hyper)
    }
    else if (type == "fracs") {
        if(is.null(x$vachist)) stop("no vaccination strategy used")
        ylab <- "vaccination fraction"
        TimeSeriesOfDensities(x$vachist$fracs, x$vactimes,
            c(-0.1, 1.1), ylab)
        if (!is.null(main))
            title(main)
    }
    else {
        # type == "stops": y-range is [0, initial susceptible count].
        if(is.null(x$vachist)) stop("no vaccination strategy used")
        ylim <- "stop number"
        TimeSeriesOfDensities(x$vachist$stops, x$vactimes,
            c(0, x$soln$S[1]), ylim)
        if (!is.null(main))
            title(main)
    }
}
## S3 plot method for "optvac" objects: heat-map image of the cost surface
## over the (vaccination fraction, stop number) grid, with a colour legend
## strip on the right and the best/worst policy costs printed on the surface.
`plot.optvac` <-
function (x, main = NULL, ...)
{
    # 4x5 layout: columns 1-4 for the surface (panel 1), column 5 for the
    # legend strip (panel 2).
    mylayout <- matrix(1,4,5)
    mylayout[,5] <- 2
    layo <- layout(mylayout)
    cmin <- min(x$C)
    cmax <- max(x$C)
    # 1x100 matrix of cost values used to draw the legend gradient.
    cmat <- matrix(seq(cmin,cmax,length=100),1,100)
    if (is.null(main))
        main <- "Optimal vaccination policy surface"
    image(x$vacgrid$fracs, x$vacgrid$stops, x$C, main = main,
        xlab = "fraction", ylab = "stop number",col=heat.colors(100), ...)
    grid(length(x$vacgrid$fracs), length(x$vacgrid$stops), lty = 1,
        col = "black")
    # Annotate the cheapest and most expensive policies on the surface.
    best <- getpolicy(x)
    worst <- getpolicy(x, "worst")
    text(best$frac, best$stop, best$cost,cex=1)
    text(worst$frac, worst$stop, worst$cost,cex=1)
    # Legend strip with axis labels on the cost scale.
    image(1,seq(cmin,cmax,length=100),cmat,axes=FALSE,xlab='',ylab='',main='',col=heat.colors(100))
    axis(2,at=floor(seq(cmin,cmax,length=10)),lwd=0,las=1)
    mtext('legend',line=1)
}
## S3 plot method for "MCepi" (Monte Carlo epidemic) objects. Draws the
## median trajectory with first/third-quartile bands for one of:
##   "epi"   - epidemic state, "costs" - accumulated costs,
##   "fracs" - vaccination fraction, "stops" - stopping threshold.
`plot.MCepi` <-
function (x, type = c("epi", "costs", "fracs", "stops"), showd = FALSE,
    showv = FALSE, main = NULL, ylim = NULL, ...)
{
    type <- match.arg(type)
    if (type == "epi") {
        if (is.null(main)) main <- "Monte Carlo Epidemics"
        # Median curve first, then quartile curves overlaid with add = TRUE.
        PlotEpi(x$Median, showd = showd, showv = showv, main = main,
            ...)
        PlotEpi(x$Q1, add = TRUE, showd = showd, showv = showv)
        PlotEpi(x$Q3, add = TRUE, showd = showd, showv = showv)
    }
    else if (type == "costs") {
        if (is.null(main)) main <- "Monte Carlo Costs"
        # Default y-range spans the interquartile band.
        if(is.null(ylim)) ylim <- c(min(x$Q1$C),max(x$Q3$C))
        PlotCosts(x$Median, ylim = ylim,
            main = main, ...)
        PlotCosts(x$Q1, add = TRUE)
        PlotCosts(x$Q3, add = TRUE)
    }
    else if (type == "fracs") {
        if (is.null(main)) main <- "Monte Carlo Fraction Vaccinated"
        # Solid median line, dashed quartile lines.
        plot(x$Median$frac, type = "l", lty = 1, lwd = 2,
            xlab = "time", ylab = "fraction",
            ylim = c(min(x$Q1$frac), max(x$Q3$frac)), main = main, ...)
        lines(x$Q1$frac, lty = 2, lwd = 2)
        lines(x$Q3$frac, lty = 2, lwd = 2)
    }
    else {
        if (is.null(main)) main <- "Monte Carlo Stopping Threshold"
        plot(x$Median$stop, type = "l", lty = 1, lwd = 2,
            xlab = "time", ylab = "stop time",
            ylim = c(min(x$Q1$stop), max(x$Q3$stop)), main=main, ...)
        lines(x$Q1$stop, lty = 2, lwd = 2)
        lines(x$Q3$stop, lty = 2, lwd = 2)
    }
}
|
e6fcb75e10350d10f77972dc57e862df5b2b3c28
|
d65b6061f2470b42adcf56eb76da4b561e076e6d
|
/postprocessing.R
|
aab606e880adc5c6353d761fda26840b4b919616
|
[] |
no_license
|
Rosemary94/COVID_vax_CD8
|
60feb732da09580a44394bad030e19d87f60fbf0
|
6b64879d3df5320fffd2043579b477c7d1fee675
|
refs/heads/master
| 2023-06-15T04:26:47.109367
| 2021-07-09T21:55:58
| 2021-07-09T21:55:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,295
|
r
|
postprocessing.R
|
source("functions.R")
##part 1: seurat analysis
#Load aggregated 10X data and create Seurat object
cov1.data <- Read10X(data.dir = "libs_aggregate/filtered_feature_bc_matrix/")
# Step 1: build a Seurat object from 10x gene-expression counts, QC-filter
# cells, normalize, cluster and embed (UMAP/t-SNE); then subset down to CD8
# T cells and (part 2) attach CITE-seq / TCR / dextramer metadata via
# data.table. NOTE(review): depends on objects defined earlier in the file
# (`cov1.data`, `add_citeseq_tcrseq_metadata`, `write.tsv`) -- run top to bottom.
cov1 <- CreateSeuratObject(counts = cov1.data$`Gene Expression`)
#Remove low quality cells
# Percentage of reads mapping to mitochondrial genes (names starting "MT-").
cov1[["percent.mt"]] <- PercentageFeatureSet(cov1, pattern = "^MT-")
plot1 <- FeatureScatter(cov1, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(cov1, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
plot1 + plot2
# QC gates: 200 < genes/cell < 5000 and <10% mitochondrial reads.
cov1 <- subset(cov1, subset = nFeature_RNA > 200 & nFeature_RNA < 5000 & percent.mt < 10)
cov1<- NormalizeData(cov1, normalization.method = "LogNormalize", scale.factor = 10000)
#Find 2000 variable genes excluding TCR and BCR genes
markers.remove<-fread("markers.remove.csv", stringsAsFactors = F)
cov.m_filt<-cov1
# Ask for 135 extra features so that ~2000 remain after dropping TCR/BCR genes.
cov.m_filt <- FindVariableFeatures(cov.m_filt, selection.method = "vst", nfeatures = 2000+135)
VariableFeatures(cov.m_filt) <- VariableFeatures(cov.m_filt)[!(VariableFeatures(cov.m_filt) %in% markers.remove$V2)]
#Regress cell cycle variability
all.genes <- rownames(cov.m_filt)
s.genes <- cc.genes$s.genes
g2m.genes <- cc.genes$g2m.genes
cov.m_filt <- CellCycleScoring(cov.m_filt, s.features = s.genes, g2m.features = g2m.genes, set.ident = TRUE)
cov.m_filt<- ScaleData(cov.m_filt, vars.to.regress = c("S.Score", "G2M.Score"), features = all.genes)
# Dimensionality reduction + clustering on the first 15 PCs.
cov.m_filt <- RunPCA(cov.m_filt, features = VariableFeatures(object = cov.m_filt) , verbose = FALSE)
cov.m_filt <- FindNeighbors(cov.m_filt, dims = 1:15)
cov.m_filt <- FindClusters(cov.m_filt, resolution = 0.5, verbose = FALSE)
cov.m_filt <- RunUMAP(cov.m_filt, dims = 1:15)
cov.m_filt <- RunTSNE(cov.m_filt, dims = 1:15)
# 10x aggr appends "-<batch>" to each barcode; recover the batch id from it.
cov.m_filt$replicate<-(sapply(X = strsplit(colnames(cov.m_filt), split = "-"), FUN = "[", 2))
saveRDS(cov.m_filt, file = "10x_aggr_step1.rds")
umapCoord <- as.data.frame(Embeddings(object = cov.m_filt[["umap"]]))
tsneCoord<-as.data.frame(Embeddings(object = cov.m_filt[["tsne"]]))
tmp<-cbind(umapCoord, batch=cov.m_filt$replicate, cluster=cov.m_filt$seurat_clusters, tsneCoord, nCount=cov.m_filt$nCount_RNA, nFeature=cov.m_filt$nFeature_RNA)
write.csv(tmp, file="10x_aggr_step1.csv")
#Remove B cells and monocytes
aggr<-readRDS("10x_aggr_step1.rds")
Idents(aggr) <- "seurat_clusters"
cl.markers <- FindAllMarkers(aggr, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# Clusters 8, 10 and 11 are the non-T populations being dropped here.
aggr.s<-subset(aggr, idents = c(8,10, 11), invert=T)
#find carrier cell barcodes
inf_analysis<-add_citeseq_tcrseq_metadata("10x_aggr_step1.csv")
writeLines(inf_analysis[donor=="1814",barcode,],con = "barcodes_1814.tsv")
#Remove donor #1814 (carrier cells) and all remaining non cd8 cells
Idents(aggr.s) <- "orig.ident"
d1814<-fread("barcodes_1814.tsv", stringsAsFactors = F, header = F)
aggr.f<-aggr.s[,!colnames(aggr.s) %in% d1814$V1]
Idents(aggr.f) <- "orig.ident"
# Keep cells expressing CD8A or CD8B above 0.5 (log-normalized units).
cd8<-subset(x = aggr.f, subset = (CD8A>0.5|CD8B>0.5), invert=F)
cd8 <- FindNeighbors(cd8, dims = 1:15)
cd8 <- FindClusters(cd8, resolution = 0.5, verbose = FALSE)
cd8 <- RunUMAP(cd8, dims = 1:15)
cd8 <- RunTSNE(cd8, dims = 1:15)
DimPlot(cd8, label=T)
cd8.markers <- FindAllMarkers(cd8, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
umapCoord <- as.data.frame(Embeddings(object = cd8[["umap"]]))
tsneCoord<-as.data.frame(Embeddings(object = cd8[["tsne"]]))
# Stash expression of marker genes on the object so they can be exported below.
cd8$CD8A<-FetchData(cd8, vars = "CD8A")
cd8$CD8B<-FetchData(cd8, vars = "CD8B")
cd8$IL7R<-FetchData(cd8, vars = "IL7R")
cd8$s100a4<-FetchData(cd8, vars = "S100A4")
cd8$cd27<-FetchData(cd8, vars = "CD27")
cd8$cd4<-FetchData(cd8, vars = "CD4")
cd8$CCR7<-FetchData(cd8, vars = "CCR7")
cd8$TCF7<-FetchData(cd8, vars = "TCF7")
cd8$GZMB<-FetchData(cd8, vars = "GZMB")
cd8$GZMH<-FetchData(cd8, vars = "GZMH")
cd8$SELL<-FetchData(cd8, vars = "SELL")
cd8$GZMK<-FetchData(cd8, vars = "GZMK")
tmp<-cbind(umapCoord, batch=cd8$replicate, cluster=cd8$seurat_clusters, tsneCoord, nCount=cd8$nCount_RNA, nFeature=cd8$nFeature_RNA,
           CD8A=cd8$CD8A, CD8B=cd8$CD8B, IL7R=cd8$IL7R, s100a4=cd8$s100a4, cd27=cd8$cd27, cd4=cd8$cd4,
           ccr7=cd8$CCR7, tcf7=cd8$TCF7, gzmb=cd8$GZMB, gzmh=cd8$GZMH, sell=cd8$SELL, gzmk=cd8$GZMK)
write.csv(tmp, file="cd8_only.csv")
saveRDS(cd8, file = "cd8_only.rds")
##Part 2: add Citeseq, TCR, dextramer assignments for cd8 cells identified on previous step.
inf_analysis8<-add_citeseq_tcrseq_metadata("cd8_only.csv")
legend<-fread("metadata/dextramer_legend_infvax.txt")
setkey(legend,batch,hash,dex)
#fix for double dextramer assignments
tmp<-character()
# NOTE(review): `1:nrow(...)` breaks when the filtered table is empty;
# seq_len(nrow(...)) would be safer -- verify before reuse on other datasets.
for (i in 1:nrow(inf_analysis8[grepl("_",bestdex16),]))
{
  ind<-inf_analysis8[grepl("_",bestdex16),,][i,.(batch,hash,dex=as.integer(unlist(strsplit(bestdex16,split = "_")))),]
  tmp[i]<-(legend[ind,paste(dex_id,collapse="|"),])
  # NOTE(review): this `:=` operates on a temporary filtered copy of the
  # data.table, so it does not persist; the bulk assignment from `tmp` just
  # below this loop is what actually updates `dextr`.
  inf_analysis8[grepl("_",bestdex16),,][i,dextr:=legend[ind,paste(dex_id,collapse="|"),],]
}
inf_analysis8[grepl("_",bestdex16),dextr:=tmp,]
#filter low confidence dextramer assignments
inf_analysis8[bestdex16_max<4,dextr:=NA,]
inf_analysis8[bestdex16_max<4,spike:=NA,]
# Clonotype ids: full (alpha+beta) and beta-only, grouped within batch/donor.
inf_analysis8[!is.na(cdr3b_nt)&!is.na(cdr3a_nt),clonotype_id:=.GRP,.(batch,donor,cdr3b_nt,cdr3a_nt)]
inf_analysis8[!is.na(cdr3b_nt),clonotype_id_beta:=.GRP,.(batch,donor,cdr3b_nt)]
#dextr_clone: imputation of dextramer assignment for a clonotype.
inf_analysis8[,dextr_clone:=dextr,.(clonotype_id,donor)]
# Majority vote: take the most frequent dextramer label within each clonotype.
inf_analysis8[!is.na(clonotype_id),dextr_clone:=names(sort(-table(dextr)))[1],.(clonotype_id,donor)]
#spike_clone: imputation of spike/non-spike assignment for a clonotype.
inf_analysis8$spike_clone<-FALSE
inf_analysis8[dextr_clone%in%c("A24_NYN","B15_NAF","B15_NQF","B44_AEA","B44_AEV","A24_QYI","A02_YLQ","A01_LTD","B15_NAF|B15_NQF","B15_NQF|B15_NAF"),spike_clone:=TRUE,]
inf_analysis8[is.na(dextr_clone),spike_clone:=NA,]
#epitope column: fixed NQF/NAF double assignments, filtered other umbiguous assignments
inf_analysis8[dextr=="B15_NAF|B15_NQF",dextr:="B15_NQF|B15_NAF",]
inf_analysis8[dextr%in%c("B15_NAF|B15_NQF","B15_NQF|B15_NAF"),spike:=TRUE,]
inf_analysis8[,epitope:=dextr_clone,]
inf_analysis8[epitope%in%c("B15_NAF","B15_NQF","B15_NAF|B15_NQF","B15_NQF|B15_NAF"),epitope:="B15_NQF_NAF",]
inf_analysis8[grepl("|",epitope,fixed=T),epitope:=NA,]
inf_analysis8[is.na(epitope),spike_clone:=NA,]
#write out final table
write.tsv(inf_analysis8,fname="cd8_only_dextr.tsv")
|
f5f6ce2bc4931a08025387a19255696f6a160bd0
|
04de6fa4413d180e31040d2915eff9b9049c06f0
|
/modules/Table_Data_module.R
|
986bf81e389b899aea98936b19b4be68b50ecc5d
|
[] |
no_license
|
bastianilso/ILO-Project
|
7b49b9b31a1108df5fb930a46d4f9273e7a8dffb
|
e4f9c37e1405fec6e57fc76a852b6a3da3b11fc8
|
refs/heads/main
| 2023-06-12T00:31:19.642002
| 2021-06-24T07:26:21
| 2021-06-24T07:26:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 295
|
r
|
Table_Data_module.R
|
#Table_Data
# UI half of the Table_Data shiny module: emits a fluidRow holding a single
# table output whose id is namespaced under the module id.
#
# id : module id string; passed to shiny::NS() so the output id matches the
#      one rendered by the matching server function Table_Data().
# Returns a list of UI tags.
Table_Data_UI <- function(id) {
  ns <- NS(id)  # `<-` instead of `=` for assignment (tidyverse style)
  list(
    fluidRow(
      # Fix: trailing comma removed -- it created an empty argument to
      # fluidRow(), which errors under older htmltools versions.
      tableOutput(ns("Table_With_Data"))
    )
  )
}
# Server half of the Table_Data shiny module: renders the reactive data
# frame `df` into the "Table_With_Data" output declared by Table_Data_UI().
#
# input, output, session : standard shiny module server arguments.
# df                     : reactive expression yielding the data to display.
Table_Data <- function(input, output, session, df) {
  ns <- session$ns
  output$Table_With_Data <- renderTable({
    df()
  })
}
|
7100524824a2a4552677d1745476ea3ef3b2b8b1
|
b83cfb6a045040319338cf5e16c0392887993ee0
|
/Previous/spell correction.R
|
f64a6aee8a4e171af9846d290a17d922956c895c
|
[] |
no_license
|
xiaojiezhou/OftenUsed
|
75360c9318158e24045a646e97d61fbf9f02b44e
|
d016df176fa24b9763da0ce4aa408184f533273e
|
refs/heads/master
| 2022-06-22T03:52:56.112580
| 2020-05-06T14:42:37
| 2020-05-06T14:42:37
| 260,552,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,570
|
r
|
spell correction.R
|
# Spell-correction pipeline for free-text survey comments:
# 1) read comments from an Excel sheet, 2) build a term-frequency table with
# tm, 3) run rare terms (freq < 3) through aspell, 4) cross-check aspell's
# suggestions against frequent in-corpus terms (freq >= 10) by edit distance,
# 5) write a combined recommendation table.
library(XLConnect)
library(tm)
wb = loadWorkbook("B:\\XiaojieZhou\\2014\\GCR Survery\\From Others\\Global English CX Results to Jun 1.xlsx")
cmm = readWorksheet(wb, sheet = "report1433191502074", header = TRUE,startCol=13,endCol=13)
# cmm = readWorksheet(wb, sheet = "report1433191502074", header = TRUE,startCol=13,endCol=13, stringsAsFactors=False)
cmm=cmm[!is.na( cmm$CX.Service.Comments),1]
#--- Add replace words according to a dictionary
#--- Replace didn't by 'did not', etc...
#--- replace '/' with 'space' and '...' with '.'
# Strip everything except alphanumerics and spaces.
cmm = gsub("[^[:alnum:] ]",' ', cmm)
Comments = head(cmm, n=20L)
# NOTE(review): this immediately overwrites the 20-row sample above and
# processes the full vector; the head() line looks like leftover debugging.
Comments=cmm
txt<-VectorSource(Comments)
txt.corpus<-Corpus(txt)
txt.corpus<-tm_map(txt.corpus,tolower)
# txt.corpus<-tm_map(txt.corpus,removePunctuation)
txt.corpus<-tm_map(txt.corpus,removeNumbers)
#txt.corpus<-tm_map(txt.corpus,removeWords,stopwords("english"))
txt.corpus <- tm_map(txt.corpus, PlainTextDocument)
tdm<- TermDocumentMatrix(txt.corpus)
temp <- inspect(tdm)
# Term frequencies across all documents, sorted descending.
FreqMat <- data.frame(terms = rownames(temp), freq = rowSums(temp))
row.names(FreqMat) <- NULL
FreqMat<-FreqMat[order(FreqMat$freq,decreasing=T),]
FreqMat$terms=gsub("(<)[a-zA-Z0-9\\+]*(>)",'',FreqMat$terms)
terms<-as.character(FreqMat$terms)
freq<-FreqMat$freq
# Rare terms are spell-check candidates; frequent terms are the trusted corpus.
terms2test<- terms[FreqMat$freq<3]
terms_right<- terms[FreqMat$freq>=10]
class(terms2test)
class(terms_right)
###obtain the wrong words and dictionary suggestion
# program='C:\\Users\\shi.h.4\\AppData\\Roaming\\SepllCheckers\\Aspell\\bin\\aspell.exe'
program='C:\\Program Files (x86)\\Aspell\\bin\\aspell.exe'
result<-aspell(as.factor(terms2test),program=program)
# Keep only aspell's first suggestion per misspelling; NULL -> 'NA' string.
suggestion<-sapply(result$Suggestions, function(x) unlist(x)[1])
non.null.list <- lapply(suggestion,function(x)ifelse(is.null(x), 'NA', x))
suggestion<-unlist(non.null.list)
WrongTerms<-as.data.frame(cbind(result$Original,suggestion))
# Drop cases where the "suggestion" is the same word modulo case.
WrongTerms<-WrongTerms[!WrongTerms$V1== tolower(as.character(WrongTerms$suggestion)),]
# Edit-distance matrix: trusted corpus terms (rows) x misspellings (cols).
DisMat<-adist(terms_right,WrongTerms$V1)
min_dist_index<-apply(DisMat,2, function(x) which(x==min(x))[1] )
min_dist<-apply(DisMat,2, function(x) x=min(x))
CorpusReco<-tolower(as.character(terms_right[min_dist_index]))
df2<-cbind(WrongTerms, CorpusReco ,min_dist)
names(df2) = c("Misspelling", "DictReco", "CorpusReco", "Dist2Corpus")
head(df2, n=12)
# Prefer the corpus suggestion when it is one edit away, else the dictionary's.
df2$CombinedReco=as.character(df2$DictReco)
df2$CombinedReco[df2$Dist2Corpus ==1]=as.character(df2$CorpusReco[df2$Dist2Corpus ==1])
df2$ConsrvReco= (as.character(df2$DictReco) == as.character(df2$CorpusReco))
write.csv(df2, "C:\\Users\\zhou.x\\Desktop\\all corrected.csv")
|
5fe768ac48332ecd70a2c1c9bffd97ac58db7306
|
3f12973f2e3b3f96543181df0d0699aeb3c2a382
|
/Actividad 2.R
|
6ed81b9ebaa89ba27847cc0e3755d079561e8b88
|
[] |
no_license
|
Maria031/ALGORITMOS
|
60d6457a499ff466ac6b37c6d07b73a90f8e58d8
|
4f96206e85634f9c68a370c92cf9e3a17901bf11
|
refs/heads/master
| 2020-03-28T07:28:22.379279
| 2018-12-07T03:01:00
| 2018-12-07T03:01:00
| 147,903,662
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
Actividad 2.R
|
# Write the text "HolaMundo" out to Hola.csv.
# Fix: the original fused the data and the destination path into one string
# ("HolaMundo//C://Users//...") and passed no `file` argument, so the path was
# treated as data and nothing was written to Hola.csv. Data and path are now
# separate arguments (forward slashes work on Windows in R).
write.csv("HolaMundo", file = "C:/Users/madey/Documents/Algoritmos/Hola.csv")
|
3092725b2fb218efec8eb58db1f4da5a3636df4a
|
221072e790a97e05eea0debadbe87955b81ae2e1
|
/R/Fast5Files-methods.R
|
05eabb57dca88c2dad4b602f2c400646a658af67
|
[
"Apache-2.0"
] |
permissive
|
Shians/PorexploreR
|
2ca3d93956ba4592564c8019f1a76252a0696283
|
5086f3e704c6c6036a80d103752d9bce984a0e15
|
refs/heads/master
| 2020-06-06T05:51:25.714267
| 2019-06-20T07:44:16
| 2019-06-20T07:44:16
| 192,655,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 414
|
r
|
Fast5Files-methods.R
|
#' @include Fast5Files.R
#' @importFrom dplyr sample_n
NULL
# show(): console display for Fast5Files objects; delegates to print().
# NOTE(review): assumes a print method exists for Fast5Files elsewhere in the
# package -- otherwise this would recurse through the default show/print chain.
setMethod("show", signature = "Fast5Files",
          function(object) {
              print(object)
          })
# Generic for drawing a random subset of files from a Fast5Files collection.
setGeneric("sample_files", function(object, n) {
    standardGeneric("sample_files")
})
# sample_files(): sample n rows (without replacement, dplyr::sample_n default)
# from the object's underlying data frame and rewrap as Fast5Files.
# Guards against requesting more rows than exist.
setMethod("sample_files", signature = c("Fast5Files", "numeric"),
          function(object, n) {
              stopifnot(n <= nrow(object))
              Fast5Files(dplyr::sample_n(data.frame(object), n))
          })
|
e9c684a947a6b0396b259f140cba3490aed202c5
|
a16224a32558f9ec254688fb7c83de3c4c35fc25
|
/R/list.all.R
|
b53fca258576c1e5d45507689e432dd665418cfa
|
[
"MIT"
] |
permissive
|
renkun-ken/rlist
|
fb807f6f0162f52b08aa141104566f7c6e8c2dd6
|
bfaa2c50f79c9e8cdb3bce481262829549ba8e7e
|
refs/heads/master
| 2023-03-21T19:47:49.571870
| 2023-03-11T12:54:35
| 2023-03-11T12:54:35
| 20,375,257
| 187
| 31
|
NOASSERTION
| 2022-06-12T14:48:11
| 2014-06-01T10:33:11
|
R
|
UTF-8
|
R
| false
| false
| 2,127
|
r
|
list.all.R
|
#' Examine if a condition is true for all elements of a list
#'
#' @param .data A \code{list} or \code{vector}
#' @param cond A logical lambda expression
#' @param na.rm logical. If true \code{NA} values are ignored in
#' the evaluation.
#' @seealso \code{\link{list.any}}
#' @return \code{TRUE} if \code{cond} is evaluated to be \code{TRUE}
#' for all elements in \code{.data}.
#' @export
#' @examples
#' x <- list(p1 = list(type='A',score=list(c1=10,c2=8)),
#'        p2 = list(type='B',score=list(c1=9,c2=9)),
#'        p3 = list(type='B',score=list(c1=9,c2=7)))
#' list.all(x, type=='B')
#' list.all(x, mean(unlist(score))>=6)
#' list.all(x, score$c2 > 8 || score$c3 > 5, na.rm = TRUE)
#' list.all(x, score$c2 > 8 || score$c3 > 5, na.rm = FALSE)
list.all <- function(.data, cond, na.rm = FALSE) {
  # Degenerate calls mirror base::all(): no data, or no condition.
  if (missing(.data))
    return(all(na.rm = na.rm))
  if (is.empty(.data) || missing(cond))
    return(all(.data, na.rm = na.rm))
  # Capture `cond` unevaluated (NSE) and negate it, so "all elements satisfy
  # cond" becomes "no element satisfies !cond" -- lets us stop at the first
  # counter-example via list.first.internal().
  l <- lambda(substitute(cond))
  l$expr <- as.call(list(quote(`!`), l$expr))
  res <- list.first.internal(.data, l, parent.frame(), na.rm = na.rm)
  # A counter-example was found (res$state TRUE) => not all => FALSE.
  !res$state
}
#' Examine if a condition is true for at least one list element
#'
#' @param .data A \code{list} or \code{vector}
#' @param cond A logical lambda expression
#' @param na.rm logical. If true \code{NA} values are ignored in
#' the evaluation.
#' @seealso \code{\link{list.all}}
#' @return \code{TRUE} if \code{cond} is evaluated to be \code{TRUE}
#' for any element in \code{.data}.
#' @export
#' @examples
#' x <- list(p1 = list(type='A',score=list(c1=10,c2=8)),
#'        p2 = list(type='B',score=list(c1=9,c2=9)),
#'        p3 = list(type='B',score=list(c1=9,c2=7)))
#' list.any(x,type=='B')
#' list.any(x,mean(unlist(score))>=6)
#' list.any(x, score$c2 > 8 || score$c3 > 5, na.rm = TRUE)
#' list.any(x, score$c2 > 8 || score$c3 > 5, na.rm = FALSE)
list.any <- function(.data, cond, na.rm = FALSE) {
  # Degenerate calls mirror base::any(): no data, or no condition.
  if (missing(.data))
    return(any(na.rm = na.rm))
  if (is.empty(.data) || missing(cond))
    return(any(.data, na.rm = na.rm))
  # Short-circuit: stop at the first element satisfying the unevaluated
  # condition; `state` reports whether one was found.
  res <- list.first.internal(.data, substitute(cond), parent.frame(), na.rm = na.rm)
  res$state
}
|
963f72b7afb9c4c1bc4d1bb265e54eeca2284d90
|
961e8f85b7f0e2b6b82b648333449cfc26c6f2c5
|
/project/data/test.R
|
372efabfb6490aa0bf5b16d845bc8f40dc720ccc
|
[] |
no_license
|
kyclark/ecol596
|
b3002e6e2dc1919edf58eab45f45f3763e8274ab
|
c28cb09c2a70fe49e4b4b3129cda474fcf66bd11
|
refs/heads/master
| 2021-01-21T13:49:03.267593
| 2016-04-29T16:31:43
| 2016-04-29T16:31:43
| 51,519,545
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,189
|
r
|
test.R
|
require(zoo)
require(R.utils)
# Locate local maxima (and minima) of y over x on a loess-smoothed curve.
#
# x, y : numeric vectors of equal length (y is the signal, x its coordinate).
# w    : half-window in points; a point counts as a peak (trough) when it
#        equals the rolling max (min) of the 2*w+1 window centred on it.
# ...  : forwarded to loess(), e.g. span=.
#
# Returns a list:
#   x     - x positions of the detected maxima
#   i     - indices of the maxima in the input vectors
#   y.hat - loess-fitted (smoothed) y values, same length as y
#   y.min - indices of the detected local minima (troughs)
#
# Fix: removed a leftover debug print of i.min (callers only use the return
# value); assignments switched from `=` to `<-`.
argmax <- function(x, y, w = 1, ...) {
  n <- length(y)
  y.smooth <- loess(y ~ x, ...)$fitted
  y.max <- rollapply(zoo(y.smooth), 2 * w + 1, max, align = "center")
  y.min <- rollapply(zoo(y.smooth), 2 * w + 1, min, align = "center")
  # rollapply drops w points at each end; trim the smoothed series to match
  # before comparing point values against their window extrema.
  delta.max <- y.max - y.smooth[-c(1:w, n + 1 - 1:w)]
  delta.min <- y.min - y.smooth[-c(1:w, n + 1 - 1:w)]
  i.max <- which(delta.max <= 0) + w
  i.min <- which(delta.min >= 0) + w
  list(x = x[i.max], i = i.max, y.hat = y.smooth, y.min = i.min)
}
# Ad-hoc smoke run of argmax() on one k-mer frequency histogram.
# NOTE(review): setwd() in a script is fragile -- paths below assume this
# exact machine layout.
dat.dir = "~/work/ecol596/project/data"
setwd(dat.dir)
# Two space-separated columns: frequency and count (no header).
dat = read.delim(file.path(dat.dir, "Ac2.fa"), sep=" ", header=F)
colnames(dat) = c("freq", "count")
x = dat$freq
y = dat$count
w=1
span=.6
peaks = argmax(x=x, y=y, w=w, span=span)
y.smooth = loess(y ~ x)$fitted
# Process every *.fa histogram in the data directory: detect the first trough
# and first peak of the smoothed count distribution, derive a symmetric
# interval around the peak, plot it, and report the interval per file.
main = function() {
  dat.dir = "~/work/ecol596/project/data"
  setwd(dat.dir)
  w    = 1
  span = .4
  i = 0
  for (fname in list.files(dat.dir, pattern="*.fa$")) {
    dat = read.delim(file.path(dat.dir, fname), sep=" ", header=F)
    colnames(dat) = c("freq", "count")
    x = dat$freq
    y = dat$count
    peaks = argmax(x=x, y=y, w=w, span=span)
    # peaks$y.min holds trough indices, peaks$i peak indices (see argmax()).
    first.trough.y = peaks$y.min[1]
    first.trough.x = x[first.trough.y][1]
    first.peak.x   = x[peaks$i][1]
    first.peak.y   = peaks$y.hat[peaks$i][1]
    # Mirror the trough-to-peak distance to get the right interval bound.
    dist.to.mid    = first.peak.x - first.trough.x
    second.trough.x = first.trough.x + dist.to.mid * 2
    #png(filename = paste0(fname, '.png'))
    plot(x, y, cex=0.75, col="Gray", main=fname, xlab="count", ylab="frequency")
    lines(x, peaks$y.hat,  lwd=2)
    y.min = min(y)
    mins = data.frame(x = x[peaks$y.min], y = peaks$y.hat[peaks$y.min])
    #dev.off()
    points(x = first.peak.x, y = first.peak.y, col="Red", pch=19, cex=1.25)
    segments(first.trough.x, y.min, first.trough.x, first.peak.y, col="Purple")
    segments(second.trough.x, y.min, second.trough.x, first.peak.y, col="Purple")
    i = i + 1
    printf("%5d: %s (%s - %s)\n", i, fname, first.trough.x, first.trough.x + dist.to.mid * 2)
  }
  printf("Finished, processed %s file%s.\n", i, ifelse(i==1, '', 's'))
}
main()
|
07e8bd9afad1d2c3fe2ee4d913e9ebe6ea44e3d5
|
5f48f90d335918eb85af87ab829bf6f50d07832b
|
/server.R
|
e44fc09d0b985f341d9f9451e2cf35a717e076e2
|
[] |
no_license
|
manilwagle/Developing_Data_Products
|
f690c64e8a950f2838ee36a0bc40cb13ba9b1944
|
593cc27fcb8a03d6edf3e15f368a8bb6944994b1
|
refs/heads/master
| 2021-01-10T07:50:11.057542
| 2015-10-13T23:15:55
| 2015-10-13T23:15:55
| 43,963,152
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
server.R
|
# Shiny server for an MPG predictor: fit a linear model on mtcars once at
# startup, then serve predictions from user inputs.
library(shiny)
data(mtcars)
# Make cyl (number of cylinders) and am (0 = auto, 1 = manual) into factors
mtcars$cyl <- factor(mtcars$cyl)
mtcars$am <- factor(mtcars$am)
# Train a regression model to predict MPG from
#  - wt (weight in 1000 lbs)
#  - hp (horsepower)
#  - cyl (number of cylinders)
#  - am (0 = auto, 1 = manual)
MPGModel <- lm(mpg ~ cyl + hp + wt + am, data = mtcars)
# For testing
# predict(MPGModel, data.frame(wt=3, hp=3, cyl=as.factor(8), am=as.factor(1)))
# Server logic: whenever the inputs change, assemble a one-row data frame of
# predictors and render the model's MPG prediction as text.
# Assumes the UI provides wt/hp/cyl/am inputs with types matching the
# factor/numeric columns MPGModel was trained on.
shinyServer(function(input, output) {
  output$mpg <- renderText({
    newdata <- data.frame(
      wt  = input$wt,
      hp  = input$hp,
      cyl = input$cyl,
      am  = input$am
    )
    predict(MPGModel, newdata)
  })
})
|
e5cb18dc77115a659701651defd82e5e2047da83
|
d2fdf04be8786c603176b9c779a6dac2611cba81
|
/tests/testthat/test-monolix.R
|
5074c80cd0327e931b6f1433ea0e8693a5ce9888
|
[] |
no_license
|
nlmixrdevelopment/babelmixr
|
4038f202f62fc9fafc586070661d73058620a4a6
|
040fc8eabad7fab0cf53841ec0fbba616bd742b5
|
refs/heads/master
| 2023-03-05T22:20:08.056448
| 2021-02-22T21:31:18
| 2021-02-22T21:31:18
| 308,112,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,661
|
r
|
test-monolix.R
|
# Verifies monolixMapData(): translating an nlmixr model + dataset into the
# Monolix column-header mapping and regressor declaration. The second fixture
# adds a WT covariate so WT must switch from "ignore" to "regressor".
test_that("model to input information", {
  # Fixture 1: PK/PD turnover model with no covariates.
  pk.turnover.emax3 <- function() {
    ini({
      tktr <- log(1)
      tka <- log(1)
      tcl <- log(0.1)
      tv <- log(10)
      ##
      eta.ktr ~ 1
      eta.ka ~ 1
      eta.cl ~ 2
      eta.v ~ 1
      prop.err <- 0.1
      pkadd.err <- 0.1
      ##
      temax <- logit(0.8)
      tec50 <- log(0.5)
      tkout <- log(0.05)
      te0 <- log(100)
      ##
      eta.emax ~ .5
      eta.ec50 ~ .5
      eta.kout ~ .5
      eta.e0 ~ .5
      ##
      pdadd.err <- 10
    })
    model({
      ktr <- exp(tktr + eta.ktr)
      ka <- exp(tka + eta.ka)
      cl <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      emax = expit(temax+eta.emax)
      ec50 =  exp(tec50 + eta.ec50)
      kout = exp(tkout + eta.kout)
      e0 = exp(te0 + eta.e0)
      ##
      DCP = center/v
      PD=1-emax*DCP/(ec50+DCP)
      ##
      effect(0) = e0
      kin = e0*kout
      ##
      d/dt(depot) = -ktr * depot
      d/dt(gut) =  ktr * depot -ka * gut
      d/dt(center) =  ka * gut - cl / v * center
      d/dt(effect) =  kin*PD -kout*effect
      ##
      cp = center / v
      cp ~ prop(prop.err) + add(pkadd.err)
      effect ~ add(pdadd.err) | pca
    })
  }
  uif <- nlmixr(pk.turnover.emax3)
  # Without covariates, WT in theo_sd is ignored and all parameters become
  # plain Monolix inputs.
  expect_equal(babelmixr:::monolixMapData(theo_sd, uif),
               list(headerType = c(ID = "id", TIME = "time", DV = "observation",
                                   AMT = "amount", EVID = "evid", CMT = "obsid", WT = "ignore"),
                    regressors = "input={v, emax, ec50, e0, kout, ktr, ka, cl}"))
  # Fixture 2: same model but cl0 depends on WT, making WT a regressor.
  pk.turnover.emax3 <- function() {
    ini({
      tktr <- log(1)
      tka <- log(1)
      tcl <- log(0.1)
      tv <- log(10)
      ##
      eta.ktr ~ 1
      eta.ka ~ 1
      eta.cl ~ 2
      eta.v ~ 1
      prop.err <- 0.1
      pkadd.err <- 0.1
      ##
      temax <- logit(0.8)
      tec50 <- log(0.5)
      tkout <- log(0.05)
      te0 <- log(100)
      ##
      eta.emax ~ .5
      eta.ec50 ~ .5
      eta.kout ~ .5
      eta.e0 ~ .5
      ##
      pdadd.err <- 10
    })
    model({
      ktr <- exp(tktr + eta.ktr)
      ka <- exp(tka + eta.ka)
      cl0 <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      emax = expit(temax+eta.emax)
      ec50 =  exp(tec50 + eta.ec50)
      kout = exp(tkout + eta.kout)
      e0 = exp(te0 + eta.e0)
      ##
      DCP = center/v
      PD=1-emax*DCP/(ec50+DCP)
      ##
      effect(0) = e0
      kin = e0*kout
      cl0 <- cl * (WT / 70) ^ 0.75
      ##
      d/dt(depot) = -ktr * depot
      d/dt(gut) =  ktr * depot -ka * gut
      d/dt(center) =  ka * gut - cl / v * center
      d/dt(effect) =  kin*PD -kout*effect
      ##
      cp = center / v
      cp ~ prop(prop.err) + add(pkadd.err)
      effect ~ add(pdadd.err) | pca
    })
  }
  uif <- nlmixr(pk.turnover.emax3)
  # WT now maps to "regressor" and gets a use=regressor declaration line.
  expect_equal(babelmixr:::monolixMapData(theo_sd, uif),
               list(headerType = c(ID = "id", TIME = "time", DV = "observation",
                                   AMT = "amount", EVID = "evid", CMT = "obsid",
                                   WT = "regressor"),
                    regressors = "input={v, emax, ec50, e0, kout, cl, WT, ktr, ka}\nWT = {use=regressor}"))
})
# Exercises the rxode2->Monolix expression translator (rxToMonolix) on single
# expressions, then the mu-referencing definition builder against snapshot
# RDS fixtures, progressively removing mu-reference entries.
test_that("monolix dsl", {
  # Function mapping and argument-arity rules.
  expect_equal(rxToMonolix("sqrt(a)"), "sqrt(a)")
  expect_equal(rxToMonolix("max(a,b)"), "max(a,b)")
  expect_error(rxToMonolix("max(a,b,c)"))
  expect_error(rxToMonolix("max(a)"))
  expect_equal(rxToMonolix("sum(a,b,c,d)"), "((a)+(b)+(c)+(d))")
  expect_equal(rxToMonolix("prod(a,b,c,d)"), "((a)*(b)*(c)*(d))")
  # All three assignment forms normalize to "=".
  expect_equal(rxToMonolix("a<-1+b"), "a = 1+b")
  expect_equal(rxToMonolix("a~1+b"), "a = 1+b")
  expect_equal(rxToMonolix("a=1+b"), "a = 1+b")
  # expit/logit/probit families, with optional range arguments.
  expect_equal(rxToMonolix("expit(a)"), "1/(1+exp(-(a)))")
  expect_equal(rxToMonolix("expit(a,b)"), "(1.0-(b))*(1/(1+exp(-(a))))+(b)")
  expect_equal(rxToMonolix("expit(a,b,c)"), "((c)-(b))*(1/(1+exp(-(a))))+(b)")
  expect_equal(rxToMonolix("logit(a)"), "-log(1/(a)-1)")
  expect_equal(rxToMonolix("logit(a,b)"), "-log(1/(((a)-(b))/(1.0-(b)))-1)")
  expect_equal(rxToMonolix("logit(a,b,c)"), "-log(1/(((a)-(b))/((c)-(b)))-1)")
  expect_equal(rxToMonolix("probitInv(a)"), "normcdf(a)")
  expect_equal(rxToMonolix("probitInv(a,b)"), "(1.0-(b))*(normcdf(a))+(b)")
  expect_equal(rxToMonolix("probitInv(a,b,c)"), "((c)-(b))*(normcdf(a))+(b)")
  expect_equal(rxToMonolix("probit(a)"), "probit(a)")
  expect_equal(rxToMonolix("probit(a,b)"), "probit(((a)-(b))/(1.0-(b)))")
  expect_equal(rxToMonolix("probit(a,b,c)"), "probit(((a)-(b))/((c)-(b)))")
  # ODE/compartment syntax and control flow.
  expect_equal(rxToMonolix("d/dt(depot)=-depot*kel"), "ddt_depot = - depot*kel")
  expect_equal(rxToMonolix("depot(0)=50"), "depot_0 = 50")
  expect_equal(rxToMonolix("f(depot)=3"), ";f defined in PK section")
  expect_equal(rxToMonolix("a**b"), "a^b")
  expect_equal(rxToMonolix("if (a<=b){c=1} else if (a==4) {c=2} else {c=4}"), "if a<=b\n  c = 1\nelseif a==4\n  c = 2\nelse \n  c = 4\nend\n")
  expect_equal(rxToMonolix("if (a<=b){c=1} else if (a==4) {c=2} else if (a==30) {c=4} else {c=100}"), "if a<=b\n  c = 1\nelseif a==4\n  c = 2\nelseif a==30\n  c = 4\nelse \n  c = 100\nend\n")
  expect_equal(rxToMonolix("if (a<=b){c=1} else if (a==4) {c=2}"), "if a<=b\n  c = 1\nelseif a==4\n  c = 2\nend\n")
  expect_equal(rxToMonolix("if (a<=b){c=1}"), "if a<=b\n  c = 1\nend\n")
  # Reserved names / identifiers: time becomes t; NA and newind unsupported.
  expect_equal(rxToMonolix("time"), "t")
  expect_error(rxToMonolix("NA"))
  expect_error(rxToMonolix("newind"))
  expect_equal(rxToMonolix("log1pmx(a)"), "(log(1+a)-(a))")
  expect_equal(rxToMonolix("4.3"), "4.3")
  # Dots in names are not valid in Monolix; translated to double underscore.
  expect_equal(rxToMonolix("add.sd"), "add__sd")
  # Full model fixture used to derive the mu-reference table below.
  pk.turnover.emax3 <- function() {
    ini({
      tktr <- log(1)
      tka <- log(1)
      tcl <- log(0.1)
      tv <- log(10)
      ##
      eta.ktr ~ 1
      eta.ka ~ 1
      eta.cl ~ 2
      eta.v ~ 1
      prop.err <- 0.1
      pkadd.err <- 0.1
      ##
      temax <- logit(0.8)
      tec50 <- log(0.5)
      tkout <- log(0.05)
      te0 <- log(100)
      ##
      eta.emax ~ .5
      eta.ec50 ~ .5
      eta.kout ~ .5
      eta.e0 ~ .5
      ##
      pdadd.err <- 10
    })
    model({
      ktr <- exp(tktr + eta.ktr)
      ka <- exp(tka + eta.ka)
      cl <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      emax = expit(temax+eta.emax)
      ec50 =  exp(tec50 + eta.ec50)
      kout = exp(tkout + eta.kout)
      e0 = exp(te0 + eta.e0)
      ##
      DCP = center/v
      PD=1-emax*DCP/(ec50+DCP)
      ##
      effect(0) = e0
      kin = e0*kout
      ##
      d/dt(depot) = -ktr * depot
      d/dt(gut) =  ktr * depot -ka * gut
      d/dt(center) =  ka * gut - cl / v * center
      d/dt(effect) =  kin*PD -kout*effect
      ##
      cp = center / v
      cp ~ prop(prop.err) + add(pkadd.err)
      effect ~ add(pdadd.err) | pca
    })
  }
  uif <- nlmixr(pk.turnover.emax3)
  # Parameter-definition expression covering expit/probitInv with bounds.
  fun <- function(){
    ktr = exp(tktr)
    ka = exp(tka)
    cl = exp(tcl)
    v = exp(tv)
    emax = expit(temax, -0.5, 2)
    ec50 = probitInv(tec50)
    kout = tkout
    e0 = exp(te0)
  }
  tmp <- body(fun)
  mu.ref <- uif$mu.ref
  # Snapshot comparisons: each RDS was generated from a known-good run; the
  # mu.ref vector is shrunk one entry at a time to cover partial referencing.
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-1.rds"))
  mu.ref <- mu.ref[-2]
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-2.rds"))
  mu.ref <- mu.ref[-6]
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-3.rds"))
  mu.ref <- mu.ref[-5]
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-4.rds"))
  mu.ref <- mu.ref[-4]
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-5.rds"))
  # expit with only a lower bound.
  fun <- function(){
    ktr = exp(tktr)
    ka = exp(tka)
    cl = exp(tcl)
    v = exp(tv)
    emax = expit(temax, -0.5)
    ec50 = probitInv(tec50)
    kout = tkout
    e0 = exp(te0)
  }
  tmp <- body(fun)
  mu.ref <- uif$mu.ref
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-6.rds"))
  # expit with no bounds.
  fun <- function(){
    ktr = exp(tktr)
    ka = exp(tka)
    cl = exp(tcl)
    v = exp(tv)
    emax = expit(temax)
    ec50 = probitInv(tec50)
    kout = tkout
    e0 = exp(te0)
  }
  tmp <- body(fun)
  mu.ref <- uif$mu.ref
  expect_equal(babelmixr:::.toMonolixDefinition(tmp, mu.ref),
               readRDS("test-monolix-definition-7.rds"))
})
# Checks that bioavailability (f(depot)) and lag time (alag(depot)) flow into
# the Monolix depot() macro, and that compartments without them get the
# Tlag=0.0 / p=1.0 defaults.
test_that("F/alag", {
  pk.turnover.emax3 <- function() {
    ini({
      tktr <- log(1)
      tka <- log(1)
      tcl <- log(0.1)
      tv <- log(10)
      tfd <- logit(0.99)
      talagd <- log(0.01)
      ##
      eta.ktr ~ 1
      eta.ka ~ 1
      eta.cl ~ 2
      eta.v ~ 1
      prop.err <- 0.1
      pkadd.err <- 0.1
      ##
      temax <- logit(0.8)
      tec50 <- log(0.5)
      tkout <- log(0.05)
      te0 <- log(100)
      ##
      eta.emax ~ .5
      eta.ec50 ~ .5
      eta.kout ~ .5
      eta.e0 ~ .5
      ##
      pdadd.err <- 10
    })
    model({
      ktr <- exp(tktr + eta.ktr)
      ka <- exp(tka + eta.ka)
      cl <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      emax = expit(temax+eta.emax)
      ec50 =  exp(tec50 + eta.ec50)
      kout = exp(tkout + eta.kout)
      e0 = exp(te0 + eta.e0)
      fd <- expit(tfd)
      alagd <- exp(talagd)
      ##
      DCP = center/v
      PD=1-emax*DCP/(ec50+DCP)
      ##
      effect(0) = e0
      kin = e0*kout
      ##
      d/dt(depot) = -ktr * depot
      d/dt(gut) =  ktr * depot -ka * gut
      d/dt(center) =  ka * gut - cl / v * center
      d/dt(effect) =  kin*PD -kout*effect
      f(depot) = fd
      alag(depot) = alagd
      ##
      cp = center / v
      cp ~ prop(prop.err) + add(pkadd.err)
      effect ~ add(pdadd.err) | pca
    })
  }
  uif <- nlmixr(pk.turnover.emax3)
  tmp <- babelmixr:::monolixModelTxt(uif, warfarin)
  # depot compartment carries the model's fd/alagd symbols...
  lines <- strsplit(tmp$txt,"\n")[[1]]
  lines <- lines[regexpr("target=depot", lines) != -1]
  expect_equal(lines, "depot(type=1, target=depot, Tlag=alagd, p=fd)")
  # ...while the effect compartment falls back to the defaults.
  lines <- strsplit(tmp$txt,"\n")[[1]]
  lines <- lines[regexpr("target=effect", lines) != -1]
  expect_equal(lines, "depot(type=1, target=effect, Tlag=0.0, p=1.0)")
})
# Checks generation of the Monolix <FIT> section and the observation column
# declaration: single-endpoint models name the DV column directly; dual
# endpoints (cp + effect) produce a multi-observation mapping.
test_that("<FIT>", {
  # Single-endpoint one-compartment model.
  one.compartment <- function() {
    ini({
      tka <- 0.45 # Log Ka
      tcl <- 1 # Log Cl
      tv <- 3.45    # Log V
      eta.ka ~ 0.6
      eta.cl ~ 0.3
      eta.v ~ 0.1
      add.sd <- 0.7
    })
    model({
      ka <- exp(tka + eta.ka)
      cl <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      d/dt(depot) = -ka * depot
      d/dt(center) = ka * depot - cl / v * center
      cp = center / v
      cp ~ add(add.sd)
    })
  }
  uif <- nlmixr(one.compartment)
  tmp <- babelmixr:::monolixModelTxt(uif, theo_sd)
  expect_equal(tmp$obs, "DV")
  expect_equal(tmp$fit, "\n\n<FIT>\ndata = DV\nmodel = DV\n")
  # The datafile declaration should mark DV as a continuous observation.
  v <- strsplit(tmp$datafile,"\n")[[1]]
  v <- v[regexpr(paste0(tmp$obs, " = "), v, fixed=TRUE) != -1]
  expect_equal(v, "DV = {use=observation, name=DV, type=continuous}")
  # Dual-endpoint PK/PD model (cp and pca observations).
  pk.turnover.emax3 <- function() {
    ini({
      tktr <- log(1)
      tka <- log(1)
      tcl <- log(0.1)
      tv <- log(10)
      tfd <- logit(0.99)
      talagd <- log(0.01)
      ##
      eta.ktr ~ 1
      eta.ka ~ 1
      eta.cl ~ 2
      eta.v ~ 1
      prop.err <- 0.1
      pkadd.err <- 0.1
      ##
      temax <- logit(0.8)
      tec50 <- log(0.5)
      tkout <- log(0.05)
      te0 <- log(100)
      ##
      eta.emax ~ .5
      eta.ec50 ~ .5
      eta.kout ~ .5
      eta.e0 ~ .5
      ##
      pdadd.err <- 10
    })
    model({
      ktr <- exp(tktr + eta.ktr)
      ka <- exp(tka + eta.ka)
      cl <- exp(tcl + eta.cl)
      v <- exp(tv + eta.v)
      emax = expit(temax+eta.emax)
      ec50 =  exp(tec50 + eta.ec50)
      kout = exp(tkout + eta.kout)
      e0 = exp(te0 + eta.e0)
      fd <- expit(tfd)
      alagd <- exp(talagd)
      ##
      DCP = center/v
      PD=1-emax*DCP/(ec50+DCP)
      ##
      effect(0) = e0
      kin = e0*kout
      ##
      d/dt(depot) = -ktr * depot
      d/dt(gut) =  ktr * depot -ka * gut
      d/dt(center) =  ka * gut - cl / v * center
      d/dt(effect) =  kin*PD -kout*effect
      f(depot) = fd
      alag(depot) = alagd
      ##
      cp = center / v
      cp ~ prop(prop.err) + add(pkadd.err)
      effect ~ add(pdadd.err) | pca
    })
  }
  uif <- nlmixr(pk.turnover.emax3)
  tmp <- babelmixr:::monolixModelTxt(uif, warfarin)
  # With two endpoints, obs is empty and the fit maps both y_5/y_6 streams.
  expect_equal(tmp$obs, "")
  expect_equal(tmp$fit, "\n\n<FIT>\ndata = {y_5, y_6}\nmodel = {cp_pred, pca_pred}\n")
  v <- strsplit(tmp$datafile,"\n")[[1]]
  v <- v[regexpr("use=observation", v, fixed=TRUE) != -1]
  expect_equal(v, "dv = {use=observation, name={y_5, y_6},yname={'5', '6'},type={continuous, continuous}}")
})
|
317f3ab88ae49e7dbfe365407dfb8b5274c52090
|
105e158b10a08b907b95c9347e37fae682f70cdc
|
/GTRENDS FINAL PRACTICE.R
|
da4f341e9c8a7209ef51747fda4e68c06914ffcf
|
[] |
no_license
|
btindol178/R--Data-Science-Journey-
|
301919f2cc9eed87ba912b09e383be3b1d9c1c8d
|
23ddcaa0a72552d780faedf18d88b2c081f3cf52
|
refs/heads/master
| 2023-03-20T13:59:34.871495
| 2021-03-08T00:12:56
| 2021-03-08T00:12:56
| 172,145,392
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,626
|
r
|
GTRENDS FINAL PRACTICE.R
|
# Pull Google Trends interest for "covid-19" across all 50 US states (the API
# caps at 5 geos per request, hence ten batched calls), then clean and stack
# the results into one data frame.
devtools::install_github("PMassicotte/gtrendsR") # only run once
install.packages("gtrendsR")
## load library
library(gtrendsR)
library(dplyr)
# searching for covid-19 Trends
res0 <- gtrends(c("covid-19"), geo=c("US-AL","US-AK","US-AZ","US-AR","US-CA"),time = "2019-11-01 2020-03-24")
res1 <- gtrends(c("covid-19"), geo=c("US-CO","US-CT","US-DE","US-FL","US-GA"),time = "2019-11-01 2020-03-24")
res2 <- gtrends(c("covid-19"), geo=c("US-HI","US-ID","US-IL","US-IN","US-IA"),time = "2019-11-01 2020-03-24")
res3 <- gtrends(c("covid-19"), geo=c("US-KS","US-KY","US-LA","US-ME","US-MD"),time = "2019-11-01 2020-03-24")
res4 <- gtrends(c("covid-19"), geo=c("US-MA","US-MI","US-MN","US-MS","US-MO"),time = "2019-11-01 2020-03-24")
res5 <- gtrends(c("covid-19"), geo=c("US-MT","US-NE","US-NV","US-NH","US-NJ"),time = "2019-11-01 2020-03-24")
res6 <- gtrends(c("covid-19"), geo=c("US-NM","US-NY","US-NC","US-ND","US-OH"),time = "2019-11-01 2020-03-24")
res7 <- gtrends(c("covid-19"), geo=c("US-OK","US-OR","US-PA","US-RI","US-SC"),time = "2019-11-01 2020-03-24")
res8 <- gtrends(c("covid-19"), geo=c("US-SD","US-TN","US-TX","US-UT","US-VT"),time = "2019-11-01 2020-03-24")
res9 <- gtrends(c("covid-19"), geo=c("US-VA","US-WA","US-WV","US-WI","US-WY"),time = "2019-11-01 2020-03-24")
plot(res0)
plot(res1)
plot(res2)
plot(res3)
plot(res4)
plot(res5)
plot(res6)
plot(res7)
plot(res8)
plot(res9)
#Extract the data frame element of list
res0 <- res0$interest_over_time
res1 <- res1$interest_over_time
res2 <- res2$interest_over_time
res3 <- res3$interest_over_time
res4 <- res4$interest_over_time
res5 <- res5$interest_over_time
res6 <- res6$interest_over_time
res7 <- res7$interest_over_time
res8 <- res8$interest_over_time
res9 <- res9$interest_over_time
# Replace <1 values with 0
# Google reports very low interest as the string "<1"; zero it out so the
# column can be converted to integer below.
res0$hits <- gsub("<1",0,res0$hits) # make na values
res1$hits <- gsub("<1",0,res1$hits) # make na values
res2$hits <- gsub("<1",0,res2$hits) # make na values
res3$hits <- gsub("<1",0,res3$hits) # make na values
res4$hits <- gsub("<1",0,res4$hits) # make na values
res5$hits <- gsub("<1",0,res5$hits) # make na values
res6$hits <- gsub("<1",0,res6$hits) # make na values
res7$hits <- gsub("<1",0,res7$hits) # make na values
res8$hits <- gsub("<1",0,res8$hits) # make na values
res9$hits <- gsub("<1",0,res9$hits) # make na values
# Convert to intiger
res0$hits <- as.integer(res0$hits)
res1$hits <- as.integer(res1$hits)
res2$hits <- as.integer(res2$hits)
res3$hits <- as.integer(res3$hits)
res4$hits <- as.integer(res4$hits)
res5$hits <- as.integer(res5$hits)
res6$hits <- as.integer(res6$hits)
res7$hits <- as.integer(res7$hits)
res8$hits <- as.integer(res8$hits)
res9$hits <- as.integer(res9$hits)
# THIS IS ALL OF THE CHLAMYDIA DATA FOR ALL STATES ONTOP OF EACHOTHER
aggr <- NULL # (comment left from an earlier template; data is covid-19 hits)
a <- bind_rows(res0,res1,res2)
b <- bind_rows(res3,res4,res5) # fix res 4
c <- bind_rows(res6,res7,res8)
c <- bind_rows(res9,c)
covid_19 <- bind_rows(a,b,c)
covid <-covid_19[-c(4,6,7)]
# seperate columns to be more tidy
library(tidyverse)
# Use regular expressions to separate on multiple characters:
df <- covid %>% separate(geo, c("Country","State"), sep = "([\\-])") # here we are sperating by - the (\\ is just part of syntax)
# Seperating the date columns just in case
df2 <- df %>% separate(date, c("Year","Month","Day"), sep = "([\\-])") # here we are sperating by - the (\\ is just part of syntax)
# IMPORT LONGITUDE AND LATTITUDE
usa <- read.csv("USA.csv")
colnames(usa)[1] <- "State"
final <- merge(df2,usa, by=c("State"),all.x=TRUE)
colnames(final)[10] <- "region" # match the map_data
final2 <- merge(df,usa, by=c("State"),all.x=TRUE)
colnames(final2)[8] <- "region" # match the map_data
final2 <- final2[-c(4,5,6,7)]
colnames(final2)[3] <- "value"
colnames(final2)[1] <- "state"
final3 <- final2
final3$date <- as.POSIXct(final3$date, format = '%m-%d-%Y')
final3$date <- format(final3$date, "%m/%d/%y")
##############################################################
final2 # this is final dataframe!!!!!!!!!!!!!!!!!!!!!!
# this is also final dataframe
final3
##############################################################
# Plotting: merge hit counts onto US state polygons.
# BUG FIX: `install.packages(map)` passed an undefined symbol and the
# package is actually named "maps"; install only when missing.
if (!requireNamespace("maps", quietly = TRUE)) install.packages("maps")
library(maps)
us_states <- map_data("state")
head(us_states) # polygon table: long / lat / group / order / region / subregion
us2 <- us_states[-c(1, 2, 4, 6)] # NOTE(review): computed but never used below
# Merge by region now that both sides use lower-case state names.
final_f2 <- merge(us_states, final2, by = c("region"), all.x = TRUE)
final_f2 <- final_f2[-c(12, 13)]
# Keep only rows after 21 March 2020 (string comparison is valid for ISO dates).
date_test <- final_f2 %>% filter(date > '2020-03-21')
df_test2 <- date_test[-c(6)]
library(dplyr)
# Drop duplicate rows, then keep the first row for each state.
df_final <- df_test2 %>% distinct()
df_final2 <- df_final
dfz <- df_final2 %>%
  group_by(region) %>%
  slice(1) %>% # takes the first occurrence if there is a tie
  ungroup()
#########################################################
dfz # final data frame: one row per state
# Rename column 3 to "state" (NOTE(review): presumably the two-letter state
# code from USA.csv — confirm column positions after the merges) and keep
# only it plus the hits column for plotting.
colnames(dfz)[3] <- "state"
dfff <- dfz[c(3, 5)]
dft <- dfff
###############################################
# Plot hits per state with usmap.
# BUG FIX: `install.packages(usmap)` passed an undefined symbol; the package
# name must be a quoted string. Install only when missing.
if (!requireNamespace("usmap", quietly = TRUE)) install.packages("usmap")
library(usmap)
library(ggplot2)
plot_usmap(data = dfff, values = "hits", color = "red") +
  scale_fill_continuous(name = "Hits", label = scales::comma) +
  theme(legend.position = "right")
# Add Alaska and Hawaii with a placeholder value of 10.
# BUG FIX: `dft[nrow(dft) + 1, ] = c("AK", 10)` coerced the whole `hits`
# column to character (c() unifies types), which broke the continuous fill
# scale below (the original comment noted "THIS ONE IS NOT WORKING").
# Append properly typed rows instead; bind_rows matches columns by name.
dft <- bind_rows(dft, data.frame(state = c("AK", "HI"), hits = c(10, 10)))
plot_usmap(data = dft, values = "hits", color = "red") +
  scale_fill_continuous(name = "hits", label = scales::comma) +
  theme(legend.position = "right")
#
# Interactive map with leaflet, using Census TIGER state polygons.
library(tigris)
library(leaflet)
if (!requireNamespace("jsonlite", quietly = TRUE)) install.packages("jsonlite")
library(jsonlite)
states <- states(cb = TRUE) # cartographic-boundary (generalized) polygons
# Quick sanity-check map of the raw polygons.
states %>% leaflet() %>% addTiles() %>% addPolygons(popup = ~NAME)
# Colour bins for the hit counts.
bins <- c(0, 10, 20, 50, 60, 70, 80, 100)
# BUG FIX: the palette's domain must be the numeric values being binned
# (dfz$hits), not the character state column.
pal <- colorBin("RdYlBu", domain = dfz$hits, bins = bins)
# BUG FIX: `m` was used below without ever being created; start from a
# plain tile base map.
# NOTE(review): fillColor = pal(dfz$hits) assumes dfz rows line up with the
# polygon order in `states` — confirm the ordering/row counts match.
m <- leaflet() %>% addTiles()
m <- m %>% addPolygons(
  data = states,
  weight = 2,
  smoothFactor = 0.5,
  opacity = 1,
  color = "white",
  dashArray = "3",
  fillOpacity = 0.8,
  fillColor = pal(dfz$hits))
m
# The four maps below (d, C, z, w) differ only in which extras they enable;
# build them with one helper instead of repeating the full pipeline four
# times.
# NOTE(review): fillColor = pal(dfz$hits) assumes dfz rows line up with the
# polygon order in `states` — confirm.
#
# Build a Stamen.Toner leaflet map of `states` filled by dfz$hits.
#   highlight - add mouse-over highlighting of the hovered polygon
#   labels    - list of HTML labels to show on hover (NULL for none)
#   legend    - add a colour legend for dfz$hits
make_hits_map <- function(highlight = FALSE, labels = NULL, legend = FALSE) {
  hl <- NULL
  if (highlight) {
    hl <- highlightOptions(
      weight = 5,
      color = "#666666",
      dashArray = "",
      fillOpacity = 0.7,
      bringToFront = TRUE)
  }
  map <- leaflet() %>%
    addProviderTiles(providers$Stamen.Toner) %>%
    addPolygons(data = states,
                weight = 2,
                smoothFactor = 0.5,
                opacity = 1,
                color = "white",
                dashArray = "3",
                fillOpacity = 0.8,
                fillColor = pal(dfz$hits),
                highlight = hl,
                label = labels)
  if (legend) {
    map <- map %>% addLegend(pal = pal,
                             values = dfz$hits,
                             opacity = 0.7,
                             position = "topright")
  }
  map
}
# Plain choropleth.
d <- make_hits_map()
d
# With hover highlighting.
C <- make_hits_map(highlight = TRUE)
C
# Hover labels showing the state and its hit count.
if (!requireNamespace("htmltools", quietly = TRUE)) install.packages("htmltools")
library(htmltools)
library(dplyr)
library(leaflet)
labels <- paste("<p>", dfz$state, "</p>",
                "<p>", "Hits Rate:", dfz$hits, "</p>",
                sep = "")
# Highlighting plus hover labels.
z <- make_hits_map(highlight = TRUE, labels = lapply(labels, HTML))
z
# Highlighting, hover labels, and a colour legend.
w <- make_hits_map(highlight = TRUE, labels = lapply(labels, HTML), legend = TRUE)
w
##################################################################
# Per-day state-level data frames for choroplethr.
if (!requireNamespace("choroplethr", quietly = TRUE)) install.packages("choroplethr")
library(choroplethr)
if (!requireNamespace("choroplethrMaps", quietly = TRUE)) install.packages("choroplethrMaps")
library(choroplethrMaps)
library(dplyr)
library(ggplot2)
data(state.map) # loads the `state.map` polygon data into the workspace
ggplot(state.map, aes(long, lat, group = group)) + geom_polygon()
data(country.map, package = "choroplethrMaps")
# (The old `statemap <- data(state.map)` was dropped: data() returns the
# dataset *name*, not the data, and the result was never used.)
ustate <- unique(state.map$region)
# BUG FIX: the original subset `final3[ufin %in% ustate, ]` built a
# 14-element logical from the *unique* region names and recycled it over
# every row, keeping an arbitrary subset. Filter each row by its own region.
yesf <- final3[final3$region %in% ustate, ]
plotdata <- yesf
# One data frame per day, named plotdata1..plotdata58 (22 Jan - 19 Mar 2020).
# BUG FIX: `plotdata$date` is formatted "%m/%d/%y" (e.g. "01/25/20"), but
# many of the original hand-written filters used other formats such as
# "1/25/2020" and so matched nothing. Generate all 58 filters with a single,
# correct format instead of 58 hand-typed lines.
plot_dates <- format(seq(as.Date("2020-01-22"), as.Date("2020-03-19"), by = "day"),
                     "%m/%d/%y")
for (i in seq_along(plot_dates)) {
  assign(paste0("plotdata", i), plotdata[plotdata$date == plot_dates[i], ])
}
# Render a state-level choropleth for each day.
# CONSISTENCY FIX: the original listed all 58 calls by hand and used
# country_choropleth() for days 2-13 — the wrong function for US state data
# (day 1 correctly used state_choropleth()). Use state_choropleth()
# throughout; print() is required for the plots to render inside a loop.
# Per the original comments, days before 04 Feb contain little or no data.
for (i in seq_len(58)) {
  print(state_choropleth(get(paste0("plotdata", i))))
}
|
3570e9f562c275357fa13b0359fd813bbfdcbc71
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/sagemaker_list_user_profiles.Rd
|
d738ceea733aadebae92c557fb2ac8474410e3bb
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,322
|
rd
|
sagemaker_list_user_profiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sagemaker_operations.R
\name{sagemaker_list_user_profiles}
\alias{sagemaker_list_user_profiles}
\title{Lists user profiles}
\usage{
sagemaker_list_user_profiles(
NextToken = NULL,
MaxResults = NULL,
SortOrder = NULL,
SortBy = NULL,
DomainIdEquals = NULL,
UserProfileNameContains = NULL
)
}
\arguments{
\item{NextToken}{If the previous response was truncated, you will receive this token. Use
it in your next request to receive the next set of results.}
\item{MaxResults}{The total number of items to return in the response. If the total number
of items available is more than the value specified, a \code{NextToken} is
provided in the response. To resume pagination, provide the \code{NextToken}
value as part of a subsequent call. The default value is 10.}
\item{SortOrder}{The sort order for the results. The default is Ascending.}
\item{SortBy}{The parameter by which to sort the results. The default is CreationTime.}
\item{DomainIdEquals}{A parameter by which to filter the results.}
\item{UserProfileNameContains}{A parameter by which to filter the results.}
}
\description{
Lists user profiles.
See \url{https://www.paws-r-sdk.com/docs/sagemaker_list_user_profiles/} for full documentation.
}
\keyword{internal}
|
d99c854478c1b9745c958904c20acd23b7f68330
|
422de0cda897c0340eb4e01bdc0a44a5230a4c0d
|
/man/authoriseGitHub.Rd
|
d32d14b4401654ebd816aa3498273d4e9b14fbe6
|
[] |
no_license
|
gitter-badger/archivist.github
|
794e91762e37197cef0ec9c6eb7c37493d3e4ec4
|
35c8288158f8db978b6b41842d67255876958e70
|
refs/heads/master
| 2020-12-29T00:12:37.405951
| 2016-04-03T20:25:21
| 2016-04-03T20:25:21
| 59,998,255
| 0
| 0
| null | 2016-05-30T09:35:47
| 2016-05-30T09:35:47
| null |
UTF-8
|
R
| false
| true
| 1,112
|
rd
|
authoriseGitHub.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/authoriseGitHub.R
\name{authoriseGitHub}
\alias{authoriseGitHub}
\title{Authorise with GitHub API}
\usage{
authoriseGitHub(ClientID, ClientSecret)
}
\arguments{
\item{ClientID}{A 20 characters long string with Client ID. See https://github.com/settings/applications/ for more details.}
\item{ClientSecret}{A 40 characters long string with Client Secret. See https://github.com/settings/applications/ for more details.}
}
\description{
\code{authoriseGitHub} is a function that performs OAuth authorisation with the GitHub API
and stores the resulting token in the \code{github_token} variable.
In order to authorise your app you need a ClientID and a ClientSecret.
They can be found here: https://github.com/settings/applications/new
}
\examples{
\dontrun{
## GitHub version
authoriseGitHub(ClientID, ClientSecret)
}
}
\author{
Przemyslaw Biecek, \email{przemyslaw.biecek@gmail.com}
}
\seealso{
Other archivist.github: \code{\link{archive}},
\code{\link{cloneGitHubRepo}},
\code{\link{createGitHubRepo}},
\code{\link{deleteGitHubRepo}}
}
|
3fcf24558b31cf309412e9afe3576ef0e926bf10
|
cafe1a30c92ec40d0bd1ae7af8fb3c11a118d0d5
|
/plot2.R
|
3d88a4aa76ff76368a5c09e81d01ce4611076940
|
[] |
no_license
|
Matt-Coursera/Explore_DA
|
a75f208d86b7c7d295006eee6292dbfd045f0ee8
|
198e47a429ee69a8e1052f09ab2e50f0fcd9b4b4
|
refs/heads/master
| 2021-01-23T12:26:13.797150
| 2015-08-10T01:05:22
| 2015-08-10T01:05:22
| 40,451,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 674
|
r
|
plot2.R
|
# Read the full household power-consumption dataset.
data_path <- "./data/household_power_consumption.txt"
raw <- read.table(data_path, header = TRUE, sep = ";",
                  stringsAsFactors = FALSE, dec = ".")
# Keep only the two days of interest.
two_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]
# Combine date and time strings into POSIX timestamps for the x axis.
datetime <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")
# Power readings arrive as character (missing values are "?"); make numeric.
global_active_power <- as.numeric(two_days$Global_active_power)
# Write the line plot to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
plot(datetime, global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
3227c054832b55a54965394808ffea7a8e470748
|
b29a6b3ac4cb5ffc293e4102f74ef0168e6f7f18
|
/R/trimesh_construct.R
|
61b4a47e31075ebf1acf5cf56978ea31f881306c
|
[
"MIT"
] |
permissive
|
thomasp85/unmeshy
|
3d5022fcd820b6eac19c5b8aaf2fae5ecbc67859
|
c7b8dd2734dac3fd5d45abca941969f4951987d5
|
refs/heads/master
| 2022-12-15T13:21:23.031934
| 2020-09-11T12:20:43
| 2020-09-11T12:20:43
| 294,034,087
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,601
|
r
|
trimesh_construct.R
|
#' Create a trimesh object
#'
#' These functions helps in creating trimesh objects. A trimesh is a subclass of
#' mesh3d as defined in the rgl package. It is exclusively using triangles
#' (unsurprisingly) and adds two additional elements: `it_info` which is a
#' data.frame with information about the triangles in the mesh, and `vb_info`
#' which is a data.frame with information about the vertices. Since trimesh is
#' all about vector graphics it doesn't concern itself with material, texcoords,
#' and normals — converting from a mesh3d will loose that information. trimesh
#' objects can be constructed from either mesh3d object or data frames
#' containing an `x`, `y`, and `z` column (remaining columns will go to the
#' `it_info` element), or potentially from raw data giving vertice coordinates
#' and triangle vertex indexes.
#'
#' @param mesh A trimesh or an object convertible to one
#' @param vertices A 4-row matrix giving the coordinates of vertices in 3D space
#' (last row is filled with 1's by convention). Each vertex correspond to a
#' column in the matrix
#' @param triangles A 3-row matrix giving the indexes of the three vertices that
#' corresponds to the triangle. Each triangle corresponds to a column in the
#' matrix
#' @param vertex_info A data.frame giving additional information about each
#' vertex in the `vertices` matrix.
#' @param triangle_info A data.frame giving additional information about each
#' triangle given in the `triangles` matrix
#' @param ... ignored
#'
#' @name trimesh_construct
#' @rdname trimesh_construct
#'
NULL
#' @rdname trimesh_construct
#' @export
new_trimesh <- function(vertices, triangles, vertex_info = NULL, triangle_info = NULL) {
  # Substitute empty metadata tables when none (or zero rows) were supplied,
  # so downstream code can always rely on vb_info / it_info being present.
  if (is.null(vertex_info) || nrow(vertex_info) == 0) {
    vertex_info <- tibble(.rows = ncol(vertices))
  }
  if (is.null(triangle_info) || nrow(triangle_info) == 0) {
    triangle_info <- tibble(.rows = ncol(triangles))
  }
  # Vertex indices must be stored as integers.
  mode(triangles) <- 'integer'
  # Same element layout as an rgl mesh3d, plus the two *_info tables.
  structure(
    list(vb = vertices, it = triangles, primitivetype = 'triangle',
         material = list(), normals = NULL, texcoords = NULL,
         vb_info = vertex_info, it_info = triangle_info),
    class = c('trimesh', 'mesh3d', 'shape3d'))
}
#' @rdname trimesh_construct
#' @export
# TRUE when `mesh` carries the "trimesh" class (possibly among others).
is_trimesh <- function(mesh) inherits(mesh, 'trimesh')
#' @rdname trimesh_construct
#' @export
# S3 generic: conversion dispatches on the class of `mesh`
# (methods below cover trimesh, mesh3d, and data.frame).
as_trimesh <- function(mesh, ...) {
  UseMethod('as_trimesh')
}
#' @export
# trimesh method: the input is already a trimesh, so return it unchanged.
as_trimesh.trimesh <- function(mesh, ...) mesh
#' @export
# mesh3d method: triangulate (quads are split) and rebuild as a trimesh;
# material, texcoords, and normals are dropped in the process.
as_trimesh.mesh3d <- function(mesh, ...) {
  trimesh_from_mesh3d(mesh)
}
#' @export
# data.frame method: requires x/y/z coordinate columns; every remaining
# column is forwarded as per-triangle metadata.
as_trimesh.data.frame <- function(mesh, ...) {
  coord_cols <- c('x', 'y', 'z')
  if (!all(coord_cols %in% names(mesh))) {
    stop('data.frame must have an `x`, `y`, and `z` column to be converted to a trimesh', call. = FALSE)
  }
  meta <- mesh[!names(mesh) %in% coord_cols]
  trimesh_from_triangles(mesh$x, mesh$y, mesh$z, meta)
}
#' @export
#' @importFrom tibble as_tibble trunc_mat
# Print method: shows each triangle as its three "{x;y;z}" vertex triples
# alongside any per-triangle metadata, using tibble's truncated printing,
# with a custom "A trimesh" summary header.
print.trimesh <- function(x, ...) {
  v <- format(x$vb, digits = 2)  # vertex coordinates pre-formatted as text
  t <- x$it                      # 3 x n matrix of vertex indices per triangle
  # Build one "{..}; {..}; {..}" string per triangle (one per column of t).
  tri <- vapply(seq_len(ncol(t)), function(i) paste0(
    '{', v[1,t[1,i]], ';', v[2,t[1,i]], ';', v[3,t[1,i]], '}, ',
    '{', v[1,t[2,i]], ';', v[2,t[2,i]], ';', v[3,t[2,i]], '}, ',
    '{', v[1,t[3,i]], ';', v[2,t[3,i]], ';', v[3,t[3,i]], '}'), character(1))
  tri <- as_tibble(cbind(data.frame(Vertices = tri), x$it_info))
  tri <- trunc_mat(tri)
  # Replace the default tibble header line with a trimesh summary.
  tri$summary <- c("A trimesh" = paste0(ncol(x$it), ' triangles and ', ncol(x$vb), ' unique vertices'))
  print(tri)
  invisible(x)  # standard print-method contract: return the input invisibly
}
#' @importFrom tibble tibble
# Build a trimesh from flat coordinate vectors, where every consecutive run
# of three (x, y, z) points defines one triangle. Extra arguments in `...`
# become per-triangle metadata columns (one row per triangle).
trimesh_from_triangles <- function(x, y, z, ...) {
  triangle_info <- tibble(...)
  if (ncol(triangle_info) == 0) triangle_info <- NULL
  if (length(x) != length(y) || length(x) != length(z)) {
    stop("Coordinates must all have the same length", call. = FALSE)
  }
  if (length(x) %% 3 != 0) {
    stop("Number of vertices must be divisible by 3", call. = FALSE)
  }
  if (!is.null(triangle_info) && nrow(triangle_info) != length(x)/3) {
    stop("Triangle information must match the number of triangles", call. = FALSE)
  }
  # join_triangles() builds the vertex matrix and triangle index matrix.
  # NOTE(review): defined elsewhere in the package (not visible here) — its
  # exact vertex-merging behaviour cannot be confirmed from this file.
  mesh <- join_triangles(x, y, z)
  new_trimesh(mesh$vertices, mesh$triangles, triangle_info = triangle_info)
}
# Convert an rgl mesh3d object to a trimesh. Quad meshes are triangulated by
# splitting each quad (v1, v2, v3, v4) into triangles (v1, v2, v3) and
# (v3, v4, v1); material/texcoords/normals are dropped by new_trimesh().
trimesh_from_mesh3d <- function(mesh) {
  if (mesh$primitivetype == 'quad') {
    # rbind stacks the two 3-row index blocks; matrix(..., nrow = 3) then
    # interleaves them so each quad yields two adjacent triangle columns.
    tri <- matrix(rbind(
      mesh$it[c(1, 2, 3), ],
      mesh$it[c(3, 4, 1), ]
    ), nrow = 3)
  } else if (mesh$primitivetype == 'triangle') {
    tri <- mesh$it
  } else {
    stop('Cannot convert a ', mesh$primitivetype, ' mesh3d object to a trimesh', call. = FALSE)
  }
  new_trimesh(mesh$vb, tri)
}
|
d3cc945b55cc0adbe0342430445a503aba658bf2
|
80ff29b3ea83408f561a7c776f524530e8f3e5ee
|
/Packages/Stats202A/R/Lasso.R
|
e85ab327143f9a70a553a439c72b87e60a4e3790
|
[] |
no_license
|
greek-geek/Statistics-Programming
|
b3dd537a4db97b2646c851e6d331d2a4263010f3
|
c4d8c9a0eb6c9bb13f024efced317e0cde1b76b0
|
refs/heads/master
| 2021-09-12T07:05:33.370104
| 2018-04-15T09:15:44
| 2018-04-15T09:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,781
|
r
|
Lasso.R
|
#' Lasso Regression via Cyclic Coordinate Descent
#'
#' Computes the lasso solution path over a grid of regularization
#' parameters. Lambdas are processed in decreasing order so each solution
#' warm-starts the next (smaller) lambda.
#'
#' @param X n x p numeric matrix of explanatory variables.
#' @param Y Length-n numeric response vector.
#' @param lambda_all Numeric vector of regularization parameters; sorted in
#'   decreasing order internally.
#' @return A p x length(lambda_all) matrix whose l-th column is the lasso
#'   coefficient vector for the l-th (sorted, decreasing) lambda.
#' @keywords Lasso
#' @export
Lasso <- function(X, Y, lambda_all) {
  lambda_all <- sort(lambda_all, decreasing = TRUE)
  p <- ncol(X)
  n_lambda <- length(lambda_all)
  n_sweeps <- 10                      # fixed coordinate-descent sweeps per lambda
  # (renamed from `T`, which shadowed the TRUE alias)
  beta <- rep(0, p)                   # current coefficients, warm-started across lambdas
  beta_all <- matrix(0, nrow = p, ncol = n_lambda)
  R <- Y                              # residuals Y - X %*% beta for current beta
  ss <- colSums(X^2)                  # per-column sums of squares (update denominators)
  for (l in seq_len(n_lambda)) {
    lambda <- lambda_all[l]
    for (t in seq_len(n_sweeps)) {
      for (j in seq_len(p)) {
        db <- sum(R * X[, j]) / ss[j]                    # unpenalized update for beta[j]
        b <- beta[j] + db
        b <- sign(b) * max(0, abs(b) - lambda / ss[j])   # soft-thresholding
        db <- b - beta[j]
        R <- R - X[, j] * db                             # keep residuals in sync
        beta[j] <- b
      }
    }
    beta_all[, l] <- beta
  }
  # p x length(lambda_all) solution path (the old comment claiming p+1 rows
  # was wrong — no intercept column is produced).
  beta_all
}
|
7e749080256b5d99f9c2a2ec09202122aa476fe6
|
86d134d36d286fe5307561d25e89c165b5eee91e
|
/app.R
|
3db24d7765485df0bc23b8eece39ae141e1d676f
|
[] |
no_license
|
kshitionweb/Open-Data-and-Visualization-using-Shiny-R
|
5e787f1b634be126e470241b2fd80482c2c6a416
|
b4e19b5bf0f74ab9d1ff74ae397b057e24b80c12
|
refs/heads/main
| 2023-04-11T01:11:56.254644
| 2021-04-20T21:56:56
| 2021-04-20T21:56:56
| 359,960,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,448
|
r
|
app.R
|
# Load packages
pkgs <- c("shiny", "shinythemes", "leaflet", "leaflet.extras",
          "tidyverse", "plotly", "scales")
library(conflicted)
# Resolve namespace clashes up front so the intended function always wins.
conflict_prefer("select", "dplyr")
conflict_prefer("cols", "vroom")
conflict_prefer("filter", "dplyr")
conflict_prefer("lag", "dplyr")
conflict_prefer("layout", "plotly")
# BUG FIX: require() only warns (and returns FALSE) when a package is
# missing, letting the app limp along and fail later; library() fails fast.
invisible(lapply(pkgs, library, character.only = TRUE))
# {scales} is for breaks_pretty() & breaks_extended()
### SECTION 1: Read & clean data, make static objects ###
# 1.1. Read population data
# Quarterly population estimates per region; joined onto the COVID data
# below to compute the per-capita indicators.
population <- read_csv("population.csv",
                       col_types = cols(quarter = col_double(),
                                        region = col_character(),
                                        population = col_integer()))
# 1.2. Read and clean COVID-19 data
# One row per region per day. Pipeline: read -> rename -> consolidate the
# two testing columns -> sanitize numerics -> derive daily deltas and
# per-capita indicators against the quarterly population table.
covid_data <- read_csv("covid19_canada_data.csv",
                       # read_csv figures type by 1st 1000 rows; if all NA == problem.
                       col_types = cols_only(prname = col_character(),
                                             date = col_date(format = "%d-%m-%Y"),
                                             numdeaths = col_integer(),
                                             numtotal = col_integer(),
                                             numrecover = col_integer(),
                                             numtested = col_integer(),
                                             numtests = col_integer(),
                                             numtoday = col_integer()),
                       na = c("", "NA", "N/A")) %>%
  rename("region" = "prname", "total_deaths" = "numdeaths",
         "total_cases" = "numtotal", "total_recovered" = "numrecover",
         "new_cases" = "numtoday") %>%
  # Use numtested where present, otherwise fall back to numtests.
  mutate(total_tested = coalesce(numtested, numtests)) %>%
  select(-c(numtested, numtests)) %>%
  mutate_at(vars("total_deaths":"total_tested"), # remove all non-numeric characters, if any
            list(~str_remove_all(., "[^0-9]"))) %>%
  mutate_at(vars("total_deaths":"total_tested"), as.numeric) %>%
  mutate_if(is.numeric, abs) %>%
  filter(!grepl("^Repatriated", region)) %>% # drop 'repatriated travelers' category (irrelevant)
  filter(date >= as.Date("2020-03-21")) %>% # removes highly incomplete data prior to March 21
  mutate(quarter = lubridate::quarter(lubridate::ymd(date, truncated = 1),
                                      with_year = TRUE)) %>% # create key column to join population estimates by quarter
  left_join(population, by = c("quarter", "region")) %>%
  group_by(region) %>%
  # Daily deltas: difference from the previous day within each region.
  mutate(new_tested = total_tested - lag(total_tested,
                                         default = first(total_tested),
                                         order_by = date)) %>%
  mutate(new_deaths = total_deaths - lag(total_deaths,
                                         default = first(total_deaths),
                                         order_by = date)) %>% # next 2 lines remove highly incomplete recoveries data prior to April 18
  mutate(tmp_na = if_else(date < as.Date("2020-04-18"), NA_real_, 0)) %>%
  mutate(total_recovered = total_recovered + tmp_na) %>% # a hack to convert some vals to NAs based on vals in the 'date' col
  mutate(active_cases = total_cases - total_recovered) %>%
  select(-total_recovered, -tmp_na, -quarter) %>% # these cols no longer needed
  mutate(case_fatality_rate = round(total_deaths / total_cases * 100, 2)) %>% # case fatality rate
  mutate(case_fatality_rate = replace_na(case_fatality_rate, 0)) %>% # replace NAs introduced by division by 0
  mutate(mortality_per_100000 = round(total_deaths / population * 100000, 2)) %>%
  mutate(cases_per_100000 = round(total_cases / population * 100000, 2)) %>% # prevalence: cases per 100,000
  mutate(active_cases_per_100000 = round(active_cases / population * 100000, 2)) %>%
  mutate(new_cases_per_100000 = round(new_cases / population * 100000, 2)) %>%
  mutate(tests_per_1000 = round(total_tested / population * 1000, 2)) %>%
  ungroup() %>% # Making sure that 'covid$region' and 'provinces$region' are in the same order,
  group_by(date) %>% # else will incorrectly map regions to polygons in Leaflet.
  arrange(date, region) %>%
  ungroup() %>%
  naniar::replace_with_na_if(.predicate = is.numeric, condition = ~.x < 0)
# Re-arrange columns for convenience
covid_data <- covid_data[c("region", "population", "date",
                           "total_cases", "cases_per_100000",
                           "active_cases", "active_cases_per_100000",
                           "new_cases", "new_cases_per_100000",
                           "total_deaths", "new_deaths",
                           "case_fatality_rate", "mortality_per_100000",
                           "total_tested", "new_tested", "tests_per_1000")]
# 1.3. Read Canada's geography
# Provincial/territorial boundary polygons (sf object) for the leaflet map.
provinces <- sf::st_read("provinces.gpkg",
                         stringsAsFactors = FALSE,
                         quiet = TRUE)
# 1.4. CRS to re-project map
# Lambert azimuthal equal-area projection centred on Canada.
epsg2163 <- leafletCRS(crsClass = "L.Proj.CRS",
                       code = "EPSG:2163",
                       resolutions = 2^(16:7),
                       proj4def = "+proj=laea +x_0=0 +y_0=0 +lon_0=-97 +lat_0=62.3 +units=m")
# 1.5. List dates for which colorQuantile() breaks are not unique
# (used by the server to fall back to a different palette on those days)
broken_dates <- covid_data %>%
  filter(date >= "2020-05-01") %>%
  pull(date) %>%
  unique()
# 1.6. Negated set membership: TRUE for elements of `x` absent from `table`.
`%not_in%` <- function(x, table) !(x %in% table)
# 1.7. Make color palette for Canada and regions for plotly plot
# 14 colours — presumably one per entry in `regions` (13 provinces and
# territories plus Canada); confirm the pairing in the plotly code.
colors_ca <- c("#1B9E77", "#6A3D9A", "#EF432C", "#E7298A",
               "#66A61E", "#D95F02", "#A6761D", "#08306B",
               "#E6AB02", "#1F78B4", "#7570B3", "#333333",
               "#993404", "#666666")
# 1.8. Specify font for use in plotly plot
my_font <- list(size = 14, color = "black")
# 1.9. Legend items selected by default in the plotly plot: a named list
# mapping every region to TRUE, i.e. all traces start visible.
legend_items <- as.list(setNames(
  rep(TRUE, 14),
  c("Alberta", "British Columbia", "Canada", "Manitoba",
    "New Brunswick", "Newfoundland and Labrador",
    "Northwest Territories", "Nova Scotia", "Nunavut",
    "Ontario", "Prince Edward Island", "Quebec",
    "Saskatchewan", "Yukon")))
# 1.10. Make named list of all indicators to pass to the 'choices' arg
# Names are the labels shown in the UI; values are covid_data column names.
indicators_all <- list("Total cases" = "total_cases",
                       "Cases per 100,000" = "cases_per_100000",
                       "Active cases" = "active_cases",
                       "Active cases per 100,000" = "active_cases_per_100000",
                       "New cases" = "new_cases",
                       "New cases per 100,000" = "new_cases_per_100000",
                       "Total deaths" = "total_deaths",
                       "New deaths" = "new_deaths",
                       "Case fatality rate, %" = "case_fatality_rate",
                       "Mortality per 100,000" = "mortality_per_100000",
                       "Total tested / Total tests" = "total_tested",
                       "New tested / New tests" = "new_tested",
                       "Tested per 1,000 / Tests per 1,000" = "tests_per_1000")
# 1.11. Make named list of comparative indicators to pass to the 'choices' arg
# Only the per-capita / rate indicators, which are comparable across regions.
indicators_comp <- list("Cases per 100,000" = "cases_per_100000",
                        "Active cases per 100,000" = "active_cases_per_100000",
                        "New cases per 100,000" = "new_cases_per_100000",
                        "Case fatality rate, %" = "case_fatality_rate",
                        "Mortality per 100,000" = "mortality_per_100000",
                        "Tested per 1,000 / Tests per 1,000" = "tests_per_1000")
# 1.12. Make list of regions to pass to the 'choices' arg:
regions <- list("Canada", "Alberta", "British Columbia", "Manitoba",
                "New Brunswick", "Newfoundland and Labrador",
                "Northwest Territories", "Nova Scotia", "Nunavut",
                "Ontario", "Prince Edward Island", "Quebec",
                "Saskatchewan", "Yukon")
### Section 2. Make app ###
## 2.1. Define UI ##
# Three tabs: the main map+plot dashboard, a cross-region comparison plot,
# and a data selector/download page. Input IDs (my_*) are read in server();
# output IDs (covid_map, covid_plot, plotly_plot, my_table, ...) are
# rendered there.
ui <-
  navbarPage(
    title = "Open Data and Visualization for COVID-19 Impact on Canada",
    theme = shinytheme("flatly"),
    tabPanel(title = "Main dashboard",
             sidebarLayout(
               sidebarPanel(
                 h4("Make your selection"),
                 width = 3,
                 selectInput(inputId = "my_indicator",
                             label = h5("Indicator - map and plot"),
                             selected = "cases_per_100000",
                             choices = indicators_all),
                 dateInput(inputId = "my_date",
                           label = h5("Date - map only"),
                           value = max(covid_data$date), # Set to last day !NA in the dataset.
                           min = min(covid_data$date), # For most indicators, N too small before Mar 21, causes
                           max = max(covid_data$date), # error in colorQuantile(): 'breaks' are not unique.
                           format = "dd MM yyyy"),
                 tags$p(HTML("<div style='font-family:Inconsolata; font-size:11pt;'>If data for the chosen indicator is not available for the selected date, the date will revert to the last day, for which there is data.</div>")),
                 selectInput(inputId = "my_region",
                             label = h5("Region - plot only"),
                             selected = "Canada",
                             choices = regions),
                 dateRangeInput(inputId = "my_daterange",
                                label = h5("Date range - plot only"),
                                start = min(covid_data$date),
                                end = max(covid_data$date),
                                min = min(covid_data$date),
                                max = max(covid_data$date),
                                format = "dd M yyyy"),
                 tags$p(HTML("<div style='font-family:Inconsolata; font-size:11pt;'>If data for the chosen indicator is not available for the selected date range, the date range will revert to the range, for which there is data.</div>")),
               ),
               mainPanel(width = 9,
                         fluidRow(htmlOutput("map_title")),
                         fluidRow(
                           splitLayout(cellWidths = c("64%", "34%"), # Prevents jittering caused by screen width conflict
                                       leafletOutput("covid_map", height = "600px"),
                                       plotOutput("covid_plot"))
                         )
               )
             )
    ),
    # Second tab: one comparable (per-capita/rate) indicator across regions.
    tabPanel(title = "Compare regions",
             sidebarLayout(
               sidebarPanel(
                 width = 3,
                 selectInput(inputId = "my_indicator2",
                             label = h4("Choose indicator"),
                             selected = "cases_per_100000",
                             choices = indicators_comp),
                 tags$p(HTML("<div style='font-family:Inconsolata;'><h5>How to use:</h5></div>")),
                 includeMarkdown("compare.md"),
                 tags$p(HTML("")
                 )
               ),
               mainPanel(
                 width = 9,
                 plotlyOutput(outputId = "plotly_plot",
                              height = "600px")
               )
             )
    ),
    # Third tab: pick regions/indicators/date range, preview, and download.
    tabPanel(title = "Download data",
             sidebarLayout(
               sidebarPanel(
                 h4("Select and download data"),
                 width = 5,
                 checkboxGroupInput(inputId = "my_regions_group",
                                    label = h5("Regions"),
                                    choices = regions,
                                    selected = "Canada",
                                    inline = TRUE),
                 actionButton(inputId = "select_all_regions",
                              label = "Select all / Deselect all"),
                 checkboxGroupInput(inputId = "my_indicators_group",
                                    label = h5("Indicators"),
                                    choices = indicators_all,
                                    selected = indicators_all,
                                    inline = TRUE),
                 actionButton(inputId = "select_all_indicators",
                              label = "Select all / Deselect all"),
                 dateRangeInput(inputId = "my_daterange2",
                                label = h5("Date range"),
                                start = max(covid_data$date) - 30,
                                end = max(covid_data$date),
                                min = min(covid_data$date),
                                max = max(covid_data$date),
                                format = "dd M yyyy"),
                 downloadButton(outputId = "downloader",
                                label = h6("Download Selected Data")),
               ),
               mainPanel(
                 width = 7,
                 tableOutput("my_table")
               )
             )
    )
  ) # UI BLOCK ENDS
## 2.2. Define server logic ##
# Shiny server for the COVID-19 Canada dashboard. Builds:
#   - a choropleth leaflet map of provinces for one date/indicator,
#   - a ggplot2 time series for one region,
#   - a plotly multi-region comparison plot,
#   - a filterable data table with CSV download.
# NOTE(review): relies on objects defined earlier in the file (outside this
# block): covid_data, provinces, epsg2163, broken_dates, %not_in%, colors_ca,
# my_font, legend_items, regions, indicators_all -- confirm they exist upstream.
server <- function(input, output, session) {
# Disable scientific notation for readability
options(scipen = 999)
# 2.2.1. Get data
# Get data for provinces
# Reactive: one row per province for the selected date, with the selected
# indicator moved to column 1 (the map palette reads column 1).
current_prov <- reactive({
req(input$my_date, input$my_indicator)
covid_data %>%
filter(date == input$my_date) %>%
select(input$my_indicator, everything()) %>%
right_join(provinces, by = "region")
})
# Get data for Canada
# Reactive: the single national row for the selected date (marker popup).
current_ca <- reactive({
req(input$my_date)
filter(covid_data, region == "Canada") %>%
filter(date == input$my_date)
})
# Get data for the selected region
# Reactive: time series of one indicator for one region over the date range;
# the chosen indicator column is renamed to "indicator" for plotting.
current_region <- reactive({
req(input$my_region, input$my_daterange, input$my_indicator)
covid_data %>%
filter(region == input$my_region) %>%
filter(between(date, input$my_daterange[1], input$my_daterange[2])) %>%
select("indicator" = input$my_indicator, "region", "date")
})
# 2.2.2. Create palette type-dependent on user's choice of indicator
# selector_i() and selector_d() conductors to help choose palette
selector_i <- reactive({
input$my_indicator
})
selector_d <- reactive({
input$my_date
})
# Quantile palette for cumulative-style indicators (skewed distributions);
# continuous numeric palette otherwise. Active-case indicators only get the
# quantile palette on dates outside broken_dates -- presumably dates where
# the quantile bins fail; TODO confirm intent of broken_dates.
covid_pal <-
reactive({
if (selector_i() == "total_cases" |
selector_i() == "cases_per_100000" |
(selector_i() == "active_cases" &
selector_d() %not_in% broken_dates) |
(selector_i() == "active_cases_per_100000" &
selector_d() %not_in% broken_dates) |
selector_i() == "total_tested" |
selector_i() == "tests_per_1000") {
colorQuantile(palette = "BuPu", n = 7,
domain = current_prov()[[1]]) # Shiny 'Data' input goes here (1)
} else {
colorNumeric(palette = "BuPu",
domain = current_prov()[[1]])# Shiny 'Data' input goes here (1)
#bins = 5#, pretty = TRUE)
}
})
# 2.2.3. Make map
# Create text output to use as map title
output$map_title <-
renderUI({
HTML(
paste(
h4(str_to_sentence(str_replace_all(input$my_indicator, "_", " ")),
"as of",
format(input$my_date, format = "%d %B %Y")),
h6("Click province or marker for more information")))
})
# Make map as a reactive conductor as potentially slower operation
# Choropleth of provinces plus one national summary marker; each polygon's
# popup lists all indicators for that province on the selected date.
leaflet_map <- reactive({
leaflet(provinces, options = leafletOptions(crs = epsg2163,
minZoom = 3,
maxZoom = 5)) %>%
setView(lng = -90, lat = 63.3, zoom = 3) %>%
setMapWidgetStyle(style = list(background = "#f5f5f5")) %>%
addPolygons(data = provinces,
color = "black",
weight = 1,
opacity = .7,
fillOpacity = .8,
fillColor = ~covid_pal()(current_prov()[[1]]),
label = current_prov()$region,
popup = paste("<div style='font-size: 15px'><b><h5>",
current_prov()$region, "</h5></b>",
"Date:", current_prov()$date, "<br>",
"Total cases:", current_prov()$total_cases, "<br>",
"Cases per 100,000:", current_prov()$cases_per_100000, "<br>",
"Active cases:", current_prov()$active_cases, "<br>",
"Active cases per 100,000:", current_prov()$active_cases_per_100000, "<br>",
"New cases:", current_prov()$new_cases, "<br>",
"New cases per 100,000:", current_prov()$new_cases_per_100000, "<br>",
"Total deaths:", current_prov()$total_deaths, "<br>",
"New deaths:", current_prov()$new_deaths, "<br>",
"Case fatality rate:", current_prov()$case_fatality_rate, "%", "<br>",
"Mortality per 100,000:", current_prov()$mortality_per_100000, "<br>",
"Total tested / Total tests:", current_prov()$total_tested, "<br>",
"New tested / New tests:", current_prov()$new_tested, "<br>",
"Tested per 1,000 / Tests per 1,000:", current_prov()$tests_per_1000, "</div>"),
labelOptions = labelOptions(textsize = "15px",
style = list("font-weight" = "bold")),
highlightOptions = highlightOptions(weight = 1,
color = "#ffffff",
fillOpacity = 1,
bringToFront = TRUE)) %>%
addMarkers(lng = -85,
lat = 59,
label = "CANADA",
popup = paste("<div style='font-size: 15px'><b><h5>", #*
"Canada", "</h5></b>", # note no <br> tag
"Date:", current_ca()$date, "<br>",
"Total cases:", current_ca()$total_cases, "<br>",
"Cases per 100,000:", current_ca()$cases_per_100000, "<br>",
"Active cases:", current_ca()$active_cases, "<br>",
"Active cases per 100,000:", current_ca()$active_cases_per_100000, "<br>",
"New cases:", current_ca()$new_cases, "<br>",
"New cases per 100,000:", current_ca()$new_cases_per_100000, "<br>",
"Total deaths:", current_ca()$total_deaths, "<br>",
"New deaths:", current_ca()$new_deaths, "<br>",
"Case fatality rate:", current_ca()$case_fatality_rate, "%", "<br>",
"Mortality per 100,000:", current_ca()$mortality_per_100000, "<br>",
"Total tested / Total tests:", current_ca()$total_tested, "<br>",
"New tested / New tests:", current_ca()$new_tested, "<br>",
"Tested per 1,000 / Tests per 1,000:", current_ca()$tests_per_1000, "</div>"),
labelOptions = labelOptions(textsize = "15px",
style = list("font-weight" = "bold")))
})
# Pass map to outputs
output$covid_map <- renderLeaflet({leaflet_map()})
# 2.2.4. Make ggplot2 plot
# Line + shaded-area time series for the selected region and indicator.
# NOTE(review): geom_line(size = ...) is deprecated in ggplot2 >= 3.4
# (use linewidth); left unchanged here to preserve behavior.
output$covid_plot <-
renderPlot({
current_region() %>%
ggplot(aes(x = date,
y = indicator)) +
geom_line(size = 1.2, color = "orchid1", na.rm = TRUE) +
geom_area(fill = "orchid", alpha = .3,
na.rm = TRUE,
position = "identity") +
scale_x_date(breaks = breaks_pretty(n = 10)) +
scale_y_continuous(breaks = breaks_extended(n = 8)) +
theme_bw() +
theme(plot.title = element_text(size = 14,
face = "bold",
hjust = .5,
margin = margin(b = 10)),
plot.margin = unit(c(0, 0.5, 1, 0.5), "cm"),
panel.grid.minor = element_blank(),
panel.grid.major = element_line(colour = "grey85"),
axis.text = element_text(size = 13, face = "bold"),
axis.text.x = element_text(angle = 45, hjust = 1),
axis.title = element_blank()) +
labs(title = paste0(input$my_region, ": ",
str_replace_all(input$my_indicator, "_", " "),
",\n",
format(input$my_daterange[1], format = "%d %b %Y"),
" to ",
format(input$my_daterange[2], format = "%d %b %Y")))
})
# 2.2.5. Make plotly plot to compare regions
# Make a reactive plot title
plot_title <- reactive({
req(input$my_indicator2)
str_replace_all(string = names(covid_data[input$my_indicator2]), "_", " ") %>%
str_to_title() %>%
paste("by Region")
})
# Make a reactive dataset, pre-subsetted for plotting
covid_plotly <- reactive({
req(input$my_indicator2)
covid_data %>%
group_by(region) %>%
select("region", "date", "indicator" = input$my_indicator2)
})
# Make plotly plot
output$plotly_plot <-
renderPlotly({
my_plot <-
covid_plotly() %>%
plot_ly(x = ~date,
y = ~indicator,
color = ~region) %>%
add_lines(line = list(width = 3),
colors = colors_ca) %>%
layout(title = list(text = plot_title(),
x = .05, # align left
font = list(size = 18, color = "black")),
legend = list(font = my_font),
hoverlabel = list(font = list(size = 14)),
margin = list(t = 50, pad = 5), # pad sets distance between tick marks and ticks
xaxis = list(title = "",
tickfont = my_font,
gridcolor = "#E0E0E0"),
yaxis = list(title = "",
tickfont = my_font,
gridcolor = "#E0E0E0"))
# Build the plot so per-trace visibility can be set from legend_items
# (e.g. "legendonly" for traces hidden by default).
my_plot <- plotly_build(my_plot)
for (i in seq_along(my_plot$x$data)) {
my_plot$x$data[[i]]$visible <- legend_items[[my_plot$x$data[[i]]$name]]
}
my_plot
})
# 2.2.6. Make data table
# Reactive: rows/columns chosen in the table tab; date converted to
# character so renderTable does not show it as a numeric offset.
selected_data <- reactive({
req(input$my_regions_group, input$my_daterange2)
covid_data %>%
filter(region %in% input$my_regions_group) %>%
filter(between(date, input$my_daterange2[1], input$my_daterange2[2])) %>%
select("region", "date", input$my_indicators_group) %>%
mutate_at(vars("date"), as.character)
})
output$my_table <- renderTable({selected_data()}, hover = TRUE)
# Select/deselect all regions and "Select all" button
# Button click count parity toggles between all-selected and none-selected.
observe({
if (input$select_all_regions == 0) return(NULL)
else if (input$select_all_regions %% 2 == 0) {
updateCheckboxGroupInput(session,
inputId = "my_regions_group",
choices = regions,
inline = TRUE)
} else {
updateCheckboxGroupInput(session,
inputId = "my_regions_group",
choices = regions,
selected = regions,
inline = TRUE)
}
})
# Select/deselect all indicators and "Select all" button
# Same parity toggle as above (note the inverted parity test here).
observe({
if (input$select_all_indicators == 0) return(NULL)
else if (input$select_all_indicators %% 2 != 0) {
updateCheckboxGroupInput(session,
inputId = "my_indicators_group",
choices = indicators_all,
inline = TRUE)
} else {
updateCheckboxGroupInput(session,
inputId = "my_indicators_group",
choices = indicators_all,
selected = indicators_all,
inline = TRUE)
}
})
# 2.2.7. Data downloader
# Streams the currently selected table rows as a CSV download.
output$downloader <-
downloadHandler(
filename = "covid_open_data_canada.csv",
content = function(file) {
write_csv(selected_data(), file)
}
)
} # SERVER BLOCK ENDS
## 2.3. Run app ##
# Launch the application with the UI and server defined above.
shinyApp(ui = ui, server = server)
|
87e086aadcbfd7e754490aac7fcc4b604e9252e7
|
8baed20bf6814e71bea9cd94b4111d67ec00f85e
|
/FastGO.R
|
73d4c0fc2603717db1835057e797935e75240c14
|
[] |
no_license
|
laramangravite/StatinDifferentialNetworks
|
61af0b2cba026effe90f3dd012f313e514877fa5
|
74477e41c2fa21e46e92fc4cf1fed558f8e1935d
|
refs/heads/master
| 2020-04-28T15:17:47.020632
| 2012-11-29T18:25:03
| 2012-11-29T18:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,719
|
r
|
FastGO.R
|
##############################################################################################
## Fast Gene Ontology enrichment analysis for modules using Fisher's exact test
##
## Author : Zhi Wang
## Contact : Sage Bionetworks
## Email : zhi.wang@sagebase.org
## Date : 03/03/2011
##------------------------------------------------------------------------------------------------------------------
## inputfname contains: gene information, expression data, module information (last column)
## identifier_col : unique gene identifier column
## =1 for normal case (probeset)
## =3 for bxh (locus link number)
## actually, it will be ignored
##
## gene_symbolcol : we assume the first col as probset and this column gives the real gene names
## ontologyfnlist : GeneFisher ontology file, ONLY ACCEPT ONE FILE
## maxSignifLevel : report only the categories with FisherExactTest Pvalue < maxSignifLevel
## outdir : =="", put the results under the same directory as the input, otherwise under the new directory
## useAllModules : whether use all genes as one module
## background : set background size (if set, it should be larger than number of genes in the input file unions ontology file)
## ReportGenes : Whether report overlap genes. It is ~7 times slower for this calculation.
# Run Fisher's-exact-test ontology enrichment for every module in a
# comma-separated input file and write per-module and combined .xls tables.
# The gene universe ("background") is the ontology gene set (Background='set1').
# NOTE(review): the parameters identifier_col and background are not used in
# this body (the header above says identifier_col is ignored) -- confirm
# background was meant to feed BackgroundSize.
FastModuleOntologyAnalysis <- function(inputfname, ontologyfnlist,
identifier_col=1, gene_symbolcol=2,
outdir="",
useAllModules=F, background=0, minPopHits=5,
signifLevel=0.001, ctopNrows=20, ReportGenes=T
)
{
## read input module file
allMatrix <- read.delim(inputfname, sep=",", header=F)
# Output prefix: same directory as input, or outdir/<basename> if outdir given
if(outdir=="") {
mfname = getFileName(inputfname)
} else{
mfname = getFileNameNopath(inputfname)
mfname = paste(outdir, "/", mfname, sep="")
}
# Build (gene, module) pairs; the module label lives in the LAST column
# of the input, or everything is pooled into one "all" module.
if(!useAllModules){ # consider modules
geneSet <- allMatrix[, c(gene_symbolcol, ncol(allMatrix))]
} else{ # consider all gene in the list
geneSet <- cbind(allMatrix[, gene_symbolcol], "all")
}
# Upper-case gene symbols so matching against the ontology is case-insensitive
geneSet[,1] <- toupper(as.character(geneSet[,1])); geneSet[,2] <- as.character(geneSet[,2]);
## remove NA, "NA" and ""
is.rm <- is.na(geneSet[,1]) | is.na(geneSet[,2]) | (geneSet[,1] == "") | (geneSet[,2] == "") | (geneSet[,1] == "NA") | (geneSet[,2] == "NA")
geneSet <- geneSet[!is.rm, ]
## remove redundant rows
geneSet <- unique(geneSet)
## split into list
geneList <- split(geneSet[,1], geneSet[,2])
## read ontology file
# Columns used: 1 = ontology system, 2 = category name, 5 = "; "-joined genes
ontoTab <- read.delim(ontologyfnlist, sep="\t", header=T, as.is=T)[,c(1,2,5)]
## change ontology system to shorter names
ontoTab[ontoTab[,1] == "GO Biological Process",1] <- "BP"
ontoTab[ontoTab[,1] == "GO Molecular Function",1] <- "MF"
ontoTab[ontoTab[,1] == "GO Cellular Component",1] <- "CC"
ontoTab[ontoTab[,1] == "Panther Biological Process",1] <- "Panther BP"
ontoTab[ontoTab[,1] == "Panther Molecular Function",1] <- "Panther MF"
ontoTab[ontoTab[,1] == "Panther Cellular Component",1] <- "Panther CC"
## combine ontology system and category
ontoTab <- cbind(paste(ontoTab[,1], ontoTab[,2], sep='\t'), ontoTab[,3])
ontoList <- list()
# Parse each category's gene list (minPopHits filtering is applied later,
# on the enrichment table, not here -- see the commented-out guard)
for(i in 1:nrow(ontoTab))
{
genes <- toupper(unlist(strsplit(ontoTab[i,2], "; ")))
#if(length(genes) > minPopHits)
#{
ontoList[[i]] <- genes
#}
}
names(ontoList) <- ontoTab[,1]
# Enrichment of every (category, module) pair; ontology is the background
overlap <- GeneSetListOverlap(ontoList, geneList,
Background='set1', BackgroundSize=0,
ReportGenes=ReportGenes
)
modNames <- names(overlap$Set2Size)
ontNames <- names(overlap$Set1Size)
enrichTab <- overlap$EnrichTable
#	tabnames <- c("Set1", "Set2",
#			"Overlap size", "Sampling size", "Positive size", "Background size",
#			"Fold enrichment", "P value",
#			"Overlap genes"
#			)
# Rename the generic GeneSetListOverlap columns to module/ontology terms
if(ReportGenes)
{
colnames(enrichTab) <- c("System\tOntology category", "Module",
"Overlap size", "Module size", "Category size", "Background size",
"Fold enrichment", "P value",
"Overlap genes"
)
} else
{
colnames(enrichTab) <- c("System\tOntology category", "Module",
"Overlap size", "Module size", "Category size", "Background size",
"Fold enrichment", "P value"
)
}
# Reorder columns: Module, Module size, category, overlap, then the rest
enrichTab <- enrichTab[,c(2,4,1,3,5:ncol(enrichTab))]
## filter unsignificant rows
is.sig <- (as.numeric(enrichTab[,"P value"]) <= signifLevel & as.numeric(enrichTab[,"Overlap size"]) >= minPopHits)
enrichTab <- enrichTab[is.sig,]
ontTab <- NULL
## output each module results to file
# One <prefix>_Ontology_<module>.xls per module; the combined file keeps
# only the top ctopNrows rows of each module (rows arrive p-value-sorted).
for(mod in modNames)
{
outFile <- paste(mfname, "_Ontology_", mod, ".xls", sep='')
modTab <- enrichTab[enrichTab[,"Module"] == mod, ]
write.table(modTab, file=outFile, sep='\t', quote=F, col.names=T, row.names=F)
if(nrow(modTab) > ctopNrows)
{
ontTab <- rbind(ontTab, modTab[1:ctopNrows,])
} else
{
ontTab <- rbind(ontTab, modTab)
}
}
# Sort combined table: larger modules first, then module name, then p-value
ontTab <- ontTab[order(-1* as.numeric(ontTab[,"Module size"]), ontTab[,"Module"], as.numeric(ontTab[,"P value"])), ]
## output total result
totalOutFile <- paste(mfname, "_Ontology.xls", sep='')
write.table(ontTab, file=totalOutFile, sep='\t', quote=F, col.names=T, row.names=F)
}
##-----------------------------------------------------------------------------------------------
## Internal functions
##-----------------------------------------------------------------------------------------------
## New implementation of the test enrichment of overlaps between gene set1 and gene set2
## Input:
## GeneSet*: 2 column data frame, 1st col Gene Identifiers, 2nd col Category Identifiers
## NOTICE: NA, "NA", "" are removed for Gene and Category Identifiers
## Background: which dataset used as background, usually set1 is background, and set2 is query set
## BackgroundSize: Specified background gene set size, only useful if Background="large",
## if specify BackgroundSize, it should be larger than union(GeneSet1, GeneSet2)
##
## Return:
## EnrichTable: Table of enrichments, ranked by p-value
## PVal: P value matrix
## Fold: Fold enrichment matrix
## Enrichment of overlaps between two (gene, category) tables.
## Each GeneSet* is a two-column data frame: column 1 = gene identifier,
## column 2 = category identifier. Rows with NA/"NA"/"" in either column
## are dropped, genes are upper-cased, duplicates removed, and the cleaned
## pairs are handed to GeneSetListOverlap() as per-category gene lists.
## See GeneSetListOverlap() for the Background/BackgroundSize semantics
## and the structure of the returned list.
GeneSetOverlap <- function(GeneSet1, GeneSet2,
                           Background="set1", BackgroundSize=0,
                           ReportGenes=T
                           )
{
  ## validate the Background choice (unambiguous abbreviations accepted)
  Background <- char.expand(Background, c("set1", "union", "intersect", "large"))
  if (length(Background) > 1 || is.na(Background)) stop("Background must be \"set1\", \"union\", \"intersect\", \"large\"")

  ## normalise one (gene, category) table: upper-case genes, character
  ## categories, drop NA/"NA"/"" rows, remove redundant rows
  clean_set <- function(gs) {
    gs[,1] <- toupper(as.character(gs[,1]))
    gs[,2] <- as.character(gs[,2])
    bad <- is.na(gs[,1]) | is.na(gs[,2]) |
      (gs[,1] == "") | (gs[,2] == "") |
      (gs[,1] == "NA") | (gs[,2] == "NA")
    unique(gs[!bad, ])
  }
  GeneSet1 <- clean_set(GeneSet1)
  GeneSet2 <- clean_set(GeneSet2)

  ## convert to per-category gene lists and delegate
  CatGene1 <- split(GeneSet1[,1], GeneSet1[,2])
  CatGene2 <- split(GeneSet2[,1], GeneSet2[,2])
  GeneSetListOverlap(CatGene1, CatGene2,
                     Background=Background, BackgroundSize=BackgroundSize,
                     ReportGenes=ReportGenes)
}
## New implementation of the test enrichment of overlaps between gene set1 and gene set2
## Input:
## CatGene*: list format of genes within each category
## names(list) are category names, list[[i]] are genes within category i
## NOTICE: NA, "NA", "" should have already been removed for Gene and Category Identifiers, upper/lower cases are unified
## Background: which dataset used as background, usually set1 is background, and set2 is query set
## BackgroundSize: Specified background gene set size, only useful if Background="large",
## if specify BackgroundSize, it should be larger than union(GeneSet1, GeneSet2)
## ReportGenes: Whether report overlap genes. It is ~7 times slower for this calculation. It will be improved later.
##
## Return:
## EnrichTable: Table of enrichments, ranked by p-value
## EnrichMatrices: Module-by-module enrichment matrices
## Test enrichment of overlaps between two collections of gene categories
## using one-sided hypergeometric (Fisher's exact) tests, computed in bulk
## via matrix algebra rather than per-pair loops.
##
## CatGene1, CatGene2 : named lists; names are category identifiers and each
##     element is a character vector of gene identifiers. Genes are assumed
##     pre-cleaned (no NA/""/"NA"; case already unified).
## Background : gene universe for the test -- "set1" (genes of CatGene1, the
##     usual choice when set1 is the annotation), "union", "intersect", or
##     "large" (union of both sets, but with the universe size forced to
##     BackgroundSize).
## BackgroundSize : explicit universe size; used only when Background=="large"
##     and must be >= |union(set1, set2)|, otherwise it is ignored with a
##     warning.
## ReportGenes : if TRUE, also report the overlapping gene identifiers for
##     every category pair (slower: one extra pass per pair).
##
## Returns a list: Set1Size/Set2Size (per-category gene counts within the
## universe), Count/Fold/Pval matrices (set1 categories x set2 categories),
## and EnrichTable, a character data frame of all pairs sorted by ascending
## p-value.
GeneSetListOverlap <- function(CatGene1, CatGene2,
                               Background="set1", BackgroundSize=0,
                               ReportGenes=TRUE
                               )
{
  ## validate the Background choice (unambiguous abbreviations accepted)
  Background <- char.expand(Background, c("set1", "union", "intersect", "large"))
  if (length(Background) > 1 || is.na(Background)) stop("Background must be \"set1\", \"union\", \"intersect\", \"large\"")

  ## unique genes and category names per collection
  UnqGene1 <- unique(unlist(CatGene1)); UnqGene2 <- unique(unlist(CatGene2))
  CatList1 <- names(CatGene1); CatList2 <- names(CatGene2)

  ## choose the gene universe ("balls in the urn")
  totalGenes <-
    switch(Background,
           set1      = UnqGene1,
           union     = union(UnqGene1, UnqGene2),
           intersect = intersect(UnqGene1, UnqGene2),
           large     = union(UnqGene1, UnqGene2))
  totalBalls <- length(totalGenes)
  if (Background == "large") {
    ## BUGFIX: the previous code tested `totalBalls < length(totalGenes)`
    ## immediately after setting totalBalls to length(totalGenes), which is
    ## never TRUE -- so an undersized BackgroundSize was silently accepted
    ## (yielding negative black-ball counts and NaN p-values). Compare the
    ## requested size itself.
    if (BackgroundSize < length(totalGenes)) {
      warning("Defined BackgroundSize is too small, use union(GeneSet1, GeneSet2) instead!")
    } else {
      totalBalls <- BackgroundSize
    }
  }
  if (totalBalls == 0) stop("Your background set has problems!")

  ## gene-by-category membership matrices over the chosen universe; genes
  ## outside the universe are dropped, genes absent from a collection
  ## simply stay FALSE
  embed_membership <- function(CatGene, CatList, totalGenes) {
    m <- matrix(FALSE, nrow=length(totalGenes), ncol=length(CatList),
                dimnames=list(totalGenes, CatList))
    for (k in seq_along(CatGene)) {
      keep <- CatGene[[k]][CatGene[[k]] %in% totalGenes]
      m[keep, k] <- TRUE
    }
    m
  }
  gsMat1 <- embed_membership(CatGene1, CatList1, totalGenes)
  gsMat2 <- embed_membership(CatGene2, CatList2, totalGenes)

  ## per-category sizes and all pairwise overlap counts at once
  CatSize1 <- colSums(gsMat1); CatSize2 <- colSums(gsMat2)
  CountMat <- t(gsMat1) %*% gsMat2

  ## hypergeometric test: white balls = set1 category genes,
  ## draws = set2 category genes; PMat = P(overlap >= observed)
  totalWhite <- matrix(CatSize1, nrow=length(CatSize1), ncol=length(CatSize2), byrow=FALSE)
  totalBlack <- totalBalls - totalWhite
  totalDrawn <- matrix(CatSize2, nrow=length(CatSize1), ncol=length(CatSize2), byrow=TRUE)
  PMat <- phyper(CountMat-1, totalWhite, totalBlack, totalDrawn, lower.tail=FALSE)
  ## fold enrichment = observed overlap rate / expected rate under the null
  FoldMat <- (CountMat/totalDrawn)/(totalWhite/totalBalls)

  ## expand category names into full pair matrices for the flat table
  set1Mat <- matrix(CatList1, nrow=length(CatSize1), ncol=length(CatSize2), byrow=FALSE)
  set2Mat <- matrix(CatList2, nrow=length(CatSize1), ncol=length(CatSize2), byrow=TRUE)

  if (ReportGenes)
  {
    ## collect the actual overlapping gene identifiers per pair
    overGeneMat <- matrix("", nrow=length(CatSize1), ncol=length(CatSize2))
    for (i in seq_along(CatList1))
    {
      for (j in seq_along(CatList2))
      {
        overlapGenes <- totalGenes[gsMat1[,i] & gsMat2[,j]]
        overGeneMat[i,j] <- paste(overlapGenes, collapse=";")
      }
    }
    tabnames <- c("Set1", "Set2",
                  "Overlap size", "Sampling size", "Positive size", "Background size",
                  "Fold enrichment", "P value",
                  "Overlap genes"
                  )
    EnrichTable <- data.frame(as.character(set1Mat), as.character(set2Mat),
                              as.character(CountMat), as.character(totalDrawn), as.character(totalWhite), as.character(totalBalls),
                              as.character(FoldMat), as.character(PMat),
                              as.character(overGeneMat),
                              stringsAsFactors=FALSE
                              )
    colnames(EnrichTable) <- tabnames
  } else
  {
    tabnames <- c("Set1", "Set2",
                  "Overlap size", "Sampling size", "Positive size", "Background size",
                  "Fold enrichment", "P value"
                  )
    EnrichTable <- data.frame(as.character(set1Mat), as.character(set2Mat),
                              as.character(CountMat), as.character(totalDrawn), as.character(totalWhite), as.character(totalBalls),
                              as.character(FoldMat), as.character(PMat),
                              stringsAsFactors=FALSE
                              )
    colnames(EnrichTable) <- tabnames
  }
  ## report NA cells as "0", then rank all pairs by ascending p-value
  EnrichTable[is.na(EnrichTable)] <- 0
  EnrichTable <- EnrichTable[order(as.numeric(EnrichTable[,"P value"])) ,]

  return(list(Set1Size=CatSize1, Set2Size=CatSize2,
              Count=CountMat, Fold=FoldMat, Pval=PMat,
              EnrichTable=EnrichTable
              )
         )
}
#get the file extension (the text after the last "."), or "" when there is none
#
# Return the extension of a file name (text after the last "."),
# or "" when the name contains no dot.
getFileExtension = function(fullfname) {
  pieces <- unlist(strsplit(fullfname, "\\."))
  if (length(pieces) <= 1) {
    return("")
  }
  pieces[length(pieces)]
}
#get the filename without extension
# Strip the extension (the text after the last ".") from a file name;
# names without an extension are returned unchanged.
#
# BUGFIX: the previous version split on paste(".", ext) treated as a REGEX,
# where "." matches any character -- e.g. getFileName("abc.c") matched "bc"
# and returned "a". We now drop the literal trailing "." + extension by
# position instead, which cannot mis-match.
getFileName = function(fullfname) {
  pieces <- unlist(strsplit(fullfname, "\\."))
  if (length(pieces) <= 1) {
    # no dot at all -> no extension to strip
    return(fullfname)
  }
  ext <- pieces[length(pieces)]
  # remove the final "." plus the extension; everything else is kept verbatim
  substr(fullfname, 1, nchar(fullfname) - nchar(ext) - 1)
}
#get the filename without extension and path information
# Base name without path or extension: strip the extension first
# (via getFileName), then keep only the text after the final "/".
getFileNameNopath = function(fullfname) {
  no_ext <- getFileName(fullfname)
  parts <- unlist(strsplit(no_ext, "/"))
  parts[length(parts)]
}
#get the filename without path information
# Strip directory components from each element of a character vector of
# file names (extensions are kept). NULL/zero-length input yields NULL,
# matching the original loop-and-append behaviour.
#
# Improvements: preallocated vapply instead of growing the result with c()
# in a loop, and the output is always the same length as the input
# (pathological "" entries map to "" rather than being silently dropped).
getFileFullNameNopath = function(fullfnames) {
  if (length(fullfnames) == 0) {
    return(NULL)
  }
  vapply(fullfnames,
         function(fname) {
           parts <- unlist(strsplit(fname, "/"))
           if (length(parts) == 0) "" else parts[length(parts)]
         },
         character(1),
         USE.NAMES = FALSE)
}
# to split "abc|123", use sep="\\|", "abc.123" use "\\."
# Split each element of a character vector on `separator` (a regular
# expression: use "\\|" for "|", "\\." for ".") and concatenate all the
# pieces in order. NA elements are skipped; if nothing remains, NULL is
# returned -- both matching the original append-in-a-loop behaviour.
#
# Improvement: vectorised strsplit + unlist instead of growing the result
# with c() inside a loop (the old dead `is.null(each)` check is gone --
# elements of an atomic vector are never NULL).
splitString = function(mystring, separator="; ") {
  if (length(mystring) == 0) {
    return(NULL)
  }
  # as.character() keeps e.g. an all-NA logical vector from breaking strsplit
  kept <- as.character(mystring[!is.na(mystring)])
  # unlist() of the per-element splits yields NULL when `kept` is empty,
  # exactly like the original NULL accumulator
  unlist(strsplit(kept, separator))
}
|
a4ab0dc36d2134af9707084505afde171da70799
|
89ef0527cfab8a5890eb80fea15d2263ad3ba133
|
/tasks/sorting-algorithms-quick-sort/r/sorting-algorithms-quicksort.r
|
464bd504f924f8844b166eee665e22d3e13afc09
|
[
"CC0-1.0"
] |
permissive
|
stefanos1316/Rosetta_Code_Data_Set
|
aed9585dd67a8606b28cebc7027512ce54ff32e0
|
8120b14cce6cb76ba26353a7dd4012bc99bd65cb
|
refs/heads/master
| 2021-07-08T22:23:35.920344
| 2020-07-09T14:00:06
| 2020-07-09T14:00:06
| 132,714,703
| 0
| 0
|
NOASSERTION
| 2020-06-22T16:41:47
| 2018-05-09T06:55:15
|
Java
|
UTF-8
|
R
| false
| false
| 727
|
r
|
sorting-algorithms-quicksort.r
|
# Recursive quicksort. Partitions around the midrange value
# ((min + max) / 2) into below / equal / above, then sorts the outer
# partitions recursively; vectors of length 0 or 1 come back as-is.
qsort <- function(v) {
  if (length(v) <= 1) {
    return(v)
  }
  mid <- (min(v) + max(v)) / 2  # midrange pivot; median(v) would also work
  below <- v[v < mid]
  above <- v[v > mid]
  c(qsort(below), v[v == mid], qsort(above))
}
r=0
# One benchmark task: quicksort a fixed 100-element vector whose first
# entry is shifted by the iteration index i, then return i + 1.
executeTask <- function(i) {
  workload <- c(16+i,93,-99,95,-96,-24,-53,-71,96,-66,-21,72,-12,-32,-96,62,-42,-50,49,53,-65,52,-25,-69,88,-43,60,66,-94,-69,53,-71,-17,-58,-30,32,-16,-94,-42,-86,59,-53,94,97,-12,15,65,-35,-12,-82,-82,48,-48,66,-42,-63,33,-49,41,-85,94,66,-60,60,-65,-73,-50,-9,-48,-3,15,-77,81,95,-93,83,-21,34,-78,-61,-22,-58,65,78,41,-7,-59,20,88,-38,-15,-69,42,97,-35,61,8,2,-22,-85)
  qsort(workload)  # sort result is discarded; only the work itself matters
  i + 1
}
# Driver: run the task ten million times (simple CPU benchmark).
for (i in 0:10000000) {
  r <- executeTask(i)
}
|
8f25bb6fb4df14ef60c26c7f1137bd78c0a96a4e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/moezipfR/examples/moezipfR.mean.Rd.R
|
0203a5c66fd0b82951c474808a0bf5bf92d8fe8b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
moezipfR.mean.Rd.R
|
# Example script for moezipfR.mean (expected value of the MOEZipf
# distribution -- see the moezipfR package documentation).
library(moezipfR)
### Name: moezipfR.mean
### Title: Expected value.
### Aliases: moezipfR.mean
### ** Examples
# Mean with parameters 2.5 and 1.3
moezipfR.mean(2.5, 1.3)
# Same call with a third argument of 10^-3 -- presumably a numerical
# tolerance; TODO confirm against the package reference
moezipfR.mean(2.5, 1.3, 10^(-3))
|
d1bbd46c1dff33f9f7a6aa1f0e5061cd2898de38
|
a9e85ae71357f3cfd99c98b3723b4103a55fbe93
|
/plot2.R
|
9c0aa801428de80bd61cf4ad7415aea6d441a7af
|
[] |
no_license
|
sean-sebry/ExData_Plotting1
|
dc352b3a9e5a1c05d0da4c4b5903ff8edefba041
|
0298529914dd1f3f78781c2bbaae4ce7207b0075
|
refs/heads/master
| 2020-09-20T15:34:52.711003
| 2019-11-28T20:38:09
| 2019-11-28T20:38:09
| 224,524,065
| 0
| 0
| null | 2019-11-27T22:01:52
| 2019-11-27T22:01:51
| null |
UTF-8
|
R
| false
| false
| 408
|
r
|
plot2.R
|
# Plot 2: Global Active Power (kilowatts) over 1-2 Feb 2007 as a line
# chart, written to plot2.png.
# Assumes `data_clean` (columns date, time, global_active_power) already
# exists in the workspace and dplyr/lubridate are loaded -- TODO confirm.
feb_window <- data_clean %>%
  filter(date > ("2007-01-31")) %>%
  filter(date < ("2007-02-03"))
global_active_power_data <- feb_window %>%
  mutate(date_time = paste(date, time)) %>%
  mutate(date_time = ymd_hms(date_time))
glimpse(global_active_power_data)
png("plot2.png")
with(global_active_power_data,
     plot(x = date_time,
          y = global_active_power,
          type = "l",
          xlab = "",
          ylab = "Global Active Power (kilowatts)"))
dev.off()
|
481ea0306681b92ef3636c1d08a835ad45dbb14e
|
e2120bc56e9e019613f2726777e47ad18d5106de
|
/R-scripts/03_tag_plot.R
|
000d7c33611ce94b1d3a5d99b359c11356cd9b11
|
[
"MIT"
] |
permissive
|
JoeyBernhardt/colour-of-noise
|
cf9c3b03354a2dd37df279c5c8ce45ff8b0fa265
|
aef274825a33de1705f91734b229393cf87b5f99
|
refs/heads/master
| 2021-07-21T04:15:55.688138
| 2020-09-29T19:55:23
| 2020-09-29T19:55:23
| 216,596,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,546
|
r
|
03_tag_plot.R
|
### mod tag plot
# Event-timing figures for the tag data in data-raw/xy-tag.csv:
# raster-style event plots for x, y and both, plus lag distributions.
# NOTE(review): tidyr::gather() is superseded by pivot_longer(); kept as-is.
library(tidyverse)
library(cowplot)
theme_set(theme_cowplot())
# Long format: one row per (variable, time); lag = time since previous event
# within the same variable
events <- read_csv("data-raw/xy-tag.csv") %>%
gather(key = variable, value = time) %>%
group_by(variable) %>%
mutate(lag = time - lag(time)) %>%
mutate(event = 0)
# Wide format: paired x/y columns; lag = x - y per row
events_wide <- read_csv("data-raw/xy-tag.csv") %>%
mutate(lag = x -y) %>%
mutate(variable = "x,y")
# Scatter of paired event times with the identity line
events_wide %>%
ggplot(aes(x = y, y = x)) + geom_point(size =3) + geom_abline(intercept = 0, slope = 1)
ggsave("figures/events-xy-corr.pdf", width = 6, height = 4)
# Raster of x events only (shape 124 = vertical tick mark)
events %>%
filter(variable == "x") %>%
ggplot(aes(x = time, y = event, color = variable)) + geom_point(shape = 124, size = 10) +
ylim(0, 0) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()) +
scale_color_manual(values = c("turquoise")) +
theme(legend.position = "none") +
xlim(0, 120)
ggsave("figures/events-x.pdf", width = 6, height = 4)
# Raster of y events only
events %>%
filter(variable == "y") %>%
ggplot(aes(x = time, y = event, color = variable)) + geom_point(shape = 124, size = 10) +
ylim(0, 0) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()) +
scale_color_manual(values = c("darkgrey")) +
theme(legend.position = "none") +
xlim(0, 120)
ggsave("figures/events-y.pdf", width = 6, height = 4)
# Raster of x and y events overlaid
events %>%
filter(variable %in% c("x", "y")) %>%
ggplot(aes(x = time, y = event, color = variable)) + geom_point(shape = 124, size = 10) +
ylim(0, 0) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank()) +
scale_color_manual(values = c("turquoise", "darkgrey")) +
# scale_color_manual(values = c("darkgrey")) +
theme(legend.position = "none") +
xlim(0, 120)
ggsave("figures/events-xy.pdf", width = 6, height = 4)
# Lag distributions for x, y and the paired x,y series, one facet each
events_all <- bind_rows(events, events_wide)
events_all %>%
ungroup() %>%
# mutate(variable = factor(variable)) %>%
mutate(variable = factor(variable, levels = c("x", "y", "x,y"))) %>%
select(-event) %>%
rename(event = variable) %>%
ggplot(aes(x = lag, color = event, fill = event)) + geom_density() +
facet_wrap( ~ event, nrow = 3, ncol = 1) +
scale_fill_manual(values = c("turquoise", "darkgrey", "black")) +
scale_color_manual(values = c("turquoise", "darkgrey", "black")) +
ylab("Frequency") + xlab("Time lag between events") +
theme(legend.position = "none") +
theme(
strip.background = element_blank(),
strip.text.x = element_blank()
)
ggsave("figures/prob-xy.pdf", width = 2.8, height = 3.5)
|
71842b43d61ba30a7401f05f22dfcace3618b7c8
|
b3f48abdb6e0f8871bed3bbd243ac97fcb757615
|
/tests/testthat/test-data_prep.R
|
0ebf1497a60378db847e74320417e708f2e3cc2a
|
[
"MIT"
] |
permissive
|
davisadamw/stocked
|
010153b6f5aa9607245681e44af35c1642a97359
|
eaf00c3ab65c8fc35ae5bbb8dcdbe5b4c4850b97
|
refs/heads/master
| 2023-02-13T16:50:53.749695
| 2021-01-14T08:44:27
| 2021-01-14T08:44:27
| 218,654,966
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,194
|
r
|
test-data_prep.R
|
# center_and_scale() should z-score a numeric vector (mean 0, sd 1).
# NOTE(review): rnorm() is unseeded, so statement order matters for the
# draws; the assertions hold for any draw.
test_that("basic center and scale works", {
# check on very basic data
expect_identical(center_and_scale(1:3), c(-1, 0, 1))
# check on random data
ck_data <- stats::rnorm(100, mean = 10, sd = 4)
ck_data_cs <- center_and_scale(ck_data)
ck_data_manual <- (ck_data - mean(ck_data)) / stats::sd(ck_data)
# first, check the resulting distribution
expect_equal(mean(ck_data_cs), 0)
expect_equal(stats::sd(ck_data_cs), 1)
expect_equal(ck_data_cs, ck_data_manual)
})
# center_and_scale_vars() should z-score each named column independently.
test_that("center and scale works across multiple variables", {
test_data <- tibble(x1 = runif(100, min = 10, max = 20),
x2 = rnorm(100, mean = 10, sd = 2))
test_data_cs <- test_data %>%
center_and_scale_vars(x1, x2)
# each column should end up with mean 0 and sd 1
expect_equal(mean(test_data_cs$x1), 0)
expect_equal(sd(test_data_cs$x1), 1)
expect_equal(mean(test_data_cs$x2), 0)
expect_equal(sd(test_data_cs$x2), 1)
})
# center_and_scale_validation() should scale new data using the TRAINING
# set's mean/sd (here mean(1:3)=2, sd=1), so (100..110) maps to (98..108).
# NOTE(review): these bounds checks are loose (each only pins min/max into
# the shared interval), but tightening them would change the test's intent.
test_that("center and scale validation does what it's supposed to", {
test_data <- tibble(x1 = runif(100, min = 100, max = 110))
vald_data <- tibble(x1 = 1:3)
test_data_cs <- test_data %>%
center_and_scale_validation(training = vald_data, x1)
# range of test_data x1 should go from (100-110) to (98-108)
expect_gte(min(test_data_cs$x1), 98)
expect_lte(min(test_data_cs$x1), 108)
expect_gte(max(test_data_cs$x1), 98)
expect_lte(max(test_data_cs$x1), 108)
})
# prep_data() should rename the market-limit column, and drop both the
# renamed source column and any column not listed in the arguments.
test_that("data prep works as intended", {
data_for_prep <- tibble(uid = LETTERS[1:10],
targ1 = runif(10),
targ2 = targ1 + 2,
pred1 = rnorm(10) * 2 + 1,
pred2 = rnorm(10) * 1 + 3,
col4l = targ2 + 10,
excess = runif(10))
data_prepped <- data_for_prep %>%
prep_data(id_cols = uid,
targ_cols = c(targ1, targ2),
predictor_cols = c(pred1, pred2),
ml_col = col4l)
# make sure market limit went over correctly
expect_equal(data_prepped$market_limit, data_for_prep$col4l)
# and that the old column got dropped
expect_false('col4l' %in% names(data_prepped))
# make sure excess column got dropped
expect_false('excess' %in% names(data_prepped))
})
# prep_data() should error when a target exceeds the market limit
# (targ1 is shifted above col4l here) unless bump_ml = TRUE is passed;
# the error message should mention that option.
test_that("data prep throws error if market limit is ever too low and bump_ml not set", {
data_for_prep <- tibble(uid = LETTERS[1:10],
targ1 = runif(10),
targ2 = targ1 + 2,
pred1 = rnorm(10) * 2 + 1,
pred2 = rnorm(10) * 1 + 3,
col4l = targ2 + 10,
excess = runif(10)) %>%
mutate(targ1 = targ1 + 20)
expect_error(prep_data(data_for_prep,
id_cols = uid,
targ_cols = c(targ1, targ2),
predictor_cols = c(pred1, pred2),
ml_col = col4l),
"set bump_ml = TRUE")
})
# calculate_target() on deterministic columns x1 = 1..10, x2 = x1 + 1,
# x3 = x2 + 2 should report per-column estimates of 10 and 20.
test_that("the target calculation works", {
  source_tbl <- tibble(x1 = 1:10, x2 = x1 + 1, x3 = x2 + 2)
  target_ests <- calculate_target(source_tbl, x1:x3)
  expect_equal(target_ests$x2, 10)
  expect_equal(target_ests$x3, 20)
})
|
afdfddbbb4f918863de68bb531537edaf86d66c1
|
fdea7c2cb18296bcb6117b6bc72dd4f9bb91b66f
|
/rscripts/readNewPhyto.R
|
e22c7de5ea19494616f6e5eda8660caa84aad8c4
|
[] |
no_license
|
USEPA/Phytoplankton-Data-Analysis
|
71413b111cff0bdb7be831e06b054388a877fc63
|
b16557b84e41cc929dd3f3c85ba9fd114a492d3a
|
refs/heads/master
| 2021-03-27T12:46:45.669570
| 2016-09-30T17:31:49
| 2016-09-30T17:31:49
| 15,807,175
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,250
|
r
|
readNewPhyto.R
|
################
#### Will Barnett, August 2016
################
################
#### This script is called from masterScript.R, and reads in
#### the 1988 / 2012 / 2014 files provided by Nathan Smucker in April 2016
################
## Change working directory
# NOTE(review): setwd() inside a sourced script is fragile -- it depends on
# the caller's working directory (masterScript.R presumably sets it up).
datDir <- "originalData/algae/EFR Phytoplankton Data/"
setwd(datDir)
## 1988 data
# `OUT` is the file-tracking table created upstream in masterScript.R;
# select the unprocessed 1988 workbooks.
id1988 <- which(grepl("1988DataFilesNJSdownload", OUT$full_file_name) & !OUT$processed)
OUT1988 <- OUT[id1988,]
dat <- NULL
# Read every 1988 sheet and stack the rows into `dat`
for( i in 1:nrow(OUT1988)){
# i = 1
err <- try( excel_sheets(OUT1988$full_file_name[i]) )
if(class(err) == "try-error"){ print("Error")}
temp <- read_excel(OUT1988$full_file_name[i], sheet=OUT1988$sheet[i])
# Special case: this workbook repeats its metadata only on the first row,
# so copy row 1's first four fields down the whole sheet
if(OUT1988$full_file_name[i] == "1988DataFilesNJSdownload/rrr21002880622.xls"){
for(j in 1:4){
temp[,j] <- temp[1,j]
}
}
## Some data sheets are reading in a bunch of all NA rows.
numNA <- apply(temp,1, FUN = function(x){sum(is.na(x))})
temp <- temp[!(numNA == ncol(temp)),]
temp$iCheck <- i
print(nrow(temp))
temp$sheet_id <- OUT1988$sheet_id[i]
dat <- rbind(dat,temp)
}
# Drop sheet-level summary rows and mark these files as processed
dat <- subset(dat, Group != "Totals")
OUT$script[id1988] <- "readNewPhyto.R"
OUT$processed[id1988] <- TRUE
## Format 1988 data
dat <- subset(dat, !is.na(Station) & !grepl("20R7",dat$Station))
## A couple of NA stations, and some Ohio River stations that don't need to be read.
# Pad 8-character station codes with a leading "2" so all codes are 9 chars
dat$Station <- ifelse(nchar(dat$Station) == 9, dat$Station, paste("2",dat$Station, sep=""))
## One depth field has an error. The sheet indicates the depth is 005.
id <- grep("ml", dat$`Depth-ft`)
dat$`Depth-ft`[id] <- "005"
# Composite sample ID: station(9) + date yyyymmdd(8) + time "0000"(4) + depth(3)
ID <- paste(dat$Station,
format.Date(as.Date(as.character(dat$`Date (yymmdd)`), format = "%y%m%d"),
format = "%Y%m%d"),
rep("0000",nrow(dat)),
formatC(as.numeric(dat$`Depth-ft`), width = 3, flag = "0"), sep = "")
# Standardized output table; field positions below mirror the ID layout
algae <- data.frame(ID = ID,
lake = substr(ID, 2,4),
station = substr(ID, 5,9) ,
depth_ft = substr(ID,22,24 ),
date = substr(ID, start=10, stop=17),
taxa = dat$Species,
cell_per_l = as.numeric(dat$`Cells/ml` * 1000),
BV.um3.L = as.numeric(NA),
class = dat$Group,
hab = FALSE,
sheet_id = dat$sheet_id,
qual_replicate = NA)
# Save object for rbind'ing later
algae1988 <- algae
## 2012 data
## Select the not-yet-processed 2012 workbooks from the bookkeeping table.
id2012 <- which(grepl("2012PhytoDataFiles", OUT$full_file_name) & !OUT$processed)
OUT2012 <- OUT[id2012,]
## For each file that starts with a '2', there is a Sample Details worksheet with metadata, like station ID.
## This sheet also contains the Sample IDs, which are listed as different sheets.
## Harvest metadata from 'Sample Details', then loop through each sheet represented by
## the Sample ID column.
## Files NOT starting with '2' instead carry everything in one 'Sample Results' sheet.
algae <- NULL
uniqueFiles <- unique(OUT2012$full_file_name)
for( i in 1:length(uniqueFiles)){
# i = 1
wb <- uniqueFiles[i]
# Probe the workbook; skip it entirely (with a console note) if unreadable.
err <- try( excel_sheets(uniqueFiles[i]) )
if(class(err) == "try-error"){ print("Error")}else{
# If the file name starts with a '2', grab metadata first and loop through sheets
fn <- strsplit(wb, "/")[[1]][2]
if(substr(fn,start=1,stop=1) == "2"){
metaTmp <- read_excel(wb, sheet = "Sample Details")
for(j in 1:nrow(metaTmp)){
# j = 1
# Each Sample ID names a per-sample worksheet; data start after 8 header rows.
sheetTmp <- read_excel(wb, sheet = trimws(as.character(metaTmp$`Sample ID`[j]),which="both"),skip = 8, col_names = TRUE)
# Drop rows without a taxon, and unit/summary rows whose Division mentions "ml".
sheetTmp <- subset(sheetTmp, !is.na(sheetTmp$'Genus species'))
sheetTmp <- subset(sheetTmp, !grepl("ml", sheetTmp$Division))
if(nrow(sheetTmp) > 0){
# Build the composite sample ID. Two workbooks are special-cased because
# their metadata columns differ: 2GRR20120912 needs the date appended,
# 2SRR20120919 already stores a complete ID in Location.
if(grepl("2GRR20120912",wb)){
idTmp <- paste(metaTmp$Location[j],
metaTmp$`Sample Date`[j],
formatC(as.numeric(metaTmp[j,"Sample Time"]), width = 4, flag = "0"),
formatC(as.numeric(metaTmp[j,"Sample Depth"]), width = 3, flag = "0"),
sep="")
}else if(grepl("2SRR20120919", wb)){
idTmp <- metaTmp$Location[j]
}else {
idTmp <- paste(metaTmp$Location[j],
formatC(as.numeric(metaTmp[j,"Sample Time"]), width = 4, flag = "0"),
formatC(as.numeric(metaTmp[j,"Sample Depth"]), width = 3, flag = "0"),
sep = "")
}
# Standardized record: lake/station/depth/date decoded from the ID string.
algaeTmp <- data.frame(ID = rep(idTmp,nrow(sheetTmp)),
lake = rep(substr(idTmp, 2,4),nrow(sheetTmp)),
station = rep(substr(idTmp, 5,9),nrow(sheetTmp)),
depth_ft = rep(substr(idTmp,22,24 ),nrow(sheetTmp)),
date = rep(substr(idTmp, start=10, stop=17),nrow(sheetTmp)),
taxa = sheetTmp$`Genus species`,
cell_per_l = as.numeric(sheetTmp$`Concentration (cell #/L)`),
BV.um3.L = as.numeric(sheetTmp$`Total biovolume (µm3/L)`),
class = rep(NA,nrow(sheetTmp)),
hab = rep(FALSE,nrow(sheetTmp)),
sheet_id = rep(OUT2012$sheet_id[OUT2012$full_file_name == wb &
trimws(OUT2012$sheet, which = "both") == metaTmp$`Sample ID`[j]],nrow(sheetTmp)))
# NOTE(review): algae is grown by rbind inside a nested loop -- O(n^2),
# acceptable here given the small number of sheets.
algae <- rbind(algae,algaeTmp)
}
}
}else{
# If the file name doesn't start with '2', read the 'Sample Results' sheet only
# i = 7
datTmp <- read_excel(wb, sheet = "Sample Results")
datTmp <- subset(datTmp, !is.na(Location))
# Here the composite ID can be built row-wise straight from the data sheet.
idTmp <- paste(datTmp$Location,
datTmp$`Sample Date`,
formatC(as.numeric(datTmp$`Sample Time`), width = 4, flag = "0"),
formatC(as.numeric(datTmp$`Sample Depth`), width = 3, flag = "0"),
sep="")
algaeTmp <- data.frame(ID = idTmp,
lake = substr(idTmp, 2,4),
station = substr(idTmp, 5,9),
depth_ft = substr(idTmp,22,24 ),
date = substr(idTmp, start=10, stop=17),
taxa = datTmp$Taxa,
cell_per_l = as.numeric(datTmp$`Cells/liter`),
BV.um3.L = as.numeric(datTmp$`Total biovolume (µm3/L)`),
class = NA,
hab = FALSE,
sheet_id = OUT2012$sheet_id[OUT2012$full_file_name == wb &
OUT2012$sheet == "Sample Results"])
algae <- rbind(algae,algaeTmp)
}
}
# Progress/debug output: cumulative row count and file index.
print(nrow(algae))
print(i)
}
# Mark these workbooks as processed in the bookkeeping table.
OUT$script[id2012] <- "readNewPhyto.R"
OUT$processed[id2012] <- TRUE
# qual_replicate does not apply to 2012 samples; added for column parity.
algae$qual_replicate <- NA
algae2012 <- algae
## Read algae 2014 data
## 2014 data
## Select the not-yet-processed 2014 workbooks from the bookkeeping table.
id2014 <- which(grepl("2014PhytoDataFilesNJSdownload", OUT$full_file_name) & !OUT$processed)
OUT2014 <- OUT[id2014,]
## Look for files that end in 'QP.xlsx', 'P.xlsx', or 'H.xlsx'.
## These are Quality files, algae data, and HAB data, respectively.
algae <- NULL
subFiles <- unique(subset(OUT2014, grepl("QP.xlsx", OUT2014$full_file_name) |
grepl("P.xlsx", OUT2014$full_file_name) |
grepl("H.xlsx",OUT2014$full_file_name))$full_file_name)
## Read in data from Sample Results tab in each file.
algae <- NULL
for( i in 1:length(subFiles)){
# i = 6
wb <- subFiles[i]
# Probe the workbook; skip it entirely (with a console note) if unreadable.
err <- try( excel_sheets(subFiles[i]) )
if(class(err) == "try-error"){ print("Error")}else{
sheetTmp <- read_excel(wb, sheet = 'Sample Results', col_names = TRUE)
# Get rid of NA rows.
sheetTmp <- sheetTmp[!is.na(sheetTmp$Location),]
# Composite sample ID: location + date (normalized to yyyymmdd when the
# sheet parsed it as a Date) + zero-padded time (4) + zero-padded depth (3).
idTmp <- paste(sheetTmp$Location,
ifelse(class(sheetTmp$`Sample Date`) == "Date",
format(sheetTmp$`Sample Date`, "%Y%m%d"),
sheetTmp$`Sample Date`),
formatC(as.numeric(sheetTmp$`Sample Time`), width = 4, flag = "0"),
formatC(as.numeric(sheetTmp$`Sample Depth`), width = 3, flag = "0"),
sep = "")
# Density column changes name across files; match on "ells"/"density".
densCol <- names(sheetTmp)[grepl("ells",names(sheetTmp)) | grepl("ensity",tolower(names(sheetTmp)))]
# Total biovolume column changes names. Search for specific names.
bvNms <- c("Total Biovolume (um3/L)", "Total biovolume (µm3/L)","Biovolume")
bvCol <- names(sheetTmp)[names(sheetTmp) %in% bvNms]
# Some files have no biovolume column at all; record NA in that case.
if(length(bvCol) == 0){
tmpBV <- rep(NA,nrow(sheetTmp))
}else {
tmpBV <- sheetTmp[,bvCol]
}
# Standardized record; 'H.xlsx' files flag HAB (harmful algal bloom) data,
# 'QP.xlsx' files are quality replicates (qual_replicate = "Q").
algaeTmp <- data.frame(ID = idTmp,
lake = substr(idTmp, 2,4),
station = substr(idTmp, 5,9),
depth_ft = substr(idTmp,22,24 ),
date = substr(idTmp, start=10, stop=17),
taxa = sheetTmp$Taxa,
cell_per_l = as.numeric(sheetTmp[,densCol]),
BV.um3.L = tmpBV,
class = rep(NA,nrow(sheetTmp)),
hab = ifelse(grepl("H.xlsx",wb), TRUE, FALSE),
sheet_id = OUT2014$sheet_id[OUT2014$full_file_name == wb &
OUT2014$sheet == "Sample Results"],
qual_replicate = ifelse(grepl("QP.xlsx",wb), "Q", NA))
algae <- rbind(algae,algaeTmp)
}
}
algae2014 <- algae
# Mark these workbooks as processed in the bookkeeping table.
OUT$script[id2014] <- "readNewPhyto.R"
OUT$processed[id2014] <- TRUE
## Combine the three vintages and append to the existing processed file ---
# Sanity check that the three data frames share identical column sets, as
# required by rbind() below. (Bare names() calls only display when the
# script is run interactively or sourced with echo = TRUE.)
names(algae1988)
names(algae2012)
names(algae2014)
out <- rbind(algae1988,algae2012,algae2014)
#chunck_check(out)
## Read in existing algae file, since the qual_replicate column is new
setwd(homeDir)   # back to the project root (datDir was set at the top)
algae <- read.csv("processed_data/algae.csv")
# Add the new column to the historical data so the two sets can be stacked.
algae$qual_replicate <- as.character(NA)
bothAlgae <- rbind(algae, out)
## Write the combined file back out.
## FIX(review): write.csv() always writes a header row and disallows/ignores
## a user-supplied col.names argument (it warned "attempt to set 'col.names'
## ignored"), so col.names = TRUE was removed.
write.csv(bothAlgae, "processed_data/algae.csv", row.names = FALSE)
|
0c64d0690f33aeebcd4c4f40b399f840dc436adf
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoCairoShowLayoutLine.Rd
|
1bd3d15d39a257672e9058baf86468b6fa56585c
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 585
|
rd
|
pangoCairoShowLayoutLine.Rd
|
\alias{pangoCairoShowLayoutLine}
\name{pangoCairoShowLayoutLine}
\title{pangoCairoShowLayoutLine}
\description{Draws a \code{\link{PangoLayoutLine}} in the specified cairo context.
The origin of the glyphs (the left edge of the line) will
be drawn at the current point of the cairo context.}
\usage{pangoCairoShowLayoutLine(cr, line)}
\arguments{
\item{\verb{cr}}{[\code{\link{Cairo}}] a Cairo context}
\item{\verb{line}}{[\code{\link{PangoLayoutLine}}] a \code{\link{PangoLayoutLine}}}
}
\details{ Since 1.10}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
ee4bdee91a9c5c57fba89c1c54acc8aaf700dfd9
|
33a08ca3c15a5fc678d78777b53d8868a7bc7897
|
/R/Segmentation.R
|
51216442335f54bc68afa144f5f772b7e34c7419
|
[] |
no_license
|
hjanime/DCS
|
09f10d37d549eae3837b1c15d65d319f8c45e8ea
|
33570353b7812fc0d1fb2744cc34d6e92375b90f
|
refs/heads/master
| 2020-03-23T02:44:05.383267
| 2018-07-15T01:18:57
| 2018-07-15T01:18:57
| 140,989,262
| 0
| 0
| null | 2018-07-15T01:17:32
| 2018-07-15T01:17:32
| null |
UTF-8
|
R
| false
| false
| 3,561
|
r
|
Segmentation.R
|
# Purpose : Using the border score, identify candidate boundaries in each group.
#' Call Peaks
#'
#' Scans across a chromosome. For each focus, calculates the mean and variance
#' of the border score within the window [focus-h,focus+h], standardizes the
#' border scores in that window, and flags foci with a local Z-score at or
#' above the threshold. Returns the sorted union of flagged foci. Windows with
#' zero variance (or a single element) yield NaN/NA Z-scores and contribute no
#' peaks.
#' @param foci Focus coordinates.
#' @param b Border score, parallel to \code{foci}.
#' @param h Window half-width.
#' @param t Border score threshold (local Z-score).
#' @return Sorted vector of candidate peak coordinates.
#' @importFrom stats var
callPeaks = function(foci,b,h,t){
  # Flag high-Z positions within the window centered at y.
  aux = function(y){
    # Positions to include
    keep = (abs(foci-y) <= h);
    # Local mean and variance
    mu = mean(b[keep]);
    v = var(b[keep]);
    # Z scores; NaN/NA when the window is constant or has one element.
    z = (b[keep] - mu)/sqrt(v);
    # which() drops NA comparisons instead of propagating NA indices
    # (previously NAs were only discarded later by sort()).
    (foci[keep])[which(z>=t)];
  }
  # base::lapply replaces plyr::llply, dropping the plyr dependency.
  peaks = lapply(foci,aux);
  peaks = sort(Reduce(f=union,x=peaks));
  return(peaks);
}
#' Thin Peaks
#'
#' If multiple candidate peaks are within the minimum separation distance,
#' evaluates the border score at each, and retains the representative peak with
#' the highest border score. Repeats until a single peak remains or all
#' remaining peaks are separated by more than \code{w}.
#' @param p Peak coordinates.
#' @param b Peak border scores, parallel to \code{p}.
#' @param w Minimum separation.
#' @return Vector of thinned peak coordinates.
thinPeaks = function(p,b,w){
  flag = TRUE;   # was T; use the non-reassignable literal
  peaks = p;
  b.p = b;
  while(flag){
    # Replace each peak with the highest-scoring peak within distance w of it.
    aux = function(y){
      # Positions to include
      keep = (abs(y-peaks)<=w);
      # Maximum
      topPeak = which.max(b.p[keep]);
      topPeak = (peaks[keep])[topPeak];
      return(topPeak);
    }
    # base::vapply replaces plyr::aaply, dropping the plyr dependency and
    # guaranteeing one numeric scalar per peak.
    peaks = vapply(peaks,aux,numeric(1));
    peaks = unique(peaks);
    b.p = b[p %in% peaks];
    # Stop when a single peak remains or all peaks are separated by > w.
    if(length(peaks)<2){break};
    flag = (min(abs(diff(peaks)))<=w);
  }
  # Output
  return(peaks);
}
#' Candidate Differential Boundaries
#'
#' Scans across each chromosome. Calculates the local mean and variance of the border
#' score within the window [focus-h,focus+h]. Identifies foci with a border score at least
#' \emph{t} standard deviations above the mean. If multiple foci are within distance w of
#' one another, the representative focus with the greatest border score is retained.
#' @param B FS experiment produced by \code{BorderScores}.
#' @param h Window half-width.
#' @param w Minimum boundary separation.
#' @param t Threshold local Z-score for candidate boundaries.
#' @param parallel Run in parallel? Must register parallel backend first.
#' @return data.frame of candidate boundaries
#' @importFrom foreach "%dopar%" "%:%" foreach registerDoSEQ
#' @export
callBoundaries = function(B,h,w=10,t=2,parallel=FALSE){
  # FIX(review): the roxygen block imported "%do%" but the code uses %dopar%,
  # which would be unresolved at package load; import %dopar% instead.
  # Default is now spelled FALSE rather than the reassignable F.
  # De-Parallelize: register a sequential backend so %dopar% works even
  # when the caller has not registered one.
  if(!parallel){foreach::registerDoSEQ()};
  # Groups
  ng = length(B@groups);
  # Chromosomes
  nc = length(B@chrs);
  # Loop over groups x chromosomes; i/j are pre-declared NULL to placate
  # R CMD check about foreach's loop variables.
  i = j = NULL;
  Out = foreach(i=1:ng,.combine=rbind) %:%
    foreach(j=1:nc,.combine=rbind) %dopar% {
      # Subset chromosome
      C = B@Data[[B@chrs[j]]];
      # Foci
      foci = C@foci;
      # Border scores for this group
      Stats = C@Stats[[B@groups[i]]];
      border = Stats$Border;
      # Remove NAs (keep Stats/border/foci aligned)
      keep = !is.na(border);
      Stats = Stats[keep,];
      border = border[keep];
      foci = foci[keep];
      # Candidate peaks: call, then thin within the separation distance
      Peaks = callPeaks(foci=foci,b=border,h=h,t=t);
      if(length(Peaks)>0){
        Peaks = thinPeaks(p=Peaks,b=border[foci %in% Peaks],w=w);
        Peaks = data.frame("Chr"=B@chrs[j],"Group"=B@groups[i],
                           "Focus"=Peaks,Stats[foci %in% Peaks,]);
      } else {Peaks = NULL};   # rbind() drops NULL contributions
      # Output
      return(Peaks);
    }
  rownames(Out) = NULL;
  return(Out);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.