blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
84d84ae7efebe7f094087ec080c0c2b2d1dadc6d
|
4ed6dfdbac828314df254c82b9dab547d7167a79
|
/04.ExploratoryDataAnalysis/video_lectures/week3.video01.HierarchicalClusteringPart1.v1.R
|
f5d239d7530f4d751a5a0c6edc85612e9f3540b8
|
[] |
no_license
|
minw2828/datasciencecoursera
|
beb40d1c29fc81755a7b1f37fc5559d450b5a9e0
|
e1b1d5d0c660bc434b1968f65c52987fa1394ddb
|
refs/heads/master
| 2021-03-22T00:15:15.147227
| 2015-08-21T07:55:10
| 2015-08-21T07:55:10
| 35,082,087
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
week3.video01.HierarchicalClusteringPart1.v1.R
|
## Hierarchical Clustering (part 1)
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
##
|
53443702bc3b6756f4f89ce2b68b9fb05296b3a2
|
461c3b43ec1490872ecdb2e8fa87ab4526f895fa
|
/Quinton_WGD_2020/Dependency/Analysis/2018.12.20/20181220_CERES_analysis.R
|
3f5e124abc01d863a3def67b9a805f213d424c66
|
[
"MIT"
] |
permissive
|
campbio/Manuscripts
|
8f49d01e5190810a12226e9feb1c6d048f238d43
|
23bde921d7a9c7b1d3c99cbc395de3d6081654d7
|
refs/heads/master
| 2022-05-16T23:32:54.780949
| 2022-04-04T19:30:01
| 2022-04-04T19:30:01
| 210,632,134
| 12
| 10
|
MIT
| 2022-04-02T02:57:55
| 2019-09-24T15:08:11
|
HTML
|
UTF-8
|
R
| false
| false
| 4,998
|
r
|
20181220_CERES_analysis.R
|
## Load CERES/DEMETER2 dependency screens and ABSOLUTE WGD annotations.
# lm_utils.R provides fet() and wilcoxon() used in enrich() below.
source("~/GIT/utilities/R/lm_utils.R")
source("~/GIT/utilities/R/mut_utils.R")
library(stringr)
# 17Q2 CERES release -- rows presumably genes, columns cell lines (confirm)
ceres.17q2 <- read.table(gzfile("../../Data/gene_effect_17Q2.csv.gz"), header = T, sep = ",", row.names=1, check.names=F)
# 18Q3 release ships transposed relative to 17Q2, hence the t()
ceres.18q3 <- t(read.table(gzfile("../../Data/gene_effect_18Q3.csv.gz"), header = T, sep = ",", row.names=1, check.names=F))
# DEMETER2 combined RNAi dependency scores
ceres.combined.18q3 <- read.table(gzfile("../../Data/D2_combined_gene_dep_scores_18Q3.csv.gz"), header = T, sep = ",", row.names=1, check.names=F)
# drop genes with 5 or more missing values across cell lines
ceres.combined.18q3 = ceres.combined.18q3[rowSums(is.na(ceres.combined.18q3)) < 5,]
# ABSOLUTE calls; the Genome.doublings column defines WGD status
Absolute_data <- read.table("../../Data/CCLE_combined.table.txt", header = T, sep = "\t", row.names=1)
# Test per-gene dependency enrichment in WGD vs non-WGD cell lines.
#
# ceres.data: genes x cell-lines matrix of dependency scores.
# absolute:   ABSOLUTE annotation table; its first column is matched against
#             colnames(ceres.data) -- presumably cell-line IDs (confirm),
#             and its rownames must also be cell-line IDs.
# cutoff:     score below which a line is called "dependent" (default -1).
#
# Returns list(res = per-gene statistics data.frame,
#              data = score matrix with a WGD status row bound on top).
enrich = function(ceres.data, absolute, cutoff = -1) {
require(pROC)
# cell lines present in both the dependency matrix and the annotation
i = intersect(colnames(ceres.data), absolute[,1])
# fixed: original indexed the global Absolute_data here instead of the
# 'absolute' argument; identical for all existing call sites (i is a
# subset of the rows passed in), but correct for any other caller
abs.i = absolute[i,]
wgd = ifelse(abs.i$Genome.doublings > 0, "WGD", "Not_WGD")
ceres.i = ceres.data[,i]
# Fisher's exact test on binarized dependency calls (fet() from lm_utils)
res.fet.full = fet(ceres.i < cutoff, wgd, reorder=FALSE)
colnames(res.fet.full) = paste0("FET_", colnames(res.fet.full))
# Wilcoxon rank-sum test on the raw scores (wilcoxon() from lm_utils)
res.wilcox.full = wilcoxon(ceres.i, wgd, sort=FALSE)
colnames(res.wilcox.full) = paste0("Wilcoxon_", colnames(res.wilcox.full))
# only genes dependent in more than 4 lines enter FDR correction
ceres.i.select = rowSums(ceres.i < cutoff) > 4
fet.select.fdr = res.fet.full$FET_Pvalue
fet.select.fdr[!ceres.i.select] = NA
fet.select.fdr = p.adjust(fet.select.fdr, 'fdr')
wilcox.select.fdr = res.wilcox.full$Wilcoxon_Pvalue
wilcox.select.fdr[!ceres.i.select] = NA
wilcox.select.fdr = p.adjust(wilcox.select.fdr, 'fdr')
# per-gene ROC AUC of dependency score vs WGD status
auc = rep(NA, nrow(ceres.i))
# seq_len() is safe for a zero-row matrix (1:nrow would give c(1, 0))
for(j in seq_len(nrow(ceres.i))) {
auc[j] = auc(roc(as.factor(wgd), as.numeric(ceres.i[j,])))
}
res = data.frame(res.fet.full, "FET_FDR_Filter"=fet.select.fdr, AUC=auc, res.wilcox.full, "Wilcoxon_FDR_Filter"=wilcox.select.fdr)
return(list(res=res, data=rbind(WGD=wgd, ceres.i)))
}
# Extract the tumor-type suffix from CCLE-style cell line names
# ("ACH001_LUNG_NSCLC" -> "LUNG_NSCLC").
#
# cn: character vector of cell line names.
# Returns a character vector of the same length: everything after the
# first underscore, with trailing underscores stripped ("" when the name
# has no underscore).
#
# Fix: the original built a split matrix with stringr::str_split and did
# apply(s[,-1], 1, ...), which errors when the matrix has exactly two
# columns (s[,-1] collapses to a vector). The base-R regex version below
# is equivalent for all inputs and has no such edge case.
getTumorType = function(cn) {
# drop the leading token (cell line ID) plus its separating underscore
tt = sub("^[^_]*_?", "", cn)
# strip trailing underscores (the str_split version padded short names
# with "" columns, producing trailing "_" runs that it then removed)
return(gsub("_+$", "", tt))
}
# Tumor type (lineage suffix) of every cell line in each dataset
ceres.17q2.tt = getTumorType(colnames(ceres.17q2))
ceres.18q3.tt = getTumorType(colnames(ceres.18q3))
ceres.combined.18q3.tt = getTumorType(colnames(ceres.combined.18q3))
# union of lineages observed in any of the three datasets
tumor.type = unique(c(ceres.17q2.tt, ceres.18q3.tt, ceres.combined.18q3.tt))
# analyze a tumor type only when it has at least this many annotated lines
min.n = 20
# Per-tumor-type analysis: for each lineage with >= min.n cell lines that
# are also in the ABSOLUTE table, run the WGD enrichment on each dataset
# and write the per-gene results plus the underlying data matrix.
for(i in tumor.type) {
print(i)
ix = ceres.17q2.tt == i
cn = intersect(colnames(ceres.17q2)[ix], rownames(Absolute_data))
if(length(cn) >= min.n) {
print("CERES 17q2")
res.ceres.17q2 = enrich(ceres.17q2[,cn], Absolute_data[cn,])
fn = paste0("20181220_CERES_", i, "_17Q2_WGD_results.txt")
write.table(res.ceres.17q2$res, fn, quote=FALSE, row.names=FALSE, sep="\t")
# NOTE(review): "20181108" prefix is inconsistent with the "20181220"
# prefix used elsewhere in this script -- likely a leftover from an
# older run; confirm the intended output filename
fn = paste0("20181108_CERES_", i, "_17Q2_WGD_data.txt")
write.table(data.frame(Gene=rownames(res.ceres.17q2$data), res.ceres.17q2$data), fn, quote=FALSE, row.names=FALSE, sep="\t")
}
ix = ceres.18q3.tt == i
cn = intersect(colnames(ceres.18q3)[ix], rownames(Absolute_data))
if(length(cn) >= min.n) {
print("CERES 18q3")
res.ceres.18q3 = enrich(ceres.18q3[,cn], Absolute_data[cn,])
fn = paste0("20181220_CERES_", i, "_18Q3_WGD_results.txt")
write.table(res.ceres.18q3$res, fn, quote=FALSE, row.names=FALSE, sep="\t")
fn = paste0("20181220_CERES_", i, "_18Q3_WGD_data.txt")
write.table(data.frame(Gene=rownames(res.ceres.18q3$data), res.ceres.18q3$data), fn, quote=FALSE, row.names=FALSE, sep="\t")
}
ix = ceres.combined.18q3.tt == i
cn = intersect(colnames(ceres.combined.18q3)[ix], rownames(Absolute_data))
if(length(cn) >= min.n) {
print("CERES Combined 18q3")
res.ceres.combined = enrich(ceres.combined.18q3[,cn], Absolute_data[cn,])
# NOTE(review): "20181108" prefix again inconsistent with "20181220"
# two lines below -- confirm intended filename
fn = paste0("20181108_D2combined_", i, "_18Q3_WGD_results.txt")
write.table(res.ceres.combined$res, fn, quote=FALSE, row.names=FALSE, sep="\t")
fn = paste0("20181220_D2combined_", i, "_18Q3_WGD_data.txt")
write.table(data.frame(Gene=rownames(res.ceres.combined$data), res.ceres.combined$data), fn, quote=FALSE, row.names=FALSE, sep="\t")
}
}
# Pan-cancer analysis: same enrichment across all cell lines, no
# tumor-type restriction.
res.ceres.17q2 = enrich(ceres.17q2, Absolute_data)
res.ceres.18q3 = enrich(ceres.18q3, Absolute_data)
res.ceres.combined = enrich(ceres.combined.18q3, Absolute_data)
write.table(res.ceres.17q2$res, "20181220_CERES_17Q2_WGD_results.txt", quote=FALSE, row.names=FALSE, sep="\t")
write.table(data.frame(Gene=rownames(res.ceres.17q2$data), res.ceres.17q2$data), "20181220_CERES_17Q2_WGD_data.txt", quote=FALSE, row.names=FALSE, sep="\t")
write.table(res.ceres.18q3$res, "20181220_CERES_18Q3_WGD_results.txt", quote=FALSE, row.names=FALSE, sep="\t")
write.table(data.frame(Gene=rownames(res.ceres.18q3$data), res.ceres.18q3$data), "20181220_CERES_18Q3_WGD_data.txt", quote=FALSE, row.names=FALSE, sep="\t")
write.table(res.ceres.combined$res, "20181220_D2combined_18Q3_WGD_results.txt", quote=FALSE, row.names=FALSE, sep="\t")
write.table(data.frame(Gene=rownames(res.ceres.combined$data), res.ceres.combined$data), "20181220_D2combined_18Q3_WGD_data.txt", quote=FALSE, row.names=FALSE, sep="\t")
# record package versions for reproducibility
sessionInfo()
|
352bb6736a341cef1aa1f4ca706ca54f489d908b
|
db48edb75b5d4d79acb6d0b491e8515c6c5e2d9a
|
/man/mesaDev.Rd
|
e691c21788bf3189e348a86d2f813ff05286a721
|
[
"MIT"
] |
permissive
|
tunelipt/rwmesa
|
9a164c96b54c87be428969f721ec8ec12ffd4e37
|
579465f190bd4b3bd858821c00f3019779540841
|
refs/heads/master
| 2020-07-27T13:54:10.802381
| 2019-09-17T19:09:29
| 2019-09-17T19:09:29
| 209,114,111
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 600
|
rd
|
mesaDev.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wmesaclient.R
\name{mesaDev}
\alias{mesaDev}
\title{Criar conexão com a mesa giratória do túnel de vento do IPT}
\usage{
mesaDev(url = "localhost", port = 9596)
}
\arguments{
\item{url}{String com URL do servidor XML-RPC}
\item{port}{Inteiro com o número da porta TCP/IP usado pelo XML-RPC}
}
\value{
Um objeto de classe \code{mesa}
}
\description{
Cria um objeto de classe \code{mesa} que controla a comunicação com
o servidor XML-RPC da mesa giratória do túnel de vento do IPT.
}
\examples{
dev <- mesaDev()
}
|
3754be5ce66bf3eee92a2ac8efcbdeb7ff29c1bb
|
35107538d0ab4c8bcac7804d54ed33c4db4a8754
|
/Enemble_test_codes/2_CrossValidation and bagging.r
|
bbe7e86d9b50b99f9b5dcfa532cad0053368efb2
|
[] |
no_license
|
shikharsgit/Ensemble-Selection
|
7b68277be3c4899e7ebfe605a9df12d9972fdfd5
|
5bb4b2e5aede43a6a570883c49fe465de59ca64b
|
refs/heads/master
| 2021-03-16T06:52:42.583673
| 2017-07-23T21:19:14
| 2017-07-23T21:19:14
| 94,370,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
2_CrossValidation and bagging.r
|
# Set up the working directory tree under the current user's Desktop and
# load the dataset specification table.
rm(list=ls())
user=(Sys.info()[6])
Desktop=paste("C:/Users/",user,"/Desktop/",sep="")
setwd(Desktop)
# Fix: the original called dir.create() separately for "MEMS",
# "MEMS/S6/NIC" and "MEMS/S6/NIC/Datasets" but never created the
# intermediate "MEMS/S6"; non-recursive dir.create() fails when the
# parent is missing. recursive=TRUE creates the whole chain in one call
# (showWarnings=FALSE suppresses the already-exists warning, matching
# the original's tolerance of pre-existing directories).
dir.create(paste(Desktop,"MEMS/S6/NIC/Datasets",sep=""), recursive=TRUE, showWarnings=FALSE)
home=paste(Desktop,"MEMS/S6/NIC/Datasets/",sep="")
setwd(home)
# one row per dataset; the "name" column is the per-dataset subdirectory
ds=read.csv("data_specs.csv",as.is=T)
rows= c(1:4)
# For each dataset: build 5 stratified CV folds (random ranks are binned
# separately within dv==0 and dv==1 so the class balance of dv is kept),
# then write 5 bootstrap "bags" per fold plus the fold itself.
for(i in rows)
{
setwd(paste(home,ds[i,"name"],"/",sep=''))
training_data= read.csv("preproc_data/full_train.csv",as.is=T)
### fold creation
dat_dv=training_data[,c("dv","id")]
## caret::createFolds was throwing an error here, so folds are built
## manually with explicit control over the dv class balance.
set.seed(243)
# independent random ranks within each dv class
rand_num=c(sample(c(1:length(which(dat_dv$dv==0)))),sample(c(1:length(which(dat_dv$dv==1)))))
dat_dv2=cbind(dat_dv[order(dat_dv[,'dv']),],rand_num)
dat_dv2_0=dat_dv2[dat_dv2$dv==0,'rand_num']
dat_dv2_0=as.data.frame(dat_dv2_0)
dat_dv2_1=dat_dv2[dat_dv2$dv==1,'rand_num']
dat_dv2_1=as.data.frame(dat_dv2_1)
# cut the random ranks into (up to) 5 quantile bins per class
breaks=unique(quantile(dat_dv2_0[,1], probs = seq(0, 1, by= 0.2)))
dat_dv2_0[,paste(colnames(dat_dv2_0[1]),"bin",sep="_")] <- cut(dat_dv2_0[,1], breaks,include.lowest=TRUE ,labels=c(1:ifelse(length(breaks)>1,(length(breaks) - 1),length(breaks))))
colnames(dat_dv2_0)[1]<-paste("rand_num")
colnames(dat_dv2_0)[2]<-paste("bin")
breaks=unique(quantile(dat_dv2_1[,1], probs = seq(0, 1, by= 0.2)))
dat_dv2_1[,paste(colnames(dat_dv2_1[1]),"bin",sep="_")] <- cut(dat_dv2_1[,1], breaks,include.lowest=TRUE ,labels=c(1:ifelse(length(breaks)>1,(length(breaks) - 1),length(breaks))))
colnames(dat_dv2_1)[1]<-paste("rand_num")
colnames(dat_dv2_1)[2]<-paste("bin")
dat_dv2_bin=rbind(dat_dv2_0,dat_dv2_1)
dat_dv2=cbind(dat_dv2,dat_dv2_bin)
# keep dv, id and the bin label
dat_dv2=dat_dv2[,c(1,2,5)]
dat_dv2=dat_dv2[order(dat_dv2[,'id']),]
# data_fold_j = everything OUTSIDE bin j (the training complement)
for(j in 1:5)
{
assign(paste("data_fold_",j,sep=''), dat_dv2[ dat_dv2$bin!=j, c("id","dv")])
}
#########
dflist <- list(data_fold_1,data_fold_2,data_fold_3,data_fold_4,data_fold_5) #,data_fold_6,data_fold_7,data_fold_8,data_fold_9,data_fold_10)
# Fix: the original computed max(as.character(lapply(...))), which
# compares row counts LEXICOGRAPHICALLY (e.g. "9" > "1000"); compare
# the counts numerically instead.
max_row_num=max(vapply(dflist,nrow,integer(1)))
###Creating bootstrap samples
for(k in 1:5) #1:10
{
for(l in 1:5) #1:8
{
# Fix: the original reset set.seed(243) before every draw, so all 5
# bags of a fold were identical (defeating the purpose of bagging).
# Use a distinct, reproducible seed per (fold, bag) pair and let both
# sample() calls below draw from the same stream.
set.seed(243 + 100*k + l)
# draw 67-70% of the fold's rows without replacement
a=dflist[[k]][sample(1:nrow(dflist[[k]]),floor(runif(1, 67, 70)*nrow(dflist[[k]])/100)),]
# pad the bag back up to max_row_num rows by resampling from itself
b=rbind(a,a[sample(1:nrow(a),max_row_num-nrow(a)),])
b=merge(training_data,b,by=c('id','dv'))
b=b[order(b[,'id']),]
write.csv(b, paste("preproc_data/dfold",k,"_bag",l,".csv",sep=''),row.names=F)
}
# also write the un-bagged fold itself
write.csv(merge(training_data,dflist[[k]],by=c('id','dv')),paste("preproc_data/dfold",k,".csv",sep=''),row.names=F)
}
}
|
24b0787c86c75b25510de0350210a371994032b2
|
6d2265c1d24df25711f4796261b9fb9f36ff8bc6
|
/MSOD_syn_deleteJobs.R
|
3009458deb25fc392c0ae7a4f0128ee0ab181e1e
|
[] |
no_license
|
yujunnokia/MSOD
|
0a0721d3885057d938488b6a89f4a2cee2748163
|
29efd2b8dcda1b02f0a4234c94646443b28a5b3b
|
refs/heads/master
| 2020-05-28T10:15:49.236682
| 2014-01-22T17:41:29
| 2014-01-22T17:41:29
| 16,145,811
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
MSOD_syn_deleteJobs.R
|
#! /usr/bin/env Rscript
# Delete queued synthetic-experiment jobs via qdel, one call per
# dataset/index/model combination.
# NOTE(review): the job name is built from model and index only, so the
# dataset loop repeats the same qdel calls -- confirm the naming scheme.
datasets <- c("syn","syn-I","syn-NL") #c("syn","syn-I","syn-NL","syn-I-NL")
indices <- 1:15
models <- c("MSODVL") # c("TRUE","OD","ODLP","MSODTRUE","MSOD")
for (ds.name in datasets) {
  for (run.idx in indices) {
    for (model.name in models) {
      job <- paste("S.", model.name, ".", run.idx, sep = "")
      system(paste("qdel", job))
    }
  }
}
|
013fde112bb984be66371163020b21d7b251c878
|
fd91fd81027df91f03e29138b26e2a1b6e31e054
|
/man/PhyDat2Morphy.Rd
|
bb0b009c79b91dc0c791a4c96f66c5f2cf5fd212
|
[] |
no_license
|
gitter-badger/TreeSearch
|
77fa06b36d691f942c8ef578f35f3e005cc2f13e
|
5a95195211d980baa6db29260bf929a12c5bf707
|
refs/heads/master
| 2022-04-20T07:40:33.050434
| 2020-04-16T13:47:57
| 2020-04-16T13:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,556
|
rd
|
PhyDat2Morphy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpl_morphy_objects.R
\name{PhyDat2Morphy}
\alias{PhyDat2Morphy}
\title{Initialize a Morphy Object from a \code{phyDat} object}
\usage{
PhyDat2Morphy(phy)
}
\arguments{
\item{phy}{An object of class \code{\link{phyDat}}.}
}
\value{
A pointer to an initialized Morphy object.
}
\description{
Creates a new Morphy object with the same size and characters as the
\code{phyDat} object
}
\seealso{
Other Morphy API functions: \code{\link{MorphyErrorCheck}},
\code{\link{MorphyWeights}},
\code{\link{SetMorphyWeights}},
\code{\link{SingleCharMorphy}},
\code{\link{UnloadMorphy}},
\code{\link{mpl_apply_tipdata}},
\code{\link{mpl_attach_rawdata}},
\code{\link{mpl_attach_symbols}},
\code{\link{mpl_delete_Morphy}},
\code{\link{mpl_delete_rawdata}},
\code{\link{mpl_first_down_recon}},
\code{\link{mpl_first_up_recon}},
\code{\link{mpl_get_charac_weight}},
\code{\link{mpl_get_num_charac}},
\code{\link{mpl_get_num_internal_nodes}},
\code{\link{mpl_get_numtaxa}},
\code{\link{mpl_get_symbols}},
\code{\link{mpl_init_Morphy}},
\code{\link{mpl_new_Morphy}},
\code{\link{mpl_second_down_recon}},
\code{\link{mpl_second_up_recon}},
\code{\link{mpl_set_charac_weight}},
\code{\link{mpl_set_num_internal_nodes}},
\code{\link{mpl_set_parsim_t}},
\code{\link{mpl_translate_error}},
\code{\link{mpl_update_lower_root}},
\code{\link{mpl_update_tip}},
\code{\link{summary.morphyPtr}}
}
\author{
Martin R. Smith
}
\concept{Morphy API functions}
|
4a88282325714905a38b2a70312e4378bbf08602
|
9bc17a169325375bc993b540d2ad0f0810ca0e76
|
/man/Formula1.Rd
|
76eba1564bbdbb684a8ceb208b886add490f5bd1
|
[] |
no_license
|
alanarnholt/PASWR
|
335b960db32232a19d08560938d26f168e43b0d6
|
f11b56cff44d32c3683e29e15988b6a37ba8bfd4
|
refs/heads/master
| 2022-06-16T11:34:24.098378
| 2022-05-14T22:56:11
| 2022-05-14T22:56:11
| 52,523,116
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
Formula1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PASWR-package.R
\docType{data}
\name{Formula1}
\alias{Formula1}
\title{Pit Stop Times}
\format{
A data frame with 10 observations on the following 3 variables:
\describe{
\item{Race}{number corresponding to a race site}
\item{Team1}{pit stop times for team one}
\item{Team2}{pit stop times for team two}
}
}
\source{
Ugarte, M. D., Militino, A. F., and Arnholt, A. T. (2008)
\emph{Probability and Statistics with R}. Chapman & Hall/CRC.
}
\description{
Pit stop times for two teams at 10 randomly selected Formula 1 races
}
\examples{
with(data = Formula1,
boxplot(Team1, Team2))
}
\keyword{datasets}
|
943ec7ad191ad74114fc5e5f4a314558672dc7d1
|
352090e86c783a1edd02f4c8634137f456e92ce3
|
/man/rptgam.Rd
|
c73f463cda9621b753b6efcf4a2a39db6aec7085
|
[] |
no_license
|
elipickh/rptGam
|
140e56494efa16cb08606704886417f509341556
|
365d4247caee9c4fa25d5e880ac749b98a34491b
|
refs/heads/master
| 2020-08-05T19:07:09.376862
| 2019-11-20T13:32:43
| 2019-11-20T13:32:43
| 212,670,006
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 12,678
|
rd
|
rptgam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rptgam.R
\name{rptgam}
\alias{rptgam}
\title{Repeatability estimates for random effect GAM models}
\usage{
rptgam(
formula = NULL,
data = NULL,
gamObj = NULL,
rterms = NULL,
gam_pars = NULL,
bam_pars = NULL,
nboot = 0,
boot_type = "param",
ci = 0.95,
ci_type = "all",
case_resample = NULL,
case_tries = 100,
case_minrows = NULL,
nperm = 0,
aic = TRUE,
select = TRUE,
saveboot = TRUE,
savepermute = TRUE,
seed = NULL,
verbose = TRUE,
parallel = TRUE,
ncores = -1,
cl_type = "PSOCK",
logical = FALSE,
blas_threads = NULL
)
}
\arguments{
\item{formula}{A GAM formula. If NULL (default), then a fitted gam object should be
passed to gamObj. Must contain at least one random term, and none of the random terms
can be specified with interactions with other terms.}
\item{data}{A data frame or list containing the model response variable and covariates
required by the formula. If NULL (default), then a fitted gam object should be passed
to gamObj.}
\item{gamObj}{A fitted gam object as produced by mgcv::gam or mgcv::bam. Must contain at
least one random term, and none of the random terms can be specified with interactions
with other terms. Only Gaussian models are currently supported. If NULL, then formula
and data should both be non-NULL.}
\item{rterms}{A character string or a vector of character strings of random term(s) to
include in repeatability estimations. If NULL (default), include all random terms.}
\item{gam_pars}{A list of parameters for mgcv::gam model specification, which will be
used when formula and data are both non-NULL. If TRUE, then the default parameter
values in mgcv::gam will be used. Note that gam uses method = "GCV.Cp" by default. To
use 'REML' set gam_pars to list(method = "REML"), in addition to other settings of interest.
Should not contain the control(nthreads) parameter, as this will be set by rptgam.
Only Gaussian models are currently supported.}
\item{bam_pars}{A list of parameters for mgcv::bam model specification, which will be
used when formula and data are both non-NULL. If TRUE, then the default parameter values
in mgcv::bam will be used. Should not contain the nthreads (which is used when
DISCRETE = TRUE) or the cluster parameters, as they will be set by rptgam. Only Gaussian
models are currently supported.}
\item{nboot}{Number of bootstrap replications per bootstrap type. Default is 0.}
\item{boot_type}{Type of bootstraps to perform for the repeatability estimates. Can be
one or more of the following: 'case', 'param' (default), 'resid', 'cgr', 'reb0', 'reb1',
'reb2'. If 'all', then all methods will be used. Note that types 'reb0', 'reb1', and
'reb2' are available when the GAM model contains exactly one random term.
See \strong{Details} below for more information on the various types.}
\item{ci}{Confidence level for the bootstrap interval(s). Default to 0.95.}
\item{ci_type}{Type of ci for the bootstraps. Can be 'perc', which uses R's quantile
method or 'bca' which uses the bias-corrected and accelerated method (BCa). 'all' (default)
returns both of these types. See \strong{Details} below for more information on the
BCa method used in rptgam.}
\item{case_resample}{A vector of logicals. Required when the 'case' bootstrap method is
used, and specifies whether each level of the model should be resampled. "The levels
should be specified from the highest level (largest cluster) of the hierarchy to the
lowest (observation-level); for example for students within a school, specify the school
level first, then the student level" (lmeresampler::case_bootstrap). Length of vector
should be one more than the number of random terms in the model (the extra, and last,
one being the row unit). See \strong{Details} below for more information.}
\item{case_tries}{A numeric indicating the maximum number of resampling tries for the
'case' bootstrap to get a usable sample (see case_minrows). Default is 100.}
\item{case_minrows}{A numeric indicating the minimum number of rows allowable in a
'case' type bootstrap. This number will be ignored if it is below the number of rows
allowable to run the gam model. If NULL (default), then will use no less than number
of rows allowable to run the gam model.}
\item{nperm}{Number of permutations for calculating asymptotic p-values for the
repeatability estimates. Default is 0. See \strong{Details} below for information
on the permutation method used in rptgam.}
\item{aic}{A logical variable (default is TRUE) indicating whether to calculate the AIC(s) of the
models with and without the random effect(s).}
\item{select}{A logical variable (default is TRUE) indicating whether to calculate coefficients
for random effects with and without selection penalties. See \strong{Details} below.}
\item{saveboot}{A logical variable (default is TRUE) indicating whether to save the bootstrapped
repeatability estimates in the returned object.}
\item{savepermute}{A logical variable (default is TRUE) indicating whether to save the permutated
repeatability estimates in the returned object.}
\item{seed}{Numeric (which is converted to integer) to be used in set.seed to allow
reproducible results of bootstraps and permutations. Default is NULL.}
\item{verbose}{A logical variable (default is TRUE) indicating if messages should be printed.}
\item{parallel}{A logical variable (default is TRUE) indicating if the models, bootstraps, and
permutations should be run in parallel.}
\item{ncores}{An integer indicating how many cores to use for parallel processing.
Positive integers specify the number of cores. -1 means using all processors, -2 means
using 1 less than all processors, etc. Default is -1.}
\item{cl_type}{One of 'PSOCK' (default) or 'FORK', indicating the type of cluster to
use for parallel processing. 'FORK' can be faster than 'PSOCK' but can be unstable
and isn't available in Windows.}
\item{logical}{A logical variable indicating if virtual CPU cores should be counted when ncores
is negative (the same as running parallel::detectCores(logical = TRUE)). FALSE (default)
only counts physical cores.}
\item{blas_threads}{An integer indicating the number of BLAS threads to use. For
multi-threaded BLAS libraries, such as MKL, OpenBLAS and Apple BLAS, and when parallel
is set to TRUE, it can be faster to limit BLAS threads to 1. This is accomplished
via RhpcBLASctl::blas_set_num_threads(1), which will be installed if it is not already.
R's default BLAS library is single threaded, and so this parameter isn't necessary.
NULL (default) skips this process.}
}
\value{
Returns an object of class \code{rptgam}.
}
\description{
Estimate repeatabilties for random effect terms in mgcv GAM Gaussian models
}
\details{
\strong{Bootstraps:}
The types of bootstraps can be divided into parametric, semi-parametric, and nonparametric categories.
"The parametric bootstrap requires the strongest assumptions: the explanatory variables are considered
fixed, and both the model (specification) and the distribution(s) are assumed to be correct. The
residual bootstrap requires weaker assumptions: apart from considering the explanatory variables
as fixed, only the model (specification) is assumed to be correct. This implies, for example, that
the residuals are assumed to be homoskedastic. The cases bootstrap, finally, requires minimal
assumptions: only the hierarchical dependency in the data is assumed to be specified correctly"
(Van der Leeden et al., 2008).
The parametric method is similar to the bootstrap method used in rptR, which in turn is based on lme4's
'simulate' function. This method "simulates bootstrap samples from the estimated distribution functions.
That is, error terms and random effects are simulated from their estimated normal distributions and are
combined into bootstrap samples via the fitted model equation." (lmeresampler::parametric_bootstrap)
The residual bootstrap method "resamples the residual quantities from the fitted linear mixed-effects
model in order to generate bootstrap resamples. That is, a random sample, drawn with replacement, is
taken from the estimated error terms and the EBLUPS (at each level) and the random samples are combined
into bootstrap samples via the fitted model equation." (lmeresampler::resid_bootstrap)
The cgr bootstrap method (Carpenter et al., 2003) adjusts the residual method (which
can underestimate the variances) by centering the random effects and residuals to resample from a
distribution with mean zero. See lmeresampler::cgr_bootstrap for more details.
The reb0, reb1, and reb2 belong to the class of the random effects block (REB) bootstrap, which
is a semi-parametric method that can better account for distribution and assumptions misspecification
(Chambers & Chandra, 2013). The REB method is only applicable for models with exactly one random term.
See lmeresampler::reb_bootstrap for more on the theory and for details on the three types of REB.
The cases bootstrap is a fully nonparametric method that "resamples the data with respect to the clusters
in order to generate bootstrap samples. Depending on the nature of the data, the resampling can be done
only for the higher-level cluster(s), only at the observation-level within a cluster, or at all levels."
(lmeresampler::case_bootstrap). According to Van der Leeden et al. (2008), models with only one random term,
denoting the individuals ("level 2"), and where level 1 (i.e., rows) is repeated measures, should probably
only resample level 2, but not within individuals. See Van der Leeden et al. (2008) for more details.
\strong{Permutations:}
Permutations are performed according to Lee et al. (2012), which permutes the weighted residuals both within
and among subjects. Note that this permutation method is different from the one currently used in rptR (ver 0.9.22).
\strong{BCa:}
\strong{Penalized model ('SELECT') comparisons:}
If TRUE, rptgam will run the opposite SELECT method used in the original model, and p-values from both methods will be returned for comparison. The SELECT method in mgcv gam/bam penalizes the terms in the model (potentially removing them altogether), as a form of variable selection (see ?mgcv::gam.selection for details).
}
\examples{
library(mgcv)
set.seed(1)
dat <- gamSim(1, n = 100, scale = 2)
fac <- sample(1:5, 100, replace = TRUE)
b <- rnorm(20) * 0.5
dat$y <- dat$y + b[fac]
dat$fac <- as.factor(fac)
# GAM model with one random term
rm1 <- gam(y ~ s(fac, bs = "re") + s(x0) + s(x1) + s(x2) + s(x3),
data = dat, method = "REML")
# Pass the fitted GAM object into rptgam
# nboot and nperm of 100 is for illustration purposes
# and would typically be set higher.
out <- rptgam(gamObj = rm1, parallel = TRUE, nboot = 100,
nperm = 100, aic = TRUE, select = TRUE, verbose = TRUE, seed = 1,
boot_type = 'all', ci_type = 'all',
case_resample = c(TRUE,FALSE))
# Alternatively, run the GAM model through rptgam
out <- rptgam(formula = y ~ s(fac,bs = "re") + s(x0) + s(x1)+
s(x2) + s(x3), data = dat, gam_pars = list(method = "REML"),
parallel = TRUE, nboot = 100, nperm = 100,
aic = TRUE, select = TRUE, verbose = TRUE, seed = 1,
boot_type = 'all', ci_type = 'all',
case_resample = c(TRUE,FALSE))
# bam + discrete method
out <- rptgam(formula = y ~ s(fac, bs = "re") + s(x0) + s(x1) +
s(x2) + s(x3), data = dat, bam_pars = list(discrete = TRUE),
parallel = TRUE, nboot = 100, nperm = 100,
aic = TRUE, select = TRUE, verbose = TRUE, seed = 1,
boot_type = 'all', ci_type = 'all',
case_resample = c(TRUE,FALSE))
}
\references{
Nakagawa, S. & Schielzeth, H. (2010) \emph{Repeatability for
Gaussian and non-Gaussian data: a practical guide for biologists}.
Biological Reviews 85: 935-956.
Van der Leeden, R., Meijer, E., & Busing, F. M. (2008). Resampling
multilevel models. In Handbook of multilevel analysis (pp. 401-433). Springer, New York, NY.
Carpenter, J. R., Goldstein, H. and Rasbash, J. (2003) A novel bootstrap
procedure for assessing the relationship between class size and achievement. Journal of
the Royal Statistical Society. Series C (Applied Statistics), 52, 431–443.
Chambers, R. and Chandra, H. (2013) A random effect block bootstrap for clustered
data. Journal of Computational and Graphical Statistics, 22, 452–470.
Lee, O. E., & Braun, T. M. (2012). Permutation tests for random effects in linear
mixed models. Biometrics, 68(2), 486-493.
https://github.com/aloy/lmeresampler
}
\author{
Eliezer Pickholtz (eyp3@cornell.edu)
}
|
d0b8fa2aebdeea15e05b5caa28fc572480df0d1d
|
e8443eddb0560a39855bb69c0f7d1cdc54397173
|
/static/stat572/notes/Code_gprior.R
|
8ddbeba1b0870702c2afd5748071a5dacaa14b61
|
[
"CC-BY-4.0"
] |
permissive
|
UrbanStudy/stat2019_website
|
68b6f530e0e18d60fb24cd9092ab80f7939a26e4
|
9d41d5caf4ece4c62bf0c301eb8c90429b17ce7d
|
refs/heads/master
| 2021-06-17T01:30:06.363299
| 2021-02-01T04:12:55
| 2021-02-01T04:12:55
| 148,949,001
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,324
|
r
|
Code_gprior.R
|
#Function to generate draws from the joint density of beta and sigma.sq
# Monte Carlo sampler for Bayesian linear regression under Zellner's
# g-prior (cf. Hoff, "A First Course in Bayesian Statistical Methods").
#   y    response vector (length n)
#   X    design matrix (n x p)
#   g    g-prior scale; default n is the "unit information" choice
#   nu0, s20  prior degrees of freedom and scale for sigma^2; s20
#        defaults to the OLS residual variance.
#        NOTE(review): if lm() fails, try() returns a "try-error" object
#        and the rgamma() call below will then error -- confirm callers
#        always supply a valid design.
#   S    number of Monte Carlo draws
# Returns list(beta = S x p matrix of draws, s2 = S draws of sigma^2).
lm.gprior <- function(y,X,g=dim(X)[1],nu0=1,
s20=try(summary(lm(y~-1+X))$sigma^2,silent=TRUE),
S=1000){
n<-dim(X)[1] ; p<-dim(X)[2]
# shrunken projection ("hat") matrix implied by the g-prior
Hg<- (g/(g+1)) * X%*%solve(t(X)%*%X)%*%t(X)
# residual sum of squares relative to the shrunken fit
SSRg<- t(y)%*%( diag(1,nrow=n) - Hg ) %*%y
# draw sigma^2 from its inverse-gamma full conditional
s2<-1/rgamma(S, (nu0+n)/2, (nu0*s20+SSRg)/2 )
# posterior covariance (up to sigma^2) and mean of beta
Vb<- g*solve(t(X)%*%X)/(g+1)
Eb<- Vb%*%t(X)%*%y
# one row of N(0, s2[i]) noise per draw, mapped through chol(Vb) and
# shifted by the posterior mean
E<-matrix(rnorm(S*p,0,sqrt(s2)),S,p)
beta<-t( t(E%*%chol(Vb)) +c(Eb))
list(beta=beta,s2=s2)
}
#load the oxygen intake data: we are interested in comparing the difference in response
# to two different treatments also accounting for age
data.oxygen<-dget("yX.o2uptake")
#run regression
regOxy.intake <- lm.gprior(y=data.oxygen[,"uptake"],
X=data.oxygen[,-1],
S=10000)
# marginal posterior density of each regression coefficient
par(mfrow=c(2,2))
for(j in 1:4){
plot(density(regOxy.intake$beta[,j]),
xlab=bquote(beta[.(j-1)]),
main=bquote(paste("p(",beta[.(j-1)],"|y,X,",sigma^2,")")))
# reference line at zero for sign assessment
abline(v=0,lty=3,lwd=2)
}
# posterior density of the error variance
par(mfrow=c(1,1))
plot(density(regOxy.intake$s2),
xlab=expression(sigma^2),
main=expression(paste("p(",sigma^2,"|y,X)")))
#To conduct the comparison of interest, note that the model is:
# Yi = beta0 + beta1*trt + beta2*age + beta3*(trt*age),
#such that:
#E[Y|trt=running,age] = beta0 + beta2*age
#E[Y|trt=aerobics,age] = (beta0+beta1) + (beta2+beta3)*age
#
#So the difference in effect can be measured by:
#E[Y|trt=aerobics,age]-E[Y|trt=running,age] = beta1 + beta3*age
ages <- 20:31
# one column per age: posterior draws of the treatment difference
diff.mat <- matrix(NA,ncol=length(ages),nrow=dim(regOxy.intake$beta)[1])
k<-1
for(a in ages){
# beta[,2] = beta1 (treatment), beta[,4] = beta3 (treatment x age)
diff.mat[,k] <- regOxy.intake$beta[,2]+regOxy.intake$beta[,4]*a
k <- k+1
}
colnames(diff.mat) <- ages
boxplot(diff.mat,col="cornflowerblue",pch=20,ylab="age",xlab="diff. max Oxygen intake",
horizontal=T)
abline(v=0,lty=3,lwd=2)
#do a similar analysis with the following dataset:
diabetes.train<-dget("http://www.stat.washington.edu/~hoff/Book/Data/data/yX.diabetes.train")
#use this dataset to validate how well the model fits the data
diabetes.test<-dget("http://www.stat.washington.edu/~hoff/Book/Data/data/yX.diabetes.test")
|
6886b87f98b2a49f23f4e3a92466dd642d2a32b0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/js/examples/coffee_compile.Rd.R
|
7ba030f9a590717f788787392e32fca206cc63e6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 374
|
r
|
coffee_compile.Rd.R
|
# Auto-generated example script for js::coffee_compile (extracted from the
# package's Rd examples). Requires the 'js' package, which bundles the
# CoffeeScript compiler and UglifyJS.
library(js)
### Name: coffee_compile
### Title: Coffee Script
### Aliases: coffee_compile coffee
### ** Examples
# Hello world
coffee_compile("square = (x) -> x * x")
# bare = TRUE omits the top-level safety wrapper from the generated JS
coffee_compile("square = (x) -> x * x", bare = TRUE)
# Simple script: compile a bundled .coffee file and show the JS output
demo <- readLines(system.file("example/demo.coffee", package = "js"))
js <- coffee_compile(demo)
cat(js)
# minified version of the same compiled output
cat(uglify_optimize(js))
|
293cf06338c85f063193067ba9e440740cb8e7fb
|
764c655327e373a61091591d760f4ceeb00fe9e7
|
/bin/setup
|
ecd8a69f159ee51b3c778c3c3cfb391d802cd3a4
|
[] |
no_license
|
nelsonmestevao/beautiful-cli
|
f89bc53226085dc9c1f21daa21236c6700835b46
|
82fb0d954c8e091e5890533d7dcabc24aadc7122
|
refs/heads/main
| 2023-02-20T04:00:31.805725
| 2020-12-21T21:45:06
| 2020-12-21T21:45:06
| 332,502,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 120
|
setup
|
#!/usr/bin/env Rscript
# Bootstrap the project library: make sure 'renv' is installed, then restore
# the package versions pinned in renv.lock.
renv_missing <- !requireNamespace("renv", quietly = TRUE)
if (renv_missing) install.packages("renv")
renv::restore()
|
|
de7c48e9e16c16be91cf3643730274b7cbb0aed4
|
6bca977d67101a6274457ca850517ee41cf06c45
|
/plot_functions/plot.meth.nek2.R
|
5503c88e8072c8350f07b7f9ccc7a66564e7cfa6
|
[] |
no_license
|
AAlhendi1707/preinvasive
|
bedcf1f1eca93ab9ae4b44bf32e4d0f9947a1fad
|
e683fa79ad76d0784437eba4b267fb165b7c9ae4
|
refs/heads/master
| 2022-01-06T18:25:52.919615
| 2019-01-18T09:39:42
| 2019-01-18T09:39:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
plot.meth.nek2.R
|
########################################################################################################
# Methylation of NEK2-associated probe by group
########################################################################################################
# Write the group-wise methylation plot for probe cg17931972 (NEK2) to a PDF.
#
# filename: path of the PDF file to create.
#
# Relies on plotMethylationByGroup(), defined elsewhere in this project.
plot.meth.nek2 <- function(filename){
  pdf(filename)
  # Close the PDF device even if plotting errors, so the device never leaks
  # (the original called dev.off() unconditionally after the plot, which left
  # the device open on failure).
  on.exit(dev.off(), add = TRUE)
  plotMethylationByGroup("cg17931972", "NEK2", legend.pos="topleft")
}
|
1e9558bdc03791b5c9c8016e416b9ea0dd5c2452
|
27c8c8337342e22d3e638d9738ca6499243bc86b
|
/R/statistic-viper.R
|
0cd07251cf3070c5ed4a8bb77e28383b45ada355
|
[] |
no_license
|
Eirinits/decoupleR
|
1f578ef44dd3a81496e276058fb3c6eca7d6608d
|
3926381bc63362a7ec7cb1b32b40a85f1f9a9cd1
|
refs/heads/master
| 2023-06-03T01:35:56.461380
| 2021-05-25T18:57:17
| 2021-05-25T18:57:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,924
|
r
|
statistic-viper.R
|
#' VIPER wrapper
#'
#' This function is a convenient wrapper for the [viper::viper()] function.
#'
#' @inheritParams .decoupler_mat_format
#' @inheritParams .decoupler_network_format
#' @inheritDotParams viper::viper -eset -regulon -minsize
#'
#' @return A long format tibble of the enrichment scores for each tf
#'  across the samples. Resulting tibble contains the following columns:
#'  1. `statistic`: Indicates which method is associated with which score.
#'  2. `tf`: Source nodes of `network`.
#'  3. `condition`: Condition representing each column of `mat`.
#'  4. `score`: Regulatory activity (enrichment score).
#' @family decoupleR statistics
#' @export
#' @import dplyr
#' @import tibble
#' @import purrr
#' @import tidyr
#' @examples
#' inputs_dir <- system.file("testdata", "inputs", package = "decoupleR")
#'
#' mat <- readRDS(file.path(inputs_dir, "input-expr_matrix.rds"))
#' network <- readRDS(file.path(inputs_dir, "input-dorothea_genesets.rds"))
#'
#' run_viper(mat, network, tf, target, mor, likelihood, verbose = FALSE)
run_viper <- function(mat,
network,
.source = .data$tf,
.target = .data$target,
.mor = .data$mor,
.likelihood = .data$likelihood,
...) {
# Before to start ---------------------------------------------------------
# Reshape the long-format network tibble into the named-regulon list that
# viper::viper() expects; {{ }} forwards the caller's (possibly unquoted)
# column selections into convert_to_viper().
network <- network %>%
convert_to_viper({{ .source }}, {{ .target }}, {{ .mor }}, {{ .likelihood }})
# Analysis ----------------------------------------------------------------
# exec() lets extra arguments (...) be spliced into viper::viper() while the
# three arguments this wrapper controls stay fixed; minsize = 0 disables
# viper's regulon-size filtering so decoupleR controls filtering upstream.
exec(
.fn = viper::viper,
eset = mat,
regulon = network,
minsize = 0,
!!!list(...)
) %>%
# viper returns a tf x condition matrix; reshape it into the long tibble
# documented above, tagging every row with the statistic name.
as.data.frame() %>%
rownames_to_column("tf") %>%
pivot_longer(-.data$tf, names_to = "condition", values_to = "score") %>%
add_column(statistic = "viper", .before = 1)
}
|
676f28a312982a8707f270f8b9521a083d171115
|
14c304c74e251ea09cb40abfb106623e45cb336b
|
/R/ddo_ddf_spark.R
|
db36a4cf52ce964090b6fef3dd738c561c2da01e
|
[
"BSD-3-Clause"
] |
permissive
|
krseibel/datadr
|
d4ac9ecd42234729cef7d68f603497f7d089727a
|
b8ad0bdfd207a2f311d56209f4ff254f55c7fdec
|
refs/heads/master
| 2020-12-28T02:04:32.139688
| 2014-11-05T17:37:42
| 2014-11-05T17:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,275
|
r
|
ddo_ddf_spark.R
|
## Methods for object of class "kvSparkData" - key-value pairs as Spark RDDs
#' Create an empty distributed-data-object shell for a Spark connection; the
#' returned object only carries the "kvSparkData" class tag, all real state
#' is attached later as attributes.
#' @export
ddoInit.sparkDataConn <- function(obj, ...) {
  shell <- list()
  class(shell) <- "kvSparkData"
  shell
}
#' A Spark connection object is already in its usable form, so connection
#' initialization is the identity.
#' @export
ddoInitConn.sparkDataConn <- function(obj, ...) obj
#' Attribute names a kvSparkData ddo/ddf must carry, looked up from the
#' package-level registry via getDr().
#' @export
requiredObjAttrs.kvSparkData <- function(obj) {
  registry_keys <- c(ddo = "requiredDdoAttrs", ddf = "requiredDdfAttrs")
  lapply(registry_keys, getDr)
}
#' Basic ddo attributes for Spark-backed data: the connection itself, a flag
#' that key extraction is unsupported, placeholders for storage size and
#' division count, and the first key-value pair as an example record.
#' @export
getBasicDdoAttrs.kvSparkData <- function(obj, conn) {
  first_kv <- conn$data[[1]]
  list(
    conn = conn,
    extractableKV = FALSE,
    totStorageSize = NA, # not yet computed for Spark RDDs
    nDiv = NA,           # not yet computed for Spark RDDs
    example = first_kv
  )
}
#' Basic ddf attributes: the class of every column of the example record's
#' value part (element 2 of the key-value pair returned by kvExample()).
#' @export
getBasicDdfAttrs.kvSparkData <- function(obj) {
  example_value <- kvExample(obj)[[2]]
  list(vars = lapply(example_value, class))
}
# Spark-backed key-value data never supports extraction by key (yet).
#' @export
hasExtractableKV.kvSparkData <- function(x) FALSE
######################################################################
### extract methods
######################################################################
# Key-based extraction is not implemented for Spark-backed data; this method
# unconditionally signals an error.
#' @export
extract.kvSparkData <- function(x, i, ...) {
stop("can't extract spark data by key yet...")
}
######################################################################
### convert methods
######################################################################
#' Conversion targets supported for kvSparkData: another Spark connection
#' ("sparkDataConn") or in-memory data ("NULL").
#' @export
convertImplemented.kvSparkData <- function(obj) {
  supported_targets <- c("sparkDataConn", "NULL")
  supported_targets
}
#' Convert a kvSparkData object to the representation implied by `to`
#' (NULL means pull the data into memory). Note the argument swap: dispatch
#' happens on the class of the *target* via the convertkvSparkData generic.
#' @export
convert.kvSparkData <- function(from, to=NULL) {
convertkvSparkData(to, from)
}
# Internal generic: dispatches a kvSparkData conversion on the class of the
# conversion target `obj` (the `to` argument of convert.kvSparkData above).
convertkvSparkData <- function(obj, ...)
UseMethod("convertkvSparkData", obj)
# Spark -> Spark: the data already lives on a Spark connection, so the
# conversion is a no-op that hands back the source object unchanged.
#' @export
convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) from
# from sparkData to memory
# Pull the RDD contents out of the connection and rebuild an in-memory
# ddo/ddf (preserving whether the source was a ddf), then copy across any
# attributes the new object needs via addNeededAttrs().
#' @export
convertkvSparkData.NULL <- function(to, from, verbose=FALSE) {
res <- getAttribute(from, "conn")$data
# keep the more specific ddf class when the source had it
if(inherits(from, "ddf")) {
res <- ddf(res, update=FALSE, verbose=verbose)
} else {
res <- ddo(res, update=FALSE, verbose=verbose)
}
addNeededAttrs(res, from)
}
# # from sparkData to local disk
# #' @export
# convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) {
# from
# }
#
# # from sparkData to HDFS
# #' @export
# convertkvSparkData.hdfsConn <- function(to, from, verbose=FALSE) {
# }
#
|
d5b78dda633475adda81f937247203d9eb6d453c
|
fc9b8c83a7ec01667bbfeca205c70d4ca24bcd20
|
/sentiment analysis.R
|
d887099a35aaee73ea53cf01eebae2d98ec1832e
|
[] |
no_license
|
hannahng97/State-of-the-Union-Analysis
|
f91fe3d9b5e8f9518dc262710341a01e6cfb9fc4
|
7875acd54f74ff6c9679534e757a7f4c4bc4f811
|
refs/heads/master
| 2020-03-08T17:57:41.926403
| 2018-04-06T04:06:28
| 2018-04-06T04:06:28
| 128,282,746
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,089
|
r
|
sentiment analysis.R
|
# Sentiment analysis of two first State of the Union addresses (JFK, Obama)
# using tidytext lexicons: per-emotion word clouds (NRC lexicon) and
# positive/negative contribution plots (Bing lexicon).
# NOTE(review): the OBAMA and JFK sections below are near-identical copies;
# they could be factored into a single function taking a tokenized speech.
library(readtext)
library(tidytext)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)
library(wordcloud)
library(reshape2)
# read in text
# One row per token after unnest_tokens(); files must be in the working dir.
jfk <- readtext("JFK first state of the union.txt") %>%
unnest_tokens(word, text)
obama <- readtext("obama first state of the union.txt") %>%
unnest_tokens(word, text)
# joy, anger, anticipation
# Subset the NRC lexicon once per emotion of interest.
nrc_joy <- get_sentiments("nrc") %>%
filter(sentiment == "joy")
nrc_anger <- get_sentiments("nrc") %>%
filter(sentiment == "anger")
nrc_anticipation <- get_sentiments("nrc") %>%
filter(sentiment == "anticipation")
### OBAMA
# obama joy: 52 words
obama_joy <- obama %>%
inner_join(nrc_joy) %>%
count(word, sort = TRUE)
obama_joy_wordcloud <- obama_joy %>%
with(wordcloud(word, n))
# obama anger: 51 words
obama_anger <- obama %>%
inner_join(nrc_anger) %>%
count(word, sort = TRUE)
obama_anger_wordcloud <- obama_anger %>%
with(wordcloud(word, n))
# obama anticipation: 77 words
obama_anticipation <- obama %>%
inner_join(nrc_anticipation) %>%
count(word, sort = TRUE)
obama_anticipation_wordcloud <- obama_anticipation %>%
with(wordcloud(word, n))
# positive vs. negative overall
# Bing lexicon: count each word once per sentiment class.
bing_word_counts <- obama %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
ungroup()
# Top-10 contributing words per sentiment, as a faceted bar chart.
bing_word_counts %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = sentiment)) +
geom_col(show.legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
labs(y = "Contribution to sentiment",
x = NULL) +
coord_flip()
# Two-sided comparison cloud: positive vs negative words in one image.
obama %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("gray20", "gray80"),
max.words = 100)
### JFK
# Same pipeline as the OBAMA section, applied to the JFK tokens.
# jfk joy: 62 words
jfk_joy <- jfk %>%
inner_join(nrc_joy) %>%
count(word, sort = TRUE)
jfk_joy_wordcloud <- jfk_joy %>%
with(wordcloud(word, n))
# jfk anger: 51 words
jfk_anger <- jfk %>%
inner_join(nrc_anger) %>%
count(word, sort = TRUE)
jfk_anger_wordcloud <- jfk_anger %>%
with(wordcloud(word, n))
# jfk anticipation: 89 words
jfk_anticipation <- jfk %>%
inner_join(nrc_anticipation) %>%
count(word, sort = TRUE)
jfk_anticipation_wordcloud <- jfk_anticipation %>%
with(wordcloud(word, n))
# positive vs. negative overall
# NOTE(review): bing_word_counts is reassigned here, clobbering the Obama
# counts computed above -- rename if both are needed later.
bing_word_counts <- jfk %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
ungroup()
bing_word_counts %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word = reorder(word, n)) %>%
ggplot(aes(word, n, fill = sentiment)) +
geom_col(show.legend = FALSE) +
facet_wrap(~sentiment, scales = "free_y") +
labs(y = "Contribution to sentiment",
x = NULL) +
coord_flip()
jfk %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("gray20", "gray80"),
max.words = 100)
|
fa822ae00c6f41d15cb33c8c793756bdefaf6722
|
80e2a96a6a3e47dabc5ef98139c9526adb9cad4c
|
/models/annotation_stratification.R
|
0c25e4cc17843c7ff8d028f079f4e9af7d36c3c1
|
[] |
no_license
|
jonvw28/arabidopsis_cytosine_methylation
|
eb6c78fa0f669c7ec83009da1907b898d162886e
|
c615245da6e4bbf09f061b37dd6d30bcfa2b83b1
|
refs/heads/master
| 2020-12-24T07:31:01.364139
| 2016-08-17T09:24:16
| 2016-08-17T09:24:16
| 57,891,780
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,325
|
r
|
annotation_stratification.R
|
# Stratified naive-Bayes-style classifier for relative methylation quantiles,
# fit separately within each genomic annotation class (exon, intergenic,
# intron, non-unique, te). For each annotation: bin relative methylation into
# quantile classes, model CpG counts per class with a negative binomial and
# mappability scores with a Gaussian kernel density, multiply the two
# likelihoods, normalise, and pick the most probable class; accuracy is then
# scored on a held-out test partition.
At_tiles.data <- readRDS("data_complete.RData")
#
#LIBRARIES
#
source('functions_read_in_first.R')
#
# NOTE(review): library() is preferable to require() for hard dependencies;
# this install-if-missing pattern is kept as-is.
if(!require("dplyr")){
install.packages("dplyr")
}
library(dplyr)
#
# Select Relevant Data
#
# which.index() is a project helper from functions_read_in_first.R that maps
# column names to indices.
tags <- which.index(At_tiles.data,c('relative_meth','cytosinesCountCG',
'cytosinesCountCHG', 'cytosinesCountCHH',
'mappab_20mer_1msh','Annotation'))
#
# measure CpG proportion
#
data <- dplyr::select(At_tiles.data,tags) %>%
dplyr::mutate(prop_CG = cytosinesCountCG/(cytosinesCountCG +
cytosinesCountCHG +
cytosinesCountCHH))
# NOTE(review): 'breakpoints' is removed here but is never defined in this
# file -- presumably created by functions_read_in_first.R; confirm.
rm(tags,At_tiles.data,breakpoints)
#
# Remove missing data points
#
data <- dplyr::select(data,c(1,2,7,5,6))
clear.data <- select.narm(data,index = 1:5,select = FALSE)
rm(data)
annots <- c('exon','intergenic','intron','non-unique','te')
results <- NULL
#
###############################################################################
#
# For loop across annotations
#
###############################################################################
#
for (j in 1:5){
# set up training and test sets
#
# Seed is reset per annotation so each stratum gets the same partition RNG.
set.seed(1234)
ann.data <- dplyr::filter(clear.data, Annotation == annots[j])
ann.data <- train.partition(ann.data,0.7)
#
# Set out number of classes in system
#
classes <- 5
#
################################################################################
#
# Training Phase
#
################################################################################
#
# Add actual results for rel meth quants, adjust for introns as too big a
# proportion have no relative methylation
#
# j == 3 is 'intron': zero.exclude = TRUE keeps the all-zero mass from
# collapsing the quantile bins.
if (j == 3){
ann.data <- quant.bin(ann.data,index=1,split = ncol(ann.data),
title = "Rel_meth_quant_act",class.number = classes, zero.exclude = T)
} else{
ann.data <- quant.bin(ann.data,index=1,split = ncol(ann.data),
title = "Rel_meth_quant_act",class.number = classes)
}
#
#
# Re-bin within the training partition only, so class boundaries are learned
# without looking at the test data.
temp.data <- dplyr::filter(ann.data, set =="training")
if (j == 3){
train.set <- quant.bin(temp.data, index = 1, split = ncol(temp.data),
title = "class_train",class.number = classes, zero.exclude = T)
}else{
train.set <- quant.bin(temp.data, index = 1, split = ncol(temp.data),
title = "class_train",class.number = classes)
}
rm(temp.data)
#
# Train CpG Count Model with negative Binomial
#
# One negative-binomial fit (MLE) of CpG counts per methylation class.
param_CpG <- NULL
for (i in 1:classes){
param_CpG[[i]] <- dplyr::filter(train.set, class_train == i)[,2] %>%
fitdistrplus::fitdist("nbinom",method = "mle")
}
#
# Prediction
###############################################################################
#
# Complete table of probabilites
#
# Columns 1..classes: NB likelihoods; classes+1..2*classes: kernel-density
# likelihoods; 2*classes+1..3*classes: their product (later normalised).
probs <- matrix(nrow = nrow(ann.data),ncol = classes*3+3)
for(i in 1:classes){
probs[,i] <- dnbinom(ann.data[,2], size = param_CpG[[i]]$estimate[1],
mu = param_CpG[[i]]$estimate[2])
}
rm(param_CpG,i)
#
# Train Mappab Score with gaussian kernal
#
for(i in 1:classes){
probs[,i+classes] <- kern.density(ann.data[,4],
dplyr::filter(train.set,class_train == i)[,4])
}
rm(train.set,i)
#
# Calculate scores for the combined distributions
#
for(i in 1:classes){
probs[,i+2*classes] <- probs[,i]*probs[,i+classes]
}
rm(i)
#
# Normalise Probabilities
#
probs[,3*classes + 1] <- apply(probs[,(2*classes +1):(3*classes)],1,sum)
probs[,(2*classes +1):(3*classes)] <- probs[,(2*classes +1):(3*classes)]/probs[,3*classes+1]
#
# Pick most likely Class and what the score for this is
#
predictions <- as.numeric(apply(probs[,(2*classes +1):(3*classes)],1,which.max))
#
# Append raw data with the predicitons
#
ann.data <- cbind(ann.data,predictions)
rm(probs)
tmp <- ncol(ann.data)
names(ann.data)[tmp] <- c("Predicted_meth_quant")
rm(tmp)
#
################################################################################
#
# Score on test set
#
################################################################################
#
# classifier.test() (project helper) compares actual vs predicted quantile
# columns (indices 8 and 9) on the held-out rows.
test.data <- dplyr::filter(ann.data, set =="test")
results[[j]] <- classifier.test(test.data, index = c(8,9))
rm(ann.data,test.data,predictions)
}
results
|
28a39e9a7abbefa2516fff8afb45080db7ce1de9
|
6ba8e14f902e2d7d4d4189d58f32d2df6d3a4672
|
/man/greycol.Rd
|
90d334bc30b7c199093179f6ae3b24e816fe4a25
|
[] |
no_license
|
cran/shape
|
7bf191a0810cbea1a1683741ced89c40358b9a51
|
3641dca21ed3ee20673ba28a8ea8b8281b087fbd
|
refs/heads/master
| 2021-08-07T05:07:49.981244
| 2021-05-19T06:20:03
| 2021-05-19T06:20:03
| 17,699,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 856
|
rd
|
greycol.Rd
|
\name{greycol}
\alias{greycol}
\alias{graycol}
\title{
white-black color palette
}
\description{
Creates a vector of (n) contiguous colors from white/grey to black
}
\usage{
greycol(n = 100, interval = c(0.0, 0.7))
}
\arguments{
\item{n }{number of colors.
}
\item{interval }{interval *to* where to interpolate.
}
}
\value{
a vector of character strings giving the colors in hexadecimal format.
}
\author{
Karline Soetaert <karline.soetaert@nioz.nl>
}
\examples{
filled.contour(volcano, color = graycol, asp = 1, main = "greycol,graycol")
graycol(10)
image(matrix(nrow = 1, ncol = 100, data = 1:100),
col = graycol(100), main = "greycol,graycol")
}
\details{
greycol is an alias of graycol
}
\seealso{
\code{\link{rainbow}}, \code{\link{heat.colors}},
\code{\link{topo.colors}}, \code{\link{femmecol}}
}
\keyword{color}
|
999acc36dac8774fc24b45bb54257b09ffa16b5a
|
5677446a232a94486df697870454ba6d97ef223d
|
/pca.R
|
e0b09676548566b40f2b0eb2767094dceae9b66d
|
[] |
no_license
|
Mikemeat/north-american-octo-spice
|
20e8b62aef35226f9f9b46bdb6db058b278e710e
|
13546464cf5b9f809de7163180a8e05b926b19fa
|
refs/heads/master
| 2021-01-01T05:39:44.037264
| 2014-06-13T09:48:20
| 2014-06-13T09:48:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,089
|
r
|
pca.R
|
# PCA example: reduce four product-rating variables to principal components,
# then use the first component to predict OS choice via logistic regression.
Price <- c(6,7,6,5,7,6,5,6,3,1,2,5,2,3,1,2)
Software <- c(5,3,4,7,7,4,7,5,5,3,6,7,4,5,6,3)
Aesthetics <- c(3,2,4,1,5,2,2,4,6,7,6,7,5,6,5,7)
Brand <- c(4,2,5,3,5,3,1,4,7,5,7,6,6,5,5,7)
data <- data.frame(Price, Software, Aesthetics, Brand)
# Two base-R PCA functions:
#   princomp() - eigendecomposition; cor = TRUE analyses the correlation matrix
#                instead of the covariance matrix
#   prcomp()   - SVD-based and generally preferred numerically
# (Use TRUE/FALSE, not T/F: T and F are ordinary variables and can be rebound.)
pca <- princomp(data, cor = TRUE)
summary(pca, loadings = TRUE)
# loadings - the coefficient of each variable, e.g.
#   Comp.1 = -0.523*Price - 0.177*Software + 0.597*Aesthetics + 0.583*Brand
# Keep components whose standard deviation is at or around 1; a cumulative
# proportion of variance around 80% is usually good enough.
head(pca)
plot(pca$scores[,1])
barplot(pca$scores[,1])
# Do the new variables help predict which type of software each person uses?
OS <- c(0,0,0,0,1,0,0,0,1,1,0,1,1,1,1,1)
model <- glm(OS ~ pca$scores[,1], family = binomial)
summary(model)
# NOTE(review): coefplot() is not in base R (it comes from the 'coefplot' or
# 'arm' package) and no library() call loads it here, so this line errors as
# written -- load the intended package or drop the call.
coefplot(model)
fitted(model)
|
845be7367ef5af8f4f76bab0396a052860bdd1d4
|
afe9b94df6f6a3211ace68b127f57ca38a1965af
|
/tests/testthat/test-updateGeneralSettings.R
|
d680f3a3e2cc08990453ca0d86b4379d60accdfd
|
[] |
no_license
|
datastorm-open/antaresEditObject
|
d10e1f80cdcb4749a82b575ba037ddb642c183fb
|
49739939a8a4e4857db94031b5e76a81ddb03f7c
|
refs/heads/master
| 2021-07-21T14:38:29.878961
| 2017-10-31T08:41:54
| 2017-10-31T08:41:54
| 106,667,353
| 1
| 0
| null | 2017-10-12T08:42:54
| 2017-10-12T08:42:54
| null |
UTF-8
|
R
| false
| false
| 599
|
r
|
test-updateGeneralSettings.R
|
#Copyright © 2016 RTE Réseau de transport d’électricité
# testthat unit test for antaresEditObject::updateGeneralSettings().
# setup_study(), sourcedir and studyPath come from the package's test
# helper files (loaded by the testthat runner before this file).
context("Function updateGeneralSettings")
# Setup study -------------------------------------------------------------
path <- tempdir()
# Unzip the study
setup_study(path, sourcedir)
# set simulation path in mode input
opts <- antaresRead::setSimulationPath(studyPath, 'input')
# Tests -------------------------------------------------------------------
test_that("Update a general parameter", {
# updateGeneralSettings() writes to the study's generaldata.ini and refreshes
# the "antares" option; the test verifies the refreshed in-memory value.
updateGeneralSettings(year.by.year = FALSE)
expect_false(getOption("antares")$parameters$general$`year-by-year`)
})
|
3ce64873902f7cf92a81c3c48e984786f7fe9f90
|
f7b4ea0419535fde79825ec8790bc535405954a0
|
/man/loopit_2D3D.Rd
|
3813c7afb9a7f0fc8f25766aa9db0417e8020c7d
|
[] |
no_license
|
janjansen86/ptrackr
|
9d4f844a9b1986c6e23cb1a85b6f1e06d762cb88
|
72d4f48284251b90c5ea9b072e3bb215e10d8358
|
refs/heads/master
| 2020-04-15T15:54:58.397067
| 2017-11-17T06:01:49
| 2017-11-17T06:01:49
| 51,494,252
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,455
|
rd
|
loopit_2D3D.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loopit_2D3D.R
\name{loopit_2D3D}
\alias{loopit_2D3D}
\title{Loopit 2D/3D}
\usage{
loopit_2D3D(pts_seeded, romsobject, roms_slices = 1, start_slice = 1,
domain = "2D", trajectories = FALSE, speed, runtime = 10,
looping_time = 0.25, sedimentation = FALSE, particle_radius = 0.00016,
time_steps_in_s = 1800, uphill_restricted = NULL,
sed_at_max_speed = FALSE, mean_move = FALSE)
}
\arguments{
\item{pts_seeded}{matrix of particles with 3 colums (lon, lat, depth)}
\item{romsobject}{list of matrices containing ROMS-model cell-values (lon_u, lat_u, h, i_u, i_v, i_w)}
\item{roms_slices}{number of time-frames to use in the particle-tracking}
\item{start_slice}{determines which roms_slice the particle-tracking starts with}
\item{domain}{either "2D" or "3D"}
\item{trajectories}{TRUE/FALSE statement to define whether to store particle trajectories (model runs much faster without storing trajectories). Default is FALSE.}
\item{speed}{(w_sink) sinking rate m/days}
\item{runtime}{(time) total number fo days to run the model}
\item{looping_time}{default at 0.25 which is equal to the 6h intervall of the ROMS-model}
\item{sedimentation}{TRUE/FALSE statement whether particles should settle on the seafloor depending on current speed and particle density (McCave & Swift 1976). Default is FALSE,}
\item{particle_radius}{radius of the particles; this influences the sedimentation rate with smaller values meaning less sedimentation}
\item{uphill_restricted}{define whether particles are restricted from moving uphill, defined as from how many meters difference particles cannot cross between cells}
\item{sed_at_max_speed}{particles will settle at all times only depending on the highest current speed given in any of the ROMS slices. Currently this only works when 4 slices are available. Default is FALSE}
}
\value{
list(pts=pts, pend=pend, stopindex=obj$stopindex, ptrack=obj$ptrack, lon_list=lon_list, idx_list=idx_list, idx_list_2D=idx_list_2D, id_list=id_list)
}
\description{
Wrapper function to increase performance by looping the trackit-functions in small time intervalls.
}
\details{
Function to run the functions loopit_trackit_2D/loopit_trackit_3D to follow particles through different consecutive ROMS-sclices. Looping can also increase performance when using very large number of particles by looping through shorter time steps.
Loops are set to run in half day intervals. If no runtime is defined, the function will loop depending on the depth of the deepest cell and the sinking speed to allow each particle to possibly sink to the seafloor (2*max(h)/speed)
}
\examples{
data(surface_chl)
data(toyROMS)
########## 3D-tracking:
pts_seeded <- create_points_pattern(surface_chl, multi=100)
run <- loopit_2D3D(pts_seeded = pts_seeded, romsobject = toyROMS, roms_slices = 4, speed = 100, runtime = 50, domain = "3D", trajectories = TRUE)
## testing the output
library(rasterVis)
library(rgdal)
library(rgl)
ra <- raster(nrow=50,ncol=50,ext=extent(surface_chl))
r_roms <- rasterize(x = cbind(as.vector(toyROMS$lon_u), as.vector(toyROMS$lat_u)), y= ra, field = as.vector(-toyROMS$h))
pr <- projectRaster(r_roms, crs = "+proj=laea +lon_0=137 +lat_0=-66") #get the right projection (through the centre)
plot3D(pr, adjust = FALSE, zfac = 50) # plot bathymetry with 50x exaggerated depth
pointsxy <- project(as.matrix(run$pend[,1:2]), projection(pr)) #projection on Tracking-points
points3d(pointsxy[,1],pointsxy[,2],run$pend[,3]*50)#,xlim=xlim,ylim=ylim)
########## 2D-tracking:
pts_seeded <- create_points_pattern(surface_chl, multi=100)
run <- loopit_2D3D(pts_seeded = pts_seeded, roms_slices = 4, romsobject = toyROMS, speed = 100, runtime = 50, sedimentation = TRUE)
plot(pts_seeded)
points(run$pend, col="red", cex=0.6)
points(run$pts , col="blue", cex=0.6)
########## 2D-tracking with storing trajectories:
pts_seeded <- create_points_pattern(surface_chl, multi=100)
run <- loopit_2D3D(pts_seeded = pts_seeded, roms_slices = 4, particle_radius = 0.00001, romsobject = toyROMS, speed = 100, runtime = 50, sedimentation = TRUE, trajectories = TRUE)
plot(pts_seeded)
points(run$pend, col="red", cex=0.6)
points(run$pts , col="blue", cex=0.6)
## looking at the horizontal flux: this should be another function to handle the output
ra <- raster(nrow=50,ncol=50,ext=extent(surface_chl))
mat_list <- list()
for(islices in 1:length(run$idx_list_2D)){
mat_list[[islices]] <- matrix(unlist(run$idx_list_2D[[islices]]),ncol=12)
}
testmatrix <- do.call(rbind, mat_list)
testid <- unlist(run$id_list)
flux_list <- split(testmatrix,testid)
for(k in 1:length(flux_list)){
## cells visited by a particle ("presence-only")
flux_list[[k]] <- unique(flux_list[[k]])
## drop first and last value (input and setting cell)
flux_list[[k]] <- flux_list[[k]][-c(1,length(flux_list[[k]]))]
}
flux <- as.vector(unlist(flux_list))
xlim <- c(xmin(ra),xmax(ra))
ylim <- c(ymin(ra),ymax(ra))
df <- data.frame(cbind(toyROMS$lon_u[flux],toyROMS$lat_u[flux]))
df$cell <- cellFromXY(ra, df)
ra[] <- tabulate(df$cell, ncell(ra))
plot(ra)
## looking at the current-slices
roms_list <- list()
par(mfrow=c(2,2))
for(i in 1:4){
roms_list[[i]] <- rasterize(x = cbind(as.vector(toyROMS$lon_u), as.vector(toyROMS$lat_u)), y= ra, field = as.vector(sqrt((toyROMS$i_u[,,,i]^2)+(toyROMS$i_v[,,,i])^2)))
plot(roms_list[[i]])
}
par(mfrow=c(1,1))
}
|
b5251f749495b8efdce7c7d6f5d3443cbb2b1ab8
|
7346258bfcf9d1602c45e8452e224eb64aa2cc19
|
/a_team/cmt_semantic_analysis.r
|
4f5bf97de69c85005f1321351b785bca120531f6
|
[] |
no_license
|
kmangyo/Kleague_Data
|
f84f6ff88e0d9e1bd6913f1d251cdc3c3937bafc
|
374168f2ebe3b185f25737314c749e5847c8f449
|
refs/heads/master
| 2021-01-21T06:14:43.033660
| 2017-11-13T12:05:45
| 2017-11-13T12:05:45
| 47,014,809
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,439
|
r
|
cmt_semantic_analysis.r
|
# Scrape Naver sports comments for a football match with RSelenium, restrict
# to comments posted during the game, then run Google Cloud Natural Language
# sentiment analysis (via googleLanguageR) on a 1k random sample and plot
# sentiment over time.
library(RSelenium)
library(rvest)
library(plyr)
library(dplyr)
library(reshape2)
library(stringi)
library(stringr)
library(ggplot2)
# comments data is from http://sports.news.naver.com/gameCenter/textRelayFootball.nhn?category=amatch&tab=player_stats&gameId=20171010A01A001618
# in mac terminal: docker run -d -p 4445:4444 selenium/standalone-firefox:2.53.0
rD <- rsDriver(port=4444L,browser="chrome")
remDr <- rD$client
remDr$open()
remDr$navigate("http://comments.sports.naver.com/template/vs.nhn?category=amatch&gameId=20171010A01A001618")
# over 50k comments
# Build the click sequence: 9 numbered page links, then the "next" button,
# repeated across the full pagination range.
seq_num<-seq(2,5827,by=10)
seq_nums<-list()
for(i in 1:length(seq_num)){
seq_nums[[i]]<-seq(seq_num[i],seq_num[i]+8)
}
for(i in 1:length(seq_nums)){
seq_nums[[i]][10]<-'cbox_next'
}
# Selector type per click: page numbers by link text, "next" by class name.
seq_name<-rep(c(rep("link text",9),'class name'),length(seq_nums))
seq_nums<-unlist(seq_nums)
seq_nums_df<-data.frame(seq_name,seq_nums)
comment_list<-list()
date_list<-list()
id_list<-list()
# Click through every page, scraping comment text, timestamp and user id.
for(i in 1:nrow(seq_nums_df)) {
page_click <- remDr$findElement(using=as.character(seq_nums_df[i,1]), value=as.character(seq_nums_df[i,2]))
page_click$clickElement()
getsource <-page_click$getPageSource()
comment_list[[i]]<- read_html(getsource[[1]]) %>% html_nodes(".cbox_desc") %>% html_text()
date_list[[i]]<- read_html(getsource[[1]]) %>% html_nodes(".cbox_date") %>% html_text()
id_list[[i]]<- read_html(getsource[[1]]) %>% html_nodes(".cbox_user_id") %>% html_text()
}
comment_df<-melt(comment_list)
date_df<-melt(date_list)
id_df<-melt(id_list)
cmt_df<-cbind(comment_df, date_df, id_df)
cmt_df<-cmt_df[c(1,3,5)]
names(cmt_df)<-c('cmt','time','id')
# NOTE(review): POSIXlt columns inside data frames are fragile; POSIXct is
# the usual choice (the script converts later, before plotting).
cmt_df$time<-as.POSIXlt(as.character(cmt_df$time))
# during game
# Keep only comments whose epoch time falls inside the match window.
cmt_df$num.time<-as.numeric(cmt_df$time)
cmt_df<-subset(cmt_df, num.time>=1507642200 & num.time<=1507649400)
ggplot(data=cmt_df, aes(time)) + geom_histogram(bins=120)
# using google semantic analysis API
# install.packages('googleLanguageR')
# devtools::install_github("ropensci/googleLanguageR")
library(googleLanguageR)
# random sampling 1k
cmt_df_sample <- sample_n(cmt_df, 1000)
hist(cmt_df_sample$time, breaks = 120)
cmt_df_sample$cmt<-as.character(cmt_df_sample$cmt)
# Strip stray Hangul jamo and whitespace control characters before sending
# text to the NLP API.
cmt_df_sample$cmt<- gsub("ㄱ|ㄴ|ㄷ|ㄹ|ㅁ|ㅂ|ㅅ|ㅇ|ㅈ|ㅊ|ㅋ|ㅌ|ㅍ|ㅎ|ㅏ|ㅑ|ㅓ|ㅕ|ㅗ|ㅛ|ㅜ|ㅠ|ㅡ|ㅣ|ㅃ|ㅉ|ㄲ|ㅆ|\n|\t", " ", cmt_df_sample$cmt)
texts <- c(cmt_df_sample$cmt)
nlp_result <- gl_nlp(texts, language = "ko")
# Tag each per-sentence result with its source comment index, then flatten.
for (i in 1:length(nlp_result$sentences)){
nlp_result$sentences[[i]]$seq<-i
}
nlp_result_sent<-ldply(nlp_result$sentences, data.frame)
cmt_df_sample$seq<-1
cmt_df_sample$seq<-cumsum(cmt_df_sample$seq)
cmt_df_sample<- cmt_df_sample %>% arrange(num.time)
cmt_df_sample$time<-as.POSIXct(cmt_df_sample$time)
cmt_df_sample<-left_join(cmt_df_sample, nlp_result_sent, c('seq'))
# Drop comments mentioning the opponent ("모로코" = Morocco) so sentiment
# reflects the home side only.
cmt_df_sample$moroco<-str_count(cmt_df_sample$content, "모로코")
cmt_df_sample<-subset(cmt_df_sample, moroco==0)
hist(cmt_df_sample$score, breaks = 20)
mean(cmt_df_sample$score)
ggplot(data=cmt_df_sample, aes(score)) + geom_histogram(bins=20)
# by time
ggplot(cmt_df_sample, aes(time, score)) + geom_point() + geom_smooth()
# Centered moving average of width n over the sentiment scores.
mav <- function(x,n){stats::filter(x,rep(1/n,n), sides=2)}
# ref) https://druedin.com/2012/08/11/moving-averages-in-r/
mav_df<-mav(cmt_df_sample$score,100)
mav_df<-data.frame(mav_df)
mav_df$seq <-c(1:nrow(mav_df))
names(mav_df)[1]<-'score'
ggplot(data=mav_df, aes(x=seq, y=score, group=1)) + geom_line() + geom_path()
|
07c2247aee3f58850934ee42801bca2044eaf2c2
|
3afd8f34493e45d70b28f46b4be8686214c996d3
|
/R/generate_lag_props.R
|
10d6cdf0ef7bf56ba8d6ab8759e76109bd39ef3a
|
[] |
no_license
|
gcgibson/2018-2019-cdc-flu-contest
|
8feb2fabe2bbb564777e19a3496cffa35bc63708
|
fadf216cd05769dc339e0e04dd1b2c9543ccc114
|
refs/heads/master
| 2021-07-12T11:10:29.639525
| 2019-03-26T17:26:17
| 2019-03-26T17:26:17
| 152,277,720
| 0
| 0
| null | 2018-11-30T18:55:51
| 2018-10-09T15:41:53
|
R
|
UTF-8
|
R
| false
| false
| 5,651
|
r
|
generate_lag_props.R
|
# Download CDC FluView wILI observations from the Delphi Epidata API for the
# national region plus HHS regions 1-10, at every reporting lag 0-51 weeks,
# over epiweeks 199740-201815, and cache the combined data frame to
# "flu_data_with_backfill.rds". Network-bound; sources the Delphi client
# from GitHub at call time.
download_backfill_data <- function(){
library(plyr) # for rbind.fill
library(dplyr)
source("https://raw.githubusercontent.com/cmu-delphi/delphi-epidata/master/src/client/delphi_epidata.R")
# Fetch data
# Nested lapply: region x lag; each API record is a list whose NULL fields
# are replaced with NA so it can become a one-row data frame, then all rows
# are stacked with rbind.fill (columns may differ across records).
all_obs <- lapply(c("nat", paste0("hhs", 1:10)),
function(region_val) {
lapply(0:51,
function(lag_val) {
obs_one_lag <- Epidata$fluview(
regions = list(region_val),
epiweeks = list(Epidata$range(199740, 201815)),
lag = list(lag_val))
lapply(obs_one_lag$epidata,
function(x) {
x[sapply(x, function(comp) is.null(comp))] <- NA
return(as.data.frame(x))
}) %>%
rbind.fill()
}) %>%
rbind.fill()
}) %>%
rbind.fill()
saveRDS(all_obs,
file = "flu_data_with_backfill.rds")
}
# Build a wide "backfill" table from the cached FluView data: one row per
# (region, epiweek) whose 52 lag columns hold the difference between the
# wILI value reported at each lag and the finally-reported value, plus the
# final incidence. Writes the result to ./data/lag_df_difference.
# NOTE(review): colnames() below supplies 4 names for a 55-column frame --
# this errors in current R ("'names' attribute must be the same length");
# confirm the intended full set of column names.
# NOTE(review): `if (length(prop))` relies on truthiness of a length; an
# explicit `length(prop) > 0` would be clearer.
create_lag_df <- function(){
data <- readRDS("./data/flu_data_with_backfill_edit.rds")
# seed row of NAs; dropped after the loop via lag_df[2:nrow(lag_df),]
lag_df <- matrix(NA,ncol=55)
for (region in unique(data$region)){
for (week in unique(data[data$region == region,]$epiweek)){
tmp_data <- data[data$region == region & data$epiweek == week,]
tmp_row <- c()
for (lag in seq(0,51)){
# reported-at-lag minus finally-reported value (backfill revision)
current_observed_data <- tmp_data[tmp_data$lag == lag,]$wili
finally_observed_data <- tmp_data[tmp_data$lag == max(tmp_data$lag),]$wili
prop <- current_observed_data-finally_observed_data
tmp_row <- c(tmp_row,prop)
}
# pad weeks that never reached 52 reported lags
while (length(tmp_row) < 52){
tmp_row <- c(tmp_row, NA)
}
if (length(prop) ){
lag_df <- rbind(lag_df,c(region,week,tmp_row,current_observed_data))
}
}
}
lag_df <- as.data.frame(lag_df)
lag_df <- lag_df[2:nrow(lag_df),]
colnames(lag_df) <- c("Region","week",0,"Incidence")
# characters 5-7 of the YYYYWW epiweek string = within-season week label
lag_df$season_week <- unlist(lapply(lag_df$week,function(x) {return (substr(x,5,7))}))
write.csv(lag_df,"./data/lag_df_difference")
}
# Fit candidate models for the lag-0 backfill difference (column X0) on a
# pre-2015/40 training split, to be compared on the 2015/16 validation weeks.
lag_df <- as.data.frame(read.csv("./data/lag_df0"))
data <- as.data.frame(readRDS("./data/flu_data_with_backfill_edit.rds"))
# train on seasons before epiweek 201540; validate on later weeks
subset_lag_df <- lag_df[lag_df$week < 201540,]
validation_lag_df <- lag_df[lag_df$week >= 201540,]
library(nlme)
library(MASS)
library(nnet)
# Mixed models with a random intercept per region, plus fixed-effect lms.
lme1 <- lme(fixed=X0 ~ Region +season_week, random = ~ +1|Region,data=subset_lag_df,na.action=na.exclude)
lmfit <- lme(X0 ~ Incidence +season_week,random = ~ +1|Region,data=subset_lag_df,na.action=na.exclude)
lmfit2 <- lm(X0 ~ 1,data=subset_lag_df)
lmfit3 <- lm(X0 ~ Region + season_week+ Incidence,data=subset_lag_df)
#loess_fit <- loess(X0~ Incidence +season_week , data=subset_lag_df)
# Single-hidden-layer neural net (kept under the historical 'loess_fit' name).
# NOTE(review): linout=T uses the reassignable shorthand T; prefer TRUE.
loess_fit <- nnet(X0~Incidence +season_week + Region, subset_lag_df, size=12, maxit=500, linout=T, decay=0.01)
# Binary indicator: was the lag-0 report within 1 unit of the final value?
subset_lag_df$binary <- ifelse(subset_lag_df$X0 < 1,0,1)
glm_fit <- glm(binary ~ Incidence +season_week + Region,data=subset_lag_df)
# Accumulators for per-model squared errors on the validation weeks.
mse_m1 <- c()
mse_m2 <- c()
mse_m3 <- c()
mse_m4 <- c()
mse_m5 <- c()
mse_m6 <- c()
mse_m7 <- c()
for (region in c("National",paste0("Region ",1:10))){
for (week in c(seq(41,52),seq(20))){
if (week <=9){
data_week <- paste0("0",week)
}else{
data_week <- week
}
if (week < 40){
test_season <- 2016
}else{
test_season <- 2015
}
test_dat <- data.frame(Region=region,season_week = week,Incidence=data[data$region==region & data$epiweek==paste0(test_season,data_week) & data$lag==0,]$weighted_ili)
if (data_week == 1){
prop1 <- data[data$region==region & data$epiweek==paste0(test_season-1,52) & data$lag==0,]$weighted_ili
prop2 <- data[data$region==region & data$epiweek==paste0(test_season-1,52) & data$lag==1,]$weighted_ili
} else if(data_week <=10){
prop1 <- data[data$region==region & data$epiweek==paste0(test_season,paste0("0",as.numeric(data_week)-1)) & data$lag==0,]$weighted_ili
prop2 <- data[data$region==region & data$epiweek==paste0(test_season,paste0("0",as.numeric(data_week)-1)) & data$lag==1,]$weighted_ili
}else{
prop2 <-data[data$region==region & data$epiweek==paste0(test_season,data_week-1) & data$lag==1,]$weighted_ili
prop1 <-data[data$region==region & data$epiweek==paste0(test_season,data_week-1) & data$lag==0,]$weighted_ili
}
m1_pred <- predict(lmfit,newdata = test_dat)
m2_pred <- predict(lme1,newdata = test_dat)
m3_pred <- predict(lmfit2,newdata=test_dat)
m4_pred <- predict(lmfit3,newdata=test_dat)
m5_pred <-predict(loess_fit,newdata=test_dat)
m6_pred <- ifelse(predict(glm_fit,newdata=test_dat)<.5,.99,1.01)
truth <- lag_df[lag_df$Region==region & lag_df$week==paste0(test_season,week),"X0" ]
mse_m1 <- c(mse_m1,(m1_pred-truth)^2)
mse_m2 <- c(mse_m2,(m2_pred-truth)^2)
mse_m3 <- c(mse_m3,(m3_pred-truth)^2)
mse_m4 <- c(mse_m4,(m4_pred-truth)^2)
mse_m5 <- c(mse_m5,(m5_pred-truth)^2)
mse_m6 <- c(mse_m6,(m6_pred-truth)^2)
mse_m7 <- c(mse_m7,((prop1/prop2+m5_pred)/2-truth)^2)
}
}
print (mean(mse_m1))
print (mean(mse_m3))
print (mean(mse_m2))
print (mean(mse_m4))
print (mean(mse_m5))
print (mean(mse_m6))
print (mean(mse_m7))
library(ggplot2)
ggplot(subset_lag_df[subset_lag_df$Region=="Region 1",],aes(x=season_week,y=X0)) +geom_point()
plot(subset_lag_df[subset_lag_df$Region=="Region 1",]$X0)
|
ab4b76740aec7c88530e92c9b59960b333cfd61e
|
d6f4c96bf2e19e52181f1bcdc260cb0c3cf77181
|
/man/fars_summarize_years.Rd
|
84e6d6636007bad8db7d463e50a0bd32ce8822ea
|
[] |
no_license
|
drsmd23/Fars
|
bd5f0fac26f93ec1fdff8164412b60a3cdf924eb
|
30a0228c80fe491139e18661952dab8a5b427c26
|
refs/heads/master
| 2021-01-19T17:10:56.023992
| 2017-04-15T02:39:24
| 2017-04-15T02:39:24
| 88,312,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,708
|
rd
|
fars_summarize_years.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Summarize multiple years of motor vehicle crash data}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{A string or integer vector giving the years of motor vehicle crash files to read in.}
}
\value{
This function returns a data frame containing the number of motor vehicle crashes for each year and month.
An error message occurs if there is no data file for a corresponding input year.
}
\description{
This function (fars_summarize_years) accepts a vector of years or a single year as input.
Motor vehicle crash data for each year in the input vector is read using the fars_read_years function from this package (\code{\link{fars_read_years}}).
The list of dataframes are then merged using the bind_rows function from the dplyr package (\code{\link[dplyr]{bind_rows}}).
The resulting dataframe is grouped by the year and MONTH columns using the group_by function from the dplyr package (\code{\link[dplyr]{group_by}}).
The total number of records relating to each group is then calculated using the summarize function from the dplyr package (\code{\link[dplyr]{summarize}}).
The results are represented with months displayed along the rows and years along the columns by using the spread function from the tidyr package (\code{\link[tidyr]{spread}}).
An error message occurs if there is no data file for a corresponding input year.
}
\examples{
\dontrun{
fars_summarize_years(2013)
fars_summarize_years(c(2013, 2014))
fars_summarize_years("2013")
fars_summarize_years(c("2013", "2014"))
fars_summarize_years(years=2013)
}
}
|
44c1c91171d6fdcaef1197b2e165b59558e49708
|
d0da3117f0fda250bd1299b505013546f3e208d9
|
/script/chart_year_sexualMinority.R
|
ebbe318c3b182adb4bd03793cbc68ef37ff5a0dc
|
[] |
no_license
|
MaggieTsang/INFO201_Final_Project
|
bd02ba3bc986176b7a3029b57349bc219803ec7a
|
871c60efe338f97cb9735325b9efd3c3abea3f24
|
refs/heads/master
| 2021-01-20T18:08:10.904964
| 2017-06-02T05:59:25
| 2017-06-02T05:59:25
| 90,907,920
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,014
|
r
|
chart_year_sexualMinority.R
|
# Views trends between year and the appearance frequency of sexual minorities
library(dplyr)
library(plotly)
# Draws a two-line trend chart: straight vs. GSM character counts per year.
YearSexuality <- function(dataset) {
  ## Yearly frequencies of both groups, renamed for plotting.
  plot.summary <- GSMtable(dataset)
  colnames(plot.summary) <- c("YEAR", "str_freq", "gsm_freq")
  ## Build the figure: straight characters first, then the GSM trace,
  ## then axis titles. The finished plotly object is the return value.
  plot_ly(
    plot.summary,
    x = ~ YEAR,
    y = ~ str_freq,
    name = "Straight Characters",
    type = "scatter",
    mode = "lines"
  ) %>%
    add_trace(
      y = ~ gsm_freq,
      name = "GSM Characters",
      type = "scatter",
      mode = "lines"
    ) %>%
    layout(
      xaxis = list(title = "Year of First Appearance"),
      yaxis = list(title = "Character Frequency"),
      title = "Comparing Trends of Comic Character Sexual Minorities"
    )
}
# Summarises character counts per year for straight and GSM groups.
GSMtable <- function(dataset) {
  ## Yearly counts of straight characters (GSM field empty, year known).
  str.counts <- dataset %>%
    filter(GSM == "", is.na(YEAR) == FALSE) %>%
    group_by(YEAR) %>%
    summarise(freq = n())
  colnames(str.counts) <- c("YEAR", "str_freq")
  ## Yearly counts of gender/sexual-minority characters (GSM non-empty).
  gsm.counts <- dataset %>%
    filter(GSM != "", is.na(YEAR) == FALSE) %>%
    group_by(YEAR) %>%
    summarise(freq = n())
  colnames(gsm.counts) <- c("YEAR", "gsm_freq")
  ## Join on year; years with no GSM characters get zero counts.
  summary.tbl <- left_join(str.counts, gsm.counts, by = "YEAR")
  summary.tbl[is.na(summary.tbl)] <- 0
  colnames(summary.tbl) <-
    c("Year", "Total Straight Characters", "Total GSM Characters")
  summary.tbl
}
# Returns the introductory caption (a single string) displayed with the chart.
GSMintro <- function() {
  return(
    "As the LGBTQ+ community faces discrimination and continues to fight for their equal rights,
    this graph shows if comic book characters represent those of a sexual minority.
    The x-axis notes the year of a character's first appearance and the y-axis counts the
    total number of new characters that year. The blue line represents straight characters and
    the orange line represents those of any gender/sexual minority including
    bisexual, homosexual, pansexual, transgender, and gender fluid characters."
  )
}
# Returns the concluding discussion text (a single string) for the chart.
GSMoutro <- function() {
  return(
    "As a general observation, there are many more straight characters then GSM characters.
    Female GSM characters are more common than Male GSM characters, and there are no Agender
    GSM characters. Interestingly, the years that have a higher frequency of GSM characters
    are aligned with real-life events. For example, GSM characters slightly rose in 1991, a year where
    laws were passed that banned discrimination based on sexual orientation. Another rise in 2001 may
    be due to the first event of legal same-sex marriage in Ontario, Canada. Later in 2001,
    same-sex couples were allowed adoption rights. Although the large gap between Straight and GSM
    characters remain, GSM characters are having more appearances as time passes."
  )
}
|
031787c31d988d2ae8cfa084e2bbee2a59dd4c1b
|
ee769cd2ee8a012416dd156b3344e39670e55c6c
|
/0507_ShinyApp.R
|
a202f190c75930b98c978e0645b4c0dc541a486f
|
[] |
no_license
|
vank-stats/STS347-Spring2020
|
a09fcf371e55a5d283a46487ec3a4a89d4c89791
|
d542a46be722e3356769eba6abead1eec020dd9a
|
refs/heads/master
| 2020-12-23T06:51:06.117628
| 2020-05-08T01:00:47
| 2020-05-08T01:00:47
| 237,073,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,995
|
r
|
0507_ShinyApp.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# In the preamble, we will load any packages we want to use and possibly also
# any functions or data sets that we will use.
library(shiny)
library(ggplot2)
# Define UI for application that draws a histogram and boxplot.
# Sidebar inputs: user name, bin count, fill colour and border colour;
# main panel shows a greeting plus the two plots.
ui <- fluidPage(

    # Application title
    titlePanel("Our First Shiny App!"),

    sidebarLayout(
        sidebarPanel(
            # Free-text name echoed in the greeting
            textInput("name", "Enter your name"),
            # Number of histogram bins (1-50 in steps of 5)
            numericInput("bins",
                        "Number of bins:",
                        min = 1,
                        max = 50,
                        value = 30,
                        step = 5),
            # Fill colour for bars and box
            radioButtons("fillColor",
                         "Bar color:",
                         choices = c("Red" = "indianred1",
                                     "Green" = "green",
                                     "Purple" = "purple",
                                     "Off White" = "seashell1"),
                         selected = "green"),
            # Border colour for bars and box
            selectInput("borderColor",
                        "Border color:",
                        choices = c("Black" = "black",
                                    "Gray" = "gray",
                                    "White" = "white",
                                    "Pink" = "pink"),
                        selected = "white")
        ),

        # Show a plot of the generated distribution
        mainPanel(
           textOutput("greeting"),
           plotOutput("distPlot"),
           plotOutput("boxPlot")
        )
    )
)
# Define server logic: histogram and boxplot of Old Faithful waiting times,
# styled by the user's colour choices, plus a text greeting.
server <- function(input, output) {

    output$distPlot <- renderPlot({
        # generate bins based on input$bins from ui.R
        x    <- faithful[, 2]
        bins <- seq(min(x), max(x), length.out = input$bins + 1)

        # draw the histogram with the specified number of bins
        hist(x, breaks = bins, col = input$fillColor, border = input$borderColor)
    })

    # ggplot2 alternative kept for reference (disabled):
    # output$distPlot <- renderPlot({
    #     # generate bins based on input$bins from ui.R
    #     x    <- faithful[faithful$waiting >= input$mintime, 2]
    #
    #     # draw the histogram with the specified number of bins
    #     ggplot(NULL, aes(x = x)) +
    #       geom_histogram(fill = input$fillColor, bins = input$bins,
    #                      color = input$borderColor)
    #
    # })

    output$boxPlot <- renderPlot({
      x <- faithful[, 2]
      boxplot(x, col = input$fillColor, border = input$borderColor)
      # ggplot(NULL, aes(y = x)) +
      #   geom_boxplot(fill = input$fillColor, color = input$borderColor)
    })

    output$greeting <- renderText({
      paste0("Hello ", input$name, "!")
    })
}

# Run the application
shinyApp(ui = ui, server = server)
|
ec9221f23579ebe292357045e4245bef6da4efcf
|
3fb693303788a999f8e93d6e0323f68ff87a0f85
|
/run_analysis.R
|
7839983505c8728a131bd91505508684192e4683
|
[] |
no_license
|
Rakudajin/Clean_Data_Project
|
5b09890089853fd594cc271e42874932d4c684fa
|
d1f7c4619f8fa07969c97860bc9338ed98531a48
|
refs/heads/master
| 2020-03-29T18:27:49.509928
| 2014-11-23T15:21:54
| 2014-11-23T15:21:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
run_analysis.R
|
# Load labels (feature names and activity code/name pairs)
feat_labs = read.table("UCI HAR Dataset/features.txt")
act_labs = read.table("UCI HAR Dataset/activity_labels.txt")
# Load train
# NOTE(review): relies on list.files() returning subject_*, X_*, y_* in that
# order; sorting is locale-dependent, confirm on the target system.
data.sources_train = list.files("UCI HAR Dataset/train/",
                                pattern="*.txt$", full.names=TRUE, ignore.case=TRUE)
sapply(data.sources_train, read.table) -> train
# train[[1]] = subject ids, train[[3]] = activity codes, train[[2]] = features
train = cbind(train[[1]], train[[3]], train[[2]])
# Load tests
data.sources_test = list.files("UCI HAR Dataset/test/",
                               pattern="*.txt$", full.names=TRUE, ignore.case=TRUE)
sapply(data.sources_test, read.table) -> test
test = cbind(test[[1]], test[[3]], test[[2]])
# Rename: subject, activity, then the measured feature names
names(test) = c("subject", "activity", c(as.character(feat_labs$V2)))
names(train) = c("subject", "activity", c(as.character(feat_labs$V2)))
# Merge test & train
df = rbind(train, test)
# Remove all features that are not "mean()" or "std()"
takes = c(1, 2, grep("mean\\(\\)", names(df)), grep("std\\(\\)", names(df)))
df = df[, names(df) %in% names(df)[takes]]
# Create dataset of averages: one row per (subject 1..30, activity 1..6)
results = c()
for (i in 1:30){
  for (j in 1:6){
    temp_means = colMeans(subset(df[, 3:68], df$subject == i & df$activity == j))
    temp_row = cbind(i, j, t(temp_means))
    results = rbind(results, temp_row)
  }
}
results = as.data.frame(results)
names(results) = names(df)
# Factor activities: map numeric codes to activity names
results$activity = factor(results$activity)
levels(results$activity) = act_labs$V2
# Save dataset
# NOTE(review): "row.name" relies on partial matching of "row.names" --
# it works, but the full argument name would be clearer.
write.table(results, row.name=FALSE, "results.txt")
|
2378d03b861a441a5859b0e56cd9d86e750fa178
|
69cc68dd79f328f4f628a06923be8fef20a8c46f
|
/script/rollingonly.R
|
81a6a964f7b913e26fb8c5e2d92fb1cb3aa6d0d7
|
[] |
no_license
|
cw-NaoyaHieda/ToS_Garch
|
365f332e4a9084117348dcc5a8670dd0d43e046a
|
daab49bee282df5aafb84ffc8af18899de497467
|
refs/heads/master
| 2021-09-17T15:43:41.978556
| 2018-07-03T11:21:39
| 2018-07-03T11:21:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,529
|
r
|
rollingonly.R
|
# The importance-sampling results looked wrong, so recompute them here.
# Estimates VaR and ES at the 99%, 97.5% and 95% levels for the
# sinh-arcsinh (FA) distribution with par = (mu, sigma, lambda, delta),
# using exponentially tilted importance sampling.
# NOTE(review): theta.val1/theta.val25/theta.val5 and the sample vectors
# rfa1/rfa25/rfa5 are read from the enclosing environment, not passed as
# arguments -- confirm they are set before calling.
IS.fa_pre <- function(par){
  # Numerator of the exponentially tilted (importance) density.
  f <- function(x, theta, par){
    exp(theta*x)*dfas2(x, mu=par[1], sigma=par[2],
                       lambda=par[3], delta=par[4])
  }
  # Compute the normalizing constant M needed for the weights;
  # handle the 99%, 97.5% and 95% levels together.
  M1 <- integrate(f, -30, 30, theta=theta.val1, par=par)$value
  M25 <- integrate(f, -30, 30, theta=theta.val25, par=par)$value
  M5 <- integrate(f, -30, 30, theta=theta.val5, par=par)$value
  # Compute the importance weights.
  w1 <- exp(-theta.val1*rfa1)*M1
  w25 <- exp(-theta.val25*rfa25)*M25
  w5 <- exp(-theta.val5*rfa5)*M5
  # Computation at the 99% level (originally run with sample sizes
  # increasing from 100 to 10000).
  out1<-cbind( rfa1, w1/length(w1))
  # Sort samples in ascending order.
  A <- out1[sort.list(out1[,1]),]
  # Append the cumulative sum of the weights.
  A <- cbind(A, cumsum(A[,2]))
  # The sample whose cumulative weight is closest to 0.01 is the 99% VaR;
  # estimate ES at 0.01 from the samples up to v1.
  v1 <- A[which.min(abs(A[,3]-0.01)),1]
  es1<- sum(apply(A[1:which.min(abs(A[,3]-0.01)),1:2],1,prod))/0.01
  out1 <- c(v1, es1)
  # Same procedure at the 97.5% level.
  out25<-cbind(rfa25, w25/length(w25))
  A <- out25[sort.list(out25[,1]),]
  A <- cbind(A, cumsum(A[,2]))
  v25 <- A[which.min(abs(A[,3]-0.025)),1]
  es25<- sum(apply(A[1:which.min(abs(A[,3]-0.025)),1:2],1,prod))/0.025
  out25 <- c(v25, es25)
  # Same procedure at the 95% level.
  out5<-cbind( rfa5, w5/length(w5))
  A <- out5[sort.list(out5[,1]),]
  A <- cbind(A, cumsum(A[,2]))
  v5 <- A[which.min(abs(A[,3]-0.05)),1]
  es5<- sum(apply(A[1:which.min(abs(A[,3]-0.05)),1:2],1,prod))/0.05
  # Return a 1x6 matrix: (VaR, ES) at 1%, 2.5%, 5%.
  return(out = cbind(t(out1),t(out25),t(out5)))
}
# Sampling-importance-resampling (SIR) draws from the exponentially tilted
# FA distribution: a normal proposal centred at par2 (sd 15) is weighted by
# the tilted density and then resampled in proportion to the weights.
# Returns list(q = resampled draws, w = normalized weights).
rIS_SIR <- function(n, par, par2, theta){
  ## Normal distribution as the proposal.
  q <- rnorm(n,mean=par2,sd=15)
  # Numerator of the importance density.
  f <- function(x, theta, par){
    exp(theta*x)*dfas2(x, mu=par[1], sigma=par[2],
                       lambda=par[3], delta=par[4])
  }
  # Denominator: normalizing constant of the tilted density.
  M <- integrate(f, -30, 30, theta=theta, par=par %>% as.numeric())$value
  ### Density of the exponentially tilted importance distribution.
  d.IS <- function(x, theta,par){
    # Numerator of the importance density.
    f <- function(x, theta, par){
      exp(theta*x)*dfas2(x, mu=par[1], sigma=par[2],
                         lambda=par[3], delta=par[4])
    }
    # Divide by the normalizing constant (M captured from the outer scope).
    return( f(x, theta, par)/M )
  }
  ## Importance weights: target density over proposal density.
  w <- sapply(q,
              d.IS, theta =theta, par=par) /dnorm(q, mean=par2, sd=15)
  w <- w/sum(w)
  ## Resample according to the weights.
  q.resample <- Resample1(q, weight=w, NofSample = n)
  list( q=q.resample, w=w)
}
# Random draws from the FA (sinh-arcsinh) distribution via SIR,
# using a wide normal proposal (sd = 5*sigma).
# Returns list(raw proposal draws, q = resampled draws, w = weights).
rfa_SIR<- function(n, mu, sigma, lambda, delta)
{
  ## Normal distribution as the proposal.
  q <- rnorm(n,mean=mu %>% as.numeric(),sd=5*sigma %>% as.numeric())
  ## Importance weights: FA density over proposal density.
  w <- sapply(q, dfas2, mu=mu, sigma=sigma, lambda=lambda, delta=delta) %>% as.numeric()/
    dnorm(q, mean=mu %>% as.numeric(), sd=5*sigma %>% as.numeric()) %>% as.numeric()
  ## Normalize so the weights sum to 1.
  w <- w/sum(w)
  ## Resample according to the weights.
  q.resample <- Resample1(q, weight=w, NofSample = n)
  list(q,q=q.resample, w=w)
}
# Plain (simple) Monte Carlo estimates of VaR and ES for the sinh-arcsinh
# distribution, with draws obtained through SIR.
SMC.fa_pre <-function(theta){
  ## Obtain random draws via SIR from a normal proposal.
  rand.fa<-rfa_SIR(n=20000, mu=theta[1],
                   sigma=theta[2],
                   lambda=theta[3],
                   delta=theta[4])
  y <- sample(rand.fa$q,10000)
  # Simple Monte Carlo:
  # VaR = empirical 1%, 2.5% and 5% quantiles.
  VaR1 <- quantile(y, c(0.01,0.025, 0.05))
  # ES = mean of draws beyond each VaR.
  Es1 <- c( mean(y[y < VaR1[1]]),
            mean(y[y < VaR1[2]]),
            mean(y[y < VaR1[3]]))
  # Collect the VaR and ES estimates into a 1x6 matrix.
  out <- cbind(t(VaR1),t(Es1))
  return(out)
}
# Parallel rolling evaluation: for each window of fitted FA parameters,
# compute VaR/ES by importance sampling and simple Monte Carlo, for both
# the FA and the normal model.
cl <- makeCluster(detectCores()-1) # create the cluster
registerDoParallel(cl)
cl_l <- detectCores()-1
result <- pforeach::pforeach(i = 1:dim(result_para)[1], .combine = rbind)({
  result_para_now <- result_para[i,]%>% as.numeric()
  # "True" VaR computation:
  # for each of the 99%, 97.5% and 95% levels, use the FA quantile function.
  VaR1.fa <- qfas(0.01, mu=result_para_now[2], sigma=result_para_now[3],
                  lambda=result_para_now[4], delta = result_para_now[5])
  VaR25.fa <- qfas(0.025, mu=result_para_now[2], sigma=result_para_now[3],
                   lambda=result_para_now[4], delta = result_para_now[5])
  VaR5.fa <- qfas(0.05, mu=result_para_now[2], sigma=result_para_now[3],
                  lambda=result_para_now[4], delta = result_para_now[5])
  VaR.true.FA <- c(VaR1.fa ,VaR25.fa ,VaR5.fa )
  # Simple Monte Carlo estimates.
  SMC.fa.out <- SMC.fa_pre(result_para_now[-1])
  #--------------
  # Find the tilting parameter theta whose tilted mean matches each VaR
  # (for the 99%, 97.5% and 95% levels respectively).
  theta.val1<- find.theta(0.01, result_para_now[-1])
  theta.val25<- find.theta(0.025, result_para_now[-1])
  theta.val5<- find.theta(0.05, result_para_now[-1])
  out.fa<-c()
  rfa.IS.1<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[1], theta=theta.val1)
  rfa.IS.25<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[2], theta=theta.val25)
  rfa.IS.5<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[3], theta=theta.val5)
  # Use the SIR draws as input to the FA importance-sampling estimator.
  rfa1 <- sample(rfa.IS.1$q, 10000)
  rfa25 <- sample(rfa.IS.25$q, 10000)
  rfa5 <- sample(rfa.IS.5$q, 10000)
  #clusterExport(cl,list("rfa1","rfa25","rfa5"))
  #IS.fa.out <- NULL
  # Importance sampling (NOTE: IS.fa_pre reads theta.val*/rfa* from this scope).
  #while(is.null(IS.fa.out)){
  IS.fa.out <- IS.fa_pre(result_para_now[-1])
  #}
  #-------
  # Normal-model counterparts on the same 250-day rolling window.
  rt <- df$log_x[c((i+1):(i+250))]
  theta <- c(mean(rt), sd(rt))
  SMC.norm.out <- SMC.norm_pre(theta)
  IS.norm.out <- IS.norm_pre(theta)
  c(IS.fa.out,IS.norm.out,SMC.fa.out,SMC.norm.out)
})
stopCluster(cl)
#result <- cbind(dt=df$dt[251:length(df$dt)],IS.fa.outs,IS.norm.outs,SMC.fa.outs,SMC.norm.outs,parameters)
#save(list=c("result"),file="data/20180629_rolling_result_useoldpara.Rdata")
#save(list=c("result"),file="data/20180701_rolling_result_useoldpara.Rdata")
# Sequential recomputation of the normal-model columns only.
result_tmp <- c()
for(i in 1:dim(result_para)[1]){
  rt <- df$log_x[(i+1):(i+250)]
  theta <- c(mean(rt), sd(rt))
  SMC.norm.out <- SMC.norm_pre(theta)
  IS.norm.out <- IS.norm_pre(theta)
  result_tmp <- rbind(result_tmp,data.frame(SMC.norm.out,IS.norm.out))
  print(i)
}
# Splice the recomputed normal-model columns into the saved result matrix.
load("data/20180629_rolling_result_useoldpara.Rdata")
result <- cbind(result[,c(1:6)],result_tmp[,c(7:12)],result[,c(13:18)],result_tmp[,c(1:6)])
#save(list=c("result"),file="data/20180630_rolling_result_useoldpara.Rdata")
# Recompute only the FA importance-sampling VaR/ES columns, in parallel.
result_only_IS_fa <- pforeach::pforeach(i = 1:dim(result_para)[1], .combine = rbind, .cores = 45)({
  #result_only_IS_fa <- c()
  #for(i in 1:dim(result_para)[1]){
  par <- result_para[i,-1]%>% as.numeric()
  result_para_now <- result_para[i,]%>% as.numeric()
  # For each of the 99%, 97.5% and 95% levels, compute the true FA VaR.
  VaR1.fa <- qfas(0.01, mu=result_para_now[2], sigma=result_para_now[3],
                  lambda=result_para_now[4], delta = result_para_now[5])
  VaR25.fa <- qfas(0.025, mu=result_para_now[2], sigma=result_para_now[3],
                   lambda=result_para_now[4], delta = result_para_now[5])
  VaR5.fa <- qfas(0.05, mu=result_para_now[2], sigma=result_para_now[3],
                  lambda=result_para_now[4], delta = result_para_now[5])
  VaR.true.FA <- c(VaR1.fa ,VaR25.fa ,VaR5.fa )
  # Tilting parameters matching each VaR level.
  theta.val1<- find.theta(0.01, par)
  theta.val25<- find.theta(0.025, par)
  theta.val5<- find.theta(0.05, par)
  rfa.IS.1<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[1], theta=theta.val1)
  rfa.IS.25<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[2], theta=theta.val25)
  rfa.IS.5<-rIS_SIR(n=20000, par=result_para_now[-1], par2=VaR.true.FA[3], theta=theta.val5)
  # Numerator of the tilted importance density.
  f <- function(x, theta, par){
    exp(theta*x)*dfas2(x, mu=par[1], sigma=par[2],
                       lambda=par[3], delta=par[4])
  }
  # Normalizing constants M for the weights; 99%, 97.5%, 95% together.
  M1 <- integrate(f, -30, 30, theta=theta.val1, par=par)$value
  M25 <- integrate(f, -30, 30, theta=theta.val25, par=par)$value
  M5 <- integrate(f, -30, 30, theta=theta.val5, par=par)$value
  rfa1 <- sample(rfa.IS.1$q, 10000)
  rfa25 <- sample(rfa.IS.25$q, 10000)
  rfa5 <- sample(rfa.IS.5$q, 10000)
  # Compute the importance weights.
  # NOTE(review): N is not defined in this block -- presumably a global
  # sample size set elsewhere (IS.fa_pre uses length(w) instead); confirm
  # before rerunning.
  w1 <- exp(-theta.val1*rfa1)*M1
  w25 <- exp(-theta.val25*rfa25)*M25
  w5 <- exp(-theta.val5*rfa5)*M5
  # Computation at the 99% level.
  out1<-cbind(rfa1, w1/N)
  # Sort samples in ascending order.
  A <- out1[sort.list(out1[,1]),]
  # Append the cumulative sum of the weights.
  A <- cbind(A, cumsum(A[,2]))
  # The sample whose cumulative weight is closest to 0.01 is the 99% VaR.
  v1 <- A[which.min(abs(A[,3]-0.01)),1]
  # Estimate ES at 0.01 from the samples up to v1
  # (guard the single-row case, where subsetting drops the dim attribute).
  if(is.null(dim(A[1:which.min(abs(A[,3]-0.01)),]))){
    es1 <- sum(prod(A[1:which.min(abs(A[,3]-0.01)),c(1:2)]))/0.01
  }else{
    es1 <- sum(apply(A[1:which.min(abs(A[,3]-0.01)),1:2],1,prod))/0.01
  }
  # Same procedure at the 97.5% level.
  out25<-cbind(rfa25, w25/N)
  # Sort samples in ascending order.
  A <- out25[sort.list(out25[,1]),]
  # Append the cumulative sum of the weights.
  A <- cbind(A, cumsum(A[,2]))
  v25<-A[which.min(abs(A[,3]-0.025)),1]
  if(is.null(dim(A[1:which.min(abs(A[,3]-0.025)),]))){
    es25 <- sum(prod(A[1:which.min(abs(A[,3]-0.025)),c(1:2)]))/0.025
  }else{
    es25<- sum(apply(A[1:which.min(abs(A[,3]-0.025)),1:2],1,prod))/0.025
  }
  # Same procedure at the 95% level.
  out5<-cbind(rfa5, w5/N)
  # Sort samples in ascending order.
  A <- out5[sort.list(out5[,1]),]
  # Append the cumulative sum of the weights.
  A <- cbind(A, cumsum(A[,2]))
  v5<-A[which.min(abs(A[,3]-0.05)),1]
  if(is.null(dim(A[1:which.min(abs(A[,3]-0.05)),]))){
    es5 <- sum(prod(A[1:which.min(abs(A[,3]-0.05)),c(1:2)]))/0.05
  }else{
    es5<- sum(apply(A[1:which.min(abs(A[,3]-0.05)),1:2],1,prod))/0.05
  }
  c(v1,es1,v25,es25,v5,es5)
  #-------
  #result_only_IS_fa <- rbind(result_only_IS_fa,IS.fa.out)
  #print(i)
})
#save(list=c("result_only_IS_fa"),file="data/result_only_IS_fa")
# Replace the first six columns of the saved result with the recomputed ones.
load("data/20180701_rolling_result_useoldpara.Rdata")
result <- cbind(result_only_IS_fa,result[,c(-1:-6)])
#save(list=c("result"),file="data/20180701_2_rolling_result_useoldpara.Rdata")
|
9e064bcb712a26ade902d9fb8803128e7a1ec3f7
|
6a4b1fdeed411b03b17664114647746f88feeead
|
/Functions/draw_samples.R
|
57778f3f2bafb2d9a92bfa0c3532dc584ecf5ebb
|
[] |
no_license
|
glenmcgee/hospODS
|
8b1162663670c5baf785af7efe731fa984971f24
|
19886dd09901296f16e2ed866db98a3efd1ba7e7
|
refs/heads/master
| 2021-04-15T11:32:24.961170
| 2019-09-09T02:56:15
| 2019-09-09T02:56:15
| 126,397,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,010
|
r
|
draw_samples.R
|
####################################################################
# #
# Sampling Functions #
# #
####################################################################
# Feb 2017 #
####################################################################
## code to draw samples
library(tidyverse)
####################################################################
# Function to Draw CSCC Sample #
####################################################################
# Cluster-stratified case-control sample: target nk/2 cases and nk/2
# controls per clinic, borrowing from the other stratum when one runs
# short so the per-clinic total stays at nk where possible.
# Assumes `dat` has integer `clinic` ids (1..K, used for positional
# indexing into the per-clinic summary) and a 0/1 `event` column.
draw_CSCC_each <- function(dat,nk){
  ncontrols <- ncases <- nk/2
  dat <- as_tibble(dat)
  # Per-clinic case/control totals and the number to sample from each stratum.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(#Nk=n(),
      Nk1=sum(event),
      Nk0=sum(1-event),
      #nk=min(Nk,ncases+ncontrols),
      nk1=Nk1*(Nk1<ncases) + ## if there arent enough cases, take them all
        ncases*(Nk1>=ncases & Nk0>=ncontrols) + ## if there are enough of both, only take whats needed
        min(Nk1,ncases+ncontrols-Nk0)*(Nk1>=ncases & Nk0<ncontrols), ## if we have enough cases but not enough controls, take extra
      nk0=Nk0*(Nk0<ncontrols) + ## if there arent enough controls, take them all
        ncontrols*(Nk1>=ncases & Nk0>=ncontrols) + ## if there are enough of both, only take whats needed
        min(Nk0,ncontrols+ncases-Nk1)*(Nk0>=ncontrols & Nk1<ncases)#, ## if we have enough controls but not enough cases, take extra
      #checknk=((nk1+nk0)==nk), ## check that were sampling the right amount
      #checknky=((nk1<=Nk1) & (nk0<=Nk0)) ## check that were not asking for too many
    )
  #sum_dat
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic, event) %>% mutate(sample_id=sample(n()))
  ## assign the controls a sampling limit
  dat$nk <- sum_dat$nk0[dat$clinic]
  ## assign cases a sampling limit
  dat$nk[dat$event==1] <- (sum_dat$nk1[dat$clinic])[dat$event==1]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Draw CSCC Sample #
# Proportional to Cluster Sizes #
####################################################################
# Cluster-stratified case-control sample with a per-clinic total
# proportional to cluster size: round(Nk*frac/2) cases and controls,
# borrowing between strata when one runs short (same logic as
# draw_CSCC_each but with a size-dependent target nyk).
draw_CSCCpropto_each <- function(dat,frac){
  dat <- as_tibble(dat)
  # Per-clinic totals and size-proportional stratum targets.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(Nk=n(),
              nyk=round(Nk*frac/2,0),
              Nk1=sum(event),
              Nk0=sum(1-event),
              nk1=Nk1*(Nk1<nyk) + ## if there arent enough cases, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk1,nyk+nyk-Nk0)*(Nk1>=nyk & Nk0<nyk), ## if we have enough cases but not enough controls, take extra
              nk0=Nk0*(Nk0<nyk) + ## if there arent enough controls, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk0,nyk+nyk-Nk1)*(Nk0>=nyk & Nk1<nyk)#, ## if we have enough controls but not enough cases, take extra
    )
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic, event) %>% mutate(sample_id=sample(n()))
  ## assign the controls a sampling limit
  dat$nk <- sum_dat$nk0[dat$clinic]
  ## assign cases a sampling limit
  dat$nk[dat$event==1] <- (sum_dat$nk1[dat$clinic])[dat$event==1]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Draw CSCC Sample #
# Drawing More Heavily at the Margin #
####################################################################
## split clusters into 3 tertiles based on raw risk
## lowest and highest tertiles get rates of 0.75*frac
## middle tier gets 1.5*frac
## same input as CSCCbalanced
# Case-control sample that oversamples clinics near the margin: clinics in
# the middle tertile of raw event risk get a 1.5x per-stratum target, the
# outer tertiles get 0.75x; stratum borrowing as in draw_CSCC_each.
draw_CSCCmargin_each <- function(dat,nk){
  nk <- nk/2 ## number of cases and controls
  dat <- as_tibble(dat)
  # Per-clinic raw event risk, used to form tertiles.
  sum_risk <- dat %>%
    group_by(clinic) %>%
    summarise(risk_k=mean(event))
  risk_cat <- cut(sum_risk$risk_k, breaks = quantile(sum_risk$risk_k, probs = seq(0, 1, 1/3)),include.lowest=TRUE,labels=c(1,2,3)) ## split into tertiles
  dat$risk_cat <- risk_cat[dat$clinic]
  # Tertile-dependent targets plus the usual stratum-borrowing arithmetic.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(Nk1=sum(event),
              Nk0=sum(1-event),
              nyk=round(nk*(0.75*(first(risk_cat)!=2)+1.5*(first(risk_cat)==2) )),
              nk1=Nk1*(Nk1<nyk) + ## if there arent enough cases, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk1,nyk+nyk-Nk0)*(Nk1>=nyk & Nk0<nyk), ## if we have enough cases but not enough controls, take extra
              nk0=Nk0*(Nk0<nyk) + ## if there arent enough controls, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk0,nyk+nyk-Nk1)*(Nk0>=nyk & Nk1<nyk)#, ## if we have enough controls but not enough cases, take extra
    )
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic, event) %>% mutate(sample_id=sample(n()))
  ## assign the controls a sampling limit
  dat$nk <- sum_dat$nk0[dat$clinic]
  ## assign cases a sampling limit
  dat$nk[dat$event==1] <- (sum_dat$nk1[dat$clinic])[dat$event==1]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Draw CSCC Sample #
# Drawing More Heavily at the Margin #
# Based on SRR #
####################################################################
## split clusters into 3 tertiles based on raw risk
## lowest and highest tertiles get rates of 0.75*frac
## middle tier gets 1.5*frac
## same input as CSCCbalanced
# Same margin-oversampling scheme as draw_CSCCmargin_each, but tertiles are
# formed from externally supplied SRR values (one per clinic) instead of
# raw event risk.
draw_CSCCmarginSRR_each <- function(dat,SRRs,nk){
  nk <- nk/2 ## number of cases and controls
  dat <- as_tibble(dat)
  SRR_cat <- cut(SRRs, breaks = quantile(SRRs, probs = seq(0, 1, 1/3)),include.lowest=TRUE,labels=c(1,2,3)) ## split into tertiles
  dat$SRR_cat <- SRR_cat[dat$clinic]
  # Tertile-dependent targets plus the usual stratum-borrowing arithmetic.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(Nk1=sum(event),
              Nk0=sum(1-event),
              nyk=round(nk*(0.75*(first(SRR_cat)!=2)+1.5*(first(SRR_cat)==2) )),
              nk1=Nk1*(Nk1<nyk) + ## if there arent enough cases, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk1,nyk+nyk-Nk0)*(Nk1>=nyk & Nk0<nyk), ## if we have enough cases but not enough controls, take extra
              nk0=Nk0*(Nk0<nyk) + ## if there arent enough controls, take them all
                nyk*(Nk1>=nyk & Nk0>=nyk) + ## if there are enough of both, only take whats needed
                min(Nk0,nyk+nyk-Nk1)*(Nk0>=nyk & Nk1<nyk)#, ## if we have enough controls but not enough cases, take extra
    )
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic, event) %>% mutate(sample_id=sample(n()))
  ## assign the controls a sampling limit
  dat$nk <- sum_dat$nk0[dat$clinic]
  ## assign cases a sampling limit
  dat$nk[dat$event==1] <- (sum_dat$nk1[dat$clinic])[dat$event==1]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Draw SRS Sample #
####################################################################
# Simple random sample of up to n_k subjects from every clinic.
draw_SRS_each <- function(dat,n_k){
  tib <- as_tibble(dat)
  ## Per-clinic cap: the clinic size or n_k, whichever is smaller.
  caps <- tib %>%
    group_by(clinic) %>%
    summarise(Nk=n(),
              nk=min(Nk,n_k) )
  ## A random within-clinic ordering decides who gets sampled.
  tib <- tib %>% group_by(clinic) %>% mutate(sample_id=sample(n()))
  ## Attach each subject's clinic cap.
  tib$nk <- caps$nk[tib$clinic]
  ## Keep subjects whose random rank falls within the cap.
  tib %>% filter(sample_id<=nk)
}
####################################################################
# Function to Draw SRS Sample #
# Proportional to Cluster Sizes #
####################################################################
# Per-clinic simple random sample of round(frac * Nk) subjects, so the
# sample size is proportional to cluster size.
draw_SRSpropto_each <- function(dat,frac){
  tib <- as_tibble(dat)
  ## Size-proportional cap for each clinic.
  caps <- tib %>%
    group_by(clinic) %>%
    summarise(Nk=n(),
              nk=round(frac*Nk,0) )
  ## A random within-clinic ordering decides who gets sampled.
  tib <- tib %>% group_by(clinic) %>% mutate(sample_id=sample(n()))
  ## Attach each subject's clinic cap.
  tib$nk <- caps$nk[tib$clinic]
  ## Keep subjects whose random rank falls within the cap.
  tib %>% filter(sample_id<=nk)
}
####################################################################
# Function to Draw SRS Sample #
# Drawing More Heavily at the Margin #
####################################################################
# Simple random sample that oversamples clinics near the margin: clinics
# in the middle tertile of raw event risk get a 1.5x cap, the outer
# tertiles 0.75x of the base per-clinic size nnk.
draw_SRSmargin_each <- function(dat,nnk){
  dat <- as_tibble(dat)
  # Per-clinic raw event risk, used to form tertiles.
  sum_risk <- dat %>%
    group_by(clinic) %>%
    summarise(risk_k=mean(event))
  risk_cat <- cut(sum_risk$risk_k, breaks = quantile(sum_risk$risk_k, probs = seq(0, 1, 1/3)),include.lowest=TRUE,labels=c(1,2,3)) ## split into tertiles
  dat$risk_cat <- risk_cat[dat$clinic]
  # Tertile-dependent per-clinic cap.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(nk=round(nnk*(0.75*(first(risk_cat)!=2)+1.5*(first(risk_cat)==2) )) )
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic) %>% mutate(sample_id=sample(n()))
  ## assign sampling limit
  dat$nk <- sum_dat$nk[dat$clinic]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Draw SRS Sample #
# Drawing More Heavily at the Margin #
# Based on SRR #
####################################################################
# Same margin-oversampling SRS as draw_SRSmargin_each, but tertiles are
# formed from externally supplied SRR values (one per clinic) instead of
# raw event risk.
draw_SRSmarginSRR_each <- function(dat,SRRs,nnk){
  dat <- as_tibble(dat)
  SRR_cat <- cut(SRRs, breaks = quantile(SRRs, probs = seq(0, 1, 1/3)),include.lowest=TRUE,labels=c(1,2,3)) ## split into tertiles
  dat$SRR_cat <- SRR_cat[dat$clinic]
  # Tertile-dependent per-clinic cap.
  sum_dat <- dat %>%
    group_by(clinic) %>%
    summarise(nk=round(nnk*(0.75*(first(SRR_cat)!=2)+1.5*(first(SRR_cat)==2) )) )
  ## assign everyone a within strata sampling id
  dat <- dat %>% group_by(clinic) %>% mutate(sample_id=sample(n()))
  ## assign sampling limit
  dat$nk <- sum_dat$nk[dat$clinic]
  dat <- dat %>% filter(sample_id<=nk)   ## if sampling id is less than or equal to sampling limit for
  ## event/clinic strata, then sample it
  return(dat)
}
####################################################################
# Function to Conduct Cluster Sampling #
####################################################################
# Cluster sampling: keep every subject belonging to K randomly chosen clinics.
draw_CS_each <- function(dat,K){
  ## Pick K distinct clinics at random.
  chosen <- sample(unique(dat$clinic),K,replace=FALSE)
  ## Return all rows from the chosen clinics.
  as_tibble(dat) %>% filter(clinic %in% chosen)
}
####################################################################
# Function to Draw Random Sample #
####################################################################
# Random sample of nn subjects, ignoring clinic structure entirely.
draw_RS_each <- function(dat,nn){
  ## Give every subject a random rank, then keep the first nn ranks.
  tib <- as_tibble(dat) %>% mutate(sample_id=sample(n()))
  ## Sort the sample back by clinic before returning.
  tib %>% filter(sample_id<=nn) %>% arrange(clinic)
}
####################################################################
# Function to Draw CC Sample #
####################################################################
## Case-control sample of ~nn subjects: targets nn/2 cases and nn/2 controls,
## and when one group is too small the deficit is made up from the other
## group, so the total stays at nn whenever the data allow.
## dat: data with a 0/1 `event` column. Returns the sampled rows.
draw_CC_each <- function(dat,nn){
## Target counts per group (half the total each).
ncontrols <- ncases <- round(nn/2)
dat <- as_tibble(dat)
## n1/n0 below implement the top-up rules; exactly one of the three indicator
## terms in each sum is nonzero.
sum_dat <- dat %>%
summarise(#Nk=n(),
N1=sum(event),
N0=sum(1-event),
n1=N1*(N1<ncases) + ## if there arent enough cases, take them all
ncases*(N1>=ncases & N0>=ncontrols) + ## if there are enough of both, only take whats needed
min(N1,ncases+ncontrols-N0)*(N1>=ncases & N0<ncontrols), ## if we have enough cases but not enough controls, take extra
n0=N0*(N0<ncontrols) + ## if there arent enough controls, take them all
ncontrols*(N1>=ncases & N0>=ncontrols) + ## if there are enough of both, only take whats needed
min(N0,ncontrols+ncases-N1)*(N0>=ncontrols & N1<ncases)#, ## if we have enough controls but not enough cases, take extra
#checknk=((nk1+nk0)==nk), ## check that were sampling the right amount
#checknky=((nk1<=Nk1) & (nk0<=Nk0)) ## check that were not asking for too many
)
## assign everyone a within strata sampling id
dat <- dat %>% group_by(event) %>% mutate(sample_id=sample(n()))
## assign the controls a sampling limit
dat$nny <- sum_dat$n0
## assign cases a sampling limit
dat$nny[dat$event==1] <- (sum_dat$n1)
## keep rows whose sampling id is within the limit for their event group
dat <- dat %>% filter(sample_id<=nny)
## old method that doesnt sample extra controls when lacking cases
# nny <- round(nn/2)
# ## make tibble
# dat <- as_tibble(dat)
#
# ## assign everyone a sampling id
# dat <- dat %>% group_by(event) %>% mutate(sample_id=sample(n()))
#
# ## select only those sampled
# dat <- dat %>% filter(sample_id<=nny) %>% arrange(clinic)
# #dat$clinic <- as.numeric(factor(clean_full$clinic))
return(dat)
}
|
fe89f6c06d3147e339808bb12ef0d3cfc4183e4b
|
b186dcdf7e429997ea11c9e8cfc22077c060e489
|
/scripts/analysis/functions/fd_stan_main.R
|
1d4468bc40f72507b077d1fe1ed6432a234fd2db
|
[] |
no_license
|
behinger/fixdur
|
841599cbd231052dbc77ed0213d9a95c0d7faa1e
|
8f6f4a8837b4ca1dd0dfcf9a96fddccc568e51cf
|
refs/heads/master
| 2021-03-27T19:08:37.449607
| 2017-09-25T10:41:58
| 2017-09-25T10:41:58
| 68,902,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,706
|
r
|
fd_stan_main.R
|
## Fit (or reload a cached fit of) the fixation-duration Stan model.
## mres      : a fitted model object with S4 slot @frame and a model.matrix()
##             method (looks like an lme4 merMod -- TODO confirm).
## name      : basename for the cache file under ../../cache/stanfit/.
## nchains   : number of MCMC chains (also used as number of cores).
## niter     : iterations per chain.
## rerun     : if TRUE, refit even when a cache file exists.
## is_on_grid: grid mode forces rerun, a single chain, and a random suffix on
##             the cache name so concurrent jobs do not collide.
## Returns list(fit, label_dataframe, modelMatrix); in the cached branch these
## objects come from the saved .RData (they are stored by the save() below).
fd_stan_main <- function(mres,name,nchains=6,niter=500,rerun=F,is_on_grid=F){
if(is_on_grid){
rerun=T
nchains=1
## 10-character random alphanumeric suffix to make the cache name unique.
name = paste0(name, 'grid_',paste0(sample(c(0:9, letters, LETTERS),10,replace=TRUE),collapse=''))
}
name = paste0(name,'.RData')
library(rstan)
dir = '../../cache/stanfit/'
dir.create(file.path(dir), showWarnings = F)
if (file.exists(file.path(dir,name)) & !rerun){
## Cache hit: load modelMatrix, fit and label_dataframe from disk.
load(file.path(dir,name))
# and then go all the way down to the return statement ;)
}else{
## Build the fixed-effects design matrix and remember its column labels.
modelMatrix = model.matrix(mres)
betaNames = colnames(modelMatrix)
X <- unname(modelMatrix)
attr(X,"assign") <- NULL
## Map Stan's beta.<i>. parameter names back to readable labels.
label_dataframe = data.frame(Parameter=sprintf('beta.%i.',1:length(betaNames)),Label=betaNames)
## Data list for Stan: same X used for fixed and both random-effect designs.
stanDat <- within(list(),
{
N<-nrow(X)
P <- n_u <- n_w <- ncol(X)
X <- Z_u <- Z_w <- X
J <- length(levels(as.factor(mres@frame$subject)))
answer <- mres@frame$choicetime # We don't want to standardized rating!
subj <- as.integer(as.factor(mres@frame$subject))
}
)
# 5. Fit the model.
fd_model = stan_model(file='./stan/fd_model_1.stan')
#   init.f = function(chain_id){
#     numP = 20
#     list(beta=c(200,rep(0,numP-1))+rnorm(numP,0,1))
#   }
fit <- sampling(fd_model,data=stanDat,iter=niter, chains=nchains,cores = nchains,refresh=1,init=0)#,init=init.f)
## Persist everything needed by the cached branch above.
save(modelMatrix,fit,label_dataframe,file= file.path(dir,name))
}
return(list(fit=fit,label_dataframe=label_dataframe,modelMatrix=modelMatrix))
}
|
17fc05c72cfb3855066809e2bfa857aa3aff2770
|
567d8f2a240cd3b7f3899b3fd5e3bd9328b2b895
|
/R/demoplot.R
|
7976a855254f07f0a99f495b7a3ee78783a3e0de
|
[] |
no_license
|
cran/colorspace
|
ec9123555d9a820c2a2c639b94d28734563df6e0
|
fadb043aeb85048a0a2b9daddbb258002d0a4dfc
|
refs/heads/master
| 2023-01-24T18:54:37.214672
| 2023-01-23T10:40:02
| 2023-01-23T10:40:02
| 17,695,192
| 7
| 3
| null | 2017-08-29T03:43:34
| 2014-03-13T04:18:48
|
R
|
UTF-8
|
R
| false
| false
| 8,711
|
r
|
demoplot.R
|
#' Color Palette Demonstration Plot
#'
#' Demonstration of color palettes in various kinds of statistical graphics.
#'
#' To demonstrate how different kinds of color palettes work in different
#' kinds of statistical displays, \code{demoplot} provides a simple convenience
#' interface to some base graphics with (mostly artificial) data sets.
#' All types of demos can deal with arbitrarily many colors. However, some
#' displays are much more suitable for a low number of colors (e.g., the pie
#' chart) while others work better with more colors (e.g., the heatmap).
#'
#' @param x character vector containing color hex codes.
#' @param type character indicating the type of demonstration plot.
#' @param \dots currently not used.
#' @return \code{demoplot} returns invisibly what the respective base graphics
#' functions return that are called internally.
#' @seealso \code{\link{specplot}}, \code{\link{hclplot}}
#' @references Zeileis A, Fisher JC, Hornik K, Ihaka R, McWhite CD, Murrell P, Stauffer R, Wilke CO (2020).
#' \dQuote{colorspace: A Toolbox for Manipulating and Assessing Colors and Palettes.}
#' \emph{Journal of Statistical Software}, \bold{96}(1), 1--49. \doi{10.18637/jss.v096.i01}
#' @keywords hplot
#' @examples
#' ## all built-in demos with the same sequential heat color palette
#' par(mfrow = c(3, 3))
#' cl <- sequential_hcl(5, "Heat")
#' for (i in c("map", "heatmap", "scatter", "spine", "bar", "pie", "perspective", "mosaic", "lines")) {
#' demoplot(cl, type = i)
#' }
#'
#' ## qualitative palettes: light pastel colors for shading areas (pie)
#' ## and darker colorful palettes for points or lines
#' demoplot(qualitative_hcl(4, "Pastel 1"), type = "pie")
#' demoplot(qualitative_hcl(4, "Set 2"), type = "scatter")
#' demoplot(qualitative_hcl(4, "Dark 3"), type = "lines")
#'
#' ## sequential palettes: display almost continuous gradients with
#' ## strong luminance contrasts (heatmap, perspective) and colorful
#' ## sequential palette for spine plot with only a few ordered categories
#' demoplot(sequential_hcl(99, "Purple-Blue"), type = "heatmap")
#' demoplot(sequential_hcl(99, "Reds"), type = "perspective")
#' demoplot(sequential_hcl(4, "Viridis"), type = "spine")
#'
#' ## diverging palettes: display almost continuous gradient with
#' ## strong luminance contrast bringing out the extremes (map),
#' ## more colorful palette with lower luminance contrasts for displays
#' ## with fewer colors (mosaic, bar)
#' demoplot(diverging_hcl(99, "Tropic", power = 2.5), type = "map")
#' demoplot(diverging_hcl(5, "Green-Orange"), type = "mosaic")
#' demoplot(diverging_hcl(5, "Blue-Red 2"), type = "bar")
#'
#' ## some palettes that work well on black backgrounds
#' par(mfrow = c(2, 3), bg = "black")
#' demoplot(sequential_hcl(9, "Oslo"), "heatmap")
#' demoplot(sequential_hcl(9, "Turku"), "heatmap")
#' demoplot(sequential_hcl(9, "Inferno", rev = TRUE), "heatmap")
#' demoplot(qualitative_hcl(9, "Set 2"), "lines")
#' demoplot(diverging_hcl(9, "Berlin"), "scatter")
#' demoplot(diverging_hcl(9, "Cyan-Magenta", l2 = 20), "lines")
#'
#' @export demoplot
#' @importFrom graphics barplot image persp pie plot polygon rect segments
demoplot <- function(x,
                     type = c("map", "heatmap", "scatter", "spine", "bar", "pie", "perspective", "mosaic", "lines"),
                     ...)
{
  ## Resolve `type` against the candidate set declared in the formal default.
  type <- match.arg(type)
  ## Dispatch to the corresponding plot_<type>() helper defined below.
  do.call(paste0("plot_", type), list(x = x, ...))
}
# Plot map example
# Map demo: fill the US South polygon, binning its z values into
# length(x) equal-width classes over (0, 1].
plot_map <- function(x, ...) {
  n_classes <- length(x)
  # Blank lon/lat canvas with a 1:1 aspect ratio and no axes.
  plot(0, 0, type = "n", xlab = "", ylab = "", xaxt = "n", yaxt = "n", bty = "n",
       xlim = c(-88.5, -78.6), ylim = c(30.2, 35.2), asp = 1)
  z_vals <- stats::na.omit(colorspace::USSouthPolygon$z)
  z_bin <- cut(z_vals, breaks = 0:n_classes / n_classes)
  polygon(colorspace::USSouthPolygon, col = x[z_bin])
}
# Plot heatmap example
# Heatmap demo on the built-in volcano elevation grid.
plot_heatmap <- function(x, ...) {
  # Reverse the palette so the first colour maps to the highest values.
  pal <- rev(x)
  image(datasets::volcano, col = pal, bty = "n", xaxt = "n", yaxt = "n",
        useRaster = TRUE)
}
# Plot scatter example
# Package-local cache environment for the demo data sets below; the scatter
# data are generated once on first use and reused afterwards.
.example_env <- new.env()
.example_env$xyhclust <- NULL
# Scatter demo: seven jittered point clusters on a circle, coloured by
# cutting a Ward hierarchical clustering into length(x) groups.
plot_scatter <- function(x, ...) {
# Generate artificial data
if (is.null(.example_env$xyhclust)) {
set.seed(1071)
# 60 points on a small circle, translated to seven cluster centres (xr, yr).
x0 <- sin(pi * 1:60 / 30) / 5
y0 <- cos(pi * 1:60 / 30) / 5
xr <- c(0.1, -0.6, -0.7, -0.9, 0.4, 1.3, 1.0)
yr <- c(0.3, 1.0, 0.1, -0.9, -0.8, -0.4, 0.6)
dat <- data.frame(
x=c(x0 + xr[1], x0 + xr[2], x0 + xr[3], x0 + xr[4], x0 + xr[5],
x0 + xr[6], x0 + xr[7]),
y=c(y0 + yr[1], y0 + yr[2], y0 + yr[3], y0 + yr[4], y0 + yr[5],
y0 + yr[6], y0 + yr[7])
)
# Hierarchical clustering stored as an attribute; cut later per palette size.
attr(dat, "hclust") <- stats::hclust(stats::dist(dat), method = "ward.D")
# Per-point jitter with heterogeneous standard deviations.
dat$xerror <- stats::rnorm(nrow(dat), sd=stats::runif(nrow(dat), 0.05, 0.45))
dat$yerror <- stats::rnorm(nrow(dat), sd=stats::runif(nrow(dat), 0.05, 0.45))
.example_env$xyhclust <- dat
}
# Filled circles (pch 21): black border, fill colour from the palette via
# the cluster assignment at k = length(x).
plot(.example_env$xyhclust$x +
.example_env$xyhclust$xerror,
.example_env$xyhclust$y +
.example_env$xyhclust$yerror,
col = "black", bg = x[stats::cutree(attr(.example_env$xyhclust, "hclust"), length(x))],
xlab = "", ylab = "", axes = FALSE, pch = 21, cex = 1.3)
}
# Plot spine example
# Spine-plot demo: k bars of fixed widths, each split vertically into
# length(x) segments whose heights follow a power curve (0:n/n)^(1/p).
plot_spine <- function(x, ...) {
n <- length(x)
# Rectangle dimensions
off <- 0.015
widths <- c(0.05, 0.1, 0.15, 0.1, 0.2, 0.08, 0.12, 0.16, 0.04)
k <- length(widths)
# (n+1) x k matrix of cut points in [0, 1], one column per bar; the exponent
# 1/p controls how unevenly the bar is divided.
heights <- sapply(
c(2.5, 1.2, 2.7, 1, 1.3, 0.7, 0.4, 0.2, 1.7),
function(p) (0:n / n)^(1 / p)
)
# Rectangle coordinates
xleft0 <- c(0, cumsum(widths + off)[-k])
xleft <- rep(xleft0, each=n)
xright <- xleft + rep(widths, each=n)
ybottom <- as.vector(heights[-(n + 1), ])
ytop <- as.vector(heights[-1, ])
# Draw rectangles, borders, and annotation
plot(0, 0, xlim=c(0, sum(widths) + off * (k - 1)), ylim=c(0, 1),
xaxs="i", yaxs="i", main="", xlab="", ylab="",
type="n", axes=FALSE)
# With many colours, drop per-segment borders and outline whole bars instead.
rect(xleft, ybottom, xright, ytop, col = rep(x, k),
border = if(n < 10) "black" else "transparent")
if(n >= 10) rect(xleft0, 0, xleft0 + widths, 1, border="black")
}
# Plot bar example
# Bar-chart demo: four groups of length(x) bars with fixed pseudo-random
# heights derived from sin/cos so the picture is reproducible.
plot_bar <- function(x, ...) {
  idx <- seq_along(x)
  bar_heights <- cbind(
    1.1 + abs(sin(0.5 + idx)) / 3,
    1.9 + abs(cos(1.1 + idx)) / 3,
    0.7 + abs(sin(1.5 + idx)) / 3,
    0.3 + abs(cos(0.8 + idx)) / 3
  )
  barplot(bar_heights, beside = TRUE, col = x, axes = FALSE)
}
# Plot pie example
# Pie-chart demo: strictly positive, reproducible wedge weights.
plot_pie <- function(x, ...) {
  wedge_sizes <- 0.01 + abs(sin(0.5 + seq_along(x)))
  pie(wedge_sizes, labels = "", col = x, radius = 1)
}
# Plot perspective example
# Perspective demo: surface of a two-component bivariate normal mixture,
# with facets coloured by local density.
plot_perspective <- function(x, ...) {
# Mixture of bivariate normals
n <- 31
x1 <- x2 <- seq(-3, 3, length.out = n)
# Density grid: equal-weight mixture of two independent-normal components
# centred at (-1,-1) and (1,1).
y <- outer(x1, x2,
function(x, y) {
0.5 * stats::dnorm(x, mean = -1, sd = 0.80) * stats::dnorm(y, mean = -1, sd = 0.80) +
0.5 * stats::dnorm(x, mean = 1, sd = 0.72) * stats::dnorm(y, mean = 1, sd = 0.72)
}
)
# Compute color based on density
if (length(x) > 1) {
# Each facet is coloured by the sum of its four corner densities, binned
# into length(x) classes; the palette is reversed so the first colour is
# the densest facet.
facet <- cut(y[-1, -1] + y[-1, -n] + y[-n, -1] + y[-n, -n],
length(x))
cols <- rev(x)[facet]
} else {
cols <- x
}
# Perspective plot coding z-axis with color
persp(x1, x2, y, col = cols, phi = 28, theta = 20, r = 5, xlab = "", ylab = "", zlab = "")
}
# Plot mosaic example
# Cache slot for the mosaic demo matrices (filled lazily on first call).
.example_env$msc.matrix <- NULL
# Mosaic demo: a 10 x length(x) grid of uniform noise rendered with image().
# Note: only palettes of up to 50 colours are supported, since the cached
# list is indexed by length(x) and holds matrices for i = 1..50.
plot_mosaic <- function(x, ...) {
if (is.null(.example_env$msc.matrix)) {
set.seed(1071)
# One 10 x i matrix of Uniform(-1, 1) draws for every palette size i.
mat <- list()
for (i in 1:50) {
mat[[i]] <- matrix(stats::runif(i * 10, min = -1, max = 1), nrow = 10, ncol = i)
}
.example_env$msc.matrix <- mat
}
image(.example_env$msc.matrix[[length(x)]], bty = "n", col = x, xaxt = "n", yaxt = "n")
}
# Plot lines example
# Line demo: three fans of crossing segments drawn in the palette colours.
plot_lines <- function(x, ...) {
  n <- length(x)
  # Empty canvas tall enough for n stacked lines.
  plot(NULL, xlab = "", ylab = "", xaxt = "n", yaxt = "n", type = "n",
       bty = "n", xlim = c(0, 6), ylim = c(1.5, n + 1.5))
  ticks <- 2:(n + 1)
  ticks_rev <- rev(ticks)
  cols_rev <- rev(x)
  # Thin the lines as the palette grows: start at 6 and drop one unit at
  # each of the thresholds n > 5, n > 15, n > 25 (logicals coerce to 0/1).
  lwd <- 6 - (n > 5) - (n > 15) - (n > 25)
  segments(1 / ticks, ticks, 2 + 1 / ticks_rev, ticks_rev, x, lwd = lwd)
  segments(2 + 1 / ticks, ticks, 4 - 1 / ticks, ticks, cols_rev, lwd = lwd)
  segments(4 - 1 / ticks, ticks, 6 - 1 / ticks, ticks_rev, cols_rev, lwd = lwd)
}
# Wrapper around specplot. Used by the tcltk interface.
# x: colour hex codes; cex: text/plot scaling; plot/rgb forwarded to specplot.
plot_spectrum <- function(x, cex = 1.0, plot = TRUE, rgb = TRUE, ...)
    specplot(x, cex = cex, plot = plot, rgb = rgb, ...)
# Wrapper around hclplot. Used by the tcltk interface.
# x: colour hex codes; cex and ... forwarded to hclplot.
plot_hclplot <- function(x, cex = 1.0, ...)
    hclplot(x, cex = cex, ...)
|
fce984c44279b5cf62401f0c7fd2722e680455ac
|
fecd3f8d27df0861bac6a1fd8306d5cbd0ce554a
|
/Homework 7.R
|
3a4307526482b247882da8fa8fed25f61f289965
|
[] |
no_license
|
paschalmj/Bayesian-Albatross-Model
|
c26cef3c04ae80ec16691d122686d6db2833b121
|
e522728a51a062c28b540928b88279393283174a
|
refs/heads/master
| 2020-05-17T05:58:34.069055
| 2019-04-26T03:20:17
| 2019-04-26T03:20:17
| 183,549,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,568
|
r
|
Homework 7.R
|
set.seed(123) #fix the RNG so the MCMC run below is reproducible
#PART 1
#Breeding-pair counts; code below uses columns Year and BreedingPairs.
alb.data = read.csv("STAL data.csv", header=T) #breeding pair data
#Calculates NLL of the observed counts under exponential growth with a
#lognormal observation error whose CV is sqrt(0.02^2 + CVadd^2)
#Input: growth rate (r), initial pop (N0), additional variance (CVadd), real data (alb.data)
#Return: NLL
getNLL = function(r, N0, CVadd, alb.data) {
  observed = alb.data$BreedingPairs
  yrs = alb.data$Year
  #expected counts: exponential growth from N0 in the first data year
  expected = N0 * (1 + r)^(yrs - yrs[1])
  #total observation variance on the log scale (survey CV 0.02 plus CVadd)
  total.var = 0.02^2 + CVadd^2
  const = log(sqrt(total.var))
  #sum the lognormal NLL kernel over all years (density constants omitted,
  #matching the original formulation)
  NLL = sum(const + (log(observed / expected))^2 / (2 * total.var))
  return(NLL)
}
getNLL(0.07, 10, 0.05, alb.data) #check if it's right
#PART 2
#Each prior is Uniform(lower, upper); inside the support its negative log
#density is -log(1/(upper - lower)), a constant.
#Negative log of the prior on the growth rate r.
rNLprior = function(r.prior.lower, r.prior.upper) {
  width = r.prior.upper - r.prior.lower
  rNL = -log(1/width)
  return(rNL)
}
#Negative log of the prior on the initial population N0.
N0NLprior = function(N0.prior.lower, N0.prior.upper) {
  width = N0.prior.upper - N0.prior.lower
  N0NL = -log(1/width)
  return(N0NL)
}
#Negative log of the prior on the additional variance CVadd.
CVaddNLprior = function(CVadd.prior.lower, CVadd.prior.upper) {
  width = CVadd.prior.upper - CVadd.prior.lower
  CVaddNL = -log(1/width)
  return(CVaddNL)
}
#Total negative log prior: r ~ U(-0.1, 0.2), N0 ~ U(0, 50), CVadd ~ U(0, 0.3).
NLpriors = rNLprior(-0.1,0.2) + N0NLprior(0,50) + CVaddNLprior(0,0.3) #store sum of NL priors
#PART 3
#Metropolis MCMC sampler: starts from the initial parameters and bounces
#around until it settles in the region of high posterior probability.
#Input: number of draws (ndraws), file of counts (filename), initial growth rate (rinit),
#       initial count (N0init), initial additional variance (CVaddinit)
#Return: ndraws x 4 matrix of accepted draws (r*, N0*, CVadd*, X*) where X*
#        is the total negative log posterior (NLL + NL priors)
#NOTE(review): `filename` is accepted but never used -- the function reads the
#globals `alb.data` and `NLpriors` instead; confirm whether it should read the
#file itself.
runMCMC = function(ndraws, filename, rinit, N0init, CVaddinit) {
posterior = matrix(nrow = ndraws, ncol = 4) #matrix of accepted draws
Xstarinit = getNLL(rinit, N0init, CVaddinit, alb.data) + NLpriors #initial Xstar
posterior[1,] = c(rinit, N0init, CVaddinit, Xstarinit) #initial draw
#loop through all the draws
for(i in 2:ndraws) {
#propose new parameters by uniformly jittering the previous accepted draw
rstar = posterior[(i-1), 1] + runif(1, -0.01, 0.01)
N0star = posterior[(i-1), 2] + runif(1, -2, 2)
CVaddstar = posterior[(i-1), 3] + runif(1, -0.05, 0.05)
Xstar = getNLL(rstar, N0star, CVaddstar, alb.data) + NLpriors
#Metropolis acceptance ratio: exp(previous negative log posterior - proposed)
ratio = exp(posterior[(i-1), 4]-Xstar)
randNum = runif(1, 0, 1)
#if the random number is less than the ratio, accept the draw
if(randNum < ratio) {
posterior[i,] = c(rstar, N0star, CVaddstar, Xstar)
} else { #reject draw and start from same spot as before
posterior[i,] = posterior[i-1,]
}
}
return(posterior)
}
#PART 4
#Run the sampler for 100,000 draws from r=0.03, N0=10, CVadd=0.05.
posterior = runMCMC(100000, "STAL data.csv", 0.03, 10, 0.05)
#thin the posterior to reduce autocorrelation and make it a manageable size
#(discard the first ~20,000 draws as burn-in, then keep every 40th draw)
temp = posterior[20000:100000,]
thin.Post = temp[seq(1,80000,40),]
#PART 5
#find distributions for pairs in 2014, individuals, and delta bw 2014 & 2015
#pairs in 2014: N0*(1+r)^60, i.e. 60 years of growth from 1954
dist.pairs = thin.Post[,2]*(1+thin.Post[,1])^(2014-1954)
dist.pop = 7.1*dist.pairs #7.1 individuals per breeding pair -- TODO confirm source of multiplier
dist.diff = dist.pop*thin.Post[,1] #one-year change: N*r
#STUFF FOR RESULTS SECTION
#quantiles for parameters (95% credible interval bounds and median)
r.quant = quantile(thin.Post[,1], probs = c(0.025, 0.5, 0.975))
N0.quant = quantile(thin.Post[,2], probs = c(0.025, 0.5, 0.975))
CVadd.quant = quantile(thin.Post[,3], probs = c(0.025, 0.5, 0.975))
#histograms for parameters
hist(thin.Post[,1], main="Growth Rate", xlab="r")
hist(thin.Post[,2], main="Initial Pairs", xlab="N0")
hist(thin.Post[,3], main="Additional Variance", xlab="CVadd")
#quantiles for pairs in 2014, individuals, and delta bw 2014 & 2015
pairs.quant = quantile(dist.pairs, probs=c(0.025, 0.5, 0.975))
pop.quant = quantile(dist.pop, probs=c(0.025, 0.5, 0.975))
diff.quant = quantile(dist.diff, probs=c(0.025, 0.5, 0.975))
|
836f31a1e2e8f6cba749fce479be0fb6fa6be8d3
|
448a1ff0d4b3d7029b5df32c0a09f32a338c8b65
|
/Lab Week 9-3.R
|
7cfcf30529c03adfc933276660a8d03c42d954b2
|
[] |
no_license
|
LouisJBooth/R
|
ccdca481d1e5ef78aa18e346046f42163b4ee982
|
6820bf082a3562a74a259ad7688771a2c68e1325
|
refs/heads/master
| 2020-04-17T15:50:57.087680
| 2019-01-20T23:24:59
| 2019-01-20T23:24:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
Lab Week 9-3.R
|
## Lab script: draws the upper half of a sphere of radius 5 with
## persp/contour/image, then fits z = b0 + b1*x + b2*y by least squares and
## overlays the simulated points on the fitted plane. Output: "half sphere.pdf".
pdf(file="half sphere.pdf")
x <- matrix(rep(seq(-5, 5, length= 100),100),100,100)
y <- t(x)
## Upper hemisphere of x^2 + y^2 + z^2 = 25; the indicator (25-x^2+-y^2>0)
## zeroes points outside the disc. Note `+-y^2` parses as `- y^2`.
f <- function(x, y) { r <- sqrt((25-x^2-y^2)*(25-x^2+-y^2>0)) }
z <- f(x,y)
z[z==0] <- NA  #blank out points outside the sphere so persp skips them
persp(x[,1], y[1,], z, theta = 0, phi = 0, expand = 1, col = "red", xlab="x", ylab="y", zlab="z")
contour(z)
image(z)
contour(z, add=TRUE)
## Simulate noisy data on the plane z = 1 + x + 2y and fit a linear model.
x <- runif(100, min=-1, max=1)
y <- runif(100, min=-1, max=1)
z <- 1 + x + 2 * y + rnorm(100)
reg <- lm(z~x+y)
## Evaluate the fitted plane on a 10x10 grid.
x1<-matrix(rep(seq(-1, 1, length=10),10), 10, 10)
y1<-t(x1)
zhat<-reg$coefficients[1]+reg$coefficients[2]*x1+reg$coefficients[3]*y1
surface <- persp(x1[,1], y1[1,], zhat, theta = 0, phi = 0, expand = 1, col = heat.colors(30))
surface
## Project the observed 3D points into the persp view and overlay them.
xy.list = trans3d(x, y, z, surface)
points(xy.list, pch=20, col=heat.colors(10))
dev.off()
|
c81c2d438b63c73cd2940f3c3182b16bfa130bf6
|
46239fbdbd5af227b892b6cb5ec2782657cf86cb
|
/week2/lessons/code1.R
|
2dd101de17ef04c835535fdb652efb8dcd8939bc
|
[] |
no_license
|
c91403/mitx15071x
|
e9d112d50794da857b9b3ee4425440016abafbf5
|
929d0694d1e93c5967288ae129ea359871d5d1f6
|
refs/heads/master
| 2020-05-17T05:44:32.797381
| 2014-03-21T20:49:14
| 2014-03-21T20:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,324
|
r
|
code1.R
|
## Lecture script: stepwise exploration of linear models predicting wine Price,
## comparing R-squared/SSE as predictors are added or removed, then an
## out-of-sample check on a test set.
wine = read.csv("wine.csv")
str(wine)
# 1 variable linear regression, price vs agst
model1 = lm(Price ~ AGST, data=wine)
summary(model1)
# adjusted r-squared penalizes extra independent variables; it decreases if you add a bad variable
model1$residuals
# calculate the SSE
SSE = sum(model1$residuals ^ 2)
SSE
# 5.73
# predict price using AGST and HarvestRain
model2 = lm(Price ~ AGST + HarvestRain, data=wine)
summary(model2)
# coefficient for HarvestRain is 0.00457
# R-squared increased, that's a good variable to include in the model
SSE = sum(model2$residual^2)
SSE
# 2.97, lower is better
model3 = lm(Price ~ AGST + HarvestRain + WinterRain + Age + FrancePop, data=wine)
summary(model3)
# R-squared better at 0.8294, this is better than before
SSE = sum(model3$residual^2)
SSE
# 1.73, lower is better
# quiz 2.2.4
modelQuiz4 = lm(Price ~ HarvestRain + WinterRain, data=wine)
summary(modelQuiz4)
# multiple r-squared = 0.3177
# harvest rain coeff = -4.971e-03
# intercept coeff = 7.865e+00
# removed FrancePop, since it has no stars (not significant)
model4 = lm(Price ~ AGST + HarvestRain + WinterRain + Age, data=wine)
summary(model4)
# Multiple R-squared:  0.8286 same
# Adjusted R-squared increased, so could be better
# Age now has 2 stars but didn't have any stars before --> correlated variable
# calculate the correlation between 2 variables
cor(wine$WinterRain, wine$Price)
cor(wine$Age, wine$FrancePop)
# correlation between all variables in a table form
cor(wine)
model5 = lm(Price ~ AGST + HarvestRain + WinterRain, data=wine)
summary(model5)
# r-squared lowered, this is worse than model4
# highly correlated variables can make coefficients have the wrong sign
# quiz 2.2.5
cor(wine$HarvestRain, wine$WinterRain)
modelQuiz5 = lm(Price ~ HarvestRain + WinterRain, data=wine)
summary(modelQuiz5)
winetest = read.csv("wine_test.csv")
# create predictions with an existing model
predicttest = predict(model4, newdata = winetest)
predicttest
# 6.768925 6.684910, our data points look pretty good since they match str(winetest)
# out-of-sample R-squared: 1 - SSE/SST against the training-set mean baseline
SSE = sum((winetest$Price - predicttest) ^2)
SST = sum((winetest$Price - mean(wine$Price))^2)
1 - SSE / SST
# pretty good out of sample r-squared, but our test set is pretty small, should use a bigger test set
|
6ad0f90faddd08f29034bffe1bf648fbf74d82ce
|
614f6596ff7ba85e5fb2abf006c12d8e69e6c650
|
/R/methods.R
|
7e1840774c44876407e2c03e7043785d6199a94b
|
[] |
no_license
|
sheng-liu/SeqFrame
|
09651fb03dc3f55438bb77fce923041689ed92cf
|
d22650c4dc48d86f4aa22b4c2718cec1f84e00d7
|
refs/heads/master
| 2016-09-06T13:49:44.903793
| 2014-12-31T11:16:07
| 2014-12-31T11:16:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,980
|
r
|
methods.R
|
# SeqFrame - methods
#
#
###############################################################################
## so add a id column (the row.numbers), use it subset annotation, transfer to
## grangs and open genome browser todo: remove or hide id columne as row names
## in flowCore
## -----------------------------------------------------------------------------
## SeqFrame annotation method
## generics
##'@exportMethod annotation
## Generic getter for a SeqFrame's annotation slot.
setGeneric(
name="annotation",
def=function(obj){
standardGeneric("annotation")
})
##'@exportMethod annotation<-
## Generic replacement function: annotation(obj) <- value.
setGeneric(
name="annotation<-",
def=function(obj,value){
standardGeneric("annotation<-")
})
## methods
## Replacement method for SeqFrame: assigns `value` into the annotation slot.
## Replacement methods must return the (modified) object, hence the bare
## `obj` as the last expression.
setReplaceMethod(
f="annotation",
signature="SeqFrame",
definition=function(obj,value){
# methods::initialize(obj,annotation=value)
obj@annotation=value
obj # must be returned -- this cost me a lot of time to figure out
})
# > annotation(x)=annotation(x)[i,]
# > x
# SeqFrame object 'anonymous'
# with 0 genomic features and -1 epigenetic observables:
# [1] name description range minRange maxRange
# <0 rows> (or 0-length row.names)
# 1 keywords are stored in the 'description' slot
## Accessor method for SeqFrame: returns the annotation slot unchanged.
setMethod(
  f = "annotation",
  signature = "SeqFrame",
  definition = function(obj) obj@annotation
)
## -----------------------------------------------------------------------------
## SeqFrame show method
## this is a version from flowFrame
##' @exportMethod show
setMethod("show",
signature=signature(object="SeqFrame"),
definition=function(object)
{
dm <- dim(exprs(object))
cat(paste("SeqFrame object '", identifier(object),
"'\nwith ", dm[1], " genomic features and ",
dm[2]-1, " epigenetic observables:\n", sep=""))
show(pData(parameters(object)))
cat(paste(length(description(object)), " keywords are stored in the ",
"'description' slot\n", sep = ""))
return(invisible(NULL))
})
## -----------------------------------------------------------------------------
## SeqFrame subset methods
## "[" function
## by indices
##' @exportMethod "["
setMethod(f="[",
signature=c(x="SeqFrame"),
definition=function(x, i, j, ..., drop=FALSE)
{
# based on location of i,j, defines subsetting method
# sf[1:100,], missing(j)=T
# sf[,1:100], missing(i)=T
switch(1+missing(i)+2*missing(j),
{
# subset exprs, and annotation
exprs(x) = exprs(x)[i, j]
annotation(x)=annotation(x)[i , j]
},
{
x = subsetKeywords(x, j)
exprs(x) = exprs(x)[ , j]
annotation(x) = annotation(x)[ , j]
},
{
exprs(x) = exprs(x)[i, ]
#annotation(x) = annotation(x)[i, ]
annotation(x) = annotation(x)[i, ]
},
{
exprs(x) = exprs(x)[ , ]
annotation(x) = annotation(x)[, ]
} )
return(x)
})
## TODO:
## ADD subset by logical vectors
## ADD subsetting with $
## ADD subset by filter
## -----------------------------------------------------------------------------
## SeqFrame split method
##' @exportMethod split
## Split a SeqFrame by a logical filter result: delegates the split to
## flowCore's flowFrame method, then rebuilds each piece as a SeqFrame with
## its matching annotation rows (joined on the `id` column).
setMethod(
f="split",
signature=c(x="SeqFrame",f="logicalFilterResult"),
definition=function(x,f,drop=FALSE,...){
annotation=annotation(x)
## Downcast hack: relabel the S4 class so flowCore::split dispatches on
## flowFrame. The annotation slot was saved above before it is lost.
class(x)="flowFrame"
# split as flowFrame
list=flowCore::split(x,f)
# merge exprs with its own annotation
sf.list=lapply(list,function(frame){
df=merge(exprs(frame),annotation,by="id")
cat("Construct SeqFrame\n")
sf=df2sf(df,keyword(frame))
# re-order exprs(sf) columns to its original sequence
cln=colnames(exprs(frame))
exprs(sf)=exprs(sf)[,cln]
sf
})
return(sf.list)
})
|
92ebe27e93280389c14b7700d30d1b3f269748cd
|
172c131b7456c76b300d5c7213528b281617db8d
|
/man/sSpec-class.Rd
|
7b108ae289c62490252f662c251a6e834f78edb3
|
[] |
no_license
|
cran/momentfit
|
7b23f8d5ed63e32cc4ba368277a106acc4dc8618
|
b7c29f7d8b6f4c3fa0ea5356409b654ede8c8df7
|
refs/heads/master
| 2023-06-08T23:42:57.900725
| 2023-06-05T14:20:02
| 2023-06-05T14:20:02
| 237,930,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 889
|
rd
|
sSpec-class.Rd
|
\name{sSpec-class}
\docType{class}
\alias{sSpec-class}
\title{Class \code{"sSpec"}}
\description{
A class to store the specifications of the kernel used to smooth moment
conditions.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("sSpec", ...)}.
It is created by \code{\link{kernapply-methods}}.
}
\section{Slots}{
\describe{
    \item{\code{k}:}{Object of class \code{"numeric"}.}
    \item{\code{kernel}:}{Object of class \code{"character"}.}
    \item{\code{bw}:}{Object of class \code{"numeric"}.}
    \item{\code{w}:}{Object of class \code{"tskernel"}.}
    \item{\code{bwMet}:}{Object of class \code{"character"}.}
}
}
\section{Methods}{
\describe{
\item{print}{\code{signature(x = "sSpec")}: ... }
\item{show}{\code{signature(object = "sSpec")}: ... }
}
}
\examples{
showClass("sSpec")
}
\keyword{classes}
|
97c361144c3524060531d48079de65090a7a53e6
|
8283ef54fe2d4e743bdb50e54f44cf8068df021d
|
/R/helpers.R
|
d80e7f69406c532247cdb4b5d7ada51cc9eed7fe
|
[] |
no_license
|
daroczig/PWA
|
181ec3a98cdf1a191cfce16659febdad6eb81590
|
234912efcc2d9aaa7ff1bd910713df60a023f6dd
|
refs/heads/master
| 2016-08-07T02:50:32.643363
| 2014-09-14T12:04:13
| 2014-09-14T12:04:13
| 24,019,354
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
helpers.R
|
#' Outlier detection function
#'
#' Flags the elements of \code{x} whose absolute z-score (after centering and
#' scaling with \code{scale()}) meets or exceeds the threshold \code{z}.
#'
#' @param x numeric vector
#' @param z standardized threshold
#' @return vector index of outliers
#' @export
#' @examples
#' out(runif(10), 0.9)
out <- function (x, z = 0.7) {
  standardized <- scale(x, scale = TRUE, center = TRUE)
  which(abs(standardized) >= z)
}
|
0b227b6f392d16188736a4a8846190e8d92d4164
|
b188cae574794bfc9b2cb74f8ef89912c92c7cfe
|
/label_examples/label_spots/label_checker.R
|
020c4f3b65c104a6691a8f900c4cfa6a408ed59d
|
[
"MIT"
] |
permissive
|
dinhnhobao/urops
|
e1dff94de828b420f7fd1df1b07b95de59b6cffe
|
cc25d02830f907040eaad741e426b7b961186bd2
|
refs/heads/master
| 2022-09-08T23:17:56.570128
| 2018-12-07T06:38:37
| 2018-12-07T06:38:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,263
|
r
|
label_checker.R
|
library(imager)
library(stringr)

# Script that, given a directory of pictures and the date entitling their
# labels .csv file, plots the pictures and their labels side-by-side.

# Before running this script, set the working directory as the location of
# this file.

pictures_directory <- "pictures_to_label/"
labels_directory <- "label_csvs/"
date <- "2018-07-24"

file_names <- list.files(pictures_directory, pattern="*.jpg")
labels <- read.csv(paste(labels_directory, date, ".csv", sep=""))

par(mfrow=c(1, 2))
for (index in seq_along(file_names)) {
  # Left panel: the picture itself, titled with its file name.
  image <- load.image(paste(pictures_directory, file_names[index], sep=""))
  plot(image,
       main=file_names[index])
  # Right panel: a blank canvas used purely to display the label text.
  plot(c(-1, 3),
       c(-1, 3),
       ann=FALSE,
       bty="n",
       type="n",
       xaxt="n",
       yaxt="n")
  # BUG FIX: the original computed the substring end as
  # length(file_names[index]) - 6, but length() of a single string is always 1,
  # so str_sub() received end = -5 and produced the wrong date_id. nchar()
  # gives the character count, so end = nchar - 6 strips the trailing 6-char
  # suffix (e.g. "_1.jpg") as the date_id match requires.
  date_id <- str_sub(file_names[index], 1, nchar(file_names[index]) - 6)
  text(x=-1,
       y=1,
       labels$label[labels$date_id == date_id],
       cex = 1.6, col = "black")
  # Progress counter "index/total" next to the label.
  text(x=0.75,
       y=1,
       paste(index, "/", length(file_names), sep=""),
       cex = 1.6, col = "black")
  # Wait for the user to press Enter before showing the next picture.
  readline("")
  if (index == length(file_names)) {
    par(mfrow=c(1, 1))
    plot(load.image("white.png"))
  }
}
|
21eccbb21d94643fff36915f2a227436c7760762
|
b43a223cbf97422a2a26e6004f55500713ae9df1
|
/server.R
|
732d5476173589fa8296af3980860bf71816b65a
|
[] |
no_license
|
eriklindquist/bfastspatial
|
1c8d44d89e2b3db7095c057a46e35764ac07ad7f
|
d5a8d875c80afadca168973cc1ef2a90ddd974ac
|
refs/heads/master
| 2020-03-18T07:02:56.039397
| 2018-05-09T11:30:36
| 2018-05-09T11:30:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,575
|
r
|
server.R
|
####################################################################################
####### BFAST
####### SEPAL shiny application
####### FAO Open Foris SEPAL project
####### remi.dannunzio@fao.org - yelena.finegold@fao.org
####################################################################################
####################################################################################
# FAO declines all responsibility for errors or deficiencies in the database or
# software or in the documentation accompanying it, for program maintenance and
# upgrading as well as for any damage that may arise from them. FAO also declines
# any responsibility for updating the data and assumes no responsibility for errors
# and omissions in the data provided. Users are, however, kindly asked to report any
# errors or deficiencies in this product to FAO.
####################################################################################
####################################################################################
## Last update: 2018/01/18
## bfast / server
####################################################################################
####################################################################################
####### Start Server
shinyServer(function(input, output, session) {
  # Server logic for the SEPAL BFAST app: the user picks a time-series folder
  # (and optionally a forest/non-forest mask), the BFAST parameters are exposed
  # as widgets, bfast_run.R is executed per selected tile, and the per-tile
  # thresholded outputs are mosaicked into a VRT and displayed.
  ####################################################################################
  ##################### Choose language option ###########################
  ####################################################################################
  output$chosen_language <- renderPrint({
    # Sourcing a text_*.R script redefines the UI label strings in-place.
    if (input$language == "English") {
      source("www/scripts/text_english.R",
             local = TRUE,
             encoding = "UTF-8")
      #print("en")
    }
    if (input$language == "Français") {
      source("www/scripts/text_french.R",
             local = TRUE,
             encoding = "UTF-8")
      #print("fr")
    }
    if (input$language == "Español") {
      source("www/scripts/text_spanish.R",
             local = TRUE,
             encoding = "UTF-8")
      #print("sp")
    }
  })

  ##################################################################################################################################
  ############### Stop session when browser is exited
  session$onSessionEnded(stopApp)

  ##################################################################################################################################
  ############### Show progress bar while loading everything
  progress <- shiny::Progress$new()
  progress$set(message = "Loading data", value = 0)

  ####################################################################################
  ####### Step 0 : read the map file and store filepath ###########################
  ####################################################################################

  ##################################################################################################################################
  ############### Find volumes
  # Mounted drives under /media plus the home directory become the browsable
  # roots for the file/dir choosers below.
  osSystem <- Sys.info()["sysname"]
  # NOTE(review): osSystem and my_zip_tools appear unused in the rest of this
  # file — confirm before removing.
  volumes <- list()
  media <- list.files("/media", full.names = T)
  names(media) = basename(media)
  volumes <- c(media)
  volumes <- c('Home' = Sys.getenv("HOME"),
               volumes)

  my_zip_tools <- Sys.getenv("R_ZIPCMD", "zip")

  ##################################################################################################################################
  ## Allow to download test data
  output$dynUI_download_test <- renderPrint({
    req(input$download_test_button)
    # Fetch the demo archive into ~/bfast_data_test and unpack it
    # (requires wget/unzip on the host; intended for the SEPAL environment).
    dir.create(file.path("~", "bfast_data_test"),showWarnings = F)

    withProgress(message = paste0('Downloading data in ', dirname("~/bfast_data_test/")),
                 value = 0,
                 {
                   system("wget -O ~/bfast_data_test/bfast_data_test.zip https://github.com/openforis/data_test/raw/master/bfast_data_test.zip")
                   system("unzip -o ~/bfast_data_test/bfast_data_test.zip -d ~/bfast_data_test/ ")
                   system("rm ~/bfast_data_test/bfast_data_test.zip")
                 })
    # Rendered output: list the files that were extracted
    list.files("~/bfast_data_test/")
  })

  ##################################################################################################################################
  ############### Select input file (raster OR vector)
  shinyDirChoose(
    input,
    'time_series_dir',
    roots = volumes,
    session = session,
    restrictions = system.file(package = 'base')
  )

  ##################################################################################################################################
  ############### Select Forest-Non Forest mask
  shinyFileChoose(
    input,
    'mask_file',
    filetype = "tif",
    roots = volumes,
    session = session,
    restrictions = system.file(package = 'base')
  )

  ################################# Data file path
  data_dir <- reactive({
    validate(need(input$time_series_dir, "Missing input: Please select time series folder"))
    req(input$time_series_dir)
    df <- parseDirPath(volumes, input$time_series_dir)
  })

  ################################# Display tiles inside the DATA_DIR
  output$outdirpath = renderPrint({
    basename(list.dirs(data_dir(),recursive = F))
  })

  ################################# Output directory path
  # NOTE(review): mask_file_path is defined but not referenced elsewhere in
  # this file — presumably consumed by bfast_run.R via local sourcing; confirm.
  mask_file_path <- reactive({
    req(input$mask_file)
    df <- parseFilePaths(volumes, input$mask_file)
    file_path <- as.character(df[, "datapath"])
  })

  ################################# Setup from the archives the Date Range
  # Year tokens are parsed from "*_<year>_stack.tif" file names
  # (second-to-last "_"-separated field).
  list_year <- reactive({
    req(data_dir())
    data_dir <- data_dir()
    list <- list.files(data_dir,pattern = "_stack.tif",recursive = T)
    unlist(lapply(list,function(x){unlist(strsplit(x,split = "_"))[length(unlist(strsplit(x,split = "_")))-1]}))
  })

  ################################# Take the minimum as beginning Date
  beg_year <- reactive({
    req(list_year())
    min(list_year())
  })

  ################################# Take the maximum as ending Date
  end_year <- reactive({
    req(list_year())
    max(list_year())
  })

  ##################################################################################################################################
  ############### Option buttons --> KEEP IF ARCHIVE READING IS NOT OPTIMAL
  # output$ui_option_h_beg <- renderUI({
  #   req(input$time_series_dir)
  #   selectInput(inputId = 'option_h_beg',
  #               label = "Historical year beginning",
  #               choices = 2000:2020,
  #               selected = as.numeric(beg_year())
  #   )
  # })
  #
  # output$ui_option_m_end <- renderUI({
  #   req(input$time_series_dir)
  #   selectInput(inputId = 'option_m_end',
  #               label = "Monitoring year end",
  #               choices = as.numeric(input$option_h_beg):2020,
  #               selected = as.numeric(end_year())
  #   )
  # })

  ################################# Take the average date for the beginning of monitoring period
  output$ui_option_m_beg <- renderUI({
    req(input$time_series_dir)
    sliderInput(inputId = 'option_m_beg',
                label = textOutput("text_option_date_break"),
                min = as.numeric(beg_year()),
                max = as.numeric(end_year()),
                value = (as.numeric(beg_year()) + as.numeric(end_year()))/2
    )
  })

  # BFAST harmonic order
  output$ui_option_order <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = 'option_order',
                label = "Order parameter",
                choices = 1:5,
                selected = 3
    )
  })

  # bfastmonitor "history" argument (stable-history selection method or a year)
  output$ui_option_history <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = 'option_history',
                label = "History parameter",
                choices = c("ROC", "BP", "all",as.numeric(beg_year())),
                selected = "ROC"
    )
  })

  # Structural-change monitoring process type
  output$ui_option_type <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = 'option_type',
                label = "Type parameter",
                choices = c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME","fluctuation"),
                selected = "OLS-CUSUM"
    )
  })

  # Components of the regression formula (harmonic season and/or trend)
  output$ui_option_formula <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = 'option_formula',
                label = "Elements of the formula",
                choices = c("harmon","trend"),
                multiple = TRUE,
                selected = "harmon"
    )
  })

  output$ui_option_sequential <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = "option_sequential",
                label = "Computation mode",
                choices = c("Overall"),#,"Sequential"),
                selected= "Overall"
    )
  })

  # Mask choice is only offered once a mask file has been selected
  output$ui_option_useMask <- renderUI({
    req(input$time_series_dir)
    req(input$mask_file)
    selectInput(inputId = "option_useMask",
                label = "Use a Forest/Non-Forest mask ?",
                choices = c("No Mask","FNF Mask"),#,"Sequential"),
                selected= "No Mask"
    )
  })

  # Tile selection: defaults to every sub-directory of the data folder
  output$ui_tiles <- renderUI({
    req(input$time_series_dir)
    selectInput(inputId = "option_tiles",
                label = "Which tiles do you want to process ?",
                choices = basename(list.dirs(data_dir(),recursive = F)),
                selected= basename(list.dirs(data_dir(),recursive = F)),
                multiple = TRUE
    )
  })

  ##################################################################################################################################
  ############### Parameters title as a reactive
  # Builds the short run identifier (e.g. "O_3_H_ROC_T_OC_F_h") used to name
  # the per-tile output folders and files; the last expression is the value.
  parameters <- reactive({
    req(input$time_series_dir)
    data_dir <- paste0(data_dir(),"/")
    print(data_dir)
    historical_year_beg <- as.numeric(beg_year())
    monitoring_year_end <- as.numeric(end_year())
    monitoring_year_beg <- as.numeric(input$option_m_beg)
    order <- as.numeric(input$option_order)
    history <- as.character(input$option_history)
    mode <- as.character(input$option_sequential)
    type <- as.character(input$option_type)
    mask <- as.character(input$option_useMask)
    formula_elements <- unlist(input$option_formula)
    type_num <- c("OC","OM","R","M","f")[which(c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME","fluctuation")==type)]
    mask_opt <- c("","_msk")[which(c("No Mask","FNF Mask")==mask)]
    formula <- paste0("response ~ ",paste(formula_elements,sep = " " ,collapse = "+"))
    title <- paste0("O_",order,"_H_",paste0(history,collapse = "-"),"_T_",type_num,"_F_",paste0(substr(formula_elements,1,1),collapse= ""),mask_opt)
  })

  ##################################################################################################################################
  ############### Insert the start button
  output$StartButton <- renderUI({
    req(input$time_series_dir)
    validate(need(input$option_tiles, "Missing input: Please select at least one tile to process"))
    validate(need(input$option_formula, "Missing input: Please select at least one element in the formula"))
    actionButton('bfastStartButton', textOutput('start_button'))
  })

  ##################################################################################################################################
  ############### Run BFAST
  bfast_res <- eventReactive(input$bfastStartButton,
                             {
                               req(input$time_series_dir)
                               req(input$bfastStartButton)

                               # Re-derive all run parameters here (duplicates the logic of
                               # parameters()) because that reactive only returns the title.
                               data_dir <- paste0(data_dir(),"/")
                               print(data_dir)
                               historical_year_beg <- as.numeric(beg_year())
                               monitoring_year_end <- as.numeric(end_year())
                               monitoring_year_beg <- as.numeric(input$option_m_beg)
                               order <- as.numeric(input$option_order)
                               history <- as.character(input$option_history)
                               mode <- as.character(input$option_sequential)
                               type <- as.character(input$option_type)
                               mask <- as.character(input$option_useMask)
                               formula_elements <- unlist(input$option_formula)
                               type_num <- c("OC","OM","R","M","f")[which(c("OLS-CUSUM", "OLS-MOSUM", "RE", "ME","fluctuation")==type)]
                               mask_opt <- c("","_msk")[which(c("No Mask","FNF Mask")==mask)]
                               formula <- paste0("response ~ ",paste(formula_elements,sep = " " ,collapse = "+"))

                               print(order)
                               print(history)
                               print(formula)
                               print(type)

                               title <- paste0("O_",order,"_H_",paste0(history,collapse = "-"),"_T_",type_num,"_F_",paste0(substr(formula_elements,1,1),collapse= ""),mask_opt)
                               print(title)

                               tiles <- input$option_tiles

                               # bfast_run.R is sourced with local=T so it can read the
                               # variables defined above (the_dir, title, formula, ...).
                               for(the_dir in tiles){#list.dirs(data_dir, recursive=FALSE)){
                                 withProgress(message = paste0('BFAST running for ',the_dir),
                                              value = 0,
                                              {
                                                setProgress(value = .1)
                                                source("www/scripts/bfast_run.R",echo=T,local=T)
                                              })
                               }

                               #############################################################
                               ### MERGE AS VRT
                               # Mosaic the per-tile thresholded rasters into one virtual raster
                               system(sprintf("gdalbuildvrt %s %s",
                                              paste0(data_dir,"/bfast_",title,"_threshold.vrt"),
                                              paste0(data_dir,"/*/results/bfast_",title,"/bfast_",title,"_threshold.tif")
                               ))

                               print(paste0(data_dir,"/bfast_",title,"_threshold.vrt"))
                               # Reactive value: the merged result as a raster object
                               raster(paste0(data_dir,"/bfast_",title,"_threshold.vrt"))
                             })

  ##################################################################################################################################
  ############### Processing time as reactive
  # Reads the first log file found under the data dir (written by bfast_run.R)
  process_time <- reactive({
    req(bfast_res())
    log_filename <- list.files(data_dir(),pattern="log",recursive = T)[1]
    print(paste0(data_dir(),"/",log_filename))
    readLines(paste0(data_dir(),"/",log_filename))
  })

  ############### Display the results as map
  output$display_res <- renderPlot({
    req(bfast_res())
    print('Check: Display the map')
    plot(bfast_res(), axes = FALSE)
  })

  ##################################################################################################################################
  ############### Display parameters
  output$parameterSummary <- renderText({
    req(input$time_series_dir)
    print(paste0("Parameters are : ",parameters()))
  })

  ##################################################################################################################################
  ############### Display time
  output$message <- renderText({
    req(bfast_res())
    print("processing time")
    process_time()
  })

  ##################################################################################################################################
  ############### Turn off progress bar
  progress$close()

  ################## Stop the shiny server
  ####################################################################################
})
|
c2454b882148e73ba81b9435398ab2d0fb9a22b1
|
338cfd3efe0cc943d2e6b58becf7432ced163ab2
|
/02Mastering Machine Learning with R/ch2linear_regression/i2snake_fit.R
|
b3d874bce249963fdee64834a96fb36ecdca41ad
|
[] |
no_license
|
greatabel/RStudy
|
e1b82574f1a2f1c3b00b12d21f2a50b65386b0db
|
47646c73a51ec9642ade8774c60f5b1b950e2521
|
refs/heads/master
| 2023-08-20T17:07:34.952572
| 2023-08-07T13:22:04
| 2023-08-07T13:22:04
| 112,172,144
| 6
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
i2snake_fit.R
|
# Fit a simple linear regression of water yield on snowfall water content
# using the `snake` data set from the alr3 package, then inspect diagnostics.
library(alr3)
data(snake)
names(snake) <- c('content', 'yield')
# NOTE(review): attach() is generally discouraged (name masking); kept as-is.
attach(snake)

# OLS fit: yield ~ content
yield.fit <- lm(yield ~ content)
summary(yield.fit)

# Scatter plot with the fitted regression line overlaid
plot(content, yield)
abline(yield.fit, lwd=3, col='red')

# Standard 2x2 diagnostic panel, then a QQ plot
# (qqPlot presumably comes from the car package loaded with alr3 — confirm)
par(mfrow = c(2,2))
plot(yield.fit)
qqPlot(yield.fit)
|
5aa71237cc1693fd62b55c60d9e52ec2ff94a905
|
d0abe38ea4cd9a88321437cd4d922d2affc91f46
|
/JDE_Shubert3_JUL_07_2020.R
|
77e0b3e2fd3e50d6b5b62a0b33c8859ef450dafb
|
[] |
no_license
|
ucfilho/Raianars_R_language_July_2020
|
33c50e82ebd1cef15cecaf9e49e86f7ac11cfb6b
|
155ea5e629bcf6c9ac09f556dd4d0a12bffdd9c8
|
refs/heads/master
| 2022-11-18T13:33:21.320916
| 2020-07-09T22:18:54
| 2020-07-09T22:18:54
| 277,663,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
JDE_Shubert3_JUL_07_2020.R
|
library('DEoptimR')
# Shubert-3 benchmark function:
#   f(x) = sum_i sum_{j=1..5} j * sin((j + 1) * x_i + j)
# Global minimum 0 on the domain [-10, 10]^d.
#
# Fixes vs. original: the accumulator loop used `for (i in 1:Num)`, which
# iterates over c(1, 0) when x is empty (returning numeric(0)/NA instead of
# the mathematically correct 0); the inner loop is replaced by a vectorized
# sum over j.
Shubert3 = function(x)
{
  j <- 1:5
  # Per-coordinate inner sum, then total over all coordinates
  # (sum(numeric(0)) is 0, so zero-length input is handled correctly).
  per_coord <- vapply(x, function(xi) sum(j * sin((j + 1) * xi + j)), numeric(1))
  sum(per_coord)
}
# Global Minimum: 0 , domain=[-10,10]

# Experiment settings
dim <- 30      # problem dimension
RUNS <- 50     # independent optimization runs
ITE <- 2000    # max iterations per run
NPAR <- 100    # population size (now actually passed as NP below)
Bounds <- 10   # symmetric search-space bound per coordinate

# Pre-allocate the results vector instead of growing it inside the loop
# (the original `Y=0; X=0` grew Y element-by-element and never used X).
Y <- numeric(RUNS)
for (i in seq_len(RUNS)) {
  JDE_R <- JDEoptim(rep(-Bounds, dim), rep(Bounds, dim), Shubert3,
                    tol = 1e-100, NP = NPAR, trace = FALSE, maxiter = ITE)
  Y[i] <- JDE_R$value
}

# Summary statistics over all runs (output format unchanged)
MEAN <- mean(Y)
STD <- sd(Y)
MAX <- max(Y)
MIN <- min(Y)
cat('Shubert3 JDE DIM=',dim,'RUNS=',RUNS,'ITE=',ITE,'Bounds=',-Bounds,Bounds,'\n')
cat('MEAN=',MEAN,'\n')
cat('MAX',MAX,'\n')
cat('MIN=',MIN,'\n')
cat('STD',STD,'\n')
|
1e49cd2725c8e42b471b4c5be5be4dcf83c0db14
|
26c213b0b8a7720d4447660073503f661bcb7a46
|
/final project synthetic data.R
|
144c038baf5f251d1affb1d1e321439b0ce5b38b
|
[] |
no_license
|
brianpclare/SynetheticMedicareData
|
38ea45ff0f62452a80dd13375bdef14a18174328
|
4e4600724bc6fa6065d0b07434fad5515d7533a3
|
refs/heads/master
| 2020-03-09T21:53:08.599409
| 2018-04-20T20:57:20
| 2018-04-20T20:57:20
| 129,021,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,091
|
r
|
final project synthetic data.R
|
# Synthetic Medicare Claims Data - not included in Github repo, over file size limit
# Outpatient Claims - 154 MB file
# https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/DESample01.html
# Variable Guide
# https://www.cms.gov/Research-Statistics-Data-and-Systems/Downloadable-Public-Use-Files/SynPUFs/Downloads/SynPUF_Codebook.pdf
# page 7, page
library(tidyverse)
library(data.table)

# Read the raw outpatient claims file (fread warnings silenced deliberately).
suppressWarnings(
  outpatient <- fread("outpatient claims.csv")
)

# Patients with at least one primary Alzheimer's disease claim (ICD-9 3310),
# and all of their claims.
AD <- outpatient %>% filter(ICD9_DGNS_CD_1 == "3310") %>% select(DESYNPUF_ID) %>% unique()
AD_patient_claims <- outpatient %>% filter(DESYNPUF_ID %in% AD$DESYNPUF_ID)

# Frequency table of primary diagnosis codes among AD patients.
AD_patient_codes <- AD_patient_claims %>% filter(ICD9_DGNS_CD_1 != "") %>% filter(ICD9_DGNS_CD_1 != "OTHER") %>%
  group_by(ICD9_DGNS_CD_1) %>% summarize(count = n()) %>% arrange(desc(count))

AD_num_claims_by_patient <- AD_patient_claims %>% group_by(DESYNPUF_ID) %>% summarize(count = n())
summary(AD_num_claims_by_patient$count)

AD_claim_codes <- AD_patient_claims %>% select(Patient_ID = DESYNPUF_ID, Diagnosis = ICD9_DGNS_CD_1) %>%
  filter(Diagnosis != "") %>% filter(Diagnosis != "OTHER")

# Control pool: every patient never diagnosed with AD.
non_AD <- outpatient %>% select(DESYNPUF_ID) %>% filter(!(DESYNPUF_ID %in% AD$DESYNPUF_ID)) %>% unique()
non_AD_claims <- outpatient %>% filter(!(DESYNPUF_ID %in% AD$DESYNPUF_ID))

## Let's take a sample of 133 AD patients (all of them), 133 other patients
set.seed(740)
model_AD_IDs <- sample_n(AD, 133)
model_non_IDs <- sample_n(non_AD, 133)
model_AD <- AD_patient_claims %>% filter(DESYNPUF_ID %in% model_AD_IDs$DESYNPUF_ID)
model_non <- non_AD_claims %>% filter(DESYNPUF_ID %in% model_non_IDs$DESYNPUF_ID)

#Data Processing
#Step 1 Getting "Patient ID", "Diagnosis", and "Date"
model_AD_codes <- model_AD %>% select(Patient_ID = DESYNPUF_ID, Diagnosis = ICD9_DGNS_CD_1, Date = CLM_FROM_DT) %>%
  filter(Diagnosis != "") %>% filter(Diagnosis != "OTHER")
model_non_codes <- model_non %>% select(Patient_ID = DESYNPUF_ID, Diagnosis = ICD9_DGNS_CD_1, Date = CLM_FROM_DT) %>%
  filter(Diagnosis != "") %>% filter(Diagnosis != "OTHER")

#Step 2
#Split the AD sample data by patient
by_patient<-group_by(model_AD_codes,Patient_ID)
# NOTE(review): this local `split` shadows base::split from here on.
split<-split(by_patient, by_patient$Patient_ID)
#A function filtering out all the claims happened before the AD claim
myfunction <- function(PatientX) {
  # Keep only the claims dated strictly before this patient's AD claim
  # (primary ICD-9 diagnosis code "3310").
  ad_claim_dates <- PatientX$Date[PatientX$Diagnosis == "3310"]
  subset(PatientX, Date < ad_claim_dates)
}
#Repeat the process on each patient using "lapply"
AllPatient<-lapply(split, myfunction)
model_AD_patient_claims <- bind_rows(AllPatient)

# Label: 1 = AD case (pre-diagnosis claims only), 0 = control.
model_AD_patient_claims$AD <- 1
model_non_codes$AD <- 0

# Step 3, for all patients
total_model <- bind_rows(model_AD_patient_claims, model_non_codes) %>% filter(Diagnosis != "340")

# just stopping here to clean up the environment, all we really need now is total_model and I'll keep the
# tables that identify the patients by ID
rm(list = c("outpatient", "non_AD_claims", "non_AD", "model_non_codes", "model_non",
            "model_AD_patient_claims", "model_AD", "by_patient", "split", "AllPatient",
            "AD", "AD_claim_codes", "AD_num_claims_by_patient", "AD_patient_codes", "model_AD_codes",
            "AD_patient_claims"))

## Important parameter
claim_limit = 100
## Important parameter

# Cap each patient at claim_limit claims.
# NOTE(review): claims are sorted newest-first (desc(Date)) and tail() keeps
# the last claim_limit rows, i.e. the OLDEST claims — confirm this is intended.
split <- total_model %>% split(.$Patient_ID)
for(i in 1:length(split)){
  split[[i]] <- split[[i]] %>% arrange(desc(Date))
  num <- length(split[[i]]$Date)
  if(num > claim_limit){
    split[[i]] <- tail(split[[i]], claim_limit)
  }
}
total_model <- bind_rows(split)
rm("split")

# Step 4
# Build an integer index (1..num_diagnoses) for every distinct diagnosis code.
distinct_diagnoses <- total_model %>% ungroup %>% select(Diagnosis) %>% unique()
## Important parameter
num_diagnoses <- length(distinct_diagnoses$Diagnosis)
## Important parameter
distinct_diagnoses$index <- 1:num_diagnoses
# So there are num_diagnoses unique codes, which will be indexed from 1 to num_diagnoses
model_AD_IDs <- total_model %>% filter(AD == 1) %>% select(Patient_ID) %>% unique()
model_non_IDs <- total_model %>% filter(AD == 0) %>% select(Patient_ID) %>% unique()

# Recode each claim's Diagnosis string to its integer index.
for(i in 1:length(total_model$Diagnosis)){
  x <- total_model$Diagnosis[i]
  y <- distinct_diagnoses$index[distinct_diagnoses$Diagnosis == x]
  total_model$Diagnosis[i] <- y
}

# total_model <- total_model %>% select(-Date)
total_model <- total_model %>% arrange(Patient_ID, Date)

# One binary label per patient (max of the AD flag within each patient).
# NOTE(review): summarize() here relies on total_model still being grouped by
# Patient_ID (y_partial is later de-selected by Patient_ID) — verify upstream.
y_partial <- total_model %>% summarize(n = max(AD))
y_train <- y_partial %>% ungroup() %>% select(-Patient_ID) %>%
  unlist() %>% unname()

claim_counts <- total_model %>% select(Patient_ID) %>% summarize(count = n())
## Important parameter (in real problems, will = claim_limit)
max_claims <- max(claim_counts$count)
## Important parameter

# Left-pad each patient's diagnosis-index sequence with 0s to length
# max_claims and stack into the training matrix (patients x max_claims).
split_model <- total_model %>% split(.$Patient_ID) %>% unname()
for(i in 1:length(split_model)){
  split_model[[i]] <- split_model[[i]] %>% ungroup() %>% select(Diagnosis) %>%
    t() %>% unname()
  while(length(split_model[[i]]) < max_claims){
    split_model[[i]] <- append(split_model[[i]], 0, after = 0)
  }
}
input_train <- do.call(rbind, split_model)
|
027d66ff1261ccd24a70dbf71876cd82840230c9
|
7c5cf1fa0ca132146f7325f1af17b5e515d1adb1
|
/man/dot-addSurvivalPredictions.Rd
|
3518a109958170708ba1c89f941250a8bc61a39b
|
[] |
no_license
|
jspaezp/MSstats
|
00f2deebdfd502f7e07e4d3f00c5518212ceb03a
|
76af4698ca93c904bb8eae694dcf794f56e472ed
|
refs/heads/master
| 2023-04-29T19:11:17.476073
| 2023-03-27T09:49:46
| 2023-03-27T09:49:46
| 93,803,624
| 0
| 0
| null | 2018-05-29T02:11:38
| 2017-06-09T00:43:26
|
R
|
UTF-8
|
R
| false
| true
| 409
|
rd
|
dot-addSurvivalPredictions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_imputation.R
\name{.addSurvivalPredictions}
\alias{.addSurvivalPredictions}
\title{Get predicted values from a survival model}
\usage{
.addSurvivalPredictions(input)
}
\arguments{
\item{input}{data.table}
}
\value{
numeric vector of predictions
}
\description{
Get predicted values from a survival model
}
\keyword{internal}
|
68381950f74624cd323383c07f3991d4ba5b0538
|
5e88cabd66814e2edc394548f6c7d76c6511b41e
|
/man/ConfRatio.Rd
|
67d8c432d205c25feebf6386c8b06403dff81dab
|
[
"MIT"
] |
permissive
|
EarthSystemDiagnostics/paleospec
|
ba7125c62946eba4302e1aaf20e1f7170262809d
|
bf2086b9d4adb5c657af3863d15745a730f9b146
|
refs/heads/master
| 2023-09-01T07:23:35.955702
| 2023-06-18T15:18:16
| 2023-06-18T15:18:16
| 223,199,924
| 0
| 0
|
NOASSERTION
| 2023-06-18T15:18:18
| 2019-11-21T15:02:33
|
R
|
UTF-8
|
R
| false
| true
| 568
|
rd
|
ConfRatio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ConfRatio.R
\name{ConfRatio}
\alias{ConfRatio}
\title{Confidence Interval of ratios}
\usage{
ConfRatio(varratio, df.1, df.2, pval = 0.1)
}
\arguments{
\item{varratio}{a variance ratio}
\item{df.1}{degree of freedom of denominator}
\item{df.2}{degree of freedom of numerator}
\item{pval}{desired significance level, defaults to 0.1}
}
\value{
lower and upper confidence intervals
}
\description{
Confidence interval for a variance ratio based on the chi-square distribution.
}
\author{
Thomas Laepple
}
|
0cef39e44e771a05d52c9fd5743d8db8055a934a
|
053f4cf013243c844b2c7728438d4d6c314149dc
|
/R/placeholder.r
|
50a85853df8445d3342152b075d4b29ce5e398df
|
[] |
no_license
|
LeverageData/RTutor
|
e4269dbc509920449a1c549305ae920310c1bc2a
|
81a67b29f02d66a2ac44624383b2f052a7692e09
|
refs/heads/master
| 2023-01-13T12:26:13.610354
| 2020-11-11T17:07:14
| 2020-11-11T17:07:14
| 257,319,587
| 1
| 0
| null | 2020-04-20T18:04:05
| 2020-04-20T15:12:59
| null |
UTF-8
|
R
| false
| false
| 237
|
r
|
placeholder.r
|
get.placeholder = function(ps=get.ps()) {
  # Placeholder token configured in the problem set; falls back to "___"
  # when none is set.
  configured <- ps$rps$placeholder
  if (is.null(configured)) "___" else configured
}
has.call.placeholder = function(call) {
  # Deparse non-character calls to a single string, then look for the
  # internal placeholder marker ".PH_._" via the has.substr helper.
  code <- if (is.character(call)) call else deparse1(call)
  has.substr(code, ".PH_._")
}
|
4c276dfbb42698e2695587292a2f04730876c8c9
|
0bbaef2c499561083f1239b2ea5c95245b111d60
|
/man/workspace_sync.Rd
|
2f8b0b85f056ff3b0aee8d5d7d8d9676c871a164
|
[] |
no_license
|
vh-d/languageserver
|
81f8313cd5836dd977c33d1df3480aea39f97ed1
|
70279af46677906ce1a191e1aa35b752f5af75a0
|
refs/heads/master
| 2020-03-07T10:13:15.696417
| 2018-03-21T04:32:32
| 2018-03-21T04:32:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 348
|
rd
|
workspace_sync.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workspace.R
\name{workspace_sync}
\alias{workspace_sync}
\title{Determine workspace information for a given file}
\usage{
workspace_sync(uri, document)
}
\arguments{
\item{uri}{the file path}
\item{document}{the content of the file}
}
\description{
internal use only
}
|
45a3387e38e61f75ee1285e0e2a5dfe3dbcbb6a7
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/cenROC/man/RocFun.Rd
|
6dbc38ca0f978cba814bb60fcdaeec564cacb333
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,336
|
rd
|
RocFun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzzz.R
\name{RocFun}
\alias{RocFun}
\title{ROC estimation function}
\usage{
RocFun(U, D, M, bw = "NR", method, ktype)
}
\arguments{
\item{U}{The vector of grid points where the ROC curve is estimated.}
\item{D}{The event indicator.}
\item{M}{The numeric vector of marker values for which the time-dependent ROC curves is computed.}
\item{bw}{The bandwidth parameter for smoothing the ROC function. The possible options are \code{NR} normal reference method; \code{PI} plug-in method and \code{CV} cross-validation method. The default is the \code{NR} normal reference method.}
\item{method}{The method of ROC curve estimation. The possible options are \code{emp} for the empirical method; \code{untra} for smooth estimation without boundary correction; and \code{tra} for smooth ROC curve estimation with boundary correction.}
\item{ktype}{A character string giving the type kernel to be used: "\code{normal}", "\code{epanechnikov}", "\code{biweight}", or "\code{triweight}".}
}
\description{
ROC estimation function
}
\references{
Beyene, K. M. and El Ghouch A. (2019). Smoothed time-dependent ROC curves for right-censored survival data. <\url{https://dial.uclouvain.be/pr/boreal/object/boreal:219643}>.
}
\author{
Beyene K. Mehari and El Ghouch Anouar
}
\keyword{internal}
|
e9f0ddd56ae71b21431c3c72b3f8c66de9bbb74a
|
bf2f36573a114860dcedd02e16acab2391ef9c07
|
/development.R
|
4d0326b5c4164caee40b45b035eabf1cefcc60a7
|
[] |
no_license
|
romangerlach/rBExIS
|
f8f3b9c195ecb65fa0fc8f31e1a289d7e02c8b75
|
389bd1df142aa9ffa6486fcadef7648990b25204
|
refs/heads/master
| 2021-01-18T02:56:42.806188
| 2016-06-10T06:52:55
| 2016-06-10T06:52:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
development.R
|
# This is a development helper file. It hosts calls to help with the
# documentation and package checking and to try out new functions.

# load the devtools package (interactive helper script, hence require())
require("devtools")

# load/reload the bexis package functions from the local source directory
load_all("rBExIS")

# check package for consistency (runs R CMD check)
check("rBExIS")

# Here start functions implemented by the package
require(rBExIS)

## options command to query and set rbexis options
bexis.options()
bexis.options("base_url" = "http://bexis.inf-bb.uni-jena.de:2201")

# get data from bexis (fetch dataset with id 1)
bexis.get.dataset_by(id = 1)
|
1faa9973b05608ad0bf48557027d8202a182b28a
|
40b05699e3dea47eedd27bb4d03b065eaffec42a
|
/cachematrix.R
|
49b2669a641db79fa17112245bb95136f34f58dd
|
[] |
no_license
|
andylytics/ProgrammingAssignment2
|
a6f81da9498070e263504bf79df3868014597b0b
|
bc0cb772360d430fb6a18c3d81779bd059796d09
|
refs/heads/master
| 2021-01-12T21:45:07.417180
| 2014-04-26T02:27:49
| 2014-04-26T02:27:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,185
|
r
|
cachematrix.R
|
### Assignment 2, Coursera R Programming, 2014-04-25
## These two functions are intended to compute and cache the inverse of a supplied square matrix.
## Caching can improve the speed of a script or program by preventing the need to perform a
## computation redundantly
## the makeCacheMatrix function creates a list of functions that can cache the inverse of the
## matrix supplied in the parameter
makeCacheMatrix <- function(x = matrix()) {
  ## Closure-based cache pairing a matrix with its (lazily computed) inverse.
  ## Returns a list of accessors: set/get for the matrix itself, and
  ## setsolve/getsolve for the cached inverse (NULL until stored).
  cached_inverse <- NULL

  set <- function(new_matrix) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() x
  setsolve <- function(inverse) cached_inverse <<- inverse
  getsolve <- function() cached_inverse

  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## the cacheSolve function computes the inverse of the supplied matrix. If the function will
## return inverse from cache if the computation was previously performed and the matrix is
## unchanged
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held by a makeCacheMatrix object,
  ## reusing the cached value when available; extra arguments are
  ## forwarded to solve().
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data...")
    return(cached)
  }
  # Cache miss: compute the inverse and store it for next time
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
|
058cb7ad068049725fc2cc89b7a327ff4420b0c1
|
a30f11c8b9ebc0cedd06c23237ced83ef3abc32c
|
/Scripts/00_Functions.R
|
021b83b827651375c2b1ca68f12b888ec53b0375
|
[
"MIT"
] |
permissive
|
avaimar/social_cohesion
|
00770a182434fcd08d31d17e5c923bd5f0fe1181
|
7296a0cfa9510008232a0744b89ff3b8e6e2c765
|
refs/heads/main
| 2023-05-07T04:06:58.219528
| 2021-06-05T01:09:50
| 2021-06-05T01:09:50
| 366,936,146
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,316
|
r
|
00_Functions.R
|
generate_X_Y_W_C <- function(outcome, covariates) {
  # _______________________________________________________
  # Generates the X, Y, W and cluster components for a given outcome
  # and a set of covariates by eliminating missing observations
  # from the SC_Data dataset (a data.table expected to exist in the
  # calling environment). Assumes the treatment column is
  # named 'treatment', and cluster column is 'b_schoolid'.
  # Inputs:
  #  - outcome: (str) outcome column name in SC_Data
  #  - covariates: (vector of str) covariate column names
  # Returns:
  #  A list with components:
  #    X (design matrix from model.matrix), and
  #    Y, W, C (plain numeric vectors)
  # _______________________________________________________

  # Filter missing entries (complete cases over outcome + covariates +
  # treatment + cluster id)
  selected_cols <- c(outcome, covariates, 'treatment', 'b_schoolid')
  data <- SC_Data[, selected_cols, with=FALSE]
  data <- data[complete.cases(data)]

  # Setup formula and expand covariates into a design matrix
  fmla <- formula(paste0(outcome, '~', paste(covariates, collapse='+')))
  X <- model.matrix(fmla, data)
  W <- data[, .(treatment)]
  Y <- data[, outcome, with=FALSE]
  C <- data[, .(b_schoolid)]

  # Format Y, W, C as numeric vectors
  W <- as.numeric(W[[1]])
  Y <- as.numeric(Y[[1]])
  C <- as.numeric(C[[1]])

  list(X = X,Y = Y, W = W, C = C)
}
get_AIPW_scores <- function(var_list, cf) {
  # Doubly-robust (AIPW) scores built from fitted causal-forest quantities.
  #  - var_list: list with observed treatment W and outcome Y
  #  - cf: fitted forest exposing Y.hat (E[Y|X]), W.hat (propensity) and
  #    predictions (CATE estimates)
  # Returns the per-observation AIPW score vector.
  y_hat   <- cf$Y.hat
  e_hat   <- cf$W.hat
  tau_hat <- cf$predictions

  # Implied outcome models under control / treatment; see equations 6-8 in
  # https://grf-labs.github.io/grf/articles/muhats.html
  mu_hat_0 <- y_hat - e_hat * tau_hat        # E[Y|X,W=0]
  mu_hat_1 <- y_hat + (1 - e_hat) * tau_hat  # E[Y|X,W=1]

  # AIPW score: CATE estimate plus inverse-propensity-weighted residuals
  treated_term <- var_list$W / e_hat * (var_list$Y - mu_hat_1)
  control_term <- (1 - var_list$W) / (1 - e_hat) * (var_list$Y - mu_hat_0)
  tau_hat + treated_term - control_term
}
partial_dependence_single <- function(selected.covariate, covariates, type, X,
                                      causal.forest, grid_size=0){
  # Partial-dependence plot of the predicted CATE: vary one covariate over a
  # grid while holding every other covariate at its median.
  #  - selected.covariate: (str) covariate to vary
  #  - covariates: (vector of str) all covariate names present in X
  #  - type: 'binary' (grid = {0, 1}); any other value uses a numeric grid of
  #    length grid_size over the covariate's observed range
  #  - X: covariate matrix the forest was trained on
  #  - causal.forest: fitted causal forest (must support predict with
  #    estimate.variance)
  #  - grid_size: grid resolution for non-binary covariates
  # Returns a ggplot object with point estimates and ~95% CIs (+/- 2 SE).

  # Get data and define other covariates
  data <- as.data.frame(X)
  other.covariates <- covariates[which(covariates != selected.covariate)]

  # Define grid
  if (type == 'binary') {
    grid.size <- 2
    covariate.grid <- c(0, 1)
  } else {
    grid.size <- grid_size
    covariate.grid <- seq(min(data[,selected.covariate]),
                          max(data[,selected.covariate]), length.out=grid.size)
  }

  # Take median of other covariates (third arg F = drop = FALSE,
  # keeping a data frame even for a single column)
  medians <- apply(data[, other.covariates, F], 2, median)

  # Construct a dataset: medians replicated down the grid, plus the grid column
  data.grid <- data.frame(sapply(medians, function(x) rep(x, grid.size)), covariate.grid)
  colnames(data.grid) <- c(other.covariates, selected.covariate)

  # Expand the data into the same design-matrix form used for training
  fmla <- formula(paste0('~ ', paste(covariates, collapse = '+')))
  X.grid <- model.matrix(fmla, data.grid)

  # Point predictions of the CATE and standard errors
  forest.pred <- predict(causal.forest, newdata = X.grid, estimate.variance=TRUE)
  tau.hat <- forest.pred$predictions
  tau.hat.se <- sqrt(forest.pred$variance.estimates)

  # Plot predictions for each group and 95% confidence intervals around them.
  # NOTE(review): aes_string() is deprecated in recent ggplot2 — consider
  # migrating to aes(.data[[...]]) when the code is next touched.
  data.pred <- transform(data.grid, tau.hat=tau.hat,
                         ci.low = tau.hat - 2*tau.hat.se,
                         ci.high = tau.hat + 2*tau.hat.se)
  ggplot(data.pred) +
    geom_line(aes_string(x=selected.covariate, y="tau.hat", group = 1), color="black") +
    geom_errorbar(aes_string(x=selected.covariate, ymin="ci.low",
                             ymax="ci.high", width=.2), color="blue") +
    ylab("") +
    ggtitle(paste0("Predicted treatment effect varying '",
                   selected.covariate, "' (other variables fixed at median)")) +
    scale_x_continuous(selected.covariate, breaks=covariate.grid,
                       labels=signif(covariate.grid, 2)) +
    theme_minimal() +
    theme(plot.title = element_text(size = 11, face = "bold"))
}
school_level_heterogeneity <- function(var_list, covariates, tau.hat, cf){
  # Aggregate individual-level doubly-robust scores to the school level and
  # test for treatment-effect heterogeneity across schools.
  #  - var_list: list with X, Y, W and cluster ids C (school ids)
  #  - covariates: (vector of str) covariate names in var_list$X
  #  - tau.hat: individual CATE predictions
  #  - cf: fitted causal forest providing W.hat / Y.hat
  # Side effects: prints test_calibration() and a robust-SE OLS coeftest.

  # One-hot school membership matrix (one column per school, no intercept)
  school.mat <-
    model.matrix(~ b_schoolid + 0,
                 data = data.frame(var_list$X, b_schoolid = factor(var_list$C)))
  school.size <- colSums(school.mat)

  # School-level covariate means
  school.X <- (t(school.mat) %*% as.matrix(var_list$X[, covariates])) / school.size
  school.X <- data.frame(school.X)
  colnames(school.X) <- covariates

  # Compute doubly robust treatment estimates, then average within school
  dr.score = tau.hat + var_list$W / cf$W.hat *
    (var_list$Y - cf$Y.hat - (1 - cf$W.hat) * tau.hat) -
    (1 - var_list$W) / (1 - cf$W.hat) *
    (var_list$Y - cf$Y.hat + cf$W.hat * tau.hat)
  score <- t(school.mat) %*% dr.score / school.size

  # Regression forest analysis on school-level data
  school.forest <- regression_forest(school.X, score)
  # NOTE(review): school.pred is computed but never used — confirm before
  # removing.
  school.pred <- predict(school.forest)$predictions
  print(test_calibration(school.forest))

  # OLS of school scores on school covariates, heteroskedasticity-robust SEs
  school.DF <- data.frame(school.X, school.score=score)
  print(coeftest(lm(school.score ~ ., data = school.DF), vcov = vcovHC))
}
generate_X_Y_W_C_df <- function(outcome, covariates, df, treatment) {
  # _______________________________________________________
  # Generates the X, Y, W and cluster components for a given outcome
  # and a set of covariates by eliminating missing observations
  # from the supplied data frame. Assumes the treatment column is
  # named 'treatment', and cluster column is 'b_schoolid'.
  # NOTE(review): 'b_schoolid' and 'bstrata' are NOT added to selected_cols
  # here, so they must be passed inside `covariates` for the factor
  # conversions and the C extraction below to work — confirm with callers.
  # NOTE(review): the `treatment` argument is accepted but unused; the
  # 'treatment' column is accessed by its literal name.
  # Inputs:
  #  - outcome: (str) outcome column name in df
  #  - covariates: (vector of str) covariate column names
  #  - df: data frame holding outcome, covariates and treatment
  # Returns:
  #  A list with X, Y, W, C components each including a matrix
  #  or numeric vector
  # _______________________________________________________

  # Filter missing entries
  SC_table <- as.data.table(df)
  selected_cols <- c(outcome, covariates, 'treatment')
  data <- SC_table[, selected_cols, with=FALSE]
  data <- na.omit(data)

  # NOTE(review): debugging artifact — this writes the working data to disk
  # on every call; consider removing.
  write.csv(data,"unfactored.csv")

  data$b_schoolid = factor(data$b_schoolid)
  data$bstrata = factor(data$bstrata)

  # Setup formula and expand covariates into a design matrix
  fmla <- formula(paste0(outcome, '~', paste(covariates, collapse='+')))
  X <- model.matrix(fmla, data)
  W <- data[, 'treatment']
  #Y <- data[, outcome]
  Y <- data[, outcome, with=FALSE]
  C <- data[, 'b_schoolid']

  # Format Y, W, C as numeric vectors
  # (C becomes the numeric factor codes of b_schoolid)
  W <- as.numeric(W[[1]])
  Y <- as.numeric(Y[[1]])
  C <- as.numeric(C[[1]])

  list(X = X,Y = Y, W = W, C = C)
}
run_AIPW <- function(outcome,income,covariates,treatment,df) {
  # _______________________________________________________
  # Estimates the average treatment effect via AIPW using a
  # cluster-robust causal forest (grf).
  # Inputs:
  #  - outcome: (str) outcome column name
  #  - income: (str) baseline variable name, appended to covariates
  #  - covariates: (vector of str) covariate column names
  #  - treatment: (str) treatment variable name, forwarded to the
  #    data-preparation helper
  #  - df: dataframe
  # Returns:
  #  AIPW estimate and std.err from average_treatment_effect()
  #  with overlap weighting.
  # _______________________________________________________
  full_covariates <- c(covariates, income)
  model_inputs <- generate_X_Y_W_C_df(outcome, full_covariates, df, treatment)
  # Randomized setting: the assignment probability is known, so W.hat
  # is fixed at 0.5 rather than estimated from the data.
  fitted_forest <- causal_forest(
    X = model_inputs$X,
    W = model_inputs$W,
    Y = model_inputs$Y,
    clusters = model_inputs$C,
    W.hat = .5,
    num.trees = 100
  )
  average_treatment_effect(fitted_forest, target.sample = "overlap")
}
|
a3cb1effe726553c9f6ea50e853e09449fe03ef1
|
6f447146bd1a7ef9a17cb4fbcbd89682c64a7733
|
/man/cell2nb.Rd
|
bdf23e88c66dfde1d03c748ecc300abf0dcb3cc4
|
[] |
no_license
|
zhanglibin123kib/spdep
|
cf28eb2fe0e5f1e3f22a0e936cef61d7ce923a95
|
bc16d2c867224ad695638eeb67c964fb39cc9448
|
refs/heads/master
| 2023-03-18T07:56:39.431747
| 2021-03-05T09:09:26
| 2021-03-05T09:09:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,405
|
rd
|
cell2nb.Rd
|
% Copyright 2001 by Roger S. Bivand
\name{cell2nb}
\alias{cell2nb}
\alias{mrc2vi}
\alias{rookcell}
\alias{queencell}
\alias{vi2mrc}
\title{Generate neighbours list for grid cells}
\description{
The function generates a list of neighbours for a grid of cells. Helper
functions are used to convert to and from the vector indices for row and
column grid positions, and rook (shared edge) or queen (shared edge or
vertex) neighbour definitions are applied by type. If torus is TRUE, the
grid is mapped onto a torus, removing edge effects.
}
\usage{
cell2nb(nrow, ncol, type="rook", torus=FALSE, legacy=FALSE)
mrc2vi(rowcol, nrow, ncol)
rookcell(rowcol, nrow, ncol, torus=FALSE, rmin=1, cmin=1)
queencell(rowcol, nrow, ncol, torus=FALSE, rmin=1, cmin=1)
vi2mrc(i, nrow, ncol)
}
\arguments{
\item{nrow}{number of rows in the grid}
\item{ncol}{number of columns in the grid}
\item{type}{rook or queen}
\item{torus}{map grid onto torus}
\item{legacy}{default FALSE, nrow/ncol reversed, if TRUE wrong col/row directions (see \url{https://github.com/r-spatial/spdep/issues/20})}
\item{rowcol}{matrix with two columns of row, column indices}
\item{i}{vector of vector indices corresponding to rowcol}
\item{rmin}{lowest row index}
  \item{cmin}{lowest column index}
}
\value{
The function returns an object of class \code{nb} with a list of integer vectors containing neighbour region number ids. See \code{\link{card}} for details of \dQuote{nb} objects.
}
\author{Roger Bivand \email{Roger.Bivand@nhh.no}}
\seealso{\code{\link{summary.nb}}, \code{\link{card}}}
\examples{
nb7rt <- cell2nb(7, 7)
summary(nb7rt)
xyc <- attr(nb7rt, "region.id")
xy <- matrix(as.integer(unlist(strsplit(xyc, ":"))), ncol=2, byrow=TRUE)
plot(nb7rt, xy)
nb7rt <- cell2nb(7, 7, torus=TRUE)
summary(nb7rt)
# https://github.com/r-spatial/spdep/issues/20
GT <- GridTopology(c(1, 1), c(1,1), c(10, 50))
SPix <- as(SpatialGrid(GT), "SpatialPixels")
nb_rook_cont <- poly2nb(as(SPix, "SpatialPolygons"), queen=FALSE)
nb_rook_dist <- dnearneigh(coordinates(SPix), 0, 1.01)
all.equal(nb_rook_cont, nb_rook_dist, check.attributes=FALSE)
## [1] TRUE
t.nb <- cell2nb(nrow=50, ncol=10, type='rook', legacy=TRUE)
isTRUE(all.equal(nb_rook_cont, t.nb, check.attributes=FALSE))
## [1] FALSE
t.nb <- cell2nb(nrow=50, ncol=10, type='rook')
isTRUE(all.equal(nb_rook_cont, t.nb, check.attributes=FALSE))
## [1] TRUE
}
\keyword{spatial}
|
a47fb5e2d2874b1538453ca6d3b842dc2a59c848
|
0a7ae4e2439d60a6e0f0cc3cb5cdb94225e37211
|
/R/plot.toswp.R
|
0a5484b5a59caa2b90e8f19210d5b2dc2b23f92f
|
[] |
no_license
|
cran/BootWPTOS
|
2a348a545eddda918cf4e3d69683a20dc8afbf2e
|
345d143c842b942dcaffcfaf4edf4fdbaeeb40c6
|
refs/heads/master
| 2022-06-12T06:16:20.567081
| 2022-05-20T11:40:02
| 2022-05-20T11:40:02
| 62,325,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,867
|
r
|
plot.toswp.R
|
# Plot method for "toswp" (test of stationarity, wavelet packet) objects.
# Draws the original time series in light gray, then overlays one
# horizontal arrow per rejected Haar wavelet coefficient, grouped by
# Haar level (right-hand axis labels the levels).
#
# Args:
#   x:            toswp object; x$x is the original time series.
#   sub:          subtitle; defaults to a summary of the packet and the
#                 number of Bonferroni rejections.
#                 NOTE(review): the default string spells it
#                 "Bonferonni" — typo in the displayed label.
#   xlab:         x-axis label.
#   arrow.length: length of the arrow heads (passed to arrows()).
#   verbose:      if TRUE, print diagnostics and wait for input (scan()).
#   ...:          further arguments passed to ts.plot().
plot.toswp <-
function (x, sub = NULL, xlab = "Time", arrow.length = 0.05, 
    verbose = FALSE, ...) 
{
    object <- x
    x <- x$x
    if (is.null(sub)) 
        sub <- paste("Packet:", paste("(", object$level, ",", object$index,")", sep=""), "Using Bonferonni:", object$nreject, 
            " rejected.")
    #
    # Plot the original time series in gray
    #
    ts.plot(x, xlab = xlab, sub = sub, col = "gray80", ...)
    # Summary provides the list of rejected coefficients (rejlist);
    # nothing to annotate if it is NULL.
    st <- summary.toswp(object, quiet=TRUE)
    if (is.null(st))
        return(NULL)
    nreject <- st$nreject
    st <- st$rejlist
    # First element of each rejlist entry is its Haar level.
    stHlevs <- NULL
    for (i in 1:length(st)) {
        stHlevs <- c(stHlevs, st[[i]][1])
    }
    lyn <- min(stHlevs)
    lyx <- max(stHlevs)
    nHlevs <- length(lyn:lyx)
    # Partition the vertical data range into one band per Haar level.
    ry <- range(x)
    mny <- ry[1]
    mxy <- ry[2]
    mainy <- seq(from = mny, to = mxy, length = nHlevs + 1)
    littley <- seq(from = 0, to = (mainy[2] - mainy[1]), length = lyx - 
        lyn + 2)
    if (verbose == TRUE) {
        cat("nHlevs: ", nHlevs, "\n")
        cat("mny, mxy: ", mny, mxy, "\n")
        cat("mainy: ")
        print(mainy)
    }
    # Dashed separators between level bands; right axis labels levels.
    abline(h = mainy[1:(length(mainy) - 1)], lty = 2)
    axis(4, at = mainy[1:(length(mainy) - 1)], labels = lyn:lyx)
    J <- IsPowerOfTwo(length(x))
    # For each rejected coefficient draw an arrow spanning the time
    # support of that Haar coefficient at its level's band.
    for (i in 1:length(st)) {
        stH <- st[[i]][1]
        ix <- st[[i]][c(-1)]
        for (j in 1:length(ix)) {
            xl <- 2^(J - stH) * (ix[j] - 1)
            xr <- 2^(J - stH) * (ix[j])
            yy <- mainy[stH - min(stHlevs) + 1]
            #+ littley[stH - lyn + 1]
            arrows(x0 = xl, x1 = xr, y0 = yy, y1 = yy, code = 3, 
                col = 2, length = arrow.length)
            if (verbose == TRUE) {
                cat("stH: ", stH, "\n")
                cat("[xl, xt] ", xl, xr, mainy[stH - min(stHlevs) + 
                  1], "\n")
                scan()
            }
        }
    }
}
|
d08fe904c5baf7c14598d8dae861cd71ad013f53
|
6629f51c9381de154ff736fe296781abe18ec0c2
|
/shiny/HCT116graph/global.R
|
137b55ba14a630b7c4dbebdbaf6beb82948f390d
|
[] |
no_license
|
leeju-umich/Park_et_al_Cell_Reports_2020
|
f10a7e897bacda2644a75185751a51980d738f06
|
4c4585c9bd3382ad9074fa24b893ae10171d7e0e
|
refs/heads/master
| 2023-01-01T14:25:34.620153
| 2020-10-26T17:04:44
| 2020-10-26T17:04:44
| 281,723,115
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,256
|
r
|
global.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(Seurat)
library(stringr)
library(dplyr)
library (gtools)
library(tidyr)
# Appends the normalized expression of `gene` (read from the global
# Seurat object `obj`) as a new column of the global data frame `meta`.
# Returns TRUE on success; shows a Shiny notification and returns FALSE
# when the gene is not present in the normalized expression matrix.
# NOTE(review): writes to `meta` in the enclosing/global environment
# via `<<-` — intentional for this Shiny app's shared state.
addGene <- function(gene) {
  if ( gene %in% rownames(obj@assays$RNA@data) ) {
    meta[,gene] <<- obj@assays$RNA@data[gene,]
    return(TRUE)
  }
  else {
    showNotification(paste0("Gene ",gene," cannot be found in normalized expressions"))
    return(FALSE)
  }
}
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load the Seurat object and keep a small per-cell metadata frame
# (group/dose only) used throughout the app.
obj <- readRDS("../../rds/hct.mt_04142020.rds")
#meta <- obj@meta.data[,c('nCount_RNA','nFeature_RNA','percent.mt','group','dose')]
meta <- obj@meta.data[,c('group','dose')]
# Seed the metadata table with two genes of interest.
addGene('CCNE2')
addGene('CDKN1A')
# UI defaults: available columns and initial axis/color selections.
cols.all <- colnames(meta)
cols.default <- cols.all
cols.continuous <- c()
cols.discrete <- c()
x.selected <- 'UMAP1'
y.selected <- 'UMAP2'
color.selected <- 'CCNE2'
# Partition metadata columns into continuous vs discrete for the UI
# widgets.  NOTE(review): grows vectors with append() in a loop — fine
# at this size, though preallocation would be more idiomatic.
for(cn in colnames(meta)) {
  if ( is.numeric(meta[,cn]) ) {
    cols.continuous <- append(cols.continuous,cn)
  }
  else {
    cols.discrete <- append(cols.discrete,cn)
  }
}
|
d3804510acc3cc0631a512bef7f53c47ad49b110
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kwb.hantush/examples/hantushDistancesBaseProps.Rd.R
|
c0fdc5700e7bda5ddda4e3c97406ba7f28bae3a8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,008
|
r
|
hantushDistancesBaseProps.Rd.R
|
library(kwb.hantush)
### Name: hantushDistancesBaseProps
### Title: Hantush distances & base properties: allows input of vector of
###   x,y coordinates and also a vector for one of the base properties
### Aliases: hantushDistancesBaseProps
### ** Examples
# Base properties for the mounding calculation.  Exactly one property
# is given as a vector (`time` = 2^0 .. 2^6), which becomes the
# "changed base property" the result is grouped by.
baseProps <- baseProperties( time = 2^(0:6),
                             infiltrationRate = 1,
                             basinWidth = 10,
                             basinLength = 50,
                             horizConductivity = 10,
                             iniHead = 10,
                             specificYield = 0.2,
                             numberTimeSteps = 15)
res <- hantushDistancesBaseProps(baseProps = baseProps)
# One curve per value of the varied base property.
cols <- length(unique(res$dat[[res$changedBaseProp.Name]]))
mainTxt <- sprintf("Changed baseProperty: %s", res$changedBaseProp.Name)
# NOTE(review): xyplot() comes from the lattice package, which is not
# attached here — presumably loaded via kwb.hantush's dependencies.
xyplot(WLincrease ~ x,
       groups=res$dat[[res$changedBaseProp.Name]],
       data=res$dat,
       type="b",
       auto.key=list(columns=cols),
       main=mainTxt)
|
088f9a84e8d2bd96cea55e526ebcab5be745189e
|
9e29face83aae3213bbc6a156caab03b4fd2bf07
|
/R/plot.modgam.R
|
f3ed553b47e7a0f124a14bfa4cb433592912b5bc
|
[] |
no_license
|
cran/MapGAM
|
1a17f31700f02462c95ce14e384aff40695851a5
|
4d4cb56e3ec5abeeac7831503d7170fb9d0f398b
|
refs/heads/master
| 2023-07-22T15:30:56.523947
| 2023-07-15T11:00:02
| 2023-07-15T12:49:47
| 17,680,825
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,309
|
r
|
plot.modgam.R
|
#***********************************************************************************
#
# Maps Predicted Values and Clusters for modgam Objects
# Copyright (C) 2016, The University of California, Irvine
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
#  along with this library; if not, see <http://www.gnu.org/licenses/>.
#
#*******************************************************************************
# Plot method for "modgam" objects.  When pointwise confidence limits
# are available (and intervals = TRUE), draws a 1x3 panel of lower
# limit / point estimate / upper limit maps; otherwise draws the
# predictions alone.  All actual drawing is delegated to colormap().
#
# Args (selected):
#   x:         modgam object.
#   map:       optional base map passed to colormap().
#   exp:       if TRUE, plot exponentiated predictions (ratios).
#   intervals: if TRUE and conf.low/conf.high exist, plot CI panels.
#   mapmin/mapmax: color-scale limits; defaulted from the data below.
#   contours:  type of contour overlay ("none", "response",
#              "permrank", "interval").
#   alpha:     significance level used for permrank contour levels.
#   ...:       further arguments forwarded to colormap().
plot.modgam <- function(x, map = NULL, exp = FALSE, add = FALSE, intervals=TRUE, mapmin = NULL, 
                        mapmax = NULL, col.seq = diverge_hsv(201), anchor=FALSE, border.gray = 0.3, 
                        contours=c("none","response","permrank","interval"),
                        contours.drawlabels=FALSE, contours.lty=1, contours.lwd=1, contours.levels, 
                        contours.labcex=0.7, arrow=TRUE, axes=FALSE, ptsize=0.9, alpha=0.05, 
                        mai, legend.name = "predicted values",legend.cex=1,...) 
{
  modgamobj = x
  leglab = legend.name
  # Pick a family-appropriate default legend label unless the caller
  # supplied legend.name explicitly.
  if(missing(legend.name)){
    if(!is.null(modgamobj$family)){
      if(modgamobj$family[1]=="survival") leglab = if(exp) "hazard ratio" else "log hazard ratio"	
      if(modgamobj$family[1]=="binomial"&modgamobj$family[2]=="logit") leglab = if(exp) "odds ratio" else "log odds ratio"
      if(modgamobj$family[1]=="poisson"&modgamobj$family[2]=="log") leglab = if(exp) "risk ratio" else "log risk ratio"
    }
  }  
  contours = contours[1]
  ## contour settings: validate the requested contour type and fall
  ## back to "none" (with a warning) when prerequisites are missing.
  if(contours!="none"){
    if (contours=="permrank" && is.null(modgamobj$pointwise.permt)) {
      warning("permrank contours omitted because permute=0 or pointwise=FALSE in modgam")
      contours = "none"
    }
    if (contours=="interval" && any(c(is.null(modgamobj$conf.low),is.null(modgamobj$conf.high)))){
      warning("interval contours omitted because no conf.low or conf.high in modgam")
      contours = "none"
    }
    if(!is.element(contours,c("none","response","permrank","interval"))){
      warning("contours omitted because contours type not recognized")
      contours = "none"
    }
    if(contours == "response"){
      contours = "fit"
      if(missing(contours.drawlabels)) contours.drawlabels = TRUE
    }
    if(contours == "permrank"){
      contours = "pointwise.permt"
      if(missing(contours.levels)) contours.levels = c(alpha/2, 1-alpha/2)
      if(missing(contours.lwd)) contours.lwd = 2
      if(missing(contours.lty)) contours.lty = 1
    }
    if(contours == "interval"){
      if(missing(contours.levels)) contours.levels = c(-1,1)
      if(missing(contours.lwd)) contours.lwd = 2
      if(missing(contours.lty)) contours.lty = 1
    }
  }
  legend.add.line = if(exp) 1 else 0
  if(intervals & all(c(!is.null(modgamobj$conf.low),!is.null(modgamobj$conf.high)))){
    ## Plot the confidence bands as well as predictions by default.
    ## BUGFIX: the original passed rm.na=TRUE, which min()/max() do not
    ## recognize — the TRUE was absorbed into `...` as the value 1,
    ## silently capping mapmin at 1 / flooring mapmax at 1.
    if(is.null(mapmin)) 
      mapmin=min(if(exp)modgamobj$exp.conf.low else modgamobj$conf.low,na.rm=TRUE)
    if(is.null(mapmax)) 
      mapmax=max(if(exp)modgamobj$exp.conf.high else modgamobj$conf.high,na.rm=TRUE)
    mmai = if(missing(mai)) c(0,0,0.3,0) else mai
    legend.cex = legend.cex*1.4
    # Save the panel layout so it can be restored after the 1x3 plot.
    op.mfrow = par()$mfrow
    # tempobj1/tempobj2 reuse the modgam object with fit replaced by the
    # lower/upper confidence limits so colormap() can draw them.
    tempobj1 = modgamobj 
    tempobj1$fit = modgamobj$conf.low; tempobj1$exp.fit = modgamobj$exp.conf.low
    tempobj2 = modgamobj
    tempobj2$fit = modgamobj$conf.high; tempobj2$exp.fit = modgamobj$exp.conf.high
    par(mfrow = c(1,3))
    colormap(tempobj1, map, exp, add, mapmin, mapmax, col.seq, anchor, border.gray,contours, contours.drawlabels, contours.lty,
             contours.lwd, contours.levels, contours.labcex, 0, arrow, axes, ptsize,mmai,leglab,legend.cex, legend.add.line,
             ...)
    # NOTE(review): the title assumes predobj$level stores alpha (e.g.
    # 0.05 -> "95 % CI") — confirm against the predict method.
    title(main=paste(round((1-modgamobj$predobj$level)*100,2),"% CI (lower)"),cex.main=legend.cex)
    colormap(modgamobj, map, exp, add, mapmin, mapmax, col.seq, anchor, border.gray,contours, contours.drawlabels, contours.lty,
             contours.lwd, contours.levels, contours.labcex, 0, arrow, axes, ptsize,mmai,leglab,legend.cex, legend.add.line,
             ...)
    title(main="Point Estimate",cex.main=legend.cex)
    colormap(tempobj2, map, exp, add, mapmin, mapmax, col.seq, anchor, border.gray,contours, contours.drawlabels, contours.lty,
             contours.lwd, contours.levels, contours.labcex, 0, arrow, axes, ptsize,mmai,leglab,legend.cex, legend.add.line,
             ...)
    title(main=paste(round((1-modgamobj$predobj$level)*100,2),"% CI (higher)"),cex.main=legend.cex)
    par(mfrow=op.mfrow)
  }else
    ## Plot the predictions only
    colormap(modgamobj, map, exp, add, mapmin, mapmax, col.seq, anchor, border.gray,contours, contours.drawlabels, contours.lty,
             contours.lwd, contours.levels, contours.labcex,0, arrow, axes, ptsize, mai,leglab,legend.cex, legend.add.line,
             ...)
}
|
080fb4795cdc519022f6122c1567222dd019e7ee
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962324-test.R
|
1d0c442989fdb51fb2e7a69dfe379e251cb4a9e1
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
1609962324-test.R
|
# Auto-generated fuzzer (libFuzzer/RcppDeepState) regression input:
# replays a captured test case against the package-internal
# diffrprojects:::dist_mat_absolute and prints the result structure.
# The integer vectors (with NAs) are the raw fuzzed inputs — do not
# edit them by hand.
testlist <- list(x = c(-8399334L, 794034134L, NA, -54529L, -180L, 1291845631L, -1L, -1L, -1L, -1L, -1L, -16711681L, -1L, -687865865L, -2097153L, -1L, -65536L, 0L, -1L, -393258L, -2049L, -536871026L, -1L, -10726L, 803602431L, -1L, -10497L, -1L, -1L, -1L, -1L, -1L, -1L, -15060993L, 673513472L, 15728639L, -1L, -1L, -1L, -1L, -1L, -1L, -10726L, 805306149L, NA, -64767L, 0L, -42L, -1L), y = c(-1L, -2686977L, NA, NA, -1L, -704643072L, -268435457L, 5046271L, -449839105L, -16777216L, 169748991L, -54785L, -1L, -2687199L, -2820097L, -6316129L, -1616928865L, -539557889L, -1L, -42L, 441537023L, -1L, -2686977L, -134225921L, -702926875L, -256L, 0L, 16777215L, -1537L, -687865865L, -2097153L, -1895825409L, -42L, 439346687L, -1L, -42L, 439353128L, 620756992L, -268435457L, -1L, -10726L, 805306149L, 239L, -65536L, -42L, -1L, -687865865L, -2097153L, -524321L, -702926849L, 1277100031L, -10726L, 805306367L, -10497L, -524321L, -1L, -1L, -2752512L, 15728639L, -16757505L, -1757185L, -215L, 1289106227L, 859045728L, 439353343L, 721420287L, -2687199L, -2820097L, -6316129L, -1616912481L, -539557889L, -2745601L, -1L, -2687191L, 1277810176L, 7977L, -256L ))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
516291e2fea08a48a64fa16776f626a1fd1008bd
|
d9cb21e1111781770e9d9a48938d65c2662faab8
|
/R/removeRowsAndColumns.R
|
df4888d0f4c68ae89ac617d7b918075b1fa4cca1
|
[] |
no_license
|
Displayr/flipStandardCharts
|
2f30f8571160f263bd64f4ebaae5fc24cbe91815
|
d70f790a95906db1969689fe571918367cc4aaaa
|
refs/heads/master
| 2023-06-22T07:38:34.602645
| 2023-06-13T22:59:26
| 2023-06-13T22:59:26
| 56,812,768
| 5
| 5
| null | 2023-06-13T22:59:27
| 2016-04-21T23:57:16
|
R
|
UTF-8
|
R
| false
| false
| 711
|
r
|
removeRowsAndColumns.R
|
removeRowsAndColumns <- function(chart.matrix, rows.to.ignore = "", cols.to.ignore = "")
{
    ## Removes named rows/columns from a matrix-like object.
    ##
    ## Args:
    ##   chart.matrix:   matrix (or data.frame) with row/column names.
    ##   rows.to.ignore: comma-separated string of row names to drop.
    ##   cols.to.ignore: comma-separated string of column names to drop.
    ## Returns:
    ##   A matrix with the requested rows and columns removed.
    ##
    ## Split the comma-separated lists and trim surrounding whitespace
    ## (trimws replaces the hand-rolled gsub/sapply pipeline).
    remove.rows <- trimws(unlist(strsplit(rows.to.ignore, ",")))
    remove.cols <- trimws(unlist(strsplit(cols.to.ignore, ",")))
    ## Coerce to a df to get row/column names
    chart.matrix <- as.data.frame(chart.matrix)
    ## drop = FALSE keeps the data.frame (and its dimnames) intact even
    ## when a single row or column remains; previously a lone remaining
    ## column collapsed to a bare vector and lost its names.
    chart.matrix <- chart.matrix[!rownames(chart.matrix) %in% remove.rows, , drop = FALSE]
    chart.matrix <- chart.matrix[, !(colnames(chart.matrix) %in% remove.cols), drop = FALSE]
    as.matrix(chart.matrix)
}
|
3ca9f5ff75e519375d686e90e02af5083b339961
|
2d1f1df7d7d47e6981160a806c69cbe472936cfb
|
/R/jet.colors.R
|
d5283298447ab578179aa36fe311cedd3afca326
|
[
"Apache-2.0"
] |
permissive
|
anlopezl/predictiveModeling
|
113420e5aadc27025c062c4b32e420457230ee31
|
da1e5b299936733b2885ed01b03c99f2ca14ed18
|
refs/heads/master
| 2021-01-21T09:38:32.983257
| 2013-02-11T17:13:30
| 2013-02-11T17:13:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
jet.colors.R
|
library(RColorBrewer)
# Matlab-style "jet" palette generator (dark blue -> cyan -> yellow -> red).
jet.colors <- colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan", "#7FFF7F", 
                                 "yellow", "#FF7F00", "red", "#7F0000"))
# Diverging red -> white -> blue palettes (and the matching colorRamp).
redblue.colors <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#F4A582", "#FDDBC7",
                                     "#F7F7F7", "#D1E5F0", "#92C5DE", "#4393C3", "#2166AC", "#053061"))
redblue.ramp <- colorRamp(c("#67001F", "#B2182B", "#D6604D", "#F4A582", "#FDDBC7",
                            "#F7F7F7", "#D1E5F0", "#92C5DE", "#4393C3", "#2166AC", "#053061"))
# Same diverging palette reversed: blue -> white -> red.
bluered.colors <- colorRampPalette(c("#053061", "#2166AC", "#4393C3", "#92C5DE", "#D1E5F0",
                                     "#F7F7F7", "#FDDBC7", "#F4A582", "#D6604D", "#B2182B","#67001F"))
# Extended "heat" palette running black -> red -> yellow -> white.
heat.colors2 <- colorRampPalette(c("black", "#5C0000", "#B10000", "#FF0000", "orange", "yellow", "white"))
# ColorBrewer-derived palettes/ramps used elsewhere in the project.
RdYlBu.colors <- colorRampPalette(brewer.pal(11, "RdYlBu"))
RdBlBu.colors <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D", "#A9A9A9", "#4393C3", "#2166AC", "#053061"))
RdBlBu.ramp <- colorRamp(c("#67001F", "#B2182B", "#D6604D", "#A9A9A9", "#4393C3", "#2166AC", "#053061"))
purple.ramp <- colorRamp(brewer.pal(9, "Purples")[2:9], bias=2)
RdYlGn.ramp <- colorRamp(brewer.pal(11, "RdYlGn")[1:11])
RdYlBu.ramp <- colorRamp(brewer.pal(11, "RdYlBu")[1:11])
Spectral.ramp <- colorRamp(brewer.pal(11, "Spectral")[1:11])
# Linearly rescales a numeric vector to the [0, 1] range, ignoring NAs
# when computing the extremes (NAs themselves propagate to the output).
scale01 <- function(x) {
  shifted <- x - min(x, na.rm = TRUE)
  shifted / max(shifted, na.rm = TRUE)
}
|
b2b9a2edf50142b42dff72648de2fe9842fa07c8
|
fca72c9df63447447a30579c128c769908de8ef7
|
/3.R
|
840420aeca6791032c1cfbe7440025f0de1f165c
|
[] |
no_license
|
zhtmr/r
|
7ec8f1d783910ba76a875fdee86168d07cafbc8d
|
43e00da9222fa413d5fc8f489329eb3c40537c44
|
refs/heads/main
| 2023-01-25T05:05:41.023216
| 2020-11-23T11:20:50
| 2020-11-23T11:20:50
| 307,611,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,830
|
r
|
3.R
|
library(ggplot2)
# Create the plot background with displ on x and hwy on y, colored by drv
ggplot(data=mpg,aes(x=displ,y=hwy,color=drv))+
  # Add a scatter plot layer on the background
  geom_point(
    # Set the point size
    size=5
  )+
  # Restrict the axis ranges
  xlim(3,6)+
  ylim(10,30)+
  stat_smooth()
head(mpg)
ggplot(data=mpg,aes(x=cty,y=hwy,color=drv))+
  geom_point()+
  stat_smooth()
str(midwest)
ggplot(data=midwest,aes(x=poptotal,y=popasian,color=state))+
  geom_point()+
  xlim(0,500000)+
  ylim(0,10000)+
  stat_smooth()
library(dplyr)
# Mean highway mileage per drive type
df.mpg=mpg %>% 
  group_by(drv) %>% 
  summarise(mean_hwy=mean(hwy))
df.mpg
ggplot(data=df.mpg,
       aes(x=drv,y=mean_hwy))+
  geom_col()
ggplot(data=df.mpg,
       # Sort the bars using reorder()
       # +: ascending, -: descending
       aes(x=reorder(drv,mean_hwy),y=mean_hwy))+
  geom_col()
?geom_col()
ggplot(data=df.mpg,
       aes(x=reorder(drv,-mean_hwy),y=mean_hwy))+
  geom_col()
# Frequency bar chart
ggplot(data=mpg,aes(x=drv))+geom_bar(
  mapping = aes(
    x=drv,
    fill=drv
  )
)+
  theme(legend.position = "bottom",
        plot.title = element_text(
          size=30, face = 'bold'
        ))+
  scale_fill_manual(
    # Colors for each level
    values = c('red','green','blue'),
    # Legend title
    name="구동방식",
    # Labels for each level
    labels=c('4륜','전륜','후륜')
  )+
  labs(
    # Axis labels
    x='drv(구동)',
    y='count(빈도)',
    # Plot title
    title='구동별 빈도 수 분석',
    # Plot subtitle
    subtitle='막대그래프',
    # Caption (data source)
    caption='출처 : 제작팀'
  )+
  geom_text(
    # Print the count above each bar
    stat='count',
    aes(label=..count..),
    # Horizontal dodge width for the labels
    position=position_dodge(width=1.8),
    # Vertical position of the labels, +: down, -: up
    vjust=-0.5
  )+
  ylim(c(0,120))
# Which manufacturer's 'suv' models have the highest city mileage?
mpg=as.data.frame(ggplot2::mpg)
df.mpg=mpg %>% group_by(manufacturer) %>% filter(class=='suv')%>% summarise(mean_cty=mean(cty)) %>% arrange(desc(mean_cty)) %>% head(5)
ggplot(data=df.mpg,aes(x=reorder(manufacturer,-mean_cty),y=mean_cty))+
  geom_col(
    mapping = aes(
      x=reorder(manufacturer,-mean_cty),
      fill=reorder(manufacturer,-mean_cty),
    )
  )+guides(fill='none')+
  geom_text(
    aes(label=round(mean_cty,2)),
    vjust=2,
    colour='white'
  )+
  ggtitle('회사별 도시연비')+
  labs(
    x='회사명',
    y='도시연비'
  )+
  theme(
    plot.title = element_text(
      family='serif', size=15, face = 'bold', colour = 'darkblue', hjust = 0.5
    )
  )
# Which vehicle class is the most common?
ggplot(data=mpg,aes(x=class))+
  geom_bar(
    mapping = aes(
      x=class,
      fill=class
    )
  )+
  ggtitle('차종별 빈도 수 분석')+
  labs(
    x='class(차종)',
    y='count(빈도)',
    subtitle = '막대그래프'
  )+
  geom_text(
    stat = 'count',
    aes(label=..count..),
    vjust=-1,
    position=position_dodge(width = 1.8)
  )+
  scale_fill_discrete(
    name="차종"
  )
ggplot(data=mpg, aes(x=class))+geom_bar()
# Line chart: unemployment over time
ggplot(data = economics, aes(x=date, y=unemploy))+
  geom_line()
head(Orange)
table(Orange$Tree)
# Growth curve for tree 1 only
Orange %>% 
  filter(Tree==1) %>% 
  ggplot(aes(age,circumference))+
  geom_line()
ggplot(Orange, aes(age,circumference))+
  geom_line()
ggplot(Orange, aes(age,circumference, color=Tree))+
  geom_line()
ggplot(Orange, aes(age,circumference))+
  geom_line(aes(color=Tree))
ggplot(Orange, aes(age,circumference, color=Tree))+
  geom_line(linetype=6)+
  # Remove the panel background color
  theme(panel.background = element_blank())
ggplot(Orange, aes(age,circumference, color=Tree))+
  geom_line(aes(linetype=Tree))+
  # Remove the panel background color
  theme(panel.background = element_blank())
|
0e410d8765ff50f95b6147773bb6e19e028daa7b
|
a8c42539f00da72b7b4fdcf6939efb6f9e8b55f2
|
/vignettes/find_chr_variables_download_data.R
|
e191d868021bc615a466bffb4212ec58f95ea200
|
[] |
no_license
|
uva-bi-sdad/dc_county_health_rankings
|
1a5eb551d95f408744f90d52c7d36dc30d5e032a
|
c8be12638f8bab805697244b6625149cf952738a
|
refs/heads/main
| 2023-08-27T13:29:57.008263
| 2021-10-28T21:55:54
| 2021-10-28T21:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
find_chr_variables_download_data.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----load_column_definitions--------------------------------------------------
#load("data/county.hlth.rnks.columns.RData")
# NOTE(review): data() is normally called with the dataset name without
# the ".RData" extension — confirm this name resolves in the package.
data(county.hlth.rnks.columns.RData, package = "dc.chr.preventable.hospitalizations")
|
6bdbc92b05616449fa29ca451eaa572f8dab7fe8
|
9ef33787b83ad56a11d50dd43f477ff22eee5bcb
|
/R/stat_marimekko.R
|
13e0760215d8be68f2f6732c4a7382845de4422c
|
[] |
no_license
|
cran/ggTimeSeries
|
23c34bf61319ccd65ca49d9321c421fdd5e6a91b
|
7826f0133c9894c496bb41ee04af1e92a999376d
|
refs/heads/master
| 2022-02-06T05:16:51.826879
| 2022-01-23T15:22:42
| 2022-01-23T15:22:42
| 147,195,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,051
|
r
|
stat_marimekko.R
|
#' Marimekkofy
#'
#' Aggregates a data frame into the rectangle coordinates (xmin/xmax,
#' ymin/ymax per x/y bucket pair) needed to draw a marimekko (mosaic)
#' plot, with rectangle extents proportional to the summed weights.
#'
#' @param data dataframe; must also contain a \code{fill} column in
#'   addition to the bucket columns
#' @param xbucket name of the x bucket column
#' @param ybucket name of the y bucket column
#' @param weight name of the weight column; if \code{NULL} or absent
#'   from \code{data}, every row gets weight 1 (with a warning)
#'
#' @return a data.table with one row per bucket pair and the rectangle
#'   coordinate columns
#' @export
Marimekkofy <- function(data,
                        xbucket = "xbucket",
                        ybucket = "ybucket",
                        weight = NULL) {
  # Dummy bindings to silence R CMD check NOTEs about data.table NSE.
  xmax <- ""
  xmin <- ""
  ymax <- ""
  ymin <- ""
  fill <- ""
  data2 <- as.data.table(data)
  # BUGFIX: use scalar short-circuiting `||` instead of vectorized `|`.
  # With `|`, a NULL weight made the RHS logical(0), so the whole
  # condition became logical(0) and `if` raised "argument is of length
  # zero" instead of taking the default-weight branch.
  if (is.null(weight) || !weight %in% colnames(data2)) {
    warning("weight aesthetic not defined. Weight defaulted to 1 for every row.")
    data2[, weight := as.numeric(1)]
    weight <- "weight"
  }
  # Standardize column names so the data.table expressions below work.
  setnames(
    data2,
    c(xbucket, ybucket, weight),
    c("xbucket", "ybucket", "weight")
  )
  # Horizontal extents: cumulative weight shares across x buckets.
  dtX <- data2[
    ,
    list(weight = as.numeric(sum(weight))),
    xbucket
  ]
  setkey(dtX, xbucket)
  dtX[
    ,
    xmax := cumsum(weight / sum(weight))
  ]
  dtX[
    ,
    xmin := xmax - (weight / sum(weight))
  ][
    ,
    weight := NULL
  ]
  # Vertical extents: cumulative weight shares within each x bucket.
  dtY <- data2[
    ,
    list(weight = as.numeric(sum(weight))),
    list(
      xbucket,
      ybucket
    )
  ]
  setkey(dtY, xbucket, ybucket)
  dtY[
    ,
    ymax := cumsum(weight / sum(weight)),
    xbucket
  ]
  dtY[
    ,
    ymin := ymax - (weight / sum(weight)),
    xbucket
  ][
    ,
    weight := NULL
  ]
  # Combine horizontal and vertical extents, then re-attach fill.
  data <- merge(dtX,
    dtY,
    "xbucket",
    allow.cartesian = TRUE
  )
  data <- merge(
    data,
    data2[, list(xbucket, ybucket, fill)],
    c("xbucket", "ybucket")
  )
  # Restore the caller's original bucket column names.
  setnames(
    data,
    c("xbucket", "ybucket"),
    c(xbucket, ybucket)
  )
  return(data)
}
# ggproto Stat that computes the x-axis bucket labels for a marimekko
# plot: one text label per x bucket, horizontally centred on the
# bucket's extent and placed at `xlabelyposition` on the y axis.
# NOTE(review): uses `T` for TRUE below — reassignable shorthand kept
# byte-identical here.
StatMarimekkoLabels <- ggproto(
  "StatMarimekko",
  Stat,
  required_aes = c("xbucket", "ybucket"),
  setup_params = function(data, params) {
    # Default label height: just above the unit square of tiles.
    if (is.null(params$xlabelyposition)) {
      params$xlabelyposition <- 1.2
    }
    # Marimekkofy each facet panel separately, then collapse to one
    # (x, y, label) row per x bucket and panel.
    overalldata <- rbindlist(lapply(
      unique(data$PANEL),
      function(cPANEL) {
        Marimekkofy(
          data[cPANEL == data$PANEL, ],
          "xbucket",
          "ybucket",
          "weight"
        )[
          ,
          PANEL := cPANEL
        ]
      }
    ))
    overalldata <- overalldata[
      ,
      list(
        x = (xmin[1] + xmax[1]) / 2,
        y = params$xlabelyposition
      ),
      list(
        label = xbucket,
        PANEL
      )
    ]
    return(list(
      overalldata = overalldata,
      na.rm = T
    ))
  },
  # The per-group computation just hands back the precomputed labels.
  compute_group = function(data,
                           scales,
                           overalldata,
                           xlabelyposition) {
    data <- overalldata
    return(data)
  }
)
#' Transforms data for the tiles of the marimekko plot: computes one
#' rectangle (xmin/xmax/ymin/ymax) per x/y bucket pair via Marimekkofy,
#' handling each facet panel separately.
StatMarimekko <- ggproto(
  "StatMarimekko",
  Stat,
  required_aes = c("xbucket", "ybucket"),
  setup_params = function(data, params) {
    # Precompute all rectangles once, per panel.
    overalldata2 <- rbindlist(lapply(
      unique(data$PANEL),
      function(cPANEL) {
        Marimekkofy(
          data[cPANEL == data$PANEL, ],
          "xbucket",
          "ybucket",
          "weight"
        )[
          ,
          PANEL := cPANEL
        ]
      }
    ))
    return(list(
      overalldata2 = overalldata2,
      na.rm = T
    ))
  },
  # The per-group computation just hands back the precomputed tiles.
  compute_group = function(data,
                           scales,
                           overalldata2,
                           xlabelyposition) {
    data <- overalldata2
    return(data)
  }
)
#' Plot two categorical variables as a marimekko (mosaic) chart
#'
#' A marimekko plot, or a mosaic plot, visualises the co-occurrence of
#' two categorical / ordinal variables. In a time series, it could be
#' used to visualise the transitions from one state to another by
#' considering each state to be a category and plotting current
#' category vs. next category.
#'
#' @param mapping mapping
#' @param data data
#' @param show.legend logical
#' @param inherit.aes logical
#' @param na.rm logical
#' @param xlabelyposition y position at which the x bucket labels are
#'   drawn (defaults to 1.2, just above the tiles)
#' @param ... other arguments passed on to the rect and text layers
#'
#' @section Aesthetics: xbucket, ybucket, fill. Fill argument needs to be
#' assigned to ybucket, or some other column which is a one to one mapping of ybucket.
#' @section Cosmetic Tips: The minimalist look can be achieved by appending the
#' following chunk of code to the output object:
#' \code{
#' +
#' xlab(NULL) +
#' ylab(NULL) +
#' scale_fill_continuous(low = 'green', high = 'red') +
#' theme(
#'    axis.text = element_blank(),
#'    axis.ticks = element_blank(),
#'    legend.position = 'none',
#'    strip.background = element_blank(),
#'    # strip.text = element_blank(), # useful if only one year of data
#'    plot.background = element_blank(),
#'    panel.border = element_blank(),
#'    panel.background  = element_blank(),
#'    panel.grid = element_blank(),
#'    panel.border = element_blank()
#' )
#' }
#' @export
#' @import ggplot2
#' @examples {
#' library(ggplot2)
#' ggplot(
#'    data.frame(
#'       x1 = round(3 * runif(10000), 0),
#'       y1 = pmax(pmin(round(3 * rnorm(10000), 0), 3), -3),
#'       weight = 1:10000
#'    )
#' ) +
#'    stat_marimekko(
#'       aes(
#'          xbucket = x1,
#'          ybucket = y1,
#'          fill = factor(y1),
#'          weight = weight
#'       ),
#'       xlabelyposition = 1.1,
#'       color = 'black'
#'    )}
stat_marimekko <- function(mapping = NULL,
                           data = NULL,
                           show.legend = NA,
                           inherit.aes = TRUE,
                           na.rm = T,
                           xlabelyposition = NULL,
                           ...) {
  # Three components: rectangles for the tiles, text for the x bucket
  # labels, and a fixed 1:1 aspect ratio so the mosaic stays square.
  list(
    layer(
      stat = StatMarimekko,
      data = data,
      mapping = mapping,
      geom = "rect",
      position = "identity",
      show.legend = show.legend,
      inherit.aes = inherit.aes,
      params = list(xlabelyposition = xlabelyposition, na.rm = na.rm, ...)
    ),
    layer(
      stat = StatMarimekkoLabels,
      data = data,
      mapping = mapping,
      geom = "text",
      position = "identity",
      show.legend = show.legend,
      inherit.aes = inherit.aes,
      params = list(xlabelyposition = xlabelyposition, na.rm = na.rm, ...)
    ),
    coord_fixed()
  )
}
|
1b881e6f17d35c47d09664386e3dc6260e6ea443
|
d232d8214f9d216546c45ba5818acf45201200ec
|
/.Rproj.user/70A32BCB/sources/s-2ABB0572/084633B8-contents
|
7ef3b52606c51e8ee8edd5fef0de9e312bf381aa
|
[] |
no_license
|
Keniajin/alacohol_karuitha
|
1d47d090fedefb16234a1419c3631a2dfec660a1
|
4d42199082405e846fc8654aca4bec9dcf05b094
|
refs/heads/master
| 2022-11-23T13:41:08.828982
| 2020-07-30T09:42:27
| 2020-07-30T09:42:27
| 283,726,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,498
|
084633B8-contents
|
##THE BEER DEBATE PROJECT ----
# Objective- to get and clean data on beer ratings ----
# Set working directory ----
# NOTE(review): hard-coded absolute paths — this script only runs on the
# author's machine; consider relative paths or here::here().
setwd("C:\\Users\\John Karuitha\\OneDrive - University of Witwatersrand\\Documents\\My Thesis\\Karuitha and Ojah Data\\r_training\\beer_debate")
# NOTE(review): save.image() is called before any objects are created —
# presumably meant to run at the end of the session; confirm.
save.image("C:/Users/John Karuitha/OneDrive - University of Witwatersrand/Documents/My Thesis/Karuitha and Ojah Data/r_training/beer_debate/data_beer.R.RData")
# Load required packages ----
library(tidyverse)
library(rvest)
library(ggthemes)
library(plotly)
# scrape the data ----
# Each page holds a single HTML table of beer rankings; grab it.
# Top 250
url <- "https://www.beeradvocate.com/beer/top-rated/"
top_250 <- read_html(url) %>% 
  html_nodes("table") %>% 
  html_table() %>% 
  .[[1]]
# Trending 100
url2 <- "https://www.beeradvocate.com/beer/trending/"
trend_100 <- read_html(url2) %>% 
  html_nodes("table") %>% 
  html_table() %>% 
  .[[1]]
# New beers
url3 <- "https://www.beeradvocate.com/beer/top-new/"
new_beers <- read_html(url3) %>% 
  html_nodes("table") %>% 
  html_table() %>% 
  .[[1]]
# Fame beer
url4 <- "https://www.beeradvocate.com/beer/fame/"
fame_beers <- read_html(url4) %>% 
  html_nodes("table") %>% 
  html_table() %>% 
  .[[1]]
# Popular beer
url5 <- "https://www.beeradvocate.com/beer/popular/"
popular_beers <- read_html(url5) %>% 
  html_nodes("table") %>% 
  html_table() %>% 
  .[[1]]
# Rename columns and drop first row ----
# (the first row of each scraped table repeats the header)
# Top 250
names(top_250) <- c("weighted_rank", "beer", "no_of_ratings", 
                    "average_ratings", "my_rating")
top_250 <- top_250[-1,]
# Trending 100
names(trend_100) <- c("weighted_rank", "beer", "no_of_ratings", 
                      "average_ratings", "my_rating")
trend_100 <- trend_100[-1,]
# New beers
names(new_beers) <- c("weighted_rank", "beer", "no_of_ratings", 
                      "average_ratings", "my_rating")
new_beers <- new_beers[-1,]
# Fame beers
names(fame_beers) <- c("weighted_rank", "beer", "no_of_ratings", 
                       "average_ratings", "my_rating")
fame_beers <- fame_beers[-1,]
# Popular beers
names(popular_beers) <- c("weighted_rank", "beer", "no_of_ratings", 
                          "average_ratings", "my_rating")
popular_beers <- popular_beers[-1,]
## Merge the datasets and start cleaning ----
# NOTE(review): the five lists overlap, so the stacked data may contain
# duplicate beers; deduplicate before analysis if required.
full_beer_data <- rbind(top_250, trend_100, new_beers, 
                        fame_beers, popular_beers)
full_beer_data <- full_beer_data[,-c(1,5)]
## Feature engineering ----
# Pull the alcohol percentage (e.g. "8.50%") out of the beer string.
full_beer_data$alcohol_percent <- 
  str_extract(full_beer_data$beer, "\\d*\\.\\d\\d%")
## Remove % from the alcohol % column and make numeric
full_beer_data$alcohol_percent <- 
  str_remove_all(full_beer_data$alcohol_percent, "%") %>% 
  as.numeric()
## Remove , from rating columns and make numeric
full_beer_data$no_of_ratings <- 
  str_remove_all(full_beer_data$no_of_ratings, ",") %>% 
  as.numeric()
## Convert average ratings to numeric
full_beer_data$average_ratings <- 
  as.numeric(full_beer_data$average_ratings)
##Remove the % and | sign from the beer column ----
str_detect(full_beer_data$beer, "\\s*\\|\\s*\\d*\\.\\d*%")
full_beer_data$beer <- str_remove_all(full_beer_data$beer, 
                                      "\\s*\\|\\s*\\d*\\.\\d*%")
## Add beer type column ----
# The chunks below classify each beer by matching keywords in its name;
# each chunk only fills rows still NA, so order determines precedence.
full_beer_data$type <- NULL
## Adding stouts ----
full_beer_data$type <- ifelse(str_detect(full_beer_data$beer, 
                                         "[Ss]tout"), "Stout", NA)
## Adding IPA - Indian Pale Ale ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "IPA"), "IPA", full_beer_data$type)
## Adding IPA - Wild Ale ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ww]ild\\s*[Aa]le"), "Wild Ale", 
                              full_beer_data$type)
## Adding pale ale----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Pp]ale\\s*[Aa]le"), "Pale Ale", 
                              full_beer_data$type)
# Adding Strong ale -----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ss]trong Ale"), "Strong Ale", 
                              full_beer_data$type)
# Adding Farmhouse ale -----
# NOTE(review): this chunk's comment says "Farmhouse ale" but the
# pattern and label are "PLCOld Ale" — confirm which was intended.
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "PLCOld Ale"), "PLCOld Ale", 
                              full_beer_data$type)
# Adding Strong ale -----
# NOTE(review): duplicate of the Strong Ale chunk above — it is a no-op
# because matching rows were already labelled.
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ss]trong Ale"), "Strong Ale", 
                              full_beer_data$type)
## Adding IPA - Ale ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "Ale"), "Ale", full_beer_data$type)
## Adding lambic ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ll]ambic"), "Lambic", 
                              full_beer_data$type)
## Adding lager----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ll]ager"), "Lager", 
                              full_beer_data$type)
## Adding sour ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Ss]our"), "Sour", 
                              full_beer_data$type)
## Adding Barleywine ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Bb]arleywine"), "Barleywine", 
                              full_beer_data$type)
## Adding Porter ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) & 
                                str_detect(full_beer_data$beer, 
                                           "[Pp]orter"), "Porter", 
                              full_beer_data$type)
## Adding wheat beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Ww]heat Beer"), "Wheet Beer",
full_beer_data$type)
## adding Rye beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Rr]ye Beer"), "Rye Beer",
full_beer_data$type)
## adding Fruit and Field Beer----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Ff]ruit and Field Beer"),
"Fruit and Field Beer",
full_beer_data$type)
## Adding Pilsner ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Pp]ilsner"), "Pilsner",
full_beer_data$type)
## Adding Bock ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Bb]ock"), "Bock",
full_beer_data$type)
## Adding Tripelr ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Tt]ripel"), "Tripel",
full_beer_data$type)
## ## Adding Quadrupel (Quad) ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Qq]uadrupel\\s*\\(Quad\\)"), "Quadrupel (Quad)",
full_beer_data$type)
## Adding Dubbel ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Dd]ubbel"), "Dubbel",
full_beer_data$type)
## Adding Herb and Spice Beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Hh]erb and Spice Beer"),
"Herb and Spice Beer",
full_beer_data$type)
## Adding Champagne ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"[Bb]ière de Champagne \\/ Bière Brut"),
"Bière de Champagne / Bière Brut",
full_beer_data$type)
## Adding HausbrauereiAltbier ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"HausbrauereiAltbier"),
"HausbrauereiAltbier",
full_beer_data$type)
## Adding Kölsch ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"Kölsch"),
"Kölsch",
full_beer_data$type)
## Adding Brett Beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"Brett Beer"),
"Brett Beer",
full_beer_data$type)
## Adding Pumpkin Beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"Pumpkin Beer"),
"Pumpkin Beer",
full_beer_data$type)
## Adding California Common / Steam Beer ----
full_beer_data$type <- ifelse(is.na(full_beer_data$type) &
str_detect(full_beer_data$beer,
"California Common \\/ Steam Beer"),
"California Common / Steam Beer",
full_beer_data$type)
######################################################################################
## Add column for subtype and clean dataset ----
# Beers named like "Something - Barrel Aged" get the " - ..." suffix
# extracted as a subtype; everything else gets NA.
# NOTE(review): str_extract_all() returns a LIST column here, not a
# character vector -- confirm this is intended (str_extract() would give
# a plain character column).
full_beer_data$subtype <-
  ifelse(str_detect(full_beer_data$beer,
                    "\\s*\\-\\s*\\W*"), str_extract_all(full_beer_data$beer,
                    "\\s*\\-\\s*\\w*\\s*\\w*\\s*\\w*"), NA)
########################################################################################
## Feature engineer more ----
# Types ----
# Convert the character type column to a factor for plotting/modelling.
full_beer_data$type <- factor(full_beer_data$type)
#levels = names(sort(table(full_beer_data$type))))
class(full_beer_data$type)
##########################################################################################
##Visualize the data ----
##Option A: boxplots of ABV by type, ordered by median ABV, interactive via plotly.
# Helper: median that ignores NAs (reorder() needs a single-argument FUN).
median_na <- function(x)median(x,na.rm = T)
# Only keep types with at least 7 beers so the boxplots are meaningful.
ggplotly(full_beer_data %>% group_by(type) %>% filter(n() >= 7) %>% ungroup %>%
           ggplot(aes(x = reorder(type, alcohol_percent, median_na),
                      y = alcohol_percent, fill = type)) + geom_boxplot() +
           theme_hc() + theme(legend.position = "none") +
           theme(axis.text.x = element_text(angle = 90)))
### option B: same plot, but the median is precomputed as a column first.
df_plot <- full_beer_data %>% group_by(type) %>% filter(n() >= 7) %>%
  mutate(median_perc=median(alcohol_percent, na.rm = T))
p <- ggplot(df_plot,aes(x = reorder(type, median_perc),
                        y = alcohol_percent, fill = type)) + geom_boxplot() +
  theme_hc() + theme(legend.position = "none") +
  theme(axis.text.x = element_text(angle = 90))
ggplotly(p)
##Alcohol content vs ratings ----
# Scatter of ABV against average rating, coloured by beer type.
ggplotly(full_beer_data %>% group_by(type) %>% ggplot(aes(x = alcohol_percent,
                                                          y = average_ratings,
                                                          color = type)) +
           geom_point(alpha = 0.5))
|
|
2fe07911617aa5ca478991b58e9164990b2a3cff
|
110f1df5d85bd38a51b2ebba882636c35ce3dc3e
|
/model_fitting.R
|
c48ca2584b17ac1ec9614c866fd154e22a9859eb
|
[] |
no_license
|
xdu071/quant_skills_1
|
372d0f9c8fde18c2241edc283d3b36eb05c9632c
|
2b81682384df6b288783f3137da142d4f8e6b688
|
refs/heads/main
| 2023-09-03T12:00:38.738687
| 2021-11-08T17:20:21
| 2021-11-08T17:20:21
| 425,875,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,051
|
r
|
model_fitting.R
|
# Model fitting and Generalized Linear Model
# This file contains a tutorial material for fitting models and constructing Generalized Linear Model
# Edited by David Du
# Nov-08-2021
# Note: model fit refers to investigating the R-squard adj value
# Note: AIC -> Akaike Information Criterion -> refers to the likelihood of the model
# Import data ----
soils <- read.csv("Peru_Soil_Data.csv", row.names=1, stringsAsFactors=T)
## Exercise 1: Exploring Model Fit ----
# Creating different statistical models (nested candidates for Soil_pH)
lm_Habitat <- lm(Soil_pH~Habitat,data=soils)
lm_TBS <- lm(Soil_pH~Total_Base_Saturation,data=soils)
lm_Habitat_TBS <- lm(Soil_pH~Habitat + Total_Base_Saturation,data=soils)
lm_Habitat_TBS_Interaction <-lm(Soil_pH~Habitat*Total_Base_Saturation, data=soils)
# Compare their AIC values -> lower AIC, the better. AIC is the only way to compare models of non-continuous variables
AIC(lm_Habitat, lm_TBS, lm_Habitat_TBS, lm_Habitat_TBS_Interaction) # df -> # of variables estimated
# Calculating AIC by hand: AIC = 2k - 2*logLik, with k = df (here 4)
logLik(lm_Habitat_TBS) #gives a value of 3.59134
2*4 - 2*3.59134
# Constructing a null (intercept-only) model as a baseline
lm_null <- lm(Soil_pH ~ 1 , data=soils)
AIC(lm_null, lm_Habitat, lm_TBS, lm_Habitat_TBS) # Consider if the simple models are even worth assessing
## Exercise 2: Generalised Linear Models (GLMs) with a Poisson Response ----
# Another dataset: Inga tree abundance counts per plot
inga <- read.csv("Inga_abundances.csv",row.names=1,stringsAsFactors=T)
# Combine the soil and tree databases column-wise (rows are the same plots)
combo <- cbind(soils,inga)
# Assess how the abundance of different species varies with soil characteristics and habitat type
mod1 <- glm(thibaudiana~Habitat, data=combo, family=poisson) # family = poisson since we are dealing with count variable
mod1
# Poisson GLM uses a log link, so coefficients must be exponentiated to
# get back to the count scale.
exp(-1.946) # this is the true value of the intercept (floodplain mean)
mean(combo$thibaudiana[combo$Habitat=="floodplain"])
# We try to do the same for upland habitat (intercept + upland coefficient)
exp(-1.946+4.518)
mean(combo$thibaudiana[combo$Habitat=="upland"])
summary(mod1)
# Compare using AIC against an intercept-only null model
mod_null <- glm(thibaudiana~1,data=combo,family=poisson)
AIC(mod_null,mod1)
# Try a second model predicting abundance with pH
mod2 <- glm(thibaudiana~Soil_pH,data=combo,family=poisson)
summary(mod2)
AIC(mod_null,mod1,mod2) # not as good as model 1
# Try with combo model and model with interaction elements
mod3 <- glm(thibaudiana~Habitat + Soil_pH,data=combo,family=poisson)
mod4 <- glm(thibaudiana~Habitat * Soil_pH,data=combo,family=poisson)
AIC(mod_null,mod1,mod2,mod3,mod4) # mod3 is the best
# Mod 3 is the best, explore different aspects of the model
summary(mod3)
# Plot predicted versus observed (points on the 1:1 line are perfect fits)
plot(combo$thibaudiana,fitted(mod3),xlab="Observed",ylab="predicted")
abline(0,1) # see what we underpredict in the middle abundance
# Residuals versus our explanatory variables
par(mfrow=c(1,2))
plot(resid(mod3)~Habitat,data=combo) # variability around the predicted mean in uplands
plot(resid(mod3)~Soil_pH,data=combo) # a lot of variability at lower pH levels
# See how predicted values compare to our observed values, split by habitat
combo_fp <- combo[combo$Habitat=="floodplain",]
combo_up <- combo[combo$Habitat=="upland",]
plot(combo$Soil_pH,combo$thibaudiana,type="n")
points(combo_fp$Soil_pH,combo_fp$thibaudiana,pch=21,bg="blue") # floodplain
points(combo_up$Soil_pH,combo_up$thibaudiana,pch=24,bg="red") # uplands
points(combo$Soil_pH,fitted(mod3), col="grey") # predicted values
## Exercise 3: Generalised Linear Models (GLMs) with a Binomial Response ----
## ***Key information -> fitted binomial regression values are the probability of observing a 1 (presence) per unit of explanatory variable
# Look at presence-absence of Inga auristellae (normally overdispersed if you investigate with poisson models)
# Recode the count column into a 0/1 presence-absence indicator.
combo$auristellae_PA <- combo$auristellae
combo$auristellae_PA[combo$auristellae_PA>0] <- 1 # create new "binary variable" signifying presence-absence of the tree
mod1 <- glm(auristellae_PA~Soil_pH,data=combo,family=binomial) # explain presence absence by pH
summary(mod1) # Pay attention to (- or +) only. Est = -1.3961, this means as soil pH increases, our tree is less likely to be present
# Compare model to null to see if soil pH actually significantly affects presence and absence of our tree
mod_null <- glm(auristellae_PA~1,data=combo,family=binomial)
AIC(mod_null, mod1) # our model has lower AIC, so significant
# Compare with other models with habitat (habitat, habitat combo, habitat interaction)
mod2 <- glm(auristellae_PA~ Habitat,data=combo,family=binomial)
mod3 <- glm(auristellae_PA~Soil_pH + Habitat,data=combo,family=binomial)
mod4 <- glm(auristellae_PA~Soil_pH * Habitat,data=combo,family=binomial)
AIC(mod_null, mod1, mod2, mod3, mod4) # our previous model is still the best, also model 1 and 2 have similar explanatory power
# Plot the fitted presence probability (grey) over the observed 0/1 data
plot(auristellae_PA~Soil_pH,data=combo,pch=16)
points(combo$Soil_pH,fitted(mod1), col="grey")
|
2bcf56930f97b107b560259e04282961e14232c1
|
898f5b96a57ead23a7a71ffe503455a6e9aa4d1d
|
/complete.R
|
6aaa89e965c497b8c40a7c3e6c83aed75a02da52
|
[] |
no_license
|
BearyTatsumaki/datasciencecoursera
|
e5c8ba32a5465b483c9510077491e2bb77a4c219
|
d490d0c4b0f1933e767af797579b70c2d12cb20d
|
refs/heads/master
| 2022-11-20T18:50:28.139630
| 2020-07-27T14:52:47
| 2020-07-27T14:52:47
| 280,234,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
complete.R
|
complete <- function(directory, id = 1:332)
{
        # Count complete (NA-free) observations in each monitor file.
        #
        # directory: name of the folder (relative to the working directory)
        #            holding the monitor CSV files, e.g. "specdata"
        # id:        integer vector of monitor IDs to summarise
        #
        # Returns a data frame with one row per requested id:
        #   id   - the monitor ID
        #   nobs - number of rows with no missing values in that file
        #
        # Fixes vs. the original:
        #  - monitor files are zero-padded to THREE digits ("001.csv");
        #    formatC(x, 2, flag = 0) built "01.csv" for single-digit ids
        #  - rbindlist()/fread were used without attaching data.table
        #    (rbindlist was unqualified and would error); base read.csv
        #    is sufficient here
        #  - results are collected with vapply instead of growing vectors
        #    with c() inside the loop
        nobs <- vapply(id, function(x) {
                file <- file.path(getwd(), directory,
                                  paste0(formatC(x, width = 3, flag = "0"), ".csv"))
                dt <- read.csv(file)
                # complete.cases() flags rows with no NA in any column
                sum(complete.cases(dt))
        }, numeric(1))
        data.frame(id = id, nobs = nobs)
}
|
22b716719e3de806240f2cb3a473da40aca80631
|
9ecd686648f3f0c1eb2c640719da8b133da1cf24
|
/R/affil_df.R
|
b5b2f6511a00c448124376f5f2709e6aaaccb4ac
|
[] |
no_license
|
muschellij2/rscopus
|
55e65a3283d2df25d7611a4ace3f98bd426f4de4
|
bf144768698aaf48cb376bfaf626b01b87a70f73
|
refs/heads/master
| 2022-01-02T15:18:19.295209
| 2021-12-18T00:37:22
| 2021-12-18T00:37:22
| 44,287,888
| 65
| 18
| null | 2021-12-18T00:23:52
| 2015-10-15T02:06:02
|
R
|
UTF-8
|
R
| false
| false
| 2,032
|
r
|
affil_df.R
|
#' @title Search Affiliation Content on SCOPUS
#'
#' @description Searches SCOPUS to get information about documents on an affiliation
#' @param affil_id Affiliation ID number.
#' @param affil_name affiliation name.  Disregarded if \code{affil_id} is
#' specified
#' @param verbose Print diagnostic messages
#' @param api_key Elsevier API key
#' @param facets Facets sent in query.  See \url{https://dev.elsevier.com/api_docs.html}
#' @param sort sorting sent to query
#' @param ... Arguments to be passed to \code{\link{author_search}}
#' @export
#' @seealso \code{\link{get_author_info}}
#' @return List of entries from SCOPUS
#' @note The \code{affil_data} command will return the list of all entries as well as
#' the \code{data.frame}
affil_df = function(
  affil_id = NULL,
  affil_name = NULL,
  api_key = NULL,
  verbose = TRUE,
  facets = NULL,
  sort = "document-count",
  ...){

  # Delegate to affil_data and keep only the data.frame part.
  # Bug fix: api_key was accepted but never forwarded, so a user-supplied
  # key was silently ignored and affil_data fell back to its own default.
  L = affil_data(
    affil_id = affil_id,
    affil_name = affil_name,
    api_key = api_key,
    verbose = verbose,
    facets = facets,
    sort = sort,
    ... = ...)
  df = L$df
  return(df)
}
#' @rdname affil_df
#' @export
affil_data = function(
  affil_id = NULL,
  affil_name = NULL,
  api_key = NULL,
  verbose = TRUE,
  facets = NULL,
  sort = "document-count",
  ...){

  # entries = affil_search(
  #   affil_id = affil_id,
  #   verbose = verbose, ...)$entries
  # If no affiliation ID was supplied, resolve it from the name first.
  if (is.null(affil_id)) {
    res = process_affiliation_name(
      affil_id = affil_id,
      affil_name = affil_name,
      api_key = api_key, verbose = verbose
    )
    affil_id = res$affil_id
  }
  # Strip the "AFFILIATION_ID:" prefix so a raw numeric id is sent on.
  affil_id = gsub("AFFILIATION_ID:", "", affil_id, fixed = TRUE)
  # NOTE(review): api_key is not passed explicitly to
  # author_search_by_affil here -- presumably it is forwarded via `...`
  # or picked up from the environment; confirm.
  entries = author_search_by_affil(
    affil_id = affil_id,
    verbose = verbose,
    facets = facets,
    sort = sort,
    ...)
  # author_search_by_affil returns entries plus query metadata; flatten
  # the entries into a data.frame and tag each row with the affil_id.
  total_results = entries$total_results
  facets = entries$facets
  entries = entries$entries
  df = gen_entries_to_df(
    entries = entries)
  df$df$affil_id = affil_id
  L = list(entries = entries, df = df)
  L$total_results = total_results
  L$facets = facets
  return(L)
}
|
523a6b3e36f7bdae7356db08f48f60c6c7e15666
|
a2a1d0eec6173e66040071d696f556215603d593
|
/man/A000108.Rd
|
3c7e397da99bb5165a132241f8f68f02d3e851cd
|
[] |
no_license
|
cran/Zseq
|
08ffaf47a36c16d95c294d97b0eac6f6d4d31732
|
1dfdcf0c142076df3ff5337a10af4d570a86cefa
|
refs/heads/master
| 2022-09-28T02:32:44.076755
| 2022-09-07T06:50:18
| 2022-09-07T06:50:18
| 108,447,611
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 757
|
rd
|
A000108.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Catalan.R
\name{Catalan}
\alias{Catalan}
\alias{A000108}
\alias{Segner}
\title{Catalan numbers}
\usage{
Catalan(n, gmp = TRUE)
}
\arguments{
\item{n}{the number of first \code{n} entries from the sequence.}
\item{gmp}{a logical; \code{TRUE} to use large number representation, \code{FALSE} otherwise.}
}
\value{
a vector of length \code{n} containing first entries from the sequence.
}
\description{
Under OEIS \href{https://oeis.org/A000108}{A000108}, the \emph{n}th \emph{Catalan} number is given as
\deqn{C_n = \frac{(2n)!}{(n+1)!n!}}
where the first 6 entries are 1, 1, 2, 5, 14, 42 with \eqn{n\ge 0.}
}
\examples{
## generate first 30 Catalan numbers
print(Catalan(30))
}
|
679c5104ee5e86432244d19733ea7790378e4bc0
|
9dd3e7638423ca7e8c52875f8b43445177f69054
|
/PaperData/Sect. 4.5/GenerateGraph.R
|
5c98aa351dafcbfe9e35693741c76e302b7b108e
|
[] |
no_license
|
douglaspasqualin/Bench-20-STM
|
c9d191a4bce6708be03046d2c05330947f9a5895
|
a2c0df9ad88a683fb08abddeb60f670e52bb5a8e
|
refs/heads/master
| 2023-03-05T19:49:33.080419
| 2021-02-17T17:32:26
| 2021-02-17T17:32:26
| 299,409,095
| 0
| 0
| null | 2020-10-06T22:20:07
| 2020-09-28T19:18:27
|
C
|
UTF-8
|
R
| false
| false
| 1,022
|
r
|
GenerateGraph.R
|
library(ggplot2)
# Read the dynamic MSE measurements (comma-separated, with header).
csv <- read.table("mseDynamic.txt", sep = ",", header = TRUE)
#remove rows with ID zero (no MSE, first matrix collected)
# NOTE(review): the name `table` shadows base::table() for the rest of
# this script; consider renaming.
table <- subset(csv, id != 0)
yticks <- c(0, 250, 500, 750, 1000, 1250, 1500)
# One MSE line per application, with distinct linetype/colour/shape.
graph <-
  ggplot(data = table, aes(x = id, y = mse, group = app)) +
  theme_bw() +
  theme(
    legend.position="bottom",
    legend.title = element_blank(),
    plot.title = element_text(hjust = 0.5, face = "bold"),
    panel.border = element_blank(),
    text = element_text(size = 15),
    plot.subtitle = element_text(hjust = 0.5, face = "italic")
  ) +
  geom_line(aes(
    linetype = app,
    group = factor(app),
    color = app
  ),
  size = 0.5) +
  geom_point(aes(color = app, shape = app), size = 2) +
  xlab("Execution phase") +
  ylab("MSE") +
  scale_shape_manual(values = c(4, 8, 15, 16, 17, 18, 21, 22, 3, 42))+
  scale_y_continuous(limits=c(0,1400), breaks=yticks, labels=yticks)
# Write the figure as a vector PDF via the cairo device.
ggsave(plot = graph, file = "Figure_7.pdf", device = cairo_pdf, width = 15, height = 9, units = "cm")
|
d364f2044fa6653b7adcb579a902816ce982944e
|
79f51a99ceac749670589464fb6b9bc78fe2e95c
|
/StepE.R
|
3700b1f046d81b2ca618df758cb27d9693e2af05
|
[] |
no_license
|
fall2018-saltz/ananth_gv_svm
|
a26c1a2ca191978c0c87906351b7e9378f921129
|
898879df3cff94ac99db2199e6cc9f1e49cd5d9c
|
refs/heads/master
| 2020-04-06T16:40:05.583231
| 2018-11-24T07:32:36
| 2018-11-24T07:32:36
| 157,629,189
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,765
|
r
|
StepE.R
|
###################################################################
#Part D: Predict Values in the Test Data and Create a Confusion Matrix
# 8. Use the predict( ) function to validate the model against test data. Assuming that you put the output from the ksvm( ) call into svmOutput and that your test data set is in a data frame called testData, the call would be:
#     svmPred <- predict(svmOutput, testData, type = "votes")
# 9. Now the svmPred object contains a list of votes in each of its rows. The votes are either for "happy" or "notHappy". Review the contents of svmPred using str( ) and head( ).
# 10. Create a confusion matrix (a 2 x 2 table) that compares the second row of svmPred to the contents of testData$happy variable.
# 11. Calculate an error rate based on what you see in the confusion matrix. See pages 243-244 for more information
library(kernlab)
svmPred1 <- predict(kernfit, testData, type="votes") # predict the test data on top of trained model using predict function. votes are for TRUE or FALSE(Happy Not Happy)
str(svmPred1) # str function is used to get the structure of the prediction
# NOTE(review): the assignment text above says to compare the SECOND row of
# the votes matrix, but svmPred1[1,] takes the FIRST row -- confirm which
# class each row of the votes matrix corresponds to.
confusionMatrix1 <- table(svmPred1[1,],testData$overall_satisfaction_greater_8) # confusion matrix is created confusion matrix (a 2 x 2 table) that compares the second row of svmPred to the contents of testData$happy variable.
confusionMatrix1
# error rate
# NOTE(review): for a table(predicted, observed), the diagonal cells
# [1,1] and [2,2] are normally the CORRECT classifications, so this ratio
# would be the accuracy rather than the error rate -- verify the table's
# orientation before reporting 0.095 as an "error rate" (the off-diagonal
# sum would be the misclassification rate).
rate1 <- (confusionMatrix1[1,1]+confusionMatrix1[2,2])/sum(confusionMatrix1)
rate1
#0.09538092
# the error rate is considerably low, which indicates that our model performed well taking into consideration many attributes like -
# "hotelFriendly", "checkInSat", "lengthOfStay", "hotelClean", "guestAge", "hotelSize", "whenBookedTrip"
|
999a46d405c00321b487e086a1335cf9db2f2179
|
046d88173798e32f626ae598547496da898d49f5
|
/R/shannon.R
|
eb09537bab1d08af21d9244bbe78fccf13730f63
|
[] |
no_license
|
wisekh6/epihet
|
6199b38362200bd3f2826d716fecb653176a679b
|
9cff2f91a1af377df712dbe6c3cbb5748e1bf860
|
refs/heads/master
| 2023-02-06T06:53:28.256472
| 2020-12-21T15:26:58
| 2020-12-21T15:26:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
shannon.R
|
#' @title Shannon Entropy
#'
#' @description
#' Computes the Shannon entropy of a vector of epiallele probabilities
#' expressed as percentages (i.e. summing to 100). Zero entries contribute
#' nothing to the entropy and are dropped before taking logarithms.
#'
#' @param p A vector of epiallele probabilities
#' @return The Shannon entropy value
#' @examples
#' a<-c(rep(0,13),60.86960,0.00000,39.1304)
#' shannon(a)
#' @export
shannon <- function(p) {
    probs <- p[p > 0] / 100
    -sum(probs * log(probs))
}
|
407b3189357f53468782062d54577fd6781db723
|
37a70a2a8c84f353d45cd678f059cbe5446d5346
|
/day8_1/nba_eda.R
|
0b9d565ccdba19a98fa7a2b89b6d862799038037
|
[] |
no_license
|
jee0nnii/DataITGirlsR
|
a27f7ce1c3f90765366f120ff85cd7f2cee60e8c
|
cc6e7b3f2d30c690a41e4ca5a165d32de47d3c3f
|
refs/heads/master
| 2021-03-19T16:49:01.412022
| 2017-11-06T15:21:56
| 2017-11-06T15:21:56
| 109,706,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,129
|
r
|
nba_eda.R
|
# Using the NBA data, run a regression analysis on shooting percentage (Spoint).
nba <-read.csv('NBA.csv')
head(nba)
library(GGally)
plot(nba)
str(nba)
# Print a summary of each column.
for (i in 1:dim(nba)[2]){
  print(summary(nba[i]))
}
# Pairwise plots of the numeric columns (drop the Name column).
test <- subset(nba, select=c(2:11))
ggpairs(test)
colnames(nba)
# [1] "Name"      "height"    "games"     "minutes"   "age"       "point"
# [7] "assist"    "rebound"   "fieldgoal" "freethrow" "Spoint"
hist(nba$point)
# NOTE(review): attach() is discouraged -- it can silently mask objects;
# prefer passing data= to modelling functions (as done below anyway).
attach(nba)
#anova test
nbafit2 <- aov(Spoint ~ height, data = nba)
# Is the height variable significant? Used when deciding which variables to select, i.e. comparing:
# lm(Spoint ~ 1, data = nba)
# lm(Spoint ~ height, data = nba)
#### Regression analysis ####
# Full model: all predictors except the player name.
lm(Spoint ~ . -Name, data= nba)
nbafit <- lm(Spoint ~ . -Name, data= nba)
summary(nbafit)
# Call:
# lm(formula = Spoint ~ height + games + minutes + age + point +
#     assist + rebound + fieldgoal + freethrow, data = nba)
#
# Residuals:
#   Min         1Q     Median     3Q       Max
# -0.143506 -0.038752  0.000045 0.032880 0.285624
#
# Coefficients:
#   Estimate Std. Error t value Pr(>|t|)
# (Intercept)  3.295e-01  2.403e-01   1.371 0.173553
# height      -1.188e-04  1.115e-03  -0.107 0.915384
# games        1.389e-04  7.311e-04   0.190 0.849729
# minutes     -6.732e-05  2.647e-05  -2.543 0.012612 *
# age         -3.275e-03  1.915e-03  -1.710 0.090551 .
# point        2.558e-02  2.476e-03  10.330  < 2e-16 ***
# assist      -9.280e-03  4.434e-03  -2.093 0.039004 *
# rebound     -1.789e-02  8.759e-03  -2.042 0.043929 *
# fieldgoal    5.451e-03  1.545e-03   3.528 0.000648 ***
# freethrow   -1.674e-03  7.797e-04  -2.147 0.034309 *
# ---
# Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Residual standard error: 0.06221 on 95 degrees of freedom
# Multiple R-squared:  0.7368, Adjusted R-squared:  0.7118
# F-statistic: 29.54 on 9 and 95 DF,  p-value: < 2.2e-16
#### Regression analysis ####
# Reduced model: drop the clearly non-significant height and age terms.
nbafit1 <- lm(Spoint ~ games + minutes + point + assist + rebound + fieldgoal +
                freethrow, data= nba)
summary(nbafit1)
getwd()
# NOTE(review): hard-coded absolute setwd() makes the script non-portable.
setwd("C:/Users/Lucy Kim/Documents/dataitgirls/day8_1")
#### NBA assignment solution ####
nba <- read.csv("NBA.csv")
View(nba)
str(nba)
lm(Spoint ~ height + games + minutes + age + point + assist + rebound + fieldgoal + freethrow, data=nba)
fit <- lm(Spoint ~ . -Name, data=nba)
summary(fit)
## anova tests: screen each predictor against Spoint one at a time
fit0 <- aov(height ~ Spoint, data = nba)
summary(fit0)
fit0 <- aov(Spoint ~ height, data=nba)
#1 ) lm(Spoint ~ 1, data=nba)
#2 ) lm(Spoint ~ height, data=nba)
fit1 <- aov(games ~ Spoint, data = nba)
summary(fit1)
TukeyHSD(fit1)
fit2 <- aov(minutes ~ Spoint, data = nba)
summary(fit2)
fit3 <- aov(age ~ Spoint, data = nba)
summary(fit3)
fit4 <- aov(point ~ Spoint, data = nba) # ***
summary(fit4)
fit5 <- aov(assist ~ Spoint, data = nba)
summary(fit5)
fit6 <- aov(rebound ~ Spoint, data = nba) # ***
summary(fit6)
fit7 <- aov(fieldgoal ~ Spoint, data = nba) # ***
summary(fit7)
fit8 <- aov(freethrow ~ Spoint, data = nba) # .
summary(fit8)
# Final model: keep only the predictors flagged as significant above.
model_nba <- lm(Spoint ~ point + assist + fieldgoal, data=nba)
summary(model_nba)
|
dc9295b01173e65ca451f694123366e4b0a50af1
|
7c9c2e23420a68537b2988ea08f25ac16db42a9c
|
/facets/facet_map.R
|
66678464e29b874047951e4858f82173a9a7d756
|
[] |
no_license
|
VinayArora404219/TN_COVID
|
eede36a1ccb8617c3f0d36199366b7e2082467bb
|
a9dd22a17eafa9e3511b1db658c04a83ef1ebfd0
|
refs/heads/master
| 2022-11-26T18:23:37.449134
| 2020-08-06T03:10:56
| 2020-08-06T03:10:56
| 285,459,297
| 0
| 0
| null | 2020-08-06T03:03:17
| 2020-08-06T03:03:16
| null |
UTF-8
|
R
| false
| false
| 3,378
|
r
|
facet_map.R
|
### Make a facet map of US states with the number of new cases over time
library(tidyverse)
library(tidycensus)
library(cowplot)
library(TTR)
library(geofacet)
library(broom)
### Pull cases data from the NY Times repo below
###
### https://github.com/nytimes/covid-19-data
###
# Columns: date, state, fips, cases, deaths ("Dccdd").
spreadsheet <-
  "../Datasets/nytimes/covid-19-data/us-states.csv" %>%
  read_csv(col_names = TRUE, col_types = "Dccdd")
### How big should our SMA window be?
SMA_n <- 7
### Pull state population data from the Census
# B01003_001 is total population; join on state name to get the
# two-letter abbreviation from tidycensus::fips_codes.
pop2018 <-
  get_acs(geography = "state",
          variables = c("B01003_001"),
          year = 2018,
          geometry = FALSE,
          cache_table = TRUE) %>%
  rename(POP2018 = estimate) %>%
  select(NAME, POP2018) %>%
  left_join(fips_codes %>%
              select(state_name, state) %>%
              unique() %>%
              as_tibble(),
            by = c("NAME" = "state_name")) %>%
  rename(state_abbr = state,
         state = NAME) %>%
  select(state, state_abbr, POP2018)
### Take the data and convert it to a 7-day SMA of new cases per capita
# Pipeline: cumulative cases -> daily new cases (diff) -> 7-day simple
# moving average -> per-100k rate by joining 2018 populations.
# NOTE(review): the name `data` shadows utils::data() for the rest of
# the script.
data <-
  spreadsheet %>%
  select(date, state, cases) %>%
  arrange(date) %>%
  pivot_wider(id_cols = "date", names_from = "state", values_from = "cases") %>%
  mutate(across(!starts_with("date"),
                .fns = list(new = ~ c(.[1], diff(.))),
                .names = "{fn}_{col}"
  )) %>%
  select(date, contains("new_")) %>%
  rename_at(vars(contains("new_")),
            ~ str_replace(., "new_", "")) %>%
  mutate(across(!starts_with("date"),
                .fns = list(sma = ~ SMA(., n = SMA_n)),
                .names = "{fn}_{col}"
  )) %>%
  select(date, contains("sma_")) %>%
  rename_at(vars(contains("sma_")),
            ~ str_replace(., "sma_", "")) %>%
  pivot_longer(-date, names_to = "state", values_to = "new_cases") %>%
  left_join(pop2018, by = c("state" = "state")) %>%
  mutate(new_cases_percapita = 100000 * new_cases / POP2018) %>%
  select(date, state, state_abbr, new_cases_percapita)
### Add dot to the end of the line for reference
# Grab each state's most recent value so the last point can be marked.
dots <-
  data %>%
  select(date, state, new_cases_percapita) %>%
  pivot_wider(id_cols = "date",
              names_from = "state",
              values_from = "new_cases_percapita") %>%
  tail(n = 1) %>%
  pivot_longer(-date, names_to = "state", values_to = "new_cases_percapita") %>%
  rename(dots = new_cases_percapita)
data <- data %>% left_join(dots, by = c("date" = "date", "state" = "state"))
### Compute the growth rate over the last 2 weeks
# Linear slope of per-capita new cases vs date for each state.
# NOTE(review): `growth` is computed but not used anywhere below in this
# script -- confirm whether it should feed the plot (e.g. ordering/colour).
growth <-
  data %>%
  select(date, state, new_cases_percapita) %>%
  filter(date >= as.Date(Sys.Date()) - 14) %>%
  filter(!state %in% c("Virgin Islands",
                       "Guam",
                       "Northern Mariana Islands")) %>%
  group_by(state) %>%
  do(tidy(lm(new_cases_percapita ~ date, .))) %>%
  filter(term == "date") %>%
  select(state, estimate) %>%
  rename(growth = estimate) %>%
  arrange(growth)
#data <- data %>% left_join(dots, by = c("date" = "date", "state" = "state"))
this_title <-
  paste("New Cases per 100k (", SMA_n, "-day moving average)", sep = "")
# facet_geo lays the state panels out in their approximate map positions.
g_facet <-
  ggplot(data = data) +
  theme_bw() +
  geom_line(aes(x = as.Date(date), y = new_cases_percapita)) +
  geom_point(aes(x = as.Date(date), y = dots), color = "blue") +
  facet_geo(~ state) +
  labs(title = this_title,
       caption = "", x = "", y = "")
print(g_facet)
|
be5b0f3cc13155f1fa12c82b5cf8129486aeeb23
|
05087895b6d9efba31e9b68a361fe72f3e046492
|
/portfolio/analysis/3.3-plot-within-strategy-effects.R
|
0831d0d4abb52f9e997c8f7f3a641997efe481cd
|
[] |
no_license
|
NCEAS/pfx-commercial
|
a37a6644ee60af90bfc72f45987fbe665d0ca347
|
910304637a7be3c3b151972a8cb65a2423914bed
|
refs/heads/master
| 2020-04-04T07:18:28.148662
| 2017-09-02T14:46:48
| 2017-09-02T14:46:48
| 45,410,050
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,751
|
r
|
3.3-plot-within-strategy-effects.R
|
library(dplyr)
library(ggplot2)
library(viridis)
library(ggrepel)
# Load the prepared data (dat, md, ...) and the fitted Stan model m.
load("portfolio/data-generated/diff-dat-stan.rda")
load("portfolio/data-generated/m.rda")
devtools::load_all("pfxr")
# Posterior medians with 50% credible intervals for every coefficient.
b <- broom::tidyMCMC(m, conf.int = T, estimate.method = "median",
  conf.level = 0.5, conf.method = "quantile")
# Split the tidy output into the four coefficient families.
g1 <- b[grepl("coef_g1", b$term), ]
b1 <- b[grepl("coef_b1", b$term), ]
g2 <- b[grepl("coef_g2", b$term), ]
# NOTE(review): b2 greps "coef_b1", the same pattern as b1 above -- this
# looks like a copy-paste slip for "coef_b2" (it parallels g1/g2, and b2
# is used below as a distinct quantity, inc_rev). Confirm against the
# Stan model's parameter names before fixing.
b2 <- b[grepl("coef_b1", b$term), ]
# One row per strategy with metadata joined in.
res <- data.frame(strategy_id = 1:nrow(g1)) %>%
  inner_join(md) %>%
  mutate(strategy = as.character(strategy))
# Point estimates: inc/dec = variability effects, *_rev = revenue effects.
res$inc <- g2$estimate
res$dec <- g1$estimate
res$inc_rev <- b2$estimate
res$dec_rev <- b1$estimate
# Lower (.l) and upper (.u) 50% interval bounds for each quantity.
res$inc.l <- g2$conf.low
res$dec.l <- g1$conf.low
res$inc_rev.l <- b2$conf.low
res$dec_rev.l <- b1$conf.low
res$inc.u <- g2$conf.high
res$dec.u <- g1$conf.high
res$inc_rev.u <- b2$conf.high
res$dec_rev.u <- b1$conf.high
# Number of unique permit holders per strategy (point size in the plots).
ns <- dat %>% group_by(strategy) %>%
  summarise(nn = length(unique(p_holder)))
res <- inner_join(res, ns)
# Panel A: effect of generalizing (revenue vs variability), with interval
# cross-hairs per strategy and labels only for large strategies (nn > 250).
p1 <- res %>%
  mutate(strategy_label = ifelse(nn > 250, str_label, NA)) %>%
  mutate(inc = inc) %>%
  ggplot(aes(x = inc_rev, y = inc)) +
  geom_hline(yintercept = 0, lty = 2, col = "grey65") +
  geom_vline(xintercept = 0, lty = 2, col = "grey65") +
  geom_segment(aes(x = inc_rev.l, xend = inc_rev.u, y = inc, yend = inc),
    alpha = 0.1, lwd = 0.4) +
  geom_segment(aes(x = inc_rev, xend = inc_rev, y = inc.l, yend = inc.u),
    alpha = 0.1, lwd = 0.4) +
  geom_text_repel(aes(label = strategy_label, x = inc_rev, y = inc),
    size = 2.8, colour = "grey60", #segment.color = "grey80",
    point.padding = unit(0.3, "lines"), max.iter = 6e3, segment.size = 0.3) +
  geom_point(aes(color = strategy_mean_div, size = nn)) +
  xlab("Effect of generalizing on revenue") +
  ylab("Effect of generalizing on revenue variability") +
  scale_color_viridis() +
  theme_gg() +
  guides(
    colour = guide_legend(override.aes = list(size=3.5), order = 1),
    size = guide_legend(order = 2, override.aes = list(pch = 21))) +
  annotate("text", x = min(res$inc_rev.l), y = max(res$inc.u), label = "A",
    fontface = "bold", size = 5) +
  labs(colour = "Mean sp.\ndiversity", size = "Number of\npermit holders") +
  theme(legend.title = element_text(size = rel(0.85)))
ann_col <- "grey40"
# Panel B: effect of specializing -- same plot with the signs flipped.
p2 <- res %>%
  mutate(dec = -dec, dec_rev = -dec_rev) %>%
  mutate(dec.l = -dec.l, dec_rev.l = -dec_rev.l) %>%
  mutate(dec.u = -dec.u, dec_rev.u = -dec_rev.u) %>%
  mutate(strategy_label = ifelse(nn > 250, str_label, NA)) %>%
  ggplot(aes(x = dec_rev, y = dec)) +
  geom_hline(yintercept = 0, lty = 2, col = "grey65") +
  geom_vline(xintercept = 0, lty = 2, col = "grey65") +
  geom_segment(aes(x = dec_rev.l, xend = dec_rev.u, y = dec, yend = dec),
    alpha = 0.1, lwd = 0.4) +
  geom_segment(aes(x = dec_rev, xend = dec_rev, y = dec.l, yend = dec.u),
    alpha = 0.1, lwd = 0.4) +
  geom_text_repel(aes(label = strategy_label, x = dec_rev, y = dec),
    size = 2.9, colour = "grey60", #segment.color = "grey80",
    point.padding = unit(0.3, "lines"), max.iter = 6e3, segment.size = 0.3) +
  geom_point(aes(color = strategy_mean_div, size = nn)) +
  xlab("Effect of specializing on revenue") +
  ylab("Effect of specializing on revenue variability") +
  scale_color_viridis() +
  labs(colour = "Mean sp.\ndiversity", size = "Number of\npermit holders") +
  theme(legend.title = element_text(size = rel(0.85))) +
  theme_gg()
# p3 is panel B with its letter annotation for the combined figure.
p3 <- p2 + annotate("text", x = min(-1*res$dec_rev.u), y = max(-1*res$dec.l), label = "B",
  fontface = "bold", size = 5)
# p4 is the stand-alone specializing figure with quadrant annotations.
gap <- 0.15
left <- -0.7
right <- 1.4
p4 <- p2 +
  annotate("text", x = right, y = 2, label = "' '%up%variability", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = right, y = 2-gap, label = "' '%up%revenue", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = right, y = -0.4, label = "' '%down%variability", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = right, y = -0.4-gap, label = "' '%up%revenue", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = left, y = -0.4, label = "' '%down%variability", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = left, y = -0.4-gap, label = "' '%down%revenue", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = left, y = 2, label = "' '%up%variability", hjust = 0, col = ann_col, parse = TRUE) +
  annotate("text", x = left, y = 2-gap, label = "' '%down%revenue", hjust = 0, col = ann_col, parse = TRUE)
# p4
# Write the two-panel figure (A + B) with a single shared legend.
pdf("portfolio/figs/stan-offset-break-anti-spaghetti.pdf", width = 10, height = 4)
grid_arrange_shared_legend(p1, p3, ncol = 2, nrow = 1, position = "right")
dev.off()
ggsave("portfolio/figs/stan-offset-break-anti-spaghetti-specializing.pdf", plot = p4,
  width = 6, height = 4.75)
|
fb011adef8c89b1e804648d18c9648b3e294f5f5
|
1c9991dd85472c1265e1ba0c1c4591337887a908
|
/ProbTable.R
|
0b7e5eb5c97e19ae588da628a2fd87cfedd3952c
|
[] |
no_license
|
Jonplussed/PCC-MTH243
|
26ad5dac24931158f95a8ef7d99425112211c780
|
ebd387ec5c1e2650b263208a58ed7b3934b61c5b
|
refs/heads/master
| 2023-05-29T10:38:42.851898
| 2021-06-08T13:43:23
| 2021-06-08T13:51:26
| 357,639,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 744
|
r
|
ProbTable.R
|
# A table of possible values and their probabilities, from which we can derive
# the mean (expected value), variance, and standard deviation.
ProbTable <- setRefClass("ProbTable",
  fields = list(
    frame = "data.frame",  # columns x, px, plus derived columns after solve()
    mean = "numeric",      # expected value E[X]
    var = "numeric",       # variance Var(X)
    stdev = "numeric"      # standard deviation sqrt(Var(X))
  ),

  methods = list(
    # Build the table from a vector of values and matching probabilities.
    initialize = function(x = c(), px = c()) {
      frame <<- data.frame(x = x, px = px)
    },

    # Fill in the derived columns and summary fields, returning the object
    # itself so calls can be chained.
    solve = function() {
      frame$x.px <<- frame$x * frame$px        # x * P(x)
      mean <<- sum(frame$x.px)                 # E[X]
      frame$dev <<- frame$x - mean             # deviation from the mean
      frame$dev2 <<- frame$dev^2               # squared deviation
      frame$dev2.px <<- frame$dev2 * frame$px  # probability-weighted sq. dev.
      var <<- sum(frame$dev2.px)               # Var(X)
      stdev <<- sqrt(var)                      # SD(X)
      .self
    }
  )
)
|
d59a537f63ba900d3627b78a072c12d855b1c40e
|
325f4cc05aca5072febcfea5d67934e30526beb2
|
/teaching/Applied Statistics and Data Analysis/section-10-11-11am.R
|
b4262b72d5d2a024c0c8a5215c5f11ab1c253984
|
[] |
no_license
|
CongM/CongM.github.io
|
5e293bd7a71c21708acf2f09e138e593b1424e74
|
f6aa745fa3bcc35ae2fe388cd75fd536faa8a674
|
refs/heads/master
| 2023-07-25T14:58:40.359155
| 2023-07-09T06:48:23
| 2023-07-09T06:48:23
| 153,951,666
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,923
|
r
|
section-10-11-11am.R
|
###################################################
#### Examples in Faraway Chapter 6
#### Regression diagnostics: checking the error
#### assumptions of a fitted linear model
#### (constant variance, normality, independence).
####
#### Cong Mu
#### 10/11/2019
###################################################
library(faraway)
library(ggplot2)
library(lmtest)
#### Example in Chapter 6
## Constant variance: residuals-vs-fitted should show no systematic pattern
data(savings)
head(savings)
lmod <- lm(sr ~ pop15 + pop75 + dpi + ddpi, savings)
plot(fitted(lmod), residuals(lmod), xlab = "Fitted", ylab = "Residuals")
abline(h = 0, col = 'red')
## Non-constant variance example (gala data)
data(gala)
head(gala)
lmod <- lm(Species ~ Area + Elevation + Scruz + Nearest + Adjacent, gala)
plot(lmod$fitted.values, lmod$residuals, xlab = "Fitted", ylab = "Residuals")
abline(h = 0, col = 'red')
## Normality of the residuals
lmod <- lm(sr ~ pop15 + pop75 + dpi + ddpi, savings)
# Q-Q plot: points near the reference line indicate approximately normal residuals
qqnorm(residuals(lmod), ylab = "Residuals" , main = "")
qqline(residuals(lmod), col = 'red')
hist(residuals(lmod), xlab = "Residuals", main = "")
# Some examples of Q-Q plots for known distributions, for comparison:
# normal, lognormal (right skew), Cauchy (heavy tails), uniform (short tails)
par(mfrow=c(1,3))
n <- 50
for(i in 1:3) {x <- rnorm(n); qqnorm(x); qqline(x,col='red')}
for(i in 1:3) {x <- exp(rnorm(n)); qqnorm(x); qqline(x,col='red')}
for(i in 1:3) {x <- rcauchy(n); qqnorm(x); qqline(x,col='red')}
for(i in 1:3) {x <- runif(n); qqnorm(x); qqline(x,col='red')}
par(mfrow=c(1,1))
# Shapiro-Wilk test: H0 = residuals are normally distributed
shapiro.test(residuals(lmod))
## Correlated errors (time-ordered data)
data(globwarm)
head(globwarm)
lmod <- lm(nhtemp ~ wusa + jasper + westgreen + chesapeake + tornetrask + urals + mongolia + tasman, globwarm)
plot(residuals(lmod) ~ year, na.omit(globwarm), ylab = "Residuals")
abline(h = 0, col = 'red')
# Successive pairs of residuals: eps[i+1] (y) against eps[i] (x);
# a linear trend here suggests serial correlation
n <- length(residuals(lmod))
plot(tail(residuals(lmod),n-1) ~ head(residuals(lmod),n-1), xlab = expression(hat(epsilon)[i]), ylab = expression(hat(epsilon)[i+1]))
abline(h = 0, v = 0, col = grey(0.75))
# Durbin-Watson test: H0 = no first-order autocorrelation in the errors
dwtest(nhtemp ~ wusa + jasper + westgreen + chesapeake + tornetrask + urals + mongolia + tasman, data = globwarm)
|
b40e845cc3c753860cac6649a6442d2ae5df03a2
|
1563a87464e27bba0c67cd3eb5ee3e7fa93b3254
|
/R/client.R
|
572bac679184d0f8e3de6f99259c5726482afeff
|
[] |
no_license
|
XiangdongGu/flaskrpy
|
22a432c6f3f1e70b5e0a426703e5c447e4698f7d
|
591f043ef54e9b75cbba1568ae85ef82fc2ae237
|
refs/heads/master
| 2021-01-19T08:36:30.031496
| 2017-04-09T15:57:40
| 2017-04-09T15:57:40
| 87,653,022
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,644
|
r
|
client.R
|
#' Make an API call
#'
#' Sends `req` as JSON to `<host>/r/<model>/<func>` and parses the JSON
#' response back into an R object.
#'
#' @param model model name
#' @param func function name
#' @param req request data, typically a list or data
#' @param host the host of the API server (default: local dev server)
#' @export
#' @examples
#' \dontrun{
#' api_call("iris", "pred", iris)
#' }
#'
api_call <- function(model, func, req, host = "http://127.0.0.1:5000") {
  # Build the endpoint URL: <host>/r/<model>/<func>.
  url <- file.path(host, "r", model, func)
  # Namespaces are fully qualified so the function works without attaching
  # httr/jsonlite (the original called the unqualified `fromJSON`, which is
  # not in scope unless jsonlite happens to be attached).
  res <- httr::POST(url, body = jsonlite::toJSON(req))
  jsonlite::fromJSON(httr::content(res, "text"))
}
#' Deploy a model to API
#'
#' Serializes the supplied objects into a temporary RDS file and uploads it
#' to the server's `/deploy` endpoint.
#'
#' @param ... objects that make up the model; each element is named after
#'   the expression it was passed as (e.g. `api_deploy(fit, ...)` stores it
#'   under "fit")
#' @param model_name the name of the model (used for the RDS file name)
#' @param host API server
#' @export
#'
api_deploy <- function(..., model_name, host = "http://127.0.0.1:5000") {
  # Capture the ... arguments and label them with their deparsed expressions.
  dots <- substitute(list(...))
  dotsname <- as.character(dots)[-1L]
  dots <- eval(dots)
  names(dots) <- dotsname
  # Write to a temporary RDS file; the directory is removed even on error.
  rdsdir <- tempfile()
  dir.create(rdsdir)
  on.exit(unlink(rdsdir, recursive = TRUE), add = TRUE)
  fname <- sprintf("%s.rds", model_name)
  fpath <- file.path(rdsdir, fname)
  saveRDS(dots, file = fpath)
  # Deploy to API server (httr:: qualified so the package need not be attached).
  httr::POST(file.path(host, "deploy"),
             body = list(name = fname,
                         rdsfile = httr::upload_file(fpath)))
  cat("SUCCESS!")
}
#' Make a function available for API call
#'
#' By default, a function should not be exposed to API for security concern.
#' We should only expose the functions we know that has no side effect and
#' necessary for user to use.
#'
#' @param f the function to expose
#' @return `f` with the `api_expose` attribute set to `TRUE`
#' @export
#'
api_expose <- function(f) {
  # Refuse anything that is not callable; the error text is part of the API.
  if (!is.function(f)) stop("Object is not a function.")
  # Tag the function so the server knows it may be exposed.
  structure(f, api_expose = TRUE)
}
|
3745c1c60e6a8657416085c69281b723799fd693
|
ceb3918a00d69ea84b6a0057cf84da1ccb736c7c
|
/man/auto_bio.Rd
|
6975a5ea647eed37dff507380536b635bbceb1ed
|
[] |
no_license
|
zsmith27/CHESSIE
|
3006d6f7b4b49f1bf846837d597fd31c5d87996b
|
785192be00e1b4713fa00238b93996f8d365f9f2
|
refs/heads/master
| 2020-05-25T22:14:07.599940
| 2018-08-20T16:09:42
| 2018-08-20T16:09:42
| 84,974,122
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 497
|
rd
|
auto_bio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auto.R
\name{auto_bio}
\alias{auto_bio}
\title{Automate Spatial Classification}
\usage{
auto_bio(my.data, index_res, bioregion = NULL)
}
\arguments{
\item{my.data}{= Input data set to be assigned to the chosen spatial
resolution.}
\item{index_res}{= Spatial Resolution (i.e., "BASIN", "COAST", "INLAND", or
"BIOREGION")}
\item{bioregion}{= If index_res is set to "BIOREGION," then a list of
bioregions to keep must be specified.}
}
\description{
Automate Spatial Classification
}
|
c91fe62d3875cd2598ad6b1cbd27131a5476953e
|
e19c30ce934823c012664af5ed6a07beaa3f9e4a
|
/pspecter_container/Server/Export/ExportVISPTM.R
|
8adda6e0706cca863cc6388e9e4dec9f03fc8d85
|
[] |
no_license
|
rjea/PSpecteR
|
2d543ce80908b4cd58005a301eba970f80e0c88e
|
d7842034b0f691f8e287a60e246c3497c56ae260
|
refs/heads/master
| 2023-04-17T23:38:58.727261
| 2021-04-21T17:07:30
| 2021-04-21T17:07:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,487
|
r
|
ExportVISPTM.R
|
## David Degnan, Pacific Northwest National Labs
## Last Updated: 2020_09_24
# DESCRIPTION: This contains the modal for exporting the FRAG csv.
list(
# Get all figures
# ----------------------------------------------------------------------
# Build one annotated plotly spectrum per row of the Vis PTM table.
#
# VisPTM: data frame of candidate PTM results; the columns read here are
#   "Name", "Average.PPM.Error", "Number.of.Ions" and "Coverage".
# Returns: a named list of plotly figures, keyed by a descriptive title
#   built from those columns.
#
# NOTE(review): reads several Shiny reactives defined elsewhere in the app
# (revals, getSSPeak, getScan, getScanClick, getModFrag, getModOriSDF,
# getFrag, input$...), so it can only run inside the server context.
getAllVisPTMFigs <- function(VisPTM) {
  # Initiate List to hold all Vis PTM Figures
  AllVisPTMFigs <- list()
  # Start progress bar for each generated spectra
  withProgress({
    # Flag markdown-export mode for the rest of the app.
    revals$PTMmarkdown <- T
    incProgress(amount = 0.5, "Generating Spectra")
    for (row in 1:nrow(VisPTM)) {
      # Add one to the PTMread for getModFrag
      # (presumably this selects which PTM row getModFrag() works on --
      # confirm against the reactive's definition)
      revals$PTMread <- revals$PTMread + 1
      incProgress(0, paste("Exporting Markdown: ", round(row/nrow(VisPTM), 4) * 100, "%", sep = ""))
      # Get the peak data and scan number
      peak <- getSSPeak()
      scan <- getScan()
      scanNum <- scan[getScanClick(), "Scan.Num"]
      # Now get the fragment data and the original sequence DF
      frag <- getModFrag()
      oriSDF <- getModOriSDF()
      # Get modification name
      modName <- VisPTM[as.character(row), "Name"]
      ##############################################
      ## Step 1: Plot spectra without annotations ##
      ##############################################
      # Add 0's to peak data and sort: each peak is bracketed by two
      # zero-intensity points at mz +/- 1e-9 so the line trace draws
      # vertical sticks instead of connecting peak tops.
      len <- nrow(peak)
      spectra <- data.frame("mzExp" = c(peak$mz - 1e-9, peak$mz, peak$mz + 1e-9),
                            "intensityExp" = c(rep(0, len), peak$intensity, rep(0, len)))
      spectra <- spectra[order(spectra$mzExp),]
      # Add missing data (columns expected downstream; unused for this trace)
      spectra$ion <- spectra$type <- spectra$z <- spectra$isoPeak <- NA
      # Plot spectra (base trace, black, markers hidden, hover shows mz/intensity)
      p <- plot_ly(x = spectra$mzExp, y = spectra$intensityExp, type = "scatter",
                   mode = "lines+markers", line = list(color = "black"),
                   name = "Spec", marker = list(opacity = 0, color = "black"), hoverinfo = "text",
                   hovertext = paste(paste("MZ:", round(spectra$mzExp, 3),
                                           "<br>Int:", round(spectra$intensityExp, 0))))
      ##############################
      ## Step 2: Plot annotations ##
      ##############################
      # This step only occurs if they are fragments
      if (is.null(frag) == F) {
        # Set colors list: one fixed color per ion series
        colors <- c("a" = "forestgreen", "b" = "steelblue", "c" = "darkviolet",
                    "x" = "rgb(172, 122, 122)", "y" = "red", "z" = "darkorange")
        # Subset out fragment data
        frag <- frag[,c("mzExp", "intensityExp", "ion", "type", "z", "isoPeak")]
        for (type in c("a", "b", "c", "x", "y", "z")) {
          fragSub <- frag[frag$type == type,]
          # Same zero-bracketing trick as the base spectrum trace.
          len <- nrow(fragSub)
          spectraAnno <- data.frame("mzExp" = c(fragSub$mzExp - 1e-9, fragSub$mzExp, fragSub$mzExp + 1e-9),
                                    "intensityExp" = c(rep(0, len), fragSub$intensityExp, rep(0, len)),
                                    "ion" = rep(fragSub$ion, 3), "z" = rep(fragSub$z, 3),
                                    "isoPeak" = rep(fragSub$isoPeak, 3))
          spectraAnno <- spectraAnno[order(spectraAnno$mzExp),]
          # NOTE(review): $intensity relies on data.frame partial matching of
          # the intensityExp column -- works, but fragile.
          p <- add_trace(p, x = spectraAnno$mzExp, y = spectraAnno$intensity, type = "scatter",
                         mode = "lines+markers", line = list(color = colors[type]),
                         name = type, marker = list(opacity = 0),
                         hoverinfo = "text", hovertext = paste(paste("Ion: ", spectraAnno$ion,
                                                                     "<sup>", spectraAnno$z, "</sup> ", spectraAnno$isoPeak, sep = ""), "<br>MZ:",
                                                               round(spectraAnno$mzExp, 3), "<br>Int:", round(spectraAnno$intensity, 0)))
          # Add labels if enabled
          # (NOTE(review): this condition mixes && and & on scalar checks)
          if (is.null(input$ssLetter) == F && input$ssLetter == T & is.null(getFrag()) == F
              && nrow(getFrag()) > 0 & getScan()[getScanClick(), "MS.Level"] != 1 &
              nrow(spectraAnno) > 0) {
            # Remove 0 from labeling (only label real peak tops)
            toLabel <- spectraAnno[spectraAnno$intensityExp > 0,]
            # Add spacing: map the user-chosen label distance to a divisor;
            # larger divisor = label closer to the peak
            dist <- input$ssLabDist
            if (dist == "Very Close") {dist <- 2e5} else if (dist == "Close") {dist <- 2e4} else
              if (dist == "Near") {dist <- 2e3} else if (dist == "Far") {dist <- 2e2} else {dist <- 2e1}
            adjValX <- max(spectra$mzExp, na.rm = T) / dist
            adjValY <- max(spectra$intensityExp, na.rm = T) / dist
            # Get and plot labels (one HTML annotation per labeled peak)
            for (i in 1:nrow(toLabel)) {
              text <- list(
                x = toLabel$mzExp[i] + adjValX, y = toLabel$intensityExp[i] + adjValY,
                text = HTML(paste('<span style="color: ', colors[type], '; font-size: ',
                                  input$ssAnnoSize, 'pt;"> ', toLabel$ion[i], "<sup>",
                                  toLabel$z[i], "</sup>, ", toLabel$isoPeak[i], "</span>", sep = "")),
                xref = "x", yref = "y", showarrow = FALSE)
              p <- p %>% layout(annotations = text)
            }}
        }}
      # Add plot name summarizing the PTM's quality metrics
      plotName <- paste("Name: ", VisPTM[as.character(row), "Name"], ", Average.PPM.Error: ",
                        round(VisPTM[as.character(row), "Average.PPM.Error"], 4), ", Number.of.Ions: ",
                        VisPTM[as.character(row), "Number.of.Ions"], ", Coverage: ",
                        VisPTM[as.character(row), "Coverage"], sep = "")
      # NOTE(review): `p` is both piped into layout() and passed as its first
      # argument -- plotly tolerates this, but the extra argument is redundant.
      p <- p %>% layout(p, xaxis = list(title = "M/Z (Mass to Charge)"),
                        yaxis = list(title = "Intensity"),
                        title = list(text = plotName, font = list(size = 10)))
      AllVisPTMFigs[[plotName]] <- p
    }
    incProgress(amount = 0.5, "Done!")
  })
  # Reset the export flags before returning.
  revals$PTMmarkdown <- F
  revals$PTMread <- 0
  return(AllVisPTMFigs)
},
# Download handler: render the Vis PTM figures into a standalone HTML
# report from an R Markdown template.
output$VPmarkdown <- downloadHandler(
  # File name: <MS file base name>_VisPTM_<timestamp>.html, falling back
  # to "PSpecteR" when no MS file is loaded.
  filename = function() {
    basename <- "PSpecteR"
    if (is.null(msPath()) == F) {
      basename <- unlist(strsplit(unlist(strsplit(msPath(), "/"))[-1], ".", fixed = T))[1]
    }
    paste(basename, "_VisPTM_", format(Sys.time(), "%y%m%d_%H%M%S"), ".html", sep = "")
  },
  content = function(file) {
    withProgress({
      incProgress(0.1, "Starting HTML Export")
      # Get R Markdown template (relative to the app's working directory)
      RmdFile <- file.path("Server", "Export", "VisPTM_Export.Rmd")
      incProgress(0.3, "Collecing Parameter Data")
      # Declare ion group information; default to all series plus base trace
      ionGroups <- input$ionGroups
      if (is.null(ionGroups)) {ionGroups <- c("a", "b", "c", "x", "y", "z", "Spec")}
      # Parameters handed to the Rmd; the figures themselves are generated
      # here, sorted by descending ion count.
      params <- list(scanNum = getScan()[getScanClick(), "Scan.Num"],
                     seq = getNewVSeq(),
                     ions = ionGroups,
                     isoPer = input$ssIsoPerMin,
                     fragTol = input$ssTolerance,
                     intMin = input$ssIntenMin,
                     corrScore = input$ssCorrScoreFilter,
                     VPMetFigs = getAllVisPTMFigs(getVPMetrics()[order(-getVPMetrics()$Number.of.Ions),]))
      # Point rmarkdown at RStudio's bundled pandoc.
      # NOTE(review): only Windows and macOS are handled; on Linux
      # RSTUDIO_PANDOC is left unset.
      if (Sys.info()["sysname"] == "Windows") {
        Sys.setenv(RSTUDIO_PANDOC=file.path("C:", "Program Files", "Rstudio", "bin", "pandoc"))} else
          if (Sys.info()["sysname"] == "Darwin") {
            Sys.setenv(RSTUDIO_PANDOC=file.path("", "Applications", "RStudio.app", "Contents", "MacOS", "pandoc"))
          }
      incProgress(0.3, "Writing HTML file")
      # Render in a fresh environment so only `params` is visible to the Rmd.
      rmarkdown::render(RmdFile, output_file = file,
                        params = params,
                        envir = new.env(parent = globalenv()))
      incProgress(0.2, "Finished HTML file")
    })
  })
)
# Export markdown
#observeEvent(input$VPmarkdown, {
#
# if (is.null(getScan())) {
# sendSweetAlert(session, "No MS Data Uploaded", "Upload MS Data", type = "error")
# return(NULL)
# }
#
# if (is.null(getID())) {
# sendSweetAlert(session, "No ID Data Upload", "Upload ID Data", type = "error")
# return(NULL)
# }
# Get VP Metrics
# if (is.null(getVPMetrics())) {
# sendSweetAlert(session, "The Vis PTM Table is Empty.",
# "Let David know if you ever see this error.", type = "error")
# return(NULL)
# }
# Get R Markdown file
# RmdFile <- file.path("Server", "Export", "VisPTM.Rmd")
# Determine if figures folder exists and if not make it
# out <- file.path(outputPath(), "Figures")
# if (dir.exists(out) == F) {dir.create(out)}
# Name the markdown file and copy to the proper folder
# plots$MDindex <- plots$MDindex + 1
# outRmd <- file.path(out, paste(plots$MDindex, "_VisPTM.Rmd", sep = ""))
# file.copy(RmdFile, outRmd, overwrite = T)
# Set the name of the outputted HTML file
# outHTML <- file.path(out, paste(plots$MDindex, "_VisPTM.html", sep = ""))
# PDeclare ion group information
# ionGroups <- input$ionGroups
# if (is.null(ionGroups)) {ionGroups <- c("a", "b", "c", "x", "y", "z", "Spec")}
# Put all the parameters to be passed to the markdown file into a list
# params <- list(scanNum = getScan()[getScanClick(), "Scan.Num"],
# seq = getNewVSeq(),
# ions = ionGroups,
# isoPer = input$ssIsoPerMin,
# fragTol = input$ssTolerance,
# intMin = input$ssIntenMin,
# corrScore = input$ssCorrScoreFilter,
# VPMetFigs = getAllVisPTMFigs(getVPMetrics()[order(-getVPMetrics()$Number.of.Ions),]))
# Insert directory information for pandoc
# if (Sys.info()["sysname"] == "Windows") {
# Sys.setenv(RSTUDIO_PANDOC=file.path("C:", "Program Files", "Rstudio", "bin", "pandoc"))} else {
# Sys.setenv(RSTUDIO_PANDOC=file.path("", "Applications", "RStudio.app", "Contents", "MacOS", "pandoc"))}
# Print Markdown
# rmarkdown::render(outRmd, output_file = outHTML,
# params = params,
# envir = new.env(parent = globalenv()))
# Write to the PSpecteR Session Info
# write(paste(" Vis PTM Markdown exported at ", format(Sys.time(), "%R:%S"), sep = ""),
# file = file.path(outputPath(), "PSpecteR_Session_Info.txt"), append = T)
# Open an HTML in R
# system2("open", outHTML)
#})
|
0a949e7e60a121e2a672cde8605c6c9cb0e39ea6
|
5cbcbd04710d05301b3dab4b5a9bb1e3f22d9512
|
/man/sim.plot.Rd
|
ff3e6eb7497ad4412c6178c1433e9055278fa593
|
[] |
no_license
|
cran/iAdapt
|
da44d081922a287678c846174f4ee40055bab2f0
|
48db1d3f478589faf75146e9e39605f5be3f5b77
|
refs/heads/master
| 2021-08-11T19:41:55.152661
| 2021-08-06T03:50:09
| 2021-08-06T03:50:09
| 236,613,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,594
|
rd
|
sim.plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim.plot.R
\name{sim.plot}
\alias{sim.plot}
\title{Generate plots for estimated percent allocation and response per dose.}
\usage{
sim.plot(sims)
}
\arguments{
\item{sims}{output from sim.trials}
}
\value{
Error plots of estimated (1) percent allocation per dose, and
(2) estimated response per dose.
}
\description{
Generate plots for estimated percent allocation and response per dose.
}
\examples{
# Number of pre-specified dose levels
dose <- 5
# Vector of true toxicities associated with each dose
dose.tox <- c(0.05, 0.10, 0.20, 0.35, 0.45)
# Acceptable (p_yes) and unacceptable (p_no) DLT rates used for establishing safety
p_no <- 0.40
p_yes <- 0.15
# Likelihood-ratio (LR) threshold
K <- 2
# Cohort size used in stage 1
coh.size <- 3
# Vector of true mean efficacies per dose (here mean T-cell persistence per dose (\%))
m <- c(5, 15, 40, 65, 80) # MUST BE THE SAME LENGTH AS dose.tox
# Efficacy (equal) variance per dose
v <- rep(0.01, 5)
# Total sample size (stages 1&2)
N <- 25
# Stopping rule
stop.rule <- 9
numsims = 100
set.seed(1)
simulations = sim.trials(numsims = numsims, dose, dose.tox, p1 = p_no, p2 = p_yes,
K, coh.size, m, v, N, stop.rule = stop.rule, cohort = 1,
samedose = TRUE, nbb = 100)
# sim.plot(simulations)
}
|
dd195637757988b7cf71ea60d8d46b858ab06097
|
222b9a96e0767baa47e525ae680612b6550a3e21
|
/data/repo.R
|
604d60d18aa8b2a3b8ec42ea07a8341f4b3e6377
|
[] |
no_license
|
nwant/cs4500
|
494cb5c86463e15aaa5d0a8a50567b5f3988e900
|
567c99cbadb9b6fcb562b438bc1516f9dc5f87aa
|
refs/heads/master
| 2021-03-27T16:59:49.745573
| 2019-01-16T21:47:41
| 2019-01-16T21:47:41
| 70,825,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,531
|
r
|
repo.R
|
#------------------------------
# repo.R
# This file returns dataframes related to the CSV's that are imported into the functions
##############################################################################################
library("plyr")
library("stringr")
library("zoo")
#============================
# get.arisa
# ---------
# Read the ARISA data set and derive the site and date columns.
#
# Inputs:
#   config...a configuration object with an `arisa.fp` file path
#
# Returns:
#   a data frame with one row per sampling date: the raw abundance
#   columns plus `source` (the 3-letter site code, with a leading "TT"
#   collapsed to "T") and `date` (parsed from the row identifier,
#   which encodes mm_dd_yy starting at its 5th character).
get.arisa <- function(config) {
  arisa <- read.csv(config$arisa.fp, header = TRUE, strip.white = TRUE)
  # Site code: first three characters of the identifier column.
  site <- substr(arisa$X, 1, 3)
  arisa$source <- gsub("TT", "T", site)
  # Sample date: the rest of the identifier, formatted mm_dd_yy.
  arisa$date <- as.Date(substr(arisa$X, 5, 100), "%m_%d_%y")
  arisa
}
#============================
# get.ciliates.1
# ---------
# get a dataframe containing data from the Ciliates (tab 1) dataset
#
# Inputs:
#   config...a configuration object with a `ciliates.1.fp` file path
#
# Returns:
#   a dataframe containing data from the Ciliates (tab 1) dataset.
#   Rows correspond to a single date from which samples were taken.
#   Columns include a parsed Date column, the total cells per liter
#   (renamed to TotalCellsPerLiter) and one column per ciliate species.
#
# NOTE(review): uses str_match/str_extract, so stringr must be attached
# (it is loaded at the top of this file).
get.ciliates.1 <- function(config) {
  # Creates a dataframe and stores all data from the first tab of the ciliates file
  ciliates.1 <- read.csv(config$ciliates.1.fp, header=TRUE, strip.white =TRUE)
  # Remove rows that are entirely empty/NA
  ciliates.1 <- ciliates.1[!apply(is.na(ciliates.1) | ciliates.1 == "", 1, all), ]
  # Convert date strings into R dates:
  # 1) normalize separators so month, day and year are hyphen-separated
  ciliates.1$Date <- gsub("/", "-", ciliates.1$Date) # date, month and year all separated by hyphens
  # 2) rebuild the string as "mm-dd-" plus a two-digit year, collapsing
  #    years written as 20xx down to their last two digits
  ciliates.1$Date <- paste(str_match(ciliates.1$Date, "\\d+-\\d+-"), str_match(str_extract(ciliates.1$Date, "(/|-)2?0?\\d\\d$"), "\\d\\d$"), sep="")
  # 3) parse as mm-dd-yy (as.Date accepts single-digit month/day here)
  ciliates.1$Date <- as.Date(ciliates.1$Date, "%m-%d-%y")
  # Rename last column, which has odd characters that R can't handle
  colnames(ciliates.1)[length(ciliates.1)] <- "TotalCellsPerLiter"
  return(ciliates.1)
}
#============================
# get.all
# ---------
# get a combined dataframe containing data from both the ARISA and the
# Ciliates (tab 1) datasets
#
# Inputs:
#   config...a configuration object (must provide both file paths)
#
# Returns:
#   a dataframe with one row per (date, source) pair: ciliate
#   cells-per-liter columns plus ARISA relative-abundance columns,
#   outer-joined on date and source; measurements missing on either
#   side of the join are set to 0.
get.all <- function(config) {
  # Rename a single column of `df` from old.name to new.name.
  # NOTE(review): grep() treats old.name as a regular expression; if it
  # matched several columns they would all be renamed to new.name.
  rename.column <- function(df, old.name, new.name) {
    cloned.df <- data.frame(df)
    idx <- grep(old.name, colnames(df))
    colnames(cloned.df)[idx] <- new.name
    return(cloned.df)
  }
  # Get each data set and label their species classification
  arisa <- get.arisa(config)
  arisa$X <- NULL
  ciliates.1 <- get.ciliates.1(config)
  # Standardize the ciliate frame: align the join-key column names with
  # arisa's (date, source) and drop columns not needed for the merge
  ciliates.1 <- rename.column(ciliates.1, "Date", "date")
  ciliates.1 <- rename.column(ciliates.1, "site", "source")
  ciliates.1$TotalCellsPerLiter <- NULL
  ciliates.1$month <- NULL
  ciliates.1$year <- NULL
  ciliates.1$X <- NULL
  # join using date and source; all = T keeps dates present in only one
  # of the two data sets
  merged <- merge(ciliates.1, arisa, by=c("date", "source"), all = T)
  # replace all NA values with 0 (non-key columns are NA wherever the
  # sampling dates of the two data sets don't match up)
  merged[is.na(merged)] <- 0
  return(merged)
}
|
077ea622a50318c43ddf91fd8f68d6be827f8041
|
23d0908daec85c6efbd7f02a458fbe9e98c3e10a
|
/run_analysis.R
|
ba7d1ec60b56a969e9c7b68039171afa9c69491f
|
[] |
no_license
|
janmunich/Getting-and-Cleaning-Data-Course-Project
|
9f959dfee466e7ba80971928ead0a101bb913d31
|
f2a27561e321f825c7819fb499ebce5fef5d242d
|
refs/heads/master
| 2021-01-13T06:04:24.549167
| 2017-06-22T08:54:16
| 2017-06-22T08:54:16
| 95,093,667
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,834
|
r
|
run_analysis.R
|
library(tidyverse)

# Create x, y and subject data sets from the UCI HAR test/train partitions.
X_test <- read.table( "./test/X_test.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
y_test <- read.table( "./test/y_test.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
subject_test <- read.table( "./test/subject_test.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
X_train <- read.table( "./train/X_train.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
y_train <- read.table( "./train/y_train.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
subject_train <- read.table( "./train/subject_train.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)

# Stack train above test (same order for measurements, labels and subjects).
y_data <- rbind(y_train, y_test)
x_data <- rbind(X_train, X_test)
subject_data <- rbind(subject_train, subject_test)

# Keep only the columns that are mean() or std() measurements.
features <- read.table( "./features.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
mean_std_features <- grep("-(mean|std)\\(\\)", features[, 2])

# Subset the mean/std columns and attach readable column names.
x_data <- x_data[, mean_std_features]
names(x_data) <- features[mean_std_features, 2]
names(subject_data) <- "subject"

# Translate numeric activity codes into descriptive labels.
activities <- read.table( "./activity_labels.txt", sep = "", quote = "\"'", skip = 0, stringsAsFactors = FALSE)
y_data[, 1] <- activities[y_data[, 1], 2]
names(y_data) <- "activity"

# Single tidy data set combining measurements, activity and subject.
total <- cbind(x_data, y_data, subject_data)

# Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# summarise_each()/funs() are defunct in current dplyr; across() gives
# the same result.
averages_data <- total %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")

# row.names is the documented argument (the original partially matched it
# as `row.name`).
write.table(averages_data, "averages_data.txt", row.names = FALSE)
|
e4c083b4e61408f38863de0dbc3899f481ee45d6
|
953226613115cf91fdda04d6f8d873a9960b7461
|
/data_project.R
|
716c5210b038806c42efa04a987e1bc854a7c1ab
|
[] |
no_license
|
jw2020c/DataAnalyticsSpring2020
|
e5b38f660744ace787844d366afdc3dee0fb3613
|
a3e9683859555fb2736f1380ebe0e0ab98cb97b6
|
refs/heads/master
| 2020-12-19T19:21:56.077392
| 2020-05-04T23:52:11
| 2020-05-04T23:52:11
| 235,828,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,358
|
r
|
data_project.R
|
# Load and prepare the Chicago traffic-crash vehicle data.
# NOTE(review): the absolute setwd() makes this script machine-specific.
setwd("C:/Users/23721/Downloads/DAta analytics")
data <- read.csv("Traffic_Crashes_-_Vehicles.csv")
# Quick structure checks (printed for inspection only)
levels(data$MODEL)
apply(data,2,function(x) print(class(x)))
# Parse the crash timestamp, then drop identifier/unused columns
data$CRASH_DATE<-as.POSIXct(data$CRASH_DATE,format="%m/%d/%Y %I:%M:%S %p", tz="UTC")
data <- data[,-c(1,2,3,5,7,8,9)]
#reform the year: VEHICLE_YEAR becomes the vehicle's age at crash time
library(lubridate)
data$VEHICLE_YEAR <-year(data$CRASH_DATE)-as.numeric(data$VEHICLE_YEAR)
# NOTE(review): attach() copies `data`; the attached copy does not track
# later modifications of the data frame.
attach(data)
library(dplyr)
barplot(table(MAKE))
barplot(table(VEHICLE_DEFECT))
hist(VEHICLE_YEAR,xlim=c(0,40))
#data cleaning: drop rows with unknown/empty values in key columns
data <- data[-which(data$FIRST_CONTACT_POINT=="UNKNOWN"),]
data <- data[-which(data$MANEUVER=="UNKNOWN/NA"),]
data <- data[-which(data$MODEL==""),]
data <- data[-which(data$LIC_PLATE_STATE==""),]
data <- data[-which(data$TRAVEL_DIRECTION=="UNKNOWN"),]
data <- data[-which(data$VEHICLE_USE=="UNKNOWN/NA"),]
data <- data[-which(data$VEHICLE_TYPE=="UNKNOWN/NA"),]
# Keep the modeling columns and drop incomplete rows
data <- data[,c(4:11,32)]
data <- na.omit(data)
attach(data)
# Brands with more than 1000 crashes; [-36] drops one unwanted level
table(MAKE)[which(table(MAKE)>1000)]
brand <- names(table(MAKE)[which(table(MAKE)>1000)])[-36]
# Remove impossible (negative) vehicle ages
data <- data[which(data$VEHICLE_YEAR>=0),]
#EDA
barplot(table(UNIT_TYPE),main = "UNIT_TYPE",cex.names=0.8,col="blue")
barplot(table(VEHICLE_TYPE),main = "VEHICLE_TYPE",cex.names=0.5,col="red")
# NOTE(review): data$time and data$season are only created further down
# in this script -- these two barplots fail if run top-to-bottom here.
barplot(table(data$time),main = "Time",cex.names=0.8,col="yellow")
barplot(table(data$season),main = "Season",cex.names=0.8,col="lightgreen")
table(VEHICLE_DEFECT)
barplot(table(data$TRAVEL_DIRECTION),main ="Direction")
barplot(table(data$FIRST_CONTACT_POINT),cex.names=0.5,main ="First contact point")
hist(data$VEHICLE_YEAR,xlim = c(0,40))
apply(data[,-c(1,6)],2,table)
# Keep only the rows whose value in `col` occurs more than `min_n` times.
# Replaces the original grow-by-rbind loops (quadratic, with a `k <- NA` /
# `k[-1,]` hack) with a single vectorized %in% filter per column. Rows keep
# their original order instead of being regrouped level by level.
keep_frequent <- function(df, col, min_n = 1000) {
  counts <- table(df[[col]])
  df[df[[col]] %in% names(counts)[counts > min_n], ]
}
# Only keep the brands that have more than 1000 samples, using the
# pre-computed `brand` vector (which also drops one unwanted level).
data <- data[data$MAKE %in% brand, ]
# Same >1000 frequency filter, applied sequentially (each step recounts
# on the already-filtered data, matching the original loop order).
data <- keep_frequent(data, "MODEL")
data <- keep_frequent(data, "VEHICLE_USE")
data <- keep_frequent(data, "LIC_PLATE_STATE")
data <- keep_frequent(data, "VEHICLE_TYPE")
data <- keep_frequent(data, "MANEUVER")
#create new variables: time of day and season of the crash
library(lubridate)
h <- hour(data$CRASH_DATE)
# Night = 20:00-06:00 (inclusive), day otherwise. A temporary numeric "1"
# marks night first; the later character assignments coerce the vector.
h[h>=20|h<=6] <- 1
h[h<20&h>6]<-"day"
h[h=="1"]<-"night"
h <- as.factor(h)
data$time <- h
m <- month(data$CRASH_DATE)
# Map months to seasons via temporary codes (100..400) so the recodings
# cannot clobber each other, then replace the codes with labels.
m[6>=m&m>=4]<-100
m[9>=m&m>=7]<-200
m[12>=m&m>=10]<-300
m[3>=m]<-400
m[m==100]<-"spring"
m[m=="200"]<-"summer"
m[m=="300"]<-"fall"
m[m=="400"]<-"winter"
m <- as.factor(m)
data$season <- m
#convert it to a binary problem: any recorded defect becomes "YES";
#"NONE" and "UNKNOWN" are kept as-is
data$VEHICLE_DEFECT<-as.character(unlist(data$VEHICLE_DEFECT))
data$VEHICLE_DEFECT[!(data$VEHICLE_DEFECT=="NONE"|data$VEHICLE_DEFECT=="UNKNOWN")]<- "YES"
data$VEHICLE_DEFECT<-as.factor((data$VEHICLE_DEFECT))
table(data$VEHICLE_DEFECT)
#save the cleaned data for the modeling section below
write.csv(data,file = "myda.csv",row.names = F)
# Modeling section: reload the cleaned data and fit classifiers.
setwd("C:/Users/23721/Downloads/DAta analytics")
data <- read.csv("myda.csv",header = T)
#plot the engineered variables
barplot(table(data$time))
barplot(table(data$season))
#train set and test set: rows with UNKNOWN defect status are treated as
#the unlabeled "test" set, all other rows as training data
mytest <- data[which(data$VEHICLE_DEFECT=="UNKNOWN"),]
mytrain <- data[-which(data$VEHICLE_DEFECT=="UNKNOWN"),]
#we test for rpart but the system crashed
library(caret)
library(rpart)
attach(data)
kk <- rpart(VEHICLE_DEFECT~UNIT_TYPE+time+season+
              VEHICLE_YEAR+VEHICLE_TYPE+VEHICLE_USE+FIRST_CONTACT_POINT,data = mytrain)
# Predictors (drop id-like columns) and the response as a clean factor
x <- mytrain[,-c(1,3,4,5,7)]
y <- mytrain[,7]
y <- as.character(y)
y <- as.factor(y)
# 3-fold cross validation; grid-search over mtry (m) and num.trees (b)
fitControl <- trainControl(method = "cv",number=3)
m <- c(2:ncol(x))
b <- c(1,seq(10,200,by=10))
oob.error.rf <- matrix(NA,ncol=length(m),
                       nrow=length(b))
rf.treebag <- list(NA)
nn <- 1
for (j in 1:length(m)){
  grid <- expand.grid(.mtry=m[j],
                      .splitrule="info",
                      .min.node.size=5)
  for (i in 1:length(b)){
    # Fit a ranger random forest for this (mtry, num.trees) combination
    rf.treebag[[nn]] <- train(x=x,y=y,
                              method="ranger",
                              trControl = fitControl,
                              metric="Accuracy",
                              tuneGrid=grid,
                              num.trees=b[i])
    # Record the out-of-bag prediction error for model selection
    oob.error.rf[i,j] <- rf.treebag[[nn]]$finalModel$prediction.error
    print(i)
    nn <- nn + 1
    print(nn)
  }
}
# Pick the forest with the smallest OOB error and predict the unlabeled set
rf.final <- rf.treebag[[which.min(oob.error.rf)]]
prediction <- predict(rf.final,mytest)
# Logistic regression for interpretable coefficients
glm <- train(x=x,y=y,
             method="glm",
             trControl = fitControl,
             metric="Accuracy")
co <- glm$finalModel$coefficients
# NOTE(review): this overwrites the random-forest predictions above with
# the glm predictions before filling in mytest.
prediction<-predict.glm(glm$finalModel,mytest)
mytest[,7]<-prediction
data <- rbind(mytrain,mytest)
#plot the glm coefficients in three panels
par(mai=c(0.5,0.5,0.5,0.5))
barplot(co[1:22],horiz = T,las=1,cex.names=0.55,col="blue")
barplot(co[23:44],horiz = T,las=1,cex.names=0.55,col="blue")
barplot(co[45:66],horiz = T,las=1,cex.names=0.55,col="blue")
# Defect rate per maker/model
mytrain$VEHICLE_DEFECT <- mytrain$VEHICLE_DEFECT=="YES"
# NOTE(review): mytrain has fewer rows than data (data = mytrain + mytest
# after the rbind above), so these aggregate() calls mix vectors of
# different lengths -- likely intended to group by mytrain$MAKE/MODEL.
make <- aggregate(mytrain$VEHICLE_DEFECT,by=list(data$MAKE),FUN=mean)
model <- aggregate(mytrain$VEHICLE_DEFECT,by=list(data$MODEL),FUN=mean)
hist(model[,2],main="model defect rate",xlab="model")
hist(make[,2],main="maker defect rate",xlab="maker")
model[which(model[,2]>0.025),]
names(model)<-c("Model","defect_rate")
make[which(make[,2]>0.03),]
names(make)<-c("Maker","defect_rate")
library(ggplot2)
ggplot(data=model,aes(x=defect_rate))+geom_histogram(color="red",fill="blue")+ggtitle("Model")
ggplot(data=make,aes(x=defect_rate))+geom_histogram(color="blue",fill="red")+ggtitle("Maker")
# Coefficients with large magnitude (intercept dropped)
co[abs(co)>2][-1]
|
847c05a1cb02e84cb88cc20c341c27595b42637e
|
8efa4abbf80541dee202d9211bec2d71991519da
|
/ch_13/ch_13.R
|
1d1ff33b33608293890662e172ecc3b94513c8d1
|
[] |
no_license
|
kimjunho12/R_BigData
|
ad07009c7e9b919f0321b84655758791004cb3ab
|
fdff2da689a31a6bbe38d448c52f7decc0730fee
|
refs/heads/master
| 2023-06-09T10:42:01.070830
| 2021-06-30T01:53:43
| 2021-06-30T01:53:43
| 361,130,167
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
ch_13.R
|
# One-sample t-test example (coffee fill volume)
# 01. Load the data
RTD = read.csv('../data/Ch09.RTD.csv', header = T)
head(RTD)
RTD = round(RTD, digits = 2)
head(RTD)
# 02. Descriptive statistics
attach(RTD)
library(psych)
describe(RTD)
# 03. Plots
rpar = par(no.readonly = T) # save the default par settings for later restore
par(mfrow = c(1, 2))
boxplot(weight)
hist(
  weight,
  breaks = 10,
  col = "red",
  xlab = "커피 용량",
  ylab = "개수",
  ylim = c(0, 25),
  main = "커피 용량에 대한 히스토그램 및 정규 분포"
)
par(rpar)
# 04. Statistical analysis: two-sided one-sample t-test against mu = 275
options("scipen" = 20) # avoid scientific notation in the output
t.test(
  RTD,
  alternative = c("two.sided"),
  mu = 275.0,
  conf.level = 0.95
)
# If p-value > 0.05 we fail to reject the null hypothesis (mean = 275)
# 05. Plot of the test result
# NOTE(review): se = 1.6 is hard-coded here rather than computed from RTD.
mu=275
se=1.6
inter = qt(p=0.025, df=99) # 2.5% quantile (negative) for a 95% confidence interval
data = rnorm(1000, mu, se)
data = sort(data)
plot(data,
     dnorm(data, mu, se), # draw the normal density curve under H0
     type = 'l',
     main = "커피용량(mu=275) 검정",
     xlim = c(220,285))
abline(v=mu, col="green", lty = 5)
# inter is negative, so mu + inter*se is the lower CI bound and
# mu - inter*se the upper bound
abline(v=mu+inter*se, col="blue", lty = 5)
abline(v=mu-inter*se, col="blue", lty = 5)
abline(v=236.35, col='red', lty=5)
detach(RTD)
|
39d351969bd0af8195fbda8e8a572c6b6e96bfb2
|
3f453706bbd71babb3e1f5ae9a2dfeddd3bf0be6
|
/kaggle_crime_pega.R
|
d9506f37d98e61b3425f6c6c206d8411ba3c6d4e
|
[] |
no_license
|
davisincubator/kaggle_sf_crime
|
4cbe91e45d5ab497121faf9434fca368a3ccac35
|
3ecea38e025f94c89e8550e7845ae0926db3e6a3
|
refs/heads/master
| 2021-01-10T12:50:41.504272
| 2016-02-20T03:37:54
| 2016-02-20T03:37:54
| 52,128,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,035
|
r
|
kaggle_crime_pega.R
|
#####--------------------------#####
#####     Kaggle SF crime      #####
#####--------------------------#####
# Exploratory analysis of the Kaggle San Francisco crime training data.
# Expects "train.csv" in the working directory.

# Load all libraries up front. (The original called dplyr verbs before
# library(dplyr), and had a bare `library()` call -- which opens the
# package browser -- where library(ggplot2) was intended.)
library(dplyr)
library(ggplot2)
library(ggmap)

dir()
dat <- read.csv("train.csv")
head(dat)
table(dat$Category)
which(dat$Category == "TREA")
dat[which(dat$Category == "ASSAULT"), ]$Descript

######################################################
# plot of number of crimes in each category per year #
######################################################
# Dates look like "YYYY-MM-DD hh:mm:ss"; split on "-" and keep the year.
year <- strsplit(as.character(dat$Dates), "-")
head(year)
# vapply instead of sapply(1:length(...)): type-stable and index-safe.
y <- vapply(year, function(parts) parts[[1]], character(1))
head(y)
dat$year <- y
class(dat$Dates)
length(year)
table(dat$year)

# checking for missing data
sum(is.na(dat$Dates))

# incident count per crime category
crime_table <- dat %>%
  group_by(Category) %>%
  summarise(count = n()) %>%
  arrange(desc(count))

# grouping by year, day of week and crime category
crime_cat <- dat %>%
  group_by(year, DayOfWeek, Category) %>%
  summarise(count = n()) %>%
  arrange(desc(count))

# plot of freq crime cats by year and day of week
ggplot(aes(x = DayOfWeek, y = count, fill = Category), data = crime_cat) +
  geom_bar(stat = "identity", position = "Dodge") + facet_grid(year ~ .)

# non faceted plot of freq of crime cats by year
ggplot(aes(x = year, y = count, fill = Category), data = crime_cat) +
  geom_bar(stat = "identity", position = "Dodge")

# plot of frequency of crime categories by year
ggplot(aes(x = Category, y = count), data = crime_cat) +
  geom_bar(stat = "identity") + facet_grid(year ~ .) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

# grouping by district and year
names(dat)
crime_cat_dist <- dat %>%
  group_by(year, PdDistrict, Category) %>%
  summarise(count = n()) %>%
  arrange(desc(count))
ggplot(aes(x = PdDistrict, y = count, fill = Category), data = crime_cat_dist) +
  geom_bar(stat = "identity", position = "Dodge") + facet_grid(year ~ .)

# grouping by district and not year
crime_cat_dist2 <- dat %>%
  group_by(PdDistrict, Category) %>%
  summarise(count = n()) %>%
  arrange(desc(count))

# messing around with longitude and latitude
smoothScatter(dat$X, dat$Y, xlim = c(-122.5, -122.35))
d <- densCols(dat$X, dat$Y, colramp = colorRampPalette(rev(rainbow(10, end = 4/6))))
# Build the data frame the original referenced as `df` but never created:
# one row per incident, coloured by local point density.
df <- data.frame(x = dat$X, y = dat$Y, d = d, stringsAsFactors = FALSE)
p <- ggplot(df) +
  geom_point(aes(x, y, col = d), size = 1) +
  scale_color_identity() +
  theme_bw()
print(p)

# playing with ggmap function
sf <- "the castro"
qmap(sf, zoom = 13, source = "stamen", maptype = "toner")
sf_map <- qmap(sf, zoom = 13)
sf_map +
  geom_point(aes(x = X, y = Y, colour = Category),
             data = dat)

# subsetting by violent crimes: robbery, assault, sex offenses forcible
# and creating ggmap plot with crimes by location
vio_crimes <- subset(dat, Category == "ROBBERY" | Category == "ASSAULT" |
                       Category == "SEX OFFENSES FORCIBLE")
# Equivalent logical-index subsetting. (The original assigned this to `d`,
# silently clobbering the density-colour vector above.)
index <- dat$Category %in% c("ROBBERY", "ASSAULT", "SEX OFFENSES FORCIBLE")
vio_crimes2 <- dat[index, ]
sf_map +
  geom_point(aes(x = X, y = Y, colour = Category),
             data = vio_crimes)
|
22e9d53d0ec8e1e4981390cad583c8009d6e08ab
|
eed89847372fac4e0409b2847b751823d8f7b2f8
|
/show_code.R
|
9e2b3d6b62286ec4f5f74ff3a7a03055663ab600
|
[] |
no_license
|
vogon-poetic/math-comp
|
32c7f66b7ab6598dec7a2e990a6237cd6723a854
|
d89b4c477e685887e8d3188b601302b62dfd6411
|
refs/heads/master
| 2021-03-19T13:06:03.401082
| 2018-05-03T18:51:10
| 2018-05-03T18:51:10
| 119,073,695
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
show_code.R
|
## Question 2) check if a five-digit number n is divisible by 9
## (a number is divisible by 9 exactly when the sum of its digits is)
n <- as.numeric(readline("enter a positive int (n): "))
original_n <- n

# Accumulate the digit sum directly instead of storing each digit.
digit_sum <- 0
for (position in 1:5) {
  digit_sum <- digit_sum + n %% 10  # current lowest digit
  n <- n %/% 10                     # drop that digit
}

if (digit_sum %% 9 == 0) {
  cat(sprintf("%d is divisible by 9\n", original_n))
} else {
  cat(sprintf("%d is NOT divisible by 9\n", original_n))
}
|
e55718a7a158c876b09bbb2cd8133a069cb4b10d
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738520-test.R
|
f5e50847e0dedd344787e0f7a34b573a6d30112f
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
1612738520-test.R
|
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(5.37986976831671e+228, 3.07839226128608e+169, 3.62462043001606e+228, 5.43226404014686e-312, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), u = numeric(0))
# Invoke the package-internal Penman evaporation routine on the
# fuzzer-generated argument list above (`meteor:::` reaches an unexported
# function) and print the structure of whatever it returns.
result <- do.call(meteor:::E_Penman,testlist)
str(result)
|
4ed4d624d2835706f6e71dae01754416edb77f97
|
48b62187c14ecf1d404043747a90705982a60f61
|
/Text Mining/text.R
|
34b663187e3328a1b50399b3da5c2e548f79f7a7
|
[] |
no_license
|
nndark/R
|
7bb017a9565c90e6db60f947447918ab48d9a38e
|
1711dce3c0a4fb1c81a5996bef02e427049c914c
|
refs/heads/master
| 2021-02-17T00:14:09.091496
| 2020-12-22T13:59:04
| 2020-12-22T13:59:04
| 245,055,221
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 607
|
r
|
text.R
|
#============
# Text mining
#============
# Text mining is a good way to extract insight from text: app reviews,
# trending search terms, speeches, novels and other text-format files
# can all be analysed this way.
#
#========
# Library
#========
# KoNLP requires Java, so JAVA_HOME must be set before loading it.
# NOTE(review): the original path said ".../Jave/..."; assumed to be a typo
# for "Java" -- confirm the actual JRE install location on this machine.
Sys.setenv(JAVA_HOME = 'C:/Program Files/Java/jre1.8.0_251')
library(KoNLP)     # Korean-language morphological analysis
library(wordcloud) # word clouds (the original loaded "worldcloud", which is not a CRAN package)
# (A bare `library()` call was removed: it only opens the package browser.)
#==========
# Reference
#==========
# "Mastering R Programming for Big Data", Intermediate 1
# (data visualisation, maps and data, text mining)
|
ac22747fa70c9b4280f3e920b2ab5bdc4e56acd0
|
331daade012f87484e435d4e8397122a45d10dae
|
/R/p.lin.lang.R
|
b1ab625f0315a271105cb3220e796a70c0a9c6d1
|
[] |
no_license
|
stela2502/Rscexv
|
9f8cd15b6a1b27056d1ef592c4737e33f4ec459f
|
81c3d6df48152a3cccd85eead6fd82918b97733f
|
refs/heads/master
| 2022-07-26T15:29:37.035102
| 2022-07-06T15:59:55
| 2022-07-06T15:59:55
| 54,368,831
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,634
|
r
|
p.lin.lang.R
|
#' @name p.lin.lang
#' @aliases p.lin.lang,Rscexv-method
#' @rdname p.lin.lang-methods
#' @docType methods
#' @description Calculates the linear hypothesis p value by bootstrapping a
#'   weighted correlation between per-group expression medians and the
#'   fraction of expressed cells.
#' @param x numeric vector of expression values; the value -20 is treated
#'   as missing (converted to NA before analysis)
#' @param groups.n the amount of groups
#' @param clus the grouping vector (integer group id per element of x)
#' @param n the amount of boot strap operations default=1000
#' @title description of function p.lin.lang
#' @export
setGeneric('p.lin.lang', ## Name
	function ( x, groups.n, clus, n=1000 ) {
		standardGeneric('p.lin.lang')
	}
)

setMethod('p.lin.lang', signature = c ('numeric'),
	definition = function ( x, groups.n, clus, n=1000 ) {
	# -20 is the platform's "not expressed" sentinel; treat it as missing.
	x[which( x == -20)] <- NA
	real_list <- vector ( 'list', groups.n)
	random_length <- vector ( 'numeric', groups.n)
	# Split x into one vector per group and remember each group's size
	# (sizes are reused when drawing bootstrap samples below).
	for( a in 1:groups.n){
		real_list[[a]]=x[which(clus == a)]
		random_length[a] = length(real_list[[a]])
		#real_list[[a]] = real_list[[a]][is.na(real_list[[a]]) == F]
	}
	## get one obj with all data
	# NOTE(review): calc.lin.lang.4_list() is defined elsewhere in the
	# package; from here it is expected to return at least $cor, $weight
	# and $groupIDs -- confirm against its definition.
	stat_real = calc.lin.lang.4_list ( real_list )
	p_value = 1
	#print (stat_real$cor)
	# Guard against a missing/undefined correlation: fall back to a tiny
	# placeholder so the thresholds below behave predictably.
	if ( is.null(stat_real$cor) ) {
		stat_real$cor = 0.001
	}
	if ( is.na(stat_real$cor) ) {
		stat_real$cor = 0.001
	}
	if ( length(stat_real$cor) == 0){
		stop ( "Some crap happened!" )
	}
	# Only bootstrap when there are enough groups and a non-trivial
	# observed correlation; otherwise the p value stays at 1.
	if ( length(stat_real$weight) > 2 && stat_real$cor > 0.001 ){
		## bootstrap
		cmp = vector( 'numeric', n )
		medians <- vector ( 'numeric', groups.n)
		or <- vector ( 'numeric', groups.n)
		expo = 2
		for ( i in 1:n ) {
			# Re-draw each group from the pooled data (same group sizes),
			# then recompute the expressed fraction and median per group.
			for( gid in stat_real$groupIDs){
				tmp =sample(x,random_length[gid])
				tmp = tmp[is.na(tmp) ==F]
				or[gid] = length(tmp) / random_length[gid]
				if ( length(tmp) == 0 ){
					medians[gid] = 0
				}
				else if ( length(tmp) == 1 ){
					medians[gid] = tmp[1]
				}else {
					medians[gid] = median(tmp)
				}
			}
			# Weighted correlation of (expressed fraction, median) pairs;
			# NOTE(review): corr() here is presumably the weighted
			# correlation from an attached package (e.g. boot) -- confirm.
			cmp[i] = corr( cbind(or[stat_real$groupIDs], medians[stat_real$groupIDs]), w = stat_real$weight / sum(stat_real$weight) )
			# Every 10^expo iterations, check whether the p value has
			# saturated and stop early if so (saves bootstrap iterations).
			if ( i %% 10^expo == 0 ){
			#	expo = expo + 1
				t <- boot_p_value ( cmp, stat_real$cor, i )
				#print (paste( "I check whether the p value (",t,") is already saturated",i))
				if ( t > 20/i ) {
					break
				}
			}
			if (is.na(cmp[i]) ){
				cmp[i]=1/n
			}
		}
		#hist( cmp )
		#abline(v=stat_real$cor, col='red')
		stat_real$bootstrapedcor = cmp
		# Final p value over the iterations actually run (non-zero entries).
		p_value <- boot_p_value ( cmp, stat_real$cor, sum( cmp != 0 ) )
		#print ( paste('Final p value = ',p_value, 'Using stat real cor =',stat_real$cor,"and n =",sum( cmp != 0 ),"values"))
	}
	else {
		p_value = 1
	}
	# Return the stats object augmented with the bootstrap p value.
	stat_real$p_value = p_value
	stat_real
}
)
|
6538410ae461be85cd6fe9be858c896d1e4de2eb
|
30cbe00473560ef95ee135d0f624f9768b04a384
|
/01_retrieve-data.R
|
c53f73c89e9274634aef4af2a7b49e627cb5cc1c
|
[
"Apache-2.0"
] |
permissive
|
mcrpc/human-service-transportation
|
ccadedceaa67a842d90406747e8d06394028726e
|
4029b12e55031fe9beac3e3e11e9eb1484fe0b96
|
refs/heads/master
| 2021-07-07T03:10:50.133265
| 2020-08-26T21:13:00
| 2020-08-26T21:13:00
| 225,943,989
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,091
|
r
|
01_retrieve-data.R
|
# initialize local variables ----------------------------------------------
# NOTE(review): this script assumes `acsYear`, `addACSYearsToFilename()`,
# `censusDataInventory`, `coalesceByColumn()`, `region6CountyFIPS3`,
# `outputDataDirectory`, the variable-name constants used in
# `timeSeriesVariableVector`, and the magrittr tee pipe `%T>%` are all
# provided by an earlier setup script -- confirm before running standalone.
acsSurvey <- "acs5"
censusYear <- 2010
# Shared readr column spec: GEOID must stay character to keep leading zeros.
columnTypeList <- readr::cols(
  GEOID = col_character()
)

# load data ---------------------------------------------------------------
# for Illinois counties, Illinois tracts, and HSTP region 6 block groups
# (pulling data for all Illinois block groups resulted in obscenely long
# processing times)
#
# for each dataset, first try to load from saved .csv file,
# if that fails, get data from census API using censusapi and tidycensus
# packages, combining into one table using dplyr
# (each tryCatch's error handler both returns the fetched table and writes
# it to disk, so the next run hits the CSV cache instead of the API)

# illinois county data
illinoisCountyDataFileName <- addACSYearsToFilename(
  "illinois-counties.csv",
  acsYear
)
illinoisCountyDataFile <- paste(
  outputDataDirectory,
  illinoisCountyDataFileName,
  sep = "/"
)
illinoisCountyRawData <- tryCatch(
  {
    readr::read_csv(
      illinoisCountyDataFile,
      col_types = columnTypeList
    )
  },
  error = function(err) {
    # 2010 decennial counts: P002001 = total population,
    # P002003 = urban population (Illinois = state FIPS 17).
    censusTable <- censusapi::getCensus(
      name = "dec/sf1",
      vintage = censusYear,
      vars = c("P002001", "P002003"),
      region = "county:*",
      regionin = "state:17"
    ) %>%
      dplyr::mutate(
        GEOID = paste0(state, county, sep = ""),
        population2010 = P002001,
        percentUrban2010 = P002003/P002001
      ) %>%
      dplyr::select(-c(state, county, P002001, P002003)) %>%
      tibble::as_tibble()
    # Pull every non-block ACS table listed in the inventory, then collapse
    # the per-table rows down to one row per GEOID.
    acsTable <- purrr::map_dfr(
      dplyr::filter(censusDataInventory, geo_level != "block")[[1]],
      ~ tidycensus::get_acs(
        geography = "county",
        table = .,
        state = "17",
        year = acsYear,
        survey = acsSurvey,
        output = "wide"
      )
    ) %>%
      dplyr::group_by(GEOID) %>%
      dplyr::summarize_all(coalesceByColumn)
    illinoisCountyRawData <- dplyr::left_join(censusTable, acsTable)
    readr::write_csv(illinoisCountyRawData, illinoisCountyDataFile)
  }
)

# illinois county previous year data
previousYear <- acsYear - 1
illinoisCountyPreviousYearDataFileName <- addACSYearsToFilename(
  "illinois-counties.csv",
  previousYear
)
# The previous-year pull requests individual variables rather than whole
# tables, so strip the trailing "E" (estimate suffix) from the stored names.
previousYearVariableList <- unlist(
  acsVariableTibble$acs_variables_2018
) %>%
  stringr::str_remove("E") %>%
  append(
    unlist(acsVariableTibble$denominator) %>%
      stringr::str_remove("E")
  )
illinoisCountyPreviousYearDataFile <- paste(
  outputDataDirectory,
  illinoisCountyPreviousYearDataFileName,
  sep = "/"
)
illinoisCountyPreviousYearRawData <- tryCatch(
  {
    readr::read_csv(
      illinoisCountyPreviousYearDataFile,
      col_types = columnTypeList
    )
  },
  error = function(err) {
    censusTable <- censusapi::getCensus(
      name = "dec/sf1",
      vintage = censusYear,
      vars = c("P002001", "P002003"),
      region = "county:*",
      regionin = "state:17"
    ) %>%
      dplyr::mutate(
        GEOID = paste0(state, county, sep = ""),
        population2010 = P002001,
        percentUrban2010 = P002003/P002001
      ) %>%
      dplyr::select(-c(state, county, P002001, P002003)) %>%
      tibble::as_tibble()
    # %T>% (magrittr tee) writes the CSV as a side effect while still
    # returning the joined table from the handler.
    illinoisCountyPreviousYearRawData <- tidycensus::get_acs(
      geography = "county",
      variable = previousYearVariableList,
      state = "17",
      year = previousYear,
      survey = acsSurvey,
      output = "wide"
    ) %>%
      dplyr::left_join(censusTable) %T>%
      readr::write_csv(illinoisCountyPreviousYearDataFile)
  }
)

# illinois tract data
illinoisTractDataFileName <- addACSYearsToFilename(
  "illinois-tracts.csv",
  acsYear
)
illinoisTractDataFile <- paste(
  outputDataDirectory,
  illinoisTractDataFileName,
  sep = "/"
)
illinoisTractRawData <- tryCatch(
  {
    readr::read_csv(
      illinoisTractDataFile,
      col_types = columnTypeList
    )
  },
  error = function(err) {
    censusTable <- censusapi::getCensus(
      name = "dec/sf1",
      vintage = censusYear,
      vars = c("P002001", "P002003"),
      region = "tract:*",
      regionin = "state:17"
    ) %>%
      dplyr::mutate(
        GEOID = paste0(state, county, tract, sep = ""),
        population2010 = P002001,
        percentUrban2010 = P002003/P002001
      ) %>%
      dplyr::select(-c(state, county, P002001, P002003)) %>%
      tibble::as_tibble()
    # Tract-level dedupe goes through gather/spread rather than
    # summarize_all (see the block-group note below about performance).
    acsTable <- purrr::map_dfr(
      dplyr::filter(censusDataInventory, geo_level != "block")[[1]],
      ~ tidycensus::get_acs(
        geography = "tract",
        table = .,
        state = "17",
        #county = region6CountyFIPS3,
        year = acsYear,
        survey = acsSurvey,
        output = "wide"
      )
    ) %>%
      tidyr::gather(variable, value, -GEOID, na.rm = TRUE) %>%
      dplyr::group_by(GEOID, variable) %>%
      dplyr::distinct(value) %>%
      tidyr::spread(variable, value)
    illinoisTractRawData <- dplyr::right_join(censusTable, acsTable)
    readr::write_csv(illinoisTractRawData, illinoisTractDataFile)
  }
)

# illinois tract previous year data
illinoisTractPreviousYearDataFileName <- addACSYearsToFilename(
  "illinois-tracts.csv",
  previousYear
)
illinoisTractPreviousYearDataFile <- paste(
  outputDataDirectory,
  illinoisTractPreviousYearDataFileName,
  sep = "/"
)
illinoisTractPreviousYearRawData <- tryCatch(
  {
    readr::read_csv(
      illinoisTractPreviousYearDataFile,
      col_types = columnTypeList
    )
  },
  error = function(err) {
    censusTable <- censusapi::getCensus(
      name = "dec/sf1",
      vintage = censusYear,
      vars = c("P002001", "P002003"),
      region = "tract:*",
      regionin = "state:17"
    ) %>%
      dplyr::mutate(
        GEOID = paste0(state, county, tract, sep = ""),
        population2010 = P002001,
        percentUrban2010 = P002003/P002001
      ) %>%
      dplyr::select(-c(state, county, P002001, P002003)) %>%
      tibble::as_tibble()
    illinoisTractPreviousYearRawData <- tidycensus::get_acs(
      geography = "tract",
      variable = previousYearVariableList,
      state = "17",
      year = previousYear,
      survey = acsSurvey,
      output = "wide"
    ) %>%
      dplyr::right_join(censusTable, .) %T>%
      readr::write_csv(illinoisTractPreviousYearDataFile)
  }
)

# region 6 block group data
region6BlockGroupDataFileName <- addACSYearsToFilename(
  "region-6-block-groups.csv",
  acsYear
)
region6BlockGroupDataFile <- paste(
  outputDataDirectory,
  region6BlockGroupDataFileName,
  sep = "/"
)
region6BlockGroupRawData <- tryCatch(
  {
    readr::read_csv(
      region6BlockGroupDataFile,
      col_types = columnTypeList
    )
  },
  error = function(err) {
    # 2019-12-06 - TRRILEY:
    # census API does not support requests for decennial census data for block
    # groups across different tracts and counties, so P002001 and P002003 cannot
    # be retrieved for region 6 block groups and urban population percentage
    # cannot be determined at this time
    acsTable <- purrr::map_dfr(
      dplyr::filter(censusDataInventory, geo_level == "block group")[[1]],
      ~ tidycensus::get_acs(
        geography = "block group",
        table = .,
        state = "17",
        county = region6CountyFIPS3,
        year = acsYear,
        survey = acsSurvey,
        output = "wide"
      )
    ) %>%
      dplyr::group_by(GEOID) %>%
      dplyr::summarize_all(coalesceByColumn)
    # the script hangs on the above line if we don't limit ourselves to just
    # region 6 block groups--probably an issue with coalesceByColumn function
    region6BlockGroupRawData <- acsTable
    readr::write_csv(region6BlockGroupRawData, region6BlockGroupDataFile)
  }
)

# county time series data -------------------------------------------------
# Last five ACS years, named list so map_dfr(.id = "year") labels rows.
acsYearList <- as.list(c(acsYear - 4:0)) %>%
  set_names(c(acsYear - 4:0))
timeSeriesColumnTypes <- readr::cols(
  year = col_integer(),
  GEOID = col_character()
)
# NOTE(review): these bare names (est_blwpov, dnm_blwpov, ...) are
# variable-id constants defined outside this script -- confirm they are in
# scope before this point.
timeSeriesVariableVector <- c(
  est_blwpov,
  dnm_blwpov,
  est_hhsnap,
  dnm_hhsnap,
  # est_nodipl,
  # dnm_nodipl,
  # the 2011-2015 ACS 5-year estimates changed S2301
  # causing error: unknown variable 'S2301_C01_032E'
  est_unempr,
  est_lbrfpr,
  est_nocars,
  dnm_nocars,
  est_alttrn,
  dnm_alttrn,
  est_ovr65,
  dnm_ovr65,
  inc_percap,
  inc_medhh,
  gini,
  est_vet,
  dnm_vet,
  est_veto55,
  dnm_veto55,
  est_dsblty,
  dnm_dsblty,
  est_dsbo65,
  dnm_dsbo65
)
# stringr::str_trunc(10, side = "right", ellipsis = "")
# removing E may not be necessary

# county time series data is not currently being used
# due to the need to filter out urban areas from McLean and Kankakee
#
# illinoisCountyTimeSeriesDataFile <- paste(
#   outputDataDirectory,
#   "county-time-series-data.csv",
#   sep = "/"
# )
#
# illinoisCountyTimeSeriesData <- tryCatch(
#   {
#     readr::read_csv(
#       illinoisCountyTimeSeriesDataFile,
#       col_types = timeSeriesColumnTypes
#     )
#   },
#   error = function(err) {
#     illinoisCountyTimeSeriesData <- map_dfr(
#       acsYearList,
#       ~ get_acs(
#         geography = "county",
#         variable = timeSeriesVariableVector,
#         state = "17",
#         year = .x,
#         survey = acsSurvey,
#         output = "tidy"
#       ),
#       .id = "year"
#     ) %>%
#       mutate(year = as.integer(year)) %T>%
#       readr::write_csv(illinoisCountyTimeSeriesDataFile)
#   }
# )

illinoisTractTimeSeriesDataFile <- paste(
  outputDataDirectory,
  "tract-time-series-data.csv",
  sep = "/"
)
illinoisTractTimeSeriesData <- tryCatch(
  {
    readr::read_csv(
      illinoisTractTimeSeriesDataFile,
      col_types = timeSeriesColumnTypes
    )
  },
  error = function(err) {
    # One tidy row per year/GEOID/variable across the five ACS vintages.
    illinoisTractTimeSeriesData <- map_dfr(
      acsYearList,
      ~ get_acs(
        geography = "tract",
        variable = timeSeriesVariableVector,
        state = "17",
        year = .x,
        survey = acsSurvey,
        output = "tidy"
      ),
      .id = "year"
    ) %>%
      mutate(year = as.integer(year)) %T>%
      readr::write_csv(illinoisTractTimeSeriesDataFile)
  }
)

# geographic data ---------------------------------------------------------
HSTPGeoPackage <- here::here("data/input/hstp.gpkg")
|
1b2bf73c86c3efe0bb1fc9b85264f66a95abad58
|
8b21d97a1a4adc9ac9df962f762cbc820f6b6ab5
|
/R/gbr.R
|
19f5ef5619491e6988ba737aa38d31cc69c85889
|
[] |
no_license
|
rileym/AfricaSoil
|
79b16f21ae4dd2d47dde5405661b6b514c166331
|
c0dec6c33d0f48d1c903f92b46035dd24a6f0154
|
refs/heads/master
| 2016-09-06T17:41:47.630754
| 2015-03-05T23:14:10
| 2015-03-05T23:14:10
| 30,996,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,119
|
r
|
gbr.R
|
# GBM Regression
# Fits one gradient-boosted regression model per soil-property target on
# PCA-compressed spectral data (Kaggle "Africa Soil Property Prediction").
library("gbm")
library('caret')
source(file = './helpers.R')  # provides soil.clean() and other helpers

##
## Load Data, Clean, & Split
##
setwd('/Users/rileymatthews/Projects/Africa Soil')  # NOTE(review): machine-specific path
full.df <- read.csv(file = './Data/training.csv', header = TRUE, row.names = 'PIDN')
full.test.df <- read.csv(file = './Data/sorted_test.csv', header = TRUE, row.names = 'PIDN')

# 90/10 random train/validation split of the labelled data.
N <- dim(full.df)[1]
train.prop <- .9
train.idx <- sample(x = 1:N, size = N*train.prop, replace = FALSE)

train.dfs <- soil.clean(df = full.df[train.idx,], targets = TRUE)
val.dfs <- soil.clean(df = full.df[-train.idx,], targets = TRUE)
test.dfs <- soil.clean(df = full.test.df, targets = FALSE)

##
## Training Loop
##
# NOTE(review): this overwrites the 90% split above with the FULL training
# set, so val.dfs is never used -- presumably intentional for the final
# submission run (gbm's nTrain below holds out its own validation slice),
# but confirm.
train.dfs <- soil.clean(df = full.df, targets = TRUE)

test.predictions <- data.frame(row.names = row.names(test.dfs$wave))
gbm.objs <- list()
results <- list()
important.vars <- list()
predictions <- data.frame(row.names = row.names(test.dfs$wave))
targets <- colnames(train.dfs$targets)

# Hyperparameters: tree count, learning rate, depth, PCA components, and
# the internal train/validation cut used by gbm.fit.
n.trees <- 50
shrinkage <- .001
interaction.depth = 4
n.pc = 20
n.train <- ceiling(.85*dim(train.dfs$wave)[1])

par(mfrow=c(2,3))
# Only the 'P' target is fitted here; the loop is written to take more.
for (target in c('P')){
  print(target)
  # Compress the spectra to n.pc principal components, then append the
  # spatial covariates as extra predictors.
  pp <- preProcess(x = train.dfs$wave, method = 'pca', pcaComp = n.pc)
  gbm.objs[[target]] <- gbm.fit( x = cbind(predict(object = pp, newdata = train.dfs$wave),train.dfs$spatial),
                                 y = train.dfs$targets[[target]], distribution = 'gaussian',
                                 interaction.depth = interaction.depth, n.trees = n.trees, nTrain = n.train, shrinkage = shrinkage,
                                 response.name = target, verbose = TRUE )
  # Held-out RMSE curve over boosting iterations.
  plot(sqrt(gbm.objs[[target]]$valid.error), main = target, xlab = 'n trees', ylab = 'RMSE')
  # Keep only predictors with non-zero relative influence.
  smry <- summary(gbm.objs[[target]] )
  important.vars[[target]] <- smry$var[smry$rel.inf > 0]
  # predictions[target] <- predict(object = gbm.objs[[target]], newdata = test.dfs$wave,
  #                                n.trees = n.trees)
}

##
## Record results to disk.
##
#save('important.vars', file = './ivars.RData',)
#write.csv(predictions, file = './Data/Preds/GBM-R-preds.csv')
# NOTE(review): this wipes the entire workspace (including unsaved
# results, since the save/write calls above are commented out) -- confirm
# that is intended.
rm(list = ls())
|
a1d6daa2c96dc80e013d9791b854306147427634
|
89ce7e266948ac66630549c288357d21245895e9
|
/Day_3/Day_3_part2.R
|
f666abc39cab8b19785fb1c71e77dd3777712fa6
|
[] |
no_license
|
Emma-Buckley/Biostats-2021
|
9c45616b2a462e41d083cb25e3f4bd27a2b5ee86
|
56d1d94086f9f4e374622f7b26427e51bb1b3044
|
refs/heads/master
| 2023-04-09T08:54:14.977528
| 2021-04-24T13:44:46
| 2021-04-24T13:44:46
| 359,395,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,981
|
r
|
Day_3_part2.R
|
#Emma Buckley
#21 April 2021
#Correlations
#Day 3
# Worked example: Pearson and Kendall correlations on the Ecklonia kelp
# morphometrics data set, with single-panel and matrix visualisations.

install.packages("ggpubr")
install.packages("corrplot")

# Load libraries #activate the packages
library(tidyverse)
library(ggpubr)
library(corrplot)

# Load data
ecklonia <- read_csv("~/Biostatistics/Second part of R/Biostats-2021/data/ecklonia.csv")

#Removing categorical variables
#create a subsetted version of our data by removing all of the categorical variables
# (correlation functions need an all-numeric data frame)
ecklonia_sub <- ecklonia %>%
  select(-species, - site, - ID)

# Perform correlation analysis on two specific variables
# Note that we do not need the final two arguments in this function to be stated
# as they are the default settings.
# They are only shown here to illustrate that they exist.
#comparing two variables
cor.test(x = ecklonia$stipe_length, ecklonia$frond_length, #specifying a column
         use = "everything", method = "pearson")

#Now we want to compare many variables: full pairwise correlation matrix
ecklonia_pearson <- cor(ecklonia_sub)
ecklonia_pearson

#Kendall rank correlation
# First run a Shapiro-Wilk normality test per variable; Kendall is the
# non-parametric fallback when normality fails.
ecklonia_norm <- ecklonia_sub %>%
  gather(key = "variable") %>%
  group_by(variable) %>%
  summarise(variable_norm = as.numeric(shapiro.test(value)[2]))
ecklonia_norm
cor.test(ecklonia$primary_blade_length, ecklonia$primary_blade_width, method = "kendall")

#One panel visual
# Calculate Pearson r beforehand for plotting
#creating label of the r value
r_print <- paste0("r = ",
                  round(cor(x = ecklonia$stipe_length, ecklonia$frond_length),2))

# Then create a single panel showing one correlation
ggplot(data = ecklonia, aes(x = stipe_length, y = frond_length)) +
  geom_smooth(method = "lm", colour = "grey90", se = F) +
  geom_point(colour = "mediumorchid4") +
  geom_label(x = 300, y = 240, label = r_print) +
  labs(x = "Stipe length (cm)", y = "Frond length (cm)") +
  theme_pubclean()

#creates a correlation plot (matrix of circles, one per variable pair)
corrplot(ecklonia_pearson, method = "circle")
#If the colour is dark = strong correlation
#if the colour is lighter = weak correlation
|
55058bff15d832d1f79a0be6b669f4b37099d490
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Probability_And_Statistics_For_Engineering_And_The_Sciences_by_Jay_L_Devore/CH12/EX12.12/Ex12_12.R
|
892ff45e10642756a5b7cfbec6637cd512b668c4
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 426
|
r
|
Ex12_12.R
|
# Ex12.12, Page 496
# Answers may vary slightly due to rounding off of values.

# Predictor (x) and response (y) observations, n = 18.
x <- c(
  42.2, 42.6, 43.3, 43.5, 43.7, 44.1, 44.9, 45.3, 45.7,
  45.7, 45.9, 46.0, 46.2, 46.2, 46.8, 46.8, 47.1, 47.2
)
y <- c(44, 44, 44, 45, 45, 46, 46, 46, 47, 48, 48, 48, 47, 48, 48, 49, 49, 49)
data1 <- data.frame(x, y)

# Fit the simple linear regression y = b0 + b1 * x and print its summary.
model <- lm(y ~ x, data = data1)
cat("Regression model:\n")
print(summary(model))

# Display the ANOVA table for the fitted model.
a <- aov(model)
cat("ANOVA table:\n")
print(summary(a))
|
639ee79b720413c2d21b5ce49f889e3cba6bc2f8
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gdkEventGetScreen.Rd
|
2535960c6a0fe15f2390e6a687003c1b6dd04036
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 663
|
rd
|
gdkEventGetScreen.Rd
|
\alias{gdkEventGetScreen}
\name{gdkEventGetScreen}
\title{gdkEventGetScreen}
\description{Returns the screen for the event. The screen is
typically the screen for \code{event->any.window}, but
for events such as mouse events, it is the screen
where the pointer was when the event occurred -
that is, the screen which has the root window
to which \code{event->motion.x_root} and
\code{event->motion.y_root} are relative.}
\usage{gdkEventGetScreen(object)}
\arguments{\item{\verb{object}}{a \code{\link{GdkEvent}}}}
\details{Since 2.2}
\value{[\code{\link{GdkScreen}}] the screen for the event}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
99d36ebf4e05b773fd45c8a2b5cc656bce7cc27d
|
af63178518502c1b2392eff4cad496784ba5bd30
|
/Lab1.R
|
721a7328e7c98cc75872be160c808a7ee851ddd3
|
[] |
no_license
|
emilyOberH/R-language
|
a98fb82094c816635361cb7388cb96eaac4dda4b
|
934ce7e4f6360a950eea91ac7b9cd837edacbcac
|
refs/heads/master
| 2020-08-05T18:40:06.066731
| 2019-12-09T17:41:33
| 2019-12-09T17:41:33
| 212,659,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
Lab1.R
|
# Units sold by each seller over six periods.
Kate <- c(22, 18, 23, 20, 16, 20)
Lucy <- c(32, 18, 24, 18, 20, 16)

# Price per unit for each seller.
Kate_price <- 12
Lucy_price <- 15

# Revenue per seller (price is constant, so price * total units) and combined.
Kate_profit <- Kate_price * sum(Kate)
Lucy_profit <- Lucy_price * sum(Lucy)
Combined_profit <- Kate_profit + Lucy_profit

Kate_profit
Lucy_profit
Combined_profit
|
fbf78bf2f558e1626275c8a541bd0efbdea3da5e
|
79e46fb854004c24a020e326b2dd06a1044bdce6
|
/Fonctions_1.0/5AFC_Fonction.R
|
36d3084ec7476dedcd2d3218ba1614f522767f0e
|
[] |
no_license
|
floriancafiero/Motifs
|
d0a1c4b6ac96ae48a0cfb00d0e6e9292df2a1263
|
561111860fb8f6fce369aba1dd26f3f0485afdf2
|
refs/heads/master
| 2023-05-23T01:38:20.803320
| 2021-06-09T20:56:21
| 2021-06-09T20:56:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,466
|
r
|
5AFC_Fonction.R
|
## Internship - Legallois ##
## CA (correspondence analysis) function ##
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# Parameter documentation (examples):
# path = "~/Dropbox/2019-2020/Stage/Corpus_Retour_au_texte/"
# csv = "Corpus_motifs.csv" (output of the regex extraction script)
# une_oeuvre = "Rigodon.cnr" (to highlight a single work, use the name as it appears in the Oeuvre column)
# nmotifs = 30 (number of motifs to display)
# nombre_dimensions = 5 (number of dimensions kept in the results)
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# Globals mirroring the function defaults (used for interactive runs).
path = "~/Dropbox/2019-2020/Stage/Test/"
csv = "Corpus_motifs_UDPipe.csv"
nombre_oeuvres = 2
nmotifs = 30
nombre_dimensions = 5
#' Correspondence analysis (CA) of motif 5-grams across literary works.
#'
#' Reads a CSV of (word, motif, work) rows produced by the extraction step,
#' builds 5-grams of motifs, keeps only the 5-grams present in every work,
#' runs a CA on the relative-frequency lexical table and interactively
#' offers one of three biplots.
#'
#' @param path directory containing the CSV (setwd is called on it)
#' @param csv CSV file name; columns are renamed to mots/motifs/Oeuvre
#' @param nombre_oeuvres kept for backward compatibility; currently unused
#' @param nmotifs number of top-contributing motifs (rows) to display
#' @param nombre_dimensions number of CA dimensions to keep (ncp)
#' @param une_oeuvre work name to highlight when option 3 is chosen
#' @return a factoextra/ggplot plot object, chosen via an interactive prompt
motifs_afc <- function(path = "~/Dropbox/2019-2020/Stage/Test/", csv = "UDPipe_corpus_complet.csv", nombre_oeuvres = 2,
                       nmotifs = 30, nombre_dimensions = 5, une_oeuvre = "Rigodon.cnr"){
  # Libraries:
  require("tidyverse")
  require("tidytext")
  require("FactoMineR")
  require("ggplot2")
  require("ggrepel")
  require("ca")
  require("factoextra")
  require("data.table")
  # Read the data:
  # NOTE(review): setwd() inside a function changes the caller's working
  # directory as a side effect -- kept for backward compatibility.
  setwd(path)
  corpus_spec <- fread(csv, encoding = "UTF-8")
  ## Drop incomplete rows:
  corpus_spec <- corpus_spec[complete.cases(corpus_spec),]
  ## Normalise column names (tidy form), just in case:
  names(corpus_spec) <- c("mots", "motifs", "Oeuvre")
  ## Drop incomplete rows again after renaming (defensive, as in original):
  corpus_spec <- corpus_spec[complete.cases(corpus_spec),]
  ## Build 5-grams of motifs via lead(). The last four rows of the corpus
  ## have NA lookaheads, so ALL four lead columns must be filtered (the
  ## original only checked next_word/next_word2, letting 5-grams with
  ## trailing "NA" tokens slip through).
  corpus_spec_punct <- corpus_spec %>%
    mutate(next_word = lead(motifs),
           next_word2 = lead(motifs, 2),
           next_word3 = lead(motifs, 3),
           next_word4 = lead(motifs, 4)) %>%
    filter(!is.na(next_word), !is.na(next_word2),
           !is.na(next_word3), !is.na(next_word4)) %>%
    mutate(ngrammotif = paste(motifs, next_word, next_word2, next_word3, next_word4))
  # Keep only the 5-gram and work columns, renaming for downstream code:
  corpus_spec_punct <- corpus_spec_punct[,c("ngrammotif", "Oeuvre")]
  names(corpus_spec_punct) <- c("motifs", "Oeuvre")
  ## Count occurrences per (work, 5-gram):
  corpus_spec_punct <- corpus_spec_punct %>%
    dplyr::count(Oeuvre, motifs, sort = TRUE)
  ## Total 5-grams per work, to normalise counts into relative frequencies:
  total_words <- corpus_spec_punct %>%
    group_by(Oeuvre) %>%
    dplyr::summarize(total = sum(n))
  corpus_words_ngrams <- left_join(corpus_spec_punct, total_words, by = "Oeuvre")
  ## Relative frequency:
  corpus_words_ngrams$rel_freq <- corpus_words_ngrams$n / corpus_words_ngrams$total
  # Order by decreasing relative frequency:
  corpus_words_ngrams <- corpus_words_ngrams[order(corpus_words_ngrams$rel_freq, decreasing = T),]
  ## Reshape into a lexical table: columns = works, rows = 5-grams.
  # Ref: https://stackoverflow.com/questions/19346066/r-re-arrange-dataframe-some-rows-to-columns
  corpus_lexical_table <- xtabs(rel_freq~motifs+Oeuvre, corpus_words_ngrams)
  ## Re-order rows by the first column's frequency:
  corpus_lexical_table <- corpus_lexical_table[order(-corpus_lexical_table[,1], corpus_lexical_table[,1]),]
  ## Keep only 5-grams occurring in EVERY work (no zero cell in the row),
  ## since CA on rows unique to one work is uninformative:
  row_substract <- apply(corpus_lexical_table, 1, function(row) all(row !=0 ))
  corpus_clean <- corpus_lexical_table[row_substract,]
  corpus_clean <- as.matrix(corpus_clean)
  ## Correspondence analysis (graph = TRUE draws the default CA plot too):
  maCA <- CA(corpus_clean, ncp = nombre_dimensions, row.sup = NULL, col.sup = NULL,
             quanti.sup=NULL, quali.sup = NULL, graph = T,
             axes = c(1,2), row.w = NULL, excl=NULL)
  # Pre-build the three visualisations, showing the nmotifs rows with the
  # highest contributions:
  plot_ca <- fviz_ca_biplot(maCA, map ="rowprincipal", repel = T, select.row = list(contrib = nmotifs),
                            title = "Analyse Factorielle des Correspondances")
  # With a colour gradient based on row coordinates:
  plot_grad <- fviz_ca(maCA, map ="rowprincipal", repel = T, select.row = list(contrib = nmotifs),
                       col.row = "coord", title = "Analyse Factorielle des Correspondances")
  # Highlighting one particular work:
  une_ca <- fviz_ca_biplot(maCA, map ="rowprincipal", repel = T, select.row = list(contrib = nmotifs),
                           select.col = list(name = une_oeuvre), title = "Analyse Factorielle des Correspondances")
  # Interactive choice of which plot to return (prompt kept in French):
  visualisation <- as.numeric(readline("Visualisation, tapez 1 et enter \n Avec gradient de couleurs, tapez 2 \n Une oeuvre particulière, vérifiez que vous l'avez entrée dans le paramètre une_oeuvre et tapez 3"))
  if(visualisation == 1){
    return(plot_ca)
  }
  if(visualisation == 2){
    return(plot_grad)
  }
  if(visualisation == 3){
    return(une_ca)
  }
  else{
    print("Votre choix ne correspond pas aux critères ternaires proposés...!")
  }
}
# Example invocation: keep 2 CA dimensions and display the 30 top motifs.
motifs_afc(path = "~/Dropbox/2019-2020/Stage/Test_Regex_R/",
           csv = "Corpus_motifs_UDPipe.csv", nombre_dimensions = 2,
           nmotifs = 30)
|
302cb377f1a012e27c2159ee68781e798ccadf09
|
ba185ddcf7dbf23cc0f2b6e6adff5d6c6839fa82
|
/cgp_mut_inf.R
|
05449bbdee46d1108d743c6c5a3bfd497674bfe6
|
[] |
no_license
|
LOBUTO/CANCER.GENOMICS
|
56cbee294c50d17d4f2dde203a1bb806123a0404
|
d1bb3be22cb7ff763ff24b4247a283a8de56cbe7
|
refs/heads/master
| 2020-04-04T04:05:57.122690
| 2019-01-03T16:17:45
| 2019-01-03T16:17:45
| 26,308,455
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,790
|
r
|
cgp_mut_inf.R
|
#cgp_mut_inf.R
# Identify genes whose expression is influenced by recurrent mutations:
# for every mutation observed in more than 2 samples, compare expression of
# mutated vs. non-mutated cell lines (Wilcoxon rank-sum, FDR-corrected) and
# record the differentially expressed genes.
library(data.table)
library(reshape2)
library(parallel)

# Load inputs:
# cgp_mut - per-sample mutation calls (columns 1, 4, 7: SAMPLE, Gene, AA)
# cgp_exp - gene x sample expression matrix
cgp_mut <- fread("/tigress/zamalloa/OBJECTS/mutations.txt", select = c(1,4,7))
cgp_exp <- readRDS("/tigress/zamalloa/CGP_FILES/083016_cgp_exp.rds")

# Build mutation identifiers and keep mutations present in > 2 samples
cgp_mut$mutation <- paste0(cgp_mut$Gene, "_", cgp_mut$AA)
cgp_mut[, n := length(unique(SAMPLE)), by = "mutation"]
cgp_mut <- cgp_mut[n > 2, ]

mutations <- unique(cgp_mut$mutation)
total <- length(mutations)
cat(paste0("total mutations: ", total, "\n"))

nodes <- detectCores()
cat(paste0("nodes: ", nodes, "\n"))
cl <- makeCluster(nodes)
setDefaultCluster(cl)

# BUG FIX: `count` must exist before it can be exported to the workers
# (the original defined it only after clusterExport, which errors).
count <- 0
clusterExport(cl, varlist=c("data.table", "as.data.table", "cgp_mut", "cgp_exp", "mutations", "total", "count"), envir=environment())

main_table <- lapply(mutations, function(i) {
  # Progress counter; the lapply runs in the master process, so `<<-` is safe.
  # BUG FIX: the original only incremented `count` on the no-result branch
  # (after an early return), so progress was under-counted.
  count <<- count + 1
  cat(paste0(count, "\n"))
  # BUG FIX: write.table(x, file, ...) - the data and file arguments were
  # swapped, which wrote the literal string "log" to a file named after the
  # counter value instead of appending the counter to "log".
  write.table(paste0(count, "\n"), "log", append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep="\t")

  # Samples carrying mutation i vs. all remaining samples.
  # BUG FIX: the control group must exclude the mutated samples; the original
  # `cgp_mut[mutation != i]` kept samples that also carry mutation i (through
  # their other mutations), contaminating the comparison.
  pos_cells <- unique(cgp_mut[mutation == i, ]$SAMPLE)
  neg_cells <- setdiff(unique(cgp_mut$SAMPLE), pos_cells)

  # Influence of the mutation on each gene: Wilcoxon test between mutated
  # and non-mutated samples, FDR-corrected across genes.
  p_vals <- parApply(cl, cgp_exp, 1, function(x) wilcox.test(x[pos_cells], x[neg_cells])$p.value)
  p_vals <- p.adjust(p_vals, method="fdr")

  # Keep genes that are differentially expressed (FDR < 0.1)
  diff_genes <- names(p_vals)[p_vals < 0.1]
  if (length(diff_genes) > 1) {
    return(data.table(mutation = i, diff_genes))
  }
  NULL
})
stopCluster(cl)

main_table <- do.call(rbind, main_table)

# Store results
saveRDS(main_table, "/tigress/zamalloa/CGP_FILES/cgp_mut_inf.rds")
print("Done writing")
|
eb3458ae94bf53647b87599c4fd925393abf39df
|
edd6a9dd0f4ddb95b5c7f43226b775aa83b12dca
|
/LinReg/man/linreg-class.Rd
|
0430ac3b5ca4a7c17e5fbbd9d5f588375f25108c
|
[
"MIT"
] |
permissive
|
aleka769/A94Lab4
|
0d63a254c9af704f19863165db67ccce0053f98d
|
b77bb436222adf12bb762deaaaa5a7da0020b4e4
|
refs/heads/master
| 2020-03-28T22:45:48.557157
| 2018-09-24T09:23:21
| 2018-09-24T09:23:21
| 149,256,127
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 800
|
rd
|
linreg-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linreg_class2.R
\docType{class}
\name{linreg-class}
\alias{linreg-class}
\alias{linreg}
\title{RC type object to represent linreg-data.}
\description{
A \code{linreg} object holds the data and methods for the values computed by
the \code{linreg()} function.
}
\section{Fields}{
\describe{
\item{\code{beta_hat}}{Estimates for each beta}
\item{\code{fits}}{Contains fitted values}
\item{\code{residuals}}{Contains residuals, i.e. observed - fitted}
\item{\code{resid_var}}{Residual variance}
\item{\code{beta_info}}{Contains variance, t-value and p-value for each estimated beta value}
\item{\code{df}}{Degrees of freedom for the model}
\item{\code{formula}}{Formula used in model}
\item{\code{call}}{Call sent to \code{linreg()}}
}}
|
10a09b4daf37bf08efb578a660bae28352ca9915
|
24e9286f61183294a131b1dac53343091294fb41
|
/Bagging/Classification/bagging_class_test.R
|
81b2f3b9c88d3222869b9f7d434e882995927f42
|
[] |
no_license
|
saikatmondal15/Stock-Price-Prediction-of-Indian-Banking-Sector-Using-Machine-Learning-Tecniques
|
11ee7b4a592a926c7ca5a5661262269bce033f51
|
a872f05fadb982d5b1a87bc973a343ad29e217e3
|
refs/heads/master
| 2023-04-21T14:34:10.313190
| 2021-05-14T12:00:03
| 2021-05-14T12:00:03
| 367,329,095
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
r
|
bagging_class_test.R
|
# Bagged classification test for direction of stock close-price change (ipred).
library(ipred)

# Training data: first 2610 rows of the engineered classification dataset
training <- read.csv("/home/saikat/Documents/2020/Project/Dataset_ML_Cla.csv")
training <- training[0:2610, ]
summary(training)
tail(training)

# Held-out test data: 259 rows
test <- read.csv("/home/saikat/Documents/2020/Project/Dataset_ML_Cla_test.csv")
test <- test[0:259, ]

# The target must be a factor for classification bagging
training$close_norm <- as.factor(training$close_norm)

set.seed(300)
# BUG FIX: the number-of-bootstrap-replicates argument of ipred::bagging()
# is `nbagg`; the misspelled `nbag` was silently swallowed by `...`.
mybag <- bagging(close_norm ~ day + day_week + month + year + open_norm +
                   low_norm + high_norm + range_norm,
                 data = training, nbagg = 25)

#test$close_norm <- as.factor(test$close_norm)
# Predict on the test set; convert predicted factor levels back to integers
close_pred <- predict(mybag, test)
close_pred
close_pred <- as.integer(as.character(close_pred))
close_pred

# `attach(test)` replaced by explicit `test$` references below (avoids
# masking surprises from attached data frames).
plot(test$close_norm ~ close_pred,
     xlab = "Predicted percenatge change in Close value",
     ylab = "actual percentage change in close value", lwd = 2)

# Round predictions to the nearest integer class label
gg1 <- floor(close_pred + 0.5)
gg1
length(test$close_norm)
length(close_pred)

# Confusion matrix: actual vs. rounded prediction
ttt <- table(test$close_norm, gg1)
ttt
length(gg1)

# BUG FIX: the misclassification rate must be divided by the number of
# *test* observations (sum of the confusion matrix), not the training-set
# size 2610 used originally.
error <- (ttt[1, 2] + ttt[2, 1]) / sum(ttt)
error

# Identify the wrongly predicted records (prediction differs from truth)
x <- (gg1 - test$close_norm)
n <- which(x == 1 | x == -1)
length(n)
|
d0322c5a91fc728fe9ad68b7da7fe00ed4264842
|
93f8e4312d4de70c0fe8012d0cf3e0f56451982d
|
/ta_opcost_curve.rsx
|
f32f00052455630fec684dea841388f87ac518a5
|
[] |
no_license
|
alfanugraha/lumens_scripts
|
c312ce23e690c3e561c34443071befa5f0269649
|
dba6a7ad48bc21aaeb9f021f4cc354d72b144840
|
refs/heads/master
| 2023-07-27T19:16:14.244002
| 2019-07-16T10:44:01
| 2019-07-16T10:44:01
| 103,492,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,983
|
rsx
|
ta_opcost_curve.rsx
|
##TA-PostgreSQL=group
##NPV=file
##ques_c_db=string
##cost_threshold=number 2
##statusoutput=output table
# NOTE(review): the `##` lines above are QGIS Processing parameter
# declarations for this .rsx script - do not edit them.
# library(pander)
# library(knitr)
# library(markdown)
library(rasterVis)
library(reshape2)
library(plyr)
library(lattice)
library(latticeExtra)
library(RColorBrewer)
library(hexbin)
library(grid)
library(ggplot2)
library(foreign)
library(scales)
# Record the analysis start time (the eval/parse wrapper is redundant;
# Sys.time() alone would produce the same value).
time_start<-paste(eval(parse(text=(paste("Sys.time ()")))), sep="")
#=Load active project
# `proj.file` is expected from the surrounding LUMENS session; loading it
# restores objects used below (proj_descr, pgconf, idx_TA_opcost, ...).
load(proj.file)
# set driver connection
driver <- dbDriver('PostgreSQL')
project <- as.character(proj_descr[1,2])
DB <- dbConnect(
driver, dbname=project, host=as.character(pgconf$host), port=as.character(pgconf$port),
user=as.character(pgconf$user), password=as.character(pgconf$pass)
)
# Look-up table listing all datasets registered in the project database
list_of_data_lut<-dbReadTable(DB, c("public", "list_of_data_lut"))
# return the selected data from the list
data_lut<-list_of_data_lut[which(list_of_data_lut$TBL_NAME==ques_c_db),]
data_npv<-list_of_data_lut[which(list_of_data_lut$TBL_NAME==NPV),]
quesc_db<-as.character(data_lut[1,2])
n_dta<-nchar(as.character(factor(data_lut[1,2])))
# Parse the two period years (T1, T2) and the planning-unit name out of the
# table name, assumed to end with two concatenated 4-digit years.
# NOTE(review): substr() uses only the first element of a vector start/stop,
# so the `:` ranges below effectively pass scalars - confirm this is intended.
T1<-as.integer(substr(data_lut[1,2], (n_dta-7):(n_dta-4), (n_dta-4)))
T2<-as.integer(substr(data_lut[1,2], (n_dta-3):n_dta, n_dta))
pu_name<-substr(as.character(factor(data_lut[1,2])), 16:(n_dta-8), (n_dta-8))
#====CREATE FOLDER AND WORKING DIRECTORY====
idx_TA_opcost=idx_TA_opcost+1
wd<-paste(dirname(proj.file), "/TA/", idx_TA_opcost, "_OpCostCurve_", T1, "_", T2, "_", pu_name, sep="")
dir.create(wd)
setwd(wd)
# load datasets
data<-dbReadTable(DB, c("public", data_lut$TBL_DATA))
lookup_npv<-dbReadTable(DB, c("public", data_npv$TBL_DATA))
t1=T1
t2=T2
# Analysis period length in years
period<-t2-t1
#iteration=5
#prepare NPV look up table
# Attach an NPV value for the land-cover class at t1 (NPV1) and at t2 (NPV2)
# by merging the same look-up table twice under renamed columns.
lookup_n<-lookup_npv
lookup_n[,2]<-NULL
colnames(lookup_n)[1] ="ID_LC1"
colnames(lookup_n)[2] ="NPV1"
data<-merge(data,lookup_n,by="ID_LC1")
colnames(lookup_n)[1] ="ID_LC2"
colnames(lookup_n)[2] ="NPV2"
data<-merge(data,lookup_n,by="ID_LC2")
tot_area<-sum(data$COUNT)
#Select data where emission happened and count>0
data_em_sel <- data[ which(data$ck_em == "TRUE"),]
data_em_sel <- data_em_sel[ which(data_em_sel$em > 0),]
# Derived per-record quantities. The constant 3.67 converts tonnes of
# carbon to tonnes of CO2-equivalent (44/12). Opportunity cost is the NPV
# difference per tonne of CO2-eq emitted (or sequestered).
data_em_sel<-within(data_em_sel, {
em_rate<-((CARBON_t1-CARBON_t2)*(COUNT*3.67))/(tot_area*period)
em_tot<- (CARBON_t1-CARBON_t2)*3.67
sq_rate<-((CARBON_t2-CARBON_t1)*(COUNT*3.67))/(tot_area*period)
sq_tot<- (CARBON_t2-CARBON_t1)*3.67
opcost<-(NPV1-NPV2)/em_tot
opcost_sq<-(NPV1-NPV2)/sq_tot
cumsum_em<-cumsum(em_rate)
cumsum_sq<-cumsum(sq_rate)
})
#Build opcost table
# Opportunity-cost table: land-use change, zone, opportunity cost and
# emission rate per record.
lcc_col<-as.data.frame(data_em_sel$LU_CHG)
zone_col<-as.data.frame(data_em_sel$Z_NAME)
opcost_col<-as.data.frame(data_em_sel$opcost)
em_col<-as.data.frame(data_em_sel$em_rate)
opcost_tab<-cbind(lcc_col,zone_col)
opcost_tab<-cbind(opcost_tab,opcost_col)
opcost_tab<-cbind(opcost_tab,em_col)
names(opcost_tab)[1] <- "luchg"
names(opcost_tab)[2] <- "zone"
names(opcost_tab)[3] <- "opcost"
names(opcost_tab)[4] <- "emrate"
#BUILD POSITIVE OPCOST TABLE
# Positive opportunity costs, sorted ascending, with cumulative emission
# rate (abatement-cost-curve x axis); log10 feeds the plot's y axis.
opcost_tab_p<- opcost_tab[ which(opcost_tab$opcost >= 0),]
opcost_tab_p<- opcost_tab_p[order(opcost_tab_p$opcost),]
opcost_tab_p$cum_emrate<-cumsum(opcost_tab_p$emrate)
TA_opcost_database<-opcost_tab_p
write.dbf(TA_opcost_database,"TA_opcost_database.dbf")
opcost_tab_p$opcost_log<-log10(opcost_tab_p$opcost)
# log10(0) yields -Inf; recode infinities (via NA) to 0 for plotting
is.na(opcost_tab_p) <- sapply(opcost_tab_p, is.infinite)
opcost_tab_p[is.na(opcost_tab_p)] <- 0
#BUILD NEGATIVE OPCOST TABLE
# Negative opportunity costs: log10 of the absolute value with the sign
# restored, so both halves share one symmetric log-scaled axis.
opcost_tab_n<- opcost_tab[ which(opcost_tab$opcost < 0),]
opcost_tab_n<- opcost_tab_n[order(opcost_tab_n$opcost),]
opcost_tab_n$cum_emrate<-cumsum(opcost_tab_n$emrate)
opcost_tab_n$opcost_log<-opcost_tab_n$opcost*-1
opcost_tab_n$opcost_log<-log10(opcost_tab_n$opcost_log)*-1
#-----MODIFIED UP TO THIS LINE------------------------------------------
#COMBINE POS && NEG OPCOST
opcost_all<-rbind(opcost_tab_n, opcost_tab_p)
#opcost_tab_p$cum_emrate2<-as.factor(opcost_tab_p$cum_emrate)
#opcost_tab_n$cum_emrate2<-as.factor(opcost_tab_n$cum_emrate)
opcost_all$cum_emrate2<-as.factor(opcost_all$cum_emrate)
#find cost threshold
# Bar index of the first record whose log10 opportunity cost reaches the
# user-supplied threshold; used to place the vertical line on the plot.
opcost_all2<- opcost_all
opcost_all2$order<-c(1:nrow(opcost_all2))
find_x_val<-subset(opcost_all2, opcost_log>=log10(cost_threshold))
x_val<-find_x_val$order[1]
#opcost_all
#x<-qplot(x=cum_emrate2, y=opcost_log, fill=zone,data=opcost_all, geom="bar", xlab="Emission Per-Ha Area (ton CO2-eq/ha.year)", ylab="Opportunity Cost ($/ton CO2-eq)" )
#x<-x+geom_hline(aes(yintercept=cost_threshold), linetype="dashed")
#x<-x+geom_vline(aes(xintercept=x_val))
#x<-x+theme(axis.text.x = element_text(angle=90, hjust=1, vjust=0))
#x<-x+scale_y_continuous(breaks=c(-5,-4,-3,-2,-1,0,1,2,3,4,5))
#emission
#x<-qplot(x=cum_emrate2, y=opcost_log, fill=zone, data=opcost_tab_p, geom="bar", xlab="Emission Per-Ha Area (ton CO2-eq/ha.year)", ylab="Opportunity Cost ($/ton CO2-eq)")
#x<-x+theme(axis.text.x = element_text(angle=90, hjust=1, vjust=0))
#sequestration
#y<-qplot(x=cum_emrate2, y=opcost_log, fill=zone, data=opcost_tab_n, geom="bar", xlab="Emission Per-Ha Area (ton CO2-eq/ha.year)", ylab="Opportunity Cost ($/ton CO2-eq)" )
#y<-y+theme(axis.text.x = element_text(angle=90, hjust=1, vjust=0))
#WRITE REPORT
# Assemble the RTF report: annotated cover page, headings, and the
# opportunity-cost curve plot.
# NOTE(review): RTF()/addPng()/addParagraph() come from the `rtf` package and
# image_read()/image_annotate()/image_write() from `magick`; neither is
# loaded in this script, so they are presumably attached by the calling
# LUMENS environment - confirm.
title<-"\\b\\fs32 LUMENS-Trade-off Analysis (TA) Project Report\\b0\\fs20"
sub_title<-"\\b\\fs28 Sub modul 1: Opportunity Cost Curve \\b0\\fs20"
line<-paste("------------------------------------------------------------------------------------------------------------------------------------------------")
chapter1<-"\\b\\fs24 1.Opportunity curve \\b0\\fs20"
rtffile <- RTF("TA-Opportunity_cost_curve_report.doc", font.size=11, width = 8.267, height = 11.692, omi = c(0,0,0,0))
img_location <- paste0(LUMENS_path, "/ta_cover.png")
# loading the .png image to be edited
cover <- image_read(img_location)
# to display, only requires to execute the variable name, e.g.: "> cover"
# adding text at the desired location
text_submodule <- paste("Sub-Modul TA\n\n", location, ", ", "Periode ", T1, "-", T2, sep="")
cover_image <- image_annotate(cover, text_submodule, size = 23, gravity = "southwest", color = "white", location = "+46+220", font = "Arial")
cover_image <- image_write(cover_image)
# 'gravity' defines the 'baseline' anchor of annotation. "southwest" defines the text should be anchored on bottom left of the image
# 'location' defines the relative location of the text to the anchor defined in 'gravity'
# configure font type
addPng(rtffile, cover_image, width = 8.267, height = 11.692)
addPageBreak(rtffile, width = 8.267, height = 11.692, omi = c(1,1,1,1))
addParagraph(rtffile, title)
addParagraph(rtffile, sub_title)
addNewLine(rtffile)
addParagraph(rtffile, line)
addNewLine(rtffile)
addParagraph(rtffile, chapter1)
addNewLine(rtffile)
# Base-graphics opportunity-cost curve embedded in the report; y-axis
# tick positions are in log10 space but labelled in original units.
addPlot(rtffile, plot.fun=print, width=6, height=5, res=300,
barplot(opcost_all$opcost_log, axes=F, xlab='Emission Per-Ha Area (ton CO2-eq/ha.year)', ylab='Opportunity Cost ($/ton CO2-eq)', col=rainbow(20), space=0.01)+
box()+
axis(1)+
axis(2,at=log10(c(-10000, -1000, -100, -10, -1, -0.1, -0.01, -0.001, -0.0001, 0.0001, 0.001, 0.01, 0.1, 1, cost_threshold, 10, 100, 1000, 10000)),
label=c(-10000, -1000, -100, -10, -1, -0.1, -0.01, -0.001, -0.0001, 0.0001, 0.001, 0.01, 0.1, 1, cost_threshold, 10, 100, 1000, 10000))+
abline(h=log10(1), col='black')+
abline(h=log10(cost_threshold), lty=3)+abline(v=x_val+4)
)
addParagraph(rtffile, "\\b\\fs20 Figure 1. Opportunity cost curve t1\\b0\\fs20.")
addNewLine(rtffile)
done(rtffile)
# Persist the incremented run index back to the project file and close DB
resave(idx_TA_opcost, file=proj.file)
dbDisconnect(DB)
#=Writing final status message (code, message)
statuscode<-1
statusmessage<-"SCIENDO period projection successfully completed!"
statusoutput<-data.frame(statuscode=statuscode, statusmessage=statusmessage)
#-----------------------------------------------------------------------
# reports<-paste("
# Land Use Planning for Multiple Environmental Services
# ========================================================
# ***
#
# # Lembar hasil analisis TA-Opportunity Cost:
# # Perhitungan opportunity cost berdasarkan data profitabilitas
#
# ***
#
# # Opportunity cost oleh masing-masing unit perencanaan
# ```{r fig.width=12, fig.height=10, echo=FALSE}
# barplot(opcost_all$opcost_log, axes=F, xlab='Emission Per-Ha Area (ton CO2-eq/ha.year)', ylab='Opportunity Cost ($/ton CO2-eq)', col=rainbow(20), space=0.01)
# box()
# axis(1)
# axis(2,at=log10(c(-10000, -1000, -100, -10, -1, -0.1, -0.01, -0.001, -0.0001, 0.0001, 0.001, 0.01, 0.1, 1, cost_threshold, 10, 100, 1000, 10000)),
# label=c(-10000, -1000, -100, -10, -1, -0.1, -0.01, -0.001, -0.0001, 0.0001, 0.001, 0.01, 0.1, 1, cost_threshold, 10, 100, 1000, 10000))
# abline(h=log10(1), col='black')
# abline(h=log10(cost_threshold), lty=3)
# abline(v=x_val+4)
# ```
# ***
# # Intisari opportunity cost
# ```{r fig.width=10, fig.height=9, echo=FALSE}
# pandoc.table(TA_opcost_database)
#
# ```
# ***
# ")
#WRITE REPORT
# write(reports,file="reporthtml.Rmd")
# knit2html("reporthtml.Rmd", options=c("use_xhml"))
|
895f673b08a4978c8b459c185b3aa649fd8366c0
|
b02cf92ccfac713628c653aff2cf0d8057a622d8
|
/code/04_clean_general_payment_data.R
|
2797de3f92eff7e774b337db3bc4874cad6c15a8
|
[] |
no_license
|
anhnguyendepocen/unl-stat850
|
80992ab85cf816c588642aa35cf9cbc413024fc4
|
c597eeb5f75a7c6a3e332d0db31697a5010cd8e7
|
refs/heads/master
| 2023-06-18T16:22:15.405094
| 2021-07-15T17:43:54
| 2021-07-15T17:43:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 885
|
r
|
04_clean_general_payment_data.R
|
# Clean up General Payment Data
# Drops sparsely populated / low-information columns from the full CMS
# Open Payments "General Payment" extract, writes the cleaned table, and
# writes a 25% random sample of it.
library(tidyverse)

gpd <- read_csv("data/General_Payment_Data_Full.csv", guess_max = 36000)

gpd2 <- gpd %>%
  select(-matches("Teaching_Hospital"), -ends_with(c("2", "3", "4", "5")),
         -Form_of_Payment_or_Transfer_of_Value,
         -Covered_Recipient_Type, -Physician_Primary_Type,
         -Delay_in_Publication_Indicator, -Program_Year,
         -Payment_Publication_Date, -Related_Product_Indicator,
         -Dispute_Status_for_Publication,
         -Physician_First_Name, -Physician_Middle_Name,
         -Physician_Last_Name, -Physician_Name_Suffix,
         -matches("Physician_License_State")) # Remove less informative columns

# Diagnostic: number of distinct values per remaining column
# (modern across() replaces the superseded summarize_all()).
gpd2 %>% summarize(across(everything(), ~ length(unique(.x)))) %>% t()

write_csv(gpd2, "data/General_Payment_Data.csv", na = '.')

# BUG FIX: seed the RNG so the published 25% sample is reproducible;
# previously each run of this script produced a different sample file.
set.seed(850)
gpd2 %>%
  sample_frac(.25) %>%
  write_csv("data/General_Payment_Data_Sample.csv", na = '.')
|
0e856c5bc103cea15db0f1ac79c486b61ff07c85
|
bd986e1216c71b4efcddf1c1a835030e524be04a
|
/man/searchFeatures.Rd
|
c2f6e0a89a1b185aed9a1a8bbc0ee049d1abae37
|
[] |
no_license
|
labbcb/GA4GHclient
|
43ac3a6b4bd9ab802ddff20bfc57ec0c2871c44c
|
ec3a6efba8c3e8698b467620dccf441d8419e335
|
refs/heads/master
| 2021-01-19T07:28:36.178878
| 2017-10-30T16:54:30
| 2017-10-30T16:54:30
| 68,452,125
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,260
|
rd
|
searchFeatures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searchFeatures.R
\name{searchFeatures}
\alias{searchFeatures}
\title{searchFeatures function}
\usage{
searchFeatures(host, featureSetId, name = NA_character_,
geneSymbol = NA_character_, parentId = NA_character_,
referenceName = NA_character_, start = NA_integer_, end = NA_integer_,
featureTypes = character(), nrows = Inf, responseSize = NA_integer_)
}
\arguments{
\item{host}{URL of GA4GH API data server.}
\item{featureSetId}{The annotation set to search within. Either featureSetId
or parentId must be non-empty.}
\item{name}{Only returns features with this name
(case-sensitive, exact match).}
\item{geneSymbol}{Only return features with matching the provided gene symbol
(case-sensitive, exact match). This field may be replaced with a more generic
representation in a future version.}
\item{parentId}{Restricts the search to direct children of the given parent
feature ID. Either feature_set_id or parent_id must be non-empty.}
\item{referenceName}{Only return features on the reference with this name
(matched to literal reference name as imported from the GFF3).}
\item{start}{Required, if name or symbol not provided. The beginning of the
window (0-based, inclusive) for which overlapping features should be
returned. Genomic positions are non-negative integers less than reference
length. Requests spanning the join of circular genomes are represented as two
requests one on each side of the join (position 0).}
\item{end}{Required, if name or symbol not provided. The end of the window
(0-based, exclusive) for which overlapping features should be returned.}
\item{featureTypes}{TODO: To be replaced with a fully featured ontology
search once the Metadata definitions are rounded out. If specified, this
query matches only annotations whose feature_type matches one of the provided
ontology terms.}
\item{nrows}{Number of rows of the data frame returned by this function.
If not defined, the function will return all entries. If the number of
available entries is less than the value of this parameter, the function
will silently return only the available entries.}
\item{responseSize}{Specifies the number of entries to be returned by the
server per request until the number of rows defined in the \code{nrows}
parameter is reached, or until all available entries have been retrieved.
If not defined, the server will return its maximum allowed response size.
Increasing the value of this parameter will reduce the number of requests
and therefore the time required. The server will not respect this parameter
if the value is larger than its maximum response size.}
}
\value{
\code{\link{DataFrame}} object.
}
\description{
Search for features (lines of genomic feature files).
}
\details{
This function requests \code{POST host/features/search}.
}
\examples{
host <- "http://1kgenomes.ga4gh.org/"
\dontrun{
datasetId <- searchDatasets(host, nrows = 1)$id
featureSetId <- searchFeatureSets(host, datasetId, nrows = 1)$id
searchFeatures(host, featureSetId, nrows = 10)
}
}
\references{
\href{https://ga4gh-schemas.readthedocs.io/en/latest/schemas/sequence_annotation_service.proto.html#SearchFeatures}{Official documentation}.
}
\seealso{
\code{\link{DataFrame}}, \code{\link{getFeature}}
}
|
9e31f9d08e0d5d6e735383b0a6b739eb99bb08ab
|
6a490bfbbe969cfd282cea5e1dd9b6b9523445d9
|
/man/AgNode-class.Rd
|
eeb770f20a568860f85444a7f4318ae3cd4860ff
|
[] |
no_license
|
kasperdanielhansen/Rgraphviz
|
5a8d7b6f80948d1914c38e27a1806b4c8f02fa3d
|
2c5057bb982db79840c40c0f47b37af7d7887be2
|
refs/heads/master
| 2023-08-14T22:31:31.342702
| 2022-10-28T17:07:11
| 2022-10-28T17:07:11
| 17,160,926
| 8
| 8
| null | 2023-07-21T14:32:16
| 2014-02-25T04:02:14
|
C
|
UTF-8
|
R
| false
| false
| 3,672
|
rd
|
AgNode-class.Rd
|
\name{AgNode-class}
\docType{class}
\alias{AgNode-class}
\alias{AgNode}
\alias{AgNode<-}
\alias{color}
\alias{fillcolor}
\alias{name}
\alias{shape}
\alias{getNodeRW}
\alias{getNodeLW}
\alias{getNodeCenter}
\alias{getRadiusDiv}
\alias{getNodeHeight}
\alias{style}
\alias{style,AgNode-method}
\alias{color,AgNode-method}
\alias{fillcolor,AgNode-method}
\alias{getNodeXY,AgNode-method}
\alias{getNodeCenter,AgNode-method}
\alias{getNodeHeight,AgNode-method}
\alias{getNodeLW,AgNode-method}
\alias{getNodeRW,AgNode-method}
\alias{name,AgNode-method}
\alias{shape,AgNode-method}
\alias{txtLabel,AgNode-method}
\alias{drawAgNode}
\title{Class "AgNode": A class to describe a node for a Ragraph object}
\description{This class is used to represent nodes for the
\code{Ragraph} class. One can retrieve various pieces of information
as well as draw them.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("AgNode", ...)}.
}
\section{Slots}{
\describe{
\item{\code{center}:}{Object of class \code{"xyPoint"}: The center
point of the node}
\item{\code{name}:}{Object of class \code{"character"}: The name of
the node, used to reference it}
\item{\code{txtLabel}:}{Object of class \code{"AgTextLabel"}: Label
for this edge}
\item{\code{height}:}{Object of class \code{"integer"}: Height of
the node in points}
\item{\code{rWidth}:}{Object of class \code{"integer"}: The right
half of the node in points.}
\item{\code{lWidth}:}{Object of class \code{"integer"}: The left
half of the node in points.}
\item{\code{color}:}{Object of class \code{"character"}: The drawing
color of the node.}
\item{\code{fillcolor}:}{Object of class \code{"character"}: The
color to fill in the node with.}
\item{\code{shape}:}{Object of class \code{"character"}: The shape
of the node.}
\item{\code{style}:}{Object of class \code{"character"}: The style
of the node.}
}
}
\section{Methods}{
\describe{
\item{color}{\code{signature(object = "AgNode")}: Retrieves the
drawing color for the node.}
\item{fillcolor}{\code{signature(object = "AgNode")}: Retrieves the
color to fill in the node image with.}
\item{getNodeCenter}{\code{signature(object = "AgNode")}: Returns
the center point of the node.}
\item{getNodeXY}{\code{signature(object = "AgNode")}: Returns the
center as a two element list, with the first element containing
the 'x' value and the second element containing the 'y' value.}
\item{getNodeHeight}{\code{signature(object = "AgNode")}: Returns
the height of the node. }
\item{getNodeLW}{\code{signature(object = "AgNode")}: Returns the
left width of the node.}
\item{getNodeRW}{\code{signature(object = "AgNode")}: Returns the
right width of the node.}
\item{name}{\code{signature(object = "AgNode")}: Retrieves the name
of the node.}
\item{shape}{\code{signature(object = "AgNode")}: Returns the shape
of the node.}
\item{style}{\code{signature(object = "AgNode")}: Returns the style
of the node.}
\item{txtLabel}{\code{signature(object = "AgNode")}: Retrieves the
node label.}
}
}
\author{Jeff Gentry}
\seealso{\code{\link{Ragraph}}}
\examples{
V <- letters[1:10]
M <- 1:4
g1 <- randomGraph(V, M, .2)
z <- agopen(g1,name="foo")
x <- AgNode(z) ## list of AgNode objects
vv <- x[[1]]
## The methods in use
color(vv)
fillcolor(vv)
getNodeCenter(vv)
getNodeXY(vv)
getNodeHeight(vv)
getNodeLW(vv)
getNodeRW(vv)
name(vv)
shape(vv)
style(vv)
txtLabel(vv)
}
\keyword{classes}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.