blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
94d0a51617450f53f1eab683a082a70b11a0f7f5
|
03eb83ea48a164f07095afd746cfc79217fe069d
|
/WEEK 2 Assigment/code.R
|
a07c7fcc47d8275443ed7b971720f3722221c989
|
[] |
no_license
|
Luis1494/Data-Science-Capstone
|
e158512f21330d53178181c29ba087ca628e580b
|
a9995966a02760582886ef14ce98048c0c2d4a2b
|
refs/heads/master
| 2022-12-07T01:29:59.323457
| 2020-08-17T00:46:55
| 2020-08-17T00:46:55
| 288,049,424
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,329
|
r
|
code.R
|
## Load data
## Milestone script (Data Science Capstone, week 2): load the three English
## corpora, summarize them, sample and clean the text, build a tm corpus,
## tokenize into 1-/2-/3-grams and plot the most frequent terms.
# NOTE(review): hard-coded absolute path — only works on the author's machine.
setwd("D:/1-1. R studio/Lecture10. Data science capstone/week2/final/en_US")
blogs<-readLines("en_US.blogs.txt",warn=FALSE,encoding="UTF-8")
news<-readLines("en_US.news.txt",warn=FALSE,encoding="UTF-8")
twitter<-readLines("en_US.twitter.txt",warn=FALSE,encoding="UTF-8")
## Summarize data
# File sizes converted from bytes to megabytes (2^20 bytes per MiB).
size_blogs<-file.size(path="D:/1-1. R studio/Lecture10. Data science capstone/week2/final/en_US/en_US.blogs.txt")/2^20
size_news<-file.size(path="D:/1-1. R studio/Lecture10. Data science capstone/week2/final/en_US/en_US.news.txt")/2^20
size_twitter<-file.size(path="D:/1-1. R studio/Lecture10. Data science capstone/week2/final/en_US/en_US.twitter.txt")/2^20
# Line counts per corpus.
len_blogs<-length(blogs)
len_news<-length(news)
len_twitter<-length(twitter)
# Total character counts per corpus.
nchar_blogs<-sum(nchar(blogs))
nchar_news<-sum(nchar(news))
nchar_twitter<-sum(nchar(twitter))
library(stringi)
# stri_stats_latex()[4] is the word count ("Words" element).
nword_blogs<-stri_stats_latex(blogs)[4]
nword_news<-stri_stats_latex(news)[4]
nword_twitter<-stri_stats_latex(twitter)[4]
# Summary table of size / rows / characters / words for the three sources.
table<-data.frame("File Name"=c("Blogs","News","Twitter"),
"File Size(MB)"=c(size_blogs,size_news,size_twitter),
"Num of rows"=c(len_blogs,len_news,len_twitter),
"Num of character"=c(nchar_blogs,nchar_news,nchar_twitter),
"Num of words"=c(nword_blogs,nword_news,nword_twitter))
table
## Clean data
set.seed(12345)
# Strip non-ASCII characters (emoji, curly quotes, etc.) before sampling.
blogs1<-iconv(blogs,"latin1","ASCII",sub="")
news1<-iconv(news,"latin1","ASCII",sub="")
twitter1<-iconv(twitter,"latin1","ASCII",sub="")
# Free the full corpora — they are large.
rm(blogs)
rm(news)
rm(twitter)
# Take a 1% sample of each source (sample size is truncated to an integer).
sample_data<-c(sample(blogs1,length(blogs1)*0.01),
sample(news1,length(news1)*0.01),
sample(twitter1,length(twitter1)*0.01))
rm(blogs1)
rm(news1)
rm(twitter1)
## Build corpus
library(tm)
library(NLP)
corpus<-VCorpus(VectorSource(sample_data))
# Cleaning pipeline: punctuation -> whitespace -> case -> digits -> stopwords.
corpus1<-tm_map(corpus,removePunctuation)
corpus2<-tm_map(corpus1,stripWhitespace)
# NOTE(review): tolower is not wrapped in content_transformer(), so this step
# returns plain character vectors; the PlainTextDocument step below restores
# the document class. content_transformer(tolower) is the supported idiom —
# confirm this still behaves with the installed tm version.
corpus3<-tm_map(corpus2,tolower)
corpus4<-tm_map(corpus3,removeNumbers)
corpus5<-tm_map(corpus4,PlainTextDocument)
corpus6<-tm_map(corpus5,removeWords,stopwords("english"))
# Flatten the cleaned corpus back into a character data frame for inspection.
corpus_result<-data.frame(text=unlist(sapply(corpus6,'[',"content")),stringsAsFactors = FALSE)
head(corpus_result)
# Keep only corpus6 (used below for the term-document matrices).
rm(corpus)
rm(corpus1)
rm(corpus2)
rm(corpus3)
rm(corpus4)
rm(corpus5)
## Build N-gram
library(RWeka)
# Tokenizer factories for unigrams, bigrams and trigrams.
one<-function(x) NGramTokenizer(x,Weka_control(min=1,max=1))
two<-function(x) NGramTokenizer(x,Weka_control(min=2,max=2))
thr<-function(x) NGramTokenizer(x,Weka_control(min=3,max=3))
one_table<-TermDocumentMatrix(corpus6,control=list(tokenize=one))
two_table<-TermDocumentMatrix(corpus6,control=list(tokenize=two))
thr_table<-TermDocumentMatrix(corpus6,control=list(tokenize=thr))
# Frequency cutoffs are looser for longer n-grams (they are rarer).
one_corpus<-findFreqTerms(one_table,lowfreq=1000)
two_corpus<-findFreqTerms(two_table,lowfreq=80)
thr_corpus<-findFreqTerms(thr_table,lowfreq=10)
# Total frequency per term, sorted descending.
one_corpus_num<-rowSums(as.matrix(one_table[one_corpus,]))
one_corpus_table<-data.frame(Word=names(one_corpus_num),frequency=one_corpus_num)
one_corpus_sort<-one_corpus_table[order(-one_corpus_table$frequency),]
head(one_corpus_sort)
two_corpus_num<-rowSums(as.matrix(two_table[two_corpus,]))
two_corpus_table<-data.frame(Word=names(two_corpus_num),frequency=two_corpus_num)
two_corpus_sort<-two_corpus_table[order(-two_corpus_table$frequency),]
head(two_corpus_sort)
thr_corpus_num<-rowSums(as.matrix(thr_table[thr_corpus,]))
thr_corpus_table<-data.frame(Word=names(thr_corpus_num),frequency=thr_corpus_num)
thr_corpus_sort<-thr_corpus_table[order(-thr_corpus_table$frequency),]
head(thr_corpus_sort)
## Plot graph
library(ggplot2)
# Bar chart of the top-10 most frequent terms for each n-gram size.
one_g<-ggplot(one_corpus_sort[1:10,],aes(x=reorder(Word,-frequency),y=frequency,fill=frequency))
one_g<-one_g+geom_bar(stat="identity")
one_g<-one_g+labs(title="Unigrams",x="Words",y="Frequency")
one_g<-one_g+theme(axis.text.x=element_text(angle=90))
one_g
two_g<-ggplot(two_corpus_sort[1:10,],aes(x=reorder(Word,-frequency),y=frequency,fill=frequency))
two_g<-two_g+geom_bar(stat="identity")
two_g<-two_g+labs(title="Bigrams",x="Words",y="Frequency")
two_g<-two_g+theme(axis.text.x=element_text(angle=90))
two_g
thr_g<-ggplot(thr_corpus_sort[1:10,],aes(x=reorder(Word,-frequency),y=frequency,fill=frequency))
thr_g<-thr_g+geom_bar(stat="identity")
thr_g<-thr_g+labs(title="Trigrams",x="Words",y="Frequency")
thr_g<-thr_g+theme(axis.text.x=element_text(angle=90))
thr_g
|
88f958d4c7b34d657e970bd97b3e9f278d3c885b
|
5bb2c8ca2457acd0c22775175a2722c3857a8a16
|
/R/datasets.R
|
20a5b1c9c1d42fd1e69d475725889067e12c53a0
|
[] |
no_license
|
IQSS/Zelig
|
d65dc2a72329e472df3ca255c503b2e1df737d79
|
4774793b54b61b30cc6cfc94a7548879a78700b2
|
refs/heads/master
| 2023-02-07T10:39:43.638288
| 2023-01-25T20:41:12
| 2023-01-25T20:41:12
| 14,958,190
| 115
| 52
| null | 2023-01-25T20:41:13
| 2013-12-05T15:57:10
|
R
|
UTF-8
|
R
| false
| false
| 362
|
r
|
datasets.R
|
#' Cigarette Consumption Panel Data
#'
#' Help-page stub for the \code{CigarettesSW} data set (the data object
#' itself ships with the package; this file carries only the roxygen docs).
#'
#' @docType data
#' @source From Christian Kleiber and Achim Zeileis (2008). Applied
#' Econometrics with R. New York: Springer-Verlag. ISBN 978-0-387-77316-2. URL
#' <https://CRAN.R-project.org/package=AER>
#' @keywords datasets
#' @md
#' @format A data set with 96 observations and 9 variables
#' @name CigarettesSW
NULL
|
9634f8e72d39a2b9c768d85bb690646d8e41c633
|
25298b75d8e54e34261ce7816c9ed95774566dbc
|
/man/weighted.median.boot.se.Rd
|
1199f28a92fbf51a63b70178a164e25413d758ac
|
[] |
no_license
|
BroadbentJim/MendelianRandomization
|
7946787c662beee9c5f7d69189f655c1b4b2425d
|
100d624bae0c5ac296887493c46b0b64ed656d8f
|
refs/heads/master
| 2022-12-07T02:10:17.287876
| 2020-09-03T11:30:24
| 2020-09-03T11:30:24
| 289,373,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,466
|
rd
|
weighted.median.boot.se.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mr_median-methods.R
\name{weighted.median.boot.se}
\alias{weighted.median.boot.se}
\title{Weighted median standard error function}
\usage{
weighted.median.boot.se(Bx, By, Bxse, Byse, weights, iter, seed)
}
\arguments{
\item{Bx}{A numeric vector of beta-coefficient values for genetic associations with the exposure.}
\item{By}{A numeric vector of beta-coefficient values for genetic associations with the outcome.}
\item{Bxse}{The standard errors associated with the beta-coefficients in \code{Bx}.}
\item{Byse}{The standard errors associated with the beta-coefficients in \code{By}.}
\item{weights}{Weights.}
\item{iter}{The number of bootstrap samples to generate when calculating the standard error.}
\item{seed}{The random seed to use when generating the bootstrap samples (for reproducibility). If set to \code{NA}, the random seed will not be set (for example, if the function is used as part of a larger simulation).}
}
\value{
Causal estimate.
}
\description{
Internal function for calculating standard error of weighted median estimate (or simple median estimator if weights are all equal) using bootstrapping. The number of iterations and initial value of the random seed can also be set.
}
\details{
None.
}
\examples{
weighted.median.boot.se(Bx = ldlc, By = chdlodds, Bxse = ldlcse, Byse = chdloddsse,
weights = chdloddsse, iter = 100, seed = 314)
}
\keyword{internal}
|
e924340d4de2d1a758ef662ede89b56142d67dd0
|
9c4ddae677019ee2e808444a3e02298b675ec2a7
|
/Pool_meta_sensitivity_v6.r
|
d1cd29b12d52f1348dd970dbb6b19cd5151a7d46
|
[] |
no_license
|
sean-harrison-bristol/MR_interactions
|
c736d2755afafff32eb2a9f20b965223a76f7e2d
|
8074f35c6a5658a5934e51272ec84ec0d721e5db
|
refs/heads/master
| 2020-05-29T09:02:09.969128
| 2019-05-28T14:51:37
| 2019-05-28T14:51:37
| 189,048,431
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,306
|
r
|
Pool_meta_sensitivity_v6.r
|
#script_name: Pool_meta_sensitivity_v6.r
#project: 4-way decomp: paper 1
#script author: Teri North
#script purpose: pool estimates across simulation repeats by
# -taking the mean betahat & SE of betahats (to generate MC 95% CI for betahat)
# -take the mean SE and the SD of betahats
# -calculate power, type i error and coverage where applicable
#date created: 09/08/2018
#last edited: 11/10/2018
#notes:
# NOTE(review): setwd('') is a placeholder — fill in the Folder 1 path
# before running, or the reads/writes below will use the current directory.
setwd('') #Folder 1
#number of repeats in each sim
repeats=100
# Simulated sample size; used for the t-distribution degrees of freedom below.
nval=500000
#tracker for erroneous calls - will get two numbers one for model 1 and the other for model 2
n_z1problem=c(1:2)
for (j in c(1:2)){n_z1problem[j]=0}
n_z1prob_track=1
# For each of the two simulation models: accumulate, across 10 seeds x
# `repeats` repeats, (a) the element-wise sum of every results file (for the
# pooled means), and (b) per-scenario counters for detection, coverage,
# wrong-direction detections, and factorial-MR interaction detections.
# Each results file has 25 rows = 25 simulation scenarios.
for (model in c(1:2)){
# Counter: times the 2SLS interaction 95% CI excludes zero (detection).
xm_z1_2sls_detec=c(1:25)
for (i in c(1:25)){
xm_z1_2sls_detec[i]=0
}
# Counter: times the 2SLS interaction 95% CI covers the true coefficient.
z1_coverage=c(1:25)
for (i in c(1:25)){
z1_coverage[i]=0
}
#reality check
#how many times is the interaction detected (p<0.05), but the estimate is in the opposite direction to true effect?
z1problem=c(1:25)
for (i in c(1:25)){
z1problem[i]=0
}
fmr_y_int=c(1:25) # counter for # times interaction detected factorial approach (Wald test 5%)
for (i in c(1:25)){
fmr_y_int[i]=0
}
#calculating the mean betas
# `first` flags the very first file, which initializes `data`; every later
# file is added element-wise so `data` holds a running sum.
first=1
for (seedval in c(520160447,267639401,37905828,750891730,435580371,945959183,141153971,456264979,86129334,119011473)){
for (rep in c(1:repeats)){
if (first==1){
data=read.table(file=paste(seedval,'_rep',rep,'_model',model,"_pleio_res.txt",sep=''),sep='\t',header=TRUE)
# True simulation parameters (identical in every file for a given model).
true_vals=data.frame(data$x_coeff_m,data$x_coeff_y,data$m_coeff_y, data$xm_coeff_y)
first=0
# 95% CI bounds from the t distribution with nval-4 df.
ll=data.frame(
xm_2sls_ll=data$xm_2sls-(qt(0.025,(nval-4),lower.tail=FALSE))*data$xm_2sls_se
)
ul=data.frame(
xm_2sls_ul=data$xm_2sls+(qt(0.025,(nval-4),lower.tail=FALSE))*data$xm_2sls_se
)
for (i in c(1:25)){
if (ll$xm_2sls_ll[i]>0 | ul$xm_2sls_ul[i]<0){xm_z1_2sls_detec[i]=1}
}
for (i in c(1:25)){
if ((ll$xm_2sls_ll[i]<data$xm_coeff_y[i]) & (ul$xm_2sls_ul[i]>data$xm_coeff_y[i])){z1_coverage[i]=1}
}
for (i in c(1:25)){
if (((ll$xm_2sls_ll[i]>0) & (data$xm_coeff_y[i]<0))|((ul$xm_2sls_ul[i]<0) & (data$xm_coeff_y[i]>0))) {z1problem[i]=1}#if interac detec, but coeff wrong direc
}
for (i in c(1:25)){
if (data$fmr_interac_p[i]<0.05){fmr_y_int[i]=1}
}
} else if (first==0){
new=read.table(file=paste(seedval,'_rep',rep,'_model',model,"_pleio_res.txt",sep=''),sep='\t',header=TRUE)
# Running element-wise sum over all files (divided by the file count later).
data=data+new
ll_new=data.frame(
xm_2sls_ll=new$xm_2sls-(qt(0.025,(nval-4),lower.tail=FALSE))*new$xm_2sls_se
)
ul_new=data.frame(
xm_2sls_ul=new$xm_2sls+(qt(0.025,(nval-4),lower.tail=FALSE))*new$xm_2sls_se
)
for (i in c(1:25)){
if (ll_new$xm_2sls_ll[i]>0 | ul_new$xm_2sls_ul[i]<0){xm_z1_2sls_detec[i]=xm_z1_2sls_detec[i]+1}
}
for (i in c(1:25)){
if ((ll_new$xm_2sls_ll[i]<new$xm_coeff_y[i]) & (ul_new$xm_2sls_ul[i]>new$xm_coeff_y[i])){z1_coverage[i]=z1_coverage[i]+1}
}
for (i in c(1:25)){
if (((ll_new$xm_2sls_ll[i]>0) & (new$xm_coeff_y[i]<0))|((ul_new$xm_2sls_ul[i]<0) & (new$xm_coeff_y[i]>0))) {z1problem[i]=z1problem[i]+1}#if interac detec, but coeff wrong direc
}
for (i in c(1:25)){
if (new$fmr_interac_p[i]<0.05){fmr_y_int[i]=fmr_y_int[i]+1}
}
}
}
}
#remove true values
data_est=data.frame(data$xm_2sls,data$xm_2sls_se)
#mean betas
mean_denom=repeats*10 #no. rep within seeds * no. seeds
data_mean=data_est/mean_denom #gives mean beta and mean se
#add in the true params
mean_betas=cbind(true_vals,data_mean)
#############################################################################################################################################################################
#now for the standard error
# Accumulate squared deviations of each repeat's estimates from the pooled
# mean (data/mean_denom), across all 10 seeds x `repeats` repeats; this sum
# of squares feeds s^2 and the Monte Carlo SE below.
# BUG FIX: the loop variable was named `seeds` but the file paths inside used
# `seedval`, which still held the final value of the earlier means loop
# (119011473) — so the sum of squares re-read that single seed's files ten
# times over instead of covering all seeds. The loop variable is now
# `seedval`, matching the file-path construction.
checker=1
for (seedval in c(520160447,267639401,37905828,750891730,435580371,945959183,141153971,456264979,86129334,119011473)){
for (rep in c(1:repeats)){
if (checker==1){
# First file initializes the running sum of squared deviations.
newdata=read.table(file=paste(seedval,'_rep',rep,'_model',model,"_pleio_res.txt",sep=''),sep='\t',header=TRUE)
newdata=(newdata-(data/mean_denom))^2
checker=0
} else if (checker==0){
newer=read.table(file=paste(seedval,'_rep',rep,'_model',model,"_pleio_res.txt",sep=''),sep='\t',header=TRUE)
newdata=newdata+(newer-(data/mean_denom))^2
newdata=data.frame(newdata)
}
}
}
# Keep only the interaction-estimate column of the squared deviations.
newdata_est=data.frame(newdata$xm_2sls)
#divide by n-1 to get s^2
s2=newdata_est/(repeats*10-1)
# Monte Carlo standard error of the pooled mean estimate: sqrt(s^2 / n).
se=sqrt(s2/(repeats*10))
##################################################################################################################################################################
# Convert the counters to data frames so they can sit in the results table.
xm_z1_2sls_detec=data.frame(xm_z1_2sls_detec)
z1_coverage=data.frame(z1_coverage)
fmr_y_int=data.frame(fmr_y_int)
#results table
# One row per scenario: true parameters, pooled estimate, MC SE, mean model
# SE, detection/coverage counts, sampling variance, factorial-MR detections.
res=data.frame(mean_betas$data.x_coeff_m,
mean_betas$data.x_coeff_y,
mean_betas$data.m_coeff_y,
mean_betas$data.xm_coeff_y,
mean_betas$data.xm_2sls,
se$newdata.xm_2sls,
mean_betas$data.xm_2sls_se,
xm_z1_2sls_detec$xm_z1_2sls_detec,
z1_coverage$z1_coverage,
s2$newdata.xm_2sls,
fmr_y_int$fmr_y_int
)
write.table(res,file=paste('500000_EXTRA_final_res_model_',model,'.txt',sep=''),sep='\t',row.names=FALSE)
#how many times across repeat sims is an interaction detected in the incorrect direction?
n_z1problem[n_z1prob_track]=sum(z1problem)
n_z1prob_track=n_z1prob_track+1
}
# One wrong-direction count per model, written one per line.
write(n_z1problem, file='z1problem.txt',append=FALSE, sep = "\n")
# Second pass: re-read each model's pooled results file and write formatted
# tables, split by the true interaction-coefficient value (0, -0.111, 0.111,
# 0.167, 0.333), for the 2SLS estimates and the factorial-MR (FMR) results.
for (model in c(1:2)){
t50=data.frame(read.table(file=paste('500000_EXTRA_final_res_model_',model,'.txt',sep=''),header=TRUE))
all=t50
# Tab-separated header row written ahead of each table.
headers=c('mediator_coeff','\t','interac_coeff', '\t', 'mean_est','\t','sd(est)','\t','mean(se(est))','\t','se(est)','\t',
'power','\t','type_i','\t','coverage')
# Subset scenarios by the (rounded) true interaction coefficient.
res_l_0=all[round(all$mean_betas.data.xm_coeff_y,3)==0.000,]
res_l_m3=all[round(all$mean_betas.data.xm_coeff_y,3)==-0.111,]
res_l_3=all[round(all$mean_betas.data.xm_coeff_y,3)==0.111,]
res_l_5=all[round(all$mean_betas.data.xm_coeff_y,3)==0.167,]
res_l_1=all[round(all$mean_betas.data.xm_coeff_y,3)==0.333,]
# Order each subset by the mediator coefficient for readable output.
res_l_0=res_l_0[order(res_l_0$mean_betas.data.x_coeff_m),]
res_l_m3=res_l_m3[order(res_l_m3$mean_betas.data.x_coeff_m),]
res_l_3=res_l_3[order(res_l_3$mean_betas.data.x_coeff_m),]
res_l_5=res_l_5[order(res_l_5$mean_betas.data.x_coeff_m),]
res_l_1=res_l_1[order(res_l_1$mean_betas.data.x_coeff_m),]
# 'NA' filler column: power is undefined when the true coefficient is 0
# (that column reports type I error instead), and vice versa.
blank=c(1:5)
for (i in c(1:5)){blank[i]='NA'}
##################################INTERACTION COEFFICIENT#####################################################################################################################
#REMEMBER THAT THE VARIANCE NEEDS TO BE SQRT'D TO CONVERT TO SD
#POWER, TYPE I AND COVERAGE NEED TO BE DIVIDED BY 10 TO CONVERT TO %
###################
#Z=Z1+Z2+Z1Z2+Z1Z1#
###################
# Columns: mediator coeff, true interac coeff, mean est, SD(est),
# mean(SE(est)), MC SE(est), then power/type-I (one of them 'NA') & coverage.
editZ1_res_l_0=data.frame(res_l_0$mean_betas.data.x_coeff_m,res_l_0$mean_betas.data.xm_coeff_y,res_l_0$mean_betas.data.xm_2sls,
sqrt(res_l_0$s2.newdata.xm_2sls),res_l_0$mean_betas.data.xm_2sls_se,res_l_0$se.newdata.xm_2sls,blank,
(res_l_0$xm_z1_2sls_detec.xm_z1_2sls_detec)/10,(res_l_0$z1_coverage.z1_coverage)/10)
editZ1_res_l_m3=data.frame(res_l_m3$mean_betas.data.x_coeff_m,res_l_m3$mean_betas.data.xm_coeff_y,res_l_m3$mean_betas.data.xm_2sls,
sqrt(res_l_m3$s2.newdata.xm_2sls),res_l_m3$mean_betas.data.xm_2sls_se,res_l_m3$se.newdata.xm_2sls,(res_l_m3$xm_z1_2sls_detec.xm_z1_2sls_detec)/10,
blank, (res_l_m3$z1_coverage.z1_coverage)/10)
editZ1_res_l_3=data.frame(res_l_3$mean_betas.data.x_coeff_m,res_l_3$mean_betas.data.xm_coeff_y,res_l_3$mean_betas.data.xm_2sls,
sqrt(res_l_3$s2.newdata.xm_2sls),res_l_3$mean_betas.data.xm_2sls_se,res_l_3$se.newdata.xm_2sls,(res_l_3$xm_z1_2sls_detec.xm_z1_2sls_detec)/10,
blank, (res_l_3$z1_coverage.z1_coverage)/10)
editZ1_res_l_5=data.frame(res_l_5$mean_betas.data.x_coeff_m,res_l_5$mean_betas.data.xm_coeff_y,res_l_5$mean_betas.data.xm_2sls,
sqrt(res_l_5$s2.newdata.xm_2sls),res_l_5$mean_betas.data.xm_2sls_se,res_l_5$se.newdata.xm_2sls,(res_l_5$xm_z1_2sls_detec.xm_z1_2sls_detec)/10,
blank, (res_l_5$z1_coverage.z1_coverage)/10)
editZ1_res_l_1=data.frame(res_l_1$mean_betas.data.x_coeff_m,res_l_1$mean_betas.data.xm_coeff_y,res_l_1$mean_betas.data.xm_2sls,
sqrt(res_l_1$s2.newdata.xm_2sls),res_l_1$mean_betas.data.xm_2sls_se,res_l_1$se.newdata.xm_2sls,(res_l_1$xm_z1_2sls_detec.xm_z1_2sls_detec)/10,
blank, (res_l_1$z1_coverage.z1_coverage)/10)
# NOTE(review): paste() below uses the default sep=" ", so the file names
# contain spaces (e.g. "TSLS_MED_L0_ 1 _.txt"). The header write, blank-line
# write and table append are self-consistent, but paste0()/sep='' would give
# cleaner names.
#interaction coefficient=0
write.table(headers, file=paste('TSLS_MED_L0_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('TSLS_MED_L0_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editZ1_res_l_0, file=paste('TSLS_MED_L0_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=m3
write.table(headers, file=paste('TSLS_MED_LM3_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('TSLS_MED_LM3_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editZ1_res_l_m3, file=paste('TSLS_MED_LM3_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=3
write.table(headers, file=paste('TSLS_MED_L3_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('TSLS_MED_L3_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editZ1_res_l_3, file=paste('TSLS_MED_L3_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=5
write.table(headers, file=paste('TSLS_MED_L5_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('TSLS_MED_L5_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editZ1_res_l_5, file=paste('TSLS_MED_L5_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=1
write.table(headers, file=paste('TSLS_MED_L1_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('TSLS_MED_L1_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editZ1_res_l_1, file=paste('TSLS_MED_L1_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#####
#FMR#
#####
# Factorial-MR tables: only power/type-I error are reported.
headers2=c('mediator_coeff','\t','interac_coeff', '\t', 'power','\t','type_i')
editfmr_res_l_0=data.frame(res_l_0$mean_betas.data.x_coeff_m,res_l_0$mean_betas.data.xm_coeff_y,blank,(res_l_0$fmr_y_int.fmr_y_int)/10)
editfmr_res_l_m3=data.frame(res_l_m3$mean_betas.data.x_coeff_m,res_l_m3$mean_betas.data.xm_coeff_y,(res_l_m3$fmr_y_int.fmr_y_int)/10,blank)
editfmr_res_l_3=data.frame(res_l_3$mean_betas.data.x_coeff_m,res_l_3$mean_betas.data.xm_coeff_y,(res_l_3$fmr_y_int.fmr_y_int)/10,blank)
editfmr_res_l_5=data.frame(res_l_5$mean_betas.data.x_coeff_m,res_l_5$mean_betas.data.xm_coeff_y,(res_l_5$fmr_y_int.fmr_y_int)/10,blank)
editfmr_res_l_1=data.frame(res_l_1$mean_betas.data.x_coeff_m,res_l_1$mean_betas.data.xm_coeff_y,(res_l_1$fmr_y_int.fmr_y_int)/10,blank)
#interaction coefficient=0
write.table(headers2, file=paste('fmr_L0_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('fmr_L0_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editfmr_res_l_0, file=paste('fmr_L0_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=m3
write.table(headers2, file=paste('fmr_LM3_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('fmr_LM3_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editfmr_res_l_m3, file=paste('fmr_LM3_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=3
write.table(headers2, file=paste('fmr_L3_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('fmr_L3_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editfmr_res_l_3, file=paste('fmr_L3_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=5
write.table(headers2, file=paste('fmr_L5_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('fmr_L5_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editfmr_res_l_5, file=paste('fmr_L5_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
#interaction coefficient=1
write.table(headers2, file=paste('fmr_L1_',model,'_.txt'),append=FALSE, quote=FALSE, row.names=FALSE, col.names=FALSE, sep = "",eol="")
write("", file=paste('fmr_L1_',model,'_.txt'),append=TRUE, sep = "\n")
write.table(editfmr_res_l_1, file=paste('fmr_L1_',model,'_.txt'),append=TRUE, quote=FALSE, row.names=FALSE, col.names=FALSE)
}
|
4e36dd909249ac03479b9e84bb013bf80b63d094
|
02e094167c8ad54218fa47aaa1c49ff40fdbf7b5
|
/Layout.R
|
08310a1cb4b620090436148e06f4a06d88c55912
|
[] |
no_license
|
teerapong588/Optimiation_project
|
910be94bad123bd48aee41904ad1a68266e4d147
|
27272594d4c40775c8629dd0477d36f762b66331
|
refs/heads/master
| 2020-05-03T13:52:50.047320
| 2019-04-18T05:13:08
| 2019-04-18T05:13:08
| 178,663,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,140
|
r
|
Layout.R
|
# Shiny dashboard layout for the portfolio-optimization app: sources the tab
# definitions, then builds the header, sidebar menu and tab body.
source("bs_tabs.R", local = TRUE)
source("mv_td_tabs.R", local = TRUE)
source("mv_rb_kde_tabs.R", local = TRUE)
source("mv_rb_spe_tabs.R", local = TRUE)
source("mv_rb_covmcd_tabs.R", local = TRUE)
source("mv_rb_sre_tabs.R", local = TRUE)
source("mcv_tabs.R", local = TRUE)
Header <- dashboardHeader(title = "Portfolio Optimization")
# Sidebar: tabName values here must match the tabItem names in Body below.
Sidebar <- dashboardSidebar(
sidebarMenu(id = "tabs",
menuItem("Basic Statistics", tabName = "basic_stats"),
menuItem("Mean-Variance", tabName = "two",
menuSubItem(text = "Traditional", tabName = "traditional"),
menuSubItem(text = "Kendall Estimator", tabName = "rb_kde"),
menuSubItem(text = "Spearman Estimator", tabName = "rb_spe"),
menuSubItem(text = "CovMcd Estimator", tabName = "rb_covmcd"),
menuSubItem(text = "Shrinkage Estimator", tabName = "rb_sre")
),
menuItem("Mean-Cvar", tabName = "mean-cvar"),
menuItem("Back Testing", tabName = "backtesting",
menuSubItem(text = "Traditional", tabName = "bt-traditional"),
menuSubItem(text = "Robust", tabName = "bt-robust"),
menuSubItem(text = "Shrinkage", tabName = "bt-shrinkage"),
menuSubItem(text = "Bagging", tabName = "bt-bagging"),
menuSubItem(text = "Mean-Cvar", tabName = "bt-mean-cvar"))
)
)
Body <- dashboardBody(
tabItems(
#linked tabs from Tabs.R
tabItem(tabName = "basic_stats", bs_tabs),
tabItem(tabName = "traditional", mv_td_tabs),
# NOTE(review): these objects are named mv_rd_* but the sourced files above
# are mv_rb_*_tabs.R — confirm the object names defined in those files;
# this may be an rd/rb typo.
tabItem(tabName = "rb_kde", mv_rd_kde_tabs),
tabItem(tabName = "rb_spe", mv_rd_spe_tabs),
tabItem(tabName = "rb_covmcd", mv_rd_covmcd_tabs),
tabItem(tabName = "rb_sre", mv_rd_sre_tabs),
# NOTE(review): no sidebar menuItem carries tabName "bagging", so this tab
# appears unreachable from the menu (only "bt-bagging" is listed above).
tabItem(tabName = "bagging", h2("Bagging")),
tabItem(tabName = "mean-cvar", mcv_tabs),
tabItem(tabName = "bt-traditional", h2("BT-Traditional")),
tabItem(tabName = "bt-robust", h2("BT-Robust")),
tabItem(tabName = "bt-shrinkage", h2("BT-Shrinkage")),
tabItem(tabName = "bt-bagging", h2("BT-Bagging")),
tabItem(tabName = "bt-mean-cvar", h2("BT-Mean-Cvar"))
)
)
|
c3026bff270dedcd53b0c56f31017df9bf5264c5
|
62d8ea7d6bc9104f3f42178abc5df9e8e4acfb84
|
/Clase_1.R
|
6aaa0fec087cdc1c2ffac9d7f1bd042dc5c55a97
|
[] |
no_license
|
TrinyEG/curso_R
|
e43096132b6eb2e593b00b37eb5d035e1aede853
|
f4b39428445663c6647f1445266eefc0e6c7e0e1
|
refs/heads/main
| 2023-08-31T02:59:03.477125
| 2021-06-17T17:36:23
| 2021-06-17T17:36:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,344
|
r
|
Clase_1.R
|
# Introductory R lesson (class 1): vectors, object types, matrices,
# data frames, and basic descriptive statistics.
# (Comments translated from Spanish; code and string literals unchanged.)
# Two-element vectors
c() # c() is the function that builds a vector
c(2,3)*c(4,5) # multiplication
c(2,3)+c(4,5) # addition
c(2,3)-c(4,5) # subtraction
c(2,3)/c(4,5) # division
# Vectors of different lengths (the shorter one is recycled)
c(2,3,4,6,1)*c(4,5)
## Object assignment
# Either = or the compound arrow <- can be used for assignment
c(2,3,4,6,1)*c(4,5)
a<-c(2,3,4,6,1)
b<-c(4,5)
## Operations on assigned vectors
(a*b)/(b-a)
## Vectors of different elements (object types)
class(2) # Function that reports an object's type
2.1 # Numeric: continuous quantitative variable
c<-2.1
c1<-as.integer(c) # The as.XXX functions coerce an object to type XXX
c<-as.integer(c)
2 # Integer: discrete quantitative variable
## Categorical variables
d<-c("23-45X-16B") # Character is a string-type object
class(d)
e<-c("rojo", "azul", "blanco", "blanco", "azul", "rojo", "blanco") # Categorical
class(e)
f<-as.factor(e)
class(f)
## Boolean / logical vectors
class(TRUE) # Logical objects
g<-c(FALSE, FALSE, TRUE, FALSE, TRUE, TRUE)
g<-c(F, F, T, F, T, T) # (same as above; prefer TRUE/FALSE over T/F in real code)
# Logical vectors coerce as FALSE=0, TRUE=1
h<-a>=4 ## Categorizing a numeric variable
h<-as.factor(h)
## Uses: selecting cases in a data set,
# applying functions to only a subset of the data,
# building complex operations,
# filtering cases
## Vectors mixing different element types
i<-c("gato", 1, 3.1, "liebre", TRUE)
# Vectors can NOT hold different object types;
# mixed elements are stored as character
## Matrices
?matrix() ## The ? prefix opens the RStudio help pane
l<-matrix(c(1,0,0,1), nrow=2, ncol=2) ## Creating matrices
k<-matrix(c(1, "FALSE", FALSE, 0), ncol=2, nrow=2)
class(k)
# Matrices can NOT hold different object types;
# mixed elements are stored as character
## Data frames
# Accept different object types per column
# Rows = subjects
# Columns = variables
# Built from vectors
# Study of 6 subjects with three measured variables
var1<-c(24, 27, 78, 56, 44, 33)
var2<-c("h", "m", "m", "o", "h", "o")
var3<-c(67, 78, 61, 44,71, 57)
var4<-c(T, F, T, F, F, T)
length(var3)
?length
cbind() # Binds columns that share the same number of rows
rbind() # Binds rows that share the same number of columns
data1<-cbind(var1, var2, var3, var4)
class(data1)
data2<-data.frame(var1, var2, var3, var4) ## Function that builds data frames
data2$var1 # The $ operator extracts a variable from a data.frame
var1_1<-data2$var1*2 # Saving objects to the global environment
var5<-data2$var1*data2$var3
data2$var5<-data2$var1*data2$var3
# The $ operator can also insert new variables into a data.frame
data2$var5<-var5
## Inspecting a data.frame
data2
View(data2)
head(data2, n = 1) # First n rows (head() shows the first 6 by default)
?str()
str(data2)
data(mtcars)
class(mtcars)
View(mtcars) # Open a dataset in an RStudio viewer window
?mtcars
str(mtcars) # Structure of a data.frame
summary(data2) # Descriptive statistics of a dataset
summary(mtcars)
### Descriptive statistics in R
data2
## Continuous variables
mean(data2$var1) # Mean
sd(data2$var1) # Standard deviation
median(data2$var1) # Median
quantile(data2$var1) # Quantiles
quantile(data2$var1, probs = c(0.8, 0.85, 0.9))
min(data2$var1) # Minimum
max(data2$var1) # Maximum
## Categorical variables
table(data2$var2)
prop.table(table(data2$var2))
|
4d0d59522a89cf7fd58df063304c65c26631c4c8
|
1c1b46425349d21577d020f96e82990de60205b7
|
/run_analysis.R
|
eb88fd75b935b4d0daa52fc9d1be907a79652bf1
|
[] |
no_license
|
Lchiffon/getting-and-cleaning-data
|
865479f7676a0a6c9c68412bddb728092c688a3a
|
cd4e1800dcfe63681c3cf7cea3207989842fad51
|
refs/heads/master
| 2021-01-17T11:55:03.539546
| 2014-04-24T01:36:28
| 2014-04-24T01:36:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
run_analysis.R
|
## Course project script: merge the UCI HAR test/train feature sets, compute
## per-observation mean and standard deviation, then average those summaries
## by activity and by subject and write a labelled tidy table.
# NOTE(review): hard-coded absolute path — only valid on the author's machine.
setwd("C:/Users/Administrator/Desktop/data/UCI HAR Dataset")
## set the working Directory
data_test<-read.table("./test/x_test.txt")
data_train<-read.table("./train/x_train.txt")
data=rbind(data_train,data_test)
## combine the test set and the train set
# NOTE(review): the mean/std are computed per ROW (across all feature columns
# of one observation: rowMeans and apply(..., 1, ...)), not per feature —
# confirm this matches the assignment's intent.
mean_x<-rowMeans(data)
std_x<-apply(data,1,function(x)sqrt(var(x)))
## compute the mean and the std. of each data
sub_train<-data.frame(act=read.table("./train/y_train.txt")
,sub=read.table("./train/subject_train.txt"))
## put the subject,activiety,x into sub_train
sub_test<-data.frame(act=read.table("./test/y_test.txt")
,sub=read.table("./test/subject_test.txt"))
## combine the descriptive data set
sub=rbind(sub_train,sub_test)
# One row per observation: mean, std, activity code, subject id.
out<-data.frame(mean_x,std_x,sub)
names(out)<-c("mean","std","act","sub")
head(out)
mean_act<-tapply(out$mean,out$act,mean)
mean_sub<-tapply(out$mean,out$sub,mean)
std_act<-tapply(out$std,out$act,mean)
std_sub<-tapply(out$std,out$sub,mean)
## use function "tapply" to compute the average of
## each variable for each activity and each subject
mean_all<-c(mean_act,mean_sub)
std_all<-c(std_act,std_sub)
## combine the activieties and person together
# Row labels: 6 activity names followed by "person 1".."person 30".
str1<-c("1 WALKING","2 WALKING_UPSTAIRS","3 WALKING_DOWNSTAIRS","4 SITTING","5 STANDING","6 LAYING")
str2<-paste("person",1:30)
str=c(str1,str2)
## write the row.names vector
sta_all<-data.frame(mean=mean_all,std=std_all,row.names=str)
## get the tidy data set
sta_all
## show it
|
82ecb6620b010913c7cbc39413f6d639f495bfd4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/BMS/R/c.bma.R
|
af5551566a86d5a88204fc5ea030492d6b31731e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
c.bma.R
|
#' Concatenate bma objects (S3 method for c()).
#'
#' The `recursive` argument exists only so the signature matches the
#' base::c() generic; it has no effect here. All objects supplied via
#' `...` are merged with combine_chains().
c.bma <- function(..., recursive = FALSE) {
  # Warn only when the caller explicitly supplies `recursive`, since it is ignored.
  if (!missing(recursive)) {
    warning("note that argument recursive has no meaning and is retained for compatibility")
  }
  combine_chains(...)
}
|
4a1b66c70f748aaf2f42230ea336daaa7f95df22
|
d1d6630c1952b1a9d481e35ec3c8ffc8af4aa2c7
|
/man/preprocess_SCD.Rd
|
5b8219f14641760dcdf02a9aaf736321c1073b63
|
[] |
no_license
|
cran/scdhlm
|
379a2e83a57df15274f92ce87abd1d49204b493e
|
09db652d54a71995870149f9f9e16e3ac6bc7c9b
|
refs/heads/master
| 2023-03-16T04:41:16.745426
| 2023-03-12T09:30:02
| 2023-03-12T09:30:02
| 69,391,400
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,871
|
rd
|
preprocess_SCD.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocess-function.R
\name{preprocess_SCD}
\alias{preprocess_SCD}
\title{Clean Single Case Design Data}
\usage{
preprocess_SCD(
design,
case,
phase,
session,
outcome,
cluster = NULL,
series = NULL,
center = 0,
round_session = TRUE,
treatment_name = NULL,
data = NULL
)
}
\arguments{
\item{design}{Character string to specify whether data comes from a treatment
reversal (\code{"TR"}), multiple baseline across participants
(\code{"MBP"}), replicated multiple baseline across behaviors
(\code{"RMBB"}), or clustered multiple baseline across participants
(\code{"CMB"}).}
\item{case}{vector of case indicators or name of a character or factor vector
within \code{data} indicating unique cases.}
\item{phase}{vector of treatment indicators or name of a character or factor
vector within \code{data} indicating unique treatment phases.}
\item{session}{vector of measurement occasions or name of numeric vector
within \code{data} of measurement times.}
\item{outcome}{vector of outcome data or name of numeric vector of outcome
data within \code{data}.}
\item{cluster}{(Optional) vector of cluster indicators or name of a character
or factor vector within \code{data} indicating clusters.}
\item{series}{(Optional) vector of series indicators or name of a character
or factor vector within \code{data} indicating series.}
\item{center}{Numeric value for the centering value for session. Default is 0.}
\item{round_session}{Logical indicating whether to round \code{session} to the nearest integer. Defaults to \code{TRUE}.}
\item{treatment_name}{(Optional) character string corresponding to the name
of the treatment phase.}
\item{data}{(Optional) dataset to use for analysis. Must be a
\code{data.frame}.}
}
\value{
A cleaned SCD dataset that can be used for model fitting and effect size calculation.
}
\description{
Clean single case design data for treatment reversal and multiple baseline designs.
}
\note{
If \code{treatment_name} is left as \code{NULL}, the second level of the phase
variable is taken to be the treatment phase.
}
\examples{
data(Laski)
preprocess_SCD(design = "MBP",
case = case, phase = treatment,
session = time, outcome = outcome,
center = 4, data = Laski)
data(Anglesea)
preprocess_SCD(design="TR",
case=case, phase=condition,
session=session, outcome=outcome,
treatment_name = "treatment",
data=Anglesea)
data(Thiemann2001)
preprocess_SCD(design = "RMBB",
case = case, series = series, phase = treatment,
session = time, outcome = outcome,
data = Thiemann2001)
}
|
ca5644676ffb6b9dfce12f011855b324b7fabe73
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH14/EX14.14.33/Ex14_14_33.R
|
c19ec9a557e670912d41acdb5c17e11fe03d69cf
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 504
|
r
|
Ex14_14_33.R
|
#PAGE=318 -- test H0: rho = 0.6 given sample correlation r = 0.75,
# using Fisher's z-transformation (Schaum's Statistics, Ex. 14.33).
n=24    # sample size
r=0.75  # observed correlation
m=0.05  # significance level from the text (kept for reference)
a=0.6   # hypothesized correlation
# Fisher z of the observed and hypothesized correlations:
# z = 1.1513 * log10((1 + rho) / (1 - rho))
z1=1.1513*log((1+r)/(1-r),10)
z1=round(z1,digits = 3)
u=1.1513*log((1+a)/(1-a),10)
u=round(u,digits = 4)
# Standard error of the z statistic: 1 / sqrt(n - 3)
s=1/sqrt(n-3)
s=round(s,digits = 4)
z=(z1-u)/s
z=round(z,digits = 2)
z
a=0.05
x1=1-a
# Critical value; df = 1/0 = Inf makes qt() the standard normal quantile.
x=qt(x1,df=1/0)
x=round(x,digits = 2)
x
# H0 is not rejected when the statistic is below the critical value.
# BUGFIX: was `if(x>z) k<-TRUE`, which left k undefined when FALSE;
# assign the logical comparison directly instead.
k <- x > z
k
# Repeat the comparison against rho = 0.5.
b=0.5
y=1.1513*log((1+b)/(1-b),10)
y=round(y,digits = 4)
z2=(z1-y)/s
z2=round(z2,digits = 2)
z2
if(x>y) k<-FALSE
k
#"The answer may vary due to difference in representation."
|
63d5dd0a1b698a999b7c6f41b0161b16603761de
|
91294be1f45be0ebe4e588866decab350e7e59a7
|
/CrabStats/GapAnalysis.R
|
0f3f3958ad763770cad13184e0d726a6df901b38
|
[] |
no_license
|
Zheng261/CrabitatResearch
|
6530f5bbc9df8b6406addcbbf48ed7b798c025fd
|
769c00061088638a9b8d581311eb4e0db7b79ff6
|
refs/heads/master
| 2021-06-24T02:27:57.075776
| 2019-05-25T10:52:57
| 2019-05-25T10:52:57
| 140,462,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,400
|
r
|
GapAnalysis.R
|
# Gap analysis: locate tracking records where more than 5000 s elapsed
# between fixes and summarize the habitat at those gap locations.
# Expects `crabs201X` (tracking data with CrabNum, Date, Time, Elapsed,
# Latitude, Longitude columns) in the calling environment.
#for (crab in unique(crabs201X$CrabNum)) {
#thisCrab = crabs201X[which(crabs201X$CrabNum == crab),]
#timeVec = thisCrab$Date[2]
#dateVec = as.POSIXct(paste(thisCrab$Date,thisCrab$Time),format="%m/%d/%y %H:%M:%S")
#dateVec2 = c(dateVec[1],dateVec[-length(dateVec)])
#plot(dateVec,thisCrab$Latitude)
#plot(dateVec,thisCrab$Longitude)
#plot(dateVec,thisCrab$Distance)
#}
# Each gap contributes two rows: the record after the gap
# (Elapsed > 5000) and the record immediately before it (index - 1).
gaps = crabs201X[c(which(crabs201X$Elapsed > 5000),which(crabs201X$Elapsed > 5000)-1),]
# Timestamp of each gap record (used by the optional plots below).
dateVecGaps = as.POSIXct(paste(gaps$Date,gaps$Time),format="%m/%d/%y %H:%M:%S")
#plot(dateVecGaps,gaps$Latitude)
#plot(dateVec,crabs201X$Latitude)
#plot(dateVecGaps,gaps$Longitude)
#plot(dateVec,crabs201X$Longitude)
#Sets up crab location data frame
# One row per crab: habitat counts at gap locations ("Total*") and
# habitat availability on the crab's island ("Avail*").
gapDF = data.frame(matrix(0,ncol=11,nrow=length(unique(gaps$CrabNum))))
colnames(gapDF) <- c("CrabNum","Island","NumEntries","TotalCocos","TotalNatives","TotalScaevola","TotalSand","AvailCocos","AvailNatives","AvailScaevola","AvailSand")
gapDF$CrabNum <- unique(gaps$CrabNum)
# Return the row of `df` nearest (Euclidean, in degrees) to the query
# point (x = latitude, y = longitude), considering only rows within
# 1e-5 degrees of the query on both axes. Returns a zero-row data
# frame when no row is that close.
# df must have numeric columns `lat` and `long`.
findClosestPoint <- function(x,y,df) {
  # Candidate rows: inside the tolerance box around the query point.
  dfclose = df[which(abs(df$lat-x) < 0.00001 & abs(df$long-y) < 0.00001),]
  minDist = 100;
  minRow = -1;
  if (nrow(dfclose)==0) {
    return(dfclose)
  }
  for(row in 1:nrow(dfclose)) {
    # BUGFIX: distances were computed against `df` (the full frame)
    # instead of `dfclose` (the candidate subset), so the wrong row
    # could be selected once the subset reindexed the rows.
    dist = sqrt((dfclose[row,"lat"] - x)^2 + (dfclose[row,"long"] - y)^2)
    if (dist < minDist) {
      minRow = row
      minDist = dist
    }
  }
  return(dfclose[minRow,])
}
# NOTE(review): the for-loop header below (and apparently its closing
# brace) is commented out, so this body runs exactly once and relies on
# a pre-existing `crab` variable in the calling environment -- confirm
# whether the `#` was unintentional.
#for (crab in 1:nrow(gapDF)) {
# This crab's gap records, island, and number of gap entries.
thisCrabTracks = gaps[gaps$CrabNum==gapDF[crab,"CrabNum"],]
gapDF[crab,"Island"]= thisCrabTracks[1,"Island"]
gapDF[crab,"NumEntries"] = nrow(thisCrabTracks)
# Habitat availability for this crab's island (from allLocations).
gapDF[crab,"AvailCocos"] = allLocations[gapDF[crab,"Island"],"Cocos"]
gapDF[crab,"AvailNatives"] = allLocations[gapDF[crab,"Island"],"Natives"]
gapDF[crab,"AvailScaevola"] = allLocations[gapDF[crab,"Island"],"Scaevola"]
gapDF[crab,"AvailSand"] = allLocations[gapDF[crab,"Island"],"Sand"]
# Habitat raster points for the island; column 1 encodes habitat type
# (0 = Cocos, 1 = Natives, 2 = Scaevola, 5 = Sand per the tallies below).
thisCrabIsland = islandCoordsList[[gapDF[crab,"Island"]]]
thisCrabTracks$isInWater = FALSE;
# Tally the habitat type nearest to each gap fix.
for (i in 1:nrow(thisCrabTracks)) {
#temp = thisCrabIsland[(thisCrabIsland[,"long"]==thisCrabTracks$Longitude[i]&thisCrabIsland[,"lat"]==thisCrabTracks$Latitude[i]),]
#temp = r.coordpts@data[(r.coordpts@data[,"long"]==thisCrabTracks$Longitude[i]&r.coordpts@data[,"lat"]==thisCrabTracks$Latitude[i]),]
temp = findClosestPoint(thisCrabTracks$Latitude[i], thisCrabTracks$Longitude[i], thisCrabIsland)
if (nrow(temp) != 0) {
for (j in 1:nrow(temp)) {
if (temp[j,1] == 0) {
gapDF[crab,"TotalCocos"] = gapDF[crab,"TotalCocos"] + 1
} else if (temp[j,1] == 1) {
gapDF[crab,"TotalNatives"] = gapDF[crab,"TotalNatives"] + 1
} else if (temp[j,1] == 2) {
gapDF[crab,"TotalScaevola"] = gapDF[crab,"TotalScaevola"] + 1
} else if (temp[j,1] == 5) {
gapDF[crab,"TotalSand"] = gapDF[crab,"TotalSand"] + 1
}
}
}
}
# Normalize counts to proportions; +0.01 avoids zero cells (presumably
# for the selection-ratio analysis below -- TODO confirm).
gapDF[,c("TotalCocos","TotalNatives","TotalScaevola","TotalSand")] = (gapDF[,c("TotalCocos","TotalNatives","TotalScaevola","TotalSand")]+0.01)/rowSums(gapDF[,c("TotalCocos","TotalNatives","TotalScaevola","TotalSand")])
# Export the gap fixes as a WGS84 point shapefile (sp/raster packages).
coordinates(gaps) <- ~Longitude+Latitude
proj4string(gaps)<-CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
gaps@data = gaps@data[,1:9]
shapefile(gaps, "gapsVisualTenHours.shp")
# Habitat selection ratios (used vs available); widesIII is from the
# adehabitat family of packages -- TODO confirm which one is loaded.
widesIII(gapDF[,c("TotalCocos","TotalNatives","TotalScaevola","TotalSand")],gapDF[,c("AvailCocos","AvailNatives","AvailScaevola","AvailSand")])
|
4c618d081934715efa02a5922bdb13fb481a35b5
|
bd8c894931368fa85ec44590f3f0b6d0dd21c5ac
|
/R/POTW_compliance_functions.R
|
0f76951ec1ad6eab4b78c5c7dcf3ae5323348c75
|
[] |
no_license
|
SCCWRP/POTW_Compliance
|
b7953f60edbe3faf3903edb2462bb7043937dcd0
|
cfdce8820171f7361361a46b8f1dcc9e3d58c835
|
refs/heads/master
| 2020-04-14T18:45:37.938887
| 2019-12-19T00:09:18
| 2019-12-19T00:09:18
| 164,031,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47,558
|
r
|
POTW_compliance_functions.R
|
# POTW_compliance_functions.R
# Functions used in the code "app.R" in the program "POTW_compliance_v202_start.R"
#
# List of functions:
#
# settings.list.create
# settings.change.params
# SCB_map
# grep_start
# grep.start.mult
# calc.param.sel
# plot.profiles
# plot.one.profile
# plot.ref.profile
# plot.prof.graph
# Sigma.UML.calc
# geodetic.distance
# CDOM.plume.detect
# Ref.stns.select
# UML.Z.calc
# Ref.Prof.calc
# filter_np
# V.diff.calc
# TdegC.V.fit.calc
# V.diff.Entr.calc
# V.Z.min.calc
# outr.detect
# report.data.file
# report.data.selected
# report.plume.list
# report.plume.settings
# report.ref.list
# report.ref.settings
# report.outr.param
# report.ref.prof
# report.outr.method
# report.outr.settings
# report.entr.setting
# report.outr.list
# report.max.decrease.depths
#
# Function settings.list.create ***********************************************
# Build the master settings list from the raw settings table
# (Plume.settings.R: columns ShortName, Value) and the per-parameter
# out-of-range table (Outr.param.settings). Rows are grouped by their
# ShortName prefix ("plume_", "ref_", "outr_", "prof_comp_", "entr_")
# and Value is coerced to the appropriate type for each group.
settings.list.create <- function( Plume.settings.R, Outr.param.settings ) {
# Plume settings (numeric)
indx.plume.settings <- grep( "plume_", Plume.settings.R$ShortName )
Plume.settings <- Plume.settings.R[ indx.plume.settings, ]
Plume.settings$Value <- as.numeric( Plume.settings$Value )
# Ref. settings (numeric)
indx.ref.settings <- grep( "ref_", Plume.settings.R$ShortName )
Ref.settings <- Plume.settings.R[ indx.ref.settings, ]
Ref.settings$Value <- as.numeric( Ref.settings$Value )
# Outrange settings (numeric)
indx.outr.settings <- grep( "outr_", Plume.settings.R$ShortName )
Outr.settings <- Plume.settings.R[ indx.outr.settings, ]
Outr.settings$Value <- as.numeric( Outr.settings$Value )
# Profile compare method setting (character)
indx.prof.comp.settings <- grep( "prof_comp_", Plume.settings.R$ShortName )
Prof.comp.settings <- Plume.settings.R[ indx.prof.comp.settings, ]
# BUGFIX: this line previously coerced Outr.settings$Value (already
# numeric) to character instead of Prof.comp.settings$Value.
Prof.comp.settings$Value <- as.character( Prof.comp.settings$Value )
# Entrainment settings (logical)
indx.entr.settings <- grep( "entr_", Plume.settings.R$ShortName )
Entr.settings <- Plume.settings.R[ indx.entr.settings, ]
Entr.settings$Value <- as.logical( Entr.settings$Value )
#
settings <- list(
  d.rho.uml = 0.125,
  # Outfall station/depth lookup, keyed by Agency
  Outfall = data.frame(
    Agency = c( "Hyperion","LACSD","OCSD","San Diego - Point Loma" ),
    Station = c( "3505","2903","2205","F30"),
    depth=c(60,60,57,93)
  ),
  Plume = Plume.settings,
  Ref = Ref.settings,
  Outr = Outr.settings,
  Prof.comp = Prof.comp.settings,
  Entr = Entr.settings,
  Outr.params = Outr.param.settings
)
# Select Ref and Outr settings for first parameter in the list (DO)
Param1 <- settings$Outr.params$outr_param_name[1]
settings <- settings.change.params( settings, Param1 )
#
return( settings )
}
# End of function settings.list.create ****************************************
#
# Function settings.change.param **********************************************
# Copy the per-parameter Ref/Outr/Prof.comp values for parameter
# `Param1` out of the settings$Outr.params lookup table into the active
# settings$Ref, settings$Outr and settings$Prof.comp tables, and return
# the updated settings list.
settings.change.params <- function( settings, Param1 ) {
  # Row of the per-parameter table that describes Param1
  row.param <- which( settings$Outr.params$outr_param_name == Param1 )[1]
  # Reference-station settings (numeric)
  for ( nm in c( "ref_CDOM_thrsh", "ref_dfo_max", "ref_stn_min" ) ) {
    settings$Ref$Value[ settings$Ref$ShortName == nm ] <-
      as.numeric( settings$Outr.params[ row.param, nm ] )
  }
  # Out-of-range detection settings (numeric)
  for ( nm in c( "outr_refprof_dRho", "outr_refprof_filtW", "outr_refprof_Kstd",
                 "outr_refprof_Zwindow", "outr_threshold" ) ) {
    settings$Outr$Value[ settings$Outr$ShortName == nm ] <-
      as.numeric( settings$Outr.params[ row.param, nm ] )
  }
  # Profile-comparison method (character, copied as-is)
  nm <- "prof_comp_method"
  settings$Prof.comp$Value[ settings$Prof.comp$ShortName == nm ] <-
    settings$Outr.params[ row.param, nm ]
  settings
}
# End of function settings.change.params ***************************************
#
# Function SCB_map ************************************************************
# Draw the Southern California Bight base map (PBSmapping): USA and
# Mexico coastlines, outfall pipelines, all stations, then highlight
# plume / reference / out-of-range stations with larger markers and a
# legend. Stn_coords must have Longitude, Latitude and logical Plume,
# Ref, Outrange columns; map.lims has x and y limit vectors.
SCB_map <- function( SCB_usa, GSHHS_mex, Pipes_PBS, Stn_coords, map.lims ) {
suppressWarnings( plotMap( SCB_usa, # USA coastline
xlim = map.lims$x, ylim = map.lims$y,
col="#CCCCCC", bg="#0088FF", lwd=1, xlab="", ylab="",
plt = c(0,1,0,1),
xaxt = "n", yaxt = "n" ) )
suppressWarnings( addPolys( GSHHS_mex, # Mexico coastline
xlim = map.lims$x, ylim = map.lims$y,
col="#CCCCCC", bg="#0088FF", lwd=1, xlab="", ylab="",
plt = c(0,1,0,1),
xaxt = "n", yaxt = "n" ) )
# Pipelines
if( !is.null( Pipes_PBS ) ) {
suppressWarnings( addLines( as.PolySet( Pipes_PBS, projection="LL" ), col = "black", lwd = 2 ) )
}
# Stations
# Convert to a PBSmapping PolyData object (one PID per station).
Stn_coords.PD <- data.frame( PID = 1:nrow(Stn_coords),
X = Stn_coords$Longitude,
Y = Stn_coords$Latitude )
Stn_coords.PD <- as.PolyData( Stn_coords.PD, projection = "LL" )
addPoints( Stn_coords.PD, pch = 21, cex = 1.6, bg = "#FFFF00" )
# Legend entries are accumulated in parallel vectors as layers are
# added, so the legend always matches what was actually drawn.
legend.text <- "Stations"
legend.pch <- 21
legend.pt.cex <- 1.6
legend.pt.bg <- "#FFFF00"
#browser()
# NOTE(review): sum( flag > 0 ) counts TRUE values; used as an if()
# condition it means "any station flagged" -- confirm the flags are
# logical as assumed.
if( sum( Stn_coords$Plume > 0 ) ) {
addPoints( Stn_coords.PD[Stn_coords$Plume,],
pch = 21, cex = 3.0, bg = "#FF0000" )
legend.text <- c( legend.text, "Plume" )
legend.pch <- c( legend.pch, 21 )
legend.pt.cex <- c( legend.pt.cex, 3.0 )
legend.pt.bg <- c( legend.pt.bg, "#FF0000" )
}
if( sum( Stn_coords$Ref > 0 ) ) {
addPoints( Stn_coords.PD[Stn_coords$Ref,],
pch = 21, cex = 3.0, bg = "#00FF00" )
legend.text <- c( legend.text, "Reference" )
legend.pch <- c( legend.pch, 21 )
legend.pt.cex <- c( legend.pt.cex, 3.0 )
legend.pt.bg <- c( legend.pt.bg, "#00FF00" )
}
if( sum( Stn_coords$Outrange > 0 ) ) {
addPoints( Stn_coords.PD[Stn_coords$Outrange,],
pch = 21, cex = 5.0, bg = "#FFBB00" )
legend.text <- c( legend.text, "Outranges" )
legend.pch <- c( legend.pch, 21 )
legend.pt.cex <- c( legend.pt.cex, 5.0 )
legend.pt.bg <- c( legend.pt.bg, "#FFBB00" )
}
legend( "topright", legend = legend.text,
pch = legend.pch, pt.cex = legend.pt.cex, pt.bg = legend.pt.bg
)
}
# End of function SCB_map ******************************************************
# Function grep_start **********************************************************
# Find the first index of the string (in str_list) starting
# with a sub_str
# Return the index of the first element of str_list whose prefix (of
# length nchar(sub_str)) matches sub_str; sub_str is treated as a
# regular expression by grep(). Returns integer(0) when nothing
# matches.
grep_start <- function( sub_str, str_list ){
  indx_sel <- grep( sub_str, substr( str_list, 1, nchar(sub_str) ) )
  if( length(indx_sel)>1 ) indx_sel <- indx_sel[1]
  # Previously ended with the no-op `indx_sel <- indx_sel`, which also
  # made the result invisible; return it explicitly instead.
  return( indx_sel )
}
# End of function grep_start ***************************************************
# Function grep.start.mult *****************************************************
# Finds the first index of the string (in str_list) starting
# with a one of the elements of the "sub_str" (which may be several strings)
# Returns NA if nothing was found
# Return the index of the first element of str_list that starts with
# any of the candidate prefixes in sub_str (each treated as a regular
# expression against the corresponding-length prefix). Returns NA when
# none of the candidates matches.
grep.start.mult <- function ( sub_str, str_list ) {
  #
  # One grep per candidate prefix, each run against the truncated
  # strings of that prefix's length.
  hits <- apply( as.data.frame( sub_str, stringsAsFactors = FALSE ), 1,
                 function( s ) { grep( s, substr( str_list, 1, nchar( s ) ) ) } )
  hits <- as.numeric( hits )
  hits <- hits[ is.finite( hits ) ]
  if ( length( hits ) == 0 ) {
    return( NA )
  }
  hits[ 1 ]
}
# End of function grep.start.mult **********************************************
# Function calc.param.sel *****************************************************
# Calculates data frame param.sel, including columns:
# col.name and k.col (the name(s) the column should start and its number)
# For each parameter name in param.list, locate the matching data
# column: k.col (index into data.col.names, NA when absent) and
# col.name (the matched column name, or the candidate prefixes when no
# column matched). Param.names must have columns Parameter and
# ParamNameStarts; matching uses grep.start.mult() defined above.
calc.param.sel <- function( param.list, Param.names, data.col.names ) {
n.param <- length( param.list )
param.sel <- data.frame( k.col = rep( NA, n.param ), col.name = character( n.param ),
stringsAsFactors = FALSE )
rownames( param.sel ) <- param.list
for( k.param.sel in 1:n.param ) {
k.param <- which( Param.names$Parameter == rownames( param.sel )[ k.param.sel ] )
if( length( k.param ) == 0 ) {
# NOTE(review): a parameter missing from Param.names discards the
# whole result (param.sel <- NULL), yet the loop keeps running and
# later iterations assign into the NULL object, silently turning it
# into a list -- confirm this is intended.
param.sel <- NULL
} else {
param.sel$k.col[ k.param.sel ] <- grep.start.mult( Param.names$ParamNameStarts[ k.param ], data.col.names )
if( is.na( param.sel$k.col[ k.param.sel ] ) ) {
# No column matched: record the candidate prefixes instead.
param.sel$col.name[ k.param.sel ] <- paste( Param.names$ParamNameStarts[ k.param ], collapse = "," )
} else {
param.sel$col.name[ k.param.sel ] <- paste( data.col.names[ param.sel$k.col[ k.param.sel ] ], collapse = "," )
}
}
}
#browser()
#
return( param.sel )
}
# End of function calc.param.sel **********************************************
# Function plot.profiles *******************************************************
# Draw one line per station profile onto the current plot.
# X, Z: sample coordinates; StnID: station id for every sample;
# lwd and col are forwarded to plot.one.profile() for each station.
plot.profiles <- function( X, Z, StnID, lwd = 1, col = "#000000" ) {
  # One row per distinct station id
  StnID.list <- data.frame( StnID = unique( StnID ) )
  # BUGFIX: the line width was hard-coded to 1 here, so the lwd
  # argument was silently ignored; forward it instead.
  apply( X = StnID.list, MARGIN = 1, FUN = plot.one.profile, X, Z, StnID,
         lwd, col )
}
# End of function plot.profiles ************************************************
# Function plot.one.profile ****************************************************
# Draw the profile of a single station (samples ordered by depth) onto
# the current plot. StnID.plot may carry extra whitespace-separated
# text; only its first token is used as the station id.
plot.one.profile <- function( StnID.plot, X, Z, StnID, lwd = 1, col = "#000000" ) {
  StnID.plot <- strsplit( StnID.plot, split = " " )[[1]][1]
  indx.stn <- ( StnID == StnID.plot )
  X.plot <- X[indx.stn]
  Z.plot <- Z[indx.stn]
  # Sort the samples by depth so the line follows the profile
  indx.sort <- order( Z.plot )
  X.plot <- X.plot[ indx.sort ]
  Z.plot <- Z.plot[ indx.sort ]
  # BUGFIX: the depth-sorted vectors were computed but the unsorted
  # X[indx.stn], Z[indx.stn] were drawn; use the sorted values.
  lines( X.plot, Z.plot, lwd = lwd, col = col )
}
# End of function plot.one.profile *********************************************
# Function plot.ref.profile ****************************************************
# Overlay the reference profile on the current plot: the mean profile
# plus an envelope of +/- Ref.Prof.Kstd standard deviations. RefProf
# columns: [,1] vertical axis, [,2] mean value, [,3] std.
plot.ref.profile <-function( RefProf, Ref.Prof.Kstd ) {
# RefProf[ c("Vertical axis","V-mean","V-std")]
#browser()
# Order by the vertical axis so the lines are drawn monotonically.
RefProf <- RefProf[ order( RefProf[,1] ), ]
lines( RefProf[,2], RefProf[,1], col = "green", lwd = 3, lty = 3 )
# NOTE(review): the upper band is dotted (lty = 3) but the lower band
# is solid (lty = 1) -- the asymmetry looks unintentional; confirm.
lines( RefProf[,2] + Ref.Prof.Kstd * RefProf[,3], RefProf[,1] ,
col = "green", lwd = 3, lty = 3 )
lines( RefProf[,2] - Ref.Prof.Kstd * RefProf[,3], RefProf[,1],
col = "green", lwd = 3, lty = 1 )
}
# End of function plot.ref.profile *********************************************
# Function plot.prof.graph ****************************************************
# Scatter-plot parameter values against depth (depth axis reversed),
# then overlay one line per station via plot.profiles(). Axis limits
# default to the data range padded by 10%; when both prof.Z.ranges$x
# and prof.Z.ranges$y are supplied they are used instead.
plot.prof.graph <- function( x.data, z.data, StnID, prof.Z.ranges, main, ylab ) {
  # Default limits: data range extended by 10% on each side
  lims.x <- extendrange( x.data, f = 0.1 )
  lims.y <- extendrange( z.data, f = 0.1 )
  if ( !is.null( prof.Z.ranges$x ) & !is.null( prof.Z.ranges$y ) ) {
    lims.x <- prof.Z.ranges$x
    lims.y <- prof.Z.ranges$y
  }
  # Reversed y-limits put the surface at the top of the plot
  plot( x.data, z.data, main = main,
        xlim = lims.x,
        ylim = rev( lims.y ),
        xlab = "", ylab = ylab,
        font.main = 2, font.lab = 2, cex.main = 1.6, cex.lab = 1.2 )
  plot.profiles( x.data, z.data, StnID, 1, "#000000" )
}
# End of function plot.prof.graph *********************************************
# Function Sigma.UML.calc *********************************************
# For every sample in Surv.data, return the density (Sigma) at the top
# of the pycnocline for that sample's profile: the Sigma interpolated
# at depth plume_uml_min_layer plus plume_uml_drho. NA when the profile
# has fewer than two usable points or does not reach that depth.
# Surv.data needs columns Profile, Z, Sigma; settings.Plume is the
# ShortName/Value table of plume settings.
Sigma.UML.calc <- function( Surv.data, settings.Plume ) {
#
d.rho.uml <- with( settings.Plume, Value[ which( ShortName == "plume_uml_drho" )[1] ] )
min.layer.uml <- with( settings.Plume, Value[ which( ShortName == "plume_uml_min_layer" )[1] ] )
Profile.list <- unique( Surv.data$Profile )
Sigma.UML <- rep( NA, nrow( Surv.data ) )
for( k.prof in seq_along( Profile.list ) ) {
  indx.prof1 <- which( Surv.data$Profile == Profile.list[k.prof] )
  if( length( indx.prof1 ) > 0 ) {
    Prof <- cbind( Surv.data$Z[ indx.prof1 ], Surv.data$Sigma[ indx.prof1 ] )
    # Drop rows with any non-finite value. BUGFIX: keep the matrix
    # shape with drop = FALSE -- a single surviving row used to
    # collapse to a vector and crash nrow() below.
    Prof <- Prof[ !rowSums( !is.finite( Prof ) ), , drop = FALSE ]
    if( nrow( Prof ) > 0 ) {
      # Sort by depth and keep one row per distinct depth
      Prof <- Prof[ order( Prof[,1] ), , drop = FALSE ]
      Prof <- Prof[ !duplicated( Prof[,1] ), , drop = FALSE ]
      Z.stn1 <- Prof[,1]
      Sigma.stn1 <- Prof[,2]
      # approx() needs at least two points, and the profile must reach
      # below the minimum mixed-layer depth
      if( length( Z.stn1 ) > 1 && max( Z.stn1 ) > min.layer.uml ) {
        Sigma.10m <- approx( Z.stn1, Sigma.stn1, min.layer.uml )$y
        if( is.finite( Sigma.10m ) ) {
          Sigma.UML[ indx.prof1 ] <- Sigma.10m + d.rho.uml
        }
      }
    }
  }
}
return( Sigma.UML )
}
# End of function Sigma.UML.calc *******************************************
#
# Function geodetic.distance ***********************************************
# Calculate the distance between two geographic locations
# Call: Dist <- geodetic.distance( c(Lon1,Lat1), c(Lon2,Lat2) )
# Great-circle distance in km between two geographic points on a
# spherical Earth (radius 6371 km), via the spherical law of cosines.
# Call: Dist <- geodetic.distance( c(Lon1,Lat1), c(Lon2,Lat2) )
geodetic.distance <- function( point1, point2 ) {
  earth.radius <- 6371
  # Convert degrees to radians; element [1] is longitude, [2] latitude
  rad1 <- point1 * pi / 180
  rad2 <- point2 * pi / 180
  # Cosine of the central angle between the two points
  cos.angle <- sin( rad1[2] ) * sin( rad2[2] ) +
    cos( rad1[2] ) * cos( rad2[2] ) * cos( abs( rad1[1] - rad2[1] ) )
  earth.radius * acos( cos.angle )
}
# End of function geodetic.distance *****************************************
# Function CDOM.plume.detect **************************************************
# Flag plume samples using the CDOM signal. A sample is a plume
# candidate when its CDOM exceeds the CDOM.thrsh.pc percentile of
# sub-pycnocline CDOM, it lies below the upper mixed layer, above the
# outfall depth, and at a station deeper than plume_depth_min. A
# "plume core" near the outfall is then grown outward by attaching
# candidates close in distance and density. Returns one logical per
# row of Surv.data (FALSE for agencies without a configured outfall).
CDOM.plume.detect <- function( Stn.list.surv, Surv.data, settings,
CDOM.thrsh.pc ) {
#
Agency <- unique( Stn.list.surv$Agency )[1]
if( !( Agency %in% settings$Outfall$Agency ) ) {
return( Surv.data$Plume <- FALSE )
}
# Calculate "Sigma.UML" for each sample (Sigma at the upper pycnocline boundary)
Surv.data$Sigma.UML <- Sigma.UML.calc( Surv.data, settings$Plume )
# Depth (m) of this agency's outfall diffuser
outfl.depth <- with( settings$Outfall,
depth[ which( settings$Outfall$Agency == Agency )[1] ] )
# Calculate "Stn.depth" and "dist_from_outfl" for all samples
Stn.list.nodup <- with( Stn.list.surv, data.frame( StnID = Station,
Stn.depth = Depth, Dist.Outfl = Dist.Outfl,
Latitude = Latitude, Longitude = Longitude ) )
Stn.list.nodup <- Stn.list.nodup[ !duplicated( Stn.list.nodup$StnID ), ]
#browser()
#Surv.data1 <- merge( Surv.data, Stn.list.nodup, sort = FALSE )
# plyr::join keeps the original row order (unlike merge)
Surv.data <- plyr::join( Surv.data, Stn.list.nodup, by = "StnID" )
# Extract plume detection settings
# plume.depth.min <- with( Plume.settings, Value[ which( ShortName == "plume_depth_min" )[1] ] )
# plume.d.outfl.km <- with( Plume.settings, Value[ which( ShortName == "plume_d_outfl_km" )[1] ] )
# plume.z.over.bottom <- with( Plume.settings, Value[ which( ShortName == "plume_z_over_bottom" )[1] ] )
# plume.d.km <- with( Plume.settings, Value[ which( ShortName == "plume_d_km" )[1] ] )
# plume.d.rho <- with( Plume.settings, Value[ which( ShortName == "plume_d_rho" )[1] ] )
plume.depth.min <- with( settings$Plume, Value[ which( ShortName == "plume_depth_min" )[1] ] )
plume.d.outfl.km <- with( settings$Plume, Value[ which( ShortName == "plume_d_outfl_km" )[1] ] )
plume.z.over.bottom <- with( settings$Plume, Value[ which( ShortName == "plume_z_over_bottom" )[1] ] )
plume.d.km <- with( settings$Plume, Value[ which( ShortName == "plume_d_km" )[1] ] )
plume.d.rho <- with( settings$Plume, Value[ which( ShortName == "plume_d_rho" )[1] ] )
# CDOM threshold: percentile of CDOM over sub-pycnocline samples above
# the outfall depth at sufficiently deep stations
indx.CDOM <- with( Surv.data,
( Sigma > Sigma.UML ) &
( Z <= outfl.depth ) &
( Stn.depth > plume.depth.min ) )
CDOM.thresh <- as.numeric(
quantile( Surv.data$CDOM[ indx.CDOM ], probs = CDOM.thrsh.pc/100,
na.rm = TRUE ) )
Surv.data$plume.trace <- FALSE
Surv.data$Plume <- FALSE
# Plume-trace candidates: high CDOM below the UML, above outfall depth
indx.plume.1 <- with( Surv.data,
( CDOM > CDOM.thresh ) &
( Sigma > Sigma.UML ) &
( Z <= outfl.depth ) &
( Stn.depth > plume.depth.min ) )
Surv.data$plume.trace[ indx.plume.1 ] <- TRUE
#browser()
# Select "plume core" samples
indx.plume.core <- with( Surv.data,
( plume.trace ) &
( Dist.Outfl < plume.d.outfl.km ) )
Surv.data$Plume[ indx.plume.core ] <- TRUE
Surv.data$plume.trace[ indx.plume.core ] <- FALSE
n.plume.samples <- sum( Surv.data$Plume )
if( n.plume.samples > 0 ) {
# Grow the plume: repeatedly attach trace samples (not too close to
# the bottom) to the confirmed plume until no more can be added
plume_increased <- TRUE
while( plume_increased ) {
plume_increased <- FALSE
indx.plume2add <- which( with( Surv.data,
( !Plume ) &
( plume.trace ) &
( Z <= Stn.depth - plume.z.over.bottom ) ) )
indx.cont.plume <- which( Surv.data$Plume )
n.plume2add <- length( indx.plume2add )
if( n.plume2add > 0 ) {
n.cont.plume <- length( indx.cont.plume )
# Pairwise geodetic distances: candidates (rows) x confirmed plume
# samples (columns)
dist.btw.plumes <- matrix( data = NA, ncol = n.cont.plume,
nrow = n.plume2add )
Lat.Lon.cont.plume <-
as.matrix( Surv.data[ indx.cont.plume, c("Longitude","Latitude")] )
Lat.Lon.plume2add <-
as.matrix( Surv.data[ indx.plume2add, c("Longitude","Latitude")] )
for( k.plume2add in 1:n.plume2add ) {
dist.btw.plumes[ k.plume2add, ] <-
apply( Lat.Lon.cont.plume, 1, geodetic.distance,
Lat.Lon.plume2add[ k.plume2add, ] )
}
# Pairwise density differences, same candidate x plume layout
sigma.cont.plume <- matrix(
data = rep( Surv.data$Sigma[ indx.cont.plume ], each = n.plume2add ),
ncol = n.cont.plume, nrow = n.plume2add )
sigma.plume2add <- matrix(
data = rep( Surv.data$Sigma[ indx.plume2add ], times = n.cont.plume ),
ncol = n.cont.plume, nrow = n.plume2add )
sigma.btw.plumes <- abs( sigma.cont.plume - sigma.plume2add )
# TRUE where a candidate is close to some plume sample in both
# distance and density
indx.plume.added.matrix <- ( ( dist.btw.plumes < plume.d.km ) &
( sigma.btw.plumes < plume.d.rho ) )
if( any( indx.plume.added.matrix ) ) {
# NOTE(review): indx.plume.added (the candidates that actually
# matched) is computed but not used -- the next line flags ALL
# candidates in indx.plume2add, not just the matching ones.
# Confirm whether Surv.data$Plume[ indx.plume2add[ indx.plume.added ] ]
# was intended.
indx.plume.added <- ( rowSums( indx.plume.added.matrix ) > 0 )
Surv.data$Plume[ indx.plume2add ] <- TRUE
Surv.data$plume.trace[ indx.plume2add ] <- FALSE
plume_increased <- TRUE
}
}
}
}
return( Surv.data$Plume )
}
# End of function CDOM.plume.detect *******************************************
# Function Ref.stns.select **************************************************
# Select reference stations for a survey: start with all stations and
# successively exclude those flagged RefPlume, without CDOM data,
# shallower than plume_depth_min, or whose upper mixed layer reaches
# the bottom. Then keep stations within ref_dfo_max of the outfall if
# at least ref_stn_min qualify, otherwise the ref_stn_min stations
# closest to the outfall. Returns one logical per station row.
Ref.stns.select <- function( Stn.list.surv, Surv.data, settings ) {
#
#browser()
Agency <- unique( Stn.list.surv$Agency )[1]
# NOTE(review): this early return yields a single FALSE (assigned to
# Surv.data$Plume), while the normal path returns a per-station
# vector -- confirm callers handle both shapes.
if( !( Agency %in% settings$Outfall$Agency ) ) {
return( Surv.data$Plume <- FALSE )
}
# Remove stations with "RefPlume"
Stn.list.surv$Ref = TRUE
Stn.list.surv$Ref[ Stn.list.surv$RefPlume ] <- FALSE
# Remove stations with no CDOM
StnID.CDOM <- unique( Surv.data$StnID[ is.finite( Surv.data$CDOM ) ] )
Stn.list.surv$Ref[ !(Stn.list.surv$Station %in% StnID.CDOM) ] <- FALSE
# Remove stations which do not fit the "plume" depth limit
plume.depth.min <- with( settings$Plume, Value[ which( ShortName == "plume_depth_min" )[1] ] )
Stn.list.surv$Ref[ Stn.list.surv$Depth <= plume.depth.min ] <- FALSE
# Remove stations with UML by the bottom
Stn.list.surv$UML.Z <- UML.Z.calc( Stn.list.surv$Profile, Surv.data,
settings$Plume )
Stn.list.surv$Ref[ is.na( Stn.list.surv$UML.Z ) ] <- FALSE
Stn.list.surv$Ref[ ( Stn.list.surv$Depth <= Stn.list.surv$UML.Z ) ] <- FALSE
#
ref.dfo.max <- with( settings$Ref, Value[ which( ShortName == "ref_dfo_max" )[1] ] )
ref.stn.min <- with( settings$Ref, Value[ which( ShortName == "ref_stn_min" )[1] ] )
# Find the indices ref.stn.min reference stations most close to the outfall
# (non-candidates get NA so rank() pushes them to the end)
Dist.outfl.ref <- Stn.list.surv$Dist.Outfl
Dist.outfl.ref[ !Stn.list.surv$Ref ] <- NA
Dist.outfl.ref.rank <- rank( Dist.outfl.ref )
if( sum( Stn.list.surv$Dist.Outfl[ Stn.list.surv$Ref ] <= ref.dfo.max )
>= ref.stn.min ) {
# Enough close candidates: keep only those within ref.dfo.max
Stn.list.surv$Ref[ Stn.list.surv$Dist.Outfl > ref.dfo.max ] <- FALSE
} else {
# Otherwise fall back to the ref.stn.min closest candidates
Stn.list.surv$Ref <- FALSE
Stn.list.surv$Ref[ Dist.outfl.ref.rank <= ref.stn.min ] <- TRUE
}
return( Stn.list.surv$Ref )
}
# End of function Ref.stns.select *******************************************
#
# Function UML.Z.calc *********************************************
# Depth (m) of the upper mixed layer base for each profile in
# Profile.list: the depth where Sigma first reaches the density
# interpolated at plume_uml_min_layer plus plume_uml_drho, found by
# linear interpolation. NA for unusable profiles (no samples, fewer
# than two distinct depths, or interpolation target out of range).
UML.Z.calc <- function( Profile.list, Surv.data, settings.Plume ) {
#
d.rho.uml <- with( settings.Plume, Value[ which( ShortName == "plume_uml_drho" )[1] ] )
min.layer.uml <- with( settings.Plume, Value[ which( ShortName == "plume_uml_min_layer" )[1] ] )
Z.UML <- rep( NA, length( Profile.list ) )
for( k.prof in seq_along( Profile.list ) ) {
  indx.prof1 <- which( Surv.data$Profile == Profile.list[k.prof] )
  if( length( indx.prof1 ) > 0 ) {
    Prof <- cbind( Surv.data$Z[ indx.prof1 ], Surv.data$Sigma[ indx.prof1 ] )
    # Drop rows with any non-finite value. BUGFIX: keep the matrix
    # shape with drop = FALSE -- a single surviving row used to
    # collapse to a vector and crash nrow() below.
    Prof <- Prof[ !rowSums( !is.finite( Prof ) ), , drop = FALSE ]
    if( nrow( Prof ) > 0 ) {
      # Sort by depth and keep one row per distinct depth
      Prof <- Prof[ order( Prof[,1] ), , drop = FALSE ]
      Prof <- Prof[ !duplicated( Prof[,1] ), , drop = FALSE ]
      Z.stn1 <- Prof[,1]
      Sigma.stn1 <- Prof[,2]
      # approx() needs at least two points (guard consistent with
      # Sigma.UML.calc)
      if( length( Z.stn1 ) > 1 ) {
        Sigma.10m <- approx( Z.stn1, Sigma.stn1, min.layer.uml )$y
        if ( is.finite( Sigma.10m ) ) {
          Sigma.UML <- Sigma.10m + d.rho.uml
          # Invert the profile: interpolate depth as a function of Sigma
          indx.fin <- is.finite( Sigma.stn1 )
          Z.UML[ k.prof ] <- approx(
            Sigma.stn1[ indx.fin ], Z.stn1[ indx.fin ], Sigma.UML )$y
        }
      }
    }
  }
}
return( Z.UML )
}
# End of function UML.Z.calc ***********************************************
#
# Function Ref.Prof.calc ***************************************************
# Build the reference profile for parameter Outrange.Param on a
# density (Sigma) grid: interpolate each reference station's
# sub-pycnocline profile onto a common Sigma axis, then average across
# stations. Returns a data frame with Sigma, Z, V.mean, V.std, TdegC
# (all smoothed by filter_np), or NULL when fewer than two usable
# sub-pycnocline reference samples exist.
Ref.Prof.calc <- function( Outrange.Param, Surv.data, Stn.list.surv,
settings ) {
#
#browser()
Surv.data$Sigma.UML <- Sigma.UML.calc( Surv.data, settings$Plume )
# Keep only samples below the upper mixed layer at reference stations
indx.bUML <- with( Surv.data, ( Sigma > Sigma.UML ) )
Ref.Profile.list <- Stn.list.surv$Profile[ Stn.list.surv$Ref ]
indx.ref <- ( Surv.data$Profile %in% Ref.Profile.list )
if( sum( indx.bUML & indx.ref, na.rm = TRUE ) < 2 ) {
RefProf <- NULL
return( RefProf )
}
#
# Common Sigma grid: observed range rounded outward to multiples of
# the grid step outr_refprof_dRho
Ref.Prof.dRho <- as.numeric( with( settings$Outr, Value[ which( ShortName == "outr_refprof_dRho" )[1] ] ) )
Sigma.min <- floor( min( Surv.data$Sigma[ indx.bUML & indx.ref ],
na.rm=TRUE) / Ref.Prof.dRho ) * Ref.Prof.dRho
Sigma.max <- ceiling( max( Surv.data$Sigma[ indx.bUML & indx.ref ],
na.rm=TRUE) / Ref.Prof.dRho ) * Ref.Prof.dRho
#
RefProf <- data.frame( Sigma = seq( from = Sigma.min, to = Sigma.max, by = Ref.Prof.dRho ),
Z = NA, V.mean = NA, V.std = NA, TdegC = NA )
length.RefProf <- nrow( RefProf )
n.Ref.Prof <- sum( Stn.list.surv$Ref )
# One column per reference station, one row per Sigma grid level
V.ref <- matrix( NA, nrow = length.RefProf, ncol = n.Ref.Prof )
Z.ref <- matrix( NA, nrow = length.RefProf, ncol = n.Ref.Prof )
TdegC.ref <- matrix( NA, nrow = length.RefProf, ncol = n.Ref.Prof )
#
for( k.Ref.Prof in 1:n.Ref.Prof ) {
indx.prof1 <- ( Surv.data$Profile == Ref.Profile.list[ k.Ref.Prof ] )
Prof.1 <- as.matrix( Surv.data[ indx.prof1, c( "Sigma","Z",Outrange.Param,"TdegC") ] )
# Sort by Sigma and keep one row per distinct Sigma.
# NOTE(review): if only one row survives, these subsets drop to a
# vector and the code below would misbehave -- same dimension-drop
# pattern as elsewhere in this file; confirm inputs always have >1 row.
Prof.1 <- Prof.1[ order( Prof.1[,1] ), ]
Prof.1 <- Prof.1[ !duplicated(Prof.1[,1] ), ]
# Rows with no NA in any of the four columns
indx.fin <- !apply( apply( Prof.1, 1, is.na ), 2, any )
if( sum( indx.fin, na.rm = TRUE ) > 1 ) {
V.ref[ , k.Ref.Prof ] <- signal::interp1(
Prof.1[indx.fin,"Sigma"], Prof.1[indx.fin, Outrange.Param ],
RefProf$Sigma, method="linear" )
Z.ref[ , k.Ref.Prof ] <- signal::interp1(
Prof.1[indx.fin,"Sigma"], Prof.1[indx.fin, "Z" ],
RefProf$Sigma, method="linear" )
TdegC.ref[ , k.Ref.Prof ] <- signal::interp1(
Prof.1[indx.fin,"Sigma"], Prof.1[indx.fin, "TdegC" ],
RefProf$Sigma, method="linear" )
}
}
# Average across stations at each Sigma level
RefProf$Z <- apply( Z.ref, 1, mean, na.rm = TRUE )
RefProf$TdegC <- apply( TdegC.ref, 1, mean, na.rm = TRUE )
RefProf$V.mean <- apply( V.ref, 1, mean, na.rm = TRUE )
RefProf$V.std <- apply( V.ref, 1, sd, na.rm = TRUE )
RefProf$V.std[ is.na( RefProf$V.std ) ] <- 0
# Filter
# NOTE(review): the ShortName here is "outr_refprof_FiltW" (capital F)
# while settings.change.params writes "outr_refprof_filtW" -- ShortName
# matching is case-sensitive, so one of the two spellings is likely
# wrong; confirm against the settings file.
RefProf.FiltW <- as.numeric( with( settings$Outr, Value[ which( ShortName == "outr_refprof_FiltW" )[1] ] ) )
RefProf$V.mean <- filter_np( RefProf$V.mean, RefProf.FiltW, extend = TRUE )
RefProf$V.std <- filter_np( RefProf$V.std, RefProf.FiltW, extend = TRUE )
RefProf$Z <- filter_np( RefProf$Z, RefProf.FiltW, extend = TRUE )
RefProf$TdegC <- filter_np( RefProf$TdegC, RefProf.FiltW, extend = TRUE )
#
#browser()
return( RefProf )
}
# End of function Ref.Prof.calc ********************************************
#
# Function filter_np *******************************************************
# Smooth a numeric vector with a centred moving average of width Filt_w.
# NAs are first filled by linear interpolation; leading/trailing NAs are
# extended with the nearest finite value so the filtered output covers the
# full input range.
#   Y      : numeric vector to smooth
#   Filt_w : filter window width (number of points)
#   extend : if FALSE, positions that were NA after interpolation are
#            restored to NA in the result
# Returns the smoothed vector (the input unchanged if it has <= 1 finite
# value).
filter_np <- function( Y, Filt_w, extend = TRUE ) {
    #
    n_obs <- length( Y )
    X <- seq_len( n_obs )
    indx_nan <- is.na( Y )
    if( sum( !indx_nan, na.rm = TRUE ) <= 1 ) {
        # Nothing to smooth with 0 or 1 finite values
        return( Y )
    } else {
        Y1 <- Y
        # Fill interior NAs by linear interpolation between finite neighbours
        Y1[ indx_nan ] <- signal::interp1( X[!indx_nan], Y[!indx_nan], X[indx_nan] )
        indx_nan <- is.na( Y1 )
        if ( indx_nan[1] ) {
            # Extend the first finite value backwards over leading NAs.
            # Fix: the original used "1 : indx_first-1", which by operator
            # precedence is 0:(indx_first-1); it only worked because the
            # zero index is silently dropped. seq_len() is the safe form.
            indx_first <- min( which( !indx_nan ) )
            Y1[ seq_len( indx_first - 1 ) ] <- Y1[ indx_first ]
        }
        if( indx_nan[n_obs]) {
            # Extend the last finite value forwards over trailing NAs
            indx_last <- max( which( !indx_nan ) )
            Y1[ seq( from = indx_last+1, to = n_obs ) ] <- Y1[ indx_last ]
        }
        #
        # Moving-average kernel; pad both ends so the filtered series is
        # defined over the full original range
        filt_window <- rep( 1, Filt_w ) / Filt_w
        Y3 <- c( rep( Y1[1], Filt_w ), Y1, rep( Y1[n_obs], Filt_w ) )
        Y3sm <- stats::filter( Y3, filt_window )
        Y1sm <- Y3sm[ seq( from = Filt_w+1, to = Filt_w+n_obs ) ]
        if( !extend ) Y1sm[ indx_nan ] <- NA
        return( Y1sm )
    }
}
# End of function filter_np ************************************************
#
# Function V.diff.calc *****************************************************
# Calculate the list of profiles (for each plume profile) of the
# "integrated" differences between the parameter and reference profile.
# Returns a named list with one element per plume profile; elements remain
# NA when no plume observations are present in Surv.data.
V.diff.calc <- function( Outrange.Param, Stn.list.surv, Surv.data, RefProf,
                         settings ) {
    #
    Plume.prof.list <- Stn.list.surv$Profile[ Stn.list.surv$Plume ]
    n.plumes <- length( Plume.prof.list )
    V.diff.list <- as.list( rep( NA, n.plumes ) )
    names( V.diff.list ) <- Plume.prof.list
    if( sum( Surv.data$Plume ) > 0 ) {
        # Calculate the coefficients of entrainment (polynomial fit of the
        # parameter against temperature over the reference profiles)
        n.poly <- 3
        TdegC.V.fit <- TdegC.V.fit.calc( Outrange.Param, Stn.list.surv,
                                         Surv.data, n.poly, settings )
        # seq_len() instead of 1:n.plumes: safe when n.plumes == 0
        for( k.plume in seq_len( n.plumes ) ) {
            # Calculate data frame for each plume
            V.diff.list[[ k.plume ]] <- V.diff.Entr.calc( Outrange.Param, Surv.data,
                Plume.prof.list[ k.plume ], RefProf, settings, TdegC.V.fit )
        }
    }
    return( V.diff.list )
}
# End of function V.diff.calc **********************************************
#
# Function TdegC.V.fit.calc ************************************************
# Fit a degree-n.poly polynomial of the outrange parameter against
# temperature, using only reference-profile observations located below the
# upper mixed layer.  Returns the fitted "lm" object (the predictor is named
# TdegC.ref, which predict() calls elsewhere rely on).
TdegC.V.fit.calc <- function( Outrange.Param, Stn.list.surv, Surv.data,
                              n.poly, settings ) {
    #
    # Observations belonging to reference profiles with finite T and parameter
    is.ref.obs <- ( Surv.data$Profile %in% Stn.list.surv$Profile[ Stn.list.surv$Ref ] ) &
        is.finite( Surv.data$TdegC ) & is.finite( Surv.data[ , Outrange.Param ] )
    Surv.data$Sigma.UML <- Sigma.UML.calc( Surv.data, settings$Plume )
    # Keep only observations below the upper mixed layer
    is.below.UML <- with( Surv.data, ( Sigma > Sigma.UML ) )
    keep <- is.ref.obs & is.below.UML
    T.vals <- Surv.data$TdegC[ keep ]
    V.vals <- Surv.data[ keep, Outrange.Param ]
    ok <- !( is.na( T.vals ) | is.na( V.vals ) )
    TdegC.ref <- T.vals[ ok ]
    V.ref <- V.vals[ ok ]
    # Polynomial regression of the parameter on temperature
    TdegC.V.fit <- lm( V.ref ~ stats::poly( TdegC.ref, n.poly, raw = TRUE ) )
    #
    return( TdegC.V.fit )
}
# End of function TdegC.V.fit.calc ********************************************
#
# Function V.diff.Entr.calc ***************************************************
# Build, for one plume profile, a data frame comparing the observed parameter
# profile ("V.plume") against the reference profile ("V.ref") and the
# entrainment-corrected reference ("V.entr"), on the density grid of RefProf.
# Depending on settings$Prof.comp$Value, the per-layer comparison is a Welch
# t-test ("ttest": columns t.value/p.value) or an integrated difference
# ("percent"/"absolute": column V.diff).
V.diff.Entr.calc <- function( Outrange.Param, Surv.data, Plume.Profile, RefProf,
                              settings, TdegC.V.fit ) {
    #
    Outr.settings <- settings$Outr
    Entr.settings <- settings$Entr
    #
    V.diff.DF <- RefProf[ , c("Sigma","TdegC") ]
    colnames( V.diff.DF)[2] <- "TdegC.ref"
    V.diff.DF$V.plume <- NA
    V.diff.DF$TdegC.plume <- NA
    V.diff.DF$Z.plume <- NA
    if( settings$Prof.comp$Value == "ttest" ) {
        V.diff.DF$t.value <- NA
        V.diff.DF$p.value <- NA
    } else {
        V.diff.DF$V.diff <- NA
    }
    #V.diff.DF$V.diff.PC <- NA
    n.Sigma <- nrow( V.diff.DF )
    #
    # Average the plume observations below the upper mixed layer onto the
    # reference density grid (window of +/- one grid step around each level)
    Surv.data$Sigma.UML <- Sigma.UML.calc( Surv.data, settings$Plume )
    indx.bUML <- with( Surv.data, ( Sigma > Sigma.UML ) )
    RefProf.dSigma <- diff( V.diff.DF$Sigma )[1]
    indx.plume <- ( Surv.data$Profile == Plume.Profile )
    for( k.Sigma in seq_len( n.Sigma ) ) {
        indx.Sigma <- ( Surv.data$Sigma >= ( V.diff.DF$Sigma[ k.Sigma ] - RefProf.dSigma ) ) &
            ( Surv.data$Sigma <= ( V.diff.DF$Sigma[ k.Sigma ] + RefProf.dSigma ) )
        V.diff.DF$V.plume[ k.Sigma ] <- mean( Surv.data[ indx.plume & indx.Sigma & indx.bUML, Outrange.Param ], na.rm = TRUE )
        V.diff.DF$TdegC.plume[ k.Sigma ] <- mean( Surv.data$TdegC[ indx.plume & indx.Sigma & indx.bUML ], na.rm = TRUE )
        V.diff.DF$Z.plume[ k.Sigma ] <- mean( Surv.data$Z[ indx.plume & indx.Sigma & indx.bUML ], na.rm = TRUE )
    }
    #browser()
    # Fill gaps in the gridded plume columns by linear interpolation
    indx.fin <- !is.na( V.diff.DF$V.plume )
    if( sum( indx.fin, na.rm = TRUE ) > 1 ) {
        V.diff.DF$V.plume <- signal::interp1( V.diff.DF$Sigma[indx.fin],
            V.diff.DF$V.plume[indx.fin], V.diff.DF$Sigma, method="linear" )
    }
    indx.fin <- !is.na( V.diff.DF$TdegC.plume )
    if( sum( indx.fin, na.rm = TRUE ) > 1 ) {
        V.diff.DF$TdegC.plume <- signal::interp1( V.diff.DF$Sigma[indx.fin],
            V.diff.DF$TdegC.plume[indx.fin], V.diff.DF$Sigma, method="linear" )
    }
    indx.fin <- !is.na( V.diff.DF$Z.plume )
    if( sum( indx.fin, na.rm = TRUE ) > 1 ) {
        V.diff.DF$Z.plume <- signal::interp1( V.diff.DF$Sigma[indx.fin],
            V.diff.DF$Z.plume[indx.fin], V.diff.DF$Sigma, method="linear" )
    }
    # Smooth the plume columns with the configured moving-average width
    RefProf.FiltW <- as.numeric( with( Outr.settings, Value[ which( ShortName == "outr_refprof_FiltW" )[1] ] ) )
    V.diff.DF$V.plume <- filter_np( V.diff.DF$V.plume, RefProf.FiltW, extend = FALSE )
    V.diff.DF$TdegC.plume <- filter_np( V.diff.DF$TdegC.plume, RefProf.FiltW, extend = FALSE )
    V.diff.DF$Z.plume <- filter_np( V.diff.DF$Z.plume, RefProf.FiltW, extend = FALSE )
    #
    # Lower envelope of the reference profile (mean minus Kstd * std)
    RefProf.Kstd <- as.numeric( with( Outr.settings, Value[ which( ShortName == "outr_refprof_Kstd" )[1] ] ) )
    V.diff.DF$V.ref <- RefProf$V.mean - RefProf$V.std * RefProf.Kstd
    #
    # Optionally correct the reference for entrainment via the T-V fit;
    # the correction can only lower the reference (pmin below)
    Entr.effect.on <- as.logical( with( Entr.settings,
        Value[ which( ShortName == "entr_effectOnOff" )[1] ] ) )
    V.diff.DF$V.entr <- V.diff.DF$V.ref
    if( Entr.effect.on ) {
        indx.fin <- is.finite( V.diff.DF$TdegC.plume )
        V.diff.DF$V.entr[ indx.fin ] <- as.numeric( predict( TdegC.V.fit,
            data.frame( TdegC.ref = V.diff.DF$TdegC.plume[ indx.fin ] ) ) )
    }
    V.diff.DF$V.entr <- pmin( V.diff.DF$V.entr, V.diff.DF$V.ref )
    #
    # Compare plume and corrected reference over a sliding depth window
    RefProf.Z.window <- as.numeric( with( Outr.settings,
        Value[ which( ShortName == "outr_refprof_Zwindow" )[1] ] ) )
    #browser()
    for( k.Sigma in seq_len( n.Sigma ) ) {
        if ( is.finite( V.diff.DF$V.plume[ k.Sigma ] ) ) {
            indx.layer <- ( abs( V.diff.DF$Z.plume - V.diff.DF$Z.plume[ k.Sigma ] ) <=
                RefProf.Z.window/2 )
            indx.layer[ is.na( indx.layer ) ] <- FALSE
            if( settings$Prof.comp$Value == "ttest" ) {
                # Fix: require more than one finite value in BOTH series.
                # The original tested V.entr twice, so an all-NA V.plume
                # layer could reach t.test() and raise an error.
                if( ( sum( is.finite( V.diff.DF$V.plume[ indx.layer ] ) ) > 1 ) &
                    ( sum( is.finite( V.diff.DF$V.entr[ indx.layer ] ) ) > 1 ) ) {
                    t.test.res <- t.test( V.diff.DF$V.plume[ indx.layer ],
                        V.diff.DF$V.entr[ indx.layer ] )
                    V.diff.DF$t.value[ k.Sigma ] <- t.test.res$statistic
                    V.diff.DF$p.value[ k.Sigma ] <- t.test.res$p.value
                } else {
                    V.diff.DF$t.value[ k.Sigma ] <- NA
                    V.diff.DF$p.value[ k.Sigma ] <- NA
                }
            } else {
                # Integrate both curves over the layer (trapezoidal rule);
                # single-point layers use the raw values
                if ( sum( indx.layer, na.rm = TRUE ) > 1 ) {
                    V.plume.trp <- pracma::trapz( V.diff.DF$Sigma[ indx.layer ], V.diff.DF$V.plume[ indx.layer ] )
                    V.ref.trp <- pracma::trapz( V.diff.DF$Sigma[ indx.layer ], V.diff.DF$V.entr[ indx.layer ] )
                } else {
                    V.plume.trp <- V.diff.DF$V.plume[ indx.layer ]
                    V.ref.trp <- V.diff.DF$V.entr[ indx.layer ]
                }
                if( settings$Prof.comp$Value == "percent" ) {
                    V.diff.DF$V.diff[ k.Sigma ] <- 100 * ( V.plume.trp - V.ref.trp ) / V.ref.trp
                } else if ( settings$Prof.comp$Value == "absolute" ) {
                    V.diff.DF$V.diff[ k.Sigma ] <- V.plume.trp - V.ref.trp
                } else {
                    V.diff.DF$V.diff[ k.Sigma ] <- NA
                }
                # NOTE(review): this re-filters the whole (partially filled)
                # V.diff column on every loop iteration; moving it after the
                # loop looks like the intent, but that would change results,
                # so it is left in place -- confirm before changing.
                V.diff.DF$V.diff <- filter_np( V.diff.DF$V.diff, RefProf.FiltW,
                    extend = FALSE )
            }
        }
    }
    #
    return( V.diff.DF )
}
# End of function V.diff.Entr.calc ********************************************
# Function V.Z.min.calc *******************************************************
# For each plume profile, locate the strongest decrease of the parameter
# relative to the reference (minimum t-value for the "ttest" method, minimum
# difference otherwise) and return one summary row per profile, rounded for
# display.  Rows stay NA when a profile has no finite comparison values.
V.Z.min.calc <- function( V.diff.list, Prof.comp ) {
    #
    n.plume <- length( V.diff.list )
    use.ttest <- ( Prof.comp == "ttest" )
    if( use.ttest ) {
        V.Z.min <- data.frame( t.min = rep( NA, n.plume ),
                               p.min = rep( NA, n.plume ),
                               Z.min = rep( NA, n.plume ) )
    } else {
        V.Z.min <- data.frame( V.min = rep( NA, n.plume ),
                               Z.min = rep( NA, n.plume ) )
    }
    rownames( V.Z.min ) <- names( V.diff.list )
    #browser()
    for( k.plume in seq_len( n.plume ) ) {
        prof <- V.diff.list[[ k.plume ]]
        if( use.ttest ) {
            # which.min() returns integer(0) when every value is NA
            k.min <- which.min( prof$t.value )
            if( length( k.min ) > 0 ) {
                V.Z.min$t.min[ k.plume ] <- prof$t.value[ k.min[1] ]
                V.Z.min$p.min[ k.plume ] <- prof$p.value[ k.min[1] ]
                V.Z.min$Z.min[ k.plume ] <- prof$Z.plume[ k.min[1] ]
            }
        } else {
            k.min <- which.min( prof$V.diff )
            if( length( k.min ) > 0 ) {
                V.Z.min$V.min[ k.plume ] <- prof$V.diff[ k.min[1] ]
                V.Z.min$Z.min[ k.plume ] <- prof$Z.plume[ k.min[1] ]
            }
        }
    }
    # Round for display
    V.Z.min$Z.min <- round( V.Z.min$Z.min, digits = 1 )
    if( use.ttest ) {
        V.Z.min$t.min <- round( V.Z.min$t.min, digits = 3 )
        V.Z.min$p.min <- round( V.Z.min$p.min, digits = 5 )
    } else {
        V.Z.min$V.min <- round( V.Z.min$V.min, digits = 3 )
    }
    return( V.Z.min )
}
# End of function V.Z.min.calc ************************************************
# Function outr.detect ********************************************************
# Return the names (rownames of V.Z.min) of profiles whose strongest decrease
# falls below the configured outrange threshold: the p-value for the "ttest"
# method, the minimum difference otherwise.
outr.detect <- function( V.Z.min, settings, Prof.comp ) {
    #
    Outr.threshold <- as.numeric( with( settings$Outr,
        Value[ which( ShortName == "outr_threshold" )[1] ] ) )
    crit <- if( Prof.comp == "ttest" ) V.Z.min$p.min else V.Z.min$V.min
    Outr.prof.list <- rownames( V.Z.min[ crit < Outr.threshold, ] )
    #
    return( Outr.prof.list )
}
# End of function outr.detect ***************************************************
# Function plot.outr.profiles ***************************************************
# Plot the comparison profiles (t-value or parameter difference vs depth or
# density) for all plume profiles: all profiles in grey, the selected ones
# highlighted in red, with the detection threshold as a dashed red line.
# NOTE(review): the dotted name reads like an S3 "plot" method for class
# "outr.profiles" but is used as a plain function -- renaming would be safer
# but would change the public name, so it is kept.
plot.outr.profiles <- function( V.diff.list, Z.axis, Prof.comp, Outrange.Param,
                                Profiles.selected, Outr.settings ) {
    if( Prof.comp == "ttest" ) {
        x.lab <- "t-coefficient"
        X.param <- "t.value"
    } else {
        x.lab <- paste( Outrange.Param, "(plume minus reference)" )
        # Fix: the comparison value used elsewhere in this file is lowercase
        # "percent"; the original compared against "Percent", so the "(%)"
        # suffix was never appended.
        if( Prof.comp == "percent" ) {
            x.lab <- paste( x.lab, "(%)" )
        }
        X.param <- "V.diff"
    }
    n.profiles <- length( V.diff.list )
    # Common axis ranges over all profiles (finite values only)
    indx.fin <- is.finite( V.diff.list[[1]][,X.param] )
    x.range <- extendrange( V.diff.list[[1]][indx.fin,X.param] )
    y.range <- extendrange( V.diff.list[[1]][indx.fin,Z.axis] )
    if( n.profiles > 1 ) {
        for( k.profile in 2:n.profiles ) {
            indx.fin <- is.finite( V.diff.list[[k.profile]][,X.param] )
            x.range.2 <- extendrange( V.diff.list[[k.profile]][indx.fin,X.param] )
            x.range[1] <- min( x.range[1], x.range.2 )
            x.range[2] <- max( x.range[2], x.range.2 )
            y.range.2 <- extendrange( V.diff.list[[k.profile]][indx.fin,Z.axis] )
            y.range[1] <- min( y.range[1], y.range.2 )
            y.range[2] <- max( y.range[2], y.range.2 )
        }
    }
    #browser()
    if( Z.axis == "Z.plume" ) {
        y.label = "Depth (m)"
    } else {
        y.label = "Specific Density (kg/m3)"
    }
    # Empty frame with depth/density increasing downwards
    plot( V.diff.list[[1]][,X.param], V.diff.list[[1]][,Z.axis],
          xlim = x.range, ylim = rev( y.range ), type = "n",
          ylab = y.label, xlab = x.lab )
    abline( v = 0 )
    Outr.threshold <- with( Outr.settings,
        Value[ which( ShortName == "outr_threshold" )[1] ] )
    abline( v = Outr.threshold, lwd = 3, lty = 3, col = "red" )
    # All profiles in grey first, then the selected ones on top in red
    for( k.profile in 1:n.profiles ) {
        lines( V.diff.list[[k.profile]][,X.param],
               V.diff.list[[k.profile]][,Z.axis], lwd = 2, col = "gray" )
    }
    for( k.profile in 1:n.profiles ) {
        if( names( V.diff.list )[ k.profile ] %in% Profiles.selected ) {
            lines( V.diff.list[[k.profile]][,X.param],
                   V.diff.list[[k.profile]][,Z.axis], lwd = 4, col = "red" )
        }
    }
}
# End of function plot.outr.profiles ********************************************
# Function entr.plot ************************************************************
# Plot the plume profile (red) against the reference profile (blue) and the
# entrainment-corrected reference (green), with depth or density on the
# reversed vertical axis.
entr.plot <- function( V.diff.DF, Y.name, Outrange.Param ) {
    # Rows where all three curves and the vertical coordinate are finite
    ok <- is.finite( V.diff.DF[ , Y.name ] ) & is.finite( V.diff.DF$V.plume ) &
        is.finite( V.diff.DF$V.entr ) & is.finite( V.diff.DF$V.ref )
    y.range <- extendrange( V.diff.DF[ ok, Y.name ] )
    # Extra head-room at the bottom of the reversed axis for the legend
    y.range[2] <- y.range[2] + diff( y.range ) /5
    x.range <- range( extendrange( V.diff.DF[ ok, "V.plume" ] ),
                      extendrange( V.diff.DF[ ok, "V.entr" ] ),
                      extendrange( V.diff.DF[ ok, "V.ref" ] ) )
    #
    y.lab <- if( Y.name == "Sigma" ) "Specific density (kg/m3)" else "Depth (m)"
    # Empty frame with depth/density increasing downwards
    plot( V.diff.DF$V.plume, V.diff.DF[ , Y.name ], type = "n",
          ylim = rev( y.range ), xlim = x.range, ylab = y.lab, xlab = Outrange.Param )
    lines( V.diff.DF$V.plume, V.diff.DF[ , Y.name ], col = "red", lwd = 3 )
    lines( V.diff.DF$V.entr, V.diff.DF[ , Y.name ], col = "green", lwd = 3 )
    lines( V.diff.DF$V.ref, V.diff.DF[ , Y.name ], col = "blue", lwd = 3 )
    legend( "bottomright", col = c( "red","blue","green" ), lwd = 3,
            legend = c("Plume","Reference","Corrected for entrainment") )
}
# End of function entr.plot *****************************************************
# Function report.data.file ************************************************
# One-line report of the loaded data file and its number of observations;
# NULL when no file is selected.
report.data.file <- function( selected.data.file, Surv.data.tot ) {
    #
    if( is.null( selected.data.file ) ) {
        return( NULL )
    }
    report_data_file <- paste0( "Data file: ", selected.data.file, ", ",
                                nrow( Surv.data.tot ), " obs." )
    return( report_data_file )
}
# End of function report.data.file *****************************************
# Function report.data.selected ********************************************
# One-line report of the currently selected agency / year / season and the
# number of selected observations; NULL when nothing is selected.
report.data.selected <- function( Agency.selected, Year.selected, Season.selected,
                                  Surv.data ) {
    #
    # Scalar condition: short-circuit && instead of element-wise &
    if( is.null( Agency.selected ) && is.null( Year.selected ) &&
        is.null( Season.selected ) ) {
        report_data_selected <- NULL
    } else {
        if( is.null( Surv.data ) ) { n.surv.data <- 0
        } else { n.surv.data <- nrow( Surv.data ) }
        report_data_selected <- paste( "Selected: ", Agency.selected, ", ",
            Year.selected, ", ", Season.selected, ", ", n.surv.data, " obs.",
            sep = "" )
    }
    # Explicit return for consistency with the other report.* helpers
    # (the original relied on the invisible value of the last assignment)
    return( report_data_selected )
}
# End of function report.data.selected *************************************
# Function report.plume.list ********************************************
# Report the number and names of profiles flagged as CDOM plume;
# NULL when no plume profile is flagged.
report.plume.list <- function( Stn.list.surv.Plume, Stn.list.surv.Profile ) {
    #
    n.plume <- sum( Stn.list.surv.Plume )
    if( n.plume == 0 ) {
        return( NULL )
    }
    plume.names <- paste( Stn.list.surv.Profile[ Stn.list.surv.Plume ],
                          collapse = ", " )
    report_plume_list <- paste0( "CDOM plume: ", n.plume, " profiles (",
                                 plume.names, ")" )
    #
    return( report_plume_list )
}
# End of function report.plume.list *************************************
# Function report.plume.settings ********************************************
# Report the plume-detection settings, one indented line per setting under a
# header line; NULL when no plume profile is flagged.
report.plume.settings <- function( Stn.list.surv.Plume, settings.Plume ) {
    #
    if( sum( Stn.list.surv.Plume ) == 0 ) {
        return( NULL )
    }
    setting.lines <- paste( "      ", settings.Plume$Comment, " = ",
                            settings.Plume$Value, sep = "" )
    report_plume_settings <- c( "   Plume detection settings:", setting.lines )
    #
    return( report_plume_settings )
}
# End of function report.plume.settings *************************************
# Function report.ref.list ********************************************
# Report the number and names of profiles flagged as reference;
# NULL when no reference profile is flagged.
report.ref.list <- function( Stn.list.surv.Ref, Stn.list.surv.Profile ) {
    #
    n.ref <- sum( Stn.list.surv.Ref )
    if( n.ref == 0 ) {
        return( NULL )
    }
    ref.names <- paste( Stn.list.surv.Profile[ Stn.list.surv.Ref ],
                        collapse = ", " )
    report_ref_list <- paste0( "Reference: ", n.ref, " profiles (",
                               ref.names, ")" )
    #
    return( report_ref_list )
}
# End of function report.ref.list *************************************
# Function report.ref.settings ********************************************
# Report the reference-profile selection settings, one indented line per
# setting under a header line; NULL when no reference profile is flagged.
report.ref.settings <- function( Stn.list.surv.Ref, settings.Ref ) {
    #
    if( sum( Stn.list.surv.Ref ) == 0 ) {
        return( NULL )
    }
    setting.lines <- paste( "      ", settings.Ref$Comment, " = ",
                            settings.Ref$Value, sep = "" )
    report_ref_settings <- c( "   Reference profiles selection settings:",
                              setting.lines )
    #
    return( report_ref_settings )
}
# End of function report.ref.settings *************************************
# Function report.outr.param **********************************************
# Report the parameter selected for outrange detection; NULL when no
# parameter is selected.  (The default paste() separator after the literal's
# trailing space yields a double space -- preserved from the original.)
report.outr.param <- function( Outrange.Param ) {
    #
    if( is.null( Outrange.Param ) ) {
        return( NULL )
    }
    report_outr_param <- paste( "Parameter selected for outranges detection: ",
        Outrange.Param )
    return( report_outr_param )
}
# End of function report.outr.param ***************************************
# Function report.ref.prof **********************************************
# Report that the reference profile of the given parameter has been
# calculated; NULL when no reference profile exists yet.
report.ref.prof <- function( Outrange.Param, RefProf ) {
    #
    if( is.null( RefProf ) ) {
        return( NULL )
    }
    report_ref_prof <- paste( "Reference", Outrange.Param, "profile calculated" )
    return( report_ref_prof )
}
# End of function report.ref.prof ***************************************
# Function report.outr.method **********************************************
# Report the method used to compare profiles; NULL when either the reference
# profile or the method is missing.
report.outr.method <- function( RefProf, Prof.comp ) {
    # Scalar condition: short-circuit || instead of element-wise |
    if( is.null( RefProf ) || is.null( Prof.comp ) ) {
        report_outr_method <- NULL
    } else {
        report_outr_method <- paste( "   Method of comparison between profiles:",
            Prof.comp )
    }
    return( report_outr_method )
}
# End of function report.outr.method ***************************************
# Function report.outr.settings **********************************************
# Report the reference-profile calculation and outrange-detection settings,
# one indented line per setting under a header line; NULL when no reference
# profile has been calculated yet.
report.outr.settings <- function( RefProf, settings.Outr ) {
    #
    if( is.null( RefProf ) ) {
        return( NULL )
    }
    setting.lines <- paste( "      ", settings.Outr$Comment, " = ",
                            settings.Outr$Value, sep = "" )
    report_outr_settings <- c(
        "   Reference profile calculation and outrange detection settings:",
        setting.lines )
    return( report_outr_settings )
}
# End of function report.outr.settings ***************************************
# Function report.entr.setting **********************************************
# (Header corrected: the original banner wrongly said "report.outr.settings"
# and the footer said "report.outr.method".)
# Report the entrainment-effect setting; NULL when no reference profile has
# been calculated yet.
report.entr.setting <- function( RefProf, settings.Entr ) {
    #
    if( is.null( RefProf ) ) {
        return( NULL )
    }
    report_entr_setting <- paste( "  ", settings.Entr$Comment, ": ",
                                  settings.Entr$Value,
                                  sep = "" )
    return( report_entr_setting )
}
# End of function report.entr.setting ***************************************
# Function report.outr.list **********************************************
# Report the number (and, when non-zero, the names) of profiles flagged as
# outranges; NULL when no comparison list exists yet.
report.outr.list <- function( Outrange.Param, V.diff.list, Stn.list.surv ) {
    #
    if( is.null( V.diff.list ) ) {
        return( NULL )
    }
    n.outr <- sum( Stn.list.surv$Outrange )
    report_outr_list <- paste0( Outrange.Param, " outranges detected: ", n.outr )
    if( n.outr > 0 ) {
        outr.names <- paste( Stn.list.surv$Profile[ Stn.list.surv$Outrange ],
                             collapse = ", " )
        report_outr_list <- paste0( report_outr_list, " (", outr.names, ")" )
    }
    return( report_outr_list )
}
# End of function report.outr.list ***************************************
# Function report.max.decrease.depths ************************************
# (Header corrected: the original banner wrongly said "report.outr.list"
# and the footer said "report.outr.method".)
# Report, per plume profile, the depth of the maximum parameter decrease:
# t/p values and depth for the "ttest" method, decrease and depth otherwise.
# NULL when no summary table is available.
report.max.decrease.depths <- function( Outrange.Param, Prof.comp, V.Z.min ) {
    #
    if( is.null( V.Z.min ) ) {
        return( NULL )
    }
    if( Prof.comp == "ttest" ) {
        detail.lines <- paste( rownames( V.Z.min ),
            V.Z.min$t.min, V.Z.min$p.min, V.Z.min$Z.min, sep = ", " )
        header <- paste( "Maximum", Outrange.Param,
            "decrease at depths (Profile, t, p, Depth):" )
    } else {
        detail.lines <- paste( rownames( V.Z.min ),
            V.Z.min$V.min, V.Z.min$Z.min, sep = ", " )
        header <- paste( "Maximum", Outrange.Param,
            "decrease at depths (Profile,Decrease,Depth):" )
    }
    report_max_decrease_depths <- c( header, detail.lines )
    return( report_max_decrease_depths )
}
# End of function report.max.decrease.depths *****************************
|
1d93678df8a488790318eff63521f15151c40a75
|
1ea5000a33609aa567ae78a734afaf6ddafb7cf1
|
/cachematrix.R
|
547936203a5c457b06c8230afef59e64793d6b08
|
[] |
no_license
|
GabeZeta/ProgrammingAssignment2
|
e722244cbc6bb5be884bfbf791b20a8b178a047b
|
a0c50a9e0e53217f4853723a391099d297b77296
|
refs/heads/master
| 2020-12-29T00:42:28.017064
| 2015-01-25T23:45:12
| 2015-01-25T23:45:12
| 29,831,462
| 0
| 0
| null | 2015-01-25T21:14:10
| 2015-01-25T21:14:09
| null |
UTF-8
|
R
| false
| false
| 2,258
|
r
|
cachematrix.R
|
# Matrix inversion is usually a costly computation and there may be some
# benefit to caching the inverse of a matrix rather than compute it repeatedly.
# The following two functions are used to cache the inverse of a matrix.
# makeCacheMatrix() wraps a matrix in a list of four closures sharing one
# environment, so the inverse can be cached alongside the data:
#   set(y)          -- store a new matrix and invalidate the cached inverse
#   get()           -- return the stored matrix
#   setinverse(inv) -- cache the inverse (called by cacheSolve)
#   getinverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix())
{
  cached_inverse <- NULL               # NULL until cacheSolve stores a value
  set <- function(y)
  {
    x <<- y                            # replace the wrapped matrix ...
    cached_inverse <<- NULL            # ... and drop the now-stale inverse
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
# cacheSolve() returns the inverse of the special "matrix" created by
# makeCacheMatrix above.  If the inverse has already been computed (and the
# matrix has not changed), the cached value is returned and the computation
# is skipped; otherwise the inverse is computed with solve() and stored in
# the cache via setinverse().
# Assumes the wrapped matrix is always invertible.
cacheSolve <- function(x, ...)
{
  # Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached))
  {
    message("Cached data found. Getting cached data... Done!")
    return(cached)
  }
  message("No cached data found. Calculating inverse matrix...")
  raw_matrix <- x$get()
  inverse <- solve(raw_matrix)
  x$setinverse(inverse)
  message("Done!")
  inverse
}
|
3339bb5d78dfbd704559b7dbe7b737e9b34ac262
|
a37c2fff0d0efd25be5daaaac630bfe20f11cb20
|
/R/3_parse_data.R
|
5874a0dd033afb69036d4a45c4be5dc173f1c1a2
|
[] |
no_license
|
mackerman44/champ_Q4s
|
b156ee1f4290534e2d9ef164f6dffcb9667b37a0
|
e68df34379b4cde8302659ec0f72a0330a5ba89c
|
refs/heads/master
| 2021-02-13T10:24:12.104652
| 2020-04-01T16:52:07
| 2020-04-01T16:52:07
| 244,687,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,332
|
r
|
3_parse_data.R
|
# parse data by watershed, channel unit type, and/or tier 1, and remove any dataset too small (min_samp_size) for comparisons or plotting
# Fix: the original ignored its `data` argument and read the global
# `spc_ls_hab_df` throughout; the argument (defaulting to that global) is
# now honoured, so callers can pass other data sets.
#   data          : data frame with Watershed / Channel_Type / Tier1 /
#                   log_fish_dens_m columns
#   spc, ls       : species and life-stage tags used to name the outputs
#   min_samp_size : minimum number of rows a parsed data frame must have
# Returns a named list of parsed data frames, each with a `qrtl` column.
parse_data = function(data = spc_ls_hab_df, spc, ls, min_samp_size = 20) {
  # helper: tag each record with its log-density quartile, collapsing Q1-Q3
  # into "Rest" so only the top quartile (Q4) stands out
  add_qrtl = function(df) {
    df %>%
      mutate(qrtl = cut_number(log_fish_dens_m, n = 4, labels = c("Q1", "Q2", "Q3", "Q4"))) %>%
      mutate(qrtl = recode(qrtl,
                           `Q1` = "Rest",
                           `Q2` = "Rest",
                           `Q3` = "Rest"))
  }
  #------------------------------
  # parse data by watershed
  wtr = unique(data$Watershed)
  for(w in wtr) {
    tmp = add_qrtl(filter(data, Watershed == as.character(w)))
    assign(paste(spc, ls, make_clean_names(w), sep = "_"), tmp)
  }
  if(ls %in% c("sum", "spw")) {
    #------------------------------
    # parse data by channel_unit
    cht = unique(data$Channel_Type)
    cht = cht[!is.na(cht)]
    for(c in cht) {
      tmp = add_qrtl(filter(data, Channel_Type == as.character(c)))
      assign(paste(spc, ls, make_clean_names(c), sep = "_"), tmp)
    }
  }
  if(ls == "win") {
    #------------------------------
    # parse data by tier 1
    tr1 = unique(data$Tier1)
    for(t in tr1) {
      tmp = add_qrtl(filter(data, Tier1 == as.character(t)))
      assign(paste(spc, ls, make_clean_names(t), sep = "_"), tmp)
    }
  }
  #------------------------------
  # collect the parsed data frames assigned above (note: the `ls` argument
  # holds a string, so this call still resolves to base::ls())
  df_list = ls(pattern = paste0("^", spc, "_", ls, "_"))
  df_list = do.call("list", mget(df_list))
  #------------------------------
  # drop those data frames too small for comparisons or plotting
  # (vapply instead of sapply for a type-stable result on empty input)
  big_dfs = names(which(vapply(df_list, nrow, integer(1)) > min_samp_size - 1))
  df_list = df_list[names(df_list) %in% big_dfs]
  return(df_list)
}
|
071e2692cf1a65d01efaec7d3c5f2d344f34cb84
|
e71d5e89bf3460f647b320e044d8772112139913
|
/server.R
|
d4cff81ef4e593f6d9d760ea1c482122b9234634
|
[] |
no_license
|
JorgeSauma/WineTester
|
74977f99c5cb46261db1e455fb7e1c215051ea74
|
f11ab9bb4c1b6f78477a4b027c112ee1f9534615
|
refs/heads/master
| 2021-05-01T23:18:00.935375
| 2018-02-09T17:28:47
| 2018-02-09T17:28:47
| 120,932,417
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,722
|
r
|
server.R
|
# Shiny server for a red-wine quality predictor: loads the UCI
# winequality-red data, trains a random forest on three predictors
# (Alcohol, Sulphates, VolatileAcidity) and maps the predicted quality
# score from the UI sliders to a verbal rating.
library(shiny)
library(caret)
library(tidyr)
library(randomForest)
# NOTE(review): this global is shadowed by the local `value` inside
# renderText below and appears otherwise unused
value=-1
#wine_data<-read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv")
wine_data<-read.csv("winequality-red.csv")
# The file is ';'-separated, so reading with the default ',' separator
# yields a single column; rename it and split it into the real columns.
colnames(wine_data) <- "Wine_colnames"
Tidy_wine_data <- separate(wine_data, Wine_colnames, into=c("FixedAcidity", "VolatileAcidity", "CitricAcid", "ResidualSugar", "Chlorides", "FreeSulfurDioxide", "TotalSulfurDioxide", "Density", "pH", "Sulphates", "Alcohol", "Quality"), sep=";")
# separate() produces character columns; convert everything to numeric
Tidy_wine_data<-as.data.frame(sapply(Tidy_wine_data, as.numeric))
# keep only the three predictors used by the model plus the response
wine_short_set<-Tidy_wine_data[c("Alcohol", "Sulphates", "VolatileAcidity", "Quality")]
# 75/25 train/validation split
training_partition <- createDataPartition(y = wine_short_set$Quality, p = 0.75, list=FALSE)
training <- wine_short_set[training_partition,]
validation <- wine_short_set[-training_partition,]   # NOTE(review): never used below
print("Ready")
shinyServer(function(input, output) {
  # Seed immediately before train() so the forest is reproducible
  set.seed(2402)
  # NOTE(review): "proxy" is not a train()/randomForest() argument and is
  # silently swallowed via ... -- presumably a typo for "proximity"; confirm.
  modFit1<-train(Quality ~ ., data=training, method = "rf", proxy=TRUE)
  # Reactive: rounded quality prediction from the three slider inputs
  QualityPred <- reactive({
    AlcoholInput <- input$Alcohol_sl
    SulphatesInput <- input$Sulphates_sl
    Acidityinput <- input$Acidity_sl
    round(predict(modFit1, newdata = data.frame(Alcohol=AlcoholInput, Sulphates=SulphatesInput, VolatileAcidity=Acidityinput)))
  })
  # Map the rounded numeric prediction to a verbal rating
  output$Quality <- renderText({
    value<-QualityPred()
    print(value)
    if (value >=0 & value<2) { "Very Bad" }
    else if (value>=2 & value<3) { "Bad" }
    else if (value>=3 & value<4) { "Average" }
    else if (value>=4 & value<5) { "Above average" }
    else if (value>=5 & value<6) { "Good" }
    else if (value>=6 & value<7) { "Very Good" }
    else if (value>=7) {"Excellent"}
    else {"Calculating..."}
  })
})
|
eb82896736e87b1bb869aa8a69a3896e908494fb
|
4dc8d0a645b02b4de44dfa2b1188d4fc32eff151
|
/Assignment/A1. Credit Rating/CreditRating.R
|
874da979d1b943e4284315f328f48314617e2fd4
|
[] |
no_license
|
Hitali-Shah/SDM
|
000e92acd2755388bb75979dff09f34e14fefb0e
|
9b9ede7828fba94d917f25129e259235d9b9e8cc
|
refs/heads/master
| 2023-08-08T01:03:02.019536
| 2021-09-16T14:32:41
| 2021-09-16T14:32:41
| 398,692,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
CreditRating.R
|
# Exploratory regressions of credit rating on the CreditRating data set:
# two candidate linear models compared side by side with stargazer.
library(rio)
library(stargazer)
credit_score = import("CreditRating.xlsx")
# normalise column names (syntactic, lower case)
colnames(credit_score) = tolower(make.names(colnames(credit_score)))
# Convert categorical columns to factors.  Fix: the original called
# as.factor() without assigning the result, so the calls had no effect
# on the data frame.
credit_score$student = as.factor(credit_score$student)
credit_score$married = as.factor(credit_score$married)
credit_score$ethnicity = as.factor(credit_score$ethnicity)
credit_score$gender = as.factor(credit_score$gender)
# Two candidate models for rating
score1.out = lm(rating~limit+cards+student+balance,data=credit_score)
score2.out = lm(rating~limit+cards+income+ethnicity+balance,data=credit_score)
stargazer(score1.out,score2.out, type="text", single.row=TRUE)
# correlation of rating with balance (printed at the console)
cor(credit_score$rating, credit_score$balance)
|
59ca57c1fed3ad323d9d2dbf0e81085743df670a
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3504_0/rinput.R
|
48adc92cf9a6d2b1c1fdad50796202a0359fe014
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot a phylogenetic tree: read a Newick tree from disk, remove its root
# with ape::unroot(), and write the unrooted tree to a new file.
library(ape)
testtree <- read.tree("3504_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3504_0_unrooted.txt")
|
588d0557d8fdb1b383b36537adec7aad6a5e417a
|
da04803dd0714434a1e0d458616fd9ecfdecbcce
|
/R/plot-abn.R
|
6f4b13245af6fdf3eee547cb5fd693aa085ebe2e
|
[] |
no_license
|
cran/abn
|
b232e17d29eba356f5b1df5d50c27e17de860422
|
e393f625a9de98adb351ac007b77c87d430cb7bf
|
refs/heads/master
| 2023-05-25T02:14:43.027190
| 2023-05-22T12:50:24
| 2023-05-22T12:50:24
| 17,694,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,880
|
r
|
plot-abn.R
|
## plot-abn.R --- Author : Gilles Kratzer Last Modified on: 06/12/2016 Last Modified on: 10/03/2017 Last modification: 19.05.2017 Node color list Last mod: 13.06.2017 Arc direction Last mod:
## 18/07/2017
## major rewrite rf 2021-04
# for final submission, eliminate the remaining commented-out `print()` lines
# Deprecated wrapper: forwards to plotAbn(), translating the legacy
# 'dag.m' argument name to 'dag'.
plotabn <- function(...) {
    .Deprecated("plotAbn", msg="'plotabn' is deprecated.\n Use 'plotAbn' instead but note that arguments have slightly changed.")
    dots <- list(...)
    if (!is.null(dots$dag.m)) {
        dag <- dots$dag.m
        dots$dag.m <- NULL
        # Fix: wrap dag in list(). c(dag, dots) would splice a matrix
        # element-wise into the argument list (one argument per cell)
        # instead of passing it as a single 'dag' argument.
        do.call('plotAbn', c(list(dag=dag), dots))
    } else plotAbn(...)
}
plotAbn <- function(dag, data.dists=NULL, markov.blanket.node=NULL,
fitted.values=NULL, digits=2, edge.strength=NULL, edge.strength.lwd=5,
edge.direction="pc", edge.color="black", edge.linetype="solid", edge.arrowsize=0.6, edge.fontsize=node.fontsize,
node.fontsize=12, node.fillcolor=c("lightblue","brown3","chartreuse3"),
node.fillcolor.list=NULL, node.shape=c("circle","box","ellipse","diamond"),
plot=TRUE , ... ) {
# Actually, the plot argument is wrong! i do not need the adjacency structure only. I need all but the plotting. i.e., all but the rendering of the graph.
# The following is not relevant. The nodes are calculated via mb. They are not colored.
# if(!is.null(markov.blanket.node) & ("multinomial" %in% (data.dists))) warning("Multinomial nodes are excluded from Markov blanket computation.")
## for compatibility purpose
if(inherits(x=dag, what="abnLearned")){
data.dists <- dag$score.cache$data.dists;
dag <- dag$dag
}
if(inherits(x=dag, what="abnFit")){
data.dists <- dag$abnDag$data.dists
dag <- dag$abnDag$dag
}
if (is.null(data.dists)) stop("'data.dist' need to be provided.")
name <- names(data.dists)
## dag transformation
if (!is.null(dag)) {
if (is.matrix(dag)) {
## run a series of checks on the DAG passed
dag <- abs(dag)
## consistency checks
diag(dag) <- 0
dag[dag > 0] <- 1
## naming
if (is.null(rownames(dag))) {
colnames(dag) <- name
rownames(dag) <- name
}
dag <- check.valid.dag(dag=dag, is.ban.matrix=FALSE, group.var=NULL)
} else {
if (grepl("~", as.character(dag)[1], fixed=T)) {
dag <- formula.abn(f=dag, name=name)
## run a series of checks on the DAG passed
dag <- check.valid.dag(dag=dag, is.ban.matrix=FALSE, group.var=NULL)
}
}
} else {
stop("'dag' specification must either be a matrix or a formula expression")
}
# contains Rgraphviz
if (edge.direction == "undirected") {
dag=dag + t(dag)
dag[dag != 0] <- 1 # this should not be necessary!
}
## create an object graph
am.graph <- new(Class="graphAM", adjMat=dag,
edgemode=ifelse(edge.direction=="undirected","undirected","directed"))
## ========= SHAPE =========
## Shape: plot differential depending on the distribution
node.shape <- rep(node.shape, 4)
shape <- rep(node.shape[1], length(data.dists) )
shape[data.dists == "binomial"] <- node.shape[2]
shape[data.dists == "poisson"] <- node.shape[3]
shape[data.dists == "multinomial"] <- node.shape[4]
names(shape) <- names(data.dists)
## ================= NODE FILLED COLOR =================
## fill with default value, change if MB or fillcolor.list is requested
fillcolor <- rep(node.fillcolor[1], length(data.dists))
names(fillcolor) <- names(data.dists)
## =============== MARKOV BLANKET ===============
## Markov Blanket: plot the MB of a given node
if (!is.null(markov.blanket.node)) {
markov.blanket <- mb( dag, node=markov.blanket.node, data.dists=data.dists)
fillcolor[ names(data.dists) %in% markov.blanket] <- node.fillcolor[3]
fillcolor[ names(data.dists) %in% markov.blanket.node] <- node.fillcolor[2]
} else if (!is.null(node.fillcolor.list)) {
fillcolor[ names(data.dists) %in% node.fillcolor.list] <- node.fillcolor[2]
}
names.edges <- names(Rgraphviz::buildEdgeList(am.graph))
## =============== Fitted values ===============
## Plot the fitted values in abn as edges label
# print(names.edges)
if (!is.null(fitted.values)) {
space <- " "
edge.label <- c()
for (i in 1:length(fitted.values)) {
if ((length(fitted.values[[i]]) > 1)& (data.dists[names(fitted.values)[i]] != "gaussian")) {
for (j in 1:(length(fitted.values[[i]]) - 1))
edge.label <- c(edge.label, paste(space, signif(fitted.values[[i]][j + 1], digits=digits)))
} else if ((length(fitted.values[[i]]) > 2)& (data.dists[names(fitted.values)[i]] == "gaussian")){
for (j in 1:(length(fitted.values[[i]]) - 2))
edge.label <- c(edge.label, paste(space, signif(fitted.values[[i]][j + 1], digits=digits)))
}
}
} else edge.label <- rep(" ", length(names.edges))
names(edge.label) <- names.edges
## =================== Arc Strength ===================
## Arc strength: plot the AS of the dag arcs
# if (is.matrix(edge.strength) & (edge.direction != "undirected")) {
if (is.matrix(edge.strength)) {
if (any(edge.strength<0)) stop("'edge.strength' should be positive")
if (any(edge.strength[dag ==0] >0)) stop("'edge.strength' does not match dag")
min.as <- min(edge.strength[edge.strength > 0])
max.as <- max(edge.strength[edge.strength > 0])
edge.strength.norm <- (edge.strength - min.as)/(max.as - min.as)
edge.strength.norm[edge.strength.norm < 0] <- 0
edge.lwd <- list()
for (i in 1:length(dag[1, ])) {
for (j in 1:length(dag[1, ])) {
if (dag[i, j] == 1) {
edge.lwd <- cbind(edge.lwd, round(edge.strength.lwd * edge.strength.norm[i, j]) + 1)
}
}
}
} else {
edge.lwd <- rep(1, length(names.edges))
}
class(edge.lwd) <- "character"
names(edge.lwd) <- names.edges
## ====== Plot ======
attrs <- list(graph=list(rankdir="BT"),
node=list(fontsize=node.fontsize, fixedsize=FALSE),
edge=list(arrowsize=edge.arrowsize, color=edge.color, lty=edge.linetype, fontsize=edge.fontsize))
nodeAttrs <- list(fillcolor=fillcolor, shape=shape)
edgeAttrs <- list(label=edge.label, lwd=edge.lwd)
# print(edgeAttrs)
# if (all(shape %in% c("circle","box","ellipse"))) {
am.graph <- layoutGraph(am.graph, attrs=attrs, nodeAttrs=nodeAttrs, edgeAttrs=edgeAttrs)
if (edge.direction == "pc") { # specify appropriate direction!
edgeRenderInfo(am.graph) <- list(arrowtail="open")
edgeRenderInfo(am.graph) <- list(arrowhead="none")
# edgeRenderInfo(am.graph) <- list(direction=NULL)# MESSES up!!! not needed.
}
edgeRenderInfo(am.graph) <- list(lwd=edge.lwd)
# if (plot) renderGraph(am.graph, attrs=attrs, nodeAttrs=nodeAttrs, edgeAttrs=edgeAttrs)
if (plot) renderGraph(am.graph, ...)
# } else {
# am.graph <- layoutGraph(am.graph, attrs=attrs, nodeAttrs=nodeAttrs, edgeAttrs=edgeAttrs, ...)
# the following does not work in R
# edgeRenderInfo(am.graph)[["direction"]] <- "back"
# hence
# warning("edge.direction='pc' is not working with diamond shapes.")
# edgeRenderInfo(am.graph) <- list(lwd=edge.lwd)
# if (plot) renderGraph(am.graph,attrs=attrs, nodeAttrs=nodeAttrs, edgeAttrs=edgeAttrs)
# }
invisible(am.graph)
} #EOF
|
f4c31f11d5e165b83d97f375403bf40d41ba0f88
|
478f7c571fa3f63a3a15b904d71457c8a86b51c5
|
/code/074_Modeling_lda.R
|
586de5d4d948c44ff9d6f7ba160155deb5d80dff
|
[] |
no_license
|
amacaluso/Statistical_Learning
|
d5270eb4b8cbdfed9edc7bd85618abb5bb9d70aa
|
52d3d797d9e76f3634cf317547c744ef208b2615
|
refs/heads/master
| 2020-03-09T08:10:38.620706
| 2019-07-31T20:37:23
| 2019-07-31T20:37:23
| 128,682,966
| 4
| 1
| null | 2019-07-29T15:40:33
| 2018-04-08T21:21:52
|
HTML
|
UTF-8
|
R
| false
| false
| 11,344
|
r
|
074_Modeling_lda.R
|
### ***** IMPORT ***** ###
##########################
# Linear Discriminant Analysis (LDA) of binary wine quality.
# Expects Utils.R to define CV(), save_plot() and ROC_analysis(), and the
# pre-processing script to create train.wine_binary / test.wine_binary
# (12 predictors + column 13 = binary_quality) and ROC_all.
# NOTE(review): script relies on objects sourced into the global environment;
# it also ends by clearing everything except ROC_all.
source( 'code/Utils.R')
#SEED = 12344321
source( 'code/020_Pre_processing.R') # REQUIRE SEED
### ***** SAVING FOLDER ***** ###
folder = "results/MODELING/CLASSIFICATION"
dir.create( folder )
##################################
# DISCRIMINANT ANALYSIS
## Linear discriminant analysis
################################################
# Coefficient of variation of each predictor (column 13 = target, excluded);
# used below to scale the between-group mean distances into an "importance".
coeff_var<-apply( train.wine_binary[, -13 ], 2, CV)
lda.fit = lda(binary_quality ~ ., data = train.wine_binary)
# Summary of results
group_means_lda = lda.fit$means
coeff_lda = round(lda.fit$scaling, 3)
# ---> TO BE SAVED? MAYBE WORTH INTERPRETING
#variable importance
# Raw distance between the two group means, per variable, sorted ascending.
group_distances<-sort(abs(diff(lda.fit$means)))
names(group_distances)<-colnames(diff(lda.fit$means))[as.vector(order((abs(diff(lda.fit$means)))))]
group_distances
# Importance = |mean difference| scaled by the coefficient of variation.
var_importance<-sort(abs(diff(lda.fit$means))/coeff_var)
names(var_importance)<-colnames(diff(lda.fit$means))[as.vector(order((abs(diff(lda.fit$means))/coeff_var)))]
var_importance
var_importance = data.frame( variable = names(var_importance),
                             Importance = round( var_importance,2) ,
                             row.names = 1:length(var_importance),
                             groups_mean_0 = group_means_lda[1,] ,
                             groups_mean_1 = group_means_lda[2,] )
# Interactive bar chart of variable importance; hover text shows group means.
# NOTE: the 'Media gruppo' labels are user-facing hover text (Italian for
# "group mean") and are kept verbatim.
lda_importance = ggplot(var_importance, aes( variable, Importance, color = variable, text = paste( 'Media gruppo 1:', groups_mean_1, "\n",
                                                                                                   'Media gruppo 0:', groups_mean_0))) +
  geom_bar( stat = "identity", position='stack') +
  ggtitle( "LDA - Variable importance" ) + theme_bw() + guides( fill = FALSE ) +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, size = 12, hjust = 1))
lda_importance = ggplotly( lda_importance) %>% layout( showlegend = FALSE)
save_plot( lda_importance, type = "CLASSIFICATION")
# Histograms of discriminant function values by class
######################################################
# Predict the lda fit on the test sample
lda.pred = predict(lda.fit, newdata = test.wine_binary) #test
lda.pred1 = predict(lda.fit, newdata = train.wine_binary) #train
# Midpoint between the two class means of the training discriminant scores;
# used as the visual decision boundary in the density plot below.
intersection = (mean(lda.pred1$x[train.wine_binary$binary_quality==0])+
                  mean(lda.pred1$x[train.wine_binary$binary_quality==1]))/2
test_accuracy <- mean(lda.pred$class==test.wine_binary$binary_quality)
# Predict the lda fit on the test sample
lda_pred_bad_ts = data.frame( label = 'bad', prob = lda.pred$x[test.wine_binary$binary_quality==0] )
lda_pred_good_ts = data.frame( label = 'good', prob = lda.pred$x[test.wine_binary$binary_quality==1] )
lda_pred_ts = rbind( lda_pred_bad_ts, lda_pred_good_ts )
# Overlaid histograms of the test-set discriminant scores, by true class.
lda_hist_1_vs_0 = ggplot(lda_pred_ts, aes( x = prob, y = ..density.. )) +
  geom_histogram(data = subset(lda_pred_ts, label == 'bad'), fill = "red", alpha = 0.2, binwidth = 0.5) +
  geom_histogram(data = subset(lda_pred_ts, label == 'good'), fill = "blue", alpha = 0.2, binwidth = 0.5) +
  ggtitle( "Bad vs Good (test set)")
lda_hist_1_vs_0 = ggplotly( lda_hist_1_vs_0)
save_plot( lda_hist_1_vs_0, type = "CLASSIFICATION")
# Same comparison as smoothed densities, with the decision midpoint marked.
lda_line_1_vs_0 = ggplot(lda_pred_ts, aes( x = prob, y = ..density.. )) +
  labs(x = "Discriminant score") +
  geom_density(data = subset(lda_pred_ts, label == 'bad'), fill = "red", alpha = 0.2) +
  geom_density(data = subset(lda_pred_ts, label == 'good'), fill = "blue", alpha = 0.2) +
  ggtitle( "Bad vs Good") +
  geom_vline( xintercept = intersection )
lda_line_1_vs_0 = ggplotly( lda_line_1_vs_0 )
save_plot( lda_line_1_vs_0, type = "CLASSIFICATION")
### plot separation function on training sample
###############################################
# Manual (from-scratch) derivation of the Fisher discriminant, to be compared
# against MASS::lda's scaling at the end of the script.
n <- dim(train.wine_binary)[1]
p <- dim(train.wine_binary)[2]-1 # Subtract 1 because one of the columns specifies the job
# Separate the 2 groups
good <-train.wine_binary[train.wine_binary$binary_quality==1,-13]
bad <-train.wine_binary[train.wine_binary$binary_quality==0,-13]
# Need sample statistics
n_good <- dim(good)[1]
n_bad <- dim(bad)[1]
# Group mean
mean.good <- apply(good,2,mean)
mean.bad <- apply(bad,2,mean)
mean.tot<-(mean.good*n_good+mean.bad*n_bad)/(n_good+n_bad)
# Within group covariance matrices
# NOTE(review): correlation plot of the full 'wine' data is computed here,
# in the middle of the discriminant derivation — presumably exploratory.
corr_matrix = cor(wine[, 1:12])
corr_plot = corrplot(corr_matrix, method="color")
corrplot = ggplotly( ggcorrplot(corr_matrix, hc.order = TRUE,
                                outline.col = "white",
                                #ggtheme = ggplot2::theme_gray,
                                colors = c("#6D9EC1", "white", "#E46726")))
S.good <- var(good)
S.bad <- var(bad)
# Pooled within-group covariance W and its inverse.
W <- ((n_good-1)*S.good + (n_bad-1)*S.bad )/(n_good+n_bad-2)
W.inv <- solve(W)
# Between group covariance
B<-1/(2-1)*( (n_good*(mean.good-mean.tot)%*% t(mean.good-mean.tot))+
               (n_bad*(mean.bad-mean.tot)%*% t(mean.bad-mean.tot)) )
A<- W.inv %*% B # Calculating the canonical matrix
eigen_res<- eigen(A)
# Zero out numerically-negligible eigenvalues (< 1e-6) for inspection.
ifelse(rep(10**(-6),length(eigen_res$values))>Re(eigen_res$values),0,eigen_res$values)
#just one eigenvalue "different" from zero
eigen_res$vectors
a.vect<-Re(eigen_res$vectors[,1]) #corresponding to the only non-zero eigenvalue
# Project the training predictors onto the canonical direction.
Y<-as.matrix(train.wine_binary[,-13])%*%(a.vect)
length(Y)
dim(train.wine_binary)
#PROJECTION ONTO Y
y.mean.good<-mean.good%*% a.vect
y.mean.bad<-mean.bad%*% a.vect
y.mean.good
y.mean.bad
#Euclidean centroid distance over Y
# For each observation: distance to each projected centroid and the index
# (0/1) of the FARTHER centroid (which.max - 1).
dist.groupY<-matrix(0,nrow=nrow(Y),3)
colnames(dist.groupY)<-c("dist.good","dist.bad","Group")
for (i in 1:nrow(Y)){
  dist.good<-sqrt(sum((Y[i,]-y.mean.good)^2)) #Euclidean distance
  dist.bad<-sqrt(sum((Y[i,]-y.mean.bad)^2)) #Euclidean distance
  dist.groupY[i,]<-c(dist.good,dist.bad,which.max(c(dist.good,dist.bad))-1)
}
dist.groupY
#Mahalanobis centroid distance over X
dist.groupX<-matrix(0,nrow=nrow(Y),3)
colnames(dist.groupX)<-c("dist.good","dist.bad","Group")
for (i in 1:nrow(Y)){
  dist.good<-(Y[i,]-y.mean.good)%*%(t(a.vect)%*%W.inv%*%a.vect)%*%t(Y[i,]-y.mean.good) #mahalanobis distance
  dist.bad<-(Y[i,]-y.mean.bad)%*%(t(a.vect)%*%W.inv%*%a.vect)%*%t(Y[i,]-y.mean.bad) #mahalanobis distance
  dist.groupX[i,]<-c(dist.good,dist.bad,which.max(c(dist.good,dist.bad))-1)
}
dist.groupX
#################################################################
# Scatter of the canonical variable colored by TRUE class label, with the
# two projected centroids and their midpoint drawn as reference lines.
Y_bad = Y[train.wine_binary[,13]==1,]
Y_good = Y[train.wine_binary[,13]==0,]
canonic_var = rbind( data.frame( label ='bad', index = 1:length(Y_bad), can_var = Y_bad ),
                     data.frame( label ='good', index = 1:length(Y_good),can_var = Y_good ))
canonical_variable = ggplot(canonic_var, aes( x = index, y= can_var )) +
  geom_point(data = subset(canonic_var, label == 'bad'), col = "green", alpha = 0.5) +
  geom_point(data = subset(canonic_var, label == 'good'), col = "red", alpha = 0.5) +
  ggtitle( "Canonical variable") +
  geom_hline( yintercept = y.mean.good, col="forestgreen", lty = 4, lwd = .8 ) +
  geom_hline( yintercept = y.mean.bad, col = "firebrick4", lty = 4, lwd = .8) +
  geom_hline( yintercept = (y.mean.good+y.mean.bad)/2, col = "black", lty = 5, lwd = .9)
canonical_variable = ggplotly( canonical_variable )
# ********** Saving a file ******************* #
file_name = paste0( folder, "/canonical_variable.Rdata")
save( canonical_variable, file = file_name)
# ******************************************** #
# Same scatter but colored by the centroid-distance classification above.
Y_bad = Y[dist.groupY[,3]==1,]
Y_good = Y[dist.groupY[,3]==0,]
canonic_var = rbind( data.frame( label ='bad', index = 1:length(Y_bad), can_var = Y_bad ),
                     data.frame( label ='good', index = 1:length(Y_good),can_var = Y_good ))
canonical_variable2 = ggplot(canonic_var, aes( x = index, y= can_var )) +
  geom_point(data = subset(canonic_var, label == 'bad'), col = "green", alpha = 0.5) +
  geom_point(data = subset(canonic_var, label == 'good'), col = "red", alpha = 0.5) +
  ggtitle( "Canonical variable" ) +
  geom_hline( yintercept = (y.mean.good+y.mean.bad)/2, col="black", lty = 5, lwd = .9 )
canonical_variable2 = ggplotly( canonical_variable2 )
canonical_variable2
# ********** Saving a file ******************* #
file_name = paste0( folder, "/canonical_variable2.Rdata")
save( canonical_variable2, file = file_name)
# ******************************************** #
# Training-set histogram of the hand-computed canonical scores by class.
pred_bad = data.frame( label = 'bad', prob = Y[which(train.wine_binary[,13]==1),] )
pred_good = data.frame( label = 'good', prob = Y[which(train.wine_binary[,13]==0),] )
pred = rbind( pred_bad, pred_good )
lda_hist = ggplot(pred, aes( x = prob, y = ..density.. )) +
  labs(x = "Discriminant score") +
  geom_histogram(data = subset(pred, label == 'bad'),
                 col = "green", alpha = 0.2) +
  geom_histogram(data = subset(pred, label == 'good'),
                 col = "red", alpha = 0.2) +
  ggtitle( "Bad vs Good")
lda_hist = ggplotly( lda_hist )
# ********** Saving a file ******************* #
file_name = paste0( folder, "/lda_hist.Rdata")
save( lda_hist, file = file_name)
# ******************************************** #
###comparing results with lda
# Rescale the hand-computed direction by the pooled within-group variance of
# the score so it matches MASS::lda's scaling convention (and vice versa).
psi<-t(a.vect)%*%W%*%a.vect
a.vect%*%(solve(psi)^(1/2))
#the other way around
coef(lda.fit)%*%solve(solve(psi)^(1/2))
# Test set confusion matrix
table(true = test.wine_binary$binary_quality, predict = lda.pred$class)
# Total success rate
mean(lda.pred$class == test.wine_binary$binary_quality)
# That's not bad, but notice the low sensitivity of this model.
# Test set ROC curve and AUC
pred_lda = prediction(lda.pred$posterior[, 2], test.wine_binary$binary_quality)
perf = performance(pred_lda, "tpr", "fpr")
auc = c(as.numeric(performance(pred_lda, "auc")@y.values))
tresholds<-seq( from = 0, to = 1, by = 0.01)
# Append this model's ROC summary to the cross-model accumulator ROC_all.
ROC_lda = cbind( Model = 'Linear Discriminant Analysis',
                 ROC_analysis( prediction = lda.pred$posterior[,2],
                               y_true = test.wine_binary$binary_quality,
                               probability_thresholds = tresholds))
ROC_lda$AUC = auc
ROC_all = rbind( ROC_all, ROC_lda )
ROC_matrix_lda = ROC_analysis( prediction = lda.pred$posterior[,2],
                               y_true = test.wine_binary$binary_quality,
                               probability_thresholds = tresholds)
ROC_matrix_lda = data.frame( treshold = ROC_matrix_lda$probability_thresholds,
                             FPR = 1-ROC_matrix_lda$`Specificity: TN/negative`,
                             TPR = ROC_matrix_lda$`Sensitivity (AKA Recall): TP/positive` )
roc_curve_lda = ggplot(ROC_matrix_lda, aes(x = FPR, y = TPR, label = treshold)) +
  geom_line(color = "green") + theme_bw() +
  style_roc() + #annotate("point", x = v, y = h, colour = "white")+
  ggtitle( "Linear discriminant analysis - test set")
roc_curve_lda = ggplotly( roc_curve_lda )
save_plot( roc_curve_lda, type = "CLASSIFICATION" )
# Keep only the cross-model ROC accumulator for the next modeling script.
rm(list=setdiff(ls(), 'ROC_all'))
|
6db647bbce3b00bedcbe635dd607d2b9c5ceb772
|
a0ceb8a810553581850def0d17638c3fd7003895
|
/scripts/rstudioserver_analysis/WKM_and_BM_together/find_TSS_topics_winsizes_for_heatmap_MouseBM.R
|
c534fd0c67f5f9fc6ddac907bc93783c755eb39f
|
[] |
no_license
|
jakeyeung/sortchicAllScripts
|
9e624762ca07c40d23e16dbd793ef9569c962473
|
ecf27415e4e92680488b6f228c813467617e7ee5
|
refs/heads/master
| 2023-04-15T22:48:52.272410
| 2022-10-24T10:45:24
| 2022-10-24T10:45:24
| 556,698,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,162
|
r
|
find_TSS_topics_winsizes_for_heatmap_MouseBM.R
|
# Jake Yeung
# Date of Creation: 2020-06-17
# File: ~/projects/scchic/scripts/rstudioserver_analysis/WKM_and_BM_together/find_TSS_topics_winsizes_for_heatmap_MouseBM.R
# description
# Purpose: for mouse bone-marrow scChIC data, pick one TSS per gene (defined
# by the highest H3K4me3 signal), map LDA topic-defining bins to those TSSs
# for four hand-picked celltype topics, and write 2-bp TSS BED files per
# celltype. Depends on cluster file paths and in-house packages
# (scchicFuncs, JFuncs) — not runnable outside that environment.
rm(list=ls())
library(hash)
library(ggrastr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(scchicFuncs)
library(preprocessCore)
library(mixtools)
library(scchicFuncs)
library(JFuncs)
library(topicmodels)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
library(ChIPseeker)
library(GenomicRanges)
jorg <- "org.Mm.eg.db"
# Mouse autosomes 1-19 plus X and Y, UCSC-style names.
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
# Named self-vector so lapply() results keep mark names.
jmarks <- c("H3K4me1", "H3K4me3", "H3K27me3"); names(jmarks) <- jmarks
# Load DE genes -----------------------------------------------------------
# load this first because it loads a lot of objects, might disuprt things
inf.de <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/rdata_robjs/de_genes_stringent_objects/de_genes_sorted_and_giladi.WithHouseKeepAndNotExpressed.FixExprsOther.RData"
load(inf.de, v=T)
# Load LDA r GLMPCA ---------------------------------------------------------------
hubprefix <- "/home/jyeung/hub_oudenaarden/jyeung/data"
# load GLMPCA from bins
# jmark <- "H3K4me1"
jexperi <- "AllMerged"
mergesize <- "1000"
nbins <- "1000"
jcovar.cname <- "ncuts.var.log2.CenteredAndScaled"
jpenalty <- 1
ntopics <- 30
# For each histone mark, load the GLMPCA fit, the celltyping UMAP, and the
# binned LDA model; collect them into one list per mark.
# NOTE(review): the loaded .RData files are assumed to define
# dat.umap.glm.fillNAs, dat.umap.lda, glm.out and out.lda — confirm upstream.
out.objs <- lapply(jmarks, function(jmark){
  print(jmark)
  inf.glmpca <- file.path(hubprefix, paste0("scChiC/from_rstudioserver/glmpca_analyses/GLMPCA_outputs.KeepBestPlates2.good_runs/PZ_", jmark, ".AllMerged.KeepBestPlates2.GLMPCA_var_correction.mergebinsize_1000.binskeep_1000.covar_ncuts.var.log2.CenteredAndScaled.penalty_1.winsorize_TRUE.2020-02-11.RData"))
  inf.lda <- file.path(hubprefix, paste0("scChiC/from_rstudioserver/glmpca_analyses/GLMPCA_outputs.KeepBestPlates2.celltyping/GLMPCA_celltyping.", jmark, ".AllMerged.mergesize_1000.nbins_1000.penalty_1.covar_ncuts.var.log2.CenteredAndScaled.RData"))
  inf.lda.bins <- file.path(hubprefix, paste0("scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-02-11.var_filt.UnenrichedAndAllMerged.KeepBestPlates2/lda_outputs.BM_", jmark, "_varfilt_countmat.2020-02-11.AllMerged.K-30.binarize.FALSE/ldaOut.BM_", jmark, "_varfilt_countmat.2020-02-11.AllMerged.K-30.Robj"))
  load(inf.glmpca, v=T)
  load(inf.lda, v=T)
  load(inf.lda.bins, v=T)
  out <- list(dat.umap.glm.fillNAs = dat.umap.glm.fillNAs, dat.umap.lda = dat.umap.lda, glm.out = glm.out, out.lda = out.lda)
  return(out)
})
# Genomic bins used by the LDA models (same bin set for all marks is assumed).
jbins <- out.objs$H3K4me1$out.lda@terms
# get imputed mats
# Imputed expression = topics %*% terms, log2 CPM-style scaling.
dat.imputes.lst <- lapply(out.objs, function(x){
  tm.result <- topicmodels::posterior(x$out.lda)
  dat.impute <- log2(t(tm.result$topics %*% tm.result$terms) * 10^6)
  return(dat.impute)
})
# Read TSS Signal to figure out which transcript to keep -----------------
jwinsize <- "10000"
indir.tss <- file.path(hubprefix, "scChiC/raw_data/ZellerRawData_B6_All_MergedByMarks_final.count_tables_TSS")
assertthat::assert_that(dir.exists(indir.tss))
# Per mark: load the TSS count matrix and its per-TSS total signal.
tss.out <- lapply(jmarks, function(jmark){
  print(jmark)
  inf.tss <- file.path(indir.tss, paste0(jmark, ".countTableTSS.mapq_40.TSS_", jwinsize, ".blfiltered.csv"))
  mat.tss <- ReadMatTSSFormat(inf.tss)
  return(list(mat.tss = mat.tss, tss.exprs = rowSums(mat.tss)))
})
tss.exprs.lst.unfilt <- lapply(tss.out, function(x) x$tss.exprs)
tss.mats.singlecell.unfilt <- lapply(tss.out, function(x) x$mat.tss)
# exprs.vec <- tss.exprs.lst$H3K4me1
# Diagnostic densities of per-TSS signal, one plot per mark.
lapply(jmarks, function(jmark){
  plot(density(tss.exprs.lst.unfilt[[jmark]]), main = jmark)
})
# go with the K4me3 definition...
ref.mark <- "H3K4me3"
jthres <- 275 # maybe not exactly at hump? what about tissuespecific stuff? rare celltypes? complicated from the bulk
plot(density(tss.exprs.lst.unfilt[[ref.mark]]))
abline(v = jthres)
# Keep one TSS per gene: the one with the most H3K4me3 signal.
tss.mat.ref <- CollapseRowsByGene(count.mat = tss.mats.singlecell.unfilt[[ref.mark]], as.long = FALSE, track.kept.gene = TRUE)
tss.keep <- rownames(tss.mat.ref)
# Subset every mark's per-TSS signal to the kept TSSs.
tss.exprs.lst <- lapply(tss.exprs.lst.unfilt, function(exprs.vec){
  jkeep <- names(exprs.vec) %in% tss.keep
  return(exprs.vec[jkeep])
})
print("Dimensions of TSS raw keeping all TSS")
lapply(tss.mats.singlecell.unfilt, dim)
tss.mats.singlecell <- lapply(tss.mats.singlecell.unfilt, function(tss.mat){
  jkeep <- rownames(tss.mat) %in% tss.keep
  return(tss.mat[jkeep, ])
})
print("Dimensions of TSS after keeping one TSS for each gene, defined by highest expression in H3K4me3")
lapply(tss.mats.singlecell, dim)
# Get common rows ---------------------------------------------------------
lapply(tss.exprs.lst.unfilt, length)
# Union of TSS names across marks (tss.all) and their intersection
# (tss.common); downstream uses the intersection only.
tss.all <- lapply(tss.exprs.lst, function(exprs.lst){
  names(exprs.lst)
}) %>%
  unlist() %>%
  unique()
tss.common <- lapply(tss.exprs.lst, function(exprs.lst){
  names(exprs.lst)
}) %>%
  Reduce(f = intersect, .)
# get ensembl names ?
# TSS names look like "coord;gene"; the second field is the gene symbol.
genes.common <- sapply(tss.common, function(x) strsplit(x, ";")[[1]][[2]])
ens.common <- Gene2Ensembl.ZF(genes.common, return.original = TRUE, species = "mmusculus")
g2e.hash2 <- hash(genes.common, ens.common)
# create tss, genes, ens dat
genes.annot <- data.frame(bin = tss.common, gene = genes.common, ens = ens.common, stringsAsFactors = FALSE)
# Annotate bins to gene --------------------------------------------------
# use same winsize (10kb as the TSS analysis)
# take any mark
# inf.annot <- "/home/jyeung/hub_oudenaarden/jyeung/data/databases/gene_tss/zebrafish/gene_tss.CodingOnly.winsize_10000.species_drerio.bed"
inf.annot <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/databases/gene_tss/gene_tss_winsize.", jwinsize, ".bed")
assertthat::assert_that(file.exists(inf.annot))
annot.out <- AnnotateCoordsFromList.GeneWise(coords.vec = jbins, inf.tss = inf.annot, txdb = TxDb.Mmusculus.UCSC.mm10.knownGene, annodb = jorg, chromos.keep = jchromos)
annot.regions <- annot.out$out2.df
annot.regions <- subset(annot.regions, select = c(dist.to.tss, region_coord, gene, tssname))
# Filter bins for only TSS's that are good -------------------------------
annot.regions.filt <- subset(annot.regions, tssname %in% tss.common)
# Fall back to the gene symbol itself when no Ensembl ID is mapped.
annot.regions.filt$ens <- sapply(annot.regions.filt$gene, function(g) AssignHash(g, jhash = g2e.hash2, null.fill = g))
print(head(annot.regions.filt))
# g2e.annot <- hash(annot.out$regions.annotated$SYMBOL, annot.out$regions.annotated$ENSEMBL)
# Lookup tables: gene -> ensembl, region -> gene, gene -> TSS name.
g2e.annot <- hash(annot.regions.filt$gene, annot.regions.filt$ens)
r2g.annot <- hash(annot.regions.filt$region_coord, annot.regions.filt$gene)
g2tss.annot <- hash(genes.annot$gene, genes.annot$bin)
plot(density(annot.regions.filt$dist.to.tss))
# Find celltype-specific topics --------------------------------------------
topnbins <- 2000
out.lda <- out.objs[[ref.mark]]$out.lda
# browse /hpc/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/pdfs_all/BM_LDA_downstream_topics_celltypes_Giladi.UnenrichedAllMerged.KeepBestPlates2
# ertryth, bcell, granu, hsc for H3K4me3
# Hand-picked topic -> celltype assignment from a prior downstream analysis.
ctypes.vec <- c("Eryth", "Bcell", "Granu", "HSPCs")
topics.vec <- c("topic16", "topic13", "topic2", "topic26")
names(topics.vec) <- ctypes.vec
tm.result <- posterior(out.lda)
tm.result <- AddTopicToTmResult(tm.result, jsep = "")
# Convert topic regions to genes -----------------------------------------
# Top `topnbins` bins by topic weight, per celltype topic.
topbins.lst <- lapply(topics.vec, function(jtop){
  jvec <- sort(tm.result$terms[jtop, ], decreasing = TRUE)
  return(names(jvec)[1:topnbins])
})
# Map bins to genes; manual Hoxa11 -> Hoxa9 substitution.
# NOTE(review): rationale for the Hoxa swap is not visible here — confirm.
topgenes.lst <- lapply(topbins.lst, function(jbins){
  jvec <- sapply(jbins, AssignHash, r2g.annot)
  jvec <- gsub(pattern = "Hoxa11", replacement = "Hoxa9", jvec)
  return(jvec)
})
toptss.lst <- lapply(topgenes.lst, function(jgenes){
  tss <- sapply(jgenes, AssignHash, g2tss.annot)
  # remove NA
  tss <- tss[which(!is.na(tss))]
})
lapply(toptss.lst, length)
# Make TSS into 2bp bins -------------------------------------------------
print(head(toptss.lst$Eryth))
# Parse each "coord;tx" TSS name into a BED-like frame and shrink the
# window to 2 bp around its midpoint (Start2/End2).
tss.bed.lst <- lapply(toptss.lst, function(tss.vec){
  jcoords <- sapply(tss.vec, function(x) strsplit(x, ";")[[1]][[1]])
  jtx <- sapply(tss.vec, function(x) strsplit(x, ";")[[1]][[2]])
  bed.tmp <- data.frame(chromo = sapply(jcoords, GetChromo),
                        Start = as.numeric(sapply(jcoords, GetStart)),
                        End = as.numeric(sapply(jcoords, GetEnd)),
                        tx = jtx,
                        stringsAsFactors = FALSE) %>%
    rowwise() %>%
    mutate(midpt = (Start + End) / 2,
           Start2 = midpt - 1,
           End2 = midpt + 1)
  return(bed.tmp)
})
# Write to output ---------------------------------------------------------
outdir <- paste0("/home/jyeung/hub_oudenaarden/jyeung/data/WKM_BM_merged/from_rstudioserver/bedannotations/MouseBMFromTopics.", topnbins)
dir.create(outdir)
assertthat::assert_that(dir.exists(outdir))
# One headerless tab-separated BED file per celltype.
for (ctype in ctypes.vec){
  print(ctype)
  fname <- paste0("MouseBM_TSS_FromTopics.", ctype, ".bsize_2.bed")
  outf <- file.path(outdir, fname)
  outdat <- tss.bed.lst[[ctype]] %>%
    dplyr::select(chromo, Start2, End2, tx)
  print(head(outdat))
  print(outf)
  fwrite(outdat, file = outf, sep = "\t", col.names = FALSE, na = "NA", quote = FALSE)
}
|
47a5d6614965555589cd3b98f790e83956349211
|
5a4ac3a10eb6ea4e5dc6b0588ce3fa03bf3c175e
|
/Day014/day14.R
|
f5d1dea91fba62866f16e9bcf773cf3d36752252
|
[] |
no_license
|
woons/project_woons
|
9bda2dcf1afebe4c3daf9c20a15605dec9ddbae3
|
3958979aa22ddba7434289792b1544be3f884d95
|
refs/heads/master
| 2021-03-16T08:40:40.350667
| 2018-05-04T05:18:45
| 2018-05-04T05:18:45
| 90,750,693
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,370
|
r
|
day14.R
|
##############################
# Day14 _ Regular Expression
##############################
# Practice script for building regexes with rebus operators (%R%, or(),
# START, END, ANY_CHAR) and applying them with stringr.
library(stringr)
library(rebus)
x <- c("cat", "coat", "scotland", "tic toc")
# Match two characters, where the second is a "t"
str_view(x, pattern = ANY_CHAR %R% "t")
# Match a "t" followed by any character
str_view(x, pattern = "t" %R% ANY_CHAR)
# Match two characters
str_view(x, pattern = ANY_CHAR %R% ANY_CHAR)
# Match a string with exactly three characters
str_view(x, pattern = START %R% ANY_CHAR %R% ANY_CHAR %R% ANY_CHAR %R% END)
# Korean sample strings: the three-character anchor-less pattern matches the
# first three characters of any string with at least three characters.
test <- c("배상재", "배여운 바보입니까?", "임송이 바보네", "다이소")
str_view(test, pattern = START %R% ANY_CHAR %R% ANY_CHAR %R% ANY_CHAR)
####################################################################
#---------Combining with stringr functions--------------------------
####################################################################
# q followed by any character
pattern <- "q" %R% ANY_CHAR
# Test pattern
str_view(c("Quentin", "Kaliq", "Jacques", "Jacqes"), pattern)
# NOTE(review): `boy_names` is used in this section but only (re)defined in
# the Alternation section below — this code presumably expects the large
# babynames dataset (e.g. from a DataCamp exercise) to already be in the
# environment; confirm before running top-to-bottom.
# Find names that have the pattern
names_with_q <- str_subset(boy_names, pattern)
head(names_with_q)
length(names_with_q)
# Find part of name that matches pattern
part_with_q <- str_extract(boy_names, pattern)
table(part_with_q)
# Did any names have the pattern more than once?
count_of_q <- str_count(boy_names, pattern)
table(count_of_q)
# Which babies got these names?
with_q <- str_detect(boy_names, pattern)
# What fraction of babies got these names?
mean(with_q)
############################################################
#---------------Alternation------------------
############################################################
# Small fixed sample; overwrites any earlier `boy_names`.
boy_names <- c("Katherine", "Jeffrey", "Geoffrey", "Deffrey",
               "Kathryn", "Cathleen", "Kathalina")
# Match Jeffrey or Geoffrey
whole_names <- or("Jeffrey", "Geoffrey")
str_view(boy_names, pattern = whole_names, match = TRUE)
# Match Jeffrey or Geoffrey, another way
common_ending <- or("Je", "Geo") %R% "ffrey"
str_view(boy_names, pattern = common_ending, match = TRUE)
# Match with alternate endings
by_parts <- or("Je", "Geo") %R% "ff" %R% or("ry", "ery", "rey", "erey")
str_view(boy_names, pattern = by_parts, match = TRUE)
# Match names that start with Cath or Kath
ckath <- START %R% or("C", "K") %R% "ath"
str_view(boy_names, pattern = ckath, match = TRUE)
|
0474b0a050af388aa44328107f86fa57f8282ab2
|
77c3d4443e4ec9f25ef4c6f2c9bbb6d8d608f007
|
/man/cone_logo_text.Rd
|
959cf9a88c0218d988b3611d11faae9e8647bf98
|
[
"MIT"
] |
permissive
|
phildwalker/TeamBrand
|
9c576407ad64783c39bcc4181ff1301c3deabf09
|
2b338e884b10874b2baa37c40acc2d137f0e84ec
|
refs/heads/main
| 2023-03-27T21:43:18.635726
| 2021-03-18T17:57:24
| 2021-03-18T17:57:24
| 348,366,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 437
|
rd
|
cone_logo_text.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cone_logo_text.R
\name{cone_logo_text}
\alias{cone_logo_text}
\title{Generate Cone Health Stylized Text Logo}
\usage{
cone_logo_text(background = "#F9F9F9")
}
\arguments{
\item{background}{Background color of the logo text. Defaults to
\code{#F9F9F9}, the default background color of charts}
}
\description{
Creates a grid object with Cone Health Text Logo
}
|
f64f5cc07878b8be7e1e822e3706fa8b3887bcc0
|
a57550b1724f3f926526dcfbce86b6fc76e6feb3
|
/R/stanmodels.R
|
e37c6f554ec9ef5596599746ccd746ec735e0867
|
[] |
no_license
|
mhandreae/rstanarm
|
874cdb4266d8cad1832cb29c31e3ab2bea39573b
|
e13a2db260930af139b2ae5a58f539194342e73e
|
refs/heads/master
| 2020-05-29T11:34:36.151733
| 2015-09-22T17:16:38
| 2015-09-22T17:16:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,619
|
r
|
stanmodels.R
|
# This file is part of rstanarm.
# Copyright 2013 Stan Development Team
# rstanarm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rstanarm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rstanarm. If not, see <http://www.gnu.org/licenses/>.
# if you change a .stan file, source() stanmodels.R when the working
# directory is the root of rstanarm/ in order to update the .rda file
# and reduce Build & Reload time

# Directory that holds the .stan program files. When this script is
# source()d with R/ as the working directory, fall back to the sibling
# exec/ directory by rewriting the trailing "R" of getwd().
MODELS_HOME <- "exec"
if (!file.exists(MODELS_HOME)) MODELS_HOME <- sub("R$", "exec", getwd())

# Compile one Stan program from MODELS_HOME.
#
# Args:
#   file_stem:  file name of the .stan program without the extension.
#   model_name: human-readable model name attached to the compiled model.
# Returns: the stanmodel object from rstan::stan_model().
# Note: auto_write = interactive() caches the compiled model to disk only
# in interactive sessions, matching the original per-model calls.
compile_stan_model <- function(file_stem, model_name) {
  rstan::stan_model(file.path(MODELS_HOME, paste0(file_stem, ".stan")),
                    model_name = model_name,
                    auto_write = interactive(),
                    obfuscate_model_name = FALSE)
}

# Same global objects as before, now built through the single helper
# instead of six duplicated stan_model() calls.
stanfit_lm         <- compile_stan_model("lm", "Linear Regression")
stanfit_continuous <- compile_stan_model("continuous", "Continuous GLM")
stanfit_bernoulli  <- compile_stan_model("bernoulli", "Bernoulli GLM")
stanfit_binomial   <- compile_stan_model("binomial", "Binomial GLM")
stanfit_count      <- compile_stan_model("count", "Count GLM")
stanfit_polr       <- compile_stan_model("polr", "Proportional Odds GLM")
|
c4ab77b2ab0c0c8a0815076f2be55d72776a0f43
|
fe1fb5584e8461c3cd8332514ea51cd8b6df991c
|
/Analysis of Financial Data with R 4.r
|
aeaead6bcaff1ded8d3f8249a7d874fcdb8903fb
|
[] |
no_license
|
Allisterh/R-project---Econometrics-Theory-and-Applications
|
370535f138b61522e865a9a6ebceb9c1d93e1dbf
|
e94794e12301274ac72d61caa1c205b303a00996
|
refs/heads/master
| 2023-02-25T15:43:27.028094
| 2021-02-03T19:55:39
| 2021-02-03T19:55:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,109
|
r
|
Analysis of Financial Data with R 4.r
|
> getwd()
[1] "C:/Users/Alexa~Chutian/Documents"
> setwd('C:/#Baruch/Econometrics/Financial Data')
> da = read.table("Data2.txt", header=T)
> head(da)
date intc sp
1 19730131 0.010050 -0.017111
2 19730228 -0.139303 -0.037490
3 19730330 0.069364 -0.001433
4 19730430 0.086486 -0.040800
5 19730531 -0.104478 -0.018884
6 19730629 0.133333 -0.006575
> intc=log(da$intc+1)
> length(intc)
[1] 444
> t.test(intc)
One Sample t-test
data: intc
t = 2.3788, df = 443, p-value = 0.01779
alternative hypothesis: true mean is not equal to 0
95 percent confidence interval:
0.00249032 0.02616428
sample estimates:
mean of x
0.0143273
> SE=1/(sqrt(length(intc)))
> 2*SE
[1] 0.0949158
> acf(intc, lag=12, plot=FALSE)
Autocorrelations of series ‘intc’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.020 0.024 0.082 -0.050 -0.023 0.022 -0.121 -0.085 -0.019 0.026
11 12
-0.064 0.055
> acf(intc, lag=12)
> Box.test(intc, lag=12, type='Ljung')
Box-Ljung test
data: intc
X-squared = 18.676, df = 12, p-value = 0.09665
> acf(intc, lag=24, plot=FALSE)
Autocorrelations of series ‘intc’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.020 0.024 0.082 -0.050 -0.023 0.022 -0.121 -0.085 -0.019 0.026
11 12 13 14 15 16 17 18 19 20 21
-0.064 0.055 -0.053 -0.126 0.020 0.007 -0.021 -0.013 0.056 -0.011 0.084
22 23 24
-0.030 -0.050 0.010
> acf(intc, lag=24)
> Box.test(intc, lag=24, type='Ljung')
Box-Ljung test
data: intc
X-squared = 34.208, df = 24, p-value = 0.08103
> SE=1/(sqrt(length(intc)))
> 2*SE
[1] 0.0949158
> acf(abs(intc), lag=24, plot=FALSE)
Autocorrelations of series ‘abs(intc)’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.196 0.176 0.187 0.155 0.182 0.144 0.134 0.142 0.133 0.125
11 12 13 14 15 16 17 18 19 20 21
0.056 0.141 0.136 0.109 0.051 0.038 0.026 -0.004 0.095 0.034 0.032
22 23 24
-0.033 -0.049 0.026
> acf(abs(intc), lag=24)
> Box.test(abs(intc), lag=12, type='Ljung')
Box-Ljung test
data: abs(intc)
X-squared = 124.91, df = 12, p-value < 2.2e-16
> acf((intc^2), lag=24, plot=FALSE)
Autocorrelations of series ‘(intc^2)’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.151 0.197 0.201 0.137 0.122 0.099 0.138 0.081 0.090 0.059
11 12 13 14 15 16 17 18 19 20 21
0.018 0.189 0.064 0.142 0.075 0.005 0.010 0.001 0.049 0.001 0.071
22 23 24
-0.014 -0.035 0.007
> acf((intc^2), lag=24)
> Box.test((intc^2), lag=12, type='Ljung')
Box-Ljung test
data: (intc^2)
X-squared = 98.783, df = 12, p-value = 9.992e-16
> y = intc - mean(intc)
> Box.test(y^2, lag=12, type='Ljung')
Box-Ljung test
data: y^2
X-squared = 92.939, df = 12, p-value = 1.332e-14
> install.packages("FinTS")
Installing package into ‘C:/Users/Alexa~Chutian/Documents/R/win-library/3.4’
(as ‘lib’ is unspecified)
--- Please select a CRAN mirror for use in this session ---
also installing the dependency ‘zoo’
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/zoo_1.8-0.zip'
Content type 'application/zip' length 901320 bytes (880 KB)
downloaded 880 KB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/FinTS_0.4-5.zip'
Content type 'application/zip' length 3699164 bytes (3.5 MB)
downloaded 3.5 MB
package ‘zoo’ successfully unpacked and MD5 sums checked
package ‘FinTS’ successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\Alexa~Chutian\AppData\Local\Temp\RtmponfCxu\downloaded_packages
> library(FinTS)
Loading required package: zoo
Attaching package: ‘zoo’
The following objects are masked from ‘package:base’:
as.Date, as.Date.numeric
> ArchTest(y, lags=12, demean=FALSE)
ARCH LM-test; Null hypothesis: no ARCH effects
data: y
Chi-squared = 53.901, df = 12, p-value = 2.847e-07
> SE=1/(sqrt(length(y)))
> 2*SE
[1] 0.0949158
> acf((y^2), lag=24, plot=FALSE)
Autocorrelations of series ‘(y^2)’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.145 0.208 0.196 0.133 0.116 0.088 0.121 0.076 0.083 0.060
11 12 13 14 15 16 17 18 19 20 21
0.009 0.184 0.052 0.125 0.061 0.006 0.010 -0.007 0.050 -0.005 0.081
22 23 24
-0.012 -0.033 0.014
> acf((y^2), lag=24)
> pacf((y^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(y^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
0.145 0.190 0.152 0.062 0.037 0.014 0.062 0.016 0.025 0.001 -0.046
12 13 14 15 16 17 18 19 20 21 22
0.161 0.009 0.065 -0.019 -0.070 -0.046 -0.026 0.039 -0.013 0.066 -0.042
23 24
-0.044 -0.015
> pacf((y^2), lag=24)
> install.packages("fGarch")
Installing package into ‘C:/Users/Alexa~Chutian/Documents/R/win-library/3.4’
(as ‘lib’ is unspecified)
also installing the dependencies ‘gss’, ‘stabledist’, ‘timeDate’, ‘timeSeries’, ‘fBasics’
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/gss_2.1-7.zip'
Content type 'application/zip' length 875570 bytes (855 KB)
downloaded 855 KB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/stabledist_0.7-1.zip'
Content type 'application/zip' length 42044 bytes (41 KB)
downloaded 41 KB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/timeDate_3012.100.zip'
Content type 'application/zip' length 805561 bytes (786 KB)
downloaded 786 KB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/timeSeries_3022.101.2.zip'
Content type 'application/zip' length 1618338 bytes (1.5 MB)
downloaded 1.5 MB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/fBasics_3011.87.zip'
Content type 'application/zip' length 1557651 bytes (1.5 MB)
downloaded 1.5 MB
trying URL 'https://cran.cnr.berkeley.edu/bin/windows/contrib/3.4/fGarch_3010.82.1.zip'
Content type 'application/zip' length 447671 bytes (437 KB)
downloaded 437 KB
package ‘gss’ successfully unpacked and MD5 sums checked
package ‘stabledist’ successfully unpacked and MD5 sums checked
package ‘timeDate’ successfully unpacked and MD5 sums checked
package ‘timeSeries’ successfully unpacked and MD5 sums checked
package ‘fBasics’ successfully unpacked and MD5 sums checked
package ‘fGarch’ successfully unpacked and MD5 sums checked
The downloaded binary packages are in
C:\Users\Alexa~Chutian\AppData\Local\Temp\RtmponfCxu\downloaded_packages
> library(fGarch)
Loading required package: timeDate
Loading required package: timeSeries
Attaching package: ‘timeSeries’
The following object is masked from ‘package:zoo’:
time<-
Loading required package: fBasics
Rmetrics Package fBasics
Analysing Markets and calculating Basic Statistics
Copyright (C) 2005-2014 Rmetrics Association Zurich
Educational Software for Financial Engineering and Computational Science
Rmetrics is free software and comes with ABSOLUTELY NO WARRANTY.
https://www.rmetrics.org --- Mail to: info@rmetrics.org
> m3_1=garchFit(~1+garch(3,0), data=intc, trace=F)
> summary(m3_1)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(3, 0), data = intc, trace = F)
Mean and Variance Equation:
data ~ 1 + garch(3, 0)
<environment: 0x0000000008782dc0>
[data = intc]
Conditional Distribution:
norm
Coefficient(s):
mu omega alpha1 alpha2 alpha3
0.012567 0.010421 0.232889 0.075069 0.051994
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.012567 0.005515 2.279 0.0227 *
omega 0.010421 0.001238 8.418 <2e-16 ***
alpha1 0.232889 0.111541 2.088 0.0368 *
alpha2 0.075069 0.047305 1.587 0.1125
alpha3 0.051994 0.045139 1.152 0.2494
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
303.9607 normalized: 0.6845963
Description:
Thu May 11 20:29:59 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 203.362 0
Shapiro-Wilk Test R W 0.9635971 4.898647e-09
Ljung-Box Test R Q(10) 9.260782 0.5075463
Ljung-Box Test R Q(15) 19.36748 0.1975619
Ljung-Box Test R Q(20) 20.46983 0.4289059
Ljung-Box Test R^2 Q(10) 7.322136 0.6947234
Ljung-Box Test R^2 Q(15) 27.41532 0.02552908
Ljung-Box Test R^2 Q(20) 28.15113 0.1058698
LM Arch Test R TR^2 25.23347 0.01375447
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.346670 -1.300546 -1.346920 -1.328481
> m1_1=garchFit(~1+garch(1,0), data=intc, trace=F)
> summary(m1_1)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 0), data = intc, trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 0)
<environment: 0x000000000755f7f8>
[data = intc]
Conditional Distribution:
norm
Coefficient(s):
mu omega alpha1
0.013130 0.011046 0.374976
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.013130 0.005318 2.469 0.01355 *
omega 0.011046 0.001196 9.238 < 2e-16 ***
alpha1 0.374976 0.112620 3.330 0.00087 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
299.9247 normalized: 0.675506
Description:
Thu May 11 20:31:45 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 144.3783 0
Shapiro-Wilk Test R W 0.9678175 2.670321e-08
Ljung-Box Test R Q(10) 12.12248 0.2769429
Ljung-Box Test R Q(15) 22.30705 0.1000019
Ljung-Box Test R Q(20) 24.33412 0.2281016
Ljung-Box Test R^2 Q(10) 16.57807 0.08423723
Ljung-Box Test R^2 Q(15) 37.44349 0.001089733
Ljung-Box Test R^2 Q(20) 38.81395 0.007031558
LM Arch Test R TR^2 27.32897 0.006926821
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.337499 -1.309824 -1.337589 -1.326585
> m3_2=garchFit(~1+garch(3,0), data=intc, trace=F, cond.dist="std")
> summary(m3_2)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(3, 0), data = intc, cond.dist = "std",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(3, 0)
<environment: 0x0000000012c437b8>
[data = intc]
Conditional Distribution:
std
Coefficient(s):
mu omega alpha1 alpha2 alpha3 shape
0.018215 0.009887 0.145692 0.115486 0.111302 5.974097
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.018215 0.005216 3.492 0.000479 ***
omega 0.009887 0.001457 6.784 1.17e-11 ***
alpha1 0.145692 0.093614 1.556 0.119634
alpha2 0.115486 0.067772 1.704 0.088374 .
alpha3 0.111302 0.064311 1.731 0.083509 .
shape 5.974097 1.466776 4.073 4.64e-05 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
321.5741 normalized: 0.724266
Description:
Thu May 11 20:32:26 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 266.7279 0
Shapiro-Wilk Test R W 0.9589488 8.639001e-10
Ljung-Box Test R Q(10) 8.904513 0.541189
Ljung-Box Test R Q(15) 19.17938 0.2057202
Ljung-Box Test R Q(20) 20.06478 0.4538842
Ljung-Box Test R^2 Q(10) 6.04517 0.8114541
Ljung-Box Test R^2 Q(15) 25.73662 0.04088386
Ljung-Box Test R^2 Q(20) 26.72014 0.1433229
LM Arch Test R TR^2 25.1032 0.01434135
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.421505 -1.366156 -1.421864 -1.399678
> m1_2=garchFit(~1+garch(1,0), data=intc, trace=F, cond.dist="std")
> summary(m1_2)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 0), data = intc, cond.dist = "std",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 0)
<environment: 0x00000000053d2160>
[data = intc]
Conditional Distribution:
std
Coefficient(s):
mu omega alpha1 shape
0.017202 0.011816 0.277476 5.970266
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.017202 0.005195 3.311 0.000929 ***
omega 0.011816 0.001560 7.574 3.62e-14 ***
alpha1 0.277476 0.107183 2.589 0.009631 **
shape 5.970266 1.529524 3.903 9.49e-05 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
315.0899 normalized: 0.709662
Description:
Thu May 11 20:32:37 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 157.7799 0
Shapiro-Wilk Test R W 0.9663975 1.488224e-08
Ljung-Box Test R Q(10) 12.8594 0.2316396
Ljung-Box Test R Q(15) 23.40632 0.07588561
Ljung-Box Test R Q(20) 25.374 0.1874956
Ljung-Box Test R^2 Q(10) 19.96092 0.02962445
Ljung-Box Test R^2 Q(15) 42.55549 0.0001845089
Ljung-Box Test R^2 Q(20) 44.06739 0.00147397
LM Arch Test R TR^2 29.76071 0.003033508
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.401306 -1.364407 -1.401466 -1.386755
> m3_3=garchFit(~1+garch(3,0), data=intc, trace=F, cond.dist="sstd")
> summary(m3_3)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(3, 0), data = intc, cond.dist = "sstd",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(3, 0)
<environment: 0x0000000009224960>
[data = intc]
Conditional Distribution:
sstd
Coefficient(s):
mu omega alpha1 alpha2 alpha3 skew shape
0.0150283 0.0098585 0.1593526 0.1144401 0.0955587 0.8888927 6.4582365
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.015028 0.005510 2.727 0.006382 **
omega 0.009858 0.001397 7.055 1.72e-12 ***
alpha1 0.159353 0.092535 1.722 0.085056 .
alpha2 0.114440 0.067131 1.705 0.088244 .
alpha3 0.095559 0.059347 1.610 0.107358
skew 0.888893 0.065457 13.580 < 2e-16 ***
shape 6.458236 1.700553 3.798 0.000146 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
322.8954 normalized: 0.7272418
Description:
Thu May 11 20:32:49 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 252.0227 0
Shapiro-Wilk Test R W 0.9599619 1.247534e-09
Ljung-Box Test R Q(10) 8.835075 0.547824
Ljung-Box Test R Q(15) 19.04072 0.2118947
Ljung-Box Test R Q(20) 19.94742 0.461223
Ljung-Box Test R^2 Q(10) 6.212811 0.7970781
Ljung-Box Test R^2 Q(15) 26.08523 0.03713634
Ljung-Box Test R^2 Q(20) 26.97459 0.1359805
LM Arch Test R TR^2 25.27191 0.01358566
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.422952 -1.358378 -1.423439 -1.397487
> m1_3=garchFit(~1+garch(1,0), data=intc, trace=F, cond.dist="sstd")
> summary(m1_3)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 0), data = intc, cond.dist = "sstd",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 0)
<environment: 0x0000000009306118>
[data = intc]
Conditional Distribution:
sstd
Coefficient(s):
mu omega alpha1 skew shape
0.013850 0.011659 0.284494 0.877621 6.523036
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.013850 0.005460 2.536 0.011197 *
omega 0.011659 0.001472 7.920 2.44e-15 ***
alpha1 0.284494 0.101951 2.790 0.005263 **
skew 0.877621 0.061376 14.299 < 2e-16 ***
shape 6.523036 1.811716 3.600 0.000318 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
316.9137 normalized: 0.7137695
Description:
Thu May 11 20:32:58 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 156.6034 0
Shapiro-Wilk Test R W 0.9663426 1.455423e-08
Ljung-Box Test R Q(10) 12.79133 0.235574
Ljung-Box Test R Q(15) 23.34224 0.07713885
Ljung-Box Test R Q(20) 25.2944 0.1903993
Ljung-Box Test R^2 Q(10) 20.033 0.02894214
Ljung-Box Test R^2 Q(15) 42.96521 0.0001594213
Ljung-Box Test R^2 Q(20) 44.31549 0.001365164
LM Arch Test R TR^2 30.02046 0.002772693
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.405017 -1.358892 -1.405266 -1.386827
> r_m1_1=residuals(m1_1,standardize=T)
> SE=1/(sqrt(length(y)))
> 2*SE
[1] 0.0949158
> acf(r_m1_1, lag=24)
> acf(r_m1_1, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m1_1’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.012 0.038 0.089 -0.056 -0.029 0.019 -0.083 -0.076 -0.012 0.018
11 12 13 14 15 16 17 18 19 20 21
-0.036 0.057 -0.055 -0.121 -0.007 -0.010 -0.021 -0.022 0.058 0.004 0.084
22 23 24
-0.045 -0.046 0.006
> pacf((r_m1_1^2), lag=24)
> pacf((r_m1_1^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m1_1^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.054 0.105 0.052 0.045 0.036 0.031 0.034 0.043 0.083 0.045 -0.063
12 13 14 15 16 17 18 19 20 21 22
0.157 0.046 0.063 -0.044 -0.018 -0.015 -0.037 0.029 -0.016 0.119 -0.010
23 24
-0.061 -0.028
> Box.test(r_m1_1, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_1
X-squared = 14.199, df = 12, p-value = 0.2882
> Box.test(r_m1_1^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_1^2
X-squared = 32.438, df = 12, p-value = 0.001184
> r_m3_1=residuals(m3_1,standardize=T)
> SE=1/(sqrt(length(y)))
> 2*SE
[1] 0.0949158
> acf(r_m3_1, lag=24)
> acf(r_m3_1, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m3_1’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.020 0.017 0.065 -0.045 -0.019 0.023 -0.079 -0.077 -0.003 0.021
11 12 13 14 15 16 17 18 19 20 21
-0.027 0.067 -0.065 -0.112 -0.003 -0.006 -0.014 -0.029 0.036 0.001 0.082
22 23 24
-0.054 -0.051 0.009
> pacf((r_m3_1^2), lag=24)
> pacf((r_m3_1^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m3_1^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.038 0.014 0.003 0.039 0.027 0.021 0.036 0.061 0.086 0.012 -0.047
12 13 14 15 16 17 18 19 20 21 22
0.187 0.042 0.047 -0.016 -0.017 -0.025 -0.042 0.019 -0.020 0.125 -0.012
23 24
-0.038 -0.027
> Box.test(r_m3_1, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_1
X-squared = 11.623, df = 12, p-value = 0.4764
> Box.test(r_m3_1^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_1^2
X-squared = 25.471, df = 12, p-value = 0.01274
> r_m1_2=residuals(m1_2,standardize=T)
> acf(r_m1_2, lag=24)
> acf(r_m1_2, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m1_2’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.013 0.035 0.092 -0.057 -0.028 0.020 -0.087 -0.077 -0.015 0.020
11 12 13 14 15 16 17 18 19 20 21
-0.041 0.056 -0.052 -0.124 -0.004 -0.006 -0.020 -0.022 0.058 0.001 0.085
22 23 24
-0.042 -0.048 0.007
> pacf((r_m1_2^2), lag=24)
> pacf((r_m1_2^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m1_2^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.036 0.125 0.061 0.051 0.044 0.033 0.038 0.043 0.072 0.042 -0.063
12 13 14 15 16 17 18 19 20 21 22
0.162 0.041 0.062 -0.042 -0.028 -0.017 -0.036 0.033 -0.017 0.112 -0.016
23 24
-0.061 -0.028
> Box.test(r_m1_2, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_2
X-squared = 15.036, df = 12, p-value = 0.2395
> Box.test(r_m1_2^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_2^2
X-squared = 36.959, df = 12, p-value = 0.0002268
> r_m3_2=residuals(m3_2,standardize=T)
> acf(r_m3_2, lag=24)
> acf(r_m3_2, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m3_2’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.029 0.011 0.052 -0.040 -0.016 0.025 -0.081 -0.080 0.001 0.023
11 12 13 14 15 16 17 18 19 20 21
-0.023 0.067 -0.072 -0.110 -0.002 -0.001 -0.008 -0.035 0.025 0.000 0.082
22 23 24
-0.057 -0.054 0.009
> pacf((r_m3_2^2), lag=24)
> pacf((r_m3_2^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m3_2^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.027 -0.008 -0.022 0.033 0.021 0.008 0.033 0.063 0.080 0.006 -0.040
12 13 14 15 16 17 18 19 20 21 22
0.196 0.033 0.035 -0.001 -0.021 -0.024 -0.044 0.021 -0.020 0.130 -0.018
23 24
-0.030 -0.026
> Box.test(r_m3_2, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_2
X-squared = 11.207, df = 12, p-value = 0.5113
> Box.test(r_m3_2^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_2^2
X-squared = 24.736, df = 12, p-value = 0.01612
> r_m1_3=residuals(m1_3,standardize=T)
> acf(r_m1_3, lag=24)
> acf(r_m1_3, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m1_3’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.014 0.036 0.092 -0.057 -0.028 0.019 -0.087 -0.077 -0.014 0.019
11 12 13 14 15 16 17 18 19 20 21
-0.040 0.055 -0.054 -0.124 -0.004 -0.007 -0.021 -0.021 0.057 0.001 0.085
22 23 24
-0.042 -0.047 0.008
> pacf((r_m1_3^2), lag=24)
> pacf((r_m1_3^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m1_3^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.037 0.124 0.061 0.050 0.043 0.034 0.040 0.043 0.075 0.040 -0.063
12 13 14 15 16 17 18 19 20 21 22
0.163 0.042 0.065 -0.043 -0.029 -0.018 -0.034 0.029 -0.018 0.109 -0.015
23 24
-0.061 -0.028
> Box.test(r_m1_3, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_3
X-squared = 14.94, df = 12, p-value = 0.2447
> Box.test(r_m1_3^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m1_3^2
X-squared = 37.056, df = 12, p-value = 0.0002188
> r_m3_3=residuals(m3_2,standardize=T)
> acf(r_m3_3, lag=24)
> acf(r_m3_3, lag=24, plot=FALSE)
Autocorrelations of series ‘r_m3_3’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.029 0.011 0.052 -0.040 -0.016 0.025 -0.081 -0.080 0.001 0.023
11 12 13 14 15 16 17 18 19 20 21
-0.023 0.067 -0.072 -0.110 -0.002 -0.001 -0.008 -0.035 0.025 0.000 0.082
22 23 24
-0.057 -0.054 0.009
> pacf((r_m3_3^2), lag=24)
> pacf((r_m3_3^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_m3_3^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.027 -0.008 -0.022 0.033 0.021 0.008 0.033 0.063 0.080 0.006 -0.040
12 13 14 15 16 17 18 19 20 21 22
0.196 0.033 0.035 -0.001 -0.021 -0.024 -0.044 0.021 -0.020 0.130 -0.018
23 24
-0.030 -0.026
> Box.test(r_m3_3, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_3
X-squared = 11.207, df = 12, p-value = 0.5113
> Box.test(r_m3_3^2, lag=12, type='Ljung')
Box-Ljung test
data: r_m3_3^2
X-squared = 24.736, df = 12, p-value = 0.01612
> mm1=garchFit(~1+garch(1,1), data= intc, trace=F)
> summary(mm1)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 1), data = intc, trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 1)
<environment: 0x0000000008c52330>
[data = intc]
Conditional Distribution:
norm
Coefficient(s):
mu omega alpha1 beta1
0.01126568 0.00091902 0.08643831 0.85258554
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.0112657 0.0053931 2.089 0.03672 *
omega 0.0009190 0.0003888 2.364 0.01808 *
alpha1 0.0864383 0.0265439 3.256 0.00113 **
beta1 0.8525855 0.0394322 21.622 < 2e-16 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
312.3307 normalized: 0.7034475
Description:
Thu May 11 20:37:37 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 174.904 0
Shapiro-Wilk Test R W 0.9709615 1.030282e-07
Ljung-Box Test R Q(10) 8.016844 0.6271916
Ljung-Box Test R Q(15) 15.5006 0.4159946
Ljung-Box Test R Q(20) 16.41549 0.6905368
Ljung-Box Test R^2 Q(10) 0.8746345 0.9999072
Ljung-Box Test R^2 Q(15) 11.35935 0.7267295
Ljung-Box Test R^2 Q(20) 12.55994 0.8954573
LM Arch Test R TR^2 10.51401 0.5709617
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.388877 -1.351978 -1.389037 -1.374326
> mm2=garchFit(~1+garch(1,1), data= intc, trace=F, cond.dist="std")
> summary(mm2)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 1), data = intc, cond.dist = "std",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 1)
<environment: 0x00000000091b52a8>
[data = intc]
Conditional Distribution:
std
Coefficient(s):
mu omega alpha1 beta1 shape
0.0165075 0.0011576 0.1059030 0.8171313 6.7723503
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.0165075 0.0051031 3.235 0.001217 **
omega 0.0011576 0.0005782 2.002 0.045286 *
alpha1 0.1059030 0.0372047 2.846 0.004420 **
beta1 0.8171313 0.0580141 14.085 < 2e-16 ***
shape 6.7723503 1.8572388 3.646 0.000266 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
326.2264 normalized: 0.734744
Description:
Thu May 11 20:37:46 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 203.4933 0
Shapiro-Wilk Test R W 0.9687607 3.970603e-08
Ljung-Box Test R Q(10) 7.877778 0.6407741
Ljung-Box Test R Q(15) 15.5522 0.4124197
Ljung-Box Test R Q(20) 16.50475 0.6848581
Ljung-Box Test R^2 Q(10) 1.066054 0.9997694
Ljung-Box Test R^2 Q(15) 11.49875 0.7165045
Ljung-Box Test R^2 Q(20) 12.61496 0.8932865
LM Arch Test R TR^2 10.80739 0.5454935
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.446966 -1.400841 -1.447215 -1.428776
> mm3=garchFit(~1+garch(1,1), data= intc, trace=F, cond.dist="sstd")
> summary(mm3)
Title:
GARCH Modelling
Call:
garchFit(formula = ~1 + garch(1, 1), data = intc, cond.dist = "sstd",
trace = F)
Mean and Variance Equation:
data ~ 1 + garch(1, 1)
<environment: 0x0000000008b35b10>
[data = intc]
Conditional Distribution:
sstd
Coefficient(s):
mu omega alpha1 beta1 skew shape
0.0133343 0.0011621 0.1049289 0.8177875 0.8717220 7.2344225
Std. Errors:
based on Hessian
Error Analysis:
Estimate Std. Error t value Pr(>|t|)
mu 0.0133343 0.0053430 2.496 0.012572 *
omega 0.0011621 0.0005587 2.080 0.037519 *
alpha1 0.1049289 0.0358860 2.924 0.003456 **
beta1 0.8177875 0.0559863 14.607 < 2e-16 ***
skew 0.8717220 0.0629129 13.856 < 2e-16 ***
shape 7.2344225 2.1018042 3.442 0.000577 ***
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
Log Likelihood:
328.0995 normalized: 0.7389628
Description:
Thu May 11 20:37:55 2017 by user: Alexa~Chutian
Standardised Residuals Tests:
Statistic p-Value
Jarque-Bera Test R Chi^2 195.2178 0
Shapiro-Wilk Test R W 0.9692506 4.892686e-08
Ljung-Box Test R Q(10) 7.882126 0.6403496
Ljung-Box Test R Q(15) 15.62496 0.4074054
Ljung-Box Test R Q(20) 16.5774 0.6802193
Ljung-Box Test R^2 Q(10) 1.078429 0.9997569
Ljung-Box Test R^2 Q(15) 11.95155 0.6826923
Ljung-Box Test R^2 Q(20) 13.03792 0.8757513
LM Arch Test R TR^2 11.18826 0.5128574
Information Criterion Statistics:
AIC BIC SIC HQIC
-1.450899 -1.395550 -1.451257 -1.429071
> r_mm1=residuals(mm1,standardize=T)
> SE=1/(sqrt(length(y)))
> 2*SE
[1] 0.0949158
> acf(r_mm1, lag=24)
> acf(r_mm1, lag=24, plot=FALSE)
Autocorrelations of series ‘r_mm1’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.034 0.028 0.057 -0.036 -0.008 0.021 -0.082 -0.061 -0.010 0.011
11 12 13 14 15 16 17 18 19 20 21
-0.024 0.050 -0.069 -0.092 -0.003 -0.009 -0.009 -0.028 0.031 0.010 0.076
22 23 24
-0.065 -0.046 0.009
> pacf((r_mm1^2), lag=24)
> pacf((r_mm1^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_mm1^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.017 -0.003 0.002 -0.006 0.002 -0.006 -0.001 0.033 0.019 -0.012 -0.053
12 13 14 15 16 17 18 19 20 21 22
0.129 0.025 0.054 -0.010 -0.008 -0.020 -0.041 0.019 -0.020 0.088 -0.017
23 24
-0.032 0.001
> Box.test(r_mm1, lag=12, type='Ljung')
Box-Ljung test
data: r_mm1
X-squared = 9.4104, df = 12, p-value = 0.6675
> Box.test(r_mm1^2, lag=12, type='Ljung')
Box-Ljung test
data: r_mm1^2
X-squared = 9.8678, df = 12, p-value = 0.6276
> r_mm2=residuals(mm2,standardize=T)
> acf(r_mm2, lag=24)
> acf(r_mm2, lag=24, plot=FALSE)
Autocorrelations of series ‘r_mm2’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.035 0.028 0.055 -0.035 -0.008 0.021 -0.081 -0.063 -0.010 0.012
11 12 13 14 15 16 17 18 19 20 21
-0.023 0.051 -0.070 -0.093 -0.004 -0.010 -0.007 -0.031 0.030 0.009 0.077
22 23 24
-0.066 -0.046 0.009
> pacf((r_mm2^2), lag=24)
> pacf((r_mm2^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_mm2^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.025 -0.010 -0.005 -0.009 -0.004 -0.012 -0.006 0.031 0.018 -0.010 -0.053
12 13 14 15 16 17 18 19 20 21 22
0.131 0.025 0.045 -0.013 -0.004 -0.017 -0.039 0.021 -0.019 0.101 -0.015
23 24
-0.030 -0.001
> Box.test(r_mm2, lag=12, type='Ljung')
Box-Ljung test
data: r_mm2
X-squared = 9.3431, df = 12, p-value = 0.6734
> Box.test(r_mm2^2, lag=12, type='Ljung')
Box-Ljung test
data: r_mm2^2
X-squared = 10.444, df = 12, p-value = 0.5771
> r_mm3=residuals(mm3,standardize=T)
> acf(r_mm3, lag=24)
> acf(r_mm3, lag=24, plot=FALSE)
Autocorrelations of series ‘r_mm3’, by lag
0 1 2 3 4 5 6 7 8 9 10
1.000 0.035 0.027 0.055 -0.035 -0.008 0.022 -0.081 -0.063 -0.009 0.012
11 12 13 14 15 16 17 18 19 20 21
-0.023 0.051 -0.070 -0.094 -0.004 -0.010 -0.008 -0.030 0.030 0.009 0.077
22 23 24
-0.066 -0.046 0.009
> pacf((r_mm3^2), lag=24)
> pacf((r_mm3^2), lag=24, plot=FALSE)
Partial autocorrelations of series ‘(r_mm3^2)’, by lag
1 2 3 4 5 6 7 8 9 10 11
-0.024 -0.010 -0.004 -0.010 -0.003 -0.010 -0.004 0.032 0.019 -0.012 -0.053
12 13 14 15 16 17 18 19 20 21 22
0.133 0.027 0.050 -0.011 -0.005 -0.018 -0.038 0.020 -0.019 0.098 -0.016
23 24
-0.030 -0.002
> Box.test(r_mm3, lag=12, type='Ljung')
Box-Ljung test
data: r_mm3
X-squared = 9.3388, df = 12, p-value = 0.6738
> Box.test(r_mm3^2, lag=12, type='Ljung')
Box-Ljung test
data: r_mm3^2
X-squared = 10.703, df = 12, p-value = 0.5545
|
5ea97dd5eac69c44feff2b6bca030c3c6ec98c08
|
09f649e97f4274903bec4f8466d456234c0de222
|
/test/mlr_codes/mlr_ksvm_undersampling_LOO_0.2.R
|
b3eb4d24c7aca35eeebb42c426b4b74b9e3deb5e
|
[] |
no_license
|
VeronicaFung/DComboNet
|
d33ddb2303dc827f79a90acf9e5320328e178723
|
545417d7d4181df455b2d119ee767f9921114db9
|
refs/heads/master
| 2023-06-05T15:36:50.203124
| 2021-06-18T09:23:47
| 2021-06-18T09:23:47
| 256,992,605
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,345
|
r
|
mlr_ksvm_undersampling_LOO_0.2.R
|
# Train a kernel SVM (kernlab::ksvm via mlr) on drug-combination features,
# balancing classes by undersampling the majority class and evaluating the
# model with leave-one-out cross-validation.
setwd('/picb/bigdata/project/FengFYM/mlr_models/')
options(stringsAsFactors = FALSE)

library(mlr)
library(mlrMBO)
set.seed(123)

# Project-local tuning helpers; csvm_tuning() is defined in ksvm_learner.R.
source('scripts/learner_tuning_v2/elasticnet_learner.R')
source('scripts/learner_tuning_v2/glm_learner.R')
source('scripts/learner_tuning_v2/ksvm_learner.R')
source('scripts/learner_tuning_v2/naive_bayes_learner.R')
source('scripts/learner_tuning_v2/rf_learner.R')
source('scripts/learner_tuning_v2/xgboost_learner.R')

# Keep only drug pairs whose integrated score passes the 0.2 threshold.
features <- read.csv('data/features2.csv')
features <- features[features$integrated_score2 >= 0.2, ]
features$ID <- paste(features$A, features$B, sep = '_')
features$TAG <- factor(features$TAG, levels = c('P', 'N'))
features <- features[order(features$TAG, decreasing = FALSE), ]

task <- makeClassifTask(id = "features", data = features[c(3, 13:16, 17)], target = "TAG")

# Class counts: positives first ('P'), then negatives ('N').
P_num <- task$task.desc$class.distribution[[1]]
N_num <- task$task.desc$class.distribution[[2]]

# Leave-one-out resampling.
rdesc <- makeResampleDesc("LOO")

# Hyperparameter tuning; returns the optimal setting including the kernel.
ksvm_opt_ps <- csvm_tuning(task)

# Kernel-specific hyperparameters beyond C; vanilladot takes none.
# NOTE(review): the original set sigma for polydot as well -- kept for
# behavioural parity; confirm kernlab actually uses it for that kernel.
kernel_pars <- switch(ksvm_opt_ps$kernel,
  rbfdot     = list(sigma = ksvm_opt_ps$sigma),
  vanilladot = list(),
  polydot    = list(degree = ksvm_opt_ps$degree,
                    scale  = ksvm_opt_ps$scale,
                    offset = ksvm_opt_ps$offset,
                    sigma  = ksvm_opt_ps$sigma),
  laplacedot = list(sigma = ksvm_opt_ps$sigma),
  besseldot  = list(sigma = ksvm_opt_ps$sigma,
                    order = ksvm_opt_ps$order)
)

base_learner <- makeLearner("classif.ksvm",
                            predict.type = "prob",
                            fix.factors.prediction = TRUE)

# Apply the tuned hyperparameters. (This also fixes the original vanilladot
# branch, which had a dangling trailing comma that passed an empty argument
# to setHyperPars.)
mod_ksvm_opt <- do.call(setHyperPars,
                        c(list(learner = base_learner,
                               C = ksvm_opt_ps$C,
                               kernel = ksvm_opt_ps$kernel),
                          kernel_pars))

# Undersample the majority (negative) class down to a 1:1 ratio.
mod_ksvm_opt <- makeUndersampleWrapper(mod_ksvm_opt, usw.rate = 1 / (N_num / P_num))

# Leave-one-out cross-validation to assess model performance.
r_ksvm <- resample(mod_ksvm_opt,
                   task,
                   rdesc,
                   measures = list(mmce, tpr, fnr, fpr, tnr, acc, auc, f1, timetrain))

save(ksvm_opt_ps, mod_ksvm_opt, r_ksvm, file = 'ksvm_result_LOO_0.2.Rdata')
|
0295309e7de5490385468cc68d0656f9b220c146
|
8cb3d409c80826aea5823fef0f96b9158a372bd5
|
/Training.R
|
1a7403050f7271013ab5719f14b2cdae10081908
|
[] |
no_license
|
vahtykov/r-vvp
|
fea6cf7756d3bb258caa4e768ca84433e350e807
|
617a705f53fffaa70ea9107cecc78a4710b7e6cd
|
refs/heads/master
| 2023-04-13T04:16:01.917107
| 2021-04-25T19:14:49
| 2021-04-25T19:14:49
| 350,828,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
Training.R
|
# Fit linear models of GDP (VVP) on macroeconomic predictors, compare their
# out-of-sample forecast accuracy on a 25% hold-out, and forecast next year.
list.dirs("D:/RData/DIPLOM")
library("dplyr")
library("caret")
library("AER")
library("ggplot2")
library("sandwich")
library("ivpack")
h <- read.csv("D:/RData/DIPLOM/dataFull.csv", header=TRUE, sep=";")
glimpse(h)
# Two candidate specifications fit on the full data set.
OP1 <- lm(VVP ~ SG4Z + X4BR, data = h)
OP2 <- lm(VVP ~ X4BRZ + SNZP, data = h)
in_train <- createDataPartition(y = h$VVP, p=0.75, list=FALSE) # take 75% of the data for training
# For subsequent model evaluation via OLS
h_train <- h[in_train,] # keep only the training-sample observations here
h_test <- h[-in_train,] # for assessing forecast quality: drop the training rows, keep the rest
nrow(h)
nrow(h_train)
nrow(h_test)
model_1 <- lm(data=h_train, VVP ~ SG4Z + X4BR)
model_2 <- lm(data=h_train, VVP ~ X4BRZ + SNZP) # high accuracy
y <- h_test$VVP
# Forecast on the hold-out set
y_hat_1 <- predict(model_1, h_test)
y_hat_1 # Very low forecast accuracy
y_hat_2 <- predict(model_2, h_test)
y_hat_2 # Accuracy is very high. For 2017 the actual GDP = 92101, while the forecast = 92878.077
# Sum of squared forecast errors
sum((y-y_hat_1)^2)
sum((y-y_hat_2)^2)
# One-step-ahead forecast from the better model for assumed predictor values.
nextYear <- data.frame(X4BRZ=2000, SNZP=55000)
nextPredict <- predict(model_2, newdata=nextYear)
nextPredict
|
f88ad7ccc91aaffba9b3b82bb69c3531e6bf76d6
|
c1748fa8115e11b8a09f1891ecc327994dfc90d9
|
/InequalityShiny/server.R
|
f1634bddf52c98a111d58ed00a0a55e2225ba555
|
[] |
no_license
|
codrin-kruijne/Developing-Data-Products-Course-Project
|
8af188b28fd4942198e9da3514d45c21aea9958c
|
014bdaad922fd82f54853ff5e50ff4e9ba2a1d62
|
refs/heads/master
| 2020-03-11T09:26:33.632952
| 2018-04-17T20:22:33
| 2018-04-17T20:22:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(ggplot2)
library(plotly)

# Server logic: fetch OECD GINI data once per session, then render a
# country selector and a per-country GINI time-series plot with optional
# average / minimum / maximum reference lines.
shinyServer(function(input, output) {

  # OECD income-inequality (GINI) observations, 2005-2015.
  gini_data <- read.csv("https://stats.oecd.org/sdmx-json/data/DP_LIVE/.INCOMEINEQ.GINI.INEQ.A/OECD?contentType=csv&detail=code&separator=comma&csv-lang=en&startPeriod=2005&endPeriod=2015")

  country_codes <- unique(gini_data$LOCATION)
  gini_min <- min(gini_data$Value)
  gini_max <- max(gini_data$Value)
  gini_avg <- mean(gini_data$Value)

  # Dropdown populated from the country codes present in the data.
  output$countrySelector <- renderUI({
    selectInput("country", "Select country:", as.list(country_codes))
  })

  output$giniPlot <- renderPlot({
    # Subset to the chosen country; y-axis limits span the full data set so
    # the scale stays comparable across countries.
    selected <- gini_data[gini_data$LOCATION == input$country, ]
    y_limits <- c(round(min(gini_data$Value), 2), round(max(gini_data$Value), 2))

    p <- ggplot(selected, aes(x = TIME, y = Value)) +
      scale_y_continuous(limits = y_limits) +
      geom_line() +
      xlab("Year") +
      ylab("GINI coefficient")

    # Optional dashed reference lines, toggled by checkboxes in the UI.
    if (input$avgGini) p <- p + geom_hline(aes(yintercept = gini_avg), color = "blue", linetype = "dashed")
    if (input$minGini) p <- p + geom_hline(aes(yintercept = gini_min), color = "green", linetype = "dashed")
    if (input$maxGini) p <- p + geom_hline(aes(yintercept = gini_max), color = "red", linetype = "dashed")

    p
  })
})
|
5c25ac8c3246e2f8983e4ebadf958213733e15f0
|
f6ce51c36418153e08d4fb2a843da95e5b0b9031
|
/lab7/R/ridgereg_coef.R
|
ba50303ff52ddcfdf183c7cd4e3ce5971c5f11a2
|
[] |
no_license
|
ClaraSchartner/lab7
|
9ecb1378eeca9765c205d4fca78a340af7830d09
|
8050c59e2ee596438bdfb910f17a0ae032c2bb57
|
refs/heads/master
| 2021-01-10T18:20:04.778799
| 2015-10-19T12:44:58
| 2015-10-19T12:44:58
| 43,873,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
ridgereg_coef.R
|
#' Extract Model Coefficients
#'
#' \code{coef} method for objects of class \code{"ridgereg"}: returns the
#' regression coefficients stored in the fitted model object.
#'
#' @param object an object of class \code{"ridgereg"}.
#' @param ... further arguments passed to or from other methods (unused).
#'
#' @return The coefficients extracted from the model object, as stored in
#'   its \code{coefficients} component.
#'
coef.ridgereg <- function(object, ...){
  # First argument renamed from `x` to `object` to match the stats::coef
  # generic (avoids an S3 signature-mismatch warning); dispatch and
  # positional calls are unaffected.
  object$coefficients
}
|
829e9948166f52d4b17c4f499b09e59fbee1e191
|
69feddab3de98770afbc27ab90563f983eccdd5f
|
/Assignment2.R
|
a110f7eae16658d4c9b2267075b4b91c6d3f186c
|
[] |
no_license
|
DahamLee/Marketing-Analytics
|
11cf4e193a421100a22fd56cb73c6a9d49fccbb5
|
410d1da07d65d1b9b1a1bff1c7b5bfbd7a3f8361
|
refs/heads/master
| 2021-04-12T09:27:44.699544
| 2018-05-05T00:58:32
| 2018-05-05T00:58:32
| 126,731,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,945
|
r
|
Assignment2.R
|
rm(list=ls())
library("Matrix")
library("lme4")
library("MCMCpack")
cc.data = read.csv("/Users/daham/Desktop/Marketing Analysis/assignment2/CreditCard_SOW_Data.csv", header=T)
cc.data$ConsumerID = as.factor(cc.data$ConsumerID)
cc.data$logIncome = log(cc.data$Income)
cc.data$logSowRatio = log(cc.data$WalletShare/(1-cc.data$WalletShare))
cc.re1 = MCMCregress(logSowRatio~History+Balance+Promotion+History:Promotion+logIncome:Promotion, mcmc=6000, data=cc.data)
summary(cc.re1)
#cc.re1[1,1]
#cc.re1[1]
# Plot posterior simulation
head(cc.re1)
plot(cc.re1[,"Promotion"], type="l")
plot(cc.re1[,"Promotion:logIncome"], type="l")
# quantile(cc.re1[, "Promotion"], prob=c(0.025, 0.975))
# auto-correlation
autocorr.plot(cc.re1[,c("Promotion","Promotion:logIncome")])
#autocorr.plot(cc.re1[,"Promotion:logIncome"])
# MCMC HLM
cc.bayeshlm = MCMChregress(fixed=logSowRatio~History+Balance+Promotion+History:Promotion+logIncome:Promotion, random=~Promotion, group="ConsumerID", data=cc.data, r=2, R=diag(2), mcmc=6000)
summary(cc.bayeshlm$mcmc[,1:6])
cc.bayeshlm$mcmc
head(cc.bayeshlm)
plot(cc.bayeshlm$mcmc[,"beta.History"], type="l")
plot(cc.bayeshlm$mcmc[,"beta.Promotion:logIncome"], type="l")
autocorr.plot(cc.bayeshlm$mcmc[,c("beta.History","beta.Promotion:logIncome")])
#autocorr.plot(cc.bayeshlm$mcmc[,"beta.Promotion:logIncome"])
# 3. GLM considering Random Effect
brd.data = read.csv("/Users/daham/Desktop/Marketing Analysis/assignment2/Bank_Retention_Data.csv", header=T)
brd.data$TractID = as.factor(brd.data$TractID)
brd.glm = glm(Churn~Age+Income+HomeVal+Tenure+DirectDeposit+Loan+Dist+MktShare, data=brd.data, family=binomial(link="logit"))
summary(brd.glm)
brd.glmer = glmer(Churn~Age+Income+HomeVal+Tenure+DirectDeposit+Loan+Dist+MktShare+(1|TractID), data=brd.data, family=binomial, glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=100000)))
summary(brd.glmer)
AIC(brd.glm)
BIC(brd.glm)
AIC(brd.glmer)
BIC(brd.glmer)
|
7032b616598156f3e6ad1570659386d5125942bd
|
816247c509847002300485ff792778d607a7c119
|
/R/sim_ipc.R
|
478ebc880537a2d01f588ba79c796920fd9d3c17
|
[] |
no_license
|
mgaldino/line4PPPsim
|
93d93c593e22583e4d00df5371d4b93c9850c2cd
|
a026c6d5f83ffc25712eb3b6f11d13a7f530c98e
|
refs/heads/master
| 2020-04-02T02:29:55.803353
| 2019-02-11T15:28:31
| 2019-02-11T15:28:31
| 153,912,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 816
|
r
|
sim_ipc.R
|
#' @title Simulates fiscal impact of line 4 subway PPP
#'
#' @description This package allows the user to run Monte Carlo simulation to assess the fiscal impact of line 4 PPP in São Paulo.
#'
#' @param ipc_0 A number: base inflation index for the first simulated year.
#' @param ipc_realizado A vector of realized annual inflation rates; pass
#'   \code{NA} when no realized data is available yet.
#'
#' @return A vector of cumulative inflation indices covering 33 years.
#'
#' @examples sim_ipc(ipc_0 = 1.1, ipc_realizado = NA)
#'
#' @export sim_ipc
sim_ipc <- function(ipc_0 = 1.1, ipc_realizado) {
  # BUGFIX(review): the original condition was `sum(is.na(ipc_realizado)) == 0`,
  # which sent NA input (the documented no-data case) into the realized branch,
  # propagating NA through cumprod, and discarded fully-realized data.
  if (all(is.na(ipc_realizado))) {
    # No realized inflation yet: simulate all 33 years from the base level.
    # Annual inflation ~ heavy-tailed t(7) noise with a drift of .05.
    ipc_base <- 1 + rt(32, 7) / 75 + .05 # simular melhor depois. arima, algo assim, simples.
    ipc <- cumprod(c(ipc_0, ipc_base))
  } else {
    # Use the realized rates (dropping NAs) and simulate the remaining years.
    realizado <- ipc_realizado[!is.na(ipc_realizado)]
    n <- length(realizado)
    ipc_base <- 1 + rt(33 - n, 7) / 75 + .05
    ipc <- cumprod(c(1 + realizado, ipc_base))
  }
  return(ipc)
}
|
d374804342ab35577d0ef8ed72a6588acbfd2d75
|
3fb9a252b8ff2ce0611b78a41859eaf5aa075f52
|
/man/Compensation.Rd
|
68eeb720239a9655a2416578a30308c88def98ed
|
[] |
no_license
|
DillonHammill/CytoExploreRData
|
ad23a2e80034b4d722edf697d5849053ee3adb20
|
488edf083092247ad547172906efe6f8c2aa8700
|
refs/heads/master
| 2022-07-22T14:40:50.036551
| 2020-08-27T01:19:51
| 2020-08-27T01:19:51
| 158,751,860
| 0
| 0
| null | 2019-10-11T08:49:28
| 2018-11-22T21:31:30
|
HTML
|
UTF-8
|
R
| false
| true
| 964
|
rd
|
Compensation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CytoExploreRData.R
\docType{data}
\name{Compensation}
\alias{Compensation}
\title{CytoExploreR Compensation Data Set.}
\format{
A \code{flowSet} containing the 7 compensation controls.
}
\source{
Compensation controls used for an in vitro OT-I/II T cell activation
assay.
}
\description{
A collection of single stain compensation controls and an
unstained control used to compensate the Activation data set.
}
\details{
Compensation controls are as follows: \itemize{
\item{\strong{Compensation-7AAD.fcs}}
\item{\strong{Compensation-AF700.fcs}} \item{\strong{Compensation-APC.fcs}}
\item{\strong{Compensation-APC-Cy7.fcs}}
\item{\strong{Compensation-FITC.fcs}} \item{\strong{Compensation-PE.fcs}}
\item{\strong{Compensation-Unstained.fcs}}}
}
\seealso{
\link{CytoExploreRData}
\link{Activation}
\link{Activation_gatingTemplate}
}
\author{
Dillon Hammill (Dillon.Hammill@anu.edu.au)
}
|
41e8b216f0845c6290bcc5a7df66ae3a8f177f3f
|
df33361a1d939a735c33f79514f6dfe6ded15ea9
|
/agecount.R
|
cdbc9fd2e6dbd2357849c7e5e6be3b5892fc6e0b
|
[] |
no_license
|
kgracekennedy/BaltimoreHomicides
|
ffdf7b958dd20bdc19610738a4feb4f7a055b018
|
31bfe9cb9b69fb283c31886c16fce15b94413082
|
refs/heads/master
| 2016-08-06T18:51:57.886538
| 2014-12-17T20:33:35
| 2014-12-17T20:33:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
agecount.R
|
agecount <- function(age = NULL) {
  ## Count Baltimore homicide victims of a given age.
  ##
  ## age: victim age in years to count; must be supplied (non-NULL).
  ## Reads "homicides.txt" from the working directory (one record per line)
  ## and returns the number of records whose extracted age equals `age`.
  ## Check that "age" is non-NULL; else throw error
  if (is.null(age)) {
    stop("argument 'age' must not be NULL", call. = FALSE)
  }
  ## Read "homicides.txt" data file
  homicides = readLines("homicides.txt")
  ## Extract ages of victims; records without a match yield NA.
  # Age appears as either "Age: NN years" or "male, NN years"; capture the
  # number before "years" in group 3.
  r <- regexec("(Age:|male,) +(.*?) years", homicides)
  matches <- regmatches(homicides, r)
  allages <- as.numeric(vapply(
    matches,
    function(m) if (length(m) >= 3) m[3] else NA_character_,
    character(1)
  ))
  ## Return integer containing count of homicides for that age.
  # BUGFIX(review): the original used length(grep(age, allages)), which does
  # substring matching -- age 2 would also count ages 21, 42, etc.
  sum(allages == age, na.rm = TRUE)
}
|
1f0743909716aa65d8c5bea1b972a35d41ed39c9
|
8457643a6fc09b349cc6ff2bf3573dfce9f3b589
|
/cachematrix.R
|
ba0ddbf8e1700a5ceaa233e56ac8e8c79e796cf5
|
[] |
no_license
|
nursharmini/ProgrammingAssignment2
|
ee8d52f84905d7faf19f2d36713e25c5109d6428
|
cfb9dcaba034dd9bb2f13c418fa686a4d953bf98
|
refs/heads/master
| 2021-01-09T09:00:59.934083
| 2015-07-13T06:54:33
| 2015-07-13T06:54:33
| 38,992,607
| 0
| 0
| null | 2015-07-13T05:19:05
| 2015-07-13T05:19:05
| null |
UTF-8
|
R
| false
| false
| 1,785
|
r
|
cachematrix.R
|
## makeCacheMatrix() creates a special "matrix" object that can cache its
## inverse. It returns a list of four closures sharing the stored matrix
## and the cached inverse; cacheSolve() (below) uses this to avoid
## recomputing the inverse repeatedly.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until a value is stored via setinverse().
  inv_cache <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inv_cache <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the cache.
  setinverse <- function(inverse) inv_cache <<- inverse
  # Return the cached inverse (NULL if none has been stored yet).
  getinverse <- function() inv_cache
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve() returns the inverse of the special "matrix" created by
## makeCacheMatrix(). If the inverse is already cached it is returned
## directly (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # No cached value: compute, store, and return the inverse.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
6aa716aea0057ff60e390fcb51c2f8478f711443
|
638dc9da4b99cdce4c32a29c7c672ef0c5863141
|
/man/CST_EnsClustering.Rd
|
a7ca4a9c2d86dd839e6f4316ac0082b274f1c6fa
|
[] |
no_license
|
rpkgs/CSTools
|
887f2d6a31a55b9130b2b2f26880493af584ce1e
|
ab20bc268756ef30668157cebf56246102c94dcd
|
refs/heads/master
| 2023-09-01T15:38:40.114800
| 2021-10-05T06:20:21
| 2021-10-05T06:20:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,709
|
rd
|
CST_EnsClustering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CST_EnsClustering.R
\name{CST_EnsClustering}
\alias{CST_EnsClustering}
\title{Ensemble clustering}
\usage{
CST_EnsClustering(
exp,
time_moment = "mean",
numclus = NULL,
lon_lim = NULL,
lat_lim = NULL,
variance_explained = 80,
numpcs = NULL,
time_dim = NULL,
time_percentile = 90,
cluster_dim = "member",
verbose = F
)
}
\arguments{
\item{exp}{An object of the class 's2dv_cube', containing the variables to be analysed.
Each data object in the list is expected to have an element named \code{$data} with at least two
spatial dimensions named "lon" and "lat", and dimensions "dataset", "member", "ftime", "sdate".}
\item{time_moment}{Decides the moment to be applied to the time dimension. Can be either 'mean' (time mean),
'sd' (standard deviation along time) or 'perc' (a selected percentile on time).
If 'perc' the keyword 'time_percentile' is also used.}
\item{numclus}{Number of clusters (scenarios) to be calculated.
If set to NULL the number of ensemble members divided by 10 is used, with a minimum of 2 and a maximum of 8.}
\item{lon_lim}{List with the two longitude margins in `c(-180,180)` format.}
\item{lat_lim}{List with the two latitude margins.}
\item{variance_explained}{variance (percentage) to be explained by the set of EOFs.
Defaults to 80. Not used if numpcs is specified.}
\item{numpcs}{Number of EOFs retained in the analysis (optional).}
\item{time_dim}{String or character array with name(s) of dimension(s) over which to compute statistics.
If omitted c("ftime", "sdate", "time") are searched in this order.}
\item{time_percentile}{Set the percentile in time you want to analyse (used for `time_moment = "perc").}
\item{cluster_dim}{Dimension along which to cluster. Typically "member" or "sdate".
This can also be a list like c("member", "sdate").}
\item{verbose}{Logical for verbose output}
}
\value{
A list with elements \code{$cluster} (cluster assigned for each member),
\code{$freq} (relative frequency of each cluster), \code{$closest_member}
(representative member for each cluster), \code{$repr_field} (list of fields
for each representative member), \code{composites} (list of mean fields for each cluster),
\code{$lon} (selected longitudes of output fields),
\code{$lat} (selected latitudes of output fields).
}
\description{
This function performs a clustering on members/starting dates
and returns a number of scenarios, with representative members for each of them.
The clustering is performed in a reduced EOF space.
Motivation:
Ensemble forecasts give a probabilistic insight of average weather conditions
on extended timescales, i.e. from sub-seasonal to seasonal and beyond.
With large ensembles, it is often an advantage to be able to group members
according to similar characteristics and to select the most representative member for each cluster.
This can be useful to characterize the most probable forecast scenarios in a multi-model
(or single model) ensemble prediction. This approach, applied at a regional level,
can also be used to identify the subset of ensemble members that best represent the
full range of possible solutions for downscaling applications.
The choice of the ensemble members is made flexible in order to meet the requirements
of specific (regional) climate information products, to be tailored for different regions and user needs.
Description of the tool:
EnsClustering is a cluster analysis tool, based on the k-means algorithm, for ensemble predictions.
The aim is to group ensemble members according to similar characteristics and
to select the most representative member for each cluster.
The user chooses which feature of the data is used to group the ensemble members by clustering:
time mean, maximum, a certain percentile (e.g., 75% as in the examples below),
standard deviation and trend over the time period. For each ensemble member this value
is computed at each grid point, obtaining N lat-lon maps, where N is the number of ensemble members.
The anomaly is computed subtracting the ensemble mean of these maps to each of the single maps.
The anomaly is therefore computed with respect to the ensemble members (and not with respect to the time)
and the Empirical Orthogonal Function (EOF) analysis is applied to these anomaly maps.
Regarding the EOF analysis, the user can choose either how many Principal Components (PCs)
to retain or the percentage of explained variance to keep. After reducing dimensionality via
EOF analysis, k-means analysis is applied using the desired subset of PCs.
The major final outputs are the classification in clusters, i.e. which member belongs
to which cluster (in k-means analysis the number k of clusters needs to be defined
prior to the analysis) and the most representative member for each cluster,
which is the closest member to the cluster centroid.
Other outputs refer to the statistics of clustering: in the PC space, the minimum and
the maximum distance between a member in a cluster and the cluster centroid
(i.e. the closest and the furthest member), the intra-cluster standard
deviation for each cluster (i.e. how much the cluster is compact).
}
\examples{
\donttest{
exp <- lonlat_data$exp
# Example 1: Cluster on all start dates, members and models
res <- CST_EnsClustering(exp, numclus = 3,
cluster_dim = c("member", "dataset", "sdate"))
iclus = res$cluster[2, 1, 3]
print(paste("Cluster of 2. member, 1. dataset, 3. sdate:", iclus))
print(paste("Frequency (numerosity) of cluster (", iclus, ") :", res$freq[iclus]))
library(s2dverification)
PlotEquiMap(res$repr_field[iclus, , ], exp$lon, exp$lat,
filled.continents = FALSE,
toptitle = paste("Representative field of cluster", iclus))
# Example 2: Cluster on members retaining 4 EOFs during
# preliminary dimensional reduction
res <- CST_EnsClustering(exp, numclus = 3, numpcs = 4, cluster_dim = "member")
# Example 3: Cluster on members, retain 80\% of variance during
# preliminary dimensional reduction
res <- CST_EnsClustering(exp, numclus = 3, variance_explained = 80,
cluster_dim = "member")
# Example 4: Compute percentile in time
res <- CST_EnsClustering(exp, numclus = 3, time_percentile = 90,
time_moment = "perc", cluster_dim = "member")
}
}
\author{
Federico Fabiano - ISAC-CNR, \email{f.fabiano@isac.cnr.it}
Ignazio Giuntoli - ISAC-CNR, \email{i.giuntoli@isac.cnr.it}
Danila Volpi - ISAC-CNR, \email{d.volpi@isac.cnr.it}
Paolo Davini - ISAC-CNR, \email{p.davini@isac.cnr.it}
Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it}
}
|
227b109b0dce5d45fe3bd05e6a731802b93e851e
|
b0255d4e54415b6fb1519b8fc0e4d1ca6717b080
|
/man/vec2symmat.Rd
|
4950f8d6fa6441e51d569ac67af8f850b5a621ef
|
[] |
no_license
|
mrdwab/SOfun
|
a94b37d9c052ed32f1f53372a164d854537fcb4a
|
e41fa6220871b68be928dfe57866992181dc4e1d
|
refs/heads/master
| 2021-01-17T10:22:12.384534
| 2020-06-19T22:10:29
| 2020-06-19T22:10:29
| 16,669,874
| 30
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 785
|
rd
|
vec2symmat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vec2symmat.R
\name{vec2symmat}
\alias{vec2symmat}
\title{Creates a Symmetric Matrix from a Vector}
\usage{
vec2symmat(invec, diag = 1, byrow = TRUE)
}
\arguments{
\item{invec}{The input vector}
\item{diag}{The value for the diagonal}
\item{byrow}{Logical. Whether the upper-triangle should be filled in by row}
}
\value{
A matrix
}
\description{
Takes a vector and, if the vector is of the correct length to be made into a
symmetric matrix, performs the conversion.
}
\examples{
myvec <- c(-.55, -.48, .66, .47, -.38, -.46)
vec2symmat(myvec)
vec2symmat(1:15, diag = 0)
vec2symmat(1:15, diag = 0, byrow = FALSE)
}
\references{
\url{http://stackoverflow.com/a/18598933/1270695}
}
\author{
Ananda Mahto
}
|
2e16497aa48ec8e4857def24e63b4c56c844e167
|
f817d4d29c02c8aba4ad52f0a0de03f1bf3ade8f
|
/R/endpoint.R
|
2ef9e7a7ac7d71f3b63304fe6c6b69649a9a227c
|
[
"MIT"
] |
permissive
|
be-marc/vetiver
|
463d397814169ba7237a9b2e46eac2a13025254e
|
f1e67302f4775997d4c54c1253904b1ccbca63f6
|
refs/heads/main
| 2023-08-26T06:59:30.023856
| 2021-11-02T23:43:37
| 2021-11-02T23:43:37
| 423,813,743
| 0
| 0
|
NOASSERTION
| 2021-11-02T11:16:53
| 2021-11-02T11:16:52
| null |
UTF-8
|
R
| false
| false
| 1,833
|
r
|
endpoint.R
|
#' Post new data to a deployed model API endpoint and return predictions
#'
#' @param object A model API endpoint object created with [vetiver_endpoint()].
#' @param new_data New data for making predictions, such as a data frame.
#' @param ... Extra arguments passed to [httr::POST()]
#'
#' @return A tibble of model predictions with as many rows as in `new_data`.
#' @importFrom stats predict
#' @export
#'
#' @examples
#'
#' if (FALSE) {
#' endpoint <- vetiver_endpoint("http://127.0.0.1:8088/predict")
#' predict(endpoint, mtcars[4:7, -1])
#' }
#'
#'
predict.vetiver_endpoint <- function(object, new_data, ...) {
    # Serialise the new data as JSON and POST it to the endpoint URL.
    payload <- jsonlite::toJSON(new_data)
    response <- httr::POST(object$url, ..., body = payload)
    # Decode the response body and parse it back into R structures.
    raw_text <- httr::content(response, "text", encoding = "UTF-8")
    ret <- jsonlite::fromJSON(raw_text)
    # The API signals failure via an "error" field in the response body;
    # surface its message when one is provided.
    if (has_name(ret, "error")) {
        if (has_name(ret, "message")) {
            abort(glue("Failed to predict: {ret$message}"))
        } else {
            abort("Failed to predict")
        }
    }
    tibble::as_tibble(ret)
}
#' Create a model API endpoint object for prediction
#'
#' @param url An API endpoint URL
#' @return A new `vetiver_endpoint` object
#'
#' @examples
#' vetiver_endpoint("https://colorado.rstudio.com/rsc/biv_svm_api/predict")
#'
#' @export
vetiver_endpoint <- function(url) {
    # Coerce to character, then delegate to the low-level constructor.
    new_vetiver_endpoint(as.character(url))
}
# Low-level constructor for the "vetiver_endpoint" S3 class; assumes the
# caller has already coerced `url` to character.
new_vetiver_endpoint <- function(url = character()) {
    stopifnot(is.character(url))
    endpoint <- list(url = url)
    class(endpoint) <- "vetiver_endpoint"
    endpoint
}
#' @export
format.vetiver_endpoint <- function(x, ...) {
    ## S3 format method: renders a short cli-styled description of the
    ## endpoint (a header line plus the URL) and returns it as text.
    cli::cli_format_method({
        cli::cli_h3("A model API endpoint for prediction:")
        cli::cli_text("{x$url}")
    })
}
#' @export
print.vetiver_endpoint <- function(x, ...) {
    ## S3 print method: writes the formatted endpoint description, one line
    ## per element, and returns the object invisibly so it chains cleanly.
    cat(format(x), sep = "\n")
    invisible(x)
}
|
82587366a43bda7f432a9d578cdf0b3a56e92271
|
1dedfa2451f5bdf76dc6ac9f6f2e972865381935
|
/tests/testthat/test-density_standard.R
|
a6ca92d56c5215b3729c42185a6043918d0b09b4
|
[
"MIT"
] |
permissive
|
nhejazi/haldensify
|
95ef67f709e46554085371ffd4b5ade68baf06a4
|
e2cfa991e2ba528bdbf64fd2a24850e22577668a
|
refs/heads/master
| 2022-10-07T09:51:03.658309
| 2022-09-26T18:07:59
| 2022-09-26T18:07:59
| 165,715,134
| 15
| 6
|
NOASSERTION
| 2022-08-24T14:03:36
| 2019-01-14T18:43:32
|
R
|
UTF-8
|
R
| false
| false
| 3,414
|
r
|
test-density_standard.R
|
## testthat tests for haldensify's standard conditional density estimation:
## fit p(A | W) on simulated data and check prediction dimensions under the
## "cv", "undersmooth", and "all" lambda-selection modes.
library(data.table)
set.seed(76924)
# simulate data: W ~ Rademacher and A|W ~ N(mu = \pm 1, sd = 0.5)
n_train <- 100
w <- rbinom(n_train, 1, 0.5)
w[w == 0] <- -1
a <- rnorm(n_train, 2 * w, 0.5)
# learn relationship A|W using HAL-based density estimation procedure
haldensify_fit <- haldensify(
  A = a, W = w,
  n_bins = c(3, 5),
  lambda_seq = exp(seq(-1, -13, length = 100)),
  max_degree = 2
)
# predictions to recover conditional density of A|W on a grid of A values,
# once for each conditioning value of W (-1 and +1)
new_a <- seq(-1, 1, by = 0.01)
new_w_neg <- rep(-1, length(new_a))
new_w_pos <- rep(1, length(new_a))
new_dat <- as.data.table(list(a = new_a, w_neg = new_w_neg, w_pos = new_w_pos))
new_dat$pred_w_neg <- predict(haldensify_fit,
  new_A = new_dat$a, new_W = new_dat$w_neg
)
new_dat$pred_w_pos <- predict(haldensify_fit,
  new_A = new_dat$a, new_W = new_dat$w_pos
)
# NOTE: these tests are poorly thought out, so temporarily removing
# test that maximum value of prediction happens at appropriate mean of the
# conditional density N(mu = \pm 1, sd = 0.5)
# test_that("Maximum predicted probability of p(A|W = -1) matches N(-1, 0.5)", {
# obs_a_max_prob_w_neg <- new_dat[which.max(new_dat$pred_w_neg), ]$a
# expect_equal(round(obs_a_max_prob_w_neg), unique(new_w_neg))
# })
# test_that("Maximum predicted probability of p(A|W = +1) matches N(+1, 0.5)", {
# obs_a_max_prob_w_pos <- new_dat[which.max(new_dat$pred_w_pos), ]$a
# expect_equal(round(obs_a_max_prob_w_pos), unique(new_w_pos))
# })
# Refit, supplying fit_control additional arguments (CV selection, 3 folds).
n_lambda <- 100L
haldensify_fit_cntrl <- haldensify(
  A = a, W = w,
  n_bins = c(3, 5),
  lambda_seq = exp(seq(-1, -13, length = n_lambda)),
  max_degree = 2,
  fit_control = list(cv_select = TRUE, n_folds = 3L, use_min = TRUE)
)
# Index of the CV-selected lambda within the lambda sequence.
cv_lambda_idx <- haldensify_fit_cntrl$cv_tuning_results$lambda_loss_min_idx
# prediction with lambda selected by cross-validation
pred_w_cv <- predict(haldensify_fit_cntrl,
  new_A = new_dat$a, new_W = new_dat$w_neg, lambda_select = "cv"
)
# prediction with lambda_select undersmooth: returns predictions for the
# sub-sequence of lambdas from the CV choice onward
pred_w_undersmooth <- predict(haldensify_fit_cntrl,
  new_A = new_dat$a, new_W = new_dat$w_neg, lambda_select = "undersmooth"
)
test_that("Prediction for undersmoothed lambda is of correct dimensions", {
  # in case the CV-chosen lambda is the last in the sequence, the result
  # degenerates to a single vector identical to the CV prediction
  if (cv_lambda_idx == n_lambda) {
    # number of rows should match input data nrows
    expect_equal(length(pred_w_undersmooth), nrow(new_dat))
    # first lambda in sequence should be the cross-validation selector's choice
    expect_equal(pred_w_undersmooth, pred_w_cv)
  } else {
    # number of rows should match input data nrows
    expect_equal(nrow(pred_w_undersmooth), nrow(new_dat))
    # number of columns should be less than the full sequence of lambda
    expect_lt(ncol(pred_w_undersmooth), n_lambda)
    # first lambda in sequence should be the cross-validation selector's choice
    expect_equal(pred_w_undersmooth[, 1], pred_w_cv)
  }
})
# prediction with lambda_select all: one column per lambda in the sequence
pred_w_all <- predict(haldensify_fit_cntrl,
  new_A = new_dat$a, new_W = new_dat$w_neg, lambda_select = "all"
)
test_that("Prediction for all lambda is of correct dimensions", {
  # number of rows should match input data nrows
  expect_equal(nrow(pred_w_all), nrow(new_dat))
  # number of columns should match the full sequence of lambda
  expect_equal(ncol(pred_w_all), n_lambda)
})
# Smoke test: the print method runs without error (warnings suppressed).
suppressWarnings(
  print(haldensify_fit_cntrl)
)
|
078da30f258fc36403b2950687853338643af1fe
|
e2f3ace7d5476cc8042514b3f93e466098aaf641
|
/man/exprToPlotmathExpr.Rd
|
a7d66c69be51aef334ddc958b8e197bb246cbc8c
|
[] |
no_license
|
erp12/rgp
|
1527a5901fb6cb570e9461487fadb89a9bd66dd9
|
4f6e7a03585f75a139d232b8b817527d15c74d47
|
refs/heads/master
| 2020-12-31T02:22:38.126098
| 2016-08-22T21:42:32
| 2016-08-22T21:42:32
| 66,305,730
| 0
| 0
| null | 2016-08-22T20:30:13
| 2016-08-22T20:30:13
| null |
UTF-8
|
R
| false
| false
| 553
|
rd
|
exprToPlotmathExpr.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{exprToPlotmathExpr}
\alias{exprToPlotmathExpr}
\title{Convert any expression to an expression that is plottable by plotmath}
\usage{
exprToPlotmathExpr(expr)
}
\arguments{
\item{expr}{The GP-generated expression to convert.}
}
\value{
An expression plottable by \code{\link{plotmath}}.
}
\description{
Tries to convert a GP-generated expression \code{expr} to an expression plottable by
\code{\link{plotmath}} by replacing GP variants of arithmetic operators by their standard
counterparts.
}
|
f582da9477a6dcbbbae8a88e42a2e2f882e5b140
|
6622b0950dc4e57a3826b678054b04765ad22740
|
/man/sfactorDdp.Rd
|
6c90f4a4d74ca40c90d465a08188c899444a6cc4
|
[] |
no_license
|
RafaelSdeSouza/nuclear
|
f2d6e187298268968ea165b5a793b85c657019dc
|
86823e86b4ca0bb12083c60b033e89e583eb6301
|
refs/heads/master
| 2022-02-13T00:32:51.725874
| 2019-08-09T14:48:14
| 2019-08-09T14:48:14
| 104,823,096
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 742
|
rd
|
sfactorDdp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sfactorDdp.R
\name{sfactorDdp}
\alias{sfactorDdp}
\title{Estimate Astrophysical S-factor}
\format{\describe{
\item{x}{
The function has two arguments: ecm, a.scale }
}}
\usage{
sfactorDdp(ecm = ecm, a.scale = a.scale)
}
\arguments{
\item{ecm}{ecm}
\item{a.scale}{a.scale}
}
\value{
S-factor
}
\description{
Estimates the astrophysical S-factor as a function of the center-of-mass energy \code{ecm} and scale factor \code{a.scale}.
}
\examples{
library(nuclear)
N <- 300
obsx1 <- exp(seq(log(1e-3), log(1),length.out=N))
plot(obsx1,sfactorDdp(obsx1),
col="red",cex=1.25,type="l",ylab="S-factor",xlab="E",log="x")
}
\author{
Rafael de Souza, UNC, and Christian Illiadis, UNC
}
\keyword{S-factor}
|
46fc4b7896bbd2f578acc3ce98a692b150db1d85
|
e955508b9901acb0eab7b32ca4d0429344a23087
|
/function.R
|
37a70f8f8e1be262d8ce8ac46fdd6d359546b366
|
[] |
no_license
|
joncgoodwin/sb
|
fa574f014063c06937bf44eb9c58f5e14f4f79c1
|
bd3fc66f2c72cb1a9d2deab6b8532f080f81920e
|
refs/heads/master
| 2021-01-18T23:50:47.373453
| 2016-07-14T01:28:04
| 2016-07-14T01:28:04
| 55,804,305
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,518
|
r
|
function.R
|
write_sleeping_beauty <- function(x, y) {
  ## x = filename of a Web of Science citation-report CSV (wide format, one
  ##     column per year X1973..X2016);
  ## y = citation threshold: keep only papers whose cumulative citation
  ##     count ever reaches y.
  ## Writes "data.csv" (consumed by a d3.js display) and returns the
  ## processed (grouped) data frame.
  # libraries
  library(dplyr)
  library(tidyr)
  library(ggplot2)
  library(scales)   # for log axis in plotgraph
  library(stringr)
  # read csv and rename the Web of Science export columns
  sb <- read.csv(x, stringsAsFactors = FALSE)
  sb <- rename(sb, name = Title)
  sb <- rename(sb, PublicationYear = Publication.Year)
  sb <- rename(sb, Journal = Source.Title)
  # transform from wide to long: one row per (paper, year), then turn the
  # per-year counts into cumulative citation counts per paper
  sb <- sb %>%
    select(name, Authors, PublicationYear, Journal, X1973:X2016) %>%
    gather(Year, cite, X1973:X2016) %>%  # assumes this year range, not
                                         # always the case
    mutate(Year = as.numeric(gsub("X", "", Year))) %>%
    group_by(name) %>%
    mutate (cite = cumsum(cite))
  # create elapsed column (years since publication); PublicationYear arrives
  # as text from the Web of Science export, so coerce to numeric first
  sb$PublicationYear <- as.numeric(sb$PublicationYear)
  sb$name <- str_to_title(sb$name)
  sb$Journal <- str_to_title(sb$Journal)
  sb <- sb %>% mutate(elapsed = Year - PublicationYear) %>% filter(elapsed > 0)
  # keep only papers whose cumulative citations reach the threshold
  # (per-paper, since sb is still grouped by name)
  sb <- sb %>% filter(max(cite) >= y)
  # NOTE(review): titles become key values in javascript; ideally a clean key
  # column (no quotes, commas, parentheses, etc.) should be created before
  # writing.
  sb$id <- group_indices(sb)  # Fixed per Andrew Goldstone's helpful
                              # explanation.
  sb$id <- sub("^", "flute", sb$id)  # prefix so ids work as javascript hash
                                     # keys
  # journal-specific tweak; NOTE(review): Journal was title-cased above, so
  # this all-caps pattern may no longer match -- verify
  sb$Journal <- sub("-PUBLICATIONS OF THE MODERN LANGUAGE ASSOCIATION OF AMERICA", "", sb$Journal)
  # need to change 0s to 0.9 for d3.scale.log (a log scale cannot plot 0)
  sb$cite[sb$cite == 0] <- 0.9
  sb$threshold <- y  # hackish way of showing threshold in d3 graph
  # write csv for the d3.js display
  write.csv(sb, "data.csv", row.names = FALSE)
  sb
}
plotgraph <- function(sb, x, y) {
  ## Plot cumulative citations over elapsed time, highlighting "sleeping
  ## beauties" in red.
  ## sb: data frame from write_sleeping_beauty() (still grouped by name);
  ## x: citation threshold; y: minimum years elapsed.
  # Label coordinates: per paper, the furthest point of its curve.
  sb <- sb %>%
    mutate(label_x_position = max(elapsed)) %>%
    mutate(label_position = max(cite))
  labelled <- subset(sb, cite < x & elapsed > y)          # rows to label
  highlighted <- sb %>% filter(name %in% labelled$name)   # full curves to colour
  ggplot(data = sb, aes(x = elapsed, y = cite, Group = name)) +
    theme_bw() +
    geom_line(colour = "gray", alpha = .25) +
    geom_line(data = highlighted,
              aes(x = elapsed, y = cite, Group = name),
              alpha = 1, colour = "red") +
    scale_y_continuous(trans = log2_trans()) +
    xlab("Years Elapsed Since Publication") +
    ylab("Cumulative Citations") +
    ggtitle("Sleeping Beauties") +
    geom_text(data = labelled,
              aes(x = label_x_position, y = label_position,
                  Group = name, label = name),
              colour = "red", size = 2)
}
|
a8b10b2c7f3210c785769de7a9dbb1822e278d3d
|
03b686d96bc53751f3323cf9eb50bb4884db816b
|
/Source/ESS_explore.R
|
59200807bb2ef3e67beb75037a4d9999abe7244d
|
[] |
no_license
|
callum-lawson/Annuals
|
728028ffeb28488aac457898561afbe04747054f
|
b55d2037f4f038ef7979c918bc352297a0fc48aa
|
refs/heads/master
| 2021-11-11T09:36:11.838807
| 2018-03-14T13:55:15
| 2018-03-14T13:55:15
| 82,462,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,405
|
r
|
ESS_explore.R
|
### Develop explanations for patterns in ESS germination results ###
# Exploratory analysis script: relies on functions (evolve, ressim, fixG,
# coaG, logitnormint, nbtmean, BHS) and several objects (pl, es, sims, w,
# outlist2..5, outlistB*, outlistC*, tau_p, tau_s, T3, mam, msd, nr, nt, ...)
# defined in the sourced file or in the interactive session -- this script is
# not self-contained and is meant to be run piecemeal.
source("Source/invasion_functions_iterative.R")
# Empirical simulations ---------------------------------------------------
# Run the evolutionary simulation with empirically fitted parameters
# (indices [2,19] etc. presumably select a posterior draw / species -- confirm
# against the sourced functions).
outlist <- evolve(
nr=1000,nt=10100,nb=100,
zam=zamo+mam*zsdo,wam=wamo+mam*wsdo,
zsd=zsdo*msd,wsd=wsdo*msd,rho=0.82,
beta_p=pl$pr$beta_p[2,19,],beta_r=pl$rs$beta_r[2,19,],
sig_y_p=pl$pr$sig_y_p[2,19],sig_y_r=pl$rs$sig_y_r[2,19],
sig_o_p=pl$pr$sig_o_p[2],phi_r=pl$rs$phi_r[1],
m0=exp(pl$go$alpha_m[2,19]),m1=exp(pl$go$beta_m[2,19]),
am0=1,bm0=1,
as0=1,bs0=1,
abr0=0.1,
smut_m=1,smut_s=0.1,smut_r=0.1,
savefile=NULL,
nsmin=10^-50
)
# Rebuild the climate inputs used above so they can be reused below.
zam=zamo+mam*zsdo
wam=wamo+mam*wsdo
zsd=zsdo*msd
wsd=wsdo*msd
rho=0.82
# Bivariate lognormal climate draws (z = season precip, w = germination
# precip), correlated with rho.
zw_mu <- c(zam,wam) - log(tau_p)
zw_sig <- matrix(c(zsd^2,rep(rho*zsd*wsd,2),wsd^2),nr=2,nc=2)
zw <- mvrnorm(n=nt, mu=zw_mu, Sigma=zw_sig)
eps_y_p <- rnorm(nt,0,sig_y_p)
eps_y_r <- rnorm(nt,0,sig_y_r)
# Resident-strategy simulation with flat (am=bm=1 etc.) strategy parameters.
rd <- ressim(w=zw[,2],x_z,am=1,bm=1,as=1,bs=1,abr=1,
beta_p=pl$pr$beta_p[1,19,],beta_r=pl$rs$beta_r[1,19,],
eps_y_p=rep(0,nt),eps_y_r=rep(0,nt),
sig_o_p=pl$pr$sig_o_p[1],phi_r=pl$rs$phi_r[1],
So=0.1,m0=exp(pl$go$alpha_m[1,19]),m1=exp(pl$go$beta_m[1,19]),
nsmin=10^-50
)
# Trace plots of the evolving strategy parameters.
plot(outlist$es$am,type="l")
plot(outlist$es$bm,type="l")
plot(outlist$es$as,type="l")
plot(outlist$es$bs,type="l")
plot(outlist$es$abr,type="l")
plot(outlist2$es$am)
plot(outlist2$es$bm)
plot(outlist2$es$as)
# Compare germination reaction norms at the final evolved state across runs
# (red/blue/green distinguish the outlist / outlistB / outlistC run families).
with(outlist$es[nr,],
curve(coaG(w=x,am=am,bm=bm,as=as,bs=bs,abr=abr),xlim=c(-5,5),ylim=c(0,1))
)
with(outlist$es[nr,],curve(fixG(x,am,bm),xlim=c(-5,5),col="red",ylim=c(0,1)))
with(outlist2$es[nr,],curve(fixG(x,am,bm),add=T,col="red"))
with(outlist3$es[nr,],curve(fixG(x,am,bm),add=T,col="red"))
with(outlist4$es[nr,],curve(fixG(x,am,bm),add=T,col="red"))
with(outlist5$es[nr,],curve(fixG(x,am,bm),add=T,col="red"))
with(outlistB$es[nr,],curve(fixG(x,am,bm),add=T,col="blue"))
with(outlistB2$es[nr,],curve(fixG(x,am,bm),add=T,col="blue"))
with(outlistB3$es[nr,],curve(fixG(x,am,bm),add=T,col="blue"))
with(outlistB4$es[nr,],curve(fixG(x,am,bm),add=T,col="blue"))
with(outlistB5$es[nr,],curve(fixG(x,am,bm),add=T,col="blue"))
with(outlistC$es[nr,],curve(fixG(x,am,bm),add=T,col="green"))
with(outlistC2$es[nr,],curve(fixG(x,am,bm),add=T,col="green"))
with(outlistC3$es[nr,],curve(fixG(x,am,bm),add=T,col="green"))
with(outlistC4$es[nr,],curve(fixG(x,am,bm),add=T,col="green"))
with(outlistC5$es[nr,],curve(fixG(x,am,bm),add=T,col="green"))
abline(v=quantile(outlist$zw[,1],probs=c(0.05,0.95)),lty=3)
# Resident simulations at the first vs last evolved strategy.
rd1 <- with(es[1,], ressim(zw[,2],x_z,am,bm,as,bs,abr,
beta_p,beta_r,
eps_y_p,eps_y_r,
sig_o_p,phi_r,
So,m0,m1,
nt,nsmin,
full=T
) )
rd2 <- with(es[nr,], ressim(zw[,2],x_z,am,bm,as,bs,abr,
beta_p,beta_r,
eps_y_p,eps_y_r,
sig_o_p,phi_r,
So,m0,m1,
nt,nsmin,
full=T
) )
with(rd2,plot(log(Ye)~zw[,2]))
with(rd2,lines(supsmu(zw[,2],log(Ye)),col="red"))
abline(h=0,col="blue",lty=3)
abline(v=0,col="blue",lty=3)
# Fitness differences between the final (G2) and initial (G1) germination
# strategies, evaluated under the initial (d1) and final (d2) yields.
G1 <- rd1$Gres
G2 <- rd2$Gres
d1 <- log((1-G2)*So + G2*rd1$Ye) - log((1-G1)*So + G1*rd1$Ye)
d2 <- log((1-G2)*So + G2*rd2$Ye) - log((1-G1)*So + G1*rd2$Ye)
par(mfrow=c(1,1))
plot(d1 ~ w)
lines(supsmu(w,d1),col="orange")
lines(supsmu(w,d2),col="red")
abline(h=0,col="blue",lty=3)
curve(log(fixG(x,es[nr,]$am,es[nr,]$bm)/fixG(x,es[1,]$am,es[1,]$bm)),add=T,col="purple")
curve(log((1-fixG(x,es[nr,]$am,es[nr,]$bm))/(1-fixG(x,es[1,]$am,es[1,]$bm))),add=T,col="purple")
curve(log(
(So*(1-fixG(x,es[nr,]$am,es[nr,]$bm))+fixG(x,es[nr,]$am,es[nr,]$bm))
/(So*(1-fixG(x,es[1,]$am,es[1,]$bm))+fixG(x,es[1,]$am,es[1,]$bm))
),add=T,col="purple")
# curve for Y=1
# Split the fitness differences by low / high seed density.
rd1med <- median(rd1$ns)
c1 <- rd1$ns<quantile(rd1$ns,0.05) # rd1$ns<=rd1med
c2 <- rd1$ns>quantile(rd1$ns,0.95) # rd1$ns>rd1med
d1a <- d1[c1]
d1b <- d1[c2]
w1 <- w[c1]
w2 <- w[c2]
plot(d1a ~ w1,col="blue")
points(d1b ~ w2,col="purple")
lines(supsmu(w1,d1a),col="black")
lines(supsmu(w2,d1b),col="black")
abline(h=0,col="blue",lty=3)
# Four-panel summary: reaction norms, yield vs climate, and densities.
par(mfrow=c(2,2),mar=c(2,2,2,2))
with(es[1,],curve(fixG(x,am,bm),xlim=c(-5,5),ylim=c(0,1),col="orange"))
with(es[nr,],curve(fixG(x,am,bm),add=T,col="red"))
abline(v=quantile(zw[,2],probs=c(0.05,0.95)),lty=3)
with(rd2,plot(log(Ye)~zw[,2],
type="n",
xlim=quantile(zw[,2],probs=c(0.005,0.995)),
ylim=c(-2,2)))
with(rd1, lines(supsmu(zw[,2],log(Ye)),col="orange"))
with(rd2, lines(supsmu(zw[,2],log(Ye)),col="red"))
abline(h=0,col="blue",lty=3)
abline(v=0,col="blue",lty=3)
plot(density(log(rd1$ns)),xlim=c(1.5,5.5),col="orange")
lines(density(log(rd2$ns)),col="red")
plot(density(log(rd1$ns*rd1$G*rd1$Ye),n=2048),xlim=c(4,5),col="orange")
lines(density(log(rd2$ns*rd2$G*rd2$Ye),n=2048),col="red")
plot(density(log(rd1$ns*rd1$G*rd1$Ye),n=2048),xlim=c(-4,4),col="orange",ylim=c(0,0.1))
lines(density(log(rd2$ns*rd2$G*rd2$Ye),n=2048),col="red")
# Back-of-envelope carrying capacity under Beverton-Holt seed survival.
K <- exp(-m0*T3) / ( (m1/m0)*(1-exp(-m0*T3))/(tau_s/10) ) # = K_Y
# dN/dt = K - (So+G)N
G <- 1
K/(So+G) # overall K if env always very favourable
quantile(rd1$ns,0.95) # even at this value, Y>=1
# Compare log(Ye/So) distributions in dry vs wet halves of the climate.
par(mfrow=c(2,1))
ws <- w < median(w)
wl <- w >= median(w)
with(rd1[ws,], hist(log(Ye/So),breaks=1000,col=rgb(0,0,1,alpha=0.25),border=NA,xlim=c(-5,5)))
with(rd1[wl,], hist(log(Ye/So),breaks=1000,add=T,col=rgb(1,0,0,alpha=0.25),border=NA))
abline(v=0)
ws <- w < median(w)
wl <- w >= median(w)
with(rd2[ws,], hist(log(Ye/So),breaks=1000,col=rgb(0,0,1,alpha=0.25),border=NA,xlim=c(-5,5)))
with(rd2[wl,], hist(log(Ye/So),breaks=1000,add=T,col=rgb(1,0,0,alpha=0.25),border=NA))
abline(v=0)
with(rd1[ws,], mean(log(Ye/So)))
with(rd1[wl,], mean(log(Ye/So)))
with(rd2[ws,], mean(log(Ye/So)))
with(rd2[wl,], mean(log(Ye/So)))
plot(log(Ye/So)~w,data=rd1,type="n",ylim=c(-5,1))
with(rd1, lines(supsmu(w,log(Ye/So)),col="orange"))
with(rd2, lines(supsmu(w,log(Ye/So)),col="red"))
abline(v=mean(w),lty=3)
# Difference in fitness between the two yield environments, holding the
# germination strategy fixed at initial (h1) or final (h2).
h1 <- log((1-G1)*So + G1*rd2$Ye) - log((1-G1)*So + G1*rd1$Ye)
h2 <- log((1-G2)*So + G2*rd2$Ye) - log((1-G2)*So + G2*rd1$Ye)
plot(h1~w,type="n")
lines(supsmu(w,h1),col="orange")
lines(supsmu(w,h2),col="red")
mean(h2-h1)
# Recompute per-year yield Ye2 directly from the regression parameters,
# forcing density to a very low value (-10 on the model scale).
# NOTE(review): Ye2 must be preallocated before this loop for it to run.
for(t in 1:nt){
x_t <- c(x_z[t,],-10)
pi_bar_t <- sum(beta_p * x_t) + eps_y_p[t]
mu_bar_t <- sum(beta_r * x_t) + eps_y_r[t]
pr_t <- logitnormint(mu=pi_bar_t,sigma=sig_o_p)
rs_t <- nbtmean(exp(mu_bar_t),phi_r)
Ye2[t] <- pr_t * rs_t * BHS(pr_t * rs_t,m0,m1)
}
par(new=F)
plot(density(log(Ye)))
par(new=T)
plot(density(log(Ye2)),col="blue")
# nr=1000;nt=250;nb=50;
# zam=zamo+mam*zsdo;wam=wamo+mam*wsdo;
# zsd=zsdo*msd;wsd=wsdo*msd;rho=0.82;
# beta_p=pl$pr$beta_p[1,19,];beta_r=pl$rs$beta_r[1,19,];
# sig_y_p=pl$pr$sig_y_p[1,19];sig_y_r=pl$rs$sig_y_r[1,19];
# sig_o_p=pl$pr$sig_o_p[1];phi_r=pl$rs$phi_r[1];
# m0=exp(pl$go$alpha_m[1,19]);m1=exp(pl$go$beta_m[1,19]);
# am0=1;bm0=1;
# as0=1;bs0=1;
# abr0=0.1;
# smut_m=1;smut_s=0.1;smut_r=0.1;
# savefile=NULL;
# nsmin=10^-50
#
# w=zw[,2]
# attach(es[1,])
# Simple simulations ------------------------------------------------------
# Reload the fitted model parameter lists (germination/onset, per-capita
# reproduction, recruitment) from saved Stan/model outputs.
pl <- list(
go = readRDS("Models/go_pars_tdistpois_naspecies_noerr_noGDD_loglik_BH_01Mar2017.rds"),
gs = readRDS("Models/gnzhh_onhh_pars_medians_26Oct2015.rds"),
# gs = g site level
# source script: venable_Stan_GO_descriptive_gnzhh_onhh_26Oct2015
# uses tau_s = 100
# but tau actually irrelevant because all multiplicative?
pr = readRDS("Models/pr_pars_yearhet_squared_pc_02Mar2016.rds"),
rs = readRDS("Models/rs_pars_yearhet_squared_pc_trunc_05Mar2016.rds")
)
Gres <- Ginv <- plogis(-5,5,length.out=100)
gd <- expand.grid(Gres,Ginv)
i <- 1
j <- 19
So <- exp(-exp(pl$go$m1[i,j]))
# Ellner 1985 exploration -------------------------------------------------
# Empirical check of Ellner's harmonic-mean condition for seed dormancy.
msy <- read.csv("Output/msy_seedests_18Jan2017.csv",header=T)
msy$Y <- with(msy, nsdbar/germdbar)
msy$S <- 0.5
msy$lambda <- with(msy,csdbar/prevcsdbar) # this probably wrong
msy$X <- msy$csd
msy$wc <- with(msy,ifelse(gprcp>median(gprcp,na.rm=T),0,1))
Glo <- with(subset(msy,
wc==0 & !is.na(Y) & !is.na(lambda) & !is.na(X) & lambda>0),
mean(S/lambda) / mean(Y/lambda)
)
Ghi <- with(subset(msy,
wc==1 & !is.na(Y) & !is.na(lambda) & !is.na(X) & lambda>0),
mean(S/lambda) / mean(Y/lambda)
)
with(subset(msy,wc==0),hist(log(Y/lambda),breaks=100))
with(subset(msy,wc==1),hist(log(Y/lambda),breaks=100))
source("Source/invasion_functions_iterative.R")
pl <- list(
go = readRDS("Models/go_pars_tdistpois_naspecies_noerr_noGDD_loglik_BH_01Mar2017.rds"),
pr = readRDS("Models/pr_pars_yearhet_squared_pc_02Mar2016.rds"),
rs = readRDS("Models/rs_pars_yearhet_squared_pc_trunc_05Mar2016.rds")
)
j <- 15
i <- 1
nt <- 10^5
# Observed climate data used to parameterize the lognormal climate draws.
ncy <- read.csv("Output/ncy_15Jan2016.csv",header=T)
ncy <- subset(ncy,is.na(seasprcp)==F)
zamo <- mean(log(ncy$seasprcp))
zsdo <- sd(log(ncy$seasprcp))
wamo <- mean(log(ncy$germprcp))
wsdo <- sd(log(ncy$germprcp))
require(MASS)
zw_mu <- c(zamo,wamo) - log(100)
zw_sig <- matrix(c(zsdo^2,rep(0.82*zsdo*wsdo,2),wsdo^2),nr=2,nc=2)
zw <- mvrnorm(n=nt, mu=zw_mu, Sigma=zw_sig)
# Design matrix: intercept, climate, climate^2.
x_z <- matrix(nr=nt,nc=3)
x_z[,1] <- 1 # intercept
x_z[,2] <- zw[,1]
x_z[,3] <- zw[,1]^2
So <- exp(-exp(pl$go$alpha_m[i,j]))
# Resident simulation with full germination (am=100, bm=0).
sim <- ressim(w=zw[,2],x_z=x_z,am=100,bm=0,
beta_p=pl$pr$beta_p[i,j,],beta_r=pl$rs$beta_r[i,j,],
eps_y_p=rnorm(nt,0,1)*pl$pr$sig_y_p[i,j],
eps_y_r=rnorm(nt,0,1)*pl$rs$sig_y_r[i,j],
sig_o_p=pl$pr$sig_o_p[i],phi_r=pl$rs$phi_r[i],
So=So,
m0=exp(pl$go$alpha_m[i,j]),
m1=exp(pl$go$beta_m[i,j]),
nt=nt,nsmin=10^-50,nstart=1,
tau_d=100)
sim$lambda <- with(sim,Gres*Ye + (1-Gres)*So)
mean(log(sim$lambda))
# doesn't persist without dormancy
# so no point in even trying Ellner harmonic mean calculation
1/mean(1/sim$lambda)
So
hist(log(sim$lambda),breaks=1000)
abline(v=0,col="red",lty=3)
plot(log(sim$lambda[1:1000]),type="l")
# Harmonic-mean fitness diagnostics on a stored simulation array `sims`
# (defined elsewhere; dimensions appear to be [replicate, year, species]).
sapply(1:1000,function(i) sum(sims$G[i,,1]<0.95))
i <- 67
plot(sims$G[i,,1]~sims$w[i,])
oneover <- with(sims, 1 / ( G[i,,1]*Y[i,,1]*Sn[i,,1] + (1-G[i,,1])*So[i,,1] ) )
1 / mean(oneover)
sims$wc <- with(sims,ifelse(w>median(w,na.rm=T),0,1))
sims$Ye <- with(sims, Y*Sn)
sims$lambda <- with(sims, G*Ye + (1-G)*So)
relY <- with(sims, Ye/lambda)
relS <- with(sims, So/lambda)
i <- 1
j <- 19
with(sims,mean(relS[i,30:250,j]) / mean(relY[i,30:250,j]))
# Sanity checks: harmonic mean of a lognormal shrinks as variance grows.
1/mean(1/exp(rnorm(10^3,0,1)))
1/mean(1/exp(rnorm(10^3,0,3)))
hist(1/exp(rnorm(10^3,0,1)),breaks=1000)
|
7b94c44976089e84898ad1a529efda2312a02dd8
|
400b426c3e3b56b34c38c71d473df223089906ab
|
/R/util.R
|
0c3720d6ec6cdd6c187fcf6ef33f7913b5dabafa
|
[] |
no_license
|
poissonconsulting/poiscon
|
fcea48c3e994ff86dfd7cc521aba1842ebb24ce3
|
97007c1f318cfebb21905b8f42e74486984a1970
|
refs/heads/master
| 2021-06-11T18:47:30.563459
| 2021-02-12T22:56:24
| 2021-02-12T22:56:24
| 12,257,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
util.R
|
#' Remove Dots Colnames
#'
#' Goes through all the data.frame objects in the calling environment and
#' removes any dots from their column names. Names are first made
#' syntactically valid with \code{make.names()}, then all \code{.}
#' characters are stripped.
#'
#' @return Invisibly returns \code{TRUE}; called for its side effect of
#'   modifying data frames in the caller's environment.
#' @export
remove_dots_colnames_data_frames <- function () {
  # Operate on the caller's environment directly with get()/assign()
  # instead of building code text and eval(parse())-ing it -- identical
  # behavior, without the fragility of generated code strings.
  env <- parent.frame()
  for (obj_name in ls(envir = env)) {
    obj <- get(obj_name, envir = env)
    if (is.data.frame(obj)) {
      cleaned <- make.names(colnames(obj))
      colnames(obj) <- gsub("[.]", "", cleaned)
      assign(obj_name, obj, envir = env)
    }
  }
  invisible(TRUE)
}
|
1e78accf1999f4317dc405ed362fd9cb0346de6a
|
86e99dfcbc67dd4e5a86c8f7e575f7af4b60fd36
|
/man/getCurves.Rd
|
440539436115a72e90b5332fd3b519f0aaaf78f5
|
[] |
no_license
|
thejimymchai/slingshot
|
4c55e8cfbd3bcb64e660d466e4d65a2aed55a457
|
b1d9722247196d1b76a2b6bd65c0d9dfb630ba16
|
refs/heads/master
| 2023-02-24T08:32:38.851469
| 2020-11-17T16:34:07
| 2020-11-17T16:34:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,398
|
rd
|
getCurves.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/getCurves.R
\name{getCurves}
\alias{getCurves}
\alias{getCurves,SlingshotDataSet-method}
\alias{getCurves,SingleCellExperiment-method}
\title{Construct Smooth Lineage Curves}
\usage{
getCurves(sds, ...)
\S4method{getCurves}{SlingshotDataSet}(
sds,
shrink = TRUE,
extend = "y",
reweight = TRUE,
reassign = TRUE,
thresh = 0.001,
maxit = 15,
stretch = 2,
approx_points = FALSE,
smoother = "smooth.spline",
shrink.method = "cosine",
allow.breaks = TRUE,
...
)
\S4method{getCurves}{SingleCellExperiment}(
sds,
shrink = TRUE,
extend = "y",
reweight = TRUE,
reassign = TRUE,
thresh = 0.001,
maxit = 15,
stretch = 2,
approx_points = FALSE,
smoother = "smooth.spline",
shrink.method = "cosine",
allow.breaks = TRUE,
...
)
}
\arguments{
\item{sds}{The \code{SlingshotDataSet} for which to construct simultaneous
principal curves. This should already have lineages identified by
\code{\link{getLineages}}.}
\item{...}{Additional parameters to pass to scatter plot smoothing function,
\code{smoother}.}
\item{shrink}{logical or numeric between 0 and 1, determines whether and how
much to shrink branching lineages toward their average prior to the split.}
\item{extend}{character, how to handle root and leaf clusters of lineages
when constructing the initial, piece-wise linear curve. Accepted values are
\code{'y'} (default), \code{'n'}, and \code{'pc1'}. See 'Details' for more.}
\item{reweight}{logical, whether to allow cells shared between lineages to be
reweighted during curve-fitting. If \code{TRUE}, cells shared between
lineages will be iteratively reweighted based on the quantiles of their
projection distances to each curve. See 'Details' for more.}
\item{reassign}{logical, whether to reassign cells to lineages at each
iteration. If \code{TRUE}, cells will be added to a lineage when their
projection distance to the curve is less than the median distance for all
cells currently assigned to the lineage. Additionally, shared cells will be
removed from a lineage if their projection distance to the curve is above
the 90th percentile and their weight along the curve is less than
\code{0.1}.}
\item{thresh}{numeric, determines the convergence criterion. Percent change
in the total distance from cells to their projections along curves must be
less than \code{thresh}. Default is \code{0.001}, similar to
\code{\link[princurve]{principal_curve}}.}
\item{maxit}{numeric, maximum number of iterations, see
\code{\link[princurve]{principal_curve}}.}
\item{stretch}{numeric factor by which curves can be extrapolated beyond
endpoints. Default is \code{2}, see
\code{\link[princurve]{principal_curve}}.}
\item{approx_points}{numeric, whether curves should be approximated by a
fixed number of points. If \code{FALSE} (or 0), no approximation will be
performed and curves will contain as many points as the input data. If
numeric, curves will be approximated by this number of points; preferably
about 100 (see \code{\link[princurve]{principal_curve}}).}
\item{smoother, }{choice of scatter plot smoother. Same as
\code{\link[princurve]{principal_curve}}, but \code{"lowess"} option is
replaced with \code{"loess"} for additional flexibility.}
\item{shrink.method}{character denoting how to determine the appropriate
amount of shrinkage for a branching lineage. Accepted values are the same
as for \code{kernel} in \code{\link{density}} (default is \code{"cosine"}),
as well as \code{"tricube"} and \code{"density"}. See 'Details' for more.}
\item{allow.breaks}{logical, determines whether curves that branch very close
to the origin should be allowed to have different starting points.}
}
\value{
An updated \code{\link{SlingshotDataSet}} object containing the
oringinal input, arguments provided to \code{getCurves} as well as the
following new elements: \itemize{ \item{\code{curves}} {A list of
\code{\link[princurve]{principal_curve}} objects.}
\item{\code{slingParams}} {Additional parameters used for fitting
simultaneous principal curves.}}
}
\description{
This function takes a reduced data matrix \code{n} by \code{p},
a vector of cluster identities (optionally including \code{-1}'s for
"unclustered"), and a set of lineages consisting of paths through a forest
constructed on the clusters. It constructs smooth curves for each lineage
and returns the points along these curves corresponding to the orthogonal
projections of each data point, along with corresponding arclength
(\code{pseudotime} or \code{lambda}) values.
}
\details{
When there is only a single lineage, the curve-fitting algorithm is
nearly identical to that of \code{\link[princurve]{principal_curve}}. When
there are multiple lineages and \code{shrink > 0}, an additional step
is added to the iterative procedure, forcing curves to be similar in the
neighborhood of shared points (ie., before they branch).
The \code{extend} argument determines how to construct the
piece-wise linear curve used to initiate the recursive algorithm. The
initial curve is always based on the lines between cluster centers and if
\code{extend = 'n'}, this curve will terminate at the center of the
endpoint clusters. Setting \code{extend = 'y'} will allow the first and
last segments to extend beyond the cluster center to the orthogonal
projection of the furthest point. Setting \code{extend = 'pc1'} is similar
to \code{'y'}, but uses the first principal component of the cluster to
determine the direction of the curve beyond the cluster center. These
options typically have little to no impact on the final curve, but can
occasionally help with stability issues.
When \code{shrink = TRUE}, we compute a shrinkage curve,
\eqn{w_l(t)}, for each lineage, a non-increasing function of pseudotime
that determines how much that lineage should be shrunk toward a shared
average curve. We set \eqn{w_l(0) = 1}, so that the curves will perfectly
overlap the average curve at pseudotime \code{0}. The weighting curve
decreases from \code{1} to \code{0} over the non-outlying pseudotime values
of shared cells (where outliers are defined by the \code{1.5*IQR} rule).
The exact shape of the curve in this region is controlled by
\code{shrink.method}, and can follow the shape of any standard kernel
function's cumulative density curve (or more precisely, survival curve,
since we require a decreasing function). Different choices of
\code{shrink.method} seem to have little impact on the final curves, in
most cases.
When \code{reweight = TRUE}, weights for shared cells are based on
the quantiles of their projection distances onto each curve. The
distances are ranked and converted into quantiles between \code{0} and
\code{1}, which are then transformed by \code{1 - q^2}. Each cell's weight
along a given lineage is the ratio of this value to the maximum value for
this cell across all lineages.
}
\examples{
data("slingshotExample")
rd <- slingshotExample$rd
cl <- slingshotExample$cl
sds <- getLineages(rd, cl, start.clus = '1')
sds <- getCurves(sds)
plot(rd, col = cl, asp = 1)
lines(sds, type = 'c', lwd = 3)
}
\references{
Hastie, T., and Stuetzle, W. (1989). "Principal Curves."
\emph{Journal of the American Statistical Association}, 84:502--516.
}
\seealso{
\code{\link{slingshot}}
}
|
2f933c64718af0c19cc018109015b28563cf3273
|
c348e148840c1260985291d1adb8f7860fb6037f
|
/12/ex5-1-3_boxplot.R
|
0ab7131fd2659adef48b24427e1d19213c338e3a
|
[] |
no_license
|
harute931507/R_practice
|
8d478ad884bb8cd15c35b941499bc4f7c8c09dfe
|
aa882783d915a58048e7fbb061b3b7df87ec1f3e
|
refs/heads/master
| 2020-03-31T03:45:56.599245
| 2018-10-06T19:59:58
| 2018-10-06T19:59:58
| 151,876,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
ex5-1-3_boxplot.R
|
# Exercise 5-1-3: boxplot() demonstrations using the built-in iris and
# mtcars datasets.
?boxplot
example(boxplot)
# Basic boxplots of the four numeric iris measurements.
boxplot(iris[1:4], cex.axis=0.7)
boxplot(iris[1:4], cex.axis=0.7, notch=TRUE)
boxplot(iris[1:4], cex.axis=0.7, col=heat.colors(4))
# Paired boxplots per species: Sepal.Length (gold) and Sepal.Width (blue)
# side by side, offset via `at`; outliers suppressed.
# (T/F shorthand replaced with TRUE/FALSE: T and F are reassignable.)
boxplot(Sepal.Length~Species, data=iris, col="gold",
        ylim=c(1,8), xlim=c(0.25, 3.5), outline=FALSE,
        boxwex=0.35, at=c(1:3)-0.2)
boxplot(Sepal.Width~Species, data=iris, add=TRUE,
        col="cornflowerblue", axes=FALSE, outline=FALSE,
        boxwex=0.35, at=c(1:3)+0.2)
legend("topleft", c("Sepal.Length", "Sepal.Width"),
       bty="n", fill=c("gold", "cornflowerblue"), cex=0.8)
# the width parameter: box widths proportional to cylinder-group counts
(cylnum <- table(mtcars$cyl))
bw <- cylnum/sum(cylnum)
# NOTE(review): `bw` is computed but unused; width=cylnum already gives the
# same relative widths (widths are rescaled internally).
bdat <- boxplot(mpg~cyl, data=mtcars, col="gold", width=cylnum)
|
1464bf6704a8c5f2cf4ae6e9f37281f36e70bc10
|
328d9398e187c5fa9e6fd6c50c5a1173d5829499
|
/R/zzz.R
|
ffa832aa067a3068ce7154f40ead592b19f1e0a3
|
[
"CC-BY-4.0"
] |
permissive
|
PascalCrepey/HospiCoV
|
5915e03026871c97c828b1025ba1a2c010381108
|
9a36c370f8bd384a9d83e35847aa20eb95fc88f5
|
refs/heads/master
| 2021-03-30T02:11:29.487767
| 2020-04-09T13:01:30
| 2020-04-09T13:01:30
| 248,004,955
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 146
|
r
|
zzz.R
|
# Declare variable names that appear via non-standard evaluation elsewhere
# in the package, so R CMD check does not emit "no visible binding for
# global variable" notes for them.
# NOTE(review): "..extraCols" looks like data.table's ..var prefix syntax --
# confirm against the code that uses it.
utils::globalVariables(c("Time",
"Scenario",
"..extraCols",
"AgeGroup"
))
|
e23ddca2f14782a1839350a3ed991d23525217e0
|
afdcee7512dad0231bcf6f0ab92a95e82d7d0f70
|
/figure3.R
|
f8e1a77c89b98ad4eff502d35ad17e9f1385d93e
|
[] |
no_license
|
harrispopgen/gagp_mut_evol
|
4383cce285cdadbbd603a4101fb20c1e2acc179a
|
9ab6369a23a4dab10cdb490eb9c855df7111b03d
|
refs/heads/master
| 2022-12-16T13:56:05.952725
| 2020-09-24T16:59:50
| 2020-09-24T16:59:50
| 210,941,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,818
|
r
|
figure3.R
|
# Analyze mutational signatures per individual (lots of PCAs)
# Set working directory here
# setwd("~/Documents/Harris_project_directory")
# Use TRUE/FALSE rather than T/F: T and F are ordinary reassignable bindings.
options(stringsAsFactors = FALSE)
library(ggplot2)
library(ggridges)
library(plyr)
library(RSvgDevice)
library(ggfortify)
library(scales)
library(grid)
library(gridExtra)
library(cowplot)
source("./common.R")
# Trinucleotide (3-mer) background composition tables for each genomic
# region class; passed to reweight_species_spectra() below to put mutation
# spectra on a common sequence-composition footing.
ncnr_trinuc_composition <- read.table("./hg18_ncnr_3mer_content.txt", header = TRUE)
erv_trinuc_composition <- read.table("./hg18_all_erv_3mer_content.txt", header = TRUE)
heterochromatin_trinuc_composition <- read.table("./hg18_nonrepetitive_chromHMM_heterochromatin_3mer_content.txt", header = TRUE)
erv_hmc_high_trinuc_composition <- read.table("./hg18_all_erv_hmc_high_3mer_content.txt", header = TRUE)
erv_hmc_low_trinuc_composition <- read.table("./hg18_all_erv_hmc_low_3mer_content.txt", header = TRUE)
# A lot of this stuff doesn't require randomization. I haven't edited it yet.
# Load per-species 3-mer mutation spectra for each region class
# (load_species_3mer_spectrum is defined in common.R).
erv_species_spectra <-
load_species_3mer_spectrum("hg18_all_erv", nmer_dir = "./snp_data/randomized_nmer_mutation_counts_maf_filter_exclude_recurrent/")
control_species_spectra <-
load_species_3mer_spectrum("hg18_control", nmer_dir = "./snp_data/randomized_nmer_mutation_counts_maf_filter_exclude_recurrent/")
heterochromatin_species_spectra <-
load_species_3mer_spectrum("hg18_nonrepetitive_chromHMM_heterochromatin", nmer_dir = "./snp_data/randomized_nmer_mutation_counts_maf_filter_exclude_recurrent/")
erv_hmc_low_species_spectra <-
load_species_3mer_spectrum("hg18_all_erv_hmc_low", nmer_dir = "./snp_data/randomized_nmer_mutation_counts_maf_filter_exclude_recurrent/")
erv_hmc_high_species_spectra <-
load_species_3mer_spectrum("hg18_all_erv_hmc_high", nmer_dir = "./snp_data/randomized_nmer_mutation_counts_maf_filter_exclude_recurrent/")
# Reweight each spectrum onto the control region's trinucleotide
# composition so the spectra are comparable across region classes.
# NOTE(review): control_trinuc_composition is not defined in this file;
# presumably it comes from common.R -- verify.
erv_species_spectra_reweight_control <-
reweight_species_spectra(erv_species_spectra,
erv_trinuc_composition,
control_trinuc_composition)
heterochromatin_species_spectra_reweight_control <-
reweight_species_spectra(heterochromatin_species_spectra,
heterochromatin_trinuc_composition,
control_trinuc_composition)
erv_hmc_low_species_spectra_reweight_control <-
reweight_species_spectra(erv_hmc_low_species_spectra,
erv_hmc_low_trinuc_composition,
control_trinuc_composition)
erv_hmc_high_species_spectra_reweight_control <-
reweight_species_spectra(erv_hmc_high_species_spectra,
erv_hmc_high_trinuc_composition,
control_trinuc_composition)
# Long-format frame pairing hmC-low vs hmC-high spectra per species and
# mutation type (96 collapsed trinucleotide mutation classes per species).
erv_hmc_fraction_for_ggplot <-
data.frame(
hmc_neg = unlist(erv_hmc_low_species_spectra_reweight_control),
hmc_pos = unlist(erv_hmc_high_species_spectra_reweight_control),
s = factor(rep(species, each=96)),
mut = factor(collapsed_trinuc_mutations)
)
# Scatterplot of hmC-low vs hmC-high mutation fractions (log-log), one
# point per species x mutation type; types strongly enriched in hmC-high
# regions (log ratio > 0.4) are labelled.
ggsave(
"./scatterplot_erv_hmc_mut_type_by_species_20190830.pdf",
ggplot(erv_hmc_fraction_for_ggplot, aes(x=log(hmc_neg), y=log(hmc_pos), col=s, label=mut)) +
geom_point() +
geom_text(aes(label=ifelse(log(hmc_pos/hmc_neg)>0.4,as.character(mut),'')),hjust=0, vjust=0)
)
# Generating ERV HMC+ v HMC- p vals
# Reweight each HMC class onto the other's trinucleotide composition so the
# two can be compared with a chi-square test on raw-count scale.
erv_hmc_high_species_spectra_reweight_erv_hmc_low <-
reweight_species_spectra(erv_hmc_high_species_spectra,
erv_hmc_high_trinuc_composition,
erv_hmc_low_trinuc_composition, for_chi_sq = T)
erv_hmc_low_species_spectra_reweight_erv_hmc_high <-
reweight_species_spectra(erv_hmc_low_species_spectra,
erv_hmc_low_trinuc_composition,
erv_hmc_high_trinuc_composition, for_chi_sq = T)
# Per-species 2x2 chi-square test: NCG>NGG mutation counts vs all other
# mutations, hmC-high vs hmC-low regions.
write.csv(
as.data.frame(
sapply(
species,
function(s)
chisq.test(
matrix(
c(
sum(erv_hmc_high_species_spectra_reweight_erv_hmc_low[c("ACG.G", "CCG.G", "GCG.G", "TCG.G"), s]),
sum(erv_hmc_low_species_spectra_reweight_erv_hmc_high[c("ACG.G", "CCG.G", "GCG.G", "TCG.G"), s]),
sum(erv_hmc_high_species_spectra_reweight_erv_hmc_low[, s]) -
sum(erv_hmc_high_species_spectra_reweight_erv_hmc_low[c("ACG.G", "CCG.G", "GCG.G", "TCG.G"), s]),
sum(erv_hmc_low_species_spectra_reweight_erv_hmc_high[, s]) -
sum(erv_hmc_low_species_spectra_reweight_erv_hmc_high[c("ACG.G", "CCG.G", "GCG.G", "TCG.G"), s])
), nrow=2, ncol=2
)
)$p.value
)
),
"./cg_gg_p_vals_erv_hmc_high_to_low.csv"
)
ggsave(
"./heatmaps/all_species_rep_timing_20190905.pdf",
generate_heatmap_plot_multiple_species(late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control)
)
ggsave(
"./heatmaps/all_species_erv_heterochromatin_20190913.pdf",
generate_heatmap_plot_multiple_species(erv_species_spectra_reweight_control,
heterochromatin_species_spectra_reweight_control)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Homo.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Homo.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Pan_troglodytes.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Pan_troglodytes"
)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Pan_paniscus.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Pan_paniscus"
)
)
ggsave(
"./heatmaps/heterochromatin_vs_erv_Pongo_abelii.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
heterochromatin_species_spectra_reweight_control,
"Pongo_abelii"
)
)
ggsave(
"./heatmaps/heterochromatin_vs_erv_Homo.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
heterochromatin_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/heterochromatin_vs_erv_Gorilla.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
heterochromatin_species_spectra_reweight_control,
"Gorilla"
)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Pongo_abelii.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Pongo_abelii"
)
)
ggsave(
"./heatmaps/erv_hmc_high_vs_low_Pongo_pygmaeus.pdf",
generate_heatmap_plot_single_species(
erv_hmc_high_species_spectra_reweight_control,
erv_hmc_low_species_spectra_reweight_control,
"Pongo_pygmaeus"
)
)
ggsave(
"./heatmaps/maternal_hotspots_vs_control_Homo.pdf",
generate_heatmap_plot_single_species(
maternal_hotspots_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_Homo.pdf",
generate_heatmap_plot_single_species(
late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_Gorilla.pdf",
generate_heatmap_plot_single_species(
late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control,
"Gorilla"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_Pan_troglodytes.pdf",
generate_heatmap_plot_single_species(
late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control,
"Pan_troglodytes"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_Pongo_abelii.pdf",
generate_heatmap_plot_single_species(
late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control,
"Pongo_abelii"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_repetitive_Gorilla.pdf",
generate_heatmap_plot_single_species(
late_rep_repetitive_species_spectra_reweight_control,
early_rep_repetitive_species_spectra_reweight_control,
"Gorilla"
)
)
ggsave(
"./heatmaps/late_vs_early_rep_nonrepetitive_Gorilla.pdf",
generate_heatmap_plot_single_species(
late_rep_nonrepetitive_species_spectra_reweight_control,
early_rep_nonrepetitive_species_spectra_reweight_control,
"Gorilla"
)
)
ggsave(
"./heatmaps/maternal_hotspots_vs_control_Pan_troglodytes.pdf",
generate_heatmap_plot_single_species(
maternal_hotspots_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Pan_troglodytes"
)
)
ggsave(
"./heatmaps/maternal_hotspots_vs_control_Pongo_abelii.pdf",
generate_heatmap_plot_single_species(
maternal_hotspots_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Pongo_abelii"
)
)
ggsave(
"./heatmaps/Homo_erv_vs_control_20190627.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Pongo_abelii"
)
)
ggsave(
"./heatmaps/Homo_late_rep_nr_vs_early_rep_nr_20190627.pdf",
generate_heatmap_plot_single_species(
late_rep_nonrepetitive_species_spectra_reweight_control,
early_rep_nonrepetitive_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/Pan_troglodytes_late_rep_nr_vs_early_rep_nr_20190627.pdf",
generate_heatmap_plot_single_species(
late_rep_nonrepetitive_species_spectra_reweight_control,
early_rep_nonrepetitive_species_spectra_reweight_control,
"Pan_troglodytes"
)
)
ggsave(
"./heatmaps/Gorilla_late_rep_nr_vs_early_rep_nr_20190627.pdf",
generate_heatmap_plot_single_species(
late_rep_nonrepetitive_species_spectra_reweight_control,
early_rep_nonrepetitive_species_spectra_reweight_control,
"Gorilla"
)
)
generate_heatmap_plot_single_species(
maternal_hotspots_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Pan_troglodytes"
)
generate_heatmap_plot_single_species(
maternal_hotspots_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Pongo_abelii"
)
log_odds_spectra_correlation_species(
erv_species_spectra_reweight_control,
control_species_spectra_reweight_control
)
# ggsave(
# "./correlation_plots/lr_erv_vs_control_corr_spp_20190627.pdf",
# log_odds_spectra_correlation_species_heatmap(
# erv_species_spectra_reweight_control,
# control_species_spectra_reweight_control
# )
# )
# ggsave(
# "./correlation_plots/corr_erv_vs_control_lr_spp_20190627.pdf",
# log_odds_species_correlation_spectra_heatmap(
# erv_species_spectra_reweight_control,
# control_species_spectra_reweight_control
# )
# )
# ggsave(
# "./correlation_plots/lr_late_rep_nr_vs_early_rep_nr_corr_spp_20190627.pdf",
# log_odds_spectra_correlation_species_heatmap(
# late_rep_nonrepetitive_species_spectra_reweight_control,
# early_rep_nonrepetitive_species_spectra_reweight_control
# )
# )
# ggsave(
# "./correlation_plots/corr_late_rep_nr_vs_early_rep_nr_lr_spp_20190627.pdf",
# log_odds_species_correlation_spectra_heatmap(
# late_rep_nonrepetitive_species_spectra_reweight_control,
# early_rep_nonrepetitive_species_spectra_reweight_control
# )
# )
# Persist the species-by-species correlation table of late- vs
# early-replicating log-odds mutation spectra to CSV for downstream use.
write.csv(
log_odds_spectra_correlation_species(
late_rep_species_spectra_reweight_control,
early_rep_species_spectra_reweight_control
),
"./corr_values_log_odds_rep_timing_corr_species.csv"
)
# log_odds_spectra_correlation_species(
# late_rep_species_spectra_reweight_control,
# early_rep_species_spectra_reweight_control
# )
# pdf("./heatmaps/alu_vs_control_reweight_control.pdf")
# generate_heatmap_plot_single_species(
# alu_species_spectra_reweight_control,
# control_species_spectra_reweight_control,
# "Homo"
# )
# dev.off()
# pdf("./heatmaps/subtelomere_vs_control_reweight_control.pdf")
# generate_heatmap_plot_single_species(
# subtelomere_species_spectra_reweight_control,
# control_species_spectra_reweight_control,
# "Homo"
# )
# dev.off()
# pdf("./heatmaps/pericentromere_vs_control_reweight_control.pdf")
# generate_heatmap_plot_single_species(
# pericentromere_species_spectra_reweight_control,
# control_species_spectra_reweight_control,
# "Homo"
# )
# dev.off()
# generate_heatmap_plot_single_species(
# pericentromere_species_spectra_reweight_control,
# late_rep_repetitive_species_spectra_reweight_control,
# "Homo"
# )
# generate_heatmap_plot_single_species(
# subtelomere_species_spectra_reweight_control,
# early_rep_repetitive_species_spectra_reweight_control,
# "Homo"
# )
# pdf("./heatmaps/early_rep_nonrepetitive_vs_late_rep_nonrepetitive_reweight_control.pdf")
# generate_heatmap_plot_single_species(
# early_rep_nonrepetitive_species_spectra_reweight_control,
# late_rep_nonrepetitive_species_spectra_reweight_control,
# "Homo"
# )
# dev.off()
ggsave(
"./heatmaps/erv_vs_control_chisq_homo_20190619.pdf",
generate_heatmap_plot_single_species_chisq(
erv_species_spectra,
erv_trinuc_composition,
control_species_spectra,
control_trinuc_composition,
"Homo"
)
)
ggsave(
"./heatmaps/erv_vs_control_species_20190508.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
control_species_spectra_reweight_control,
"Homo"
)
)
ggsave(
"./heatmaps/erv_vs_heterochromatin_species_20190508.pdf",
generate_heatmap_plot_single_species(
erv_species_spectra_reweight_control,
heterochromatin_species_spectra_reweight_control,
"Homo"
)
)
# pdf("./heatmaps/early_vs_late_replication_timing_nonrepetitive_nonnormalized.pdf")
generate_pc_loading_heatmap(
log(early_rep_nonrepetitive_species_spectra_fraction_homo / late_rep_nonrepetitive_species_spectra_fraction_homo),
"Non-normaized early vs. late replication nonrepetitive")
# dev.off()
generate_pc_loading_heatmap(
log(early_rep_nonrepetitive_species_spectra_reweight$Homo / late_rep_nonrepetitive_species_spectra$Homo),
"Non-normaized early vs. late replication nonrepetitive")
generate_pc_loading_heatmap(
log((early_rep_nonrepetitive_species_spectra_reweight$Homo /
sum(early_rep_nonrepetitive_species_spectra_reweight$Homo))
/ (late_rep_nonrepetitive_species_spectra$Homo /
sum(late_rep_nonrepetitive_species_spectra$Homo))),
"Reweighted early vs. late replication nonrepetitive")
compare_early_late_trinuc <-
data.frame(
logodds = unlist(log(early_rep_nonrepetitive_trinuc_composition / late_rep_nonrepetitive_trinuc_composition)),
five_prime_and_center = as.factor(substr(names(early_rep_nonrepetitive_trinuc_composition), 1, 2)),
three_prime = as.factor(substr(names(early_rep_nonrepetitive_trinuc_composition), 3, 3)))
pdf("./heatmaps/early_vs_late_replication_timing_nonrepetitive_trinuc_content.pdf")
ggplot(compare_early_late_trinuc, aes(three_prime, five_prime_and_center))+
geom_tile(aes(fill=logodds), color="white") +
scale_fill_gradient2(low = muted("blue"), mid = "white", high = muted("red"), midpoint = 0)
dev.off()
pdf("./heatmaps/early_rep_nonrepetitive_vs_late_rep_nonrepetitive.pdf")
generate_heatmap_plot_single_species(
early_rep_nonrepetitive_species_spectra_norm,
late_rep_nonrepetitive_species_spectra_norm,
s="Homo"
)
dev.off()
# pdf("./heatmaps/early_rep_nonrepetitive_vs_late_rep_nonrepetitive.pdf")
generate_heatmap_plot_single_species( # ugh this isn't working
early_rep_nonrepetitive_species_spectra_reweight,
late_rep_nonrepetitive_species_spectra,
s="Homo"
)
# dev.off()
pdf("./heatmaps/early_rep_repetitive_vs_late_rep_repetitive.pdf")
generate_heatmap_plot_single_species(
early_rep_repetitive_species_spectra_norm,
late_rep_repetitive_species_spectra_norm,
s="Homo"
)
dev.off()
# Looking at what's up with Donald
# Build a per-mutation-type mean "signature" over the P. t. verus rows,
# deliberately excluding the individual whose row name ends in "Donald"
# (presumably the sample under suspicion -- confirm against metadata).
pan_t_v_signature <-
sapply(
names(control_spectra_reweight),
function(x)
mean(
subset(
control_spectra_reweight[, x],
startsWith(rownames(control_spectra_reweight), "Pan_troglodytes_verus") &
!endsWith(rownames(control_spectra_reweight), "Donald")
)
)
)
# Equivalent mean signature over all P. t. troglodytes rows.
pan_t_t_signature <-
sapply(
names(control_spectra_reweight),
function(x)
mean(
subset(
control_spectra_reweight[, x],
startsWith(rownames(control_spectra_reweight), "Pan_troglodytes_troglodytes")
)
)
)
# Baseline: distance between the two subspecies signatures
# (dist() defaults to Euclidean distance).
dist(
rbind(
pan_t_v_signature,
pan_t_t_signature
)
)
# Distance from Donald's spectrum to the verus signature...
dist(
rbind(
pan_t_v_signature,
subset(control_spectra_reweight,
endsWith(rownames(control_spectra_reweight), "Donald")
)
)
)
# ...and to the troglodytes signature, to see which subspecies Donald's
# mutation spectrum actually resembles.
dist(
rbind(
pan_t_t_signature,
subset(control_spectra_reweight,
endsWith(rownames(control_spectra_reweight), "Donald")
)
)
)
# ggsave(
# "./violin_plot_control_20190526.pdf",
# plot_indiv_compartment_chisq(
# control_spectra,
# control_trinuc_composition,
# control_spectra,
# control_trinuc_composition,
# "Compare control spectra"
# )
# )
# I AM HERE!
# library(umap)
# erv_and_control_umap <- umap(erv_and_control_spectra_norm)
# colnames(erv_and_control_umap$layout) <- c("x", "y")
# plot(erv_and_control_umap$layout, col=indiv_df_erv_and_control_spectra$col)
# pdf("./erv_vs_control_umap_20190109.pdf", width=6, height=4.5)
# ggplot(
# cbind(erv_and_control_umap$layout, indiv_df_erv_and_control_spectra),
# aes(x=x, y=y, group=subspecies)) +
# geom_point(aes(color=subspecies, shape=spectrum), size=2.5)
# dev.off()
|
fad7d1d18b2d7e9ad06c9b6d4af75c8d68419193
|
bb173b7f6d00e1e7dbd368fef30dbed8837c21a1
|
/man/pnud_uf.Rd
|
426e5dfd5711634707f76af924a63ce6439ea52e
|
[
"MIT"
] |
permissive
|
abjur/abjData
|
2eb67b4196472bca0df0d78a54481d1185ec8948
|
afa83b359917b6e974fbe7281f340a66b2a86cfe
|
refs/heads/master
| 2023-04-30T18:48:02.359007
| 2023-01-12T22:41:37
| 2023-01-12T22:41:37
| 77,081,132
| 19
| 4
|
NOASSERTION
| 2020-12-08T19:16:05
| 2016-12-21T19:42:23
|
R
|
UTF-8
|
R
| false
| true
| 557
|
rd
|
pnud_uf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-data.R
\docType{data}
\name{pnud_uf}
\alias{pnud_uf}
\title{UNDP data by Federative Units}
\format{
A data frame with 81 rows and 235 columns.
\describe{
\item{ano}{for more information, check \code{\link{pnud_siglas}}}
}
}
\source{
\url{https://www.br.undp.org/content/brazil/pt/home/idh0/rankings/idhm-uf-2010.html}
}
\usage{
pnud_uf
}
\description{
A dataset that contains information about UNDP of Federative Units.
}
\examples{
summary(pnud_uf)
}
\keyword{datasets}
|
55d0a5ecd9113348a95f564df5730db5201ab501
|
3f08010675b3f874336656abbf2b0ac77939649d
|
/src/server/rserve/server.r
|
76d1179474e90a109cf701bf40ac650a4ce32bbb
|
[] |
no_license
|
ithailevi/L8K
|
f277b594cd0aa9c040d051378627e16166846c62
|
a005c96cc0f1d8f688c19768bccb62e12cb4152b
|
refs/heads/master
| 2016-09-06T16:58:07.822282
| 2013-06-27T09:29:45
| 2013-06-27T09:30:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 145
|
r
|
server.r
|
# item choosing function
#
# Draw random item ids uniformly (with replacement) from 1..99.
#
# Args:
#   items_number: how many ids to draw; coerced with as.numeric(), so a
#                 string like "5" is accepted as in the original.
#
# Returns: an integer vector of length `items_number`.
item_choosing <- function(items_number)
{
  # BUG FIX: the original used as.integer(runif(n, 1, 99)), which
  # truncates a continuous draw on [1, 99) -- 99 could never be chosen
  # and the result was effectively uniform over 1..98 only. sample.int()
  # gives a genuinely uniform draw over the integers 1..99.
  items <- sample.int(99, size = as.numeric(items_number), replace = TRUE)
  items
}
|
58708784e24c395d1c8a9ddcb808cc5a0297bce5
|
cfcc1f8ff8d8b134c8bc52a64c0218772d12a604
|
/Regression.R
|
87511d7522f410e7a2c15a8dca3b423b1d0c50a0
|
[] |
no_license
|
nikhilbakodiya/R-programming
|
55e48a84a1f15c714c63803fa4913b54ce20fed3
|
777b8162839a03dc7528bd6a96b893709e9b46dd
|
refs/heads/master
| 2020-11-28T11:47:55.152187
| 2019-12-23T18:47:18
| 2019-12-23T18:47:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
Regression.R
|
# Regression demos: simple linear, multiple linear, and logistic.

# --- Simple linear regression: weight ~ height --------------------------
height <- c(121, 134, 145, 146, 132, 189, 178, 174)
weight <- c(56, 78, 57, 69, 59, 64, 65, 66)
relation <- lm(weight ~ height)
relation # y = a + b*x, i.e. weight = 57.71 + 0.042*height
newheight <- data.frame(height = 192) # predict at height = 192
# Store the prediction under its own name instead of clobbering the
# `weight` data vector (the original overwrote it).
pred_weight <- predict(relation, newheight) # 57.71 + 0.042*192
pred_weight

# --- Multiple linear regression: y = a + b*x1 + c*x2 + d*x3 -------------
model <- lm(mpg ~ disp + hp + wt, data = mtcars) # mpg = 37.10 - 0.0009*disp - 0.0311*hp - 3.80*wt
model
newmodel <- data.frame(disp = 10, hp = 8, wt = 20)
# BUG FIX: the original called predict(relation, newmodel), i.e. the
# height/weight model -- the prediction must come from `model`.
mpg <- predict(model, newmodel)
mpg

# --- Logistic regression on the Pima diabetes data ----------------------
# Install only when missing instead of unconditionally re-installing.
if (!requireNamespace("caret", quietly = TRUE)) install.packages("caret")
library(caret)
if (!requireNamespace("mlbench", quietly = TRUE)) install.packages("mlbench")
library(mlbench)
pid <- read.csv("C:/Users/nikhil.1820548/Desktop/diabetes.csv")
set.seed(2000)
pid
# 70/30 train/validation split. The original accidentally called
# createDataPartition twice in a row; one call suffices.
partition <- createDataPartition(y = pid$Outcome, p = 0.70, list = FALSE)
analysis <- pid[partition, ]
validate <- pid[-partition, ]
# Fit the logistic model (binomial GLM) on the training split.
pidmodel <- glm(formula = Outcome ~ ., data = analysis, family = binomial())
summary(pidmodel)
# Predicted probabilities on the validation split.
anspredict <- predict(pidmodel, newdata = validate, type = 'response')
anspredict
summary(anspredict)
# Threshold the probabilities at 0.5 into class labels.
convert <- ifelse(anspredict < 0.5, "0", "1")
table(convert)
convert
# BUG FIX: was confusionMatrix(validate$Outcom, convert) -- `Outcom` is a
# typo that only resolved via `$` partial matching, the data/reference
# arguments were swapped, and the inputs were not factors.
# confusionMatrix(data = predictions, reference = truth) expects factors
# sharing the same levels.
confusionMatrix(factor(convert, levels = c("0", "1")),
                factor(validate$Outcome, levels = c("0", "1")))
|
68d8a636cfb519d469a363fe4c5c7dcc080c52d0
|
5fd22a88b5a1ccc9dc74e8405986cc913b3543b2
|
/Basics of R and Data Types/R Matrices/Matrix_Selection_and_Indexing.R
|
f4a055915a54de29a9183b12beb302ea595d2be1
|
[] |
no_license
|
cyork95/R-for-Data-Science-and-Machine-Learning
|
99c46714abbf8b13582c6f2b5574a4a4ee0b0d74
|
044d805586dfcd6bce60fbc218f192634713aa27
|
refs/heads/master
| 2021-01-04T05:29:43.358293
| 2020-02-15T17:37:45
| 2020-02-15T17:37:45
| 240,408,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 218
|
r
|
Matrix_Selection_and_Indexing.R
|
# Build a 5 x 10 demo matrix filled row-wise with the integers 1..50.
matrix.exp <- matrix(seq_len(50), nrow = 5, byrow = TRUE)

# Extracting a single row or column drops to a plain integer vector.
first.row <- matrix.exp[1, ]
first.col <- matrix.exp[, 1]

# Sub-matrix slices via index ranges.
first.three.rows <- matrix.exp[seq_len(3), ]
first.three <- matrix.exp[seq_len(3), seq_len(3)]
specific.place <- matrix.exp[2:3, 5:6]
|
83688fb791d9b52fe5f694811b16ad130456dc2c
|
f32dbf645fa99d7348210951818da2275f9c3602
|
/R/mtapspec.R
|
69456b8838515bacd4817309f7855b80a26c1c2f
|
[] |
no_license
|
cran/RSEIS
|
68f9b760cde47cb5dc40f52c71f302cf43c56286
|
877a512c8d450ab381de51bbb405da4507e19227
|
refs/heads/master
| 2023-08-25T02:13:28.165769
| 2023-08-19T12:32:32
| 2023-08-19T14:30:39
| 17,713,884
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,615
|
r
|
mtapspec.R
|
# Multi-taper spectrum estimate of a time series; thin wrapper around the
# compiled routine "CALL_Mspec" shipped with the RSEIS package.
#
# Args:
#   a    - numeric vector: the time series to analyse.
#   dt   - sample interval.
#   klen - requested FFT length. Despite the `length(a)` default in the
#          signature, when klen is omitted (or smaller than the series)
#          it is padded up to 2*next2(len).
#   MTP  - optional list(kind, nwin, npi, inorm) of multi-taper
#          parameters; when omitted, kind=2, nwin=5, npi=3, inorm=1.
#
# Returns (invisibly): a list with the input data, the spectrum, degrees
#   of freedom, F-values, per-taper real/imaginary spectra (numfreqs x
#   nwin matrices), the frequency axis, and the taper settings used.
`mtapspec` <-
function(a, dt, klen=length(a), MTP=NULL)
{
##### multi-taper spectrum analysis
#### Mspec = mtapspec(a$y,a$dt, klen=4096, MTP=list(kind=2,nwin=5, npi=3,inorm=0) )
#####
# Default multi-taper parameters when no MTP list is supplied.
# NOTE(review): missing(MTP) is TRUE only when the argument is omitted;
# a caller passing MTP = NULL explicitly falls into the else branch and
# kind/nwin/npi/inorm all become NULL -- confirm this is intended.
if(missing(MTP))
{
kind=2;
nwin=5;
npi=3;
inorm=1;
}
else
{
kind=MTP$kind;
nwin=MTP$nwin;
npi=MTP$npi;
inorm=MTP$inorm ;
}
len = length(a)
# Degenerate input: need at least 2 samples to form a spectrum.
if(len<2)
{
return(0)
}
# Pad the FFT length up to twice the next power of two when it was
# omitted, or when it is too short to hold the series.
if(missing(klen))
{
klen=2*next2(len)
}
if(klen<len)
{
klen = 2*next2(len)
}
# Frequency bookkeeping for a one-sided spectrum of numfreqs bins.
numfreqs = 1+klen/2;
numfreqtap = numfreqs*nwin;
nyquist = 0.5/dt;
df = 2*nyquist/klen;
freq = df*seq(0,numfreqs-1)
# Preallocate output buffers that the C routine fills in place.
spec1 = rep(0, length=klen )
dof = rep(0, length=klen )
Fvalues = rep(0, length=klen )
ReSpec= rep(0, length= numfreqtap)
ImSpec= rep(0, length=numfreqtap )
# Hand everything to the compiled multi-taper routine. The argument
# order must match the C signature exactly -- do not reorder.
barf = .C("CALL_Mspec",PACKAGE = "RSEIS",
as.double(a),
as.integer(len),
as.integer(kind),
as.integer(nwin) ,
as.double(npi) ,
as.integer(inorm) ,
as.double(dt) ,
as.double(spec1) ,
as.double(dof) ,
as.double(Fvalues) ,
as.integer(klen) ,
as.double(ReSpec) ,
as.double(ImSpec) )
# Reshape the flat per-taper outputs into (numfreqs x nwin) matrices:
# list element 13 holds the imaginary parts, element 12 the real parts.
Ispec= matrix(unlist(barf[13]), byrow=FALSE, nrow=numfreqs, ncol=nwin)
Rspec= matrix(unlist(barf[12]), byrow=FALSE, nrow=numfreqs, ncol=nwin)
# Return invisibly so interactive calls don't flood the console.
invisible(list(dat=a, dt=dt, spec=unlist(barf[8]), dof=unlist(barf[9]),Fv=unlist(barf[10]),Rspec=Rspec, Ispec=Ispec, freq=freq, df=df, numfreqs=numfreqs, klen=klen, mtm=list(kind=kind, nwin=nwin, npi=npi, inorm=inorm)))
}
|
c5be2946bd152c910ac30ae420b933a32e0a10a9
|
8ebb7a4fc2583ad1bb04253b338c95f04be498ef
|
/man/swTFreeze.Rd
|
2e3ca4eaf84496f0905da98a8d3769d44665ca12
|
[] |
no_license
|
landsat/oce
|
8c9c3e27b9981e04c7cf1138a0aa4de8d2fc86b9
|
f6e0e6b43084568cd2c931593709a35ca246aa10
|
refs/heads/master
| 2020-12-31T07:33:10.029198
| 2014-08-10T10:05:43
| 2014-08-10T10:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
rd
|
swTFreeze.Rd
|
\name{swTFreeze}
\alias{swTFreeze}
\title{Seawater freezing temperature}
\description{Compute freezing temperature of seawater.}
\usage{swTFreeze(salinity, pressure=NULL)}
\arguments{
\item{salinity}{either salinity [PSU] or a \code{ctd} object from which
salinity will be inferred.}
\item{pressure}{seawater pressure [dbar]}
}
\details{In the first form, the argument is a \code{ctd} object, from
which the salinity and pressure values are extracted and used for
the calculation.}
\value{Temperature [\eqn{^\circ}{deg}C]}
\examples{
Tf <- swTFreeze(40, 500) # -2.588567 degC
}
\references{UNESCO tech. papers in the marine science no. 28. 1978
eighth report JPOTS Annex 6 freezing point of seawater F.J. Millero
pp.29-35.}
\author{Dan Kelley}
\keyword{misc}
|
1605b76011ae66701fe602b6fd6c831947df1e4e
|
627bbc07d4557dfe5b49ba88b9bd253a6b47068e
|
/R/zzz.R
|
c4f1c3f6c79864d7f619217fd28f06275a7e609b
|
[] |
no_license
|
cran/coxrobust
|
e54c5a53e4a16dbf51a30f8ff6154320b593eadb
|
bd29efa9d2c3c990d3b2daddcad04ac1c25d4bd0
|
refs/heads/master
| 2022-05-17T04:41:27.999639
| 2022-04-06T13:02:33
| 2022-04-06T13:02:33
| 17,671,544
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 92
|
r
|
zzz.R
|
# .onUnload <- function(libpath) {
#
# library.dynam.unload("coxrobust", libpath)
#
# }
|
a8c6bae9251905d6d08ad6a1554ec0e28b4f712a
|
350f369998282044eeff0794540189c89ad8710c
|
/R/qle-package.R
|
def71029785e0ad0e6277fad37272a8c84c1d4d6
|
[] |
no_license
|
cran/qle
|
26b2edf6e372d4a966aa85754ba4c88377036290
|
857a96cfcf8dbbf116c944c23924f6cedb37abd8
|
refs/heads/master
| 2021-09-24T10:39:46.030022
| 2018-10-08T11:00:03
| 2018-10-08T11:00:03
| 110,973,979
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,754
|
r
|
qle-package.R
|
# Copyright (C) 2017 Markus Baaske. All Rights Reserved.
# This code is published under the GPL (>=3).
#
# File: qle-package.R
# Date: 27/10/2017
# Author: Markus Baaske
#
# General description of the package and data sets
#' Simulation-Based Quasi-Likelihood Estimation
#'
#' We provide a method for parameter estimation of parametric statistical models which can be at least
#' simulated and where standard methods, such as maximum likelihood, least squares or Bayesian
#' algorithms (including MCMC) are not applicable. We follow the \emph{quasi-likelihood} theory [3]
#' to estimate the unknown model parameter by finding a root of the so-called \dfn{quasi-score} estimating
#' function. For an overview of our method and further in-depth examples please see the vignette.
#'
#' The basic idea is to transform the general parameter estimation problem into a global (black box) optimization problem
#' (see [1]) with an expensive to evaluate objective function. This function can only be evaluated with substantial random
#' errors due to the Monte Carlo simulation approach of the statistical model and the interpolation error of the involved
#' approximating functions. The algorithm sequentially selects new evaluation points (which are the model parameters) for
#' simulating the statistical model and aims on efficiently exploring the parameter space towards a root of the quasi-score
#' vector as an estimate of the unknown model parameter by some weighted distance space-filling selection criteria of randomly
#' generated candidate points.
#'
#' The main estimation process can be started by the function \code{\link{qle}} where other functions like, for example,
#' \code{\link{qscoring}} or \code{\link{searchMinimizer}} search for a root or a local and global minimizer (without sampling new
#' candidates) of some monitor function to control the estimation procedure.
#'
#' @docType package
#' @name qle-package
#'
#' @references
#' \enumerate{
#' \item Baaske, M., Ballani, F., v.d. Boogaart,K.G. (2014). A quasi-likelihood
#' approach to parameter estimation for simulatable statistical models.
#' \emph{Image Analysis & Stereology}, 33(2):107-119.
#' \item Chiles, J. P., Delfiner, P. (1999). Geostatistics: modelling spatial uncertainty.
#' \emph{J. Wiley & Sons}, New York.
#' \item Heyde, C. C. (1997). Quasi-likelihood and its applications: a general approach
#' to optimal parameter estimation. \emph{Springer}
#' \item Kleijnen, J. P. C. & Beers, W. C. M. v. (2004). Application-driven sequential designs for simulation experiments:
#' Kriging metamodelling. \emph{Journal of the Operational Research Society}, 55(8), 876-883
#' \item Mardia, K. V. (1996). Kriging and splines with derivative information. \emph{Biometrika}, 83, 207-221
#' \item McFadden, D. (1989). A Method of Simulated Moments for Estimation of Discrete Response
#' Models without Numerical Integration. \emph{Econometrica}, 57(5), 995-1026.
#' \item Regis R. G., Shoemaker C. A. (2007). A stochastic radial basis function method for the global
#' optimization of expensive functions. \emph{INFORMS Journal on Computing}, 19(4), 497-509.
#' \item Wackernagel, H. (2003). Multivariate geostatistics. \emph{Springer}, Berlin.
#' \item Zimmermann, D. L. (1989). Computationally efficient restricted maximum likelihood estimation
#' of generalized covariance functions. \emph{Math. Geol.}. 21, 655-672
#' \item Efron, B. and Tibshirani, R. J. (1993). An Introduction to the Bootstrap, Chapman & Hall, New York.
#' }
#'
#'
NULL
#' A normal model
#'
#' A statistical model of random numbers
#'
#' This is a pedagogic example of a simulated data set for quasi-likelihood estimation using
#' normally distributed random numbers. The model outcome is a vector of summary statistics, that is,
#' simply the median and mean average deviation of \code{n=10} random numbers, which is evaluated at the
#' model parameter \eqn{\theta=(\mu,\sigma)} with mean \eqn{\mu} and standard deviation \eqn{\sigma} as
#' the parameters of the normal distribution. We estimate the model parameter given a specific
#' "observation" of those summary statistics. Clearly, maximum likelihood estimation would be the
#' method of first choice if we had a real sample of observations. However, this example is used to demonstrate
#' the basic workflow of estimating the model parameter. We use this model as a standard example in the package
#' documentation.
#'
#' @docType data
#' @keywords datasets
#' @name qsd
#' @usage data(normal)
#' @format A list object named `\code{qsd}` of class \code{\link{QLmodel}} with additional elements
#' \itemize{
#' \item{simfn}{ simulation function }
#' \item{sim}{ simulation results at design points, class `\code{simQL}`}
#' \item{OPT}{ result from call to estimation function \code{qle}}
#' \item{QS}{ quasi-scoring iteration results after initial approximation}
#' }
#' @author M. Baaske
NULL
#' QLE estimation results of the normal model
#'
#' The results of estimating the parameters of the normal model by Quasi-likelihood.
#'
#' @docType data
#' @keywords datasets
#' @name OPT
#' @usage data(qleresult)
#' @format A list named `\code{OPT}` of class \code{\link{qle}}, see function \code{\link{qle}}
#' @author M. Baaske
NULL
#' QLE estimation results of M/M/1 queue
#'
#' The results of estimating the parameter of M/M/1 queue by Quasi-likelihood.
#'
#' @docType data
#' @keywords datasets
#' @name mm1q
#' @usage data(mm1q)
#' @format A list named `\code{mm1q}` with elements
#' \itemize{
#' \item{qsd}{ initial quasi-likelihood approximation model}
#' \item{OPT}{ the results of estimation by \code{\link{qle}}}
#' \item{Stest}{ score test results }
#' \item{OPTS}{ results from simulation study, see the vignette}
#' \item{Stest0}{ Score test after estimating the model parameter }
#' \item{tet0}{ original parameter value}
#' \item{obs0}{ generated observed statistics for simulation study}
#' }
#' @author M. Baaske
NULL
#' Matern cluster process data
#'
#' A data set of quasi-likelihood estimation results of estimating the parameters of a Matern cluster
#' point process model. In the vignette we apply our method to the `\code{redwood}` data set from the
#' package \code{spatstat}.
#'
#' @docType data
#' @keywords datasets
#' @name matclust
#' @usage data(matclust)
#' @format A list object named `\code{matclust}` which consists of
#' \itemize{
#' \item{qsd}{ initial quasi-likelihood approximation model}
#' \item{OPT}{ the results of estimation by \code{\link{qle}}}
#' \item{Stest}{ score test results }
#' }
#'
#' @author M. Baaske
NULL
|
421cd83d928008d49be90c3d8efccd065b9c0268
|
10b908437ccb5123218ee56191cd4bf42c6051df
|
/Geo_again/Astral_tree/1.Relabel_gene_trees_uniqueTaxid.R
|
8f723b9bdc73137f502a9e2f384ab193a1fb4358
|
[] |
no_license
|
AlexanderEsin/Scripts
|
da258f76c572b50da270c66fde3b81fdb514e561
|
b246b0074cd00f20e5a3bc31b309a73c676ff92b
|
refs/heads/master
| 2021-01-12T15:10:47.063659
| 2019-03-10T15:09:38
| 2019-03-10T15:09:38
| 69,351,736
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,751
|
r
|
1.Relabel_gene_trees_uniqueTaxid.R
|
#!/usr/local/bin/Rscript
# Relabel gene-tree tip labels (protein IDs) with their taxids, looked up
# from an SQLite protein database, then tally how many of the dataset's
# taxa appear across all trees and across "noDup" trees (trees where each
# tip maps to a distinct taxid, i.e. no apparent paralogs).
# NOTE(review): both write.tree() calls are commented out, so as written
# this run only reports coverage -- no relabelled trees are written.
library(ape)
library(RSQLite)
library(stringr)
# Input/output locations (machine-specific absolute paths).
direct <- "/Users/aesin/Desktop/Geo_again/Group_fastTree/"
in_tree_dir <- file.path(direct, "Final_trees")
out_tree_dir <- file.path(direct, "Final_trees_relab")
no_dup_tree_dir <- file.path(direct, "Final_trees_noDup")
database_path <- "/Users/aesin/Desktop/Geo_again/All_prot_db_new"
dir.create(out_tree_dir, showWarnings = FALSE)
dir.create(no_dup_tree_dir, showWarnings = FALSE)
input_tree_list <- dir(in_tree_dir, pattern = "*.txt", full.names = TRUE)
## Connect to database
conn <- dbConnect(RSQLite::SQLite(), database_path)
message("Calculating number of unique taxids in the dataset...")
total_taxids <- dbGetQuery(conn,'SELECT DISTINCT taxid from t1')
total_taxids <- total_taxids$taxid
num_total_taxids <- length(total_taxids)
## Number of taxa represented
# Accumulators: taxids seen in any tree, and taxids seen in noDup trees.
# PERF(review): both lists grow via c() inside the loop (quadratic in the
# worst case); collecting chunks and calling unique() once would scale better.
all_taxids_in_trees <- list()
all_taxids_represented <- list()
done_counter <- 1
for (input_tree in input_tree_list) {
## Read in tree and get tips
tree_data <- read.tree(input_tree)
tree_tips <- tree_data$tip.label
## In some circumstances, there are multiple entries for a particular protID in the database
## This is because some genomes contain multiple plasmids each with an identical gene / protein at the same site
## We will filter those out downstream, for now - limit to just 1
## Query the Sqlite database using the tips as protID query
taxid_tbl <- dbSendQuery(conn, 'SELECT DISTINCT protID, taxid FROM t1 WHERE protID = :tips')
dbBind(taxid_tbl, param = list(tips = tree_tips))
taxid_df <- dbFetch(taxid_tbl)
dbClearResult(taxid_tbl)
## The db results is a DF of a single column
taxid_list <- taxid_df$taxid
# Sanity check: exactly one taxid per tip, or the relabel would be invalid.
if (length(taxid_list) != length(tree_tips)) {
stop(paste0("The number of taxids retrieved does not equal the number of tips input for tree: ", input_tree))
}
## Produce the new tree object with taxids as tip labels
tree_relab <- tree_data
tree_relab$tip.label <- taxid_list
## Define a new name for the new tree and write it out
tree_basename <- basename(input_tree)
# Strip the trailing 12 characters of the filename to recover the group number.
group_number <- str_sub(tree_basename, start = 1, end = -13)
new_file_name <- paste0(group_number, "_FT_relab_tree.txt")
# write.tree(tree_relab, file = file.path(out_tree_dir, new_file_name))
## Add a list of taxids represented in trees
unique_taxids <- unique(taxid_list)
# Stop accumulating once every taxid in the dataset has been seen.
if (length(all_taxids_in_trees) != num_total_taxids) {
all_taxids_in_trees <- c(all_taxids_in_trees, unique_taxids)
all_taxids_in_trees <- unique(all_taxids_in_trees)
}
## Check whether all the taxids are unique in tree (i.e. no paralogs at all)
num_unique_taxids <- length(unique(taxid_list))
# "noDup" trees: every tip maps to a distinct taxid AND the tree has >= 50 tips.
if (length(taxid_list) == num_unique_taxids && length(tree_tips) >= 50) {
# write.tree(tree_relab, file = file.path(no_dup_tree_dir, new_file_name))
all_taxids_represented <- c(all_taxids_represented, unique_taxids)
all_taxids_represented <- unique(all_taxids_represented)
}
message(paste0("Relabelled: ", done_counter, " // ", length(input_tree_list), "... Taxids in all trees: ", length(all_taxids_in_trees), " // ", num_total_taxids, "... Taxids in noDup trees: ", length(all_taxids_represented), " // ", num_total_taxids))
done_counter = done_counter + 1
}
# Report which taxa never appear in any noDup tree, with their assembly
# accession and binomial name for human follow-up.
missing_taxids <- setdiff(total_taxids, all_taxids_represented)
message(paste0("Taxids missing from the noDup tree set: ", paste(missing_taxids, collapse = " | ")))
# NOTE(review): if missing_taxids is empty, this parameterised query gets a
# zero-length parameter list -- confirm dbGetQuery handles that gracefully.
missing_taxid_data <- dbGetQuery(conn, 'SELECT acc_ass, binomial FROM t1 WHERE taxid = :taxids LIMIT 1', params = list(taxids = missing_taxids))
message(paste0("These correspond to: ", paste(missing_taxid_data$acc_ass, collapse = " | ")))
message(paste0("These correspond to: ", paste(missing_taxid_data$binomial, collapse = " | ")))
#dbDisconnect(conn)
|
e4b7911b9581cdba9163ba0f746d47983e18f46a
|
846eb90003c329750ca6078a7d4941cd87e578cc
|
/Section 2/Section2.4/Video24.R
|
4918920382decc9a286680b216b9c05a961496fd
|
[] |
no_license
|
PacktPublishing/Learning-Data-Analysis-with-R-Video-
|
62685d9a9f9116184afb0791e243f6f8443bbf82
|
151713640dcdc4887f8e867064f73745749c49fa
|
refs/heads/master
| 2021-06-27T06:03:25.744205
| 2021-01-19T13:09:03
| 2021-01-19T13:09:03
| 187,592,737
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,165
|
r
|
Video24.R
|
# Volume 1, Section 2, Video 4
# Author: Dr. Fabio Veronesi
#
# Load a live USGS earthquake feed, promote it to a spatial object, and
# assign a coordinate reference system (CRS).

# Load the required packages
library(sp)
library(raster)

# For this video we are going to use the data.frame we created
# in video 1.1
# Setting the working directory (machine-specific; adjust as needed)
setwd("E:/OneDrive/Packt - Data Analysis/Data")

# Set the URL with the CSV Files
URL <- "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_day.csv"

# Loading CSV Files.
# BUG FIX: the original passed `na.string=""`, which only worked through
# R's partial argument matching -- the real read.table argument is
# `na.strings`.
Data <- read.table(file = URL,
                   sep = ",",
                   header = TRUE,
                   na.strings = "")

# Peek at the first coordinate pair
Data$latitude[1]
Data$longitude[1]

# Transformation into a spatial object (SpatialPointsDataFrame)
coordinates(Data) <- ~longitude+latitude

# Assign projection: unprojected WGS84 lon/lat
projection(Data) <- CRS("+init=epsg:4326")

# This is a list of common projections
# from: http://spatialreference.org/
# CRS("+init=epsg:3857") -> wgs84/OSM
# CRS("+init=epsg:4326") -> Unprojected WGS84 for Google Maps
# CRS("+init=epsg:3395") -> wgs84/World Mercator

# Alternatively projections can be assigned using data from
# another spatial object.
NatEarth <- shapefile("Shapefile/ne_110m_admin_0_countries.shp")
projection(Data) <- projection(NatEarth)
|
7784c6f5c6c8fed1618ea6f6918ea7de5ae629b0
|
de92c076034b4ccf601aea725f226b427db3bbef
|
/codigos_en_r/arbol_con_c50.R
|
fbd4d7fdaebb28518bcac45c22defe59c5a224e0
|
[
"Apache-2.0"
] |
permissive
|
armandovl/estadistica_multivariante_r
|
7b29d935c8ddace14133e3b8cc983c2727c934ec
|
9eec99d819d21b358ccc1a9c01ab8af084160c8c
|
refs/heads/main
| 2023-03-04T18:50:49.680927
| 2021-02-11T04:04:22
| 2021-02-11T04:04:22
| 335,741,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
arbol_con_c50.R
|
#.................. Load and inspect the data ...................
Datos1<-read.csv("bases_de_datos/atitanic.csv") # read the data frame from a local CSV
#.. Alternative: fetch the same data from GitHub (overwrites the local read above)
miURL="https://raw.githubusercontent.com/armandovl/estadistica_multivariante_r/main/bases_de_datos/atitanic.csv"
Datos1<-read.csv(url(miURL))
head(Datos1,10) # first 10 records
str(Datos1) # structure of the data
#................. Data preparation ........................
Datos1$Survived=as.factor(Datos1$Survived) # convert character to factor
Datos1$Pclass=as.factor(Datos1$Pclass) # convert character to factor
Datos1$Sex=as.factor(Datos1$Sex) # convert character to factor
Datos1$Embarked=as.factor(Datos1$Embarked) # convert character to factor
#... Dropping identifier columns that carry no predictive signal
Datos1$PassengerId<-NULL # drop the Id column
Datos1$Name<-NULL # drop the name column
str(Datos1) # structure of the data
#................. First tree: the C50 package ..................
# ..... Working with the C50 package
#install.packages("C50")
library(C50)
# Fit a C5.0 decision tree: columns 1-5 as predictors, Survived as target.
modelo_c50<-C5.0(Datos1[,c(1:5)],Datos1$Survived)
plot(modelo_c50) # cex=1 does not apply a font size here
summary(modelo_c50)
# Variable importance of the fitted model
#install.packages("caret")
library(caret)
dt_importance <- varImp(modelo_c50)
print(dt_importance)
#................. Second tree: the party package ..................
# ..... Working with the party package
#install.packages("party")
library(party)
# Conditional-inference tree using all remaining columns as predictors.
modelo_party<-ctree(Survived~.,Datos1)
plot(modelo_party) # the plot scales itself automatically
|
1f6a1d05ac1b1339ffd26209d49e89bbaf9a2cdc
|
f8eb55c15aec611480ede47d4e15e5a6e472b4fa
|
/analysis/0037_bond_returns.R
|
e225b5ba419ed298676b943bb1910047f7e607ea
|
[] |
no_license
|
nmaggiulli/of-dollars-and-data
|
a4fa71d6a21ce5dc346f7558179080b8e459aaca
|
ae2501dfc0b72d292314c179c83d18d6d4a66ec3
|
refs/heads/master
| 2023-08-17T03:39:03.133003
| 2023-08-11T02:08:32
| 2023-08-11T02:08:32
| 77,659,168
| 397
| 32
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,468
|
r
|
0037_bond_returns.R
|
# 0037_bond_returns ----------------------------------------------------------
# Computes real (CPI-adjusted) returns for U.S. 10-year bonds from the
# Damodaran dataset and produces two charts: (1) annual real returns and
# (2) annualized real returns by decade.
cat("\014") # Clear your console
rm(list = ls()) #clear your environment -- NOTE(review): wipes the whole session; kept as project convention
########################## Load in header file ######################## #
# header.R presumably defines exportdir, localdir and of_dollars_and_data_theme -- confirm
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(ggplot2)
library(reshape2)
library(scales)
library(grid)
library(gridExtra)
library(gtable)
library(RColorBrewer)
library(stringr)
library(ggrepel)
library(lubridate)
library(tidyr)
library(dplyr)
folder_name <- "0037_bond_returns"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
# Load in Damodaran SP500 and Bond data
hist_bond_stock <- readRDS(paste0(localdir, "0021_historical_returns_sp500_bond_damodaran.Rds"))
# Load in the FRED CPI data
cpi <- readRDS(paste0(localdir, "0021_FRED_cpi.Rds"))
# Subset historical bond and stock returns and adjust for CPI using FRED data.
# NOTE(review): real return is approximated by simple subtraction
# (nominal - inflation) rather than (1 + nominal)/(1 + inflation) - 1.
hist_bond_stock <-hist_bond_stock %>%
  left_join(cpi, by = c("Date" = "year")) %>%
  mutate(ret_sp500 = ret_sp500 - rate_cpi,
         ret_10yr_bond = ret_10yr_bond - rate_cpi,
         decade = Date %/% 10 * 10) %>%  # e.g. 1987 -> 1980
  select(Date, decade, ret_10yr_bond, ret_sp500)
# Get the min and max year (used in the plot title)
min_year <- min(hist_bond_stock$Date)
max_year <- max(hist_bond_stock$Date)
# Get the average return for plotting
avg_ret <- mean(hist_bond_stock$ret_10yr_bond)
############################### First Returns Plot ###############################
# Set the file_path for the output
file_path = paste0(exportdir, "0037_bond_returns/bond-returns.jpeg")
to_plot <- hist_bond_stock
# Plot the returns to show how much they change over time
plot <- ggplot(data = to_plot, aes(x = Date, y = ret_10yr_bond)) +
  geom_bar(stat = "identity", fill = "blue") +
  ggtitle(paste0("U.S. 10 Year Bonds Averaged ", round(avg_ret,2)*100, "% Real Returns\n", min_year, " - ", max_year)) +
  scale_y_continuous(labels = percent) +
  of_dollars_and_data_theme +
  labs(x = "Year" , y = "Annual Real Return (%)")
# Add a source and note string for the plots
source_string <- paste0("Source: http://www.stern.nyu.edu/~adamodar/pc/datasets/histretSP.xls (OfDollarsAndData.com)")
note_string <- paste0("Note: Adjusts for inflation using FRED CPI data.")
# Turn plot into a gtable for adding text grobs
my_gtable <- ggplot_gtable(ggplot_build(plot))
# Make the source and note text grobs
source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"),
                        gp =gpar(fontfamily = "my_font", fontsize = 8))
note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"),
                      gp =gpar(fontfamily = "my_font", fontsize = 8))
# Add the text grobs to the bottom of the gtable
my_gtable <- arrangeGrob(my_gtable, bottom = source_grob)
my_gtable <- arrangeGrob(my_gtable, bottom = note_grob)
# Save the gtable
ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm")
############################### Second Returns Plot, by Decade ###############################
# Set the file_path for the output
file_path = paste0(exportdir, "0037_bond_returns/bond-stock-by-decade.jpeg")
# Geometric-mean (annualized) real bond return within each decade
to_plot <- hist_bond_stock %>%
  select(decade, ret_10yr_bond) %>%
  group_by(decade) %>%
  summarise(count = n(),
            `U.S. 10-Year Bond` = prod(1 + ret_10yr_bond)^(1/count) - 1) %>%
  select(-count) %>%
  gather(key=key, value=value, -decade)
# Plot the returns to show how much they change over time
plot <- ggplot(data = to_plot, aes(x = decade, y = value)) +
  geom_bar(stat = "identity", position = "dodge", fill = "blue") +
  ggtitle(paste0("Bonds Have Had Multiple Decades\nwith Negative Annualized Real Returns")) +
  scale_fill_discrete(guide = FALSE) +
  scale_color_discrete(guide = FALSE) +
  scale_y_continuous(labels = percent) +
  scale_x_continuous(breaks = seq(min(to_plot$decade), max(to_plot$decade), 10)) +
  of_dollars_and_data_theme +
  labs(x = "Decade" , y = "Annualized Real Return (%)")
# Add a source and note string for the plots
source_string <- paste0("Source: http://www.stern.nyu.edu/~adamodar/pc/datasets/histretSP.xls (OfDollarsAndData.com)")
note_string <- paste0("Note: Adjusts for inflation using FRED CPI data.")
# Turn plot into a gtable for adding text grobs
my_gtable <- ggplot_gtable(ggplot_build(plot))
# Make the source and note text grobs
source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"),
                        gp =gpar(fontfamily = "my_font", fontsize = 8))
note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"),
                      gp =gpar(fontfamily = "my_font", fontsize = 8))
# Add the text grobs to the bottom of the gtable
my_gtable <- arrangeGrob(my_gtable, bottom = source_grob)
my_gtable <- arrangeGrob(my_gtable, bottom = note_grob)
# Save the gtable
ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm")
# ############################ End ################################## #
|
50837709eea39b53940d0d7843bd7ce32d2ba8f3
|
669fb662d125367d271b57070613801f6750178d
|
/R/AllGenerics.R
|
28967614215db4f41e033fa662ecd5e3d5ede3c7
|
[] |
no_license
|
petterbrodin/flowWorkspace
|
be72910302bcc1af2a94a6fd7f20f41bc874e851
|
f18e57a16c8389b66e999b45f80d62a29e6439e0
|
refs/heads/master
| 2021-01-17T21:33:01.093721
| 2012-09-26T22:32:44
| 2012-09-26T22:32:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,000
|
r
|
AllGenerics.R
|
# S4 generic declarations for the flowWorkspace interface.
# Only the generics are declared here; the concrete methods are
# implemented elsewhere in the package.

setGeneric("openWorkspace", function(file) standardGeneric("openWorkspace"))
setGeneric("closeWorkspace", function(workspace) standardGeneric("closeWorkspace"))
setGeneric("parseWorkspace", function(obj, ...) standardGeneric("parseWorkspace"))
setGeneric("getNodes", function(x, ...) standardGeneric("getNodes"))
setGeneric("flowWorkspace2flowCore", function(obj, ...) standardGeneric("flowWorkspace2flowCore"))
setGeneric("ellipsoidGate2FlowJoVertices", function(gate, ...) standardGeneric("ellipsoidGate2FlowJoVertices"))
setGeneric("haveSameGatingHierarchy", function(object1, object2) standardGeneric("haveSameGatingHierarchy"))
setGeneric("addGate", function(obj, gate, parent, ...) standardGeneric("addGate"))
setGeneric("getNcdf", function(obj) standardGeneric("getNcdf"))

# Getter/setter pair for the ncdfFlowSet slot
setGeneric("ncFlowSet", function(x) standardGeneric("ncFlowSet"))
setGeneric("ncFlowSet<-", function(x, value) standardGeneric("ncFlowSet<-"))

setGeneric("getIndiceFile", function(obj) standardGeneric("getIndiceFile"))
setGeneric("execute", function(hierarchy, ...) standardGeneric("execute"))
setGeneric("plotGate", function(x, y, ...) standardGeneric("plotGate"))
# setGeneric("plotWf", function(x, ...) standardGeneric("plotWf"))  # kept disabled, as in the original
setGeneric("getPopStats", function(x, ...) standardGeneric("getPopStats"))
setGeneric("plotPopCV", function(x, ...) standardGeneric("plotPopCV"))
setGeneric("getData", function(obj, ...) standardGeneric("getData"))
setGeneric("getGate", function(obj, y, ...) standardGeneric("getGate"))
setGeneric("getParent", function(obj, y, ...) standardGeneric("getParent"))
setGeneric("getAxisLabels", function(obj, y, ...) standardGeneric("getAxisLabels"))
setGeneric("getBoundaries", function(obj, y, ...) standardGeneric("getBoundaries"))
setGeneric("getDimensions", function(obj, y, ...) standardGeneric("getDimensions"))
setGeneric("getChildren", function(obj, y, ...) standardGeneric("getChildren"))
setGeneric("copyGatingHierarchyFromTo", function(a, b, ...) standardGeneric("copyGatingHierarchyFromTo"))
setGeneric("writeIndice", function(obj, y, z, ...) standardGeneric("writeIndice"))
setGeneric("getIndices", function(obj, y, ...) standardGeneric("getIndices"))
setGeneric("getProp", function(x, y, ...) standardGeneric("getProp"))
setGeneric("getTotal", function(x, y, ...) standardGeneric("getTotal"))
setGeneric("getSamples", function(x, ...) standardGeneric("getSamples"))
setGeneric("getSample", function(x, ...) standardGeneric("getSample"))
setGeneric("getSampleGroups", function(x) standardGeneric("getSampleGroups"))
setGeneric("getCompensationMatrices", function(x) standardGeneric("getCompensationMatrices"))
setGeneric("getTransformations", function(x) standardGeneric("getTransformations"))
setGeneric("getKeywords", function(obj, y) standardGeneric("getKeywords"))
setGeneric("exportAsFlowJoXML", function(obj, ...) standardGeneric("exportAsFlowJoXML"))
|
6da8fe99c4dd34ea5c4bb323eb223fae4146c33e
|
78bcb722fda2bad52e146e4bb6aeb14a29bf7d77
|
/man/fixNamesForEMU.Rd
|
20d5c7d0545cd95e6f897d801e52228a92331107
|
[] |
no_license
|
richardbeare/ultRa
|
94b3b2d04afaa049017900fc2d91379a63fbc0c5
|
4ff4d3d5929fa58b3906548733837498f97294b9
|
refs/heads/master
| 2020-12-24T16:07:04.399327
| 2018-04-30T03:55:39
| 2018-04-30T03:55:39
| 28,159,290
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 672
|
rd
|
fixNamesForEMU.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exportSSFF.R
\name{fixNamesForEMU}
\alias{fixNamesForEMU}
\title{Create modified names for AAA files. Removes punctuation that offends tcltk}
\usage{
fixNamesForEMU(files)
}
\arguments{
\item{files}{the output of list.files}
}
\value{
list of modified names
}
\description{
Create modified names for AAA files. Removes punctuation that offends tcltk
}
\examples{
\dontrun{
f<-list.files(path="c:/Tabain/English_Ultrasound/", full.names=TRUE)
f1 <- gsub("_Track1", "", f)
f2 <- fixNamesForEMU(f1)
need.to.do <- basename(f) != basename(f2)
file.rename(from=f[need.to.do], to=f2[need.to.do])
}
}
|
57eecc267ced51f699f5cbf17156cfb79952886e
|
3ba50ff12a4bdfebdf136aa0a637b6fbfd2827a3
|
/DSR Lab/3/3a.R
|
c9439d183b67c0297a2833eba7625e6c220f1dfb
|
[] |
no_license
|
smaransrao/DSRlab
|
fb96b9c5f1694e49d4920f4a00bc99f221adc5ee
|
5d20584ba68668db4481e7674f5a76380e4eeae4
|
refs/heads/master
| 2021-07-10T20:26:04.929164
| 2020-12-06T10:03:32
| 2020-12-06T10:03:32
| 221,524,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 297
|
r
|
3a.R
|
# Build a 5x5 score matrix and pair the raw data with team names in a list.
v1 <- c(10, 1, 37, 5, 12)
v2 <- c(8, 3, 9, 6, 4)
v3 <- c(18, 9, 12, 4, 6)
v4 <- c(8, 27, 6, 32, 23)
v5 <- c(12, 13, 16, 9, 10)
# Stack the five vectors as rows (rbind keeps the rownames v1..v5)
x <- rbind(v1, v2, v3, v4, v5)
# matrix() re-reads x column-wise as plain data, so the dimnames are dropped
m <- matrix(x, nrow = 5, ncol = 5)
m
# Renamed from `names` so that base::names() is not masked in the session
team_names <- c('Thistle', 'Vipers', 'Golden Rain', 'Yellow', 'Blackberry')
l <- list(x, team_names)
l
|
404666c668ca2f019a54e706007d6176692c4c1e
|
5bcc79d20267e222255f7133ccdfd589158fa5d7
|
/R/nullParaEst.R
|
03c1bac5b35ad37734af8739bc29485df33d2de1
|
[
"MIT"
] |
permissive
|
zhonghualiu/DACT
|
d32fed4428892cbd9cf9e81ad8b0f2c6ee3ce02b
|
fd518e727a3ea0e1e7294754807a6c8cdc7185df
|
refs/heads/master
| 2023-02-20T04:29:23.521419
| 2023-02-06T02:35:07
| 2023-02-06T02:35:07
| 296,997,733
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
r
|
nullParaEst.R
|
nullParaEst<-function (x,gamma=0.1)
{
  # Estimate the mean and standard deviation of the empirical null
  # distribution from a vector of z-values.
  #
  # Appears to implement a characteristic-function based null estimator
  # in the style of Jin & Cai (2007) -- TODO confirm against the reference.
  #
  # Args:
  #   x:     numeric vector of z-values
  #   gamma: tuning parameter; the frequency cut-off threshold is n^(-gamma)
  #          (default 0.1)
  # Returns:
  #   list(mu = estimated null mean, s = estimated null sd)
  n = length(x)
  t = c(1:1000)/200          # frequency grid s in (0, 5]
  gan = n^(-gamma)           # threshold on |phi(s)|
  that = 0
  shat = 0
  uhat = 0
  epshat = 0
  phiplus = rep(1,1000)
  phiminus = rep(1,1000)
  dphiplus = rep(1,1000)
  dphiminus = rep(1,1000)
  phi = rep(1,1000)
  dphi = rep(1,1000)         # NOTE(review): initialised but never used
  for (i in 1:1000) {
    s = t[i]
    # Real and imaginary parts of the empirical characteristic function ...
    phiplus[i] = mean(cos(s*x))
    phiminus[i] = mean(sin(s*x))
    # ... and their derivatives with respect to s
    dphiplus[i] = -mean(x*sin(s*x))
    dphiminus[i] = mean(x*cos(s*x))
    phi[i] = sqrt(phiplus[i]^2 + phiminus[i]^2)  # modulus |phi(s)|
  }
  # First frequency at which |phi(s)| drops to the threshold n^(-gamma)
  ind = min(c(1:1000)[(phi - gan) <= 0])
  tt = t[ind]
  a = phiplus[ind]
  b = phiminus[ind]
  da = dphiplus[ind]
  db = dphiminus[ind]
  c = phi[ind]   # NOTE(review): masks base::c from here on (no later c() calls, but fragile)
  that = tt
  shat = -(a*da + b*db)/(tt*c*c)
  shat = sqrt(shat)
  uhat = -(da*b - db*a)/(c*c)
  # Estimated non-null proportion; computed but not returned
  epshat = 1 - c*exp((tt*shat)^2/2)
  return(musigma=list(mu=uhat,s=shat))
}
|
88a0551b2c77d37d2aeabc68d6fb9737d16adeef
|
05b71bc93cd7b6f41ee19a1d6ded9a34bbaeeea2
|
/R/Modelling/0_data_management/sentiment_scaler.R
|
671a45e41ba15c80a1b0c003f7bb97eb07876432
|
[] |
no_license
|
Nicholas-Autio-Mitchell/master_thesis
|
697b0972bc6e56a1a7146da1e524e5904f79344c
|
326d7c2b30f2eed6f2a4e82edbb090bfa1c495bf
|
refs/heads/master
| 2023-07-07T02:22:14.353564
| 2023-06-26T09:20:16
| 2023-06-26T09:20:16
| 69,510,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,236
|
r
|
sentiment_scaler.R
|
###################### ====================================================== ######################
###################### Scale all sentiment results, maintaining dispersion ######################
###################### ====================================================== ######################
#' All sentiment scores are scaled within their relative subsets
#' The new scale is from -1 to +1
#' This is done using a linear transformation, with the factor for scaling
#' being the absolute maximum of that subet.
#' This ensures our mean and variance also scale linearly.
#' If we were to scale with a regular (normalisation) method, the mean would be shifted in a way
#' that could force it to change it's sign, which would alter our interpretation
#' of the results. A positive results means positive sentiment and vice-versa.
## Package required to calculate statistics for the SA scores of each search term
library(pastecs)
## ======================================= ##
##  Aggregate the SA results for weekends  ##
## ======================================= ##
## We use the 'data_dirty.rda' data collection
load("/Volumes/Mac\ OS\ Drive/Thesis/Source\ Code/R/Modelling/raw_data/data_dirty.rda")
## Use the upgraded (weighted) sentiment data
load("/Volumes/Mac\ OS\ Drive/Thesis/Source\ Code/R/Modelling/raw_data/sentiment_data_weighted.rda")
sent <- weighted_sentiment
## ## We need the following column indices
## 11, 12, 13, 14, 15, 16
## 22, 23, 24, 25, 26, 27
## 33, 34, 35, 36, 37, 38
## Find all the column indices of the Sentiment Analysis results in order to scale them
num_col_per_search_term <- 11
n_reps <- ceiling(length(sent)/num_col_per_search_term)
## Offsets that walk the six SA columns inside each 11-column group
diffs <- c(11, rep(c(1, 1, 1, 1, 1, 6), n_reps))
myCols <- cumsum(diffs) %>% .[. <= 148] #121 = last columns of SA results -- NOTE(review): comment says 121 but the filter uses 148; confirm which is correct
## Take subset of all sentiment data (we are avoiding the number of tweets etc.)
## NOTE: subsetting a data table makes a copy, that is changing the subset does not also change the original data
sub_sent <- subset(sent, select = myCols)
###################### ========================== ######################
######################  Scale the sentiment data  ######################
###################### ========================== ######################
## We break the data into its groups (search terms) in order to scale them
## We also create a descriptive statistics table for each search term
## See file: "sentiment_data_stats.rda"
## Names of the models
mod_names <- c("Emolex", "Sentiment140", "SentiStrength", "Vader", "Vader_Afinn")
## Names of the search terms
searchTerms <- c("bull_market", "bear_market", "dow_jones", "dow_SPDR", "dow_wallstreet", "federal_reserve",
                 "financial_crisis", "goldman_sachs", "interest_rates", "market_volatility", "obama_economy",
                 "oil_prices", "stock_prices")
## Initialise lists with one NULL slot per search term
sent_stats <- sapply(searchTerms, function(x) NULL)
sent_minmax <- sapply(searchTerms, function(x) NULL)
## ============================ ##
## NOT RUN - some comparisons ##
## ============================ ##
## ## Compare the results that come as a results of NOT averaging the SentiStrength scores
## rowMeans(subset(bull_market, select = seq(1, 6)))
## ## Direct comparison - visual inspection
## data.table(x, sent$bull_market_avg)
## ## identical(x, sent$bull_market_avg)
## ## FALSE
##
## ## Compare different ways to find the mean of the rows
## x <- data.table(pos = sent$bull_market_Sentistrength_pos, neg = sent$bull_market_Sentistrength_neg)
## x$original <- bull_market$SentiStrength #original output from above
## x[, avg := apply(.SD, 1, mean), .SDcols = c("pos", "neg")] #by reference
## x$my_avg <- rowSums(x[, .(pos, neg)])/2 #subsetting with data.table but using rowSums
## x #have a look
## ========================================================= ##
## Calculate stats for each search term's SA model results ##
## ========================================================= ##
## Compute stats for all search terms and use them to scale all SA scores between +/- 1
## output is save(sent_scaled, file = "sentiment_data_scaled.rda") --> further below
## Create one data table that contains all the SA scores, ready to scale
SA_scores_non_scaled <- sapply(searchTerms, function(x) NULL)
## --------------------------------------------------------- ##
##  Complete example for the first search term: bull_market  ##
## --------------------------------------------------------- ##
## select just the bull market data (six columns, one for each SA model output)
bull_market <- c(names(sub_sent)[grep("^bull_market", names(sub_sent))]) %>%
  subset(sub_sent, select = .)
## Create one value for SentiStrength
## This also removes the scaling issue, ranging [-1 to -5] and [+1 to +5] (nothing between -1 and +1)
bull_market$SentiStrength <- rowMeans(data.table(bull_market$bull_market_Sentistrength_pos, bull_market$bull_market_Sentistrength_neg))
## Drop the separate SentiStrength pos/neg columns; the setcolorder() leaves
## the columns in the same order as mod_names:
## Emolex, Sentiment140, SentiStrength, Vader, Vader_Afinn
bull_market <- subset(bull_market, select = c("bull_market_Emolex", "bull_market_Sentiment140",
                                              "bull_market_Vader_Afinn", "bull_market_Vader", "SentiStrength")) %>%
  setcolorder(., c(1, 2, 5, 4, 3))
## Apply nicer names (Keep plotting in mind for later!)
names(bull_market) <- mod_names
## Assign the non-scaled SA results to a list to keep for later use if necessary
SA_scores_non_scaled$bull_market <- bull_market
## compute the stats (including max/min for later use)
sent_stats$bull_market <- stat.desc(bull_market)
## ----------------------------- ##
##  Repeat for all search terms  ##
## ----------------------------- ##
## The twelve remaining search terms go through exactly the same pipeline as
## bull_market above, so the twelve hand-copied blocks are consolidated into
## one loop. Column prefixes in `sub_sent` do not always match the lowercase
## search-term keys (e.g. "Dow_Jones" vs "dow_jones"), hence the explicit map.
col_prefixes <- c(bear_market = "bear_market", dow_jones = "Dow_Jones",
                  dow_SPDR = "dow_SPDR", dow_wallstreet = "dow_wallstreet",
                  federal_reserve = "federal_reserve",
                  financial_crisis = "financial_crisis",
                  goldman_sachs = "goldman_sachs",
                  interest_rates = "interest_rates",
                  market_volatility = "market_volatility",
                  obama_economy = "obama_economy",
                  oil_prices = "oil_prices", stock_prices = "stock_prices")
for (term in names(col_prefixes)) {
  prefix <- col_prefixes[[term]]
  ## The six raw SA columns for this search term (same order as grep finds them)
  term_dt <- subset(sub_sent, select = grep(paste0("^", prefix), names(sub_sent), value = TRUE))
  ## Collapse SentiStrength's separate pos/neg scores into one average column
  term_dt$SentiStrength <- rowMeans(data.table(term_dt[[paste0(prefix, "_Sentistrength_pos")]],
                                               term_dt[[paste0(prefix, "_Sentistrength_neg")]]))
  ## Drop the raw pos/neg columns (positions 1:2) and reorder so the columns
  ## line up with mod_names: Emolex, Sentiment140, SentiStrength, Vader, Vader_Afinn
  term_dt <- subset(term_dt, select = names(term_dt)[3:7]) %>%
    setcolorder(., c(1, 2, 5, 4, 3))
  names(term_dt) <- mod_names
  ## Keep the per-term global variable (e.g. `bear_market`) that downstream
  ## code still references, plus the raw scores and descriptive stats
  assign(term, term_dt)
  SA_scores_non_scaled[[term]] <- term_dt
  sent_stats[[term]] <- stat.desc(term_dt)
}
## Group all min and max values together in one list of data tables
## NOTE(review): the min/max data.tables built here are immediately
## overwritten by the second loop below, which stores a single scalar instead.
for(i in 1:length(sent_stats)){ # i is the search term
  for(j in 1:length(sent_stats[[i]])){ # j is the SA model used
    # elements 4 and 5 of a stat.desc() column are min and max -- confirm with pastecs docs
    sent_minmax[[i]][[j]] <- data.table(min = sent_stats[[i]][[j]][[4]], max = sent_stats[[i]][[j]][[5]])}}
## Give them the names of the models
for(i in 1:13){names(sent_minmax[[i]]) <- mod_names}
## Group all min and max values together in one list
## (each entry becomes the absolute maximum -- the linear scaling factor)
for(i in 1:length(sent_stats)){ # i is the search term
  for(j in 1:length(sent_stats[[i]])){ # j is the SA model used
    sent_minmax[[i]][[j]] <- max(abs(c(sent_stats[[i]][[j]][[4]], sent_stats[[i]][[j]][[5]])))
  }
}
## ========================================================================== ##
##  Scale all the sentiment results to take daily average in an unbiased way  ##
## ========================================================================== ##
#' We use the min and max values obtained to scale the sentiment data, but we could
#' additionally recreate the stats info about all the models' SA results here after
#' they have been scaled for comparison.
#' Or to simply plot the actual results used for further analysis.
## ------------------------------------- ##
##  Create a list of of scaling factors  ##
## ------------------------------------- ##
model_list <- sapply(mod_names, function(x) NULL)
scaling_factors <- sapply(searchTerms, function(x) NULL)
for(i in 1:length(searchTerms)){scaling_factors[[i]] <- sapply(model_list, function(x) NULL)}
## Save all scaling factors in one list
## (max(abs(...)) is redundant here -- sent_minmax already holds scalar
## absolute maxima after the loop above -- but harmless)
for(i in 1:length(searchTerms)) {
  for(j in 1:length(mod_names)) {
    scaling_factors[[i]][[j]] <- max(abs(sent_minmax[[i]][[j]]))
  }
}
## ------------------------------------ ##
##  Save the stats and scaling factors  ##
## ------------------------------------ ##
SA_stats_and_scaling_factors <- sapply(c("stats", "scaling_factors"), function(x) NULL)
SA_stats_and_scaling_factors$stats <- sent_stats
SA_stats_and_scaling_factors$scaling_factors <- scaling_factors
save(SA_stats_and_scaling_factors, file = "SA_stats_and_scaling_factors.rda")
## -------------------------------------------------------------------------------- ##
##  Scale all the data then create one data table to combine with market data etc.  ##
## -------------------------------------------------------------------------------- ##
## Create object to hold all the scaled SA scores
SA_scores_scaled <- sapply(searchTerms, function(x) NULL)
for(i in 1:length(SA_scores_scaled)){SA_scores_scaled[[i]] <- model_list}
## Scale the data: divide each model's scores by that model's absolute maximum
for(i in 1:length(SA_scores_non_scaled)) {
  for(j in 1:length(SA_scores_non_scaled$dow_jones)) {
    SA_scores_scaled[[i]][[j]] <- SA_scores_non_scaled[[i]][[j]] / scaling_factors[[i]][[j]]
  }
}
## Check that the new maximums of the scaled data are indeed +/- 1
## BUG FIX: the inner loop previously reused `i` and indexed with a stale `j`
## left over from the loop above, so the check printed the wrong entries.
for (i in seq_along(SA_scores_scaled)) {
  for (j in seq_along(SA_scores_scaled[[i]])) {
    print(max(abs(SA_scores_scaled[[i]][[j]])))
  }
}
## Initialise the combined table with the first search term
sent_scaled <- as.data.table(sapply(SA_scores_scaled$bull_market, function(x) x))
names(sent_scaled) <- paste0(names(SA_scores_scaled)[1], ".", mod_names)
## Append the remaining search terms column-wise
for(i in 1:(length(SA_scores_scaled)-1)) {
  this_subset <- as.data.table(sapply(SA_scores_scaled[[i+1]], function(x) x))
  names(this_subset) <- paste0(names(SA_scores_scaled)[i+1], ".", mod_names)
  sent_scaled <- cbind(sent_scaled, this_subset)
}
## Save the scaled sentiment data
save(sent_scaled, file = "sentiment_data_scaled.rda")
###################### ========================================== ######################
######################  Find daily averages for sentiment scores  ######################
###################### ========================================== ######################
## Calculate the average for each day and append to sentiment data
sent$bull_market_avg <- rowMeans(subset(bull_market, select = seq(3, 7, 1))) #Leaves out SS_pos and SS_neg -- NOTE(review): at this point bull_market only has 5 columns, so selecting 3:7 looks wrong; confirm the intended columns
## ====================================================================== ##
##  for each search term: save daily average sentiment as a new variable  ##
## ====================================================================== ##
## This finds the average over all SA models for each search term
## The output should be 971 observations for 13 variables (search terms)
## This can then be attached to 'data_dirty' for imputation and dummy variable creation
## As the scaled data is still all in one list, it must be converted to a data table
daily_avg_scores <- sapply(searchTerms, function(x) NULL)  # NOTE(review): dead assignment -- overwritten on the next line
daily_avg_scores <- data.table(to_remove = matrix(nrow = 971))[, as.vector(searchTerms) := as.data.table(0)]
for(i in 1:13){
  # Columns 5i-4 .. 5i of sent_scaled hold the five model scores for term i
  daily_avg_scores[, searchTerms[i] := subset(as.data.table(sent_scaled), select = seq(5*i - 4, 5*i)) %>%
    rowMeans(.)]
  ## daily_avg_scores[[i]] <- subset(as.data.table(sent_scaled), select = seq(5*i - 4, 5*i)) %>%
  ## rowMeans(.)
}
daily_avg_scores[, to_remove.V1 := NULL]  # drop the placeholder column created above
save(daily_avg_scores, file = "sentiment_data_daily_averages.rda")
## ================================================================= ##
##  Append the daily averages to the entire data set for imputation  ##
## ================================================================= ##
## Create names for the daily_avg_scores to make more sense in the aggregated table
names(daily_avg_scores) <- paste0(names(daily_avg_scores), ".averaged")
## Join all data and reorder to have average SA results with dates and dummy variables
data_to_impute <- cbind(data_dirty, as.data.table(daily_avg_scores))
setcolorder(data_to_impute, neworder = c(1, 2, 3, 4, seq(151, 163), seq(5, 150)))
save(data_to_impute, file = "data_to_impute.rda")
###################### ================== ######################
######################  Defunct function  ######################
###################### ================== ######################
## Legacy scalers kept for reference -- none of them is used by the pipeline above.

## Shift SentiStrength scores one unit towards zero, mapping the +/-1..+/-5
## scale onto -4..+4. A linear shift, so the variance is unchanged.
## THIS IS NOT USEFUL --> gives the same output once rescaled to {-1:+1}
scaler1 <- function(x) {
  x - sign(x)
}

## Linearly map input_data from its observed [min, max] onto [new_min, new_max].
## Not used in the end: this kind of normalisation can shift the sign of the mean.
scaler2 <- function(input_data, new_min, new_max) {
  old_range <- range(input_data)
  slope <- (new_max - new_min) / (old_range[2] - old_range[1])
  intercept <- new_max - slope * old_range[2]
  slope * input_data + intercept
}

## Divide by the largest absolute value, keeping the relative dispersion intact.
my_scaler <- function(x) {
  x / max(abs(x))
}
## It is better to scale by a fixed factor, the maximum possible on scale, not max observation
## This is (+/-) 5 for SentiStrength
|
e94745582b2478cbaadddb7faf212dfa2512d1fc
|
c745a74ab42c02097d0132ab07702f76c2807924
|
/R代码/数据预处理/排序.R
|
4838955947aa6cda814652ccc41eeae84d38b344
|
[] |
no_license
|
Kaleid-fy/R-language-
|
13f7b8ced2f88b5a64f3762786c2cd11f84649ee
|
0e31f5775c994858972c2a639e1ca48af8335f60
|
refs/heads/master
| 2020-03-25T00:09:51.012230
| 2018-08-16T15:25:24
| 2018-08-16T15:25:24
| 143,172,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
r
|
排序.R
|
### Sorting
# R's three basic sorting helpers are order, sort and rank:
#   order returns the indices that would sort the data (a permutation),
#   rank  returns each element's position in sorted order (its rank),
#   sort  returns the data itself in sorted order.
(x <- c(19,84,64,2))
order(x)
rank(x)
sort(x)
# A further example to get a deeper feel for order()
d <- data.frame(x=c(19,84,64,2,2),
                y=c(20,13,5,40,21))
d
# Sort by x ascending; break ties by y ascending
d[order(d$x,d$y),]
# Sort by x ascending; break ties by y descending
d[order(d$x,-d$y),]
# Sort by y ascending; break ties by x ascending
d[order(d$y,d$x),]
|
9503a2c0c7235bee52b6a4dc38fb7b16eb57d721
|
4b10c2e443fcbec746cb8f5db8aedf0a0933a439
|
/man/TreeWalkerDiscrete.Rd
|
c75b815378c61f3c5e427f3b337931c30bb06098
|
[] |
no_license
|
laurasoul/dispeRse
|
81968d976ce9477f45584f62c9a7baa87bb42273
|
0f1316bc963fa8cea3ed3da0f7bb585e8acd7079
|
refs/heads/master
| 2021-06-05T09:02:45.991357
| 2021-05-24T21:15:14
| 2021-05-24T21:15:14
| 33,941,723
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,391
|
rd
|
TreeWalkerDiscrete.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TreeWalkerDiscrete.R
\name{TreeWalkerDiscrete}
\alias{TreeWalkerDiscrete}
\title{Generate random birth-death tree with associated coordinates}
\usage{
TreeWalkerDiscrete(
b = 0.1,
d = 0.05,
steps = 50,
slon = 0,
slat = 0,
steplengthsd = 100,
EarthRad = 6367.4447
)
}
\arguments{
\item{b}{per-lineage birth (speciation) rate}
\item{d}{per-lineage death (extinction) rate}
\item{steps}{number of time steps to use}
\item{slon}{starting longitude}
\item{slat}{starting latitude}
\item{steplengthsd}{standard deviation used for random walk draws}
\item{EarthRad}{Earth radius in kilometres.}
}
\value{
tree a phylogenetic tree
longitudes a matrix with rows corresponding to the tree edges and columns to time steps

latitudes a matrix with rows corresponding to the tree edges and columns to time steps
}
\description{
This function generates a birth-death tree in discrete time steps, while recording the longitude and latitude of each branch at the end of each step
}
\details{
This function is based on the function sim.bdtree in geiger <http://cran.r-project.org/web/packages/geiger/geiger.pdf>.
}
\examples{
TreeWalkerDiscrete(b=0.1, d=0.05, steps=50, slon=0, slat=0, steplengthsd = 100)
}
\author{
Laura C. Soul \email{lauracsoul@gmail.com}
}
\keyword{discrete}
\keyword{random}
\keyword{walk}
|
6c2bbc649f291f777d38097fd421c6c830f74643
|
27edde77c68ce3cfd1149ea659d56658f5d83bec
|
/temp.R
|
46a046042411213ae7417d1be8674edd50ab9fc0
|
[] |
no_license
|
brophyj/book_v1
|
b8307f4067200f4a61fa5e910956693dd39f2166
|
b5367d8c0e0cfcd5151c6b8469b00a6b6cb8b87a
|
refs/heads/main
| 2023-03-24T06:18:53.954211
| 2021-03-03T02:42:10
| 2021-03-03T02:42:10
| 343,909,995
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,294
|
r
|
temp.R
|
## Demonstrations of classical tests alongside their Bayes-factor analogues.
library (BayesFactor)
data(sleep)

## Compute difference scores (paired design: first 10 vs last 10 observations).
diffScores = sleep$extra[1:10] - sleep$extra[11:20]

## Traditional two-tailed t test
t.test(diffScores)

## Two-sample comparison of proportions (deaths across two years).
dead <- c(938,1238)
alive <- c(18760, 26340)
## NOTE(review): prop.test(x, n) expects n to be the group totals; here
## `alive` is passed as n, so the proportions tested are dead/alive rather
## than dead/(dead + alive) -- compare with the call further below that uses
## mat[2,] + mat[1,] as totals. Confirm which is intended.
prop.test(dead,alive)

## Bayes factor for a single proportion: 15 successes out of 25 vs p = .5.
bf = proportionBF(y = 15, N = 25, p = .5)
bf

## Bayes factor for a 2x2 contingency table.
mat <- matrix(c(50,48,21,41), nrow=2, byrow = TRUE)
mat
contingencyTableBF(mat, sampleType = "indepMulti", fixedMargin = "cols")

## Same analysis on the mortality table, with labelled dimensions.
mat <- matrix(c(938,1238,18760,26340), nrow=2, byrow = TRUE,
              dimnames = list(Outcome = c("Dead", "Alive"), Year = c("2005", "2008")))
mat
contingencyTableBF(mat, sampleType = "indepMulti", fixedMargin = "cols")
chisq.test(mat)
prop.test(mat[1,],mat[2,]+mat[1,])
## Minimum Bayes factor for a z statistic: exp(-z^2 / 2), the smallest
## possible evidence in favour of the null. Returns a human-readable sentence.
min_bf <- function(z){
  minimum <- exp(-(z^2) / 2)
  paste("Minimum Bayes Factor = ", round(minimum, 2), "so there is ", round(1/minimum, 2), "times more evidence supporting the alternative hypothesis of the observed data than for the null of no benefit")
}
min_bf(1.96)
## Convert a prior probability and a Bayes factor into a posterior probability
## via the odds form of Bayes' theorem; returns a human-readable sentence.
post_prob <- function(prior,bf){
  prior_odds <- prior / (1 - prior)
  updated_odds <- prior_odds * bf
  updated_prob <- updated_odds / (1 + updated_odds)
  paste("If Bayes Factor = ", round(bf,2), "and the prior probability = ", round(100*prior,2), "%, the posterior probability = ",round(100*updated_prob,0), "%")
}
post_prob(.5,.15)
## Posterior probability of H0 over a grid of priors, for four Bayes factors.
## NOTE(review): this section uses purrr (rerun, map_df), dplyr/tibble (%>%,
## mutate) and ggplot2 without loading them in this file -- it presumably
## relies on the tidyverse being attached in the session; confirm before
## running standalone.
df <- data.frame (prior_prob=seq(0,.99, length.out = 99), post = seq(0,1, length.out = 99))
## Replicate the prior grid four times, assign one Bayes factor per copy,
## then convert prior -> odds -> posterior odds -> posterior probability.
t <- rerun(4, df) %>%
  map_df( ~ tibble(.), .id = "dist", x.x ="x") %>%
  mutate(bf = ifelse(dist == "1", 1/5,
              ifelse(dist == "2", 1/10,
              ifelse(dist == "3", 1/20,
              ifelse(dist == "4", 1/100, "NA"))))) %>%
  mutate(bf=as.numeric(bf), prior_odds = prior_prob/(1-prior_prob)) %>%
  mutate(post_odds = bf * prior_odds) %>%
  mutate(post_prob = post_odds / (1+ post_odds))

## One curve per Bayes factor; blue line marks posterior = 0.05.
ggplot(t, aes(prior_prob,post_prob,color = as.factor(bf))) +
  geom_line() +
  labs(x="Prior probability Ho true", y="Posterior probability Ho true") +
  labs(color='Bayes factor') +
  geom_hline(yintercept = 0.05, color="blue") +
  annotate("text", label ="Blue horizontal line = posterior probability Ho = 0.05", x=0, y=.1, hjust=0) +
  ggtitle("Posterior probability Ho is true", subtitle = "Varying levels of Bayes factors from weak (0.2) to strong (0.01))") +
  theme_bw()
|
a97d05ac53b0aec679ed2b7797141f7f7b52bbfc
|
ef84851bd06ab41faa62190f6c8464809605cbb9
|
/functions/plot.novel.comms.R
|
173ff0619d3f087662d139743f47c2089dbc7cba
|
[] |
no_license
|
TimothyStaples/novelty-cenozoic-microplankton
|
f306c22161c7fdaf840c1662f67178a91c92748a
|
0a062c18a6e661d1d0a4af750186a9e42448470a
|
refs/heads/master
| 2022-12-23T11:58:01.238855
| 2020-09-15T23:36:37
| 2020-09-15T23:36:37
| 288,867,070
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,984
|
r
|
plot.novel.comms.R
|
## Two-panel base-graphics plot of novelty-detection results for one site.
## Top panel: instantaneous (sequential) dissimilarity through time with its
## expected envelope; bottom panel: cumulative (minimum) dissimilarity.
## Instantaneous-novelty bins are marked red, cumulative blue, and bins
## classified as fully novel are connected across the panels in orange.
## Returns the per-bin classification table invisibly useful to callers.
plot.novel.comm <- function(site.sp.mat, alpha, metric, site, axis.label){
  ## Run the novelty classification; plot.data=TRUE also returns the fitted
  ## expectation envelopes (identify.novel.gam is defined elsewhere in repo).
  return.data <- identify.novel.gam(site.sp.mat, alpha, metric, site=site, plot=FALSE,
                                    plot.data=TRUE)
  min.p <- return.data[[3]]    # envelope for cumulative (minimum) dissimilarity
  seq.p <- return.data[[2]]    # envelope for sequential dissimilarity
  save.data <- return.data
  return.data <- return.data[[1]]   # per-bin classification table
  ## NOTE(review): ylims is deliberately (max, min) -- this flips the y axis
  ## of the top panel so time-dissimilarity reads downward; confirm intended.
  ylims <- c(max(c(max(seq.p$upr, na.rm=TRUE), max(return.data$seq.dist, na.rm=TRUE))) * 1.1,
             min(c(min(seq.p$lwr, na.rm=TRUE), min(return.data$seq.dist, na.rm=TRUE))) *0.9)
  ## --- Top panel: instantaneous dissimilarity ---
  plot(return.data$seq.dist ~
         as.numeric(as.character(return.data$bins)), type="n",
       ylim=ylims,
       axes=FALSE, xlab="", ylab="", yaxt="n")
  axis(side=2, lwd=0.5)
  lims <- par("usr")   # remember x limits to align the bottom panel
  ## Shaded expectation envelope + dashed expected value.
  polygon(x=c(as.numeric(as.character(return.data$bins)),
              rev(as.numeric(as.character(return.data$bins)))),
          y=c(seq.p$lwr, rev(seq.p$upr)),
          col="grey75", border=NA)
  lines(seq.p[,4] ~ as.numeric(as.character(return.data$bins)), col="grey15",
        lty="dashed")
  with(return.data,
       lines(seq.dist ~ as.numeric(as.character(bins)), lwd=1.5))
  ## Instantaneous-novelty points (red).
  with(return.data[return.data$instant & !is.na(return.data$instant),],
       points(seq.dist ~ as.numeric(as.character(bins)),
              pch=21, bg="red"))
  ## Orange drop-lines from each fully novel bin down to the panel edge.
  sapply(which(return.data$novel), function(x){
    segments(x0 = as.numeric(as.character(return.data$bins))[x],
             x1 = as.numeric(as.character(return.data$bins))[x],
             y0 = return.data$seq.dist[x] + (0.05 * (par("usr")[3] - par("usr")[4])),
             y1 = par("usr")[3], col="orange", lwd=2)
  })
  ## Manual box (left, top, right) since axes=FALSE suppressed it.
  segments(x0=par("usr")[1], x1=par("usr")[1], y0=par("usr")[3], y1=par("usr")[4])
  segments(x0=par("usr")[1], x1=par("usr")[2], y0=par("usr")[4], y1=par("usr")[4])
  segments(x0=par("usr")[2], x1=par("usr")[2], y0=par("usr")[3], y1=par("usr")[4])
  mtext(side=2, text = "Instantaneous\ndissimilarity", line=2)
  ## --- Bottom panel: cumulative dissimilarity (normal axis orientation) ---
  ylims <- c(min(c(min(min.p$lwr, na.rm=TRUE), min(return.data$raw.min.dist, na.rm=TRUE))) *0.9,
             max(c(max(min.p$upr, na.rm=TRUE), max(return.data$raw.min.dist, na.rm=TRUE))) * 1.1)
  par(xpd=NA)   # allow the legend to sit outside the plot region
  legend(x=relative.axis.point(0.5, "x"),
         y=relative.axis.point(1.175, "y"),
         legend=c("Instantaneous novelty", "Cumulative novelty", "Novel community"),
         pch=21, pt.bg=c("red","skyblue", "orange"),
         xjust=0.5, y.intersp=0, bty="n", x.intersp=0.75,
         horiz=TRUE)
  par(xpd=FALSE)
  plot(y=return.data$raw.min.dist,
       x=as.numeric(as.character(return.data$bins)), type="n",
       ylim=ylims,
       xlim=c(lims[1], lims[2]), xaxs="i",
       axes=FALSE, ylab="", xlab="")
  polygon(x=c(as.numeric(as.character(return.data$bins)),
              rev(as.numeric(as.character(return.data$bins)))),
          y=c(min.p$lwr, rev(min.p$upr)),
          col="grey75", border=NA)
  lines(min.p[,4] ~ as.numeric(as.character(return.data$bins)), col="grey15",
        lty="dashed")
  with(return.data,
       lines(raw.min.dist ~ as.numeric(as.character(bins)), lwd=1.5))
  ## Cumulative-novelty points (blue).
  ## NOTE(review): unlike `instant` above, `cumul` is not guarded with
  ## !is.na() -- confirm cumul can never be NA here.
  with(return.data[return.data$cumul,],
       points(raw.min.dist ~ as.numeric(as.character(bins)),
              pch=21, bg="skyblue"))
  par(xpd=NA)
  ## Orange connectors up to the top edge, meeting the top panel's drop-lines.
  sapply(which(return.data$novel), function(x){
    segments(x0 = as.numeric(as.character(return.data$bins))[x],
             x1 = as.numeric(as.character(return.data$bins))[x],
             y0 = return.data$raw.min.dist[x] + (0.05 * (par("usr")[4] - par("usr")[3])),
             y1 =par("usr")[4], col="orange", lwd=2)
    points(x=as.numeric(as.character(return.data$bins))[x],
           y=par("usr")[4], pch=21, bg="orange")
  })
  par(xpd=FALSE)
  segments(x0=par("usr")[1], x1=par("usr")[1], y0=par("usr")[3], y1=par("usr")[4])
  segments(x0=par("usr")[1], x1=par("usr")[2], y0=par("usr")[3], y1=par("usr")[3])
  segments(x0=par("usr")[2], x1=par("usr")[2], y0=par("usr")[3], y1=par("usr")[4])
  axis(side=1, mgp=c(3,0.2,0), lwd=0.5)
  axis(side=1,
       tcl=-0.125, labels=NA)
  mtext(side=1, text = axis.label, line=1)
  axis(side=2, lwd=0.5)
  mtext(side=2, text = "Cumulative\ndissimilarity", line=2)
  return(return.data)
}
|
3109cbf5d39896a4c741e1230ddb77e0c0e1d8c5
|
1bd01254e9226ec9777a91b29df09ec70b4824e6
|
/scripts/F_ESC_Binomial_Beta_Hurdle_GAM.R
|
711e338e081df8da939381c95448bc48f7d620b5
|
[] |
no_license
|
RafaelSdeSouza/Beta_regression
|
b75148071e37faa4a73ed896f8058ca129b4c61e
|
66c8a5dad19464edba2fa8390bf7e0e729e15c5f
|
refs/heads/master
| 2020-05-22T00:03:37.197545
| 2019-03-22T01:51:32
| 2019-03-22T01:51:32
| 37,638,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,639
|
r
|
F_ESC_Binomial_Beta_Hurdle_GAM.R
|
## Data preparation for the hurdle (binomial + beta) GAM of photon escape
## fraction (fEsc) from the FiBY simulation.
## NOTE(review): rm(list=ls()) wipes the whole workspace -- script assumes a
## fresh session.
rm(list=ls(all=TRUE))
library(mgcv);library(ggplot2);library(reshape2);library(ggthemes);library(MASS);library(hexbin);library(scales)
# Read data
Data=read.csv("..//data/FiBY.csv")
Data=subset(Data,redshift < 25)
##
## Log modulus transformation: sign-preserving log10, defined at zero.
L_M <-function(x){sign(x)*log10(abs(x) + 1)}
## fEsc is the variable of interest
## x is a vector of covariates
x = c("Mstar","Mvir","ssfr_stars","baryon_fraction","spin","QHI","C") # with variable names
#var.names <- c("M[star]/M[sun]","M[200]/M[sun]", "sSFR/Gyrs^-1","f[b]", "lambda","Q[HI]/s^-1","C")
var.names <- c("M['*']","M[200]", "sSFR","f[b]", "lambda","Q[HI]","C")   # plotmath facet labels
Data <- Data[,c("fEsc",x)]
##
## Log10-transform the covariates; ssfr_stars gets the log-modulus transform
## (it can be zero/negative) and baryon_fraction is kept on its raw scale.
## (The original comment said "except C", but C *is* log10-transformed below.)
Data$Mstar <- log10(Data$Mstar)
Data$Mvir <- log10(Data$Mvir)
Data$ssfr_stars <- L_M(Data$ssfr_stars)
Data$spin <- log10(Data$spin)
Data$QHI <- log10(Data$QHI)
Data$C <- log10(Data$C)
# Treat everything below 1e-3 as an exact zero (hurdle point mass).
Data$fEsc[Data$fEsc < 10^-3] = 0
colnames(Data)[1] = "f_esc"
# Add 0/1 variable indicating f_esc = 0 vs f_esc > 0
Data$non.zero.f_esc <- ifelse(Data$f_esc> 0, 1, 0)
n = nrow(Data) # Number of Observations
n0 = sum(Data$f_esc == 0) # Number of zeros
p = length(var.names) # Number of covariates
###
###################################################################################################################################################
###################################################################################################################################################
###################################################################################################################################################
## Bin each covariate into 5 intervals and build a long-format frame `dc`
## (covariate, value, bin, response) for optional boxplot overlays.
cutF <- function(x){cut(x,breaks=5)}
cutMat <- apply(Data[,2:8],2,cutF)
xcutMat <- melt(cutMat)[,3]
dc <- melt(Data[,2:8])
dc$cutMat <- xcutMat
dc$y <- Data[,1]
colnames(dc) <- c("var","value","cutMat","y")
levels(dc$var) <- var.names
####
#### Modelling using a hurdle Binomial-Beta GAM
#### Two stages:
#### 1) Model Prob(f_esc > 0) through a binomial GAM with logistic link
## Binomial_GAM: one cubic-regression-spline smooth per covariate (basis
## dimension k tuned per term); smoothness selected by REML.
Binomial_GAM <- gam(non.zero.f_esc ~ s(Mstar,bs="cr",k=12) + s(Mvir,bs="cr",k=12) + s(ssfr_stars,bs="cr",k=12) + s(baryon_fraction,bs="cr",k=25) +
                      s(spin,bs="cr") + s(QHI,bs="cr",k=25) + s(C,bs="cr",k=20),
                    data=Data,family= binomial(link="logit"),method="REML")
summary(Binomial_GAM)
######################################################################################
##
## Partial-effect curves: for each covariate, hold all other covariates at
## their medians, sweep the focal one over its 0.1%-99.9% quantile range,
## and predict Prob(f_esc > 0) with +/-2 SE intervals (built on the link
## scale, then inverse-logit transformed).
gg <-list()
gg_x <- list()
gg_original <-list()
for(i in 1:p){
  nn = 3*10^4;
  R = matrix(apply(Data[,x],2,median),nrow=1);
  R = R%x% rep(1,nn);colnames(R) = x;   # Kronecker trick: replicate median row nn times
  R = as.data.frame(R)
  a = quantile(Data[,x[i]],0.001); b= quantile(Data[,x[i]],0.999); I = Data[,x[i]] > a & Data[,x[i]] < b
  R[,i] = seq(a,b,length=nn)
  #
  # Predict and Produce confidence intervals:
  #
  Preds_nzero <- predict(Binomial_GAM,newdata = R,type="link",se=T,unconditional=T)
  fit.link <- Preds_nzero$fit
  se <- Preds_nzero$se
  CI.L <- fit.link-2*se
  CI.R <- fit.link+2*se
  CI <- cbind(fit.link,CI.L,CI.R)
  CI <- exp(CI)/(1+exp(CI)) # inverse logit: first column is the estimated probability of being non-zero.
  colnames(CI) <- c("Predictions","CI_L","CI_R")
  ##
  gg_x[[i]] <- data.frame(cbind(CI,x=R[,i]),var = rep(var.names[i],nn))
  gg_original[[i]] <- data.frame(x=Data[I,x[i]],y=Data[I,"non.zero.f_esc"],var = rep(var.names[i],sum(I)))
}
# put altogether for facets
ggg_x<-c()
for(i in 1:p){
  ggg_x <- rbind(ggg_x,gg_x[[i]])
}
ggg_original <- c()
for(i in 1:p){
  ggg_original <- rbind(ggg_original,gg_original[[i]])
}
#
#
# Plot via ggplot2: jittered 0/1 observations, ribbon CI, fitted curve,
# one facet per covariate with plotmath labels.
pdf("Binomial_GAM.pdf",width = 16,height = 8)
ggplot(ggg_x,aes(x=x,y=Predictions))+
  # geom_boxplot(data=dc,mapping =aes(x=cutMat,y=y))+
  # geom_hex(data=ggg_original,bins = 75,size=2,aes(x=x,y=y))+
  geom_point(data=ggg_original,size=0.5,alpha=0.2,color="#D97C2B",aes(x=x,y=y),position = position_jitter(w = 0, h = 0.015))+
  scale_fill_continuous(low = "white", high = "#D97C2B", trans = log10_trans())+
  geom_ribbon(aes(ymin=CI_L, ymax=CI_R),fill = c("#3698BF"),alpha=0.75) +
  geom_line(col="#D97C2B",size=0.75)+
  theme_stata()+
  ylab(expression(paste("Probability of ",~f[esc] > 0,sep="")))+
  xlab("")+
  theme(legend.background = element_rect(fill="white"),
        legend.key = element_rect(fill = "white",color = "white"),
        plot.background = element_rect(fill = "white"),
        legend.position="none",
        axis.title.y = element_text(vjust = 0.1,margin=margin(0,10,0,0)),
        axis.title.x = element_text(vjust = -0.25),
        text = element_text(size = 20,family="serif"))+
  facet_wrap(~var,scales = "free_x",ncol=4,labeller = label_parsed,strip.position="bottom")
dev.off()
####
####
#
#######################################################################################
#######################################################################################
## The Second Stage
### 2) Model f_esc, given f_esc > 0, through a beta GAM with logistic link
###
r <- 35   # common basis dimension for all smooths in this stage
Beta_GAM <- gam(f_esc ~ s(Mstar,bs="cr",k=r) + s(Mvir,bs="cr",k=r) + s(ssfr_stars,bs="cr",k=r) + s(baryon_fraction,bs="cr",k=r) +
                  s(spin,bs="cr",k=r) + s(QHI,bs="cr",k=r) + s(C,bs="cr",k=r),
                subset=f_esc>0,data=Data,family=betar(link="logit"),method="REML")
summary(Beta_GAM)
###########################
##
## Partial-effect curves for the conditional mean of f_esc (given f_esc > 0);
## same median-sweep construction as for the binomial stage.
gg <-list()
gg_x <- list()
gg_original <-list()
for(i in 1:p){
  nn = 3*10^4+1;
  R = matrix(apply(Data[,x],2,median),nrow=1);
  R = R%x% rep(1,nn);colnames(R) = x;
  R = as.data.frame(R)
  a = quantile(Data[,x[i]],0.001); b= quantile(Data[,x[i]],0.999); I = Data[,x[i]] > a & Data[,x[i]] < b
  R[,i] = seq(a,b,length=nn)
  #
  # Predict and Produce confidence intervals:
  #
  Preds_fesc <- predict(Beta_GAM,newdata = R,type="link",se=T,unconditional=T)
  fit.link <- Preds_fesc$fit
  se <- Preds_fesc$se
  CI.L <- fit.link-2*se
  CI.R <- fit.link+2*se
  CI <- cbind(fit.link,CI.L,CI.R)
  CI <- exp(CI)/(1+exp(CI)) # inverse logit: first column is the estimated mean of f_esc when f_esc > 0
  colnames(CI) <- c("Predictions","CI_L","CI_R")
  ##
  gg_x[[i]] <- data.frame(cbind(CI,x=R[,i]),var = rep(var.names[i],nn))
  gg_original[[i]] <- data.frame(x=subset(Data,f_esc >0&I)[,x[i]],y=subset(Data,f_esc >0&I)[,"f_esc"],var = rep(var.names[i],sum(Data$f_esc>0&I)))
}
# put altogether for facets
ggg_x<-c()
for(i in 1:p){
  ggg_x <- rbind(ggg_x,gg_x[[i]])
}
ggg_original <- c()
for(i in 1:p){
  ggg_original <- rbind(ggg_original,gg_original[[i]])
}
# Plot via ggplot2: hex-binned observations behind the fitted curves.
pdf("Beta_GAM.pdf",width = 16,height = 8)
ggplot(ggg_x,aes(x=x,y=Predictions))+
  geom_hex(data=ggg_original,alpha=0.65,bins = 75,aes(x=x,y=y))+
  scale_fill_continuous(low = "white", high = "#D97C2B", trans = log10_trans())+
  geom_ribbon(aes(ymin=CI_L, ymax=CI_R),fill = c("#3698BF"),alpha=0.75) +
  geom_line(col="#D97C2B",size=0.75)+
  theme_stata()+
  ylab(expression(paste("Average of ",~f[esc]," given that ", ~f[esc] > 0 ,sep="")))+
  xlab("")+
  scale_x_continuous(breaks = scales::pretty_breaks(n = 4)) +
  theme(legend.background = element_rect(fill="white"),
        legend.key = element_rect(fill = "white",color = "white"),
        plot.background = element_rect(fill = "white"),
        legend.position="none",
        axis.title.y = element_text(vjust = 0.1,margin=margin(0,10,0,0)),
        axis.title.x = element_text(vjust = -0.25),
        text = element_text(size = 20,family="serif"))+
  facet_wrap(~var,scales = "free_x",ncol=4,labeller = label_parsed,strip.position="bottom")
dev.off()
#######################################################################################
#######################################################################################
### Hurdle model: putting the two stages together
###
### Fitted values: E[f_esc] = Prob(f_esc > 0) * E[f_esc | f_esc > 0]
####
#### Produce hurdle plots; the combined SE uses the delta method
gg <-list()
gg_x <- list()
gg_original <-list()
for(i in 1:p){
  nn = 3*10^4+1;
  R = matrix(apply(Data[,x],2,median),nrow=1);
  R = R%x% rep(1,nn);colnames(R) = x;
  R = as.data.frame(R)
  a = quantile(Data[,x[i]],0.001); b= quantile(Data[,x[i]],0.999); I = Data[,x[i]] > a & Data[,x[i]] < b
  R[,i] = seq(a,b,length=nn)
  #
  ## Response-scale predictions from both stages on the same covariate sweep.
  fit_Binomial = predict(Binomial_GAM,newdata=R,type="response",se=T,unconditional=T)
  fit_Beta = predict(Beta_GAM,newdata=R,type="response",se=T,unconditional=T)
  mu_Binomial = fit_Binomial$fit
  mu_Beta = fit_Beta$fit
  se_Binomial = fit_Binomial$se
  se_Beta = fit_Beta$se
  ##
  mu_Hurdle = mu_Binomial*mu_Beta
  ## SE of a product of independent estimates (exact product-variance form,
  ## including the cross term se^2 * se^2).
  se_Hurdle = sqrt(se_Binomial^2*mu_Beta^2 + mu_Binomial^2*se_Beta^2 + se_Binomial^2*se_Beta^2)
  ##
  ## Predictive SD of the response under the hurdle model, using the fitted
  ## beta precision parameter phi.
  phi <- Beta_GAM$family$getTheta(TRUE)
  sd.y <- sqrt(mu_Binomial*(mu_Beta*(1-mu_Beta)/(1+phi) + (1-mu_Binomial)*mu_Beta^2))
  #
  CI.L <- mu_Hurdle-2*se_Hurdle
  CI.R <- mu_Hurdle+2*se_Hurdle
  CI <- cbind(mu_Hurdle,CI.L,CI.R,sd.y)
  colnames(CI) <- c("Predictions","CI_L","CI_R","SD")
  ##
  gg_x[[i]] <- data.frame(cbind(CI,x=R[,i]),var = rep(var.names[i],nn))
  gg_original[[i]] <- data.frame(x=Data[I,x[i]],y=Data[I,"f_esc"],var = rep(var.names[i],sum(I)))
}
# put altogether for facets
ggg_x<-c()
for(i in 1:p){
  ggg_x <- rbind(ggg_x,gg_x[[i]])
}
ggg_original <- c()
for(i in 1:p){
  ggg_original <- rbind(ggg_original,gg_original[[i]])
}
# Plot via ggplot2: hurdle mean (solid), predictive SD (dashed), ribbon CI.
pdf("Hurdle_GAM.pdf",width = 16,height = 8)
ggplot(ggg_x,aes(x=x,y=Predictions))+
  geom_hex(data=ggg_original,alpha=0.65,bins = 75,aes(x=x,y=y))+
  geom_line(aes(x=x,y=SD),size=0.75,linetype="dashed")+
  scale_fill_continuous(low = "white", high = "#D97C2B", trans = log10_trans())+
  geom_ribbon(aes(ymin=CI_L, ymax=CI_R),fill = c("#3698BF"),alpha=0.75) +
  geom_line(col="#D97C2B",size=0.75)+
  theme_stata()+
  ylab(expression(paste(~f[esc],sep="")))+
  xlab("")+
  theme(legend.background = element_rect(fill="white"),
        legend.key = element_rect(fill = "white",color = "white"),
        plot.background = element_rect(fill = "white"),
        legend.position="none",
        axis.title.y = element_text(vjust = 0.1,margin=margin(0,10,0,0)),
        axis.title.x = element_text(vjust = -0.25),
        text = element_text(size = 20,family="serif"))+
  facet_wrap(~var,scales = "free_x",ncol=4,labeller = label_parsed,strip.position="bottom")
dev.off()
|
4beaf170279bf31e3ec1997d920640dc6987531a
|
36d73bd4ec51b24f9aa427003d41ace725c23a14
|
/man/scNMT.Rd
|
b987f78b23768b08ff872cf38e5da15b33a30267
|
[] |
no_license
|
drisso/SingleCellMultiModal
|
f613c4f7b7470f27ee25445160ecd798bdd5f89c
|
2685521119f5b162809da3f5f73dab01cb08a1de
|
refs/heads/master
| 2022-11-07T04:59:38.832585
| 2020-06-23T13:33:40
| 2020-06-23T13:33:40
| 279,685,426
| 1
| 0
| null | 2020-07-14T20:22:47
| 2020-07-14T20:22:46
| null |
UTF-8
|
R
| false
| true
| 2,523
|
rd
|
scNMT.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scNMT.R
\name{scNMT}
\alias{scNMT}
\title{Single-cell Nucleosome, Methylation and Transcription sequencing}
\source{
\url{http://ftp.ebi.ac.uk/pub/databases/scnmt_gastrulation/}
}
\usage{
scNMT(
dataType = "mouse_gastrulation",
modes = "*",
dry.run = TRUE,
verbose = TRUE,
...
)
}
\arguments{
\item{dataType}{character(1) Indicates study that produces this type of
data (default: 'mouse_gastrulation')}
\item{modes}{character() The assay types or modes of data to obtain; these
include single cell Chromatin Accessibility ("acc"), Methylation ("met"),
RNA-seq ("rna") by default.}
\item{dry.run}{logical(1) Whether to return the dataset names before actual
download (default TRUE)}
\item{verbose}{logical(1) Whether to show the dataset currently being
(down)loaded (default TRUE)}
\item{...}{Additional arguments passed on to the
\link[ExperimentHub]{ExperimentHub-class} constructor}
}
\value{
A single cell multi-modal \linkS4class{MultiAssayExperiment}
}
\description{
scNMT assembles data on-the-fly from `ExperimentHub` to
provide a \linkS4class{MultiAssayExperiment} container. The `dataType`
argument provides access to the `mouse_gastrulation` dataset as obtained
from Argelaguet et al. (2019). Pre-processing code can be seen at
\url{https://github.com/rarguelaguet/mouse_gastrulation}. Protocol
information for this dataset is available at Clark et al. (2018). See
the vignette for the full citation.
}
\details{
scNMT is a combination of RNA-seq (transcriptome) and an adaptation
of Nucleosome Occupancy and Methylation sequencing (NOMe-seq, the
methylome and chromatin accessibility) technologies. For more
information, see Reik et al. (2018) DOI: 10.1038/s41467-018-03149-4
\itemize{
\item{mouse_gastrulation:}
\itemize{
\item{rna} - RNA-seq
\item{acc_*} - chromatin accessibility
\item{met_*} - DNA methylation
\itemize{
\item{cgi} - CpG islands
\item{CTCF} - footprints of CTCF binding
\item{DHS} - DNase Hypersensitive Sites
\item{genebody} - gene bodies
\item{p300} - p300 binding sites
\item{promoter} - gene promoters
}
}
}
}
\examples{
scNMT(dataType = "mouse_gastrulation", modes = "*", dry.run = TRUE)
}
\references{
Argelaguet et al. (2019)
}
\seealso{
SingleCellMultiModal-package
}
|
9f439679ec331d415da9ecfe60e28c2bb1c07c40
|
87092bd3c5d1e8c864502f851085ec80bda39705
|
/PAMR.r
|
f9948057218d1199cf276a54f5d41ec5c66a6bd3
|
[] |
no_license
|
ngokchaoho/robust-median-mean-reversion
|
0c66c964883ecf2ec9f4736f1e267a07dd8feeee
|
5cf3fa4e28f1dbd36217441b71254cf7456ed8c0
|
refs/heads/master
| 2020-04-10T10:40:56.094305
| 2018-11-20T05:57:48
| 2018-11-20T05:57:48
| 160,973,104
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,772
|
r
|
PAMR.r
|
## Run the Passive Aggressive Mean Reversion (PAMR) portfolio strategy.
##
## Args:
##   fid:  output file identifier (currently unused inside this function;
##         kept for interface compatibility with existing callers)
##   data: t x m matrix of daily price relatives (close / previous close)
##         for m assets over t periods
##   tc:   proportional transaction-cost rate
##
## Returns a list of three elements: the total cumulative return, the
## cumulative-return series, and the daily-return series.
pamr_run <- function(fid, data, tc)
{
  data_matrix <- data
  t <- nrow(data_matrix)   # number of trading periods
  m <- ncol(data_matrix)   # number of assets
  cum_ret <- 1
  daily_ret <- NULL
  cumpro_ret <- NULL
  e <- 0.5                 # mean-reversion sensitivity threshold (epsilon)
  ## FIX: the original body reset `tc = 0` here, silently discarding the
  ## transaction-cost argument; the override is removed so the caller's tc
  ## is honoured (callers passing tc = 0, as the script below does, are
  ## unaffected). The unused `SumReturn` local was also dropped.
  eta <- 0                 # PA step size; initialised so the first-period
                           # max(0, eta) cannot hit an undefined variable
                           # when denominator == 0 at i = 1
  day_weight <- t(as.matrix(rep(1/m, times = m)))    # start equally weighted
  day_weight_o <- t(as.matrix(rep(0, times = m)))    # weights after drift
  daily_portfolio <- as.vector(rep(NULL, times = m))
  for (i in seq(from = 1, to = t))
  {
    data <- t(as.matrix(data_matrix[i, ]))   # price relatives for period i
    if (i >= 2)
    {
      ## Passive-aggressive update from the previous period, then project
      ## back onto the probability simplex.
      data1 <- t(as.matrix(data_matrix[i - 1, ]))
      day_weight2 <- day_weight - eta * (data1 - sum(data1)/m)
      day_weight <- simplex_projection(day_weight2, 1)
    }
    day_weight <- day_weight / sum(day_weight)
    if (i == 1)
    {
      daily_portfolio <- day_weight
    }
    else
    {
      daily_portfolio <- rbind(daily_portfolio, day_weight)
    }
    # daily_portfolio[i,] is the day_weight of the i-th period.
    ## Net portfolio return for period i, after proportional transaction
    ## costs on the rebalancing turnover.
    daily_ret <- cbind(daily_ret, ((data) %*% t(day_weight) %*% (1 - tc/2*sum(abs(day_weight - day_weight_o)))))
    cum_ret <- cum_ret * daily_ret[i]
    cumpro_ret <- cbind(cumpro_ret, cum_ret)
    day_weight_o <- day_weight * data / daily_ret[i]   # drifted weights
    ## Step-size update for the next period (PAMR formula).
    denominator <- (data - 1/m*sum(data)) %*% t(data - 1/m*sum(data))
    if (denominator != 0)
      eta <- (daily_ret[i] - e) / denominator
    eta <- max(0, eta)
  }
  return(list(cum_ret, cumpro_ret, daily_ret))
}
## Euclidean projection of vector v onto the simplex {w : w >= 0, sum(w) = b}.
## Negative entries are first clipped to zero, then the standard
## sort-and-threshold algorithm finds the shift theta to subtract.
simplex_projection <- function(v,b)
{
  if (b < 0) {
    print('error')
  }
  clipped <- (v > 0) * v
  sorted_desc <- sort(clipped, decreasing = TRUE)
  partial_sums <- cumsum(sorted_desc)
  ## Largest index whose sorted value still exceeds the running threshold.
  feasible <- which(sorted_desc > (partial_sums - b) / seq_along(sorted_desc))
  rho <- tail(feasible, n = 1)
  theta <- max(0, (partial_sums[rho] - b) / rho)
  shifted <- clipped - theta
  shifted[shifted < 0] <- 0
  shifted
}
#install.packages('R.matlab')
## Driver script: load the TSE price-relative matrix from a MATLAB file,
## run the PAMR strategy with zero transaction costs, and analyse results.
library("R.matlab")
#install.packages("readxl")
#install.packages("stats")
#library(stats)
#library(readxl)
path <- ('Data')
#input
pathname <- file.path(path,'tse.mat')
data_1 <- as.vector(readMat(pathname))
#data_matrix <- read_excel(pathname, sheet = "P4", skip=4, col_names = FALSE)
#data_matrix <- data.matrix(data_matrix[,2:ncol(data_matrix)])
#data_matrix <- data_matrix[complete.cases(data_matrix),]
#data_matrix <- read.csv(pathname,sep=',',stringsAsFactors = FALSE,skip=3,header=TRUE)
#class(data_1)
#print(data_1)
data_matrix <- as.matrix(as.data.frame(data_1))
#class(data_matrix)
fid = "pamr.txt"
tc = 0   # transaction-cost rate passed to pamr_run
result = pamr_run(fid,data_matrix,tc)
## result is list(cum_ret, cumpro_ret, daily_ret); persist and analyse.
write.csv(file = "pamr.csv",result)
source("ra_result_analyze.R")
ra_result_analyze(paste(pathname,"pamr.csv",sep = '_'),data_matrix,as.numeric(result[[1]]),as.numeric(result[[2]]),as.numeric(result[[3]]))
|
89358038aa9d1c0680038f894e6e65efc5e88614
|
1cbad6b517ea7555ccab4123e510f9f1050cfc9c
|
/naomi/R/utils.R
|
d81ff1df1e41f2c192933ebb104eb758db7f1661
|
[
"MIT"
] |
permissive
|
jeffeaton/naomi-model-paper
|
483139c7052b717f52a4f35a81821e5d2b8a297e
|
c7207417f79da3e7be2bcbb265a798520623e0ef
|
refs/heads/master
| 2023-06-15T19:14:32.322162
| 2021-07-16T21:12:21
| 2021-07-16T21:12:21
| 360,933,843
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,235
|
r
|
utils.R
|
## Write a CSV in the project's house style: no row names, NA as empty string.
## Arguments are forwarded verbatim to utils::write.csv.
naomi_write_csv <- function(...) {
  write.csv(..., row.names = FALSE, na = "")
}

## Read a CSV (auto-detecting "," vs ";" separators via csv_reader) with the
## readr implementation, returned as a plain data.frame.
naomi_read_csv <- function(file, ..., col_types = readr::cols()) {
  as.data.frame(csv_reader(file, TRUE)(file, ..., col_types = col_types))
}

## As naomi_read_csv(), but keep readr's tibble return type.
readr_read_csv <- function(file, ..., col_types = readr::cols()) {
  csv_reader(file, TRUE)(file, ..., col_types = col_types)
}

## Choose a reader function from the file's header line: if it contains ";"
## but no ",", treat the file as semicolon-separated (read_csv2/read.csv2).
## `readr` selects between the readr and base implementations.
csv_reader <- function(file, readr = FALSE) {
  header <- brio::readLines(file, 1)
  if (!grepl(",", header) && grepl(";", header)) {
    if (readr) readr::read_csv2 else utils::read.csv2
  } else {
    if (readr) readr::read_csv else utils::read.csv
  }
}

## Resolve a file shipped inside the installed naomi package; errors if the
## file does not exist (mustWork = TRUE).
system_file <- function(...) {
  system.file(..., package = "naomi", mustWork = TRUE)
}
## Serialise `x` to a CSV-formatted string via a temporary file (removed on
## exit). Lines are joined with "\n"; no trailing newline is kept.
write_csv_string <- function(x, ..., row.names = FALSE) {
  tmp <- tempfile()
  on.exit(unlink(tmp))
  write.csv(x, tmp, ..., row.names = row.names)
  paste0(brio::readLines(tmp), collapse = "\n")
}

## Evaluate `expr`, muffling only warnings whose message matches `regexp`;
## any other warning propagates to the caller unchanged.
suppress_one_warning <- function(expr, regexp) {
  withCallingHandlers(expr,
                      warning = function(w) {
                        if(grepl(regexp, w$message))
                          invokeRestart("muffleWarning")
                      })
}
## Null-coalescing operator: return `a` unless it is NULL, otherwise `b`.
`%||%` <- function(a, b) {
  if (!is.null(a)) {
    a
  } else {
    b
  }
}
## Drop the translator registered with the traduire package (used to clean up
## translation state, e.g. on package unload).
naomi_translator_unregister <- function() {
  traduire::translator_unregister()
}
|
0c3ffdb9ef72a45a5f4a065e45ce49d415811424
|
3dcc2b4999a6325d98c7537851b10cd44fe589a7
|
/Week2_RandomWalks.R
|
c1d64a8a8df02b6a2419d75b422d97c49c7a4593
|
[] |
no_license
|
robjohnnoble/MathProcFin_R_code
|
93d271ff4770a84cc6e582ffcd0124d299a04317
|
50fb16cec4b4762fd0ead77c45956251104a3e35
|
refs/heads/main
| 2023-02-25T01:08:34.196940
| 2021-02-01T13:07:04
| 2021-02-01T13:07:04
| 331,593,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,588
|
r
|
Week2_RandomWalks.R
|
# Variance after n steps of a random walk whose step takes value x with
# probability p and value y with probability 1 - p.
var_rw <- function(p, n, x, y) {
  squared_spread <- (x - y)^2
  n * p * (1 - p) * squared_spread
}
# Expected position after n steps of a random walk with step values x
# (probability p) and y (probability 1 - p).
exp_rw <- function(n, p, x, y) {
  n * (p * x + (1 - p) * y)
}
# PMF of the random walk position: P(W_n = w_n) for an n-step walk with step
# values x (probability p) and y (probability 1 - p). Returns NA when w_n is
# unreachable (non-integer or out-of-range number of up-steps).
rw_pmf <- function(p, x, y, w_n, n) {
  n_up <- (w_n - n * y) / (x - y)   # number of x-steps implied by w_n
  if (n_up != round(n_up)) {
    return(NA)
  }
  n_ways <- choose(n, n_up)
  if (n_ways == 0) {
    return(NA)
  }
  n_ways * p^n_up * (1 - p)^(n - n_up)
}
# plot trajectories of a symmetric random walk (black)
# and an asymmetric random walk (red):
W1 <- sample(c(-1, 1), 100, replace = TRUE)
S1 <- cumsum(W1)
W2 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(0.2, 0.8))
S2 <- cumsum(W2)
# pdf("RandomWalk.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1))
plot(S1, type = "l", ylim = c(-20, 80), xlab = "Time", ylab = "Position")
lines(S2, col = "red")
# dev.off()
# plot five trajectories of a symmetric random walk:
W1 <- sample(c(-1, 1), 100, replace = TRUE)
S1 <- cumsum(W1)
# pdf("RandomWalk2.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1))
plot(S1, type = "l", ylim = c(-20, 20), xlab = "Time", ylab = "Position")
cols <- c("red", "blue", "gold", "green3")
for(i in 1:4) {
  W1 <- sample(c(-1, 1), 100, replace = TRUE)
  S1 <- cumsum(W1)
  lines(S1, col = cols[i])
}
# dev.off()
# plot 1,000 trajectories of a symmetric random walk
# (near-transparent lines so density shows as shading):
W1 <- sample(c(-1, 1), 100, replace = TRUE)
S1 <- cumsum(W1)
# pdf("RandomWalkMany.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1))
plot(S1, type = "l", ylim = c(-40, 40), xlab = "Time", ylab = "Position", col = rgb(0,0,0,0.02))
for(i in 1:999) {
  W1 <- sample(c(-1, 1), 100, replace = TRUE)
  S1 <- cumsum(W1)
  lines(S1, col = rgb(0,0,0,0.02))
}
# dev.off()
# plot 1,000 trajectories of a symmetric random walk
# with curves showing predicted mean (gold, zero) and +/-2 SD (magenta):
W1 <- sample(c(-1, 1), 100, replace = TRUE)
S1 <- cumsum(W1)
dist1 <- S1[100]
# pdf("RandomWalkManyWithStdev.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1))
plot(S1, type = "l", ylim = c(-40, 40), xlab = "Time", ylab = "Position", col = rgb(0,0,0,0.02))
for(i in 1:999) {
  W1 <- sample(c(-1, 1), 100, replace = TRUE)
  S1 <- cumsum(W1)
  lines(S1, col = rgb(0,0,0,0.02))
}
t <- 1:100
v <- 2 * sqrt(sapply(t, var_rw, p = 0.5, x = 1, y = -1))
# pmin(t, 0) is a vector of zeros here (the symmetric walk's mean).
lines(pmin(t, 0), col = "gold")
lines(v, col = "magenta")
lines(-v, col = "magenta")
# dev.off()
# plot 1,000 trajectories of an asymmetric random walk (p = 0.8)
# with curves showing predicted mean and standard deviation:
p1 <- 0.8
W1 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(1 - p1, p1))
S1 <- cumsum(W1)
# pdf("RandomWalkManyWithStdevAsymmetric.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1))
plot(S1, type = "l", ylim = c(-20, 100), xlab = "Time", ylab = "Position", col = rgb(0,0,0,0.02))
for(i in 1:999) {
  W1 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(1 - p1, p1))
  S1 <- cumsum(W1)
  lines(S1, col = rgb(0,0,0,0.02))
}
t <- 1:100
m <- sapply(t, exp_rw, p = p1, x = 1, y = -1)
v <- 2 * sqrt(sapply(t, var_rw, p = p1, x = 1, y = -1))
lines(m, col = "gold")
lines(m + v, col = "magenta")
lines(m - v, col = "magenta")
abline(h = 0, lty = 2)
# dev.off()
# plot 1,000 trajectories each of three asymmetric random walks
# with curves showing predicted means and standard deviations:
# NOTE(review): here pdf() is active but the matching dev.off() below is
# commented out, and the base plot() call is commented out while lines() is
# still used -- as written this section likely errors ("plot.new has not
# been called yet") unless a plot already exists; confirm intended state of
# the commented lines before running.
pdf("RandomWalkManyWithStdevAsymmetric2.pdf", width = 4, height = 3)
par(mar = c(4,4,1,3.2))
W1 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(1 - p1, p1))
S1 <- cumsum(W1)
# plot(S1, type = "l", ylim = c(-20, 100), xlab = "Time", ylab = "Position", col = rgb(0,0,0,0.02))
for(p1 in c(0.6, 0.8, 0.95)) {
  W1 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(1 - p1, p1))
  S1 <- cumsum(W1)
  for(i in 1:1000) {
    W1 <- sample(c(-1, 1), 100, replace = TRUE, prob = c(1 - p1, p1))
    S1 <- cumsum(W1)
    lines(S1, col = rgb(0,0,0,0.02))
  }
}
t <- 1:100
for(p1 in c(0.6, 0.8, 0.95)) {
  m <- sapply(t, exp_rw, p = p1, x = 1, y = -1)
  v <- 2 * sqrt(sapply(t, var_rw, p = p1, x = 1, y = -1))
  lines(m, col = "gold")
  lines(m + v, col = "magenta")
  lines(m - v, col = "magenta")
}
abline(h = 0, lty = 2)
# Right-margin labels placed at each walk's expected terminal position.
mtext(" p = 0.6", 4, las = 2, at = 100 * (0.6 - (1 - 0.6)))
mtext(" p = 0.8", 4, las = 2, at = 100 * (0.8 - (1 - 0.8)))
mtext(" p = 0.95", 4, las = 2, at = 100 * (0.95 - (1 - 0.95)))
# dev.off()
# plot PMF of a random walk:
n <- 10
p <- 0.5
x <- 2
y <- -1
w_n_vec <- (n * y):(n * x)   # all candidate terminal positions
# pdf("RandomWalk_PMF.pdf", width = 4, height = 3)
pmf_vec <- sapply(w_n_vec, rw_pmf, p = p, x = x, y = y, n = n)
plot(pmf_vec ~ w_n_vec,
     xlab = expression(w[n]),
     ylab = expression(paste("P(", W[n] ," = ", w[n], ")")),
     ylim = c(0, max(pmf_vec, na.rm = TRUE)))
# dev.off()
|
df8ad0e60e8cc54b56be6639803af1f651f6f280
|
8516a1b12744c52a8775250fea9be7f2bf535f4b
|
/shiny_form_table/ui.R
|
171dccc300720096a3b66aea0885a94a161be421
|
[] |
no_license
|
Ronlee12355/ShinyTrials
|
5696482712d87fce6349bfbb5f8fc3915b32154c
|
c00691d7a1cdbdd0f608bbfe2c09dbe3878fe590
|
refs/heads/master
| 2021-03-22T22:29:47.263599
| 2020-04-12T06:04:46
| 2020-04-12T06:04:46
| 247,402,563
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,190
|
r
|
ui.R
|
library(shiny)

## Demo UI exercising shiny's form widgets: date/slider/radio inputs, a
## conditional panel, select/text/password/file inputs, and a results table.
## (Uses the legacy shinyUI() wrapper; requires shinythemes for the selector.)
shinyUI(fluidPage(
  # header title
  titlePanel('First try of shiny app, only for form elements'),
  br(),
  shinythemes::themeSelector(),
  wellPanel(
    dateInput('date', 'Date Choose: ', startview = 'month', language = 'zh-CN'),
    sliderInput('num', 'Choose a number: ', min = 0, max = 100, value = 30),
    radioButtons('gender', 'Gender', c('Male'='m', 'Female'='f','Transgender'='trans'),inline = T),
    # Second radio group shown only while the first is set to 'f'.
    conditionalPanel("input.gender == 'f'",
                     radioButtons('gender1', 'Gender', c('Male'='m', 'Female'='f','Transgender'='trans'),inline = T)),
    selectInput("variable", "Variable:",
                c("Cylinders" = "cyl",
                  "Transmission" = "am",
                  "Gears" = "gear"), selected = 'am')
  ),
  # Name and password inputs side by side.
  splitLayout(
    textInput('name', 'Your name is: ', placeholder = 'Your name please'),
    passwordInput('passwd', 'Your password is: ', placeholder = 'Your password please', width = '100%')
  ),
  # File upload restricted to CSV / plain text.
  fileInput('file', 'Choose a file: ', accept = c(
    "text/csv",
    "text/comma-separated-values,text/plain",
    ".csv")
  ),
  br(),
  actionButton('submit', 'Submit Now'),
  tableOutput('tableOut')   # filled by the server as output$tableOut
))
|
bb6a68c1d421ab701bd4a66b1d4d03bb5654b3b7
|
a7c370386ab2e6534985275107323a128b0e16fe
|
/man/define.versions.Rd
|
b4e645c7c5b9e9aa87670469632e6723e37d73f8
|
[
"MIT"
] |
permissive
|
sarkar-deep96/climateR
|
05f4a7c62f24266f03ea1a70d7ade01ebdba54cf
|
93332328c1bf6f875dc2e0d184f0fdf597501852
|
refs/heads/master
| 2023-07-03T08:12:00.709572
| 2021-08-03T14:54:05
| 2021-08-03T14:54:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 856
|
rd
|
define.versions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_define_versions.R
\name{define.versions}
\alias{define.versions}
\title{Define climateR versions}
\usage{
define.versions(
dates,
first.date = "1950-01-01",
sep.date = "2006-01-01",
scenario,
future.call,
historic.call,
timeRes = "daily"
)
}
\arguments{
\item{dates}{a vector of dates}
\item{first.date}{the first date in a dataset}
\item{sep.date}{the date which separates the TDS catalogues}
\item{scenario}{the climate scenario}
\item{future.call}{the TDS future catalogue}

\item{historic.call}{the TDS historic catalogue}
\item{timeRes}{the time resolution of dataset (e.g. monthly or daily)}
}
\value{
data.frame
}
\description{
**INTERNAL** Define whether data belongs to separate
(historic and future) archives based on a defined separation date.
}
|
0e3e04354bb43118eb02cab10849f43c2d4fff7b
|
79c2ddfa41d2a18da3ac243d600e01944cafb175
|
/cachematrix.R
|
3209cb13b90697e439aebbf6c874fa37ed69dbd5
|
[] |
no_license
|
carojasq/ProgrammingAssignment2
|
946811dd1d836795092789af6fe3f873bdc70f7c
|
0de34e180292b3eb90395d41bbd717b561e26493
|
refs/heads/master
| 2020-05-01T01:16:03.889898
| 2014-12-21T01:30:02
| 2014-12-21T01:30:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 872
|
r
|
cachematrix.R
|
# Build a special "matrix" wrapper able to cache its inverse alongside the
# data. Returns a list of accessors: set/get the matrix, setinverse/getinverse
# for the cached inverse (NULL when not yet computed or after the matrix
# changes).
# FIX: the original set() assigned `m <<- NULL`, which created a stray
# variable `m` in an enclosing scope and left the stale cached inverse in
# `inverse_matrix`; it now clears `inverse_matrix` so set() invalidates the
# cache as intended.
makeCacheMatrix <- function(x=matrix()) {
  inverse_matrix <- NULL
  set <- function(y) {
    x <<- y
    inverse_matrix <<- NULL   # invalidate cache when the matrix is replaced
  }
  get <- function() x
  setinverse <-function(inverse) inverse_matrix <<- inverse
  getinverse <- function() inverse_matrix
  list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# Return the inverse of a cache-aware matrix created by makeCacheMatrix().
# Serves the cached inverse when one is present (announced with a message);
# otherwise computes it with solve(), stores it in the cache, and returns it.
cacheSolve<- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("Getting cached data")
  cached
}
# Example code, uncomment to test
#x <- matrix(c(4,2,7,6), 2, 2)
#new_matrix <- makeCacheMatrix(x)
#print (cacheSolve(new_matrix))
|
e36c4a5abccfddef80ad6c3167263a8a00f3c8c2
|
ea524efd69aaa01a698112d4eb3ee4bf0db35988
|
/man/TeamcityReporter.Rd
|
f2b2b79de6243360dc0c6bd14fde65844205f782
|
[
"MIT"
] |
permissive
|
r-lib/testthat
|
92f317432e9e8097a5e5c21455f67563c923765f
|
29018e067f87b07805e55178f387d2a04ff8311f
|
refs/heads/main
| 2023-08-31T02:50:55.045661
| 2023-08-08T12:17:23
| 2023-08-08T12:17:23
| 295,311
| 452
| 217
|
NOASSERTION
| 2023-08-29T10:51:30
| 2009-09-02T12:51:44
|
R
|
UTF-8
|
R
| false
| true
| 890
|
rd
|
TeamcityReporter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reporter-teamcity.R
\name{TeamcityReporter}
\alias{TeamcityReporter}
\title{Test reporter: Teamcity format.}
\description{
This reporter will output results in the Teamcity message format.
For more information about Teamcity messages, see
http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity
}
\seealso{
Other reporters:
\code{\link{CheckReporter}},
\code{\link{DebugReporter}},
\code{\link{FailReporter}},
\code{\link{JunitReporter}},
\code{\link{ListReporter}},
\code{\link{LocationReporter}},
\code{\link{MinimalReporter}},
\code{\link{MultiReporter}},
\code{\link{ProgressReporter}},
\code{\link{RStudioReporter}},
\code{\link{Reporter}},
\code{\link{SilentReporter}},
\code{\link{StopReporter}},
\code{\link{SummaryReporter}},
\code{\link{TapReporter}}
}
\concept{reporters}
|
7fd73428d8407e4d087ea4981907262c003d4703
|
a33b1a6c61f80539343be9ac6aec5412f30cdc12
|
/20170620geologyGeometry/libraryC/orientationsUsingC.R
|
8e936fcecf5e0467fa5bdb0b867695a216065654
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
nicolasmroberts/nicolasmroberts.github.io
|
9a143c93859f2b3f133ade1acf54fb1ba1c966d3
|
f6e8a5a02eea031fb68c926d6d922846eeb71781
|
refs/heads/master
| 2022-09-08T22:03:26.646877
| 2022-07-27T20:50:50
| 2022-07-27T20:50:50
| 117,170,161
| 0
| 1
|
MIT
| 2018-03-04T23:16:00
| 2018-01-12T00:23:20
|
HTML
|
UTF-8
|
R
| false
| false
| 8,791
|
r
|
orientationsUsingC.R
|
# Copyright 2016 Joshua R. Davis
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# In this file, 'rotation matrix' means 'special orthogonal 3x3 real matrix'. These functions complement and replace some functions in orientations.R, with versions written in C and called from R. These functions require compilation of the shared library orientationsForR.c. After that, you have to issue a command like this:
# dyn.load("orientationsForR.so")
### INFERENCE ###
# Helper function.
# **INTERNAL** Parse the flat numeric vector `raw` returned by the C MCMC
# routine into an R result list. When burn-in succeeded, raw is laid out as:
# 9 numbers (column-major 3x3 mean), 9 (inverse covariance), 101 Mahalanobis
# percentiles, 8 metadata values, then numReport flattened 3x3 means followed
# by numReport etas.
oricMCMCParsing <- function(raw, group, numReport) {
  if (length(raw) == 4) {
    # Burn-in failed. Just report the burn-in metadata.
    list(nu=raw[[1]], nuRate=raw[[2]], gamma=exp(raw[[3]]), gammaRate=raw[[4]])
  } else {
    # raw consists of means-mean, means-covarInv, 101 percentiles, 8 meta-data, numReport means, and numReport etas.
    mBar <- matrix(raw[1:9], 3, 3)
    covarInv <- matrix(raw[10:18], 3, 3)
    percs <- raw[19:119]
    burnin <- list(nu=raw[[120]], nuRate=raw[[121]], gamma=exp(raw[[122]]), gammaRate=raw[[123]])
    collection <- list(nu=raw[[124]], nuRate=raw[[125]], gamma=exp(raw[[126]]), gammaRate=raw[[127]])
    ms <- list()
    if (numReport <= 0)
      kappas <- c()
    else {
      # Each reported mean occupies 9 consecutive entries (column-major 3x3).
      for (j in 0:(numReport - 1))
        ms[[length(ms) + 1]] <- matrix(raw[(128 + j * 9):(128 + j * 9 + 8)], 3, 3)
      # Etas are stored after all the means; kappa is recovered as exp(-eta).
      kappas <- exp(-raw[(128 + numReport * 9):(128 + numReport * 9 + numReport - 1)])
    }
    # p-value function based on Mahalanobis distance.
    # percs map onto quantiles 0.900..1.000 in steps of 0.001; values below
    # the 0.900 percentile yield NA, values above the max yield 1.
    f <- approxfun(x=percs, y=((0:100) / 1000 + 0.9), yleft=NA, yright=1)
    pvalue <- function(r) {
      # Evaluate at every symmetric copy g %*% r and keep the largest p-value.
      vs <- lapply(group, function(g) rotLeftTangentFromMatrix(g %*% r, mBar))
      ps <- sapply(vs, function(v) {1 - f(sqrt(v %*% covarInv %*% v))})
      # If any of the ps are NA, then max will return NA.
      max(ps)
    }
    list(pvalue=pvalue, ms=ms, kappas=kappas, mBar=mBar, leftCovarInv=covarInv,
      q090=percs[[1]], q095=percs[[51]], q099=percs[[91]], q100=percs[[101]],
      burnin=burnin, collection=collection)
  }
}
#' MCMC of posterior distribution rho(S, eta | D) for wrapped trivariate normal distribution parameters.
#'
#' Implemented in C for speed. See Qiu et al. (2014). This function requires compilation of the C shared library orientationsForR.c.
#' @param rs A list of rotation matrices. The data set D.
#' @param group A list of rotation matrices. The symmetry group G.
#' @param numTerms A real number (non-negative integer). Controls how many terms are used in the asymptotic expansions in the Jeffreys prior for kappa.
#' @param numTuning A real number (non-negative integer). The tuning parameters are re-tuned every numTuning MCMC iterations, based on the acceptance rate since the last tuning.
#' @param numBurnin A real number (non-negative integer). The number of MCMC iterations in the burn-in phase.
#' @param numCollection A real number (non-negative integer). The number of MCMC iterations in the collection phase. Should not exceed 100,000,000, or else we might overflow 32-bit integers along the way.
#' @param numReport A real number (non-negative integer). The number of MCMC samples (M, kappa) to report. If 0, then none are reported. If numCollection (or greater), then all are reported.
#' @return A list with elements pvalue (R function from rotations to {NA} union [0, 0.1]), ms (the reported Ms), kappas (the reported kappas), mBar (rotation matrix, the mean of the collected Ms), leftCovarInv (the inverse covariance matrix of the collected Ms in the left-invariant tangent space at mBar).
oricWrappedTrivariateNormalMCMCInference <- function(rs, group, numTerms=10, numTuning=10000, numBurnin=100, numCollection=1000, numReport=10000) {
  # Check that the inputs are of the correct types.
  # !!nums are integer; rs are real
  # Clamp numReport so the C code is never asked for more samples than exist.
  if (numTuning * numCollection < numReport) {
    numReport <- numTuning * numCollection
    print("warning: oricWrappedTrivariateNormalMCMCInference: clamping numReport to numTuning * numCollection")
  }
  # Flatten rs into an array of n * 9 numbers, by column-major order. Same with group.
  flat <- c(simplify2array(rs))
  flatGroup <- c(simplify2array(group))
  # Get a huge vector of numbers from C, to be parsed in a moment.
  # (Requires dyn.load of orientationsForR.so beforehand; see file header.)
  raw <- .Call("mcmcOrientationWrappedTrivariateNormalC",
    flat, flatGroup, numTerms, numTuning, numBurnin, numCollection, numReport)
  # Shared parser handles both the burn-in-failed and success layouts.
  oricMCMCParsing(raw, group, numReport)
}
#' Bootstrapping of the Frechet mean.
#'
#' Similar to oriBootstrapInference, but implemented in C for speed, and offers different percentiles of Mahalanobis norm. This function requires compilation of the C shared library orientationsForR.c.
#' @param rs A list of rotation matrices. The data set.
#' @param group A list of rotation matrices. The symmetry group G.
#' @param numBoots A real number (non-negative integer). The number of bootstrap samples. Affects the memory requirements of the function.
#' @return A list with elements pvalue (R function from rotations to {NA} union [0, 0.1]), bootstraps (the bootstrapped means), center (rotation matrix, the mean of the bootstrapped means), leftCovarInv (the inverse covariance matrix at rBar), q090, q095, q099, q100 (percentiles of Mahalanobis norm).
oricBootstrapInference <- function(rs, group, numBoots=10000) {
  # Check that the inputs are of the correct types.
  # !!numBoots is integer; rs are real
  # Flatten rs into an array of n * 9 numbers, by column-major order. Same for group.
  flat <- c(simplify2array(rs))
  flatGroup <- c(simplify2array(group))
  # Get a huge vector of numbers from C, to be parsed in a moment.
  raw <- .Call("pvalueOrientationBootstrappingC", flat, flatGroup, numBoots)
  # raw consists of means-mean, means-covarInv, 101 percentiles, numBoots means.
  mBar <- matrix(raw[1:9], 3, 3)       # mean of the bootstrapped means
  covarInv <- matrix(raw[10:18], 3, 3) # inverse covariance in tangent space at mBar
  percs <- raw[19:119]                 # Mahalanobis-norm percentiles (0.900..1.000)
  ms <- list()
  # Each bootstrapped mean occupies 9 consecutive entries (column-major 3x3).
  for (j in 0:(numBoots - 1))
    ms[[length(ms) + 1]] <- matrix(raw[(120 + j * 9):(120 + j * 9 + 8)], 3, 3)
  # p-value function based on Mahalanobis distance. percs map onto quantiles
  # 0.900..1.000 in 0.001 steps; below the 0.900 percentile yields NA.
  f <- approxfun(x=percs, y=((0:100) / 1000 + 0.9), yleft=NA, yright=1)
  pvalue <- function(r) {
    # Evaluate at every symmetric copy g %*% r and keep the largest p-value.
    vs <- lapply(group, function(g) rotLeftTangentFromMatrix(g %*% r, mBar))
    ps <- sapply(vs, function(v) {1 - f(sqrt(v %*% covarInv %*% v))})
    # If any of the ps are NA, then max will return NA.
    max(ps)
  }
  list(pvalue=pvalue, bootstraps=ms, center=mBar, leftCovarInv=covarInv,
    q090=percs[[1]], q095=percs[[51]], q099=percs[[91]], q100=percs[[101]])
}
### PLOTTING ###
#' Equal-volume plot of orientations with an accompanying Kamb density level surface.
#'
#' This function requires compilation of the C shared library orientationsForR.c.
#' @param points A list of rotation matrices.
#' @param group A list of rotation matrices. The symmetry group G.
#' @param multiple A real number (positive). Indicates which multiple of the standard deviation to plot; for example, mult is 12 in a 12-sigma plot.
#' @param k A real number (positive). A smoothing factor, which equaled 3 in the original paper of Kamb (1959).
#' @param degree A real number (0, 1, or 3). The degree of the weighting polynomial; higher degrees generally produce smoother plots.
#' @param numNonAdapt A real number (non-negative integer). The number of non-adaptive refinements. Time and space required are proportional to 8^(numNonAdapt + numAdapt), so don't make it too big.
#' @param numAdapt A real number (non-negative integer). The number of adaptive refinements after the non-adaptive ones. Time and space required are proportional to 8^(numNonAdapt + numAdapt), so don't make it too big.
#' @param colors A list of strings (colors). Used to color the points only.
#' @param ... Plotting options: boundaryAlpha, simplePoints, etc. See rotPlotBall for details. Options about curves are ignored.
#' @return NULL.
oricKambPlot <- function(points, group, multiple=6, k=3, degree=3, numNonAdapt=3, numAdapt=3, colors=c("white"), ...) {
  # Symmetrize the data set: one copy of every point per group element g.
  pointss <- Reduce(c, lapply(group, function(g) lapply(points, function(p) g %*% p)))
  # Kamb density triangulation is computed in C (rotcKambTriangles).
  raws <- rotcKambTriangles(pointss, multiple, k, degree, numNonAdapt, numAdapt, length(group))
  # Convert each rotation to its equal-volume ball coordinates for plotting.
  vs <- lapply(pointss, rotEqualVolumeFromMatrix)
  # Repeat the colour vector once per group element so lengths match pointss.
  colorss <- Reduce(c, replicate(length(group), colors))
  rotNativeEqualVolumePlot(points=vs, colors=colorss, trianglesRaw=raws, ...)
}
|
aa51a2afa717fb9635012ee7e8bb4e75c44ef3f1
|
67c401741bb8e2518c66977b3293d6901259c2fc
|
/_archive/_archive/random_forests/rf_code_dmacs.R
|
15e89287afc3b3de0d4019f4064825e945d52397
|
[
"MIT"
] |
permissive
|
andrewcistola/healthy-neighborhoods
|
64e37462d39270a02b915c6a56a4abf9f9413136
|
08bd0cd9dcb81b083a003943cd6679ca12237a1e
|
refs/heads/master
| 2023-01-28T23:15:12.850704
| 2020-11-18T15:29:32
| 2020-11-18T15:29:32
| 192,378,489
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,206
|
r
|
rf_code_dmacs.R
|
## Code Prep
## Random-forest variable ranking of social variables against diabetes
## mortality (DMACS data), followed by a backward-stepwise linear model on
## the top-ranked variables.
## NOTE(review): hard-coded Windows paths and manual Excel cleanup steps make
## this script machine-specific — confirm paths before running.
setwd("C:/Users/drewc/Documents/GitHub/healthy_neighborhoods")
library(dplyr)
library(randomForest)
library(MASS)
library(reshape)
rf = read.csv("rf/rf_data_dmacs.csv")
## Random Forest
# Drop the tract identifier and coerce factors to numeric before fitting.
rf$Tract <- NULL
rf = rf %>% mutate_if(is.factor, as.numeric)
of <- randomForest(
formula = Diabetes ~ .,
data = rf,
ntree = 1000,
importance=TRUE)
# Variable-importance table, exported for manual cleanup.
rank = importance(of)
write.csv(rank, "C:/Users/drewc/Documents/healthy_neighborhoods/rf/rf_results_rank.csv") # clean and transpose in excel
## Bind Variables to Prep Model
# Re-import the manually-cleaned ranking and bind it to the master data.
rank = read.csv("rf/rf_results_rank41.csv")
rf = read.csv("rf/rf_master_dmacs.csv")
bind = rbind.fill(rank, rf)
write.csv(bind, "C:/Users/drewc/Documents/healthy_neighborhoods/rf/rf_results_bind.csv") #remove NA and clean in excel
mod = read.csv("rf/rf_results_bind.csv")
# Build the formula "Diabetes ~ var1 + var2 + ..." from the ranked columns.
frmla = as.formula(paste("Diabetes ~ ", paste(colnames(rank), collapse=" + "), sep = ""))
fit = lm(frmla, data=mod)
## Stepwise backwards Fit
back <- stepAIC(fit, direction="backward")
final <- data.frame(summary(back)$coefficients)
colnames(final) <- c("Estimate", "Std.Error", "t", "Pr.t")
# Drop the intercept row before plotting coefficients.
finalcoef = final$Estimate
finalcoef = finalcoef[-1]
finalvars = rownames(final)
finalvars = finalvars[-1]
# Human-readable labels for the retained variables.
# NOTE(review): this hard-coded label list must stay in the same order as the
# model terms — verify after any re-fit.
finalvars = c("With a Computer", "With Income from Earnings", "College Educated", "With a Disability", "85 Years and Over", "62 Years and Over", "Born in U.S.", "Not in Labor Force with Public Coverage", "Householder in Household", "Not in Labor Force", "Nonfamily Households", "English Only Households", "Households with Children", "Housing Value $50,000 to $99,999", "With Social Security", "Householder Living Alone", "Married Females", "Family Households", "Males Widowed", "65 and Over Households")
barplot(finalcoef, names.arg = finalvars, main = "Social Variables Assocaited with Diabetets Mortality", xlab = "Coefficient in Final Fit Model", col = "blue", las = 1, horiz = TRUE)
## Conduct Linear Regression on Variable of Choice and Health Outcome
model = lm(Percent..EDUCATIONAL.ATTAINMENT...Percent.bachelor.s.degree.or.higher ~ Diabetes, data = rf)
summary(model)
|
4a9cc42c50382edf3f963e2a4c36e19f6af698ca
|
0c1b525e3c773211a1158ed6ec71cd80c9a5caa3
|
/library/timetk/function/transform/normalize_vec.R
|
f35d4eeb3eaedb882339db1628d6204aea8d7529
|
[] |
no_license
|
jimyanau/r-timeseries
|
c0f6d55d6be43a2f066a3f948e23378da2cb70d2
|
04e3375bc5cb2fe200f6b259907ccdf6424871d7
|
refs/heads/master
| 2023-07-12T23:21:05.971574
| 2021-08-14T23:20:53
| 2021-08-14T23:20:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
normalize_vec.R
|
# ***************************************************************************************
# Library : timetk
# Function : normalize_vec
# Created on: 2021/8/14
# URL : https://business-science.github.io/timetk/reference/normalize_vec.html
# ***************************************************************************************
# <Overview>
# - Create a rescaled series (0-1 min-max transformation)
# <Syntax>
# normalize_vec(x, min = NULL, max = NULL, silent = FALSE)
# <Contents>
# 0 Setup
# 1 Vector operations
# 2 Data-frame operations
# 0 Setup ---------------------------------------------------------------------
# Libraries
library(dplyr)
library(timetk)
# Prepare data: one daily series from the M4 competition data
d10_daily <- m4_daily %>% filter(id == "D10")
# 1 Vector operations --------------------------------------------------------
# Normalize to [0, 1]
value_norm <- d10_daily$value %>% normalize_vec()
# Visual check: original vs normalized series
d10_daily$value %>% ts.plot()
value_norm %>% ts.plot()
# Summary statistics (the min/max used by the normalization)
d10_daily$value %>% min()
d10_daily$value %>% max()
# Invert the rescaling using the original min/max
value <-
value_norm %>%
normalize_inv_vec(min = 1781.6,
max = 2649.3)
# 2 Data-frame operations ---------------------------------------------------
# Add a rescaled column per group
# NOTE(review): this step calls standardize_vec (z-score), not normalize_vec —
# possibly intentional for contrast; confirm.
m4_daily %>%
group_by(id) %>%
mutate(value_std = standardize_vec(value))
|
9f8635c1676f9aa9fd6facfca69d6b31e6d68f3b
|
956033e3826cfdcefdf81725c1343e94d9c12a2c
|
/R/ACQRS_sub.R
|
66b3fb6ae444dfe9f5521565c3a600184d3c2e46
|
[] |
no_license
|
Allisterh/emssm
|
c38505a401c612289622b1607bfab1718f6bd752
|
2ae000e49c48a00e9dfa06f36bcb27b085b7fa2e
|
refs/heads/master
| 2023-08-14T04:28:40.562688
| 2021-10-04T14:25:16
| 2021-10-04T14:25:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
ACQRS_sub.R
|
#'
#' Estimate the state space model using subspace algorithm
#'
#' Subspace algorithm for the estimation of model
#' \deqn{x_{t+1} = Ax_{t} + w_{t}}
#' \deqn{y_{t} = Cx_{t} + v_{t}}
#' where \eqn{w_{t}} and \eqn{v_{t}} have zero mean and covariance matrices
#' \deqn{Var(w_{t})=Q, Var(v_{t})=R, Cov(w_{t},v_{t})=S}
#'
#' @param \bold{y} data
#' @param \bold{nx} number of rows of matrix A
#' @param \bold{i} auxiliary parameter for building the Hankel matrix. By default, i = nx+1
#' @export
#' @return
#' A, C, Q, R, S
#' @references
#' \emph{Subspace Identification for Linear Systems.}
#' \emph{Theory - Implementation - Applications.}
#' Peter Van Overschee / Bart De Moor.
#' Kluwer Academic Publishers, 1996
#'
ACQRS_sub <- function(y,nx,ny,i=nx+1){
  # data as matrix and by rows (one row per output channel)
  y = as.matrix(y)
  if (nrow(y) != ny){
    y = t(y)
  }
  nt = ncol(y)
  # Hankel matrix
  # --------------------------------------------------
  # number of Hankel matrix columns
  j <- nt - 2*i + 1
  if (j < ny*2*i){
    # rows(H) has to be > columns(H)
    stop("Not enough data for building the Hankel matrix")
  }
  # hankel_yij is a package-internal helper (defined elsewhere).
  H <- hankel_yij(y/sqrt(j),2*i,j)
  # LQ factorization
  # --------------------------------------------------
  # LQ of H obtained as the transpose of a QR factorization.
  q <- qr(t(H))
  L <- t(qr.R(q))
  # L21: block of L linking "past" and "future" halves of the Hankel matrix.
  L21 <- L[(ny*i+1):(2*ny*i),1:(ny*i)]
  # singular values
  # --------------------------------------------------
  s <- svd(L21)
  if (nx==1){
    U1 <- matrix(s$u[,1],ncol=1) # as matrix, not vector
    S1 <- s$d[1]
  }
  else{
    # Keep only the nx dominant singular directions (the model order).
    U1 <- s$u[,1:nx]
    S1 <- s$d[1:nx]
  }
  # Matrices gam and gam1 (extended observability matrix and its shift)
  # --------------------------------------------------
  if (nx==1){
    gam <- U1 %*% sqrt(S1)
    gam1 <- U1[1:(ny*(i-1)),] %*% sqrt(S1)
  }
  else{
    gam <- U1 %*% diag(sqrt(S1))
    gam1 <- U1[1:(ny*(i-1)),] %*% diag(sqrt(S1))
  }
  # and pseudo-inverses (pinv is a package-internal helper)
  gam_inv <- pinv(gam)
  gam1_inv <- pinv(gam1)
  # Determine the states Xi and Xi1
  # --------------------------------------------------
  Xi <- gam_inv %*% L21
  Xi1 <- gam1_inv %*% L[(ny*(i+1)+1):(2*ny*i),1:(ny*(i+1))]
  # Computing the state matrices A and C
  # --------------------------------------------------
  Rhs <- cbind(Xi, matrix(0,nx,ny)) # Right hand side
  Lhs <- rbind(Xi1, L[(ny*i+1):(ny*(i+1)),1:(ny*(i+1))]) # Left hand side
  # Least squares
  sol <- Lhs %*% pinv(Rhs)
  A <- sol[1:nx,1:nx]
  C <- sol[(nx+1):(nx+ny),1:nx]
  # Computing the covariance matrices Q, R and S
  # -------------------------------------------------
  # Residuals of the least-squares fit give the noise covariances.
  res <- Lhs - sol %*% Rhs
  cov <- res %*% t(res)
  Q <- cov[1:nx,1:nx]
  S <- cov[1:nx,(nx+1):(nx+ny)]
  R <- cov[(nx+1):(nx+ny),(nx+1):(nx+ny)]
  return( list(A=A,C=C,Q=Q,R=R,S=S) )
}
|
b5573a63cfb7c27b2b23d27b11a6d34c1c61a963
|
b6d80916052926fff06f988a6840335dec6cc997
|
/skyline_external_tool/AvG_skylineexternaltool/AvantGardeDIA_Help_GitHubRepo.R
|
a71eafb8ddab19466fd3b9dc7fd4dd6d55ff9db4
|
[
"BSD-3-Clause"
] |
permissive
|
SebVaca/Avant_garde
|
043df8165272b0becf823bd4d13338fc25a55652
|
2c2fc25789b2bef8524a867d97158d043244297c
|
refs/heads/master
| 2021-06-07T18:51:36.910796
| 2021-05-07T18:07:59
| 2021-05-07T18:07:59
| 167,582,571
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52
|
r
|
AvantGardeDIA_Help_GitHubRepo.R
|
# Open the Avant-garde GitHub repository in the system default web browser.
# NOTE(review): path suggests this is the "Help" action of a Skyline external
# tool — confirm against the tool definition.
browseURL("https://github.com/SebVaca/Avant_garde")
|
6d6a3e1e42f5ed52709cdfd389511558eae7dfc8
|
b68ba79cfb162536c78644772b08440e6d32fd79
|
/plot3.R
|
dd8fdfde4c2a6488bb6c4643a8427f4904f9abb8
|
[] |
no_license
|
farabi1038/ExData_Plotting1
|
6d6201a5c9f40bed7cf07b459865f0889d33c500
|
9297e58083e77130d741777e28929736bcca5a78
|
refs/heads/master
| 2021-01-20T02:28:06.442521
| 2017-04-26T01:14:32
| 2017-04-26T01:14:32
| 89,408,803
| 0
| 0
| null | 2017-04-25T21:33:05
| 2017-04-25T21:33:05
| null |
UTF-8
|
R
| false
| false
| 722
|
r
|
plot3.R
|
# Plot 3: the three energy sub-metering series for 1-2 Feb 2007, overlaid on
# one line chart with a legend.
# NOTE(review): reads from a hard-coded local path — adjust before running.
File <- "/Users/FARABI/Desktop/fg.txt"
data <- read.table(File, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Keep only the two target days (dates are d/m/Y strings in the raw file).
subSet <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
#str(subSetData)
# Combine Date and Time columns into POSIXlt timestamps for the x axis.
datetime <- strptime(paste(subSet$Date, subSet$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
g <- as.numeric(subSet$Global_active_power)  # read but not used in this plot
sM1 <- as.numeric(subSet$Sub_metering_1)
sM2 <- as.numeric(subSet$Sub_metering_2)
sM3 <- as.numeric(subSet$Sub_metering_3)
# Base plot for sub-metering 1, then overlay the other two series.
plot(datetime, sM1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, sM2, type="l", col="red")
lines(datetime, sM3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
|
f993acb3af92cd4eb1bc35f35597ed06b911b67a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/DiceOptim/R/goldsteinprice.R
|
8b637fe628761eac4245f8a3fe0d437a0c6ef43b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
goldsteinprice.R
|
# Goldstein-Price test function (standardized version).
# --------------------------------------------------------
# Dimension: n = 2; input x is a length-2 vector in [0, 1]^2.
# Number of local minima: 4
# The global minimum:
#   x* = c(0.5, 0.25), f(x*) = -3.129172
# The local minima:
#   x*,2 = c(0.35, 0.4), f(x*,2) = -2.180396
#   x*,3 = c(0.95, 0.55), f(x*,3) = -1.756143
#   x*,4 = c(0.8 , 0.7),  f(x*,4) = -0.807367
# The raw log-value is standardized with mean 8.6928 and sd 2.4269.
goldsteinprice <- function(x) {
  # Map the unit square onto the classical domain [-2, 2]^2.
  u <- 4 * x[1] - 2
  v <- 4 * x[2] - 2
  factor1 <- 1 + (u + v + 1)^2 *
    (19 - 14 * u + 3 * u^2 - 14 * v + 6 * u * v + 3 * v^2)
  factor2 <- 30 + (2 * u - 3 * v)^2 *
    (18 - 32 * u + 12 * u^2 + 48 * v - 36 * u * v + 27 * v^2)
  # Log-transform, then standardize.
  (log(factor1 * factor2) - 8.6928) / 2.4269
}
|
b8b76e0ba914aebc01161f17a4e50591b09550af
|
62f1743aae6487e3e53b8f55c7e6fbf07d9abaa1
|
/plot1.R
|
51f484a15ed8faf2cfebeeb394eeeec38b656f76
|
[] |
no_license
|
cgerstner/ExData_Plotting1
|
082d44142ccdbca65dcc63e6b0d30138e2895f68
|
4fd5fc1794cbef0ec68023c68e8c9bc54181ae1a
|
refs/heads/master
| 2020-03-14T04:19:12.852952
| 2018-04-28T22:05:59
| 2018-04-28T22:05:59
| 131,439,518
| 0
| 0
| null | 2018-04-28T19:49:39
| 2018-04-28T19:49:39
| null |
UTF-8
|
R
| false
| false
| 654
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007
# (UCI household power consumption data).
zipFile <- "household_power_consumption.zip"
dataFile <- "household_power_consumption.txt"
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download and unzip only if not already present in the working directory.
if (!file.exists(zipFile)) {
  download.file(url, zipFile)
}
if (!file.exists(dataFile)) {
  unzip(zipFile)
}
# "?" marks missing values in the raw file.
power <- read.csv(dataFile, sep = ";", na.strings = "?")
power$Date <- strptime(power$Date, "%d/%m/%Y")
# Restrict to the two target days.
consumption <- subset(power, Date >= "2007-02-01" & Date <= "2007-2-02")
# Render to a 480x480 PNG.
png("plot1.png", width = 480, height = 480)
hist(consumption$Global_active_power, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
b48e170b372b073f525ccde5706e587bf9c814a7
|
9992af6db68a9d3a92844b83cf992210da05cc32
|
/CINormalizada.R
|
a3d3168c57ab5380329a02ab1f04625e2df340f3
|
[] |
no_license
|
cristinacambronero/CovarianzaR
|
bcaa06f763ef13220e1090995f2e989663ebf19a
|
e899f3234ca19ccff410d838e6ecff81d17bf5bf
|
refs/heads/master
| 2020-04-13T12:53:38.498218
| 2015-07-26T09:42:44
| 2015-07-26T09:42:44
| 39,720,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,031
|
r
|
CINormalizada.R
|
# Normalize the CI data for the 548 companies.
# FIX: the original file used bare "*****" banner lines and C-style
# "/* ... */" delimiters as comments; neither is valid R syntax, so the
# script could not be parsed. They are replaced with "#" comments below.
datos3 <- CI21548Empresas
install.packages("quantmod")  # NOTE(review): installing inside a script is a heavy side effect; prefer doing this once interactively
library(quantmod)

# **********************************************************
# Remove the companies (columns) that have more than 200 NAs
# **********************************************************
indices <- c(0)
for (i in 2:length(CI21548Empresas)) {
  w <- c(CI21548Empresas[, i])
  x <- w[is.na(w)]
  if (length(x) > 200) {
    v <- c(i)
    indices <- c(indices, v)
  }
}
datos3 <- datos3[, -indices[2:length(indices)]]
datos3 <- na.omit(datos3)

# *********************************************************************
# Compute mean and standard deviation to fill in remaining NA values
# NOTE(review): na.omit() above already drops every row containing an
# NA, so this fill loop may never trigger — confirm the intended order.
# *********************************************************************
for (i in 1:length(datos3)) {
  w <- c(datos3[, i])
  x <- is.na(w)
  x1 <- which(x > 0)
  if (length(x1) > 0) {
    w1 <- na.omit(w)
    mediaCI <- mean(w1)
    sdCI <- sd(w1)
    # Fill each gap with |N(mean, sd)| draws from the column's distribution.
    randomNum <- abs(rnorm(length(x1), mediaCI, sdCI))
    datos3[x1, i] <- randomNum
  }
}
write.table(datos3, 'CI_Empresas.txt', sep='\t', dec='.')
|
6beb400b462144879072d20c1222d9cb2baebc3b
|
d2c8e45888d5be7f4a6cbcc516b11827a2f16911
|
/man/calcul_p.Rd
|
975aa5fd32988fda944556337cacc4614a871d82
|
[] |
no_license
|
genostats/tail.modeling
|
73652274649cc73f47466809c6eb285ccf41236d
|
8248c2a3eb4416e3d48b094301f1c9a8cc58b1a5
|
refs/heads/master
| 2020-03-14T11:59:40.699431
| 2018-06-04T10:43:05
| 2018-06-04T10:43:05
| 131,601,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,382
|
rd
|
calcul_p.Rd
|
\name{calcul_p}
\alias{calcul_p}
\title{Estimation of the p-value with Pareto's function or Box-Cox function used on distribution's tail.
}
\description{ Estimates the p-value of a given data set zsim with the test statistics Zobs thanks to Pareto's method or Box-Cox method with their different estimated parameters.
}
\usage{
calcul_p(zsim,Ntail=500,estim=c("PWM","EMV"),Zobs,param,method=c("BC","GPD"),Nperm=length(zsim),draw=FALSE)
}
\arguments{
\item{zsim}{ Data set - list of real numbers
}
\item{Ntail}{ Length of the tail of the data set taken - integer
}
\item{estim}{ Method to estimate the parameters of Pareto's function - String: estim takes either a string that matches "PWM" ("P","PW",...) for the method of probability weighted moments or a string that matches "EMV" ("EM","V",...) for the method of maximum likelihood. Default value is "PWM".
}
\item{Zobs}{ Test statistic of the data set - real number
}
\item{param}{ Box-cox parameter lambda - real number: if param is missing, the parameter will be estimated with least squares; if it is a real number, this value will be used for lambda without performing any estimation
}
\item{method}{ Method chosen to estimate the p-value - String: either a string that matches "GPD" ("G", "GP", "PD",...) for Pareto's method or a string that matches "BC" ("BC", "B", ...) for Box-Cox's method. Default value is "BC".
}
\item{Nperm}{ Number of permutations of the original data set - integer. Default value is length(zsim)
}
\item{draw}{ Whether the linear regression of the Box-Cox method should be plotted or not - Boolean. Default value is FALSE.
}
}
\details{ Both methods are applied on the distribution's tail of the data set.
}
\value{ Returns a list composed of the estimated p-value and the parameter(s) of the selected method of estimation.
If the selected method is "BC", it returns a list composed of:
\item{Pbc_z }{The estimated p-value with Box-Cox function - real number}
\item{interc }{The intercept of the linear regression used to estimate the p-value - real number}
\item{pente }{The slope of the linear regression used to estimate the p-value - real number}
\item{lambda }{The estimated parameter lambda (or the lambda given if not estimated) - real number}
If the selected method is "GPD", it returns a list composed of:
\item{Pgpd}{The estimated p-value with Pareto's function - real number}
\item{k }{The estimated parameter k of Pareto's cumulative distribution function - real number}
\item{a }{The estimated parameter a of Pareto's cumulative distribution function - real number}
}
\references{Theo A. Knijnenburg, Lodewyk F. A. Wessels, Marcel J. T. Reinders
and Ilya Shmulevich, Fewer permutations, more accurate P-values, Bioinformatics.
}
\author{ Marion
}
\examples{
calcul_p(zsim=rnorm(1e6),Zobs=5,method="BC")
## The function is currently defined as
function(zsim,Ntail=500,estim=c("PWM","EMV"),Zobs,param,method = c("BC","GPD"),Nperm=length(zsim),draw=FALSE){
if(length(zsim) < Ntail)
stop("Ntail can't be larger than length(zsim)")
method <- match.arg(method)
if (method == "BC") {
# les Ntail plus grandes valeurs (les dernieres)
z1 <- tail( sort(zsim) , Ntail )
if (length(log(z1)[log(z1)<=0]) > 0) # eviter les negatifs
{
result <- PBC_Z(z1, Nperm, Ntail, param=1, Zobs, draw)
return(list(Pbc_z = result$p,
pente = result$pente,
interc = result$interc,
lbda = result$lbda))
}
else
{
result <- PBC_Z(z1, Nperm, Ntail, param, Zobs, draw)
return(list(Pbc_z = result$p,
pente = result$pente,
interc = result$interc,
lbda = result$lbda))
}
}
if (method =="GPD"){
# les Ntail + 1 plus grandes valeurs (les dernieres)
z1 <- tail( sort(zsim) , Ntail + 1 )
#seuil pour la GDP
t<-(z1[1] + z1[2])/2
#calcul des excedents de la GDP, ceux qui lui sont superieurs
z1<-z1[-1]
zgpd<-z1-t
zgpd<-zgpd[zgpd>0] #uniquement ceux superieurs au seuil
estim<-match.arg(estim)
result<-PGPD(Zobs, zgpd, Nperm, t, estim)
return(list(Pgpd = result$p,
a = result$a,
k = result$k))
}
}
}
|
115df55f6bab76f382e58e648248bf10bb6bf1f9
|
0ca11666bce33a12e0e33a972d53438d0dc3674c
|
/tests/testthat.R
|
ee23c4ed3eb69db311d9c7748698f779bb08e0b8
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
alixlahuec/syntaxr
|
8305ac297632eda8c42325f0363c517f1581d47d
|
252646cb70546f5f949bebec84482dff9f442cfa
|
refs/heads/master
| 2022-11-26T20:50:47.540924
| 2020-08-04T21:33:20
| 2020-08-04T21:33:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
# Standard testthat entry point (tests/testthat.R): loads the package under
# test and runs every test file in tests/testthat/ via test_check().
library(testthat)
library(syntaxr)
test_check("syntaxr")
|
ce3430497322d961e9e86dfd821c4a899e9d6fe0
|
0db9b9ad4b00a908d9ddba1f157d2d3bba0331c4
|
/tests/testthat/test-as_point.R
|
fc070cb4185b561e77b0b9a919f3ddf1ec125224
|
[
"MIT"
] |
permissive
|
elipousson/sfext
|
c4a19222cc2022579187fe164c27c78470a685bb
|
bbb274f8b7fe7cc19121796abd93cd939279e30a
|
refs/heads/main
| 2023-08-18T15:29:28.943329
| 2023-07-19T20:16:09
| 2023-07-19T20:16:09
| 507,698,197
| 16
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,192
|
r
|
test-as_point.R
|
# Tests for as_point()/as_points()/as_line()/as_lines() coercion helpers.
# Depends on the sf package's North Carolina sample shapefile.
test_that("as_point works", {
  # Check numeric inputs
  expect_true(is_point(as_point(c(0, 1))))
  expect_true(is_point(as_points(c(0, 1), c(1, 0))))
  expect_true(is_multipoint(as_points(c(0, 1), c(1, 0), to = "MULTIPOINT")))
  # Check crs parameter: CRS is NA unless supplied explicitly
  expect_true(is.na(sf::st_crs(as_points(c(0, 1), c(1, 0), to = "MULTIPOINT"))))
  expect_true(!is.na(sf::st_crs(as_points(c(0, 1), c(1, 0), crs = 4326, to = "MULTIPOINT"))))
  # Check sf inputs and outputs
  nc <- sf::read_sf(system.file("shape/nc.shp", package = "sf"))
  nc_crs <- sf::st_crs(nc)
  # as_point returns a bare sfg; as_points returns an sfc geometry column
  expect_true(is_point(as_point(nc)))
  expect_true(is_sfg(as_point(nc)))
  expect_true(is_point(as_points(nc)))
  expect_true(is_sfc(as_points(nc)))
  nc_pt_1 <- as_points(nc[1, ])
  nc_pt_2 <- as_points(nc[2, ])
  expect_true(is_line(as_line(nc_pt_1, nc_pt_2)))
  # FIXME: Should two points produce two lines with as_lines?
  expect_true(is_line(as_lines(nc_pt_1, nc_pt_2, crs = nc_crs)))
  # FIXME: If as_lines is provided with sfg and sfc objects it returns a difficult to interpret error
  expect_true(is_line(as_lines(c(nc_pt_1, nc_pt_2), c(nc_pt_2, nc_pt_1), crs = nc_crs)))
  expect_s3_class(as_centroid(as_bbox(nc)), "sfc")
})
|
401c690d6ecfef66595e3e1b89031eec6c356126
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rdrobust/examples/rdbwselect.Rd.R
|
24e2afb7543d339273493173256c5674712896e0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 293
|
r
|
rdbwselect.Rd.R
|
# Extracted example for rdbwselect (requires the rdrobust package).
library(rdrobust)
### Name: rdbwselect
### Title: Bandwidth Selection Procedures for Local Polynomial Regression
###   Discontinuity Estimators
### Aliases: rdbwselect print.rdbwselect summary.rdbwselect
### ** Examples
# Simulate a sharp RD design: running variable x with a jump of 2 at x = 0.
x<-runif(1000,-1,1)
y<-5+3*x+2*(x>=0)+rnorm(1000)
rdbwselect(y,x)
|
3f8f30fa47d9fab743a2b5b5a5f060e3af6cfa3e
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlesourcerepov1.auto/man/projects.repos.testIamPermissions.Rd
|
0d6c4a6f4fc8d2f53691712cb0ed43cdf95a1fa6
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,257
|
rd
|
projects.repos.testIamPermissions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sourcerepo_functions.R
\name{projects.repos.testIamPermissions}
\alias{projects.repos.testIamPermissions}
\title{Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a NOT_FOUND error.}
\usage{
projects.repos.testIamPermissions(TestIamPermissionsRequest, resource)
}
\arguments{
\item{TestIamPermissionsRequest}{The \link{TestIamPermissionsRequest} object to pass to this method}
\item{resource}{REQUIRED: The resource for which the policy detail is being requested}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/eap/cloud-repositories/cloud-sourcerepo-api}{Google Documentation}
Other TestIamPermissionsRequest functions: \code{\link{TestIamPermissionsRequest}}
}
|
f61e384e3842a882c1f9f2df5b1ae482e70af5ea
|
9132996d08213cdf27c8f6d444e3f5b2cfdcfc85
|
/R/add_cplex_solver.R
|
8da1b8f72e7b2c0c940db4af7664a5e14eceebde
|
[] |
no_license
|
prioritizr/prioritizr
|
152013e81c1ae4af60d6e326e2e849fb066d80ba
|
e9212a5fdfc90895a3638a12960e9ef8fba58cab
|
refs/heads/main
| 2023-08-08T19:17:55.037205
| 2023-08-08T01:42:42
| 2023-08-08T01:42:42
| 80,953,648
| 119
| 30
| null | 2023-08-22T01:51:19
| 2017-02-04T22:45:17
|
R
|
UTF-8
|
R
| false
| false
| 10,638
|
r
|
add_cplex_solver.R
|
#' @include Solver-class.R
NULL
#' Add a *CPLEX* solver
#'
#' Specify that the
#' [*IBM CPLEX*](https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer) software
#' should be used to solve a conservation planning problem (IBM 2017) .
#' This function can also be used to customize the behavior of the solver.
#' It requires the \pkg{cplexAPI} package to be installed
#' (see below for installation instructions).
#'
#' @inheritParams add_gurobi_solver
#'
#' @param presolve `logical` attempt to simplify the
#' problem before solving it? Defaults to `TRUE`.
#'
#' @details
#' [*IBM CPLEX*](https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer) is a
#' commercial optimization software. It is faster than
#' the available open source solvers (e.g., [add_lpsymphony_solver()] and
#' [add_rsymphony_solver()].
#' Although formal benchmarks examining the performance of this solver for
#' conservation planning problems have yet to be completed, preliminary
#' analyses suggest that it performs slightly slower than the *Gurobi*
#' solver (i.e., [add_gurobi_solver()]).
#' We recommend using this solver if the *Gurobi* solver is not available.
#' Licenses are available for the *IBM CPLEX* software to academics at no cost
#' (see <https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer>).
#'
#' @section Installation:
#' The \pkg{cplexAPI} package is used to interface with *IBM CPLEX* software.
#' To install the package, the *IBM CPLEX* software must be installed
#' (see <https://www.ibm.com/products/ilog-cplex-optimization-studio/cplex-optimizer>). Next, the `CPLEX_BIN`
#' environmental variable must be set to specify the file path for the
#' *IBM CPLEX* software. For example, on a Linux system,
#' this variable can be specified by adding the following text to the
#' `~/.bashrc` file:
#' ```
#' export CPLEX_BIN="/opt/ibm/ILOG/CPLEX_Studio128/cplex/bin/x86-64_linux/cplex"
#' ```
#' Please Note that you may need to change the version number in the file path
#' (i.e., `"CPLEX_Studio128"`). After specifying the `CPLEX_BIN`
#' environmental variable, the \pkg{cplexAPI} package can be installed.
#' Since the \pkg{cplexAPI} package is not available on the
#' the Comprehensive R Archive Network (CRAN), it must be installed from
#' [its GitHub repository](https://github.com/cran/cplexAPI). To
#' install the \pkg{cplexAPI} package, please use the following code:
#' ```
#' if (!require(remotes)) install.packages("remotes")
#' remotes::install_github("cran/cplexAPI")
#' ```
#' For further details on installing this package, please consult the
#' [installation instructions](https://github.com/cran/cplexAPI/blob/master/inst/INSTALL).
#'
#' @inherit add_gurobi_solver return seealso
#'
#' @family solvers
#'
#' @references
#' IBM (2017) IBM ILOG CPLEX Optimization Studio CPLEX User's Manual.
#' Version 12 Release 8. IBM ILOG CPLEX Division, Incline Village, NV.
#'
#' @examples
#' \dontrun{
#' # load data
#' sim_pu_raster <- get_sim_pu_raster()
#' sim_features <- get_sim_features()
#'
#' # create problem
#' p <-
#' problem(sim_pu_raster, sim_features) %>%
#' add_min_set_objective() %>%
#' add_relative_targets(0.1) %>%
#' add_binary_decisions() %>%
#' add_cplex_solver(gap = 0.1, time_limit = 5, verbose = FALSE)
#'
#' # generate solution
#' s <- solve(p)
#'
#' # plot solution
#' plot(s, main = "solution", axes = FALSE)
#' }
#' @name add_cplex_solver
NULL
#' @rdname add_cplex_solver
#' @export
add_cplex_solver <- function(x, gap = 0.1, time_limit = .Machine$integer.max,
                             presolve = TRUE, threads = 1, verbose = TRUE) {
  # assert that arguments are valid
  assert_required(x)
  assert_required(gap)
  assert_required(time_limit)
  assert_required(presolve)
  assert_required(threads)
  assert_required(verbose)
  assert(
    is_conservation_problem(x),
    assertthat::is.number(gap),
    all_finite(gap),
    gap >= 0,
    assertthat::is.count(time_limit),
    all_finite(time_limit),
    assertthat::is.flag(presolve),
    assertthat::noNA(presolve),
    is_thread_count(threads),
    assertthat::noNA(threads),
    assertthat::is.flag(verbose),
    is_installed("cplexAPI")
  )
  # add solver: register an R6 Solver subclass on the problem object;
  # `calculate()` stages the optimization model, `run()` invokes CPLEX
  x$add_solver(
    R6::R6Class(
      "CplexSolver",
      inherit = Solver,
      public = list(
        name = "cplex solver",
        # solver settings captured from the arguments above
        data = list(
          gap = gap,
          time_limit = time_limit,
          threads = threads,
          presolve = presolve,
          verbose = verbose
        ),
        calculate = function(x, ...) {
          # create problem representation from the optimization problem object
          model <- list(
            modelsense = x$modelsense(),
            vtype = x$vtype(),
            obj = x$obj(),
            A = x$A(),
            A2 = cplex_matrix(x$A()),
            rhs = x$rhs(),
            sense = x$sense(),
            lb = x$lb(),
            ub = x$ub()
          )
          # format problem for CPLEX: translate constraint senses to the
          # single-letter codes CPLEX expects (G/E/L)
          model$sense[model$sense == ">="] <- "G"
          model$sense[model$sense == "="] <- "E"
          model$sense[model$sense == "<="] <- "L"
          # relax semi-continuous variables to continuous
          # NOTE(review): presumably acceptable for this problem class — confirm
          model$vtype[model$vtype == "S"] <- "C"
          # create parameters for the low-level cplex() driver
          p <- list(
            verbose = as.integer(self$get_data("verbose")),
            presolve = as.integer(self$get_data("presolve")),
            gap = self$get_data("gap"),
            threads = self$get_data("threads"),
            time_limit = self$get_data("time_limit")
          )
          # store input data and parameters for later use by run()
          self$set_internal("model", model)
          self$set_internal("parameters", p)
          # return success
          invisible(TRUE)
        },
        run = function() {
          # access input data and parameters staged by calculate()
          model <- self$get_internal("model")
          p <- self$get_internal("parameters")
          # solve problem, timing the call
          rt <- system.time({
            x <- cplex(model, p)
          })
          # fix potential floating point arithmetic issues
          b <- model$vtype == "B"
          if (is.numeric(x$x)) {
            ## round binary variables because default precision is 1e-5
            x$x[b] <- round(x$x[b])
            ## truncate variables to account for rounding issues
            x$x <- pmax(x$x, model$lb)
            x$x <- pmin(x$x, model$ub)
          }
          # extract solution values, and
          # set values to NULL if any values have NA in result
          sol <- x$x
          if (any(is.na(sol))) sol <- NULL
          # return solution; rt[[3]] is the elapsed wall-clock time
          list(
            x = sol,
            objective = x$objval,
            status = x$status,
            runtime = rt[[3]]
          )
        },
        # overwrite the upper bound of a single decision variable in the
        # staged model (used e.g. for locked-in/locked-out constraints)
        set_variable_ub = function(index, value) {
          self$internal$model$ub[index] <- value
          invisible(TRUE)
        },
        # overwrite the lower bound of a single decision variable
        set_variable_lb = function(index, value) {
          self$internal$model$lb[index] <- value
          invisible(TRUE)
        }
      )
    )$new()
  )
}
cplex_error_wrap <- function(result, env = NULL) {
  # Check the status code returned by a cplexAPI call and raise a classed
  # error if the call failed.
  #
  # result: integer status code returned by a cplexAPI function (0 = success).
  # env: (optional) CPLEX environment handle; when supplied it is closed
  #   before raising the error so the license/handle is not leaked.
  #
  # Returns TRUE (invisibly) on success; aborts on failure.
  if (!(identical(result, 0) || identical(result, 0L))) {
    if (!is.null(env)) {
      cplexAPI::closeEnvCPLEX(env)
    }
    cli::cli_abort(
      cplexAPI::errmsg(result),
      # bug fix: the error was previously attributed to add_cbc_solver(),
      # which is a different solver; report add_cplex_solver() instead
      call = rlang::expr(add_cplex_solver()),
      .internal = TRUE
    )
  }
  invisible(TRUE)
}
cplex_matrix <- function(m) {
  # Convert a column-compressed sparse matrix (Matrix::dgCMatrix) into the
  # four parallel vectors that the CPLEX C API uses to describe a sparse
  # constraint matrix: column start offsets, per-column nonzero counts,
  # row indices, and nonzero values.
  # (Layout inspired by Rcplex:::toCPXMatrix.)
  assert(inherits(m, "dgCMatrix"))
  col_starts <- m@p
  nonzero_total <- length(m@x)
  list(
    matbeg = as.integer(col_starts),
    matcnt = as.integer(diff(c(col_starts, nonzero_total))),
    matind = as.integer(m@i),
    matval = as.double(m@x)
  )
}
cplex <- function(model, control) {
  # Low-level driver: solve the (MI)LP described by `model` using the
  # cplexAPI bindings, with solver settings taken from `control`.
  #
  # model: list with modelsense, vtype, obj, A, A2 (CPLEX sparse format),
  #   rhs, sense (G/E/L codes), lb, ub — as staged by add_cplex_solver().
  # control: list with verbose, presolve, gap, threads, time_limit.
  #
  # Returns a list(x, objval, status); x and objval are NULL on failure.
  # assert valid arguments
  assert(is.list(model), is.list(control))
  # prepare model data for CPLEX: replace infinite bounds with CPLEX's
  # sentinel "infinity" value (signed appropriately)
  model$lb[which(!is.finite(model$lb) & model$lb < 0)] <-
    -1 * cplexAPI::CPX_INFBOUND
  model$lb[which(!is.finite(model$lb) & model$lb > 0)] <-
    cplexAPI::CPX_INFBOUND
  model$ub[which(!is.finite(model$ub) & model$ub < 0)] <-
    -1 * cplexAPI::CPX_INFBOUND
  model$ub[which(!is.finite(model$ub) & model$ub > 0)] <-
    cplexAPI::CPX_INFBOUND
  # create environment (acquires a CPLEX license handle)
  env <- cplexAPI::openEnvCPLEX()
  if (inherits(env, "cplexError")) {
    stop(cplexAPI::errmsg(env))
  }
  # set solving parameters; the magic numbers are CPLEX parameter ids
  # (named in the comments) — see the IBM CPLEX parameter reference
  ## verbose (parameter: CPX_PARAM_SCRIND)
  cplex_error_wrap(
    cplexAPI::setIntParmCPLEX(env, 1035, as.integer(control$verbose)),
    env
  )
  ## presolve (parameter: CPX_PARAM_PREIND)
  cplex_error_wrap(
    cplexAPI::setIntParmCPLEX(env, 1030, as.integer(control$presolve)),
    env
  )
  ## threads (parameter: CPX_PARAM_THREADS)
  cplex_error_wrap(
    cplexAPI::setIntParmCPLEX(env, 1067, as.integer(control$threads)),
    env
  )
  ## (relative) optimality gap (parameter: CPX_PARAM_EPGAP)
  cplex_error_wrap(
    cplexAPI::setDblParmCPLEX(env, 2009, as.double(control$gap)),
    env
  )
  ## time limit (parameter: CPX_PARAM_TILIM)
  cplex_error_wrap(
    cplexAPI::setDblParmCPLEX(env, 1039, as.double(control$time_limit)),
    env
  )
  # initialize problem
  p <- cplexAPI::initProbCPLEX(env)
  cplex_error_wrap(cplexAPI::chgProbNameCPLEX(env, p, "prioritizr"), env)
  # build problem: copy objective, bounds, and the sparse constraint matrix
  # (already in CPLEX column-major format, see cplex_matrix()) into CPLEX
  result <- cplexAPI::copyLpwNamesCPLEX(
    env = env, lp = p,
    nCols = ncol(model$A),
    nRows = nrow(model$A),
    lpdir = ifelse(
      identical(model$modelsense, "max"),
      cplexAPI::CPX_MAX, cplexAPI::CPX_MIN
    ),
    objf = model$obj,
    rhs = model$rhs,
    sense = model$sense,
    lb = model$lb,
    ub = model$ub,
    matbeg = model$A2$matbeg,
    matcnt = model$A2$matcnt,
    matind = model$A2$matind,
    matval = model$A2$matval
  )
  if (!(identical(result, 0) || identical(result, 0L))) {
    cli::cli_abort(
      "Failed to prepare data for IBM CPLEX.",
      .internal = TRUE,
      call = rlang::expr(add_cplex_solver())
    )
  }
  # solve problem: use the LP optimizer when all variables are continuous,
  # otherwise declare column types and run the MIP optimizer
  if (all(model$vtype == "C")) {
    result <- cplexAPI::lpoptCPLEX(env, p)
  } else {
    cplexAPI::copyColTypeCPLEX(env, p, model$vtype)
    result <- cplexAPI::mipoptCPLEX(env, p)
  }
  # extract solution; `stat` is a human-readable status string regardless
  # of whether a solution vector is available
  stat <- cplexAPI::getStatStrCPLEX(env, cplexAPI::getStatCPLEX(env, p))
  if (identical(result, 0) || identical(result, 0L)) {
    sol <- cplexAPI::solutionCPLEX(env, p)
    if (!inherits(sol, "cplexError")) {
      out <- list(x = sol$x, objval = sol$objval, status = stat)
    } else {
      out <- list(x = NULL, objval = NULL, status = stat)
    }
  } else {
    out <- list(x = NULL, objval = NULL, status = stat)
  }
  # clean up: release the problem object and the environment/license
  cplex_error_wrap(cplexAPI::delProbCPLEX(env, p), env)
  cplex_error_wrap(cplexAPI::closeEnvCPLEX(env))
  # return result
  out
}
|
82e295558df237523c2db0c32a828733fe8083d4
|
2c4dbf42a157b8691ad66da48a34c98e92407d18
|
/R/12-create-data-subsets.R
|
315930d62b7b575126edf6c972a8deff352365ae
|
[] |
no_license
|
timkiely/spatially-conscious-ml-model
|
05f829b8efb181fe4f0f1454427589a3443f0d1a
|
3a81a9ce61a48dd8d34aca427370968f9580c2bd
|
refs/heads/master
| 2021-10-10T12:19:08.269686
| 2019-01-10T16:39:12
| 2019-01-10T16:39:12
| 95,896,422
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,620
|
r
|
12-create-data-subsets.R
|
# Build a borough/building-type subset of the base data, then regenerate the
# derived feature data sets (base, zipcode, radii) from that subset.
source("R/helper/load-packages.R")
source("R/helper/source-files.R")

# Location of the full data set (local Dropbox path).
data_path <- "C:/Users/tkiely/Dropbox/MSPA/Thesis/Analysis/full-data"

# Load all base data (PLUTO records joined with sales).
base_data <- read_rds(file.path(data_path, "p05_pluto_with_sales.rds"))

# Step 1 --------------------------------------------------------------------
# Restrict to building types C and D in Manhattan, Brooklyn, and the Bronx.
base_data_subset <- base_data %>%
  filter(
    Building_Type %in% c("D","C"),
    Borough %in% c("MN","BK","BX")
  )

# Persist the subset for the feature-generation steps below.
write_rds(base_data_subset, "data/processing steps/p17_pluto_with_sales_subset.rds")

# Step 2 --------------------------------------------------------------------
# Re-run feature generation on the subset of data.

# Base features:
create_base_data(pluto_with_sales_infile = "data/processing steps/p17_pluto_with_sales_subset.rds"
                 , outfile = "data/processing steps/p18_base_model_data_subset.rds"
                 , limit_boros = FALSE)

# Zipcode-level features:
create_zipcode_data(base_model_data = "data/processing steps/p18_base_model_data_subset.rds"
                    , outfile = "data/processing steps/p19_zipcode_model_data_subset.rds")

# Radii features. Note: extremely time intensive (last full run: ~3.3 hours).
# To actually run it, supply the "--run-radii" argument or flip run_radii.
create_radii_data(base_model_data = "data/processing steps/p18_base_model_data_subset.rds"
                  , outfile = "data/processing steps/p20_radii_model_data_subset.rds"
                  , run_radii = FALSE)
|
bb73020ea02aafb2e54723cea72d624d8cddcde3
|
1239b241f22041185e473772c97be748982fd005
|
/tests/tests.R
|
e3010bee1cd2b354f7cc19ed57038f0bc6a2c185
|
[] |
no_license
|
djvanderlaan/lvec
|
7e5e3b030b477e8edb439662541b37e8e7b5e6de
|
fec0f36d32cfbef2905f105a22917b4098c4ae85
|
refs/heads/master
| 2022-11-10T00:55:40.310109
| 2022-10-22T14:07:09
| 2022-10-22T14:07:09
| 72,359,688
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 53
|
r
|
tests.R
|
# Entry point for the package test suite: load the package under test and the
# testthat framework, then run every test file under tests/testthat/.
library(lvec)
library(testthat)
# Failures or errors here cause `R CMD check` to fail.
test_check("lvec")
|
074e4170d7e98bd082abc12a5dea362aa3b2f45f
|
4f6723c128a8cf6f41d146e71c59e5cf4323f6c3
|
/R/qp1qc_solver.R
|
e932b4f832a3e4bc7822119bf6fdfe810bfe078e
|
[] |
no_license
|
aaronjfisher/qp1qc
|
6587440fdec6915d484d4121af59f0952fd2ed48
|
d414cc5cd0f0ba805ceb5a9c5776523b67a4f5ea
|
refs/heads/master
| 2020-09-03T01:55:45.967140
| 2020-08-19T14:02:44
| 2020-08-19T14:02:44
| 219,356,240
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,481
|
r
|
qp1qc_solver.R
|
# To do:
# !! rather than requiring positive definiteness, also accept diagonal matrices?
# !! = important note to keep track of, or note for future changes.
#' binary search with arbitrary resolution
#'
#' The \code{\link{binsearch}} function in the \code{gtools} package searches only over integers. This is a wrapper for \code{\link{binsearch}} that also allows searching over a grid of points with distance \code{tol} between them. A target must also be entered (see \code{\link{binsearch}}).
#' @param fun a monotonic function over which we search (passed to \code{\link{binsearch}})
#' @param tol resolution of the grid over which to search for \code{target} (see \code{\link{binsearch}})
#' @param range a range over which to search for the input to \code{fun}
#' @param ... passed to \code{\link{binsearch}}
#' @export
#' @import gtools
#' @seealso \code{\link{binsearch}}
#' @examples
#' # best solution here is at x0,
#' # which we can find with increasing precision
#' x0 <- 10.241
#' binsearchtol( function(x) x-x0, target=0, range=c(0,2*x0) , tol=1.00)
#' binsearchtol( function(x) x-x0, target=0, range=c(0,2*x0) , tol=0.10)
#' binsearchtol( function(x) x-x0, target=0, range=c(0,2*x0) , tol=0.05)
#'
binsearchtol <- function(fun, tol = 0.01, range, ...) {
  # Binary search on a grid of resolution `tol` rather than the integers.
  # Rescale the problem so one unit step on gtools::binsearch's integer grid
  # corresponds to a step of size `tol` on the caller's scale.
  scaled_fun <- function(k) fun(k * tol)
  out <- binsearch(fun = scaled_fun, range = range / tol, ...)
  # Map the located grid position(s) back to the original scale.
  out$where <- out$where * tol
  out
}
#' Simultaneously diagonalize two symmetric matrices (1 being positive definite)
#'
#' @param M1 a positive definite symmetric matrix
#' @param M2 a symmetric matrix
#' @param eigenM1 (optional) the value of \code{eigen(M1)}, if precomputed and available
#' @param eigenM2 (optional) the value of \code{eigen(M2)}, if precomputed and available
#' @param tol used for error checks
#' @param return_diags should only the diagonalizing matrix be returned.
#' @details This function determines an invertible matrix Q such that (Q' M1 Q) is an identity matrix, and (Q' M2 Q) is diagonal, where Q' denotes the transpose of Q. Note, Q is not necessarily orthonormal or symmetric.
#' @return If \code{return_diags = FALSE}, the matrix Q is returned. Otherwise, a list with the following elements is returned
#' \itemize{
#' \item{diagonalizer}{ - the matrix Q}
#' \item{inverse_diagonalizer}{ - the inverse of Q}
#' \item{M1diag}{ - the diagonal elements of (Q' M1 Q)}
#' \item{M2diag}{ - the diagonal elements of (Q' M2 Q)}
#' }
#' @export
#' @examples p <- 5
#' M1 <- diag(p)+2
#' M2 <- diag(p) + crossprod(matrix(rnorm(p^2),p,p))
#' M2[1,] <- M2[,1] <- 0
#' sdb <- sim_diag(M1=M1,M2=M2,tol=10^-10,return_diags=TRUE)
#' Q <- sdb$diagonalizer
#' QM1Q <- t(Q) %*% M1 %*% Q
#' QM2Q <- t(Q) %*% M2 %*% Q
#' range(QM1Q -diag(sdb$M1diag))
#' range(QM2Q -diag(sdb$M2diag))
#' range(Q %*% sdb$inverse_diagonalizer - diag(p))
#' range(sdb$inverse_diagonalizer %*% Q - diag(p))
#'
#' # if M1 is not p.d., a warning is produced, but the computation
#' # proceeds by switching M1 & M2, and then switching them back.
#' sdb <- sim_diag(M1=M2,M2=M1,tol=10^-10,return_diags=TRUE)
sim_diag <- function(M1,M2, eigenM1=NULL, eigenM2=NULL, tol = 10^-4, return_diags=FALSE){
  if(!isSymmetric(M1)) stop('M1 must be symmetric')
  if(!isSymmetric(M2)) stop('M2 must be symmetric')
  ############
  # Central computation - part 1: eigendecompose M1 (reused throughout).
  if(is.null(eigenM1)) eigenM1 <- eigen(M1)
  ############
  # Error checks on inputs
  if(any(eigenM1$value<=0)){ # M1 not positive definite
    if(is.null(eigenM2)) eigenM2 <- eigen(M2)
    if(any(eigenM2$value<=0)){ # M2 not positive definite either
      stop('Neither M1 nor M2 is positive definite')
    }
    if(all(eigenM2$value>0)){ # M2 IS positive definite: swap roles and recurse
      warning('M1 is not positive definite, but M2 is. Switching roles of M1 & M2')
      out_pre <- sim_diag(M1=M2,M2=M1,eigenM1=eigenM2,tol=tol,return_diags=return_diags)
      if(!return_diags) return(out_pre)
      # Swap the diagonal labels back so they refer to the caller's M1/M2.
      out <- out_pre
      out$M1diag <- out_pre$M2diag
      out$M2diag <- out_pre$M1diag
      return(out)
    }
  }
  ############
  # Central computation - part 2
  # Whitening transform for M1: t(sqrtInvM1) %*% M1 %*% sqrtInvM1 = I.
  sqrtInvM1 <- eigenM1$vectors %*% (diag(1/sqrt(eigenM1$values)))
  Z <- t(sqrtInvM1) %*% M2 %*% sqrtInvM1
  # if(any( abs(t(Z)-Z) > tol)) stop('Symmetry error')
  # (check above disabled: it flagged spurious machine-precision asymmetry,
  #  especially for low-rank/small-kernel cases)
  Z <- (Z + t(Z) )/ 2 # force Z to be exactly symmetric before eigen()
  Q <- sqrtInvM1 %*% eigen(Z)$vectors # real symmetric matrices are diagonalizable by orthogonal matrices (wikipedia)
  # Closed-form inverse of Q:
  # let V = eigen(Z)$vectors, then V'V=VV'=I. Let M^-1/2 = WD^-1/2,
  # Then Q_inverse = V' M1^(1/2), because
  # Q [V' M1^(1/2)] = Q [V' D^(1/2)W'] = WD^(-1/2) V [V' D^(1/2)W'] = I
  # [V' M1^(1/2)] Q = [V' D^(1/2)W'] Q = [V'D^(1/2 )W'] WD^(-1/2) V = I
  Q_inv <- t(eigen(Z)$vectors) %*% diag(sqrt(eigenM1$values)) %*% t(eigenM1$vectors)
  # Sanity-check the analytic inverse against numerical error.
  inv_err <- max(abs(Q_inv %*% Q - diag(dim(M1)[1])))
  if( inv_err > tol * min( sqrt(abs(eigenM1$values))) ){
    warning(paste('possible inverting or machine error of order', signif(inv_err,4)))
  }
  if(!return_diags) return(Q)
  return(list(
    diagonalizer = Q,
    inverse_diagonalizer = Q_inv,
    M1diag = rep(1,dim(M1)[1]), # Q' M1 Q is the identity by construction
    M2diag = diag(t(Q) %*% M2 %*% Q)
  ))
  # Derivation (https://math.stackexchange.com/questions/1079627/simultaneously-diagonalization-of-two-matrices):
  # Z = UDU'
  # Q'M1Q = U'[M1^(-1/2)' M1 M1^(-1/2)] U = U'[I]U = I
  # Q'M2Q = U'[M1^(-1/2)' M2 M1^(-1/2)] U = U'[Z]U = U'[UDU']U = D
  ############
  # Workchecks (run manually):
  # round(t(sqrtInvM1) %*% M1 %*% sqrtInvM1,12)
  # round(t(Q) %*% M1 %*% Q, 10)
  # round(t(Q) %*% M2 %*% Q, 10)
}
pseudo_invert_diagonal_matrix <- function(M) {
  # Moore-Penrose pseudo-inverse of a diagonal matrix: the reciprocal of each
  # nonzero diagonal entry, and zero where the diagonal entry is zero.
  #
  # M: a square diagonal matrix (off-diagonal entries are ignored).
  # Returns a diagonal matrix with the same dimensions as M.
  diag_M <- diag(as.matrix(M))
  inv_diag <- 1 / diag_M
  inv_diag[diag_M == 0] <- 0
  # bug fix: diag(x) on a length-1 numeric is interpreted as a matrix
  # *dimension* (e.g. diag(0.2) is a 0 x 0 matrix), so the 1 x 1 case must be
  # constructed explicitly. test_nu_psd() can call this with a 1 x 1 matrix.
  if (length(inv_diag) == 1) {
    return(as.matrix(inv_diag))
  }
  diag(inv_diag)
}
is.diagonal <- function(M, tol = 10^-8) {
  # Test whether a (nominally symmetric) matrix is diagonal up to `tol`.
  # A warning is emitted when M departs from symmetry by more than `tol`,
  # but the diagonality test still runs.
  if (!is.matrix(M)) stop('M must be a matrix')
  asymmetry <- abs(M - t(M))
  if (any(asymmetry > tol)) warning(paste('M must be symmetric, possible machine error of order', max(asymmetry)))
  # M is diagonal iff every off-diagonal entry is numerically zero.
  off_diagonal <- M
  diag(off_diagonal) <- 0
  all(abs(off_diagonal) < tol)
}
to_diag_mat <- function(vec) {
  # Place `vec` on the diagonal of a square matrix. The length-1 case is
  # handled separately because diag(x) treats a scalar argument as a matrix
  # dimension rather than a diagonal value.
  if (length(vec) == 1) {
    as.matrix(vec)
  } else {
    diag(vec)
  }
}
drop_off_diagonals <- function(mat) {
  # Return a square matrix whose diagonal matches diag(mat) and whose
  # off-diagonal entries are zero (i.e. drop the off-diagonal part of `mat`).
  d <- diag(as.matrix(mat))
  # Length-1 case is built explicitly: diag(scalar) means a dimension, not
  # a diagonal value.
  if (length(d) == 1) as.matrix(d) else diag(d)
}
# ____ _____ ____ _
# / __ \| __ \ | _ \ (_)
# | | | | |__) | | |_) | __ _ ___ _ ___ ___
# | | | | ___/ | _ < / _` / __| |/ __/ __|
# | |__| | | | |_) | (_| \__ \ | (__\__ \
# \___\_\_| |____/ \__,_|___/_|\___|___/
# TEST # set_QP_unconstrained_eq_0(M=diag(0:3),v=0:3,k=-2,tol=10^-9)
set_QP_unconstrained_eq_0 <- function(M,v,k,tol){
  # Find x such that x'Mx + v'x + k = 0, or show that it is not possible.
  # M must be diagonal (only its diagonal is used).
  #
  # Strategy: start from the stationary point x0 of the quadratic (where the
  # derivative is zero), then move along a single coordinate in whichever
  # direction can drive the value to zero, and solve the resulting 1-D
  # polynomial exactly. (See notes in pdf.)
  #
  # Returns list(feasible, soln, value); soln/value are NA when infeasible.
  feasible <- FALSE
  soln <- value <- NA
  ############################
  # Initialize a reference point where the derivative = 0.
  diag_M <- diag(as.matrix(M))
  M_pseudo_inv <- pseudo_invert_diagonal_matrix(M)
  x0 <- -(1/2)*M_pseudo_inv %*% v
  # Evaluate the quadratic at x (returned as a scalar).
  eval_x <- function(x){
    sum(x^2 * diag_M) + sum(x*v) + k
  }
  eval_x0 <- eval_x(x0)
  feasible <- eval_x0==0
  if(eval_x0==0){
    # Stationary point already satisfies the equation exactly.
    return(list(feasible=TRUE, soln=x0, value=eval_x0))
  }
  ############################
  # Find an index of x0 along which we can move such that a new solution
  # evaluates to zero (with eval_x):
  # 1. a coordinate with zero curvature but nonzero slope can reach any value;
  # 2. positive curvature can raise a negative value to zero;
  # 3. negative curvature can lower a positive value to zero.
  move_ind <- NA
  v_candidates <- (diag_M==0)&(v!=0)
  if(sum(v_candidates)>0){
    move_ind <- which(v==max(abs(v[v_candidates])))[1]
  }else if(eval_x0 < 0 & max(diag_M) > 0){
    move_ind <- which(diag_M==max(diag_M))[1]
  }else if(eval_x0 > 0 & min(diag_M) < 0){
    move_ind <- which(diag_M==min(diag_M))[1]
  }
  if(!is.na(move_ind)){
    # Zero out the chosen coordinate, then solve the quadratic in that
    # coordinate alone: eval_x(soln_base) + v[i]*t + diag_M[i]*t^2 = 0.
    soln <-
      soln_base <- x0
    soln_base[move_ind] <- 0
    coeffs <- c(eval_x(soln_base),
      v[move_ind],
      diag_M[move_ind])
    move_complex <- polyroot(coeffs)[1]
    # A real root must exist by construction of move_ind.
    if(Im(move_complex) > tol) stop('error in root finding')
    soln[move_ind] <- Re(move_complex)
    if(abs(eval_x(soln))>tol) stop('calculation error')
    feasible <- TRUE
  }
  return(list(
    feasible=feasible,
    soln=soln,
    value=eval_x(soln)
  ))
}
min_QP_unconstrained <- function(M,v,tol){
  # Minimize x'Mx + v'x over all x, where M is diagonal (possibly indefinite).
  # If M is invertible after discarding zero elements, the solution satisfies
  # 2Mx + v = 0, i.e. x = (-1/2) M^-1 v.
  # When the objective is unbounded below, the "solution" places a large
  # sentinel value BIG on the unbounded coordinates.
  #
  # Returns a list with the solution, its value, and per-coordinate direction
  # flags describing where the objective tends to +/- Inf.
  BIG <- (1/tol)^4 #To avoid Inf*0 issues
  if(!all(is.finite(c(M,v)))) stop('M & v must be finite')
  if(!is.diagonal(M)) stop('M should be diagonal')
  if(length(v)!=dim(M)[1]) stop('dimension of M & v must match')
  diag_M <- diag(M)
  # Coordinates along which the objective is constant.
  zero_directions <- (diag_M==0) & c(v==0)
  # For each coordinate: does the objective go to +Inf / -Inf as the
  # coordinate increases (up) or decreases (down)?
  moves_up_to_pos_Inf<- ((diag_M==0) & c(v>0)) | (diag_M>0) #as we increase these indeces, do we approach +Inf?
  moves_up_to_neg_Inf <- ((diag_M==0) & c(v<0)) | (diag_M<0) #as we increase these indeces, do we approach -Inf?
  moves_down_to_pos_Inf<- ((diag_M==0) & c(v<0)) | (diag_M>0)
  moves_down_to_neg_Inf<- ((diag_M==0) & c(v>0)) | (diag_M<0)
  # Work check: each coordinate must fall into exactly one category per
  # direction (up/down), otherwise the classification above is inconsistent.
  if( any(
    moves_down_to_pos_Inf + moves_down_to_neg_Inf + zero_directions !=1 |
    moves_up_to_pos_Inf + moves_up_to_neg_Inf + zero_directions !=1
  )){
    stop('direction error')#work check
  }
  # Finite minimum exists iff no coordinate drives the objective to -Inf.
  finite_soln <- !any(moves_up_to_neg_Inf|moves_down_to_neg_Inf)
  if(finite_soln){ #only true if all zero elements of M correspond to zero elements of v
    M_pseudo_inv <- pseudo_invert_diagonal_matrix(M)
    soln <- -(1/2)*M_pseudo_inv %*% v
  }else{
    soln <- rep(0,length(v))
    #need to use "big number" BIG to avoid multiplying by zero
    soln[moves_up_to_neg_Inf] <- BIG
    soln[moves_down_to_neg_Inf] <- - BIG
  }
  value <- t(soln) %*% M %*% soln + crossprod(v,soln)
  return(list(
    zero_directions=zero_directions,
    moves_up_to_pos_Inf=moves_up_to_pos_Inf,
    moves_up_to_neg_Inf=moves_up_to_neg_Inf,
    moves_down_to_pos_Inf=moves_down_to_pos_Inf,
    moves_down_to_neg_Inf=moves_down_to_neg_Inf,
    finite_soln=finite_soln,
    value=value,
    soln=soln
  ))
}
# ____ _____ __ ____ _____
# / __ \ | __ \ /_ | / __ \ / ____|
# | | | | | |__) | | | | | | | | |
# | | | | | ___/ | | | | | | | |
# | |__| | | | | | | |__| | | |____
# \___\_\ |_| |_| \___\_\ \_____|
#' Solve (non-convex) quadratic program with 1 quadratic constraint
#'
#' Solves a possibly non-convex quadratic program with 1 quadratic constraint. Either \code{A_mat} or \code{B_mat} must be positive definite, but not necessarily both (see Details, below).
#'
#' Solves a minimization problem of the form:
#'
#' \deqn{ min_{x} x^T A_mat x + a_vec^T x }
#' \deqn{ such that x^T B_mat x + b_vec^T x + k \leq 0,}
#'
#' where either \code{A_mat} or \code{B_mat} must be positive definite, but not necessarily both.
#'
#' @param A_mat see details below
#' @param a_vec see details below
#' @param B_mat see details below
#' @param b_vec see details below
#' @param k see details below
#' @param tol a calculation tolerance variable used at several points in the algorithm.
#' @param eigen_A_mat (optional) the precalculated result \code{eigen(A_mat)}.
#' @param eigen_B_mat (optional) the precalculated result \code{eigen(B_mat)}.
#' @param verbose show progress from calculation
#' @import quadprog
#' @return a list with elements
#' \itemize{
#' \item{soln}{ - the solution for x}
#' \item{constraint}{ - the value of the constraint function at the solution}
#' \item{objective}{ - the value of the objective function at the solution}
#' }
#' @export
solve_QP1QC <- function(A_mat, a_vec, B_mat, b_vec, k, tol=10^-7, eigen_B_mat=NULL, eigen_A_mat=NULL, verbose= TRUE){
if(tol<0){tol <- abs(tol); warning('tol must be positive; switching sign')}
if(tol>.01){tol <- 0.01; warning('tol must be <0.01; changing value of tol')}
if(is.null(eigen_B_mat)) eigen_B_mat <- eigen(B_mat)
if(any(eigen_B_mat$value<=0)){
#B_mat not pos def.
if(is.null(eigen_A_mat)) eigen_A_mat <- eigen(A_mat)
if(any(eigen_A_mat$value<=0)){
#A_mat not pos def.
stop('Neither B_mat nor A_mat is positive definite')
}
}
####### Diagonalize
suppressWarnings({
sdb <- sim_diag(
M1=B_mat, M2=A_mat, eigenM1=eigen_B_mat, eigenM2= eigen_A_mat, tol=tol, return_diags=TRUE)
})
Q <- sdb$diagonalizer
B <- crossprod(Q,B_mat)%*%Q
b <- crossprod(Q,b_vec)
A <- crossprod(Q,A_mat)%*%Q
a <- crossprod(Q,a_vec)
# Finish diagonalizing by dropping machine error
# if(!is.diagonal(A)) stop('A should be diagonal') #!! throwing bugs due to machine error, possibly for low rank matrices?
# if(!is.diagonal(B)) stop('B should be diagonal')#!! throwing bugs due to machine error, possibly for low rank matrices?
A <- drop_off_diagonals(A)
B <- drop_off_diagonals(B)
Bd <- diag(as.matrix(B))
Ad <- diag(as.matrix(A))
# Define helper functions on diagonal space
calc_diag_Lagr <- function(x,nu){
#output as vector
sum(x^2 * diag(as.matrix(A + nu * B))) + c(crossprod(x,(a + nu* b)))
}
calc_diag_obj <- function(x){
#output as vector
sum(x^2 * diag(as.matrix(A))) + c(crossprod(x,a))
}
calc_diag_constraint <- function(x){
#output as vector
sum(x^2 * diag(as.matrix(B))) + c(crossprod(x,b)) + k
}
return_on_original_space <- function(x){
list(
soln = Q %*% x,
constraint = calc_diag_constraint(x),
objective = calc_diag_obj(x)
)
}
# _ _ _
# | | | | (_)
# | |_ ___ ___| |_ _ _ __ __ _ _ __ _ _
# | __/ _ \/ __| __| | '_ \ / _` | | '_ \| | | |
# | || __/\__ \ |_| | | | | (_| | | | | | |_| |
# \__\___||___/\__|_|_| |_|\__, | |_| |_|\__,_|
# __/ |
# |___/
test_nu <- function(nu, tol){
#return: is nu too low, too high, or optimal
if(any(Ad+nu*Bd < -tol)) warning(paste('invalid nu, should not have been submitted. Possible machine error of magnitude',abs(min(Ad+nu*Bd))))
if(all(Ad+nu*Bd > 0)){
return(test_nu_pd(nu))
}else{
return(test_nu_psd(nu,tol))
}
}
# Returns high, low, optimal or non-optimal. Beacuse it's used for a binary search, it can't just say "non-optimal."
test_nu_pd <- function(nu){
soln <- -(1/2)*((Ad+nu*Bd)^-1)*(a+nu*b)
constraint_value <- calc_diag_constraint(soln)
if(constraint_value>0) out_type <- 'low' #contraint value is monotone decreasing in nu
if(constraint_value<0) out_type <- 'high'
if(constraint_value==0){
out_type <- 'optimal'
}
return(list(
type=out_type,
soln=soln
))
}
test_nu_psd <- function(nu,tol){
diag_A_nu_B <- Ad + nu * Bd
if(any(diag_A_nu_B< -tol)) warning(paste('possible error in pd, or machine error of order',abs(min(diag_A_nu_B))))
diag_A_nu_B[diag_A_nu_B<0] <- 0
mat_A_nu_B <- to_diag_mat(diag_A_nu_B)
if(all(diag_A_nu_B>0)) stop('error in psd')
I_nu <- diag_A_nu_B > 0
N_nu <- diag_A_nu_B == 0
##### Infer context in which function was called:
#if this value of nu turns out to be non-optimal, infer whether we were testing nu_min or nu_max. If nu_min is non-optimal, then that means nu_min is lower than the optimum nu. Likewise, if we are testing nu_max and it proves non-optimal, than nu_max is too high.
non_opt_value <- NA
if(any(Bd[N_nu]<0) & any(Bd[N_nu]>0)) return('optimal')
#min=max!! need to say what the actual returned value is though. (!!) Maybe handle this separately
if(any(Bd[N_nu]<0)) non_opt_value <- 'high' #we've inferred that we were testing nu_max.
if(any(Bd[N_nu]>0)) non_opt_value <- 'low' #we've inferred that we were testing nu_min.
if(is.na(non_opt_value)) stop('PSD test should not have been called') # !! Relevant for using this for initial checks of feasibility?
#####
#### Optimality check 1 (necessary)
if(max(abs(a + nu * b)[N_nu])>0){
return(list(
type=non_opt_value,
soln=NA))
}
#### Optimality check 2 (necessity implied by 1st check not holding)
# First solve PD problem over I_nu
A_nu_B_pseudo_inv <- pseudo_invert_diagonal_matrix(mat_A_nu_B)
x_I <- -(1/2)*A_nu_B_pseudo_inv %*% (a + nu * b)
if(any(x_I[N_nu]!=0)) stop('indexing error')
free_constr_opt <- set_QP_unconstrained_eq_0(
M = as.matrix(B[N_nu, N_nu]),
v = b[N_nu],
k = calc_diag_constraint(x_I),
tol=tol
)
if(free_constr_opt$feasible){
soln <- x_I
soln[N_nu] <- free_constr_opt$soln
return(list(
type = 'optimal',
soln = soln
))
}else{
return(list(
type=non_opt_value,
soln=NA
))
}
}
# Test constraint feasibility
constr_prob <- min_QP_unconstrained(M=B, v=b, tol=tol)
if(constr_prob$finite_soln){
if(constr_prob$value > -k) stop('Constraint is not feasible')
}
if(constr_prob$value == -k) warning('Constraint may be too strong -- no solutions exist that strictly satisfy the constraint.')
###### Step 1 ######
# Check if unconstrained solution is feasible
# Notes
# Could we simplify by just doing test_nu(0)? No, later functions assume that constraint is met with equality.
u_prob <- min_QP_unconstrained(M=A, v= a, tol=tol) # unconstrained problem
min_constr_over_restricted_directions <- function(directions){
if(length(directions)==0) return(u_soln)
search_over_free_elements <- min_QP_unconstrained(
M = to_diag_mat(Bd[directions]),
v = b[directions],
tol= tol
)
u_soln <- u_prob$soln
u_soln[directions] <- search_over_free_elements$soln
u_soln
}
u_soln <- u_prob$soln
if(u_prob$finite_soln){
if(any(u_prob$zero_directions)){ # we have a finite nonunique solution
u_soln <- min_constr_over_restricted_directions(u_prob$zero_directions)
}
#otherwise we have a UNQIUE finite solution (u_soln), assigned above
}
# (Code commented out below) Don't bother to check the case when solution is inifite. Since we need A or B to PD in order to simultaneously diagonalize them (right now), the solution to the unconstrained problem has to either be finite, or lead to a non-feasible constraint value. Simply using u_soln should achieve this, unless BIG is too small, which is highly unlikely.
# if(!u_prob$finite_soln){
# #We have an infinite solution, so 1 dimension of soln must be Inf.
# #Check each of these dimensions to see if this is possible while
# #Meeting constraint
# u_soln <- u_prob$soln
# search_set <- u_prob$moves_up_to_neg_Inf |
# u_prob$moves_down_to_neg_Inf |
# u_prob$zero_directions
# if(length(search_set)>1){ for(i in 1:p){ #If solution is not unique, then for each element in the search set...
# BIG <- (1/tol)^4 #To avoid Inf*0 issues
# if(u_prob$moves_up_to_neg_Inf[i]){
# #Fix index i at +infinity, see if we can satisfy constraint.
# if(constr_prob$moves_up_to_pos[i]) next #can't satisfy
# search_set_up_i <- search_set
# search_set_up_i[i] <- FALSE #don't search over index i
# u_soln_i <- min_constr_over_restricted_directions(search_set_up_i)
# u_soln_i[i] <- BIG #fix at +Inf
# if(c(calc_diag_constraint(u_soln_i) <= 0)){
# u_soln <- u_soln_i
# break
# }
# }
# if(u_prob$moves_down_to_neg_Inf[i]){
# #Fix index i at -Inf, see if we can satisfy constraint
# if(constr_prob$moves_down_to_pos[i]) next #can't satisfy
# search_set_down_i <- search_set
# search_set_down_i[i] <- FALSE
# u_soln_i <- min_constr_over_restricted_directions(search_set_down_i)
# u_soln_i[i] <- -BIG
# if(c(calc_diag_constraint(u_soln_i) <= 0)){
# u_soln <- u_soln_i
# break
# }
# }
# }}
# }
if(c(calc_diag_constraint(u_soln) <= 0)){
if(verbose) cat('\nUnconstrained solution also satisfies the constraint\n')
return(return_on_original_space(u_soln))
}
###### Step 2 #####
#Get upper/lower bounds on nu
nu_opt <- x_opt <- NA # (yet) unknown optimal nu value
nu_to_check <- c()
nu_max <- Inf
nu_min <- -Inf
if(any(Bd>0)){
nu_min <- max( (-Ad/Bd) [Bd>0] )
nu_to_check <- c(nu_to_check,nu_min)
}
if(any(Bd<0)){ #non infinite check!! only relevant if not infinite Bd
nu_max <- min( (-Ad/Bd) [Bd<0] )
nu_to_check <- c(nu_to_check,nu_max)
}
if(length(nu_to_check)==0){
if(any(Bd!=0)) stop('Error in Bd check')
if(any(B_mat!=0)) stop('Error in Bd check')
if(any(Ad<=0)) stop('Error in PD check')
warning('Quadratic constraint not active')# (All diagonal elements of B are zero; a linear constraint is sufficient)
qp_soln <- solve.QP(Dmat = 2*A_mat, dvec= -a_vec,
Amat = matrix(-b_vec,ncol=1), bvec = k)$solution
# solve.QP formulates their problem with different constants than us.
# Requires PD
return( return_on_original_space( solve(Q)%*%qp_soln ) )
# Need to cancel out multiplication by Q in return_on_original_space function
}
# Check nu_min, nu_max
for(i in 1:length(nu_to_check)){
test_nu_check <- test_nu(nu_to_check[i],tol=tol)
if(test_nu_check$type=='optimal'){
nu_opt <- nu_to_check[i]
}
}
# Find endpoints for binary search.
# print(c(nu_min,nu_max))
if(is.infinite(nu_max)){
nu_max <- abs(nu_min) + 10 #arbitrary number just to make it >1
test_nu_max <- test_nu(nu_max, tol=tol)
counter <- 0
while(abs(nu_max) < 1/tol & test_nu_max$type=='low'){
nu_max <- abs(nu_max) * 10
test_nu_max <- test_nu(nu_max, tol=tol)
# Extra safety/error check
counter <- counter + 1
if(counter > 1000) stop('While loop broken')
}
if(test_nu_max$type=='low'){nu_opt <- nu_max; warning('outer limit reached')}
}
if(is.infinite(nu_min)){
nu_min <- -abs(nu_max) - 10 #arbitrary number just to make it < -1
test_nu_min <- test_nu(nu_min, tol=tol)
counter <- 0
while(abs(nu_min) < 1/tol & test_nu_min$type=='high'){
nu_min <- -abs(nu_min) * 10
test_nu_min <- test_nu(nu_min, tol=tol)
# Extra safety/error check
counter <- counter + 1
if(counter > 1000) stop('While loop broken')
}
if(test_nu_min$type=='high'){nu_opt <- nu_min; warning('outer limit reached')}
}
# print(c(nu_min,nu_max))
##### Step 3 #####
# Binary Search (if nu_min or nu_max are not optimal)
if(is.na(nu_opt)){
bin_serach_fun <- function(nu){
tested_type <- test_nu(nu, tol=tol)$type
if(tested_type=='high') return(1)
if(tested_type=='low') return(-1)
if(tested_type=='optimal') return(0)
}
nu_opt <- binsearchtol(fun=bin_serach_fun, tol=tol, range=c(nu_min, nu_max), target=0)$where[1]
}
x_opt <- test_nu(nu_opt,tol=tol)$soln #either from binary search, or from nu_min or nu_max
return(return_on_original_space(x_opt))
}
|
e467c547a043570513c32e7b35e93bf267c06017
|
e61d4e17b5683e6c5c79588588aa302f24b03863
|
/xrp_data_vis.R
|
5ae858cf1963b8fb9cab3f7b5e4059661b3d6758
|
[] |
no_license
|
Joseph-C-Fritch/web_scrape_project
|
89466e585b3e10dab0be11e1d2d7c803945d1962
|
0d56f349421d8f564a4ade6ce15c4bda7be11407
|
refs/heads/master
| 2020-04-22T02:05:24.170732
| 2019-02-12T19:06:26
| 2019-02-12T19:06:26
| 170,035,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,803
|
r
|
xrp_data_vis.R
|
# Libraries: data wrangling (dplyr), plotting (ggplot2), word clouds
# (wordcloud2), text mining (tm), reporting (knitr), and correlation
# analysis (Hmisc::rcorr, corrplot).
library(dplyr)
library(ggplot2)
library(wordcloud2)
library(tm)
library(knitr)
library(Hmisc)
library(corrplot)
# df3: scraped posts with sentiment scores; df4: XRP price data.
# NOTE(review): assumes both CSVs share a 'week' key column -- confirm schema.
df3 <- readr::read_csv("./df3_wsentiment.csv")
df4 <- readr::read_csv("./df4.csv")
# Attach price information to each post by week number.
df5 = left_join(df3,df4, by = 'week')
# df30: daily-level summary table -- one row per date with post count, mean
# weekly % price change, volume, and mean sentiment, plus previous-day
# (lagged) versions of the post/sentiment columns as candidate predictors.
df30 = df5%>%
mutate(daily_percent_change = (daily_price_change/open)*100)%>%
select(., date, number_of_posts, daily_percent_change, weekly_percent_change, polarity,
subjectivity, Volume, week)%>%
group_by(., date)%>%
summarise(.,
count=n(),
percent_change = mean(weekly_percent_change),
volume = mean(Volume), polarity = mean(polarity),
subjectivity = mean(subjectivity))%>%
mutate(., yesterday_posts = lag(count))%>%
mutate(., yesterday_polarity = lag(polarity))%>%
mutate(., yesterday_subject = lag(subjectivity))%>%
rename(., today_post = count,
today_polarity = polarity,
today_volume = volume,
today_perc_change = percent_change,
today_subjectivity = subjectivity)%>%
select(., -date)%>%
na.omit()
# df31: weekly-level analogue of df30 -- per-week aggregates with
# previous-week lags of post count, polarity, and subjectivity.
df31 = df5%>%
mutate(daily_percent_change = (daily_price_change/open)*100)%>%
select(., date, number_of_posts, daily_percent_change, weekly_percent_change, polarity,
subjectivity, Volume, week)%>%
group_by(., week)%>%
summarise(.,
count=n(),
percent_change = mean(weekly_percent_change),
volume = mean(Volume), polarity = mean(polarity),
subjectivity = mean(subjectivity))%>%
mutate(., last_week_posts = lag(count))%>%
mutate(., last_week_polarity = lag(polarity))%>%
mutate(., last_week_subject = lag(subjectivity))%>%
rename(., this_week_posts = count,
this_week_volume = volume,
this_week_perc_change = percent_change,
this_week_polarity = polarity,
this_week_subjectivity = subjectivity)%>%
select(., -week)%>%
na.omit()
#Plot posts and price as function of time (kept for reference, disabled)
#df7 = df5%>%
#filter(., weekly_price_change > 0)
#df7
#write.csv(df7,file = 'df7.csv',fileEncoding = 'UTF-8')
#########################################################################
# Add percentage-change column and compare against the previous time period.
# df20: weekly price change vs. previous week's post count.
df20 = df5%>%
group_by(., week)%>%
summarise(.,count=n(), percentage_change = mean(weekly_price_change))%>%
mutate(., prev_count = lag(count))%>%
select(., percentage_change, prev_count)%>%
na.omit()
# df21: daily price change (open-to-close %) vs. previous day's post count.
df21 = df5%>%
rename(., day = date)%>%
group_by(., day)%>%
summarise(.,count=n(), percentage_change = mean(((close-open)/open)*100))%>%
mutate(., prev_count = lag(count))%>%
select(., percentage_change, prev_count)%>%
na.omit()
# df65: same daily table, plus first differences of count and price change
# (used to de-trend before plotting/correlating).
df65 = df5%>%
rename(., day = date)%>%
group_by(., day)%>%
summarise(.,count=n(), percentage_change = mean(((close-open)/open)*100))%>%
mutate(., prev_count = lag(count))%>%
mutate(., first_diff_count = prev_count - lag(prev_count))%>%
mutate(., first_diff_pc = percentage_change - lag(percentage_change))%>%
na.omit()
# df66: weekly analogue of df65 with first differences.
df66 = df5%>%
group_by(., week)%>%
summarise(.,count=n(), percentage_change = mean(weekly_price_change))%>%
mutate(., prev_count = lag(count))%>%
mutate(., first_diff_count = prev_count - lag(prev_count))%>%
mutate(., first_diff_pc = percentage_change - lag(percentage_change))%>%
na.omit()
# Daily scatter of first-differenced price change vs. first-differenced post
# count, with a linear fit.
# Bug fix: the original plotted df21, which has neither first_diff_count nor
# first_diff_pc (those columns are built in df65), so ggplot errored with
# "object 'first_diff_count' not found". df65 is the daily first-difference
# table this plot was clearly meant to use.
ggplot(data = df65, aes(x = (first_diff_count), y = first_diff_pc))+
  geom_point() +
  labs(y = "Price Change, (%)",
       x = "Number of Posts, (n)",
       colour = "Legend")+
  theme(plot.title = element_text(hjust = 0.5)) +
  theme(legend.position = c(0.8, 0.9))+
  ggtitle('Price Change vs Number of Posts')+
  geom_smooth(method = "lm")
# Weekly analogue: first differences computed in df66.
ggplot(data = df66, aes(x = (first_diff_count), y = first_diff_pc))+
  geom_point() +
  labs(y = "Price Change, (%)",
       x = "Number of Posts, (n)",
       colour = "Legend")+
  theme(plot.title = element_text(hjust = 0.5)) +
  theme(legend.position = c(0.8, 0.9))+
  ggtitle('Price Change vs Number of Posts')+
  geom_smooth(method = "lm")
#########################################################################
#Sentiment vs Price Change
# df22: weekly price change vs. previous week's mean sentiment polarity.
df22 = df5%>%
group_by(., week)%>%
summarise(.,polarity = mean(polarity), percentage_change = mean(weekly_price_change))%>%
mutate(., prev_polarity = lag(polarity))%>%
select(., percentage_change, prev_polarity)%>%
na.omit()
# df23: daily price change vs. previous day's mean polarity.
df23 = df5%>%
rename(., day = date)%>%
group_by(., day)%>%
summarise(.,polarity = mean(polarity), percentage_change = mean(((close-open)/open)*100))%>%
mutate(., prev_polarity = lag(polarity))%>%
select(., percentage_change, prev_polarity)%>%
na.omit()
# df70: daily polarity/price-change table with first differences (de-trended).
df70 = df5%>%
rename(., day = date)%>%
group_by(., day)%>%
summarise(.,polarity = mean(polarity), percentage_change = mean(((close-open)/open)*100))%>%
mutate(., prev_polarity = lag(polarity))%>%
mutate(., first_diff_polarity = prev_polarity - lag(prev_polarity))%>%
mutate(., first_diff_pc = percentage_change - lag(percentage_change))%>%
na.omit()
# df71: weekly analogue of df70 with first differences.
df71 = df5%>%
group_by(., week)%>%
summarise(.,polarity = mean(polarity), percentage_change = mean(weekly_price_change))%>%
mutate(., prev_polarity = lag(polarity))%>%
mutate(., first_diff_polarity = prev_polarity - lag(prev_polarity))%>%
mutate(., first_diff_pc = percentage_change - lag(percentage_change))%>%
na.omit()
# Weekly first-differenced price change vs. first-differenced polarity.
ggplot(data = df71, aes(x = first_diff_polarity, y = first_diff_pc))+
geom_point() +
labs(y = "Price Change, (%)",
x = "Average Post Sentiment Polarity",
colour = "Legend")+
theme(plot.title = element_text(hjust = 0.5)) +
theme(legend.position = c(0.8, 0.9))+
ggtitle('Price Change vs Average Post Sentiment')+
geom_smooth(method = "lm")
#########################################################################
# Daily price change vs. previous-day polarity (levels, not differences).
ggplot(data = df23, aes(x = prev_polarity, y = percentage_change))+
geom_point() +
labs(y = "Price Change, (%)",
x = "Average Post Sentiment Polarity",
colour = "Legend")+
theme(plot.title = element_text(hjust = 0.5)) +
theme(legend.position = c(0.8, 0.9))+
ggtitle('Price Change vs Average Post Sentiment')
#########################################################################
# Correlation analysis of the daily sentiment first-difference table (df70),
# dropping the date column so every remaining column is numeric.
df83 = dplyr::select(df70, -day)
#mydata.cor = cor(df8, method = c("spearman"))
# Bug fix: the original called rcorr(as.matrix(df81)), but df81 is never
# defined anywhere in this script; the matrix built on the line above is df83.
mydata.rcorr = rcorr(as.matrix(df83))
rcx = mydata.rcorr
# Correlation coefficients (r) and p-values (P), rounded for reporting.
df.rcx.r=round(data.frame(rcx$r),2)
df.rcx.p=round(data.frame(rcx$P),2)
write.csv(df.rcx.r,file = 'df.rcx.r83.csv',fileEncoding = 'UTF-8')
write.csv(df.rcx.p,file = 'df.rcx.p83.csv',fileEncoding = 'UTF-8')
# Pearson correlation plot for the daily table.
mydata.cor = cor(df83, method = c("pearson"))
corrplot(mydata.cor)
# NOTE(review): this rcorr result is overwritten/unused below -- confirm
# whether the df30 correlations were meant to be reported as well.
mydata.rcorr = rcorr(as.matrix(df30))
# Pearson correlation plot for the weekly table (df31).
mydata.cor = cor(df31, method = c("pearson"))
corrplot(mydata.cor)
# Disabled exploratory plots and word-cloud code, kept for reference.
#ggplot(df8, aes(x=date, y=count, fill = count)) +
#geom_histogram(stat="identity", position = 'dodge')
#geom_line(data=df8, aes(x=date, y=percentage_change*20), colour="red")+
#scale_y_continuous(sec.axis = sec_axis(~./20, name = "Price Change, (%)"))
#labs(y = "Number of Posts, (n)",
# x = "Date",
# colour = "Legend")+
#theme(plot.title = element_text(hjust = 0.5)) +
#theme(legend.position = c(0.8, 0.9))+
#ggtitle('Post Count & Percentage Change Over Time')
#docs <- Corpus(VectorSource(df7$text))
#tdm <- TermDocumentMatrix(docs)
#m <- as.matrix(tdm)
#v <- sort(rowSums(m),decreasing=TRUE)
#d <- data.frame(word = names(v),freq=v)
##### from frequency counts #####
#docs <- Corpus(VectorSource(df3$text))
#tdm <- TermDocumentMatrix(docs)
#m <- as.matrix(tdm)
#v <- sort(rowSums(m),decreasing=TRUE)
#d <- data.frame(word = names(v),freq=v)
#set.seed(1234)
#wordcloud2(data = d[0:400, ], size = .30, backgroundColor = 'black',color = 'white',
# figPath = "./xrp_logo2.jpg")
#Plot posts and price as function of time
#df6 = df3%>%
# group_by(., date)%>%
# summarise(.,count=n(), price = mean(close))
#df6
#ggplot(df6, aes(x = date)) +
# geom_line(aes(y = count, colour = "posts")) +
# geom_line(aes(y = price*800, colour = "price")) +
# scale_y_continuous(sec.axis = sec_axis(~./800, name = "Price, ($)")) +
# scale_colour_manual(values = c("blue", "red")) +
# labs(y = "Number of Posts, (n)",
# x = "Date",
# colour = "Legend")+
# theme(plot.title = element_text(hjust = 0.5)) +
# theme(legend.position = c(0.8, 0.9))+
# ggtitle('Number of Posts & Price Over Time')
# Export the text of posts made the day before a daily price INCREASE.
# df40: dates with a positive daily change; df41: those dates shifted back
# one day; df42: post text on those prior days.
df40 = df5%>%
#mutate(., prev_count = lag(count))%>%
filter(., daily_price_change>0)%>%
select(., date)%>%
unique()
df41 = df40
df41$date = df40$date-1
df42 = df5%>%
filter(., date %in% df41$date)%>%
select(., text)
write.csv(df42,file = 'df42.csv',fileEncoding = 'UTF-8')
# Same idea at weekly resolution: posts in the week before a weekly increase.
df43 = df5%>%
#mutate(., prev_count = lag(count))%>%
filter(., weekly_price_change>0)%>%
select(., week)%>%
unique()
df44 = df43
df44$week = df43$week-1
df45 = df5%>%
filter(., week %in% df44$week)%>%
select(., text)
write.csv(df45,file = 'df45.csv',fileEncoding = 'UTF-8')
# Export the text of posts made the day before a daily price DECREASE.
# df46: dates with a negative daily change; df47: those dates shifted back
# one day; df48: post text on those prior days.
df46 = df5%>%
#mutate(., prev_count = lag(count))%>%
filter(., daily_price_change<0)%>%
select(., date)%>%
unique()
df47 = df46
df47$date = df46$date-1
# Bug fix: the original filtered on df41$date -- the price-INCREASE dates
# copy-pasted from the section above -- so df48 duplicated df42. Use the
# decrease dates computed here (df47).
df48 = df5%>%
filter(., date %in% df47$date)%>%
select(., text)
write.csv(df48,file = 'df48.csv',fileEncoding = 'UTF-8')
# Same idea at weekly resolution: posts in the week before a weekly DECREASE.
df49 = df5%>%
#mutate(., prev_count = lag(count))%>%
filter(., weekly_price_change<0)%>%
select(., week)%>%
unique()
df50 = df49
df50$week = df49$week-1
# Bug fix: likewise, filter on the decrease weeks df50$week, not df44$week.
df51 = df5%>%
filter(., week %in% df50$week)%>%
select(., text)
write.csv(df51,file = 'df51.csv',fileEncoding = 'UTF-8')
|
6c185a9237c3d59e1f974897248706cb3fa2393e
|
dc054313b0da31cb82de6b8bafa2999379e4ed5a
|
/cachematrix.R
|
862c938a31cd52a294f1f35c0e8d43227a892a80
|
[] |
no_license
|
anfide/ProgrammingAssignment2
|
75baa1edc3170179a1fff7c2f9fd8cafc5fde585
|
8477ec8b25e6280a1b270225250a1092d0fcec61
|
refs/heads/master
| 2021-01-15T14:41:57.082954
| 2014-11-27T09:43:57
| 2014-11-27T09:43:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
r
|
cachematrix.R
|
## Maintain a "matrix vector" that holds a matrix and its inverse.
# ( NOTE: It could be built by a simple vector with two elements (matrix, inverse)
# but I think there would be one main disadvantage:
# the update of the inverse value would be complete responsibility of the
# matrix user
#
## Create a special "matrix vector" object to hold a matrix and its inverse.
## Create a special "matrix vector" object that caches a matrix together with
## its inverse.
##
## Returns a list of four accessors:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse = NULL
  set <- function(y) {
    x <<- y
    # Bug fix: the cache variable is `cached_inverse`, but the original
    # assigned NULL to an undefined variable `m`, so a stale inverse
    # survived a call to set(). Invalidate the real cache here.
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## returns the inverse of a "matrix vector" created by a call to makeCacheMatrix.
# The inverse is computed on the first call after the matrix in x changes.
## Return the inverse of a "matrix vector" created by makeCacheMatrix.
##
## On a cache hit the stored inverse is returned directly (with a message);
## on a miss the inverse is computed with solve(), stored via setinverse(),
## and returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if(!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  # Bug fix: the original returned `m`, which is undefined in this function
  # (an error at runtime on every cache miss). Return the inverse itself.
  i
}
|
353880e228f0e134f7de1bc36c37f20f3ca90117
|
e97358ae5d7dcdf22f2ae865f11101de7fcccebb
|
/R/plot size at age v time.R
|
e76cd24185feab410c5133229f0d06c9682b7d5a
|
[] |
no_license
|
tessington/biochronology
|
2a5cc0041e0f9cf0d0d9725264025ba614a99220
|
ca0dee5b41860eca36f45250402b8080ff53c99c
|
refs/heads/master
| 2023-05-06T12:04:21.783805
| 2021-05-25T00:23:16
| 2021-05-25T00:23:16
| 258,607,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,258
|
r
|
plot size at age v time.R
|
require(rstan)
require(KernSmooth)
require(viridis)
####### Plotting Function #####
# Interpolate the length-to-otolith-width ratio (h) at a given otolith
# width `win`, using the rows of `df` (columns Lstart, wstart) as knots.
# Widths beyond the largest observed wstart get the last row's ratio.
interp.fun <- function(win, df) {
  # Per-row ratio of fish length to otolith width.
  df$h.at.age <- df$Lstart / df$wstart
  # First row whose width reaches the target.
  idx <- which(df$wstart >= win)[1]
  if (is.na(idx)) {
    # Target width exceeds every observation: carry the final ratio forward.
    return(df$h.at.age[nrow(df)])
  }
  # Linear interpolation between the bracketing rows at width `win`.
  approx(x = df$wstart[c(idx - 1, idx)],
         y = df$h.at.age[c(idx - 1, idx)],
         xout = win)$y
}
# Plot modeled size-at-age trajectories over time for one species, overlaid
# with observed final lengths, grouped into age bins.
#
# Args:
#   spc          -- species code, "POP" or "YFS"; selects growth parameters
#                   and the otolith RDS file to read.
#   output       -- NOTE(review): this argument is immediately overwritten by
#                   extract(model.stan) below, so the passed value is unused;
#                   `model.stan` must exist in the calling environment
#                   (presumably loaded from the *_result.Rdata files).
#   thedata, max.year -- NOTE(review): appear unused inside this function.
#   min.year     -- first year of the data; used to place the x axis.
#   small.data   -- one row per fish with Year, Length, Capture_age_chron.
#   ages.2.use   -- age-bin boundaries; the first four define the lines drawn.
#   legend.text, legend.pos -- legend labels and placement.
#   years.2.trim -- number of leading years of eps_q to discard.
#   age.ref      -- age whose length/width ratio scales the curves.
# Side effects: draws on the active graphics device; prints line size ranges.
# Depends on global `col` (palette) and rstan::extract.
make.plot <-
function(spc,
output,
thedata,
min.year,
max.year,
small.data,
ages.2.use,
legend.text,
legend.pos,
years.2.trim = 10,
age.ref) {
require(dplyr)
# Posterior draws from the fitted Stan model (global `model.stan`).
output <- extract(model.stan)
# Species-specific von Bertalanffy parameters and otolith data.
if (spc == "POP") {
Linf <- 41.55
k <- 0.14
tzero <- -1.317
ages <- 2:90
df <- readRDS("Outputs/POP_length_otolith.RDS")
}
if (spc == "YFS") {
Linf <- 33.7
k <- 0.151
tzero <- -0.111
ages <- 2:35
df <- readRDS("Outputs/YFS_length_otolith.RDS")
}
# Length-to-otolith-width ratio at each age (used to rescale curves).
df$h.at.age <- df$Lstart / df$wstart
# extract median qbar, kbar
q.bar <- median(output$q_base)
k.bar <- median(output$k_base)
beta.t <- output$beta_t
eps.q.raw <- output$eps_q
eps.q <- matrix(NA, nrow= nrow(eps.q.raw), ncol = ncol(eps.q.raw))
# adjust for beta.t
for (i in 1:ncol(eps.q.raw)) eps.q[,i] <- beta.t * eps.q.raw[,i]
# Posterior median per year, then drop the first years.2.trim years.
eps.q <- apply(eps.q, MARGIN = 2, median)
eps.q <- eps.q[-(1:years.2.trim)]
rm(output)
# Year-specific growth coefficient and asymptotic size.
q.t <- q.bar * exp(eps.q)
winf.bar <- q.bar / k.bar
h.at.age <- df$Lstart / df$wstart
winf.t <- q.t / k.bar
# make a matrix of size at age, where rows are ages, columns are ages
n.years <- length(winf.t)
n.ages <- length(ages)
woutput <- matrix(data = NA,
nrow = n.ages,
ncol = n.years)
# Initial cohort: grow from age 2 using the first year's winf.
w.t.start <- rep(x = NA, times = n.ages)
w.t.start[1] <- winf.bar[1]*(1 - exp(-(ages[1] - tzero) * k.bar))
for (a in 2:n.ages)
w.t.start[a] <-
w.t.start[a - 1] + (1 - exp (-k.bar)) * (winf.t[1] - w.t.start[a - 1])
woutput[, 1] <- w.t.start
# Each later column grows diagonally from the previous year's cohort.
for (i in 2:n.years) {
woutput[1, i] <- w.t.start[1]
for (a in 2:n.ages)
woutput[a, i] <-
woutput[a - 1, i - 1] + (1 - exp (-k.bar)) * (winf.t[i] - woutput[a - 1, i -
1])
}
# now scale output based on h.at.age
# print(ages.2.use + 5 - 2)
# print(dim(output))
# Range of modeled widths for the four plotted age bins (diagnostic print).
minsize <- (apply(X=woutput[ages.2.use[1:4] + 5 - 2,], MARGIN = 1, FUN = min))
maxsize <- (apply(X=woutput[ages.2.use[1:4] + 5 - 2,], MARGIN = 1, FUN = max))
print(maxsize - minsize)
# One line per age bin (scaled to length via h at age.ref), plus observed
# lengths for fish captured within that bin.
for (i in 1:4) {
if (i == 1)
plot(
1:n.years + min.year+years.2.trim,
df$h.at.age[df$ages == age.ref] * woutput[ages.2.use[i] + 5 - 2, ],
type = "l",
lwd = 2,
ylim = c(10, 70),
col = col[1],
xlab = "Year",
ylab = "Length (cm)",
xlim = c(1+min.year+years.2.trim - 5, n.years + min.year + years.2.trim)
)
if (i > 1)
lines(1:n.years + min.year + years.2.trim,
df$h.at.age[df$ages == age.ref]* woutput[ages.2.use[i] + 5 - 2, ],
lwd = 2,
col = col[i])
tmp.data <- small.data %>%
filter(Capture_age_chron < ages.2.use[i + 1] &
Capture_age_chron >= ages.2.use[i])
points(tmp.data$Year,
tmp.data$Length,
pch = 21,
bg = col[i])
}
legend(legend.pos,
pch = 21,
pt.bg = col,
legend = legend.text,
cex = 0.75,
bty = "n")
}
#######
# Build the two-panel comparison figure (YFS left, POP right) as a PDF.
plotfilename = "Graphics/compare_fit_to_obs.pdf"
pdf(file = plotfilename,
height = 3.5,
width = 7)
par(mfrow = c(1,2), las = 1)
# --- Yellowfin sole panel ---
spc = "YFS"
# Loads the fitted model objects (presumably including model.stan and
# output) used by make.plot -- TODO confirm contents of the .Rdata file.
load("Outputs/YFS_result.Rdata")
filename <- "data/YFS_all.csv"
thedata <- read.csv(file = filename, header = T)
min.year <- min(thedata$Year)
max.year <- max(thedata$Year)
# get final size-at-age: keep one row per fish by dropping duplicated IDs.
unique.ids <- which(duplicated(as.character(thedata$FishID)))
small.data <- thedata[-unique.ids,]
ages.2.use <- c(15,20,25,30,35)
legend.text <- c("15-20", "20-25","25-30", "30-35")
# Shared palette used inside make.plot (read as a global there).
col <- plasma(n=16)[c(2,6,10,16)]
make.plot(spc, output, thedata, min.year, max.year, small.data, ages.2.use, legend.text, legend.pos = "topright", years.2.trim = 5, age.ref = 20)
# --- Pacific ocean perch panel ---
spc ="POP"
load("Outputs/POP_result.Rdata")
filename <- "data/POP_meas.csv"
thedata <- read.csv(file = filename, header = T)
# Convert mm to cm.
thedata$Length <- thedata$Length/10
min.year <- min(thedata$Year)
max.year <- max(thedata$Year)
# get final size-at-age
# NOTE(review): this indexes rows by the FishID *values*, unlike the YFS
# branch which used duplicated(); likely intended the same dedup logic --
# confirm against the data.
unique.ids <- unique(thedata$FishID)
small.data <- thedata[unique.ids,]
ages.2.use <- c(50,60,70,80,90)
legend.text <- c("50-60", "60-70","70-80", "80-90")
age.ref <- 70
make.plot(spc, output, thedata, min.year, max.year, small.data, ages.2.use, legend.text, legend.pos = "bottomright", age.ref = 70)
dev.off()
# Open the finished PDF in Skim (macOS only).
system2("open", args = c("-a Skim.app", plotfilename))
|
e25c15f52100451b4f9b37c6dea956437acefa10
|
60632022e8d582f96869911de94a3cc87a4ec464
|
/R/merge_data_sources.R
|
59623e331e933656255d6a8b19508799162ee1ca
|
[] |
no_license
|
guillecarc/COVID19-global-forecasting
|
508726a9cb81646b5f877458dbc547c15e1bf667
|
4f33a44416e60012f28eb2bf2c5e1fc5fa24b1ff
|
refs/heads/master
| 2022-04-22T18:45:18.326789
| 2020-04-20T06:27:33
| 2020-04-20T06:52:46
| 254,617,115
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,062
|
r
|
merge_data_sources.R
|
# Read, preprocess, and merge all COVID-19 data sources into one data frame.
#
# Args:
#   train_test_path .. LegatumPI_path -- file/directory locations of the raw
#     inputs; each has a matching preprocess_* helper sourced below.
#   complete.cases -- if TRUE, keep only countries present in *every* source,
#     and drop any variable with missing values in the training period.
#     (NOTE: the argument shadows stats::complete.cases, but R still resolves
#     the function when the name is used in call position.)
#
# Returns: a data frame keyed by Country_Region and Date combining the
#   train/test targets with population, temperature, mobility, government
#   measures, and prosperity-index features.
get_data <- function(train_test_path = "./data/train_test/week4/",
                     UNPop_path = "./data/World Population Prospects 2019 - UN/",
                     KaggleTemp_path = "./data/Climate change earth surface temperature data - Kaggle/GlobalLandTemperaturesByCountry.csv",
                     AppleMob_path = "./data/Apple Mobility Trends/applemobilitytrends-2020-04-15.csv",
                     GoogleMob_path = "./data/Google Mobility Trends/Global_Mobility_Report.csv",
                     Wiki_path = "./data/Wikipedia metadata/region_metadata.csv",
                     GovMsrs_path = "./data/Government measures/20200414_acaps_covid-19_goverment_measures_dataset_v7.csv",
                     DarkSkyTemp_path = "./data/Dark Sky Weather/weather_covid19.csv",
                     LegatumPI_path = "./data/Prosperity Index - The Legatum Institute Foundation/PI_2019_Data.xlsx",
                     complete.cases = TRUE){

  # Per-source preprocessing helpers (each defines one preprocess_* function).
  source("./R/preprocess_un_pop_data.R")
  source("./R/preprocess_train_test_data.R")
  source("./R/preprocess_kaggle_temp_data.R")
  source("./R/preprocess_apple_mobility_data.R")
  source("./R/preprocess_google_mobility_data.R")
  source("./R/preprocess_wiki_pop_data.R")
  source("./R/preprocess_government_measures_data.R")
  source("./R/preprocess_darksky_temp_data.R")
  source("./R/preprocess_legatum_prosperity_index_data.R")

  message("Reading sources")
  train_test_df <- preprocess_traintest(train_test_path)
  UNPop_df <- preprocess_pop_data(UNPop_path)
  KaggleTemp_df <- preprocess_temp_data(KaggleTemp_path)
  AppleMob_df <- preprocess_apple_mobility(AppleMob_path)
  GoogleMob_df <- preprocess_google_mobility(GoogleMob_path)
  WikiPop_df <- preprocess_wiki_pop_data(Wiki_path)
  GovMsrs_df <- preprocess_gob_data(GovMsrs_path)
  DarkSkyTemp_df <- preprocess_darksky_temp(DarkSkyTemp_path)
  LegatumPI_df <- preprocess_legatum_prosperity_index_data(LegatumPI_path)

  # if complete cases is true, then only existing countries in all data sets
  # will be selected
  if (complete.cases) {
    sources <- list(
      train_test_df = train_test_df,
      UNPop_df = UNPop_df,
      KaggleTemp_df = KaggleTemp_df,
      AppleMob_df = AppleMob_df,
      GoogleMob_df = GoogleMob_df,
      WikiPop_df = WikiPop_df,
      GovMsrs_df = GovMsrs_df,
      DarkSkyTemp_df = DarkSkyTemp_df,
      LegatumPI_df = LegatumPI_df
    )
    # One tibble of distinct country names per source.
    sources <- map(sources, ~{enframe(as.character(unique(.x %>% ungroup %>% .$Country_Region)),
                                      value = "Country_Region")})
    # Intersect country names (lowercased) across all sources; keep a mapping
    # back to the original-cased names from the train/test data.
    country_check <- data.frame()
    for (s in 1:length(sources)){
      if (names(sources[s]) == "train_test_df") {
        mapping <- sources[[s]]
        mapping$lower <- str_to_lower(mapping$Country_Region)
        mapping <- rename(mapping, original = Country_Region)
      }
      sources[[s]]$Country_Region <- str_to_lower(sources[[s]]$Country_Region)
      if (is_empty(country_check)){
        country_check <- sources[[s]]
      } else {
        country_check <- inner_join(country_check,
                                    sources[[s]],
                                    by = "Country_Region")
      }
    }
    country_check <- left_join(mapping, country_check,
                               by = c("lower"="Country_Region"))
    country_check <- country_check$original[which(complete.cases(country_check))]
    message(length(country_check), " countries were left after checking for complete.cases")
  }

  message("Joining sources")
  # Join population data ----------------------------------------------------
  df <- left_join(train_test_df,
                  UNPop_df,
                  by = c("Country_Region"))
  # Join temperature data (matched on calendar month) -----------------------
  df <- left_join(df %>% mutate(month = month(Date)),
                  KaggleTemp_df,
                  by = c("Country_Region", "month"))
  df <- select(df, -month)
  # Join Apple mobility data ------------------------------------------------
  df <- left_join(df,
                  AppleMob_df,
                  by = c("Country_Region", "Date" = "date"))
  # Join Google mobility data -----------------------------------------------
  df <- left_join(df,
                  GoogleMob_df,
                  by = c("Country_Region", "Date"))
  google_names <- names(df)
  google_names <- google_names[which(str_detect(google_names, "^Google__"))]
  df <- df %>%
    mutate_at(vars(google_names), replace_na, 0)
  message("Google Mobility data does not contain data before 2020-02-14, it will be imputed with 0")
  # Join Wikipedia medatadata -----------------------------------------------
  df <- left_join(df, WikiPop_df,
                  by = c("Country_Region"))
  # Join Government measures data (matched case-insensitively) --------------
  df <- left_join(df %>% mutate(lower = str_to_lower(Country_Region)),
                  GovMsrs_df,
                  by = c("lower"="Country_Region", "Date"))
  df <- select(df, -lower)
  gob_names <- names(df)[which(str_detect(names(df), pattern = "Gob__"))]
  df <- df %>%
    mutate_at(vars(gob_names), replace_na, 0)
  # Join DarkSky temperatures -----------------------------------------------
  df <- left_join(df,
                  DarkSkyTemp_df,
                  by = c("Country_Region", "Date"))
  # Join Legatum Prosperity Indey data --------------------------------------
  df <- left_join(df,
                  LegatumPI_df,
                  by = c("Country_Region"))

  if (complete.cases){
    message("Leaving only complete cases")
    df <- df[which(df$Country_Region %in% country_check),]
    # Test data to merge later
    test_df <- df %>%
      filter(data_type == "test")
    # Get the maximun date from the train data to check for missings
    max_train_date <- df %>%
      filter(data_type == "train") %>%
      select(Date) %>%
      pull %>%
      max
    message("The maximum date for training data is ", max_train_date)
    # Source prefixes ("Google", "Gob", ...) parsed from column names.
    names <- names(df)
    sources <- unique(str_extract(names, pattern = "^[:alpha:]+(?=__)"))
    sources <- sources[which(!is.na(sources))]
    vars2rm <- vector()
    # Scan the TRAINING rows only for variables with missing values.
    df <- df %>%
      filter(data_type == "train")
    for (s in sources){
      s_names <- names[str_detect(names, pattern = paste(s, collapse = "|"))]
      missing_vars <- colSums(is.na(df %>% select_at(vars(s_names))))
      if (sum(missing_vars) > 0) {
        # How many data points
        message(sum(missing_vars), " data points are missing for ", s, " source")
        # Which variables
        missing_vars <- names(df %>% select_at(vars(s_names)))[which(colSums(is.na(df %>% select_at(vars(s_names)))) > 0)]
        vars2rm <- c(vars2rm, missing_vars)
        text <- paste(c("The missing variables are:",missing_vars), collapse = "\n")
        message(text)
      }
    }
    df <- bind_rows(df, test_df)
    # Bug fix: the original dropped `missing_vars`, which only holds the
    # result of the LAST loop iteration (and is a numeric vector of zeros
    # when that source had no missings, silently removing columns by
    # position). Drop everything accumulated in vars2rm, and only when
    # something was actually flagged.
    if (length(vars2rm) > 0) {
      df <- df %>% select(-all_of(unique(vars2rm)))
      message("missing variables detected were removed")
    }
  }
  return(df)
}
|
7b1069bafe857b39fc98e13387607a7ce7d39c07
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/qdap/examples/end_inc.Rd.R
|
64a439274dc2532c6cd8a31a62d3543e51722710
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 410
|
r
|
end_inc.Rd.R
|
library(qdap)
### Name: end_inc
### Title: Test for Incomplete Sentences
### Aliases: end_inc
### Keywords: incomplete
### ** Examples
## Not run:
##D dat <- sentSplit(DATA, "state", stem.col = FALSE)
##D dat$state[c(2, 5)] <- paste(strip(dat$state[c(2, 5)]), "|")
##D end_inc(dat, "state")
##D end_inc(dat, "state", warning.report = FALSE)
##D end_inc(dat, "state", which.mode = TRUE)
## End(Not run)
|
3c172f2be56d835de2cb312601a6d267ca949775
|
2e627e0abf7f01c48fddc9f7aaf46183574541df
|
/PBStools/man/getName.Rd
|
f34f8f2729491054ea278f4a9034d6ca0347436b
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
pbs-software/pbs-tools
|
30b245fd4d3fb20d67ba243bc6614dc38bc03af7
|
2110992d3b760a2995aa7ce0c36fcf938a3d2f4e
|
refs/heads/master
| 2023-07-20T04:24:53.315152
| 2023-07-06T17:33:01
| 2023-07-06T17:33:01
| 37,491,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
rd
|
getName.Rd
|
\name{getName}
\alias{getName}
\title{Get String Names from Literals or Named Objects}
\description{
Get string names from user supplied input. If the name supplied
exists as an object in the parent frame, the object will be assessed
for its potential as a source of names.
}
\usage{
getName(fnam)
}
\arguments{
\item{fnam}{ file name(s) specified directly or through names in objects.}
}
\details{
If \code{fnam} exists as a list, the function returns the names of the list.\cr
If \code{fnam} exists as a string vector, the function returns the strings in the vector.\cr
If \code{fnam} does not exist as an object, it simply returns itself as a string.
}
\value{
A vector of string names.
}
\author{
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Institute of Ocean Sciences (IOS), Sidney BC\cr
Last modified \code{Rd: 2021-06-15}\cr
}
\seealso{
\code{\link{getFile}}, \code{\link{getData}}
}
\examples{
local(envir=.PBStoolEnv,expr={
pbsfun=function() {
cat("Data object 'swiss' doesn't appear in the parent frame\n")
print(getName(swiss))
swiss=swiss
cat("And now it does, so it acts like a source of names\n")
print(getName(swiss))
invisible() }
pbsfun()
})
}
\keyword{ data }
\concept{M01_Utility}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.