blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5e529710f5b1e5ff2c0dd8b9a61baa628457f609
|
49db2824f0aaaddf55d2a656476d261a9290e22f
|
/man/row.names.as.col.Rd
|
fd1f776c6005f6a2151b6695bd429dbcf10fdc2c
|
[] |
no_license
|
kuremon/lazyr
|
47f02e34de1681ec8aeb45555a21d887cebc9875
|
8248d2b879f429947d781807bbd1456c8ccfc5c2
|
refs/heads/master
| 2021-01-23T10:44:47.161269
| 2013-12-17T07:03:14
| 2013-12-17T07:03:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 492
|
rd
|
row.names.as.col.Rd
|
\name{row.names.as.col}
\alias{row.names.as.col}
\title{Add row.names to a data frame as a new column}
\usage{
row.names.as.col(data, position = 1, var.name)
}
\arguments{
\item{data}{the data frame}
\item{position}{position of the new column (1 by
default)}
\item{var.name}{name of the newly created column. By
default \code{var.name=".row"}.}
}
\description{
Add row.names to a data frame as a new column
}
\seealso{
\code{\link{col.as.row.names}}
}
|
d2ab4599872a2370cc294f4272338dc4d02b2ce2
|
fa3f3612a143c184a7b1489d1281205bfed6aaaf
|
/LAND-SVA_v1r0b4_20171130.R
|
e419394f638cf88ff90e4d89331601e38c5710bb
|
[] |
no_license
|
maurorossi/LAND-SVA
|
c7631a5a2bba47988657dee460b69af8039abe3b
|
79be7f7799f7e09a038e55e6bccfbde09dc52e77
|
refs/heads/master
| 2020-04-02T00:22:47.595161
| 2018-10-22T13:41:29
| 2018-10-22T13:41:29
| 153,801,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,046
|
r
|
LAND-SVA_v1r0b4_20171130.R
|
#########################################################################
#########################################################################
#### ####
#### ####
#### ####
#### LAND-SVA ####
#### LANDSLIDE SUSCEPTIBILITY VARIABLE ANALYSIS ####
#### IRPI CNR ####
#### MAURO ROSSI - IRPI CNR ####
#### TXOMIN BORNAETXEA - UPV/EHU ####
#### ####
#### v1r0b1 - 11 November 2016 ####
#### Copyright (C) 2016 Mauro Rossi, Txomin Bornaetxea ####
#### ####
#### This program is free software; you can redistribute it and/or ####
#### modify it under the terms of the GNU General Public License ####
#### as published by the Free Software Foundation; either version 2 ####
#### of the License, or (at your option) any later version.         ####
#### ####
#### This program is distributed in the hope that it will be useful, ####
#### but WITHOUT ANY WARRANTY; without even the implied warranty of ####
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ####
#### GNU General Public License for more details. ####
#### ####
#### Istituto di Ricerca per la Protezione Idrogeologica ####
#### Consiglio Nazionale delle Ricerche ####
#### Gruppo di Geomorfologia ####
#### Via della Madonna Alta, 126 ####
#### 06128 Perugia (Italia) ####
#### +39 075 5014421 ####
#### +39 075 5014420 ####
#### mauro.rossi@irpi.cnr.it ####
#### geomorfologia@irpi.cnr.it ####
#### ####
#### Universidad del País Vasco/Euskal Herriko Unibertsitatea ####
#### Facultad de Ciencias, Departamento de Geodinamica ####
#### Zientzia Fakultatea, Geodinamika Saila ####
#### txomin.bornaetxea@ehu.eus ####
#### ####
#### This script was prepared using R 3.1.3 ####
#### The script requires the following R packages: ####
#### 1: corrplot ####
#### 2: perturb ####
#### 3: Hmisc ####
#### 4: data.table ####
#### 5: RColorBrewer ####
#### 6: rgdal ####
#### 7: ####
#### 8: ####
#### 9: ####
#### ####
#### INPUTS: 1) datatable_inventory.RData ####
#### produced by the script LAND-SIP.R ####
#### ####
#### ####
#########################################################################
#########################################################################
#R CMD BATCH --no-save --no-restore '--args -wd /media/disco_dati/R/grid_to_xyz/generali/adb_07_medium/' LAND-SVA_v1r0b1_20161111.R variable_analysis.log
# --- Session set-up: clear the workspace and close any open graphics devices.
# NOTE(review): rm(list = ls()) wipes the calling session; the script is meant
# to be run non-interactively (see the R CMD BATCH example above).
rm(list=(ls()))
graphics.off()
#setwd("X:/R/grid_to_xyz/")
# Read the working directory from the command line ("-wd <path>"); fall back to
# a hard-coded default path when the flag is absent.
pars <-commandArgs(trailingOnly=TRUE)
# length(table(pars == "-wd")) == 2 means both TRUE and FALSE occur among the
# comparisons, i.e. "-wd" is present and is not the only argument.
if (length(table(pars == "-wd"))==2)
{
# The path is the argument immediately following the "-wd" flag.
wd_selected<-pars[which(pars=="-wd")+1]
} else
{
wd_selected<-"X:/R/grid_to_xyz/LAND-SVA_github/" # manually specified
}
setwd(wd_selected)
#memory.limit(size=12000)
### --------------- SVA analysis parameter definition --------------- ###
# Load the tables produced by the LAND-SIP script. NOTE(review): the RData file
# is expected to provide training.table, validation.table, training.xy.table
# and validation.xy.table (all used further below) -- confirm against LAND-SIP.
rdata_file<-paste("datatable_inventory.RData",sep="")
load(file=rdata_file)
enable_NA_removal<-TRUE # If TRUE rows with at least an NA value will be removed. If FALSE an error message will be returned.
enable_multicollinearity_test<-TRUE
type_correlation<-"pearson" # It could be "pearson" or "spearman" and it specifies the type of correlations to compute. Spearman correlations are the Pearson linear correlations computed on the ranks of non-missing elements, using midranks for ties. Pearson's coefficient and Spearman's rank order coefficient each measure aspects of the relationship between two variables. They are closely related, but not the same. Spearman's coefficient measures the rank order of the points. Pearson's coefficient measures the linear relationship between the two.
export_shapefiles<-TRUE # Enable this to export shapefiles of points corresponding to the data tables. Useful to check the location of training and validation points using GIS clients
export_txtfiles<-TRUE # Enable this to export the training and validation tables in tab separated .txt format
### --------------- Data conversion --------------- ###
library(data.table)
# converting to data.table format; the original row counts are recorded so the
# NA-driven row losses can be reported/validated later in the script
training.table<-data.table(training.table)
original_rows_training<-dim(training.table)[1]
validation.table<-data.table(validation.table)
original_rows_validation<-dim(validation.table)[1]
### --------------- NAs analysis and selection --------------- ###
# Keep only rows whose values are all finite (drops NA/NaN/Inf rows).
# NOTE(review): rowSums() requires every column to be numeric -- confirm that
# LAND-SIP always produces all-numeric tables.
index_selection_training<-is.finite(rowSums(training.table))
training.table<-training.table[index_selection_training,1:dim(training.table)[2],with=FALSE]
# Columns 3..ncol are treated as the explanatory variables; column 2 is used as
# the dependent variable below (presumably column 1 is an identifier -- confirm).
variables_training<-training.table[,3:dim(training.table)[2],with=FALSE]
index_selection_validation<-is.finite(rowSums(validation.table))
validation.table<-validation.table[index_selection_validation,1:dim(validation.table)[2],with=FALSE]
variables_validation<-validation.table[,3:dim(validation.table)[2],with=FALSE]
### --------------- Conditional Density Plots --------------- ###
# For each explanatory variable produce three PDF plots against the dependent
# variable (column 2 of training.table): a conditional density plot, a spine
# plot, and per-class kernel density curves.
for(count in 1:dim(variables_training)[2])
{
#count<-2
selected_variable<-names(variables_training)[count]
print(paste("Conditional and Spine plots training variable: ",selected_variable," - Done: ",round(count/dim(variables_training)[2]*100,1),"%",sep=""))
pdf(paste("ConditionalDensityPlot_",selected_variable,".pdf",sep=""))
cdplot(x=as.numeric(variables_training[,count,with=FALSE][[1]]),y=as.factor(training.table[,2,with=FALSE][[1]]),bw="nrd0",kernel="gaussian",xlab=selected_variable,ylab="Dependent variable",main=paste("Conditional plot: ",selected_variable,sep=""))
dev.off()
pdf(paste("SpinePlot_",selected_variable,".pdf",sep=""))
# Number of histogram classes via Sturges' rule (Scott/Freedman-Diaconis
# alternatives kept commented out).
breaks_sel<-nclass.Sturges(as.numeric(variables_training[,count,with=FALSE][[1]]))
#breaks_sel<-nclass.scott(as.numeric(variables_training[,count,with=FALSE][[1]]))
#breaks_sel<-nclass.FD(as.numeric(variables_training[,count,with=FALSE][[1]]))
spineplot(x=as.numeric(variables_training[,count,with=FALSE][[1]]),y=as.factor(training.table[,2,with=FALSE][[1]]),breaks=breaks_sel,xlab=selected_variable,ylab="Dependent variable",main=paste("Spineplot: ",selected_variable,sep=""))
dev.off()
pdf(paste("DensityPlot_",selected_variable,".pdf",sep=""))
# Distinct values of the dependent variable; one density curve is computed per
# value and stored under a generated name ("den_<value>") via assign()/get().
dependent_values<-as.numeric(names(table(training.table[,2,with=FALSE])))
density_results_names<-paste("den_",dependent_values,sep="")
require(RColorBrewer)
#display.brewer.all()
colors_vector<-brewer.pal(length(dependent_values)+1,"Set1")
# Running maximum of the y-axis range across the per-class densities, so all
# curves fit in a single plot.
range_y_den<-c(0,0.01)
for(count_den in 1:length(dependent_values))
{
#count_den<-1
dependent_values_selected<-dependent_values[count_den]
index_dependent<-which(training.table[,2,with=FALSE]==dependent_values_selected)
assign(density_results_names[count_den],density(x=as.numeric(variables_training[index_dependent,count,with=FALSE][[1]]),bw="nrd0",kernel="gaussian"))
if(max(get(density_results_names[count_den])$y,na.rm=TRUE)>range_y_den[2]) range_y_den<-c(0,max(get(density_results_names[count_den])$y,na.rm=TRUE))
}
# Empty frame first, then overlay one density curve per dependent-variable class.
plot(NULL,NULL,xlab=selected_variable,ylab="Density",xlim=range(as.numeric(variables_training[,count,with=FALSE][[1]])),ylim=range_y_den,main=paste("Density plot: ",selected_variable,sep=""))
for(count_plot_den in 1:length(dependent_values))
{
lines(get(density_results_names[count_plot_den]), col=colors_vector[count_plot_den],lwd=2)
}
legend("topleft",legend=dependent_values,lty=1,lwd=2,col=colors_vector,bty="n") #
dev.off()
}
### --------------- Multicolinearity test for the training and validation dataset --------------- ###
# Belsley-Kuh-Welsch collinearity diagnostics (perturb::colldiag) for both the
# training and validation explanatory variables; results and an explanatory
# text are written to Variables_CollinearityAnalysis_{training,validation}.txt.
# NOTE(review): the literal "?conditioning?" in the explanation strings below
# looks like mojibake for curly quotes; a doc-only pass cannot change runtime
# strings -- consider fixing the source encoding.
if(enable_multicollinearity_test==TRUE)
{
#load collinearity package (perturb)
library(perturb)
#colnames(training.table)
collinearity.test.training<-colldiag(variables_training)
collinearity.test.validation<-colldiag(variables_validation)
#collinearity.test.training$condindx
#collinearity.test.training$pi
#range(collinearity.test.training$condindx)
# Condition number >= 30 is the conventional threshold for collinearity
# (Belsley et al.).
if(range(collinearity.test.training$condindx)[2] >= 30)
{
collinearity.value.training<-"Some explanatory variables are collinear"
} else {
collinearity.value.training<-"Explanatory variables are not collinear"
}
# print() on a colldiag object both displays it and returns the variance
# decomposition proportion matrix, which is captured here for export.
print(collinearity.test.training,fuzz=.5)
collinearity.evaluation.matrix.training<-print(collinearity.test.training,fuzz=.5)
write.table("COLLINEARITY ANALYSIS RESULT",file="Variables_CollinearityAnalysis_training.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("EXPLANATION",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("This analysis was performed with Colldiag an implementation of the regression collinearity diagnostic procedures found in Belsley, Kuh,
and Welsch (1980). These procedures examine the ?conditioning? of the matrix of independent variables. The procedure computes the condition
indexes of the matrix. If the largest condition index (the condition number) is large (Belsley et al suggest 30 or higher), then there may be
collinearity problems. All large condition indexes may be worth investigating. The procedure also provides further information that may help to
identify the source of these problems, the variance decomposition proportions associated with each condition index. If a large condition
index (> 30) is associated with two or more variables with large variance decomposition proportions, these variables may be causing collinearity problems.
Belsley et al suggest that a large proportion is 50 percent or more.",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t",
row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("RESULTS",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(paste("Largest condition index (the condition number) =",range(collinearity.test.training$condindx)[2]),file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(collinearity.value.training,file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Matrix of the variance decomposition proportions associated with each condition index (1st column)",file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(colnames(collinearity.evaluation.matrix.training),collinearity.evaluation.matrix.training),file="Variables_CollinearityAnalysis_training.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
# --- Same diagnostics and report for the validation dataset. ---
if(range(collinearity.test.validation$condindx)[2] >= 30)
{
collinearity.value.validation<-"Some explanatory variables are collinear"
} else {
collinearity.value.validation<-"Explanatory variables are not collinear"
}
print(collinearity.test.validation,fuzz=.5)
collinearity.evaluation.matrix.validation<-print(collinearity.test.validation,fuzz=.5)
write.table("COLLINEARITY ANALYSIS RESULT",file="Variables_CollinearityAnalysis_validation.txt", quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("EXPLANATION",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("This analysis was performed with Colldiag an implementation of the regression collinearity diagnostic procedures found in Belsley, Kuh,
and Welsch (1980). These procedures examine the ?conditioning? of the matrix of independent variables. The procedure computes the condition
indexes of the matrix. If the largest condition index (the condition number) is large (Belsley et al suggest 30 or higher), then there may be
collinearity problems. All large condition indexes may be worth investigating. The procedure also provides further information that may help to
identify the source of these problems, the variance decomposition proportions associated with each condition index. If a large condition
index (> 30) is associated with two or more variables with large variance decomposition proportions, these variables may be causing collinearity problems.
Belsley et al suggest that a large proportion is 50 percent or more.",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t",
row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("RESULTS",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(paste("Largest condition index (the condition number) =",range(collinearity.test.validation$condindx)[2]),file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(collinearity.value.validation,file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table("Matrix of the variance decomposition proportions associated with each condition index (1st column)",file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
write.table(rbind(colnames(collinearity.evaluation.matrix.validation),collinearity.evaluation.matrix.validation),file="Variables_CollinearityAnalysis_validation.txt", append=TRUE, quote = FALSE,sep = "\t", row.names=FALSE, col.names=FALSE)
}
### --------------- Calculating and plotting correlation matrix --------------- ###
library(Hmisc)
coeff_training<-rcorr(as.matrix(variables_training),type=type_correlation) # Computes the correlation matrix and the p-value matrix (method set by type_correlation), excluding NA values
coeff_validation<-rcorr(as.matrix(variables_validation),type=type_correlation) # Computes the correlation matrix and the p-value matrix (method set by type_correlation), excluding NA values
library(corrplot)
# NOTE(review): `colors` is only used by the commented-out corrplot.mixed
# variants below; the active calls use corrplot's default palette.
colors<-colorRampPalette(c("darkred","grey40","forestgreen"))(100)
pdf("Variables_Correlogram_matrix_training.pdf")
# Mixed correlogram: ellipses below the diagonal, coefficients above;
# correlations with p >= 0.01 are blanked out.
corrplot.mixed(coeff_training$r, lower="ellipse", upper="number",tl.col="black",tl.pos="lt",number.cex=0.7,tl.cex=0.6,cl.cex=0.6,cl.ratio=0.2,cl.align.text = "l",title = "Correlation matrix", order = "original",p.mat=coeff_training$P,sig.level=0.01,insig="blank")
#corrplot.mixed(coeff_training$r, lower="ellipse", upper="number",tl.col="black",tl.pos="lt",col=colors,number.cex=0.7,tl.cex=0.6,cl.cex=0.6,cl.ratio=0.2,cl.align.text = "l",title = "Correlation matrix", order = "original",p.mat=coeff_training$P,sig.level=0.01,insig="blank")
dev.off()
pdf("Variables_Correlogram_matrix_validation.pdf")
corrplot.mixed(coeff_validation$r, lower="ellipse", upper="number",tl.col="black",tl.pos="lt",number.cex=0.7,tl.cex=0.6,cl.cex=0.6,cl.ratio=0.2,cl.align.text = "l",title = "Correlation matrix", order = "original",p.mat=coeff_validation$P,sig.level=0.01,insig="blank")
#corrplot.mixed(coeff_validation$r, lower="ellipse", upper="number",tl.col="black",tl.pos="lt",col=colors,number.cex=0.7,tl.cex=0.6,cl.cex=0.6,cl.ratio=0.2,cl.align.text = "l",title = "Correlation matrix", order = "original",p.mat=coeff_validation$P,sig.level=0.01,insig="blank")
dev.off()
### --------------- NAs removal and writing output training and validation tables --------------- ###
if(enable_NA_removal==TRUE)
{
# Warn when the finite-row filtering above actually dropped rows.
if(original_rows_training!=dim(training.table)[1]) print("Warning: Analysis will be executed excluding rows with NA from the training set")
if(original_rows_validation!=dim(validation.table)[1]) print("Warning: Analysis will be executed excluding rows with NA from the validation set")
training.table<-data.frame(training.table)
validation.table<-data.frame(validation.table)
# Subset the spatial point objects with the same row masks so coordinates stay
# aligned with the filtered attribute tables. NOTE(review): the @data/@coords
# slots suggest sp SpatialPointsDataFrame objects -- confirm against LAND-SIP.
training.xy.table@data<-data.frame(id=training.xy.table@data[index_selection_training,])
training.xy.table@coords<-training.xy.table@coords[index_selection_training,]
validation.xy.table@data<-data.frame(id=validation.xy.table@data[index_selection_validation,])
validation.xy.table@coords<-validation.xy.table@coords[index_selection_validation,]
# Overwrite the input RData file with the cleaned tables.
save(list=c("training.table","validation.table","training.xy.table","validation.xy.table"),file = paste("datatable_inventory.RData",sep=""))
} else
{
# NA removal disabled: report (but do not raise) an error when NAs were found.
if(original_rows_training!=dim(training.table)[1]) print("Error: training set contains rows with NA values. All raster layers should contain finite values within the mask. If you want to execute the analysis excluding NA set the variable enable_NA_removal<-TRUE")
if(original_rows_validation!=dim(validation.table)[1]) print("Error: validation set contains rows with NA values. All raster layers should contain finite values within the mask. If you want to execute the analysis excluding NA set the variable enable_NA_removal<-TRUE")
}
result_dir_susceptibility<-getwd()
# Optionally export the training/validation points as ESRI shapefiles for
# inspection in a GIS client. NOTE(review): rgdal has been retired from CRAN;
# a future revision should migrate to sf::st_write.
if(export_shapefiles==TRUE)
{
require(rgdal)
writeOGR(training.xy.table,dsn=paste(result_dir_susceptibility,sep=""),layer="training",driver="ESRI Shapefile",overwrite_layer=TRUE)
writeOGR(validation.xy.table,dsn=paste(result_dir_susceptibility,sep=""),layer="validation",driver="ESRI Shapefile",overwrite_layer=TRUE)
}
# Optionally export the (cleaned) training and validation tables as
# tab-separated text files in the result directory.
if (export_txtfiles) {
  output_tables <- list(training = training.table, validation = validation.table)
  for (table_name in names(output_tables)) {
    write.table(
      output_tables[[table_name]],
      file = paste(result_dir_susceptibility, "/", table_name, ".txt", sep = ""),
      sep = "\t", dec = ".", row.names = FALSE, col.names = TRUE
    )
  }
}
|
b17babace064bbedd0d8db1de3596178b165d4a3
|
ce0c807647977f8125252a589fe49230493631db
|
/run_bayes_adults.R
|
787a4260a3f740590e8896e1975f39759baad4cd
|
[] |
no_license
|
kamilTuszynski/MOW
|
4af4ad729ca12ddc4e620b9c7327d7eb4df2813b
|
46cb824d021a85e2d0eb78437d3089a89a0701ed
|
refs/heads/master
| 2022-09-09T05:18:06.691257
| 2020-06-05T17:28:41
| 2020-06-05T17:28:41
| 268,463,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,091
|
r
|
run_bayes_adults.R
|
# Compare a "local" (neighbourhood-based) Naive Bayes classifier against a
# single global Naive Bayes model on the adult income data set, and print a
# confusion matrix for each approach.
rm(list = ls())
library(caret, quietly = TRUE)
library(e1071, quietly = TRUE)
source("local_classification.R")

data <- read.csv("adult_fixed.csv")

# Reproducible split: ~98.5% of rows for training, the remainder for testing.
set.seed(12345)
train <- sample(1:nrow(data), size = ceiling(0.985 * nrow(data)), replace = FALSE)
data_train <- data[train, ]
data_test <- data[-train, ]

start.time <- Sys.time()

# Local approach: each test row is classified by a Naive Bayes model fitted on
# a neighbourhood of 8000 training rows (see local_classification.R).
predLocal <- matrix(ncol = 2, nrow = 0)
for (test_idx in seq_len(nrow(data_test))) {
  test_row <- data_test[test_idx, ]
  local_result <- localClassification(test_row, data_train, 8000, algorithm = "NaiveBayes")
  predLocal <- rbind(predLocal, local_result)
  cat(sprintf("%s z %s\n", test_idx, nrow(data_test)))
}
# Column 2 holds the posterior probability of the '>50K' class.
predLocal <- ifelse(predLocal[, 2] > 0.5, '>50K', '<=50K')

# Global approach: one Naive Bayes model on the full training set.
nb.Model <- naiveBayes(class ~ ., data = data_train)
predGlobal <- predict(nb.Model, newdata = data_test, type = "raw")
predGlobal <- ifelse(predGlobal[, 2] > 0.5, '>50K', '<=50K')

end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken

# Confusion matrices for both approaches (printed when run non-interactively).
tLocal <- table(data_test$class, predLocal)
tGlobal <- table(data_test$class, predGlobal)
confusionMatrix(tLocal)
confusionMatrix(tGlobal)
|
f6685f1f44d324e4a6661030da36c7c89c6bcd5f
|
1c591b580a42e90ba318675e3cebeabdfc06534a
|
/R/func__plr.R
|
40471339643e047146513fb99a4ce23c9c87afa5
|
[
"Apache-2.0"
] |
permissive
|
wanyuac/GeneMates
|
974d9a883d43ccd7602167204d8b3ff5bba6b74c
|
e808430b2cdd920f1b9abd8b6b59993fde8754a7
|
refs/heads/master
| 2022-08-31T13:00:42.583172
| 2022-08-08T10:01:02
| 2022-08-08T10:01:02
| 138,949,733
| 25
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,661
|
r
|
func__plr.R
|
#' @title Firth's penalised logistic regression
#'
#' @description Fit pairwise allelic presence/absence data with Firth's penalised
#' logistic regression without a control for bacterial population structure.
#' Because this function is not the focus of GeneMates, it takes as inputs the
#' outputs of the lmm or findPhysLink function. As a result, users must run
#' either function before calling this one.
#'
#' Dependency: parallel, logistf and data.table
#'
#' @param pat An uncentred matrix of patterns of the allelic presence/absence status
#' Expect every cell of this matrix to be a dichotomous variable.
#' @param tests.pat A data frame specifying pairs of patterns whose association
#' will be tested for with the logistic regression.
#' @param tests.allele A data frame recording pairs of alleles whose association
#' have been tested for by the findPhysLink function.
#' @param n.cores Number of cores used to run GEMMA in parallel where possible.
#' -1: automatically detect the number of available cores N, but use N - 1
#' cores (recommended)
#' 0: automatically detect the number of available cores and use all of them.
#' Be careful when the current R session is not running through SLURM.
#' >= 1: use the number of cores as specified. n.cores is reset to the maximum
#' number of available cores N when n.cores > N.
#' @param p.adj.method Method for correcting p-values. Refer to the base function
#' p.adjust for legitimate values of this argument.
#'
#' @examples
#' assoc <- findPhysLink(...)
#' lr <- plr(pat = assoc$alleles$B, tests.pat = assoc$lmms.pat$dif$h1[, c("y_pat", "x_pat")],
#' tests.allele = assoc$tests$dif$tests, n.cores = 8, p.adj.method = "bonferroni")
#'
#' @author Yu Wan (\email{wanyuac@@126.com})
#' @export
#
# Copyright 2017 Yu Wan
# Licensed under the Apache License, Version 2.0
# First edition: 17 June 2017; latest edition: 30 August 2018
# Fit Firth's penalised logistic regression for every pattern pair in
# tests.pat, in parallel, then map the pattern-level results back to alleles.
# Returns list(pat = pattern-level data frame, allele = allele-level data frame).
plr <- function(pat, tests.pat, tests.allele, n.cores = -1, p.adj.method = "bonferroni") {
require(parallel)
require(data.table) # rbindlist function
# perform pairwise Firth's logistic regression
nc <- .setCoreNum(n.cores = n.cores, cores.avai = detectCores())
print(paste("Use", nc, "cores to fit logistic models.", sep = " "))
print(paste0(Sys.time(), ": Starting penalised logistic regression."))
cl <- makeCluster(nc)
# Export pat and tests.pat from this function's environment so that .flr
# (which reads them from its execution environment) can run on the workers.
clusterExport(cl = cl,
varlist = list("pat", "tests.pat"),
envir = environment()) # make variables accessible to different cores
# One .flr call per row of tests.pat; each returns a one-row data frame.
flms <- parLapply(cl, 1 : nrow(tests.pat), .flr)
stopCluster(cl)
print(paste0(Sys.time(), ": Regression analysis finished."))
# concatenate results and adjust p-values for multiple tests
print("Concatenating results and adjusting p-values.")
flms.pat <- as.data.frame(rbindlist(flms)) # pattern-level results
flms.pat$p_chisq_adj <- p.adjust(p = flms.pat$p_chisq, method = p.adj.method)
# utilise the existing function .patternToAlleles to restore allele names from patterns
flms.a <- .patternToAlleles(lmms = flms.pat,
tests = list(tests = tests.allele,
y.pats = unique(tests.pat$y_pat)),
h1 = TRUE, mapping = NULL)
# assign pair IDs
flms.a <- assignPairID(lmms = flms.a)
print(paste0(Sys.time(), ": This job is finished successfully."))
return(list(pat = flms.pat, allele = flms.a)) # pattern-level and allele-level results
}
# This is a subordinate function of plr.
# Requires the data frames pat and tests.pat in its execution environment
# (exported to the cluster workers by plr).
# i: row number in the data frame tests.pat
.flr <- function(i) {
  require(logistf)

  # Pull the i-th y-pattern/x-pattern combination to be tested.
  pair <- tests.pat[i, ]
  y <- pat[, paste0("pat_", pair[["y_pat"]])]
  x <- pat[, paste0("pat_", pair[["x_pat"]])]

  # Fit a Firth-penalised simple logistic model: logit(Y) ~ X.
  fit <- logistf(y ~ 1 + x)

  # Penalised-likelihood p-value for the coefficient of x (see the logistf
  # manual for the structure of the fitted object).
  p.chisq <- fit$prob[["x"]]

  # One-row summary of counts, coefficient estimate and test statistics.
  data.frame(
    y_pat = pair[["y_pat"]],
    x_pat = pair[["x_pat"]],
    n_y = sum(y),
    n_x = sum(x),
    n_xy = sum(as.logical(y) & as.logical(x)),
    beta = fit$coefficients[["x"]],
    se = sqrt(fit$var[2, 2]),
    lower_95 = fit$ci.lower[["x"]],
    upper_95 = fit$ci.upper[["x"]],
    chisq = qchisq(p = p.chisq, df = fit$df, lower.tail = FALSE),
    p_chisq = p.chisq,
    stringsAsFactors = FALSE
  )
}
|
075ec14280217f7bc607aa417a8f70302451f625
|
2bd971cc829a8639792f615d48fe143bd898a821
|
/modules/Operations/Type/type_operation.R
|
2a18608fd8ae4e56badb8a6560f38aee7526e5b7
|
[] |
no_license
|
DavidBarke/shinyplyr
|
e2acaf11585c3510df38982401fd83c834932e3d
|
ddc30c2c2361cec74d524f2000a07f3304a5b15f
|
refs/heads/master
| 2023-04-20T07:43:47.992755
| 2021-05-11T10:56:49
| 2021-05-11T10:56:49
| 250,501,858
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,549
|
r
|
type_operation.R
|
# UI: namespaced placeholder container that the type_operation server module
# fills with either the subrow header or the type overview.
type_operation_ui <- function(id) {
  shiny::uiOutput(
    outputId = shiny::NS(id, "op_container"),
    class = "type-op-container"
  )
}
# UI: namespaced placeholder for the per-column subrows rendered by the
# type_operation server module.
type_subrows_ui <- function(id) {
  shiny::uiOutput(
    outputId = shiny::NS(id, "subrows"),
    class = "subrows type-subrows"
  )
}
# Server module: lets the user change the type of each column of data_r().
# Renders a per-column subrow with a type selector (for allowed types) and
# returns a list with data_r, a reactive yielding the type-converted data.
# NOTE(review): `%_%` is presumably a project-defined infix paste operator
# used to build indexed input ids ("new_type_1", ...) -- confirm.
type_operation <- function(
input, output, session, .values, data_r, row_index, subrows_open_r
) {
ns <- session$ns
# Header row when the subrows are expanded; compact overview otherwise.
output$op_container <- shiny::renderUI({
if (subrows_open_r()) {
htmltools::div(
class = "type-op-sr-open grid-gap",
htmltools::div(
class = "grid-vertical-center",
htmltools::tags$b(
"Column"
)
),
htmltools::div(
class = "grid-vertical-center",
htmltools::tags$b(
"Old type"
)
),
htmltools::div(
class = "grid-vertical-center",
htmltools::tags$b(
"New type"
)
)
)
} else {
shiny::uiOutput(
outputId = ns("type_overview"),
class = "type-op-sr-closed grid-vertical-center"
)
}
})
col_names_r <- shiny::reactive({
names(data_r())
})
# Short type abbreviations ("int", "dbl", ...) per column, via pillar.
old_types_r <- shiny::reactive({
purrr::map_chr(data_r(), function(col) pillar::type_sum(col))
})
# Human-readable names for the old types; "unknown" for types not listed in
# the .values$TYPE_DATA lookup table.
old_type_names_r <- shiny::reactive({
purrr::map_chr(old_types_r(), function(type) {
if (!type %in% .values$TYPE_DATA$type) {
"unknown"
} else {
.values$TYPE_DATA$name[.values$TYPE_DATA$type == type]
}
})
})
# Compact "col: type" summary shown when the subrows are collapsed.
output$type_overview <- shiny::renderUI({
repl <- purrr::map2_chr(col_names_r(), new_type_names_r(), function(col_name, type_name) {
paste(col_name, type_name, sep = ": ")
})
paste(repl, collapse = ", ")
})
# Named vector of selectable types (names = display names, values = types).
allowed_type_data <- dplyr::filter(.values$TYPE_DATA, allowed == TRUE)
allowed_types <- allowed_type_data$type
names(allowed_types) <- allowed_type_data$name
# One subrow per column: index, column name, old type, and either a type
# selector or (for non-convertible types) the plain type string.
output$subrows <- shiny::renderUI({
ui <- purrr::pmap(
list(
col = col_names_r(), old_type = old_types_r(),
old_type_name = old_type_names_r(), index = seq_along(col_names_r())
),
function(col, old_type, old_type_name, index) {
# Only columns of allowed types can be changed to another allowed type
if (old_type %in% allowed_types) {
new_type_ui <- shiny::selectInput(
inputId = ns("new_type" %_% index),
label = NULL,
choices = list(
"Select a new type" = as.list(allowed_types)
),
selected = old_type
)
} else {
new_type_ui <- old_type
}
htmltools::div(
class = "subrow-container",
htmltools::div(
class = "subrow-index grid-center",
paste(row_index, index, sep = ".")
),
htmltools::div(
class = "subrow-content grid-gap",
htmltools::div(
class = "grid-vertical-center",
col
),
htmltools::div(
class = "grid-vertical-center",
old_type_name
),
htmltools::div(
new_type_ui
)
)
)
}
)
ui
})
# Indices of columns that have an allowed (convertible) type
allowed_indices_r <- shiny::reactive({
which(old_types_r() %in% allowed_types)
})
# New type per column: the user's selection for convertible columns, the
# unchanged old type otherwise. req() suspends until the input exists.
new_types_r <- shiny::reactive({
purrr::map_chr(seq_along(col_names_r()), function(index) {
if (index %in% allowed_indices_r()) {
shiny::req(input[["new_type" %_% index]])
} else {
old_types_r()[index]
}
})
})
new_type_names_r <- shiny::reactive({
purrr::map_chr(new_types_r(), function(type) {
if (!type %in% .values$TYPE_DATA$type) {
"unknown"
} else {
.values$TYPE_DATA$name[.values$TYPE_DATA$type == type]
}
})
})
# Walk only over columns whose type changed
diff_indices_r <- shiny::reactive({
which(new_types_r() != old_types_r())
})
# Apply the conversion function from TYPE_DATA to each changed column.
typed_data_r <- shiny::reactive({
new_types <- new_types_r()
data <- data_r()
for (index in diff_indices_r()) {
type_index <- which(.values$TYPE_DATA$type == new_types_r()[index])
type_fun <- .values$TYPE_DATA$convert_fun[[type_index]]
data[[index]] <- type_fun(data[[index]])
}
data
})
return_list <- list(
data_r = typed_data_r
)
return(return_list)
}
|
977866b3beef88c41de6e60e49dcfe6c63f987a9
|
03738314d1a665b54db4786b681246931eb62c2a
|
/Plot2.R
|
12a221d6032c47967533ce71e96dcad07b3ba217
|
[] |
no_license
|
Kornwhalice/ExData_Plotting1
|
0161590ab5b58318444469eae87fd603439444fa
|
05593b72a440acd6789060bf0576979181c90426
|
refs/heads/master
| 2021-01-17T08:46:19.303133
| 2015-03-08T23:10:36
| 2015-03-08T23:10:36
| 31,670,589
| 0
| 0
| null | 2015-03-04T17:40:05
| 2015-03-04T17:40:05
| null |
UTF-8
|
R
| false
| false
| 417
|
r
|
Plot2.R
|
# Plot 2: line chart of Global Active Power over 1-2 Feb 2007, saved as PNG.
totData <- read.csv("~/Math 378/plotData/household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
# Keep only the two days of interest (dates stored as d/m/Y strings).
partData <- totData[totData$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time into a single POSIXlt timestamp for the x axis.
# NOTE(review): "?" missing-value markers are not declared via na.strings, so
# Global_active_power may come in as character — confirm against the raw file.
date_time <- strptime(paste(partData$Date, partData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
png("plot2.png",width=480,height=480)
# BUG FIX: the original called plot(datetime, ...) but the variable created
# above is `date_time`; `datetime` is undefined and the script errored.
plot(date_time, partData$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
7b09bc106d0aa1dc7b5670223191bc9911199227
|
2e6f8e6eaf11f6e3fe622428dd3d4ce9b9185278
|
/ctsmr/ctsmr-package/man/predict.ctsmr.Rd
|
5e5b8eff388321b805e6b2e90b2cc2b34c10609d
|
[
"MIT"
] |
permissive
|
perNyfelt/renjinSamplesAndTests
|
c9498a3eebf35f668bc1061a4c1f74a6bf8e2417
|
5140850aff742dbff02cd4a12a4f92b32a59aa25
|
refs/heads/master
| 2021-07-23T23:58:59.668537
| 2021-07-23T10:21:39
| 2021-07-23T10:21:39
| 202,578,093
| 1
| 1
|
MIT
| 2020-10-15T18:13:49
| 2019-08-15T16:45:33
|
Fortran
|
UTF-8
|
R
| false
| false
| 775
|
rd
|
predict.ctsmr.Rd
|
\name{predict.ctsmr}
\alias{predict.ctsmr}
\title{Predict method for CTSM fits}
\usage{
\method{predict}{ctsmr}(object, n.ahead = 1, covariance = FALSE,
newdata = NULL, firstorderinputinterpolation = FALSE, x0 = NULL,
vx0 = NULL, ...)
}
\arguments{
\item{object}{Object of class 'ctsmr'}
\item{n.ahead}{The number of steps ahead for which
prediction is required.}
\item{covariance}{Should the full covariance matrix be
returned?}
\item{newdata}{An optional data frame with new data.}
\item{firstorderinputinterpolation}{If TRUE (default
FALSE) first order linear interpolation of the inputs
between samples.}
\item{x0}{initial state}
\item{vx0}{initial state covariance}
\item{...}{Unused.}
}
\description{
Predict method for CTSM fits
}
|
cfc0476175de4c5b8aae03767a5d074d19506731
|
c0e8301d190b515ac7a390dd6e0f269e3ca3844c
|
/brms_modsel_func.R
|
675a59e71e106e221c60922f70b0c0250fbddb2d
|
[] |
no_license
|
tmerkling/TBMU-divorce
|
d2c0d4813b768452858fb509dd14e98d8d6462d3
|
089e24c44729dc7d5f4802c9f1535e0efd421d39
|
refs/heads/master
| 2020-03-22T20:20:50.604257
| 2018-11-26T16:40:55
| 2018-11-26T16:40:55
| 140,591,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,566
|
r
|
brms_modsel_func.R
|
#brms functions for model selection on list of models
# function creating a WAIC table when models are stored in a list
# Run brms::WAIC on models supplied via `...`.
#
# brms::WAIC expects its first model as argument `x`, so the first list
# element is renamed "x" when no element already carries that name.
#
# BUG FIX: in the original, `args` was assigned only inside the if-branch, so
# the call failed with "object 'args' not found" whenever an element named
# "x" was already present.
waic_wrapper <- function(...) {
  model_list <- list(...)
  if (!"x" %in% names(model_list)) {
    names(model_list)[1] <- "x"
  }
  args <- c(model_list)
  do.call(brms::WAIC, args)
}
# function calculating WAIC weights when models are stored in a list
# Compute WAIC-based model weights via brms::model_weights for models
# supplied via `...`. The first list element is renamed "x" (the name
# brms::model_weights expects for its first model) when not already present.
#
# BUG FIX: in the original, `args` was assigned only inside the if-branch, so
# the call failed with "object 'args' not found" whenever an element named
# "x" was already present (and the weights = "waic" setting was lost too).
weights_wrapper <- function(...) {
  model_list <- list(...)
  if (!"x" %in% names(model_list)) {
    names(model_list)[1] <- "x"
  }
  args <- c(model_list, weights = "waic")
  do.call(brms::model_weights, args)
}
# function creating a table combining WAIC, deltaWAIC and WAIC weights in decreasing order
# Build a model-selection table (WAIC, delta-WAIC, WAIC weights and their
# cumulative sum) for a tibble `x` with columns `model` (fitted brms models)
# and `name` (model labels). Rows are ordered by increasing delta-WAIC and
# the table is printed (print() returns it, so callers also receive it).
waic_table <- function(x) {
  WAIC_results <- invoke(waic_wrapper, .x = x$model, model_names = x$name)
  # Element 5 of each result holds the WAIC estimate; the last list element
  # is the pairwise model comparison, hence the [-length(...)] drops below.
  min_WAIC <- min(sapply(WAIC_results, "[[", 5)[-length(WAIC_results)]) # extracting WAIC value from the WAIC wrapper output
  deltas <- sapply(WAIC_results, "[[", 5)[-length(WAIC_results)] - min_WAIC # calculating deltaWAIC to order models
  modsel_table <- data.frame(
    Model = names(WAIC_results)[-length(WAIC_results)], # removing the last item because it's the difference between models
    WAIC = sapply(WAIC_results, "[[", 5)[-length(WAIC_results)],
    delta_WAIC = sapply(WAIC_results, "[[", 5)[-length(WAIC_results)] - min_WAIC,
    WAIC_weights = invoke(weights_wrapper, .x = x$model, model_names = x$name),
    row.names = NULL
  )[order(deltas),]
  # Cumulative weights over the ordered table feed the 95% confidence set
  # selection performed downstream (sub_modvar).
  modsel_table$WAIC_cumsum <- cumsum(modsel_table$WAIC_weights)
  print(modsel_table)
}
#function to subset the 95% confidence set and get the variables present in those models
# Subset the 95% cumulative-WAIC-weight confidence set of models and collect
# the fixed-effect terms ("b_..." parameters) appearing in them.
#
# x: tibble with all models (columns `name`, `model`); y: output of waic_table.
# Returns a 3-element list: [1] the model subset, [2] a per-model tibble of
# variables and fixed effects, [3] the unique fixed-effect names.
#
# NOTE(review): the `<-` calls inside list() assign sub_mod/sub_tab/sub_var as
# side effects and yield an *unnamed* list; downstream code (par_vec,
# brm_mod_avg) indexes it positionally as z[1] and z[3] — confirm before
# restructuring.
# NOTE(review): `variables` inside tibble() refers to the column defined on
# the previous line (tibble evaluates its arguments sequentially).
sub_modvar <- function(x,y) { # x being tibble with all models, y being output of WAIC_table
  list(sub_mod <- x[x$name %in% y$Model[1:which.min(abs(y$WAIC_cumsum - 0.95))],],
       sub_tab <- tibble(
         name = sub_mod$name,
         variables = lapply(sub_mod$model, get_variables),
         fixed = lapply(variables, grep, pattern = "^b_.*", value = TRUE)),
       sub_var <- unique(unlist(sub_tab$fixed)))
}
nam_fixef <- function(x){row.names(fixef(x))} # extracts coeff names from model list and
# subsetting model set depending on which variable is present to get model averaged estimates with no shrinkage
# Given the output `z` of sub_modvar(), return the unique fixed-effect
# parameter names present in at least one model of the confidence set.
par_vec <- function(z) {
  coef_names <- lapply(z[[1]]$model, nam_fixef)
  unique(unlist(coef_names))
}
# Make brms::posterior_average work with a list of models supplied via `...`,
# using WAIC weights. The first list element is renamed "x" (the argument
# name brms::posterior_average expects) when not already present.
#
# BUG FIX: in the original, `args` was assigned only inside the if-branch, so
# the call failed with "object 'args' not found" whenever an element named
# "x" was already present (and the weights = "waic" setting was lost too).
post_avg_wrapper <- function(...) {
  model_list <- list(...)
  if (!"x" %in% names(model_list)) {
    names(model_list)[1] <- "x"
  }
  args <- c(model_list, weights = "waic")
  do.call(brms::posterior_average, args)
}
# Model-averaged fixed-effect estimates over the confidence set of models.
#
# z: output of sub_modvar() (indexed positionally: z[1] = model subset,
#    z[3] = unique fixed-effect names).
# For each parameter, averages posteriors across the models that contain it
# (with WAIC weights) or, when only one model contains it, extracts that
# model's estimate directly. Prints (and thereby returns) a table with
# Estimate, Est_Error and the 2.5%/97.5% quantiles per parameter.
brm_mod_avg <- function(z) { # z is output from sub_modvar function
  parm <- par_vec(z)
  # creates a matrix of zero and one showing which parameters are in which model
  mod_tab <- data.frame(
    apply(sapply(parm,
                 function(y){as.numeric(grepl(y, lapply(z[1][[1]]$model, nam_fixef)))}) -
            # subtracts models with interactions for main effects, because they don't have the same meaning
            sapply(parm,
                   function(y) {as.numeric(grepl(paste0(y,":"), lapply(z[1][[1]]$model, nam_fixef)))}) -
            sapply(parm,
                   function(y) {as.numeric(grepl(paste0(":", y), lapply(z[1][[1]]$model, nam_fixef)))}),
          2, as.logical))
  list_mod <- apply(mod_tab,2,function(x) {z[1][[1]][x,]}) # makes subset of models for each variables to select only those with variable present
  # data.frame() mangled ":" in column names to "."; restore the original names.
  names(list_mod) <- gsub("\\.",":",names(list_mod))
  # Pre-allocate the output table, one row per fixed-effect parameter.
  tab_modavg <- data.frame(matrix(rep(0,length(z[3][[1]]) * 4), ncol = 4, dimnames = list(gsub("b_","",z[3][[1]]),c("Estimate","Est_Error","Q2.5","Q97.5"))))
  for(i in 1:length(z[3][[1]]))
  {
    brms_parm = z[3][[1]][i]
    mod_parm = gsub("b_","", brms_parm)
    mod_set = list_mod[i][[1]]$model
    mod_names = list_mod[i][[1]]$name
    # calculates posterior average for variables appearing in more than one model
    if(length(mod_set) > 1){
      parm_modavg = posterior_summary(invoke(post_avg_wrapper, .x = mod_set, pars = brms_parm, model_names = mod_names))}
    else {
      # extracts estimates for variables appearing in only one model
      mod_summ = fixef(mod_set[[1]])
      parm_modavg = mod_summ[which(row.names(mod_summ) == mod_parm),]
    }
    tab_modavg[i,] <- parm_modavg
  }
  print(tab_modavg)
}
|
1e5f676b1f9d24f1d428f889661dc44a5efe2c4f
|
5852d290393c0344e65d8f1722546e919ee48ebc
|
/plot4.R
|
56b5b0518d7ef4681dc805fc0a00a84d74cfcef3
|
[] |
no_license
|
owenkern/ExData_Plotting1
|
621dbc5f5aa6181e997ddba6c93723d0fb95621c
|
ee955ba24041f63ceb71b6ee683f70fd2681bcdf
|
refs/heads/master
| 2020-12-24T10:15:56.095107
| 2015-03-08T04:10:51
| 2015-03-08T04:10:51
| 31,831,313
| 0
| 0
| null | 2015-03-07T23:19:42
| 2015-03-07T23:19:42
| null |
UTF-8
|
R
| false
| false
| 1,591
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power measurements for 1-2 Feb 2007.
filename <- "plot4.png"
# Semicolon-separated file; "?" marks missing values.
data <- read.csv2(
  "exdata_data_household_power_consumption/household_power_consumption.txt",
  na.strings = "?")
# Keep the raw date strings: they are needed again below to rebuild full
# timestamps after $Date has been converted to POSIXlt.
charDate <- as.character(data$Date)
data$Date <- strptime(data$Date, "%d/%m/%Y")
goodDates <- c(as.POSIXlt("2007-02-01", format = "%Y-%m-%d"),
               as.POSIXlt("2007-02-02", format = "%Y-%m-%d"))
good <- data$Date %in% goodDates
data <- data[good,]
charDate <- charDate[good]
# Combine date + time into a single timestamp for the x axes.
data$Time <- strptime(paste(charDate, data$Time), "%d/%m/%Y %H:%M:%S")
png(filename)
# Measurement columns were read as factors/character; coerce to numeric
# via character to preserve the printed values.
data$Global_active_power = as.numeric(as.character(data$Global_active_power))
data$Global_reactive_power = as.numeric(as.character(
  data$Global_reactive_power))
data$Voltage = as.numeric(as.character(data$Voltage))
data$Sub_metering_1 = as.numeric(as.character(data$Sub_metering_1))
data$Sub_metering_2 = as.numeric(as.character(data$Sub_metering_2))
data$Sub_metering_3 = as.numeric(as.character(data$Sub_metering_3))
# 2x2 layout: active power, voltage, sub-metering, reactive power.
par(mfrow = c(2, 2))
plot(data$Time, data$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
plot(data$Time, data$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage")
plot(data$Time, data$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n")
plot(data$Time, data$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
|
a5fafce3fa17bd03dfa1a2a262ff7441ee9dfbd1
|
f69d8832dcd0e0072a81847b59cf5c68c541708f
|
/inst/check/check_reflmaxcoupling.R
|
24fd32f67fdb04d808c3390275f7d0976deb076c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
shizelong1985/unbiasedmcmc
|
5382f0bc1780193c9e72ce9962332c02645311f5
|
24dab0bf66597d45a82fd83e5302daf644164cae
|
refs/heads/master
| 2022-10-15T03:15:44.833752
| 2020-06-08T09:32:59
| 2020-06-08T09:32:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,384
|
r
|
check_reflmaxcoupling.R
|
# Sanity checks for the reflection-maximal coupling of bivariate Gaussians
# with identical covariance matrices (rmvnorm_reflectionmax from unbiasedmcmc).
library(unbiasedmcmc)
library(doParallel)
library(doRNG)
# register parallel cores
registerDoParallel(cores = detectCores()-2)
setmytheme()
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (it only clears
# the global environment, not loaded packages/options) — kept for fidelity.
rm(list = ls())
set.seed(21)
### maximal coupling of bivariate Gaussians with identical covariance matrices
mu1 <- c(0.2, 0.3)
mu2 <- c(0.0, 0.8)
Sigma <- diag(1, 2, 2)
Sigma[1,2] <- Sigma[2,1] <- 0.8
Sigma[2,2] <- 2
# The coupling routine takes the Cholesky factor and its inverse.
Sigma_chol <- chol(Sigma)
Sigma_inv_chol <- solve(Sigma_chol)
# sample once
rmvnorm_reflectionmax(mu1, mu2, Sigma_chol, Sigma_inv_chol)
# function output samples (column-bound) in $xy, and a boolean indicator of identity in $identical
nsamples <- 5e4
xy <- foreach(i = 1:nsamples) %dorng% {
  rmvnorm_reflectionmax(mu1, mu2, Sigma_chol, Sigma_inv_chol)
}
# The $identical flag must agree with element-wise equality of the two draws.
identicals <- sapply(xy, function(x) x$identical)
equalvalues <- sapply(xy, function(x) all(x$xy[,1] == x$xy[,2]))
all(identicals == equalvalues)
# collect samples from both distributions
sample1 <- t(sapply(xy, function(x) x$xy[,1]))
sample2 <- t(sapply(xy, function(x) x$xy[,2]))
# and check that they follow the correct distribution
colMeans(sample1)
mu1
colMeans(sample2)
mu2
cov(sample1)
cov(sample2)
Sigma
# estimate of 1-TVD
mean(sapply(xy, function(x) x$identical))
# visualize marginals
hist(sample1[,1], prob = TRUE, nclass = 100)
curve(dnorm(x, mu1[1], sqrt(Sigma[1,1])), add = TRUE, col = "red")
hist(sample1[,2], prob = TRUE, nclass = 100)
curve(dnorm(x, mu1[2], sqrt(Sigma[2,2])), add = TRUE, col = "red")
hist(sample2[,1], prob = TRUE, nclass = 100)
curve(dnorm(x, mu2[1], sqrt(Sigma[1,1])), add = TRUE, col = "red")
hist(sample2[,2], prob = TRUE, nclass = 100)
curve(dnorm(x, mu2[2], sqrt(Sigma[2,2])), add = TRUE, col = "red")
# Univariate check: reflection-maximal coupling vs. plain maximal coupling
# of N(mu1, 2) and N(mu2, 2).
mu1 <- 1
mu2 <- 2
Sigma_proposal <- diag(2, 1, 1)
Sigma_chol <- chol(Sigma_proposal)
Sigma_chol_inv <- solve(Sigma_chol)
nsamples <- 5e4
xy <- foreach(i = 1:nsamples) %dorng% {
  rnorm_reflectionmax(mu1, mu2, Sigma_chol[1,1])
}
# Marginals should match the target densities.
# IDIOM FIX: `add = T` replaced by `add = TRUE` (T is a reassignable binding,
# not a reserved word; behavior unchanged).
hist(sapply(xy, function(x) x$xy[1]), prob = TRUE, nclass = 100)
curve(dnorm(x, mu1, Sigma_chol[1]), add = TRUE)
hist(sapply(xy, function(x) x$xy[2]), prob = TRUE, nclass = 100)
curve(dnorm(x, mu2, Sigma_chol[1]), add = TRUE)
rmvnorm_reflectionmax(mu1, mu2, Sigma_chol, Sigma_chol_inv)
# Meeting frequency of the reflection-maximal coupling...
mean(sapply(xy, function(xy) xy$identical))
# ...compared with the plain maximal coupling of the same pair.
xymax <- foreach(i = 1:nsamples) %dorng% {
  rnorm_max_coupling(mu1, mu2, Sigma_chol[1,1], Sigma_chol[1,1])
}
mean(sapply(xymax, function(xy) xy$identical))
|
6c3b41b429b3788c470528d636c133dd7b05baef
|
008071d29a3524ca79b1af2fb79de92582d55f0c
|
/waitaki_waikato.R
|
c4a386b3944ef1c69fa9a528dfc61c2a0659a25a
|
[
"MIT"
] |
permissive
|
merrillrudd/stream_network_NZ
|
be8a5bc4fcef8abe097c477254af2c29d314fe1f
|
ffe2778109c2851be5309cda46671897b13ab3af
|
refs/heads/master
| 2020-05-25T08:34:00.850684
| 2019-12-05T19:00:30
| 2019-12-05T19:00:30
| 187,713,470
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,541
|
r
|
waitaki_waikato.R
|
# NOTE(review): rm(list=ls()) and the hard-coded absolute nz_dir path are
# script anti-patterns — kept as-is for fidelity; consider here::here().
rm(list=ls())
################
## Directories
################
nz_dir <- "/home/merrill/stream_network_NZ"
sub_dir1 <- file.path(nz_dir, "Waitaki")
sub_dir2 <- file.path(nz_dir, "Waikato")
data_dir <- file.path(nz_dir, "data")
data_dir1 <- file.path(sub_dir1, "data")
data_dir2 <- file.path(sub_dir2, "data")
fig_dir <- file.path(nz_dir, "figures")
dir.create(fig_dir, showWarnings=FALSE)
#################
## Packages
#################
library(tidyverse)
library(RColorBrewer)
# ################
## Load data
################
# Each .rda provides a list with (at least) "network" and "observations".
load(file.path(data_dir, 'nz_longfin_eel.rda'))
load(file.path(data_dir1, 'nz_waitaki_longfin_eel.rda'))
load(file.path(data_dir2, 'nz_waikato_longfin_eel.rda'))
# Tag each catchment's network and combine; the national network keeps only
# points not already covered by the two focal catchments.
taki_net <- nz_waitaki_longfin_eel[["network"]] %>% mutate("Catchment" = "Waitaki")
kato_net <- nz_waikato_longfin_eel[["network"]] %>% mutate("Catchment" = "Waikato")
reg_net <- rbind.data.frame(taki_net, kato_net)
nz_net <- nz_longfin_eel[["network"]] %>% mutate("Catchment" = "Other") %>% filter(lat %in% reg_net$lat == FALSE) %>% filter(long %in% reg_net$long == FALSE)
nz_reg_net <- rbind.data.frame(reg_net, nz_net)
# Observation tables: tag catchment and rename the response to `present`.
taki_obs <- nz_waitaki_longfin_eel[["observations"]] %>%
  mutate(Catchment = "Waitaki") %>%
  rename(present = data_value)
kato_obs <- nz_waikato_longfin_eel[["observations"]] %>%
  mutate(Catchment = "Waikato") %>%
  rename(present = data_value)
# Waikato values can exceed 1 (counts); cap at 1 to get presence/absence.
# IMPROVED: replaced a per-row sapply(1:nrow(...)) loop with the equivalent
# vectorized ifelse() (same values, same NA propagation, and no 1:nrow
# footgun on zero-row input).
kato_obs$present <- ifelse(kato_obs$present > 1, 1, kato_obs$present)
reg_obs <- rbind.data.frame(taki_obs, kato_obs)
reg_obs$year <- as.numeric(reg_obs$year)
# Per-year, per-catchment sample size, number and proportion of presences.
reg_obs_info <- reg_obs %>%
  group_by(year, Catchment) %>%
  summarise(nsamp = length(present),
            npres = length(which(present > 0)),
            ppres = length(which(present > 0))/length(present))
kato_obs_info <- reg_obs_info %>% filter(Catchment == "Waikato")
taki_obs_info <- reg_obs_info %>% filter(Catchment == "Waitaki")
# National map: full network in black dots, the two focal catchments colored.
map <- ggplot() +
  geom_point(data = nz_net, aes(x = long, y = lat), pch = ".") +
  geom_point(data = reg_net, aes(x = long, y = lat, color = Catchment), cex=0.5) +
  scale_color_brewer(palette = "Set1") +
  xlab("Longitude") + ylab("Latitude") +
  theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Region_map.png"), map, height = 10, width = 11)
## map with regions
# Per-catchment panels with observation locations colored by sampling year.
regmap <- ggplot() +
  geom_point(data = reg_net, aes(x = long, y = lat), color = "gray") +
  geom_point(data = reg_obs, aes(x = long, y = lat, fill = year), pch=21, cex=5) +
  scale_fill_distiller(palette = "RdBu") +
  facet_wrap(Catchment~., scales = "free") +
  guides(fill=guide_legend(title="Year")) +
  xlab("Longitude") + ylab("Latitude") +
  theme_bw(base_size = 14)
ggsave(file.path(fig_dir, "Region_map_observations.png"), regmap, height = 10, width = 20)
# Waitaki: presence/absence by year, one panel per year.
takimap <- ggplot() +
  geom_point(data = taki_net, aes(x = long, y = lat), color = "gray", cex = 0.5) +
  geom_point(data = taki_obs, aes(x = long, y = lat, fill = factor(present)), pch = 21, cex = 4, alpha = 0.75) +
  scale_fill_viridis_d() +
  facet_wrap(year~.) +
  guides(fill=guide_legend(title="Present")) +
  theme_bw()
ggsave(file.path(fig_dir, "Waitaki_map_byYear.png"), takimap, height = 15, width = 18)
# Waikato: same presentation as the Waitaki panel above.
katomap <- ggplot() +
  geom_point(data = kato_net, aes(x = long, y = lat), color = "gray", cex = 0.5) +
  geom_point(data = kato_obs, aes(x = long, y = lat, fill = factor(present)), pch = 21, cex = 4, alpha = 0.75) +
  scale_fill_viridis_d() +
  facet_wrap(year~.) +
  guides(fill=guide_legend(title="Present")) +
  theme_bw()
ggsave(file.path(fig_dir, "Waikato_map_byYear.png"), katomap, height = 15, width = 18)
|
fd54a87adec527e0a6e7e2c0dfd8e1dd3b84c25f
|
61c12c2ca9ce163ca6dd9715ffd33b50c3f5e32f
|
/FinalMS.R
|
57ee66887a2ca2acd68f3d1616d42d04514f76d7
|
[] |
no_license
|
modestosierra/RProjects
|
1b3b61030594aa5dc3e931166b17c94ef52eab08
|
493d5c07908ec6e2545a58a74688ebdc1bc281d5
|
refs/heads/master
| 2021-01-17T17:35:19.571583
| 2016-10-17T07:04:40
| 2016-10-17T07:04:40
| 70,426,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
FinalMS.R
|
# Weight-lifting exercise classification: train a random forest on the
# pml-training data and predict the 20 test cases.
#load libraries
library(caret)
library(randomForest)
# Load data; treat "NA", "#DIV/0!" and empty strings as missing.
trainingData = read.csv("pml-training.csv", na.strings=c("NA","#DIV/0!",""));
#summary(trainingData);
# Drop the first 7 columns (identifiers/timestamps, not predictive).
trainingData <-trainingData[,-c(1:7)]
# Keep only columns with no missing values.
trainingData <- trainingData[, colSums(is.na(trainingData)) == 0]
# 60/40 split into training and validation sets.
# NOTE(review): no set.seed() before createDataPartition, so the split (and
# hence the reported accuracy) is not reproducible — confirm if intended.
trainingPartition <- createDataPartition(y=trainingData$classe, p=0.6, list=FALSE)
myTraining <- trainingData[trainingPartition, ]
myValidation <- trainingData[-trainingPartition, ]
dim(myTraining); dim(myValidation);
# Model 1: random forest on all remaining predictors.
modRF <- randomForest(classe ~. , data=myTraining)
predRF <- predict(modRF, myValidation, type = "class")
confusionMatrix(predRF , myValidation$classe)
# The model looks good, so I'm not trying any other
# Apply the chosen model to the held-out test file.
testData = read.csv("pml-testing.csv")
predict(modRF,testData)
|
dac6d52dfed15075ed7c5fe559f03f41bfa924d7
|
caea54b3de3c4373bedffbd9d09e47e244636019
|
/man/rhub.Rd
|
65da3c9ffa37447b0aa0c3b0e17d76096324df42
|
[] |
no_license
|
ashiklom/rtcl
|
66ce642dce90727c40f066b0ee8a2016da315e41
|
995519859544900ba2056a711cec8371da5a4bf0
|
refs/heads/master
| 2023-01-31T00:14:05.680419
| 2020-12-03T22:19:13
| 2020-12-03T22:19:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,000
|
rd
|
rhub.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhub.r
\name{rhub}
\alias{rhub}
\title{Upload package to rhub}
\usage{
rhub(platform = NULL, checkforcran = FALSE, rdevel = FALSE, path = getwd())
}
\arguments{
\item{platform}{[\code{character(1)}]\cr
Check on the platform specified here. For details see \code{\link[rhub]{platforms}}}
\item{checkforcran}{[\code{logical(1L)}]\cr
Use \code{\link[rhub]{check_for_cran}} instead.}
\item{rdevel}{[\code{logical(1L)}]\cr
Use \code{\link[rhub]{check_with_rdevel}} instead. This switch is only taken into account
with \code{checkforcran = FALSE}. It automatically selects one devel platform.}
\item{path}{[\code{character}]\cr
If no path to a DESCRIPTION is given, the package looks for a DESCRIPTION in
the current directory and up to two parent directories.}
}
\value{
Invisibly returns \code{TRUE} on success.
}
\description{
Uploads a package located in \code{path} to the rhub service via \code{\link[rhub]{check}}.
}
|
768f6d32fae96446805aeb9c0c29c92e81c0131d
|
67c59a878124ec9f1282a8b323fb01325661e9c8
|
/NCAAF_L1_Model.R
|
c96f1a46c1c8fdbb72a937f6b50c58ae32f92d70
|
[] |
no_license
|
MattC137/NCAA-Football-Forecasts
|
19bd37746b51628584f7178c267e770cbb0c124f
|
cce1dd83ec720ceb2501770bca147cc76a1cd9ed
|
refs/heads/master
| 2023-01-08T09:05:25.663974
| 2020-11-09T00:54:07
| 2020-11-09T00:54:07
| 308,741,722
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,375
|
r
|
NCAAF_L1_Model.R
|
# NCAA football ELO model: load game-level data and prepare played/future sets.
library(dplyr)
library(readr)
library(ggplot2)
NCAAF_L1 <- read_csv("https://raw.githubusercontent.com/MattC137/Open_Data/master/Data/Sports/NCAAF/NCAAF_Level_One.csv")
NCAAF_L1_Teams <- read_csv("https://raw.githubusercontent.com/MattC137/Open_Data/master/Data/Sports/NCAAF/NCAAF_Team_List.csv")
#### Setup ####
# Unplayed (but not canceled/postponed) games are forecast later.
NCAAF_L1_Future <- NCAAF_L1 %>%
  filter(Played == FALSE, Game_ID != "Canceled", Game_ID != "Postponed")
# Played games, ordered chronologically so the ELO loop processes them in order.
NCAAF_L1 <- NCAAF_L1 %>%
  filter(Played == TRUE) %>%
  arrange(Date, Game_ID)
# Recode W/L/T to 1/0/0.5 and add placeholder ELO columns filled by the loop.
NCAAF_L1 <- NCAAF_L1 %>% mutate(
  ELO = 0,
  Opp_ELO = 0,
  Result = ifelse(Result == "W", 1, Result),
  Result = ifelse(Result == "L", 0, Result),
  Result = ifelse(Result == "T", 0.5, Result),
  Result = as.numeric(Result)
)
# Starting ratings: 1500 for FBS teams, 1200 otherwise.
NCAAF_L1_Teams <- NCAAF_L1_Teams %>% mutate(
  ELO = ifelse(FBS == 1, 1500, 1200),
)
#### ELO ####
# Sequential ELO update. Games are stored as consecutive row pairs
# (row i = team A's view, row i+1 = team B's view), so only odd rows are
# processed and each iteration updates both rows at once. K-factor = 40.
for(i in 1:nrow(NCAAF_L1)){
  if(i %% 2 != 0){
    # i = 1
    print(i)
    # View(head(NCAAF_L1))
    Team_A <- NCAAF_L1$Team[i]
    Team_B <- NCAAF_L1$Team[i+1]
    Result_A <- NCAAF_L1$Result[i]
    Result_B <- NCAAF_L1$Result[i+1]
    ## Get Current ELO ##
    ELO_A <- as.numeric(NCAAF_L1_Teams[NCAAF_L1_Teams$Team == Team_A, "ELO"])
    ELO_B <- as.numeric(NCAAF_L1_Teams[NCAAF_L1_Teams$Team == Team_B, "ELO"])
    ## Load current ELO into the main dataset ##
    # Pre-game ratings are recorded so they can serve as model features later.
    NCAAF_L1$ELO[i] <- ELO_A
    NCAAF_L1$Opp_ELO[i] <- ELO_B
    NCAAF_L1$ELO[i+1] <- ELO_B
    NCAAF_L1$Opp_ELO[i+1] <- ELO_A
    # View(NCAAF_L1 %>% select(Date, Season, Team, Opponent, Result, Points_For, Points_Against, ELO, Opp_ELO))
    ## Update ELOs
    # Standard ELO expected scores from the logistic rating curve.
    R_A <- 10^(ELO_A/400)
    R_B <- 10^(ELO_B/400)
    E_A <- R_A/(R_A + R_B)
    E_B <- R_B/(R_A + R_B)
    Elo_Updated_A <- ELO_A + 40 * (Result_A - E_A)
    Elo_Updated_B <- ELO_B + 40 * (Result_B - E_B)
    ## Update Team ELOs
    NCAAF_L1_Teams[NCAAF_L1_Teams$Team == Team_A, "ELO"] <- Elo_Updated_A
    NCAAF_L1_Teams[NCAAF_L1_Teams$Team == Team_B, "ELO"] <- Elo_Updated_B
  }
}
# Interactive check: current top-25 FBS teams by rating.
View(NCAAF_L1_Teams %>% filter(FBS == 1) %>% arrange(desc(ELO)) %>% top_n(25))
#### Naive Wins ####
# Baseline forecast: predict a win whenever a team's pre-game ELO is higher.
NCAAF_L1 <- NCAAF_L1 %>% mutate(
  ELO = as.numeric(ELO),
  Opp_ELO = as.numeric(Opp_ELO),
  Elo_Difference = ELO - Opp_ELO,
  Elo_Forecast_Pred = ifelse(ELO > Opp_ELO, 1, 0),
  Elo_Forecast_Result = ifelse(Elo_Forecast_Pred == Result, 1, 0),
)
#### 2019 Naive Win Rate ####
# Accuracy of the naive higher-ELO-wins rule on the 2019 season.
Results_2019 <- NCAAF_L1 %>% filter(Season == 2019)
sum(Results_2019$Elo_Forecast_Result)/nrow(Results_2019)
#### Spread Forecast ####
# Linear model for the point spread, trained on the 2014-2018 seasons.
spread_lm_1 <- lm(Spread ~ Elo_Difference + Home, data = NCAAF_L1 %>% filter(Season > 2013, Season <= 2018))
NCAAF_L1$Spread_Pred_lm_1 <- predict(spread_lm_1, newdata = NCAAF_L1)
Results_2019$Spread_Pred_lm_1 <- predict(spread_lm_1, newdata = Results_2019)
#### Win Forecast ####
# Logistic regression for win probability, trained on the 2014-2018 seasons.
win_prob_glm_1 <- glm(Result ~ Elo_Difference + Home, family = binomial, NCAAF_L1 %>% filter(Season > 2013, Season <= 2018))
NCAAF_L1$win_prob_glm_1 <- predict(win_prob_glm_1, newdata = NCAAF_L1, type = "response")
#### Test Model on 2019 ####
# BUG FIX: the original omitted type = "response" (predict.glm then returns
# log-odds, not probabilities) and thresholded Spread_Pred_lm_1 — the point
# spread — at 0.5 instead of the win probability, so the reported 2019
# accuracy was computed from the wrong quantity.
Results_2019$win_prob_glm_1 <- predict(win_prob_glm_1, newdata = Results_2019, type = "response")
Results_2019$win_prob_glm_1 <- ifelse(Results_2019$win_prob_glm_1 >= 0.5, 1, 0)
sum(Results_2019$win_prob_glm_1 == Results_2019$Result )/nrow(Results_2019)
# ggplot(Results_2019) + geom_point(aes(x = Spread, y = Spread_Pred_lm_1))
#### Next Weeks Games ####
# Forecast the upcoming week's games using the current team ratings.
NCAAF_L1_Future$Date <- as.Date(NCAAF_L1_Future$Date, origin = "1970-01-01")
NCAAF_This_Week <- NCAAF_L1_Future %>% filter(Date > "2020-11-08", Date <= "2020-11-15") %>%
  select(Date, Season, Team, Opponent, Home, Neutral_Location, Game_ID)
# Attach each team's and each opponent's current ELO.
NCAAF_This_Week <- NCAAF_This_Week %>%
  left_join(NCAAF_L1_Teams %>% select(Team, ELO), by = c("Team" = "Team"))
NCAAF_This_Week <- NCAAF_This_Week %>%
  left_join(NCAAF_L1_Teams %>% select(Team, ELO), by = c("Opponent" = "Team"))
# The two joins produce ELO.x/ELO.y; rename positionally to ELO / Opp_ELO.
names(NCAAF_This_Week) <- c("Date", "Season", "Team", "Opponent", "Home",
                            "Neutral_Location", "Game_ID", "ELO", "Opp_ELO")
NCAAF_This_Week <- NCAAF_This_Week %>% mutate(
  Elo_Difference = ELO - Opp_ELO
)
# Apply the spread and win-probability models fitted above.
NCAAF_This_Week$Spread_Pred_lm_1 <- predict(spread_lm_1, newdata = NCAAF_This_Week)
NCAAF_This_Week$win_prob_glm_1 <- predict(win_prob_glm_1, newdata = NCAAF_This_Week, type = "response")
View(NCAAF_This_Week %>% arrange(desc(win_prob_glm_1)))
|
22b493fe55b811ecbdb6ee48d705b24443366ef8
|
7853c37eebe37fa6a0307e0dd9e197830ee6ac71
|
/explorations/grid.R
|
655a9df1b80234f440fedf5665f0c217acc3a671
|
[] |
no_license
|
chen0031/RCUDA
|
ab34ffe4f7e7036a8d39060639f09617943afbdf
|
2479a3b43c6d51321b0383a88e7a205b5cb64992
|
refs/heads/master
| 2020-12-22T22:47:37.213822
| 2016-07-27T03:50:44
| 2016-07-27T03:50:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
grid.R
|
# Benchmark alternative CUDA grid/block configurations for the setValue kernel.
library(RCUDA)
m = loadModule("inst/sampleKernels/set.ptx")
k = m$setValue_kernel
N = 1e7L
# Host-side integer buffer, copied to the device once and reused.
i = integer(N)
ci = copyToDevice(i)
# To get over N threads, we use 512 within a block for the maximum amount
# and then 256 x 128 grid.
# Would we be better off with a different break down of the grid or the block?
system.time(replicate(100, .cuda(k, ci, N, gridDim = c(256L, 128L), blockDim = c(512L))))
system.time(replicate(100, .cuda(k, ci, N, gridDim = c(32768L), blockDim = c(512L))))
system.time(replicate(100, .cuda(k, ci, N, gridDim = c(32768L), blockDim = c(32, 16))))
# Copy the result back to the host and inspect which elements were set.
i = ci[]
head(i)
done = i[i != 0]
length(done) + 1L
# Gaps between consecutive set values reveal any unhandled index ranges.
table(diff(done))
|
1946c07101cf6f6deb4f802019aa4d1f389b5222
|
850898c179e63adf03e07ec066046e3eba524aee
|
/rcpp20popcount/tests/testthat.R
|
f39a46972ba38bd2e95094236cc9df138baf44eb
|
[
"MIT"
] |
permissive
|
zettsu-t/cPlusPlusFriend
|
c658810a7392b71bbcd0fbf6e73fa106e227c0d0
|
8eefb1c18e1b57b1b7ca906027f08500f9fbefcc
|
refs/heads/master
| 2023-08-28T09:29:02.669194
| 2023-08-27T04:43:24
| 2023-08-27T04:43:24
| 81,944,943
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76
|
r
|
testthat.R
|
# Standard testthat entry point: run the package's test suite under R CMD check.
library(testthat)
library(rcpp20popcount)
test_check("rcpp20popcount")
|
d82f2d7a1aef7bb71d32258bb6b094713d5e1db7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggstatsplot/tests/test_ggsignif_adder.R
|
46b47633a7292fe8ddfa038686cbc51e544fa6f4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
r
|
test_ggsignif_adder.R
|
context(desc = "ggsignif_adder")

# ggsignif_adder works ----------------------------------------------------

testthat::test_that(
  desc = "ggsignif_adder works",
  code = {
    testthat::skip_on_cran()
    set.seed(123)
    library(ggplot2)

    # data: species vs. sepal length from the built-in iris dataset
    df <- data.frame(x = iris$Species, y = iris$Sepal.Length)

    # base boxplot to which significance brackets will be added
    p <- ggplot(df, aes(x, y)) + geom_boxplot()

    # dataframe with pairwise comparison test results
    df_pair <- ggstatsplot::pairwise_p(df, x, y, messages = FALSE)

    # add the significance brackets (internal helper under test)
    p_new <- ggstatsplot:::ggsignif_adder(
      plot = p,
      df_pairwise = df_pair,
      data = df
    )

    # build the plot to inspect the rendered layers
    pb <- ggplot2::ggplot_build(p_new)

    # tests: a second layer (the brackets) exists, with one group per
    # pairwise species comparison
    testthat::expect_equal(length(pb$data), 2L)
    testthat::expect_identical(
      as.character(unique(pb$data[[2]]$group)),
      c(
        "setosa-versicolor-1",
        "setosa-virginica-2",
        "versicolor-virginica-3"
      )
    )
  }
)
|
64264e82126ac285513c6ed2cc12d59b09def685
|
fbf3cf0aff5ed4b6d29a325f3e3ef288d4da4152
|
/lesson8.R
|
ae765f2822cf0fcac9a85d440713c254b71933d1
|
[] |
no_license
|
wonder2025/RExperiment
|
ffd2bc5ccf3bb60b17c2620d2449d3471edc09a2
|
0871f4c284d4eb625f3e574ecb65de128aed3a7d
|
refs/heads/master
| 2021-08-22T15:12:42.630971
| 2017-11-30T14:09:12
| 2017-11-30T14:09:12
| 111,537,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
r
|
lesson8.R
|
# Lesson 8: bar plots, scatter plots and density-based scatter plots.
library(grid)
library(vcd)
# Frequency of improvement levels in the vcd Arthritis data.
counts <- table(Arthritis$Improved)
counts
barplot(counts, main = "Simple Bar Plot", xlab = "Improvement", ylab = "Frequency",horiz = TRUE)
Arthritis
# Cross-tabulate improvement by treatment for a stacked bar plot.
counts <- table(Arthritis$Improved, Arthritis$Treatment)
counts
barplot(counts, main = "Stacked Bar Plot", xlab = "Treatment",
        ylab = "Frequency", col = c("red", "yellow", "green"),
        legend = rownames(counts))
# NOTE(review): this barplot call is an exact duplicate of the one above —
# possibly intentional for the lesson, but confirm.
barplot(counts, main = "Stacked Bar Plot", xlab = "Treatment",
        ylab = "Frequency", col = c("red", "yellow", "green"),
        legend = rownames(counts))
# NOTE(review): opar is saved but never restored with par(opar); attach() is
# also discouraged in favor of with()/data arguments.
opar <- par(no.readonly=TRUE) # record current settings
attach(mtcars)
plot(wt, mpg,
     main="Basic Scatterplot of MPG vs. Weight",
     xlab="Car Weight (lbs/1000)",
     ylab="Miles Per Gallon ", pch=19)
# Overlay a linear fit (solid red) and a lowess smoother (dashed blue).
abline(lm(mpg ~ wt), col="red", lwd=2, lty=1)
lines(lowess(wt, mpg), col="blue", lwd=2, lty=2)
# Simulate two overlapping bivariate point clouds for density plotting.
set.seed(1234)
n <- 10000
c1 <- matrix(rnorm(n, mean=0, sd=.5), ncol=2)
c2 <- matrix(rnorm(n, mean=3, sd=2), ncol=2)
mydata <- rbind(c1, c2)
mydata <- as.data.frame(mydata)
names(mydata) <- c("x", "y")
with(mydata, plot(x, y, pch=19, main="Scatter Plot with 10000 Observations"))
with(mydata,
     smoothScatter(x, y, main="Scatterplot colored by Smoothed Densities"))
boxplot(mpg~cyl,data=mtcars)
|
8c3dee58b71357e0d7085f86bd027d2f9bf69b74
|
c2e8ae8bbeb0db8af81e9955157e394fc518efaf
|
/cachematrix.R
|
cd4c39dbcef1fa6d2642241579fe35e60fddadb5
|
[] |
no_license
|
paulboys/ProgrammingAssignment2
|
cf830b7bb008c21e6e55722073552db09c8a2fa6
|
6c3e94c8eace294314bebb54db38f26fef611261
|
refs/heads/master
| 2022-11-24T00:29:49.876442
| 2020-07-31T14:26:45
| 2020-07-31T14:26:45
| 280,183,450
| 0
| 0
| null | 2020-07-16T15:01:33
| 2020-07-16T15:01:32
| null |
UTF-8
|
R
| false
| false
| 1,100
|
r
|
cachematrix.R
|
## This is a function to calculate the inverse of a matrix: the funciton checks
## to see if the inverse has already been calculated and cached. If so it
## retrieves the inverse from the cache. Otherwise it calculates the inverse.
## This is a function that creates a list:
#1)sets the value of the matrix
#2)gets the value of the matrix
#3)sets the value of the inverse
#4)gets the value of the inverse
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions that close over the matrix `x`
## and its cached inverse `inv`:
##   set(y)       replace the matrix (invalidates the cached inverse)
##   get()        return the matrix
##   setinv(v)    store a computed inverse in the cache
##   getinv()     return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # A new matrix makes any previously cached inverse stale.
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    inv <<- inverse
  }
  getinv <- function() {
    inv
  }
  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## This function computes the inverse of matrix returned by makeCacheMatrix;
## if the inverse has been calcualted already, cacheSolve retrieves the inverse
## from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## If an inverse is already cached it is returned directly (with a message);
## otherwise it is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
|
406f117bbb27c0fce12568a58b080b4d160dfabe
|
cabe99c8d91575cad196a5e9244970971e15be5d
|
/main.R
|
e6a741dbae7721f8ca225d283df42aec9e4a2fe1
|
[] |
no_license
|
mikeyfatfree/ExploratoryDataAnalysis_Project1
|
052716224c96cffb1aa292c674c89900c3450050
|
5ccbac8b809d9fcf41b563fd1914cc87123fa6b3
|
refs/heads/master
| 2021-01-10T06:35:52.378645
| 2016-02-06T14:15:01
| 2016-02-06T14:15:01
| 51,204,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 208
|
r
|
main.R
|
# Driver script: load helpers, read the power-consumption data once, then
# generate all four course plots from the shared dataset.
source("data.R")
source("plot1.R")
source("plot2.R")
source("plot3.R")
source("plot4.R")
# readFile() and the plotN.run() functions are defined in the sourced files.
dataDir <- getwd()
power <- readFile(dataDir)
plot1.run(power)
plot2.run(power)
plot3.run(power)
plot4.run(power)
|
eff1fb6fb7d0608a16dbf483e252c13a07ff3b9b
|
d66b1c07135991de77c33af65e9317b519acac20
|
/man/powercurve.t.test.Rd
|
cf5037ff0cfe49526385a891b5c03d21a64fd71e
|
[] |
no_license
|
cran/smd.and.more
|
6ad761925eaa249da76f20739d4131b8a6f0e22c
|
14799ead6579561922b91cfae2f42ef9db566f39
|
refs/heads/master
| 2021-01-13T14:19:54.892234
| 2010-05-08T00:00:00
| 2010-05-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,009
|
rd
|
powercurve.t.test.Rd
|
\name{powercurve.t.test}
\alias{powercurve.t.test}
\title{Compute a Power Curve with Colors}
\description{
From the sample size and either the within-cell or pooled standard deviation, or the two separate group standard deviations, automatically calibrate and calculate a power curve for the independent-groups t-test or one-sample t-test, as well as ancillary statistics. Uses the \code{\link{color.plot}} function in this package to automatically display the power curve with colors, either by default or explicit specification of values of the relevant parameters. Also, for the two-sample test, automatically calculates the within-group standard deviation from the two separate group standard deviations if not provided directly.
}
\usage{
powercurve.t.test(n=NULL, s=NULL, n1=NULL, n2=NULL, s1=NULL, s2=NULL,
mmd=NULL, msmd=NULL, mdp=.8, mu0=NULL, \dots)
}
\arguments{
\item{n}{Sample size for each of the two groups.}
\item{s}{Within-group, or pooled, standard deviation.}
\item{n1}{Sample size for Group 1.}
\item{n2}{Sample size for Group 2.}
\item{s1}{Sample standard deviation for Group 1.}
\item{s2}{Sample standard deviation for Group 2.}
\item{mmd}{Minimum Mean Difference of practical importance, the difference in the
response variable between the two group means. The concept is optional, and only one of mmd and msmd should be provided.}
\item{msmd}{For the Standardized Mean Difference, Cohen's d, the minimum value of
practical importance. The concept is optional, and only one of mmd and msmd should be provided.}
\item{mdp}{Minimum Desired Power, the smallest value of power considered to provide sufficient power. Default is 0.8. If changed to 0 then the concept is dropped from the analysis.}
\item{mu0}{Hypothesized mean, of which a provided value triggers a one-sample
analysis.}
\item{\dots}{Other parameter values, such as lwd and cex.lab from \code{\link{plot}} and col.line and col.bg from \code{\link{color.plot}}.}
}
\details{
This function relies upon the standard \code{\link{power.t.test}} function to calibrate and then calculate the power curve according to the relevant non-central t-distribution. The \code{\link{color.plot}} function from this package, which in turn relies upon the standard \code{\link{plot}} function, plots the power curve. As such, parameters in \code{\link{color.plot}} for controlling the different colors and other aspects of the display are also available, as are many of the more basic parameters in the usual \code{\link{plot}} function.
Also plotted, if provided, is the minimal meaningful difference, mmd, as well as the minimal desired power, mdp, provided by default. Relevant calculations regarding these values are also displayed at the console. One or both concepts can be deleted from the analysis. Not providing a value mmd implies that the concept will not be considered, and similarly for setting mdp to 0.
Invoke the function with the either the within-group (pooled) standard deviation, s, or the two separate group standard deviations, s1 and s2, from which s is computed. If the separate standard deviations are provided, then also provide the sample sizes, either as a single value of n or as two separate sample sizes, n1 and n2. If separate sample sizes n1 and n2 are entered, their harmonic mean serves as the value of n.
For power analysis of the two-sample t-test, the null hypothesis is a zero population mean difference. For a one-sample test, the null hypothesis is specified, and it is this non-null specification of mu0 that triggers the one-sample analysis. Only non-directional or two-tailed tests are analyzed.
The effect size that achieves a power of 0.8 is displayed. If a minimal meaningful difference, mmd, is provided, then the associated power is also displayed, as well as the needed sample size to achieve a power of 0.8.
}
\author{David W. Gerbing (Portland State University; \email{davidg@sba.pdx.edu})}
\seealso{
\code{\link{color.plot}}, \code{\link{plot}}, \code{\link{power.t.test}}.
}
\examples{
# default power curve and colors
powercurve.t.test(n=20, s=5)
# default power curve and colors
# plus optional smallest meaningful effect to enhance the analysis
powercurve.t.test(n=20, s=5, mmd=2)
# power curve from both group standard deviations and sample sizes
# also provide the minimum standardized mean difference of
# practical importance to obtain corresponding power
powercurve.t.test(n1=15, n2=25, s1=4, s2=6, msmd=.5)
# power curve from both group standard deviations but common sample size
# color and display options from plot and color.plot functions
powercurve.t.test(n=20, s1=4, s2=6, lwd=2, col.line="darkred",
col.bg="moccasin", col.grid="lightsteelblue", mmd=2, mdp=.6)
# power curve for one sample t-test, triggered by non-null mu0
powercurve.t.test(n=20, s=5, mu0=30, mmd=2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ power }
\keyword{ t.test }
|
d92a314ca3b5f5cba91c2b2416c1f0373fa25bd6
|
f0780bf1ab59bbbe076e063e677bc0885f11cd59
|
/DA/R_DA/PLOTS/BoxPlot.R
|
434d5540177999320e8204ed053f2b1576dea72f
|
[] |
no_license
|
sanchayana2007/DataAnalysis
|
a7308643d8ea3e67aadfafab7a1d559f4d948880
|
3de3a1d5215861501d69879a13ff17b4f5bec3ac
|
refs/heads/master
| 2020-04-12T05:44:08.040284
| 2019-03-28T01:41:06
| 2019-03-28T01:41:06
| 162,330,180
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 310
|
r
|
BoxPlot.R
|
# BoxPlot.R — read monthly sales data from an Excel workbook and draw a
# boxplot of extended price grouped by quantity.
library(xlsx)
# Giving the full path
#PLOT 1
# NOTE(review): 'sep' is not a documented read.xlsx() argument (it reads a
# worksheet, not a delimited file); presumably ignored — confirm.
df=read.xlsx("D:/R_DA/sales-jan-2014.xlsx",1,header= T,sep=',')
head(df)
range(df$unit.price)
#hist(df$quantity,xlim = c(-5,30),ylim = c(0,110),xlab = "MEDV")
# Close any open graphics devices before plotting.
graphics.off()
# NOTE(review): axis labels look swapped relative to the formula
# (ext.price is on the y axis but ylab says "Quantity") — confirm intent.
boxplot(df$ext.price~ df$quantity,ylim=c(0,15),xlab="Price",ylab="Quantity")
|
90b65283e0f297f849015f45f6019ec81a2366c9
|
017e1d3c8002e6b0835a97985168d6fb2bb652f0
|
/R/dplyr grammar.R
|
9173722d8dd553a7d35a125f1e46882efe1f6195
|
[] |
no_license
|
wnk4242/Rcheatsheet
|
e38baa4b09713c931caaef64eee5505b2b3a17b8
|
70054150c84b00affe6f525ce0f900755dd3e919
|
refs/heads/master
| 2021-07-26T19:55:23.175155
| 2020-07-03T14:19:32
| 2020-07-03T14:19:32
| 196,735,006
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
dplyr grammar.R
|
#' tbl_df function template
#'
#' Clears the console, then prints a short colored cheat-sheet snippet
#' showing how to convert a data frame to a tibble with \code{tbl_df()},
#' and finally calls \code{ask_dplyrpkg()} (defined elsewhere in this
#' package) to prompt the user.
#' @export
cs_tbl <- function(){
# "\014" is the form-feed character: clears the RStudio console.
cat("\014")
# NOTE(review): `bold` is presumably crayon's styling object — confirm it
# is imported/attached by the package.
cat(bold$blue('\n| Convert a data frame to tibble\n\n'))
cat(bold$red('Example:\n'),
"\ttbl_df(dataframe_name)")
# Three trailing newlines to separate the snippet from the prompt.
cat(rep("\n", 3))
ask_dplyrpkg()
}
|
71f689414e4eb49ae989057d332f2a3a02c2370b
|
a915ca4b65a027649aed3f8fc3d968dd134ecd8a
|
/cachematrix.R
|
2e76d8e90f830db40c1e64a9e228af25a514b8ea
|
[] |
no_license
|
vandretti/ProgrammingAssignment2
|
69d56bc462e6c1efff193b9e2b40e731cd4cd765
|
6fc53076f977de8ce0ba785216080c6a92f8a192
|
refs/heads/master
| 2021-01-22T18:32:58.015639
| 2015-07-22T00:01:15
| 2015-07-22T00:01:15
| 39,473,763
| 0
| 0
| null | 2015-07-21T22:52:02
| 2015-07-21T22:52:01
| null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
cachematrix.R
|
# cachematrix.R
#
# makeCacheMatrix() builds a special "matrix" object: a list of four
# closures sharing one environment that holds the matrix itself plus a
# cached copy of its inverse. The cache starts out as the 1x1 NA matrix
# produced by matrix(,) and is reset to that sentinel whenever the
# underlying matrix is replaced. Exposed operations:
#
#   1. set(y)          -- replace the matrix, invalidating the cache
#   2. get()           -- return the stored matrix
#   3. setinverse(inv) -- store a computed inverse in the cache
#   4. getinverse()    -- return the cached inverse (or the NA sentinel)
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- matrix(, )
  set <- function(new_mat) {
    x <<- new_mat
    cached_inv <<- matrix(, )   # any previously cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inv) cached_inv <<- inv
  getinverse <- function() cached_inv
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# cacheSolve() returns the inverse of the special "matrix" created by
# makeCacheMatrix(). If an inverse has already been computed and cached,
# it is returned directly; otherwise it is computed with solve(), stored
# in the cache, and returned. The input matrix is assumed to be square
# and invertible. Extra arguments in ... are forwarded to solve().
#
# Fix: the original tested `!is.na(inverse)` inside if(). Once a real
# n x n inverse is cached, is.na() yields an n x n logical matrix, and a
# condition of length > 1 is an error in modern R. anyNA() gives a single
# scalar answer, and the is.null() guard protects against an empty cache.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  # Cache hit: a non-NULL value with no NAs means a real inverse is stored
  # (the fresh-cache sentinel from makeCacheMatrix is a 1x1 NA matrix).
  if (!is.null(inverse) && !anyNA(inverse)) {
    message('retrieve inverse matrix from cache')
    return(inverse)
  }
  data <- x$get()
  inverse <- solve(data, ...)
  x$setinverse(inverse)
  inverse
}
|
a2c98c37175299cf5611db7d4f84aa2bb1c39c47
|
c19b0be23216483ffaba0994f2ae78e5b9e0c000
|
/plot4.R
|
998cc6b740520a379d9e264a83d6f6566254b21e
|
[] |
no_license
|
jlow2499/exploratory_data_analysis_project1
|
d3f941dbcdfbe92e76dbb957529eb1eb5ae23cdf
|
19116e1c54c1bb4d4b42dbc79a56b9b2702407a6
|
refs/heads/master
| 2021-01-10T09:49:11.381915
| 2015-06-08T11:40:31
| 2015-06-08T11:40:31
| 36,837,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
r
|
plot4.R
|
# plot4.R — draw the 2x2 panel figure for 1-2 Feb 2007 of the household
# power consumption data and save it to plot4.png.
#
# Fix: the original opened the PNG device with png() but never called
# dev.off(), so the output file was left open/incomplete.
###Read the data frame into R (semicolon-separated text file)
df <- read.csv("household_power_consumption.txt", sep=";",stringsAsFactors=FALSE,dec=".")
###subset the data for the two target dates
df2 <- df[which(df$"Date" %in% c("1/2/2007","2/2/2007")),]
###combine Date and Time columns into POSIXlt timestamps for the x axis
DATE<-strptime(paste(df2$"Date",df2$"Time",sep=" "), "%d/%m/%Y %H:%M:%S")
###subset the sub metering vectors
sub1<- df2$"Sub_metering_1"
sub2<- df2$"Sub_metering_2"
sub3<- df2$"Sub_metering_3"
###global active power (read as character, so coerce to numeric)
globalactivepower<-as.numeric(df2$"Global_active_power")
###global reactive power
globalreactivepower<-as.numeric(df2$"Global_reactive_power")
###voltage
voltage<-as.numeric(df2$"Voltage")
###open the PNG device at the required 480x480 size
png("plot4.png", width=480, height=480)
###lay the four plots out in a 2x2 grid
par(mfrow = c(2, 2))
###Plot 1: global active power over time
plot(DATE, globalactivepower,type="l",ylab="Global Active Power (kilowatts)", xlab="")
###Plot 2: voltage over time
plot(DATE,voltage,type="l",xlab="datetime",ylab="Voltage")
###Plot 3: the three sub-metering series on one panel
plot(DATE, sub1,type="l",ylab="Energy Submetering", xlab="")
lines(DATE,sub2,type="l",col="red")
lines(DATE,sub3,type="l",col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2, col=c("black", "red", "blue"))
###Plot 4: global reactive power over time
plot(DATE,globalreactivepower,type="l",xlab="datetime",ylab="Global_reactive_power")
###close the device so the PNG file is written out completely
dev.off()
|
51c0e6424feb9c2bc296c6eaa5b25a78a1af03d3
|
675846a7beb6c118a83150d99cfe40e1e968323a
|
/tests/impl2/rpd.R
|
b313cd5c5f4e511f0c155d9535f566d91ed90c15
|
[
"MIT"
] |
permissive
|
wtong1989/PFSP
|
281bd4e3df8c84479b4ae2e6b21599e6ddf6b249
|
eec9c5adbebe041296a634dafdd996439fd70cfa
|
refs/heads/master
| 2021-04-06T20:08:16.272604
| 2017-05-17T22:01:45
| 2017-05-17T22:01:45
| 125,261,216
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,701
|
r
|
rpd.R
|
# rpd.R — compare average relative percentage deviation (ARPD) between a
# Simulated Annealing and a GRASP solver, by instance size (50 vs 100),
# with Wilcoxon tests, a correlation scatter plot, and Spearman tests.
# Row convention (from the CSVs): rows 1:30 are size-100 instances,
# rows 31:60 are size-50 instances; column 3 holds the ARPD value.
# average percentage deviations means
sa_arpd <- read.csv("../impl2/sa/arpd_sa.csv")
grasp_arpd <- read.csv("../impl2/grasp/grasp_arpd.csv")
# sa 50
mean(sa_arpd[31:60,3])
sd(sa_arpd[31:60,3])
# sa 100
mean(sa_arpd[1:30,3])
sd(sa_arpd[1:30,3])
# grasp 50
mean(grasp_arpd[31:60,3])
sd(grasp_arpd[31:60,3])
# grasp 100
mean(grasp_arpd[1:30,3])
sd(grasp_arpd[1:30,3])
# statistical difference test: wilcoxon
# size 100
wilcox.test(grasp_arpd[1:30,3], sa_arpd[1:30,3])$p.value
# size 50
wilcox.test(grasp_arpd[31:60,3], sa_arpd[31:60,3])$p.value
# correlation plots
# NOTE(review): the two CSVs are re-read here — redundant but harmless.
grasp_arpd <- read.csv("../impl2/grasp/grasp_arpd.csv")
sa_arpd <- read.csv("../impl2/sa/arpd_sa.csv")
plot(x=grasp_arpd[1:30,3], y=sa_arpd[1:30,3],
main="arpd correlation",
xlab="arpd GRASP",
ylab="arpd Simulated Annealing",
col="red",
xlim=c(0,1.5), ylim=c(0, 4.0),
pch=4)
# Overlay the size-50 instances in blue on the same axes.
points(x=grasp_arpd[31:60, 3], y=sa_arpd[31:60, 3], col="blue", pch=4)
# NOTE(review): legend colors are reversed relative to the plotted points
# (size-100 points are red, size-50 blue, but the legend maps 50->blue,
# 100->red in that label order, which matches) — double-check label order.
legend("topleft", inset=.05, title="Instances size",
c("50","100"), fill=c("blue", "red"), horiz=FALSE)
# text(x=grasp_arpd[, 3], y=sa_arpd[, 3], grasp_arpd[,2], cex=0.5, pos=4, col="black")
# statistical tests on correlations
# pearson, null hypothesis: rho = 0 the ranks of one variable do not covary with the ranks of the other variable
# http://www.biostathandbook.com/spearman.html
# correlation coefficient for size 100
cor(x = grasp_arpd[1:30,3], y = sa_arpd[1:30, 3])
cor.test(x = grasp_arpd[1:30,3], y = sa_arpd[1:30, 3], method="spearman")$p.value
#correlation coefficient for size 50
cor(x = grasp_arpd[31:60,3], y = sa_arpd[31:60, 3])
cor.test(x = grasp_arpd[31:60,3], y = sa_arpd[31:60, 3], method="spearman")$p.value
|
edd8ff63f120cda5ddd3fb903df09127b8373a71
|
84640be8d6731e9e04cb0e67d9dd11452f374f29
|
/herringSEICpulse.R
|
b2bbd13e4a98491496f2579b1f27114c074d5dd0
|
[] |
no_license
|
talbenhorin/Herring-VHS
|
27c3d349ae4443ed15aca1e7efaa285fc3b97c6d
|
148974b4a94b79fb97824461c54947e935f75e70
|
refs/heads/master
| 2021-07-05T00:57:09.389965
| 2020-10-15T20:11:01
| 2020-10-15T20:11:01
| 193,991,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
herringSEICpulse.R
|
rm(list=ls(all=TRUE)) #clears workspace
# Load deSolve package
library(deSolve)
# SEIC epidemic model with a seasonally pulsed birth rate, written in the
# derivative-function form expected by deSolve::ode(): seic.pulse(t, x, params)
# returns a list whose first element holds the derivatives of the four
# states (S, E, I, C) in that order.
#
# params must carry named entries qS, qC, gamma, rho, beta, mu, c, alpha,
# f, s, k, phi. Note: any constant 'b' placed in params is ignored — the
# birth rate b is recomputed at every time step from k, s and phi.
seic.pulse <- function(t, x, params) {
  qS <- params["qS"]
  qC <- params["qC"]
  gamma <- params["gamma"]
  rho <- params["rho"]
  beta <- params["beta"]
  mu <- params["mu"]
  c <- params["c"]
  alpha <- params["alpha"]
  f <- params["f"]
  s <- params["s"]
  k <- params["k"]
  phi <- params["phi"]
  # Total population, hoisted out of the rate equations (the original
  # recomputed x[1]+x[2]+x[3]+x[4] five times per call).
  N <- x[1] + x[2] + x[3] + x[4]
  # Seasonally pulsed per-capita birth rate.
  b <- k * exp(-s * cos(pi * t - phi)^2)
  dS <- (b - c * N) * N - (mu + qS * f) * x[1] - beta * x[1] * (x[3] / N)
  dE <- beta * x[1] * (x[3] / N) - (mu + qS * f + gamma) * x[2]
  dI <- gamma * x[2] - (mu + qS * f + alpha + rho) * x[3]
  dC <- rho * x[3] - (mu + qC * f) * x[4]
  # The original also computed a harvest rate dH here but never returned
  # it (the state vector has only four components), so that dead
  # computation is dropped.
  list(c(dS, dE, dI, dC))
}
## Set parameters, initial states, and time frame
# NOTE(review): the b = 0.6 entry below is unused — seic.pulse recomputes
# the birth rate from k, s and phi on every call.
# The step 0.01917808 equals 7/365, i.e. a weekly step if time is in years
# — presumably intentional; confirm the time unit.
params <- c(qS=0.1,qC = 1,gamma = 91.25, rho = 30, beta = 45, b = 0.6, mu = 0.15, c = 1.05e-06,alpha = 2,f = 0, s = 50, phi = -1.5708, k = 3.98)#parameter string
xstart <- c(S= 100000, E = 100, I = 0, C = 0)
times <- seq(0, 50, by = 0.01917808)
# Integrate the SEIC system and collect the trajectory as a data frame
# with columns time, S, E, I, C.
out <- as.data.frame(
ode(
func = seic.pulse,
y = xstart,
times = times,
parms = params
)
)
# 2x2 panel of the four state trajectories; `op` keeps the previous
# graphics settings (not restored here).
op <-par(fig=c(0,1,0,1),mfrow=c(2,2),
mar=c(3,3,1,1),mgp=c(2,1,0))
plot(S~time,data=out,type='l')
plot(E~time,data=out,type='l')
plot(I~time,data=out,type='l')
plot(C~time,data=out,type='l')
|
7e0c222ac722f5fec7adb0764ca0e6957571db28
|
928ec37ec0bad12fbc77b21fdaaa0635c49b8cb1
|
/man/generate_differences.Rd
|
fa9700d7aa5f5675339434cd3a1312a328a79d44
|
[] |
no_license
|
leonarDubois/metaDAF
|
6d6032f56550303c99f847b67816ba5729a694c1
|
2c59a87bee23f8d83bac96e908ad445aa098ca62
|
refs/heads/master
| 2020-06-12T00:40:08.724888
| 2019-07-12T11:49:44
| 2019-07-12T11:49:44
| 194,138,439
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 938
|
rd
|
generate_differences.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_differences.R
\name{generate_differences}
\alias{generate_differences}
\title{Generate artificial dataset with differences between 2 groups}
\usage{
generate_differences(count_table, ...)
}
\arguments{
\item{count_table}{a matrix. Used as the \code{inputCount} parameter of the
\code{\link{EDDA::generateData}} function}
\item{...}{additional parameters for the \code{\link{EDDA::generateData}} function}
}
\value{
a list of 4 elements :
\item{count_table}{ a matrix of count values.}
\item{metadata}{ a data frame of at least one column containing the group variable.}
\item{true_DAF}{ a character vector. The names of the true DAF.}
\item{FC_true_DAF}{ a numeric vector. The fold-change of the true DAF.}
}
\description{
Use the \code{\link{EDDA::generateData}} function to generate artificial count values table
}
|
e363322bf2d96037b8cb83be37993373b74135d6
|
337e3a7e89bacae36ac06613b99bea5196c1b57f
|
/MyRProject.R
|
10d15d7bcdd54d747a1289320a36cd3e122bb188
|
[] |
no_license
|
pnightsore/New_Rproject
|
35ed69dc28513e09d8a9cd04cffc9cee7ebf396a
|
db4206152822beaf62c004d25e40c44fbcf5986d
|
refs/heads/master
| 2022-06-17T06:44:58.349195
| 2020-05-06T09:45:59
| 2020-05-06T09:45:59
| 259,067,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,828
|
r
|
MyRProject.R
|
# MyRProject.R — course exercise: load three country-level datasets and
# compute per-country summary statistics and plots.
#1. Set workingdirectory:
# NOTE(review): hard-coded absolute path; will only run on the author's
# machine.
setwd("/Users/nic/Desktop/DSE/CODING_FOR_DATA_SCIENCE/R_project/data_text")
#2. Import the data.(in this case three dataset,DGP/GINI/refugee):
GDP<-read.table('GDP.txt',header=TRUE)
refugee<-read.table('refugee.txt',header=TRUE)
GINI<-read.table('GINI.txt',header=TRUE)
#3. Calculate the mean for GDP,refugee,GINI per country across the years:
# Row 1 = Argentina; columns 4:32 hold the yearly values.
GDP_ARG<-as.numeric(GDP[1,4:32])
mean(as.numeric(GDP_ARG))
boxplot(GDP_ARG)
#4. Use a loop to get the mean of all countries for each dataset:
# 17 countries, one row each; prints each country's mean to the console.
for (i in seq(1,17, by=1)){
m<- as.numeric(GDP[i, 4:32])
print(mean(m, na.rm=TRUE))
}
for (i in seq(1,17, by=1)){
n<- as.numeric(refugee[i, 4:32])
print(mean(n, na.rm=TRUE))
}
# NOTE(review): GINI uses columns 4:31 — one fewer year column than GDP
# and refugee; presumably the GINI file has one year less. Confirm.
for (i in seq(1,17, by=1)){
n<- as.numeric(GINI[i, 4:31])
print(mean(n, na.rm=TRUE))
}
#5. Create a function for the loop:
# Returns the mean of one country's yearly values from the global GDP
# data frame (row x, year columns 4:32).
# Fix: the original summed 29 values (columns 4:32) but divided by a
# hard-coded 28, and any NA made the result NA; mean() with na.rm = TRUE
# computes the intended average over the values that are present.
meancal <- function(x) {
  result <- mean(as.numeric(GDP[x, 4:32]), na.rm = TRUE)
  return(result)
}
#EX:
meancal(2)
#6. Plot the bocplot for the GDP of each country:
gdparg<- log(as.numeric(GDP[1,4:32]))
gdparm<-log(as.numeric(GDP[2,4:32]))
gdpbra<-log(as.numeric(GDP[3,4:32]))
gdpchl<-log(as.numeric(GDP[4,4:32]))
gdpcol<-log(as.numeric(GDP[5,4:32]))
gdpcri<-log(as.numeric(GDP[6,4:32]))
gdpdeu<-log(as.numeric(GDP[7,4:32]))
gdpecu<-log(as.numeric(GDP[8,4:32]))
gdphnd<-log(as.numeric(GDP[9,4:32]))
gdpidn<-log(as.numeric(GDP[10,4:32]))
gdpkgz<-log(as.numeric(GDP[11,4:32]))
gdpmex<-log(as.numeric(GDP[12,4:32]))
gdpper<-log(as.numeric(GDP[13,4:32]))
gdprus<-log(as.numeric(GDP[14,4:32]))
gdpslv<-log(as.numeric(GDP[15,4:32]))
gdptha<-log(as.numeric(GDP[16,4:32]))
gdpukr<-log(as.numeric(GDP[17,4:32]))
boxplot(gdparg,gdparm,gdpbra,gdpchl,gdpcol,gdpcri,gdpdeu,gdpecu,gdphnd,gdpidn,gdpkgz,gdpmex,gdpper,gdprus,gdpslv,gdptha,gdpukr,main='boxplot distribution of gdp across countries',ylab='GDP',xlab='Countries')
#6. On the x axis instead of the numbers, write down the country code:
a<-GDP$Country_Code
#7. Give label GDP to the y axis.
#8. Use log for GDP values.
#9. Save the graph in a pdf format.(function is called pdf):
pdf(file="/Users/nic/Desktop/DSE/Coding/R_project/data_text/gdp.pdf",width=12,height=8)
boxplot(gdparg,gdparm,gdpbra,gdpchl,gdpcol,gdpcri,gdpdeu,gdpecu,gdphnd,gdpidn,gdpkgz,gdpmex,gdpper,gdprus,gdpslv,gdptha,gdpukr,main='boxplot distribution of gdp across countries',ylab='GDP',xlab='Countries',names=c( 'ARG','ARM','BRA','CHL','COL','CRI','DEU','ECU','HND','IDN','KGZ','MEX','PER','RUS','SLV','THA','UKR'))
dev.off()
as.vector(GDP$Country_Code)
#10. Change the function so that GDP can be a variable:
# Generalized mean: y is the data frame, x the row index.
# NOTE(review): columns 4:31 are 28 values but the divisor is 27, and
# print() (not return) is used — the function returns the printed value
# invisibly. The denominator looks off by one; confirm against the data.
meancal<-function(x,y){
result<-sum(as.numeric(y[x,4:31]),na.rm=TRUE)/27
print(result)
}
meancal(1,GDP)
meancal(2,GDP)
meancal(3,refugee)
meancal(2,GINI)
#11. Use the function in a loop.(use the function in the already existing loop):
# NOTE(review): this redefinition drops na.rm=TRUE, so any NA in the row
# makes the result NA — presumably unintentional; confirm.
meancal<-function(x,y){
result<-sum(as.numeric(y[x,4:31]))/27
print(result)
}
for (i in seq(1,17, by=1)){
print(meancal(i,GDP))
}
#12. Create a function for summary statistics:
# Returns c(mean, sd, median, max, min) of row x of data frame y, using
# columns z through the last column. NAs are ignored.
# Fix: the original passed the one-row data frame slice straight to sd(),
# max() and min(); sd() on a data frame errors in modern R. Coercing the
# slice to a numeric vector once fixes that and avoids repeating the
# subsetting five times.
summary_stat <- function(x, y, z) {
  vals <- as.numeric(y[x, z:length(y)])
  mean_d <- mean(vals, na.rm = TRUE)
  sd_d <- sd(vals, na.rm = TRUE)
  max_d <- max(vals, na.rm = TRUE)
  min_d <- min(vals, na.rm = TRUE)
  med_d <- median(vals, na.rm = TRUE)
  # Same output order as the original: mean, sd, median, max, min.
  return(c(mean_d, sd_d, med_d, max_d, min_d))
}
#EX:
summary_stat(1,GDP,4)
summary_stat(5,refugee,4)
summary_stat(5,GINI,4)
#13. Return the output in form of a table, with header: stat/GDP and save it to a file.(excel) instead of vector:
# Same statistics as #12, written as a two-column table (label, value)
# to summary_stat.csv in the working directory. s names the value column.
# NOTE(review): sd()/max()/min() receive a one-row data-frame slice here
# (not coerced with as.numeric like mean/median); sd() on a data frame
# errors in modern R — confirm against the R version used.
# NOTE(review): write.table() returns NULL, so this function returns
# NULL; its useful effect is the file it writes.
summary_stat<-function(x,y,z,s) {
mean_d<-mean(as.numeric(y[x,z:length(y)]), na.rm=TRUE)
sd_d<-sd(y[x,z:length(y)], na.rm=TRUE)
max_d<- max(y[x,z:length(y)], na.rm=TRUE)
min_d<-min(y[x,z:length(y)], na.rm=TRUE)
med_d<- median(as.numeric(y[x,z:length(y)]), na.rm=TRUE )
sumstat<- c('mean_d','sd_d','max_d','min_d','med_d')
l<- c(mean_d,sd_d,max_d,min_d,med_d)
df<- data.frame(sumstat,l)
colnames(df)<-c('sumstat',s)
table<- write.table(df, file='summary_stat.csv',sep=",", row.names = F)
return(table)
}
#EX:
summary_stat(1,GDP,4,s='GDP')
#14. Creat a trend graph,density graph,histogram and a boxplot for one country:
# Four views of Argentina's (row 1) yearly GDP values.
plot(as.numeric(GDP[1,4:32]), type='l')
plot(density(as.numeric(GDP[1,4:32])))
hist(as.numeric(GDP[1,4:32]), breaks= 2)
boxplot(as.numeric(GDP[1,4:32]))
#15. Create a function that draws the four exploratory plots (trend,
# density, histogram, boxplot) for row x of data frame y, using columns
# z through the last column. Returns NULL invisibly via the first plot.
# Fixes: the original wrote `?par(mfrow=c(1,4))`, which opens the help
# page for par instead of setting the 1x4 layout; and only the first of
# its four return() calls was ever reached — the rest were dead code.
explore_plot<- function(x,y,z){
  par(mfrow = c(1, 4))
  # Coerce the row slice to a numeric vector once instead of four times.
  vals <- as.numeric(y[x, z:length(y)])
  plot1 <- plot(vals, type = 'l')      # trend over time
  plot2 <- plot(density(vals))         # density estimate
  plot3 <- hist(vals, breaks = 2)      # coarse histogram
  plot4 <- boxplot(vals)               # distribution summary
  # plot() returns NULL, matching the original function's return value.
  return(plot1)
}
#EX:
explore_plot(1,GDP,4)
#DRAFT:
# NOTE(review): leftover draft — pdf() opens a device (and returns NULL,
# so pdfplot is NULL) and dev.off() immediately closes it, producing an
# empty 4plot.pdf. Presumably the explore_plot call was meant to sit
# between the two.
pdfplot<- pdf(file='4plot.pdf',width=12,height=8)
dev.off()
|
ee1b2e92adf8d7ea41046362117c781b780bf86d
|
57854e2a3731cb1216b2df25a0804a91f68cacf3
|
/R/projects.R
|
157eb3af2f3a979c2dbb41b7120b6efe0182a1e2
|
[] |
no_license
|
persephonet/rcrunch
|
9f826d6217de343ba47cdfcfecbd76ee4b1ad696
|
1de10f8161767da1cf510eb8c866c2006fe36339
|
refs/heads/master
| 2020-04-05T08:17:00.968846
| 2017-03-21T23:25:06
| 2017-03-21T23:25:06
| 50,125,918
| 1
| 0
| null | 2017-02-10T23:23:34
| 2016-01-21T17:56:57
|
R
|
UTF-8
|
R
| false
| false
| 5,687
|
r
|
projects.R
|
#' Get the project catalog
#'
#' @param x a \code{ShojiObject} that has a project catalog associated. If omitted,
#' the default value for \code{x} means that you will load the user's primary
#' project catalog. (Currently, there are no other project catalogs to load.)
#' @return An object of class \code{ProjectCatalog}.
#' @name projects
#' @export
#' @examples
#' \dontrun{
#' myprojects <- projects()
#' proj <- myprojects[["Project name"]]
#' }
projects <- function (x=getAPIRoot()) {
## GET the "projects" catalog URL from x's catalogs index and wrap the
## response in a ProjectCatalog.
ProjectCatalog(crGET(shojiURL(x, "catalogs", "projects")))
}
#' @rdname catalog-extract
#' @export
## Numeric [[ extraction: return the i-th catalog tuple as a CrunchProject.
setMethod("[[", c("ProjectCatalog", "numeric"), function (x, i, ...) {
getTuple(x, i, CrunchProject)
})
#' @rdname catalog-extract
#' @export
## Assigning a list into the catalog by name: creates a new project when
## the name is unknown; updating an existing project is not supported yet.
setMethod("[[<-", c("ProjectCatalog", "character", "missing", "list"),
function (x, i, j, value) {
if (i %in% names(x)) {
## TODO: update team attributes
halt("Cannot (yet) modify project attributes")
} else {
## Creating a new project
## Forward the list entries as newProject() arguments, forcing the
## name and target catalog, then return a refreshed catalog so the
## caller sees the new entry.
proj <- do.call(newProject,
modifyList(value, list(name=i, catalog=x)))
return(refresh(x))
}
})
#' Create a new project
#'
#' This function creates a new project. You can achieve the same results by
#' assigning into the projects catalog, but this may be a more natural way to
#' think of the action, particularly when you want to do something with the
#' project entity after you create it.
#' @param name character name for the project
#' @param members Optional character vector of emails or user URLs to add as
#' project members.
#' @param catalog ProjectCatalog in which to create the new project. There is
#' only one project catalog currently, \code{projects()}, but this is left here
#' so that all \code{new*} functions follow the same pattern.
#' @param ... Additional project attributes to set
#' @return A \code{CrunchProject} object.
#' @examples
#' \dontrun{
#' proj <- newProject("A project name")
#' # That is equivalent to doing:
#' p <- projects()
#' p[["A project name"]] <- list()
#' proj <- p[["A project name"]]
#'
#' proj2 <- newProject("Another project", members="you@yourco.com")
#' # That is equivalent to doing:
#' p[["Another project"]] <- list(members="you@yourco.com")
#' proj <- p[["Another project"]]
#' }
#' @export
newProject <- function (name, members=NULL, catalog=projects(), ...) {
## POST the new project's attributes to the catalog URL; the server
## responds with the new entity's URL.
u <- crPOST(self(catalog), body=toJSON(list(name=name, ...)))
## Fake a CrunchProject (tuple) by getting the entity
## TODO: make this more robust and formal (useful elsewhere too?)
out <- CrunchProject(index_url=self(catalog), entity_url=u,
body=crGET(u)$body)
## Add members to project, if given
if (!is.null(members)) {
members(out) <- members
}
## Return invisibly so interactive creation doesn't print the tuple.
invisible(out)
}
#' @rdname catalog-extract
#' @export
## Writing a CrunchProject back into the catalog: just patch the local
## index; no server request is made here.
setMethod("[[<-", c("ProjectCatalog", "character", "missing", "CrunchProject"),
function (x, i, j, value) {
## Assumes that modifications have already been persisted
## by other operations on the team entity (like members<-)
index(x)[[value@entity_url]] <- value@body
return(x)
})
#' @rdname teams
#' @export
## Fetch the project's member catalog from its "members" catalog URL.
setMethod("members", "CrunchProject", function (x) {
MemberCatalog(crGET(shojiURL(x, "catalogs", "members")))
})
#' @rdname teams
#' @export
## Assigning a MemberCatalog is a no-op by design: the mutation is assumed
## to have happened already (e.g. via the character method below).
setMethod("members<-", c("CrunchProject", "MemberCatalog"), function (x, value) {
## TODO: something
## For now, assume action already done in other methods, like NULL
## assignment above.
return(x)
})
#' @rdname teams
#' @export
## Add members by email/URL: PATCH only the addresses not already present.
setMethod("members<-", c("CrunchProject", "character"), function (x, value) {
value <- setdiff(value, emails(members(x)))
if (length(value)) {
## PATCH payload maps each new address to an empty object.
payload <- sapply(value, emptyObject, simplify=FALSE)
crPATCH(self(members(x)), body=toJSON(payload))
}
return(x)
})
#' @rdname tuple-methods
#' @export
## Resolve the catalog tuple to its full ProjectEntity by GETting its URL.
setMethod("entity", "CrunchProject", function (x) {
return(ProjectEntity(crGET(x@entity_url)))
})
#' @rdname delete
#' @export
## Irreversibly delete a project, prompting for consent unless the caller
## has already granted it (see with_consent). The 'confirm' argument is
## deprecated but still honored for backwards compatibility.
setMethod("delete", "CrunchProject", function (x, confirm=requireConsent(), ...) {
if (!missing(confirm)) {
warning("The 'confirm' argument is deprecated. See ?with_consent.",
call.=FALSE)
}
prompt <- paste0("Really delete project ", dQuote(name(x)), "? ",
"This cannot be undone.")
if (confirm && !askForPermission(prompt)) {
halt("Must confirm deleting project")
}
u <- self(x)
out <- crDELETE(u)
## Invalidate the cached parent catalog so the deleted entry disappears.
dropCache(absoluteURL("../", u))
invisible(out)
})
#' @rdname datasets
#' @export
## Move a dataset into a project by reassigning its owner; any other value
## (e.g. an ordering update) is assumed to have been persisted elsewhere.
`datasets<-` <- function (x, value) {
stopifnot(inherits(x, "CrunchProject"))
if (is.dataset(value)) {
## This is how we add a dataset to a project: change its owner
owner(value) <- x
## Invalidate the project's cached dataset catalog so it refetches.
dropCache(shojiURL(x, "catalogs", "datasets"))
}
## Else, we're doing something like `ordering(datasets(proj)) <- `
## and no action is required.
## TODO: setmethods for this. This feels fragile.
return(x)
}
#' A project's icon
#' @param x a \code{CrunchProject}
#' @param value character file path of the icon image file to set
#' @return The URL of the project's icon. The setter returns the
#' project after having uploaded the specified file as the new icon.
#' @name project-icon
#' @export
icon <- function (x) {
stopifnot(inherits(x, "CrunchProject"))
## The icon URL is carried on the tuple body; no request needed.
return(x@body$icon)
}
#' @rdname project-icon
#' @export
`icon<-` <- function (x, value) {
## PUT the image file to the project's icon view URL as a multipart
## upload, then refetch the project so the new icon URL is reflected.
crPUT(shojiURL(x, "views", "icon"),
body=list(icon=upload_file(value)))
dropOnly(absoluteURL("../", self(x))) ## Invalidate catalog
return(refresh(x))
}
|
a95279d03f0aec4f2e3b240397adf80688653a34
|
ef6190ce18343ce866942fb184ea165c908af074
|
/learningCurvePlots.R
|
53bccce07693b298c31b83778caacae6bcb8176b
|
[
"MIT"
] |
permissive
|
vagechirkov/gridsearch
|
e91994967142812b6b902d6b8a8a78698b35ce85
|
00101202f448150f2a194b75908e6e2604475d4b
|
refs/heads/master
| 2023-07-17T00:22:10.314630
| 2020-02-15T16:57:58
| 2020-02-15T16:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,125
|
r
|
learningCurvePlots.R
|
#Analysis of 1D learning curves
#Charley Wu 2018
#house keeping
# NOTE(review): rm(list=ls()) and setwd() are script-session conveniences;
# they make the script unsafe to source from other code.
rm(list=ls()) #clears workspace
#load packages
packages <- c('plyr', 'jsonlite', 'ggplot2', 'reshape2', "grid", 'matrixcalc', 'data.table')
# NOTE(review): require() returns FALSE instead of erroring if a package
# is missing, so a missing package fails silently here.
lapply(packages, require, character.only = TRUE)
##############################################################################################################
#Load simulation data
##############################################################################################################
# Read every simulated-data CSV, stack them, then step back up two levels.
setwd('rationalModels/simulatedData/')
# Get the files names
files = list.files(pattern="*.csv")
datafiles = do.call(rbind, lapply(files, fread))
setwd('..')
setwd('..')
# Per (id, trial, scenario, horizon, Model, kernel) cell: mean and SE of
# reward y and of the running maximum ymax.
df <- ddply(datafiles, ~id+trial+scenario+horizon+Model+kernel, summarise, meanReward=mean(y), meanSE= sd(y)/sqrt(length(y)), maxReward=mean(ymax), maxSE= sd(ymax)/sqrt(length(ymax)))
# Rename to the labels used in the plots; scenario -> PayoffCondition,
# kernel -> Environment.
colnames(df) <- c("id", "trial", "PayoffCondition", "Horizon", "Model", "Environment", "meanReward", "meanSE", "maxReward", "maxSE")
df$PayoffCondition <- factor(df$PayoffCondition)
levels(df$PayoffCondition) <- c("Cumulative", "Best")
df$Horizon <- factor(df$Horizon)
df$Model <- factor(df$Model)
# Reorder columns to match the human/random frames appended below.
df <- df[c("id", "trial", "PayoffCondition", "Horizon", "meanReward", "meanSE", "maxReward", "maxSE", "Model", "Environment")]
##############################################################################################################
#Add Human Data
##############################################################################################################
#add human data
source('dataMunging.R') #source data import function
d <- dataImport(normalize=FALSE)
# Same per-cell summary as for the simulations, labeled Model = "Human".
humanData <- ddply(d, ~id+trial+kernel+scenario+horizon, summarise, meanReward=mean(y), meanSE= sd(y)/sqrt(length(y)), maxReward=mean(ymax), maxSE= sd(ymax)/sqrt(length(ymax)))
humanData$Model <- rep("Human", nrow(humanData))
colnames(humanData) <- c("id", "trial", "Environment", "PayoffCondition", "Horizon", "meanReward", "meanSE", "maxReward", "maxSE", "Model")
humanData$PayoffCondition <- factor(humanData$PayoffCondition)
levels(humanData$PayoffCondition) <- c("Cumulative", "Best")
# Append human rows to the simulation frame (column orders now match).
df <- rbind(df, humanData)
##############################################################################################################
#Add Random data
##############################################################################################################
randomDF <- read.csv("rationalModels/random.csv")
# Drop the CSV's row-number column.
randomDF <- randomDF[ , (names(randomDF) != "X")]
# The random baseline is condition-independent, so duplicate it across
# both payoff conditions under a single Horizon/id placeholder.
randomDF$Horizon <- 10
randomDF$PayoffCondition <- "Cumulative"
randomDF2 <- randomDF
randomDF2$PayoffCondition <- "Best"
randomDF <- rbind(randomDF, randomDF2)
randomDF$id <- 10
randomDF<- randomDF[c("id","trial", "PayoffCondition", "Horizon", "meanReward", "meanSE", "maxReward", "maxSE", "Model", "Environment")] #double check this is correct
df<- rbind(df, randomDF)
##############################################################################################################
#Plot Data
##############################################################################################################
# Relabel and reorder the factor levels used in legends and facets.
levels(df$Model) <- c( "Option Learning", "Function Learning","Option Learning*", "Function Learning*", "Human", "Random")
# NOTE(review): `level=` below partially matches factor()'s `levels`
# argument, so it works, but the full name would be clearer.
df$Model <- factor(df$Model, level=c("Random", "Option Learning", "Option Learning*","Function Learning", "Function Learning*", "Human"))
levels(df$Horizon) <- c("Short", "Long")
df$Environment <- factor(df$Environment, levels = c("Rough", "Smooth"))
levels(df$PayoffCondition) <- c("Accumulation", "Maximization")
#Average REward
# p1: mean reward per trial, faceted by Environment x PayoffCondition.
# NOTE(review): stat_summary(fun.y=) is deprecated in ggplot2 >= 3.3
# (use fun=) — confirm the ggplot2 version pinned for this project.
p1 <- ggplot(subset(df, Model!="Local Search"), aes(x = trial, y = meanReward, color = Model,fill=Model, linetype = Horizon))+
stat_summary(fun.y=mean, geom='line', size=.7)+
#geom_ribbon(aes(ymin=meanReward-meanSE, ymax=meanReward+meanSE),alpha=0.1, color=NA) +
theme_classic()+
xlab('Trial')+
ylab('Average Reward')+
#coord_cartesian(ylim=c(45,90))+
scale_linetype_manual(values=c("dashed", "solid"))+
scale_color_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
scale_fill_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
facet_grid( ~Environment+PayoffCondition)+
scale_x_continuous(breaks = c(0, 2, 4, 6,8,10))+
theme(legend.position='none',strip.background=element_blank(), legend.key=element_rect(color=NA))
p1
ggsave(filename = 'plots/separatedLearningCurve1DAvg.pdf', p1, height = 2.5, width = 8, unit='in')
# p2: same layout but plotting the maximum reward found so far.
p2 <- ggplot(subset(df, Model!="Local Search"), aes(x = trial, y = maxReward, color = Model,fill=Model, linetype = Horizon))+
stat_summary(fun.y=mean, geom='line', size=.7)+
#geom_ribbon(aes(ymin=maxReward-meanSE, ymax=maxReward+meanSE),alpha=0.1, color=NA) +
theme_classic()+
xlab('Trial')+
ylab('Maximum Reward')+
#coord_cartesian(ylim=c(45,100))+
scale_linetype_manual(values=c("dashed", "solid"))+
scale_color_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
scale_fill_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
facet_grid( ~Environment+PayoffCondition)+
scale_x_continuous(breaks = c(0, 2, 4, 6,8,10))+
theme(legend.position='none',strip.background=element_blank(), legend.key=element_rect(color=NA))
p2
ggsave(filename = 'plots/separatedLearningCurve1DMax.pdf', p2, height = 2.5, width = 8, unit='in')
# p3: mean reward collapsed over payoff condition and horizon, with point
# markers, faceted by Environment only.
p3 <- ggplot(subset(df, Model!="Local Search"), aes(x = trial, y = meanReward, color = Model,fill=Model, shape = Model))+
stat_summary(fun.y=mean, geom='line', size=.7)+
stat_summary(fun.y=mean, geom='point', size=2)+
#geom_ribbon(aes(ymin=meanReward-meanSE, ymax=meanReward+meanSE),alpha=0.1, color=NA) +
theme_classic()+
coord_cartesian(ylim=c(45,85))+
xlab('Trial')+
ylab('Average Reward')+
scale_linetype_manual(values=c("dashed", "solid"))+
scale_color_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
scale_fill_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
facet_grid(~Environment)+
scale_x_continuous(breaks = c(0, 2, 4, 6,8,10))+
theme(legend.position="none", strip.background=element_blank(), legend.key=element_rect(color=NA))
p3
ggsave(filename = 'plots/LearningCurve1DAvg.pdf', p3, height = 2.5, width = 4.2, unit='in', useDingbats=FALSE)
########## Individual Learning Curves ###############
source('dataMunging.R') #source data import function
d <- dataImport(normalize=FALSE)
humanData <- ddply(d, ~id+trial+kernel+scenario+horizon, summarise, meanReward=mean(y), meanSE= sd(y)/sqrt(length(y)), maxReward=mean(ymax), maxSE= sd(ymax)/sqrt(length(ymax)), reward= mean(reward))
humanData$Model <- rep("Human", nrow(humanData))
colnames(humanData) <- c("id", "trial", "Environment", "PayoffCondition", "Horizon", "meanReward", "meanSE", "maxReward", "maxSE", "reward", "Model")
humanData$PayoffCondition <- factor(humanData$PayoffCondition)
levels(humanData$PayoffCondition) <- c("Accumulation", "Maximization")
humanData$trial <- humanData$trial - 1 #to range 0 - 10
humanData$normalizedReward <- (humanData$reward - min(humanData$reward))/(max(humanData$reward) - min(humanData$reward))
p4<- ggplot(humanData, aes(x=trial, y = meanReward,group=interaction(id, Horizon),fill =as.factor(Horizon), color=as.factor(Horizon)))+
geom_line(alpha = .5)+
#geom_ribbon(aes(ymin=meanReward-meanSE, ymax=meanReward+meanSE),alpha=0.05, color = NA) +
#stat_summary(aes(group=Horizon), fun.y=mean, geom='line', size=.7)+
theme_classic()+
xlab('Trial')+
ylab('Average Reward')+
#coord_cartesian(ylim=c(45,90))+
scale_linetype_manual(values=c("dashed", "solid"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
facet_grid(PayoffCondition~Environment)+
scale_x_continuous(breaks = c(0, 2, 4, 6,8,10))+
theme(legend.position='none',strip.background=element_blank(), legend.key=element_rect(color=NA))
p4
ggsave(filename = 'plots/indCurves1Davg.pdf', p4, height = 4.5, width = 3, unit='in', useDingbats=FALSE)
p5<- ggplot(humanData, aes(x=trial, y = maxReward,group=interaction(id, Horizon),fill =as.factor(Horizon), color=as.factor(Horizon)))+
geom_line(alpha=.5)+
#geom_ribbon(aes(ymin=meanReward-meanSE, ymax=meanReward+meanSE),alpha=0.05, color = NA) +
#stat_summary(aes(group=Horizon), fun.y=mean, geom='line', size=.7)+
theme_classic()+
xlab('Trial')+
ylab('Maximum Reward')+
#coord_cartesian(ylim=c(45,90))+
scale_linetype_manual(values=c("dashed", "solid"))+
scale_color_manual(values=c("#E69F00", "#56B4E9"))+
scale_fill_manual(values=c("#E69F00", "#56B4E9"))+
#scale_color_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
#scale_fill_manual(values=c("black", "#F0E442", "#E69F00", "#009E73", "#56B4E9", "#fb9a99"))+
facet_grid(PayoffCondition~Environment)+
scale_x_continuous(breaks = c(0, 2, 4, 6,8,10))+
theme(legend.position='none',strip.background=element_blank(), legend.key=element_rect(color=NA))
p5
ggsave(filename = 'plots/indCurves1Dmax.pdf', p5, height = 4.5, width = 3, unit='in', useDingbats=FALSE)
|
1fed0cc2dfcb351596983226060efb6defa67413
|
44eaee03687104a21cb0b2548a994d7e2cc038f9
|
/Bike Rental/bike_rental.R
|
4cb90429fe234206f502ffe6985fef7b47105f79
|
[] |
no_license
|
bharat284/MyLearningData
|
5757257ee45db9b6de18c75ceb63407e43b95b57
|
c44323754c88779da9005fd3ab73ec64be40cbfb
|
refs/heads/master
| 2020-04-25T20:50:27.849925
| 2019-10-29T18:31:18
| 2019-10-29T18:31:18
| 173,061,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
bike_rental.R
|
# Exploratory analysis of the daily bike-rental dataset ("day.csv"):
# load, type conversion, missing-value check, box plots and density plots.
#install.packages(c("dplyr","plyr","data.table","ggplot2","ggplot"))
library("dplyr")
library("plyr")
library("data.table")
library("ggplot2")
# NOTE(review): there is no CRAN package called "ggplot"; this call will
# error -- ggplot2 (already attached above) is presumably what was meant.
library("ggplot")
# load the dataset to R
bike<- read.csv("day.csv")
#Heading and summary of data
head(bike)
summary(bike) # From this data set season, yr, mnth,holiday,weekday,workingday,weatherlist columns are categorical variables.
# Remaining Temp,atemp, hum, windspeed,casual,registered are numeric variables.
# cnt is the target variable
# Finding missing values
sum(is.na(bike)) # There is no missing value in the dataset.
# Get structure of dataset
str(bike)
# convert int to factor for categorical variables
# NOTE(review): season and yr are described as categorical above but are
# not included in this conversion -- confirm whether that is intended.
categorical_variables <- c("mnth","holiday","weekday","workingday","weathersit")
numeric_variable<-c("temp","atemp","hum","windspeed","casual","registered","cnt")
bike[categorical_variables] <- lapply(bike[categorical_variables],factor)
#Box plot for all numeric variables
par(mfrow = c(3,3))
boxplot(bike$casual)
boxplot(bike$temp)
boxplot(bike$hum,xlab="hum")
boxplot(bike$atemp)
boxplot(bike$windspeed)
boxplot(bike$registered)
boxplot(bike$cnt)
# Density Plot for numerical variables
plot(bike)
par(mfrow = c(3,3))
plot(density(bike$temp)) # This density is mostly same with atemp
plot(density(bike$atemp))
plot(density(bike$hum))
plot(density(bike$windspeed))
plot(density(bike$casual))
plot(density(bike$registered))
plot(density(bike$cnt))
|
527278f13aa9137e222fed422fcb87fca330b80f
|
42c5613984794b9b9c08b792e6a1b91772613495
|
/man/medjs.Rd
|
dff57370eca1009faa884e3b5838f89cb81a75cb
|
[
"MIT"
] |
permissive
|
chrisaberson/pwr2ppl
|
87b5c8ca9af5081613d8a49c76a9fea9cdd5de12
|
06a0366cf87710cb79ef45bdc6535fd4d288da51
|
refs/heads/master
| 2022-09-28T13:57:48.675573
| 2022-09-05T23:35:22
| 2022-09-05T23:35:22
| 54,674,476
| 16
| 7
| null | 2019-03-29T16:55:16
| 2016-03-24T21:10:28
|
R
|
UTF-8
|
R
| false
| true
| 2,843
|
rd
|
medjs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/medjs.R
\name{medjs}
\alias{medjs}
\title{Compute Power for Mediated (Indirect) Effects Using Joint Significance
Requires correlations between all variables as sample size.
This is the recommended approach for determining power}
\usage{
medjs(
rx1x2 = NULL,
rx1m1,
rx1m2 = NULL,
rx1m3 = NULL,
rx1m4 = NULL,
rx1y,
rx2m1 = NULL,
rx2m2 = NULL,
rx2m3 = NULL,
rx2m4 = NULL,
rx2y,
rym1,
rym2 = NULL,
rym3 = NULL,
rym4 = NULL,
rm1m2 = NULL,
rm1m3 = NULL,
rm1m4 = NULL,
rm2m3 = NULL,
rm2m4 = NULL,
rm3m4 = NULL,
n,
alpha = 0.05,
mvars,
rep = 1000,
pred = 1
)
}
\arguments{
\item{rx1x2}{Correlation between first predictor (x1) and second predictor (x2)}
\item{rx1m1}{Correlation between first predictor (x1) and first mediator (m1)}
\item{rx1m2}{Correlation between first predictor (x1) and second mediator (m2)}
\item{rx1m3}{Correlation between first predictor (x1) and third mediator (m3)}
\item{rx1m4}{Correlation between first predictor (x1) and fourth mediator (m4)}
\item{rx1y}{Correlation between DV (y) and first predictor (x1)}
\item{rx2m1}{Correlation between second predictor (x2) and first mediator (m1)}
\item{rx2m2}{Correlation between second predictor (x2) and second mediator (m2)}
\item{rx2m3}{Correlation between second predictor (x2) and third mediator (m3)}
\item{rx2m4}{Correlation between second predictor (x2) and fourth mediator (m4)}
\item{rx2y}{Correlation between DV (y) and second predictor (x2)}
\item{rym1}{Correlation between DV (y) and first mediator (m1)}
\item{rym2}{Correlation between DV (y) and second mediator (m2)}
\item{rym3}{Correlation between DV (y) and third mediator (m3)}
\item{rym4}{Correlation between DV (y) and fourth mediator (m4)}
\item{rm1m2}{Correlation first mediator (m1) and second mediator (m2)}
\item{rm1m3}{Correlation first mediator (m1) and third mediator (m3)}
\item{rm1m4}{Correlation first mediator (m1) and fourth mediator (m4)}
\item{rm2m3}{Correlation second mediator (m2) and third mediator (m3)}
\item{rm2m4}{Correlation second mediator (m2) and fourth mediator (m4)}
\item{rm3m4}{Correlation third mediator (m3) and fourth mediator (m4)}
\item{n}{Sample size}
\item{alpha}{Type I error (default is .05)}
\item{mvars}{Number of Mediators}
\item{rep}{number of repetitions (1000 is default)}
\item{pred}{number of predictors (default is one)}
}
\value{
Power for Mediated (Indirect) Effects
}
\description{
Compute Power for Mediated (Indirect) Effects Using Joint Significance
Requires correlations between all variables as sample size.
This is the recommended approach for determining power
}
\examples{
\donttest{medjs(rx1m1=.3, rx1m2=.3, rx1m3=.25, rx1y=-.35, rym1=-.5,rym2=-.5, rym3 = -.5,
rm1m2=.7, rm1m3=.4,rm2m3=.4, mvars=3, n=150)}
}
|
3fec73e1349603e3233682c3bb274ddd9bb76776
|
ec447fc767bd08cd006629cdf36953fb20998081
|
/ML/FirstSteps.R
|
f900c7383cedaff2311ba16b956447207ba8910a
|
[] |
no_license
|
jancschaefer/thesis_code
|
004ff34070f9773816e46b2435aa43535d648ee7
|
5472bfbd8f62c812f5a97eabaceec0024efa115d
|
refs/heads/master
| 2021-09-15T13:07:25.440979
| 2018-06-02T21:21:04
| 2018-06-02T21:21:04
| 126,164,497
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 146
|
r
|
FirstSteps.R
|
# Connect to an H2O cluster already running on this machine and pull in
# the parquet-derived frame, dropping any rows that contain NAs.
library(h2o)
h2o.connect("localhost", port = 54444)
converted <- h2o.na_omit(h2o.getFrame("converted_dropped_parquet.hex"))
98488c4cfd2c97da670af505c56fc83135126d50
|
fb19754c596e722532d0b86d4cebc7bbce9c73c8
|
/best.R
|
900be8ba05cd5578ee2e5f1a3e3fd9ec89624cc2
|
[] |
no_license
|
yuchun-sc/HospitalRanking
|
b5118b7afcdf0d0dc550f4b99a3a5c4f99a37cd8
|
c58c7613ba626ba5c78b39c4bb1ce7a154c80cbc
|
refs/heads/master
| 2021-01-10T00:56:50.085860
| 2016-02-18T18:40:55
| 2016-02-18T18:40:55
| 52,029,560
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,133
|
r
|
best.R
|
## Finds the best (lowest 30-day mortality) hospital in a state for a
## given outcome measure ("heart attack", "heart failure" or "pneumonia").
## Ties are broken alphabetically by hospital name.
best <- function(state, outcome) {
  ## Read outcome data
  data <- read.csv("data/outcome-of-care-measures.csv",
                   header = TRUE)
  ## check if the input state name is valid
  if (!(state %in% data$State)) {
    stop("invalid state")
  }
  ## check if the input outcome measure is valid; maps each outcome
  ## name to its mortality-rate column number
  validOutcome <- list(c(11, 17, 23),
                       c("heart attack", "heart failure", "pneumonia"))
  if (!(outcome %in% validOutcome[[2]])) {
    stop("invalid outcome")
  }
  dataState <- data[data$State == state, ]
  colNo <- validOutcome[[1]][match(outcome, validOutcome[[2]])]
  ## Convert BEFORE filtering so non-numeric entries such as
  ## "Not Available" become NA and are dropped. (The original filtered
  ## is.na() first, so such rows survived the filter, became NA during
  ## conversion, and poisoned min() with NA.)
  dataState[, colNo] <- suppressWarnings(as.numeric(as.character(dataState[, colNo])))
  dataState <- dataState[!is.na(dataState[, colNo]), ]
  winner <- min(dataState[, colNo])
  winHosp <- as.character(dataState[dataState[, colNo] == winner, 2])
  ## BUG FIX: the original called sort(winHosp) without assigning the
  ## result, so the promised alphabetical tie-break never took effect.
  winHosp <- sort(winHosp)
  winHosp[1]
}
|
890e201efcb4fda795885f307c2e1526ffbe0ab6
|
d4608310406b4a60580c47c0ccdfaf8c7e58cf22
|
/PRS_threshold_selection.R
|
791d86517a1c3f145453dc8e4897af99c0c3953c
|
[] |
no_license
|
marieleyse/paper-Fall-2020
|
d4c511a0cd318e6a10e547e5ce0fb697fb4bfa9d
|
2e543d6c28015dbb2c9255eed0aa19d1588e18a7
|
refs/heads/master
| 2023-05-06T01:58:51.277656
| 2021-06-02T19:23:07
| 2021-06-02T19:23:07
| 297,434,580
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,781
|
r
|
PRS_threshold_selection.R
|
# -------------------------------------------------------------------
# PRS threshold selection for ADHD polygenic scores.
# Fits the same linear model (ADHD ~ PRS + PC1 + PC2 + PC3) at seven
# PRS p-value thresholds, then prints regression diagnostics,
# Breusch-Pagan tests, model summaries and AIC/BIC/R^2 comparisons
# into a text report so the best threshold can be chosen.
# Marie-Elyse Lafaille-Magnan, PhD
# marie-elyse.lafaille-magnan@mail.mcgill.ca
# -------------------------------------------------------------------
# FIX: glance() and %>% are used below, so broom and dplyr must be
# attached -- previously library(broom) was commented out and dplyr
# was never loaded, making the glance() block error at run time.
library(broom)
library(dplyr)
setwd("/Users/Marie-Elyse/Downloads")
NEW <- read.csv("MAVAN_48M_and_up_jun2020_new.csv")
# Redirect all printed output into the report file.
sink('threshold_selection.txt')
# --- One model per PRS threshold: diagnostics + heteroscedasticity test ---
fit1 <- lm(ADHD ~ PRS_0_5_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2)) # init 4 diagnostic charts in 1 panel
plot(fit1)
lmtest::bptest(fit1) # studentized Breusch-Pagan test
fit2 <- lm(ADHD ~ PRS_0_2_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit2)
lmtest::bptest(fit2)
fit3 <- lm(ADHD ~ PRS_0_1_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit3)
lmtest::bptest(fit3)
fit4 <- lm(ADHD ~ PRS_0_05_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit4)
lmtest::bptest(fit4)
fit5 <- lm(ADHD ~ PRS_0_01_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit5)
lmtest::bptest(fit5)
fit6 <- lm(ADHD ~ PRS_0_001_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit6)
lmtest::bptest(fit6)
fit7 <- lm(ADHD ~ PRS_0_0001_adhd_child + PC1 + PC2 + PC3, data = NEW)
par(mfrow = c(2, 2))
plot(fit7)
lmtest::bptest(fit7)
# --- Model summaries and R^2 values ---
summary(fit1)
y1 <- summary(fit1)$r.squared
summary(fit1)$adj.r.squared
summary(fit2)
y2 <- summary(fit2)$r.squared
summary(fit2)$adj.r.squared
summary(fit3)
y3 <- summary(fit3)$r.squared
summary(fit3)$adj.r.squared
summary(fit4)
y4 <- summary(fit4)$r.squared
summary(fit4)$adj.r.squared
summary(fit5)
y5 <- summary(fit5)$r.squared
summary(fit5)$adj.r.squared
summary(fit6)
y6 <- summary(fit6)$r.squared
summary(fit6)$adj.r.squared
summary(fit7)
y7 <- summary(fit7)$r.squared
summary(fit7)$adj.r.squared
# --- Information criteria across thresholds ---
AIC <- AIC(fit1, fit2, fit3, fit4, fit5, fit6, fit7)
BIC <- BIC(fit1, fit2, fit3, fit4, fit5, fit6, fit7)
r.squared <- c(y1, y2, y3, y4, y5, y6, y7)
PRS.threshold <- c(0.5, 0.2, 0.1, 0.05, 0.01, 0.001, 0.0001)
print(AIC)
print(BIC)
print(r.squared)
print(PRS.threshold)
# --- Compact per-model fit statistics (auto-printed inside sink) ---
glance(fit1) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit2) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit3) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit4) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit5) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit6) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(fit7) %>%
  dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
sink()
|
cc82311c01c73f51103f6cf3aacebdd733b60f60
|
63e68a1da4c46ce031c57645b40f50ef5d656ae3
|
/Functional_Fit/function_fit.R
|
2508d45c1528d1c0abb24e8f5aa784844802b5e8
|
[] |
no_license
|
anooshrees/Numerical-Methods
|
623065e90cdf71a59d6b49ab54694e370a945354
|
657fd7ef01bb0a3f73b048e8abe425ab45249d9c
|
refs/heads/master
| 2020-03-23T00:39:56.295235
| 2018-07-13T17:53:43
| 2018-07-13T17:53:43
| 140,877,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,881
|
r
|
function_fit.R
|
##############################################
# Author: Anooshree Sengupta
# Created on: 10/18/17
# Description: Functional fit for linear,
# parabolic, and gaussian
##############################################
######## RUN ME #########
# NOTE(review): all file paths below are hard-coded to one user's
# Desktop; adjust before running on another machine.
# Linear table test
line_table <- line_w_zero(-10, 10, 0.1, 3)
write.csv(line_table, file = "~/Desktop/School 2017-2018/Numerical Methods/line_table.csv")
# Gaussian table test
gaussian_table <- gaussian_fun(-10, 10, 0.1)
write.csv(gaussian_table, file = "~/Desktop/School 2017-2018/Numerical Methods/gaussian_table.csv")
# Actual test of function: 4 constants -> gaussian fit (see functional_form)
start = 0 # beginning of domain
end = 41 # end of domain
step = 5 # step used for two-point derivative
q_vector = c(275000,12,6,5) # beginning guess for constants
file_name = "GeigerHisto.txt"
functional_fit(start, end, step, q_vector, file_name)
############# FUNCTIONS #######################
##############################################
# Fits a set of (x, y) data points, read from a file, to a functional
# form chosen by the number of constants supplied (2 = line,
# 3 = parabola, 4 = gaussian, 7 = double gaussian; see functional_form).
# The constants are found by steepest descent on the summed squared
# error with an adaptive learning factor.
# INPUT: start, the beginning of the domain of points
#        end, the end of the domain of points
#        step, the step size used for the two-point partial derivatives
#        q_vector, the initial guess for the constants
#        file_name, the tab-separated data file (x in col 1, y in col 2)
# OUTPUT: the fitted constants (rounded with signif); also prints the
#         fitted equation and final error, and plots the fit over the data
# PRECONDITION: length(q_vector) is 2, 3, 4 or 7; step is not larger
#               than the domain
##############################################
functional_fit <- function(start, end, step, q_vector, file_name){
  # Read the data (assumed tab-separated) and clip to [start, end]
  df <- read.csv(file = paste("~/Desktop/School 2017-2018/Numerical Methods/", file_name, sep=""), sep="\t")
  df <- df[intersect(which(df[,1] <= end), which(df[,1] >= start)), ]

  n_q <- length(q_vector)
  sum_vector <- rep(0, n_q)    # steepest-descent direction
  error_vector <- rep(0, n_q)  # summed squared error (same value per entry)
  lambda <- 1                  # adaptive learning factor
  past_error <- rep(1, n_q)
  count <- 0

  while (count <= 10000) {
    # Accumulate the error and gradient over all data points.
    # The partial-derivative vector is computed once per point instead of
    # once per point per constant (same sums, far fewer evaluations).
    sum_vector[] <- 0
    error_vector[] <- 0
    for (i in seq_len(nrow(df))) {
      residual <- df[i, 2] - functional_form(df[i, 1], q_vector)
      pd <- partial_derivative(q_vector, df[i, 1], step)
      sum_vector <- sum_vector + residual * pd
      error_vector <- error_vector + residual^2
    }
    # Divide error by 2 to avoid the factor of 2 in the derivative
    error_vector <- error_vector / 2

    # Normalize the descent direction
    sum_vector <- sum_vector / sqrt(sum(sum_vector^2))

    # Adapt the learning factor per constant and take the step
    curr_lambda <- lambda  # hold previous lambda constant within the loop
    for (q in seq_len(n_q)) {
      if (error_vector[q] > past_error[q]) {
        lambda <- curr_lambda / 2    # error grew: shrink the step
      }
      if (error_vector[q] < past_error[q]) {
        lambda <- curr_lambda * 1.5  # error shrank: grow the step
      }
      q_vector[q] <- q_vector[q] + lambda * sum_vector[q]
    }

    past_error <- error_vector
    count <- count + 1
    # Keep the user updated
    if (count %% 250 == 0) {
      print(paste("Currently on run", count))
      print(paste("The constants are", q_vector))
      print(paste("The error is", error_vector))
      print(paste("The learning factor is", lambda))
    }
  }

  # Report the fitted equation.
  # BUG FIX: the original tested length(q_vector == 4) and
  # length(q_vector == 7) -- the length of a logical vector, which is
  # always truthy -- so the 4-constant branch also swallowed the
  # 7-constant (double gaussian) case and printed the wrong equation.
  q_vector <- signif(q_vector)
  if (n_q == 2) {
    print(paste("Our fit is y = ", q_vector[1], "*x + (", q_vector[2], ")", sep = ""))
  } else if (n_q == 3) {
    print(paste("Our fit is y = ", q_vector[1], "*(x^2) + ", q_vector[2], "*x + (", q_vector[3], ")", sep = ""))
  } else if (n_q == 4) {
    print(paste("Our fit is y = ", q_vector[1], "*e^-(", -q_vector[2], "+x)^2)/(", q_vector[3], "^2) + ", q_vector[4], sep = ""))
  } else {
    print(paste("Our fit is y = ", q_vector[1], "*e^-(", q_vector[2], "-x)^2)/(", q_vector[3], "^2) + ", q_vector[4], "*e^-(", q_vector[5], "-x)^2)/(", q_vector[6], "^2) + ", q_vector[7], sep = ""))
  }
  print(paste("error is:", past_error))

  # Plot the fitted curve over the data points
  curve(functional_form(x, q_vector), from = start, to = end, xlab = "x", ylab = "y")
  points(df[, 1], df[, 2], pch = 2, col = "green")
  q_vector
}
##############################################
# Numerically estimates the partial derivative of functional_form with
# respect to each constant in q_vector, using a two-point forward
# difference of size `step`, evaluated at x = point.
# INPUT: q_vector, the vector of constants (see functional_form)
#        point, the x value at which the derivatives are taken
#        step, the forward-difference step size
# OUTPUT: a numeric vector of the same length as q_vector holding the
#         partial derivative with respect to each constant
# PRECONDITION: length(q_vector) matches a form known to functional_form
##############################################
partial_derivative <- function(q_vector, point, step){
  # Base evaluation is the same for every constant, so compute it once.
  base_value <- functional_form(point, q_vector)
  vapply(seq_along(q_vector), function(idx) {
    bumped <- q_vector
    bumped[idx] <- bumped[idx] + step
    (functional_form(point, bumped) - base_value) / step
  }, numeric(1))
}
##############################################
# Evaluates the model function at x for the given constants; the form is
# selected by the number of constants supplied:
#   2: line            y = q1*x + q2
#   3: parabola        y = q1*x^2 + q2*x + q3
#   4: gaussian        y = q1*exp(-((x - q2)^2)/q3^2) + q4
#   7: double gaussian (two gaussian terms plus a constant offset)
# INPUT: x, the value (scalar or vector) at which to evaluate
#        q_vector, the vector of constants
# OUTPUT: the function value(s) at x
##############################################
functional_form <- function(x, q_vector){
  n_q <- length(q_vector)
  if (n_q == 2) {
    # linear form
    q_vector[1]*x + q_vector[2]
  } else if (n_q == 3) {
    # parabolic form
    q_vector[1]*x^2 + q_vector[2]*x + q_vector[3]
  } else if (n_q == 4) {
    # gaussian form
    # BUG FIX: the original wrote `e^(...)` but `e` is undefined in this
    # scope (it was local to gaussian_fun), so this branch errored with
    # "object 'e' not found" at run time; exp() is the correct call.
    q_vector[1]*exp(-(((x - q_vector[2])^2)/(q_vector[3]^2))) + q_vector[4]
  } else {
    # double gaussian form
    # NOTE(review): the first term divides by q3 (not q3^2) and the second
    # amplitude is squared (q4^2); this reproduces the original code but
    # not the equation printed by functional_fit -- confirm intent.
    q_vector[1]*exp(-((x - q_vector[2])^2)/(q_vector[3])) + q_vector[4]^2*exp(-((x - q_vector[5])^2)/(q_vector[6]^2)) + q_vector[7]
  }
}
#######################################
# Creates a table of (x, y) pairs for a line with slope 1 and
# x-intercept `intersection`; formula: y = x - intersection
# INPUT: min, the minimum x value
#        max, the maximum x value
#        step, the difference between consecutive x values
#        intersection, where the line crosses the x-axis
# OUTPUT: a data frame with columns x_vals and y_vals
# PRECONDITIONS: the function exists over all x values
# POSTCONDITIONS: rows are in increasing x order
#######################################
line_w_zero <- function(min, max, step, intersection){
  x_vals <- seq(min, max, by = step)
  # Vectorized subtraction replaces the original per-element loop.
  y_vals <- x_vals - intersection
  data.frame(x_vals, y_vals)
}
#######################################
# Creates a table of (x, y) pairs for the function y = e^(-x^2),
# rounded to 6 significant digits (signif default).
# INPUT: min, the minimum x value
#        max, the maximum x value
#        step, the difference between consecutive x values
# OUTPUT: a data frame with columns x_vals and y_vals
# PRECONDITIONS: the function exists over all x values
# POSTCONDITIONS: rows are in increasing x order
#######################################
gaussian_fun <- function(min, max, step){
  x_vals <- seq(min, max, by = step)
  # Vectorized: exp() and signif() operate on whole vectors, so the
  # original per-element loop (and its local `e <- exp(1)`) is unneeded.
  y_vals <- signif(exp(-(x_vals^2)))
  data.frame(x_vals, y_vals)
}
|
b28eb5bee8cc83af7fa40c2d430904f2729a972d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rgr/examples/gx.ngr.stats.Rd.R
|
531649b0fad6e51fc83120dff0e6c125a6b4675e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 331
|
r
|
gx.ngr.stats.Rd.R
|
# Example script from the rgr package: compute summary statistics for an
# NGR report table using the bundled `sind` test dataset.
library(rgr)
### Name: gx.ngr.stats
### Title: Computes Summary Statistics for a NGR Report Table
### Aliases: gx.ngr.stats
### Keywords: univar
### ** Examples
## Make test data available
## NOTE(review): attach()/detach() is discouraged; with(sind, ...) or
## sind$Zn would avoid modifying the search path.
data(sind)
attach(sind)
## Generate and display the results for Zn
## (the name `table` shadows base::table for the rest of the session)
table <- gx.ngr.stats(Zn)
table
## Detach test data
detach(sind)
|
5ec93093273dcd86f454d13027f1e14693a1c231
|
30510c10caf14f0a3b79c6d9502d6c5f24ade7fd
|
/R/play_n_games.R
|
25b3e71457b073195f6d97f3301cfb7b0cf9c4a6
|
[] |
no_license
|
JayCastro/montyhall
|
2e00432bc259ed8c06f197d6341de0ee820dc5e9
|
9ccb258ba291db74da6c18e0bcd4e01ffe87528b
|
refs/heads/master
| 2022-12-14T05:16:57.653797
| 2020-09-18T18:05:37
| 2020-09-18T18:05:37
| 295,783,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
play_n_games.R
|
#' @title
#'   Play the Monty Hall game repeatedly
#' @description
#'   Plays the game `n` times (100 by default) and reports the results.
#' @details
#'   Runs `play_game()` n times, prints the proportion of wins and losses
#'   for each strategy so you can see whether staying or switching had the
#'   better winning percentage, and returns the raw per-game outcomes.
#' @param n Number of games to simulate (default 100).
#' @return
#'   A data frame with one row per game outcome (the row-proportion table,
#'   rounded to 2 decimal places, is printed as a side effect).
#' @examples
#' play_n_games()
#' @export
play_n_games <- function( n=100 )
{
  # Preallocate the collector instead of growing it each iteration.
  results.list <- vector( "list", n )
  for( i in seq_len( n ) )  # iterator
  {
    results.list[[ i ]] <- play_game()
  }
  results.df <- dplyr::bind_rows( results.list )
  # Row proportions of outcomes, rounded for display.
  # dplyr is referenced via :: throughout, so no library() call is needed
  # inside the function (calling library() in a package function is a
  # side effect on the user's search path).
  print( round( prop.table( table( results.df ), margin=1 ), 2 ) )
  return( results.df )
}
|
ed94ea5e87360e7dd02d8bbd530c0e385e2e1807
|
955b1a81f643451b9c73a2d86b9a46619820e955
|
/tests/testthat.R
|
963eaf9720354f9790276f2b29c179e698e5e6a0
|
[] |
no_license
|
venkatavarun-123/R_LAB3
|
a0a9d4ee8a12b7f91d9985992fcdcb501755bc09
|
05496eca0c9f5dbd47f133296ae95314c0463d36
|
refs/heads/master
| 2023-07-31T17:16:14.567143
| 2021-09-13T17:12:12
| 2021-09-13T17:12:12
| 405,846,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
testthat.R
|
# Standard package test entry point: attaches testthat and the package
# under test, then runs every test file under tests/testthat/.
library(testthat)
library(lab3package)
test_check("lab3package")
|
4ba07e35b74a981e3a8d15ab5edf73aeae046164
|
aaf8f198f80a3b9ef27f52e067b1bcaba27ce6fb
|
/Rcode/Simhyd/Simhyd.R
|
1190645331a571e433f6763e95b462055f6f4260
|
[
"MIT"
] |
permissive
|
WillemVervoort/VirtExp
|
32fd96058ae74fd8347fad1835778883b230c774
|
a7a35a7a3a152e307094ef07032b8659c91e54cb
|
refs/heads/master
| 2021-01-11T23:51:16.311933
| 2020-11-29T22:29:18
| 2020-11-29T22:29:18
| 78,632,674
| 1
| 1
|
MIT
| 2020-04-19T04:02:13
| 2017-01-11T11:30:29
|
R
|
UTF-8
|
R
| false
| false
| 24,845
|
r
|
Simhyd.R
|
## hydromad: Hydrological Modelling and Analysis of Data
## Rewrite
## willem V
## Including code for four versions of Simhyd
# references
# eWater: Podger (2004) https://ewater.atlassian.net/wiki/display/SD41/SIMHYD+-+SRG
# Chiew et al. (2002) Chapter 11 in VP Singh (ed) Mathematical Models of Small Watershed Hydrology and Applications
# Water Resources Publication, 2002 - Technology & Engineering - 950 pages
# Chiew et al. (2009) WRR VOL. 45, W10414, doi:10.1029/2008WR007338, 2009
# Chiew 2006 Hydrological Sciences-Journal-des Sciences Hydrologiques, 51(4) 2006
# Mun Ju Shin's/Felix Andrew's version rewritten in R and cpp
# temporarily, until fully compiled
# compile the cpp code
# NOTE(review): relies on `rcode_dir` being defined by the calling
# script, and on setwd() side effects (restored below). The C2002
# kernel's sourceCpp line is commented out, so the compiled branch of
# simhyd_C2002.sim will fail unless it is compiled elsewhere.
old_dir <- getwd()
setwd(rcode_dir)
#require(Rcpp)
#sourceCpp("SimhydC2002.cpp") # Chiew et al. 2002
sourceCpp("SimhydC2009.cpp") # Chiew et al. 2009 and Chiew 2006
sourceCpp("Simhyd_eWater.cpp") # eWater and SOURCE version
#sourceCpp("SimhydMJEq.cpp") # rewrite of Min Ju Shin's version
setwd(old_dir)
## SimHyd_C2009 model
# Daily SIMHYD rainfall-runoff model, Chiew et al. (2009) structure.
# DATA is a time series with columns P (precipitation, mm) and E (here
# max temperature, converted to PET via etmult). Returns modelled
# runoff U with DATA's time attributes, or, when return_state = TRUE,
# a multi-column series with all internal fluxes and stores.
simhyd_C2009.sim <-
function(DATA,
INSC,COEFF,
SQ,
SMSC, SUB, CRAK, K,
etmult = 0.15,
return_state = FALSE)
# See Figure 2 in Chiew et al. 2009
# INSC interception store capacity (mm)
# COEFF maximum infiltration loss
# SQ Infiltration loss exponent
# SMSC = Soil Moisture Storage Capacity
# SUB constant of proportionality in interflow equation
# CRAK constant of proportionality in groundwater recharge equation
# K baseflow linear recession parameter
# etmult = added parameter to convert maxT to PET
# return_state, whether or not to return all components
{
stopifnot(c("P","E") %in% colnames(DATA))
## check values
stopifnot(INSC >= 0)
stopifnot(COEFF >= 0)
stopifnot(SQ >= 0)
stopifnot(SMSC >= 0)
stopifnot(SUB >= 0)
stopifnot(CRAK >= 0)
stopifnot(K >= 0)
# NOTE(review): xpar is assembled but never used below
xpar <-
c(INSC, COEFF, SQ, SMSC, SUB, CRAK, K)
inAttr <- attributes(DATA[,1])
DATA <- as.ts(DATA)
P <- DATA[,"P"]
E <- etmult*DATA[,"E"]
## skip over missing values
bad <- is.na(P) | is.na(E)
P[bad] <- 0
E[bad] <- 0
# use the compiled C++ kernel unless the hydromad option forces pure R
COMPILED <- (hydromad.getOption("pure.R.code") == FALSE)
if (COMPILED) {
# run the cpp version
ans <- simhydC2009_sim(P, E, INSC, COEFF, SQ,SMSC,
SUB,CRAK,K)
U <- ans$U
# NOTE(review): here aET is taken directly from the C++ output, while
# the pure-R branch returns ET + IMAX -- confirm both agree.
aET <- ans$ET
if (return_state==T) {
INR = ans$INR
INT = ans$INT
RMO = ans$RMO
IRUN = ans$IRUN
SRUN = ans$SRUN
# NOTE(review): duplicate assignment -- RMO was already set above
RMO= ans$RMO
SMF = ans$SMF
SMS = ans$SMS
REC = ans$REC
GW = ans$GW
ET = ans$ET
BAS = ans$BAS
}
} else { ## very slow, even on my x64
U <- IMAX <- INT <- INR <- RMO <- IRUN <- NA
aET <- ET <- SRUN <- REC <- SMF <- POT <- BAS <- NA
# soil store starts half full; groundwater store starts empty
SMS <- GW <- rep(0,length(P))
SMS[1] <- 0.5*SMSC
# run through a loop
for (t in seq(2, length(P))) {
## testing
#t <- 2
# interception store (potential interception for this step)
IMAX[t] <- min(INSC,E[t])
#print(IMAX[t])
# calculate interception (actual, limited by rainfall)
INT[t] <- min(IMAX[t],P[t])
#print(INT[t])
# calculate interception runoff (INR = throughfall)
INR[t] <- P[t] - INT[t]
#print(INR[t])
# Calculate infiltration capacity (decays with soil wetness)
RMO[t] <- min(COEFF*exp(-SQ*SMS[t-1]/SMSC),INR[t])
#print(RMO[t])
# calculate direct (infiltration-excess) runoff
IRUN[t] <- INR[t] - RMO[t]
#print(IRUN[t])
# SRUN (Saturation excess runoff and interflow)
SRUN[t] = SUB*SMS[t-1]/SMSC*RMO[t]
#print(SRUN[t])
# calculate Recharge
REC[t] <- CRAK*SMS[t-1]/SMSC*(RMO[t] - SRUN[t])
#print(REC[t])
# Infiltration into soil store (SMF)
SMF[t] <- RMO[t] - SRUN[t] - REC[t]
#print(SMF[t])
# calculate potential ET remaining after interception evaporation
POT[t] <- E[t] - INT[t]
#print(E[t])
# calculate SMS overflow (see Figure 2 in Chiew et al 2009)
# calculate soil moisture storage
SMS[t] <- SMS[t-1] + SMF[t]
#print(SMS[t])
if (SMS[t] > SMSC) {
# overflow above capacity is routed to groundwater recharge
REC[t] <- REC[t] + SMS[t] - SMSC
SMS[t] <- SMSC
}
# Calculate Soil ET (limited by soil wetness, at most POT)
ET[t] <- min(10*SMS[t]/SMSC,POT[t])
SMS[t] <- SMS[t] - ET[t]
#print(SMS[t])
# Calculate GW storage
GW[t] <- GW[t-1] + REC[t]
# calculate baseflow: linear recession from the groundwater store
BAS[t] <- K*GW[t-1]
GW[t] <- GW[t] - BAS[t]
# Calculate runoff = direct + interflow/saturation excess + baseflow
U[t] <- IRUN[t] + SRUN[t] + BAS[t]
#print(paste("U =",U[t]))
# NOTE(review): actual ET adds IMAX (potential interception) rather
# than INT (actual interception) -- confirm intended when P < IMAX.
aET[t] <- ET[t] + IMAX[t]
}
}
## re-insert missing values
U[bad] <- NA
if (return_state==T) {
aET[bad] <- NA
INT[bad] <- NA
INR[bad] <- NA
RMO[bad] <- NA
IRUN[bad] <- NA
# NOTE(review): SRUN[bad] is not reset to NA here, unlike the other states
REC[bad] <- NA
SMF[bad] <- NA
ET[bad] <- NA
SMS[bad] <- NA
BAS[bad] <- NA
GW[bad] <- NA
}
## restore time-series attributes
attributes(U) <- inAttr
ans <- U
if (return_state==T) {
attributes(aET) <- inAttr
attributes(INT) <- inAttr
attributes(INR) <- inAttr
attributes(RMO) <- inAttr
attributes(IRUN) <- inAttr
attributes(SRUN) <- inAttr
attributes(REC) <- inAttr
attributes(SMF) <- inAttr
attributes(ET) <- inAttr
attributes(SMS) <- inAttr
attributes(BAS) <- inAttr
attributes(GW) <- inAttr
ans <- cbind(U=U,aET=aET, throughfall = INR,
interceptionET = INT,
infiltration = RMO,
infiltrationXSRunoff = IRUN,
interflowRunoff = SRUN,
infiltrationAfterInterflow = RMO-SRUN,
soilInput = SMF,
soilMoistureStore = SMS,
recharge = REC,
groundwater=GW,
soilET = ET,
baseflow = BAS)
}
ans
}
## SimHyd_C2002 model
# Daily SIMHYD rainfall-runoff model, Chiew et al. (2002) structure.
# Same interface as simhyd_C2009.sim; differs in the ET formulation
# (see NOTE below). Returns modelled runoff U, or all internal fluxes
# and stores when return_state = TRUE.
simhyd_C2002.sim <-
function(DATA,
INSC,COEFF,
SQ,
SMSC, SUB, CRAK, K,
etmult = 0.15,
return_state = FALSE)
# See Figure on page 339 in Chiew et al. 2002
# INSC interception store capacity (mm)
# COEFF maximum infiltration loss
# SQ Infiltration loss exponent
# SMSC = Soil Moisture Storage Capacity
# SUB constant of proportionality in interflow equation
# CRAK constant of proportionality in groundwater recharge equation
# K baseflow linear recession parameter
# etmult = added parameter to convert maxT to PET
{
stopifnot(c("P","E") %in% colnames(DATA))
## check values
stopifnot(INSC >= 0)
stopifnot(COEFF >= 0)
stopifnot(SQ >= 0)
stopifnot(SMSC >= 0)
stopifnot(SUB >= 0)
stopifnot(CRAK >= 0)
stopifnot(K >= 0)
# NOTE(review): xpar is assembled but never used below
xpar <-
c(INSC, COEFF, SQ, SMSC, SUB, CRAK, K)
inAttr <- attributes(DATA[,1])
DATA <- as.ts(DATA)
P <- DATA[,"P"]
E <- etmult*DATA[,"E"]
## skip over missing values
bad <- is.na(P) | is.na(E)
P[bad] <- 0
E[bad] <- 0
# use the compiled C++ kernel unless the hydromad option forces pure R
COMPILED <- (hydromad.getOption("pure.R.code") == FALSE)
if (COMPILED) {
# run the cpp version
# NOTE(review): the sourceCpp("SimhydC2002.cpp") line is commented out
# in this file's preamble, so simhydC2002_sim will be undefined unless
# compiled elsewhere.
ans <- simhydC2002_sim(P, E, INSC, COEFF, SQ,SMSC,
SUB,CRAK,K)
U <- ans$U
aET <- ans$ET
if (return_state==T) {
INR = ans$INR
INT = ans$INT
RMO = ans$RMO
IRUN = ans$IRUN
SRUN = ans$SRUN
# NOTE(review): duplicate assignment -- RMO was already set above
RMO= ans$RMO
SMF = ans$SMF
SMS = ans$SMS
REC = ans$REC
GW = ans$GW
ET = ans$ET
BAS = ans$BAS
}
} else { ## very slow, even on my x64
U <- INT <- INR <- RMO <- IRUN <- NA
aET <- ET <- SRUN <- REC <- SMF <- BAS <- NA
# soil store starts half full; groundwater store starts empty
SMS <- GW <- rep(0,length(P))
SMS[1] <- 0.5*SMSC
# run through a loop
for (t in seq(2, length(P))) {
# interception store (potential interception for this step)
tempET <- min(INSC,E[t])
# calculate interception (actual, limited by rainfall)
INT[t] <- min(tempET,P[t])
# calculate interception runoff (INR = throughfall)
INR[t] <- P[t] - INT[t]
#print(INR[t])
# Calculate infiltration (capacity decays with soil wetness)
RMO[t] <- min(COEFF*exp(-SQ*SMS[t-1]/SMSC),INR[t])
#print(RMO[t])
# calculate direct (infiltration-excess) runoff
IRUN[t] <- INR[t] - RMO[t]
# SRUN (Saturation excess runoff and interflow)
SRUN[t] = SUB*SMS[t-1]/SMSC*RMO[t]
# calculate Recharge
REC[t] <- CRAK*SMS[t-1]/SMSC*(RMO[t] - SRUN[t])
# Infiltration into soil store (SMF)
SMF[t] <- RMO[t] - SRUN[t] - REC[t]
# calculate SMS overflow
# calculate soil moisture storage
SMS[t] <- SMS[t-1] + SMF[t]
if (SMS[t] > SMSC) {
# overflow above capacity is routed to groundwater recharge
REC[t] <- REC[t] + SMS[t] - SMSC
SMS[t] <- SMSC
}
# Calculate Soil ET
# NOTE(review): limited by the full PET E[t] here; the 2009 variant
# uses PET minus interception (POT) instead -- confirm intended.
ET[t] <- min(10*SMS[t]/SMSC,E[t])
SMS[t] <- SMS[t] - ET[t]
# Calculate GW storage
GW[t] <- GW[t-1] + REC[t]
# calculate baseflow: linear recession from the groundwater store
BAS[t] <- K*GW[t-1]
GW[t] <- GW[t] - BAS[t]
# Calculate runoff = direct + interflow/saturation excess + baseflow
U[t] <- IRUN[t] + SRUN[t] + BAS[t]
aET[t] <- ET[t] + tempET
}
}
## make it a time series object again
## re-insert missing values
U[bad] <- NA
aET[bad] <- NA
if (return_state==T) {
INT[bad] <- NA
INR[bad] <- NA
RMO[bad] <- NA
IRUN[bad] <- NA
# NOTE(review): SRUN[bad] is not reset to NA here, unlike the other states
REC[bad] <- NA
SMF[bad] <- NA
ET[bad] <- NA
SMS[bad] <- NA
BAS[bad] <- NA
GW[bad] <- NA
}
attributes(U) <- inAttr
ans <- U
if (return_state==T) {
attributes(aET) <- inAttr
attributes(INT) <- inAttr
attributes(INR) <- inAttr
attributes(RMO) <- inAttr
attributes(IRUN) <- inAttr
attributes(SRUN) <- inAttr
attributes(REC) <- inAttr
attributes(SMF) <- inAttr
attributes(ET) <- inAttr
attributes(SMS) <- inAttr
attributes(BAS) <- inAttr
attributes(GW) <- inAttr
#browser()
ans <- cbind(U=U,aET=aET, throughfall = INR,
interceptionET = INT,
infiltration = RMO,
infiltrationXSRunoff = IRUN,
interflowRunoff = SRUN,
infiltrationAfterInterflow = RMO-SRUN,
soilInput = SMF,
soilMoistureStore = SMS,
recharge = REC,
groundwater=GW,
soilET = ET,
baseflow = BAS)
}
ans
}
## SimHyd_eWater model
## SimHyd rainfall-runoff model, eWater variant.
##
## Same structure as the Chiew et al. (2002) model (simhyd_C2002.sim)
## but with an impervious catchment area: a fraction (1 - pFrac) of the
## catchment evaporates up to the threshold impTh and converts the
## remaining rainfall directly to impervious runoff.
##
## Arguments:
##   DATA  - time-series-like matrix with columns "P" (precipitation)
##           and "E" (scaled by etmult to give potential ET)
##   pFrac - pervious fraction of the catchment (0-1)
##   impTh - impervious-area ET threshold (mm)
##   INSC  - interception store capacity (mm)
##   COEFF - maximum infiltration loss
##   SQ    - infiltration loss exponent
##   SMSC  - soil moisture storage capacity
##   SUB   - constant of proportionality in the interflow equation
##   CRAK  - constant of proportionality in the groundwater recharge equation
##   K     - baseflow linear recession parameter
##   etmult - multiplier applied to the "E" input (e.g. maxT to PET)
##   return_state - if TRUE, return all internal fluxes and stores
##
## Value: runoff series U with the attributes of DATA, or (when
## return_state) a multi-variable series of all water-balance components.
simhyd_eWater.sim <-
    function(DATA,
             pFrac = 1, impTh = 1,
             INSC, COEFF,
             SQ,
             SMSC, SUB, CRAK, K,
             etmult = 0.15,
             return_state = FALSE)
{
    stopifnot(c("P","E") %in% colnames(DATA))
    ## check values
    stopifnot(INSC >= 0)
    stopifnot(COEFF >= 0)
    stopifnot(SQ >= 0)
    stopifnot(SMSC >= 0)
    stopifnot(SUB >= 0)
    stopifnot(CRAK >= 0)
    stopifnot(K >= 0)
    ## collected parameter vector (not used further below)
    xpar <-
        c(pFrac, impTh, INSC, COEFF, SQ, SMSC, SUB, CRAK, K)
    ## remember time-series attributes so they can be restored on output
    inAttr <- attributes(DATA[,1])
    DATA <- as.ts(DATA)
    P <- DATA[,"P"]
    E <- etmult*DATA[,"E"]
    ## skip over missing values: simulate with zero input, re-insert NA later
    bad <- is.na(P) | is.na(E)
    P[bad] <- 0
    E[bad] <- 0
    COMPILED <- (hydromad.getOption("pure.R.code") == FALSE)
    if (COMPILED) {
        ## run the compiled (C++) version
        ans <- simhydeWater_sim(P, E, pFrac, impTh,
                                INSC, COEFF, SQ, SMSC,
                                SUB, CRAK, K)
        U <- ans$U
        aET <- ans$ET
        if (isTRUE(return_state)) {
            aET <- ans$ET
            INR <- ans$INR    # throughfall (interception runoff)
            INT <- ans$INT    # interception ET
            INF <- ans$INF
            RMO <- ans$RMO    # infiltration
            IRUN <- ans$IRUN  # infiltration-excess runoff
            SRUN <- ans$SRUN  # interflow runoff
            SMF <- ans$SMF    # input to the soil store
            SMS <- ans$SMS    # soil moisture store
            REC <- ans$REC    # recharge to groundwater
            GW <- ans$GW      # groundwater store
            ET <- ans$ET      # soil ET
            BAS <- ans$BAS    # baseflow
        }
    } else { ## pure R implementation (very slow)
        nt <- length(P)
        ## pre-allocate state vectors (previously grown from a scalar NA,
        ## which is O(n^2) in copies)
        U <- INT <- INF <- INR <- RMO <- IRUN <- ImpQ <- rep(NA_real_, nt)
        aET <- ET <- ImpET <- SRUN <- REC <- SMF <- BAS <- rep(NA_real_, nt)
        SMS <- GW <- rep(0, nt)
        ## soil store starts one third full
        SMS[1] <- 0.33*SMSC
        ## seq_len(nt - 1) + 1 rather than seq(2, nt) so a length-1
        ## series does not iterate backwards over t = 2, 1
        for (t in seq_len(nt - 1L) + 1L) {
            ## equation 1: impervious-area ET, capped by supply and impTh
            ImpET[t] <- min(min(E[t], (1 - pFrac)*impTh), (1 - pFrac)*P[t])
            ## impervious runoff is whatever impervious rainfall remains
            ImpQ[t] <- max((1 - pFrac)*P[t] - ImpET[t], 0)
            ## interception store: potential interception ET
            tempET <- min(INSC, E[t])
            ## equation 2: interception on the pervious fraction
            ## (fixed typo: was 'pFRac', which raised
            ## "object 'pFRac' not found" whenever this branch ran)
            INT[t] <- min(tempET, pFrac*P[t])
            ## throughfall (interception runoff)
            INR[t] <- pFrac*P[t] - INT[t]
            ## equations 3/4: infiltration capacity and actual infiltration
            RMO[t] <- min(COEFF*exp(-SQ*SMS[t-1]/SMSC), INR[t])
            ## direct (infiltration-excess) runoff
            IRUN[t] <- INR[t] - RMO[t]
            ## equation 5: interflow runoff
            SRUN[t] <- SUB*SMS[t-1]/SMSC*RMO[t]
            ## equation 7: groundwater recharge
            REC[t] <- CRAK*SMS[t-1]/SMSC*(RMO[t] - SRUN[t])
            ## equation 8: infiltration into the soil store
            SMF[t] <- RMO[t] - SRUN[t] - REC[t]
            ## soil moisture accounting; overflow above SMSC spills
            ## into recharge
            SMS[t] <- SMS[t-1] + SMF[t]
            if (SMS[t] > SMSC) {
                REC[t] <- REC[t] + SMS[t] - SMSC
                SMS[t] <- SMSC
            }
            ## soil ET, limited by the soil moisture fraction
            ET[t] <- min(10*SMS[t]/SMSC, E[t])
            SMS[t] <- SMS[t] - ET[t]
            ## groundwater balance and linear baseflow
            GW[t] <- GW[t-1] + REC[t]
            BAS[t] <- K*GW[t]
            GW[t] <- GW[t] - BAS[t]
            ## total runoff
            ## NOTE(review): ImpQ (impervious runoff) is computed but never
            ## added to U, and pervious fluxes are not scaled by pFrac --
            ## compare the commented simhyd_MJeq version; confirm against
            ## the compiled simhydeWater_sim.
            U[t] <- IRUN[t] + SRUN[t] + BAS[t]
            ## fixed: previously added the whole ImpET vector (not
            ## ImpET[t]); R then used only its first element (NA, with a
            ## warning), so every aET[t] came out NA
            aET[t] <- ET[t] + tempET + ImpET[t]
        }
    }
    ## re-insert missing values
    U[bad] <- NA
    aET[bad] <- NA
    if (isTRUE(return_state)) {
        INT[bad] <- NA
        INR[bad] <- NA
        RMO[bad] <- NA
        IRUN[bad] <- NA
        SRUN[bad] <- NA  # previously omitted; all other states get NA here
        REC[bad] <- NA
        SMF[bad] <- NA
        ET[bad] <- NA
        SMS[bad] <- NA
        BAS[bad] <- NA
        GW[bad] <- NA
    }
    ## make it a time series object again and
    ## output all the different water-balance components
    attributes(U) <- inAttr
    ans <- U
    if (isTRUE(return_state)) {
        attributes(aET) <- inAttr
        attributes(INT) <- inAttr
        attributes(INR) <- inAttr
        attributes(RMO) <- inAttr
        attributes(IRUN) <- inAttr
        attributes(SRUN) <- inAttr
        attributes(REC) <- inAttr
        attributes(SMF) <- inAttr
        attributes(ET) <- inAttr
        attributes(SMS) <- inAttr
        attributes(BAS) <- inAttr
        attributes(GW) <- inAttr
        ans <- cbind(U = U, aET = aET, throughfall = INR,
                     interceptionET = INT,
                     infiltration = RMO,
                     infiltrationXSRunoff = IRUN,
                     interflowRunoff = SRUN,
                     infiltrationAfterInterflow = RMO - SRUN,
                     soilInput = SMF,
                     soilMoistureStore = SMS,
                     recharge = REC,
                     groundwater = GW,
                     soilET = ET,
                     baseflow = BAS)
    }
    ans
}
# # Mun-Ju/Felix Andrews equivalent
# simhyd_MJeq.sim <- function (DATA, rainfallInterceptionStoreCapacity = 1.5, infiltrationCoefficient = 200,
# infiltrationShape = 3, soilMoistureStoreCapacity = 320, interflowCoefficient = 0.1,
# rechargeCoefficient = 0.2, baseflowCoefficient = 0.3, perviousFraction = 0.9,
# imperviousThreshold = 1, groundwater_0 = 5, soilMoistureStore_0 = soilMoistureStoreCapacity *
# 0.33, CONST_FOR_SOIL_ET = 10.0, return_state = FALSE, pure.R.code = FALSE)
# {
# inAttr <- attributes(DATA[, 1])
# DATA <- as.ts(DATA)
# stopifnot(c("P", "E") %in% colnames(DATA))
# stopifnot(rainfallInterceptionStoreCapacity >= 0)
# stopifnot(infiltrationCoefficient >= 0)
# stopifnot(infiltrationShape >= 0)
# stopifnot(soilMoistureStoreCapacity >= 0)
# stopifnot(interflowCoefficient >= 0)
# stopifnot(rechargeCoefficient >= 0)
# stopifnot(baseflowCoefficient >= 0)
# stopifnot(perviousFraction >= 0)
# stopifnot(imperviousThreshold >= 0)
# P <- DATA[, "P"]
# E <- DATA[, "E"]
# bad <- is.na(P) | is.na(E)
# P[bad] <- 0
# E[bad] <- 0
# X <- P
# COMPILED <- (hydromad.getOption("pure.R.code") == FALSE)
# if (COMPILED) {
# ans <- simhydMJEQ_sim(P,E,rainfallInterceptionStoreCapacity,
# infiltrationCoefficient,
# infiltrationShape,
# soilMoistureStoreCapacity,
# interflowCoefficient,
# rechargeCoefficient,
# baseflowCoefficient,
# perviousFraction,
# imperviousThreshold,
# groundwater_0,
# soilMoistureStore_0,
# CONST_FOR_SOIL_ET)
# X <- ans$X
# if (return_state==T) {
# aET <- ans$aET
# imperviousRunoff = ans$imperviousRunoff
# interceptionET = ans$interceptionET
# throughfall = ans$throughfall
# infiltrationXSRunoff = ans$infiltrationXSRunoff
# infiltration = ans$infiltration
# interflowRunoff = ans$interflowRunoff
# infiltrationAfterInterflow = ans$infiltrationAfterInterflow
# soilMoistureFraction = ans$soilMoistureFraction
# soilMoistureStore = ans$soilMoistureStore
# soilInput = ans$soilInput
# recharge = ans$recharge
# groundwater = ans$groundwater
# soilET = ans$soilET
# baseflow = ans$baseflow
# }
# } else {
# interceptionET <- infiltration <- throughfall <- infiltrationXSRunoff <- imperviousRunoff <- infiltrationAfterInterflow <- NA
# totalET <- soilET <- imperviousET <- interFlowRunoff <- recharge <- soilInput <- baseflow <- interception <- NA
# soilMoistureStore <- groundwater <- rep(0,length(P))
# groundwater[1] <- groundwater_0
# soilMoistureStore[1] <- soilMoistureStore_0
# for (t in seq(2, length(P))) {
# perviousIncident <- P[t]
# imperviousIncident <- P[t]
# # equation 1
# imperviousET[t] <- min(imperviousThreshold, imperviousIncident)
# imperviousRunoff[t] <- imperviousIncident - imperviousET
# # equation 2
# interceptionET[t] <- min(perviousIncident, min(E[t],
# rainfallInterceptionStoreCapacity))
# throughfall[t] <- perviousIncident - interceptionET
# soilMoistureFraction <- soilMoistureStore[t-1]/soilMoistureStoreCapacity
# # equation 3
# infiltrationCapacity <- infiltrationCoefficient *
# exp(-infiltrationShape * soilMoistureFraction)
# # equation 4
# infiltration[t] <- min(throughfall[t], infiltrationCapacity)
# infiltrationXSRunoff[t] <- throughfall[t] - infiltration[t]
# # equation 5
# interflowRunoff[t] <- interflowCoefficient * soilMoistureFraction *
# infiltration[t]
# # equation 6
# infiltrationAfterInterflow[t] <- infiltration[t] - interflowRunoff[t]
# # equation 7
# recharge[t] <- rechargeCoefficient * soilMoistureFraction *
# infiltrationAfterInterflow[t]
# # equation 8
# soilInput[t] <- infiltrationAfterInterflow[t] - recharge[t]
# soilMoistureStore[t] <- soilMoistureStore[t-1] + soilInput[t]
# soilMoistureFraction <- soilMoistureStore[t]/soilMoistureStoreCapacity
# groundwater[t] <- groundwater[t-1] + recharge[t]
# if (soilMoistureFraction > 1) {
# groundwater[t] <- groundwater[t] + soilMoistureStore[t] -
# soilMoistureStoreCapacity
# soilMoistureStore[t] <- soilMoistureStoreCapacity
# soilMoistureFraction <- 1
# }
# baseflowRunoff[t] <- baseflowCoefficient * groundwater[t]
# groundwater[t] <- groundwater[t] - baseflowRunoff[t]
# soilET[t] <- min(soilMoistureStore[t], min(E[t] - interceptionET[t],
# soilMoistureFraction * CONST_FOR_SOIL_ET))
# soilMoistureStore[t] <- soilMoistureStore[t] - soilET[t]
# eventRunoff <- (1 - perviousFraction) * imperviousRunoff[t] +
# perviousFraction * (infiltrationXSRunoff[t] + interflowRunoff[t])
# totalET[t] = (1 - perviousFraction) * imperviousET[t] + perviousFraction * (interceptionET[t] + soilET[t]);
# totalRunoff <- eventRunoff + perviousFraction * baseflowRunoff[t]
# X[t] <- totalRunoff
# }
# aET <- totalET
# }
# # reset the missing values
# X[bad] <- NA
# if (return_state==T) {
# aET[bad] <- NA
# imperviousRunoff[bad] <- NA
# interceptionET[bad] <- NA
# infiltration[bad] <- NA
# throughfall[bad] <- NA
# infiltrationXSRunoff[bad] <- NA
# interflowRunoff[bad] <- NA
# infiltrationAfterInterflow[bad] <- NA
# soilET[bad] <- NA
# recharge[bad] <- NA
# soilInput[bad] <- NA
# baseflow[bad] <- NA
# soilMoistureStore[bad] <- NA
# groundwater[bad] <- NA
# }
# # pass on attributes
# attributes(X) <- inAttr
# ans <- X
# if (return_state==T) {
# attributes(aET) <- inAttr
# attributes(imperviousRunoff) <- inAttr
# attributes(interceptionET) <- inAttr
# attributes(infiltration) <- inAttr
# attributes(throughfall) <- inAttr
# attributes(infiltrationXSRunoff) <- inAttr
# attributes(interflowRunoff) <- inAttr
# attributes(infiltrationAfterInterflow) <- inAttr
# attributes(soilET) <- inAttr
# attributes(recharge) <- inAttr
# attributes(soilInput) <- inAttr
# attributes(baseflow) <- inAttr
# attributes(soilMoistureStore) <- inAttr
# attributes(groundwater) <- inAttr
# ans <- cbind(U=X,aET=aET, throughfall = throughfall,
# imperviousRunoff = imperviousRunoff,
# interceptionET = interceptionET,
# infiltration = infiltration,
# infiltrationXSRunoff = infiltrationXSRunoff,
# interflowRunoff = interflowRunoff,
# infiltrationAfterInterflow = infiltrationAfterInterflow,
# soilInput = soilInput,
# soilMoistureStore = soilMoistureStore,
# recharge = recharge,
# groundwater=groundwater,
# soilET = soilET,
# baseflow = baseflow)
# }
# ans
# }
# Routing based on Muskinghum
## Muskingum channel routing for SimHyd.
##
## Routes the runoff series U through a linear Muskingum storage:
##     X[t+1] = C0*U[t+1] + C1*U[t] + C2*X[t]
## When the coefficients are outside their stable region the routing
## degenerates to a pure one-step lag (C0 = 0, C1 = 1, C2 = 0).
##
## Arguments:
##   U       - runoff series to route (time-series attributes preserved)
##   DELAY   - Muskingum K, storage delay time (time steps)
##   X_m     - Muskingum x, inflow/outflow weighting (0-0.5)
##   epsilon - absolute values below this are rounded down to zero
##   return_components - kept for interface compatibility; this routing
##       has a single component, so the same series is returned either way
## Value: the routed series, with the attributes of the input U.
simhydrouting.sim <- function(U, DELAY = 1, X_m = 0.2,
                              epsilon = hydromad.getOption("sim.epsilon"),
                              return_components = FALSE) {
    X <- rep(0, length(U))
    inAttr <- attributes(U)
    U <- as.ts(U)
    ## treat missing inflows as zero; restore NA afterwards
    bad <- is.na(U)
    U[bad] <- 0
    ## Muskingum coefficients are numerically stable only when
    ## 2*K*x < 1 < 2*K*(1-x); otherwise fall back to a pure lag.
    ## (&& rather than & -- this is a scalar condition)
    if (2 * DELAY * X_m < 1 && 2 * DELAY * (1 - X_m) > 1) {
        C0 <- (-DELAY * X_m + 0.5) / (DELAY * (1 - X_m) + 0.5)
        C1 <- (DELAY * X_m + 0.5) / (DELAY * (1 - X_m) + 0.5)
        C2 <- (DELAY * (1 - X_m) - 0.5) / (DELAY * (1 - X_m) + 0.5)
    } else {
        C0 <- 0; C1 <- 1; C2 <- 0
    }
    ## Mass balance requires C0 + C1 + C2 == 1.  Compare with a tolerance:
    ## the previous exact '!= 1' test could be tripped by floating-point
    ## round-off and silently disable the Muskingum routing.
    if (abs(C0 + C1 + C2 - 1) > sqrt(.Machine$double.eps)) {
        C0 <- 0; C1 <- 1; C2 <- 0
    }
    X[1] <- U[1]
    ## seq_len() (not 1:(n-1)) so a length-1 series does not error
    for (t in seq_len(length(U) - 1L)) {
        X[t + 1] <- C0 * U[t + 1] + C1 * U[t] + C2 * X[t]
    }
    ## zero out numerical noise, restore missing values and attributes
    X[abs(X) < epsilon] <- 0
    X[bad] <- NA
    attributes(X) <- inAttr
    X
}
#define ranges of parameters
## Feasible parameter ranges for the Chiew et al. SimHyd variants.
## The 2002 and 2009 structures share one parameter set, so the two
## names are aliases for the same function.
simhyd_C2002.ranges <- function() {
    list(INSC   = c(0, 50),     # interception store capacity (mm)
         COEFF  = c(0.0, 400),  # maximum infiltration loss
         SQ     = c(0, 10),     # infiltration loss exponent
         SMSC   = c(1, 500),    # soil moisture storage capacity
         SUB    = c(0.0, 1),    # interflow proportionality constant
         CRAK   = c(0.0, 1),    # groundwater recharge proportionality constant
         K      = c(0.0, 1),    # baseflow linear recession parameter
         etmult = c(0.01, 1))   # PET conversion multiplier
}
simhyd_C2009.ranges <- simhyd_C2002.ranges
## Feasible parameter ranges for the Muskingum routing component.
simhydrouting.ranges <- function() {
    list(DELAY = c(0.1, 5),     # Muskingum K (storage delay, time steps)
         X_m   = c(0.01, 0.5))  # Muskingum x (inflow/outflow weighting)
}
## Feasible parameter ranges for the eWater SimHyd variant, which adds
## an impervious-area threshold and a pervious fraction to the shared
## Chiew et al. parameter set.
simhyd_eWater.ranges <- function() {
    list(impTh  = c(1, 5),      # impervious-area ET threshold (mm)
         pFrac  = c(0, 1),      # pervious fraction of the catchment
         INSC   = c(0, 50),     # interception store capacity (mm)
         COEFF  = c(0.0, 400),  # maximum infiltration loss
         SQ     = c(0, 10),     # infiltration loss exponent
         SMSC   = c(1, 500),    # soil moisture storage capacity
         SUB    = c(0.0, 1),    # interflow proportionality constant
         CRAK   = c(0.0, 1),    # groundwater recharge proportionality constant
         K      = c(0.0, 1),    # baseflow linear recession parameter
         etmult = c(0.01, 1))   # PET conversion multiplier
}
# simhyd_MJeq.ranges <- function()
# list(rainfallInterceptionStoreCapacity = c(0, 5),
# infiltrationCoefficient = c(0, 400),
# infiltrationShape = c(0, 10),
# soilMoistureStoreCapacity = c(1, 500),
# interflowCoefficient = c(0, 1),
# rechargeCoefficient = c(0, 1),
# baseflowCoefficient = c(0, 1),
# perviousFraction = c(0, 1),
# imperviousThreshold = c(0, 5))
|
7afb63d31330d70e6337ea63134c4e95dd43f45c
|
ce9039377b71f5f71981223379435a722a961e5e
|
/R/dive_place.R
|
fc2b871fd86b98085b9163c7092bc0cbb0912cc4
|
[] |
no_license
|
hareshsuppiah/SwimmeR-1
|
4cbc1023dd56aba1b0a06fc70b08a3d62f3022b1
|
6466ac3aeb1096d5654bbb4dc31f71b8bc013475
|
refs/heads/master
| 2023-07-17T05:33:53.593892
| 2021-08-14T19:00:02
| 2021-08-14T19:00:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
dive_place.R
|
#' Adds places to diving results
#'
#' Places are awarded on the basis of score, with highest score winning. Ties
#' are placed as ties (both athletes get 2nd etc.)
#'
#' @importFrom stringr str_detect
#' @importFrom stringr str_to_lower
#' @importFrom dplyr slice
#' @importFrom dplyr ungroup
#' @importFrom dplyr group_by
#' @importFrom dplyr mutate
#' @importFrom dplyr filter
#' @importFrom dplyr desc
#'
#' @param df a data frame with results from \code{swim_parse}, including only
#'   diving results (not swimming)
#' @param max_place highest place value that scores
#' @return data frame modified so that places have been appended based on diving
#'   score
#'
#' @seealso \code{dive_place} is a helper function used inside of
#'   \code{results_score}
dive_place <- function(df, max_place) {
  df <- df %>%
    # keep diving events only; str_detect() already returns a logical, so
    # the former "== TRUE" comparison was redundant.  str_to_lower() and
    # desc() are now namespace-qualified for consistency with every other
    # call in this chain.
    dplyr::filter(stringr::str_detect(stringr::str_to_lower(Event), "diving")) %>%
    dplyr::group_by(Event, Name) %>%
    dplyr::slice(1) %>% # first instance of every diver
    dplyr::ungroup() %>%
    dplyr::group_by(Event) %>%
    dplyr::mutate(Finals_Time = as.numeric(Finals_Time)) %>%
    dplyr::mutate(
      # rank descending scores so the highest score gets place 1;
      # ties.method = "min" makes tied athletes share the better place
      Place = rank(dplyr::desc(Finals_Time), ties.method = "min"),
      Finals_Time = as.character(Finals_Time)
    ) %>%
    dplyr::filter(Place <= max_place)
  # NOTE(review): the returned data frame is still grouped by Event --
  # confirm that callers (results_score) expect a grouped result.
  return(df)
}
|
300257e3a55e2efc9710ba6f5dca84494a13bc2c
|
7cc82f192fdc7dcc7b0b021fb20d30e036ca66b5
|
/site-lisp/xml-http-request/docs/NEWS.rd
|
7825acd2b4348da0a8e059e16a17642372775a29
|
[
"LicenseRef-scancode-nysl-0.9982",
"ICU",
"LicenseRef-scancode-other-permissive",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
hutoiti/xyzzy_config
|
784a678e07b24f49b64651c472e69842e3073483
|
0c8026f0b41145bd77911f0eb41b19e822f32ccf
|
refs/heads/master
| 2021-01-10T19:10:38.745218
| 2012-10-28T07:31:30
| 2012-10-28T07:31:30
| null | 0
| 0
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 3,489
|
rd
|
NEWS.rd
|
=begin
=== 2008-07-12 / 1.2.1
xml-http-request 1.2.1 リリース!
: 新規機能
* なし
: 非互換を含む変更点
* なし
: バグ修正
* なし
: その他
* ライセンスファイルを同梱
=== 2008-03-30 / 1.2.0
xml-http-request 1.2.0 リリース!
: 新規機能
* 各リクエストメソッドに basic-auth 引数を追加しました。
Basic 認証のためのユーザ情報とパスワードを指定します。
(xhr-get "http://foo.com" :basic-auth (xhr-credential "user" "password"))
: 非互換を含む変更点
* basic-auth 引数を指定せずに Basic 認証が必要な URI に接続した場合
認証情報を入力するダイアログが表示されます。
1.0.0 〜 1.1.1 では認証ダイアログは表示されません。
0.1 では表示されます。
: バグ修正
* 接続する URL の userinfo に認証情報を指定しても無視される問題を修正
(楓月さんによる報告)
(xhr-get "http://user:password@foo.com")
※ basic-auth 引数を指定した場合は URL の userinfo は無視されます。
=== 2008-03-03 / 1.1.1 / ひなまつり
xml-http-request 1.1.1 リリース!
: 新規機能
* post 以外のリクエスト関数に query と encoding キーワード引数を追加。
query string をリストで指定できます。
: 非互換を含む変更点
* なし
: バグ修正
* xml-http-request 1.1.0 で利用する XMLHttpRequest オブジェクトを
Msxml2.XMLHTTP.6.0 にこっそり更新していたが、
xyzzy との組み合わせに問題があったので Msxml2.XMLHTTP に戻した。
=== 2008-02-23 / 1.1.0
xml-http-request 1.1.0 リリース!
: 新規機能
* 各リクエスト関数に nomsg キーワード引数を追加。
* nomsg に non-nil を指定するとメッセージを出力しません。
* xhr-future-value に no-redraw と sleep キーワード引数を追加。
* no-redraw に non-nil を指定すると待ち合わせ中に画面の再描画を行いません。
* sleep に non-nil を指定すると待ち合わせ中に割り込みできないようにします。
: 非互換を含む変更点
* なし
: バグ修正
* なし
=== 2008-02-11 / 1.0.1 / 建国記念の日
xml-http-request 1.0.1 リリース!
: 新規機能
* (xhr-abort): 既に通信が終了していたら何もせず nil を返す、
通信を中断したなら t を返すようにした。
: 非互換を含む変更点
* (xhr-xxx-async): 戻り値に cancel-ticket を返すようにした。
cancel-ticket は xhr-abort に指定して通信を中断可能。
* (http-get, http-post): 非同期送信時は oledata を返す。
* xhr-xxx-future に指定した key 関数の中でエラーが発生した場合、
xhr-future-value した時点でエラーを通知。
: バグ修正
* なし
=== 2008-02-11 / 1.0.0 / 建国記念の日
xml-http-request 1.0.0 リリース!
: 新規機能
* リニューアル
* Future パターンのサポート
* イベントハンドラのマクロ化
: 非互換を含む変更点
* xml-http-request 0.1 との互換層を用意しているので基本的には動くはずです。
: バグ修正
* たぶんなし
=== 2006-06-13 / 0.1
xml-http-request 0.1 リリース!
=end
|
02275f426afbdfaf68491fd8065cccbf611931bb
|
301a3c7037c9e2b4e2ee5b2db3c6568a3392d1d1
|
/R-basic-examples.R
|
da1d26f029028db659e7b42efc6acfca4d50e4f4
|
[
"Apache-2.0"
] |
permissive
|
meg-codes/R-notes
|
ba9cf82152179826ad9f183fc4754038d21074ec
|
9c20cb71026a3b6ad70bb21fcb96848b0db5f896
|
refs/heads/master
| 2023-09-01T05:31:43.180426
| 2017-03-01T20:46:44
| 2017-03-01T20:46:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
r
|
R-basic-examples.R
|
# Ex. 1 - Setting objects to use as variables, then printing the result to the
# console
a <- 1
b <- 2
a+b
# Ex. 2 - Showing how to concatenate data of type character
first_name <- 'Benjamin'
last_name <- 'Hicks'
cat(first_name, last_name, sep=' ')
# Ex. 3 - Demonstrating adding two numeric vectors of equal length
a <- c(1, 2, 3)
b <- c(1, 2, 3)
a + b
# Ex. 4 - Demonstrating implicit coercion of the numeric values (1, 3.4)
# to character in a vector containing a character element ("aardvark")
a <- c(1, "aardvark", 3.4)
class(a)
a
# Ex. 5 - Demonstrating explicit coercion/type conversion
a <- as.numeric("3.4")
class(a)
# Ex. 6 - Demonstrating lists and their ability to hold more than one type of item
test_list <- list('a', c(1,3), c(2,4))
# This uses subset notation ([[ ]]) to pick the second and third vectors of
# the list, then [ ] to pick the first element within each.
test_list[[2]][1] + test_list[[3]][1]
|
5594c7b02e4cdf5ad136559aca11287b67bc43f4
|
bb675cb410cbba93fc1d43cc9e859aac999f2a4a
|
/Session5_StatisticalConsiderations/Clukay_Genostats_Session.r
|
9b2758804cc3ac2338914a45ab501c2b5fb13493
|
[] |
no_license
|
rAntonioh/AAAGs_2018
|
b0263f0d7985b2e0049d693af1606285bace005e
|
2656c0e9349b056faf3b86b52703c432a573c2b9
|
refs/heads/master
| 2020-03-24T20:10:05.679427
| 2018-08-02T17:57:28
| 2018-08-02T17:57:28
| 142,962,882
| 2
| 0
| null | 2018-07-31T04:37:59
| 2018-07-31T04:37:59
| null |
UTF-8
|
R
| false
| false
| 11,296
|
r
|
Clukay_Genostats_Session.r
|
############################################################################################################
########                       Part 0: Data Read-in                                                 ########
############################################################################################################
# Genotype calls, SNP annotation and phenotypes; row.names = 1 uses the
# first CSV column as row names (presumably sample IDs -- confirm files).
rawgenos <- read.table(file="Consolidated Genotypes.csv", header = TRUE, sep=",", row.names = 1)
snpinfo <- read.table(file="SNPinfo.csv", header = TRUE, sep=",")
pheno <- read.table(file="pheno.csv", header = TRUE, sep=",",row.names = 1)
############################################################################################################
########                   Part 1: mini-GWAS and Basic Bonferroni Correction                        ########
############################################################################################################
######Package install######
#Answer y when asked whether to install from a source that needs compilation
#NOTE(review): install.packages() inside a script re-installs on every run;
#consider guarding with requireNamespace().
install.packages("SNPassoc")
library(SNPassoc)
######format data for SNPassoc (similar to PLINK format)######
#Phenotype columns first, then genotype columns (from column 4 onwards)
geno_pheno<-cbind(pheno,rawgenos)
org_geno<-setupSNP(geno_pheno, 4:ncol(geno_pheno), sort = TRUE, snpinfo, sep = "")
#Check that setup worked (wouldn't recommend doing this w/o row specification)
summary(org_geno[,4:8])
plot(org_geno,which=13)
######Run the mini-GWAS######
#Subset to the first 1000 columns (phenotypes + first SNPs) to keep it fast
suborg_geno<-org_geno[,1:1000]
start_time <- Sys.time()
#This next line of code is all you really need, start and end times are just for reference.
#You purposely leave the SNP out of the equation as part of how this function works.
miniGWAS<-WGassociation(birthweight_kg~1, suborg_geno, model = "codominant", genotypingRate = 80)
end_time <- Sys.time()
end_time - start_time
#Bonferroni correction at alpha = 0.05 over all tested SNPs
Bonferroni.sig(miniGWAS, model = "codominant", alpha = 0.05)
plot(miniGWAS)
#BONUS: If you wanted to include covariates, you would simply replace the 1 in the minigwas
#with the name of the covariate. If you want to try you can add the covariate 'trauma' to
#the model, which I've included in column 3 of suborg_geno if you examine it.
############################################################################################################
########                         Part 2: Testing Using Simple M                                     ########
############################################################################################################
#Useful citations for this:
#Gao X, Starmer J, and Martin ER. (2008). A multiple testing correction method for genetic
#association studies using correlated single nucleotide polymorphisms. Genetic Epidemiology,
#32, 361-369.
#Johnson RC, Nelson GW, Troyer JL, Lautenberger JA, Kessing BD, Winkler CA, and O'Brien SJ.
#(2010). Accounting for multiple comparisons in a genome-wide association study (GWAS).
#BMC Genomics, 11, 724-724.
######Convert the genotypes to numeric values######
#as.numeric() on each column converts factor level codes to 1..k; the -1
#shifts them to start at 0.  NOTE(review): this assumes every genotype
#column is a factor whose level order matches allele counts -- confirm.
mgenos<-rawgenos
for(n in 1:ncol(mgenos)){
mgenos[,n]<-as.numeric(mgenos[,n])-1
}
######Compute the Composite Linkage Disequilibrium Score######
#The commented out line will likely not work with a larger dataset due to lack of RAM.
#However, I am including it for illustration purposes as its the first thing many think of
#and even the papers above recommend using this function, but don't adress this difficulty.
#compld<-cor(mgenos)
#In light of this, you usually need to break the data up into ~5,000 SNP chunks.
#Ideally, you would do this based on haplotype blocks with haploview, but since our
#data is small, we'll just separate it by chromosome. We'll do chromosome 10 as an example.
n<-10
chromsnps<-snpinfo$Chromosome == n
#If you want to know how many SNPs that is use sum(chromsnps ==TRUE)
#Pairwise correlations of genotype dosages, complete observations only
compld<-cor(mgenos[,chromsnps], use= "complete.obs")
#Always a good idea to inspect the matrix
compld[1:10,1:10]
#There are snps without enough variance so they'll need to be removed. The fact they have
#so little variance means they couldn't be tested anyway meaning they don't contribute to
#the multiple testing burden.
newcompld<-compld[is.na(compld[,1]) == FALSE, is.na(compld[,1]) == FALSE]
#Calculate the Eigenvalues (only.values skips the eigenvectors, saving time)
eigns<-eigen(newcompld, only.values = TRUE)
#We now need to add the eigenvalues together until we reach 99.5% of the variance and count
#how many that takes. The total variance in this case is the sum of the number of variables.
#Keep in mind this number is going to be very low because we have so few samples. With a
#normal genomics study you would want many more and you'd need to worry about things like
#population structure, which we ignored in our miniGWAS, but are likely reflected here.
thresholdvar<-ncol(newcompld)*.995
eigentotal<-0
counter<-0
#counter ends as the smallest k with sum of the first k eigenvalues >= threshold
for (v in 1:length(eigns$values)){
if(eigentotal<thresholdvar){
counter<-counter + 1
eigentotal<-eigentotal + eigns$values[v]
}
}
print(counter)
#This would then be repeated for each chromosome and you would add the values together
#to get the total number of tests to apply Bonferroni correction. Try it with chromosome 1
#and be sure to inspect the correlation matrix before removing bad SNPs as there is a trap!
############################################################################################################
########                          Part 3: Basic FDR Correction                                      ########
############################################################################################################
#Extract the p-values from our earlier mini-GWAS
pvaladd<-codominant(miniGWAS)
#Check if FDR is appropriate: FDR assumes a roughly uniform null p-value
#distribution with an excess near zero
hist(pvaladd, breaks = 20)
#Since the histogram is relatively uniform, by strictest standards we shouldn't be using
#FDR here, but we'll keep going for demonstration purposes
qvals<-p.adjust(pvaladd, "fdr")
#Since we only did 1000 SNPs, this is small enough to look at directly.
#(xlim = 0:1 is equivalent to c(0, 1) here)
hist(qvals, xlim = 0:1)
#Clearly there is nothing significant in this case, which is to be expected based on the
#histogram and sample size. Just to confirm:
sum(qvals < 0.05, na.rm = TRUE)
############################################################################################################
########             Part 4: Empirical P-values using Reshuffling (Permutation Testing)             ########
############################################################################################################
#We're going to use a method called max(T) permutation where we take the lowest p-value from
#testing each reshuffle against the simulated phenotype. Not necessarily the most efficient,
#but certainly valid under the majority of circumstances.
######Simulate p-values and generate empirical ones######
n_sims<-40
simoutputGWAS<-NULL
#system timer so we can see how long it takes
start_time_sim <- Sys.time()
for (n in 1:n_sims){
#Create a new object to simulate the dataset (might need to modify original if larger)
sim_suborg_geno<-suborg_geno
#Shuffle the phenotype row order using the sample function
newrowindex<-sample(nrow(sim_suborg_geno), nrow(sim_suborg_geno))
#Use the new row order to assign phenotypes; this breaks any true
#genotype-phenotype association while keeping the LD structure intact
sim_suborg_geno[,1]<-as.data.frame(as.matrix(sim_suborg_geno[newrowindex,1]))
#Run the mini-GWAS on the shuffled data
sim_miniGWAS<-WGassociation(birthweight_kg~1, sim_suborg_geno, model = "codominant", genotypingRate = 80)
#Extract the pvalues
sim_pvals<-codominant(sim_miniGWAS)
#Take the smallest p-value and save it (the max(T) statistic)
simoutputGWAS[n]<-min(sim_pvals, na.rm = TRUE)
}
#Generate empirical p-values: for each SNP, the fraction of permutations
#whose minimum p-value was at least as small as the observed one.
#NOTE(review): the loop variable 'c' shadows base::c() inside the loop
#body (function lookup still works, but it is easy to misread).
permutedpvals<-NULL
for (c in 1:length(pvaladd)){
n_nonsig<-sum(pvaladd[c]>=simoutputGWAS, na.rm = TRUE)
permutedpvals[c]<-n_nonsig/n_sims
}
#Check total time it took
end_time_sim <- Sys.time()
end_time_sim - start_time_sim
######See how many SNPs are 'significant'######
#See how many SNPs are significant
sum(permutedpvals < 0.05, na.rm = TRUE)
#Get SNP IDs and info
sig_snps<-which(permutedpvals < 0.05)
sig_snpIDs<-colnames(suborg_geno)[sig_snps]
snpinfo[snpinfo$dbSNP.RS.ID %in% sig_snpIDs,]
#NOTE: %in% compares something to a vector like == compares to a value
#Permutation gets much more complicated with covariates and you need a lot more planning.
#If interested in this, look up the 'BiasedUrn' package and literature related to it.
############################################################################################################
########                         Part 5: Basic Parallel Computing                                   ########
############################################################################################################
#Detecting the number of cores and setting them up for use.
#NOTE(review): the cluster is never shut down in this script; call
#stopCluster(cl) when finished to release the worker processes.
library(doParallel)
library(foreach)
ncore<-detectCores()
cl<-makeCluster(as.numeric(ncore))
registerDoParallel(cl)
######Define the permutation reshuffles we did as its own function######
## Run n_sims max(T) permutations on one worker: reshuffle the phenotype
## column of genodata, rerun the association scan, and keep the smallest
## p-value from each reshuffle.
##
## Arguments:
##   genodata - setupSNP data with the phenotype in column 1 and
##              genotypes from column 4 onwards (as built in Part 1)
##   n_sims   - number of permutations to run on this worker
## Value: numeric vector of length n_sims of per-permutation minimum
## p-values.
permshuffle<-function(genodata, n_sims) {
  ## pre-allocate the result (previously grown element-by-element from
  ## NULL); seq_len() also makes n_sims = 0 return numeric(0) instead of
  ## erroring via 1:0
  simoutputGWAS <- numeric(n_sims)
  for (n in seq_len(n_sims)) {
    ## copy so the caller's object is never modified
    sim_suborg_geno <- genodata
    ## shuffle the phenotype row order, breaking any true
    ## genotype-phenotype association
    newrowindex <- sample(nrow(sim_suborg_geno), nrow(sim_suborg_geno))
    sim_suborg_geno[,1] <- as.data.frame(as.matrix(sim_suborg_geno[newrowindex,1]))
    ## association scan against the shuffled phenotype
    sim_miniGWAS <- WGassociation(birthweight_kg~1, sim_suborg_geno, model = "codominant", genotypingRate = 80)
    sim_pvals <- codominant(sim_miniGWAS)
    ## record the single smallest p-value (the max(T) statistic)
    simoutputGWAS[n] <- min(sim_pvals, na.rm = TRUE)
  }
  simoutputGWAS
}
######Run the function across multiple cores######
#Start time
start_time_parasim <- Sys.time()
#Split the simulations among each core and combine the results together
#(I changed n_sims because the times 4 means it will end up being 40 and
#that splits it evenly across 4 cores).
parallelsimoutput<-foreach(times(4), .combine = "c", .packages="SNPassoc") %dopar% permshuffle(suborg_geno, n_sims=10)
#Generate empirical p-values by comparing to the simulated ones.
#NOTE(review): the divisor 40 is hard-coded as times(4) * n_sims = 10;
#keep it in sync if either value changes (length(parallelsimoutput)
#would be safer).
parallelpermutedpvals<-NULL
for (c in 1:length(pvaladd)){
n_nonsig<-sum(pvaladd[c]>=parallelsimoutput, na.rm = TRUE)
parallelpermutedpvals[c]<-n_nonsig/40
}
#Check total time it took
end_time_parasim <- Sys.time()
end_time_parasim - start_time_parasim
######See how many SNPs are 'significant'######
#See how many SNPs are significant
sum(parallelpermutedpvals < 0.05, na.rm = TRUE)
#Get SNP IDs and info
sig_snps<-which(parallelpermutedpvals < 0.05)
sig_snpIDs<-colnames(suborg_geno)[sig_snps]
snpinfo[snpinfo$dbSNP.RS.ID %in% sig_snpIDs,]
#This version of the code is designed mainly for someone with 4 cores, but depending on
#how many your setup may have available, other configurations might be better. Feel free
#to play with the times() and n_sims options in the foreach line to see how it affects
#computing time.
|
bac505951e1b0969483c3ade0bf8aea92ffc83c7
|
c988f7ae36884541bf17394e02baa07a4bb88a00
|
/man/docdb_update.Rd
|
6f410191365c78e4e85696cc4450cc2804df7b5c
|
[
"MIT"
] |
permissive
|
MhAmine/nodbi
|
05d2cbe80135882c58ec15d3431429627970e96a
|
42e06b4751b25360b85e7c8206de06f461778a73
|
refs/heads/master
| 2021-05-09T10:55:49.893028
| 2018-01-25T00:51:29
| 2018-01-25T00:51:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 697
|
rd
|
docdb_update.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update.R
\name{docdb_update}
\alias{docdb_update}
\title{Update documents}
\usage{
docdb_update(src, key, value, ...)
}
\arguments{
\item{src}{source object, result of call to src}
\item{key}{A key. See Details.}
\item{value}{A value}
\item{...}{Ignored}
}
\description{
Update documents
}
\details{
Note that with etcd, you have to prefix a key with a forward slash.
}
\examples{
\dontrun{
# CouchDB
src <- src_couchdb()
docdb_create(src, "mtcars2", mtcars)
docdb_get(src, "mtcars2")
mtcars$letter <- sample(letters, NROW(mtcars), replace = TRUE)
docdb_update(src, "mtcars2", mtcars)
docdb_get(src, "mtcars2")
}
}
|
7124fcfe2a37b73e80502cabb313bf98b196aed3
|
812720f93b43704a1bb00c16716c74e2e637fd4f
|
/man/poids.D.Rd
|
7cca7db6a61ebf92c8024f982dc2f65f49a5671f
|
[] |
no_license
|
cran/HAPim
|
9df01704bb002f166674d189790fc19a59ecc789
|
8b189df9b1547d74bfbad541ed2c1af88b18054f
|
refs/heads/master
| 2020-05-17T15:48:30.314265
| 2009-10-10T00:00:00
| 2009-10-10T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
rd
|
poids.D.Rd
|
\name{poids.D}
\alias{poids.D}
\title{poids.D}
\description{
This function calculates the probability of no recombination between loci for each Bennett's disequilibrium.
The function can be viewed as an internal function.
The user does not have to call it by himself.
}
\usage{
poids.D(dist.test, pos.QTL, res.structure)
}
\arguments{
\item{dist.test}{results provided by \code{distance.test()} function, list of numeric objects. }
\item{pos.QTL}{numeric value, interval of a test position}
\item{res.structure}{results provided by \code{structure.hap()} function, list of objects.}
}
\value{
The returned value is a list of numeric objects.
}
\references{publication to be submitted: C. Cierco-Ayrolles, S. Dejean, A. Legarra, H. Gilbert,
T. Druet, F. Ytournel, D. Estivals, N. Oumouhou and B. Mangin.
Combining linkage analysis and linkage disequilibrium for QTL fine mapping in animal pedigrees.}
\author{S. Dejean, N. Oumouhou, D. Estivals, B. Mangin }
\seealso{\code{\link{structure.hap}}, \code{\link{distance.test}} }
\keyword{models}
|
6298ce3236df0be6abf059daedb53107afa606d1
|
459199610ff49bd7bfe0cb0fe9d9b12cce9fc031
|
/man/rg_geom_equation.Rd
|
caf9db934299a4d2e3edff5f680af5ddef9ee8f3
|
[] |
no_license
|
sitscholl/Rgadgets
|
4e8e5c493adbc045264bb35db5b8fefcd4530231
|
b142de2c996f3c0773951e43813c8cf0713bf77d
|
refs/heads/master
| 2023-03-10T11:08:12.905825
| 2021-02-18T10:50:44
| 2021-02-18T10:50:44
| 275,093,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 857
|
rd
|
rg_geom_equation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_geoms.R
\name{rg_geom_equation}
\alias{rg_geom_equation}
\title{Adding model equation to ggplot. Wrapper around \link[ggpmisc]{stat_poly_eq} with standard options for label}
\usage{
rg_geom_equation(formula, elements = c("eq", "r2", "p"), sep = ", ", ...)
}
\arguments{
\item{formula}{Formula, Model formula}
\item{elements}{String or vector of strings, The parts of the model statistics that should be included in the output}
\item{sep}{String, separator between model elements}
\item{...}{Further parameters passed on to \link[ggpmisc]{stat_poly_eq}
one of index or name.}
}
\description{
Adding model equation to ggplot. Wrapper around \link[ggpmisc]{stat_poly_eq} with standard options for label
}
\examples{
}
\keyword{equation}
\keyword{ggplot,}
\keyword{model,}
|
265df7e5484bd58c71cfc276e2c9b909caa9903c
|
44e47ad78f8c4588a4b0dc5813a79eda6fa04a24
|
/src/labelerApp/shinyHelpers.R
|
a55c6fde6f13233f0c7212ced97f577018f27a50
|
[
"MIT"
] |
permissive
|
vrodriguezf/ESWA-2017
|
895ec5e66e1592b9bef667e3a040e8b02846a5c0
|
9778cf54724b6c55f68dfe77bbfc206aab769730
|
refs/heads/master
| 2021-06-18T05:13:37.510292
| 2017-06-30T16:34:41
| 2017-06-30T16:34:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,579
|
r
|
shinyHelpers.R
|
shinyHelper.simulationTS <- function(simSeries,
                                     simulationID,
                                     measureResolution,
                                     measures = "all",
                                     incidentsDrawing=TRUE,
                                     markExecutionStartTime=FALSE,
                                     customXLim = NULL) {
  # Plot the performance-measure time series of one simulation.
  #
  # Args:
  #   simSeries          - named list; simSeries[[simulationID]] is a list
  #                        of numeric time series, one per measure
  #   simulationID       - id of the simulation to plot
  #   measureResolution  - sampling period used to convert time-step
  #                        indices into elapsed time
  #   measures           - "all" (any case) to plot every measure, or a
  #                        character vector of measure names
  #   incidentsDrawing   - draw red dashed lines at incident times?
  #   markExecutionStartTime - draw a green dashed line at monitoring start?
  #   customXLim         - optional x-axis limits passed to xlim()
  #
  # FIX: the default for `measures` is "all" (lower case) but the
  # original comparisons tested `measures == "ALL"`, so the default
  # never reached the "plot everything" branches; `==` combined with
  # `||` on a length>1 `measures` vector also errors on R >= 4.3.
  # `show_all` resolves both, case-insensitively.
  show_all <- length(measures) == 1 && toupper(measures[[1]]) == "ALL"
  # Facet labeller: map internal series names to short display labels.
  # NOTE(review): this labeller signature is deprecated in recent
  # ggplot2; consider as_labeller() when upgrading.
  metric_labeller <- function (variable, value) {
    metric_names <- list(
      'scoreTS' = "S",
      'agilityTS' = "A",
      'attentionTS'= "At",
      'cooperationTS' = "C",
      'aggressivenessTS' = "Ag",
      'precisionTS' = "P"
    )
    return(metric_names[value])
  }
  TSList <- simSeries[[simulationID]]
  # Long format: Var1 = time-step index, Var2 = measure name
  aux <- melt(as.matrix(data.frame(TSList)))
  aux$Var1 <- ((aux$Var1 -1)*measureResolution) #From time steps to elapsed time
  #Incidents data
  if (incidentsDrawing) {
    incidents <- dataHelper.getSimulationIncidentSnapshots(simulationID)
  }
  #Simulation monitoring real start time
  if (markExecutionStartTime) {
    firstSimulationSpeedUp <- dataHelper.getMonitoringStartTime(simulationID)
  }
  p <- ggplot(data = aux, mapping = aes(x = Var1, y = value, group=1))
  # Facet one panel per measure when three or more are shown
  if (show_all || length(measures) > 2) {
    p <- p + facet_grid(Var2~., scales="free_x", labeller = metric_labeller)
  }
  for (i in seq_along(TSList)) {
    if (show_all || names(TSList[i]) %in% measures) {
      p <- p + layer(data= aux %>% filter(Var2==names(TSList)[i]),
                     geom = c("line"),
                     stat = "identity",
                     position = "identity") +
        ylim(c(0,1))
      if (!is.null(customXLim)) p <- p + xlim(customXLim)
      if (incidentsDrawing) {
        p <- p + geom_vline(xintercept=incidents$elapsedRealTime, colour="red",linetype="longdash")
      }
      #Draw a vertical line where the simulation time is accelerated (simulation start)
      if (markExecutionStartTime) {
        p <- p + geom_vline(xintercept=firstSimulationSpeedUp, colour="green",linetype="longdash")
      }
    }
  }
  # NOTE(review): the label says "ms" but the Var1 conversion above was
  # commented as seconds - confirm the unit of measureResolution.
  p <- p + labs(x="Time (ms)")
  if (show_all || length(measures) > 1) {
    p <- p + labs(y="Performance Measure [0,1]")
  } else {
    p <- p + labs(y=paste(measures[[1]],"[0,1]"))
  }
  p <- p + theme(axis.text = element_text(size=24),
                 axis.title=element_text(size=27),
                 strip.text = element_text(size=27))
  p
}
##
# Narrow a named list of simulations down to `simsCount` entries:
# every id in `fixedSimIds` is kept, and the remainder is filled with
# a random draw from the other simulations.
##
shinyHelpers.narrowSimSeries <- function (originalSimSeries, simsCount = 20, fixedSimIds = c()) {
  # How many extra simulations we still need beyond the fixed ones
  n_random <- simsCount - length(fixedSimIds)
  # Candidates are all simulations not already fixed
  candidates <- originalSimSeries[!(names(originalSimSeries) %in% fixedSimIds)]
  drawn <- sample(candidates, size = n_random)
  c(originalSimSeries[fixedSimIds], drawn)
}
##
# Fetch the complete human ground-truth collection from the
# DroneWatchAndRescue MongoDB instance and return it as a tibble.
# TODO: Add connection params?  (host/credentials are hard-coded below)
##
shinyHelper.loadHumanGroundTruth <- function() {
  mongo_host <- "savier.ii.uam.es"
  mongo_db <- "DroneWatchAndRescue"
  # Fully-qualified namespace: "<db>.<collection>"
  collection_ns <- paste(mongo_db, "humanGroundTruth", sep=".")
  conn <- mongo.create(host = mongo_host, db = mongo_db, username = "dwr", password = "dwr")
  ground_truth <- mongo.find.all(mongo = conn, ns = collection_ns, data.frame = TRUE) %>% as_data_frame()
  # Close the connection before returning
  mongo.destroy(conn)
  return(ground_truth)
}
#
# Filter the raw human ground truth down to usable labels.  Keeps only
# rows that (a) were not produced by the "Test" labeler, (b) reference
# two simulations present in `validSimSeries`, and (c) were created
# after 2016-09-16 (creation time recovered from the Mongo ObjectId in
# the `_id` column).
#
# Input:  df             - human groundtruth, as it comes from the database
#         validSimSeries - named list of valid simulations (names = sim ids)
# Output: filtered tibble of ground-truth rows
#
shinyHelper.filterHumanGroundTruth <- function(df,validSimSeries) {
  df %>%
    filter(
      labeler!="Test",
      sim1 %in% names(validSimSeries),
      sim2 %in% names(validSimSeries)
    ) %>%
    # rowwise: mongo.oid.from.string() is not vectorised, so the date
    # check below has to run one row at a time.
    rowwise() %>%
    filter(
      difftime(mongo.oid.time(mongo.oid.from.string(`_id`)),make_datetime(year=2016,month=9L,day=16L)) > 0
    ) %>%
    ungroup() %>%
    as_data_frame()
}
##
# Count the distinct evaluated "showdowns": unique
# (simulation-pair, performance-measure) combinations in the current
# filtered human ground truth.  Pairs are normalised with pmin/pmax so
# (A, B) and (B, A) count as the same showdown.
##
shinyHelper.calculate_evaluatedShowdowns <- function(validSimSeries) {
  #Load and filter current human ground truth
  humanGroundTruth <- shinyHelper.loadHumanGroundTruth() %>%
    shinyHelper.filterHumanGroundTruth(validSimSeries = validSimSeries)
  # The row count of the grouped summary is the number of distinct
  # sim1-sim2-performanceMeasure combinations found.
  humanGroundTruth %>%
    mutate(sim1_ = pmin(sim1,sim2), sim2_ = pmax(sim1,sim2)) %>%
    group_by(sim1_,sim2_,performanceMeasure) %>%
    dplyr::summarise(n()) %>%
    nrow
}
|
4d1684e18b080f6016f0f3e5a3368650fec23a3c
|
057ac9d20c897349eacfb47405136bdb8e0e071f
|
/man/instantData-class.Rd
|
176483ea69e119be12f2dba273e516b268eeb5a7
|
[] |
no_license
|
cran/portfolioSim
|
a588b186a93b60474b2b24c50d2c3887d4cde85a
|
30dd4328db735ab0226e9ddd04f9b70f0df0a2e8
|
refs/heads/master
| 2020-05-17T11:25:32.863031
| 2013-07-08T00:00:00
| 2013-07-08T00:00:00
| 17,698,675
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
rd
|
instantData-class.Rd
|
\name{instantData-class}
\docType{class}
\alias{instantData-class}
\alias{saveOut,instantData,character,missing,character,character,logical-method}
\title{Class "instantData"}
\description{Contains cross-sectional simulation data that pertains to a single
instant in time, such as held positions and exposures.}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("instantData", ...)}.
}
\section{Slots}{
\describe{
\item{\code{instant}:}{Object of class \code{"orderable"} specifying
the instant to which the object pertains.}
\item{\code{equity.long}:}{Object of class \code{"numeric"}
containing the total market value of held long positions at this
instant. }
\item{\code{equity.short}:}{Object of class \code{"numeric"}
containing the total market value of held short positions at this
instant. }
\item{\code{size.long}:}{Object of class \code{"numeric"}
containing the total number of held long positions at this
instant. }
\item{\code{size.short}:}{Object of class \code{"numeric"}
containing the total number of held short positions at this
instant. }
\item{\code{holdings}:}{Object of class \code{"portfolio"}
    reflecting the portfolio held at this instant in the simulation. }
\item{\code{exposure}:}{Object of class \code{"exposure"} containing
exposures of the \code{holdings} portfolio to a set of factors at
this instant.}
}
}
\section{Methods}{
\describe{
\item{saveOut}{\code{signature(object = "instantData", type =
"character", fmt = "missing", out.loc = "character", name =
"character", verbose = "logical")}: save this object. Currently
only one format, binary .RData, is available, and so the
\code{fmt} parameter is missing here.}
}
}
\author{Jeff Enos \email{jeff@kanecap.com}}
\seealso{\code{\link{periodData-class}}
}
\keyword{classes}
|
2113529ba2458e4cea4371ddf3dbad84c3ab1dcf
|
11d7628851051881f1790e9e3929c4b9925593c7
|
/run_analysis.R
|
f6e8fae5384646ba2612facfed16524718a5515c
|
[] |
no_license
|
DongjunCho/Cleaning_Data_Course_Project_Coursera
|
8e941470ce1730d7c52e84cd4f00c66e085580d1
|
7a2794a0a46c0c45aab26cca8f81547eaebb1376
|
refs/heads/master
| 2022-11-07T16:33:59.564911
| 2020-06-28T02:26:40
| 2020-06-28T02:26:40
| 275,487,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,000
|
r
|
run_analysis.R
|
# run_analysis.R
# Downloads the UCI HAR smartphone data set, merges the test/train
# splits, keeps only mean/std measurements, applies descriptive names,
# and writes a tidy data set of per-subject/activity averages.
# dplyr provides %>%, group_by and summarise/across used in step 5.
library(dplyr)
#load package from url
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
filename <- "getdata-projectfiles-UCI HAR Dataset.zip"
path <- getwd()
#if the file doesn't exist, download it from the url
if(!file.exists(filename)){
  download.file(fileURL, file.path(path, filename))
}
#unzip the file
unzip(filename)
#read activity labels (id -> name)
activity<- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE, col.names = c("activityId", "activity"))
#read features (column names for the X_* files)
features<- read.table("UCI HAR Dataset/features.txt", header = FALSE, col.names = c("featureId", "feature"))
#read test data
test_subject<- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE, col.names = "subjectId")
test_y<-read.table("UCI HAR Dataset/test/y_test.txt", header = FALSE, col.names = "activityId")
test_x<-read.table("UCI HAR Dataset/test/x_test.txt", header = FALSE, col.names = features[,2])
#read train data
train_subject<- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE, col.names = "subjectId")
train_y<- read.table("UCI HAR Dataset/train/y_train.txt", header = FALSE, col.names = "activityId")
train_x<-read.table("UCI HAR Dataset/train/x_train.txt", header = FALSE, col.names = features[,2])
# 1. Merge: cbind the pieces within each split, then rbind the splits
test<- cbind(test_subject, test_y, test_x)
train<- cbind(train_subject, train_y, train_x)
dataSet<-rbind(test,train)
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
dataSet<- dataSet[,grep("subject|activity|mean|std", colnames(dataSet))]
# 3. Uses descriptive activity names to name the activities in the data set
dataSet$activityId<- factor(dataSet$activityId, levels = activity[,1], labels = activity[,2])
# 4. Appropriately labels the data set with descriptive variable names.
dataColNames <- colnames(dataSet)
dataColNames <- gsub("\\.", "", dataColNames)
dataColNames <- gsub("^t", "timeDomain", dataColNames)
dataColNames <- gsub("^f", "frequencyDomain", dataColNames)
dataColNames <- gsub("Acc", "Accelerometer", dataColNames)
dataColNames <- gsub("activityId", "activity", dataColNames)
dataColNames <- gsub("BodyBody", "Body", dataColNames)
dataColNames <- gsub("Freq", "Frequency", dataColNames)
dataColNames <- gsub("Gyro", "Gyroscope", dataColNames)
dataColNames <- gsub("Mag", "Magnitude", dataColNames)
dataColNames <- gsub("mean", "Mean", dataColNames)
dataColNames <- gsub("std", "StandardDeviation", dataColNames)
colnames(dataSet)<- dataColNames
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# FIX: the original piped the grouped data straight into
# `across(funs = mean)`; across() is only valid inside a dplyr verb and
# the funs= argument is defunct, so that code errors.
# summarise(across(...)) is the supported form.
dataMeans<- dataSet %>%
  group_by(subjectId, activity) %>%
  summarise(across(everything(), mean), .groups = "drop")
#Generate the text file for submission (only if not already present)
if(!file.exists("tidyData.txt")){
  file.create("tidyData.txt")
  write.table(dataMeans, file = "tidyData.txt", row.names = FALSE)
}
|
0d1117bed21f9002021b05df8144af2248db17f2
|
81af737c7f206593c900d3973d8d175dc50ac8af
|
/man/swapcheck.Rd
|
5a71fc266ea047e4f874fc6bf268f7264f4fbb26
|
[
"MIT"
] |
permissive
|
CambridgeAssessmentResearch/POSAC
|
df607fd2f1cc6cd0d86a24212f28f471cdb89fb2
|
6aaeecfc76e643bde973c613bcdef14588e0431a
|
refs/heads/master
| 2021-10-23T16:02:08.065883
| 2019-03-18T15:29:14
| 2019-03-18T15:29:14
| 109,018,236
| 0
| 0
|
MIT
| 2018-07-10T15:15:46
| 2017-10-31T15:50:29
|
R
|
UTF-8
|
R
| false
| true
| 2,234
|
rd
|
swapcheck.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swapcheck.R
\name{swapcheck}
\alias{swapcheck}
\title{Function to explore whether swapping the location of any two patterns may improve POSAC performance}
\usage{
swapcheck(X, Y, patmat, freqs)
}
\arguments{
\item{X}{The initial X values assigned to each pattern.}
\item{Y}{The initial Y values assigned to each pattern.}
\item{patmat}{A matrix of patterns of values. Each row of the matrix represents a distinct pattern with no duplicates. The columns are the variables defining the patterns.}
\item{freqs}{A vector of frequencies of each pattern. This should have the same length as nrow(patmat).}
}
\value{
The function returns a list with the following elements:
\describe{
\item{CurrentCorrect}{The percentage of pairs correctly mapped by the POSAC function to begin with.}
\item{BestSwap}{Details of the best possible swap including an updated per cent correct. This need not be above the current value.}
}
}
\description{
This function takes an existing set of X and Y estimates, a matrix of patterns, and the frequencies of the patterns and examines whether
swapping the (X,Y) coordinates of any two patterns may improve the criterion value that POSAC attempts to optimise.
As such this function can be used as one check of whether the results from the POSAC function itself are optimal.
}
\examples{
posac2=POSAC(CRASdata[,1:5],CRASdata[,6])
swapcheck(posac2$X,posac2$Y,posac2$patmat,posac2$freqs)
#no improvement - however it is possible to swap some incomparable patterns without damaging criteria
#showing how successive looking for swaps could be used as a (very poor) alternative algorithm
randX=rank(rnorm(nrow(CRASdata)))
randY=rank(rnorm(nrow(CRASdata)))
swap1=swapcheck(randX,randY,CRASdata[,1:5],CRASdata[,6])
swap1
#make the suggested swaps
randX=replace(randX,sort(as.numeric(swap1$BestSwap[1,1:2])),randX[as.numeric(swap1$BestSwap[1,1:2])])
randY=replace(randY,sort(as.numeric(swap1$BestSwap[1,1:2])),randY[as.numeric(swap1$BestSwap[1,1:2])])
#check for any more
swap2=swapcheck(randX,randY,CRASdata[,1:5],CRASdata[,6])
swap2
#and so on...
}
\keyword{Scalogram}
|
7e09ef241bb2dd9fae1bb588bb5fcf44a9133fc7
|
9fcc89d58b20b0b24f725e4f751f0e737307892b
|
/cachematrix.R
|
aa09710dccc10fbfd58cde86c00fb532cd8c9582
|
[] |
no_license
|
si48github/ProgrammingAssignment2
|
46530cf17ef353c1c03fcb1434243bd9ac1e9d04
|
da7c8d7d6feadf69d51bd528667a23cdcc6e467c
|
refs/heads/master
| 2020-12-11T09:06:27.907862
| 2015-04-26T17:24:27
| 2015-04-26T17:24:27
| 34,620,422
| 0
| 0
| null | 2015-04-26T16:33:24
| 2015-04-26T16:33:23
| null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##
## R programming assignment 2 requires a function to cache the inverse of a matrix
## Write a short comment describing this function
## Sunder Iyer comments
# Function makeCacheMatrix follows the same approach as makeVector
# It takes a matrix as the argument.
# It returns a special "matrix", which is a list containing setter/getter functions to
# - set the value of the matrix
# - get the value of the matrix
# - set the inverse of the matrix
# - get the inverse of the matrix
## Create a cache-enabled matrix wrapper.
##
## Returns a list of four accessor closures sharing one enclosing
## environment:
##   set(y)        - replace the stored matrix and clear the cached inverse
##   get()         - return the stored matrix
##   setinverse(i) - store a computed inverse in the cache
##   getinverse()  - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Assign in the enclosing environment; replacing the matrix
    # invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## If the inverse is already cached it is returned immediately (with a
## message); otherwise it is computed with solve(), stored in the cache
## for future calls, and returned.
## Note: solve() errors on singular matrices (det == 0); callers must
## supply an invertible matrix.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse and remember it for next time
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
cb4a3123186871a3e1c9421864a2ceb51065f6a8
|
a7c0df9746dbcb234326dcee25bef9ad58335721
|
/Spotify-Data-science-Exam_github.R
|
aae66ef289e9629c7de5df837816d80f1d14e120
|
[] |
no_license
|
nireshr/Spotify-Data-science-Exam
|
8c0998e9465dbeda5f2168f07fc5faf18e8937d0
|
9f122b39b2c64cce965fdd7d0bbf5fa629dde56b
|
refs/heads/master
| 2023-05-01T09:35:47.487390
| 2021-04-29T00:57:39
| 2021-04-29T00:57:39
| 362,647,340
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,349
|
r
|
Spotify-Data-science-Exam_github.R
|
# Package installation (one-time; consider commenting these out after
# the first run).
install.packages("funModeling")
install.packages("data.table")
install.packages("kableExtra")
install.packages("radarchart")
install.packages("fmsb")
install.packages("DT")
# FIX: plyr must be attached BEFORE dplyr/tidyverse.  In the original,
# plyr was loaded after dplyr, masking dplyr::summarise/mutate with the
# plyr versions, which ignore group_by() and silently break the grouped
# summaries used later in this script.
library(plyr)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(plotly)
library(corrplot)
library(factoextra)
library(funModeling)
library(DT)
library(data.table)
library(radarchart)
library(kableExtra)
library(fmsb)
# Get the Data
spotify_songs <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-01-21/spotify_songs.csv')
# Or read in with tidytuesdayR package (https://github.com/thebioengineer/tidytuesdayR)
# PLEASE NOTE TO USE 2020 DATA YOU NEED TO UPDATE tidytuesdayR from GitHub
# Either ISO-8601 date or year/week works!
# Install via devtools::install_github("thebioengineer/tidytuesdayR")
tuesdata <- tidytuesdayR::tt_load('2020-01-21')
tuesdata <- tidytuesdayR::tt_load(2020, week = 4)
spotify_songs <- tuesdata$spotify_songs
# ---------
# Data cleaning
# Summary statistics for every column of the raw data
summary(spotify_songs)
# Are there any missing values at all?
any(is.na(spotify_songs))
# How many missing values in total?
sum(is.na(spotify_songs))
# Drop tracks with a missing name, artist or album, then remove the id
# columns, which carry no analytical information.
spotify_songs_clean <- spotify_songs %>%
  filter(!is.na(track_name), !is.na(track_artist), !is.na(track_album_name)) %>%
  dplyr::select(-track_id, -track_album_id, -playlist_id)
# Revisit the cleaned dataset
summary(spotify_songs_clean)
dim(spotify_songs_clean)
# ---------
# Boxplots of all twelve audio features on one shared axis.
# Note: the features live on very different scales, so loudness, tempo
# and duration_ms dominate this plot.
boxplot(spotify_songs_clean$danceability,
        spotify_songs_clean$energy,
        spotify_songs_clean$key,
        spotify_songs_clean$loudness,
        spotify_songs_clean$mode,
        spotify_songs_clean$speechiness,
        spotify_songs_clean$acousticness,
        spotify_songs_clean$instrumentalness,
        spotify_songs_clean$liveness,
        spotify_songs_clean$valence,
        spotify_songs_clean$tempo,
        spotify_songs_clean$duration_ms,
        horizontal=TRUE)
# Danceability alone, on its own readable scale
boxplot(spotify_songs_clean$danceability, horizontal=TRUE)
# ---------
# Speechiness outliers: visualise, remove (boxplot rule, range = 4),
# and re-visualise.
#Boxplot including outliers
speech_with_outiner <-spotify_songs_clean %>%
  ggplot(aes(y = speechiness, coef = 4)) +
  geom_boxplot() +
  coord_flip() +
  labs(title = 'Speechiness, with outliers')
#Values flagged as outliers by the boxplot rule (whiskers at range = 4)
speechiness_outliers <- boxplot(spotify_songs_clean$speechiness,
                                plot = FALSE, range = 4)$out
playlist_songs_no_outliers <- spotify_songs_clean %>%
  filter(!speechiness %in% speechiness_outliers)
# without outlier boxplot
playlist_songs_no_outliers %>%
  ggplot(aes(y = speechiness)) +
  geom_boxplot(coef = 4) +
  coord_flip() +
  labs(title = 'Speechiness, outliers removed')
# FIX: the original compared length(<ggplot object>) with
# length(<tibble>) (which counts columns, not rows).  To report how
# many tracks were removed, compare the row counts of the data frames.
nrow(spotify_songs_clean) - nrow(playlist_songs_no_outliers)
# ---------
# Density of each audio feature, split by playlist genre.
colnames(spotify_songs_clean)
# Reorder columns so the nine numeric audio features sit in
# columns 12-20 (identifiers/metadata first).
spoti_new <- spotify_songs_clean [, c(1,2,3,4,5,6,7,8,11,13,16,9,10,12,14,15,17,18,19,20)]
colnames(spoti_new)
# Names of the nine audio-feature columns
feature_names <- names(spoti_new) [12:20]
# Long format -> one density panel per feature, coloured by genre
spotify_songs_clean %>%
  select(c('playlist_genre', feature_names)) %>%
  pivot_longer(cols = feature_names) %>%
  ggplot(aes(x = value)) +
  geom_density(aes(color = playlist_genre), alpha = 0.5) +
  facet_wrap(~name, ncol = 3, scales = 'free') +
  labs(title = 'Spotify Audio Feature Density - by Genre',
       x = '', y = 'density') +
  theme(axis.text.y = element_blank())
# ---------
# Radar chart of mean (rescaled) audio features for one genre.
# FIX: the original filtered `songs_clean`, a variable that is never
# defined in this script; the cleaned data set is `spotify_songs_clean`.
#
# Args: arg - a playlist_genre value, e.g. "pop"
# Side effect: draws an fmsb::radarchart for that genre.
radar_chart <- function(arg){
  genre_subset <- spotify_songs_clean %>% filter(playlist_genre==arg)
  # Select the six features, rescale each to [0, 1], take column means,
  # then sandwich them between the max (1) and min (0) rows that
  # fmsb::radarchart expects as its first two rows.
  radar_data_v1 <- genre_subset %>%
    select(danceability,energy,loudness,speechiness,valence,acousticness)
  radar_data_v2 <- apply(radar_data_v1,2,function(x){(x-min(x)) / diff(range(x))})
  radar_data_v3 <- apply(radar_data_v2,2,mean)
  radar_data_v4 <- rbind(rep(1,6) , rep(0,6) , radar_data_v3)
  return(radarchart(as.data.frame(radar_data_v4),title=arg))
}
# One radar panel per genre in a 2 x 3 grid
par(mfrow = c(2, 3))
Chart_edm<-radar_chart("edm")
Chart_pop<-radar_chart("pop")
Chart_rb<-radar_chart("r&b")
Chart_latin<-radar_chart("latin")
Chart_rap<-radar_chart("rap")
Chart_rock<-radar_chart("rock")
# ---------
# Scatter plots of energy vs loudness/danceability, for the whole data
# set and for single-artist subsets (reused by the plots further down).
ggplot(spotify_songs_clean, aes(x = energy, y = loudness, color = playlist_genre)) +
  geom_point(alpha = 0.5, position = "jitter")
# Coldplay tracks only
coldplay = spotify_songs_clean %>%
  filter(track_artist == "Coldplay")
ggplot(coldplay, aes(x = energy, y = loudness, color = playlist_genre)) +
  geom_point(alpha = 0.5, position = "jitter")
# Enrique Iglesias tracks only (plotted further below)
enriq = spotify_songs_clean %>%
  filter(track_artist == "Enrique Iglesias")
ggplot(spotify_songs_clean, aes(x = energy, y = danceability, color = playlist_genre)) +
  geom_point(alpha = 0.5, position = "jitter")
ggplot(enriq, aes(x = energy, y = danceability, color = playlist_genre)) +
  geom_point(alpha = 0.5, position = "jitter")
# Maroon 5 tracks only
maro = spotify_songs_clean %>%
  filter(track_artist == "Maroon 5")
ggplot(maro, aes(x = energy, y = danceability, color = playlist_genre)) +
  geom_point(alpha = 0.5, position = "jitter")
# ---------
#Proportion of playlist genres
# NOTE(review): `summarise` here must resolve to dplyr::summarise; if
# plyr is attached after dplyr it masks it and group_by() is ignored -
# verify the library load order at the top of the script.
songs_clean_plot_data<-spotify_songs %>%
  group_by(playlist_genre) %>%
  summarise(Total_number_of_tracks = length(playlist_genre))
#Bar chart of track counts per genre, labelled with percentages
ggplot(songs_clean_plot_data, aes(x=playlist_genre, y=Total_number_of_tracks)) +
  geom_bar(width = 0.5, stat = "identity") +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 1.05)) +
  theme(axis.text.x = element_text(angle = 90))
#Same chart with the bars reordered by descending count
ggplot(songs_clean_plot_data, aes(x= reorder(playlist_genre, -Total_number_of_tracks), y=Total_number_of_tracks)) +
  geom_bar(width = 0.5, stat = "identity") +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 1.05)) +
  theme(axis.text.x = element_text(angle = 90))
# pie chart of the same proportions
ggplot(songs_clean_plot_data, aes(x="", y=Total_number_of_tracks, fill=playlist_genre)) +
  geom_bar(width = 1, stat = "identity") +
  coord_polar("y", start=0) +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 0.5))
# ---------
#Proportion of playlist subgenres
# NOTE(review): as above, `summarise` must be dplyr's version for the
# group_by() to take effect.
subsongs_clean_plot_data<-spotify_songs %>%
  group_by(playlist_subgenre) %>%
  summarise(Total_number_of_tracks = length(playlist_subgenre))
#Horizontal bar chart ordered by ascending count, with percentage labels
ggplot(subsongs_clean_plot_data, aes(x= reorder(playlist_subgenre, Total_number_of_tracks), y=Total_number_of_tracks)) +
  geom_bar(width = 0.5, stat = "identity") +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 1.05)) +
  theme(axis.text.x = element_text(angle = 90)) +
  coord_flip()
#Same chart in the original (alphabetical) subgenre order
ggplot(subsongs_clean_plot_data, aes(x=playlist_subgenre, y=Total_number_of_tracks)) +
  geom_bar(width = 0.5, stat = "identity") +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 1.05)) +
  theme(axis.text.x = element_text(angle = 90)) +
  coord_flip()
# pie chart of the same proportions
ggplot(subsongs_clean_plot_data, aes(x="", y=Total_number_of_tracks, fill=playlist_subgenre)) +
  geom_bar(width = 1, stat = "identity") +
  coord_polar("y", start=0) +
  geom_text(aes(label = paste(round(Total_number_of_tracks / sum(Total_number_of_tracks) * 100, 1), "%")),
            position = position_stack(vjust = 0.5))
# ---------
# Scatter plots: small points for the full data set, large jittered
# points for the single-artist subsets created earlier
# (coldplay / maro / enriq).
ggplot(spotify_songs_clean, aes(x = energy, y = loudness, color = playlist_genre)) +
  geom_point(size = 0.5, alpha = 0.5)
ggplot(spotify_songs_clean, aes(x = energy, y = danceability, color = playlist_genre)) +
  geom_point(size = 0.5, alpha = 0.5)
ggplot(coldplay, aes(x= energy, y = loudness, color = playlist_genre)) +
  geom_point(size = 7, position = "jitter", alpha = 0.6)
ggplot(maro, aes(x= energy, y = danceability, color = playlist_genre)) +
  geom_point(size = 7, position = "jitter", alpha = 0.6)
ggplot(enriq, aes(x= energy, y = danceability, color = playlist_genre)) +
  geom_point(size = 7, position = "jitter", alpha = 0.6)
# ---------
# Audio-feature distributions.
# Drop the character/metadata columns, keeping the numeric audio features.
spotify_histograms <- spotify_songs_clean [,-c(1,2,3,4,5,6,7,8,13)]
plot_num(spotify_histograms)
# FIX: the original contained free-standing as.numeric()/is.numeric()
# calls whose results were discarded (as.numeric does not modify in
# place), passed a data frame to select() where column names were
# required, and ended the plot with scale_color_kp(), a function that
# is not defined anywhere in this script and would error.
feature_cols <- names(spotify_histograms)
spotify_songs_clean %>%
  select(c('playlist_genre', all_of(feature_cols))) %>%
  pivot_longer(cols = all_of(feature_cols)) %>%
  ggplot(aes(x = value)) +
  geom_density(aes(color = playlist_genre), alpha = 0.5) +
  facet_wrap(~name, ncol = 3, scales = 'free') +
  labs(title = 'Spotify Audio Feature Density - by Genre',
       x = '', y = 'density') +
  theme(axis.text.y = element_blank())
# ---------
# ---------
# Clustering: k-means on the scaled audio features
install.packages("plotly")
library(tidyverse)
library(cluster)
library(factoextra)
library(tidymodels)
library(plotly)
library(janitor)
library(GGally)
library(ggplot2)
# Exploratory pair plot of the audio features, coloured by genre
spotify_songs_clean%>%select(playlist_genre,
                             energy,liveness,tempo,speechiness,acousticness,
                             danceability,duration_ms,loudness,valence)%>%
  ggpairs(aes(color=playlist_genre))
# Scale every numeric column (mean 0, sd 1) so no single feature
# dominates the Euclidean distances used by k-means.
str(spotify_songs_clean)
# FIX: across() requires the predicate to be wrapped in where();
# a bare `is.numeric` is deprecated in dplyr >= 1.0 and errors in
# newer versions.
spotify_scaled <- spotify_songs_clean%>%
  mutate(across(where(is.numeric), ~as.numeric(scale(.))))
# Initial k-means with an arbitrary k = 12 (columns 1:8 are the
# non-numeric identifier columns and are excluded).
kmeans_spoti <- kmeans(x=select(spotify_scaled, -c(1:8)),
                       centers=12)
print(kmeans_spoti)
# fviz_cluster visualises the clustering; it takes two arguments:
# the clustering result and the original data set.
fviz_cluster(kmeans_spoti,
             spotify_songs_clean%>%select(-(1:8)))
# What is the optimal number of clusters k?
# Method 1: within-cluster sum of squares (elbow plot)
fviz_nbclust(spotify_scaled%>%select(-(1:8)), kmeans, method = "wss")
# Method 2: average silhouette width
fviz_nbclust(spotify_scaled%>%select(-(1:8)), kmeans, method = "silhouette")
# Final k-means with k = 2
kmeans_spoti <- kmeans(x=select(spotify_scaled, -c(1:8)),
                       centers=2)
print(kmeans_spoti)
# Method 3: the 'tidymodel' way - fit k = 1..10 and combine the results
k_clust_investigate<-
  tibble(k= 1:10)%>%
  mutate(
    kmeans_spoti=map(k,~kmeans(spotify_scaled%>%select(-(1:8)),.x)),
    tidied = map(kmeans_spoti,tidy),
    glanced = map(kmeans_spoti,glance),
    augument = map(kmeans_spoti,augment,spotify_scaled))
# Elbow plot built from the combined fits
k_clust_investigate%>%
  unnest(glanced)%>%
  ggplot(aes(k,tot.withinss))+
  geom_line()+
  geom_point()
# Keep the k = 2 fit and attach each track's cluster assignment
final_clustering_model<-k_clust_investigate%>%
  filter(k==2)%>%
  select(augument)%>%
  unnest(augument)
# Final clusters: energy vs danceability (interactive via plotly)
inter <- ggplot(final_clustering_model,
                mapping = aes(x = energy,
                              y = danceability,
                              color = .cluster, name = track_artist)) +
  geom_point(size = 0.5, alpha = 0.5)
ggplotly(inter)
# Final clusters: energy vs loudness (interactive via plotly)
interactive_plot1<-ggplot(final_clustering_model,
                          mapping=aes(x=energy,
                                      y=loudness,
                                      color=.cluster, name = track_artist))+
  geom_point(size = 0.5, alpha = 0.5)+theme_minimal()
ggplotly(interactive_plot1)
# ---------
# Classification: predict playlist_genre from the audio features with a
# decision tree (rpart).
install.packages("rpart")
install.packages("rpart.plot")
library(tidyverse)
library(rpart)
library(rpart.plot)
# Step 1: build the modelling table (features + label).
# Step 2: shuffle it so the head/tail split below is random.
# Keep the 12 audio-feature columns (12:23) plus the genre label (10).
classi_spoti <- select(spotify_songs, 12:23, 10)
#turn all categorical variables into factors (rpart needs a factor label)
str(classi_spoti)
classi_spoti <- mutate(classi_spoti, playlist_genre = factor(playlist_genre))
# Randomize the order of the rows.
shuffle_index <- sample(1:nrow(classi_spoti))
classi_spoti <- classi_spoti[shuffle_index, ]
### Split the data into a train and test set.
# Create a train/test split.
#
# data       Input tibble containing the full dataset
# proportion Size of the training set as a proportion of the total; e.g.
#            0.8 sends 80% of the rows to the train set
# train      Extract the training part (TRUE) or the test part (FALSE)?
#
# Returns the selected subset of rows.  Assumes `data` was already
# shuffled, since the training rows are taken from the top of the table.
create_train_test <- function(data, proportion = 0.8, train = TRUE) {
  n_row <- nrow(data)
  # FIX: floor() the cut point so a non-integer proportion * n_row can
  # never feed a fractional endpoint to the sequence, and seq_len() is
  # safe when the training size is zero (1:0 counts backwards).
  train_size <- floor(proportion * n_row)
  train_sample <- seq_len(train_size)
  if (train) {
    data[train_sample, ]
  } else if (train_size == 0) {
    # Nothing went to the train set, so everything is the test set
    # (data[-integer(0), ] would wrongly select zero rows).
    data
  } else {
    data[-train_sample, ]
  }
}
# Do the 80/20 split (rows were shuffled above, so a positional split is ok).
proportion <- 0.8
data_train <- create_train_test(classi_spoti, proportion, train = TRUE)
data_test <- create_train_test(classi_spoti, proportion, train = FALSE)
# Sanity check: class shares should be similar in all three sets.
prop.table(table(classi_spoti$playlist_genre)) # genre shares in complete dataset
# This can be cross-checked against the histogram made previously.
# After splitting, train and test should show a similar distribution.
prop.table(table(data_train$playlist_genre)) # genre shares in train set
prop.table(table(data_test$playlist_genre)) # genre shares in test set
# Similar shares in train and test indicate the split is usable and the
# data has been divided properly.
### Train the decision tree algorithm.
# Set the hyper-parameters for the model (tree depth, minimum leaf size, ...).
param_settings <- rpart.control(maxdepth = 12, minbucket = 33, minsplit = 10, cp = 0)
# param_settings <- rpart.control(maxdepth = , minbucket = 10, minsplit = 10, cp = 0)
# Train the model: predict playlist_genre from all other columns.
fitted_model <- rpart(playlist_genre ~ .,
                      data = data_train,
                      method = 'class',
                      control = param_settings)
# After this command the model has been fitted on the training data.
# Now visualize it.
### Visualize the decision tree.
# The type and extra parameters control the way the learned decision tree
# is shown on screen (but not its structure, that remains the same).
# maj. label / class. rate / % of all cases
rpart.plot(fitted_model, type = 5, extra = 102, box.palette="RdGn")
# maj. label / prob. per class / % of all cases
rpart.plot(fitted_model, extra = 104, box.palette="RdGn")
# maj. label / prob. of fitted class / % of all cases
rpart.plot(fitted_model, extra = 108, box.palette="RdGn")
# maj. label / prob. of fitted class
rpart.plot(fitted_model, extra = 08, box.palette="RdGn")
### Generate predictions on the unseen test data.
# Use the fitted model to generate class predictions for the test set.
predictions <- predict(fitted_model, data_test, type = "class")
# Now compare the predictions to the observed genres.
# Generate a confusion matrix (rows = predicted, columns = actual).
confusion_matrix <- table(predictions, data_test$playlist_genre)
confusion_matrix
# Calculate accuracy, P, R, and F.
acc <- sum(diag(confusion_matrix)) / sum(confusion_matrix) # overall accuracy
# Precision for the 6th genre only: of all cases predicted as that genre,
# how many actually were? NOTE(review): [6,6] singles out one class --
# confirm this is the intended genre rather than an overall metric.
P <- confusion_matrix[6,6] / sum(confusion_matrix[6, ])
# Recall for the same genre: of all actual cases, how many were predicted?
R <- confusion_matrix[6,6] / sum(confusion_matrix[, 6])
F <- 2 * (P * R) / (P + R) # F1 score; NOTE(review): `F` shadows the FALSE shorthand
# The F score (~.64 in the original run) summarizes how well the model did.
# Report results.
print(acc)
print(confusion_matrix)
# Show the confusion matrix as row percentages.
round((confusion_matrix/rowSums(confusion_matrix))*100,2)
### Parameter optimization: grid search over maxdepth x minbucket.
# Make a tibble to store the results.
results <- tibble(maxdepth = numeric(), minbucket = numeric(), value = numeric())
# Loop through different values for maxdepth (1 through 20).
# (maxdepth controls how many levels the decision tree is max. allowed to have)
for (maxdepth_value in 1:20) {
  # Loop through different values for minbucket (2 through 50).
  # (minbucket controls how many instances each leaf node has to have at least)
  for (minbucket_value in 2:50) {
    # Define parameter settings with this value.
    param_settings <- rpart.control(maxdepth = maxdepth_value, # take the value from the outer loop
                                    minbucket = minbucket_value, # take the value from the inner loop
                                    minsplit = 10, cp = 0)
    # Train the model using these parameter settings.
    fitted_model <- rpart(playlist_genre ~ .,
                          data = data_train,
                          method = 'class',
                          control = param_settings)
    # Generate predictions on the test set.
    predictions <- predict(fitted_model, data_test, type = "class")
    # Generate the confusion matrix.
    confusion_matrix <- table(predictions, data_test$playlist_genre)
    # Calculate the relevant evaluation metric(s).
    acc <- sum(diag(confusion_matrix)) / sum(confusion_matrix)
    # Add the result to the tibble.
    # NOTE(review): tuning on the test set risks overfitting to it; a
    # validation split or cross-validation would be more rigorous.
    results <- add_row(results, maxdepth = maxdepth_value,
                       minbucket = minbucket_value,
                       value = acc)
  }
}
print(results)
|
3af844baa26f9585611052c1a6ac1784e7f4497d
|
49f0f65c402374763d297c59fcf5e53b74c77029
|
/R/rk3g.r
|
87e27b30d5c767bda67bfefdcb7dd99fb0e549b1
|
[] |
no_license
|
mu2013/KGode
|
259192727cbe61dcb219997ff411b1b79a557ea1
|
2816e7facf92b465a808727e9214746d5d2dae3e
|
refs/heads/master
| 2021-05-04T18:43:32.307550
| 2020-06-23T16:30:23
| 2020-06-23T16:30:23
| 106,051,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,673
|
r
|
rk3g.r
|
#' The 'rkg3' class object
#'
#' This class provides advanced gradient matching method by using the ode as a regularizer.
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords data
#' @return an \code{\link{R6Class}} object which can be used for improving ode parameters estimation by using ode as a regularizer.
#' @format \code{\link{R6Class}} object.
#' @field rk the 'rkhs' class object containing the interpolation information for each state of the ode.
#' @field ode_m the 'ode' class object containing the information about the odes.
#' @section Methods:
#' \describe{
#' \item{\code{iterate(iter,innerloop,lamb)}}{Iteratively updating ode parameters and interpolation regression coefficients.}
#' \item{\code{witerate(iter,innerloop,dtilda,lamb)}}{Iteratively updating ode parameters and the warped interpolation regression coefficients.}
#' \item{\code{full(par,lam)}}{Updating ode parameters and rkhs interpolation regression coefficients simultaneously. This method is slow but guarantee convergence.} }
#' @export
#' @author Mu Niu, \email{mu.niu@glasgow.ac.uk}
# R6 implementation of advanced gradient matching that uses the ODE system as
# a regularizer. self$rk holds one 'rkhs' interpolant per ODE state; self$odem
# holds the 'ode' object (parameters, gradient and loss functions).
rkg3 <- R6Class("rkg3",
  public = list(
    # List of 'rkhs' interpolant objects, one per ODE state.
    rk= list(),
    # The 'ode' class object describing the ODE system.
    odem = NULL,

    # Store the interpolants and the ode object, then print a banner.
    initialize = function(rk=NULL,odem=NULL) {
      self$rk = rk
      self$odem = odem
      self$greet()
    },

    # Print a short banner identifying the class.
    greet = function() {
      cat(paste("RK3G ",".\n"))
    },

    # Append one or more 'rkhs' objects to the interpolant list.
    add = function(x) {
      self$rk <- c(self$rk, x)
      #invisible(self)
    },

    # Alternate between closed-form updates of the RKHS regression
    # coefficients b (inner loop) and re-optimization of the ODE parameters
    # on the log scale via optim (outer loop).
    #
    # iter      Number of outer iterations (ODE parameter updates).
    # innerloop Number of inner coefficient updates per outer iteration.
    # lamb      Regularization weight on the ODE-gradient penalty.
    iterate= function( iter,innerloop,lamb )
    {
      t = as.numeric( self$rk[[1]]$t)
      n = length(t)
      nd = length(self$rk)
      #lamb = 1
      ## first step fix b in nonlinear part loss fun and work out the optimised b
      # y_pkl: kernel evaluations K(t_i, .); z_tkl: kernel time-derivatives;
      # SKl: inverse of the regularized normal matrix; fstl: centered data term.
      y_pkl = array(c(0),c(n,n,nd))
      z_tkl = array(c(0),c(n,n,nd))
      SKl = array(c(0),c(n,n,nd))
      fstl = array(c(0),c(1,n,nd))
      for(j in 1:nd)
      {
        for (i in 1:n) ## through data point
        {
          y_pkl[i,,j] = self$rk[[j]]$ker$kern(t(t),t[i])
          z_tkl[i,,j] = self$rk[[j]]$ker$dkdt(t[i],t(t))
        }
        # If the normal matrix is singular, retry with a tiny ridge (1e-10 I).
        SKl[,,j] = tryCatch({ solve(t(y_pkl[,,j])%*%y_pkl[,,j]+ lamb*t(z_tkl[,,j])%*%z_tkl[,,j])
        }, warning = function(war)
        {
          print(paste("MY_WARNING: ",war))
        },error = function(err)
        {# error handler picks up where error was generated
          print(paste("MY_ERROR: ",err))
          return( solve(t(y_pkl[,,j])%*%y_pkl[,,j]+ lamb*t(z_tkl[,,j])%*%z_tkl[,,j] +1e-10*diag(n) ) )
        },finally = {} )
        fstl[,,j] = c( scale(self$rk[[j]]$y, center=TRUE, scale=FALSE) ) %*% y_pkl[,,j]
      }
      lbl = array(c(0),c(nd,n))
      y_p = array(c(0),c(nd,n))
      z_p = array(c(0),c(nd,n))
      for (it in 1:iter)
      {
        for(inlp in 1:innerloop)
        { ## linear B function in old code
          for(j in 1:nd) {
            y_p[j,] = self$rk[[j]]$predict()$pred
          }
          dzl=self$odem$gradient(y_p,self$odem$ode_par)
          dim(dzl) = dim(y_p)
          for(j in 1:nd) {
            # Closed-form coefficient update given the current ODE-implied
            # gradients dzl.
            lbl[j,]=( fstl[,,j] + lamb*dzl[j,]%*%(z_tkl[,,j]) ) %*% SKl[,,j]
            self$rk[[j]]$b = lbl[j,]
          }
        }
        for(j in 1:nd)
        {
          reslll = self$rk[[j]]$predict()
          y_p[j,] = reslll$pred
          z_p[j,] = reslll$grad
        }
        # NOTE(review): the guard tests `gr_lNODE` but then reads `grlNODE`;
        # confirm both names exist on the ode object or this is a typo.
        grlNODE = if (is.null(self$odem$gr_lNODE)) NULL else self$odem$grlNODE
        # Optimize log(ode_par); fall back to plain BFGS if L-BFGS-B fails.
        s32 <- tryCatch({ optim(log(self$odem$ode_par),self$odem$lossNODE,gr=grlNODE,y_p,z_p,method="L-BFGS-B")
        }, warning = function(war)
        {
          print(paste("MY_WARNING: ",war))
        },
        error = function(err)
        {
          print(paste("MY_ERROR: ",err))
          return( optim(log(self$odem$ode_par),self$odem$lossNODE,gr=grlNODE,y_p,z_p,method="BFGS") )
        },finally = { } )
        par_rbf = exp(s32$par)
        self$odem$ode_par = par_rbf
        cat(par_rbf,"\n")
      }
    },

    # Same alternating scheme as iterate(), but in warped time: dtilda holds
    # the per-state derivatives of the warping function, which rescale the
    # kernel time-derivatives.
    #
    # iter      Number of outer iterations.
    # innerloop Number of inner coefficient updates per outer iteration.
    # dtilda    nd x n matrix of warping-function derivatives.
    # lamb      Regularization weight on the ODE-gradient penalty.
    witerate= function( iter,innerloop,dtilda,lamb)
    {
      n = dim(dtilda)[2]#length( self$odem$y_ode[1,])
      nd = length(self$rk)
      #lamb = 1
      ## first step fix b in nonlinear part loss fun and work out the optimised b
      y_pkl = array(c(0),c(n,n,nd))
      z_tkl = array(c(0),c(n,n,nd))
      SKl = array(c(0),c(n,n,nd))
      fstl = array(c(0),c(1,n,nd))
      for(j in 1:nd)
      {
        t = as.numeric( self$rk[[j]]$t)
        for (i in 1:n) ## through data point
        {
          y_pkl[i,,j] = self$rk[[j]]$ker$kern(t(t),t[i])
          z_tkl[i,,j] = self$rk[[j]]$ker$dkdt(t[i],t(t))
        }
        # Scale the kernel derivatives by the warping derivative.
        # NOTE(review): ones() is not base R -- presumably pracma::ones or a
        # package helper; confirm it is in scope.
        oo = dtilda[j,]%*%ones(1,n)
        wkdot = t( t(z_tkl[,,j])*(oo) )
        twkdot= ( t(z_tkl[,,j])*(oo) )
        SKl[,,j] = tryCatch({ solve( t(y_pkl[,,j])%*%y_pkl[,,j]+ lamb*twkdot%*%wkdot +1e-10*diag(n) )
        }, warning = function(war)
        {
          print(paste("MY_WARNING: ",war))
        },error = function(err)
        {# error handler picks up where error was generated
          print(paste("MY_ERROR: ",err))
          return( solve(t(y_pkl[,,j])%*%y_pkl[,,j]+ lamb*twkdot%*%wkdot +1e-5*diag(n) ) )
        },finally = {} )
        fstl[,,j] = c( scale(self$rk[[j]]$y, center=TRUE, scale=FALSE) ) %*% y_pkl[,,j]
      }
      lbl = array(c(0),c(nd,n))
      y_p = array(c(0),c(nd,n))
      z_p = array(c(0),c(nd,n))
      for (it in 1:iter)
      {
        for(inlp in 1:innerloop)
        { ## linear B function in old code
          for(j in 1:nd) {
            y_p[j,] = self$rk[[j]]$predict()$pred
          }
          dzl=self$odem$gradient(y_p,self$odem$ode_par)
          dim(dzl) = dim(y_p)
          for(j in 1:nd) {
            #wkdot = (z_tkl[,,j])*(oo)
            lbl[j,] = ( fstl[,,j] + lamb*dzl[j,]%*%z_tkl[,,j]*dtilda[j,] ) %*% SKl[,,j]
            self$rk[[j]]$b = lbl[j,]
          }
        }
        for(j in 1:nd)
        {
          reslll = self$rk[[j]]$predict()
          y_p[j,] = reslll$pred
          z_p[j,] = reslll$grad*dtilda[j,]
        }
        # NOTE(review): same gr_lNODE / grlNODE name mismatch as in iterate().
        grlNODE = if (is.null(self$odem$gr_lNODE)) NULL else self$odem$grlNODE
        #s32 <- tryCatch({ optim(log(c(0.1,0.1,0.1,0.1)),self$odem$lossNODE,gr=self$odem$grlNODE,y_p,z_p,method="L-BFGS-B")
        s32 <- tryCatch({ optim(log(self$odem$ode_par),self$odem$lossNODE,gr=grlNODE,y_p,z_p,method="L-BFGS-B")
        }, warning = function(war)
        {
          print(paste("MY_WARNING: ",war))
        },
        error = function(err)
        {
          print(paste("MY_ERROR: ",err))
          return( optim(log(self$odem$ode_par),self$odem$lossNODE,gr=grlNODE,y_p,z_p,method="BFGS") )
        },finally = { } )
        par_rbf = exp(s32$par)
        self$odem$ode_par = par_rbf
        cat(par_rbf,"\n")
      }
    },

    # Combined objective: data-fit loss of all interpolants plus lam times the
    # squared mismatch between interpolant gradients and ODE gradients.
    # `par` packs the nd*n regression coefficients followed by log(ode_par).
    # NOTE(review): mutates self$rk[[j]]$b as a side effect of evaluation.
    full= function( par,lam )
    { #lam=1
      t = as.numeric( self$rk[[1]]$t)
      n = length(t)
      nd = length(self$rk)
      par_ode = exp( par[(nd*n+1):length(par)] )
      lbl = array(c(0),c(nd,n))
      y_t = array(c(0),c(nd,n))
      z_t = array(c(0),c(nd,n))
      res= 0
      for(j in 1:nd) {
        self$rk[[j]]$b = par[ ((j-1)*n+1):(j*n) ]
        reslll = self$rk[[j]]$predict()
        y_t[j,] = reslll$pred
        z_t[j,] = reslll$grad
        res =res+ sum( (self$rk[[j]]$y-y_t[j,])^2 )
      }
      dzl=self$odem$gradient(y_t,par_ode)
      dim(dzl) = dim(y_t)
      res = res + lam*sum( (z_t- dzl)^2 )
      res
    },

    # Warped-time variant of full(): the interpolant gradients are rescaled
    # by the warping derivatives dtilda before being matched to the ODE.
    wfull= function( par,lam,dtilda )
    { #lam=1
      t = as.numeric( self$rk[[1]]$t)
      n = length(t)
      nd = length(self$rk)
      par_ode = exp( par[(nd*n+1):length(par)] )
      lbl = array(c(0),c(nd,n))
      y_t = array(c(0),c(nd,n))
      z_t = array(c(0),c(nd,n))
      res= 0
      for(j in 1:nd) {
        self$rk[[j]]$b = par[ ((j-1)*n+1):(j*n) ]
        reslll = self$rk[[j]]$predict()
        y_t[j,] = reslll$pred
        z_t[j,] = reslll$grad*dtilda[j,]
        res =res+ sum( (self$rk[[j]]$y-y_t[j,])^2 )
      }
      dzl=self$odem$gradient(y_t,par_ode)
      dim(dzl) = dim(y_t)
      res = res + lam*sum( (z_t- dzl)^2 )
      res
    },

    # Jointly optimize all coefficients and log ODE parameters on the full()
    # objective with BFGS. Slow but convergent (see class docs). Returns
    # list(parameter vector with ode_par back on the natural scale,
    #      final loss, initial loss).
    opfull= function(lam)
    {
      nd = length(self$rk)
      par=c()
      for(j in 1:nd) {
        par=c( par,self$rk[[j]]$b)
      }
      #par=rep(1,length(par))
      par=c(par, log(self$odem$ode_par) )
      baseloss = self$full(par,lam)
      # Empty third argument: no analytic gradient; lam is forwarded to full().
      op = optim(par,self$full,,lam,method="BFGS",control=list(trace=3))
      np= length(self$odem$ode_par)
      plist = c( head(op$par, -np) , exp(tail(op$par,np)) )
      list( plist,op$value,baseloss)
    },

    # Warped-time counterpart of opfull(), optimizing wfull().
    # NOTE(review): the baseline loss is computed with full(), not wfull();
    # confirm the unwarped baseline is intended.
    wopfull= function(lam,dtilda)
    {
      nd = length(self$rk)
      par=c()
      for(j in 1:nd) {
        par=c( par,self$rk[[j]]$b)
      }
      #par=rep(1,length(par))
      par=c(par, log(self$odem$ode_par) )
      baseloss = self$full(par,lam)
      op = optim(par,self$wfull,,lam,dtilda,method="BFGS")
      np= length(self$odem$ode_par)
      plist = c( head(op$par, -np) , exp(tail(op$par,np)) )
      list( plist,op$value,baseloss)
    },

    # NOTE(review): appears unfinished -- it references names not defined in
    # this class or its arguments (ntest, iterp, trainData, testData,
    # y_test_me), ignores testX/testY, and the accumulated `res` is never
    # returned (the for loop yields NULL). Do not rely on this method.
    cross = function(lam,testX,testY)
    {
      nd = length(self$rk)
      par=c()
      for(j in 1:nd) {
        par=c( par,self$rk[[j]]$b)
      }
      #par=rep(1,length(par))
      par=c(par, log(self$odem$ode_par) )
      baseloss = self$full(par,lam)
      op = optim(par,self$full,,lam,method="BFGS")
      y_t = array(c(0),c(nd,ntest))
      res = 0
      for(j in 1:nd)
      {
        mean_y = apply(as.matrix(iterp$rk[[j]]$y),2,mean)
        for(jj in 1:ntest)
        {
          y_t[j,jj] = iterp$rk[[j]]$ker$kern(t(trainData),testData[jj]) %*%iterp$rk[[j]]$b + mean_y
        }
        res =res+ sum( (y_test_me-y_t[j,])^2 )
      }
    },

    # Variant of full() that rebuilds predictions directly from the kernel
    # (without mutating self$rk), takes the ODE parameters on their natural
    # scale, and uses a fixed regularization weight of 1.
    fullos= function( par )
    {
      t = as.numeric( self$rk[[1]]$t)
      n = length(t)
      nd = length(self$rk)
      par_ode = par[(nd*n+1):length(par)]
      lbl = array(c(0),c(nd,n))
      y_t = array(c(0),c(nd,n))
      z_t = array(c(0),c(nd,n))
      res= 0
      for(j in 1:nd)
      {
        b = par[ ((j-1)*n+1):(j*n) ]
        mean_y = apply(as.matrix(self$rk[[j]]$y),2,mean)
        for(jj in 1:n)
        {
          y_t[j,jj] = self$rk[[j]]$ker$kern(t(t),t[jj]) %*%b + mean_y
          z_t[j,jj] = self$rk[[j]]$ker$dkdt(t[jj],t(t)) %*%b
        }
        res =res+ sum( (self$rk[[j]]$y-y_t[j,])^2 )
      }
      dzl=self$odem$gradient(y_t,par_ode)
      dim(dzl) = dim(y_t)
      res = res + sum( (z_t- dzl)^2 )
      res
    }
  )
)
## par=c( rk1$b,rk2$b ,kkk$ode_par)
|
26d738a0646278889681291a972e991b730ed168
|
4052e087fec60c5073764fdb4ff873ec548a6e2b
|
/DatabaseConnector/tests/testthat/test-connection.R
|
ac1dd2dbd0133f14758d57643e0c507872712016
|
[
"Apache-2.0"
] |
permissive
|
amazon-archives/aws-ohdsi-automated-deployment
|
1350dcd1d1f5fce0287e0c2f673b0ca0bbb539f2
|
8deb86143a967b32cb55caab624aec526df11a40
|
refs/heads/master
| 2022-04-01T02:26:35.043472
| 2019-09-12T14:10:11
| 2019-09-12T14:10:11
| 124,141,866
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,928
|
r
|
test-connection.R
|
library(testthat)
# Smoke test: open and close a connection to each supported DBMS using
# credentials read from environment variables (passwords are stored
# URL-encoded, hence the URLdecode() calls).
test_that("Open and close connection", {
  # Postgresql
  details <- createConnectionDetails(dbms = "postgresql",
                                     user = Sys.getenv("CDM5_POSTGRESQL_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_POSTGRESQL_PASSWORD")),
                                     server = Sys.getenv("CDM5_POSTGRESQL_SERVER"),
                                     schema = Sys.getenv("CDM5_POSTGRESQL_CDM_SCHEMA"))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # SQL Server
  details <- createConnectionDetails(dbms = "sql server",
                                     user = Sys.getenv("CDM5_SQL_SERVER_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_SQL_SERVER_PASSWORD")),
                                     server = Sys.getenv("CDM5_SQL_SERVER_SERVER"),
                                     schema = Sys.getenv("CDM5_SQL_SERVER_CDM_SCHEMA"))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # Oracle
  details <- createConnectionDetails(dbms = "oracle",
                                     user = Sys.getenv("CDM5_ORACLE_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_ORACLE_PASSWORD")),
                                     server = Sys.getenv("CDM5_ORACLE_SERVER"),
                                     schema = Sys.getenv("CDM5_ORACLE_CDM_SCHEMA"))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # RedShift checks kept for reference but currently disabled.
  # details <- createConnectionDetails(dbms = "redshift",
  #                                    user = Sys.getenv("CDM5_REDSHIFT_USER"),
  #                                    password = URLdecode(Sys.getenv("CDM5_REDSHIFT_PASSWORD")),
  #                                    server = Sys.getenv("CDM5_REDSHIFT_SERVER"),
  #                                    schema = Sys.getenv("CDM5_REDSHIFT_CDM_SCHEMA"))
  # connection <- connect(details)
  # expect_true(inherits(connection, "Connection"))
  # expect_true(disconnect(connection))
})
# Same connectivity smoke test, but building JDBC connection strings that
# embed user and password directly in the URL.
test_that("Open and close connection using connection strings with embedded user and pw", {
  # Postgresql: server env var is "host/database"
  parts <- unlist(strsplit(Sys.getenv("CDM5_POSTGRESQL_SERVER"), "/"))
  host <- parts[1]
  database <- parts[2]
  port <- "5432"
  connectionString <- paste0("jdbc:postgresql://",
                             host,
                             ":",
                             port,
                             "/",
                             database,
                             "?user=",
                             Sys.getenv("CDM5_POSTGRESQL_USER"),
                             "&password=",
                             URLdecode(Sys.getenv("CDM5_POSTGRESQL_PASSWORD")))
  details <- createConnectionDetails(dbms = "postgresql", connectionString = connectionString)
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # SQL Server
  connectionString <- paste0("jdbc:sqlserver://",
                             Sys.getenv("CDM5_SQL_SERVER_SERVER"),
                             ";user=",
                             Sys.getenv("CDM5_SQL_SERVER_USER"),
                             ";password=",
                             URLdecode(Sys.getenv("CDM5_SQL_SERVER_PASSWORD")))
  details <- createConnectionDetails(dbms = "sql server", connectionString = connectionString)
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # Oracle: server env var is "host/sid"
  port <- "1521"
  parts <- unlist(strsplit(Sys.getenv("CDM5_ORACLE_SERVER"), "/"))
  host <- parts[1]
  sid <- parts[2]
  connectionString <- paste0("jdbc:oracle:thin:",
                             Sys.getenv("CDM5_ORACLE_USER"),
                             "/",
                             URLdecode(Sys.getenv("CDM5_ORACLE_PASSWORD")),
                             "@",
                             host,
                             ":",
                             port,
                             ":",
                             sid)
  details <- createConnectionDetails(dbms = "oracle", connectionString = connectionString)
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # RedShift checks kept for reference but currently disabled.
  # parts <- unlist(strsplit(Sys.getenv("CDM5_REDSHIFT_SERVER"), "/"))
  # host <- parts[1]
  # database <- parts[2]
  # port <- "5439"
  # connectionString <- paste0("jdbc:redshift://",
  #                            host,
  #                            ":",
  #                            port,
  #                            "/",
  #                            database,
  #                            "?user=",
  #                            Sys.getenv("CDM5_REDSHIFT_USER"),
  #                            "&password=",
  #                            URLdecode(Sys.getenv("CDM5_REDSHIFT_PASSWORD")))
  # details <- createConnectionDetails(dbms = "redshift", connectionString = connectionString)
  # connection <- connect(details)
  # expect_true(inherits(connection, "Connection"))
  # expect_true(disconnect(connection))
})
# Same connectivity smoke test, but with credential-free connection strings
# and user/password passed as separate createConnectionDetails() arguments.
test_that("Open and close connection using connection strings with separate user and pw", {
  # Postgresql: server env var is "host/database"
  parts <- unlist(strsplit(Sys.getenv("CDM5_POSTGRESQL_SERVER"), "/"))
  host <- parts[1]
  database <- parts[2]
  port <- "5432"
  connectionString <- paste0("jdbc:postgresql://", host, ":", port, "/", database)
  details <- createConnectionDetails(dbms = "postgresql",
                                     connectionString = connectionString,
                                     user = Sys.getenv("CDM5_POSTGRESQL_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_POSTGRESQL_PASSWORD")))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # SQL Server
  connectionString <- paste0("jdbc:sqlserver://", Sys.getenv("CDM5_SQL_SERVER_SERVER"))
  details <- createConnectionDetails(dbms = "sql server",
                                     connectionString = connectionString,
                                     user = Sys.getenv("CDM5_SQL_SERVER_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_SQL_SERVER_PASSWORD")))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # Oracle: server env var is "host/sid"
  port <- "1521"
  parts <- unlist(strsplit(Sys.getenv("CDM5_ORACLE_SERVER"), "/"))
  host <- parts[1]
  sid <- parts[2]
  connectionString <- paste0("jdbc:oracle:thin:@", host, ":", port, ":", sid)
  details <- createConnectionDetails(dbms = "oracle",
                                     connectionString = connectionString,
                                     user = Sys.getenv("CDM5_ORACLE_USER"),
                                     password = URLdecode(Sys.getenv("CDM5_ORACLE_PASSWORD")))
  connection <- connect(details)
  expect_true(inherits(connection, "Connection"))
  expect_true(disconnect(connection))
  # RedShift checks kept for reference but currently disabled.
  # parts <- unlist(strsplit(Sys.getenv("CDM5_REDSHIFT_SERVER"), "/"))
  # host <- parts[1]
  # database <- parts[2]
  # port <- "5439"
  # connectionString <- paste0("jdbc:redshift://", host, ":", port, "/", database)
  # details <- createConnectionDetails(dbms = "redshift",
  #                                    connectionString = connectionString,
  #                                    user = Sys.getenv("CDM5_REDSHIFT_USER"),
  #                                    password = URLdecode(Sys.getenv("CDM5_REDSHIFT_PASSWORD")))
  # connection <- connect(details)
  # expect_true(inherits(connection, "Connection"))
  # expect_true(disconnect(connection))
})
|
8839d84af0ae133f771143168f4cb4256b961bbe
|
b73e8cd3cf80162717d9f22227dc25f735e6bd54
|
/R/rowMatch.R
|
439c1e6360e50f32bd4b56b666087f5b4019e0d6
|
[] |
no_license
|
cran/BANOVA
|
c4c73a55f282f748ffa8b62cfe6c62212dcdacd7
|
ddc0fc39ecf17895e841917b2df9ca1c0d7d2f17
|
refs/heads/master
| 2022-07-07T14:05:51.380634
| 2022-06-21T06:30:13
| 2022-06-21T06:30:13
| 21,282,366
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 221
|
r
|
rowMatch.R
|
# Find the first row of `matrix` that equals `vector`.
#
# vector Values to look for; with length 1 it is matched against `matrix`
#        treated as a flat vector (original behavior preserved).
# matrix Matrix whose rows are compared against `vector`.
#
# Returns the 1-based index of the first matching row, or NA if none.
rowMatch <-
function(vector, matrix){
  if (length(vector) == 1) return(match(vector, matrix))
  # seq_len() copes with zero-row matrices (1:nrow(matrix) would yield
  # c(1, 0) and index out of bounds); isTRUE(all(...)) treats rows
  # containing NA as non-matches instead of raising
  # "missing value where TRUE/FALSE needed".
  for (i in seq_len(nrow(matrix))){
    if (isTRUE(all(vector == matrix[i, ])))
      return(i)
  }
  return(NA)
}
|
d0da840f4d5847462dc346e518244331a653de79
|
7f141116154eed50968bddd35c9a47b7194e9b88
|
/man/true_simpson.Rd
|
f3dc1465945ab8918c8ec1299029fc7940c3f25d
|
[] |
no_license
|
adw96/breakaway
|
36a9d2416db21172f7623c1810d2c6c7271785ed
|
d81b1799f9b224113a58026199a849c2ec147524
|
refs/heads/main
| 2022-12-22T06:20:56.466849
| 2022-11-22T22:35:57
| 2022-11-22T22:35:57
| 62,469,870
| 65
| 22
| null | 2022-11-22T22:35:58
| 2016-07-02T21:10:56
|
R
|
UTF-8
|
R
| false
| true
| 493
|
rd
|
true_simpson.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alpha_true.R
\name{true_simpson}
\alias{true_simpson}
\title{Calculate the true Simpson index}
\usage{
true_simpson(input)
}
\arguments{
\item{input}{A vector of proportions.}
}
\value{
The Simpson index of the population given by input.
}
\description{
Calculate the true Simpson index
}
\note{
This function is intended for population-level data. If you are
dealing with a microbial sample, use DivNet instead.
}
|
e475f8bc216fe7a5d3b77683c5826ef3c3931763
|
bd55979bc72fb0276f3d0e31c7607b13577e0ebd
|
/Flight/load_2020_flight_data.R
|
3f4b8cbcdbed80b7e0eb6899e43237ccddea1dcb
|
[] |
no_license
|
arestrom/intertidal
|
d13b9f4df03f9de3edde3b5456dbb6a4b15056e0
|
f78627d14d31e2e0eb94bd8fa381072fbdf99bfa
|
refs/heads/master
| 2023-06-16T11:39:22.728506
| 2021-07-10T22:35:00
| 2021-07-10T22:35:00
| 306,503,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,989
|
r
|
load_2020_flight_data.R
|
#=================================================================
# Load 2020 flight data to shellfish database
#
# NOTES:
# 1. Load BIDN data first. Then create files for FlightProof program.
# 2. Need to enforce consistent naming and EPSG codes for GIS data.
#
# ToDo:
# 1.
#
# AS 2020-10-29
#=================================================================
# Clear workspace
# NOTE(review): rm(list = ls()) in a script is discouraged -- it only clears
# the global environment and can surprise anyone sourcing this file.
rm(list = ls(all.names = TRUE))
# Libraries
library(dplyr)
library(DBI)
library(glue)
library(sf)
library(stringi)
library(lubridate)
library(openxlsx)
library(uuid)
# Keep the RStudio connections pane from opening on dbConnect()
options("connectionObserver" = NULL)
# Globals: survey year being loaded
current_year = 2020L
#=====================================================================================
# Read the database username from the named environment variable.
pg_user <- function(user_label) {
  value <- Sys.getenv(user_label)
  value
}
# Read the database password from the named environment variable.
pg_pw <- function(pwd_label) {
  value <- Sys.getenv(pwd_label)
  value
}
# Read the database host name from the named environment variable.
pg_host <- function(host_label) {
  value <- Sys.getenv(host_label)
  value
}
# Open a connection to a PostgreSQL database on localhost.
#
# dbname Name of the database to connect to.
# port   Port number, as a string (default "5432").
#
# Credentials are resolved from environment variables via pg_user()/pg_pw().
# The caller is responsible for calling dbDisconnect() on the result.
pg_con_local = function(dbname, port = '5432') {
  con <- dbConnect(
    RPostgres::Postgres(),
    host = "localhost",
    dbname = dbname,
    user = pg_user("pg_user"),
    password = pg_pw("pg_pwd_local"),
    port = port)
  con
}
# Open a connection to the production PostgreSQL server.
#
# dbname Name of the database to connect to.
# port   Port number, as a string (default "5432").
#
# Host and credentials are resolved from environment variables via
# pg_host()/pg_user()/pg_pw(). Caller must dbDisconnect() the result.
pg_con_prod = function(dbname, port = '5432') {
  con <- dbConnect(
    RPostgres::Postgres(),
    host = pg_host("pg_host_prod"),
    dbname = dbname,
    user = pg_user("pg_user"),
    password = pg_pw("pg_pwd_prod"),
    port = port)
  con
}
# Count the rows of every table in one schema of a database.
#
# db_server "local" connects via pg_con_local(); anything else uses
#           pg_con_prod().
# db        Database name (default "shellfish").
# schema    Schema whose tables are enumerated (default "public").
#
# Returns a tibble with columns `table` and `row_count`, sorted by table name.
db_table_counts = function(db_server = "local", db = "shellfish", schema = "public") {
  if ( db_server == "local" ) {
    db_con = pg_con_local(dbname = db)
  } else {
    db_con = pg_con_prod(dbname = db)
  }
  # Run query to list all tables in the schema
  qry = glue("select table_name FROM information_schema.tables where table_schema = '{schema}'")
  db_tables = DBI::dbGetQuery(db_con, qry) %>%
    pull(table_name)
  tabx = integer(length(db_tables))
  # Helper: row count of the i-th table (reads db_tables/db_con from the
  # enclosing environment).
  get_count = function(i) {
    tabxi = dbGetQuery(db_con, glue("select count(*) from {schema}.", db_tables[i]))
    as.integer(tabxi$count)
  }
  rc = lapply(seq_along(tabx), get_count)
  # NOTE(review): if a count query errors, the connection is never closed;
  # consider on.exit(dbDisconnect(db_con)).
  dbDisconnect(db_con)
  rcx = as.integer(unlist(rc))
  dtx = tibble(table = db_tables, row_count = rcx)
  dtx = dtx %>%
    arrange(table)
  dtx
}
# Generate a vector of Version 4 UUIDs (RFC 4122)
#
# n Number of UUIDs to generate; must be numeric (integer or double).
#
# Returns a character vector of length n.
get_uuid = function(n = 1L) {
  # is.numeric() covers both integer and double and, unlike the previous
  # typeof() comparison, correctly rejects factors (whose typeof() is
  # "integer").
  if (!is.numeric(n)) {
    stop("n must be an integer or double")
  }
  uuid::UUIDgenerate(use.time = FALSE, n = n)
}
# Convert a tide/clock time string to minutes past midnight.
#
# x Character vector of times formatted "HH:MM" (single-digit hours
#   "H:MM" are also accepted).
#
# Returns a numeric vector of minutes since midnight.
hm_to_min = function(x) {
  # Split on the colon instead of fixed substr() positions, so "9:05"
  # parses correctly rather than silently becoming NA.
  parts = strsplit(x, ":", fixed = TRUE)
  vapply(parts,
         function(p) as.integer(p[1]) * 60 + as.integer(p[2]),
         numeric(1))
}
# Convert minutes past midnight to an "h:mm" clock string.
#
# x Numeric vector of whole minutes since midnight.
#
# Returns a character vector such as "8:05".
min_to_hm = function(x) {
  h = x %/% 60
  m = x %% 60
  # Zero-pad the minutes so e.g. 485 becomes "8:05" rather than the
  # malformed "8:5" produced by plain paste0().
  hm = sprintf("%d:%02d", as.integer(h), as.integer(m))
  return(hm)
}
#============================================================================================
# Verify the same number of rows exist in both local and production DBs
#============================================================================================
# Get table names and row counts from both servers
local_row_counts = db_table_counts(db_server = "local")
prod_row_counts = db_table_counts(db_server = "prod")
# Combine to a dataframe
compare_counts = local_row_counts %>%
  left_join(prod_row_counts, by = "table") %>%
  # Ignore tables that exist in local but not prod
  # NOTE(review): "geometry_columns" is listed twice; one entry was
  # probably meant to be "geography_columns".
  filter(!table %in% c("geometry_columns", "geometry_columns", "spatial_ref_sys")) %>%
  filter(!substr(table, 1, 10) == "beach_info") %>%
  filter(!substr(table, 1, 12) == "flight_count") %>%
  # Pull out and rename
  select(table, local = row_count.x, prod = row_count.y) %>%
  mutate(row_diff = abs(local - prod))
# Inspect any differences
diff_counts = compare_counts %>%
  filter(!row_diff == 0L)
# Output message
if ( nrow(diff_counts) > 0 ) {
  cat("\nWARNING: Some row counts differ. Inspect 'diff_counts'.\n\n")
} else {
  cat("\nRow counts are the same. Ok to proceed.\n\n")
}
#==============================================================================
# Import beach data from shellfish DB
#==============================================================================
# Read beach data ====================================
# Import the beach boundary history (one polygon per beach per active period)
qry = glue::glue("select beach_id, beach_number as bidn, beach_name, active_datetime, ",
                 "inactive_datetime, geom AS geometry ",
                 "from beach_boundary_history")
db_con = pg_con_local(dbname = "shellfish")
beach_st = st_read(db_con, query = qry)
dbDisconnect(db_con)
# Get start and end years for beaches (timestamps shifted to Pacific time)
beach_st = beach_st %>%
  mutate(active_datetime = with_tz(active_datetime, tzone = "America/Los_Angeles")) %>%
  mutate(inactive_datetime = with_tz(inactive_datetime, tzone = "America/Los_Angeles")) %>%
  mutate(start_yr = year(active_datetime)) %>%
  mutate(end_yr = year(inactive_datetime))
# Inspect the year/date ranges
sort(unique(beach_st$end_yr))
sort(unique(beach_st$inactive_datetime))
# Pull out separate dataset without geometry
beach = beach_st %>%
  st_drop_geometry()
# Read shellfish_management_area as geometry data ====================================
# Import from shellfish DB
qry = glue::glue("select shellfish_management_area_id, shellfish_area_code, ",
                 "geom AS geometry ",
                 "from shellfish_management_area_lut")
# Read
db_con = pg_con_local(dbname = "shellfish")
sfma_st = st_read(db_con, query = qry)
dbDisconnect(db_con)
# Read management_region as geometry data ====================================
# Import from shellfish DB
qry = glue::glue("select management_region_id, management_region_code, ",
                 "geom AS geometry ",
                 "from management_region_lut")
# Read
db_con = pg_con_local(dbname = "shellfish")
mng_reg_st = st_read(db_con, query = qry)
dbDisconnect(db_con)
#======================================================================================
# Output for FlightProof program
#======================================================================================
# # Output BIDN file to proofing folder...create folder first
# bidn_proof = beach_st %>%
# mutate(active_year = year(active_datetime)) %>%
# mutate(inactive_year = year(inactive_datetime)) %>%
# filter(active_year == current_year & inactive_year == current_year) %>%
# select(BIDN = bidn, name = beach_name) %>%
# st_transform(., 4326)
# st_crs(bidn_proof)$epsg
# proof_path = glue("C:\\data\\intertidal\\Apps\\gis\\{current_year}\\")
# write_sf(bidn_proof, dsn = glue("{proof_path}\\BIDN_{current_year}.shp"), delete_layer = TRUE)
#======================================================================================
# Flight data
#======================================================================================
#==========================================================================================
# Get the flight count data. File holds counts > 0, obs_time and all other associated data
#==========================================================================================
# Get flight data from the year's shapefile
flt_obs = read_sf(glue("Flight/data/{current_year}_FlightCounts/UClam_{current_year}.shp"))
# Check CRS: 4152 in 2020
st_crs(flt_obs)$epsg
# Inspect the raw columns
sort(unique(flt_obs$UClam))
unique(flt_obs$User_)
#any(is.na(flt_obs$Time))
any(is.na(flt_obs$TIME))
# unique(flt_obs$TIME) # MUST BE "00:00" format !!!!! Otherwise get error below
# unique(flt_obs$Time) # MUST BE "00:00" format !!!!! Otherwise get error below
# unique(flt_obs$Comments)
unique(flt_obs$DATE)
#unique(flt_obs$Date)
n_flt_dates = unique(flt_obs$DATE)
length(n_flt_dates)
# # Check if any Date_CH2 disagree with Date....All agree
# chk_date = flt_obs %>%
#   filter(!Date_CH2 == Date)
# Format: zero-pad 4-character times, attach a UUID per record, and
# standardize the column names. (Commented alternatives reflect the column
# names used in earlier years' shapefiles.)
flt_obs = flt_obs %>%
  mutate(TIME = if_else(nchar(TIME) == 4, paste0("0", TIME), TIME)) %>%
  # mutate(Time = if_else(nchar(Time) == 4, paste0("0", Time), Time)) %>%
  mutate(uuid = get_uuid(nrow(flt_obs))) %>%
  # select(uuid, flt_date = Date, flt_bidn = BIDN, flt_beach_name = NAME,
  #        obs_time = Time, uclam = UClam, user = User_, comments = Comments)
  select(uuid, flt_date = DATE, flt_bidn = BIDN, flt_beach_name = name,
         obs_time = TIME, uclam = UClam, user = User_, comments = Comments)
# Check time again: all values should now be 5 characters ("HH:MM")
# unique(flt_obs$obs_time)
unique(nchar(flt_obs$obs_time))
# Verify crs...need to standardize on mobile end
st_crs(flt_obs)$epsg
# Convert to proper crs (EPSG:2927, WA State Plane South)
flt_obs = st_transform(flt_obs, 2927)
# Verify crs
st_crs(flt_obs)$epsg
# Verify how many bidns per date
chk_flt_dates = flt_obs %>%
  st_drop_geometry %>%
  select(flt_date, flt_bidn) %>%
  distinct() %>%
  group_by(flt_date) %>%
  tally()
# Get beach polygons active in the current year for the spatial join
bchyr_st = beach_st %>%
  filter(start_yr <= current_year & end_yr >= current_year) %>%
  select(beach_id_st = beach_id, bidn_st = bidn, beach_name_st = beach_name,
         start_yr, end_yr, geometry)
# # Get geometry data for spatial join
# bchyr_st = beach_st %>%
#   filter(start_yr <= current_year & end_yr >= current_year) %>%
#   select(beach_id_st = beach_id, bidn_st = bidn, beach_name_st = beach_name,
#          start_yr, end_yr, geometry)
# Verify all are in current year
unique(bchyr_st$start_yr)
unique(bchyr_st$end_yr)
# Get rid of year values
bchyr_st = bchyr_st %>%
  select(-c(start_yr, end_yr))
# Warn for duplicated bidns
if (any(duplicated(bchyr_st$bidn_st))) {
  cat("\nWarning: Duplicated BIDN. Investigate!\n\n")
}
# Workaround from a previous year for a duplicated Toandos State Park
# polygon, kept for reference:
# # Identify second instance of Toandos State Park geom
# bchyr_st = bchyr_st %>%
#   group_by(bidn_st) %>%
#   mutate(n_seq = row_number(bidn_st)) %>%
#   ungroup()
#
# # Check if beach truly duplicated...not exactly
# chk_bch = bchyr_st %>%
#   filter(bidn_st == 270080)
# identical(chk_bch$geometry[1], chk_bch$geometry[2])
#
# # Get rid of second instance of Toandos State Park geom
# bchyr_st = bchyr_st %>%
#   filter(n_seq == 1) %>%
#   select(- n_seq)
#
# # Warn for duplicated bidns
# if (any(duplicated(bchyr_st$bidn_st))) {
#   cat("\nWarning: Duplicated BIDN. Investigate!\n\n")
# }
# Check crs of both layers before joining
flt_st = flt_obs
st_crs(bchyr_st)$epsg
st_crs(flt_st)$epsg
# Check for missing geometry
any(is.na(flt_st$geometry))
all(st_is_valid(flt_st))
# Add BIDNs via spatial join
flt_st = st_join(flt_st, bchyr_st)
# Look for missing BIDNs....None this year
no_bidn_flt_obs = flt_st %>%
  filter(is.na(bidn_st)) %>%
  arrange(flt_date, obs_time) %>%
  select(uuid, flt_date, obs_time, uclam, user, flt_bidn, flt_beach_name, comments)
# Check for any duplicated flt_date zero counts.
# Result: None found
chk_zero_flt_date_dup = flt_st %>%
  st_drop_geometry() %>%
  filter(uclam == 0) %>%
  group_by(bidn_st, flt_date) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq > 1) %>%
  select(bidn_st, flt_date) %>%
  left_join(flt_st, by = c("bidn_st", "flt_date"))
# # Delete any duplicated zero LTC counts
# flt_st = flt_st %>%
#   group_by(bidn_st, flt_date) %>%
#   mutate(n_seq = row_number()) %>%
#   ungroup() %>%
#   filter(n_seq == 1L) %>%
#   select(-n_seq)
# Verify same number of dates remain: Result 42...Good.
length(unique(flt_st$flt_date))
#======================================================================================
# Output for FlightProof program
#======================================================================================
# # Output flight data to proofing folder
# flt_proof = flt_st %>%
#   select(BIDN = bidn_st, name = beach_name_st, date = flt_date, time = obs_time,
#          uclam, user_ = user, comments) %>%
#   st_transform(., 4326)
# st_crs(flt_proof)$epsg
# proof_path = glue("C:\\data\\intertidal\\Apps\\gis\\{current_year}\\")
# write_sf(flt_proof, dsn = glue("{proof_path}\\UCLAM_{current_year}.shp"), delete_layer = TRUE)
#==========================================================================================
# Get the zero count data. File holds counts == 0
# Had to manually move zeros into BIDNs in 221 cases
#==========================================================================================
# # In 2019 was being imported as 4269, but was actually 2927
# # In 2020 was imported as 4326
# Read the zero-count shapefile for the current year, then force it into
# EPSG:2927 (WA State Plane South) to match the rest of the spatial data.
flt_zero = read_sf(glue("Flight/data/{current_year}_FlightCounts/UClam_Zero_{current_year}.shp"))
# Verify crs
st_crs(flt_zero)$epsg
# Convert to proper crs
flt_zero = st_transform(flt_zero, 2927)
# Verify crs
st_crs(flt_zero)$epsg
# Inspect key columns. NOTE: column capitalization has varied by year
# (uclam vs Uclam, user_ vs User_); the commented alternates are kept
# for years where the shapefile uses capitalized names.
sort(unique(flt_zero$uclam))
# sort(unique(flt_zero$Uclam))
unique(flt_zero$user_)
# unique(flt_zero$User_)
any(is.na(flt_zero$time))
#any(is.na(flt_zero$Time))
unique(flt_zero$time) # MUST BE "0:00" !!!!! Otherwise get error below
# unique(flt_zero$Time) # MUST BE "0:00" !!!!! Otherwise get error below
unique(flt_zero$date)
# unique(flt_zero$Date)
unique(nchar(flt_zero$date))
# unique(nchar(flt_zero$Date))
n_zero_dates = unique(flt_zero$date)
# n_zero_dates = unique(flt_zero$Date)
length(n_zero_dates)
# Check for dates in zero's not in flt or vice versa
all(n_flt_dates %in% n_zero_dates)
all(n_zero_dates %in% n_flt_dates)
n_zero_dates[!n_zero_dates %in% n_flt_dates]
# Compare visually
sort(n_zero_dates)
sort(n_flt_dates)
# What?...so check data types for this iteration
# (dates in the flight data came in as Date/POSIX while the zero file
# holds character dates, so %in% comparisons fail until types match)
typeof(n_flt_dates)
typeof(n_zero_dates)
# Convert n_flt_dates to character
n_flt_dates = as.character(n_flt_dates)
# Check for dates in zero's not in flt or vice versa
all(n_flt_dates %in% n_zero_dates)
all(n_zero_dates %in% n_flt_dates)
# Check for zero dates not in flt_dates
n_zero_dates[!n_zero_dates %in% n_flt_dates]
# Check for flt_dates not in zero_dates
n_flt_dates[!n_flt_dates %in% n_zero_dates]
# REMEMBER TO CORRECT date type in flt data !!!!!!!!!!!!!!!!!!!!
# Check NA times
chk_time = flt_zero %>%
  filter(is.na(time))
# Check user
table(flt_zero$user_, useNA = "ifany")
# # Get rid of NA record for TIME....No useful coordinates
# flt_zero = flt_zero %>%
#   filter(!is.na(TIME))
# Format the zero-count data: zero-pad 4-character times to "HH:MM", add a
# uuid per row, default the user to "S" (state), tag the obs_type, and
# standardize column names to match the flight-count dataset.
# BUG FIX: the original created the zero-padded 'flt_time' column but then
# selected the raw unpadded 'time' as obs_time, silently discarding the
# padding. Select the padded value instead, so obs_time is always 5
# characters ("HH:MM") — consistent with how the LTC data is padded below.
flt_zero = flt_zero %>%
  mutate(flt_time = if_else(nchar(time) == 4, paste0("0", time), time)) %>%
  mutate(uuid = get_uuid(nrow(flt_zero))) %>%
  mutate(user_ = "S") %>%
  mutate(obs_type = "flight") %>%
  mutate(comments = NA_character_) %>%
  select(uuid, obs_type, flt_date = date, flt_bidn = BIDN, flt_beach_name = name,
         obs_time = flt_time, uclam, user = user_, comments)
# Verify crs
st_crs(flt_zero)$epsg
# Verify how many bidns per date
chk_zero_dates = flt_zero %>%
  st_drop_geometry %>%
  select(flt_date, flt_bidn) %>%
  distinct() %>%
  group_by(flt_date) %>%
  tally()
# Check for missing geometry
any(is.na(flt_zero$geometry))
all(st_is_valid(flt_zero))
# Add BIDNs via spatial join
# (bchyr_st is the beach-boundary layer for the current year; points
# falling outside any polygon get NA bidn_st)
flt_zero = st_join(flt_zero, bchyr_st)
# Look for missing BIDNs....Only cases were for Terrel Cove and Cottonwood Park
no_bidn_zero_obs = flt_zero %>%
  filter(is.na(bidn_st)) %>%
  arrange(flt_date, obs_time) %>%
  select(uuid, flt_date, obs_time, uclam, user, flt_bidn, flt_beach_name, comments)
# Check for any duplicated flt_date zero:
# Result: There's 121 sets of duplicate zero entries. Need to delete duplicates
chk_zero_date_dup = flt_zero %>%
  st_drop_geometry() %>%
  group_by(bidn_st, flt_date) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq > 1) %>%
  select(bidn_st, flt_date) %>%
  left_join(flt_zero, by = c("bidn_st", "flt_date"))
# Delete duplicate zero entries: Got rid of 121 rows...Correct
flt_zero = flt_zero %>%
  group_by(bidn_st, flt_date) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq == 1L) %>%
  select(-n_seq)
# Verify same number of dates remain: Result 42...Good.
length(unique(flt_zero$flt_date))
# #======================================================================================
# # Output for FlightProof program
# #======================================================================================
# # Output zero data to proofing folder
# zero_proof = flt_zero %>%
#   select(BIDN = bidn_st, name = beach_name_st, date = flt_date, time = obs_time,
#          uclam, user_ = user) %>%
#   st_transform(., 4326)
# st_crs(zero_proof)$epsg
# proof_path = glue("C:\\data\\intertidal\\Apps\\gis\\{current_year}\\")
# write_sf(zero_proof, dsn = glue("{proof_path}\\ZERO_{current_year}.shp"), delete_layer = TRUE)
#=============================================================================
# Check for errant obs_time values in flt_obs
#=============================================================================
# Get tide times data for the current year from the local shellfish DB.
qry = glue("select distinct t.low_tide_datetime as tide_date, pl.location_name as tide_station, ",
           "t.tide_time_minutes as tide_time, t.tide_height_feet as tide_height, ",
           "ts.tide_strata_code as tide_strata ",
           "from tide as t ",
           "left join point_location as pl ",
           "on t.tide_station_location_id = pl.point_location_id ",
           "left join tide_strata_lut as ts ",
           "on t.tide_strata_id = ts.tide_strata_id ",
           "where date_part('Year', t.low_tide_datetime) = {current_year} ",
           "order by t.low_tide_datetime")
# Run the query
db_con = pg_con_local(dbname = "shellfish")
tide_times = dbGetQuery(db_con, qry)
dbDisconnect(db_con)
# Explicitly convert timezones (DB stores UTC; analysis is local PT)
tide_times = tide_times %>%
  mutate(tide_date = with_tz(tide_date, tzone = "America/Los_Angeles")) %>%
  mutate(tide_date = format(tide_date))
# Check if any tide_times strata differ by date...should only be Seattle strata...All Ok
time_check = tide_times %>%
  select(tide_date, tide_strata) %>%
  distinct() %>%
  group_by(tide_date) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq > 1)
# Reduce down to only lowest tide per day
# (Feb-Sep daylight tides only; NIGHT strata excluded since flights are daytime)
tides = tide_times %>%
  rename(low_tide_datetime = tide_date) %>%
  mutate(mnth = as.integer(month(low_tide_datetime))) %>%
  filter(mnth %in% seq(2, 9)) %>%
  filter(!tide_strata == "NIGHT") %>%
  mutate(tide_date = substr(format(low_tide_datetime), 1, 10)) %>%
  arrange(tide_date, tide_height) %>%
  mutate(tide_time_hm = min_to_hm(tide_time)) %>%
  mutate(flt_date = tide_date) %>%
  select(flt_date, tide_station, tide_time_hm, tide_time, tide_height, tide_strata)
# Filter down to only one tide per day
tides = tides %>%
  arrange(flt_date, desc(tide_station), tide_strata) %>%
  group_by(flt_date, tide_station) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq == 1) %>%
  select(-n_seq)
# Check for duplicates
any(duplicated(tides$flt_date))
# Get the tide correction data (per-beach minute offsets from reference station)
qry = glue("select distinct bb.beach_number as bidn, b.local_beach_name as beach_name, ",
           "pl.location_name as tide_station, b.low_tide_correction_minutes as lt_corr ",
           "from beach as b ",
           "inner join beach_boundary_history as bb ",
           "on b.beach_id = bb.beach_id ",
           "left join point_location as pl ",
           "on b.tide_station_location_id = pl.point_location_id ",
           "order by bb.beach_number")
# Run the query
db_con = pg_con_local(dbname = "shellfish")
tide_corr = dbGetQuery(db_con, qry)
dbDisconnect(db_con)
# Check for duplicated BIDNs
chk_dup_beach = tide_corr %>%
  filter(duplicated(bidn)) %>%
  left_join(tide_corr, by = "bidn")
# Report if any duplicated beach_ids or BIDNs
if (nrow(chk_dup_beach) > 0) {
  cat("\nWARNING: Duplicated BIDNs. Do not pass go!\n\n")
} else {
  cat("\nNo duplicated BIDNs. Ok to proceed.\n\n")
}
# Pull out count data for comparison with tides data.
# Rows with a missing or "00:00" obs_time carry no usable time-stamp.
count_times = flt_st %>%
  st_drop_geometry() %>%
  mutate(data_source = "flt_counts") %>%
  select(flt_date, bidn = bidn_st, beach_name = flt_beach_name, obs_time, uclam,
         user, comments, data_source) %>%
  filter(!is.na(obs_time) & !obs_time == "00:00")
# Join obs_times and corrections
tide_times = count_times %>%
  select(-beach_name) %>%
  mutate(bidn = as.integer(bidn)) %>%
  left_join(tide_corr, by = "bidn")
# Join obs_times and tide_times, then compute how far each observation
# time is from the beach-corrected low-tide time (minutes_off).
tide_times = tide_times %>%
  mutate(flt_date = as.character(flt_date)) %>%
  left_join(tides, by = c("flt_date", "tide_station")) %>%
  mutate(tide_min = tide_time + lt_corr) %>%
  # CAN GENERATE WARNING HERE IF NO OBS_TIME !!!!!!!!!!!!!!!
  mutate(obs_min = hm_to_min(obs_time)) %>%
  mutate(minutes_off = abs(tide_min - obs_min)) %>%
  arrange(desc(minutes_off))
# Check distribution
table(tide_times$minutes_off, useNA = "ifany")
# RESULT: Max dif was 85 minutes,
# Typically times are off by an hour or more at the start
# then dwindle to minimal as route heads southward
# Check where times differ by more than an hour
chk_count_times = tide_times %>%
  filter(minutes_off > 60) %>%
  select(flt_date, bidn, beach_name, ref_tide_time = tide_time, tide_station,
         tide_height, tide_strata, lt_corr, tide_min, obs_min, minutes_off)
#=============================================================================
# Prep to combine with other datasets
#=============================================================================
# Check for missing beach_id...None
any(is.na(flt_st$beach_id_st))
# Check user
unique(flt_st$user)
# Check beach_names and BIDNs: compare the BIDN recorded during the flight
# against the BIDN assigned by the spatial join (bidn_st).
chk_beach_names = flt_st %>%
  st_drop_geometry() %>%
  mutate(flt_bidn = as.integer(flt_bidn)) %>%
  select(flt_bidn, bidn_st, flt_beach_name, beach_name_st) %>%
  distinct() %>%
  mutate(bidn_dif = if_else(!flt_bidn == bidn_st, "different", "the same"))
# Message to inspect
cat("\nMake sure to manually inspect 'chk_beach_names' to make sure names and BIDNs match!\n\n")
# Format flt_obs for binding. All aerial counts are recreational ("R").
flt_obs = flt_st %>%
  mutate(user = "R") %>%
  mutate(comments = trim(comments)) %>%
  mutate(comments = if_else(comments == "", NA_character_, comments)) %>%
  mutate(source = "aerial") %>%
  select(flt_date, beach_id = beach_id_st, bidn = bidn_st, beach_name = beach_name_st,
         obs_time, uclam, user, comments, source)
#=============================================================================
# Add zero counts to flt_obs
#=============================================================================
# Format fz for binding. Zero counts are attributed to "S" (state).
fz = flt_zero %>%
  mutate(user = "S") %>%
  mutate(comments = trim(comments)) %>%
  mutate(comments = if_else(comments == "", NA_character_, comments)) %>%
  mutate(source = "aerial") %>%
  select(flt_date, flt_bidn, flt_beach_name, obs_time, uclam, user,
         comments, source)
# Check crs prior to join
st_crs(fz)$epsg
st_crs(bchyr_st)$epsg
# Add BIDNs via spatial join
fz = st_join(fz, bchyr_st)
# Check for missing beach_ids: Result: None
chk_fz = fz %>%
  filter(is.na(beach_id_st)) %>%
  select(flt_bidn, flt_beach_name) %>%
  st_drop_geometry()
# Pull out variables in common with flt_obs
fz_obs = fz %>%
  mutate(uclam = as.integer(uclam)) %>%
  select(flt_date, beach_id = beach_id_st, bidn = bidn_st, beach_name = beach_name_st,
         obs_time, uclam, user, comments, source)
# Needed to change datatypes for flt_obs so rbind() succeeds
flt_obs = flt_obs %>%
  mutate(flt_date = as.character(flt_date)) %>%
  mutate(uclam = as.integer(uclam))
# Combine data into one dataset
flt = rbind(flt_obs, fz_obs)
# Check user
unique(flt$user)
#=============================================================================
# Get the ltc data
#=============================================================================
# Get the LTC (low-tide ground count) data from the current year's shapefile
ltc_obs = read_sf(glue("Flight/data/{current_year}_FlightCounts/LTC_{current_year}.shp"))
# Inspect
sort(unique(ltc_obs$Uclam))
unique(ltc_obs$User_)
any(is.na(ltc_obs$Time))
unique(ltc_obs$Time) # MUST BE "00:00" format !!!!! Otherwise get error below
# unique(ltc_obs$Comments)
unique(ltc_obs$Date)
unique(ltc_obs$obs_type)
n_ltc_dates = unique(ltc_obs$Date)
length(n_ltc_dates)
# Format: zero-pad 4-char times to "HH:MM", add a uuid per row, and
# standardize column names.
ltc_obs = ltc_obs %>%
  mutate(uuid = get_uuid(nrow(ltc_obs))) %>%
  mutate(Time = if_else(nchar(Time) == 4, paste0("0", Time), Time)) %>%
  select(uuid, flt_date = Date, flt_bidn = BIDN, flt_beach_name = name,
         obs_type, obs_time = Time, uclam = Uclam, user = User_,
         comments = Comments)
# Check again
unique(ltc_obs$obs_time)
all(nchar(ltc_obs$obs_time) == 5L)
# Verify crs
st_crs(ltc_obs)$epsg
# Convert to proper crs
ltc_obs = st_transform(ltc_obs, 2927)
# Verify crs
st_crs(ltc_obs)$epsg
# Add source
ltc = ltc_obs %>%
  mutate(source = "ground") %>%
  select(uuid, flt_date, flt_bidn, flt_beach_name, obs_type, obs_time, uclam,
         user, comments, source)
# Format user: collapse shapefile labels to single-letter codes
# (T = tribal, R = recreational)
table(ltc$user, useNA = "ifany")
ltc = ltc %>%
  mutate(user = case_when(
    user == "Tribal" ~ "T",
    user == "Rec" ~ "R",
    user == "R" ~ "R"))
table(ltc$user, useNA = "ifany")
# Add BIDNs via spatial join
ltc_st = st_join(ltc, bchyr_st)
# Look for missing BIDNs
no_bidn_ltc_obs = ltc_st %>%
  filter(is.na(bidn_st)) %>%
  arrange(flt_date, obs_time) %>%
  select(uuid, flt_date, obs_type, obs_time, uclam, user, flt_bidn, flt_beach_name, comments)
# Check for any duplicated ltc_date zero:
# Result: None found
chk_zero_ltc_date_dup = ltc_st %>%
  st_drop_geometry() %>%
  filter(uclam == 0) %>%
  group_by(bidn_st, flt_date) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq > 1) %>%
  select(bidn_st, flt_date) %>%
  left_join(ltc_st, by = c("bidn_st", "flt_date"))
# # Delete any duplicated zero LTC counts
# ltc_st = ltc_st %>%
#   group_by(bidn_st, flt_date) %>%
#   mutate(n_seq = row_number()) %>%
#   ungroup() %>%
#   filter(n_seq == 1L) %>%
#   select(-n_seq)
# Verify same number of dates remain: Result 42...Good.
length(unique(ltc_st$flt_date))
# # Output ltc data to proofing folder
# ltc_proof = ltc_st %>%
#   select(BIDN = bidn_st, name = beach_name_st, date = flt_date, time = obs_time,
#          uclam, user_ = user, obs_type, comments) %>%
#   st_transform(., 4326)
# st_crs(ltc_proof)$epsg
# proof_path = glue("C:\\data\\intertidal\\Apps\\gis\\{current_year}\\")
# write_sf(ltc_proof, dsn = glue("{proof_path}\\LTC_{current_year}.shp"), delete_layer = TRUE)
#=============================================================================
# Check for errant obs_time values in ltc_obs
#=============================================================================
# # Filter out Dash Point (2019)...no polygon for beach. Not in plans.
# ltc_st = ltc_st %>%
#   filter(!flt_beach_name == "DASH POINT")
# Join tide_correction data to ltc_st and compute minutes between each
# observation and the beach-corrected low-tide time.
ltc_st_chk = ltc_st %>%
  mutate(bidn = as.integer(flt_bidn)) %>%
  mutate(flt_date = as.character(flt_date)) %>%
  left_join(tide_corr, by = "bidn") %>%
  left_join(tides, by = c("flt_date", "tide_station")) %>%
  mutate(tide_min = tide_time + lt_corr) %>%
  # CAN GENERATE WARNING HERE IF NO OBS_TIME !!!!!!!!!!!!!!!
  mutate(obs_min = hm_to_min(obs_time)) %>%
  mutate(minutes_off = abs(tide_min - obs_min)) %>%
  arrange(desc(minutes_off))
# Check distribution
table(ltc_st_chk$minutes_off, useNA = "ifany")
# RESULT: None greater than 62 minutes
# Double-check user
table(ltc_st$user, useNA = "ifany")
# Pull out variables in common with flt_obs
ltc_obs = ltc_st %>%
  mutate(source = "ground") %>%
  select(flt_date, beach_id = beach_id_st, bidn = bidn_st, beach_name = beach_name_st,
         obs_time, uclam, user, comments, source)
# Check for missing beach_ids
any(is.na(ltc_obs$beach_id))
#============================================================================================
# Process annual flight data for upload. ONLY UPLOAD AFTER DATA RUN THROUGH FlightProof !
#============================================================================================
# Check crs
st_crs(flt)$epsg
st_crs(ltc_st)$epsg
# Update datatypes as needed before binding
ltc_obs = ltc_obs %>%
  mutate(flt_date = as.character(flt_date)) %>%
  mutate(uclam = as.integer(uclam))
# Combine flt and ltc data
flt = rbind(flt, ltc_obs)
# Check flight data. Convert to st geometry. Will not work unless all coordinates present
chk_flt = st_transform(flt, crs = 4326)
# Pull out original lat-lons
chk_flt = chk_flt %>%
  mutate(lon = as.numeric(st_coordinates(geometry)[,1])) %>%
  mutate(lat = as.numeric(st_coordinates(geometry)[,2]))
# Check for any outlier coordinates (all should fall within Puget Sound)
sort(unique(round(chk_flt$lat, 1)))
sort(unique(round(chk_flt$lon, 1)))
# Check dates
sort(unique(substr(flt$flt_date, 1, 4))) # Year
sort(unique(substr(flt$flt_date, 6, 7))) # Months
# Check obs_times
sort(unique(substr(flt$obs_time, 1, 2))) # Hour
sort(unique(substr(flt$obs_time, 4, 5))) # Minute
#======================================================================
# Process annual flight data for upload
#======================================================================
# Locate cases where bidn > 0 and beach_id is missing
# None in 2020
chk_bidn = flt %>%
  filter(is.na(beach_id))
# # Output chk_bidn as a shape file
# write_sf(chk_bidn, dsn = glue("Shapefiles\\FlightCounts_{current_year}\\chk_bidn.shp"), delete_layer = TRUE)
# survey table ===========================================================================
# Define LTC vs Flight survey_type =========
# Check for missing date or geometry
any(is.na(flt$flt_date))
any(is.na(flt$geometry))
# Check varieties of User. flight surveys and LTCs are separate survey types.
unique(flt$user)
# Verify all entries in flt have a source
table(flt$source, useNA = "ifany")
# Define survey_type: ground observations are LTCs, everything else aerial
flt = flt %>%
  mutate(ltc = if_else(source == "ground", "yes", "no"))
# Check
unique(flt$user)
(g = table(flt$user, useNA = "ifany"))
# Check
unique(flt$ltc)
(g2 = table(flt$ltc, useNA = "ifany"))
# Define the survey type
# (hard-coded survey_type_id UUIDs from the shellfish DB lookup table:
#  first = LTC, second = aerial flight)
flt = flt %>%
  mutate(survey_type_id = if_else(ltc == "yes",
                                  "40b60612-2425-46b3-a5d5-c7fa9b4b0571",
                                  "7153c1cb-79cf-41d6-9fc1-966470c1460b"))
# Warn if survey_type_id missing
if (any(is.na(flt$survey_type_id))) {
  cat("\nWARNING: survey_type_id missing. Do not pass go!\n\n")
} else {
  cat("\nAll survey_type_ids defined. Ok to proceed.\n\n")
}
#==============================================================================================
# Need to separate LTCs from flight surveys. Each LTC date and beach combo is a separate survey
#==============================================================================================
# Verify no missing date
if (any(is.na(flt$flt_date))) {
  cat("\nWARNING: Date missing somewhere. Do not pass go!\n\n")
} else {
  cat("\nNo missing Dates. Ok to proceed.\n\n")
}
# Get ltc data
ltc_tab = flt %>%
  filter(ltc == "yes")
# Get flight data
flt_tab = flt %>%
  filter(ltc == "no")
#========== LTC data ================================
# Look for cases when two or more zero counts entered for same date, and beach combo
ltc_zero = ltc_tab %>%
  filter(uclam == 0L)
# Add a count by bidn, date, and user; keep only the first zero per combo
ltc_zero = ltc_zero %>%
  group_by(bidn, flt_date, user) %>%
  mutate(n_zero = row_number(uclam)) %>%
  ungroup() %>%
  filter(n_zero == 1L) %>%
  select(-n_zero)
ltc_count = ltc_tab %>%
  filter(uclam > 0L)
# Combine back together
ltc_tab = rbind(ltc_count, ltc_zero)
# Generate the survey_id for LTCs: one survey per date + beach combo
ltc_tab = ltc_tab %>%
  group_by(flt_date, bidn) %>%
  mutate(survey_id = get_uuid(1L)) %>%
  ungroup()
# Drop the geometry column for the survey table
surv_ltc = ltc_tab %>%
  select(survey_id, survey_type_id, beach_id,
         survey_datetime = flt_date) %>%
  st_drop_geometry()
# Set to unique. Won't work with geometry column still in place.
# Also need beach_id for LTC data...but not with flight data
# For LTC one beach = one survey. For flights one survey = many beaches
survey_ltc = surv_ltc %>%
  distinct()
# Check for duplicated survey_ids
any(duplicated(survey_ltc$survey_id))
#========== Flight data ================================
# Look for cases when two or more zero counts entered for same date, and beach combo
flt_zero = flt_tab %>%
  filter(uclam == 0L)
# Add a count by bidn, date, and user so only the first zero can be filtered out for use
flt_zero = flt_zero %>%
  group_by(bidn, flt_date, user) %>%
  mutate(n_zero = row_number(uclam)) %>%
  ungroup() %>%
  filter(n_zero == 1L) %>%
  select(-n_zero)
flt_count = flt_tab %>%
  filter(uclam > 0L)
# Combine back together
flt_tab = rbind(flt_count, flt_zero)
# Generate the survey_id for flights: one survey per date (many beaches)
flt_tab = flt_tab %>%
  group_by(flt_date) %>%
  mutate(survey_id = get_uuid(1L)) %>%
  ungroup() %>%
  filter(!is.na(beach_id))
# Drop the geometry column for the survey table
surv_flt = flt_tab %>%
  select(survey_id, survey_type_id, survey_datetime = flt_date) %>%
  st_drop_geometry()
# Set to unique. Won't work with geometry column still in place.
survey_flt = surv_flt %>%
  distinct()
# Check for duplicated survey_ids
# NOTE(review): this first check repeats the survey_ltc check done above;
# the survey_flt check below is the new one.
any(duplicated(survey_ltc$survey_id))
# Check for duplicated survey_ids
any(duplicated(survey_flt$survey_id))
# Combine flt and ltc into one dataset for later tables
fltall = rbind(flt_tab, ltc_tab)
# Check on zero counts and duplicates over both flt and ltc data ================================
# Get data
chk_zero = fltall %>%
  select(survey_id, flt_date, obs_time, bidn, uclam, user, source)
# Convert to lat-lon so coordinates can be compared as text
chk_zero = st_transform(chk_zero, 4326)
chk_zero = chk_zero %>%
  mutate(lon = as.numeric(st_coordinates(geometry)[,1])) %>%
  mutate(lat = as.numeric(st_coordinates(geometry)[,2])) %>%
  mutate(coords = paste0(lat, ":", lon)) %>%
  select(survey_id, flt_date, obs_time, bidn, uclam,
         user, source, coords) %>%
  st_drop_geometry()
# Make sure there is no more than one case per beach and day of a zero count...Unless one is from flight and the other from LTC
chk_zero = chk_zero %>%
  filter(uclam == 0L) %>%
  group_by(survey_id, flt_date, bidn) %>%
  mutate(n_zero = row_number(uclam)) %>%
  ungroup() %>%
  filter(n_zero > 1L) %>%
  select(survey_id, bidn, uclam, coords) %>%
  left_join(fltall, by = c("survey_id", "bidn", "uclam"))
# Inspection of coordinates shows that each set of data was entered separately
# One of each pair came separately from flight and zeros files. Just get rid of
# the zero's data since the flight data has more info, i.e. time-stamps
# Get rows to delete to use in anti-join
chk_zero = chk_zero %>%
  arrange(flt_date, bidn, user) %>%
  group_by(survey_id) %>%
  mutate(n_seq = row_number()) %>%
  ungroup() %>%
  filter(n_seq > 1L) %>%
  select(survey_id, flt_date, bidn, user, uclam)
# Do an anti-join to get rid of extraneous zero counts
fltall = fltall %>%
  anti_join(chk_zero, by = c("survey_id", "flt_date", "bidn", "user", "uclam"))
# Check again: should now be zero rows
chk_zero = fltall %>%
  st_drop_geometry() %>%
  filter(uclam == 0L) %>%
  group_by(survey_id, flt_date, bidn) %>%
  mutate(n_zero = row_number(uclam)) %>%
  ungroup() %>%
  filter(n_zero > 1L) %>%
  select(survey_id, bidn, uclam) %>%
  left_join(fltall, by = c("survey_id", "bidn", "uclam"))
# Message
if ( nrow(chk_zero) > 0L ) {
  cat("\nWARNING: Some zero counts entered more than once. Do not pass go!\n\n")
} else {
  cat("\nAll zero counts entered only once. Ok to proceed!\n\n")
}
# Load survey data ===========================================
# Add beach_id to survey_flt so datasets can be combined
# (flight surveys span many beaches, so beach_id is NA at the survey level)
survey_flt = survey_flt %>%
  mutate(beach_id = NA_character_) %>%
  select(survey_id, survey_type_id, beach_id, survey_datetime)
# Combine survey_flt and survey_ltc into one dataset
survey = rbind(survey_flt, survey_ltc)
# Check for any duplicated survey_ids
any(duplicated(survey$survey_id))
# Add remaining fields; datetimes are stored in UTC, ids are lookup-table
# UUIDs from the shellfish DB (see inline notes for the chosen defaults).
survey = survey %>%
  mutate(survey_datetime = with_tz(as.POSIXct(survey_datetime, tz = "America/Los_Angeles"), tzone = "UTC")) %>%
  mutate(sampling_program_id = "f1de1d54-d750-449f-a700-dc33ebec04c6") %>%
  mutate(area_surveyed_id = "72e4eeea-ad0e-45d9-a2c9-3e2a5a5d06b7") %>% # Defaulting to "Entire beach" correct the few incorrect later.
  mutate(data_review_status_id = "bdefcb1f-80c4-4921-9cf4-66c8dde02d4b") %>% # Defaulting to "Final"
  mutate(survey_completion_status_id = "d192b32e-0e4f-4719-9c9c-dec6593b1977") %>% # Defaulting to "Completed"
  mutate(point_location_id = NA_character_) %>%
  mutate(start_datetime = with_tz(as.POSIXct(NA), "UTC")) %>%
  mutate(end_datetime = with_tz(as.POSIXct(NA), "UTC")) %>%
  mutate(comment_text = NA_character_) %>%
  mutate(created_datetime = with_tz(Sys.time(), "UTC")) %>%
  mutate(created_by = Sys.getenv("USERNAME")) %>%
  mutate(modified_datetime = with_tz(as.POSIXct(NA), "UTC")) %>%
  mutate(modified_by = NA_character_) %>%
  select(survey_id, survey_type_id, sampling_program_id, beach_id, point_location_id,
         area_surveyed_id, data_review_status_id, survey_completion_status_id,
         survey_datetime, start_datetime, end_datetime, comment_text,
         created_datetime, created_by, modified_datetime, modified_by)
# # Write to shellfish
# db_con = pg_con_local(dbname = "shellfish")
# DBI::dbWriteTable(db_con, "survey", survey, row.names = FALSE, append = TRUE)
# DBI::dbDisconnect(db_con)
#
# # Write to shellfish on prod
# db_con = pg_con_prod(dbname = "shellfish")
# DBI::dbWriteTable(db_con, "survey", survey, row.names = FALSE, append = TRUE)
# DBI::dbDisconnect(db_con)
# Clean-up
rm(list = c("chk_bidn", "chk_fz", "flt", "flt_count", "flt_obs", "flt_tab",
            "flt_zero", "fz", "fz_obs", "ltc_tab", "ltc", "surv_flt",
            "surv_ltc", "survey", "survey_flt", "survey_ltc", "ltc_zero",
            "ltc_count"))
# survey_event table =================================================================
# Generate the survey_event_id: one per observation row
flt = fltall %>%
  mutate(survey_event_id = get_uuid(nrow(fltall))) %>%
  arrange(bidn, flt_date, obs_time)
# Check for duplicate survey_event_ids. There should no duplicates.
# Each observation should be unique.
if (any(duplicated(flt$survey_event_id))) {
  cat("\nWARNING: Some duplicated survey_event_ids. Do not pass go!\n\n")
} else {
  cat("\nNo duplicated survey_event_ids. Ok to proceed.\n\n")
}
#========== Point location table ======================
# Since every row is a unique observation, generate UUID in flt
flt = flt %>%
  mutate(point_location_id = get_uuid(nrow(flt)))
# Pull out needed columns
pt_loc = flt %>%
  select(point_location_id, beach_id)
# Spatial join to get shellfish_management_area_id
# Verify both sides of st_join will be sf objects
inherits(pt_loc, "sf")
inherits(sfma_st, "sf")
inherits(mng_reg_st, "sf")
st_crs(pt_loc)$epsg
st_crs(sfma_st)$epsg
st_crs(mng_reg_st)$epsg
# Add sfma and mng_area via spatial join
pt_loc = st_join(pt_loc, sfma_st)
pt_loc = st_join(pt_loc, mng_reg_st)
# Check for missing values of sfma and mng_reg
any(is.na(pt_loc$shellfish_management_area_id))
any(is.na(pt_loc$management_region_id))
any(is.na(pt_loc$point_location_id))
any(duplicated(pt_loc$point_location_id))
# Get most recent gid so new rows continue the sequence
qry = glue("select max(gid) from point_location")
# Get values from shellfish
db_con = pg_con_local(dbname = "shellfish")
max_gid = dbGetQuery(db_con, qry)
dbDisconnect(db_con)
# Add one
new_gid = max_gid$max + 1L
# Add some columns required by the point_location table schema
pt_loc_tab = pt_loc %>%
  mutate(location_type_id = "cd9b431e-4750-4f54-a67e-a4252a4189f2") %>% # Intertidal harvest count
  mutate(beach_id = NA_character_) %>%
  mutate(location_code = NA_character_) %>%
  mutate(location_name = NA_character_) %>%
  mutate(location_description = NA_character_) %>%
  mutate(horizontal_accuracy = NA_real_) %>%
  mutate(comment_text = NA_character_) %>%
  mutate(gid = seq(new_gid, new_gid + nrow(pt_loc) - 1L)) %>%
  mutate(created_datetime = with_tz(Sys.time(), "UTC")) %>%
  mutate(created_by = Sys.getenv("USERNAME")) %>%
  mutate(modified_datetime = with_tz(as.POSIXct(NA), "UTC")) %>%
  mutate(modified_by = NA_character_) %>%
  select(point_location_id, location_type_id, beach_id,
         shellfish_management_area_id, management_region_id, location_code,
         location_name, location_description, horizontal_accuracy,
         comment_text, gid, geometry, created_datetime,
         created_by, modified_datetime, modified_by)
# # Write beach_history_temp to shellfish
# db_con = pg_con_local(dbname = "shellfish")
# st_write(obj = pt_loc_tab, dsn = db_con, layer = "point_location_temp")
# DBI::dbDisconnect(db_con)
#
# # Write beach_history_temp to shellfish
# db_con = pg_con_prod(dbname = "shellfish")
# st_write(obj = pt_loc_tab, dsn = db_con, layer = "point_location_temp")
# DBI::dbDisconnect(db_con)
# Use select into query to get data into point_location
# (temp table holds text columns; the CASTs restore DB types on insert)
qry = glue::glue("INSERT INTO point_location ",
                 "SELECT CAST(point_location_id AS UUID), CAST(location_type_id AS UUID), ",
                 "CAST(beach_id AS UUID), location_code, location_name, ",
                 "location_description, horizontal_accuracy, comment_text, gid, ",
                 "geometry as geom, CAST(created_datetime AS timestamptz), created_by, ",
                 "CAST(modified_datetime AS timestamptz), modified_by ",
                 "FROM point_location_temp")
# # Insert select to shellfish
# db_con = pg_con_local(dbname = "shellfish")
# DBI::dbExecute(db_con, qry)
# DBI::dbDisconnect(db_con)
#
# # Insert select to shellfish
# db_con = pg_con_prod(dbname = "shellfish")
# DBI::dbExecute(db_con, qry)
# DBI::dbDisconnect(db_con)
#
# # Drop temp
# db_con = pg_con_local(dbname = "shellfish")
# DBI::dbExecute(db_con, "DROP TABLE point_location_temp")
# DBI::dbDisconnect(db_con)
#
# # Drop temp
# db_con = pg_con_prod(dbname = "shellfish")
# DBI::dbExecute(db_con, "DROP TABLE point_location_temp")
# DBI::dbDisconnect(db_con)
#========== Back to survey_event table ======================
# # Write flt to a temp file
# flight_data = flt
# flt_dat = flt
# saveRDS(flt_dat, "flt_dat.rds")
# flt = readRDS("flt_dat.rds")
# identical(flt_dat, flt)
# Pull out survey_event data
survey_event = flt %>%
  select(survey_event_id, survey_id, event_location_id = point_location_id,
         bidn, beach_name, user, event_time = obs_time, event_date = flt_date,
         harvester_count = uclam, comments) %>%
  st_drop_geometry()
# Verify no duplicated survey_event_ids
if (any(duplicated(survey_event$survey_event_id))) {
  cat("\nWARNING: Duplicated survey_event_id. Do not pass go!\n\n")
} else {
  cat("\nNo duplicated survey_event_ids. Ok to proceed.\n\n")
}
# No need for event number:
# Zero's and no time values for zero entries make that impossible for now.
# Check unique event_time
unique(survey_event$event_time)
any(!nchar(survey_event$event_time) == 5)
# Convert any time values where nchar == 4 to add leading zero
survey_event = survey_event %>%
  mutate(event_time = if_else(nchar(event_time) == 4,
                              paste0("0", event_time), event_time))
# Check again
unique(survey_event$event_time)
any(!nchar(survey_event$event_time) == 5)
# Generate event_datetime (local PT converted to UTC for storage)
survey_event = survey_event %>%
  mutate(event_date = paste0(event_date, " ", event_time, ":00")) %>%
  mutate(event_datetime = with_tz(as.POSIXct(event_date, tz = "America/Los_Angeles"), tzone = "UTC"))
# Set harvester_type_id: S and R map to the same (recreational) id,
# T maps to the tribal id.
unique(survey_event$user)
survey_event = survey_event %>%
  mutate(harvester_type_id = recode(user,
                                    "S" = "772f21fb-d07c-4998-a1d9-f7bb49df4db4",
                                    "R" = "772f21fb-d07c-4998-a1d9-f7bb49df4db4",
                                    "T" = "070980cf-74a7-4192-9319-f93325c7b6e4"))
# Add remaining fields (id defaults are the "not applicable" lookup values)
survey_event = survey_event %>%
  mutate(harvest_method_id = "68cb2cb1-77df-49f3-872b-5e3ba3299e14") %>% # NA
  mutate(harvest_gear_type_id = "5d1e6be6-cd85-498c-b2fa-43b370d951b4") %>% # NA
  mutate(harvest_depth_range_id = "b27281f7-9387-4a51-b91f-95748676f918") %>% # NA
  mutate(event_number = NA_integer_) %>%
  mutate(harvest_gear_count = NA_integer_) %>%
  mutate(harvester_zip_code = NA_integer_) %>%
  mutate(comment_text = trimws(comments)) %>%
  mutate(created_datetime = with_tz(Sys.time(), "UTC")) %>%
  mutate(created_by = Sys.getenv("USERNAME")) %>%
  mutate(modified_datetime = with_tz(as.POSIXct(NA), "UTC")) %>%
  mutate(modified_by = NA_character_) %>%
  select(survey_event_id, survey_id, event_location_id, harvester_type_id,
         harvest_method_id, harvest_gear_type_id, harvest_depth_range_id,
         event_number, event_datetime, harvester_count, harvest_gear_count,
         harvester_zip_code, comment_text, created_datetime, created_by,
         modified_datetime, modified_by)
# # Write to shellfish
# db_con = pg_con_local(dbname = "shellfish")
# DBI::dbWriteTable(db_con, "survey_event", survey_event, row.names = FALSE, append = TRUE)
# DBI::dbDisconnect(db_con)
#
# # Write to shellfish on prod
# db_con = pg_con_prod(dbname = "shellfish")
# DBI::dbWriteTable(db_con, "survey_event", survey_event, row.names = FALSE, append = TRUE)
# DBI::dbDisconnect(db_con)
#============================================================================================
# Final check to verify the same number of rows exist in both local and production DBs
#============================================================================================
# Get table names and row counts
local_row_counts = db_table_counts(db_server = "local")
prod_row_counts = db_table_counts(db_server = "prod")
# Combine to a dataframe
compare_counts = local_row_counts %>%
  left_join(prod_row_counts, by = "table") %>%
  # Ignore tables that exist in local but not prod
  filter(!table %in% c("geometry_columns", "geometry_columns", "spatial_ref_sys")) %>%
  filter(!substr(table, 1, 10) == "beach_info") %>%
  filter(!substr(table, 1, 12) == "flight_count") %>%
  # Pull out and rename
  select(table, local = row_count.x, prod = row_count.y) %>%
  mutate(row_diff = abs(local - prod))
# Inspect any differences
diff_counts = compare_counts %>%
  filter(!row_diff == 0L)
# Output message
if ( nrow(diff_counts) > 0 ) {
  cat("\nWARNING: Some row counts differ. Inspect 'diff_counts'.\n\n")
} else {
  cat("\nRow counts are the same. Ok to proceed.\n\n")
}
|
c3dfa87044c5fea26c4d2002de55f8fba9abfbc5
|
11ab32074034da3e5a2d2d1235ea6bbc3241366a
|
/R/compileDatabase.R
|
09da57163607354392a45695f1aa011258b8c263
|
[] |
no_license
|
powellcenter-soilcarbon/soilcarbon
|
0b3e79c324d0f7e192fc5069366a1e47604e354c
|
99781ba2bb2161ff44b8965f67149f7a7ba635bc
|
refs/heads/master
| 2021-01-11T15:09:05.004344
| 2018-08-23T10:52:17
| 2018-08-23T10:52:17
| 80,300,357
| 17
| 8
| null | 2019-07-01T18:54:43
| 2017-01-28T17:58:47
|
HTML
|
UTF-8
|
R
| false
| false
| 1,558
|
r
|
compileDatabase.R
|
#' compileDatabase
#'
#' Builds the soilcarbon database: reads every xlsx dataset in a directory,
#' runs data QC on each, and appends the QC-passing ones (flattened) to the
#' special Yujie dataset shipped with the package.
#'
#' @param dataset_directory directory where compeleted and QC passed soilcarbon datasets are stored
#' @return data.frame containing the compiled soilcarbon database
#' @export
#' @import devtools
#' @import stringi
compileDatabase <- function(dataset_directory) {
  requireNamespace("stringi")
  # Only xlsx files in the target directory are considered datasets.
  data_files <- list.files(dataset_directory, full.names = TRUE)
  data_files <- data_files[grep("xlsx", data_files)]

  # special dataset (Yujie) shipped with the package seeds the database
  Yujie_file <- system.file("extdata", "Yujie_dataset.csv", package = "soilcarbon")
  Yujie_database <- readYujie(Yujie_file)
  working_database <- Yujie_database
  # Coerce all columns to character so heterogeneous datasets can be rbind-ed;
  # types are restored with type.convert() at the end.
  working_database[] <- lapply(working_database, as.character)

  # seq_along() is safe when no xlsx files are found (1:length() would
  # iterate over c(1, 0) and fail on an empty directory).
  for (i in seq_along(data_files)) {
    soilcarbon_data <- read.soilcarbon(data_files[i])
    # QC report file mirrors the dataset name, stored under dataQC_log/.
    # file.path() joins correctly whether or not dataset_directory has a
    # trailing slash (the previous paste0() required one).
    qc_file <- basename(gsub("\\.xlsx", "_QCreport.txt", attributes(soilcarbon_data)$file_name))
    qc_file <- file.path(dataset_directory, "dataQC_log", qc_file)
    qc_out <- dataQC(soilcarbon_data, writeQCreport = TRUE, outfile = qc_file)
    if (qc_out > 0) {
      # Datasets with QC errors are reported and skipped.
      print(paste(basename(data_files[i]), "...", qc_out, "errors"))
    } else {
      print(paste(basename(data_files[i]), "...", "passed and added to database"))
      flat_data <- flatten(soilcarbon_data)
      flat_data[] <- lapply(flat_data, as.character)
      working_database <- rbind(working_database, flat_data)
    }
  }

  # Normalize accented characters to ASCII, then restore column types.
  working_database[] <- lapply(working_database, function(x) stri_trans_general(x, "latin-ascii"))
  working_database[] <- lapply(working_database, type.convert)
  soilcarbon_database <- working_database
  return(soilcarbon_database)
}
|
87d641ba04d4080b679d67afb7cbbd34cf69b76c
|
aac30b537cb879a203a73a44b155cfcf35c7574f
|
/server.R
|
fb9300c5271c608414dcfdc0a1c9df38a9a8ccb8
|
[] |
no_license
|
sohammishra/Project1_ShinyApp
|
efb3941920c5737effddb2a0ebf99b83a8d7c644
|
49a41d7a4a5afbc4959f9861fb4fbc5b2650f945
|
refs/heads/master
| 2022-05-26T05:37:26.789649
| 2020-04-25T23:27:00
| 2020-04-25T23:27:00
| 257,772,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,242
|
r
|
server.R
|
# Server for the US coronavirus dashboard. Expects these objects to be loaded
# globally elsewhere in the app: tidy_time_series, df_num, joined, time_series.
server <- function(input, output, session) {

  # Daily totals and growth rate for the selected states.
  # NOTE(review): no output below consumes this reactive any more (the unused
  # local binding in output$log was removed); retained for compatibility.
  g_rate <- reactive({
    tidy_time_series %>%
      filter(state %in% c(input$state, input$multistate_growth)) %>%
      filter(date >= '2020-03-15') %>% #& date <= '2020-04-23') %>%
      group_by(state, date) %>%
      summarise(totalperday = sum(cases)) %>%
      mutate(new_cases = totalperday - lag(totalperday),
             growth_rate = (totalperday - lag(totalperday)) / totalperday * 100)
  })

  # googleVis colorAxis spec (JSON string) for the metric chosen in the UI.
  axis <- reactive({
    ifelse(input$mapType == 'Confirmed', "{values:[0,7000,50000], colors:[\'white', \'pink\', \'red']}",
           ifelse(input$mapType == 'Mortality_Rate', "{values:[0,2.5,7], colors:[\'white', \'pink\', \'purple']}",
                  ifelse(input$mapType == 'Testing_Rate', "{values:[750,1250,2500], colors:[\'white', \'lightgreen\', \'green']}",
                         "{values:[0,10,30], colors:[\'white', \'lightblue\', \'blue']}")))
  })

  # State-level metrics shown on the map.
  dataSource <- reactive({
    df_num %>%
      select(State, Confirmed, Mortality_Rate, Testing_Rate)
  })

  output$USmap <- renderGvis({
    geo_data <- dataSource()
    axis_label <- axis()
    # BUG FIX: the chart previously always plotted 'Confirmed' even though the
    # color axis changed with input$mapType (the per-metric axis ranges only
    # make sense for the selected metric); plot the selected column instead.
    gvisGeoChart(geo_data, 'State', input$mapType,
                 options = list(title = 'US Coronavirus',
                                region = 'US',
                                displayMode = 'regions',
                                resolution = "provinces",
                                width = '800', height = '550',
                                colorAxis = axis_label,
                                backgroundColor = "white"))
  })

  # County-level choropleth of cases for the selected state and date.
  output$density <- renderPlot({
    ggplot(filter(joined, region == input$state & date == input$Date),
           mapping = aes(x = long,
                         y = lat, fill = cases,
                         group = group), color = 'white') +
      geom_polygon() +
      scale_fill_gradient(low = 'white', high = 'red', trans = "log10") +
      ggtitle('Which Counties are Most Impacted?') +
      geom_polygon(color = "black", fill = NA) +
      theme_void() +
      theme(text = element_text(family = '.New York', size = 15)) +
      coord_map()
  })

  output$grey <- renderText({
    '*Grey = County Data Unavailable'
  })

  # Cumulative cases over time (linear scale).
  output$linear <- renderPlot({
    tidy_time_series %>%
      group_by(state, date) %>%
      filter(state %in% c(input$state, input$multistate)) %>%
      # BUG FIX: the date literal was unquoted (2020-03-01 evaluated to the
      # number 2016), which disabled the lower bound of this filter.
      filter(date >= '2020-03-01' & date <= input$Date) %>%
      summarise(totalperday = sum(cases)) %>%
      ggplot(., aes(x = date, y = totalperday)) + geom_jitter(aes(color = state)) +
      geom_smooth(aes(color = state), se = FALSE, size = .5) +
      theme_bw() + xlab('Date') + ylab('Number of Cases') + ggtitle('Exponential Growth?') +
      theme(text = element_text(family = '.New York', size = 15))
  })

  # Cumulative cases over time (log scale).
  output$log <- renderPlot({
    breaks <- 10**(1:10)
    tidy_time_series %>%
      group_by(state, date) %>%
      filter(state %in% c(input$state, input$multistate_log)) %>%
      # BUG FIX: quoted date literal (see output$linear above).
      filter(date >= '2020-03-01' & date <= input$Date) %>%
      summarise(totalperday = sum(cases)) %>%
      ggplot(., aes(x = date, y = totalperday)) + geom_jitter(aes(color = state)) +
      scale_y_log10(breaks = breaks, labels = breaks) +
      geom_smooth(aes(color = state), se = FALSE, size = .5) +
      theme_bw() + xlab('Date') + ylab('Number of Cases') + ggtitle('Is your state flattening the curve?') +
      theme(text = element_text(family = '.New York', size = 15))
  })

  # Daily new cases (bars) with a smoothed moving average per state.
  output$growth <- renderPlot({
    tidy_time_series %>%
      group_by(state, date) %>%
      filter(state %in% c(input$state, input$multistate_growth)) %>%
      # BUG FIX: quoted date literal (2020-03-15 evaluated to the number 2002).
      filter(date >= '2020-03-15' & date <= input$Date) %>%
      summarise(totalperday = sum(cases)) %>%
      mutate(new_cases = totalperday - lag(totalperday),
             growth_rate = (totalperday - lag(totalperday)) / totalperday * 100,
             # BUG FIX: six terms (current value plus five lags) were divided
             # by 5; divide by 6 so this is a true 6-point moving average.
             moving_avg = (new_cases + lag(new_cases) + lag(new_cases, 2) + lag(new_cases, 3) + lag(new_cases, 4) + lag(new_cases, 5)) / 6) %>%
      ggplot() + geom_bar(aes(x = date, y = new_cases, fill = state), alpha = .3, stat = 'identity', position = 'dodge') +
      geom_smooth(aes(x = date, y = moving_avg, color = state), se = FALSE, size = 1) +
      theme_bw() + xlab('Date') + ylab('New Cases') + ggtitle('Daily Growth') +
      theme(text = element_text(family = '.New York', size = 15))
  })

  # Raw data dump for the data tab.
  output$rawtable <- renderPrint({
    print(time_series)
    print(df_num %>%
            select(c(1:8)))
  })
}
|
97814baa56b9054587ff1fb3de37b32fa2f90ff5
|
df70da3406df1a50a08d28bcf60664acdf216640
|
/R/simulate.R
|
5c873b8c6ca8c5d53f0fd786dcaf64be2aa62adc
|
[] |
no_license
|
cschieberle/lifeCourseExposureTrajectories
|
4e03c7e60b5c33294a74a1d2985f5f322c090193
|
cfa7bfca809ae49fec6c4e5d824c6a774b2a086d
|
refs/heads/master
| 2020-03-18T06:19:07.717291
| 2018-06-13T08:05:58
| 2018-06-13T08:05:58
| 134,388,199
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,635
|
r
|
simulate.R
|
# Codes, labels and short codes for the activity states used throughout the
# package. The first eleven entries are the adult survey states; the remaining
# entries are contributed by the adolescent model helpers.

#' @export
data.alphabet <- c(
  1:11,
  adolescModelAlphabet()
)

#' @export
data.labels <- c(
  "Employee working full-time",
  "Employee working part-time",
  "Self-employed working full-time (including family worker)",
  "Self-employed working part-time (including family worker)",
  "Unemployed",
  "Pupil, student, further training, unpaid work experience",
  "In retirement or in early retirement or has given up business",
  "Permanently disabled or/and unfit to work",
  "In compulsory military community or service",
  "Fulfilling domestic tasks and care responsibilities",
  "Other inactive person",
  adolescModelLabels()
)

#' @export
data.scodes <- c(
  "EWFT", "EWPT", "SEFT", "SEPT", "UNEM", "STUD", "RETD", "UNFT", "CMCS", "DOME", "INAC",
  adolescModelCodes()
)
#' Simulates life trajectories
#'
#' Starting from an individual's age, sex and current main activity, samples
#' 4-year activity sequences from a fitted sequence tree: first forward in
#' time up to age 82 (prospective part), then backwards down to age 16
#' (retrospective part).
#'
#' @param st Sequence tree.
#' @param indiv.age Age of the individual.
#' @param indiv.sex Sex of the individual.
#' @param indiv.activity Activity of the individual at the given age.
#' @param age.step Number of additional age steps per given sequence (defaults to 3 due to sequence length of 4)
#' @return A data.frame ordered by age with columns age, sex, node.id,
#'   num.seq (size of the sequence pool sampled from) and
#'   activity.0, activity.1, activity.2.
#' @export
#
simulate <- function(st, indiv.age, indiv.sex, indiv.activity, age.step = 3) {
  traj <- data.frame()
  df <- lifeCourseExposureTrajectories::traversePreOrder(st$root)

  message(paste("simulation: [age = ", indiv.age, ", sex = ", indiv.sex, ", activity = ", indiv.activity, "]" ))

  # store original information and first
  # (i) perform simulation of the PROSPECTIVE (i.e. future) life course
  # (note: this loop moves FORWARD in age; the (i)/(ii) labels were
  # previously swapped)
  orig.age <- indiv.age
  orig.activity <- indiv.activity

  while (indiv.age <= 82) {
    node.id <- getNodeId(df, indiv.age, indiv.sex)
    node <- findNodeById(st$root, node.id)

    # pick only the sequences that are in the node, and...
    potential.seq <- data.frame(data.seq[ node$info$ind, ])
    # ..pick only the ones that START with the current main activity
    potential.seq <- subset(potential.seq, potential.seq$ECON.STATUS.CURR.SELFDEF.0 == indiv.activity)

    act.seq.in.node <- NULL
    # if at least one valid sequence exists:
    nrow.potential.seq <- nrow(potential.seq)
    if (nrow.potential.seq >= 1) {
      # very important to reset row.names as we like to sample only from the subset!
      row.names(potential.seq) <- seq_len(nrow(potential.seq))
      sample.seq.idx <- sample.int(nrow.potential.seq, 1)
      act.seq.in.node <- potential.seq[ sample.seq.idx, ]
    } else {
      # if no sequence starting with the given activity exists,
      # just pick any sequence within the node
      potential.seq <- data.frame(data.seq[ node$info$ind, ])
      row.names(potential.seq) <- seq_len(nrow(potential.seq))
      nrow.potential.seq <- nrow(potential.seq)
      sample.seq.idx <- sample.int(nrow.potential.seq, 1)
      act.seq.in.node <- potential.seq[ sample.seq.idx, ]
    }

    # record age, sex, node, pool size and the first three activities
    traj <- rbind(
      traj,
      c(
        indiv.age,
        indiv.sex,
        node.id,
        nrow(potential.seq),
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.0,
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.1,
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.2
      )
    )

    indiv.age <- indiv.age + age.step
    # set activity to the LAST activity in the 4-year sequence (as we continue the PROSPECTIVE analysis)
    indiv.activity <- act.seq.in.node$ECON.STATUS.CURR.SELFDEF.3
  }

  # restore original information and secondly
  # (ii) perform simulation of the RETROSPECTIVE (i.e. past) life course
  # (this loop moves BACKWARDS in age, down to 16)
  indiv.age <- orig.age
  indiv.activity <- orig.activity

  while ((indiv.age - age.step) >= 16) {
    indiv.age <- indiv.age - age.step
    node.id <- getNodeId(df, indiv.age, indiv.sex)
    node <- findNodeById(st$root, node.id)

    # pick only the sequences that are in the node, and...
    potential.seq <- data.frame(data.seq[ node$info$ind, ])
    # ..pick only the ones that END with the current main activity
    potential.seq <- subset(potential.seq, potential.seq$ECON.STATUS.CURR.SELFDEF.3 == indiv.activity)

    act.seq.in.node <- NULL
    # if at least one valid sequence exists:
    nrow.potential.seq <- nrow(potential.seq)
    if (nrow.potential.seq >= 1) {
      # very important to reset row.names as we like to sample only from the subset!
      row.names(potential.seq) <- seq_len(nrow(potential.seq))
      sample.seq.idx <- sample.int(nrow.potential.seq, 1)
      act.seq.in.node <- potential.seq[ sample.seq.idx, ]
    } else {
      # if no sequence ending with the given activity exists,
      # just pick any sequence within the node
      potential.seq <- data.frame(data.seq[ node$info$ind, ])
      row.names(potential.seq) <- seq_len(nrow(potential.seq))
      nrow.potential.seq <- nrow(potential.seq)
      sample.seq.idx <- sample.int(nrow.potential.seq, 1)
      act.seq.in.node <- potential.seq[ sample.seq.idx, ]
    }

    traj <- rbind(
      traj,
      c(
        indiv.age,
        indiv.sex,
        node.id,
        nrow(potential.seq),
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.1,
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.2,
        act.seq.in.node$ECON.STATUS.CURR.SELFDEF.3
      )
    )

    # set activity to the FIRST activity in the 4-year sequence (as we continue the RETROSPECTIVE analysis);
    # at the lower age boundary (only reachable when indiv.age == 16 given the
    # loop guard) the second element is used instead
    if (indiv.age <= 16) {
      indiv.activity <- act.seq.in.node$ECON.STATUS.CURR.SELFDEF.1
    } else {
      indiv.activity <- act.seq.in.node$ECON.STATUS.CURR.SELFDEF.0
    }
  }

  names(traj) <- c("age", "sex", "node.id", "num.seq", "activity.0", "activity.1", "activity.2")
  traj <- traj[order(traj$age),]
  return(traj)
}
|
c0d9bdf19659cc8a87de6d6a7b206ecf4afd067e
|
9462c1674ac612f23b6d73548b79f75751254997
|
/cachematrix.R
|
73d81294b832195cdcc76e04c58d2e7f316cc9d8
|
[] |
no_license
|
pavtok/ProgrammingAssignment2
|
7dbce39b905c2862adfdd633e11ddd3b85d91d6e
|
7e418a2336f55cd7150917e45139118fe4405768
|
refs/heads/master
| 2022-12-11T01:02:51.247135
| 2020-09-13T23:03:06
| 2020-09-13T23:03:06
| 294,815,733
| 0
| 0
| null | 2020-09-11T21:29:31
| 2020-09-11T21:29:30
| null |
UTF-8
|
R
| false
| false
| 1,653
|
r
|
cachematrix.R
|
## Matrix inversion can be a time-consuming task.
## That's why this pair of functions can be used to perform this calculation,
## store the result in a cache, and reuse it without having to calculate it again.
## The function 'makeCacheMatrix' will prepare the necessary data and internal
## functions (set, get, setsolve and getsolve) for 'cacheSolve', which will
## return the inverse of the matrix, getting cached data or calculating it.
## Creates a special "matrix" object: a list of accessor closures around an
## invertible matrix 'x' and its (initially empty) cached inverse, both kept
## in this function's environment. The returned list is the input expected by
## 'cacheSolve'.
## Recommended usage saves the result in an object, for
## example: m1 <- makeCacheMatrix(matrix(sample(36), nrow=6))
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # replacing the matrix invalidates the cached inverse
  }
  get <- function() x
  setsolve <- function(solve) inv <<- solve
  getsolve <- function() inv
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Returns the inverse of the special "matrix" created by 'makeCacheMatrix'.
## If the inverse was already computed, the cached value is returned together
## with the message 'getting cached data'; otherwise it is computed with
## solve(), stored in the cache for future calls, and returned.
## Example of use: cacheSolve(m1)
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
|
4f8ea0a66ddd82ca2f81faae9459bf5ac5a72e41
|
76acdfc6d4faeaa150864dee02d7433b41dc408a
|
/COURS ET TP STATISTIQUE COMPUTATNELLE/Cours et TP/Corrigés de TP/TP2_solution.R
|
9aac9660b9857e360ea3a39df614598827c6c615
|
[] |
no_license
|
komiagblodoe/M2-SSD
|
ca5ff6eb8dac15247042605883cc66bbf644173f
|
961a4a1b0c1648a6ddf1d7ac41e7657c230132b3
|
refs/heads/main
| 2023-04-16T08:22:24.389417
| 2021-04-23T08:10:13
| 2021-04-23T08:10:13
| 360,799,679
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,672
|
r
|
TP2_solution.R
|
##############################
##### STARTING EXERCICE 1 ####
##############################
# Compare the mean, the k-trimmed means and the median as location estimators
# for a contaminated normal sample: with probability 1-p an observation is
# drawn with the large standard deviation sigma2 instead of sigma1.

# One illustrative draw of a contaminated sample (not reused below)
sigma1 = 1
sigma2 = 10
n = 20
p = 0.9
sigma.smple = sample(c(sigma1,sigma2), size = n, replace = TRUE, prob = c(p,1-p))
x = rnorm(n, mean = 0, sd = sigma.smple)

# Monte-Carlo settings: n observations per sample, m replications
n = 20
m = 1000
p.list = c(1,0.95,0.9,0.8,0.7,0.6,0.5)  # contamination levels: p = P(sd = sigma1)
k.list = c(1,3,5,7)                     # trimming levels
MSE = c()
SE = c()
for(p in p.list){
  # Each replication returns one value per estimator:
  # mean, trimmed means (one per k), median.
  tmp = replicate(m, expr = {
    # generate sd to use
    sigma = sample(c(1,10), size = n, replace = TRUE, prob = c(p,1-p))
    # draw samples
    x = sort(rnorm(n, mean = 0, sd = sigma))
    # compute estimators
    res = c(mean(x))
    for(k in k.list){
      res = c(res,mean(x[(k+1):(n-k)]))  # drop k smallest and k largest values
    }
    res = c(res, median(x))
    return(res)
  })
  # MSE (true location is 0) and Monte-Carlo standard error, per estimator
  mse = apply(tmp, 1, function(x){ mean(x^2) })
  se = apply(tmp, 1, function(x){ sqrt( sum( (x - mean(x))^2 ) / m ) })
  MSE = cbind(MSE, mse)  # one column of MSE per contamination level p
  SE = cbind(SE, se)
}
# Plot MSE (log scale) against p, one curve per estimator
cols = c("black","red","blue","palegreen2", "purple","orange")
plot(p.list, MSE[1,], log = "y", ylim = range(MSE), col = cols[1], type = "l", xlab = "p", ylab = "MSE", main = "MSE of trimmed estimators vs level of contamination")
grid()
for(i in 1:nrow(MSE)){
  lines(p.list, MSE[i,], type = "l", col = cols[i], lwd = 2)
}
legend("topright", c("mean",paste("trimmed-",k.list,sep=""),"median"), col = cols, lwd = 2, bg = "white")

# Same experiment, repeated over a finer grid of contamination levels
p.list = seq(0, 1, by = 0.1)
k.list = c(1,3,5,7)
sigma1 = 1
sigma2 = 10
MSE = c()
SE = c()
for(p in p.list){
  tmp = replicate(m, expr = {
    # generate sd to use
    sigma = sample(c(sigma1,sigma2), size = n, replace = TRUE, prob = c(p,1-p))
    # draw samples
    x = sort(rnorm(n, mean = 0, sd = sigma))
    # compute estimators
    res = c(mean(x))
    for(k in k.list){
      res = c(res,mean(x[(k+1):(n-k)]))
    }
    res = c(res, median(x))
    return(res)
  })
  mse = apply(tmp, 1, function(x){ mean(x^2) })
  se = apply(tmp, 1, function(x){ sqrt( sum( (x - mean(x))^2 ) / m ) })
  MSE = cbind(MSE, mse)
  SE = cbind(SE, se)
}
# plot
cols = c("black","red","blue","palegreen2", "purple","orange")
plot(p.list, MSE[1,], log = "y", ylim = range(MSE), col = cols[1], type = "l", xlab = "p", ylab = "MSE")
title("MSE of trimmed estimators vs level of contamination")
grid()
for(i in 1:nrow(MSE)){
  lines(p.list, MSE[i,], type = "l", col = cols[i], lwd = 2)
}
legend("topright", c("mean",paste("trimmed-",k.list,sep=""),"median"), col = cols, lwd = 2, bg = "white")
##############################
#### STARTING EXERCICE 2 ####
##############################
# Empirical coverage of the chi-square confidence interval for the variance
# when the sample is contaminated: with probability 1-p an observation has
# standard deviation sigma2 instead of sigma1.
n <- 20
m <- 1000
p.list <- c(1, 0.95, 0.9, 0.8, 0.7)   # contamination levels: p = P(sd = sigma1)
sigma1 <- 1
sigma2.list <- c(1, 2, 5, 10)         # contaminating standard deviations
alpha <- 0.05

# initialize output matrix: one row per sigma2, one column per p
CONF <- matrix(0, nrow = length(sigma2.list), ncol = length(p.list))
rownames(CONF) <- paste("sigma2-", sigma2.list, sep = "")
colnames(CONF) <- paste("p-", p.list, sep = "")

# process each configuration (seq_along() is safe on empty lists, 1:length() is not)
for (i in seq_along(p.list)) {
  p <- p.list[i]
  for (j in seq_along(sigma2.list)) {
    sigma2 <- sigma2.list[j]
    tmp <- replicate(m, expr = {
      # generate sd to use
      sigma.smple <- sample(c(sigma1, sigma2), size = n, replace = TRUE, prob = c(p, 1 - p))
      # draw samples
      x <- rnorm(n, mean = 0, sd = sigma.smple)
      # chi-square confidence interval for the variance
      Ilow <- (n - 1) * var(x) / qchisq(1 - alpha / 2, df = n - 1)
      Ihigh <- (n - 1) * var(x) / qchisq(alpha / 2, df = n - 1)
      # return lower and upper limits of the CI
      return(c(Ilow, Ihigh))
    })
    # empirical coverage: fraction of intervals containing the true variance sigma1^2
    CONF[j, i] <- mean(sigma1^2 > tmp[1, ] & sigma1^2 < tmp[2, ])
  }
}
CONF <- round(100 * CONF, digits = 1)

# plot: one coverage curve per sigma2 plus the nominal 95% level
# (a stray empty argument ", ," in the original plot() call was removed)
plot(p.list, CONF[1, ], ylim = range(CONF), xlab = "p",
     ylab = "empirical confidence level", type = "l", lwd = 2, col = "gray")
title("empirical confidence level vs p and sigma2")
grid()
for (i in 2:nrow(CONF)) {
  lines(p.list, CONF[i, ], col = i, lwd = 2)
}
abline(h = 95, lty = 2, lwd = 2)
legend("bottomright", paste("sigma2 =", sigma2.list), col = c("gray", seq(2, nrow(CONF))), lwd = 2, bg = "white")
##############################
#### STARTING EXERCICE 3 ####
##############################
# Empirical Type-I error of the one-sided t-test: simulate m samples under H0
# (true mean = mu0) and count how often H0 is (wrongly) rejected at level alpha.
mu0 = 500
alpha = 0.05
m = 10000
n = 20
sigma = 100
p.val = numeric(m)  # preallocated vector of p-values, one per replication
for(i in 1:m){
  x = rnorm(n, mu0, sigma)
  tt = t.test(x, alternative="greater", mu = mu0)
  p.val[i] = tt$p.value
}
# fraction of rejections; should be close to the nominal level alpha
alpha.hat = mean(p.val < alpha)
cat("*** empirical Type-I error =", alpha.hat, "(expected =", alpha, ")***\n")
##############################
#### STARTING EXERCICE 4 ####
##############################
# Empirical power of the one-sided t-test: simulate samples under alternatives
# mu1 >= mu0 for several sample sizes and count rejections at level alpha.
mu0 = 500
alpha = 0.05
m = 1000
sigma = 100
# define mu1 and n values to consider
mu1.list = seq(500, 700, by = 10)
n.list = c(20, 50, 100, 200)
# initialize output: one row per sample size n, one column per alternative mu1
P = matrix(0, nrow = length(n.list), ncol = length(mu1.list))
rownames(P) = n.list
colnames(P) = mu1.list
# process each configuration
for(i in seq(length(n.list))){
  n = n.list[i]
  for(j in seq(length(mu1.list))){
    mu1 = mu1.list[j]
    # initialize vector of p.values
    p.val = numeric(m)
    # simulate according to mu1 and compute p.value
    for(k in 1:m){
      x = rnorm(n, mu1, sigma)
      tt = t.test(x, alternative="greater", mu = mu0)
      p.val[k] = tt$p.value
    }
    # compute power: rejection rate at level alpha
    P[i,j] = mean(p.val < alpha)
  }
}
# plot: one power curve per sample size
plot(mu1.list, P[1,], type = "l", lwd = 2, xlab = "mu1", ylab = "power", main = "empirical power vs n and mu1")
grid()
for(i in 1:nrow(P)){
  lines(mu1.list, P[i,], type = "l", col = i, lwd = 2)
}
legend("bottomright", paste("n =", n.list), col = seq(length(n.list)), lwd = 2, bg = "white")
|
988d1f1704a0a935c2e55fb298b2c4fe58c51c9d
|
307daa5d64a3e1e5ab2b6e4e85c83133e9d43e7b
|
/man/getMortalityData.Rd
|
88cc6908a1eabd7052e8d143dd697bbf092678d4
|
[] |
no_license
|
hkim207/ahri-1
|
80be69695ee74acb2f10fc9b63b74998f02e04f3
|
d2a671671c1b8cf66bc97d6d7135581254ebb30e
|
refs/heads/master
| 2022-12-13T16:51:37.618854
| 2020-09-26T20:05:22
| 2020-09-26T20:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 637
|
rd
|
getMortalityData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcMortality.R
\name{getMortalityData}
\alias{getMortalityData}
\title{getMortalityData}
\usage{
getMortalityData(Args, startVar = "HIVPositive", dropHIVPos = FALSE)
}
\arguments{
\item{Args}{see \code{\link{setArgs}}.}
\item{startVar}{string variable at which person-time starts. Use
HIVPositive for AIDS-related mortality, otherwise only those with an HIVNegative test
or those with the EarliestTest from HIVSurveillance. Or use ObservationStart from the Episodes dataset.}
}
\value{
data.frame
}
\description{
gets mortality data.
}
\keyword{internal}
|
020a801a16c06a863c405ec6775da28cdc0954f3
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.compute/man/ec2_describe_scheduled_instance_availability.Rd
|
3234eba6b00ddb1e4b4bedfdb0cdda86e4bc7180
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 4,521
|
rd
|
ec2_describe_scheduled_instance_availability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_scheduled_instance_availability}
\alias{ec2_describe_scheduled_instance_availability}
\title{Finds available schedules that meet the specified criteria}
\usage{
ec2_describe_scheduled_instance_availability(DryRun, Filters,
FirstSlotStartTimeRange, MaxResults, MaxSlotDurationInHours,
MinSlotDurationInHours, NextToken, Recurrence)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{Filters}{The filters.
\itemize{
\item \code{availability-zone} - The Availability Zone (for example,
\verb{us-west-2a}).
\item \code{instance-type} - The instance type (for example, \code{c4.large}).
\item \code{network-platform} - The network platform (\code{EC2-Classic} or
\code{EC2-VPC}).
\item \code{platform} - The platform (\code{Linux/UNIX} or \code{Windows}).
}}
\item{FirstSlotStartTimeRange}{[required] The time period for the first schedule to start.}
\item{MaxResults}{The maximum number of results to return in a single call. This value can
be between 5 and 300. The default value is 300. To retrieve the
remaining results, make another call with the returned \code{NextToken}
value.}
\item{MaxSlotDurationInHours}{The maximum available duration, in hours. This value must be greater
than \code{MinSlotDurationInHours} and less than 1,720.}
\item{MinSlotDurationInHours}{The minimum available duration, in hours. The minimum required duration
is 1,200 hours per year. For example, the minimum daily schedule is 4
hours, the minimum weekly schedule is 24 hours, and the minimum monthly
schedule is 100 hours.}
\item{NextToken}{The token for the next set of results.}
\item{Recurrence}{[required] The schedule recurrence.}
}
\value{
A list with the following syntax:\preformatted{list(
NextToken = "string",
ScheduledInstanceAvailabilitySet = list(
list(
AvailabilityZone = "string",
AvailableInstanceCount = 123,
FirstSlotStartTime = as.POSIXct(
"2015-01-01"
),
HourlyPrice = "string",
InstanceType = "string",
MaxTermDurationInDays = 123,
MinTermDurationInDays = 123,
NetworkPlatform = "string",
Platform = "string",
PurchaseToken = "string",
Recurrence = list(
Frequency = "string",
Interval = 123,
OccurrenceDaySet = list(
123
),
OccurrenceRelativeToEnd = TRUE|FALSE,
OccurrenceUnit = "string"
),
SlotDurationInHours = 123,
TotalScheduledInstanceHours = 123
)
)
)
}
}
\description{
Finds available schedules that meet the specified criteria.
You can search for an available schedule no more than 3 months in
advance. You must meet the minimum required duration of 1,200 hours per
year. For example, the minimum daily schedule is 4 hours, the minimum
weekly schedule is 24 hours, and the minimum monthly schedule is 100
hours.
After you find a schedule that meets your needs, call
\code{\link[=ec2_purchase_scheduled_instances]{purchase_scheduled_instances}} to
purchase Scheduled Instances with that schedule.
}
\section{Request syntax}{
\preformatted{svc$describe_scheduled_instance_availability(
DryRun = TRUE|FALSE,
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
FirstSlotStartTimeRange = list(
EarliestTime = as.POSIXct(
"2015-01-01"
),
LatestTime = as.POSIXct(
"2015-01-01"
)
),
MaxResults = 123,
MaxSlotDurationInHours = 123,
MinSlotDurationInHours = 123,
NextToken = "string",
Recurrence = list(
Frequency = "string",
Interval = 123,
OccurrenceDays = list(
123
),
OccurrenceRelativeToEnd = TRUE|FALSE,
OccurrenceUnit = "string"
)
)
}
}
\examples{
\dontrun{
# This example describes a schedule that occurs every week on Sunday,
# starting on the specified date. Note that the output contains a single
# schedule as an example.
svc$describe_scheduled_instance_availability(
FirstSlotStartTimeRange = list(
EarliestTime = "2016-01-31T00:00:00Z",
LatestTime = "2016-01-31T04:00:00Z"
),
Recurrence = list(
Frequency = "Weekly",
Interval = 1L,
OccurrenceDays = list(
1L
)
)
)
}
}
\keyword{internal}
|
9ec8a06f7b6db9f1da0c3a379bf99aec1597670e
|
c897422cfad7d729ce60609a32eaa3252d041854
|
/server.R
|
529c70332db423596781270905087eb30900c1c1
|
[] |
no_license
|
Srinivasankrishnan27/sri-project-alpha
|
4c3ef88c30099c0b09b3a3b9382f7df95647603b
|
b7bf5ea66ee42dd84b6507729f8b2632d2685a5c
|
refs/heads/main
| 2023-06-29T19:30:10.485162
| 2021-07-15T05:43:08
| 2021-07-15T05:43:08
| 386,174,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,431
|
r
|
server.R
|
# Server logic: upload a CSV, preview it in an editable data table, and show
# summary statistics in a modal dialog.
shinyServer(function(input, output, session) {
  # Reactive store for the uploaded data; raw_input is NULL until a file is chosen.
  data <- reactiveValues(raw_input = NULL)

  observeEvent(input$data_input,{
    # Hide the stats UI until a file has been read successfully.
    output$show_stats <- reactive({FALSE})
    output$raw_data <- renderDataTable({
      inFile <- input$data_input
      if (is.null(inFile)){
        return(NULL)
      }
      else{
        data$raw_input<-read.csv(inFile$datapath)
        # Flag consumed by the UI (e.g. a conditionalPanel); suspendWhenHidden
        # is disabled so it updates even while the output is not visible.
        output$show_stats <- reactive({nrow(data$raw_input)> 0})
        outputOptions(output, 'show_stats', suspendWhenHidden = FALSE)
        datatable(data$raw_input,
                  options = list(orderClasses = TRUE,
                                 scrollX = TRUE,
                                 scrollY = '65vh',
                                 ajax = list(serverSide = TRUE, processing = TRUE)),
                  editable='cell')
      }
    })
  })

  observeEvent(input$stats_preview, {
    # NOTE(review): console print left in, presumably for debugging — confirm
    # whether it should be removed.
    print(summary.data.frame(data$raw_input))
    showModal(modalDialog(
      title = "Summary",
      output$no_of_records<- renderText({paste0('No. records: ', nrow(data$raw_input))}),
      output$no_of_cols<- renderText({paste0('No. columns: ', length(colnames(data$raw_input)))}),
      output$raw_data_summary <- renderTable({
        summary.data.frame(data$raw_input)
      }),
      easyClose = TRUE,
      footer = NULL
    ))
  })
})
|
ec12fa5146a85ce32a46885926dc1c30f186724c
|
cdbdfa2809213938a9fefd8bdd304a2cb5ad6278
|
/R/rcustom.R
|
db5a6a46fad026b1256633626ec60b466bf0f919
|
[
"MIT"
] |
permissive
|
DavisVaughan/almanac
|
49491a478e3bcdfae801111e5263efc86c33a3fb
|
7b14f6e8f1e685975231e5dadb40bb5bb8f2a9c8
|
refs/heads/main
| 2023-04-27T20:31:58.281595
| 2023-04-14T17:29:53
| 2023-04-14T17:29:53
| 208,673,066
| 74
| 4
|
NOASSERTION
| 2023-04-19T19:08:04
| 2019-09-15T23:45:27
|
R
|
UTF-8
|
R
| false
| false
| 1,667
|
r
|
rcustom.R
|
#' Create a custom rschedule
#'
#' @description
#' `rcustom()` creates an rschedule from manually defined event dates. This can
#' be useful when combined with [runion()] and [rsetdiff()] if you have a set of
#' fixed event dates to forcibly include or exclude from an rschedule.
#'
#' @param events `[Date]`
#'
#' A vector of event dates.
#'
#' @return
#' A custom rschedule.
#'
#' @export
#' @examples
#' include <- rcustom("2019-07-05")
#' exclude <- rcustom("2019-07-04")
#'
#' independence_day <- yearly() %>%
#' recur_on_month_of_year("July") %>%
#' recur_on_day_of_month(4)
#'
#' # Remove forcibly excluded day
#' independence_day <- rsetdiff(independence_day, exclude)
#'
#' # Add forcibly included day
#' independence_day <- runion(independence_day, include)
#'
#' alma_search("2018-01-01", "2020-12-31", independence_day)
rcustom <- function(events) {
  # Coerce to Date and validate before constructing the rschedule.
  events <- vec_cast_date(events)
  check_no_missing(events)
  check_finite(events)
  # Events are stored as a sorted set of unique dates.
  new_rcustom(vec_sort(vec_unique(events)))
}
# Low-level constructor: wraps validated event dates in an rschedule whose
# class vector always ends with "almanac_rcustom".
new_rcustom <- function(events, ..., class = character()) {
  check_date(events)
  classes <- c(class, "almanac_rcustom")
  new_rschedule(events = events, ..., class = classes)
}
#' @export
print.almanac_rcustom <- function(x, ...) {
  # Show at most five events, followed by a count of the remainder.
  labels <- as.character(rcustom_events(x))
  n <- length(labels)
  if (n > 5L) {
    labels <- c(labels[1:5], cli::format_inline("and {n - 5L} more"))
  }
  cli::cli_text("<rcustom[{n}]>")
  cli::cli_ul(labels)
  invisible(x)
}
#' @export
rschedule_events.almanac_rcustom <- function(x) {
  rcustom_events(x)
}

# Accessor for the event dates stored on an rcustom object.
rcustom_events <- function(x) {
  x[["events"]]
}
|
bfcea93fe141f9ec7e988bb77acaa468368297e5
|
231114399cf254361f2b1e7e8ed3ef39c0211504
|
/viterbi_hmm7Rcpp_2Int.R
|
571acd6be60162a2501522c7beef228e82abb91b
|
[
"CC0-1.0"
] |
permissive
|
seanevans7/emews
|
d329f86ff17c1de7a3fbb554ee35778e69b0b141
|
5b947a6fd3f25e294070d33660bfc645317121ef
|
refs/heads/master
| 2022-10-09T08:03:16.112767
| 2020-06-08T13:51:33
| 2020-06-08T13:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,647
|
r
|
viterbi_hmm7Rcpp_2Int.R
|
####################################################################################################################################
### Function used to implement the Viterbi algorithm for calculating the most likely state sequence from a ###
### hidden Markov model to diving data from Weddell seals. ###
### ###
### Used in the analysis presented in: ###
### "Sex-specific variation in the use of vertical habitat by a resident Antarctic top predator" ###
### Theoni Photopoulou, Karine Heerah, Jennifer Pohle and Lars Boehme (2020) ###
####################################################################################################################################
# Viterbi algorithm for hidden Markov model including two interactions
# Viterbi decoding of the most likely state sequence under a fitted hidden
# Markov model with covariate-dependent transition probabilities and two
# interaction terms in the transition design matrix.
#
# Arguments:
#   obslist   - list of data frames, one per individual. Columns used: the
#               covariates named in `cov.names`, "source" ("ho" haulout /
#               "sf" surface / "dv" dive), "DURATION", "hunt_avgdep",
#               "hunt_prop", "benthic", "notbenthic", "psal".
#   cov.names - names of covariate columns for the transition model.
#               NOTE(review): the interaction columns index cov.names[cov-1]
#               and cov.names[cov-2], so the code implicitly assumes exactly
#               three covariates — confirm before reusing with other data.
#   mod       - fitted model object: beta (t.p.m. coefficients), delta
#               (initial distribution), and state-dependent parameters
#               dd.* (duration), hd.* (hunting depth), ph.* (proportion
#               hunting), pb.pi (benthic indicator), sal.* (salinity).
#   N         - number of states; state 1 = haulout, 2 = surface,
#               3..N = dive states.
# Returns: a list with one decoded integer state sequence per individual.
viterbi <- function(obslist,cov.names,mod,N){
  K <- length(obslist)
  iv.seg <- vector("list")   # one decoded state sequence per individual
  beta.mat <- t(mod$beta)    # coefficients transposed as trMatrix_rcpp expects
  for (k in 1:K){ # loop through data from each individual
    # Design matrix: intercept, the covariates, and two interaction columns.
    cov.mat <- matrix(1, nrow=nrow(obslist[[k]]), ncol=length(cov.names)+3) # cov.names plus intercept (1) plus interactions (2)
    for (cov in 1:length(cov.names)){
      cov.mat[,cov+1] <- obslist[[k]][,cov.names[cov]]
      if(cov==length(cov.names)){
        # On the last covariate, fill the two interaction columns.
        cov.mat[,cov+2] <- obslist[[k]][,cov.names[cov-1]]*obslist[[k]][,cov.names[cov]] # column 3+2=5 holds the interaction of covariate 2 with covariate 3
        cov.mat[,cov+3] <- obslist[[k]][,cov.names[cov-2]]*obslist[[k]][,cov.names[cov]] # column 3+3=6 holds the interaction of covariate 1 with covariate 3
      }
    }
    ind.ho <- which(obslist[[k]]$source=="ho") # index for all haulout observations
    ind.sf <- which(obslist[[k]]$source=="sf") # index for all surface observations
    ind.dv1 <- which(obslist[[k]]$source=="dv") # index for all diving observations
    ind.dv2 <- which(obslist[[k]]$source=="dv" & !is.na(obslist[[k]]$hunt_avgdep)) # index for diving observations without missing hunting depth
    n <- dim(obslist[[k]])[1] # obs within a segment
    allprobs <- matrix(rep(1, N*n), nrow=n) # one matrix of state-dependent densities per segment
    ## ## ## KNOWN STATES
    # in state 1 (haulout) only duration contributes to state density
    j <- 1
    # DURATION (gamma); mean/sd are converted to shape/scale parameters
    dd.prob <- rep(0,n) # assume 0 probability if not in ho
    dd.prob[ind.ho] <- dgamma(obslist[[k]][ind.ho,"DURATION"],
                              shape=mod$dd.mu[j]^2/mod$dd.sigma[j]^2,
                              scale=mod$dd.sigma[j]^2/mod$dd.mu[j])
    allprobs[,j] <- dd.prob
    # in state 2 (surface) only maxdep and duration contribute to state density
    j <- 2
    # DURATION (gamma)
    dd.prob <- rep(0,n) # assume 0 probability if not in sf
    dd.prob[ind.sf] <- dgamma(obslist[[k]][ind.sf,"DURATION"],
                              shape=mod$dd.mu[j]^2/mod$dd.sigma[j]^2,
                              scale=mod$dd.sigma[j]^2/mod$dd.mu[j])
    allprobs[,j] <- dd.prob
    ## ## ## DIVE STATES
    for (j in 3:N){ # loop through dive states
      dd.prob <- hd.prob <- ph.prob <- pb.prob <- sal.prob <- rep(0,n) # note 0 (if not in a dive state then no depth etc)
      dd.prob[ind.dv1] <- hd.prob[ind.dv1] <- ph.prob[ind.dv1] <- pb.prob[ind.dv1] <- sal.prob[ind.dv1] <- 1 # 1 by default if in diving state
      # DURATION (gamma)
      dd.prob[ind.dv1] <- dgamma(obslist[[k]][ind.dv1,"DURATION"],
                                 shape=mod$dd.mu[j]^2/mod$dd.sigma[j]^2,
                                 scale=mod$dd.sigma[j]^2/mod$dd.mu[j])
      # depth of interest (gamma) - this either the average hunting depth OR the maxdep if there was no hunting
      hd.prob[ind.dv2] <- dgamma(obslist[[k]][ind.dv2,"hunt_avgdep"],
                                 shape=mod$hd.mu[j-2]^2/mod$hd.sigma[j-2]^2,
                                 scale=mod$hd.sigma[j-2]^2/mod$hd.mu[j-2])
      # proportion time spent hunting (beta)
      ph.prob[ind.dv2] <- dbeta(obslist[[k]][ind.dv2,"hunt_prop"], # note j-2 (only states 3-N so 3 params)
                                shape1=mod$ph.alpha[j-2],
                                shape2=mod$ph.beta[j-2])
      # proportion bathymetry reached (Bernoulli mixture on the benthic /
      # notbenthic indicator columns)
      pb.prob[ind.dv2] <- obslist[[k]][ind.dv2,"benthic"]*mod$pb.pi[j-2] +
        obslist[[k]][ind.dv2,"notbenthic"]*(1-mod$pb.pi[j-2])
      # salinity at hunting depth (normal)
      sal.prob[ind.dv2] <- dnorm(obslist[[k]][ind.dv2,"psal"],
                                 mean=mod$sal.mu[j-2],
                                 sd=mod$sal.sigma[j-2])
      # joint state-dependent density: product over the (conditionally
      # independent) data streams
      allprobs[,j] <- dd.prob*hd.prob*ph.prob*pb.prob*sal.prob
    } # closes j loop
    # Viterbi recursion; xi rows are renormalised at each step for
    # numerical stability (only the argmax matters).
    xi <- matrix(0,n,N)
    foo <- mod$delta*allprobs[1,]
    xi[1,] <- foo/sum(foo)
    trMat <- moveHMM:::trMatrix_rcpp(N,beta.mat,cov.mat) # per-observation transition matrices
    for (i in 2:n){ # forward loop
      foo <- apply(xi[i-1,]*trMat[,,i],2,max)*allprobs[i,]
      xi[i,] <- foo/sum(foo)
    } # closes i loop (forward)
    iv <- numeric(n) # state sequence
    iv[n] <- which.max(xi[n,])
    for (h in (n-1):1){ # backward loop (backtracking step)
      iv[h] <- which.max(trMat[,iv[h+1],h+1]*xi[h,])
    } # closes h loop (backward)
    iv.seg[[k]] <- iv
  } # closes k loop
  return(iv.seg)
} # end function
|
c15da62ca79d17d061f854880a5a9f8f1729933c
|
552cff5565279ea7cb09e0d0727a8000c1fcfde9
|
/Lab5.2_simIDE_codeexample.R
|
d98f323dbcf056c3a04ed889fbbf4a668da8aa07
|
[
"MIT"
] |
permissive
|
xc308/Learn_STRbook
|
52ab69942dd5bd12d20ca0eb07342fc50652d2f5
|
4fb057070dfc72cabedc47a9dff947f4141ccbd7
|
refs/heads/main
| 2023-04-22T18:55:37.366648
| 2021-05-15T17:05:38
| 2021-05-15T17:05:38
| 333,569,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,556
|
r
|
Lab5.2_simIDE_codeexample.R
|
# Simulate data from an integro-difference equation (IDE) spatio-temporal
# model. With IDEmodel = NULL a fresh model is constructed (fixed seed,
# unit-square domain, T time points, nobs observation locations per time);
# otherwise all parameters and design matrices are taken from the supplied
# fitted IDE model and a new realisation is simulated from it.
#
# Arguments:
#   T                - number of time points (overwritten when IDEmodel is
#                      supplied). NOTE(review): the name `T` shadows the
#                      TRUE shorthand inside the body.
#   nobs             - number of observation locations per time point.
#   k_spat_invariant - 1 = spatially invariant kernel, 0 = spatially
#                      varying kernel (only used when IDEmodel is NULL).
#   IDEmodel         - optional fitted IDE model to simulate from.
# Returns: list(s_df, z_df, z_STIDF, g_truth, g_obs, IDEmodel).
simIDE <- function(T = 9, nobs = 100, k_spat_invariant = 1, IDEmodel = NULL) {
  ## Suppress bindings warning (dummy locals for NSE column names)
  timeind <- val <- s1 <- s2 <- z <- NULL
  if(is.null(IDEmodel)) {
    set.seed(1)   # fixed seed so the example simulation is reproducible
    zlocs <- data.frame(s1 = runif(100),
                        s2 = runif(100))
    ## Spatial decomposition of the latent process
    Y_basis <- auto_basis(manifold = plane(),
                          data = SpatialPoints(zlocs),
                          regular = 1,
                          nres = 2) # Large Basis
    r <- nbasis(Y_basis)
    ## Kernel decomposition
    G_const <- constant_basis()
    ## Regression coefficients (intercept, s1, s2)
    beta <- c(0.2,0.2,0.2)
    ## Other parameters
    sigma2_eta <- 0.01^2
    sigma2_eps <- 0.01^2
    ## Spatial domain: unit square discretised on a 41 x 41 grid
    bbox <- matrix(c(0,0,1,1),2,2)
    s <- construct_grid(bbox, 41)
    alpha <- matrix(0,r,T)
    ## Kernel parameters (amplitude, aperture, x-offset, y-offset)
    if(k_spat_invariant) {
      K_basis <- list(G_const, G_const, G_const, G_const)
      k <- list(150, 0.002, -0.1, 0.1)
      alpha[65,1] <- 1  # seed the process with a single basis function
    } else {
      G <- auto_basis(plane(), data = SpatialPoints(s$s_grid_df),nres = 1)
      nbk <- nbasis(G)
      K_basis <- list(G_const, G_const, G, G)
      k <- list(200, 0.002, 0.1*rnorm(nbk), 0.1*rnorm(nbk))
      alpha[sample(1:r,10),1] <- 1  # seed with ten random basis functions
    }
    # Map the wide-format column labels "Y0".."Y(T-1)" to actual dates.
    time_map <- data.frame(timeind = paste0("Y",0:(T-1)),
                           time = as.Date(0:(T-1), origin = "2017-12-01"),
                           stringsAsFactors = FALSE)
  } else {
    # Extract everything needed for simulation from the fitted model.
    Y_basis <- IDEmodel$get("process_basis")
    r <- nbasis(Y_basis)
    beta <- coef(IDEmodel)
    sigma2_eta <- c(IDEmodel$get("sigma2_eta"))
    sigma2_eps <- c(IDEmodel$get("sigma2_eps"))
    s <- IDEmodel$get("s")
    T <- IDEmodel$get("T")
    nobs <- nrow(IDEmodel$get("data"))
    K_basis <- IDEmodel$get("kernel_basis")
    k <- IDEmodel$get("k")
    alpha <- matrix(0,r,T)
    alpha[,1] <- sqrt(sigma2_eta) * rnorm(r)  # random initial coefficients
    time_map <- data.frame(timeind = paste0("Y",0:(T-1)),
                           time = IDEmodel$get("time_points"),
                           stringsAsFactors = FALSE)
  }
  ## Construct matrices
  Sigma_eta <- sigma2_eta * Diagonal(r)
  Sigma_eps <- sigma2_eps * Diagonal(nobs * T)
  Q_eta <- Sigma_eta %>% solve()  # NOTE(review): precisions Q_eta / Q_eps are
  Q_eps <- Sigma_eps %>% solve()  # computed here but never used below
  Mfun <- construct_M(Y_basis, s)
  M <- Mfun(K_basis, k)           # propagator matrix for the basis coefficients
  PHI <- eval_basis(Y_basis, s$s_grid_mat)
  s$s_grid_df$Y0 <- (PHI %*% alpha[,1]) %>% as.numeric()
  # Propagate the coefficients forward in time and evaluate the process
  # on the grid at each time point.
  for(i in 1:(T-1)) {
    alpha[,i+1] <- (M %*% alpha[,i]) %>% as.numeric() + sqrt(sigma2_eta)*rnorm(r)
    s$s_grid_df[paste0("Y",i)] <- (PHI %*% alpha[,i+1]) %>% as.numeric()
  }
  ## process_value in long format
  ## head(s$s_grid_df): s1 s2 Y0 Y1 ... Y8
  s_long <- gather(s$s_grid_df, timeind, val, -s1, -s2) %>%
    left_join(time_map, by = "timeind") %>%
    select(-timeind)
  if(is.null(IDEmodel))
    X_proc <- cbind(1, s_long[,c("s1","s2")]) %>% as.matrix()
  ## simulate data (add fixed effects and measurement noise)
  if(is.null(IDEmodel)) {
    fixed_effects <- (X_proc %*% beta) %>% as.numeric()
    s_long$val <- s_long$val + fixed_effects
    zlocs <- data.frame(s1 = runif(nobs),
                        s2 = runif(nobs)) # nobs = 100
    PHI_obs_1 <- eval_basis(Y_basis, zlocs[,1:2] %>% as.matrix())
    # Same locations at every time point -> block-diagonal incidence matrix.
    PHI_obs <- do.call("bdiag", lapply(1:T, function(x) PHI_obs_1))
    X_obs <- cbind(1, do.call("rbind", lapply(1:T, function(x) zlocs))) %>% as.matrix()
    Z <- X_obs %*% beta + PHI_obs %*% c(alpha) +
      sqrt(sigma2_eps) * rnorm(nrow(PHI_obs))
    z_df <- data.frame(expand.grid.df(zlocs, data.frame(time = time_map$time)))
    z_df$z <- Z %>% as.numeric()
  } else {
    fixed_effects <- 0  # covariate effects come from the fitted model's X_obs
    s_long$val <- s_long$val + fixed_effects
    PHI_obs <- IDEmodel$get("PHI_obs")
    X_obs <- IDEmodel$get("X_obs")
    Z <- X_obs %*% beta + PHI_obs %*% c(alpha) +
      sqrt(sigma2_eps) * rnorm(nrow(PHI_obs))
    z_df <- as.data.frame(IDEmodel$get("data"))
    z_df[[all.vars(IDEmodel$get("f"))[1]]] <- Z %>% as.numeric()
  }
  # Plot of the noisy observations.
  g_obs <- ggplot(z_df) + geom_point(aes(s1, s2, colour = z)) +
    facet_wrap(~time) +
    scale_colour_distiller(palette = "Spectral")
  if(is.null(IDEmodel)) g_obs <- g_obs + coord_fixed(xlim=c(0,1), ylim = c(0,1))
  # Plot of the latent process on the grid.
  g_truth <- ggplot(s_long) + geom_tile(aes(s1,s2,fill=val)) +
    facet_wrap(~time) +
    scale_fill_distiller(palette="Spectral",
                         limits = c(min(c(z_df$z,s_long$val)),
                                    max(z_df$z,s_long$val)))
  if(is.null(IDEmodel)) g_truth <- g_truth + coord_fixed(xlim=c(0,1), ylim = c(0,1))
  ## Data as STIDF (spacetime irregular data frame)
  if(is.null(IDEmodel)) {
    cnames <- c("s1","s2")
    z_STIDF <- STIDF(sp = SpatialPoints(z_df[,cnames]),
                     time = z_df$time,
                     data = select(z_df, -time, -s1, -s2))
  } else {
    z_STIDF <- IDEmodel$get("data")
    z_STIDF$z <- as.numeric(Z)
  }
  ## IDE model used to generate the data (built fresh if none was supplied)
  if(is.null(IDEmodel)) {
    IDEmodel <- IDE(f = z ~ s1 + s2 + 1,
                    data = z_STIDF,
                    dt = as.difftime(1, units = "days"),
                    grid_size = 41,
                    kernel_basis = K_basis)
    IDEmodel$set(sigma2_eps = sigma2_eps,
                 sigma2_eta = sigma2_eta,
                 k = k)
  }
  list(s_df = s_long,
       z_df = z_df,
       z_STIDF = z_STIDF,
       g_truth = g_truth,
       g_obs = g_obs,
       IDEmodel = IDEmodel)
}
# Build a regular grid over a rectangular domain.
#
# Arguments:
#   bbox       - matrix with one row per dimension; column 1 = lower
#                bound, column 2 = upper bound.
#   ngrid      - number of grid points per dimension; a scalar is recycled
#                across dimensions; rounded if not an integer.
#   coordnames - optional coordinate names (defaults to "s1", "s2", ...).
# Returns: list with the grid as a data frame (s_grid_df) and matrix
#          (s_grid_mat), the cell side lengths (ds), and the cell area.
#
# Changes from the original: dropped the unnecessary magrittr pipe for a
# single as.matrix() call (removes a hidden dependency), used seq_len()
# instead of 1:nrow(bbox), used the scalar operator || in the scalar
# validity check, and hoisted the duplicated cell-size expression.
construct_grid <- function(bbox, ngrid, coordnames = NULL) {
  ndim <- nrow(bbox)
  if (length(ngrid) == 1) {
    ngrid <- rep(ngrid, ndim)
  }
  if (!(length(ngrid) == ndim) || !is.numeric(ngrid)) {
    stop("ngrid needs to be a numeric (which will be rounded if not an integer)
with length one or equal to the number of columns in bbox")
  }
  ngrid <- round(ngrid)
  # One coordinate sequence per dimension, then their Cartesian product.
  axes <- lapply(seq_len(ndim), function(d) {
    seq(bbox[d, 1], bbox[d, 2], length.out = ngrid[d])
  })
  s_grid <- do.call("expand.grid", axes)
  names(s_grid) <- if (is.null(coordnames)) paste0("s", seq_len(ndim)) else coordnames
  cell <- (bbox[, 2] - bbox[, 1]) / ngrid  # grid spacing per dimension
  list(s_grid_df  = s_grid,
       s_grid_mat = as.matrix(s_grid),
       ds         = cell,
       area       = prod(cell))
}
# Factory that precomputes the (inverse) Gram matrix of the process basis
# on the grid `s` and returns a function mapping kernel basis/parameters
# (K_basis, ki) to the propagator matrix M used in the IDE dynamics.
# Precomputing PHI and GRAM_inv here avoids repeating that work on every
# call with new kernel parameters.
construct_M <- function(Y_basis, s) {
  PHI <- eval_basis(Y_basis, s$s_grid_mat)   # basis evaluated at grid points
  GRAM <- crossprod(PHI)*s$area              # Gram matrix (Riemann approximation)
  GRAM_inv <- solve(GRAM)
  ndim <- dimensions(Y_basis)                # NOTE(review): computed but never used
  function(K_basis, ki) {
    K <- construct_kernel(K_basis, ki)       # kernel function k(s, r)
    Kmat <- K(s$s_grid_mat, s$s_grid_mat)    # kernel evaluated on the grid x grid
    # Galerkin projection of the integral operator onto the basis; the
    # assignment's value is returned (invisibly) as the function result.
    M <- GRAM_inv %*% crossprod(t(Kmat) %*% PHI, PHI)*s$area^2
  }
}
|
1421d9dbe8fc7bac1fac75cc2a538a0b383dcde8
|
bf54966f2a7428e96b89e7d35c18e334e4beff0b
|
/evalRandom.R
|
e828f78a39647c6851cd759e167166957b6b4682
|
[] |
no_license
|
Civanespinosa/Spp-Density-vs-richness_Port
|
c692c58431599314523b5c7955f63cef3a189fd3
|
4dd31772ef99df995217c4399450eab85b4d219c
|
refs/heads/master
| 2021-06-16T14:49:08.400643
| 2021-02-04T16:42:22
| 2021-02-04T16:42:22
| 148,078,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 425
|
r
|
evalRandom.R
|
# Evaluation script: build randomised question sheets from an Excel
# question bank ("Evaluación.xlsx") and write them to "eval1.csv".
# Each sheet is 10 questions sampled (without replacement) from rows 1-22,
# followed by a blank separator row.
# Load the questions
library(readxl)
# NOTE(review): `eval` shadows base::eval for the rest of the session.
eval <- read_excel("Evaluación.xlsx")
# NOTE(review): the next two assignments build one sample sheet but their
# result is discarded — `x` is overwritten by the sequence below.
x <- eval[sample(1:22, 10, replace = F),]
x <- rbind.data.frame(x[order(x$Número),],
eval[23,])
# Pre-fill the output frame; each 11-row slice holds one sheet + separator.
ranE <- data.frame(Numero=1:1044, pregunta=1:1044)
x <- seq(11,1044, by=11)   # end row of each 11-row slice
y <- seq(1,1033, by=11)    # start row of each 11-row slice
# NOTE(review): x and y have only 94 elements (11*94 = 1034, 1+11*93 = 1024),
# so at i = 95 the indices y[95]:x[95] are NA and this loop errors — confirm
# intended number of sheets vs. the 1044 preallocated rows.
for (i in 1:95){
ranE[y[i]:x[i],] <- rbind(eval[sample(1:22, 10, replace = F),], c("",""))
}
write.csv(ranE, "eval1.csv")
|
b4c07c8a9211696f6248611c8f511798c18f9e66
|
859ca12c7fcbc6dd36584cc3a41c173c488c9c3c
|
/Investing.R
|
65d5b8d1944e7ff46d1e19c2778a6c6a0df28f15
|
[] |
no_license
|
FinanceStudyGroup/Investing.R
|
b8dcb3cf7f2e78d11e87f6de75b5a30aac3486d1
|
e7036f197fb17971e78fa39ae725d818c7abc746
|
refs/heads/master
| 2020-04-29T04:02:47.402368
| 2019-04-01T07:14:54
| 2019-04-01T07:14:54
| 175,833,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119,271
|
r
|
Investing.R
|
#### Notes: Investing.R .............................................. ####
# This script describes a protocol for selecting files from a folder of
# csv data downloaded automatically from Investing.com, and
# converting these files to the xts format for further analysis in R.
# Specifically these data were collected using the webscraper, Investing.py.
# To perform this process, first state the location on your local
# machine where you have stored the csv data.
# Mine is shown below, as an example of the file path syntax in R.
# See line 24. This location can be changed for your unique set up.
# We then define the function for performing the conversion, and convert
# each file within this script.
# The reason for scripting this process as opposed to using a function
# is that in order to complete the process on each file, we need to
# convert each file to the "daily" format so they will be more easily
# recognized by xts based functions. This is more easily done with a script.
#### Investing.com folder ............................................ ####
Investing.com<-("C:\\Users\\Frankie\\Desktop\\Investing.com")
#### Convert to xts: Syntax: Investing.csv.xts(SPX) .................. ####
# Convert an Investing.com csv download (already read into a data frame)
# into an xts object, assigning the result back into the caller's frame
# under the same name as the variable passed in.
#
# Arguments:
#   ...  - a single data frame whose column 1 holds dates ("%b %d %Y"),
#          column 2 Close, columns 3-5 Open/High/Low, and (optionally)
#          column 6 "Vol." with abbreviated volumes ("1.2K", "3M", "1B",
#          or "-" for missing).
#   type - "p" for prices (values kept as-is) or "y" for yields (values
#          divided by 100). NOTE(review): any other value leaves `n`
#          undefined and the function errors downstream.
# Side effect: assigns the xts into the caller's frame; the column prefix
# is the *expression* the caller passed (via deparse/substitute), so call
# this with a bare variable name, e.g. Investing.csv.xts(SPX).
Investing.csv.xts<-function(..., type = "p"){
  # Require quantmod in order to work with xts.
  # NOTE(review): require() returns FALSE instead of erroring when the
  # package is missing.
  require(quantmod)
  # Name of the first `...` argument's expression; used as column prefix
  # and as the name assigned in the caller's frame.
  name<-deparse(substitute(...))
  data<-data.frame(...)
  # Defining the set of dates (strip thousands-separator commas first).
  Dates<-as.character(data[,1])
  Dates<-as.character(gsub(",","",Dates))
  Dates<-as.Date(Dates, format = "%b %d %Y")
  # Type: Price ("p") or yield ("y"); n is the divisor applied to values.
  if(type == "p"){
    n = 1
  }
  if(type == "y"){
    n = 100
  }
  # Reformatting the data: branch on whether a "Vol." column is present.
  if((colnames(data)[6])=="Vol.") {
    # Reformatting the data (with volume).
    # Open
    data[,3]<-as.character(data[,3])
    data[,3]<-as.numeric(gsub(",","",data[,3]))*(1/n)
    # High
    data[,4]<-as.character(data[,4])
    data[,4]<-as.numeric(gsub(",","",data[,4]))*(1/n)
    # Low
    data[,5]<-as.character(data[,5])
    data[,5]<-as.numeric(gsub(",","",data[,5]))*(1/n)
    # Close
    data[,2]<-as.character(data[,2])
    data[,2]<-as.numeric(gsub(",","",data[,2]))*(1/n)
    # Volume
    data[,6]<-as.character(data[,6])
    # Unit function for converting abbreviated units ("K"/"M"/"B"; "-" -> 0).
    # NOTE(review): grepl("-", x) matches a hyphen *anywhere* in the string,
    # not only the bare "-" placeholder — confirm against the raw data.
    Unit<-function(x){
      if((grepl("-", x))=="TRUE"){
        x<-as.numeric(0)
      }
      if((grepl("K", x))=="TRUE"){
        x<-strsplit(x, "K")
        x<-as.numeric(x[1])
        x<-x*1000
      }
      if((grepl("M", x))=="TRUE"){
        x<-strsplit(x, "M")
        x<-as.numeric(x[1])
        x<-x*1000000
      }
      if((grepl("B", x))=="TRUE"){
        x<-strsplit(x, "B")
        x<-as.numeric(x[1])
        x<-x*1000000000
      }
      return(
        x
      )
    }
    data[,6]<-as.numeric(sapply(data[,6],Unit))
    # Defining the data frame; drop the volume column when it is all zero.
    if(sum(data[,6])==0) {
      # Defining the data frame (OHLC only).
      data<-data.frame(cbind(data[,3], data[,4], data[,5], data[,2]))
      colnames(data)[1]<-paste(name,"Open", sep=".")
      colnames(data)[2]<-paste(name,"High", sep=".")
      colnames(data)[3]<-paste(name,"Low", sep=".")
      colnames(data)[4]<-paste(name,"Close", sep=".")
      row.names(data)<-Dates
    } else {
      # Defining the data frame (OHLC + volume).
      data<-data.frame(cbind(data[,3], data[,4], data[,5], data[,2], data[,6]))
      colnames(data)[1]<-paste(name,"Open", sep=".")
      colnames(data)[2]<-paste(name,"High", sep=".")
      colnames(data)[3]<-paste(name,"Low", sep=".")
      colnames(data)[4]<-paste(name,"Close", sep=".")
      colnames(data)[5]<-paste(name,"Volume", sep=".")
      row.names(data)<-Dates
    }
  } else {
    # Reformatting the data (no volume column).
    # Open
    data[,3]<-as.character(data[,3])
    data[,3]<-as.numeric(gsub(",","",data[,3]))*(1/n)
    # High
    data[,4]<-as.character(data[,4])
    data[,4]<-as.numeric(gsub(",","",data[,4]))*(1/n)
    # Low
    data[,5]<-as.character(data[,5])
    data[,5]<-as.numeric(gsub(",","",data[,5]))*(1/n)
    # Close
    data[,2]<-as.character(data[,2])
    data[,2]<-as.numeric(gsub(",","",data[,2]))*(1/n)
    # Defining the data frame.
    data<-data.frame(cbind(data[,3], data[,4], data[,5], data[,2]))
    colnames(data)[1]<-paste(name,"Open", sep=".")
    colnames(data)[2]<-paste(name,"High", sep=".")
    colnames(data)[3]<-paste(name,"Low", sep=".")
    colnames(data)[4]<-paste(name,"Close", sep=".")
    row.names(data)<-Dates
  }
  # Assigning the data to the environment.
  # NOTE(review): `env=` relies on partial matching of assign()'s `envir`
  # argument; spell it out if this is ever touched.
  return(
    assign(name, (data<-as.xts(data)), env=parent.frame())
  )
}
#### Convert all index data .......................................... ####
# The original section repeated the same three-statement pattern once per
# symbol:
#   X <- data.frame(read.csv(file.path(Investing.com, "X.csv")))
#   Investing.csv.xts(X)
#   X <- to.daily(X)
# That pattern is factored into one loop. Two non-standard-evaluation
# details of the hand-written version are preserved:
#   * Investing.csv.xts() derives the output column prefix from the
#     *expression* it receives (deparse/substitute), so the call is built
#     with the bare symbol via eval(call(...)).
#   * to.daily() also names columns after its argument, so the prefix is
#     supplied explicitly through its `name` argument.
index_symbols <- c(
  "DJIA", "SPX", "IXIC", "RUT", "VIX", "GSPTSE", "BVSP", "MXX", "GDAXI",
  "FTSE", "FCHI", "STOXX50E", "AEX", "IBEX", "FTSEMIB", "SSMI", "PSI20",
  "BFX", "ATX", "OMX", "OMXC25", "MOEX", "RTSI", "WIG20", "BUX", "XU100",
  "TA35", "SASEIDX", "NKY", "AS51", "NZDOW", "SHCOMP", "SICOM", "TXIN9",
  "DJSH", "HSI", "TWSE", "SET", "KOSPI", "JCI", "NIFTY", "SENSEX", "PCOMP",
  "FSSTI", "KSE100", "HNX30", "CSEALL"
)
for (sym in index_symbols) {
  # Read the raw Investing.com download into a variable named after the symbol.
  assign(sym, data.frame(read.csv(file.path(Investing.com, paste0(sym, ".csv")))))
  # Convert to xts; the bare symbol in the constructed call keeps the
  # deparse()-based column naming identical to the hand-written version.
  eval(call("Investing.csv.xts", as.name(sym)))
  # Collapse to daily OHLC; pass `name` so columns stay "<sym>.Open" etc.
  assign(sym, to.daily(get(sym), name = sym))
}
#### Convert Watchlist ............................................... ####
# Same read / Investing.csv.xts() / to.daily() pattern as the index
# section, factored into a loop over the watchlist tickers. The bare
# symbol is injected with eval(call(...)) so Investing.csv.xts()'s
# deparse(substitute())-based column naming is unchanged, and to.daily()
# gets the prefix explicitly via its `name` argument.
watchlist_symbols <- c(
  "LYB", "PPG", "SHW", "IFF", "DWDP", "PX", "APD", "EMN", "FMC", "CF",
  "JNJ", "PFE", "MRK", "ABBV", "BMY", "LLY", "UNH", "CVS", "ESRX", "AET",
  "ANTM", "CI", "HUM", "GILD", "AMGN", "CELG", "BIIB", "REGN", "ALXN",
  "VRTX", "MDT", "ABT", "SYK", "BSX", "ZBH", "ISRG", "EW", "VAR", "TMO",
  "A", "LH", "DGX", "PKI", "BDX", "BAX", "WAT", "XRAY", "HCA", "UHS",
  "THC", "ENDP", "DVA", "PRGO", "AGN", "MYL", "ZTS", "MNK",
  "AI.PA", "BNP.PA", "ACA.PA", "SAN.PA", "GLE.PA", "SOLB.BR", "FTI.PA",
  "FP.PA"
)
for (sym in watchlist_symbols) {
  # Read the raw download into a variable named after the ticker.
  assign(sym, data.frame(read.csv(file.path(Investing.com, paste0(sym, ".csv")))))
  # Convert to xts with the original deparse()-based column naming.
  eval(call("Investing.csv.xts", as.name(sym)))
  # Collapse to daily OHLC, keeping "<sym>.Open" etc. column names.
  assign(sym, to.daily(get(sym), name = sym))
}
#### Convert all bond data ........................................... ####
# Same pattern again, but these are yield series: Investing.csv.xts() is
# called with type = "y" so values are divided by 100. The bare symbol is
# injected with eval(call(...)) to preserve the deparse(substitute())-based
# column naming; to.daily() gets the prefix explicitly via `name`.
bond_symbols <- c(
  "EGYOvernight", "KENOvernight", "MEXOvernight", "POLOvernight",
  "RUSOvernight", "CHEOvernight",
  "HKG1W", "RUS1W", "CHE1W", "RUS2W", "MYS3W",
  "BEL1M", "BGR1M", "CAN1M", "CHL1M", "FRA1M", "GRC1M", "HKG1M", "IDN1M",
  "ISR1M", "ITA1M", "JPN1M", "MLT1M", "MEX1M", "NLD1M", "NZL1M", "NOR1M",
  "PHL1M", "POL1M", "RUS1M", "SGP1M", "ESP1M", "SWE1M", "CHE1M", "GBR1M",
  "USA1M",
  "CAN2M", "ITA3M", "MUS2M", "NZL2M", "NOR2M", "POL2M", "RUS2M", "SWE2M",
  "CHE2M",
  "BHR3M", "BGD3M", "BEL3M", "BRA3M", "CAN3M", "DNK3M", "EGY3M", "FRA3M",
  "DEU3M", "GRC3M", "HKG3M", "HUN3M"
)
for (sym in bond_symbols) {
  # Read the raw download into a variable named after the series.
  assign(sym, data.frame(read.csv(file.path(Investing.com, paste0(sym, ".csv")))))
  # Convert to xts as a yield series (values scaled by 1/100).
  eval(call("Investing.csv.xts", as.name(sym), type = "y"))
  # Collapse to daily OHLC, keeping "<sym>.Open" etc. column names.
  assign(sym, to.daily(get(sym), name = sym))
}
IND3M<-data.frame(read.csv(file.path(Investing.com,"IND3M.csv")))
Investing.csv.xts(IND3M, type = "y")
IND3M<-to.daily(IND3M)
IDN3M<-data.frame(read.csv(file.path(Investing.com,"IDN3M.csv")))
Investing.csv.xts(IDN3M, type = "y")
IDN3M<-to.daily(IDN3M)
IRL3M<-data.frame(read.csv(file.path(Investing.com,"IRL3M.csv")))
Investing.csv.xts(IRL3M, type = "y")
IRL3M<-to.daily(IRL3M)
ISR3M<-data.frame(read.csv(file.path(Investing.com,"ISR3M.csv")))
Investing.csv.xts(ISR3M, type = "y")
ISR3M<-to.daily(ISR3M)
JPN3M<-data.frame(read.csv(file.path(Investing.com,"JPN3M.csv")))
Investing.csv.xts(JPN3M, type = "y")
JPN3M<-to.daily(JPN3M)
JOR3M<-data.frame(read.csv(file.path(Investing.com,"JOR3M.csv")))
Investing.csv.xts(JOR3M, type = "y")
JOR3M<-to.daily(JOR3M)
KEN3M<-data.frame(read.csv(file.path(Investing.com,"KEN3M.csv")))
Investing.csv.xts(KEN3M, type = "y")
KEN3M<-to.daily(KEN3M)
MYS3M<-data.frame(read.csv(file.path(Investing.com,"MYS3M.csv")))
Investing.csv.xts(MYS3M, type = "y")
MYS3M<-to.daily(MYS3M)
MLT3M<-data.frame(read.csv(file.path(Investing.com,"MLT3M.csv")))
Investing.csv.xts(MLT3M, type = "y")
MLT3M<-to.daily(MLT3M)
MEX3M<-data.frame(read.csv(file.path(Investing.com,"MEX3M.csv")))
Investing.csv.xts(MEX3M, type = "y")
MEX3M<-to.daily(MEX3M)
MAR3M<-data.frame(read.csv(file.path(Investing.com,"MAR3M.csv")))
Investing.csv.xts(MAR3M, type = "y")
MAR3M<-to.daily(MAR3M)
NAM3M<-data.frame(read.csv(file.path(Investing.com,"NAM3M.csv")))
Investing.csv.xts(NAM3M, type = "y")
NAM3M<-to.daily(NAM3M)
NLD3M<-data.frame(read.csv(file.path(Investing.com,"NLD3M.csv")))
Investing.csv.xts(NLD3M, type = "y")
NLD3M<-to.daily(NLD3M)
NZL3M<-data.frame(read.csv(file.path(Investing.com,"NZL3M.csv")))
Investing.csv.xts(NZL3M, type = "y")
NZL3M<-to.daily(NZL3M)
NGA3M<-data.frame(read.csv(file.path(Investing.com,"NGA3M.csv")))
Investing.csv.xts(NGA3M, type = "y")
NGA3M<-to.daily(NGA3M)
NOR3M<-data.frame(read.csv(file.path(Investing.com,"NOR3M.csv")))
Investing.csv.xts(NOR3M, type = "y")
NOR3M<-to.daily(NOR3M)
PAK3M<-data.frame(read.csv(file.path(Investing.com,"PAK3M.csv")))
Investing.csv.xts(PAK3M, type = "y")
PAK3M<-to.daily(PAK3M)
PHL3M<-data.frame(read.csv(file.path(Investing.com,"PHL3M.csv")))
Investing.csv.xts(PHL3M, type = "y")
PHL3M<-to.daily(PHL3M)
PRT3M<-data.frame(read.csv(file.path(Investing.com,"PRT3M.csv")))
Investing.csv.xts(PRT3M, type = "y")
PRT3M<-to.daily(PRT3M)
RUS3M<-data.frame(read.csv(file.path(Investing.com,"RUS3M.csv")))
Investing.csv.xts(RUS3M, type = "y")
RUS3M<-to.daily(RUS3M)
SGP3M<-data.frame(read.csv(file.path(Investing.com,"SGP3M.csv")))
Investing.csv.xts(SGP3M, type = "y")
SGP3M<-to.daily(SGP3M)
ZAF3M<-data.frame(read.csv(file.path(Investing.com,"ZAF3M.csv")))
Investing.csv.xts(ZAF3M, type = "y")
ZAF3M<-to.daily(ZAF3M)
ESP3M<-data.frame(read.csv(file.path(Investing.com,"ESP3M.csv")))
Investing.csv.xts(ESP3M, type = "y")
ESP3M<-to.daily(ESP3M)
LKA3M<-data.frame(read.csv(file.path(Investing.com,"LKA3M.csv")))
Investing.csv.xts(LKA3M, type = "y")
LKA3M<-to.daily(LKA3M)
SWE3M<-data.frame(read.csv(file.path(Investing.com,"SWE3M.csv")))
Investing.csv.xts(SWE3M, type = "y")
SWE3M<-to.daily(SWE3M)
CHE3M<-data.frame(read.csv(file.path(Investing.com,"CHE3M.csv")))
Investing.csv.xts(CHE3M, type = "y")
CHE3M<-to.daily(CHE3M)
UGA3M<-data.frame(read.csv(file.path(Investing.com,"UGA3M.csv")))
Investing.csv.xts(UGA3M, type = "y")
UGA3M<-to.daily(UGA3M)
GBR3M<-data.frame(read.csv(file.path(Investing.com,"GBR3M.csv")))
Investing.csv.xts(GBR3M, type = "y")
GBR3M<-to.daily(GBR3M)
USA3M<-data.frame(read.csv(file.path(Investing.com,"USA3M.csv")))
Investing.csv.xts(USA3M, type = "y")
USA3M<-to.daily(USA3M)
MUS4M<-data.frame(read.csv(file.path(Investing.com,"MUS4M.csv")))
Investing.csv.xts(MUS4M, type = "y")
MUS4M<-to.daily(MUS4M)
NZL4M<-data.frame(read.csv(file.path(Investing.com,"NZL4M.csv")))
Investing.csv.xts(NZL4M, type = "y")
NZL4M<-to.daily(NZL4M)
NZL5M<-data.frame(read.csv(file.path(Investing.com,"NZL5M.csv")))
Investing.csv.xts(NZL5M, type = "y")
NZL5M<-to.daily(NZL5M)
# Bulk-load the Investing.com yield CSVs for the 6M-9M maturities (plus the
# stray IDN1Y that the original script placed in this run). Each ticker XYZ
# previously had three hand-written lines:
#   XYZ <- data.frame(read.csv(file.path(Investing.com, "XYZ.csv")))
#   Investing.csv.xts(XYZ, type = "y")
#   XYZ <- to.daily(XYZ)
# The loop performs the identical steps per ticker, assigning each result
# under its original variable name so downstream code is unaffected.
# NOTE(review): Investing.csv.xts()'s return value is discarded in the
# original, so it presumably works by side effect (possibly via
# non-standard evaluation on the argument's symbol). do.call() with
# as.name(tk) passes the same bare symbol the hand-written calls did --
# confirm against the definition of Investing.csv.xts().
tickers_6m_9m <- c(
  "ITA6M", "BHR6M", "BGD6M", "BEL6M", "BWA6M", "BRA6M", "CAN6M", "HRV6M",
  "DNK6M", "EGY6M", "FRA6M", "DEU6M", "GRC6M", "HKG6M", "HUN6M", "IND6M",
  "IDN6M", "IRL6M", "ISR6M", "JPN6M", "JOR6M", "KEN6M", "MLT6M", "MUS6M",
  "MEX6M", "MAR6M", "NAM6M", "NLD6M", "NZL6M", "NGA6M", "NOR6M", "PAK6M",
  "PHL6M", "PRT6M", "ROU6M", "RUS6M", "SGP6M", "ESP6M", "LKA6M", "SWE6M",
  "CHE6M", "UGA6M", "GBR6M", "USA6M", "MYS7M", "MUS8M", "ITA9M", "BHR9M",
  "BEL9M", "BRA9M", "HRV9M", "EGY9M", "FRA9M", "DEU9M", "HKG9M", "ISR9M",
  "IDN1Y", "JPN9M", "JOR9M", "MEX9M", "NAM9M", "NOR9M", "ESP9M"
)
for (tk in tickers_6m_9m) {
  # Read the raw CSV into a data.frame under the ticker's own name.
  assign(tk, data.frame(read.csv(file.path(Investing.com, paste0(tk, ".csv")))))
  # Side-effecting xts conversion; symbol passed via as.name (see note above).
  do.call("Investing.csv.xts", list(as.name(tk), type = "y"))
  # Aggregate to daily bars, overwriting the ticker variable as before.
  assign(tk, to.daily(get(tk)))
}
# Bulk-load the Investing.com yield CSVs for the 1Y maturity (plus the stray
# IRL2Y that the original script placed in this run). Each ticker XYZ
# previously had three hand-written lines:
#   XYZ <- data.frame(read.csv(file.path(Investing.com, "XYZ.csv")))
#   Investing.csv.xts(XYZ, type = "y")
#   XYZ <- to.daily(XYZ)
# The loop performs the identical steps per ticker, assigning each result
# under its original variable name so downstream code is unaffected.
# NOTE(review): Investing.csv.xts()'s return value is discarded in the
# original, so it presumably works by side effect (possibly via
# non-standard evaluation on the argument's symbol). do.call() with
# as.name(tk) passes the same bare symbol the hand-written calls did --
# confirm against the definition of Investing.csv.xts().
tickers_1y <- c(
  "ARG1Y", "AUS1Y", "AUT1Y", "BHR1Y", "BGD1Y", "BEL1Y", "BRA1Y", "BGR1Y",
  "CAN1Y", "CHL1Y", "CHN1Y", "COL1Y", "HRV1Y", "CZE1Y", "EGY1Y", "FRA1Y",
  "DEU1Y", "HKG1Y", "HUN1Y", "IND1Y", "IRL2Y", "IRL1Y", "ISR1Y", "ITA1Y",
  "JPN1Y", "JOR1Y", "KEN1Y", "MYS1Y", "MLT1Y", "MUS1Y", "MEX1Y", "NAM1Y",
  "NZL1Y", "NGA1Y", "NOR1Y", "PAK1Y", "PHL1Y", "POL1Y", "PRT1Y", "ROU1Y",
  "RUS1Y", "SRB1Y", "SGP1Y", "SVK1Y", "SVN1Y", "KOR1Y", "ESP1Y", "LKA1Y",
  "CHE1Y", "THA1Y", "TUR1Y", "UGA1Y", "UKR1Y", "GBR1Y", "USA1Y", "VNM1Y"
)
for (tk in tickers_1y) {
  # Read the raw CSV into a data.frame under the ticker's own name.
  assign(tk, data.frame(read.csv(file.path(Investing.com, paste0(tk, ".csv")))))
  # Side-effecting xts conversion; symbol passed via as.name (see note above).
  do.call("Investing.csv.xts", list(as.name(tk), type = "y"))
  # Aggregate to daily bars, overwriting the ticker variable as before.
  assign(tk, to.daily(get(tk)))
}
# Bulk-load the Investing.com yield CSVs for the 2Y-3Y maturities (plus the
# stray IRL3Y that the original script placed in this run). Each ticker XYZ
# previously had three hand-written lines:
#   XYZ <- data.frame(read.csv(file.path(Investing.com, "XYZ.csv")))
#   Investing.csv.xts(XYZ, type = "y")
#   XYZ <- to.daily(XYZ)
# The loop performs the identical steps per ticker, assigning each result
# under its original variable name so downstream code is unaffected.
# NOTE(review): Investing.csv.xts()'s return value is discarded in the
# original, so it presumably works by side effect (possibly via
# non-standard evaluation on the argument's symbol). do.call() with
# as.name(tk) passes the same bare symbol the hand-written calls did --
# confirm against the definition of Investing.csv.xts().
tickers_2y_3y <- c(
  "AUS2Y", "AUT2Y", "BHR2Y", "BGD2Y", "BEL2Y", "BRA2Y", "CAN2Y", "CHL2Y",
  "CHN2Y", "CZE2Y", "DNK2Y", "EGY2Y", "FIN2Y", "FRA2Y", "DEU2Y", "HKG2Y",
  "ISL2Y", "IND2Y", "IRL3Y", "ISR2Y", "ITA2Y", "JPN2Y", "JOR2Y", "KEN2Y",
  "LVA2Y", "MUS2Y", "MAR2Y", "NLD2Y", "NZL2Y", "NGA2Y", "PHL2Y", "POL2Y",
  "PRT2Y", "ROU2Y", "RUS2Y", "SRB2Y", "SGP2Y", "SVK2Y", "SVN2Y", "ZAF2Y",
  "KOR2Y", "ESP2Y", "LKA2Y", "SWE2Y", "CHE2Y", "TWN2Y", "THA2Y", "TUR2Y",
  "UGA2Y", "UKR2Y", "GBR2Y", "USA2Y", "VEN2Y", "VNM2Y", "AUS3Y", "AUT3Y",
  "BEL3Y", "BWA3Y", "BRA3Y", "BGR3Y", "CAN3Y", "CHL3Y", "CHN3Y", "HRV3Y",
  "CZE3Y", "DNK3Y", "EGY3Y", "FIN3Y", "FRA3Y", "DEU3Y", "HKG3Y", "HUN3Y",
  "IND3Y", "IDN3Y", "ISR3Y", "ITA3Y", "JPN3Y", "JOR3Y", "KEN3Y", "LVA3Y",
  "LTU3Y", "MYS3Y", "MLT3Y", "MUS3Y", "MEX3Y", "NAM3Y", "NLD3Y", "NOR3Y",
  "PAK3Y", "PHL3Y", "POL3Y", "PRT3Y", "ROU3Y", "RUS3Y", "SVK3Y", "ZAF3Y",
  "KOR3Y", "ESP3Y", "LKA3Y", "CHE3Y", "THA3Y", "TUR3Y", "UGA3Y"
)
for (tk in tickers_2y_3y) {
  # Read the raw CSV into a data.frame under the ticker's own name.
  assign(tk, data.frame(read.csv(file.path(Investing.com, paste0(tk, ".csv")))))
  # Side-effecting xts conversion; symbol passed via as.name (see note above).
  do.call("Investing.csv.xts", list(as.name(tk), type = "y"))
  # Aggregate to daily bars, overwriting the ticker variable as before.
  assign(tk, to.daily(get(tk)))
}
UKR3Y<-data.frame(read.csv(file.path(Investing.com,"UKR3Y.csv")))
Investing.csv.xts(UKR3Y, type = "y")
UKR3Y<-to.daily(UKR3Y)
GBR3Y<-data.frame(read.csv(file.path(Investing.com,"GBR3Y.csv")))
Investing.csv.xts(GBR3Y, type = "y")
GBR3Y<-to.daily(GBR3Y)
USA3Y<-data.frame(read.csv(file.path(Investing.com,"USA3Y.csv")))
Investing.csv.xts(USA3Y, type = "y")
USA3Y<-to.daily(USA3Y)
VNM3Y<-data.frame(read.csv(file.path(Investing.com,"VNM3Y.csv")))
Investing.csv.xts(VNM3Y, type = "y")
VNM3Y<-to.daily(VNM3Y)
ARG4Y<-data.frame(read.csv(file.path(Investing.com,"ARG4Y.csv")))
Investing.csv.xts(ARG4Y, type = "y")
ARG4Y<-to.daily(ARG4Y)
AUS4Y<-data.frame(read.csv(file.path(Investing.com,"AUS4Y.csv")))
Investing.csv.xts(AUS4Y, type = "y")
AUS4Y<-to.daily(AUS4Y)
AUT4Y<-data.frame(read.csv(file.path(Investing.com,"AUT4Y.csv")))
Investing.csv.xts(AUT4Y, type = "y")
AUT4Y<-to.daily(AUT4Y)
BEL4Y<-data.frame(read.csv(file.path(Investing.com,"BEL4Y.csv")))
Investing.csv.xts(BEL4Y, type = "y")
BEL4Y<-to.daily(BEL4Y)
CAN4Y<-data.frame(read.csv(file.path(Investing.com,"CAN4Y.csv")))
Investing.csv.xts(CAN4Y, type = "y")
CAN4Y<-to.daily(CAN4Y)
CHL4Y<-data.frame(read.csv(file.path(Investing.com,"CHL4Y.csv")))
Investing.csv.xts(CHL4Y, type = "y")
CHL4Y<-to.daily(CHL4Y)
COL4Y<-data.frame(read.csv(file.path(Investing.com,"COL4Y.csv")))
Investing.csv.xts(COL4Y, type = "y")
COL4Y<-to.daily(COL4Y)
CZE4Y<-data.frame(read.csv(file.path(Investing.com,"CZE4Y.csv")))
Investing.csv.xts(CZE4Y, type = "y")
CZE4Y<-to.daily(CZE4Y)
FIN4Y<-data.frame(read.csv(file.path(Investing.com,"FIN4Y.csv")))
Investing.csv.xts(FIN4Y, type = "y")
FIN4Y<-to.daily(FIN4Y)
FRA4Y<-data.frame(read.csv(file.path(Investing.com,"FRA4Y.csv")))
Investing.csv.xts(FRA4Y, type = "y")
FRA4Y<-to.daily(FRA4Y)
DEU4Y<-data.frame(read.csv(file.path(Investing.com,"DEU4Y.csv")))
Investing.csv.xts(DEU4Y, type = "y")
DEU4Y<-to.daily(DEU4Y)
IND4Y<-data.frame(read.csv(file.path(Investing.com,"IND4Y.csv")))
Investing.csv.xts(IND4Y, type = "y")
IND4Y<-to.daily(IND4Y)
IRL5Y<-data.frame(read.csv(file.path(Investing.com,"IRL5Y.csv")))
Investing.csv.xts(IRL5Y, type = "y")
IRL5Y<-to.daily(IRL5Y)
IRL4Y<-data.frame(read.csv(file.path(Investing.com,"IRL4Y.csv")))
Investing.csv.xts(IRL4Y, type = "y")
IRL4Y<-to.daily(IRL4Y)
ITA4Y<-data.frame(read.csv(file.path(Investing.com,"ITA4Y.csv")))
Investing.csv.xts(ITA4Y, type = "y")
ITA4Y<-to.daily(ITA4Y)
JPN4Y<-data.frame(read.csv(file.path(Investing.com,"JPN4Y.csv")))
Investing.csv.xts(JPN4Y, type = "y")
JPN4Y<-to.daily(JPN4Y)
KEN4Y<-data.frame(read.csv(file.path(Investing.com,"KEN4Y.csv")))
Investing.csv.xts(KEN4Y, type = "y")
KEN4Y<-to.daily(KEN4Y)
MUS4Y<-data.frame(read.csv(file.path(Investing.com,"MUS4Y.csv")))
Investing.csv.xts(MUS4Y, type = "y")
MUS4Y<-to.daily(MUS4Y)
NLD4Y<-data.frame(read.csv(file.path(Investing.com,"NLD4Y.csv")))
Investing.csv.xts(NLD4Y, type = "y")
NLD4Y<-to.daily(NLD4Y)
NGA4Y<-data.frame(read.csv(file.path(Investing.com,"NGA4Y.csv")))
Investing.csv.xts(NGA4Y, type = "y")
NGA4Y<-to.daily(NGA4Y)
PHL4Y<-data.frame(read.csv(file.path(Investing.com,"PHL4Y.csv")))
Investing.csv.xts(PHL4Y, type = "y")
PHL4Y<-to.daily(PHL4Y)
POL4Y<-data.frame(read.csv(file.path(Investing.com,"POL4Y.csv")))
Investing.csv.xts(POL4Y, type = "y")
POL4Y<-to.daily(POL4Y)
PRT4Y<-data.frame(read.csv(file.path(Investing.com,"PRT4Y.csv")))
Investing.csv.xts(PRT4Y, type = "y")
PRT4Y<-to.daily(PRT4Y)
ROU4Y<-data.frame(read.csv(file.path(Investing.com,"ROU4Y.csv")))
Investing.csv.xts(ROU4Y, type = "y")
ROU4Y<-to.daily(ROU4Y)
SRB4Y<-data.frame(read.csv(file.path(Investing.com,"SRB4Y.csv")))
Investing.csv.xts(SRB4Y, type = "y")
SRB4Y<-to.daily(SRB4Y)
KOR4Y<-data.frame(read.csv(file.path(Investing.com,"KOR4Y.csv")))
Investing.csv.xts(KOR4Y, type = "y")
KOR4Y<-to.daily(KOR4Y)
ESP4Y<-data.frame(read.csv(file.path(Investing.com,"ESP4Y.csv")))
Investing.csv.xts(ESP4Y, type = "y")
ESP4Y<-to.daily(ESP4Y)
LKA4Y<-data.frame(read.csv(file.path(Investing.com,"LKA4Y.csv")))
Investing.csv.xts(LKA4Y, type = "y")
LKA4Y<-to.daily(LKA4Y)
CHE4Y<-data.frame(read.csv(file.path(Investing.com,"CHE4Y.csv")))
Investing.csv.xts(CHE4Y, type = "y")
CHE4Y<-to.daily(CHE4Y)
UGA4Y<-data.frame(read.csv(file.path(Investing.com,"UGA4Y.csv")))
Investing.csv.xts(UGA4Y, type = "y")
UGA4Y<-to.daily(UGA4Y)
GBR4Y<-data.frame(read.csv(file.path(Investing.com,"GBR4Y.csv")))
Investing.csv.xts(GBR4Y, type = "y")
GBR4Y<-to.daily(GBR4Y)
AUS5Y<-data.frame(read.csv(file.path(Investing.com,"AUS5Y.csv")))
Investing.csv.xts(AUS5Y, type = "y")
AUS5Y<-to.daily(AUS5Y)
AUT5Y<-data.frame(read.csv(file.path(Investing.com,"AUT5Y.csv")))
Investing.csv.xts(AUT5Y, type = "y")
AUT5Y<-to.daily(AUT5Y)
BHR5Y<-data.frame(read.csv(file.path(Investing.com,"BHR5Y.csv")))
Investing.csv.xts(BHR5Y, type = "y")
BHR5Y<-to.daily(BHR5Y)
BGD5Y<-data.frame(read.csv(file.path(Investing.com,"BGD5Y.csv")))
Investing.csv.xts(BGD5Y, type = "y")
BGD5Y<-to.daily(BGD5Y)
BEL5Y<-data.frame(read.csv(file.path(Investing.com,"BEL5Y.csv")))
Investing.csv.xts(BEL5Y, type = "y")
BEL5Y<-to.daily(BEL5Y)
BWA5Y<-data.frame(read.csv(file.path(Investing.com,"BWA5Y.csv")))
Investing.csv.xts(BWA5Y, type = "y")
BWA5Y<-to.daily(BWA5Y)
BRA5Y<-data.frame(read.csv(file.path(Investing.com,"BRA5Y.csv")))
Investing.csv.xts(BRA5Y, type = "y")
BRA5Y<-to.daily(BRA5Y)
BGR5Y<-data.frame(read.csv(file.path(Investing.com,"BGR5Y.csv")))
Investing.csv.xts(BGR5Y, type = "y")
BGR5Y<-to.daily(BGR5Y)
CAN5Y<-data.frame(read.csv(file.path(Investing.com,"CAN5Y.csv")))
Investing.csv.xts(CAN5Y, type = "y")
CAN5Y<-to.daily(CAN5Y)
CHL5Y<-data.frame(read.csv(file.path(Investing.com,"CHL5Y.csv")))
Investing.csv.xts(CHL5Y, type = "y")
CHL5Y<-to.daily(CHL5Y)
CHN5Y<-data.frame(read.csv(file.path(Investing.com,"CHN5Y.csv")))
Investing.csv.xts(CHN5Y, type = "y")
CHN5Y<-to.daily(CHN5Y)
COL5Y<-data.frame(read.csv(file.path(Investing.com,"COL5Y.csv")))
Investing.csv.xts(COL5Y, type = "y")
COL5Y<-to.daily(COL5Y)
HRV5Y<-data.frame(read.csv(file.path(Investing.com,"HRV5Y.csv")))
Investing.csv.xts(HRV5Y, type = "y")
HRV5Y<-to.daily(HRV5Y)
CZE5Y<-data.frame(read.csv(file.path(Investing.com,"CZE5Y.csv")))
Investing.csv.xts(CZE5Y, type = "y")
CZE5Y<-to.daily(CZE5Y)
DNK5Y<-data.frame(read.csv(file.path(Investing.com,"DNK5Y.csv")))
Investing.csv.xts(DNK5Y, type = "y")
DNK5Y<-to.daily(DNK5Y)
EGY5Y<-data.frame(read.csv(file.path(Investing.com,"EGY5Y.csv")))
Investing.csv.xts(EGY5Y, type = "y")
EGY5Y<-to.daily(EGY5Y)
FIN5Y<-data.frame(read.csv(file.path(Investing.com,"FIN5Y.csv")))
Investing.csv.xts(FIN5Y, type = "y")
FIN5Y<-to.daily(FIN5Y)
FRA5Y<-data.frame(read.csv(file.path(Investing.com,"FRA5Y.csv")))
Investing.csv.xts(FRA5Y, type = "y")
FRA5Y<-to.daily(FRA5Y)
DEU5Y<-data.frame(read.csv(file.path(Investing.com,"DEU5Y.csv")))
Investing.csv.xts(DEU5Y, type = "y")
DEU5Y<-to.daily(DEU5Y)
GRC5Y<-data.frame(read.csv(file.path(Investing.com,"GRC5Y.csv")))
Investing.csv.xts(GRC5Y, type = "y")
GRC5Y<-to.daily(GRC5Y)
HKG5Y<-data.frame(read.csv(file.path(Investing.com,"HKG5Y.csv")))
Investing.csv.xts(HKG5Y, type = "y")
HKG5Y<-to.daily(HKG5Y)
HUN5Y<-data.frame(read.csv(file.path(Investing.com,"HUN5Y.csv")))
Investing.csv.xts(HUN5Y, type = "y")
HUN5Y<-to.daily(HUN5Y)
ISL5Y<-data.frame(read.csv(file.path(Investing.com,"ISL5Y.csv")))
Investing.csv.xts(ISL5Y, type = "y")
ISL5Y<-to.daily(ISL5Y)
IND5Y<-data.frame(read.csv(file.path(Investing.com,"IND5Y.csv")))
Investing.csv.xts(IND5Y, type = "y")
IND5Y<-to.daily(IND5Y)
IDN5Y<-data.frame(read.csv(file.path(Investing.com,"IDN5Y.csv")))
Investing.csv.xts(IDN5Y, type = "y")
IDN5Y<-to.daily(IDN5Y)
ISR5Y<-data.frame(read.csv(file.path(Investing.com,"ISR5Y.csv")))
Investing.csv.xts(ISR5Y, type = "y")
ISR5Y<-to.daily(ISR5Y)
ITA5Y<-data.frame(read.csv(file.path(Investing.com,"ITA5Y.csv")))
Investing.csv.xts(ITA5Y, type = "y")
ITA5Y<-to.daily(ITA5Y)
JPN5Y<-data.frame(read.csv(file.path(Investing.com,"JPN5Y.csv")))
Investing.csv.xts(JPN5Y, type = "y")
JPN5Y<-to.daily(JPN5Y)
JOR5Y<-data.frame(read.csv(file.path(Investing.com,"JOR5Y.csv")))
Investing.csv.xts(JOR5Y, type = "y")
JOR5Y<-to.daily(JOR5Y)
KEN5Y<-data.frame(read.csv(file.path(Investing.com,"KEN5Y.csv")))
Investing.csv.xts(KEN5Y, type = "y")
KEN5Y<-to.daily(KEN5Y)
LVA5Y<-data.frame(read.csv(file.path(Investing.com,"LVA5Y.csv")))
Investing.csv.xts(LVA5Y, type = "y")
LVA5Y<-to.daily(LVA5Y)
LTU5Y<-data.frame(read.csv(file.path(Investing.com,"LTU5Y.csv")))
Investing.csv.xts(LTU5Y, type = "y")
LTU5Y<-to.daily(LTU5Y)
MYS5Y<-data.frame(read.csv(file.path(Investing.com,"MYS5Y.csv")))
Investing.csv.xts(MYS5Y, type = "y")
MYS5Y<-to.daily(MYS5Y)
MLT5Y<-data.frame(read.csv(file.path(Investing.com,"MLT5Y.csv")))
Investing.csv.xts(MLT5Y, type = "y")
MLT5Y<-to.daily(MLT5Y)
MUS5Y<-data.frame(read.csv(file.path(Investing.com,"MUS5Y.csv")))
Investing.csv.xts(MUS5Y, type = "y")
MUS5Y<-to.daily(MUS5Y)
MEX5Y<-data.frame(read.csv(file.path(Investing.com,"MEX5Y.csv")))
Investing.csv.xts(MEX5Y, type = "y")
MEX5Y<-to.daily(MEX5Y)
MAR5Y<-data.frame(read.csv(file.path(Investing.com,"MAR5Y.csv")))
Investing.csv.xts(MAR5Y, type = "y")
MAR5Y<-to.daily(MAR5Y)
NLD5Y<-data.frame(read.csv(file.path(Investing.com,"NLD5Y.csv")))
Investing.csv.xts(NLD5Y, type = "y")
NLD5Y<-to.daily(NLD5Y)
NZL5Y<-data.frame(read.csv(file.path(Investing.com,"NZL5Y.csv")))
Investing.csv.xts(NZL5Y, type = "y")
NZL5Y<-to.daily(NZL5Y)
NGA5Y<-data.frame(read.csv(file.path(Investing.com,"NGA5Y.csv")))
Investing.csv.xts(NGA5Y, type = "y")
NGA5Y<-to.daily(NGA5Y)
NOR5Y<-data.frame(read.csv(file.path(Investing.com,"NOR5Y.csv")))
Investing.csv.xts(NOR5Y, type = "y")
NOR5Y<-to.daily(NOR5Y)
PAK5Y<-data.frame(read.csv(file.path(Investing.com,"PAK5Y.csv")))
Investing.csv.xts(PAK5Y, type = "y")
PAK5Y<-to.daily(PAK5Y)
PER5Y<-data.frame(read.csv(file.path(Investing.com,"PER5Y.csv")))
Investing.csv.xts(PER5Y, type = "y")
PER5Y<-to.daily(PER5Y)
PHL5Y<-data.frame(read.csv(file.path(Investing.com,"PHL5Y.csv")))
Investing.csv.xts(PHL5Y, type = "y")
PHL5Y<-to.daily(PHL5Y)
POL5Y<-data.frame(read.csv(file.path(Investing.com,"POL5Y.csv")))
Investing.csv.xts(POL5Y, type = "y")
POL5Y<-to.daily(POL5Y)
PRT5Y<-data.frame(read.csv(file.path(Investing.com,"PRT5Y.csv")))
Investing.csv.xts(PRT5Y, type = "y")
PRT5Y<-to.daily(PRT5Y)
QAT5Y<-data.frame(read.csv(file.path(Investing.com,"QAT5Y.csv")))
Investing.csv.xts(QAT5Y, type = "y")
QAT5Y<-to.daily(QAT5Y)
ROU5Y<-data.frame(read.csv(file.path(Investing.com,"ROU5Y.csv")))
Investing.csv.xts(ROU5Y, type = "y")
ROU5Y<-to.daily(ROU5Y)
RUS5Y<-data.frame(read.csv(file.path(Investing.com,"RUS5Y.csv")))
Investing.csv.xts(RUS5Y, type = "y")
RUS5Y<-to.daily(RUS5Y)
SRB5Y<-data.frame(read.csv(file.path(Investing.com,"SRB5Y.csv")))
Investing.csv.xts(SRB5Y, type = "y")
SRB5Y<-to.daily(SRB5Y)
SGP5Y<-data.frame(read.csv(file.path(Investing.com,"SGP5Y.csv")))
Investing.csv.xts(SGP5Y, type = "y")
SGP5Y<-to.daily(SGP5Y)
SVK5Y<-data.frame(read.csv(file.path(Investing.com,"SVK5Y.csv")))
Investing.csv.xts(SVK5Y, type = "y")
SVK5Y<-to.daily(SVK5Y)
SVN5Y<-data.frame(read.csv(file.path(Investing.com,"SVN5Y.csv")))
Investing.csv.xts(SVN5Y, type = "y")
SVN5Y<-to.daily(SVN5Y)
ZAF5Y<-data.frame(read.csv(file.path(Investing.com,"ZAF5Y.csv")))
Investing.csv.xts(ZAF5Y, type = "y")
ZAF5Y<-to.daily(ZAF5Y)
KOR5Y<-data.frame(read.csv(file.path(Investing.com,"KOR5Y.csv")))
Investing.csv.xts(KOR5Y, type = "y")
KOR5Y<-to.daily(KOR5Y)
ESP5Y<-data.frame(read.csv(file.path(Investing.com,"ESP5Y.csv")))
Investing.csv.xts(ESP5Y, type = "y")
ESP5Y<-to.daily(ESP5Y)
LKA5Y<-data.frame(read.csv(file.path(Investing.com,"LKA5Y.csv")))
Investing.csv.xts(LKA5Y, type = "y")
LKA5Y<-to.daily(LKA5Y)
SWE5Y<-data.frame(read.csv(file.path(Investing.com,"SWE5Y.csv")))
Investing.csv.xts(SWE5Y, type = "y")
SWE5Y<-to.daily(SWE5Y)
CHE5Y<-data.frame(read.csv(file.path(Investing.com,"CHE5Y.csv")))
Investing.csv.xts(CHE5Y, type = "y")
CHE5Y<-to.daily(CHE5Y)
TWN5Y<-data.frame(read.csv(file.path(Investing.com,"TWN5Y.csv")))
Investing.csv.xts(TWN5Y, type = "y")
TWN5Y<-to.daily(TWN5Y)
THA5Y<-data.frame(read.csv(file.path(Investing.com,"THA5Y.csv")))
Investing.csv.xts(THA5Y, type = "y")
THA5Y<-to.daily(THA5Y)
TUR5Y<-data.frame(read.csv(file.path(Investing.com,"TUR5Y.csv")))
Investing.csv.xts(TUR5Y, type = "y")
TUR5Y<-to.daily(TUR5Y)
UGA5Y<-data.frame(read.csv(file.path(Investing.com,"UGA5Y.csv")))
Investing.csv.xts(UGA5Y, type = "y")
UGA5Y<-to.daily(UGA5Y)
GBR5Y<-data.frame(read.csv(file.path(Investing.com,"GBR5Y.csv")))
Investing.csv.xts(GBR5Y, type = "y")
GBR5Y<-to.daily(GBR5Y)
USA5Y<-data.frame(read.csv(file.path(Investing.com,"USA5Y.csv")))
Investing.csv.xts(USA5Y, type = "y")
USA5Y<-to.daily(USA5Y)
VEN5Y<-data.frame(read.csv(file.path(Investing.com,"VEN5Y.csv")))
Investing.csv.xts(VEN5Y, type = "y")
VEN5Y<-to.daily(VEN5Y)
VNM5Y<-data.frame(read.csv(file.path(Investing.com,"VNM5Y.csv")))
Investing.csv.xts(VNM5Y, type = "y")
VNM5Y<-to.daily(VNM5Y)
ARG6Y<-data.frame(read.csv(file.path(Investing.com,"ARG6Y.csv")))
Investing.csv.xts(ARG6Y, type = "y")
ARG6Y<-to.daily(ARG6Y)
AUS6Y<-data.frame(read.csv(file.path(Investing.com,"AUS6Y.csv")))
Investing.csv.xts(AUS6Y, type = "y")
AUS6Y<-to.daily(AUS6Y)
AUT6Y<-data.frame(read.csv(file.path(Investing.com,"AUT6Y.csv")))
Investing.csv.xts(AUT6Y, type = "y")
AUT6Y<-to.daily(AUT6Y)
BEL6Y<-data.frame(read.csv(file.path(Investing.com,"BEL6Y.csv")))
Investing.csv.xts(BEL6Y, type = "y")
BEL6Y<-to.daily(BEL6Y)
CZE6Y<-data.frame(read.csv(file.path(Investing.com,"CZE6Y.csv")))
Investing.csv.xts(CZE6Y, type = "y")
CZE6Y<-to.daily(CZE6Y)
FIN6Y<-data.frame(read.csv(file.path(Investing.com,"FIN6Y.csv")))
Investing.csv.xts(FIN6Y, type = "y")
FIN6Y<-to.daily(FIN6Y)
FRA6Y<-data.frame(read.csv(file.path(Investing.com,"FRA6Y.csv")))
Investing.csv.xts(FRA6Y, type = "y")
FRA6Y<-to.daily(FRA6Y)
DEU6Y<-data.frame(read.csv(file.path(Investing.com,"DEU6Y.csv")))
Investing.csv.xts(DEU6Y, type = "y")
DEU6Y<-to.daily(DEU6Y)
IND6Y<-data.frame(read.csv(file.path(Investing.com,"IND6Y.csv")))
Investing.csv.xts(IND6Y, type = "y")
IND6Y<-to.daily(IND6Y)
IRL7Y<-data.frame(read.csv(file.path(Investing.com,"IRL7Y.csv")))
Investing.csv.xts(IRL7Y, type = "y")
IRL7Y<-to.daily(IRL7Y)
IRL6Y<-data.frame(read.csv(file.path(Investing.com,"IRL6Y.csv")))
Investing.csv.xts(IRL6Y, type = "y")
IRL6Y<-to.daily(IRL6Y)
ITA6Y<-data.frame(read.csv(file.path(Investing.com,"ITA6Y.csv")))
Investing.csv.xts(ITA6Y, type = "y")
ITA6Y<-to.daily(ITA6Y)
JPN6Y<-data.frame(read.csv(file.path(Investing.com,"JPN6Y.csv")))
Investing.csv.xts(JPN6Y, type = "y")
JPN6Y<-to.daily(JPN6Y)
KEN6Y<-data.frame(read.csv(file.path(Investing.com,"KEN6Y.csv")))
Investing.csv.xts(KEN6Y, type = "y")
KEN6Y<-to.daily(KEN6Y)
NLD6Y<-data.frame(read.csv(file.path(Investing.com,"NLD6Y.csv")))
Investing.csv.xts(NLD6Y, type = "y")
NLD6Y<-to.daily(NLD6Y)
POL6Y<-data.frame(read.csv(file.path(Investing.com,"POL6Y.csv")))
Investing.csv.xts(POL6Y, type = "y")
POL6Y<-to.daily(POL6Y)
PRT6Y<-data.frame(read.csv(file.path(Investing.com,"PRT6Y.csv")))
Investing.csv.xts(PRT6Y, type = "y")
PRT6Y<-to.daily(PRT6Y)
SRB6Y<-data.frame(read.csv(file.path(Investing.com,"SRB6Y.csv")))
Investing.csv.xts(SRB6Y, type = "y")
SRB6Y<-to.daily(SRB6Y)
SVK6Y<-data.frame(read.csv(file.path(Investing.com,"SVK6Y.csv")))
Investing.csv.xts(SVK6Y, type = "y")
SVK6Y<-to.daily(SVK6Y)
ZAF6Y<-data.frame(read.csv(file.path(Investing.com,"ZAF6Y.csv")))
Investing.csv.xts(ZAF6Y, type = "y")
ZAF6Y<-to.daily(ZAF6Y)
ESP6Y<-data.frame(read.csv(file.path(Investing.com,"ESP6Y.csv")))
Investing.csv.xts(ESP6Y, type = "y")
ESP6Y<-to.daily(ESP6Y)
LKA6Y<-data.frame(read.csv(file.path(Investing.com,"LKA6Y.csv")))
Investing.csv.xts(LKA6Y, type = "y")
LKA6Y<-to.daily(LKA6Y)
CHE6Y<-data.frame(read.csv(file.path(Investing.com,"CHE6Y.csv")))
Investing.csv.xts(CHE6Y, type = "y")
CHE6Y<-to.daily(CHE6Y)
GBR6Y<-data.frame(read.csv(file.path(Investing.com,"GBR6Y.csv")))
Investing.csv.xts(GBR6Y, type = "y")
GBR6Y<-to.daily(GBR6Y)
AUS7Y<-data.frame(read.csv(file.path(Investing.com,"AUS7Y.csv")))
Investing.csv.xts(AUS7Y, type = "y")
AUS7Y<-to.daily(AUS7Y)
AUT7Y<-data.frame(read.csv(file.path(Investing.com,"AUT7Y.csv")))
Investing.csv.xts(AUT7Y, type = "y")
AUT7Y<-to.daily(AUT7Y)
BEL7Y<-data.frame(read.csv(file.path(Investing.com,"BEL7Y.csv")))
Investing.csv.xts(BEL7Y, type = "y")
BEL7Y<-to.daily(BEL7Y)
BWA7Y<-data.frame(read.csv(file.path(Investing.com,"BWA7Y.csv")))
Investing.csv.xts(BWA7Y, type = "y")
BWA7Y<-to.daily(BWA7Y)
BGR7Y<-data.frame(read.csv(file.path(Investing.com,"BGR7Y.csv")))
Investing.csv.xts(BGR7Y, type = "y")
BGR7Y<-to.daily(BGR7Y)
CAN7Y<-data.frame(read.csv(file.path(Investing.com,"CAN7Y.csv")))
Investing.csv.xts(CAN7Y, type = "y")
CAN7Y<-to.daily(CAN7Y)
CHN7Y<-data.frame(read.csv(file.path(Investing.com,"CHN7Y.csv")))
Investing.csv.xts(CHN7Y, type = "y")
CHN7Y<-to.daily(CHN7Y)
CZE7Y<-data.frame(read.csv(file.path(Investing.com,"CZE7Y.csv")))
Investing.csv.xts(CZE7Y, type = "y")
CZE7Y<-to.daily(CZE7Y)
EGY7Y<-data.frame(read.csv(file.path(Investing.com,"EGY7Y.csv")))
Investing.csv.xts(EGY7Y, type = "y")
EGY7Y<-to.daily(EGY7Y)
FRA7Y<-data.frame(read.csv(file.path(Investing.com,"FRA7Y.csv")))
Investing.csv.xts(FRA7Y, type = "y")
FRA7Y<-to.daily(FRA7Y)
DEU7Y<-data.frame(read.csv(file.path(Investing.com,"DEU7Y.csv")))
Investing.csv.xts(DEU7Y, type = "y")
DEU7Y<-to.daily(DEU7Y)
HKG7Y<-data.frame(read.csv(file.path(Investing.com,"HKG7Y.csv")))
Investing.csv.xts(HKG7Y, type = "y")
HKG7Y<-to.daily(HKG7Y)
IND7Y<-data.frame(read.csv(file.path(Investing.com,"IND7Y.csv")))
Investing.csv.xts(IND7Y, type = "y")
IND7Y<-to.daily(IND7Y)
IRL8Y<-data.frame(read.csv(file.path(Investing.com,"IRL8Y.csv")))
Investing.csv.xts(IRL8Y, type = "y")
IRL8Y<-to.daily(IRL8Y)
ITA7Y<-data.frame(read.csv(file.path(Investing.com,"ITA7Y.csv")))
Investing.csv.xts(ITA7Y, type = "y")
ITA7Y<-to.daily(ITA7Y)
JPN7Y<-data.frame(read.csv(file.path(Investing.com,"JPN7Y.csv")))
Investing.csv.xts(JPN7Y, type = "y")
JPN7Y<-to.daily(JPN7Y)
JOR7Y<-data.frame(read.csv(file.path(Investing.com,"JOR7Y.csv")))
Investing.csv.xts(JOR7Y, type = "y")
JOR7Y<-to.daily(JOR7Y)
KEN7Y<-data.frame(read.csv(file.path(Investing.com,"KEN7Y.csv")))
Investing.csv.xts(KEN7Y, type = "y")
KEN7Y<-to.daily(KEN7Y)
MYS7Y<-data.frame(read.csv(file.path(Investing.com,"MYS7Y.csv")))
Investing.csv.xts(MYS7Y, type = "y")
MYS7Y<-to.daily(MYS7Y)
MEX7Y<-data.frame(read.csv(file.path(Investing.com,"MEX7Y.csv")))
Investing.csv.xts(MEX7Y, type = "y")
MEX7Y<-to.daily(MEX7Y)
NAM7Y<-data.frame(read.csv(file.path(Investing.com,"NAM7Y.csv")))
Investing.csv.xts(NAM7Y, type = "y")
NAM7Y<-to.daily(NAM7Y)
NLD7Y<-data.frame(read.csv(file.path(Investing.com,"NLD7Y.csv")))
Investing.csv.xts(NLD7Y, type = "y")
NLD7Y<-to.daily(NLD7Y)
NZL7Y<-data.frame(read.csv(file.path(Investing.com,"NZL7Y.csv")))
Investing.csv.xts(NZL7Y, type = "y")
NZL7Y<-to.daily(NZL7Y)
NGA7Y<-data.frame(read.csv(file.path(Investing.com,"NGA7Y.csv")))
Investing.csv.xts(NGA7Y, type = "y")
NGA7Y<-to.daily(NGA7Y)
PHL7Y<-data.frame(read.csv(file.path(Investing.com,"PHL7Y.csv")))
Investing.csv.xts(PHL7Y, type = "y")
PHL7Y<-to.daily(PHL7Y)
PRT7Y<-data.frame(read.csv(file.path(Investing.com,"PRT7Y.csv")))
Investing.csv.xts(PRT7Y, type = "y")
PRT7Y<-to.daily(PRT7Y)
ROU7Y<-data.frame(read.csv(file.path(Investing.com,"ROU7Y.csv")))
Investing.csv.xts(ROU7Y, type = "y")
ROU7Y<-to.daily(ROU7Y)
RUS7Y<-data.frame(read.csv(file.path(Investing.com,"RUS7Y.csv")))
Investing.csv.xts(RUS7Y, type = "y")
RUS7Y<-to.daily(RUS7Y)
SRB7Y<-data.frame(read.csv(file.path(Investing.com,"SRB7Y.csv")))
Investing.csv.xts(SRB7Y, type = "y")
SRB7Y<-to.daily(SRB7Y)
SVK7Y<-data.frame(read.csv(file.path(Investing.com,"SVK7Y.csv")))
Investing.csv.xts(SVK7Y, type = "y")
SVK7Y<-to.daily(SVK7Y)
SVN7Y<-data.frame(read.csv(file.path(Investing.com,"SVN7Y.csv")))
Investing.csv.xts(SVN7Y, type = "y")
SVN7Y<-to.daily(SVN7Y)
ESP7Y<-data.frame(read.csv(file.path(Investing.com,"ESP7Y.csv")))
Investing.csv.xts(ESP7Y, type = "y")
ESP7Y<-to.daily(ESP7Y)
LKA7Y<-data.frame(read.csv(file.path(Investing.com,"LKA7Y.csv")))
Investing.csv.xts(LKA7Y, type = "y")
LKA7Y<-to.daily(LKA7Y)
SWE7Y<-data.frame(read.csv(file.path(Investing.com,"SWE7Y.csv")))
Investing.csv.xts(SWE7Y, type = "y")
SWE7Y<-to.daily(SWE7Y)
CHE7Y<-data.frame(read.csv(file.path(Investing.com,"CHE7Y.csv")))
Investing.csv.xts(CHE7Y, type = "y")
CHE7Y<-to.daily(CHE7Y)
THA7Y<-data.frame(read.csv(file.path(Investing.com,"THA7Y.csv")))
Investing.csv.xts(THA7Y, type = "y")
THA7Y<-to.daily(THA7Y)
GBR7Y<-data.frame(read.csv(file.path(Investing.com,"GBR7Y.csv")))
Investing.csv.xts(GBR7Y, type = "y")
GBR7Y<-to.daily(GBR7Y)
USA7Y<-data.frame(read.csv(file.path(Investing.com,"USA7Y.csv")))
Investing.csv.xts(USA7Y, type = "y")
USA7Y<-to.daily(USA7Y)
VNM7Y<-data.frame(read.csv(file.path(Investing.com,"VNM7Y.csv")))
Investing.csv.xts(VNM7Y, type = "y")
VNM7Y<-to.daily(VNM7Y)
AUS8Y<-data.frame(read.csv(file.path(Investing.com,"AUS8Y.csv")))
Investing.csv.xts(AUS8Y, type = "y")
AUS8Y<-to.daily(AUS8Y)
AUT8Y<-data.frame(read.csv(file.path(Investing.com,"AUT8Y.csv")))
Investing.csv.xts(AUT8Y, type = "y")
AUT8Y<-to.daily(AUT8Y)
BEL8Y<-data.frame(read.csv(file.path(Investing.com,"BEL8Y.csv")))
Investing.csv.xts(BEL8Y, type = "y")
BEL8Y<-to.daily(BEL8Y)
BRA8Y<-data.frame(read.csv(file.path(Investing.com,"BRA8Y.csv")))
Investing.csv.xts(BRA8Y, type = "y")
BRA8Y<-to.daily(BRA8Y)
CHL8Y<-data.frame(read.csv(file.path(Investing.com,"CHL8Y.csv")))
Investing.csv.xts(CHL8Y, type = "y")
CHL8Y<-to.daily(CHL8Y)
CZE8Y<-data.frame(read.csv(file.path(Investing.com,"CZE8Y.csv")))
Investing.csv.xts(CZE8Y, type = "y")
CZE8Y<-to.daily(CZE8Y)
DNK8Y<-data.frame(read.csv(file.path(Investing.com,"DNK8Y.csv")))
Investing.csv.xts(DNK8Y, type = "y")
DNK8Y<-to.daily(DNK8Y)
FIN8Y<-data.frame(read.csv(file.path(Investing.com,"FIN8Y.csv")))
Investing.csv.xts(FIN8Y, type = "y")
FIN8Y<-to.daily(FIN8Y)
FRA8Y<-data.frame(read.csv(file.path(Investing.com,"FRA8Y.csv")))
Investing.csv.xts(FRA8Y, type = "y")
FRA8Y<-to.daily(FRA8Y)
DEU8Y<-data.frame(read.csv(file.path(Investing.com,"DEU8Y.csv")))
Investing.csv.xts(DEU8Y, type = "y")
DEU8Y<-to.daily(DEU8Y)
IND8Y<-data.frame(read.csv(file.path(Investing.com,"IND8Y.csv")))
Investing.csv.xts(IND8Y, type = "y")
IND8Y<-to.daily(IND8Y)
ITA8Y<-data.frame(read.csv(file.path(Investing.com,"ITA8Y.csv")))
Investing.csv.xts(ITA8Y, type = "y")
ITA8Y<-to.daily(ITA8Y)
JPN8Y<-data.frame(read.csv(file.path(Investing.com,"JPN8Y.csv")))
Investing.csv.xts(JPN8Y, type = "y")
JPN8Y<-to.daily(JPN8Y)
KEN8Y<-data.frame(read.csv(file.path(Investing.com,"KEN8Y.csv")))
Investing.csv.xts(KEN8Y, type = "y")
KEN8Y<-to.daily(KEN8Y)
NLD8Y<-data.frame(read.csv(file.path(Investing.com,"NLD8Y.csv")))
Investing.csv.xts(NLD8Y, type = "y")
NLD8Y<-to.daily(NLD8Y)
POL8Y<-data.frame(read.csv(file.path(Investing.com,"POL8Y.csv")))
Investing.csv.xts(POL8Y, type = "y")
POL8Y<-to.daily(POL8Y)
PRT8Y<-data.frame(read.csv(file.path(Investing.com,"PRT8Y.csv")))
Investing.csv.xts(PRT8Y, type = "y")
PRT8Y<-to.daily(PRT8Y)
SVK8Y<-data.frame(read.csv(file.path(Investing.com,"SVK8Y.csv")))
Investing.csv.xts(SVK8Y, type = "y")
SVK8Y<-to.daily(SVK8Y)
ESP8Y<-data.frame(read.csv(file.path(Investing.com,"ESP8Y.csv")))
Investing.csv.xts(ESP8Y, type = "y")
ESP8Y<-to.daily(ESP8Y)
LKA8Y<-data.frame(read.csv(file.path(Investing.com,"LKA8Y.csv")))
Investing.csv.xts(LKA8Y, type = "y")
LKA8Y<-to.daily(LKA8Y)
CHE8Y<-data.frame(read.csv(file.path(Investing.com,"CHE8Y.csv")))
Investing.csv.xts(CHE8Y, type = "y")
CHE8Y<-to.daily(CHE8Y)
GBR8Y<-data.frame(read.csv(file.path(Investing.com,"GBR8Y.csv")))
Investing.csv.xts(GBR8Y, type = "y")
GBR8Y<-to.daily(GBR8Y)
ARG9Y<-data.frame(read.csv(file.path(Investing.com,"ARG9Y.csv")))
Investing.csv.xts(ARG9Y, type = "y")
ARG9Y<-to.daily(ARG9Y)
AUS9Y<-data.frame(read.csv(file.path(Investing.com,"AUS9Y.csv")))
Investing.csv.xts(AUS9Y, type = "y")
AUS9Y<-to.daily(AUS9Y)
AUT9Y<-data.frame(read.csv(file.path(Investing.com,"AUT9Y.csv")))
Investing.csv.xts(AUT9Y, type = "y")
AUT9Y<-to.daily(AUT9Y)
BEL9Y<-data.frame(read.csv(file.path(Investing.com,"BEL9Y.csv")))
Investing.csv.xts(BEL9Y, type = "y")
BEL9Y<-to.daily(BEL9Y)
CZE9Y<-data.frame(read.csv(file.path(Investing.com,"CZE9Y.csv")))
Investing.csv.xts(CZE9Y, type = "y")
CZE9Y<-to.daily(CZE9Y)
FRA9Y<-data.frame(read.csv(file.path(Investing.com,"FRA9Y.csv")))
Investing.csv.xts(FRA9Y, type = "y")
FRA9Y<-to.daily(FRA9Y)
DEU9Y<-data.frame(read.csv(file.path(Investing.com,"DEU9Y.csv")))
Investing.csv.xts(DEU9Y, type = "y")
DEU9Y<-to.daily(DEU9Y)
IND9Y<-data.frame(read.csv(file.path(Investing.com,"IND9Y.csv")))
Investing.csv.xts(IND9Y, type = "y")
IND9Y<-to.daily(IND9Y)
IRL10Y<-data.frame(read.csv(file.path(Investing.com,"IRL10Y.csv")))
Investing.csv.xts(IRL10Y, type = "y")
IRL10Y<-to.daily(IRL10Y)
ITA9Y<-data.frame(read.csv(file.path(Investing.com,"ITA9Y.csv")))
Investing.csv.xts(ITA9Y, type = "y")
ITA9Y<-to.daily(ITA9Y)
JPN9Y<-data.frame(read.csv(file.path(Investing.com,"JPN9Y.csv")))
Investing.csv.xts(JPN9Y, type = "y")
JPN9Y<-to.daily(JPN9Y)
KEN9Y<-data.frame(read.csv(file.path(Investing.com,"KEN9Y.csv")))
Investing.csv.xts(KEN9Y, type = "y")
KEN9Y<-to.daily(KEN9Y)
NLD9Y<-data.frame(read.csv(file.path(Investing.com,"NLD9Y.csv")))
Investing.csv.xts(NLD9Y, type = "y")
NLD9Y<-to.daily(NLD9Y)
PER9Y<-data.frame(read.csv(file.path(Investing.com,"PER9Y.csv")))
Investing.csv.xts(PER9Y, type = "y")
PER9Y<-to.daily(PER9Y)
POL9Y<-data.frame(read.csv(file.path(Investing.com,"POL9Y.csv")))
Investing.csv.xts(POL9Y, type = "y")
POL9Y<-to.daily(POL9Y)
PRT9Y<-data.frame(read.csv(file.path(Investing.com,"PRT9Y.csv")))
Investing.csv.xts(PRT9Y, type = "y")
PRT9Y<-to.daily(PRT9Y)
SVK9Y<-data.frame(read.csv(file.path(Investing.com,"SVK9Y.csv")))
Investing.csv.xts(SVK9Y, type = "y")
SVK9Y<-to.daily(SVK9Y)
SVN9Y<-data.frame(read.csv(file.path(Investing.com,"SVN9Y.csv")))
Investing.csv.xts(SVN9Y, type = "y")
SVN9Y<-to.daily(SVN9Y)
ESP9Y<-data.frame(read.csv(file.path(Investing.com,"ESP9Y.csv")))
Investing.csv.xts(ESP9Y, type = "y")
ESP9Y<-to.daily(ESP9Y)
LKA9Y<-data.frame(read.csv(file.path(Investing.com,"LKA9Y.csv")))
Investing.csv.xts(LKA9Y, type = "y")
LKA9Y<-to.daily(LKA9Y)
CHE9Y<-data.frame(read.csv(file.path(Investing.com,"CHE9Y.csv")))
Investing.csv.xts(CHE9Y, type = "y")
CHE9Y<-to.daily(CHE9Y)
GBR9Y<-data.frame(read.csv(file.path(Investing.com,"GBR9Y.csv")))
Investing.csv.xts(GBR9Y, type = "y")
GBR9Y<-to.daily(GBR9Y)
AUS10Y<-data.frame(read.csv(file.path(Investing.com,"AUS10Y.csv")))
Investing.csv.xts(AUS10Y, type = "y")
AUS10Y<-to.daily(AUS10Y)
AUT10Y<-data.frame(read.csv(file.path(Investing.com,"AUT10Y.csv")))
Investing.csv.xts(AUT10Y, type = "y")
AUT10Y<-to.daily(AUT10Y)
BGD10Y<-data.frame(read.csv(file.path(Investing.com,"BGD10Y.csv")))
Investing.csv.xts(BGD10Y, type = "y")
BGD10Y<-to.daily(BGD10Y)
BEL10Y<-data.frame(read.csv(file.path(Investing.com,"BEL10Y.csv")))
Investing.csv.xts(BEL10Y, type = "y")
BEL10Y<-to.daily(BEL10Y)
BRA10Y<-data.frame(read.csv(file.path(Investing.com,"BRA10Y.csv")))
Investing.csv.xts(BRA10Y, type = "y")
BRA10Y<-to.daily(BRA10Y)
BGR10Y<-data.frame(read.csv(file.path(Investing.com,"BGR10Y.csv")))
Investing.csv.xts(BGR10Y, type = "y")
BGR10Y<-to.daily(BGR10Y)
CAN10Y<-data.frame(read.csv(file.path(Investing.com,"CAN10Y.csv")))
Investing.csv.xts(CAN10Y, type = "y")
CAN10Y<-to.daily(CAN10Y)
CHL10Y<-data.frame(read.csv(file.path(Investing.com,"CHL10Y.csv")))
Investing.csv.xts(CHL10Y, type = "y")
CHL10Y<-to.daily(CHL10Y)
CHN10Y<-data.frame(read.csv(file.path(Investing.com,"CHN10Y.csv")))
Investing.csv.xts(CHN10Y, type = "y")
CHN10Y<-to.daily(CHN10Y)
COL10Y<-data.frame(read.csv(file.path(Investing.com,"COL10Y.csv")))
Investing.csv.xts(COL10Y, type = "y")
COL10Y<-to.daily(COL10Y)
HRV10Y<-data.frame(read.csv(file.path(Investing.com,"HRV10Y.csv")))
Investing.csv.xts(HRV10Y, type = "y")
HRV10Y<-to.daily(HRV10Y)
CZE10Y<-data.frame(read.csv(file.path(Investing.com,"CZE10Y.csv")))
Investing.csv.xts(CZE10Y, type = "y")
CZE10Y<-to.daily(CZE10Y)
DNK10Y<-data.frame(read.csv(file.path(Investing.com,"DNK10Y.csv")))
Investing.csv.xts(DNK10Y, type = "y")
DNK10Y<-to.daily(DNK10Y)
#### Convert government-bond yield data .............................. ####
# The original script repeated the same three statements once per instrument
# (~195 hand-written copies).  This loop is data-driven instead: for each
# symbol it reconstructs and evaluates the *identical* top-level expressions
# with bquote(), so functions that rely on non-standard evaluation keep
# working exactly as before:
#   - Investing.csv.xts(<SYM>, type = "y") is called unassigned, i.e. for its
#     side effects (presumably it converts/assigns in the caller's
#     environment via the deparsed argument name -- TODO confirm against its
#     definition earlier in this file);
#   - to.daily(<SYM>) names its output columns after the deparsed argument
#     (e.g. "EGY10Y.Open"), which a plain get(sym) call would break.
# Net effect: each symbol below ends up as a daily-frequency object in the
# global environment, byte-for-byte equivalent to the hand-written version.
yield.symbols <- c(
  "EGY10Y","FIN10Y","FRA10Y","DEU10Y","GRC10Y","HKG10Y","HUN10Y","ISL10Y",
  "IND10Y","IDN10Y","ISR10Y","ITA10Y","JPN10Y","JOR10Y","KEN10Y","LTU10Y",
  "MYS10Y","MLT10Y","MUS10Y","MEX10Y","MAR10Y","NAM10Y","NLD10Y","NZL10Y",
  "NGA10Y","NOR10Y","PAK10Y","PHL10Y","POL10Y","PRT10Y","QAT10Y","ROU10Y",
  "RUS10Y","SGP10Y","SVK10Y","SVN10Y","ZAF10Y","KOR10Y","ESP10Y","LKA10Y",
  "SWE10Y","CHE10Y","TWN10Y","THA10Y","TUR10Y","UGA10Y","GBR10Y","USA10Y",
  "VNM10Y","IND11Y","AUS12Y","IND12Y","KEN12Y","POL12Y","SVK12Y","THA12Y",
  "GBR12Y","BWA13Y","IND13Y","IND14Y","PAK14Y","IRL15Y","SVK14Y","THA14Y",
  "AUS15Y","AUT15Y","BGD15Y","BEL15Y","CHN15Y","COL15Y","CZE15Y","FIN15Y",
  "FRA15Y","DEU15Y","GRC15Y","HKG15Y","HUN15Y","IND15Y","IDN15Y","ITA15Y",
  "JPN15Y","KEN15Y","MYS15Y","MUS15Y","MEX15Y","MAR15Y","NAM15Y","NLD15Y",
  "NZL15Y","NGA15Y","PER15Y","PRT15Y","QAT15Y","RUS15Y","SGP15Y","ZAF15Y",
  "ESP15Y","LKA15Y","SWE15Y","CHE15Y","THA15Y","UGA15Y","GBR15Y","VEN15Y",
  "THA16Y","IND19Y","IRL20Y","AUS20Y","AUT20Y","BGD20Y","BEL20Y","CAN20Y",
  "CHN20Y","CZE20Y","FRA20Y","DEU20Y","GRC20Y","IDN20Y","ITA20Y","JPN20Y",
  "KEN20Y","MYS20Y","MLT20Y","MUS20Y","MEX20Y","NAM20Y","NLD20Y","NZL20Y",
  "NGA20Y","PAK20Y","PER20Y","PHL20Y","PRT20Y","RUS20Y","SGP20Y","SVK20Y",
  "ZAF20Y","KOR20Y","ESP20Y","SWE20Y","CHE20Y","TWN20Y","THA20Y","GBR20Y",
  "VEN20Y","IND24Y","AUT25Y","FRA25Y","DEU25Y","GRC25Y","IDN25Y","IRL30Y",
  "KEN25Y","MLT25Y","NLD25Y","PHL25Y","ZAF25Y","ESP25Y","GBR25Y","AUS30Y",
  "AUT30Y","CAN30Y","CHN30Y","DNK30Y","FIN30Y","FRA30Y","DEU30Y","IND30Y",
  "IDN30Y","ISR30Y","ITA30Y","JPN30Y","MYS30Y","MEX30Y","NLD30Y","PER30Y",
  "PRT30Y","QAT30Y","SGP30Y","ZAF30Y","KOR30Y","ESP30Y","CHE30Y","TWN30Y",
  "GBR30Y","USA30Y","JPN40Y","GBR40Y","AUT50Y","CZE50Y","FRA50Y","ITA50Y",
  "KOR50Y","CHE50Y","GBR50Y"
)
for (sym in yield.symbols) {
  s <- as.name(sym)                # the bare symbol, e.g. `EGY10Y`
  csv <- paste0(sym, ".csv")       # its CSV file name, e.g. "EGY10Y.csv"
  # <SYM> <- data.frame(read.csv(file.path(Investing.com, "<SYM>.csv")))
  eval(bquote(.(s) <- data.frame(read.csv(file.path(Investing.com, .(csv))))))
  # Investing.csv.xts(<SYM>, type = "y")  -- unassigned; side effects only
  eval(bquote(Investing.csv.xts(.(s), type = "y")))
  # <SYM> <- to.daily(<SYM>)  -- literal symbol keeps "<SYM>.Open" etc. names
  eval(bquote(.(s) <- to.daily(.(s))))
}
# Remove loop scaffolding so the workspace matches the hand-written version.
rm(yield.symbols, sym, s, csv)
#### Convert ETF data ................................................ ####
# Same data-driven pattern as the bond-yield section above, except that
# Investing.csv.xts() is called WITHOUT the type argument for ETFs.  Each
# top-level expression is rebuilt with bquote() and evaluated so that
# non-standard evaluation is preserved: Investing.csv.xts(<SYM>) runs for its
# side effects on the literal symbol, and to.daily(<SYM>) derives its column
# names ("<SYM>.Open", ...) from the deparsed argument.  Every ETF below ends
# up as a daily-frequency object in the global environment, exactly as in the
# original hand-written triplets.
etf.symbols <- c(
  "XLY","XLP","XLE","XLF","XLV","XLI","XLB","XLRE","XLK","XLU",
  "XITK","XNTK","XAR","KBE","XBI","KCE","XHE","XHS","XHB","KIE",
  "XWEB","XME","XES","XOP","XPH","KRE","XRT","XSD","XSW","XTH",
  "XTL","XTN","RWO","RWX","RWR","GLD","SLV","GLDW","GII","GNR",
  "NANR"
)
for (sym in etf.symbols) {
  s <- as.name(sym)                # the bare symbol, e.g. `XLY`
  csv <- paste0(sym, ".csv")       # its CSV file name, e.g. "XLY.csv"
  # <SYM> <- data.frame(read.csv(file.path(Investing.com, "<SYM>.csv")))
  eval(bquote(.(s) <- data.frame(read.csv(file.path(Investing.com, .(csv))))))
  # Investing.csv.xts(<SYM>)  -- unassigned; side effects only (no type here)
  eval(bquote(Investing.csv.xts(.(s))))
  # <SYM> <- to.daily(<SYM>)  -- literal symbol keeps "<SYM>.Open" etc. names
  eval(bquote(.(s) <- to.daily(.(s))))
}
# Remove loop scaffolding so the workspace matches the hand-written version.
rm(etf.symbols, sym, s, csv)
|
bdd70f7a32bc0ec5d943ae1e78c8e3ddc2a189a7
|
686800c5ddb65505335f30ded6fcf96a6afe66e2
|
/man/SurvivalDiagnostics.Rd
|
7f9e0313bb04b5c77f3b7d3722ffc0b4d3661031
|
[] |
no_license
|
cran/RcmdrPlugin.survival
|
2b9e889c64e788a4fb0e6bb96b89293d0c8b7bbd
|
86f846e93563d94ceb23c89ac08eb8ae129f92a4
|
refs/heads/master
| 2022-09-25T09:24:11.554730
| 2022-09-20T16:00:02
| 2022-09-20T16:00:02
| 17,693,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,277
|
rd
|
SurvivalDiagnostics.Rd
|
\name{SurvivalDiagnostics}
\alias{SurvivalDiagnostics}
\alias{crPlots}
\alias{crPlots.coxph}
\alias{dfbeta.coxph}
\alias{plot.dfbeta.coxph}
\alias{dfbetas.coxph}
\alias{plot.dfbetas.coxph}
\alias{dfbeta.survreg}
\alias{plot.dfbeta.survreg}
\alias{dfbetas.survreg}
\alias{plot.dfbetas.survreg}
\alias{MartingalePlots}
\alias{MartingalePlots.coxph}
\alias{testPropHazards}
\alias{testPropHazards.coxph}
\title{
Diagnostics for Survival Regression Models
}
\description{
These are primarily convenience functions for the \pkg{RcmdrPlugin.survival} package, to produce diagnostics for \code{\link[survival]{coxph}} and \code{\link[survival]{survreg}} models in a convenient form for plotting via the package's GUI.
}
\usage{
crPlots(model, ...)
\method{crPlots}{coxph}(model, ...)
\method{dfbeta}{coxph}(model, ...)
\method{plot}{dfbeta.coxph}(x, ...)
\method{dfbetas}{coxph}(model, ...)
\method{plot}{dfbetas.coxph}(x, ...)
\method{dfbeta}{survreg}(model, ...)
\method{plot}{dfbeta.survreg}(x, ...)
\method{dfbetas}{survreg}(model, ...)
\method{plot}{dfbetas.survreg}(x, ...)
MartingalePlots(model, ...)
\method{MartingalePlots}{coxph}(model, ...)
testPropHazards(model, ...)
\method{testPropHazards}{coxph}(model, ...)
}
\arguments{
\item{model, x}{a Cox regression or parametric survival regression model, as appropriate.}
\item{\dots}{arguments to be passed down.}
}
\details{
\itemize{
\item \code{crPlots.coxph} is a method for the \code{\link[car]{crPlots}} function in the \pkg{car} package, to create component+residual (partial-residual) plots, using \code{\link[survival]{residuals.coxph}} and \code{\link[survival]{predict.coxph}} in the \pkg{survival} package.
\item \code{testPropHazards} is essentially a wrapper for the \code{\link[survival]{cox.zph}} function in the \pkg{survival} package.
\item \code{MartingalePlots} creates null-model Martingale plots for Cox regression models, using the \code{\link[survival]{residuals.coxph}} function in the \pkg{survival} package.
\item \code{dfbeta.coxph} and \code{dfbetas.coxph} provide methods for the standard \code{\link{dfbeta}} and \code{\link{dfbetas}} functions, using the \code{\link[survival]{residuals.coxph}} function in the \pkg{survival} package for computation. \code{plot.dfbeta.coxph} and \code{plot.dfbetas.coxph} are plot methods for the objects produced by these functions.
\item \code{dfbeta.survreg}, \code{dfbetas.survreg}, \code{plot.dfbeta.survreg} and \code{plot.dfbetas.survreg} are similar methods for \code{\link[survival]{survreg}} objects.
}
}
\value{
Most of these functions create graphs and don't return useful values; the \code{dfbeta} and \code{dfbetas} methods create matrices of dfbeta and dfbetas values.
}
\author{John Fox <jfox@mcmaster.ca>}
\references{
John Fox, Marilia Sa Carvalho (2012).
The RcmdrPlugin.survival Package: Extending the R Commander Interface to Survival Analysis.
\emph{Journal of Statistical Software}, 49(7), 1-32. \doi{10.18637/jss.v049.i07}.
}
\seealso{
\code{\link[survival]{coxph}}, \code{\link[survival]{survreg}}, \code{\link[car]{crPlots}}, \code{\link[survival]{residuals.coxph}}, \code{\link[survival]{residuals.survreg}}, \code{\link[survival]{predict.coxph}}, \code{\link[survival]{cox.zph}}
}
|
53355a454b12027ab9637b6f28e7bc73d3f0c858
|
c95063c2ba103110ff101122617e1356b3b86f31
|
/man/labplot.Rd
|
50f7cb3c606db5381f035e8f58bdbf7b2e4153ba
|
[
"MIT"
] |
permissive
|
xinxiong0238/PostSequelae
|
661a8213014691e026eea01f32e43922a5535c5c
|
1c758218de50cc281dffc0eb552c2d31fd6356ed
|
refs/heads/master
| 2023-03-24T10:59:11.348512
| 2021-03-18T08:55:08
| 2021-03-18T08:55:08
| 349,000,587
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 528
|
rd
|
labplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labplot.R
\name{labplot}
\alias{labplot}
\title{Density plot}
\usage{
labplot(
PatientObersvations_pro,
loinc_mapping,
windows.size,
windows.min,
windows.max
)
}
\arguments{
\item{PatientObersvations_pro}{Dataframe; \code{PatientObersvations_pro} output from \code{\link{main}}.}
\item{loinc_mapping}{Dataframe; connecting loinc codes to detailed description.}
}
\value{
A ggplot object.
}
\description{
Create density plot for lab data
}
|
4a8b1bbfc5340c508bc7fae54772d83b346f4569
|
4b9b4b3b829e07889b14d33043ca81d124e31cf7
|
/R/score-functions.R
|
c091f791519c0b2bcdd235a0598e2e728c3ec80d
|
[
"Apache-2.0"
] |
permissive
|
sverchkov/bionetwork
|
8d2cb008cf30c397c45a56d1f44510a76980ec7c
|
8ef0ca221b8d7b71384af72c0964d4d9772761f9
|
refs/heads/master
| 2021-01-15T10:17:31.773237
| 2016-08-25T22:48:34
| 2016-08-25T22:48:34
| 42,209,208
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,883
|
r
|
score-functions.R
|
# Score functions
#' Likelihood scores broken down by reporter
#'
#' Likelihood calculation for a full network, with the score contribution of each
#' reporter returned in a vector.
#'
#' A reporter's total score is built from two parts: single-KO ("simple
#' ancestry") terms for every actor, and double-KO terms for every pair of
#' actors, whose form depends on whether neither, one, or both actors are
#' ancestors of the reporter (and, when both are, on whether the two actors
#' lie on a shared or on independent pathways).
#' @param ancestry - the ancestry matrix; rows are named actors (possibly
#'   replicated, with dot-suffixed names that stripDots() collapses), and the
#'   columns are the actors followed by the reporters
#' @param lll - the LocalLogLikelihoods object
#' @return a vector with a log-likelihood for each reporter
scoreLikelihoodsPerReporter = function ( ancestry, lll ) {
  actor.list = rownames( ancestry )
  nA = length( actor.list )
  nR = howManyReporters( lll )
  unique.actors = getActors( lll )
  # For a reporter r
  # Ancestors of r : ancestry[, r ]
  # Named : actor.list[ ancestry[, r ] ]
  # Named as in lll : stripDots( actor.list[ ancestry[, r ] ] )
  # Selected in lll : unique.actors %in% stripDots( actor.list ... )
  # non.ancestors: logical unique-actors x reporters matrix marking, per
  # reporter, the unique actors none of whose replicates are its ancestors.
  non.ancestors = sapply( as.list( getReporters( lll ) ), function( r ) {
    unique.actors %in% stripDots( actor.list[ !ancestry[, r ] ] )
  } )
  # Simple ancestry component: start from ancestor scores, overwrite the
  # non-ancestor cells with the non-ancestry scores.
  simple.ancestry.scores = ancestryScoreMatrix( lll )
  simple.ancestry.scores[ non.ancestors ] = nonAncestryScoreMatrix( lll )[ non.ancestors ]
  # Clean NAs (missing local scores contribute nothing)
  simple.ancestry.scores[ is.na( simple.ancestry.scores ) ] = 0
  # Get scores
  scores = colSums( simple.ancestry.scores )
  # Scoring 2le KOs requires some special care. For "neither ancestor" we need to make
  # sure we aren't scoring a reporter multiple times, so we need to iterate over the
  # unique actors. For the other states we use the pooled versions (and as long as our
  # starting ancestry is correct there shouldn't be any double-counting)
  # 2le KO "neither ancestor" component:
  # NOTE(review): both inner loops below run `b in 1:a`, which includes
  # b == a (an actor paired with itself); getScoreBounds uses 1:(a-1)
  # instead -- confirm which is intended. Also assumes >= 2 actors
  # (2:length(...) runs backwards otherwise).
  for ( a in 2:length( unique.actors ) ){
    for ( b in 1:a ){
      # Reporters with no replicate of either unique actor among ancestors.
      selected.reporters = !apply(
        ancestry[ stripDots( actor.list ) %in% unique.actors[ c(a,b) ],
                  nA + ( 1:nR ) ],
        2, any )
      scores[ selected.reporters ] = scores[ selected.reporters ] + replaceNAs( scoreNeitherAncestor( lll, unique.actors[ a ], unique.actors[ b ] )[ selected.reporters ] )
    }
  }
  # 2le KO components for remainder:
  for ( a in 2:nA ){
    actor.a.pooled = actor.list[a]
    actor.a.unique = stripDots( actor.a.pooled )
    for ( b in 1:a ){
      actor.b.pooled = actor.list[b]
      actor.b.unique = stripDots( actor.b.pooled )
      # Ancestor masks:
      # ancestry[ a, nA + ( 1:nR ) ] <- ancestors of a
      only.a = ancestry[ a, nA + (1:nR) ] & !ancestry[ b, nA + (1:nR) ]
      only.b = ancestry[ b, nA + (1:nR) ] & !ancestry[ a, nA + (1:nR) ]
      both = ancestry[ a, nA + (1:nR) ] & ancestry[ b, nA + (1:nR) ]
      # Only A ancestor score
      scores[ only.a ] = scores[ only.a ] + replaceNAs( scoreSingleAncestor( lll, actor.a.unique, actor.b.unique )[ only.a ] )
      # Only B ancestor score
      scores[ only.b ] = scores[ only.b ] + replaceNAs( scoreSingleAncestor( lll, actor.b.unique, actor.a.unique )[ only.b ] )
      # Both: shared vs independent pathways, decided by the actors'
      # ancestry relation to each other.
      scores[ both ] = scores[ both ] + replaceNAs(
        if ( ancestry[ a, b ] || ancestry[ b, a ] ){
          # Shared pathway score
          scoreSharedPathways( lll, actor.a.unique, actor.b.unique )[ both ]
        } else {
          # Independent pathway score
          scoreIndependentPathways( lll, actor.a.unique, actor.b.unique )[ both ]
        } )
    }
  }
  return ( scores )
}
#' The heuristic + score for A*
#'
#' This one uses the log-likelihood interface. Scores a partially decided
#' network: edges already fixed by the search (up to `depth`) are treated as
#' certain; wherever the derived ancestry is still uncertain, the best
#' (maximal) scoring option is taken, making the result an optimistic bound
#' suitable as an A* heuristic.
#' @param lll - the LocalLogLikelihoods object
#' @param reporterIndex - index of the reporter being scored
#' @param depth - number of edge decisions already made by the search
#' @param adjacency - adjacency matrix of the candidate network
#' @return a single heuristic log-likelihood score
getHeuristicScore = function ( lll, reporterIndex, depth, adjacency ){
  n = nrow( adjacency )
  actors = getActors( lll )
  # Mark certain/free edges in adjacency matrix.
  uncertain = getUncertaintyMatrix( depth, n, n+1 )
  #print( uncertain )
  # Derive ancestry
  ancestral = deriveAncestry( adjacency, uncertain )
  uncertain = ancestral$uncertain
  ancestry = ancestral$ancestry
  # For debugging
  #print( showUncertainAncestry( ancestry, uncertain ) )
  # Score
  # Simple ancestry component.
  # Column n + 1 holds each actor's relation to the reporter.
  score =
    # Certain ancestors
    sum( ancestryScoreMatrix( lll )[ ancestry[, n + 1 ] & !uncertain[, n + 1 ], reporterIndex], na.rm = TRUE ) +
    # Certain non-ancestors
    sum( nonAncestryScoreMatrix( lll )[ !ancestry[, n + 1 ] & !uncertain[, n + 1 ], reporterIndex ], na.rm = TRUE ) +
    # Uncertain: optimistic choice of the better of the two options per actor
    sum( pmax(
      ancestryScoreMatrix( lll )[ uncertain[, n + 1 ], reporterIndex ],
      nonAncestryScoreMatrix( lll )[ uncertain[, n + 1 ], reporterIndex ] ), na.rm = TRUE )
  # Sorta hacky (all-NA sums above can leave score NA)
  if ( is.na( score ) ) score = 0
  # 2le KO component:
  # NOTE(review): assumes n >= 2 (2:n runs backwards otherwise), and the
  # inner loop `b in 1:a` includes b == a, pairing an actor with itself;
  # getScoreBounds uses 1:(a-1) -- confirm which is intended.
  for( a in 2:n ){
    for ( b in 1:a ){
      # Candidate scores for each possible pairwise-relation case; impossible
      # cases are knocked out with -Inf below and the best survivor is added.
      rel.score = c(
        # Neither ancestor score
        neither = scoreNeitherAncestor( lll, actors[a], actors[b] )[ reporterIndex ],
        # Only A ancestor score
        only.a = scoreSingleAncestor( lll, actors[a], actors[b] )[ reporterIndex ],
        # Only B ancestor score
        only.b = scoreSingleAncestor( lll, actors[b], actors[a] )[ reporterIndex ],
        # Both, independent pathway score
        independent = scoreIndependentPathways( lll, actors[a], actors[b] )[ reporterIndex ],
        # Both, shared pathway score
        shared = scoreSharedPathways( lll, actors[a], actors[b] )[ reporterIndex ] )
      # Sorta hacky (missing local scores become neutral 0)
      rel.score[ is.na( rel.score ) ] = 0
      # Now go through the cases
      if ( !uncertain[ a, n + 1 ] ){ # Only certain relations eliminate cases
        if ( ancestry[ a, n + 1 ] ) { # a is ancestor
          rel.score[ c( "only.b", "neither" ) ] = -Inf
        } else {
          rel.score[ c( "only.a", "independent", "shared" ) ] = -Inf
        }
      }
      if ( !uncertain[ b, n + 1 ] ){
        if ( ancestry[ b, n + 1 ] ){ # b is ancestor
          rel.score[ c( "only.a", "neither" ) ] = -Inf
        } else { # b is not ancestor
          rel.score[ c( "only.b", "independent", "shared" ) ] = -Inf
        }
      }
      # Certain ancestry between a and b rules out "independent"; certain
      # non-ancestry in both directions rules out "shared".
      if ( ( !uncertain[ a, b ] && ancestry[ a, b ] ) || ( !uncertain[ b, a ] && ancestry[ b, a ] ) ) {
        rel.score[ "independent" ] = -Inf
      } else if ( !uncertain[ a, b ] && !uncertain[ b, a ] && !ancestry[ a, b ] && !ancestry[ b, a ] ) {
        rel.score[ "shared" ] = -Inf
      }
      score = score + max( rel.score )
    }
  }
  # For debugging
  #print( score )
  return ( score )
}
#' Score bounds for underdetermined graph
#'
#' Computes the score bounds for an underdetermined graph described by two matrices,
#' possible.ancestors and possible.nonancestors. These describe ancestry relationships
#' derived from a graph with present, absent, and undetermined edges.
#' The number of reporters in the result matrix is based on the number of reporters in
#' the input ancestry matrices. It is assumed that the input matrices have the same
#' dimensions.
#'
#' @param lll - The LocalLogLikelihoods object
#' @param possible.ancestors - boolean matrix indicating possible ancestors;
#'   rows are named actors, columns are the actors followed by the reporters
#' @param possible.nonancestors - boolean matrix indicating possible
#'   nonancestors, with the same layout as possible.ancestors
#' @return A 2x|reporters| matrix, with the top row indicating upper and the bottom row indicating lower bounds on the score.
getScoreBounds = function ( lll, possible.ancestors, possible.nonancestors ){
  # Init block: get actors, reporters, dimensions.
  actors = rownames( possible.ancestors )
  n = length( actors )
  if ( nrow( possible.nonancestors ) != n )
    stop( "possible.ancestors and possible.nonancestors have different numbers of rows!" )
  reporters = colnames( possible.ancestors )[-(1:n)]
  nR = length( reporters )
  # Simple ancestry component: stack the ancestor and non-ancestor score
  # matrices along a third dimension, then mask out impossible options as NA.
  simple.ancestry.scores = abind::abind( ancestryScoreMatrix( lll )[actors,reporters], nonAncestryScoreMatrix( lll )[actors,reporters], along = 3 )
  # BUGFIX: the masks were indexed linearly as possible.ancestors[ n+(1:nR) ],
  # which selects nR single elements (recycled over the n x nR slice) instead
  # of the full actors x reporters submatrix; index by names instead.
  simple.ancestry.scores[,,1][ !possible.ancestors[ actors, reporters ] ] = NA
  simple.ancestry.scores[,,2][ !possible.nonancestors[ actors, reporters ] ] = NA
  # Score: per actor/reporter cell take the best (upper) and worst (lower)
  # remaining option, then sum the bounds over actors.
  score = rbind(
    "upper" = colSums( apply( simple.ancestry.scores, 1:2, max, na.rm = TRUE ) ),
    "lower" = colSums( apply( simple.ancestry.scores, 1:2, min, na.rm = TRUE ) ) )
  # 2le KO component (assumes n >= 2; 2:n would run backwards otherwise):
  for( a in 2:n ){
    for ( b in 1:(a-1) ){
      # Start with an empty bound interval per reporter; each feasible case
      # below widens it via updateBounds().
      rel.score = matrix( rep( c( -Inf, Inf ), nR ), nrow = 2, ncol = nR,
                          dimnames = list( c( "upper", "lower" ), reporters ) )
      # Neither ancestor score present when both A and B are possible nonancestors
      select = possible.nonancestors[ a, reporters ] & possible.nonancestors[ b, reporters ]
      if( any( select ) )
        rel.score[ , select ] =
          updateBounds( rel.score[ , select ],
                        scoreNeitherAncestor( lll, actors[ a ], actors[ b ] )[ reporters[ select ] ] )
      # Only A ancestor score present when A is possible ancestor and B possible nonancestor
      select = possible.ancestors[ a, reporters ] & possible.nonancestors[ b, reporters ]
      if( any( select ) )
        rel.score[ , select ] =
          updateBounds( rel.score[ , select ],
                        scoreSingleAncestor( lll, actors[ a ], actors[ b ] )[ reporters[ select ] ] )
      # Only B ancestor score present when B is possible ancestor and A possible nonancestor
      select = possible.ancestors[ b, reporters ] & possible.nonancestors[ a, reporters ]
      if( any( select ) )
        rel.score[ , select ] =
          updateBounds( rel.score[ , select ],
                        scoreSingleAncestor( lll, actors[ b ], actors[ a ] )[ reporters[ select ] ] )
      # Both+independent when A,B possible nonancestors of each other and possible ancestors of R
      if ( possible.nonancestors[ a, b ] && possible.nonancestors[ b, a ] ) {
        # BUGFIX: the second conjunct previously re-tested actor A
        # (possible.ancestors[ a, reporters ] twice); it must test actor B,
        # mirroring the shared-pathway case below.
        select = possible.ancestors[ a, reporters ] & possible.ancestors[ b, reporters ]
        if( any( select ) )
          rel.score[ , select ] =
            updateBounds( rel.score[ , select ],
                          scoreIndependentPathways( lll, actors[ a ], actors[ b ] )[ reporters[ select ] ] )
      }
      # Both+shared when A,B have possible ancestry and both possible ancestors of R
      if ( possible.ancestors[ a, b ] || possible.ancestors[ b, a ] ) {
        select = possible.ancestors[ a, reporters ] & possible.ancestors[ b, reporters ]
        if( any( select ) )
          rel.score[ , select ] =
            updateBounds( rel.score[ , select ],
                          scoreSharedPathways( lll, actors[ a ], actors[ b ] )[ reporters[ select ] ] )
      }
      score = score + rel.score
    }
  }
  return ( score )
}
#' Helper for updating upper/lower-bound matrix
#'
#' Folds a vector of candidate scores into an existing bound structure.
#' @param bounds - either a 2-row matrix with rows "upper"/"lower" (multiple
#'   reporters) or, when a single reporter was selected upstream and the
#'   matrix collapsed, a named vector with "upper"/"lower" entries
#' @param scores - vector of score candidates
#' @return updated bounds where "upper" is the elementwise max of the old
#'   upper bound and the scores, and "lower" the elementwise min
updateBounds = function ( bounds, scores ) {
  if ( length( bounds ) > 2 ) {
    # Matrix case: compare each column's bounds against the score vector.
    widened.upper = pmax( bounds[ "upper", ], scores )
    widened.lower = pmin( bounds[ "lower", ], scores )
  } else {
    # Collapsed single-reporter case: bounds is a named length-2 vector.
    widened.upper = max( bounds[ "upper" ], scores )
    widened.lower = min( bounds[ "lower" ], scores )
  }
  rbind( "upper" = widened.upper, "lower" = widened.lower )
}
#' The heuristic + score for A*
#'
#' Legacy version of getHeuristicScore that works on the older `laps` score
#' interface: non-ancestor scores are derived from the ancestor scores via
#' log1mexp() instead of a separate non-ancestry score matrix, and the
#' double-KO component only distinguishes shared vs independent pathways.
getHeuristicScoreOld = function ( laps, reporterIndex, depth, adjacency ){
  n = nrow( adjacency )
  # Mark certain/free edges in adjacency matrix.
  uncertain = getUncertaintyMatrix( depth, n, n+1 )
  #print( uncertain )
  # Derive ancestry
  ancestral = deriveAncestry( adjacency, uncertain )
  uncertain = ancestral$uncertain
  ancestry = ancestral$ancestry
  # For debugging
  #print( showUncertainAncestry( ancestry, uncertain ) )
  # Score
  # Simple ancestry component:
  local.scores = ancestryScoreMatrix( laps )[,reporterIndex]
  score =
    # Certain ancestors
    sum( local.scores[ ancestry[, n + 1 ] & !uncertain[, n + 1 ] ], na.rm=TRUE ) +
    # Certain non-ancestors (complement probability via log1mexp)
    sum( log1mexp(
      local.scores[ !ancestry[, n + 1 ] & !uncertain[, n + 1 ] ] ), na.rm=TRUE ) +
    # Uncertain: optimistic choice of the better option per actor
    if ( any ( uncertain[, n + 1 ] ) ){
      sum( mapply(
        max,
        local.scores[ uncertain[, n + 1 ] ],
        log1mexp( local.scores[ uncertain[, n + 1 ] ] ) ), na.rm = TRUE )
    }else 0
  # 2le KO component: only actors that are (or may be) ancestors contribute.
  actors = which( ancestry[, n + 1 ] | uncertain[, n + 1 ] )
  if ( length( actors ) > 1 ){
    for ( i in 2:length( actors ) ){
      for ( b in actors[ 1:( i - 1 ) ] ){
        a = actors[ i ]
        sp.score = scoreSharedPathways( laps, a, b )[reporterIndex]
        ip.score = scoreIndependentPathways( laps, a, b )[reporterIndex]
        #print( score )
        #print( sp.score )
        #print( ip.score )
        # If any relevant relation is still uncertain, take the optimistic
        # max; otherwise the a/b ancestry decides shared vs independent.
        score = score +
          if ( any( uncertain[ c( a, b ), c( a, b, n + 1 ) ] ) ){
            max( sp.score, ip.score )
          } else {
            if ( ancestry[a,b] || ancestry[b,a] )
              sp.score
            else
              ip.score
          }
      }
    }
  }
  # For debugging
  #print( score )
  return ( score )
}
|
5d42b6155fe58c9920ce0e3f003ad6d9c7e8c459
|
394b0b27a68e590165d0dfb9243e7b2d5deaf4d5
|
/man/sample_chat_sentiment_syu.Rd
|
2ab587ec6ee7b11bf693f388f427cbafbb2bcc04
|
[
"MIT"
] |
permissive
|
NastashaVelasco1987/zoomGroupStats
|
5b414b28e794eecbb9227d4b1cd81d46b00576e4
|
8f4975f36b5250a72e5075173caa875e8f9f368d
|
refs/heads/main
| 2023-05-05T18:23:17.777533
| 2021-05-24T16:08:23
| 2021-05-24T16:08:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,523
|
rd
|
sample_chat_sentiment_syu.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sample_chat_sentiment_syu}
\alias{sample_chat_sentiment_syu}
\title{Parsed chat file in a 'Zoom' meeting with sentiment analysis using syuzhet}
\format{
A data frame with 30 rows of 30 variables:
\describe{
\item{batchMeetingId}{a character meeting identification variable}
\item{messageId}{an incremented numeric identifier for a marked chat message}
\item{userName}{'Zoom' display name attached to the message sender}
\item{messageSeconds}{when the message was posted as the number of seconds from the start of the recording}
\item{messageTime}{timestamp for message}
\item{message}{text of the message}
\item{messageLanguage}{language code of the message}
\item{userEmail}{character email address}
\item{userId}{numeric id of each speaker}
\item{wordCount}{number of words in this utterance}
\item{syu_anger}{number of anger words}
\item{syu_anticipation}{number of anticipation words}
\item{syu_disgust}{number of disgust words}
\item{syu_fear}{number of fear words}
\item{syu_joy}{number of joy words}
\item{syu_sadness}{number of sadness words}
\item{syu_surprise}{number of surprise words}
\item{syu_trust}{number of trust words}
\item{syu_negative}{number of negative words}
\item{syu_positive}{number of positive words}
}
}
\source{
\url{http://zoomgroupstats.org/}
}
\usage{
sample_chat_sentiment_syu
}
\description{
Parsed chat file in a 'Zoom' meeting with sentiment analysis using syuzhet
}
\keyword{datasets}
|
b8d6535965166b08710634b80ecca248d4e63e76
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.storage/man/storagegateway_describe_vtl_devices.Rd
|
be12cac12d69e960550c7f2896e9e529b9cef79f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,241
|
rd
|
storagegateway_describe_vtl_devices.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storagegateway_operations.R
\name{storagegateway_describe_vtl_devices}
\alias{storagegateway_describe_vtl_devices}
\title{Returns a description of virtual tape library (VTL) devices for the
specified tape gateway}
\usage{
storagegateway_describe_vtl_devices(
GatewayARN,
VTLDeviceARNs = NULL,
Marker = NULL,
Limit = NULL
)
}
\arguments{
\item{GatewayARN}{[required]}
\item{VTLDeviceARNs}{An array of strings, where each string represents the Amazon Resource
Name (ARN) of a VTL device.
All of the specified VTL devices must be from the same gateway. If no
VTL devices are specified, the result will contain all devices on the
specified gateway.}
\item{Marker}{An opaque string that indicates the position at which to begin
describing the VTL devices.}
\item{Limit}{Specifies that the number of VTL devices described be limited to the
specified number.}
}
\description{
Returns a description of virtual tape library (VTL) devices for the specified tape gateway. In the response, Storage Gateway returns VTL device information.
See \url{https://www.paws-r-sdk.com/docs/storagegateway_describe_vtl_devices/} for full documentation.
}
\keyword{internal}
|
0b54a9caf34aa70fc29a318b7637b29470433ba6
|
d4b17472248cfbd9d9179d593e476574ab649fd3
|
/data_processing/tertiary2pdb.R
|
c2e9fa9d7a64d1ff5dcb40a42805d4010ad67a4a
|
[
"MIT"
] |
permissive
|
Maikuraky/rgn
|
294f013df18d381dabdc74919b0ff04668bfd556
|
167bed319d065056ac8464b67c6972c9a8f5f192
|
refs/heads/master
| 2022-03-02T04:05:43.569160
| 2019-10-21T14:42:19
| 2019-10-21T14:42:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,095
|
r
|
tertiary2pdb.R
|
# Convert a tertiary prediction from RGN into PDB file format
# Aleix Lafita - October 2019
#
# Reads an RGN "tertiary" coordinate file together with the matching FASTA
# sequence and writes a PDB file containing the three backbone atoms
# (N, CA, C) of each residue. The fixed-width PDB ATOM record columns are
# assembled by hand via computed left-padding widths.
library(argparse)
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(seqinr))
###################### Argparse #############################
# Default file names; overridden by the command-line options parsed below.
tertiary.in = "protein.tertiary"
fasta.in = "protein.fa"
pdb.out = "protein.pdb"
# create parser object
parser = ArgumentParser(
  description='Convert a tertiary prediction from RGN into a PDB file')
# specify our desired options
parser$add_argument("-t", "--tertiary", default=tertiary.in,
                    help="Coordinates from RGN for protein structure [default \"%(default)s\"]")
parser$add_argument("-f", "--fasta", default=fasta.in,
                    help="Protein sequence in fasta format [default \"%(default)s\"]")
parser$add_argument("-p", "--pdb", default=pdb.out,
                    help="Name of the output pdb formatted coordinates [default \"%(default)s\"]")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args = parser$parse_args()
tertiary.in = args$tertiary
fasta.in = args$fasta
pdb.out = args$pdb
# One-letter to three-letter amino-acid codes, upper-cased for PDB output
# and indexed by the one-letter code.
aa.codes = c(
  "A" = "Ala",
  "C" = "Cys",
  "D" = "Asp",
  "E" = "Glu",
  "F" = "Phe",
  "G" = "Gly",
  "H" = "His",
  "I" = "Ile",
  "K" = "Lys",
  "L" = "Leu",
  "M" = "Met",
  "N" = "Asn",
  "P" = "Pro",
  "Q" = "Gln",
  "R" = "Arg",
  "S" = "Ser",
  "T" = "Thr",
  "V" = "Val",
  "W" = "Trp",
  "Y" = "Tyr"
) %>% toupper()
############################# File parsing ###############################
# Parse the protein sequence and convert to DF
seq = read.fasta(fasta.in)
seqlen.fasta = length(getSequence(seq)[[1]])
seq.df = data.frame(
  pos = seq(1, seqlen.fasta, 1),
  seq = toupper(unlist(getSequence(seq))),
  stringsAsFactors = F
)
# Parse the tertiary coordinates (sep = "" splits on any whitespace)
coords = read.csv(
  tertiary.in,
  sep = "",
  comment.char = "#",
  header = F,
  stringsAsFactors = F
)
# Each residue contributes 3 backbone atoms, i.e. 3 columns per coordinate row.
seqlen.pdb = ncol(coords) / 3
# Stop if the length of the sequence is different than the tertiary
if(seqlen.fasta != seqlen.pdb)
  stop(sprintf("Sequence length in FASTA (%i) different than in tertiary (%i)", seqlen.fasta, seqlen.pdb))
seqlen = seqlen.fasta
############################# Convert ####################################
# After transposing, each row is one atom and V1/V2/V3 are its coordinates.
coords.mat = t(coords)
pdb.df = as.data.frame(coords.mat) %>%
  mutate(
    atomr = "ATOM",
    # Sequential atom serial number, 3 atoms per residue.
    atomid = seq(1, seqlen*3, 1),
    # s1n/s2n: left-padding spaces for the fixed-width atom-serial and
    # residue-number fields.
    s1n = 7 - ceiling(log10(atomid + 1)),
    atomn = rep(c(" N  ", " CA ", " C  "), seqlen),
    chainid = "A",
    resid = ceiling(atomid / 3),
    s2n = 4 - ceiling(log10(resid + 1)),
    # Coordinates are divided by 100 -- presumably RGN tertiary values are in
    # units of 1/100 Angstrom (picometers); TODO confirm against RGN docs.
    x = round(V1/100, 3),
    y = round(V2/100, 3),
    z = round(V3/100, 3),
    # sxn/syn/szn: left padding for the coordinate fields.
    # NOTE(review): ceiling(log10(.)) undercounts the digit count for values
    # that are exact powers of ten (10.0, 100.0, ...), which would shift the
    # column by one space -- confirm whether inputs can hit this.
    sxn = abs(x),
    sxn = ceiling(log10(sxn)),
    sxn = ifelse(abs(x) <= 1, 1, sxn),
    sxn = 8 - ifelse(x < 0, sxn +1, sxn),
    syn = abs(y),
    syn = ceiling(log10(syn)),
    syn = ifelse(abs(y) <= 1, 1, syn),
    syn = 4 - ifelse(y < 0, syn +1, syn),
    szn = abs(z),
    szn = ceiling(log10(szn)),
    szn = ifelse(abs(z) <= 1, 1, szn),
    szn = 8 - ifelse(z < 0, szn +1, szn),
    occup = " 1.00",
    bfac = " 0.00",
    atomtype = rep(c("N", "C", "C"), seqlen)
  )
# Include the sequence information from FASTA file
pdb.resn = merge(pdb.df, seq.df, by.x = "resid", by.y = "pos") %>%
  rowwise() %>%
  mutate(resn = aa.codes[seq])
# Combine all info into PDB format lines
pdb.pdbrec = pdb.resn %>%
  rowwise() %>%
  mutate(
    pdbrec = paste0(
      atomr,
      paste0(rep(" ", s1n), collapse = ""),
      atomid,
      atomn,
      resn,
      " ",
      chainid,
      paste0(rep(" ", s2n), collapse = ""),
      resid,
      paste0(rep(" ", sxn), collapse = ""),
      sprintf("%.3f", x),
      paste0(rep(" ", syn), collapse = ""),
      sprintf("%.3f", y),
      paste0(rep(" ", szn), collapse = ""),
      sprintf("%.3f", z),
      occup,
      bfac,
      " ",
      atomtype,
      "  "
    )
  )
# Write the output file: one ATOM record per line, no header, no quoting.
write.table(
  pdb.pdbrec %>% select(pdbrec),
  pdb.out,
  row.names = F,
  col.names = F,
  quote = F
)
|
da53078f92a1b8aaa982fa05c083ce8430ccaae6
|
7786be8e0bd3bf57b5437887bc95b27ae3f3f66c
|
/R_code_multipanel.r
|
0da7d28a399d66954dfbf189c71d45a02795caad
|
[] |
no_license
|
CeciliaRocca/Monitoring
|
7781304f96379ca557522d516d1db82fc3ddc940
|
cd18f1f5d49808ce29e79eb503a7562a43628fa5
|
refs/heads/master
| 2021-04-23T23:14:11.011160
| 2020-05-13T15:32:13
| 2020-05-13T15:32:13
| 250,028,941
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,086
|
r
|
R_code_multipanel.r
|
### Multipanel in R: the second lesson of Monitoring Ecosystems
### Paired scatterplots of the 'meuse' soil-chemistry dataset, first with
### base graphics (pairs) and then with GGally::ggpairs.
install.packages("sp")
install.packages("GGally") # this provides the ggpairs() function
library(sp) # require(sp) will also do the job, but library() errors on failure
library(GGally)
data(meuse) # there is a dataset available named meuse
attach(meuse) # NOTE: attach() is discouraged; meuse$cadmium etc. is safer
# Exercise: see the names of the variables and plot cadmium versus zinc
# There are two ways to list the variables (this line was previously bare
# prose, which broke the script, and used name() instead of names()):
names(meuse)
head(meuse)
plot(cadmium, zinc, pch = 15, col = "green", cex = 2)
# Exercise: make all the possible paired plots in the dataset
# Don't use plot(); use pairs(meuse).
# In case you receive the error "figure margins too large", reshape the
# graph window with the mouse.
pairs(~cadmium + copper + lead + zinc, data = meuse) # fixed typo "cadium"
# Pairing from the 3rd to the 6th column
pairs(meuse[, 3:6])
# Exercise: prettify the graph
pairs(meuse[, 3:6], pch = 18, col = "red", cex = 1.5)
# The GGally package will prettify the graph (fixed typo "ggapirs")
ggpairs(meuse[, 3:6])
# The diagonal shows the frequency of the values for each column
# (e.g. cadmium shows few high values and mostly low values).
# Correlation: cadmium and copper vary together, hence high correlation: 0.925
|
b1f8b83a95ce51526c7d4333c8cbf6cb10471c4a
|
73d6b9e8adbd873875ed51751abc7182133b3e8a
|
/man/get_raw_vaccination_data.Rd
|
bbcc4827911d107489205fa362909899671cd0f2
|
[
"MIT"
] |
permissive
|
SimonCoulombe/covidtwitterbot
|
cc307e395312e5bbbff09b36c6dda9aef3e1747b
|
128749bfd4d0fcec5e3970f60f87424bff8cd50f
|
refs/heads/master
| 2023-04-19T10:03:03.008866
| 2021-04-26T16:26:36
| 2021-04-26T16:26:36
| 311,527,367
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 360
|
rd
|
get_raw_vaccination_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_vaccins_data.R
\name{get_raw_vaccination_data}
\alias{get_raw_vaccination_data}
\title{get_raw_vaccination_data downloads vaccination data from inspq}
\usage{
get_raw_vaccination_data()
}
\value{
}
\description{
get_raw_vaccination_data downloads vaccination data from inspq
}
|
f564afa8d63ec532b1aa5c6b8ba3de4a3617d13a
|
106f0d1b82521ad049ed5321736b8ec4767f21a1
|
/makeRegioPlot.R
|
e5faaef67e522d7d70cf0857d6727d6c1feb40e9
|
[
"MIT"
] |
permissive
|
georgeblck/weatherLeipzig
|
b37e4514f380dd825a78c21f8e3658d9213a0a83
|
71f1423632fb3d6fe97c2bb32e35f0541f0349b3
|
refs/heads/master
| 2021-07-20T14:13:07.314300
| 2020-05-15T18:53:54
| 2020-05-15T18:53:54
| 160,327,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,411
|
r
|
makeRegioPlot.R
|
# rm(list = ls())
# Plot regional weather statistics (average temperature, precipitation and
# sunshine hours) for one German federal state up to a chosen year.
# load packages
library(lubridate)
library(tidyverse)
library(ggthemes)
# Set the filters: target year and federal state
jahr <- 2020
bundesland <- "Sachsen"
# Last month to include: December for past years, otherwise the last full month
lastmonat <- ifelse((year(today()) - jahr) != 0, 12, month(today()) - 1)
# Read the data
regionalAverages <- read.table("data/regionalAverages.csv", header = TRUE, dec = ".",
    sep = ";")
# Filter and aggregate the data: yearly mean and sum per measure, plus a
# 3-level year category (pre-2000 / 2000+ / the target year itself)
bundeslandDat <- regionalAverages %>% filter(Bundesland == bundesland, Jahr <= jahr) %>%
    filter(Monat <= lastmonat) %>% group_by(Jahr) %>% summarise_at(c("Temperatur",
    "Niederschlag", "Sonnendauer"), list(mw = mean, summe = sum), na.rm = TRUE) %>%
    ungroup() %>% mutate(typeYear = (Jahr >= 2000) + (Jahr == jahr))
# Make the empty plot with the geom_segments Get the borders and round down or up
tempRange <- bundeslandDat %>% summarise_at(c("Temperatur_mw"), list(min = min, max = max)) %>%
    mutate(min = floor(min), max = ceiling(max)) %>% unlist()
tempTicks <- tempRange[1]:tempRange[2]
# One horizontal dashed guide line per whole degree
tempsegDat <- data.frame(x = rep(1881, length(tempTicks)), xend = rep(jahr + 0.5,
    length(tempTicks)), y = tempTicks, yend = tempTicks)
# Time series of yearly average temperature
regioTemp <- ggplot(data = bundeslandDat, aes(x = Jahr, y = Temperatur_mw)) + geom_segment(data = tempsegDat,
    aes(x = x, y = y, xend = xend, yend = yend), inherit.aes = FALSE, linetype = "dashed",
    alpha = 0.3, col = "black") + geom_rangeframe(col = "black") + geom_line(alpha = 0.7,
    col = "red", size = 0.7) + geom_point(size = 1.5, alpha = 0.7, col = "red") +
    xlab("Jahr") + ylab("Durchschnittstemperatur (°C)") + theme_tufte(base_size = 11) +
    theme(legend.position = "none") + scale_x_continuous(breaks = c(1881, seq(1900,
    jahr, by = 20), jahr)) + scale_y_continuous(limits = c(tempRange[1], tempRange[2])) +
    theme(text = element_text(size = 11, family = "sans-serif"))
#### Plot Precip vs Temp ####
# Round Precip to nearest 50
precipRange <- bundeslandDat %>% summarise_at(c("Niederschlag_summe"), list(min = min,
    max = max)) %>% mutate(min = floor(min/50) * 50, max = ceiling(max/50) * 50) %>%
    unlist()
# Get Average Values
avgValues <- bundeslandDat %>% summarise_at(c("Temperatur_mw", "Niederschlag_summe"),
    mean) %>% unlist()
# Scatterplot of precipitation vs temperature, colored by year category,
# with dashed cross-hairs at the long-run averages.
# NOTE(review): the last legend label hard-codes 2019 although jahr = 2020 --
# presumably it should be `jahr`; confirm.
regioPrecip <- ggplot(data = bundeslandDat, aes(y = Temperatur_mw, x = Niederschlag_summe,
    color = factor(typeYear), alpha = factor(typeYear))) + geom_segment(aes(y = avgValues[1],
    yend = avgValues[1], x = precipRange[1] * 1.1, xend = precipRange[2] * 0.9),
    inherit.aes = FALSE, linetype = "dashed", alpha = 0.5, col = "black", data = data.frame()) +
    geom_segment(aes(y = tempRange[1] * ifelse(tempRange[1] > 0, 1.1, 0.9), yend = tempRange[2] *
        0.9, x = avgValues[2], xend = avgValues[2]), inherit.aes = FALSE, linetype = "dashed",
        alpha = 0.5, col = "black", data = data.frame()) + geom_point(size = 1.5) +
    geom_rangeframe(col = "black", sides = "br") + theme_tufte(base_size = 15) +
    ylab("Durchschnittstemperatur (°C)") + xlab("Niederschlag (mm)") + scale_color_manual(values = c("blue",
    "black", "red"), breaks = c(0, 1, 2), name = "Jahr", labels = c("1881-1999",
    paste0("2000-", jahr - 1), 2019)) + scale_y_continuous(limits = c(tempRange[1],
    tempRange[2]), position = "right") + scale_x_continuous(limits = precipRange) +
    theme(legend.position = c(0, 0), legend.justification = c(0, 0), legend.title = element_text(size = 7,
        face = "bold", hjust = 0.5)) + annotate("text", y = tempRange[1] * ifelse(tempRange[1] >
    0, 1.1, 0.9), x = avgValues[2] + 0.01 * avgValues[2], label = "Durchschnittswerte",
    size = 2, angle = 90) + scale_alpha_manual(values = c(0.4, 1, 1), guide = FALSE) +
    guides(colour = guide_legend(override.aes = list(alpha = c(0.4, 0.9)))) + theme(text = element_text(size = 11,
    family = "sans-serif")) + theme(legend.text = element_text(size = 6))
# Sun duration plot: average sunshine hours per day (accounts for leap years).
# NOTE(review): the x limits and breaks hard-code 1951/2019 -- presumably the
# upper end should track `jahr`; confirm.
sunDat <- bundeslandDat %>% filter(Sonnendauer_summe > 0) %>% mutate(numDays = ifelse(leap_year(Jahr),
    366, 365)) %>% mutate(sonneprotag = Sonnendauer_summe/numDays)
ggplot(data = sunDat, aes(x = Jahr, y = sonneprotag)) + geom_rangeframe(col = "black") +
    geom_line(alpha = 0.7, col = "darkorange", size = 1) + xlab("Jahr") + ylab("Sonnenstunden pro Tag") +
    geom_point(size = 3, alpha = 0.7, col = "darkorange") + theme_tufte(base_size = 11) +
    theme(legend.position = "none") + scale_x_continuous(limits = c(1951, 2019),
    breaks = c(1951, seq(1970, 2019, by = 20), 2019))
|
63df2e3c2ce92a4c5113aea9d0399e13eb54217e
|
1584aff3bcb57975ed52341d11673402f2646053
|
/GeoSpatialAnalysis_inR_cont.R
|
b4009f1a9f9f7b1ca2b5c3fdeb7bf198f759e51b
|
[] |
no_license
|
atseng1/p9380_lab2
|
f0b0bdcee33b536d3debb75947df06d0085d95ba
|
3ef9923989b7fbdb5370bf01f53c136435504843
|
refs/heads/master
| 2020-12-23T08:24:54.608755
| 2020-01-31T14:48:14
| 2020-01-31T14:48:14
| 237,096,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,516
|
r
|
GeoSpatialAnalysis_inR_cont.R
|
# Lab script: county-level map of the Robert Wood Johnson Foundation
# Quality of Life Index ranking for New York State.
# NOTE(review): absolute, user-specific path -- not portable; consider a
# project-relative path or here::here().
setwd("/Users/ashleytseng/OneDrive - cumc.columbia.edu/MPH/Spring 2020/EHSC P9380_Advanced GIS/Labs/Lab 2/p9380_lab2")
### Lab goal is to create a county level map of Quality of Life Index Ranking from the
### Robert Wood Johnson Foundation
install.packages("maps")
require(maps)
# County polygons for New York from the maps package
ny_cty <- map('county', 'new york', fill=TRUE, col = palette())
head(ny_cty)
# Polygon names are "state,county"; split and keep the county part
list.names.ny <- strsplit(ny_cty$names,",")
head(list.names.ny, n=62)
View(list.names.ny)
map.IDs <- as.character(tolower(sapply(list.names.ny, function(x) x[2])))
head(map.IDs, n=62)
# Normalize "st lawrence" so it matches the RWJ data's county naming below
map.IDs <- gsub("st lawrence", "stlawrence", map.IDs)
head(map.IDs, n=62)
require(maptools)
# Build SpatialPolygons keyed by county name (NY State Plane West, EPSG:2261)
ny_cty_sp <- map2SpatialPolygons(ny_cty, IDs = map.IDs, proj4string = CRS("+init=epsg:2261"))
head(map.IDs, n=62)
####Read data and create Spatial Polygons Dataframe
#install.packages("data.table")
require(data.table)
# Keep FIPS as character so leading zeros are preserved
rwj <- fread("rwj_rank.csv", stringsAsFactors = F, data.table = F, colClasses=list(character=c("FIPS")))
head(rwj)
ny_rwj <- subset(rwj, State == "New York")
head(ny_rwj, n=62)
# Match the "stlawrence" normalization applied to the polygon IDs above
ny_rwj$County <- gsub("St. Lawrence", "stlawrence", ny_rwj$County)
head(ny_rwj$County, n=62)
# Row names must equal the polygon IDs for SpatialPolygonsDataFrame to join
row.names(ny_rwj) <- as.character(tolower(ny_rwj$County))
head(row.names(ny_rwj), n=62)
head(map.IDs, n=62)
ny_rwj_df <- SpatialPolygonsDataFrame(ny_cty_sp,ny_rwj)
summary(ny_rwj_df)
summary(ny_rwj_df$QL.Rank)
ny_rwj_df$QL.Rank <- as.numeric(ny_rwj_df$QL.Rank)
summary(ny_rwj_df$QL.Rank)
# Invert the rank (62 NY counties) so that higher values mean better quality
ny_rwj_df$QL.Rank <- 62 - ny_rwj_df$QL.Rank
library(RColorBrewer)
library(classInt)
# Choropleth with base graphics: quantile classes, green palette
plotvar <- ny_rwj_df$QL.Rank
nclr <- 5
class <- classIntervals(plotvar, nclr, style = "quantile")
plotclr <- brewer.pal(nclr, "Greens")
colcode <- findColours(class, plotclr, digits = 3)
plot(ny_rwj_df, col = colcode, border = "grey",axes = F)
title(main = "Quality of Life Rankings: NY State \n by Jeremy R. Porter",
    sub = "Data Source: Robert Wood Johnson Foundation")
legend("bottomleft", legend = names(attr(colcode,"table")),
    fill = attr(colcode, "palette"), cex=0.55)
require(rgdal)
# Export the joined data as a shapefile
writeOGR(ny_rwj_df,
    dsn = "working_directory",
    layer = "RWJ_NY",
    driver = "ESRI Shapefile")
#install.packages("sf")
require(sf)
# Re-import via the modern sf interface for the ggplot-based maps below
rwj_sf <- st_read(dsn = "working_directory",
    layer = "RWJ_NY")
names(rwj_sf)
head(rwj_sf)
plot(rwj_sf, max.plot = 15)
plot(st_geometry(rwj_sf), axes=TRUE)
plot(rwj_sf[,"QL_Rank"],
    graticule=st_crs(rwj_sf), axes=TRUE, las=1)
# Reproject on the fly to EPSG:2263 (NY Long Island State Plane)
plot(st_transform(rwj_sf[,"QL_Rank"], 2263),
    graticule=st_crs(rwj_sf),
    axes=TRUE, las=1)
coords <- st_coordinates(rwj_sf)
plot(coords)
#### Data Visualization with GGplot
#install.packages("ggplot2")
require(ggplot2)
# map1..map6 build up the same choropleth step by step, adding class breaks,
# titles, theme tweaks, a nicer legend, and finally a north arrow.
map1 <- ggplot(data = rwj_sf) +
    geom_sf()
map1
map1a <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer()
map1a
map2 <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer() +
    ggtitle("County Level Quality of Life Rank\nNew York State") +
    theme(line = element_blank(),
        axis.text=element_blank(),
        axis.title=element_blank(),
        panel.background = element_blank())
map2
map3 <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer() +
    ggtitle("County Level Quality of Life Rank\nNew York State") +
    theme(axis.text=element_text(size=8),
        axis.title=element_text(size=8,face="bold"),
        plot.title = element_text(hjust = 0.5))
map3
map4 <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer() +
    ggtitle("County Level Quality of Life Rank\nNew York State") +
    theme(axis.text=element_text(size=8),
        axis.title=element_text(size=8,face="bold"),
        plot.title = element_text(face="bold",size=10,hjust = 0.5))
map4
map5 <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer(name="Quantile", palette="Blues",
        labels=c("1st",
            "2nd",
            "3rd",
            "4th",
            "5th")) +
    ggtitle("County Level Quality of Life Rank New York State\n ") +
    theme(axis.text=element_text(size=8),
        axis.title=element_text(size=8),
        plot.title = element_text(face="bold",size=12,hjust = 0.5),
        legend.position="bottom")
map5
#install.packages("ggsn")
require(ggsn)
northSymbols()
map6 <- ggplot(data = rwj_sf) +
    geom_sf() +
    aes(fill=cut_number(QL_Rank, 5)) +
    scale_fill_brewer(name="Quantile", palette="Blues",
        labels=c("1st",
            "2nd",
            "3rd",
            "4th",
            "5th")) +
    labs(title = "County Level Quality of Life Rank New York State",
        subtitle = "Jeremy R. Porter\n",
        caption = "\nData source: Robert Wood Johnson Foundation") +
    theme(axis.text=element_text(size=6),
        axis.title=element_text(size=6),
        plot.title = element_text(face="bold",size=16,hjust = 0.5),
        plot.subtitle = element_text(size=12,hjust = 0.5),
        plot.caption = element_text(),
        legend.position=c(0.91,0.58)) +
    north(rwj_sf, scale = 0.15, symbol=1, location="bottomleft")
map6
# NOTE(review): only works inside RStudio; saves the active editor document.
rstudioapi::documentSave()
|
d07e4380e0bb08622dd9071ddfb4bb6e4b06fdab
|
dac9ab3e7cda91b95e7dc80bfcc36782710464cb
|
/R/install.R
|
dc6e116d957a1c7c08fc11e775b4b3d0f3ac0d41
|
[] |
no_license
|
krlmlr/tic
|
8f02ac2707b21b82cca171a8539eec56bc64650f
|
fb6f25ccd5a7cf9f4919814f728f104384c9f663
|
refs/heads/master
| 2021-07-08T08:30:33.267799
| 2019-12-23T23:04:43
| 2019-12-23T23:04:43
| 72,775,037
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 763
|
r
|
install.R
|
# This code can only run as part of a CI run
# nocov start

# Install each package in `pkg_names` from CRAN and verify it is present
# afterwards. `pkgType` is normalized to the platform default when NULL.
verify_install <- function(pkg_names, pkgType = NULL) { # nolint
  # set "type" to platform default
  pkgType <- update_type(pkgType) # nolint
  lapply(pkg_names, verify_install_one, pkgType = pkgType)
}
# Install a single package from CRAN with the requested package type, then
# fail loudly if the package (or one of its dependencies) is still missing.
verify_install_one <- function(pkg_name, pkgType) { # nolint
  # Temporarily set options(pkgType = ...) for the duration of the install only
  withr::with_options(
    c(pkgType = pkgType),
    remotes::install_cran(pkg_name, upgrade = TRUE)
  )
  if (!package_installed(pkg_name)) {
    stopc(
      "Error installing package ", pkg_name, " or one of its dependencies."
    )
  }
}
# TRUE when `pkg_name` is installed, i.e. its DESCRIPTION file can be located
# (system.file() returns "" for missing packages, and file.exists("") is FALSE).
package_installed <- function(pkg_name) {
  file.exists(system.file("DESCRIPTION", package = pkg_name))
}

# This code can only run as part of a CI run
# nocov end
|
ebd5a70722e15550a49e6f2f85f3d0f03f825731
|
f91369d3ff4584d909ff5f0f4be382e54594d95c
|
/man/global_options.Rd
|
ea205d2e521e7e515950533e568b24e4d3c61147
|
[
"Apache-2.0"
] |
permissive
|
Novartis/tidymodules
|
e4449133f5d299ec7b669b02432b537de871278d
|
daa948f31910686171476865051dcee9e6f5b10f
|
refs/heads/master
| 2023-03-06T01:18:55.990139
| 2023-02-23T15:01:28
| 2023-02-23T15:01:28
| 203,401,748
| 147
| 13
|
NOASSERTION
| 2020-04-02T16:09:32
| 2019-08-20T15:16:40
|
R
|
UTF-8
|
R
| false
| true
| 742
|
rd
|
global_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{global_options}
\alias{global_options}
\title{tidymodules options}
\description{
List of global options used to adjust tidymodules configuration.
\itemize{
\item{\strong{tm_session_type}}{ : Define the type of the session, See available session types in \code{\link{session_type}} }
\item{\strong{tm_session_custom}}{ : Used to set a custom function for generating the session Id. Used in concordance with the \code{CUSTOM} session type.}
\item{\strong{tm_disable_cache}}{ : Disable caching of modules. This option is set to FALSE by default but is only relevant when user's session is managed properly. See also \code{\link{getCacheOption}}}
}
}
|
85c0cfe6c944a91818216e9c0fcb7e4572ad358d
|
436ace74a695893aad73229b723fac6be6814129
|
/R/sobolmartinez.R
|
0d2be074f860a4f380f1d529182c85d5d39e8db6
|
[] |
no_license
|
cran/sensitivity
|
18657169c915857dcde8af872e0048fef77107f4
|
2b2cbcb7f1bebecfd05e589e459fdf4334df3af1
|
refs/heads/master
| 2023-04-06T05:36:54.290801
| 2023-03-19T18:10:02
| 2023-03-19T18:10:02
| 17,699,584
| 17
| 17
| null | 2021-04-07T00:57:30
| 2014-03-13T06:16:44
|
R
|
UTF-8
|
R
| false
| false
| 12,784
|
r
|
sobolmartinez.R
|
# Sobol' indices estimation (Martinez 2011)
# Plus: Theoretical confidence intervals from correlation coefficient-based confidence interval
#
# J-M. Martinez, Analyse de sensibilite globale par decomposition de la variance,
# Presentation a la journee des GdR Ondes et MASCOT-NUM, 13 janvier 2011,
# Institut Henri Poincare, Paris, France.
#
# M. Baudin, K. Boumhaout, T. Delage, B. Iooss and J-M. Martinez, 2016,
# Numerical stability of Sobol' indices estimation formula,
# Proceedings of the SAMO 2016 Conference, Reunion Island, France, December 2016
# Bertrand Iooss (2015)
# Modified by Frank Weber (2016)
# Sobol' indices estimation (Martinez 2011).
#
# Builds the pick-freeze design matrix from two independent samples X1 and X2,
# then (when a model is supplied) evaluates it and computes the indices via
# tell(). The design is: X1, X2, then for each input i a block equal to X1
# with column i replaced by X2's column i.
#
# model : function/predictor evaluated on the design (NULL to defer to tell())
# X1,X2 : two independent samples with identical dimensions
# nboot : number of bootstrap replicates (0 = theoretical confidence intervals)
# conf  : confidence level for the intervals
sobolmartinez <- function(model = NULL, X1, X2, nboot = 0, conf = 0.95, ...) {
  if ((ncol(X1) != ncol(X2)) || (nrow(X1) != nrow(X2)))
    stop("The samples X1 and X2 must have the same dimensions")
  p <- ncol(X1)
  # Assemble all design blocks first and rbind once; the original grew X with
  # rbind() inside the loop, which copies the whole matrix on every iteration.
  blocks <- vector("list", p + 2L)
  blocks[[1L]] <- X1
  blocks[[2L]] <- X2
  for (i in seq_len(p)) {
    Xb <- X1
    Xb[, i] <- X2[, i]  # freeze all inputs but i at X1's values
    blocks[[i + 2L]] <- Xb
  }
  X <- do.call(rbind, blocks)
  x <- list(model = model, X1 = X1, X2 = X2, nboot = nboot, conf = conf, X = X,
            call = match.call())
  class(x) <- "sobolmartinez"
  if (!is.null(x$model)) {
    response(x, other_types_allowed = TRUE, ...)
    tell(x)
  }
  return(x)
}
# Estimator used by tell.sobolmartinez (and by boot() for resampling).
# data: matrix with columns [y(X1), y(X2), y(Xb_1), ..., y(Xb_p)], or an
#       array with the same first two dims plus extra output dimensions.
# i: row indices (bootstrap resample); defaults to all rows.
# estimStd: when TRUE, also return correlation-test confidence bounds.
# Returns c(V, cor-based S estimates, cor-based complements for T).
estim.sobolmartinez <- function(data, i = NULL, estimStd = FALSE, conf = 0){
  if(is(data,"matrix")){
    # This means x$y is a numeric vector.
    if(is.null(i)) i <- 1:nrow(data)
    d <- as.matrix(data[i, ]) # as.matrix for colSums
    n <- nrow(d)
    p <- ncol(d) - 2
    V <- var(d[, 1])
    # First-order indices: correlation between y(X2) and y(Xb_i)
    ecor <- sapply(1:p, function(ii){
      cor(d[, 2], d[, ii + 2], use = "pairwise.complete.obs")
    })
    # Complements for total indices: correlation between y(X1) and y(Xb_i)
    ecorcompl <- sapply(1:p, function(ii){
      cor(d[, 1], d[, ii + 2], use = "pairwise.complete.obs")
    })
    if(estimStd){
      # Theoretical CIs from cor.test's correlation confidence intervals
      VV <- matrix(V, nrow = 1, ncol = 3,
                   dimnames = list(1, c("estim", "CIinf", "CIsup")))
      est_matrix <- sapply(1:p, function(ii){
        confcor <- cor.test(d[, 2],d[, ii + 2], conf.level = conf)
        estcor <- c(ecor[ii],
                    confcor$conf.int[1],
                    confcor$conf.int[2])
        confcor <- cor.test(d[, 1], d[, ii + 2], conf.level = conf)
        estcorcompl <- c(ecorcompl[ii],
                         confcor$conf.int[2],
                         confcor$conf.int[1]) # on intervertit car apres on prend l'oppose
        return(c(estcor, estcorcompl))
      })
      estcor <- t(est_matrix[1:3, ])
      estcorcompl <- t(est_matrix[4:6, ])
      dimnames(estcor) <- list(2:(p + 1), c("estim", "CIinf", "CIsup"))
      dimnames(estcorcompl) <- list((p + 2):(2*p + 1),
                                    c("estim", "CIinf", "CIsup"))
      return(rbind(VV, estcor, estcorcompl))
    } else{
      return(c(V, ecor, ecorcompl))
    }
  } else if(is(data,"array")){
    if(estimStd){
      stop("Confidence intervals not supported if \"data\" is an array")
    }
    if(is.null(i)) i <- 1:dim(data)[1]
    n <- length(i)
    p <- dim(data)[2] - 2
    # Define a helper function:
    # Applies the matrix-case estimator to each slice along the 3rd dimension.
    one_dim3 <- function(d_array){
      V <- apply(d_array, 3, function(d_matrix){
        var(d_matrix[, 1])
      })
      ematrix <- apply(d_array, 3, function(d_matrix){
        ecor <- sapply(1:p, function(ii){
          cor(d_matrix[, 2], d_matrix[, ii + 2], use = "pairwise.complete.obs")
        })
        ecorcompl <- sapply(1:p, function(ii){
          cor(d_matrix[, 1], d_matrix[, ii + 2], use = "pairwise.complete.obs")
        })
        c(ecor, ecorcompl)
      })
      return(rbind(V, ematrix, deparse.level = 0))
    }
    if(length(dim(data)) == 3){
      # This means x$y is a matrix.
      d <- data[i, , , drop = FALSE]
      return(one_dim3(d))
    } else if(length(dim(data)) == 4){
      # This means x$y is a 3-dimensional array.
      d <- data[i, , , , drop = FALSE]
      all_dim3 <- sapply(1:dim(data)[4], function(i){
        one_dim3(array(data[ , , , i],
                       dim = dim(data)[1:3],
                       dimnames = dimnames(data)[1:3]))
      }, simplify = "array")
      dimnames(all_dim3)[[3]] <- dimnames(data)[[4]]
      return(all_dim3)
    }
  }
}
# tell method for sobolmartinez objects: given the model responses y
# (numeric vector, matrix, or 3-d array), estimates the overall variance V,
# first-order indices S and total indices T, with either theoretical
# (nboot == 0) or bootstrap confidence intervals. Results are written back
# into the caller's object via assign() (metafor/sensitivity convention).
tell.sobolmartinez <- function(x, y = NULL, return.var = NULL, ...) {
  id <- deparse(substitute(x))
  if (! is.null(y)) {
    x$y <- y
  } else if (is.null(x$y)) {
    stop("y not found")
  }
  p <- ncol(x$X1)
  n <- nrow(x$X1)
  if(is(x$y,"numeric")){
    # Scalar model output: one column of responses per design block
    data <- matrix(x$y, nrow = n)
    # estimation of the partial variances (V, D1 and Dt)
    if (x$nboot == 0){
      V <- data.frame(original = estim.sobolmartinez(data, 1:n, TRUE, x$conf))
      colnames(V) <- c("original", "min. c.i.", "max. c.i.")
    }
    else{
      V.boot <- boot(data, estim.sobolmartinez, R = x$nboot)
      V <- bootstats(V.boot, x$conf, "basic")
      rownames(V) <- c("global",
                       colnames(x$X1),
                       paste("-", colnames(x$X1), sep = ""))
    }
    # estimation of the Sobol' indices (S1 and St)
    # Rows 2..(p+1) of V hold S; rows (p+2)..(2p+1) hold the complements of T.
    if (x$nboot == 0) {
      S <- V[2:(p + 1), 1:3, drop = FALSE]
      T <- 1 - V[(p + 2):(2 * p + 1), 1:3, drop = FALSE]
    } else {
      S.boot <- V.boot
      S.boot$t0 <- V.boot$t0[2:(p + 1)]
      S.boot$t <- V.boot$t[,2:(p + 1)]
      S <- bootstats(S.boot, x$conf, "basic")
      T.boot <- V.boot
      T.boot$t0 <- 1 - V.boot$t0[(p + 2):(2 * p + 1)]
      T.boot$t <- 1 - V.boot$t[,(p + 2):(2 * p + 1)]
      T <- bootstats(T.boot, x$conf, "basic")
    }
    rownames(S) <- colnames(x$X1)
    rownames(T) <- colnames(x$X1)
  } else if(is(x$y,"matrix")){
    # Multi-column model output: estimate per output column
    data <- array(x$y, dim = c(n, nrow(x$y) / n, ncol(x$y)),
                  dimnames = list(NULL, NULL, colnames(x$y)))
    if(x$nboot == 0){
      V <- estim.sobolmartinez(data, 1:n, estimStd = FALSE)
      rownames(V) <- c("global",
                       colnames(x$X1),
                       paste("-", colnames(x$X1), sep = ""))
      S <- V[2:(p + 1), , drop = FALSE]
      T <- 1 - V[(p + 2):(2 * p + 1), , drop = FALSE]
      rownames(T) <- colnames(x$X1)
    } else{
      # One bootstrap run per output column, then stack results into arrays
      V.boot <- lapply(1:ncol(x$y), function(col_idx){
        boot(as.matrix(data[, , col_idx]), estim.sobolmartinez, R = x$nboot)
      })
      V <- sapply(1:length(V.boot), function(col_idx){
        as.matrix(bootstats(V.boot[[col_idx]], x$conf, "basic"))
      }, simplify = "array")
      dimnames(V) <- list(
        c("global", colnames(x$X1), paste("-", colnames(x$X1), sep = "")),
        dimnames(V)[[2]],
        colnames(x$y))
      S <- sapply(1:length(V.boot), function(col_idx){
        S.boot_col <- V.boot[[col_idx]]
        S.boot_col$t0 <- V.boot[[col_idx]]$t0[2:(p + 1)]
        S.boot_col$t <- V.boot[[col_idx]]$t[, 2:(p + 1)]
        as.matrix(bootstats(S.boot_col, x$conf, "basic"))
      }, simplify = "array")
      T <- sapply(1:length(V.boot), function(col_idx){
        T.boot_col <- V.boot[[col_idx]]
        T.boot_col$t0 <- 1 - V.boot[[col_idx]]$t0[(p + 2):(2 * p + 1)]
        T.boot_col$t <- 1 - V.boot[[col_idx]]$t[, (p + 2):(2 * p + 1)]
        as.matrix(bootstats(T.boot_col, x$conf, "basic"))
      }, simplify = "array")
      dimnames(S) <- dimnames(T) <- list(colnames(x$X1),
                                         dimnames(V)[[2]],
                                         colnames(x$y))
    }
  } else if(is(x$y,"array")){
    # 3-d model output: estimate per (dim2, dim3) slice
    data <- array(x$y, dim = c(n, dim(x$y)[1] / n, dim(x$y)[2:3]),
                  dimnames = list(NULL, NULL,
                                  dimnames(x$y)[[2]], dimnames(x$y)[[3]]))
    if(x$nboot == 0){
      V <- estim.sobolmartinez(data, 1:n, estimStd = FALSE)
      dimnames(V)[[1]] <- c("global",
                            colnames(x$X1),
                            paste("-", colnames(x$X1), sep = ""))
      S <- V[2:(p + 1), , , drop = FALSE]
      T <- 1 - V[(p + 2):(2 * p + 1), , , drop = FALSE]
      dimnames(T)[[1]] <- colnames(x$X1)
    } else{
      # Nested bootstrap: one boot object per (dim3, dim2) slice
      V.boot <- lapply(1:dim(x$y)[[3]], function(dim3_idx){
        lapply(1:dim(x$y)[[2]], function(dim2_idx){
          boot(as.matrix(data[, , dim2_idx, dim3_idx]), estim.sobolmartinez, R = x$nboot)
        })
      })
      V <- sapply(1:dim(x$y)[[3]], function(dim3_idx){
        sapply(1:dim(x$y)[[2]], function(dim2_idx){
          as.matrix(bootstats(V.boot[[dim3_idx]][[dim2_idx]], x$conf, "basic"))
        }, simplify = "array")
      }, simplify = "array")
      dimnames(V) <- list(c("global",
                            colnames(x$X1),
                            paste("-", colnames(x$X1), sep = "")),
                          dimnames(V)[[2]],
                          dimnames(x$y)[[2]],
                          dimnames(x$y)[[3]])
      S <- sapply(1:dim(x$y)[[3]], function(dim3_idx){
        sapply(1:dim(x$y)[[2]], function(dim2_idx){
          S.boot_dim2 <- V.boot[[dim3_idx]][[dim2_idx]]
          S.boot_dim2$t0 <-
            V.boot[[dim3_idx]][[dim2_idx]]$t0[2:(p + 1)]
          S.boot_dim2$t <-
            V.boot[[dim3_idx]][[dim2_idx]]$t[, 2:(p + 1)]
          as.matrix(bootstats(S.boot_dim2, x$conf, "basic"))
        }, simplify = "array")
      }, simplify = "array")
      T <- sapply(1:dim(x$y)[[3]], function(dim3_idx){
        sapply(1:dim(x$y)[[2]], function(dim2_idx){
          T.boot_dim2 <- V.boot[[dim3_idx]][[dim2_idx]]
          T.boot_dim2$t0 <-
            1 - V.boot[[dim3_idx]][[dim2_idx]]$t0[(p + 2):(2 * p + 1)]
          T.boot_dim2$t <-
            1 - V.boot[[dim3_idx]][[dim2_idx]]$t[, (p + 2):(2 * p + 1)]
          as.matrix(bootstats(T.boot_dim2, x$conf, "basic"))
        }, simplify = "array")
      }, simplify = "array")
      dimnames(S) <- dimnames(T) <- list(colnames(x$X1),
                                         dimnames(V)[[2]],
                                         dimnames(x$y)[[2]],
                                         dimnames(x$y)[[3]])
    }
  }
  # return
  # Write results into the caller's copy of x (by the name it was passed as).
  x$V <- V
  x$S <- S
  x$T <- T
  for (i in return.var) {
    x[[i]] <- get(i)
  }
  assign(id, x, parent.frame())
}
# Print method for sobolmartinez objects: shows the call, the number of model
# runs, and the first-order / total indices; "(empty)" when no response yet.
print.sobolmartinez <- function(x, ...) {
  cat("\nCall:\n", deparse(x$call), "\n", sep = "")
  if (is.null(x$y)) {
    cat("\n(empty)\n")
  } else {
    # Number of model runs depends on the response container type
    if (is(x$y, "numeric")) {
      cat("\nModel runs:", length(x$y), "\n")
    } else if (is(x$y, "matrix")) {
      cat("\nModel runs:", nrow(x$y), "\n")
    } else if (is(x$y, "array")) {
      cat("\nModel runs:", dim(x$y)[1], "\n")
    }
    cat("\nFirst order indices:\n")
    print(x$S)
    cat("\nTotal indices:\n")
    print(x$T)
  }
}
# Plot method for sobolmartinez objects: first-order indices (circles) and
# total indices (triangles, shifted right) on a common axis. For matrix/array
# model outputs, y_col / y_dim3 pick which output column / slice to display.
plot.sobolmartinez <- function(x, ylim = c(0, 1),
                               y_col = NULL, y_dim3 = NULL, ...) {
  if (!is.null(x$y)) {
    p <- ncol(x$X1)
    pch = c(21, 24)
    if(inherits(x$y, "numeric")){
      # Scalar output: plot S, then overlay T at (1:p)+.3
      nodeplot(x$S, xlim = c(1, p + 1), ylim = ylim, pch = pch[1])
      nodeplot(x$T, xlim = c(1, p + 1), ylim = ylim, labels = FALSE,
               pch = pch[2], at = (1:p)+.3, add = TRUE)
    } else if(is(x$y,"matrix") | is(x$y,"array")){
      # Default to the first output column/slice when unspecified
      if(is.null(y_col)) y_col <- 1
      if(is(x$y,"matrix") && !is.null(y_dim3)){
        y_dim3 <- NULL
        warning("Argument \"y_dim3\" is ignored since the model output is ",
                "a matrix")
      }
      if(is(x$y,"array") && !is(x$y,"matrix") && is.null(y_dim3)) y_dim3 <- 1
      nodeplot(x$S, xlim = c(1, p + 1), ylim = ylim, pch = pch[1],
               y_col = y_col, y_dim3 = y_dim3)
      nodeplot(x$T, xlim = c(1, p + 1), ylim = ylim, labels = FALSE,
               pch = pch[2], at = (1:p)+.3, add = TRUE,
               y_col = y_col, y_dim3 = y_dim3)
    }
    legend(x = "topright", legend = c("main effect", "total effect"), pch = pch)
  }
}
# ggplot2-based plot method for sobolmartinez objects: draws first-order and
# total Sobol' indices side by side via nodeggplot(). For matrix/array model
# outputs, y_col / y_dim3 select which output column / slice to display.
# Fix: legend label read "Main effet" (typo) in both branches.
ggplot.sobolmartinez <- function(data, mapping = aes(), ylim = c(0, 1),
                y_col = NULL, y_dim3 = NULL, ..., environment = parent.frame()) {
  x <- data
  if (!is.null(x$y)) {
    p <- ncol(x$X1)
    pch = c(21, 24)
    if(is(x$y,"numeric")){
      nodeggplot(listx = list(x$S,x$T), xname = c("Main effect","Total effect"), ylim = ylim, pch = pch)
    } else if(is(x$y,"matrix") | is(x$y,"array")){
      # Default to the first output column/slice when unspecified
      if(is.null(y_col)) y_col <- 1
      if(is(x$y,"matrix") && !is.null(y_dim3)){
        y_dim3 <- NULL
        warning("Argument \"y_dim3\" is ignored since the model output is ",
                "a matrix")
      }
      if(is(x$y,"array") && !is(x$y,"matrix") && is.null(y_dim3)) y_dim3 <- 1
      nodeggplot(listx = list(x$S,x$T), xname = c("Main effect","Total effect"), ylim = ylim, pch = pch, y_col = y_col, y_dim3 = y_dim3)
    }
  }
}
|
ed6ed1244f639cbcf626570db513bb3540d5de92
|
0485c00604cf3448cedb45e6efb2f85d88790c85
|
/Book/p73.R
|
48109926b4d6bbef45357bf8a40888af027b7d21
|
[] |
no_license
|
zsx29/R
|
d89af4ec46b8068f25d1e2447f98085a6721537f
|
1f349b7b3f1981010677530e05d4d467f2c6e95e
|
refs/heads/master
| 2023-06-11T11:28:40.607617
| 2021-07-02T02:17:13
| 2021-07-02T02:17:13
| 380,932,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 159
|
r
|
p73.R
|
# Build a small data frame with two numeric columns and one character column
df <- data.frame(x = c(1:5), y = seq(2, 10 ,2), z = c('a', 'b', 'c', 'd', 'e'))
df
# Access individual columns with $
df$x
df$y
df$z
str(df) # Inspect the structure of the data frame object
bb28c048944b84b82f5d1453cdb7eddeb460d752
|
ce6df5d7725e4d1dec9a818bbf60cec8d68aa62c
|
/tests/testthat/test_vds_amelia_plots.R
|
37e25e12d7a6829a8be562dfcca057b98d3dcb5d
|
[] |
no_license
|
jmarca/calvad_rscripts
|
64b4453a37e482ee20fb66b94451f7f097f3db0a
|
4cfd418e029ccaa00a2828e525baa5e8641b3e2c
|
refs/heads/master
| 2020-04-12T08:43:40.247022
| 2017-12-21T19:18:22
| 2017-12-21T19:18:22
| 9,202,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,754
|
r
|
test_vds_amelia_plots.R
|
# Integration test for get.and.plot.vds.amelia: imputes VDS data, checks the
# aggregated result, and verifies the plots were attached to the CouchDB doc.
config <- rcouchutils::get.config(Sys.getenv('RCOUCHUTILS_TEST_CONFIG'))
parts <- c('vds','amelia','plots')
result <- rcouchutils::couch.makedb(parts)
context('get.and.plot.vds.amelia works okay')
test_that("plotting imputed data code works okay",{
    # Fixture: one detector-year of pre-saved data
    file <- './files/737237_ML_2012.df.2012.RData'
    fname <- '737237_ML_2012'
    vds.id <- 737237
    year <- 2012
    seconds <- 120
    path <- '.'
    df_agg <- get.and.plot.vds.amelia(
        pair=vds.id,
        year=year,
        doplots=TRUE,
        remote=FALSE,
        path=path,
        force.plot=TRUE,
        trackingdb=parts)
    # Shape of the aggregated imputation result
    expect_that(df_agg,is_a('data.frame'))
    expect_that(names(df_agg),equals(c("ts","nl1","nr1",
                                       "ol1","or1","obs_count",
                                       "tod","day")))
    # Summary statistics of the imputed lane-1 counts (regression values)
    expect_that(min(df_agg$nl1,na.rm=TRUE),equals(0.0))
    ## print(sprintf("%0.10f",mean(df_agg$nl1,na.rm=TRUE)))
    expect_that(mean(df_agg$nl1,na.rm=TRUE),equals(780.1367564096,tolerance = .00001))
    ## print(sprintf("%0.10f",median(df_agg$nl1,na.rm=TRUE)))
    expect_that(median(df_agg$nl1,na.rm=TRUE),equals(883))
    ## print(sprintf("%0.10f",max(df_agg$nl1,na.rm=TRUE)))
    expect_that(max(df_agg$nl1,na.rm=TRUE),equals(1861))
    # Each expected plot must exist as an attachment on the tracking doc
    plots <- paste(vds.id,year,'imputed',
                   c('001.png','002.png','003.png','004.png'),
                   sep='_')
    for(plot in plots){
        result <- rcouchutils::couch.has.attachment(db=parts,docname=vds.id,
                                                    attachment=plot)
        expect_true(result)
    }
    ## cleanup
    unlink(c('./files/images/',vds.id,'/',plots))
    ## should also md5 check the dumped images?
})
rcouchutils::couch.deletedb(parts)
4df564ad10bded9c3ff9f282f5c4f1e9d26eae8c
|
7d0d3d2311a1e6bf40d7e288c18a78e7becf855b
|
/R/threshold_SE.R
|
43b687d9e1f05d0cef4c562fd05d08d2b13b4873
|
[
"CC0-1.0"
] |
permissive
|
dunbarlabNIH/barcodetrackR
|
c8023800426ec56ec08dad7ce0b7450a4aad0bae
|
f3b8174e5cb7b0540de6bbbedf19022cc0c14074
|
refs/heads/master
| 2023-04-26T13:15:16.327054
| 2021-04-26T14:59:21
| 2021-04-26T14:59:21
| 47,579,726
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,353
|
r
|
threshold_SE.R
|
#' @title Threshold SE
#'
#' @description Removes barcodes from a SummarizedExperiment object which have an abundance lower than the provided relative or absolute threshold. See the function `estimate_barcode_threshold` to estimate an appropriate threshold for an SE.
#'
#' @param your_SE A Summarized Experiment object.
#' @param threshold_value Numeric. The minimum threshold abundance for a barcode to be maintained in the SE. If `threshold_type` is relative, this parameter should be between 0 and 1. If `threshold_type` is absolute, this parameter should be greater than 1.
#' @param threshold_type Character. One of "relative" or "absolute" relative. If a relative threshold is specified, only those rows which have higher than `threshold_value` proportion of reads within at least one sample will be kept as non-zero. If an absolute threshold is specified, only those rows which have an absolute read count higher than `threshold_value` in at least one sample will be kept as non-zero.
#' @param verbose Logical. If TRUE, print the total number of barcodes removed from the SE.
#'
#' @return Returns a SummarizedExperiment containing only barcodes which passed the supplied threshold in at least one sample. All of the default assays are re-calculated after thresholding is applied. Note that since the SE is re-instantiated, any custom assays should be recalculated after thresholding.
#'
#' @import SummarizedExperiment
#'
#' @examples
#' data(wu_subset)
#' threshold_SE(
#'     your_SE = wu_subset, threshold_value = 0.005,
#'     threshold_type = "relative", verbose = TRUE
#' )
#' @export
#'
threshold_SE <- function(your_SE,
    threshold_value,
    threshold_type = "relative",
    verbose = TRUE) {
    # Error checking
    if (threshold_type %in% c("relative", "absolute") == FALSE) {
        stop("The parameter `threshold_type` must be set to `relative` or `absolute`.")
    }
    if (threshold_type == "relative") {
        if (threshold_value <= 0 | threshold_value >= 1) {
            stop("Since `threshold_type` is set to `relative`, `threshold_value` must be greater than 0 and less than 1.")
        }
    }
    if (threshold_type == "absolute") {
        if (threshold_value <= 1) {
            stop("Since `threshold_type` is set to `absolute`, `threshold_value` must be greater than 1.")
        }
    }
    # Get number of barcodes before thresholding
    pre_thresh_bc_num <- nrow(SummarizedExperiment::assays(your_SE)$counts)
    # Apply threshold
    your_data <- threshold(SummarizedExperiment::assays(your_SE)$counts, thresh = threshold_value, thresh_type = threshold_type)
    # Check that all samples still contain data
    if (any(colSums(your_data) == 0)) {
        bad_samples <- which(colSums(your_data) == 0)
        cat("The following samples have no data after thresholding. \n")
        cat(colnames(SummarizedExperiment::assays(your_SE)$counts)[bad_samples], sep = ", ")
        cat("\n")
        stop("Please try a more permissive threshold or remove the sample(s) prior to thresholding")
    }
    # Re-calculate other assays
    # (ranks by descending count; proportions per sample; normalized and
    # logged assays use the scale_factor / log_base stored in the SE metadata)
    your_data.ranks <- as.data.frame(apply(-your_data, 2, rank, ties.method = "min", na.last = "keep"))
    your_data.proportions <- as.data.frame(prop.table(as.matrix(your_data), 2))
    your_data.normalized <- your_data.proportions * your_SE@metadata$scale_factor
    your_data.logged <- log(1 + your_data.normalized, base = your_SE@metadata$log_base)
    # Create new thresholded SE
    thresh_SE <- SummarizedExperiment::SummarizedExperiment(
        assays = list(
            counts = your_data,
            proportions = your_data.proportions,
            ranks = your_data.ranks,
            normalized = your_data.normalized,
            logs = your_data.logged
        ),
        colData = your_SE@colData
    )
    # Update metadata with thresholding information
    S4Vectors::metadata(thresh_SE) <- S4Vectors::metadata(your_SE)
    S4Vectors::metadata(thresh_SE)$threshold_type <- threshold_type
    S4Vectors::metadata(thresh_SE)$threshold_value <- threshold_value
    # Print number of barcodes removed
    if (verbose) {
        cat("Removed", pre_thresh_bc_num - nrow(SummarizedExperiment::assays(thresh_SE)$counts), "barcodes from the supplied dataframe based on", threshold_type, "threshold of", threshold_value, "\n")
    }
    return(thresh_SE)
}
|
8a54d7daf25b98f34315c4e9de8b3de1ffb26786
|
f0035bfa6406697169bfa880aa8929e2d325a43a
|
/Streaming das mençoes a candidatos.R
|
5ec6761ab24774f3fd510928452c8c80a7129004
|
[] |
no_license
|
euricotu/ColetaEleicoes
|
d8d6145dacd32693755b187a9ec6e9ac1227a9f2
|
ecbf9fa3fabb77aefb79daafaa0f5346a2c8cde9
|
refs/heads/master
| 2020-03-26T12:29:21.526958
| 2018-08-24T02:24:40
| 2018-08-24T02:24:40
| 144,895,062
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,129
|
r
|
Streaming das mençoes a candidatos.R
|
library("rtweet")

# Twitter API credentials (fill in the keys/tokens before running)
app <- "rtweet_stream_ematos"
ckey <- ""
csec <- ""
atok <- ""
stok <- ""
twitter_token <- create_token(app, ckey, csec, atok, stok)

# Candidate account ids to follow in the stream (comma-separated string)
user_ids <- "33374761,74215006,762402774260875265,128372940,2670726740,354095556,105155795,870030409890910210,256730310,73745956,73889361,989899804200325121"
palavra <- "bolsonaro"  # keyword; currently unused by the stream call below

# Stream tweets from the listed accounts for 2 minutes and save the raw JSON
stream_tweets(q = user_ids,
              timeout = (60*2),
              parse = FALSE,
              file_name = "tweets.json",
              token = twitter_token)
dados <- parse_stream("tweets.json")

## Statistical information about the candidates
candidatos <- c("33374761", "74215006", "762402774260875265", "128372940", "2670726740",
                "105155795", "870030409890910210","256730310", "73745956", "73889361",
                "989899804200325121")
partidos <- c('','','')  # TODO: fill in the party account ids
infocandidatos <- lookup_users(candidatos)
infopartidos <- lookup_users(partidos)

# Renamed from `c`/`p` to avoid shadowing base::c / graphics symbols
cand_mat <- as.matrix(candidatos)
part_mat <- as.matrix(partidos)

# Save to CSV. Bug fix: the original wrote the candidate file twice and never
# saved the party data.
write.csv(cand_mat, file = 'dia1_candidatos.csv', fileEncoding = "UTF-8")
write.csv(part_mat, file = 'dia1_partidos.csv', fileEncoding = "UTF-8")
|
651bc3af0a559d9f1fe3070f11cfc6fd33d3a42e
|
7904e63563865a091329c4da11fe16bbbab6d9e4
|
/Airpassenger_R/using_arima/predict_arima.r
|
386d7e6820eb764ee9dc72c0346a2b7c0d59f8ee
|
[] |
no_license
|
am23/Internship_R_Scripts
|
b1e2e5e0b1d87ae28bffe3d70c9c16ae9a640871
|
649666369efd64d80f1cd7865229b690210d752f
|
refs/heads/master
| 2020-05-18T19:14:40.244690
| 2013-07-17T11:48:51
| 2013-07-17T11:48:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
predict_arima.r
|
# Forecast the AirPassengers-style series two years ahead with seasonal ARIMA.
# Read the raw passenger counts (one column of monthly values).
a<-read.table('original.txt')
# Convert to a monthly time series starting January 1949.
myts <- ts(a, start=c(1949,1), frequency=12)
# Fit a seasonal ARIMA(1,0,0)(2,1,0)[12] model by maximum likelihood.
fit1 <- arima(myts, order=c(1,0,0), list(order=c(2,1,0), period=12),method="ML")
# Forecast the next 24 months (n.ahead sets the prediction horizon).
fore1 <- predict(fit1, n.ahead=24)
# Plot observed (black) and predicted (blue) values on one chart.
ts.plot(myts,fore1$pred,gpars=list(xlab="time", ylab="People",col=c("black","blue")))
legend("topleft",cex=1, pch=18,col=c("black","blue"),legend=c("Original","Predicted"))
#Conclusion : The prediction seems very reasonable and a little growth can be seen over the time.
|
3f2f7566d36902b4125bf25b87a46ead388fe44a
|
66ee5b9cbe7f6b3a745cc8174deda69ef6b833b8
|
/R/utils.R
|
63da1402b828f57f8e2abf07d51952288b5ed1a6
|
[] |
no_license
|
SRenan/XCIR
|
5e4d2299ea57edbff200793e8d91f7814b27c79a
|
4e51efe9980056e7fe274224da0173fcfaf2edd7
|
refs/heads/master
| 2021-10-12T07:06:51.967393
| 2021-10-04T20:36:54
| 2021-10-04T20:36:54
| 69,993,016
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,960
|
r
|
utils.R
|
#' Read a list of known inactivated genes
#'
#' Read a list of gene symbols of known inactivated genes
#' to be used as training set in \code{betaBinomXI}.
#'
#' @param xciGenes A \code{character} or \code{NULL}. By default, return a
#' vector of 177 genes. Other available choices include "cotton" and "intersect".
#' If a file path is given, the genes will be read from the file.
#'
#' @details
#' Both gene lists are extracted from Cotton et al. Genome
#' Biology (2013). doi:10.1186/gb-2013-14-11-r122.
#' By default, the function returns a list that was used as training set in
#' the paper. This training set was generated as the intersection of the
#' silenced genes identified by both expression (Carrel & Willard, 2005) and
#' DNA methylation analysis (Cotton et al, 2011).
#' Setting it to "cotton" will instead return a list of 294 genes that were
#' classified as inactivated by Cotton et al.
#' "intersect" is the most stringent list which returns the intersection of
#' training and predicted set.
#'
#' @return A \code{character} vector of gene names.
#'
#' @examples
#' xcig <- readXCI()
#' xcig <- readXCI("cotton")
#'
#' @seealso \code{betaBinomXI}
#' @export
readXCI <- function(xciGenes = NULL){
    # Default: the 177-gene training set shipped with the package
    if(is.null(xciGenes)){
        return(readLines(system.file("extdata", "xciGene.txt", package = "XCIR")))
    }
    # A character vector of length > 1 is assumed to already be gene symbols
    # (fix: removed a dead `xci <- xciGenes` assignment before the return)
    if(length(xciGenes) > 1){
        return(xciGenes)
    }
    if(xciGenes == "cotton"){
        xci <- readLines(system.file("extdata", "xciGenes_cotton.txt", package = "XCIR"))
    } else if(xciGenes == "intersect"){
        # Most stringent list: training genes that were also predicted inactive
        xci177 <- readLines(system.file("extdata", "xciGene.txt", package = "XCIR"))
        xcic <- readLines(system.file("extdata", "xciGenes_cotton.txt", package = "XCIR"))
        xci <- intersect(xci177, xcic)
    } else if(file.exists(xciGenes)){
        # A single string that is an existing path: read gene symbols from it
        xci <- readLines(xciGenes)
    } else{
        stop("The file does not exist")
    }
    xci
}
# Find the most skewed samples within an XCIR data.table
# Skewing is measured as the median minor-allele fraction
# min(AD_hap1, AD_hap2) / (AD_hap1 + AD_hap2) across SNPs; the n samples with
# the lowest medians (most skewed) are returned. Errors if n is out of range.
.getSkewedSamples <- function(data, n = 2){
    nsamples <- length(unique(data$sample))
    if(n < 1 | n > nsamples)
        stop(paste("n should be a number between 1 and", nsamples))
    skewed_samples <- as.character(data[, median(pmin(AD_hap1, AD_hap2)/(AD_hap1+AD_hap2), na.rm = TRUE),
                                   by = sample][order(V1)][seq_len(n), sample])
    return(skewed_samples)
}
# Find the allelic imbalance for each sample
# Based on formula in Cotton et al. Genome Biology 2013
# NOTE(review): this definition is immediately overwritten by the second
# .getAI below, so it is dead code — kept for reference only.
.getAI <- function(calls, labels = FALSE){
    ai_dt <- unique(calls[, list(sample, GENE, POS, AD_hap1, AD_hap2, tot, f)])
    ai_dt <- unique(ai_dt[, list(sum(pmin(AD_hap1, AD_hap2)), sum(tot), f), by = sample]) # Assume that lowest expressed allele is Xi
    # pxa/pxi: fraction of reads attributed to the active/inactive X
    ai_dt[, pxa := (V2-V1)/V2]
    ai_dt[, pxi := V1/V2]
    ai_dt[, num := (pxa * (1-f)) + (pxi * f)]
    ai_dt[, denom := f * (pxi + pxa) + (1-f) * (pxi + pxa)]
    ai_dt[, ai := abs(num/denom -0.5)]
    # Diagnostic scatter of %Xi expression against allelic imbalance
    p <- ggplot(ai_dt[!is.na(f)], aes(ai, pxi)) + geom_point() + geom_smooth(method="loess") +
        ggtitle("%Xi expression vs. AI") + theme(plot.title = element_text(hjust = .5))
    if(labels)
        p <- p + geom_text(aes(label = sample), vjust = 1.2)
    print(p)
    return(ai_dt)
}
# Per-gene allelic imbalance (replaces the plotting version defined above).
# Note: the `:=` calls modify ai_dt by reference (data.table semantics); the
# last expression (the `ai` assignment) is the returned table.
.getAI <- function(calls){
    ai_dt <- unique(calls[, list(sample, GENE, AD_hap1, AD_hap2, tot, f, tau, p_value)])
    ai_dt[, xiexpr := pmin(AD_hap1, AD_hap2)/pmax(AD_hap1, AD_hap2)] #Assuming xaexpr is always 100%
    ai_dt[, num := (1-f) + xiexpr * f]
    ai_dt[, denom := ((1-f) * (xiexpr + 1)) + (f * (xiexpr + 1))]
    ai_dt[, ai := abs(num/denom - 0.5)]
}
# Samples with ai < cutoffs are subject to XCI
# NOTE(review): despite its name this function does not plot. It annotates the
# input table with per-gene sample counts (Nsamples) and, on rows with
# ai < cutoff, Ninac. The `:=` calls modify `ai` by reference (data.table),
# and the value returned is the last assignment — confirm intended contract.
plot_escape_fraction <- function(ai, cutoff = .1){
    fai <- ai[, Nsamples := .N, by = "GENE"]
    fai <- fai[ai < cutoff, Ninac := .N, ]
}
#' Sample estimates
#'
#' Extract the per-sample information (model used, skewing estimate and
#' beta-binomial parameter estimates) from an XCIR results table.
#'
#' @param bb_table A \code{data.table}. The table returned by \code{betaBinomXI}.
#'
#' @return A \code{data.table} with one entry per sample and information
#' regarding skewing and model fitting.
#'
#' @example inst/examples/betaBinomXI.R
#'
#' @export
sample_clean <- function(bb_table){
    keep_cols <- c("sample", "model", "f", "a_est", "b_est")
    unique(bb_table[, keep_cols, with = FALSE])
}
# Keep only the per-gene call columns of an XCIR results table (internal).
xcir_clean <- function(bb_table){
    keep_cols <- c("sample", "GENE", "AD_hap1", "AD_hap2", "f", "p_value", "pbb")
    unique(bb_table[, keep_cols, with = FALSE])
}
#' Classify X-genes
#'
#' Classify X-linked genes between Escape (E), Variable Escape (VE) and Silenced (S)
#'
#' @param xciObj A \code{data.table}. The table returned by \code{betaBinomXI}
#'
#' @return A \code{data.table} with genes and their XCI-state.
#'
#' @example inst/examples/betaBinomXI.R
#'
#' @export
getXCIstate <- function(xciObj){
    # Derive a per-sample E/S status from the escape test p-value if absent
    if(!"status" %in% names(xciObj))
        xciObj[, status := ifelse(p_value < 0.05, "E", "S")]
    # Count samples per (GENE, status); the CJ() cross-join pads missing
    # combinations so every gene has both an "E" and an "S" row (N set to 0)
    out <- setkey(xciObj, GENE, status)[, .N, by = c("GENE", "status")][CJ(GENE, status, unique = TRUE), allow.cartesian = TRUE][is.na(N), N := 0L]
    out[, Ntot := sum(N), by = "GENE"]
    outE <- out[status == "E"]#
    # pe: fraction of samples in which the gene escapes
    outE[, pe := N/Ntot]
    ret <- outE[, .(GENE, Ntot, Nesc = N, pe)]
    # <=25% escaping -> Silenced; >=75% -> Escape; otherwise Variable Escape
    ret[, XCIstate := ifelse(pe <= .25, "S", "VE")]
    ret[, XCIstate := ifelse(pe >= .75, "E", XCIstate)]
    return(ret)
}
# Convert a beta-distribution parametrization to shape parameters (alpha, beta).
# Accepts either mode/concentration (m, theta) or mean/variance (mu, sigma2);
# the mode/concentration pair takes precedence when both are supplied.
.betaAB <- function(m, theta, mu, sigma2){
    have_mode <- !is.null(m) && !is.null(theta)
    have_moments <- !is.null(mu) && !is.null(sigma2)
    if(have_mode){
        shape1 <- m * (theta - 2) + 1
        shape2 <- (1 - m) * (theta - 2) + 1
    } else if(have_moments){
        # nu = alpha + beta follows from the mean/variance relations
        nu <- (mu * (1 - mu))/sigma2 - 1
        shape1 <- mu * nu
        shape2 <- (1 - mu) * nu
    } else{
        stop("At least one pair of m/theta or mu/sigma2 must be specified")
    }
    c(shape1, shape2)
}
# Convert a beta-distribution parametrization to mode/concentration (m, theta).
# Accepts either shape parameters (alpha, beta) or mean/variance (mu, sigma2).
# Fix: the mu/sigma2 branch was empty in the original, which made the function
# error with undefined `m`/`theta` whenever only mean/variance were supplied.
.betaMT <- function(alpha, beta, mu, sigma2){
    if(!is.null(alpha) & !is.null(beta)){
        theta <- alpha + beta
        m <- (alpha-1)/(theta-2)
    } else if(!is.null(mu) & !is.null(sigma2)){
        # Recover the shapes from the moments, then convert to mode/concentration
        v <- (mu * (1-mu))/sigma2 - 1
        alpha <- mu*v
        beta <- (1-mu)*v
        theta <- alpha + beta
        m <- (alpha-1)/(theta-2)
    } else{
        stop("At least one pair of alpha/beta or mu/sigma2 must be specified")
    }
    return(c(m, theta))
}
# Convert a beta-distribution parametrization to mean/variance (mu, sigma2).
# Accepts either shape parameters (alpha, beta) or mode/concentration (m, theta).
# Fix: the original computed `mu <- m*(theta-2)+1/theta`, which by operator
# precedence is m*(theta-2) + (1/theta); the correct mean of the mode/
# concentration parametrization is (m*(theta-2)+1)/theta = alpha/theta.
.betaMV <- function(alpha, beta, m, theta){
    if(!is.null(alpha) & !is.null(beta)){
        mu <- alpha/(alpha + beta)
        sigma2 <- (alpha*beta)/((alpha+beta)^2 * (alpha+beta+1))
    } else if(!is.null(m) & !is.null(theta)){
        # Convert mode/concentration to shapes first, then reuse the
        # shape-based formulas (also resolves the original's "simplify" TODO)
        alpha <- m*(theta-2)+1
        beta <- (1-m)*(theta-2)+1
        mu <- alpha/(alpha + beta)
        sigma2 <- (alpha*beta)/((alpha+beta)^2 * (alpha+beta+1))
    } else{
        stop("At least one pair of alpha/beta or m/theta must be specified")
    }
    return(c(mu, sigma2))
}
#' Converting beta distribution parameters
#'
#' Convert parameter values between different beta distribution parametrization
#'
#' @param alpha A \code{numeric}. First shape parameter
#' @param beta A \code{numeric}. Second shape parameter
#' @param m A \code{numeric}. Mode
#' @param theta A \code{numeric}. Concentration
#' @param mu A \code{numeric}. Mean
#' @param sigma2 A \code{numeric}. Variance
#'
#' @details
#' Given any one complete pair (alpha/beta, m/theta, or mu/sigma2), the two
#' missing parametrizations are derived and all three are returned together.
#'
#' @return A named \code{numeric} with all equivalent formulations of the distribution.
#'
#' @examples
#'
#' betaParam(alpha = 5, beta = 5)
#' betaParam(m = 0.5, theta = 10)
#' betaParam(mu = 0.5, sigma2 = 0.02272727)
#'
#' @rdname BetaConversion
#' @export
betaParam <- function(alpha = NULL, beta = NULL, m = NULL, theta = NULL, mu = NULL, sigma2 = NULL){
    # Fill in each missing pair from whichever pair(s) were supplied
    if(is.null(alpha)){
        shapes <- .betaAB(m, theta, mu, sigma2)
        alpha <- shapes[1]
        beta <- shapes[2]
    }
    if(is.null(m)){
        mode_conc <- .betaMT(alpha, beta, mu, sigma2)
        m <- mode_conc[1]
        theta <- mode_conc[2]
    }
    if(is.null(mu)){
        moments <- .betaMV(alpha, beta, m, theta)
        mu <- moments[1]
        sigma2 <- moments[2]
    }
    c(alpha = alpha, beta = beta, m = m, theta = theta, mu = mu, sigma2 = sigma2)
}
|
fc29e69b25d4b06fed0cc8bdeba0fd4293eaf779
|
1d95129039dfe86fe4aba9c160749cadd9d7ff48
|
/man/viz_thickforest.Rd
|
ed8964f401b82fdace87ea84c271985b6f986c80
|
[] |
no_license
|
Mkossmeier/metaviz
|
3a10cb9d2cb1fdd82cd6814f39f98375dc11c85c
|
e1686fceb30294503479748edffb288b88153671
|
refs/heads/master
| 2021-01-22T02:53:24.447802
| 2020-04-07T17:42:00
| 2020-04-07T17:42:00
| 81,078,260
| 15
| 4
| null | 2019-01-24T13:37:31
| 2017-02-06T11:00:26
|
R
|
UTF-8
|
R
| false
| true
| 8,108
|
rd
|
viz_thickforest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viz_thickforest.R
\name{viz_thickforest}
\alias{viz_thickforest}
\title{Thick forest plots for meta-analyses}
\usage{
viz_thickforest(
x,
group = NULL,
type = "standard",
method = "FE",
study_labels = NULL,
summary_label = NULL,
confidence_level = 0.95,
col = "Blues",
summary_col = col,
tick_col = "firebrick",
text_size = 3,
xlab = "Effect",
x_limit = NULL,
x_trans_function = NULL,
x_breaks = NULL,
annotate_CI = FALSE,
study_table = NULL,
summary_table = NULL,
table_headers = NULL,
table_layout = NULL
)
}
\arguments{
\item{x}{data.frame or matrix with the effect sizes of all studies (e.g.,
correlations, log odds ratios, or Cohen \emph{d}) in the first column and their
respective standard errors in the second column. Alternatively, x can be the
output object of function \code{\link[metafor]{rma.uni}} from package
\pkg{metafor}; then effect sizes and standard errors are extracted from \code{x}.}
\item{group}{factor indicating the subgroup of each study to plot a subgroup forest plot. Has to be in the same order as \code{x}.}
\item{type}{character string indicating the type of forest plot to be plotted. Can be "standard" (default), "study_only",
"summary_only", "cumulative", or "sensitivity". See 'Details'.}
\item{method}{character string indicating which method should be used to compute the study weights and summary effect(s).
Can be any method argument from \code{\link[metafor]{rma.uni}}
(e.g., "FE" for the fixed effect model, or "DL" for the random effects model using the
DerSimonian-Laird method to estimate \eqn{\tau^2}{tau squared}).
If input \code{x} is an output object of function \code{\link[metafor]{rma.uni}} from package \pkg{metafor}, then the method is extracted from \code{x}.}
\item{study_labels}{a character vector with names/identifiers to annotate each study in the forest plot.
Has to be in the same order as \code{x}. Ignored if \code{study_table} and/or \code{summary_table} is supplied.}
\item{summary_label}{a character string specifying the name to annotate the summary effect. If a subgroup
analysis is plotted, \code{summary_label} should be a character vector with a name for each
subgroup summary effect, arranged in the order of the levels of \code{group}. Ignored if \code{study_table} and/or
\code{summary_table} is supplied.}
\item{confidence_level}{numeric value. The confidence level for the plotted confidence bars.}
\item{col}{character string specifying the color used for the study-level error bars. Can be a vector of length \code{nrow(x)}
with colors for each study-level result individually.}
\item{summary_col}{character string specifying the main color for plotting the summary effect(s). Can be a vector
with colors for each subgroup summary effect individually.}
\item{tick_col}{character string specifying the color used for the ticks indicating the point estimates.}
\item{text_size}{numeric value. Size of text in the forest plot. Default is 3.}
\item{xlab}{character string specifying the label of the x axis. By default also used for the header of the aligned table if \code{annotate_CI} is \code{TRUE}.}
\item{x_limit}{numeric vector of length 2 with the limits (minimum, maximum) of the x axis.}
\item{x_trans_function}{function to transform the labels of the x axis. Common uses are to transform
log-odds-ratios or log-risk-ratios with \code{exp} to their original scale (odds ratios and risk ratios), or Fisher's z values
back to correlation coefficients using \code{tanh}.}
\item{x_breaks}{numeric vector of values for the breaks on the x-axis. When used in tandem with \code{x_trans_function}
the supplied values should be not yet transformed.}
\item{annotate_CI}{logical scalar. Should the effect size and confidence interval values be shown as text in an aligned table on the right-hand side of the forest plot?}
\item{study_table}{a data.frame with additional study-level variables which should be shown in an aligned table.
Has to be in the same order as \code{x}.}
\item{summary_table}{a data.frame with additional summary-level information shown in an aligned table.
If \code{group} is supplied, \code{summary_table} must have a row for each subgroup
summary effect, arranged in the order of the levels of \code{group}.}
\item{table_headers}{character vector. Headers for each column of aligned tables via \code{study_table}, \code{summary_table}, or \code{annotate_CI}.}
\item{table_layout}{numeric layout matrix passed to \code{layout_matrix} of \code{\link[gridExtra]{arrangeGrob}}. Can be used to override the default spacing
of the forest plot and aligned tables via \code{study_table}, \code{summary_table}, and \code{annotate_CI}.}
}
\value{
A thick forest plot is created using ggplot2.
}
\description{
Creates a thick forest plot, a novel variant of the forest plot.
}
\details{
The thick forest plot was proposed by Schild and Voracek (2015) as a variant and
enhancement of classic forest plots. Thick forest plots use rectangular error bars
instead of traditional lines to display confidence intervals (width of the error bar), as well as the relative
meta-analytic weight (height of the error bar) of each study. In addition, study and summary level
point estimates are depicted clearly by a specific symbol.
Thick forest plots have the following advantages, as compared to classic forest plots:
\enumerate{
\item Using the height of bars proportional to the (relative) meta-analytic weight
causes small studies (with wide confidence intervals and less weight in the meta-analysis) to
be visually less dominant.
\item In classic forest plots, it is often hard to depict the magnitude of
point estimates to a reasonable degree of accuracy, especially for studies
with large meta-analytic weights and correspondingly large plotting symbols
(commonly squares). Specific symbols within the thick forest plot improve the
visualization of study point estimates.}
Note that for subgroup analysis the height of each error bar is scaled by the weight of each study within the subgroup divided by
the sum of the weights of all studies irrespective of subgroup. Therefore, with subgroups present, the overall impression of error
bar heights within a given subgroup compared to other subgroups conveys information about the relative precision of the meta-analytic
estimate within the subgroup.
}
\examples{
library(metaviz)
# Plotting a thick forest plot using the mozart data
viz_thickforest(x = mozart[, c("d", "se")],
study_labels = mozart[, "study_name"], xlab = "Cohen d")
# Visualizing a subgroup analysis of published and unpublished studies
viz_thickforest(x = mozart[, c("d", "se")], group = mozart[, "rr_lab"],
study_labels = mozart[, "study_name"], method = "REML",
summary_label = c("Summary (rr_lab = no)", "Summary (rr_lab = yes)"),
xlab = "Cohen d")
# Showing additional information in aligned tables. Log risk ratios are labeled
# in their original metric (risk ratios) on the x axis.
viz_thickforest(x = exrehab[, c("logrr", "logrr_se")],
annotate_CI = TRUE, xlab = "RR", x_trans_function = exp,
study_table = data.frame(
Name = exrehab[, "study_name"],
eventsT = paste(exrehab$ai, "/", exrehab$ai + exrehab$bi, sep = ""),
eventsC = paste(exrehab$ci, "/", exrehab$ci + exrehab$di, sep = "")),
summary_table = data.frame(
Name = "Summary",
eventsT = paste(sum(exrehab$ai), "/", sum(exrehab$ai + exrehab$bi), sep = ""),
eventsC = paste(sum(exrehab$ci), "/", sum(exrehab$ci + exrehab$di), sep = "")),
table_layout = matrix(c(1, 1, 2, 2, 3), nrow = 1))
}
\references{
Schild, A. H., & Voracek, M. (2015). Finding your way out of the
forest without a trail of bread crumbs: Development and evaluation of two
novel displays of forest plots. \emph{Research Synthesis Methods}, \emph{6},
74-86.
}
\author{
Michael Kossmeier* <michael.kossmeier@univie.ac.at>
Ulrich S. Tran* <ulrich.tran@univie.ac.at>
Martin Voracek* <martin.voracek@univie.ac.at>
*Department of Basic Psychological Research and Research Methods, School of Psychology, University of Vienna
}
|
e5c052a876d0cd16a11ff4354b4d8417cd7ad00d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FSA/examples/ksTest.Rd.R
|
782617c0ae41ce155716b97ffaebb7d18a7faa2f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
ksTest.Rd.R
|
# Extracted example code for FSA::ksTest (Kolmogorov-Smirnov tests).
# NOTE(review): results depend on the RNG state; a set.seed() call before the
# draws would make the output reproducible.
library(FSA)
### Name: ksTest
### Title: Kolmogorov-Smirnov Tests.
### Aliases: ksTest ksTest.default ksTest.formula
### Keywords: htest
### ** Examples
## see ks.test for other examples
x <- rnorm(50)   # 50 draws from a standard normal
y <- runif(30)   # 30 draws from Uniform(0, 1)
# Stack both samples into long format with a grouping factor for the
# formula interface below.
df <- data.frame(dat=c(x,y),grp=rep(c("X","Y"),c(50,30)))
## one-sample (from ks.test) still works
# ksTest should agree with stats::ks.test in the one-sample case.
ksTest(x+2, "pgamma", 3, 2)
ks.test(x+2, "pgamma", 3, 2)
## first two-sample example in ?ks.test
ksTest(x,y)
ks.test(x,y)
## same as above but using data.frame and formula
ksTest(dat~grp,data=df)
|
b943e287d53c5291690a7d3ddeaf726a7f6d9290
|
69c02cafd31ee8b6ff88707fb6148caac49e4375
|
/C4_Exploratory Analysis/Project_2_Final/Coursera_output/Plot6.R
|
3a1e4b6f169faa4ea932d8de94fbfd6c1ddb1b4a
|
[] |
no_license
|
pjbaudin/Data-Science-Study
|
8909430ac169faf268f0de7bf08c5b54a7d18d6f
|
0c64c704aa4d2cbb9227c4fe06e12c74d50ede9b
|
refs/heads/master
| 2021-01-13T03:41:42.931943
| 2017-05-15T02:19:27
| 2017-05-15T02:19:27
| 77,277,928
| 0
| 0
| null | 2017-05-14T05:44:24
| 2016-12-24T10:24:27
|
HTML
|
UTF-8
|
R
| false
| false
| 1,041
|
r
|
Plot6.R
|
# Plot 6
# Compare PM2.5 motor-vehicle (ON-ROAD) emissions for Baltimore City and
# Los Angeles County across the 1999-2008 NEI surveys.

# Import dataset (NEI = emission records, SCC = source classification codes)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Load library
library(ggplot2)
library(dplyr)
library(magrittr)

# Lookup table mapping fips codes to readable location names.
City <- data.frame(fips = c("24510", "06037"), City = c("Baltimore City", "Los Angeles County, California"))

# Filter NEI to keep On-road type and Baltimore city and Los Angeles County only.
# suppressWarnings(): left_join() warns when joining factor columns with
# different levels; the coercion to character is harmless here.
MotorComp <- suppressWarnings(NEI %>%
    filter(type == "ON-ROAD" & (fips == "24510" | fips == "06037")) %>%
    droplevels() %>%
    left_join(City, by = "fips"))

# Plot the result
png(filename = "Plot6.png")
# Fix: the original passed `fill = City` to ggplot() outside aes(), where it
# is silently ignored; the fill aesthetic is actually set by aes(fill = year)
# in geom_bar() below, and the city split is handled by the facet.
ggplot(MotorComp, aes(x = factor(year), y = Emissions)) +
    geom_bar(aes(fill = year), stat = "identity") +
    facet_grid(.~ City, scales = "free", space = "free") +
    theme_light() +
    labs(x = "Year", y = "Total PM2.5 Emission (Tons)",
         title = "PM2.5 Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008")
dev.off()
|
68de0bfb2eab7e9acc260192e162a5cbb72ee919
|
1faa849036ff058507a07f918d6da9702bbebed4
|
/app.R
|
25396436e79d11da07eb627ab87438fa5a76e28a
|
[] |
no_license
|
wesleycoates/KmeansShinyApp
|
1324734920c51fb641a30e07fcf241e1251a6100
|
bab2b3b4323cd9a13be3e8d799ed5a17c750febb
|
refs/heads/main
| 2023-01-21T09:57:19.727325
| 2020-12-02T03:23:06
| 2020-12-02T03:23:06
| 317,710,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,692
|
r
|
app.R
|
##This app is using one year of sleep data from August 2019 to August 2020
##Building off of the Kmeans app
## install.packages('rsconnect') and load its library(rsconnect)
## configure your R instance and rsconnect with your shinyapps.io account
##rsconnect::setAccountInfo(name='wesleycoates',
##token='<BLARG>',
##secret='<SECRET>')
## install readxl if you must: install.packages("readxl")
library("readxl")
## create a dataframe from the Excel file
# NOTE(review): the .xlsx file must sit in the app directory; read_xlsx()
# errors if it is missing.
sleep_scores <- read_xlsx("sleep_scores_Aug2019_2020.xlsx",
                          sheet = "SleepScores")
## need to omit first 2 fields from this dataframe, as they won't work in Kmeans
# Keep only columns 3-9 (presumably the numeric sleep metrics -- confirm
# against the spreadsheet) since kmeans() needs numeric input.
sleep_kmeans <- sleep_scores[c(3,4,5,6,7,8,9)]
## now it's time to create the web app!
# if needed install.packages('shiny')
library(shiny)
# User Interface side of the code:
# two dropdowns select the x/y columns to plot, a numeric input picks the
# number of clusters (1-9), and the main panel shows the cluster plot.
ui <- fluidPage(
  headerPanel("Matt's sleep k-means clustering"),
  sidebarPanel(
    selectInput('xcol', 'X Variable', names(sleep_kmeans)),
    selectInput('ycol', 'Y Variable', names(sleep_kmeans),
                selected = names(sleep_kmeans)[[2]]),
    numericInput('clusters', 'Cluster count', 1, min = 1, max = 9)
  ),
  mainPanel(
    plotOutput('plot1')
  )
)
# And this is the server-side of the code
server <- function(input, output) {
  # Reactive: the two user-selected columns of the sleep data.
  selectedData <- reactive({
    sleep_kmeans[, c(input$xcol, input$ycol)]
  })
  # Reactive: re-run k-means whenever the data selection or cluster count changes.
  clusters <- reactive({
    kmeans(selectedData(), input$clusters)
  })
  # Scatter plot coloured by cluster assignment, with X marks at the centers.
  output$plot1 <- renderPlot({
    par(mar = c(5.1, 4.1, 0, 1))
    plot(selectedData(),
         col = clusters()$cluster,
         pch = 20, cex = 3)
    points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
  })
}
shinyApp(ui = ui, server = server)
|
8e019c9a446618fa3f1127f2ec45d7731f2637fb
|
a4efc97580ebcf91bc69bbd71be5d6d2d4a30352
|
/post-81.r
|
ffa6b3b28747b2f137601951978423366829ff75
|
[] |
no_license
|
maruko-rosso/datasciencehenomiti
|
9a078f902904f9a2d4bca61407395ac71b929036
|
8060b65bf1d3aec5055de0d65187126569cad7a7
|
refs/heads/master
| 2022-06-26T18:38:47.641246
| 2022-06-26T10:00:15
| 2022-06-26T10:00:15
| 231,026,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
post-81.r
|
##### Building and understanding variance ####
# Visualize all heights at a single x position (a one-dimensional strip).
library(ggplot2)
ggplot(HightOfStudent,aes(x = 1,y = 身長)) +
  geom_point() +
  xlab("") +
  geom_text(aes(label = 生徒名),vjust= -0.2,hjust= -0.2) +
  ggtitle("身長を一直線に可視化")
#### Spread the one-line visualization horizontally ####
# Same data, but one x position per student.
ggplot(HightOfStudent,aes(x = 生徒名,y = 身長)) +
  geom_point() +
  geom_text(aes(label = 生徒名),vjust= -0.2,hjust= -0.2) +
  ggtitle("一直線の可視化を横に展開")
#### Height visualization + mean line ####
# NOTE(review): `scales` is used as an x column of HightOfStudent here --
# confirm that column exists in the data.
ggplot(HightOfStudent,aes(x = scales,y = 身長)) +
  geom_point() +
  geom_line(aes(y = 身長の平均,col = "red" ),stat = "identity")+
  geom_text(aes(label = 生徒名),vjust= -0.2,hjust= -0.2) +
  ggtitle("身長の可視化+平均線") +
  xlab("") +
  theme(legend.position = "none")
#### Calculation ####
HightOfStudent$偏差 <- HightOfStudent$身長 - HightOfStudent$身長の平均 # deviation: height minus mean height
HightOfStudent$偏差の二乗 <- HightOfStudent$偏差 ^ 2 # square each deviation
Varience <- sum(HightOfStudent$偏差の二乗) / length(HightOfStudent$身長) # sum of squared deviations / number of observations
Varience # variance (note: this is the biased/population variance, not the n-1 sample variance!)
|
5687877c9e3c03507025d74c8ca28cec7ddf3c1c
|
0fdfe67718008e3a27f626344c5b5c56d6d5b58b
|
/vaers/vaers.R
|
4daddfd3c094497ed6d1db8b5693c24ff222607b
|
[] |
no_license
|
tundraka/analysis
|
899d901a0627896c36c50d8bb7a32d50d4853c60
|
9ca1ff962a7c0c15f995db47804641fd035be5f0
|
refs/heads/master
| 2020-12-19T22:08:19.380833
| 2016-05-29T04:12:49
| 2016-05-29T04:12:49
| 39,984,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,517
|
r
|
vaers.R
|
# Information about the data needed in this script can be found in the README.
# https://github.com/tundraka/analysis/blob/master/vaers/README.md
#
# Exploratory analysis of 2014 VAERS (Vaccine Adverse Event Reporting System)
# data: reports by state, sex, age group and vaccine type.
library(data.table)
library(stringr)
library(ggplot2)
# Label used in every plot title; dates are kept as plain strings.
dataLabel <- '2014 VAERS'
datesAs <- 'character'
#
# READING VAERS DATA
#
vaersDataFile <- 'data/VAERS/2014/2014VAERSDATA.CSV'
# Column classes/names follow the ordering of the VAERS data dictionary.
vaersColClasses <- c('numeric', datesAs, 'factor', rep('numeric', 3), 'factor',
                     datesAs, 'character', 'factor', datesAs, rep('factor', 3),
                     'numeric', rep('factor', 3), rep(datesAs, 2), 'numeric',
                     'character', rep('factor', 2), rep('character', 5))
vaersColNames <- c('vaersid', 'datereceived', 'state', 'age', 'ageyears',
                   'agemonths', 'sex', 'reportdate', 'symptoms', 'died',
                   'datedied', 'lifethreateningeffect', 'ervisit', 'hospitalized',
                   'hospitalizeddays', 'prolongedhospitalization', 'disable',
                   'recovered', 'vaccinationdate', 'onsetdate', 'numdays',
                   'labdata', 'administeredby', 'fundby', 'othermeds',
                   'currentillnesses', 'history', 'priorvaccinations',
                   'naufacturernumber')
# fread is reading string fields that contain a , as different fields, looks like
# this is a know issue.
vaersdata <- as.data.table(read.csv(vaersDataFile, colClasses=vaersColClasses))
# TODO set better column names.
currentColNames <- names(vaersdata)
setnames(vaersdata, currentColNames, vaersColNames)
#
# READING VAERSVAX DATA
#
vaxDataFile <- 'data/VAERS/2014/2014VAERSVAX.csv'
vaxColClasses <- c('numeric', 'factor', rep('character', 3), rep('factor', 2),
                   'character')
vaxColNames <- c('vaersid', 'type', 'manufacturer', 'lot', 'dose', 'route',
                 'site', 'name')
vaxdata <- fread(vaxDataFile, colClasses=vaxColClasses)
setnames(vaxdata, names(vaxdata), vaxColNames)
# vax types should be all upper case
vaxdata[,type:=toupper(type)]
#
# PROCESSING
#
# According to the VAERS code book, the age fields represent:
#
# The values for this variable range from 0 to <1. It is only calculated for
# patients age 2 years or less. The variables CAGE_YR and CAGE_MO work in
# conjunction to specify the calculated age of a person. For example, if
# CAGE_YR=1 and CAGE_MO=.5 then the age of the individual is 1.5 years or 1 year
# 6 months.
# Sanity check: count records where the reported age disagrees with the
# year + month breakdown.
ageDifference <- vaersdata[(age != (ageyears + agemonths)), .N]
if (ageDifference > 0) {
    print('The reported age in "age" and the sum of "ageyears" + "agemonths" is')
    print(paste('different in:', ageDifference, 'records'))
}
#
# Reports created by state. What are the states that report the most cases.
#
totsByState <- vaersdata[,.(tot=.N), .(state)][order(-tot)]
topTot <- 20
ggplot(totsByState[1:topTot], aes(state, tot)) +
    geom_bar(stat='identity') +
    labs(title=paste(paste(dataLabel, ': Reports by top', topTot, 'states')))
# Let's select only the top 10 states.
#
# TODO. I need to order based on the total. Right now it's ordering by the state
# and sex since the total by state is divided between male/female/unknown. Probably
# what I'll need to do is some melt/dcast
topTot <- 10
totsBySexState <- vaersdata[state %in% totsByState[1:topTot, .(state)]$state,
                            .(tot=.N),
                            .(state, sex)][order(state, sex)]
ggplot(totsBySexState, aes(state, tot, fill=sex)) +
    geom_bar(stat='identity') +
    labs(title=paste(dataLabel, ': Top', topTot, 'states with more reports'))
# What's the age distribution in the VAERS reports?
ageDist <- vaersdata[!is.na(ageyears),.(tot=.N),.(ageyears)][order(ageyears)]
ggplot(ageDist, aes(ageyears, tot)) +
    geom_bar(stat='identity') +
    labs(title=paste(dataLabel, ': Age distribution'))
# What's the age distribution and sex?
# Bin ages; -1 catches age 0 and 2000 catches implausible/unknown large ages.
ageBreaks <- c(-1, 1, 5, seq(10, 100, by=10), 2000)
#ageLabels <- c('0-1', '1-5', '5-10', '10-20', '20-30', '30-40', '40-60', '60+')
ages <- vaersdata[!is.na(ageyears), .(ageyears, sex)]
ages[,agesegment:=cut(ages$ageyears, ageBreaks)]#, labels=ageLabels)]
ggplot(ages, aes(agesegment, fill=sex)) +
    geom_bar() +
    labs(title=paste(dataLabel, ': Reports by age.')) +
    labs(x='Age group') +
    labs(y='Total')
# What are the vaccines that generated the most reports by state
# Join the report-level data with the vaccine-level data on the report id.
vaxinfo <- merge(vaersdata[, .(vaersid, state, sex, ageyears)],
                 vaxdata[,.(vaersid, type, manufacturer, name)],
                 by='vaersid')
topStatesVaxInfo <- vaxinfo[state %in% totsByState[1:topTot, .(state)]$state &
                            ageyears < 2.5,
                            .(tot=.N),
                            .(sex, state, type)]
# todo, this needs to be refined.
ggplot(topStatesVaxInfo, aes(state, tot, fill=type)) +
    geom_bar(stat='identity') +
    facet_grid(sex ~ .)
# Most commons reported vaccines for <5 yrs
# If we will be working with ages, let's remove the NAs.
vaxinfoages <- vaxinfo[!is.na(ageyears)]
ageSegments <- cut(vaxinfoages$ageyears, c(-1, 1, 2.5, 5, 12, 15, 22, 35, 50, 60, 1000))
vaxinfoages[,agesegment:=ageSegments]
vax25 <- vaxinfoages[ageyears<=5,
                     .(tot=.N),
                     .(type, sex, agesegment)][order(-tot)]
ggplot(vax25, aes(type, tot, fill=sex)) +
    geom_bar(stat='identity') +
    facet_grid(agesegment ~ .) +
    theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
    labs(title=paste(dataLabel, ':<=5 yrs old, vaccines most reported by gender')) +
    labs(x='Vaccine') +
    labs(y='Reports/Age group')
# Same breakdown for ages 5 (exclusive) through 22.
vax522 <- vaxinfoages[ageyears>5 & ageyears<=22,
                      .(tot=.N),
                      .(type, sex, agesegment)][order(-tot)]
ggplot(vax522, aes(type, tot, fill=sex)) +
    geom_bar(stat='identity') +
    facet_grid(agesegment ~ .) +
    theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
    labs(title=paste(dataLabel,
                     ':22>=age>5 yrs old, vaccines most reported by gender')) +
    labs(x='Vaccine') +
    labs(y='Reports/Age group')
# And for adults over 22.
vax22p <- vaxinfoages[ageyears>22,
                      .(tot=.N),
                      .(type, sex, agesegment)][order(-tot)]
ggplot(vax22p, aes(type, tot, fill=sex)) +
    geom_bar(stat='identity') +
    facet_grid(agesegment ~ .) +
    theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)) +
    labs(title=paste(dataLabel,
                     ':>22 yrs old, vaccines most reported by gender')) +
    labs(x='Vaccine') +
    labs(y='Reports/Age group')
# What are the vaccine reports by age?
|
74a9920c64368e86692215dc3e1439708a6e3000
|
53c79f2ee9ea1ebb4b2050bb12f98416ac543d94
|
/ui.R
|
54b8e58babc4a5e85ae838fa97641ab23ec30c5a
|
[] |
no_license
|
kenyang88/DevelopingDataProducts-Week4Project
|
30c3bebd31dee12eeeb6e5f4e2ceaf7c48a404ae
|
9bbb68bebbaad48dd15d5b2ace74a391a6412e26
|
refs/heads/main
| 2023-05-23T06:23:21.136210
| 2021-06-11T04:30:53
| 2021-06-11T04:30:53
| 375,887,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,103
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)

# Define UI for application of BMI calculator.
# The sidebar collects the measurement system, weight and height; the main
# panel shows the computed BMI and a reference chart.
shinyUI(fluidPage(

    # Application title
    titlePanel("BMI Calculator"),

    # Sidebar with the calculator inputs
    sidebarLayout(
        sidebarPanel(
            helpText("Enter your weight and height to know how fit you are!"),
            # The selected value (1 or 703) is presumably the BMI conversion
            # factor used by the server (703 for lb/inches) -- confirm in server.R.
            selectInput("select_measure", label = h6("Select the measurement"), choices = list("Weight (kg) vs Height (cm)" = 1, "Weight (lb) vs. Height (inches)" = 703), selected = 1),
            numericInput("num_weight", label = h6("Enter your weight"), min = 1, value = NULL),
            numericInput("num_height", label = h6("Enter your height"), min = 1, value = NULL),
            actionButton("action_Calc", label = "CALCULATE")
        ),
        mainPanel(
            tabsetPanel(
                tabPanel("BMI",
                         p(h5("")), textOutput("text_weight"),
                         textOutput("text_height"), p(h5("Body Mass Index(BMI):")),
                         textOutput("text_bmi")
                ),
                tabPanel("BMI Chart",
                         p(h4("BMI Calculator:")),
                         # Fix: "NLBI" -> "NHLBI", the correct acronym for the
                         # National Heart, Lung, and Blood Institute.
                         helpText("Statistical Categories of BMI as given by the National Heart, Lung, and Blood Institute (NHLBI)"),
                         HTML(
                             # Fix: corrected the displayed typo "Obsesity" -> "Obesity".
                             "<br> </br>
                             <b> less than 18.5 </b> = underweight <br>
                             <br> </br>
                             <b> Between 18.5 and 24.9 </b> = Normal weight <br>
                             <br> </br>
                             <b> Between 25 and 29.9 </b> = Overweight <br>
                             <br> </br>
                             <b> greater than 30 </b> = Obesity <br>"
                         )
                )
            )
        )
    )
))
|
e73504dc397b3aae6fbe494eb56c50a0ea154f61
|
7c7b3517fdf83f3009a31e48405745ed5fbc7f80
|
/exercise-6/exercise.R
|
beb7547e1e1e304bff8f13d061f6f0023632f731
|
[
"MIT"
] |
permissive
|
davidl357/module10-dplyr
|
cd529b59992942132160c6ca647677448767a3fe
|
3f8f4b191f56a2a366bc0bd92738ce870c83e513
|
refs/heads/master
| 2021-01-11T15:55:14.638020
| 2017-01-26T23:19:28
| 2017-01-26T23:19:28
| 79,956,053
| 2
| 0
| null | 2017-01-24T21:26:34
| 2017-01-24T21:26:34
| null |
UTF-8
|
R
| false
| false
| 931
|
r
|
exercise.R
|
# Exercise 6: DPLYR join introduction
# Install the nycflights13 package and read it in. Require the dplyr package.
# install.packages("nycflights13")
# NOTE(review): `flights` and `airports` come from nycflights13 and `%>%`,
# filter/summarise/etc. from dplyr; both packages must be loaded before this runs.
# Create a dataframe of the average arrival delay for each destination, then use `left_join()`
# to join on the "airports" dataframe, which has the airport info
avg.arrival.delay <- flights %>%
  group_by(dest) %>%
  summarise(avg.delay = mean(arr_delay, na.rm = TRUE)) %>%
  mutate(faa = dest) %>%                  # duplicate the key under the name `airports` uses
  left_join(airports, by = 'faa') %>%
  arrange(-avg.delay)                     # most-delayed destinations first
# Create a dataframe of the average arrival delay for each airline, then use `left_join()``
# to join on the "airlines" dataframe, which has the airline info
### Bonus ###
# Calculate the average delay by city AND airline, then merge on the city and airline information
# If you're running into sorting issues:
# http://stackoverflow.com/questions/26555297/dplyr-arrange-a-grouped-df-by-group-variable-not-working
|
6d754b758d53898a08dad8a560c5a4e299fc9fc2
|
76537f8b121711152e8f4ac2a74579f4d7f46264
|
/R/Plot.R
|
2d86aa409890e8d2115e3ec9e41789dfb2c36d62
|
[
"MIT"
] |
permissive
|
KehaoWu/GWAScFDR
|
49f1cf7d1a9d8ff8579375f24b5e6ae3f4b3b64e
|
c7a0fbfea8fab5c9aed557b1503e2ca64592b3b2
|
refs/heads/master
| 2020-04-07T22:57:35.197138
| 2015-11-22T01:13:40
| 2015-11-22T01:13:40
| 42,730,656
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,876
|
r
|
Plot.R
|
library(ggplot2)
# Stratified Q-Q plot: one Q-Q curve of the primary p-values (p1) per
# conditional cutoff applied to the secondary p-values (p2).
#
# p1, p2: paired per-SNP p-value vectors (same length, same order).
# xlab, ylab: axis labels.
# Returns a ggplot object.
stratifiedQQplot = function(p1,p2,xlab="Nominal -log p conditional",ylab="Nominal -log p"){
  library(ggplot2)
  # Fix: p1 and p2 are paired, so zero p-values must be dropped jointly.
  # The original filtered each vector independently, which misaligns the
  # pairs whenever the zero counts differ.
  keep = p1 != 0 & p2 != 0
  p1 = p1[keep]
  p2 = p2[keep]
  dat = NULL
  for(cutoff in c(1,0.1,0.01,0.001,0.0001)){
    # Stratum: primary p-values whose conditional p-value passes the cutoff.
    p = p1[p2<=cutoff]
    # Expected quantiles under the uniform null, on the -log10 scale.
    x = -log10(seq(from = 0,to = 1,length.out = length(p)+1))[-1]
    y = sort(-log10(p),decreasing = T)
    dat = rbind(dat,data.frame(x=x,y=y,cutoff))
  }
  dat$cutoff = factor(dat$cutoff)
  p = ggplot(dat,aes(x=x,y=y,fill=cutoff,colour=cutoff)) +
    geom_line(size=1.2) +
    geom_abline(intercept=0,slope=1) +
    labs(title = "Stratified Q-Q Plot") +
    labs(x = xlab) +
    labs(y = ylab) +
    ylim(0,log10(length(p1)))
  # Return the plot visibly (the original ended on an assignment, which
  # returns invisibly and so nothing rendered at top level); also dropped a
  # leftover debug print().
  p
}
# Stratified true-discovery-rate plot: for each conditional cutoff on p2,
# plot TDR (1 - estimated FDR) of the primary p-values against -log10 p.
#
# p1, p2: paired per-SNP p-value vectors.
# Returns a ggplot object.
stratifiedTDRplot = function(p1,p2){
  library(ggplot2)
  # Build one data frame per conditional cutoff, then stack them.
  strata = lapply(c(1,0.1,0.01,0.001), function(cutoff){
    p = p1[p2<=cutoff]
    # FDR estimate at each rank: p * n / rank(p); capped at 1 below.
    fdr = p * length(p) / (order(order(p)))
    data.frame(x = -log10(p),
               y = 1- ifelse(fdr>=1,1,fdr),
               cutoff)
  })
  dat = do.call(rbind, strata)
  dat$cutoff = factor(dat$cutoff)
  ggplot(dat,aes(x=x,y=y,fill=cutoff,colour=cutoff)) +
    geom_line() +
    labs(title = "Stratified True Discovery Rate Plot") +
    labs(x = "Nominal -log p") +
    labs(y = "TDR: 1 - FDR")
}
# Q-Q plot comparing raw p-values against genomic-control-adjusted ones.
#
# p: vector of p-values.
# Returns a ggplot object with one line per series ("raw" and "Adjusted").
stratifiedQQForGenomeControlplot = function(p){
  library(ggplot2)
  p = sort(p)
  # GenomicControl() is defined elsewhere in this package; presumably it
  # applies lambda-based genomic-control adjustment -- TODO confirm.
  y = -log10(GenomicControl(p))
  # Expected uniform quantiles on the -log10 scale.
  x = -log10(seq(from = 0,to = 1,length.out = length(p)+1)[-1])
  dat = data.frame(x=x,y=y,type="Adjusted")
  y = -log10(p)
  dat = rbind(dat,data.frame(x=x,y=y,type="raw"))
  # NOTE(review): `p` is reused here for the plot object, shadowing the
  # p-value argument above.
  p = ggplot(dat) +
    geom_line(aes(x=x,y=y,colour=type)) +
    geom_abline(intercept=0,slope=1)
  p
}
# Manhattan plot of GWAS p-values.
#
# pvalue: per-marker p-values.
# bp: base-pair position of each marker within its chromosome.
# chr: chromosome of each marker (numeric, or "chrN" strings).
# gene: optional per-marker gene labels; markers with p < 0.05 are annotated.
# ylab: y-axis label.
# cutoffline: draw a red reference line at -log10(0.05)?
# chrLabel: chromosome identifiers, in the order they should be plotted.
# Returns a ggplot object.
manhattanPlot = function(pvalue,bp,chr,gene=NULL,
                         ylab=expression(-log[10](P)),
                         cutoffline=T,
                         chrLabel = 1:22){
  # Accept "chrN"-style chromosome names.
  if(grepl(pattern = "chr",x = chr[1])){
    chr = as.numeric(gsub(pattern = "chr",replacement = "",x = chr))
  }
  # Drop zero p-values (no finite -log10) and markers without a chromosome;
  # all four vectors are filtered in lockstep to stay aligned.
  bp = bp[pvalue!=0]
  chr = chr[pvalue!=0]
  gene = gene[pvalue!=0]
  pvalue = pvalue[pvalue!=0]
  pvalue = pvalue[!is.na(chr)]
  bp = bp[!is.na(chr)]
  gene = gene[!is.na(chr)]
  chr = chr[!is.na(chr)]
  pvalue = -log10(pvalue)
  pvalue = ifelse(pvalue<0,0,pvalue)
  # Lay the chromosomes end-to-end along a single x axis, recording each
  # chromosome's span (for the background bands) and midpoint (axis labels).
  nChr <- length(chrLabel)
  bpMidVec <- vector(length=nChr)
  bmin <- vector(length=nChr)
  bmax <- vector(length=nChr)
  maxbp <- 0
  # Fix: index the per-chromosome vectors by position (k) instead of by the
  # chromosome id itself, so non-contiguous chrLabel values also work.
  for(k in seq_along(chrLabel)){
    i <- chrLabel[k]
    bp[chr==i] = bp[chr==i] + maxbp
    bmin[k] <- min(bp[chr==i])
    bmax[k] <- max(bp[chr==i])
    bpMidVec[k] <- ((max(bp[chr==i]) - min(bp[chr==i]))/2) + min(bp[chr==i])
    maxbp = max(bp[chr==i])
  }
  chr = factor(chr)
  # y-axis tick positions: 0, 1, the 0.05 cutoff, then integer steps.
  yLabel = round(c(0,1,-log10(0.05),2:(max(pvalue)[1])),digits = 2)
  p = ggplot() +
    # Alternating grey/white background bands, one per chromosome.
    # Fix: band and point colours previously hard-coded 22 chromosomes
    # (rep(..., 11)); rep_len() adapts to the actual length of chrLabel.
    geom_rect(data = data.frame(bmin,bmax,alpha=0.01),
              aes(xmin=bmin,xmax=bmax,alpha=alpha),
              ymin=0,
              ymax=Inf,
              fill = rep_len(c("grey90","white"), nChr),
              size = 0
    ) +
    geom_point(data = data.frame(P=pvalue,BP=bp,CHR=chr),
               aes(y=P,x=BP,colour=CHR),
               alpha=0.8) +
    ylim(0,1.3*max(pvalue)) +
    scale_x_continuous(labels=as.character(chrLabel), breaks=bpMidVec) +
    scale_y_continuous(labels=as.character(yLabel), breaks=yLabel) +
    scale_color_manual(values=rep_len(c('orange1', 'grey20'), nChr)) +
    theme_bw() +
    theme(
      panel.grid=element_blank()
    ) +
    xlab("Chromosomal Location") +
    ylab(ylab) +
    theme(legend.position='none')
  if (cutoffline){
    # Fix: geom_hline() takes `yintercept`, not `y`; with the old spelling
    # current ggplot2 ignores the parameter and no cutoff line is drawn.
    p = p + geom_hline(yintercept=-log10(0.05), linetype=1, col='red', lwd=1)
  }
  if (!is.null(gene)){
    # Annotate markers that clear the 0.05 cutoff with their gene name.
    x = bp[pvalue>=-log10(0.05)]
    g = gene[pvalue>=-log10(0.05)]
    y = pvalue[pvalue>=-log10(0.05)]
    p = p +
      geom_text(data=data.frame(y=y,x=x,gene=g),
                aes(y=y,x=x,label=gene),
                hjust=0)
  }
  p
}
# Scatter plot of paired -log10 p-values coloured by -log10 conditional FDR.
#
# p1, p2: paired p-value vectors (axes).
# cFDR: per-pair conditional FDR values (list or vector).
# Side effect: draws the plot; no useful return value.
cFDRDotPlot = function(p1,p2,cFDR){
  x = -log10(p1)
  y = -log10(p2)
  # Cap each cFDR at 1 before the log transform so z is non-negative.
  z = -log10(unlist(lapply(cFDR,FUN = function(x)min(x,1))))
  # NOTE(review): rescale() is not defined in this file -- presumably
  # scales::rescale(); confirm the scales package is attached by callers.
  p = ggplot(data=data.frame(x,y,z)) +
    geom_point(aes(x=x,y=y,color=z)) +
    ylim(0,7) +
    xlim(0,7) +
    scale_colour_gradientn(colours=c("white","yellow","orange","tomato","red"),
                           values=rescale(c(0,1,2,4,max(z))),
                           space = "Lab")
  plot(p)
}
# Q-Q plot of -log10 p-values with one line per stratum defined by `y`.
#
# x: vector of p-values.
# y: grouping vector (coerced to factor); one Q-Q line is drawn per level.
# Returns a ggplot object.
QQplot = function(x,y = NULL){
  fs = factor(y)
  maxValue = xx = yy = grp = NULL
  x = -log10(x)
  for(f in levels(fs)){
    # Fix: accumulate the per-stratum maxima. The original assigned
    # maxValue = c(max(...)) each iteration, so min(maxValue) below was just
    # the last stratum's maximum instead of the smallest maximum over strata.
    maxValue = c(maxValue, max(x[fs==f]))
    yyy = x[fs==f]
    yy = c(yy,sort(yyy,decreasing = T))
    # Expected -log10 quantiles under the uniform null for this stratum.
    xx = c(xx,-log10((1:sum(fs==f))/sum(fs==f)))
    # Fix: build the group column in the same stratum-by-stratum order as
    # xx/yy. The original used `fs` (original input order) directly, which
    # misassigns colours whenever the input is not already sorted by level.
    grp = c(grp, rep(f, sum(fs==f)))
  }
  p = ggplot(data=data.frame(
    x = xx, y = yy, SNPs = grp
  )) +
    geom_line(aes(x=x,y=y,colour=SNPs)) +
    theme_bw() +
    theme(
      panel.grid=element_blank()
    ) +
    xlab(paste("Empirical",expression(-log10(q)))) +
    ylab(paste("Nominal",expression(-log10(p)))) +
    # Horizontal reference line at 80% of the smallest stratum maximum.
    geom_abline(intercept = 0.8*min(maxValue),slope=0,linetype=2,colour="red")
  p
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.