blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96210da13a158bd5f8fb86407c65b29c125018a6
|
92626a21f23ab35e82cb439255e10cde2a7047c1
|
/tests/testthat/test-TPCfunctions.R
|
20bd1356021e61a1c6342f9098d1679380d9a556
|
[
"MIT"
] |
permissive
|
ArchiYujie/TrenchR
|
04630ddd078eca187a517c0c98e59065b3054a74
|
f45c2f0b54eab4ce578c0b3b631f9d93058ba731
|
refs/heads/master
| 2023-07-16T22:25:21.419072
| 2021-08-26T21:30:12
| 2021-08-26T21:30:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
test-TPCfunctions.R
|
context("TPCfunctions")
expect_similar <- function(input, expected) {
eval(bquote(expect_lt(abs(input - expected), 0.01)))
}
test_that("TPC function works as expected", {
expect_equal(max(TPC(T=0:60, Topt=30, CTmin=10, CTmax=40)),1)
expect_equal(min(TPC(T=0:60, Topt=30, CTmin=10, CTmax=40)),0)
expect_equal(length(TPC(T=0:60, Topt=30, CTmin=10, CTmax=40)),61)
expect_similar(TPC(T=0:60, Topt=30, CTmin=10, CTmax=40)[1], 0.0001234098)
})
test_that("TPC.beta function works as expected", {
expect_equal(length(TPC.beta(T=0:60, shift=-1, breadth=0.1, aran=0, tolerance= 43, skew=0.7)), 61)
expect_similar(TPC.beta(T=0:60, shift=-1, breadth=0.1, aran=0, tolerance= 43, skew=0.7)[1], 3.80322e-08)
})
|
a71b5129475623b8335d59339a609574dd08b5d4
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/5868_0/rinput.R
|
6f983a3763874c1532063402795dc7ebbd5dcc7a
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, remove its root, and write the unrooted tree back out.
library(ape)

phylo_tree <- read.tree("5868_0.txt")
unrooted_tree <- unroot(phylo_tree)
write.tree(unrooted_tree, file="5868_0_unrooted.txt")
|
49ec95c0b55a578bb34474e15af6a3939a64dfca
|
a8e8000b370d54c2f6a097ee59876827f4daafbe
|
/study/datatime.R
|
6c41aec9c0c64d21e2822dbd1113593fe6d8d413
|
[] |
no_license
|
weidaoming/R
|
142be073ebdf097740ae5a02a7e75308a06e30d1
|
5048ca1d46025ba41d03b00049a17b309e8dfedc
|
refs/heads/master
| 2021-07-12T10:47:27.552074
| 2017-10-18T07:09:09
| 2017-10-18T07:09:09
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 439
|
r
|
datatime.R
|
# Dates and times in base R

# date() returns the current date-time as a plain character string.
x <- date()
x
class(x)
class(date())

# Sys.Date() returns the current day as a Date object.
x2 <- Sys.Date()
x2
class(x2)

# Date arithmetic and calendar helpers.
x3 <- as.Date("2017-01-01")
x3
class(x3)
weekdays(x3)
months(x3)
quarters(x3)
julian(x3)

x4 <- as.Date("2017-02-20")
x3 <- as.Date("2013-03-18")  # note: reassigns x3 before taking the difference
x4 - x3
as.numeric(x4 - x3)

# Time
x <- Sys.time()
x
class(x)

# POSIXlt exposes broken-down components (sec, min, hour, mday, mon, year, ...).
p <- as.POSIXlt(x)
p
class(p)
names(unclass(p))
p$min
p$year
as.POSIXct(p)

# Converting between character and date-time formats.
x1 <- "Jan 1,2015 01:01"
# Use "%b" (abbreviated month) and no space after the comma so the format
# matches the string exactly; the original "%B %d, %Y %H:%M" is
# platform/locale dependent for this input and can return NA.
strptime(x1, "%b %d,%Y %H:%M")
|
fc57144a227229b4f19201a8538da1cae31f8424
|
99315b85d37870a5da01f33ddd0b50be7838c9e7
|
/WAPIWrapper/WAPIWrapperR/WindR/man/w.weqs.Rd
|
1bee7376573f1832dcf4b0584d765e7921e2110a
|
[] |
no_license
|
WindQuant/ThirdParty
|
7e4932a5abbcf2374beb551dcddef14bf27b627c
|
c53334109c2e97a938d62b131531010fb265e516
|
refs/heads/master
| 2020-06-04T20:01:31.490944
| 2015-12-04T02:18:35
| 2015-12-04T02:18:35
| 27,526,077
| 22
| 12
| null | 2015-12-03T13:06:06
| 2014-12-04T06:06:46
|
C++
|
UTF-8
|
R
| false
| false
| 633
|
rd
|
w.weqs.Rd
|
\name{w.weqs}
\alias{w.weqs}
\title{
Retrieve stocks by custom filter.
}
\description{
wset is used to retrieve stocks by custom filter.\cr
To show the guide dialog, please input w.menu("weqs").\cr
data <- w.wset(filtername,...)\cr
\cr
Description:\cr
filtername the filter's name.\cr
\cr
$Data the return result, a data.frame.\cr
$Time the time of the data.\cr
$ErrorCode the error ID (0 is OK).\cr
}
\usage{
w.weqs(filtername,...)
}
\examples{
library(WindR)
w.start()
#Please firstly create a filter named myKDJ.
w.wset('myKDJ')
}
\keyword{ weqs }
|
9f45dd3afff444e909270db496694c474cc63e42
|
302cfb5a0c87ed3f1c9cc1b9a045aa42f49500bf
|
/man/ESEA.Main.Rd
|
85a441dd1d334e77862f1637fc7899a505f658af
|
[
"Apache-2.0"
] |
permissive
|
hanjunwei-lab/ESEA
|
ee4f18be7c0f43cfce4377b632f350dd1ec40d00
|
e428d4137a52d4c9078d1e61a3f4817de85efafe
|
refs/heads/master
| 2022-11-10T14:19:33.761782
| 2020-06-30T13:13:20
| 2020-06-30T13:13:20
| 275,342,928
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,288
|
rd
|
ESEA.Main.Rd
|
\name{ESEA.Main}
\alias{ESEA.Main}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Identify dysregulated pathways based on edge set enrichment analysis}
\description{
An edge-centric method to identify dysregulated pathways by investigating the changes of biological relationships of pathways in the context of gene
expression data.
}
\usage{
ESEA.Main(EdgeCorScore, pathwayEdge.db, weighted.score.type = 1, pathway = "kegg",
gs.size.threshold.min = 15, gs.size.threshold.max = 1000,
reshuffling.type = "edge.labels", nperm = 100, p.val.threshold = -1,
FDR.threshold = 0.05, topgs = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{EdgeCorScore}{ A numeric vector. Each element is the differential correlation score of an edge.}
\item{pathwayEdge.db}{ A character vector, the length of it is the number of pathways.}
\item{weighted.score.type}{ A value. Edge enrichment correlation-based weighting: 0=no weight, 1=standard weight, 2 = over-weight. The default value is 1}
\item{pathway}{ A character string of pathway database. Should be one of "kegg","reactome",
"nci","huamncyc","biocarta","spike" and "panther". The default value is "kegg"}
\item{gs.size.threshold.min}{ An integer. The minimum size (in edges) for pathways to be considered. The default value is 15. }
\item{gs.size.threshold.max}{ An integer. The maximum size (in edges) for pathways to be considered. The default value is 1000.}
\item{reshuffling.type}{ A character string. The type of permutation reshuffling: "edge.labels" or "gene.labels". The default value is "edge.labels".}
\item{nperm}{ An integer. The number of permutation reshuffling. The default value is 100.}
\item{p.val.threshold}{ A value. The significance threshold of NOM p-value for pathways whose detail results of pathways to be presented. The default value is -1, which means no threshold.}
\item{FDR.threshold}{ A value. The significance threshold of FDR q-value for pathways whose detail results of pathways to be presented. The default value is 0.05.}
\item{topgs}{ An integer. The number of top scoring gene sets used for detailed reports. The default value is 1.}
}
\details{
ESEA integrates pathway structure (e.g. interaction, regulation, modification, and binding etc.) and differential correlation among genes in identifying dysregulated pathways. The biological pathways were collected from the seven public databases (KEGG; Reactome; Biocarta; NCI/Nature Pathway Interaction Database; SPIKE; HumanCyc; Panther). We constructed a background set of edges by extracting pathway structure from each pathway in the seven databases. We then applied an information-theoretic measure, mutual information(MI), to quantify the change of correlation between genes for each edge based on gene expression data with cases and controls. An edge list was formed by ranking the edges according to their changes of correlation. Finally, we used the weighted Kolmogorov-Smirnov statistic to evaluate each pathway by mapping the edges in the pathway to the edge list. The permutation is used to identify the statistical significance of pathways (normal p-values) and the FDR is used to account for false positives.
}
\value{
A list. It includes two elements: SummaryResult and PathwayList.
SummaryResult is a list. It is the summary of the result of pathways which include two elements: the results of Gain-of-correlation and Loss-of-correlation. Each element of the lists is a dataframe. Each rows of the dataframe represents a pathway. Its columns include "Pathway Name", "Pathway source" "ES", "NES", "NOM p-val", "FDR q-val", "Tag percentage" (Percent of edge set before running enrichment peak), "edge percentage" (Percent of edge list before running enrichment peak), "Signal strength" (enrichment signal strength).
PathwayList is list of pathways which present the detail results of pathways with NOM p-val<p.val.threshold or FDR<FDR.threshold or topgs<=topgs.threshold. Each element of the list is a dataframe. Each rows of the dataframe represents an edge. Its columns include "Edge number in the (sorted) pathway", "Edge ID", "location of the edge in the sorted edge list", "EdgeCorScore", "Running enrichment score", "Property of contribution".
}
\author{Junwei Han <hanjunwei1981@163.com>, Xinrui Shi<xinrui103@163.com> and Chunquan Li <lcqbio@163.com>}
\references{
Subramanian, A., Tamayo, P., Mootha, V.K., Mukherjee, S., Ebert, B.L., Gillette, M.A., Paulovich, A., Pomeroy, S.L., Golub, T.R., Lander, E.S. et al. (2005) Gene set enrichment analysis: a knowledgebased approach for interpreting genome-wide expression profiles. Proc Natl Acad Sci U S A, 102, 15545-15550.
}
\examples{
\dontrun{
#get example data
dataset<-GetExampleData("dataset")
class.labels<-GetExampleData("class.labels")
controlcharactor<-GetExampleData("controlcharactor")
#get the data for background set of edges
edgesbackgrand<-GetEdgesBackgrandData()
#get the edge sets of pathways
pathwayEdge.db<-GetPathwayEdgeData()
#calculate the differential correlation score for edges
EdgeCorScore<-calEdgeCorScore(dataset, class.labels, controlcharactor, edgesbackgrand)
#identify dysregulated pathways by using the function ESEA.Main
Results<-ESEA.Main(
EdgeCorScore,
pathwayEdge.db,
weighted.score.type = 1,
pathway = "kegg",
gs.size.threshold.min = 15,
gs.size.threshold.max = 1000,
reshuffling.type = "edge.labels",
nperm =10,
p.val.threshold=-1,
FDR.threshold = 0.05,
topgs =1
)
#print the summary results of pathways to screen
Results[[1]][[1]][1:5,]
#print the detail results of pathways to screen
Results[[2]][[1]][1:5,]
#write the summary results of pathways to tab delimited file.
write.table(Results[[1]][[1]], file = "kegg-SUMMARY RESULTS Gain-of-correlation.txt", quote=F,
row.names=F, sep = "\t")
write.table(Results[[1]][[2]], file = "kegg-SUMMARY RESULTS Loss-of-correlation.txt", quote=F,
row.names=F, sep = "\t")
#write the detail results of genes for each pathway with FDR.threshold< 0.05 to tab delimited file.
for(i in 1:length(Results[[2]])){
PathwayList<-Results[[2]][[i]]
filename <- paste(names(Results[[2]][i]),".txt", sep="", collapse="")
write.table(PathwayList, file = filename, quote=F, row.names=F, sep = "\t")
}
}
}
|
edf33df429b12145805bf1ace9a3a4bac5c6c820
|
d9d04d53288e4cb5801cfe1e35f9d7b6e3154fb8
|
/ACTIVIDAD 4/Actividad 3.R
|
5f4dde3cfb1433ad9eaf93f7344820f9f81ec6a6
|
[] |
no_license
|
yerayb/Visualizacion-Rstudio
|
187618fcea1d56c41f7df371708c6ba27bfa3347
|
4908f4664eddbd1752557ef35827516bc89c0ad7
|
refs/heads/master
| 2022-11-14T13:07:09.427681
| 2020-06-26T19:33:54
| 2020-06-26T19:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,735
|
r
|
Actividad 3.R
|
####################################################################
# Setting up the working environment
# NOTE(review): rm(list=ls()) and setwd() at the top of a script are
# RStudio-session conveniences; they wipe the user's workspace and assume
# the file is being edited inside RStudio.
####################################################################
rm(list=ls())
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir()
graphics.off()
cat("\014")
###################################################################
# Some required packages
###################################################################
library(tm) # text mining
library(SnowballC)
library(wordcloud) # word-cloud plots
library(ggplot2) # plotting
library(dplyr) # data manipulation and transformation
library(readr) # reading and writing files
library(cluster) # cluster analysis
library(stringr)
## ---- RETO 1 ---- ##
# Import the dataset.
datos1 <- read.csv("cotoarticles20160603.csv", header = TRUE, row.names = NULL)

# Data cleaning: drop duplicated rows, then rows with missing values.
datos1 <- datos1[!duplicated(datos1), ]
str(datos1)
datos1 <- na.omit(datos1)

# Plot: mean price per category.
# NOTE(review): loading plyr after dplyr masks dplyr::summarise; ddply below
# relies on plyr's summarise, so the load order matters.
library(plyr)  # library() fails loudly if the package is missing; require() does not
datosprueba <- ddply(datos1, .(categories), summarise, price = mean(price))
max(datosprueba$price)
categoriaMedia <- data.frame(datosprueba$categories, datosprueba$price)
# Refer to columns by bare name inside aes(); `df$col` inside aes() is fragile.
ggplot(datosprueba, aes(x = categories, y = price)) + geom_col()
##---- RETO 2 ---- ##
# Import the dataset.
datos2 <- read.csv("jumboarticles20160603.csv",header = TRUE , row.names = NULL)
# Data cleaning
# Remove duplicated rows, then rows with missing values.
datos2 = datos2[!duplicated(datos2), ]
str(datos2)
datos2 = na.omit(datos2)
# Articles that are jam: the partial pattern "ermela" matches both
# "Mermelada" and "mermelada".
mermeladaArt <- datos2 %>% filter(str_detect(name, "ermela"))
precioMermelada <- mermeladaArt$unit_price
# Price per 100 grams.
# NOTE(review): assumes the last 5 characters of unit_price hold the numeric
# price (and that /10 converts it to a per-100g price) — verify against the
# CSV's unit_price format.
precio_100gramos <- str_sub(precioMermelada,-5,-1)
precio_100gramos <- as.numeric(precio_100gramos)/10
mermeladaArt <- data.frame(mermeladaArt, precio_100gramos ) # price-per-100g column added
##---- RETO 3 ---- ##
coto <- read.csv("cotoarticles20160603.csv",header = TRUE , row.names = NULL)
jumbo <- read.csv("jumboarticles20160603.csv",header = TRUE , row.names = NULL)
#Data cleaning
# Remove duplicated rows, then rows with missing values (both stores).
coto = coto[!duplicated(coto), ]
str(coto)
coto = na.omit(coto)
jumbo = jumbo[!duplicated(jumbo), ]
str(jumbo)
jumbo = na.omit(jumbo)
# Cookie articles in the Coto data: "alleti" matches both "Galletitas"
# and "galletitas".
cotoGalletas <- coto %>% filter(str_detect(name, "alleti"))
cotoGalletasPrecio <- as.numeric(cotoGalletas$price)
cotoGalletas <- data.frame(cotoGalletas$name, cotoGalletasPrecio)
# Cookie articles in the Jumbo data.
jumboGalletas <- jumbo %>% filter(str_detect(name, "alleti"))
jumboGalletasPrecio <- as.numeric(jumboGalletas$price)
jumboGalletas <- data.frame(jumboGalletas$name, jumboGalletasPrecio)
# Visualization: compare both price distributions side by side.
# (note: assigning to `boxplot` shadows the base function for this session)
boxplot <- boxplot(cotoGalletasPrecio, jumboGalletasPrecio, main="Precio galletitas")
## ---- RETO 4 ---- ##
coto <- read.csv("cotoarticles20160603.csv", header = TRUE, row.names = NULL)
jumbo <- read.csv("jumboarticles20160603.csv", header = TRUE, row.names = NULL)

# Data cleaning: drop duplicated rows, then rows with missing values.
coto <- coto[!duplicated(coto), ]
str(coto)
coto <- na.omit(coto)
jumbo <- jumbo[!duplicated(jumbo), ]
str(jumbo)
jumbo <- na.omit(jumbo)

# Data frame with the mean price per category.
# (renamed from `cat`, which shadowed base::cat)
categoria <- jumbo$categories
precio <- as.numeric(jumbo$price)
jumboCatyPrec <- data.frame(aggregate(precio, list(categoria), mean))

# Quartiles and outlier fences at 3 * IQR beyond Q1 / Q3.
Q <- quantile(jumboCatyPrec$x)
Q1 <- Q[2]
Q3 <- Q[4]
minQ <- Q[1]
maxQ <- Q[5]
Med <- Q[3]
inf <- Q1 - 3 * (Q3 - Q1)
sup <- Q3 + 3 * (Q3 - Q1)  # fixed: was `Q3 + 3 * (Q3 - 1)`, a typo for the IQR
# Flag categories whose mean price falls outside the fences.
# fixed: the original compared abs(x) against sup but abs(scale(x)) against
# inf, mixing raw and standardized scales; compare the raw means to both fences.
prec.outlier <- data.frame(jumboCatyPrec,
                           outlier = jumboCatyPrec$x > sup | jumboCatyPrec$x < inf)

# Plot mean price per category, outliers in red, overall mean as a line.
# fixed: the y values were passed through as.character(), which breaks plot().
plot(prec.outlier$Group.1, prec.outlier$x,
     main = "Estudio de Outliers", xlab = "Categorias", ylab = "Precio",
     pch = 1, col = ifelse(prec.outlier$outlier, "red", "black"))
media <- mean(precio)
abline(h = media, col = "brown")
##---- RETO 5 ---- ##
# NOTE (translated from Spanish): the author could not get the VEARTICLES CSV
# to load, so only the Coto and Jumbo files are compared here.
coto <- read.csv("cotoarticles20160603.csv",header = TRUE , row.names = NULL)
jumbo <- read.csv("jumboarticles20160603.csv",header = TRUE , row.names = NULL)
#Data cleaning
# Remove duplicated rows, then rows with missing values (both stores).
coto = coto[!duplicated(coto), ]
str(coto)
coto = na.omit(coto)
jumbo = jumbo[!duplicated(jumbo), ]
str(jumbo)
jumbo = na.omit(jumbo)
# Milk products from both stores; "^Leche" anchors the match at the start
# of the product name.
lecheCoto <- coto %>% filter(str_detect(name, "^Leche"))
precioLecheCoto <- as.numeric(lecheCoto$price)
lecheCoto <- data.frame(lecheCoto$name, precioLecheCoto)
lecheJumbo <- jumbo %>% filter(str_detect(name, "^Leche"))
precioLecheJumbo <- as.numeric(lecheJumbo$price)
lecheJumbo <- data.frame(lecheJumbo$name, precioLecheJumbo)
# Compare the two price distributions side by side.
boxplotLeche <- boxplot(precioLecheCoto, precioLecheJumbo)
|
0ff4d8c55566797b6611d8f06b3d42c706603275
|
b653662e7003da49fdf36429d813fc57ea501c4f
|
/Mushroom_HC_clust.R
|
9928fbddd04d427b51b664a11a635070fe5f4c30
|
[] |
no_license
|
mahi941333/Analysis-Of-mushroom-dataset
|
dfe13fa396e6e30ae0b2b64b8d0f37ebd2a2b769
|
5359915d9e56ed6d459b3e64768988272810d3d8
|
refs/heads/master
| 2020-04-03T09:40:14.802672
| 2018-10-29T08:04:18
| 2018-10-29T08:04:18
| 155,172,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,195
|
r
|
Mushroom_HC_clust.R
|
install.packages("klaR")
install.packages('cba')
library(klaR)
library(cba)
Mushrooms= read.csv("Mushroom.csv")
str(Mushrooms)
X=Mushrooms[-1]
#Checking missing values
Index= apply(Mushrooms, 2, function(variable) any(is.na(variable) | is.infinite(variable)))
colnames(Mushrooms)[Index]
table(Mushrooms$Class)
#plot
for (z in 1:7) {
plot(Mushrooms[ ,z], main=colnames(Mushrooms)[z],
ylab = "Count", col="green", las = 0)
}
#removing viel_type variable which has single values
Mushrooms.torun = subset(Mushrooms, select = -c(Class, Veil.type))
##One-hot encoded data
##This is basically creating dummy variables for each value of the category, for all the variables
Mushrooms.torun.ohe = model.matrix(~.-1, data=Mushrooms.torun)
str(Mushrooms.torun.ohe)
##getting the X dataset
X=Mushrooms.torun.ohe[-1]
d =dist(Mushrooms.torun.ohe, method = "euclidean") # Euclidean distance matrix.
H.fit <- hclust(d, method="ward.D2")
plot(H.fit) # display dendogram
groups <- cutree(H.fit, k=3) # cut tree into 5 clusters
# draw dendogram with red borders around the 5 clusters
rect.hclust(H.fit, k=3, border="red")
table(Mushrooms.torun.ohe[,1],groups)
|
09ddd44620b4d02a6c5f2c56d1453cef63243d43
|
882384720aaad106f30d2893574483ec664c6ca9
|
/weicheng/couponExploration.R
|
3a7ddf719e3f9456756405d7392c1aecc821a1ad
|
[] |
no_license
|
imouzon/dmc2015
|
deb1bfb5a25955b289726e48723b7e736f71f8b6
|
8a1aa31a7a917501a9a1b5a96c313685591a31b7
|
refs/heads/master
| 2020-12-24T19:17:58.268371
| 2015-06-29T11:36:59
| 2015-06-29T11:36:59
| 33,014,468
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,047
|
r
|
couponExploration.R
|
# Coupon-usage feature engineering: compute per-coupon appearance and usage
# counts/proportions on the training set, map them onto the test ("class")
# set, and export both as feature CSVs.
# NOTE(review): file names say "mc2015_*" while the project is dmc2015 —
# verify the paths.
train = read.csv("./data/mc2015_train_simple.csv",
stringsAsFactors=FALSE, header=TRUE)
# NOTE(review): `class` shadows base::class() for the rest of this script.
class = read.csv("./data/mc2015_test_simple.csv",
stringsAsFactors=FALSE, header=TRUE)
## nCoupon[i]Used = the number of times a coupon in column i is
## used in the training set alone
## pCoupon[i]Used = the proportion of times a coupon in column i is
## used in the training set alone
# Columns c(5,13,21) are taken as couponID1..3 and 29:31 as the
# coupon1Used..coupon3Used flags — assumed from later column names; verify
# against the CSV layout.
trn = train[,c(5,13,21, 29:31)]
coupons = unlist(trn[,1:3], use.names=FALSE)
couponUsed = unlist(trn[,4:6], use.names=FALSE)
# Usage count per coupon ID, pooled across all three coupon columns.
nCouponUsed = aggregate(couponUsed, by=list(coupons), sum)
couponsTab = table(coupons)
# Pooled appearance counts, looked up by coupon ID name.
trn$nCoupon1 = unname(couponsTab[as.character(trn$couponID1)])
trn$nCoupon2 = unname(couponsTab[as.character(trn$couponID2)])
trn$nCoupon3 = unname(couponsTab[as.character(trn$couponID3)])
# NOTE(review): these index the aggregate result using the raw coupon ID as
# a ROW NUMBER, which is only correct if the IDs are exactly 1..nrow.
# The per-column lookups further down use match() instead — confirm the
# coupon ID range before trusting these three columns.
trn$nCoupon1Used = nCouponUsed[trn$couponID1, 2]
trn$nCoupon2Used = nCouponUsed[trn$couponID2, 2]
trn$nCoupon3Used = nCouponUsed[trn$couponID3, 2]
# Proportion of appearances (any column) in which the coupon was used.
trn$pCoupon1Used = trn$nCoupon1Used / trn$nCoupon1
trn$pCoupon2Used = trn$nCoupon2Used / trn$nCoupon2
trn$pCoupon3Used = trn$nCoupon3Used / trn$nCoupon3
## nCoup[i]Col[j]Used = the number of times a coupon that from
## column [i] appeared in column[j] is used in the training set alone
## pCoup[i]Col[j]Used = the proportion of times a coupon that from
## column [i] appeared in column[j] is used in the training set alone
## Some code below is modified from pete's folder: 03_features.R
# ===================================================================
# Per-column appearance counts for each coupon ID.
coupTab1 <- table(trn$couponID1)
coupTab2 <- table(trn$couponID2)
coupTab3 <- table(trn$couponID3)
# Training set
# How often the coupon in column i appears in column j (name lookup, so a
# coupon absent from column j yields NA — zeroed out below).
trn$nCoup1Col1 <- unname(coupTab1[as.character(trn$couponID1)])
trn$nCoup1Col2 <- unname(coupTab2[as.character(trn$couponID1)])
trn$nCoup1Col3 <- unname(coupTab3[as.character(trn$couponID1)])
trn$nCoup2Col1 <- unname(coupTab1[as.character(trn$couponID2)])
trn$nCoup2Col2 <- unname(coupTab2[as.character(trn$couponID2)])
trn$nCoup2Col3 <- unname(coupTab3[as.character(trn$couponID2)])
trn$nCoup3Col1 <- unname(coupTab1[as.character(trn$couponID3)])
trn$nCoup3Col2 <- unname(coupTab2[as.character(trn$couponID3)])
trn$nCoup3Col3 <- unname(coupTab3[as.character(trn$couponID3)])
trn[is.na(trn)] <- 0
## Coupon used number for each column
nCoup1Used = aggregate(trn$coupon1Used, by=list(trn$couponID1), sum)
nCoup2Used = aggregate(trn$coupon2Used, by=list(trn$couponID2), sum)
nCoup3Used = aggregate(trn$coupon3Used, by=list(trn$couponID3), sum)
# For each coupon column i and usage column j: look the coupon ID up in the
# per-column usage aggregate via match(); coupons never seen in that column
# get 0.
index = match(trn$couponID1, nCoup1Used$Group.1)
trn$nCoup1Col1Used = ifelse(is.na(index), 0, nCoup1Used[index, 2])
index = match(trn$couponID1, nCoup2Used$Group.1)
trn$nCoup1Col2Used = ifelse(is.na(index), 0, nCoup2Used[index, 2])
index = match(trn$couponID1, nCoup3Used$Group.1)
trn$nCoup1Col3Used = ifelse(is.na(index), 0, nCoup3Used[index, 2])
index = match(trn$couponID2, nCoup1Used$Group.1)
trn$nCoup2Col1Used = ifelse(is.na(index), 0, nCoup1Used[index, 2])
index = match(trn$couponID2, nCoup2Used$Group.1)
trn$nCoup2Col2Used = ifelse(is.na(index), 0, nCoup2Used[index, 2])
index = match(trn$couponID2, nCoup3Used$Group.1)
trn$nCoup2Col3Used = ifelse(is.na(index), 0, nCoup3Used[index, 2])
index = match(trn$couponID3, nCoup1Used$Group.1)
trn$nCoup3Col1Used = ifelse(is.na(index), 0, nCoup1Used[index, 2])
index = match(trn$couponID3, nCoup2Used$Group.1)
trn$nCoup3Col2Used = ifelse(is.na(index), 0, nCoup2Used[index, 2])
index = match(trn$couponID3, nCoup3Used$Group.1)
trn$nCoup3Col3Used = ifelse(is.na(index), 0, nCoup3Used[index, 2])
## Result verification
# Per-column usage counts should sum back to the pooled counts.
identical(as.integer(trn$nCoup1Col1Used + trn$nCoup1Col2Used + trn$nCoup1Col3Used), trn$nCoupon1Used)
identical(as.integer(trn$nCoup2Col1Used + trn$nCoup2Col2Used + trn$nCoup2Col3Used), trn$nCoupon2Used)
identical(as.integer(trn$nCoup3Col1Used + trn$nCoup3Col2Used + trn$nCoup3Col3Used), trn$nCoupon3Used)
## proportion
# Share of each coupon's total usages that happened in each column.
trn$pCoup1Col1Used = trn$nCoup1Col1Used / trn$nCoupon1Used
trn$pCoup1Col2Used = trn$nCoup1Col2Used / trn$nCoupon1Used
trn$pCoup1Col3Used = trn$nCoup1Col3Used / trn$nCoupon1Used
trn$pCoup2Col1Used = trn$nCoup2Col1Used / trn$nCoupon2Used
trn$pCoup2Col2Used = trn$nCoup2Col2Used / trn$nCoupon2Used
trn$pCoup2Col3Used = trn$nCoup2Col3Used / trn$nCoupon2Used
trn$pCoup3Col1Used = trn$nCoup3Col1Used / trn$nCoupon3Used
trn$pCoup3Col2Used = trn$nCoup3Col2Used / trn$nCoupon3Used
trn$pCoup3Col3Used = trn$nCoup3Col3Used / trn$nCoupon3Used
#### mapping for class data
## ==========================
# Bring the training-set features onto the test set by merging on coupon ID.
tst = class[,c(1, 5,13,21)]
## column1
c1 = trn[,c("couponID1", "nCoupon1Used", "pCoupon1Used",
"nCoup1Col1Used", "nCoup1Col2Used", "nCoup1Col3Used",
"pCoup1Col1Used", "pCoup1Col2Used", "pCoup1Col3Used")]
c11 = unique(c1)
tst1 = tst[,1:2]
tmp = merge(tst1, c11, by="couponID1", incomparables = NA, all.x = TRUE)
# merge() reorders rows, so restore the original order via orderID.
tst1 = tmp[order(tmp$orderID),]
## column2
c2 = trn[,c("couponID2", "nCoupon2Used", "pCoupon2Used",
"nCoup2Col1Used", "nCoup2Col2Used", "nCoup2Col3Used",
"pCoup2Col1Used", "pCoup2Col2Used", "pCoup2Col3Used")]
c22 = unique(c2)
tst2 = tst[,c(1, 3)]
tmp = merge(tst2, c22, by="couponID2", incomparables = NA, all.x = TRUE)
tst2 = tmp[order(tmp$orderID),]
## column3
c3 = trn[,c("couponID3", "nCoupon3Used", "pCoupon3Used",
"nCoup3Col1Used", "nCoup3Col2Used", "nCoup3Col3Used",
"pCoup3Col1Used", "pCoup3Col2Used", "pCoup3Col3Used")]
c33 = unique(c3)
tst3 = tst[,c(1, 4)]
tmp = merge(tst3, c33, by="couponID3", incomparables = NA, all.x = TRUE)
tst3 = tmp[order(tmp$orderID),]
## Merge all together
oTst = cbind(orderID = tst$orderID, tst1[, -c(1:2)], tst2[, -c(1:2)], tst3[, -c(1:2)])
# Reorder columns so related features (counts, then proportions) sit together.
oTst = oTst[,c(1,2,10,18, 3, 11, 19, 4:6, 12:14, 20:22, 7:9, 15:17, 23:25)]
#### export
## ========
# Drop the raw ID/flag columns, keep only the engineered features.
oTrn = trn[,-c(1:9, 16:24)]
oTrn = cbind(orderID = train$orderID, oTrn)
write.csv(oTrn, "./feature/couponUsed_train.csv", quote = FALSE, row.names=FALSE, na="")
write.csv(oTst, "./feature/couponUsed_class.csv", quote = FALSE, row.names=FALSE, na="")
## TEST
# Smoke check: the exported CSVs read back in.
aa = read.csv("./feature/couponUsed_train.csv", header=TRUE)
bb = read.csv("./feature/couponUsed_class.csv", header=TRUE)
|
7a82166d36798a4fad479dd0269c776c0c25d333
|
f1c2f21f599a3588dfe35ca0384c167402157395
|
/man/find.names.Rd
|
ad432df73ac2d86b614b621a0d4316777d8fe168
|
[] |
no_license
|
SathishN/griffun
|
91c03d84004e499a6030b510192d9d04acd387d0
|
199fae5a9a1ab8b4f6436585811ea3f7db6d06d9
|
refs/heads/master
| 2020-06-03T19:43:28.678531
| 2014-11-11T17:37:46
| 2014-11-11T17:37:46
| 27,498,150
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 430
|
rd
|
find.names.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{find.names}
\alias{find.names}
\title{Find location of given names in all names}
\usage{
find.names(find.names, all.names)
}
\arguments{
\item{find.names}{find names of columns}
\item{all.names}{all names}
}
\value{
nothing
}
\description{
Find location of given names in all names
}
\examples{
\dontrun{
find.names('Close,Volume', colnames(b[[ symbolnames[i] ]]))
}
}
|
2ef4d3c93fb439d91d88b7d20d557897be9902b9
|
272df57d6e1feffffc4775e5c2c2f18ed51f35f9
|
/R/VariableSelection/EaaMutualInfo.R
|
e1ab097da2e5bb3e9675e4ac3cd869707c535592
|
[] |
no_license
|
eulertech/backup
|
d0fc7e5b5989f2f782154e65107277ade39c1d29
|
9ff48f61cfd4e0c5994ad3dabab3987255cea953
|
refs/heads/master
| 2021-04-26T23:00:40.526364
| 2019-01-23T18:24:15
| 2019-01-23T18:24:15
| 72,011,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
EaaMutualInfo.R
|
# This function takes a data frame and returns a mutual information coefficient for each variable
# You can also specify the number of bins you want your data discretized into
# Input: dataframe with columns as variables and target as first column
# Output: dataframe with mutual information for each variable
# Author: Lou Zhang
# Load drivers: install "infotheo" if absent, then attach it.
# NOTE(review): require() returns FALSE instead of erroring, so a failed
# install/attach is only surfaced later when discretize()/mutinformation()
# are called.
wants <- c("infotheo")
has <- wants %in% rownames(installed.packages())
if(any(!has)) install.packages(wants[!has])
sapply(wants, require, character.only = TRUE)
rm("wants","has")
# Mutual information of each predictor with the target.
# `df`:   data frame with the target in column 1 and predictors in the rest.
# `bins`: number of discretization bins (default: cube root of the row count).
# Returns a data frame with one row per predictor: the raw mutual
# information ("MutualInformation") and a copy rescaled to [0, 1] ("Scaled").
EaaMutualInfo <- function(df, bins = nrow(df)^(1/3)) {
  # Linearly rescale a numeric vector onto [0, 1].
  scale_unit <- function(v) {
    (v - min(v)) / (max(v) - min(v))
  }

  # Discretize the predictor columns and the target into `bins` bins.
  predictors <- discretize(df[2:ncol(df)], nbins = bins)
  colnames(predictors) <- colnames(df[, 2:ncol(df)])
  target <- discretize(df[1], nbins = bins)

  # Mutual information between each discretized predictor and the target.
  mi_values <- vapply(
    seq_len(ncol(df) - 1),
    function(i) mutinformation(predictors[i], target),
    numeric(1)
  )

  result <- as.data.frame(mi_values, row.names = colnames(df)[2:ncol(df)])
  colnames(result) <- c("MutualInformation")
  result$Scaled <- scale_unit(result$MutualInformation)
  result
}
|
dce9fb482a287a94b49dcf351417cae784a4ad46
|
0d392aa46ecc2069dfa828002dbc38180d37d515
|
/shiny-market-basket/app.R
|
6c0f7130f7d92cc0b8dcd5ce47b8d5b3583f7cd0
|
[
"MIT"
] |
permissive
|
nimeshchaturvedi/shiny-market-basket
|
1c70537b940c8f6376ab3fd84ae28128047df8fb
|
1d92a08c8b74c8f0b835b006d86f3110005f3835
|
refs/heads/master
| 2021-05-31T17:50:42.706697
| 2016-06-07T03:25:48
| 2016-06-07T03:25:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,471
|
r
|
app.R
|
# shiny market basket - quick and dirty tool for market basket analysis
# using Apriori rule search.
library(shinydashboard)
library(dplyr)
library(magrittr)
library(arules)
library(ggplot2)  # ggplot() is called in the server's renderPlot but was never attached
data("Groceries")
ui <- dashboardPage(skin="yellow",
dashboardHeader(title = "Market Basket"),
dashboardSidebar(
fileInput("file1","Choose CSV Data",accept=c(".csv","text/csv","text/plain")),
sidebarMenu(
menuItem("Overview", tabName = "overview", icon = icon("dashboard"), selected = T),
menuItem("Explore", tabName = "explore", icon = icon("binoculars"), selected = F)
)
),
dashboardBody(
tabItems(
tabItem(tabName = "overview",
fluidPage(
fluidRow(box(verbatimTextOutput("txt_rules"), width=12))
)
),
tabItem(tabName = "explore",
fluidPage(
fluidRow(box(selectInput("item","Item",choices = c(), selected = "")
, width = 12)),
fluidRow(box(plotOutput("plt_item_freq"))),
fluidRow(box(verbatimTextOutput("txt_overview"),
title = "Summary", width=12))
)
)
)
)
)
# Server: serves association-rule and item-frequency views for the basket
# data (currently hard-wired to the Groceries sample; see ret_data_file).
server <- function(input, output, session) {
  # Source transactions. The CSV upload is logged but not yet parsed; the
  # commented code shows the intended read.transactions() path.
  ret_data_file <- reactive({
    message("loading ", input$file1$datapath)
    # data_file <<- tryCatch(read.transactions(input$file1$datapath, sep = ","),
    #                        error=function(e) e)
    # data_file
    Groceries
  })

  # Item frequencies sorted descending; also refreshes the item selector.
  df_items <- reactive({
    data_file <- ret_data_file()
    ret <- as.data.frame(itemFrequency(data_file))
    ret$item <- rownames(ret)
    row.names(ret) <- NULL
    names(ret) <- c("freq","item")
    ret %<>% arrange(desc(freq))
    updateSelectInput(session,"item",choices = ret$item, selected = ret$item[1])
    ret
  })

  # Apriori rule search; returns the printable summary object.
  rules <- reactive({
    data_file <- ret_data_file()
    ret <- apriori(data_file, parameter = list(support = 0.006, confidence = 0.25, minlen = 2))
    summary(ret)
  })

  output$txt_rules <- renderPrint(rules())

  output$txt_overview <- renderPrint({
    validate(need(input$file1$datapath, "no data yet..."))
    # fixed: previously referenced a global `data_file` that is never created
    # (its <<- assignment above is commented out); use the reactive instead.
    summary(ret_data_file())
  })

  # Frequency bar for the selected item.
  output$plt_item_freq <- renderPlot({
    dat <- df_items()
    ggplot(dat %>% filter(item==input$item), aes(x=1,y=freq)) +
      geom_bar(stat="identity", fill="blue3") + ylim(0,max(dat$freq)) +
      theme_minimal() +
      labs(x=input$item,y="Frequency") +
      scale_x_continuous(breaks=NULL)
  })
}

shinyApp(ui, server)
|
deedeb9db75bbfa1fe9700527d646838ed339949
|
649bebd8319980ff66e2138a22b3b51db8e361f7
|
/getoldEC2info.r
|
4f573a88d09caa5fc929ae8d7e0cfead414acb46
|
[] |
no_license
|
ryoogata/getAWSinfo
|
4d14e9ea84742cbb8d189d2d16bccd5be1c1002f
|
42daca7b11d20fa0b1a32a2786ed9b2e2a11e1cc
|
refs/heads/master
| 2021-01-10T18:32:10.625917
| 2015-09-20T15:54:46
| 2015-09-20T15:54:46
| 42,399,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,464
|
r
|
getoldEC2info.r
|
# Scrape the AWS "previous generation" EC2 instance table and export it as CSV.
require(XML)
require(stringr)

# Fetch the page that lists previous-generation EC2 instance types.
theURL <- "http://aws.amazon.com/ec2/previous-generation/"

# The 7th table on the page holds the EC2 instance listing.
oldtable <- readHTMLTable(theURL, which = 7, header = TRUE, stringsAsFactors = FALSE)

# Convert the vCPU and Memory (GiB) columns to numeric.
oldtable$"vCPU" <- as.numeric(oldtable$"vCPU")
oldtable$"Memory (GiB)" <- as.numeric(oldtable$"Memory (GiB)")

# Add a LocalStorage column derived from "Instance Storage (GB)".
oldtable$"LocalStorage" <- NA
oldtable$"LocalStorage"[which(grepl("SSD", oldtable$"Instance Storage (GB)"))] <- "SSD"
oldtable$"LocalStorage"[which(grepl("EBS Only", oldtable$"Instance Storage (GB)"))] <- "EBS Only"

# Strip the non-numeric text from "Instance Storage (GB)".
oldtable$"Instance Storage (GB)" <- str_replace(string=oldtable$"Instance Storage (GB)", pattern = " SSD", replacement = "")
oldtable$"Instance Storage (GB)" <- str_replace(string=oldtable$"Instance Storage (GB)", pattern = "EBS Only", replacement = "")
oldtable$"Instance Storage (GB)" <- str_replace(string=oldtable$"Instance Storage (GB)", pattern = ",", replacement = "")

# Split "Instance Storage (GB)" ("N x SIZE") into LocalStorageNUM and
# LocalStorageGB columns. (renamed from `list`, which shadowed base::list)
storageParts <- str_split(string = oldtable$"Instance Storage (GB)", pattern = "x")
localStorageDF <- t(data.frame(storageParts))
localStorageDF <- data.frame(localStorageDF)
rownames(localStorageDF) <- NULL
names(localStorageDF) <- c("LocalStorageNUM", "LocalStorageGB")

# Make LocalStorageNUM and LocalStorageGB numeric.
localStorageDF$LocalStorageNUM <- as.numeric(as.character(localStorageDF$LocalStorageNUM))
localStorageDF$LocalStorageGB <- as.numeric(as.character(localStorageDF$LocalStorageGB))

# Join the tables, compute total local capacity, drop the raw column.
oldtable <- cbind(oldtable, localStorageDF)
oldtable$LocalStorageCapacity <- localStorageDF$LocalStorageNUM * localStorageDF$LocalStorageGB
oldtable <- oldtable[,-6]

# Split "Instance Type" (e.g. "m1.small") into category and size.
instanceList <- str_split(string = oldtable$"Instance Type", pattern = "\\.")
instanceMatrix <- data.frame(Reduce(rbind, instanceList))
names(instanceMatrix) <- c("Instance Category","Instance Size")
oldtable <- cbind(instanceMatrix, oldtable)
oldtable$"LocalStorage"[which(is.na(oldtable$"LocalStorage"))] <- "HDD"

# Write the result out.
# fixed: the original passed `table` (the base R function) instead of `oldtable`.
write.table(oldtable, file="oldec2list.csv", sep=",", row.names = FALSE)
|
875d1e1203b497a8e04beb958f4143b713fd960b
|
03beb8f841ede21339b41caa120169074e8d498c
|
/mono_therapy_models/testing_nn.R
|
8b495f7e2ff225043fcb50440916b9b4cf8fa70e
|
[] |
no_license
|
edwinyi/tensor_gp
|
3d65a13e209610ce6627147fa51282d537b698e3
|
23d6ab25a5525ae237423838dbccb75508b240a6
|
refs/heads/master
| 2020-06-10T01:30:21.270507
| 2017-02-28T00:20:54
| 2017-02-28T00:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
testing_nn.R
|
# Fit a small Bayesian neural network (Stan model "nn.stan") to a
# two-class subset of iris, first by variational inference, then by full
# MCMC, and plot posterior-mean predictions against the held-out labels.
library(rstan)
# First two iris species only; standardise features, recode labels to 0/1.
data = iris[1:100,]
data[,1:4] = scale(data[,1:4])
data[,5] = as.integer(data[,5])-1
# 80/20 random train/test split (no seed set, so results vary per run).
N = 80
Nt = nrow(data)-N
train_ind = sample(100,N)
test_ind = setdiff(1:100, train_ind)
yt = data[test_ind,5]
# Data list matching the inputs declared by the nn.stan model.
stan.dat=list(
num_nodes=10,
num_middle_layers=3,
d=4,
N=N,
Nt=Nt,
X=data[train_ind,1:4],
y=data[train_ind,5],
Xt=data[test_ind,1:4])
m <- stan_model("nn.stan")
# Variational approximation. NOTE(review): rstan:::vb is an unexported
# internal - the `vb()` generic is the public API; verify before reuse.
v <- rstan:::vb(m, data = stan.dat, iter = 10000)
# NOTE(review): getMeans() is not defined in this file and is not an
# rstan export - presumably a helper from elsewhere; confirm it exists.
g=getMeans(v)
mean_predictions=g$predictions
# Full MCMC; the posterior means computed below OVERWRITE the VB
# predictions above.
s <- sampling(m, data = stan.dat, iter = 1000, chains = 4, cores=4)
fitmat = as.matrix(s)
predictions = fitmat[,grep("predictions", colnames(fitmat))]
parameters = fitmat[,grep("beta", colnames(fitmat))]
mean_predictions = colMeans(predictions)
# True test labels (black circles) vs posterior-mean predictions (red).
plot(1:Nt, yt)
lines(1:Nt, mean_predictions, type='p', col='red')
|
b19c203c06f311d1fe575e4228a79de701f497f5
|
eafcbacd06361d83a8da71ae86968f723177570f
|
/code/R Process Tools/RDFtoExcel.R
|
0c55742195b1a290a41fe3416143ea67b93d2a46
|
[
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
usbr/RW-RDF-Process-Plot
|
a6edd4a0172294424d90a1456907be2b9baffc8c
|
943ff620ef5b96fa30f9c830cfc0853a1420ec62
|
refs/heads/master
| 2023-08-17T10:22:15.871190
| 2023-08-04T21:15:48
| 2023-08-04T21:15:48
| 144,192,835
| 0
| 1
| null | 2022-03-24T14:41:14
| 2018-08-09T19:04:22
|
HTML
|
UTF-8
|
R
| false
| false
| 2,843
|
r
|
RDFtoExcel.R
|
# Input -----------------------------------
# NOTE(review): this section is a menu of alternative configurations.
# Each assignment below OVERWRITES the previous one, so only the LAST
# uncommented value of rdf2excel / folderpath / scen / rdf takes effect.
# Comment/uncomment the block that matches your machine and use case.
## rdf to excel exe
### MyPC
rdf2excel <- "C:/Users/cfelletter/Documents/Tools/RdfToExcel/RdfToExcelExecutable.exe"
### BA
rdf2excel <- "C:/Program Files/CADSWES/RdfToExcel/RdfToExcelExecutable.exe"
## file paths
# ### CRSS - MTOM
# folderpath <- "Z:/felletter/CRSS_MTOM_scens/Stress" # manoa
# folderpath <- "C:/Users/cfelletter/Documents/CRSS_MTOM/testbedanalysis/data/Scenario" #myPC
# scen <- "NF_pluvialRem_2000,2016Dems_v1.6,CRSS_VerificationModel_9015,IGDCP.v4.4.0.9004"
# scen <- c("Base_NoDO_8004.mdl,ISM1988_2018,2007Dems,NoDO_GREAT_7001,MTOM_Most_Aug21IC",
# "All_8004_NoDO,ISM1988_2018,2007Dems,NoDO_GREAT_7001,MTOM_Most_Aug21IC")
# ### if you previously ran Move and Rename
# folderpath <- current.folder
# ### on my comp
# folderpath <- "C:/Users/cfelletter/Documents/CRSS_MTOM/testbedanalysis/data/Scenario/temp_Hist" #testbed scens
# # folderpath <- "C:/Users/cfelletter/Documents/CRSS/Scenario" #CRSS scens
# scen <- list.files(folderpath)
### from BA CRSS results
folderpath <- "C:/Users/fellette/Documents/GIT/CRSS/Scenario" #BA
scen <- list.files(folderpath) #only if want all scenarios
folderpath <- "C:/Users/fellette/Documents/GIT/crss.trirvw2020/Scenario" #BA
scen <- c("Base_8004.mdl,ISM1988_2018,2007Dems,NoDO_GREAT_7001,MTOM_Most_Aug21IC",
"All_8004_NoDO,ISM1988_2018,2007Dems,NoDO_GREAT_7001,MTOM_Most_Aug21IC")
### from Manoa CRSS results
folderpath <- "M:/Shared/CRSS/2021/Scenario_dev"
# NOTE(review): scen1/scen2 (and scens below) are not defined in this
# file - they must already exist in the session from other scripts.
scen <- c(scen1,scen2)
###if you have a scens list from other code
scen <- scens
###always check this before proceeding
## which rdf files
rdf <- c('AspinallOps') #c('UBRch')
## CRSS-MTOM files
rdf <- c("SystemConditions","Res","UBRes","xtraRes") #"UBRch.rdf"
## GREAT files
rdf <- c("DailyFlows","Res","UBRes")#,"UBDO","Drought") #don't need UBDO since no DO, or Drought for HClass
# files for Process CRSS
rdf <- c("KeySlots","Check","MPPE","MWDICS","SystemConditions","Res","CSD_ann","CSD_mon")
### setup files
# Build the input (.rdf) and output (.xlsx) file name vectors; the bare
# `rdfs` / `xlsx` lines echo them for an interactive sanity check.
rdfs <- rdf #if you previously ran Move and Rename.R
xlsx <- rdfs
rdfs <- paste0(rdfs, ".rdf")
xlsx <- paste0(xlsx, ".xlsx")
rdfs
xlsx
# Run RdfToExcelExecutable -----------------------------------
# For every scenario folder, convert each requested .rdf file to .xlsx by
# shelling out to the RdfToExcel executable. Missing input files are
# reported and skipped.
stopifnot(file.exists(rdf2excel))
for(j in seq_along(scen)){
  fpath <- file.path(folderpath,scen[j])
  message("Starting: ", scen[j])
  for (i in seq_along(rdfs)) {
    ifile <- file.path(fpath, rdfs[i])
    message("Starting: ", rdfs[i])
    if (!file.exists(ifile)) {
      # Bug fix: the newline was mistyped as "/n" in the original.
      message(ifile, "\nDoes not exist.")
    } else {
      ofile <- file.path(fpath, xlsx[i])
      cmd <- c("-i", ifile, "-o", ofile)
      system2(rdf2excel, args = cmd)
    }
  }
}
# TODO: parse the log file. Delete it if it ends in
# "RdfToExcel: Workbook write successfully completed", otherwise keep it.
|
4a61cc1d0ddd476dfffb4aa501cf97429f0b4aab
|
53851868e25801999033fe8d7c3150b73e7dde65
|
/R/UKHEI/weightedAdjacencyMatrix.r
|
ef1bfa6a89aa76c2b06270910b6f0e8bcaefe0b3
|
[] |
no_license
|
xuzhikethinker/PRG
|
bb7e75d27f9da7611d3c26f10bb083ec69025487
|
25b971f6e65ef13f80d3a56732e4bb6d4502bb55
|
refs/heads/master
| 2016-09-06T02:27:18.042949
| 2013-03-27T18:17:53
| 2013-03-27T18:17:53
| 9,262,600
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 856
|
r
|
weightedAdjacencyMatrix.r
|
# Per-community rank of the within-community average distance.
#
# For each community, computes the paper-count-weighted average distance to
# every community (ignoring internal papers / self-loops), then records
# where the community's own internal average distance ranks within its row.
#
# adjMat     - institution x institution paper-count (adjacency) matrix
# distMatrix - institution x institution distance matrix
# instToComm - community x institution membership matrix
# numberComm - number of communities; defaults to nrow(instToComm)
#              (previously read from a global of the same name)
#
# Returns a numeric vector of length numberComm with each community's rank.
weightedAdjacencyRank <- function(adjMat, distMatrix, instToComm,
                                  numberComm = nrow(instToComm)){
  # Zero the diagonal ONLY. The original `adjMat - diag(adjMat)` recycled
  # the diagonal vector down the columns, which also corrupted every
  # off-diagonal entry ([i,j] became [i,j] - d[i]).
  adjMatNSL <- adjMat
  diag(adjMatNSL) <- 0
  distAmNSL <- distMatrix * adjMatNSL
  # Aggregate paper counts and distance-weighted counts to community level.
  cAmNSL <- instToComm %*% adjMatNSL %*% t(instToComm)
  cDistAmNSL <- instToComm %*% distAmNSL %*% t(instToComm)
  # Weighted average distance matrix for the communities.
  # Weights are the number of papers; internal papers (diagonal terms of
  # the institution matrices) are ignored = NSL (no self-loops).
  cWDistAvNSL <- cDistAmNSL/cAmNSL
  # Disconnected community pairs give 0/0 = NaN; treat as zero.
  cWDistAvNSL[is.na(cWDistAvNSL)] <- 0
  cWDistAvNSLrank <- rep(-1, times = numberComm)
  for (ccc in seq_len(numberComm)) {
    cWDistAvNSLrank[ccc] <- rank(cWDistAvNSL[ccc,])[ccc]
  }
  cWDistAvNSLrank
}
|
283deb48f7bc2fc0d663daebf42c839991160aa4
|
06cdfccf8d44f11742fec1162afdfe2421c22302
|
/man/generate_acceptable_data_cpp.Rd
|
e29318cc20a4688db96578d00b979cd5bd29e311
|
[
"MIT"
] |
permissive
|
lgaborini/rdirdirgamma
|
06feabefb12a42d0496818ecc9a0f70f7ccc1c5c
|
f3087f0a81c9e4b08ff56efcc260873eaa16232d
|
refs/heads/master
| 2023-04-18T00:18:29.380512
| 2021-03-05T18:06:26
| 2021-03-05T18:06:26
| 290,997,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,478
|
rd
|
generate_acceptable_data_cpp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{generate_acceptable_data_cpp}
\alias{generate_acceptable_data_cpp}
\title{Generate data that is accepted by ABC.}
\usage{
generate_acceptable_data_cpp(
n_sample,
m_sample,
alpha_0,
beta_0,
nu_0,
mtx_obs,
summarize_eps,
reps,
max_iter,
p_norm,
use_optimized_summary
)
}
\arguments{
\item{n_sample}{hyperparameters that are used to generate data: number of samples per source}
\item{m_sample}{hyperparameters that are used to generate data: number of sources}
\item{alpha_0}{hyperparameters that are used to generate data}
\item{beta_0}{hyperparameters that are used to generate data}
\item{nu_0}{hyperparameters that are used to generate data}
\item{mtx_obs}{the observed data matrix}
\item{summarize_eps}{ABC thresholds: as many as summary statistics}
\item{reps}{how many datasets are returned}
\item{max_iter}{how many iterations are tried}
\item{p_norm}{exponent of the L^p norm (can be \code{Inf}) (default: 2)}
\item{use_optimized_summary}{if TRUE, return the optimized summary statistics (mean, sd, kurtosis, skewness), else standard (mean, sd)}
}
\value{
a (n x n_obs x p) array of generated data
}
\description{
Generate data that is accepted by ABC.
}
\seealso{
Other ABC functions:
\code{\link{compute_ABC_cpp}()},
\code{\link{compute_distances_gen_obs_cpp}()},
\code{\link{sample_ABC_rdirdirgamma_beta_cpp}()}
}
\concept{ABC functions}
|
3702dbacd87569908d58b7c0bebdf0ad3b40a250
|
f57ab85483c112fc67fa8d2c5886a7cb3a8e995c
|
/tests/testthat/test_guess.R
|
fd1354b1522a065d2fabd4de58e4139e4c324d66
|
[] |
no_license
|
wintercmin/rio
|
7ada15885d92e4300002debf9aa500a99a26652f
|
d013436b77dd14893a5896c8b9cc9b3b2b309a07
|
refs/heads/master
| 2021-01-16T21:31:23.046864
| 2015-03-18T09:03:32
| 2015-03-18T09:03:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
test_guess.R
|
# Unit tests for get_ext(): the helper should return the final file
# extension, lower-cased, regardless of case or the number of dots.
context("Get File Extension")
test_that("File extension converted correctly", {
  # expect_that(..., equals(...)) is deprecated in modern testthat;
  # use the direct expectation instead.
  expect_equal(get_ext("hello.csv"), "csv")
  expect_equal(get_ext("hello.CSV"), "csv")
  expect_equal(get_ext("hello.sav.CSV"), "csv")
})
|
e9d979f8233e6c8c1a22a88339b08e61226cae90
|
2bda11429539681c8247baec5b481cf0208c56e2
|
/man/iexdata.Rd
|
1eed2cad89ae30eb3fe63325286e5efa8b589178
|
[] |
no_license
|
RakhithJK/stockapp
|
786e7ee6d9181ab63b0234fb9b1d9c67eee73f89
|
0f985a0e37c4fe5f2c62549479af89d41e5da5ec
|
refs/heads/master
| 2022-11-09T00:37:41.638781
| 2020-07-02T19:12:27
| 2020-07-02T19:12:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 495
|
rd
|
iexdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iexdata.R
\name{iexdata}
\alias{iexdata}
\title{IEX data}
\usage{
iexdata(ticker, from = NULL, to = NULL)
}
\arguments{
\item{ticker}{stock ticker symbol. E.g. "GOOG".}
\item{from}{start date. Either string or date object.}
\item{to}{end date. Either string or date object.}
}
\value{
dataframe with historical prices
}
\description{
Download historical prices for a given stock from \url{https://iextrading.com}
}
|
f6fe5a8dded04f1eaf118ffee07813309a702945
|
012259a4ab7ac234a73f09bdf39754e9887b8928
|
/man/inference_plot.Rd
|
fc4d32751cc71b6f757fac3531bba5d83bebd773
|
[] |
no_license
|
AngelosPsy/multifear
|
66485c0e428b6002152d3feafff3c3977942a52c
|
3dec34a0c72506a550a2f8115e4ecbd4bdf622d0
|
refs/heads/master
| 2022-02-12T16:08:33.224525
| 2022-02-01T16:45:37
| 2022-02-01T16:45:37
| 245,685,847
| 2
| 1
| null | 2021-11-17T20:10:02
| 2020-03-07T18:38:31
|
R
|
UTF-8
|
R
| false
| true
| 1,439
|
rd
|
inference_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inference_plot.R
\name{inference_plot}
\alias{inference_plot}
\title{inference_plot}
\usage{
inference_plot(
data,
alpha_level = 0.05,
add_line = TRUE,
na.rm = FALSE,
framework = "Both",
col = c("gray45", "maroon4", "brown1"),
return_plot = TRUE
)
}
\arguments{
\item{data}{a data frame with the results of a multiverse analysis}
\item{alpha_level}{What should be the alpha level used (default to 0.05)}
\item{add_line}{Whether to add a line with the alpha level in the produced histogram (default to \code{TRUE})}
\item{na.rm}{Should NA's be removed (default to \code{FALSE}). See details for more information}
\item{framework}{Inference framework. Values could be "NHST", "Bayesian", or "Both" (no case sensitivity)}
\item{col}{A length three vector with the colors to be used for ANOVAS, t-tests, and mixed models (in this order)}
\item{return_plot}{Whether to return a plot or not (default to TRUE)}
}
\value{
A histogram summarizing the results.
}
\description{
Function for plotting the multiverse results.
}
\details{
For the plot the NAs in the \code{p.value} column are removed automatically -- so what \code{ggplot2} does automatically but here no message is returned.
The \code{return_plot} argument is there in case you want to combine multiple panels and you do
not want to have a plot returned every time you run the code.
}
|
95cd1fce13065dab73102adb6a39e1f4ee263490
|
12eab6d1208f29f6a4210cee0a712e8304240814
|
/mrds/man/histline.Rd
|
d0e409538bb70f147094be60b00e91bac66ee49d
|
[] |
no_license
|
jlaake/mrds
|
c8920ffdd91c411f2bfb91a15abd786f8b0b1d0e
|
64bb11a5682a2fd56611cef9860be476ea5f14be
|
refs/heads/master
| 2021-01-18T06:25:57.563386
| 2013-12-18T19:13:48
| 2013-12-18T19:13:48
| 2,009,699
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
rd
|
histline.Rd
|
\name{histline}
\alias{histline}
\title{Plot histogram line}
\usage{
histline(height, breaks, lineonly = FALSE, outline = FALSE, fill = FALSE,
ylim = range(height), xlab = "x", ylab = "y", det.plot = FALSE, ...)
}
\arguments{
\item{height}{heights of histogram bars}
\item{breaks}{cutpoints for x}
\item{lineonly}{if TRUE, drawn with plot; otherwise with
lines to allow addition of current plot}
\item{outline}{if TRUE, only outline of histogram is
plotted}
\item{fill}{If fill==TRUE, uses polygon() to fill bars}
\item{ylim}{limits for y axis}
\item{xlab}{label for x axis}
\item{ylab}{label for y axis}
\item{det.plot}{if TRUE, plot is of detection so yaxis
limited to unit interval}
\item{\dots}{Additional unspecified arguments for plot
(fill==TRUE)}
}
\value{
None
}
\description{
Takes bar heights (height) and cutpoints (breaks), and
constructs a line-only histogram from them using the
function plot() (if lineonly==FALSE) or lines() (if
lineonly==TRUE).
}
\author{
???
}
|
bfd4fd811b5433a1e947d81e8d800b3b855f3579
|
58b5653f2c3e582a3de4aa5059b33282e000e8cc
|
/man/Poisson.Rd
|
1b51924e72166b20d8687427e7a00eb0840d9e21
|
[] |
no_license
|
hayate0304/Rsymbulate
|
539c158af6d758ccf5254c5feaf440be26419089
|
3fc7fd0ca0e9c476e4da0f8382787ac5bbddd010
|
refs/heads/master
| 2020-03-25T18:57:41.909510
| 2019-01-24T04:47:27
| 2019-01-24T04:47:27
| 144,058,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 363
|
rd
|
Poisson.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{Poisson}
\alias{Poisson}
\title{Defines a probability space for a Poisson distribution.}
\usage{
Poisson(lam)
}
\arguments{
\item{lam}{(float): rate parameter for the Poisson distribution}
}
\description{
Defines a probability space for a Poisson distribution.
}
|
585a1ff61a07b90703bd531fa67be80b6154031f
|
d5f19cf29c8c3607adaec502ba28af078bf60aab
|
/man/SQLServerDriver-class.Rd
|
cb405a8b6365f2fd551bcf0241254ee13dd2736a
|
[] |
no_license
|
mnel/RSQLServer
|
749e960b519723cc622483d10de58ffa76403d69
|
3fe72ae20d880c436a1276f8ab522b1a27255ab6
|
refs/heads/master
| 2020-12-26T03:34:39.423955
| 2014-10-10T21:47:21
| 2014-10-10T21:47:21
| 25,183,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
rd
|
SQLServerDriver-class.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{SQLServerDriver-class}
\alias{SQLServer}
\alias{SQLServerDriver-class}
\title{An S4 class to represent a SQL Server driver}
\usage{
SQLServer(identifier.quote = "[")
}
\arguments{
\item{identifier.quote}{quote character for a SQL Server identifier can be a
single quotation mark (\code{\'}), a left or right bracket (\code{[]},
defaults to \code{[}), or a double quotation mark (\code{\"}).}
}
\value{
An object of class \linkS4class{SQLServerDriver}.
}
\description{
This class extends the \code{\link[RJDBC:JDBCDriver-class]{JDBCDriver}} class
to represent a SQL Server driver used to access SQL Server databases. This
should always be initialised with \code{SQLServer()}. JDBCDriver extends
DBIDriver. The package uses the jTDS driver set.
}
\section{Slots}{
\describe{
\item{\code{identifier.quote}}{quote character for a SQL Server identifier can be a
single quotation mark (\code{\'}), a left or right bracket (\code{[]},
defaults to \code{[}), or a double quotation mark (\code{\"}).}
\item{\code{jdrv}}{Java object reference to an instance of the SQL Server driver if
the driver can be instantiated by a default constructor. This object is only
used as a fall-back when the driver manager fails to find a driver.}
}}
\examples{
\dontrun{
SQLServer()
}
}
\references{
\href{http://jtds.sourceforge.net/}{jTDS project}
}
|
f5d542fab6acf17f0c42c0e6a513d77346f6590a
|
631b7fb4b4aff7f03a17d393b606449a681ee027
|
/plot4.R
|
1c4cc241ec4432d7e1f61a2d3aacd62c3602c30c
|
[] |
no_license
|
rofeld/ExData_Prjoj2
|
ee1f484a5e4110188db99c1498b79ba3fd97baef
|
9a337a991f139cfa247d2c9d753aec79838bce7d
|
refs/heads/master
| 2020-05-19T22:31:21.567393
| 2014-07-19T00:55:45
| 2014-07-19T00:55:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
plot4.R
|
# Plot 4: total emissions from coal combustion-related sources across the
# United States per year, saved to plot4.png.
cur_dir <- "C:/dev/R/Coursera/Proj2" # working directory where .rds files are located
setwd(cur_dir)
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Source codes whose short name mentions "coal" (case-insensitive).
coal_indx <- SCC[grep("coal", SCC$"Short.Name", ignore.case = TRUE), "SCC"]
NEI.Coal <- NEI[NEI$SCC %in% coal_indx, ]
# Total coal-related emissions per year.
df.tot.emiss <- aggregate(NEI.Coal[, "Emissions"], list(Year = NEI.Coal$year), FUN = "sum")
library(ggplot2)
library(scales)
gg <- ggplot(df.tot.emiss, aes(x = Year, y = x)) + geom_line() +
  scale_y_continuous(name = "Total Emissions", labels = comma)
gg <- gg + ggtitle("National Coal Combustion")
# Bug fix: the plot must be rendered on a device before dev.copy() can
# duplicate it; the original left print(gg) commented out, so dev.copy()
# ran against an empty/null device.
print(gg)
dev.copy(png, 'plot4.png', width = 480, height = 480)
dev.off()
|
6e90e318c926642e16c1788888bc6ab0ca38df57
|
d0a73e5ee95b54efb416d2afdb4b2c48a410738d
|
/HW_1.R
|
243f92b2447a1e7c4912e95daea49cdbbfb0ec3d
|
[] |
no_license
|
PankoAliaksandr/Project-WU-Minimum_Variance_Optimization
|
cc9e7cc0c74fddcd74b8c3eacb6a4198092f16cd
|
3d7134efc19ceeb57625246d278eeee0914737aa
|
refs/heads/master
| 2022-02-22T04:03:29.523001
| 2019-09-23T13:42:30
| 2019-09-23T13:42:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,278
|
r
|
HW_1.R
|
# Assignment 1: Minimum Variance Portfolios
# Glossary:
# PC - The Principal Component Estimator
# SAC - Shrinkage Towards the Average-Correlation Estimator
library(zoo)        # yearmon date arithmetic
library(matlib)     # linear algebra helpers
library(matrixcalc) # matrix property checks (singular / positive semi-definite)
library(quadprog)   # solve.QP for the long-only portfolios
library(ggplot2)    # plotting
library(reshape2)   # melt() for reshaping before plotting
# Constants
# NOTE(review): number_of_stocks is mutated later via `<<-` by
# get_relevant_companies() and get_monthly_returns(); 500 is only the
# starting value.
number_of_stocks <- 500
number_of_months <- 60   # five years of monthly observations
one_month <- 1/12        # yearmon steps are fractions of a year
last_available_date <- "Dec 2017"
five_years <- 5
pattern <- "%d-%m-%Y"
end_date <- as.yearmon("31-12-2017", pattern)
# Load provided data
# NOTE(review): presumably provides cnam, DT.members, returns and spx
# (all referenced below) - verify; the absolute path is machine-specific.
load("D:/data_ex1_covariance_20181122.Rdata")
# Harmonise the loaded tables (cnam, DT.members, returns) so that they
# share a single unique security key.
#
# Side effects only: modifies the three global data frames in place via
# `<<-` - strips leading zeros from each `iid`, builds `id` = gvkey+iid
# as the unique key, then drops the now-redundant gvkey/iid (and cnam's
# sich) columns.  Returns nothing useful.
prepare_data <- function(){
  # Create unique keys:
  # Delete leading zeros
  cnam$iid <<- sub("^[0]+", "", cnam$iid)
  DT.members$iid <<- sub("^[0]+", "", DT.members$iid)
  returns$iid <<- sub("^[0]+", "", returns$iid)
  # Unite gvkey and iid to create unique keys:
  cnam$id <<- paste(cnam$gvkey, cnam$iid, sep="")
  DT.members$id <<- paste(DT.members$gvkey, DT.members$iid, sep="")
  returns$id <<- paste(returns$gvkey, returns$iid, sep="")
  # Drop columns made redundant by the combined key:
  cnam <<- subset(cnam, select = -c(iid, gvkey, sich))
  DT.members <<- subset(DT.members, select = -c(iid, gvkey))
  returns <<- subset(returns, select = -c(iid, gvkey))
}
# Return the S&P 500 constituents as of `end_date`.
#
# end_date - zoo::yearmon month matched against DT.members$ym
#
# Reads the DT.members and cnam globals; as a side effect updates the
# number_of_stocks global (via `<<-`) to the constituent count.
# Returns a data.frame of constituents joined with their company names.
get_relevant_companies <- function(end_date){
  # Companies that were members of the index in the given month
  sp500_stocks <- DT.members[DT.members$ym == end_date,]
  # Reset row names after the filter
  rownames(sp500_stocks) <- NULL
  # Add company name from cnam (left join on the unique id key)
  sp500_stocks <- merge(x = sp500_stocks, y = cnam, by = "id", all.x = TRUE)
  # Drop the date column, no longer needed
  sp500_stocks <- subset(sp500_stocks, select = -c(ym))
  # Update the global count of stocks in the index
  number_of_stocks <<- nrow(sp500_stocks)
  return(sp500_stocks)
}
# Build a months x stocks data frame of monthly returns for the five-year
# window ending at `end_date`.
#
# sp500_stocks - constituents data.frame (needs an `id` column)
# end_date     - zoo::yearmon of the last month in the window
#
# Columns with more than 20% missing observations are dropped entirely;
# the remaining NAs are replaced by the column mean.  Also shrinks the
# number_of_stocks global (via `<<-`) to the surviving column count.
# NOTE(review): merging inside the loop grows the frame one column at a
# time (quadratic-ish); kept as-is.
get_monthly_returns <- function(sp500_stocks, end_date){
  # 60 month-end stamps covering the window
  time_period <- seq((end_date - five_years + one_month), end_date, by = one_month )
  monthly_returns <- data.frame(matrix(ncol = 1, nrow = number_of_months))
  colnames(monthly_returns) <- "Time Period"
  monthly_returns["Time Period"] <- time_period
  # Left-join each stock's monthly total returns onto the time axis
  for(stock_id in sp500_stocks$id){
    stock_returns_df <- returns[(returns$id == stock_id), c("ym", "trt1m")]
    colnames(stock_returns_df) <- c("Time Period", stock_id)
    monthly_returns <- merge(x = monthly_returns, y = stock_returns_df,
                             by = "Time Period", all.x = TRUE)
  }
  # Move the dates into the row names and drop the date column
  rownames(monthly_returns) <- monthly_returns$`Time Period`
  monthly_returns$`Time Period`<- NULL
  # Drop sparse columns; impute the remaining NAs with the column mean
  for(col in colnames(monthly_returns)){
    ratio_of_na <- sum(is.na(monthly_returns[[col]])) / length(monthly_returns[[col]])
    if(ratio_of_na > 0.2){
      # Too many gaps: remove the stock entirely
      monthly_returns[[col]] <- NULL
    }
    else{
      col_mean <- mean(monthly_returns[[col]], na.rm = TRUE)
      monthly_returns[[col]][is.na(monthly_returns[[col]])] <- col_mean
    }
  }
  number_of_stocks <<- min(number_of_stocks, ncol(monthly_returns))
  return(monthly_returns)
}
# Principal-component (PC) covariance estimator.
#
# Keeps the first three principal components of the sample covariance
# matrix and adds a diagonal of residual variances obtained by regressing
# each asset's returns on the three PC portfolio returns.
#
# monthly_returns            - T x p matrix/data.frame of returns (p >= 3)
# sample_cov_monthly_returns - p x p sample covariance of those returns
#
# Returns the p x p PC covariance estimate.
calculate_PC_covariance_matrix <- function(monthly_returns, sample_cov_monthly_returns){
  n_factors <- 3
  # Bug fix: derive the dimension from the input instead of the
  # `number_of_stocks` global the original read (stale when columns are
  # dropped elsewhere).
  p <- ncol(sample_cov_monthly_returns)
  # Eigenvalue decomposition: eigenvalues come back sorted descending and
  # the eigenvector matrix is orthogonal.
  eigenS <- eigen(sample_cov_monthly_returns, symmetric = TRUE)
  Lambda <- diag(eigenS$values)
  # Keep only the leading n_factors eigenvalues.  Guard the degenerate
  # p <= n_factors case, where (n_factors+1):p would count DOWN and break.
  if (n_factors < p) {
    diag(Lambda)[(n_factors + 1):p] <- 0
  }
  SigmaPC1 <- eigenS$vectors %*% Lambda %*% t(eigenS$vectors)
  # Eigenvector portfolios, rescaled so each weight vector sums to one.
  w1 <- eigenS$vectors[,1] / sum(eigenS$vectors[,1])
  w2 <- eigenS$vectors[,2] / sum(eigenS$vectors[,2])
  w3 <- eigenS$vectors[,3] / sum(eigenS$vectors[,3])
  # Monthly returns of the three PC portfolios.
  r1 <- as.matrix(monthly_returns) %*% w1
  r2 <- as.matrix(monthly_returns) %*% w2
  r3 <- as.matrix(monthly_returns) %*% w3
  # Residual variance of each asset after regressing on the PC returns.
  resVar <- rep(0, times = p)
  for (j in seq_len(p)){
    model <- lm(monthly_returns[,j] ~ (1 + r1 + r2 + r3))
    resVar[j] <- var(model$residuals)
  }
  SigmaPC2 <- diag(resVar)
  SigmaPC1 + SigmaPC2
}
# Shrinkage-towards-average-correlation (SAC) covariance estimator.
#
# Builds a target matrix that keeps each asset's sample variance but
# replaces every pairwise correlation with the average off-diagonal
# correlation, then shrinks the sample covariance halfway towards it.
#
# monthly_returns            - T x p matrix/data.frame of returns
# sample_cov_monthly_returns - p x p sample covariance of those returns
#
# Returns the p x p SAC covariance estimate.
calculate_SAC_covariance_matrix <- function(monthly_returns, sample_cov_monthly_returns){
  # Bug fix: derive the dimension from the input instead of the
  # `number_of_stocks` global the original read (stale when columns are
  # dropped elsewhere).
  p <- ncol(sample_cov_monthly_returns)
  sample_cor_monthly_returns <- cor(monthly_returns)
  # Average off-diagonal correlation.
  mean_corr_value <- mean(sample_cor_monthly_returns[upper.tri(sample_cor_monthly_returns)])
  # Constant-correlation matrix C (p x p) with unit diagonal.
  C <- matrix(data = mean_corr_value, nrow = p, ncol = p)
  diag(C) <- 1
  # Diagonal matrix of sample standard deviations.
  Delta <- diag(sqrt(diag(sample_cov_monthly_returns)))
  # Average-correlation covariance target.
  SigmaAC <- Delta %*% C %*% Delta
  # Equal-weight shrinkage of the sample covariance towards the target.
  0.5 * sample_cov_monthly_returns + 0.5 * SigmaAC
}
# Print basic structural diagnostics of a covariance matrix: symmetry,
# singularity, and positive semi-definiteness.  The matrix is rounded to
# 8 decimals first to suppress floating-point noise.  Console output only.
matrix_check <- function(matr){
  # Small printer: label, value, newline (matches the original byte-for-byte).
  report <- function(label, flag) {
    cat(label, flag)
    cat('\n')
  }
  m <- round(matr, 8)
  report("Symmetric:", isSymmetric(m))
  report("Singular:", is.singular.matrix(m, tol = 1e-8))
  report("Positive semi-definite:", is.positive.semi.definite(m, tol=1e-8))
}
# Run matrix_check() on the sample, PC, and SAC covariance estimates,
# each under a labelled header, in that order.  Console output only.
check_covariance_matrices <- function(sample_cov_monthly_returns, SigmaSAC, SigmaPC){
  labelled <- list("Original" = sample_cov_monthly_returns,
                   "SigmaPC"  = SigmaPC,
                   "SigmaSAC" = SigmaSAC)
  for (nm in names(labelled)) {
    cat(sprintf("\nFor %s covariance matrix:\n", nm))
    matrix_check(labelled[[nm]])
  }
}
# Print a sign breakdown of portfolio weights and return them rounded.
#
# portfolio_weights - numeric vector (or one-column matrix) of weights
#
# Side effect: cat()s the number of positive / negative / zero weights
# after rounding to 4 decimals.  Returns the rounded weight vector.
analize_portfolio_weights <- function(portfolio_weights){
  # Fix: the original's `digit = 4` relied on partial argument matching;
  # spell the `digits` argument out.
  portfolio_weights_rounded <- as.vector(round(portfolio_weights, digits = 4))
  cat("\nNumber of weights > 0:\n", sum(portfolio_weights_rounded > 0))
  cat("\nNumber of weights < 0:\n", sum(portfolio_weights_rounded < 0))
  cat("\nNumber of weights == 0:\n", sum(portfolio_weights_rounded == 0))
  return(portfolio_weights_rounded)
}
# Minimum-variance portfolio weights under five specifications.
#
# SigmaPC  - p x p PC covariance estimate
# SigmaSAC - p x p SAC covariance estimate
#
# Returns a data.frame with one column of weights per specification:
#   w_PC    - PC covariance, short sales allowed (closed form)
#   w_PC_c  - PC covariance, long-only (quadratic program)
#   w_SAC   - SAC covariance, short sales allowed (closed form)
#   w_SAC_c - SAC covariance, long-only (quadratic program)
#   w_EQ    - equally weighted benchmark
calculate_portfolio_weights <- function(SigmaPC, SigmaSAC){
  # Fix: use the matrix dimension rather than the `number_of_stocks`
  # global the original depended on (stale when columns are dropped).
  n <- nrow(SigmaPC)
  Unit <- rep(1, n)
  # Case 1: PC, unconstrained: w = Sigma^-1 1 / (1' Sigma^-1 1)
  SigmaPC_inv <- solve(SigmaPC)
  w_PC <- SigmaPC_inv %*% Unit / as.numeric(t(Unit) %*% SigmaPC_inv %*% Unit)
  # Case 2: PC, long-only: min w'Sigma w  s.t.  sum(w) = 1, w >= 0
  w_PC_c <- solve.QP( Dmat = SigmaPC,
                      dvec = rep(0, n),
                      Amat = cbind(Unit, diag(n)),
                      bvec = c(1, rep(0, n)),
                      meq = 1)$solution
  # Case 3: SAC, unconstrained (same closed form)
  SigmaSAC_inv <- solve(SigmaSAC)
  w_SAC <- SigmaSAC_inv %*% Unit / as.numeric(t(Unit) %*% SigmaSAC_inv %*% Unit)
  # Case 4: SAC, long-only
  w_SAC_c <- solve.QP( Dmat = SigmaSAC,
                       dvec = rep(0, n),
                       Amat = cbind(Unit, diag(n)),
                       bvec = c(1, rep(0, n)),
                       meq = 1)$solution
  # Case 5: equally weighted benchmark
  w_EQ <- rep(1 / n, times = n)
  data.frame("w_PC" = w_PC,
             "w_PC_c" = w_PC_c,
             "w_SAC" = w_SAC,
             "w_SAC_c" = w_SAC_c,
             "w_EQ" = w_EQ)
}
# Print sign breakdowns for the four optimised portfolios and collect the
# rounded weight vectors into a single data.frame (w_PC, w_PC_c, w_SAC,
# w_SAC_c).  The equally weighted column is not included.
analyze_portfolios_weights <- function(weights){
  cat("\nFor PC non-restricted:\n")
  w_pc <- analize_portfolio_weights(weights$w_PC)
  cat("\nFor PC restricted:\n")
  w_pc_c <- analize_portfolio_weights(weights$w_PC_c)
  cat("\nFor SAC non-restricted:\n")
  w_sac <- analize_portfolio_weights(weights$w_SAC)
  cat("\nFor SAC restricted:\n")
  w_sac_c <- analize_portfolio_weights(weights$w_SAC_c)
  data.frame("w_PC" = w_pc,
             "w_PC_c" = w_pc_c,
             "w_SAC" = w_sac,
             "w_SAC_c" = w_sac_c)
}
# For each of the four optimised portfolios, attach the weights to the
# constituents table, sort by weight descending, and keep the ten largest
# positions.  Returns one wide data.frame whose column groups are prefixed
# w_PC / w_PC_c / w_SAC / w_SAC_c.
find_most_important_stocks <- function(sp500_stocks, rounded_weights) {
  # Attach one weight column and sort the table by it, largest first.
  ranked <- function(w) {
    tbl <- cbind(sp500_stocks, weights = w)
    tbl[order(tbl$weights, decreasing = TRUE), ]
  }
  top_10 <- data.frame(
    "w_PC"    = ranked(rounded_weights$w_PC)[1:10, ],
    "w_PC_c"  = ranked(rounded_weights$w_PC_c)[1:10, ],
    "w_SAC"   = ranked(rounded_weights$w_SAC)[1:10, ],
    "w_SAC_c" = ranked(rounded_weights$w_SAC_c)[1:10, ]
  )
  return(top_10)
}
# Monthly return series of every portfolio specification plus the index.
#
# monthly_returns - T x p matrix/data.frame of stock returns
# weights         - data.frame from calculate_portfolio_weights()
# end_date        - yearmon end of the five-year window (used to slice spx)
#
# Reads the spx and five_years globals.  NOTE(review): the single-argument
# subscript `spx[cond]` suggests spx is a data.table - verify.  Also note
# main_part_2() calls this function WITHOUT end_date, which would error.
# Returns a data.frame with columns PC, PC_c, SAC, SAC_c, EQ, SP500.
calculate_portfolios_returns <- function(monthly_returns, weights, end_date){
  # Portfolio return each month = returns matrix times weight vector
  returns_PC_no_rest <- as.matrix(monthly_returns) %*% weights$w_PC
  returns_PC_rest <- as.matrix(monthly_returns) %*% weights$w_PC_c
  returns_SAC_no_rest <- as.matrix(monthly_returns) %*% weights$w_SAC
  returns_SAC_rest <- as.matrix(monthly_returns) %*% weights$w_SAC_c
  returns_EQ <- as.matrix(monthly_returns) %*% weights$w_EQ
  # Benchmark: S&P 500 index returns over the same window
  sp500_returns <- spx[spx$ym >= (end_date - five_years) & spx$ym <= end_date]
  returns_SP500 <- sp500_returns$ret * 100 # in percents, to match the stocks
  portfolios_returns <- data.frame( "PC" = returns_PC_no_rest,
                                    "PC_c" = returns_PC_rest,
                                    "SAC" = returns_SAC_no_rest,
                                    "SAC_c" = returns_SAC_rest,
                                    "EQ" = returns_EQ,
                                    "SP500" = returns_SP500)
  return(portfolios_returns)
}
# Average monthly return per portfolio.
#
# portfolios_returns - data.frame from calculate_portfolios_returns()
#
# Returns a one-row data.frame with columns PC, PC_c, SAC, SAC_c, EQ, SP500.
calculate_avg_returns <- function(portfolios_returns){
  cols <- c("PC", "PC_c", "SAC", "SAC_c", "EQ", "SP500")
  as.data.frame(as.list(vapply(portfolios_returns[cols], mean, numeric(1))))
}
# Standard deviation of the monthly returns per portfolio.
#
# portfolios_returns - data.frame from calculate_portfolios_returns()
#
# Returns a one-row data.frame with columns PC, PC_c, SAC, SAC_c, EQ, SP500.
calculate_std_returns <- function(portfolios_returns){
  cols <- c("PC", "PC_c", "SAC", "SAC_c", "EQ", "SP500")
  as.data.frame(as.list(vapply(portfolios_returns[cols], sd, numeric(1))))
}
# Plot the rolling-window average monthly returns (%), one coloured line
# per portfolio.
#
# avg_returns_df - data.frame, one row per rolling window, one column per
#                  portfolio specification
#
# Returns a ggplot object (callers inside functions must print() it).
plot_average_returns <- function(avg_returns_df){
  # Bug fixes: the original read column names from the `avg_returns`
  # GLOBAL (a free variable) and hard-coded seq(1:516) even when the
  # frame has a different number of rows, which errors.
  if (exists("avg_returns", inherits = TRUE) &&
      ncol(avg_returns) == ncol(avg_returns_df)) {
    # Backward-compatible relabelling from the global, when it matches.
    colnames(avg_returns_df) <- colnames(avg_returns)
  }
  avg_returns_df$index <- seq_len(nrow(avg_returns_df))
  D <- melt(avg_returns_df, id = 'index')
  ggplot(D, aes(index, value, group = variable, color = variable)) +
    geom_line() + ggtitle("Average Monthly Returns(%)") +
    xlab("Rolling window number") + ylab("Return")
}
# Plot the rolling-window monthly standard deviations (%), one coloured
# line per portfolio.
#
# std_returns_df - data.frame, one row per rolling window, one column per
#                  portfolio specification
#
# Returns a ggplot object (callers inside functions must print() it).
plot_std_returns <- function(std_returns_df){
  # Bug fixes: the original read column names from the `std_returns`
  # GLOBAL (a free variable) and hard-coded seq(1:516) even when the
  # frame has a different number of rows, which errors.
  if (exists("std_returns", inherits = TRUE) &&
      ncol(std_returns) == ncol(std_returns_df)) {
    # Backward-compatible relabelling from the global, when it matches.
    colnames(std_returns_df) <- colnames(std_returns)
  }
  std_returns_df$index <- seq_len(nrow(std_returns_df))
  R <- melt(std_returns_df, id = 'index')
  ggplot(R, aes(index, value, group = variable, color = variable)) +
    geom_line() + ggtitle("Monthly Standard Deviations(%)") +
    xlab("Rolling window number") + ylab("Standard Deviation")
}
# Part 1: single-window analysis for the five years ending Dec 2017.
# Builds the return panel, both covariance estimators, all five portfolio
# specifications, and their summary statistics.
# NOTE(review): every result is held in a local variable; only the value
# of the final assignment is returned invisibly - nothing is persisted
# or printed beyond the cat() output of the helper functions.
main_part_1 <- function(){
  prepare_data()
  # end_date here is the global Dec-2017 yearmon
  sp500_stocks <- get_relevant_companies(end_date)
  monthly_returns <- get_monthly_returns(sp500_stocks, end_date)
  sample_cov_monthly_returns <- cov(monthly_returns)
  SigmaSAC <- calculate_SAC_covariance_matrix(monthly_returns, sample_cov_monthly_returns)
  SigmaPC <- calculate_PC_covariance_matrix(monthly_returns, sample_cov_monthly_returns)
  check_covariance_matrices(sample_cov_monthly_returns, SigmaSAC, SigmaPC)
  weights <- calculate_portfolio_weights(SigmaPC, SigmaSAC)
  rounded_weights <- analyze_portfolios_weights(weights)
  top_10 <- find_most_important_stocks(sp500_stocks, rounded_weights)
  portfolio_returns <- calculate_portfolios_returns(monthly_returns, weights, end_date)
  avg_returns <- calculate_avg_returns(portfolio_returns)
  std_returns <- calculate_std_returns(portfolio_returns)
}
# Part 2: rolling five-year window analysis.
#
# Slides a five-year monthly window from the start of the index
# membership data up to Dec 2017, recomputes both covariance estimators
# and all portfolio weights for each window, and plots the time series of
# average returns and standard deviations.
main_part_2 <- function(){
  end_date <- DT.members[1]$ym + five_years
  i <- 1
  # Pre-allocate one row per possible monthly window.
  avg_returns_df <- data.frame(matrix(NA, nrow = 517, ncol = 6))
  std_returns_df <- data.frame(matrix(NA, nrow = 517, ncol = 6))
  # NOTE(review): comparing a yearmon to the string "Dec 2017" relies on
  # zoo's coercion rules - verify the loop terminates where intended.
  while(end_date <= last_available_date){
    cat(i) # progress indicator
    sp500_stocks <- get_relevant_companies(end_date)
    monthly_returns <- get_monthly_returns(sp500_stocks, end_date)
    sample_cov_monthly_returns <- cov(monthly_returns)
    SigmaSAC <- calculate_SAC_covariance_matrix(monthly_returns, sample_cov_monthly_returns)
    SigmaPC <- calculate_PC_covariance_matrix(monthly_returns, sample_cov_monthly_returns)
    weights <- calculate_portfolio_weights(SigmaPC, SigmaSAC)
    # Bug fix: calculate_portfolios_returns() takes end_date as its third
    # argument; the original omitted it here and would error immediately.
    portfolio_returns <- calculate_portfolios_returns(monthly_returns, weights, end_date)
    avg_returns_df[i,] <- calculate_avg_returns(portfolio_returns)
    std_returns_df[i,] <- calculate_std_returns(portfolio_returns)
    i <- i + 1
    end_date <- end_date + one_month
  }
  # Drop unused pre-allocated rows and label the columns by portfolio so
  # the plot legends are readable.
  filled <- seq_len(i - 1)
  portfolio_names <- c("PC", "PC_c", "SAC", "SAC_c", "EQ", "SP500")
  avg_returns_df <- setNames(avg_returns_df[filled, , drop = FALSE], portfolio_names)
  std_returns_df <- setNames(std_returns_df[filled, , drop = FALSE], portfolio_names)
  # ggplot objects must be print()ed inside a function to render.
  print(plot_average_returns(avg_returns_df))
  print(plot_std_returns(std_returns_df))
}
# Script entry points: run the single-window analysis, then the rolling one.
main_part_1()
main_part_2()
|
e0a14e936270a44a625908bca0c1cf6a6aa92ead
|
ecee7073a403ff4d40a3ebc7050c51bbfa5a1247
|
/functions.R
|
894e5c842cbf46733cae4f2a8162b56b1befaf56
|
[] |
no_license
|
arietma/ibf-uganda
|
3c50ad2d9894d2138fa1db8135f87de12b5f536c
|
5d2db93152dafbc4606bce4684df937d0112010f
|
refs/heads/main
| 2023-01-20T19:36:43.509368
| 2020-11-26T12:10:29
| 2020-11-26T12:10:29
| 309,714,988
| 0
| 1
| null | 2020-11-26T12:10:31
| 2020-11-03T14:39:34
|
R
|
UTF-8
|
R
| false
| false
| 1,121
|
r
|
functions.R
|
# Sum-extract a multi-layer raster over one polygon, processing the layers
# in n sequential batches to limit memory use.
#
# raster:  a terra SpatRaster (many layers, e.g. one per date)
# polygon: a single-feature polygon to extract over
# n:       number of layer batches to process sequentially
# Returns a 1-row data frame with one summed value per layer.
terra_extract_part <- function(raster, polygon, n) {
  # Bug fix: the original body ignored both arguments and read the globals
  # `terra_s` and `terraUganda[i,]`; it now uses `raster` and `polygon`,
  # matching how extract_polygon() calls it.
  x <- ceiling(nlyr(raster) / n)  # layers per batch
  for (j in 1:n){
    cat(sprintf("%d/%d\n", j, n))
    terra_part <- terra::subset(raster, (x * (j - 1) + 1):min((x * j), nlyr(raster)))
    terra_sub <- terra::extract(terra_part, polygon, fun=sum)
    # Accumulate the per-batch results column-wise.
    if (j == 1){
      terra_all <- terra_sub
    } else {
      terra_all <- cbind(terra_all, terra_sub)
    }
  }
  return(terra_all)
}
# Extract the per-layer sums for polygon i as percentages of its total,
# returning a data frame with one row per layer (date) and the polygon's
# district name as the value column.
#
# rasters:  multi-layer SpatRaster (one layer per date)
# polygons: polygon collection; column ADM1_EN holds district names
# i:        index of the polygon to process
# n:        number of layer batches forwarded to terra_extract_part()
#
# NOTE(review): depends on the global `fullraster` for the denominator, and
# assumes each layer name carries a YYYYMMDD date at characters 13-20 —
# confirm against the raster naming convention.
extract_polygon <- function(rasters, polygons, i, n) {
  terra_all <- terra_extract_part(rasters, polygons[i,], n)
  # Total over all dates, used to normalize each layer to a percentage.
  total_amount <- terra::extract(fullraster, polygons[i,], fun=sum)
  perc_all <- as.data.frame(t(terra_all / total_amount[1,2] * 100))
  colnames(perc_all) <- polygons$ADM1_EN[i]
  perc_all$date <- as.Date(substr(rownames(perc_all),13,20), format = "%Y%m%d")
  return(perc_all)
}
# Plot one district's time series from a list of data frames produced by
# extract_polygon() (first column = district values, named after the
# district; `date` column = observation dates).
#
# datalist: list of per-district data frames
# n:        index of the district to plot, or NULL
# name:     district name to plot (matched against each element's first
#           column name), used when n is NULL
# Returns a ggplot object, or NULL (with a message) on bad input.
plot_district <- function(datalist, n = NULL, name = NULL)
{
  if (is.null(n) && is.null(name)) {
    cat("either n or name has to be given\n")
    return()
  }
  if (is.null(n)) {
    # Bug fix: the original accepted `name` but never used it, silently
    # returning NULL. Look the district up via the first column name,
    # which extract_polygon() sets to the district name.
    n <- which(vapply(datalist, function(d) names(d)[1], character(1)) == name)
    if (length(n) == 0) {
      cat(sprintf("district '%s' not found\n", name))
      return()
    }
    n <- n[1]
  }
  ggplot(datalist[[n]], aes_string(y = names(datalist[[n]])[1], x = "date")) + geom_line()
}
|
52361a1557d68719693211b08607a2985f3c81c2
|
3687e29bd71d62e992ff064ee6b888dbeee31c21
|
/source/lncDifferentiaExxpression.R
|
8fad16c9bb4a637b887d0d1b166456479387bfe1
|
[] |
no_license
|
HongyuanWu/lncRNA-2
|
1d93de0928c3f9524d91b88fe74dfa2edd6104e4
|
e49abf45e4d9621b4e26b74fd6c944ea60fb1750
|
refs/heads/master
| 2022-09-28T17:44:43.206149
| 2020-06-02T15:43:37
| 2020-06-02T15:43:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,429
|
r
|
lncDifferentiaExxpression.R
|
# Differential expression of lncRNAs across differentiation timepoints with
# DESeq2, followed by MA plot, heatmaps, a sample-distance matrix, and PCA.
library("DESeq2")
library("pheatmap")
library("tidyverse")
library("RColorBrewer")
# Input matrix creation: counts for 6 samples (3 in-development, 3 neurons)
# live in columns 7-12; column 1 holds the feature IDs.
data <- read.table(file = "data/SCells_LncRNA_s1_CombinedSamples.txt", header = TRUE)
timepoints <- c("Day_16_D","Day_30_D","Day_60_D","Day_16_N","Day_30_N","Day_60_N")
colnames(data)[7:12] <- timepoints
rownames(data)<-data[,1]
count_data <- data[,c(7:12)]
# Sample metadata; Timepoint is recycled over the two states.
coldata <- data.frame(state=c("InDev","InDev","InDev","Neurons","Neurons","Neurons"),
                      Timepoint=c("Day_16","Day_30","Day_60"),
                      row.names = timepoints)
# DESeq object creation; design tests for a Timepoint effect.
dds <- DESeqDataSetFromMatrix(countData = count_data, colData = coldata, design = ~Timepoint)
levels(dds$Timepoint)
dds$Timepoint <- relevel(dds$Timepoint, ref = "Day_16") # establish day 16 as the reference level
dds <- DESeq(dds)
res16_30 <- results(dds, name = "Timepoint_Day_30_vs_Day_16")
res16_60 <- results(dds, name = "Timepoint_Day_60_vs_Day_16")
## VISUALIZE
outdir <- "output/DE/"
res <- results(dds, alpha = 0.05)
res<-res[order(res$padj),]
head(res,10)
# Write the result summary to a text file.
sink(file = (paste0(outdir,"Summary.txt")))
summary(res)
sink()
# Expression versus significance
png(paste0(outdir, "MAPlot_Pval_0.05.png"))
plotMA(dds, ylim=c(-2,2), main="MA Plot")
dev.off()
# Data transformations for visualizations
rld <- rlogTransformation(dds,blind=TRUE)
vsd <- varianceStabilizingTransformation(dds, blind = TRUE)
# Heatmap of the count matrix: top 30 genes by mean normalized count.
select <- order(rowMeans(counts(dds, normalized=TRUE)), decreasing=TRUE)[1:30]
df<- as.data.frame(colData(dds)[,c("Timepoint","state")])
top50 <- subset(res, pvalue<0.05)
# NOTE(review): the trailing [1:25] takes the first 25 *values* of the count
# matrix (column-major), not 25 rows — the variable name suggests 50 genes
# were intended; confirm.
countTop50 <- subset(counts(dds), rownames(counts(dds)) %in% rownames(top50))[1:25]
top25pval <- res[order(res$padj),] [1:25,]
png(paste0(outdir,"Heatmap_top25byPvalVSD2.png"))
pheatmap(assay(vsd)[rownames(top25pval),], cluster_rows=FALSE,show_rownames=TRUE, cluster_cols=FALSE, annotation_col=df)
dev.off()
# NOTE(review): countTop25 is never defined anywhere in this script — this
# line will error at runtime; probably meant one of the objects above.
pheatmap(countTop25)
png(paste0(outdir,"Heatmap_top25byPvalVSD.png"))
# NOTE(review): countTop50 holds count values, not row names/indices into
# assay(vsd) — verify this subsetting is what was intended.
pheatmap(assay(vsd)[countTop50,], cluster_rows=FALSE,show_rownames=TRUE, cluster_cols=FALSE, annotation_col=df)
dev.off()
png(paste0(outdir,"Heatmap_countsVSD.png"))
pheatmap(assay(vsd)[select,], cluster_rows=FALSE,show_rownames=TRUE, cluster_cols=FALSE, annotation_col=df)
dev.off()
png(paste0(outdir,"Heatmap_countsRLD.png"))
pheatmap(assay(rld)[select,], cluster_rows=FALSE,show_rownames=TRUE, cluster_cols=FALSE, annotation_col=df)
dev.off()
# Heatmap of similarity between replicates (Euclidean distances on the
# variance-stabilized data); note `matrix` shadows base::matrix here.
distVSD <- dist(t(assay(vsd)))
matrix <- as.matrix(distVSD)
rownames(matrix) <- paste(vsd$Timepoint,vsd$state, sep = "-")
colnames(matrix) <- paste(vsd$Timepoint,vsd$state, sep = "-")
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
# Unclustered version (rows/cols in sample order).
png(paste0(outdir,"Heatmap_DistancesNT.png"))
pheatmap(matrix,clustering_distance_rows = distVSD,
         clustering_distance_cols = distVSD,
         cluster_rows = FALSE, cluster_cols = FALSE,
         show_rownames = TRUE,show_colnames = TRUE, fontsize = 15,
         color = hmcol, main = "Distance Matrix")
dev.off()
# Clustered version.
png(paste0(outdir,"Heatmap_Distances.png"))
pheatmap(matrix,clustering_distance_rows = distVSD,
         clustering_distance_cols = distVSD,
         cluster_rows = TRUE, cluster_cols = TRUE,
         show_rownames = TRUE,show_colnames = TRUE,
         color = hmcol, main = "Distance Matrix")
dev.off()
# PCA plot
png(paste0(outdir,"PCA.png"))
print(plotPCA(rld, intgroup=c("Timepoint")))
dev.off()
|
1236da7421179a2c8e144a8e73af93c0b779ee16
|
f081d9af62b6cab758afb4ad1b9f06e2193e73c4
|
/R/sheets_share.R
|
0a8d7de70d64bdb6936e39e16509a9a7bcfeebb7
|
[
"MIT"
] |
permissive
|
MarkEdmondson1234/googlesheets4
|
540c08ae4158a414179c140d4033d31160d0f7ec
|
23e462476641995302275e24892158b10374c963
|
refs/heads/master
| 2020-12-03T07:34:27.657702
| 2020-01-01T17:24:48
| 2020-01-01T17:24:48
| 231,243,893
| 2
| 0
|
NOASSERTION
| 2020-01-01T17:23:12
| 2020-01-01T17:23:11
| null |
UTF-8
|
R
| false
| false
| 1,124
|
r
|
sheets_share.R
|
# currently just for development
# I'm generally auth'd as:
# * as a service acct (which means I can't look at anything in the browser)
# * with Drive and Sheets scope
# * with googlesheets4 and googledrive
# so this is helpful for quickly granting anyone or myself specifically
# permission to read or write a Sheet I'm fiddling with in the browser or the
# API explorer
#
# Note defaults: role = "reader", type = "anyone"
# --> "anyone with the link" can view
#
# examples:
# sheets_share()
# sheets_share(type = "user", emailAddress = "jane@example.com")
# sheets_share(type = "user", emailAddress = "jane@example.com", role = "writer")
# Grant access to a Sheet during development.
# Defaults (role = "reader", type = "anyone") mean "anyone with the link can
# view". Anything in `...` is forwarded to googledrive::drive_share().
sheets_share <- function(ss,
                         ...,
                         role = c(
                           "reader", "commenter", "writer",
                           "owner", "organizer"
                         ),
                         type = c("anyone", "user", "group", "domain")) {
  # match.arg() validates each choice and collapses the vector default to
  # its first element, exactly as the original two-step version did.
  googledrive::drive_share(
    file = googledrive::as_id(as_sheets_id(ss)),
    role = match.arg(role),
    type = match.arg(type),
    ...
  )
}
|
b57a5b1efdd84803c6b7ce108e16c7764f249124
|
08dd5387de20250425f56b4a114e05df1ceff315
|
/bin/significance/getSignificance.R
|
2b3bca3c65a289e4bbed72b179c7060e7e9e8e11
|
[] |
no_license
|
ahmadassaf/graph-sampling
|
e7ba56e4cbb86aeabdf7eb272be6384552e53247
|
3a6e3a4fca6778e4938f2de1eb74b41d0c0e2f96
|
refs/heads/master
| 2021-08-09T01:46:31.681419
| 2021-06-08T23:33:05
| 2021-06-08T23:33:05
| 23,197,721
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,883
|
r
|
getSignificance.R
|
library(pwr)

# Paired t-test significance and required sample sizes for each sampled
# column (4:28) against two baselines: the random sample (column 2) and the
# frequency baseline (column 3). Results are appended as extra rows to the
# dataset and written to <datasetName>_sig.csv.
samplePower <- .80
sampleSigLevel <- 0.05 # alpha / significance level
sampleType <- "paired"
sampleAlternative <- "two.sided"

datasetName = "swdf"
dataset = read.csv(paste(datasetName, ".csv", sep=""),header=TRUE, sep="\t")

# Three leading zeros pad each result vector so it lines up with the
# dataset's leading columns when rbind-ed below.
randomSampleSig <- c(0, 0, 0)
freqBaselineSig <- c(0,0,0)
randomSampleN <- c(0,0,0)
freqBaselineN <- c(0,0,0)

# Baseline 1: random sample (column 2) vs each sampled column.
for (i in 4:28) {
  result <- t.test(dataset[,2], dataset[,i], paired=TRUE, conf.level=1-sampleSigLevel)
  randomSampleSig <- append(randomSampleSig, result$p.value)
  nStats = power.t.test(power = samplePower, sig.level=sampleSigLevel, delta = mean(dataset[,2] - dataset[,i]), sd = sd(dataset[,2] - dataset[,i]), alternative = sampleAlternative, type = sampleType)
  randomSampleN <- append(randomSampleN, nStats$n)
}

# Baseline 2: frequency baseline (column 3) vs each sampled column.
# Bug fix: the original appended to freqBaselineSig/freqBaselineN *inside*
# the tryCatch error handlers, where `<-` only creates a binding local to
# the handler function — on error the outer vectors were never extended
# (the 666666 sentinel was silently dropped), misaligning the result rows.
# tryCatch now *returns* the value, which is appended in the outer scope.
for (i in 4:28) {
  pval <- tryCatch({
    t.test(dataset[,3], dataset[,i], paired=TRUE, conf.level=1-sampleSigLevel)$p.value
  }, error=function(err) {
    cat("err:", conditionMessage(err), "\n")
    666666  # sentinel marking a failed test
  })
  freqBaselineSig <- append(freqBaselineSig, pval)

  nval <- tryCatch({
    power.t.test(power = samplePower, sig.level=sampleSigLevel, delta = mean(dataset[,3] - dataset[,i]), sd = sd(dataset[,3] - dataset[,i]), alternative = sampleAlternative, type = sampleType)$n
  }, error=function(err) {
    cat("err:", conditionMessage(err), "\n")
    666666  # sentinel marking a failed power computation
  })
  freqBaselineN <- append(freqBaselineN, nval)
}

# Append the four result rows and write the augmented table out.
dataframe = rbind(dataset, randomSampleSig, freqBaselineSig, randomSampleN, freqBaselineN)
write.table(dataframe, file=paste(datasetName, "_sig.csv", sep=""), sep="\t", row.names=FALSE)
|
93dd0eda367cd10403264f5f13fb360c2d59a523
|
e1d4541a03df3b5ce94dae89111712601db3f12d
|
/data_cleaning/visualize_data.R
|
0852865a2ec1d2fe63b9eda1c6b3dfea01e6356c
|
[] |
no_license
|
svteichman/wages
|
f2f48d473176e67c0ede934fe41a9313658d9ddb
|
0f5ee033220d42f82cc9727f99f79eee3c2a9571
|
refs/heads/main
| 2023-06-16T14:21:42.753234
| 2021-07-17T18:30:14
| 2021-07-17T18:30:14
| 380,072,819
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,781
|
r
|
visualize_data.R
|
# Setup: load the filtered wave data and the Stan input list, then build the
# N x T `job` data frame of sector codes (one row per worker, one column per
# wave, named X1..XT by data.frame()).
library(tidyverse)
library(xtable)

load(file = "wave3to5_filt.rda")
load(file = "stan_dat.rda")

# visualize r and s by sector ----
# make jobs array
N <- stan_dat$N      # number of workers
K <- stan_dat$K      # number of sectors (codes 0..K-1)
time <- stan_dat$T   # number of waves; note this shadows stats::time
sec <- grep("sec", names(wave3to5_filt))
# NOTE(review): this array assignment is dead code — it is immediately
# overwritten by the data.frame on the next line.
job <- array(data = NA, dim = c(time,N))
job <- data.frame(matrix(nrow = N, ncol = time))
for (i in 1:N) {
  job[i,] <- as.numeric(wave3to5_filt[i,sec])
}
# make array to hold r and s percentages by sector and time
# auxiliary function to add values with 0 people to jobs list
# Return the sector codes present anywhere in `jobs` but missing from
# `vec_ind`, so callers can pad grouped summaries with zero-count rows.
#
# jobs:    data frame / list / vector of sector codes
# vec_ind: sector codes already present in a summary
# Returns the sorted vector of codes not covered by vec_ind.
fill_in <- function(jobs, vec_ind) {
  # Bug fix: the original read the global `job` instead of the `jobs`
  # argument (all call sites pass `job`, so existing results are unchanged,
  # but the function now honors its parameter). Also dropped the needless
  # which(..., arr.ind = T) wrapper in favor of direct logical subsetting.
  all_codes <- sort(unique(unlist(jobs)))
  all_codes[!(all_codes %in% vec_ind)]
}
# Build rs_dat: per sector and wave transition (2 = wave1->2, 3 = wave2->3),
# the worker count, the proportion remaining (per_r) and the proportion
# switching within sector (per_s); then plot counts and both proportions.
rs_dat <- data.frame(matrix(ncol = 5, nrow = 2*K))
names(rs_dat) <- c("sector","time","count","per_r","per_s")
# set sector and time
rs_dat$sector <- rep(0:(K-1),2)
rs_dat$time <- c(rep("2",K),rep("3",K))
# Count workers per sector at each wave; fill_in() supplies the sector
# codes with zero workers so every summary covers all K sectors.
job1 <- job %>% group_by(X1) %>% summarise(count = n())
job2 <- job %>% group_by(X2) %>% summarise(count = n())
uninc <- fill_in(job, job2$X2)
for (i in 1:length(uninc)) {
  job2 <- add_row(job2, X2= uninc[i], count = 0)
}
job2 <- job2 %>% arrange(X2)
job3 <- job %>% group_by(X3) %>% summarise(count = n())
uninc <- fill_in(job, job3$X3)
for (i in 1:length(uninc)) {
  job3 <- add_row(job3, X3 = uninc[i], count = 0)
}
job3 <- job3 %>% arrange(X3)
count_dat <- data.frame(sector = rep(0:(K-1),3),
                        time = c(rep("1",K),rep("2",K),rep("3",K)),
                        count = c(job1$count, job2$count, job3$count))
rs_dat$count <- c(job2$count, job3$count)
# Proportion "remaining" (r indicator) per destination sector and wave.
r <- stan_dat$r
s <- stan_dat$s
r1 <- data.frame(sec = job[,2], r = r[1,]) %>%
  group_by(sec) %>%
  summarise(count = n(),
            r_count = sum(r)) %>%
  mutate(prop = r_count/count)
uninc <- fill_in(job, r1$sec)
for (i in 1:length(uninc)) {
  r1 <- add_row(r1, sec= uninc[i], count = 0, r_count = 0, prop = 0)
}
r1 <- r1 %>% arrange(sec)
r2 <- data.frame(sec = job[,3], r = r[2,]) %>%
  group_by(sec) %>%
  summarise(count = n(),
            r_count = sum(r)) %>%
  mutate(prop = r_count/count)
uninc <- fill_in(job, r2$sec)
for (i in 1:length(uninc)) {
  r2 <- add_row(r2, sec= uninc[i], count = 0, r_count = 0, prop = 0)
}
r2 <- r2 %>% arrange(sec)
rs_dat$per_r <- c(r1$prop,r2$prop)
# Same computation for the within-sector switching indicator s.
s1 <- data.frame(sec = job[,2], s = s[1,]) %>%
  group_by(sec) %>%
  summarise(count = n(),
            s_count = sum(s)) %>%
  mutate(prop = s_count/count)
uninc <- fill_in(job, s1$sec)
for (i in 1:length(uninc)) {
  s1 <- add_row(s1, sec= uninc[i], count = 0, s_count = 0, prop = 0)
}
s1 <- s1 %>% arrange(sec)
s2 <- data.frame(sec = job[,3], s = s[2,]) %>%
  group_by(sec) %>%
  summarise(count = n(),
            s_count = sum(s)) %>%
  mutate(prop = s_count/count)
uninc <- fill_in(job, s2$sec)
for (i in 1:length(uninc)) {
  s2 <- add_row(s2, sec= uninc[i], count = 0, s_count = 0, prop = 0)
}
s2 <- s2 %>% arrange(sec)
rs_dat$per_s <- c(s1$prop,s2$prop)
# Grouped bar charts, one group of bars per sector, saved as PNGs.
ggplot(count_dat, aes(x = as.factor(sector), y = count, group = time, fill = time)) +
  geom_bar(position = "dodge", stat = "identity") + ggtitle("Worker count by sector") +
  xlab("Sector") + ylab("Count") + scale_fill_brewer(palette="Dark2")
ggsave("count_plot.png")
ggplot(rs_dat, aes(x = as.factor(sector), y = per_r, group = time, fill = time)) +
  geom_bar(position = "dodge", stat = "identity") + ggtitle("Proportion remaining by sector") +
  xlab("Sector") + ylab("Proportion") + scale_fill_brewer(palette="Dark2")
ggsave("r_prop_plot.png")
ggplot(rs_dat, aes(x = as.factor(sector), y = per_s, group = time, fill = time)) +
  geom_bar(position = "dodge", stat = "identity") +
  ggtitle("Proportion switching within sector by sector") + xlab("Sector") + ylab("Proportion") +
  scale_fill_brewer(palette="Dark2")
ggsave("s_prop_plot.png")
# visualize mistakes in r by sector ----
# Among workers coded as "remaining" (r == 1), count how often the recorded
# sector nevertheless changed between waves — i.e. likely coding mistakes.
r_dat <- data.frame(sector = rs_dat$sector[1:K], r1_count = rep(0,K),
                    r1_mist = rep(0,K), r2_count = rep(0,K), r2_mist = rep(0,K))
r1_ind <- which(r[1,]==1)
r2_ind <- which(r[2,]==1)
# encode mistakes for the sector in which they end i.e. job 2 if job 1 neq job 2
# mistakes for r1 (wave 1 -> 2); sector codes are 0-based, hence sec+1.
mist_r1 <- list()
for (ind in r1_ind) {
  sec <- job$X2[ind]
  r_dat$r1_count[sec+1] <- r_dat$r1_count[sec+1] + 1
  if (!(job$X1[ind] == job$X2[ind])) {
    r_dat$r1_mist[sec+1] <- r_dat$r1_mist[sec+1] + 1
    mist_r1 <- append(mist_r1, ind)
  }
}
# mistakes for r2 (wave 2 -> 3)
mist_r2 <- list()
for (ind in r2_ind) {
  sec <- job$X3[ind]
  r_dat$r2_count[sec+1] <- r_dat$r2_count[sec+1] + 1
  if (!(job$X2[ind] == job$X3[ind])) {
    r_dat$r2_mist[sec+1] <- r_dat$r2_mist[sec+1] + 1
    mist_r2 <- append(mist_r2, ind)
  }
}
# add columns for percentage wrong
r_dat <- r_dat %>%
  mutate(r1_prop_wrong = r1_mist/r1_count,
         r2_prop_wrong = r2_mist/r2_count)
# Long format for plotting: one row per (sector, transition).
r_plot_dat <- data.frame(sector = c(r_dat$sector, r_dat$sector),
                         count_wrong = c(r_dat$r1_mist, r_dat$r2_mist),
                         prop_wrong = c(r_dat$r1_prop_wrong, r_dat$r2_prop_wrong),
                         time = c(rep("2",K),rep("3",K)))
ggplot(r_plot_dat, aes(x = as.factor(sector), y = count_wrong, group = time, fill = time)) +
  geom_bar(position = "dodge", stat = "identity") +
  ggtitle("Count sector incorrectly coded") + xlab("Sector") + ylab("Count") +
  scale_fill_brewer(palette="Dark2")
ggsave("r_mistake_counts.png")
ggplot(r_plot_dat, aes(x = as.factor(sector), y = prop_wrong, group = time, fill = time)) +
  geom_bar(position = "dodge", stat = "identity") +
  ggtitle("Proportion sector incorrectly coded") + xlab("Sector") + ylab("Proportion") +
  scale_fill_brewer(palette="Dark2")
ggsave("r_mistake_props.png")
# make transition matrices for sector ----
# K x K sector transition counts for both wave transitions, printed as
# LaTeX tables via xtable (sectors 0-based, hence the +1 indexing).
trans_1 <- array(data = as.integer(0), dim = c(K, K))
trans_2 <- array(data = as.integer(0), dim = c(K,K))
for (i in 1:N) {
  trans_1[job[i,1]+1,job[i,2]+1] <- trans_1[job[i,1]+1,job[i,2]+1] + as.integer(1)
  trans_2[job[i,2]+1,job[i,3]+1] <- trans_2[job[i,2]+1,job[i,3]+1] + as.integer(1)
}
# Combined transitions over both wave pairs.
trans <- trans_1 + trans_2
x <- xtable(trans)
print.xtable(x, type="latex")
# Same matrices restricted to the suspected coding mistakes found above.
trans_mist1 <- array(data = as.integer(0), dim = c(K, K))
trans_mist2 <- array(data = as.integer(0), dim = c(K, K))
for (ind in mist_r1) {
  trans_mist1[job[ind,1]+1,job[ind,2]+1] <- trans_mist1[job[ind,1]+1,job[ind,2]+1] + as.integer(1)
}
for (ind in mist_r2) {
  trans_mist2[job[ind,2]+1,job[ind,3]+1] <- trans_mist2[job[ind,2]+1,job[ind,3]+1] + as.integer(1)
}
trans_mist <- trans_mist1 + trans_mist2
x <- xtable(trans_mist)
print.xtable(x, type="latex")
|
ffab0e167d5b131ec70ef366045e4fe58f00bf23
|
859b798db974af7d3784d3cb11957a5c9f78bcb4
|
/leafcutter/setup.R
|
88189a59d275d495e455fcc5316fec550881e2bc
|
[] |
no_license
|
sanderslab/splicing-pipeline-containers
|
63892e858e5cc4bde70c55d28d057f710fdd562f
|
eb00ef8759b167936189e6e4ba1c7e7e69120331
|
refs/heads/main
| 2023-06-18T20:25:06.100148
| 2021-07-16T22:32:14
| 2021-07-16T22:32:14
| 386,777,730
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
setup.R
|
# Container setup: install devtools, install leafcutter from GitHub, load it,
# then echo any trailing command-line arguments (sanity check for the image).
install.packages("devtools", repos='http://cran.us.r-project.org')
devtools::install_github("davidaknowles/leafcutter/leafcutter")
library(leafcutter)
args <- commandArgs(trailingOnly=TRUE)
print(args)
|
c09583df78d0f66d5877a869d4cb4dc803f2740a
|
cfc1510ad69d06c239c7fd7ecffa02b27b878609
|
/man/stat_brace.Rd
|
a0871d0f5d14db02f1e229139e1297dccc5d7d7c
|
[
"MIT"
] |
permissive
|
RoyalTS/ggbrace
|
1d29b0e3b21bb61cc221b66c0c65b2e7634f00cd
|
4333d133a5fb876523bb833e8a544d797d627d61
|
refs/heads/main
| 2023-04-09T23:44:57.326633
| 2021-04-04T11:31:54
| 2021-04-04T11:31:54
| 354,616,103
| 0
| 0
|
MIT
| 2021-04-04T18:20:15
| 2021-04-04T18:20:14
| null |
UTF-8
|
R
| false
| true
| 2,658
|
rd
|
stat_brace.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat_brace.R
\name{stat_brace}
\alias{stat_brace}
\title{create curly braces as a layer in ggplot}
\usage{
stat_brace(
mapping = NULL,
data = NULL,
rotate = 0,
textsize = 5,
distance = NULL,
outerstart = NULL,
width = NULL,
mid = NULL,
npoints = 100,
...
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{rotate}{integer, either 0, 90, 180 or 270 to indicate if the braces should point up, right, down or left respectively}
\item{textsize}{number, regulates text size}
\item{distance}{number, regulates how far away the brace is from the last data point (individually for each group)}
\item{outerstart}{number, overwrites distance and provides one coordinate for all braces}
\item{width}{number, regulates how wide the braces are}
\item{mid}{number, where the pointer is within the bracket space (between 0.25 and 0.75)}
\item{npoints}{integer, number of points generated for the brace curves (resolution). This number will be rounded to be a multiple of 4 for calculation purposes.}
\item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\value{
ggplot2 layer object (geom_path) that can directly be added to a ggplot2 object. If a label was provided, another layer (geom_text) is added.
}
\description{
Imports:
ggplot2
}
\examples{
library(ggbrace)
library(ggplot2)
ggplot(mtcars, aes(mpg, wt, color=factor(am))) +
geom_point() +
facet_wrap(~vs) +
stat_brace(rotate=90, aes(label=factor(am)))
}
|
c6cdd9db6f0392184f5c4791453b4235037ff2eb
|
55ed849c4d5dc38eb2c5071f962272fafca4e3c2
|
/R_KoNLP.R
|
3e07940731cf34b78c56d968c7ba906d891ba782
|
[] |
no_license
|
ljh468/R_console
|
133838bbbd6a89dbddc4167757d46b566c7618ec
|
969060ff35af908bc5c7765ab8e2043e7c5e55ce
|
refs/heads/master
| 2023-01-30T15:52:37.710206
| 2020-12-07T11:37:06
| 2020-12-07T11:37:06
| 299,467,249
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
R_KoNLP.R
|
# One-time environment setup for Korean NLP with KoNLP (Windows paths).
.libPaths()
# Bug fix: setwd() with no argument is an error — supply the project path
# here if a working directory change is needed, e.g. setwd("C:/my/project").
getwd()
Sys.setenv(JAVA_HOME='C:/Java/jdk1.8.0_191')
install.packages("multilinguer")
install.packages("rJava")
install.packages("memoise")
install.packages("KoNLP")
library(multilinguer)
library(rJava)
library(memoise)
library(KoNLP)
# NOTE(review): remotes is only installed further below — run this line
# after install.packages("remotes") in a fresh library.
remotes::install_github('haven-jeon/KoNLP',upgrade = "never", INSTALL_opts=c("--no-multiarch"))
install.packages("dplyr")
library(dplyr)
install.packages(c('stringr','hash','tau','Sejong','RSQLite','devtools'), type="binary")
install.packages("remotes")
install.packages("devtools")
library("devtools")
# Bug fix: upgrade = "naver" is not a valid value (must be one of
# "default", "ask", "always", "never"); corrected the typo to "never".
devtools::install_github('haven-jeon/KoNLP',upgrade = "never", INSTALL_opts=c("--no-multiarch"))
|
9593cd554f66b39965c4ce03b57b2db4b3ec1ba0
|
744b5597ce50a99c7633d59d609502ab709b8a9e
|
/run_analysis.R
|
f00308d6632542667a28fe858d49c0addf4cc8d0
|
[] |
no_license
|
stepsutt/CleaningDataProject
|
3f7e242e7e6574af642eb31e43b4a9cb66afc7f1
|
34af291a429b5b3de2354feed429e66c3d68b70b
|
refs/heads/master
| 2016-09-06T00:58:47.076495
| 2014-08-24T20:58:39
| 2014-08-24T20:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,377
|
r
|
run_analysis.R
|
## Getting & Cleaning Data project: download the UCI HAR dataset, merge the
## train/test sets, keep mean()/std() features, label activities, tidy the
## variable names, and write a summarized tidy data set.

## Bug fix: melt() and ddply() (used at the bottom) come from reshape2 and
## plyr, which the original script never loaded; load them up front so the
## script runs in a fresh R session.
library(reshape2)
library(plyr)

##create directory for project data if it does not exist
if (!file.exists("proj")) {
  dir.create("proj")
}
##Download the zip file
pfileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("proj/projdata.zip")) {
  download.file(pfileURL,"proj/projdata.zip")
}
##and unzip
if(!file.exists("proj/UCI HAR Dataset")) {
  unzip("proj/projdata.zip",exdir="proj")
}
##Get data column & activity labels
dataLabels <- read.table("proj/UCI HAR Dataset/features.txt",
                         col.names=c("featureNo","featureLabel"))
activityLabels <- read.table("proj/UCI HAR Dataset/activity_labels.txt",
                             col.names=c("activityNo","activityLabel"))
##Get training data
##Get data, the subject and then the activity foreach measurement
train <- read.table("proj/UCI HAR Dataset/train/x_train.txt",
                    col.names=dataLabels$featureLabel)
trainSubject <- read.table("proj/UCI HAR Dataset/train/subject_train.txt",
                           col.names="Subject")
train$Subject <- trainSubject[,1]
trainActivity <- read.table("proj/UCI HAR Dataset/train/y_train.txt",
                            col.names="Activity")
train$Activity <- trainActivity[,1]
##for merged data set the source in case we need to identify
train$Source <- "train"
##Get test data
##Get data, the subject and then the activity foreach measurement
test <- read.table("proj/UCI HAR Dataset/test/x_test.txt",
                   col.names=dataLabels$featureLabel)
testSubject <- read.table("proj/UCI HAR Dataset/test/subject_test.txt",
                          col.names="Subject")
test$Subject <- testSubject[,1]
testActivity <- read.table("proj/UCI HAR Dataset/test/y_test.txt",
                           col.names="Activity")
test$Activity <- testActivity[,1]
test$Source <- "test"
##Now merge the test and train data together
combined <- rbind(train, test)
##get column indices for the tidy data set: the 66 mean()/std() features,
##plus Subject (562), Activity (563) and Source (564)
tidyCols <- c(grep("(mean|std)\\(\\)", dataLabels[,2]), 562, 563, 564)
##Now extract these columns only
tidyData <- combined[,tidyCols]
##Change activity to description from code
tidyData$Activity <- tolower(as.character(activityLabels[tidyData$Activity,2]))
##tidy column names
##remove repetition of Body
names(tidyData) <- gsub("BodyBody","Body",names(tidyData))
##Now get rid of .
names(tidyData) <- gsub("\\.","",names(tidyData))
##Going to use CamelCase
##using format (as regular expression) for all apart from Subject, Activity and Source
## (Time|FFT)(Body|Gravity)(Gyroscope|Accelerometer)(Jerk|Magnitude){0,2}(Mean|STD)(X|Y|Z){0,1}
names(tidyData) <- gsub("^t","Time",names(tidyData))
names(tidyData) <- gsub("^f","FFT",names(tidyData))
names(tidyData) <- gsub("Acc","Accelerometer",names(tidyData))
names(tidyData) <- gsub("Gyro","Gyroscope",names(tidyData))
names(tidyData) <- gsub("Mag","Magnitude",names(tidyData))
names(tidyData) <- gsub("std","STD",names(tidyData))
names(tidyData) <- gsub("mean","Mean",names(tidyData))
##Get 1 row per observation for an activity and subject
##NOTE(review): grouping only by Activity and Subject averages *across all
##66 variables* into a single mean per group; if a per-variable mean was
##intended, add "variable" to the ddply grouping — confirm the spec.
meltedtidyData2 <- melt(tidyData, id=c("Activity","Subject"), measure.vars=1:66)
tidyData2 <- ddply(meltedtidyData2,c("Activity","Subject"),summarise,mean=mean(value))
##Now write it out
write.table(tidyData2,"proj/tidyData2.txt",row.name=FALSE)
|
48f76202dff49f2ec41c222b2e133c8ad322857b
|
8474e5591c6e2564895bde0522424f7cb60c90d1
|
/man/tile_by_regions.Rd
|
93115965a39c935ccd6c8d2f1d8faaf2a5827a3f
|
[] |
no_license
|
ajpatel2007/methylSig
|
398504ffe01d51c806098ee9da2751e09d260f65
|
cb469678e2e4b5c3569d0927675d698dbe0f8f01
|
refs/heads/master
| 2022-04-14T04:20:20.587995
| 2020-03-25T18:38:33
| 2020-03-25T18:38:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 959
|
rd
|
tile_by_regions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tile_by_regions.R
\name{tile_by_regions}
\alias{tile_by_regions}
\title{Group cytosine / CpG level data into regions based on genomic regions}
\usage{
tile_by_regions(bs, gr)
}
\arguments{
\item{bs}{a \code{BSseq} object.}
\item{gr}{a \code{GRanges} object.}
}
\value{
A \code{BSseq} object with loci of regions matching \code{gr}. Coverage and methylation read count matrices are aggregated by the sums of the cytosines / CpGs in the regions per sample.
}
\description{
An optional function to aggregate cytosine / CpG level data into regions based on a \code{GRanges} set of genomic regions.
}
\examples{
data(bsseq_stranded, package = 'methylSig')
regions = GenomicRanges::GRanges(
seqnames = c('chr1','chr1','chr1'),
ranges = IRanges::IRanges(
start = c(5,35,75),
end = c(30,70,80)
)
)
tiled = tile_by_regions(bs = bsseq_stranded, gr = regions)
}
|
16d973c1127e5389b5915f54a0dfcc9794a75a9a
|
a5003e5db33ee4f306438b45e80808162169d8ad
|
/R/list_portals.R
|
6b2631037d42e8b2bc8788918c3304bf40e8a32f
|
[
"MIT"
] |
permissive
|
Tutuchan/fodr
|
0efb7f0b97a56278ea3e6bc6039a2c9bc12b9a56
|
5810a74aaa0606b166dc66a4444a2fbd6fc4c77f
|
refs/heads/master
| 2021-01-22T13:41:42.546404
| 2019-01-03T12:03:02
| 2019-01-03T12:03:02
| 59,779,584
| 23
| 6
|
MIT
| 2019-06-27T07:29:57
| 2016-05-26T20:00:39
|
R
|
UTF-8
|
R
| false
| false
| 156
|
r
|
list_portals.R
|
#' List the available portals
#'
#' Displays a data.frame of the portals supported by the package.
#'
#' @export
list_portals <- function() {
  portals()
}
|
e7b4c12f6fee786a11ae618d9040f0c7337d9d74
|
3c634d9af741cf8862aa5c560a55c2f984dadfec
|
/man/demographicModeling.Rd
|
56b8205306bad95016e4c44c28dec36644bbfc0d
|
[] |
no_license
|
dlandy/demographicModeling
|
9943e1e590940b9e2f7db7cf5a0ea0b7420d79d4
|
87ddd5d78e2c1031accda8792805a04162fbaed8
|
refs/heads/master
| 2021-09-14T03:26:33.925414
| 2018-05-07T16:35:03
| 2018-05-07T16:35:03
| 117,877,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,164
|
rd
|
demographicModeling.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demographicModeling-package.r
\docType{package}
\name{demographicModeling}
\alias{demographicModeling}
\alias{demographicModeling-package}
\title{demographicModeling.}
\description{
This package provides a number of tools we have found useful in modeling demographic estimations, and other kinds of estimations.
This includes one set of functions to calculate, given tibble column names, Della-Carpini & Keeter's political knowledge
assessment, the BNT (coming soon), to recode on political ideology (coming soon), and to calculate and recode
other standard measures.
}
\details{
It includes a set of functions to build and regularize graphs, and third set of functions to simplify running JAGS models
over the kinds of data we typically collect from qualtrics, ipsosMORI, and GSS.
It includes a few other convenience functions (such as gammaFromModeSD) that probably belong elsewhere.
To apply the psychophysical functions we typically apply to these kinds of situations, install dlandy/psychophysicalModels
To see a full list of functions, do ls("package:demographicModeling")
}
|
457e446a21f364bef20efd82a2dc4502db63b229
|
dc79a2186fcacfeb2aef0c1f096da8a835bd5a69
|
/worddatabse.R
|
0f955acf1ba9b86f4f474e9f98b2ee6e02d24fea
|
[] |
no_license
|
gaka444/BIG5
|
56b676e1fdb5c627d234f59650b53337402eb122
|
23b1d6acaa0ccf973f41a8c4a64dc49dc4ab8866
|
refs/heads/master
| 2021-01-25T00:03:55.971622
| 2018-04-11T17:50:49
| 2018-04-11T17:50:49
| 123,288,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
r
|
worddatabse.R
|
# Load the Big Five personality trait word lists.
# Each .txt file holds one term per line; lines starting with ';' are
# comments (hence comment.char = ';').
# Generalized: the download directory is one variable instead of five copies
# of the same absolute path — edit big5_dir to match your machine.
big5_dir <- 'C:/Users/acerpc/Documents/Big5'

extra.words <- scan(file.path(big5_dir, 'extraversion.txt'), what='character', comment.char=';')
agree.words <- scan(file.path(big5_dir, 'agreeableness.txt'), what='character', comment.char=';')
open.words <- scan(file.path(big5_dir, 'openness.txt'), what='character', comment.char=';')
cons.words <- scan(file.path(big5_dir, 'conscientious.txt'), what='character', comment.char=';')
neuro.words <- scan(file.path(big5_dir, 'neuroticism.txt'), what='character', comment.char=';')
# Bug fix: removed a stray trailing `??`, which is a parse error in a script.
|
67349fe8ed9db1568e321eb811fa7f5c1feb32de
|
db10b0336f082c09393a39ee5de99e7aaa05014d
|
/man/dbR6_send_transaction.Rd
|
4c7a4913994c38b8e038281c39619b60db5d8d61
|
[] |
no_license
|
leandroroser/dbR6
|
8709c4977f59ed8c22bbcb63e78db1119c7d19f1
|
f722a08ab2930bd4be0bbd50cf5a08b117980fe1
|
refs/heads/master
| 2021-09-14T21:46:30.801292
| 2018-05-20T08:51:06
| 2018-05-20T08:51:06
| 112,393,261
| 1
| 0
| null | 2018-01-18T15:58:47
| 2017-11-28T21:50:04
|
R
|
UTF-8
|
R
| false
| true
| 285
|
rd
|
dbR6_send_transaction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbR6_send_transaction.R
\name{dbR6_send_transaction}
\alias{dbR6_send_transaction}
\title{dbR6_send_transaction}
\usage{
dbR6_send_transaction(...)
}
\description{
dbR6_send_transaction
}
\keyword{internal}
|
2e926a3b6d566a6e8e06caa371179cf75ca19ae3
|
3a904aa24a78bcc7fcfb7c7853ffb7a7785c8d82
|
/man/CPRD/ARCHIVE/TEST_CASES/codeToRun.R
|
e44a750db068e43b1e19fd0c38e4053b1ca64aba
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/ETL-CDMBuilder
|
18226c5e1ee3ef6833967b94600da2d2b48c6371
|
a32893602210c4b5e08bdbff9a4762218907d342
|
refs/heads/master
| 2023-04-27T22:48:10.942050
| 2022-04-26T18:50:50
| 2022-04-26T18:50:50
| 27,511,076
| 47
| 43
|
Apache-2.0
| 2023-04-16T00:30:08
| 2014-12-03T22:33:40
|
C#
|
UTF-8
|
R
| false
| false
| 4,156
|
r
|
codeToRun.R
|
# Establish Extended Type and Connection strings
#=============================
# Reload the testing package and build the DatabaseConnector connection.
# NOTE(review): the package is loaded as CPRDTesting here but referenced as
# packageName = 'CPRDtesting' below — case matters; confirm the real name.
detach("package:CPRDTesting", unload=TRUE)
library(CPRDTesting)

source_schema <- "CDM_CPRD_TESTING_RAW.dbo"
cdm_schema <- "CDM_CPRD_TESTING_CDM.dbo"
# Database part of the schema (text before the first '.').
cdmDatabaseSchema = strsplit(source_schema, '[.]')[[1]][1]
pw <- NULL
dbms <- "sql server"
user <- NULL
server <- "SERVER_NAME"
port <- NULL

connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = dbms,
                                                                server = server,
                                                                user = user,
                                                                password = pw,
                                                                port = port)

#BUILD RAW DATA
#=============================
connection <- DatabaseConnector::connect(connectionDetails)

# Render and translate the raw-data insert SQL for the target dialect,
# save a copy to disk, and execute it in one batch.
insertSql <- SqlRender::translateSql(SqlRender::renderSql(paste(getInsertSql(connectionDetails), sep = "", collapse = "\n"),
                                                          source_schema = source_schema)$sql,
                                     targetDialect = connectionDetails$dbms)$sql
SqlRender::writeSql(insertSql, 'insertSql.sql')
DatabaseConnector::executeSql(connection, insertSql)

# Earlier statement-by-statement variant, kept for debugging reference.
#insertSqls <- getInsertSql(connectionDetails)
#for (i in 1:905){#length(insertSqls)){ # 901 issue 857 ok
#for (i in 907:length(insertSqls)){ # 901 issue 857 ok 880
#  insertSql <- SqlRender::translateSql(SqlRender::renderSql(insertSqls[i],
#                                       source_schema = source_schema)$sql,
#                                       targetDialect = connectionDetails$dbms)$sql
#  if(insertSql!='' & substring(insertSql,1,2)!='--')
#    DatabaseConnector::executeSql(connection, insertSql)
#} # 900 was an issue

# add the extra tables: each packaged SQL file is rendered for this dialect
# and executed against the raw schema.
sql <- SqlRender::loadRenderTranslateSql('HES_FieldDefinitions_inserts2.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
                                         cdm_database_schema= cdmDatabaseSchema)
DatabaseConnector::executeSql(connection, sql)
sql <- SqlRender::loadRenderTranslateSql('HES_FieldMappings_inserts2.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
                                         cdm_database_schema= cdmDatabaseSchema)
DatabaseConnector::executeSql(connection, sql)
sql <- SqlRender::loadRenderTranslateSql('lookup_insert.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
                                         cdm_database_schema= cdmDatabaseSchema)
DatabaseConnector::executeSql(connection, sql)
sql <- SqlRender::loadRenderTranslateSql('lookuptype_insert.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
                                         cdm_database_schema= cdmDatabaseSchema)
DatabaseConnector::executeSql(connection, sql)
sql <- SqlRender::loadRenderTranslateSql('entity_insert.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
                                         cdm_database_schema= cdmDatabaseSchema)
DatabaseConnector::executeSql(connection, sql)

#sql <- SqlRender::loadRenderTranslateSql('hes_linkage_coverage_insert.sql', packageName = 'CPRDtesting', dbms = connectionDetails$dbms,
#                                         cdm_database_schema= cdmDatabaseSchema)
#DatabaseConnector::executeSql(connection, sql)
#RUN BUILDER
#=============================
#TEST CDM
#=============================
testSql <- SqlRender::translateSql(SqlRender::renderSql(paste(gsub('; ','', getTestSql(connectionDetails)), sep = "", collapse = "\n"),
cdm_schema = cdm_schema)$sql,
targetDialect = connectionDetails$dbms)$sql
SqlRender::writeSql(testSql, 'testSql.sql')
DatabaseConnector::executeSql(connection, testSql)
DatabaseConnector::querySql(connection, SqlRender::renderSql("SELECT status, count(*) FROM @cdm_schema.test_results group by status", cdm_schema = cdm_schema)$sql)
DatabaseConnector::querySql(connection, SqlRender::renderSql("SELECT * FROM @cdm_schema.test_results where status = 'FAIL'", cdm_schema = cdm_schema)$sql)
|
6cb9d6f51887e3dc66e12f89d179ca8de508b06f
|
dfcee24a721f03c8b87401942704185ebf327e5a
|
/man/predicts.Rd
|
c649a967d88fd049a09e26d78afd441131fe9848
|
[] |
no_license
|
benjaminschlegel/glm.predict
|
104967d6705e262a40638f38fc1357d52f7d6fbe
|
026526eb3b43001f05566a1ff3bed09bfe548c5f
|
refs/heads/master
| 2023-02-26T16:16:58.860243
| 2023-02-09T07:26:10
| 2023-02-09T07:26:10
| 42,925,027
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,153
|
rd
|
predicts.Rd
|
\name{predicts}
\alias{predicts}
\title{
predicted values and discrete change
}
\description{
The function calculates the predicted values and the difference of
a range of cases with the confidence interval. It can be used for any
glm, polr or multinom model.
}
\usage{
predicts(model, values, position = NULL, sim.count = 1000, conf.int = 0.95,
sigma = NULL, set.seed = NULL, doPar = FALSE,
type = c("any", "simulation", "bootstrap"))
}
\arguments{
\item{model}{
the model-Object generated with glm(), glm.nb(), polr(), multinom(), mlogit() or tobit()
}
\item{values}{
The values of cases as character in the order how they appear in the summary(model) Estimate.
The values must be in the following way: \emph{"value1;value2;value3;..."}. Each one of the values can be one of the following:
\itemize{
\item \strong{"all"}: takes all unique values of that variable
\item \strong{"mean"}: takes the mean of that variable (can only be used when the variable is numeric)
\item \strong{"median"}: takes the median of that variable (assumes for factors that they are correctly ordered)
\item \strong{"mode"}: takes the mode of that variable
\item \strong{"Q4"}: takes the quartiles (0,0.25,0.5,0.75,1) of that variable (other number for other quantiles)
\item \strong{"min"}: takes the minimum of that variable
\item \strong{"max"}: takes the maximum of that variable
\item \strong{from-to,by}: takes all values from "from" to "to" with the distance "by" (for example: "160-180,5" --> 160,165,170,175,180)
\item \strong{from-to}: same as from-to,by with by=1 (for example: "2-8" --> 2,3,4,5,6,7,8); also works for factors and takes the given levels form their position
\item \strong{value1,value2,value3,...}: takes the given values (for example: "160,180" --> 160,180); also works for factors and takes the given levels form their position
\item \strong{value1}: takes the given value (for example: "5.34" --> 5.34); also works for factors and takes the given level form its position
\item \strong{log(from-to,by)}: takes the log of all values from "from" to "to" with the distance "by" (for example: "160-180,5" --> 160,165,170,175,180)
\item \strong{log(from-to)}: same as log(from-to,by) with by=1 (for example: "2-8" --> 2,3,4,5,6,7,8)
\item \strong{log(value1,value2,value3,...)}: takes the log of the given values (for example: "160,180" --> 160,180)
\item \strong{log(value1)}: takes the log of the given value (for example: "5.34" --> 5.34)
\item \strong{"F"}: takes all values of a factor/character
\item \strong{"F(1,4,7)"}: takes the first, fourth and seventh level of a factor/character
\item \strong{"F(2)"}: takes the second level of a factor/character
}
}
\item{position}{
OPTIONAL which variable should be taken for the discrete change, the variable must have at least two values. default: only predicted probabilities
}
\item{sim.count}{
OPTIONAL numbers of simulations to be done by the function. default: 1000
}
\item{conf.int}{
OPTIONAL the confidence interval used by the function. default: 0.95
}
\item{sigma}{
OPTIONAL the variance-covariance matrix, can be changed when having for example robust or clustered vcov. default: vcov(model)
}
\item{set.seed}{
OPTIONAL set a seed for the random number generator
}
\item{doPar}{
OPTIONAL if the code should run parallel if more than 2 cores are detected
}
\item{type}{
OPTIONAL choose between simulation and bootstrap, "any" chooses between those two
according to the number of cases (bootstrap if n < 500)
}
}
\details{
The function makes a simulation for the all combination of cases and compares them to each other.
}
\value{
The output is a data.frame with the predicted values and discrete changes.
}
\author{
Benjamin Schlegel, \email{kontakt@benjaminschlegel.ch}
}
\examples{
\dontrun{
model1 = glm(Sex ~ Height + Smoke + Pulse, data=MASS::survey, family=binomial(link=logit))
summary(model1)
# comparing person with hight 150 to 160, 160 to 170, 170 to 180, 180 to 190
# with all combination of(non-)smokers and a median of pulse
predicts(model1, "150-190,10;F;median", position = 1, doPar = FALSE)
}
}
\keyword{ models }
|
35da3e6ed96fa7d873372ddbffb88697acf41b75
|
00b4b71076641c11a74848e2f3a27c6c824044bd
|
/cachematrix.R
|
6e82fca5c0cd35aa4bd6c118968ece5012b343a7
|
[] |
no_license
|
larspijnappel/ProgrammingAssignment2
|
9599379f60222ac1ab4a72ea7ab5595b662fe747
|
f810afecd6dd10a17b7275238a288c4ae0302962
|
refs/heads/master
| 2020-12-25T00:19:35.848733
| 2015-08-19T22:52:56
| 2015-08-19T22:52:56
| 40,563,313
| 0
| 0
| null | 2015-08-11T20:21:22
| 2015-08-11T20:21:22
| null |
UTF-8
|
R
| false
| false
| 4,832
|
r
|
cachematrix.R
|
## ===================================================================================
## Calling the makeCacheMatrix function results in an object in the calling environment, which provides the
## following 'object-oriented style' functionality:
## - perform a solve() function on a new matrix once
## - store the outcome (i.e. the inversed matrix) in the makeCacheMatrix object 'm'.
## - retrieve multiple times the outcome of this calculation by using cached data.
##
## When a different matrix is passed on, the cached data will be overwritten
## with the outcome of the solve() calculation.
## ===================================================================================
## makeCacheMatrix creates an 'object-oriented style' object in the calling environment, which will contain
## four 'methods' available in the calling environment:
## 1. set()
## 2. get()
## 3. setsolve()
## 4. getsolve()
## The variable 'x' is a matrix object passed on from the calling environment.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # makeCacheMatrix object 'm' will eventually set by 'method' setsolve()
set <- function(y) { # method to set the new matrix object 'x' with the calling environment variable 'y'
x <<- y # set the makeCacheMatrix object 'm' with value 'y' (coming from the calling env.)
m <<- NULL # makeCacheMatrix object 'm' will eventually set by 'method' setsolve()
}
get <- function() x # method to get the makeCacheMatrix object 'x' (used by cacheSolve)
setsolve <- function(solve) m <<- solve # method to set the makeCacheMatrix object 'm' (used by cacheSolve)
getsolve <- function() m # method to get the makeCacheMatrix object 'm' (used by cacheSolve)
## return the 4 'methods' as a list to the calling environment.
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## cacheSolve checks if for the passed on variable 'x' the solve() calculation has already been done.
## If so, then the cached outcome will be returned.
## Otherwise a new calculation is done and the results are 'cached'.
## The variable 'x' is a makeCacheMatrix object (with its methods).
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getsolve() # cacheSolve-object 'm' is set with makeCacheMatrix-object 'm'
## IF cacheSolve-object 'm' exists THEN
## quit this function, stating that cached data has been returned is NOT performed.)
if(!is.null(m)) {
message("getting cached data...")
return(m) # cacheSolve-object 'm' is returned to the calling environment
}
## ELSE following steps are performed:
## 1. get the matrix-data from the makeCachMatrix environment
## 2. perform the solve() function
## 3. pass the outcome back to the makeCacheMatrix-environment
## 4. return the function with the value of the cacheSolve-object 'm'
data <- x$get() # cacheSolve-object 'data' is set with makeCacheMatrix-object 'x'
m <- solve(data, ...) # cacheSolve-object 'm' is set with outcome solve-function (performed on 'data' object)
x$setsolve(m) # updated cacheSolve-object 'm' is passed back to the makeCacheMatrix environment
m # cacheSolve-object 'm' is returned to the calling environment
}
## ===================================================================================
### TEST CASES ###
## Originally provided by Thiago Kurovski
## https://class.coursera.org/rprog-031/forum/thread?thread_id=112#post-468
## ===================================================================================
## Check if creating a cacheMatrix and getting its content works fine
x1 <- matrix(rnorm(1000000), nrow = 1000) # matrix 1
x2 <- matrix(rnorm(1000000), nrow = 1000) # matrix 2
cm <- makeCacheMatrix( x1 )
identical( cm$get(), x1 ) ## TRUE
identical( cm$get(), x2 ) ## FALSE
## Check if cacheSolve gives the same result for the same matrix and if it truly uses caching
print(system.time( y1 <- cacheSolve(cm) )) # 1st time calling cacheSolve: requires some calculation time
print(system.time( y2 <- cacheSolve(cm) )) # 2nd time: same output but stating that cached data is used
identical( y1, y2 ) # TRUE
identical( y1, x2 ) # FALSE
## Check if cacheSolve gives the same result as solve()
z <- solve( x1 )
identical( y1, z ) # TRUE
## Check if updating the matrix with set works correctly
x3 <- matrix(rnorm(100), nrow = 10)
cm$set( x3 )
identical( cm$get(), x3 ) # TRUE
## Check if the cache is unvalidated after a set()
y3 <- cacheSolve( cm )
z <- solve( x3 )
identical( y3 , z ) # TRUE
|
fd6d3380034af57b9b48598fad71730c0d052613
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.compute/man/ec2_delete_transit_gateway_connect_peer.Rd
|
e058d80b2edbe513a64d8ab30f9d1f77961cd10b
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 882
|
rd
|
ec2_delete_transit_gateway_connect_peer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_delete_transit_gateway_connect_peer}
\alias{ec2_delete_transit_gateway_connect_peer}
\title{Deletes the specified Connect peer}
\usage{
ec2_delete_transit_gateway_connect_peer(
TransitGatewayConnectPeerId,
DryRun = NULL
)
}
\arguments{
\item{TransitGatewayConnectPeerId}{[required] The ID of the Connect peer.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Deletes the specified Connect peer.
See \url{https://www.paws-r-sdk.com/docs/ec2_delete_transit_gateway_connect_peer/} for full documentation.
}
\keyword{internal}
|
a908366ab02c153d54aca56d66bd3d849b95e054
|
b33735d157848984bc57288b41ce2bded79e2710
|
/R/TimeDiscretization.R
|
ebce6ee34b190f4805e6ad979dad1ce9be4af267
|
[] |
no_license
|
bachirtadde/CInLPN
|
9a00a0cabf5af34ba8403b17d81763db6067b990
|
bb51492aa116635c4bf70afd29de7bdd58de15a5
|
refs/heads/master
| 2023-06-30T15:21:04.684650
| 2023-06-20T14:31:49
| 2023-06-20T14:31:49
| 163,929,733
| 2
| 1
| null | 2021-03-28T14:58:25
| 2019-01-03T06:00:38
|
R
|
UTF-8
|
R
| false
| false
| 5,868
|
r
|
TimeDiscretization.R
|
#' function that return 1 for a vector without observation after/during discretization
#'
#' @param x a vector
#'
#' @return a binary
AnyObsByLine <- function(x){
d<-0
len <-length(na.omit(x))
if(len == 0){
d <- 1
}
return(d)
}
#=====================================================================================
#' function that filters initial data to return only individuals having at least one observation for outcomes onf interest.
#' Meaning that the returned data contained individuals with at least one measurement at the considered visit time.
#'
#' @param data input data
#' @param subject subject identifiant
#' @param outcomes a vector of outcomes of interest: markers
#'
#' @return a dataframe
#'
OneOutcomeAtLeast <- function(data, subject, outcomes){
cl <- match.call()
colnames <- colnames(data)
data2 <- data[, c(subject, outcomes)]
p <- length(outcomes)
if( p < 2){
data2$x_NA <- is.na(data2[,c(outcomes)])
data2$x_NA <- as.numeric(data2$x_NA)
}
if(p>=2){
data2$x_NA <- apply(data2[,c(-which(colnames %in% subject))],MARGIN = 1, FUN = AnyObsByLine)
}
data <- data[which(data2$x_NA ==0),]
return(data)
}
#=====================================================================================
#' function that discretized a vector of time with a given delta.
#'
#' @param Time input continuous time vector
#' @param Delta discretized step
#'
#' @return a discretized time vector
#'
f_TempsDiscr <- function(Time, Delta){
AxeT <-seq(from =min(Time), to = max(Time), by = Delta)
Time_d <- rep(0,length(Time))
for(j in 1:length(Time)){
i <-0
Time_d[j] <- NA
if(!is.na(Time[j])){
Time_d[j] <- 0
while(Time[j] > Delta*i){
i <- i+1
}
Time_d[j] <- i*Delta
}
}
return(Time_d)
}
#=====================================================================================
#' function that discretized a vector of time with a given delta.
#'
#' @param rdata input data which time point has to be discretized
#' @param outcomes a vector of outcomes names
#' @param predictors independent variables to be be included in the modeling
#' @param subject subject identifiant
#' @param Time colname indicating the time
#' @param Delta discretized time step
#'
#' @return a discretized time vector
#'
TimeDiscretization <-function(rdata, subject, outcomes, predictors = NULL, Time, Delta){
cl <- match.call()
colnames<-colnames(rdata)
# Is subject colname available in the data
if(!(subject %in% colnames))stop("Data discretisation failed: Subject colname should be in the data \n")
# Is predictors colnames available in the data
if(!(all(predictors %in% colnames)))stop("Data discretisation failed: All predictors colnames should be in the data \n")
# Is Time colname available in the data
if(!(unique(Time) %in% colnames))stop("Data discretisation failed: Time colname should be in the data \n")
# # check if Delta is not greater than min(Time_(j+1)-Time(j))
# Subjects = unique(rdata[,subject])
# min_Time_diff = 1e23
# for(n in 1:length(Subjects)){
# for(k in 1:length(outcomes)){
# Time_df = sort(rdata[which(rdata[,subject]==Subjects[n] & !is.na(rdata[,outcomes[k]])),Time])
# #
# for(i in 2:length(Time_df)){
# min_Time_diff <- min(min_Time_diff, Time_df[i]-Time_df[(i-1)], na.rm=TRUE)
# }
# }
# }
# if(min_Time_diff > Delta) stop("Discretization failed: Discretization value could not be greater than the delay between two visit time")
#
Time = rep(unique(Time),length(outcomes))## replicate Time
## pre-processing of data: retaining lines with at least one observed outcome value
data <- OneOutcomeAtLeast(rdata, subject= subject, outcomes = outcomes)
cat(paste("Number of rows of the initial data is:", dim(rdata)[1]),"\n", paste("After removing lines without any observation of the outcomes of interest, the number of rows is:", dim(data)[1]),"\n")
K <- length(outcomes)
T_k <-NULL
nameTks <- NULL
for(k in 1:K) {
nameTk <- paste(Time[k], k, sep = "_")
nameTks <- c(nameTks,nameTk)
#discretization of the time by subject: on each subject specific dataset
T_k <- cbind(T_k,assign(nameTk, f_TempsDiscr(data[,Time[k]], Delta)))
}
T_k <- data.frame(T_k)
colnames(T_k) <-nameTks
data2 <-data.frame(cbind(data[,c(subject,outcomes, predictors)],T_k))
# merge
data3 <- na.omit(data2[,c(subject, outcomes[1], predictors, nameTks[1])])
colname <- colnames(data3)
# colname[which(colname==nameTks[1])] <- paste(Time[1],"d", sep = "_")
colname[which(colname==nameTks[1])] <- Time[1]
colnames(data3) <- colname
if(K>1){
for(k in 2:K){
data_k <- na.omit(data2[,c(subject, outcomes[k], predictors, nameTks[k])])
# changing the name of T_i colname into T for the merging
colname <- colnames(data_k)
# colname[which(colname==nameTks[k])] <- paste(Time[1],"d", sep = "_")
colname[which(colname==nameTks[k])] <- Time[1]
colnames(data_k) <- colname
data3 <- merge(data3,data_k, by=c(subject,predictors,Time[1]), all.x = TRUE, all.y = TRUE)
data3 <- data3[order(data3[,subject], data3[,Time[1]]), ]
}
}
# At a visit, an outcome could be missing, but not all outcomes.
# That is, for each row must have at least non missing observation.
# So we need to check this after the discretization process.
data4 <- OneOutcomeAtLeast(data=data3, subject=subject, outcomes = outcomes)
if(dim(data4)[1] < dim(data3)[1])stop("Discretization failed: After discretization, All marker are missing at some visits")
# check if Delta is not greater than min(Time_(j+1)-Time(j))
if(dim(unique(data4))[1] != dim(data4)[1]) stop("Discretization failed: Some rows are the same in the dataset, because of a too large discretisation argument")
return(data4)
}
|
4dda2746e4085a794e49f793c6f2c4297a953c84
|
39afffbc7573ae2483a759391bfa7bb4d2b14c76
|
/cachematrix.R
|
c5e29958972b67d677438c99274f9e9e7f2f0153
|
[] |
no_license
|
Valentino92/ProgrammingAssignment2
|
8b6d390fa4e85569d45d8a10c4ef71fc74fd0e47
|
d83a30dfedd0d23225b1fcf5abbf94add534caa6
|
refs/heads/master
| 2018-01-14T05:32:42.020676
| 2015-11-21T21:33:01
| 2015-11-21T21:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,161
|
r
|
cachematrix.R
|
## This function creates a CacheMatrix object that could calculate the inverse
## of the matrix passed by parameter x
# create the object and its function (setter and getter)
makeCacheMatrix <- function(x = matrix()) {
inverseMatrix <- NULL
set <- function(y) {
x <<- y
inverseMatrix <<- NULL # if the matrix is modified, then the cache of the inverse is wrong so we delete it
}
get <- function() x
setinverse <- function(inverse) inverseMatrix <<- inverse
getinverse <- function() inverseMatrix
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## With this function, we can calculate the inverse of the matrix x, passing
## as parameter a CacheMatrix object created with makeCacheMatrix.
## If the inverse was already calculated, then it uses the old result and return the cache value.
## otherwise it calculates the inverse of x and return it.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
## Return a matrix that is the inverse of 'x'
}
|
d0b6997b9215aee9a7d342e63b2c0399717f8884
|
0078f47d0ecdebed9aff28834516402d0343fdec
|
/R-Codes/mkr_btw21karten.R
|
ad2c7c0bce425236b70cfa0e5186419e1b288108
|
[] |
no_license
|
StatistikVolker/BTW21
|
6f182894c0116c9a00400affe9fdf80325a6d393
|
7542f51161e8debb74928bd9f65e25b6ed476f50
|
refs/heads/main
| 2023-08-12T17:53:34.916655
| 2021-10-07T18:28:16
| 2021-10-07T18:28:16
| 412,388,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,448
|
r
|
mkr_btw21karten.R
|
# mkr_btw21karten.r
# ---------------------------------------------------------------------------
# Verzerrte Wahlkreiskarten:
# ---------------------------------------------------------------------------
# --------------------------------------------------------------------------------
mkr.btw21karten <- function(
maptype,# = "normalmap",
Gebiet,# = "Jena",
GebietWK,# = "Wahlbezirk",
Darstellung,# = "Parteihochburg",
vote,# = "Z",
party,# = "SPD", # = c("CDU","CSU") #als Vector if Union
partycol,# = "red",# ="yellow"
#vote= "Zweitstimme", # or "Erststimme" # first letter ist enough
cartoweight = "Anzahl",
itermax = 10, #Default 10
colvar = "Prozent",
legendlab = "Zweitstimmenergebnis [%]",
colgrps = c(0,5,10,15,20,25,30,35,40,45), # Grenzen der Farbabstufungen, start with 0
collabs = c("bis 5%","5-10%","10-15%","15-20%","20-25%","25-30%","30-35%","35-40%","40-45%","über 45%"),
alphastart = 0.25,
alphaend = 1,
Shapedf = BTW21wk_shapes, # (bzw. BTW21Jwk_shapesfile, BTW21Lwk_shapesfile - J,L Jena, Leipzig)
Rangdf = BTW21Rang,
Parteidf = BTW21Parteien# (bzw. BTW21JRang,BTW21LRang - J,L Jena, Leipzig)
)
{
# Für Cartogram Dateiname - Vermeidung von Umlauten
party4 <- party
if (party == "GR\u00DCNE") party4 <- "B90G"
# print(party)
# print(party4)
Gebiet2 <- Gebiet
if (Gebiet == "Neue L\u00E4nder (+Berlin)") { Gebiet2 <- "NLmitB"}
# print(Gebiet)
# print(Gebiet2)
# Some checks at the beginning
# Correst Vote
if (vote == "Erststimme" | vote == "E") {
vote <- 1
} else {
if (vote == "Zweitstimme" | vote == "Z") {
vote <- 2
} else {
stop("'vote' has to be 'Erststimme' or 'Zweitstimme' only (first letter is enogh.) Please check.")
}
}
if (vote == 1) {
legendlab = "Erststimmenergebnis [%]"
}
# correst collabs resp. colgrps
lcollabs <- length(collabs)
lcolgrps <- length(colgrps)
if (lcollabs != lcolgrps) {
stop("Number of 'colgrps' and 'collabs' do not match! Please ceck.")
}
# print(party)
# print(Gebiet)
if (party == "CDU/CSU") party <- "CDU"
if (party == "CDU" & Gebiet == "Deutschland") party <- c("CDU","CSU")
# print(party)
if (!(party %in% c("AfD","CDU","CSU","FDP","GR\u00DCNE","DIE LINKE","SPD"))) {
stop("'party' should be one of 'AfD,CDU,CSU,FDP,GR\u00DCNE,DIE LINKE,SPD'")
}
party2 <- party
if (party == "GRÜNE") {
party2 <- "BÜNDNIS 90/ DIE GRÜNEN"
}
if (party[1] %in% c("CDU","CSU)") & Gebiet == "Deutschland") {
party2 <- "CDU (in Bayern: CSU)"
}
party3 <- party
if (party == "DIE LINKE") {
party3 <- "LINKE"
}
if (party[1] %in% c("CDU","CSU)")) {
party3 <- "UNION"
}
unionlab = "CDU"
if (Gebiet == "Deutschland") unionlab = "CDU/CSU"
##mit FDP
#partycols <- c("red","black","forestgreen","yellow","darkblue","purple")
#partylabs <- c( "SPD", unionlab,"B\u00DCNDNIS 90/ DIE GR\u00DCNEN","FDP","AfD","DIE LINKE")
#ohne FDP
partycols <- c("red","black","forestgreen","darkblue","purple")
partylabs <- c( "SPD", unionlab,"B\u00DCNDNIS 90/ DIE GR\u00DCNEN","AfD","DIE LINKE")
# join election data with shape data and filter by party
#btw21 <- merge(wk_spdf, BTW21Parteien %>% filter(Stimme == vote & Gruppenname %in% c(party)), by.x="WKR_NR", by.y="Gebietsnummer")
if (Darstellung == "Parteihochburg") {
# print(party)
btw21map <- merge(Shapedf, Parteidf %>% filter(Stimme == vote & Gruppenname %in% c(party) ), by.x="WKR_NR", by.y="Gebietsnummer" )
cartodata <- paste0("DataCartogram/",Gebiet2,"_",Darstellung,"_",party4,"_",vote,"_","by",cartoweight,"-",itermax,".Rdata")
} else {
btw21map <- merge(Shapedf, Rangdf %>% filter(Stimme == vote), by.x="WKR_NR", by.y="Gebietsnummer")
cartodata <- paste0("DataCartogram/",Gebiet2,"_",Darstellung,"_",vote,"_","by",cartoweight,"-",itermax,".Rdata")
pal <- colorFactor(
palette = c('darkblue', 'black', 'purple',"yellow", 'forestgreen',"red"),
domain = btw21map$Gruppenname,
alpha = TRUE
)
}
#names(btw21map)
# Transformiere Koordinaten in WGS84
btw21map <- btw21map %>%
st_as_sf() %>%
st_transform(4326) %>%
as_Spatial()
# als simple feature
btw21map <-st_as_sf(btw21map) #as simple feature
# new Coordinates
btw21map <- st_transform(btw21map, coords = c("lon", "lat"),
crs = "+init=epsg:3395",
agr = "constant") %>%
filter(!is.na(Anzahl)) # Filter out WKs with no votes for Party
if (maptype == "cartogram") {
# calculate cartogram - only if not allready done
# print(cartodata)
if (file.exists(cartodata) == FALSE) {
btw21_conto <- cartogram_cont(btw21map, cartoweight,itermax = itermax)
save(btw21_conto,file=cartodata)
} else {
load(file=cartodata)
}
# back to WGS84 coordinates
btw21_cont<- st_transform(btw21_conto, 4326) %>% # ("+proj=longlat +datum=WGS84")
mutate(alpha = cut(as.numeric(Prozent),
c(colgrps,Inf),
collabs,
right=FALSE)
)
# print(names(btw21_cont))
# extract borders of German Länder out of cartogram
if (Gebiet == "Jena") {
btw21_contuegebiet <- btw21_cont %>%
group_by(plraum_nr) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
# print("Gebiet Jena")
}
if (Gebiet == "Leipzig") {
btw21_contuegebiet <- btw21_cont %>%
group_by(stadtbezirknr) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
# print("Leipzig stadtbezirknr")
}
if (Gebiet %in% c("Deutschland","Neue L\u00E4nder (+Berlin)")) {
btw21_contuegebiet <- btw21_cont %>%
group_by(LAND_NR) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
# print("Gebiet D, NL")
}
#print("Schritt 1")
if (Darstellung == "Parteihochburg") {
# Cols in SF-data.frame
pal <- colorFactor(
palette = c('grey25',alpha(partycol, seq(alphastart, alphaend, length.out = lcollabs))),
domain = btw21_cont$alpha,
alpha = TRUE
)
btw21_cont<- btw21_cont %>% mutate(palcol = pal(alpha))
btw21_cont<- btw21_cont %>% as_Spatial() ## wieder SpatialPolygonsdataframe
popuptxt <- paste0(GebietWK, btw21_cont$WKR_NR, ": ", btw21_cont$Gebietsname, "<br>",
"Stimmen (%):", btw21_cont$Gruppenname, ": ", btw21_cont$Anzahl," (", mkr.formatwithdigits(btw21_cont$Prozent,1), "%)")
m<-leaflet(btw21_cont) %>%
addTiles(attribution = 'Volker Holzendorf, Stadt Jena, FD Finanzen, Team Controlling und Statiistik',
urlTemplate = "") %>% #no normal map because of Scartogram
addPolygons(#data=btw21_cont,
fillColor = ~palcol,
weight=1,
fillOpacity = 1,
#Define PopUp Text
popup = popuptxt
) %>%
addPolylines(data = btw21_contuegebiet,weight = 2,color = "black") %>% # plot border of German Lander
# Legend
addLegend("topright",pal = pal,
values = ~alpha,
title = paste0("#BTW21: ",party2,"<br>",legendlab),
opacity = 1)
#m
# print("Cartogramm_Karte erstellt")
} else {
btw21_cont<- btw21_cont %>%
mutate(alpha =ifelse((Prozent_Diff/max(Prozent_Diff)+0.25)<1, Prozent_Diff/max(Prozent_Diff)+0.25,1),
palcol = pal(Gruppenname)) %>%
as_Spatial() ## wieder SpatialPolygonsdataframe
popuptxt <- paste0(GebietWK, btw21_cont$WKR_NR, ": ", btw21_cont$Gebietsname, "<br>",
"Erster:", btw21_cont$Gruppenname, " ", btw21_cont$Anzahl," (", mkr.formatwithdigits(btw21_cont$Prozent,1), "%)", "<br>",
"Zweiter:", btw21_cont$Gruppenname_2, " ", btw21_cont$Anzahl_2," (", mkr.formatwithdigits(btw21_cont$Prozent_2,1), "%)")
m <-leaflet() %>% #addTiles() %>%
addTiles(attribution = 'Volker Holzendorf, Stadt Jena, FD Finanzen, Team Controlling und Statiistik',
urlTemplate = "") %>% #no normal map because of Scartogram
addPolygons(data=btw21_cont,
fillColor = ~palcol,
weight=2,
fillOpacity = ~alpha,
popup = popuptxt) %>%
addPolylines(data = btw21_contuegebiet,weight = 2,color = "black") %>% # plot border of German Lander
# Legend
addLegend("topright",#pal = pal,
colors = partycols,
labels = partylabs,
#values = ~Gruppenname,
title = paste0("#BTW21: Wahlkreisgewinner","<br>",
legendlab,"<br>",
"Farbs\u00E4ttigung gibt Abstand","<br>",
"zum Zweitplatzierten an. Je heller,","<br>",
"desto geringer der Abstand zwischen","<br>",
"Erst- und Zweitplatzierten."),
opacity = 0.7)
}
# mDraw Map
}
if (maptype == "normalmap") {
if (Darstellung == "Parteihochburg") {
# Cols in SF-data.frame
pal <- colorFactor(
palette = c('grey25',alpha(partycol, seq(alphastart, alphaend, length.out = lcollabs))),
domain = btw21map$alpha,
alpha = TRUE
)
}
# back to WGS84 coordinates
btw21map<- st_transform(btw21map, 4326) %>% # ("+proj=longlat +datum=WGS84")
mutate(alpha = cut(as.numeric(Prozent),
c(colgrps,Inf),
collabs,
right=FALSE),
palcol = pal(alpha)
)
# extract borders of German Länder out of cartogram
if (Gebiet == "Jena") {
btw21_uegebiet <- btw21map %>%
group_by(plraum_nr) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
}
if (Gebiet == "Leipzig") {
btw21_uegebiet <- btw21map %>%
group_by(stadtbezirknr) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
}
if (Gebiet %in% c("Deutschland","Neue L\u00E4nder (+Berlin)")) {
btw21_uegebiet <- btw21map %>%
group_by(LAND_NR) %>%
summarise(Anzahl = sum(Anzahl), do_union = TRUE) %>%
ungroup() %>%
as_Spatial()
}
btw21map <- as_Spatial(btw21map) ## wieder SpatialPolygonsdataframe
if (Darstellung == "Parteihochburg") {
popuptxt <- paste0(GebietWK, btw21map$WKR_NR, ": ", btw21map$Gebietsname, "<br>",
"Stimmen (%):", btw21map$Gruppenname, ": ", btw21map$Anzahl," (", mkr.formatwithdigits(btw21map$Prozent,1), "%)")
# Draw Map
m<-leaflet(btw21map) %>%
#addTiles() %>% no normal map because of Scartogram
addTiles(attribution = 'Volker Holzendorf, Stadt Jena, FD Finanzen, Team Controlling und Statiistik',
urlTemplate = "") %>% #no normal map because of Scartogram
addPolygons(#data=btw21_cont,
fillColor = ~palcol, weight=1,fillOpacity = 1,
#Define PopUp Text
popup = popuptxt) %>%
addPolylines(data = btw21_uegebiet,weight = 2,color = "black") %>%
# Legend
addLegend("topright",pal = pal,
values = ~alpha,
title = paste0("#BTW21: ",party2,"<br>",legendlab),
opacity = 1)
} else {
names(btw21map)
popuptxt <- paste0(GebietWK, btw21map$WKR_NR, ": ", btw21map$Gebietsname, "<br>",
"Erster:", btw21map$Gruppenname, " ", btw21map$Anzahl," (", mkr.formatwithdigits(btw21map$Prozent,1), "%)", "<br>",
"Zweiter:", btw21map$Gruppenname_2, " ", btw21map$Anzahl_2," (", mkr.formatwithdigits(btw21map$Prozent_2,1), "%)")
m <-leaflet() %>% #addTiles() %>%
addTiles(attribution = 'Volker Holzendorf, Stadt Jena, FD Finanzen, Team Controlling und Statiistik',
urlTemplate = "") %>% #no normal map because of Scartogram
addPolygons(data=btw21map,
fillColor = ~pal(Gruppenname),
weight=2,
fillOpacity = ~(Prozent_Diff/max(Prozent_Diff))+0.25,
popup = popuptxt) %>%
addPolylines(data = btw21_uegebiet,weight = 2,color = "black") %>%# plot border of German Lander
# Legend
addLegend("topright",#pal = pal,
colors = partycols,
labels = partylabs,
#values = ~Gruppenname,
title = paste0("#BTW21: Wahlkreisgewinner","<br>",
legendlab,"<br>",
"Farbs\u00E4ttigung gibt Abstand","<br>",
"zum Zweitplatzierten an. Je heller,","<br>",
"desto geringer der Abstand zwischen","<br>",
"Erst- und Zweitplatzierten."),
opacity = 0.7)
}
}
return(m)
}
|
7f51aeb3b7be20b17e70be835953021303ff62ba
|
f7471ac611cb87fba70fe065da96649f02ae040d
|
/evaluationScriptsPVModels/step3_extractPeaks_BICCN_mouse_NEMO_ArchRProject.r
|
45f58a2b3f7083b64a9d4131b6c304cc27eedec7
|
[] |
no_license
|
pfenninglab/TACIT
|
c90515f905f3e9cadcb8db9a6dc296bdd283abd9
|
4238c9304f9062496b1e54fcf16ef141de3fa809
|
refs/heads/main
| 2023-08-27T18:11:51.410002
| 2023-06-04T20:13:30
| 2023-06-04T20:13:30
| 411,869,107
| 8
| 1
| null | 2022-10-19T00:49:57
| 2021-09-30T00:22:35
|
Shell
|
UTF-8
|
R
| false
| false
| 949
|
r
|
step3_extractPeaks_BICCN_mouse_NEMO_ArchRProject.r
|
# Extract reproducible (IDR) peak calls for each cross-species neuronal cell
# type from the BICCN mouse NEMO snATAC-seq ArchR project and export every
# peak set as a BED file in the working directory.
suppressMessages(library(ArchR))
options(stringsAsFactors = FALSE)

## ---- Arrow file parameters ----
addArchRThreads(threads = 1)
addArchRGenome("mm10")
proj <- loadArchRProject("ArchR_BICCN_mouse_NEMO_snATAC-Seq")

## ---- Collect the IDR peak sets ----
cell_types <- levels(factor(proj$cluster))
neuron_types <- c("Chodl", "L23", "L5.IT", "L5.PT", "L6.CT", "L6.IT",
                  "Lamp5", "NP", "Pv", "Sncg", "Sst", "Vip")
peak_rds_paths <- file.path("ArchR_BICCN_mouse_NEMO_snATAC-Seq", "PeakCalls",
                            paste0(neuron_types, "-reproduciblePeaks.gr.rds"))
names(peak_rds_paths) <- neuron_types
peak_sets <- GRangesList(lapply(peak_rds_paths, readRDS))

## ---- Write one BED file per cell type ----
for (cell_type in names(peak_sets)) {
  bed_path <- file.path(paste0("BICCN_mouse_NEMO_snATAC-Seq_",
                               cell_type, "_reproduciblePeaks.bed"))
  rtracklayer::export(peak_sets[[cell_type]], bed_path)
}
|
1b159e95dbe273b0cb3a3212f88ab84753880a74
|
75356478ad057032878ad6dfcd20958f183eecd6
|
/man/bas_plot.Rd
|
549ca8ab3b3ce2d68bcf4d7c9bdb7f4a2ab2e978
|
[
"MIT"
] |
permissive
|
DongshengBai/sciliantifig
|
d75ebe1ec6b3fdf532442c9569f8e7905083c645
|
bfadc5432433170f0e165c854dca6de75b643918
|
refs/heads/master
| 2022-03-12T17:44:27.544173
| 2019-09-20T01:50:10
| 2019-09-20T01:50:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 401
|
rd
|
bas_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{bas_plot}
\alias{bas_plot}
\title{Beautified version of summary plot for BAS}
\usage{
bas_plot(x, top.models = 20)
}
\arguments{
\item{x}{A BAS model object.}
\item{top.models}{The number of top models to include in the plot.}
}
\value{
A gtable.
}
\description{
Beautified version of summary plot for BAS
}
|
bd46238a8019ac3b5163c6032566d4212cb2cd59
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DiagrammeR/examples/delete_graph_actions.Rd.R
|
fb323ae0332bcfeecf7896b04f61f3d146710f7a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,279
|
r
|
delete_graph_actions.Rd.R
|
library(DiagrammeR)

### Name: delete_graph_actions
### Title: Delete one or more graph actions stored within a graph object
### Aliases: delete_graph_actions

### ** Examples

# Build a random graph (5 nodes, 8 edges) with `add_gnm_graph()`;
# the fixed seed keeps the example reproducible.
graph <- create_graph()
graph <- add_gnm_graph(graph, n = 5, m = 8, set_seed = 23)

# Register three graph actions: compute PageRank, rescale it into a
# `width` attribute, then colorize that attribute into `fillcolor`.
graph <- add_graph_action(graph,
                          fcn = "set_node_attr_w_fcn",
                          node_attr_fcn = "get_pagerank",
                          column_name = "pagerank",
                          action_name = "get_pagerank")
graph <- add_graph_action(graph,
                          fcn = "rescale_node_attrs",
                          node_attr_from = "pagerank",
                          node_attr_to = "width",
                          action_name = "pagerank_to_width")
graph <- add_graph_action(graph,
                          fcn = "colorize_node_attrs",
                          node_attr_from = "width",
                          node_attr_to = "fillcolor",
                          action_name = "pagerank_fillcolor")

# Inspect the actions currently registered on the graph
get_graph_actions(graph)

# Remove the second and third actions with `delete_graph_actions()`
graph <- delete_graph_actions(graph, actions = c(2, 3))

# Confirm that only the first action remains
get_graph_actions(graph)
|
a9fd1d98c39c2f5c815e25829b660d4576fce7ce
|
c9d7e4f0fcc61eb7c5215fdffced4b9db3c34d7e
|
/man/line_qid.Rd
|
65844f17645884ec41096dc2e88fdbb3ca05e7be
|
[
"Apache-2.0"
] |
permissive
|
turgut090/Cirq
|
091424a209295d0478459dcaa80a6d74384f9690
|
cfa48055034a83655e56fb9a6c9f0499dd48d710
|
refs/heads/master
| 2022-10-06T04:37:32.333261
| 2020-06-07T06:06:11
| 2020-06-07T06:06:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,004
|
rd
|
line_qid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Devices.R
\name{line_qid}
\alias{line_qid}
\title{Line Qid}
\usage{
line_qid(x, dimension)
}
\arguments{
\item{x}{The x coordinate.}
\item{dimension}{The dimension of the qid, e.g. the number of quantum levels.}
}
\value{
initialized line qid at the given x coordinate.
}
\description{
A qid on a 1d lattice with nearest-neighbor connectivity.
}
\details{
`LineQid`s have a single attribute, an integer coordinate 'x', which
identifies the qid's location on the line. `LineQid`s are ordered by
this integer. One can construct new `LineQid`s by adding or subtracting integers:
line_qid(1, dimension=2) + 3
Output: cirq.LineQid(4, dimension=2)
line_qid(2, dimension=3) - 1
Output: cirq.LineQid(1, dimension=3)
}
\seealso{
Other Devices:
\code{\link{grid_qid}()},
\code{\link{grid_qubit}()},
\code{\link{line_qubit}()},
\code{\link{named_qubit}()},
\code{\link{qid}()},
\code{\link{unconstrained_device}()}
}
\concept{Devices}
|
6054014fecd7aa9bcf9221602d321d56382b2c62
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/rangeModelMetadata/man/rmmFamilies.Rd
|
22992f25dde2ca2076a4c2c6f173b90a25657107
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 306
|
rd
|
rmmFamilies.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{rmmFamilies}
\alias{rmmFamilies}
\title{Print supported family names for rmm objects}
\usage{
rmmFamilies()
}
\description{
Used to see the available options for specifying an rmm object template.
}
\examples{
rmmFamilies()
}
|
d495b5baaa56c9a1fe1b09fc9ecdf13f9ad4f6d8
|
d4900aec988678febffdfbed490f784c562a2bec
|
/man/a_layer.Rd
|
77cca50ee79dfcc7e26f78dc1357d53bf51064ef
|
[] |
no_license
|
vivekktiwari/animint2
|
d64883c9f18c606fb3d5f34e457b5cb896d9d291
|
9a2ff243da6d97eb6b62a82ed81bbdb7ae55b554
|
refs/heads/master
| 2021-09-20T21:50:48.979177
| 2018-08-15T22:21:37
| 2018-08-15T22:21:37
| 117,336,879
| 0
| 0
| null | 2018-08-15T12:07:24
| 2018-01-13T11:07:03
|
R
|
UTF-8
|
R
| false
| true
| 2,825
|
rd
|
a_layer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer.r
\name{a_layer}
\alias{a_layer}
\title{Create a new layer}
\usage{
a_layer(a_geom = NULL, a_stat = NULL, data = NULL, mapping = NULL,
a_position = NULL, params = list(), inherit.a_aes = TRUE,
subset = NULL, show.legend = NA)
}
\arguments{
\item{a_geom}{The geometric object used to display the data.}
\item{a_stat}{The statistical transformation to use on the data for this
layer, as a string.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{a_plot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{a_fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{mapping}{Set of aesthetic mappings created by \code{\link{a_aes}} or
\code{\link{a_aes_}}. If specified and \code{inherit.a_aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{a_position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{params}{Additional parameters to the \code{a_geom} and \code{a_stat}.}
\item{inherit.a_aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{subset}{DEPRECATED. An older way of subsetting the dataset used in a
layer.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
}
\description{
A layer is a combination of data, stat and a_geom with a potential position
adjustment. Usually layers are created using \code{a_geom_*} or \code{a_stat_*}
calls but it can also be created directly using this function.
}
\examples{
# geom calls are just a short cut for layer
a_plot(mpg, a_aes(displ, hwy)) + a_geom_point()
# shortcut for
a_plot(mpg, a_aes(displ, hwy)) +
a_layer(a_geom = "point", a_stat = "identity", a_position = "identity",
params = list(na.rm = FALSE)
)
# use a function as data to plot a subset of global data
a_plot(mpg, a_aes(displ, hwy)) +
a_layer(a_geom = "point", a_stat = "identity", a_position = "identity",
data = head, params = list(na.rm = FALSE)
)
}
|
9d30fefbf0634e7418da6d6540547e39f744388e
|
6918faba19e3f3de6b50fa454af760b6879bb0cc
|
/plot2.R
|
55c1e5d3cdc7f565cd110e7f232569cb724ae3ef
|
[] |
no_license
|
stephanvdw89/datasciencecoursera
|
aa6091f192a55f7402498b06c5651b57c63312e1
|
547b09a0b011de7255a6b04204bf586b5d43dd9e
|
refs/heads/master
| 2021-01-22T02:34:11.044686
| 2015-10-10T08:01:50
| 2015-10-10T08:01:50
| 25,726,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 773
|
r
|
plot2.R
|
# Exploratory Data Analysis - Plot 2
# Line plot of Global_active_power for 2007-02-01 and 2007-02-02, saved as PNG.
#
# Reads the UCI household power consumption data, restricts it to the two
# days of interest, converts the measurement columns to numeric (treating
# "?" as missing), draws the plot on screen and copies it to plot2.png.
data <- read.csv("C:\\Users\\stephanvdw\\Documents\\R\\Data\\RCourse_ED\\household_power_consumption.txt",
                 sep = ";", header = TRUE)

# Parse the date column and build a full timestamp for the x-axis.
data[, 1] <- as.Date(strptime(data[, 1], format = "%d/%m/%Y"))
data$DateTime <- as.POSIXct(paste(data$Date, as.character(data$Time)))

# Keep only the two days of interest (rows stay in file order).
data.sub1 <- data[data[, 1] == "2007-02-01", ]
data.sub2 <- data[data[, 1] == "2007-02-02", ]
data <- rbind(data.sub1, data.sub2)

# Columns 3-9 hold the measurements; "?" marks missing values.
# FIX: the original looped over 3:ncol(data), which also compared the
# POSIXct DateTime column against "?" - that comparison coerces "?" via
# as.POSIXct() and errors. Restricting to 3:9 matches the numeric columns.
for (i in 3:9) {
  is.na(data[, i]) <- data[, i] == "?"
  data[, i] <- as.numeric(as.character(data[, i]))
}

with(data, plot(DateTime, Global_active_power, type = "l",
                xlab = "", main = "",
                ylab = "Global Active Power (kilowatts)"))
dev.copy(png, file = "C:\\Users\\stephanvdw\\Documents\\R\\Coursera\\Course4\\Project1\\plot2.png")
dev.off()
|
0fdc573de7e6c821f39fcc56ebb291b1ea83e8d4
|
cdb8630afd0afbf0bee54c5b0c52f12e8ae2b56c
|
/BayesBD/R/plotBD.R
|
0d1742b5a9ba0dd8ffdd421a8860ee67644ed4c9
|
[] |
no_license
|
HaoWang47/GSOC-BayesBD
|
5ceed79a73f626beb69efdcd9fc8adee75f215b0
|
a3ca0980c8fc2acb305f6906b300fe645da8d51b
|
refs/heads/master
| 2021-06-22T20:30:23.159987
| 2017-08-24T22:43:01
| 2017-08-24T22:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,019
|
r
|
plotBD.R
|
# Plot the result of a BayesBD boundary-detection fit.
#
# Args:
#   fitted.image: a fitted object whose $image element is either
#     (a) a single string naming a .jpg/.png file on disk, or
#     (b) a list with theta.obs, r.obs, center and intensity components.
#     The object also carries $output (theta, estimate, lower, upper:
#     the posterior boundary and its credible band), $center (image case)
#     and, optionally, $gamma.fun, the true boundary function.
#   plot.type: 1 = data only; 2 = boundary estimate with credible band;
#     3 = data overlaid with the boundary estimate.
#
# Draws on the current graphics device; returns a character error message
# when the inputs are not usable.
plotBD =
function (fitted.image, plot.type)
{
  # helper: TRUE when `input` is a single character string (a file path)
  is.string <- function(input) {
    is.character(input) & length(input) == 1
  }
  if(is.string(fitted.image$image)){
    # ---- case 1: $image is a path to a .jpg or .png file ----
    if(substr(fitted.image$image,start = nchar(fitted.image$image)-2,stop = nchar(fitted.image$image))=="jpg"){
      img = readJPEG(fitted.image$image)
    }else if(substr(fitted.image$image,start = nchar(fitted.image$image)-2,stop = nchar(fitted.image$image))=="png"){
      img = readPNG(fitted.image$image)
    }else {
      return('The image file must have a .png or .jpeg file type.')
    }
    # color images come back as a 3-d array; keep only the first channel
    if(length(dim(img))>2){
      img = matrix(img[,,1],dim(img)[1],dim(img)[2])
    }else {
      img = img
    }
    n1 = nrow(img)
    n2 = ncol(img)
    # flip the rows so the picture is drawn right side up (matrix row 1 is
    # the top of the image, but y = 1 plots at the bottom of the device)
    img_flip = img
    for(i in 1:n1){
      for(j in 1:n2){
        img_flip[i,j] = img[n1-i+1,j]
      }
    }
    img = img_flip
    # the user-supplied center must lie inside the image
    if(any(fitted.image$center[1]>n2,fitted.image$center[2]>n1,fitted.image$center[1]<0,fitted.image$center[2]<0)){
      return(paste('The center should be a pixel (x,y) between 0 and ', ncol(img), 'for x, and 0 and ', nrow(img), ' for y.'))
    }
    # expand the pixel grid into x/y coordinate vectors, column-major so
    # they line up with as.vector(img) below
    y1 = 1:n1
    x1 = 1:n2
    y=NULL
    x=NULL
    for(i in 1:n2){
      y = c(y,y1)
      x = c(x,rep(i,n1))
    }
    intensity = img
    intensity = as.vector(intensity)
    # convert the boundary from polar (radius at each theta) to Cartesian
    # pixel coordinates; radii are scaled by the image dimensions
    estimate.x = fitted.image$output$estimate*cos(fitted.image$output$theta)*n2+fitted.image$center[1]
    estimate.y = fitted.image$output$estimate*sin(fitted.image$output$theta)*n1+fitted.image$center[2]
    upper.x = fitted.image$output$upper*cos(fitted.image$output$theta)*n2+fitted.image$center[1]
    upper.y = fitted.image$output$upper*sin(fitted.image$output$theta)*n1+fitted.image$center[2]
    lower.x = fitted.image$output$lower*cos(fitted.image$output$theta)*n2+fitted.image$center[1]
    lower.y = fitted.image$output$lower*sin(fitted.image$output$theta)*n1+fitted.image$center[2]
    if(plot.type == 1){
      # data only: grayscale plot of the intensities rescaled to [0,1]
      # (both branches map min(intensity) to 0 and max to 1)
      if(min(intensity)<0){
        normalized = (intensity+abs(min(intensity)))/(max(intensity)-min(intensity))
      }else{
        normalized = (intensity-abs(min(intensity)))/(max(intensity)-min(intensity))
      }
      plot(x, y, col = gray(normalized), pch = 15, cex = 0.375, axes = FALSE, xlab = '', ylab = '',asp = 1)
    }else if(plot.type == 2){
      # boundary only: gray ring = credible band (upper polygon with the
      # lower polygon blanked out), dashed blue = posterior estimate
      plot(x, y, col = 'white', pch = 15, cex = 0.375, axes = FALSE, xlab = '', ylab = '',asp = 1)
      polygon(upper.x, upper.y, fillOddEven = TRUE, col = "gray", border = NA)
      polygon(lower.x, lower.y, fillOddEven = TRUE, col = "white", border = NA)
      lines(estimate.x, estimate.y, lty = 2, lwd = 3, col='blue')
      if (!is.null(fitted.image$gamma.fun)) {
        # overlay the true boundary when it is known
        gamma.x = fitted.image$gamma.fun(fitted.image$output$theta) * cos(fitted.image$output$theta) + fitted.image$center[1]
        gamma.y = fitted.image$gamma.fun(fitted.image$output$theta) * sin(fitted.image$output$theta) + fitted.image$center[2]
        lines(gamma.x, gamma.y, lty = 1, lwd = 1)
      }
    }else if(plot.type == 3){
      # data with the boundary estimate (and true boundary) overlaid
      if(min(intensity)<0){
        normalized = (intensity+abs(min(intensity)))/(max(intensity)-min(intensity))
      }else{
        normalized = (intensity-abs(min(intensity)))/(max(intensity)-min(intensity))
      }
      plot(x, y, col = gray(normalized), pch = 15, cex = 0.375, axes = FALSE, xlab = '', ylab = '',asp = 1)
      lines(estimate.x, estimate.y, lty = 2, lwd = 3, col='blue')
      if (!is.null(fitted.image$gamma.fun)) {
        gamma.x = fitted.image$gamma.fun(fitted.image$output$theta) * cos(fitted.image$output$theta) + fitted.image$center[1]
        gamma.y = fitted.image$gamma.fun(fitted.image$output$theta) * sin(fitted.image$output$theta) + fitted.image$center[2]
        lines(gamma.x, gamma.y, lty = 1, lwd = 1)
      }
    }else {
      return("plot.type must be 1, 2, or 3.")
    }
  }else if(is.list(fitted.image$image)){
    # ---- case 2: $image is a list of polar observations ----
    # note: here radii are NOT scaled by image dimensions, and the center
    # lives at fitted.image$image$center (not fitted.image$center)
    x = fitted.image$image$r.obs*cos(fitted.image$image$theta.obs)+fitted.image$image$center[1]
    y = fitted.image$image$r.obs*sin(fitted.image$image$theta.obs)+fitted.image$image$center[2]
    estimate.x = fitted.image$output$estimate*cos(fitted.image$output$theta)+fitted.image$image$center[1]
    estimate.y = fitted.image$output$estimate*sin(fitted.image$output$theta)+fitted.image$image$center[2]
    upper.x = fitted.image$output$upper*cos(fitted.image$output$theta)+fitted.image$image$center[1]
    upper.y = fitted.image$output$upper*sin(fitted.image$output$theta)+fitted.image$image$center[2]
    lower.x = fitted.image$output$lower*cos(fitted.image$output$theta)+fitted.image$image$center[1]
    lower.y = fitted.image$output$lower*sin(fitted.image$output$theta)+fitted.image$image$center[2]
    if(plot.type == 1){
      # data only: grayscale plot of the observed intensities in [0,1]
      if(min(fitted.image$image$intensity)<0){
        normalized = (fitted.image$image$intensity+abs(min(fitted.image$image$intensity)))/(max(fitted.image$image$intensity)-min(fitted.image$image$intensity))
      }else{
        normalized = (fitted.image$image$intensity-abs(min(fitted.image$image$intensity)))/(max(fitted.image$image$intensity)-min(fitted.image$image$intensity))
      }
      plot(x, y, col = gray(normalized), pch = 15, cex = 0.375, axes = FALSE, xlab = '', ylab = '',asp = 1)
    }else if(plot.type == 2){
      # credible band + posterior estimate, as in the image branch
      plot(x, y, col = 'white', axes = FALSE, xlab = '', ylab = '',asp = 1)
      polygon(upper.x, upper.y, fillOddEven = TRUE, col = "gray", border = NA)
      polygon(lower.x, lower.y, fillOddEven = TRUE, col = "white", border = NA)
      lines(estimate.x, estimate.y, lty = 2, lwd = 3, col='blue')
      if (!is.null(fitted.image$gamma.fun)) {
        gamma.x = fitted.image$gamma.fun(fitted.image$output$theta) * cos(fitted.image$output$theta) + fitted.image$image$center[1]
        gamma.y = fitted.image$gamma.fun(fitted.image$output$theta) * sin(fitted.image$output$theta) + fitted.image$image$center[2]
        lines(gamma.x, gamma.y, lty = 1, lwd = 1)
      }
    }else if(plot.type == 3){
      # data with the boundary estimate (and true boundary) overlaid
      if(min(fitted.image$image$intensity)<0){
        normalized = (fitted.image$image$intensity+abs(min(fitted.image$image$intensity)))/(max(fitted.image$image$intensity)-min(fitted.image$image$intensity))
      }else{
        normalized = (fitted.image$image$intensity-abs(min(fitted.image$image$intensity)))/(max(fitted.image$image$intensity)-min(fitted.image$image$intensity))
      }
      plot(x, y, col = gray(normalized), pch = 15, cex = 0.375, axes = FALSE, xlab = '', ylab = '',asp = 1)
      lines(estimate.x, estimate.y, lty = 2, lwd = 3, col = 'blue')
      if (!is.null(fitted.image$gamma.fun)) {
        gamma.x = fitted.image$gamma.fun(fitted.image$output$theta) * cos(fitted.image$output$theta) + fitted.image$image$center[1]
        gamma.y = fitted.image$gamma.fun(fitted.image$output$theta) * sin(fitted.image$output$theta) + fitted.image$image$center[2]
        lines(gamma.x, gamma.y, lty = 1, lwd = 1)
      }
    }else {
      return("plot.type must be 1, 2, or 3.")
    }
  }else {
    return("Input image is not a compatible image file nor a compatible list object.")
  }
}
|
25d20d70b312ab44741f1ea8049da72c50f19622
|
df05978d15fbbc08fd2f72b5c24d6118bfc1c6d2
|
/Rfunctions/01_accessing_cleaning.R
|
22da4408b92ef5330ad3bc65420eed29aae48adc
|
[] |
no_license
|
SrivastavaLab/cesabfunctionalwebsdata
|
54b286aed4989eef36aec956f738a305ec49d1d0
|
c9b9e8fcafa6f43f4971b2ab37a9830cf17c5a67
|
refs/heads/master
| 2021-01-19T21:51:27.068980
| 2019-02-06T20:04:59
| 2019-02-06T20:04:59
| 61,402,349
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,663
|
r
|
01_accessing_cleaning.R
|
# accessing -----------------------------------------------------
## Getting the abundance matrix for each site requires querying its precise dataset ID
## this convenience function creates a named vector of all the datasets and downloads them all
## can be slow
## Download the abundance matrix for every dataset listed in `.dats`.
## One API call per dataset id, so this can be slow; a failed download
## yields NA for that dataset instead of aborting the whole run.
get_all_abundances <- function(.dats) {
  # wrap the API call so a single failure returns NA rather than erroring
  safe_get <- possibly(bwg_get, NA)
  ids <- set_names(as.numeric(.dats[["dataset_id"]]))
  queries <- map(ids, function(id) list(dataset_id = id))
  map(queries, function(q) safe_get("matrix", q))
}
# Filtering ---------------------------------------------------------------
## NOTE eventualy these will be deleted and then this test will be deleted
## because it will be useless
## Drop rows whose `name` begins with "Test" (leftover test datasets).
filter_test <- function(x) {
  filter(x, !grepl("^Test", name))
}
## pesky test datasets, and other garbage potentially, goes on a blacklist.
## Build a blacklist of visits that belong to test datasets (and,
## potentially, other garbage). Returns the (visit_id, dataset_id)
## pairs that should be excluded downstream.
make_blacklist <- function(.visits, .dats) {
  .visits %>%
    left_join(.dats, by = "dataset_id") %>%
    ## the exclusion criteria live here; extend as new cases turn up
    filter(str_detect(name, "Test")) %>%
    select(visit_id, dataset_id)
}
# Restructuring -------------------------------------------------------------
## Tidy the raw bromeliad table: drop the "attributes." prefix from the
## column names and unnest the `detritus` list-column into regular columns.
no_attrib_unnest_det <- function(.broms) {
  # strip the "attributes." prefix left over from flattening
  names(.broms) <- str_replace(names(.broms), "attributes\\.", "")
  # NULL detritus entries would make unnest() choke, so substitute a
  # one-row all-NA placeholder before unnesting
  placeholder <- data_frame(min = NA, max = NA, mass = NA)
  .broms %>%
    mutate(detritus = map_if(detritus, is_null, ~ placeholder)) %>%
    unnest(detritus)
}
## Collapse the nested `names` list-column into one character value per
## row: NULL becomes "", multiple names are joined with ";".
combine_multi_names <- function(.trts_all) {
  # predicate: element holds more than one name
  has_many <- function(x) length(x) > 1
  fixed <- .trts_all %>%
    mutate(names = map_if(names, is_null, ~ "")) %>%
    mutate(names = map_if(names, has_many, paste, collapse = ";"))
  # every element is now length 1, so unnesting is safe
  unnest(fixed, names)
}
# import corrected names --------------------------------------------------
## Read the corrected bromeliad species table (semicolon-delimited text).
import_BromeliadSpecies <- function(path) {
  read_delim(path, delim = ";", escape_double = FALSE, trim_ws = TRUE)
}
|
ab2ff9197797f1098892222b2a0ced773550be93
|
5daf55051f6012da5b35af5c5759a06e6ada88c2
|
/text_mining/wine_reviews_text_mining.R
|
6cec2f629ddd8b8ac4ea0ac6f965e0aff609b63c
|
[] |
no_license
|
DerrickStuckey/gwu-data-mining
|
349b157a845fab255a6435982e0400f794ec36e1
|
472f2e81b4f1f4511a81a187aca129ef6856d5c4
|
refs/heads/master
| 2023-04-29T13:21:42.481160
| 2023-04-20T20:28:05
| 2023-04-20T20:28:05
| 222,146,440
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,354
|
r
|
wine_reviews_text_mining.R
|
# Wine-review text mining walkthrough, part 1: load the Kaggle wine-review
# data, sample it, and build a first corpus / term-document matrix.
# install.packages("tm")
# install.packages("lsa")
library(tidyverse)
library(forecast) # for accuracy() function
library(tm) # general text mining tools
library(lsa) # concept extraction
library(caret) # for confusionMatrix()

# from https://www.kaggle.com/zynicide/wine-reviews
wine.reviews <- read_csv("./data/wine-reviews/winemag-data_first150k.csv")
dim(wine.reviews)
wine.reviews

# right now we only care about the review text: the 'description' column
head(wine.reviews$description)

# work with just a sample of data, as some steps can take a while on large data
sample.size <- 1000
# sample.size <- 10
set.seed(12345)  # fixed seed so the sample (and all results) are reproducible
# sample.idx <- seq(1,1000,1)
sample.idx <- sample(1:nrow(wine.reviews),sample.size)
reviews.sample <- wine.reviews[sample.idx,]
# reviews.sample <- wine.reviews[1:1000,]
dim(reviews.sample)
head(reviews.sample$description)

### Basic text mining data transformations ###

# construct a corpus (one document per review description)
corp <- VCorpus(VectorSource(reviews.sample$description))
corp

# construct a Term-Document Matrix (rows = terms, columns = documents)
tdm <- TermDocumentMatrix(corp)
inspect(tdm)

# terms that occur in at least 20% of the documents
nrow(reviews.sample) * 0.20
findFreqTerms(tdm, nrow(reviews.sample) * 0.20)

# terms that occur in at least 10% of the documents
findFreqTerms(tdm, nrow(reviews.sample) * 0.10)

# terms highly correlated with a given term (correlation >= 0.2)
findAssocs(tdm, "spice", 0.2)
findAssocs(tdm, "oak", 0.2)
### more data cleaning/preparation ###

# tokenization: remove punctuation, whitespace, lowercase everything --
# generally attempt to split into discrete 'tokens' aka words
corp.tok <- tm_map(corp, stripWhitespace)
corp.tok <- tm_map(corp.tok, removePunctuation)
corp.tok <- tm_map(corp.tok, content_transformer(tolower))

# create a Term-Document Matrix from the tokenized corpus
tdm.tok <- TermDocumentMatrix(corp.tok)

# compare the new TDM with the previous one
inspect(tdm)
inspect(tdm.tok)
# what is different?

# compare highly associated terms for the original and tokenized versions
findAssocs(tdm, "spice", 0.1)$spice[1:10]
findAssocs(tdm.tok, "spice", 0.1)$spice[1:10]

# try removing 'stopwords' aka very common words
# using stopwords() function from 'tm' package
stopwords(kind="en") %>% head()
corp.nostopwords <- tm_map(corp.tok, removeWords, stopwords(kind="en"))
tdm.nostopwords <- TermDocumentMatrix(corp.nostopwords)
inspect(tdm.tok)
inspect(tdm.nostopwords)

# compare terms that appear at least 20% of the time
# before and after removing stopwords
findFreqTerms(tdm.tok, nrow(reviews.sample) * 0.20)
findFreqTerms(tdm.nostopwords, nrow(reviews.sample) * 0.20)

# stemming (e.g. "running" -> "run")
corp.stemmed <- tm_map(corp.nostopwords, stemDocument)
tdm.stemmed <- TermDocumentMatrix(corp.stemmed)
inspect(tdm.stemmed)

# compare terms that appear at least 20% of the time
# before and after stemming
findFreqTerms(tdm.nostopwords, nrow(reviews.sample) * 0.20)
findFreqTerms(tdm.stemmed, nrow(reviews.sample) * 0.20)

# we can also drop infrequent terms if we want to
# (in this case, let's keep them: tdm.unsparse is not used below)
tdm.unsparse <- removeSparseTerms(tdm.stemmed,0.999)
tdm.stemmed
tdm.unsparse

# TF-IDF weighting: downweights terms common to many documents
# tfidf <- weightTfIdf(tdm.nostopwords)
tfidf <- weightTfIdf(tdm.stemmed)
tfidf
inspect(tfidf)

# how is this different from a basic TDM?
inspect(tdm.stemmed)
findAssocs(tdm.nostopwords, "spice", 0.1)$spice[1:10]
findAssocs(tfidf, "spice", 0.1)$spice[1:10]
dim(tfidf)
### Concept extraction ###

# extract 10 "concepts" via latent semantic analysis (lsa library)
lsa.tfidf <- lsa(tfidf, dim=10)

# look at the term loadings for each concept ($tk: terms x concepts)
View(lsa.tfidf$tk)

# look at the top 10 terms (highest loadings) for each concept
concepts.top.terms <- data.frame("rank"=1:10)
for (j in 1:ncol(lsa.tfidf$tk)) {
  top.terms <- row.names(lsa.tfidf$tk)[
    order(lsa.tfidf$tk[,j], decreasing = TRUE)
    ][1:10]
  concepts.top.terms <- cbind(concepts.top.terms, top.terms)
  names(concepts.top.terms)[length(names(concepts.top.terms))] <- paste("Concept",j)
}
View(concepts.top.terms)

# there is also a value for each concept for each document ($dk)
document.concepts <- as.data.frame(as.matrix(lsa.tfidf$dk))
dim(document.concepts)
head(document.concepts)

### Build a predictive Model ###

# set up the data to train and test on
# attempt to predict the rating, using our 10 concepts as predictors
document.concepts$points <- reviews.sample$points
head(document.concepts)

# train/test split (70/30)
train.prop <- 0.7
set.seed(12345)
train.idx <- sample(1:nrow(document.concepts),nrow(document.concepts)*train.prop)
train.data <- document.concepts[train.idx,]
test.data <- document.concepts[-train.idx,]

# wait a minute
# is it OK that we did all our text feature extraction on the training and test data combined?
# depends on the use case (NOTE: this leaks test text into the features)

# try a linear regression model
points.lm <- lm(points ~ .,data=train.data)
summary(points.lm)

# which concepts are most associated with a high rating?
View(concepts.top.terms[,c('Concept 1','Concept 2','Concept 3')])
# with a low rating?
View(concepts.top.terms[,c('Concept 6','Concept 7','Concept 10')])

# how does the model perform on the holdout set?
points.preds <- predict(points.lm, newdata=test.data)
accuracy(points.preds, test.data$points)

# plot actual vs. predicted points
ggplot() +
  geom_point(mapping = aes(x=points.preds, y=test.data$points))
ggplot() +
  geom_jitter(mapping = aes(x=points.preds, y=test.data$points))

# predict whether the wine is Italian: drop `points`, add the label
document.concepts <-
  document.concepts %>%
  select(-points)
head(document.concepts)
document.concepts$Italian <- reviews.sample$country=="Italy"
table(document.concepts$Italian)

# re-create the training and test data (same indexes as before)
train.data <- document.concepts[train.idx,]
test.data <- document.concepts[-train.idx,]

# Logistic Regression to predict whether the wine is Italian
italy.logistic <- glm(Italian ~ ., data=train.data, family='binomial')
summary(italy.logistic)

# obtain test predictions (predicted probabilities)
test.probs <- predict(italy.logistic, newdata=test.data, type="response")
summary(test.probs)

# confusion matrix at a 0.5 probability cutoff
test.preds <- test.probs > 0.5
confusionMatrix(as.factor(test.preds), as.factor(test.data$Italian))

# ROC curve
library(plotROC)
ggplot(mapping = aes(m = test.probs, d = test.data$Italian)) +
  geom_roc(n.cuts=0,labels=FALSE) +
  style_roc(theme = theme_grey)

# what did the model actually find?
summary(italy.logistic)
View(concepts.top.terms[,c('Concept 1','Concept 4','Concept 9')])
|
21818745b8699df316813eb8852192ba59415549
|
26d9e1219dc151e2d27dae1e38f3ff0ed938b967
|
/man/round_facet_border.Rd
|
4f7c5eb941100e3bd3afce59aef6ba03d22ae2f6
|
[] |
no_license
|
ZhonghuiGai/ggfacet
|
36a3a3cc0d6a7ad403fa8656c84e7066e8546461
|
5dfbd2d6e2e638902b43fd31bc811aa4459f955e
|
refs/heads/main
| 2023-08-25T14:50:10.240215
| 2021-10-16T12:36:08
| 2021-10-16T12:36:08
| 414,873,558
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 604
|
rd
|
round_facet_border.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/round_facet_border.R
\name{round_facet_border}
\alias{round_facet_border}
\title{Round rectangle border of facet. A swap function of ggfun}
\usage{
round_facet_border(p, r = 0.2)
}
\arguments{
\item{p}{A ggplot object with facets whose border should be rounded.}

\item{r}{the radius of the rounded corners, a \code{unit} object,
default is unit(0.1, 'snpc').}
}
\value{
}
\description{
Round rectangle border of facet. A swap function of ggfun
}
\examples{
p <- ggplot(mpg, aes(displ, cty)) + geom_point() + facet_grid(cols = vars(cyl))
round_facet_border(p, r = 0.25)
}
\author{
Zhonghui Gai
}
|
3dffa09d149fabe9d6d2549f8835d506b8a2f0e0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/AER/examples/ChinaIncome.Rd.R
|
b862b9379955a9c6d3bd5774dd496f6091106425
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
ChinaIncome.Rd.R
|
library(AER)

### Name: ChinaIncome
### Title: Chinese Real National Income Data
### Aliases: ChinaIncome
### Keywords: datasets

### ** Examples

# Load the ChinaIncome dataset shipped with AER and plot it.
data("ChinaIncome", package = "AER")
plot(ChinaIncome)
|
268af2d375f967a0489e20a90a53eee200dd1b4c
|
bab0ce3c8a81762101b3af466d018f5425ab7fcc
|
/quantile-score.R
|
a09c710f160e5961254c8526b434e921644e5cf0
|
[
"MIT"
] |
permissive
|
signaturescience/random-walks
|
1f0e6f5127e1ffdba76283fc6340d66dea1a79b6
|
853f96aae54b2985d1c55ef9be5da341d945081a
|
refs/heads/master
| 2023-01-22T22:02:08.643403
| 2020-12-07T07:28:36
| 2020-12-07T07:28:36
| 319,381,303
| 0
| 0
| null | 2020-12-07T16:39:00
| 2020-12-07T16:38:59
| null |
UTF-8
|
R
| false
| false
| 2,684
|
r
|
quantile-score.R
|
#!/usr/bin/env Rscript
# Score point and quantile forecasts against observed Johns Hopkins data.
# The data snapshot date is read from the `ddt` environment variable;
# every file under forecasts/ is scored and results land in metrics/.

# Setup environment
suppressPackageStartupMessages(library(tidyverse))
source("covidhub-common.R")  # provides load_hopkins() and read_forecast()
ddt <- Sys.getenv("ddt")
targ_dir <- file.path("hopkins", ddt)
output_dir <- "forecasts"
# observed truth; rename "value" so joins against forecast values are unambiguous
datf <- load_hopkins(targ_dir) %>% rename(true_value = "value")
forecast_paths <- dir(output_dir, full.names = TRUE)
# Calculate errors
# Score one forecast file against the observed truth `tdf2`.
#
# Returns a list of three data frames:
#   point   - point forecasts joined to truth, with signed `error`
#   prob    - quantile forecasts joined to truth, with per-quantile Brier
#             and pinball (quantile) losses
#   probsum - mean quantile/Brier score per (forecast_date, target)
score <- function(path, tdf2) {
  # target_type = target with the leading horizon number stripped
  fcst <- read_forecast(path) %>% mutate(target_type = str_remove(target, "^\\d+ "))
  # point forecasts: keep only rows with observed truth; error = forecast - truth
  ptdf <- fcst %>% filter(type == "point") %>%
    left_join(tdf2, by = c("target_end_date", "target_type", "location")) %>%
    filter(!is.na(true_value)) %>%
    mutate(error = value - true_value)
  # quantile forecasts: Brier loss on the below-indicator vs the nominal
  # quantile level, and pinball loss via verification::check.func
  probdf <- fcst %>% filter(type == "quantile") %>%
    left_join(tdf2, by = c("target_end_date", "target_type", "location")) %>%
    filter(!is.na(true_value)) %>%
    mutate(is_below = true_value < value,
           brier_loss = (is_below - quantile) ^ 2) %>%
    mutate(pinball_loss = purrr::map2_dbl(true_value - value, quantile,
                                          verification::check.func))
  # per-(forecast_date, target) averages of both losses
  probsumdf <- probdf %>% group_by(forecast_date, target) %>%
    summarise(mean_quantile_score = mean(pinball_loss),
              mean_brier_score = mean(brier_loss), .groups = "drop")
  list(point = ptdf, prob = probdf, probsum = probsumdf)
}
# Score every forecast file and pool the per-quantile residuals.
scores <- map(forecast_paths, score, tdf2 = datf)
residuals <-
  map(scores, "prob") %>%
  bind_rows() %>%
  # classify locations: "US" = national, two-character codes = state
  # (presumably FIPS codes -- confirm upstream), anything longer = county
  mutate(loc_type = case_when(location == "US" ~ "national",
                              nchar(location) == 2 ~ "state",
                              TRUE ~ "county" ))

# Summarize errors: mean pinball (quantile) score at three aggregation levels
summary <- list()
summary$by_loc_type <-
  residuals %>%
  group_by(loc_type) %>%
  summarise(mean_qs = mean(pinball_loss), .groups = "drop")
summary$by_loc_type_targ_type <-
  residuals %>%
  group_by(loc_type, target_type) %>%
  summarise(mean_qs = mean(pinball_loss), .groups = "drop")
summary$by_loc_targ_fdt <-
  residuals %>%
  group_by(loc_type, target_type, forecast_date) %>%
  summarise(mean_qs = mean(pinball_loss), .groups = "drop")

# Create output: full residuals as RDS, each summary as a CSV in metrics/
dir.create("metrics")
resids_path <- file.path("metrics", paste0(ddt, "-residuals.rds"))
saveRDS(residuals, resids_path)
summary_plot_path <- file.path("metrics", paste0(ddt, "-score-by-loc-type.csv"))
write_csv(summary$by_loc_type, path = summary_plot_path)
summary_plot_path <- file.path("metrics", paste0(ddt, "-score-by-loc-type-targ-type.csv"))
write_csv(summary$by_loc_type_targ_type, path = summary_plot_path)
summary_plot_path <- file.path("metrics", paste0(ddt, "-score-by-loc-type-targ-type-forecast-date.csv"))
write_csv(summary$by_loc_targ_fdt, path = summary_plot_path)
|
695d7798acfa4bdb3fdc257041baa8d5c61469d8
|
c262aa9d1819623e627386fd43e61e0d988d405a
|
/pipeline/scripts/11-runSumAdducts.R
|
c4cad6f74f016b24275b3874c68e518d0270a810
|
[
"MIT"
] |
permissive
|
UMCUGenetics/DIMS
|
bf818ebefd272f2b4726b9db26b6326a5070911f
|
dd98c1e4fb3cf8fbe0a08761b6583e7930696e21
|
refs/heads/master
| 2023-08-08T03:11:34.213700
| 2023-03-28T09:23:11
| 2023-03-28T09:23:11
| 175,600,531
| 1
| 3
|
MIT
| 2023-08-25T15:27:21
| 2019-03-14T10:34:21
|
R
|
UTF-8
|
R
| false
| false
| 3,479
|
r
|
11-runSumAdducts.R
|
#!/usr/bin/Rscript
# Step 11 of the DIMS pipeline: sum adduct intensities per HMDB compound.
# This header parses the positional command-line arguments, loads the
# replication pattern, the peak-group file and the identified-peaks list,
# and derives the batch label from the input file name.
.libPaths(new = "/hpc/local/CentOS7/dbg_mz/R_libs/3.2.2")

# load required packages
# none

# define parameters -- positional command-line arguments:
#   1: path to the peak-group .RData file to process
#   2: run output directory
#   3: scan mode ("positive"/"negative")
#   4: comma-separated adduct indices (e.g. "1" or "1,2")
#   5: 1 when Z-scores were computed (shifts the intensity column offsets
#      used later in sumAdducts), otherwise 0
cmd_args <- commandArgs(trailingOnly = TRUE)
for (arg in cmd_args) cat(" ", arg, "\n", sep="")

file <- cmd_args[1]
outdir <- cmd_args[2]
scanmode <- cmd_args[3]
adducts <- cmd_args[4]
z_score <- as.numeric(cmd_args[5])

# create output folder for the summed-adduct results
dir.create(paste(outdir, "11-adductSums", sep="/"), showWarnings = FALSE)

# replication pattern for this scan mode (defines the sample grouping)
load(paste0(outdir, "/repl.pattern.",scanmode, ".RData"))

# split "1,2" -> c("1", "2")
adducts=as.vector(unlist(strsplit(adducts, ",",fixed = TRUE)))

load(file)
load(paste(outdir, "/outlist_identified_", scanmode, ".RData", sep=""))

# Local and on HPC: batch label = second dot-separated token of the filename
batch = strsplit(file, "/",fixed = TRUE)[[1]]
batch = batch[length(batch)]
batch = strsplit(batch, ".",fixed = TRUE)[[1]][2]

outlist.tot=unique(outlist.ident)
sumAdducts <- function(peaklist, theor.MZ, grpnames.long, adducts, batch, scanmode, outdir, z_score){
  # Sum, per HMDB compound, the intensities of all peaks annotated with that
  # compound or any of its adducts, and save the resulting matrix to
  # <outdir>/11-adductSums/<scanmode>_<batch>.RData (object name: adductsum).
  #
  # peaklist      data.frame of identified peaks; must contain an HMDB_code
  #               column (";"-separated codes) plus the per-sample intensity
  #               columns (their position depends on z_score, see below)
  # theor.MZ      data.frame whose rownames are HMDB codes and whose first
  #               column holds the compound names
  # grpnames.long sample/group names; only length() is used to locate the
  #               intensity columns inside peaklist
  # adducts       adduct suffixes to match, e.g. c(1) for negative mode,
  #               c(1, 2) for positive mode
  # batch         batch identifier, used in the output file name
  # scanmode      scan mode string, used in the output file name
  # outdir        pipeline output directory (must contain 11-adductSums/)
  # z_score       1 if peaklist carries Z-score columns (intensities then
  #               start at column 15), otherwise they start at column 7
  #
  # Returns invisible(NULL); called for the side effect of writing the file.

  hmdb_codes <- rownames(theor.MZ)
  hmdb_names <- theor.MZ[, 1, drop = FALSE]
  hmdb_names[] <- lapply(hmdb_names, as.character)

  # Keep only genuine HMDB entries
  keep <- grep("HMDB", hmdb_codes, fixed = TRUE)
  hmdb_codes <- hmdb_codes[keep]
  hmdb_names <- hmdb_names[keep, ]

  # Remove isotopes (codes containing "_").
  # BUG FIX: the original called grep on rownames(hmdb_codes); rownames() of a
  # character vector is always NULL, so the isotope entries were never removed.
  iso <- grep("_", hmdb_codes, fixed = TRUE)
  if (length(iso) > 0) {
    hmdb_codes <- hmdb_codes[-iso]
    hmdb_names <- hmdb_names[-iso]
  }

  if (length(hmdb_codes) == 0) {
    return(invisible(NULL))
  }

  # Intensity columns shift when Z-score columns are present
  if (z_score == 1) {
    int_cols <- 15:(length(grpnames.long) + 14)
  } else {
    int_cols <- 7:(length(grpnames.long) + 6)
  }

  # Pre-split the ";"-separated annotation strings once, instead of inside the loop
  peak_codes <- strsplit(as.vector(peaklist$HMDB_code), ";")

  kept_codes <- character(0)
  kept_names <- character(0)
  totals <- list()
  for (i in seq_along(hmdb_codes)) {
    compound <- hmdb_codes[i]
    # The compound itself plus each of its adduct-annotated variants
    compound_plus <- c(compound, paste(compound, adducts, sep = "_"))
    # Logical mask: peaks annotated with the compound or one of its adducts
    metab <- vapply(peak_codes,
                    function(x) length(intersect(x, compound_plus)) > 0,
                    logical(1))
    total <- apply(peaklist[metab, int_cols], 2, sum)
    if (sum(total) != 0) {
      kept_codes <- c(kept_codes, compound)
      kept_names <- c(kept_names, hmdb_names[i])
      totals[[length(totals) + 1]] <- total
    }
  }

  if (length(totals) > 0) {
    adductsum <- do.call(rbind, totals)
    rownames(adductsum) <- kept_codes
    # cbind coerces the matrix to character; downstream code expects this layout
    adductsum <- cbind(adductsum, "HMDB_name" = kept_names)
    save(adductsum,
         file = paste(outdir, "11-adductSums",
                      paste(scanmode, "_", batch, ".RData", sep = ""), sep = "/"))
  }
  invisible(NULL)
}
sumAdducts(outlist.tot, outlist_part, names(repl.pattern.filtered), adducts, batch, scanmode, outdir, z_score)
|
fcb30c7bb5eb0124788b0bde2a8be5ce6e5f05c2
|
937b0de304e1728c13b6b2a86c11eae6dc4c8f6c
|
/doc/hello-foofactors.R
|
9bafd93f43b4441c7bf40526e85e7068bc3cf96d
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw07-ziqiangt
|
183853f13d5b969f9bd1ff67d3f6646aa6ce6480
|
ef88da6a55536d2238e2c2dab48c6368f4054751
|
refs/heads/master
| 2020-04-05T18:43:35.344686
| 2018-11-13T03:40:44
| 2018-11-13T03:40:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
hello-foofactors.R
|
# Script purled from the foofactors package vignette; exercises the package's
# factor helpers (fbind, freq_out, f_detect, f_reorder, f_set, x_write/x_read).
## ------------------------------------------------------------------------
library(foofactors)
a <- factor(c("character", "hits", "your", "eyeballs"))
b <- factor(c("but", "integer", "where it", "counts"))
## ------------------------------------------------------------------------
# Base R c() on factors does not concatenate levels sensibly (motivating fbind)
c(a, b)
## ------------------------------------------------------------------------
# fbind() combines two factors while preserving their labels
fbind(a, b)
## ------------------------------------------------------------------------
set.seed(1234)
x <- factor(sample(letters[1:5], size = 100, replace = TRUE))
table(x)
as.data.frame(table(x))
## ------------------------------------------------------------------------
# freq_out() -- frequency table helper from foofactors
freq_out(x)
## ------------------------------------------------------------------------
f_detect(factor(c("a", "b", "c","a")))
f_detect(factor(c("a", "b", "c","d")))
## ------------------------------------------------------------------------
f_reorder(factor(c("B", "A", "D")))
## ------------------------------------------------------------------------
f_set(factor(c("B", "A", "D")))
## ------------------------------------------------------------------------
# Demonstrate that factor levels survive a write/read round trip via
# x_write()/x_read() (presumably CSV data plus a sidecar levels file --
# TODO confirm against the package documentation)
set.seed(1234)
df <- data.frame(
kids = factor(c(1,0,1,0,0,0), levels = c(0, 1),
labels = c("boy", "girl"))
)
levels(df$kids)
x_write(df, "./df_x.csv", "./df_x.txt")
read_return <- x_read("./df_x.csv", "./df_x.txt")
levels(read_return$kids)
|
4d47c2ad59dc26772830f01e8de8a108d5c4052d
|
b7b0dc0e8130ae721148f02140c60fe491e07626
|
/Explorartory Data Analysis (EDA).R
|
c2b62840101b1b0f917a7e28cb2dffda1df759e6
|
[] |
no_license
|
Yogi5693/Exploratory-Data-Analysis-EDA-
|
00fe20e5fea162ec0c3ca1e0501c41e461fbc9a6
|
50d56196deb90b78e38abcc49650862be87d9fc8
|
refs/heads/master
| 2022-10-12T15:13:03.009351
| 2020-06-01T05:18:43
| 2020-06-01T05:18:43
| 268,430,756
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,792
|
r
|
Explorartory Data Analysis (EDA).R
|
##################### Exploratory Data Analysis ##############################################
# Exploratory analysis of a credit data set:
#   1. data exploration / sanity checks
#   2. data preparation (duplicate columns, outliers, missing values)
#   3. splitting into train and test sets via random sampling
setwd("C:\\Users\\Home\\Desktop\\Dataset\\data sheets")
# Treat empty strings as NA on read
cr<-read.csv("Credit.csv",na.strings = c("",NA))
library(dplyr) # for data manipulation
options(scipen=999) # switch off scientific notation in printed numbers
## Data exploration using the credit data set:
# - sanity check
# - identify outliers and replace them
# - impute missing values
# - bin data using quantile()/ntile()
# - partition data into train and test
names(cr)
# Remove duplicate columns carrying the same information:
#   column 1:  NPA STATUS      (duplicate of Good_Bad)
#   column 12: MonthlyIncome.1 (duplicate of MonthlyIncome)
cr<-cr[,-c(1,12)] # column operation: drop columns 1 and 12
names(cr)
# Sanity check:
#   quantitative (numeric):   five-point summary (min, max, mean, median, quartiles, NA's)
#   qualitative (categorical): frequency distribution
summary(cr)
# Missing value treatment for the target variable.
# DV/target: Good_Bad; IDVs: all other columns.
# Imputing the dependent variable is unwise -- drop rows where it is missing.
index <- which(is.na(cr$Good_Bad))  # row indices with a missing target
index
# BUG FIX: when index is empty, cr[-index, ] evaluates to cr[integer(0), ]
# and silently wipes the whole data frame; only drop rows when there is
# actually something to drop.
if (length(index) > 0) {
  cr <- cr[-index, ]  # row operation: remove rows with missing target
}
cr
summary(cr)  # no NA's remain in the Good_Bad column
# Looking at individual variables:
summary(cr$RevolvingUtilizationOfUnsecuredLines)
cr%>%filter(RevolvingUtilizationOfUnsecuredLines==0)%>%nrow() # 10878 rows have value 0
cr%>%filter(RevolvingUtilizationOfUnsecuredLines>=0.99)%>%nrow() # 14383 rows >= 0.99
# Percentile breakup (via quantile())
quantile(cr$RevolvingUtilizationOfUnsecuredLines,p=c(1:100)/100)
# Cutoff of 2 agreed with the client; keep only rows at or below it
cr%>%filter(RevolvingUtilizationOfUnsecuredLines<=2)%>%nrow()
cr%>%filter(RevolvingUtilizationOfUnsecuredLines<=2)->cr
summary(cr$age)
cr%>%filter(age==0)%>%nrow() # one person with age 0 still holds a card (data-entry mistake)
quantile(cr$age,p=(1:100)/100)
cr%>%filter(age!=0)->cr
summary(cr$Gender)
summary(cr)
############# Missing value treatment for continuous and categorical variables ####################
# Imputing missing values for a categorical variable
unique(cr$NumberOfTime30.59DaysPastDueNotWorse)
# Bad rate per category, to pick the category whose bad rate matches the NA rows
table1<-table(cr$NumberOfTime30.59DaysPastDueNotWorse,cr$Good_Bad)
bad_rate<-table1[,1]/rowSums(table1)
ind2<-which(is.na(cr$NumberOfTime30.59DaysPastDueNotWorse))
table(cr$Good_Bad[ind2])/length(ind2)
# Replace NAs with category 6 (chosen from the bad-rate comparison above)
cr$NumberOfTime30.59DaysPastDueNotWorse[ind2]<-6
summary(cr$NumberOfTime30.59DaysPastDueNotWorse)
# Imputing missing values for a continuous variable (deciles via ntile())
summary(cr$MonthlyIncome)
library(dplyr) # for ntile()
# Bad counts per income decile vs total counts per decile
cr%>%mutate(quantile=ntile(MonthlyIncome,10))%>%group_by(Good_Bad,quantile)%>%summarize(N=n())%>%filter(Good_Bad=="Bad")->dat
cr%>%mutate(quantile=ntile(MonthlyIncome,10))%>%group_by(quantile)%>%summarize(N=n())->dat1
dat$Percentage<-dat$N/dat1$N
# Replace NA income with the 8th-decile value (9200, read off the quantiles below)
# NOTE(review): na.rm=T uses the reassignable shorthand; prefer na.rm = TRUE
quantile(cr$MonthlyIncome,p=(0:10)/10,na.rm=T)
cr$MonthlyIncome[is.na(cr$MonthlyIncome)]<-9200
summary(cr$MonthlyIncome)
####################### Splitting Data into Train and Test Using Random Sampling ######################################
# Two equivalent approaches are shown; the caret partition (2) overwrites the
# base-R split (1), so (2) is what downstream code effectively uses.
# 1. Base R random sampling (70% train / 30% test):
set.seed(100)
indexP <- sample(1:nrow(cr), 0.70 * nrow(cr), replace = FALSE)
train_cr <- cr[indexP, ]
test_cr <- cr[-indexP, ]
## 2. Stratified partition with caret (preserves the Good_Bad class balance):
library(caret)
# BUG FIX: the caret function is createDataPartition (lower-case c);
# CreateDataPartition does not exist and would error at run time.
indexPC <- createDataPartition(y = cr$Good_Bad, times = 1, p = 0.70, list = FALSE)
train_cr <- cr[indexPC, ]
test_cr <- cr[-indexPC, ]
# Class proportions in each split.
# BUG FIX: the original divided by nrow(train_crC)/nrow(test_crC), which are
# undefined objects (typo for train_cr/test_cr).
table(train_cr$Good_Bad) / nrow(train_cr)
table(test_cr$Good_Bad) / nrow(test_cr)
|
e03dcab23f5a8107c3e1614d591cbaf55aa4d232
|
f99b364b656241b684998af32d8c3874718cf4c0
|
/Day9/Day9.R
|
1d15ad92e1de8538b53f9127443f71c0015ec6d3
|
[] |
no_license
|
RossiLorenzo/advent_code_2017
|
533315c09b6a368cf1e37f1ff3392e5149c07deb
|
7af5730c67dec2bd4130e0c865384a7238efcd4a
|
refs/heads/master
| 2021-08-31T14:20:22.474229
| 2017-12-21T16:52:12
| 2017-12-21T16:52:12
| 112,747,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
Day9.R
|
# Stream-processing puzzle (appears to be Advent of Code 2017, Day 9 -- the
# file path suggests so). Input is a character stream with groups {...},
# garbage <...>, and "!" escaping the following character.
library(stringr)
input = readLines("Day9/Input9_1.txt")
###################### PART 1 ######################
# Pre-cleaning: strip escapes and garbage before counting groups.
# Order matters: "!!" pairs first, then "!x" escapes, then whole <...> spans.
input_clean = str_replace_all(input, "\\!\\!", "")
input_clean = str_replace_all(input_clean, "\\!.", "")
input_clean = str_replace_all(input_clean, "\\<[^\\>]*\\>", "")
# Walk the remaining characters, tracking nesting depth; each closing brace
# contributes its depth to the total score.
input_split = strsplit(input_clean, "", fixed = T)[[1]]
level = 0; mysum = 0
for(i in input_split){
  if(i == "{"){
    level = level + 1
  }
  if(i == "}"){
    mysum = mysum + level
    level = level - 1
  }
}
mysum  # part 1 answer: total group score
###################### PART 2 ######################
# Re-clean from the raw input but KEEP the garbage spans this time, then
# count the characters inside every <...> (capture group 1).
input_clean = str_replace_all(input, "\\!\\!", "")
input_clean = str_replace_all(input_clean, "\\!.", "")
sum(nchar(str_match_all(input_clean, "\\<([^\\>]*)\\>")[[1]][,2]))
|
b4de05ee2b183906a3295a6ecc37ac3b305d37fa
|
b55db886e7b1f561cc81c9caafdad5d7d632aa1e
|
/data-raw/public_transport_waedi.R
|
e3a10c44483f9e7a4fccddf8d15cd4e81258ad28
|
[] |
no_license
|
arc2r/arc2r
|
b0a3c2d07e2fed32f3716daccaf586344c0e1a4f
|
7d964b55c6b3b62f53d4af9e33bcd65727d01cc7
|
refs/heads/master
| 2023-03-09T04:45:34.568766
| 2021-03-03T09:20:56
| 2021-03-03T09:20:56
| 332,980,536
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
public_transport_waedi.R
|
# Build the packaged public_transport_waedi dataset from the raw GeoTIFF.
public_transport_waedi <- raster::raster("data-raw/public_transport_waedi/publicTransport_waedi.tif")
# readAll() pulls all raster values into memory so the saved object is
# self-contained (raster objects otherwise keep a pointer to the file on disk)
public_transport_waedi <- raster::readAll(public_transport_waedi)
usethis::use_data(public_transport_waedi, overwrite = TRUE)
|
c905274f02c79e95e3a0fbd7ef234c221133100f
|
90e2a130e06f9ad3dae97aae2270e8fe0e05fdc4
|
/R/CoNI_functions.R
|
31e5efb506b7f7658cef2802fb4be7b610c1be96
|
[] |
no_license
|
cran/CoNI
|
a844cb0c0118dbeeae2ccb5fd920fd8c952501d7
|
2636841a46ef404112e4241c0216d18e94569c17
|
refs/heads/master
| 2023-08-15T04:06:38.538008
| 2021-09-30T08:10:02
| 2021-09-30T08:10:02
| 412,122,297
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 112,445
|
r
|
CoNI_functions.R
|
#CoNI Functions
#' Correlation guided Network Integration
#' @description CoNI is the main function of Correlation guided Network Integration (CoNI). Input data should come from two sources (e.g., gene expression and metabolite expression), and it should come from the same samples. It calculates all pairwise correlations of the second data input elements and the partial correlations of these pairwise elements with respect to the elements of the first data input. Both data inputs can be prefiltered to include only those elements that significantly correlate. The first data input can be prefiltered to keep just low variance elements (var<0.5). A Steiger test is used to identify significant changes between the correlation and partial correlation values. Results can be visually represented in a Network.
#' @param edgeD Object to use as first data input (e.g., protein expression)
#' @param vertexD Object to use as second data input (e.g., metabolite expression)
#' @param outputDir Output Directory where results are stored
#' @param saveRaw logical. If TRUE the raw output of CoNI is saved in the output directory (outputDir)
#' @param outputNameRaw Name for the raw output file if saved
#' @param onlySgRes logical. If TRUE CoNI output is filtered and only significant results are kept
#' @param multipleTAdj logical. If TRUE it will filter results after adjustment of multiple testing
#' @param padjustvertexD logical. If TRUE vertexD is filtered according to the significant adjusted p-value of its pairwise correlations
#' @param correlateDFs logical. If TRUE the elements that significantly correlate of vertexD are correlated with the elements of edgeD. Only the elements that significantly correlate are kept
#' @param filter_highVarianceEdge logical. If TRUE features of edgeD with high variance are filtered out
#' @param splitedgeD logical. If TRUE edgeD will be split in n subsets for the computation (some instances n+1). Keep as TRUE unless the data input is small
#' @param split_number Number of parts to split the elements of edgeD
#' @param delPrevious logical. If TRUE previous files of a previous run are deleted
#' @param delIntermediaryFiles logical. If TRUE the output file of every iteration is deleted and only a single file with all results is kept
#' @param iteration_start Iteration start for CoNI. Useful if run is interrupted as one can restart from the last iteration
#' @param numCores Cores assigned for parallelization
#' @param verbose logical. If TRUE output in the console is more verbose
#' @param more_coef logical. If TRUE it will include the partial correlation of edge and vertex Features
#' @param edgeDname File name extension for the edge features that significantly correlate with at least one vertex feature. This file will be read if the function is called again with the same input and with delPrevious=FALSE
#' @param vertexDname File name extension for the vertex features that are involved in at least one significant correlation. This file will be read if the function is called again with the same input and with delPrevious=FALSE
#' @param saveFiles logical. If FALSE CoNI function will not save any file to disk
#' @return CoNI returns a data.frame with the correlation coefficients of the vertex-pairs, the partial correlation coefficients for every triplet, and the pvalue of the Steiger tests
#' @examples
#' #Run CoNI
#'
#' #Load gene expression - Toy dataset of two treatments
#' data(GeneExpToy)
#' #Samples in rows and genes in columns
#' GeneExp <- as.data.frame(t(GeneExpToy))
#' hfd_gene <- GeneExp[1:8,] #high fat diet
#' chow_gene<- GeneExp[9:nrow(GeneExp),] #chow diet
#' #Load metabolite expression - Toy dataset of two treatments
#' data(MetaboExpToy)
#' MetaboExp <- MetaboExpToy
#' hfd_metabo <- MetaboExp[11:18,] #high fat diet
#' chow_metabo <- MetaboExp[1:10,] #chow diet
#' #Match row names both data sets
#' rownames(hfd_metabo)<-rownames(hfd_gene)
#' rownames(chow_metabo)<-rownames(chow_gene)
#'
#' #Run CoNI with tiny example and no significance testing
#' #High fat diet
#' #For big datasets it is recommended to set splitedgeD to TRUE
#' #and split_number should be adjusted accordingly
#' #See vignette for an example
#' #Running CoNI with only a tiny dataset
#' \donttest{
#' CoNIResultsHFD <- CoNI(hfd_gene,hfd_metabo,
#' numCores = 2,
#' onlySgRes = FALSE,
#' filter_highVarianceEdge=FALSE,
#' padjustvertexD = FALSE,
#' correlateDFs = FALSE,
#' edgeDname="HFD_genes",
#' vertexDname = "HFD_metabolites",
#' saveFiles = FALSE,
#' splitedgeD = FALSE,
#' outputDir = "./")
#'}
#'
#' @import doParallel
#' @import parallel
#' @import foreach
#' @import dplyr
#' @import ppcor
#' @import cocor
#' @importFrom data.table fwrite fread
#' @importFrom stats cor p.adjust
#' @importFrom utils write.csv
#' @export
CoNI<- function(edgeD, vertexD,
                outputDir = "./CoNIOutput/",
                saveRaw = TRUE, outputNameRaw = "CoNIOutput",
                onlySgRes = FALSE, multipleTAdj = TRUE,
                padjustvertexD = TRUE, correlateDFs=TRUE,
                filter_highVarianceEdge = TRUE,
                splitedgeD = TRUE, split_number = 2,
                delPrevious = FALSE, delIntermediaryFiles = TRUE,
                iteration_start = 1, numCores = NULL,
                verbose = TRUE,
                more_coef = FALSE,
                edgeDname = "edgeD",vertexDname = "vertexD",
                saveFiles = TRUE) {
  # Implementation overview:
  #   1. validate inputs and prefilter both data sets (cached to CSV on disk,
  #      so interrupted runs can resume via iteration_start)
  #   2. for every vertex pair x edge feature triplet, compute the partial
  #      correlation (partialling out the edge feature) and compare it to the
  #      plain correlation with a Steiger (1980) test via cocor
  #   3. return one row per triplet; the heavy loop runs in parallel either
  #      over splits of edgeD (splitedgeD=TRUE) or over edgeD columns directly
  # Placeholder for the foreach iteration variable (silences R CMD check NOTE)
  j <- NULL
  #Set delPrevious to FALSE if iteration start > 1 (earlier split files must be kept)
  if(iteration_start>1){
    if(verbose){cat('Iteration start > 1')}
    delPrevious<-FALSE
    file_list <- list.files(outputDir)
    file_list <- file_list[grep("CoNIOutputSplit", file_list)]
    if(length(file_list) < (iteration_start-1)){
      stop("Previous files were not found in the output directory")
    }
  }
  #Check parameters
  ParaList<-as.list(match.call())
  checkInputParameters(ParaList)
  #Check if input objects are defined
  do_objectsExist(edgeD, vertexD,verbose)
  #Start measuring time
  start_time <- Sys.time()
  #Check if output directory exists
  check_outputDir(outputDir, verbose)
  #Check if previous files are present and delete accordingly
  check_previous(delPrevious, iteration = iteration_start, outDir = outputDir, verb = verbose)
  #Split number cannot be less than 2
  if(split_number < 2){
    if(verbose){print("Split number cannot be less than two")}
    split_number<-2
  }
  #Test if sample names are the same in both data sets
  compare_sampleNames(edgeD, vertexD)
  #Make sure column names are appropriate (syntactically valid and unique)
  colnames(edgeD) <- make.names(colnames(edgeD), unique = TRUE)
  colnames(vertexD) <- make.names(colnames(vertexD), unique = TRUE)
  # Pairwise vertex-vertex correlations; cached on disk and re-used on re-runs
  if(!file.exists(paste(outputDir, "KeptFeatures_", vertexDname, ".csv", sep = ""))){
    #Get significant correlations between metabolites
    if(verbose){print("Calculating correlations of vertex Data")}
    normvertexD_Tablesignificant <- sig_correlation2(input_edgeD = vertexD,padj = padjustvertexD,verb = verbose)
    #Get indexes for the rows and columns for the metabo data
    normvertexD_Tablesignificant$RowIndex <- apply(normvertexD_Tablesignificant, 1, function(x){return(which(colnames(vertexD)[1:ncol(vertexD)]==x[1]))})
    normvertexD_Tablesignificant$ColIndex <- apply(normvertexD_Tablesignificant, 1, function(x){return(which(colnames(vertexD)[1:ncol(vertexD)]==x[2]))})
    if(saveFiles){
      # Round-trip through fwrite/fread so the in-memory object matches what a
      # resumed run would load from disk
      fwrite(normvertexD_Tablesignificant, paste(outputDir, "KeptFeatures_", vertexDname, ".csv", sep=""))
      normvertexD_Tablesignificant <- fread(paste(outputDir, "KeptFeatures_", vertexDname, ".csv", sep=""))
    }
  }else{
    normvertexD_Tablesignificant <- fread(paste(outputDir, "KeptFeatures_", vertexDname, ".csv", sep=""))
  }
  #Get low variance edge features (e.g. genes)
  if(filter_highVarianceEdge){
    edgeD <- get_lowvarFeatures(edgeD) #This step was criticised
    if(!nrow(edgeD)>0){
      stop("After filtering high variance edge features none remained")
    }
  }
  #Remove those with too many zeros
  edgeD <- edgeD[, which(as.numeric(colSums(edgeD != 0)) > ceiling(nrow(edgeD)/2))] #At least two samples have a value higher than zero
  #edgeD<-as.data.frame(edgeD)
  #Get only those genes that correlate with the metabolites (also cached on disk)
  if(correlateDFs & !file.exists(paste(outputDir, "KeptFeatures_", edgeDname, ".csv", sep=""))){
    if(verbose){print("Calculating correlations between vertex Data and edge Data")}
    #Get Column indices of all metabolites
    metabo_indices<-unique(c(normvertexD_Tablesignificant$RowIndex,normvertexD_Tablesignificant$ColIndex))
    #Subset metabolites to correlate with genes
    SubSetvertexD<-vertexD[,metabo_indices]
    ResultsCorDfs <- sig_correlation2Dfs(SubSetvertexD,edgeD)
    genesSig <- unique(ResultsCorDfs$gene)
    if(verbose){print(paste(length(genesSig),"features were kept from edge Data",sep=" "))}
    edgeD <- edgeD[,genesSig]
    if(saveFiles){
      fwrite(edgeD,paste(outputDir,"KeptFeatures_",edgeDname,".csv",sep=""))
      edgeD <- fread(paste(outputDir,"KeptFeatures_",edgeDname,".csv",sep=""))
    }
  }else if(file.exists(paste(outputDir,"KeptFeatures_",edgeDname,".csv",sep=""))){
    edgeD <- fread(paste(outputDir,"KeptFeatures_",edgeDname,".csv",sep=""))
  }
  # Force a split for large edge data (>2000 columns) to keep memory in check;
  # the split size defaults to ~2% of the number of edge columns
  if(ncol(edgeD)>2000){
    if(splitedgeD==FALSE){
      cat('For computational purposes a split will be performed\n')
      splitedgeD<-TRUE
      split_number<-round(ncol(edgeD)*0.02)
    }
    if(split_number > ncol(edgeD)){
      if(verbose){print("Cannot split edgeD by the number provided, it exceeds edgeD dimensions")}
      split_number<-round(ncol(edgeD)*0.02)
      if(split_number<1){
        print("Cannot split less than 2")
        splitedgeD<-TRUE
        split_number<-round(ncol(edgeD)*0.02)
      }
    }
  }
  #Split Data Frame branch: process edgeD in split_number chunks, writing one
  #CoNIOutputSplit<i>.csv per chunk, then merge them at the end
  if(splitedgeD){
    ls_dfs<-split_df(edgeD,split_number)
    print(paste("Edge Data was split into",length(ls_dfs),"parts",sep=" "))
    for (i in iteration_start:length(ls_dfs)){
      df_iter<-ls_dfs[[i]]
      #Convert to data.frames
      df_iter<-as.data.frame(df_iter)
      normvertexD_Tablesignificant<-as.data.frame(normvertexD_Tablesignificant)
      #Register parallel backend
      chk <- Sys.getenv("_R_CHECK_LIMIT_CORES_", "")
      if (nzchar(chk) && chk == "TRUE") {
        # use 2 cores in CRAN/Travis/AppVeyor
        numCores <- 2
        if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
      }else if(is.null(numCores)){
        numCores<-detectCores()-2
        if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
      }else{
        if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
      }
      if(verbose){print(paste('Running CoNI Split Number',i,sep=" "))}
      cl<-makeCluster(numCores,setup_timeout = 0.5)
      #registerDoSNOW(cl)
      registerDoParallel(cl)
      #Set progress bar
      # pb<-tkProgressBar(max=ncol(edgeD))
      #Run operations in parallel: outer loop over edge features (columns of
      #this chunk), inner loop over significant vertex pairs
      df_results = foreach(j = 1:ncol(df_iter), .packages = c("ppcor", "doParallel","cocor") , .combine=rbind,.inorder = FALSE) %dopar% { #Loop table significant metabolites %dopar% .options.snow=taskBar1(pb,ncol(edgeD))
        results2 =foreach(i = 1:nrow(normvertexD_Tablesignificant),.packages = c("ppcor", "doParallel","cocor"), .combine=rbind,.inorder = FALSE) %dopar% { #Loop genes
          index1<-normvertexD_Tablesignificant[i,6]#Index column of first vertex feature (e.g. metabolite)
          index2<-normvertexD_Tablesignificant[i,7]#Index column of second vertex feature (e.g. metabolite)
          #Get vertex features names and edge feature name (e.g. names for metabolites and gene)
          Feature_1_vertexD<-normvertexD_Tablesignificant[i,1]
          Feature_2_vertexD<-normvertexD_Tablesignificant[i,2]
          Feature_edgeD<-colnames(df_iter)[j]
          #Get correlation between vertex features (e.g. metabolites)
          cor_coefficient<-normvertexD_Tablesignificant[i,3]
          cor_pvalue<-normvertexD_Tablesignificant[i,4]
          #Calculate partial correlation between vertex features partialling out edge feature (e.g. metabolites and gene)
          pcor_result<-pcor.test(vertexD[,index1],vertexD[,index2],df_iter[,j],method="p")
          pcor_pvalue<-pcor_result[[2]]
          pcor_coefficient<-pcor_result[[1]]
          #Sometimes the computer is not precise in float representation...
          #For numbers very close to 1 and -1 it is problematic, so clamp
          if(pcor_coefficient > 1){
            pcor_coefficient<-0.999
          }else if(pcor_coefficient < -1){
            pcor_coefficient<- -0.999
          }
          #Correlation vertex feature and edge feature (e.g metabolites vs gene)
          cor_m1_vs_g <- cor(vertexD[,index1],df_iter[,j])
          cor_m2_vs_g <- cor(vertexD[,index2],df_iter[,j])
          #Steiger test: does the partial correlation differ from the plain one?
          cdgo <- cocor.dep.groups.overlap(r.jk=cor_coefficient[[1]], r.jh=pcor_coefficient, r.kh=cor_m1_vs_g[1], n=nrow(vertexD),
                                           alternative="two.sided", alpha=0.05, conf.level=0.95, null.value=0, test='steiger1980')
          cdgo2 <- cocor.dep.groups.overlap(r.jk=cor_coefficient[[1]], r.jh=pcor_coefficient, r.kh=cor_m2_vs_g[1], n=nrow(vertexD),
                                            alternative="two.sided", alpha=0.05, conf.level=0.95, null.value=0, test='steiger1980')
          cdgo_pvalue <- cdgo@steiger1980$p.value
          cdgo2_pvalue<- cdgo2@steiger1980$p.value
          #vertex Feature 1 and edge Feature partialling out vertex Feature 2 (e.g. Metabolite 1 and Gene | Metabolite 2)
          pcor_res_jh_k <- tryCatch({pcor.test(vertexD[,index1],df_iter[,j],vertexD[,index2],method="p")},
                                    error=function(cond) {
                                      message('Partial correlation LF1_edge|LF2 failed')
                                      message("Here's the original error message:")
                                      message(cond)
                                      # Choose a return value in case of error
                                      return('NA')
                                    }
          )
          if(is.na(pcor_res_jh_k[[1]])){
            pcor_res_jh_kCoef<-"NA"
            pcor_res_jh_kpval<-"NA"
          }else{
            pcor_res_jh_kCoef<-pcor_res_jh_k[[1]]
            pcor_res_jh_kpval<-pcor_res_jh_k[[2]]
          }
          #vertex Feature 2 and edge Feature partialling out vertex Feature 1 (e.g. Metabolite 2 and Gene | Metabolite 1)
          pcor_res_kh_j<-tryCatch({pcor.test(vertexD[,index2],df_iter[,j],vertexD[,index1],method="p")},
                                  error=function(cond) {
                                    message('Partial correlation LF2_edge|LF1 failed')
                                    message("Here's the original error message:")
                                    message(cond)
                                    # Choose a return value in case of error
                                    return('NA')
                                  }
          )
          if(is.na(pcor_res_kh_j[[1]])){
            pcor_res_kh_jCoef<-"NA"
            pcor_res_kh_jpval<-"NA"
          }else{
            pcor_res_kh_jCoef<-pcor_res_kh_j[[1]]
            pcor_res_kh_jpval<-pcor_res_kh_j[[2]]
          }
          # Last expression = the row returned to foreach for rbind-combining
          rowtoprint<-list(Feature_1_vertexD,Feature_2_vertexD,Feature_edgeD,
                           pcor_coefficient,pcor_pvalue,cor_coefficient,
                           cor_pvalue,cdgo_pvalue,cdgo2_pvalue,
                           pcor_res_jh_kCoef,pcor_res_jh_kpval,
                           pcor_res_kh_jCoef,pcor_res_kh_jpval)
        }
      }
      # close(pb)
      stopCluster(cl)
      df_results<-as.data.frame(df_results)
      #Set column names
      colnames(df_results)<-c("Feature_1_vertexD", "Feature_2_vertexD", "Feature_edgeD", "pcor_coefficient",
                              "pcor_pvalue", "cor_coefficient", "cor_pvalue", "cdgo_pvalue", "cdgo2_pvalue",
                              "pcor_LF1_edge__LF2", "pcor_pvalue_LF1_edge__LF2",
                              "pcor_LF2_edge__LF1", "pcor_pvalue_LF2_edge__LF1")
      df_results<-as.matrix(df_results)
      #oldw <- getOption("warn")
      #options(warn = -1)
      #Save result of this chunk to disk
      writeT<-writeTable(df_results,num_cores = numCores,outputDir = outputDir,iteration = i) #Try to write using fwrite
      if(!length(writeT)==0){write.csv(df_results,paste(outputDir,"CoNIOutputSplit",i,".csv",sep=""))}#If fwrite fails it is written with write.csv
      #options(warn = oldw)
      #Remove results to free memory before the next chunk
      rm(df_results)
      #Print times
      iteration_endtime <- Sys.time()
      if(i==iteration_start){
        iteration_time<-difftime(iteration_endtime,start_time,units='mins')
        if(verbose){cat("Iteration time:",iteration_time,"minutes","\n",sep=" ")}
        iteration_time_between<-iteration_endtime
      }else{
        iteration_time<-difftime(iteration_endtime,iteration_time_between,units='mins')
        if(verbose){cat("Iteration time:",iteration_time,"minutes","\n",sep=" ")}
        iteration_time_between<-iteration_endtime
      }
    }
    #Merge output results CoNI (reads all CoNIOutputSplit files back in)
    CoNIOutput <- merge_outpuSplitFiles(outputDir)
    #################
    #This step might be problematic with very big data, enough RAM is needed to avoid errors
    #Add adjusted steiger pvalue
    CoNIOutput$cdgo_adjusted<-p.adjust(CoNIOutput$cdgo_pvalue)
    CoNIOutput$cdgo2_adjusted<-p.adjust(CoNIOutput$cdgo2_pvalue)
    CoNIOutput<-as.data.frame(CoNIOutput)
    if(!more_coef){
      # Drop the extra vertex-edge partial-correlation columns (10-13)
      CoNIOutput<-CoNIOutput[,c(1:9,14:length(CoNIOutput))]
    }
    #Save raw results
    if(saveFiles & saveRaw){
      suppressMessages(fwrite(CoNIOutput, paste(outputDir,outputNameRaw,"_Raw",".gz",sep=""),nThread=numCores))
    }
    #Keep only significant results
    if(onlySgRes && multipleTAdj){ #adjustment for multiple testing
      CoNIOutput<-CoNIOutput %>% filter(cor_pvalue<=0.05) %>% filter(.data$cdgo_adjusted<=0.05 & .data$cdgo2_adjusted<=0.05)
    }else if(onlySgRes){ #without adjustment for multiple testing
      CoNIOutput<-CoNIOutput %>% filter(cor_pvalue<=0.05) %>% filter(.data$cdgo_pvalue<=0.05 & .data$cdgo2_pvalue<=0.05)
    }
    #Delete intermediary files
    delIntFiles(delIntermediaryFiles,outputDir)
    #Output processing time
    end_time <- Sys.time()
    total_time<-difftime(end_time,start_time,units='hours')
    cat(total_time,"hours", "\n",sep=" ")
    print('CoNI ran successfully')
    return(CoNIOutput)
  }else{
    print('Split was set to FALSE')
    splitedgeD<-FALSE
  }
  # No-split branch: same triplet computation as above, but looping over the
  # columns of edgeD directly and keeping all results in memory
  if (splitedgeD==FALSE){
    #Register parallel backend
    chk <- Sys.getenv("_R_CHECK_LIMIT_CORES_", "")
    if (nzchar(chk) && chk == "TRUE") {
      # use 2 cores in CRAN/Travis/AppVeyor
      numCores <- 2
      if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
    }else if(is.null(numCores)){
      numCores<-detectCores()-2
      if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
    }else{
      if(verbose){cat("Running parallelization with ",numCores," cores\n",sep="")}
    }
    cl<-makeCluster(numCores)
    #registerDoSNOW(cl)
    registerDoParallel(cl)
    print('Running CoNI...')
    #Set progress bar
    # pb<-tkProgressBar(max=ncol(edgeD))
    #Convert to data.frames
    edgeD<-as.data.frame(edgeD)
    normvertexD_Tablesignificant<-as.data.frame(normvertexD_Tablesignificant)
    df_results = foreach(j = 1:ncol(edgeD),.packages = c("ppcor", "doParallel","cocor"), .combine=rbind,.inorder = FALSE) %dopar% {#Loop genes .options.snow=taskBar1(pb,ncol(edgeD))
      results2 = foreach(i = 1:nrow(normvertexD_Tablesignificant),.packages = c("ppcor", "doParallel","cocor") ,.combine=rbind,.inorder = FALSE) %dopar% {#Loop table significant metabolites
        index1<-normvertexD_Tablesignificant[i,6]#Index column of first metabolite
        index2<-normvertexD_Tablesignificant[i,7]#Index column of second metabolite
        #Get vertex features names and edge feature name (e.g. names for metabolites and gene)
        Feature_1_vertexD<-normvertexD_Tablesignificant[i,1]
        Feature_2_vertexD<-normvertexD_Tablesignificant[i,2]
        Feature_edgeD<-colnames(edgeD)[j]
        #Get correlation between vertex features (e.g. metabolites)
        cor_coefficient<-normvertexD_Tablesignificant[i,3]
        cor_pvalue<-normvertexD_Tablesignificant[i,4]
        #############################
        #Calculate partial correlation between vertex features partialling out edge feature (e.g. metabolites and gene)
        pcor_result<-pcor.test(vertexD[,index1],vertexD[,index2],edgeD[,j],method="p")
        pcor_pvalue<-pcor_result[[2]]
        pcor_coefficient<-pcor_result[[1]]
        #Sometimes the computer is not precise in float representation...
        #For numbers very close to 1 and -1 it is problematic, so clamp
        if(pcor_coefficient > 1){
          pcor_coefficient<-0.999
        }else if(pcor_coefficient < -1){
          pcor_coefficient<- -0.999
        }
        #Correlation vertex feature and edge feature (e.g metabolites vs gene)
        cor_m1_vs_g <- cor(vertexD[,index1],edgeD[,j])
        cor_m2_vs_g <- cor(vertexD[,index2],edgeD[,j])
        #Test if partial correlation coefficient differs from correlation coefficient
        #j=vertex feature 1 (e.g. metabolite1)
        #k=vertex feature 1 (e.g.metabolite2)
        #h=edge feature (e.g. gene)
        #Steiger test
        cdgo <- cocor.dep.groups.overlap(r.jk=cor_coefficient[[1]], r.jh=pcor_coefficient, r.kh=cor_m1_vs_g[1], n=nrow(vertexD),
                                         alternative="two.sided", alpha=0.05, conf.level=0.95, null.value=0, test='steiger1980')
        cdgo2 <- cocor.dep.groups.overlap(r.jk=cor_coefficient[[1]], r.jh=pcor_coefficient, r.kh=cor_m2_vs_g[1], n=nrow(vertexD),
                                          alternative="two.sided", alpha=0.05, conf.level=0.95, null.value=0, test='steiger1980')
        cdgo_pvalue <- cdgo@steiger1980$p.value
        cdgo2_pvalue<- cdgo2@steiger1980$p.value
        #vertex Feature 1 and edge Feature partialling out vertex Feature 2 (e.g. Metabolite 1 and Gene | Metabolite 2)
        pcor_res_jh_k <- tryCatch({pcor.test(vertexD[,index1],edgeD[,j],vertexD[,index2],method="p")},
                                  error=function(cond) {
                                    message('Partial correlation LF1_edge|LF2 failed')
                                    message("Here's the original error message:")
                                    message(cond)
                                    # Choose a return value in case of error
                                    return('NA')
                                  }
        )
        if(is.na(pcor_res_jh_k[[1]])){
          pcor_res_jh_kCoef<-"NA"
          pcor_res_jh_kpval<-"NA"
        }else{
          pcor_res_jh_kCoef<-pcor_res_jh_k[[1]]
          pcor_res_jh_kpval<-pcor_res_jh_k[[2]]
        }
        #vertex Feature 2 and edge Feature partialling out vertex Feature 1 (e.g. Metabolite 2 and Gene | Metabolite 1)
        pcor_res_kh_j<-tryCatch({pcor.test(vertexD[,index2],edgeD[,j],vertexD[,index1],method="p")},
                                error=function(cond) {
                                  message('Partial correlation LF2_edge|LF1 failed')
                                  message("Here's the original error message:")
                                  message(cond)
                                  # Choose a return value in case of error
                                  return('NA')
                                }
        )
        if(is.na(pcor_res_kh_j[[1]])){
          pcor_res_kh_jCoef<-"NA"
          pcor_res_kh_jpval<-"NA"
        }else{
          pcor_res_kh_jCoef<-pcor_res_kh_j[[1]]
          pcor_res_kh_jpval<-pcor_res_kh_j[[2]]
        }
        # Last expression = the row returned to foreach for rbind-combining
        rowtoprint<-list(Feature_1_vertexD,Feature_2_vertexD,Feature_edgeD,
                         pcor_coefficient,pcor_pvalue,cor_coefficient,
                         cor_pvalue,cdgo_pvalue,cdgo2_pvalue,
                         pcor_res_jh_kCoef,pcor_res_jh_kpval,
                         pcor_res_kh_jCoef,pcor_res_kh_jpval)
      }
    }
    # close(pb)
    stopCluster(cl)
    #Remove weird format: foreach returned a matrix of list cells
    df_results<-as.data.frame(df_results) #create data frame
    #Remove nested list format
    results<-list()
    for(i in 1:ncol(df_results)){#loop columns
      resultC<-unlist(df_results[i]) #unlist column
      results[[i]]<-unname(resultC) #save result to list
    }
    df_results<-as.data.frame(do.call(cbind,results)) #cbind list to create data frame
    # df_results<-sapply(df_results[,1:ncol(df_results)],function(x){
    #   x<-unlist(x)})
    # df_results<-as.data.frame(df_results)
    #Set numeric columns as numeric
    df_results[,4:13]<-sapply(df_results[, 4:13], function(x){
      as.numeric(as.character(x))
    })
    #Set column names
    colnames(df_results)<-c("Feature_1_vertexD", "Feature_2_vertexD", "Feature_edgeD", "pcor_coefficient",
                            "pcor_pvalue", "cor_coefficient", "cor_pvalue", "cdgo_pvalue", "cdgo2_pvalue",
                            "pcor_LF1_edge__LF2", "pcor_pvalue_LF1_edge__LF2",
                            "pcor_LF2_edge__LF1", "pcor_pvalue_LF2_edge__LF1")
    #Add adjusted steiger pvalue
    df_results$cdgo_adjusted<-p.adjust(df_results$cdgo_pvalue)
    df_results$cdgo2_adjusted<-p.adjust(df_results$cdgo2_pvalue)
    #Save raw results to disk
    if(saveFiles & saveRaw){
      df_results_raw<-as.matrix(df_results)
      suppressMessages(fwrite(df_results_raw, paste(outputDir,outputNameRaw,"_Raw",".gz",sep=""),nThread=numCores,quote = TRUE))
    }
    if(!more_coef){
      # Drop the extra vertex-edge partial-correlation columns (10-13)
      df_results<-df_results[,c(1:9,14:length(df_results))]
    }
    #Filter significance
    if(onlySgRes){
      df_results<-df_results %>% filter(cor_pvalue<=0.05) %>% filter(.data$cdgo_adjusted<=0.05 & .data$cdgo2_adjusted<=0.05)
    }
    #Output processing time
    end_time <- Sys.time()
    total_time<-difftime(end_time,start_time,units='hours')
    cat(total_time,"hours", "\n",sep=" ")
    print('CoNI ran successfully')
    return(df_results)
  }
}
#' Check input parameters
#' @description Internal use. Function to check if input parameters are of the right class
#' @keywords internal
#' @importFrom methods is
#' @return No return value, called for side effects
checkInputParameters<-function(ParaList){
  # Helper: expected parameter names that the user actually supplied.
  # Returning them in the order of 'expected' keeps names and values aligned,
  # so error messages always report the correct parameter (the previous
  # double-match() approach could misalign them when arguments were supplied
  # in a different order).
  suppliedParams<-function(expected,supplied){
    expected[expected %in% supplied]
  }
  #Check path: the parent of the requested output directory must already exist
  if("outputDir" %in% names(ParaList)){
    param<-eval(ParaList[["outputDir"]])
    if(param=="./"){
      print("Output in working directory")
    }else{
      LPathParts<-strsplit(param,split = "/")[[1]]
      LPathParts<-LPathParts[-length(LPathParts)] # drop the new directory itself
      Path<-paste(LPathParts,collapse="/")
      if(!dir.exists(Path)){
        stop("Path provided for the new directoy does not exist")
      }
    }
  }
  #Check parameters that should be characters
  ParamChNames<- c("outputName","edgeDname","vertexDname")
  for(pname in suppliedParams(ParamChNames,names(ParaList))){
    param<-eval(ParaList[[pname]])
    if (!is(param, "character")) {
      stop(paste0("Wrong class for input '",pname,"'. Character is expected and ",class(param)," was given"))
    }
  }
  #Check parameters that should be numeric (positive integers)
  ParamNumNames<- c("split_number","numCores")
  for(pname in suppliedParams(ParamNumNames,names(ParaList))){
    param<-eval(ParaList[[pname]])
    if(is.null(param) && pname == "numCores"){
      next # numCores may legitimately be NULL (auto-detection downstream)
    }else if(is.null(param)){
      stop(paste0("Input ",pname," cannot be null"))
    }
    if (!is(param, "numeric")) {
      stop(paste0("Wrong class for input '",pname,"'. Numeric is expected and ",class(param)," was given"))
    }
    if(param<0){
      stop(paste0("Input '",pname,"' is not a positive integer"))
    }
  }
  #Check parameters that should be logical
  ParamLogNames<- c("padjustvertexD","onlySgRes","correlateDFs","splitedgeD",
                    "delPrevious","delIntermediaryFiles","verbose",
                    "filter_highVarianceEdge","more_coef","saveRaw")
  for(pname in suppliedParams(ParamLogNames,names(ParaList))){
    param<-eval(ParaList[[pname]])
    if (!is(param, "logical")) {
      stop(paste0("Wrong class for input '",pname,"'. Logical is expected and ",class(param)," was given"))
    }
  }
}
#' Write table
#' @description Internal use. Attempts to write a split-result table with fwrite;
#' returns "NA" if writing fails so the caller can react without aborting.
#' @keywords internal
#' @importFrom data.table fwrite
#' @return Returns NA if it fails to write file to memory, otherwise no return value
writeTable <- function(results_write,num_cores,outputDir,iteration) {
  # Destination file for this iteration's split results
  target_file <- paste(outputDir,"CoNIOutputSplit",iteration,".csv",sep="")
  written <- tryCatch(
    suppressMessages(fwrite(results_write, target_file, nThread=num_cores)),
    error=function(cond) {
      message('fwrite failed')
      message("Here's the original error message:")
      message(cond)
      # Sentinel value signalling failure to the caller
      'NA'
    }
  )
  return(written)
}
#' Check previous files
#' @description Internal use. This function checks for output files of a previous run and
#' deletes them according to the user's input options: a full clean-up when `del` is TRUE,
#' nothing (besides an optional notice) when resuming at `iteration > 1`, and removal of
#' only the intermediary split files otherwise.
#' @keywords internal
#' @return No return value, called for side effects
check_previous<-function(del,iteration,outDir,verb){
  if(del){
    # Full clean start: remove all previous CoNI output and raw files
    filesDel<-unique(c(list.files(outDir,pattern = "^CoNIOutput"),list.files(outDir,pattern = "^.*_Raw")))
    if(length(filesDel)>0){
      file.remove(paste0(outDir,filesDel)) # file.remove is vectorized; no sapply needed
    }
    filesDel2<-list.files(outDir,pattern = "KeptFeatures_")
    if(length(filesDel2)>0){
      file.remove(paste0(outDir,filesDel2))
    }
  }else if(iteration>1){
    # Resuming: keep previous files and optionally inform the user
    if(verb){
      cat("Previous files are present\n")
      cat("Starting from iteration ",iteration,"\n",sep="")
    }
  }else{
    # Fresh run without explicit deletion: clear only intermediary split files
    filesDel<-list.files(outDir,pattern = "CoNIOutputSplit")
    if(length(filesDel)>0){
      file.remove(paste0(outDir,filesDel))
    }
  }
}
#' Delete intermediary files
#' @description Internal use. This function deletes the intermediary split files
#' ("CoNIOutputSplit*") from the output directory when `del` is TRUE.
#' @keywords internal
#' @return No return value, called for side effects
delIntFiles<-function(del,outDir){
  if(del){
    filesDel<-list.files(outDir,pattern = "CoNIOutputSplit")
    if(length(filesDel)>0){
      # file.remove is vectorized; a sapply loop is unnecessary
      file.remove(paste0(outDir,filesDel))
    }
  }
}
#' Check if files exist
#' @description Internal use. Verifies that both input objects were supplied;
#' stops CoNI with an error message when either is missing.
#' @keywords internal
#' @return No return value, called for side effects
do_objectsExist<-function(gene_exp,norm_metabo_dat,verb){
  # Guard clause: abort immediately when either argument was not provided
  if(missing(gene_exp) | missing(norm_metabo_dat)){
    message("Input objects are missing")
    stop("CoNI end")
  }
  if(verb){
    print("Input objects are defined")
  }
}
#' Split dataset
#' @description Internal use. Splits the 'big' omics data column-wise into smaller
#' data frames to keep memory usage manageable. When the column count is not a
#' multiple of the requested split number, the remainder becomes one extra chunk.
#' @keywords internal
#' @return A list of data.frames
split_df<-function(AbundantDF,numberSplitDF_2=2){
  chunks<-list()
  total_cols<-ncol(AbundantDF)
  if(total_cols %% numberSplitDF_2 !=0){
    # Uneven split: numberSplitDF_2 chunks of floor size, plus one remainder chunk
    base_size<-floor(total_cols/numberSplitDF_2)
    from<-1
    idx<-1
    while(from+base_size-1 <= base_size*numberSplitDF_2){
      chunks[[idx]]<-AbundantDF[,from:(from+base_size-1)]
      from<-from+base_size
      idx<-idx+1
    }
    from<-base_size*numberSplitDF_2+1
    # drop=FALSE keeps the column name when the remainder is a single column
    chunks[[idx]]<-AbundantDF[,from:total_cols,drop=FALSE]
  }else{
    # Even split: exactly numberSplitDF_2 equally sized chunks
    chunk_size<-total_cols/numberSplitDF_2
    from<-1
    idx<-1
    while(from+chunk_size-1 <= total_cols){
      chunks[[idx]]<-AbundantDF[,from:(from+chunk_size-1)]
      from<-from+chunk_size
      idx<-idx+1
    }
  }
  chunks
}
#' Compare sample names
#' @description Internal use. Checks that both datasets contain exactly the same
#' sample names (row names, order-independent). Stops CoNI with an explanatory
#' message when they differ.
#' @keywords internal
#' @return No return value, called for side effects
compare_sampleNames<-function(df1,df2){
  # Sort both sets of row names so the comparison ignores sample order
  sorted_names1<-rownames(df1)[order(rownames(df1))]
  sorted_names2<-rownames(df2)[order(rownames(df2))]
  if(identical(sorted_names1,sorted_names2)){
    print('Samples match')
  }else{
    print('Sample names between datasets do not match')
    print('Make sure omics data comes from the same samples and that sample names are consistent across datasets')
    stop('CoNI end')
  }
}
#' Flatten
#' @description Internal use. Converts a square correlation matrix (plus its matching
#' p-value matrix) into a long-format data frame with one row per unique pair,
#' taking only the upper triangle (upper-triangle order: column-major).
#' cormat : matrix of the correlation coefficients
#' pmat : matrix of the correlation p-values
#' @keywords internal
#' @return A data.frame with all pairwise correlations of the input elements and their respective p-values
flattenCorrMatrix <- function(cormat, pmat) {
  upper_mask <- upper.tri(cormat)
  feature_names <- rownames(cormat)
  # Row/column indices of each retained cell map back to feature names;
  # the matrix is assumed symmetric so rownames serve for both axes
  data.frame(
    row = feature_names[row(cormat)[upper_mask]],
    column = feature_names[col(cormat)[upper_mask]],
    cor = cormat[upper_mask],
    p = pmat[upper_mask]
  )
}
#' Pairwise correlations
#' @description Internal use. Computes all pairwise Pearson correlations of a matrix
#' (via Hmisc::rcorr) and keeps the significant ones. With padj=TRUE, filtering is done
#' on adjusted p-values and falls back to raw p-values if nothing survives adjustment.
#' @keywords internal
#' @importFrom Hmisc rcorr
#' @return A data.frame with the significant correlations of a correlation matrix
sig_correlation2<-function(input_edgeD,padj=TRUE,method="BH", verb){
  # Pearson correlations and p-values for every feature pair
  corr_obj<-rcorr(as.matrix(input_edgeD),type='p')
  flat_table<-flattenCorrMatrix(corr_obj$r,corr_obj$P)
  flat_table$adj.p<-p.adjust(flat_table$p,method = method)
  if(padj){
    significant <- flat_table %>% filter(.data$adj.p<0.05)
    if(nrow(significant) == 0){
      # Fallback: no pair survived multiple-testing correction
      print('No features significantly correlate after padjustment for vertexD')
      print('Using non adjusted pvalues')
      significant<-flat_table %>% filter(.data$p<0.05)}
  }else{
    print("Ajustment for multiple testing was set to FALSE for correlations in vertex Data")
    significant<-flat_table %>% filter(.data$p<0.05)
  }
  if(verb){print(paste('Significant correlations',nrow(significant),sep=" "))}
  significant
}
#' Significant correlations 2 Df
#' @description Internal use. This function input are two data frames (e.g. metabolites and genes). It calculates the correlation matrix and creates a table with only significant pairs (p <= 0.05). No correction for multiple testing is done.
#' @keywords internal
#' @importFrom stats pt
#' @return A data.frame with vertex-edge significant correlation coefficients and their p-values
sig_correlation2Dfs<-function(metabolite_data,gene_expression){
  # Pairwise sample counts (handles NAs) and correlation matrix
  n <- t(!is.na(metabolite_data)) %*% (!is.na(gene_expression)) # same as count.pairwise(x,y) from psych package
  r <- cor(metabolite_data, gene_expression, use = "pairwise.complete.obs") # MUCH faster than corr.test()
  # Two-sided t-test p-values derived from the correlation coefficients
  tstat <- (r*sqrt(n-2))/sqrt(1-r^2)
  p <- 2*(1 - pt(abs(tstat),(n-2)))
  # Keep only significant cells. Vectorized with which(arr.ind=TRUE) instead of the
  # previous nested loop with add_row (which grew the data frame one row at a time);
  # NA p-values are silently skipped instead of crashing the comparison.
  keep <- which(p <= 0.05, arr.ind = TRUE)
  # Sort row-major so the output row order matches the old loop (rows outer, cols inner)
  keep <- keep[order(keep[,1], keep[,2]), , drop = FALSE]
  data.frame(metabolite = rownames(r)[keep[,1]],
             gene = colnames(r)[keep[,2]],
             cor = r[keep],
             pvalue = p[keep],
             stringsAsFactors = FALSE)
}
#' Low variance features
#' @description Internal use. Keeps only the features (columns) whose variance
#' across samples is below 0.5.
#' @import genefilter
#' @importFrom genefilter rowVars
#' @keywords internal
#' @return A data.frame where high variance features were removed
get_lowvarFeatures<-function(df){
  Var<-NULL # silences R CMD check note for the subset() column reference below
  # Work on the transpose so features become rows for rowVars()
  transposed<-as.data.frame(t(df))
  transposed$Var<-genefilter::rowVars(as.matrix(transposed))
  transposed<-subset(transposed,Var<0.5)
  # Drop the helper variance column again
  transposed<-transposed[,-ncol(transposed)]
  # Transpose back to samples-in-rows orientation
  transposed<-as.data.frame(t(transposed))
}
#' Merge Files.
#' @description Internal use. This function reads the output split files generated by CoNI and generates a single result object.
#' @keywords internal
#' @importFrom data.table fread
#' @return A single data.table with the results of CoNI (NULL if no split files are found)
merge_outpuSplitFiles<-function(outputDir){
  file_list <- list.files(outputDir)
  file_list<-file_list[grep("CoNIOutputSplit",file_list)]
  # Read every split file first, then bind once. This replaces the previous
  # exists("datasetResultsCoNI") pattern, which could pick up a stale object of
  # the same name from an enclosing/global environment, and avoids the
  # quadratic cost of growing the table with rbind inside the loop.
  split_tables<-lapply(file_list, function(split_file){
    fread(paste(outputDir,split_file,sep=""), header=TRUE, sep=",")
  })
  do.call(rbind, split_tables)
}
#' Output directory
#' @description Internal use. Makes sure the output directory exists, creating it if needed.
#' @keywords internal
#' @return No return value, called for side effects
check_outputDir<-function(outputDir,verb){
  target<-paste(outputDir,sep="")
  if (!file.exists(target)) {
    # Directory missing: create it and always tell the user
    print("Output directory does not exist - creating directory ... ")
    dir.create(file.path(target))
  } else if (verb) {
    print("Output directory exists")
  }
}
#' Create network
#' @description This function creates a network using as input the output of CoNI and a table specifying the colors for the nodes.
#' @param ResultsCoNI The input of the function are the results of CoNI.
#' @param colorVertexNetwork logical. If TRUE, the table colorVertexTable has to be provided to specify vertex colors
#' @param colorVertexTable Table specifying the colors for the nodes (vertex features). The first column should contain the names matching the features of the vertex Data and the colors or other data can be specified in the rest of the columns
#' @param outputDir Output directory where the network is saved as well as the file that was used to generate the network.
#' @param outputFileName The name of the file used to create the network.
#' @param Class Optional data frame with at least two columns, first column contains all vertex features and another column the vertex feature class (column named "Class"). Necessary for treatment comparisons based on class
#' @param saveFiles logical. If FALSE TableForNetwork_`outputFileName`.csv and Network_`outputFileName`.graphml are not saved to disk
#' @importFrom plyr ddply
#' @importFrom igraph graph_from_data_frame simplify V E degree hub_score transitivity closeness betweenness eigen_centrality centr_betw centr_clo centr_degree edge_betweenness write_graph
#' @return Returns an igraph object (network) constructed from ResultsCoNI. The network includes the following network statistics
#' \itemize{
##' \item{"degree"}{The number of the vertex adjacent edges}
##' \item{"hub_score"}{The principal eigenvector of A*t(A), where A is the adjacency matrix of the graph}
##' \item{"transitivity"}{Probability that the adjacent vertices of a vertex are connected}
##' \item{"closeness"}{Steps required to access every other vertex from a given vertex}
##' \item{"betweenness"}{(roughly) The number of geodesics (shortest paths) going through a vertex or an edge}
##' \item{"eigen_centrality"}{Takes a graph (graph) and returns the eigenvector centralities of positions v within it}
##' \item{"centralized_betweenness"}{The vertice-level centrality score according to the betweenness of vertices}
##' \item{"centralized_closeness"}{The vertice-level centrality score according to the closeness of vertices}
##' \item{"centralized_degree"}{The vertice-level centrality score according to the degrees of vertices}
##' }
##' For more details see igraph package
#' @examples
#' #Generate Network
#'
#' #Load color nodes table
#' data(MetColorTable)
#' #Assign colors according to "Class" column
#' MetColorTable<-assign_colorsAnnotation(MetColorTable)
#' #Load CoNI results
#' data(CoNIResultsHFDToy)
#'
#' #Generate Network
#' HFDNetwork<-generate_network(ResultsCoNI = CoNIResultsHFDToy,
#'                              colorVertexNetwork = TRUE,
#'                              colorVertexTable = MetColorTable,
#'                              outputDir = "./",
#'                              outputFileName = "HFD",
#'                              saveFiles = FALSE)
#' @export
generate_network<-function(ResultsCoNI,
                           colorVertexNetwork = TRUE,
                           colorVertexTable,
                           outputDir = "./", outputFileName="ResultsCoNI",
                           Class=NULL,
                           saveFiles = TRUE){
  # Bind column names used non-standardly below to NULL (avoids R CMD check notes)
  Feature_edgeD <-pcor_coefficient <- cor_coefficient <- NULL
  pcor_LF1_edge__LF2 <-pcor_pvalue_LF1_edge__LF2 <-pcor_LF2_edge__LF1 <- pcor_pvalue_LF2_edge__LF1 <- NULL
  saveNetwork=TRUE
  #results_SteigerAdjust<-ResultsCoNI
  # More than 11 columns means the extra vertex-edge partial correlation columns
  # (CoNI run with more_coef = TRUE) are present; keep them alongside the core columns
  if(ncol(ResultsCoNI)>11){
    results_SteigerAdjust <- ResultsCoNI[,c(1:7,10:13)]
  }else{
    results_SteigerAdjust <- ResultsCoNI[,c(1:7)]
  }
  #Get pair metabolites, gene and pcor and cor information... change to add more information
  #Summarize results for network construction:
  # one row per vertex pair with the number of linking edge features (weightreal),
  # the edge features themselves, and collapsed/averaged correlation coefficients
  df<-ddply(results_SteigerAdjust,c(1,2),plyr::summarize,
            weightreal=length(Feature_edgeD),
            Genes=paste0(unique(Feature_edgeD),collapse=";"),
            #ActualGeneNames=paste0(unique(ActualGeneName),collapse=";"),
            PcorValues=paste0(pcor_coefficient,collapse=";"),
            CorValues=paste0(cor_coefficient,collapse=";"),
            PcorAverage=mean(pcor_coefficient),
            CorAverage=mean(cor_coefficient)
  )
  if(ncol(ResultsCoNI)>11){
    # Append the collapsed vertex-edge partial correlations (and their p-values)
    df_2<-ddply(results_SteigerAdjust,c(1,2),plyr::summarize,
                PcorLink1edge=paste0(pcor_LF1_edge__LF2,collapse=";"),
                PcorLink1edge_pvalue=paste0(pcor_pvalue_LF1_edge__LF2,collapse=";"),
                PcorLink2edge=paste0(pcor_LF2_edge__LF1,collapse=";"),
                PcorLink2edge_pvalue=paste0(pcor_pvalue_LF2_edge__LF1,collapse=";"))
    df<-cbind(df,df_2[,3:ncol(df_2)])
  }
  colnames(df)[1:2] <- c("from","to")
  clinksd <- df
  clinksd$type <- "hyperlink"
  clinksd$weight <- clinksd$weightreal/max(clinksd$weightreal) #Calculate a width based on the maximum number of genes per connection
  #Save table
  if(!is.null(Class)){
    # Validate the optional Class annotation, then attach a class to both vertices of every edge
    if(ncol(Class)<2){
      stop("The 'Class' data frame provided is not correct")
    }else if(!any(grepl("Class$",colnames(Class),ignore.case=TRUE))){
      stop("The 'Class' data frame does not contain a 'Class' column")
    }
    idxClass<-grep("Class$",colnames(Class),ignore.case = TRUE)
    clinksd$Vertex1_Class<-Class[match(clinksd$from,make.names(Class[,1])),idxClass] #make.names is necessary as the results files returns make.names feature names
    clinksd$Vertex2_Class<-Class[match(clinksd$to,make.names(Class[,1])),idxClass]
  }
  if(saveFiles){
    write.csv(clinksd,paste(outputDir,"TableForNetwork_",outputFileName,".csv",sep=""),row.names=FALSE)
  }
  cnodes <- data.frame("Name"=unique(c(as.character(df$from),as.character(df$to))),stringsAsFactors=FALSE)#Get the nodes (metabolites)
  if(colorVertexNetwork){
    #Assign colors to nodes (left join keeps nodes without a color entry)
    m <- merge(cnodes,colorVertexTable,by.x="Name",by.y=colnames(colorVertexTable)[1],all.x=TRUE)
    cnodesd <- m
  }else{
    cnodesd <- cnodes
  }
  #Change column names
  #colnames(clinksd)[10] <- "weight"
  #Create graph (undirected; multi-edges kept by remove.multiple=FALSE)
  netd <- igraph::graph_from_data_frame(d=clinksd, vertices=cnodesd, directed=FALSE)
  netd_simple <- igraph::simplify(netd,remove.multiple=FALSE)
  #Add network statistics as vertex attributes (see roxygen block for definitions)
  igraph::V(netd_simple)$degree<-degree(netd_simple, mode="all")
  igraph::V(netd_simple)$hub_score<-hub_score(netd_simple, weights=NA)$vector
  igraph::V(netd_simple)$transitivity<-transitivity(netd_simple, type="local")
  igraph::V(netd_simple)$closeness<-closeness(netd_simple, mode="all", weights=NA)
  igraph::V(netd_simple)$betweenness<-betweenness(netd_simple, directed=FALSE, weights=NA)
  igraph::V(netd_simple)$eigen_centrality<-eigen_centrality(netd_simple, directed=FALSE, weights=NA)$vector
  igraph::V(netd_simple)$centralized_betweenness<-centr_betw(netd_simple, directed=FALSE, normalized=TRUE)$res
  igraph::V(netd_simple)$centralized_closeness<-centr_clo(netd_simple, mode="all", normalized=TRUE)$res
  igraph::V(netd_simple)$centralized_degree<-centr_degree(netd_simple, mode="all", normalized=TRUE)$res
  #V(netd_simple)$membership_community_edgeBetweenes<-cluster_edge_betweenness(netd_simple,directed = F)$membership
  #Add edge betweeness as an edge attribute
  igraph::E(netd_simple)$betweeness <- edge_betweenness(netd_simple, directed=FALSE, weights=NA)
  if(saveFiles & saveNetwork){
    write_graph(netd_simple,file=paste0(outputDir,"Network_",outputFileName,".graphml"),format="graphml")
  }
  return(netd_simple)
}
#' Find local controlling features
#' @description This function applies for a selected subnetwork a binomial test using the frequency of appearance of an edge feature and the total number of edge features. The probability corresponds to 1/n_df, where n_df corresponds to the total number of edge features in the network.
#' The selected subnetwork corresponds to the second level neighborhood of a specific node. The test is applied to all possible second level neighborhoods in the network.
#' @param ResultsCoNI The output of CoNI (after p-adjustment)
#' @param network Network created with the function generate_network
#' @param padjust logical. Filter output based on adjusted p values
#' @return Returns a data.frame with the results of the binomial tests. Significant results correspond to local controlling features
#' @importFrom igraph V neighbors
#' @importFrom stats dbinom
#' @import dplyr
#' @examples
#' #Load color nodes table
#' data(MetColorTable)
#'
#' #Assign colors according to "Class" column
#' MetColorTable<-assign_colorsAnnotation(MetColorTable)
#'
#' #Load CoNI results
#' data(CoNIResultsHFDToy)
#'
#' #Generate Network
#' #Note: Colors not visible when ploting in Igraph
#' HFDNetwork<-generate_network(ResultsCoNI = CoNIResultsHFDToy,
#'                              colorVertexNetwork = TRUE,
#'                              colorVertexTable = MetColorTable,
#'                              Class = MetColorTable,
#'                              outputDir = "./",
#'                              outputFileName = "HFD",
#'                              saveFiles = FALSE)
#'
#'#Note: For this tiny example nothing is significant
#' LCG_BinomialTestTableHFD<- find_localControllingFeatures(ResultsCoNI = CoNIResultsHFDToy,
#'                                                          network = HFDNetwork )
#' LCGenes_HFD<-as.character(unique(LCG_BinomialTestTableHFD$edgeFeatures))
#'
#' @export
find_localControllingFeatures<-function(ResultsCoNI,network,padjust=TRUE){
  # Bind NSE column names used in subset() below (avoids R CMD check notes)
  Feature_1_vertexD <- Feature_2_vertexD <- NULL
  ls2 <- length(unique(ResultsCoNI$Feature_edgeD)) #get number of genes affecting metabolites; 1/ls2 is the binomial success probability
  #Distance = 2 -> Second level neighborhood?
  df <- list()
  for(i in names(igraph::V(network))){ #loop nodes of graph
    l <- igraph::V(network)$name[neighbors(network, i)] #Get first level neighbors of node in iteration
    l1 <- list()
    for(j in l){ #loop the first neighbors and get their neighbors (Second level neighborhood)
      l1[[j]] <- igraph::V(network)$name[neighbors(network, j)]
    }
    l1 <- unique(unlist(l1)) #Get unique 2nd level neighbors
    #Subset the CoNI Results table to include only the second level neighborhood:
    # edges touching node i and a 1st-level neighbor, or a 1st- and a 2nd-level neighbor
    s <- subset(ResultsCoNI, ((Feature_1_vertexD==i & Feature_2_vertexD %in% l) | (Feature_2_vertexD==i & Feature_1_vertexD %in% l)) |
                  ((Feature_1_vertexD %in% l & Feature_2_vertexD %in% l1) | (Feature_2_vertexD %in% l & Feature_1_vertexD %in% l1)))
    #Get the total number of edges in the neighborhood
    EdgesNo <- length(unique(paste0(s$Feature_1_vertexD,"_",s$Feature_2_vertexD)))
    #Get the unique total number of edge features (e.g., genes) in the neighborhood
    DrF_totalNo <- length(unique(s$Feature_edgeD))
    #The amount of edge features (e.g., genes) (with repetitions) found in the second level neighborhood
    DrF_wRepNo <- nrow(s)
    s <- droplevels(s)
    #The number of times every edge feature (e.g., genes) appears in the neighborhood. It is a table.
    b <- table(s$Feature_edgeD)
    TotalNumberGenes<-length(unique(s$Feature_edgeD))
    # One row per edge feature in this neighborhood; as.data.frame(b) adds Var1 (feature) and Freq (count)
    df[[i]] <- data.frame("Node1"=rep(i,DrF_totalNo),"Edges"=rep(EdgesNo,DrF_totalNo),"Draws"=rep(DrF_wRepNo,DrF_totalNo),"GenesInTotal"=rep(DrF_totalNo,DrF_totalNo),as.data.frame(b))
  }
  #Generate result data frame
  res2 <- do.call(rbind.data.frame, df)
  #we use the binomial distribution to test if the enrichment is significant as we can draw a gene for an area more often
  # x[[6]] = Freq (times the feature appears), x[[3]] = Draws (total appearances in the neighborhood)
  res2$Pval <- apply(res2,1,function(x){dbinom(as.numeric(x[[6]]),as.numeric(x[[3]]),1/ls2)})
  res2$Padj <- p.adjust(res2$Pval)
  res2 <- res2[order(res2$Padj),]
  res2 <- res2 %>% rename(edgeFeatures = .data$Var1)
  # Keep only significant rows (adjusted or raw p-values depending on padjust)
  if(padjust){
    res2 <- subset(res2,res2$Padj<0.05)
  }else{
    res2 <- subset(res2,res2$Pval<0.05)
  }
  res2
}
#'Linker Features by magnitude of effect
#'@description This function outputs the linker features with the strongest effect on the correlation of the vertex features
#'@param ResultsCoNI The output of CoNI
#'@param topn Top n number of features to output
#'@return Returns a data.frame, a filtered version of ResultsCoNI, showing the top n features
#'with the strongest effect, that is, the highest difference between the partial correlation and correlation coefficient.
#'@importFrom rlang .data
#'@examples
#' data(CoNIResultsHFDToy)
#' Top10HFD<-top_n_LF_byMagnitude(CoNIResultsHFDToy,topn = 10)
#'@export
top_n_LF_byMagnitude<-function(ResultsCoNI, topn=10){
  # Rank rows by the absolute difference between correlation and partial correlation
  # (the magnitude of the linker feature's effect), strongest first
  ResultsCoNI<-ResultsCoNI %>% mutate(difference=abs(.data$cor_coefficient - .data$pcor_coefficient)) %>% arrange(desc(.data$difference))
  lEdgeFeatures<-unique(ResultsCoNI$Feature_edgeD)
  # Keep at most topn edge features. Bug fix: the previous code indexed the
  # not-yet-defined 'selectedEdgeFeatures' when fewer than topn features existed,
  # which raised an error.
  selectedEdgeFeatures<-lEdgeFeatures[seq_len(min(topn,length(lEdgeFeatures)))]
  Out<-as.data.frame(ResultsCoNI[ResultsCoNI$Feature_edgeD %in% selectedEdgeFeatures,])
  return(Out)
}
#' Table local controlling edge features and vertex pairs
#' @description This function creates a table of the local controlling edge features
#' @param CoNIResults The output of CoNI (after p-adjustment)
#' @param LCFs Local controlling edge features as a vector
#' @return A data.frame of local controlling edge features and their respective vertex pairs, and unique vertexes.
#' @examples
#' #Load CoNI results
#' data(CoNIResultsHFDToy)
#' #Note: arbitrary list of genes, not Local controlling features
#' tableLCFs_VFs(CoNIResultsHFDToy, c("Lilr4b","Rps12"))
#' @importFrom plyr ddply
#' @importFrom tidyr unite
#' @export
tableLCFs_VFs<-function(CoNIResults,LCFs){
  # Bind NSE column names used by plyr/tidyr below (avoids R CMD check notes)
  Feature_1_vertexD<-Feature_2_vertexD<-Feature_edgeD<-MetabolitePair<-NULL
  # Keep only rows whose edge feature is one of the requested LCFs
  CoNIResults_LCFs<-CoNIResults[CoNIResults$Feature_edgeD %in% LCFs,]
  # NOTE(review): Gene_TableLCFs is computed but never used afterwards
  Gene_TableLCFs<- plyr::ddply(CoNIResults_LCFs, plyr::.(Feature_1_vertexD,Feature_2_vertexD), plyr::summarize,
                               Genes=paste(Feature_edgeD,collapse=","))
  #Join Metabolite pairs into a single "Vertex1-Vertex2" column
  CoNIResults_LCFs_MetaboliteJoined<-tidyr::unite(CoNIResults_LCFs,MetabolitePair,Feature_1_vertexD,Feature_2_vertexD,sep="-")
  CoNIResults_LCFs_MetaboliteJoined<-CoNIResults_LCFs_MetaboliteJoined[,c(1,2)]
  #Create table of Genes and their corresponding Metabolite pairs
  LCFs_and_MPairs <- plyr::ddply(CoNIResults_LCFs_MetaboliteJoined, plyr::.(Feature_edgeD), plyr::summarize,
                                 MetabolitePairs=paste(MetabolitePair,collapse=","))
  #Temporary table with the vertex pair columns and the edge feature column
  temp<-as.data.frame(CoNIResults_LCFs[,c(1:3)])
  #Add to the LCFs and Metabolites pairs the unique individual metabolites
  LCFs_and_MPairs$Metabolites<-plyr::ddply(temp, plyr::.(Feature_edgeD), plyr::summarize,
                                           Metabolites=paste(unique(c(as.character(Feature_1_vertexD),as.character(Feature_2_vertexD))),collapse=","))[,2]
  colnames(LCFs_and_MPairs)<-c("Local Controlling edge Feature","Vertex Feature pairs","Vertex Features")
  LCFs_and_MPairs
}
#' Compare triplets
#' @description Compare vertexFeature-vertexFeature-edgeFeature between two treatments, that is, find the shared triplets between two different CoNI runs.
#' @param Treat1_path TableForNetwork_file1 (file generated by CoNI) with path for Treatment 1
#' @param Treat2_path TableForNetwork_file2 (file generated by CoNI) with path for Treatment 2
#' @param OutputName Output file name with path
#' @return A data.frame with the shared triplets (vertex1 vertex2 edge_feature) between two CoNI runs
#' @examples
#' #For an example see the vignette
#' @importFrom utils write.csv
#' @export
Compare_Triplets<-function(Treat1_path,Treat2_path,
                           OutputName="Shared_Genes_and_Edges_Treat1vsTreat2.csv"){
  path_C<-file.path(find.package("CoNI"),"python")
  # Run the bundled python script with the given interpreter; returns the
  # system() exit status, or 'Error' when system() itself fails.
  # (Factored into a helper to avoid the previously duplicated tryCatch blocks.)
  run_script<-function(interpreter){
    tryCatch({system(paste0(interpreter,' ',path_C,'/Compare_Triplets.py ',Treat1_path," ",Treat2_path," ",OutputName))},
             error=function(cond) {
               # Choose a return value in case of error
               return('Error')
             }
    )
  }
  # Exit codes 9009 (Windows "not recognized"), 127 (Unix "command not found")
  # and 2 are treated as "interpreter missing": retry with 'python', then give up
  runPython<-run_script('python3')
  if(runPython==9009 || runPython == 127 || runPython == 2){
    runPython<-run_script('python')
    if(runPython==9009 || runPython == 127 || runPython == 2){stop("Make sure python3 is installed and in your path")}
  }
  # The python script writes a tab-separated file; read it back as the result
  Output<-read.csv(OutputName,sep="\t")
  return(Output)
}
#' Table VertexClass pairs of shared Edge Features
#' @description Compare VertexClass pairs of the shared Edge Features of two treatments (e.g., lipid-class-pairs per shared gene)
#' @param Treat1_path TableForNetwork_file (file generated by CoNI) with path of Treatment 1
#' @param Treat2_path TableForNetwork_file (file generated by CoNI) with path of Treatment 2
#' @param OutputName Output file name with path
#' @param Treat1Name Name of treatment one, default Treat1
#' @param Treat2Name Name of treatment one, default Treat2
#' @return A data.frame with all possible vertex-class pairs and their numbers per edge-feature and treatment.
#' @examples
#' #For an example see the vignette
#' @importFrom utils read.csv
#' @export
Compare_VertexClasses_sharedEdgeFeatures<-function(Treat1_path,Treat2_path,OutputName="Shared_Genes_and_Edges_Treat1vsTreat2.csv",Treat1Name="Treat1",Treat2Name="Treat2"){
  # Both input files must carry the Class annotation added by generate_network(Class=...)
  are_ClassColumnsPresent<-function(DFTreatment){
    boolVec<-c("Vertex1_Class","Vertex2_Class") %in% colnames(DFTreatment)
    if(!all(boolVec)){
      stop("Error: Make sure to add Class when you create your networks")
    }
  }
  DFTreat1<-read.csv(Treat1_path,nrows = 2,header=TRUE)
  # Bug fix: this previously re-read Treat1_path, so Treatment 2's file was never validated
  DFTreat2<-read.csv(Treat2_path,nrows = 2,header=TRUE)
  are_ClassColumnsPresent(DFTreat1)
  are_ClassColumnsPresent(DFTreat2)
  path_C<-file.path(find.package("CoNI"),"python")
  # Run the bundled python script with the given interpreter; returns the
  # system() exit status, or 'Error' when system() itself fails
  run_script<-function(interpreter){
    tryCatch({system(paste0(interpreter,' ',path_C,'/ComparisonClasses.py ',Treat1_path," ",Treat2_path," ",OutputName," ",Treat1Name," ",Treat2Name))},
             error=function(cond) {
               # Choose a return value in case of error
               return('Error')
             }
    )
  }
  # Exit codes 9009 (Windows), 127 (Unix) and 2 mean the interpreter is missing:
  # retry with 'python', then give up with an informative error
  runPython<-run_script('python3')
  if(runPython==9009 || runPython == 127 || runPython == 2){
    runPython<-run_script('python')
    if(runPython==9009 || runPython == 127 || runPython == 2){stop("Make sure python3 is installed and in your path")}
  }
  Output<-read.csv(OutputName,sep="\t")
  # Replace "Pair Class Missing" placeholders with zero counts so columns are numeric
  Output[,Treat1Name]<-as.numeric(gsub("Pair Class Missing",0,Output[,Treat1Name]))
  Output[,Treat2Name]<-as.numeric(gsub("Pair Class Missing",0,Output[,Treat2Name]))
  return(Output)
}
#' Vertex-class pairs profile of one shared edge feature
#' @description This function will create a barplot from the output of Compare_VertexClasses_sharedEdgeFeatures for a specific shared Edge Feature (e.g., a shared gene).
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param edgeF Edge feature present in output of Compare_VertexClasses_sharedEdgeFeatures
#' @param treat1 Name of treatment one, default Treatment1. It should match the column names of the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param treat2 Name of treatment one, default Treatment2. It should match the column names of the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param factorOrder A list specifying the order of the treatments.
#' @param col1 Color for Treatment 1
#' @param col2 Color for Treatment 2
#' @param EdgeFeatureType Type of Edge Feature (e.g., Gene)
#' @param xlb Name for x-axis
#' @param ylb Name for the y-axis
#' @param szaxisTxt Size axis text
#' @param szaxisTitle Size axis titles
#' @export
#' @return A ggplot object for a barplot. The barplot shows the vertex-class pairs profile of a single shared edge feature between two treatments
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' create_edgeFBarplot(CompTreatTable = VertexClassesSharedGenes_HFDvsChow,
#'                     edgeF = "Lilr4b",
#'                     treat1 = "HFD",
#'                     treat2 = "Chow",
#'                     factorOrder = c("HFD","Chow"),
#'                     EdgeFeatureType = "Gene")
#' @import ggplot2
#' @import dplyr
#' @importFrom tidyr gather
#' @importFrom forcats fct_relevel
#' @importFrom tidyselect vars_select_helpers
create_edgeFBarplot<-function(CompTreatTable,edgeF,treat1="Treatment1",treat2="Treatment2",
                              factorOrder=NULL,col1="red",col2="blue",EdgeFeatureType="Edge Feature",
                              xlb="Vertex-Class Pairs",
                              ylb="Number of pairs",
                              szaxisTxt=12,szaxisTitle=12){
  # Bind NSE column names used below (avoids R CMD check notes)
  treatment<-number_pairs<-VertexClassPair<-NULL
  # Placeholder entries ("Pair Class Missing") become 0 so the columns can be numeric
  CompTreatTable[,treat1]<-gsub("Pair Class Missing",0,CompTreatTable[,treat1])
  CompTreatTable[,treat2]<-gsub("Pair Class Missing",0,CompTreatTable[,treat2])
  # Restrict the table to the requested edge feature
  CompTreatTableF<-CompTreatTable %>% filter(.data$EdgeFeature==edgeF)
  CompTreatTableF<-CompTreatTableF[,c(2:4)]
  #Make sure columns are numeric
  CompTreatTableF[,2]<-as.numeric(CompTreatTableF[,2]) #Treat1
  CompTreatTableF[,3]<-as.numeric(CompTreatTableF[,3]) #Treat2
  #Relationship of edge features with lipid classes for specific gene:
  # sum the counts per vertex-class pair across rows
  CompTreatTableF_VertexClasses<-CompTreatTableF %>% group_by(.data$VertexClassPair) %>% summarise(across(tidyselect::vars_select_helpers$where(is.numeric),sum))
  # Long format: one row per (vertex-class pair, treatment) for ggplot
  ResultsFBarplot <- gather(CompTreatTableF_VertexClasses, treatment, number_pairs,-VertexClassPair)
  #Reorder factors if the user specified an explicit treatment order
  if(!is.null(factorOrder)){
    ResultsFBarplot <- ResultsFBarplot %>% mutate(treatment = fct_relevel(.data$treatment,factorOrder))
  }
  #Create bar plot (side-by-side bars per treatment, counts as labels above bars)
  p<-ggplot(ResultsFBarplot, aes(x=.data$VertexClassPair, y=.data$number_pairs, fill=.data$treatment)) +
    geom_bar(width = 0.4,stat="identity",position="dodge")  +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
          legend.title = element_text(size = 14),
          legend.text = element_text(size = 10),
          plot.title = element_text(hjust = 0.5,size=20),
          axis.title=element_text(size = szaxisTitle,face="bold", colour = "black"),
          axis.text = element_text(size = szaxisTxt))+
    scale_fill_manual("treatment", values = c(col1,col2))+
    geom_text(aes(label=.data$number_pairs),size=3, position=position_dodge(width=0.9), vjust=-0.25)+
    ggtitle(paste0(EdgeFeatureType,": ",edgeF))+
    xlab(xlb)+
    ylab(ylb)
  return(p)
}
#' Vertex-class pairs profile of shared features
#' @description This function will create a barplot from the output of Compare_VertexClasses_sharedEdgeFeatures using all shared Edge Features (e.g., genes).
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param treat1 Name of treatment one, default Treatment1. It should match the column names of the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param treat2 Name of treatment two, default Treatment2. It should match the column names of the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param factorOrder A list specifying the order of the treatments.
#' @param col1 Color for Treatment 1
#' @param col2 Color for Treatment 2
#' @param maxpairs If number of class-vertex-pairs > maxpairs, display number pairs on top of bar
#' @param xlb Name for x-axis
#' @param ylb Name for the y-axis
#' @param szggrepel Size ggrepel labels
#' @param nudgey Nudge y ggrepel
#' @param nudgex Nudge x ggrepel
#' @param szaxisTxt Size axis text
#' @param szaxisTitle Size axis title
#' @export
#' @return A ggplot object for a barplot. The barplot shows the vertex-class pairs profile of all shared edge features between treatments
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' create_GlobalBarplot(CompTreatTable = VertexClassesSharedGenes_HFDvsChow,
#'                      treat1 = "HFD",
#'                      treat2 = "Chow",
#'                      factorOrder = c("HFD","Chow"),
#'                      col1="red",
#'                      col2 ="blue",
#'                      maxpairs = 1,
#'                      szggrepel = 6,
#'                      szaxisTxt = 15,
#'                      szaxisTitle = 15,
#'                      xlb = "Metabolite-pair classes")
#' @import ggplot2
#' @import ggrepel
#' @importFrom tidyr gather
#' @importFrom forcats fct_relevel
#' @importFrom rlang .data
#' @importFrom tidyselect vars_select_helpers
create_GlobalBarplot<-function(CompTreatTable,
                               treat1="Treatment1",
                               treat2="Treatment2",
                               factorOrder=NULL,
                               col1="red",
                               col2="blue",
                               maxpairs=1,
                               xlb="Vertex-Class Pairs",
                               ylb="Number of pairs",
                               szggrepel =3.5,
                               nudgey=0.5,
                               nudgex=0.5,
                               szaxisTxt=12,
                               szaxisTitle=12){
  # Dummy bindings to avoid R CMD check NOTEs about non-standard evaluation below
  treatment <- number_pairs <- VertexClassPair <- NULL
  # "Pair Class Missing" marks class pairs absent in one treatment; treat them as count 0
  CompTreatTable[,treat1]<-gsub("Pair Class Missing",0,CompTreatTable[,treat1])
  CompTreatTable[,treat2]<-gsub("Pair Class Missing",0,CompTreatTable[,treat2])
  #Get rid of edge features, as we want a global view
  # (columns 2:4 = VertexClassPair + the two treatment count columns)
  CompTreatTable_NoEdgeFeatures<-CompTreatTable[,c(2:4)]
  #Make sure columns are numeric
  CompTreatTable_NoEdgeFeatures[,2]<-as.numeric(CompTreatTable_NoEdgeFeatures[,2]) #Treat1
  CompTreatTable_NoEdgeFeatures[,3]<-as.numeric(CompTreatTable_NoEdgeFeatures[,3]) #Treat2
  #Global view of the relationship of edge features with lipid classes:
  # sum counts per vertex-class pair, then reshape to long format for grouped plotting
  CompTreatTable_VertexClasses<-CompTreatTable_NoEdgeFeatures %>% group_by(.data$VertexClassPair) %>% summarise(across(tidyselect::vars_select_helpers$where(is.numeric),sum))
  GlobalResultsFBarplot <- tidyr::gather(CompTreatTable_VertexClasses, treatment, number_pairs,-VertexClassPair)
  #Reorder factors (controls which treatment bar is drawn first / legend order)
  if(!is.null(factorOrder)){
    GlobalResultsFBarplot <- GlobalResultsFBarplot %>% mutate(treatment = forcats::fct_relevel(.data$treatment,factorOrder))
  }
  #Create bar plot. Counts >= maxpairs are labeled: treat1 bars get a plain
  # geom_text label, treat2 bars a repelled label (nudged away to avoid overlap)
  p<-ggplot(GlobalResultsFBarplot, aes(x=.data$VertexClassPair, y=.data$number_pairs, fill=.data$treatment)) +
    geom_bar(width = 0.4,stat="identity",position="dodge") +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
          legend.title = element_text(size = 14),
          legend.text = element_text(size = 12),
          axis.title=element_text(size = szaxisTitle,face="bold", colour = "black"),
          axis.text = element_text(size = szaxisTxt))+
    geom_text(data = subset(GlobalResultsFBarplot, number_pairs >= maxpairs & treatment==treat1),
              aes(label=.data$number_pairs),
              show.legend = FALSE ,
              size=szggrepel,
              position=position_dodge(width=0.9),
              vjust=-0.25)+
    geom_text_repel(data = subset(GlobalResultsFBarplot, number_pairs >= maxpairs & treatment==treat2),
                    aes(label=.data$number_pairs),
                    show.legend = FALSE,
                    size = szggrepel,
                    min.segment.length = unit(0, 'lines'),
                    hjust=0,
                    nudge_y = nudgey,
                    nudge_x = nudgex,
                    direction="y",
                    point.padding =NA,
                    force = 0.1,
                    segment.alpha=0.3,
                    max.overlaps=Inf)+
    scale_fill_manual("treatment", values = c(col1,col2))+
    scale_colour_manual(values=c(col2, col1))+
    xlab(xlb)+
    ylab(ylb)+
    coord_cartesian(clip = "off")  # allow labels nudged outside the panel to remain visible
  return(p)
}
#'Labels to colors
#' @description Internal use. This function is modified version of labels2colors of WGCNA and the internet.
#' Maps a vector (or matrix) of labels to color names. Numeric labels index directly into
#' the color sequence; non-numeric labels are converted via factor levels. Label 0 maps to
#' "grey" (when zeroIsGrey) and NA labels map to naColor.
#' @keywords internal
#' @return A character vector with colors
labels2colors_2<-function (labels, zeroIsGrey = TRUE, colorSeq = NULL, naColor = "grey", commonColorCode = TRUE) {
  # Returns the first n standard colors (or all of them when n is NULL).
  # NOTE: .GlobalStandardColors is defined later in the enclosing function body;
  # lexical scoping makes it visible here by the time standardColors() is called.
  standardColors<-function (n = NULL) {
    if (is.null(n))
      return(.GlobalStandardColors)
    if ((n > 0) && (n <= length(.GlobalStandardColors))) {
      return(.GlobalStandardColors[c(1:n)])
    }
    else {
      stop("Invalid number of standard colors requested.")
    }
  }
  # This code forms a vector of color names in which the first entries are given by BaseColors and the rest
  # is "randomly" chosen from the rest of R color names that do not contain "grey" nor "gray".
  BaseColors = c("turquoise","blue","brown","yellow","green","red","pink","magenta",
                 "purple","greenyellow","tan","salmon","cyan", "midnightblue", "lightcyan",
                 "grey60", "lightgreen", "lightyellow", "royalblue", "darkred", "darkgreen",
                 "darkturquoise", "darkgrey",
                 "orange", "darkorange", "white", "skyblue", "saddlebrown", "steelblue",
                 "paleturquoise", "violet", "darkolivegreen", "darkmagenta" );
  # Drop grey/gray shades and black from the pool of extra colors
  RColors = colors()[-grep("grey", colors())];
  RColors = RColors[-grep("gray", RColors)];
  RColors = RColors[-grep("black", RColors)];
  InBase = match(BaseColors, RColors);
  ExtraColors = RColors[-c(InBase[!is.na(InBase)])];
  nExtras = length(ExtraColors);
  # Here is the vector of colors that should be used by all functions:
  # the sin()-based rank gives a deterministic pseudo-random shuffle of the extras
  .GlobalStandardColors = c(BaseColors, ExtraColors[rank(sin(13*c(1:nExtras) +sin(13*c(1:nExtras))) )] );
  if (is.null(colorSeq))
    colorSeq = standardColors()
  if (is.numeric(labels)) {
    # Numeric labels are used directly as indexes into colorSeq
    if (zeroIsGrey)
      minLabel = 0
    else minLabel = 1
    if (any(labels < 0, na.rm = TRUE))
      minLabel = min(c(labels), na.rm = TRUE)
    nLabels = labels
  }
  else {
    # Non-numeric labels: convert to numeric codes via factor levels
    if (commonColorCode) {
      # One common factor over all columns so identical labels share a color
      factors = factor(c(as.matrix(as.data.frame(labels))))
      nLabels = as.numeric(factors)
      dim(nLabels) = dim(labels)
    }
    else {
      # Independent factor (and hence color code) per column
      labels = as.matrix(as.data.frame(labels))
      factors = list()
      for (c in 1:ncol(labels)) factors[[c]] = factor(labels[, c])
      nLabels = sapply(factors, as.numeric)
    }
  }
  if (max(nLabels, na.rm = TRUE) > length(colorSeq)) {
    # More labels than colors: recycle colors with a ".<rep>" suffix.
    # NOTE(review): nRepeats is computed from max(labels), not max(nLabels) —
    # for character labels max() is lexicographic; inherited from WGCNA, confirm intended.
    nRepeats = as.integer((max(labels) - 1)/length(colorSeq)) + 1
    warning(paste("labels2colors: Number of labels exceeds number of avilable colors.",
                  "Some colors will be repeated", nRepeats, "times."))
    extColorSeq = colorSeq
    for (rep in 1:nRepeats) extColorSeq = c(extColorSeq, paste(colorSeq, ".", rep, sep = ""))
  }
  else {
    nRepeats = 1
    extColorSeq = colorSeq
  }
  # Default everything to grey, then fill in NA color and the per-label colors
  colors = rep("grey", length(nLabels))
  fin = !is.na(nLabels)
  colors[!fin] = naColor
  finLabels = nLabels[fin]
  colors[fin][finLabels != 0] = extColorSeq[finLabels[finLabels != 0]]
  if (!is.null(dim(labels)))
    dim(colors) = dim(labels)
  colors
}
#' Get class rgb color
#' @description Internal use. Looks up the hexadecimal (rgb) color assigned to a
#' specific (lipid) class in a color annotation table. The table must contain a
#' column whose name ends in "class" and a column whose name contains "rgb"
#' (both matched case-insensitively).
#' @keywords internal
#' @return A character object, that corresponds to a color in hexadecimal format
getcolor<-function(ClassM,tableColor){
  # Locate the class and rgb-color columns by name (case-insensitive)
  classColIdx<-grep("class$",colnames(tableColor),ignore.case = TRUE)
  rgbColIdx<-grep("rgb",colnames(tableColor),ignore.case = TRUE)
  # Keep only rows of the requested class and return their (deduplicated) rgb color
  matchingRows<-which(tableColor[,classColIdx]==ClassM)
  hexColor<-unique(tableColor[matchingRows,rgbColIdx])
  return(hexColor)
}
#' Assign Colors to Class
#'@description This function assigns two color columns (color name and rgb) to an annotation data frame according to a column named 'Class' or 'class'
#'@param AnnotationDf Annotation data frame that contains a factor variable to use to assign colors
#'@param col Column with factor variable that will be used to assign colors
#'@export
#'@return The input data.frame with two extra columns specifying the colors
#' for all vertexes according to their respective vertex-class
#'@importFrom gplots col2hex
assign_colorsAnnotation<-function(AnnotationDf,col="Class"){
  # Locate the class column by name (case-insensitive suffix match)
  classColIdx<-grep(paste0(col,"$"),colnames(AnnotationDf),ignore.case = TRUE)
  # Encode each class as an integer level, map levels to color names,
  # then derive the hexadecimal equivalent of every color name
  classCodes<-as.numeric(as.factor(AnnotationDf[,classColIdx]))
  AnnotationDf$Color<-labels2colors_2(classCodes)
  AnnotationDf$ColorRgb<-col2hex(AnnotationDf$Color)
  return(AnnotationDf)
}
#' Get colors
#' @description Internal use. This function gets the rgb colors of every (lipid) class and names them according to the class
#' @keywords internal
#' @return A character vector with the rgb colors named after the vertex-class (e.g. lipid class)
obtain_groupcolors<-function(Annotation){
  # Locate the class column (case-insensitive) and collect its unique classes
  classColIdx<-grep("class$",colnames(Annotation),ignore.case = TRUE)
  uniqueClasses<-unique(Annotation[,classColIdx])
  # Look up the rgb color of each class and name the vector by class
  group.colors<-unlist(lapply(uniqueClasses,function(cls) getcolor(cls,Annotation)))
  names(group.colors)<-uniqueClasses
  return(group.colors)
}
#' Number lipid features per class
#' @description Internal use. This function counts for every edge feature the number of vertex features (e.g. lipids) per class. It disregards the number of vertex pairs...
#' @keywords internal
#' @importFrom tidyr separate
#' @import dplyr
#' @return A data.frame with the number of vertex features per class and edge feature,
#' ordered per edge feature from highest to lowest count
countClassPerEdgeFeature<-function(ResTable,treatment="Chow"){
  EdgeFeatures<-unique(ResTable$EdgeFeature)
  countRows<-list() #Collect per-class rows and bind once at the end (avoids growing a data.frame inside the loop)
  for (edgeFeature in EdgeFeatures){
    FractionEdgeFeature<-ResTable %>% filter(.data$EdgeFeature==edgeFeature) #Count for every EdgeFeature
    FractionEdgeFeature <- FractionEdgeFeature %>% tidyr::separate(.data$VertexClassPair, c("Vertex1", "Vertex2"), "_") #Split the Vertex pairs, get two columns
    TrColumn<-which(colnames(FractionEdgeFeature) == treatment) #Get index of the desired treatment to count Vertexs
    FractionEdgeFeature<-FractionEdgeFeature[FractionEdgeFeature[,TrColumn]>0,] #Get the instances that are not zero
    VertexsPresent<-unique(c(FractionEdgeFeature$Vertex1, FractionEdgeFeature$Vertex2)) #Get the unique Vertexs the gene/EdgeFeature is connected to
    for (vertex in VertexsPresent){ #Loop the Vertexs present
      #Exact matching (==) instead of grep(): grep() performs partial/regex matching,
      #so a class such as "PC" would also (wrongly) count rows of e.g. "LPC"
      IdxVertex1<-which(FractionEdgeFeature$Vertex1 == vertex) #Rows where the class is the first member of the pair
      IdxVertex2<-which(FractionEdgeFeature$Vertex2 == vertex) #Rows where the class is the second member of the pair
      SumVertex1<-sum(FractionEdgeFeature[IdxVertex1,TrColumn]) #Counts as first member
      SumVertex2<-sum(FractionEdgeFeature[IdxVertex2,TrColumn]) #Counts as second member
      TotalVertexClassEdgeFeature<-SumVertex1+SumVertex2 #Total: number of times this EdgeFeature is involved with this Vertex class
      countRows[[length(countRows)+1]]<-data.frame(EdgeFeature=edgeFeature,
                                                   VertexClass=vertex,
                                                   Count=TotalVertexClassEdgeFeature,
                                                   stringsAsFactors = FALSE)
    }
  }
  #No counts at all: return an empty, correctly-typed data.frame
  if(length(countRows)==0){
    return(data.frame(EdgeFeature=character(),VertexClass=character(),Count=numeric(),stringsAsFactors = FALSE))
  }
  ResCountVertexClass<-do.call(rbind,countRows) #Count is already numeric, no coercion needed
  ResCountVertexClass <- ResCountVertexClass %>% filter(.data$Count>0)
  #Order from high to low per EdgeFeature (the former `group_by =` name was ignored by arrange())
  ResCountVertexClass<-ResCountVertexClass %>% arrange(.data$EdgeFeature, dplyr::desc(.data$Count))
  return(ResCountVertexClass)
}
#' Split function
#' @description Internal use. Function to split the EdgeFeatures in smaller groups
#' @keywords internal
#' @return A list of character vectors, each vector contains n edge features
chunk2 <- function(x,n) {
  # Assign each element to one of n contiguous, (roughly) equal-sized bins
  binIds <- cut(seq_along(x), n, labels = FALSE)
  split(x, binIds)
}
#' Number Vertex features per class for every shared edge feature
#' @description Internal use. This function creates a barplot depicting the number of vertex features per class for every edge feature. To use this function one has to split first the file (or not if it is small) with the function chunk2
#' @keywords internal
#' @import ggrepel
#' @import ggplot2
#' @return ggplot object for a barplot depicting the number of vertex features per class for every edge feature
barplot_VertexsPerEdgeFeature<-function(SplitFile,title="Vertex Features per class",AnnotationWithColors,ggrepelL=TRUE,xlb="Gene",szggrepel=2.5,szTitle=12,szaxisTxt=12,szaxisTitle=12,szLegendText=10,szLegendKey=1){
  # Dummy bindings to avoid R CMD check NOTEs about non-standard evaluation below
  EdgeFeature <- Count <- VertexClass <- NULL
  # Map every vertex class to its rgb color (named vector, used by scale_fill_manual)
  group.colors<-obtain_groupcolors(AnnotationWithColors)
  #Get table that counts how many times the EdgeFeature appears,
  #every time corresponds to a number of a specific Vertex class
  TimesEdgeFeatureTable<-table(SplitFile$EdgeFeature)
  #Create a variable called Id to set order in group barplot
  # NOTE(review): assumes SplitFile rows are already grouped/sorted by EdgeFeature
  # (as produced by countClassPerEdgeFeature) so the 1..k counters line up — confirm
  Id<-c()
  for (i in names(TimesEdgeFeatureTable)){
    Id<-c(Id,1:TimesEdgeFeatureTable[i])
  }
  SplitFile$Id<-as.factor(Id)
  # Dodged bars: one bar per class occurrence within each edge feature
  g<-ggplot(SplitFile, aes(x=EdgeFeature, y=as.numeric(Count), fill=VertexClass,group=Id))+
    geom_bar(width = 1,stat="identity",position=position_dodge(0.7)) +
    theme(axis.text.x = element_text(angle = 45, hjust=1),
          legend.title = element_text(size = szLegendText+2),
          legend.text = element_text(size = szLegendText),
          legend.key.size = unit(szLegendKey,"cm"),
          plot.title = element_text(hjust = 0.5,size=szTitle,face="bold"),
          axis.title.y=element_text(angle=90),
          axis.title=element_text(size = szaxisTitle,face="bold", colour = "black"),
          axis.text = element_text(size = szaxisTxt)) +
    scale_fill_manual(values=group.colors,
                      name="Vertex Class",) +
    xlab(xlb)+
    ylab("Count")
  # Optionally label each bar with its vertex class using repelled text
  if(ggrepelL){
    g<-g+geom_text_repel(aes(label = VertexClass),
                         size=szggrepel,color="black",
                         min.segment.length = unit(0, 'lines'),
                         nudge_y=0.1,
                         nudge_x=0.1,
                         vjust = -1,
                         force = 0.1,
                         segment.alpha=0.3,
                         max.overlaps=Inf
    )
  }
  g<-g + ggtitle(title)
  return(g)
}
#' Vertex Class profile per edge feature (one treatment)
#' @description This function creates a barplot or barplots showing the number of vertex features per class for every shared edge feature between two treatments
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param Annotation Data frame that includes the rgb colors for every class. The column 'class' (or 'Class') has to be present and also the column 'ColorRgb'
#' @param chunks To avoid a non readable dense plot the results can be split in multiple plots
#' @param treat Specify the treatment for which the plot will be created. It should be one of the two treatments in the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param small logical. If only a few edge features are in the input set as TRUE. A single plot will be created
#' @param ggrep logical. If TRUE includes ggrepel labels for every bar
#' @param xlb x-axis label
#' @param onlyTable logical. If TRUE a table is returned instead of a plot
#' @param szTitle Size title
#' @param szaxisTxt Size axis text
#' @param szaxisTitle Size axis title
#' @param ... Other parameters for inner functions, mainly ggplot2 visual parameters
#' @return A list of ggplot objects to create different barplots. The barplots show the number of vertex features per class for every shared
#' edge feature between two treatments. The barplots restrict to one of the compared treatments. An alternative output
#'is a data.frame with the number of vertex features per class and edge feature (onlyTable=TRUE)
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' data(MetColorTable)
#' #Note: No differences in example as all the Output of CoNI was kept
#' getVertexsPerEdgeFeature(CompTreatTable = VertexClassesSharedGenes_HFDvsChow,
#'                          Annotation = MetColorTable,
#'                          chunks = 2,
#'                          treat = "HFD")
#' @export
#' @import ggplot2
#' @import ggrepel
getVertexsPerEdgeFeature<-function(CompTreatTable, Annotation,
                                   chunks = 5, treat=NULL,
                                   small = FALSE,
                                   ggrep = TRUE,
                                   xlb = "Gene",
                                   onlyTable = FALSE,
                                   szTitle = 12,
                                   szaxisTxt = 12,
                                   szaxisTitle = 12, ...){
  # treat is mandatory: the counts are treatment-specific
  if(is.null(treat)){
    stop("Specify treatment")
  }
  EdgeFeatureVertex<-Table<-countClassPerEdgeFeature(CompTreatTable,treatment = treat) #First count per EdgeFeature the number of Vertexs for each class, function above
  # EdgeFeatureVertex$Count <- as.numeric(EdgeFeatureVertex$Count)
  # EdgeFeatureVertex<-EdgeFeatureVertex %>% arrange(group_by = EdgeFeature, dplyr::desc(Count)) #Order the table from high to low per EdgeFeature
  if(onlyTable){
    return(EdgeFeatureVertex)
  }
  #Get the EdgeFeatures of the table
  EdgeFeatures<-unique(EdgeFeatureVertex$EdgeFeature)
  barplots<-list()
  if(!small){
    #Split the results in n pieces so the plots are readable
    SplitIntoPieces<-chunk2(x = EdgeFeatures,chunks)
    for(i in 1:chunks){
      Split<-EdgeFeatureVertex[EdgeFeatureVertex$EdgeFeature %in% SplitIntoPieces[[i]],]
      barplots[[i]]<-barplot_VertexsPerEdgeFeature(Split,title = treat,AnnotationWithColors = Annotation,ggrepelL = ggrep,xlb = xlb,szaxisTxt = szaxisTxt,szaxisTitle=szaxisTitle,...)
      # Print the maximum count of this chunk; getVertexsPerEdgeFeature_and_Grid
      # captures this output (capture.output) to align y-limits across treatments
      cat(max(Split$Count),"\n")
    }
  }else{
    # Few edge features: a single plot covering everything
    barplots[[1]]<-barplot_VertexsPerEdgeFeature(EdgeFeatureVertex,title=treat,AnnotationWithColors = Annotation,ggrepelL=ggrep,xlb = xlb,szaxisTxt = szaxisTxt,szaxisTitle=szaxisTitle,szTitle=szTitle,...)
    cat(max(EdgeFeatureVertex$Count),"\n")
  }
  return(barplots)
}
#' Vertex-Class profile per edge feature Side-by-Side (two treatments)
#' @description This function creates a grid of barplots. The barplot of one side depicts the number of class vertex features per edge feature for treatment 1 and the other side the same barplot for treatment 2. Results of both Treatments are side by side for better comparison.
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param Treat1 Name treatment 1 as in table CompTreatTable
#' @param Treat2 Name treatment 2 as in table CompTreatTable
#' @param Annotation Data frame that includes the rgb colors for every class. The column 'class' (or 'Class') has to be present and also the column 'ColorRgb'
#' @param chunks To avoid a non readable dense plot the results can be split in multiple plots
#' @param ggrep logical. If TRUE includes ggrepel labels for every bar
#' @param xlb Change the x-axis label
#' @param onlyT logical. If TRUE a table is returned instead of a grid of plots
#' @param small logical. If only a few edge features are in the input set as TRUE. A single plot will be created
#' @param ... Other parameters for inner functions, mainly ggplot2 visual parameters
#' @return A gtable containing side-by-side barplots, one for each treatment, showing the number of vertex features per class for every shared edge feature
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' VCSGs<-VertexClassesSharedGenes_HFDvsChow
#' data(MetColorTable)
#' HFD_vs_Chow_LCP_Gene<-getVertexsPerEdgeFeature_and_Grid(VCSGs,
#'                                                         "HFD","Chow",
#'                                                         Annotation=MetColorTable,
#'                                                         ggrep=FALSE,
#'                                                         small = FALSE,
#'                                                         chunks = 3,
#'                                                         szLegendKey=0.2)
#' plot(HFD_vs_Chow_LCP_Gene)
#' @export
#' @import ggplot2
#' @import ggrepel
#' @importFrom gridExtra arrangeGrob
#' @importFrom utils capture.output
getVertexsPerEdgeFeature_and_Grid<-function(CompTreatTable,
                                            Treat1, Treat2, Annotation,
                                            chunks = 3, ggrep = TRUE,
                                            xlb = "Edge Feature",
                                            onlyT = FALSE,
                                            small = FALSE,...){
  if(small){
    # getVertexsPerEdgeFeature cat()s the max count of each plot; capture.output
    # grabs those values so both treatments can share the same y-limit
    ylimTreat1<-capture.output(Treat1Plot<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat1,Annotation=Annotation,ggrep = ggrep,xlb = xlb,...))
    ylimTreat2<-capture.output(Treat2Plot<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat2,Annotation=Annotation,ggrep = ggrep, xlb =xlb,...))
    #Get tables
    Treat1Table<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat1,Annotation=Annotation,ggrep = ggrep,xlb = xlb,onlyTable = TRUE)
    Treat2Table<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat2,Annotation=Annotation,ggrep = ggrep, xlb =xlb,onlyTable = TRUE)
    if(onlyT){
      # Merge both per-treatment count tables; missing combinations become 0
      Table_VertexProfile<-Treat1Table %>% full_join(Treat2Table,by= c(colnames(Treat1Table)[1],colnames(Treat1Table)[2]),suffix=c(paste0("_",Treat1),paste0("_",Treat2)))
      Table_VertexProfile <- Table_VertexProfile %>% mutate_at(vars( starts_with("Count_") ),
                                                               ~if_else( is.na(.), 0, .) )
      return(Table_VertexProfile)
    }
    # Use the larger of the two captured maxima as the common y-limit
    ylim<-cbind(as.numeric(ylimTreat1),as.numeric(ylimTreat2))
    ylim_max<-apply(ylim,1,max)
    #Assign limits
    plots<-c()
    Tr1<-Treat1Plot[[1]]+ylim(0,ylim_max)
    Tr2<-Treat2Plot[[1]]+ylim(0,ylim_max)
    plots[[1]]<-Tr1
    plots[[2]]<-Tr2
    arrangeGrob(grobs=plots,ncol=2)
  }else{
    #Get ylimits and plots (one pair of captured maxima per chunk)
    ylimTreat1<-capture.output(Treat1Plots<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat1,Annotation=Annotation,ggrep = ggrep,xlb = xlb,...))
    ylimTreat2<-capture.output(Treat2Plots<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,small=small,treat = Treat2,Annotation=Annotation,ggrep = ggrep, xlb =xlb,...))
    #Get tables
    Treat1Table<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,treat = Treat1,Annotation=Annotation,ggrep = ggrep,xlb = xlb,onlyTable = TRUE)
    Treat2Table<-getVertexsPerEdgeFeature(CompTreatTable,chunks = chunks,treat = Treat2,Annotation=Annotation,ggrep = ggrep, xlb =xlb,onlyTable = TRUE)
    if(onlyT){
      # Merge both per-treatment count tables; missing combinations become 0
      Table_VertexProfile<-Treat1Table %>% full_join(Treat2Table,by= c(colnames(Treat1Table)[1],colnames(Treat1Table)[2]),suffix=c(paste0("_",Treat1),paste0("_",Treat2)))
      Table_VertexProfile <- Table_VertexProfile %>% mutate_at(vars( starts_with("Count_") ),
                                                               ~if_else( is.na(.), 0, .) )
      return(Table_VertexProfile)
    }
    # Per-chunk common y-limit = max of the two treatments' maxima
    ylim<-cbind(as.numeric(ylimTreat1),as.numeric(ylimTreat2))
    ylim_max<-apply(ylim,1,max)
    #Assign limits
    plots<-c()
    i<-0 #As there are 2 plots per chunk, I need to loop two numbers simultaneously. One to assign the limits and one to add to the list
    for(j in seq(1,chunks*2,2)){
      i<-i+1
      Tr1<-Treat1Plots[[i]]+ylim(0,ylim_max[i])
      Tr2<-Treat2Plots[[i]]+ylim(0,ylim_max[i])
      plots[[j]]<-Tr1
      plots[[j+1]]<-Tr2
    }
    return(arrangeGrob(grobs=plots,ncol=2)) #instead of grid.arrange that plots to console to avoid error related to size of screen
  }
}
#'Stacked Global Barplot (One treatment)
#' @description This function will create a stacked barplot from the output of Compare_VertexClasses_sharedEdgeFeatures using all shared Edge Features (e.g., genes) between two treatments.
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param treat Name of treatment to display. It should match the column name in the output of Compare_VertexClasses_sharedEdgeFeatures
#' @param xlb Name for x-axis
#' @param ylb Name for y-axis
#' @param max_pairsLegend If number of Edge Features >= max_pairsLegend, display number of Edge Features as label with ggrepel
#' @param mx.overlaps Max number of overlaps ggrepel
#' @param szggrepel Size ggrepel labels
#' @param force Repelling force for ggrepel labels
#' @param szTitle Size Title
#' @param szaxisTxt Size axis text
#' @param szaxisTitle Size axis titles
#' @param ylim Optional y-limits of the plot
#' @import ggplot2
#' @import ggrepel
#' @return A ggplot object to create a stacked barplot. The stacked barplot shows the vertex-class pairs profile of all shared edge features but restricted to a single treatment. Every bar consists of multiple edge features (stacked) that are represented with different colors
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' create_stackedGlobalBarplot_perTreatment(CompTreatTable = VertexClassesSharedGenes_HFDvsChow,
#'                                          treat = "HFD",
#'                                          max_pairsLegend = 9,
#'                                          xlb = "Metabolite-class-pairs")
#' @export
#' @importFrom tidyr gather
#' @importFrom forcats fct_relevel
#' @importFrom gplots col2hex
#' @importFrom rlang .data
#' @importFrom tidyselect vars_select_helpers
create_stackedGlobalBarplot_perTreatment<-function(CompTreatTable,
                                                   treat=NULL,
                                                   xlb="Vertex-Class Pairs",
                                                   ylb="Number of pairs",
                                                   max_pairsLegend = 2,
                                                   mx.overlaps = Inf,
                                                   szggrepel=6,
                                                   force=0.1,
                                                   szTitle=12, szaxisTxt=12, szaxisTitle=12,
                                                   ylim=NULL){
  # Dummy bindings to avoid R CMD check NOTEs about non-standard evaluation below
  treatment <- number_pairs <- VertexClassPair <- EdgeFeature <- label <- NULL
  # The last two columns hold the per-treatment counts; replace the
  # "Pair Class Missing" placeholder with 0 and coerce to numeric
  CompTreatTable[,ncol(CompTreatTable)]<-as.numeric(gsub("Pair Class Missing",0,CompTreatTable[,ncol(CompTreatTable)]))
  CompTreatTable[,ncol(CompTreatTable)-1]<-as.numeric(gsub("Pair Class Missing",0,CompTreatTable[,ncol(CompTreatTable)-1]))
  if(is.null(treat)){
    # Fail fast with an error (consistent with getVertexsPerEdgeFeature) instead of
    # printing a message and silently returning NULL
    stop("Provide treatment to filter data e.g., treat='HFD'")
  }else{
    #Global view of the relationship of edge features with metabolite classes:
    # sum counts per (class pair, edge feature), reshape to long format and keep
    # only the requested treatment
    Stacked<-CompTreatTable %>% group_by(.data$VertexClassPair,.data$EdgeFeature) %>% summarise(across(tidyselect::vars_select_helpers$where(is.numeric),sum))
    StackedFBarplot <- tidyr::gather(Stacked, treatment, number_pairs,c(-VertexClassPair,-EdgeFeature))
    StackedFBarplotTreat<-StackedFBarplot %>% filter(.data$treatment==treat)
    # One distinct color per edge feature (named by feature for the manual scales)
    ColorTable<-data.frame(
      EdgeFeature=StackedFBarplotTreat$EdgeFeature,
      ColorGroup=labels2colors_2(as.numeric(as.factor(StackedFBarplotTreat$EdgeFeature))))
    ColorGroupRgb<-col2hex(ColorTable$ColorGroup)
    names(ColorGroupRgb) = ColorTable$EdgeFeature
    #StackedFBarplotTreat$EdgeFeature <- factor(StackedFBarplotTreat$EdgeFeature, levels = ColorTable$EdgeFeature)
    # Print the tallest stacked bar; getstackedGlobalBarplot_and_Grid captures this
    # output (capture.output) to align y-limits across treatments
    countmaxperLipidClass<-StackedFBarplotTreat %>% group_by(.data$VertexClassPair) %>% summarise(number_pairs = sum(.data$number_pairs))
    cat(max(countmaxperLipidClass$number_pairs))
    # Stacked bars: one segment per edge feature; segments with >= max_pairsLegend
    # pairs get a repelled "<feature>: <count>" label
    TreatStacked <- ggplot(StackedFBarplotTreat, aes(x =.data$VertexClassPair , y = .data$number_pairs, fill=.data$EdgeFeature))+
      geom_bar(stat="identity") +
      theme(legend.position = "none") + #Remove legend
      theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
            plot.title = element_text(hjust = 0.5,size=szTitle,face="bold"),
            axis.title=element_text(size = szaxisTitle,face="bold", colour = "black"),
            axis.text = element_text(size = szaxisTxt))+
      geom_text_repel(data = StackedFBarplotTreat %>%
                        mutate(label = ifelse(number_pairs>=max_pairsLegend,paste0(EdgeFeature,": ",number_pairs),"")),
                      aes(label=label),
                      size = szggrepel,
                      position = position_stack(vjust = 0.5),
                      color="black",
                      alpha=0.6,
                      force= force,
                      max.overlaps=mx.overlaps,
                      min.segment.length = unit(0, 'lines')) +
      scale_colour_manual(values = ColorGroupRgb) +
      scale_fill_manual(values=ColorGroupRgb)+
      xlab(xlb)+
      ylab(ylb)
    if(!is.null(ylim)){
      TreatStacked<-TreatStacked+ ylim(c(0,ylim))
    }
    TreatStacked<-TreatStacked + ggtitle(treat)
    return(TreatStacked)
  }
}
#' Stacked Global Barplot Side-by-side (two treatments)
#' @description This function will create a stacked barplot from the output of Compare_VertexClasses_sharedEdgeFeatures using all shared Edge Features (e.g., genes) between two treatments. Results of both Treatments are side by side for better comparison.
#' @param CompTreatTable Output of Compare_VertexClasses_sharedEdgeFeatures
#' @param Treat1 Name treatment 1 as in table CompTreatTable
#' @param Treat2 Name treatment 2 as in table CompTreatTable
#' @param ggrep logical. If TRUE includes ggrepel labels for every bar
#' @param max_pairsLegend If number of Edge Features >= max_pairsLegend, display number of Edge Features as ggrepel label
#' @param mx.overlaps Max number of overlaps ggrepel
#' @param szggrepel Size ggrepel labels
#' @param force Repelling force for ggrepel labels
#' @param xlb Name for x-axis
#' @param ... Other parameters for inner functions, mainly ggplot2 visual parameters
#' @return A gtable containing stacked barplots. The barplots show the vertex-class pairs profile of all shared edge features between two treatments (one bar plot per treatment). Every bar consists of multiple edge features that are depicted with different colors
#' @examples
#' data(VertexClassesSharedGenes_HFDvsChow)
#' VCSGs<-VertexClassesSharedGenes_HFDvsChow
#' HFD_vs_Chow_stackedBarplot<-getstackedGlobalBarplot_and_Grid(VCSGs,
#'                                                              Treat1 = "HFD",
#'                                                              Treat2 = "Chow",
#'                                                              xlb = "Metabolite-class-pairs",
#'                                                              max_pairsLegend=9)
#' plot(HFD_vs_Chow_stackedBarplot)
#' @importFrom utils capture.output
#' @export
getstackedGlobalBarplot_and_Grid<-function(CompTreatTable, Treat1, Treat2,
                                           ggrep=TRUE, max_pairsLegend = 1, force = 0.1, mx.overlaps=Inf, szggrepel=6,
                                           xlb = "Vertex-Class Pairs",...){
  #Get ylimits and plots. create_stackedGlobalBarplot_perTreatment cat()s the
  # height of its tallest stacked bar; capture.output grabs that value so both
  # treatments can later share a common y-limit
  ylimTreat1<-capture.output(Treat1Plot<-create_stackedGlobalBarplot_perTreatment(CompTreatTable = CompTreatTable,
                                                                                  treat = Treat1,
                                                                                  max_pairsLegend = max_pairsLegend,
                                                                                  force = force,
                                                                                  xlb = xlb,
                                                                                  mx.overlaps = mx.overlaps,
                                                                                  szggrepel = szggrepel, ...))
  ylimTreat2<-capture.output(Treat2Plot<-create_stackedGlobalBarplot_perTreatment(CompTreatTable = CompTreatTable,
                                                                                  treat = Treat2,
                                                                                  max_pairsLegend = max_pairsLegend,
                                                                                  force = force,
                                                                                  xlb = xlb,
                                                                                  mx.overlaps = mx.overlaps,
                                                                                  szggrepel = szggrepel, ...))
  # Shared y-limit = max of the two captured maxima
  ylim<-cbind(as.numeric(ylimTreat1),as.numeric(ylimTreat2))
  ylim_max<-apply(ylim,1,max)
  #Assign limits so both plots are directly comparable
  plots<-c()
  Tr1<-Treat1Plot+ylim(0,ylim_max[1])
  Tr2<-Treat2Plot+ylim(0,ylim_max[1])
  plots[[1]]<-Tr1
  plots[[2]]<-Tr2
  arrangeGrob(grobs=plots,ncol=2) #instead of grid.arrange that plots to console to avoid error related to size of screen
}
#' Bipartite Table
#' @description Internal use. Builds the edge list for a simple bipartite network:
#' each row of the CoNI network table contributes its two vertex features (columns 1
#' and 2), both connected to every linker feature listed (";"-separated) in column 4.
#' @keywords internal
#' @return A matrix containing two columns, one for vertexes and one for edge features. The matching happens if they are adjacent to one another
createBipartiteTable<-function(CoNINetworkTable){
  perRowEdges<-lapply(seq_len(nrow(CoNINetworkTable)),function(rowIdx){
    driverA<-CoNINetworkTable[rowIdx,1] #First vertex of the pair
    driverB<-CoNINetworkTable[rowIdx,2] #Second vertex of the pair
    linkers<-unlist(strsplit(CoNINetworkTable[rowIdx,4],";")) #";"-separated linker features
    #Connect both vertexes to every linker. The rowvertex1/rowvertex2 variable
    #names are kept so rbind() produces the same row names as before
    do.call(rbind,lapply(linkers,function(lnk){
      rowvertex1<-c(from=driverA,to=lnk)
      rowvertex2<-c(from=driverB,to=lnk)
      rbind(rowvertex1,rowvertex2)
    }))
  })
  return(do.call(rbind,perRowEdges))
}
#' Bipartite Network
#' @description This function creates a simple bipartite graph, it shows the driver and linker features as nodes.
#' @param TableNetwork TableForNetwork_file (file generated by CoNI) with path
#' @param colorVertexTable Table specifying the colors for the vertex features. The first column should contain the names matching the features of the vertex Data and another column should specify the colors (column name: Colors).
#' @param incidenceMatrix logical. If TRUE it returns a hypergraph incidence matrix instead of a bipartite graph
#' @return An igraph object for a bipartite graph or a hypergraph incidence matrix to represent ResultsCoNI. Basic network statistics are included in the bipartite graph. See generate_network function for details or consult the igraph package
#' @examples
#' #See vignette for an example
#' @export
#' @importFrom gplots col2hex
#' @importFrom igraph graph_from_data_frame simplify V E degree hub_score transitivity closeness betweenness eigen_centrality centr_betw centr_clo centr_degree edge_betweenness write_graph as_ids get.incidence
createBipartiteGraph<-function(TableNetwork,colorVertexTable,incidenceMatrix=FALSE){
  # 'TableNetwork' is a file path; load the CoNI network table from disk.
  TableNetwork<-read.csv(TableNetwork)
  #Create bipartite table
  # createBipartiteTable() (defined earlier in this file) expands every
  # driver/linker row into from/to edge pairs.
  bipartiteTable<-createBipartiteTable(TableNetwork)
  #Remove redundancy
  bipartiteTable<-unique(bipartiteTable)
  bipartiteTable<-as.data.frame(bipartiteTable)
  #Check nodes if there are identical names in vertex and linked features
  # A feature name appearing on both sides would collapse into a single node and
  # break the bipartite structure, so linked features get a "_linkedF" suffix.
  LinkedFeaturesIdentical<-bipartiteTable$to[bipartiteTable$to %in% bipartiteTable$from]
  if(length(LinkedFeaturesIdentical)>0){
    LinkedFeaturesIdentical<-unique(LinkedFeaturesIdentical)
    for(feature in LinkedFeaturesIdentical){
      bipartiteTable$to[bipartiteTable$to==feature]<-paste0(feature,"_linkedF")
    }
  }
  #Create graph
  cnodes <- data.frame("Name"=unique(c(as.character(bipartiteTable$from),as.character(bipartiteTable$to))),stringsAsFactors=FALSE)#Get the nodes vertexFeature-EdgeFeature
  #Assign colors to nodes
  # Left join the user color annotation; nodes without an annotation row are
  # linker (edge) features and end up with NA in the annotation columns.
  m <- merge(cnodes,colorVertexTable,by.x="Name",by.y=colnames(colorVertexTable)[1],all.x=TRUE)
  #Assign grey color to Edges
  m<- m %>% mutate(type=ifelse(is.na(m[,2]),"EdgeFeature","VertexFeature"))#Might be problematic but minimum annotation file should contain three columns, vertex-feature,color and colorRgb
  #m[is.na(m[,2]),2]<-"EdgeFeature"
  # Locate the color column by name (case-insensitive) and default missing colors to grey.
  idx_colorColumn<-grep("color$",colnames(m),ignore.case = TRUE)
  m[is.na(m[,idx_colorColumn]),idx_colorColumn]<-"grey"
  m$ColorRgb<-col2hex(m[,idx_colorColumn])
  #Create graph
  netd <- graph_from_data_frame(d=bipartiteTable, vertices=m, directed=FALSE)
  # remove.multiple=FALSE keeps parallel edges; simplify() here only removes self-loops.
  netd <- simplify(netd,remove.multiple=FALSE)
  #Bipartite option
  #bipartite_mapping(netd)$type this function is giving me problems
  # Manual bipartite typing: TRUE = driver (vertex) feature, FALSE = linker feature.
  igraph::V(netd)$type <- ifelse(igraph::as_ids(V(netd)) %in% bipartiteTable$from,TRUE,FALSE)
  igraph::V(netd)$shape <- ifelse(V(netd)$type, "circle", "square")
  #Add network stats to bipartite graph... at the moment ignoring that it is a bipartite graph
  #stats are as as if it were a simple network
  igraph::V(netd)$degree <- degree(netd, mode="all")
  igraph::V(netd)$hub_score <- hub_score(netd, weights=NA)$vector
  igraph::V(netd)$transitivity <- transitivity(netd, type="local")
  igraph::V(netd)$closeness <- closeness(netd, mode="all", weights=NA)
  igraph::V(netd)$betweenness <- betweenness(netd, directed=FALSE, weights=NA)
  igraph::V(netd)$eigen_centrality <- eigen_centrality(netd, directed=FALSE, weights=NA)$vector
  igraph::V(netd)$centralized_betweenness <- centr_betw(netd, directed=FALSE, normalized=TRUE)$res
  igraph::V(netd)$centralized_closeness <- centr_clo(netd, mode="all", normalized=TRUE)$res
  igraph::V(netd)$centralized_degree <- centr_degree(netd, mode="all", normalized=TRUE)$res
  #V(netd)$membership_community_edgeBetweenes<-cluster_edge_betweenness(netd,directed = F)$membership
  #Add edge betweeness
  igraph::E(netd)$betweeness <- edge_betweenness(netd, directed=FALSE, weights=NA)
  if(incidenceMatrix){
    # Hypergraph representation: rows/columns follow the bipartite 'type' attribute.
    incidenceM<-get.incidence(netd)
    return(as.matrix(incidenceM))
  }else{
    return(netd)
  }
}
#' Network Statistics
#' @description This function calculates simple network statistics and returns them as a dataframe
#' @param Network An Igraph network
#' @return Returns a data.frame with nine rows with the following network statistics:
#' \itemize{
##' \item{"net_avg_pathL"}{Shortest paths between vertices}
##' \item{"net_edge_density"}{Graph density, ratio of the number of edges and the number of possible edges}
##' \item{"net_transitivity"}{Probability that the adjacent vertices of a vertex are connected}
##' \item{"net_diameter"}{Length of the longest geodesic}
##' \item{"net_nodes_first_path_diameter"}{The nodes along the first found path with the length of diameter}
##' \item{"net_eigenvalue"}{The eigenvalue corresponding to the centrality scores.}
##' \item{"net_centralized_betweenessIdx"}{The graph level centrality index after centralizing the graph according to the betweenness of vertices}
##' \item{"net_centralized_closenessIdx"}{The graph level centrality index after centralizing the graph according to the closeness of vertices}
##' \item{"net_centralized_degreeIdx"}{The graph level centrality index after centralizing the graph according to the degrees of vertices}
##' }
#' For more information on the statistics consult the igraph package.
#' @examples
#' #Load color nodes table
#' data(MetColorTable)
#' #Assign colors according to "Class" column
#' MetColorTable<-assign_colorsAnnotation(MetColorTable)
#' #Load CoNI results
#' data(CoNIResultsHFDToy)
#'
#' #Generate Network
#' HFDNetwork<-generate_network(ResultsCoNI = CoNIResultsHFDToy,
#'                              colorVertexNetwork = TRUE,
#'                              colorVertexTable = MetColorTable,
#'                              outputDir = "./",
#'                              outputFileName = "HFD",
#'                              saveFiles = FALSE)
#' NetStats(HFDNetwork)
#' @importFrom tibble rownames_to_column
#' @importFrom igraph mean_distance edge_density transitivity diameter get_diameter eigen_centrality centr_betw centr_clo centr_degree
#' @import dplyr
#' @export
NetStats<-function(Network){
  # One named entry per statistic; t() turns them into rows of the 'Value'
  # column. Because 'net_nodes_first_path_diameter' is a character string, the
  # transpose yields a character matrix, so every Value ends up as character.
  NetworkStatsTable<-data.frame(Value=t(data.frame(
    net_avg_pathL=mean_distance(Network, directed=F),
    net_edge_density=edge_density(Network, loops=F),
    net_transitivity=transitivity(Network, type="global"),
    net_diameter=diameter(Network, directed=F, weights=NA),
    # NOTE(review): get_diameter() is called with directed=TRUE while every
    # other statistic treats the graph as undirected -- confirm intent.
    net_nodes_first_path_diameter= paste(names(get_diameter(Network, directed=TRUE)),collapse=","),#returns the nodes along the first found path of that distance
    net_eigenvalue=eigen_centrality(Network, directed=FALSE, weights=NA)$value,
    net_centralized_betweenessIdx=centr_betw(Network, directed=F, normalized=TRUE)$centralization,
    net_centralized_closenessIdx=centr_clo(Network, mode="all", normalized=TRUE)$centralization,
    net_centralized_degreeIdx=centr_degree(Network, mode="all", normalized=TRUE)$centralization
    #net_community__modularity_edgeBetweenes=modularity(cluster_edge_betweenness(Network,directed = F))
  )))
  # Expose the statistic names as a proper column instead of rownames.
  NetworkStatsTable<-NetworkStatsTable %>% rownames_to_column("Network_statistic")
  return(NetworkStatsTable)
}
#' Get vertexes for edge feature
#' @keywords internal
#' @return A character vector with the vertexes connected to a given edge feature
getvertexes_edgeFeature <- function(edgeFeature, CoNIResults) {
  # Rows of the CoNI result table whose edge (linker) feature matches.
  matching_rows <- CoNIResults$Feature_edgeD == edgeFeature
  # Columns 1 and 2 hold the two vertex features of each significant triplet.
  vertex_pairs <- CoNIResults[matching_rows, c(1, 2), drop = FALSE]
  # Pool both columns and keep each vertex once, in order of first appearance.
  unique(unlist(c(vertex_pairs[, 1], vertex_pairs[, 2])))
}
#' Correlation vs Partial correlation
#' @description This function fits two linear models on standardize data and plots the results. It generates a scatter plot with two regression lines, where the slopes correspond to the correlation and partial correlation coefficients (blue for cor and red for pcor)
#' @param ResultsCoNI The significant results generated by CoNI
#' @param edgeFeature The edge feature to explore e.g. Fabp2 (for a gene)
#' @param vertexD Vertex data that was given as input to CoNI
#' @param edgeD Edge data that was given as input to CoNI
#' @param vertexFeatures The vertex features to include as a list. If not specified all metabolites available in combination with the edgeFeature will be used
#' @param outputDir Output directory with path
#' @param label_edgeFeature Name for plot title e.g. Gene or Protein
#' @param plot_to_screen logical. If TRUE plots will be outputted to the plotting screen
#' @param fname File name to save the plots. Defaults to <outputDir><edgeFeature>.pdf when omitted
#' @param height height of the plotting area for the saved file
#' @param width width of the plotting are for the saved file
#' @param saveFiles logical. If FALSE plot is not saved to disk
#' @import ggrepel
#' @import ggplot2
#' @importFrom gridExtra arrangeGrob
#' @importFrom stats lm as.formula resid
#' @importFrom rlang .data
#' @examples
#' #Load gene expression - Toy dataset of two treatments
#' data(GeneExpToy)
#' #Samples in rows and genes in columns
#' GeneExp <- as.data.frame(t(GeneExpToy))
#' hfd_gene <- GeneExp[1:8,] #high fat diet
#' chow_gene<- GeneExp[9:nrow(GeneExp),] #chow diet
#'
#' #Load metabolite expression - Toy dataset of two treatments
#' data(MetaboExpToy)
#' MetaboExp <- MetaboExpToy
#' hfd_metabo <- MetaboExp[11:18,] #high fat diet
#' chow_metabo <- MetaboExp[1:10,] #chow diet
#'
#' #Match row names both data sets
#' rownames(hfd_metabo)<-rownames(hfd_gene)
#' rownames(chow_metabo)<-rownames(chow_gene)
#'
#' #Load CoNI results
#' data(CoNIResultsHFDToy)
#'
#' plotPcorvsCor(ResultsCoNI = CoNIResultsHFDToy,
#'               edgeFeature = "Arfrp1",
#'               vertexFeatures = c("PC.ae.C40.2", "SM..OH..C22.1"),
#'               vertexD = hfd_metabo,
#'               edgeD = hfd_gene,
#'               label_edgeFeature = "Gene",
#'               plot_to_screen = TRUE,
#'               height = 10,
#'               saveFiles = FALSE)
#' @return Returns a ggplot object for a scatter plot with two regression lines.
#' The blue line is the regression of the vertex features, and the red line is the regression
#' of the resulting residuals after regressing each vertex feature with the edge feature.
#' The slope of the blue line corresponds to the pearson correlation coefficient and the slope of the red line
#' to the partial correlation coefficient
#' @export
plotPcorvsCor<-function(ResultsCoNI,
                        edgeFeature,
                        vertexD, edgeD,
                        vertexFeatures=NULL,
                        outputDir="./",
                        fname=NULL,
                        label_edgeFeature="Edge Feature",
                        plot_to_screen=TRUE,
                        height=10,width=8,
                        saveFiles=FALSE){
  # Bug fix: 'fname' used to be unconditionally overwritten, so a user-supplied
  # file name was silently ignored. Build the default only when it is missing.
  if(is.null(fname)){
    fname<-paste0(outputDir,edgeFeature,".pdf")
  }
  # Keep only the CoNI triplets that involve the requested edge feature.
  ResultsCoNIfull<-ResultsCoNI %>% filter(.data$Feature_edgeD==edgeFeature)
  # Optionally restrict both vertex features to the user-provided set.
  if(!is.null(vertexFeatures)){
    llF1<-sapply(ResultsCoNIfull$Feature_1_vertexD,function(feature){feature %in% vertexFeatures})
    ResultsCoNIfull<-ResultsCoNIfull[llF1,]
    llF2<-sapply(ResultsCoNIfull$Feature_2_vertexD,function(feature){feature %in% vertexFeatures})
    ResultsCoNIfull<-ResultsCoNIfull[llF2,]
  }
  plots<-list()
  for(i in 1:nrow(ResultsCoNIfull)){
    ResultsCoNI<-ResultsCoNIfull[i, ,drop=FALSE]
    # The two vertex features linked through the edge feature in this triplet.
    vertexes_edgeFeature<-getvertexes_edgeFeature(edgeFeature = edgeFeature,CoNIResults = ResultsCoNI)
    M1<-vertexes_edgeFeature[1]
    M2<-vertexes_edgeFeature[2]
    # Column 1: edge feature expression; columns 2-3: the two vertex features.
    edgeFeature_vertex_Expression<-as.data.frame(cbind(edgeD[,edgeFeature,drop=FALSE],vertexD[,vertexes_edgeFeature,drop=FALSE]))
    fN<-ncol(edgeFeature_vertex_Expression)-1
    edgeFeature_vertex_Expression[,1:fN]<-apply(edgeFeature_vertex_Expression[,1:fN],2,as.numeric)
    # Residuals of each vertex feature after regressing out the edge feature.
    # Regressing these residuals against each other gives the partial
    # correlation. (Dead computations of the reverse/vertex-vertex residuals
    # were removed -- only eM1G1 and eM2G1 were ever used.)
    eM1G1<-resid(lm(as.formula(paste0(colnames(edgeFeature_vertex_Expression)[2], "~",colnames(edgeFeature_vertex_Expression)[1])), data=edgeFeature_vertex_Expression))
    eM2G1<-resid(lm(as.formula(paste0(colnames(edgeFeature_vertex_Expression)[3], "~",colnames(edgeFeature_vertex_Expression)[1])), data=edgeFeature_vertex_Expression))
    #Scale
    # After standardization the regression slopes equal the correlation (blue)
    # and partial correlation (red) coefficients.
    NewDF<-as.data.frame(cbind(vertex1=edgeFeature_vertex_Expression[[2]],vertex2=edgeFeature_vertex_Expression[[3]],eM1G1=eM1G1,eM2G1=eM2G1))
    NewDF<-as.data.frame(scale(NewDF))
    plots[[i]]<-ggplot(NewDF,aes(.data$vertex1,.data$vertex2)) +
      geom_point()+
      stat_smooth(method="lm",se=FALSE)+
      geom_point(data = NewDF,aes(eM1G1,eM2G1),color="red")+
      stat_smooth(data = NewDF,aes(eM1G1,eM2G1),color="red",method="lm",se=FALSE)+
      xlab(M1)+
      ylab(M2)+
      ggtitle(paste0(label_edgeFeature,": ",edgeFeature))+
      theme(plot.title = element_text(size=14,color="red", hjust = 0.5,face="bold.italic"))
  }
  if(plot_to_screen){
    # Render each ggplot object to the active graphics device.
    sapply(plots,plot)
  }
  if(saveFiles){
    if(length(plots)>1){
      plots_arrange<-arrangeGrob(grobs=plots,ncol=2)
      ggsave(filename=fname, plot=plots_arrange, width=width, height=height)
    }else{
      # Bug fix: honor the documented width/height arguments (previously
      # hard-coded to 6x4 for single plots).
      ggsave(filename=fname, plot=plots[[1]], width=width, height=height)
    }
  }
}
|
aef4665be3ee9d9d11db4959532a5fe9c33d36c5
|
1427b6faa8b70925f74e9d6c7c18955b00b40413
|
/R/germanpolls.R
|
61974e340dcadf8c714e89a7285cc6ece10935ee
|
[] |
no_license
|
cutterkom/germanpolls
|
8e0d5a92542bafee87173155e3b9bb5df20e643d
|
e555496add0eb2eefab77f5c96f7ac9781d5c21f
|
refs/heads/master
| 2021-01-11T10:18:58.299049
| 2020-04-02T11:44:30
| 2020-04-02T11:44:30
| 72,452,729
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 929
|
r
|
germanpolls.R
|
#' Get data of German polls from wahlrecht.de.
#'
#' This is the main function to parse XML files from Wahlrecht.de to extract polling data.
#' It catches all XML elements that can be found.
#' Not all regions have XML available, their data can't be accessed with this function.
#' @param region string, all available regions are documented in the readme
#' @return dataframe with the polling data, or NULL (with an informative message)
#'   when no XML feed exists for the requested region
#' @examples
#' germanpolls(region = "de")
#' germanpolls(region = "by")
#' @importFrom RCurl url.exists
#' @export
germanpolls <- function(region = "de") {
  if (region == "de") {
    # Federal-level polls have a dedicated XML feed.
    df <- get_data_from_xml_de()
  } else if (region == "eu") {
    # No XML feed exists for EU election polls; point the user to the HTML page.
    message("No XML yet, please go to http://www.wahlrecht.de/umfragen/europawahl.htm")
    df <- NULL
  } else {
    # State-level ("Laender") feeds follow a common URL pattern.
    url <- paste0("http://www.wahlrecht.de/umfragen/xml/land_", region, ".xml")
    if (RCurl::url.exists(url)) {
      df <- get_data_from_xml_laender(url)
    } else {
      message("No XML, sorry")
      df <- NULL
    }
  }
  # Bug fix: the original relied on the last evaluated expression, so the "eu"
  # and missing-URL branches returned the value of print() instead of data.
  df
}
|
6f3501d64579dd9427b3d3c8aa035dbe9c430f55
|
976f789338e38f9db135a9555af998b545ea5fd2
|
/11-7-19 74 ptsIFN and SLICC.R
|
89efcdb10fd8d399ba5d933f9689bb0feb99bf11
|
[] |
no_license
|
aamarnani/Lupus-Machine-learning
|
ea331e9be4f307792a4e7baf4b6f9293993766d3
|
f103299096bff14600fe8b3d1dfd22fbc02243f2
|
refs/heads/master
| 2020-09-06T08:43:36.008899
| 2019-11-08T03:39:01
| 2019-11-08T03:39:01
| 220,378,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,222
|
r
|
11-7-19 74 ptsIFN and SLICC.R
|
#11-7-19_74 patients _ IFN genes and score and SLICC total score not high vs low
#0-2 is labeled as 1 and >=3 as 2
# Data read in off of biowulf
# Read Data
# NOTE(review): hard-coded absolute Biowulf paths; this script only runs on that cluster.
setwd("/data/amarnanian/Data Files Wd")
data <- read.csv("/spin1/USERS1/amarnanian/Data Files Wd/11-7-19 74 patients for R cleaned.csv", header = TRUE,
                 na.strings = " ")
data <- data.frame(data)
str(data)
# Outcome variable: SLICC score dichotomized upstream (see header comments),
# modeled as a factor for classification.
data$slicc_factor <- as.factor(data$slicc_factor)
str(data)
#Rem variables
# Drop date and raw-score columns so they cannot leak into the predictors.
data$Date.of.IFN.score <-NULL
data$SLICC.DATE <-NULL
data$SLICC..output. <- NULL
data$Median.SLEDAI..Output. <- NULL
data$DATE_Dif<-NULL
#data$Age<-NULL
#data$Monocytes....Blood.<-NULL
#data$Monocytes.Abs..Blood.<-NULL
# Recode every SLEDAI component as a factor (presence/absence indicators --
# TODO confirm the coding against the source spreadsheet).
data$SLEDAI_SEIZURE <- as.factor(data$SLEDAI_SEIZURE)
data$SLEDAI_PSYCHOSIS <- as.factor(data$SLEDAI_PSYCHOSIS)
data$SLEDAI_OBS <- as.factor(data$SLEDAI_OBS)
data$SLEDAI_VISUAL_DISTURB <- as.factor(data$SLEDAI_VISUAL_DISTURB)
data$SLEDAI_CRANIAL_NERVE <- as.factor(data$SLEDAI_CRANIAL_NERVE)
data$SLEDAI_LUPUS_HEADACHE <- as.factor(data$SLEDAI_LUPUS_HEADACHE)
data$SLEDAI_CVA <- as.factor(data$SLEDAI_CVA)
data$SLEDAI_VASCULITIS <- as.factor(data$SLEDAI_VASCULITIS)
data$SLEDAI_ARTHRITIS <- as.factor(data$SLEDAI_ARTHRITIS)
data$SLEDAI_MYOSITIS <- as.factor(data$SLEDAI_MYOSITIS)
data$SLEDAI_URINARY_CAST <- as.factor(data$SLEDAI_URINARY_CAST)
data$SLEDAI_HEMATURIA <- as.factor(data$SLEDAI_HEMATURIA)
data$SLEDAI_PROTEINURIA <- as.factor(data$SLEDAI_PROTEINURIA)
data$SLEDAI_PYURIA <- as.factor(data$SLEDAI_PYURIA)
data$SLEDAI_RASH <- as.factor(data$SLEDAI_RASH)
data$SLEDAI_ALOPECIA <- as.factor(data$SLEDAI_ALOPECIA)
data$SLEDAI_MUCOSAL_ULCERS <- as.factor(data$SLEDAI_MUCOSAL_ULCERS)
data$SLEDAI_PLEURISY <- as.factor(data$SLEDAI_PLEURISY)
data$SLEDAI_PERICARDITIS <- as.factor(data$SLEDAI_PERICARDITIS)
data$SLEDAI_LOW_COMPLEMENT <- as.factor(data$SLEDAI_LOW_COMPLEMENT)
data$SLEDAI_INC_DNA_BIND <- as.factor(data$SLEDAI_INC_DNA_BIND)
data$SLEDAI_FEVER <- as.factor(data$SLEDAI_FEVER)
data$SLEDAI_THROMBOCYTOPENIA <- as.factor(data$SLEDAI_THROMBOCYTOPENIA)
data$SLEDAI_LEUKOPENIA <- as.factor(data$SLEDAI_LEUKOPENIA)
str(data)
table(data$slicc_factor)
#This is the distribution of the SLICC overall.
#0  1  2  3  4  5  6  8
#13 18 13 11  9  6  2  2
#The two group seperation -
# 0  1
#44 30
#Data partition
# Roughly 2/3 train, 1/3 test.
# NOTE(review): prob = c(0.6777, 0.333) does not sum to 1; sample() rescales
# the weights, so the effective split is ~67/33 -- confirm this was intended.
set.seed(123)
ind <- sample(2, nrow(data), replace = TRUE, prob = c(0.6777, 0.333))
train <- data[ind==1,]
test <- data[ind==2,]
summary(data)
prop.table(table(data$slicc_factor))
barplot(prop.table(table(data$slicc_factor)),
        col = rainbow(2),
        ylim = c(0, 0.7),
        main = "SLICC Output")
#Default RF settings - Train data
library(randomForest)
set.seed(222)
# Baseline random forest with default mtry/ntree on the training split.
rf <- randomForest(slicc_factor~., data=train, importance = TRUE, proximity = TRUE)
print(rf)
# Prediction & Confusion Matrix - train data
library(caret)
p1 <- predict(rf, train)
confusionMatrix(p1, train$slicc_factor, positive = '1')
#Prediction and confusion- test data
p2 <- predict(rf, test)
confusionMatrix(p2, test$slicc_factor, positive = '1')
plot(rf)
# Tune mtry by OOB error; column 54 is assumed to be slicc_factor
# (TODO confirm the outcome's column index after the column drops above).
t <- tuneRF(train[,-54], train[,54],
            stepFactor = 0.5,
            plot = TRUE,
            ntreeTry = 400,
            trace = TRUE,
            improve = 0.05)
#Lowest at 7 or 14
# Refit with the tuned hyper-parameters.
rf <- randomForest(slicc_factor~., data=train,
                   ntree = 400,
                   mtry = 7,
                   importance = TRUE,
                   proximity = TRUE)
print(rf)
plot(rf)
#now this is with tuned RF model
#Train
p1 <- predict(rf, train)
confusionMatrix(p1, train$slicc_factor, positive = '1')
#Test
p2 <- predict(rf, test)
confusionMatrix(p2, test$slicc_factor, positive = '1')
#pretty horrible --- have Pvalue with acc > NIR of 0.57928
# No. of nodes for the trees
hist(treesize(rf),
     main = "No. of Nodes for the Trees",
     col = "green")
#VariableImpPlot
varImpPlot(rf)
varImpPlot(rf, type = NULL,
           sort = T,
           n.var = 20,
           main = "Top 20 - Variable Importance")
#IFN signature is high so that is good.
#Take out SLEDAI input as well
importance(rf)
rf$importance
#write.table(rf$importance, file="10-21-19RFexportedimportancevariables.csv", sep=",")
## Variable Used
varUsed(rf)
varused <-varUsed(rf)
write.table(varused, file="exportedvariablesusedLupus10-21-19test.csv", sep=",")
# Partial Dependence Plot
# NOTE(review): classes in this data set are "0"/"1"; the class label "2" and
# columns such as Low.Complement/P01009 look carried over from an older script
# version -- verify these calls run against the current data.
partialPlot(rf, data, Low.Complement, "1")
partialPlot(rf, data, Low.Complement, "2")
#Just exported one by one. Mya not even use these plots in figure.
#Can add to existing plot... can only add from the same group to each other though e.g.
partialPlot(rf, train, P01009, "2", plot = TRUE, add = FALSE)
# NOTE(review): rf273 and train273 are never defined in this script.
partialPlot(rf273, train273, P08670.4, "2", plot=T, add=T)
?partialPlot
#X axis is the value ... and the Y axis is the arbitrary units for or less likely?Arbitrary. When higher then ___ then more likley to predict.
# Extract Single Tree
getTree(rf, 2, labelVar = TRUE)
# Multi-dimensional Scaling Plot of Proximity Matrix
# NOTE(review): the remainder of this section references outcome columns from
# an earlier analysis (BinaryMedianSledai, Later.Sledai..Categorized.) that do
# not exist in the current 'data'; these calls will error until updated to
# slicc_factor.
rftrain <- randomForest(BinaryMedianSledai~., data=train, importance = TRUE, proximity = TRUE)
rftest <- randomForest(BinaryMedianSledai~., data=test, importance = TRUE, proximity = TRUE)
#Unclear what this means? - all the data applied with the model built base don training data?
MDSplot(rf, data$BinaryMedianSledai)
#Changes the color of same dots because whether correct or not?
MDSplot(rf, train$Later.Sledai..Categorized.)
MDSplot(rf, test$Later.Sledai..Categorized.)
MDSplot(rftrain, train$Later.Sledai..Categorized.)
MDSplot(rftest, test$Later.Sledai..Categorized.)
#For ROC AUC Plot -- why is it elbow lke that?
# NOTE(review): building a ROC curve from hard class predictions (not class
# probabilities) gives the single-elbow shape noted above; use
# predict(type = "prob") as attempted further down.
library(pROC)
p2 <- predict(rf, test)
confusionMatrix(p2, test$Later.Sledai..Categorized.)
##table(factor(, levels=min(test):max(test)),
#factor(test, levels=min(test):max(test)))
?predict
rf.roc <- roc(test$Later.Sledai..Categorized., predictor= factor(p2,ordered = TRUE))
rf.roc <- plot.roc(test$Later.Sledai..Categorized., predictor= factor(p2,ordered = TRUE), legacy.axes = TRUE)
?roc
auc <- auc(rf.roc)
auc_legend <- round (auc,4)
legend (0.6,0.2,auc_legend, title="AUC Lupus test 10-1", cex=1.0)
#ForROC AUC Plot
require(pROC)
predictions <-as.data.frame(predict(rf,test,type="prob"))
#HOw can we make it so it isn't an elbow liek that? Not plotting probability.
# predict class and then attach test class
#predictionsLN$ <- names(predictions)[1:3][apply(predictions[,1:3], 1, which.max)]
#predictionsLN$observed <- test$condition
#head(predictions)
smooth.roc <- smooth.roc(rf.roc)
#error
# Use predict with type="prob" to get class probabilities
#iris.predictions <- predict(mn.net, newdata=iris.test, type="prob")
#head(iris.predictions)
# This can be used directly in multiclass.roc:
#multiclass.roc(iris.test$Species, iris.predictions)
rf.roc <- roc(test$Later.Sledai..Categorized., predictor= factor(p2,ordered = TRUE))
#error
######NewModel with less variables
# NOTE(review): rm() here wipes the whole workspace mid-script, so everything
# below this point refers to objects that no longer exist.
rm(list = ls(all.names = TRUE))
#See the different attributes
attributes(rf)
#[1] "call"            "type"            "predicted"       "err.rate"        "confusion"
#[6] "votes"           "oob.times"       "classes"         "importance"      "importanceSD"
#[11] "localImportance" "proximity"       "ntree"           "mtry"            "forest"
#[16] "y"               "test"            "inbag"           "terms"
attributes(rf$votes)
#$class
require(randomForest)
data
# This will make drop a class to make it a 2 class problem
# NOTE(review): dataROCtest is built but never used afterwards.
dataROCtest <-data[-which(data$Later.Sledai..Categorized.=="Sledai High"),]
dataROCtest$Later.Sledai..Categorized.<-as.factor(as.character(dataROCtest$Later.Sledai..Categorized.))
set.seed(71)
rf <- randomForest(Later.Sledai..Categorized.~., data=train, importance = TRUE, proximity = TRUE)
rftest <-randomForest()
require(pROC)
# ROC from the OOB vote fraction for the second class.
rf.roc<-roc(data$Later.Sledai..Categorized.,rf$votes[,2])
#Issues says need sto be same level --- gives same AUC but can't plot
plot(rf.roc)
auc(rf.roc)
# Ternary plot of the per-class random-forest vote fractions, with each point
# colored by the observation's true class (rf$y).
# NOTE(review): relies on triax.plot() (plotrix package), which this script
# never loads.
plot.separation <- function(rf, ...) {
  class_palette <- c("#FF0000FF", "#00FF0010", "#0000FF10")
  point_colors <- class_palette[as.numeric(rf$y)]
  triax.plot(rf$votes, ..., col.symbols = point_colors)
}
|
e7a408d1703636fc37773ab70961c6d5ac0bd174
|
adf7edc04f43e6cfd876774b0a95b7952f31677c
|
/model/solveTrial.R
|
7e4d7955385d6dc84d5686b4d8e760b2e8a32564
|
[
"MIT"
] |
permissive
|
timothykinyanjui/Household-Regression
|
b8fb2fb00fd414e646c8c894fba76876f600a4eb
|
417b6434dcf354097ceb48474818cade864fc65b
|
refs/heads/master
| 2020-12-13T05:29:48.758163
| 2020-01-16T13:31:05
| 2020-01-16T13:31:05
| 234,324,130
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 584
|
r
|
solveTrial.R
|
# Script checks if the solution is achievalbe with the matrix computed
# Clear the workspace
rm(list = ls())
# Set up the data
# n: household composition (one row of four types); B and scale: model
# parameters; beta: 4x4 transmission/mixing matrix.
# NOTE(review): the exact semantics of n, B and scale are defined by
# finalSizeProbGen() in finalSizeProbGen.R -- confirm there.
n = matrix(c(1,1,1,1),nrow = 1)
B = matrix(c(0.7,0.6,0.5,0.3),nrow = 1)
scale = 8.7
beta = matrix(0.0327,nrow = 4,ncol = 4)
# Load relevant functions and packages
source("finalSizeProbGen.R")
#library("prac")
# Run the function
output = finalSizeProbGen(n,beta,B,scale)
coefMatrix = output$coefMatrix
indexSub = output$indexSub
# Solve the linear system coefMatrix %*% P = 1 for the final-size
# probability vector P.
ones <- matrix(1,nrow = length(coefMatrix[,1]),ncol = 1)
P = solve(coefMatrix, ones)
# Sanity check: the probabilities should sum to 1.
sum(P)
# check out
plot(P,type = "l")
|
88f1a95d606c13f1c076ad2dae3efe0d916908e9
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/Bloom/03_merge_02_Step/d_merge_blooms.R
|
084e596a8cbf6c1ac26cb3d4e0bfc59f1a902b5c
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 3,115
|
r
|
d_merge_blooms.R
|
# Cluster-specific library path for the R 3.5 package tree.
.libPaths("/data/hydro/R_libs35")
.libPaths()
library(data.table)
library(dplyr)
# Shared helper functions for the bloom pipeline.
source_1 = "/home/hnoorazar/bloom_codes/bloom_core.R"
source(source_1)
options(digit=9)   # NOTE(review): 'digit' is not a real option; the next line sets the intended one.
options(digits=9)
start_time <- Sys.time()
###############################################################
# in directory
bloom_base <- "/data/hydro/users/Hossein/bloom/"
main_in <- paste0(bloom_base, "02_bloomCut_first_frost/bloom/")
model_in <- paste0(main_in, "modeled/")
# out directory
out_dir <- "/data/hydro/users/Hossein/bloom/03_merge_02_Step/"
if (dir.exists(out_dir) == F) {
  dir.create(path = out_dir, recursive = T)}
# The 19 CMIP5 GCMs whose per-model bloom tables are merged below.
models <- c("bcc-csm1-1", "bcc-csm1-1-m", "BNU-ESM", "CanESM2",
            "CCSM4", "CNRM-CM5", "CSIRO-Mk3-6-0", "GFDL-ESM2G",
            "GFDL-ESM2M", "HadGEM2-CC365", "HadGEM2-ES365", "inmcm4",
            "IPSL-CM5A-LR", "IPSL-CM5A-MR", "IPSL-CM5B-LR", "MIROC5",
            "MIROC-ESM-CHEM", "MRI-CGCM3", "NorESM1-M")
# Read the 50%-full-bloom tables for every GCM and each scenario, collecting
# them in preallocated lists and binding once at the end. (Fixes the original
# rbind-inside-the-loop pattern, which copies the accumulated table on every
# iteration -- O(n^2) in total rows.)
model_counter = 0
hist_list  <- vector("list", length(models))
rcp45_list <- vector("list", length(models))
rcp85_list <- vector("list", length(models))
for (k in seq_along(models)){
  model <- models[k]
  print (paste0("line 35: ", model))
  # File names use underscores where the model names use dashes.
  model_tag <- gsub("-", "_", model)
  hist_list[[k]]  <- readRDS(paste0(model_in, model,
                                    "/historical/",
                                    "fullbloom_50percent_day_",
                                    model_tag,
                                    "_historical", ".rds"))
  rcp45_list[[k]] <- readRDS(paste0(model_in, model,
                                    "/rcp45/",
                                    "fullbloom_50percent_day_",
                                    model_tag,
                                    "_rcp45", ".rds"))
  rcp85_list[[k]] <- readRDS(paste0(model_in, model,
                                    "/rcp85/",
                                    "fullbloom_50percent_day_",
                                    model_tag,
                                    "_rcp85", ".rds"))
  model_counter = model_counter + 1
  print (paste0("model_counter = ", model_counter))
}
# Single bind per scenario; rbindlist returns data.tables as before.
historical <- rbindlist(hist_list)
rcp45 <- rbindlist(rcp45_list)
rcp85 <- rbindlist(rcp85_list)
# Label modeled-historical rows and duplicate them under both emission
# scenarios so each RCP table carries its own historical baseline.
historical$time_period <- "modeled_hist"
historical_45 <- historical
historical_85 <- historical
historical_45$emission <- "RCP 4.5"
historical_85$emission <- "RCP 8.5"
rm(historical)
###################################################
#
#     Read observed
#
###################################################
observed <- readRDS(paste0(main_in,
                           "fullbloom_50percent_day_observed.rds")) %>%
            data.table()
observed$time_period <- "observed"
# Observed data is likewise duplicated under both scenarios.
observed_45 <- observed
observed_85 <- observed
observed_45$emission <- "RCP 4.5"
observed_85$emission <- "RCP 8.5"
rcp45$time_period <- "future"
rcp85$time_period <- "future"
# Stack future + modeled-historical + observed rows per scenario.
rcp45 <- rbind(rcp45, historical_45, observed_45)
rcp85 <- rbind(rcp85, historical_85, observed_85)
# saveRDS(rcp45, paste0(out_dir, "/fullbloom_50percent_day_rcp45.rds"))
# saveRDS(rcp85, paste0(out_dir, "/fullbloom_50percent_day_rcp85.rds"))
# Final combined table written as a single RDS.
all_bloom <- rbind(rcp45, rcp85)
saveRDS(all_bloom, paste0(out_dir, "/fullbloom_50percent_day.rds"))
end_time <- Sys.time()
print( end_time - start_time)
|
ec310a9be126aa2b4edaae70242266951d7e1b4a
|
c8cc118d8194c69c1b6ad664d2118ffeb05a613e
|
/codes/_old_scripts/Fitting_models.R
|
27730e992ea25b8f507d09331ec3a7fdc9fe8443
|
[] |
no_license
|
CIAT-DAPA/sfs_project
|
d0147e439b2a33fca83fdac98130dba75b45479b
|
bf149165b42ae60642353959d52bae48874dc4ab
|
refs/heads/master
| 2021-01-20T13:35:11.540168
| 2020-04-08T19:44:14
| 2020-04-08T19:44:14
| 82,695,845
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,131
|
r
|
Fitting_models.R
|
# Fitting models for sustainable food systems project
# Implemented by: H. Achicanoy, P. Alvarez & L. Lamotte
# CIAT, 2017
# R options
g <- gc(reset = T); rm(list = ls()); options(warn = -1); options(scipen = 999)
OSys <- Sys.info(); OSys <- OSys[names(OSys)=="sysname"]
if(OSys == "Linux"){
wk_dir <- "/mnt/workspace_cluster_9/Sustainable_Food_System/Input_data/"; setwd(wk_dir); rm(wk_dir)
} else {
if(OSys == "Windows"){
wk_dir <- "//dapadfs/workspace_cluster_9/Sustainable_Food_System/Input_data"; setwd(wk_dir); rm(wk_dir)
}
}; rm(OSys)
# Load packages
suppressMessages(if(!require(raster)){install.packages('raster'); library(raster)} else {library(raster)})
suppressMessages(if(!require(rgdal)){install.packages('rgdal'); library(rgdal)} else {library(rgdal)})
suppressMessages(if(!require(maptools)){install.packages('maptools'); library(maptools)} else {library(maptools)})
suppressMessages(if(!require(dplyr)){install.packages('dplyr'); library(dplyr)} else {library(dplyr)})
suppressMessages(if(!require(tidyr)){install.packages('tidyr'); library(tidyr)} else {library(tidyr)})
suppressMessages(if(!require(ggplot2)){install.packages('ggplot2'); library(ggplot2)} else {library(ggplot2)})
suppressMessages(if(!require(jsonlite)){install.packages('jsonlite'); library(jsonlite)} else {library(jsonlite)})
suppressMessages(if(!require(foreach)){install.packages('foreach'); library(foreach)} else {library(foreach)})
suppressMessages(if(!require(doMC)){install.packages('doMC'); library(doMC)} else {library(doMC)})
suppressMessages(if(!require(XML)){install.packages('XML'); library(XML)} else {library(XML)})
suppressMessages(if(!require(plspm)){install.packages('plspm'); library(plspm)} else {library(plspm)})
suppressMessages(if(!require(reshape)){install.packages('reshape'); library(reshape)} else {library(reshape)})
suppressMessages(if(!require(VIM)){install.packages('VIM'); library(VIM)} else {library(VIM)})
suppressMessages(if(!require(mice)){install.packages('mice'); library(mice)} else {library(mice)})
suppressMessages(if(!require(Amelia)){install.packages('Amelia'); library(Amelia)} else {library(Amelia)})
suppressMessages(if(!require(missForest)){install.packages('missForest'); library(missForest)} else {library(missForest)})
suppressMessages(if(!require(Hmisc)){install.packages('Hmisc'); library(Hmisc)} else {library(Hmisc)})
suppressMessages(if(!require(mi)){install.packages('mi'); library(mi)} else {library(mi)})
suppressMessages(if(!require(simputation)){install.packages('simputation', dependencies = T); library(simputation)} else {library(simputation)})
suppressMessages(if(!require(highcharter)){install.packages('highcharter', dependencies = T); library(highcharter)} else {library(highcharter)})
suppressMessages(if(!require(igraph)){install.packages('igraph', dependencies = T); library(igraph)} else {library(igraph)})
suppressMessages(if(!require(networkD3)){install.packages('networkD3', dependencies = T); library(networkD3)} else {library(networkD3)})
suppressMessages(if(!require(cluster)){install.packages('cluster', dependencies = T); library(cluster)} else {library(cluster)})
suppressMessages(if(!require(factoextra)){install.packages('factoextra', dependencies = T); library(factoextra)} else {library(factoextra)})
suppressMessages(if(!require(FactoMineR)){install.packages('FactoMineR', dependencies = T); library(FactoMineR)} else {library(FactoMineR)})
suppressMessages(library(compiler))
# Load joined data
complete_data <- readRDS(file = "data_joined.RDS")
# Missing values analysis
# Percent of missing values per variable and combination
VIM::aggr(complete_data)
# Method 1: k nearest neighbors (non-parametric alternative)
complete_data1 <- VIM::kNN(data = complete_data); complete_data1 <- complete_data1[,colnames(complete_data)]
# 3. Method 2: mice (assumption of LM for continuous data and GLM for categorical)
# Method 3: Amelia package (assumption of multivariate normality and LM)
# 2. Method 4: missForest (Uses random forest)
complete_data2 <- missForest::missForest(xmis = complete_data) # Does not work. Check!!!
# 1. Method 5: Hmisc (Uses bootstrap sampling and predictive mean matching to impute missing values)
complete_data3 <- Hmisc::aregImpute(formula = ~ GHI_2000 + ChldMalnutrition + Access_median + Footprint_median + sanitation + water_sources + GDP + political_stability, data = complete_data)
# Requires a function to arrange imputed data after process
# Method 6: mi package
complete_data4 <- mi::mi(complete_data, seed = 335) # Does not work. Check!!!
# Method 7: simputation package
# PLS-PM: Using repeated indicators
# Define path model matrix (inner model)
NUTR <- c(0, 0, 0, 0)
HINT <- c(0, 0, 0, 0)
FSCY <- c(0, 0, 0, 0) # FSCY <- c(1, 1, 0, 0)
SUFS <- c(1, 1, 1, 0)
sfs_path <- rbind(NUTR, HINT, FSCY, SUFS); rm(NUTR, HINT, FSCY, SUFS)
colnames(sfs_path) <- rownames(sfs_path)
innerplot(sfs_path)
# List of blocks for outer model
sfs_blocks <- list(2:3, 4:5, 6:9, 2:9)
# List of modes
sfs_modes <- rep("A", 4)
complete_data1$Access_median <- (-complete_data1$Access_median)
# Running the model
# sfs_pls <- plspm(complete_data[complete.cases(complete_data),], sfs_path, sfs_blocks, modes = sfs_modes)
sfs_pls <- plspm(complete_data1, sfs_path, sfs_blocks, modes = sfs_modes)
outerplot(sfs_pls)
plot(sfs_pls)
pairs(sfs_pls$scores, pch = 20, cex = 2)
indices <- as.data.frame(sfs_pls$scores)
indices$iso3 <- as.character(complete_data1$ISO3)
if(!file.exists("../Results/sfs_index_knn_imputed.RDS")){
saveRDS(object = indices, file = "../Results/sfs_index_knn_imputed.RDS") # saveRDS(object = indices, file = "../Results/sfs_index_knn_imputed_model2.RDS")
} else {
indices <- readRDS(file = "../Results/sfs_index_knn_imputed.RDS")
}
indices[,1:(ncol(indices)-1)] <- round(indices[,1:(ncol(indices)-1)], 2)
xloads = melt(sfs_pls$crossloadings, id.vars = c("name", "block"))
gg <- ggplot(data = xloads, aes(x = name, y = value, fill = block))
gg <- gg + geom_hline(yintercept = 0, color = "gray75")
gg <- gg + geom_hline(yintercept = c(-0.5, 0.5), color = "gray70", linetype = 2)
gg <- gg + geom_bar(stat = 'identity', position = 'dodge')
gg <- gg + facet_wrap(block ~ variable)
gg <- gg + theme(axis.text.x = element_text(angle = 90), line = element_blank())
gg <- gg + ggtitle("Crossloadings")
gg
highchart(type = "map") %>%
hc_add_series_map(map = worldgeojson, df = indices, value = "SUFS", joinBy = "iso3") %>%
hc_colorAxis(stops = color_stops()) %>%
hc_tooltip(useHTML = TRUE, headerFormat = "",
pointFormat = "{point.name} has a SFS index of {point.SUFS}")
### =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ###
### Clustering methodologies ###
### =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ###
# Exploratory clustering of countries (rows keyed by ISO3 code).
# NOTE(review): complete_data1 is created earlier in the script, outside
# this chunk; column 1 is assumed to be ISO3 -- confirm upstream.
rownames(complete_data1) <- complete_data1$ISO3
# Calculate similarity measure: Gower dissimilarity (handles mixed
# variable types), converted to a similarity in [0, 1] via 1 - d.
sfs_dis <- cluster::daisy(x = complete_data1[,-1], metric = c("gower"), stand = FALSE)
sfs_dis <- 1 - as.matrix(sfs_dis)
# Do cluster analysis: PCA followed by hierarchical clustering on the
# principal components; nb.clust = -1 lets HCPC choose the cut level.
sfs_pca <- FactoMineR::PCA(X = complete_data1[,-1], scale.unit = T, graph = F)
sfs_hpc <- FactoMineR::HCPC(res = sfs_pca, nb.clust = -1, graph = F)
complete_data1$cluster <- sfs_hpc$data.clust$clust
# Visualize using networkD3: keep the upper triangle only (each pair
# once), reshape to a from/to/similarity edge list.
sfs_dis[lower.tri(sfs_dis, diag = TRUE)] <- NA
sfs_dis <- na.omit(data.frame(as.table(sfs_dis))); names(sfs_dis) <- c("from", "to", "similarity")
sfs_dis <- sfs_dis[sfs_dis$similarity >= .98,] # Filter: keep pairs with at least 0.98 similarity
gD <- igraph::simplify(igraph::graph.data.frame(sfs_dis, directed = FALSE))
nodeList <- data.frame(id = c(0:(igraph::vcount(gD) - 1)), name = igraph::V(gD)$name) # because networkD3 library requires IDs to start at 0
getNodeID <- function(x){ which(x == igraph::V(gD)$name) - 1 } # to ensure that IDs start at 0
edgeList <- plyr::ddply(sfs_dis, .variables = c("from", "to", "similarity"),
function (x) data.frame(fromID = getNodeID(x$from),
toID = getNodeID(x$to)))
nodeList <- cbind(nodeList, nodeDegree = igraph::degree(gD, v = igraph::V(gD), mode = "all")); rm(gD, getNodeID)
# Color nodes by the HCPC cluster assignment found above.
nodeList$cluster <- as.numeric(as.character(complete_data1$cluster))[match(nodeList$name, complete_data1$ISO3)]
networkD3::forceNetwork(Links = edgeList,
Nodes = nodeList,
Source = "fromID",
Target = "toID",
Value = "similarity",
NodeID = "name",
Group = "cluster",
opacity = 1,
fontSize = 15)
# Dynamic time warping (DTW) distance between country profiles.
sfs_dtw <- TSclust::diss(as.matrix(complete_data1[,-1]), METHOD = "DTWARP", normalize = TRUE)
sfs_dtw_matrix <- as.matrix(sfs_dtw)
sfs_dtw_matrix[lower.tri(sfs_dtw_matrix, diag = TRUE)] <- NA
sfs_dtw_matrix <- na.omit(data.frame(as.table(sfs_dtw_matrix))); names(sfs_dtw_matrix) <- c("Origin", "Recipient", "Similarity")
sfs_dtw_matrix2 <- sfs_dtw_matrix
# Keep only close pairs (DTW distance <= 2000; threshold is ad hoc).
sfs_dtw_matrix2 <- sfs_dtw_matrix2[sfs_dtw_matrix2$Similarity <= 2000,]
### Same networkD3 graph-building pattern as above, now on DTW edges.
gD <- igraph::simplify(igraph::graph.data.frame(sfs_dtw_matrix2, directed=FALSE))
nodeList <- data.frame(ID = c(0:(igraph::vcount(gD) - 1)), # because networkD3 library requires IDs to start at 0
nName = igraph::V(gD)$name)
# Map node names from the edge list to node IDs
getNodeID <- function(x){
which(x == igraph::V(gD)$name) - 1 # to ensure that IDs start at 0
}
# And add them to the edge list
edgeList <- plyr::ddply(sfs_dtw_matrix2, .variables = c("Origin", "Recipient", "Similarity"),
function (x) data.frame(SourceID = getNodeID(x$Origin),
TargetID = getNodeID(x$Recipient)))
nodeList <- cbind(nodeList, nodeDegree=igraph::degree(gD, v = igraph::V(gD), mode = "all"))
networkD3::forceNetwork(Links = edgeList, Nodes = nodeList, Source = "SourceID",
Target = "TargetID", Value = "Similarity", NodeID = "nName",
Group = "nodeDegree", opacity = 0.4)
# Alternative interactive rendering with visNetwork.
# NOTE(review): "type.label" is not a column created above -- the
# selectedBy dropdown will be empty; confirm intent.
library(visNetwork)
visNetwork(nodeList, edgeList) %>%
visOptions(highlightNearest = TRUE,
selectedBy = "type.label")
edges <- edgeList[,4:5]; names(edges) <- c("from", "to")
nodes <- data.frame(id = nodeList[,1])
library(visNetwork)
visNetwork(nodes, edges) %>%
visOptions(highlightNearest = TRUE)
### Quick-look diagnostics on the DTW distances.
library("corrplot")
corrplot(as.matrix(sfs_dtw), is.corr = FALSE, method = "color", type = "upper")
plot(hclust(sfs_dtw, method = "ward.D2"))
library(igraph)
head(get.data.frame(as.matrix(sfs_dtw)))
# NOTE(review): data_test is not defined in this chunk; get_dist /
# fviz_* presumably come from factoextra -- confirm it is loaded.
res_dist <- get_dist(data_test[,-1], stand = TRUE, method = "pearson")
fviz_dist(dist.obj = res_dist, gradient = list(low = "#00AFBB", mid = "white", high = "#FC4E07"))
# Using k-means: pick cluster count via the gap statistic, then assess
# clustering tendency (Hopkins statistic) on scaled data.
fviz_nbclust(data_test[,-1], kmeans, method = "gap_stat")
get_clust_tendency(scale(data_test[,-1]), n = 50,
gradient = list(low = "steelblue", high = "white"))
|
dd580a3ecb9abe0e4b1d8f66cc869a74acc7e71a
|
2195c8ecffc27de3461974f8cea350313dcdb969
|
/cachematrix.R
|
22e015e1f8200746cdc8e09aa5a238aff8d7c595
|
[] |
no_license
|
tbudurovych/ProgrammingAssignment2
|
bd7368e353b09cdff2daf26e656d367f83089ecb
|
7e44d628b301a31c68dd43d6aac3aa6157f32b60
|
refs/heads/master
| 2021-01-21T07:25:57.403977
| 2014-09-16T09:22:36
| 2014-09-16T09:22:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,092
|
r
|
cachematrix.R
|
## Functions below allow computing of the matrix's inverse
## and caching the result for subsequent calls
## Function takes a matrix as an input and returns a special
## cached matrix structure (list)
## Build a caching wrapper around a matrix.
##
## Returns a list of four closures sharing one enclosing environment, so
## an inverse computed once (via cacheSolve) can be reused later:
##   set(y)      -- replace the stored matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setInv(inv) -- store a computed inverse
##   getInv()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## A new matrix invalidates the previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInv <- function(inverted) {
    cached_inverse <<- inverted
  }
  getInv <- function() {
    cached_inverse
  }
  ## The returned structure: same element names as the original API.
  list(set = set,
       get = get,
       setInv = setInv,
       getInv = getInv)
}
## Function computes an inverse of the matrix.
## It takes a cached matrix (as returned by makeCacheMatrix) as an input.
## Return the inverse of a "cache matrix" created by makeCacheMatrix(),
## computing it only on the first call.
##
## On a cache miss the inverse is computed with solve(), stored via
## setInv() for subsequent calls, and returned; on a hit the stored
## inverse is returned directly. A message reports which path was taken.
## Extra arguments in ... are accepted for compatibility but unused.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInv()
  if (is.null(cached)) {
    message("computing inverse")
    cached <- solve(x$get())
    x$setInv(cached)
    return(cached)
  }
  message("getting from cache")
  cached
}
|
d2a4f4979e951ac71423894fc375dd6b5714bd99
|
6b579170717f7671b1b06f6e814a602d127c601b
|
/R/kappa4al.R
|
ae46c2aefc92a362249ef958c76b7171bbe77763
|
[] |
no_license
|
mtloots/alR
|
d8e699c7dadaf9a7abff4db0a21b46522319a069
|
1fc6a3b30ee177b0e22dcdb5ecebae6bfc567091
|
refs/heads/master
| 2021-04-22T06:46:51.423304
| 2018-03-15T09:25:14
| 2018-03-15T09:25:14
| 59,803,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,514
|
r
|
kappa4al.R
|
#' Sigmoidal curve fitting.
#'
#' A framework for arc length fitting of the four-parameter kappa sigmoidal function.
#'
#' @param formula An LHS ~ RHS formula, specifying the linear model to be estimated.
#' @param data A data.frame which contains the variables in \code{formula}.
#' @param lower A vector of lower constraints for the parameters to be estimated; defaults to c(0, -5, -5).
#' @param upper A vector of upper constraints for the parameters to be estimated; defaults to c(10, 1, 1).
#' @param q1,q2 Numeric vectors, for the lower and upper bounds of the intervals over which arc lengths are to be computed.
#' @param tol Error tolerance level; defaults to 1e-15.
#' @param maxiter The maximum number of iterations allowed; defaults to 50000.
#' @param ... Arguments to be passed on to the differential evolution function \code{\link{JDEoptim}}.
#'
#' @return A generic S3 object with class kappa4al.
#' @importFrom stats coef fitted model.frame model.matrix model.response printCoefmat var
#' @importFrom DEoptimR JDEoptim
#'
#' @export
kappa4al <- function(formula, data=list(), lower, upper, q1, q2, tol, maxiter, ...) UseMethod("kappa4al")
#' @describeIn kappa4al default method for kappa4al.
#'
#' @return kappa4al.default: A list with all components from \code{\link{JDEoptim}}, as well as:
#' \itemize{
#' \item intercept: Did the model contain an intercept TRUE/FALSE?
#' \item coefficients: A vector of estimated coefficients.
#' \item error: The value of the objective function.
#' \item fitted.values: A vector of estimated values.
#' \item residuals: The residuals resulting from the fitted model.
#' \item call: The call to the function.
#' \item ALFHat: Arc length segments of the empirical CDF (calculated from data).
#' \item ALF: Arc length segments of the CDF of the four-parameter kappa distribution (theoretical).
#' \item p1: The vector of sample quantiles in the data corresponding to \code{q1}.
#' \item p2: The vector of sample quantiles in the data corresponding to \code{q2}.
#' }
#'
#' @examples
#' k <- kappa4tc(-4, 0, 1)$par
#' x <- seq(qkappa4(0, 4, 0.4, -4, k), qkappa4(0.7, 4, 0.4, -4, k), length.out=100)
#' y <- sapply(x, function(i) pkappa4(i, 4, 0.4, -4, k))
#' kappa4al.default(y~x, q1=c(0.025, 0.5), q2=c(0.5, 0.975), tol=1e-5)
#'
#' @export
kappa4al.default <- function(formula, data=list(), lower=c(0, -5, -5), upper=c(10, 1, 1), q1, q2, tol=1e-15, maxiter=50000, ...)
{
# Extract response and predictor through the formula machinery.
mf <- model.frame(formula=formula, data=data)
X <- model.matrix(attr(mf, "terms"), data=mf)
y <- model.response(mf)
intercept <- if(attr(attr(mf, "terms"), "intercept") == 1) TRUE else FALSE
# With an intercept, column 1 of the design matrix is the constant and
# the (single) predictor sits in column 2; otherwise it is column 1.
if(intercept)
{
x <- X[,2]
}
else
{
x <- X[,1]
}
# Sample quantile positions: for each requested probability, take the
# x value just past the point where y/max(y) exceeds it.
p1 <- sapply(1:length(q1), function(i) x[length(which((y/max(y)) <= q1[i]))+1])
p2 <- sapply(1:length(q2), function(i) x[length(which((y/max(y)) <= q2[i]))+1])
# Empirical arc-length segments of the normalised curve over [p1, p2]
# (kappa4IntApprox2 is defined elsewhere in this package).
al_samp <- kappa4IntApprox2(x, y/max(y), p1, p2, FALSE)
# Constrained differential evolution over (sigma, h, k); objective and
# constraints (meq=2 equalities) come from kappa4ALobj / kappa4ALcon.
al <- DEoptimR::JDEoptim(lower=lower, upper=upper, fn=kappa4ALobj, constr=kappa4ALcon, meq=2, tol=tol, maxiter=maxiter, al_samp=al_samp, x_min=min(x), x_max=max(x), q1=p1, q2=p2)
al$intercept <- intercept
k <- al$par[3]
# Recover the location parameter mu by offsetting min(x); the h <= 0
# and h > 0 branches use different endpoint formulas.
# NOTE(review): presumably these are the lower support endpoints of the
# four-parameter kappa distribution -- confirm against qkappa4/pkappa4.
if(al$par[2] <= 0)
{
mu <- min(x)-(al$par[1]/k)
}
else
{
mu <- min(x)-al$par[1]*(1-(al$par[2])^(-k))/k
}
al$coefficients <- c(mu, al$par)
names(al$coefficients) <- c("mu", "sigma", "h", "k")
al$error <- al$value
# Fitted values: kappa4 CDF normalised by its value at max(x), putting
# the fit on the same [0, 1] scale as y/max(y).
al$fitted.values <- sapply(x, function(i) pkappa4(i, mu, al$par[1], al$par[2], k))/pkappa4(max(x), mu, al$par[1], al$par[2], k)
al$residuals <- (y/max(y))-al$fitted.values
al$call <- match.call()
al$ALFHat <- al_samp
# Theoretical arc-length segments of the fitted CDF over the same intervals.
al$ALF <- kappa4Int2(mu, al$par[1], al$par[2], k, pkappa4(max(x), mu, al$par[1], al$par[2], k), p1, p2, FALSE)
al$p1 <- p1
al$p2 <- p2
class(al) <- "kappa4al"
al
}
#' @describeIn kappa4al print method for kappa4al.
#'
#' @param x A kappa4al object.
#'
#' @export
## S3 print method for "kappa4al" objects: shows the original call
## followed by the estimated coefficients (5 significant digits).
print.kappa4al <- function(x, ...)
{
  show_section <- function(title, value, ...) {
    cat("\n", title, ":\n", sep = "")
    print(value, ...)
  }
  show_section("Call", x$call)
  show_section("Coefficients", x$coefficients, digits = 5)
}
#' @describeIn kappa4al summary method for kappa4al.
#'
#' @param object A kappa4al object.
#'
#' @return summary.kappa4al: A list of class summary.kappa4al with the following components:
#' \itemize{
#' \item call: Original call to the \code{kappa4al} function.
#' \item coefficients: A vector with parameter estimates.
#' \item arclengths: A matrix of the arc length segments of the dependent and independent variables that were matched.
#' \item r.squared: The \eqn{r^{2}} coefficient.
#' \item sigma: The residual standard error.
#' \item error: Value of the objective function.
#' \item residSum: Summary statistics for the distribution of the residuals.
#' }
#'
#' @export
## S3 summary method for "kappa4al" objects.
##
## Packages the coefficient estimates, the matched theoretical/sample
## arc-length segments, an R-squared, the residual L2 norm, and a
## condensed residual summary into a "summary.kappa4al" object.
summary.kappa4al <- function(object, ...)
{
  ## Coefficients as a one-column matrix labelled "Estimate".
  est <- matrix(coef(object), ncol = 1,
                dimnames = list(names(object$coefficients), "Estimate"))
  ## Theoretical vs sample arc lengths over each [p1, p2] interval.
  segs <- cbind(object$ALF, object$ALFHat)
  rownames(segs) <- paste0("[", round(object$p1, 5), ", ",
                           round(object$p2, 5), "]")
  colnames(segs) <- c("Theoretical", "Sample")
  ## R-squared via residual variance against the reconstructed response.
  observed <- object$residuals + object$fitted.values
  rsq <- 1 - var(object$residuals) / var(observed)
  out <- list(call = object$call,
              coefficients = est,
              arclengths = segs,
              r.squared = rsq,
              sigma = sqrt(sum(object$residuals^2)),
              error = object$error,
              residSum = summary(object$residuals, digits = 5)[-4])
  class(out) <- "summary.kappa4al"
  out
}
#' @describeIn kappa4al print method for summary.kappa4al.
#'
#' @return print.summary.kappa4al: The object passed to the function is returned invisibly.
#'
#' @export
## S3 print method for "summary.kappa4al" objects.
##
## Prints, in order: the call, the residual summary, the matched arc
## lengths, a formatted coefficient table, and a footer with the
## residual standard error, R-squared and objective-function value.
## Returns its argument invisibly, as print methods conventionally do.
print.summary.kappa4al <- function(x, ...)
{
  labelled <- function(label, value) {
    cat("\n", label, ":\n", sep = "")
    print(value)
  }
  labelled("Call", x$call)
  labelled("Residuals", x$residSum)
  labelled("Kappa4 CDF Arc Lengths", x$arclengths)
  cat("\n")
  printCoefmat(x$coefficients, P.values = FALSE, has.Pvalue = FALSE)
  ## Footer formatting mirrors print.summary.lm conventions.
  digits <- max(3, getOption("digits") - 3)
  cat("\nResidual standard error: ", formatC(x$sigma, digits = digits), sep = "")
  cat("\nMultiple R-squared: ", formatC(x$r.squared, digits = digits), sep = "")
  cat("\tValue of objective function: ", formatC(x$error, digits = digits, format = "f"), "\n", sep = "")
  invisible(x)
}
#' @describeIn kappa4al formula method for kappa4al.
#' @export
kappa4al.formula <- function(formula, data=list(), lower=c(0, -5, -5), upper=c(10, 1, 1), q1, q2, tol=1e-15, maxiter=50000, ...)
{
## Formula-interface wrapper: delegates the actual fit to
## kappa4al.default, then records the call, the formula, and the
## intercept flag on the returned object.
##
## Fix: the original body also computed model.matrix() and
## model.response() into locals `x` and `y` that were never used;
## that dead work has been removed. The model frame is still needed
## to read the intercept attribute below.
mf <- model.frame(formula=formula, data=data)
al <- kappa4al.default(formula, data=data, lower=lower, upper=upper, q1=q1, q2=q2, tol=tol, maxiter=maxiter, ...)
al$call <- match.call()
al$formula <- formula
## Used later by predict.kappa4al() to locate the predictor column.
al$intercept <- attr(attr(mf, "terms"), "intercept")
al
}
#' @describeIn kappa4al predict method for kappa4al.
#'
#' @param newdata The data on which the estimated model is to be fitted.
#'
#' @return predict.kappa4al: A vector of predicted values resulting from the estimated model.
#'
#' @examples
#' u <- seq(qkappa4(0.1, 4, 0.4, -4, k), qkappa4(0.8, 4, 0.4, -4, k), length.out=100)
#' v <- sapply(u, function(i) pkappa4(i, 4, 0.4, -4, k))
#' al <- kappa4al(y~x, q1=c(0.025, 0.5), q2=c(0.5, 0.975), tol=1e-5)
#' predict(al, newdata=data.frame(y=v, x=u))
#'
#' @export
## S3 predict method for "kappa4al" objects.
##
## With newdata = NULL the in-sample fitted values are returned.
## Otherwise the predictor column is extracted from newdata (via the
## stored formula when the object was fitted through the formula
## interface) and the fitted four-parameter kappa CDF is evaluated at
## each point, normalised by its value at the largest predictor so the
## predictions lie on the same [0, 1] scale as the fit.
predict.kappa4al <- function(object, newdata = NULL, ...)
{
  if (is.null(newdata)) {
    return(fitted(object))
  }
  ## Build the design matrix; fall back to raw newdata when no formula
  ## was stored (object fitted via the default method).
  design <- if (!is.null(object$formula)) {
    model.matrix(object$formula, newdata)
  } else {
    newdata
  }
  ## With an intercept, column 1 is the constant; the predictor follows.
  pred <- if (object$intercept) design[, 2] else design[, 1]
  theta <- coef(object)
  cdf_at <- function(v) pkappa4(v, theta[1], theta[2], theta[3], theta[4])
  sapply(pred, cdf_at) / cdf_at(max(pred))
}
|
9068a8068cd01a27f64957f5ee435c498c5f44ea
|
5e20ff0c89ef4ab6399472e45d729a34bbd4e840
|
/plot1.R
|
aeb87c10adfdcf027c4e715f13633ceb83350d17
|
[] |
no_license
|
tr3010/ExData_Courseproject_2
|
0f68138ae36d70231ab7cd3603b0c3c5789c6899
|
19a81eff1fef01001204ab3fb47e49affd1a914a
|
refs/heads/master
| 2021-01-19T18:11:30.093483
| 2014-07-26T02:48:02
| 2014-07-26T02:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
plot1.R
|
# Read data files
# NEI: PM2.5 emission records; SCC: source classification code lookup.
# NOTE(review): SCC is loaded but not used in this chunk -- presumably
# needed by the other plots in this assignment.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Have total PM2.5 emissions in the US decreased from 1999 to
# 2008?
# Sum emissions over all sources within each survey year.
total.emissions <- aggregate(Emissions ~ year, NEI, sum)
# Render the yearly totals as a bar chart into plot1.png.
png('plot1.png')
barplot(height=total.emissions$Emissions, names.arg=total.emissions$year,
xlab="Year", ylab=expression('PM'[2.5]*' emission'),
main=expression('Total US PM'[2.5]*' Emission 1999 - 2008'))
dev.off()
|
7375b597150ac75ca0b569270b673266f50ecc97
|
7bb21189354bf72b2e8aeeb9f0e4340e69ed2913
|
/man/plot.Patterns.Rd
|
fea0a002bbeabda1a54cafcc0417ebfbaaa86dea
|
[] |
no_license
|
elvanceyhan/pcds
|
16371849188f98138933afd2e68a46167f674923
|
00331843a0670e7cd9a62b7bca70df06d4629212
|
refs/heads/master
| 2023-07-02T10:03:48.702073
| 2023-06-16T15:50:46
| 2023-06-16T15:50:46
| 218,353,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,144
|
rd
|
plot.Patterns.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClassFunctions.R
\name{plot.Patterns}
\alias{plot.Patterns}
\title{Plot a \code{Patterns} \code{object}}
\usage{
\method{plot}{Patterns}(x, asp = NA, xlab = "x", ylab = "y", ...)
}
\arguments{
\item{x}{Object of class \code{Patterns}.}
\item{asp}{A \code{numeric} value,
giving the aspect ratio for \eqn{y}-axis to \eqn{x}-axis \eqn{y/x}
(default is \code{NA}),
see the official help for \code{asp} by typing "\code{? asp}".}
\item{xlab, ylab}{Titles for the \eqn{x} and \eqn{y} axes,
respectively (default is \code{xlab="x"} and \code{ylab="y"}).}
\item{\dots}{Additional parameters for \code{plot}.}
}
\value{
None
}
\description{
Plots the points generated from the pattern
(color coded for each class) together with the
study window
}
\examples{
\dontrun{
nx<-10; #try also 100 and 1000
ny<-5; #try also 1
e<-.15;
Y<-cbind(runif(ny),runif(ny))
#with default bounding box (i.e., unit square)
Xdt<-rseg.circular(nx,Y,e)
Xdt
plot(Xdt,asp=1)
}
}
\seealso{
\code{\link{print.Patterns}},
\code{\link{summary.Patterns}},
and \code{\link{print.summary.Patterns}}
}
|
ced3d7887b67050aeb0792f6c6fa3895f4622b5d
|
ca17e23492efd1a8cf1af9ad9d950c62d0364e94
|
/R/global_local_tests.R
|
5766f946e74ad09f5a1c9eb79d0a042c1b7a7371
|
[] |
no_license
|
thibautjombart/adegenet
|
a8b7f535510687dbf29c81f6da96eeacb442344e
|
bb8b9e89674adf55993d7d5abc5995485b38b8c9
|
refs/heads/master
| 2023-02-07T11:36:19.650854
| 2023-01-28T00:12:40
| 2023-01-28T00:12:40
| 31,032,458
| 169
| 70
| null | 2023-01-28T00:12:41
| 2015-02-19T19:25:14
|
R
|
UTF-8
|
R
| false
| false
| 1,963
|
r
|
global_local_tests.R
|
########################################
#
# Tests for global and local structures
#
# Thibaut Jombart 2007
# t.jombart@imperial.ac.uk
#
########################################
###############
# global.rtest
###############
## Monte Carlo test for global spatial structure (Jombart 2007).
##
## X is standardised (scalewt) and projected onto the "global" part of
## the orthobasis of the spatial weights listw -- the eigenvectors whose
## eigenvalue exceeds -1/(n-1). The statistic is the sum of the k
## largest column means of the squared projections; its null
## distribution is obtained from nperm row permutations of X.
## Returns an ade4 "randtest" object (one-sided, alter = "greater").
global.rtest <- function(X, listw, k=1, nperm=499){
  if (!inherits(listw, "listw")) stop("object of class 'listw' expected")
  if (listw$style != "W") stop("object of class 'listw' with style 'W' expected")
  if (any(is.na(X))) stop("NA entries in X")
  n <- nrow(X)
  X <- scalewt(X)
  ## Computation of U+: keep eigenvectors with eigenvalue > -1/(n-1).
  basis <- .orthobasis.listw(listw)
  eigenvalues <- attr(basis, "values")
  Upos <- as.matrix(basis)[, eigenvalues > -1/(n-1)]
  ## Test statistic on a (possibly permuted) data matrix Z.
  calcstat <- function(Z, k){
    R2 <- ((t(Z) %*% Upos) / n)^2
    sum(sort(apply(R2, 2, mean), decreasing=TRUE)[1:k])
  }
  obs <- calcstat(X, k)
  ## Same RNG call pattern as the original (one sample(1:n) per draw),
  ## so results under a fixed seed are unchanged.
  sim <- vapply(seq_len(nperm), function(i) calcstat(X[sample(1:n), ], k), numeric(1))
  res <- as.randtest(sim=sim, obs=obs, alter="greater")
  res$call <- match.call()
  res
} #end global.rtest
###############
# local.rtest
###############
## Monte Carlo test for local spatial structure (Jombart 2007).
##
## X is standardised (scalewt) and projected onto the "local" part of
## the orthobasis of the spatial weights listw -- the eigenvectors whose
## eigenvalue is below -1/(n-1) (negative spatial autocorrelation). The
## statistic is the sum of the k largest column means of the squared
## projections, compared against nperm row permutations of X.
## Returns an ade4 "randtest" object (one-sided, alter = "greater").
##
## Fix: the original body called `X <- scalewt(X)` twice; the second
## call is a no-op (scalewt output is already centred and scaled, so
## re-standardising returns the same matrix) and has been removed.
local.rtest <- function(X, listw, k=1, nperm=499){
if (!inherits(listw, "listw")) stop("object of class 'listw' expected")
if (listw$style != "W") stop("object of class 'listw' with style 'W' expected")
if(any(is.na(X))) stop("NA entries in X")
n <- nrow(X)
X <- scalewt(X)
# computation of U-: eigenvectors with eigenvalue < -1/(n-1)
temp <- .orthobasis.listw(listw)
val <- attr(temp,"values")
U <- as.matrix(temp)
Uneg <- U[,val < -1/(n-1)]
# test statistic: sum of the k largest column means of squared projections
calcstat <- function(X,k){
R <- ( t(X) %*% Uneg ) / n
R2 <- R*R
temp <- sort(apply(R2,2,mean),decreasing=TRUE)
stat <- sum(temp[1:k])
return(stat)
}
ini <- calcstat(X,k)
sim <- sapply(1:nperm, function(i) calcstat( X[sample(1:n),], k ) )
res <- as.randtest(sim=sim, obs=ini, alter="greater")
res$call <- match.call()
return(res)
} #end local.rtest
|
475499957e9b6b95ce610afdbdd9267dba32bad2
|
794d8361259d4a057abe2409dc735d67e4a3b902
|
/plot1.R
|
afe0d1d6a459f0b759647caec5f8bfd88d971a30
|
[] |
no_license
|
hkdevgit/ExData_Plotting1
|
64370ecb0092a117e964110fe5f78f31c2e18d5c
|
e1e94112eef6545a3c6940c11047e5523d8beae5
|
refs/heads/master
| 2020-12-01T01:10:39.153224
| 2015-02-08T18:47:04
| 2015-02-08T18:47:04
| 30,468,975
| 0
| 0
| null | 2015-02-07T20:23:37
| 2015-02-07T20:23:36
| null |
UTF-8
|
R
| false
| false
| 789
|
r
|
plot1.R
|
## The following function reads in energy consumption data from
## UC Irvin Machine Learning data set to plot a histogram of the
## Global_active_power for the 2nd day of February 2007
## Plot a red histogram of Global_active_power for 1-2 Feb 2007 from the
## UCI "Individual household electric power consumption" data set and
## save it to plot1.png (480x480).
##
## Expects household_power_consumption.txt in the working directory.
## Only the first 129600 rows are read (60 min/h * 24 h/day * 90 days),
## "?" is treated as NA, rows with any NA are dropped, then the two
## target dates are kept.
plot1 <- function()
{
  power <- read.csv("household_power_consumption.txt",
                    sep = ";", header = TRUE, nrows = 129600,
                    na.strings = "?", stringsAsFactors = FALSE)
  power <- na.omit(power)
  feb_days <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
  ## Plot the data into PNG file
  png(filename = "plot1.png", width = 480, height = 480)
  hist(as.numeric(feb_days$Global_active_power),
       breaks = 12, col = "red",
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)",
       ylab = "Frequency")
  dev.off()
}
|
1811db688f241513ca9bc74d43e3dc69d66c35cb
|
81b906cf46e8033d2c3d33e00b639855bbbb4a3b
|
/man/mod_colors.Rd
|
c1c45bfe887ea60786dd75dd91a98ef1d426b3cd
|
[] |
no_license
|
csbl-usp/CEMiTool
|
f865e36a5e544b7bce5719afc822e2b669412c59
|
194342d0ed46d8e8432afd2972ac7e8b341802d0
|
refs/heads/master
| 2022-12-06T13:14:59.838066
| 2022-10-17T20:25:11
| 2022-10-17T20:25:11
| 76,597,565
| 24
| 9
| null | null | null | null |
UTF-8
|
R
| false
| true
| 766
|
rd
|
mod_colors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cemitool.R
\name{mod_colors}
\alias{mod_colors}
\alias{mod_colors,CEMiTool-method}
\alias{mod_colors<-}
\alias{mod_colors<-,CEMiTool,character-method}
\title{Retrieve and set mod_colors attribute}
\usage{
mod_colors(cem)
\S4method{mod_colors}{CEMiTool}(cem)
mod_colors(cem) <- value
\S4method{mod_colors}{CEMiTool,character}(cem) <- value
}
\arguments{
\item{cem}{Object of class \code{CEMiTool}}
\item{value}{a character vector containing colors for each module.
Names should match with module names}
}
\value{
A vector with color names.
}
\description{
Retrieve and set mod_colors attribute
}
\examples{
# Get example CEMiTool object
data(cem)
# See module colors
mod_colors(cem)
}
|
862504e3ca56dea5d8c2c1f542967530766ad1a9
|
56ad167c5911c6c300157c7cfff35432b983b7b9
|
/tests/testthat.R
|
ded96953a3dfbb21f49dc34f6e113fbfc42db48e
|
[
"MIT"
] |
permissive
|
schochastics/graphlayouts
|
95d3cabe2802aef0da56f7a9b74e4bf188da064e
|
7782674ea39d78629a69a934974b179fc6bb079a
|
refs/heads/main
| 2023-07-15T09:03:29.493573
| 2023-06-30T08:02:53
| 2023-06-30T08:02:53
| 148,535,149
| 227
| 16
|
NOASSERTION
| 2023-04-29T08:06:40
| 2018-09-12T20:03:02
|
R
|
UTF-8
|
R
| false
| false
| 68
|
r
|
testthat.R
|
# Standard testthat entry point: load the package under test, then run
# every test file under tests/testthat/.
library(testthat)
library(graphlayouts)
test_check("graphlayouts")
|
29acb917ef0ea310b374db53bc760521d93c2939
|
1df94173dc9d57962bdc9f046533e4f4f87e04dd
|
/Nigeria/DHS/1_variables_scripts/fever/med_fever_raw.R
|
164adf473d42a2bf90ad30514dfafd4c99a0298e
|
[
"MIT"
] |
permissive
|
numalariamodeling/hbhi-dhs
|
151b12be77e326a796ffadded3e248a021ad47bb
|
e9db197868dbcc4a74f290cacf70d3914451bfaf
|
refs/heads/master
| 2023-05-31T14:21:08.885388
| 2023-05-22T21:23:07
| 2023-05-22T21:23:07
| 253,842,546
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,417
|
r
|
med_fever_raw.R
|
### med_fever_raw: share of under-5 children with fever who received
### medical treatment (DHS/MIS variable h32z), estimated by Nigerian
### state for each survey year, then mapped.
### NOTE(review): relies on helpers defined elsewhere in this project --
### recoder(), dataclean(), svydesign.fun(), result.fun(), tmap.fun4() --
### and on objects NGAfiles, key_list and admin1shp_sf built upstream.
# Inspect which variables mention "fever" and the raw h32z codings.
look_for(NGAfiles[[11]], "fever")
table(NGAfiles[[2]]$h32z) #1990
table(NGAfiles[[5]]$h32z) #2003
table(NGAfiles[[8]]$h32z) #2008
table(NGAfiles[[11]]$h32z)#2010
table(NGAfiles[[14]]$h32z)#2013
table(NGAfiles[[17]]$h32z)#2015
table(NGAfiles[[20]]$h32z)#2018
# list for 1990, 2003, 2008, 2010, 2013, 2015, 2018
h32z.list <- list(NGAfiles[[2]],NGAfiles[[5]],NGAfiles[[8]],NGAfiles[[11]],NGAfiles[[14]],NGAfiles[[17]],
NGAfiles[[20]])
# recoding U5 medical treatment using h32z
# (only the 2013 and 2010 surveys are recoded -- presumably the other
# years already use the target coding; confirm against the tables above)
# 2013
h32z.list[[5]][,"h32z"] <-recoder(h32z.list[[5]][,"h32z"])
table(h32z.list[[5]][,"h32z"])
# 2010
h32z.list[[4]][,"h32z"] <-recoder(h32z.list[[4]][,"h32z"])
table(h32z.list[[4]][,"h32z"])
# key datasets and dhs/mis datasets are joined
h32z.list <-map2(h32z.list,key_list, left_join) #medfever datasets
#data cleaning
# Per-year pattern repeated below for each survey: clean the h32z column
# into med_fever_raw, build a survey design, estimate the state-level
# indicator, then translate labelled state codes (sstate) into readable
# ADM1_NAME values (with per-year spelling fixes for joins to the
# shapefile).
# 2018
h32z.list[[7]]<-dataclean(h32z.list[[7]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd18 <- svydesign.fun(h32z.list[[7]])
h32z_pre_18 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd18)
head(h32z_pre_18)
iLabels_18 <- val_labels(h32z.list[[7]]$sstate)
match.idx_18 <- match(h32z_pre_18$sstate, iLabels_18)
h32z_pre_18$ADM1_NAME <- ifelse(is.na(match.idx_18),
h32z_pre_18$ADM1_NAME,
names(iLabels_18)[match.idx_18])
h32z_pre_18$ADM1_NAME <- str_to_title(h32z_pre_18$ADM1_NAME)
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
# 2015
h32z.list[[6]]<-dataclean(h32z.list[[6]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd15 <- svydesign.fun(h32z.list[[6]])
h32z_pre_15 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd15)
head(h32z_pre_15)
iLabels_15 <- val_labels(h32z.list[[6]]$sstate)
match.idx_15 <- match(h32z_pre_15$sstate, iLabels_15)
h32z_pre_15$ADM1_NAME <- ifelse(is.na(match.idx_15),
h32z_pre_15$ADM1_NAME,
names(iLabels_15)[match.idx_15])
h32z_pre_15$ADM1_NAME <- str_to_title(h32z_pre_15$ADM1_NAME)
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
# 2013
h32z.list[[5]]<-dataclean(h32z.list[[5]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd13 <- svydesign.fun(h32z.list[[5]])
h32z_pre_13 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd13)
head(h32z_pre_13)
iLabels_13 <- val_labels(h32z.list[[5]]$sstate)
match.idx_13 <- match(h32z_pre_13$sstate, iLabels_13)
h32z_pre_13$ADM1_NAME <- ifelse(is.na(match.idx_13),
h32z_pre_13$ADM1_NAME,
names(iLabels_13)[match.idx_13])
h32z_pre_13$ADM1_NAME <- str_to_title(h32z_pre_13$ADM1_NAME)
h32z_pre_13 <- h32z_pre_13%>% mutate(ADM1_NAME = dplyr::recode(ADM1_NAME,"Fct-Abuja" = "Fct Abuja"))
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
# 2010
h32z.list[[4]]<-dataclean(h32z.list[[4]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd10 <- svydesign.fun(h32z.list[[4]])
h32z_pre_10 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd10)
head(h32z_pre_10)
iLabels_10 <- val_labels(h32z.list[[4]]$sstate)
match.idx_10 <- match(h32z_pre_10$sstate, iLabels_10)
h32z_pre_10$ADM1_NAME <- ifelse(is.na(match.idx_10),
h32z_pre_10$ADM1_NAME,
names(iLabels_10)[match.idx_10])
h32z_pre_10$ADM1_NAME <- str_to_title(h32z_pre_10$ADM1_NAME)
h32z_pre_10 <- h32z_pre_10%>% mutate(ADM1_NAME = dplyr::recode(ADM1_NAME,"Fct-Abuja" = "Fct Abuja"))
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
# 2008
h32z.list[[3]]<-dataclean(h32z.list[[3]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd08 <- svydesign.fun(h32z.list[[3]])
h32z_pre_08 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd08)
head(h32z_pre_08)
iLabels_08 <- val_labels(h32z.list[[3]]$sstate)
match.idx_08 <- match(h32z_pre_08$sstate, iLabels_08)
h32z_pre_08$ADM1_NAME <- ifelse(is.na(match.idx_08),
h32z_pre_08$ADM1_NAME,
names(iLabels_08)[match.idx_08])
h32z_pre_08$ADM1_NAME <- str_to_title(h32z_pre_08$ADM1_NAME)
h32z_pre_08 <- h32z_pre_08%>% mutate(ADM1_NAME = dplyr::recode(ADM1_NAME,"Abuja" = "Fct Abuja"))
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
# 2003
h32z.list[[2]]<-dataclean(h32z.list[[2]], h32z, v005, 'h32z', 'med_fever_raw')
# write.foreign(medfever.list[[6]], "mydata.txt", "med_fever.sas", package="SAS")
h32z.svyd03 <- svydesign.fun(h32z.list[[2]])
h32z_pre_03 <- result.fun('med_fever_raw', 'sstate','num_p', design=h32z.svyd03)
head(h32z_pre_03)
iLabels_03 <- val_labels(h32z.list[[2]]$sstate)
match.idx_03 <- match(h32z_pre_03$sstate, iLabels_03)
h32z_pre_03$ADM1_NAME <- ifelse(is.na(match.idx_03),
h32z_pre_03$ADM1_NAME,
names(iLabels_03)[match.idx_03])
h32z_pre_03$ADM1_NAME <- str_to_title(h32z_pre_03$ADM1_NAME)
h32z_pre_03 <- h32z_pre_03%>%mutate(ADM1_NAME = dplyr::recode(ADM1_NAME,"Nassarawa" = "Nasarawa",
"Zamfora" = "Zamfara", "Abuja (Fct)" = "Fct Abuja"))
# write.csv(medfev_pre_18, "results/medfev_pre_18.csv")
#Maps
# Join each year's state estimates onto the admin-1 shapefile and draw
# a choropleth; all six maps are then arranged on one page and saved.
# 2018 transformations
S_file <- admin1shp_sf%>%left_join(h32z_pre_18)
nga_h32z18 <- tmap.fun4(S_file, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2018)",
"Prevalence", "med_fever_raw")
# 2015 transformations
S_file_15 <- admin1shp_sf%>%left_join(h32z_pre_15)
nga_h32z15 <- tmap.fun4(S_file_15, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2015)",
"Prevalence", "med_fever_raw")
# 2013 transformations
S_file_13 <- admin1shp_sf%>%left_join(h32z_pre_13)
nga_h32z13 <- tmap.fun4(S_file_13, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2013)",
"Prevalence", "med_fever_raw")
# 2010 transformations
S_file_10 <- admin1shp_sf%>%left_join(h32z_pre_10)
nga_h32z10 <- tmap.fun4(S_file_10, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2010)",
"Prevalence", "med_fever_raw")
# 2008 transformations
S_file_08 <- admin1shp_sf%>%left_join(h32z_pre_08)
nga_h32z08 <- tmap.fun4(S_file_08, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2008)",
"Prevalence", "med_fever_raw")
# 2003 transformations
S_file_03 <- admin1shp_sf%>%left_join(h32z_pre_03)
nga_h32z03 <- tmap.fun4(S_file_03, "Receipt of Medical Treatment for Fever in Nigerian States_h32z (2003)",
"Prevalence", "med_fever_raw")
all_h32z <- tmap_arrange(nga_h32z03,nga_h32z08,nga_h32z10,nga_h32z13,nga_h32z15,nga_h32z18)
tmap_save(tm = all_h32z, filename = "results/all_U5_h32z_medfever.pdf",
width=13, height=13, units ="in", asp=0,
paper ="A4r", useDingbats=FALSE)
|
69edaa9dfea8433bec0b5751a96a154fdc3d7879
|
1f2429c4ee1ae623374f6d3fbecbc056b4ac5526
|
/man/validate_single_pos_num.Rd
|
e46482213d469ea97c9d81b3794bc73c453254ef
|
[
"BSD-2-Clause"
] |
permissive
|
nsh87/receptormarker
|
1c47d9a6c461b41afd0a9d30fe7cddffd20e7ffd
|
4f2c1e9e45b2c94da2c4a56f639b9ef1c1fec6dd
|
refs/heads/master
| 2022-08-31T04:25:05.025729
| 2022-08-21T19:34:06
| 2022-08-21T19:34:06
| 35,985,771
| 4
| 9
|
BSD-2-Clause
| 2021-11-30T21:08:12
| 2015-05-21T02:18:12
|
JavaScript
|
UTF-8
|
R
| false
| true
| 427
|
rd
|
validate_single_pos_num.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arg_validation.R
\name{validate_single_pos_num}
\alias{validate_single_pos_num}
\title{Validate that an argument is a single, positive integer}
\usage{
validate_single_pos_num(n)
}
\arguments{
\item{n}{An item to be checked.}
}
\description{
An internal function that raises an error if the argument is not
a positive integer.
}
\keyword{internal}
|
d214dc9cc23be18ee0a3abc3df1f9568e9151329
|
5df9be8f44a4ea1e5f61cd0f42191ba1429c20e3
|
/R/zzz.R
|
f1980cae665b0eca36c8841f899000030ab85f64
|
[] |
no_license
|
ggPMXdevelopment/ggPMX
|
27310d945aed122bf259e95c5c0300011b3e37fc
|
ee65d769ee832779dd0fa5696138df407132734a
|
refs/heads/master
| 2023-08-03T19:41:11.837005
| 2023-05-31T20:53:19
| 2023-05-31T20:53:19
| 181,708,751
| 38
| 12
| null | 2023-07-28T17:47:36
| 2019-04-16T14:41:37
|
R
|
UTF-8
|
R
| false
| false
| 146
|
r
|
zzz.R
|
# Package load hook: point ggPMX's template_dir option at the
# "templates" directory shipped inside the installed package.
.onLoad <- function(libname = find.package("ggPMX"), pkgname = "ggPMX") {
pmxOptions(template_dir = file.path(libname, pkgname, "templates"))
}
|
3b8c135145eac62548e328ae751a20f9a0b5311e
|
fd6c335873f46f710c11c5eff98db316c77536b7
|
/man/PortfolioOptimProjection.Rd
|
2af6db7669ed92ae9f6b1e554b2ac745d175d20f
|
[] |
no_license
|
cran/PortfolioOptim
|
005b67bda2c0f06873715ca9bb7b0761df2845bf
|
cea7d0899e2aecac33c62be4efbd72c749e4ebaf
|
refs/heads/master
| 2021-01-19T22:41:18.803895
| 2019-02-07T11:53:25
| 2019-02-07T11:53:25
| 88,845,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,708
|
rd
|
PortfolioOptimProjection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PortfolioOptimProjection.R
\name{PortfolioOptimProjection}
\alias{PortfolioOptimProjection}
\title{Portfolio optimization which finds an optimal portfolio with the smallest distance to a benchmark.}
\usage{
PortfolioOptimProjection (dat, portfolio_return,
risk=c("CVAR","DCVAR","LSAD","MAD"), alpha=0.95, bvec,
Aconstr=NULL, bconstr=NULL, LB=NULL, UB=NULL, maxiter=500, tol=1e-7)
}
\arguments{
\item{dat}{Time series of returns data; dat = cbind(rr, pk), where \eqn{rr} is an array (time series) of asset returns,
for \eqn{n} returns and \eqn{k} assets it is an array with \eqn{\dim(rr) = (n, k)},
\eqn{pk} is a vector of length \eqn{n} containing probabilities of returns.}
\item{portfolio_return}{Target portfolio return.}
\item{risk}{Risk measure chosen for optimization; one of "CVAR", "DCVAR", "LSAD", "MAD", where
"CVAR" -- denotes Conditional Value-at-Risk (CVaR),
"DCVAR" -- denotes deviation CVaR,
"LSAD" -- denotes Lower Semi Absolute Deviation,
"MAD" -- denotes Mean Absolute Deviation.}
\item{alpha}{Value of alpha quantile used to compute portfolio VaR and CVaR; used also as quantile value for risk measures CVAR and DCVAR.}
\item{bvec}{Benchmark portfolio, a vector of length k; function \code{PortfolioOptimProjection} finds an optimal portfolio with the smallest distance to \code{bvec}.}
\item{Aconstr}{Matrix defining additional constraints, \eqn{\dim (Aconstr) = (m,k)}, where
\eqn{k} -- number of assets, \eqn{m} -- number of constraints.}
\item{bconstr}{Vector defining additional constraints, length (\eqn{bconstr}) \eqn{ = m}.}
\item{LB}{Vector of length k, lower bounds of portfolio weights \eqn{\theta}; warning: condition LB = NULL is equivalent to LB = rep(0, k) (lower bound zero).}
\item{UB}{Vector of length k, upper bounds for portfolio weights \eqn{\theta}.}
\item{maxiter}{Maximal number of iterations.}
\item{tol}{Accuracy of computations, stopping rule.}
}
\value{
PortfolioOptimProjection returns a list with items:
\tabular{llll}{
\code{return_mean} \tab vector of asset returns mean values. \cr
\code{mu} \tab realized portfolio return.\cr
\code{theta} \tab portfolio weights.\cr
\code{CVaR} \tab portfolio CVaR.\cr
\code{VaR} \tab portfolio VaR.\cr
\code{MAD} \tab portfolio MAD.\cr
\code{risk} \tab portfolio risk measured by the risk measure chosen for optimization.\cr
\code{new_portfolio_return} \tab modified target portfolio return; when the original target portfolio return \cr
\code{ } \tab is too high for the problem, the optimization problem is solved for \cr
\code{ } \tab new_portfolio_return as the target return. \cr
}
}
\description{
PortfolioOptimProjection is a linear program for financial portfolio optimization. The function finds an optimal portfolio
which has the smallest distance to a benchmark portfolio given by \code{bvec}.
Solution is by the algorithm due to Zhao and Li modified to account for the fact that the benchmark portfolio \code{bvec} has the dimension of portfolio weights
and the solved linear program has a much higher dimension since the solution vector to the LP problem consists of a set of primal variables: financial portfolio weights,
auxiliary variables coming from the reduction of the mean-risk problem to a linear program and also a set of dual variables depending
on the number of constrains in the primal problem (see Palczewski).
}
\examples{
library(mvtnorm)
k = 3
num =100
dat <- cbind(rmvnorm (n=num, mean = rep(0,k), sigma=diag(k)), matrix(1/num,num,1))
# a data sample with num rows and (k+1) columns for k assets;
w_m <- rep(1/k,k) # benchmark portfolio, a vector of length k,
port_ret = 0.05 # portfolio target return
alpha_optim = 0.95
# minimal constraints set: \\sum theta_i = 1
# has to be in two inequalities: 1 - \\epsilon <= \\sum theta_i <= 1 +\\epsilon
a0 <- rep(1,k)
Aconstr <- rbind(a0,-a0)
bconstr <- c(1+1e-8, -1+1e-8)
LB <- rep(0,k)
UB <- rep(1,k)
res <- PortfolioOptimProjection(dat, port_ret, risk="MAD",
alpha=alpha_optim, w_m, Aconstr, bconstr, LB, UB, maxiter=200, tol=1e-7)
cat ( c("Projection optimal portfolio:\\n\\n"))
cat(c("weights \\n"))
print(res$theta)
cat (c ("\\n mean = ", res$mu, " risk = ", res$risk, "\\n CVaR = ", res$CVaR, " VaR = ",
res$VaR, "\\n MAD = ", res$MAD, "\\n\\n"))
}
\references{
Palczewski, A., LP Algorithms for Portfolio Optimization: The PortfolioOptim Package, R Journal, 10(1) (2018), 308--327. DOI:10.32614/RJ-2018-028.
Zhao, Y-B., Li, D., Locating the least 2-norm solution of linear programs via a path-following method, SIAM Journal on Optimization, 12 (2002), 893--912. DOI:10.1137/S1052623401386368.
}
|
dfa2fc4dd55d013e4230368c1bc3b209f2522023
|
1b345edeca73e114498f9ae6291e2c33411a01bc
|
/chapter 06/06_01 anytime.R
|
0d9b4ee1f254a39e5e8e3e4c18152d49e1dc04b2
|
[] |
no_license
|
mnr/R-for-Data-Science-dates-and-times
|
8e11c29058f166eb62a598b3145e9be6df44b6c3
|
1c136ac075d371274656becaa18d2b187a7f3444
|
refs/heads/master
| 2021-06-13T08:36:56.956839
| 2021-04-09T00:12:44
| 2021-04-09T00:12:44
| 173,846,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 746
|
r
|
06_01 anytime.R
|
# Demo of the `anytime` package: parse "anything" date-like into Date /
# POSIXct without supplying format strings (course example file).
# NOTE(review): install.packages() inside a script re-installs the package
# on every run; normally run once interactively instead.
# anytime package
install.packages("anytime")
library(anytime)
# incoming: parse numbers and strings into Date (anydate) / POSIXct (anytime)
# incoming ----------------------------------------------------------------
anydate(20190426 + 0:2)
anydate("2019_04_26")
anytime("20190426 0938")
anytime("March 14, 2015")
# keep one parsed timestamp (EST) for the outgoing-format examples below
savethistime <- anytime("20190426 0938", tz = "EST")
class(anytime("20190426 0938"))
class(anydate("2019_04_26"))
# outgoing: render the saved timestamp in standard wire formats
# outgoing ----------------------------------------------------------------
iso8601(savethistime)
rfc2822(savethistime)
rfc3339(savethistime)
yyyymmdd(savethistime)
# interesting tidbits -----------------------------------------------------
getFormats()
anytime("may Thur 23 2010") # doesn't work
addFormats("%b %a %d %Y") # add the format
# NOTE(review): the added format is month-weekday-day-year, but the string
# below is weekday-month-day-year -- confirm it is really parsed by the
# newly added format and not by one of the built-ins.
anytime("Thur may 23 2010") # now it works
|
a28a79608d742275a350344b4425ec5ef17ca377
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610382595-test.R
|
2be5222e0c9f61280cf630ddabd38cfb342eaff8
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 545
|
r
|
1610382595-test.R
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(1.42943178999697e-101, 4.07221237780293e-308, 0, 0, 0, 0, 0, 0, 4.77830972673648e-299, 2.82755217285071e-110, 7.2911220195564e-304, 0, 0, 0, 0, 0, -5.17539190460651e+245, 0, 0, 0, 7.29112200597562e-304, 7.06389330511216e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Replay the stored libFuzzer arguments against grattan::IncomeTax and
# dump the structure of the returned value for regression comparison.
tax_result <- do.call(grattan::IncomeTax, testlist)
str(tax_result)
|
0e8f49d90ab5dace81bcca6c187361a0fc398365
|
fcb9c08efecacc63018c388fcb847e58aa07f087
|
/man/norm_run.Rd
|
b8b779ab5d8b0f628aa9346834f14b73d2cd7a92
|
[
"MIT"
] |
permissive
|
yuliasidi/m2imp
|
52957a3580385d5f9a29e85579b4f124ae9d874e
|
857f43522906c43306d49bcd4649d2cf44901343
|
refs/heads/master
| 2020-07-13T05:51:33.835388
| 2020-01-14T21:14:16
| 2020-01-14T21:14:16
| 205,009,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 821
|
rd
|
norm_run.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/norm_run.R
\name{norm_run}
\alias{norm_run}
\title{Runs MI via norm package}
\usage{
norm_run(dt_in, num_m = 5, i, n_iter, chain = c("one", "multiple"))
}
\arguments{
\item{dt_in}{tibble, to be imputed}
\item{num_m}{numeric, Default: 5, number of imputations}
\item{i}{numeric, the seed is defined as 666*i}
\item{n_iter}{numeric, number of iterations to be used in data
augmentation procedure}
\item{chain}{character, 'one' or 'multiple' chains for data augmentation}
}
\value{
tibble of MI summary: qbar, ubar, b, v and t
}
\description{
Runs MI via norm package
}
\details{
DETAILS
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
\seealso{
\code{\link[tibble]{tibble}}
\code{\link[purrr]{map2}}
\code{\link[stats]{cor}}
}
|
1155ac26fe0e5813ecf0b85f37d4877b77553bf3
|
f2de3eb978554261d4edaba16782ebe0f12d9031
|
/man/rmfi_write_array.Rd
|
3ac356e589d68272159102b66b9892b1458caf64
|
[] |
no_license
|
rogiersbart/RMODFLOW
|
540a02cf0ccc89423cac0742be51b36c0d4098e5
|
2da3f1e23f0df058e56a4b30cb46a304e9a757e7
|
refs/heads/master
| 2023-03-22T15:51:58.860817
| 2022-12-26T19:09:23
| 2022-12-26T19:09:23
| 17,342,744
| 34
| 9
| null | 2021-08-19T20:56:08
| 2014-03-02T17:36:45
|
R
|
UTF-8
|
R
| false
| true
| 1,332
|
rd
|
rmfi_write_array.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internals.R
\name{rmfi_write_array}
\alias{rmfi_write_array}
\title{Write modflow array
Internal function used in the write_* functions for writing array datasets}
\usage{
rmfi_write_array(
array,
file,
cnstnt = 1,
iprn = -1,
append = TRUE,
external = NULL,
fname = NULL,
binary = NULL,
precision = "single",
nam = NULL,
xsection = FALSE,
...
)
}
\arguments{
\item{external}{character vector with names corresponding to the dataset; used to write external arrays}
\item{fname}{character vector with names corresponding to the dataset; used to write open/close arrays}
\item{binary}{character vector with names corresponding to the dataset; used to write external or open/close arrays}
\item{precision}{character: either \code{'single'} (default) or \code{'double'}. Denotes the precision of binary files}
\item{nam}{\code{\link{RMODFLOW}} nam object; used when writing external arrays}
\item{xsection}{logical; does the array represent a NLAY x NCOL cross-section. Passed to \code{rmf_write_array}}
\item{...}{ignored}
}
\description{
Write modflow array
Internal function used in the write_* functions for writing array datasets
}
\details{
if the array should be written as integers, an integer array should be provided
}
|
5c5219f1ce17942222c4d72f29c6e1ebc042cc98
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bayesmeta/examples/normalmixture.Rd.R
|
c2b753cd4b9c107490f388fc4664d16b7ec153a6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,856
|
r
|
normalmixture.Rd.R
|
library(bayesmeta)
### Name: normalmixture
### Title: Compute normal mixtures
### Aliases: normalmixture
### Keywords: distribution
### ** Examples
### (Machine-extracted example code from the normalmixture() help page.
### As used below, normalmixture() returns a list containing $density,
### $cdf and $quantile functions plus a $bins grid-size count.)
##################################################################
# compare half-normal mixing distributions with different scales:
nm05 <- normalmixture(cdf=function(x){phalfnormal(x, scale=0.5)})
nm10 <- normalmixture(cdf=function(x){phalfnormal(x, scale=1.0)})
# (this corresponds to the case of assuming a half-normal prior
# for the heterogeneity tau)
# check the structure of the returned object:
str(nm05)
# show density functions:
# (these would be the marginal (prior predictive) distributions
# of study-specific effects theta[i])
x <- seq(-1, 3, by=0.01)
plot(x, nm05$density(x), type="l", col="blue", ylab="density")
lines(x, nm10$density(x), col="red")
abline(h=0, v=0, col="grey")
# show cumulative distributions:
plot(x, nm05$cdf(x), type="l", col="blue", ylab="CDF")
lines(x, nm10$cdf(x), col="red")
abline(h=0:1, v=0, col="grey")
# determine 5 percent and 95 percent quantiles:
rbind("HN(0.5)"=nm05$quantile(c(0.05,0.95)),
"HN(1.0)"=nm10$quantile(c(0.05,0.95)))
##################################################################
# compare different mixing distributions
# (half-normal, half-Cauchy, exponential and Lomax):
nmHN <- normalmixture(cdf=function(x){phalfnormal(x, scale=0.5)})
nmHC <- normalmixture(cdf=function(x){phalfcauchy(x, scale=0.5)})
nmE <- normalmixture(cdf=function(x){pexp(x, rate=2)})
nmL <- normalmixture(cdf=function(x){plomax(x, shape=4, scale=2)})
# show densities (logarithmic y-axis):
x <- seq(-1, 3, by=0.01)
plot(x, nmHN$density(x), col="green", type="l", ylab="density", ylim=c(0.005, 6.5), log="y")
lines(x, nmHC$density(x), col="red")
lines(x, nmE$density(x), col="blue")
lines(x, nmL$density(x), col="cyan")
abline(v=0, col="grey")
# show CDFs:
plot(x, nmHN$cdf(x), col="green", type="l", ylab="CDF", ylim=c(0,1))
lines(x, nmHC$cdf(x), col="red")
lines(x, nmE$cdf(x), col="blue")
lines(x, nmL$cdf(x), col="cyan")
abline(h=0:1, v=0, col="grey")
# add "exponential" x-axis at top:
axis(3, at=log(c(0.5,1,2,5,10,20)), lab=c(0.5,1,2,5,10,20))
# show 95 percent quantiles:
abline(h=0.95, col="grey", lty="dashed")
abline(v=nmHN$quantile(0.95), col="green", lty="dashed")
abline(v=nmHC$quantile(0.95), col="red", lty="dashed")
abline(v=nmE$quantile(0.95), col="blue", lty="dashed")
abline(v=nmL$quantile(0.95), col="cyan", lty="dashed")
rbind("half-normal(0.5)"=nmHN$quantile(0.95),
"half-Cauchy(0.5)"=nmHC$quantile(0.95),
"exponential(2.0)"=nmE$quantile(0.95),
"Lomax(4,2)" =nmL$quantile(0.95))
#####################################################################
# a normal mixture distribution example where the solution
# is actually known analytically: the Student-t distribution.
# If Y|sigma ~ N(0,sigma^2), where sigma = sqrt(k/X)
# and X|k ~ Chi^2(df=k),
# then the marginal Y|k is Student-t with k degrees of freedom.
# define CDF of sigma:
CDF <- function(sigma, df){pchisq(df/sigma^2, df=df, lower.tail=FALSE)}
# numerically approximate normal mixture (with k=5 d.f.):
k <- 5
nmT1 <- normalmixture(cdf=function(x){CDF(x, df=k)})
# in addition also try a more accurate approximation:
nmT2 <- normalmixture(cdf=function(x){CDF(x, df=k)}, delta=0.001, epsilon=0.00001)
# check: how many grid points were required?
nmT1$bins
nmT2$bins
# show true and approximate densities:
# NOTE(review): `le` below partially matches seq()'s length.out argument;
# spelling it out would be clearer.
x <- seq(-2,10,le=400)
plot(x, dt(x, df=k), type="l")
abline(h=0, v=0, col="grey")
lines(x, nmT1$density(x), col="red", lty="dashed")
lines(x, nmT2$density(x), col="blue", lty="dotted")
# show ratios of true and approximate densities:
plot(x, nmT1$density(x)/dt(x, df=k), col="red",
type="l", log="y", ylab="density ratio")
abline(h=1, v=0, col="grey")
lines(x, nmT2$density(x)/dt(x, df=k), col="blue")
|
9b0db8a8c3ac8f4bbbf5c9b0c70dd90b492dd293
|
3870838ffbe0cde6fb869dcce9c5044b250c5c22
|
/misc_scripts/gen_geno_for_multi_snp_simulation/sampling_genes.R
|
ebd2380443728253c565cb6ea0d66935a4708ab8
|
[
"MIT"
] |
permissive
|
liangyy/mixqtl-pipeline
|
d6f9baf9db9ee260bc6bafd29773361e8d58b33e
|
556c983a2e5bb3feaf21a88a72c4a77c98df179d
|
refs/heads/master
| 2023-02-19T07:53:47.135434
| 2020-05-27T02:50:54
| 2020-05-27T02:50:54
| 191,611,597
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
sampling_genes.R
|
# ARGV1: input gene model file (no header)
# ARGV2: cis window size
# ARGV3: output file name
# ARGV4: number of genes selected
## 'formatted-gene-model.gencode.v32lift37.basic.annotation.txt.gz'
# Fix the RNG seed so the sampled gene set is reproducible across runs.
set.seed(1)
library(dplyr)
# Command-line interface (see header comments): 1 = gene-model file,
# 2 = cis window size in bp, 3 = output path, 4 = number of genes.
args <- commandArgs(trailingOnly = TRUE)
win_size <- as.numeric(args[2])
n <- as.numeric(args[4])
# Spell header/stringsAsFactors out in full; F is an ordinary (reassignable)
# variable, FALSE is not.
df <- read.table(args[1], header = FALSE, stringsAsFactors = FALSE)
# Strip a trailing dotted suffix from identifiers, keeping only the text
# before the first "." (used below on column V7, presumably versioned
# Ensembl gene IDs, e.g. "ENSG00000141510.11" -> "ENSG00000141510").
# Vectorized over `str`; returns a character vector of the same length.
#
# vapply() replaces the original unlist(lapply(...)): it is type-stable
# and returns character(0) (not NULL) for zero-length input.
trim_dot <- function(str) {
  parts <- strsplit(str, '\\.')
  vapply(parts, function(x) { x[1] }, character(1))
}
# Keep protein-coding genes only (column V8 is the gene biotype).
df <- df %>% filter(V8 == 'protein_coding')
# TSS = start coordinate (V4) on the + strand, end coordinate (V5) on the
# - strand (column V6 holds the strand).
df$tss <- df$V4
df$tss[df$V6 == '-'] <- df$V5[df$V6 == '-']
# df$tss = as.numeric(as.character(df$tss))
# Cis window of +/- win_size bp around the TSS.
df$start <- df$tss - win_size
df$end <- df$tss + win_size
df$chr <- stringr::str_remove(df$V1, 'chr')
df$gene <- trim_dot(df$V7)
# Sample n genes without replacement; seq_len() avoids the 1:nrow(df)
# footgun on an empty table.
selected <- df[sample(seq_len(nrow(df)), size = n, replace = FALSE), ]
# Argument names spelled out in full (row.names / col.names / quote)
# instead of relying on partial matching (row = / col = / quo =).
write.table(selected %>% select(chr, start, end, gene), args[3],
            row.names = FALSE, col.names = FALSE, sep = '\t', quote = FALSE)
|
b0498e7306d32b5fa13384655f0749cdc7261f41
|
6b793edcb3856fb39a00861c5068e42f34883d8c
|
/R/gdc_token.R
|
e8ba96b001ed85c014259ac517c808e4d24ab487
|
[] |
no_license
|
Bioconductor/GenomicDataCommons
|
348e1879607e51dccd218229aa76746806386e17
|
007c2917f40412a3272dae41d6438ac5df284972
|
refs/heads/devel
| 2023-05-26T09:48:51.999727
| 2023-05-25T12:56:22
| 2023-05-25T12:56:22
| 60,694,062
| 87
| 30
| null | 2023-01-18T16:06:36
| 2016-06-08T11:51:11
|
R
|
UTF-8
|
R
| false
| false
| 2,008
|
r
|
gdc_token.R
|
#' return a gdc token from file or environment
#'
#' The GDC requires an auth token for downloading
#' data that are "controlled access". For example,
#' BAM files for human datasets, germline variant calls,
#' and SNP array raw data all are protected as "controlled
#' access". For these files, a GDC access token is required.
#' See the \href{https://docs.gdc.cancer.gov/Data_Portal/Users_Guide/Authentication/#gdc-authentication-tokens}{details on the GDC authentication and token information}.
#' Note that this function simply returns a string value.
#' It is possible to keep the GDC token in a variable in R
#' or to pass a string directly to the appropriate parameter.
#' This function is simply a convenience function for alternative
#' approaches to get a token from an environment variable
#' or a file.
#'
#'
#' @details
#' This function will resolve locations of the GDC token in the
#' following order:
#' \itemize{
#' \item{from the environment variable, \code{GDC_TOKEN}, expected to
#' contain the token downloaded from the GDC as a string}
#' \item{using \code{readLines} to read a file named in the environment
#' variable, \code{GDC_TOKEN_FILE}}
#' \item{using \code{readLines} to read from a file called \code{.gdc_token} in the user's
#' home directory}
#' }
#' If all of these fail, this function will return an error.
#'
#' @return character(1) (invisibly, to protect against inadvertently printing) the GDC token.
#'
#' @references \url{https://docs.gdc.cancer.gov/Data_Portal/Users_Guide/Cart/#gdc-authentication-tokens}
#'
#' @examples
#' # This will not run before a GDC token
#' # is in place.
#' token = try(gdc_token(),silent=TRUE)
#'
#'
#' @export
gdc_token <- function() {
    # 1) Token supplied directly via the GDC_TOKEN environment variable.
    #    Returned invisibly, as the roxygen above promises, so the token is
    #    not accidentally printed to the console (the original returned this
    #    path visibly).
    token <- Sys.getenv('GDC_TOKEN')
    if (token != '') {
        return(invisible(token))
    }
    # 2) Fall back to a token file: GDC_TOKEN_FILE if set (whitespace
    #    trimmed), otherwise ~/.gdc_token.
    token_file <- "~/.gdc_token"
    if (Sys.getenv('GDC_TOKEN_FILE') != '') {
        token_file <- trimws(Sys.getenv('GDC_TOKEN_FILE'))
    }
    stopifnot(file.exists(token_file))
    # suppressWarnings(): readLines() warns when the token file has no
    # trailing newline, which is common for downloaded token files.
    invisible(suppressWarnings(readLines(token_file, n = 1)))
}
|
e6dd5d7cb99987c2df8dac8013963a3e8b42e60f
|
f36702723c3116da6a73af47bbb3e8adafe0ee53
|
/CNC/Coding_calc.R
|
d2f3e67b30993541248cd19e6050dcddd280387d
|
[] |
no_license
|
statsarah/Capstone_TE_Vfischeri
|
384b0708bc05278a68ed2e6ce6cdd326b6b79996
|
47e4b41c834da2b396367228b6483d76ef30ab27
|
refs/heads/master
| 2021-06-19T20:36:55.738754
| 2017-08-02T03:27:55
| 2017-08-02T03:27:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,203
|
r
|
Coding_calc.R
|
###############################################
# Coding and Non-coding regions of the genome #
###############################################
# Load the NCBI gene annotation tables (tab-separated, with a header row)
# for the two V. fischeri chromosomes and the plasmid. quote = "" disables
# quote processing so stray quote characters in annotation text cannot
# merge rows. Argument names spelled out (header = TRUE) instead of the
# partially matched h = T.
Ch1Gene <- read.table("Ch1Gene.txt", sep = "\t", header = TRUE, quote = "")
Ch2Gene <- read.table("Ch2Gene.txt", sep = "\t", header = TRUE, quote = "")
PlsmGene <- read.table("PlsmGene.txt", sep = "\t", header = TRUE, quote = "")
#############################################
# Function that takes in a NCBI data frame #
# and calculates the length of genes and #
# the distance between each gene; returns #
# a new data frame with the results cbinded #
#############################################
# Order an NCBI gene table by start coordinate and append two derived
# columns:
#   gene_length -- end (column 14) minus start (column 13) for each gene
#   gene_dist   -- distance from a gene's end to the next gene's start;
#                  negative when genes overlap; the last gene has no
#                  successor and keeps the default 0
# Columns 13/14 are assumed to hold the start/end coordinates of the NCBI
# download -- TODO confirm against the input files.
# Returns the reordered data frame with gene_length and gene_dist appended
# (columns 19 and 20 for an 18-column input, as CNC() expects).
LengthDiff <- function(Data) {
  Data <- Data[order(Data[, 13]), ]
  row.names(Data) <- seq_len(nrow(Data))
  gene_length <- Data[, 14] - Data[, 13]
  # Vectorized successor-gap computation replaces the original element-wise
  # while-loop: gene_dist[i] = start[i + 1] - end[i].
  gene_dist <- vector(length = nrow(Data))
  if (nrow(Data) > 1) {
    head_idx <- seq_len(nrow(Data) - 1)
    gene_dist[head_idx] <- Data[head_idx + 1, 13] - Data[head_idx, 14]
  }
  cbind(Data, gene_length, gene_dist)
}
#Calculating for each genetic element
C1<-LengthDiff(Ch1Gene)
C2<-LengthDiff(Ch2Gene)
P<-LengthDiff(PlsmGene)
#################################################
# Function to take in the lengths and distances #
# of genes to calculate a genome vector #
#################################################
# Expand a gene table (as produced by LengthDiff()) into a per-base genome
# vector: 1 for coding positions, 0 for intergenic positions.
# Column 19 = gene_length, column 20 = gene_dist (gap to the next gene,
# negative when genes overlap).
# NOTE(review): in the overlap branch rep(1, gene_length - gene_dist) with
# a negative gene_dist LENGTHENS the coding run, although the original
# comment said "shorten the first gene". Behavior is kept exactly as-is;
# confirm the intended semantics.
CNC <- function(Data) {
  # Accumulate per-gene chunks in a preallocated list and flatten once at
  # the end; the original grew Genome with c() inside the loop, which is
  # O(n^2) in total genome length.
  chunks <- vector("list", nrow(Data))
  for (i in seq_len(nrow(Data))) {
    if (Data[i, 20] < 0) {
      # Overlapping genes: emit coding bases only, no intergenic gap.
      chunks[[i]] <- rep(1, times = (Data[i, 19] - Data[i, 20]))
    } else {
      # Coding bases followed by the intergenic gap to the next gene.
      chunks[[i]] <- c(rep(1, times = Data[i, 19]),
                       rep(0, times = Data[i, 20]))
    }
  }
  unlist(chunks)
}
#Calculating coding vs non-coding genome vectors and proportions
# For each replicon: expand to a per-base 0/1 vector, print the coding
# fraction (historical results kept in the trailing comments), and write
# the vector to disk. FALSE is spelled out instead of the reassignable F.
C1Genome <- CNC(C1)
sum(C1Genome) / length(C1Genome) #0.8900263
write.table(C1Genome, file = "Ch1_CodingSeq.txt", row.names = FALSE)
C2Genome <- CNC(C2)
sum(C2Genome) / length(C2Genome) #0.8816424
write.table(C2Genome, file = "Ch2_CodingSeq.txt", row.names = FALSE)
PGenome <- CNC(P)
sum(PGenome) / length(PGenome) #0.87646
write.table(PGenome, file = "Plasmid_CodingSeq.txt", row.names = FALSE)
|
5540c02d6990bb4bc28686125b781906ea8addd9
|
bb42db4ee05bada96202f032e1ec121da0f6b952
|
/Sock-Predict/ui.R
|
a8dbb0c3a57fb6d3202e4a4a3dff4464b03d7582
|
[] |
no_license
|
Yen-HuaChen/Code-Sample
|
0a11f930c10d24cf66610e75c8587b5c0cf30be0
|
59a495b528dcf6e3f875a5108280aecadaf59217
|
refs/heads/master
| 2020-05-21T05:08:33.334534
| 2017-04-26T20:13:57
| 2017-04-26T20:13:57
| 84,575,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,140
|
r
|
ui.R
|
library(shiny)
library(dplyr)
library(parallel)
shinyApp(
ui = fluidPage(
titlePanel("Socks"),
# input number of simulations
sidebarPanel(
numericInput(
"n_sims",
"Number of simulations to run:",
value = 10000,
min = 1000,
max = 1e6
),
hr(),
h4("Observed Data"),
# inform users that the total number of picked socks is restricted to be 11
h6("Number of picked socks is 11"),
# only let users to change the number of singleton socks that are picked
sliderInput(
"n_odds",
"Number of singleton socks picked",
value = 11,
min = 1,
max = 11
),
h6("Number of paired socks picked"),
verbatimTextOutput("n_pairs"),
hr(),
h4("Prior on Total Number of Socks"),
# specify three priors
selectInput(
"t_socks",
"Prior Distribution for total number of socks",
c(
"Negative Binomial" = "nbinom",
"Discrete Uniform" = "dunif",
"Poisson" = "pois"
)
),
hr(),
h4("Hyper Parameters"),
# specify prior parameters
conditionalPanel(
condition = "input.t_socks == 'nbinom'",
sliderInput(
"nbinom_mu",
HTML("Mean (expected total number of socks you have) - μ"),
value = 30,
min = 10,
max = 200
),
sliderInput(
"nbinom_sd",
HTML(
"Standard Deviation (expected pair of socks in the laundry) - σ"
),
value = 15,
min = 0,
max = 100
)
),
conditionalPanel(
condition = "input.t_socks == 'dunif'",
sliderInput(
"dunif_range",
"Range:",
value = c(10, 100),
min = 10,
max = 100
)
),
conditionalPanel(
condition = "input.t_socks == 'pois'",
sliderInput(
"pois_lambda",
HTML("Total prior - λ:"),
value = 35,
min = 10,
max = 100
)
),
hr(),
h4("Prior on Proportion of Pairs"),
h6("Beta Distribution"),
sliderInput(
"beta_a",
HTML("Number of paired socks - α"),
value = 15,
min = 0,
max = 50
),
sliderInput(
"beta_b",
HTML("Number of singleton socks - β"),
value = 2,
min = 0,
max = 100
),
hr(),
checkboxInput("median", label = "Show medians", value = FALSE),
checkboxInput("sd", label = "Show 95% credible intervals", value = FALSE),
checkboxInput("true", label = "Show the true value", value = FALSE),
checkboxInput("pos_table", label = "Show posterior summary statistics", value = FALSE),
checkboxInput("pro_table", label = "Show prior summary statistics", value = FALSE),
checkboxInput("answer", label = "Show what Karl Broman truly had!", value = FALSE)
),
mainPanel(
h4("Results"),
plotOutput("t_socks"),
br(),
plotOutput("t_pairs"),
br(),
plotOutput("t_odds"),
br(),
plotOutput("prior_pairs"),
br(),
tableOutput("summary_pos_table"),
br(),
tableOutput("summary_pro_table"),
br(),
textOutput("messages"),
br()
)
),
server = function(input, output, session) {
# fix the number of pairs after observing the number of singletons
n_pairs = reactive({
(11 - input$n_odds)/2
})
# calculate and output the prior number of pairs picked by the user
output$n_pairs = renderPrint({
cat((11 - input$n_odds) / 2)
})
observe({
updateSliderInput(
session,
inputId = "n_odds",
min = 1,
max = 11,
step = 2
)
})
# restrict on the negative binomial parameter sd
# it shouldn't exceed sqrt(sd) + 1
# explained in the write-up
observe({
updateSliderInput(
session,
inputId = "nbinom_sd",
min = max(5, ceiling(sqrt(
input$nbinom_mu
) + 1)),
max = floor(input$nbinom_mu / 2)
)
})
# negative binomial miu should be larger than the number of socks picked
# explained in the write-up
observe({
updateSliderInput(session,
inputId = "nbinom_mu",
min = 11)
})
# the simulation function
    # Monte Carlo simulation from the prior: for each of n_sims draws,
    # sample a total sock count and a pair proportion from the selected
    # priors, then simulate picking socks from the laundry and count the
    # resulting singletons and pairs. Returns an n_sims x 6 matrix with
    # columns sim_odds, sim_pairs, t_socks, t_pairs, t_odds, prior_pairs.
    sock_sim = reactive({
      # Copy the reactive inputs into plain locals before defining the
      # function handed to mcmapply(). NOTE(review): the nbinom branch
      # below still reads input$nbinom_mu directly, defeating the purpose
      # of this copy -- confirm this is safe inside forked workers.
      dunif_min = input$dunif_range[1]
      dunif_max = input$dunif_range[2]
      pois_lambda = input$pois_lambda
      nbinom_mu = input$nbinom_mu
      nbinom_sd = input$nbinom_sd
      beta_a = input$beta_a
      beta_b = input$beta_b
      t_socks = input$t_socks
      # One prior draw + one simulated pick; `i` is only the iteration id.
      sock_simulation = function(i) {
        # Draw the total number of socks from the selected prior.
        # NOTE: t_socks is reused -- first the prior's name (a string),
        # then the sampled sock count (a number).
        if (t_socks == "dunif") {
          t_socks = sample(dunif_min:dunif_max, 1, replace = TRUE)
        }
        else if (t_socks == "pois") {
          t_socks = rpois(1, pois_lambda)
        }
        else if (t_socks == "nbinom") {
          # Convert (mean, sd) to rnbinom()'s size parameter; requires
          # sd^2 > mu, which the slider limits in the UI enforce.
          prior_size_param = -nbinom_mu ^ 2 / (nbinom_mu - nbinom_sd ^ 2)
          t_socks = rnbinom(1, mu = input$nbinom_mu, size = prior_size_param)
        }
        else{
          stop()
        }
        # Proportion of socks that come in pairs ~ Beta(beta_a, beta_b).
        prior_pairs = rbeta(1, shape1 = beta_a, shape2 = beta_b)
        # Total number of pairs implied by the draw.
        t_pairs = round(floor(t_socks / 2) * prior_pairs)
        # Remaining socks are singletons.
        t_odds = t_socks - 2 * t_pairs
        # Number of socks picked, fixed by the observed data (11 total).
        n_picked = 2 * n_pairs() + input$n_odds
        # Label socks so the two members of a pair share one id and each
        # singleton gets a unique id.
        socks = rep(seq_len(t_pairs + t_odds), rep(c(2, 1), c(t_pairs, t_odds)))
        # Pick without replacement (capped at the simulated total).
        picked_socks = sample(socks, size = min(n_picked, t_socks))
        sock_counts = table(picked_socks)
        c(sim_odds = sum(sock_counts == 1), sim_pairs = sum(sock_counts == 2),
          t_socks = t_socks, t_pairs = t_pairs, t_odds = t_odds, prior_pairs = prior_pairs)
      }
      n_sims = input$n_sims
      # Run the n_sims replicates on 4 forked workers; mcmapply() returns
      # one draw per column, so transpose to one draw per row.
      # NOTE(review): mc.cores = 4 silently falls back to serial on
      # Windows -- confirm the deployment platform.
      t(mcmapply(sock_simulation, seq_len(n_sims), mc.cores = 4))
    })
    # ABC rejection step: keep only the prior draws whose simulated counts
    # of singleton and paired socks exactly match the observed data
    # (input$n_odds singletons, n_pairs() pairs); the surviving rows are
    # samples from the posterior.
    # NOTE(review): sock_sim() is called three times here; reactive caching
    # makes this cheap, but a single local copy would read more clearly.
    post_draws = reactive({
      post_draws = sock_sim()[sock_sim()[, "sim_odds"] == input$n_odds & sock_sim()[, "sim_pairs" ] == n_pairs(), ]
      return(post_draws)
    })
# Plot the posterior/prior total number of socks in the same plot
output$t_socks = renderPlot({
# save the densities separately
# for the convenience of setting ylim on the plot
post_sock_draw = density(post_draws()[, "t_socks"])
pror_sock_draw = density(sock_sim()[, "t_socks"])
# plot posterior
plot(post_sock_draw, ylim = c(0, 1.2*max(post_sock_draw$y, pror_sock_draw$y)),
xlab = NA, type = 'l',
main = "Density Plot of Posterior/Prior Total Number of Socks",
col = "blue")
# add prior
lines(pror_sock_draw,col = "green")
legend("topright",
c("posterior total number of socks","prior total number of socks"),
lty = c(1, 1),
# gives the legend appropriate symbols (lines)
lwd = c(2.5, 2.5),
col = c("blue", "green")
)
# add medians
if (input$median) {
abline(v = median(post_draws()[, "t_socks"]),
lty = 1,
col = "blue")
}
# add 95% credible interval limits
if (input$sd) {
abline(v = quantile(post_draws()[, "t_socks"], c(0.025)),
lty = 2,
col = "blue")
abline(v = quantile(post_draws()[, "t_socks"], c(0.975)),
lty = 2,
col = "blue")
}
# add true total number of socks (45)
if (input$true) {
abline(v = 21 * 2 + 3,
lty = 1,
col = "red")
}
})
# Plot the posterior/prior number of paired socks in the same plot
output$t_pairs = renderPlot({
post_pairs_draw = density(post_draws()[, "t_pairs"])
pror_pairs_draw = density(sock_sim()[, "t_pairs"])
plot(post_pairs_draw, xlab = NA, ylim = c(0, 1.2*max(post_pairs_draw$y, pror_pairs_draw$y)),
main = "Density Plot of Posterior/Prior Number of Paired Socks",
col = "blue", type = 'l')
lines(pror_pairs_draw,col = "green")
legend("topright",
c("posterior number of paired socks","prior number of paired socks"),
lty = c(1, 1),
# gives the legend appropriate symbols (lines)
lwd = c(2.5, 2.5),
col = c("blue", "green")
)
if (input$median) {
abline(v = median(post_draws()[, "t_pairs"]),
lty = 1,
col = "blue")
}
if (input$sd) {
abline(v = quantile(post_draws()[, "t_pairs"], c(0.025)),
lty = 2,
col = "blue")
abline(v = quantile(post_draws()[, "t_pairs"], c(0.975)),
lty = 2,
col = "blue")
}
if (input$true) {
abline(v = 21,
lty = 1,
col = "red")
}
})
# Plot the posterior/prior number of odd socks in the same plot
output$t_odds = renderPlot({
post_odd_draw = density(post_draws()[, "t_odds"])
pror_odd_draw = density(sock_sim()[, "t_odds"], adjust = 1.8)
plot(post_odd_draw, ylim = c(0, 1.2*max(post_odd_draw$y, pror_odd_draw$y)),
xlab = NA, type = 'l',
main = "Density Plot of Posterior/Prior Number of odd Socks",
col = "blue")
lines(pror_odd_draw,col = "green")
legend("topright",
c("posterior number of Odd socks","prior number of odd socks"),
lty = c(1, 1),
# gives the legend appropriate symbols (lines)
lwd = c(2.5, 2.5),
col = c("blue", "green")
)
if (input$median) {
abline(v = median(post_draws()[, "t_odds"]),
lty = 1,
col = "blue")
}
if (input$sd) {
abline(v = quantile(post_draws()[, "t_odds"], c(0.025)),
lty = 2,
col = "blue")
abline(v = quantile(post_draws()[, "t_odds"], c(0.975)),
lty = 2,
col = "blue")
}
if (input$true) {
abline(v = 3,
lty = 1,
col = "red")
}
})
# Plot the posterior/prior proportion of pairs in the same plot
output$prior_pairs = renderPlot({
post_prop_draw = density(post_draws()[, "prior_pairs"])
pror_prop_draw = density(sock_sim()[, "prior_pairs"])
plot(post_prop_draw, ylim = c(0, 1.2*max(post_prop_draw$y, pror_prop_draw$y)),
xlab = NA, type = 'l',
main = "Density Plot of Posterior/Prior Proportion of Paired Socks",
col = "blue")
lines(pror_prop_draw, col = "green")
legend("topright",
c("posterior proportion of paired socks","prior proportion of paired socks"),
lty = c(1, 1),
# gives the legend appropriate symbols (lines)
lwd = c(2.5, 2.5),
col = c("blue", "green")
)
if (input$median) {
abline(v = median(post_draws()[, "prior_pairs"]),
lty = 1,
col = "blue")
}
if (input$sd) {
abline(v = quantile(post_draws()[, "prior_pairs"], c(0.025)),
lty = 2,
col = "blue")
abline(v = quantile(post_draws()[, "prior_pairs"], c(0.975)),
lty = 2,
col = "blue")
}
if (input$true) {
abline(v = 21*2/45,
lty = 1,
col = "red")
}
})
# print the summary tables
output$summary_pos_table = renderTable({
# if true
if (input$pos_table){
# initialize empty vectors
pos1 = c()
pos2 = c()
pos3 = c()
pos4 = c()
for(i in 3:6){
# append the mean of each statistics
pos1 = c(pos1, round(mean(post_draws()[,i])))
pos2 = c(pos2, round(median(post_draws()[,i])))
pos3 = c(pos3, round(quantile(post_draws()[,i], 0.025)))
pos4 = c(pos4, round(quantile(post_draws()[,i], 0.975)))
}
name_vec = c("total number of socks",
"number of pairs","number of odds",
"proportion of pairs")
d1 = data.frame(name_vec, pos1, pos2, pos3, pos4)
colnames(d1) = c("names", "mean", "median", "95% lower limit", "95% upper limit")
d1
}
}, caption = "Poesterior Summary Statistics Table",
caption.placement = getOption("top")
)
output$summary_pro_table = renderTable({
if (input$pro_table){
pro1 = c()
pro2 = c()
pro3 = c()
pro4 = c()
for(i in 3:6){
pro1 = c(pro1, round(mean(sock_sim()[,i])))
pro2 = c(pro2, round(median(sock_sim()[,i])))
pro3 = c(pro3, round(quantile(sock_sim()[,i], 0.025)))
pro4 = c(pro4, round(quantile(sock_sim()[,i], 0.975)))
}
name_vec = c("total number of socks",
"number of pairs","number of odds",
"proportion of pairs")
d2 = data.frame(name_vec, pro1, pro2, pro3, pro4)
colnames(d2) = c("names", "mean", "median", "95% lower limit", "95% upper limit")
d2
}
}, caption = "Prior Summary Statistics Table",
caption.placement = getOption("top")
)
# show or hide the true answer
output$messages = renderText({
if (input$answer){
paste("Aha! Karl Broman actually had 21 pairs and 3 singletons!")
}
})
}
)
|
74cdc8838b6935bc31a9aad801443f55532aa1c7
|
479b2de10a86a2bc30a65f82ed4a2649061c7749
|
/DPobs/cde/main.r
|
a6db54bbfcd9b2944f1c05c339f8c19cf409f0bb
|
[] |
no_license
|
iagomosqueira/deepfishman
|
33e54c39d50a4716f238b17cf0f9042f1f019488
|
ae2a7fb0a32815fb11cd7c052d3c5e365aea11bd
|
refs/heads/master
| 2021-01-10T21:33:17.487090
| 2012-05-21T13:34:59
| 2012-05-21T13:34:59
| 33,233,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,375
|
r
|
main.r
|
# The main loop of the management-strategy simulation:
# Load the preconditioned stock
# Set parameters (obs error, srr error, nyrs etc)
# Loop over years, assess with a surplus-production model, apply the HCR,
# and run forwards
# Calc the performance stats
#*******************************************************************************
# NOTE(review): rm(list=ls()) and setwd() with an absolute path are
# script-only conveniences; they wipe the user's workspace and break on
# any other machine.
rm(list=ls())
library(FLCore)
library(FLash)
library(FLAssess)
library(FLsp)
#*******************************************************************************
setwd("c:/Projects/Deepfishman/deepfishman/trunk/DPobs/R")
source("hcr_func.r") # defines hcr() used inside the loop below
# Load the conditioned stock (expected to define stk1 and cod1)
source("make_stocks.r")
#*******************************************************************************
# Set some parameters
obs_cv <- 0.6 # lognormal CV of the observation error on the index
srr_sd <- 0 # stock-recruit error; unused below -- TODO confirm
niters <- 1 # number of Monte Carlo iterations
projyrs <- 20 # number of projection years
#Blim <- 0
# Years in which the HCR is applied (the projection window)
hcryrs <- (dims(stk1)$year+1) : (dims(stk1)$year+projyrs)
#*******************************************************************************
# Set up the stk for projection
# Including future weights and all that gubbins
stk_true <- stf(stk1,nyears=projyrs)
# Clean out harvest to avoid confusion.
# NOTE(review): this blanks only the single column `projyrs`, not every
# projection year -- confirm whether [, hcryrs] was intended.
harvest(stk_true)[,projyrs] <- NA
#stk_true <- window(stk1,end=dims(stk1)$year+projyrs)
stk_true <- propagate(stk_true,niters)
# To store output of the HCR: one TAC per iteration and year
TAC <- array(NA,dim=c(niters,dims(stk_true)$year))
# Observed index = true stock with multiplicative lognormal error
index_sp <- stock(stk_true)
index_sp <- index_sp * rlnorm(prod(dim(index_sp)),0,obs_cv)
for (yr in hcryrs)
{
#if (yr==20) browser() # something weird going on...
cat("yr: ", yr, "\n")
# Get new TAC
# Assess using last years catch and index data
# No error on catch
catch_sp <- catch(stk_true)[,1:(yr-1)]
# Refresh last year's index observation with fresh observation error
index_sp[,yr-1] <- stock(stk_true)[,yr-1] * rlnorm(niters,0,obs_cv)
# Plus some obs error!
#index_sp * rlnorm(prod(dim(index_sp)),0,obs_cv)
cat("index: ", index_sp, "\n")
# Fit the surplus-production model to catch/index data up to yr-1
flsp <- FLsp(catch=catch_sp,index=window(index_sp, start=1,end=yr-1))
flsp <- fitsp(flsp,lower=c(1e-6,10),upper=c(10,1e9),control=DEoptim.control(itermax=1500,trace=500))
Bcurrent <- bcurrent(flsp)
# Harvest control rule: TAC from estimated biomass, r, k and Blim
TAC[,yr] <- hcr(B=c(Bcurrent), r= c(params(flsp)['r',]), k= c(params(flsp)['k',]), Blim=0.9*Bmsy(flsp))
cat("New TAC: ", TAC[,yr], "\n")
# Project stock using TAC
ctrl <- fwdControl(data.frame(year=yr, quantity="catch",val=TAC[,yr]))
stk_true <- fwd(stk_true, ctrl=ctrl, sr=list(model=model(cod1), params=params(cod1)))
}
plot(stk_true)
# final estimate of msy (from the last fitted model)
Msy(flsp)
|
c7b5b2504b2f5211a7217ee58b2c5f9373fdd768
|
fb30081730db2b779841e67bb2fbbd69d9117379
|
/CAMM/man/calc_pool.Rd
|
3c6f1060bafe5d8c32441b227295bdd7bc281068
|
[] |
no_license
|
jescoyle/CAMM
|
0095f995c07d633a5d1bb14bc8843e74a561024a
|
1f2625b4359304dd1d5c7a1fab069e4afe7baee9
|
refs/heads/master
| 2020-04-05T19:02:31.417719
| 2016-08-31T17:29:52
| 2016-08-31T17:29:52
| 49,738,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 875
|
rd
|
calc_pool.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_pool.r
\name{calc_pool}
\alias{calc_pool}
\title{Calculate species pool}
\usage{
calc_pool(comm, topo_names, partner)
}
\arguments{
\item{comm}{(required) metacommunity matrix indicating which
association is present in microsite of each site. Rows are sites
and columns are microsites/individuals.}
\item{topo_names}{(required) matrix representing association network
where integers label each association, as produced by
\code{\link{name_topo}}. Labels should match those used in \code{comm}.}
\item{partner}{(required) integer indicating whether the function should
calculate the species pool for hosts (\code{1}) or symbionts (\code{2}).}
}
\value{
site x species matrix indicating species presence in each site
}
\description{
Determines which species are present in a community.
}
|
d69795b0f1359e9c7bdc54757240ef17901e86d4
|
80b653809974fb05b8732bbff9fd8ff8518640d4
|
/10월 30일 _tm_워드클라우딩/TM_Lab04_RTextMining_Corpus.r
|
e20985fe6ca40763032f6874bc21a3b5e8eb15df
|
[] |
no_license
|
namu2018/text-mining
|
ecb10f69aa06e4fda19fe74336889a1e4fb351e9
|
f995e9b379c18d51c1c15b1c1bc64d866e3fb974
|
refs/heads/master
| 2020-04-03T14:33:04.981141
| 2019-02-18T01:07:15
| 2019-02-18T01:07:15
| 155,325,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,426
|
r
|
TM_Lab04_RTextMining_Corpus.r
|
## Lab04_말뭉치(Corpus) 생성, 전처리
# 01. 텍스트 마이닝 패키지 불러오기(tm) - KoNLP
# 02. 데이터 줄단위 불러오기 - readLines
# 03. 텍스트에서 말뭉치(Corpus)로 변환 -
# VectorSource(벡터->데이터소스), DirSource(디렉터리->데이터소스)...,
# 04. corpus 로 변환하여 저장 후, 전처리
# Corpus, VCorpus
# 05. 전처리가 후, 문서 내용 확인
# tm_map( tolower )
#
# 06. 불용어 처리 수행
# stopwords('english')
# 07. 단어문서행렬(Term Document Matrix, TDM)
# TermDocumentMatrix (코퍼스를 단어문서행렬)
# DocumentTermMatrix (코퍼스를 단어문서행렬)
# 02. 데이터 줄단위 불러오기 - readLines
library(tm)
dat <- readLines("E:/BigData3/dataset/movie/dir_multi/ratings01_m.txt")
print(dat)
# 03. 텍스트에서 말뭉치(Corpus)로 변환
tSource <- VectorSource(dat)
tSource
myCor <- Corpus(tSource)
myCor
# 05. 전처리가 후, 문서 내용 확인
# 한글은 tm_map 함수를 하면서 내부적인 글자가 없어질 수 있다.
# tm_map( tolower, stripWhitespace ) 문제 발생 있음.
myCor <- tm_map(myCor, removePunctuation)
myCor <- tm_map(myCor, removeNumbers)
myCor <- tm_map(myCor, stripWhitespace)
inspect(myCor)
myCor[[3]]$content
# 06. 불용어 처리 수행
## stopwords : C:\Users\ktm\Documents\R\win-library\3.5\tm\stopwords
## 나의 라이브러리 위치
.libPaths()
##
# myCor <- tm_map(myCor, removeWords, stopwords("english"))
# 07. 단어 문서 행렬(Term Document Matrix, TDM)
# TF(용어의 빈도수)
# TFIDF
tdm <- TermDocumentMatrix(myCor, control=list(tokenize="scan", wordLengths=c(2,7)))
inspect(tdm)
tdm_M <- as.matrix(tdm)
tdm_M
## 단어 빈도수 구하기
frequency <- rowSums(tdm_M)
frequency
## 정렬
frequency <- sort(frequency, decreasing = T)
frequency # 단어별 빈도
barplot(frequency[1:20], las=2) #막대그래프 그리고
colors()
RColorBrewer::brewer.pal.info
## 워드 클라우드
library(wordcloud)
w1 <- names(frequency) # 단어별- 이름
pal <- brewer.pal(8, "Dark2") # 색 지정
wordcloud(words=w1, # 이름
freq=frequency, # 빈도수
min.freq=1, # 표시할 최소 횟수
random.order=F, # 가장 횟수가 많은 단어를 중심으로
random.color=T, # 여러가지 색을 랜덤하게 지정한다.
colors=pal) # 색 지정.
|
7b622aaaff64301cbbe041837747e1e478cc4999
|
82d10a973ab97bad89ebd240347fe0809fb0350d
|
/R/plan.R
|
05ab5927944336560307d5bc7c97cb2ec9a12ba1
|
[] |
no_license
|
MilesMcBain/drake_make
|
48fb428c532368e73a5408c29067280756cf6c96
|
0ccb2dc12af88d15dc79cb221efb9157fd696171
|
refs/heads/master
| 2022-11-26T13:21:08.489530
| 2020-08-09T10:56:22
| 2020-08-09T10:56:22
| 285,987,239
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
plan.R
|
the_plan <-
drake_plan(
words = readLines(file_in("inputs/words.txt")),
histogram_data = make_historgram_data(words),
histogram_plot = qplot(Length,
Freq,
data = histogram_data),
report = target(
command = {
rmarkdown::render(knitr_in("doc/report.Rmd"))
file_out("doc/report.html")
}
)
)
|
93f292bb7e80eecf0135730fefa1aeed9576861d
|
ccbcaf18e9655c6ad5c71d29956c9761457e6425
|
/Week 4/run_analysis.R
|
edf7249d9758747b8cc9cd526b4f18db4918f8ba
|
[] |
no_license
|
marthur3/Getting-and-Cleaning-Data
|
3a4d1d5f76b181dca3a136f629ca724b9ba5fbab
|
e6abd18958b2728ff91e6511c64964cf1fd29e74
|
refs/heads/master
| 2020-03-26T20:23:06.068880
| 2018-08-19T17:49:39
| 2018-08-19T17:49:39
| 145,320,744
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,294
|
r
|
run_analysis.R
|
library(tidyverse)
library(data.table)
# setwd("~/GitHub/Getting and Cleaning Data/Week 4")
# wd <- "C:/Users/Michael A/Documents/GitHub/Getting and Cleaning Data/Week 4"
# fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# zippedFileName <- "wearables_data.zip"
#
# if (!file.exists(zippedFileName)) {
# download.file(fileUrl, destfile = "./data/wearables_data.zip")
# dateDownloaded <- date()
# }
#
# unzip(zipfile = "data/wearables_data.zip", exdir = paste0(wd,"/data"))
### Test ####
test_data <- fread("UCI HAR Dataset/test/X_test.txt")
test_activities <- (fread("UCI HAR Dataset/test/y_test.txt"))
activity_labels_test <- fread("UCI HAR Dataset/activity_labels.txt")
features_test <- fread("UCI HAR Dataset/features.txt")
subject_test <- fread("UCI HAR Dataset/test/subject_test.txt")
###Collect variable names from column two of the features table
features_test<- features_test %>% unite(V3, V1, V2)
feature_names_test <- features_test$V3
###Assign the variable names to the test_data
names(test_data) <- feature_names_test
##Create column ##Lookup table$Writtenoutnames[match(target$ids, lookup$ids)]
test_activities$Activity_name <- activity_labels_test$V2[match(test_activities$V1, activity_labels_test$V1)]
test_activities$Subjects <- subject_test
test_data <- cbind(test_data, test_activities)
### Train ####
train_data <- fread("UCI HAR Dataset/train/X_train.txt")
train_activities <- (fread("UCI HAR Dataset/train/y_train.txt"))
activity_labels_train <- fread("UCI HAR Dataset/activity_labels.txt")
features_train <- fread("UCI HAR Dataset/features.txt")
subject_train <- fread("UCI HAR Dataset/train/subject_train.txt")
###Collect variable names from column two of the features table
features_train<- features_train %>% unite(V3, V1, V2)
feature_names_train <- features_train$V3
###Assign the variable names to the train_data
names(train_data) <- feature_names_train
##Create column Activity_name ##Lookup table$Writtenoutnames[match(target$ids, lookup$ids)]
train_activities$Activity_name <- activity_labels_train$V2[match(train_activities$V1, activity_labels_train$V1)]
### Add the column Subjects
train_activities$Subjects <- subject_train
###Combines the data with the labels
train_data <- cbind(train_data, train_activities)
#### Combine #####
###Join the test and train data
total_data <- rbind(test_data, train_data, fill = T)
### Test the join
total_data %>%
group_by(Activity_name) %>%
summarise(Count = n())
###Collect the column names
measurements <- as.tibble(names(total_data))
### Create a list of only the columns related to std and mean and the identifiers
my_measurements <- measurements %>%
filter(str_detect(value, pattern = c("mean", "std", "Activity_name", "Subjects")))
### Select the columns
select_data <- total_data %>%
select(match(my_measurements$value, measurements$value))
### Create a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_independent_data <- select_data %>%
group_by(Activity_name, Subjects) %>%
summarise_all (funs(mean))
write.table(tidy_independent_data, row.names = F, file = "tidy_data_set.txt")
|
3a1f22f73f9a47ca1fdfcfa4550cec08d64363da
|
d44517cbe4b2fd0839e8c19a0848a577a1659c86
|
/quiz2.R
|
26b2b44a7289e95befa1020077f961ca1d394a51
|
[] |
no_license
|
gridl/R
|
7bfe0b8da9a3843ba8a8b0a88bee06ba794ed701
|
4a5d638d60a90b32f5b4f9c3c31d1faa37ede09d
|
refs/heads/master
| 2021-05-08T20:39:32.392688
| 2015-06-29T02:10:09
| 2015-06-29T02:10:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 666
|
r
|
quiz2.R
|
library(datasets)
data(iris)
?iris
# There will be an object called 'iris' in your workspace.
# In this dataset, what is the mean of 'Sepal.Length' for
# the species virginica?
s<-split(iris,iris$Species)
answer<- apply(s$virginica[,1:4],2,mean)
library(datasets)
data(mtcars)
x<-matrix(rnorm(200),20,10)
apply(x,1,quantile,probs=c(0.25,0.75))
a<-array(rnorm(2*2*10),c(2,2,10))
apply(a,c(1,2),mean)
# or
rowMeans(a,dims=2)
# colMeans(a,dims=2)
#
# !is.vector
# !is.object
# !is.character
# !is.matrix
# !is.list
# !is.numeric
# !is.array
x<-list(a=matrix(1:4,2,2),b=matrix(1:6,3,2))
lapply(x,function(y) y[,1])
sapply(x,function(y) y[,1])
|
a1ba9e7f7b26832c9e9fccb47f9baf9c1f6db998
|
c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab
|
/R/libgladeManuals.R
|
20eeee0cc0c7d6b9979f3c908b5ac441aa0b3b23
|
[] |
no_license
|
cran/RGtk2.10
|
3eb71086e637163c34e372c7c742922b079209e3
|
75aacd92d4b2db7d0942a3a6bc62105163b35c5e
|
refs/heads/master
| 2021-01-22T23:26:26.975959
| 2007-05-05T00:00:00
| 2007-05-05T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,389
|
r
|
libgladeManuals.R
|
# reason: just wanted to add default for size parameter, for convenience
gladeXMLNewFromBuffer <-
function(buffer, size = nchar(buffer), root = "NULL", domain = "NULL")
{
buffer <- as.character(buffer)
size <- as.integer(size)
if (!is.null( root )) root <- as.character(root)
if (!is.null( domain )) domain <- as.character(domain)
w <- .RGtkCall("S_glade_xml_new_from_buffer", buffer, size, root, domain)
return(w)
}
# reason: the following could not be generated due to the bad GtkSignalFunc type
# - we're just treating it as any arbitrary R callback
gladeXMLSignalConnect <-
function(object, handlername, func)
{
checkPtrType(object, "GladeXML")
handlername <- as.character(handlername)
func <- as.function(func)
w <- .RGtkCall("S_glade_xml_signal_connect", object, handlername, func)
return(invisible(w))
}
gladeXMLSignalConnectData <-
function(object, handlername, func, user.data)
{
checkPtrType(object, "GladeXML")
handlername <- as.character(handlername)
func <- as.function(func)
w <- .RGtkCall("S_glade_xml_signal_connect_data", object, handlername, func, user.data)
return(invisible(w))
}
gladeXMLConstruct <-
function(object, fname, root = "NULL", domain = "NULL")
{
.notimplemented("makes no sense for R, just use gladeXMLNew()")
}
|
3a9d025716541e08b45830b8fc23a25ad7516559
|
5bf67929c39c37f425fb5e85a16ca82077be0b68
|
/R/utils.R
|
c85647ea350682b4df66fbc004fc30bbc03a4919
|
[] |
no_license
|
FontierZoe/Ma-reconstruction-mammire
|
6ff454768365085c9b85e2d4f7b8631be465498b
|
e6421f9860480eccd2f7f7f25f3fc666ecfe0b94
|
refs/heads/main
| 2023-06-30T12:41:53.448533
| 2021-07-28T15:20:17
| 2021-07-28T15:20:17
| 391,101,511
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,316
|
r
|
utils.R
|
#### Fonctions pour les graphes de reconstruction#####
# Fontier Zoé #
COLORS_CODE<-c()
LABEL_CODE<-c("dedoublement du sein restant"="Dedoublement du sein restant",
"dermopigmentation"="Plaque aréolo mamelonnaire par dermopigmentation",
"grand dorsal"= "Muscle du grand dorsal",
"implant"="Implant mammaire",
"DIEP"= "Technique du DIEP",
"lambeau pedicule/ TRAM"= "Technique du lambeau pédiculé/ TRAM",
"lipomodelage"= "Lipomodelage",
"plaque areolo mamelonnaire"= "Reconstruction de la plaque aréolo mamelonnaire",
"Gracillis/PAP/lambeau libre"="Gracillis/PAP/lambeau libre")
COLOR_CODE<-c("dedoublement du sein restant"="#fde0dd",
"dermopigmentation"="#edf8b1",
"grand dorsal"= "#2c7fb8",
"implant"="#21405F",
"DIEP"= "#addd8e",
"lambeau pedicule/ TRAM"= "#feb24c",
"lipomodelage"= "#8856a7",
"plaque areolo mamelonnaire"= "#FDD782",
"Gracillis/PAP/lambeau libre"="slategray2")
COLOR_PMSI<-c("CH"= "#FC4E07",
"CHR/U"= "#045991",
"CLCC"= "#B3DE3A",
"Prive"="#E7B800",
"PSPH/EBNL"="#D83E81"
)
|
9be8531d338525e02a039c0a35b65ae350e51203
|
63322f7ce9eca510117d7559640ea18b2833496e
|
/functions.R
|
f627a2fe574c0d39f2db33c613d5a673e353292b
|
[] |
no_license
|
iositan/R_Programming
|
60ce1df94b433fb269e472c21620600c2b16bc2d
|
51d8d62a68f6903c09eda39390520259a9b69a4b
|
refs/heads/master
| 2016-09-05T19:04:39.700801
| 2014-07-26T20:50:24
| 2014-07-26T20:50:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
functions.R
|
add2 <- function(x,y) {
x+y
}
above10 <- function(x) {
use <- x > 10
x[use]
}
above <- function(x, n = 10) {
use <- x > n
x[use]
}
columnean <- function(y, removeNA = T) {
# no of columns
nc <- ncol(y)
# an empty vector
means <- numeric(nc)
for (i in 1:nc) {
means[i] <- mean(y[,i], na.rm=removeNA)
}
means
}
lm <- function(x) { x * x }
make.power <- function(n) {
pow <- function(x) {
x^n
}
pow
}
#----
# Week 2
# Q 1
cube <- function(x, n) {
x^3
}
# Q 3
f <- function(x) {
g <- function(y) {
y + z
}
z <- 4
x + g(x)
}
# Q 5
h <- function(x, y=NULL, d = 3L) {
z <- cbind(x, d)
if ( !is.null(y))
z <- z + y
else
z <- z + f
g <- x + y / z
if (d == 3L)
return(g)
g <- g + 10
g
}
|
6255472a68d957eedba3f65b240cec7ae6bb9c65
|
2f44cd9955606c4784840822bf0e63e0444a367c
|
/data/WoS/col_specs.R
|
962657ae2329adb2979f2e067b20d1053a48455e
|
[] |
no_license
|
kleinlennart/literature-review
|
07dcefddd2fd7bb402541ee9f751b9b2bd88b4d8
|
c0ea0de754026b519ec3c8e89c82ba578f0856b1
|
refs/heads/master
| 2023-04-02T19:51:17.132934
| 2021-04-06T21:08:29
| 2021-04-06T21:08:29
| 355,305,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,167
|
r
|
col_specs.R
|
col_specs <- cols(
Title = col_character(),
Authors = col_character(),
`Corporate Authors` = col_character(),
Editors = col_character(),
`Book Editors` = col_character(),
`Source Title` = col_character(),
`Publication Date` = col_character(),
`Publication Year` = col_character(),
Volume = col_character(),
Issue = col_character(),
`Part Number` = col_double(),
Supplement = col_character(),
`Special Issue` = col_character(),
`Beginning Page` = col_character(),
`Ending Page` = col_character(),
`Article Number` = col_character(),
DOI = col_character(),
`Conference Title` = col_character(),
`Conference Date` = col_character(),
`Total Citations` = col_double(),
`Average per Year` = col_double(),
`1965` = col_double(),
`1966` = col_double(),
`1967` = col_double(),
`1968` = col_double(),
`1969` = col_double(),
`1970` = col_double(),
`1971` = col_double(),
`1972` = col_double(),
`1973` = col_double(),
`1974` = col_double(),
`1975` = col_double(),
`1976` = col_double(),
`1977` = col_double(),
`1978` = col_double(),
`1979` = col_double(),
`1980` = col_double(),
`1981` = col_double(),
`1982` = col_double(),
`1983` = col_double(),
`1984` = col_double(),
`1985` = col_double(),
`1986` = col_double(),
`1987` = col_double(),
`1988` = col_double(),
`1989` = col_double(),
`1990` = col_double(),
`1991` = col_double(),
`1992` = col_double(),
`1993` = col_double(),
`1994` = col_double(),
`1995` = col_double(),
`1996` = col_double(),
`1997` = col_double(),
`1998` = col_double(),
`1999` = col_double(),
`2000` = col_double(),
`2001` = col_double(),
`2002` = col_double(),
`2003` = col_double(),
`2004` = col_double(),
`2005` = col_double(),
`2006` = col_double(),
`2007` = col_double(),
`2008` = col_double(),
`2009` = col_double(),
`2010` = col_double(),
`2011` = col_double(),
`2012` = col_double(),
`2013` = col_double(),
`2014` = col_double(),
`2015` = col_double(),
`2016` = col_double(),
`2017` = col_double(),
`2018` = col_double(),
`2019` = col_double(),
`2020` = col_double(),
`2021` = col_double()
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.