blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5092c71adbed4b2bb13bd63d95ca35d0532253bf
|
9c2f2cacd089553020d1e9f51aa91100d4c2d77f
|
/R/sigex.specar.r
|
a265794137ea4900cae92883f185e3f47bd26fcd
|
[] |
no_license
|
palatej/sigex
|
a870a9e4015c8d7f1028bd451ec3dd93c7b62fdc
|
82496b40789390116875da7c3008f62247b44107
|
refs/heads/master
| 2020-05-03T03:18:30.375009
| 2018-10-29T16:25:27
| 2018-10-29T16:25:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,121
|
r
|
sigex.specar.r
|
#' Plot the AR spectrum of one component of a multivariate time series
#'
#' @param data.ts a T x N matrix ts object,
#'   corresponding to N time series of length T
#' @param diff boolean, if TRUE difference the series
#'   and plot Growth Rate, else in Levels
#' @param subseries index between 1 and N, indicating which series
#'   to examine
#'
#' @return NA
#' @export
#'
sigex.specar <- function(data.ts,diff=FALSE,subseries)
{
##########################################################################
#
# sigex.specar
# Copyright (C) 2017 Tucker McElroy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
############################################################################
	# Purpose: plot the AR spectrum of the selected subseries, optionally
	# after first-differencing, with vertical reference lines at the
	# integer (seasonal) frequencies 1 .. floor(period/2).
	period <- frequency(data.ts)
	freqs <- floor(period/2)
	# Pick the series to plot and a matching title suffix, then draw the
	# AR-spectrum estimate once (the two branches of the original differed
	# only in these two values).
	if(diff) {
		series <- diff(data.ts)[,subseries]
		title.suffix <- "Growth Rate"
	} else {
		series <- data.ts[,subseries]
		title.suffix <- "Levels"
	}
	spec.ar(series, main = paste(colnames(data.ts)[subseries], title.suffix))
	abline(v = seq(1,freqs), col = 2)
}
|
52f9154e59702580d11d06b689d4d8fc70689330
|
483a05fd21e1cd199346d5045ae16060057501d2
|
/man/describe.Rd
|
6454b81a650d07e321aa3b3a3916cddb1bdf7946
|
[
"MIT"
] |
permissive
|
TileDB-Inc/TileDB-R
|
34862c95815a77ba9b925f8d6e24dd362b3c617a
|
71901f8c9d5de860dbe078a0e708dda8f1b9b407
|
refs/heads/master
| 2023-09-04T05:28:09.468320
| 2023-08-23T13:24:33
| 2023-08-23T13:24:33
| 91,851,655
| 90
| 20
|
NOASSERTION
| 2023-09-14T20:48:10
| 2017-05-19T23:07:10
|
R
|
UTF-8
|
R
| false
| true
| 477
|
rd
|
describe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ArraySchema.R
\name{describe}
\alias{describe}
\title{Describe a TileDB array schema via code to create it}
\usage{
describe(arr)
}
\arguments{
\item{arr}{A TileDB Array object}
}
\value{
Nothing is returned as the function is invoked for the side effect
of printing the schema via a sequence of R instructions to re-create it.
}
\description{
Describe a TileDB array schema via code to create it
}
|
ea92c7fc73df592bf763fe185d78fe3bcc96fc31
|
f2fda5ab5c19b8af71607a7f2d2d8f4cd3b26bb4
|
/figs/bayesian-risk/fig-risk.R
|
80cbadbc2bc94672c749f670c3501e53a2bb34f2
|
[
"MIT"
] |
permissive
|
rmit-ir/bayesian-shallow
|
657528666df9900369c3e385673181d828bc9394
|
f90893a60ffbe3c6ee9f7f3b4420a28a1476aa61
|
refs/heads/master
| 2023-03-16T20:25:47.733601
| 2021-03-20T06:58:06
| 2021-03-20T06:58:06
| 322,165,651
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,527
|
r
|
fig-risk.R
|
# Ridge plots of Bayesian posterior URisk distributions per system, split
# into a top-ranked panel and a bottom-ranked panel, saved as a PDF.
# Usage: Rscript fig-risk.R <alpha>
source("../../R/compute_cis.R", chdir=TRUE)
library(brms)
library(stringr);
library(ggplot2);
library(dplyr);
library(ggridges)
library(cowplot)
# alpha selects which precomputed risk-component file to load.
args = commandArgs(trailingOnly=TRUE)
alpha <- as.numeric(args[1])
# NOTE(review): assumes this .Rdata provides filt_agg, filt_wins_agg and
# filt_losses_agg (per-draw URisk tables keyed by system) -- confirm.
load(paste0("../../R/risk_comps/filt_", alpha, ".Rdata"))
# Per-system mean URisk (point estimate used for ranking).
filt_point <- aggregate(urisk ~ system, filt_agg, FUN="mean")
# 95% central-interval fences of the URisk draws per system.
fences_lower <- filt_agg %>% group_by(system) %>% summarise(lower = as.numeric(quantile(urisk, probs=(.025))))
fences_upper <- filt_agg %>% group_by(system) %>% summarise(upper = as.numeric(quantile(urisk, probs=(.975))))
fences <- merge(fences_lower, fences_upper)
fences <- fences %>% mutate(urisk=0)
filt_point <- filt_point %>% mutate(rank = dense_rank(urisk))
# Attach point estimate and rank to every table; merge suffixes the
# duplicated column, so the per-row URisk becomes urisk.x below.
filt_wins_agg <- merge(filt_wins_agg, filt_point, by.x="system", by.y="system")
filt_losses_agg <- merge(filt_losses_agg, filt_point, by.x="system", by.y="system")
filt_agg <- merge(filt_agg, filt_point, by.x="system", by.y="system")
fences <- merge(fences, filt_point, by.x="system", by.y="system")
# Truncate long system names for the y-axis labels.
filt_point$system <- str_trunc(filt_point$system, 10, "right")
filt_wins_agg$system <- str_trunc(filt_wins_agg$system, 10, "right")
filt_losses_agg$system <- str_trunc(filt_losses_agg$system, 10, "right")
filt_agg$system <- str_trunc(filt_agg$system, 10, "right")
fences$system <- str_trunc(fences$system, 10, "right")
# Top panel: systems ranked 1-4. NOTE(review): the global aes references
# urisk.x, which filt_point itself does not contain; it is only resolved
# against each layer's merged data -- verify this renders as intended.
p <- ggplot(filt_point[filt_point$rank < 5,], aes(y=reorder(system, -urisk.x)))
p <- p + geom_vline(aes(xintercept=0), color="#7570b3")
p <- p + geom_density_ridges(data=filt_wins_agg[filt_wins_agg$rank < 5,], aes(x=urisk.x), fill="#91bfdb", scale=0.8, alpha=0.5)
p <- p + geom_density_ridges(data=filt_losses_agg[filt_losses_agg$rank < 5,], aes(x=urisk.x), fill="#fc8d59", scale=0.8, alpha=0.5)
p <- p + geom_density_ridges(data=filt_agg[filt_agg$rank < 5,], aes(x=urisk.x), fill="#ffffbf", scale=0.8, alpha=0.75)
p <- p + geom_errorbarh(data=fences[fences$rank < 5,], aes(xmin=lower, xmax=upper))
p <- p + xlab("Omitted for Brevity")
p <- p + coord_cartesian(xlim=c(-0.6, 1.25))
p <- p + theme(legend.position="none")
p <- p + theme(legend.background = element_blank(),
legend.key=element_blank())
p <- p + theme(axis.text.y = element_text(size = 10), axis.title.y=element_blank()) # changes axis labels
p <- p + theme(axis.ticks.x = element_blank(), axis.text.x=element_blank(), axis.title.x=element_text(size=7)) # changes axis labels
p <- p + theme(axis.line.x=element_blank())
# Bottom panel: systems ranked above 35; same layers, but the x axis is shown.
p2 <- ggplot(filt_point[filt_point$rank > 35,], aes(y=reorder(system, -urisk.x)))
p2 <- p2 + geom_vline(aes(xintercept=0), color="#7570b3")
p2 <- p2 + geom_density_ridges(data=filt_wins_agg[filt_wins_agg$rank > 35,], aes(x=urisk.x), fill="#91bfdb", scale=0.8, alpha=0.5)
p2 <- p2 + geom_density_ridges(data=filt_losses_agg[filt_losses_agg$rank > 35,], aes(x=urisk.x), fill="#fc8d59", scale=0.8, alpha=0.5)
p2 <- p2 + geom_density_ridges(data=filt_agg[filt_agg$rank > 35,], aes(x=urisk.x), fill="#ffffbf", scale=0.8, alpha=0.75)
p2 <- p2 + geom_errorbarh(data=fences[fences$rank > 35,], aes(xmin=lower, xmax=upper))
p2 <- p2 + xlab("Bayesian PPD URisk")
p2 <- p2 + coord_cartesian(xlim=c(-0.6, 1.25))
p2 <- p2 + theme(legend.position="none")
p2 <- p2 + theme(legend.background = element_blank(),
legend.key=element_blank())
p2 <- p2 + theme(axis.text.y = element_text(size = 10), axis.title.y=element_blank()) # changes axis labels
# Stack the two panels vertically and save one PDF per alpha value.
p <- plot_grid(p, p2, align = "v", nrow = 2, rel_heights = c(0.45, 0.55))
ggsave(p, file=paste0("s-round1-risk", alpha, ".pdf"), width=3, height=2)
|
6fee1fa06a95ce4c6659e926ab5bc9daf5d257e3
|
d1894acdd83618a12e9f155584ad788497a7d964
|
/run_analysis.R
|
fac68851353d8f036f35de0320036a55e308b948
|
[] |
no_license
|
wintics/TS_Getting_Cleaning_Data
|
0084353e4add5d0db11e85419141063d1489f3cc
|
e0963896b41f1f93c0183efd84bb1371dc46eafe
|
refs/heads/main
| 2023-01-02T20:41:56.284014
| 2020-10-25T09:43:10
| 2020-10-25T09:43:10
| 306,884,640
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,419
|
r
|
run_analysis.R
|
##########################################
# Builds a tidy summary of the UCI HAR data set.
# Assumed: data files are downloaded and unpacked, and the working
# directory for this code is 'UCI HAR Dataset'.
##########################################
library(dplyr)
# Step 0. Load data.
s_test<-read.table("./test/subject_test.txt")
X_test<-read.table("./test/X_test.txt")
y_test<-read.table("./test/y_test.txt")
s_train<-read.table("./train/subject_train.txt")
X_train<-read.table("./train/X_train.txt")
y_train<-read.table("./train/y_train.txt")
features<-read.table("features.txt")
activ<-read.table("activity_labels.txt")
# Step 1. Merge the training and the test sets into one data set.
## Put together test data and training data by columns (all have the same number of rows).
test<-cbind(s_test,y_test,X_test)
train<-cbind(s_train,y_train,X_train)
## Put together training and test data by rows (both have the same column names).
## (Renamed from `all` to avoid shadowing base::all().)
merged<-rbind(train,test)
## The data frame now has several columns named 'V1'; assign unique names.
names(merged)<-c("subjectID","act_code",features$V2)
# Step 2. Extract the measurements on the mean and standard deviation for each measurement:
## the two id columns plus every column whose name contains "mean()" or "std()".
part<-select(merged, "subjectID","act_code",contains("mean()"),contains("std()"))
# Step 3. Use descriptive activity names to name the activities in the data set.
## Activity codes index directly into the label vector (code matches index).
label_list<-activ$V2
part[,2]<-label_list[part[,2]]
# Step 4. Appropriately label the data set with descriptive variable names.
## Rename the 2nd column as its content changed (code -> label).
names(part)[2] <- "activity"
# Step 5. From the data set in step 4, create a second, independent tidy data set
# with the average of each variable for each activity and each subject.
## 'subjectID' and 'activity' swap places for the output, then group and average
## (grouping variables are always excluded from the summary).
new <- relocate(part,"activity",.before="subjectID")
## Fixed: group_by_() is defunct in current dplyr -- use group_by() with
## unquoted column names; across(everything(), ...) replaces the superseded
## summarise_all().
new<-new %>% group_by(activity, subjectID) %>% summarise(across(everything(), mean))
# Write the tidy data set to a txt file in the working directory.
## Fixed: spell out row.names (row.name only worked via partial argument matching).
write.table(new, "tidy_data.txt", row.names=FALSE)
|
d72f713328b4b98bf15b9c34b18f3391ee58df92
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spatialwarnings/examples/rspectrum.Rd.R
|
d67d637df8c0ceada20237286dd605bcadb550a1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
rspectrum.Rd.R
|
# Auto-extracted example code for spatialwarnings::rspectrum.
library(spatialwarnings)
### Name: rspectrum
### Title: r-spectrum
### Aliases: rspectrum
### ** Examples
# Spectrum of white noise
# 100 x 100 logical matrix of iid coin flips.
rmat <- matrix(runif(100*100) > .5, ncol = 100)
spec <- rspectrum(rmat)
plot(spec, type = "l")
# Add some spatial correlation and compare the two spectra
# Smooth each cell with the mean of its 3x3 neighbourhood.
# NOTE(review): at i=1 or j=1 the window (i-1):(i+1) includes index 0,
# which R silently drops, so border cells average a smaller window; the
# inner loop also bounds j by nrow() rather than ncol() (harmless here
# only because the matrix is square).
rmat.cor <- rmat
for (i in seq(1, nrow(rmat)-1)) {
for (j in seq(1, nrow(rmat)-1)) {
rmat.cor[i,j] <- mean(rmat[(i-1):(i+1), (j-1):(j+1)])
}
}
spec.cor <- rspectrum(rmat.cor)
# Empty plot sized by the correlated spectrum, then overlay both curves.
plot(spec.cor, type = "n")
lines(spec, col = "black")
lines(spec.cor, col = "blue")
|
b3cd6097ce4678aac7e296c9d2d8c5a5ddb5da08
|
3cc3c71740b7453b274808bda5696bd5e77eca59
|
/R/residuals.heckitrob.R
|
26f358ce0a540a2792d940d9d00a8eac465cdea8
|
[] |
no_license
|
cran/ssmrob
|
197e166e18ded8430d558f068c3e0b66668a3e0d
|
e91bed9355484de61fc25de0253288aff3dd7a57
|
refs/heads/master
| 2023-03-16T15:53:07.223539
| 2021-08-20T14:00:06
| 2021-08-20T14:00:06
| 17,700,091
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 85
|
r
|
residuals.heckitrob.R
|
# residuals() method for heckitrob fits: the residuals of a robust
# Heckman two-step model are those of its second-stage regression.
# object: a heckitrob fit containing a `stage2` model; ...: ignored,
# present only to match the residuals() generic.
residuals.heckitrob <- function(object, ...) {
  stage2_fit <- object$stage2
  resid(stage2_fit)
}
|
03363fdcbe93a5617569279b7782976001196f6c
|
b93b2d50cf239c6c65060424e5b255322a4bef78
|
/man/add.timeframe.Rd
|
6b8ad985d1d9dafc41df521790c96e24bafe6ecf
|
[] |
no_license
|
LunaSare/phyloch
|
3c49eb48dd0d36e1b2fd1a8c7d36ff6407f47ef9
|
73d21686db41630ef39560ef621b1763fc2e9853
|
refs/heads/master
| 2020-03-28T06:45:10.176727
| 2018-11-29T03:02:57
| 2018-11-29T03:02:57
| 147,857,428
| 0
| 0
| null | 2018-09-07T17:56:25
| 2018-09-07T17:56:24
| null |
UTF-8
|
R
| false
| false
| 1,763
|
rd
|
add.timeframe.Rd
|
\name{add.timeframe}
\Rdversion{1.1}
\alias{add.timeframe}
\title{High-lighting ages in chronograms}
\description{This function highlights an age or an age range in a plotted chronogram, optionally restricted to a clade or to a set of tips.}
\usage{
add.timeframe(phy, age, clade = NULL, tips = NULL, ...)
}
\arguments{
\item{phy}{An object of class \code{phylo}.}
\item{age}{A vector of mode \code{"numeric"} giving an age or an age range.}
  \item{clade}{An integer giving the node number of the MRCA of a clade to which the timeframe should be restricted.}
\item{tips}{A vector of integers giving the numbers of tips to which the time frame should be restricted. Can be used for non-monophyletic groups.}
\item{\dots}{Further arguments.}
}
\value{none}
\author{Christoph Heibl}
\seealso{\code{\link{add.geoscale}}, \code{\link{read.beast}}, \code{\link{noi}}}
\examples{
# phylogenetic relationships of bird orders:
# -----------------------------------------
data(bird.orders)
# plot tree:
# setting edge and tip colors to zero (invisible) improves
# resolution
# ----------
plot(bird.orders, edge.color = 0, tip.color = 0)
# highlight some geological event, etc. ...
# -----------------------------------------
add.timeframe(bird.orders, age = c(12, 14), col = "skyblue",
border = NA)
# restrict to certain clade ...
# -----------------------------
cl <- noi(bird.orders, c("Struthioniformes", "Anseriformes"))
add.timeframe(bird.orders, age = c(16:21), col = "pink",
border = NA, clade = cl)
# or to some other non-monophyletic assembly:
# ------------------------------------------
add.timeframe(bird.orders, age = c(17:24), col = "yellow",
border = NA, tips = 6:8)
# now plot tree again:
# arguments must be exactly the same of course, except for
# edge and tip colors
# -------------------
plot.phylo.upon(bird.orders)
}
|
4e56a8ea908b422384d75e51ea25aac7a2bae792
|
a845ad855b81eec06afe792f5f18a82a8e1c89d8
|
/Week 3/R/Cluster1.R
|
f452e94b6580a688826c6d1696e57113d36dc063
|
[] |
no_license
|
mwickers1/Statistical-Machine-Learning
|
a0542d7e8951ba636ef5b48de25deeb6b7a60b42
|
7af2138a4aaece6e28974132eb3e829e2e31ae09
|
refs/heads/master
| 2020-08-08T01:47:10.596237
| 2019-12-11T16:34:41
| 2019-12-11T16:34:41
| 213,664,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,505
|
r
|
Cluster1.R
|
#
# NOTE(review): this file is a lecture transcript -- it interleaves R code
# with pasted console output (the bare number tables and the "> state.km"
# block below). It will NOT source() cleanly as-is; run the code lines
# interactively.
#
# Clustering examples. kmeans() is in base R, but the rest is in the
# "cluster" package.
#
# First, let's plot by PCs.
#
state.pr <- prcomp (state.x77, scale=T)
plot (state.pr$x[,1], state.pr$x[,2], type = "n")
text (state.pr$x[,1], state.pr$x[,2], state.abb)
#
#
library (cluster)
#
# k-means example. We already know to scale the columns of state.x77. Let's use 5 cluster.
#
state.km <- kmeans (scale (state.x77), 5)
#
# How big are the clusters?
#
table (state.km$cluster)
1 2 3 4 5
12 12 16 1 9 # Your results will probably differ!
#
# But this is only a local optimum; if we run it again, we can get different results.
#
state.km2 <- kmeans (scale (state.x77), 5)
table (state.km2$cluster)
1 2 3 4 5
1 8 11 5 25 # Again, this algorithm is variable
#
# Here's how the two compare. Notice that the two-way table has lots of
# zeros in it. The more zeros, the more agreement.
#
table (state.km$cluster, state.km2$cluster)
1 2 3 4 5
1 0 7 0 5 0
2 0 0 11 0 1
3 0 0 0 0 16
4 1 0 0 0 0
5 0 1 0 0 8
#
# So in this example the first clustering produced a cluster it named "1" with
# 12 members (top row), but the second clustering put seven of those, plus one
# more state, into its cluster "2," and the other five into cluster "4."
#
# Let's set nstart to a large number to try to get a global optimum.
#
state.km <- kmeans (scale (state.x77), 5, nstart=1000, iter.max=100)
state.km2 <- kmeans (scale (state.x77), 5, nstart=1000, iter.max=100)
#
# These two runs agree, so maybe we're at the optimum.
#
table (state.km$cluster, state.km2$cluster)
1 2 3 4 5
1 1 0 0 0 0
2 0 0 11 0 0
3 0 0 0 17 0 # Yours will probably look something like this
4 0 3 0 0 0
5 0 0 0 0 18
#
# Of course a matrix of total agreement need not be diagonal; it can have permuted
# columns or rows. Here's what the output looks like
#
> state.km
K-means clustering with 5 clusters of sizes 1, 11, 17, 3, 18
Cluster means:
Population Income Illiteracy Life Exp Murder HS Grad Frost Area
1 -0.8693980 3.0582456 0.5413980 -1.1685098 1.06242932 1.6828035 0.9145676 5.80934967
2 -0.2269956 -1.3014617 1.3915271 -1.1773136 1.09198092 -1.4157826 -0.7206500 -0.23402899
3 0.3101316 0.4583003 -0.2016973 0.0812333 -0.02909674 0.1534059 -0.3992647 -0.29398491
4 2.8948232 0.4869237 0.6507713 0.1301655 1.01728104 0.1393257 -1.1310576 0.99272004
5 -0.5883532 0.1114420 -0.7984253 0.6859720 -0.86841211 0.6036071 0.9551809 -0.06752485
Clustering vector:
Alabama Alaska Arizona Arkansas California
2 1 3 2 4
Colorado Connecticut Delaware Florida Georgia
5 5 3 3 2
...
Within cluster sum of squares by cluster:
[1] 0.00000 23.62227 54.30052 11.34904 44.41991
(between_SS / total_SS = 65.9 %)
#
# The first part tells us where (in this scaled space) the five cluster centers are; the
# clustering part tells us which state goes with which cluster; and the last part tells us
# how spread out the clusters are. The first cluster contains only Alaska, so it has no spread.
#
# Cluster 2 seems reasonable....
#
state.km$cluster[state.km$cluster == 2]
Alabama Arkansas Georgia Kentucky Louisiana
2 2 2 2 2
Mississippi New Mexico North Carolina South Carolina Tennessee
2 2 2 2 2
West Virginia
2
#
# Just to draw a cute picture:
#
library (maps)
map ("state")
map ("state", state.name[state.km$cluster == 1], col = "darkgreen", fill = T, add=T)
map ("state", state.name[state.km$cluster == 2], col = "blue", fill = T, add=T)
map ("state", state.name[state.km$cluster == 3], col = "orange", fill = T, add=T)
map ("state", state.name[state.km$cluster == 4], col = "white", fill = T, add=T)
map ("state", state.name[state.km$cluster == 5], col = "pink", fill = T, add=T)
#
# What's a good value for k? Let's try 2, 3, ..., 15 clusters.
#
wss <- numeric (length (2:15))
names (wss) <- 2:15 # vector names
for (i in 2:15) {
out <- kmeans (scale (state.x77), i, nstart=1000, iter.max=100)
wss[as.character (i)] <- out$tot.withinss
}
plot (2:15, wss, type = "b") # where is the knee??
|
23a12f374848e953efe90f954abba6ce5dc36016
|
495b8b44ed4f7d2ac023a6dcd39b6c1a49b341dc
|
/R/has-dims.R
|
0e111623a76c8924589a733b1b0908c005bd74f5
|
[] |
no_license
|
cran/assertive.properties
|
f63ed111e35472517eef0a7f2ce84ff49b41c47c
|
69d091ea0cbfeb94112c74838adac4fc638f5e1e
|
refs/heads/master
| 2022-04-30T00:55:55.308834
| 2022-04-21T11:40:08
| 2022-04-21T11:40:08
| 48,076,805
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,089
|
r
|
has-dims.R
|
#' Does the input have rows/columns?
#'
#' Checks to see if the input has rows/columns.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{has_rows} and \code{has_cols} return \code{TRUE} if
#' \code{nrow} and \code{ncol} respectively return a value that is
#' non-null and positive. The \code{assert_*} functions return nothing
#' but throw an error if the corresponding \code{has_*} function returns
#' \code{FALSE}.
#' @seealso \code{\link{ncol}}.
#' @examples
#' assert_has_rows(data.frame(x = 1:10))
#' assert_has_cols(matrix())
#' @export
has_cols <- function(x, .xname = get_name_in_parent(x))
{
  # Fail fast with a falsifiable result: first when x has no column
  # dimension at all (ncol is NULL), then when it has zero columns.
  n_cols <- ncol(x)
  if(is.null(n_cols)) return(false("The number of columns in %s is NULL.", .xname))
  if(n_cols == 0L) return(false("The number of columns in %s is zero.", .xname))
  TRUE
}
#' Does the input have dimensions?
#'
#' Checks to see if the input has dimensions.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{has_dims} returns\code{TRUE} if \code{dim} is non-null.
#' \code{assert_has_dims} returns nothing but throws an error if
#' \code{has_dims} is not \code{TRUE}.
#' @seealso \code{\link[base]{dim}}, \code{\link{is_of_dimension}}.
#' @export
has_dims <- function(x, .xname = get_name_in_parent(x))
{
  # Succeed exactly when x carries a dim attribute.
  if(is.null(dim(x))) return(false("The dimensions of %s are NULL.", .xname))
  TRUE
}
#' @rdname has_cols
#' @export
has_rows <- function(x, .xname = get_name_in_parent(x))
{
  # Mirror image of has_cols: NULL row count fails first, zero rows second.
  n_rows <- nrow(x)
  if(is.null(n_rows)) return(false("The number of rows in %s is NULL.", .xname))
  if(n_rows == 0L) return(false("The number of rows in %s is zero.", .xname))
  TRUE
}
|
b3c5428ed6cc72ae4332c8c215238b37db5c4859
|
aaf76f929894496b5ba19a4673da2de11e31ed36
|
/STAD57/Lectures and ALl/material/problem sets/R stuff/PS3.R
|
626ce48517d29cfd70a36e429018215c7d11c1b6
|
[] |
no_license
|
MLSaj/SchoolProjects
|
53923dbf91972c37a7fb541c491299dcc6cb53cd
|
2190f1b699dbb52d2bfd9b5c924d1644dd82d387
|
refs/heads/master
| 2022-06-16T20:15:56.709652
| 2020-05-11T20:43:02
| 2020-05-11T20:43:02
| 263,151,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
PS3.R
|
#### 3.7 from text
# Theoretical ACFs of three AR(2) processes, lags 0..10.
# Fixed: spell out lag.max -- `10` positionally and `lag=` below only
# worked through partial argument matching.
ARMAacf(ar=c(-1.6,-.64), ma=0, lag.max=10)
ARMAacf(ar=c(.4,.45), ma=0, lag.max=10)
ARMAacf(ar=c(1.2,-.85), ma=0, lag.max=10)
#### 3.8 from text
# Compare AR(1), MA(1) and ARMA(1,1) ACFs out to lag 25, side by side.
phi <- .6
theta <- .9
AR_1.acf <- ARMAacf(ar=phi, lag.max=25)
MA_1.acf <- ARMAacf(ma=theta, lag.max=25)
ARMA_1.1.acf <- ARMAacf(ar=phi, ma=theta, lag.max=25)
par(mfrow=c(1,3))
# Fixed: the x-axis label argument is `xlab`; `xla` only worked via
# partial argument matching through `...`.
plot(AR_1.acf, type='h', main="AR(1) ACF", xlab='lag', ylab='ACF' )
plot(MA_1.acf, type='h', main="MA(1) ACF", xlab='lag', ylab='ACF' )
plot(ARMA_1.1.acf, type='h', main="ARMA(1,1) ACF", xlab='lag', ylab='ACF' )
|
767605cb7a777a8b83066f18b8f4ae4af4989f7b
|
0b7f3a8575e4aa9d75727f605fb3e2dc147b583c
|
/man/subset.geodata.Rd
|
d543a61aaba34a172bd3d75100d75b883bb72abf
|
[] |
no_license
|
cran/geoR
|
530ade489d2efcd25b9273d935fd43a3e998694b
|
03850658d4a832f7640b09c6d2ab3dbd58c61e37
|
refs/heads/master
| 2023-08-07T23:47:16.050075
| 2022-08-09T15:00:05
| 2022-08-09T15:00:05
| 17,696,346
| 9
| 12
| null | 2023-07-29T17:01:29
| 2014-03-13T04:50:30
|
R
|
UTF-8
|
R
| false
| false
| 1,396
|
rd
|
subset.geodata.Rd
|
\name{subset.geodata}
\alias{subset.geodata}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Method for subsetting geodata objects}
\description{
Subsets a object of the class \code{geodata}
by transforming it to a data-frame, using \code{subset}
and back transforming to a \code{geodata} object.
}
\usage{
\method{subset}{geodata}(x, ..., other = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{an object of the class \code{geodata}. }
\item{\dots}{arguments to be passed to
\code{\link{subset.data.frame}}. }
\item{other}{logical. If \code{TRUE} non-standard
\code{geodata} elements
of the original \code{geodata} object are copied to the
resulting object. }
}
%\details{
% ~~ If necessary, more details than the description above ~~
%}
\value{
A list which is an object of the class \code{geodata}.
}
%\references{ ~put references to the literature/web site here ~ }
%\author{ ~~who you are~~ }
%\note{ ~~further notes~~ %
%
% ~Make other sections like Warning with \section{Warning }{....} ~
%}
\seealso{\code{\link{subset}} for the generic function and methods and
\code{\link{as.geodata}} for more information on geodata objects. }
\examples{
subset(ca20, data > 70)
subset(ca20, area == 1)
}
\keyword{spatial}% at least one, from doc/KEYWORDS
\keyword{manip}% __ONLY ONE__ keyword per line
|
222cc4346967ea4e1e9a19dc49ba23685fc90f8b
|
c74b97591f44b46c4af7f4b68ac510de04629025
|
/clust3/clust3-rcode.R
|
df7d48d7696ef88fbc711f22bd541fd6df34be47
|
[] |
no_license
|
allatambov/cluster-analysis
|
ef56882369db39b9b76adc2c884461beda405cb4
|
d6c6f8329d6d36c6194af0ea4b16e4795833bd46
|
refs/heads/master
| 2020-03-11T17:01:30.974466
| 2018-05-03T14:44:05
| 2018-05-03T14:44:05
| 130,135,007
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,200
|
r
|
clust3-rcode.R
|
### cluster analysis 3 ###
### assessing the quality of clustering ###
# NOTE(review): this is an interactive teaching script -- file.choose(),
# View() and bare install.packages() calls require an interactive session
# and will fail or prompt when run non-interactively.
# load data - file reg_elect.csv
df <- read.csv(file.choose())
# look
View(df)
# choose columns that end with _perc
library(dplyr)
to_clust <- df %>% select(ends_with("_perc"))
# name rows after region names
rownames(to_clust) <- df$region
# create a distance matrix (no squares yet)
m <- dist(scale(to_clust))
# perform the cluster analysis and plot a dendrogram
hc <- hclust(m, method = "ward.D")
plot(hc, cex = 0.9)
# choose 5 clusters and add rectangles around them
plot(hc, cex = 0.9)
rect.hclust(hc, k = 5)
# alternative - add horizontal line at h=18
plot(hc, cex = 0.9)
abline(h = 18, col = "red") # h - horizontal line, col - color
# save cluster labels and add them as a new column in df
groups5 <- cutree(hc, k = 5)
df$groups5 <- factor(groups5)
# look at clusters - choose rows corresponding to each group
df %>% filter(groups5 == 1) %>% View
df %>% filter(groups5 == 2) %>% View
df %>% filter(groups5 == 3) %>% View
df %>% filter(groups5 == 4) %>% View
df %>% filter(groups5 == 5) %>% View
# get summary statistics by group
# NOTE(review): funs() is deprecated in current dplyr; list(mean) is the
# modern equivalent -- verify against the dplyr version in use.
df %>% group_by(groups5) %>%
summarise_at(.vars = vars(ends_with("_perc")), .funs = funs(mean)) %>%
View
# plot boxplots by group
library(ggplot2)
ggplot(data = df, aes(x = "", y = turnout_perc)) + geom_boxplot() + facet_grid(~groups5)
# plot violin plots by group
ggplot(data = df, aes(x = "", y = turnout_perc, fill = groups5)) + geom_violin() +
facet_grid(~groups5)
# plot histograms
# fill - fill color
# col - border color, common for all clusters
# bins - # of columns in histogram
ggplot(data = df, aes(x = turnout_perc , fill = groups5)) + geom_histogram(bins = 6, col = "black") +
facet_grid(~groups5)
# graphs for other indices
ggplot(data = df, aes(x = "", y = Grudinin_perc, fill = groups5)) + geom_violin() +
facet_grid(~groups5)
ggplot(data = df, aes(x = "", y = Putin_perc, fill = groups5)) + geom_violin() +
facet_grid(~groups5)
# scatterplots
ggplot(data = df, aes(x = turnout_perc, y = Putin_perc)) + geom_point(aes(color = groups5))
ggplot(data = df, aes(x = Grudinin_perc, y = Putin_perc)) + geom_point(aes(color = groups5))
# Kruskal-Wallis test
kruskal.test(df$turnout_perc ~ df$groups5)
kruskal.test(df$Grudinin_perc ~ df$groups5)
kruskal.test(df$Putin_perc ~ df$groups5)
# validate: compare two different clusterings
# groups5 - our current cluster labels
# groups5_2 - labels from CA with average link
hc2 <- hclust(m, method = "average")
groups5_2 <- cutree(hc2, k = 5)
# package fossil - for Rand index
install.packages("fossil")
library(fossil)
rand.index(groups5, groups5_2)
# package fpc - for other correspondence indices
install.packages("fpc")
library(fpc)
# m - distance matrix
cluster.stats(m, groups5, groups5_2)
# for p-values
install.packages("pvclust")
library(pvclust)
# NOTE(review): newer pvclust expects "ward.D"/"ward.D2"; plain "ward" may
# warn or error depending on the installed version -- confirm.
fit <- pvclust(t(to_clust), method.hclust = "ward", method.dist = "euclidean")
plot(fit, cex = 0.9)
# highlight clusters that are supported by data with p=0.95
plot(fit, cex = 0.7)
pvrect(fit, alpha = 0.95)
# package factoextra - for optimal number of clusters
install.packages("factoextra")
library(factoextra)
# Elbow method
fviz_nbclust(to_clust, kmeans, method = "wss") +
labs(subtitle = "Elbow method")
# the same but with vertical line
# geom_vline - add vertical line
fviz_nbclust(to_clust, kmeans, method = "wss") +
labs(subtitle = "Elbow method") +
geom_vline(xintercept = 4, linetype = 2)
# Silhouette method
fviz_nbclust(to_clust, kmeans, method = "silhouette") +
labs(subtitle = "Silhouette method")
# kmeans with k = 4
cl <- kmeans(to_clust, 4)
# result
cl
# cluster labels only
df$kmeans4 <- cl$cluster
View(df)
# hc - from the beginning
# rand.index - from package fossil
groups4 <- cutree(hc, k = 4)
rand.index(groups4, cl$cluster)
# two clusters - compare Ward with k = 2 and kmeans with k = 2
groups2 <- cutree(hc, k = 2)
cl2 <- kmeans(to_clust, 2)
rand.index(groups2, cl2$cluster)
# package NbClust - optimal clustering (not always stable)
install.packages("NbClust")
library(NbClust)
res <- NbClust(to_clust, min.nc = 2, max.nc = 8, method = "kmeans")
res$Best.nc
library(factoextra)
fviz_nbclust(res)
|
1ac70a8790c61458990d6814da807b1c8b14b860
|
92e84107be8f732e587734e45ac46c342ef394d6
|
/run_analysis.R
|
21ca3ddd82af8170c485db454f8ff0cb22dde8fc
|
[] |
no_license
|
JorgenUnger/Coursera-Getting-and-Cleaning-Data---Course-Project
|
0ac36b44a45b03f469084d55cb2004f56dce9b1a
|
1ac0274fd12b250eab5e8d767f776e34a3c4fd7f
|
refs/heads/master
| 2021-01-20T18:04:35.515143
| 2017-05-10T20:58:14
| 2017-05-10T20:58:14
| 90,903,287
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,012
|
r
|
run_analysis.R
|
run_analysis <- function() {
# Builds a tidy data set from the UCI HAR smartphone data: downloads and
# unzips the raw data if needed, merges train and test sets, keeps the
# mean/std measurements, labels activities, and writes the per-subject,
# per-activity averages to CleanedTidyData.txt.
library(reshape2)
# If the folder "UCI HAR Dataset" does not exist, download the zip file
# (unless it is already present), then unzip it.
if(!file.exists("UCI HAR Dataset")){
if(!file.exists("UCI HAR Dataset.zip"))
{
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="UCI HAR Dataset.zip",method="auto")
}
unzip("UCI HAR Dataset.zip")
}
# Read activities.
activity <- read.table("UCI HAR Dataset/activity_labels.txt")
# Clean names: drop underscores and lower-case the labels.
activity[,2] <- sub("_","",activity[,2])
activity[,2] <- tolower(activity[,2])
# Read features.
features <- read.table("UCI HAR Dataset/features.txt")
# Create an integer vector of the feature columns to keep (mean/std).
featuresToGet <- grep("*-[Mm]ean*|*-[Ss]td*",features[,2])
# Subset features.
features <- features[c(featuresToGet),]
# Clean feature names by removing ")", "(" and "-".
features[,2] <- gsub("[\\)]|[\\(]|[-]","",features[,2])
# "mean" and "std" start with upper case.
features[,2] <- gsub("mean","Mean",features[,2])
features[,2] <- gsub("std","Std",features[,2])
# Read, subset and combine training data.
trainSubjectId <- read.table("UCI HAR Dataset/train/subject_train.txt")
trainLabelsId <- read.table("UCI HAR Dataset/train/y_train.txt")
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresToGet]
train <- cbind(trainSubjectId, trainLabelsId, train)
# Read, subset and combine test data.
testSubjectId <- read.table("UCI HAR Dataset/test/subject_test.txt")
testLabelsId <- read.table("UCI HAR Dataset/test/y_test.txt")
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresToGet]
test <- cbind(testSubjectId, testLabelsId, test)
# Combine train data and test data.
all <- rbind(train, test)
# Set column names.
colnames(all) <- c("subject","activity",features[,2])
# Coerce subject to a factor. Roger Peng (2016), "R Programming for Data
# Science", explains that labelled factors are preferable to raw integers
# because they are self-describing, which also matters for statistical
# modeling.
all$subject <- as.factor(all$subject)
all$activity <- factor(all$activity, levels = activity[,1], labels=activity[,2])
# Melt to long form, then dcast back to wide, averaging each variable per
# (subject, activity) pair.
all <- melt(all, id = c("subject", "activity"))
all <- dcast(all, subject + activity ~ variable, mean)
# Write the txt file to the working directory.
write.table(all, "CleanedTidyData.txt", quote = FALSE, row.names=FALSE)
}
|
ec834562dbe576e32aba38d66d5cad426868de8d
|
ebdbf1a9fa1df3bcc1a180e4ccf51b5327968b68
|
/R/wgcna.R
|
3dd59b3af330c704287a36f53b68d1200b745c5c
|
[
"MIT"
] |
permissive
|
pranavdorbala/proteomicsHF
|
8396c6309622caaf10b154e3083760d11ef340fb
|
36124a55afd8ddad655d7a6871172741538622d6
|
refs/heads/main
| 2023-06-04T14:27:47.634938
| 2021-03-08T15:42:22
| 2021-03-08T15:42:22
| 339,808,458
| 1
| 1
|
NOASSERTION
| 2021-06-25T19:28:26
| 2021-02-17T17:50:43
|
R
|
UTF-8
|
R
| false
| false
| 3,852
|
r
|
wgcna.R
|
soft.threshold <- function(data) {
# Pick the soft-thresholding power for an unsigned WGCNA network and draw
# the standard diagnostics.
#
# Args:
#   data: numeric expression matrix (samples x genes) for
#         WGCNA::pickSoftThreshold().
# Returns:
#   list(sft = pickSoftThreshold() result, p = diagnostic plot arrangement).
#
# BUG FIX: the original body called pickSoftThreshold(wg.data, ...), silently
# reading a global `wg.data` and ignoring the `data` argument entirely; it
# now uses the argument. The local was also renamed so it no longer shadows
# the function's own name.
sft <- WGCNA::pickSoftThreshold(data, powerVector = 1:20,
networkType = 'unsigned',
RsquaredCut = 0.9, verbose = 0)
p <- threshold.plot(sft)
return(list(sft = sft, p = p))
}
threshold.plot <- function(soft.threshold) {
# Draw the three standard pickSoftThreshold() diagnostics side by side:
# scale-free fit R^2, truncated-model R^2, and mean connectivity vs power.
#
# Args:
#   soft.threshold: result of WGCNA::pickSoftThreshold(); its $fitIndices
#                   data frame supplies the Power/SFT.R.sq/mean.k. columns.
# Returns:
#   The gridExtra arrangement (drawing it is a side effect of grid.arrange).
# Candidate powers, reused as the text labels of every panel.
b <- soft.threshold$fitIndices[,1]
p1 <- qplot(Power, SFT.R.sq, data = soft.threshold$fitIndices,
xlab = "Soft Threshold (power)",
ylab = "Scale Free Topology Model Fit, Unsigned R^2",
main = "Scale Independence Model", geom="text", label = b) +
geom_hline(yintercept = 0.9)
p2 <- qplot(Power, truncated.R.sq, data = soft.threshold$fitIndices,
xlab = "Soft Threshold (power)",
ylab = "Scale Free Topology Model Fit, Unsigned R^2",
main = "SSI Model More Complexity",
geom="text", label = b) +
geom_hline(yintercept = 0.9)
# The 0.9 reference lines above mark the RsquaredCut used for selection.
p3 <- qplot(Power, mean.k., data = soft.threshold$fitIndices,
xlab = "Soft Threshold (power)",
ylab = "Mean Connectivity",
main = "Mean Connectivity",
geom="text", label = b)
p <- gridExtra::grid.arrange(p1, p2, p3, ncol = 3)
return(p)
}
hierTree <- function(data, sft) {
# Build the WGCNA clustering tree: adjacency at the chosen soft power,
# TOM-based dissimilarity, average-linkage clustering, and a dynamic tree
# cut into modules. Also plots the dendrogram with module colours.
#
# Args:
#   data: expression matrix passed to WGCNA::adjacency().
#   sft:  result of soft.threshold(); $sft$powerEstimate supplies the power.
# Returns:
#   list(htree = hclust tree, dynMd = numeric module labels,
#        dynCl = module colour labels).
tictoc::tic()
pow <- sft$sft$powerEstimate
adj <- WGCNA::adjacency(data, power = pow)
# Topological-overlap similarity turned into a dissimilarity for clustering.
dissTOM <- 1 - WGCNA::TOMsimilarity(adj)
tree <- fastcluster::hclust(as.dist(dissTOM), method = "average")
# deepSplit = 4 requests the most aggressive module splitting.
dynamicMods <- dynamicTreeCut::cutreeDynamic(dendro = tree,
distM = dissTOM,
deepSplit = 4,
cutHeight = 0.9995,
minClusterSize = 10,
pamRespectsDendro = F)
dynamicColors <- WGCNA::labels2colors(dynamicMods)
tictoc::toc()
# Side effect: dendrogram with the module colour band underneath.
WGCNA::plotDendroAndColors(tree, dynamicColors, "Dynamic Tree Cut",
dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05,
main = "Protein dendrogram and module colors")
return(list(htree = tree,
dynMd = dynamicMods,
dynCl = dynamicColors))
}
summarize.modules <- function(data, colors, sft, labels) {
# Summarise WGCNA modules: one row per module colour with its size and its
# top hub protein (plus the hub's human-readable name).
#
# Args:
#   data:   expression matrix used for hub detection.
#   colors: per-feature module colour assignments (from hierTree()).
#   sft:    result of soft.threshold(); $sft$powerEstimate is the power.
#   labels: lookup table indexed by protein id with a 'name' column
#           -- assumed from usage below; TODO confirm with callers.
# Returns:
#   tibble with columns color, freq, hubs, Name.
tictoc::tic()
pow <- sft$sft$powerEstimate
hub.proteins <-
WGCNA::chooseTopHubInEachModule(data, colors, omitColors = NA,
power = pow, type = "unsigned")
tictoc::toc()
# Compute the colour frequency table once (the original built it twice).
color.counts <- table(colors)
# NOTE(review): this assumes chooseTopHubInEachModule() returns hubs in the
# same (alphabetical colour) order as table() -- verify.
module.summary <- tibble::tibble(color = names(color.counts),
freq = color.counts,
hubs = hub.proteins)
# Attach the readable hub name; return explicitly instead of the original
# `%>% return()` anti-pattern.
dplyr::mutate(module.summary, Name = labels[hubs, 'name'])
}
calculate.eigengenes <- function(data, colors) {
# Compute module eigengenes, cluster them by correlation distance, and merge
# modules whose eigengenes are closer than the cut height. Plots the
# eigengene clustering tree with the merge threshold as a side effect.
#
# Args:
#   data:   expression matrix.
#   colors: per-feature module colour assignments.
# Returns:
#   list(eigenGenes = original eigengenes, mergedColors = colours after
#        merging, mergedMEs = eigengenes of the merged modules).
tictoc::tic()
MEList <- WGCNA::moduleEigengenes(data, colors = colors)
eigenGenes <- tibble::tibble(MEList$eigengenes)
# Dissimilarity = 1 - correlation between eigengenes.
MEDiss <- 1 - cor(eigenGenes)
METree <- fastcluster::hclust(as.dist(MEDiss), method = "average")
# Modules with eigengene correlation > 0.9 (dissimilarity < 0.1) are merged.
MEDissThres = 0.1
plot(METree, main = "Module EigenGenes Clustering", xlab = "", sub = "")
abline(h = MEDissThres, col = "red")
merge <- WGCNA::mergeCloseModules(data, colors,
cutHeight = MEDissThres,
verbose = 3)
tictoc::toc()
return(list(eigenGenes = eigenGenes,
mergedColors = merge$colors,
mergedMEs = merge$newMEs))
}
plot.new.colors <- function(tree, colors1, colors2, labs) {
# Plot the dendrogram with two colour bands (e.g. pre- and post-merge module
# assignments) for visual comparison; `labs` titles the two bands.
WGCNA::plotDendroAndColors(tree, cbind(colors1, colors2),
labs, dendroLabels = FALSE, hang = 0.03,
addGuide = TRUE, guideHang = 0.05)
}
|
10235dd61c7db737043a8380c5ba0c3e470eb538
|
7db7ca4aea3b8d0557248beb1b4f559e6eb10809
|
/R/piggy-waypoints.R
|
9d4d857b579ab463107434bad068be44a3daac3f
|
[] |
no_license
|
tera-insights/gtBase
|
aabbbb7ec58bf1beb75879decf35832e628874d9
|
bfbc7459923456895ab99143a5020bdfdf536720
|
refs/heads/master
| 2020-05-22T00:03:44.268580
| 2017-06-06T14:42:37
| 2017-06-06T14:42:37
| 25,822,663
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,057
|
r
|
piggy-waypoints.R
|
Translate.Cache <- function(data) {
# Translate a CACHE waypoint: the upstream waypoint's statements first,
# then this waypoint's own piggy statement, named by its alias.
upstream <- Translate(data$data)
statement <- setNames(
paste0(data$alias, " = CACHE ", data$data$alias, ";\n"),
data$alias)
c(upstream, statement)
}
Translate.Compact <- function(data) {
# Translate a COMPACT waypoint: the upstream waypoint's statements first,
# then this waypoint's own piggy statement, named by its alias.
upstream <- Translate(data$data)
statement <- setNames(
paste0(data$alias, " = COMPACT ", data$data$alias, ";\n"),
data$alias)
c(upstream, statement)
}
Translate.Filter <- function(filter) {
# Translate a FILTER waypoint. The condition expression is first registered
# with the clustering bookkeeping (side effect on global state), then the
# upstream translation is emitted followed by this FILTER statement.
update.clustering(grokit$expressions[[filter$condition]], filter$data)
c(Translate(filter$data),
setNames(
paste0(filter$alias, " = FILTER ", filter$data$alias, " BY",
"\n\t", Translate.Expr(grokit$expressions[[filter$condition]], filter$data),
";\n"),
filter$alias))
}
Translate.Generated <- function(generator) {
# Translate a FOREACH ... GENERATE waypoint, emitting one generated
# expression per line after the upstream translation.
c(Translate(generator$data),
setNames(
paste0(generator$alias, " = FOREACH ", generator$data$alias, " GENERATE",
paste0("\n\t", Translate.Inputs(generator$generated, generator$data), collapse = ","),
";\n"),
generator$alias))
}
Translate.GF <- function(gf) {
# Translate a generalized filter (GF) waypoint. Any state waypoints are
# translated first, then the data input, then the FILTER statement itself;
# the REQUIRES and USING clauses are emitted only when states/inputs exist
# (a zero-length `if` yields NULL, which paste0 drops).
c(unlist(lapply(gf$states, Translate)), Translate(gf$data),
setNames(
paste0(gf$alias, " = FILTER ", gf$data$alias, " BY",
"\n", Translate(gf$gf),
if (length(gf$states) > 0)
paste0("\nREQUIRES", paste0("\n\t", lapply(gf$states, `[[`, "alias"), collapse = ",")),
if (length(gf$inputs) > 0)
paste0("\nUSING", paste0("\n\t", Translate.Inputs(gf$inputs, gf$data), collapse = ",")),
";\n"),
gf$alias))
}
Translate.GI <- function(data) {
# Translate a generalized input (GI) waypoint into a READ statement.
# The ATTRIBUTES clause is either delegated ("ATTRIBUTES FROM <name>" when
# $outputs is a character scalar) or spelled out per output, pairing each
# schema name with its template when one is present.
atts <- if (is.character(data$outputs))
paste0("\nATTRIBUTES FROM ", data$outputs)
else
paste0("\nATTRIBUTES",
paste0("\n\t", Translate.Outputs(data$schema),
ifelse(sapply(data$outputs, is.null), "", " : "),
lapply(data$outputs, Translate.Template),
collapse = ","))
# One FILE line per input file; CHUNKSIZE only when a chunk size was given.
setNames(
paste0(data$alias, " = READ",
paste0("\n FILE ", quotate(data$files), collapse = ""),
"\nUSING",
"\n", Translate(data$gi),
if (length(data$chunk)) paste("\nCHUNKSIZE", data$chunk),
atts,
";\n"),
data$alias)
}
Translate.GIST <- function(gist) {
# Translate a GIST waypoint (no data input). State waypoints are translated
# first; REQUIRES and AS clauses appear only when states/schema are present.
c(unlist(lapply(gist$states, Translate)),
setNames(
paste0(gist$alias, " =",
"\n", Translate(gist$gist),
if (length(gist$states) > 0)
paste0("\nREQUIRES", paste0("\n\t", lapply(gist$states, `[[`, "alias"), collapse = ",")),
if (length(gist$schema) > 0)
paste0("\nAS", paste0("\n\t", Translate.Outputs(gist$schema), collapse = ",")),
";\n"),
gist$alias))
}
Translate.GLA <- function(gla) {
# Translate a GLA (generalized linear aggregate) waypoint: states, then the
# data input, then the GLA statement with optional REQUIRES / USING / AS
# clauses (emitted only when non-empty).
c(unlist(lapply(gla$states, Translate)), Translate(gla$data),
setNames(
paste0(gla$alias, " =",
"\n", Translate(gla$gla),
"\nFROM ", gla$data$alias,
if (length(gla$states) > 0)
paste0("\nREQUIRES", paste0("\n\t", lapply(gla$states, `[[`, "alias"), collapse = ",")),
if (length(gla$inputs) > 0)
paste0("\nUSING", paste0("\n\t", Translate.Inputs(gla$inputs, gla$data), collapse = ",")),
if (length(gla$schema) > 0)
paste0("\nAS", paste0("\n\t", Translate.Outputs(gla$schema), collapse = ",")),
";\n"),
gla$alias))
}
Translate.GT <- function(gt) {
# Translate a GT (generalized transform) waypoint; mirrors Translate.GLA
# but the AS clause comes from $outputs rather than $schema.
c(unlist(lapply(gt$states, Translate)), Translate(gt$data),
setNames(
paste0(gt$alias, " =",
"\n", Translate(gt$gt),
"\nFROM ", gt$data$alias,
if (length(gt$states) > 0)
paste0("\nREQUIRES", paste0("\n\t", lapply(gt$states, `[[`, "alias"), collapse = ",")),
if (length(gt$inputs) > 0)
paste0("\nUSING", paste0("\n\t", Translate.Inputs(gt$inputs, gt$data), collapse = ",")),
if (length(gt$outputs) > 0)
paste0("\nAS", paste0("\n\t", Translate.Outputs(gt$outputs), collapse = ",")),
";\n"),
gt$alias))
}
Translate.Join <- function(join) {
# Translate a JOIN waypoint: both inputs are translated first, then the
# JOIN statement listing each side's key attributes. $left.outer switches
# between "JOIN" and "LEFT OUTER JOIN".
c(Translate(join$x), Translate(join$y),
setNames(
paste0(join$alias, " =", if(join$left.outer) " LEFT OUTER", " JOIN\n",
"\t", join$x$alias, " BY (",
paste0(lapply(join$xSchema, Translate.Expr.name, join$x), collapse = ", "),
")", ",\n",
"\t", join$y$alias, " BY (",
paste0(lapply(join$ySchema, Translate.Expr.name, join$y), collapse = ", "),
")", ";\n"),
join$alias))
}
Translate.Load <- function(data) {
# Translate a LOAD waypoint. When the waypoint carries a clustering attribute
# a FILTER RANGE clause is appended, with non-finite bounds rendered as NULL.
loading <- paste("LOAD", data$relation, "AS", data$alias)
if (!is.null(data$cluster)) {
# Range bounds come from the global clustering registry for this attribute.
range <- as.numeric(grokit$cluster[[data$cluster]][1:2])
clustering <- paste("\nFILTER RANGE", paste(ifelse(is.finite(range), range, "NULL"), collapse = ", "))
}
# `clustering` is only referenced under the same condition that defines it.
setNames(paste0(loading, if (!is.null(data$cluster)) clustering, ";\n"), data$alias)
}
|
fcd215e5ddf236e86cb3d36a330cb7384ad1ec70
|
08e99681e21d756b0f2339b2fd4153dc7ec277ef
|
/script.R
|
16387b1c73d4b7e7bf4635255bb2740aa696f61a
|
[] |
no_license
|
timothymartin76/K_Means_Clustering
|
56ecda8e4394d9f6eb92118b37e8eb4fdf42f6d5
|
e3be1ac9a9f7dabee53bd73f71559a71cc2e5b97
|
refs/heads/master
| 2021-01-24T21:07:39.079006
| 2016-02-10T17:28:05
| 2016-02-10T17:28:05
| 37,153,401
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
script.R
|
## K-means clustering of call-center agents on call volume (Calls_Handled)
## vs average handle time (AHT), visualised with ggplot2.
require(ggfortify)
require(cluster)
## Read the agent file (expects columns Calls_Handled, AHT, CCR_Name).
mydata<- read.csv("agents.csv")
## Cluster the two numeric features into 3 groups.
m=as.matrix(cbind(mydata$Calls_Handled, mydata$AHT), ncol=2)
cl=(kmeans(m,3))
mydata$cluster=factor(cl$cluster)
centers=as.data.frame(cl$centers)
## Scatter plot with cluster colours; V1/V2 are the centroid coordinates.
## NOTE(review): `show_guide` is a long-deprecated ggplot2 argument
## (use `show.legend`) -- confirm against the installed ggplot2 version.
ggplot(data=mydata, aes(x=Calls_Handled, y=AHT, color=cluster )) +
geom_point() +
geom_point(data=centers, aes(x=V1,y=V2, color='Center')) +
geom_point(data=centers, aes(x=V1,y=V2, color='Center'), size=60, alpha=.3, show_guide=FALSE)
## Same plot, with agent names attached to the points.
sp<- ggplot(data=mydata, aes(x=Calls_Handled, y=AHT, color=cluster )) +
geom_point() +
geom_point(data=centers, aes(x=V1,y=V2, color='Center')) +
geom_point(data=centers, aes(x=V1,y=V2, color='Center'), size=60, alpha=.3, show_guide=FALSE)
sp + geom_text(aes(label=CCR_Name), size=3, vjust=0)
## NOTE(review): mydata now contains the factor column `cluster` (and
## CCR_Name may be character), so kmeans(mydata, 3) will likely error;
## this probably should be fortify(kmeans(m, 3), data = mydata).
cluster_category<- fortify(kmeans(mydata, 3), data=mydata)
## Export the clustered data for further analysis.
write.csv(mydata, file="cluster_categories.csv")
|
4a606d0b8194ee24a8ddb687348e65233e7cb61d
|
5eac13b1a7d8441a430059b44fdfd10ef8d5fd12
|
/PlayerID_Scraper.R
|
8c96f205aa0ffc4fd81e976e1ccecf51f9049ac1
|
[] |
no_license
|
wrondon23/Baseball-Reference-Scrapers
|
924666e5f9fb22e8e098718c5ccd33b8d6030ace
|
112cfe3564be6407ef21f85c3ad0214c14a32792
|
refs/heads/master
| 2020-11-29T02:25:29.410217
| 2016-02-20T18:58:43
| 2016-02-20T18:58:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,374
|
r
|
PlayerID_Scraper.R
|
# This scraper builds off the MiLB_Scraper_Batting.R repository by using the
# baseball-reference minor league reference id URL to scrape each player's
# major league reference id from their B-R profile. Scrape those files first,
# then return here to scrape the playerID info. Expects `min_bref`: a
# character vector of minor-league reference ids.
# WARNING: this scrapes from individual player pages and can take a very
# long time with a large player pool.
# install.packages("devtools")
# install_github("hadley/rvest")
# BUG FIX: stringr added -- str_trim() below comes from it but it was never
# loaded, so the script errored at the final cleanup step.
c('rvest','dplyr','pipeR','stringr') -> packages
lapply(packages, library, character.only = TRUE)
url <- "http://www.baseball-reference.com/minors/player.cgi?id="
# Collect one row per player, then bind once (the original grew a data.frame
# with rbind() inside the loop, which is O(n^2)).
rows <- vector("list", length(min_bref))
for (i in seq_along(min_bref)) {
id <- min_bref[i]
# read_html() replaces the deprecated rvest::html().
page <- read_html(paste0(url, id))
maj_playerid <- page %>%
html_nodes(".no_mobile a") %>%
html_attr("href") %>%
as.character()
rows[[i]] <- data.frame(min_playerid = id,
maj_playerid = paste(maj_playerid, ""),
stringsAsFactors = FALSE)
}
minors_player <- bind_rows(rows)
# Remove Japanese-league profile links.
minors_player <- filter(minors_player, !grepl('japan/', maj_playerid))
# BUG FIX: the dot is now escaped -- ".shtml" as a regex matched any
# character followed by "shtml".
minors_player$maj_playerid <- gsub("\\.shtml", "", minors_player$maj_playerid)
# Strip the leading "/minors/player" path prefix, then trailing whitespace.
minors_player$maj_playerid <- substring(minors_player$maj_playerid, 12)
minors_player$maj_playerid <- str_trim(minors_player$maj_playerid)
View(minors_player)
|
923eb7fe4427ffd44de873dcab6c5781fb99a84f
|
ba7f71ef4a5d52ff48f7ab49829f2ee4d5cc282e
|
/man/CDpca.Rd
|
aa7722b0a9b2e4e1be03dab29ccd185cf8fe7136
|
[] |
no_license
|
luisrei/new-cdpca
|
0bd0c06c4ff5a31806e3ad3590ccf60a911f391d
|
30436e0cb5ab969e90b1d4f8cd2bd7192a9a3feb
|
refs/heads/master
| 2020-04-07T17:22:53.628943
| 2018-11-21T15:31:51
| 2018-11-21T15:31:51
| 158,567,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,678
|
rd
|
CDpca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDpca.R
\name{CDpca}
\alias{CDpca}
\title{CDpca}
\usage{
CDpca(data, class, fixAtt, nnloads = 0, Q, P, tol, maxit, r)
}
\arguments{
\item{data}{data frame (numeric).}
\item{class}{vector (numeric) or 0, if classes of objects are unknown.}
\item{fixAtt}{vector (numeric) or 0, for a selection of attributes.}
\item{nnloads}{1 or 0, for nonnegative loadings.}
\item{Q}{integer, number of clusters of variables.}
\item{P}{integer, number of clusters of objects.}
\item{tol}{real number, small positive tolerance.}
\item{maxit}{integer, maximum of iterations.}
\item{r}{integer, number of runs of the CDpca algorithm; the best of the \code{r} runs is kept as the final solution.}
}
\value{
iter: iterations used in the best loop for computing the best solution
loop: best loop number
timebestloop: computation time on the best loop
timeallloops: computation time for all loops
Y: the component score matrix
Ybar: the object centroids matrix in the reduced space
A: the component loading matrix
U: the partition of objects
V: the partition of variables
Fobj: function to maximize
bcdev: between cluster deviance
bcdevTotal: between cluster deviance over the total variability
tableclass: cdpca classification
pseudocm: pseudo confusion matrix of the real and cdpca classifications
Enorm: error norm for the obtained cdpca model
}
\description{
CDpca performs a clustering and disjoint principal components analysis
on the given numeric data matrix and returns a list of results.
}
\examples{
exampleCDpca = CDpca(data, class, fixAtt, nnloads, Q, P, tol, maxit, r)
}
\keyword{cdpca}
\keyword{cluster}
\keyword{kmeans}
\keyword{pca}
|
75454c03a1fd14ae5b6437506c1fe1d1a0e4a1cc
|
1f8f09cefd8ec00b2d68032ebeec74f38d7a072d
|
/HW4.R
|
cc3c11df5ebea9ab5bb8655b4166b647752cfaf1
|
[] |
no_license
|
aliizadi/statistical-inference-course
|
44934f1415d0672a8a2a86f32551cf7d5739f55f
|
332e6f8c229eeecd08f08a5691c98d6bf5d32886
|
refs/heads/main
| 2023-06-16T09:52:47.585204
| 2021-07-06T13:33:46
| 2021-07-06T13:33:46
| 383,471,829
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,942
|
r
|
HW4.R
|
######## q3
# Chick weights for the meatmeal and casein feed groups.
# NOTE(review): `Chick` is not defined in this script; presumably a
# pre-loaded copy of the chickwts data -- confirm upstream.
chick_m = Chick$weight[Chick$feed=="meatmeal"]
chick_c = Chick$weight[Chick$feed=="casein"]
#### a)
bootstrap_diff_means <- function(a, b, n_bootstrap = 1000) {
# Bootstrap distribution of mean(a) - mean(b): resample each group with
# replacement n_bootstrap times and record the difference of means.
#
# Args:
#   a, b:        numeric vectors (the two samples).
#   n_bootstrap: number of bootstrap replicates (default 1000).
# Returns:
#   Numeric vector of length n_bootstrap (length 0 when n_bootstrap = 0;
#   the original returned NULL in that edge case).
n_a <- length(a)
n_b <- length(b)
# Preallocate: the original grew the vector with c(), which is O(n^2).
bootstrap_samples <- numeric(n_bootstrap)
# seq_len() is safe for n_bootstrap = 0, unlike 1:n_bootstrap.
for (i in seq_len(n_bootstrap)) {
idx_a <- sample(n_a, n_a, replace = TRUE)
idx_b <- sample(n_b, n_b, replace = TRUE)
bootstrap_samples[i] <- mean(a[idx_a]) - mean(b[idx_b])
}
bootstrap_samples
}
bootstrap_samples = bootstrap_diff_means(chick_c, chick_m)
#### b)
p_value_diff_means_t_test <- function(a, b) {
# Two-sided two-sample t-test p-value for H0: mean(a) - mean(b) = 0,
# with a Welch-style standard error.
#
# Args:
#   a, b: numeric vectors (the two samples).
# Returns:
#   p-value in [0, 1].
observation <- mean(a) - mean(b)
n_a <- length(a)
n_b <- length(b)
se <- sqrt((sd(a) ^ 2) / n_a + (sd(b) ^ 2) / n_b)
t <- (observation - 0) / se
# NOTE(review): df = min(n_a, n_b) is a crude conservative choice (the usual
# conservative approximation is min(n) - 1); kept for compatibility with the
# rest of this homework.
df <- min(n_a, n_b)
# BUG FIX: the original computed (1 - pt(t, df)) * 2, which yields a p-value
# greater than 1 whenever the observed difference is negative. A two-sided
# test must use |t|.
p_value <- 2 * (1 - pt(abs(t), df = df))
p_value
}
p_value_original_sample = p_value_diff_means_t_test(chick_c, chick_m)
p_value_bootstrap_diff_means_t_test <- function(a, b, bootstrap_samples) {
null = 0
n_bootstrap_samples = length(bootstrap_samples)
observation = mean(a) - mean(b)
se = sd(bootstrap_samples)/sqrt(n_bootstrap_samples)
t = (observation - null) / se
df = n_bootstrap_samples - 1
p_value = (1 - pt(t, df=df)) * 2
return (p_value)
}
p_value_bootstrap_sample = p_value_bootstrap_diff_means_t_test(chick_c, chick_m,
bootstrap_samples
)
#### c)
confidence_interval_diff_means_t <- function(a, b, p = 0.95) {
# Two-sided t confidence interval at level p for mean(a) - mean(b),
# using a Welch-style standard error and the conservative
# df = min(length(a), length(b)).
#
# Returns: c(lower, upper).
point_est <- mean(a) - mean(b)
std_err <- sqrt(sd(a) ^ 2 / length(a) + sd(b) ^ 2 / length(b))
# -qt((1-p)/2, df) is the positive critical value for the two-sided level p.
crit <- -qt((1 - p) / 2, df = min(length(a), length(b)))
point_est + c(-1, 1) * crit * std_err
}
confidence_interval_original_sample = confidence_interval_diff_means_t(chick_c,
chick_m)
confidence_interval_bootstrap_diff_means_t <- function(a, b, bootstrap_samples,
p = 0.95) {
# Two-sided t confidence interval at level p for mean(a) - mean(b), with
# the standard error estimated from the bootstrap replicates
# (sd / sqrt(n), matching the companion bootstrap p-value function).
#
# Returns: c(lower, upper).
n_boot <- length(bootstrap_samples)
point_est <- mean(a) - mean(b)
std_err <- sd(bootstrap_samples) / sqrt(n_boot)
crit <- -qt((1 - p) / 2, df = n_boot - 1)
point_est + c(-1, 1) * crit * std_err
}
confidence_interval_bootstrap_sample = confidence_interval_bootstrap_diff_means_t(chick_c, chick_m, bootstrap_samples)
######## q8
#### a)
# Weight lost after six weeks for each diet.
# NOTE(review): after this subtraction `Diet` is a one-column data frame
# still named "pre.weight"; the later references to Diet$Diet and a `loss`
# column will not resolve -- the column likely needs renaming and the diet
# group column must be retained. Confirm against the original data set.
Diet = Diet["pre.weight"] - Diet["weight6weeks"]
Diet$Diet=as.factor(Diet$Diet)
# Box plot of weight loss by diet.
ggplot(Diet, aes(x = Diet, y=loss)) +
geom_boxplot() +
ggtitle("box plot") +
theme(plot.title = element_text(hjust = 0.5))
#### b)
# One-way ANOVA of loss across diets.
result = aov(loss ~ Diet, data = Diet)
summary(result)
#### d)
# Pairwise comparisons with Tukey's HSD.
TukeyHSD(result)
|
bc938d8505e9568e20c5d2de1f632ba18e526f5b
|
817712488d5b36c85596e42a0b7178b4a79f95f0
|
/space_history/r_space/history/wenbenwajue.R
|
6c76dd442b6ac6c0faddac0de100b1be041f4aad
|
[] |
no_license
|
TigerZhao007/GitHub
|
cfa1dcc5589988a18b0f7661e9f72b8088f95ec2
|
5e08f6a0e4033778f107cc89b422ab107082e4ab
|
refs/heads/master
| 2020-10-02T01:25:39.251198
| 2019-12-12T12:06:25
| 2019-12-12T12:06:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,048
|
r
|
wenbenwajue.R
|
# Chinese text-mining script: segmentation, custom dictionaries, stopword
# removal, and a word-cloud of a document's most frequent terms.
# Load packages.
library(rJava)
library(Rwordseg)
library(wordcloud2)
library(tmcn)
library(jiebaR)
# Basic word segmentation.
segmentCN("这是个中文分词软件。") # segment a sentence
segmentCN("这是个中文分词软件。",nosymbol=FALSE) # keep punctuation in the output
segmentCN("这是个中文分词软件。",nature=TRUE) # also output part-of-speech tags
# Person-name recognition.
getOption("isNameRecognition") # check whether name recognition is enabled
segmentCN("刘强东是个好学生。")
segment.options(isNameRecognition=TRUE) # enable person-name recognition
segment.options(isNameRecognition=FALSE) # disable person-name recognition
# Add individual words to the dictionary.
listDict()
segmentCN("这是个分词软件。")
insertWords("这是") # add this word to the dictionary
insertWords("特校") # once added, the word is recognised during segmentation
listDict()
# Install whole word libraries into the dictionary.
installDict(dictpath = "canlianciku01.txt",
dictname = "dep",dicttype = "text",load = "TRUE") # words from a text file
segmentCN("2015年的几部开年戏都出现了唐嫣的身影")
installDict(dictpath = "E:\\workspace\\ciku\\canjirenshiye.scel",
dictname ="canjiren")
warnings() # inspect the warnings
listDict() # list the installed dictionaries
uninstallDict() # remove all custom dictionaries
# Delete/insert words so ambiguous text segments differently.
insertWords("错过")
segmentCN(c("如果你因为错过太阳而流泪", "你也会错过星星"))
segmentCN("这个错过去你可以犯,但是现在再犯就不应该了")
deleteWords("错过")
insertWords("过去")
segmentCN("这个错过去你可以犯,但是现在再犯就不应该了")
deleteWords(c("和","的","是","对","2020"))
deleteWords("特殊")
segmentCN("这是个特殊教育学校校长公共事业分词软件。")
# Strip digits and punctuation from the text before segmenting.
fenci <- c("2015年的几部开年戏都出现了唐嫣的身影,《华丽柏拉图》演绎的很好,很“南财”。")
fenci.seg <- segmentCN("2015年的几部开年戏都出现了唐嫣的身影,10《华丽柏拉图》演绎的很好,很“南财”。")
fenci.gsub <- gsub("[0-9 0123456789 < > ~ 《 》 “ ” , ,]","",fenci)
# The character class above removes digits and the listed symbols.
segmentCN(fenci.gsub)
# Remove selected common words and the stopwords.
fenci.gsub <- gsub("[的 很 了 年]","",fenci) # drop some common characters
stopwords<- unlist(read.table("E:\\rspace\\ciku\\tingcibiao\\Ctingci02.txt",stringsAsFactors=F))
# Read the stopword list.
stopwords[1:100] # inspect the stopword list
# Drop every token of x that appears in `stopwords`.
removeStopWords <- function(x,stopwords) {
temp <- character(0)
index <- 1
xLen <- length(x)
while (index <= xLen) {
if (length(stopwords[stopwords==x[index]]) <1)
temp<- c(temp,x[index])
index <- index +1
}
temp
}
fenci.remove <-lapply(fenci.seg,removeStopWords,stopwords) # returns a list
fenci.remove.un <- unlist(fenci.remove) # flatten the list to a vector
# Read a document, mine it, and draw a word cloud of the top 100 terms.
canlian <- readLines("canlian01.txt",encoding="GB2312")
canlian
canlian.paste <- paste(canlian,collapse = " ")
canlian.paste.se <- segmentCN(canlian.paste)
canlian.paste.se.rm <- lapply(canlian.paste.se, removeStopWords,stopwords)
canlian.paste.se.rm.un <- unlist(canlian.paste.se.rm)
canlian.paste.se.ta <- table(canlian.paste.se.rm.un)
canlian.paste.se.ta.so <- sort(canlian.paste.se.ta,decreasing = T)
length(canlian.paste.se.ta.so[1:100])
wordcloud2(canlian.paste.se.ta.so[1:100])
|
1978f9fb16663fe74ede816b737af57fb5357ed8
|
dfa24df6e16ccbde5be38539588c1466a59b6fc2
|
/R/seg_sites_fasta.R
|
9e13732794231e4ca48800653cf77cd0c9ddfcde
|
[] |
no_license
|
hmorlon/PANDA
|
2140e768cebb54d54ab07ea7f5b66c79945121ee
|
34cdff313b8db213a4e6119e93a4810608cfc32f
|
refs/heads/master
| 2023-08-23T14:21:55.066295
| 2023-08-10T15:08:40
| 2023-08-10T15:08:40
| 16,514,123
| 21
| 17
| null | 2022-10-05T14:47:48
| 2014-02-04T14:28:47
|
R
|
UTF-8
|
R
| false
| false
| 145
|
r
|
seg_sites_fasta.R
|
seg_sites_fasta <-
function (x) {
# Return the (1-based) column indices of segregating sites in an alignment.
#
# Args:
#   x: character matrix, sequences in rows and sites in columns; "-", "N"
#      and "n" are treated as missing and ignored.
# Returns:
#   Integer vector of columns with more than one distinct non-missing state.
#
# vapply + seq_len replace the original sapply/1:ncol(x): the result is
# type-stable and a 0-column matrix now yields integer(0) instead of an
# error from which() on an empty list.
n.states <- vapply(seq_len(ncol(x)), function(i) {
states <- unique(x[, i])
length(states[!states %in% c("-", "N", "n")])
}, integer(1))
which(n.states > 1)
}
|
e6a5502947eeae9f44d97978b914ff9b66b1dd50
|
88d2c333716d4c1b939459eda4b25360fa1df9d7
|
/man/ang.out.Rd
|
f7bc3b9140c5e4b415426b8028abb362a9bf0382
|
[] |
no_license
|
grundy95/changepoint.geo
|
77f0dd9b2fa49f5ab4accb4e45139baed3d3eb0e
|
8249e5be372ee9c2349ba84597fce5e9bc4a46e4
|
refs/heads/master
| 2023-03-27T00:57:28.959748
| 2021-03-25T16:16:19
| 2021-03-25T16:16:19
| 211,331,641
| 5
| 0
| null | 2021-03-25T15:11:32
| 2019-09-27T13:58:53
|
R
|
UTF-8
|
R
| false
| false
| 511
|
rd
|
ang.out.Rd
|
\name{ang.out}
\alias{ang.out}
\alias{ang.out-methods}
\title{
Generic Function - ang.out
}
\description{
Generic Function
}
\usage{
ang.out(object)
}
\arguments{
\item{object}{
Object of class cpt.geo.
}
}
\details{
Generic function
}
\value{
Numeric vector of changepoints in angle measure.
}
\author{
Thomas Grundy
}
\seealso{
\code{\link{ang.out-methods}}
}
\examples{
x <- new('cpt.geo') #new cpt.geo object
ang.out(x) #retrieves the ang.out slot from x.
}
\keyword{methods}
\keyword{internal}
|
e62923d6cb7b2a0f6fedfc117872b8f40f4b49df
|
e8edbb9c53aa6e99e78289f2d68ebeb2699f2a52
|
/Chapter4.R
|
39e78e98c3cd0c253a7a1021b13ad654ddecf2ca
|
[] |
no_license
|
iinatuomainen/IODS-project
|
77c592a9dacfa37ad9f4bb3681a9afcd9f348a24
|
b9d6e5c347415f62a6291e7e98ad8db8fef8f196
|
refs/heads/master
| 2020-09-01T15:36:35.482733
| 2019-12-09T14:06:44
| 2019-12-09T14:06:44
| 218,994,594
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,742
|
r
|
Chapter4.R
|
# IODS chapter 4: LDA on Boston crime-rate classes, then k-means clustering.
library(MASS)
data("Boston")
str(Boston)
dim(Boston)
pairs(Boston)
summary(Boston)
library(corrplot)
# Correlation structure of the variables.
m <- cor(Boston)
corrplot(m, method = "circle")
# Standardise all variables (mean 0, sd 1).
boston_scaled <- scale(Boston)
summary(boston_scaled)
boston_scaled <- as.data.frame(boston_scaled)
summary(boston_scaled$crim)
# Cut the scaled crime rate into quartile-based classes.
bins <- quantile(boston_scaled$crim)
crime <- cut(boston_scaled$crim, breaks = bins, include.lowest = TRUE, labels = c("low", "med_low", "med_high", "high"))
table(crime)
# Replace the numeric crime rate with its categorical version.
boston_scaled <- dplyr::select(boston_scaled, -crim)
boston_scaled <- data.frame(boston_scaled, crime)
# 80% random training split.
n <- nrow(boston_scaled)
ind <- sample(n, size = n*0.8)
train <- boston_scaled[ind, ]
# NOTE(review): the "test" set here is the WHOLE data set, so it overlaps
# the training rows; this is presumably meant to be boston_scaled[-ind, ].
test <- boston_scaled
# Fit LDA of the crime class on all other variables.
lda.fit <- lda(crime ~ ., data=train)
classes <- as.numeric(train$crime)
plot(lda.fit, dimen = 2, col = classes, pch = classes)
# Helper: draw the LDA coefficient arrows onto the biplot.
lda.arrows <- function(x, myscale = 1, arrow_heads = 0.1, color = "red", tex = 0.75, choices = c(1,2)){
heads <- coef(x)
arrows(x0 = 0, y0 = 0,
x1 = myscale * heads[,choices[1]],
y1 = myscale * heads[,choices[2]], col=color, length = arrow_heads)
text(myscale * heads[,choices], labels = row.names(heads),
cex = tex, col=color, pos=3)
}
plot(lda.fit, dimen = 2, col = classes, pch = classes)
lda.arrows(lda.fit, myscale = 1)
# Predict on the test set (true classes saved first, then removed).
crime_cat <- test$crime
test <- dplyr::select(test, -crime)
summary(test)
lda.pred <- predict(lda.fit, newdata = test)
table(correct = crime_cat, predicted = lda.pred$class)
# K-means on the (re-scaled) original Boston data.
data('Boston')
Boston2 <- scale(Boston)
dist_eu <- dist(Boston2)
summary(dist_eu)
set.seed(123)
# Total within-cluster sum of squares for k = 1..10 (elbow plot).
k_max <- 10
twcss <- sapply(1:k_max, function(k){kmeans(Boston2, k)$tot.withinss})
library(ggplot2)
qplot(x=1:k_max, y=twcss, geom = 'line')
# Final clustering with the chosen k = 2.
km <- kmeans(Boston2, centers = 2)
pairs(Boston2, col= km$cluster)
|
f1475c055467a31d35317e1ed68fcf1a05fb920d
|
34cb1d78e4a680a0fb6e6df7defc686149bed189
|
/R scripts/CopyMetadata.R
|
fc31797e6fc51b0dc835e9f02bc6f50e795405e3
|
[
"MIT"
] |
permissive
|
JowehL/Improving-Thermal-Images
|
7f0c5184dbefca40017da3193b018c5d4993395e
|
772204fa12d8088c6bd8e5f2fd7aeeb32ac296a2
|
refs/heads/master
| 2022-11-11T15:40:43.607451
| 2020-06-28T21:33:19
| 2020-06-28T21:33:19
| 275,670,043
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
CopyMetadata.R
|
library("exifr")
# Create a function with arguments.
new.copy_metadata <- function(image_file, folder_path="", output_path="Output/") {
## Create image path
image_ori = paste(folder_path, image_file, sep="")
image_cal = paste(output_path, image_file, sep="")
output_folder = paste("-o ", output_path,"GPS_metadata/", sep="")
# print(image_ori)
# print(image_cal)
argument = paste("-tagsfromfile", image_ori, image_cal, output_folder,sep=" ")
# print(argument)
exiftool_call(argument)
}
input_path = "../Data/"
filenames <- list.files(path=input_path, pattern="*.JPG", full.names=FALSE)
OP = "1_Nothing/"
lapply(filenames, new.copy_metadata, folder_path=input_path, output_path=OP)
OP = "2_HE/"
lapply(filenames, new.copy_metadata, folder_path=input_path, output_path=OP)
OP = "3_NF/"
lapply(filenames, new.copy_metadata, folder_path=input_path, output_path=OP)
OP = "4_HE_NF/"
lapply(filenames, new.copy_metadata, folder_path=input_path, output_path=OP)
|
984c2640a0799c2e0719158d2a58a5b9673baca0
|
83d9e393402aa92dce1a158ac9019e1d0ae5a4c9
|
/step_selection_analysis.R
|
58d88c4cfbded3ebcd04533c9d26f1d42cb748b3
|
[] |
no_license
|
mahle68/global_seascape
|
74b49231dcaef73ca18711d217c1e63628b76add
|
de9cecf886c7cd6e47fb9cb625949c53bd8c6da3
|
refs/heads/master
| 2022-02-22T16:08:06.655422
| 2022-01-27T07:33:37
| 2022-01-27T07:33:37
| 223,146,833
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,872
|
r
|
step_selection_analysis.R
|
# Scripts for step selection function analysis
# This is script 2 of 4 for reproducing the results of Nourani et al 2021, ProcB.
# session info is provided at the end of script 4 (all_figures.R)
# Elham Nourani, PhD. June. 2021; enourani@ab.mpg.de
#-----------------------------------------------------------------
#to do: add the wind support functions to functions.R
library(tidyverse)
library(lubridate)
library(INLA)
library(corrr)
library(raster)
# ---------- STEP 1: load data #####
#load annotated data (available on the Dryad repository)
load("annotated_steps.RData") #ann_cmpl; This dataframe includes used and alternative steps and can be reproduced using step_generation.R
# ---------- STEP 2: look into autocorrelation #####
ann_cmpl %>%
dplyr::select(c("delta_t", "wind_speed", "wind_support", "wind_support_var", "abs_cross_wind", "delta_t_var","step_length")) %>%
correlate() %>%
stretch() %>%
filter(abs(r) > 0.6)
#corr test to include in the paper
cor.test(ann_cmpl$wind_support_var, ann_cmpl$delta_t_var)
cor.test(ann_cmpl$delta_t, ann_cmpl$delta_t_var)
# ---------- STEP 3: z-transformation (i.e. scale all predictor variables) #####
#z-transform
all_data <- ann_cmpl %>%
mutate_at(c("delta_t", "wind_speed", "wind_support", "wind_support_var", "abs_cross_wind", "delta_t_var"),
list(z = ~(scale(.))))
# ---------- STEP 4: step selection analysis in INLA #####
#repeat variabels that will be used as random slopes
all_data <- all_data %>%
mutate(species1 = factor(species),
species2 = factor(species),
species3 = factor(species),
species4 = factor(species),
ind1 = factor(ind),
ind2 = factor(ind),
ind3 = factor(ind),
ind4 = factor(ind))
# Set mean and precision for the priors of slope coefficients
mean.beta <- 0
prec.beta <- 1e-4
#to be able to produce effect plots for the interaction of wind support and delta-t, we need to add rows to the dataset where the dependent variable is set to NA
#see Gómez-Rubio 2020 for details of prediction with INLA models (i.e. imputation of missing values)
#add one new row to unique strata instead of entire empty copies of strata. assign wind and delta t values on a regular grid (tried with irregular, but range of predictions was off)
set.seed(200)
n <- 500
new_data <- all_data %>%
group_by(stratum) %>%
slice_sample(n = 1) %>%
ungroup() %>%
slice_sample(n = n, replace = F) %>%
mutate(used = NA,
delta_t_z = sample(seq(min(all_data$delta_t_z),max(all_data$delta_t_z), length.out = 10), n, replace = T), #regular intervals for wind support and delta t, so we can make a raster later on
wind_support_z = sample(seq(min(all_data$wind_support_z),max(all_data$wind_support_z), length.out = 10), n, replace = T),
wind_support_var_z = sample(seq(min(all_data$wind_support_var_z),max(all_data$wind_support_var_z), length.out = 10), n, replace = T)) %>%
full_join(all_data)
#The new_data dataframe is available on the Dryad repository under name: new_data_for_modeling.RData
#Model formula
formulaM <- used ~ -1 + delta_t_z * wind_support_z + wind_support_var_z +
f(stratum, model = "iid",
hyper = list(theta = list(initial = log(1e-6),fixed = T))) +
f(species1, delta_t_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05)))) +
f(species2, wind_support_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05)))) +
f(species3, wind_support_var_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05)))) +
f(ind1, delta_t_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05)))) +
f(ind2, wind_support_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05)))) +
f(ind3, wind_support_var_z, model = "iid",
hyper=list(theta=list(initial=log(1),fixed=F,prior="pc.prec",param=c(3,0.05))))
#Model
(b <- Sys.time())
M <- inla(formulaM, family = "Poisson",
control.fixed = list(
mean = mean.beta,
prec = list(default = prec.beta)),
data = all_data,
num.threads = 10,
control.predictor = list(compute = TRUE, link = 1), #this means that NA values will be predicted.
control.compute = list(openmp.strategy = "huge", config = TRUE, cpo = F))
Sys.time() - b #51 min
#This model is available on the Dryad repository under name: INLA_model.RData
#Model for predictions
(b <- Sys.time())
M_pred <- inla(formulaM, family = "Poisson",
control.fixed = list(
mean = mean.beta,
prec = list(default = prec.beta)),
data = new_data,
num.threads = 10,
control.predictor = list(compute = TRUE), #this means that NA values will be predicted.
control.compute = list(openmp.strategy = "huge", config = TRUE, cpo = T))
Sys.time() - b
#This model is available on the Dryad repository under name: INLA_model_preds.RData
#to plot the predictions and coefficients, see all_figures.R (Fig. 3 and 4)
##### not in public
#trying the model with a smooth term for delta-t (response to the reviewer)
all_data$delta_t_cw_group <- inla.group(all_data$delta_t_z, n = 50, method = "cut")
f <- used ~ -1 + delta_t_z * wind_support_z +
f(delta_t_cw_group, model = "rw2", constr = F) +
f(stratum, model = "iid",
hyper = list(theta = list(initial = log(1e-6),fixed = T)))
(b <- Sys.time())
m2a <- inla(f, family ="Poisson",
control.fixed = list(
mean = mean.beta,
prec = list(default = prec.beta)),
data = all_data,
num.threads = 10,
control.predictor = list(compute = T),
control.compute = list(openmp.strategy="huge", config = TRUE, mlik = T, waic = T))
Sys.time() - b #11.31979 mins
tab.rw2 <- data.frame(x = m2a$summary.random$delta_t_cw_group[, "ID"],
y = m2a$summary.random$delta_t_cw_group[, "mean"],
ll95 = m2a$summary.random$delta_t_cw_group[,"0.025quant"],
ul95 = m2a$summary.random$delta_t_cw_group[,"0.975quant"]
)
plot(tab.rw2$x,tab.rw2$y)
plot(tab.rw2$x,exp(tab.rw2$y)/(1+exp(tab.rw2$y)), xlab = "delta_t", ylab = "Probability of use", cex.lab = 2, type = "l")
X11(width = 4, height = 4)
par(mar = c(5, 5, 4, 2), cex = 1.1)
plot(x = all_data$delta_t_cw_group, y = all_data$used, col = "grey70", xlab = "delta t", ylab = "Probability of use", cex.lab = 1.5)
lines(tab.rw2$x,exp(tab.rw2$y)/(1+exp(tab.rw2$y)), lwd = 1.2)
polygon(x = c(tab.rw2$x, rev(tab.rw2$x)), y = c(exp(tab.rw2$ll95)/(1+exp(tab.rw2$ll95)), exp(tab.rw2$ul95)/(1+exp(tab.rw2$ul95))),
col = adjustcolor("grey", alpha.f = 0.7), border = NA)
## quadratic term
f <- used ~ -1 + delta_t_z * wind_support_z + I(delta_t_z ^ 2) +
f(stratum, model = "iid",
hyper = list(theta = list(initial = log(1e-6),fixed = T)))
(b <- Sys.time())
m3a <- inla(f, family ="Poisson",
control.fixed = list(
mean = mean.beta,
prec = list(default = prec.beta)),
data = all_data,
num.threads = 10,
control.predictor = list(compute = T),
control.compute = list(openmp.strategy="huge", config = TRUE, mlik = T, waic = T))
Sys.time() - b #11.31979 mins
########## summary stats
ann_cmpl %>%
group_by(group) %>%
summarise(yrs_min = min(year(date_time)),
yrs_max = max(year(date_time)),
n_ind = n_distinct(ind),
n_tracks = n_distinct(track),
n_steps = n_distinct(stratum))
|
d3d96a1d34a2c3f5a74eab5e0e337b0afdc920e4
|
f8019d1e47b1f96f0b648c87ded364a1669faff1
|
/tests/testthat/test-emojiscape.R
|
5d7d392c910bb74042b2e8618d153bffadc2b3e9
|
[
"MIT"
] |
permissive
|
matt-dray/emojiscape
|
ac37bbd6cf5bc5ba42a9b908f2cd426756de3481
|
5048606a6837d47d843e630db23e53baa3d761e6
|
refs/heads/main
| 2023-06-04T14:25:41.955166
| 2021-06-26T13:10:41
| 2021-06-26T13:10:41
| 379,699,420
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 514
|
r
|
test-emojiscape.R
|
# Invalid terrain identifiers -- an unknown terrain name or a non-character
# input -- must raise an error from both generate() and get_set().
test_that("error on bad terrain", {
  expect_error(generate("x"))
  expect_error(generate(1))
  expect_error(get_set("x"))
  expect_error(get_set(1))
})
# Out-of-range grid sizes: a negative size is an error; a fractional size
# is accepted with a warning (presumably rounded -- confirm in generate()).
# NOTE(review): the description says "messages" but the expectations check
# an error and a warning, not message() output.
test_that("messages on bad grid_size", {
  terrain <- "ocean"
  expect_error(generate(terrain, -1))
  expect_warning(generate(terrain, 0.1))
})
# get_set() returns a plain data.frame; for the "ocean" terrain it has
# 4 columns and 3 rows (one row per emoji in the set).
test_that("data.frame returned with get_set()", {
  expect_identical(class(get_set("ocean")), "data.frame")
  expect_identical(length(get_set("ocean")), 4L)
  expect_identical(nrow(get_set("ocean")), 3L)
})
|
fd7b8261aaa8708df1954e552750cef8b136f540
|
c5faa9a2e350978662624f73725eb7ee02c55cb0
|
/man/plotSpatial.MicroarrayData.Rd
|
be9b3ca8c0b9899912ef8e99bea07d8d10b647c0
|
[] |
no_license
|
HenrikBengtsson/aroma
|
341cc51ddd8f9c111347207535bfe2a85ea7622a
|
c0314ea003fb1d99d0db7f314e86059502d175c6
|
refs/heads/master
| 2016-09-05T18:24:56.275671
| 2014-06-19T04:13:11
| 2014-06-19T04:13:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,048
|
rd
|
plotSpatial.MicroarrayData.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% MicroarrayData.PLOT.R
%
% on Tue Jan 15 18:33:31 2008.
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{plotSpatial.MicroarrayData}
\alias{plotSpatial.MicroarrayData}
\alias{MicroarrayData.plotSpatial}
\alias{plotSpatial.MicroarrayData}
\alias{plotSpatial,MicroarrayData-method}
\title{Plots a spatial representation of one field}
\description{
Plots a spatial representation of one field.
It is recommended to use the \code{plot} function instead of calling
this method explicitly (see \code{\link[aroma:plot.MicroarrayData]{*plot}()}).
}
\usage{\method{plotSpatial}{MicroarrayData}(this, what, slide=1, include=NULL, exclude=NULL, col="auto", xlab=NULL, ylab="", axes=FALSE, xaxs="i", yaxs="i", pch="auto", grid=TRUE, log=NULL, ..., cex=NULL, style=NULL)}
\arguments{
\item{what}{What to plot. Any field that can be retrieved by \code{extract},
is accepted.}
\item{slide}{The slide to be plotted.}
\item{include}{The indices of the spots that should be included.
If it is instead a name of one or more flags, the spots which have been
flagged with these flags are considered.
If \code{\link[base]{NULL}} all spots are considered.}
\item{exclude}{The indices of the spots that should be excluded.
If it is instead a name of one or more flags, the spots which have been
flagged with these flags are excluded. If \code{\link[base]{NULL}} no spots are excluded.}
\item{col}{The color(s) to be used for the plotted spots, i.e. for the
spots \emph{after} inclusion and exclusion. If the value is
\code{"redgreen"} a red to green palette is used.}
\item{...}{Common arguments accepted by most plot functions.
For more information see \code{\link[graphics]{par}} and \code{\link[graphics]{plot}}.}
\item{cex}{For internal use only! See above.}
\item{pch}{For internal use only! See above.}
}
\examples{
SMA$loadData("mouse.data")
layout <- Layout$read("MouseArray.Layout.dat", path=system.file("data-ex", package="aroma"))
raw <- RawData(mouse.data, layout=layout)
ma <- getSignal(raw)
subplots(4)
plotSpatial(ma) # Spatial plot of log ratios before.
normalizeWithinSlide(ma, "p") # Print-tip-wise lowess normalization.
plotSpatial(ma) # Spatial plot of log ratios after.
plotSpatial(ma, include=(abs(ma$M) > 2))
points(ma, include=(abs(ma$M) > 2), col="red")
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
\code{\link[aroma:plot.MicroarrayData]{*plot}()}.
\code{\link[aroma:plotXY.MicroarrayData]{*plotXY}()}.
\code{\link[aroma:highlight.MicroarrayData]{*highlight}()}.
\code{\link[aroma:text.MicroarrayData]{*text}()}.
For more information see \code{\link{MicroarrayData}}.
}
\keyword{internal}
\keyword{methods}
|
98f45cc9b8ed57d797be894fdfcb9c4010f577f8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pwr/examples/cohen.ES.Rd.R
|
d3e939cd602f371f43dfce94233c5004d92489d4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 444
|
r
|
cohen.ES.Rd.R
|
library(pwr)
### Name: cohen.ES
### Title: Conventional effects size
### Aliases: cohen.ES
### Keywords: htest
### ** Examples
## medium effect size for the correlation test
cohen.ES(test="r", size="medium")
## sample size for a medium size effect in the two-sided correlation test
## using the conventional power of 0.80
pwr.r.test(r=cohen.ES(test="r",size="medium")$effect.size,
power=0.80, sig.level=0.05, alternative="two.sided")
|
299a01824eeb68493725c7aa31d398a17c47f961
|
4713ce417897a95260d61762d170c090b5024859
|
/data-raw/MLGrawdata.R
|
a5d328a030c9c0b4ecabb99d92244b4cc0b7dd1f
|
[] |
no_license
|
nicola-sartori/MLGdata
|
6a75c470d025271258388b39803fc302a18211c8
|
2591593f3ccd263e446fd17c387c81d57e4e2fae
|
refs/heads/master
| 2022-12-11T14:17:08.662675
| 2020-09-17T13:39:50
| 2020-09-17T13:39:50
| 296,320,209
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,727
|
r
|
MLGrawdata.R
|
# create data objects that are stored in /data directory of the package
########################################################################
# Neonati
Neonati <- read.table("./data-raw/birth.dat",
col.names=c("peso","durata","fumo"))
Neonati$fumo <- factor(Neonati$fumo)
levels(Neonati$fumo) <- c("NF", "F")
save(Neonati,file="./data/Neonati.RData")
#Clotting
Clotting <- read.table("./data-raw/clotting.dat",header=TRUE)
save(Clotting,file = "./data/Clotting.RData")
Chlorsulfuron <- read.table("./data-raw/chlorsulfuron.dat",
col.names=c("gruppo","dose","area"))
save(Chlorsulfuron,file = "./data/Chlorsulfuron.RData")
#Credit
library(Fahrmeir)
Credit <- credit
save(Credit,file = "./data/Credit.RData")
#Aids
Aids<-read.table("./data-raw/aids.dat", col.names= c("casi", "tempo"))
save(Aids,file = "./data/Aids.RData")
#Beetles
Beetles <- read.table("./data-raw/beetles.dat", header=TRUE,
col.names = c("num", "uccisi", "logdose"))
save(Beetles,file = "./data/Beetles.RData")
#Customer
Customer <- read.table("./data-raw/customer.dat")
save(Customer,file = "./data/Customer.RData")
#Customer3
Customer3 <- read.table("./data-raw/customer3.dat")
save(Customer3,file = "./data/Customer3.RData")
#Orthodont
Orthodont <- read.table("./data-raw/Ortho.dat", header=TRUE)
names(Orthodont) <- c("genere", "dist8a", "dist10a", "dist12a", "dist14a")
save(Orthodont,file = "./data/Orthodont.RData")
#Abrasion
Abrasion <- read.table("./data-raw/abrasion.dat", header=T)
save(Abrasion,file = "./data/Abrasion.RData")
#Calcium
library(SMPracticals)
Calcium <- calcium
save(Calcium,file = "./data/Calcium.RData")
#Chimps
library(SMPracticals)
Chimps <- chimps
save(Chimps,file = "./data/Chimps.RData")
#Cement
Cement <- read.table("./data-raw/cement.dat",header=TRUE)
save(Cement,file = "./data/Cement.RData")
#Seed
Seed <- read.table("./data-raw/seed.dat",header=TRUE)
save(Seed,file = "./data/Seed.RData")
#Wool
Wool <- read.table("./data-raw/wool.dat",header=TRUE)
save(Wool,file = "./data/Wool.RData")
#Beetles10
Beetles10 <- read.table("./data-raw/beetles10.dat", header=TRUE)
save(Beetles10,file = "./data/Beetles10.RData")
#Heart
Heart <- read.table("./data-raw/heart.dat", header=TRUE)
save(Heart,file = "./data/Heart.RData")
#Kyphosis
Kyphosis <- read.table("./data-raw/kyphosis.dat", header=TRUE)
save(Kyphosis,file = "./data/Kyphosis.RData")
#Germination
Germination <- read.table("./data-raw/germination1.dat", header=TRUE)
save(Germination,file = "./data/Germination.RData")
#Vehicle
Vehicle<-read.table("./data-raw/vehicle.dat", header=T)
save(Vehicle,file = "./data/Vehicle.RData")
#Mental
Mental<-read.table("./data-raw/mental.dat", header=T)
save(Mental,file = "./data/Mental.RData")
#Pneu
library(SMPracticals)
Pneu <- pneu
save(Pneu,file = "./data/Pneu.RData")
#Alligators
Alligators <- read.table("./data-raw/alligators2.dat")
save(Alligators,file="./data/Alligators.RData")
#housing
#library(MASS)
#Snore
Snore <- read.table("./data-raw/snore.dat",header=TRUE)
Snore$russ <- factor(Snore$russ)
levels(Snore$russ) <- c("mai", "a volte", "spesso", "sempre")
save(Snore,file="./data/Snore.RData")
#Drugs
Drugs <- read.table("./data-raw/drugs.dat", header=T)
colnames(Drugs)[1:3] <- c("alc","sig","mar")
save(Drugs,file="./data/Drugs.RData")
#Drugs2
Drugs2 <- read.table("./data-raw/drugs2.dat", header=T)
colnames(Drugs2)[1:2] <- c("alc","sig")
save(Drugs2,file="./data/Drugs2.RData")
#Spending
Spending <- read.table("./data-raw/spending.dat", header=T)
save(Spending,file="./data/Spending.RData")
#Infant
Infant <- read.table("./data-raw/infant.dat", header=T)
save(Infant,file="./data/Infant.RData")
#Ants
Ants <- read.table("./data-raw/ants.dat", header=T)
save(Ants,file="./data/Ants.RData")
#Biochemists
library(pscl)
Biochemists <- bioChemists
save(Biochemists,file="./data/Biochemists.RData")
#Aziende
Aziende <- read.table("./data-raw/aziende.dat", header=T)
save(Aziende,file="./data/Aziende.RData")
#Britishdoc
Britishdoc <- read.table("./data-raw/british-doctors.dat", header=T)
save(Britishdoc,file="./data/Britishdoc.RData")
#Bartlett
library(vcdExtra)
save(Bartlett,file="./data/Bartlett.RData")
#Bartlett2
Bartlett2 <- read.table("./data-raw/bartlett1.dat", header=T)
save(Bartlett2,file="./data/Bartlett2.RData")
#Esito
Esito <- read.table("./data-raw/esito.dat", header=T)
save(Esito,file="./data/Esito.RData")
#Drugs3
Drugs3 <- read.table("./data-raw/drugs3.dat", header=T)
save(Drugs3,file="./data/Drugs3.RData")
#Homicide
Homicide <- read.table("./data-raw/Homicide.dat", header=T)
save(Homicide,file="./data/Homicide.RData")
#Rats
Rats <- read.table("./data-raw/Rats.dat", header=T)
save(Rats,file="./data/Rats.RData")
#Bioassay
Bioassay <- read.table("./data-raw/Bioassay.dat", header=T)
save(Bioassay,file="./data/Bioassay.RData")
#Stroke
Stroke <- read.table("./data-raw/Stroke.dat", header=T)
save(Stroke,file="./data/Stroke.RData")
#Stroke1
Stroke1 <- read.table("./data-raw/Stroke1.dat", header=T)
colnames(Stroke1) <- c("Subject","Group","Week","y")
save(Stroke1,file="./data/Stroke1.RData")
#Ohio
library(geepack)
Ohio <- ohio
save(Ohio,file="./data/Ohio.RData")
#Malaria
Malaria <- read.csv("./data-raw/malkenya.csv", header = TRUE)
save(Malaria,file="./data/Malaria.RData")
#Testingresso
Testingresso <- read.table("./data-raw/testingresso.dat", header = TRUE)
save(Testingresso,file="./data/Testingresso.RData")
#Dogs
#library(dobson)
#Dogs <- dogs
Dogs <- read.table("./data-raw/dogs.dat", header = TRUE)
save(Dogs,file="./data/Dogs.RData")
#Orthodont1
Orthodont1 <- read.table("./data-raw/Ortho1.dat", header=TRUE)
save(Orthodont1,file = "./data/Orthodont1.RData")
|
8baff7735b908d3852bcafec83dca5f846fe5b3f
|
7a646632f9daf6f0e83b4076a79140c8e2814b87
|
/R/res_munge_mer.R
|
be50f0344696bdd0044cdd7a32c08914b62b208d
|
[
"MIT"
] |
permissive
|
USAID-OHA-SI/Resurrection
|
7d01685bb82c163b29f85c251b2bfb4d0c70a250
|
2ea85356bda4b16cc0663a070f5a303184e55664
|
refs/heads/master
| 2020-04-15T22:38:45.422968
| 2019-02-04T15:37:51
| 2019-02-04T15:37:51
| 165,080,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,613
|
r
|
res_munge_mer.R
|
# MUNGE MER DATA
# Project: Resurection
# A.Chafetz, USAID
# Purpose: clean up MER data for use
# Created: 2019-01-10
# MUNGE -------------------------------------------------------------------
## MER - beneficiaries
ben <- df_mer %>%
filter(((indicator %in% c("HTS_TST","HTS_TST_POS") &
standardizeddisaggregate %in% c("Modality/Age/Sex/Result", "Modality/Age Aggregated/Sex/Result")) |
(indicator == "TX_CURR" & standardizeddisaggregate == "Total Numerator")),
!mechanismid %in% c("00000", "00001"))
ben_vmmc <- ben %>%
filter(modality == "VMMC") %>%
mutate(indicator = paste(indicator, modality, sep = "_"))
ben <- ben %>%
bind_rows(., ben_vmmc) %>%
group_by(operatingunit, fundingagency, mechanismid, indicator, implementingmechanismname, primepartner) %>%
summarize_at(vars(fy2018apr), sum, na.rm = TRUE) %>%
ungroup() %>%
mutate(type = case_when(indicator == "TX_CURR" ~ "TX",
indicator %in% c("HTS_TST", "HTS_TST_VMMC") ~ "HTS",
TRUE ~ "HTS_POS"),
indicator = str_remove(indicator, "_POS"),
program = ifelse(indicator == "TX_CURR", "C&T", "HTS")) %>%
spread(indicator, fy2018apr, fill = 0)
ben <- ben %>%
mutate(beneficiaries = case_when(type == "TX" ~ TX_CURR,
TRUE ~ HTS_TST - HTS_TST_VMMC),
beneficiaries = pmax(beneficiaries, 0)) %>%
select(-HTS_TST:-TX_CURR)
rm(ben_vmmc)
|
0e05b31c932a16e9451c538457ccf3a8904e57d5
|
645b8d1565e25244cb1c8030542bcd77114cb841
|
/Data-StoryTellingA/apendix.R
|
94a520a5920f91529ccbcce336155a27cb868fa0
|
[] |
no_license
|
mahesh122000/R
|
e649e3dc8181af04d325e4089a935717f811d8f2
|
1ff933f51191ab1585bdaf0d9013abad4c0f3e4f
|
refs/heads/master
| 2021-01-07T05:20:31.455717
| 2018-03-03T04:10:25
| 2018-03-03T04:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
apendix.R
|
### Appendix: Korean morphological analysis ###
# NOTE(review): install.packages() at the top of a script re-installs on
# every run; consider guarding with requireNamespace().
install.packages("KoNLP")
library(KoNLP)
# Dictionary setup: use the Sejong dictionary for morpheme analysis
useSejongDic()
#useSystemDic()
# Extract nouns from the novel 'Housemistress B and the Love Letters'
# Read the novel file (UTF-8 encoded)
sagam <- file("sagam.txt", encoding = "UTF-8")
# Store the file contents one line at a time
novel <- readLines(sagam)
# Analyse line by line and extract the nouns
myungsa <- sapply(novel, extractNoun, USE.NAMES = T)
extractNoun("아버지가 가방에 들어가신다")
# Nouns extracted from the first 3 lines
head(myungsa, 3)
# Full morphological analysis (all morphemes, not just nouns)
hyungtaeso<- sapply(novel, MorphAnalyzer, USE.NAMES = F)
MorphAnalyzer("아버지가 가방에 들어가신다")
head(hyungtaeso, 1)
|
360afa01a3cc2b0261894e8ebaf19f491b08d108
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/edgarWebR/examples/latest_filings.Rd.R
|
d67a36ae74ecdaddd1af150287a22ddab4fcb52c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 142
|
r
|
latest_filings.Rd.R
|
library(edgarWebR)
### Name: latest_filings
### Title: SEC Latest Filings
### Aliases: latest_filings
### ** Examples
latest_filings()
|
d5e0bd11742b780c70a90fe071828344d4cebfd7
|
175e877985e8e9c47c74488577dca20f046cc7a7
|
/source.R
|
116c2b3eab971e777df344207bb45ce8beb10eec
|
[] |
no_license
|
pkasela/Web-Marketing-Project
|
f9d3d54a1e43d4839fc590eacb4eaf9067333597
|
4979ca3620bea9c3773316790e901a2db516f22c
|
refs/heads/master
| 2020-06-12T09:39:00.920124
| 2019-07-05T11:23:05
| 2019-07-05T11:23:05
| 194,261,233
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,388
|
r
|
source.R
|
set.seed(12345)
setwd('/home/pranav/Desktop/Web-Marketing-Project/')
#The code folder must contain a folder named "datasets"
#containing the 7 data tables
#Script preparation as done in class
source('Email_Engagement/script_preparation.R')
#model using Decisition Trees(DT) and RandomForest(RF)
source('Email_Engagement/Email_Engagement_DT.R')
#model using NaiveBayes and TreeAugmentedNB(TAN)
source('Email_Engagement/Email_Engagement_Propensity_NB_&_TAN.R')
#model using xgBOOST
source('Email_Engagement/Email_Engagement_Propensity_xgBOOST.R')
#plot ROC curve with AUC measure
ggplot() +
geom_line(data = ROC_DT,aes(x,y,col="A"),show.legend = TRUE) +
xlab('False Positive Rate') + ylab('True Positive Rate') +
geom_line(data=ROC_RF,aes(x,y,col="B"),show.legend = TRUE) +
geom_line(data=ROC_NB,aes(x,y,col="C"),show.legend = TRUE) +
geom_line(data=ROC_TAN,aes(x,y,col="D"),show.legend = TRUE) +
geom_line(data=ROC_XG1,aes(x,y,col="E"),show.legend = TRUE) +
geom_line(data=ROC_XG2,aes(x,y,col="F"),show.legend = TRUE) +
scale_colour_manual(name = "Model",
values = c("A"="red","B"="blue","C"="green",
"D"="brown","E"="purple","F"="orange"),
labels = c("DT", "RF","NB","TAN","xgBoost 1","xgBoost 2")) +
annotate("text", x=0.8, y=0,
label= paste0("AUC DT = ",AUC_DT),
col="red") +
annotate("text", x=0.8, y=0.05,
label= paste0("AUC RF = ",AUC_RF),
col="blue") +
annotate("text", x=0.8, y=0.1,
label= paste0("AUC NB = ",AUC_NB),
col="green") +
annotate("text", x=0.8, y=0.15,
label= paste0("AUC TAN = ",AUC_TAN),
col="brown") +
annotate("text", x=0.8, y=0.2,
label= paste0("AUC xgBoost1 = ",AUC_XG1),
col="purple") +
annotate("text", x=0.8, y=0.25,
label= paste0("AUC xgBoost2 = ",AUC_XG2),
col="orange")
#barplot delle F1-measure
F1_measure_DF <- data.frame(Model=c("DT","RF","NB","TAN","xgBoost 1","xgBoost 2"),
value=round(c(F1_DT,F1_RF,F1_NB,F1_TAN,F1_xg1,F1_xg2),3))
ggplot(data=F1_measure_DF,aes(x=reorder(Model, -value),y=value)) +
geom_bar(stat = 'identity', fill="steelblue")+
geom_text(aes(label=value), vjust=1.6, color="white", size=5)+
xlab("Model") + ylab("F1-Measure")+
theme_minimal()
#Script prepartion for the churners
source('Propensity_churn/Creazione_churner.R')
#model using Decisition Trees(DT) and RandomForest(RF)
source('Propensity_churn/churn_DT.R')
#model using NaiveBayes and TreeAugmentedNB(TAN)
source('Propensity_churn/churn_NB_&_TAN.R')
#model using xgBOOST
source('Propensity_churn/churn_xgb.R')
#bar plot delle accuracy
Accuracy_total <- rbind(Accuracy_total,data.frame(model = c("BN","TAN","xgBoost"),
Accuracy = c(Acc_BN,Acc_TAN,Acc_xgb)))
Accuracy_total$Accuracy <- round(Accuracy_total$Accuracy,3)
ggplot(data=Accuracy_total,aes(x=reorder(model, -Accuracy),y=Accuracy)) +
geom_bar(stat = 'identity', fill="steelblue")+
geom_text(aes(label=Accuracy), vjust=1.6, color="white", size=5)+
xlab("Model") + ylab("Accuracy")+
theme_minimal()
#model to predict the future income using ARIMA and LSTM (RNN)
source('Time_Series_Model/time_Series.R')
#daily prevision ARIMA
plot(forecast(mod,10),include=30,main="Daily Forecast",
xlab="Days",ylab="Total Revenue")
lines(331:341,time_series[332:342])
#weekly prevision ARIMA
plot(forecast(mod1,2),main="Weekly Forecast",
xlab="Weeks",ylab="Total Revenue")
lines((nrow(df_weekly)-2):nrow(df_weekly),
df_weekly$weekly_income_not_scaled[(nrow(df_weekly)-2):nrow(df_weekly)])
#weekly predictions with LSTM
ggplot(data.frame(x=1:length(y_test_no_scaled),y=y_test_no_scaled)) +
geom_line(aes(x,y,col="A")) +
geom_line(data=data.frame(x=1:length(y_pred_no_scaled),y=y_pred_no_scaled),
aes(x,y,col="B")) +
scale_colour_manual(name = "Revenue",
values = c("A"="red","B"="blue"),
labels = c("Actual", "Predicted")) +
ylab("Total Revenue") + xlab("Weeks") +
ggtitle("Weekly Predictions") +
theme(plot.title = element_text(hjust = 0.5)) +
annotate("text", x = 2.5,y=2000000,label=paste0("MAE: ", round(MAE,4)*100,"%"))
#sink("sessionInfo.txt")
#sessionInfo()
#sink()
|
f1daf3d175c9fd886d1ac1830c7fb85d14ede002
|
df0fc04359afff91abce4887935db2fe03ea8cac
|
/shiny/server.r
|
c6440824485943d60b2fa2f976089bc1c68b7a75
|
[] |
no_license
|
WolfgangGross/Social_Data_Mining
|
bc5d9d4d067e453ffd1c5307a62c58f15f1ad67b
|
82ba9badf79629104ccce1bd24aa05d309c5af77
|
refs/heads/master
| 2020-06-03T10:07:59.680865
| 2014-10-28T08:18:06
| 2014-10-28T08:18:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
server.r
|
#shiny server
# Serves a single time-series plot whose aggregation level (second /
# minute / hour) is chosen by the user via input$timeInput.
library(shiny)
library(ggplot2)
library(bit64)
# Pre-aggregated counts, one data frame per time resolution.
# Each is assumed to have `time` and `count` columns -- see the plot below.
df1 <- readRDS(file="plot_second.Rda")
df2 <- readRDS(file="plot_minute.Rda")
df3 <- readRDS(file="plot_hour.Rda")
shinyServer(
  function(input,output){
    # Echo the selected resolution back to the UI
    output$outText <- renderPrint(input$timeInput)
    output$plot <- renderPlot({
      # Pick the data frame matching the selected resolution.
      # NOTE(review): if input$timeInput is none of the three values,
      # `df` is undefined and the plot errors -- confirm the UI restricts
      # the choices to exactly 'Second', 'Minute', 'Hour'.
      temp <-input$timeInput
      if(temp == 'Second'){df <- df1}
      if(temp == 'Minute'){df <- df2}
      if(temp == 'Hour'){df <- df3}
      ggplot(df,aes(x=time,y=count)) + geom_line()
    })
  }
)
|
a29c8a8d0cd2b1134c273295af2f9dcf11cbaaa3
|
4c3b7f1443db0b3e64a3c92fa364d5910fced314
|
/cachematrix.R
|
eb6aad6bf6228ba317aa26009685ed702d72b418
|
[] |
no_license
|
aksrathore/ProgrammingAssignment2
|
4889e3f4af1d52cb454ca07a1b0d3a5928c3fb17
|
a9baebc09f84fb0850e41a9546a049c37d37bce6
|
refs/heads/master
| 2021-01-18T06:45:30.780146
| 2016-02-07T13:05:13
| 2016-02-07T13:05:13
| 51,246,395
| 0
| 0
| null | 2016-02-07T12:14:08
| 2016-02-07T12:14:08
| null |
UTF-8
|
R
| false
| false
| 1,385
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Inverse matrices are stored in the memory using scoping rules
## Create a special "matrix" object that caches its inverse.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setInverse(i) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  matrixInverse <- NULL
  set <- function(y) {
    x <<- y
    ## Bug fix: the original used `matrixInverse <- NULL`, which only
    ## created a local variable inside set(); the cached inverse in the
    ## enclosing environment was never cleared, so a stale inverse could
    ## be returned after the matrix was replaced. `<<-` updates the
    ## shared environment.
    matrixInverse <<- NULL
  }
  get <- function() x
  setInverse <- function(Inverse) matrixInverse <<- Inverse
  getInverse <- function() matrixInverse
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## Function to return the matrix inverse
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## If the inverse has already been computed it is retrieved from the cache;
## otherwise it is computed with corpcor::pseudoinverse() (used instead of
## solve() to avoid errors on singular matrices) and stored in the cache
## before being returned.
##
## x   : object produced by makeCacheMatrix()
## ... : further arguments passed on to corpcor::pseudoinverse()
cacheSolve <- function(x, ...) {
  ## Dependency check: the original called install.packages() from inside
  ## the function, silently installing software at run time. A function
  ## should never do that; requireNamespace() tests availability and we
  ## fail with a clear message if the package is missing.
  if (!requireNamespace("corpcor", quietly = TRUE)) {
    stop("Package 'corpcor' is required; please install it first.",
         call. = FALSE)
  }
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getInverse()
  if (!is.null(inverse)) {
    message("getting cached matrix from the memory")
    return(inverse)
  }
  message("The inverse of the matrix is not in memory so the inverse is computed")
  data <- x$get()
  inverse <- corpcor::pseudoinverse(data, ...)
  x$setInverse(inverse)
  inverse
}
|
bcf70cd463d7a1837e1a1aa255dc8c975061e0e5
|
246e5b2ac47a2adda553f1704ad758319be568df
|
/man/image2swf.Rd
|
df0516a8092ad55cd710177645f97fc257414bf8
|
[] |
no_license
|
yixuan/R2SWF
|
1537d098a0883d19eb29b8cd7045dfc1f85cb1c7
|
ab30bb7df96a5039bd7edfcd5c39f35a3dfbfca3
|
refs/heads/master
| 2022-11-25T14:54:20.088634
| 2022-11-17T05:17:45
| 2022-11-17T05:17:45
| 12,831,383
| 3
| 1
| null | 2020-06-22T00:14:47
| 2013-09-14T15:27:48
|
C
|
UTF-8
|
R
| false
| true
| 1,290
|
rd
|
image2swf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xx2swf.R
\name{image2swf}
\alias{image2swf}
\title{Convert bitmap images to SWF}
\usage{
image2swf(input, output = "movie.swf", bgColor = "white", interval = 1)
}
\arguments{
\item{input}{the file names of the images to be converted}
\item{output}{the name of the output SWF file}
\item{bgColor}{background color of the output SWF file}
\item{interval}{the time interval (in seconds) between animation frames}
}
\value{
The name of the generated swf file if successful.
}
\description{
Given the file names of a sequence of images, this function can convert them
into a Flash file (.swf). Supported input formats are jpg/jpeg and png. The
two formats are allowed to appear in the same sequence.
}
\details{
This function uses the Ming library (\url{https://github.com/libming/libming}) to
implement the conversion. If you want to create a Flash file consisting of
vector graphics, use \code{\link{svg2swf}()} instead.
}
\examples{
if(capabilities("png")) {
olddir = setwd(tempdir())
png("Rplot\%03d.png")
for(i in 1:9) plot(runif(20), ylim = c(0, 1))
dev.off()
output = image2swf(sprintf("Rplot\%03d.png", 1:9))
swf2html(output)
setwd(olddir)
}
}
\author{
Yixuan Qiu <\url{https://statr.me}>
}
|
bb8e4ea50fbd7d562fefaa1220a7778a4456f6ce
|
898c7003f627db881ea0d64b5b9c9c620bacec66
|
/man/rfEwasExampleBetaValues.Rd
|
1e84f7ec101843fca5c810ed0e95b791d49d22e4
|
[] |
no_license
|
cran/RefFreeEWAS
|
e713c9faf8433bc6a004e620b991a48f84e774db
|
cf44701b7b60234f5c52b3eb56bfc0ed6ee784e7
|
refs/heads/master
| 2021-06-01T22:08:44.413495
| 2018-12-14T05:00:03
| 2018-12-14T05:00:03
| 17,693,283
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
rd
|
rfEwasExampleBetaValues.Rd
|
\name{rfEwasExampleBetaValues}
\alias{rfEwasExampleBetaValues}
\docType{data}
\alias{rfEwasExampleBetaValues}
\title{Simulated mixed-cell DNA methylation data set}
\description{1000 CpG sites x 250 subjects. First 250 CpGs are DMRs for the cell types, although the idea is that this would not be known in practice.}
\usage{rfEwasExampleBetaValues}
\format{1000 CpG sites x 250 subjects.}
|
054e11ec6c9af73c5ae93e252de57f4c4ca21b26
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fda/examples/lmeWinsor.Rd.R
|
f642185c26133ab9c75e30090fa84da64d9d5fe6
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
lmeWinsor.Rd.R
|
library(fda)
### Name: lmeWinsor
### Title: Winsorized Regression with mixed effects
### Aliases: lmeWinsor
### Keywords: models
### ** Examples
requireNamespace('nlme')
fm1w <- lmeWinsor(distance ~ age, data = nlme::Orthodont,
random=~age|Subject)
fm1w.1 <- lmeWinsor(distance ~ age, data = nlme::Orthodont,
random=~age|Subject, trim=0.1)
|
e2bb038ff36ee4bf9ef7b9d074df36be7c5f22ac
|
e294c658ff4ea61a8ae729fc1735b3e64afd69bd
|
/Rwanda_Map/scripts/Rwanda_ggplot_quick.R
|
31fbe45a17a2bc7fce210bbcca4b207724482c6e
|
[] |
no_license
|
ElliotMeador84/Rwanda_2
|
ff4179a963e6b1902fc3ece7624b315bc41ae15c
|
f49b326f5a23c1d1b746095695dd642f453f6391
|
refs/heads/master
| 2021-04-28T05:40:14.994501
| 2018-06-01T15:29:19
| 2018-06-01T15:29:19
| 122,182,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,941
|
r
|
Rwanda_ggplot_quick.R
|
# Libraries , dasta and functions -------
library(tidyverse)
library(RColorBrewer)
library(ggrepel)
source('C:/R/all_functions.R')
# coop_location <- read_csv('data/cleaned_files/Coop_location.csv')
# save(coop_location,file = 'data/coop_location.RData')
load('Rwanda_Map/data/rwanda.shape_df.RData')
load('Rwanda_Map/data/coop_location.RData')
# Rwanda base map --------
# Get background map
# ggplot has the map_data geom that
# uses information from the 'maps' package
#
# IMPORTANT
# this function replaces purrr::map()
Rwanda <-
map_data(map = "world",
# use ?maps to view all map databases
region = "Rwanda",
# change to country/region of choice
interior = T) #
# Plot major cities
#
Rwanda_cities <- world.cities %>% # world.cities is in maps::
filter(country.etc == 'Rwanda')
# Combine the two geoms --------
## Make capital city easier to identify
Rwanda_cities <- Rwanda_cities %>%
mutate(name = ifelse(name == 'Kigali', 'Kigali (capital)', name))
### Grab the cooperative's location
load(file = 'data/coop_location.RData')
#### fix the names
names(coop_location) <- tolower(names(coop_location))
# split to run ggrepel one at a time
coop_location_ls <- split(coop_location, f = coop_location$name)
coop_location_ls <- purrr::map(coop_location_ls,function(x){
x %>%
mutate(name = str_replace_all(name,'_',' '))
})
# function to handle repel each point
repel_geom_text <- function(x = tibble,
y = 0 ,
z = 0) {
geom_text_repel(
data = x,
aes(long, lat, group = name, label = name),
nudge_x = y,
nudge_y = z
)
}
# Rwanda Cooperative Location Plot ====
ggplot(rwanda.shape_df, aes(long, lat, group = group)) +
geom_polygon(fill = brewer.pal(9, 'Greens')[3],
color = brewer.pal(9, 'Greens')[4]) +
geom_point(data = Rwanda_cities, aes(long, lat, group = name)) +
geom_point(data = coop_location, aes(long, lat, group = name),
shape = 15) +
repel_geom_text(coop_location_ls$Coop_C, .5,-.5) +
repel_geom_text(coop_location_ls$Coop_D, .35, .15) +
repel_geom_text(coop_location_ls$Coop_E, .35,-.1) +
repel_geom_text(coop_location_ls$Coop_A,9,-0.2) +
repel_geom_text(coop_location_ls$Coop_B,-.5,.5) +
geom_text(
data = Rwanda_cities,
aes(long, lat, label = name, group = name),
nudge_x = -0.025,
nudge_y = 0.045,
color = 'black',
size = 3,
check_overlap = T
) +
coord_map() +
scale_x_continuous(breaks = seq(29, 31, 0.5)) +
theme_void() +
theme(panel.background = element_blank(),
plot.margin = margin(rep(.5, 4))) +
labs(title = 'Example',
caption = 'Locations are approximate')
# ggsave('png/Rwanda_Coop_Location.png',
# height = 8,
# width = 11)
|
c064fa022782a7541bb02ef19f078ae08c8775bc
|
736a4eb05526fe62fe7ae1123ca57fcf8ab40e6e
|
/Plotting/effect.plot.R
|
c0969440088426dadd9069f6167c94a253279d00
|
[] |
no_license
|
ScottZimmerman/ADRD_Trial_Sim
|
cb4d6b25c402a890bfce6b82feb12278ba8a1395
|
318cd584cbb1d3025ad8d34f4fc93a130bb190a1
|
refs/heads/main
| 2023-01-31T01:06:26.766712
| 2020-12-16T20:43:08
| 2020-12-16T20:43:08
| 319,128,360
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,082
|
r
|
effect.plot.R
|
#' Plots the estimated effect size at different end points
#'
#' @param inputs list. Contains `months` character vector. List of study end points
#' `effect_size` character vector. List of estimated effect sizes at each end point
#' `ci_low` character vector. List of low end of confidence interval for each effect estimate
#' `ci_high` character vector. List of high end of confidence interval for each effect estimate
#'
#' @return none. Plots the baseline distribution of a given variable
library("ggplot2")
library("tidyr")
library("dplyr")
effect_plot <- function(inputs){
  # Unpack the four parallel numeric vectors from the input list
  months <- inputs[["months"]]
  effect_size <- inputs[["effect_size"]]
  ci_low <- inputs[["ci_low"]]
  ci_high <- inputs[["ci_high"]]
  # Build the plotting data directly. The original went through
  # as.data.frame(cbind(...)), which coerces everything to a single type
  # (silently producing character columns if any input is non-numeric),
  # and then applied a no-op mutate(months = months). data.frame()
  # preserves each vector's type and keeps the same column names.
  df <- data.frame(months = months,
                   effect_size = effect_size,
                   ci_low = ci_low,
                   ci_high = ci_high)
  df %>%
    ggplot() +
    # Dashed reference line at zero effect
    geom_hline(aes(yintercept = 0), color = "grey50", lty = "dashed") +
    # Point estimate at each study end point
    geom_point(aes(y = effect_size, x = months), size = 3) +
    # Vertical 95% CI bar at each end point
    geom_segment(aes(y = ci_low, yend = ci_high, x = months, xend = months), size = 1) +
    theme_minimal() +
    theme(axis.line = element_line(colour = "grey50", size = 1),
          panel.grid.major.y = element_blank(),
          panel.grid.major.x = element_blank(),
          panel.grid.minor = element_blank(),
          legend.position = "none") +
    scale_x_continuous(breaks=months,limits = c(0,max(months)+20)) +
    labs(title = "Estimated Effect Size of Intensive Treatment, Measured at Different Times") +
    ylab("Estimated Effect Size (Bars show 95% CI)") + xlab("Time (months)")
}
#
#
# months <- c(40,80,120,160,200,240)
# effect_size <- c(-0.041, -.062, -.11,-0.23,-0.31,-0.42)
# ci_low <- c(-0.428, -0.505, -.522,-0.61,-0.74,-0.81)
# ci_high <- c(0.324, 0.288, .201,.141,.045,-0.026)
#
# inputs <- list("months" = months,
# "effect_size" = effect_size,
# "ci_low" = ci_low,
# "ci_high" = ci_high)
# effect_plot(inputs)
# ggsave("aeffect_plot.tiff")
|
b5eff21f47830d359f7a402d92339d77da10d6f6
|
2baed5a8a605c42d1e8ba05a341ac1ef63bcfeb2
|
/package/R/data.R
|
e948dffc5816357b8e622235186d27220c207f0c
|
[
"MIT"
] |
permissive
|
dynverse/dynbenchmark
|
0c58ef1a9909b589b1cd8b0a0e69c2fa222aeb2b
|
8dca4eb841b8ce6edfb62d37ff6cfd4c974af339
|
refs/heads/master
| 2022-12-08T04:01:13.746186
| 2022-11-25T08:25:50
| 2022-11-25T08:25:50
| 102,579,801
| 171
| 40
| null | 2019-04-07T20:59:59
| 2017-09-06T07:53:11
|
R
|
UTF-8
|
R
| false
| false
| 769
|
r
|
data.R
|
# Roxygen2 documentation stubs for the package's exported data objects:
# each quoted name below attaches the roxygen block above it to the
# dataset of the same name (the standard roxygen2 pattern for data).
#' Mapping ensembl or entrez ids to symbols
#'
#' @docType data
"id_mapper"
#' Common manual labelling
#'
#' @docType data
"labels"
#' Metadata on error reasons
#'
#' @docType data
"error_reasons"
#' Metadata on topology inference types
#'
#' @docType data
"topinf_types"
#' Metadata on prior types
#'
#' @docType data
"prior_types"
#' Metadata on prior usage types
#'
#' @docType data
"prior_usages"
#' Metadata on non-inclusion reasons
#'
#' @docType data
"non_inclusion_reasons"
#' Metadata on QC applications
#'
#' @docType data
"qc_applications"
#' Metadata on QC categories
#'
#' @docType data
"qc_categories"
#' Metadata on method statuses
#'
#' @docType data
"method_statuses"
#' Colours of method statuses
#'
#' @docType data
"method_status_colours"
|
454606d58f10666675c7ae2ff93dd97c140f6cb0
|
f3e914e8a3ccb1c4d73555321e3eaf52b59f52e0
|
/R/6.3course.R
|
c06046608d43d1b68fdfa305f541d291d6704066
|
[] |
no_license
|
youjia36313/learn_R
|
08be35ebc032839e8c25466c63ae5a0292069855
|
674de3d09e0e7dfec2d3e164ffab98e0c40ca597
|
refs/heads/master
| 2020-09-15T19:39:00.136679
| 2019-11-23T06:37:41
| 2019-11-23T06:37:41
| 223,541,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 353
|
r
|
6.3course.R
|
# Monte Carlo estimate of pi: sample n points uniformly in the square
# [-1, 1] x [-1, 1]; the fraction landing inside the unit circle
# approximates pi / 4.
n <- 100000
x <- runif(n,-1,1)
y <- runif(n,-1,1)
# Vectorized count of points inside the circle. This replaces the original
# element-by-element loop: the random draws happen above, so the stream
# and the resulting count are unchanged.
count <- sum(x^2 + y^2 < 1)
print(count)
4*count/n
# Monte Carlo estimate of the one-sample t-test's type-I error rate:
# samples are drawn from N(0, 1), so H0 (mean = 0) is true and the
# rejection rate should be close to the nominal 5% level.
n_exp <- 10000      # number of simulated experiments
n_sample <- 10      # sample size per experiment
count_1 <- 0        # number of rejections of H0
for(i in 1:n_exp){
  x <- rnorm(n_sample,0,1)
  # t statistic for H0: mean = 0
  t_0 <- mean(x)/sqrt(var(x)/n_sample)
  # two-sided test at alpha = 0.05
  if(abs(t_0)>qt(0.975,n_sample-1)){
    count_1 <- count_1+1
  }
}
count_1/n_exp       # empirical rejection rate
|
8cf0ecf85d8e24d7726183dd9968b0d16af3fafa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/psych/examples/cubits.Rd.R
|
de74a192698300e031ac7a1dd50010b2d35750d8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 587
|
r
|
cubits.Rd.R
|
# Example script from the psych package documentation: Galton's data on
# the relationship between height and cubit (forearm) length.
library(psych)
### Name: cubits
### Title: Galton's example of the relationship between height and 'cubit'
### or forearm length
### Aliases: cubits
### Keywords: datasets
### ** Examples
data(cubits)
cubits
# expand the frequency table into one row per observation
heights <- table2df(cubits,labs = c("height","cubit"))
# correlation ellipse on the raw (discretized) data
ellipses(heights,n=1,main="Galton's co-relation data set")
ellipses(jitter(heights$height,3),jitter(heights$cubit,3),pch=".",
main="Galton's co-relation data set",xlab="height",
ylab="Forearm (cubit)") #add in some noise to see the points
pairs.panels(heights,jiggle=TRUE,main="Galton's cubits data set")
|
a77d7222cf47567e773842fca2db3f2b12e6b9d4
|
51b243709faabb0f7d42017bb804eb3df2d24d5c
|
/RMSD.R
|
90edff33a2b251333fa6092a0fcb70647bf3787c
|
[] |
no_license
|
331065314/Data-Visualization
|
a2b69b017a1fbe04d809bfb7a5b7a68f5c1b93f9
|
36188894f052c191fe1d393826bcaf8a0647acd9
|
refs/heads/master
| 2020-09-07T00:16:02.657104
| 2019-04-06T07:17:19
| 2019-04-06T07:17:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,141
|
r
|
RMSD.R
|
#=============================================================================#
# Author: DoubeeTwT
# Function Name : RMSD
# Usage: RMSD(n=25000,t=0.002,text_size=15,title_size=25,f=c(...))
#
# -n num,number of frames you choose
# -t num,interval time you use in MD
# -text_size num,size of x-axis and y-axis text
# -title_size num,size of x-axis and y-axis title
# -f str,your input files' names
#
# Attention: 1)The input file must be 2 columns(Frames;RMSD Value)
# 2)No more than 12 files input.If you really need to,
# you can add more group names into "groupname" which
# located in the first line of the RMSD function.
#=============================================================================#
# Plot per-group RMSD traces from one or more 2-column input files:
# raw lines (top panel) and smoothed trends (bottom panel) on a shared
# time axis. See the usage header above for the argument conventions.
RMSD <- function(n=25000,t=0.002,text_size=15,title_size=25,f=c(...)){
  # fixed pool of group labels; supports at most 12 input files
  groupname=c("rmsd1","rmsd2","rmsd3","rmsd4","rmsd5","rmsd6",
              "rmsd7","rmsd8","rmsd9","rmsd10","rmsd11","rmsd12")
  len=length(f)
  # stack the first n rows of every input file vertically
  # NOTE(review): with a single file the for loop below still runs
  # (2:1 counts down), duplicating the data -- confirm len >= 2 is assumed
  rmsd=read.table(f[1])[1:n,]
  for (i in 2:len) {
    rmsd=rbind(rmsd,read.table(f[i])[1:n,])
  }
  # tag each block of n rows with its group label
  rmsd=cbind(rmsd,rep(groupname[1:len],each=n))
  colnames(rmsd)=c("num","rmsd","Group")
  library(ggplot2);library(grid)
  # shared theme: bold axis text / titles at the requested sizes
  ts <- theme_bw()
  ts <- ts+theme(axis.text.x=element_text(size=text_size,face="bold"))
  ts <- ts+theme(axis.text.y=element_text(size=text_size,face="bold"))
  ts <- ts+theme(axis.title.x=element_text(size=title_size,face="bold"))
  ts <- ts+theme(axis.title.y=element_text(size=title_size,face="bold"))
  # p1: raw traces; x-axis is frame index * time step; "\uc5" is the
  # Angstrom sign in the y label
  p1=ggplot(data=rmsd,aes(x=num*t,y=rmsd,colour=Group))+geom_line()
  p1=p1+xlab("Time(ns)")+ylab("RMSD(\uc5)")+ts
  # p2: smoothed trend of the same data
  p2=ggplot(data=rmsd,aes(x=num*t,y=rmsd,colour=Group))+geom_smooth()
  p2=p2+xlab("Time(ns)")+ylab("RMSD(\uc5)")+ts
  # stack the two panels vertically using grid viewports
  grid.newpage()
  pushViewport(viewport(layout = grid.layout(2,1)))
  vplayout = function(x, y) viewport(layout.pos.row = x, layout.pos.col = y)
  print(p1, vp = vplayout(1, 1))
  print(p2, vp = vplayout(2, 1))
}
#RMSD(f=c("File name1","File name2",...))
|
4904e002ba77d08ae425db20dbf36a7d48ab6b5e
|
1038d9b180c8e9c591ff4325b4b0a3d45e389816
|
/hw4.R
|
f3369f8682da60d0d06b68f03f5647cac76a81d2
|
[] |
no_license
|
bret-harv/lyman_break_galaxy
|
53024d77b304c0df3b09a01ec07202cf2baf7b94
|
52e0d95066ee3679b9d4b1a0f52092c80603247b
|
refs/heads/master
| 2020-08-22T14:18:43.347449
| 2019-10-20T19:17:37
| 2019-10-20T19:17:37
| 216,413,546
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,162
|
r
|
hw4.R
|
# Run from the command line via, e.g.,
# R CMD BATCH --no-save '--args <template spectrum> <data directory>' hw4.R
#
# Slides a z-scored template spectrum along every spectrum in the data
# directory and records, per file, the smallest Euclidean distance and
# the offset at which it occurs. Results are written to "<dir>.csv",
# sorted by distance.
require("astro")
rm(list = ls())
args = (commandArgs(trailingOnly=TRUE))
if(length(args) == 2){
  template_spectrum = args[1]
  data_directory = args[2]
} else {
  cat('usage: R CMD BATCH "--args <template spectrum> <data directory>" hw4.R', file=stderr())
  stop()
}
# read the template and convert log10-wavelengths to linear wavelengths
template_spectrum = as.data.frame(read.fitstab(paste(template_spectrum)))
template_spectrum_wavelength = 10^template_spectrum$LOGLAM
template_spectrum_flux = template_spectrum$FLUX
template_spectrum = data.frame(template_spectrum, wavelength = template_spectrum_wavelength)
files = list.files(paste(data_directory))
#files = head(files, 3)
# z-score the template flux once, outside the per-file loop
template_spectrum_flux_nm = (template_spectrum_flux - mean(template_spectrum_flux))/sd(template_spectrum_flux)
n = length(files)
bestdislist = rep(0, n)
specID = rep(0, n)
bestdisloc = rep(0, n)
for (i in 1:n) {
  spec = read.fitstab(paste(data_directory, "/", files[i], sep = ""))
  spec = as.data.frame(spec)
  spec$wavelength = 10^spec$loglam
  spec = spec[spec$and_mask == 0,]   # keep only unmasked pixels
  spec_flux_nm = (spec$flux - mean(spec$flux))/sd(spec$flux)
  spec_len = length(spec_flux_nm)
  template_spectrum_len = length(template_spectrum_flux_nm)
  bestdistance = Inf
  # BUG FIX: bestlocation was previously never initialized, so a spectrum
  # shorter than the template either crashed (first file) or silently
  # reused the location found for a *previous* file.
  bestlocation = NA
  if (spec_len >= template_spectrum_len) {
    # slide the template across every valid offset j
    for (j in 1:(spec_len+1-template_spectrum_len)) {
      spec_part = spec_flux_nm[j:(j+template_spectrum_len-1)]
      distance = sqrt(sum((spec_part - template_spectrum_flux_nm)^2))
      if (distance < bestdistance) {
        bestdistance = distance
        bestlocation = j
      }
    }
  }
  bestdislist[i] = bestdistance
  specID[i] = files[i]
  bestdisloc[i] = bestlocation
}
# strip the "spec-" prefix and ".fits" suffix to recover the spectrum ID
specID = gsub(pattern = "spec-", replacement = "", specID)
specID = gsub(pattern = ".fits", replacement = "", specID)
result = data.frame(distance = bestdislist, spectrumID = specID, i = bestdisloc)
result_sorted_by_distance = result[order(result$distance),]
write.table(result_sorted_by_distance, paste(args[2], ".csv", sep = ""), row.names = FALSE, col.names = F, quote = F, sep = ",")
|
06c9717f9facdbb11d9cda827d6dbaa14f53b567
|
f99ec2ade9dcf8e2cf0e21e50ab78b2396747952
|
/man/uflist.Rd
|
dc435d8837e5046297e2ee5ef21d89f58306ea7b
|
[
"MIT"
] |
permissive
|
kimjoaoun/owdbr
|
39d46c2f1cd3b5414a1bbec0892bcd22cf183415
|
bbc696d041936f9689ca44eb9d3b9d8c0e2ad806
|
refs/heads/master
| 2021-07-15T01:48:46.232456
| 2019-08-15T20:30:35
| 2019-08-15T20:30:35
| 182,590,699
| 15
| 3
|
NOASSERTION
| 2019-05-27T02:30:07
| 2019-04-21T22:59:18
|
R
|
UTF-8
|
R
| false
| true
| 1,282
|
rd
|
uflist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uflist.R
\name{uflist}
\alias{uflist}
\title{All the State's UF Codes.}
\usage{
uflist(region = NA)
}
\arguments{
\item{region}{filter the UFs (Units of the Federation or States) by geographical region. If NA, all state are shown. According to the Brazilian Institute of Geography and Statistics, the country is divided in 5 regions: "Norte", "Nordeste", "Centro-Oeste", "Sul" and "Sudeste".}
}
\value{
a tibble with 3 columns.
\describe{
\item{State}{The full name of the state.}
\item{UF}{The abbreviation of the state's name.}
\item{num}{The identification number of the state.}
\item{region}{Region of the country in which the State is located.}
}
}
\description{
Returns a tibble which contains the IBGE identification code of each one of all 23 UFs (Units of the Federation) in Brazil.
}
\examples{
\dontrun{
uflist(region = NA) # show all states of the country.
uflist(region = "Sul") # only exhibit States in the 'Sul' region of the country.
}
}
\references{
Source: \href{https://cidades.ibge.gov.br}{IBGE (Brazilian Institute of Geography and Statistics)}
}
\author{
Joao Pedro Oliveira dos Santos, International Relations Institute, Pontifical Catholic University of Rio de Janeiro
}
|
4fc7c4a8118444fdd62da2049e020fe85cfbb018
|
44a53114076deadba6bd07b4617aed67b62f0585
|
/R/RcppExports.R
|
dbb72c2bce9b5c0f15d536a04a4a635bfd78530e
|
[] |
no_license
|
Dragon-Dane/longBayes
|
ddd199f3c4fbcd8aa364b881aeb16c510ea31e11
|
b80716ed1b1868eedd100b1422d96583708b0630
|
refs/heads/master
| 2023-08-09T03:09:49.971016
| 2018-03-31T12:45:20
| 2018-03-31T12:45:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,029
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): these are auto-generated R wrappers around the package's
# compiled C++ routines. Documentation and changes belong with the C++
# sources (src/); this file is overwritten whenever compileAttributes()
# is re-run, so any edits here will be lost.
rcpparma_hello_world <- function() {
    .Call('_longBayes_rcpparma_hello_world', PACKAGE = 'longBayes')
}
rcpparma_outerproduct <- function(x) {
    .Call('_longBayes_rcpparma_outerproduct', PACKAGE = 'longBayes', x)
}
rcpparma_innerproduct <- function(x) {
    .Call('_longBayes_rcpparma_innerproduct', PACKAGE = 'longBayes', x)
}
rcpparma_bothproducts <- function(x) {
    .Call('_longBayes_rcpparma_bothproducts', PACKAGE = 'longBayes', x)
}
bases <- function(J = 6L) {
    .Call('_longBayes_bases', PACKAGE = 'longBayes', J)
}
pg <- function(ts) {
    .Call('_longBayes_pg', PACKAGE = 'longBayes', ts)
}
lpg <- function(ts) {
    .Call('_longBayes_lpg', PACKAGE = 'longBayes', ts)
}
nsp <- function(spec, TR = 3.0) {
    .Call('_longBayes_nsp', PACKAGE = 'longBayes', spec, TR)
}
atsm <- function(pgram) {
    .Call('_longBayes_atsm', PACKAGE = 'longBayes', pgram)
}
mcmc <- function(y, eta, q, leps = 0.065, L = 15L, lepsrho = 0.13, Lrho = 10L, niter = 2e4L, Ltau = 1000, siga = 1000, sigmu = 1000) {
    .Call('_longBayes_mcmc', PACKAGE = 'longBayes', y, eta, q, leps, L, lepsrho, Lrho, niter, Ltau, siga, sigmu)
}
q_lspec <- function(q, J = 6L) {
    .Call('_longBayes_q_lspec', PACKAGE = 'longBayes', q, J)
}
mc_q <- function(mcsamp, J = 6L, burn = 0L) {
    .Call('_longBayes_mc_q', PACKAGE = 'longBayes', mcsamp, J, burn)
}
lspec <- function(mcsamp, J = 6L, burn = 0L) {
    .Call('_longBayes_lspec', PACKAGE = 'longBayes', mcsamp, J, burn)
}
tsl_mc <- function(tsl, J = 6L) {
    .Call('_longBayes_tsl_mc', PACKAGE = 'longBayes', tsl, J)
}
tsl_q <- function(tsl, J = 6L, burn = 0L) {
    .Call('_longBayes_tsl_q', PACKAGE = 'longBayes', tsl, J, burn)
}
lspl <- function(tsl, J = 6L, burn = 0L) {
    .Call('_longBayes_lspl', PACKAGE = 'longBayes', tsl, J, burn)
}
spl <- function(tsl, J = 6L, burn = 0L) {
    .Call('_longBayes_spl', PACKAGE = 'longBayes', tsl, J, burn)
}
|
6d461a8bbb162f76ede1c6ed3707c522a164d318
|
58bfe374a4376e014629dc3df2a8c74559815010
|
/finalProy/server.R
|
f1880eed97eda5ef4b99831fc72d5594c3d8d152
|
[] |
no_license
|
colombianBP/DevelopingDP
|
5aa99d19931d6614c4229efdb7c0b637351524a3
|
2d84a54bb43eba0a4bbdc66e7ad9db6c79b27337
|
refs/heads/main
| 2023-06-27T07:18:02.619934
| 2021-07-15T17:39:54
| 2021-07-15T17:39:54
| 386,375,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,518
|
r
|
server.R
|
library(shiny)
# Map a human-readable mtcars variable description (as shown in the UI)
# to the corresponding mtcars column name. Unknown descriptions map to ""
# (same fall-through as the original if/else chain). The repetitive
# if/else chain is replaced with an idiomatic switch() lookup; the
# unnamed final argument is switch()'s default.
transform2<-function(x){
  switch(x,
    "Miles/(US) gallon" = "mpg",
    "Number of cylinders" = "cyl",
    "Displacement (cu.in.)" = "disp",
    "Gross horsepower" = "hp",
    "Rear axle ratio" = "drat",
    "Weight (1000 lbs)" = "wt",
    "1/4 mile time" = "qsec",
    "Engine (0 = V-shaped, 1 = straight)" = "vs",
    "Transmission (0 = automatic, 1 = manual)" = "am",
    "Number of forward gears" = "gear",
    "Number of carburetors" = "carb",
    ""
  )
}
# Shiny server: scatterplot of two user-selected mtcars variables, each
# rescaled to percent of its own column maximum, plus a "<A> vs <B>"
# title text output.
shinyServer(function(input, output) {
  output$plot<-renderPlot({
    # (dead code kept from an earlier reactive-based version)
    # val1<-reactive({
    #   state1<-input$state1
    #   transform2(state1)
    # })
    #
    # val2<-reactive({
    #   state2<-input$state2
    #   transform2(state2)
    # })
    # map the human-readable UI selections to mtcars column names
    state1<-input$state1
    state2<-input$state2
    val1<-transform2(state1)
    val2<-transform2(state2)
    # look the columns up by name
    val1<-mtcars[[val1]]
    val2<-mtcars[[val2]]
    # rescale both variables to 0-100 (% of column maximum)
    val1<-val1/(max(val1))*100
    val2<-val2/(max(val2))*100
    plot(val1,val2,xlim =c(0,100),ylim = c(0,100))
  })
  # title shown above the plot: "<selection 1> vs <selection 2>"
  output$text<-renderText({
    state1<-input$state1
    state2<-input$state2
    paste(state1,"vs",state2)
  })
})
|
c0c51397ceddfbecbc4fe804a0032e72acc34072
|
a6efc0d1d7f13d65dff45b9872f2da85127ff5c6
|
/lfp.R
|
5626fa581399835bf8f19335db2455b380d9315d
|
[] |
no_license
|
tigerfromlondon/gamma
|
5686176310073fd49fa58b666eabc217a7a7975d
|
6789694a270ad91caeea2c4e4a82e03d92353798
|
refs/heads/master
| 2021-07-12T20:32:31.088556
| 2016-11-23T15:58:12
| 2016-11-23T15:58:12
| 37,694,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
lfp.R
|
# Parse an LFP sweep table exported as CSV and print derived outcome
# tables. NOTE(review): the input layout is not visible from here -- the
# code assumes four interleaved measurement rows repeating every 4 rows
# (offsets 3..6) and that cbind-ing them yields exactly 12 columns;
# confirm against an actual export.
lfp <- function (filename) {
  #read libraries etc
  library(dplyr)
  #read the lfp table, remake it into nsweepsx12, bind proper names
  lfptab <- read.csv(filename)
  # pick out the four interleaved measurement rows (every 4th row,
  # starting at rows 3, 4, 5, 6 respectively)
  r1 <- lfptab[seq(3, nrow(lfptab), 4), ]
  r2 <- lfptab[seq(4, nrow(lfptab), 4), ]
  r3 <- lfptab[seq(5, nrow(lfptab), 4), ]
  r4 <- lfptab[seq(6, nrow(lfptab), 4), ]
  outcome <- cbind(r1, r2, r3, r4)
  colnames(outcome) <- c("SWEEP","A","B", "FPRE", "FCLA", "FPOS", "APRE", "ACLA", "APOS", "PPRE", "PCLA", "PPOS")
  # F* columns only, plus a pre/post ratio
  fs <- select(outcome, starts_with("F"))
  fs <- as.data.frame(fs)
  fs <- mutate(fs, FOUTCOME = FPRE / FPOS)
  outcome <- mutate(outcome, A.OUTCOME = FCLA + APOS)
  # NOTE(review): as.numeric() on a factor column yields level codes, not
  # values -- confirm FPRE is already numeric after read.csv
  fnum <- as.numeric(outcome$FPRE)
  print(fnum)
  #
  #fs <- vector()
  #for (i in 1:nrow(outcome)) {
  #  foutcome <- (2 * outcome[[i, outcome$FCLA]]) / (outcome[[i, outcome$FPRE]] + outcome[[i, outcome$FPOS]])
  #  print(foutcome)
  #if (foutcome < 0.9) {fs <- c(fs, -1)}
  #}
  print(fs)
  print(outcome)
}
|
2571a2496d1703322b4b08caef89df35a2671801
|
4e6184920e460006e7aaa0e377fbf3723d41a078
|
/R/gbif.R
|
8ce48e7d35cb175a955f93a59034dc54618e3190
|
[] |
no_license
|
dreamRs/conrappli
|
ee5fb0502ace498cd04a8f5e872dca9793c3e86b
|
a8525c63bf33906907796dad12128376b1f14a4c
|
refs/heads/master
| 2023-08-31T13:09:24.261571
| 2023-08-23T13:43:53
| 2023-08-23T13:43:53
| 466,046,452
| 2
| 0
| null | 2023-05-22T08:04:29
| 2022-03-04T08:41:38
|
R
|
UTF-8
|
R
| false
| false
| 2,051
|
r
|
gbif.R
|
#' Search species information
#'
#' @param species_name Character of species names.
#' @param match_type Type of match for scientific names found: exact match or above confidence level specified.
#' @param confidence_level Confidence level to use for matching scientific names.
#'
#' @return A `tibble` with info found for each species provided.
#' @export
#'
#' @importFrom dplyr bind_rows as_tibble filter
#' @importFrom taxize get_gbifid_
#'
#' @examples
#' \dontrun{
#' search_species_info(c("Uapaca niangadoumae"))
#' search_species_info(c("Uapaca niangadoumae"), match_type = "exact")
#' search_species_info(c("Uapaca niangadoumae", "do not exist"))
#' }
search_species_info <- function(species_name, match_type = c("exact", "confidence"), confidence_level = 95) {
  match_type <- match.arg(match_type, several.ok = TRUE)
  # drop duplicates and missing names before querying GBIF
  species_name <- unique(species_name)
  species_name <- species_name[!is.na(species_name)]
  infos <- taxize::get_gbifid_(sci = species_name, method = "backbone")
  infos <- bind_rows(infos, .id = "provided_sciname")
  if (nrow(infos) < 1)
    return(infos)
  # keep plants only
  infos <- filter(infos, kingdom == "Plantae")
  infos <- as_tibble(infos)
  # exact, accepted matches vs. fuzzy matches above the confidence cutoff
  infos_exact <- filter(infos, matchtype == "EXACT" & status == "ACCEPTED")
  infos_conf <- filter(infos, matchtype != "EXACT" & status == "ACCEPTED" & confidence > confidence_level)
  if (identical(match_type, "exact")) {
    return(infos_exact)
  } else if (identical(match_type, "confidence")) {
    # BUG FIX: this branch previously re-tested "exact", making the
    # "confidence"-only selection unreachable
    return(infos_conf)
  } else {
    # both match types requested: return the union
    return(bind_rows(infos_exact, infos_conf))
  }
}
#' Retrieve GBIF occurrences
#'
#' @param specieskey A vector of species key.
#'
#' @return A `tibble`.
#' @export
#'
#' @examples
#' \dontrun{
#'
#' infos <- search_species_info(c("Uapaca niangadoumae"))
#' retrieve_occ_data(infos$specieskey)
#'
#' }
retrieve_occ_data <- function(specieskey) {
  # query GBIF for up to 100000 occurrence records per (deduplicated) key
  res <- rgbif::occ_data(taxonKey = unique(specieskey), limit = 100000)
  # with several taxon keys occ_data() returns a list of results tagged
  # with attr(type) == "many" -- flatten those into one tibble; otherwise
  # return the single result's data slot
  if (identical(attr(res, "type"), "many")) {
    dplyr::bind_rows(lapply(X = res, FUN = `[[`, "data"))
  } else {
    res$data
  }
}
|
fd6e9f2d1c32e44b69250626e5d4e93c0556c804
|
095093d840ce957dc420a7ff010272e4a97cff62
|
/lib/sig+prediction.R
|
b3bea574fb79c85cfa10366fbe54d2805461f48e
|
[] |
no_license
|
YUNLI531/Fall2017-project4-grp8
|
a5c851c0b48ce54c3c98a7d487b5037a1f8668e5
|
fcc89ddb7d0ae341ee8f3fa266e9a166afe58e33
|
refs/heads/master
| 2020-03-09T03:48:29.003539
| 2017-12-06T21:36:37
| 2017-12-06T21:36:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
sig+prediction.R
|
################### web spearman
# Build web-data predictions from three similarity measures (Spearman,
# mean-squared difference, entropy), each with and without significance
# weighting, and save each prediction matrix to ../output.
# predict.general() is defined in the sourced script below.
source("./msweb_spearman-copy.R")
#source("./function+for+making+prediction.R")
load("../output/web.train.matrix.Rdata")
data1 = web.train.matrix
load("../output/web.sp.sim.Rdata")
# min-max normalize the Spearman similarities to [0, 1]
web.sp.sim = (web.sp.sim - min(web.sp.sim))/(max(web.sp.sim)-min(web.sp.sim))
sim1 = web.sp.sim
web.sp.p = predict.general(data1,sim1)
save(web.sp.p,file = "../output/web.sp.p.Rdata")
# mean-squared-difference similarity
data2 = web.train.matrix
load("../output/web.msdiff.sim.Rdata")
sim2 = web.msdiff.sim
web.msd.p = predict.general(data2,sim2)
save(web.msd.p, file = "../output/web.msd.p.Rdata")
# entropy similarity
data3 = web.train.matrix
load("../output/web.etp.sim.Rdata")
sim3 = web.etp.sim
#web.etp.sig.p = predict.general(data3,sim3)
web.etp.p = predict.general(data3,sim3)
save(web.etp.p, file = "../output/web.etp.p.Rdata")
####################################################
###### with significance weighting
# element-wise weighting of each similarity matrix by webtrain_a_matrix_9
# NOTE(review): presumably weights based on co-rating counts (n = 9) --
# confirm in the script that produced this Rdata file
load("../output/webtrain_significance_weighting_n=9.Rdata")
data4 = web.train.matrix
sim4 = sim1*webtrain_a_matrix_9
web.sp.sig.p = predict.general(data4,sim4)
save(web.sp.sig.p, file = "../output/web.sp.sig.p.Rdata")
#################### web msd
data5 = web.train.matrix
sim5 = sim2*webtrain_a_matrix_9
#web.msd.sig.p = predict.general(data2,sim2)
web.msd.sig.p = predict.general(data5,sim5)
save(web.msd.sig.p, file = "../output/web.msd.sig.p.Rdata")
#################### web etp
data6 = web.train.matrix
sim6 = sim3*webtrain_a_matrix_9
#web.etp.sig.p = predict.general(data3,sim3)
web.etp.sig.p = predict.general(data6,sim6)
save(web.etp.sig.p, file = "../output/web.etp.sig.p.Rdata")
|
ed4438c0b3180071d3da92cb1dff3c87cbb409ba
|
53de60b485e5642bd1cc7634eaa8b7bf04caeb27
|
/code/SDFBFunctions/man/format_text_and_split.Rd
|
ef67373d83916b2de75a556a9d9fb846671e4b23
|
[] |
no_license
|
wallingTACC/sdfb_network
|
4061ae0fafef695d189fe13b97b14c66863b8ecf
|
38649a628c1f834767b1c14bdb4402863f5f325a
|
refs/heads/master
| 2020-04-05T23:11:51.984616
| 2017-01-30T17:52:27
| 2017-01-30T17:52:27
| 60,351,123
| 0
| 1
| null | 2016-06-03T13:48:46
| 2016-06-03T13:48:45
|
R
|
UTF-8
|
R
| false
| false
| 834
|
rd
|
format_text_and_split.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/NER_processing.R
\name{format_text_and_split}
\alias{format_text_and_split}
\title{This function does the following:
1. add spaces before punctuation
2. add spaces before/after (), [], {}, as to separate them off => easier to count
3. remove more than one space in a row.
4. Tokenize (split into a word-by-word representation).
4a. Fix problems with ;.}
\usage{
format_text_and_split(text)
}
\arguments{
\item{text}{character vector}
}
\value{
formatted text
}
\description{
This function does the following:
1. add spaces before punctuation
2. add spaces before/after (), [], {}, as to separate them off => easier to count
3. remove more than one space in a row.
4. Tokenize (split into a word-by-word representation).
4a. Fix problems with ;.
}
|
ca564ab71d097102e309f0fdfe976e22cbd0bf42
|
5ffbc2901018132d3a0aea7d640ec04a29dc2127
|
/R/grplot.R
|
3e258711734e14f32612644d8c8317a1bba61f51
|
[] |
no_license
|
dwulff/memnet
|
7282e437e4c91fde96b816b9be6f25215c8da408
|
a176590e6560d5ccf361d860eaf11520a0e16fc7
|
refs/heads/master
| 2020-04-05T14:11:05.609461
| 2018-10-31T07:02:12
| 2018-10-31T07:02:12
| 94,814,857
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,360
|
r
|
grplot.R
|
# -------------------------------------------------------------------------------------------------------------------- #
#
# HELPERS
#
# -------------------------------------------------------------------------------------------------------------------- #
# Circles --------
# Convert an angle in degrees to radians (x / 360 of a full 2*pi turn).
inPi = function(x) (x / 360) * (2 * pi)
# Sample n + 1 angles from 0 to `deg` (degrees) and return a two-column
# matrix: column 1 the angle, column 2 the corresponding x-coordinate on
# a circle of radius `rad` centred at `orig`.
# NOTE(review): `start` is accepted but never used -- confirm whether it
# was meant to offset the starting angle.
x_circ = function(n, deg, rad, orig, start){
  degs = seq(from = 0, to = deg, length.out = n + 1)
  cbind(degs, cos(inPi(degs)) * rad + orig[1])
}
# y-coordinate on the UPPER semicircle of radius `rad` centred at `orig`
# for horizontal position(s) x; abs() guards the square root against
# small negative values from floating-point round-off.
y_circP = function(x, rad, orig) {
  orig[2] + sqrt(abs(rad^2 - (x - orig[1])^2))
}
# y-coordinate on the LOWER semicircle of radius `rad` centred at `orig`
# for horizontal position(s) x; abs() guards the square root against
# small negative values from floating-point round-off.
y_circM = function(x, rad, orig) {
  orig[2] - sqrt(abs(rad^2 - (x - orig[1])^2))
}
# Generate the (x, y) vertices of a `deg`-degree arc of a circle of
# radius `rad` centred at `orig`, sampled at n + 1 angles. For each
# angle the upper or lower square-root branch of y is selected so the
# polygon traces the circle continuously.
circle_raw <- function(n, deg, rad, orig){
  pts <- x_circ(n, -deg, rad, orig)
  angle <- pts[, 1]
  x <- pts[, 2]
  # angles on the lower half of the circle take the negative branch
  lower <- (angle > 180 & angle < 360) |
    (angle > -180 & angle < 0) |
    (angle > 640 & angle < 720)
  ys <- ifelse(lower, y_circM(x, rad, orig), y_circP(x, rad, orig))
  cbind(x, ys)
}
# Draw a circle of radius `rad` centred at `orig` on the current graphics
# device, approximated by an n-vertex polygon; extra arguments in `...`
# (e.g. col, border) are forwarded to graphics::polygon().
circle = function(rad, orig, ..., n = 100){
  graphics::polygon(circle_raw(n,360,rad,orig),...)
}
# Other stuff --------
#' Fast general purpose color mixer
#'
#' Mixes two colors or matching vectors of colors according to some relative weight
#' and exports the result either in rgb or hex format.
#'
#' @param col_1,col_2 character vector of length one or of matching length containing
#' colors either as a color name (see \link{colors}), rgb format (see \link{rgb}), or
#' hex format.
#' @param weight numeric between 0 and 1 specifying the relative mixing weight for color
#' one. E.g., \code{weight = .8} means that final color is composed of 80 percent
#' \code{col_2} and 20 percent \code{col_1}.
#' @param format character string specifying the output format. Either \code{"hex"} or
#' \code{"rgb"}.
#'
#' @return A vector of length \code{max(length(col_1), length(col_2))} containing the
#' mixed colors in the specified format.
#'
#' @examples
#'
#' # mix blue and red with more weight on blue
#' cmix('blue', 'red', .2)
#'
#' # mix blue and red with more weight on red
#' cmix('blue', 'red', .8)
#'
#' # mix blue and red and return as rgb
#' cmix('blue', 'red', .8, format = 'rgb')
#'
#' @export
cmix = function(col_1, col_2, weight, format = 'hex'){
  # number of colors represented by one argument: matrix rows, character
  # entries, or a single rgb triplet for a plain numeric vector
  n_colors = function(col){
    if(is.matrix(col)) nrow(col)
    else if(is.character(col)) length(col)
    else 1L
  }
  # expand one color argument into a target x 3 rgb matrix (one row per
  # output color), tiling shorter inputs.
  # BUG FIX: the original expanded single colors with a column-major
  # matrix() fill, scrambling the r/g/b channels whenever one color had
  # to be recycled against a longer vector (e.g. cmix('blue',
  # c('red','red'), 0) returned green/blue instead of blue/blue).
  expand_col = function(col, target){
    if(is.character(col)){
      rgb_rows = t(grDevices::col2rgb(col))                     # k x 3
      rgb_rows[rep(seq_len(nrow(rgb_rows)), length.out = target), , drop = FALSE]
    } else if(is.matrix(col)){
      col[rep(seq_len(nrow(col)), length.out = target), , drop = FALSE]
    } else {
      # single numeric rgb triplet, replicated row-wise
      matrix(col, ncol = 3, nrow = target, byrow = TRUE)
    }
  }
  lens = c(n_colors(col_1), n_colors(col_2), length(weight))
  target_length = max(lens)
  # test if lens fit (each length must divide the longest)
  if(any(target_length %% lens != 0)){
    stop("Lengths of col_1, col_2, and weight are not equal or multiples of each other")
  }
  # test format
  if(!format %in% c('rgb','hex')) stop('Format can only be "hex" or "rgb"')
  col_1 = expand_col(col_1, target_length)
  col_2 = expand_col(col_2, target_length)
  weight = rep(weight, length.out = target_length)
  # weighted average; weight is the share contributed by col_2
  col = col_1 * (1 - weight) + col_2 * weight
  # out
  if(format == 'rgb') return(col / 255)
  grDevices::rgb(data.frame(col), maxColorValue = 255)
}
# round even
# Round x to the nearest even integer (vectorized). When ordinary
# rounding already lands on an even integer it is kept; otherwise the
# value is pushed to the adjacent integer on the far side. Odd whole
# numbers (e.g. 3) round to themselves, matching the original behavior.
round_even = function(x){
  nearest <- round(x)
  # integer used instead when `nearest` is odd
  pushed <- ifelse(x < 0,
                   ifelse(x < nearest, floor(x), ceiling(x)),
                   ifelse(x >= nearest, ceiling(x), floor(x)))
  ifelse(nearest %% 2 == 0, nearest, pushed)
}
# find complementary color
# Return the complementary color (hue shifted half-way around the hsv hue
# wheel) as a hex string. Input is a color name / hex string or a numeric
# rgb triplet; with multi-color input only the first color's channels are
# used (hsv matrix indexed by [1]..[3], as in the original).
compCol = function(col){
  # ----- coerce input to a 3 x n rgb matrix
  if(is.character(col)){
    rgb_col <- grDevices::col2rgb(col)
  } else if(is.numeric(col)){
    if(length(col) != 3) stop('if numeric, col must be length 3')
    rgb_col <- matrix(col, nrow = 3)
  } else {
    stop('col must be character or numeric')
  }
  # ----- shift the hue by 0.5 (mod 1) and rebuild the color
  hsv_col <- grDevices::rgb2hsv(rgb_col)
  h <- hsv_col[1]
  h <- if(h > .5) h - .5 else h + .5
  grDevices::hsv(h, hsv_col[2], hsv_col[3])
}
# get saturation
# Extract the hsv saturation channel from one or more colors. Accepts a
# character vector of color names / hex strings, or a single numeric rgb
# triplet; returns one saturation value (in [0, 1]) per input color.
get_saturation = function(col){
  # ----- coerce input to a 3 x n rgb matrix
  if(is.character(col)){
    rgb_col <- grDevices::col2rgb(col)
  } else if(is.numeric(col)){
    if(length(col) != 3) stop('if numeric, col must be length 3')
    rgb_col <- matrix(col, nrow = 3)
  } else {
    stop('col must be character or numeric')
  }
  # ----- saturation is the second hsv channel
  hsv_col <- grDevices::rgb2hsv(rgb_col)
  if(is.matrix(hsv_col)) hsv_col[2, ] else hsv_col[2]
}
# find complementary color
# Set the hsv saturation channel of one or more colors and return hex
# strings. `col` is a character vector of colors or a single numeric rgb
# triplet; `saturation` values must lie in [0, 1]. Colors and saturations
# are recycled against each other when one length divides the other.
set_saturation = function(col, saturation = .5){
  # ----- coerce col to a 3 x n rgb matrix
  if(is.character(col)){
    col = grDevices::col2rgb(col)
  } else if(is.numeric(col)){
    if(length(col) == 3){
      col = matrix(col, nrow = 3)
    } else stop('if numeric, col must be length 3')
  } else stop('col must be character or numeric')
  n_col = ncol(col)
  n_sat = length(saturation)
  # check consistency: lengths equal or multiples of each other
  if(!(n_col %% n_sat == 0 | n_sat %% n_col == 0)) stop('Non matching lengths.')
  if(any(saturation > 1)) stop('Saturation must be within 0 and 1.')
  # recycle the shorter side.
  # BUG FIX: the original replicated with length(col) (= 3 * ncol) and
  # with ncol = length(saturation)/ncol(col), producing over-long
  # replacement vectors, recycling warnings, and wrong expansion.
  if(n_col > n_sat) saturation = rep(saturation, n_col / n_sat)
  if(n_col < n_sat) col = matrix(c(col), nrow = 3, ncol = n_sat)
  # ----- overwrite the saturation channel and convert back to hex
  col = grDevices::rgb2hsv(col)
  if(is.matrix(col)){
    col[2, ] = saturation
    col = apply(col, 2, function(x) grDevices::hsv(x[1], x[2], x[3]))
  } else {
    col[2] = saturation
    col = grDevices::hsv(col[1], col[2], col[3])
  }
  # return
  col
}
# -------------------------------------------------------------------------------------------------------------------- #
#
# PLOTS
#
# -------------------------------------------------------------------------------------------------------------------- #
#' Plot graph
#'
#' Custom graph plot using \code{igraph}'s layout functions.
#'
#'
#' @param adj numeric matrix representing the adjacency matrix. Can also be an
#' object of class \code{"igraph"} or an edge list, i.e., a two-column
#' \code{matrix} or \code{data.frame} containing specifying the edges start
#' and end points.
#' @param names optional character vector specifying the node names. Must be of
#' appropriate length.
#' @param layout layout function from the \code{igraph} package. Default is
#' \link[igraph]{layout.fruchterman.reingold}.
#' @param nod_col character vector of length 1 or length |V| specifying the
#' node colors.
#' @param nod_cex numeric speciying the size of the node circles.
#' @param nod_shadow logical specifiying whether nodes should have shadows. Node
#' shodow color is created from darkening \code{node_col}.
#' @param edg_col character vector of length 1 of length |E| specifying the edge
#' line colors.
#' @param edg_lwd numeric vector of length 1 of length |E| specifying the edge
#' line widths.
#' @param lab_col character vector of length 1 of length |V| specifying the
#' text label colors.
#' @param lab_cex numeric vector of length 1 of length |V| specifying the
#' text label sizes.
#' @param lab_lwd numeric vector of length 1 of length |V| specifying the
#' width of the lines connecting the text label to the nodes.
#' @param lab_lcol character vector of length 1 of length |E| specifying
#' specifying the color of the lines connecting the text label to the nodes.
#' @param lab_grid_size integer specifying the grid size used to place the node
#' labels. Canvas is split in \code{lab_grid_size} and labels are placed into
#' cells closest to the associated node.
#' @param lab_padding numeric vector of length 2 specifying the spacing among
#' labels and between labels and nodes on the x and y dimension.
#'
#' @return nothing. A plot is created in \link{dev.cur}.
#'
#' @examples
#'
#' \dontrun{
#' # get fluency data
#' data(animal_fluency)
#'
#' # edge list of fluency graph
#' edge_list = threshold_graph(animal_fluency[1:20])
#'
#' # get adjacency matrix
#' adj = edg_to_adj(edge_list)
#'
#' # plot
#' network_plot(adj)
#' }
#'
#' @export
network_plot = function(adj,
                        names = NULL,
                        layout = igraph::layout.fruchterman.reingold,
                        nod_col = "#E0EED4",
                        nod_cex = 3,
                        nod_shadow = TRUE,
                        edg_col = 'grey25',
                        edg_lwd = 1.5,
                        lab_col = 'black',
                        lab_cex = 1,
                        lab_lwd = 1,
                        lab_lcol = 'grey25',
                        lab_grid_size = 48,
                        lab_padding = c(3, 3)){
  # ------ get graph
  # Accept an igraph object, a two-column edge list (matrix or data.frame),
  # or a square adjacency matrix. NOTE: a 2 x 2 matrix is interpreted as an
  # edge list, never as an adjacency matrix.
  if(!inherits(adj, 'igraph')){
    if(is.matrix(adj)){
      if(ncol(adj) == 2) {
        g = igraph::graph_from_edgelist(adj)
      } else if(ncol(adj) == nrow(adj)){
        g = igraph::graph_from_adjacency_matrix(adj)
      } else stop('adj has inappropriate format')
    } else if(is.data.frame(adj)) { # BUG FIX: was is.data.frame(g); g is not defined yet
      if(ncol(adj) == 2) {
        g = igraph::graph_from_edgelist(as.matrix(adj))
      } else stop('adj has inappropriate format')
    } else {
      stop('adj has inappropriate format')
    }
  } else {
    g = adj
  }
  # ------ extract edges and node names
  edg = igraph::as_edgelist(g)
  if(is.null(names)){
    # fall back to the graph's own name attribute, then to plain indices
    if('name' %in% names(igraph::vertex_attr(g))){
      names = igraph::vertex_attr(g)$name
    } else {
      names = 1:igraph::vcount(g)
      message('labeled nodes 1:network_size')
    }
  } else if(igraph::vcount(g) != length(names)) stop('length of names must match number of nodes')
  # ------ create grid
  # lab_grid_size x lab_grid_size grid of candidate label positions spanning
  # the (slightly padded) unit square; `free` marks cells still available
  grd = expand.grid('x'=seq(-.05,1.05,length=lab_grid_size),
                    'y'=seq(-.05,1.05,length=lab_grid_size))
  grd[,c('row','col')] = expand.grid('row'=1:lab_grid_size,'col'=1:lab_grid_size)
  grd$id = 1:nrow(grd)
  grd$pos = apply(expand.grid(1:lab_grid_size,1:lab_grid_size),1,paste0,collapse='_')
  grd$free = rep(TRUE, nrow(grd))
  # ------ get layout
  # apply layout function, then rescale both coordinates to [0, 1]
  # NOTE(review): the rescaling divides by the coordinate range and yields
  # NaN for degenerate layouts where all x (or all y) coincide -- TODO confirm
  lyt = do.call(layout, list(g))
  rownames(lyt) = names
  lyt[,1] = (lyt[,1]-min(lyt[,1]))/max(lyt[,1]-min(lyt[,1]))
  lyt[,2] = (lyt[,2]-min(lyt[,2]))/max(lyt[,2]-min(lyt[,2]))
  # ------ clear nodes space
  # block out grid cells around every node so labels are not drawn on top
  vert_lim = lab_padding[1]
  horiz_lim = lab_padding[2]
  for(i in 1:nrow(lyt)){
    # find closest grid point
    pnt = lyt[i,]
    dif = sqrt((grd$x - pnt[1])**2 + (grd$y - pnt[2])**2)
    sel = which(dif==min(dif))[1]
    # clear rectangle around layout point
    pos = unlist(grd[sel,c('row','col')])
    rct = expand.grid(pos[1]+c(-horiz_lim : -1, 0, 1 : horiz_lim),
                      pos[2]+c(-vert_lim : -1, 0, 1 : vert_lim))
    rem = apply(rct,1,paste0,collapse='_')
    grd$free[grd$pos %in% rem] <- FALSE
  }
  # ------ place labels
  # greedily assign each label to the nearest free grid cell and block out
  # the cells it covers (width estimated from the label's character count)
  vert_lim = 1
  txt.pos = lyt
  for(i in 1:nrow(lyt)){
    # determine horizontal space needed by this label
    wid = (.022*nchar(names[i])**.92)/2
    horiz_lim = ceiling(wid*lab_grid_size)
    # find closest non-taken grid position
    pnt = lyt[i,]
    tst = subset(grd, grd$free == TRUE)
    dif = sqrt((tst$x - pnt[1])**2 + (tst$y - pnt[2])**2)
    sel = which(dif==min(dif))[1]
    # clear space around label
    pos = unlist(tst[sel,c('row','col')])
    rct = expand.grid(pos[1]+c(-horiz_lim : -1, 0, 1 : horiz_lim),
                      pos[2]+c(-vert_lim : -1, 0, 1 : vert_lim))
    rem = apply(rct,1,paste0,collapse='_')
    grd$free[ grd$pos %in% rem] = FALSE
    # set text position
    id = tst$id[sel]
    txt.pos[i,] = c(grd[id,1],grd[id,2])
  }
  # ------ Plot
  # set canvas
  graphics::plot.new()
  graphics::par(mar=c(.5,.5,1.5,.5))
  graphics::plot.window(c(-.06,1.06),c(-.06,1.06))
  # recycle per-node label-line settings, then draw dotted connector lines
  # from each node to its label position
  if(length(lab_lwd) == 1) lab_lwd = rep(lab_lwd[1], nrow(lyt))
  if(length(lab_lcol) == 1) lab_lcol = rep(lab_lcol[1], nrow(lyt))
  for(i in 1:nrow(lyt)){
    graphics::lines(c(lyt[i, 1], txt.pos[i, 1]),
                    c(lyt[i, 2], txt.pos[i, 2]),
                    lty = 3,lwd = (lab_lwd[i] - .5) ** .5, col = lab_lcol[i])
  }
  graphics::points(txt.pos,pch=16,lwd=lab_lwd,col='white',cex = lab_cex)
  # draw edges
  if(length(edg_col) == 1) edg_col = rep(edg_col[1], nrow(edg))
  if(length(edg_lwd) == 1) edg_lwd = rep(edg_lwd[1], nrow(edg))
  for(i in 1:nrow(edg)){
    from=lyt[edg[i, 1],]
    to=lyt[edg[i, 2],]
    graphics::lines(c(from[1], to[1]), c(from[2], to[2]),
                    lwd = edg_lwd[i], col = edg_col[i])
  }
  # draw nodes, optionally with a darker, slightly offset shadow point
  if(length(nod_cex) == 1) nod_cex = rep(nod_cex[1], nrow(lyt))
  if(length(nod_col) == 1) nod_col = rep(nod_col[1], nrow(lyt))
  for(i in 1:nrow(lyt)){
    if(isTRUE(nod_shadow)) {
      graphics::points(lyt[i,1]-.004, lyt[i,2]-.004,
                       pch = 16, col = cmix(nod_col[i], 'black', .8), cex = nod_cex[i])
    }
    graphics::points(lyt[i, 1], lyt[i, 2], pch = 16,
                     col = nod_col[i], cex = nod_cex[i])
  }
  # semi-transparent white rectangles behind the labels for readability
  for(i in 1:nrow(txt.pos)){
    n = nchar(names[i])
    graphics::rect(txt.pos[i,1]-.007*n**.92,
                   txt.pos[i,2]-.015,
                   txt.pos[i,1]+.007*n**.92,
                   txt.pos[i,2]+.015,
                   col=grDevices::rgb(1,1,1,alpha=.3),border=NA)
  }
  # draw labels
  if(length(lab_cex) == 1) lab_cex = rep(lab_cex[1], nrow(lyt))
  if(length(lab_col) == 1) lab_col = rep(lab_col[1], nrow(lyt))
  graphics::text(txt.pos[, 1], txt.pos[, 2], labels = names,
                 font = 1, cex = lab_cex, col = lab_col)
}
#' Neighborhood plot
#'
#' Plot k-Neighborhood of given node containing all nodes with distances
#' larger than k from given node.
#'
#' @param adj numeric matrix representing the adjacency matrix. Can also be an
#' object of class \code{"igraph"} or an edge list, i.e., a two-column
#' \code{matrix} or \code{data.frame} containing specifying the edges start
#' and end points.
#' @param names optional character vector specifiying the node names.
#' @param node integer specifying the row index (within the adjacency
#' matrix) of the node whose the neighborhood should be plotted.
#' Alternatively the node name.
#' @param k integer specifying the size of the neighborhood. Specifically,
#' the plot will contain all nodes that are \code{k} or fewer steps away
#' from \code{v}.
#' @param nod_col character vector of length 1 specifying the node colors.
#' @param nod_shading logical specifying whether the node colors should be shaded
#' as a function of the distance to \code{node}.
#' @param \dots arguments to be passed to \link{network_plot}.
#'
#' @return nothing. A plot is created in \link{dev.cur}.
#'
#'@examples
#'
#' \dontrun{
#' # get fluency data
#' data(animal_fluency)
#'
#' # edge list of fluency graph
#' edge_list = threshold_graph(animal_fluency[1:40])
#'
#' # get adjacency matrix
#' adj = edg_to_adj(edge_list)
#'
#' # plot
#' neighborhood_plot(adj, node = 'dog', k = 2)
#' }
#'
#' @export
neighborhood_plot = function(adj, names = NULL, node, k = 2, nod_col = "#E0EED4", nod_shading = TRUE, ...){
  # helper: resolve node names from the graph's name attribute, fall back to
  # indices, or validate user-supplied names against the node count
  get_graph_names = function(g, names){
    if(is.null(names)){
      if('name' %in% names(igraph::vertex_attr(g))){
        names = igraph::vertex_attr(g)$name
      } else {
        names = 1:igraph::vcount(g)
        message('labeled nodes 1:network_size')
      }
    } else if(igraph::vcount(g) != length(names)) stop('length of names must match number of nodes')
    # out
    names
  }
  # ------ get graph
  # Accept an igraph object, a two-column edge list (matrix or data.frame),
  # or a square adjacency matrix; normalize everything to a dense adjacency
  # matrix `adj` plus a `names` vector.
  if(!inherits(adj, 'igraph')){
    if(is.matrix(adj)){
      if(ncol(adj) == 2) {
        g = igraph::graph_from_edgelist(adj)
        adj = igraph::get.adjacency(g, sparse = FALSE)
        names = get_graph_names(g, names)
      } else if(nrow(adj) != ncol(adj)){
        stop('adj has inappropriate format')
      } else {
        # square adjacency matrix; honor user-supplied names if given
        # (previously they were silently ignored in this branch)
        if(is.null(names)){
          if(is.null(rownames(adj))){
            names = 1:nrow(adj)
            message('labeled nodes 1:network_size')
          } else {
            names = rownames(adj)
          }
        } else if(length(names) != nrow(adj)) stop('length of names must match number of nodes')
      }
    } else if(is.data.frame(adj)) { # BUG FIX: was is.data.frame(g); g is not defined yet
      if(ncol(adj) == 2) {
        g = igraph::graph_from_edgelist(as.matrix(adj))
        adj = igraph::get.adjacency(g, sparse = FALSE)
        names = get_graph_names(g, names)
      } else stop('adj has inappropriate format')
    } else {
      # BUG FIX: this branch previously referenced an undefined `g` when
      # `adj` already was an igraph object; use `adj` directly instead
      stop('adj has inappropriate format')
    }
  } else {
    names = get_graph_names(adj, names)
    adj = igraph::get.adjacency(adj, sparse = FALSE)
  }
  # ------ get neighborhood
  # resolve `node` to a row index of the adjacency matrix
  if(is.character(node)){
    if(!node %in% rownames(adj)){
      stop('node name not found')
    } else {
      node = which(node == rownames(adj))
    }
  } else if(is.numeric(node)){
    if(!node %in% 1:nrow(adj)) stop('node index is too large')
  }
  # get_neighborhood (package-internal): nodes within k steps of `node`,
  # presumably returning index + distance columns -- see its definition
  hood = get_neighborhood(adj, node, k)
  if(isTRUE(nod_shading)){
    # invert distances so closer nodes get more saturated colors
    hood[,2] = abs(hood[,2] - max(hood[,2]))
    cols = set_saturation(nod_col, (hood[,2] + .3)/max((hood[,2] + 1) + .6))
  } else {
    cols = rep(nod_col, nrow(hood))
  }
  # ------ reduce graph to the neighborhood
  colnames(adj) = names
  adj = adj[hood[,1], hood[,1]]
  # ------ graph plot
  network_plot(adj, nod_col = cols, ...)
}
#'
#' #' Circular network plot
#' #'
#' #' Plot the network as a circular graph
#' #'
#' #' @param g graph object of class \code{igraph}. Alternatively, a
#' #' matrix or data.frame with the first two columns specifying the
#' #' from- and to-nodes.
#' #' @param v integer specifying the row index (within the adjacency
#' #' matrix) of the node whose the neighborhood should be plotted.
#' #' Alternatively the node name, provided g allows for the extraction
#' #' of a name attribute.
#' #' @param k integer specifying the size of the neighborhood. Specifically,
#' #' the plot will contain all nodes that are \code{k} or fewer steps away
#' #' from \code{v}.
#' #' @param \dots arguments to be passed to \link{graph_plot}.
#' #'
#'
#' # load data
#' sim = readRDS('2 Clean Data/Tablet_SimRatings.RDS')
#'
#' sim_cats = readr::read_delim('0 Material/sim_words_translation.txt',
#' delim=' ',
#' col_names=c('word','translation','category'))
#'
#' # sim$in_cat = f.utils::replace_cc(sim$left_word,
#' # sim_cats$word,
#' # sim_cats$category) ==
#' # f.utils::replace_cc(sim$right_word,
#' # sim_cats$word,
#' # sim_cats$category)
#' # print(sim %>% group_by(group, in_cat, pair_label) %>%
#' # summarize(mr = mean(norm_rating)) %>%
#' # filter(!in_cat & mr > .6),n=100)
#' #
#'
#' # pair label
#' sim$pair_label = ifelse(sim$left_word>sim$right_word,
#' paste(sim$left_word,sim$right_word,sep='_'),
#' paste(sim$right_word,sim$left_word,sep='_'))
#'
#' # extract test
#' sim_test = subset(sim, part == 'test')
#'
#' # aggregate
#' sim_agg = sim_test %>%
#' group_by(group, pair_label) %>%
#' summarize(weight = mean(norm_rating)) %>%
#' mutate('from' = sapply(stringr::str_split(pair_label,'_'),`[`,1),
#' 'to' = sapply(stringr::str_split(pair_label,'_'),`[`,2)) %>%
#' ungroup()
#'
#'
#'
#' #
#' nodes = unique(c(sim_agg$from, sim_agg$to))
#'
#' l = memnet:::circle_raw(length(nodes),360,1,c(0,0))[-(length(nodes)+1),]
#' l_t = memnet:::circle_raw(length(nodes),360,1.07,c(0,0))[-(length(nodes)+1),]
#' l_t2 = memnet:::circle_raw(length(nodes),360,1.14,c(0,0))[-(length(nodes)+1),]
#'
#' cats = read.table('~/Dropbox (2.0)/Work/Projects/Memory/-- AgingLexicon/0 Material/Tablet_categories.txt',sep='\t')
#' n_cats = memnet:::match_cc(nodes,cats[,1],cats[,2])
#'
#' # order nodes
#' nodes = nodes[order(n_cats)]
#' n_cats = n_cats[order(n_cats)]
#'
#' sim_agg$from_ind = f.utils::replace_cn(sim_agg$from, nodes, 1:length(nodes))
#' sim_agg$to_ind = f.utils::replace_cn(sim_agg$to, nodes, 1:length(nodes))
#'
#' # translate
#' trans = read.table('~/Dropbox (2.0)/Work/Projects/Memory/-- AgingLexicon/0 Material/sim_words_translation.txt')
#' node_trans = f.utils::replace_cc(nodes,trans[,1],trans[,2])
#'
#' # split and process
#' sim_old = sim_agg %>% filter(group == 'old') %>% select(from_ind, to_ind, weight)
#' sim_young = sim_agg %>% filter(group != 'old') %>% select(from_ind, to_ind, weight)
#'
#'
#' # multiply colors
#' node_col = rgb(224,238,212,maxColorValue = 255)
#' #if(length(node_col) != nrow(l)) node_col = rep(node_col[1], nrow(l))
#'
#'
#' cmix = function(x,y,r=.5) {
#' rgb(data.frame(matrix(rowSums(cbind(r*col2rgb(x),(1-r)*col2rgb(y))),nrow=1)),
#' maxColorValue=255)
#' }
#'
#' pal = c(cmix(node_col,'black',.7),cmix(node_col, 'white', .7),cmix(node_col,'black',.7),cmix(node_col,'white',.7))
#' c_cols = colorRampPalette(pal)(length(unique(n_cats)))
#' n_cols = memnet:::match_cc(n_cats,unique(n_cats),c_cols)
#' node_col = n_cols
#'
#' # older adults
#'
#' png('5 Figures/older_sim_network.tiff',width=1000,height=1000,bg='transparent')
#'
#' plot.new();par(mar=c(0,0,0,0));plot.window(xlim=range(l_t[,1])*1.6,ylim=range(l_t[,2])*1.6)
#' points(l_t, col = node_col, pch = 16, cex = 5)
#'
#' for(i in 1:nrow(sim_old)) {
#' if(sim_old$weight[i] > .4){
#' from = l[sim_old$from_ind[i],]
#' to = l[sim_old$to_ind[i],]
#' lines(c(from[1],to[1]),c(from[2],to[2]), lwd = (sim_old$weight[i] + .5)**5,col=cmix(rgb(224,238,212,maxColorValue = 255),'black',.25))
#' }
#' }
#'
#' for(i in 1:length(nodes)) text(l_t2[i,1],l_t2[i,2],labels=node_trans[i],srt=seq(360,0,length.out = length(nodes))[i],adj=0, cex= 2)
#'
#' dev.off()
#'
#'
|
1ca5c03824511e75939e020c18467a0cc7eebcc9
|
a0f0fbdb1e7f3e13def6b0b7c549554aca39fce3
|
/bikeSharing.r
|
6be471ca6867faf2a9b3420b1bb21d43d5066b44
|
[] |
no_license
|
git874997967/Kaggle-porject-3
|
05840c326376808d5bc24d1188bec9eb54160420
|
413acc4695b1c41fd2f8733b9623710dbd76280f
|
refs/heads/master
| 2021-04-30T06:32:12.310236
| 2018-02-20T19:56:45
| 2018-02-20T19:56:45
| 121,446,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,661
|
r
|
bikeSharing.r
|
# Kaggle "Bike Sharing Demand" script: feature engineering plus random-forest
# models for registered and casual rentals, fit on the log scale and
# back-transformed for the submission file.
train<-read.csv('train.csv')
test<-read.csv('test.csv')
test$registered=0# placeholder: test set has no registered-user rental counts; needed so rbind works
test$casual=0# placeholder: test set has no casual-user rental counts; needed so rbind works
test$count=0# placeholder: test set has no total rental counts; needed so rbind works
data<-rbind(train,test)# combine train and test for joint feature engineering
table(is.na(data))# check for missing values
# Descriptive analysis: histograms show the distribution of each (mostly nominal) predictor
hist(data$season)
hist(data$weather)
hist(data$humidity)
hist(data$windspeed)
hist(data$holiday)
hist(data$workingday)
hist(data$temp)
hist(data$atemp)
library(lubridate)# package for date/time handling
data$hour1<-as.factor(hour(data$datetime))# hour of day via hour(), stored as a factor (nominal)
data$hour2<-substr(data$datetime,12,13)# hour of day extracted as a character substring
train<-data[as.integer(day(data$datetime))<20,]# days < 20 of each month form the training set
# For nominal predictors, boxplots of the outcomes suggest whether they matter
boxplot(train$count~train$hour1,xlab="hour",ylab="count")# total rentals by hour
boxplot(train$registered~train$hour1,xlab="registered",ylab="count")# registered-user rentals by hour
boxplot(train$casual~train$hour1,xlab="casual",ylab="count")# casual-user rentals by hour
data$day<-weekdays(as.Date(data$datetime))# day-of-week label (locale-dependent!)
table(data$day)
train<-data[as.integer(day(data$datetime))<20,]# refresh training subset after adding `day`
table(day(data$datetime))
boxplot(train$count~train$day,xlab="day",ylab="count")# total rentals by weekday
boxplot(train$casual~train$day,xlab="day",ylab="casual")# casual rentals by weekday
boxplot(train$registered~train$day,xlab="day",ylab="registered")# registered rentals by weekday
boxplot(train$registered~train$weather,xlab='day',ylab='registered')# registered rentals by weather
boxplot(train$casual~train$weather,xlab='day',ylab='casual')# casual rentals by weather
boxplot(train$count~train$weather,xlab='day',ylab='count')# total rentals by weather
head(train)
# For numeric predictors, correlate the outcomes with the candidate variables.
# NOTE(review): train$register relies on partial `$` matching to `registered`
# -- fragile; TODO confirm and spell out the full column name.
cor_data<-data.frame(train$count,train$register,train$casual,train$temp,train$atemp,train$humidity,train$windspeed)
cor(cor_data)# Pearson correlations of rentals with temp/atemp/humidity/windspeed
data$year<-year(data$datetime)# extract the year
train<-data[as.integer(day(data$datetime))<20,]
boxplot(train$count~train$year,xlab='year',ylab='count')
# Feature engineering
library(rpart)
library(rpart.plot)
train$hour1<-as.integer(train$hour1)# back to integer so rpart can split on it
# Decision trees supply split points for binning continuous variables into classes
d=rpart(registered~hour1,data=train)
rpart.plot(d)
data$hour1<-as.integer(data$hour1)
# Bin registered users' rental hours into 7 classes (cut points follow the rpart splits)
data$dp_reg<-0
data$dp_reg[data$hour1<7.5]=1
data$dp_reg[data$hour1>=22]=2
data$dp_reg[data$hour1>=9.5&data$hour1<18]=3
data$dp_reg[data$hour1>=7.5&data$hour1<8.5]=4
data$dp_reg[data$hour1>=8.5&data$hour1<9.5]=5
data$dp_reg[data$hour1>=20&data$hour1<22]=6
data$dp_reg[data$hour1>=18&data$hour1<20]=7
# Bin casual users' rental hours into 4 classes
data$dp_cas=0
f=rpart(casual~hour1,data=train)
rpart.plot(f)
data$dp_cas[data$hour1<8.5]=1
data$dp_cas[data$hour1>=8.5&data$hour1<10]=2
data$dp_cas[data$hour1>=20]=3
data$dp_cas[data$hour1>=10&data$hour1<20]=4
data$temp_reg<-0
g=rpart(registered~temp,data=train)
rpart.plot(g)
# Bin temperature into 4 classes
data$temp_reg[data$temp<13]=1
data$temp_reg[data$temp>=13&data$temp<23]=2
data$temp_reg[data$temp>=23&data$temp<30]=3
data$temp_reg[data$temp>=30]=4
# Split the two years into quarters (8 levels across 2011-2012; later
# assignments overwrite earlier ones, so the last matching rule wins)
data$year_part=0
data$month<-month(data$datetime)
data$year_part[data$year=='2011']=1
data$year_part[data$year=='2011'&data$month>3]=2
data$year_part[data$year=='2011'&data$month>6]=3
data$year_part[data$year=='2011'&data$month>9]=4
data$year_part[data$year=='2012']=5
data$year_part[data$year=='2012'&data$month>3]=6
data$year_part[data$year=='2012'&data$month>6]=7
data$year_part[data$year=='2012'&data$month>9]=8
# Day type: working day, holiday, or weekend.
# NOTE(review): workingday==1 is first labelled 'holiday' and then largely
# overwritten by 'working day' (holiday==0 & workingday==1); only rows with
# holiday==1 & workingday==1 keep 'holiday'. Looks inverted -- TODO confirm.
data$day_type=""
data$day_type[data$holiday==0&data$workingday==0]='weekend'
data$day_type[data$workingday==1]='holiday'
data$day_type[data$holiday==0&data$workingday==1]='working day'
data$weekend=0
data$weekend[data$day=="星期六"|data$day=="星期日"]=1# Chinese-locale weekday labels for Saturday/Sunday
data$hour1=as.factor(data$hour1)
data$day=as.factor(data$day)
data$day_type=as.factor(data$day_type)
# Modeling
library(randomForest)
set.seed(451)
train<-data[as.integer(day(data$datetime))<20,]
test<-data[as.integer(day(data$datetime))>=20,]
train$logreg<-log(train$registered+1)# log(x+1): tames the outliers seen in the boxplots and avoids log(0)
train$logcas<-log(train$casual+1)# log(x+1): avoids log(0)
fit1<-randomForest(logreg~hour1+workingday+day+holiday+day_type+temp_reg+humidity+atemp+windspeed+season+weather+dp_reg+weekend+year_part,data=train,importance=TRUE,ntree=250)# registered-user model; ntree = number of trees, importance=TRUE tracks variable importance
pred1<-predict(fit1,test)
test$logreg<-pred1
set.seed(451)
fit2<-randomForest(logcas~hour1+workingday+day+holiday+day_type+temp_reg+humidity+atemp+windspeed+season+weather+dp_reg+weekend+year_part,data=train,importance=TRUE,ntree=250)# casual-user model
pred1<-predict(fit1,test)# NOTE(review): redundant -- pred1 was already computed above
pred2<-predict(fit2,test)
test$logcas<-pred2
test$registered=exp(test$logreg)-1# back-transform from the log scale
test$casual=exp(test$logcas)-1# back-transform from the log scale
test$count=test$registered+test$casual# total rentals = registered + casual
submit_final<-data.frame(datetime=test$datetime,count=test$count)
write.csv(submit_final,"sb1.csv")
|
94d94ee0b906daa329daf155ba7a8172c892ee97
|
42ff40e63ba5c362f8067a7503aa7d1aa2263109
|
/scripts/MismatchRate_ELP.R
|
028bafe8232ebada8b5c0985944833211159be2d
|
[] |
no_license
|
mfisher5/PCod-Korea-repo
|
686820f87625e5a7ea68c97c9286b783199c267e
|
90324e4431292fda757abb364bcc002dd4117e7e
|
refs/heads/master
| 2021-01-01T16:37:52.932264
| 2018-08-20T23:12:16
| 2018-08-20T23:12:16
| 81,503,317
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,420
|
r
|
MismatchRate_ELP.R
|
# This code was written by Eleni and Dan on July 18, 2017
# Function of the code:
# This code will compare replicated samples for genotype mismatches.
# Genotype mismatch rate is defined as = (number of mismatches)/(number of loci genotyped in both samples being compared)
# 1. Reads in a matrix of genotypes. Columns = samples, rows = loci.
# 2. The samples you want to compare to each other must be right next to each other on the spreadsheet. Sample1 = odd column; sample2 = even column
# 3. It will calculate the mismatch rate in each sample pair!
# Set working directory
# NOTE(review): hard-coded local Windows path; consider a relative path or here::here()
setwd("D:/sequencing_data/Herring_DissertationCH1/scripts_R")
# Read in the data tables from tab-delimited text files
mydata1 <- read.delim("MismatchRate_JuvenileSamples_NaOCltoNaOCl_3502loci.txt", row.names = 1)
mydata2 <- read.delim("MismatchRate_JuvenileSamples_NulltoNaOCl_3502loci.txt", row.names = 1)
# Compute the genotype mismatch rate for every replicate pair in a genotype
# matrix (columns = samples, rows = loci; adjacent columns form a pair and a
# genotype of 0 denotes a missing call).
# mismatch rate = 1 - (matching genotypes) / (loci genotyped in both samples)
# Returns a numeric vector with one rate per pair. This replaces the two
# copy-pasted loops of the original script.
compute_mismatch_rates <- function(genotypes) {
  pair_starts <- seq(1, ncol(genotypes) - 1, 2)
  vapply(pair_starts, function(i) {
    # loci genotyped (non-zero) in both members of the pair
    typed <- which(genotypes[, i] != 0 & genotypes[, i + 1] != 0)
    1 - sum(genotypes[typed, i] == genotypes[typed, i + 1], na.rm = TRUE) / length(typed)
  }, numeric(1))
}
# NaOCl-NaOCl treatment comparison
mismatch_rate1 <- compute_mismatch_rates(mydata1)
mean(mismatch_rate1)
sd(mismatch_rate1)
# Null-NaOCl comparison
mismatch_rate2 <- compute_mismatch_rates(mydata2)
mean(mismatch_rate2)
sd(mismatch_rate2)
## Metadata for plotting; group sizes are derived from the data instead of
## being hard-coded (was rep(..., 8) and rep(..., 16))
Treatment <- c(rep("NaOCl", length(mismatch_rate1)),
               rep("Null vs. NaOCl", length(mismatch_rate2)))
mismatch_vector <- c(mismatch_rate1, mismatch_rate2)
final_df <- data.frame(mismatch_vector, Treatment)
# Plot the data!
library(ggplot2)
theme_set(theme_classic())
myplot <- ggplot(final_df, aes(Treatment, mismatch_vector, fill = Treatment)) +
  geom_boxplot() +
  labs(x="\nComparison of replicated juvenile samples",
       y="Genotyping mismatch rate\n") +
  theme(text = element_text(size = 14), plot.margin = unit(c(1,1,1,1), "cm"))
myplot
|
9909d3ff9b701cb5cf9507a1f3eb9217494acef9
|
a89f82ced9166fd96963897d31100d021698d14b
|
/app.R
|
12b45f5672cee82822fbab5e6f4f44c99396452f
|
[
"MIT"
] |
permissive
|
BriGitBardot/CommunicationMethods
|
5644ed7ed6db940d1d086e0f3c500c57ea236b0c
|
3ba65afaae9c8afb5489f2acc5c22312f8a6975d
|
refs/heads/master
| 2021-01-22T10:02:31.405617
| 2016-09-20T17:48:41
| 2016-09-20T17:48:41
| 31,183,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,188
|
r
|
app.R
|
# JavaScript snippet injected into the UI: once the Shiny session connects,
# report the browser window width to the server as input$GetScreenWidth.
# The server uses this value for responsive sizing of the rCharts plots.
jscode <-
  '$(document).on("shiny:connected", function(e) {
  var jsWidth = $(window).width();
  Shiny.onInputChange("GetScreenWidth",jsWidth);
  });'
# loading the necessary packages into the session
library(shiny)
library(rCharts)
library(plyr)
library(RColorBrewer)
library(ggplot2)
library(shinythemes)
# Load the methods dataset from the app's data/ folder. Reading through a
# relative file.path() avoids mutating the working directory (the original
# setwd()/reset dance); semicolon-separated, with "NA"/empty/blank as NA.
methods = read.csv(file.path("data", "methods_eng_agg.csv"),
                   header = TRUE, sep = ";",
                   na.strings = c("NA", "", " "))
# Shiny app: UI (fluidPage with three content tabs + About) and server
# (two rCharts/nvd3 plots and a data table). String fixes vs. original:
# mojibake repaired and user-visible typos corrected; target="blank"
# normalized to "_blank" for consistency.
shinyApp(
  ui = fluidPage(
    # custom fonts plus CSS tweaks for control labels and headline sizing
    tags$head(tags$style(HTML("
      @import url('//fonts.googleapis.com/css?family=Pacifico|Righteous|Rock Salt|Permanent Marker|Waiting for the Sunrise|Indie Flower|Architects Daughter|Handlee:400,700');
      .control-label {
        color: #37C4A8;
      }
      .single {
        width: 120px;
      }
      @media (min-width: 410px){
        h2 div {
          font-size: 62px;
      }}
    "))
    ),
    # inject the screen-width reporter defined as `jscode` above
    tags$script(jscode),
    # add CSS theme
    theme = shinytheme("flatly"),
    # title
    titlePanel(title = div("Communication Research Methods", style = "padding-left: 1.5%; padding-top: 25px; padding-bottom: 15px; font-family:'Handlee'; color: #1ABC9C"), windowTitle = "Communication Research Methods"),
    # create sidebar; the embedded media queries stack sidebar and main
    # panel vertically on narrow screens
    sidebarPanel(
      tags$head(tags$style(HTML("
        @media (max-width: 1255px) AND (min-width: 470px){
          .col-sm-4 {
            float:left;
            width: 100%;
          }
          .col-sm-8 {
            float:left;
            width: 100%;
          }
          img{
            max-width:350px;
          }}
        @media (max-width: 1255px) AND (min-width: 970px){
          img{
            margin-left:55%;
            padding-left: 5%
          }}
      "))
      ),
      #textOutput("results"),
      helpText('This application features interactive graphics and tables that display the uses of the three basic methods - interview/survey, observation, and content analysis - in the field of communication research during the years 2000 to 2015.'),
      helpText('You can modify the layout of these plots by deselecting one or more of the labels from the legends above the plots. The plots also feature tooltips that tell the exact value for a specific data point when you hover over it.'),
      helpText(paste('The results rely on an extensive content analysis of 32 scientific journals conducted by the '), a("Department of Communication", href="http://www.uni-muenster.de/KOWI",
               target= "_blank"),' at the University of Münster. In sum, 6953 empirical articles were coded.'),
      helpText("To meet the scientific requirement of transparency, the entire dataset can be downloaded in the 'Data Table' tab."),
      helpText(paste('For more details on the project please confer '), a("this publication", href="http://www.halem-verlag.de/2014/beobachtungsverfahren-in-der-kommunikationswissenschaft/", target="_blank"), "."),
      p(span(strong("Note: This application is optimized for desktop browsers. If you access it from a mobile device,
        we strongly recommend changing to landscape mode.", class="help-block" , style="color:#000000"))),
      a(href="http://www.uni-muenster.de/", target= "_blank", img(src = "wwu.png", width= "40%", style = "margin-left: 30%"))
    ),
    # create main panel
    mainPanel(
      tags$head(tags$style(HTML("
        @media (max-width: 960px)AND (min-width: 470px){
          svg{
            width: 100%;
            viewBox: 0 0 w h;
          }
        }
        @media (max-width: 860px)AND (min-width: 470px){
          svg{
            width: 100%;
            viewBox: 0 0 w h;
          }
          img{
            min-width:200px;
          }}
      "))
      ),
      tabsetPanel(type = "tabs",
        # tab for display of methods across time
        tabPanel("Across Time",
          # draw plot
          br(),
          helpText(' '),
          helpText('This tab depicts the distribution of the three canonic methods during the past years.'),
          br(),
          helpText(h3("Data Gathering Methods used in Communications Science", align="center", style = "color:#37C4A8"),
                   h4("[from 2000 to 2015]", align="center", style = "color:#838484")),
          div(showOutput("myChart1", "nvd3"))
        ),
        # tab for display of methods across different media, fields of study, or subjects
        tabPanel("By Medium, Field, and Subject",
          br(),
          helpText(' '),
          helpText('This tab provides detail on the different areas in which the methods occur.
            Depending on your selection, the data will either render for research on different media, research subjects or fields of study.'),
          # NOTE(review): names("Medium") evaluates to NULL, so the first
          # choice is selected by default -- TODO confirm intent
          selectInput('selection', 'How shall the methods be grouped?', c("by medium" = "Medium","by field" = "Field", "by subject" = "Subject"),
                      selected=names("Medium")),
          helpText(h3("Research on Different Media, Subjects, and Fields of Study", align="center", style = "color:#37C4A8"),
                   h4("by Data Gathering Method", align="center", style = "color:#838484")),
          showOutput("myChart2", "nvd3")
        ),
        # data table tab
        tabPanel("Data Table",
          br(),
          helpText('This tab offers the opportunity to navigate through all
            6953 cases of the underlying dataset.'),
          helpText('Click on the arrow keys to sort the columns or use the search function to access specific cases.'),
          helpText(paste('Yet, this dataset only contains an aggregated version of the original data. If you want to inspect
            and/or work with the original data, you can download the entire dataset '), a("in csv format",
            href="methods_eng_agg.csv", target="_blank"), (' (438 kBs, english labels) or '), a("SPSS format", href="methoden.sav", target="_blank"), (' (1667 kBs, german labels).')
          ),
          br(),
          dataTableOutput("mytable1")
        ),
        # about tab
        tabPanel("About",
          helpText(h3('General')),
          helpText(paste('This app was developed in RStudio [release 0.99.467, incorporating R version 3.2.1. (64-bit)] and relies on the packages '),
                   a("data.table", href="https://cran.r-project.org/web/packages/data.table/index.html",
                     target= "_blank"), ', ',
                   a("RColorBrewer", href="https://cran.r-project.org/web/packages/RColorBrewer/index.html",
                     target= "_blank"),', ',
                   a("ggplot2", href="https://cran.r-project.org/web/packages/ggplot2/index.html",
                     target= "_blank"),', ',
                   a("shiny", href="https://cran.r-project.org/web/packages/shiny/index.html",
                     target= "_blank"),', ',
                   a("shinythemes", href="https://cran.r-project.org/web/packages/shinythemes/index.html",
                     target= "_blank"), ', ',
                   a("rCharts", href="https://ramnathv.github.io/rCharts/",
                     target= "_blank"), ', and ',
                   a("plyr", href="https://cran.r-project.org/web/packages/plyr/index.html",
                     target= "_blank"), '.'),
          helpText(h3('Citing the App')),
          helpText('If you want to cite the content of this app, please refer to it as:'),
          p(span("Hamachers, A., & Gehrau, V. (2016). ", em(" Communication Research Methods (Version 1.2)"), " [online application].
            Available from http://shinika.shinyapps.io/CommunicationMethods.", class="help-block")),
          helpText(h3('Source Code & Contact')),
          helpText(paste('For full reproducibility the source code for the entire app can be retrieved from '),
                   a("this Github repository", href="https://github.com/BriGitBardot/CommunicationMethods",
                     target= "_blank"), '. Feel free to fork and/or contribute!'),
          helpText(paste('If you have any further questions or advice regarding this project, feel also free to '),
                   a("mail us", href="mailto:annika.hamachers@googlemail.com",
                     target= "_blank"), '!'),
          hr()
        )
      )
    )
  ),
  server = function(input, output, session) {
    # debug observer: logs the reported screen width to the console
    obs <- observe({
      cat(input$GetScreenWidth)
    })
    # NOTE(review): assigned but never displayed (textOutput("results") is
    # commented out in the UI); kept for parity with the original app
    output$results<- reactive({input$GetScreenWidth})
    # Creating the time series plot as output for the first tab panel.
    # NOTE(review): input$GetScreenWidth is NULL until the client JS fires;
    # consider req(input$GetScreenWidth) before the sizing branch.
    output$myChart1 <- renderChart({
      # aggregate yearly frequencies per method and stack the four frames
      A <- ddply(methods, .(Year), summarize,
                 Freq=sum(Observation=="yes", na.rm= TRUE))
      A["Method"] <- "Observation"
      B <- ddply(methods, .(Year), summarize,
                 Freq=sum(Survey=="yes", na.rm= TRUE))
      B["Method"] <- "Interview"
      C <- ddply(methods, .(Year), summarize,
                 Freq=sum(Content.Analysis=="yes", na.rm= TRUE))
      C["Method"] <- "Content Analysis"
      D <- ddply(methods, .(Year), summarize,
                 Freq=sum(Other=="yes", na.rm= TRUE))
      D["Method"] <- "Other"
      methods_line <- rbind(A, B, C, D)
      # line chart, one series per method
      n1 <- nPlot(
        Freq ~ Year,
        data = methods_line,
        group = "Method",
        type = "lineChart")
      # define colors for the plot (entries 3-6 of the Set2 palette)
      colors <- brewer.pal(6, "Set2")
      colors <- colors[3:6]
      # Add axis labels, colors, and format the tooltip
      n1$yAxis(axisLabel = "Frequency", width = 62)
      n1$xAxis(axisLabel = "Year")
      n1$chart(color = colors)
      # responsive sizing based on the browser width reported by jscode
      if (input$GetScreenWidth < 900) {
        n1$params$width <- 600
        n1$params$height <- 300
      } else {
        n1$params$width <- 800
        n1$params$height <- 400
      }
      n1$chart(tooltipContent = "#! function(key, x, y){
        return '<h3>' + key + '</h3>' +
        '<p>' + y + ' in ' + x + '</p>'
        } !#")
      n1$set(dom = "myChart1")
      print(n1)
    })
    # Creating the bar plot as output for the second tab panel
    output$myChart2 <- renderChart({
      # aggregate frequencies by the user-selected grouping variable
      # (Medium, Field, or Subject) for each method, stack the four
      # frames, and drop rows with missing grouping values
      E <- ddply(methods, input$selection, summarize,
                 Freq=sum(Observation=="yes", na.rm= TRUE))
      E["Method"] <- "Observation"
      G <- ddply(methods, input$selection, summarize,
                 Freq=sum(Survey=="yes", na.rm= TRUE))
      G["Method"] <- "Interview"
      H <- ddply(methods, input$selection, summarize,
                 Freq=sum(Content.Analysis=="yes", na.rm= TRUE))
      H["Method"] <- "Content Analysis"
      I <- ddply(methods, input$selection, summarize,
                 Freq=sum(Other=="yes", na.rm= TRUE))
      I["Method"] <- "Other"
      methods_bar <- rbind(E, G, H, I)
      methods_bar = methods_bar[complete.cases(methods_bar),]
      # grouped bar chart whose grouping follows the user's selection
      n2 <- nPlot(Freq ~ Method, group = input$selection, data = methods_bar, type = 'multiBarChart');
      # define colors
      colors2 <- brewer.pal(8, "Set2")
      # Add axis labels and colors
      n2$yAxis(axisLabel = "Frequency", width = 62, tickFormat = "#!d3.format('.0f')!#")
      n2$xAxis(axisLabel = "Method")
      n2$chart(color = colors2)
      # responsive sizing, same scheme as myChart1
      if (input$GetScreenWidth < 900) {
        n2$params$width <- 600
        n2$params$height <- 300
      } else {
        n2$params$width <- 800
        n2$params$height <- 400
      }
      n2$set(dom = "myChart2")
      return(n2)
    })
    # Creating the data table as output for the third tab panel
    output$mytable1 = renderDataTable({
      methods
    }, options = list(lengthMenu = c(10, 25, 50, 100), scrollX=TRUE, pageLength = 5, orderClasses = TRUE))
  })
|
75ffed2cb26bb82bc5966f381b00d25f0859976a
|
56ea25a737d282ed49b40ede9ea670f05c41a9ea
|
/Harris_Matrix/HarrisMatrix_URCode.R
|
126c2abcbc7ea42cae618872cb59b3af2534c65c
|
[] |
no_license
|
crystalptacek/Stratigraphy
|
02972ae6a8e61b5a5f102936e9f90b1cc0c30431
|
4dc008bdae12535d8ff3b61cf3818938feed12bf
|
refs/heads/master
| 2022-03-25T07:38:29.615851
| 2019-12-09T19:44:46
| 2019-12-09T19:44:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,759
|
r
|
HarrisMatrix_URCode.R
|
# Filename: HarrisMatrix_URCode.R
# Purpose: Sections 1-5 pull Stratigraphic Information from the Context tables and
#          get them into a form that works for ArchEd. There are several data
#          integrity checks. There is an option to replace individual
#          contexts with their SG.
#          Section 6 uses the Rgraphviz package to visualize relationships in a
#          different way. Serves as a check for ArchEd, which does not tell you
#          where errors are, only that they exist.
#
# By: FDN 11.26.2013
# Last Update: FDN 01.14.2018
# Section 6 by DTW 12.09.2019 Updated Rgraphviz package
#############################################################################
# One-time setup: install the Rgraphviz package. Rgraphviz is distributed
# through Bioconductor, not CRAN, so it must be installed via BiocManager.
# Run this once, then comment out or delete.
if (!requireNamespace("BiocManager", quietly = TRUE)) # install BiocManager first if absent
install.packages("BiocManager")
BiocManager::install(c("Rgraphviz"))
#############################################################################
# libraries needed
library(DBI)
library(RPostgreSQL)
library("Rgraphviz")
library(glue)
library(tidyr)
library(dplyr)
# Establish a DBI connection to DAACS PostgreSQL database and submit SQL queries
#Link to file with database password
source("credentials.R")
# Pull every Context for the chosen project together with its Stratigraphic
# Group (SG), Feature number, and each recorded stratigraphic relationship.
# One row per (context, relatedContext, relationship) triple; rows are the
# raw input for all later recoding/consistency checks.
# NOTE(review): DRCcon is presumably the DBI connection object created by
# credentials.R (sourced above) -- confirm before running.
# get the contexts, SGs, features and their related contexts
# use the "WHERE" clause to choose the Project and contexts
# FYI: correlation names do NOT need to be double quoted
csr <- dbGetQuery(DRCcon,'
SELECT
a."ProjectID",
a."Context" as "context",
a."DAACSStratigraphicGroup" as "SG",
a."FeatureNumber",
c."StratRelType" as "relationship",
b."Context" as "relatedContext"
FROM "tblContext" a
join "tblContextStratRel" b
on a."ContextAutoID"=b."ContextAutoID"
join "tblContextStratRelType" c
on b."StratRelTypeID"=c."StratRelTypeID"
WHERE ( a."ProjectID" = \'100\'
)
ORDER BY a."Context"
')
## 1. Recode the relationships to Arch-Ed .lst format ########
# Direction convention: in DAACS, "A Seals B" implies A is *above* B, but the
# Arch-ED .lst format writes that as "A below: B" (Arch-ED then draws B below
# A).  So the DAACS relationship is deliberately "reversed" here.
# A single named lookup table replaces the nine repeated assignments.  Any
# relationship type not listed (or an NA relationship) maps to NA, matching
# the original's fall-through behavior while avoiding the original's error
# on NA relationship values.
# NOTE(review): "Contained By" and "Contains" BOTH map to "above" -- by
# symmetry with Seals/Sealed By one of them would be expected to be "below".
# Preserved as-is from the original; confirm against DAACS conventions.
archEdRelMap <- c(
  "Sealed By"       = "above",
  "Seals"           = "below",
  "Intruded By"     = "above",
  "Intrudes"        = "below",
  "Contained By"    = "above",
  "Within"          = "below",
  "Correlates With" = "equal to",
  "Contains"        = "above",
  "Contemporary W"  = "equal to"
)
# as.character() guards against the relationship column arriving as a factor
# (a factor would index the lookup table by integer level, not by name)
csr$archEdRel <- unname(archEdRelMap[as.character(csr$relationship)])
## 1.1 Normalize missing SG and FeatureNumber values #########
# DB NULLs arrive in R as NA while some rows carry '' -- force everything to
# '' so later string comparisons (e.g. SG == "") behave uniformly.
csr$SG[is.na(csr$SG)] <- ''
sum(is.na(csr$SG))               # sanity check: should now print 0
csr$FeatureNumber[is.na(csr$FeatureNumber)] <- ''
sum(is.na(csr$FeatureNumber))    # sanity check: should now print 0
# 1.2 Write out a CSV snapshot of the DAACS data for troubleshooting.
# (Filename typo "Relationhips" kept for compatibility with existing users.)
write.csv (csr, file="contextStratigraphicRelationhips.csv")
## 2.This section does some data consistency checks. #####
## 2.1 Check for context values that appear in the context field but not the related context field and v.v.#####
# These need to be fixed in the database BEFORE going further.
# NOTE: bare paste() calls below only display their message when the script
# is run interactively / with echo -- they are not message()/print() calls.
uniqueCxt<-unique(csr$context)
uniqueRelCxt<-unique(csr$relatedContext)
# contexts that never appear on the relatedContext side
cxtOrphans<-uniqueCxt[!(uniqueCxt %in% uniqueRelCxt)]
paste("the following contexts do not appear as relatedContexts:",cxtOrphans)
# relatedContexts that never appear on the context side
relCxtOrphans<-uniqueRelCxt[!(uniqueRelCxt %in% uniqueCxt)]
paste("the following relatedContexts do not appear as contexts:",relCxtOrphans)
# 2.2 The cxtNoSG file may contain contexts that you REALLY do not want in the analysis,
# for example, if you are only analyzing a subset of contexts for a given project.
# To get rid of them, run the following line
# (drops every row whose relatedContext has no matching context row)
csr <-subset(csr, !(csr$relatedContext %in% relCxtOrphans))
# 2.3 Find "equal to" context pairs that have no SG assignments.
# If there are any, fix them BEFORE going further. ALL "correlated with" contexts
# need to belong to the same SG.
cxtNoSG<-csr$context[(csr$archEdRel=="equal to") & (csr$SG == "")]
paste("the following Contexts have 'equal to' relationships but have no SG assigned:"
      ,cxtNoSG)
## 2.4 Find any "equal to" pairs of contexts and related contexts that have DIFFERENT SG assignments.######
# If there are any, these need to be fixed (e.g. by making the SGs the same) BEFORE going further.
# First we have to assign SG's to related contexts...
# get a list of unique contexts and their SGs and put them in a new dataframe
relatedCxtSG<- unique(data.frame(csr$context,csr$SG,stringsAsFactors=F))
# rename the SG variable and the Context variable in the new dataframe
names(relatedCxtSG)[names(relatedCxtSG)=="csr.SG"] <- "relatedSG"
names(relatedCxtSG)[names(relatedCxtSG)=="csr.context"] <- "relatedContext"
# merge the new related context and related SG data frame with the orignal context and SG dataframe
# we match merge records on the common RelatedContext field and keep everything in the orginal context table
csr1<-merge(csr,relatedCxtSG, by="relatedContext",all.x=T)
# sort the result on SG, relatedSG, archEdRel -- the adjacency scan in 2.5
# below DEPENDS on exactly this sort order (it compares row i to row i+1)
sortedCsr1 <- csr1[order(csr1$SG, csr1$relatedSG, csr1$archEdRel),]
# reorder the cols for convenience
# NOTE(review): hard-coded column positions -- assumes the 8-column layout
# produced by the merge above; revisit if the query adds columns
sortedCsr1 <- sortedCsr1[c(2,3,4,5,6,7,1,8)]
# Now we look for contexts and related contexts that are "equal to" each other by have different SGs
diffSGVec<-(sortedCsr1$archEdRel=="equal to") & (sortedCsr1$SG != sortedCsr1$relatedSG)
differentSGs <- sortedCsr1[diffSGVec,]
paste("the following Contexts have 'equal to' relationships but have Different SG assignments:")
differentSGs
## 2.5 Context/RelatedContext and SG/RelatedSG stratigraphic consistency check ########
# Check to make sure the above and below relationships among contexts that
# belong to different SGs are consistent: contexts that belong to the
# a given SG should all have the same relationships to related contexts that all
# belong to a second SG. This code chunk finds non-matching relationships. The steps are:
# - Loop through the sorted data to find the cases where relationships do not match.
# - Note that we exclude contexts that have been assigned to the same SG
#   on the assumption that SG assignment is correct. We checked that in the previous step.
badRelationTF<- rep(F,nrow(sortedCsr1))
for (i in 1:(nrow(sortedCsr1)-1)) {
  # only worry about this if BOTH context and related cxt have SG assignments
  # orginal code had a bug here:' ', not ''
  if ((sortedCsr1$SG[i] != '') & (sortedCsr1$relatedSG[i] !='')) {
    # row i is flagged when rows i and i+1 describe the same SG pair but
    # disagree about the relationship direction
    # are the SGs at row (i) and row (i+1) the same?
    badRelationTF[i] <- (sortedCsr1$SG[i] == sortedCsr1$SG[i+1]) &
      # are the related SGs the same?
      (sortedCsr1$relatedSG[i] == sortedCsr1$relatedSG[i+1]) &
      # are the archEd relations different?
      (sortedCsr1$archEdRel[i] != sortedCsr1$archEdRel[i+1]) &
      # this is the bit that excludes contexts assigned to the same SG
      (sortedCsr1$SG[i] != sortedCsr1$relatedSG[i])
  }
}
# also flag the row AFTER each flagged row, so both halves of each
# contradictory pair appear in the report
badRelationTF[(which(badRelationTF == T)+1)]<-T
paste(table(badRelationTF)[2],
      "There are contradictory relationhips among contexts belonging to different SGs. Check the exported file 'badRelations.csv' for details")
badRelation <- sortedCsr1[badRelationTF,]
badRelation
write.csv(badRelation, file="badRelation.csv")
## 3. This section preps the data in the format required by ArchEd ########
## 3.1 Set up a Dataframe to store the results. Its rows are all possible combinations of Contexts and relatedContexts
# NOTE: this is the full cartesian product (n^2 rows) -- fine for a single
# project's contexts, but memory grows quadratically with context count.
allCxt <- unique( c(uniqueCxt,uniqueRelCxt))
HMData <- expand.grid(allCxt,allCxt,stringsAsFactors=F)
colnames(HMData)<-c("cxt","relCxt")
# "NA" here is deliberately the *string* "NA", not a real NA -- section 3.4
# filters on archEdRel=="NA" to drop unrelated pairs
HMData$archEdRel<-"NA"
## 3.2 Assign the reciprocal relationships (e.g. A>B, B<A)
# NOTE(review): if any csr$archEdRel value is NA (an unmapped relationship
# type) the scalar if() tests below will error -- section 1 is assumed to
# have mapped every relationship; confirm.
for (i in 1: nrow(csr)) {
  # identify the context and its related context in the data from DAACS
  thisCxt<-csr$context[i]
  thisRelCxt<-csr$relatedContext[i]
  # find the two locations in HMData: the (cxt, relCxt) cell and its mirror
  loc1 <- which(HMData$cxt==thisCxt & HMData$relCxt== thisRelCxt)
  loc2 <- which(HMData$cxt==thisRelCxt & HMData$relCxt== thisCxt)
  # assign the relationship and its reciprocal in the mirrored cell
  HMData$archEdRel[loc1]<-csr$archEdRel[i]
  if (csr$archEdRel[i]=="above") {HMData$archEdRel[loc2]<-"below"}
  if (csr$archEdRel[i]=="below") {HMData$archEdRel[loc2]<-"above"}
  if (csr$archEdRel[i]=="equal to") {HMData$archEdRel[loc2]<-"equal to"}
}
# check on the results
table(HMData$archEdRel)
## 3.3 If you want to set the Contexts that belong to the same SG as "equal to", run this block.
# allSG / allCxtSG are also reused later by sections 6.3a and 6.5.
allSG <- unique(csr$SG)
allSG <- allSG[allSG != ""]
allCxtSG <- unique(data.frame(csr$context, csr$SG, stringsAsFactors = FALSE))
# seq_along() instead of 1:length(): if no SGs exist, 1:length(allSG) would
# be c(1, 0) and the loop body would run twice with bogus indices.
for (i in seq_along(allSG)) {
  thisSG <- allSG[i]
  # all Contexts belonging to this SG
  cxtForThisSG <- allCxtSG$csr.context[which(allCxtSG$csr.SG == thisSG)]
  # mark every pair where BOTH sides belong to this SG as "equal to"
  equalToIndex <- (HMData$cxt %in% cxtForThisSG) & (HMData$relCxt %in% cxtForThisSG)
  HMData$archEdRel[equalToIndex] <- "equal to"
}
# check on the results
table(HMData$archEdRel)
## 3.4 get rid of context pairs in the HM data file without relationships #########
# (archEdRel=="NA" compares against the sentinel *string* set in 3.1)
HMData<-HMData[!(HMData$archEdRel=="NA"),]
# get rid of context pairs that are the same -- keeping them cause Arch Ed to blow up.
HMData<-HMData[!(HMData$cxt==HMData$relCxt),]
# merge the SGs into the HM data file by Cxt
HMData<-merge(HMData,allCxtSG, by.x="cxt",by.y="csr.context",all.x=T)
# sort the HM data on the SG, contexts, and relationship. Needed to write the output.
sortedHMData<-with(HMData,
                   HMData[order(csr.SG,cxt,archEdRel,relCxt),])
## 3.5 Run this next block _IF_ you want to to replace the contexts with their SGs #########
# NOTE: this OVERWRITES the sortedHMData built just above; the .lst writer
# in section 5 uses whichever version ran last.
HMData1<-merge(HMData,relatedCxtSG, by.x="relCxt", by.y="relatedContext", all.x=T)
# NOTE(review): the scalar if() tests below assume csr.SG / relatedSG contain
# no NA after the merges -- that holds only because section 2.2's subset
# removed orphan relatedContexts; confirm if 2.2 was skipped.
for (i in 1:nrow(HMData1)){
  # substitute the SG name for the context wherever an SG exists
  if (HMData1$csr.SG[i] != "") {
    HMData1$cxt[i]<-HMData1$csr.SG[i]
  }
  if (HMData1$relatedSG[i] != "") {
    HMData1$relCxt[i]<-HMData1$relatedSG[i]
  }
}
HMData1 <- subset(HMData1, select=c(cxt,relCxt,archEdRel))
# keep only records where ctx and relCtx are not the same value
## DTW 3/22/2018 - this also gets rid of DAACS data entry errors where a Context that is not part of an SG is above/below itself
HMData1 <- HMData1[HMData1$cxt != HMData1$relCxt,]
# get rid of redundant records
HMData1<-unique(HMData1)
sortedHMData<-HMData1[order(HMData1$cxt, HMData1$relCxt, HMData1$archEdRel),]
# 5. This section defines functions and then uses them to write the data out in ArchEd .lst format ######
# define functions that will help with writing the output file
first.case <- function(myVec) {
  # Flags the first occurrence of each run of equal values in a sorted vector.
  #
  # Args:
  #   myVec: a sorted atomic vector (equal values must be adjacent)
  # Returns:
  #   a logical vector, TRUE at the first element of each run of equal values
  #
  # Vectorized rewrite of the original element-by-element loop.  Also fixes an
  # edge case: the old loop was driven by 1:length(myVec), so an empty input
  # made it iterate once and return TRUE; it now returns logical(0).
  n <- length(myVec)
  if (n == 0) {
    return(logical(0))
  }
  # an element starts a run iff it differs from its predecessor;
  # the first element always starts a run
  c(TRUE, myVec[-1] != myVec[-n])
}
last.case <- function(myVec) {
  # Flags the last occurrence of each run of equal values in a sorted vector.
  #
  # Args:
  #   myVec: a sorted atomic vector (equal values must be adjacent)
  # Returns:
  #   a logical vector, TRUE at the last element of each run of equal values
  #
  # Vectorized rewrite of the original element-by-element loop.  Also fixes an
  # edge case: the old loop was driven by 1:length(myVec), so an empty input
  # made it iterate once and return TRUE; it now returns logical(0).
  n <- length(myVec)
  if (n == 0) {
    return(logical(0))
  }
  # an element ends a run iff it differs from its successor;
  # the last element always ends a run
  c(myVec[-n] != myVec[-1], TRUE)
}
# Flag the first/last occurrences needed by the .lst writer below.
# NOTE(review): the original computed this identical set of five vectors
# twice in a row (copy/paste duplication); the redundant second copy has been
# removed -- the recomputation was pure, so results are unchanged.
# NOTE(review): if section 3.5 ran, sortedHMData has no csr.SG column and
# firstSG is computed from NULL; it is unused by the writer, so this is
# harmless, but confirm whether firstSG is still needed at all.
firstSG<-first.case(sortedHMData$csr.SG)
firstCxt<-first.case(sortedHMData$cxt)
firstArchEdRel<-first.case(sortedHMData$archEdRel)
lastArchEdRel<-last.case(sortedHMData$archEdRel)
lastCxt<-last.case(sortedHMData$cxt)
## write the output file to the current working directory
## Emits the relationships in Arch-Ed .lst format: each context heading line
## is followed by indented "above:/below:/equal to:" lines listing related
## contexts, comma-separated within each relationship group.
file.create("output.lst")
for (i in 1:nrow(sortedHMData)) {
  # start a new heading line whenever the context changes
  if (firstCxt[i] == T) {
    cat(paste(sortedHMData$cxt[i]),"\n", file="output.lst",append=TRUE)
  }
  # start a new relationship line on a new context or a new relationship type
  if ((firstCxt[i] == T) | (firstArchEdRel[i]==T)) {
    cat (paste("   ", sortedHMData$archEdRel[i]), ": ", sep="",
         file="output.lst", append=TRUE)
  }
  cat(paste(sortedHMData$relCxt[i]), sep="", file="output.lst", append=TRUE)
  # comma-separate related contexts within the same relationship group
  if ((lastArchEdRel[i] == F) & (lastCxt[i]==F)) {
    cat(", ", sep="", file="output.lst", append=TRUE)
  }
  # close the line at the end of a relationship group or of the context
  if ((lastArchEdRel[i] == T) | (lastCxt[i]==T)) {
    cat("\n", file="output.lst",append=TRUE)
  }
}
# NOTE(review): this trailing if() sits OUTSIDE the for loop, so it only
# tests the stale loop index left over from the final iteration (where
# lastCxt is always TRUE) -- it looks like leftover code that never fires;
# confirm and remove.
if (lastCxt[i]==F) {
  cat("   ,", file="output.lst",append=TRUE)
}
##### 6 - Plotting HM data via Graphviz and RGraphviz - by DTW 03.16.2018 #####
## assign global node shape to ellipse and allow the ellipse to resize itself to surround the node's text
## NOTE(review): "graphssize" is not a standard Graphviz attribute name
## (likely meant "size"); it is probably ignored silently -- confirm.
attrs <- list(node=list(shape="ellipse", fixedsize=FALSE, graphssize = "34, 22"))
## Section 6.1 - Saving HM data in Graphviz format
## Standalone Graphviz program requires double quotes around each context. Put double quote into a variable:
doublequote <- '"'
## Add a new column to csr1 - it will be the 'Above' Context
## for loop will put the relatedContext field into the 'Above' column when the relationship is 'above'
## it will also put the context field into the 'Above' column when the relationship is 'below'
for (i in 1:nrow(csr1)){
  if(csr1$archEdRel[i]=='above') {
    csr1$GraphVizAbove[i] <- paste(doublequote, csr1$relatedContext[i], doublequote, sep="")
  } else if(csr1$archEdRel[i]=='below'){
    csr1$GraphVizAbove[i] <- paste(doublequote, csr1$context[i], doublequote, sep="")
  } else {csr1$GraphVizAbove[i] <- NA} # Graphviz can't handle 'equal to' relationships, so mark as NA (dropped by na.omit below)
}
## Graphviz requires a "->" to denote one context is above another. Add this to a new column
csr1$GraphVizArrow <- '->'
## Add a new column to csr1 - it will be the 'Below' context
## This section is similar to the For statement above
## it will put the context field into the 'Below' column when the relationship is 'above'
## it will also put the relatedContext into the 'Below' column when the relationship is 'below'
for (i in 1:nrow(csr1)){
  if(csr1$archEdRel[i]=='above') {
    csr1$GraphVizBelow[i] <- paste(doublequote, csr1$context[i], doublequote, sep="")
  } else if(csr1$archEdRel[i]=='below'){
    csr1$GraphVizBelow[i] <- paste(doublequote, csr1$relatedContext[i], doublequote, sep="")
  } else {csr1$GraphVizBelow[i] <- NA}
}
## Create new data frame that has just 3 columns - GraphvizAbove, GraphvizArrow and GraphvizBelow
GraphVizTable <- data.frame(csr1$GraphVizAbove, csr1$GraphVizArrow, csr1$GraphVizBelow)
## Remove rows that don't have values in GraphvizAbove or GraphvizBelow (rows where archEdRel = 'equal to')
GraphVizTable <- na.omit(GraphVizTable)
## write out the GraphViz dot file. This is the file that you will open in Graphviz standalone program
## comment out if you don't have Graphviz installed on your computer
## probably is worth it as the graphs look MUCH better (as of 2/28/2018)!
## Create the Graphviz formatted text vector
## NOTE(review): growing a string with paste() in a loop is O(n^2); fine for
## typical project sizes, but consider paste(..., collapse=) if it gets slow.
GraphvizOutput <- "digraph G {\n node [shape=ellipse, fixedsize=FALSE]; \n"
for (i in 1:nrow(GraphVizTable)){
  GraphvizOutput <- paste(GraphvizOutput, GraphVizTable$csr1.GraphVizAbove[i], GraphVizTable$csr1.GraphVizArrow[i], GraphVizTable$csr1.GraphVizBelow[i], "\n", sep = " ")
}
GraphvizOutput <- paste(GraphvizOutput, "}")
## Write the Graphviz file
fileConn<-file("Graphviz_Ctx.gv")
writeLines(GraphvizOutput, fileConn)
close(fileConn)
## Section 6.2 - Plot data using Rgraphviz
## 6.2 may be commented out as this section produces a graph without arrows.
## However, it does print every relationship - i.e., if both A is above B and B is below A are listed
## then two lines will be drawn between A and B. The graph drawn in 6.3 only draws one line, but has arrows.
## It appears that the 'agread' command creates a graph object whereas a graphNEL object is needed to
## print a directed graph (contains arrows showing the direction of the relationship)
## read the .gv file written in 6.1 back into R and plot it via Rgraphviz
graphNew <- agread("Graphviz_Ctx.gv", layoutType = "dot", layout = TRUE)
plot(graphNew)
## Save the output as a pdf file (34 x 22 inches) in the working directory
pdf("HM_Ctx_output0.pdf", width = 34, height = 22)
plot(graphNew)
dev.off()
## Section 6.3 - Plot data using Rgraphviz - This section prints out data as a graphNEL object
## This gives more options on print layout
## This method does not allow for duplicate relationships or 'edges'
## i.e., A above B can not be listed twice
## Remove duplicate relationships
unique_GraphvizTable <- unique(GraphVizTable)
## Put the Above contexts into a vector
edges_Above <- unique_GraphvizTable$csr1.GraphVizAbove
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Above <- as.character(edges_Above)
## Strip the surrounding double quotes that 6.1 added for the .gv export.
## NOTE(review): this unconditionally drops the first and last character, so
## it assumes every value really is quote-wrapped (true for values built in
## 6.1) -- confirm before reusing on other data.
## Remove the trailing double quote
edges_Above <- substr(edges_Above, 1, nchar(edges_Above)-1)
## Remove the leading double quote
edges_Above <- substr(edges_Above, 2, nchar(edges_Above))
## Put the Below contexts into a vector
edges_Below <- unique_GraphvizTable$csr1.GraphVizBelow
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Below <- as.character(edges_Below)
## Remove the trailing double quote
edges_Below <- substr(edges_Below, 1, nchar(edges_Below)-1)
## Remove the leading double quote
edges_Below <- substr(edges_Below, 2, nchar(edges_Below))
## Combine the 'Above' and 'Below' vectors into a matrix (one row per edge)
edges_GraphvizTable <- cbind(edges_Above, edges_Below)
## Convert the matrix into a graphNEL object
## graphNEL objects can be directed graphs (i.e., graph with arrows showing direction of relationship)
mygraphNEL <- ftM2graphNEL(edges_GraphvizTable, edgemode="directed")
## plot the graphNEL object in the plot window
## the recipEdges="distinct" command will draw an arrow for each relationship in the graph
## i.e., if there's a cycle (A above B and B above A), the default is to draw a single line with an arrow head
## at each end. This command will force the graph to draw two arrows, one pointing in each direction
## this is a valid command, but does not produce two arrows...
plot(mygraphNEL, recipEdges="distinct")
## Save the output as a pdf file
pdf("HM_Ctx_output1.pdf", width = 34, height = 22) #height and width is paper size
plot(mygraphNEL, recipEdges="distinct")
dev.off()
## section 6.3
## This section replicates the previous pdf/plot created in section 6.2 and applies colors and shapes
## to each node (Context) in an SG. Each Context in a SG gets a specific color/shape combination
testGraph <- layoutGraph(mygraphNEL)
## nodes (contexts) can have four distinct shapes around the context name (box, circle, ellipse and triangle)
## the "plaintext" option removes all shapes (default is the circle shape) around the contexts
## we will apply shapes to contexts that are part of an SG. All contexts not in an SG will not have a shape
## start by applying the plaintext shape (no shape) as the default
nodeRenderInfo(testGraph) <- list(shape="plaintext")
##Section 6.3a
## this section reuses Fraser's code for looping through the contexts in each SG (Section 3.3)
## the next three lines were already run in section 3.3 so variables still exist
#allSG<- unique(csr$SG)
#allSG<- allSG[!allSG==""]
#allCxtSG <- unique(data.frame(csr$context, csr$SG, stringsAsFactors=F))
## Put the four different Rgraphviz node types into a character vector
graph_Node_Shapes <- c("box", "ellipse", "triangle", "circle")
## Repeat the 4 node shapes as many times as the number of SG's in allSG
## NOTE(review): (length(allSG)+4)/4 can be fractional; rep() truncates the
## times argument, which still yields at least length(allSG) entries here --
## but an explicit ceiling() would make the intent clearer.
graph_Node_Shapes <- rep(graph_Node_Shapes, (length(allSG)+4)/4)
## Put 17 different colors that Rgraphviz recognizes into a character vector
graph_Node_Colors <- c("red", "blue", "green", "orange", "pink", "cyan", "purple", "transparent", "brown", "lightyellow", "gray", "lightblue", "gold", "darkgreen", "magenta", "yellow", "lightgreen")
## Repeat the 17 node colors as many times as the number of SG's in allSG
graph_Node_Colors <- rep(graph_Node_Colors, (length(allSG)+17)/17)
## Loop through all the SG's in allSG
## Select the Contexts for the current SG
## Assign a node shape and color for the contexts in the current SG
## (the i-th SG gets the i-th shape/color, so SGs 5, 9, ... reuse shapes)
for (i in 1:length(allSG)){
  thisSG <- allSG[i]
  ## Put all the Contexts in the current SG into the vector cxtForThisSG
  cxtForThisSG <- allCxtSG$csr.context[which(allCxtSG$csr.SG==thisSG)]
  ## Put contents of cxtForThisSG into a new vector called nodefillcolor
  nodefillcolor <- cxtForThisSG
  ## Construct a named vector mapping each Context to the node fill color for the ith SG
  nodefillcolor <- rep(graph_Node_Colors[i], length(cxtForThisSG))
  names(nodefillcolor) <- cxtForThisSG
  ## Change the node fill color for contexts in ith SG
  nodeRenderInfo(testGraph) <- list(fill=nodefillcolor)
  ## Put contents of cxtForThisSG into a new vector called nodeshape
  nodeshape <- cxtForThisSG
  ## Construct a named vector mapping each Context to the node shape for the ith SG
  nodeshape <- rep(graph_Node_Shapes[i], length(cxtForThisSG))
  names(nodeshape) <- cxtForThisSG
  ## Change the node shape for contexts in the ith SG
  nodeRenderInfo(testGraph) <- list(shape=nodeshape)
}
## Exploratory leftovers from styling experiments -- kept commented out.
#graph.par(list(edges=list(col="lightblue", lty="solid", lwd=1)))
#renderGraph(testGraph)
#graph.par(list(nodes=list(col="darkgreen", lty="dotted", lwd=2, fontsize=10)))
#renderGraph(testGraph)
#layoutGraph(testGraph)
#graph.par(list(nodes=list(fixedsize=FALSE, col="darkgreen", lty="solid", lwd=1, fontsize=30)))
#nodeRenderInfo(testGraph) <- list(shape=(fixedsize=FALSE))
#nodeRenderInfo(testGraph) <- list(shape=c( "2581A" = "rectangle"))
#nodeRenderInfo(testGraph) <- list(skew=c( "2581A" = -100))
#graph.par(list(nodes=list(fill="yellow", textCol="blue", fontsize = 20, font = "Times New Roman")))
#graph.par(list(edges=list(col="lightblue", lty="solid", lwd=1)))
#nodeRenderInfo(testGraph) <- list(fill=c("2581A"="", "2582A"="darkred", "2583A" = "red"))
#testGraph <- layoutGraph(testGraph)
#nodeRenderInfo(testGraph) <- list(arrowhead=c("dot"))
#layoutGraph(testGraph)
renderGraph(testGraph, recipEdges="distinct")
## Save the output as a pdf file
pdf("HM_Ctx_output2.pdf", width = 48, height = 36) #height and width is paper size
renderGraph(testGraph, recipEdges="distinct")
dev.off()
## 6.4 Plot SGs and Contexts HM graph
## This section uses HMData1 (contexts already replaced by their SGs in 3.5)
## as the source of Ctx and SG relationships
## put HMData1 into a new table
GraphViz_HM_Feat_SG_Ctx <- HMData1
## tidy R way to remove rows where archEdRel = 'equal to'
#GraphViz_HM_Feat_SG_Ctx <- GraphViz_HM_Feat_SG_Ctx[(GraphViz_HM_Feat_SG_Ctx$archEdRel!="equal to"),]
## dplyr way to remove rows where archEdRel = 'equal to'
GraphViz_HM_Feat_SG_Ctx <- dplyr::filter(GraphViz_HM_Feat_SG_Ctx, archEdRel != "equal to")
## tidy R and dplyr - add new column "GraphvizAbove" and put in value of "relCtx" column if "archEdRel" = 'above'. If not true, put in value of "ctx"
GraphViz_HM_Feat_SG_Ctx <- dplyr::mutate(GraphViz_HM_Feat_SG_Ctx, GraphvizAbove = ifelse(GraphViz_HM_Feat_SG_Ctx$archEdRel == 'above', GraphViz_HM_Feat_SG_Ctx$relCxt, GraphViz_HM_Feat_SG_Ctx$cxt))
## tidy R and dplyr - add new column "GraphvizBelow" and put in value of "Ctx" column if "archEdRel" = 'above'. If not true, put in value of "relCtx"
GraphViz_HM_Feat_SG_Ctx <- dplyr::mutate(GraphViz_HM_Feat_SG_Ctx, GraphvizBelow = ifelse(GraphViz_HM_Feat_SG_Ctx$archEdRel == 'above', GraphViz_HM_Feat_SG_Ctx$cxt, GraphViz_HM_Feat_SG_Ctx$relCxt))
## Create new data frame that has just 2 columns - GraphvizAbove and GraphvizBelow
GraphVizTable_HM_Feat_SG_Ctx <- data.frame(GraphViz_HM_Feat_SG_Ctx$GraphvizAbove, GraphViz_HM_Feat_SG_Ctx$GraphvizBelow)
## Remove duplicate relationships (graphNEL does not allow duplicate edges)
unique_GraphVizTable_HM_Feat_SG_Ctx <- unique(GraphVizTable_HM_Feat_SG_Ctx)
## Put the Above contexts into a vector
edges_Above_HM_Feat_SG_Ctx <- unique_GraphVizTable_HM_Feat_SG_Ctx$GraphViz_HM_Feat_SG_Ctx.GraphvizAbove
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Above_HM_Feat_SG_Ctx <- as.character(edges_Above_HM_Feat_SG_Ctx)
## Put the Below contexts into a vector
edges_Below_HM_Feat_SG_Ctx <- unique_GraphVizTable_HM_Feat_SG_Ctx$GraphViz_HM_Feat_SG_Ctx.GraphvizBelow
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Below_HM_Feat_SG_Ctx <- as.character(edges_Below_HM_Feat_SG_Ctx)
## Combine the 'Above' and 'Below' vectors into a matrix (one row per edge)
edges_HM_Feat_SG_Ctx_GraphvizTable <- cbind(edges_Above_HM_Feat_SG_Ctx, edges_Below_HM_Feat_SG_Ctx)
## Convert the matrix into a graphNEL object
## graphNEL objects can be directed graphs (i.e., graph with arrows showing direction of relationship)
mygraphNEL_HM_Feat_SG_Ctx <- ftM2graphNEL(edges_HM_Feat_SG_Ctx_GraphvizTable, edgemode="directed")
## plot the graphNEL object in the plot window
## the recipEdges="distinct" command will draw an arrow for each relationship in the graph
## i.e., if there's a cycle (A above B and B above A), the default is to draw a single line with an arrow head
## at each end. This command will force the graph to draw two arrows, one pointing in each direction
plot(mygraphNEL_HM_Feat_SG_Ctx, recipEdges="distinct", attrs=attrs)
## Save the output as a pdf file
pdf("HM_output1_HM_Feat_SG_Ctx.pdf", width = 34, height = 22) #height and width is paper size
plot(mygraphNEL_HM_Feat_SG_Ctx, recipEdges="distinct", attrs=attrs)
dev.off()
##### Section 6.5 #####
## Produce a HM directed graph using SGs. However, also append to the SG label all the contexts that make up the SG.
## Add two new columns to csr1
csr1$SG_CTX_Above <- NA
csr1$SG_CTX_Below <- NA
## loop through csr1 and put contexts into SG_CTX_Above when ArchEdRel is 'above' or 'below'
## ('equal to' rows stay NA and are dropped later by na.omit)
for (i in 1:nrow(csr1)){
  if(csr1$archEdRel[i]=='above') {
    csr1$SG_CTX_Above[i] <- csr1$relatedContext[i]
  } else if(csr1$archEdRel[i]=='below'){
    csr1$SG_CTX_Above[i] <- csr1$context[i]
  }
}
for (i in 1:nrow(csr1)){
  if(csr1$archEdRel[i]=='above') {
    csr1$SG_CTX_Below[i] <- csr1$context[i]
  } else if(csr1$archEdRel[i]=='below'){
    csr1$SG_CTX_Below[i] <- csr1$relatedContext[i]
  }
}
## need the glue library for the collapse command inside the for loop
## loop through allSG vector (contains all the SGs in csr1) and create a new string containing
## the SG name followed by all the Contexts that make up the SG (separated by spaces).
## The internal For loops through csr1 and replaces the contexts within the SG_CTX_Above and SG_CTX_Below fields
## with the concatenated SG and Context names.
## NOTE(review): O(n_SG * n_rows) nested loops -- fine for a single project,
## but a merge on SG would scale better if ever needed.
for (i in 1:length(allSG)){
  ## at beginning of each loop set SG_CTX_Combo = to an empty string ("")
  SG_CTX_Combo <- ""
  thisSG<-allSG[i]
  cxtForThisSG_Combo <- allCxtSG$csr.context[which(allCxtSG$csr.SG==thisSG)]
  ## paste together thisSG and CxtForThisSG_Combo into a single variable SG_CTX_Combo (vector of length of 1)
  ## label looks like "SG01: 123A 124B ..."
  SG_CTX_Combo <- paste(thisSG, ": ", glue_collapse(cxtForThisSG_Combo, sep = " ", width = Inf, last = ""), sep = "")
  ## put into column SG_CTX_Above the value of SG_CTX_Combo (SG followed by list of Contexts)
  ## (rows where this SG is on the "context" side of the relationship)
  for (j in 1:nrow(csr1)) {
    if(csr1$SG[j]==thisSG) {
      if(csr1$archEdRel[j]=='above') {
        csr1$SG_CTX_Below[j] <- SG_CTX_Combo
      } else if(csr1$archEdRel[j]=='below'){
        csr1$SG_CTX_Above[j] <- SG_CTX_Combo
      }
    }
  }
  ## put into column SG_CTX_Below the value of SG_CTX_Combo (SG followed by list of Contexts)
  ## (rows where this SG is on the "relatedContext" side of the relationship)
  for (j in 1:nrow(csr1)) {
    if(csr1$relatedSG[j]==thisSG) {
      if(csr1$archEdRel[j]=='above') {
        csr1$SG_CTX_Above[j] <- SG_CTX_Combo
      } else if(csr1$archEdRel[j]=='below'){
        csr1$SG_CTX_Below[j] <- SG_CTX_Combo
      }
    }
  }
}
## Create new data frame that has just 2 columns - SG_CTX_Above, SG_CTX_Below
GraphVizTable_SG_Ctx <- data.frame(csr1$SG_CTX_Above, csr1$SG_CTX_Below)
## Remove rows that don't have values in GraphvizAbove or GraphvizBelow (rows where archEdRel = 'equal to')
GraphVizTable_SG_Ctx <- na.omit(GraphVizTable_SG_Ctx)
## write out the GraphViz dot file. This is the file that you will open in Graphviz standalone program
## comment out if you don't have Graphviz installed on your computer
## probably is worth it as the graphs look MUCH better (as of 2/28/2018)!
## Create the Graphviz formatted text vector
## (same O(n^2) string growth as 6.1 -- acceptable at this scale)
GraphvizOutput_SG_Ctx <- "digraph G {\n"
for (i in 1:nrow(GraphVizTable_SG_Ctx)){
  GraphvizOutput_SG_Ctx <- paste(GraphvizOutput_SG_Ctx, doublequote, GraphVizTable_SG_Ctx$csr1.SG_CTX_Above[i], doublequote, "->", doublequote, GraphVizTable_SG_Ctx$csr1.SG_CTX_Below[i], doublequote, "\n", sep = " ")
}
GraphvizOutput_SG_Ctx <- paste(GraphvizOutput_SG_Ctx, "}")
## Write the Graphviz file
fileConn<-file("Graphviz_SG_Ctx.gv")
writeLines(GraphvizOutput_SG_Ctx, fileConn)
close(fileConn)
## Remove duplicate relationships (graphNEL does not allow duplicate edges)
unique_GraphvizTable_SG_Ctx <- unique(GraphVizTable_SG_Ctx)
## Put the Above contexts into a vector
edges_Above_SG_Ctx <- unique_GraphvizTable_SG_Ctx$csr1.SG_CTX_Above
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Above_SG_Ctx <- as.character(edges_Above_SG_Ctx)
## Put the Below contexts into a vector
edges_Below_SG_Ctx <- unique_GraphvizTable_SG_Ctx$csr1.SG_CTX_Below
## The above command thinks the text is a variable type of 'factor' (due to pulling it from a data frame)
## Convert the vector so that the values are 'text' variables
edges_Below_SG_Ctx <- as.character(edges_Below_SG_Ctx)
#write.table(edges_Below_SG_Ctx, "mydata.txt", sep="\t")
## Combine the 'Above' and 'Below' vectors into a matrix (one row per edge)
edges_GraphvizTable_SG_Ctx <- cbind(edges_Above_SG_Ctx, edges_Below_SG_Ctx)
## Convert the matrix into a graphNEL object
## graphNEL objects can be directed graphs (i.e., graph with arrows showing direction of relationship)
mygraphNEL_SG_Ctx <- ftM2graphNEL(edges_GraphvizTable_SG_Ctx, edgemode="directed")
## plot the graphNEL object in the plot window
## the recipEdges="distinct" command will draw an arrow for each relationship in the graph
## i.e., if there's a cycle (A above B and B above A), the default is to draw a single line with an arrow head
## at each end. This command will force the graph to draw two arrows, one pointing in each direction
plot(mygraphNEL_SG_Ctx, recipEdges="distinct", attrs=attrs)
## Save the output as a pdf file
pdf("HM_SG_Ctx_output1.pdf", width = 34, height = 22) #height and width is paper size
plot(mygraphNEL_SG_Ctx, recipEdges="distinct", attrs=attrs)
dev.off()
|
6f732dad1309746319409aa3228bb408af59e615
|
39f4df1f5c2faadbdf366d65ede30aa5edba3497
|
/R/factors.R
|
ddd8ca5855fcc8f11faa77409f2a928d8f975d74
|
[] |
no_license
|
cran/kutils
|
a19c69b6730548aa849ca841291d95de92dd3863
|
53ada7e4308f456a0a109955ecfd9122f6263aba
|
refs/heads/master
| 2023-07-10T17:11:29.010416
| 2023-06-26T21:40:02
| 2023-06-26T21:40:02
| 77,183,151
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,578
|
r
|
factors.R
|
##' Reverse the levels in a factor
##'
##' Simple literal reversal. Will stop with an error message if x is
##' not a factor (or ordered) variable.
##'
##' Sometimes people want to
##' reverse some levels, excluding others and leaving them at the end
##' of the list. The "eol" argument sets aside some levels and puts
##' them at the end of the list of levels.
##'
##' The use case for the \code{eol} argument is a factor
##' with several missing value labels, as appears in SPSS. With
##' up to 18 different missing codes, we want to leave them
##' at the end. In the case for which this was designed, the
##' researcher did not want to designate those values as
##' missing before inspecting the pattern of observed values.
##'
##' @param x a factor variable
##' @param eol values to be kept at the end of the list. Does not
##' accept regular expresssions, just literal text strings
##' representing values.
##' @export
##' @return a new factor variable with reversed values
##' @author Paul Johnson <pauljohn@@ku.edu>
##' @examples
##' ## Consider alphabetication of upper and lower
##' x <- factor(c("a", "b", "c", "C", "a", "c"))
##' levels(x)
##' xr1 <- reverse(x)
##' xr1
##' ## Keep "C" at end of list, after reverse others
##' xr2 <- reverse(x, eol = "C")
##' xr2
##' y <- ordered(x, levels = c("a", "b", "c", "C"))
##' yr1 <- reverse(y)
##' class(yr1)[1] == "ordered"
##' yr1
##' ## Hmm. end of list amounts to being "maximal".
##' ## Unintended side-effect, but interesting.
##' yr2 <- reverse(y, eol = "C")
##' yr2
##' ## What about a period as a value (SAS missing)
##' z <- factor(c("a", "b", "c", "b", "c", "."))
##' reverse(z)
##' z <- factor(c(".", "a", "b", "c", "b", "c", "."))
##' reverse(z)
##' ## How about R NA's
##' z <- factor(c(".", "a", NA, "b", "c", "b", NA, "c", "."))
##' z
##' reverse(z)
##' z <- ordered(c(".", "a", NA, "b", "c", "b", NA, "c", "."))
##' z
##' str(z)
##' ## Put "." at end of list
##' zr <- reverse(z, eol = ".")
##' zr
##' str(zr)
##' z <- ordered(c(".", "c", NA, "e", "a", "c", NA, "e", "."),
##' levels = c(".", "c", "e", "a"))
##' reverse(z, eol = ".")
##' reverse(z, eol = c("a", "."))
reverse <- function(x, eol = c("Skip", "DNP")){
  ## Reverse the order of a factor's levels.
  ##
  ## Args:
  ##   x:   a factor (or ordered factor); anything else is an error.
  ##   eol: level text to pin at the *end* of the reversed level list
  ##        (useful for missing-value codes). Matched literally, not as
  ##        a regular expression.
  ## Returns:
  ##   A new factor holding the same data as `x`, with reversed levels and
  ##   the same ordered-ness as the input.
  ##
  ## Fix: removed `class_old <- class(x)`, which was assigned but never used.
  if (!is.factor(x)) stop("your variable is not a factor")
  rlevels <- rev(levels(x))
  if (length(eol) > 0){
    for (jj in eol){
      ## NOTE: grep(fixed = TRUE) does *substring* matching, so any level
      ## merely containing `jj` is removed and the literal `jj` appended.
      ## Kept as-is for compatibility; exact matching would be
      ## `which(rlevels == jj)`.
      yyy <- grep(jj, rlevels, fixed = TRUE)
      if (length(yyy) > 0){
        rlevels <- c(rlevels[-yyy], jj)
      }
    }
  }
  factor(x, levels = rlevels, ordered = is.ordered(x))
}
|
c03efde0033f5f068df16b7e9a556a00ac95a1fa
|
c55995032be21e83cccd79f7bc587ed8ad705e8d
|
/inst/munge_scripts/target_os_file_processing/target_os_expression_tab2csv.R
|
e7e3bd38670c27552aadb8a9870184fc9ef88f84
|
[] |
no_license
|
teamcgc/TARGETCloud
|
d32339a72093d683d6b15122a1b3db3c0c6e9a93
|
ae916e4f9104c8a1c7188621d47d3069e7342b55
|
refs/heads/master
| 2020-07-28T09:44:48.678109
| 2016-11-10T23:44:13
| 2016-11-10T23:44:13
| 73,414,487
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,111
|
r
|
target_os_expression_tab2csv.R
|
# 1. Merge all RNAseq gene expression files
# as one big file in csv format
# ====================================================================
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace;
# consider dropping it and running the script in a fresh session instead.
rm(list=ls(all=TRUE))
input.files <- list.files(path="target_os_expression",
pattern="gene", full.names=TRUE)
length(input.files) # [1] 86
system("wc -l target_os_expression/*gene.quantification.txt")
# 3911022 total
# Read the first file to establish the expected dimensions and row ids.
aFile <- 1;
message(aFile, ": ", input.files[aFile]);
bigData <- read.table(input.files[aFile], header=TRUE, sep="\t", quote="")
dim(bigData) # [1] 45476 5
colnames(bigData)
# [1] "ensembl_gene_id" "mean_length" "mean_eff_length"
# [4] "est_counts" "tpm"
# for data check
# ===============================================
#
totalRows <- nrow(bigData)
totalCols <- ncol(bigData)
geneID <- as.character(bigData$ensembl_gene_id)
meanLength <- as.numeric(bigData$mean_length)
# sample id = file name minus directory prefix and quantification suffix
sampleID <- sub(".{1,}/", "", input.files[aFile]);
sampleID <- sub(".gene.quantification.txt", "", sampleID);
bigData <- cbind(sampleID, bigData);
head(bigData)
# Append every remaining file, verifying it matches the first file's layout.
# NOTE(review): rbind() inside the loop grows bigData quadratically; building
# a list and calling do.call(rbind, ...) once would be faster. The sampleID
# check only guards against a clash with the FIRST file's sample id, and
# mean_length is compared after as.character() coercion against a numeric
# vector (works via implicit coercion, but is fragile).
for(aFile in seq_len(length(input.files))[-1])
{
message(aFile, ": ", input.files[aFile]);
moreData <- read.table(input.files[aFile], header=TRUE, sep="\t", quote="")
if(nrow(moreData) != totalRows || ncol(moreData) != totalCols)
stop("Bad file dimensions!\n")
# New sampleID
#
moreSampleID <- sub(".{1,}/", "", input.files[aFile]);
moreSampleID <- sub(".gene.quantification.txt", "", moreSampleID);
if(moreSampleID == sampleID) stop("Sample ID error!")
# geneID in all files should be a same set
#
moreGeneID <- as.character(moreData$ensembl_gene_id)
if(sum(moreGeneID == geneID) != totalRows)
stop("ensembl_gene_id error!")
# mean_length in all files should be a same set
#
moreMeanLength <- as.character(moreData$mean_length)
if(sum(moreMeanLength == meanLength) != totalRows)
stop("mean_length error!")
moreData <- cbind(sampleID=moreSampleID, moreData);
print(head(moreData));
bigData <- rbind(bigData, moreData);
}
# data check
# =======================================================
dim(bigData) # [1] 3910936 6
totalRows*length(input.files) # [1] 3910936
length(unique(bigData$sampleID)) # [1] 86
head(bigData)
tail(bigData)
# Save output to files
# ===========================================================================
save(bigData, file="target_os_RNAseq_gene_expression_level_3_all_samples.RData")
write.table(bigData, sep=",", quote=FALSE, row.names=FALSE, col.names=TRUE,
file="target_os_RNAseq_gene_expression_level_3_all_samples.csv" )
# data check
# ===============================================================
system("head -n 10 target_os_RNAseq_gene_expression_level_3_all_samples.csv")
system("tail -n 10 target_os_RNAseq_gene_expression_level_3_all_samples.csv")
# 2. Merge all RNAseq isoform expression files
# as one big file in csv format
# ====================================================================
# NOTE(review): this section duplicates section 1 almost line for line; a
# single merge function taking (pattern, id-column) would remove the
# duplication.
rm(list=ls(all=TRUE))
input.files <- list.files(path="target_os_expression",
pattern="isoform", full.names=TRUE)
length(input.files) # [1] 86
system("wc -l target_os_expression/*isoform.quantification.txt")
# 16509248 total
aFile <- 1;
message(aFile, ": ", input.files[aFile]);
bigData <- read.table(input.files[aFile], header=TRUE, sep="\t", quote="")
dim(bigData) # [1] 191967 5
colnames(bigData)
# [1] "target_id" "length" "eff_length" "est_counts" "tpm"
# for data check
# ===============================================
#
totalRows <- nrow(bigData)
totalCols <- ncol(bigData)
targetID <- as.character(bigData$target_id)
targetLength <- as.numeric(bigData$length)
sampleID <- sub(".{1,}/", "", input.files[aFile]);
sampleID <- sub(".isoform.quantification.txt", "", sampleID);
bigData <- cbind(sampleID, bigData);
head(bigData)
for(aFile in seq_len(length(input.files))[-1])
{
message(aFile, ": ", input.files[aFile]);
moreData <- read.table(input.files[aFile], header=TRUE, sep="\t", quote="")
if(nrow(moreData) != totalRows || ncol(moreData) != totalCols)
stop("Bad file dimensions!\n")
# New sampleID
#
moreSampleID <- sub(".{1,}/", "", input.files[aFile]);
moreSampleID <- sub(".isoform.quantification.txt", "", moreSampleID);
if(moreSampleID == sampleID) stop("Sample ID error!")
# geneID in all files should be a same set
#
moreTargetID <- as.character(moreData$target_id)
if(sum(moreTargetID == targetID) != totalRows)
stop("target_id error!")
# mean_length in all files should be a same set
#
moreLength <- as.character(moreData$length)
if(sum(moreLength == targetLength) != totalRows)
stop("length error!")
moreData <- cbind(sampleID=moreSampleID, moreData);
print(head(moreData));
bigData <- rbind(bigData, moreData);
}
# data check
# ========================================================
dim(bigData) # [1] 16509162 6
totalRows*length(input.files) # [1] 16509162
length(unique(bigData$sampleID)) # [1] 86
head(bigData)
# Save output to files
# ===========================================================================
outFile <- "target_os_RNAseq_isoform_expression_level_3_all_samples.RData";
save(bigData, file=outFile);
write.table(bigData, sep=",", quote=FALSE, row.names=FALSE, col.names=TRUE,
file="target_os_RNAseq_isoform_expression_level_3_all_samples.csv");
# data check
# ===============================================================
system("head -n 10 target_os_RNAseq_isoform_expression_level_3_all_samples.csv")
system("tail -n 10 target_os_RNAseq_isoform_expression_level_3_all_samples.csv")
|
e2365f868fdc81af65eaa1cc4eb0f11c8a2cbce0
|
7a95687ca2dd7172b7265d129aa0cb6d5d40222b
|
/man/theoretical_power.Rd
|
2c3d49c3ef3ae43e136a9f6c4d8a5bfb18af14af
|
[
"MIT"
] |
permissive
|
diff-priv-ht/nonpmRegPkg
|
5e869d21a876cc7cc6ffe8411cf90d3fb86a84a9
|
056b53435f2507224c03639047e6a5d044af95f0
|
refs/heads/master
| 2023-01-29T18:16:03.154636
| 2023-01-22T21:16:35
| 2023-01-22T21:16:35
| 190,652,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,891
|
rd
|
theoretical_power.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theoretical_power.R
\name{theoretical_power}
\alias{theoretical_power}
\title{Theoretical power of our test}
\usage{
theoretical_power(
theta_0,
M,
effect_size,
epsilon,
alpha = 0.05,
X = NULL,
groups = NULL,
n = NULL,
d = 1,
n_zeros = 0,
nsims = NULL,
test = "Linear Regression",
ncores = 1
)
}
\arguments{
\item{theta_0}{The threshold.}
\item{M}{The number of subsamples to partition the data into.}
\item{effect_size}{The quotient of the parameter of interest (beta or mu) and
the standard deviation of the noise (sigma). For ANOVA, the ratio of the
between group variance to the within group variance.}
\item{epsilon}{The privacy parameter.}
\item{alpha}{The significance level, defaults to 0.05}
\item{X}{For regression only. A design matrix with at least two explanatory
variables.}
\item{groups}{For regression, a vector of length \code{nrow(X)} with the
index of the group of each row in \code{X}. For ANOVA, an integer of the
number of groups}
\item{n}{For normal or ANOVA. The number of observations (number of rows in
the database).}
\item{d}{For normal test only. The number of dimensions (number of columns in
the database).}
\item{n_zeros}{For normal test only. The number of entries of the alternative
distribution with mean zero. Defaults to 0.}
\item{nsims}{The number of draws from the tulap and binomial with which to
compute the reference distribution. (No Longer Used)}
\item{test}{The test to compute the power of. Either "Linear Regression",
"Normal", or "ANOVA"}
\item{ncores}{The number of cores to use for the Poisson-binomial pmf
computation (No Longer Used)}
}
\value{
The output will be a double between 0 and 1.
}
\description{
Function that computes the power of our test for a given a design matrix and
a given partitioning into subsamples.
}
|
b0de2e943245ca5863fb30af9b5ee55d5ee014c9
|
b161310d2a789e3a296c17c391adac5cc36f4a39
|
/mcr1mapping.R
|
79d0ba6d1dadb441c5d482b51c9badf19e2755b9
|
[] |
no_license
|
christinecho/Disease-and-Flight-Travel-Modelling
|
94cc7107dfd820129c8cf21537966fa3609ead16
|
1f93be72f91692ea74f5e4c455ac068aa2705a56
|
refs/heads/master
| 2022-03-15T22:17:54.846219
| 2019-09-04T14:51:17
| 2019-09-04T14:51:17
| 201,417,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 684
|
r
|
mcr1mapping.R
|
library(ggplot2)
library(ggmap)
library(maps)
library(ggthemes)
library(gganimate)
library(animation)
## Static (non-animated) versions, kept for reference:
# world <- ggplot() + borders("world", colour = "gray85", fill = "gray80") +
# theme_map()
# map <- ggplot() + geom_point(data = mcr1, aes(x = Longitude, y = Latitude), colour = "red")
# map
# Sort detections chronologically so animation frames advance in date order.
# NOTE(review): assumes a data frame `ndm` with columns `Date of Detection`,
# `Longitude`, `Latitude` was loaded earlier -- confirm against the full script.
ndm_sorted<-ndm[order(ndm$`Date of Detection`),]
# animates map
world <- ggplot() + borders("world", colour = "gray85", fill = "gray80") + theme_map()
# One frame per detection date; gganimate substitutes the current state into
# "{closest_state}" in the title.
map <- world + geom_point(data = ndm_sorted, aes(x = Longitude, y = Latitude), colour = "red") +
transition_states(ndm_sorted$`Date of Detection`) +
labs(title="Date: {closest_state}")
animate(map)
# anim_save("ndm1.gif", anim)
|
891e28fd9a8af392206024c472731d52fbe9258a
|
5276f0053b33e16406a58bcf4c8293a9d272ee9b
|
/R/multiplot.R
|
5ea95def6f376624b74ffb7d528402f1a6d153c5
|
[] |
no_license
|
madsboending/boending
|
cf1314019b6270abfadbf6ffbf61bb2d8b204686
|
0835955394aa5434f3cdbeb9d9079a5de40ef9ad
|
refs/heads/master
| 2021-06-25T08:57:08.137547
| 2016-12-22T09:11:17
| 2016-12-22T09:11:17
| 41,782,730
| 0
| 0
| null | null | null | null |
ISO-8859-15
|
R
| false
| false
| 2,105
|
r
|
multiplot.R
|
#' Plot several diagrams on one page.
#'
#' This function makes it possible to plot several diagrams on the same page. It is shamelessly copied from http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
#' @param ..., ggplot objects
#' @param plotlist, a list of ggplot objects
#' @param file, unused; kept in the signature for backward compatibility
#' @param cols, the number of columns in the layout
#' @param layout, a matrix specifying the layout. If layout is given, 'cols' is ignored
#' @keywords multiplot
#' @export
#' @examples
#' # data example (from the ggplot2 examples)
#' df <- data.frame(gp = factor(rep(letters[1:3], each = 10)), y = rnorm(30))
#'
#' # plot example (from the ggplot2 examples)
#' plot1 <- ggplot(df, aes(x = gp, y = y))+geom_point()
#'
#' plot2 <- ggplot(df, aes(x = gp, y = y))+geom_boxplot()
#'
#'
#' multiplot(plot1, plot2, cols=2)
#'
#' If the layout is e.g. matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
#' then plot 1 will be in the upper left, 2 in the upper right, and
#' 3 will span the whole bottom row.
#'
# NOTE(review): `file` is never used inside the function, and library(grid)
# attaches grid as a side effect on every call; grid::-qualified calls (or
# requireNamespace) would be cleaner. Also `numPlots = length(plots)` uses
# `=` where the rest of the file uses `<-`.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
|
54081b127751237fb89f89c21beaab5395b46402
|
57c6013f7a4636911cef6f0868b858ee907864a4
|
/DNAreadsToBytes.R
|
10852c12158b401ee3576b0a3725aa76049c0ffe
|
[] |
no_license
|
jeanlain/DNAreadsToBytes
|
06f6b31acb20ed4d1b61cb6aeb1c6c8eb2869d8f
|
1d824ead34c63a2c65565a5d0f0e3c9084b384b1
|
refs/heads/master
| 2022-04-13T13:34:03.761675
| 2020-01-14T15:14:27
| 2020-01-14T15:14:27
| 119,713,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,121
|
r
|
DNAreadsToBytes.R
|
require(stringi)
require(data.table)
require(Biostrings)
require(matrixStats)
options("stringsAsFactors" = F)
# Command-line interface: Rscript <this file> <fasta> <output dir> [huffman code]
paths <- commandArgs(T)
# path to fasta file
input <- paths[1]
# output directory
outputFolder <- paths[2]
# default Huffman code table is looked for inside the output folder
if (length(paths) < 3) {
huffmanCode <- gsub("//", "/", stri_c(outputFolder, "/View_huff3.cd.new.correct"), fixed = T)
} else {
huffmanCode <- paths[3]
}
paths <- c(input, outputFolder, huffmanCode)
# NOTE(review): `missing` shadows base::missing() at the top level; harmless
# here but easy to trip over later.
missing <- !file.exists(paths)
if (any(missing)) stop(paste("missing file(s):", paste(paths[missing], collapse = ", ")))
# defining "basic" functions used in the script-----------------------------------------
baseTypes <- c("A", "C", "G", "T")
# matrix used to convert successive bases to trits, used below
# Rows = previous base, columns = current base; the diagonal is NA because the
# encoding never repeats a base (a repeat therefore flags an error).
conv <- matrix(c(NA, 0:2, 2L, NA, 0:1, 1:2, NA, 0L, 0:2, NA), 4, byrow = T)
colnames(conv) <- baseTypes
rownames(conv) <- baseTypes
# converts a matrix of DNA bases (sequences as rows) to integer trits according to Goldman et al's scheme
# Each trit is looked up in the global `conv` table using the pair of adjacent
# bases (previous base = row, current base = column), so an n-column input
# yields n-1 trit columns. Illegal pairs (e.g. a repeated base) map to NA.
# Return shape follows vapply(): a matrix for multi-row input, but a plain
# integer vector when the input has a single row/sequence.
toTrits <- function(mat) {
# in case mat is a vector, we convert it to a 1-row matrix (otherwise we would have dimension issues)
if (!is.matrix(mat)) mat <- rbind(mat)
vapply(2:ncol(mat), function(col) conv[cbind(mat[, col - 1], mat[, col])], integer(nrow(mat)))
}
# Split a vector of strings into a character matrix, one character per cell.
# Each word becomes a row; the column count is the length of the longest
# word, and positions past a word's end hold "" (empty string). Uses
# position-wise stri_sub(), which appears faster than a full split.
stringToMatrix <- function(vector) {
word_len <- stri_length(vector)
width <- max(word_len)
out <- matrix(NA, nrow = length(vector), ncol = width, byrow = T)
for (pos in seq_len(width)) {
out[, pos] <- stri_sub(vector, pos, pos)
}
out
}
# Reverse-complement DNA sequences held in a character vector.
# Only a/c/g/t (either case) are mapped; ambiguity codes are not handled.
revCom <- function(seqs) {
complemented <- chartr("acgtACGT", "tgcaTGCA", seqs)
stri_reverse(complemented)
}
# Convert digit matrices of an arbitrary base to base-10 numbers. Digits sit
# in separate columns, so each row reads as one number from left (most
# significant) to right. A plain vector is treated as a single number.
toBase10 <- function(mat, base = 3) {
if (!is.matrix(mat)) mat <- rbind(mat)
acc <- rep(0, nrow(mat))
for (digit_col in seq_len(ncol(mat))) {
# Horner's scheme, vectorized across rows
acc <- acc * base + mat[, digit_col]
}
acc
}
# tells the occurrence of the element found, for all positions of a vector
# (e.g. 1 if it's the first time it appears, 2 if the 2nd time, etc).
# Improvement: the previous implementation built a data.table, grouped, and
# scattered the counts back by position; base R does the same with match()
# (a stable group id per value, NA matching NA) plus ave() with a cumulative
# sum, which preserves the original element order. Returns an integer vector
# the same length as the input.
occurrences <- function(vect) {
if (length(vect) == 0L) {
return(integer(0))
}
grp <- match(vect, vect)
ave(rep(1L, length(vect)), grp, FUN = cumsum)
}
# For each row of `mat`, return the first (leftmost) column whose entry
# equals the corresponding element of `values` (recycled by `==` as usual,
# so a length-1 `values` works). Rows without any match yield NA.
rowMatches <- function(values, mat) {
hits <- mat == values
apply(hits, 1, function(row_hits) which(row_hits)[1])
}
# splits a character vector into columns (like excel). A column range can be
# specified as an integer vector and "cells" are filled with NAs if needed.
# By default, it generates as many columns as necessary given the
# character(s) used to split the vector elements.
#   vector:  coerced to character before splitting.
#   split:   literal separator (no regex interpretation).
#   columns: optional 1-based column indices to keep; fractional values are
#            rounded, values < 1 are an error.
# Returns a character matrix, or a character vector when a single column is
# selected (matrix single-column drop; caller-visible behavior, kept).
splitToColumns <- function(vector, split, columns) {
vector <- as.character(vector)
res <- stri_split_fixed(vector, split, omit_empty = NA, simplify = TRUE)
if (missing(columns)) {
columns <- seq_len(ncol(res))
}
if (any(columns < 1)) {
stop("inappropriate column parameter. Use integers >=1")
}
columns <- round(columns)
# Fix: previously ended with `res <- res[, columns]`, so the value was
# returned *invisibly*; return the subset directly so interactive calls
# print as expected.
res[, columns]
}
# STEP ONE. Data import and initial processing ---------------------------------------------------------------
code <- readLines(huffmanCode, skipNul = T)
# We split lines into a table (discards the last line, as it apparently doesn't correspond to a byte)
code <- data.table(splitToColumns(code[-length(code)], "\t"))
code[, byte := as.hexmode(as.integer(V3))]
cat("importing reads...\n")
# we ingest all read sequences, ignoring names. This takes memory
reads <- readDNAStringSet(input, use.names = F)
# we place them in a data table
reads <- data.table(SEQ = as.character(reads))
# so we can rapidly discard duplicate read sequences, but retain how many reads
# we had per sequence (much faster than table())
reads <- reads[, .N, by = SEQ]
# forward-sequenced reads should start by AT and end by GC (should be the
# opposite for reverse-sequenced reads)
cat("checking read orientation...\n")
reads[, c("ATstart", "GCend") := .(stri_sub(SEQ, 1, 1) %chin% c("A", "T"), stri_sub(SEQ, 117, 117) %chin% c("G", "C"))]
# contingency table used to evaluate if there are reads to reverse
nums <- matrix(reads[, sum(N), by = .(ATstart, GCend)]$V1, ncol = 2)
# This test should mean there are significantly more apparent reversed reads than expected from sequencing errors
if (chisq.test(nums)$p.value < 0.0001) {
reads[ATstart == F & GCend == F, SEQ := revCom(SEQ)]
}
reads[, c("ATstart", "GCend") := NULL]
# STEP TWO. Extraction of read index information -------------------------------------------
cat("extracting read indexing information...\n")
# We extract indexing information in the last 16 bases of a read (excluding the
# very last). We split them into a matrix of individual bases (rows = reads)
IX <- stringToMatrix(stri_sub(reads$SEQ, 101, 116))
IX <- toTrits(IX)
# We get the parity trit by summing the appropriate columns (odd numbers)
parity <- rowSums(IX[, seq(1, 13, 2)]) %% 3L
# We compare it to the 15th trit as an error check. Also, NAs in trits mean errors
error <- parity != IX[, 15] | rowSums(is.na(IX)) > 0
reads <- reads[!error]
IX <- IX[!error, ]
# We obtain file ID from first 2 trits (integer)
fileID <- IX[, 1] * 3L + IX[, 2]
# We obtain read location indices in this file from following 12 trits
idx <- toBase10(IX[, 3:14])
# We add indexing information to the table
reads[, c("FILE", "LOC") := .(fileID, as.integer(idx))]
# we may reclaim some memory
rm(fileID, idx, error, parity, IX)
# to reduce the workload (and free memory), we retain no more than 100 read
# sequences per location index per encoded file. This should be enough to call
# reliable consensuses. We favour sequences without apparent error
cat("sampling best reads...\n")
# values of this column will be TRUE if there are duplicate bases in a read, which shouldn't exist
reads[, err := grepl("AA|CC|GG|TT", SEQ)]
# we sort the table by placing these sequences at the bottom, and favour those represented by many reads
setorder(reads, FILE, LOC, err, -N)
# we retain no more than the first 100 sequences per location per file
reads <- reads[occurrences(stri_c(FILE, LOC, sep = " ")) <= 100, ]
# STEP TREE, Consensus calling -------------------------------------------------------
# we make a consensus sequences at each location index. We do it before the
# keystream conversion, as a single error may result in a borked read after this
# conversion. We will only convert the (hopefully error-free) consensuses.
cat("generating local consensuses...\n")
# We spot the 100 nt that encode file data into individual bases
reads <- reads[, data.table(stringToMatrix(stri_sub(SEQ, 2, 101)), N, FILE, LOC)]
# The function below counts the number of reads for each of the 4 possible bases at the 100
# positions of a location (takes a base matrix and the number of reads per
# sequences). We will do this for each location index
#   mat: character matrix of bases (rows = sequences, cols = positions)
#   Ns:  read count backing each sequence (length nrow(mat))
# Returns a list of 4 per-position read-count vectors, in the order of the
# global `baseTypes` (A, C, G, T).
counts <- function(mat, Ns) {
# this does it the above for a given type of base
readCounts <- function(base) {
# We generate a matrix in which rows represent sequences and columns positions.
# It contains the number of reads per sequence (identical for all cells of
# the same row)
Nmat <- matrix(Ns, nrow(mat), ncol(mat))
# we remove counts for other bases
Nmat[mat != base] <- 0L
# so we can count reads for the base we're interested in
colSums(Nmat)
}
lapply(baseTypes, readCounts)
}
# needed to extract the columns containing the bases from the data table
coln <- names(reads)[1:100]
# We do the base counts by file and location. Note that the ".SD" special symbol
# of data.table would be shorter in code, but it's somehow much slower to run.
# This call creates a file in which each location is represented by 100 rows
# (consensus sequences will appear vertically)
countMat <- reads[, counts(do.call(cbind, mget(coln)), N), by = .(FILE, LOC)]
# We prepare a data.table that will contain consensus sequences
cons <- unique(countMat[, .(FILE, LOC)])
# We turn base counts into a matrix, for operations below
countMat <- as.matrix(countMat[, 3:6, with = F])
# at each position, we get the highest read counts among the 4 bases
maxCounts <- rowMaxs(countMat)
# and corresponding frequency among reads
maxFreqs <- maxCounts / rowSums(countMat)
# we compute a position quality score which takes the base frequency and the
# read count into account. Note that a read count <=1 will results in a score of
# ~0. This may not be appropriate for all experiments.
maxScores <- maxFreqs * (1 - 1 / maxCounts)
# we now select the most frequent base at each position, into a vector
consBases <- baseTypes[rowMatches(maxCounts, countMat)]
# and since there are exactly 100 bases per location, we can easily convert this
# vector to matrix where one row is a location
consBases <- matrix(consBases, ncol = 100, byrow = T)
# we do the same for the scores
consScores <- matrix(maxScores, ncol = 100, byrow = T)
rm(countMat, maxCounts, maxFreqs, maxScores)
# STEP FOUR, keystream decoding --------------------------------------------------------
# We do the keystream decoding on consensus sequences. The four different keys
# are used as rows of an integer matrix:
cat("keysteam decoding...\n")
ks <- stringToMatrix(c(
"002000010110102111112122210011122221010102121222022000221201020221002121121000212222021211121122221",
"202020122121210120001200210222112020222022222220220001221012111022121120202022211221112202002121022",
"221221101200221120220011002222100000020200021121021020122100021201010210202002000101020022121100100",
"100122100011112100120210020011102201122122100100120122212000021220022012202201100010212222110222020"))
storage.mode(ks) <- "integer"
# We convert bases to numbers
d <- chartr("ACGT", "0123", consBases)
storage.mode(d) <- "integer"
# We save the first number (base) of each consensus, for later
first <- d[, 1]
# We compute base-4 differences between adjacent numbers, then subtracting 1
d <- (d[, 2:100] - d[, 1:99]) %% 4L - 1L
# We determine the key to subtract (row of the ks matrix to use) depending on sequence location
phase <- cons$LOC %% 4L + 1L
# We subtract (base 3) the appropriate key and add 1
d <- (d - ks[phase, ]) %% 3L + 1L
# We sum (base 4) successive trits to obtain base numbers (the reciprocal of the difference step)
d <- rowCumsums(cbind(first, d)) %% 4L
# We reverse complement sequences at odd location indices. We use the fact that
# bases are still encoded as numbers, so 3 - base is the complement of base
odd <- cons$LOC %% 2L > 0
d[odd, ] <- 3L - d[odd, ncol(d):1]
# We convert numbers back to bases (chartr() may also work)
d <- matrix(baseTypes[d + 1], nrow = nrow(d))
# We add flattened consensus sequences and average score at each location index to
# the table (we actually don't use these sequences afterwards, but it can be
# used as a visual check)
cons[, c("SEQ", "SCORE") := .(do.call(stri_c, as.data.frame(d)), rowMeans(consScores))]
# we now estimate the length of encoded files, based on the location indices of
# high-quality consensuses. To avoid generating unnecessary long or broken DNA
# sequences
# we do this by creating a table discarding low-score consensuses
temp <- cons[SCORE > 0.75, .(FILE, LOC)]
rows <- 2:nrow(temp)
# We then find "gaps" between successive retained locations (a gap of 6 is arbitrary)
gap <- temp[, c(LOC[rows] - LOC[rows - 1] > 6, F)]
# the last location index with good consensus for each file
ends <- temp[gap == T, min(LOC), by = FILE]$V1
# and the first
starts <- temp[, min(LOC), by = FILE]$V1
# We retain file numbers with enough "good" locations (20 is arbitrary)
valid <- which((ends - starts) > 20) - 1
if (length(valid) == 0) stop("no file with reliable sequence could be found. Exiting.")
rm(temp, starts, gap, rows)
cat(stri_c("file", valid, ", estimated size: ", ends[valid + 1] * 25), sep = "\n")
# STEP FIVE, file consensus generation -----------------------------------------------------
# We now generate the global consensus sequences for files
cat("generating file DNA sequences...\n")
# for this, we cbind the base and score matrices (even if that turns numbers
# into strings). Not very elegant but practical in the following function
d <- cbind(d, consScores)
# We concatenate and overlap successive local consensuses from a file
# Returns a character matrix with one row per base position of the decoded
# file: columns 1-4 hold the (up to 4) overlapping base calls, columns 5-8
# the matching scores (as strings, since the matrix is character), and
# column 9 the file id. Closure over script globals `cons`, `ends`, `d`.
overlapConsensuses <- function(file) {
# they will be stored in this matrix, vertically. We have 2*4 columns as we
# save the base and its score for the 4 overlapping consensuses at each
# position
mat <- matrix("", ncol = 8, nrow = ends[file + 1] * 25 + 100)
# We select the data table rows we need (that is, excluding what's after the estimated file end)
f <- cons[, FILE == file & LOC <= ends[file + 1]]
# and corresponding location indices
loc <- cons[f == T, LOC]
# we tolerate that some locations (before the last one) may not be represented, as consensuses overlap
missing <- setdiff(1:ends[file + 1], loc)
loc <- c(loc, missing)
# we extract the relevant rows of the base/score matrix, and add empty rows for missing locations
bases <- rbind(d[f, ], matrix("", length(missing), 200))
# phase = the future column of the consensus in the result matrix
phase <- loc %% 4L
# this loop concatenates consensuses of the same "phase", since they are adjacent and not overlapping
for (p in 0:3) {
# TRUE for the consensuses of the given phase
f <- phase == p
# the first consensus for this phase will start at this offset in the file
start <- p * 25 + 1
# the rows of the "d" matrix corresponding to the location indices of this phase
rows <- which(f)[order(loc[f])]
# the last position of the concatenated consensus in mat
end <- start + length(rows) * 100 - 1
# we can now concatenate consensuses (matrix rows) for each phase and place them in mat
mat[start:end, p + 1] <- t(bases[rows, 1:100])
mat[start:end, p + 5] <- t(bases[rows, 101:200])
}
# we add file ID as the last column
cbind(mat, as.character(file))
}
# we apply the function to each valid file. We rbind all in a single matrix
overlaps <- do.call(rbind, lapply(valid, overlapConsensuses))
# we extract the base scores and file ID
baseInfo <- overlaps[, 5:9]
storage.mode(baseInfo) <- "numeric"
baseInfo[is.na(baseInfo)] <- 0
# for each of the 4 bases, we sum its scores over the 4 overlapping consensuses
# Closure over script globals: `baseInfo` (numeric scores, cols 1:4) and
# `overlaps` (character base calls, cols 1:4). Returns one summed score per
# position (row).
baseScores <- function(base) {
# we store scores in a temporary matrix in the function, as we replace them with zeros for the other bases
scores <- baseInfo[, 1:4]
scores[overlaps[, 1:4] != base] <- 0
rowSums(scores)
}
# applying the function for all bases
scores <- vapply(baseTypes, baseScores, numeric(nrow(overlaps)))
maxScores <- rowMaxs(scores)
# hist(maxScores, breaks = 100)
# we retain the base with highest score at each position
consensus <- baseTypes[rowMatches(maxScores, scores)]
# and we create a data table with relevant info (some actually not used afterwards)
consensus <- data.table(
base = consensus,
score = maxScores / rowSums(overlaps[, 1:4] != ""),
file = baseInfo[, 5],
pos = occurrences(baseInfo[, 5])
)
# hist(consensus$score, breaks = 100, xlim = 0:1, main = "position quality scores")
# STEP SIX, we convert DNA bases to bytes ---------------------------------------------
cat("converting to bytes and saving to file(s)...\n")
# this function does not tolerate missing or erroneous data in files, TO IMPROVE
# Decode one encoded file from the global consensus back to bytes.
#   fileID: integer id of the encoded file (from the two leading index trits)
#   folder: output directory; bytes are written to <folder>/file<fileID>
# Uses script globals: `consensus` (final base calls with file/pos columns)
# and `code` (Huffman table; column V4 = trit words, column `byte`).
writeFile <- function(fileID, folder) {
# the DNA sequence for each file, as a base vector. We prepend base "A", as specified
bases <- c("A", consensus[file == fileID, base])
trits <- toTrits(bases)
# we compute the length of the file, encoded in the first 25 trits.
len <- toBase10(trits[1:25])
# NOTE(review): elementwise `|` still works when len is NA (NA > x is NA,
# and NA | TRUE is TRUE), but scalar `||` with the is.na() test first would
# be the conventional form.
if (len > length(trits) - 25 | is.na(len)) {
warning("possible broken file\n")
len <- length(trits) - 25
}
# because "NA" would take 2 chars in the flattened string
trits[is.na(trits)] <- 3L
# we flatten the trit vector in a string
flat <- stri_flatten(trits[26:(len + 25)])
# we extract 5- and 6-trit words from that string. We need to find where the 6-trit words are
# all the possible 5-trit words in the string
word5 <- stri_sub(flat, 1:(len - 4), length = 5)
# same for 6-trit words
word6 <- stri_sub(flat, 1:(len - 5), length = 6)
# we make sure both are in equal numbers (needed to create the matrix below)
word6 <- c(word6, rep("", length(word5) - length(word6)))
# we determine whether words are legal, as a 2-column boolean matrix
allowed <- cbind(word5 %chin% code$V4, word6 %chin% code$V4)
# to iterate over this matrix (see loop below),
col <- 1:2
row <- 1
# cells of this matrix will be TRUE for the words we retain
retained <- matrix(F, length(word5), 2)
# we iterate from row 1
while (row <= nrow(allowed)) {
if (allowed[row, col[1]]) {
# if word is legal we mark its location as TRUE
retained[row, col[1]] <- T
# and move to the row of the next word to check (based on word size)
row <- row + 4 + col[1]
} else {
# else if word of alternate length is illegal as well, we abort
if (!allowed[row, col[2]]) stop(paste("error in file", fileID, "at position", row))
# if not, we go check the word of alternate length at this position, in the other column
col <- rev(col)
}
}
words <- rbind(word5, word6)[t(retained)]
# map each retained trit word to its byte via the Huffman table
bytes <- code[chmatch(words, V4), byte]
writeBin(as.raw(bytes), con = gsub("//", "/", stri_c(folder, "/file", fileID), fixed = T))
invisible(NULL)
}
# decode and write every file that passed the reliability check above
for (file in valid) writeFile(file, outputFolder)
|
ee2946bcc51c7b69d220f5add2564a0c79238adf
|
d423ba647fdc41d4aa41167c8ae6824d36c8e428
|
/Rcode/cod_condicion_hidrica.R
|
b1610aef3ae3216c537113d1b7b3f7a49cd5619d
|
[] |
no_license
|
gastonquero/Paper_Fot_Soja
|
2943b880716050b0bcddbd6d21089e5f21bab445
|
319fadbaf5cd317c5eb92b157f54c769c563046b
|
refs/heads/master
| 2023-08-30T09:31:18.559439
| 2021-11-12T18:32:22
| 2021-11-12T18:32:22
| 381,450,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,465
|
r
|
cod_condicion_hidrica.R
|
################################################################################
# Analisis de datos de consumo de agua soja ensayos preliminares #
# #
# Don Mario 6.8.i y Génesis 5601 #
# Gaston Quero - Marta Sainz - Mauro More - Noelia Torres - Sebastian Simondi #
# 8/10/2021 #
################################################################################
getwd ()
setwd ("R:/Paper_Fot_Soja")
# Paquetes
library (lme4)
library (emmeans)
library ("car")
library ("nlmrt")
library ("easynls")
library (tidyverse)
library ("ggplot2")
library ("lattice")
library ("latticeExtra")
library (multcompView)
library (multcomp)
library ("dplyr")
library (ggjoy)
library ("ggridges")
library (hrbrthemes)
library(tidyverse)
library(forcats)
library("viridis")
library("lmerTest")
library(lubridate)
library(nycflights13)
library(nlme)
library(xtable)
library(stringr)
library(data.table)
library(svMisc)
library(ggpubr)
library("ggsci")
library("FactoMineR")
library("factoextra")
library("corrplot")
## cargar datos
#siR:\Paper_Fot_Soja\Data\rawdata\wtr\pesos_macetas
## FS.3 #########
SPM.FS.3 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/SPM_FS.3.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.3 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/cod.SPM_FS.3.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.3$maceta <- as.character (cod.SPM.FS.3$maceta)
head ( SPM.FS.3)
SPM.FS.3.1 <- SPM.FS.3 %>%
dplyr::mutate (maceta = as.character(maceta) ) %>%
dplyr::inner_join(cod.SPM.FS.3, by="maceta" ) %>%
dplyr::mutate (x = id)%>%
tidyr::separate(x , c(NA, "genotype", "amb.luminico", "cond.hidrc", "rep"))%>%
dplyr::mutate (ensayo = "FS.3") %>%
dplyr::select ("ensayo", "amb.luminico","genotype", "cond.hidrc","maceta","rep", "id", everything())%>%
dplyr::mutate (Date = dmy (dia))%>%
dplyr::mutate (pot= str_c (ensayo,maceta, id, sep="_" ))
mean.pss <- mean (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4)) # Esto es el peso seco del sustrato
sd.pss <- sd (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4))
pm <- 31 + 7 # Esto es el peso de la maceta
S <- mean.pss + pm
peso.CC <- 740
list.pot <- unique (SPM.FS.3.1$pot)
##### Esta es la funcion #####
# Plot the watered-soil-pot-mass (WSPM) time series for a single pot and
# save it as a TIFF under Figures/Plots.WSPM/<pot>.tiff.
#   dt  - data for one pot; needs columns Date, peso and pot
#   S   - dry substrate + pot mass, drawn as the red reference line
#   PCC - pot mass at field capacity, drawn as the blue reference line
# Returns (and prints) the ggplot object.
run.plot.WSPM <- function ( dt = NULL, S=NULL, PCC = NULL){
  dir.create (file.path ("Figures", "Plots.WSPM"), showWarnings = FALSE)
  # days elapsed since the first measurement of this pot
  t1.1 <- min (dt$Date)
  dt.1 <- dt %>%
    dplyr::mutate (dias = Date - t1.1) %>%
    dplyr::mutate (x = as.numeric(dias))
  wspm.curve <- ggscatter (dt.1 , x = "x", y = "peso",
                           title = unique(dt.1$pot),
                           xlab = "time (d)",
                           ylab = "WSPM (g)",
                           point = FALSE) +
    geom_line (color = "black", linetype = 2, size = 0.5) +
    geom_point(color = "black", size = 1.5) +
    # Bug fix: these reference lines previously read the global `peso.CC`
    # and ignored the PCC argument (callers passed PCC = peso.CC, so the
    # output was the same in practice); use the parameter instead.
    geom_hline(yintercept = PCC, linetype = 3, size = 1, color = "blue") +
    geom_hline(yintercept = S, linetype = 3, size = 1, color = "red") +
    # mean pot mass over the whole series
    geom_hline(yintercept = mean (dt.1$peso, na.rm = TRUE), linetype = 2, size = 1)
  ggexport ( wspm.curve, filename = str_c("./Figures/Plots.WSPM/", unique(dt.1$pot), ".tiff"),
             width = 700, height = 500)
  print (wspm.curve)
}
FS.3.wsp <- lapply(list.pot, function(filt.pot){
print (filt.pot)
pot <- SPM.FS.3.1 %>%
dplyr::filter (pot == filt.pot)
run.plot.WSPM (dt = pot ,S=S, PCC = peso.CC)
})
## FS.4 #########
SPM.FS.4 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/SPM_FS.4.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.4 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/cod.SPM_FS.4.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.4$maceta <- as.character (cod.SPM.FS.4$maceta)
head ( SPM.FS.4)
SPM.FS.4.1 <- SPM.FS.4 %>%
dplyr::mutate (maceta = as.character(maceta) ) %>%
dplyr::inner_join(cod.SPM.FS.4, by="maceta" ) %>%
dplyr::mutate (x = id)%>%
tidyr::separate(x , c(NA, "genotype", "amb.luminico", "cond.hidrc", "rep"))%>%
dplyr::mutate (ensayo = "FS.4") %>%
dplyr::select ("ensayo", "amb.luminico","genotype", "cond.hidrc","maceta","rep", "id", everything())%>%
dplyr::mutate (Date = dmy (dia))%>%
dplyr::mutate (pot= str_c (ensayo, maceta, id, sep="_" ))
mean.pss <- mean (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4)) # Esto es el peso seco del sustrato
sd.pss <- sd (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4))
pm <- 31 + 7 # Esto es el peso de la maceta
S <- mean.pss + pm
peso.CC <- 740
list.pot <- unique (SPM.FS.4.1$pot)
FS.4.wsp <- lapply(list.pot, function(filt.pot){
print (filt.pot)
pot <- SPM.FS.4.1 %>%
dplyr::filter (pot == filt.pot)
run.plot.WSPM (dt = pot ,S=S, PCC = peso.CC)
})
####
## FS.5 #########
SPM.FS.5 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/SPM_FS.5.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.5 <- read_delim (file = "./Data/rawdata/wtr/pesos_macetas/cod.SPM_FS.5.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
cod.SPM.FS.5$maceta <- as.character (cod.SPM.FS.5$maceta)
head ( SPM.FS.5)
SPM.FS.5.1 <- SPM.FS.5 %>%
dplyr::mutate (maceta = as.character(maceta) ) %>%
dplyr::inner_join(cod.SPM.FS.5, by="maceta" ) %>%
dplyr::mutate (x = id)%>%
tidyr::separate(x , c(NA, "genotype", "amb.luminico", "cond.hidrc", "rep"))%>%
dplyr::mutate (ensayo = "FS.5") %>%
dplyr::select ("ensayo", "amb.luminico","genotype", "cond.hidrc","maceta","rep", "id", everything())%>%
dplyr::mutate (Date = dmy (dia))%>%
dplyr::mutate (pot= str_c (ensayo, maceta, id, sep="_" ))
mean.pss <- mean (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4)) # Esto es el peso seco del sustrato
sd.pss <- sd (c(526.2 , 517.6, 521, 518.2, 525.6, 527.4))
pm <- 31 + 7 # Esto es el peso de la maceta
S <- mean.pss + pm
peso.CC <- 740
list.pot <- unique (SPM.FS.5.1$pot)
FS.5.wsp <- lapply(list.pot, function(filt.pot){
print (filt.pot)
pot <- SPM.FS.5.1 %>%
dplyr::filter (pot == filt.pot)
run.plot.WSPM (dt = pot ,S=S, PCC = peso.CC)
})
head(consumo.1)
## condicion hidrica HP%
CC.MN.1 <- read_delim (file = "./Data/rawdata/macetas_cc.txt" ,
delim ="\t", quote = "\"",
escape_backslash = FALSE,
escape_double = TRUE,
col_names = TRUE,
col_types = NULL,
locale = default_locale(),
na = "NA")
summary (CC.MN.1)
peso.sust.seco <- mean (CC.MN.1$peso.sust.seco, na.rm = TRUE)
peso.bolsa <- mean (CC.MN.1$peso.bolsa, na.rm = TRUE)
consumo.2 <- consumo.1 %>%
dplyr::mutate (agua = pesof.1 - peso.sust.seco - peso.bolsa) %>%
dplyr::mutate (hp.porc = (agua *100)/peso.sust.seco)
consumo.2 %>%
group_by (tratamiento) %>%
summarise (max(hp.porc, na.rm=TRUE))
list.pot2 <- unique (consumo.2$pot)
# Plot the gravimetric water content (HP, %) time series for a single pot
# and save it as a TIFF under Figures/Plots.HP/<pot>.tiff.
#   dt     - data for one pot; needs columns Date, hp.porc and pot
#   t1     - start date as a "dd/mm/yyyy" string; days are counted from it
#   hp.ref - unused at present (reference line is commented out below)
# NOTE(review): `dt <- dt` is a no-op kept from the original; hp.ref is
# accepted but never read — confirm whether a reference line was intended.
run.plot.hp <- function ( dt = NULL, t1=NULL, hp.ref =NULL){
  dir.create (file.path ("Figures", "Plots.HP"), showWarnings = FALSE)
  dt <- dt
  # parse the start date and compute days elapsed for the x axis
  t1.1 <- dmy (t1)
  dt.1 <- dt %>%
    dplyr::mutate (dias = Date - t1.1)
  hp.curve <- ggscatter (dt.1 , x = "dias", y = "hp.porc",
                         title = unique(dt.1$pot),
                         ylim=c(0, 25),
                         xlab = "time (d)",
                         ylab = "HP (%)",
                         point=FALSE) +
    geom_line(color = "gray48", linetype =2, size = 0.5) +
    geom_point(color = "black", size = 1.5) +
    # dashed line at the series mean HP
    geom_hline(yintercept = mean (dt.1$hp.porc, na.rm = TRUE ), linetype =3, size = 1) #+
  #geom_hline(yintercept = peso.obj, linetype =2, col="red", size = 1)
  ggexport ( hp.curve, filename = str_c("./Figures/Plots.HP/",unique(dt.1$pot), ".tiff"),
             width = 700, height = 500)
  print (hp.curve)
}
X.HP <- lapply(list.pot2, function(filt.pot){
print (filt.pot)
pot <- consumo.2 %>%
dplyr::filter (pot == filt.pot)
run.plot.hp (dt = pot , t1= "28/11/2019", hp.ref =NULL)
})
summary (consumo.2)
t1= "28/11/2019"
t1.1 <- dmy (t1)
consumo.2 <- consumo.2 %>%
dplyr::mutate (dias = Date - t1.1)
ggscatter (consumo.2 , x = "dias", y = "hp.porc", facet.by = "clon",
color="tratamiento",
palette = c("navyblue", "darkorange"),
#title = unique(dt.1$pot),
ylim=c(0, 25),
xlab = "time (d)",
ylab = "HP (%)",
point=TRUE)
summary (consumo.2)
consumo.3 <- consumo.2 %>%
group_by (tratamiento)%>%
summarise(hp.mean = mean(hp.porc , na.rm = TRUE), agua.mean = mean(agua , na.rm = TRUE))
|
b5eaf9841ca981cf1f3df4fb4f243e92d970d1dd
|
5a5bc9e1b0d59859b4e213b092e19afe232819e1
|
/R/wind/wind_downscale3.R
|
cd2dbd7a1e5b157079bd6fef44edfbbb4c0cf134
|
[] |
no_license
|
jrmosedale/microclimates
|
bf469e07b688e9342c0a8d767db84ee428e778f3
|
ae2e61969631506c523bd618c9106a61b00355dd
|
refs/heads/master
| 2021-04-30T15:18:19.091728
| 2018-02-12T11:31:16
| 2018-02-12T11:31:16
| 121,236,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,482
|
r
|
wind_downscale3.R
|
library(raster)
library(rgdal)
# # # # # # # # # # # # # # # # # # # # # #
# # # Compare model with Culdrose wind data
# # # Script: loads observed Culdrose weather, extracts modelled wind for
# # # the same location/period, and compares the two by regression.
# # # # # # # # # # # # # # # # # # # # # #
# read in two files:
#(1) Culdrose weather data - some of these data are missing and the impution is a bit dodgy
#(2) tic - this is a version that has better impututed data, (but the time series isn't quite as long)
weather<-read.csv("C:/Lizardrefugiatidy/Data/Satelittemodeling/allyears.data.csv")
tic<-read.csv("C:/Lizardrefugiatidy/Data/Satelittemodeling/Culdroseall_tic_2014pasted.csv")
# Uses tic imputed values (rows 169:332928 of `weather` are overwritten;
# NOTE(review): the offset of 168 rows presumably aligns the two series —
# confirm against the source files).
weather$pressure[169:332928]<-tic$pressure
weather$temperature[169:332928]<-tic$temperature
weather$rel.hum[169:332928]<-tic$rel.hum
weather$windspeed[169:332928]<-tic$windspeed
weather$winddir[169:332928]<-tic$winddir
weather$cloudcover[169:332928]<-tic$cloudcover
# wind directions of 355 degrees and above are wrapped to 0 (north)
weather$winddir<-ifelse(weather$winddir<355,weather$winddir,0)
# extract data for required period only (January 2014)
sel<-which(weather$year==2014)
weather<-weather[sel,]
sel<-which(weather$decimal.day<32)
weather<-weather[sel,]
# extract wind data; 0.51444444 converts knots to m/s
wind.data<-data.frame(c.windspeed=weather$windspeed*0.51444444,c.winddir=weather$winddir)
# converts to wind speed at 1 metre height (Culdrose anometer @ 32m)
# Method based on Allen et al. 1998: http://www.fao.org/docrep/x0490e/x0490e07.htm#wind profile relationship
wind.data$c.windspeed<-wind.data$c.windspeed*0.6341314*0.8487155
# Convert wind to u and v components
# Important: u and v components measure direction wind is blowing too
#wind directions are usually measured in direction wind is coming from
#hence reason that wind direction has 180 added to it
wind.data$c.uwind<-wind.data$c.windspeed*sin((wind.data$c.winddir+180)*(pi/180))
wind.data$c.vwind<-wind.data$c.windspeed*cos((wind.data$c.winddir+180)*(pi/180))
# # # # # # # # # # # # # # # # # # # # # # # # #
# Obtain modelled wind data for each day and hour
# In this case, all data for January 2014
# # # # # # # # # # # # # # # # # # # # # # # # #
# create variables in data frame (filled row-by-row in the loop below)
wind.data$p.windspeed<-0
wind.data$p.winddir<-0
i<-1
for (day in 1:31)
{
  # looping can take quite a long time. This keeps tabs on progress printing the day
  tp<-paste("Day: ",day,sep="")
  print(tp)
  for (hr in 0:23)
  {
    # loads arrays of wind speed and direction for each hour produce by wind_downscale2.R
    # NB - as original names of these were m1.out and m2.out, this is the name they are automatically assigned here
    filein1<-paste("C:/Jonathanmodel/wind/dataout/strength_2014_1_",day,"_",hr,".r",sep="")
    filein2<-paste("C:/Jonathanmodel/wind/dataout/direction_2014_1_",day,"_",hr,".r",sep="")
    load(filein1)
    load(filein2)
    # Convert arrays to rasters (British National Grid extent of the model domain)
    r.speed<-raster(m1.out,xmn=79400,xmx=343500,ymn=0,ymx=159300)
    r.direction<-raster(m2.out,xmn=79400,xmx=343500,ymn=0,ymx=159300)
    # Extract data for Culdrose (Easting = 167162.8, Northing = 25489.81)
    xy<-data.frame(x=167162.8,y=25489.81)
    # store values in data frame
    wind.data$p.windspeed[i]<-extract(r.speed,xy)
    wind.data$p.winddir[i]<-extract(r.direction,xy)
    i<-i+1
  }
}
# Convert wind to u and v components
# Again, u and v components measure direction wind is blowing too
# but wind directions are usually measured in direction wind is coming from
wind.data$p.uwind<-wind.data$p.windspeed*sin((wind.data$p.winddir+180)*(pi/180))
wind.data$p.vwind<-wind.data$p.windspeed*cos((wind.data$p.winddir+180)*(pi/180))
# Comparison plots + lines through points (abline function)
par(mfrow=c(2,2))
plot(c.windspeed~p.windspeed,data=wind.data,xlab="Modelled wind speed",ylab="Culdrose wind speed")
abline(lm(c.windspeed~p.windspeed,data=wind.data))
# Wind direction a little misleading as not on circular plot, so 359 looks vastly different from 0
plot(c.winddir~p.winddir,data=wind.data,xlab="Modelled wind direction",ylab="Culdrose wind direction")
abline(lm(c.winddir~p.winddir,data=wind.data))
plot(c.uwind~p.uwind,data=wind.data,xlab="Modelled easterly wind component",ylab="Culdrose easterly wind component")
abline(lm(c.uwind~p.uwind,data=wind.data))
plot(c.vwind~p.vwind,data=wind.data,xlab="Modelled northerly wind component",ylab="Culdrose northerly wind component")
abline(lm(c.vwind~p.vwind,data=wind.data))
# Linear regression of modelled versus Culdrose data, with intercept forced at zero
summary(lm(c.windspeed~p.windspeed+0,data=wind.data))
|
ad5bec0ec38b4c55ba35c41d6f2c6348387561af
|
c2dbfb76ced0fec974431508e0dfcb9833d6d8d8
|
/Caplazi_Isabel_3-22_Inclass.R
|
7eb7b2d17382481c01e2f8937789f863cb96c0c9
|
[] |
no_license
|
icaplazi/DataAnalytics2021_Isabel_Caplazi
|
90cdd89e95f809ab514ca70c964d279e207e7133
|
eaddfb8672999a45e59cefe289ce7ffbacd30a4c
|
refs/heads/main
| 2023-04-21T10:11:57.589842
| 2021-04-01T13:38:58
| 2021-04-01T13:38:58
| 334,179,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,314
|
r
|
Caplazi_Isabel_3-22_Inclass.R
|
# In-class PCA walkthrough: principal components on the USArrests data set
# (via prcomp, with scaling) and on the iris measurements (via princomp,
# correlation matrix). Interactive script — prints, plots and help calls.
############# USArrests Data Set #############
data("USArrests") # US arrest data set
View(USArrests) # opens data in another tab
states = row.names(USArrests)
states # reads off state names
names(USArrests) # column names in data set
apply(USArrests, 2, mean) # applies mean function to each column (MARGIN = 2)
apply(USArrests, 2, var) # variances of columns
# scales variables to have a mean of 0
# scale = TRUE additionally rescales each variable to unit variance
pr.out = prcomp(USArrests, scale = TRUE)
names(pr.out)
pr.out$center # mean before scale
pr.out$scale # stdev before scale
pr.out$rotation # displays loadings
dim(pr.out$x)
biplot(pr.out, scale = 0) # arrows represent loadings
pr.out$sdev # stdev of principal component
pr.var = pr.out$sdev^2 # principal component variance
pve = pr.var/sum(pr.var)
pve # proportion of variance by each component
############ IRIS DATA SET ############
data("iris") # iris data set
head(iris) # first 6 points
irisdata1 <- iris[,1:4]
irisdata1 # columns 1-4 of iris data (numeric measurements only)
help("princomp")
# cor = TRUE uses the correlation matrix (requires no constant variables)
principal_comp <- princomp(irisdata1, cor = TRUE, score = TRUE)
summary(principal_comp) # 4 different features
plot(principal_comp) # scree plot of component variances
plot(principal_comp, type = "l") # line plot
biplot(principal_comp)
|
47cc13d7517d2b6e886319c12d732398afd06200
|
b253ea9ffd151dbfe884a01592d910d8244b3984
|
/Scripts/old_scripts/final_functions.R
|
90f5da3dae45fd6e150e13ffbbfd68bbcf877ba6
|
[] |
no_license
|
benmbrew/LFS
|
4d0bb1b6df268813b69c298cc310f1f3fc65a9b6
|
1384fc1e5a32e15853bfbbf3df62999fbe1b97de
|
refs/heads/master
| 2021-03-12T19:08:05.281863
| 2019-10-03T15:09:48
| 2019-10-03T15:09:48
| 47,150,327
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216,108
|
r
|
final_functions.R
|
# Reduce a model data frame to the four core clinical columns plus the
# feature (probe) columns, which are assumed to start at column 10 —
# matching the column layout of the residualized model data.
getSubset <- function(model_data) {
  probe_cols <- colnames(model_data)[seq(10, ncol(model_data))]
  keep <- c('age_diagnosis', 'age_sample_collection', 'gender', 'type', probe_cols)
  model_data[, keep]
}
# Attach sample ids and clinical data to a raw beta (methylation) matrix.
#   beta         - probes x samples matrix; sample identifiers as colnames
#   id_map_1/2   - id maps (cleanIdMap output) for the two array batches
#   clinical_dat - clinical data frame joined on 'ids'
#   controls     - TRUE when the second batch is the controls plate
#                  (identifier prefix '200'; otherwise prefix '20')
# Returns one data frame: 9 clinical columns followed by the cg probes,
# de-duplicated on tm_donor. Relies on sibling helpers getIdName() and
# cleanIds() defined elsewhere in this file.
process_rg_set <-
  function(beta, id_map_1, id_map_2, clinical_dat, controls) {
    # map array identifiers to sample ids
    beta <- findIdsCombined(beta, id_map_1 = id_map_1, id_map_2 = id_map_2, controls = controls)
    if(controls) {
      # separate beta into cases vs controls by identifier prefix
      beta_cases <- beta[!grepl('^200', rownames(beta)),]
      beta_controls <- beta[grepl('^200', rownames(beta)),]
    } else {
      # separate beta
      beta_cases <- beta[!grepl('^20', rownames(beta)),]
      beta_controls <- beta[grepl('^20', rownames(beta)),]
    }
    # get id name (only cases)
    beta_cases <- getIdName(beta_cases)
    # clean ids
    beta_cases <- cleanIds(beta_cases)
    beta_controls <- cleanIds(beta_controls)
    # remove 'ch' (non-cg) probes from column names
    beta_cases <- beta_cases[, !grepl('ch', colnames(beta_cases))]
    beta_controls <- beta_controls[, !grepl('ch', colnames(beta_controls))]
    ##########
    # join data
    ##########
    # inner join methylation with clinical data on sample id
    beta_cases <- dplyr::inner_join(clinical_dat, beta_cases, by = 'ids')
    beta_controls <- dplyr::inner_join(clinical_dat, beta_controls, by = 'ids')
    # remove NAs from tm_donor
    beta_cases <- beta_cases[!is.na(beta_cases$tm_donor),]
    beta_controls <- beta_controls[!is.na(beta_controls$tm_donor),]
    # remove duplicates (keeps first occurrence per tm_donor)
    beta_cases <- beta_cases[!duplicated(beta_cases$tm_donor),]
    beta_controls <- beta_controls[!duplicated(beta_controls$tm_donor),]
    ##########
    # get data in format for saving
    ##########
    # get cg_sites
    cg_sites <- colnames(beta)[grepl('cg', colnames(beta))]
    # subset data by columns of interest and cg_sites
    beta_cases <- beta_cases[, c('ids',
                                 'p53_germline',
                                 'cancer_diagnosis_diagnoses',
                                 'age_diagnosis',
                                 'age_sample_collection',
                                 'gender',
                                 'sentrix_id',
                                 'family_name',
                                 'tm_donor',
                                 cg_sites)]
    # subset data by columns of interest and cg_sites
    beta_controls <- beta_controls[, c('ids',
                                       'p53_germline',
                                       'cancer_diagnosis_diagnoses',
                                       'age_diagnosis',
                                       'age_sample_collection',
                                       'gender',
                                       'sentrix_id',
                                       'family_name',
                                       'tm_donor',
                                       cg_sites)]
    # combine data
    beta <- rbind(beta_cases,
                  beta_controls)
    # remove duplicated tm donor across the combined set
    beta <- beta[!duplicated(beta$tm_donor),]
    return(beta)
  }
# Split the data into cases and 'Unaffected' controls, de-duplicate the
# controls on tm_donor_ from both directions, and return both variants:
# list(cases + first-kept controls, cases + last-kept controls).
get_diff_dups <- function(temp_data) {
  is_control <- grepl('Unaffected', temp_data$cancer_diagnosis_diagnoses)
  case_rows <- temp_data[!is_control, ]
  control_rows <- temp_data[is_control, ]
  # keep the first vs the last occurrence of each duplicated donor
  controls_first <- control_rows[!duplicated(control_rows$tm_donor_, fromLast = FALSE), ]
  controls_last <- control_rows[!duplicated(control_rows$tm_donor_, fromLast = TRUE), ]
  list(rbind(case_rows, controls_first),
       rbind(case_rows, controls_last))
}
# Residualize every probe column (columns 10:ncol) on age at sample
# collection, returning a data frame with the four core clinical columns
# followed by the residuals, named after the original probes.
#   model_data - data frame: 9 clinical columns then numeric probe columns
getResidual <- function(model_data)
{
  # drop incomplete rows first so every regression uses the same samples
  model_data <- model_data[complete.cases(model_data),]
  feature_names <- colnames(model_data)[10:ncol(model_data)]
  # Regress all probes on age in one multi-response lm() fit; residuals
  # are identical to fitting lm() per column in a loop, but a single
  # QR decomposition is reused, which is far faster for many probes.
  probes <- as.matrix(model_data[, 10:ncol(model_data)])
  resid_data <- as.data.frame(resid(lm(probes ~ model_data$age_sample_collection)))
  # reassemble clinical columns + residualized probes under the original names
  model_data <- cbind(model_data$age_diagnosis, model_data$age_sample_collection, model_data$gender,
                      model_data$type, resid_data)
  colnames(model_data) <- c('age_diagnosis', 'age_sample_collection', 'gender', 'type', feature_names)
  return(model_data)
}
# Roughly balance the age distribution of cases vs controls by randomly
# removing 10 controls whose age at sample collection falls in the
# over-represented 100-200 or 300-400 month ranges.
# NOTE(review): sample() makes the result non-deterministic, and
# sample(remove_index, 10) errors if fewer than 10 candidate controls
# exist — confirm callers guarantee enough candidates. The final
# assignment is the (invisible) return value of the function.
getBal <- function(dat_cases, dat_controls) {
  # combine data
  dat <- rbind(dat_cases, dat_controls)
  # # # balance age
  # hist(dat$age_sample_collection[dat$type == 'cases'])
  # hist(dat$age_sample_collection[dat$type == 'controls'])
  # remove a few from ranges 100-200, 300-400
  # randomly remove controls in the over-represented ranges to balance classes
  remove_index <- which(dat$type == 'controls' & ((dat$age_sample_collection >= 100 & dat$age_sample_collection <= 200) |
                                                    (dat$age_sample_collection >= 300 & dat$age_sample_collection <= 400)))
  remove_index <- sample(remove_index, 10, replace = F )
  dat <- dat[-remove_index,]
}
##########
# get family cancer status and ratio
##########
# Compute per-sample family cancer-history covariates and join them back
# onto the cases and controls data frames.
#   fam_num_cancer   - number of OTHER family members with cancer (the
#                      sample itself is excluded from its own count)
#   fam_cancer_ratio - cancer : no-cancer ratio among the other members,
#                      with special-cased denominators of 0 and 1
# Singleton families get 0 for both. Returns list(cases_full, controls_full)
# with the two new columns prepended via inner_join on tm_donor/family_name.
get_family_cancer <- function(cases_full, controls_full){
  # combine the subsetted data from both groups
  temp_full <- bind_rows(cases_full[, c('tm_donor', 'cancer_diagnosis_diagnoses', 'family_name')],
                         controls_full[, c('tm_donor', 'cancer_diagnosis_diagnoses', 'family_name')])
  # create cancer indicator ('Unaffected' diagnosis means no cancer)
  temp_full$cancer_fac <- ifelse(grepl('Unaffected', temp_full$cancer_diagnosis_diagnoses), 'no_cancer', 'cancer')
  temp_full$cancer_diagnosis_diagnoses <- NULL
  # attach family size (column 'n') to each sample
  temp <- temp_full %>%
    group_by(family_name) %>%
    tally() %>%
    left_join(temp_full)
  temp$fam_num_cancer <- NA
  temp$fam_cancer_ratio <- NA
  for(fam_name in unique(temp$family_name)){
    sub_fam <- temp[temp$family_name == fam_name,]
    if(nrow(sub_fam) > 1) {
      num_cancer <- length(which(sub_fam$cancer_fac == 'cancer'))
      num_no <- length(which(sub_fam$cancer_fac == 'no_cancer'))
      tot_fam_num <- nrow(sub_fam)
      stopifnot((num_cancer + num_no) == tot_fam_num)
      # number of family members with cancer, excluding the sample itself
      sub_fam$fam_num_cancer <- ifelse(sub_fam$cancer_fac == 'cancer',
                                       num_cancer - 1, num_cancer)
      # condition for if the denominator is zero - that is no non cancers in family - just put number of family members with cancer
      # this applies to the situation where there might be a non cancer present,
      if(num_no == 0) {
        sub_fam$fam_cancer_ratio <- num_cancer - 1
      } else if(num_no == 1){
        # get ratio of family members that have cancer to those that don't have cancer
        sub_fam$fam_cancer_ratio <- ifelse(sub_fam$cancer_fac == 'cancer',
                                           (num_cancer - 1)/num_no, num_cancer)
      } else {
        # condition if num_cancer is zero
        if(num_cancer == 0) {
          # get ratio of family members that have cancer to those that don't have cancer
          sub_fam$fam_cancer_ratio <- 0
        } else {
          # exclude the sample itself from whichever side of the ratio it is on
          sub_fam$fam_cancer_ratio <- ifelse(sub_fam$cancer_fac == 'cancer',
                                             (num_cancer - 1)/num_no, num_cancer/(num_no - 1))
        }
      }
    } else {
      # singleton family: no other members to count
      sub_fam$fam_num_cancer <- 0
      sub_fam$fam_cancer_ratio <- 0
    }
    temp[temp$family_name == fam_name,] <- sub_fam
  }
  # remove columns not needed in join
  temp <- temp[, c('tm_donor', 'family_name', 'fam_num_cancer', 'fam_cancer_ratio')]
  # join temp back with cases and controls
  cases_full <- inner_join(temp, cases_full)
  controls_full <- inner_join(temp, controls_full)
  return(list(cases_full, controls_full))
}
# Older 3-dataset variant of get_family_cancer: identical family-history
# logic, but family counts pool cases, controls AND the validation set,
# and all three data frames are returned with the new columns attached.
# Returns list(cases, controls, valid).
get_family_cancer_old <- function(cases, controls, valid){
  # combine the subsetted data from all three groups
  temp_full <- bind_rows(cases[, c('tm_donor', 'cancer_diagnosis_diagnoses', 'family_name')],
                         controls[, c('tm_donor', 'cancer_diagnosis_diagnoses', 'family_name')],
                         valid[,c('tm_donor', 'cancer_diagnosis_diagnoses', 'family_name') ])
  # create cancer indicator ('Unaffected' diagnosis means no cancer)
  temp_full$cancer_fac <- ifelse(grepl('Unaffected', temp_full$cancer_diagnosis_diagnoses), 'no_cancer', 'cancer')
  temp_full$cancer_diagnosis_diagnoses <- NULL
  # attach family size (column 'n') to each sample
  temp <- temp_full %>%
    group_by(family_name) %>%
    tally() %>%
    left_join(temp_full)
  temp$fam_num_cancer <- NA
  temp$fam_cancer_ratio <- NA
  for(fam_name in unique(temp$family_name)){
    sub_fam <- temp[temp$family_name == fam_name,]
    if(nrow(sub_fam) > 1) {
      num_cancer <- length(which(sub_fam$cancer_fac == 'cancer'))
      num_no <- length(which(sub_fam$cancer_fac == 'no_cancer'))
      tot_fam_num <- nrow(sub_fam)
      stopifnot((num_cancer + num_no) == tot_fam_num)
      # number of family members with cancer, excluding the sample itself
      sub_fam$fam_num_cancer <- ifelse(sub_fam$cancer_fac == 'cancer',
                                       num_cancer - 1, num_cancer)
      # condition for if the denominator is zero - that is no non cancers in family - just put number of family members with cancer
      # this applies to the situation where there might be a non cancer present,
      if(num_no == 0) {
        sub_fam$fam_cancer_ratio <- num_cancer - 1
      } else if(num_no == 1){
        # get ratio of family members that have cancer to those that don't have cancer
        sub_fam$fam_cancer_ratio <- ifelse(sub_fam$cancer_fac == 'cancer',
                                           (num_cancer - 1)/num_no, num_cancer)
      } else {
        # condition if num_cancer is zero
        if(num_cancer == 0) {
          # get ratio of family members that have cancer to those that don't have cancer
          sub_fam$fam_cancer_ratio <- 0
        } else {
          # exclude the sample itself from whichever side of the ratio it is on
          sub_fam$fam_cancer_ratio <- ifelse(sub_fam$cancer_fac == 'cancer',
                                             (num_cancer - 1)/num_no, num_cancer/(num_no - 1))
        }
      }
    } else {
      # singleton family: no other members to count
      sub_fam$fam_num_cancer <- 0
      sub_fam$fam_cancer_ratio <- 0
    }
    temp[temp$family_name == fam_name,] <- sub_fam
  }
  # remove columns not needed in join
  temp <- temp[, c('tm_donor', 'family_name', 'fam_num_cancer', 'fam_cancer_ratio')]
  # join temp back with cases, controls, and the validation set
  cases <- inner_join(temp, cases)
  controls <- inner_join(temp, controls)
  valid <- inner_join(temp, valid)
  return(list(cases, controls, valid))
}
# functions to be used in model_pipeline script
# data <- id_map
# Normalize an array id-map sheet: lower-case all column names and build a
# factor 'identifier' column of the form "<sentrix_id>_<sentrix_position>".
cleanIdMap <- function(data) {
  data <- as.data.frame(data)
  names(data) <- tolower(names(data))
  # the combined chip id + well position uniquely names each array sample
  data$identifier <- as.factor(paste(data$sentrix_id, data$sentrix_position, sep = '_'))
  data
}
##########
# Function that combines methylation matrices with id_map, to get ids for methylation
##########
# Attach sample ids and sentrix chip ids to a methylation matrix using two
# id maps, choosing the map by the identifier's plate prefix.
#   data_methyl - probes x samples matrix; identifiers as column names
#   id_map_1/2  - data frames with identifier, sample_name, sentrix_id
#   controls    - if TRUE, prefix '^200' routes to id_map_2; else '^20'
# Returns samples x probes data frame with identifier, ids and sentrix_id
# columns added.
findIdsCombined <- function(data_methyl, id_map_1, id_map_2, controls) {
  # samples arrive as columns; transpose so each row is one sample
  data_methyl <- as.data.frame(t(data_methyl))
  data_methyl$identifier <- rownames(data_methyl)
  data_methyl$identifier <- as.factor(data_methyl$identifier)
  data_methyl$ids <- NA
  data_methyl$sentrix_id <- NA
  # route each sample to the right id map by plate-number prefix
  pattern <- if (controls) '^200' else '^20'
  in_map_2 <- grepl(pattern, data_methyl$identifier)
  # Vectorized match() replaces the original per-sample loop (which did a
  # full subset per iteration); assumes identifiers are unique within each
  # id map — TODO confirm — as the original loop implicitly did.
  idx2 <- match(as.character(data_methyl$identifier[in_map_2]),
                as.character(id_map_2$identifier))
  data_methyl$ids[in_map_2] <- id_map_2$sample_name[idx2]
  data_methyl$sentrix_id[in_map_2] <- id_map_2$sentrix_id[idx2]
  idx1 <- match(as.character(data_methyl$identifier[!in_map_2]),
                as.character(id_map_1$identifier))
  data_methyl$ids[!in_map_2] <- id_map_1$sample_name[idx1]
  data_methyl$sentrix_id[!in_map_2] <- id_map_1$sentrix_id[idx1]
  return(data_methyl)
}
##########
# function to remove columns that have any NAs
##########
# Drop every probe column (columns probe_start:ncol) that contains any NA,
# keeping the clinical columns (1:probe_start-1) untouched.
removeNA <- function(data_frame, probe_start) {
  probe_block <- data_frame[, probe_start:ncol(data_frame)]
  # TRUE for columns with no missing values
  complete_cols <- sapply(probe_block, function(col) !anyNA(col))
  full_data <- as.data.frame(cbind(data_frame[, 1:(probe_start - 1)],
                                   probe_block[complete_cols]))
  # sanity check: no NAs may remain in the probe block
  stopifnot(all(!is.na(full_data[, probe_start:ncol(full_data)])))
  full_data
}
##########
# function to remove columns that have any NAs
##########
# Drop every probe column (columns probe_start:ncol) that contains any
# infinite value, keeping the clinical columns (1:probe_start-1) untouched.
removeInf <- function(data_frame, probe_start) {
  probe_block <- data_frame[, probe_start:ncol(data_frame)]
  # TRUE for columns with no +/-Inf entries
  finite_cols <- sapply(probe_block, function(col) all(!is.infinite(col)))
  as.data.frame(cbind(data_frame[, 1:(probe_start - 1)],
                      probe_block[, finite_cols]))
}
##########
# get mutant
##########
# Keep only samples with a known age of diagnosis AND a p53 germline
# mutation ('MUT').
getModData <- function(data)
{
  # subset data by non-missing age of diagnosis
  data <- data[!is.na(data$age_diagnosis),]
  # %in% (unlike ==) never yields NA, so samples with a missing
  # p53_germline are dropped cleanly instead of becoming all-NA rows
  # in the result (the bug the original == comparison had).
  data <- data[data$p53_germline %in% 'MUT',]
  return(data)
}
# Two-sided Kolmogorov-Smirnov test of whether x and y come from the same
# distribution. NAs are dropped first, and jitter() breaks ties so
# ks.test does not complain about tied observations (at the cost of a
# small random perturbation of the statistic).
testKS <- function(x, y)
{
  x <- x[!is.na(x)]
  y <- y[!is.na(y)]
  ks.test(jitter(x), jitter(y), alternative = 'two.sided')
}
# add age as dummy variable
# Add complementary age indicator columns split at `the_age` (months):
# age_dum_ = 1 when age at sample collection exceeds the cutoff,
# age_dum_young = 1 when it is at or below the cutoff.
get_age_dummy <- function(temp_dat, the_age){
  older <- temp_dat$age_sample_collection > the_age
  temp_dat$age_dum_ <- as.numeric(older)
  temp_dat$age_dum_young <- as.numeric(!older)
  return(temp_dat)
}
# Bin age at sample collection into quintiles (1-5) using dplyr::ntile
# and store the bin in a new 'age_var' column.
get_age_cat <- function(temp_dat){
  temp_dat$age_var <- ntile(temp_dat$age_sample_collection, 5)
  return(temp_dat)
}
##########
# remove outliers 4257 cases, 3391, 3392 controls
##########
# Drop known outlier samples from a data frame.
#   cases    - if TRUE, drop case outlier tm_donor 3955
#   controls - if TRUE, drop control outliers tm_donor 3847 and id 3484
#   val      - if TRUE, drop validation outlier id 3540
# Filtering with negated %in% (rather than !=) keeps rows whose id is NA
# intact; `!=` against NA would have turned them into all-NA rows.
removeOutlier <- function(data,
                          cases,
                          controls,
                          val) {
  if (cases) {
    data <- data[!(data$tm_donor %in% '3955'), ]
  }
  # controls outliers
  if (controls) {
    data <- data[!(data$tm_donor %in% '3847'), ]
    data <- data[!(data$ids %in% '3484'), ]
  }
  if (val) {
    data <- data[!(data$ids %in% '3540'), ]
  }
  return(data)
}
# dat = train_cases,
# wild_type = wt_data,
# boot_num = 5,
# thresh = beta_thresh,
# g_ranges = g_ranges
# Scan for differentially-methylated regions between p53 MUT carriers
# (`dat`) and wild-type samples (`wild_type`) with bumphunter.
#   dat       - cases: clinical columns plus 'cg*' probe columns
#   wild_type - wild-type samples in the same layout
#   boot_num  - number of bootstrap randomizations (B)
#   thresh    - vector of delta-beta cutoffs; one bumphunter run per value
#   g_ranges  - probe genomic locations (seqnames/start; probes as rownames)
# Returns the stacked bumphunter result tables with a 'run' column giving
# the cutoff for each run. Requires the Bioconductor bumphunter package.
bump_hunter_lfs <- function(dat,
                            wild_type,
                            boot_num,
                            thresh,
                            g_ranges) {
  # keep only the probes present in both data sets
  dat_names <- colnames(dat)[grepl('^cg', colnames(dat))]
  wt_names <- colnames(wild_type)[grepl('^cg', colnames(wild_type))]
  wt_intersect <- intersect(wt_names, dat_names)
  # store the clinical (non-probe) column names
  clin_dat_names <- colnames(dat)[!grepl('^cg', colnames(dat))]
  clin_wt_names <- colnames(wild_type)[!grepl('^cg', colnames(wild_type))]
  # subset both data sets to clinical columns + shared probes
  dat <- dat[, c(clin_dat_names, wt_intersect)]
  wild_type <- wild_type[, c(clin_wt_names, wt_intersect)]
  # drop family-history columns so the two data frames rbind cleanly
  dat$fam_cancer_ratio <- dat$fam_num_cancer <- NULL
  # combine data
  dat <- rbind(dat,
               wild_type)
  # remove samples with unknown p53 status
  dat <- dat[!is.na(dat$p53_germline),]
  # design matrix: intercept + MUT/WT indicator (the 'bump' covariate)
  dat$type <- dat$p53_germline
  indicator_vector <- as.factor(dat$type)
  designMatrix <- cbind(rep(1, nrow(dat)), indicator_vector)
  designMatrix <- as.matrix(designMatrix)
  # keep only probe columns
  dat <- dat[, grepl('^cg', colnames(dat))]
  # transpose methylation to join with cg_locations to get genetic location vector.
  dat <- as.data.frame(t(dat), stringsAsFactors = F)
  # make probe a column in methyl
  dat$probe <- rownames(dat)
  rownames(dat) <- NULL
  # get probe column in granges
  g_ranges$probe <- rownames(g_ranges)
  # inner join methyl and cg_locations by probe
  methyl_cg <- dplyr::inner_join(dat, g_ranges, by = 'probe')
  # get chr and pos vector
  chr <- methyl_cg$seqnames
  pos <- methyl_cg$start
  # beta matrix: everything except the 6 location/probe columns from the join
  beta <- methyl_cg[, 1:(ncol(methyl_cg) - 6)]
  # make beta numeric (t() of a data frame leaves character columns)
  for (i in 1:ncol(beta)) {
    beta[,i] <- as.numeric(beta[,i])
    print(i)
  }
  beta <- as.matrix(beta)
  ##########
  # Run bumphunter
  ##########
  # check dimensions
  stopifnot(dim(beta)[2] == dim(designMatrix)[1])
  stopifnot(dim(beta)[1] == length(chr))
  stopifnot(dim(beta)[1] == length(pos))
  # set parameters
  DELTA_BETA_THRESH = thresh # DNAm difference threshold
  NUM_BOOTSTRAPS = boot_num # number of randomizations
  # one bumphunter run per cutoff; results are stacked with a 'run' label
  tab <- list()
  bump_hunter_results <- list()
  for (i in 1:length(DELTA_BETA_THRESH)) {
    # NOTE(review): cutoff is passed the WHOLE thresh vector rather than
    # DELTA_BETA_THRESH[i]; when thresh has length > 1 every run uses the
    # same cutoff set — confirm this is intended.
    tab[[i]] <- bumphunter(beta,
                           designMatrix,
                           chr = chr,
                           pos = pos,
                           nullMethod = "bootstrap",
                           cutoff = DELTA_BETA_THRESH,
                           B = NUM_BOOTSTRAPS,
                           type = "M")
    bump_hunter_results[[i]] <- tab[[i]]$table
    bump_hunter_results[[i]]$run <- DELTA_BETA_THRESH[i]
  }
  bh_results <- do.call(rbind, bump_hunter_results)
  return(bh_results)
}
# dat = train_cases
# wild_type = wt_data
# boot_num = 5
# thresh = beta_thresh
# g_ranges = g_ranges
# Identical pipeline to bump_hunter_lfs (MUT vs WT bumphunter DMR scan);
# kept as a separate entry point for the survival workflow. The original
# body was a line-for-line copy of bump_hunter_lfs, so this now delegates
# to it — same inputs, same outputs — and the two cannot drift apart.
bump_hunter_surv <- function(dat,
                             wild_type,
                             boot_num,
                             thresh,
                             g_ranges) {
  bump_hunter_lfs(dat = dat,
                  wild_type = wild_type,
                  boot_num = boot_num,
                  thresh = thresh,
                  g_ranges = g_ranges)
}
# training_dat = train_cases
# controls_dat = controls_full
# valid = valid_full
# test_dat = test_cases
# age_cutoff = 72
# gender = T
# tech = T
# fam_num = T
# fam_ratio = T
# bh_features = remaining_features
# Fit a random forest that classifies cases as early (< age_cutoff) vs late
# onset ('yes'/'no'), then score the held-out test cases and the controls.
#
# Args:
#   training_dat: cases used to fit the model; must contain `age_diagnosis`,
#     any requested dummy columns, and `cg*` methylation probe columns.
#   controls_dat: control samples scored after fitting; must contain
#     `age_sample_collection`.
#   test_dat: held-out cases scored after fitting; must contain `age_diagnosis`.
#   age_cutoff: threshold (same units as the age columns) defining the
#     positive class 'yes'.
#   age_dum: if TRUE, add the 'first'/'second'/'third' age-bin dummies as
#     predictors (assumes those columns exist -- see get_age_cat_dummy).
#   gender: if TRUE, add the 'M'/'F' dummy columns as predictors.
#   tech: if TRUE, add the 'a'/'b' technology/batch dummy columns.
#   fam_num: if TRUE, add 'fam_num_cancer' as a predictor.
#   fam_ratio: if TRUE, add 'fam_cancer_ratio' as a predictor.
#   bh_features: candidate probe names (e.g. from bumphunter) intersected
#     with the columns actually present in the training data.
#
# Returns a list:
#   [[1]] test predictions bound to test labels and clinical columns,
#   [[2]] control predictions bound to control labels and clinical columns,
#   [[3]] the fitted caret model,
#   [[4]] variable-importance matrix,
#   [[5]] the number of predictors used.
run_rf <- function(training_dat,
                   controls_dat,
                   test_dat,
                   age_cutoff,
                   age_dum,
                   gender,
                   tech,
                   fam_num,
                   fam_ratio,
                   bh_features) {
  # keep only candidate features that exist in the training data
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # optionally prepend clinical / technical dummy columns to the feature set
  if(gender) {
    intersected_feats <- c('M', intersected_feats)
    intersected_feats <- c('F', intersected_feats)
  }
  if (tech) {
    intersected_feats <- c('a', intersected_feats)
    intersected_feats <- c('b', intersected_feats)
  }
  if (fam_num){
    intersected_feats <- c('fam_num_cancer', intersected_feats)
  }
  if (fam_ratio){
    intersected_feats <- c('fam_cancer_ratio', intersected_feats)
  }
  if (age_dum){
    intersected_feats <- c('first', 'second', 'third',intersected_feats)
  }
  # intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
  # binary outcome labels: 'yes' = onset before the cutoff
  train_y <- ifelse(training_dat$age_diagnosis < age_cutoff, 'yes', 'no')
  test_y <- ifelse(test_dat$age_diagnosis < age_cutoff, 'yes', 'no')
  controls_y <- ifelse(controls_dat$age_sample_collection < age_cutoff, 'yes', 'no')
  # clinical data = all non-probe columns (kept to annotate the predictions)
  test_clin <- test_dat[, !grepl('^cg', colnames(test_dat))]
  controls_clin <- controls_dat[, !grepl('^cg', colnames(controls_dat))]
  # restrict all three data sets to the selected model features
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  # resampling scheme for caret::train: 5-fold CV repeated 5 times,
  # optimising ROC AUC (twoClassSummary)
  NFOLDS <- 5
  fitControl <- trainControl(
    method = "repeatedcv", # could train on bootstrap resamples; here use repeated cross-validation
    number = min(10, NFOLDS),
    classProbs = TRUE,
    repeats = 5,
    allowParallel = TRUE,
    summaryFunction = twoClassSummary
  )
  # mtry: number of variables randomly sampled as split candidates;
  # fixed at sqrt(p), the usual default for classification forests
  mtry <- sqrt(ncol(training_dat[,colnames(training_dat)]))
  tunegrid <- expand.grid(.mtry=mtry)
  data_size <- ncol(training_dat)
  model <- train(x = training_dat
                 , y = train_y
                 , metric = 'ROC'
                 , method = "rf"
                 , trControl = fitControl
                 , tuneGrid = tunegrid
                 , importance = T
                 , verbose = FALSE)
  # NOTE(review): varImp() columns are usually named after the outcome
  # classes ('yes'/'no'); confirm a column 'X1' exists here, otherwise
  # temp$X1 is NULL and this cbind silently drops the importance scores.
  temp <- varImp(model)[[1]]
  importance <- cbind(rownames(temp), temp$X1)
  # class probabilities for the held-out test cases
  test.predictions <- predict(model,
                              data.matrix(test_dat),
                              type = 'prob')
  # bind predictions, true labels and clinical annotation
  temp_dat <- as.data.frame(cbind(test_pred = test.predictions, test_label = test_y, test_clin))
  # class probabilities for the controls
  test.predictions_con <- predict(model,
                                  data.matrix(controls_dat),
                                  type = 'prob')
  # bind predictions, labels and clinical annotation for the controls
  temp_dat_con <- as.data.frame(cbind(controls_age_pred = test.predictions_con, controls_age_label = controls_y, controls_clin))
  return(list(temp_dat, temp_dat_con, model, importance, data_size))
}
# Run bumphunter on a single data set at one beta-difference cutoff.
#
# Args:
#   model_data: samples x (clinical + probe) data frame. Assumes the first
#     4 columns are clinical and that it contains `age_diagnosis`,
#     `age_sample_collection`, `gender` and `type` (the group indicator)
#     -- TODO confirm this layout against the callers.
#   DELTA_BETA_THRESH: bumphunter `cutoff` (minimum DNAm difference).
#
# Uses the global `cg_locations` probe-location table for chr/pos.
#
# Returns the bumphunter region table with an extra `run` column recording
# the cutoff used.
bumpHunter<- function(model_data,
                      DELTA_BETA_THRESH) {
  ##########
  # clinical columns (NOTE(review): bump_clin is assigned but never used)
  ##########
  bump_clin <- model_data[,1:4]
  ##########
  # build the design matrix: intercept column plus the group indicator
  ##########
  indicator_vector <- as.factor(model_data$type)
  designMatrix <- cbind(rep(1, nrow(model_data)), indicator_vector)
  designMatrix <- as.matrix(designMatrix)
  ##########
  # drop clinical columns, keeping only the probe columns
  ##########
  model_data$age_diagnosis <- model_data$age_sample_collection <-
    model_data$gender <- model_data$type <- NULL
  # transpose so probes are rows, ready to join against cg_locations
  model_data <- as.data.frame(t(model_data), stringsAsFactors = F)
  # expose the probe id as a join column
  model_data$probe <- rownames(model_data)
  rownames(model_data) <- NULL
  # attach genomic coordinates (global cg_locations) by probe id
  methyl_cg <- inner_join(model_data, cg_locations, by = 'probe')
  # chromosome / position vectors for bumphunter
  chr <- methyl_cg$seqnames
  pos <- methyl_cg$start
  # beta matrix = everything except the joined location columns
  # (assumes cg_locations contributes exactly 6 columns -- TODO confirm)
  beta <- methyl_cg[, 1:(ncol(methyl_cg) - 6)]
  # coerce every column to numeric (t() above produced character columns);
  # print(i) is progress output for long runs
  for (i in 1:ncol(beta)) {
    beta[,i] <- as.numeric(beta[,i])
    print(i)
  }
  beta <- as.matrix(beta)
  ##########
  # Run bumphunter
  ##########
  # sanity checks: samples align with the design, probes align with chr/pos
  stopifnot(dim(beta)[2] == dim(designMatrix)[1])
  stopifnot(dim(beta)[1] == length(chr))
  stopifnot(dim(beta)[1] == length(pos))
  # set parameters
  # DELTA_BETA_THRESH is the DNAm difference threshold (cutoff below)
  NUM_BOOTSTRAPS = 3 # number of randomizations
  # run bumphunter on beta values with bootstrap null
  tab <- bumphunter(beta,
                    designMatrix,
                    chr = chr,
                    pos = pos,
                    nullMethod = "bootstrap",
                    cutoff = DELTA_BETA_THRESH,
                    B = NUM_BOOTSTRAPS,
                    type = "Beta")
  # tag the region table with the cutoff that produced it
  bump_hunter_results <- tab$table
  bump_hunter_results$run <- DELTA_BETA_THRESH
  return(bump_hunter_results)
}
##########
# create function that grabs probe site and gene name for results from bumphunter
##########
# all bh features have some data with region length > 0.
# cg_locations have no regions with length > 0.
# Map bumphunter regions back to the individual probes they contain.
#
# Each probe in `cgs` is matched to every region in `data` whose [start, end]
# interval contains the probe's start position, then the matches are split
# into an FWER-significant set and a p-value-significant set.
#
# Args:
#   data: bumphunter region table with columns chr, start, end, p.value,
#     fwer and run.
#   cgs: probe-location table with columns seqnames, start, probe, ...
#
# Returns list(result_fwer, result_sig): probes in regions with fwer == 0,
# and probes in regions with p.value < 0.05.
getProbe <- function(data, cgs) {
  # coerce chromosome columns to character BEFORE renaming, so the real
  # columns are converted (converting after the rename, as the original did,
  # silently created a stray duplicate `seqnames` column instead)
  cgs$seqnames <- as.character(cgs$seqnames)
  data$chr <- as.character(data$chr)
  # suffix probe-location columns so they stay distinguishable after the join
  colnames(cgs) <- paste0(colnames(cgs), '_rgSet')
  # non-equi join: keep every probe whose start falls inside a region.
  # NOTE(review): the join matches on position only, not chromosome --
  # probes can match regions on a different chromosome; confirm intended.
  result <- sqldf("select * from cgs
                 inner join data
                 on cgs.start_rgSet between data.start and data.end")
  # keep only the columns of interest and restore plain names
  result <- result[, c('chr' , 'start_rgSet','end_rgSet', 'probe_rgSet', 'p.value', 'fwer', 'run')]
  colnames(result) <- c('chr', 'start', 'end', 'probe', 'p.value', 'fwer', 'run')
  # nominally significant probes
  result_sig <- result[result$p.value < 0.05,]
  # family-wise-error-controlled probes
  result_fwer <- result[result$fwer == 0,]
  return(list(result_fwer, result_sig))
}
# Summarise one model-result list into a one-row data frame.
#
# Args:
#   result_list: list whose [[2]] and [[3]] hold two value vectors whose
#     correlation becomes the score, and whose [[5]] holds the feature
#     count -- numeric for regularized models, otherwise a space-separated
#     string whose second token is the count.
#   data_name: label stored in the `data` column.
#   regularize: TRUE when result_list[[5]] is numeric (averaged),
#     FALSE when it is a string to parse.
#
# Returns a one-row data frame with columns score, age, type, data, features.
extractResults <- function (result_list,
                            data_name,
                            regularize)
{
  # score = correlation between the two stored value vectors, rounded
  preds <- unlist(result_list[[2]])
  obs <- unlist(result_list[[3]])
  reg_cor <- data.frame(score = round(cor(preds, obs), 2))
  reg_cor$age <- 'regression'
  reg_cor$type <- 'normal'
  reg_cor$data <- data_name
  if(regularize) {
    reg_cor$features <- paste0(mean(unlist(result_list[[5]])))
  } else {
    # BUGFIX: strsplit() returns a list with one element per input string,
    # so the second token of the first string is [[1]][2]; the original
    # [[2]] errored whenever result_list[[5]] was a single string.
    tokens <- strsplit(as.character(result_list[[5]]), split = ' ')[[1]]
    reg_cor$features <- as.numeric(tokens[2])
  }
  return(reg_cor)
}
# get age dummy category
# Prepend one-hot age-quintile dummy columns to a sample data frame.
# Samples are binned into five quintiles of `age_sample_collection`
# (dplyr::ntile) and the resulting indicator columns (nnet::class.ind)
# are named 'first' .. 'fifth'.
get_age_cat_dummy <- function(temp_dat) {
  age_bin <- ntile(temp_dat$age_sample_collection, 5)
  out <- cbind(as.data.frame(class.ind(age_bin)), temp_dat)
  colnames(out)[1:5] <- c('first', 'second', 'third', 'fourth', 'fifth')
  out
}
#
# training_dat = train_cases
# controls_dat = controls_full
# test_dat = test_cases
# age_cutoff = 72
# gender = FALSE
# tech = TRUE
# bh_features
#
#
# run_enet_450_850 <- function(training_dat,
# controls_dat,
# test_dat,
# age_cutoff,
# gender,
# tech,
# bh_features) {
#
#
#
# # get intersection of bh features and real data
# bh_features <- as.character(unlist(bh_features))
#
# intersected_feats <- intersect(bh_features, colnames(training_dat))
#
# if(gender) {
# intersected_feats <- c('Female', 'Male', intersected_feats)
# }
# if (tech) {
# intersected_feats <- c('batch_1', 'batch_2', intersected_feats)
# }
#
# # intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
# # # get y
# train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'positive', 'negative'))
# test_y <- as.factor(ifelse(test_dat$age_diagnosis < age_cutoff, 'positive', 'negative'))
# controls_y <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'positive', 'negative'))
#
# # get clinical data
# test_clin <- test_dat[, !grepl('^cg', colnames(test_dat))]
# controls_clin <- controls_dat[, !grepl('^cg', colnames(controls_dat))]
#
# # get model data
# training_dat <- training_dat[, intersected_feats]
# test_dat <- test_dat[, intersected_feats]
# controls_dat <- controls_dat[, intersected_feats]
#
# # start elastic net tuning
# N_CV_REPEATS = 2
# nfolds = 3
#
# ###### ENET
# # create vector and list to store best alpha on training data. alpha is the parameter that choses the
# # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
# elastic_net.cv_error = vector()
# elastic_net.cv_model = list()
# elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
#
# # set parameters for training model
# type_family <- 'binomial'
# type_measure <- 'auc'
#
# # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
# # or if you have a high number fo N_CV_REPEATS
# temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
# for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
# {
# elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
# , y = train_y
# , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
# , type.measure = type_measure
# , family = type_family
# , standardize = FALSE
# , nfolds = nfolds
# , nlambda = 10
# , parallel = TRUE
# )
# elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
# }
# elastic_net.cv_error # stores 9 errors
# }
#
# if (N_CV_REPEATS == 1) {
# temp.cv_error_mean = temp.cv_error_matrix
# } else {
# temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
# # as your value for alpha
# }
#
# # stop if you did not recover error for any models
# stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
#
# # get index of best alpha (lowest error) - alpha is values 0.1-0.9
# temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
# # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
# best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
# temp.non_zero_coeff_min = 0
# temp.loop_count = 0
# # loop runs initially because temp.non_zero coefficient <3 and then stops
# # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
# # it they are never greater than 1, then the model does not converge.
# while (temp.non_zero_coeff_min < 1) {
# elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
# , y = train_y
# , alpha = elastic_net.ALPHA[temp.best_alpha_index]
# , type.measure = type_measure
# , family = type_family
# , standardize=FALSE
# , nlambda = 100
# , nfolds = nfolds
# , parallel = TRUE
# )
#
# # get optimal lambda - the tuning parameter for ridge and lasso
# # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
# # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
# # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
# # GIVE YOU REASONS
# temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
# temp.1se_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.1se)
#
#
# # # number of non zero coefficients at that lambda
# temp.non_zero_coeff_min = elastic_net.cv_model$nzero[temp.min_lambda_index]
# temp.non_zero_coeff_1se = elastic_net.cv_model$nzero[temp.1se_lambda_index]
#
# temp.loop_count = temp.loop_count + 1
#
# # set seed for next loop iteration
# as.numeric(Sys.time())-> t
# set.seed((t - floor(t)) * 1e8 -> seed)
# if (temp.loop_count > 10) {
# print("diverged")
# temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
# break
# }
# }# while loop ends
# # print(temp.non_zero_coeff)
#
# model = glmnet(x = as.matrix(training_dat)
# , y = train_y
# ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
# ,standardize=FALSE
# ,nlambda = 100
# ,family = type_family)
#
# # Predictions on test data
#
# # This returns 100 prediction with 1-100 lambdas
# temp_test.predictions <- predict(model,
# data.matrix(test_dat),
# type = 'response')
#
#
# # get predictions with corresponding lambda.
# test.predictions <- temp_test.predictions[, temp.min_lambda_index]
#
#
# # combine predictions and real labels
# temp_dat <- as.data.frame(cbind(test_pred = test.predictions, test_label = test_y, test_clin))
#
#
# # Predictions on controls data
#
# # This returns 100 prediction with 1-100 lambdas
# temp_test.predictions_con <- predict(model,
# data.matrix(controls_dat),
# type = 'response')
#
# # get predictions with corresponding lambda.
# test.predictions_con <- temp_test.predictions_con[, temp.min_lambda_index]
#
# # combine predictions and real labels
# temp_dat_con <- as.data.frame(cbind(controls_age_pred = test.predictions_con, controls_age_label = controls_y, controls_clin))
#
#
# return(list(temp_dat, temp_dat_con, model, temp.non_zero_coeff_min,
# temp.non_zero_coeff_1se, best_alpha))
#
# }
# run_enet_all <- function(training_dat,
# controls_dat,
# valid_dat,
# age_cutoff,
# gender,
# tech,
# bh_features) {
#
#
# # get intersection of bh features and real data
# bh_features <- as.character(unlist(bh_features))
#
# intersected_feats <- intersect(bh_features, colnames(training_dat))
#
# if(gender) {
# intersected_feats <- c('Female', 'Male', intersected_feats)
# }
# if (tech) {
# intersected_feats <- c('batch_1', 'batch_2', intersected_feats)
# }
#
# # intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
# # # get y
# train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'positive', 'negative'))
# valid_y <- as.factor(ifelse(valid_dat$age_diagnosis < age_cutoff, 'positive', 'negative'))
# controls_y <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'positive', 'negative'))
#
# # get clinical data
# valid_clin <- test_dat[, !grepl('^cg', colnames(test_dat))]
# controls_clin <- controls_dat[, !grepl('^cg', colnames(controls_dat))]
#
# # get model data
# training_dat <- training_dat[, intersected_feats]
# controls_dat <- controls_dat[, intersected_feats]
# valid_dat <- valid_dat[, intersected_feats]
#
#
# # get bumphunter features
# training_dat <- training_dat[, intersected_feats]
# controls_dat <- controls_dat[, intersected_feats]
# valid_dat <- valid_dat[, intersected_feats]
#
#
# # start elastic net tuning
# N_CV_REPEATS = 2
# nfolds = 3
#
# ###### ENET
# # create vector and list to store best alpha on training data. alpha is the parameter that choses the
# # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
# elastic_net.cv_error = vector()
# elastic_net.cv_model = list()
# elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
#
# # set parameters for training model
# type_family <- 'binomial'
# type_measure <- 'auc'
#
# # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
# # or if you have a high number fo N_CV_REPEATS
# temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
# for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
# {
# elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
# , y = train_y
# , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
# , type.measure = type_measure
# , family = type_family
# , standardize = FALSE
# , nfolds = nfolds
# , nlambda = 10
# , parallel = TRUE
# )
# elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
# }
# elastic_net.cv_error # stores 9 errors
# }
#
# if (N_CV_REPEATS == 1) {
# temp.cv_error_mean = temp.cv_error_matrix
# } else {
# temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
# # as your value for alpha
# }
#
# # stop if you did not recover error for any models
# stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
#
# # get index of best alpha (lowest error) - alpha is values 0.1-0.9
# temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
# # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
# best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
# temp.non_zero_coeff = 0
# temp.loop_count = 0
# # loop runs initially because temp.non_zero coefficient <3 and then stops
# # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
# # it they are never greater than 1, then the model does not converge.
# while (temp.non_zero_coeff < 1) {
# elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
# , y = train_y
# , alpha = elastic_net.ALPHA[temp.best_alpha_index]
# , type.measure = type_measure
# , family = type_family
# , standardize=FALSE
# , nlambda = 100
# , nfolds = nfolds
# , parallel = TRUE
# )
#
# # get optimal lambda - the tuning parameter for ridge and lasso
# # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
# # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
# # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
# # GIVE YOU REASONS
# temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
#
# # # number of non zero coefficients at that lambda
# temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
# temp.loop_count = temp.loop_count + 1
#
# # set seed for next loop iteration
# as.numeric(Sys.time())-> t
# set.seed((t - floor(t)) * 1e8 -> seed)
# if (temp.loop_count > 10) {
# print("diverged")
# temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
# break
# }
# }# while loop ends
# # print(temp.non_zero_coeff)
#
# model = glmnet(x = as.matrix(training_dat)
# , y = train_y
# ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
# ,standardize=FALSE
# ,nlambda = 100
# ,family = type_family)
#
# # Predictions on test data
#
# # Predictions on controls data
#
# # This returns 100 prediction with 1-100 lambdas
# temp_test.predictions_con <- predict(model,
# data.matrix(controls_dat),
# type = 'response')
#
# # get predictions with corresponding lambda.
# test.predictions_con <- temp_test.predictions_con[, temp.min_lambda_index]
#
# # combine predictions and real labels
# temp_dat_con <- as.data.frame(cbind(controls_age_pred = test.predictions_con, controls_age_label = controls_y, controls_clin))
#
# # Predictions on controls data
#
# # This returns 100 prediction with 1-100 lambdas
# temp_test.predictions_valid <- predict(model,
# data.matrix(valid_dat),
# type = 'response')
#
# # get predictions with corresponding lambda.
# test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
#
# # combine predictions and real labels
# temp_dat_valid <- as.data.frame(cbind(valid_age_pred = test.predictions_valid, valid_age_label = valid_y, valid_clin))
#
# ###########################################################################################
# return(list(temp_dat, temp_dat_con, temp_dat_valid, model, elastic_net.cv_model$lambda.min, best_alpha))
#
# }
#
# cases_dat = cases_full
# controls_dat = controls_full
# age_cutoff = 72
# gender = gender
# tech = tech
# bh_features = remaining_features
# Tune an elastic net (alpha, then lambda) on the case samples to classify
# early (< age_cutoff) vs late onset, then score the controls with the
# fitted model.
#
# Args:
#   cases_dat: case samples; clinical columns precede the `cg*` probe
#     columns; must contain `age_diagnosis`.
#   controls_dat: control samples with the same layout; must contain
#     `age_sample_collection`.
#   age_cutoff: age threshold defining the 'positive' class.
#   gender: if TRUE, add the 'Female'/'Male' dummy columns as predictors.
#   tech: if TRUE, add the 'batch_1'/'batch_2' dummy columns as predictors.
#   bh_features: candidate probe names to intersect with the data columns.
#
# Returns list(model, control predictions bound to labels + clinical data,
#              n nonzero coefs at lambda.min, n nonzero coefs at lambda.1se,
#              best alpha).
run_enet_test <- function(cases_dat,
                          controls_dat,
                          age_cutoff,
                          gender,
                          tech,
                          bh_features) {
  # keep only candidate features present in the case data
  # (BUGFIX: the original intersected against `training_dat`, which is not
  # defined anywhere in this function)
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(cases_dat))
  if(gender) {
    intersected_feats <- c('Female', 'Male', intersected_feats)
  }
  if (tech) {
    intersected_feats <- c('batch_1', 'batch_2', intersected_feats)
  }
  # outcome labels
  cases_y <- ifelse(cases_dat$age_diagnosis < age_cutoff, 'positive', 'negative')
  # BUGFIX: the original compared the age to the string 'positive' and was
  # missing ifelse()'s third argument
  controls_y <- ifelse(controls_dat$age_sample_collection < age_cutoff,
                       'positive', 'negative')
  # clinical data = everything before the first probe column; compute the
  # probe start per data set (BUGFIX: the original reused the cases' index
  # to slice the controls)
  cg_start <- which(grepl('^cg', colnames(cases_dat)))[1]
  cases_clin <- cases_dat[, 1:(cg_start - 1)]
  cg_start_con <- which(grepl('^cg', colnames(controls_dat)))[1]
  controls_clin <- controls_dat[, 1:(cg_start_con - 1)]
  # restrict both data sets to the model features
  cases_dat <- cases_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  # cross-validation settings for the alpha/lambda search
  N_CV_REPEATS = 5
  nfolds = 5
  ###### ENET
  # alpha controls the L1/L2 mix; try 0.1 .. 0.9 and keep the alpha with the
  # lowest mean CV error across N_CV_REPEATS repeats
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10
  # binomial model scored by AUC
  type_family <- 'binomial'
  type_measure <- 'auc'
  # one row of CV errors (one per candidate alpha) per repeat
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA))
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(cases_dat)
                                                , y = cases_y
                                                , alpha = elastic_net.ALPHA[alpha]
                                                , type.measure = type_measure
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # the 9 per-alpha errors for this repeat
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    # average the repeats to get a stable error estimate per alpha
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean)
  }
  # stop if any alpha is missing an error estimate
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # index of the best (lowest mean error) alpha; ties broken by the last one
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff_min = 0
  temp.loop_count = 0
  # re-run lambda CV at the chosen alpha until lambda.min selects at least
  # one nonzero coefficient; declare divergence after 10 attempts
  while (temp.non_zero_coeff_min < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(cases_dat)
                                     , y = cases_y
                                     # BUGFIX: was hard-coded to 0.1, ignoring the tuned alpha
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # indices of lambda.min / lambda.1se on the lambda path; later
    # predictions must be read at the same index the model was tuned on
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    temp.1se_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.1se)
    # number of nonzero coefficients at each candidate lambda
    temp.non_zero_coeff_min = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.non_zero_coeff_1se = elastic_net.cv_model$nzero[temp.1se_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # reseed so the next CV attempt draws different folds
    t <- as.numeric(Sys.time())
    set.seed((t - floor(t)) * 1e8)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # did not converge; fall back to a mid-path lambda
      break
    }
  }# while loop ends
  # final fit on the full lambda path at the tuned alpha
  model = glmnet(x = as.matrix(cases_dat)
                 , y = cases_y
                 , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 , standardize=FALSE
                 , nlambda = 100
                 , family = type_family)
  # predict() returns one column per lambda on the path
  temp_test.predictions_con <- predict(model,
                                       data.matrix(controls_dat),
                                       type = 'response')
  # keep only the column matching the tuned lambda
  test.predictions_con <- temp_test.predictions_con[, temp.min_lambda_index]
  # bind predictions, labels and clinical annotation for the controls
  temp_dat_con <- as.data.frame(cbind(controls_age_pred = test.predictions_con, controls_age_label = controls_y, controls_clin))
  ###########################################################################################
  return(list(model, temp_dat_con, temp.non_zero_coeff_min, temp.non_zero_coeff_1se, best_alpha))
}
##########
# impute and scale for raw data
##########
# Optionally row-standardize a numeric matrix/data frame, then impute
# missing values with k-nearest-neighbours.
#
# Args:
#   dat: numeric matrix or data frame (rows are standardized, so rows are
#     the features being normalized).
#   scale: if TRUE, centre and scale each row by its mean/sd (NAs ignored)
#     before imputing.
#
# Returns a numeric matrix with missing values imputed via impute.knn(k = 10).
scaleImputeDat <- function(dat, scale) {
  if (scale) {
    # per-row statistics, ignoring missing values
    rowMean <- apply(dat, 1, mean, na.rm=TRUE)
    rowSd <- apply(dat, 1, sd, na.rm=TRUE)
    # NOTE(review): a constant row has sd == 0 and yields NaN/Inf here; the
    # original carried a commented-out guard for this -- confirm inputs
    # never contain constant rows.
    dat <- (dat - rowMean) / rowSd
  }
  # kNN imputation needs a matrix; both branches previously duplicated this
  # coerce-and-impute step verbatim, so it is hoisted out of the conditional
  dat <- as.matrix(dat)
  impute.knn(dat, k = 10)$data
}
# ##########
# # scale data
# ##########
# dat <- betaCases
# probe_start <- 8
# scaleData <- function(dat, probe_start)
# {
#
# # get row statistics
# colMean <- apply(dat[, probe_start:ncol(dat)], 2, mean)
# colSd <- apply(dat[, probe_start:ncol(dat)], 2, sd)
# # constantInd <- rowSd==0
# # rowSd[constantInd] <- 1
# colStats <- list(mean=colMean, sd=colSd)
#
# # apply normilization
# dat[, probe_start:ncol(dat)] <- (dat[, probe_start:ncol(dat)] - colStats$mean) / colStats$sd
#
# return(dat)
# }
##########
# function for get m values
##########
# Convert beta values (in (0, 1)) to M-values via the logit transform
# M = log(beta / (1 - beta)).
# NOTE(review): this uses the natural log, as the original did; the common
# M-value definition uses log2 -- preserved as-is.
#
# Args:
#   data_set: data frame whose probe (beta-value) columns start at
#     `probe_start`; earlier columns are left untouched.
#   probe_start: column index of the first probe column.
#
# Returns `data_set` with the probe columns replaced by their M-values.
get_m_values <-
  function(data_set, probe_start) {
    probe_cols <- probe_start:ncol(data_set)
    beta <- data_set[, probe_cols]
    # vectorized logit; the original used apply(), which coerces the
    # sub-frame to a matrix and errors when there is a single probe column
    data_set[, probe_cols] <- log(beta / (1 - beta))
    return(data_set)
  }
##########
# Function that combines methylation matrices with id_map, to get ids for methylation
##########
# Attach sample ids and sentrix ids to a methylation matrix whose column
# names are plate identifiers, using one of two id maps per identifier.
#
# Args:
#   data_methyl: probes x samples matrix; column names are identifiers.
#   id_map_1: id map (columns sample_name, identifier, sentrix_id) for
#     identifiers NOT matching the plate prefix.
#   id_map_2: id map for identifiers matching the plate prefix.
#   controls: if TRUE, route identifiers starting with '200' to id_map_2;
#     otherwise use the shorter '20' prefix.
#
# Returns the transposed data (samples as rows) with identifier, ids and
# sentrix_id columns appended.
findIdsCombined <- function(data_methyl, id_map_1, id_map_2, controls) {
  # transpose so samples are rows; input column names become row names
  data_methyl <- as.data.frame(t(data_methyl))
  data_methyl$identifier <- rownames(data_methyl)
  data_methyl$identifier <- as.factor(data_methyl$identifier)
  # the two original branches were verbatim duplicates differing only in
  # the regex prefix, so the prefix is factored out here
  id_pattern <- if (controls) '^200' else '^20'
  data_methyl$ids <- NA
  data_methyl$sentrix_id <- NA
  # look up each identifier in the map selected by its prefix;
  # print(i) is progress output for long runs
  for (i in data_methyl$identifier) {
    id_map <- if (grepl(id_pattern, i)) id_map_2 else id_map_1
    data_methyl$ids[data_methyl$identifier == i] <-
      id_map$sample_name[id_map$identifier == i]
    data_methyl$sentrix_id[data_methyl$identifier == i] <-
      id_map$sentrix_id[id_map$identifier == i]
    print(i)
  }
  return(data_methyl)
}
##########
# Main function that specifies a preprocessing method and get beta
##########
# Preprocess a raw methylation data list with the requested method, merge
# the per-batch results, attach sample ids and return a data frame.
#
# Args:
#   data_list: raw data passed through the project helper preprocessMethod().
#   cases: if TRUE, use the case id map (global `id_map`) and clean the id
#     names with getIdName(); otherwise use the control map (`id_map_control`).
#   method: preprocessing method name forwarded to preprocessMethod().
#
# Returns the combined beta matrix as a data frame (no factor coercion).
getMethyl <- function(data_list, cases, method) {
  processed_list <- preprocessMethod(data_list, preprocess = method)
  # merge the per-batch results into a single beta matrix
  beta_methyl <- combineList(processed_list)
  if (cases) {
    # cases: attach ids from the case map, then normalise the id names
    beta_methyl <- findIds(beta_methyl, id_map)
    beta_methyl <- getIdName(beta_methyl)
  } else {
    # controls: attach ids from the control map (no name clean-up step)
    beta_methyl <- findIds(beta_methyl, id_map_control)
  }
  as.data.frame(beta_methyl, stringsAsFactors = FALSE)
}
# Build a probe-location table from the GSE68777 idat files.
# NOTE: the `cg_locations` argument is ignored -- it is overwritten below
# and kept only so existing callers do not break.
getIds <- function(cg_locations) {
  # locate and decompress the raw idat archives (side effect on disk only)
  idat_files <- list.files("GSE68777/idat", pattern = "idat.gz$", full = TRUE)
  for (idat_file in idat_files) {
    gunzip(idat_file, overwrite = TRUE)
  }
  # read the decompressed idats into an rgSet and quantile-preprocess it
  rg_set <- read.450k.exp("GSE68777/idat")
  rg_set <- preprocessQuantile(rg_set)
  # reduce to genomic coordinates, exposing the probe id as a column
  cg_locations <- as.data.frame(granges(rg_set))
  cg_locations$probe <- rownames(cg_locations)
  rownames(cg_locations) <- NULL
  cg_locations
}
# # get model data
# combined_data <- m_test_con_full
# train_data <- m_train_mod
combine_clean_split <- function(combined_data, train_data, controls){
  # Combine new (test) methylation samples with the training set, restrict
  # both to their shared probes plus the clinical columns, remove duplicates
  # and WT carriers, then split back into train / test-cases / test-other.
  #
  # Args:
  #   combined_data: data frame of new samples (clinical cols 1-9 + probes).
  #   train_data:    data frame of training samples (same layout).
  #   controls:      logical; TRUE -> split the test set by cancer status,
  #                  FALSE -> split by sentrix-id prefix (validation batch).
  # Returns: list(train_data, test_cases, test_other).
  # give temp indicator for training and testing
  combined_data$train_test <- 'test'
  train_data$train_test <- 'train'
  # intersected feats.  NOTE: train_test was just appended as the LAST
  # column, so it sits inside the 10:ncol() range and deliberately survives
  # the column subset below -- do not move these assignments.
  intersect_names <- Reduce(intersect, list(colnames(combined_data)[10:ncol(combined_data)],
                                            colnames(train_data)[10:ncol(train_data)]))
  # subset by intersected feats
  combined_data <- combined_data[, c('ids',
                                     'p53_germline',
                                     'cancer_diagnosis_diagnoses',
                                     'age_diagnosis',
                                     'age_sample_collection',
                                     'gender',
                                     'sentrix_id',
                                     'family_name',
                                     'tm_donor_',
                                     intersect_names)]
  train_data <- train_data[, c('ids',
                               'p53_germline',
                               'cancer_diagnosis_diagnoses',
                               'age_diagnosis',
                               'age_sample_collection',
                               'gender',
                               'sentrix_id',
                               'family_name',
                               'tm_donor_',
                               intersect_names)]
  # combined data
  all_data <- rbind(combined_data, train_data)
  # remove inf values from probe columns
  all_data <- removeInf(all_data, probe_start = 10)
  # remove duplicates
  all_data <- all_data[!duplicated(all_data$ids),]
  # remove WT carriers
  all_data <- subset(all_data, p53_germline == 'Mut')
  # split data back out by the temporary flag
  train_data <- subset(all_data, train_test == 'train')
  test_data <- subset(all_data, train_test == 'test')
  # remove the flag column
  train_data$train_test <- test_data$train_test <- NULL
  if(controls) {
    # controls batch: cases = anyone with a diagnosis, other = unaffected
    test_cases <- subset(test_data, cancer_diagnosis_diagnoses != 'Unaffected')
    test_other <- subset(test_data, cancer_diagnosis_diagnoses == 'Unaffected')
    # remove NA from sample controls
    test_other <- test_other[!is.na(test_other$age_sample_collection),]
  } else {
    # validation batch: split by sentrix-id prefix ('20...' = new scanner)
    test_cases <- test_data[!grepl('^20', test_data$sentrix_id),]
    test_other <- test_data[grepl('^20', test_data$sentrix_id),]
    # remove NAs from validation
    test_other <- test_other[!is.na(test_other$age_diagnosis),]
    test_other <- test_other[!is.na(test_other$age_sample_collection),]
    # remove samples from validation that are in cases or training
    test_other <- test_other[!test_other$ids %in% test_cases$ids,]
    test_other <- test_other[!test_other$ids %in% train_data$ids,]
  }
  # remove na on training
  train_data <- train_data[!is.na(train_data$age_sample_collection),]
  train_data <- train_data[!is.na(train_data$age_diagnosis),]
  # (dead code removed: the original also NA-filtered test_data here, but
  # test_cases/test_other were already split off above and test_data is not
  # returned, so those filters had no effect)
  return(list(train_data, test_cases, test_other))
}
# data <- betaControls
# function that takes each methylation and merges with clinical - keep ids, family, p53 status, age data
# Merge clinical annotations from the global `clin` table onto a methylation
# data frame, matching rows by `ids`, without performing a full (memory-heavy)
# join.  `control = TRUE` skips age_diagnosis (controls have none populated).
# Returns the data with clinical columns first, then the probe features,
# de-duplicated by ids and tm_donor_.
# NOTE(review): matching is done in a per-id loop; if an id appears more than
# once in `clin`, R's recycling rules decide which values land where --
# presumably ids are unique in clin, but verify.
joinData <- function(data, control) {
  # get intersection of clinical ids and data ids
  intersected_ids <- intersect(data$ids, clin$ids)
  # probe feature columns: everything except the last 3 columns
  # (assumed to be the id/sentrix bookkeeping columns added upstream -- TODO
  # confirm against findIds()/getIdName() output)
  features <- colnames(data)[1:(length(colnames(data)) - 3)]
  # loop to combine identifiers, without merging large table
  data$p53_germline <- NA
  data$age_diagnosis <- NA
  data$cancer_diagnosis_diagnoses <- NA
  data$age_sample_collection <- NA
  data$tm_donor_ <- NA
  data$gender <- NA
  data$family_name <- NA
  if (!control) {
    # cases: copy all clinical fields, including age of diagnosis
    for (i in intersected_ids) {
      data$p53_germline[data$ids == i] <- clin$p53_germline[which(clin$ids == i)]
      data$age_diagnosis[data$ids == i] <- clin$age_diagnosis[which(clin$ids == i)]
      data$cancer_diagnosis_diagnoses[data$ids == i] <- clin$cancer_diagnosis_diagnoses[which(clin$ids == i)]
      data$age_sample_collection[data$ids == i] <- clin$age_sample_collection[which(clin$ids == i)]
      data$tm_donor_[data$ids == i] <- clin$tm_donor_[which(clin$ids == i)]
      data$gender[data$ids == i] <- clin$gender[which(clin$ids == i)]
      data$family_name[data$ids == i] <- clin$family_name[which(clin$ids == i)]
      print(i)
    }
    # drop rows with no clinical match, then de-duplicate
    data <- data[!is.na(data$p53_germline),]
    data <- data[!duplicated(data$ids),]
    data <- data[!duplicated(data$tm_donor_),]
    # data <- data[!is.na(data$age_diagnosis),]
    data <- data[, c('ids', 'p53_germline', 'age_diagnosis', 'cancer_diagnosis_diagnoses',
                     'age_sample_collection', 'gender','sentrix_id', 'family_name', features)]
  } else {
    # controls: same, but age_diagnosis is left as NA (never assigned)
    for (i in intersected_ids) {
      data$p53_germline[data$ids == i] <- clin$p53_germline[which(clin$ids == i)]
      data$cancer_diagnosis_diagnoses[data$ids == i] <- clin$cancer_diagnosis_diagnoses[which(clin$ids == i)]
      data$age_sample_collection[data$ids == i] <- clin$age_sample_collection[which(clin$ids == i)]
      data$tm_donor_[data$ids == i] <- clin$tm_donor_[which(clin$ids == i)]
      data$gender[data$ids == i] <- clin$gender[which(clin$ids == i)]
      data$family_name[data$ids == i] <- clin$family_name[which(clin$ids == i)]
      print(i)
    }
    data <- data[!is.na(data$p53_germline),]
    data <- data[!duplicated(data$ids),]
    data <- data[!duplicated(data$tm_donor_),]
    data <- data[, c('ids', 'p53_germline', 'age_diagnosis', 'cancer_diagnosis_diagnoses',
                     'age_sample_collection', 'gender', 'sentrix_id', 'family_name',features)]
  }
  return(data)
}
##########
# remove cancer from controls
##########
removeCancer <- function(data_controls)
{
  # Keep only cancer-free ('Unaffected') control samples, one row per id.
  data_controls <- data_controls[grepl('Unaffected', data_controls$cancer_diagnosis_diagnoses),]
  # BUG FIX: the original subset was missing the row/column comma
  # (data_controls[!duplicated(...)]), which selected *columns* by a
  # recycled logical vector instead of dropping duplicate rows.
  data_controls <- data_controls[!duplicated(data_controls$ids),]
  return(data_controls)
}
##########
# get old controls
##########
getControls <- function(data, mut)
{
  # Subset to cancer-free ('Unaffected') samples with the requested p53
  # germline status: mut = TRUE keeps mutation carriers, FALSE keeps WT.
  status_pattern <- if (mut) 'Mut' else 'WT'
  unaffected <- grepl('Unaffected', data$cancer_diagnosis_diagnoses)
  right_status <- grepl(status_pattern, data$p53_germline)
  data[unaffected & right_status, ]
}
# data_controls <- controls_wt
getBalAge <- function(data_controls, full)
{
  # Rebalance the control age distribution by randomly dropping a handful of
  # samples from the over-represented age bands (100-250 and 300-400 months
  # at sample collection).  `full` selects how many rows to drop: 8 for the
  # full control set, 2 otherwise.
  # NOTE: draws from the caller's RNG state; no seed is set here.
  age <- data_controls$age_sample_collection
  in_band <- (age >= 100 & age <= 250) | (age >= 300 & age <= 400)
  remove_index <- which(in_band)
  if(full) {
    remove_index <- sample(remove_index, 8, replace = F)
  } else {
    remove_index <- sample(remove_index, 2, replace = F)
  }
  data_controls[-remove_index, ]
}
# Run bumphunter on cases vs controls: build a two-column design matrix
# (intercept + case/control indicator), map probes to genomic positions via
# the global `cg_locations` table, and return the bump table(s).
# NOTE(review): assumes the clinical columns listed below exist in both
# inputs and that cg_locations contributes exactly 6 columns to the join
# (probe + 5 location fields) -- the `ncol - 6` slice depends on this.
bumpHunterSurv <- function(dat_cases,
                           dat_controls)
{
  # combine data
  dat <- rbind(dat_cases, dat_controls)
  ##########
  # get clinical dat
  ##########
  bump_clin <- dat[,1:4]
  # recode type: 'Unaffected' diagnosis -> control, anything else -> case
  dat$type <- ifelse(grepl('Unaffected', dat$cancer_diagnosis_diagnoses), 'controls', 'cases')
  ##########
  # get indicator and put into design matrix with intercept 1
  #########
  indicator_vector <- as.factor(dat$type)
  designMatrix <- cbind(rep(1, nrow(dat)), indicator_vector)
  designMatrix <- as.matrix(designMatrix)
  ##########
  # Get genetic locations
  ##########
  # strip every clinical column so only probe columns remain
  dat$p53_germline <- dat$age_diagnosis <- dat$cancer_diagnosis_diagnoses <- dat$ids <- dat$batch <-
    dat$age_sample_collection <- dat$id <- dat$type <- dat$gender <- dat$sentrix_id <- NULL
  # transpose methylation to join with cg_locations to get genetic location vector.
  dat <- as.data.frame(t(dat), stringsAsFactors = F)
  # make probe a column in methyl
  dat$probe <- rownames(dat)
  rownames(dat) <- NULL
  # inner join methyl and cg_locations by probe
  methyl_cg <- dplyr::inner_join(dat, cg_locations, by = 'probe')
  # get chr and pos vector
  chr <- methyl_cg$seqnames
  pos <- methyl_cg$start
  # create beta matrix: drop the 6 columns contributed by the join
  beta <- methyl_cg[, 1:(ncol(methyl_cg) - 6)]
  # make beta numeric (transpose turned everything into character)
  for (i in 1:ncol(beta)) {
    beta[,i] <- as.numeric(beta[,i])
    print(i)
  }
  beta <- as.matrix(beta)
  ##########
  # Run bumphunter
  ##########
  # check dimensions: probes x samples must line up with the design matrix
  stopifnot(dim(beta)[2] == dim(designMatrix)[1])
  stopifnot(dim(beta)[1] == length(chr))
  stopifnot(dim(beta)[1] == length(pos))
  # set paramenters
  DELTA_BETA_THRESH = 0.5 # DNAm difference threshold
  NUM_BOOTSTRAPS = 3   # number of randomizations
  # create tab list: one bumphunter run per threshold value
  tab <- list()
  bump_hunter_results <- list()
  for (i in 1:length(DELTA_BETA_THRESH)) {
    tab[[i]] <- bumphunter(beta,
                           designMatrix,
                           chr = chr,
                           pos = pos,
                           nullMethod = "bootstrap",
                           cutoff = DELTA_BETA_THRESH,
                           B = NUM_BOOTSTRAPS,
                           type = "Beta")
    bump_hunter_results[[i]] <- tab[[i]]$table
    bump_hunter_results[[i]]$run <- DELTA_BETA_THRESH[i]
  }
  # stack results across thresholds (tagged by the `run` column)
  bh_results <- do.call(rbind, bump_hunter_results)
  return(bh_results)
}
###########
# bumphunter for predictions - WT controls, p53 contorls
###########
# Run bumphunter comparing WT controls vs p53-mutant controls.  The
# age_diagnosis column (empty for controls) is repurposed to carry the
# WT/MUT label before the design matrix is built.
# NOTE(review): same `ncol - 6` join assumption as bumpHunterSurv -- the
# global cg_locations table must contribute exactly 6 columns.
bumpHunterPred <- function(dat_controls_wt,
                           dat_controls_mut)
{
  # add columns indicating p53 status (in place of age of diagnosis)
  dat_controls_wt$age_diagnosis <- 'WT'
  dat_controls_mut$age_diagnosis <- 'MUT'
  # combine data
  dat <- rbind(dat_controls_wt, dat_controls_mut)
  dat$ids <- NULL
  # change variable name for age of diagnosis
  colnames(dat)[1] <- 'status'
  ##########
  # get clinical dat
  ##########
  bump_clin <- dat[,1:4]
  ##########
  # get indicator and put into design matrix with intercept 1
  #########
  indicator_vector <- as.factor(dat$status)
  designMatrix <- cbind(rep(1, nrow(dat)), indicator_vector)
  designMatrix <- as.matrix(designMatrix)
  ##########
  # Get genetic locations
  ##########
  # strip every clinical column so only probe columns remain
  dat$p53_germline <- dat$age_diagnosis <- dat$cancer_diagnosis_diagnoses <- dat$ids <- dat$batch <-
    dat$age_sample_collection <- dat$id <- dat$status <- dat$gender <- dat$sentrix_id <- NULL
  # transpose methylation to join with cg_locations to get genetic location vector.
  dat <- as.data.frame(t(dat), stringsAsFactors = F)
  # make probe a column in methyl
  dat$probe <- rownames(dat)
  rownames(dat) <- NULL
  # inner join methyl and cg_locations by probe
  methyl_cg <- dplyr::inner_join(dat, cg_locations, by = 'probe')
  # get chr and pos vector
  chr <- methyl_cg$seqnames
  pos <- methyl_cg$start
  # create beta matrix: drop the 6 columns contributed by the join
  beta <- methyl_cg[, 1:(ncol(methyl_cg) - 6)]
  # make beta numeric (transpose turned everything into character)
  for (i in 1:ncol(beta)) {
    beta[,i] <- as.numeric(beta[,i])
    print(i)
  }
  beta <- as.matrix(beta)
  ##########
  # Run bumphunter
  ##########
  # check dimensions: probes x samples must line up with the design matrix
  stopifnot(dim(beta)[2] == dim(designMatrix)[1])
  stopifnot(dim(beta)[1] == length(chr))
  stopifnot(dim(beta)[1] == length(pos))
  # set paramenters
  DELTA_BETA_THRESH = 0.5 # DNAm difference threshold
  NUM_BOOTSTRAPS = 3   # number of randomizations
  # create tab list: one bumphunter run per threshold value
  tab <- list()
  bump_hunter_results <- list()
  for (i in 1:length(DELTA_BETA_THRESH)) {
    tab[[i]] <- bumphunter(beta,
                           designMatrix,
                           chr = chr,
                           pos = pos,
                           nullMethod = "bootstrap",
                           cutoff = DELTA_BETA_THRESH,
                           B = NUM_BOOTSTRAPS,
                           type = "Beta")
    bump_hunter_results[[i]] <- tab[[i]]$table
    bump_hunter_results[[i]]$run <- DELTA_BETA_THRESH[i]
  }
  # stack results across thresholds (tagged by the `run` column)
  bh_results <- do.call(rbind, bump_hunter_results)
  return(bh_results)
}
##########
# generate folds and set the seed for random samplings - referenced in arg[]
##########
getFolds <- function(model_dat, seed_number, k){
  # Reproducibly assign each row of model_dat to one of k cross-validation
  # folds.  Seeding inside the function makes the assignment deterministic
  # for a given (seed_number, k, nrow) triple.
  set.seed(seed_number)
  fold_labels <- sample(1:k, nrow(model_dat), replace = T)
  model_dat$folds <- fold_labels
  model_dat
}
getProbe <- function(data) {
  # Map bumphunter regions (chr/start/end rows in `data`) back to individual
  # 450k probes by a range join against the global `cg_locations` table.
  # Returns list(all matches, p < 0.05 matches, fwer == 0 matches).
  # (Removed unused locals `results` / `results_data` from the original.)
  # NOTE(review): the modifications below act on a local copy of
  # cg_locations; the global is untouched (copy-on-modify).
  # NOTE(review): the join matches on position only, not chromosome --
  # probes on a different chromosome with an in-range position would also
  # match.  Left as-is to preserve behavior; verify intent.
  colnames(cg_locations) <- paste0(colnames(cg_locations), '_', 'rgSet')
  # character vectors so sqldf compares chromosome labels consistently
  cg_locations$seqnames_rgSet <- as.character(cg_locations$seqnames_rgSet)
  data$chr <- as.character(data$chr)
  # range join: probe start position falls inside the bumphunter region
  result = sqldf("select * from cg_locations
                 inner join data
                 on cg_locations.start_rgSet between data.start and data.end")
  # keep only necessary columns
  result <- result[, c('chr' , 'start_rgSet','end_rgSet', 'probe_rgSet', 'p.value', 'fwer', 'run')]
  # rename cols
  colnames(result) <- c('chr', 'start', 'end', 'probe', 'p.value', 'fwer', 'run')
  # get significant results (nominal p < 0.05)
  result_sig <- result[result$p.value < 0.05,]
  # get family-wise-error-controlled results
  result_fwer <- result[result$fwer == 0,]
  return(list(result, result_sig, result_fwer))
}
getRun <- function(data, run_num)
{
  # Pull the unique probe names belonging to a single bumphunter run
  # (the `run` column tags which cutoff produced each row).
  run_rows <- data[data$run == run_num, ]
  run_rows <- run_rows[!duplicated(run_rows$probe), ]
  as.character(run_rows$probe)
}
testKS <- function(x, y)
{
  # Two-sided Kolmogorov-Smirnov test of whether x and y come from the same
  # distribution.  NAs are dropped first and a small jitter breaks ties
  # (ks.test warns on exact ties); jitter draws from the caller's RNG state.
  x <- x[!is.na(x)]
  y <- y[!is.na(y)]
  ks.test(jitter(x), jitter(y), alternative = 'two.sided')
}
# # # #
# training_dat = cases[train_index,]
# test_dat = cases[test_index,]
# controls_dat = betaControls
# valid_dat = betaValid
# bh_features = bh_feat_sig
# gender = T
runEnet <- function(training_dat,
                    test_dat,
                    controls_dat,
                    controls_dat_old,
                    controls_dat_full,
                    valid_dat,
                    bh_features,
                    gender)
{
  # Tune (alpha then lambda) and fit an elastic-net regression of age of
  # diagnosis on bumphunter probes, then score held-out cases, three control
  # sets (against age at sample collection), and a validation set.
  #
  # Args:
  #   training_dat / test_dat: case data frames (clinical cols + probes).
  #   controls_dat / controls_dat_old / controls_dat_full: control sets.
  #   valid_dat: validation cases.
  #   bh_features: character vector of bumphunter probe names.
  #   gender: logical; if TRUE include the M/F dummy columns as predictors.
  # Returns: list(alpha, lambda, coefficients, cases_cor, age_cor,
  #   controls_cor, controls_cor_full, controls_cor_old, valid_cor, nzero).
  #
  # CHANGED(review): the gender flag was previously ignored (M/F always
  # appended); it is now honoured, consistent with runEnetDiff/runEnetRand.
  bh_features <- as.character(unlist(bh_features))
  if (gender) {
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # restrict to features actually present in the training data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # outcome vectors for each data set
  train_y <- as.numeric(training_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  test_y_controls <- as.numeric(controls_dat$age_sample_collection)
  test_y_controls_old <- as.numeric(controls_dat_old$age_sample_collection)
  test_y_controls_full <- as.numeric(controls_dat_full$age_sample_collection)
  test_y_valid <- as.numeric(valid_dat$age_diagnosis)
  # age at sample collection for the test cases (for age_cor below);
  # keep an index of the non-missing entries
  patient_age <- as.numeric(test_dat$age_sample_collection)
  missing_ind <- !is.na(patient_age)
  patient_age <- patient_age[missing_ind]
  # subset every data set to the bumphunter feature space
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  controls_dat_old <- controls_dat_old[, intersected_feats]
  controls_dat_full <- controls_dat_full[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # create vector and list to store best alpha on training data. alpha is
  # the parameter that chooses the optimal mix between the L1 (lasso) and
  # L2 (ridge) penalties; lambda is the overall penalty strength.
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # candidate alpha values
  # set parameters for training model
  type_family <- 'gaussian'
  # cross-validate each candidate alpha N_CV_REPEATS times; rows of the
  # matrix are repeats, columns are alphas
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA))
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha]
                                                , type.measure = 'deviance'
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # stores 9 errors
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    # average the CV error over repeats, per alpha
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean)
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # index of the best alpha (lowest mean error; last one on ties)
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index]))
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # re-run CV at the chosen alpha until lambda.min keeps at least one
  # non-zero coefficient; bail out after 10 attempts (model diverged)
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # index of lambda.min in the lambda path: predictions must later be
    # read out at this same index, since predict() returns one column per
    # lambda value in the path
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # reseed from the clock so the next CV split differs
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # fall back to mid-path lambda
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # final fit over the full lambda path at the chosen alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # predict() returns one column per lambda; take the lambda.min column.
  # (FIX: removed a stray trailing empty argument from the original call.)
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'response')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # controls old
  temp_test.predictions_controls_old <- predict(model,
                                                data.matrix(controls_dat_old),
                                                type = 'response')
  test.predictions_controls_old <- temp_test.predictions_controls_old[, temp.min_lambda_index]
  # controls full
  temp_test.predictions_controls_full <- predict(model,
                                                 data.matrix(controls_dat_full),
                                                 type = 'response')
  test.predictions_controls_full <- temp_test.predictions_controls_full[, temp.min_lambda_index]
  # validation set
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'response')
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  importance <- coef(model)
  lambda_value <- elastic_net.cv_model$lambda.min
  # correlations of predictions with the relevant ages
  cases_cor <- cor(test_y, test.predictions)
  age_cor <- cor(test.predictions[missing_ind], patient_age)
  controls_cor <- cor(test_y_controls, test.predictions_controls)
  controls_cor_old <- cor(test_y_controls_old, test.predictions_controls_old)
  controls_cor_full <- cor(test_y_controls_full, test.predictions_controls_full)
  valid_cor <- cor(test_y_valid, test.predictions_valid)
  alpha <- best_alpha
  return(list(alpha, lambda_value, importance,
              cases_cor,
              age_cor,
              controls_cor,
              controls_cor_full,
              controls_cor_old,
              valid_cor,
              temp.non_zero_coeff))
}
##########
# enet diff
###########
# # # #
# training_dat = cases[train_index,]
# test_dat = cases[test_index,]
# controls_dat = betaControls
# valid_dat = betaValid
# bh_features = bh_feat_sig
# gender = T
# Tune (alpha then lambda) and fit an elastic-net regression of age of
# diagnosis on bumphunter probes; return the test-set predictions together
# with the true ages and ages at sample collection.
# Returns: list(test.predictions, test_y, patient_age).
runEnetDiff <- function(training_dat,
                        test_dat,
                        bh_features,
                        gender)
{
  if(gender) {
    # optionally include the gender dummy columns as predictors
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # restrict to features actually present in the training data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # # get y
  train_y <- as.numeric(training_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # get test age of sample collection
  patient_age <- as.numeric(test_dat$age_sample_collection)
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # create vector and list to store best alpha on training data. alpha is the parameter that choses the
  # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
  # set parameters for training model
  type_family <- 'gaussian'
  # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
  # or if you have a high number fo N_CV_REPEATS
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
                                                , type.measure = 'deviance'
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # stores 9 errors
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
    # as your value for alpha
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # get index of best alpha (lowest error) - alpha is values 0.1-0.9; ties -> last
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration (clock-based, so CV splits differ)
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # final fit over the full lambda path at the chosen alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # This returns 100 prediction with 1-100 lambdas; keep the lambda.min column
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  return(list(test.predictions, test_y, patient_age))
}
##########
# enet diff
###########
# Tune (alpha then lambda) and fit an elastic-net regression of age of
# diagnosis on bumphunter probes; score the test, validation, and control
# sets.  Returns: list(test preds, test_y, test ages, valid preds, valid_y,
# valid ages, control preds, control ages).
# NOTE(review): the `age_cutoff` argument is accepted but never used --
# presumably a leftover; confirm with callers before removing.
runEnetRand <- function(training_dat,
                        controls_dat,
                        valid_dat,
                        test_dat,
                        bh_features,
                        age_cutoff,
                        gender)
{
  if(gender) {
    # optionally include the gender dummy columns as predictors
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # restrict to features actually present in the training data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # # get y
  train_y <- as.numeric(training_dat$age_diagnosis)
  # other y and age
  valid_y <- as.numeric(valid_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # get test age of sample collection
  patient_age <- as.numeric(test_dat$age_sample_collection)
  patient_age_controls <- as.numeric(controls_dat$age_sample_collection)
  patient_age_valid <- as.numeric(valid_dat$age_sample_collection)
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # create vector and list to store best alpha on training data. alpha is the parameter that choses the
  # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
  # set parameters for training model
  type_family <- 'gaussian'
  # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
  # or if you have a high number fo N_CV_REPEATS
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
                                                , type.measure = 'deviance'
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # stores 9 errors
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
    # as your value for alpha
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # get index of best alpha (lowest error) - alpha is values 0.1-0.9; ties -> last
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration (clock-based, so CV splits differ)
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  # print(temp.non_zero_coeff)
  # final fit over the full lambda path at the chosen alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # This returns 100 prediction with 1-100 lambdas; keep the lambda.min column
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # get controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'response')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # get validation
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'response')
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  return(list(test.predictions, test_y, patient_age,
              test.predictions_valid, valid_y, patient_age_valid,
              test.predictions_controls, patient_age_controls))
}
##########
# lasso rand
###########
# Fit a pure lasso (alpha = 1, no alpha search) regression of age of
# diagnosis on bumphunter probes; score the test, validation, and three
# control sets.  Returns: list(test preds, test_y, test ages, valid preds,
# valid_y, valid ages, control preds/ages, full-control preds/ages,
# old-control preds/ages).
runLassoRand <- function(training_dat,
                         controls_dat,
                         controls_dat_old,
                         controls_dat_full,
                         valid_dat,
                         test_dat,
                         bh_features,
                         gender)
{
  if(gender) {
    # optionally include the gender dummy columns as predictors
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # restrict to features actually present in the training data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # # get y
  train_y <- as.numeric(training_dat$age_diagnosis)
  # other y and age
  valid_y <- as.numeric(valid_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # get test age of sample collection
  patient_age <- as.numeric(test_dat$age_sample_collection)
  patient_age_controls <- as.numeric(controls_dat$age_sample_collection)
  patient_age_controls_old <- as.numeric(controls_dat_old$age_sample_collection)
  patient_age_controls_full <- as.numeric(controls_dat_full$age_sample_collection)
  patient_age_valid <- as.numeric(valid_dat$age_sample_collection)
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  controls_dat_old <- controls_dat_old[, intersected_feats]
  controls_dat_full <- controls_dat_full[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  nfolds <- 5
  type_family <- 'gaussian'
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = 1
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration (clock-based, so CV splits differ)
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # final lasso fit over the full lambda path
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = 1
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # This returns 100 prediction with 1-100 lambdas; keep the lambda.min column
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # get controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'response')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # get controls
  temp_test.predictions_controls_old <- predict(model,
                                                data.matrix(controls_dat_old),
                                                type = 'response')
  test.predictions_controls_old <- temp_test.predictions_controls_old[, temp.min_lambda_index]
  # get controls full
  temp_test.predictions_controls_full <- predict(model,
                                                 data.matrix(controls_dat_full),
                                                 type = 'response')
  test.predictions_controls_full <- temp_test.predictions_controls_full[, temp.min_lambda_index]
  # get validation
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'response')
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  return(list(test.predictions, test_y, patient_age,
              test.predictions_valid, valid_y, patient_age_valid,
              test.predictions_controls, patient_age_controls,
              test.predictions_controls_full, patient_age_controls_full,
              test.predictions_controls_old, patient_age_controls_old))
}
##########
# ridge rand (glmnet, alpha = 0)
###########
#' Ridge regression (glmnet, alpha = 0) model of age of onset
#'
#' Fits a 5-fold cross-validated ridge model of age_diagnosis on the probe
#' columns named in bh_features (optionally plus the 'M'/'F' gender
#' indicator columns), then scores the test, validation and three control
#' data sets with the fitted model.
#'
#' @param training_dat cases used for fitting; needs age_diagnosis,
#'   age_sample_collection and the feature columns
#' @param controls_dat,controls_dat_old,controls_dat_full control sets that
#'   are only scored; their age_sample_collection values are returned
#' @param valid_dat validation cases (age_diagnosis + age_sample_collection)
#' @param test_dat held-out test cases
#' @param bh_features candidate feature (probe) names, e.g. from bumphunter
#' @param gender if TRUE, prepend the 'M' and 'F' indicator columns to
#'   bh_features before intersecting with the data
#' @return 12-element list: test predictions / truth / ages, validation
#'   predictions / truth / ages, then predictions and ages for the three
#'   control sets (see the final list() call for the exact order)
runRidgeRand <- function(training_dat,
                         controls_dat,
                         controls_dat_old,
                         controls_dat_full,
                         valid_dat,
                         test_dat,
                         bh_features,
                         gender)
{
  if(gender) {
    # add the gender indicator columns to the candidate feature set
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # keep only the candidate features that actually exist in the data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # outcome: age of onset for the training cases
  train_y <- as.numeric(training_dat$age_diagnosis)
  # outcomes for the validation and test cases
  valid_y <- as.numeric(valid_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # age at sample collection for every set that gets scored
  patient_age <- as.numeric(test_dat$age_sample_collection)
  patient_age_controls <- as.numeric(controls_dat$age_sample_collection)
  patient_age_controls_old <- as.numeric(controls_dat_old$age_sample_collection)
  patient_age_controls_full <- as.numeric(controls_dat_full$age_sample_collection)
  patient_age_valid <- as.numeric(valid_dat$age_sample_collection)
  # restrict every data set to the selected features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  controls_dat_old <- controls_dat_old[, intersected_feats]
  controls_dat_full <- controls_dat_full[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  nfolds <- 5
  type_family <- 'gaussian'
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # Retry loop: refit cv.glmnet until the fit at lambda.min keeps at least
  # one non-zero coefficient. With ridge (alpha = 0) coefficients are shrunk
  # but normally all non-zero, so this usually runs once; after 10 retries
  # the model is declared diverged and lambda index 50 (mid-path) is used.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = 0
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # Index of the cross-validated best lambda on the lambda path. glmnet
    # predictions come back as one column per lambda, so the SAME index must
    # be used below when extracting test predictions.
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # re-seed the RNG from the clock so a retry draws different CV folds
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # more than 10 retries: model did not converge
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # refit on the full training set over the same lambda path
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = 0
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # one prediction column per lambda; keep the CV-chosen column
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # score controls_dat
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'response')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # score controls_dat_old
  temp_test.predictions_controls_old <- predict(model,
                                                data.matrix(controls_dat_old),
                                                type = 'response')
  test.predictions_controls_old <- temp_test.predictions_controls_old[, temp.min_lambda_index]
  # score controls_dat_full
  temp_test.predictions_controls_full <- predict(model,
                                                 data.matrix(controls_dat_full),
                                                 type = 'response')
  test.predictions_controls_full <- temp_test.predictions_controls_full[, temp.min_lambda_index]
  # score the validation set
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'response')
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  return(list(test.predictions, test_y, patient_age,
              test.predictions_valid, valid_y, patient_age_valid,
              test.predictions_controls, patient_age_controls,
              test.predictions_controls_full, patient_age_controls_full,
              test.predictions_controls_old, patient_age_controls_old))
}
##########
# random forest rand
###########
#' Random-forest model of age of onset from methylation probes
#'
#' Trains a caret random forest regression of age_diagnosis on the selected
#' features and scores the test, validation and three control data sets.
#'
#' @param training_dat cases used for fitting (age_diagnosis, features, ...)
#' @param controls_dat,controls_dat_old,controls_dat_full control sets that
#'   are only scored; their age_sample_collection values are returned
#' @param valid_dat validation cases
#' @param test_dat held-out test cases
#' @param bh_features candidate feature (probe) names
#' @param gender if TRUE, add the 'M'/'F' indicator columns to the features
#' @return 12-element list: test predictions / truth / ages, validation
#'   predictions / truth / ages, then predictions and ages for the three
#'   control sets (same order as the sibling run* functions)
runRfRand <- function(training_dat,
                      controls_dat,
                      controls_dat_old,
                      controls_dat_full,
                      valid_dat,
                      test_dat,
                      bh_features,
                      gender)
{
  # Optionally prepend the gender indicator columns to the candidate set
  # (same ordering as the original append() calls: 'F', 'M', features).
  if (gender) {
    bh_features <- c('F', 'M', as.character(unlist(bh_features)))
  }
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # Outcomes for the case sets.
  train_y <- as.numeric(training_dat$age_diagnosis)
  valid_y <- as.numeric(valid_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # Age at sample collection for every set that gets scored.
  patient_age <- as.numeric(test_dat$age_sample_collection)
  patient_age_controls <- as.numeric(controls_dat$age_sample_collection)
  patient_age_controls_old <- as.numeric(controls_dat_old$age_sample_collection)
  patient_age_controls_full <- as.numeric(controls_dat_full$age_sample_collection)
  patient_age_valid <- as.numeric(valid_dat$age_sample_collection)
  # Restrict every data set to the selected features.
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  controls_dat_old <- controls_dat_old[, intersected_feats]
  controls_dat_full <- controls_dat_full[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # Repeated 4-fold cross-validation (1 repeat), parallel if a backend
  # is registered.
  cv_control <- trainControl(method = "repeatedcv",
                             number = 4,
                             repeats = 1,
                             allowParallel = TRUE)
  # Single mtry value: the usual sqrt(p) heuristic for random forests.
  rf_grid <- expand.grid(.mtry = sqrt(ncol(training_dat)))
  model <- train(x = training_dat,
                 y = train_y,
                 method = "rf",
                 trControl = cv_control,
                 tuneGrid = rf_grid,
                 importance = TRUE,
                 verbose = FALSE)
  # Variable importance, computed as in the original pipeline (not returned).
  imp_tab <- varImp(model)[[1]]
  importance <- cbind(rownames(imp_tab), imp_tab$Overall)
  # Score every evaluation set with the fitted forest.
  test.predictions <- predict(model, newdata = test_dat)
  test.predictions_controls <- predict(model, controls_dat)
  test.predictions_controls_old <- predict(model, controls_dat_old)
  test.predictions_controls_full <- predict(model, controls_dat_full)
  test.predictions_valid <- predict(model, valid_dat)
  list(test.predictions, test_y, patient_age,
       test.predictions_valid, valid_y, patient_age_valid,
       test.predictions_controls, patient_age_controls,
       test.predictions_controls_full, patient_age_controls_full,
       test.predictions_controls_old, patient_age_controls_old)
}
##########
# svm rand
###########
#' SVM (radial kernel) model of age of onset from methylation probes
#'
#' Trains a caret svmRadial regression of age_diagnosis on the selected
#' features and scores the test, validation and three control data sets.
#'
#' @param training_dat cases used for fitting (age_diagnosis, features, ...)
#' @param controls_dat,controls_dat_old,controls_dat_full control sets that
#'   are only scored; their age_sample_collection values are returned
#' @param valid_dat validation cases
#' @param test_dat held-out test cases
#' @param bh_features candidate feature (probe) names
#' @param gender if TRUE, add the 'M'/'F' indicator columns to the features
#' @return 12-element list: test predictions / truth / ages, validation
#'   predictions / truth / ages, then predictions and ages for the three
#'   control sets (same order as the sibling run* functions)
runSvmRand <- function(training_dat,
                       controls_dat,
                       controls_dat_old,
                       controls_dat_full,
                       valid_dat,
                       test_dat,
                       bh_features,
                       gender)
{
  if (gender) {
    # add the gender indicator columns to the candidate feature set
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # outcomes for the case sets
  train_y <- as.numeric(training_dat$age_diagnosis)
  valid_y <- as.numeric(valid_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # age at sample collection for every set that gets scored
  patient_age <- as.numeric(test_dat$age_sample_collection)
  patient_age_controls <- as.numeric(controls_dat$age_sample_collection)
  patient_age_controls_old <- as.numeric(controls_dat_old$age_sample_collection)
  patient_age_controls_full <- as.numeric(controls_dat_full$age_sample_collection)
  patient_age_valid <- as.numeric(valid_dat$age_sample_collection)
  # restrict every data set to the selected features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  controls_dat_old <- controls_dat_old[, intersected_feats]
  controls_dat_full <- controls_dat_full[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # repeated 4-fold cross-validation (1 repeat), parallel if a backend
  # is registered
  fitControl <- trainControl(
    method = "repeatedcv",
    number = 4,
    repeats = 1,
    allowParallel = TRUE)
  # BUG FIX: the original call had an empty argument between x and y
  # ("train(x = training_dat," followed by ", y = train_y"), which fails
  # at run time; the stray comma has been removed.
  model <- train(x = training_dat
                 , y = train_y
                 , method = "svmRadial"
                 , trControl = fitControl
                 , verbose = FALSE
  )
  # score every evaluation set
  test.predictions <- predict(model,
                              newdata = test_dat)
  test.predictions_controls <- predict(model,
                                       controls_dat)
  test.predictions_controls_old <- predict(model,
                                           controls_dat_old)
  test.predictions_controls_full <- predict(model,
                                            controls_dat_full)
  test.predictions_valid <- predict(model,
                                    valid_dat)
  return(list(test.predictions, test_y, patient_age,
              test.predictions_valid, valid_y, patient_age_valid,
              test.predictions_controls, patient_age_controls,
              test.predictions_controls_full, patient_age_controls_full,
              test.predictions_controls_old, patient_age_controls_old))
}
##########
# enet case
###########
#' Cross-validated elastic net over case samples only
#'
#' Runs 5-fold cross-validation over the cases: within each fold the
#' elastic-net mixing parameter alpha (0.1-0.9) is tuned by inner CV, a
#' final glmnet is fit at the chosen alpha, and the held-out fold is
#' scored. Returns the mean selected alpha and the mean held-out
#' prediction/truth correlation across the 5 folds.
#'
#' @param cases_data feature data frame/matrix for the cases (rows = samples)
#' @param cases_y numeric outcome (age of onset) aligned with cases_data rows
#' @param alpha_number integer; used as the RNG seed for the fold assignment
#' @return list(mean alpha selected across folds, mean held-out correlation)
runEnetCase <- function(cases_data, cases_y, alpha_number)
{
  set.seed(alpha_number)
  # random 5-fold assignment, one fold label (1..5) per sample
  fold_vec <- sample(1:5, nrow(cases_data), replace = T)
  cases_cor <- list()
  alpha_score_list <- list()
  for (i in 1:5) {
    # NOTE(review): fold membership is tested with grepl() on the fold id;
    # this only works because the labels are the single digits 1..5.
    train_index <- !grepl(i, fold_vec)
    test_index <- !train_index
    # split into training fold and held-out fold
    training_dat <- cases_data[train_index,]
    training_y <- cases_y[train_index]
    testing_dat <- cases_data[!train_index,]
    testing_y <- cases_y[!train_index]
    # inner-CV settings for the alpha search
    N_CV_REPEATS = 2
    nfolds = 3
    ###### ENET
    # storage for the alpha search; alpha mixes the L1 (lasso) and
    # L2 (ridge) penalties
    elastic_net.cv_error = vector()
    elastic_net.cv_model = list()
    elastic_net.ALPHA <- c(1:9) / 10 # candidate alpha values for the model to choose from
    # set parameters for training model
    type_family <- 'gaussian'
    # average the inner-CV error over N_CV_REPEATS repeats for each alpha
    # (foreach would allow parallel repeats for bigger data)
    temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
      for (alpha in 1:length(elastic_net.ALPHA)) # one cv.glmnet fit per candidate alpha
      {
        elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                  , y = training_y
                                                  , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
                                                  , type.measure = 'deviance'
                                                  , family = type_family
                                                  , standardize = FALSE
                                                  , nfolds = nfolds
                                                  , nlambda = 10
                                                  , parallel = TRUE
        )
        elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
      }
      elastic_net.cv_error # stores 9 errors, one per alpha
    }
    if (N_CV_REPEATS == 1) {
      temp.cv_error_mean = temp.cv_error_matrix
    } else {
      temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # mean error per alpha
      # across the repeats
    }
    # stop if you did not recover error for any models
    stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
    # index of the best (lowest mean error) alpha; ties -> take the last
    temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
    # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
    best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
    temp.non_zero_coeff = 0
    temp.loop_count = 0
    # Retry loop: refit cv.glmnet at the chosen alpha until lambda.min keeps
    # at least one non-zero coefficient; re-seed and retry otherwise, and
    # fall back to lambda index 50 after 10 failed attempts.
    while (temp.non_zero_coeff < 1) {
      elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                       , y = training_y
                                       , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                       , type.measure = 'deviance'
                                       , family = type_family
                                       , standardize=FALSE
                                       , nlambda = 100
                                       , nfolds = nfolds
                                       , parallel = TRUE
      )
      # Index of the CV-chosen lambda on the path. glmnet returns one
      # prediction column per lambda, so the same index must be used when
      # extracting held-out predictions below.
      temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
      # number of non zero coefficients at that lambda
      temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
      temp.loop_count = temp.loop_count + 1
      # re-seed the RNG from the clock so a retry draws different CV folds
      as.numeric(Sys.time())-> t
      set.seed((t - floor(t)) * 1e8 -> seed)
      if (temp.loop_count > 10) {
        print("diverged")
        temp.min_lambda_index = 50 # more than 10 retries: model did not converge
        break
      }
    }# while loop ends
    # print(temp.non_zero_coeff)
    # refit on the full training fold over the same lambda path
    model = glmnet(x = as.matrix(training_dat)
                   , y = training_y
                   ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                   ,standardize=FALSE
                   ,nlambda = 100
                   ,family = type_family)
    # one prediction column per lambda; keep the CV-chosen column
    temp_test.predictions <- predict(model,
                                     data.matrix(testing_dat),
                                     type = 'response')
    test.predictions <- temp_test.predictions[, temp.min_lambda_index]
    # NOTE(review): importance and lambda_value are computed but never used
    # or returned by this function.
    importance <- coef(model)
    lambda_value <- elastic_net.cv_model$lambda.min
    cases_cor[[i]] <- cor(testing_y, test.predictions)
    alpha_score_list[[i]] <- best_alpha
  }
  # summarize across the 5 outer folds
  mean_cor <- mean(unlist(cases_cor))
  mean_alpha <- mean(unlist(alpha_score_list))
  return(list(mean_alpha,
              mean_cor))
}
# training_dat = cases[train_index,]
# test_dat = cases[test_index,]
# controls_dat = betaControls
# valid_dat = betaValid
# bh_features = bh_feat_sig
# gender = T
# cutoff = 48
#' Elastic net classifier of early vs. late onset
#'
#' Dichotomizes age_diagnosis at `cutoff` ('yes' = onset <= cutoff), tunes
#' the elastic-net alpha (0.1-0.9) by cross-validated AUC, fits a final
#' binomial glmnet at the chosen alpha, and evaluates on the test set.
#'
#' @param training_dat cases used for fitting (age_diagnosis + features)
#' @param test_dat held-out cases
#' @param controls_dat,valid_dat accepted for interface compatibility; not
#'   used by the current implementation (see the commented lines below)
#' @param bh_features candidate feature (probe) names
#' @param gender if TRUE, add the 'M'/'F' indicator columns to the features
#' @param cutoff onset age separating the 'yes'/'no' classes
#' @return list(best alpha, lambda.min, coefficient matrix, caret
#'   confusionMatrix on the test set, fitted glmnet model, number of
#'   non-zero coefficients at the chosen lambda)
runEnetFac <- function(training_dat,
                       test_dat,
                       controls_dat,
                       valid_dat,
                       bh_features,
                       gender,
                       cutoff)
{
  # CONSISTENCY FIX: every sibling run* function only adds the gender
  # columns when `gender` is TRUE; previously this function ignored the
  # flag and always appended them.
  if (gender) {
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # dichotomize onset age at the cutoff ('yes' = early onset)
  train_y <- factor(ifelse(training_dat$age_diagnosis <= cutoff, 'yes', 'no'), levels = c('yes', 'no'))
  test_y <- factor(ifelse(test_dat$age_diagnosis <= cutoff, 'yes', 'no'), levels = c('yes', 'no'))
  # test_y_controls <- as.numeric(controls_dat$age_sample_collection)
  # test_y_valid <- as.numeric(valid_dat$age_diagnosis)
  # restrict to the selected features
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # controls_dat <- controls_dat[, intersected_feats]
  # valid_dat <- valid_dat[, intersected_feats]
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # storage for the alpha search; alpha mixes the L1 (lasso) and L2 (ridge)
  # penalties
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # candidate alpha values
  type_family <- 'binomial'
  type_measure <- 'auc'
  # average the inner-CV score over N_CV_REPEATS repeats for each alpha
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # one cv.glmnet fit per candidate alpha
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha]
                                                , type.measure = type_measure
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      # NOTE(review): with type.measure = 'auc' cvm holds AUC (higher is
      # better); taking min() here and min() again below would pick the
      # WORST alpha by AUC -- confirm whether max() was intended.
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # the 9 per-alpha scores
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # mean per alpha over repeats
  }
  # sanity check: one mean score per candidate alpha
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # index of the best (lowest score) alpha; ties -> take the last
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index]))
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # Retry loop: refit cv.glmnet at the chosen alpha until lambda.min keeps
  # at least one non-zero coefficient; re-seed and retry otherwise, and
  # fall back to lambda index 50 after 10 failed attempts.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # keep the lambda index so training and test use the same lambda column
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # re-seed the RNG from the clock so a retry draws different folds
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # fall back to a mid-path lambda
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # refit on the full training set over the same lambda path
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # class predictions, one column per lambda; take the CV-chosen column
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'class')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  # NOTE(review): caret's confusionMatrix(data, reference) expects the
  # predictions first and the truth second; here the truth is passed as
  # `data`. Accuracy is unaffected but sensitivity/specificity are swapped
  # relative to convention -- confirm the intended orientation.
  test_stats <- confusionMatrix(test_y, test.predictions)
  importance <- coef(model)
  lambda_value <- elastic_net.cv_model$lambda.min
  alpha <- best_alpha
  return(list(alpha, lambda_value, importance, test_stats, model, temp.non_zero_coeff))
}
##########
# get the variation of the probe that is orthogonal to age of sample collection
##########
#' Residualize each probe against age at sample collection
#'
#' For every intersected probe column, fits lm(probe ~ age_sample_collection)
#' and keeps the residuals, i.e. the probe variation orthogonal to age at
#' sample collection.
#'
#' @param data data frame with columns age_diagnosis, age_sample_collection,
#'   cancer_diagnosis_diagnoses, M, F plus the probe columns.
#' @param bh_features character vector of candidate probe names.
#' @return data frame: the five clinical columns followed by one residual
#'   column per intersected probe.
getResidual <- function(data,
                        bh_features)
{
  # keep only the candidate probes that are present in the data
  intersected_feats <- intersect(bh_features, colnames(data))
  # fail early with a clear message rather than an obscure indexing error
  stopifnot(length(intersected_feats) > 0)
  # clinical columns first, probes from column 6 onward
  data <- data[, c("age_diagnosis",
                   "age_sample_collection",
                   "cancer_diagnosis_diagnoses",
                   "M",
                   "F",
                   intersected_feats)]
  probes <- colnames(data)[6:ncol(data)]
  resid <- list()
  # residualize each probe against age at sample collection
  for (i in 6:ncol(data)) {
    probe_vals <- data[, i]
    sample_age <- data$age_sample_collection
    resid[[i]] <- lm(probe_vals ~ sample_age)$residuals
    print(i)
  }
  resid <- do.call('cbind', resid)  # NULL slots 1..5 are dropped by cbind
  resid <- apply(resid, 2, function(x) as.numeric(x))
  resid <- as.data.frame(resid)
  resid <- cbind(data$age_diagnosis,
                 data$age_sample_collection,
                 data$cancer_diagnosis_diagnoses,
                 data$M,
                 data$F,
                 resid)
  # BUG FIX: the original assigned only four clinical names ('gender' for
  # both M and F) to five clinical columns, which silently shifted every
  # probe name one column to the left; name all five columns explicitly.
  colnames(resid) <- c('age_diagnosis',
                       'age_sample_collection',
                       'cancer_diagnosis_diagnoses',
                       'M',
                       'F',
                       probes)
  return(resid)
}
##########
# get results
##########
#' Keep the first ten elements of a model-result list
#'
#' @param result_list list with at least ten elements.
#' @return an unnamed list containing elements 1 through 10.
getResults <- function(result_list)
{
  # equivalent to list(result_list[[1]], ..., result_list[[10]])
  unname(result_list[1:10])
}
##########
# get results cancer
##########
#' Keep the first six elements of a cancer model-result list
#'
#' @param result_list list with at least six elements.
#' @return an unnamed list containing elements 1 through 6.
getResultsCancer <- function(result_list)
{
  # equivalent to list(result_list[[1]], ..., result_list[[6]])
  unname(result_list[1:6])
}
##########
# function for finding probes with threshold differenceW
##########
#' Find probes whose per-sample absolute difference exceeds a threshold
#'
#' Compares the i-th row of `cases` with the i-th row of `other_850`
#' (probe columns are assumed to start at column 8) and collects the names
#' of probes whose absolute difference is greater than `thresh`.
#'
#' @param cases data frame, samples in rows, probes from column 8 onward.
#' @param other_850 data frame with the same layout and row order as `cases`.
#' @param thresh numeric threshold on the absolute per-probe difference.
#' @return character vector of probe names, one entry per sample/probe hit
#'   (duplicates retained; character(0) if there are no hits).
get_diff_probes <-
  function(cases,
           other_850,
           thresh) {
    temp_names <- list()
    # BUG FIX: seq_len() is safe when `cases` has zero rows, whereas
    # 1:nrow(cases) would iterate over c(1, 0).
    for (i in seq_len(nrow(cases))) {
      temp.sample_cases <- as.data.frame(t(cases[i, 8:ncol(cases)]))
      temp.sample_other_850 <- as.data.frame(t(other_850[i, 8:ncol(other_850)]))
      temp_diff <- abs(temp.sample_cases - temp.sample_other_850)
      # row names of the transposed frame are the probe names
      temp_names[[i]] <- rownames(temp_diff)[temp_diff > thresh]
      print(i)
    }
    # as.character() turns the empty (NULL) case into character(0)
    probe_names <- as.character(unlist(temp_names))
    return(probe_names)
  }
##########
# function for finding probes with threshold differenceW
##########
#' Find over-threshold probes and keep the difference and sample index
#'
#' Like get_diff_probes(), but returns a data frame keeping the absolute
#' difference, the sample index and the probe name for every hit.
#'
#' @param cases data frame, samples in rows, probes from column 8 onward.
#' @param other_850 data frame with the same layout and row order as `cases`.
#' @param thresh numeric threshold on the absolute per-probe difference.
#' @return data frame with columns diff, sample, names (one row per hit);
#'   NULL when `cases` has zero rows.
get_diff_probes_keep <-
  function(cases,
           other_850,
           thresh) {
    temp_names <- list()
    # BUG FIX: seq_len() is safe when `cases` has zero rows, whereas
    # 1:nrow(cases) would iterate over c(1, 0).
    for (i in seq_len(nrow(cases))) {
      temp.sample_cases <- as.data.frame(t(cases[i, 8:ncol(cases)]))
      temp.sample_other_850 <- as.data.frame(t(other_850[i, 8:ncol(other_850)]))
      temp_diff <- as.data.frame(abs(temp.sample_cases - temp.sample_other_850))
      colnames(temp_diff)[1] <- 'diff'
      temp_diff$sample <- i
      temp_diff$names <- rownames(temp_diff)
      # keep only the probes that exceed the threshold for this sample
      temp_names[[i]] <- temp_diff[temp_diff$diff > thresh,]
      print(i)
    }
    probe_data <- do.call(rbind, temp_names)
    return(probe_data)
  }
##########
# estimate linear model and transform
##########
#' Per-probe linear mapping from control betas to case betas
#'
#' For each probe (columns 8:ncol), fits lm(case ~ control) on the paired
#' cases_12 / controls_12 samples, then applies that probe-wise model to
#' controls_full, producing transformed control values. Columns 1..7 are
#' assumed to be the clinical/annotation columns re-attached at the end.
#'
#' @param cases_12 case samples; rows paired one-to-one with controls_12
#' @param controls_12 matched controls; same row order and probe columns
#' @param controls_full full control set to transform; needs the clinical
#'   columns referenced in the cbind() below plus the same probe columns
#' @return data frame: 7 clinical columns followed by the transformed
#'   (numeric) probe values for controls_full
linearTransform <- function (cases_12,
                             controls_12,
                             controls_full) {
  probe_model <- list()
  probe_control_result <- list()
  # NOTE: the loop starts at column 8, so list slots 1..7 stay NULL; they
  # are silently dropped by do.call(rbind, ...) below.
  for (i in 8:ncol(controls_full)) {
    control <- as.data.frame(controls_12[, i])
    cases <- as.data.frame(cases_12[, i])
    model_data <- data.frame(control = control, cases = cases)
    names(model_data) <- c('control', 'cases')
    # probe-wise linear map: case beta as a function of control beta
    probe_model[[i]] <- lm(cases ~ control, data = model_data)
    control_full <- as.numeric(controls_full[, i])
    model_data_new <- data.frame(control = control_full)
    names(model_data_new) <- 'control'
    # apply the fitted map to the full control set
    probe_control_result[[i]] <- predict(probe_model[[i]], newdata = model_data_new, type = 'response')
    print(i)
  }
  # one row per probe -> transpose back to samples x probes
  temp <- do.call(rbind, probe_control_result)
  transform_controls <- t(temp)
  # add cg sites
  colnames(transform_controls) <- colnames(controls_full[8:ncol(controls_full)])
  # re-attach the clinical variables
  transform_controls <- as.data.frame(cbind(id = controls_full$id,
                                            p53_germline = controls_full$p53_germline,
                                            cancer_diagnosis_diagnoses = controls_full$cancer_diagnosis_diagnoses,
                                            age_diagnosis = controls_full$age_diagnosis,
                                            age_sample_collection = controls_full$age_sample_collection,
                                            gender = controls_full$gender,
                                            sentrix_id = controls_full$sentrix_id,
                                            transform_controls))
  # the cbind() above goes through a character matrix when any clinical
  # column is non-numeric; coerce the probe columns back to numeric
  transform_controls[, 8:ncol(transform_controls)] <- apply(transform_controls[, 8:ncol(transform_controls)],
                                                            2,
                                                            function(x) as.numeric(x))
  return(transform_controls)
}
# subset to get methylation data
#' Split a combined clinical + methylation data frame into model inputs
#'
#' @param full_data data frame: clinical columns first, probe columns from
#'   `probe_start` onward.
#' @param probe_start column index of the first probe.
#' @param seed_num seed forwarded to getFolds() for the fold assignment.
#' @param k number of folds forwarded to getFolds().
#' @return list(probe matrix with probes in rows and no dimnames,
#'   clinical data frame, fold vector, probe names).
get_model_dat <- function(full_data, probe_start, seed_num, k) {
  # methylation block: probes in rows, samples in columns, dimnames stripped
  beta_methyl <- t(as.matrix(full_data[, probe_start:ncol(full_data)]))
  dimnames(beta_methyl) <- NULL
  # clinical block: every column before the first probe
  clin_data <- full_data[1:(probe_start - 1)]
  # CV fold assignment (project helper) and the probe names
  fold_vec <- getFolds(full_data, seed_number = seed_num, k = k)
  feat_names <- colnames(full_data)[probe_start:ncol(full_data)]
  list(beta_methyl, clin_data, fold_vec$folds, feat_names)
}
#########
# predict
#########
#' Supervised-PCA prediction (locally modified copy of superpc::superpc.predict)
#'
#' Selects the features whose scores exceed `threshold`, builds principal
#' components from the training data and projects both training and new
#' data onto them, then turns the components into continuous or discrete
#' predictions. Lines tagged "## yp" are local modifications of the
#' original superpc package code.
#'
#' @param object fitted superpc object (uses $feature.scores and $type)
#' @param data training data list with $x (features x samples) and $y
#' @param newdata test data list with $x in the same feature order
#' @param threshold absolute feature-score cutoff for inclusion
#' @param n.components number of principal components (max 3 downstream)
#' @param prediction.type "continuous", "discrete" or "nonzero"
#' @param n.class number of discrete classes (max 3)
#' @return list with test predictions (v.pred), training projections
#'   (v.train), SVD pieces, selected features and the 1-df combined score
#'
#' NOTE(review): only prediction.type = "continuous" appears fully
#' functional here -- for "nonzero", cur.v0 is never defined before the
#' superpc.fit.to.outcome() call, and for "discrete", out_train is never
#' assigned before the return list. Confirm before using those paths.
superpc.predict <- function (object, data, newdata, threshold, n.components = 3,
                             prediction.type = c("continuous", "discrete", "nonzero"),
                             n.class = 2)
{
  this.call <- match.call()
  prediction.type <- match.arg(prediction.type)
  if (n.class > 3) {
    stop("Maximum number of survival classes is 3")
  }
  # get features whose absolute feature.scores reach the threshold
  which.features <- (abs(object$feature.scores) >= threshold)
  # subset the training x by feature (row index: the matrix is p x n)
  x.sml <- data$x[which.features, ]
  # number of principal components to keep
  n.pc <- n.components
  # PCA of the reduced training data
  x.sml.svd <- mysvd(x.sml, n.components = n.components)
  if (prediction.type == "nonzero") {
    if (!is.null(data$featurenames)) {
      out <- data$featurenames[which.features]
    }
    else {
      # fall back to the row indices of the selected features
      out <- (1:nrow(data$x))[which.features]
    }
  }
  #### HERE
  if (prediction.type == "continuous" | prediction.type ==
      "discrete") {
    # test x, restricted to the chosen features
    xtemp = newdata$x[which.features, ]
    # this is all to get your pca from the test data
    # x temp 9 test data centered by the means from
    # xtemp1 = t(scale(t(xtemp), center = x.sml.svd$feature.means,
    #                scale = F))
    # per-component normalizer used for both projections below
    scal = apply(scale(abs(x.sml.svd$u), center = F, scale = x.sml.svd$d),
                 2, sum)
    # PCA scores for the test data
    cur.v <- scale(t(xtemp) %*% x.sml.svd$u, center = FALSE,
                   scale = scal * x.sml.svd$d)
    # training x with the chosen features
    xtemp0 = data$x[which.features, ]
    # center the training x by the stored feature means
    xtemp0 = t(scale(t(xtemp0), center = x.sml.svd$feature.means,
                     scale = F))
    # PCA scores for the training data
    cur.v0 <- scale(t(xtemp0) %*% x.sml.svd$u, center = FALSE,
                    scale = scal * x.sml.svd$d)
  }
  # regress the training outcome on the training PCA scores
  result <- superpc.fit.to.outcome(object, data, cur.v0, print = FALSE)$results
  if (object$type == "survival") {
    coef = result$coef
  }
  if (object$type == "regression") {
    # drop the intercept; keep the per-component coefficients
    coef = result$coef[-1]
  }
  if (prediction.type == "continuous") {
    # test scores - flip sign on components with negative coefficients
    out <- scale(cur.v, center = FALSE, scale = sign(coef))
    # training scores - same sign flip
    out_train <- scale(cur.v0, center = FALSE, scale = sign(coef))
    # coefficient-weighted sum of components -> single combined score
    v.pred.1df = apply(scale(out, center = FALSE, scale = 1/abs(coef)),
                       1, sum)
  }
  else if (prediction.type == "discrete") {
    out0 <- scale(cur.v0, center = FALSE, scale = sign(coef))
    v.pred0.1df = apply(scale(out0, center = FALSE, scale = 1/abs(coef)),
                        1, sum)
    out <- scale(cur.v, center = FALSE, scale = sign(coef))
    v.pred.1df = apply(scale(out, center = FALSE, scale = 1/abs(coef)),
                       1, sum)
    # dichotomize each component at the median of the TRAINING scores
    for (j in 1:ncol(out)) {
      # br = quantile(cur.v0[, j], (0:n.class)/n.class)
      br = quantile(out0[, j], (0:n.class)/n.class) ## yp
      # out[, j] <- cut(out[, j], breaks = br, n.class, labels = FALSE)
      out[,j] = ifelse(out[,j] <= br[2], 1, 2) ## yp
      # out[is.na(out[, j]), j] <- 1
    }
    # same dichotomization for the combined 1-df score
    br = quantile(v.pred0.1df, (0:n.class)/n.class)
    # v.pred.1df <- cut(v.pred.1df, breaks = br, labels = FALSE)
    # v.pred.1df[is.na(v.pred.1df)] <- 1
    v.pred.1df = ifelse(v.pred.1df <= br[2], 1, 2) ## yp
  }
  if (is.matrix(out)) {
    dimnames(out) = list(NULL, rep(prediction.type, ncol(out)))
  }
  junk <- list(v.pred = out, v.train = out_train, u = x.sml.svd$u, d = x.sml.svd$d,
               which.features = which.features, v.pred.1df = v.pred.1df,
               n.components = n.pc, coef = result$coef, call = this.call,
               prediction.type = prediction.type)
  return(junk)
}
##########
# fit to outcome function
##########
# superpc.fit.to.outcome(train.obj, data.test, fit.cts$v.pred)
# fit <- fit.cts
# data.test <- data.test
# score <- fit.cts$v.pred
#' Fit the outcome against supervised-PCA scores (superpc helper)
#'
#' For survival objects fits a Cox model; otherwise fits a linear model of
#' the outcome on the score columns (plus any competing predictors), and
#' returns the fit together with an overall test statistic table.
#'
#' @param fit superpc object; only its $type ("survival"/"regression") is used
#' @param data.test list with $y (and $censoring.status for survival)
#' @param score matrix (max 3 columns) or vector of PCA scores
#' @param competing.predictors optional extra predictors appended to the model
#' @param print if TRUE, print summary() of the fitted model
#' @param iter.max Cox model iteration cap
#' @return list(results = fitted model, teststat.table = overall test
#'   statistic/df/p-value, coeftable = summary coefficient table)
#'
#' NOTE(review): the regression branch uses the formula data.test$y ~ .,
#' and temp.list also contains y -- because the response is not the plain
#' name `y`, the dot expands to include y as a predictor. Confirm this is
#' intended. Also, ss$fstat relies on $ partial matching of "fstatistic".
superpc.fit.to.outcome<- function(fit, data.test,score, competing.predictors=NULL, print=TRUE, iter.max=5){
  type=fit$type
  # bundle outcome (+ censoring) and the score columns into one list
  if(type=="survival"){temp.list=makelist(data.test$y, data.test$censoring.status, score)}
  if(type=="regression"){temp.list=makelist(data.test$y,NULL, score)}
  if(!is.null(competing.predictors)){
    temp.list=c(temp.list,competing.predictors)
  }
  if(type=="survival"){
    require(survival)
    results<-coxph(Surv(y, censoring.status)~., data=temp.list, control=coxph.control(iter.max=iter.max))
  }
  else{
    # fit the outcome against the (up to 3) PCA scores
    results<-lm(data.test$y~., data=temp.list)
  }
  if(print){print(summary(results))}
  ss=summary(results)
  # overall model test: likelihood-ratio for Cox, F-test for lm
  if(type=="survival"){ test.stat=ss$logtest[1]
  df=ss$logtest[2]
  pvalue=ss$logtest[3]
  }
  if(type=="regression"){ test.stat=ss$fstat[1]
  df=ss$fstat[2:3]
  pvalue=1-pf(test.stat,df[1],df[2])
  }
  teststat.table=matrix(c(test.stat, df, pvalue), nrow=1)
  if(length(df)==1){dflabel="df"}
  if(length(df)==2){dflabel=c("df1", "df2")}
  dimnames(teststat.table)=list(NULL,c("test statistic",dflabel,"p-value"))
  return(list(results=results, teststat.table=teststat.table, coeftable=ss$coef))
}
##########
# fit to outcome function cross validation
##########
#' Cross-validated linear fit of an outcome on three PCA scores
#'
#' @param y numeric outcome vector.
#' @param score matrix/data frame with exactly three PCA score columns.
#' @param cv one of 'loocv' (leave-one-out), 'k_fold' (random 3-fold) or
#'   'auto' (DAAG::cv.lm with m = 2; DAAG must be attached by the caller).
#' @return list(correlation between held-out predictions and truth,
#'   the fitted models -- or the cv.lm output for cv = 'auto').
superpc_fit_lm_cv <-
  function(y,
           score,
           cv){
    # model frame: outcome plus the three principal-component scores
    mod_data <- as.data.frame(cbind(y, score))
    colnames(mod_data) <- c('y', 'pca1', 'pca2', 'pca3')
    feat_names <- colnames(mod_data)[-1]
    # containers for held-out truth, predictions and fitted models
    test_y <-
      y_pred <-
      trained_model <- list()
    if (cv == 'loocv') {
      # leave-one-out: fit on all rows but i, predict row i
      for (i in seq_len(nrow(mod_data))) {
        train_x_y <- mod_data[-i, ]
        test_x <- mod_data[i, feat_names]
        test_y[[i]] <- mod_data$y[i]
        trained_model[[i]] <- lm(y ~ ., train_x_y)
        y_pred[[i]] <- predict(trained_model[[i]], test_x)
      }
      onset_cor <- cor(unlist(y_pred), unlist(test_y))
      return(list(onset_cor, trained_model))
    }
    if (cv == 'k_fold') {
      # random 3-fold assignment (no seed is set, so folds vary per call)
      mod_data$folds <- sample(c(1, 2, 3), nrow(mod_data), replace = TRUE)
      for (i in unique(sort(mod_data$folds))) {
        # BUG FIX: fold membership was tested with grepl() on the fold id,
        # which does substring matching (fold "1" also matches "10"); use a
        # direct comparison instead.
        test_set <- mod_data$folds == i
        train_set <- !test_set
        train_x_y <- mod_data[train_set, c('y', feat_names)]
        test_x <- mod_data[test_set, feat_names]
        test_y[[i]] <- mod_data$y[test_set]
        trained_model[[i]] <- lm(y ~ ., train_x_y)
        y_pred[[i]] <- predict(trained_model[[i]], test_x)
      }
      onset_cor <- cor(unlist(y_pred), unlist(test_y))
      return(list(onset_cor, trained_model))
    }
    if (cv == 'auto') {
      # NOTE(review): cv.lm() comes from the DAAG package, which must be
      # attached by the caller; m = 2 means 2-fold cross-validation.
      auto_cv <- cv.lm(mod_data, formula(y ~ .), m = 2)
      temp <- do.call(rbind, auto_cv)
      onset_cor <- cor(temp$cvpred, temp$y)
      return(list(onset_cor, auto_cv))
    }
    # previously an unknown cv silently returned NULL; fail loudly instead
    stop("cv must be one of 'loocv', 'k_fold' or 'auto'", call. = FALSE)
  }
##########
# function for listing objects
##########
#' Bundle an outcome, optional censoring status and up to three scores
#' into a named list suitable for model fitting (superpc helper)
#'
#' @param y outcome vector.
#' @param censoring.status optional censoring indicator (NULL to omit).
#' @param predictors vector or matrix (max 3 columns) of scores; a column
#'   name of "factor" marks that score for factor conversion.
#' @return list with elements y, optionally censoring.status, and
#'   score.1 [, score.2 [, score.3]].
makelist <- function (y, censoring.status, predictors)
{
  out <- list(y = y)
  if (!is.null(censoring.status)) {
    out$censoring.status <- censoring.status
  }
  # a non-matrix predictor becomes the single score column as-is
  if (!is.matrix(predictors)) {
    out$score.1 <- predictors
    return(out)
  }
  n_scores <- ncol(predictors)
  if (n_scores > 3) {
    stop("Can't have > 3 principal components")
  }
  # column names double as type markers ("factor" triggers conversion)
  col_types <- dimnames(predictors)[[2]]
  if (is.null(col_types)) {
    col_types <- rep("continuous", n_scores)
  }
  for (j in seq_len(n_scores)) {
    score_j <- predictors[, j]
    if (col_types[j] == "factor") {
      score_j <- as.factor(score_j)
    }
    out[[paste0("score.", j)]] <- score_j
  }
  return(out)
}
#' Truncated SVD of a row-centered feature matrix (superpc helper)
#'
#' Centers each row of x by its mean, then returns the first n.components
#' left/right singular vectors and singular values. When there are more
#' features than samples (p > n), the SVD is obtained via the eigen
#' decomposition of the smaller n x n cross-product matrix.
#'
#' @param x numeric matrix, features in rows (p) and samples in columns (n).
#' @param n.components number of components to keep; defaults to min(n, p).
#' @return list(u, d, v, feature.means) where feature.means are the row
#'   means used for centering.
mysvd <- function (x, n.components = NULL) {
  p <- nrow(x)
  n <- ncol(x)
  feature.means <- rowMeans(x)
  # center each feature (row) at zero
  x <- t(scale(t(x), center = feature.means, scale = FALSE))
  if (is.null(n.components)) {
    n.components = min(n, p)
  }
  if (p > n) {
    # work in the smaller n x n space: eigenvectors of x'x are the right
    # singular vectors, sqrt of its eigenvalues are the singular values
    a <- eigen(t(x) %*% x)
    v <- a$vec[, 1:n.components, drop = FALSE]
    # BUG FIX: the vector subscript previously carried a spurious
    # `drop = FALSE` argument (only valid for matrix subscripts)
    d <- sqrt(a$val[1:n.components])
    u <- scale(x %*% v, center = FALSE, scale = d)
    return(list(u = u, d = d, v = v, feature.means = feature.means))
  }
  else {
    # BUG FIX: svd(x, LINPACK = TRUE) errors on modern R -- the LINPACK
    # argument has been defunct since R 3.1.0.
    junk <- svd(x)
    nc = min(ncol(junk$u), n.components)
    return(list(u = junk$u[, 1:nc], d = junk$d[1:nc], v = junk$v[,
                                                                 1:nc], feature.means = feature.means))
  }
}
##########
# elastic-net regression of age at diagnosis
##########
# Elastic-net regression (gaussian family) of age at diagnosis on
# bumphunter-derived methylation features: tunes the mixing parameter
# alpha over 0.1-0.9 by cross-validation, refits at the best alpha, and
# predicts age of onset for the test set.
#
# Args:
#   training_dat: data frame with 'age_diagnosis',
#     'age_sample_collection', and probe feature columns (plus 'M'/'F'
#     dummy columns when gender = TRUE -- TODO confirm they exist).
#   test_dat: held-out data frame with the same columns.
#   bh_features: probe names selected by bumphunter.
#   gender: if TRUE, also include the 'M'/'F' indicator columns.
#
# Returns:
#   list(test.predictions, test_y, patient_age): predicted ages at the
#   lambda.min column, true ages at diagnosis, and ages at collection.
runEnetRandResid <- function(training_dat,
                             test_dat,
                             bh_features,
                             gender)
{
  if(gender) {
    # get intersection of bh features and real data
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # keep only the requested features actually present in the data
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # # get y
  train_y <- as.numeric(training_dat$age_diagnosis)
  test_y <- as.numeric(test_dat$age_diagnosis)
  # get test age of sample collection
  patient_age <- as.numeric(test_dat$age_sample_collection)
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # create vector and list to store best alpha on training data. alpha is the parameter that choses the
  # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
  # set parameters for training model
  type_family <- 'gaussian'
  # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
  # or if you have a high number fo N_CV_REPEATS
  # Each foreach repeat yields one row of CV errors, one column per alpha.
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
                                                , type.measure = 'deviance'
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # stores 9 errors
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
    # as your value for alpha
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # get index of best alpha (lowest error) - alpha is values 0.1-0.9
  # (ties are broken by taking the LAST index with the minimum error)
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  # Refit cv.glmnet at the chosen alpha until lambda.min keeps at least
  # one non-zero coefficient; reseed between attempts so the CV folds
  # differ, and bail out to a mid-path lambda after 10 tries.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = 'deviance'
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  print(temp.non_zero_coeff)
  # Final fit on the full lambda path at the chosen alpha; predictions
  # are taken at the lambda.min column found above.
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  return(list(test.predictions, test_y, patient_age))
}
##########
# elastic-net classifier returning per-sample prediction tables
##########
# #
# training_dat = m_train_cases
# controls_dat = m_controls_mod
# valid_dat = m_valid_mod
# test_dat = m_test_cases
# age_cutoff = 72
# bh_features = bh_features
# rand_feats = rand_feats
# gender = T
# Elastic-net logistic classifier of early onset
# (age_diagnosis < age_cutoff) on bumphunter-derived methylation
# features. Tunes the mixing parameter alpha over 0.1-0.9 by
# cross-validation, refits at the best alpha, and returns per-sample
# prediction tables (clinical columns + prediction + label) for the
# test, control, and validation sets.
#
# Args:
#   training_dat, test_dat, valid_dat: case data frames with
#     'age_diagnosis', 'age_sample_collection', clinical columns
#     (assumed to sit in columns 3:11 -- TODO confirm), and probe
#     feature columns.
#   controls_dat: control data frame, labelled by age at collection.
#   age_cutoff: onset threshold; label 1 = diagnosed before it.
#   bh_features: probe names selected by bumphunter.
#   gender: if TRUE, also use the 'M'/'F' indicator columns.
#
# Returns:
#   list(temp_cases_test, temp_controls, temp_valid): data frames of
#   clinical columns plus test_pred/test_label per evaluation set.
runEnetRandFac <- function(training_dat,
                           controls_dat,
                           valid_dat,
                           test_dat,
                           age_cutoff,
                           bh_features,
                           gender) {
  # get intersection of bh features and real data
  bh_features <- as.character(unlist(bh_features))
  # BUGFIX: append the gender columns BEFORE intersecting with the
  # data's column names (as runEnetRandResid/runLassoL1RandFac do). The
  # original appended 'M'/'F' to the already-intersected list, which
  # crashed the subsetting below whenever those columns were absent.
  if (gender) {
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # binary labels: 1 = diagnosed before age_cutoff, 0 = later onset
  train_y <- ifelse(training_dat$age_diagnosis < age_cutoff, 1, 0)
  test_y <- ifelse(test_dat$age_diagnosis < age_cutoff, 1, 0)
  valid_y <- ifelse(valid_dat$age_diagnosis < age_cutoff, 1, 0)
  # age-at-collection labels (controls have no diagnosis age)
  patient_age <- ifelse(test_dat$age_sample_collection < age_cutoff, 1, 0)
  patient_age_controls <- ifelse(controls_dat$age_sample_collection < age_cutoff, 1, 0)
  patient_age_valid <- ifelse(valid_dat$age_sample_collection < age_cutoff, 1, 0)
  # get train and test clinical data
  training_clin <- training_dat[,3:11]
  test_clin <- test_dat[,3:11]
  controls_clin <- controls_dat[, 3:11]
  valid_clin <- valid_dat[, 3:11]
  # keep only the model features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # elastic-net tuning settings
  N_CV_REPEATS <- 2
  nfolds <- 3
  # cross-validated error per candidate alpha (the L1/L2 mixing
  # parameter), repeated N_CV_REPEATS times; one row per repeat
  elastic_net.cv_error <- vector()
  elastic_net.cv_model <- list()
  elastic_net.ALPHA <- c(1:9) / 10
  type_family <- 'binomial'
  type_measure <- 'auc'
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha]
                                                , type.measure = type_measure
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # one error per alpha
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    # mean CV error across repeats, one entry per alpha
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean)
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # best (lowest mean error) alpha; ties broken by the last index
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # Refit cv.glmnet at the chosen alpha until lambda.min keeps at least
  # one non-zero coefficient; reseed between attempts so the CV folds
  # differ, and bail out to a mid-path lambda after 10 tries.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # index of lambda.min on the lambda path; predictions below are
    # taken at this same column
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # reseed so the next CV draw uses different folds
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # model did not converge
      break
    }
  }
  # final fit on the full lambda path at the chosen alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  ##########
  # Predictions on test data (probabilities at the lambda.min column)
  ##########
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  temp_cases_dat <- as.data.frame(cbind(test_pred = test.predictions, test_label = test_y))
  temp_cases_test <- cbind(test_clin, temp_cases_dat)
  ##########
  # Predictions on controls
  # NOTE(review): type = 'class' here returns labels, unlike the
  # probabilities used for the test set -- confirm this is intended.
  ##########
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'class')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  temp_controls_dat <- as.data.frame(cbind(test_pred = test.predictions_controls,
                                           test_label = patient_age_controls))
  temp_controls <- cbind(controls_clin, temp_controls_dat)
  ##########
  # Predictions on valid dat
  ##########
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'class')
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  temp_valid_dat <- as.data.frame(cbind(test_pred = test.predictions_valid,
                                        test_label = valid_y))
  temp_valid <- cbind(valid_clin, temp_valid_dat)
  return(list(temp_cases_test, temp_controls, temp_valid))
}
##########
# lasso (L1) classifier with random-feature null comparison
##########
# Lasso (alpha = 1) logistic classifier of early onset
# (age_diagnosis < age_cutoff), fit twice: once on the bumphunter
# probe features and once on a random feature set as a null
# comparison. Each fit is evaluated on the test set (against true
# labels and against age at sample collection), on controls, and on a
# validation set.
#
# Args:
#   training_dat, test_dat, valid_dat: case data frames with
#     'age_diagnosis', 'age_sample_collection', and probe columns.
#   controls_dat: control data frame, labelled by age at collection.
#   age_cutoff: onset threshold defining the 'yes' class.
#   bh_features: probe names selected by bumphunter.
#   rand_feats: random probe names for the null model.
#   gender: if TRUE, also use the 'M'/'F' indicator columns.
#
# Returns:
#   list of ten caret confusion matrices: five for the real features
#   (test vs labels, test vs collection age, controls, valid vs labels,
#   valid vs collection age), then the same five for random features.
runLassoL1RandFac <- function(training_dat,
                              controls_dat,
                              valid_dat,
                              test_dat,
                              age_cutoff,
                              bh_features,
                              rand_feats,
                              gender) {
  if(gender) {
    # get intersection of bh features and real data
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # get intersection of bh features and real data
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
  # # get y ('yes' = diagnosed before age_cutoff)
  train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  test_y <- as.factor(ifelse(test_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  valid_y <- as.factor(ifelse(valid_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  # get test age of sample collection
  patient_age <- as.factor(ifelse(test_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_controls <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_valid <- as.factor(ifelse(valid_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  # get random features
  training_dat_rand <- training_dat[, intersected_feats_rand]
  controls_dat_rand <- controls_dat[, intersected_feats_rand]
  valid_dat_rand <- valid_dat[, intersected_feats_rand]
  test_dat_rand <- test_dat[,intersected_feats_rand]
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  # set parameters for training model
  type_family <- 'binomial'
  type_measure <- 'auc'
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  # (Refit cv.glmnet until lambda.min keeps >= 1 non-zero coefficient,
  # reseeding between attempts; fall back to a mid-path lambda after
  # 10 tries.)
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = 1
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  # final lasso fit on the full lambda path (real features)
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = 1
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  ##########
  # Predictions on test data
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # test.predictions <- ifelse(test.predictions >= pred_cutoff, 'yes', 'no')
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_y <- factor(test_y, levels = c('yes','no'))
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  test_stats <- caret::confusionMatrix(test.predictions, test_y)
  test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
  ##########
  # Predictions against controls
  ##########
  # get controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'class')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # test.predictions_controls <- ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no')
  test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  test_stats_controls <- caret::confusionMatrix(test.predictions_controls, patient_age_controls)
  ##########
  # Predictions on valid dat
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  # test.predictions_valid <- ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no')
  test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
  test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
  # stash the real-feature results before the random-feature run reuses
  # the same variable names below
  test_stats_feats <- test_stats
  test_stats_age_feats <- test_stats_age
  test_stats_controls_feats <- test_stats_controls
  test_stats_valid_feats <- test_stats_valid
  test_stats_age_valid_feats <- test_stats_age_valid
  ###########################################################################################
  # RAND: repeat the identical fit/evaluate pipeline on the random
  # feature set as a null comparison
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat_rand)
                                     , y = train_y
                                     , alpha = 1
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  # final lasso fit on the random features
  model = glmnet(x = as.matrix(training_dat_rand)
                 , y = train_y
                 ,alpha = 1
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  ##########
  # Predictions on test data
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat_rand),
                                   type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # test.predictions <- ifelse(test.predictions >= pred_cutoff, 'yes', 'no')
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_y <- factor(test_y, levels = c('yes','no'))
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  test_stats <- caret::confusionMatrix(test.predictions, test_y)
  test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
  ##########
  # Predictions against controls
  ##########
  # get controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat_rand),
                                            type = 'class')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # test.predictions_controls <- ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no')
  test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  test_stats_controls <- caret::confusionMatrix(test.predictions_controls, patient_age_controls)
  ##########
  # Predictions on valid dat
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat_rand),
                                         type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  # test.predictions_valid <- ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no')
  test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
  test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
  ###########################################################################################
  # real-feature results first, then the random-feature (null) results
  return(list(test_stats_feats, test_stats_age_feats,
              test_stats_controls_feats, test_stats_valid_feats,
              test_stats_age_valid_feats,
              test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
##########
# elastic-net classifier on random features (null model)
##########
# Elastic-net logistic classifier of early onset
# (age_diagnosis < age_cutoff) fit on a RANDOM feature set, as a null
# comparison to the bumphunter-feature models. Tunes alpha over
# 0.1-0.9 by cross-validation, refits at the best alpha, and evaluates
# on the test, control, and validation sets.
#
# Args:
#   training_dat, test_dat, valid_dat: case data frames with
#     'age_diagnosis', 'age_sample_collection', and probe columns.
#   controls_dat: control data frame, labelled by age at collection.
#   rand_feats: random probe names to use as features.
#   age_cutoff: onset threshold defining the 'yes' class.
#
# Returns:
#   list(test_stats, test_stats_age, test_stats_controls,
#   test_stats_valid, test_stats_age_valid) of caret confusion
#   matrices.
run_enet_rand <- function(training_dat,
                          controls_dat,
                          valid_dat,
                          test_dat,
                          rand_feats,
                          age_cutoff) {
  # keep only random features actually present in the data
  intersected_feats <- intersect(rand_feats, colnames(training_dat))
  # # get y ('yes' = diagnosed before age_cutoff)
  train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  test_y <- as.factor(ifelse(test_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  valid_y <- as.factor(ifelse(valid_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  # get test age of sample collection
  patient_age <- as.factor(ifelse(test_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_controls <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_valid <- as.factor(ifelse(valid_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  # get bumphunter features
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # create vector and list to store best alpha on training data. alpha is the parameter that choses the
  # the optimal proportion lambda, the tuning parameter for L1 (ridge) and L2 (lasso)
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # creates possible alpha values for model to choose from
  # set parameters for training model
  type_family <- 'binomial'
  type_measure <- 'auc'
  # create error matrix for for opitmal alpha that can run in parraellel if you have bigger data
  # or if you have a high number fo N_CV_REPEATS
  # Each foreach repeat yields one row of CV errors, one column per alpha.
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # for i in 1:9 - the model will run 9 times
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha] # first time with 0.1 and so on
                                                , type.measure = type_measure
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
      )
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # stores 9 errors
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean) # take the mean of the 5 iterations
    # as your value for alpha
  }
  # stop if you did not recover error for any models
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # get index of best alpha (lowest error) - alpha is values 0.1-0.9
  # (ties are broken by taking the LAST index with the minimum error)
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index])) # print the value for alpha
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # loop runs initially because temp.non_zero coefficient <3 and then stops
  # usually after one iteration because the nzero variable selected by lambda is greater that 3. if it keeps looping
  # it they are never greater than 1, then the model does not converge.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
    )
    # get optimal lambda - the tuning parameter for ridge and lasso
    # THIS IS IMPORTANT BECAUSE WHEN YOU TRAIN THE MODEL ON 100 SEPERATE VALUES OF LAMBDA
    # AND WHEN YOU TEST THE MODEL IT WILL RETURN PREDCITION FOR ALL THOSE VALUES (1-100). YOU NEED TO
    # GRAB THE PREDICTION WITH SAME LAMBDA THAT YOU TRAINED ON. ITS ALL IN THE CODE, BUT JUST WANTED TO
    # GIVE YOU REASONS
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # # number of non zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # set seed for next loop iteration
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # if it loops more than 5 times, then model did not converge
      break
    }
  }# while loop ends
  # print(temp.non_zero_coeff)
  # final fit on the full lambda path at the chosen alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  ##########
  # Predictions on test data
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # test.predictions <- ifelse(test.predictions >= pred_cutoff, 'yes', 'no')
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_y <- factor(test_y, levels = c('yes','no'))
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  test_stats <- caret::confusionMatrix(test.predictions, test_y)
  test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
  ##########
  # Predictions against controls
  ##########
  # get controls
  temp_test.predictions_controls <- predict(model,
                                            data.matrix(controls_dat),
                                            type = 'class')
  test.predictions_controls <- temp_test.predictions_controls[, temp.min_lambda_index]
  # test.predictions_controls <- ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no')
  test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  test_stats_controls <- caret::confusionMatrix(test.predictions_controls, patient_age_controls)
  ##########
  # Predictions on valid dat
  ##########
  # This returns 100 prediction with 1-100 lambdas
  temp_test.predictions_valid <- predict(model,
                                         data.matrix(valid_dat),
                                         type = 'class')
  # original should be fine, something wrong with caret package
  test.predictions_valid <- temp_test.predictions_valid[, temp.min_lambda_index]
  # test.predictions_valid <- ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no')
  test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
  test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
  return(list(test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
##########
# glmmLasso (lasso-penalized logistic mixed model)
##########
# training_dat = beta_cases[train_index,]
# controls_dat = beta_controls
# valid_dat = beta_valid
# test_dat = beta_cases[test_index,]
# bh_features = mod_feats
# Lasso-penalized logistic mixed model (glmmLasso) of early onset
# (age_diagnosis <= age_cutoff) on methylation features with a random
# intercept per family, evaluated on the test, control, and validation
# sets.
#
# Args:
#   training_dat, test_dat, valid_dat: case data frames with
#     'age_diagnosis', 'age_sample_collection', 'family_name', and
#     probe feature columns.
#   controls_dat: control data frame, labelled by age at collection.
#   age_cutoff: onset threshold; 'yes' = diagnosed at or before it.
#   pred_cutoff: probability threshold for calling 'yes'.
#   bh_features: probe names selected by bumphunter.
#   gender: accepted for interface consistency with the sibling run*
#     functions but currently unused here.
#
# Returns:
#   list(test_stats, test_stats_age, test_stats_controls,
#   test_stats_valid, test_stats_age_valid) of caret confusion
#   matrices.
runGlmLassoRandFac <- function(training_dat,
                               controls_dat,
                               valid_dat,
                               test_dat,
                               age_cutoff,
                               pred_cutoff,
                               bh_features,
                               gender) {
  # BUGFIX: actually intersect the bumphunter features with the data's
  # columns (the original comment promised this but used bh_features
  # unfiltered, so any probe missing from the data crashed the
  # subsetting below).
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # recode the outcome: 1/'yes' = diagnosed at or before age_cutoff
  training_dat$age_diagnosis <- ifelse(training_dat$age_diagnosis > age_cutoff, 0, 1)
  test_y <- ifelse(test_dat$age_diagnosis > age_cutoff, 'no', 'yes')
  valid_y <- ifelse(valid_dat$age_diagnosis > age_cutoff, 'no', 'yes')
  test_y <- factor(test_y, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  # age-at-collection labels (the only labels available for controls)
  patient_age <- ifelse(test_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age_controls <- ifelse(controls_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age_valid <- ifelse(valid_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  # keep outcome, family id (random-effect grouping), and features
  training_dat <- training_dat[, c('age_diagnosis', 'family_name', intersected_feats)]
  controls_dat <- controls_dat[, c('age_sample_collection', 'family_name', intersected_feats)]
  valid_dat <- valid_dat[, c('age_diagnosis', 'family_name', intersected_feats)]
  test_dat <- test_dat[, c('age_diagnosis', 'family_name', intersected_feats)]
  # rename the controls' collection age so they match the model formula
  colnames(controls_dat)[1] <- 'age_diagnosis'
  # make factor variables
  training_dat$family_name <- as.factor(training_dat$family_name)
  fmla <- as.formula(paste("age_diagnosis ~ ", paste(colnames(training_dat[,-c(1:2)]), collapse= "+")))
  # lasso-penalized logistic model with a per-family random intercept
  model <- glmmLasso(fmla,
                     rnd = list(family_name = ~1),
                     lambda=10,
                     data = training_dat, family=binomial(link = "logit"))
  # Helper: threshold predicted P(early onset) at pred_cutoff.
  # BUGFIX: the original mapped HIGH probability to 'no', inverting the
  # labels -- y was coded 1 = early onset = 'yes', and
  # predict(type = 'response') returns P(y = 1).
  classify <- function(newdata) {
    prob <- predict(model, newdata, type = 'response')
    factor(ifelse(prob > pred_cutoff, 'yes', 'no'), levels = c('yes', 'no'))
  }
  ##########
  # test dat
  ##########
  # BUGFIX: confusionMatrix takes (predictions, reference); the original
  # passed them reversed for the test set.
  test.predictions <- classify(test_dat)
  test_stats <- confusionMatrix(test.predictions, test_y)
  test_stats_age <- confusionMatrix(test.predictions, patient_age)
  ##########
  # controls dat
  ##########
  # BUGFIX: the original overwrote test_stats here instead of filling
  # test_stats_controls.
  controls.predictions <- classify(controls_dat)
  test_stats_controls <- confusionMatrix(controls.predictions, patient_age_controls)
  ##########
  # valid dat (BUGFIX: the original never computed these, so the return
  # statement referenced undefined objects and errored)
  ##########
  valid.predictions <- classify(valid_dat)
  test_stats_valid <- confusionMatrix(valid.predictions, valid_y)
  test_stats_age_valid <- confusionMatrix(valid.predictions, patient_age_valid)
  return(list(test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
##########
# RF fac
###########
runRfRandFac <- function(training_dat,
controls_dat,
valid_dat,
test_dat,
age_cutoff,
bh_features,
rand_feats,
pred_cutoff,
gender) {
if(gender) {
# get intersection of bh features and real data
bh_features <- as.character(unlist(bh_features))
bh_features <- append('M', bh_features)
bh_features <- append('F', bh_features)
}
# get intersection of bh features and real data
bh_features <- as.character(unlist(bh_features))
intersected_feats <- intersect(bh_features, colnames(training_dat))
intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
# # get y
train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
test_y <- as.factor(ifelse(test_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
valid_y <- as.factor(ifelse(valid_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
# get test age of sample collection
patient_age <- as.factor(ifelse(test_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
patient_age_controls <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
patient_age_valid <- as.factor(ifelse(valid_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
# get bumphunter features
training_dat_rand <- training_dat[, intersected_feats_rand]
controls_dat_rand <- controls_dat[, intersected_feats_rand]
valid_dat_rand <- valid_dat[, intersected_feats_rand]
test_dat_rand <- test_dat[, intersected_feats_rand]
# get bumphunter features
training_dat <- training_dat[, intersected_feats]
controls_dat <- controls_dat[, intersected_feats]
valid_dat <- valid_dat[, intersected_feats]
test_dat <- test_dat[, intersected_feats]
summaryFunc <- twoClassSummary
NFOLDS = 4
# determines how you train the model.
fitControl <- trainControl(
method = "repeatedcv", # could train on boostrap resample, here use repeated cross validation.
number = min(10, NFOLDS),
classProbs = TRUE,
repeats = 1,
allowParallel = TRUE,
summaryFunction = summaryFunc)
# mtry: Number of variables randomly sampled as candidates at each split.
# ntree: Number of trees to grow.
mtry <- sqrt(ncol(training_dat))
tunegrid <- expand.grid(.mtry=mtry)
model <- train(x = training_dat
, y =train_y
, method = "rf"
, trControl = fitControl
, tuneGrid = tunegrid
, importance = T
, metric = "ROC"
, verbose = FALSE)
temp <- varImp(model)[[1]]
importance <- cbind(rownames(temp), temp$Overall)
##########
# Predictions on test data
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions <- predict(model,
data.matrix(test_dat),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions <- test.predictions$yes
test.predictions <- as.factor(ifelse(test.predictions >= pred_cutoff, 'yes', 'no'))
test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
test_y <- factor(test_y, levels = c('yes', 'no'))
patient_age <- factor(patient_age, levels = c('yes', 'no'))
test_stats <- caret::confusionMatrix(test.predictions, test_y)
test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
##########
# Predictions on controls
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions_controls <- predict(model,
data.matrix(controls_dat),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions_controls <- test.predictions_controls$yes
test.predictions_controls <- as.factor(ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no'))
test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
test_stats_controls <-caret:: confusionMatrix(test.predictions_controls, patient_age_controls)
##########
# Predictions on valid data
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions_valid <- predict(model,
data.matrix(valid_dat),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions_valid <- test.predictions_valid$yes
test.predictions_valid <- as.factor(ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no'))
test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
valid_y <- factor(valid_y, levels = c('yes', 'no'))
patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
test_stats_norm <- test_stats
test_stats_age_norm <- test_stats_age
test_stats_controls_norm <- test_stats_controls
test_stats_valid_norm <- test_stats_valid
test_stats_age_valid_norm <- test_stats_age_valid
####################################################################################################
# random
# mtry: Number of variables randomly sampled as candidates at each split.
# ntree: Number of trees to grow.
mtry <- sqrt(ncol(training_dat))
tunegrid <- expand.grid(.mtry=mtry)
model <- train(x = training_dat_rand
, y =train_y
, method = "rf"
, trControl = fitControl
, tuneGrid = tunegrid
, importance = T
, metric = "ROC"
, verbose = FALSE)
temp <- varImp(model)[[1]]
importance <- cbind(rownames(temp), temp$Overall)
##########
# Predictions on test data
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions <- predict(model,
data.matrix(test_dat_rand),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions <- test.predictions$yes
test.predictions <- as.factor(ifelse(test.predictions >= pred_cutoff, 'yes', 'no'))
test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
test_y <- factor(test_y, levels = c('yes', 'no'))
patient_age <- factor(patient_age, levels = c('yes', 'no'))
test_stats <- caret::confusionMatrix(test.predictions, test_y)
test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
##########
# Predictions on controls
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions_controls <- predict(model,
data.matrix(controls_dat_rand),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions_controls <- test.predictions_controls$yes
test.predictions_controls <- as.factor(ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no'))
test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
test_stats_controls <-caret:: confusionMatrix(test.predictions_controls, patient_age_controls)
##########
# Predictions on valid data
##########
# This returns 100 prediction with 1-100 lambdas
test.predictions_valid <- predict(model,
data.matrix(valid_dat_rand),
type = 'prob')
# original should be fine, something wrong with caret package
test.predictions_valid <- test.predictions_valid$yes
test.predictions_valid <- as.factor(ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no'))
test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
valid_y <- factor(valid_y, levels = c('yes', 'no'))
patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
return(list(test_stats_norm, test_stats_age_norm,
test_stats_controls_norm, test_stats_valid_norm,
test_stats_age_valid_norm,
test_stats, test_stats_age,
test_stats_controls, test_stats_valid,
test_stats_age_valid))
}
##########
# RF fac
###########
# Train a random forest on a *random* feature subset (negative-control run)
# and evaluate it against onset-age and age-at-sample-collection labels on
# the test, control and validation sets.
#
# Args:
#   training_dat / controls_dat / valid_dat / test_dat: data frames that
#     contain age_diagnosis (cases only), age_sample_collection and the
#     methylation probe columns.
#   rand_feats: character vector of randomly sampled probe names.
#   age_cutoff: age threshold used to dichotomise ages into 'yes' (below
#     cutoff) / 'no' (at or above).
#
# Returns a list of five caret::confusionMatrix objects:
#   1. test predictions vs onset age
#   2. test predictions vs collection age
#   3. control predictions vs collection age
#   4. validation predictions vs onset age
#   5. validation predictions vs collection age
#
# NOTE(review): unlike the sibling runners that take a pred_cutoff argument,
# predictions here are thresholded at a hard-coded 0.5 -- confirm intended.
run_rf_rand <- function(training_dat,
                        controls_dat,
                        valid_dat,
                        test_dat,
                        rand_feats,
                        age_cutoff) {
  # keep only the random features that actually exist in the training data
  intersected_feats <- intersect(rand_feats, colnames(training_dat))
  # outcome: 'yes' = diagnosed before the age cutoff
  train_y <- as.factor(ifelse(training_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  test_y <- as.factor(ifelse(test_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  valid_y <- as.factor(ifelse(valid_dat$age_diagnosis < age_cutoff, 'yes', 'no'))
  # age at sample collection, dichotomised the same way (controls have no
  # onset age, so they are only evaluated against this label)
  patient_age <- as.factor(ifelse(test_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_controls <- as.factor(ifelse(controls_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  patient_age_valid <- as.factor(ifelse(valid_dat$age_sample_collection < age_cutoff, 'yes', 'no'))
  # restrict every data set to the selected feature columns
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  summaryFunc <- twoClassSummary
  NFOLDS = 4
  # determines how you train the model: repeated k-fold cross validation
  # (could also train on bootstrap resamples)
  fitControl <- trainControl(
    method = "repeatedcv",
    number = min(10, NFOLDS),
    classProbs = TRUE,
    repeats = 1,
    allowParallel = TRUE,
    summaryFunction = summaryFunc)
  # mtry: number of variables randomly sampled as candidates at each split
  # (sqrt(p) is the usual classification default)
  mtry <- sqrt(ncol(training_dat))
  tunegrid <- expand.grid(.mtry = mtry)
  model <- train(x = training_dat
                 , y = train_y
                 , method = "rf"
                 , trControl = fitControl
                 , tuneGrid = tunegrid
                 , importance = T
                 , metric = "ROC"
                 , verbose = FALSE)
  # NOTE(review): variable importance is computed but never returned or used
  temp <- varImp(model)[[1]]
  importance <- cbind(rownames(temp), temp$Overall)
  ##########
  # Predictions on test data
  ##########
  # predict class probabilities, then threshold P(yes) at 0.5
  test.predictions <- predict(model,
                              data.matrix(test_dat),
                              type = 'prob')
  test.predictions <- test.predictions$yes
  test.predictions <- as.factor(ifelse(test.predictions >= 0.5, 'yes', 'no'))
  # fix level order so confusionMatrix treats 'yes' as the positive class
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_y <- factor(test_y, levels = c('yes', 'no'))
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  test_stats <- caret::confusionMatrix(test.predictions, test_y)
  test_stats_age <- caret::confusionMatrix(test.predictions, patient_age)
  ##########
  # Predictions on controls
  ##########
  # controls are scored against age at sample collection only
  test.predictions_controls <- predict(model,
                                       data.matrix(controls_dat),
                                       type = 'prob')
  test.predictions_controls <- test.predictions_controls$yes
  test.predictions_controls <- as.factor(ifelse(test.predictions_controls >= 0.5, 'yes', 'no'))
  test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  test_stats_controls <- caret::confusionMatrix(test.predictions_controls, patient_age_controls)
  ##########
  # Predictions on valid data
  ##########
  test.predictions_valid <- predict(model,
                                    data.matrix(valid_dat),
                                    type = 'prob')
  test.predictions_valid <- test.predictions_valid$yes
  test.predictions_valid <- as.factor(ifelse(test.predictions_valid >= 0.5, 'yes', 'no'))
  test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  test_stats_valid <- caret::confusionMatrix(test.predictions_valid, valid_y)
  test_stats_age_valid <- caret::confusionMatrix(test.predictions_valid, patient_age_valid)
  return(list(test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
##########
# enet diff
###########
# Train an SVM (radial kernel) on bumphunter-selected features and evaluate
# it on the test, control and validation sets, against both onset age and
# age at sample collection.
#
# Args:
#   training_dat / controls_dat / valid_dat / test_dat: data frames with
#     age_diagnosis, age_sample_collection and probe columns.
#   age_cutoff: age threshold; 'yes' = at or below the cutoff.
#   pred_cutoff: probability threshold applied to P(yes).
#   bh_features: feature (probe) names selected by bumphunter.
#   gender: if TRUE, also include 'M' and 'F' indicator columns
#     (prepended *before* the intersection, so they survive only if those
#     columns exist in the data -- TODO confirm).
#
# Returns a list of five confusionMatrix objects: test vs onset, test vs
# collection age, controls vs collection age, valid vs onset, valid vs
# collection age.
#
# NOTE(review): confusionMatrix is called here as (truth, prediction),
# whereas the rf runners in this file call it as (prediction, truth).
# caret's signature is confusionMatrix(data, reference), so sensitivity /
# specificity / PPV are transposed relative to those functions -- confirm
# which convention downstream summaries expect.
runSvmRandFac <- function(training_dat,
                          controls_dat,
                          valid_dat,
                          test_dat,
                          age_cutoff,
                          pred_cutoff,
                          bh_features,
                          gender)
{
  if(gender) {
    # prepend the gender indicator columns to the candidate feature list
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # keep only candidate features that exist in the training data
  # (the unlist is a harmless repeat of the one inside the gender branch)
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # outcome: 'yes' = diagnosed at or before the age cutoff
  train_y <- ifelse(training_dat$age_diagnosis > age_cutoff, 'no', 'yes')
  test_y <- ifelse(test_dat$age_diagnosis > age_cutoff, 'no', 'yes')
  valid_y <- ifelse(valid_dat$age_diagnosis > age_cutoff, 'no', 'yes')
  # fix level order so 'yes' is treated as the positive class
  train_y <- factor(train_y, levels = c('yes', 'no'))
  test_y <- factor(test_y, levels = c('yes', 'no'))
  valid_y <- factor(valid_y, levels = c('yes', 'no'))
  # age at sample collection, dichotomised the same way
  patient_age <- ifelse(test_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age_controls <- ifelse(controls_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age_valid <- ifelse(valid_dat$age_sample_collection > age_cutoff, 'no', 'yes')
  patient_age <- factor(patient_age, levels = c('yes', 'no'))
  patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  # restrict every data set to the selected feature columns
  training_dat <- training_dat[, intersected_feats]
  controls_dat <- controls_dat[, intersected_feats]
  valid_dat <- valid_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  summaryFunc <- twoClassSummary
  NFOLDS = 4
  # repeated k-fold cross validation for tuning
  fitControl <- trainControl(
    method = "repeatedcv",
    number = min(10, NFOLDS),
    classProbs = TRUE,
    repeats = 1,
    allowParallel = TRUE,
    summaryFunction = summaryFunc)
  # NOTE(review): no metric= argument is given; with twoClassSummary caret
  # falls back to ROC with a warning -- presumably intended.
  model <- train(x = training_dat
                 , y = train_y
                 , method = "svmRadial"
                 , trControl = fitControl
                 , verbose = FALSE)
  ##########
  # Predictions on test data
  ##########
  # predict class probabilities, then threshold P(yes) at pred_cutoff
  test.predictions <- predict(model,
                              data.matrix(test_dat),
                              type = 'prob')
  test.predictions <- test.predictions$yes
  test.predictions <- as.factor(ifelse(test.predictions >= pred_cutoff, 'yes', 'no'))
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_stats <- confusionMatrix(test_y, test.predictions)
  test_stats_age <- confusionMatrix(patient_age, test.predictions)
  ##########
  # Predictions on controls
  ##########
  # controls are scored against age at sample collection only
  test.predictions_controls <- predict(model,
                                       data.matrix(controls_dat),
                                       type = 'prob')
  test.predictions_controls <- test.predictions_controls$yes
  test.predictions_controls <- as.factor(ifelse(test.predictions_controls >= pred_cutoff, 'yes', 'no'))
  test.predictions_controls <- factor(test.predictions_controls, levels = c('yes', 'no'))
  test_stats_controls <- confusionMatrix(patient_age_controls, test.predictions_controls)
  ##########
  # Predictions on valid data
  ##########
  test.predictions_valid <- predict(model,
                                    data.matrix(valid_dat),
                                    type = 'prob')
  test.predictions_valid <- test.predictions_valid$yes
  test.predictions_valid <- as.factor(ifelse(test.predictions_valid >= pred_cutoff, 'yes', 'no'))
  test.predictions_valid <- factor(test.predictions_valid, levels = c('yes', 'no'))
  test_stats_valid <- confusionMatrix(valid_y, test.predictions_valid)
  test_stats_age_valid <- confusionMatrix(patient_age_valid, test.predictions_valid)
  return(list(test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
##########
# mixed lasso family as random effects
###########
# Mixed-effects lasso (lassop) using family as the random-effect grouping.
#
# WARNING(review): this function is unfinished scratch work and cannot run
# end-to-end -- see the inline NOTEs: a bare `rand` expression and a second
# lassop() call reference undefined objects (rand, x, y, z, grp, D); the
# prediction block references an undefined `model`; and the return references
# test_stats_controls / test_stats_valid / test_stats_age_valid, none of
# which are defined here. Code is left intact; annotations only.
#
# Args mirror the other model runners: case/control/validation/test data
# frames (must contain family_name, age_diagnosis, age_sample_collection),
# an age cutoff for the 0/1 label, a prediction threshold, the bumphunter
# feature names, and a flag for including gender indicator columns.
run_mixed_lasso <- function(training_dat,
                            controls_dat,
                            valid_dat,
                            test_dat,
                            age_cutoff,
                            pred_cutoff,
                            bh_features,
                            gender)
{
  if(gender) {
    # prepend gender indicator columns to the candidate feature list
    bh_features <- as.character(unlist(bh_features))
    bh_features <- append('M', bh_features)
    bh_features <- append('F', bh_features)
  }
  # keep only candidate features that exist in the training data
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  # outcome coded numerically: 1 = diagnosed at or before the age cutoff
  train_y <- ifelse(training_dat$age_diagnosis > age_cutoff, 0, 1)
  test_y <- ifelse(test_dat$age_diagnosis > age_cutoff, 0, 1)
  valid_y <- ifelse(valid_dat$age_diagnosis > age_cutoff, 0, 1)
  # train_y <- factor(train_y, levels = c('yes', 'no'))
  # test_y <- factor(test_y, levels = c('yes', 'no'))
  # valid_y <-factor(valid_y, levels = c('yes', 'no'))
  # age at sample collection, dichotomised the same way
  patient_age <- ifelse(test_dat$age_sample_collection > age_cutoff, 0, 1)
  patient_age_controls <- ifelse(controls_dat$age_sample_collection > age_cutoff, 0, 1)
  patient_age_valid <- ifelse(valid_dat$age_sample_collection > age_cutoff, 0, 1)
  # patient_age <- factor(patient_age, levels = c('yes', 'no'))
  # patient_age_controls <- factor(patient_age_controls, levels = c('yes', 'no'))
  # patient_age_valid <- factor(patient_age_valid, levels = c('yes', 'no'))
  # keep the grouping column plus the selected feature columns
  training_dat <- training_dat[, c('family_name', intersected_feats)]
  controls_dat <- controls_dat[, c('family_name', intersected_feats)]
  valid_dat <- valid_dat[, c('family_name', intersected_feats)]
  test_dat <- test_dat[, c('family_name', intersected_feats)]
  # recode family name (our random-effect groups) as numeric
  training_dat$family_name <- as.numeric(as.factor(training_dat$family_name))
  controls_dat$family_name <- as.numeric(as.factor(controls_dat$family_name))
  valid_dat$family_name <- as.numeric(as.factor(valid_dat$family_name))
  test_dat$family_name <- as.numeric(as.factor(test_dat$family_name))
  # add an intercept column and coerce to matrix
  training_dat <- as.matrix(cbind(1, training_dat))
  test_dat <- as.matrix(cbind(1, test_dat))
  controls_dat <- as.matrix(cbind(1, controls_dat))
  valid_dat <- as.matrix(cbind(1, valid_dat))
  # random-effects design: the intercept column only
  # (NOTE(review): comment originally said "first two columns", but only
  # column 1 is taken -- confirm intent)
  z_train <- as.matrix(training_dat[, 1])
  # grouping variable (recoded family name), one entry per row
  g_train <- t(factor(training_dat[,2]))
  fit <-lassop(training_dat,
               train_y,
               z_train,
               g_train,
               mu=1.5,
               fix=1,
               rand=1)
  # NOTE(review): the result of predict() is discarded
  predict(fit)
  #HERE
  ########
  # independent random effects -- notes on the lassop() interface:
  #   x   numeric matrix n*p of predictors
  #   y   outcome vector of length n
  #   z   random-effects matrix n*q
  #   grp group variable of length n
  #   rand positions of z within x
  #   fix leading columns (e.g. intercept) excluded from selection
  # NOTE(review): the next two statements reference undefined objects
  # (rand, x, y, z, grp, D) and raise "object not found" errors if reached
  rand
  fit=lassop(x,y,z,grp,D=1,mu=0.2,fix=1,rand=c(1,2))
  ##########
  # Predictions on test data
  ##########
  # NOTE(review): `model` is never defined in this function (the fitted
  # object is `fit`), so this block cannot run as written
  test.predictions <- predict(model,
                              data.matrix(test_dat),
                              type = 'prob')
  test.predictions <- test.predictions$yes
  test.predictions <- as.factor(ifelse(test.predictions >= pred_cutoff, 'yes', 'no'))
  test.predictions <- factor(test.predictions, levels = c('yes', 'no'))
  test_stats <- caret::confusionMatrix(test_y, test.predictions)
  test_stats_age <- caret::confusionMatrix(patient_age, test.predictions)
  # NOTE(review): test_stats_controls / test_stats_valid /
  # test_stats_age_valid are never defined in this function
  return(list(test_stats, test_stats_age,
              test_stats_controls, test_stats_valid,
              test_stats_age_valid))
}
# mod_results_list <- mod_result
# dims_of_dat <- 22
# feat_name <- 'bh'
# mod_name <- 'rf'
# seed_number <- 1
# mod_results_list <- mod_result
# Collapse a list of model-evaluation results into one annotated data frame
# of per-class statistics plus the pooled clinical data.
#
# Args:
#   mod_results_list: list whose elements 1-5 are confusionMatrix-like
#     objects (anything with a $byClass vector) in the order: cases onset,
#     cases age, controls age, validation onset, validation age; element 6
#     is a list of clinical data frames.
#   dims_of_dat: number of features used (feature_num column).
#   mod_name:    model label (model_method column).
#   feat_name:   feature-set label (feat_name column).
#   seed_number: RNG seed of the run (seed_number column).
#
# Returns list(class_results, clin_data_full): class_results has one row per
# evaluation, in the historical row order (cases onset, cases age, valid age,
# valid onset, controls age); clin_data_full is the row-bound clinical data.
#
# Refactor: the five copy-pasted annotation blocks are collapsed into one
# helper; output values are unchanged.
get_class_results <- function(mod_results_list, dims_of_dat, mod_name, feat_name, seed_number) {
  # helper: turn one result's byClass vector into a 1-row annotated data frame
  annotate_class <- function(result, age_type) {
    out <- as.data.frame(t(result$byClass))
    out$age_type <- age_type
    out$seed_number <- seed_number
    out$feature_num <- dims_of_dat
    out$model_method <- mod_name
    out$feat_name <- feat_name
    out
  }
  age_types <- c('cases_onset', 'cases_age', 'controls_age',
                 'valid_onset', 'valid_age')
  annotated <- Map(annotate_class, mod_results_list[1:5], age_types)
  # preserve the original row ordering: onset, age, valid_age, valid_onset,
  # controls_age
  class_results_norm <- do.call(rbind, unname(annotated[c(1, 2, 5, 4, 3)]))
  # pool the clinical data frames (element 6) into one data frame
  clin_data_full <- do.call(rbind, mod_results_list[[6]])
  return(list(class_results_norm, clin_data_full))
}
# Annotate the cancer-model per-class statistics (element 2 of the results
# list) with run metadata, and pass through the clinical data (element 3).
#
# Args:
#   mod_results_list: list whose element 2 has a $byClass vector and whose
#     element 3 holds the clinical data.
#   dims_of_dat: number of features used (feature_num column).
#   mod_name:    model label (model_name column).
#   gender, p53: run metadata stored as columns.
#
# Returns list(stats_row, clinical_data).
get_class_results_cancer <- function(mod_results_list, dims_of_dat, mod_name, gender, p53) {
  # flatten the per-class stats vector into a one-row data frame
  stats_row <- as.data.frame(t(mod_results_list[[2]]$byClass))
  # attach the run metadata
  stats_row$p53 <- p53
  stats_row$gender <- gender
  stats_row$feature_num <- dims_of_dat
  stats_row$model_name <- mod_name
  list(stats_row, mod_results_list[[3]])
}
# Summarise test-run results for models trained on real ("norm") features
# (elements 1-5 of mod_results_list) and on random features (elements 6-10).
# Element order within each half: cases onset, cases age, controls age,
# validation onset, validation age; each element must expose $byClass and
# $table (confusionMatrix-like).
#
# Args:
#   mod_results_list: list of ten confusionMatrix-like objects (see above).
#   dims_of_dat: number of features used (feature_num column).
#   mod_name:    model label (model_method column).
#   feat_name:   feature-set label (feat_name column).
#
# Returns list(class_results, norm_tables): class_results stacks the ten
# annotated byClass rows (norm half then rand half, each in the historical
# row order); norm_tables holds the five confusion tables of the norm models
# (ordered: cases onset, cases age, valid onset, valid age, controls age).
#
# BUG FIX: the random-feature cases-onset row was labelled 'cases_onset'
# (no _rand suffix), making it indistinguishable from the norm row; it is
# now 'cases_onset_rand', consistent with the other rand labels.
get_class_results_test <- function(mod_results_list, dims_of_dat, mod_name, feat_name) {
  # helper: annotate the byClass vector of the result at position idx
  annotate_class <- function(idx, age_type) {
    out <- as.data.frame(t(mod_results_list[[idx]]$byClass))
    out$age_type <- age_type
    out$feature_num <- dims_of_dat
    out$model_method <- mod_name
    out$feat_name <- feat_name
    out
  }
  norm_labels <- c('cases_onset', 'cases_age', 'controls_age',
                   'valid_onset', 'valid_age')
  rand_labels <- paste0(norm_labels, '_rand')
  class_norm <- Map(annotate_class, 1:5, norm_labels)
  class_rand <- Map(annotate_class, 6:10, rand_labels)
  # historical row ordering within each half: onset, age, valid_age,
  # valid_onset, controls_age
  row_order <- c(1, 2, 5, 4, 3)
  class_results <- do.call(rbind, c(unname(class_norm[row_order]),
                                    unname(class_rand[row_order])))
  # confusion tables of the norm models, in the order the caller expects
  norm_tables <- unname(lapply(mod_results_list[c(1, 2, 4, 5, 3)],
                               function(x) x$table))
  return(list(class_results, norm_tables))
}
# mod_results_list <- mod_result
# Average classification results over repeated runs. mod_results_list is a
# list of runs; each run is itself a list whose elements 1-5 are
# confusionMatrix-like objects ($byClass, $table) in the order: cases onset,
# cases age, controls age, validation onset, validation age.
#
# Args:
#   mod_results_list: list of runs as described above.
#   mod_name:    model label (model_method column).
#   dims_of_dat: number of features used (feature_num column).
#
# Returns list(class_results, mean_tables): class_results holds the
# run-averaged byClass statistics (one row per evaluation, in the historical
# order onset / age / valid_age / valid_onset / controls_age); mean_tables
# holds the five run-averaged confusion tables (ordered: cases onset, cases
# age, valid onset, valid age, controls age).
#
# BUG FIXES vs the original:
#  * a typo (`class_valid_onse_norm`) caused assignments to a nonexistent
#    object and an "object not found" error before the rbind;
#  * the return referenced undefined objects (`mat_onset` and four
#    `mat_*_rand` matrices that were never computed) -- the five defined
#    averaged tables are returned instead;
#  * the divisor 5 was hard-coded; it is now the actual number of runs
#    (identical behaviour for the usual 5-run case).
get_class_results_rand <- function(mod_results_list, mod_name, dims_of_dat) {
  n_runs <- length(mod_results_list)
  # average the confusion table at position idx across the runs
  mean_table <- function(idx) {
    Reduce(`+`, lapply(mod_results_list, function(x) x[[idx]]$table)) / n_runs
  }
  # column-wise mean of the byClass stats at position idx, with metadata
  mean_class <- function(idx, age_type) {
    by_class <- do.call(rbind, lapply(mod_results_list, function(x) x[[idx]]$byClass))
    out <- as.data.frame(t(apply(by_class, 2, mean, na.rm = TRUE)))
    out$age_type <- age_type
    out$feature_num <- dims_of_dat
    out$model_method <- mod_name
    out
  }
  labels <- c('cases_onset', 'cases_age', 'controls_age',
              'valid_onset', 'valid_age')
  class_list <- Map(mean_class, 1:5, labels)
  # historical row ordering: onset, age, valid_age, valid_onset, controls_age
  class_results <- do.call(rbind, unname(class_list[c(1, 2, 5, 4, 3)]))
  mean_tables <- lapply(1:5, mean_table)
  return(list(class_results, mean_tables[c(1, 2, 4, 5, 3)]))
}
# train_data <- m_train_cases
# test_data <- m_test_cases
# remove duplicates
# De-duplicate samples that appear in both the training and the test split,
# keyed on the `ids` column. Train rows are stacked first, so when an id
# appears in both splits the training copy is kept and the test copy dropped.
#
# Args:
#   train_data, test_data: data frames sharing the same columns, including
#     an `ids` column.
#
# Returns list(train_data, test_data) with duplicates removed; a temporary
# `type` column is used for routing and stripped before returning.
remove_dups <- function(train_data, test_data) {
  # tag each split so rows can be routed back after de-duplication
  train_data$type <- 'train'
  test_data$type <- 'test'
  combined <- rbind(train_data, test_data)
  # keep the first occurrence of each id (train rows win: they come first)
  combined <- combined[!duplicated(combined$ids), ]
  out_train <- combined[combined$type == 'train', ]
  out_test <- combined[combined$type == 'test', ]
  out_train$type <- NULL
  out_test$type <- NULL
  list(out_train, out_test)
}
# Elastic-net logistic regression on bumphunter features. Tunes alpha over
# 0.1-0.9 by repeated cross-validation, picks lambda.min at the chosen
# alpha, refits on the full lambda path, and returns the *raw* test-set
# probabilities paired with the true labels (for downstream ROC analysis) --
# no thresholding is applied here.
#
# Args:
#   training_dat / test_dat: data frames with age_diagnosis and the probe
#     columns (test_dat's first 9 columns are assumed clinical).
#   age_cutoff: label is 1 when age_diagnosis < age_cutoff, else 0.
#   bh_features: candidate probe names.
#   gender: if TRUE, prepend the 'M' and 'F' indicator columns to the
#     feature set (appended *after* the intersection here, unlike
#     runSvmRandFac -- assumes those columns exist in both data sets).
#
# Returns a data frame with columns test_pred (predicted probability) and
# test_label (0/1 truth), one row per test sample.
runEnetRoc <- function(training_dat,
                       test_dat,
                       age_cutoff,
                       bh_features,
                       gender) {
  # keep only candidate features present in the training data
  bh_features <- as.character(unlist(bh_features))
  intersected_feats <- intersect(bh_features, colnames(training_dat))
  if(gender) {
    intersected_feats <- append('M', intersected_feats)
    intersected_feats <- append('F', intersected_feats)
  }
  # intersected_feats_rand <- intersect(rand_feats, colnames(training_dat))
  # outcome: 1 = diagnosed before the age cutoff
  train_y <- ifelse(training_dat$age_diagnosis < age_cutoff, 1, 0)
  test_y <- ifelse(test_dat$age_diagnosis < age_cutoff, 1, 0)
  # NOTE(review): test_clin is assigned but never used below
  test_clin <- test_dat[, 1:9]
  # restrict to the selected feature columns
  training_dat <- training_dat[, intersected_feats]
  test_dat <- test_dat[, intersected_feats]
  # start elastic net tuning
  N_CV_REPEATS = 2
  nfolds = 3
  ###### ENET
  # per-alpha CV error and fitted cv.glmnet model; alpha is the mixing
  # parameter between ridge (alpha = 0) and lasso (alpha = 1)
  elastic_net.cv_error = vector()
  elastic_net.cv_model = list()
  elastic_net.ALPHA <- c(1:9) / 10 # candidate alpha values 0.1 .. 0.9
  # model family and CV metric
  type_family <- 'binomial'
  type_measure <- 'auc'
  # repeat the whole alpha search N_CV_REPEATS times; %do% runs the repeats
  # sequentially (use %dopar% with a registered backend for parallelism)
  # NOTE(review): parallel = TRUE inside cv.glmnet also needs a registered
  # foreach backend, otherwise it warns and runs serially.
  temp.cv_error_matrix <- foreach (temp = 1:N_CV_REPEATS, .combine=rbind, .errorhandling="stop") %do% {
    for (alpha in 1:length(elastic_net.ALPHA)) # one cv.glmnet fit per candidate alpha
    {
      elastic_net.cv_model[[alpha]] = cv.glmnet(x = as.matrix(training_dat)
                                                , y = train_y
                                                , alpha = elastic_net.ALPHA[alpha]
                                                , type.measure = type_measure
                                                , family = type_family
                                                , standardize = FALSE
                                                , nfolds = nfolds
                                                , nlambda = 10
                                                , parallel = TRUE
                                                )
      # NOTE(review): with type.measure = 'auc', cvm holds AUC (higher is
      # better); minimising it here would favour the *worst* alpha --
      # confirm against the glmnet docs whether this should be max().
      elastic_net.cv_error[alpha] = min(elastic_net.cv_model[[alpha]]$cvm)
    }
    elastic_net.cv_error # one row of 9 errors per repeat
  }
  if (N_CV_REPEATS == 1) {
    temp.cv_error_mean = temp.cv_error_matrix
  } else {
    # average each alpha's error over the repeats
    temp.cv_error_mean = apply(temp.cv_error_matrix, 2, mean)
  }
  # stop if we did not recover an error for every candidate alpha
  stopifnot(length(temp.cv_error_mean) == length(elastic_net.ALPHA))
  # index of the best (lowest mean error) alpha; ties broken by the last one
  temp.best_alpha_index = which(min(temp.cv_error_mean) == temp.cv_error_mean)[length(which(min(temp.cv_error_mean) == temp.cv_error_mean))]
  # print(paste("Best ALPHA:", elastic_net.ALPHA[temp.best_alpha_index]))
  best_alpha <- elastic_net.ALPHA[temp.best_alpha_index]
  temp.non_zero_coeff = 0
  temp.loop_count = 0
  # refit cv.glmnet at the chosen alpha until lambda.min keeps at least one
  # non-zero coefficient; each retry reseeds so the fold split changes.
  # Give up ("diverged") after more than 10 attempts.
  while (temp.non_zero_coeff < 1) {
    elastic_net.cv_model = cv.glmnet(x = as.matrix(training_dat)
                                     , y = train_y
                                     , alpha = elastic_net.ALPHA[temp.best_alpha_index]
                                     , type.measure = type_measure
                                     , family = type_family
                                     , standardize=FALSE
                                     , nlambda = 100
                                     , nfolds = nfolds
                                     , parallel = TRUE
                                     )
    # index of lambda.min within the 100-value lambda path. The final glmnet
    # fit below is trained on the same path length, so test predictions must
    # be read off at this same column index.
    temp.min_lambda_index = which(elastic_net.cv_model$lambda == elastic_net.cv_model$lambda.min)
    # number of non-zero coefficients at that lambda
    temp.non_zero_coeff = elastic_net.cv_model$nzero[temp.min_lambda_index]
    temp.loop_count = temp.loop_count + 1
    # reseed from the clock so the next CV split differs
    as.numeric(Sys.time())-> t
    set.seed((t - floor(t)) * 1e8 -> seed)
    if (temp.loop_count > 10) {
      print("diverged")
      temp.min_lambda_index = 50 # more than 10 failed attempts: fall back to the mid-path lambda
      break
    }
  }# while loop ends
  # print(temp.non_zero_coeff)
  # final fit over the full 100-lambda path at the tuned alpha
  model = glmnet(x = as.matrix(training_dat)
                 , y = train_y
                 ,alpha = elastic_net.ALPHA[temp.best_alpha_index]
                 ,standardize=FALSE
                 ,nlambda = 100
                 ,family = type_family)
  ##########
  # Predictions on test data
  ##########
  # predict() returns one column per lambda on the path; keep the column at
  # the tuned lambda.min index
  temp_test.predictions <- predict(model,
                                   data.matrix(test_dat),
                                   type = 'response')
  test.predictions <- temp_test.predictions[, temp.min_lambda_index]
  # pair the raw probabilities with the true labels for ROC analysis
  temp_dat <- as.data.frame(cbind(test_pred = test.predictions, test_label = test_y))
  ###########################################################################################
  return(temp_dat)
}
##########
# function for get m values
##########
#' Convert methylation beta values to M-values.
#'
#' Applies the logit transform m = log(beta / (1 - beta)) (natural log,
#' as in the original implementation) to every probe column, leaving the
#' leading annotation columns untouched.
#'
#' @param data_set data.frame whose columns from `probe_start` onward hold
#'   beta values in (0, 1).
#' @param probe_start index of the first probe column.
#' @return `data_set` with the probe columns replaced by M-values.
get_m_values <-
  function(data_set, probe_start) {
    probe_cols <- probe_start:ncol(data_set)
    # log() and division are vectorised over data.frames, so operate on the
    # block directly instead of apply(), which coerces to a matrix and
    # errors when only a single probe column is selected.
    data_set[, probe_cols] <- log(data_set[, probe_cols] / (1 - data_set[, probe_cols]))
    return(data_set)
  }
|
4e06164e34db799055e4216f67236fb6635e45de
|
9a835c54b337b14a89dfd0dea3489f2f5c77a128
|
/2016-08-18-shiny/App/global.R
|
902898f7d0e0fd7a53c3cac909176b45e406ad06
|
[] |
no_license
|
MonicaGutierrez/useRbog
|
42ffa186343e8b73f7639e684d697e854926cedf
|
05a2171d6a0b59925238682e1fd77566a0c7aa2c
|
refs/heads/master
| 2021-01-16T20:53:42.099076
| 2016-09-13T15:55:32
| 2016-09-13T15:55:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,725
|
r
|
global.R
|
library(dplyr)
library(plyr)
library(shiny)
#' Read and row-bind the uploaded match CSV files.
#'
#' @param input Shiny input object; `input$datafile` is the data.frame
#'   produced by `fileInput()`, one row per uploaded file, with a
#'   'datapath' column pointing at the temporary copy on disk.
#' @return One data.frame with the ten columns of interest from every
#'   uploaded file stacked together.
df_loader <- function(input){
  # seq_len() is safe when the table is empty (1:dim(x)[1] would give c(1, 0))
  list_df <- lapply(seq_len(nrow(input$datafile)), function(number){
    path <- input$datafile[[number, 'datapath']]
    df <- read.csv(path, header = TRUE, sep = ",", dec = ".",
                   stringsAsFactors = FALSE)
    # keep only the columns used downstream by the app
    df[, c("Div", "Date", "HomeTeam", "AwayTeam", "FTHG",
           "FTAG", "FTR", "HTHG", "HTAG", "HTR")]
  })
  # rbind.fill tolerates NULL entries and differing column order
  plyr::rbind.fill(list_df)
}
#' Build a per-team summary table from match results.
#'
#' For every home team in `df` (except 'Reading' and blank names, which
#' are skipped), counts matches won (PG), lost (PP) and drawn (PE), and
#' computes the full-time goal balance as home side (BTGL) and as
#' visiting side (BTGV).
#'
#' @param df Match data with columns HomeTeam, AwayTeam, FTR, FTHG, FTAG.
#' @return data.frame with one row per team: Team, PG, PP, PE, BTGL, BTGV.
process_data <- function(df){
  teams <- unique(df$HomeTeam)
  list_df <- lapply(teams, function(team){
    # 'Reading' and empty names are deliberately skipped; lapply yields
    # NULL for them and rbind.fill drops the NULLs.
    if (team != 'Reading' && team != "") {
      partidos_ganados <- nrow(df %>%
        filter((HomeTeam == team & FTR == 'H') |
               (AwayTeam == team & FTR == 'A')))
      # FIX: the away-loss condition previously read df$FTR instead of the
      # bare FTR column; same values here, but the bare column is correct
      # dplyr data-masking usage and robust to prior row subsetting.
      partidos_perdidos <- nrow(df %>%
        filter((HomeTeam == team & FTR == 'A') |
               (AwayTeam == team & FTR == 'H')))
      partidos_empatados <- nrow(df %>%
        filter((HomeTeam == team & FTR == 'D') |
               (AwayTeam == team & FTR == 'D')))
      home_games <- df %>% filter(HomeTeam == team)
      bal_tot_gol_loc <- sum(home_games$FTHG) - sum(home_games$FTAG)
      # FIX: the visitor balance previously filtered HomeTeam == team;
      # goals for/against as visitor must come from the team's away games.
      away_games <- df %>% filter(AwayTeam == team)
      bal_tot_gol_vis <- sum(away_games$FTAG) - sum(away_games$FTHG)
      data.frame(Team = team, PG = partidos_ganados,
                 PP = partidos_perdidos, PE = partidos_empatados,
                 BTGL = bal_tot_gol_loc, BTGV = bal_tot_gol_vis,
                 stringsAsFactors = FALSE)
    }
  })
  plyr::rbind.fill(list_df)
}
#' Bar plot of home-game goal balance (BTGL), teams sorted descending.
#'
#' @param df_processed Output of process_data(): needs columns Team, BTGL.
#' @return Invisibly NULL; called for its plotting side effect.
bar_plot_btgl <- function(df_processed){
  ord <- order(df_processed$BTGL, decreasing = TRUE)
  heights <- df_processed$BTGL[ord]
  team_labels <- df_processed$Team[ord]
  # FIX: barplot() returns the bar midpoints; use them for the axis so the
  # team labels line up with the bars (at = 1:n does not match the default
  # bar positions, which start at 0.7 with the default spacing).
  bar_pos <- barplot(heights, ylab = 'Diferencia de Goles', width = 1)
  title('Balance de Goles como Local')
  axis(side = 1, at = bar_pos, labels = team_labels)
  invisible(NULL)
}
#' Bar plot of away-game goal balance (BTGV), teams sorted descending.
#'
#' @param df_processed Output of process_data(): needs columns Team, BTGV.
#' @return Invisibly NULL; called for its plotting side effect.
bar_plot_btgv <- function(df_processed){
  ord <- order(df_processed$BTGV, decreasing = TRUE)
  heights <- df_processed$BTGV[ord]
  team_labels <- df_processed$Team[ord]
  # FIX: use the midpoints returned by barplot() for the label axis so the
  # labels align with the bars (at = 1:n misaligns with default spacing).
  bar_pos <- barplot(heights, ylab = 'Diferencia de Goles', width = 1)
  title('Balance de Goles como Visitante')
  axis(side = 1, at = bar_pos, labels = team_labels)
  invisible(NULL)
}
|
88ccaa35dbfb72695f531c394bf102e1f1fe22ab
|
be531c0f8f2920fb34c57010923c7c1bcff216c4
|
/airline_fc_assign.R
|
475dd6c3bd782546895a95696c1d88a3ee1fff50
|
[] |
no_license
|
SarthakChitre/DS_Forecasting
|
2510d31481aed0c9783f3d078cae69b7468f3976
|
ab2f0ed01b09b1f37267997c41267b9ffc90c041
|
refs/heads/master
| 2022-12-03T11:32:11.621731
| 2020-08-11T07:02:52
| 2020-08-11T07:02:52
| 285,646,530
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,612
|
r
|
airline_fc_assign.R
|
# Airline passenger forecasting assignment: fits several trend/seasonality
# regressions on 96 monthly observations, compares test-set RMSE, then
# refits the winning model on all data and corrects its forecasts with an
# AR(1) model of the residuals.
# NOTE(review): install.packages() calls in a script re-install on every
# run; consider removing them or guarding with requireNamespace().
install.packages("rmarkdown")
library(rmarkdown)
install.packages("forecast")
library(forecast)
install.packages("fpp")
library(fpp)
install.packages("smooth")
library(smooth)
#read file (interactive file chooser; expects columns Month, Passengers)
library(readxl)
airlines=read_excel(file.choose())
View(airlines)
plot(airlines$Passengers,type = "o")
#Creating dummy variables: one 0/1 indicator column per month, 96 rows
x=data.frame(outer(rep(month.abb,length = 96),month.abb,"==")+0)
View(x)
colnames(x)=month.abb
airlinedata=cbind(airlines,x)
View(airlinedata)
airlinedata["t"]=1:96
airlinedata["t_square"]=airlinedata["t"]*airlinedata["t"]
View(airlinedata)
airlinedata["log_passenger"] <- log(airlinedata["Passengers"])
# NOTE(review): attach() puts every column on the search path; the final
# model below silently depends on this (see note there). Prefer data=.
attach(airlinedata)
#DataPartition: first 84 months train, last 12 months test
train <- airlinedata[1:84,]
test <- airlinedata[85:96,]
install.packages("Metrics")
library(Metrics)
###################Linear Model####################
airline_linear=lm(Passengers~t,data=train)
linear_pred=data.frame(predict(airline_linear,interval = "predict",newdata=test))
View(linear_pred)
rmse_lm =rmse(test$Passengers,linear_pred$fit) #53.199
rmse_lm
################Exponential Model############
# log-linear model; predictions are back-transformed with exp() for RMSE
airline_expo <- lm(log_passenger~t,data=train)
airline_expo_pred <- data.frame(predict(airline_expo,interval='predict', newdata=test))
View(airline_expo_pred)
rmse_expo=rmse(test$Passengers,exp(airline_expo_pred$fit)) #46.05
exp(airline_expo_pred$fit)
test$Passengers
##############Quaddratic Model#############
airline_quad=lm(Passengers~t+t_square,data=train)
airline_quad_pred=data.frame(predict(airline_quad,interval="predict",newdata=test))
View(airline_quad_pred)
rmse_Quad=rmse(test$Passengers,airline_quad_pred$fit) #48.05
rmse_Quad
#############Additive Seasonality########
# Dec is the omitted baseline month in all seasonal formulas below
sea_add_model <- lm(Passengers~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov , data=train)
sea_add_pred<-data.frame(predict(sea_add_model,newdata=test,interval='predict'))
View(sea_add_pred)
rmse_sea_add=rmse(test$Passengers,sea_add_pred$fit) #132.81
#########Additive Seasonlity with linear#######
Add_sea_Linear_model<-lm(Passengers~t+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
Add_sea_Linear_pred<-data.frame(predict(Add_sea_Linear_model,interval='predict',newdata=test))
View(Add_sea_Linear_pred)
rmse_Add_sea_Linear=rmse(test$Passengers,Add_sea_Linear_pred$fit) #35.34
rmse_Add_sea_Linear #35.34
######## Additive Seasonality with Quadratic ######
Add_sea_Quad_model<-lm(Passengers~t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data=train)
Add_sea_Quad_pred<-data.frame(predict(Add_sea_Quad_model,interval='predict',newdata=test))
# style: prefer TRUE over the reassignable shorthand T in na.rm
rmse_Add_sea_Quad<-sqrt(mean((test$Passengers-Add_sea_Quad_pred$fit)^2,na.rm=T))
rmse_Add_sea_Quad #26.36082
# Multiplicative Seasonality
multi_sea_model<-lm(log_passenger~Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov,data = train)
multi_sea_pred<-data.frame(predict(multi_sea_model,newdata=test,interval='predict'))
rmse_multi_sea<-sqrt(mean((test$Passengers-exp(multi_sea_pred$fit))^2,na.rm = T))
rmse_multi_sea #140.0632
# Preparing table on model and it's RMSE values
# NOTE(review): rmse_Add_sea_Linear (35.34) is computed above but omitted
# from this comparison table - confirm whether that is intentional.
table_rmse<-data.frame(c("rmse_lm","rmse_expo","rmse_Quad","rmse_sea_add","rmse_Add_sea_Quad","rmse_multi_sea"),c(rmse_lm,rmse_expo,rmse_Quad,rmse_sea_add,rmse_Add_sea_Quad,rmse_multi_sea))
View(table_rmse)
colnames(table_rmse)<-c("model","RMSE")
View(table_rmse)
# Additive seasonality with Quadratic has least RMSE value
### Combining Training & test data to build Additive seasonality using Quadratic Trend ####
# NOTE(review): data = airlines does not contain t, t_square or the month
# dummies; lm() resolves them from the attach()ed airlinedata above.
# Works only because both frames have 96 rows in the same order - fragile.
Add_sea_Quad_model_final <- lm(Passengers ~ t+t_square+Jan+Feb+Mar+Apr+May+Jun+Jul+Aug+Sep+Oct+Nov, data = airlines)
summary(Add_sea_Quad_model_final)
residual=Add_sea_Quad_model_final$residuals
#####
plot(Add_sea_Quad_model_final)
acf(Add_sea_Quad_model_final$residuals, lag.max = 10) # take all residual value of the model built & plot ACF plot
A <- arima(Add_sea_Quad_model_final$residuals, order = c(1,0,0))
###########
A$residuals
Aerrors <- A$residuals
acf(Aerrors, lag.max = 10)
# predicting next 12 months errors using arima( order =c(1,0,0))
library(forecast)
errors_12 <- forecast(A, h = 12) # head of months
errors_12
predict_res=predict(arima(residual,order=c(1,0,0)),n.ahead=12)
predict_res$pred
future_errors=as.data.frame(predict_res$pred)
View(future_errors)
#newdata: future months with t, t_square and dummy columns prepared
newtest_data=read_excel(file.choose())
View(newtest_data)
pred_new=data.frame(predict(Add_sea_Quad_model_final,newdata=newtest_data,interval="predict"))
View(pred_new)
# final forecast = regression fit + AR(1)-predicted residual
predicted_new_values <- pred_new$fit + future_errors
View(predicted_new_values)
plot(predicted_new_values,type="o")
|
4a7f12e9e880512396ed5fbee6f2005ac19f50a3
|
13e5cd3f13e6a2bf300c7199dc26f530ac899ba1
|
/man/get_polymorph_acc.Rd
|
481ff6ebcadb0c535c212a84eb1f73a099252d42
|
[
"MIT"
] |
permissive
|
phue/gwaR
|
cb4c3213c63f6e5eb9c97320d735637573958e97
|
80718a405c210555d80116a23be421ced1609087
|
refs/heads/master
| 2020-08-28T01:06:01.945434
| 2019-10-25T13:20:54
| 2019-10-25T13:20:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 549
|
rd
|
get_polymorph_acc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GWAS_functions.R
\name{get_polymorph_acc}
\alias{get_polymorph_acc}
\title{Query the 1001genomes API to obtain accession ids that carry a SNP of interest}
\usage{
get_polymorph_acc(gwas_table, SNPrank)
}
\arguments{
\item{gwas_table}{Object returned from read_gwas() function}
\item{SNPrank}{The (-log10(p)) rank of the SNP of interest}
}
\description{
Query the 1001genomes API to obtain accession ids that carry a SNP of interest
}
\seealso{
\code{\link{read_gwas}}
}
|
ea71d06effa314ea5418f62d6747e8eff14df0e8
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/seqHMM/inst/doc/seqHMM.R
|
dd458a1c6beed35c75d3593f8cd5655372390598
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,579
|
r
|
seqHMM.R
|
# NOTE(review): this file is the R code extracted (purled) from the seqHMM
# package vignette; the '## ----' lines are knitr chunk headers carrying
# the original chunk options and figure captions. It is a generated
# artifact - edit the vignette source (.Rnw/.Rmd), not this file.
## ----setup, include=FALSE, cache=FALSE------------------------------------
library(knitr)
opts_chunk$set(concordance = TRUE, tidy = FALSE)
options(prompt = "R> ", continue = "+ ", width = 76, useFancyQuotes = FALSE)
## ----settingdata, message=FALSE, cache=FALSE, echo = FALSE, eval = TRUE----
# Build single-channel (biofam_seq) and three-channel (marriage, parenthood,
# residence) state sequence objects covering ages 15-30.
library("seqHMM")
data("biofam", package = "TraMineR")
biofam_seq <- seqdef(biofam[, 10:25], start = 15, labels = c("parent",
"left", "married", "left+marr", "child", "left+child", "left+marr+ch",
"divorced"))
data("biofam3c")
marr_seq <- seqdef(biofam3c$married, start = 15, alphabet = c("single",
"married", "divorced"))
child_seq <- seqdef(biofam3c$children, start = 15,
alphabet = c("childless", "children"))
left_seq <- seqdef(biofam3c$left, start = 15, alphabet = c("with parents",
"left home"))
attr(marr_seq, "cpal") <- c("violetred2", "darkgoldenrod2", "darkmagenta")
attr(child_seq, "cpal") <- c("darkseagreen1", "coral3")
attr(left_seq, "cpal") <- c("lightblue", "red3")
## ----graphicalillustrations2, fig.width=6.5, fig.height=3.7, dev.args=list(pointsize=10), fig.keep='last', cache=FALSE, message=FALSE, echo=FALSE, fig.cap='Stacked sequence plot of the first ten individuals in the \\code{biofam} data plotted with the \\code{ssplot} function. The top plot shows the original sequences, and the three bottom plots show the sequences in the separate channels for the same individuals. The sequences are in the same order in each plot, i.e., the same row always matches the same individual.', fig.align='center'----
ssplot(list(biofam_seq[1:10,], marr_seq[1:10,], child_seq[1:10,],
left_seq[1:10,]),
sortv = "from.start", sort.channel = 1, type = "I",
ylab = c("Original", "Marriage", "Parenthood", "Residence"),
xtlab = 15:30, xlab = "Age", title = "Ten first sequences",
title.n = FALSE, legend.prop = 0.63, ylab.pos = c(1, 1.5),
ncol.legend = c(3, 1, 1, 1))
## ----plottingsequences, fig.width=5, fig.height=3, dev.args=list(pointsize=10), fig.cap="Stacked sequence plot of annual state distributions in the three-channel \\code{biofam} data. This is the default output of the \\code{ssplot} function. The labels for the channels are taken from the named list of state sequence objects, and the labels for the x axis ticks are taken from the column names of the first object.", fig.keep='last', fig.align='center', cache=FALSE, echo = FALSE----
ssplot(list("Marriage" = marr_seq, "Parenthood" = child_seq,
"Residence" = left_seq))
## ----gridplot1, fig.width=5.5, fig.height=3.5, dev.args=list(pointsize=10), echo=FALSE, fig.cap="Showing state distribution plots for women and men in the \\code{biofam} data. Two figures were defined with the \\code{ssp} function and then combined into one figure with the \\code{gridplot} function.", fig.align='center', fig.keep='last', cache = FALSE----
ssp_f <- ssp(list(marr_seq[biofam3c$covariates$sex == "woman",],
child_seq[biofam3c$covariates$sex == "woman",],
left_seq[biofam3c$covariates$sex == "woman",]),
type = "I", sortv = "mds.obs", with.legend = FALSE, title = "Women",
ylab.pos = c(1, 2, 1), xtlab = 15:30, ylab = c("Married", "Children",
"Residence"))
ssp_m <- update(ssp_f, title = "Men",
x = list(marr_seq[biofam3c$covariates$sex == "man",],
child_seq[biofam3c$covariates$sex == "man",],
left_seq[biofam3c$covariates$sex == "man",]))
gridplot(list(ssp_f, ssp_m), ncol = 2, nrow = 2, byrow = TRUE,
legend.pos = "bottom", legend.pos2 = "top", row.prop = c(0.65, 0.35))
## ----code_mcHMM, cache=FALSE, echo = FALSE, message=FALSE, warning=TRUE, eval = TRUE----
# Initial values for a 5-state multichannel HMM (left-to-right structure:
# transition matrix is upper triangular, state 5 is absorbing).
mc_init <- c(0.9, 0.05, 0.02, 0.02, 0.01)
mc_trans <- matrix(c(0.80, 0.10, 0.05, 0.03, 0.02, 0, 0.90, 0.05,
0.03, 0.02, 0, 0, 0.90, 0.07, 0.03, 0, 0, 0, 0.90, 0.10, 0, 0, 0,
0, 1), nrow = 5, ncol = 5, byrow = TRUE)
mc_emiss_marr <- matrix(c(0.90, 0.05, 0.05, 0.90, 0.05, 0.05, 0.05,
0.90, 0.05, 0.05, 0.90, 0.05, 0.30, 0.30, 0.40), nrow = 5, ncol = 3,
byrow = TRUE)
mc_emiss_child <- matrix(c(0.9, 0.1, 0.9, 0.1, 0.1, 0.9, 0.1, 0.9,
0.5, 0.5), nrow = 5, ncol = 2, byrow = TRUE)
mc_emiss_left <- matrix(c(0.9, 0.1, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9,
0.5, 0.5), nrow = 5, ncol = 2, byrow = TRUE)
mc_obs <- list(marr_seq, child_seq, left_seq)
mc_emiss <- list(mc_emiss_marr, mc_emiss_child, mc_emiss_left)
mc_initmod <- build_hmm(observations = mc_obs, initial_probs = mc_init,
transition_probs = mc_trans, emission_probs = mc_emiss,
channel_names = c("Marriage", "Parenthood", "Residence"))
# For CRAN vignette: load the estimated model object for speed-up
data("hmm_biofam")
# mc_fit <- fit_model(mc_initmod, em_step = FALSE, local_step = TRUE,
#   threads = 4)
## ----plottingHMM, out.width='\\linewidth', fig.height=3.5, dev.args=list(pointsize=10), echo=FALSE, fig.cap="Illustrating a hidden Markov model as a directed graph. Pies represent five hidden states, with slices showing emission probabilities of combinations of observed states. States with emission probability less than 0.05 are combined into one slice. Edges show the transtion probabilities. Initial probabilities of hidden states are given below the pies.", fig.align='center', fig.keep='last', cache = FALSE----
plot(hmm_biofam, vertex.size = 50, vertex.label.dist = 1.5,
edge.curved = c(0, 0.6, -0.8, 0.6, 0, 0.6, 0), legend.prop = 0.3,
combined.slice.label = "States with prob. < 0.05")
## ----graphicalillustrations5, out.width='\\linewidth', fig.height=3.5, dev.args=list(pointsize=10), echo=FALSE, fig.cap="Another version of the hidden Markov model of Figure 4 with a different layout and modified labels, legends, and colors. All observed states are shown.", fig.align='center', fig.keep='last', cache = FALSE----
vertex_layout <- matrix(c(1, 2, 2, 3, 1, 0, 0.5, -0.5, 0, -1),
ncol = 2)
plot(hmm_biofam, layout = vertex_layout, xlim = c(0.5, 3.5),
ylim = c(-1.5, 1), rescale = FALSE, vertex.size = 50,
vertex.label.pos = c("left", "top", "bottom", "right", "left"),
edge.curved = FALSE, edge.width = 1, edge.arrow.size = 1,
with.legend = "left", legend.prop = 0.4, label.signif = 1,
combine.slices = 0, cpal = colorpalette[[30]][c(14:5)])
## ----ssplotHMM, fig.width=5.5, fig.height=5.5, dev.args=list(pointsize=10), fig.cap="Using the \\code{ssplot} function for an \\code{hmm} object makes it easy to plot the observed sequences together with the most probable paths of hidden states given the model.", fig.align='center', fig.keep='last', cache = FALSE, echo = FALSE----
ssplot(hmm_biofam, plots = "both", type = "I", sortv = "mds.hidden",
title = "Observed and hidden state sequences", xtlab = 15:30,
xlab = "Age")
## ----code_settingdata, ref.label = 'settingdata', message=FALSE, warning = TRUE, echo = TRUE----
# The code_* chunks below repeat earlier hidden chunks with echo=TRUE so
# the code appears in the rendered vignette.
library("seqHMM")
data("biofam", package = "TraMineR")
biofam_seq <- seqdef(biofam[, 10:25], start = 15, labels = c("parent",
"left", "married", "left+marr", "child", "left+child", "left+marr+ch",
"divorced"))
data("biofam3c")
marr_seq <- seqdef(biofam3c$married, start = 15, alphabet = c("single",
"married", "divorced"))
child_seq <- seqdef(biofam3c$children, start = 15,
alphabet = c("childless", "children"))
left_seq <- seqdef(biofam3c$left, start = 15, alphabet = c("with parents",
"left home"))
attr(marr_seq, "cpal") <- c("violetred2", "darkgoldenrod2", "darkmagenta")
attr(child_seq, "cpal") <- c("darkseagreen1", "coral3")
attr(left_seq, "cpal") <- c("lightblue", "red3")
## ----code_plottingsequences, ref.label = 'plottingsequences', echo = TRUE, eval = FALSE----
#  ssplot(list("Marriage" = marr_seq, "Parenthood" = child_seq,
#  "Residence" = left_seq))
## ----code_graphicalillustrations2, ref.label = 'graphicalillustrations2', echo=TRUE, eval = FALSE----
#  ssplot(list(biofam_seq[1:10,], marr_seq[1:10,], child_seq[1:10,],
#  left_seq[1:10,]),
#  sortv = "from.start", sort.channel = 1, type = "I",
#  ylab = c("Original", "Marriage", "Parenthood", "Residence"),
#  xtlab = 15:30, xlab = "Age", title = "Ten first sequences",
#  title.n = FALSE, legend.prop = 0.63, ylab.pos = c(1, 1.5),
#  ncol.legend = c(3, 1, 1, 1))
## ----code_gridplot1, ref.label = 'gridplot1', echo=TRUE, eval = FALSE-----
#  ssp_f <- ssp(list(marr_seq[biofam3c$covariates$sex == "woman",],
#  child_seq[biofam3c$covariates$sex == "woman",],
#  left_seq[biofam3c$covariates$sex == "woman",]),
#  type = "I", sortv = "mds.obs", with.legend = FALSE, title = "Women",
#  ylab.pos = c(1, 2, 1), xtlab = 15:30, ylab = c("Married", "Children",
#  "Residence"))
#
#  ssp_m <- update(ssp_f, title = "Men",
#  x = list(marr_seq[biofam3c$covariates$sex == "man",],
#  child_seq[biofam3c$covariates$sex == "man",],
#  left_seq[biofam3c$covariates$sex == "man",]))
#
#  gridplot(list(ssp_f, ssp_m), ncol = 2, nrow = 2, byrow = TRUE,
#  legend.pos = "bottom", legend.pos2 = "top", row.prop = c(0.65, 0.35))
## ----code_sc_buildHMM_random, cache=FALSE---------------------------------
sc_initmod_random <- build_hmm(observations = biofam_seq, n_states = 5)
## ----code_sc_initialvalues, cache=FALSE-----------------------------------
# Informed starting values: emission rows seeded from observed state
# frequencies in consecutive age windows (+0.1 to avoid exact zeros).
sc_init <- c(0.9, 0.06, 0.02, 0.01, 0.01)
sc_trans <- matrix(c(0.80, 0.10, 0.05, 0.03, 0.02, 0.02, 0.80, 0.10,
0.05, 0.03, 0.02, 0.03, 0.80, 0.10, 0.05, 0.02, 0.03, 0.05, 0.80, 0.10,
0.02, 0.03, 0.05, 0.05, 0.85), nrow = 5, ncol = 5, byrow = TRUE)
sc_emiss <- matrix(NA, nrow = 5, ncol = 8)
sc_emiss[1,] <- seqstatf(biofam_seq[, 1:4])[, 2] + 0.1
sc_emiss[2,] <- seqstatf(biofam_seq[, 5:7])[, 2] + 0.1
sc_emiss[3,] <- seqstatf(biofam_seq[, 8:10])[, 2] + 0.1
sc_emiss[4,] <- seqstatf(biofam_seq[, 11:13])[, 2] + 0.1
sc_emiss[5,] <- seqstatf(biofam_seq[, 14:16])[, 2] + 0.1
sc_emiss <- sc_emiss / rowSums(sc_emiss)
rownames(sc_trans) <- colnames(sc_trans) <- rownames(sc_emiss) <-
paste("State", 1:5)
colnames(sc_emiss) <- attr(biofam_seq, "labels")
sc_trans
round(sc_emiss, 3)
## ----code_sc_buildHMM, cache=FALSE----------------------------------------
sc_initmod <- build_hmm(observations = biofam_seq, initial_probs = sc_init,
transition_probs = sc_trans, emission_probs = sc_emiss)
## ----code_sc_fitHMM, cache=FALSE------------------------------------------
sc_fit <- fit_model(sc_initmod)
## ----code_sc_results1, cache=FALSE----------------------------------------
sc_fit$logLik
## ----code_sc_results2, cache=FALSE----------------------------------------
sc_fit$model
## ----code_mcHMM2, echo = TRUE, message=TRUE, warnings=TRUE, eval = TRUE, cache = FALSE----
mc_init <- c(0.9, 0.05, 0.02, 0.02, 0.01)
mc_trans <- matrix(c(0.80, 0.10, 0.05, 0.03, 0.02, 0, 0.90, 0.05, 0.03,
0.02, 0, 0, 0.90, 0.07, 0.03, 0, 0, 0, 0.90, 0.10, 0, 0, 0, 0, 1),
nrow = 5, ncol = 5, byrow = TRUE)
mc_emiss_marr <- matrix(c(0.90, 0.05, 0.05, 0.90, 0.05, 0.05, 0.05, 0.90,
0.05, 0.05, 0.90, 0.05, 0.30, 0.30, 0.40), nrow = 5, ncol = 3,
byrow = TRUE)
mc_emiss_child <- matrix(c(0.9, 0.1, 0.9, 0.1, 0.1, 0.9, 0.1, 0.9, 0.5,
0.5), nrow = 5, ncol = 2, byrow = TRUE)
mc_emiss_left <- matrix(c(0.9, 0.1, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.5,
0.5), nrow = 5, ncol = 2, byrow = TRUE)
mc_obs <- list(marr_seq, child_seq, left_seq)
mc_emiss <- list(mc_emiss_marr, mc_emiss_child, mc_emiss_left)
mc_initmod <- build_hmm(observations = mc_obs, initial_probs = mc_init,
transition_probs = mc_trans, emission_probs = mc_emiss,
channel_names = c("Marriage", "Parenthood", "Residence"))
# For CRAN vignette: load the estimated model object for speed-up
data("hmm_biofam")
# mc_fit <- fit_model(mc_initmod, em_step = FALSE, local_step = TRUE,
#   threads = 4)
## ----code_mcHMM_BIC, cache=FALSE, echo = TRUE, message=FALSE, eval = TRUE----
# Vignette: already loaded hmm_biofam
# hmm_biofam <- mc_fit$model
BIC(hmm_biofam)
## ----code_MHMM, cache=FALSE, echo = TRUE, eval = TRUE, warning=TRUE-------
# Mixture HMM: two clusters (5-state and 4-state submodels) with cluster
# membership modelled by sex and birth cohort covariates.
mc_init2 <- c(0.9, 0.05, 0.03, 0.02)
mc_trans2 <- matrix(c(0.85, 0.05, 0.05, 0.05, 0, 0.90, 0.05, 0.05, 0, 0,
0.95, 0.05, 0, 0, 0, 1), nrow = 4, ncol = 4, byrow = TRUE)
mc_emiss_marr2 <- matrix(c(0.90, 0.05, 0.05, 0.90, 0.05, 0.05, 0.05,
0.85, 0.10, 0.05, 0.80, 0.15), nrow = 4, ncol = 3, byrow = TRUE)
mc_emiss_child2 <- matrix(c(0.9, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
nrow = 4, ncol = 2, byrow = TRUE)
mc_emiss_left2 <- matrix(c(0.9, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
nrow = 4, ncol = 2, byrow = TRUE)
mhmm_init <- list(mc_init, mc_init2)
mhmm_trans <- list(mc_trans, mc_trans2)
mhmm_emiss <- list(list(mc_emiss_marr, mc_emiss_child, mc_emiss_left),
list(mc_emiss_marr2, mc_emiss_child2, mc_emiss_left2))
biofam3c$covariates$cohort <- cut(biofam3c$covariates$birthyr,
c(1908, 1935, 1945, 1957))
biofam3c$covariates$cohort <- factor(biofam3c$covariates$cohort,
labels=c("1909-1935", "1936-1945", "1946-1957"))
init_mhmm <- build_mhmm(observations = mc_obs, initial_probs = mhmm_init,
transition_probs = mhmm_trans, emission_probs = mhmm_emiss,
formula = ~sex + cohort, data = biofam3c$covariates,
channel_names = c("Marriage", "Parenthood", "Residence"),
cluster_names = c("Cluster 1", "Cluster 2"))
# vignette: less restarts and no parallelization
set.seed(1011)
mhmm_fit <- fit_model(init_mhmm, local_step = TRUE, threads = 1,
control_em = list(restart = list(times = 10)))
mhmm <- mhmm_fit$model
## ----code_summaryMHMM, cache=FALSE----------------------------------------
summary(mhmm, conditional_se = FALSE)
## ----code_plottingHMMbasic, out.width='\\linewidth', fig.height=4, dev.args=list(pointsize=10), echo=TRUE, fig.align='center', fig.keep='last', cache = FALSE, eval = TRUE, fig.cap="A default plot of a hidden Markov model."----
plot(hmm_biofam)
## ----code_plottingHMM, ref.label='plottingHMM', echo=TRUE, eval = FALSE----
#  plot(hmm_biofam, vertex.size = 50, vertex.label.dist = 1.5,
#  edge.curved = c(0, 0.6, -0.8, 0.6, 0, 0.6, 0), legend.prop = 0.3,
#  combined.slice.label = "States with prob. < 0.05")
## ----code_graphicalillustrations5, ref.label = 'graphicalillustrations5', echo=TRUE, eval = FALSE----
#  vertex_layout <- matrix(c(1, 2, 2, 3, 1, 0, 0.5, -0.5, 0, -1),
#  ncol = 2)
#
#  plot(hmm_biofam, layout = vertex_layout, xlim = c(0.5, 3.5),
#  ylim = c(-1.5, 1), rescale = FALSE, vertex.size = 50,
#  vertex.label.pos = c("left", "top", "bottom", "right", "left"),
#  edge.curved = FALSE, edge.width = 1, edge.arrow.size = 1,
#  with.legend = "left", legend.prop = 0.4, label.signif = 1,
#  combine.slices = 0, cpal = colorpalette[[30]][c(14:5)])
## ----code_ssplotHMM, ref.label = 'ssplotHMM', eval = FALSE, echo = TRUE----
#  ssplot(hmm_biofam, plots = "both", type = "I", sortv = "mds.hidden",
#  title = "Observed and hidden state sequences", xtlab = 15:30,
#  xlab = "Age")
## ----code_plottingMHMMbasic, fig.width=6.5, fig.height=8, dev.args=list(pointsize=10), echo=TRUE, fig.align='center', fig.keep='last', cache = FALSE, eval = TRUE, fig.cap="Plotting submodels of an MHMM with the \\code{plot} method."----
plot(mhmm, interactive = FALSE, nrow = 2, legend.prop = 0.45,
vertex.size = 50, vertex.label.cex = 1.3, cex.legend = 1.3,
edge.curved = 0.65, edge.label.cex = 1.3, edge.arrow.size = 0.8)
## ----code_ssplotMHMM, eval = FALSE, echo = TRUE---------------------------
#  mssplot(mhmm, ask = TRUE)
|
2603a6fb0d299acc2a17727e465d441ad9d59c25
|
cd812e9a7d34e00a26ce8bdcf81b5eb2d43a4a66
|
/bearing.R
|
877eff5bc4a058aa6b3199278e59433d87e34fb4
|
[] |
no_license
|
anniekellner/ch1_landing
|
e0644f8b97578dbae499530e20658691b100f99b
|
f71c76e0e40761cc0b6606bacedf560119b2d1da
|
refs/heads/master
| 2022-12-07T22:50:23.557591
| 2022-12-02T22:31:48
| 2022-12-02T22:31:48
| 140,736,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,052
|
r
|
bearing.R
|
# Prepare polar-bear swim GPS fixes for bearing computation: project to
# Alaska Albers, split by telemetry fix interval, regularise the 1-hour
# group as an adehabitatLT trajectory, and derive start (t1) and end (t2)
# lat/long for each step.
# NOTE(review): rm(list = ls()) wipes the user's workspace; avoid in
# scripts meant to be sourced.
rm(list = ls())
load('all.RData')
swim <- subset(all, swim==1)
library(adehabitatLT)
library(proj4)
# project lat/long to grid for adehabitat
M <- as.matrix(cbind(swim$gps_lon, swim$gps_lat)) #create a matrix to project
xy <- project(M, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs") #project to NAD 1983 Alaska Albers
X <- xy[,1]
Y <- xy[,2]
swim2 <- cbind(swim,X,Y) #bind the datasets
#to POSIXct
swim2$datetime <- as.POSIXct(swim2$datetime, tz='US/Alaska')
#Divide into groups with same fix interval (1-, 2-, 4-, 8-hour collars)
one <- subset(swim2, id=='pb_20414.2009' | id=='pb_20446.2009' | id=='pb_20735.2009' | id=='pb_20845.2015')
two <- subset(swim2, id=='pb_20520.2012' | id=='pb_20525.2013' | id=='pb_20525.2014' | id=='pb_20529.2004' | id=='pb_20529.2005'| id=='pb_21015.2013' | id=='pb_21358.2013' | id=='pb_21368.2014' | id=='pb_32366.2014')
four <- subset(swim2, id=='pb_20333.2008' | id=='pb_21264.2011' | id=='pb_32366.2011')
eight <- subset(swim2, id=='pb_06817.2006' | id=='pb_20413.2006' | id=='pb_20418.2005')
#convert to traj
# NOTE(review): only swim1.traj (1-h group) is processed further in this
# script; the 2/4/8-h trajectories are built but unused here.
swim1.traj <- as.ltraj(xy=one[,c("X","Y")], date=one$datetime, id=one$id, burst=one$id, proj4string = CRS(("+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")))
swim2.traj <- as.ltraj(xy=two[,c("X","Y")], date=two$datetime, id=two$id, burst=two$id, proj4string = CRS(("+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")))
swim4.traj <- as.ltraj(xy=four[,c("X","Y")], date=four$datetime, id=four$id, burst=four$id, proj4string = CRS(("+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")))
swim8.traj <- as.ltraj(xy=eight[,c("X","Y")], date=eight$datetime, id=eight$id, burst=eight$id, proj4string = CRS(("+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs")))
#create regular trajectories
library(lubridate)
refda <- parse_date_time(paste(min(swim2$datetime)), orders = 'ymd HMS', tz = 'US/Alaska') #set refda
swim1.traj.NA <- setNA(swim1.traj, refda, 1, units = 'hour') #setNA
swim1.traj.NA0 <- sett0(swim1.traj.NA, refda, 1, units = 'hour') #round times to create regular trajectory
# NOTE(review): swim1.df is built from swim1.traj.NA, not from the
# time-rounded swim1.traj.NA0 created just above - confirm intended.
swim1.df <- ld(swim1.traj.NA)
## Convert t1 to LL
library(proj4)
M <- as.matrix(cbind(swim1.df$x, swim1.df$y)) #create a matrix to project
xy <- project(M, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs", inverse=TRUE) #project to lat/long
long.t1 <- xy[,1]
lat.t1 <- xy[,2]
swim1.df <- cbind(swim1.df,long.t1,lat.t1) #bind the datasets
## Create column for difference (t2x = location x when t = x+1)
## t2 = t + 1
library(dplyr)
swim1.df<- dplyr::mutate(swim1.df, t2x=x+dx)
swim1.df <- dplyr::mutate(swim1.df, t2y=y+dy)
#convert t2 back to lat/long
library(proj4)
M <- as.matrix(cbind(swim1.df$t2x, swim1.df$t2y)) #create a matrix to project
xy <- project(M, "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs", inverse=TRUE) #project to lat/long
long.t2 <- xy[,1]
lat.t2 <- xy[,2]
swim1.df <- cbind(swim1.df,long.t2,lat.t2) #bind the datasets
# NOTE(review): c() here stacks all longitudes followed by all latitudes
# into one vector of length 2n - probably meant paste(..., sep = ",") as
# used for LL.t2 on the next line; confirm (LL.t1 appears unused below).
swim1.df$LL.t1 <- c(swim1.df$long.t1, swim1.df$lat.t1)
swim1.df$LL.t2 <- paste(swim1.df$long.t2, swim1.df$lat.t2, sep=",")
## find bearing
library(geosphere)
#' Initial bearing (degrees from north, in (-180, 180]) from the t1 to the
#' t2 location of row `i` of the global `swim1.df`.
#'
#' NOTE: reads `swim1.df` from the enclosing environment rather than
#' taking it as an argument; the unused local copy of the row has been
#' removed.
#' @param i row index into `swim1.df`.
#' @return numeric scalar bearing from geosphere::bearing().
get.az <- function(i){
  bearing(c(swim1.df$long.t1[i], swim1.df$lat.t1[i]),
          c(swim1.df$long.t2[i], swim1.df$lat.t2[i]))
}
# Apply get.az to every row. lapply() stores a *list* column, which the
# as.numeric() below flattens back to a plain numeric vector; sapply or
# vapply(..., numeric(1)) would be the more direct idiom here.
swim1.df$bearing <- lapply(X=seq(1, nrow(swim1.df),1), FUN = get.az)
# Change negative angles to positive by adding 360
# (maps geosphere's (-180, 180] bearings onto [0, 360))
swim1.df$bearing <- as.numeric(swim1.df$bearing)
swim1.df$bearing <- ifelse(swim1.df$bearing <0, swim1.df$bearing + 360, swim1.df$bearing +0)
# Save as RData: keep trajectory columns x:pkey plus the new bearing
library(dplyr)
swim1.NAZ <- select(swim1.df, x:pkey, bearing)
save(swim1.NAZ, file='swim1_NAZ.RData')
#####################################
|
8d0ce70f3a70c09bace1f01af9adb9481ae4a4b2
|
09604199ee820207ac560b698f8d4392b5d3ec70
|
/Skripte/calcPhageAbundance_wo_pOTU.R
|
f580561279916ebd3bb31965af4a55a180e461b6
|
[] |
no_license
|
EllenOldenburg/master_thesis
|
c7e321173f3af55b8a0256d505e064a544fbbf5e
|
ca8d42b67efaf3fecceb4ceac1810ae6b7199d49
|
refs/heads/master
| 2022-12-31T19:38:55.046805
| 2020-10-28T12:56:26
| 2020-10-28T12:56:26
| 293,061,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
calcPhageAbundance_wo_pOTU.R
|
# Compute a (sample x organism) relative-abundance matrix for phages and
# prophages from per-sample read-count files (SRR9178068..SRR9178536),
# mapping sequence ids to prophage host names via protein2prophage.tsv.
###############
## Libraries ##
###############
library(plyr)
library(data.table)
library(readr)
library(ggplot2)
library(parallel)
###########################
## Protein to organismus ##
###########################
#read files (one sample is read here only to derive the organism names;
#the loop below re-reads every sample)
data = fread("/mnt/data_scratch/ovidiu/Ellen/MasterThesis/pOTU_readCount/SRR9178536.txt")
# remove last row from file (number of unmapped reads)
data=data[1:(nrow(data)-1),]
#extract org name
########
#cluster_info = fread("/mnt/data_scratch/thomas/NetworkFiles/cps.txt")
prot2pp = fread("/mnt/data_scratch/ovidiu/Ellen/MasterThesis/protein2prophage.tsv", header = FALSE, sep = "\t")
show(dim(prot2pp))
prot2pp = unique(prot2pp)
#prot2pp$V1=strsplit(prot2pp$V1,'\\|') # split first column in accesion id and locus tag (protein 2 prophage data)
#prot2pp$V3=sapply(prot2pp$V1,function(x)x[[2]]) # add new column with locus tag (protein 2 prophage data)
#prot2pp$V1=sapply(prot2pp$V1,function(x)x[[1]]) # change the first column to accesion id only (protein 2 prophage data)
# NOTE(review): the lines that create prot2pp$V3 are commented out above,
# so V3 may not exist when this gsub runs - confirm the input file already
# carries a third column, otherwise this fails.
prot2pp$V3=gsub("\\.fa[a$&]",'',prot2pp$V3) # remove file ending (.faa,.fa$,.fa&)
# match mcl cluster to prophage annotation
mi = match(data$V1,prot2pp$V2) # index prophages in prot2pp
org_names = prot2pp$V3[mi] # add prophages names, phages names as NA
i1 = unlist(which(is.na(org_names))) # get index of phages
data$V5 = org_names # add prophages names, phages names are NA
data$V5[i1] = gsub("&.*","",data$V1[i1]) # change phagenames (NAs) to it's name
# set the steps for the data (SRR accession suffixes of the samples)
steps = seq(178068,178536,1)
BAI=data$V5
#unique bac accession ids
ubai=unique(BAI)
abm_phage=matrix(NaN,nrow = length(steps),ncol=length(ubai))
rownames(abm_phage)=paste0("SRR9", steps)
colnames(abm_phage)=ubai
show(length(ubai))
# NOTE(review): the loop reuses BAI from the file read above, so it
# assumes every SRR file lists the same sequences in the same order and
# count as SRR9178536 - confirm.
for (i in steps) {
show(i)
data = fread(paste0("/mnt/data_scratch/ovidiu/Ellen/MasterThesis/pOTU_readCount/SRR9", i,".txt"))
sm=sum(data$V3+data$V4)
absz = data$V3/sm
# NOTE(review): 1:length(absz)-1 parses as (1:n)-1 = 0:(n-1); indexing
# with 0 is silently dropped, so this *happens* to remove the last
# element. absz[-length(absz)] would state the intent unambiguously.
absz = absz[1:length(absz)-1]
data = data[1:(nrow(data)-1),]
data = cbind(data,BAI)
# 02
# compute relative abundance for all organisms
rb_non_pOTU = aggregate(absz,list(data$BAI),mean)
# check the average number of reads per organism
tmp=rb_non_pOTU$x*sm
# keep only the species which have on average 1 read per whole genome
rb_non_pOTU$x[which(tmp<1)]=0
# store in matrix
abm_phage[paste0("SRR9", i),rb_non_pOTU$Group.1]=rb_non_pOTU$x
}
save(abm_phage, file = "/mnt/data_scratch/ovidiu/Ellen/MasterThesis/phageInfo/abund_phage.RData")
|
2fdaa12a2894c1bf6056b20892afadaa5ba498be
|
51e773e0c7294378152bc0610926da06c64d2eec
|
/app.R
|
9a15f782845877846af529a59ec799c5a1c86528
|
[] |
no_license
|
laurin-f/dreiFragezeichenapp
|
5c84077b19baed894bb1638b9b3e8683d259c9c3
|
b95c7bb136fce270713ad643770483210ecd36e0
|
refs/heads/master
| 2020-05-04T16:51:32.974416
| 2019-04-03T14:41:02
| 2019-04-03T14:41:02
| 179,290,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,499
|
r
|
app.R
|
#install packages
# packages<-c("shiny","rsconnect","shinyFiles","rdrop2")
# for(i in packages){
# if (!i %in% installed.packages()) {install.packages(i)}}
#load packages
library(rdrop2)
library(shiny)
library(shinyFiles)
#dropboxdir
outputDir <- "dreiFragezeichenApp"
############################
#Functions to save Data to Dropbox
##################################
# Persist `data` locally under `fileName`, then mirror the file to the
# Dropbox folder `outputDir` (rdrop2). The serialisation format is picked
# from the file extension: ".csv" -> write.csv, ".R" -> save() (the object
# is stored under the name `data`, which loadData() relies on).
saveData <- function(data, fileName) {
  if (grepl("\\.csv", fileName)) {
    write.csv(data, fileName, row.names = FALSE, quote = TRUE)
  }
  if (grepl("\\.R", fileName)) {
    save(data, file = fileName)
  }
  # Upload the freshly written local file to Dropbox.
  drop_upload(fileName, path = outputDir)
}
######################################
#Function to load data from dropbox
#######################################
# Load a previously saved file from the Dropbox folder `outputDir`.
# - "*.csv": read via drop_read_csv and return the TRANSPOSED data frame.
# - "*.R":   download to the working directory and load() it into the
#            GLOBAL environment (side effect: callers read the restored
#            object -- named `data` by saveData() -- from .GlobalEnv;
#            nothing useful is returned in this branch).
loadData <- function(fileName) {
filePath <- file.path(outputDir, fileName)
if(length(grep("\\.csv",fileName))==1){
data <- drop_read_csv(filePath)
return(t(data))
}
if(length(grep("\\.R",fileName))==1){
# overwrite = T: replace any stale local copy before loading
drop_download(filePath,overwrite = T,local_path = file.path(getwd(),fileName))
load(file.path(getwd(),fileName),envir = .GlobalEnv)
}
}
####################
#set input
####################
# Restore the persisted episode count from Dropbox (input.csv); fall back
# to 200 episodes when the app runs for the first time.
input.path<-file.path(outputDir,"input.csv")
if(drop_exists(input.path)){
n_folgen2<-as.numeric(drop_read_csv(input.path))
#oad("input.R",envir = .GlobalEnv)
}else{
n_folgen2<-200
}
###################################
#UI
#####################################
#build User Interface
# Sidebar: numeric input for the total number of episodes (initialised from
# the persisted value n_folgen2) plus the draw button. Main panel: the drawn
# episode, the remaining count, and an occasional date-based greeting.
ui<-pageWithSidebar(
# Application title
headerPanel("??? Zufallsgenerator"),
#
sidebarPanel(
numericInput("n_folgen","Anzahl Folgen",n_folgen2),
actionButton("button", "Folge generieren")
),
mainPanel(
textOutput("folge"),
textOutput("weitere_folgen"),
br(),
textOutput("datum"),
# enlarge the announcement text via inline CSS on the #folge output
tags$head(tags$style("#folge{
font-size: 20px;
}"
)
)
)
)
###############################
#dreifragezeichen Funktion
###############################
# Draw a "Die drei ???" episode number without repetition. The pool of
# not-yet-heard episodes is persisted in Dropbox so draws survive sessions.
#
# n_folgen: total number of episodes in the pool.
# file:     base name of the Dropbox state file (n_folgen and ".R" appended).
#
# Returns list(<drawn episode number>, <number of episodes still remaining>).
# NOTE(review): relies on loadData() assigning the restored object `data`
# into the global environment -- confirm before refactoring.
random_drei_fragezeichen<-
function(n_folgen=200,# number of episodes
#name of the file in which the vector is stored
file="R_drei_Fragezeichen-Folgen_"){
# combine base name and episode count into the state file's name
file_n<-paste0(file,n_folgen,".R")
# if no file with this name exists yet, it is created now
if(!drop_exists(file.path(outputDir,file_n))){
# a vector from 1 to the number of episodes is created
vec<-1:n_folgen
# the vector is saved under the given name
saveData(data=vec,fileName = file_n)
#save(vec,file=file_n)
}# end if
# if the episode count was changed there are now two files in the folder;
# the out-of-date one is deleted.
# first, query all files whose names contain `file`
dropbox_list<-drop_dir(outputDir)
files<-dropbox_list$name[grep(file,dropbox_list$name)]
# the current file is the one with n_folgen at the end of its name
cur_file<-grep(paste0(file,n_folgen),files)
# any remaining (stale) files are deleted now
if(length(files[-cur_file])!=0){
for(i in files[-cur_file]){
drop_delete(file.path(outputDir,i))
}
}
# now the stored vector is loaded (into .GlobalEnv as `data`, see loadData)
#load(file_n)
loadData(fileName = file_n)
vec<-data
#if(save==T){
# if it has length zero (all episodes heard), restart with the full pool
if(length(vec)==0){
vec<-1:n_folgen
}
if(length(vec)==n_folgen){
# full/fresh pool: the LAST episode is taken deterministically
folge<-vec[n_folgen]
# this episode is removed from the pool
vec<-vec[-n_folgen]
}else{
# otherwise a random element of the remaining pool is drawn
sampl<-sample(1:length(vec),1,replace = F)
# this value is the episode to listen to now
folge<-vec[sampl]
# this episode is removed from the pool
vec<-vec[-sampl]
}
# the shortened vector is saved back to Dropbox
saveData(vec,fileName=file_n)
# the current episode and remaining count are returned
# return(folge)
#}else{
# return(length(vec))
#}
return(list(folge,length(vec)))
}
##################################
#Server
##################################
#build server
# Shiny server: keeps the episode-count input synchronised with the value
# persisted in Dropbox and, on button press, draws a random episode and
# renders the announcement, the remaining count, and a date-based greeting.
server<-function(input, output,session) {
  # Initialise the numeric input from the persisted value (default 200).
  observe({
    if(drop_exists(input.path)){
      n_folgen2<-as.numeric(drop_read_csv(input.path))
    }else{
      n_folgen2<-200
    }
    updateNumericInput(session,"n_folgen",value=n_folgen2)
  })
  observeEvent(input$button,{
    n_folgen2<-(input$n_folgen)
    # draw an episode; returns list(<episode>, <episodes remaining>)
    out<-random_drei_fragezeichen(n_folgen = n_folgen2)
    weitere<-out[[2]]
    folge<-out[[1]]
    # persist the (possibly changed) episode count
    saveData(n_folgen2, fileName = 'input.csv')
    # announce the drawn episode with a randomly chosen phrasing
    output$folge<-renderText({
      formulierungen<-c(paste("heute hörst du Folge",folge),
                        paste("heute ist Folge",folge,"dran"),
                        paste("die heutige Auswahl ist Folge",folge),
                        paste("für heute hat der Zufallsgenerator Folge",folge,"ausgesucht"),
                        paste("dieses Mal ist Folge",folge,"dran"),
                        paste("der Algorithmus hat sich für Folge",folge,"entschieden"),
                        paste("der Computer schlägt dir heute Folge",folge,"vor"),
                        paste("die Planetenkonstellation spricht heute deutlich für Folge",folge),
                        paste("die Götter haben entschieden, dass heute Zeit für Folge",folge,"ist"),
                        paste0("wie wäre es mit Folge ",folge,"?"),
                        paste("dieses Mal hörst du Folge",folge))
      sample(formulierungen,1)
    })
    # report how many episodes remain in the pool
    output$weitere_folgen<-renderText({
      #weitere<-random_drei_fragezeichen(n_folgen = input$n_folgen,save=F)
      if(weitere>0){
        paste("jetzt sind noch",weitere,"Folgen übrig")
      }else{
        "du hast jetzt alle Folgen durchgehört und es beginnt wieder von vorn!"
      }
    })
    # date-based greeting (birthday / Christmas / app anniversary)
    output$datum<-renderText({
      datum<-format(Sys.Date(),"%d.%m")
      apptag<-as.numeric(format(Sys.Date(),"%Y"))-2019
      # BUG FIX: the original used three independent `if` statements whose
      # values were discarded, so only the last branch could ever be the
      # value of this renderText() expression and the birthday/Christmas
      # greetings were never displayed. An if/else chain returns the first
      # matching greeting instead (empty string when no date matches).
      # Also `&` (vectorised) replaced with scalar `&&` in the condition.
      if(datum=="14.10"){
        "Alles Gute zum Geburtstag!"
      }else if(datum=="24.12"){
        "Frohe Weihnachten!"
      }else if(datum=="03.04" && apptag>0){
        paste0("heute hat die App ihren ",apptag,"ten Geburtstag!")
      }else{
        ""
      }
    })
  })
}
#run app
shinyApp(ui, server)
|
a4e190891f963d3271b5645fdfe22a7dd53f609a
|
3554cabe4cd7eda7abfba04b37dc9a9b41efaf90
|
/R_Script_Labelling.R
|
7499a242a9f1d778f7c8f3c4631e5895c5e52ce2
|
[] |
no_license
|
cschmidtlab/Labelling_R_Script
|
0f01921ce06e37d708578f0c5c1df38ad9183236
|
899a7355ecbfccd534c4e473d7597b6d0a76af1a
|
refs/heads/master
| 2020-08-23T05:28:43.190980
| 2020-02-26T11:17:57
| 2020-02-26T11:17:57
| 216,553,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,840
|
r
|
R_Script_Labelling.R
|
####Evaluation of NHS-Acetate and DEPC labelling for determination of solvent accessible amino acid residues in protein complexes####
#This R script was used to calculate a normalised intensity for each modified site.
#For this, MaxQuant output files containing information on the individual modifications
#(e.g. acetylation of lysine or acetylation of serine, threonine, tyrosine) were combined
#and filtered for the protein of interest.
#Modified sites with a localisation probability < 0.75 and a peptide score < 80 were discarded.
#The MaxQuant intensity of each modified site was then normalised
#by applying a normalisation factor calculated from the sum of all corresponding peptides (i.e. all modified and unmodified peptides) containing the respective site.
#The following equation was used: ((Intensity of modified residue)*100)/(sum corresponding peptides (modified+unmodified)).
#Subsequently, mean and standard error were calculated.

# Install if not installed
#install.packages("openxlsx")
#install.packages("tidyverse")
#install.packages("ggforce")
#install.packages("reshape")
#install.packages('Rmisc', dependencies = TRUE)

# Required libraries
library("openxlsx")
library("tidyverse")
library("ggforce")
library("reshape")
library("Rmisc")

#### 1. Import data ####
# ModificationSpecificPeptides.txt (MaxQuant result file) contains peptide-level intensities
peptides = read.table("modificationSpecificPeptides.txt", sep="\t", header = TRUE, stringsAsFactors = F)
# Peptides file (MaxQuant) is used for info of start and end of peptide sequence
peptides_start_end = read.table("peptides.txt", sep="\t", header = TRUE, stringsAsFactors = F)
# Modification files from MaxQuant (identified modified sites)
mod_sites_1 = read.table("mod_name_1.txt", sep="\t", header = TRUE, stringsAsFactors = F)
mod_sites_2 = read.table("mod_name_2.txt", sep="\t", header = TRUE, stringsAsFactors = F)
mod_sites_3 = read.table("mod_name_3.txt", sep="\t", header = TRUE, stringsAsFactors = F)
mod_sites_4 = read.table("mod_name_4.txt", sep="\t", header = TRUE, stringsAsFactors = F)
# File Frac_ASA_protein_structure contains info of GETAREA output, "Ratio." is the relative solvent accessibility of the amino acid
Frac_asa <- read.xlsx("Frac_ASA_Protein_structure.xlsx")

#### 1.1 Add start and end position of every peptide to peptides table ####
# BUG FIX: the original selected from an undefined object `my_data_info`.
# Start/End positions come from peptides.txt (peptides_start_end, see comment
# above); they are merged onto the intensity table by sequence + protein.
peptide_positions = select(peptides_start_end, Start.position, End.position, Proteins, Sequence)
peptides = merge(peptide_positions, peptides, by=c("Sequence","Proteins"))

#### 1.2 Remove all contaminant entries and "no Name" entries ####
peptides = peptides[-grep("^CON", peptides$Proteins),]
peptides = peptides[peptides$Proteins != "",]

#### 1.3 Change structure of peptide table ####
#Name: Experiment_1 can be changed in experimental table within MaxQuant search
peptides = select(peptides, Start.position, End.position, Proteins, Intensity.Experiment_1_R1:Intensity.Experiment_5_R3)
peptides_new_structure <- melt(peptides, id=(c("Start.position", "End.position","Proteins")))

#### 2. Filter modified sites ####
#### 2.1 Add column with specific modification information ####
mod_sites_1$Modification = "mod_name_1"
mod_sites_2$Modification = "mod_name_2"
mod_sites_3$Modification = "mod_name_3"
mod_sites_4$Modification = "mod_name_4"

### 2.2 Merge modification files into one ####
mod_sites = merge(mod_sites_1,mod_sites_2, all=TRUE)
mod_sites = merge(mod_sites,mod_sites_3, all=TRUE)
mod_sites = merge(mod_sites,mod_sites_4, all=TRUE)

#### 2.3 Select rows without contaminants ####
mod_sites = mod_sites[-grep("^CON", mod_sites$Proteins),]
# BUG FIX: the original referenced the undefined object `mod_sites2`
mod_sites = mod_sites[mod_sites$Proteins != "",]

#### 2.4 Select rows according to score and localisation probability ####
# Score selection can be adjusted
mod_sites = mod_sites[mod_sites$Score > 80,]
# Localization probability filter can be adjusted
mod_sites = mod_sites[mod_sites$Localization.prob > 0.75,]

#### 2.5 Change structure of modification table ####
# CONSISTENCY FIX: intensity columns use the same "Intensity.Experiment_x_Ry"
# naming as section 1.3 and sections 4.1/4.2 (the original mixed in
# dataset-specific "Intensity.ADH_DEPC_..." column names here).
mod_sites = select(mod_sites, Protein, Position, Amino.acid, Modification, Intensity.Experiment_1_R1:Intensity.Experiment_5_R3)
mod_sites_new_structure <- melt(mod_sites, id=(c("Protein","Position","Amino.acid","Modification")))

#### 3. Normalisation of Intensities ####
#### 3.1 Sum intensities of (unmodified+modified) peptides of every modified site ####
# For each modified site, peptides of the same replicate and protein whose
# sequence spans the site position are summed (modified + unmodified).
# Preallocate the result column instead of growing it inside the loop.
mod_sites_new_structure$Sum_cor_pept <- NA_real_
for(i in seq_len(nrow(mod_sites_new_structure))){
  mod_pos = as.integer(mod_sites_new_structure$Position[i])
  repl = mod_sites_new_structure$variable[i]
  protein = mod_sites_new_structure$Protein[i]
  # BUG FIX: the original referenced the undefined object `peptides_structure`
  pept = which(peptides_new_structure$variable == repl &
                 as.integer(peptides_new_structure$Start.position) <= mod_pos &
                 as.integer(peptides_new_structure$End.position) >= mod_pos &
                 peptides_new_structure$Proteins == protein)
  mod_sites_new_structure$Sum_cor_pept[i] = sum(peptides_new_structure$value[pept], na.rm = TRUE)
}

#### 3.2 Build ratio modified / (unmodified+modified) ####
mod_sites_ratio = mod_sites_new_structure
mod_sites_ratio$Ratio <- (mod_sites_ratio$value *100)/(mod_sites_ratio$Sum_cor_pept)

#### 4. Add new columns with additional information ####
#### 4.1 Concentration (molar excess labelling reagent) ####
mod_sites_ratio$Concentration <- 0 #Experiment 1
mod_sites_ratio$Concentration[mod_sites_ratio$variable %in% c("Intensity.Experiment_2_R1",
                                                              "Intensity.Experiment_2_R2",
                                                              "Intensity.Experiment_2_R3")] <- 2 # value of molar excess
mod_sites_ratio$Concentration[mod_sites_ratio$variable %in% c("Intensity.Experiment_3_R1",
                                                              "Intensity.Experiment_3_R2",
                                                              "Intensity.Experiment_3_R3")] <- 10
mod_sites_ratio$Concentration[mod_sites_ratio$variable %in% c("Intensity.Experiment_4_R1",
                                                              "Intensity.Experiment_4_R2",
                                                              "Intensity.Experiment_4_R3")] <- 50
mod_sites_ratio$Concentration[mod_sites_ratio$variable %in% c("Intensity.Experiment_5_R1",
                                                              "Intensity.Experiment_5_R2",
                                                              "Intensity.Experiment_5_R3")] <- 100

#### 4.2 Replicate information ####
# BUG FIX: the original referenced the undefined object `mod_sites_melt` and
# used the invalid R regex escape "\d" (must be "\\d"); the "." is escaped as
# well so it matches a literal dot rather than any character.
mod_sites_ratio$Replicate = mod_sites_ratio$variable
mod_sites_ratio$Replicate = gsub("Intensity\\.Experiment_\\d_R", "", mod_sites_ratio$Replicate)

#### 4.3 Add column with Frac_ASA info (obtained from GETAREA) and save file ####
Frac_asa$Position=Frac_asa$Residue
mod_sites_ratio_final=merge(mod_sites_ratio,Frac_asa, by = "Position", all.x= TRUE)
write.xlsx(mod_sites_ratio_final, "Name_of_new_file.xlsx")

#### 5.Calculate mean values and standard error for every concentration ####
mod_sites_ratio_final <- as_data_frame(mod_sites_ratio_final)
mod_sites_ratio_final <- summarySE(mod_sites_ratio_final, measurevar="Ratio", groupvars=c("Position","Amino.acid","Concentration","Modification","Sidechain","Ratio.","In.Out"))

#### 6. Plot results for every amino acid ####
# Create colour gradient 0-20 buried, above 50 solvent accessible
custom <- colorRampPalette(c("red4","red3","orangered3","orange1","orange","cyan3","deepskyblue2","dodgerblue1","dodgerblue3","blue","blue3"))
# Plot
plot_SE = ggplot(mod_sites_ratio_final, aes(x= as.numeric(Concentration), y=Ratio, order= Position, colour=Ratio., shape= Modification)) +
  geom_errorbar(aes(ymin=Ratio-se, ymax=Ratio+se),size=1.5, width=.1) +
  geom_point(size= 4)+
  scale_y_continuous(name = "Percent labelled peptide")+
  scale_x_continuous(name = "Molar excess labelling reagent")+
  facet_wrap_paginate(~ Position + Modification, nrow = 9, ncol =7, scales ="free_x", page=1)+
  theme_grey(base_size = 30)+
  theme(panel.spacing = unit(2, "lines"))+
  scale_colour_gradientn(name = element_blank(),
                         breaks = c(20,50),
                         labels = c("Buried","Solvent accessible"),
                         colours = custom(100),
                         limits =c (0,100),
                         na.value = "grey50")+
  ylim (0,100)+
  theme(strip.background = element_blank(),
        strip.text.x = element_blank())+
  scale_shape_manual(values=c(15,19))+
  geom_text(
    size = 9,
    # NOTE(review): `id1` (per-panel label data containing a `Label` column)
    # is never defined in this script -- it must be created before plotting.
    # TODO confirm with the original authors what `id1` should contain.
    data = id1,
    mapping = aes(x = -Inf, y = Inf, label = Label),
    hjust = -0.2,
    vjust = 1.5,
    col = "black")
x11()
plot_SE
|
b402005759b26bfa4ca1e9cf02281832e6baa591
|
22989ddedc61cb52110cd8d9a9c454da71337f8b
|
/man/xrd.Rd
|
fcc569f9f00a645ab6aa544eff0304ce6a0ad606
|
[] |
no_license
|
rmatev/readcern
|
89a5e72284d9ac1e076ee9048529c847f3d2dc7c
|
22b5ae18e2220dcd3784e02121b7b3997aa3dfde
|
refs/heads/master
| 2022-01-07T22:30:52.742927
| 2019-05-31T22:26:18
| 2019-05-31T22:26:18
| 107,023,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 720
|
rd
|
xrd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xrd.r
\name{xrd}
\alias{xrd}
\alias{xrd_command}
\title{Open an xrootd connection.}
\usage{
xrd(url, decompress = TRUE)
xrd_command(url, decompress = TRUE)
}
\arguments{
\item{url}{URL starting with \code{root://}.}
\item{decompress}{Whether to automatically decompress files.}
}
\value{
A connection or the command to be given to \code{\link[=pipe]{pipe()}}.
}
\description{
\code{xrd()} returns a pipe, while \code{xrd_command()} returns the command
with which an xrootd url is piped.
}
\examples{
xrd('root://eosuser.cern.ch//eos/user/r/rmatev/readcern/README')
xrd_command('root://eosuser.cern.ch//eos/user/r/rmatev/readcern/README')
}
|
15521c78bdabee6dc0b160c09f3d15a2eec3b980
|
60a99dc425d9edca7b3dec562f5cf6367d9c61ec
|
/DistatisR/man/GraphDistatisPartial.Rd
|
9366048701b656a791ab6f09391cd1b789de7def
|
[] |
no_license
|
LukeMoraglia/ExPosition1
|
e7718ae848608f1dc3934513c6588f53f2c45a7f
|
a69da6c5b0f14ef9fd031b98c3b40b34dad5240f
|
refs/heads/master
| 2022-12-31T17:45:10.909002
| 2020-10-22T19:45:49
| 2020-10-22T19:45:49
| 255,486,130
| 0
| 1
| null | 2020-10-22T18:08:38
| 2020-04-14T02:01:12
|
R
|
UTF-8
|
R
| false
| false
| 4,034
|
rd
|
GraphDistatisPartial.Rd
|
\name{GraphDistatisPartial}
\alias{GraphDistatisPartial}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot maps of the factor scores and partial factor scores of the observations for a \acronym{DISTATIS} analysis.
}
\description{
\code{GraphDistatisPartial} plots maps of the factor scores of the observations
from a \code{\link{distatis}} analysis. \code{GraphDistatisPartial} gives a map of the factors scores of the observations plus partial factor scores, as "seen" by each of the matrices.
}
\usage{
GraphDistatisPartial(FS, PartialFS, axis1 = 1, axis2 = 2, constraints = NULL,
item.colors = NULL, participant.colors = NULL, ZeTitle = "Distatis-Partial",
Ctr=NULL, color.by.observations = TRUE, nude = FALSE, lines = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{FS}{The factor scores of the observations (\code{$res4Splus$F}from \code{distatis}).}
\item{PartialFS}{The partial factor scores of the observations (\code{$res4Splus$PartialF} from \code{distatis})}
\item{axis1}{The dimension for the horizontal axis of the plots.}
\item{axis2}{The dimension for the vertical axis of the plots.}
\item{constraints}{constraints for the axes}
  \item{item.colors}{A \eqn{I\times 1}{I*1} matrix (with \eqn{I} = # observations)
  of color names for the observations. If NULL (default), \code{prettyGraphs} chooses.}
  \item{participant.colors}{A \eqn{I\times 1}{I*1} matrix (with \eqn{I} = # participants)
  of color names for the participants. If NULL (default), \code{prettyGraphs} chooses.
  }
\item{ZeTitle}{General title for the plots.}
\item{Ctr}{Contributions of each observation. If NULL (default), these are computed from FS}
\item{color.by.observations}{if \code{TRUE} (default), the partial factor scores are colored by \code{item.colors}. When \code{FALSE}, \code{participant.colors} are used.}
\item{nude}{When \code{nude} is \code{TRUE} the labels for the observations are not plotted (useful when editing the graphs for publication).}
\item{lines}{If \code{TRUE} (default) then lines are drawn between the partial factor score of an observation and the compromise factor score of the observation.}
}
\details{
Note that, in the current version, the graphs are plotted as R-plots
and are \emph{not} passed back by the routine.
So the graphs need to be saved "by hand" from the R graphic windows.
We plan to improve this in a future version.
}
\value{
\item{constraints}{A set of plot constraints that are returned.}
\item{item.colors}{A set of colors for the observations are returned.}
\item{participant.colors}{A set of colors for the participants are returned.}
}
\references{
The plots are similar to the graphs from
Abdi, H., Valentin, D., O'Toole, A.J., & Edelman, B. (2005).
DISTATIS: The analysis of multiple distance matrices.
\emph{Proceedings of the IEEE Computer Society: International Conference on Computer Vision and Pattern Recognition}.
(San Diego, CA, USA). pp. 42-47.
see \url{www.utdallas.edu/~herve}
}
\author{
Derek Beaton and Herve Abdi
}
\seealso{\code{\link{GraphDistatisAll}} \code{\link{GraphDistatisCompromise}} \code{\link{GraphDistatisPartial}} \code{\link{GraphDistatisBoot}} \code{\link{GraphDistatisRv}} \code{\link{distatis}}
}
\examples{
# 1. Load the DistAlgo data set (available from the DistatisR package)
data(DistAlgo)
# DistAlgo is a 6*6*4 Array (face*face*Algorithm)
#-----------------------------------------------------------------------------
# 2. Call the DISTATIS routine with the array of distance (DistAlgo) as parameter
DistatisAlgo <- distatis(DistAlgo)
# 3. Plot the compromise map with the labels for the first 2 dimensions
# DistatisAlgo$res4Splus$F are the factors scores for the 6 observations (i.e., faces)
# DistatisAlgo$res4Splus$PartialF are the partial factors scores
##(i.e., one set of factor scores per algorithm)
GraphDistatisPartial(DistatisAlgo$res4Splus$F,DistatisAlgo$res4Splus$PartialF)
}
\keyword{DistatisR}
\keyword{mds}
|
87c6fbf3a55a05989c351b723e6bb4b8f8bef46a
|
3710f7ba59bef100b3f4cc7e13cc248fd3fe61c4
|
/R/utils.R
|
3d4f2b1547543cfbdc2fdeb04fad5f4575897284
|
[
"MIT"
] |
permissive
|
aliouneriddle/shiny.semantic
|
8959828af6c416cffb5ebf5c4d65ce6e0f0cfbff
|
8f6dd67faaa1464a76911ed37118afe4fd3d6a89
|
refs/heads/master
| 2022-11-19T14:00:24.397199
| 2020-07-03T13:58:25
| 2020-07-03T13:58:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 623
|
r
|
utils.R
|
#' Parse the `shiny_input` value from JSON
#'
#' @param val value to get from JSON; may be \code{NULL}, in which case an
#'   empty JSON string (\code{'""'}) is parsed instead.
#'
#' @return Value of type defined in `shiny_input`
#' @export
parse_val <- function(val) {
  # A plain if/else replaces the original ifelse(): ifelse() is vectorised,
  # so it would silently return only val[1] (and strip attributes) if `val`
  # ever had length > 1; the NULL check is inherently scalar.
  jsonlite::fromJSON(if (is.null(val)) '""' else val)
}
#' Check if color is set from Semantic-UI palette
#'
#' @param color character with color name
#'
#' @return Error when \code{color} does not belong to palette
#' @export
#' @examples
#' check_proper_color("blue")
check_proper_color <- function(color) {
  # The empty string is treated as a valid (default) choice alongside the
  # named Semantic-UI palette entries.
  allowed <- c("", names(semantic_palette))
  if (color %in% allowed) {
    invisible(color)
  } else {
    stop("Wrong color parameter specified!")
  }
}
|
809a8cf6b07c7ac84e581b46cf68a2cb4242228d
|
ca9f4ade5bd0dec1d9945ec33ec01386323608a9
|
/R/tagsim.R
|
9f26449f47e55b355823cbfb8340fdcc2df8b8e2
|
[] |
no_license
|
AustralianAntarcticDivision/tagsim
|
b25baf7bc20d26b83a893214b39155d6830e8443
|
4c80ac8b5da8445ccae8be9678dd8fe6bae84863
|
refs/heads/master
| 2020-05-21T07:26:39.095224
| 2018-04-10T06:34:13
| 2018-04-10T06:34:13
| 84,594,946
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,508
|
r
|
tagsim.R
|
#' tagsim: A simple population model for tag-based simulations
#'
#' A simple population model for tag-based simulations for fisheries based on
#' the fisheries operating model written by Philippe Ziegler.
#'
#' Simulation model
#'
#' The simulation model is aimed at evaluating tag-based scenarios, however
#' scenarios without tagging data can be implemented.
#'
#' Assessment model
#'
#' Simple assessments are implemented including
#'
#' 1) Tag-based estimates
#'
#' Simulation Processes
#'
#' Processes in the model occur in the following order
#'
#' 1 New season processes
#'
#' 1.1 create a temp vector to store the population and tag numbers
#' last year
#'
#' 1.2.1 Move untagged population
#'
#' 1.2.2 Move the tagged population
#'
#' 1.3 Estimate recruitment from last season numbers (there is no growth)
#'
#' 1.3.1 add the recruitment (recruits don't move); consider saving this
#'
#' 2 Harvest and natural mortality
#'
#' 2.1 calculate population size by strata (tagged + untagged)
#'
#' 2.1.1 update the harvest rate/strategy based on last seasons abundance
#'
#' 2.1.2 calculate an F based on fishery type (Ricker I or II)
#'
#' 2.2 calculate the total catch & natural mortality
#'
#' 2.3 remove from the population tagged and untagged
#'
#' 2.4 calculate the number of tags observed etc
#'
#' 3 Assign the end of season population size to the storage
#'
#' 4 Estimate abundance and store the result
#'
#' 5 move to the next year and repeat
#'
#' @docType package
#' @name tagsim
NULL
#> NULL
|
25fdb950aa83a5f8c75295653e16c154725d4afa
|
9f916fe7828f79b3355bc6ff3509ff4b0a62d0b7
|
/R/data.R
|
9ab2b857a608861d8e7bee268a875990b3bcb9e6
|
[
"BSD-3-Clause"
] |
permissive
|
greenelab/ADAGEpath
|
9b5d44465a09fbed025bbdd793eaed89e7ec17a2
|
e556970ef334d86ddfbf533acb8753d4ddb93967
|
refs/heads/master
| 2023-08-01T00:11:19.677503
| 2022-05-20T16:48:18
| 2022-05-20T16:48:18
| 70,191,632
| 5
| 7
|
BSD-3-Clause
| 2022-05-20T16:48:19
| 2016-10-06T20:46:19
|
R
|
UTF-8
|
R
| false
| false
| 4,674
|
r
|
data.R
|
#' eADAGE model
#'
#' The 300-node eADAGE model used in the paper "Unsupervised extraction of
#' functional gene expression signatures in the bacterial pathogen Pseudomonas
#' aeruginosa with eADAGE"
#'
#' @format A data frame (tibble) with 5549 rows and 301 variables. The first
#' column is geneID that specifies gene identifiers. Starting from
#' the second column, each column stores numeric weight values of one node
#' @source \url{https://doi.org/10.1101/078659}
"eADAGEmodel"
#' Pseudomonas aeruginosa gene expression compendium
#'
#' The compendium contains all P.a. microarray datasets measured on the
#' [Pae_G1a] Affymetric Pseudomonas aeruginosa array
#' platform from the ArrayExpress database as of 31 July 2015. The expression
#' values in the compendium have been background corrected and quantile
#' normalized.
#'
#' @format A data frame (tibble) with 5549 rows and 1052 variables. The first
#' column is geneID that specifies gene identifiers. Starting from
#' the second column, each column stores numeric expression values of one sample.
#' @source \url{https://doi.org/10.1101/078659}
"PAcompendium"
#' Probe distribution
#'
#' The quantile distribution of microarray probes used in building the preloaded
#' PAcompendium data object. It will be used as a reference distribution to
#' quantile normalize new datasets.
#'
#' @format A numeric vector:
"probedistribution"
#' Gene information
#'
#' Pseudomonas aeruginosa gene information curated by NCBI.
#' It was downloaded from NCBI ftp on Oct. 7 2016. It is used for
#' mapping PA numbers (LocusTag) to gene Symbol.
#'
#' @format A data.frame (tibble) with 5698 rows and 15 variables:
#' \describe{
#' \item{\samp{#tax_id}}{a numeric vector, taxonomic id of P. aeruginosa}
#' \item{\code{GeneID}}{a numeric vector, unique gene identifiers}
#' \item{\code{Symbol}}{a character vector, gene symbols or gene names}
#' \item{\code{LocusTag}}{a character vector, locus tags or PA numbers}
#' \item{\code{Synonyms}}{a numeric vector}
#' \item{\code{dbXrefs}}{a character vector}
#' \item{\code{chromosome}}{a numeric vector}
#' \item{\code{map_location}}{a numeric vector}
#' \item{\code{description}}{a character vector}
#' \item{\code{type_of_gene}}{a character vector}
#' \item{\code{Symbol_from_nomenclature_authority}}{a numeric vector}
#' \item{\code{Full_name_from_nomenclature_authority}}{a numeric vector}
#' \item{\code{Nomenclature_status}}{a numeric vector}
#' \item{\code{Other_designations}}{a numeric vector}
#' \item{\code{Modification_date}}{a numeric vector}
#' }
#' @source \url{ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/GENE_INFO/Archaea_Bacteria/Pseudomonas_aeruginosa_PAO1.gene_info.gz}
"geneinfo"
#' Pseudomonas aeruginosa PAO1 strain orthologs
#'
#' Pseudomonas aeruginosa PAO1 strain ortholog predictions obtained from
#' http://pseudomonas.com on Oct. 10 2016.
#' It uses genes in PAO1 strain as query and finds
#' orthologs hit in other P.a. strains.
#'
#' @format A data.frame (tibble) with 392139 rows and 8 variables:
#' \describe{
#' \item{Strain (Query)}{Pseudomonas aeruginosa PAO1 (Reference)}
#' \item{Locus Tag (Query)}{Locus tag for PAO1}
#' \item{Description (Query)}{Description of the PAO1 gene}
#' \item{Strain (Hit)}{Ortholog hit strain}
#' \item{Locus Tag (Hit)}{Locus tag of the hit ortholog}
#' \item{Description (Hit)}{Description of the hit ortholog}
#' \item{Percent Identity}{Percentage of sequence identity}
#' \item{Alignment Length}{Alignment length}
#' ...
#' }
#' @source \url{http://pseudomonas.com/downloads/pseudomonas/pgd_r_16_1/Pseudomonas_aeruginosa_PAO1_107/Pseudomonas_aeruginosa_PAO1_107_orthologs.txt}
"PAO1orthologs"
#' Experiments and samples in Pseudomonas aeruginosa gene expression compendium
#'
#' ArrayExpress experiment accession numbers and their associated sample IDs
#' included in the preloaded PAcompendium data object.
#' There are duplicates in sample IDs because one sample can be included in
#' multiple experiments.
#' @format A data.frame with 1118 rows and 2 variables:
#' \describe{
#'   \item{Experiment}{ArrayExpress experiment accession numbers, only contain
#' experiments in the preloaded PAcompendium data object.}
#' \item{Sample}{Sample IDs in "xxx.CEL" format}
#' }
"experimentID"
#' Pseudomonas aeruginosa PAO1 operons
#'
#' Pseudomonas aeruginosa PAO1 operon predictions obtained from the DOOR (
#' Database of prOkaryotic OpeRons) database on Dec. 12 2016.
#'
#' @format A list with each element being a character vector that stores genes
#' in one operon. Locus tag (PAXXXX) is used as gene ID.
#' @source \url{http://csbl.bmb.uga.edu/DOOR/index.php}
"operons"
|
12e0186abe0806067d8de509fb74ef6718557dd5
|
a97dabb75b5120735a54e251fc036b003f273f4e
|
/common/functions.R
|
1f6790a58572feefdb91da552cb0ff13e36c262f
|
[] |
no_license
|
Acanthiza/template
|
c72b946aa082fc36aa107aca3ec38ebc56ba5f30
|
6e8414c856872fae4bcacc2efff1ff1f2d0dc01e
|
refs/heads/master
| 2021-07-25T19:16:24.173131
| 2021-07-22T05:20:21
| 2021-07-22T05:20:21
| 240,398,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,006
|
r
|
functions.R
|
# https://stackoverflow.com/questions/55432420/how-can-i-commit-changes-to-github-from-within-a-r-script
# Git add.
# Stage every change in `dir` (git add --all).
# Builds a single Windows shell one-liner: switch to the drive (e.g. "c:"),
# cd into the directory, then run git. Windows-only (uses shell()).
gitadd <- function(dir = getwd()) {
  drive <- tolower(substr(dir, 1, 2))
  steps <- c(drive, paste("cd", dir), "git add --all")
  shell(paste(steps, collapse = " & "))
}
# Git commit.
# Commit staged changes with message `msg` inside repository `dir`.
# BUG FIX: the original ignored its `dir` argument and always committed in
# R's current working directory; it now switches drive and directory first,
# matching the behaviour of gitadd() and gitpush(). Windows-only (shell()).
gitcommit <- function(msg = "commit from Rstudio", dir = getwd()){
  cmd_list <- list(
    cmd1 = tolower(substr(dir,1,2)),
    cmd2 = paste("cd",dir),
    cmd3 = sprintf("git commit -m\"%s\"",msg)
  )
  cmd <- paste(unlist(cmd_list),collapse = " & ")
  shell(cmd)
}
# Git push.
# Push the current branch of repository `dir` to its remote (git push).
# Same drive-switch / cd / git pattern as gitadd(). Windows-only (shell()).
gitpush <- function(dir = getwd()) {
  drive <- tolower(substr(dir, 1, 2))
  steps <- c(drive, paste("cd", dir), "git push")
  shell(paste(steps, collapse = " & "))
}
# Cite a package in rmarkdown
# assumes these have been run and that 'packages' contains all packages to cite
# knitr::write_bib(packages,"packageCitations.bib")
# refs <- bib2df::bib2df("packageCitations.bib")
# Build an rmarkdown citation string for `package` from the global `refs`
# tibble (bib2df of packageCitations.bib; see comments above).
#
# package:   package name to cite.
# brack:     wrap the citation in square brackets ([@key]) vs inline (@key).
# startText: text inserted before the citation key(s).
# endText:   text appended after the citation key(s).
cite_package <- function(package,brack = TRUE,startText = "", endText = "") {
  # Keys either end in "-package" (e.g. "R-pkg") or start with the package
  # name (e.g. year-suffixed keys like "pkg2020").
  thisRef <- refs %>%
    dplyr::filter(grepl(paste0("-",package),BIBTEXKEY) | grepl(paste0("^",package),BIBTEXKEY)) %>%
    dplyr::pull(BIBTEXKEY)
  starts <- if(brack) paste0("[",startText,"@") else paste0(startText,"@")
  ends <- if(brack) paste0(endText,"]") else endText
  if(length(thisRef) >= 1) {
    # BUG FIX: the original discarded a single matched key and hard-coded
    # "R-<package>", which is wrong whenever the one matching BIBTEXKEY is
    # not literally "R-<package>" (e.g. a year-keyed entry). Use the matched
    # key(s) whenever any were found.
    paste0(starts,paste0(thisRef,collapse = "; @"),ends)
  } else {
    # No match in refs: fall back to knitr's conventional "R-<package>" key.
    paste0(starts,"R-",package,ends)
  }
}
# fix bibliography issues, including tweaks for known package issues.
# Repair a .bib file in place and return the cleaned entries.
# - Wraps capitalised title words in {braces} so BibTeX preserves their case.
# - For package bibliographies (isPackageBib = TRUE), also braces the package
#   name inside the title.
# - Applies known author-field fixes (Microsoft Corporation bracing,
#   "Fortran original by"/"R port by" removal, "with contributions by" -> "and",
#   and a mis-encoded non-breaking space).
# The file at `bibFile` is OVERWRITTEN with the cleaned entries.
fix_bib <- function(bibFile, isPackageBib = FALSE) {
inRefs <- bib2df::bib2df(bibFile)
# remember the original column set so extra helper columns can be dropped
namesInRefs <- colnames(inRefs)
refs <- inRefs %>%
# derive the bare package name from keys like "R-pkg" / "pkg2020"
{if(isPackageBib) (.) %>% dplyr::mutate(package = gsub("R-|\\d{4}","",BIBTEXKEY)) else (.)} %>%
# split titles into words (space-delimited, case preserved, one row per word)
tidytext::unnest_tokens("titleWords"
,TITLE
,token = "regex"
,pattern = " "
,to_lower = FALSE
#,strip_punct = FALSE
,collapse = FALSE
) %>%
# strip any existing braces, then re-brace every word containing a capital
dplyr::mutate(titleWords = gsub("\\{|\\}","",titleWords)
, isCap = grepl(paste0(LETTERS,collapse="|"),titleWords)
, titleWords = if_else(isCap,paste0("{",titleWords,"}"),titleWords)
) %>%
# collapse the word rows back into a single TITLE per entry
tidyr::nest(data = c(titleWords,isCap)) %>%
dplyr::mutate(TITLE = map_chr(data,. %>% dplyr::pull(titleWords) %>% paste0(collapse = " "))
, AUTHOR = map(AUTHOR,~gsub("Microsoft Corporation","{Microsoft Corporation}",.))
, AUTHOR = map(AUTHOR,~gsub("Fortran original by |R port by ","",.))
, AUTHOR = map(AUTHOR,~gsub("with contributions by","and",.))
, AUTHOR = map(AUTHOR,~gsub("Â "," ",.))
) %>%
# brace the package name wherever it appears in the title
{if(isPackageBib) (.) %>% dplyr::mutate(TITLE = map2_chr(package,TITLE,~gsub(.x,paste0("{",.x,"}"),.y))) else (.)} %>%
dplyr::select(any_of(namesInRefs))
# write the cleaned entries back over the input file
bib2df::df2bib(refs,bibFile)
return(refs)
}
# Are the values within a column unique
# Report whether the values of column `col` uniquely identify rows of `df`.
# Side effect only: prints a message with the count (and values) of any
# duplicated entries; nothing useful is returned.
# NOTE(review): the grep() drops any pre-existing column literally named "n"
# so that dplyr::count() can create its own `n` column without a clash.
col_vals_unique <- function(df,col = "SiteID") {
notUnique <- df %>%
dplyr::select(grep("^n$",names(.),value = TRUE,invert = TRUE)) %>%
dplyr::count(!!ensym(col)) %>%
dplyr::filter(n > 1)
print(paste0("there are ",nrow(notUnique)," ",col,"(s) that are not unique: ",dplyr::pull(notUnique[,1])))
}
# Unscale scaled data
# Invert base::scale(): reconstruct the original values from an object that
# carries the "scaled:center" and "scaled:scale" attributes set by scale().
unscale_data <- function(scaledData) {
  centers <- attr(scaledData, "scaled:center")
  scales <- attr(scaledData, "scaled:scale")
  scaledData * scales + centers
}
# From https://github.com/mtennekes/tmap/issues/255 - who got it from:
#http://stackoverflow.com/questions/20241065/r-barplot-wrapping-long-text-labels
# Core wrapping function
# Wrap each string in x into lines of at most `len` characters, joining the
# wrapped pieces of each element with newlines.
# Returns an unnamed character vector the same length as x.
wrap.it <- function(x, len) {
  wrap_one <- function(s) paste(strwrap(s, len), collapse = "\n")
  sapply(x, wrap_one, USE.NAMES = FALSE)
}
# Wrap long labels.  Accepts either a character vector or a list of character
# vectors; applies wrap.it() to each element, preserving the input structure.
wrap.labels <- function(x, len) {
  if (!is.list(x)) {
    return(wrap.it(x, len))
  }
  lapply(x, function(el) wrap.it(el, len))
}
# From https://gist.github.com/danlwarren/271288d5bab45d2da549
# Function to rarefy point data in any number of dimensions. The goal here is to
# take a large data set and reduce it in size in such a way as to approximately maximize the
# difference between points. For instance, if you have 2000 points but suspect a lot of
# spatial autocorrelation between them, you can pass in your data frame, the names (or indices)
# of the lat/lon columns, and the number 200, and you get back 200 points from your original data
# set that are chosen to be as different from each other as possible given a randomly chosen
# starting point
# Input is:
#
# x, a data frame containing the columns to be used to calculate distances along with whatever other data you need
# cols, a vector of column names or indices to use for calculating distances
# npoints, the number of rarefied points to spit out
#
# e.g., thin.max(my.data, c("latitude", "longitude"), 200)
# Spatially rarefy points in any number of dimensions: greedily select `npoints`
# rows of x that are approximately maximally spread out in the space defined by
# `cols`, starting from a random seed point.
#
# x       : data frame containing `cols` plus any other columns to carry along.
# cols    : column names or indices used for the distance calculation.
# npoints : number of rarefied points to return.
# Returns the selected rows of x (all columns).  Result is random; set.seed()
# for reproducibility.
#
# Fixes over the original:
#  * first point drawn with sample.int() — as.integer(runif(1, 1, n)) could
#    never select the last row and was biased at the edges,
#  * npoints >= nrow(x) returns x unchanged instead of looping and padding the
#    result with duplicated rows,
#  * npoints == 1 returns exactly one row (previously two),
#  * dead tie-breaking branch removed (which.max() always returns one index).
thin_max <- function(x, cols, npoints){
  n <- nrow(x)
  if (npoints >= n) {
    return(x)
  }
  # full pairwise distance matrix in the chosen dimensions
  this.dist <- as.matrix(dist(x[, cols], upper = TRUE))
  # seed with a random point, then add the point farthest from it
  inds <- sample.int(n, 1)
  inds <- c(inds, which.max(this.dist[, inds]))
  while (length(inds) < npoints) {
    # for each point, distance to its nearest already-selected point
    min.dists <- apply(this.dist[, inds], 1, min)
    # select the point farthest from everything selected so far
    inds <- c(inds, which.max(min.dists))
  }
  x[inds[seq_len(npoints)], ]
}
# Run a k-fold (or whole-data) random forest classifying `clustCol` from the
# environmental columns `envCols`, writing out-of-fold predictions (and
# optionally importance / the fitted model) to feather/rds files per fold.
#
# envClust  : data frame with an id column, a cluster column and predictors.
# clustCol  : response column, name or index.
# envCols   : predictor columns, names or indices.
#             NOTE(review): default references global `patchesEnvSelect` — confirm
#             it exists in the calling environment.
# idCol     : identifier column carried through to the prediction files.
# doFolds   : number of folds; 1 fits a single forest on all rows.
#             NOTE(review): default references global `folds` — confirm.
# outFile   : template path; "_conf" is replaced with "_foldK_conf" per fold.
# saveModel : if TRUE, also save the fitted model (readr::write_rds).
# saveImp   : if TRUE, compute and save variable importance.
# Side effects only (files on disk); existing per-fold files are NOT recomputed.
rf_mod_fold <- function(envClust
                        , clustCol = "cluster"
                        , envCols = names(patchesEnvSelect)[-1]
                        , idCol = "SiteID"
                        , doFolds = folds
                        , outFile = fs::path("out","rfMod_conf.feather")
                        , saveModel = FALSE
                        , saveImp = FALSE
                        , ...
                        ){
  # allow columns to be given by position as well as name
  idCol <- if(is.numeric(idCol)) names(envClust)[idCol] else idCol
  clustCol <- if(is.numeric(clustCol)) names(envClust)[clustCol] else clustCol
  envCols <- if(is.numeric(envCols)) names(envClust)[envCols] else envCols
  # randomly assign every row to a fold with equal probability
  envClust <- envClust %>%
    dplyr::mutate(fold = sample(1:doFolds,nrow(.),replace=TRUE,rep(1/doFolds,doFolds)))
  folds <- 1:doFolds
  fold_rf_mod <- function(fold) {
    outFile <- gsub("_conf",paste0("_fold",fold,"_conf"),outFile)
    # skip folds whose prediction file already exists (cheap resume)
    if(!file.exists(outFile)) {
      if(doFolds > 1) {
        # train on all other folds, test on this fold
        train <- envClust[envClust$fold != fold,which(names(envClust) %in% c(clustCol,envCols))] %>%
          dplyr::mutate(!!ensym(clustCol) := factor(!!ensym(clustCol)))
        test <- envClust[envClust$fold == fold,which(names(envClust) %in% c(idCol,clustCol,envCols))] %>%
          dplyr::mutate(!!ensym(clustCol) := factor(!!ensym(clustCol)))
        rfMod <- randomForest(x = train[,envCols]
                      , y = train[,clustCol] %>% dplyr::pull(!!ensym(clustCol))
                      , ntree = 500
                      , importance = saveImp
                      )
        # hold-out predictions: class label plus per-class probabilities
        rfPred <- test %>%
          dplyr::select(!!ensym(idCol),!!ensym(clustCol)) %>%
          dplyr::bind_cols(predict(rfMod
                                   , newdata = test[,envCols]
                                   ) %>%
                             tibble::enframe(name = NULL, value = "predCluster")
                           ) %>%
          dplyr::bind_cols(predict(rfMod
                                   , newdata = test[,envCols]
                                   , type = "prob"
                                   ) %>%
                             as_tibble()
                           )
      } else {
        # single model on all rows; predictions are out-of-bag
        rfMod <- randomForest::randomForest(x = envClust[,which(names(envClust) %in% c(envCols))]
                                            , y = envClust %>% dplyr::pull(!!ensym(clustCol))
                                            , ntree = 500
                                            , importance = saveImp
                                            )
        rfPred <- envClust[,c(idCol,clustCol)] %>%
          dplyr::bind_cols(predict(rfMod) %>%
                             tibble::enframe(name = NULL, value = "predCluster")
                           ) %>%
          dplyr::bind_cols(predict(rfMod
                                   , type = "prob"
                                   ) %>%
                             as_tibble()
                           )
      }
      feather::write_feather(rfPred,outFile)
      if(saveImp) {feather::write_feather(as_tibble(rfMod$importance, rownames = "att"),gsub("_rfPred","_rfImp",outFile))}
      if(saveModel) {readr::write_rds(rfMod,gsub("_conf","_mod",outFile))}
    }
  }
  map(folds,fold_rf_mod)
}
# Get metrics from saved predictions: build a caret confusion matrix comparing
# predicted vs observed clusters.
# df : data frame with factor columns 'predCluster' (predicted) and 'cluster'
#      (reference), e.g. as written by rf_mod_fold()/rf_mod().
make_conf <- function(df) {
  caret::confusionMatrix(df$predCluster,df$cluster)
}
# Fit one random forest classifying `clustCol` from `envCols` over the whole
# data set (no folds; out-of-bag predictions).  Companion to rf_mod_fold().
#
# envClust  : data frame holding id, cluster and predictor columns.
# clustCol  : response column (name or index); pulled as-is, so it should
#             already be a factor.
# envCols   : predictor columns (names or indices).
# idCol     : identifier column carried through to the prediction file.
# outFile   : path the OOB predictions are written to (feather).
# saveModel : if TRUE, save the fitted model next to outFile.
# saveImp   : if TRUE, compute and save variable importance.
# Side effects only: writes prediction / importance / model files.
#
# Bug fix: the model was previously saved with feather::write_feather(), which
# only accepts data frames and errors on a randomForest object; now saved with
# readr::write_rds(), consistent with rf_mod_fold().
rf_mod <- function(envClust
                   , clustCol
                   , envCols
                   , idCol
                   , outFile
                   , saveModel = FALSE
                   , saveImp = FALSE
                   , ...
                   ){
  # allow columns to be given by position as well as name
  idCol <- if(is.numeric(idCol)) names(envClust)[idCol] else idCol
  clustCol <- if(is.numeric(clustCol)) names(envClust)[clustCol] else clustCol
  envCols <- if(is.numeric(envCols)) names(envClust)[envCols] else envCols
  rfMod <- randomForest::randomForest(x = envClust[,which(names(envClust) %in% c(envCols))]
                                      , y = envClust %>% dplyr::pull(!!ensym(clustCol))
                                      , ntree = 500
                                      , importance = saveImp
                                      )
  # OOB predictions: class label plus per-class probabilities
  rfPred <- envClust[,c(idCol,clustCol)] %>%
    dplyr::bind_cols(predict(rfMod) %>%
                       tibble::enframe(name = NULL, value = "predCluster")
                     ) %>%
    dplyr::bind_cols(predict(rfMod
                             , type = "prob"
                             ) %>%
                       as_tibble()
                     )
  feather::write_feather(rfPred,outFile)
  if(saveImp) {feather::write_feather(as_tibble(rfMod$importance, rownames = "att"),gsub("_rfPred","_rfImp",outFile))}
  if(saveModel) {readr::write_rds(rfMod,gsub("_rfPred","",outFile))}
}
# Fit a 500-tree random forest from a formula supplied as a string (or
# anything as.formula() accepts).
# datTrain  : training data frame.
# modRecipe : model specification coercible to a formula.
# Returns the fitted randomForest object.
run_rf <- function(datTrain, modRecipe) {
  fml <- as.formula(modRecipe)
  randomForest::randomForest(fml, data = datTrain, ntree = 500)
}
# Get data out of 32-bit MS Access from a 64-bit R session (Windows only).
# Launches a 32-bit Rscript child process that reads the table over ODBC and
# ships it back to this session through an svSocket server.
# see https://stackoverflow.com/questions/13070706/how-to-connect-r-with-access-database-in-64-bit-window
#
# db_path   : path to the .mdb/.accdb file.
# db_table  : table or query name to fetch.
# table_out : name of the object the child assigns the result to.
# Side effects: starts/stops a local socket server on port 8642 and spawns a
# 32-bit Rscript; emits a message on success, a warning if db_path is missing.
# NOTE(review): the result appears to be delivered into this session via
# svSocket::evalServer under the name `table_out` — confirm the environment it
# lands in before relying on the return value (the function itself returns the
# value of message()/warning()).
access_query_32 <- function(db_path, db_table = "qryData_RM", table_out = "data_access") {
  library(svSocket)
  # variables to make values uniform
  sock_port <- 8642L
  sock_con <- "sv_con"
  ODBC_con <- "a32_con"
  if (file.exists(db_path)) {
    # build ODBC string
    ODBC_str <- local({
      s <- list()
      s$path <- paste0("DBQ=", gsub("(/|\\\\)+", "/", path.expand(db_path)))
      s$driver <- "Driver={Microsoft Access Driver (*.mdb, *.accdb)}"
      s$threads <- "Threads=4"
      s$buffer <- "MaxBufferSize=4096"
      s$timeout <- "PageTimeout=5"
      paste(s, collapse=";")
    })
    # start socket server to transfer data to 32 bit session
    startSocketServer(port=sock_port, server.name="access_query_32", local=TRUE)
    # build expression to pass to 32 bit R session
    expr <- "library(svSocket)"
    expr <- c(expr, "library(RODBC)")
    expr <- c(expr, sprintf("%s <- odbcDriverConnect('%s')", ODBC_con, ODBC_str))
    expr <- c(expr, sprintf("if('%1$s' %%in%% sqlTables(%2$s)$TABLE_NAME) {%1$s <- sqlFetch(%2$s, '%1$s')} else {%1$s <- 'table %1$s not found'}", db_table, ODBC_con))
    expr <- c(expr, sprintf("%s <- socketConnection(port=%i)", sock_con, sock_port))
    expr <- c(expr, sprintf("evalServer(%s, %s, %s)", sock_con, table_out, db_table))
    expr <- c(expr, "odbcCloseAll()")
    expr <- c(expr, sprintf("close(%s)", sock_con))
    expr <- paste(expr, collapse=";")
    # launch 32 bit R session and run expressions
    prog <- file.path(R.home(), "bin", "i386", "Rscript.exe")
    system2(prog, args=c("-e", shQuote(expr)), stdout=NULL, wait=TRUE, invisible=TRUE)
    # stop socket server
    stopSocketServer(port=sock_port)
    # display table fields
    message("retrieved: ", table_out, " - ", paste(colnames(get(table_out)), collapse=", "))
  } else {
    warning("database not found: ", db_path)
  }
}
# Create a full factorial grid of continuous values (usually for prediction):
# for each variable in `vars`, build a sequence of `seqLength` evenly spaced
# values between its observed min and max in `df`, then cross all sequences
# with expand.grid().
#
# df        : data frame holding the variables.
# vars      : character vector of column names.
# seqLength : number of grid points per variable.
# Returns a data frame with one column per variable and seqLength^length(vars) rows.
# NOTE(review): uses superseded tidyr/dplyr verbs (gather/spread/summarise_at/
# unnest-without-cols); behaviour depends on the installed tidyverse version.
create_grid_cont <- function(df,vars,seqLength){
  df %>%
    dplyr::summarise_at(vars, min) %>%
    dplyr::mutate(type = "min") %>%
    dplyr::bind_rows(df %>%
                       dplyr::summarise_at(vars, max) %>%
                       dplyr::mutate(type = "max")
    ) %>%
    # long form: one row per variable with its min and max
    tidyr::gather(variable,value,1:length(vars)) %>%
    tidyr::spread(type,value) %>%
    # evenly spaced sequence from min to max for each variable
    dplyr::mutate(values = map2(min,max,~seq(.x,.y,(.y-.x)/(seqLength-1)))) %>%
    dplyr::select(variable,values) %>%
    tidyr::unnest() %>%
    split(.$variable) %>%
    lapply(function(x) x %>% dplyr::select(2) %>% unlist()) %>%
    expand.grid()
}
# Function written by Andrew Bevan, found on R-sig-Geo, and modified by Pascal Title.
# Convert an alpha hull ('ahull' object from the alphahull package) into a
# SpatialPolygonsDataFrame (sp package).
#
# x           : an 'ahull' object.
# increment   : controls how many points each arc is discretised into.
# rnd         : decimal places coordinates are rounded to when joining arcs.
# proj4string : CRS for the resulting polygons.
# tol         : numerical tolerance used when deciding two arc end points meet.
# Returns a SpatialPolygonsDataFrame of the closed hull polygons with positive
# area, or NULL when no valid arcs exist.
# WARNING(review): the arc re-ordering loop below is order-sensitive; do not
# reorder statements without re-testing against known hulls.
ah2sp <- function(x, increment=360, rnd=10, proj4string=CRS(as.character(NA)),tol=1e-4) {
  if (class(x) != "ahull"){
    stop("x needs to be an ahull class object")
  }
  # Extract the edges from the ahull object as a dataframe
  xdf <- as.data.frame(x$arcs)
  #correct for possible arc order strangeness (Pascal Title addition 29 Nov 2013)
  k <- 1
  xdf <- cbind(xdf, flip = rep(FALSE, nrow(xdf)))
  repeat{
    if (is.na(xdf[k+1, 'end1'])) {
      break
    }
    #cat(k, '\n')
    if (xdf[k,'end2'] == xdf[k+1,'end1']) {
      #cat('case 1\n')
      k <- k + 1
    } else if (xdf[k,'end2'] != xdf[k+1,'end1'] & !xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & !xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
      #cat('case 2\n')
      k <- k + 1
    } else if (xdf[k,'end2'] != xdf[k+1,'end1'] & xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & !xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
      #cat('case 3\n')
      m <- which(xdf$end1[k+1:nrow(xdf)] == xdf[k,'end2']) + k
      xdf <- rbind(xdf[1:k,],xdf[m,],xdf[setdiff((k+1):nrow(xdf),m),])
    } else if (xdf[k,'end2'] != xdf[k+1,'end1'] & !xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
      #cat('case 4\n')
      m <- which(xdf$end2[k+1:nrow(xdf)] == xdf[k,'end2']) + k
      tmp1 <- xdf[m,'end1']
      tmp2 <- xdf[m,'end2']
      xdf[m,'end1'] <- tmp2
      xdf[m,'end2'] <- tmp1
      xdf[m,'flip'] <- TRUE
      xdf <- rbind(xdf[1:k,], xdf[m,], xdf[setdiff((k+1):nrow(xdf), m),])
    } else {
      k <- k + 1
    }
  }
  # Remove all cases where the coordinates are all the same
  xdf <- subset(xdf, xdf$r > 0)
  res <- NULL
  if (nrow(xdf) > 0) {
    # Convert each arc to a line segment
    linesj <- list()
    prevx <- NULL
    prevy <- NULL
    j <- 1
    for(i in 1:nrow(xdf)) {
      rowi <- xdf[i,]
      v <- c(rowi$v.x, rowi$v.y)
      theta <- rowi$theta
      r <- rowi$r
      cc <- c(rowi$c1, rowi$c2)
      # Arcs need to be redefined as strings of points. Work out the number of points to allocate in this arc segment.
      ipoints <- 2 + round(increment * (rowi$theta / 2), 0)
      # Calculate coordinates from arc() description for ipoints along the arc.
      angles <- alphahull::anglesArc(v, theta)
      if (rowi['flip'] == TRUE){ angles <- rev(angles) }
      seqang <- seq(angles[1], angles[2], length = ipoints)
      x <- round(cc[1] + r * cos(seqang),rnd)
      y <- round(cc[2] + r * sin(seqang),rnd)
      # Check for line segments that should be joined up and combine their coordinates
      if (is.null(prevx)) {
        prevx <- x
        prevy <- y
        # added numerical precision fix (Pascal Title Dec 9 2013)
      } else if ((x[1] == round(prevx[length(prevx)],rnd) | abs(x[1] - prevx[length(prevx)]) < tol) && (y[1] == round(prevy[length(prevy)],rnd) | abs(y[1] - prevy[length(prevy)]) < tol)) {
        if (i == nrow(xdf)){
          #We have got to the end of the dataset
          prevx <- append(prevx ,x[2:ipoints])
          prevy <- append(prevy, y[2:ipoints])
          prevx[length(prevx)] <- prevx[1]
          prevy[length(prevy)] <- prevy[1]
          coordsj <- cbind(prevx,prevy)
          colnames(coordsj) <- NULL
          # Build as Line and then Lines class
          linej <- Line(coordsj)
          linesj[[j]] <- Lines(linej, ID = as.character(j))
        } else {
          prevx <- append(prevx, x[2:ipoints])
          prevy <- append(prevy, y[2:ipoints])
        }
      } else {
        # We have got to the end of a set of lines, and there are several such sets, so convert the whole of this one to a line segment and reset.
        prevx[length(prevx)] <- prevx[1]
        prevy[length(prevy)] <- prevy[1]
        coordsj <- cbind(prevx,prevy)
        colnames(coordsj)<-NULL
        # Build as Line and then Lines class
        linej <- Line(coordsj)
        linesj[[j]] <- Lines(linej, ID = as.character(j))
        j <- j + 1
        prevx <- NULL
        prevy <- NULL
      }
    }
    #Drop lines that will not produce adequate polygons (Pascal Title addition 9 Dec 2013)
    badLines <- vector()
    for (i in 1:length(linesj)){
      if (nrow(linesj[[i]]@Lines[[1]]@coords) < 4){
        badLines <- c(badLines,i)
      }
    }
    if (length(badLines) > 0){linesj <- linesj[-badLines]}
    # Promote to SpatialLines
    lspl <- SpatialLines(linesj)
    # Convert lines to polygons
    # Pull out Lines slot and check which lines have start and end points that are the same
    lns <- slot(lspl, "lines")
    polys <- sapply(lns, function(x) {
      crds <- slot(slot(x, "Lines")[[1]], "coords")
      identical(crds[1, ], crds[nrow(crds), ])
    })
    # Select those that do and convert to SpatialPolygons
    polyssl <- lspl[polys]
    list_of_Lines <- slot(polyssl, "lines")
    sppolys <- SpatialPolygons(list(Polygons(lapply(list_of_Lines, function(x) { Polygon(slot(slot(x, "Lines")[[1]], "coords")) }), ID = "1")), proj4string=proj4string)
    # Create a set of ids in a dataframe, then promote to SpatialPolygonsDataFrame
    hid <- sapply(slot(sppolys, "polygons"), function(x) slot(x, "ID"))
    areas <- sapply(slot(sppolys, "polygons"), function(x) slot(x, "area"))
    df <- data.frame(hid,areas)
    names(df) <- c("HID","Area")
    rownames(df) <- df$HID
    res <- SpatialPolygonsDataFrame(sppolys, data=df)
    res <- res[which(res@data$Area > 0),]
  }
  return(res)
}
# glm 'chi-square': Bayesian binomial GLM alternative to a chi-squared test of
# association between two categorical variables.
#
# cont : data frame whose FIRST two columns are the categorical variables and
#        which contains integer columns `success` and `failure`.
# Fits a stan_glm binomial model with a var1*var2 interaction, draws posterior
# predictions on a 100-trial scale (so values read as percentages), and builds
# summary tables and ggplot objects for levels and pairwise differences.
# Returns (via mget) a named list of every object whose name starts with "mod":
# the model, diagnostic plots, predictions, summaries and difference plots.
# NOTE(review): levels of var2 with <= 3 successes in any cell are dropped
# before modelling — confirm that filtering is intended for your data.
contingency_glm <- function(cont){
  var1 <- names(cont)[1]
  var2 <- names(cont)[2]
  contingency <- cont %>%
    dplyr::mutate(var1 = factor(!!ensym(var1))
                  , var2 = factor(!!ensym(var2))
                  ) %>%
    dplyr::group_by(var2) %>%
    dplyr::filter(success > 3) %>%
    dplyr::mutate(levels = n()) %>%
    dplyr::ungroup() %>%
    dplyr::filter(levels == max(levels)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(!!var1 := if_else(is.na(!!ensym(var1)),var1,!!ensym(var1))
                  , !!var2 := if_else(is.na(!!ensym(var2)),var2,!!ensym(var2))
                  , var1 = fct_inorder(factor(as.character(var1)))
                  , var2 = fct_reorder(as.character(var2), success)
                  , var1No = as.factor(as.numeric(var1))
                  , var2No = as.factor(as.numeric(var2))
                  , trials = success + failure
                  ) %>%
    replace(is.na(.), 0)
  # binomial GLM with interaction: success/failure ~ var1*var2
  mod <- stan_glm(cbind(success,failure) ~ var1*var2
                  , data = contingency
                  , family = binomial()
                  #, iter = 5000
                  )
  # standard posterior diagnostics
  modFit <- pp_check(mod)
  modRhat <- plot(mod, "rhat_hist")
  modTrace <- stan_trace(mod)
  mod2d <- pp_check(mod, plotfun = "stat_2d", stat = c("mean", "sd"))
  # Use the model to predict results over variables of interest
  modPred <- contingency %>%
    dplyr::group_by(var1,var2) %>%
    dplyr::summarise(success = 0, failure = 100) %>% # use 100 trials to give results as percentages
    dplyr::ungroup() %>%
    dplyr::mutate(col = row.names(.)) %>%
    dplyr::left_join(as_tibble(posterior_predict(mod
                                                 , newdata = .
                                                 )
                               ) %>%
                       tibble::rownames_to_column(var = "row") %>%
                       tidyr::gather(col,value,2:ncol(.))
                     ) %>%
    dplyr::left_join(contingency %>%
                       dplyr::select(!!ensym(var1),!!ensym(var2),var1,var2) %>%
                       unique()
                     )
  # summarise the results
  modRes <- as_tibble(modPred) %>%
    dplyr::group_by(!!ensym(var1),!!ensym(var2)) %>%
    dplyr::summarise(n = n()
                     , nCheck = nrow(as_tibble(mod))
                     , modMedian = quantile(value,0.5)
                     , modMean = mean(value)
                     , modci90lo = quantile(value, 0.025)
                     , modci90up = quantile(value, 0.975)
                     , ci = modci90up-modci90lo
                     , text = paste0(round(modMedian,2)," (",round(modci90lo,2)," to ",round(modci90up,2),")")
                     ) %>%
    dplyr::ungroup() %>%
    dplyr::mutate_if(is.numeric,round,2)
  # posterior densities per level combination
  modPlotRidges <- ggplot(modPred, aes(value,!!ensym(var1),fill=!!ensym(var1))) +
    ggridges::geom_density_ridges(alpha = 0.5) +
    facet_wrap(~get("var2"),scales="free") +
    scale_fill_viridis_d()
  # pairwise differences between levels of var1 within each var2
  modDiffVar1 <- modPred %>%
    (function(x) x %>% dplyr::left_join(x %>% dplyr::select(var1,var2,row,value) %>% dplyr::rename(var1b = var1, value_2 = value))) %>%
    dplyr::filter(var1 != var1b) %>%
    dplyr::mutate(comparison = map2_chr(var1,var1b,~paste(sort(c(.x,.y))[1],sort(c(.x,.y))[2]))) %>%
    dplyr::group_by(comparison,var2,row) %>%
    dplyr::slice(1) %>% # from memory this is part of a trick to remove duplicates... comparison field is key to this?
    dplyr::ungroup() %>%
    dplyr::mutate(diff = value-value_2
                  , maxEst = map2_dbl(abs(value),abs(value_2),max)
                  , test01 = map2_lgl(value,value_2,~all(.x==0,.y==0))
                  , perDiff = if_else(!test01,100*diff/maxEst,0)
                  )
  modDiffRes <- modDiffVar1 %>%
    dplyr::group_by(var1,var2,var1b) %>%
    dplyr::summarise(n = n()
                     , nCheck = nrow(as_tibble(mod))
                     , meanDiff = median(diff)
                     , value = mean(perDiff)
                     ) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(alpha = 1.5*abs(value-50)
                  , colour = if_else(value > 0,"More likely","Less likely")
                  , text = paste0(round(abs(value),0),"%")
                  , longText = paste0(var2, " was ", text, " ", tolower(colour), " to occur in ", var1, " reviews than ",var1b," reviews")
                  , var1Comparison = paste0(var1," vs ",var1b)
                  , var1Comparison = fct_relevel(var1Comparison, grep("SA",unique(var1Comparison),value=TRUE))
                  ) %>%
    dplyr::mutate_if(is.numeric,round,2) %>%
    dplyr::arrange(var2,var1,var1b)
  modPlotRidgesDiff <- ggplot(modDiffVar1, aes(perDiff,paste0(get("var1")," vs ",get("var1b")))) +
    ggridges::geom_density_ridges(alpha = 0.5) +
    geom_vline(aes(xintercept = 0)) +
    facet_wrap(~get("var2"),scales="free") +
    scale_fill_viridis_d() +
    labs(y = "Comparison"
         , x = "Difference"
         )
  modPlotDiff <- ggplot(modDiffRes,aes(var1Comparison,var2,fill=colour,alpha=abs(value),label=text)) +
    geom_tile() +
    geom_text(size=3) +
    scale_fill_viridis_d() +
    scale_alpha_continuous(guide = FALSE
                           , range = c(0,0.5)
                           ) +
    scale_x_discrete(limits = rev(levels(var2))) +
    labs(fill = "Likelihood"
         , x = "Comparison"
         , y = get("var2")
         )
  # collect every object named mod* into the returned list
  mget(ls(pattern = "mod"))
}
# Chi-squared test of association between the first two columns of `cont`,
# plus a tile plot of each cell's contribution to the overall statistic.
#
# cont : data frame whose FIRST two columns are the categorical variables and
#        which contains a count column `n`.
# Returns a list: chSq (htest), chSqVis (per-cell residuals/contributions),
# chSqPlot (ggplot), chSqText (printable summary), doChSqPlot (TRUE if p<0.05).
chi_square <- function(cont) {
  var1 <- names(cont)[1]
  var2 <- names(cont)[2]
  # complete the var1 x var2 grid so missing combinations count as 0
  contingency <- cont %>%
    dplyr::mutate(var1 = factor(!!ensym(var1))
                  , var2 = factor(!!ensym(var2))
                  ) %>%
    tidyr::complete(var1,var2) %>%
    dplyr::mutate(!!var1 := if_else(is.na(!!ensym(var1)),var1,!!ensym(var1))
                  , !!var2 := if_else(is.na(!!ensym(var2)),var2,!!ensym(var2))
                  , var1 = fct_inorder(factor(as.character(var1)))
                  , var2 = fct_inorder(factor(as.character(var2)))
                  , var1No = as.factor(as.numeric(var1))
                  , var2No = as.factor(as.numeric(var2))
                  ) %>%
    replace(is.na(.), 0)
  # wide count matrix -> chi-squared test
  chSq <- contingency %>%
    dplyr::select(var1No,var2No,n) %>%
    tidyr::spread(var2No,n,drop=TRUE) %>%
    as.data.frame %>%
    tibble::column_to_rownames(names(.)[1]) %>%
    chisq.test()
  # Pearson residuals back in long form
  chSqResidual <- chSq$residuals %>%
    data.frame %>%
    tibble::rownames_to_column("var1No") %>%
    tidyr::gather("var2No","residual",2:ncol(.)) %>%
    dplyr::mutate(var2No = gsub("X","",var2No))
  # per-cell percentage contribution to the chi-squared statistic
  chSqVis <- data.frame(100*chSq$residuals^2/chSq$statistic) %>%
    data.frame %>%
    tibble::rownames_to_column("var1No") %>%
    tidyr::gather("var2No","contribution",2:ncol(.))%>%
    dplyr::mutate(var2No = gsub("X","",var2No)) %>%
    as_tibble() %>%
    dplyr::left_join(chSqResidual) %>%
    dplyr::left_join(contingency) %>%
    dplyr::mutate(per = 100*contribution/sum(contribution)
                  , text = paste0("n:",n,"\n",round(per,1),"%")
                  , direction = if_else(residual<0
                                        ,"less than expected"
                                        , if_else(residual>0
                                                  ,"more than expected"
                                                  ,"as expected"
                                                  )
                                        )
                  , label = paste0(var2, " occurs ", direction, " in ", var1)
                  ) %>%
    dplyr::select(!!var1,!!var2,contribution,residual,n,per,text,label,direction)
  chSqPlot <- ggplot(chSqVis, aes(!!ensym(var1), fct_rev(!!ensym(var2)), fill = direction, alpha = contribution, label = text)) +
    geom_tile() +
    geom_text(size = 2) +
    guides(alpha = FALSE) +
    theme(axis.text.x=element_text(angle=90, vjust=0.5)) +
    labs(subtitle = "Percentages are the percent contribution to overall chi-squared value"
         , y = var2
         , x = var1
         ) +
    scale_fill_viridis_d()
  chSqText <- paste0("(Chi-squared = ",round(chSq$statistic,1), ", df = ",chSq$parameter,", p <= ",round(chSq$p.value,4),")")
  doChSqPlot <- chSq$p.value<0.05
  chSqRes <- list(chSq=chSq,chSqVis=chSqVis,chSqPlot=chSqPlot,chSqText=chSqText,doChSqPlot=doChSqPlot)
}
# Read a previously saved RDS file, returning NULL when the file does not
# exist instead of erroring.  Relies on readr::read_rds being attached.
read_rds_file <- function(fileName) {
  if (!file.exists(fileName)) {
    return(NULL)
  }
  read_rds(fileName)
}
# Find the indices of peaks (or, with decreasing = TRUE, valleys) in a numeric
# vector.
# partial = TRUE also counts plateau edges and end points as peaks; with
# partial = FALSE only strict interior turning points are returned.
# Returns an integer vector of positions in x.
which_peaks <- function(x, partial = TRUE, decreasing = FALSE) {
  slopes <- diff(x)
  if (decreasing) {
    if (partial) {
      return(which(diff(c(FALSE, slopes > 0, TRUE)) > 0))
    }
    return(which(diff(slopes > 0) > 0) + 1)
  }
  if (partial) {
    return(which(diff(c(TRUE, slopes >= 0, FALSE)) < 0))
  }
  which(diff(slopes >= 0) < 0) + 1
}
# Set column widths for kable HTML tables by injecting <col width="..."> tags
# immediately after the opening <table> tag.
# kable_output : HTML string produced by knitr::kable().
# width        : vector of widths, one per column.
# Returns the modified HTML string.
html_table_width <- function(kable_output, width) {
  col_tags <- sprintf('<col width="%s">', width)
  width_html <- paste(col_tags, collapse = "\n")
  sub("<table>", paste0("<table>\n", width_html), kable_output)
}
# Create a colour palette for n groups: RColorBrewer "Set2" for small n,
# evenly spaced HCL hues otherwise.
#
# n : number of colours required (positive integer).
# Returns a character vector of exactly n hex colours.
#
# Bug fix: brewer.pal() has a minimum n of 3 — calling it with n = 1 or 2
# warned and returned 3 colours.  We now request at least 3 and subset to n.
col_pal <- function(n) {
  if (n <= 8) {
    RColorBrewer::brewer.pal(max(n, 3L), "Set2")[seq_len(n)]
  } else {
    hcl(h = seq(0, (n - 1) / (n), length = n) * 360, c = 100, l = 65, fixup = TRUE)
  }
}
# Turn a vector into a separator-delimited sentence with a penultimate joiner,
# e.g. c("a","b","c") -> "a, b and c".  NAs are dropped first.
# sep : separator between items; when ";" the joiner is always "; and "
#       (the `end` argument is ignored, matching historical behaviour).
# end : joining word before the final item (default "and").
# Returns a single character string ("" for an empty/all-NA input).
vec_to_sentence <- function(x, sep = ",", end = "and") {
  vals <- x[!is.na(x)]
  n <- length(vals)
  if (n == 0) {
    return("")
  }
  if (n == 1) {
    return(paste(vals))
  }
  joiner <- if (sep == ";") paste0(sep, " and ") else paste0(" ", end, " ")
  paste0(paste(vals[-n], collapse = paste0(sep, " ")), joiner, vals[n])
}
# https://github.com/ateucher/useful_code/blob/master/R/numbers2words.r
# Convert number(s) to English words, e.g. 1234 -> "one thousand, two hundred
# and thirty-four"-style output.  Vectorised: a vector input returns a
# character vector.  Values are rounded to integers first; magnitudes above
# trillions raise an error.
numbers2words <- function(x){
  ## Function by John Fox found here:
  ## http://tolstoy.newcastle.edu.au/R/help/05/04/2715.html
  ## Tweaks by AJH to add commas and "and"
  # Recursive worker: spells one non-negative integer, digit group by digit group.
  helper <- function(x){
    digits <- rev(strsplit(as.character(x), "")[[1]])
    nDigits <- length(digits)
    if (nDigits == 1) as.vector(ones[digits])
    else if (nDigits == 2)
      if (x <= 19) as.vector(teens[digits[1]])
    else trim(paste(tens[digits[2]],
                    Recall(as.numeric(digits[1]))))
    else if (nDigits == 3) trim(paste(ones[digits[3]], "hundred and",
                                      Recall(makeNumber(digits[2:1]))))
    else {
      # split off the leading thousands/millions/... group and recurse
      nSuffix <- ((nDigits + 2) %/% 3) - 1
      if (nSuffix > length(suffixes)) stop(paste(x, "is too large!"))
      trim(paste(Recall(makeNumber(digits[
        nDigits:(3*nSuffix + 1)])),
        suffixes[nSuffix],"," ,
        Recall(makeNumber(digits[(3*nSuffix):1]))))
    }
  }
  # Tidy whitespace and dangling "and"/"," left by the recursion.
  trim <- function(text){
    #Tidy leading/trailing whitespace, space before comma
    text=gsub("^\ ", "", gsub("\ *$", "", gsub("\ ,",",",text)))
    #Clear any trailing " and"
    text=gsub(" and$","",text)
    #Clear any trailing comma
    gsub("\ *,$","",text)
  }
  # Reassemble a digit vector (most-significant first) into a number.
  makeNumber <- function(...) as.numeric(paste(..., collapse=""))
  #Disable scientific notation
  opts <- options(scipen=100)
  on.exit(options(opts))
  # lookup tables keyed by digit character
  ones <- c("", "one", "two", "three", "four", "five", "six", "seven",
            "eight", "nine")
  names(ones) <- 0:9
  teens <- c("ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
             "sixteen", "seventeen", "eighteen", "nineteen")
  names(teens) <- 0:9
  tens <- c("twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty",
            "ninety")
  names(tens) <- 2:9
  x <- round(x)
  suffixes <- c("thousand", "million", "billion", "trillion")
  if (length(x) > 1) return(trim(sapply(x, helper)))
  res <- helper(x)
  #res <- gsub(" ","",res)
  return(res)
}
# Fix an HTML widget that fails to display: collapse the ">\n\n</div>" pattern
# left in the saved HTML into "></div>".
# inputFile  : path of the HTML file to read.
# outputFile : path the repaired HTML is written to.
# Returns NULL invisibly; the repaired file is the side effect.
widgetFix <- function(inputFile, outputFile) {
  content <- paste(readLines(inputFile), collapse = "\n")
  content <- gsub(">\n\n</div>", "></div>", content, fixed = TRUE)
  writeLines(content, outputFile)
  invisible(NULL)
}
# Generate and plot Jenks natural breaks for a numeric vector.
# http://cainarchaeology.weebly.com/r-function-for-plotting-jenks-natural-breaks-classification.html
#
# data       : numeric vector to classify.
# n          : number of classes.
# brks.cex   : text size for break labels; top.margin/dist control label placement.
# Side effect: draws the classification plot (base graphics).
# Returns a list: info (note about non-unique breaks), classif (classIntervals
# object), breaks.max.GoF (smallest class count with goodness-of-fit > 0.9999),
# class.data (sorted values with class assignment).
# Requires the 'classInt' package to be attached.
plotJenks <- function(data, n=3, brks.cex=0.70, top.margin=10, dist=5){
  df <- data.frame(sorted.values=sort(data, decreasing=TRUE))
  Jclassif <- classIntervals(df$sorted.values, n, style = "jenks") #requires the 'classInt' package
  test <- jenks.tests(Jclassif) #requires the 'classInt' package
  df$class <- cut(df$sorted.values, unique(Jclassif$brks), labels=FALSE, include.lowest=TRUE) #the function unique() is used to remove non-unique breaks, should the latter be produced. This is done because the cut() function cannot break the values into classes if non-unique breaks are provided
  if(length(Jclassif$brks)!=length(unique(Jclassif$brks))){
    info <- ("The method has produced non-unique breaks, which have been removed. Please, check '...$classif$brks'")
  } else {info <- ("The method did not produce non-unique breaks.")}
  # search upward for the smallest class count whose goodness-of-fit exceeds 0.9999
  loop.res <- numeric(nrow(df))
  i <- 1
  repeat{
    i <- i+1
    loop.class <- classIntervals(df$sorted.values, i, style = "jenks")
    loop.test <- jenks.tests(loop.class)
    loop.res[i] <- loop.test[[2]]
    if(loop.res[i]>0.9999){
      break
    }
  }
  max.GoF.brks <- which.max(loop.res)
  # plot sorted values with the break positions marked
  plot(x=df$sorted.values, y=c(1:nrow(df)), type="b", main=paste0("Jenks natural breaks optimization; number of classes: ", n), sub=paste0("Goodness of Fit: ", round(test[[2]],4), ". Max GoF (", round(max(loop.res),4), ") with classes:", max.GoF.brks), ylim =c(0, nrow(df)+top.margin), cex=0.75, cex.main=0.95, cex.sub=0.7, ylab="observation index", xlab="value (increasing order)")
  abline(v=Jclassif$brks, lty=3, col="red")
  text(x=Jclassif$brks, y= max(nrow(df)) + dist, labels=sort(round(Jclassif$brks, 2)), cex=brks.cex, srt=90)
  results <- list("info"=info, "classif" = Jclassif, "breaks.max.GoF"=max.GoF.brks, "class.data" = df)
  return(results)
}
|
acc21c83928a794a5ce20244c845911b0137dbd4
|
e7c040329363e813d79b64513bb3ffb3b7573617
|
/pySAHM/Resources/R_Modules/modalDialog.R
|
d28f401d3720c73225947793ddde72358edf49dd
|
[] |
no_license
|
jpocom/sahm
|
800e9c62b401a11334de74cf1b903f9fc20dd886
|
9b2f764ec1bb6a01077ddd0030e6dcf26fb24867
|
refs/heads/SAHM_1_1
| 2021-01-21T03:51:02.865367
| 2015-04-07T17:14:45
| 2015-04-07T17:14:45
| 33,557,074
| 0
| 0
| null | 2015-04-07T17:11:39
| 2015-04-07T17:11:37
| null |
UTF-8
|
R
| false
| false
| 3,907
|
r
|
modalDialog.R
|
###############################################################################
##
## Copyright (C) 2010-2012, USGS Fort Collins Science Center.
## All rights reserved.
## Contact: talbertc@usgs.gov
##
## This file is part of the Software for Assisted Habitat Modeling package
## for VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
## Although this program has been used by the U.S. Geological Survey (USGS),
## no warranty, expressed or implied, is made by the USGS or the
## U.S. Government as to the accuracy and functioning of the program and
## related program material nor shall the fact of distribution constitute
## any such warranty, and no responsibility is assumed by the USGS
## in connection therewith.
##
## Any use of trade, firm, or product names is for descriptive purposes only
## and does not imply endorsement by the U.S. Government.
###############################################################################
#This function was robbed from the dynamicGraph library
# Show a modal Tk dialog with a single text-entry field and OK/Cancel buttons.
#
# title             : window title.
# question          : prompt label shown next to the entry.
# entryInit         : initial text in the entry field.
# top               : optional parent window to refocus when the dialog closes.
# entryWidth        : width of the entry widget in characters.
# returnValOnCancel : value returned when the user cancels or closes the dialog.
# do.grab           : if TRUE, grab all input while the dialog is open.
# Blocks (tkwait.window) until the dialog is dismissed; returns the entered
# text on OK, or returnValOnCancel on Cancel/close.  Requires tcltk.
"modalDialog" <-
function (title, question, entryInit, top = NULL, entryWidth = 20,
    returnValOnCancel = "ID_CANCEL", do.grab = FALSE)
{
    dlg <- tktoplevel()
    tkwm.deiconify(dlg)
    if (do.grab)
        tkgrab.set(dlg)
    tkfocus(dlg)
    tkwm.title(dlg, title)
    textEntryVarTcl <- tclVar(paste(entryInit))
    textEntryWidget <- tkentry(dlg, width = paste(entryWidth),
        textvariable = textEntryVarTcl, background = "white")
    tkgrid(tklabel(dlg, text = "       "))
    tkgrid(tklabel(dlg, text = question), textEntryWidget)
    tkgrid(tklabel(dlg, text = "       "))
    # default result; overwritten by the OK handler via <<-
    ReturnVal <- returnValOnCancel
    "onOK" <- function() {
        ReturnVal <<- tclvalue(textEntryVarTcl)
        tkgrab.release(dlg)
        tkdestroy(dlg)
        if (!is.null(top))
            tkfocus(top)
    }
    "onCancel" <- function() {
        ReturnVal <<- returnValOnCancel
        tkgrab.release(dlg)
        tkdestroy(dlg)
        if (!is.null(top))
            tkfocus(top)
    }
    OK.but <- tkbutton(dlg, text = "   OK   ", command = onOK)
    Cancel.but <- tkbutton(dlg, text = " Cancel ", command = onCancel)
    tkgrid(OK.but, Cancel.but)
    tkgrid(tklabel(dlg, text = "    "))
    tkfocus(dlg)
    # treat window-manager close as cancel
    tkbind(dlg, "<Destroy>", function() {
        tkgrab.release(dlg)
        tkfocus(top)
    })
    tkbind(textEntryWidget, "<Return>", onOK)
    # block until the dialog is destroyed
    tkwait.window(dlg)
    return(ReturnVal)
}
|
87bc4d98e7bbb6d65376a1289362a1111b5f5ba1
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/inst/IP/bycatchCSAS/9.LandingPredFootPrintMaps.r
|
950d49749c1f937150ce783db4a2ec96beac3a40
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 2,739
|
r
|
9.LandingPredFootPrintMaps.r
|
# Maps of the difference between predicted and logbook-reported lobster
# landings (in tonnes) by grid and week-of-season (WOS), for LFAs 33-35.
# Depends on the project packages bio.lobster / bio.utilities (ggLobsterMap,
# rename.df, load.environment).
require(bio.lobster)
require(bio.utilities)
require(lubridate)
require(devtools)
# NOTE(review): option is misspelled -- "stringsAsFactors" is the real option
# name, so this line has no effect. Confirm whether it is still needed.
options(stringAsFactors=F)
la()
load_all('C:/Users/Cooka/Documents/git/bio.utilities')
wd = ('C:\\Users\\Cooka\\OneDrive - DFO-MPO\\BycatchLobster')
setwd(wd)
p = bio.lobster::load.environment()
ef = readRDS('results/LandingsPredictedActual.rds')
ef = bio.utilities::rename.df(ef,c('PID','SID'),c('LFA','GRID_NO'))
# Residuals (kg) to tonnes. NOTE(review): "Rediduals" looks like a typo that
# must match the column name in the RDS file -- confirm before renaming.
ef$Z = ef$Rediduals/1000
# --- LFA 33: cap Z at its 99th percentile, shift WOS, plot two panels ---
x = subset(ef,LFA==33)
q = quantile(x$Z,probs=.99,na.rm=T)
x$Z[which(x$Z>q)] <- q
# Common colour-scale breaks so both figures share one legend range.
ux = c(min(x$Z),max(x$Z))
x$WOS = x$WOS-6
png('Figures/ModelOutput/LobsterPredictionsvLogs331-12.png',width=10,height=12,units='in',res=300)
ggLobsterMap('33',bathy=T,attrData = subset(x,LFA==33& WOS %in% 1:12),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans = 'identity' ,brks=ux)
dev.off()
png('Figures/ModelOutput/LobsterPredictionsvLogs13-21.png',width=10,height=12,units='in',res=300)
ggLobsterMap('33',bathy=T,attrData = subset(x,LFA==33& WOS %in% 13:24),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans = 'identity' ,brks=ux)
dev.off()
# --- LFA 34: same capping/shift, two panels ---
x = subset(ef,LFA==34)
q = quantile(x$Z,probs=.99,na.rm=T)
x$Z[which(x$Z>q)] <- q
ux = c(min(x$Z),max(x$Z))
x$WOS = x$WOS-6
png('Figures/ModelOutput/LobsterPredictionsvLogs34-1-12.png',width=10,height=12,units='in',res=300)
ggLobsterMap('34',bathy=T,attrData = subset(x,LFA==34& WOS %in% 1:12),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans = 'identity' ,brks=ux)
dev.off()
png('Figures/ModelOutput/LobsterPredictionsvLogs34-13-21.png',width=10,height=12,units='in',res=300)
ggLobsterMap('34',bathy=T,attrData = subset(x,LFA==34& WOS %in% 13:24),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans = 'identity' ,brks=ux)
dev.off()
# --- LFA 35: no WOS shift here; three panels across the season ---
x = subset(ef,LFA==35)
q = quantile(x$Z,probs=.99,na.rm=T)
x$Z[which(x$Z>q)] <- q
ux = c(min(x$Z),max(x$Z))
png('Figures/ModelOutput/LobsterPredictionsvLogs35-1-10.png',width=10,height=12,units='in',res=300)
ggLobsterMap('35',bathy=T,attrData = subset(x,LFA==35 & WOS %in% c(1:10)),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans ='identity',brks=ux)
dev.off()
png('Figures/ModelOutput/LobsterPredictionsvLogs35-22-31.png',width=10,height=12,units='in',res=300)
ggLobsterMap('35',bathy=T,attrData = subset(x,LFA==35 & WOS %in% c(22:31)),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans ='identity',brks=ux)
dev.off()
png('Figures/ModelOutput/LobsterPredictionsvLogs35-32-40.png',width=10,height=12,units='in',res=300)
ggLobsterMap('35',bathy=T,attrData = subset(x,LFA==35 & WOS %in% c(32:42)),fw='WOS',legLab='Landings Difference (t)',addLFALabels = F,scaleTrans ='identity',brks=ux)
dev.off()
|
d7bac33a40c1b4d6f3171f20894696ecc7466166
|
cce3c2bed76e1b7fb2e2128e25560ac2936ea5c8
|
/C7.R
|
6d0b98ccff45e163bb9b6a0fff603da076a22bf8
|
[] |
no_license
|
linusyoung/DAT209x
|
2b5fec0a6cfc6c3feb53c8292712cfbf01c0e27b
|
acd6e8e7d802604c381020dda01e0606accfe0b8
|
refs/heads/master
| 2021-01-15T08:19:14.879392
| 2017-08-14T11:17:23
| 2017-08-14T11:17:23
| 99,565,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
C7.R
|
# CH7
# Connect to the DAT209x sample Azure SQL database via ODBC and list its
# tables.
# BUG FIX: odbcDriverConnect() and sqlTables() come from the RODBC package,
# which was never loaded -- the script failed unless RODBC happened to be
# attached already.
library(RODBC)
# Build the ODBC connection string; components are joined with ";".
conn_str <- paste(
  "Driver={SQL Server}",
  "Server=msedxeus.database.windows.net",
  "Database=DAT209x01",
  "uid=Rlogin",
  "pwd=P@ssw0rd;",
  sep = ";"
)
conn_str
# Open the connection and preview the available tables.
conn <- odbcDriverConnect(conn_str)
tab <- sqlTables(conn)
head(tab)
|
fec8b05271057441b2aad6b281a92b577770cd33
|
448ac5074d4401666b5a3f8ea96b8ba80a88aa95
|
/plot4.R
|
8b61c9be1122bf039a825c64910b52b627d635e1
|
[] |
no_license
|
HabileStudio/4w4pa
|
3f1d0188b3d261062a0706a29a056e73a91565f1
|
81c54e7f8f1aa642d2740446511d3dffbdd59599
|
refs/heads/master
| 2022-12-23T11:35:35.842542
| 2020-09-16T09:13:10
| 2020-09-16T09:13:10
| 295,111,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
plot4.R
|
# Exploratory Data Analysis, plot 4: total US PM2.5 emissions from
# coal-combustion sources, by year, saved to plot4.png.
## This first line will likely take a few seconds. Be patient!
rawdata <- readRDS("data/summarySCC_PM25.rds")
# Reference
SCC <- readRDS("data/Source_Classification_Code.rds")
# We remove the pollutant column:
# a call to unique() shows always the same value "PM25-PRI"
data <- rawdata[,-c(3)]
# Across the United States, how have emissions
# from coal combustion-related sources changed from 1999–2008?
# Get the coal combustion related SCC codes
# (row indices of Short.Name entries matching "Coal...Combustion")
coal <- grep("Coal.*Combustion", SCC$Short.Name)
coalcodes <- SCC[coal,"SCC"]
# Keep only emission records whose source code is coal-combustion related.
data <- data[data$SCC %in% coalcodes,]
# Prepare for png output
png(filename = "plot4.png",
    width = 480, height = 480,
    units = "px")
# Load the plotting library
if(! "ggplot2" %in% rownames(installed.packages())){
  install.packages("ggplot2")
}
library("ggplot2")
# Plot
# geom_col() stacks/sums the per-record Emissions within each year.
ggplot(data = data) +
  geom_col(aes(x = as.character(year), y = Emissions)) +
  labs(x = "", y="Emissions (tons)", title = "PM2.5 emissions from coal combustion")
dev.off()
|
154569ab34aa03a83edd41d53c28d70463efc2e7
|
4dd00a8544a7aed138d1ab50bab9b283e78824fa
|
/run_analysis.R
|
6b185a42dc8b613d4bd6069bce4b5e7e9f71dc7d
|
[] |
no_license
|
chongyan11/Getting-And-Cleaning-Data
|
b951f765511351e389e96554caf23f1399440cff
|
bcb3281ac233453c5fa7f9d68d89758d2f17ee61
|
refs/heads/master
| 2020-05-26T13:37:35.930640
| 2019-05-23T15:02:01
| 2019-05-23T15:02:01
| 188,249,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,146
|
r
|
run_analysis.R
|
# Build a tidy data set from the UCI HAR data: merge train/test, keep the
# mean/std features, label activities, expand variable names, and write the
# per-subject/activity averages to "TidyData.txt".
#
# No arguments; reads from "UCI HAR Dataset/" under the working directory
# and writes "TidyData.txt" as a side effect.
run_analysis <- function() {
  ## Reading data from datasets provided
  features <- read.table("UCI HAR Dataset/features.txt", header = FALSE, col.names = c("n", "Feature"))
  activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE, col.names = c("Label", "Activity Name"))
  subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE, col.names = "Subject")
  x_test <- read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE, col.names = features$Feature)
  y_test <- read.table("UCI HAR Dataset/test/Y_test.txt", header = FALSE, col.names = "Label")
  subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE, col.names = "Subject")
  x_train <- read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE, col.names = features$Feature)
  y_train <- read.table("UCI HAR Dataset/train/Y_train.txt", header = FALSE, col.names = "Label")

  ## Merging data from training and test sets
  x <- rbind(x_train, x_test)
  y <- rbind(y_train, y_test)
  subject <- rbind(subject_train, subject_test)
  combined <- cbind(subject, x, y)

  ## Selecting only data containing mean and standard deviation
  mean_std_col <- grep("mean|std", names(combined))
  library(dplyr)
  extract <- select(combined, Subject, Label, mean_std_col)

  ## Naming activities in data set (Label is the 1..6 activity code)
  extract$Label <- activity_labels[extract$Label, 2]

  ## Expand abbreviated variable names.
  ## BUG FIX: read.table(col.names=) sanitises the raw feature names with
  ## make.names(), so "tBodyAcc-mean()-X" arrives as "tBodyAcc.mean...X".
  ## The original patterns "-mean()" / "-freq()" could therefore never
  ## match (and std columns were never renamed at all); the replacements
  ## below target the sanitised, dotted forms instead.
  names(extract) <- gsub("^t", "Time", names(extract))
  names(extract) <- gsub("^f", "Freq", names(extract))
  names(extract) <- gsub("Gyro", "Gyroscope", names(extract))
  names(extract) <- gsub("Acc", "Accelerometer", names(extract))
  names(extract) <- gsub("\\.mean\\.+", "Mean", names(extract))
  names(extract) <- gsub("\\.std\\.+", "Std", names(extract))
  names(extract) <- gsub("Mag", "Magnitude", names(extract))
  names(extract)[2] <- "Activity"

  ## Creating average by activity and subject
  summary <- extract %>%
    group_by(Subject, Activity) %>%
    summarize_all(mean)
  write.table(summary, "TidyData.txt", row.names = FALSE)
}
|
6ea30184e05f0df7a84229a7f4354a949c7ddeb5
|
0155e0990ce8fc71651c48d9af7bb38a11779d20
|
/fss.R
|
1c1c708dff903e7af66e01899b12d009f9fcccd4
|
[
"MIT"
] |
permissive
|
goujonpa/SY19TP7
|
2968adc0f3faf1cc06986bd7354e31160b5c5809
|
99f016e47ed939b9235b73f485aec59b2ea09ffa
|
refs/heads/master
| 2021-01-12T10:23:55.786174
| 2017-01-15T18:28:12
| 2017-01-15T18:28:12
| 76,444,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 116
|
r
|
fss.R
|
# Paul GOUJON & Jo COLINA
# UTC - SY19 - TP7
# Facial expression recognition
# Bind the feature matrix and the labels into a single data frame.
# NOTE(review): assumes `X` (features) and `y` (labels) already exist in the
# workspace, e.g. loaded by an earlier script -- confirm before sourcing.
df = cbind(as.data.frame(X), y)
|
4fa771fa56ef4f209ebb09943a2f8440f8b7c647
|
75d05fed7d91b13f7bd6f6f352fa258e79347b9b
|
/R/cryptohash.R
|
37567280e32f7f0c9e1b8c1d8d78d92d872208c6
|
[
"MIT"
] |
permissive
|
Protonk/WMUtils
|
fdd9d207e9a155f996aa882c4bdedf4a8bcb9d24
|
57f75e11802a72726f8c865f5ec20f09a8ec9fb8
|
refs/heads/master
| 2021-01-17T23:23:29.576657
| 2014-12-03T17:51:48
| 2014-12-03T17:51:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,291
|
r
|
cryptohash.R
|
#' @title cryptohash
#'
#' @description Cryptographically hash a character vector, or anything that
#' can be coerced into one.
#'
#' @param x a vector of strings, or an object coercible to a character vector.
#'
#' @param algorithm the hashing algorithm applied to each string; one of
#' "md5", "sha1", "sha256" or "sha512".
#'
#' @param include_rand if TRUE, append a pseudorandom component to every
#' string before hashing. FALSE by default.
#'
#' @param na_rm whether NA values may be dropped from the vector. When FALSE
#' (the default) and NA values are present, the call simply terminates.
#'
#' @examples
#' cryptohash("foo", algorithm = "md5")
#' cryptohash("foo", algorithm = "md5", include_rand = TRUE)
#' cryptohash(c("foo",NA), algorithm = "sha1", na_rm = TRUE)
#'
#' @return a vector of cryptographic hashes, the length of the input vector
#' (or shorter when na_rm drops NA elements)
#'
#' @export
cryptohash <- function(x, algorithm, include_rand = FALSE, na_rm = FALSE){
  # Coerce non-character input up front, warning as we go.
  if(!is.character(x)){
    x <- as.character(x)
    warning("x is not a character vector. Attempting to convert it into one.")
  }
  # Handle missing values before anything touches the strings.
  if(any(is.na(x))){
    if(!na_rm){
      stop("x contained NA values and cannot be hashed. Remove the invalid elements, or set na_rm to TRUE")
    }
    x <- x[!is.na(x)]
    if(length(x) == 0){
      stop("After removing NA values, no values were left.")
    }
  }
  # Optionally salt every string with a pseudorandom value.
  if(include_rand){
    # Re-seed from entropy so runs are not reproducible.
    set.seed(NULL)
    # Draw one element of the RNG state; indices 1 and 2 are skipped because
    # they are disturbingly similar between runs.
    salt <- as.character(.Random.seed[sample(3:length(.Random.seed),1)])
    # Re-seed again so the sampled state cannot be recovered afterwards.
    set.seed(NULL)
    x <- paste0(x, salt)
  }
  # Hand off to the compiled implementation and return its result.
  c_cryptohash(x = x, algorithm = algorithm)
}
|
1c889ed1bbda436161233885aa9da8ed6caaf976
|
3f62979ccc33c036f0904034aedbc3db8b7e85af
|
/question_eight.R
|
90facea546fc2091dd18a44600677cd9ccb76fa7
|
[] |
no_license
|
yaliG/MD-ML_HW1
|
949b60e249feccb7c78f484215a7e4dd7479f732
|
d4c6228315417ee21dd66525530fa1464fd46c3b
|
refs/heads/master
| 2020-08-08T07:44:46.581800
| 2019-10-09T00:22:19
| 2019-10-09T00:22:19
| 213,782,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,193
|
r
|
question_eight.R
|
# Question 8: scatterplots relating taxi-driver activity to earnings.
# Reads the summaries produced by questions 5-7 and writes PNG figures
# under figures/.
library(ggplot2)
# BUG FIX: filter(), group_by(), summarize() and %>% below come from dplyr,
# which was never attached; the script only worked if dplyr happened to be
# loaded already.
library(dplyr)
# (1)
# read data
taxi_data <- readr::read_csv("data/question_five.csv")
# 4 scatterplots
p <- ggplot(data=taxi_data, aes(x=total_trips, y=total_earnings)) +
  geom_point()
p + ggtitle("Total trips v.s. total earnings") +
  xlab("Total trips, times") + ylab("Total earnings, dollar")
ggsave("question_eight_trips.png", path = "figures")
p <- ggplot(data=taxi_data, aes(x=total_passengers, y=total_earnings)) +
  geom_point()
p + ggtitle("Total passengers v.s. total earnings") +
  xlab("Total passengers, number of people") + ylab("Total earnings, dollar")
ggsave("question_eight_passengers.png", path = "figures")
p <- ggplot(data=taxi_data, aes(x=total_time_with_passengers, y=total_earnings)) +
  geom_point()
p + ggtitle("Total time with passengers v.s. total earnings") +
  xlab("Total time with passengers, second") + ylab("Total earnings, dollar")
ggsave("question_eight_time_with_passengers.png", path = "figures")
# remove the impossible total_distance (more than 100 mph for 24 hours)
taxi_data <- filter(taxi_data, total_distance <= 100*24)
p <- ggplot(data=taxi_data, aes(x=total_distance, y=total_earnings)) +
  geom_point()
p + ggtitle("Total distance v.s. total earnings") +
  xlab("Total distance, mile") + ylab("Total earnings, dollar")
ggsave("question_eight_distance.png", path = "figures")
# (2)
# Liang
taxi_data <- readr::read_csv("data/question_six.csv")
p1 <- ggplot(data=taxi_data)+
  geom_smooth(mapping=aes(x=hour,y=trips_started))+
  labs(title="Trips Started in 24h",x="Hour(h)",y="Trips Started(count)")
ggsave("question_eight_Liang_Yingtian1.png", path = "figures")
p2 <- ggplot(data=taxi_data)+
  geom_smooth(mapping=aes(x=hour,y=total_passengers_picked_up))+
  labs(title="Passengers in 24h",x="Hour(h)",y="Passengers(person)")
ggsave("question_eight_Liang_Yingtian2.png", path = "figures")
# Per-driver totals across all hours.
d1 <- taxi_data %>% group_by(hack_license) %>%
  summarize(total_trips=sum(trips_started),total_passengers=sum(total_passengers_picked_up))
p3 <- ggplot(d1)+
  geom_smooth(mapping=aes(x=total_trips,y=total_passengers))+
  labs(title="Trips and Passengers",x="Total trips(count)",y="Total passengers(person)")
ggsave("question_eight_Liang_Yingtian3.png", path = "figures")
# Shen
taxi_hour <- readr::read_csv("data/question_seven.csv")
p4 <- ggplot(data = filter(taxi_hour, earnings <100),
             aes(x=total_time_with_passengers, y=earnings, color = as.factor(hour))) +
  geom_point(size = 1)+
  labs(title="Total time with passengers v.s. earnings for different hours",
       x="Total time with passengers, sec",
       y="Earnings")+
  guides(color = guide_legend(title = "hour"))
ggsave("question_eight_Shen_Xuechun.png", path = "figures", width = 20, height = 10, unit = "cm")
# Gao
p5 <- ggplot(taxi_hour,
             aes(x=total_time_with_passengers, y=miles_with_passengers, color = as.factor(hour)))+
  geom_point(size = 1) +
  labs(title="Total time with passengers v.s. miles with passengers for different hours",
       x="Total time with passengers, sec",
       y="Miles with passengers, mile")+
  guides(color = guide_legend(title = "hour", nrow = 12))
ggsave("question_eight_Gao_Yali.png", path = "figures", width = 20, height = 10, unit = "cm")
|
19dee9c9a7361800aaacbd1000152450b86b07a4
|
284c7b66d6db034a5ccfd34486eaeba8bc2ccaf6
|
/R/package.r
|
c09e0dd8c9e4cb13750dcccff2eb4a4db91d66e1
|
[] |
no_license
|
hadley/helpr
|
c9967cfabe6d510212a32d83643136b3b85d5507
|
2eeb174b09493f8d5c4c072772285c100f410a29
|
refs/heads/master
| 2021-01-11T04:57:20.128449
| 2012-04-09T14:34:21
| 2012-04-09T14:34:21
| 377,173
| 23
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,421
|
r
|
package.r
|
#' Read RDS file
#'
#' Thin compatibility wrapper: R < 2.13.1 only shipped the unexported
#' reader, later versions expose readRDS directly.
#'
#' @param ... args sent directly to base's read rds function
#' @author Barret Schloerke
#' @keywords internal
read_rds <- function(...) {
  version <- R.version
  legacy <- version$major == "2" && as.numeric(version$minor) < 13.1
  if (legacy) {
    base:::.readRDS(...)
  } else {
    base:::readRDS(...)
  }
}
#' Package information.
#'
#' @aliases helpr_package helpr_package_mem
#' @return all the information necessary to produce a package site ("/package/:package/")
#' @author Barret Schloerke
#' @keywords internal
helpr_package <- function(package) {
  # Delegate to the memoised worker; passing the installed version as part
  # of the key invalidates the cache when the package is upgraded.
  helpr_package_mem(package, pkg_version(package))
}
helpr_package_mem <- memoise(function(package, version) {
  # Load the package metadata and flatten DESCRIPTION into a single list
  # with lower-cased field names.
  info <- read_rds(system.file("Meta", "package.rds", package = package))
  description <- as.list(info$DESCRIPTION)
  info$DESCRIPTION <- NULL
  description <- modifyList(info, description)
  names(description) <- tolower(names(description))
  author_str <- pluralize("Author", description$author)
  # Topic, demo and vignette listings for the package page.
  items <- pkg_topics_alias(package)
  demos <- pkg_demos(package)
  vigs <- pkg_vigs(package)
  # Normalise the dependency fields into name/version tables.
  description$depends <- parse_pkg_desc_item(description$depends)
  description$imports <- parse_pkg_desc_item(description$imports)
  description$suggests <- parse_pkg_desc_item(description$suggests)
  description$extends <- parse_pkg_desc_item(description$extends)
  description$reverse <- dependsOnPkgs(package)
  # Render authors as HTML links, folding the maintainer into that string.
  description$author <- pkg_author_and_maintainers(description$author, description$maintainer)
  description$maintainer <- NULL
  # A comma-separated URL field becomes a character vector of URLs.
  if (has_text(description$url)) {
    description$url <- str_trim(str_split(description$url, ",")[[1]])
  }
  list(
    package = package,
    items = items,
    description = description,
    author_str = author_str,
    demos_str = pluralize("Demo", demos),
    demos = demos,
    vigs_str = pluralize("Vignette", vigs),
    vigs = vigs,
    change_log = pkg_news(package)
  )
})
#' Package version from the rd file
#'
#' Reads the package's Meta/package.rds and returns the Version recorded in
#' its DESCRIPTION.
#'
#' @keywords internal
#' @author Hadley Wickham
pkg_version <- function(pkg) {
  meta <- read_rds(system.file("Meta", "package.rds", package = pkg))
  meta$DESCRIPTION[["Version"]]
}
#' Add package link to a string.
#'
#' Wraps every package name found in `string` with an HTML anchor pointing
#' at that package's helpr page.
#'
#' @param string string to be evaluated
#' @keywords internal
#' @author Barret Schloerke
#' @examples #add_package_link_to_string("quantreg, Hmisc, mapproj, maps, hexbin, gpclib, maptools")
add_package_link_to_string <- function(string) {
  # Split the comma/space separated list into individual package names.
  packs <- str_trim(str_split(usage_functions(string), "[, ]")[[1]])
  packs <- packs[packs != ""]
  # Build an anchor per name, then substitute each back into the string.
  # NOTE(review): str_replace() swaps only the first occurrence, and a name
  # that is a substring of another (e.g. "maps" in "maptools") could be
  # replaced inside the wrong token -- confirm inputs are well-behaved.
  pack_link <- str_c("<a href='", router_url(), "/package/", packs, "/' >", packs, "</a>")
  for(i in seq_along(packs)){
    string <- str_replace(string, packs[i], pack_link[i])[[1]]
  }
  string
}
#' Ensure package version is properly displayed (if not already in a nice
#' format).
#'
#' Normalises one DESCRIPTION dependency field (Depends/Imports/...) into a
#' data frame with `name` and `version` entries, or passes through strings,
#' or returns NULL when the field is absent.
#'
#' @author Barret Schloerke
#' @keywords internal
parse_pkg_desc_item <- function(obj) {
  # Absent field: nothing to display.
  if (NROW(obj) < 1) {
    return(NULL)
  }
  # Already plain text: pass through untouched.
  if (is.character(obj)) {
    return(obj)
  }
  # A single bare dependency: wrap it so sapply() below always iterates
  # over a list of name/version records.
  if (!is.list(obj)) {
    obj <- list(obj = (list(name = obj, version = NULL)))
  }
  as.data.frame(
    sapply(obj, function(x) {
      vers <- NULL
      # if the version is found, it will create one in the form of '(1.2.3)'
      if (!is.null(x$version)) {
        vers <- str_c("(", x$op, " ", str_c(unclass(x$version)[[1]], collapse = "."), ")", collapse = "")
      }
      list(name = as.character(x$name), version = as.character(vers))
    })
    , stringsAsFactors = FALSE
  )
}
#' Parse authors and maintainer.
#'
#' Turns "Name <email>" pairs inside the author string into mailto links and
#' bolds the maintainer, appending the maintainer if not already listed.
#'
#' @param authors author string from DESCRIPTION (may contain newlines)
#' @param maintainer maintainer string ("Name <email>"), or NULL
#' @return string containing the authors with links properly displayed
#' @author Barret Schloerke
#' @keywords internal
pkg_author_and_maintainers <- function(authors, maintainer=NULL) {
  # retrieve the authors and email
  authors <- str_replace_all(authors, "\\n", " ")
  # NOTE(review): the result of this call is discarded -- it looks like
  # leftover debugging and has no effect.
  str_extract_all(authors, "[a-zA-z]* [a-zA-z]* <*>")
  # retrieve the author and email separately
  all_pattern <- "[a-zA-Z][a-zA-Z]* [a-zA-Z][a-zA-Z]* <[a-zA-Z0-9._-]*@[a-zA-Z0-9._-]*.[a-zA-Z]{2,7}>"
  name_pattern <- "[a-zA-Z][a-zA-Z_-]*[ ]{0,1}[a-zA-Z][a-zA-Z]*"
  email_pattern <- "<[a-zA-Z0-9._-]*@[a-zA-Z0-9._-]*.[a-zA-Z]{2,7}>"
  auths_string <- str_extract_all(authors, all_pattern)[[1]]
  auths <- sapply(str_extract_all(auths_string, name_pattern), "[[", 1)
  emails <- sapply(str_extract_all(auths_string, email_pattern), "[[", 1)
  # No emails found: leave the bare names; otherwise strip the angle
  # brackets and build mailto links.
  if (length(emails) < 1) {
    author_and_email <- auths
  } else {
    emails <- str_replace_all(emails, "<", "")
    emails <- str_replace_all(emails, ">", "")
    author_and_email <- author_email(auths, emails)
  }
  # replace the original authors with the linked authors
  for (i in seq_along(author_and_email)) {
    authors <- str_replace_all(authors, auths_string[i], author_and_email[i])
  }
  if (!is.null(maintainer)) {
    maintainer_name <- str_trim(strip_html(maintainer))
    maintainer_email <- str_extract_all(maintainer, email_pattern)[[1]][1]
    maintainer_email <- str_replace_all(maintainer_email, "<", "")
    maintainer_email <- str_replace_all(maintainer_email, ">", "")
    # make the maintainer bold
    maintainer_string <- str_c("<strong>", author_email(maintainer_name, maintainer_email), "</strong>", collapse = "")
    if (str_detect(authors, maintainer_name)) {
      # replace the current author text with the maintainer text
      # (first strip any link markup already wrapped around the name)
      authors <- str_replace_all(
        authors,
        str_c("</?.*?>",maintainer_name,"</?.*?>", collapse = ""),
        maintainer_name
      )
      authors <- str_replace_all(
        authors,
        maintainer_name,
        maintainer_string
      )
    } else {
      # attach the maintainer to the end
      authors <- str_c(authors, "; ", maintainer_string, collapse = "")
    }
  }
  authors
}
#' Documentation database path.
#'
#' @param package package to explore
#' @return \code{file.path} to the documentation database
#' @keywords internal
#' @author Hadley Wickham
pkg_rddb_path <- function(package) {
  # The Rd database lives at help/<package> inside the installed package.
  file.path(pkg_help_path(package), package)
}
#' Package help path.
#'
#' @param package package to explore
#' @return \code{file.path} to the help folder
#' @keywords internal
#' @author Hadley Wickham
pkg_help_path <- function(package) {
  # system.file() returns "" when the package (or folder) is not installed.
  system.file("help", package = package)
}
#' List all package vignettes.
#'
#' @param package package to explore
#' @return \code{subset} of the \code{vignette()$results} \code{data.frame} ("Package", "LibPath", "Item" and "Title"), or NULL when the package has no vignettes
#' @author Barret Schloerke
#' @keywords internal
pkg_vigs <- function(package) {
  vignettes <- vignette(package = package)$results
  # No vignettes: return NULL rather than an empty data frame.
  if (!NROW(vignettes)) {
    return(NULL)
  }
  # Column 4 is the title with a trailing "(source, pdf)" marker; strip the
  # marker and the parentheses to leave just the title text.
  titles <- str_replace_all(vignettes[,4], "source, pdf", "")
  titles <- str_trim(str_replace_all(titles, "[()]", ""))
  data.frame(item = vignettes[,"Item"], title = titles, stringsAsFactors = FALSE)
}
#' Package topics alias to file index.
#'
#' @param package package to explore
#' @return \code{\link{data.frame}} containing \code{alias} (function name) and \code{file} that it is associated with
#' @keywords internal
#' @author Hadley Wickham
pkg_topics_index <- memoise(function(package) {
  # AnIndex is a tab-separated alias -> Rd-file lookup shipped in help/.
  help_path <- pkg_help_path(package)
  file_path <- file.path(help_path, "AnIndex")
  ### check to see if there is anything that exists, aka sinartra
  if (length(readLines(file_path, n = 1)) < 1) {
    return(NULL)
  }
  # comment.char/quote are disabled because aliases may contain "#" or
  # quote characters.
  topics <- read.table(file_path, sep = "\t",
    stringsAsFactors = FALSE, comment.char = "", quote = "", header = FALSE)
  names(topics) <- c("alias", "file")
  # Drop malformed rows with missing alias or file.
  topics[complete.cases(topics), ]
})
#' Package topics file documentation.
#'
#' Items can be accessed by \emph{\code{list()}}\code{$file_name}
#' @param package package to explore
#' @return \code{\link{list}} containing the documentation file of each file of a package
#' @keywords internal
#' @author Hadley Wickham
#' @import tools
pkg_topics_rd <- memoise(function(package) {
  # fetchRdDB is unexported from tools; it loads the whole parsed-Rd
  # database for the package at once.
  rd <- tools:::fetchRdDB(pkg_rddb_path(package))
  lapply(rd, name_rd)
})
#' Topic title and aliases by package.
#' return information on the package, datasets, internal, and datasets
#'
#' Splits a package's Rd topics into regular functions, datasets, internal
#' topics and the package-overview topic, keyed by each topic's keyword.
#'
#' @param pkg package in question
#' @author Barret Schloerke
#' @keywords internal
pkg_topics_alias <- function(pkg) {
  rd1 <- pkg_topics_rd(pkg)
  # Summarise each topic: name, aliases, keywords, truncated description
  # (150 chars of plain text), and title.
  rd <- lapply(rd1, function(x) {
    desc <- reconstruct(untag(x$description), pkg)
    desc_naked <- strip_html(desc)
    if (str_length(desc_naked) > 150) {
      desc <- str_c(str_sub(desc_naked, end = 150), " ...")
    }
    list(
      topic = unlist(x$name),
      alias = unname(sapply(x[names(x) == "alias"], "[[", 1)),
      keywords = str_trim(reconstruct(untag(x$keyword), pkg)),
      desc = desc,
      title = reconstruct(untag(x$title), pkg)
    )
  })
  # Partition by keyword; what remains in `rd` is the ordinary functions.
  keywords <- sapply(rd, function(x){ x$keywords })
  package_info <- rd[keywords == "package"]
  internal <- rd[keywords == "internal"]
  dataset <- rd[keywords == "datasets"]
  rows <- keywords %in% c("package", "internal", "datasets")
  if (sum(rows) > 0) rd[rows] <- NULL
  list(func = rd, dataset = dataset, internal = internal, info = package_info)
}
#' Package description
#'
#' Returns the description section of a topic's Rd documentation with
#' leading and trailing blank lines stripped.
#'
#' @param pkg package in question
#' @param topic topic in question
#' @author Barret Schloerke
#' @keywords internal
package_description <- function(pkg, topic) {
  # BUG FIX: the original referenced the undefined variable `package`
  # (the argument is `pkg`) and anchored the regex as "$\n+|\n+^", which
  # can never match; "^\n+|\n+$" is what strips leading/trailing newlines.
  desc <- reconstruct(pkg_topic(pkg, topic)$description, pkg)
  gsub("^\n+|\n+$", "", desc)
}
|
4f02f00c643df74bab49e3f43f82b76abf715c90
|
8c7db1c12d5662b6b9e139a641024cdefd96ba09
|
/plot2.R
|
4b08f6987412979a342a3ec8600e953598f85f57
|
[] |
no_license
|
kl79/ExData_Plotting1
|
fdcf845c6377590270f6f76e7e0f5ab2b9d77cd5
|
d83d34238eadefc0de3d23098cd513d17afaeb44
|
refs/heads/master
| 2021-01-16T17:50:20.698735
| 2015-09-12T17:06:54
| 2015-09-12T17:06:54
| 42,358,980
| 0
| 0
| null | 2015-09-12T14:02:27
| 2015-09-12T14:02:26
| null |
UTF-8
|
R
| false
| false
| 596
|
r
|
plot2.R
|
# Plot the 2007-02-01/02 Global Active Power time series from the UCI
# household power consumption data and save it as plot2.png.
# Reads "../household_power_consumption.txt" (semicolon-separated); no
# arguments, returns invisibly via dev.off().
makeplot2 <- function()
{
  # read data
  data <- read.csv("../household_power_consumption.txt", sep=";")
  subdata <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
  ## convert to numeric
  ## (the column arrives as text because missing values are coded "?")
  subdata$Global_active_power <- as.numeric(as.character(subdata$Global_active_power))
  ## convert date+time to datetime
  subdata$datetime <- as.POSIXct(strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S"))
  # construct plot + save to PNG
  png("plot2.png")
  plot(subdata$Global_active_power ~ subdata$datetime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
  dev.off()
}
|
087e251cde0835d6c6eecff4ef32511ac4d076fd
|
af850f1ae16c67d8a97f8495f3990b014c3b7f26
|
/complete.R
|
c4ab39dddb1c2cafec11d833e793d89b57a21eef
|
[] |
no_license
|
2473539347/Data-Learner
|
e3778a39b642eb11a2759bd76073c69874c315f2
|
f21ee812ba71af77f452e309647e7ff31e466f61
|
refs/heads/master
| 2021-01-13T00:37:13.915403
| 2016-03-13T11:24:42
| 2016-03-13T11:24:42
| 52,871,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 395
|
r
|
complete.R
|
# Count the number of complete (no-NA) observations per monitor.
#
# Args:
#   directory: folder containing the monitor CSV files.
#   id:        vector of monitor IDs to report on (default all 332).
#
# Returns: a data.frame with columns `id` and `nods` (complete-case count
# per monitor, in the order of `id`).
#
# NOTE(review): read.massive.csv() is a project helper assumed to read and
# combine the monitors' CSVs into one data frame with an ID column --
# confirm its contract.
complete<-function(directory,id=1:332){
  file<-paste(directory,"/",sep = "")
  b<-read.massive.csv(file,id)
  good<-complete.cases(b)
  bnoNA<-b[good,]
  n=length(id)
  nods<-numeric(length = n)
  # First ID handled outside the loop: when n == 1, 2:n would count
  # backwards (c(2, 1)), so the loop below is guarded by n > 1.
  nods[1]<-sum(bnoNA$ID==id[1])
  if(n>1){
    for(i in 2:n){
      nods[i]<- sum((bnoNA$ID)==id[i])
    }
  }
  data.frame(id,nods)
}
|
1665fd2c313aea3e9fa68c8c083e922e74291a61
|
39cacd6faa29f5736990dcc491fc58f1b5c59cfb
|
/man/add_binary_decisions.Rd
|
33668986bdfe388ec2653f6a53c4d47ed9f58194
|
[] |
no_license
|
l5d1l5/prioritizr
|
ddf4ef4054cb9e974b2edaf9e9adbb91ff1e2459
|
f1deca821db7f3809d240e351b5ab9492fe7e711
|
refs/heads/master
| 2020-06-26T15:55:45.751296
| 2017-05-03T22:49:23
| 2017-05-08T20:19:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,114
|
rd
|
add_binary_decisions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_binary_decisions.R
\name{add_binary_decisions}
\alias{add_binary_decisions}
\alias{add_binary_decisions}
\title{Add Binary Decisions}
\usage{
add_binary_decisions(x)
}
\arguments{
\item{x}{\code{\link{ConservationProblem-class}} object.}
}
\value{
\code{\link{Decision-class}} object.
}
\description{
Add a binary decision to a conservation planning \code{\link{problem}}.
This is the classic decision of either prioritizing or not prioritizing a
planning unit. Typically, this decision has the assumed action of buying
the planning unit to include in a protected area network. If no decision is
added to a problem then this decision class will be used by default.
}
\details{
Conservation planning problems involve making decisions on planning units.
These decisions are then associated with actions (eg. turning a planning
unit into a protected area). If no decision is explicitly added to a problem,
then the binary decision class will be used by default. Only a single decision
should be added to a
\code{ConservationProblem} object. \strong{If multiple decisions are added
to a problem object, then the last one to be added will be used.}
}
\examples{
# create basic problem and using the default decision (binary)
p <- problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.1)
# manually specify a binary decision type
p2 <- p \%>\% add_binary_decisions()
# specify a proportion decision type
p3 <- p \%>\% add_proportion_decisions()
# specify a semicontinuous decision type
p4 <- p \%>\% add_semicontinuous_decisions(upper_limit=0.5)
\donttest{
# solve problem
s <- stack(solve(p), solve(p2), solve(p3), solve(p4))
# plot solutions
plot(s, main = c("default (binary)", "binary", "proportion",
"semicontinuous (upper=0.5)"))
}
}
\seealso{
\code{\link{decisions}}, \code{\link{add_proportion_decisions}},
\code{\link{add_semicontinuous_decisions}}, \code{\link{constraints}},
\code{\link{problem}}, \code{\link{targets}}, \code{\link{objectives}}
}
|
34fca7b470bb9babac980b396962dfacbe47e1c0
|
9ef1a637bba8e3d18e8d05ca6a34b53b478e10b3
|
/util.R
|
569dc7d102241c6899fa411bf02a4d4db22f94c6
|
[
"MIT"
] |
permissive
|
bgraf/mt-scripts
|
3f01e7f02a8d82f28cc864081c34f78d8ec52856
|
ea76785699ca699cf15f98c7cf3c5ae8b9bf8fc1
|
refs/heads/master
| 2021-01-11T09:42:16.078192
| 2017-02-24T06:46:56
| 2017-02-24T06:46:56
| 81,337,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,246
|
r
|
util.R
|
library(reshape)
library(dplyr)
library(ggplot2)
library(moments)
# Analysis-wide settings: minimum observations per class, plot sizes
# (inches) for ggsave, and the standard grey used in figures.
# NOTE(review): assignment style mixes `=` and `<-` here; `<-` is the
# conventional choice.
minObs = 10
width <- 5
height <- 5
widthFull <- 10
heightFull <- 10
width3 <- 3
height3 <- 3
stdGrey <- 'grey90'
# Load the raw per-class dataset from the working directory.
readDataset <- function() { read.csv('dataset.csv', header=T) }
# Load the dataset and return its per-Class column means.
# Delegates to aggByClass() instead of duplicating the aggregation logic
# (the original repeated the aggregate() call verbatim, with na.rm=T).
readAgg <- function() {
  aggByClass(readDataset())
}
# Column-wise means per Class. na.action=NULL keeps rows containing NAs so
# that NAs are dropped within each column's mean (na.rm) rather than
# discarding whole rows. Uses TRUE instead of the reassignable alias T.
aggByClass <- function(df) {
  aggregate(. ~ Class, data = df, function(x) mean(x, na.rm = TRUE), na.action = NULL)
}
# Replace NA coverage figures with 0 across all coverage columns.
# NAs arise when a class has no goals of a given kind; treat that as zero
# coverage rather than missing data. The six copy-pasted assignments of
# the original are folded into one loop over the column names.
adjustCoverageNAs <- function(df) {
  coverage_cols <- c("BranchCoverage", "MethodCoverage", "LineCoverage",
                     "BranchCovered", "MethodCovered", "LineCovered")
  for (col in coverage_cols) {
    df[is.na(df[[col]]), col] <- 0
  }
  df
}
colAdmin <- c(
"Project",
"Class",
"Seed"
)
colRes <- c(
"TestCnt",
"TestLen",
"LineGoals",
"LineCovered",
"LineCoverage",
"MethodGoals",
"MethodCovered",
"MethodCoverage",
"BranchGoals",
"BranchCovered",
"BranchCoverage"
)
colEvo <- c(
"CR",
"PIC",
"NSC",
"FF",
"NSCFCS",
"NumIter"
)
colMetric <- c(
"AD",
"CBO",
"CBOI",
"CC",
"CCL",
"CCO",
"CD",
"CI",
"CLC",
"CLLC",
"CLOC",
"DIT",
"DLOC",
"LCOM5",
"LDC",
"LLDC",
"LLOC",
"LOC",
"NA.",
"NG",
"NII",
"NL",
"NLA",
"NLE",
"NLG",
"NLM",
"NLPA",
"NLPM",
"NLS",
"NM",
"NOA",
"NOC",
"NOD",
"NOI",
"NOP",
"NOS",
"NPA",
"NPM",
"NS",
"PDA",
"PUA",
"RFC",
"TCD",
"TCLOC",
"TLLOC",
"TLOC",
"TNA",
"TNG",
"TNLA",
"TNLG",
"TNLM",
"TNLPA",
"TNLPM",
"TNLS",
"TNM",
"TNOS",
"TNPA",
"TNPM",
"TNS",
"WMC",
"cWMC",
"cDIT",
"cNOC",
"cCBO",
"cRFC",
"cLCOM",
"cCa",
"cNPM",
"mMethodCnt",
"mCMethodCnt",
"mBranchCnt",
"mBranchInstrCnt",
"mPMethodCnt",
"mPCMethodCnt",
"mSMethodCnt",
"mSCMethodCnt",
"mSPMethodCnt",
"mSPCMethodCnt",
"mFieldCnt",
"mPFieldCnt",
"mIFEQs",
"mIFNEs",
"mIFLTs",
"mIFLEs",
"mIFGTs",
"mIFGEs",
"mICMPEQs",
"mICMPNEs",
"mICMPLTs",
"mICMPGEs",
"mICMPGTs",
"mICMPLEs",
"mACMPEQs",
"mACMPNEs",
"mIFNULLs",
"mIFNONNULLs"
)
# limitedCorrPlot <- function(mat, keep, digits = 2) {
# mat <- mat[!(rownames(mat) %in% keep), colnames(mat) %in% keep]
# mat <- round(mat, digits = digits)
# dat <- melt(mat)
#
# dat$X1 <- factor(dat$X1, levels=setdiff(rownames(mat), keep))
# dat$X2 <- factor(dat$X2, levels=keep)
# ggplot(dat, aes(X2, X1, fill = value)) +
# geom_tile() +
# coord_equal() +
# theme(aspect.ratio = 1) +
# geom_text(aes(X2, X1, label = value), color = "black", size = 4) +
# labs(x = NULL, y = NULL) +
# scale_fill_gradient2(
# name = NULL,
# low = "blue",
# high = "red",
# mid = "white",
# midpoint = 0,
# limit = c(-1, 1),
# space = "Lab"
# ) +
# theme_minimal()
# }
#
# keep <- c('TC', 'BC', 'MC', 'LC')
#
# limitedCorrPlot(mPears, keep)
# ggsave('sizeMetricsVsResults-tab-pears.pdf', width=width, height=height)
#
# limitedCorrPlot(mSpear, keep)
# ggsave('sizeMetricsVsResults-tab-spear.pdf', width=width, height=height)
|
6c0fbe932ac2197e7cb917788c5ad992137a52b5
|
f96a5c7d44e7ea728495ccae1c66ad9a6ce4fc96
|
/multiWayANOVA/powerWithAndWithoutBlocking.R
|
03fb150228978310b1f63004ead5333ff7925bf1
|
[] |
no_license
|
STAT6302/lectures
|
6ed787958ce1722796005bf3d042496c61642b20
|
3250462937a5b36483a92713f26ae1f8ee024965
|
refs/heads/master
| 2021-05-14T08:49:02.333892
| 2018-05-01T15:35:27
| 2018-05-01T15:35:27
| 116,308,977
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
powerWithAndWithoutBlocking.R
|
# Monte Carlo estimate of the rejection rate (power / type-I error) for the
# species and fertilizer effects in a two-way design with 6 blocks, fitted
# WITHOUT the block term. X is the full design matrix used to generate Y.
n = 48
X = matrix(0,nrow=n,ncol=13)
colnames(X) = c('mu','A','B','Control','F1','F2','F3','B1','B2','B3','B4','B5','B6')
# Intercept, two species halves, four fertilizers repeated per species.
X[,1] = rep(1,n)
X[,2] = c(rep(1,n/2),rep(0,n/2))
X[,3] = c(rep(0,n/2),rep(1,n/2))
X[,4] = rep(c(rep(1,n/8),rep(0,n/8),rep(0,n/8),rep(0,n/8)),2)
X[,5] = rep(c(rep(0,n/8),rep(1,n/8),rep(0,n/8),rep(0,n/8)),2)
X[,6] = rep(c(rep(0,n/8),rep(0,n/8),rep(1,n/8),rep(0,n/8)),2)
X[,7] = rep(c(rep(0,n/8),rep(0,n/8),rep(0,n/8),rep(1,n/8)),2)
# Block indicators: each run of 6 rows cycles through blocks B1..B6.
X[1:6,8:13] = diag(1,6)
X[7:12,8:13] = diag(1,6)
X[13:18,8:13] = diag(1,6)
X[19:24,8:13] = diag(1,6)
X[25:30,8:13] = diag(1,6)
X[31:36,8:13] = diag(1,6)
X[37:42,8:13] = diag(1,6)
X[43:48,8:13] = diag(1,6)
# Factor version of the design used for fitting (block deliberately
# omitted from the model below).
XdfBlock = data.frame('species' = c(rep('A',n/2),rep('B',n/2)),
                      'fertilizer' = c(rep('Control',6),rep('F1',6),rep('F2',6),rep('F3',6),
                                       rep('Control',6),rep('F1',6),rep('F2',6),rep('F3',6)),
                      'block' = rep(c('B1','B2','B3','B4','B5','B6'),8))
###
# sim parms
###
nSweep = 2000
alpha = 0.05
sig = 1
####
####
# Per-sweep ANOVA p-values for the species and fertilizer terms.
alphaVec_species = rep(0,nSweep)
alphaVec_fert = rep(0,nSweep)
set.seed(10)
for(j in 1:nSweep){
  # NOTE(review): the first two `mu` assignments are dead code -- the third
  # overwrites them each iteration, so only the "No block nor main effect"
  # scenario is actually simulated. Uncomment/reorder to switch scenarios.
  mu = c(12,1,2,0,2,1,-3,-6,-4,-2,0,3,6)# Substantial block effect
  #mu = c(12,1,2,0,2,1,-3,rep(0,6))# No block effect
  mu = c(12,1,1,-2,-2,-2,-2,c(-6,-6,-6,6,6,6)/6)# No block nor main effect
  Y = X %*% mu + rnorm(n,0,sig)
  model = lm(Y ~ species + fertilizer,
             data = XdfBlock)
  alphaVec_species[j] = anova(model)[[5]][1]
  alphaVec_fert[j] = anova(model)[[5]][2]
}
# Empirical rejection rates at level alpha.
mean(alphaVec_fert < alpha)
mean(alphaVec_species < alpha)
|
00f17c6fd647beb01a101a15e72ddef40a66109c
|
1e5ba5e4557483a9f52584e54cc6812258fa0153
|
/Scripts/Predictions.R
|
367c2b8a83ae2e1275ceb63fc7068c9eae26c997
|
[] |
no_license
|
leticiaamarcal/Predicting-Profitability
|
1cbfc6e4bb17fad2632e39192895384be1d6823b
|
99b6b049ad9066f758872cbbe1cfa1acca444800
|
refs/heads/master
| 2020-08-19T23:06:15.867526
| 2019-10-18T07:21:58
| 2019-10-18T07:21:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,443
|
r
|
Predictions.R
|
#------------------------------------------------------------------------------------
# Goal: make predictions
# Description: apply the trained models to predict volumes for the new products
# Developer: Letícia Marçal
#------------------------------------------------------------------------------------
# Load the preprocessing/training script; RFmodel, SVMmodel, KNNmodel
# and LRmodel used below are presumably defined there -- verify.
source("Scripts/TrainModels.R")
# Upload data (absolute, machine-specific path).
newData <- read.csv("C:/Users/letic/Desktop/newproductattributes2017.csv")
# One-hot encode every column with dummyVars (presumably caret, loaded
# by the sourced script -- confirm) and materialise as a data frame.
newDFP <- dummyVars(" ~ .", data = newData)
newProducts <- data.frame(predict(newDFP, newdata = newData))
# Drop columns not used by the models.  NOTE(review): hard-coded column
# indices are fragile if the input schema changes.
newProducts <- newProducts[,-c(1, 2, 3, 4, 8, 9, 11, 12, 14, 15, 17, 19, 21, 22, 23, 24, 25, 26, 27, 29)]
# Rename the four product-type dummy columns.
colnames(newProducts)[1] <- 'Laptop'
colnames(newProducts)[2] <- 'Netbook'
colnames(newProducts)[3] <- 'PC'
colnames(newProducts)[4] <- 'Smartphone'
# Use the product number as the row ID...
rownames(newProducts) <- newProducts$ProductNum
# ...then drop the ProductNum column itself (column 5 after the drops).
newProducts <- newProducts[,-5]
# Random Forest and linear SVM were the best models, so the predictions
# are made with them.
### Predictions with Random Forest
newProducts$VolumeRF <- predict(RFmodel, newProducts)
### Predictions with linear SVM
newProducts$VolumeSVM <- predict(SVMmodel, newProducts)
## Also run the other models, just in case.
## k-NN
newProducts$VolumeKNN <- predict(KNNmodel, newProducts)
## Linear Regression LRmodel
newProducts$VolumeLR <- predict(LRmodel, newProducts)
|
047e05f94acb8736f8a17d255f247c9a14fbcd25
|
aef5b0480c8c972e4bad075b15942f70479cc584
|
/reading local files.R
|
12ae421815dbc82457ab7fbd05d5ac57b1e0961c
|
[] |
no_license
|
teofizzy/Fizzyshock
|
c09e052a9e5b0435f794b4af54ae9967e20799c1
|
6c434c0192880af588eebcc7cba2324f6648769b
|
refs/heads/main
| 2023-07-16T21:24:27.094309
| 2021-08-24T06:19:47
| 2021-08-24T06:19:47
| 369,437,671
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 329
|
r
|
reading local files.R
|
# Create a local "data" directory on first run.
if (!file.exists("data")) {
  dir.create("data")
}

# NOTE(review): the original script used single backslashes in Windows
# paths ("C:\Users\..."), which are invalid escape sequences in R string
# literals and prevent the file from even parsing.  Forward slashes work
# on Windows and are portable.
setwd("C:/Users/hp/Downloads")
getwd()

# Read the cameras CSV; quote = "" disables quote handling.
cameras <- read.csv(
  "C:/Users/hp/Documents/R/datasciencecoursera repo/Fizzyshock/Fixed_Speed_Cameras.csv",
  quote = ""
)

library(xlsx)
library(csv)

# read.xlsx() requires the sheet to read; the original call omitted
# sheetIndex, which errors.  Sheet 1 is assumed here -- confirm.
trade <- read.xlsx(
  "C:/Users/hp/Downloads/Foreign Trade Summary (Ksh Million) (1).xlsx",
  sheetIndex = 1,
  header = TRUE
)
|
c07e57d2f305f888750634c2d1296de82fd493d7
|
9b67199df14059ab543e777aac16e78a441656ef
|
/HypothesisFour.R
|
b0c0395380c0194a9593d06644a5b3dd75db6680
|
[] |
no_license
|
megzcalvert/AMtoo
|
608b5bcac2ffd708fc3e3e3c4ab979408bb748d8
|
4a900df8f3edd3defd31e08f5950cc8b2271295e
|
refs/heads/master
| 2021-06-26T03:07:08.958953
| 2020-10-18T16:20:05
| 2020-10-18T16:20:05
| 153,009,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,109
|
r
|
HypothesisFour.R
|
## NOTE(review): clears the entire global environment on source -- fine
## for an interactive analysis script, hostile when sourced from other
## code.  ls() just echoes the (now empty) workspace.
rm(list = objects())
ls()
library(readr)
library(data.table)
library(tidyverse)
library(janitor)
library(tidylog)
library(broom)
library(Hmisc)
library(psych)
##### Set up work space ####
#### Theme set
## Project-wide ggplot2 theme: theme_minimal() with enlarged axis,
## legend and title text, solid black axis lines/ticks, dotted grey
## major grid lines and boxed facet strips.  %+replace% substitutes the
## listed theme elements wholesale rather than merging with the parent.
custom_theme <- theme_minimal() %+replace%
  theme(
    axis.title = element_text(
      colour = "black",
      size = rel(2)
    ),
    axis.title.x = element_text(
      vjust = 0,
      margin = margin(
        t = 0, r = 0.25,
        b = 0, l = 0,
        unit = "cm"
      )
    ),
    axis.title.y = element_text(
      vjust = 1,
      angle = 90,
      margin = margin(
        t = 0, r = 0.25,
        b = 0, l = 0.1,
        unit = "cm"
      )
    ),
    axis.text = element_text(
      colour = "black",
      size = rel(1.5)
    ),
    axis.ticks = element_line(colour = "black"),
    axis.ticks.length = unit(3, "pt"),
    axis.line = element_line(
      color = "black",
      size = 0.5
    ),
    legend.key.size = unit(4, "lines"),
    # legend.background = element_rect(fill = NULL, colour = NULL),
    # legend.box = NULL,
    legend.margin = margin(
      t = 0, r = 0.75,
      b = 0, l = 0.75,
      unit = "cm"
    ),
    legend.text = element_text(size = rel(2)),
    legend.title = element_text(size = rel(1.5)),
    panel.grid.major = element_line(
      colour = "#969696",
      linetype = 3
    ),
    panel.grid.minor = element_blank(),
    plot.tag = element_text(
      size = rel(2),
      margin = margin(
        t = 0.1, r = 0.1,
        b = 0.1, l = 0.1,
        unit = "cm"
      )
    ),
    plot.margin = margin(
      t = 0.5, r = 0.5,
      b = 0.5, l = 0,
      unit = "cm"
    ),
    plot.title = element_text(
      colour = "black",
      size = rel(3),
      vjust = 0,
      hjust = 0,
      margin = margin(
        t = 0.25, r = 0.25,
        b = 0.5, l = 0.25,
        unit = "cm"
      )
    ),
    strip.background = element_rect(
      fill = "white",
      colour = "black",
      size = 1
    ),
    strip.text = element_text(
      colour = "black",
      size = rel(1)
    ),
    complete = F  # NOTE(review): T/F shorthand; TRUE/FALSE is safer style
  )
theme_set(custom_theme)
## Working directory is machine-specific; all paths below are relative
## to it.
getwd()
setwd("~/Dropbox/Research_Poland_Lab/AM Panel/")
set.seed(1642)
#### Loading data ####
## Long-format HTP phenotype tables (one row per plot x date x trait):
## the pipelines below read columns ID, Date, value and GRYLD.
pheno17 <- fread("./Phenotype_Database/pheno17_htpLong.txt")
pheno18 <- fread("./Phenotype_Database/pheno18_htpLong.txt")
pheno19 <- fread("./Phenotype_Database/pheno19_htpLong.txt")
colnames(pheno17)
# Check
## Spot check: the NDVI-vs-yield correlation on a single date should
## match the corresponding row produced by the nested pipeline below.
trial <- pheno17 %>%
  filter(ID == "NDVI") %>%
  filter(Date == "2017-03-31")
cor.test(trial$value, trial$GRYLD)
# Per-date, per-trait correlation between each vegetation-index value
# and grain yield (GRYLD); broom::glance() spreads the cor.test result
# (estimate, conf.low/conf.high, p.value, ...) into columns.
#
# The three seasons repeat the same pipeline, so it is factored into a
# helper.  `drop_cols` names the identifier columns to discard before
# nesting (they differ between 2017 and 2018/2019).
correlate_vi_gryld <- function(pheno, drop_cols) {
  out <- pheno %>%
    tidylog::select(-all_of(drop_cols)) %>%
    group_by(Date, ID) %>%
    nest() %>%
    mutate(
      correlation = map(data, ~ cor.test(.x$GRYLD, .x$value)),
      tidyCor = map(correlation, glance)
    ) %>%
    unnest(tidyCor) %>%
    tidylog::select(-data, -correlation)
  # Dates arrive as character; convert once here instead of per season.
  out$Date <- as.Date(out$Date)
  out
}

nested17 <- correlate_vi_gryld(pheno17, c("Plot_ID", "Variety"))
# 2018 also contains a "height" trait that is not a vegetation index;
# it was excluded in the original analysis as well.
nested18 <- pheno18 %>%
  filter(ID != "height") %>%
  correlate_vi_gryld(c("entity_id", "Variety", "year"))
nested19 <- correlate_vi_gryld(pheno19, c("entity_id", "Variety", "year"))
# One correlation-vs-date panel per season: point estimate per trait
# with its confidence-interval error bars, a dashed zero reference line
# and weekly date breaks.  The three original chains differed only in
# the input table and the year in the title.
plot_vi_correlation <- function(nested, season) {
  nested %>%
    ggplot(aes(x = Date, y = estimate, color = ID)) +
    geom_point(size = 2) +
    geom_errorbar(aes(ymin = conf.low, ymax = conf.high), size = 1) +
    geom_hline(
      yintercept = 0, linetype = 2,
      colour = "darkgrey"
    ) +
    scale_x_date(
      date_breaks = "1 week",
      date_labels = "%d%b"
    ) +
    labs(title = paste("Correlation with CI", season)) +
    ylab("Pearson correlation co-efficient")
}

# Top-level calls, so each plot is auto-printed exactly as before.
plot_vi_correlation(nested17, "2017")
plot_vi_correlation(nested18, "2018")
plot_vi_correlation(nested19, "2019")
## Persist the per-season correlation tables as tab-separated text with
## a header row, no quoting and no row names.
correlation_files <- list(
  "./Phenotype_Database/Correlation_VI_2017.txt" = nested17,
  "./Phenotype_Database/Correlation_VI_2018.txt" = nested18,
  "./Phenotype_Database/Correlation_VI_2019.txt" = nested19
)
for (path in names(correlation_files)) {
  write.table(correlation_files[[path]], path,
    sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE
  )
}
|
b58fb11c12c5580bdea4b2f7825080ed2ecdeda9
|
ffe4995426a090251aaf49f1ea4867cfee9df337
|
/_script/data-design.R
|
68b96b502fee544bf187f93a0422cb95e92252a4
|
[] |
no_license
|
therimalaya/PhD-Dissertation
|
3222144d746a52f6f52d39af1b0c8c5742eef4ef
|
ada2fa2be3ca81f28545f029544d6b6805d0a195
|
refs/heads/master
| 2020-08-13T20:12:06.967860
| 2019-10-14T11:56:42
| 2019-10-14T11:56:42
| 215,030,990
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,948
|
r
|
data-design.R
|
## Tiles of the study-design overview: six experimental design factors
## followed by one tile per response (Y1-Y4).
factor_labels <- c(
  "Number of Predictors\np:50 and 250",
  "Level of Multicollinearity\ngamma: 0.2 and 0.9",
  "Level of Response Correlation\neta: 0, 0.4, 0.8 and 1.2",
  "Position of relevant predictor components\nrelpos: 1, 2, 3, 4 and 5, 6, 7, 8",
  "Methods\n PCR, PLS1, PLS2, Xenv and Senv",
  "Number of Components\n 0, 1, 2, ..., 10"
)
response_labels <- paste0(
  "Min. Prediction Error (Min. Components)\n",
  "Corresponding to Response Y", 1:4
)
df <- data.frame(
  id = factor(1:10, levels = 1:10),
  x = c(factor_labels, response_labels),
  fct = c(rep("Factors", 6), paste0("Y", 1:4)),
  fill = c(rep("Factors", 6), rep("Y", 4))
)

## One tile per entry with its label written vertically inside it,
## facetted so the response tiles sit in their own panels.
data_design_tbl <- ggplot(df, aes(x, y = 0)) +
  geom_tile(
    aes(fill = fill),
    alpha = 0.3,
    show.legend = FALSE,
    color = "black"
  ) +
  geom_text(
    aes(label = x),
    angle = 90, hjust = 1, nudge_y = 0.45,
    family = "mono", size = rel(6), lineheight = 0.9, parse = FALSE
  ) +
  facet_grid(. ~ fct, drop = TRUE, scales = "free_x", space = "free_x") +
  theme_void(base_size = 16) +
  theme(
    strip.text = element_text(
      size = rel(1.5),
      margin = unit(c(2, 0, 3, 0), "mm"),
    ),
    strip.background = element_rect(
      fill = "lightgrey",
      color = "darkgrey",
      size = 1
    ),
    panel.spacing = unit(0, "mm"),
    plot.margin = unit(c(0, 0, 0, 0), "mm"),
    plot.subtitle = element_text(margin = unit(c(2, 0, 5, 0), "mm")),
    plot.title = element_text(size = rel(1.8))
  ) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  scale_fill_brewer(palette = "Set1") +
  labs(
    title = "Data for Further Analysis",
    subtitle = paste0(
      "Response can be Average Miniumum Error or\n",
      "Avg. Number of Components used to obtain Minimum Error"
    )
  )
# ggsave(plot = data_design_tbl, filename = "_images/data-design.svg", width = 7, height = 5.7, unit = "in")
|
baa9d018cdf4d94c6ea48c82519258e7c23bee59
|
21845f139f8c4dcc3a21b133a9d3e6620eb89bc4
|
/R/spikeTrainStats.R
|
d69581003b6529de8f5503eb6316026d0a0ece86
|
[] |
no_license
|
cran/STAR
|
257a4e63d61f3a5b664b1fa6770d2383096cddb9
|
c88668ba8a508206fdc5ef4406b6373c492f2806
|
refs/heads/master
| 2021-01-13T01:49:39.256279
| 2012-10-08T00:00:00
| 2012-10-08T00:00:00
| 17,693,622
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,542
|
r
|
spikeTrainStats.R
|
## Variance-time analysis of a spike train.
##
## For each window size the train is binned and the empirical count
## variance (s2) is compared with the variance expected from a
## homogeneous Poisson process of the same mean rate
## (sigma2 = rate * window size), together with normal-approximation
## confidence bounds for the Poisson prediction.
##
## Arguments:
##   spikeTrain  : spikeTrain object (coerced with as.spikeTrain if not).
##   CI          : one or two confidence levels in (0, 1); only the
##                 first two are used.
##   windowSizes : strictly positive window sizes.  When missing, a
##                 regular grid from 5 mean ISIs up to a tenth of the
##                 train duration is built; sizes >= a fifth of the
##                 duration are dropped (with a warning if supplied).
## Value: a list of class "varianceTime" with components s2, sigma2,
##   ciUp, ciLow, windowSizes, CI and call.
varianceTime <- function (spikeTrain,
                          CI = c(0.95, 0.99),
                          windowSizes) {
  if (!is.spikeTrain(spikeTrain)) spikeTrain <- as.spikeTrain(spikeTrain)
  train.l <- length(spikeTrain)
  lastTime <- spikeTrain[train.l]
  mean.rate <- train.l/(lastTime - spikeTrain[1])
  if (!missing(windowSizes)) {
    windowSizes <- sort(windowSizes)
    if (any(windowSizes <= 0))
      stop(paste("Elements of", deparse(substitute(windowSizes)),
                 "should be non-negative"))
    if (max(windowSizes) >= lastTime/5)
      warning(paste("Some elements of", deparse(substitute(windowSizes)),
                    "are too large and will be ignored"))
    windowSizes <- windowSizes[windowSizes < lastTime/5]
  } else {
    ## Default grid: smallest window holds about 5 spikes on average,
    ## largest is a tenth of the recording, in steps of the smallest.
    minSize <- 5/mean.rate
    maxSize <- lastTime/10
    windowSizes <- seq(minSize, maxSize, minSize)
    windowSizes <- windowSizes[windowSizes <= maxSize]
  }
  if (length(CI) > 2)
    CI <- CI[1:2]
  if (any(CI >= 1 | CI <= 0))
    stop(paste(deparse(substitute(CI)), "components should be in (0,1)"))
  ## One column per window size: empirical variance, Poisson variance,
  ## then upper and lower bounds per confidence level.  The bound uses
  ## Var(s2) ~ (2*sigma2^2 + sigma2)/nBins under the Poisson model.
  dataVariance <- sapply(windowSizes,
                         function(ws) {
                           nBins <- lastTime%/%ws
                           binSeq <- (0:nBins) * ws
                           counts <- hist(spikeTrain[spikeTrain > min(binSeq) & spikeTrain < max(binSeq)],
                                          breaks = binSeq, plot = FALSE)$counts
                           s2 <- var(counts)
                           sigma2 <- mean.rate * ws
                           ciUp <- sapply(CI, function(ci)
                             qnorm(1 - (1 - ci)/2, sigma2, sqrt((2*sigma2^2+sigma2)/nBins)))
                           ciLow <- sapply(CI, function(ci)
                             max(qnorm((1 - ci)/2, sigma2, sqrt((2*sigma2^2+sigma2)/nBins)), 0))
                           c(s2, sigma2, ciUp, ciLow)
                         }
                         )
  if (length(CI) == 1) {
    ## BUG FIX: the original assigned to 'resutl' here, leaving 'result'
    ## undefined, so the single-CI branch always failed at class(result)
    ## with "object 'result' not found".
    result <- list(s2 = dataVariance[1,],
                   sigma2 = dataVariance[2,],
                   ciUp = dataVariance[3,],
                   ciLow = dataVariance[4,],
                   windowSizes = windowSizes,
                   CI = CI,
                   call = match.call()
                   )
  } else {
    result <- list(s2 = dataVariance[1, ],
                   sigma2 = dataVariance[2,],
                   ciUp = dataVariance[c(3, 4), ],
                   ciLow = dataVariance[c(5,6), ],
                   windowSizes = windowSizes,
                   CI = CI,
                   call = match.call()
                   )
  }
  class(result) <- "varianceTime"
  return(result)
}
## Test whether an object looks like a "varianceTime" result: it must
## carry the class attribute and every named component must be one of
## the components produced by varianceTime().
is.varianceTime <- function(obj) {
  if (!inherits(obj, "varianceTime")) return(FALSE)
  expected <- c("s2", "sigma2", "ciUp", "ciLow",
                "call", "windowSizes", "CI")
  all(names(obj) %in% expected)
}
## S3 plot method for "varianceTime" objects (see varianceTime()).
## Draws the empirical variance-time curve together with the
## theoretical Poisson curve and its confidence region.
##
## Arguments:
##   x     : a varianceTime object.
##   style : "default" draws shaded confidence polygon(s); "Ogata"
##           draws dashed confidence lines and the data as "+" points.
##   unit  : time unit used in the default x-axis label.
##   xlab, ylab, main, sub, xlim, ylim : usual graphics arguments;
##           defaults are built from the object when missing.
##   ...   : further arguments passed to points()/lines() for the data.
plot.varianceTime <- function (x,
                               style = c("default", "Ogata"),
                               unit = "s", xlab, ylab, main,
                               sub, xlim, ylim,
                               ...) {
  varianceTimeObj <- x
  if (!is.varianceTime(varianceTimeObj))
    stop(paste(deparse(substitute(varianceTimeObj)), "is not a varianceTime object."))
  ## Build defaults only for the arguments the caller omitted.
  if (missing(xlab))
    xlab <- paste("Time (", unit, ")", sep = "")
  if (missing(ylab))
    ylab <- "Variance"
  if (missing(main))
    main <- "Estimated Variance-Time Curve and Theoretical Poisson"
  if (missing(xlim))
    xlim <- c(0, max(varianceTimeObj$windowSizes))
  if (missing(ylim)) {
    ylim <- c(0, max(c(varianceTimeObj$s2, varianceTimeObj$ciUp)) * 1.01)
  }
  if (missing(sub)) {
    ## Recover the confidence levels from the recorded call; fall back
    ## on the formal default of varianceTime() when CI was not given.
    if (is.null(varianceTimeObj$call[["CI"]])) {
      CItxt <- paste(eval(formals(varianceTime)$CI) * 100,collapse = " and ")
    } else {
      CItxt <- paste(eval(varianceTimeObj$call[["CI"]]) * 100, collapse = " and ")
    }
    sub <- paste("Conf. Interval at: ", CItxt, sep = "")
  }
  X <- varianceTimeObj$windowSizes
  if (style[1] == "Ogata") {
    ## Ogata style: empty frame, dashed CI lines, solid Poisson curve,
    ## empirical variances as "+" points.
    plot(X, varianceTimeObj$s2, type = "n", xlab = xlab,
         ylab = ylab, main = main, xlim = xlim, ylim = ylim,
         sub = sub, xaxs = "i", yaxs = "i")
    ## ciLow/ciUp are vectors for one confidence level but matrices
    ## (one row per level) for two -- see varianceTime().
    if (is.null(dim(varianceTimeObj$ciLow))) {
      lines(X, varianceTimeObj$ciLow, lty = 2)
      lines(X, varianceTimeObj$ciUp, lty = 2)
    } else {
      apply(varianceTimeObj$ciLow, 1, function(Y) lines(X,Y, lty = 2))
      apply(varianceTimeObj$ciUp, 1, function(Y) lines(X,Y, lty = 2))
    }
    lines(X, varianceTimeObj$sigma2)
    points(X, varianceTimeObj$s2, pch = 3, ...)
  } else {
    ## Default style: grey polygon(s) for the confidence region (the
    ## wider, second level drawn darker underneath), thick black
    ## Poisson curve, red empirical curve.
    plot(X, varianceTimeObj$s2, type = "n", xlab = xlab,
         ylab = ylab, main = main, xlim = xlim, ylim = ylim,
         xaxs = "i", yaxs = "i", sub = sub)
    if (is.null(dim(varianceTimeObj$ciUp))) {
      polygon(c(X, rev(X)),
              c(varianceTimeObj$ciUp,rev(varianceTimeObj$ciLow)),
              lty = 0,
              col = "grey80")
    } else {
      polygon(c(X, rev(X)),
              c(varianceTimeObj$ciUp[2,],rev(varianceTimeObj$ciLow[2, ])),
              lty = 0,
              col = "grey30")
      polygon(c(X, rev(X)),
              c(varianceTimeObj$ciUp[1,],rev(varianceTimeObj$ciLow[1, ])),
              lty = 0,
              col = "grey80")
    }
    lines(X, varianceTimeObj$sigma2, lwd = 2)
    lines(X, varianceTimeObj$s2, col = 2, lwd = 2, ...)
  }
}
## Auto-correlation of the inter-spike intervals (ISIs) of a spike
## train: coerces the argument to a spikeTrain, then delegates to
## stats::acf on the successive interval differences.  All acf
## arguments are forwarded unchanged; the plot title defaults to one
## built from the argument expression.
acf.spikeTrain <- function (spikeTrain,
                            lag.max = NULL,
                            type = c("correlation", "covariance", "partial"),
                            plot = TRUE,
                            na.action = na.fail,
                            demean = TRUE,
                            xlab = "Lag (in isi #)",
                            ylab = "ISI acf",
                            main,
                            ...) {
  if (!is.spikeTrain(spikeTrain)) {
    spikeTrain <- as.spikeTrain(spikeTrain)
  }
  intervals <- diff(spikeTrain)
  ## Same evaluation order as before: the default title is built after
  ## the possible coercion above.
  if (missing(main)) {
    main <- paste("Train", deparse(substitute(spikeTrain)), "ISI acf")
  }
  acf(intervals,
      lag.max = lag.max,
      type = type,
      plot = plot,
      na.action = na.action,
      demean = demean,
      xlab = xlab,
      ylab = ylab,
      main = main,
      ...)
}
## Graphical renewal test: if the train is a renewal process, the ranks
## of successive inter-spike intervals (ISIs) are independent.  Draws a
## 2 x 2 layout: rank-vs-rank scatter plots at lags 1 and 2, the ISI
## autocorrelation function, and a chi-square independence statistic per
## lag with its 95% acceptance band.
##
## Arguments:
##   spikeTrain   : spikeTrain object (coerced if necessary); at least
##                  50 events are required.
##   lag.max      : largest lag tested; default round(10*log10(#ISI)).
##   d            : number of rank classes per axis of the contingency
##                  tables.
##   orderPlotPch : plotting character for the scatter plots ("." for
##                  long trains to limit over-plotting).
##   ...          : passed to chisq.test().
renewalTestPlot <- function (spikeTrain,
                             lag.max=NULL,
                             d=max(c(2,sqrt(length(spikeTrain)) %/% 5)),
                             orderPlotPch=ifelse(length(spikeTrain)<=600,1,"."),
                             ...) {
  if (!is.spikeTrain(spikeTrain)) spikeTrain <- as.spikeTrain(spikeTrain)
  if (length(spikeTrain) < 50)
    stop(paste(deparse(substitute(spikeTrain)), "contains less than 50 events."))
  ## 2 x 2 panel layout; graphical parameters restored on exit.
  m <- matrix(c(1:4), nrow = 2, ncol = 2, byrow = TRUE)
  oldpar <- par(mar = c(5, 4, 2, 2))
  layout(m)
  on.exit(par(oldpar))
  ## Normalised ISI ranks (values in (0, 1]).
  isi <- diff(spikeTrain)
  isi.o <- rank(isi)/length(isi)
  isi.l <- length(isi)
  if (is.null(lag.max))
    lag.max <- round(10 * log10(isi.l))
  lag.max <- min(isi.l - 1, lag.max)
  grid <- seq(0, 1, length.out = d + 1)
  ## Chi-square independence test between ranks 'lag' intervals apart.
  ## The reference series is always the first isi.l - lag.max ranks, so
  ## every lag is tested on the same number of pairs.
  getChi2 <- function(lag) {
    isi.x <- isi.o[1:(isi.l - lag.max)]
    isi.y <- isi.o[(1 + lag):(isi.l - lag.max + lag)]
    isi.x <- as.integer(cut(isi.x, breaks = grid))
    isi.y <- as.integer(cut(isi.y, breaks = grid))
    counts <- matrix(0, nrow = d, ncol = d)
    for (i in seq(along.with = isi.x))
      counts[isi.x[i], isi.y[i]] <- counts[isi.x[i], isi.y[i]] + 1
    chisq.test(counts, ...)
  }
  chi2seq <- lapply(1:lag.max, getChi2)
  ## 95% acceptance band; the degrees of freedom are the same at every
  ## lag, so the first test's parameter is used.
  minChi2 <- qchisq(0.025, df = chi2seq[[1]]$parameter)
  maxChi2 <- qchisq(0.975, df = chi2seq[[1]]$parameter)
  chi2V <- sapply(chi2seq, function(l) l$statistic)
  outOf95 <- chi2V < minChi2 | chi2V > maxChi2
  ## Panel 1: rank of ISI k+1 against rank of ISI k, with the class
  ## boundaries of the contingency tables drawn in grey.
  plot(isi.o[-length(isi.o)] * isi.l, isi.o[-1] * isi.l, xlab = quote(O[k]),
       ylab = quote(O[k + 1]), main = "Order Statistic Correlation at Lag 1",
       type = "n")
  sapply(grid[-c(1, d + 1)],
         function(idx) {
           abline(v = idx * isi.l, col = "grey")
           abline(h = idx * isi.l, col = "grey")
         }
         )
  points(isi.o[-length(isi.o)] * isi.l, isi.o[-1] * isi.l,
         pch = orderPlotPch)
  ## Panel 2: same display at lag 2.
  plot(isi.o[-(0:-1 + length(isi.o))] * isi.l,
       isi.o[-(1:2)] * isi.l,
       xlab = quote(O[k]), ylab = quote(O[k + 2]),
       main = "Order Statistic Correlation at Lag 2",
       type = "n")
  sapply(grid[-c(1, d + 1)],
         function(idx) {
           abline(v = idx * isi.l, col = "grey")
           abline(h = idx * isi.l, col = "grey")
         }
         )
  points(isi.o[-(0:-1 + length(isi.o))] * isi.l,
         isi.o[-(1:2)] * isi.l,
         pch = orderPlotPch)
  ## Panel 3: ISI autocorrelation function.
  acf.spikeTrain(spikeTrain,
                 lag.max = lag.max, main = "")
  ## Panel 4: chi-square statistic per lag; the grey polygon is the 95%
  ## band, and points outside it are drawn as grey triangles.
  plot(1:lag.max, chi2V, type = "n",
       xlab = "Lag (in isi #)",
       ylab = quote(chi^2),
       main = expression(paste(chi^2, " Statistics")),
       xlim = c(0, lag.max),
       ylim = c(min(qchisq(0.01, df = chi2seq[[1]]$parameter), min(chi2V)),
                max(qchisq(0.99, df = d^2 - 2 * (d - 1) - 1), max(chi2V)))
       )
  polygon(c(0, 0, lag.max + 1, lag.max + 1),
          c(minChi2, maxChi2, maxChi2, minChi2),
          col = "grey80", lty = 0
          )
  points(1:lag.max, chi2V,
         pch = c(16, 17)[outOf95 + 1],
         col = c("black", "grey30")[outOf95 + 1]
         )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.