blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3476e07ef8c391f7e11aaa3c6049d8137061a0fb
|
6b46e9d05bbf0622728f1034174732003d80bdd7
|
/R/group.R
|
5ddfe7639ed2ecc01b3d46298d935c500c6d5df4
|
[
"MIT"
] |
permissive
|
gwb/RGroupFormation
|
314c5f1b738512b734dd327fba5631fca697adb2
|
f28dd330817efe7e4c71717dc8e96386f7fd643c
|
refs/heads/master
| 2022-12-12T11:00:39.111760
| 2020-09-07T09:37:48
| 2020-09-07T09:37:48
| 287,511,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,876
|
r
|
group.R
|
##suppressMessages(library(purrr))
#' Generates a random permutation
#'
#' If \code{x} is an integer, then returns an element of the symmetric
#' group on \code{x} elements. If \code{x} is a vector, then generates a
#' permutation of the elements of \code{x}.
#'
#' @param x An integer or a vector.
#' @return A vector representing a permutation.
#' @examples
#' rp(5)
#' rp(c(1, 4, 6, 10, 2))
#' @export
rp <- function(x) {
  ## A length-one x is interpreted as the number of elements to permute
  ## (mirroring sample()'s scalar convention); otherwise x itself is the
  ## set whose elements get shuffled.
  universe <- if (length(x) == 1) seq(x) else x
  sample(universe)
}
## Count occurrences of each distinct value in a vector.
## Returns a named vector: one entry per distinct value of v.ls, in order
## of first appearance, holding the number of times it occurs.
count.uniques <- function(v.ls) {
  distinct.vals <- unique(v.ls)
  counts <- sapply(distinct.vals, function(val) sum(v.ls == val))
  names(counts) <- distinct.vals
  counts
}
## Random stratified permutation on a pre-formatted (character) attribute
## vector: positions are shuffled only within groups sharing the same label.
## Every stratum must contain at least two elements, because rp() treats a
## length-one input as an integer count rather than a singleton set.
.rsp <- function(A) {
  stopifnot(all(count.uniques(A) > 1))
  res <- rep(NA, length(A))
  for (label in unique(A)) {
    members <- which(A == label)
    res[members] <- rp(members)
  }
  res
}
#'@importFrom purrr array_branch
#' @importFrom purrr map_chr
#' @importFrom purrr %>%
# Normalise an attribute specification to a character vector.
#
# A matrix is collapsed row-wise: each row becomes one string with its
# entries joined by "." (rows carrying equal attribute combinations map to
# equal labels). A numeric vector is simply converted to character; any
# other input is rejected.
#
# NOTE(review): the dot in the name makes this look like an S3 `format`
# method for a class "attributes"; it is actually a plain helper function.
format.attributes <- function(A) {
if(is.matrix(A)) {
## one label per row, e.g. row c(1, 2) becomes "1.2"
new.A <- array_branch(A, 1) %>% map_chr(~ paste0(., collapse="."))
} else if(is.numeric(A)){
new.A <- as.character(A)
} else {
stop("unsupported format")
}
return(new.A)
}
#' Generates a random stratified permutation
#'
#' \code{rsp} returns an element of the stabilizer of \code{A} in the
#' symmetric group on \code{|A|} elements.
#'
#' @param A A vector.
#' @return A vector representing a permutation
#' @examples
#' A <- c(0, 0, 0, 0, 1, 1, 1, 1)
#' rsp(A)
#' @export
rsp <- function(A) {
  ## normalise A to character labels, then permute within each stratum
  .rsp(format.attributes(A))
}
#' Group action operator
#'
#' Applies a permutation \code{s} to a vector \code{v}.
#'
#' @param s A vector representing a permutation.
#' @param v A vector of the same length as \code{s}.
#' @return A vector of the same length as \code{s} and \code{v}.
#' @examples
#' X <- seq(8)
#' A <- c(0, 0, 0, 0, 1, 1, 1, 1)
#' rsp(A) %p% X
#' rp(8) %p% X
#' @export
`%p%` <- function(s, v) {
  ## the permuted vector: position i of the result holds v[s[i]]
  stopifnot(length(s) == length(v))
  v[s]
}
## Invert a permutation.
## s must be a permutation of seq_along(s); the result t satisfies
## t[s[i]] == i for every i. order(s) is exactly the sorting index that
## sort(s, index.return = TRUE)$ix would produce (no ties are possible).
inv <- function(s) {
  stopifnot(setequal(s, seq_along(s)))
  order(s)
}
#' Pretty print permutations
#'
#' \code{pprint} shows both the permutation and its inverse. The column
#' labelled \code{p.i} shows the image of the column labelled \code{i} while
#' the column labelled \code{pm.i} shows the inverse image of \code{i}.
#'
#' @param s A vector representing a permutation.
#' @return A dataframe with 3 columns: \code{i}, \code{p.i}, \code{pm.i}.
#' @export
pprint <- function(s) {
## i: positions; pm.i: s itself (inverse image); p.i: inv(s) (image of i)
res <- data.frame(i = seq_along(s), pm.i = s, p.i = inv(s))
return(res)
}
|
d67afb7cba9635292c968160684ef4144a13fb12
|
f96af69ed2cd74a7fcf70f0f63c40f7725fe5090
|
/MonteShaffer/humanVerseWSU/humanVerseWSU/man/whichMin.Rd
|
c0936ad184b4ce4f5671d52f7806a7be34a652fe
|
[
"MIT"
] |
permissive
|
sronchet/WSU_STATS419_2021
|
80aa40978698305123af917ed68b90f0ed5fff18
|
e1def6982879596a93b2a88f8ddd319357aeee3e
|
refs/heads/main
| 2023-03-25T09:20:26.697560
| 2021-03-15T17:28:06
| 2021-03-15T17:28:06
| 333,239,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 771
|
rd
|
whichMin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions-vector.R
\name{whichMin}
\alias{whichMin}
\title{whichMin}
\usage{
whichMin(x)
}
\arguments{
\item{x}{numeric vector}
}
\value{
numeric vector that contains the indexes of *all* min elements, not just the *first*
}
\description{
behaves like which.min(x) but returns multiple indexes if required.
}
\examples{
which.min( c(23, presidents[1:30], 23) );
whichMin( c(23, presidents[1:30], 23) );
}
\seealso{
Other Vectors:
\code{\link{doUnique}()},
\code{\link{findAllIndexesWithValueInVector}()},
\code{\link{freqMax}()},
\code{\link{freqMin}()},
\code{\link{notDuplicated}()},
\code{\link{whichMaxFreq}()},
\code{\link{whichMax}()},
\code{\link{whichMinFreq}()}
}
\concept{Vectors}
|
aa2198287739539b073eeb3bb1b0507683e8a424
|
b61c793564f2197ea1f076cabc990f81baccec8f
|
/man/grepv.Rd
|
e40f3d1252da571f81dea6e1513ee980aac64808
|
[
"MIT"
] |
permissive
|
tkonopka/shrt
|
46fabfcbfd3819a9016b412f1a7b91f4ba88c28b
|
eeef8bf50aee0412b5feff427c12ba2eec17332d
|
refs/heads/master
| 2020-05-21T17:48:37.016989
| 2020-02-28T06:26:33
| 2020-02-28T06:26:33
| 60,825,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 629
|
rd
|
grepv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{grepv}
\alias{grepv}
\title{Pattern matching}
\usage{
grepv(pattern, x, value = T, ...)
}
\arguments{
\item{pattern}{character, pattern to look for}
\item{x}{character vector, object wherein to look for the pattern}
\item{value}{logical, set to TRUE to return the values that
match the pattern}
\item{...}{parameters passed on to grep}
}
\description{
This is a wrapper for grep. By default it returns the matching
values as opposed to the indices where the hits occur.
}
\details{
The name is short for: (grep) returning (v)alues.
}
|
c560af1e09c5a92fa7e7e43b7d877ef540c04ee8
|
05ba1456015c848734180d4419c0875cae0e7d96
|
/man/rowRanks.Rd
|
64098e67405cbd0ffc4153cfdc9bcf17e3be898a
|
[] |
no_license
|
bkmontgom/matrixStats
|
31ccdfeacc33120e1b3905e7f7a19866d919342a
|
43ed438a0114f67c3bd5174e75e15c6b2f07b7bd
|
refs/heads/master
| 2020-05-15T18:35:13.436117
| 2019-05-04T22:03:29
| 2019-05-05T02:36:24
| 182,430,860
| 0
| 0
| null | 2019-04-20T16:47:29
| 2019-04-20T16:47:29
| null |
UTF-8
|
R
| false
| true
| 3,332
|
rd
|
rowRanks.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rowRanks.R
\name{rowRanks}
\alias{rowRanks}
\alias{colRanks}
\title{Gets the rank of the elements in each row (column) of a matrix}
\usage{
rowRanks(x, rows = NULL, cols = NULL, ties.method = c("max",
"average", "first", "last", "random", "max", "min", "dense"),
dim. = dim(x), ...)
colRanks(x, rows = NULL, cols = NULL, ties.method = c("max",
"average", "first", "last", "random", "max", "min", "dense"),
dim. = dim(x), preserveShape = FALSE, ...)
}
\arguments{
\item{x}{A \code{\link[base]{numeric}} or \code{\link[base]{integer}} NxK
\code{\link[base]{matrix}}.}
\item{rows, cols}{A \code{\link[base]{vector}} indicating subset of rows
(and/or columns) to operate over. If \code{\link[base]{NULL}}, no subsetting
is done.}
\item{ties.method}{A \code{\link[base]{character}} string specifying how
ties are treated. For details, see below.}
\item{dim.}{An \code{\link[base]{integer}} \code{\link[base]{vector}} of
length two specifying the dimension of \code{x}, also when not a
\code{\link[base]{matrix}}.}
\item{...}{Not used.}
\item{preserveShape}{A \code{\link[base]{logical}} specifying whether the
\code{\link[base]{matrix}} returned should preserve the input shape of
\code{x}, or not.}
}
\value{
An \code{\link[base]{integer}} \code{\link[base]{matrix}} is
returned. The \code{rowRanks()} function always returns an NxK
\code{\link[base]{matrix}}, where N (K) is the number of rows (columns)
whose ranks are calculated.
The \code{colRanks()} function returns an NxK \code{\link[base]{matrix}}, if
\code{preserveShape = TRUE}, otherwise a KxN \code{\link[base]{matrix}}.
%% The mode of the returned matrix is \code{\link[base]{integer}}, except
%% for \code{ties.method == "average"} when it is
%% \code{\link[base]{double}}.
}
\description{
Gets the rank of the elements in each row (column) of a matrix.
}
\details{
The row ranks of \code{x} are collected as \emph{rows} of the result matrix.
The column ranks of \code{x} are collected as \emph{rows} if
\code{preserveShape = FALSE}, otherwise as \emph{columns}.
The implementation is optimized for both speed and memory. To avoid
coercing to \code{\link[base]{double}}s (and hence memory allocation), there
is a unique implementation for \code{\link[base]{integer}} matrices. It is
more memory efficient to do \code{colRanks(x, preserveShape = TRUE)} than
\code{t(colRanks(x, preserveShape = FALSE))}.
Any \code{\link[base]{names}} of \code{x} are ignored and absent in the
result.
}
\section{Missing and non- values}{
These are ranked as \code{NA}, as with
\code{na.last = "keep"} in the \code{\link[base]{rank}}() function.
}
\seealso{
\code{\link[base]{rank}}().
\code{\link[data.table]{frank}}() for ties.method 'dense'.
For developers, see also Section 'Utility functions' in
'Writing R Extensions manual', particularly the
native functions \code{R_qsort_I()} and \code{R_qsort_int_I()}.
}
\author{
Hector Corrada Bravo and Harris Jaffee. Peter Langfelder for adding
'ties.method' support. Brian Montgomery for adding more 'ties.method's.
Henrik Bengtsson adapted the original native
implementation of \code{rowRanks()} from Robert Gentleman's \code{rowQ()} in
the \pkg{Biobase} package.
}
\keyword{array}
\keyword{iteration}
\keyword{robust}
\keyword{univar}
|
fbdfc8ab6b2c04e8fbc146ebc264c428bf59ede0
|
a877efed473317cb676c7f7603aad960e658043e
|
/scr/rcourseday2.R
|
c0982e3dd9e58cfe67d95a981ca7b47b55528dce
|
[
"MIT"
] |
permissive
|
nreigl/R.TTU_2018
|
6ef238d595f0f1a123660e7c6f2db74524fdc8af
|
aeed6aea45d09e4f307e55280fa94a322df72896
|
refs/heads/master
| 2021-05-08T23:37:03.744642
| 2018-03-19T10:47:56
| 2018-03-19T10:47:56
| 119,717,970
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,790
|
r
|
rcourseday2.R
|
#' R course day 2/ Live coding
#' 10.Feb.2018
# Live-coding transcript: lines are meant to be run interactively one at a
# time, so several statements reference objects defined only later in the
# session (or deliberately error) for demonstration purposes.
# Recap day 1
mydata <- 5
rm(mydata)
myvector <- c(3,5, 4.3, 6)
# NOTE(review): gdp is not defined until much later in this script
mean(gdp$date)
dataframe$newvariable <- NULL
?functionname
# placeholder calls shown without arguments (will error if run as-is)
summary()
str()
head()
tail()
names(gdp)
#
# PIAAC survey data: basic summaries and a derived log-income variable
piaac <- read.csv("http://www.ut.ee/~iseppo/piaacest.csv")
mean(piaac$earnhr, na.rm = T)
median(piaac$earnhr, na.rm = T)
piaac$logincome<- log(piaac$earnhr)
cor(piaac$pvnum1, piaac$pvlit1, use="complete.obs")
# Getting data in
?read.csv
# argument demos only — NOTE(review): the first call is missing its closing
# parenthesis and none of the three calls name a file
read.csv(na.strings = c(NA, "", " ", ".")
read.csv(strip.white = FALSE)
read.csv(fileEncoding = )
#
# install.packages("eurostat")
library(eurostat)
eurostatgdp<- get_eurostat("tec00114")
View(head(eurostatgdp)) # view the first rows of the eurostat data
# for finance eg packages: "tidyquant" or "quandl"
# install.packages("wbstats")
library(wbstats)
#' this is a comment
#' this is the second line of the comment
wbsearch("GDP per capita, PPP")
gdp.pc <- wb(indicator = "NY.GDP.PCAP.PP.KD", POSIXct = TRUE)
# install.packages("devtools")
devtools::install_github("vincentarelbundock/WDI")
# quick demo dataset
mydata <- data.frame(animal = c("elephant", "bear", "cat"),
length = c(5,3,2),
weight = c(10, 5, 1))
mydata
# indexing demos: single cells, rows, columns, negative and logical indices
mydata$length[1]
mydata$weight[1] <- 12
mydata
mydata$length[c(2,3)]
mydata$length[-2]
mydata[1,2]
mydata[1,] # entire first row
mydata[, 3] # entire third column
mydata[c(2,3), ] # second and third row
mydata$animal[c(TRUE, FALSE, TRUE)]
# Conditional statements and logic in R
3>4
mydata$length > 2
mydata$animal[mydata$length > 2]
# NOTE(review): missing comma — this selects COLUMNS by the logical vector,
# not rows; row filtering would be mydata[mydata$length > 2, ]
mydata[mydata$length > 2]
mydata$length[mydata$animal == "elephant"] <- 100
# Basic comparison
x <- 3
z <- 4
x > 3
x >= 3
z < 4
z <= 4
x == z
x != z
is.na(x)
!is.na(x)
mydata$weight[1] <- NA
mydata
is.na(mydata$weight)
!is.na(mydata$weight)
x > 2 | z > 5
x > 2 & z > 5
mydata$length > 2 & mydata$length < 5
4 %in% c(2,3,4)
myanimals <- c("chicken", "duck", "bear")
"frog" %in% (myanimals)
# Filtering data
summary(piaac$edlevel3)
highschool <- subset(piaac, edlevel3 == "Medium")
summary(highschool$edlevel3)
highschool2 <- subset(piaac,
edlevel3 == "Medium"& livingwithspouse == "Yes")
mean(highschool2$pvnum1, na.rm = TRUE)
sample3 <- subset(piaac, edlevel3 %in% c("High", "Medium"))
summary(sample3)
str(sample3)
edlevel <- c("High", "Medium")
sample4 <- subset(piaac, edlevel3 %in% edlevel)
str(sample4)
piaac<- subset(piaac, !is.na(edlevel3))
gdp <- read.csv("http://www.ut.ee/~iseppo/gdpestimate.csv")
gdp$difference <- gdp$firstEstimate - gdp$latestEstimate
yearsIwant <- c("2012", "2013", "2014")
sample5 <- subset(gdp, year %in% yearsIwant)
str(sample5)
summary(sample5$year)
mean(sample5$difference)
names(piaac)
sample6 <- subset(piaac,
livingwithspouse == "Yes")
# two equivalent one-liners for the same conditional mean
mean(subset(piaac, livingwithspouse == "Yes")$earnhr, na.rm=T)
mean(piaac$earnhr[piaac$livingwithspouse=="Yes"], na.rm=T)
str(sample6)
mean(sample6$earnhr, na.rm = TRUE)
# NOTE(review): "34" is a character, so the comparison only behaves
# numerically if age is numeric (forcing coercion) — confirm age's type
sample7 <- subset(piaac, age > "34" & edlevel3 == "Medium")
str(sample7)
sample7$age
sample7$edlevel3
mean(sample7$earnhr, na.rm = TRUE)
# The tidyverse
## ggplot2
library(ggplot2)
gdp.pc
unique(gdp.pc$country)
gdp.pc.world <- subset(gdp.pc, country == "World")
gdp.pc.world
library(ggplot2)
# building up a plot layer by layer: points, then points + line
ggplot(data = gdp.pc.world) +
geom_point(aes(x = date_ct, y = value))
ggplot(data = gdp.pc.world) +
geom_point(aes(x = date_ct, y = value)) +
geom_line(aes(x=date_ct, y=value))
# shared aesthetics can be declared once in ggplot() instead of per layer
ggplot(data = gdp.pc.world, aes(x = date_ct, y = value)) +
geom_point() +
geom_line()
p.gdp <- ggplot(data = gdp.pc.world,
aes(x = date_ct, y = value)) +
geom_point() +
geom_line()
p.gdp
p.1 <- ggplot(data = gdp.pc.world,
aes(x = date_ct, y = value)) +
geom_point()
p.1 + geom_line()
ggsave(p.gdp, filename = "worldgdp.png",
height = 4, width = 4, scale = 2)
# Your turn
piaac <- read.csv("http://www.ut.ee/~iseppo/piaacest.csv")
piaac$logincome <- log(piaac$earnhr)
names(piaac)
ggplot(data = piaac, aes(x = pvnum1, y = logincome)) +
geom_point(aes(color = gender,
shape = children), alpha = 0.5) +
geom_smooth()
# remove observations where children is.na
piaac <- subset(piaac, !is.na(children))
?geom_smooth
ggplot(data = piaac, aes(x = pvnum1, y = logincome)) +
geom_point() +
geom_smooth(aes(color = gender))
# Your turn
gdp <- read.csv("http://www.ut.ee/~iseppo/gdpestimate.csv")
str(gdp)
gdp$date <- as.Date(gdp$date)
ggplot(data = gdp, aes(x = date, y = latestEstimate)) +
geom_line()+
geom_line(aes(y = firstEstimate), color = "red")+
ylab("GDP Estimates")
# Facets
ggplot(data = piaac, aes(x = pvnum1,
y = earnmth,
color = edlevel3)) +
geom_point() +
geom_smooth()
|
ced66bc52e50de26d4c471bb7f4a8d263a4be8d8
|
2ef76297c9731a56e2b00626c3a1679cccd5132e
|
/load.R
|
cce522de7fd2d1c7d4f9b800ba81c7c681ff61c3
|
[] |
no_license
|
vhy1967/YVY
|
777155adf32482e81657cc102c09451237e554c4
|
79f4e37d3cc54af1a1c95578eb3b389acf2efdec
|
refs/heads/master
| 2020-03-18T04:38:55.188954
| 2018-05-21T16:29:08
| 2018-05-21T16:29:08
| 134,298,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 897
|
r
|
load.R
|
# Load a batch of logger CSV files and combine them into one xts series.
# NOTE(review): the bare string literal below evaluates to itself and is
# discarded — presumably a leftover path; consider removing it.
'C:/Users/yuferov-vg/Desktop/YVY/processed/АнодиованиеРезы/3/основ/'
#fnames=list.files('C:/Users/yuferov-vg/Desktop/YVY/processed/АнодиованиеРезы/3/основ/',
fnames=list.files("C:/Users/yuferov-vg/Desktop/YVY/data/",
pattern = "Log-20171116-(\\d{6}).csv",full.names = T)
fnames=fnames[order(fnames)]
require(xts)
#fname="C:/Users/yuferov-vg/Desktop/YVY/data/Log-20170530-131522.csv"
df_list=list()
for(fname in fnames) {
# timestamp parsed from a fixed slice of the path — assumes the directory
# prefix is exactly 41 characters; TODO confirm (fragile if the path changes)
dt=strptime(substr(fname,42,56),"%Y%m%d-%H%M%S")
print(dt)
# each log has 5 header lines before the column names
df=read.csv(fname,skip=5,header=T)
#df['DT']=dt-df[length(df$Time.S.),1]+df[,1]
# DT = file timestamp minus the first column (elapsed time?);
# NOTE(review): subtraction rather than addition looks suspicious — confirm
df['DT']=dt-df[,1]
df_list[[substr(fname,42,56)]]=as.xts(df[,c(1,2,3)],order.by=df$DT)
}
length(df_list)
class(df_list[[1]])
periodicity(df_list[[1]])
# stack all per-file series into one xts object and plot selected columns
df_xts=do.call(rbind,df_list)
#merge(df_list[[1]],df_list[[2]])
plot.xts(df_xts[,c(2)])
plot.xts(df_xts[,c(3)])
plot.xts(df_xts)
|
10a31c3172ea28fde4ffeff29fcc0b1a949d9e97
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/waved/examples/make.lidar.Rd.R
|
8b4f9a29b94c2108cdd2890cb8a2d20412dc9b40
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 184
|
r
|
make.lidar.Rd.R
|
# Extracted example for waved::make.lidar — generate a length-1000 LIDAR
# test signal and plot it against an evenly spaced grid on [0, 1].
library(waved)
### Name: make.lidar
### Title: Make LIDAR signal
### Aliases: make.lidar
### Keywords: internal
### ** Examples
plot(seq(0,1,le=1000),make.lidar(1000),type='l')
|
58cb3dcefe28d92575edf3e4a2a08fe3b9b00146
|
0cd7388579dfbb382be628be2f38374b18783650
|
/man/whately_2015.Rd
|
bd598bf028fa26e9476b3dba057f5e4569057617
|
[] |
no_license
|
beanumber/macleish
|
411d8f6b7368fc8331b49ffb32c2dd9b9cb8ff38
|
4f345d0f6a03946cc9fd32d60cb59558925f996f
|
refs/heads/master
| 2022-07-22T01:00:41.136122
| 2022-07-14T17:14:27
| 2022-07-14T17:14:27
| 45,202,012
| 2
| 6
| null | 2022-06-29T18:23:54
| 2015-10-29T18:13:58
|
R
|
UTF-8
|
R
| false
| true
| 3,736
|
rd
|
whately_2015.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{whately_2015}
\alias{whately_2015}
\alias{orchard_2015}
\title{Weather data from Macleish Field Stations}
\format{
For both, a data frame (\code{\link[dplyr:tbl_df]{dplyr::tbl_df()}}) with roughly 52,560 rows and 8 or 9 variables.
The following variables are values that are found in either the \code{whately_2015}
or \code{orchard_2015} data tables.
All variables are averaged over the 10 minute interval unless otherwise noted.
\describe{
\item{when}{Timestamp for each measurement set in Eastern Standard Time.}
\item{temperature}{average temperature, in Celsius}
\item{wind_speed}{Wind speed, in meters per second}
\item{wind_dir}{Wind direction, in degrees}
\item{rel_humidity}{How much water there is in the air, in millimeters}
\item{pressure}{Atmospheric pressure, in millibars}
\item{rainfall}{Total rainfall, in millimeters}
\item{solar_radiation}{Amount of radiation coming from the sun, in Watts/meters^2. Solar measurement for Whately}
\item{par_density}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average density of Watts/meters^2. One of two solar measurements for Orchard}
\item{par_total}{Photosynthetically Active Radiation (sunlight between 400 and 700 nm), in average total over measurement period of Watts/meters^2. One of two solar measurements for Orchard}
}
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 52547 rows and 9 columns.
}
\source{
These data are recorded at \url{https://www.smith.edu/about-smith/sustainable-smith/ceeds}
}
\usage{
whately_2015
orchard_2015
}
\description{
Weather data collected at the Macleish Field Station in Whately,
MA during 2015.
}
\details{
The Macleish Field Station is a remote outpost owned by Smith
College and used for field research. There are two weather stations on the
premises. One is called \code{WhatelyMet} and the other is \code{OrchardMet}.
The \code{WhatelyMet} station is located at (42.448470, -72.680553) and
the \code{OrchardMet} station is at (42.449653, -72.680315).
\code{WhatelyMet} is located at the end of Poplar Hill Road in Whately,
Massachusetts, USA. The meteorological instruments of \code{WhatelyMet} (except the
rain gauge) are mounted at the top of a tower 25.3 m tall, well above the
surrounding forest canopy. The tower is located on a local ridge at an
elevation 250.75m above sea level.
\code{OrchardMet} is located about 250 m north of the first tower in an open
field next to an apple orchard. Full canopy trees (~20 m tall) are within
30 m of this station. This station has a standard instrument configuration
with temperature, relative humidity, solar radiation, and barometric
pressure measured between 1.5 and 2.0 m above the ground. Wind speed and
direction are measured on a 10 m tall tower and precipitation is measured
on the ground. Ground temperature is measured at 15 and 30 cm below the
ground surface 2 m south of the tower. The tower is located 258.1 m above
sea level. Data collection at OrchardMet began on June 27th, 2014.
The variables shown above are weather data collected at \code{WhatelyMet} and
\code{OrchardMet} during 2015. Solar radiation is measured in two different ways:
see \code{SlrW_Avg} or the \code{PAR} variables for Photosynthetic Active Radiation.
Note that a loose wire resulted in erroneous temperature reading at OrchardMet
in late November, 2015.
}
\examples{
\dontrun{
#' # loose wire anomalies
if (require(dplyr) & require(ggplot2) & require(lubridate)) {
orchard_2015 \%>\%
filter(month(when) == 11) \%>\%
ggplot(aes(x = when, y = temperature)) +
geom_line() + geom_smooth()
}
}
}
\keyword{datasets}
|
96d0f43ccfbf376a6c871626a7821bfee99de593
|
b76879ca270a8d94a42ee2cf821ae24508d5a510
|
/man/extract.prior.Rd
|
7f02a5d91ca5caa29af6fe41ede162c76fa1b39c
|
[] |
no_license
|
cran/DCL
|
ee11184e6a8b74515ea856e3bd27acda5488d861
|
bd9aa3502f861a5c0ff0266730a438c7d0d7116a
|
refs/heads/master
| 2022-05-17T09:38:59.010683
| 2022-05-05T15:40:02
| 2022-05-05T15:40:02
| 17,678,627
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,733
|
rd
|
extract.prior.Rd
|
\name{extract.prior}
\alias{extract.prior}
\title{Extracting information about zero-claims and severity inflation
}
\description{A way of extracting information about zero-claims and severity development inflation through the DCL method applied to two counts triangles: number of payments and number of reported claims.
}
\usage{
extract.prior(Xtriangle, Npaid, Ntriangle, Plots = TRUE , n.cal = NA ,
Fj.X = NA , Fj.N = NA , Fj.Npaid = NA )
}
\arguments{
\item{Xtriangle}{
The paid run-off triangle: incremental aggregated payments. It should be a matrix with incremental aggregated payments located in the upper triangle and the lower triangle consisting of missing or zero values.
}
\item{Npaid}{
A run-off (incremental) triangle with the number of payments. It should be a matrix with the observed counts located in the upper triangle and the lower triangle consisting of missing or zero values. It should have the same dimension as \code{Xtriangle} (both in the same aggregation level (quarters, years,etc.))
}
\item{Ntriangle }{
The counts data triangle: incremental number of reported claims. It should be a matrix with the observed counts located in the upper triangle and the lower triangle consisting of missing or zero values. It should have the same dimension as \code{Xtriangle} (both in the same aggregation level (quarters, years,etc.))
}
\item{Plots}{Logical. If TRUE (default) it is showed a two by one plot showing the extracted severity inflation in the development direction and the probability of zero-claims for each underwriting period.
}
\item{n.cal}{Integer specifying the number of most recent calendars which will be used to calculate the development factors. By default \code{n.cal=NA} and all the observed calendars are used (classical chain ladder). }
\item{Fj.X}{Optional vector with length m-1 (m being the dimension of the triangles) with the development factors to calculate the chain ladder estimates from \code{Xtriangle}. See more details in \code{\link{clm}}.}
\item{Fj.Npaid}{Optional vector with length m-1 with the development factors to calculate the chain ladder estimates from \code{Npaid}. }
\item{Fj.N}{Optional vector with length m-1 with the development factors to calculate the chain ladder estimates from \code{Ntriangle}. }
}
\details{
The function implements the strategy proposed in the paper by Martinez-Miranda, Nielsen, Verrall and Wuthrich (2013) to extract information for additional triangles (see "Section 5: An example showing how other data can be used to provide prior information in practice"). The derived severity inflation \code{inflat.j} does not extend to the tail. If you want provide the tail, by using \code{\link{dcl.predict.prior}}, the vector should be extended to have dimension 2m-1, otherwise the tail will be not provided (as was done in the cited paper).
}
\value{
\item{inflat.j }{A vector with dimension m with the extracted severity inflation in the development direction.}
\item{Qi }{A vector with dimension m with the extracted probability of zero-claims for underwriting period.}
}
\references{
Martinez-Miranda, M.D., Nielsen, J.P., Verrall, R. and Wuthrich, M.V. (2013) Double Chain Ladder, Claims Development Inflation and Zero Claims. \emph{Scandinavian Actuarial Journal}. In press.
}
\author{
M.D. Martinez-Miranda, J.P. Nielsen and R. Verrall
}
\seealso{
\code{\link{dcl.predict.prior}}, \code{\link{dcl.estimation}}
}
\examples{
## Data application in Martinez-Miranda, Nielsen, Verrall and Wuthrich (2013)
data(NtrianglePrior)
data(NpaidPrior)
data(XtrianglePrior)
extract.prior(XtrianglePrior,NpaidPrior,NtrianglePrior)
}
\keyword{Statistics}
|
70f7390e75298b7f13291f7e7d2ac3cd1e4857a3
|
b64f494df1015a60619f8cddc70465f742652525
|
/preprocess.R
|
c58a1bf4cb7439d22a52fdc5a02ab06c60aaabad
|
[] |
no_license
|
hnyang1993/UmbrellaAcademy
|
5b604e9caa0d998f580760fb3970138348cec22f
|
a287c4df9fd342fe93b2204645d693e66bc7cbb2
|
refs/heads/master
| 2020-05-02T15:31:57.398931
| 2019-04-26T01:32:42
| 2019-04-26T01:32:42
| 178,043,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,366
|
r
|
preprocess.R
|
# Preprocess the Jigsaw toxic-comment data: tokenize and stem comments,
# build a pruned n-gram document-term matrix, then select features via a
# lasso-penalised logistic fit.
library(text2vec)
library(data.table)
library(magrittr)
library(stopwords)
library(SnowballC)
library(caret)
library(glmnet)
# NOTE(review): hard-coded working directory ties this script to one machine
setwd("~/Desktop/BIOS 735/final_project/jigsaw-toxic-comment-classification-challenge")
sample <- fread("sample_submission.csv")
test <- fread("test.csv")
test_labels <- fread("test_labels.csv")
train.data <- fread("train.csv")
prep_fun = tolower
#stemming words
tok_fun = function(x) {
word_tokenizer(x) %>% lapply( function(x) SnowballC::wordStem(x,language="en"))
}
it_train = itoken(train.data$comment_text,
preprocessor = prep_fun,
tokenizer = tok_fun,
ids = train.data$id,
progressbar = TRUE)
# vocabulary of 1- to 3-grams minus "smart" stopwords, pruned to terms
# appearing at least 10 times
vocab = create_vocabulary(it_train, ngram=c(1L,3L), stopwords=stopwords("en",source="smart"))
prune.vocab <- prune_vocabulary(vocab, term_count_min = 10)
vectorizer = vocab_vectorizer(prune.vocab)
raw.dtm = create_dtm(it_train, vectorizer)
dim(raw.dtm)
# Bi-Normal Separation scaling of the raw counts
bns = BNS$new()
train.dtm = bns$fit_transform(raw.dtm, train.data$toxic)
# NOTE(review): the next line immediately overwrites the BNS-transformed
# matrix with the raw counts, making the transform above dead code —
# confirm which representation is intended
train.dtm = raw.dtm
# lasso logistic fit; nonzero coefficients at lambda.1se define the
# selected feature set
feature.selection = cv.glmnet(train.dtm, train.data$toxic, family = "binomial")
fit = glmnet(train.dtm, train.data$toxic, family = "binomial",
alpha=1,lambda=feature.selection$lambda.1se)
beta = fit$beta
s.train <- train.dtm[,which(beta!=0)]
dense.train.dtm <- as.matrix(s.train)
|
984ec54b1c80afda1f7c2318a22e398bfb151cdb
|
c0dcb97d778aada5336bdd129446f4b66a0c8a32
|
/Crab.R
|
e2715db11f4d54e740df24eb6b7fa15175a401e1
|
[] |
no_license
|
OmarVillalobos/Crab
|
d936d658bbaa16807f9cba0ff675b0a44d06f9f5
|
000fad9e3727ddadd0b8b7b7bfe8e901d61ae57c
|
refs/heads/master
| 2020-03-23T16:56:51.384839
| 2018-08-01T13:33:27
| 2018-08-01T13:33:27
| 141,834,826
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,563
|
r
|
Crab.R
|
df <- read.csv("C:/Users/OmarVr/Downloads/small_train.data", sep = '\t',
header=T,encoding = 'UTF-8')
label <- read.csv("C:/Users/OmarVr/Downloads/small_train_upselling.labels", sep = '\t',
header = F, encoding = 'UTF-8')
new_data <- read.csv("C:/Users/OmarVr/Downloads/small_test.data", sep = '\t',
header = T, encoding = 'UTF-8')
count_na <- sapply(df, function(x) sum(length(which(is.na(x)))))
names(count_na) <- names(df)
count_na <- data.frame(count_na)
summary(count_na)
hist(as.numeric(count_na$count_na),breaks = 1000)
a <- ggplot(count_na, aes(as.numeric(count_na)) ) +
geom_histogram(binwidth = 500)
percent_na_accepted = 0.80
df_na_reduced <- sapply(df, function(x) sum(length(which(is.na(x)))) < 50000*(1-percent_na_accepted))
df_na_reduced <- df[,df_na_reduced]
count_na <- sapply(df_na_reduced, function(x) sum(length(which(is.na(x)))))
names(count_na) <- names(df_na_reduced)
count_na <- data.frame(count_na)
summary(count_na)
hist(as.numeric(count_na$count_na),breaks = 100)
a <- ggplot(count_na, aes(as.numeric(count_na)) ) +
geom_histogram(binwidth = 500)
summary(df_na_reduced)
str(df_na_reduced[complete.cases(df_na_reduced),])
library(mlbench)
library(caret)
correlation_matrix <- cor(df_na_reduced[complete.cases(df_na_reduced),1:38])
print(correlation_matrix)
high_corr <- findCorrelation(correlation_matrix, cutoff=0.5)
library(corrplot)
corrplot(correlation_matrix, type = "upper", order = "original",
tl.col = "black", tl.srt = 45)
str(df_na_reduced[complete.cases(df_na_reduced),-high_corr])
df_label <- cbind(df_na_reduced,label)
df_label <- df_label[complete.cases(df_label),-high_corr]
Large_factor_level_columns <- names(which(sapply(df_label,
function(x) if (is.factor(x) & length(levels(x)) > 53 )
TRUE
else
FALSE)))
df_label <- df_label[,!(names(df_label) %in% Large_factor_level_columns)]
df_label$V1 <- as.factor(df_label$V1)
summary(as.factor(df_label$V1))
train <- sample(1:nrow(df_label), nrow(df_label)*0.7)
training <- df_label[train,]
testing <- df_label[-train,]
x_train <- training[,-47]
y_train <- training[,47]
y_train <- as.data.frame(y_train)
y_train$y_train <- as.factor(y_train$y_train)
library(parallel)
library(doParallel)
cluster <- makeCluster(detectCores() - 1 )
registerDoParallel(cluster)
fitControl <- trainControl(method = "cv",
number = 5,
allowParallel = TRUE)
fit <- train(x_train,y_train$y_train, method="rf",trControl = fitControl)
stopCluster(cluster)
registerDoSEQ()
library(plyr)
var_importance <- (varImp(fit,scale = FALSE))
var_importance_sorted <- var_importance$importance
var_importance_sorted$var <- row.names.data.frame(var_importance_sorted)
var_importance_sorted <- arrange(var_importance_sorted, desc(Overall))
var_importance_sorted[1:18,]$var
df_label <- df_label[,(names(df_label) %in% c(var_importance_sorted[1:5,]$var,'V1'))]
df_label <- df_label[,!(names(df_label) %in% c('Var193'))]
# --- Cleanup and baseline logistic regression ---
df_label <- df_label[,!(names(df_label) %in% c('Var226'))]
# Normalise Var219: strip blanks and recode empty strings as "N".
df_label$Var219 <- gsub("[[:blank:]]", "", df_label$Var219)
df_label$Var219 <- sub("^$", "N", df_label$Var219)
df_label$Var219 <- as.factor(df_label$Var219)
# Keep only Var226 categories with more than 4 rows.
# NOTE(review): Var226 was dropped from df_label just above (see the select
# on 'Var226'), so this group_by(Var226) would fail -- verify intended order.
scenarios_under <- df_label %>%
group_by(Var226)%>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
filter(n > 4)
df_label <- semi_join(df_label,scenarios_under)
df_label <- droplevels(df_label)
# Reproducible 70/30 split for the modelling section.
set.seed(681)
train <- sample(1:nrow(df_label), nrow(df_label)*0.7)
#label <- t(df_label[-train,]$V1)
# Baseline binomial GLM on the training rows.
logistic <- glm(V1~., data=df_label[train,], family = binomial)
# mats <- Map(function(x, y)
# if (is.factor(x) & any(is.na(match(unique(x),unique(y)))))
# x <- replace(x, which(x == dplyr::setdiff(levels(x), levels(y))[1]), NA)
# else
# x,
# df_label[-train,],
# df_label[train,])
# new_data <- do.call(cbind, lapply(mats, data.frame, stringsAsFactors=FALSE))
# names(new_data) <- names(df_label)
# new_data <- droplevels(new_data)
# Score the held-out rows and threshold probabilities at 0.5.
prob_logistic <- (predict.glm(logistic, df_label[-train,], type = 'response'))
predict_logistic <- rep('-1',nrow(df_label[-train,]))
predict_logistic[prob_logistic > 0.5] <- '1'
table(predict_logistic, df_label[-train,]$V1)
summary(predict_logistic)
# NOTE(review): `label` here is the full-length vector bound earlier via
# cbind(df_na_reduced, label), not the test-set labels (the correct line
# is commented out above), so lengths likely mismatch -- verify.
error <- sum(label!=predict_logistic)/nrow(df_label[-train,])
library(pROC)
# NOTE(review): roc() is given character predictions rather than
# probabilities; using prob_logistic would give a meaningful curve.
g <- roc(V1 ~ predict_logistic, data = df_label[-train,])
plot(g)
# --- caret models: upsampled GLM, downsampled RF, upsampled linear SVM ---
# NOTE(review): this re-samples the split, discarding the seeded split above.
train <- sample(1:nrow(df_label), nrow(df_label)*0.7)
cluster <- makeCluster(detectCores() - 1 )
registerDoParallel(cluster)
# 10x10 repeated CV with minority-class upsampling for the GLM.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 10,
verboseIter = FALSE,
sampling = "up",
allowParallel = TRUE)
mod_fit <- train(V1~., data=df_label[train,], method="glm", family="binomial",
trControl = fitControl, tuneLength = 5)
pred = predict(mod_fit, newdata=df_label[-train,])
confusionMatrix(data=pred, df_label[-train,]$V1)
stopCluster(cluster)
registerDoSEQ()
cluster <- makeCluster(detectCores() - 1 )
registerDoParallel(cluster)
# 10-fold CV with majority-class downsampling for the random forest.
fitControl <- trainControl(method = "cv",
number = 10,
verboseIter = FALSE,
sampling = "down",
allowParallel = TRUE)
mod_fit_rf <- train(V1~., data=df_label[train,], method="rf",
trControl = fitControl, prox = TRUE)
pred_rf = predict(mod_fit_rf, newdata=df_label[-train,],type = 'raw')
confusionMatrix(data=pred_rf, df_label[-train,]$V1)
stopCluster(cluster)
registerDoSEQ()
cluster <- makeCluster(detectCores() - 1 )
registerDoParallel(cluster)
# 10x10 repeated CV with upsampling for the linear SVM.
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 10,
verboseIter = FALSE,
sampling = "up",
allowParallel = TRUE)
mod_fit_svm <- train(V1 ~., data=df_label[train,], method = "svmLinear",
trControl=fitControl,
tuneLength = 5)
pred_svm = predict(mod_fit_svm, newdata=df_label[-train,],type = 'raw')
confusionMatrix(data=pred_svm, df_label[-train,]$V1)
# NOTE(review): this cluster is never stopped (no stopCluster/registerDoSEQ
# after the SVM fit) -- verify whether that is intentional.
#Best performer
# Min-max rescale the GLM class probabilities to an integer 0..10 score.
best_pred <- predict(mod_fit, newdata=df_label[-train,], type = 'prob')
best_pred <- as.data.frame(best_pred[,2])
best_pred <- round((best_pred - min(best_pred)) / (max(best_pred) - min(best_pred)) * 10,digits = 0)
names(best_pred) <- 'Calificacion'
best_pred$Predictions <- pred
# New data
# NOTE(review): the code that builds `new_data` is commented out further up
# (the Map/do.call block), so `new_data` may be undefined here -- verify.
new_data <- (new_data[,(names(new_data) %in% c(var_importance_sorted[1:5,]$var))])
new_data <- (new_data[complete.cases(new_data),])
# Keep only Var226 categories observed more than once in the new data.
scenarios_under <- new_data %>%
group_by(Var226)%>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
filter(n > 1)
new_data <- semi_join(new_data,scenarios_under)
new_data <- droplevels(new_data)
# Score the new data with the best model: probabilities for the 0..10 score,
# raw class predictions for the Predictions column.
new_pred <- predict(mod_fit, newdata=new_data, type = 'prob')
new_pred_logist <- predict(mod_fit, newdata=new_data, type = 'raw')
new_pred <- as.data.frame(new_pred[,2])
new_pred <- round((new_pred - min(new_pred)) / (max(new_pred) - min(new_pred)) * 10,digits = 0)
names(new_pred) <- 'Score'
new_pred$Predictions <- new_pred_logist
# Persist scores + predicted classes as a tab-separated file.
write.table(new_pred,"C:/Users/OmarVr/Downloads/small_test_pred__and_score.data", sep = '\t', row.names = FALSE)
|
9980d58e8a9b49a3bb2e13d96525549d01d9a6eb
|
7cae64bf335cf9c08ffb5cdd7564b82f1ea2abd4
|
/Multiple Linear Regression Class.R
|
730a7917e419fc7594a391601c564402ba0b4af8
|
[] |
no_license
|
Shivanandrai/R-Code-
|
89945897c875c8dedcec8b69867dd2ee5b29317e
|
8716ea634f8bcf95680288fdc0153e4fa17f95b5
|
refs/heads/master
| 2022-11-15T22:46:06.076331
| 2020-07-02T09:48:34
| 2020-07-02T09:48:34
| 276,605,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,471
|
r
|
Multiple Linear Regression Class.R
|
# --- Multiple linear regression on fuel-efficiency data (class exercise) ---
setwd("~/Desktop/ESCP/R Class")
Fueleff <-read.csv("1-FuelEfficiency.csv")
head(Fueleff)
# Quick look at gallons-per-mile vs weight.
plot(GPM~WT, Fueleff)
# Full model with every column as predictor.
model_1= lm(GPM~., data=Fueleff)
summary(model_1)
# Drop the first column (likely an ID/MPG column -- TODO confirm) and refit.
Fueleff= Fueleff[-1]
model_1= lm(GPM~., data=Fueleff)
summary(model_1)
cor(Fueleff)
library(leaps)
X=Fueleff[ ,2:7]
Y=Fueleff[ ,1]
# Best-subset selection: keep the 2 best models of each size, scored by
# R^2, adjusted R^2 and Mallow's Cp.
output= summary(regsubsets(X,Y, nbest=2, nvmax = ncol(X)))
tab = cbind(output$which, output$rsq, output$adjr2, output$cp)
# Simple one-predictor model on weight only, for comparison.
model_2= lm(GPM~WT, data = Fueleff)
summary(model_2)
# In-sample error metrics for both models.
# NOTE(review): mape/rmse/me come from the Metrics package, which is never
# loaded in this script -- verify library(Metrics) is run elsewhere.
mape(Fueleff$GPM, predict(model_1,Fueleff))
mape(Fueleff$GPM, predict(model_2,Fueleff))
rmse(Fueleff$GPM, predict(model_1,Fueleff))
rmse(Fueleff$GPM, predict(model_2,Fueleff))
me(Fueleff$GPM, predict(model_1, Fueleff))
me(Fueleff$GPM, predict(model_2, Fueleff))
#split the data in a different way
# NOTE(review): createDataPartition is from caret, also not loaded here.
ind=createDataPartition(Fueleff$GPM, p=2/3, list=FALSE)
training_data_set <- Fueleff [ind,]
testing_data_set <- Fueleff [-ind,]
# Refit the full model on the training split and evaluate on the test split.
# NOTE(review): metrics below compare against the FULL Fueleff$GPM vector,
# not testing_data_set$GPM -- lengths likely mismatch; verify.
model_11 = lm(GPM~., data=training_data_set)
mape(Fueleff$GPM, predict(model_11, testing_data_set))
rmse(Fueleff$GPM, predict(model_11, testing_data_set))
me(Fueleff$GPM, predict(model_11, testing_data_set))
model_22 = lm(GPM~WT, data= training_data_set)
# NOTE(review): the three lines below evaluate model_11 again; they
# presumably were meant to evaluate model_22 -- verify.
mape(Fueleff$GPM, predict(model_11,testing_data_set))
rmse(Fueleff$GPM, predict(model_11,testing_data_set))
me(Fueleff$GPM, predict(model_11, testing_data_set))
|
71bae1af1ccb319a0eceaa651ebbd92f52ff0e56
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4281_1/rinput.R
|
c010c739beb20f60af8ccfd0e23bd6be86021ce9
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a phylogenetic tree from a Newick file, unroot it, and write it back out.
library(ape)
testtree <- read.tree("4281_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4281_1_unrooted.txt")
|
9de1844dd5a651f63ed149a6ef11256452aa976c
|
5ac57449f8a0cfbc0e9c8f716ab0a578d8606806
|
/man/pDcalc.Rd
|
c55dced881ff73a38cd3b1c1a1599e130432fb75
|
[] |
no_license
|
hugaped/MBNMAtime
|
bfb6913e25cacd148ed82de5456eb9c5d4f93eab
|
04de8baa16bf1be4ad7010787a1feb9c7f1b84fd
|
refs/heads/master
| 2023-06-09T01:23:14.240105
| 2023-06-01T12:51:48
| 2023-06-01T12:51:48
| 213,945,629
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,021
|
rd
|
pDcalc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run.functions.R
\name{pDcalc}
\alias{pDcalc}
\title{Calculate plugin pD from a JAGS model with univariate likelihood for studies
with repeated measurements}
\usage{
pDcalc(
obs1,
obs2,
fups = NULL,
narm,
NS,
theta.result,
resdev.result,
likelihood = "normal",
type = "time"
)
}
\arguments{
\item{obs1}{A matrix (study x arm) or array (study x arm x time point) containing
observed data for \code{y} (normal likelihood) or \code{r} (binomial or Poisson likelihood)
in each arm of each study. This will be the same array
used as data for the JAGS model.}
\item{obs2}{A matrix (study x arm) or array (study x arm x time point) containing
observed data for \code{se} (normal likelihood), \code{N} (binomial likelihood) or \code{E} (Poisson likelihood)
in each arm of each study. This will be the same array
used as data for the JAGS model.}
\item{fups}{A numeric vector of length equal to the number of studies,
containing the number of follow-up mean responses reported in each study. Required for
time-course MBNMA models (if \code{type="time"})}
\item{narm}{A numeric vector of length equal to the number of studies,
containing the number of arms in each study.}
\item{NS}{A single number equal to the number of studies in the dataset.}
\item{theta.result}{A matrix (study x arm) or array (study x arm x time point)
containing the posterior mean predicted means/probabilities/rate in each arm of each
study. This will be estimated by the JAGS model.}
\item{resdev.result}{A matrix (study x arm) or array (study x arm x time point)
containing the posterior mean residual deviance contributions in each arm of each
study. This will be estimated by the JAGS model.}
\item{likelihood}{A character object of any of the following likelihoods:
\itemize{
\item \code{univariate}
\item \code{binomial} (does not work with time-course MBNMA models)
\item \code{multivar.normal} (does not work with time-course MBNMA models)
}}
\item{type}{The type of MBNMA model fitted. Can be either \code{"time"} or \code{"dose"}}
}
\value{
A numeric value for the effective number of parameters, pD, calculated via the plugin method
}
\description{
Uses results from MBNMA JAGS models to calculate pD via the
plugin method \insertCite{spiegelhalter2002}{MBNMAtime}. Can only be used for models with known
standard errors or covariance matrices (typically univariate).
}
\details{
Method for calculating pD via the plugin method proposed by
\insertCite{spiegelhalter2002}{MBNMAtime}. Standard errors / covariance matrices must be assumed
to be known. To obtain values for theta.result and resdev.result these
parameters must be monitored when running the JAGS model.
For non-linear time-course MBNMA models residual deviance contributions may be skewed, which
can lead to non-sensical results when calculating pD via the plugin method.
Alternative approaches are to use pV (\code{pv}) as an approximation \insertCite{plummer2008}{MBNMAtime} or
pD calculated by Kullback–Leibler divergence (\code{pd.kl}) or using an optimism adjustment (\code{popt}) \insertCite{plummer2008}{MBNMAtime}.
}
\examples{
\donttest{
# Using the alogliptin dataset
network <- mb.network(alog_pcfb)
# Run Emax model saving predicted means and residual deviance contributions
emax <- mb.run(network, fun=temax(),
parameters.to.save=c("theta", "resdev"), intercept=FALSE)
# Get matrices of observed data
jagsdat <- getjagsdata(network$data.ab)
# Plugin estimation of pD is problematic with non-linear models as it often leads to
#negative values, hence use of pV, pd.kl and popt as other measures for the effective
#number of parameters
pDcalc(obs1=jagsdat$y, obs2=jagsdat$se,
fups=jagsdat$fups, narm=jagsdat$narm, NS=jagsdat$NS,
theta.result = emax$BUGSoutput$mean$theta,
resdev.result = emax$BUGSoutput$mean$resdev
)
}
}
\references{
Plummer, M. (2008). Penalized loss functions for Bayesian model comparison.
\emph{Biostatistics}. 9(3), 523-539.
}
|
29f3cdb9cc66a32c3ae1001a460167bb97e80d1f
|
7b25b67ba55aeb0dec69a3ec6d0ed48939bf944b
|
/R/getClusterTree.R
|
47cd0be1f94d9dfc84dd1c67d67781612e513b34
|
[] |
no_license
|
rwoldford/trec
|
4524c08406042f4fdfb7fc839f51691f162d6515
|
5cac232d997815956c767d9f12ec6abe2357dc1f
|
refs/heads/master
| 2020-04-18T09:11:31.902264
| 2019-04-26T17:27:28
| 2019-04-26T17:27:28
| 167,425,596
| 1
| 1
| null | 2019-04-26T17:27:29
| 2019-01-24T19:37:44
|
R
|
UTF-8
|
R
| false
| false
| 4,622
|
r
|
getClusterTree.R
|
#' Transform all data structures into clusterTree
#'
#' Generic entry point: dispatches on the class of the clustering result
#' (hclust, kmeans, dbscan, ...) and converts it to a clusterTree object.
#'
#' @param x data structure output by some clustering method
#' (e.g. hclust, kmeans, dbscan, etc.)
#' @return a matrix providing the mapping
#' between data points and cluster id.
#' @examples
#' x <- kmeans(matrix(rnorm(100),nrow=50),centers=3)
#' getClusterTree(x)
#' @export
getClusterTree <- function(x)
{
UseMethod("getClusterTree")
}
#' @export
getClusterTree.clusterTree <- function(x) {
  # Already a clusterTree: just make sure "clusterTree" heads the class
  # vector exactly once, then return the object unchanged otherwise.
  updated <- c("clusterTree", class(x))
  class(x) <- updated[!duplicated(updated)]
  x
}
#' @export
# Fallback: plain vectors are treated as one-column cluster assignments;
# anything else is rejected.
getClusterTree.default <- function(x)
{
if(is.vector(x)) {
getClusterTree(matrix(x, ncol = 1))
} else stop("Not a clustering")
}
#' @export
# A factor is a vector of cluster labels; convert its integer level codes
# into a one-column assignment matrix and dispatch through the generic.
#
# BUG FIX: the previous body tested is.numeric(x), which is always FALSE
# for a factor (factors are not numeric in R), so this method
# unconditionally stopped with "Not a clustering".
getClusterTree.factor <- function(x)
{
  getClusterTree(matrix(as.integer(x), ncol = 1))
}
#' @export
# Assignment matrices convert directly.
getClusterTree.matrix <- function(x)
{
matrixToClusterTree(x)
}
## adjclust package
#' @export
getClusterTree.chac <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),labels = x$labels)
}
## adpclust package
#' @export
getClusterTree.adpclust <- function(x)
{
x <- as.matrix(x$clusters)
matrixToClusterTree(x)
}
## afCEC package
#' @export
getClusterTree.afCEC <- function(x)
{
x<-as.matrix(x$labels)
matrixToClusterTree(x)
}
## apcluster package
#' @export
getClusterTree.apcluster <- function(x)
{
x<-as.matrix(x)
matrixToClusterTree(x)
}
#' @export
# NOTE(review): apcluster's agglomerative result class is spelled
# "AggExResult"; this method name ("AggExResul") looks truncated, in which
# case S3 dispatch would never reach it -- verify against apcluster.
getClusterTree.AggExResul <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
## bclust package
#' @export
getClusterTree.bclust <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
#' @export
getClusterTree.bclustvs <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
## biclust package #Nothing needed here
## package cba
#' @export
getClusterTree.ccfkms <- function(x)
{
x<-as.matrix(x$cl)
matrixToClusterTree(x)
}
## package cclust
#' @export
getClusterTree.cclust <- function(x)
{
x <- as.matrix(x$cluster)
matrixToClusterTree(x)
}
## package CEC
#' @export
getClusterTree.cec <- function(x)
{
x<-as.matrix(x$cluster)
matrixToClusterTree(x)
}
#### package Ckmeans.1d.dp #### TO DO
#' @export
getClusterTree.Ckmeans.1d.dp <- function(x)
{
x<-as.matrix(x$cluster)
matrixToClusterTree(x)
}
## package clues
#' @export
getClusterTree.clues <- function(x)
{
x<-as.matrix(x$mem)
matrixToClusterTree(x)
}
## package cluster
#' @export
# Hierarchical results (agnes/diana) go through the merge-matrix path;
# partition results (clara/fanny/pam) supply a flat clustering vector.
getClusterTree.agnes <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
#' @export
getClusterTree.clara <- function(x)
{
x<-as.matrix(x$clustering)
matrixToClusterTree(x)
}
#' @export
getClusterTree.diana <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
#' @export
getClusterTree.fanny <- function(x)
{
x<-as.matrix(x$clustering)
matrixToClusterTree(x)
}
#' @export
getClusterTree.pam <- function(x)
{
x<-as.matrix(x$clustering)
matrixToClusterTree(x)
}
## ClusterR package TODO
## clustMD package TODO
## CoClust package TODO
#' @export
# Explicitly unsupported: CoClust results have no known conversion.
getClusterTree.CoClust <- function(x)
{
stop("don't know how to handle result from CoClust package")
}
## compHclust package
#' @export
#getClusterTree.compHclust <- function(x)
#{
#}
## conclust package
## only need default processing
## contaminatedmixt package
#' @export
#getClusterTree.CNmixt <- function(x)
#{
#don't know how to do this!!!
#}
## CORM package ###
## don't know how to do this!
## don't need to deal with CREAM package ##
## package CrossClustering returns list??????????????????????
## Package 'CRPClustering' only requires default processing ##
#' @export
getClusterTree.dbscan_fast <- function(x)
{
matrixToClusterTree(as.matrix(x$cluster))
}
#' @export
# NOTE(review): this method previously lacked the roxygen @export tag that
# every sibling method carries -- added here for consistency; verify NAMESPACE.
getClusterTree.dbscan <- function(x)
{
matrixToClusterTree(as.matrix(x$cluster))
}
## densityClust package ##
#' @export
getClusterTree.densityCluster <- function(x){
matrixToClusterTree(x$clusters)
}
## Fclust package ##
#' @export
getClusterTree.fclust <- function(x)
{
matrixToClusterTree(as.matrix(x$clus))
}
## hierarchical based clustering
#' @export
getClusterTree.hclust <- function(x)
{
matrixToClusterTree(mergeToMatrix(x$merge),x$labels)
}
## package kernlab
#' @export
# kernlab's specc is an S4 object; the assignment vector lives in @.Data.
getClusterTree.specc <- function(x)
{
matrixToClusterTree(as.matrix(x@.Data))
}
## kmeans package
#' @export
getClusterTree.kmeans <- function(x)
{
matrixToClusterTree(as.matrix(x$cluster))
}
## mixture based clustering
#' @export
getClusterTree.Mclust <- function(x)
{
matrixToClusterTree(as.matrix(x$classification))
}
|
e6828f4683612f05ee327ba768651df4bd83dbdf
|
a58298d4d1afffcbf7de672cb99ad8ab7ff6b5c5
|
/Scripts/depth_12.4G_VS_6.2G.R
|
aac5baaf5c35690a38cf2c59fa9300ffbb581d2f
|
[] |
no_license
|
zhouyunyan/PIGC
|
b6edb9685d720bdd0145598fa34dccd1f750223e
|
cfceab8ad4cadab522d56106adb4b636dfbe24c6
|
refs/heads/master
| 2023-04-12T04:45:05.660394
| 2022-07-21T08:34:03
| 2022-07-21T08:34:03
| 295,356,035
| 18
| 15
| null | null | null | null |
TIS-620
|
R
| false
| false
| 1,014
|
r
|
depth_12.4G_VS_6.2G.R
|
####Comparison of predicted gene number between high and low seqeuncing depth
# Reads per-sample gene counts at the two depths, reshapes to long format,
# and draws a paired boxplot (counts in thousands) with a significance test.
table <- read.table("geneNum_12.4G_VS_6.2G.txt",header = T)
library(reshape2)
# Long format: one row per (sample, depth) pair; scale counts to thousands.
table_melt <-melt(table,id.vars = "ID")
table_melt$value <- table_melt$value/1000
# Order so the 6.2G depth is plotted first.
table_melt$variable <- factor(table_melt$variable, levels = c("Base6.2G","Base12.4G"))
library(ggpubr)
tiff(filename = "depth_12.4G_VS_6.2G.tif",width = 2000,height = 2000,res=600,compression="lzw")
#paired compare
# NOTE(review): the ylab string contains mojibake ("กม") -- presumably a
# multiplication sign lost in a TIS-620/UTF-8 encoding round-trip; verify
# and re-enter the intended character (string left untouched here).
ggboxplot(table_melt, x="variable", y="value", fill = "variable",
palette = "jco", add = "jitter",xlab = F,ylab = "Gene numbers (กม 1000)") +
guides(fill = FALSE) +
theme(panel.background = element_rect(color = "black"),
axis.title.y = element_text(size = 14,color="black"),
axis.text.y = element_text(size = 12,color="black"),
axis.text.x = element_text(size = 12,color="black"))+
scale_x_discrete(labels =c("Base12.4G"="12.4G","Base6.2G"="6.2G"))+
stat_compare_means(label.x = 0.7)
dev.off()
|
02a5610d9d1c1699019d5bf41b267ea24c5b6ab5
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/glamlasso/R/glamlasso_RH.R
|
f762de858045df09c97c18dac2bdcc1542d2f992
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,099
|
r
|
glamlasso_RH.R
|
#
# Description of this R script:
# Rotated H-transform of an array A by a matrix M.
#
# Intended for use with R.
# Copyright (C) 2015 Adam Lund
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# H-transform of an array A by a matrix M: multiplies M into the first
# dimension of A, leaving the remaining dimensions untouched.
H <- function(M, A) {
  dims <- dim(A)
  # Unfold A so its first dimension becomes the rows of a matrix.
  unfolded <- matrix(A, nrow = dims[1])
  transformed <- M %*% unfolded
  # Refold, with the (possibly new) leading dimension from M's row count.
  array(transformed, dim = c(nrow(transformed), dims[-1]))
}
# Rotation of an array A: cycles the dimensions one position left, so the
# first dimension becomes the last (e.g. p1 x p2 x p3 -> p2 x p3 x p1).
Rotate <- function(A) {
  n_dims <- length(dim(A))
  perm <- c(seq_len(n_dims)[-1], 1L)
  aperm(A, perm)
}
#' @name RH
#'
#' @aliases glamlasso_RH Rotate H
#'
#' @title The Rotated H-transform of a 3d Array by a Matrix
#'
#' @description An implementation of the \eqn{\rho}-operator from
#' \cite{Currie et al 2006}, the building block of the GLAM arithmetic.
#'
#' @details See \cite{Currie et al 2006} for details. Note that this particular
#' implementation is not used in the optimization routines underlying the
#' glamlasso procedure.
#'
#' @usage RH(M, A)
#'
#' @param M a \eqn{n \times p_1} matrix.
#' @param A a 3d array of size \eqn{p_1 \times p_2 \times p_3}.
#'
#' @return A 3d array of size \eqn{p_2 \times p_3 \times n}.
#'
#' @references
#' Currie, I. D., M. Durban, and P. H. C. Eilers (2006). Generalized linear
#' array models with applications to multidimensional
#' smoothing. \emph{Journal of the Royal Statistical Society. Series B}. 68, 259-280.
#'
#' @author Adam Lund
RH <- function(M, A) {
  # Apply the H-transform along the first dimension, then rotate the
  # result so the transformed dimension moves to the back.
  h_transformed <- H(M, A)
  Rotate(h_transformed)
}
|
125cc0244cb0ce75d1f94e532c38978dff35dae4
|
53fbeea7ca52f3b474e79efe34b1bee3f9a52572
|
/man/coerce-tbl_df.Rd
|
bdebb4cef4a34c53140ced88dc5046c7189c21bc
|
[
"MIT"
] |
permissive
|
csu-xiao-an/transformer
|
575aa7a59094e02006d44672c3fa5f79c085c757
|
04658a7d99973ac0d3d9b4484b0e599ecedcf936
|
refs/heads/master
| 2020-07-26T17:56:40.024193
| 2019-09-09T16:55:50
| 2019-09-09T16:55:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,598
|
rd
|
coerce-tbl_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as_tibble-methods.R, R/coerce-tbl_df-methods.R
\name{as_tibble}
\alias{as_tibble}
\alias{as_tibble.DataFrame}
\alias{as_tibble.IPosRanges}
\alias{as_tibble.GenomicRanges}
\alias{coerce-tbl_df}
\alias{coerce,data.frame,tbl_df-method}
\alias{coerce,DataFrame,tbl_df-method}
\alias{coerce,GenomicRanges,tbl_df-method}
\alias{coerce,IPosRanges,tbl_df-method}
\title{Coerce to tibble}
\usage{
\method{as_tibble}{DataFrame}(x, ...,
rownames = pkgconfig::get_config("tibble::rownames", "rowname"))
\method{as_tibble}{IPosRanges}(x, ...,
rownames = pkgconfig::get_config("tibble::rownames", "rowname"))
\method{as_tibble}{GenomicRanges}(x, ...,
rownames = pkgconfig::get_config("tibble::rownames", "rowname"))
}
\arguments{
\item{x}{A data frame, list, matrix, or other object that could reasonably be
coerced to a tibble.}
\item{...}{Other arguments passed on to individual methods.}
\item{rownames}{How to treat existing row names of a data frame or matrix:
\itemize{
\item \code{NULL}: remove row names. This is the default.
\item \code{NA}: keep row names.
\item A string: the name of a new column. Existing rownames are transferred
into this column and the \code{row.names} attribute is deleted.
Read more in \link{rownames}.
}}
}
\value{
\code{tbl_df}.
}
\description{
Coerce to \code{tbl_df}.
}
\details{
Our defined methods attempt to improve on the defaults in the tibble package
to ensure that row names are not dropped by default, which is a poor default
for bioinformatics. This is accomplished by setting \code{rownames = "rowname"} by
default instead of \code{rownames = NULL}.
}
\section{S3 \code{as_tibble()}}{
transformer extends \code{\link[tibble:as_tibble]{as_tibble()}} method support for
these S4 classes:
\itemize{
\item \code{DataFrame}.
\item \code{GRanges}.
}
}
\section{S4 \code{as()}}{
Since \code{tbl_df} is a virtual class that extends \code{tbl} and \code{data.frame}, we
need to define an S4 coercion method that allows us to use
\code{\link[methods:as]{as()}} to coerce an object to a tibble.
See \code{getClass("tbl_df")} for details on how tibble is a virtual class.
}
\examples{
data(DataFrame, GRanges, IRanges, package = "acidtest")
## DataFrame to tbl_df ====
x <- as(DataFrame, "tbl_df")
x <- as_tibble(DataFrame)
print(x)
## GenomicRanges to tbl_df ====
x <- as(GRanges, "tbl_df")
x <- as_tibble(GRanges)
print(x)
## IRanges to tbl_df ====
x <- as(IRanges, "tbl_df")
x <- as_tibble(IRanges)
print(x)
}
\seealso{
\code{\link[tibble:as_tibble]{tibble::as_tibble()}}.
}
|
2994378f961ffafaa25268f4a7a4f8ee2492d446
|
d771ff12fe4ede6e33699704efa371a2f33cdfaa
|
/R/do.filter.R
|
aa360ae6cb29ce9ba44f9337127e9a7047807add
|
[
"MIT"
] |
permissive
|
ImmuneDynamics/Spectre
|
aee033979ca6a032b49ede718792c72bc6491db5
|
250fe9ca3050a4d09b42d687fe3f8f9514a9b3bf
|
refs/heads/master
| 2023-08-23T14:06:40.859152
| 2023-04-27T00:31:30
| 2023-04-27T00:31:30
| 306,186,694
| 52
| 17
|
MIT
| 2023-08-06T01:26:31
| 2020-10-22T01:07:51
|
HTML
|
UTF-8
|
R
| false
| false
| 13,283
|
r
|
do.filter.R
|
#' do.filter - filtering data.table using multiple match values
#'
#' This function allows filtering of a data.table using multiple match values -- all rows whose
#' value in \code{use.col} matches any of the match values are kept and returned as a new table.
#'
#' @param dat NO DEFAULT. A data.table (a data.frame also works)
#' @param use.col DEFAULT = NULL. The column to use for filtering
#' @param values DEFAULT = NULL. A vector of values to use for filtering -- all rows that contain any of the match values will be kept. Any number of match values can be provided.
#'
#' @usage do.filter(dat, use.col, values)
#'
#' @author
#' Thomas M Ashhurst, \email{thomas.ashhurst@@sydney.edu.au}
#'
#' @references \url{https://github.com/ImmuneDynamics/Spectre}.
#'
#' @import data.table
#'
#' @export
do.filter <- function(dat,
                      use.col,
                      values){

  ### Input validation
  if (!(use.col %in% names(dat))) {
    stop("'use.col' must name a column of 'dat'")
  }

  ### Filtering
  # %in% replaces the previous 20-branch chain of '==' comparisons joined
  # with '|': it accepts any number of match values (the old hard 20-value
  # limit is gone, backward-compatibly) and never yields NA, so rows with a
  # missing value in use.col are excluded rather than producing NA rows.
  res <- dat[dat[[use.col]] %in% values, ]

  ### Wrap up
  return(res)
}
|
663c1f66553ddc4b9f7c9d9578ecc79828535073
|
1391c4c885aa13dbfc7e8ab54598787697c65662
|
/Data Analytics/DataVisualizationPhase3.R
|
2f9e8e69adb2fb6be71022df8a5fbf2a5812af02
|
[] |
no_license
|
salpoddar/Predicting-NewYork-Taxi-Fare
|
86a7b497dd057f23e7c5dc1b3f0b657631a43eec
|
575c2fefd5b8c1e79e1739f416f2c1d0cbd84b09
|
refs/heads/master
| 2023-04-02T06:56:35.378700
| 2021-04-12T20:27:15
| 2021-04-12T20:27:15
| 357,330,226
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,149
|
r
|
DataVisualizationPhase3.R
|
#load the libraries
library(data.table) # Fast CSV read
library(ggplot2) #Visualisation
library(lubridate) #Date-Time extraction
library(dplyr) #Data wrangling
library(ranger) # Random forest
library(geosphere) #Dist
library(caret) #Cross validation
library(xgboost)
library(DataExplorer)
library(mlr)
library(gridExtra)
#load the first 2M rows of the training data
train=fread("C:\\train.csv",nrows=2000000)
train<-na.omit(train)
# Keep plausible fares and coordinates inside the NYC bounding box.
train = train %>%
filter(fare_amount>=2.5,
pickup_latitude<=41.71, pickup_latitude>=40.57,
dropoff_latitude<=41.70, dropoff_latitude>=40.57,
pickup_longitude<=-72.99, pickup_longitude>=-74.25,
dropoff_longitude<=-72.99, dropoff_longitude>=-74.26)
#preprocess data and add extra columns derived from date column
# Derive calendar/time-of-day features, plus absolute lat/long differences
# as crude trip-distance proxies, then drop the raw timestamp.
train<-train%>%
mutate(
pickup_datetime = ymd_hms(pickup_datetime),
year = as.factor(year(pickup_datetime)),
month = as.factor(month(pickup_datetime)),
day = as.numeric(day(pickup_datetime)),
dayofweek = as.factor(wday(pickup_datetime)),
hour = as.numeric(hour(pickup_datetime)),
timeofday = as.factor(ifelse(hour >= 3 & hour < 9,
"Morning", ifelse(hour >= 9 & hour < 14, "Mid-Day",
ifelse(hour >= 14 & hour < 18, "Evening", "Night")))),
lat_diff=abs(dropoff_latitude)-abs(pickup_latitude),
long_diff=abs(dropoff_longitude)-abs(pickup_longitude),
) %>%
select(-pickup_datetime)
#plot histogram of fare amount and histogram of log of fare amount
p1=ggplot(train,aes(x=fare_amount))+
geom_histogram(fill="red",alpha=0.5) +
ggtitle("Histogram of fare amount")
p2=ggplot(train,aes(x=log1p(fare_amount)))+
geom_histogram(fill="red",alpha=0.5,binwidth=0.25)+
ggtitle("Histogram of log fare_amount")
#save the grid with your own path
g1 = grid.arrange(p1,p2,ncol=2)
ggsave(filename='fare distribution.jpg',plot=g1,path='D:\\Dell\\RIT MS\\Intro to Big Data\\final project')
#plot scatter plot of Dropoff and pick up locations in data
p1=train %>%
ggplot(aes(x=pickup_latitude,y=pickup_longitude))+
geom_point()+
theme_light()+
theme(plot.title = element_text(hjust = 0.5)) +
ggtitle("Scatterplot of Pickup in Data")
p2=train %>%
ggplot(aes(x=dropoff_latitude,y=dropoff_longitude))+
geom_point()+
theme_light()+
theme(plot.title = element_text(hjust = 0.5)) +
ggtitle("Scatterplot of Dropoff in Data")
g2 = grid.arrange(p1,p2,nrow=2);
ggsave(filename='scatter_lati_long.jpg',plot=g1,path='D:\\Dell\\RIT MS\\Intro to Big Data\\final project')
#plot box plot of year in data
p1=train %>%
ggplot(aes(factor(year),y=fare_amount))+
geom_boxplot()+
theme_light()+
theme(plot.title = element_text(hjust = 0.5)) +
ggtitle("Boxplot of Year")
#plot box plot of fare amount by year in data
p2=train %>%
ggplot(aes(as.factor(year), fare_amount))+
geom_point(stat = "summary", fun.y = "median", col = "red", size = 5)+
ggtitle("Fare Amount by Year")+
theme(plot.title = element_text(hjust = .5), legend.position = "bottom")
g3 = grid.arrange(p1,p2,nrow=2)
ggsave(filename='box_plot_year.jpg',plot=g3,path='D:\\Dell\\RIT MS\\Intro to Big Data\\final project')
#plot box plot of fare amount by month in data
p1 = train %>%
ggplot(aes(as.factor(month), fare_amount))+
geom_point(stat = "summary", fun.y = "median", col = "red", size = 5)+
ggtitle("Fare Amount by Month")+
theme(plot.title = element_text(hjust = .5), legend.position = "bottom")+
facet_wrap(~factor(year),nrow=3)
ggsave(filename='fare_amount_month.jpg',plot=p1,path='D:\\Dell\\RIT MS\\Intro to Big Data\\final project')
#plot box plot of fare amount by time of day in data
p1 = train %>%
ggplot(aes(as.factor(timeofday), fare_amount))+
geom_point(stat = "summary", fun.y = "median", col = "red", size = 5)+
ggtitle("Fare Amount by Time of Day")+
theme(plot.title = element_text(hjust = .5), legend.position = "bottom")+
facet_wrap(~factor(year),nrow=3)
ggsave(filename='fare_amount_timeofday.jpg',plot=p1,path='D:\\Dell\\RIT MS\\Intro to Big Data\\final project')
|
6d01e5648f5d7cab0103c56b44b90114117f6983
|
0606177a3914cc80d5caa4d7a98d7f9870d4cb40
|
/code/10/9/question.R
|
30ef3bd73fb34b92a85b1ff5d8378a1bebe3c2c6
|
[] |
no_license
|
jeffljx12/thesis-new
|
624183776824eee36c3a38c0edb30aca6454012b
|
7ffab51491bbccfb8f5b953a3fb41cc84f214ebf
|
refs/heads/master
| 2020-04-07T19:08:32.966809
| 2019-03-10T16:36:45
| 2019-03-10T16:36:45
| 158,637,647
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,906
|
r
|
question.R
|
library(nlme)
bigF= function(){
# build model
d3k2_model = function(time,beta0,beta1,beta2,beta3,beta4,beta5,
ra1_i,ra2_i,ra3_i,tau1,tau2,
covar1,covar2,covar3,covar4,covar5,
cov.coef1,cov.coef2,cov.coef3,cov.coef4,cov.coef5,env = parent.frame()
){
intercept.x = ra2_i
speed = ra3_i + cov.coef1 *covar1 +
cov.coef2 *covar2 +
cov.coef3 *covar3 +
cov.coef4 *covar4 +
cov.coef5 *covar5
time_i = time*exp(speed) - intercept.x
return( ra1_i + beta0 + beta1*time_i + beta2*time_i^2 + beta3*time_i^3 +
beta4*ifelse(time_i-tau1>0,(time_i-tau1)^3,0) + beta5*ifelse(time_i-tau2>0,(time_i-tau2)^3,0)
)
}
#return(environment(d3k2_model))
nlme_d3k2 = nlme(outcome ~ d3k2_model(time = age,
beta0,beta1,beta2,beta3,beta4,beta5,
ra1_i, ra2_i,ra3_i,
tau1=7.3,tau2=14.6,
covar1 = var1,covar2 = var2,covar3 = var3,covar4 = var4,covar5 = var5,
cov.coef1, cov.coef2, cov.coef3, cov.coef4, cov.coef5
),
data = ,
fixed = beta0 + beta1 + beta2 + beta3 + beta4 + beta5 +
cov.coef1 + cov.coef2 + cov.coef3 + cov.coef4 + cov.coef5 ~1,
random = ra1_i + ra2_i + ra3_i ~ 1 |id,
start = c(beta0 = 19, beta1 = 8,beta2 = 9,beta3 = 0.8,beta4 = 0.3,beta5 = -0.9,
cov.coef1 = 0.5,cov.coef2 = 0.4, cov.coef3 = -0.5,cov.coef4 = -0.9,cov.coef5 = 0.3),
control = nlmeControl(pnlsTol = 0.01, # not using default
msMaxIter =50,
msVerbose = TRUE),
method = "ML",
na.action = na.omit)
return(summary(nlme_d3k2))
}
bigF()
# rm(d3k2_model)
# rm(nlme_d3k2)
|
a3f755197161849a4d5aa574b9bbae4de5222d5f
|
8d542ee756ee30acb737f5497625308da92a9678
|
/meta learners.r
|
72e3a19b1977a0a9e602f4533d1d18bdc5c0adcb
|
[] |
no_license
|
uint4/retail-sales-forecasting-with-meta-learning
|
82e60c4f70b6af96fb046174f7c69184ef4071aa
|
c613f6e66ce9faa9b9266d3c2c703b331c0ac0ef
|
refs/heads/master
| 2022-03-24T01:04:48.967757
| 2020-01-09T05:14:50
| 2020-01-09T05:14:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,166
|
r
|
meta learners.r
|
scale_mse <- function( scales ) { ### loss function
loss<-function(y_true, y_pred){
k_mean( (k_mean(k_square(y_pred-y_true),axis=-1,keepdims=TRUE)/scales))
}
loss
}
fcn_block<-function(input){ ### convonlutional blocks
squeeze_excite_block<-function(input){
filters = unlist(k_get_variable_shape(input)[3])
se = layer_global_average_pooling_1d(input) %>%
layer_dense(filters %/% 16, activation='relu', kernel_initializer='he_normal', use_bias=FALSE)%>%
layer_dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=FALSE)
se = layer_multiply(list(input, se))
se
}
fcn<-input %>%
layer_conv_1d(filters = 64, kernel_size = 2, padding="same", kernel_initializer='he_uniform') %>%
layer_activation_relu() %>%
squeeze_excite_block() %>%
layer_conv_1d(filters = 128, kernel_size = 4, padding="same", kernel_initializer='he_uniform') %>%
layer_activation_relu() %>%
squeeze_excite_block() %>%
layer_conv_1d(filters = 64, kernel_size = 8, padding="same", kernel_initializer='he_uniform') %>%
layer_activation_relu() %>%
layer_global_average_pooling_1d()
fcn
}
M0<-function(ep=50,XX=TRUE,y_shape,P_shape,mds,horizons,train_input,Y_train,test_input,Y_test){
## this meta learner is used to train M0, M2, M3,and M4, depending on the data inputed and value of XX
## ep: number of epochs of training;
## XX: whether extracting features from indicators
## y_shape: shape of sales series
## P_shape: shape of indicator series
## mds: number of base forecasters
## horizons: forecasting horizon
X_input_y <-layer_input(shape = y_shape)
X_input_P <-layer_input(shape = P_shape)
L_input <- layer_input(shape = c(1))
y_input<-layer_input(shape = c(mds,horizons))
X_layers_y <- X_input_y %>% fcn_block()
X_layers_P <- X_input_P %>% fcn_block()
if (XX) {
X_layers <- layer_concatenate(list(X_layers_y,X_layers_P)) %>%layer_dropout(0.8)%>% layer_dense(units = mds,activation="softmax" ) %>%
layer_repeat_vector(horizons) } else {
X_layers <- X_layers_y %>%layer_dropout(0.8)%>% layer_dense(units = mds,activation="softmax" ) %>%
layer_repeat_vector(horizons) }
y_input_p<- y_input %>% layer_permute(list(2,1))
multi_sum<-function(args) k_sum(args,axis=-1)
output <-layer_multiply(input=list(y_input_p,X_layers)) %>% layer_lambda(multi_sum)
model <- keras_model(list(X_input_y,X_input_P,y_input,L_input),output)
model %>% compile( loss = scale_mse(L_input), optimizer = 'adam' )
history <- model %>% fit(
train_input,Y_train,
validation_data = list(test_input, Y_test),
epochs = ep, batch_size = 4096)
model ## the trained model is returned
}
M1<-function(ep=50,y_shape,mds,horizons,train_input,Y_train,test_input,Y_test){
######## meta learner using hand craft features
X_input <-layer_input(shape = y_shape)
L_input <- layer_input(shape = c(1))
y_input<-layer_input(shape = c(mds,horizons))
X_layers<-X_input %>% layer_dense(128,activation='relu') %>%
layer_dense(64,activation='relu') %>%
layer_dense(32,activation='relu') %>%
layer_dropout(0.8)%>%
layer_dense(units = mds,activation="softmax" ) %>%
layer_repeat_vector(horizons)
y_input_p<- y_input %>% layer_permute(list(2,1))
multi_sum<-function(args) k_sum(args,axis=-1)
output <-layer_multiply(input=list(y_input_p,X_layers)) %>% layer_lambda(multi_sum)
model <- keras_model(list(X_input,y_input,L_input),output)
model %>% compile( loss = scale_mse(L_input), optimizer = 'adam' )
history <- model %>% fit(
train_input,Y_train,
validation_data = list(test_input, Y_test),
epochs = ep, batch_size = 4096)
model
}
M5<-function(ep=50,y_shape,mds,train_input,Y_train_bestone,test_input,Y_test_bestone){
X_input_y <-layer_input(shape = y_shape)
X_input_P <-layer_input(shape = P_shape)
X_layers_y <- X_input_y %>% fcn_block()
X_layers_P <- X_input_P %>% fcn_block()
X_layers <- layer_concatenate(list(X_layers_y,X_layers_P)) %>%layer_dropout(0.8)%>%
layer_dense(units = mds,activation="softmax" )
model <- keras_model(list(X_input_y,X_input_P), X_layers)
model %>%
compile(
loss ='categorical_crossentropy',
optimizer = 'adam',
metrics = c("accuracy")
)
history <- model %>% fit(
train_input,Y_train_bestone,
validation_data = list(test_input, Y_test_bestone),
epochs = ep, batch_size = 4096)
model
}
M6<-function(ep=50,y_shape,P_shape,mds,train_input,Y_train_err,test_input,Y_test_err){
X_input_y <-layer_input(shape = y_shape)
X_input_P <-layer_input(shape = P_shape)
X_layers_y <- X_input_y %>% fcn_block()
X_layers_P <- X_input_P %>% fcn_block()
X_layers <- layer_concatenate(list(X_layers_y,X_layers_P)) %>%layer_dropout(0.8)%>%
layer_dense(units = mds,activation='relu' )
model <- keras_model(list(X_input_y,X_input_P), X_layers)
model %>%
compile(
loss =loss_mean_squared_error, optimizer = 'adam'
)
history <- model %>% fit(
train_input,Y_train_err,
validation_data = list(test_input, Y_test_err),
epochs = ep, batch_size = 4096)
model
}
FFORMA<-function(ep=100,X_train,Y_train_err,X_test,Y_test_err){
dtrain <- xgboost::xgb.DMatrix(X_train,label=apply(Y_train_err,1,function(x) which(x==min(x))-1))
attr(dtrain, "errors") <- Y_train_err
dtest<- xgboost::xgb.DMatrix(X_test,label=apply(Y_test_err,1,function(x) which(x==min(x))-1))
attr(dtest, "errors") <- Y_test_err
param <- list(max_depth = 14, eta = 0.575188, nthread = 3,
silent = 0, objective = error_softmax_obj, num_class = ncol(Y_train_err),
subsample = 0.9161483, colsample_bytree = 0.7670739)
watchlist <- list(train = dtrain,valid=dtest)
model <- xgboost::xgb.train( params = param,
data = dtrain,
nrounds = ep,
verbose = 1,
watchlist = watchlist,
maximize = FALSE
)
model
}
|
f389d990b16379a6a0aa9058cddba98a7642a587
|
2bb0ba7e4b29ea458cbb064411c7ef69ef47065d
|
/R/NonParametric.VUS.R
|
c03e27d8046d50abc347ac2fa499a623b646602d
|
[] |
no_license
|
cran/DiagTest3Grp
|
a1b04922524129306eacb1d1643b0a7bc47f6df3
|
25099a0b5d0da160778aa6412021a2e2f1e01720
|
refs/heads/master
| 2021-01-23T08:43:47.100435
| 2014-02-20T00:00:00
| 2014-02-20T00:00:00
| 17,713,726
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
NonParametric.VUS.R
|
NonParametric.VUS <- function(x,y,z,alpha=0.05,NBOOT=50,FisherZ=FALSE)
{
######################################################################################################################################################
########This function provides Nonparametric estimate on VUS =Pr(X1<=X2<=X3)
########
####1. Input:
###x,y,z:vectors are the test marker measurements for the 3 ordinal group (D-,D0,D+)
####2. Output: nonparametric VUS estimate
######################################################################################################################################################
x <- na.exclude(x)
y <- na.exclude(y)
z <- na.exclude(z)
###sample size
n.minus <- length(x)
n0 <- length(y)
n.plus <- length(z)
##normal sample means
mu.minus <- mean(x,na.rm=TRUE)
mu0 <- mean(y,na.rm=TRUE)
mu.plus <- mean(z,na.rm=TRUE)
##normal sample SDs
s.minus <- sd(x,na.rm=TRUE)
s0 <- sd(y,na.rm=TRUE)
s.plus <- sd(z,na.rm=TRUE)
###data summary
dat.summary <- data.frame(n=c(n.minus,n0,n.plus),mu=c(mu.minus,mu0,mu.plus),sd=c(s.minus,s0,s.plus),row.names=c("D-","D0","D+"))
###non-parametric VUS estimate by ranking the triplets, each from a diagnosis group
temp.merge1 <- merge(data.frame(ID=rep(1,n.minus),x=x),data.frame(ID=rep(1,n0),y=y),by.x=1,by.y=1,all.x=T,all.y=T)
temp.merge2 <- merge(temp.merge1,data.frame(ID=rep(1,n.plus),z=z),by.x=1,by.y=1,all.x=T,all.y=T)
flag <- 1*(temp.merge2$x<=temp.merge2$y & temp.merge2$y<=temp.merge2$z)###if no ties,weight 1
flag[temp.merge2$x==temp.merge2$y | temp.merge2$y==temp.merge2$z] <- 0.5##ties have weight 0.5
VUS <- mean(flag)
if(FisherZ) VUS <- FisherZ(VUS)
###bootstrap variance and CI has to be obtained by bootstrapping outside the function
return(list(dat=list(x=x,y=y,z=z),dat.summary=dat.summary,estimate=VUS))
}
|
76f517ec95eba28c3cd580638f093385832672fb
|
d3b774668f6e577cefdeea4dd2be1326ee4b5aee
|
/R/all_mailinglists.R
|
776a333096b514cdc6ca782ac0f16d6e49a5b97e
|
[
"MIT"
] |
permissive
|
ropensci/qualtRics
|
50e68a3dd3f184ee14f19126bd7783b4b9bd61d1
|
c721563fa2fcb734c1ad9c4d8ccd80bbefbed15d
|
refs/heads/main
| 2023-08-31T01:00:05.366989
| 2023-06-23T18:55:13
| 2023-06-23T18:55:13
| 70,817,337
| 188
| 64
|
NOASSERTION
| 2023-09-07T19:38:56
| 2016-10-13T14:51:26
|
R
|
UTF-8
|
R
| false
| false
| 859
|
r
|
all_mailinglists.R
|
#' Retrieve a data frame of all mailing lists from Qualtrics
#'
#' @template retry-advice
#' @importFrom purrr map_df
#' @importFrom purrr flatten
#' @export
#'
#' @examples
#' \dontrun{
#' # Register your Qualtrics credentials if you haven't already
#' qualtrics_api_credentials(
#' api_key = "<YOUR-API-KEY>",
#' base_url = "<YOUR-BASE-URL>"
#' )
#'
#' # Retrieve a list of all mailing lists
#' mailinglists <- all_mailinglists()
#' }
#'
all_mailinglists <- function(){
check_credentials()
# Function-specific API stuff
fetch_url <- generate_url(query = "allmailinglists")
elements <- list()
while(!is.null(fetch_url)){
res <- qualtrics_api_request("GET", url = fetch_url)
elements <- append(elements, res$result$elements)
fetch_url <- res$result$nextPage
}
x <- purrr::map_df(elements, purrr::flatten)
return(x)
}
|
6a530b2815570c4eb3626b0ed55c78c3d62c4b37
|
86d4288e42815531c36804e95e3a58cef083090b
|
/run_analysis.R
|
5359385150e0d75065da4e90f4fc292968113e78
|
[] |
no_license
|
shilpibhatnagar/Getting-cleaningdata
|
2cb07afa0f8ec2e54fb93eaa2024b622e197b5ef
|
69eb35c2f43727c9a7be825f081bcfa03e8c052f
|
refs/heads/master
| 2021-01-12T01:53:53.659156
| 2017-01-10T10:00:33
| 2017-01-10T10:00:33
| 78,441,816
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,357
|
r
|
run_analysis.R
|
#download the files and change the working directory in R Studio to the folder where the data files exist
#setwd("filepath")
# First we read all the data files for which we need to tidy the data.
#Training files
train_data_x <- read.table("./train/X_train.txt")
train_data_y <- read.table("./train/y_train.txt")
train_data_subject <- read.table("./train/subject_train.txt")
# test data files
test_data_x <- read.table("./test/X_test.txt")
test_data_y <- read.table("./test/y_test.txt")
test_data_subject <- read.table("./test/subject_test.txt")
# features file
features <- read.table("./features.txt")
# activity names
activity_labels <- read.table("./activity_labels.txt")
# merge test and train datasets
joined_data <- rbind(train_data_x,test_data_x)
joined_labels <- rbind(train_data_y,test_data_y)
joined_subjects <- rbind(train_data_subject,test_data_subject)
# Set the appropiate column names
names(joined_data) = features[[2]]
names(joined_labels) = c("Activityid")
names(joined_subjects) = c("Subjects")
# Extract the data for mean and std columns as a vector, and then merge the two vectors to get a single vector.
means_indices <- grep("mean",features[[2]])
std_indices <- grep("std",features[[2]])
merged_indices <- c(means_indices,std_indices)
# Extract relevant joined data from the indices
indiced_joined_data <- joined_data[merged_indices]
# Tidy up the column names
names(activity_labels) = c("Activityid","Activityname")
# Substitute the IDs with the merge function
activities <- merge(activity_labels,joined_labels,"Activityid")
indiced_joined_data$activities <- activities[[2]]
indiced_joined_data$subjects <- joined_subjects[[1]]
# Clean up the columnnames
names(indiced_joined_data) <- gsub("\\(\\)","",names(indiced_joined_data))
#names(indiced_joined_data) <- gsub("std","Std",names(indiced_joined_data))
#names(indiced_joined_data) <- gsub("mean","Mean",names(indiced_joined_data))
names(indiced_joined_data) <- gsub("-","",names(indiced_joined_data))
# Create the second set
second_set<-aggregate(indiced_joined_data[,1:79],list(activities = indiced_joined_data$activities, subjects=indiced_joined_data$subjects),mean, na.rm=TRUE)
# write to file
write.table(indiced_joined_data, "clean_data.txt")
write.table(second_set,"second_set.txt",row.name=FALSE)
|
4c4aa63abeccb5690ca4713738dfac0557e53790
|
1eb7bac365b579c4840e1489ed3c0171d1bd5e00
|
/scripts/genieR.R
|
b5cfd5f1eb3e7346b789de5e7972f6025915bbcd
|
[] |
no_license
|
asmmhossain/phlow
|
80b853fd9b3aeca569d56882c72348437bdcf963
|
2fd76033b5412b1239f6af256ff20ee29bf7d237
|
refs/heads/master
| 2020-09-13T10:22:36.830570
| 2017-09-27T15:43:25
| 2017-09-27T15:43:25
| 13,800,552
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 988
|
r
|
genieR.R
|
#!/usr/bin/env Rscript
args <- commandArgs(trailingOnly=T)
if(length(args) < 2)
{
stop('Usage: rtt2.R <tree> <outname>')
}
suppressPackageStartupMessages(require(ape))
suppressPackageStartupMessages(require(pander))
suppressPackageStartupMessages(require(genieR))
#suppressPackageStartupMessages(require(adephylo))
trName <- args[1]
outName <- args[2]
tre <- read.tree(trName)
const <- Geniefit(tre,Model='const',start=c(100),upper=Inf,lower=0)
expo <- Geniefit(tre,Model='expo',start=c(100,.1),upper=Inf,lower=0)
log <- Geniefit(tre,Model='log',start=c(100,.1,.1),upper=Inf,lower=0)
popSize <- c(const$parr,expo$parr[1],log$parr[1])
growthRate <- c(NA,expo$parr[2],log$parr[2])
aic <- c(const$AIC,expo$AIC,log$AIC)
models <- c('Constant','Exponential','Logistic')
genie <- data.frame(models,popSize,growthRate,aic)
colnames(genie) <- c('Models','PopulationSize','GrowthRate','AIC')
pander(genie)
write.table(genie,outName,col.names=TRUE,row.names=FALSE,sep="\t",quote=FALSE)
|
8075bad8ab102375abf67c245d1f90b85dde0aa9
|
95fecb2a9ee2c90763b0baa04379a5f119546063
|
/github_whatsappcode.R
|
8165419648a69ce5b67709e997eee7ad9e5d1291
|
[] |
no_license
|
iPALVIKAS/whatsapp-chat-text-mining
|
2db29ba946192788617f9eb0ace2371b6423f302
|
5482d3c10197fdbde970021bbaf4f23d9f4edfcd
|
refs/heads/master
| 2021-01-20T22:19:06.211986
| 2016-07-27T11:15:53
| 2016-07-27T11:15:53
| 64,249,782
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
github_whatsappcode.R
|
#Vikas Pal
#the data show the text minning and representation of it the form of word cloud
#loading data from the local folder
data <- read.csv('data.csv',header = FALSE)
#handling emoticons
data <- sapply(data,function(row) iconv(row, 'latin1', 'ASCII',sub = ''))
#calling text mining package
library(tm)
#making it a corpus vector source
newCorpus <- Corpus(VectorSource(d))
#making all the text to lower case
newCorpus <- tm_map(newCorpus, content_transformer(tolower))
#removing al the punctuation from the data
newCorpus <- tm_map(newCorpus, removePunctuation)
#removing all the numbers from the data, this will also remove the date written in number format
newCorpus <- tm_map(newCorpus, removeNumbers)
#remove teh extra white spaces from the data
newCorpus <- tm_map(newCorpus, stripWhitespace)
#taking care of all thee stopwords in english language
newCorpus <- tm_map(newCorpus, removeWords, stopwords("english"))
#stemming the data
newCorpus <- tm_map(newCorpus, stemDocument, language = 'english')
#creating a document term frequency
ndtm <- DocumentTermMatrix(newCorpus)
nndtm <- as.matrix(ndtm)
#calling wordcloud package
library(wordcloud)
#this wil get the sum of the occurance of the words
f <- colSums(nndtm)
#sorting it in decreasing form
f <- sort(f, decreasing = TRUE)
#view the occurance of the data
head(f)
#taking the names of the words occured in the data and converting it to a vector form
words <- names(f)
words <- as.vector(words)
#making it into a dataframs so the extraction of words and frequency will be easy
f1 <- as.data.frame(f)
#writing the data with word frequency
write.csv(x = f1,file = 'newdata.csv')
#taking the frequency of the occured word
freq <- f1$f
#RColorBrewer package to get the color in the word cloud
library(RColorBrewer)
##following type to get the output in the form of word cloud
#Type1
wordcloud(words, scale = c(2,.2))
#Type 2
wordcloud(words = words,
freq = freq,
min.freq = 4,
colors = brewer.pal(8, "Dark2"))
#Type 3
wordcloud(words = words,
freq = freq,
min.freq = 4,
scale = c(5,0.5),
random.order = FALSE,
colors = brewer.pal(8, "Dark2"))
|
97a8d61c01562ab66fd50be17b143e3b6d95827b
|
ddd18a4a1cc9911b3f1ec8e38165d5cdf3386fac
|
/303ar_age.R
|
43d8174b33da77011035b3ed19db368347c88472
|
[] |
no_license
|
stewartli/auditworkpaper
|
656e47fd7113170886ce59585a18d349bda0b5d2
|
3a4ee3fdeba67e58657435c5849fc5ae4d78a5ed
|
refs/heads/master
| 2020-08-09T13:46:03.584327
| 2020-07-26T06:51:08
| 2020-07-26T06:51:08
| 214,100,102
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,316
|
r
|
303ar_age.R
|
source("001package.R")
source("202df_tb.R")
# reconcile sales to ar----
#+
df_join <- df_ar %>%
filter(subaccount == "Accounts Receivable") %>%
mutate(amt = debit,
amt = ifelse(amt == 0, -credit, amt)) %>%
group_by(num) %>%
full_join(df_ar %>%
filter(subaccount == "Revenue") %>%
select(num, type, debit, credit) %>%
group_by(num) %>%
summarise(tdebit = sum(debit),
tcredit = sum(credit),
trans = n()),
by = c("num")) %>%
select(subaccount:split, amt, debit, credit, trans, tdebit, tcredit) %>%
ungroup()
#+ YE ar balance by customers
df_join %>%
group_by(name, type) %>%
summarise_at(vars(tdebit, tcredit, trans, debit, credit), sum) %>% # reconcile sales to ar
summarise_at(vars(debit, credit), sum) %>%
mutate(cf = debit - credit) %>%
filter(cf != 0)
#+
df_join %>%
group_by(name, type, num) %>%
summarise_at(vars(debit, credit), sum) %>%
rownames_to_column() %>%
mutate(status = ifelse(rowname %in% c(4, 7, 55, 96, 123, 172), "unpaid", "paid")) %>%
filter(status == "unpaid") # not automatic
df_ar %>%
filter(subaccount == "Accounts Receivable") %>%
group_by(name, type) %>%
summarise_at(vars(debit, credit), sum) %>%
mutate(ar = debit,
ar = ifelse(ar == 0, -credit, ar)) %>%
select(-c(credit, debit)) %>%
spread(type, ar) %>%
ungroup() %>%
mutate_all(~(replace_na(.,0))) %>%
ggplot(aes(Invoice, Payment)) +
geom_point() +
geom_abline(slope = -1, col = "red", lty = 2) +
scale_x_continuous(labels = comma_format()) +
scale_y_continuous(labels = comma_format()) +
labs(title = "Paid vs Unpaid invoices for FY2018") # not very useful
# trend----
df_ar %>%
group_by(subaccount, month) %>%
summarise_at(vars(debit, credit), sum) %>%
mutate(amt = case_when(subaccount == "Accounts Receivable" ~ debit - credit,
subaccount == "Revenue" ~ credit - debit)) %>%
ggplot(aes(month, amt, color = subaccount)) +
geom_point(show.legend = FALSE) +
geom_path(aes(group = subaccount), show.legend = FALSE) +
geom_hline(yintercept = 0, lty = 2, col = "black", size = 1) +
scale_y_continuous(breaks = seq(-30000, 80000, 10000), labels = comma_format()) +
directlabels::geom_dl(aes(label = subaccount), method = list(dl.combine("last.points"), cex = 0.6)) +
theme_light() +
labs(title = "Monthly movement for FY2018",
caption = "RAudit Solution LLP | Stewart Li",
x = "",
y = "",
color = "") # advance payment
# appendix----
#' ar invoice/payment mean
df_ar %>%
filter(subaccount == "Accounts Receivable") %>%
group_by(type) %>%
summarize(mean_debit = mean(debit),
mean_credit = mean(credit),
n = n()) %>%
pivot_wider(names_from = type,
values_from = c(mean_debit, mean_credit, n))
#' good customers
d15p <- df_ar %>%
filter(subaccount == "Accounts Receivable") %>%
arrange(desc(credit)) %>%
filter(credit > quantile(credit, prob = .85)) # not sure it is a correct approach?
c15p <- df_ar %>%
filter(subaccount == "Accounts Receivable") %>%
arrange(desc(debit)) %>%
head(nrow(d15p))
goodcustomers <- intersect(d15p$name, c15p$name)
|
b5ae4fc98d03f1f4e638cef97332b62dbea08775
|
cd181d1f57308093a0142a6f3630db006bda2c1d
|
/preliminary.R
|
ffcd5f357f8650bf992eb77d8b0615d0741675a4
|
[] |
no_license
|
CoMoS-SA/agriLOVE
|
717df7d531faacdc361360f53613af93595716a0
|
91153495a29dd2cba938df64cf745daacf398b0f
|
refs/heads/main
| 2023-08-26T20:09:25.028184
| 2021-10-13T15:32:58
| 2021-10-13T15:32:58
| 416,800,601
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,245
|
r
|
preliminary.R
|
#### PRELIMINARIES
#Contains preliminary code chunks to be reproduced at the beginning of each time step
preliminary_f <- function(){
if(flag_suspend_ergodic==1){
if(t<ergodic_trans){
flag_auction <<- 0
flag_deforestation <<- 0
flag_land_abandon <<- 0
}else{
if(t==ergodic_trans){
flag_auction <<- 1
flag_deforestation <<- 1
flag_land_abandon <<- 1
}
}
}
# World and agriculture
world[,,t,p] <<- world[,,t-1,p] # inerith distribution of properties from previous period (changes to property happen in the entry module, at the end of any period)
agri[,,t,p] <<- agri[,,t-1,p] # inerith type of agriculture from previous period (switching happens at the end of every period)
# Turn wastelands into forests
if(t>time_to_forest){
wasteland_coord <<- which(world[,,t,p]==9999, arr.ind = T) #get all forests coordinates
if(nrow(wasteland_coord)!=0){
for(o in 1:nrow(wasteland_coord)){ #scroll forests
if(!any(world[wasteland_coord[o,"row"],wasteland_coord[o,"col"],t:(t-time_to_forest),p]!=9999)){#if enough periods have passed, turn into forest
world[wasteland_coord[o,"row"],wasteland_coord[o,"col"],t,p] <<- 0
loss[wasteland_coord[o,"row"],wasteland_coord[o,"col"],t,p] <<- 0
}
}
}
}
#rate of growth of productivity, we increase wage and price of land at this rate
if(t==2){
prod_growth <<- 0.1
}else{
prod_growth <<- log(mean(theta[,,t-1,p])) - log(mean(theta[,,t-2,p]))
}
if(t==2){
wage_shock <<- 0
}else{
wage_shock <<- log(sum(L[,,t-1,p])) - log(sum(L[,,t-2,p]))
}
# Evolution of demand and wages
demand_noise[t,p] <<- rnorm(1,0,0.01)
demand[t,p] <<- (demand[t-1,p] + demand_rate)*(1 + demand_noise[t,p]) #linear demand
wage[t,p] <<- wage[t-1,p]*(1+prod_growth)*(1+eps_wage*wage_shock)
if(flag_climate==2){
demand[t,p] <<- as.numeric(baseline_climate_demand[baseline_climate_demand$time==t & baseline_climate_demand$mc==p,"demand"] )
}
# Update existing producers
z <<- 1
for (z in 1:dim(existing_producers)[1]){
property <<- which(world[,,t,p]==z, arr.ind = T)
if(nrow(property)==0){ # if z producers do not have properties (has defaulted in the past or not born yet)
existing_producers[z,t,p] <<- 0
n_property[z,t,p] <<- 0
}else{
existing_producers[z,t,p] <<- 1 # active producer
n_property[z,t,p] <<- nrow(property) # compure number of properties for each farmer
}
}
# How many forests this period?
forests[t,p] <<- sum(world[,,t,p]==0)
if(forests[t,p]==(x*y)){
print(paste("WARNING: World is covered with forests at time ",t, ": Welcome to the jungle.", sep = ""))
}
wastelands[t,p] <<- sum(world[,,t,p]==0)
if(wastelands[t,p]==(x*y)){
print(paste("WARNING: World is covered with wastelands at time ",t, sep = ""))
}
# Where are the forests?
non_forest_pos <- which(world[,,t,1]!=0, arr.ind = T)
if(flag_def_policy!=0){
agri_share_def[t,p] <<- sum(agri[,,t,p]==2)/sum(agri[,,t,p]!=0)
if(t>ergodic_trans){
if( (agri_share_def[t-1,p]<tresh_def_pol & agri_share_def[t,p]>tresh_def_pol) | (agri_share_def[t-1,p]>tresh_def_pol & agri_share_def[t,p]<tresh_def_pol) ){
flag_deforestation <<- 0
}
}
}
#if(flag_conv_tax==1|flag_conv_tax==2){ #if no more sustainables, suspend policy
# if((flag_conv_tax==1)&(t==2)){
# flag_conv_tax<<-1
# }
# if((flag_conv_tax==2)&(t==2)){
# print("yo")
# flag_conv_tax<<-2
# }
# if(sum(agri[,,t,p]==2)==0){
# print("POLICY SUPPRESSED")
# flag_conv_tax <<- 0
# }
#}
}
|
c5f4a2e84e1920858c7e621b2b754c7fc517cedd
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/iBATCGH/man/Scenario2.Rd
|
7f4d977136834079b8952e5e63cc1953dce61639
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
rd
|
Scenario2.Rd
|
\name{Scenario2}
\alias{Scenario2}
\title{
Simulated data - Scenario 2
}
\description{
Simulates the data as described in the reference provided below (Scenario 2).
}
\usage{
Scenario2(sigmak = 0.1)
}
\arguments{
\item{sigmak}{
Standard deviation of the error term
}
}
\value{
Returns a list made of the following items:
\item{Y }{Matrix of simulated gene expression}
\item{X }{Matrix of simulated CGH}
\item{Xi }{True matrix of hidden states}
\item{A }{Empirical transition matrix}
\item{mu }{True vector of state specific mean}
\item{Sd }{True vector of state specific sd}
\item{coeff }{True matrix of association coefficients between gene expression and CGH probes}
\item{distance }{Vector of distance between CGH probes}
\item{disfix }{Length of the chromosome}
}
\references{
Cassese A, Guindani M, Tadesse M, Falciani F, Vannucci M. A hierarchical Bayesian model for inference of copy number variants and their association to gene expression. Annals of Applied Statistics, 8(1), 148-175.\cr
Cassese A, Guindani M, Vannucci M. A Bayesian integrative model for genetical genomics with spatially informed variable selection. Cancer Informatics.
}
\author{
Alberto Cassese
}
\examples{
data <- Scenario2(sigmak = 0.1)
}
\keyword{Simulated data}
|
58bbbf2db2c86b867fbc5edf8f4d2cde5682daa5
|
8b387d5ca95fab93d6e73cd2f14af4579c1828c9
|
/man/tiler.Rd
|
6cdcf4cf79ba1bc47d96721d9e00f8069d44ab9c
|
[] |
no_license
|
swebb1/questplots
|
f13564cd617aedd92543b0f7b6bc7d33e174d9d4
|
70d04398f053e01c0041ecdf80212ae46b3ec2a3
|
refs/heads/master
| 2021-01-17T18:04:57.301061
| 2016-06-30T10:45:38
| 2016-06-30T10:45:38
| 62,303,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,212
|
rd
|
tiler.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tiler.r
\name{tiler}
\alias{tiler}
\title{tiler function}
\usage{
tiler(t, x, xl = F, xs = 1, y, yl = F, ys = 1, z, zl = F, zs = 1,
bin = 1, min = -5, max = 5, xrange = c(-100, 100), yrange = c(-100,
100), func = "median", col = cols)
}
\arguments{
\item{t}{Table of data}
\item{x, y, z}{Variables for each scale}
\item{xl, yl, zl}{Log the variable first. Defaults to F.}
\item{xs, ys, zs}{Scale the variable first. Defaults to 1.}
\item{bin}{Size of each tile}
\item{min, max}{Range of the colour scale. Defaults to -5 to 5}
\item{xrange}{Range displayed on x-axis. Defaults to c(-100,100)}
\item{yrange}{Range displayed on y-axis. Defaults to c(-100,100)}
\item{func}{Function applied to tiled z variables ("median","mean","sum","count"). Defaults to median.}
\item{col}{Vector of colours for the colour range. Defaults to colorRamps::matlab.like2(10)}
}
\description{
This function plots 3 variables on an x,y and coloured z scale. Values on the x and y scale are tiled
and the colour of the tile is determined by the function applied to z (mean,median,count etc.).
}
\examples{
tiler()
}
\keyword{plots}
\keyword{tile}
|
5b667106864d73d0521c9cc8dfd61d06af822dda
|
17655e4899fc18dc366e4ea1a066a2b6b100a2b0
|
/tests/testthat.R
|
0ce499727e83c679d75fc270d6a2b72f9af920e5
|
[] |
no_license
|
mdsumner/rmdal0
|
704ba2a23e6b5b88f51bc968ff1d0b387ce4d324
|
37313a442ee327bad86bb348e52d2714560254fa
|
refs/heads/master
| 2020-04-06T12:51:51.260753
| 2019-09-20T07:05:03
| 2019-09-20T07:05:03
| 157,473,804
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Test runner for the rmdal0 package: executed by R CMD check.
library(testthat)
library(rmdal0)
# Discover and run every test file under tests/testthat/.
test_check("rmdal0")
|
4223a33170b44c1407b3ee59730f724c2e4dbde1
|
1c33c264f7b501d2d38c4f0fc0ea016d1fe6a7a2
|
/man/computeAuc.Rd
|
48896555f76e0346dfe8dd05f5ad9361f635d0b8
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/PatientLevelPrediction
|
348a0418bfcaac4a54f7fdf8d56593f1bca60119
|
f6bda233eae96bae81b5745b46879c685d278e36
|
refs/heads/main
| 2023-08-16T22:31:30.383549
| 2023-08-16T12:34:22
| 2023-08-16T12:34:22
| 32,690,570
| 176
| 98
| null | 2023-09-07T14:58:48
| 2015-03-22T19:08:53
|
HTML
|
UTF-8
|
R
| false
| true
| 600
|
rd
|
computeAuc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EvaluationSummary.R
\name{computeAuc}
\alias{computeAuc}
\title{Compute the area under the ROC curve}
\usage{
computeAuc(prediction, confidenceInterval = FALSE)
}
\arguments{
\item{prediction}{A prediction object as generated using the
\code{\link{predict}} functions.}
\item{confidenceInterval}{Should 95 percent confidence intervals be computed?}
}
\description{
Compute the area under the ROC curve
}
\details{
Computes the area under the ROC curve for the predicted probabilities, given the true observed
outcomes.
}
|
2af2bf5e13ed177c602579f0436d98c194a4276c
|
f7bdf02c5d15335f306ee45720570c0e00856e5d
|
/day1/day1_fish_exercises.r
|
229bd904e4fc0a254fee9ace185cd7e2c68d147d
|
[] |
no_license
|
rkbauer/R_Course_Basic_Statistics_for_Marine_Fishery_Biologists
|
e8171da5a1256dc53cc94f8c101b9ca6ea20edef
|
79b691dcc956e7894274c11e09e7e162ce7b6322
|
refs/heads/master
| 2023-01-22T12:42:57.447146
| 2020-11-28T21:28:14
| 2020-11-28T21:28:14
| 263,082,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,550
|
r
|
day1_fish_exercises.r
|
# Fish exercise - day 1

# 1. Import "Fish.csv" (mind not available values!)
setwd('~/Dropbox/R_course/day1')  # NOTE(review): hard-coded course directory
fish <- read.table("Fish.csv", header = TRUE, sep = ',', dec = ".")  # load dataframe
head(fish)
# The original used attach(fish); explicit fish$ references are used instead,
# so no column names are masked in the global environment and there is no risk
# of working on a stale attached copy of the data.

# 2. Which was the maximum, which was the minimum size of each caught species?
# range() returns c(min, max); NA values propagate, as in the original.
range(fish$total.length[fish$species == "dab"])
range(fish$total.length[fish$species == "cod"])

# 3. Create box-plots of the length distribution of both species and both sexes
boxplot(fish$total.length[fish$species == "dab"] ~ fish$sex[fish$species == "dab"],
        las = 1, names = c("male", "female"),
        ylab = "total length [mm]", main = "dab")
boxplot(fish$total.length[fish$species == "cod"] ~ fish$sex[fish$species == "cod"],
        las = 1, names = c("male", "female", "juvenile"),
        ylab = "total length [mm]", main = "cod")

# 4. Create species specific box-plots of the full, empty and liver weight
boxplot(fish$full.weight[fish$species == "dab"],
        fish$empty.weight[fish$species == "dab"],
        fish$liver.weight[fish$species == "dab"],
        las = 1, names = c("full", "empty", "liver"),
        ylab = "weight [g]", main = "dab")
boxplot(fish$full.weight[fish$species == "cod"],
        fish$empty.weight[fish$species == "cod"],
        fish$liver.weight[fish$species == "cod"],
        las = 1, names = c("full", "empty", "liver"),
        ylab = "weight [g]", main = "cod")

# 5. Define a new vector: Hepato-Somatic Index (liver weight as % of body weight)
HSI <- fish$liver.weight * 100 / fish$full.weight

# 6. What is the median & the range of the HSI per species?
median(HSI[fish$species == "dab"], na.rm = TRUE)
median(HSI[fish$species == "cod"], na.rm = TRUE)
range(HSI[fish$species == "dab"], na.rm = TRUE)
range(HSI[fish$species == "cod"], na.rm = TRUE)
|
12d189dd5162a620c49f63e9aadd1984877cf90f
|
4838ff30f4cf7fc1d6a268a1f28f122dea90d056
|
/man/PopHumanAnalysis.Rd
|
8d65d0d2cbb3ee9d454917857061c5d23445795d
|
[] |
no_license
|
BGD-UAB/iMKT
|
319ff35016ba54108fe1dc3ce64e90e623b733b0
|
1fb62426f850c4423c042f027fd9fb844f56304b
|
refs/heads/master
| 2021-06-15T23:20:46.913981
| 2021-02-15T12:07:05
| 2021-02-15T12:07:05
| 144,316,815
| 8
| 2
| null | 2019-04-29T13:25:01
| 2018-08-10T17:51:58
|
R
|
UTF-8
|
R
| false
| true
| 2,331
|
rd
|
PopHumanAnalysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PopHumanAnalysis.R
\name{PopHumanAnalysis}
\alias{PopHumanAnalysis}
\title{iMKT using PopHuman data}
\usage{
PopHumanAnalysis(
genes = c("gene1", "gene2", "..."),
pops = c("pop1", "pop2", "..."),
cutoff = 0.05,
recomb = TRUE/FALSE,
bins = 0,
test = c("standardMKT", "imputedMKT", "FWW", "aMKT"),
xlow = 0,
xhigh = 1,
plot = FALSE
)
}
\arguments{
\item{genes}{list of genes to analyze}
\item{pops}{list of populations to analyze}
\item{recomb}{group genes according to recombination values (TRUE/FALSE)}
\item{bins}{number of recombination bins to compute (mandatory if recomb=TRUE)}
\item{test}{which test to perform. Options include: standardMKT (default), imputedMKT, FWW, aMKT}
\item{xlow}{lower limit for asymptotic alpha fit (default=0)}
\item{xhigh}{higher limit for asymptotic alpha fit (default=1)}
\item{plot}{report plot (optional). Default is FALSE}
\item{cutoffs}{List of cutoffs used when performing FWW and/or imputedMKT}
}
\value{
List of lists with the default test output for each selected population (and recombination bin when defined)
}
\description{
Perform any MKT method using a subset of PopHuman data defined by custom genes and populations lists
}
\details{
Execute any MKT method (standardMKT, FWW, imputedMKT, aMKT) using a subset of PopHuman data defined by custom genes and populations lists. It uses the dataframe PopHumanData, which can be already loaded in the workspace (using loadPopHuman()) or is directly loaded when executing this function. It also allows deciding whether to analyze genes groupped by recombination bins or not, using recombination rate values corresponding to the sex average estimates from Bhérer et al. 2017 Nature Commun.
}
\examples{
## List of genes
mygenes <- c("ENSG00000011021.21_3","ENSG00000091483.6_3","ENSG00000116191.17_3",
"ENSG00000116337.15_4","ENSG00000116584.17_3","ENSG00000116745.6_3",
"ENSG00000116852.14_3","ENSG00000116898.11_3","ENSG00000117010.15_3",
"ENSG00000117090.14_3","ENSG00000117222.13_3","ENSG00000117394.20_3")
## Perform analyses
PopHumanAnalysis(genes=mygenes , pops=c("CEU","YRI"), recomb=FALSE, test="standardMKT")
PopHumanAnalysis(genes=mygenes , pops=c("CEU"), recomb=TRUE, bins=3, test="imputedMKT")
}
\keyword{PopData}
|
62af94e697a62041523d91d8457944f479d36029
|
f2badfafe4cbaa709547c18ff5abb1283a255390
|
/kokoro.r
|
0f04d0b6d1805343216ebca0e57e7b3f2694f089
|
[] |
no_license
|
satocos135/lecture2019shimane
|
71bf893f49ba08b9ac5ba75984266d0eaeb30faa
|
5483f5d4524bc9d488ab1acd2ef8121db24c01cc
|
refs/heads/master
| 2020-06-19T03:11:29.705018
| 2019-07-24T07:27:45
| 2019-07-24T07:27:45
| 196,543,866
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,155
|
r
|
kokoro.r
|
# Text-mining analysis of Natsume Soseki's novel "Kokoro" (tidyverse + RMeCab).
# The commented lines below switch PATH to a Shift-JIS build of MeCab if needed.
# path = Sys.getenv('PATH')
# Sys.setenv(PATH= sub('MeCab', 'MeCab_sjis', path))
# Sys.getenv('PATH')
#Sys.getenv('PATH')
library('tidyverse')
library('RMeCab')
library('igraph')
getwd()
#setwd('../projects/lec')
setwd('../lecture2019/')
# Load the novel: one row per paragraph, with part/section id columns.
kokoro = read.delim('data/kokoro.tsv', header=T, sep='\t', stringsAsFactor=F, fileEncoding='utf8')
kokoro %>% head()
# Distribution of paragraph lengths
kokoro[, 'content'] %>% str_length() %>% hist(breaks=40, xlab='Paragraph length', main='Histogram of paragraph length')
kokoro['length'] = kokoro[, 'content'] %>% str_length()
boxplot(length ~ part_id, data=kokoro, main='Paragraph length of each part')
# Combine part and section ids into one sortable key (part*100 + section).
kokoro['section_id2'] = kokoro['part_id'] * 100 + kokoro['section_id']
boxplot(length ~ section_id2, data=kokoro, main='Paragraph length of each section')
# Concatenate the paragraphs of each part into one long text for the analysis
parts = kokoro %>% group_by(part_id) %>% summarise(text = paste0(content, collapse=''))
parts = as.data.frame(parts)
dim(parts)
parts[, 'text'] %>% str_length()
# Term-document matrix of nouns per part; the second call adds the custom
# user dictionary (dict/kokoro.dic) and overwrites the first result.
count_noun = docMatrixDF(parts[,'text'], pos=c('名詞'))
count_noun = docMatrixDF(parts[,'text'], pos=c('名詞'), dic='dict/kokoro.dic')
# Aggregate noun frequencies over the whole novel
freq_noun = count_noun %>% rowSums()
# Rank-frequency plots (linear, semi-log and log-log scales)
freq_noun %>% sort(decreasing=T) %>% plot(main='Distribution of noun frequency', xlab='Rank', ylab='Frequency')
freq_noun %>% sort(decreasing=T) %>% plot(main='Distribution of noun frequency', xlab='Rank', ylab='Frequency (log)', log='y')
freq_noun %>% sort(decreasing=T) %>% plot(main='Distribution of noun frequency', xlab='Rank (log)', ylab='Frequency (log)', log='xy')
freq_noun %>% sort() %>% tail(30) %>% barplot(horiz=T, las=1, main='Top 30 nouns', xlab='Frequency')
# (t(count_noun) / colSums(count_noun) ) %>% colSums()
# Stop-word list of uninformative nouns (pronouns, formal nouns, etc.)
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため')
freq_noun[!names(freq_noun) %in% stopwords] %>% sort() %>% tail(30) %>% barplot(horiz=T, las=1, main='Top 30 nouns', xlab='Frequency')
# Extended stop-word list; re-draw the top-30 chart without those words
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため', '時', 'そこ', 'どこ', 'これ', 'そう')
freq_noun[!names(freq_noun) %in% stopwords] %>% sort() %>% tail(30) %>% barplot(horiz=T, las=1, main='Top 30 nouns', xlab='Frequency')
# PCA
# Keep nouns occurring more than 50 times that are not stop words;
# column names are the three parts of the novel.
mat = count_noun
mat = mat[rowSums(mat) > 50, ]
mat = mat[!row.names(mat) %in% stopwords, ]
colnames(mat) = c('第一部', '第二部', '第三部')
mat_t = t(mat)
mat %>% head()
# PCA on word frequencies (rows normalised by total count per part)
result = (mat_t / colSums(mat)) %>% prcomp()
biplot(result)
# PCA on probability ratios (the normalised frequencies normalised again)
ratio = mat_t / colSums(mat)
ratio_t = t(ratio)
result = (ratio_t / colSums(ratio)) %>% prcomp()
biplot(result)
# Co-occurrence analysis
# Bigrams: docDF with N=2 counts adjacent noun pairs per part,
# using the Shift-JIS user dictionary.
bigram = docDF(parts,col='text', type=1, pos=c('名詞'), N=2, nDF=1, dic='dict/kokoro_sjis.dic')
# Distribution of bigram frequencies; columns 5:7 are summed into a total
# (they appear to be the per-part counts — confirm against docDF output).
bigram['freq'] = bigram[,5:7] %>% rowSums()
bigram[,'freq'] %>% sort(decreasing=T) %>% plot(log='y')
bigram[,'freq'] %>% sort(decreasing=T) %>% plot(log='xy')
bigram[1,]
# Overall co-occurrence network: frequent pairs only, stop words removed
net = bigram %>%
  filter(freq > 20) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Per-part networks: Row1/Row2/Row3 are used as the counts of parts 1-3
net = bigram %>%
  select(N1, N2, freq=Row1) %>%
  filter(freq > 10) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
net = bigram %>%
  select(N1, N2, freq=Row2) %>%
  filter(freq > 5) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
net = bigram %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 10) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# net %>% graph_from_data_frame() %>% plot(vertex.color='SkyBlue', vertex.size=22)
# Recompute bigrams over more parts of speech (nouns, verbs, adjectives, adverbs)
bigram = docDF(parts,col='text', type=1, pos=c('名詞', '動詞', '形容詞', '副詞'), N=2, nDF=1, dic='dict/kokoro_sjis.dic')
# Pairs involving お嬢さん ("the young lady"), ranked by the part-3 count
bigram %>%
  select(N1, N2, freq=Row3) %>%
  filter(N1 == 'お嬢さん' | N2 == 'お嬢さん') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords) %>%
  arrange(desc(freq))
net = bigram %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 1) %>%
  filter(N1 == 'お嬢さん' | N2 == 'お嬢さん') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Extend the stop-word list with very common verbs
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため', '時', 'そこ', 'どこ', 'これ', 'そう',
              'いる', 'なる', 'する', 'いう', 'ある')
# Pairs involving 私 ("I"), split by part-of-speech patterns
# (非自立 = dependent/non-independent forms)
bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords) %>%
  filter(str_detect(POS2, '非自立')) %>%
  arrange(desc(Row3)) %>% head(50)
bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords) %>%
  filter(!str_detect(POS2, '非自立')) %>%
  arrange(desc(Row3)) %>% head(20)
bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords) %>%
  filter(!str_detect(POS2, '非自立')) %>%
  filter(str_detect(POS2, '動詞')) %>%
  select(everything(), freq=Row3) %>%
  arrange(desc(freq)) %>% head(30)
bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords) %>%
  filter(!str_detect(POS2, '非自立')) %>%
  filter(str_detect(POS1, '動詞')) %>%
  select(everything(), freq=Row3) %>%
  arrange(desc(freq)) %>% head(30)
# Networks around 私 restricted to noun-noun, then verb, patterns (part 3)
net = bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(str_detect(POS1, '名詞-名詞')) %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 4) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
net = bigram %>%
  filter(N1 == '私' | N2 == '私') %>%
  filter(str_detect(POS1, '動詞')) %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 4) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Adjective pairs around the three main characters (私 / K / お嬢さん)
net = bigram %>%
  filter(N1 %in% c('私','K','お嬢さん') | N2 %in% c('私','K','お嬢さん')) %>%
  filter(str_detect(POS1, '形容')) %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 0) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Adjective pairs excluding 私
net = bigram %>%
  filter(str_detect(POS1, '形容')) %>%
  select(N1, N2, freq=Row3) %>%
  filter(freq > 2) %>%
  filter(N1 != '私') %>%
  filter(N2 != '私') %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Stop-word list further extended with auxiliary/supporting verbs
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため', '時', 'そこ', 'どこ', 'これ', 'そう',
              'いる', 'なる', 'する', 'いう', 'ある', 'れる', 'られる', 'くれる')
## Co-occurrence analysis at the sentence level
# Load the sentence-level version of the text; get_cooc()/parse_cooc() are
# sourced from functions.r (the commented definitions below are kept for
# reference).
#kokoro_sentence = read.delim('kokoro_sentence.tsv', header=T, sep='\t', stringsAsFactor=F, fileEncoding='UTF-8-BOM')
kokoro_sentence = read.delim('data/kokoro_sentence.tsv', header=T, sep='\t', stringsAsFactor=F, fileEncoding='UTF-8-BOM')
kokoro_sentence %>% head()
source('functions.r', encoding='utf8')
# get_cooc = function(string, pos=c('名詞', '動詞'), with_pos=F, unique=T, stopwords=c(), dic=''){
#   if(length(string) == 0){
#     return(c())
#   }
#   words = RMeCabC(string, 1, dic=dic) %>% unlist()
#   targets = words[names(words) %in% pos]
#   if(unique){
#     #targets = unique(targets)
#     targets[!duplicated(targets)]
#   }
#   if(length(stopwords)){
#     targets = targets[! targets %in% stopwords]
#   }
#
#   if(length(targets) < 2){
#     return(c())
#   }
#
#   if(with_pos){
#     targets = str_c(targets,'/',names(targets))
#   }
#   res = targets %>% sort() %>% combn(2)
#
#   result = paste(res[1,], '-', res[2,], sep='')
#   return(result)
# }
#
#
# parse_cooc = function(strings, freq){
#   res = str_split(strings, '-')
#   w1 = str_split(map_chr(res,1), '/')
#   w2 = str_split(map_chr(res,2), '/')
#   result = as.data.frame(cbind(
#     map_chr(w1,1),
#     map_chr(w2,1),
#     str_c(map_chr(w1,2), '-', map_chr(w2,2))
#   ))
#   colnames(result) = c('N1', 'N2', 'POS')
#   result['freq'] = freq
#   return(result)
# }
#
# Small worked example of the tokenisation / pair-building steps:
words = RMeCabC('今日はよい日で天気がよい', 1) %>% unlist()
words[!duplicated(words)]
# NOTE(review): `targets` is not defined at this point in the script; the
# lines below look like scratch work copied out of get_cooc() and will only
# run after evaluating its body interactively.
targets = str_c(targets,'/',names(targets))
res = targets %>% sort() %>% combn(2)
w1 = str_split(res[1,], '/')
w2 = str_split(res[2,], '/')
as.data.frame(cbind(
  str_c(map_chr(w1,1), '-', map_chr(w2,1)),
  str_c(map_chr(w1,2), '-', map_chr(w2,2))
))
# Leftover experiments with sentence splitting and encodings, kept commented:
# test = text %>% str_split('。') %>% unlist()
# test = test[str_length(test) > 0]
# kokoro['sentences'] = kokoro[, 'content'] %>% str_split('。')
# iconv(from='UTF-8', to='cp932')
# count_cooc %>% head() %>% lapply(get_cooc) %>% unlist()
#
#
#
# test %>% iconv(from='UTF-8', to='cp932') %>% map(function(x){RMeCabC(x)})
#
# test =
#   RMeCabC(test[1], 1)
#
#
# Encoding(test[1]) = 'cp932'
# Encoding(text)
# Encoding(test)
# test
# RMeCabC(text, 1)
#
# RMeCabC(str_c(test[1], ''))
# Sys.setlocale("LC_CTYPE", "ja_JP.UTF8")
# Sys.setlocale("LC_CTYPE", "C")
# Sys.getlocale()
## Per-sentence co-occurrence analysis
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため', '時', 'そこ', 'どこ', 'これ', 'そう',
              'いる', 'なる', 'する', 'いう', 'ある', 'れる', 'られる', 'くれる')
# Noun co-occurrence pairs within each sentence over the whole novel
test = map(kokoro_sentence[,'content'], get_cooc, pos=c('名詞'), stopwords=stopwords)
res = test %>% unlist() %>% table()
res %>% as.vector() %>% sort(decreasing=T) %>% head(100) %>% plot(type='p')
res %>% dim()
res %>% as.vector() %>% sort(decreasing=T) %>% plot()
res %>% as.vector() %>% sort(decreasing=T) %>% plot(log='y')
res %>% as.vector() %>% sort(decreasing=T) %>% plot(log='xy')
# Per-part co-occurrence tables (nouns only, custom dictionary)
part1 = map(kokoro_sentence[kokoro_sentence$part_id == 1,'content'],
            get_cooc, pos=c('名詞'), stopwords=stopwords, dic='dict/kokoro_sjis.dic') %>% unlist() %>% table()
part2 = map(kokoro_sentence[kokoro_sentence$part_id == 2,'content'],
            get_cooc, pos=c('名詞'), stopwords=stopwords, dic='dict/kokoro_sjis.dic') %>% unlist() %>% table()
part3 = map(kokoro_sentence[kokoro_sentence$part_id == 3,'content'],
            get_cooc, pos=c('名詞'), stopwords=stopwords, dic='dict/kokoro_sjis.dic') %>% unlist() %>% table()
# Merge the three tables into one data frame, one count column per part;
# unmatched pairs become NA and are zero-filled.
df1 = as.data.frame(part1)
df2 = as.data.frame(part2)
df3 = as.data.frame(part3)
res = merge(x=df1, y=df2, by='.', all=T)
res = merge(x=res, y=df3, by='.', all=T)
colnames(res) = c('term', 'df1', 'df2', 'df3')
res[is.na(res)] = 0
res %>% filter(str_detect(term, 'K')) %>%
  arrange(desc(df3)) %>% head()
# PCA of co-occurrence pairs across the three parts (pairs with total > 50)
mat = res
mat %>% filter(str_detect(term, 'K')) %>% arrange(desc(df3)) %>% head()
row.names(mat) = mat[,1]
mat = mat[2:4]
#mat = mat + 0.5
mat = mat[rowSums(mat) > 50, ]
colnames(mat) = c('第一部','第二部','第三部')
mat_t = t(mat)
ratio = mat_t / colSums(mat)
ratio_t = t(ratio)
result = (ratio_t / colSums(ratio)) %>% prcomp()
biplot(result)
#biplot(result, xlim=c(-0.7, 0.7), ylim=c(-0.7, 0.7))
# Noun+adjective co-occurrence in part 3, with part-of-speech tags attached,
# parsed back into an (N1, N2, POS, freq) data frame.
res = map(kokoro_sentence[kokoro_sentence$part_id==3,'content'], get_cooc, pos=c('名詞', '形容詞'), with_pos=T, stopwords=stopwords, dic='dict/kokoro_sjis.dic') %>%
  unlist() %>% table()
res %>% head()
df = parse_cooc(names(res), as.vector(res))
df %>% head()
df$freq %>% sort(decreasing=T) %>% plot()
df$freq %>% sort(decreasing=T) %>% plot(log='y')
# Adjective network around the three main characters (undirected, then directed)
net = df %>%
  filter(N1 %in% c('私','K','お嬢さん') | N2 %in% c('K','お嬢さん','私')) %>%
  filter(str_detect(POS, '形容')) %>%
  filter(freq > 0) %>%
  filter(! N1 %in% stopwords) %>%
  filter(! N2 %in% stopwords)
net %>% graph_from_data_frame() %>% as.undirected() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
net %>% graph_from_data_frame() %>% tkplot(vertex.color='SkyBlue', vertex.size=22)
# Reset the stop-word list to the version without the auxiliary verbs
stopwords = c('事','の','よう','それ','もの', '人', '何','一', 'ん','方','二','前','気','中','上','今','ため', '時', 'そこ', 'どこ', 'これ', 'そう',
              'いる', 'なる', 'する', 'いう', 'ある')
|
6f62de3ff85a3df17b91794c73867687f4f6fcaf
|
92081f34f55b066932c7490529b1d9b64cfc2fe6
|
/server.r
|
878bb93a47784656cde07e4defda55abf0faf207
|
[] |
no_license
|
dattashingate/Rshiny-Apps-radioButtons
|
4c8dc2b94771a7f50f9b318226954020a0c4038d
|
f5a36021c8334fa1dc1f1c39885920069901472a
|
refs/heads/master
| 2022-10-05T10:19:48.873848
| 2020-06-09T08:40:24
| 2020-06-09T08:40:24
| 270,953,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
server.r
|
# Shiny server: echoes each submitted input field back as a text output.
library(shiny)

shinyServer(function(input, output) {
  # Each output simply mirrors the reactive value of the matching input.
  output$name   <- renderText(input$name)
  output$age    <- renderText(input$age)
  output$addr   <- renderText(input$addr)
  output$gender <- renderText(input$gender)
})
|
fe15ce43bf8753340209dd53dd7788908eb837cf
|
2a374d65d81be09bc7bf9c6f5153aa239ad20c5b
|
/R/fetch.R
|
3b0ce1913ed3ed279749b999d04bbeedbfa5c0bb
|
[] |
no_license
|
KennethTM/windfetchR
|
e037dfb97a88b6691993883a8eed3a84388d68c3
|
60a3506f5658fe4aacff359e7c50a7771e71094d
|
refs/heads/main
| 2023-02-17T23:06:22.307535
| 2021-01-20T11:35:27
| 2021-01-20T11:35:27
| 331,251,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
fetch.R
|
#' @useDynLib windfetchR
#' @importFrom Rcpp evalCpp
#' @exportPattern "^[[:alpha:]]+"
NULL
|
8b32c990456f0e8c288c7f8f9b5d6f6d85d801cc
|
440ad9e927eee7e0080e05a602eada7b8ca645ac
|
/man/mirror2esnssite.Rd
|
0fcc320d464215a965a45806ee4be7bbcab1f7ff
|
[] |
no_license
|
jae0/SCtagging
|
517de7d5ce6d58153af877d5eb7c828092707344
|
bcf5e885bc932657da43643b367c91541f834408
|
refs/heads/master
| 2023-02-24T17:56:40.806931
| 2021-01-21T13:15:59
| 2021-01-21T13:15:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 380
|
rd
|
mirror2esnssite.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SCtagging.R
\name{mirror2esnssite}
\alias{mirror2esnssite}
\title{mirror2esnssite}
\usage{
mirror2esnssite(region = "ScotianShelf")
}
\value{
A status message, returned for the case where the function was called by the webpage.
}
\description{
ESSENTIAL FUNCTION. Must call after any data entry in order for the website to be up to date!
}
|
e13054a1d03ea93a50e8ae950a79d55414c686f4
|
8d5679573c40ea3391c2c63f2ec794928b594b65
|
/man/lines.TPCmsm.Rd
|
66a8a58a6fcf2deb6a4a04f4cf04c53e28d44d30
|
[] |
no_license
|
cran/TPmsm
|
1320002cef52b8c2205e07c75758561edc221ac9
|
6f54632266e86a7cd24948437da6bf42e2e44010
|
refs/heads/master
| 2023-04-06T09:37:29.701579
| 2023-01-13T19:30:02
| 2023-01-13T19:30:02
| 17,693,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,777
|
rd
|
lines.TPCmsm.Rd
|
\encoding{UTF-8}
\name{lines.TPCmsm}
\alias{lines.TPCmsm}
\title{lines method for a TPCmsm object}
\description{lines method for an object of class \sQuote{TPCmsm}.}
\usage{\S3method{lines}{TPCmsm}(x, plot.type="t", tr.choice, col, lty, conf.int=FALSE,
ci.col, ci.lty, legend=FALSE, legend.pos, curvlab, legend.bty="n", ...)}
\arguments{
\item{x}{An object of class \sQuote{TPCmsm}.}
\item{plot.type}{A character string specifying the type of plot.
If \dQuote{t} the scatterplot of transition probability versus time is plotted.
If \dQuote{c} the scatterplot of transition probability versus covariate is plotted.
}
\item{tr.choice}{Character vector of the form \sQuote{c(\dQuote{from to}, \dQuote{from to})}
specifying which transitions should be plotted.
Default, all the transition probabilities are plotted.
}
\item{col}{Vector of colour.}
\item{lty}{Vector of line type. Default is 1:number of transitions.}
\item{conf.int}{Logical. Whether to display pointwise confidence bands. Default is FALSE.}
\item{ci.col}{Colour of the confidence bands. Default is \code{col}.}
\item{ci.lty}{Line type of the confidence bands. Default is 3.}
\item{legend}{A logical specifying if a legend should be added.}
\item{legend.pos}{A vector giving the legend's position.
See \code{\link{legend}} for further details.
}
\item{curvlab}{A character or expression vector to appear in the legend.
Default is the name of the transitions.
}
\item{legend.bty}{Box type for the legend. By default no box is drawn.}
\item{\dots}{Further arguments for lines.}
}
\value{
No value is returned.
}
\author{Artur Araújo, Javier Roca-Pardiñas and Luís Meira-Machado}
\references{
Araújo A, Meira-Machado L, Roca-Pardiñas J (2014). TPmsm: Estimation of the Transition Probabilities in
3-State Models. \emph{Journal of Statistical Software}, \bold{62}(4), 1-29. \doi{10.18637/jss.v062.i04}
Meira-Machado L., de Uña-Álvarez J., Datta S. (2011). Conditional Transition Probabilities in a non-Markov Illness-death Model. Discussion Papers in Statistics and Operation Research n 11/03. Department of Statistics and Operations Research, University of Vigo (ISSN: 1888-5756, Deposito Legal VG 1402-2007). \url{https://depc05.webs.uvigo.es/reports/12_05.pdf}
}
\seealso{
\code{\link{legend}},
\code{\link{lines}},
\code{\link{plot.default}},
\code{\link{plot.TPCmsm}}.
}
\examples{
# Set the number of threads
nth <- setThreadsTP(2)
# Create survTP object
data(heartTP)
heartTP_obj <- with( heartTP, survTP(time1, event1, Stime, event, age=age) )
# Compute IPCW1 conditional transition probabilities without confidence band
TPC_IPCW1 <- transIPCW(heartTP_obj, s=57, t=310, x=15, conf=FALSE, method.est=1)
# Compute IPCW2 conditional transition probabilities without confidence band
TPC_IPCW2 <- transIPCW(heartTP_obj, s=57, t=310, x=15, conf=FALSE, method.est=2)
# Compute LIN conditional transition probabilities without confidence band
TPC_LIN <- transLIN(heartTP_obj, s=57, t=310, x=15, conf=FALSE)
# Build covariate plots
tr.choice <- dimnames(TPC_LIN$est)[[3]]
par.orig <- par( c("mfrow", "cex") )
par( mfrow=c(2,3) )
for ( i in seq_len( length(tr.choice) ) ) {
plot(TPC_IPCW1, plot.type="c", tr.choice=tr.choice[i], legend=FALSE,
main=tr.choice[i], col=1, lty=1, xlab="", ylab="")
lines(TPC_IPCW2, plot.type="c", tr.choice=tr.choice[i], legend=FALSE, col=2, lty=1)
lines(TPC_LIN, plot.type="c", tr.choice=tr.choice[i], legend=FALSE, col=3, lty=1)
}
plot.new()
legend(x="center", legend=c("IPCW1", "IPCW2", "LIN"), col=1:3, lty=1, bty="n", cex=1.5)
par(mfrow=c(1, 1), cex=1.2)
title(xlab="Age", ylab="Transition probability", line=3)
par(par.orig)
# Restore the number of threads
setThreadsTP(nth)
}
\keyword{aplot}
\keyword{methods}
\keyword{survival}
|
af8970157927bdf11a711f44561c37a9f8bc5e35
|
833201ed243c95e2702337630f4f67fd91a63c01
|
/TA1_Admin_Info_Visualizaciones_Carlos.R
|
4a7bce3af601494f7645940bdb86798dddbb7b6b
|
[] |
no_license
|
OscarFloresP/Administracion
|
150b2ca29947ef74b08e6c3924af1fd011007041
|
d049c9b81713d07e0484a32f1e81eb6bb76e821e
|
refs/heads/main
| 2023-04-18T05:20:25.542551
| 2021-05-06T03:10:22
| 2021-05-06T03:10:22
| 363,507,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,230
|
r
|
TA1_Admin_Info_Visualizaciones_Carlos.R
|
# Load the data (hotel bookings dataset)
DFHoteles <- read.csv("hotel_bookings.csv")
View(DFHoteles)
# Inspect the data: dimensions, column names, structure and summary statistics
nrow(DFHoteles)
ncol(DFHoteles)
colnames(DFHoteles)
str(DFHoteles)
summary(DFHoteles)
# Visualisations
# First chart: guest arrivals per month, stacked by year (2015-2017)
table(DFHoteles$arrival_date_year)
table(DFHoteles$arrival_date_month)
table(DFHoteles$arrival_date_year, DFHoteles$arrival_date_month)
# Stacked barplot: one bar per month, one colour per year
barplot(table(DFHoteles$arrival_date_year, DFHoteles$arrival_date_month), main = "Llegada de huéspedes anual por mes", xlab = "Meses", ylab = "Nro de Huéspedes", col = c("blue", "yellow", "red"))
legend(x = "bottomright", legend = c("2015", "2016", "2017"), fill = c("blue", "yellow", "red"), title = "Año", cex = 0.6)
# Second chart: reservation status counts per year
table(DFHoteles$reservation_status)
table(DFHoteles$arrival_date_year)
table(DFHoteles$reservation_status, DFHoteles$arrival_date_year)
# Stacked barplot: one bar per year, one colour per reservation status
barplot(table(DFHoteles$reservation_status, DFHoteles$arrival_date_year), main = "Estado de reservas de huéspedes por año", xlab = "Año", ylab = "Nro de Huéspedes", col = c("darkslateblue", "gold3", "khaki"))
legend(x = "bottomright", legend = c("canceled", "check-out", "No-show"), fill = c("darkslateblue", "gold3", "khaki"), title = "Estado de Reserva", cex = 0.6)
|
c49942aa201042c697917e28470f8f522569d429
|
72e7b17d8bb90d293f8075a71abec37910831030
|
/HouseholdPowerConsumption/plot2.R
|
5d372fd361c1fdc4fa5e488fe507d2129e8fa501
|
[] |
no_license
|
kieranroberts/R-datascience
|
2b70bdef780af48920a869f8bf87ed9fbef99758
|
acac610dfc9ba9a75129c762d85136f666245405
|
refs/heads/master
| 2016-09-13T08:10:48.007527
| 2016-05-18T07:03:45
| 2016-05-18T07:03:45
| 58,977,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 813
|
r
|
plot2.R
|
# Plot 2: Global Active Power over time for 2007-02-01 and 2007-02-02,
# rendered to plot2.png (480 x 480 pixels).

# Temporarily change the locale so date/time labels render in English
my_lc_time <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "en_GB.UTF-8")
# Get the data (getData() is defined in getData.R; presumably it fetches or
# unpacks the dataset into ./data/ — confirm against that script)
source("getData.R")
getData()
# Read the full dataset, then keep only the two target dates
data <- read.table("./data/household_power_consumption.txt", header=TRUE, sep = ";", stringsAsFactors=FALSE)
subsetData <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# Convert the measurement to numeric and combine date + time into a
# date-time object for the x axis
globalActivePower <- as.numeric(subsetData$Global_active_power)
fulldate<- strptime(paste(subsetData$Date, subsetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Render the line chart to a PNG file
png(file = "plot2.png", width=480, height=480)
plot(fulldate, globalActivePower, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
# Restore the original locale
Sys.setlocale("LC_TIME", my_lc_time)
|
d09645c8ad7bb969312c7d9fbfad567add596c47
|
a72c6d86ff34d7e0669cfd82c0629ff85ecbffc7
|
/classifier.R
|
fe233ec1350089f7a21b0febf2ccf35267eb41db
|
[] |
no_license
|
GCDigitalFellows/WebScraping
|
86c2a7dc644d655354d420b339bbd31f597db740
|
c69bff920a69b7cdecec6bc63110877bd647528f
|
refs/heads/master
| 2020-04-05T18:55:32.493448
| 2015-05-20T21:25:45
| 2015-05-20T21:25:45
| 38,452,845
| 2
| 0
| null | 2015-07-02T19:32:33
| 2015-07-02T19:32:32
| null |
UTF-8
|
R
| false
| false
| 956
|
r
|
classifier.R
|
# Binary classification of the Sonar dataset with caret + partial least squares.
library(caret)
library(mlbench)
library(magrittr)

data(Sonar)

# Reproducible, stratified 75/25 train/test split.
set.seed(107)
train_idx <- createDataPartition(y = Sonar$Class, p = 0.75, list = FALSE)
train_set <- Sonar[train_idx, ]
test_set  <- Sonar[-train_idx, ]

# Repeated cross-validation (3 repeats); class probabilities are required
# so that twoClassSummary can compute ROC-based metrics.
cv_ctrl <- trainControl(
  method = "repeatedcv",
  repeats = 3,
  classProbs = TRUE,
  summaryFunction = twoClassSummary
)

# PLS model tuned over 15 candidate component counts, selected by ROC AUC.
# See names(getModelInfo()) for potential models
# or http://topepo.github.io/caret/bytag.html
pls_fit <- train(
  Class ~ .,
  data = train_set,
  method = "pls",
  preProc = c("center", "scale"),
  trControl = cv_ctrl,
  metric = "ROC",
  tuneLength = 15
)
pls_fit
plot(pls_fit)

# Hold-out predictions: hard class labels and class probabilities,
# plus the confusion matrix against the true labels.
pred_class <- predict(pls_fit, newdata = test_set)
pred_prob  <- predict(pls_fit, newdata = test_set, type = "prob")
conf_mat   <- confusionMatrix(data = pred_class, test_set$Class)
|
c9f3776530e67b16fe39dd424f81fed4613cd261
|
9d3e3c3950c4101bc863a90e69606d7c7d03a4e9
|
/codling_moth/code/drivers/pest_window_code/since_day_1/takes_long/d_extract_needed_info.R
|
3801f997d0a1f1ce1f789c32fd02c3498e0dbfa0
|
[
"MIT"
] |
permissive
|
HNoorazar/Ag
|
ca6eb5a72ac7ea74e4fe982e70e148d5ad6c6fee
|
24fea71e9740de7eb01782fa102ad79491257b58
|
refs/heads/main
| 2023-09-03T18:14:12.241300
| 2023-08-23T00:03:40
| 2023-08-23T00:03:40
| 146,382,473
| 3
| 6
| null | 2019-09-23T16:45:37
| 2018-08-28T02:44:37
|
R
|
UTF-8
|
R
| false
| false
| 2,682
|
r
|
d_extract_needed_info.R
|
#!/share/apps/R-3.2.2_gcc/bin/Rscript
# For each climate model (RCP 4.5 / 8.5), extract the first row(s) at which
# each codling-moth larva generation crosses the 25%, 50%, 75% and ~100%
# emergence quantiles, and save the collected rows to an RDS file.
#library(chron)
library(data.table)

data_dir = "/data/hydro/users/Hossein/codling_moth_new/local/processed/section_46_Pest/"
output_dir = "/data/hydro/users/Hossein/codling_moth_new/local/processed/section_46_Pest/"
name_pref = "combined_CMPOP_4_pest_rcp"
models = c("45.rds", "85.rds")

# Columns holding the cumulative fraction of emerged larvae per generation,
# and the quantile thresholds of interest (0.999 approximates "all emerged").
gen_cols = paste0("PercLarvaGen", 1:4)
thresholds = c(0.25, 0.5, 0.75, 0.999)

for (model in models){
  # BUG FIX: the original allocated 8 columns but then assigned 10 column
  # names, which errors ("'names' attribute must be the same length as the
  # vector"); allocate one column per name instead.
  col_names = c("latitude", "longitude",
                "ClimateGroup", "CountyGroup", "CumDDinF", "dayofyear",
                gen_cols)
  df <- data.frame(matrix(ncol = length(col_names), nrow = 0))
  colnames(df) <- col_names

  curr_data = readRDS(paste0(data_dir, name_pref, model))

  # For every generation and threshold, keep the row(s) whose emergence
  # fraction is the smallest value strictly above the threshold.  Loop order
  # (generation-major, threshold-minor) matches the original hand-unrolled
  # code, so the accumulated row order is unchanged.
  for (gcol in gen_cols){
    for (thr in thresholds){
      L = curr_data[curr_data[[gcol]] > thr]
      L = L[L[[gcol]] == min(L[[gcol]])]
      df = rbind(df, L)
    }
  }

  output_name = paste0("pest_quantile_rcp", model)
  saveRDS(df, paste0(output_dir, output_name))
}
|
955dca00ea98c4b3c9c9b66f1b83da7ba4e9864f
|
d590f2ad3d1352449b5085ab2ee2a9eca853486f
|
/I_Consent_Gazepoint_Project/scripts/TMSP.R
|
58aa47c2af543224e9e81f02f4e976ee07daa19d
|
[] |
no_license
|
jhthompson/cpsc-4120-gazepoint-project
|
1afed31589001de6985a4984d36e188c4c6f8ec1
|
300cac949f2c6b262dd23e92d7f6bc58b89d2207
|
refs/heads/master
| 2020-08-18T22:54:23.532647
| 2019-12-01T21:45:18
| 2019-12-01T21:45:18
| 215,843,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,637
|
r
|
TMSP.R
|
################### IMPORTANT NOTICE #######################
# Below you'll find 4 functions:
# TransMatrix
# calculates transition matrix between AOIs - returns transtion matrix M,
# TransPlot
# plots tranistion matrix - returns TM plot with probabilities in each cell;
# TransEntropy
# calculates transition matrix entropy for each participant - returns vactor of Shannon's H.
# LevenMatrix
# calulates standardized Levenstein distans between all stimuli-driven scanpaths - returns matrix containing similarities measures between all participants' scanpaths
#
#### 1. Use TransMatrix befor using TransPlot
#### 1. Data for functions 1 to 3 have to be in the form of 'data.frame'. Each raw represents seperate fixation.
###################################################################################
### Required packages
library(gplots)
library(RColorBrewer)
library(colorspace)
library(entropy)
library(gmodels)
library(car)
library(lattice)
#library(MiscPsycho)
# Build a vector of n single-letter AOI labels: "a".."z" for the first 26,
# continuing with "A".."Z" beyond that (supports up to 52 labels).
mixedletters <- function(n) {
  if (n > 26) {
    c(letters[1:26], LETTERS[1:(n - 26)])
  } else {
    letters[1:n]
  }
}
# Create an all-zero AOI transition matrix for an xtiles x ytiles grid.
# There are xtiles*ytiles AOIs, each labeled by a two-letter code
# ("aa", "ab", ...), so the transition matrix is
# (xtiles*ytiles) x (xtiles*ytiles) with those labels on both dimensions.
zeroTM <- function(xtiles, ytiles) {
  x_labels <- mixedletters(xtiles)
  y_labels <- mixedletters(ytiles)
  # Cartesian product of the label sets, x-major so the order matches the
  # original nested loops: "aa", "ab", ..., "ba", ...
  AOIs <- as.list(as.vector(t(outer(x_labels, y_labels, paste0))))
  n_aoi <- length(x_labels) * length(y_labels)
  print(sprintf("AOIs:\n"))
  print(sprintf("zeroTM: nrow x ncol: %d x %d", n_aoi, n_aoi))
  matrix(data = 0,
         nrow = n_aoi,
         ncol = n_aoi,
         dimnames = list(AOIs, AOIs)) # empty (all-zero) matrix
}
############################################################################
################# TRANSITION MATRIX CALCULATION ############################
## Function arguments:
# data - R data object
# AOInamesVar - variable which contain AOI names (put in brackets)
# SubjectVar - variable which contain Subjects' names (put in brackets)
# FixOrderVar - variable indicating time order of AOI hits within each participant (put in brackets)
# print - logical. Should transition matrices be printed (default is TRUE)
## Function returns:
# M - normalized transition matrix
# Munst - unnormalized transition matrix
# Usage example:
# TransMatrix(data=ddf, AOInamesVar="AreaofInterest", SubjectsVar="Subject", fixOrderVar= , print=TRUE)
############################################################################
# Build an AOI-to-AOI transition matrix from ordered fixation data.
# For every subject, each consecutive AOI pair (fixation k -> k+1) increments
# the corresponding cell; counts are summed over all subjects, then each row
# is normalized so M[i, j] is the probability of moving from AOI i to AOI j.
# Rows with no observed transitions are set to the uniform 1/siz (maximum
# entropy) rather than zero.
# Side effects: writes the normalized matrix to global `M` and the raw count
# matrix to global `Munst` via `<<-`.  The M argument is used only for its
# dimensions/dimnames and is zeroed before counting.
# NOTE(review): the per-transition print() calls below look like leftover
# debug output (and the "%d" variant assumes numeric AOI codes) -- confirm
# and consider removing.
TransMatrix <- function(M, data, AOInamesVar, SubjectsVar, FixOrderVar, print=FALSE){
# Sort so that, within each subject, rows follow fixation order.
data <- data[order(data[ ,SubjectsVar], data[ ,FixOrderVar]), ]
uniqS <- sort(unique(data[ ,SubjectsVar])) # unique values of Subject variable
lui <- length(unique(data[ ,SubjectsVar])) # how many subjects
uniqAOI <- sort(unique(data[ ,AOInamesVar])) # unique values of AOI variable
# siz <- length(unique(data[ ,AOInamesVar])) # how many AOIs
siz <- nrow(M)
if(nrow(M) != ncol(M)) {
print('Warnings: ')
print(sprintf("%s%dx%d", 'Matrix not square: ',nrow(M),ncol(M)))
}
# empty matrix
# M <- matrix(data=0, nrow=siz, ncol=siz, dimnames=list(uniqAOI, uniqAOI))
M[,] <- 0
for (i in 1:lui){
kukudf <- data[which(data[ ,SubjectsVar] == uniqS[i]), ] # choose subject i
luj <- dim(kukudf)[1] # how many AOIs for subject i
if (luj > 1){
j <- 0
repeat {
j <- j+1
# Count the transition from fixation j to fixation j+1 by AOI label.
from <- kukudf[j, AOInamesVar]
to <- kukudf[j+1, AOInamesVar]
print(sprintf("nrow: %d, ncol: %d",nrow(M),ncol(M)))
print(sprintf("from: %d, to: %d",from,to))
print(sprintf("from: %s, to: %s",as.character(from),as.character(to)))
M[as.character(from), as.character(to)] <-
M[as.character(from), as.character(to)] + 1
if (j > luj-2) break()
}
} else {
print('Warnings: ')
print(sprintf("%s%s", 'No AOI transitions for subject: ', uniqS[i]))
}
}
# Preserve the raw (unnormalized) counts in the global Munst.
Munst <<- round(M, 2)
#### Normalize to source - probability of going from i's AOI to any j's AOI
# for (i in 1:siz){
# Srow <- sum(M[i,])
# for (j in 1:siz){
# M[i,j] <- M[i,j]/Srow
# if(is.nan(M[i,j])) { M[i,j] = 0.0 }
# }
# }
# ATD: if we have row summing up to zero (no transitions), then we
# should set each cell to the max entropy (1/siz) instead of
# min entropy (0) since if there were no observed transitions
# we can't very well predict where they're likely to go, hence
# we should have max entropy (max suprise)
for (i in 1:siz) {
Srow <- sum(M[i,])
if(abs(Srow) < 0.00001) {
M[i,] <- 1.0/siz
} else {
for (j in 1:siz) {
M[i,j] <- M[i,j]/Srow
if(is.nan(M[i,j])) { M[i,j] = 0.0 }
}
}
}
M <- round(M, 2)
# Publish the normalized matrix to the global M.
M <<- round(M, 2)
####### Print results
if (print == TRUE) {
print('***************************')
print('Raw data Transition Matrix' )
print('')
print(Munst)
print('***************************')
print('Normalized Transition Matrix' )
print('')
print(M)
print('Results written to M and Munst matrices. Use print(M) and print(Munst)')
}
}
############################################################################
##################### TRANSITION MATRIX PLOT2 ##############################
## Funtion arguments:
# transMatrix - transition matrix to be plotted
# plotName - name of plot for saving (put in brackets)
# plotColors - color scheme for the plot
# margin - left, bottom margin
# annColor - color of annotations in cells (default 'black')
# annAlpha - transparency of cell colors (default '0.85')
## Function returns:
# Saved pdf plot in the specified directory and name
## Usage example:
# TransPlot2(transMatrix=M, plotName="Loveactually15_shot.pdf",
# plotColors=brewer.pal(9,"Oranges"), annColor='black')
############################################################################
# Plot a transition matrix as a heatmap (via lrheatmap, defined elsewhere)
# and save it to `plotName` as a PDF with the NimbusSan font embedded.
# NOTE(review): annAlpha is accepted but never forwarded to lrheatmap --
# confirm whether cell transparency was meant to be wired through.
TransPlot2 <- function(transMatrix,
plotName,
plotColors=brewer.pal(9,"Oranges"),
margin=c(4,4),
annColor='black',
annCex=0.5,
annAlpha=0.85,
cexR=0.7,
cexC=0.7,
cexAxis=1.5) {
# Use the NimbusSan (Helvetica-compatible) Type 1 font for all PDF text.
pdf.options(family = "NimbusSan",useDingbats=FALSE)
pdf(plotName)
lrheatmap(transMatrix,
ColorRamp=plotColors,
cellColor=annColor,
margin=margin,
cex.cels=annCex,
cex.rows=cexR,
cex.cols=cexC,
xlab='Destination AOI (to)',
ylab='Source AOI (from)',
cex.lab=cexAxis
)
dev.off()
# Embed the font into the PDF in place, searching common TeX install paths.
embedFonts(plotName, "pdfwrite", outfile = plotName,
fontpaths =
c("/sw/share/texmf-dist/fonts/type1/urw/helvetic",
"/usr/share/texmf/fonts/type1/urw/helvetic",
"/usr/local/teTeX/share/texmf-dist/fonts/type1/urw/helvetic",
"/usr/share/texmf-texlive/fonts/type1/urw/helvetic",
"/usr/local/texlive/texmf-local/fonts/type1/urw/helvetic"))
}
############################################################################
##################### TRANSITION MATRIX PLOT ###############################
## Funtion arguments:
# transMatrix - transition matrix to be plotted
# plotName - name of plot for saving (put in brackets)
# plotColots - color scheme for the plot
# annColor - color of annotations in cells (default 'black')
## Function returns:
# Saved pdf plot in the specified directory and name
## Usage example:
# TransPlot(transMatrix=M, plotName="Loveactually15_shot.pdf",
# plotColors=brewer.pal(9,"Oranges"), annColor='black')
############################################################################
# Render a transition matrix as an annotated heatmap and save it to PDF.
#
# Args:
#   transMatrix: matrix to plot (rows = source AOI, cols = destination AOI).
#   plotName:    output PDF file name.
#   plotColors:  color ramp for the heatmap cells.
#   annColor:    color of the per-cell annotations.
#   annCex:      size of the per-cell annotations.
#   margin:      bottom/left margins passed to the heatmap.
#   cexR, cexC:  row / column label sizes.
# Side effect: writes (and font-embeds) the PDF at plotName.
TransPlot <- function (transMatrix,
                       plotName,
                       plotColors,
                       annColor='black',
                       annCex=2.5,
                       margin=c(15,15),
                       cexR=1.9,
                       cexC=1.9) {
  pdf.options(family = "NimbusSan",useDingbats=FALSE)
  pdf(plotName)
  # ATD: use heat function in heat.R, which is a hacked version of heatmap.2
  # which accepts cex.lab to scale axis labels
  heat(x = transMatrix,
       Rowv = FALSE,
       Colv = FALSE,
       dendrogram = 'none',
       scale = 'none',
       col = plotColors,
       # BUG FIX: annotate with the matrix being plotted, not the global M.
       # The original `cellnote = M` silently depended on TransMatrix's
       # `<<-` side effect and could annotate values from a different matrix
       # than the one drawn.
       cellnote = transMatrix,
       notecol = annColor,
       notecex = annCex,
       trace = 'none',
       margins = margin,
       cexRow = cexR,
       cexCol = cexC,
       key = FALSE,
       keysize = 1.0,
       density.info = "none",
       main = '',
       ylab = 'Source AOI (from)',
       xlab = 'Destination AOI (to)',
       cex.lab = 1.7,
       lmat = rbind(c(0,3),c(2,1),c(1,4)),
       lhei = c(1,8,1), # controls heights of rows
       lwid = c(1,8) # seems to control placement of main title
       )
  dev.off()
  # Embed the NimbusSan font so the PDF renders without it installed;
  # search several common TeX font locations.
  embedFonts(plotName, "pdfwrite", outfile = plotName,
             fontpaths =
               c("/sw/share/texmf-dist/fonts/type1/urw/helvetic",
                 "/usr/share/texmf/fonts/type1/urw/helvetic",
                 "/usr/local/teTeX/share/texmf-dist/fonts/type1/urw/helvetic",
                 "/usr/share/texmf-texlive/fonts/type1/urw/helvetic",
                 "/usr/local/texlive/texmf-local/fonts/type1/urw/helvetic"))
}
#############################################################################
#################### TRANSITION MATRIX ENTROPY ##############################
## Function arguments:
# data - data file
# AOInamesVar - variable which contain AOI names (put in brackets)
# SubjectVar - variable which contain Subjects' names (put in brackets)
# FixOrderVar - variable indicating time order of AOI hits within each participant (put in brackets)
## Returns:
# TMentrop - string with entropy value of transition matrix for each person
# in the data file
## Usage example;
# TransEntropy(data=df, AOInamesVar="AOI", SubjectsVar="Subject", FixOrderVar="")
############################################################################
# Per-subject transition-matrix entropy.
# For each subject, rebuilds that subject's individual AOI transition matrix
# (same counting + row-normalization scheme as TransMatrix), then computes
# its normalized transition entropy via H_t().
# Side effects: writes a data.frame(Subject, Entropy) to the global
# `TMentrop`, and leaves the LAST subject's matrices in globals `M`/`Munst`
# via `<<-`.  The M argument supplies only dimensions/dimnames.
# NOTE(review): the normalization loop below reuses `i` from the outer
# subject loop.  R's `for` re-assigns the variable from its own sequence on
# every iteration, so the outer loop still advances correctly, but renaming
# the inner index would be clearer.
TransEntropy <- function(M, data, SubjectsVar, AOInamesVar, FixOrderVar) {
# Sort so that, within each subject, rows follow fixation order.
data <- data[order(data[ ,SubjectsVar], data[ ,FixOrderVar]), ]
uniqS <- sort(unique(data[ ,SubjectsVar]))
lu <- length(uniqS)
uniqAOI <- sort(unique(data[ ,AOInamesVar])) # unique values of AOI variable
# siz <- length(unique(data[ ,AOInamesVar])) # how many AOIs
siz <- nrow(M)
if(nrow(M) != ncol(M)) {
print('Warnings: ')
print(sprintf("%s%dx%d", 'Matrix not square: ',nrow(M),ncol(M)))
}
TMentrop <- numeric()
for (i in 1:lu) {
en <- numeric()
# empty matrix
# M <- matrix(data=0, nrow=siz, ncol=siz, dimnames=list(uniqAOI, uniqAOI))
M[,] <- 0
kukudf <- data[which(data[ ,SubjectsVar] == uniqS[i]), ] # choose subject i
luj <- dim(kukudf)[1] # how many AOIs for subject i
if (luj > 1) {
j <- 0
repeat {
j <- j+1
# Count this subject's transition from fixation j to fixation j+1.
from <- kukudf[j, AOInamesVar]
to <- kukudf[j+1, AOInamesVar]
M[as.character(from), as.character(to)] <-
M[as.character(from), as.character(to)] + 1
if (j > luj-2) break()
}
} else {
print('Warnings: ')
print(sprintf("%s%s", 'No AOI transitions for subject: ', uniqS[i]))
}
Munst <<- round(M, 2)
#### Normalize to source - probability of going from i's AOI to any j's AOI
# for (i in 1:siz) {
# Srow <- sum(M[i,])
# for (j in 1:siz) {
# M[i,j] <- M[i,j]/Srow
# if(is.nan(M[i,j])) { M[i,j] = 0.0 }
# }
# }
# ATD: if we have row summing up to zero (no transitions), then we
# should set each cell to the max entropy (1/siz) instead of
# min entropy (0) since if there were no observed transitions
# we can't very well predict where they're likely to go, hence
# we should have max entropy (max suprise)
for (i in 1:siz) {
Srow <- sum(M[i,])
if(abs(Srow) < 0.00001) {
M[i,] <- 1.0/siz
} else {
for (j in 1:siz) {
M[i,j] <- M[i,j]/Srow
if(is.nan(M[i,j])) { M[i,j] = 0.0 }
}
}
}
M <- round(M, 2)
M <<- round(M, 2)
# ATD: max entropy will be with each row's cells = 1/col, i.e.,
# each AOI equally likely
# max_entrop <- numeric()
# max entropy is log_2(n) where n is the number of possible outcomes,
# hence number of AOIs
# max_entrop <- log2(siz)
# default (empirical) entropy estimation is done via ML, maximum-likelihood
# method; if there are many zero counts and the sample size is small, this
# is very inefficient and also strongly biased
# en <- entropy(M)
# the bias-corrected maximum likelihood method, applying
# the Miller-Madow correction to the empirical entropy
# ATD: normalize entropy
# en <- entropy(M,method="MM")
# en <- entropy(M,method="MM")/max_entrop
# compute normalized entropy (\eta(X)) in bits
# en <- entropy(M,method="MM",unit="log2")/max_entrop
# en <- entropy(M,method="ML",unit="log2")/max_entrop
# don't use entropy function since it assumes probability distribution
# M does not represent probability distribution, rather each of its
# rows does
# H_t does normalization internally
en <- H_t(M)
# could get NA here...
if(is.na(en)) {
en = 0.0
}
TMentrop <- c(TMentrop, en)
}
# TMentrop <<- TMentrop
# Publish the per-subject results as a two-column data.frame.
TMentrop <<- data.frame(uniqS, TMentrop)
names(TMentrop) <<- c("Subject", "Entropy")
}
############################################################################
################# TRANSITION ENTROPY CALCULATION ###########################
############################################################################
# Normalized transition entropy of a Markov transition matrix M:
# the average of each row's Shannon entropy (bits, ML estimator from the
# `entropy` package), weighted by the stationary distribution from H_s(),
# then divided by log2(#AOIs) so the result lies in [0, 1].
# See the Cover paper URL cited in H_s() for the underlying formula.
H_t <- function(M) {
  stationary <- H_s(M)$sdist
  total <- 0
  for (row_idx in 1:nrow(M)) {
    # Shannon entropy of this row's outgoing-transition distribution.
    row_H <- entropy(M[row_idx, ], method = "ML", unit = "log2")
    if (is.na(row_H)) {
      row_H <- 0.0
    }
    total <- total + stationary[row_idx] * row_H
  }
  # Normalize by the maximum possible entropy, log2(#outcomes).
  total / log2(ncol(M))
}
############################################################################
################# STATIONARY ENTROPY CALCULATION ###########################
############################################################################
# Stationary distribution and normalized stationary entropy of a Markov
# transition matrix M.
# Returns list(sdist = stationary distribution pi (the dominant left
# eigenvector of M, normalized to sum to 1), eta = Shannon entropy of pi
# in bits divided by log2(length(pi)), so eta lies in [0, 1]).
H_s <- function(M) {
# matrix whose columns contain the eigenvectors
# e <- eigen(M)
# or is it transpose of M?
# see: http://faculty.cas.usf.edu/jkwilde/mathcamp/Linear_Algebra_II.pdf
e <- eigen(t(M))
# stationary distribution \pi is unchanged by the operation of transition
# matrix \mathbf{P} on it, and so is defined by
# \( \pi \mathbf{P} = \pi \)
# by comparing this definition with that of an eigenvector, the two concepts
# are related and that
# \( \pi = \frac{e}{\sum_{i}{e_i}} \)
# is a normalized ($\sum_{i}{\pi_i}$) multiple of a left $\mathbf{e}$ of the
# transition matrix $\mathbf{P}$
#
lam <- e$values
# print(lam)
vec <- e$vectors
# print(vec)
# eigen() orders eigenvalues by decreasing modulus, so column 1 is the
# dominant eigenvector; Mod() maps possibly-complex entries to magnitudes.
v <- vec[,1]
# v <- Re(v)
v <- Mod(v)
# print(v)
# this gives us \pi, the stationary distribution from the eigenvector
p <- v * 1.0/sum(v)
# print(sprintf("pi, the stationary distribution from the eigenvecotr:\n"))
# print(p)
# $\sum_{i}{\pi_i}$ should equal 1.0
# print(sum(p))
# see: http://www-isl.stanford.edu/~cover/papers/paper101.pdf
# we really should do H(M) = -\sum_i \pi_i \sum_j M_{ij} log_2(M_{ij})
H <- 0
# for (i in 1:nrow(M)) {
# c <- 0
# for (j in 1:ncol(M)) {
# if(abs(M[i,j]) > 0.0001) {
# c = c + (M[i,j] * log2(M[i,j]))
# }
# }
## print(c)
# H = H + (p[i] * c)
# }
# as per the paper: H_s(M) = -\sum_i \pi_i log_2(pi_{i})
# The abs() guard skips (near-)zero probabilities, for which p*log2(p) -> 0.
for (i in 1:length(p)) {
if(abs(p[i]) > 0.0001) {
H = H + p[i] * log2(p[i])
}
}
H <- -H
# this should now be in bits per transition
# print(sprintf("H = %f\n",H))
max_entrop <- numeric()
# max entropy is log_2(n) where n is the number of possible outcomes,
# hence number of AOIs squared (the number of matrix cells basically)
# MM doesn't give the same as log2(siz*siz), but ML does
# n <- nrow(M)*ncol(M)
n <- length(p)
# print(sprintf("nrow = %d, ncol = %d, n = %d\n",nrow(M),ncol(M),n))
max_entrop <- log2(n)
# print(sprintf("max entropy = %f\n",max_entrop))
# see also: http://aix1.uottawa.ca/~jkhoury/markov.htm
eta <- H / max_entrop
if(is.complex(eta)) {
eta = Re(eta)
}
# eta should now be normalized stationary entropy since log2(n) is
# max entropy if n is the number of possible outcomes
# print(sprintf("eta = %f\n",eta))
retList <- list("sdist" = p, "eta" = eta)
return(retList)
}
#############################################################################
#################### STATIONARY TRANSITION MATRIX ENTROPY ###################
## Function arguments:
# data - data file
# AOInamesVar - variable which contain AOI names (put in brackets)
# SubjectVar - variable which contain Subjects' names (put in brackets)
# FixOrderVar - variable indicating time order of AOI hits within each participant (put in brackets)
## Returns:
# TMentrop - string with entropy value of transition matrix for each person
# in the data file
## Usage example;
# TransEntropy(data=df, AOInamesVar="AOI", SubjectsVar="Subject", FixOrderVar="")
############################################################################
# Per-subject stationary entropy.
# Mirrors TransEntropy(): rebuilds each subject's individual AOI transition
# matrix (same counting + row-normalization as TransMatrix), but scores it
# with the normalized stationary entropy H_s()$eta instead of H_t().
# Side effects: writes a data.frame(Subject, SEntropy) to the global
# `STentrop`, and leaves the LAST subject's matrices in globals `M`/`Munst`
# via `<<-`.  The M argument supplies only dimensions/dimnames.
# NOTE(review): as in TransEntropy, the normalization loop reuses `i` from
# the outer subject loop; harmless under R's `for` semantics but worth
# renaming for clarity.
StationaryEntropy <- function(M, data, SubjectsVar, AOInamesVar, FixOrderVar) {
# Sort so that, within each subject, rows follow fixation order.
data <- data[order(data[ ,SubjectsVar], data[ ,FixOrderVar]), ]
uniqS <- sort(unique(data[ ,SubjectsVar]))
lu <- length(uniqS)
uniqAOI <- sort(unique(data[ ,AOInamesVar])) # unique values of AOI variable
# siz <- length(unique(data[ ,AOInamesVar])) # how many AOIs
siz <- nrow(M)
if(nrow(M) != ncol(M)) {
print('Warnings: ')
print(sprintf("%s%dx%d", 'Matrix not square: ',nrow(M),ncol(M)))
}
STentrop <- numeric()
for (i in 1:lu) {
en <- numeric()
# empty matrix
# M <- matrix(data=0, nrow=siz, ncol=siz, dimnames=list(uniqAOI, uniqAOI))
M[,] <- 0
kukudf <- data[which(data[ ,SubjectsVar] == uniqS[i]), ] # choose subject i
luj <- dim(kukudf)[1] # how many AOIs for subject i
if (luj > 1) {
j <- 0
repeat {
j <- j+1
# Count this subject's transition from fixation j to fixation j+1.
from <- kukudf[j, AOInamesVar]
to <- kukudf[j+1, AOInamesVar]
M[as.character(from), as.character(to)] <-
M[as.character(from), as.character(to)] + 1
if (j > luj-2) break()
}
} else {
print('Warnings: ')
print(sprintf("%s%s", 'No AOI transitions for subject: ', uniqS[i]))
}
Munst <<- round(M, 2)
#### Normalize to source - probability of going from i's AOI to any j's AOI
# for (i in 1:siz) {
# Srow <- sum(M[i,])
# for (j in 1:siz) {
# M[i,j] <- M[i,j]/Srow
# if(is.nan(M[i,j])) { M[i,j] = 0.0 }
# }
# }
# ATD: if we have row summing up to zero (no transitions), then we
# should set each cell to the max entropy (1/siz) instead of
# min entropy (0) since if there were no observed transitions
# we can't very well predict where they're likely to go, hence
# we should have max entropy (max suprise)
for (i in 1:siz) {
Srow <- sum(M[i,])
if(abs(Srow) < 0.00001) {
M[i,] <- 1.0/siz
} else {
for (j in 1:siz) {
M[i,j] <- M[i,j]/Srow
if(is.nan(M[i,j])) { M[i,j] = 0.0 }
}
}
}
M <- round(M, 2)
M <<- round(M, 2)
# Normalized stationary entropy of this subject's matrix.
sen <- H_s(M)
eta <- sen$eta
# could get NA here...
if(is.na(eta)) {
eta = 0.0
}
STentrop <- c(STentrop, eta)
}
# STentrop <<- STentrop
# Publish the per-subject results as a two-column data.frame.
STentrop <<- data.frame(uniqS, STentrop)
names(STentrop) <<- c("Subject", "SEntropy")
}
#############################################################################
################## LEVENSTEIN INDEX OF SCANPATH SIMILARITIES ################
## Function arguments:
# data - data file
# SubjectVar - variable which contain Subjects' names (put in brackets)
# AOInamesVar - variable which contain AOI names (put in brackets)
# FixOrderVar - variable indicating time order of AOI hits within each participant (put in brackets)
## Returns:
# LM - matrix containing similarities measure (Levenstein's H) between participants' scanpaths
#
## Usage example;
# LevenMatrix(data=dd, SubjectsVar="Subject", AOInamesVar="AOI", FixOrderVar="")
############################################################################
# Pairwise scanpath-similarity matrix.
# Flattens each subject's ordered AOI sequence into one comma-separated
# string, then scores every pair of subject strings with stringMatch(),
# filling an n_subjects x n_subjects matrix (diagonal = self vs self).
# Side effect: writes the result to global `LM` via `<<-`; nothing returned.
# NOTE(review): stringMatch() comes from the MiscPsycho package, whose
# library() call is commented out at the top of this file -- confirm it is
# attached elsewhere, or this function fails at the first M[i,j] assignment.
LevenMatrix <- function(data, SubjectsVar, AOInamesVar, FixOrderVar){
# Sort so each subject's string follows viewing order.
data <- data[order(data[ ,SubjectsVar], data[ ,FixOrderVar]), ]
uniqS <- sort(unique(data[ ,SubjectsVar])) # unique values of Subject variable
lui <- length(unique(data[ ,SubjectsVar])) # how many subjects
uniqAOI <- sort(unique(data[ ,AOInamesVar])) # unique values of AOI variable (unused below)
M <- matrix(data=NA, nrow=lui, ncol=lui, dimnames=list(uniqS, uniqS)) # empty matrix
for (i in 1:lui){
s <- as.character(uniqS)[i]
str1 <- toString(as.character(data[which(data[,SubjectsVar] == s), AOInamesVar]))
j <- 0
for (j in 1:lui) {
ss <- as.character(uniqS)[j]
str2 <- toString(as.character(data[which(data[,SubjectsVar] == ss), AOInamesVar]))
M[i,j] <- stringMatch(str1, str2)
}
}
LM <<- M
}
|
50d3a687167ba4d783732922dde6f3f2c779c351
|
de64950c44d13417e8ca44c1d25791d05544e867
|
/Explore/density_test_train_plot.r
|
1b716f023929d48bab04f5d216363285c02cb1da
|
[] |
no_license
|
gumpu/kagglehiggs
|
c6bb393a1d1c48ed865b5f26ac881d7456e4ddab
|
3b408c1c23ff6cd2b73698166ab45ab80b02fbda
|
refs/heads/master
| 2021-01-21T01:59:50.427112
| 2014-06-22T18:52:13
| 2014-06-22T18:52:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
density_test_train_plot.r
|
#=============================================================================
#
# This plots the density for the various
# input variables grouped according to
#
# test / trainingset
#
#=============================================================================
# Reads the preprocessed dataset and writes one density plot per selected
# variable to density_plot.pdf, colored by test/train membership.
require(ggplot2)
# Loads the `dataset` object into the workspace.
load(file="../Processed/norm_dataset.rdata")
# Column names of `dataset`; keep only those containing "PRI" or "DER".
factors <- labels(dataset)[2][[1]]
factors <- factors[grep("PRI|DER", factors)]
pdf("density_plot.pdf")
for (f in factors) {
# aes_string() maps the x aesthetic by the column name held in `f`.
p1 <- ggplot(dataset[,c(f,"test","Label")], aes_string(x=f)) +
geom_density(aes(color=test))
print(p1)
}
dev.off()
#=============================================================================
|
59f7d96d9fb5fd994883f2f35f70616c01eec875
|
7c3f2e88c0273324f42c5175795931c668631520
|
/R/utils_io_fst.R
|
53c1cbe337150101bb2cc7d8a0abc5a0a392407e
|
[] |
no_license
|
dipterix/rave
|
ddf843b81d784a825acf8fe407c6169c52af5e3e
|
fe9e298fc9f740a70f96359d87987f8807fbd6f3
|
refs/heads/master
| 2020-06-26T19:52:26.927042
| 2019-07-30T22:47:36
| 2019-07-30T22:47:36
| 110,286,235
| 0
| 0
| null | 2018-06-20T03:18:47
| 2017-11-10T19:45:40
|
R
|
UTF-8
|
R
| false
| false
| 4,925
|
r
|
utils_io_fst.R
|
# fst IO
#' R6 class to load fst files
#'
#' Lazy, read-only 2-D view over an on-disk fst file: data are read from
#' disk (selected columns only, via \code{fst::read_fst}) each time
#' \code{subset()} is called, so large matrices are never fully
#' materialized.  With \code{transpose = TRUE} the on-disk table is
#' presented transposed (rows of the view are fst columns).
LazyFST <- R6::R6Class(
classname = 'LazyFST',
private = list(
file_path = NULL, # path of the backing .fst file
transpose = F, # present the on-disk table transposed?
meta = NULL, # fst::metadata_fst() of the file (row count, column names)
dims = NULL, # dimensions of the exposed (possibly transposed) view
data = NULL, # only used by the commented-out in-memory cache below
last_visited = NULL, # timestamp of the last subset() call
delayed = 3 # cache-expiry delay (secs); used by commented-out code only
),
public = list(
# No-op: kept for interface compatibility with other storage backends.
open = function(...){},
# Close the object; optionally delete the backing file from disk.
close = function(..., .remove_file = F){
if(.remove_file){
unlink(private$file_path)
}
},
save = function(...){
warning('NOT Implemented yet')
},
# Validate the file against the expected dimensions (if given) and record
# the view's dims.  Only 2-D views are supported.
initialize = function(file_path, transpose = F, dims = NULL, ...){
private$file_path = file_path
private$transpose = transpose
# check if dimension matches
private$meta = fst::metadata_fst(file_path)
if(length(dims) == 2){
# Only the total element count is checked, not the exact shape.
if(private$meta$nrOfRows * length(private$meta$columnNames) == prod(dims)){
private$dims = dims
}else{
stop('cached data has different dimensions than the given value')
}
}else{
if(is.null(dims)){
private$dims = c(private$meta$nrOfRows, length(private$meta$columnNames))
if(transpose){
private$dims = private$dims[c(2,1)]
}
}else{
stop('fast cache only supports 2 dimension data')
}
}
},
get_dims = function(...){
private$dims
},
# Read the requested sub-matrix from disk.  NULL/missing subscripts mean
# "all"; logical subscripts are converted to positions; out-of-range
# positions yield NA rows/columns (the result is pre-filled with NA and
# only in-range positions are overwritten).
subset = function(i = NULL, j = NULL, ..., drop = T){
if(!length(j)){
j = seq_len(private$dims[2])
}
if(!length(i)){
i = seq_len(private$dims[1])
}
if(is.logical(i)){
i = which(i)
}
if(is.logical(j)){
j = which(j)
}
# Masks of subscripts that actually fall inside the view.
real_i = i <= private$dims[1]
real_j = j <= private$dims[2]
re = matrix(NA, nrow = length(i), ncol = length(j))
private$last_visited = Sys.time()
# if(is.null(private$data)){
# # load all data
# private$data = as.matrix(fst::read_fst(private$file_path))
# }
# if(private$transpose){
# re[real_i, real_j] = t(private$data[j[real_j], i[real_i]])
# }else{
# re[real_i, real_j] = private$data[i[real_i], j[real_j]]
# }
# Only the needed fst columns are read from disk; when transposed, view
# rows select fst columns and view columns select fst rows.
# NOTE(review): `dat[..., ]` uses default drop=TRUE, so a single
# selected row becomes a vector here -- confirm single-row/col
# subsets behave as intended.
if(private$transpose){
col_names = private$meta$columnNames[i[real_i]]
dat = as.matrix(fst::read_fst(private$file_path, columns = col_names))
dat = dat[j[real_j], ]
re[real_i, real_j] = t(dat)
}else{
col_names = private$meta$columnNames[j[real_j]]
dat = as.matrix(fst::read_fst(private$file_path, columns = col_names))
dat = dat[i[real_i], ]
re[real_i, real_j] = dat
}
rm(dat)
gc()
dimnames(re) = NULL
# wait 10 secs to see if data idle, if true, remove private$data
# later::later(function(){
# d = as.numeric(difftime(Sys.time(), private$last_visited, units = 'secs') )
# if(d >= private$delayed){
# private$data = NULL
# gc()
# }
# }, delay = private$delayed)
if(drop){
return(drop(re))
}else{
return(re)
}
}
)
)
# --- S3 methods for LazyFST -------------------------------------------------
# Arithmetic group generics dispatch on EITHER operand, so each method below
# may receive the LazyFST object as `a` or as `b` (e.g. `lazy + 1` and
# `1 + lazy` both land here).  The original methods assumed the LazyFST was
# always the first operand and errored otherwise (calling `$subset()` on a
# plain numeric); materializing whichever operand is lazy fixes that while
# preserving results for the previously working call patterns.

# Materialize x if it is a LazyFST proxy, otherwise return it unchanged.
.lazyfst_materialize <- function(x){
  if(is(x, 'LazyFST')){
    x$subset()
  }else{
    x
  }
}

#' Subset a LazyFST like a matrix; missing subscripts mean "all".
#' @export
`[.LazyFST` <- function(obj, i, j, ..., drop = FALSE){
  if(missing(i)){
    i = NULL
  }
  if(missing(j)){
    j = NULL
  }
  obj$subset(i, j, ..., drop = drop)
}

#' @export
`+.LazyFST` <- function(a, b){
  if(missing(b)){
    # unary plus: +x
    return(+.lazyfst_materialize(a))
  }
  .lazyfst_materialize(a) + .lazyfst_materialize(b)
}

#' @export
`-.LazyFST` <- function(a, b){
  if(missing(b)){
    # unary minus: -x (previously unhandled)
    return(-.lazyfst_materialize(a))
  }
  .lazyfst_materialize(a) - .lazyfst_materialize(b)
}

#' @export
`*.LazyFST` <- function(a, b){
  .lazyfst_materialize(a) * .lazyfst_materialize(b)
}

#' @export
`/.LazyFST` <- function(a, b){
  .lazyfst_materialize(a) / .lazyfst_materialize(b)
}

#' @export
dim.LazyFST <- function(x){
  dim_info = x$get_dims()
  # A 1-D view has no dim attribute, mirroring base vectors.
  if(length(dim_info) == 1){
    dim_info = NULL
  }
  dim_info
}

#' @export
length.LazyFST <- function(x){
  prod(x$get_dims())
}

#' @export
as.array.LazyFST <- function(x, ...){
  as.array(x$subset(), ...)
}

#' @export
Mod.LazyFST <- function(z){
  base::Mod(z$subset())
}

#' @export
Arg.LazyFST <- function(z){
  base::Arg(z$subset())
}

#' @export
exp.LazyFST <- function(x){
  base::exp(x$subset())
}
#' Function try to load FST file cache, if not found, read HDF5 file
#' @param fst_path fst cache path
#' @param h5_path alternate hdf5 path
#' @param h5_name hdf5 data name
#' @param fst_need_transpose does fst data need transpose?
#' @param fst_need_drop drop dimensions
#' @param ram read to ram?
#'
load_fst_or_h5 <- function(
  fst_path, h5_path, h5_name, fst_need_transpose = F, fst_need_drop = F, ram = F
){
  # No fst cache on disk: fall back to the HDF5 copy.
  if(!file.exists(fst_path)){
    return(load_h5(file = h5_path, name = h5_name, read_only = T, ram = ram))
  }
  # Lazy mode: hand back a LazyFST proxy that reads columns on demand.
  if(!ram){
    return(LazyFST$new(file_path = fst_path, transpose = fst_need_transpose))
  }
  # RAM mode: materialize the whole table as an unnamed matrix, applying
  # the requested transpose / dimension-drop.
  mat = as.matrix(fst::read_fst(fst_path))
  dimnames(mat) = NULL
  if(fst_need_transpose){
    mat = t(mat)
  }
  if(fst_need_drop){
    mat = drop(mat)
  }
  mat
}
|
26fae9feb961f4bf006909396e0eec6c81f71981
|
28ba974b2647aaebf2e4cfe32103f63456414bf4
|
/nonmem/190223_nonmem_process_sim.R
|
3f784e457970cdc11b2702e4479678b863e04117
|
[] |
no_license
|
jhhughes256/nivo_sim
|
87d1089638319df14e4ef9d16fa2271e66b20921
|
255e86a054469f05848fd64060212bde085f7680
|
refs/heads/master
| 2022-01-08T12:31:48.062569
| 2019-07-03T21:32:31
| 2019-07-03T21:32:31
| 168,279,237
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,805
|
r
|
190223_nonmem_process_sim.R
|
# Process NONMEM simulation .fit file from 190220_nonmem_pop.R dataset
# -----------------------------------------------------------------------------
# Aim is to replicate some of the plots from the paper
# Designed to be used with three separate simulation files
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Clear workspace
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace; acceptable for a standalone analysis script only.
rm(list=ls(all=TRUE))
graphics.off()
# Set working directory
# if not working with RStudio project place working directory here
# setwd("C:/.../nivo_sim/")
# Load package libraries
library(plyr)	 # New plyr - required for mrgsolve
library(ggplot2)	# Graphical package
# Source processSIMdata function
source("nonmem/processSIMdata.R")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Simulation data processing
# Set up environment for function
proj.dir <- getwd()
master.dir <- paste(getwd(), "output", sep = "/")
# Three simulation runs: 20 mg q2w, 1280 mg q2w and placebo.
run.name <- paste0("nm_", c("20mgq2w", "1280mgq2w", "placebo"), "_haz")
# Process data (only needs to be run once as it saves a .csv)
# llply(run.name, function(x) {
#   processSIMdata(paste0(x, ".ctl"))
# })
setwd(proj.dir)
# Read in simdata
# "." encodes missing values in NONMEM output.
simdata.list <- llply(run.name, function(x) {
read.csv(paste0("output/", x, ".nm7/", x, ".fit.csv"),
stringsAsFactors = F, na.strings = ".")
})
# Correct DOSEGRP column
simdata.list[[1]]$DOSEGRP <- 2
simdata.list[[2]]$DOSEGRP <- 3
# Bind together
simdata <- do.call(rbind, simdata.list)
# ID represents Dose group
# SIM represents patient ID
# ID == 1 - placebo group
# ID == 2 - 20mg q2w
# ID == 3 - 1280mg q2w
# Pair each treated row with the placebo hazard at the same position so a
# hazard ratio (treated / placebo) can be formed row-wise.
match.data <- ddply(simdata[simdata$DOSEGRP != 1,], .(DOSEGRP), function(df) {
cbind(df,
data.frame(
HAZPLAC = simdata$HAZRATE[simdata$DOSEGRP == 1]
)
)
})
match.data$HAZRAT <- with(match.data, HAZRATE/HAZPLAC)
# Mean and 90% interval of the hazard ratio per dose group and time point.
hazdata <- ddply(match.data, .(DOSEGRP, TIME), function(df) {
data.frame(
MED = mean(df$HAZRAT, na.rm = T),
CI90LO = quantile(df$HAZRAT, prob = 0.05, na.rm = T),
CI90HI = quantile(df$HAZRAT, prob = 0.95, na.rm = T)
)
})
# Rows at the time point where the mean hazard ratio is closest to 0.4.
closest <- which(abs(hazdata$MED - 0.4) == min(abs(hazdata$MED - 0.4), na.rm = T))
hazdata[hazdata$TIME == hazdata$TIME[closest],]
# Median and 90% interval of tumour size (SLD) per dose group and time.
tumdata <- ddply(simdata, .(DOSEGRP, TIME), function(df) {
data.frame(
MED = median(df$TUM_SLD, na.rm = T),
CI90LO = quantile(df$TUM_SLD, prob = 0.05, na.rm = T),
CI90HI = quantile(df$TUM_SLD, prob = 0.95, na.rm = T)
)
})
# Median and 90% interval of drug concentration per dose group and time.
condata <- ddply(simdata, .(DOSEGRP, TIME), function(df) {
data.frame(
MED = median(df$CONC, na.rm = T),
CI90LO = quantile(df$CONC, prob = 0.05, na.rm = T),
CI90HI = quantile(df$CONC, prob = 0.95, na.rm = T)
)
})
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Hazard-ratio summary vs time, faceted by dose group.
p <- NULL
p <- ggplot(data = hazdata)
p <- p + geom_line(aes(x = TIME, y = MED), size = 1)
p <- p + geom_line(aes(x = TIME, y = CI90LO),
size = 1, linetype = "dashed")
p <- p + geom_line(aes(x = TIME, y = CI90HI),
size = 1, linetype = "dashed")
p <- p + facet_wrap(~DOSEGRP)
p
# See if tumour growth goes down
p <- NULL
p <- ggplot(data = tumdata)
p <- p + geom_line(aes(x = TIME, y = MED), size = 1)
p <- p + geom_line(aes(x = TIME, y = CI90LO),
size = 1, linetype = "dashed")
p <- p + geom_line(aes(x = TIME, y = CI90HI),
size = 1, linetype = "dashed")
p <- p + facet_wrap(~DOSEGRP)
p
# Concentration-time profile for dose group 3 (1280 mg q2w) only.
p <- NULL
p <- ggplot(data = condata[condata$DOSEGRP == 3,])
p <- p + geom_line(aes(x = TIME, y = MED), size = 1)
p <- p + geom_line(aes(x = TIME, y = CI90LO),
size = 1, linetype = "dashed")
p <- p + geom_line(aes(x = TIME, y = CI90HI),
size = 1, linetype = "dashed")
p
|
490a612399eef91c4666059025e715cd87b17b5f
|
06f362f76b1542bbdea12c34c0f239c9a624c877
|
/man/addISCO.Rd
|
72c425132d5d7c4fd6afcff0a72f57e457714da6
|
[] |
no_license
|
mi2-warsaw/PISAoccupations
|
31412147943082c058d998618ac6c78c06c9caf7
|
0b817f09c5599b59390e58edab602453ac9b0fe4
|
refs/heads/master
| 2020-05-22T06:51:14.081893
| 2017-04-18T18:49:12
| 2017-04-18T18:49:12
| 63,240,717
| 2
| 1
| null | 2016-12-09T00:00:09
| 2016-07-13T11:32:33
|
R
|
UTF-8
|
R
| false
| true
| 481
|
rd
|
addISCO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importexport.R
\name{addISCO}
\alias{addISCO}
\title{Add columns with occupations and change format to long.}
\usage{
addISCO(dataSource, NAcodes)
}
\arguments{
\item{dataSource}{data frame returned by importFromTxt function.}
\item{NAcodes}{occupation codes that need to be converted to NA.}
}
\value{
A data frame in long format, with added columns for the ISCO occupation codes.
}
\description{
Add columns with occupations and change format to long.
}
|
283404b8ad231838cb6fc7e4dba35152a1d2eeb5
|
8472d9c0fc2109172b688c2caf5bcb13d2d2881c
|
/R/EPIC.R
|
482032622d85cf8a850f074a829a21f952e184a2
|
[
"Apache-2.0"
] |
permissive
|
changwn/DeconvoLib
|
eba2a2c0904c42ecc8cbbbb100e45f96951f2465
|
bd0df2d4b56d7fd2e253eea565c5547a5b1d4272
|
refs/heads/master
| 2020-11-28T02:05:15.210339
| 2019-12-23T04:27:27
| 2019-12-23T04:27:27
| 228,144,540
| 0
| 0
|
Apache-2.0
| 2019-12-23T00:15:20
| 2019-12-15T07:17:51
|
R
|
UTF-8
|
R
| false
| false
| 11,112
|
r
|
EPIC.R
|
# code from EPIC r package.
#signature_epic <- EPIC::TRef$refProfiles
#gene_epic <- EPIC::TRef$sigGenes
# Deconvolve bulk expression into per-sample cell-type mRNA proportions and
# cell fractions by constrained weighted least squares against reference
# cell-type expression profiles (copy of EPIC::EPIC).
#
# bulk:            genes x samples matrix/data.frame of bulk expression.
# reference:       NULL (use EPIC::TRef), the name of a prebuilt reference,
#                  or a list with at least $refProfiles and $sigGenes
#                  (optionally $refProfiles.var for per-gene weighting).
# mRNA_cell:       named vector of mRNA-per-cell amounts (default EPIC's).
# mRNA_cell_sub:   named vector overriding individual mRNA_cell entries.
# sigGenes:        signature genes to fit on (default: reference$sigGenes).
# scaleExprs:      if TRUE, rescale bulk and reference to a common depth
#                  over their shared genes before fitting.
# withOtherCells:  if TRUE, allow an uncharacterized "otherCells" fraction
#                  (proportions may sum to < 1).
# constrainedSum:  if TRUE, constrain the summed proportions to [cMin, 1].
# rangeBasedOptim: if TRUE, minimize a range-based objective built from
#                  refProfiles.var instead of the weighted squared error.
#
# Returns a list with mRNAProportions, cellFractions, and fit.gof
# (per-sample goodness-of-fit diagnostics).
EPIC <- function (bulk, reference = NULL, mRNA_cell = NULL, mRNA_cell_sub = NULL,
    sigGenes = NULL, scaleExprs = TRUE, withOtherCells = TRUE,
    constrainedSum = TRUE, rangeBasedOptim = FALSE)
{
    # ---- validate inputs and resolve the reference argument ----
    if (!is.matrix(bulk) && !is.data.frame(bulk))
        stop("'bulk' needs to be given as a matrix or data.frame")
    with_w <- TRUE
    if (is.null(reference)) {
        reference <- EPIC::TRef
    }
    else if (is.character(reference)) {
        # NOTE(review): 'prebuiltRefNames' is a global from the EPIC package
        # and is not defined in this file — confirm it is in scope wherever
        # this copy is used.
        if (reference %in% prebuiltRefNames) {
            reference <- get(reference, pos = "package:EPIC")
        }
        else stop("The reference, '", reference, "' is not part of the allowed ",
            "references:", paste(prebuiltRefNames, collapse = ", "))
    }
    else if (is.list(reference)) {
        refListNames <- names(reference)
        if ((!all(c("refProfiles", "sigGenes") %in%
            refListNames)) || (("refProfiles" %in% refListNames) &&
            !is.null(sigGenes)))
            stop("Reference, when given as a list needs to contain at least the ",
                "fields 'refProfiles' and 'sigGenes' (sigGenes could also be ",
                "given as input to EPIC instead)")
        if (!is.matrix(reference$refProfiles) && !is.data.frame(reference$refProfiles))
            stop("'reference$refProfiles' needs to be given as a matrix or data.frame")
        if (!("refProfiles.var" %in% refListNames)) {
            warning("'refProfiles.var' not defined; using identical weights ",
                "for all genes")
            with_w <- FALSE
        }
        else if (!is.matrix(reference$refProfiles.var) && !is.data.frame(reference$refProfiles.var)) {
            stop("'reference$refProfiles.var' needs to be given as a matrix or ",
                "data.frame when present.")
        }
        else if (!identical(dim(reference$refProfiles.var), dim(reference$refProfiles)) ||
            !identical(dimnames(reference$refProfiles.var), dimnames(reference$refProfiles)))
            stop("The dimensions and dimnames of 'reference$refProfiles' and ",
                "'reference$refProfiles.var' need to be the same")
    }
    else {
        stop("Unknown format for 'reference'")
    }
    # ---- collapse duplicated gene names (median over duplicates) ----
    bulk <- merge_duplicates(bulk, in_type = "bulk samples")
    refProfiles <- merge_duplicates(reference$refProfiles, in_type = "reference profiles")
    if (with_w) {
        refProfiles.var <- merge_duplicates(reference$refProfiles.var,
            warn = F)
    }
    else {
        refProfiles.var <- 0
    }
    # ---- sample / cell-type bookkeeping ----
    nSamples <- NCOL(bulk)
    samplesNames <- colnames(bulk)
    if (is.null(samplesNames)) {
        samplesNames <- 1:nSamples
        colnames(bulk) <- samplesNames
    }
    nRefCells <- NCOL(refProfiles)
    refCellsNames <- colnames(refProfiles)
    # Drop genes that are NA in every bulk sample.
    bulk_NA <- apply(is.na(bulk), MARGIN = 1, FUN = all)
    if (any(bulk_NA)) {
        warning(sum(bulk_NA), " genes are NA in all bulk samples, removing these.")
        bulk <- bulk[!bulk_NA, ]
    }
    # ---- restrict to signature genes shared by bulk and reference ----
    bulkGenes <- rownames(bulk)
    refGenes <- rownames(refProfiles)
    commonGenes <- intersect(bulkGenes, refGenes)
    if (is.null(sigGenes))
        sigGenes <- unique(reference$sigGenes)
    sigGenes <- sigGenes[sigGenes %in% commonGenes]
    nSigGenes <- length(sigGenes)
    # The least-squares system needs more equations (genes) than unknowns.
    if (nSigGenes < nRefCells)
        stop("There are only ", nSigGenes, " signature genes",
            " matching common genes between bulk and reference profiles,",
            " but there should be more signature genes than reference cells")
    # ---- optionally rescale both datasets to a common depth ----
    if (scaleExprs) {
        if (length(commonGenes) < 2000)
            warning("there are few genes in common between the bulk samples and ",
                "reference cells:", length(commonGenes),
                ", so the data scaling ", "might be an issue")
        bulk <- scaleCounts(bulk, sigGenes, commonGenes)$counts
        temp <- scaleCounts(refProfiles, sigGenes, commonGenes)
        refProfiles <- temp$counts
        if (with_w)
            # The variances must be rescaled by the same per-sample factors
            # as the profiles they describe.
            refProfiles.var <- scaleCounts(refProfiles.var, sigGenes,
                normFact = temp$normFact)$counts
    }
    else {
        bulk <- bulk[sigGenes, ]
        refProfiles <- refProfiles[sigGenes, ]
        if (with_w)
            refProfiles.var <- refProfiles.var[sigGenes, ]
    }
    # ---- mRNA-per-cell amounts (defaults plus user overrides) ----
    if (is.null(mRNA_cell))
        mRNA_cell <- EPIC::mRNA_cell_default
    if (!is.null(mRNA_cell_sub)) {
        if (is.null(names(mRNA_cell_sub)) || !is.numeric(mRNA_cell_sub))
            stop("When mRNA_cell_sub is given, it needs to be a named numeric vector")
        mRNA_cell[names(mRNA_cell_sub)] <- mRNA_cell_sub
    }
    # ---- objective functions for the constrained optimization ----
    # Weighted sum of squared residuals of A %*% x ~ b.
    minFun <- function(x, A, b, w) {
        return(sum((w * (A %*% x - b)^2), na.rm = TRUE))
    }
    # Range-based error: a gene contributes only when b falls outside the
    # interval [(A - A.var) x, (A + A.var) x]; then its distance to the
    # nearest bound is counted.
    minFun.range <- function(x, A, b, A.var) {
        val.max <- (A + A.var) %*% x - b
        val.min <- (A - A.var) %*% x - b
        cErr <- rep(0, length(b))
        outOfRange <- (sign(val.max) * sign(val.min) == 1)
        cErr[outOfRange] <- pmin(abs(val.max[outOfRange]), abs(val.min[outOfRange]))
        return(sum(cErr, na.rm = TRUE))
    }
    # Per-gene weights: higher profile/variance ratio = more trusted gene;
    # capped at 100x the median to limit single-gene dominance.
    if (with_w && !rangeBasedOptim) {
        w <- rowSums(refProfiles/(refProfiles.var + 1e-12), na.rm = TRUE)
        med_w <- stats::median(w[w > 0], na.rm = TRUE)
        w[w > 100 * med_w] <- 100 * med_w
    }
    else w <- 1
    # ---- linear constraints: x >= 0 and (optionally) cMin <= sum(x) <= 1 ----
    if (withOtherCells) {
        cMin <- 0
    }
    else {
        cMin <- 0.99
    }
    cMax <- 1
    ui <- diag(nRefCells)
    ci <- rep(0, nRefCells)
    if (constrainedSum) {
        ui <- rbind(ui, rep(1, nRefCells), rep(-1, nRefCells))
        ci <- c(ci, cMin, -cMax)
    }
    # Strictly feasible equal-proportion starting point for constrOptim.
    cInitProp <- (min(1, cMax) - 1e-05)/nRefCells
    # ---- fit each sample independently and collect goodness-of-fit ----
    tempPropPred <- lapply(1:nSamples, FUN = function(cSample) {
        b <- bulk[, cSample]
        if (!rangeBasedOptim) {
            fit <- stats::constrOptim(theta = rep(cInitProp,
                nRefCells), f = minFun, grad = NULL, ui = ui,
                ci = ci, A = refProfiles, b = b, w = w)
        }
        else {
            fit <- stats::constrOptim(theta = rep(cInitProp,
                nRefCells), f = minFun.range, grad = NULL, ui = ui,
                ci = ci, A = refProfiles, b = b, A.var = refProfiles.var)
        }
        fit$x <- fit$par
        # Without an "other cells" term the proportions are renormalized to 1.
        if (!withOtherCells)
            fit$x <- fit$x/sum(fit$x, na.rm = TRUE)
        b_estimated <- refProfiles %*% fit$x
        # Correlations between observed and reconstructed expression
        # (need > 2 points; otherwise recorded as NA).
        if (nSigGenes > 2) {
            suppressWarnings(corSp.test <- stats::cor.test(b,
                b_estimated, method = "spearman"))
            corPear.test <- stats::cor.test(b, b_estimated, method = "pearson")
        }
        else {
            corSp.test <- corPear.test <- list()
            corSp.test$estimate <- corSp.test$p.value <- corPear.test$estimate <- corPear.test$p.value <- NA
        }
        regLine <- stats::lm(b_estimated ~ b)
        regLine_through0 <- stats::lm(b_estimated ~ b + 0)
        # RMSE of the fit and of the all-zero solution (baseline).
        if (!rangeBasedOptim) {
            rmse_pred <- sqrt(minFun(x = fit$x, A = refProfiles,
                b = b, w = w)/nSigGenes)
            rmse_0 <- sqrt(minFun(x = rep(0, nRefCells), A = refProfiles,
                b = b, w = w)/nSigGenes)
        }
        else {
            rmse_pred <- sqrt(minFun.range(x = fit$x, A = refProfiles,
                b = b, A.var = refProfiles.var)/nSigGenes)
            rmse_0 <- sqrt(minFun.range(x = rep(0, nRefCells),
                A = refProfiles, b = b, A.var = refProfiles.var)/nSigGenes)
        }
        gof <- data.frame(fit$convergence, ifelse(is.null(fit$message),
            "", fit$message), rmse_pred, rmse_0, corSp.test$estimate,
            corSp.test$p.value, corPear.test$estimate, corPear.test$p.value,
            regLine$coefficients[2], regLine$coefficients[1],
            regLine_through0$coefficients[1], sum(fit$x), stringsAsFactors = FALSE)
        return(list(mRNAProportions = fit$x, fit.gof = gof))
    })
    # ---- assemble per-sample results into matrices / data frame ----
    mRNAProportions <- do.call(rbind, lapply(tempPropPred, function(x) x$mRNAProportions))
    dimnames(mRNAProportions) <- list(samplesNames, refCellsNames)
    fit.gof <- do.call(rbind, lapply(tempPropPred, function(x) x$fit.gof))
    dimnames(fit.gof) <- list(samplesNames, c("convergeCode",
        "convergeMessage", "RMSE_weighted", "Root_mean_squared_geneExpr_weighted",
        "spearmanR", "spearmanP", "pearsonR",
        "pearsonP", "regline_a_x", "regline_b",
        "regline_a_x_through0", "sum_mRNAProportions"))
    if (any(fit.gof$convergeCode != 0))
        warning("The optimization didn't fully converge for some samples:\n",
            paste(samplesNames[fit.gof$convergeCode != 0], collapse = "; "),
            "\n - check fit.gof for the convergeCode and convergeMessage")
    # Leftover proportion is attributed to uncharacterized "other" cells.
    if (withOtherCells)
        mRNAProportions <- cbind(mRNAProportions, otherCells = 1 -
            rowSums(mRNAProportions))
    # ---- convert mRNA proportions to cell fractions via mRNA-per-cell ----
    tInds <- match(colnames(mRNAProportions), names(mRNA_cell))
    if (anyNA(tInds)) {
        # Fall back to the "default" mRNA-per-cell entry for unknown types
        # (NA when no default exists, which propagates into cellFractions).
        defaultInd <- match("default", names(mRNA_cell))
        if (is.na(defaultInd)) {
            tStr <- paste(" and no default value is given for this mRNA per cell,",
                "so we cannot estimate the cellFractions, only",
                "the mRNA proportions")
        }
        else {
            tStr <- paste(" - using the default value of",
                mRNA_cell[defaultInd], "for these but this might bias the true cell proportions from",
                "all cell types.")
        }
        warning("mRNA_cell value unknown for some cell types: ",
            paste(colnames(mRNAProportions)[is.na(tInds)], collapse = ", "),
            tStr)
        tInds[is.na(tInds)] <- defaultInd
    }
    # Divide each column by its cell type's mRNA content, then renormalize
    # rows so the fractions sum to 1 per sample.
    cellFractions <- t(t(mRNAProportions)/mRNA_cell[tInds])
    cellFractions <- cellFractions/rowSums(cellFractions, na.rm = FALSE)
    return(list(mRNAProportions = mRNAProportions, cellFractions = cellFractions,
        fit.gof = fit.gof))
}
# Collapse duplicated row names of a matrix/data frame into a single row,
# taking the column-wise median over all occurrences of each name.
#
# mat:     numeric matrix (or data frame) with row names.
# warn:    emit a warning listing how many names were duplicated.
# in_type: optional label (e.g. "bulk samples") inserted into the warning.
#
# Returns `mat` with one row per unique row name.
merge_duplicates <- function (mat, warn = TRUE, in_type = NULL)
{
    is_dup <- duplicated(rownames(mat))
    if (any(is_dup)) {
        dup_names <- unique(rownames(mat)[is_dup])
        if (warn) {
            where <- if (is.null(in_type)) "" else paste(" in the", in_type)
            warning("There are ", length(dup_names), " duplicated gene names",
                where, ". We'll use the median value for each of these cases.")
        }
        # Set aside every row that belongs to a duplicated name ...
        dup_rows <- mat[rownames(mat) %in% dup_names, , drop = FALSE]
        dup_row_names <- rownames(dup_rows)
        # ... keep only first occurrences ...
        mat <- mat[!is_dup, , drop = FALSE]
        # ... and overwrite each kept row with the per-column median over
        # all rows sharing that name.
        med_rows <- sapply(dup_names, FUN = function(nm) {
            apply(dup_rows[dup_row_names == nm, , drop = FALSE],
                MARGIN = 2, FUN = median)
        })
        mat[dup_names, ] <- t(med_rows)
    }
    mat
}
# Rescale an expression matrix to counts-per-million per column (sample).
#
# counts:      genes x samples numeric matrix.
# sigGenes:    rows to keep in the output (default: all rows).
# renormGenes: rows used to compute the per-sample normalization factor
#              (default: all rows); ignored when `normFact` is supplied.
# normFact:    pre-computed per-sample totals; derived from `renormGenes`
#              when NULL.
#
# Returns a list with the rescaled `counts` and the `normFact` used.
scaleCounts <- function (counts, sigGenes = NULL, renormGenes = NULL, normFact = NULL)
{
    if (is.null(sigGenes))
        sigGenes <- 1:nrow(counts)
    if (is.null(normFact)) {
        if (is.null(renormGenes))
            renormGenes <- 1:nrow(counts)
        # Per-sample total over the renormalization genes.
        normFact <- colSums(counts[renormGenes, , drop = FALSE], na.rm = TRUE)
    }
    kept <- counts[sigGenes, , drop = FALSE]
    # Divide every column by its factor, then express per million.
    scaled <- sweep(kept, MARGIN = 2, STATS = normFact, FUN = "/") * 1e+06
    list(counts = scaled, normFact = normFact)
}
|
5ce82e5461b130c650785c5588387b4f0dae6c34
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/man/catIf.Rd
|
7b0be3692dd43ea0173b2f15dc81985d96b06b5e
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| true
| 328
|
rd
|
catIf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{catIf}
\alias{catIf}
\title{Call cat If Condition Is Met}
\usage{
catIf(condition, ...)
}
\arguments{
\item{condition}{if TRUE, cat is called, else not}
\item{\dots}{arguments passed to cat}
}
\description{
Call cat If Condition Is Met
}
|
693cb096b621b635506fafda7e255bd6cbeba106
|
e7d5aaaa05ee4204f414e2adcdb73f93b8fa21fb
|
/code/analysis_default_vary_cb.R
|
9342adb78b479c21b85c008e65e33fe80c696613
|
[] |
no_license
|
markusneumann/altruism_simulation
|
a7d5572f354bcc3ff27532d27c5fec0d51fdbed8
|
f918ffe618e9105744fc22cf7eeacea5d5522712
|
refs/heads/master
| 2020-12-25T17:46:07.544751
| 2020-07-26T20:42:05
| 2020-07-26T20:42:05
| 61,062,722
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,199
|
r
|
analysis_default_vary_cb.R
|
# Simulation driver: run the altruism simulation on a Watts-Strogatz ("WS")
# network while varying the cost-benefit ratio ("cb"), then save the full
# workspace and render the summary figure.
# NOTE(review): `rm(list = ls())` is normally discouraged in scripts; here it
# deliberately clears any objects restored from .Rhistory/.Rsession so the
# sourced simulation starts from a clean environment.
rm(list = ls()) #ensure that no hidden variables are loaded through .Rhistory or .Rsession
library(reshape2)
library(ggplot2)
# ---- simulation settings -------------------------------------------------
N <- 300 #default is 300
nsim <- 100 #default is 100
network_type <- "WS"
vary <- "cb"
WS.nei <- 5 #default is 5
WS.p <- 0.2 #default is 0.1; 0.2 corresponds to Figure 4
#Note: the reason this is 0.2 and not the default 0.1 here is because if it was 0.1,
# the plot wouldn't show any variation based on the cost-benefit ratio;
# in that case, the left-hand side of the plot would simply correspond to Figure 5.
#Instead, the conditions need to be sufficiently difficult for altruism,
# for the benefits of a higher cost-benefit ratio to become evident
generations <- 100
mechanisms <- c("truthful", "negative", "similar")
replication_methods <- c("new", "retain")
mechanism <- mechanisms[1]
replication <- replication_methods[1]
# The quantity varied across the nsim runs: fitness lost per donation.
donation.fit.loss.seq <- seq(-0.5, -0.1, length.out = nsim)
donation.fit.gain <- 1
#Run the simulation and save the results
# (the sourced script reads the settings above and is expected to create
# `results_sims` in this environment)
source("Neumann_Altruism_Simulation.R")
results_name <- paste0(network_type, "_N", N, "_", mechanism, "_", replication, "_vary_", vary)
save.image(paste0("../results/img_", results_name, ".rdata"))
# save(results_sims, file = paste0("../results/result_", results_name, ".rdata"))
#Read in the functions to generate plots
source("analysisFunctions.R")
#make the plot in color; ggsave() then writes the most recent plot to disk
makeSimulationPlot(results_sims, N, generations, nsim, donation.fit.loss.seq, "Cost-benefit ratio", bw = F) + ylab("Percentage of surviving strategies")
# ggsave(filename = paste0("../figures/results_", results_name, ".pdf"), width = 8, height = 6, dpi = 100, units = "in")
ggsave(filename = paste0("../figures/results_", results_name, ".png"), width = 8, height = 6, dpi = 100, units = "in")
# #make the plot in black and white
# makeSimulationPlot(results_sims, N, generations, nsim, donation.fit.loss.seq, "p (Rewiring probability)", bw = T) + ylab("Percentage of surviving strategies")
# ggsave(filename = paste0("../figures/results_", results_name, "_bw.pdf"), width = 8, height = 6, dpi = 100, units = "in")
# ggsave(filename = paste0("../figures/results_", results_name, "_bw.png"), width = 8, height = 6, dpi = 100, units = "in")
|
40d5d12ada3c1a8f33b518201ddbc66487c26c94
|
4a411afcafea626670dd79dddf8ce1f9771f761f
|
/l2/work.R
|
1cd72350475443e980704fb230130e010fc210ef
|
[] |
no_license
|
alfonsokim/machine-learning-class
|
75402424b60e399044f4704709c021448f0dbc8d
|
8ea97008e7303146df7e58634a6b0e93741484f1
|
refs/heads/master
| 2020-03-29T13:10:55.321689
| 2013-12-09T22:04:17
| 2013-12-09T22:04:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 100
|
r
|
work.R
|
# Scratch launcher: switch into the lesson-2 directory and run its Shiny app.
# NOTE(review): the hard-coded setwd() path makes this machine-specific;
# consider passing the app directory to runApp() directly instead.
setwd("/Users/Alfonso/r-workspace/machine-learning/l2")
list.files()
library(shiny)
runApp(".")
|
ece313573b608990a66ce0291f60f1789dc16049
|
fd570307c637f9101ab25a223356ec32dacbff0a
|
/src-local/specpr/src.specpr/crtp/window.r
|
e31adfbf45b57b4ef82f76d520d7a73ab13a80c2
|
[] |
no_license
|
ns-bak/tetracorder-tutorial
|
3ab4dd14950eff0d63429291c648820fb14bb4cb
|
fd07c008100f6021c293ce3c1f69584cc35de98a
|
refs/heads/master
| 2022-07-30T06:04:07.138507
| 2021-01-03T22:19:09
| 2021-01-03T22:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,333
|
r
|
window.r
|
subroutine window(lbnd,ubnd,xmin,xmax,diff)
implicit integer*4 (i-q)
include "../common/hptrm"
include "../common/lundefs"
real*4 lbnd,ubnd,xmin,xmax,diff
character*1 escape
character*80 iopcon,outline
escape = char(27)
# RED Initialize to 0 the following 4 vars
x1 = 0
x2 = 0
y1 = 0
y2 = 0
icheck=0
# axl = 56. # original standard size
# axh = 500.
# ayl = 46.
# ayh = 276.
axl = 112. # 2x size
axh = 1000.
ayl = 92.
ayh = 552.
#
# determine constants to scale data
#
if (diff == 0.) diff = 0.1e-10
dy = (ayh-ayl)/diff
an = xmax - xmin
if (an <= 0) an = 0.1e-10
dx = (axh-axl)/an
1 call serase(0,620,1022,778)
call movabs (0,718)
call sb(0)
write (outline,72) char(0)
call gwrite(outline)
if (icheck==0) {
if (igrmod < 50 | igrmod > 53) {
write (outline,172) char(0)
} else {
write(outline,74) char(0)
}
call gwrite(outline)
write (outline,272) char(0)
call gwrite(outline)
} else {
if (igrmod < 50 | igrmod > 53) {
write (outline,73) char(0)
} else {
write(outline,75) char(0)
}
call gwrite(outline)
write (outline,272) char(0)
call gwrite(outline)
call movabs(ix,iy)
}
call sb(0)
#
# Get position, and draw cross hairs
#
if (icheck==0) {
call gcrpos(ix,iy,xpos,ypos,xmax,xmin,lbnd,diff,iopcon,ier)
call movabs (int(axl),iy)
call drwabs(int(axh),iy)
call movabs(ix,int(ayl))
call drwabs(ix,int(ayh))
} else {
call gcrpos(ix2,iy2,xpos,ypos,xmax,xmin,lbnd,diff,iopcon,ier)
call movabs (int(axl),iy2)
call drwabs(int(axh),iy2)
call movabs(ix2,int(ayl))
call drwabs(ix2,int(ayh))
call sb(0)
ix=ix2
iy=iy2
}
#
# calculate x and y postion in data space
#
if (icheck==0) {
icheck = 1
x1=xpos
y1=ypos
goto 1
} else {
x2=xpos
y2=ypos
}
if (x2>x1) {
xmax=x2
xmin=x1
} else {
xmax=x1
xmin=x2
}
if (y2>y1) {
lbnd=y1
ubnd=y2
} else {
lbnd=y2
ubnd=y1
}
diff = ubnd-lbnd
72 format(26x,'Graphic Plot Scaling Routine',a1)
172 format(12x,'<cr> to enter Graphics Cursor Position as 1st corner,',a1)
74 format(12x,'use left mousebutton to enter Position as 1st corner,',a1)
272 format(12x,' or e to exit',a1)
73 format(12x,'<cr> to enter Graphics Cursor Position as 2nd corner,',a1)
75 format(12x,'use left mousebutton to enter Position as 2nd corner,',a1)
return
end
|
666dff7d651bc499baf63b235b1679beb4d05d74
|
5a66a0950c91f0f093612bd6496e25cbd7cc5383
|
/week1.R
|
70c13ee0f0cf77e43d073f55f1e8465ab8baf8d5
|
[] |
no_license
|
hizkiafebianto/LearningR
|
5be8590d991712b669cb2420a2e914c680276ac2
|
aff531ac3672f51ec0d284e1ca13a66d5414cd70
|
refs/heads/master
| 2021-01-17T18:22:13.501647
| 2016-06-20T02:36:39
| 2016-06-20T02:36:39
| 60,750,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,810
|
r
|
week1.R
|
#title :week1.R
#description :This is a practice file from week 1 lessons.
#author :Hizkia Febianto
#date :20170609
###Entering Input###
x<-1
print(x)
msg <- "Hello"
msg
#Printing a sequence
x <- 1:20 # a sequence of numbers from 1 to 20
x #Output: [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
###DATA TYPES: VECTORS AND LISTS###
x <- c(0.5,0.6)
x
#Output: [1] 0.5 0.6
x <- vector("logical",10)
x
# [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE
#Coercion
x <- 0:6
class(x)
as.numeric(x)
as.logical(x)
as.character(x)
#Lists
x <- list(1,"a",TRUE,1+4i)
x
###DATA TYPES: MATRICES###
m <- matrix(data = 1:20,nrow = 4,ncol = 5)
m
#Matrices are constructed column-wise
#Output:
# [,1] [,2] [,3] [,4] [,5]
#[1,] 1 5 9 13 17
#[2,] 2 6 10 14 18
#[3,] 3 7 11 15 19
#[4,] 4 8 12 16 20
dim(m)
#Transforming a vector into a matrix
m<-1:10
m
#[1] 1 2 3 4 5 6 7 8 9 10
dim(m)<-c(2,5)
m
# [,1] [,2] [,3] [,4] [,5]
#[1,] 1 3 5 7 9
#[2,] 2 4 6 8 10
#Creating a matrix with cbind() and rbind()
x <- c(1,2,3)
y <- c("a","b","c")
cbind(x,y)
# x y
#[1,] "1" "a"
#[2,] "2" "b"
#[3,] "3" "c"
rbind(x,y)
###DATA TYPES: FACTORS###
x<-factor(c("yes","yes","no","yes","no"))
x
table(x)
unclass(x)
###DATA TYPES: MISSING VALUES###
x<-c(1,2,NaN,NA,4)
is.na(x)
is.nan(x)
###DATA TYPES: DATA FRAMES###
x<-data.frame(foo=1:4, bar=c(T,T,F,F))
x
###NAMES ATTRIBUTE###
#Vectors
x<-1:3
names(x)<-c("foo","bar","woof")
x
# foo bar woof
# 1 2 3
#Lists
x<-list(a=1,b=2,c=3)
x
#Matrices
x<-matrix(data = 1:6,nrow = 2,ncol = 3)
dimnames(x) <- list(c("a","b"),c("x","y","z"))
x
###TEXTUAL DATA FORMAT###
#dput() can only be used in a single R object
y <- data.frame(a=c(1,2,5), b=c(2,3,4))
dput(y)
#structure(list(a = c(1, 2, 5), b = c(2, 3, 4)), .Names = c("a",
#"b"), row.names = c(NA, -3L), class = "data.frame")
dput(y,file="y.R")
new.y <- dget("y.R")
new.y
#dump() is used for storing multiple R objects
x <- "foo"
y <- data.frame(a=c(1,2,5), b=c(2,3,4))
dump(c("x","y"), file = "data.R")
rm(x,y)
source("data.R")
###cONNECTIONS###
con<-url("http://www.wakastudio.com","r")
head(readLines(con),50)
close(con)
###SUBSETTING###
#Basic
x<-c("a","b","c","c","d","e")
x[1]
#[1] "a"
x[1:4]
#[1] "a" "b" "c" "c"
x[x>"a"]
#[1] "b" "c" "c" "d" "e"
u <- x>"a"
u
#[1] FALSE TRUE TRUE TRUE TRUE TRUE
x[c(T,F,T,T)]
#[1] "a" "c" "c" "d"
#Subsetting Lists
x <- list(foo=1:4, alp = c("a","d"), rum = "Hello")
x[1]
#$foo
#[1] 1 2 3 4
x$alp
#[1] "a" "d"
x["alp"]
#$alp
#[1] "a" "d"
x[["alp"]]
#[1] "a" "d"
x[c(1,3)]
#$foo
#[1] 1 2 3 4
#$rum
#[1] "Hello"
x <- list(a=list(2,3,4), b=c("a","b","c"))
x[[1]][[1]]
x[[c(2,3)]]
#[1] "c"
x[c(2,3)]
#$b
#[1] "a" "b" "c"
#$<NA>
# NULL
#Subsetting a matrix
x <- matrix(data=1:20,ncol = 4, nrow = 5)
x[1,2]
#[1] 6
x[2,]
#[1] 2 7 12 17
#Retaining the dimension when subsetting
x[2,,drop=FALSE]
#The output will be a 1x4 matrix
# [,1] [,2] [,3] [,4]
#[1,] 2 7 12 17
###REMOVING NA VALUES###
x <- c(1,2,NA,4,NA,NA)
bad <- is.na(x)
x[!bad]
#[1] 1 2 4
airquality[1:6,]
good <- complete.cases(airquality)
airquality[good,][1:6,]
###VECTORIZED OPERATIONS###
#Vectors
x <- 1:3
y <- 5:7
x+y
#[1] 6 8 10
x/y
#Matrices
x<-matrix(1:4,2,2)
y<-matrix(rep(10,4),2,2)
x*y ##elementwise multiplication
# [,1] [,2]
#[1,] 10 30
#[2,] 20 40
x %*% y ##true matrix multiplicatition
# [,1] [,2]
#[1,] 40 40
#[2,] 60 60
###HOMEWORK###
x<- 1:4
y<- 2:3
class(x+y)
data
data <- read.csv("hw1_data.csv")
data[47,"Ozone"]
bad <- is.na(data["Ozone"])
sum(bad)
summary(data[!bad,])
summary(data[data$Ozone>31 & data$Temp > 90,])
summary(data[data$Month == 6,])
summary(data[data$Month == 5,])
|
f51817749d9789b2ae867a748fbf9cb32016fbd0
|
05343067e6a3b7b66cdc58a1be5c3b0efc939b27
|
/day02/2-ribbon.R
|
e8671778fdce135b67297b56fb18b61895ede6bb
|
[] |
no_license
|
pdil/adventR
|
1ebdbea943c21609f9d4e182e608536ada0a9ef7
|
8a18aae49f81dca608837b24a6a4cd68bc0c17b8
|
refs/heads/master
| 2020-01-23T21:50:02.372178
| 2016-01-21T03:10:29
| 2016-01-21T03:10:29
| 47,226,210
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
2-ribbon.R
|
library(readr)
library(dplyr)

# Advent of Code 2015, day 2 (part 2): total ribbon needed.
# Each line of input.txt is a present "LxWxH"; a present needs ribbon equal
# to its smallest face perimeter (to wrap) plus its volume (for the bow).

# read input from file
input <- read_lines("input.txt")

# split each "LxWxH" line into three columns (strsplit is vectorized, so no
# lapply wrapper is needed), then coerce every column to numeric at once
input_split <- input %>% strsplit(split = "x") %>% unlist %>%
  matrix(ncol = 3, byrow = TRUE) %>% as.data.frame(stringsAsFactors = FALSE)
input_split[] <- lapply(input_split, as.numeric)

# compute the three face perimeters, the volume, and the smallest perimeter
input_split <- input_split %>%
  mutate(peri1 = 2*V1 + 2*V2, peri2 = 2*V1 + 2*V3, peri3 = 2*V2 + 2*V3, vol = V1*V2*V3) %>%
  mutate(smallest_peri = pmin(peri1, peri2, peri3))

# result is sum of smallest perimeters and sum of all volumes
sum(input_split$smallest_peri) + sum(input_split$vol)
# 3812909
|
77dea56b922b2c75504409ac28ded2b7aaf5492c
|
8691943a2547990118c85beefecdc43312371618
|
/R/fplot.R
|
30402e02e032a1f4d55bf6c50c118e6dd75263b0
|
[] |
no_license
|
Flavjack/GerminaR
|
f0d97cbf735520db702538601e2cf511527cf66c
|
3535a1aea0729abe5ba79114386885d487f946f4
|
refs/heads/master
| 2022-09-04T10:21:15.391932
| 2022-05-29T16:19:10
| 2022-05-29T16:19:10
| 49,505,163
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,041
|
r
|
fplot.R
|
#' Plot line or bar graphic
#'
#' @description Function use the dtsm function for plot the results
#' @param data Output from ger_testcomp function
#' @param type Type of graphic. "bar" or "line"
#' @param x Axis x variable
#' @param y Axis y variable
#' @param group Group variable
#' @param ylab Title for the axis y
#' @param xlab Title for the axis x
#' @param glab Title for the legend
#' @param ylimits limits of the y axis
#' @param xrotation Rotation in x axis c(angle, h, v)
#' @param xtext Text labels in x axis
#' @param gtext Text labels in groups
#' @param legend the position of legends ("none", "left", "right", "bottom", "top", or two-element numeric vector)
#' @param sig Column with the significance
#' @param sigsize Font size in significance letters
#' @param error Show the error bar ("ste" or "std").
#' @param color colored figure c(TRUE, FALSE) or vector with the color.
#' @param opt Add news layer to the plot
#' @return Line o bar plot
#' @import dplyr
#' @importFrom grDevices colorRampPalette colors
#' @importFrom tibble deframe
#' @importFrom grDevices gray.colors
#' @import ggplot2
#' @export
#'
#' @examples
#'
#' \dontrun{
#'
#' library(GerminaR)
#' library(dplyr)
#'
#' smr <- ger_summary(SeedN = "seeds"
#' , evalName = "D"
#' , data = prosopis) %>%
#' mutate(across(rep:temp, as.factor))
#'
#' av <- aov(grp ~ nacl*temp, smr)
#'
#' anova(av)
#'
#' mc <- ger_testcomp(aov = av
#' , comp = c("nacl", "temp"))
#'
#' plotdt <- mc$table
#'
#' plot <- fplot(data = plotdt
#' , type = "bar"
#' , x = "temp"
#' , y = "grp"
#' , group = "nacl"
#' , sig = "sig"
#' #, error = "ste"
#' , color = T
#' , ylimits = c(0, 120, 20)
#' )
#'
#' plot
#'
#' }
#'
fplot <- function(data
, type = "bar"
, x
, y
, group = NA
, xlab = NA
, ylab = NA
, glab = NA
, ylimits = NA
, xrotation = NA
, xtext = NA
, gtext = NA
, legend = "top"
, sig = NA
, sigsize = 3
, error = NA
, color = TRUE
, opt = NA
){
# match args --------------------------------------------------------------
legend <- match.arg(legend, c("top", "left", "right", "bottom", "none"))
type <- match.arg(type, c("barra", "linea"))
if(!c(x %in% colnames(data))) stop("colum no exist")
if(!c(y %in% colnames(data))) stop("colum no exist")
# -------------------------------------------------------------------------
if(is.null(group)) {group <- x}
xlab <- if(is.null(xlab) || is.na(xlab) || xlab == "") {NULL} else {xlab}
ylab <- if(is.null(ylab) || is.na(ylab) || ylab == "") {NULL} else {ylab}
glab <- if(is.null(glab) || is.na(glab) || glab == "") {NULL} else {glab}
opt <- if(is.null(opt) || is.na(opt) || opt == "") {NULL} else {opt}
sig <- if(is.null(sig) || is.na(sig) || sig == "" || sig == "none") {NULL} else {sig}
error <- if(is.null(error) || is.na(error) || error == "" || error == "none") {
NULL} else {error}
color <- if(is.null(color) || is.na(color) || color == "" || color == "yes") {
TRUE} else if (color == "none") {FALSE} else {color}
ylimits <- if(all(is.null(ylimits)) || all(is.na(ylimits)) || all(ylimits %in% "")) {
NULL
} else if(is.character(ylimits)) {
ylimits %>%
gsub("[[:space:]]", "", .) %>%
strsplit(., "[*]") %>%
unlist() %>% as.numeric()
} else {ylimits}
xtext <- if(is.null(xtext) || is.na(xtext) || xtext == "") {
NULL} else if (is.character(xtext)){
xtext %>%
strsplit(., ",") %>%
unlist() %>%
base::trimws()
} else {xtext}
gtext <- if(is.null(gtext) || is.na(gtext) || gtext == "") {
NULL} else if (is.character(gtext)){
gtext %>%
strsplit(., ",") %>%
unlist() %>%
base::trimws()
} else {gtext}
xrotation <- if(all(is.null(xrotation)) || all(is.na(xrotation)) || all(xrotation == "")) {
c(0, 0.5, 0.5)
} else if (is.character(xrotation)){
xrotation %>%
gsub("[[:space:]]", "", .) %>%
strsplit(., "[*]") %>%
unlist() %>% as.numeric()
} else {xrotation}
# graph-color -------------------------------------------------------------
if (isTRUE(color)) {
color <- colorRampPalette(
c("#86CD80" # green
, "#F4CB8C" # orange
, "#F3BB00" # yellow
, "#0198CD" # blue
, "#FE6673" # red
))(length(data[[group]] %>% unique()))
} else if (isFALSE(color)) {
color <- gray.colors(n = data[[group]] %>% unique() %>% length()
, start = 0.8
, end = 0.3)
} else {
color <- color
}
# sci-labels --------------------------------------------------------------
if ( !is.null(xlab) ) {
xlab <- xlab %>%
gsub(pattern = " ", "~", .)
xlab <- eval(expression(parse(text = xlab)))
}
if ( !is.null(ylab) ) { #
ylab <- ylab %>%
gsub(pattern = " ", "~", .)
ylab <- eval(expression(parse(text = ylab)))
}
if ( !is.null(glab) ) {
glab <- glab %>%
gsub(pattern = " ", "~", .)
glab <- eval(expression(parse(text = glab)))
}
# type --------------------------------------------------------------------
plotdt <- data %>%
select(!starts_with("{") | !ends_with("}")) %>%
select_if(~ !all(is.na(.))) %>%
drop_na(names(.[1])) %>%
mutate(across(c({{group}}), as.factor))
# bar plot ----------------------------------------------------------------
if(type == "barra") {
plot <- plotdt %>%
ggplot(., aes(x = .data[[x]]
, y = .data[[y]]
, fill = .data[[group]])
) +
geom_col(
position = position_dodge2()
, colour = "black"
, size = 0.4
, na.rm = T
) +
labs(
x = if(is.null(xlab)) x else xlab
, y = if(is.null(ylab)) y else ylab
, fill = if(is.null(glab)) group else glab
) +
{
if (!is.null(error))
geom_errorbar(
aes(ymin = .data[[y]] - .data[[error]]
, ymax = .data[[y]] + .data[[error]] )
, position = position_dodge(width = 0.9)
, width = 0.15
, na.rm = T)
} +
{
if (!is.null(sig) )
geom_text(
aes(label = .data[[sig]]
, y = if(!is.null(error)) .data[[y]] + .data[[error]] else .data[[y]])
, position = position_dodge(width = 0.9)
, na.rm = T
, colour = "black"
, vjust = -0.5
, hjust = 0.5
, angle = 0
, size = sigsize
)
} +
scale_fill_manual(values = color
, labels = if(!is.null(gtext)) gtext else waiver())
}
# line plot ---------------------------------------------------------------
if (type == "linea") {
plot <- plotdt %>%
ggplot( aes(x = .data[[x]]
, y = .data[[y]]
, shape = .data[[group]]
, colour = .data[[group]]
) ) +
geom_point( aes(group = .data[[group]]
, shape = .data[[group]]
, color = .data[[group]]
), size = 2.5 ) +
geom_line( aes( group = .data[[group]]
, color = .data[[group]]
, linetype = .data[[group]]
) , size = 1 ) +
labs(x = if(is.null(xlab)) x else xlab
, y = if(is.null(ylab)) y else ylab
, shape = if(is.null(glab)) group else glab
, color = if(is.null(glab)) group else glab
, linetype = if(is.null(glab)) group else glab
) +
{
if (!is.null(error))
geom_errorbar(aes(ymin = .data[[y]] - .data[[error]]
, ymax = .data[[y]] + .data[[error]])
, width = 0.08)
} +
{
if (!is.null(sig))
geom_text(
aes(label = .data[[sig]]
, y = if(!is.null(error)) .data[[y]] + .data[[error]] else .data[[y]])
, colour = "black"
, vjust = -0.5
, hjust = 0.5
, angle = 0
, size = 3
)
} +
scale_color_manual(
labels = if(!is.null(gtext)) gtext else waiver()
, values = color
) +
scale_linetype_discrete(labels = if(!is.null(gtext)) gtext else waiver()) +
scale_shape_discrete(labels = if(!is.null(gtext)) gtext else waiver())
}
# layers ------------------------------------------------------------------
graph <- plot +
{ if(!is.null(xtext)) scale_x_discrete(labels = xtext) } +
{
if(!is.null(ylimits))
scale_y_continuous(
limits = ylimits[1:2]
, breaks = seq(ylimits[1], ylimits[2], by = abs(ylimits[3]))
, expand = c(0,0)
)
}
layers <- 'graph +
theme_minimal() +
theme(legend.position = legend
, panel.border = element_rect(colour = "black", fill=NA)
, panel.background = element_rect(fill = "transparent")
, legend.background = element_rect(colour = "transparent", fill = "transparent")
, axis.text.x = element_text(angle = xrotation[1]
, hjust= xrotation[2]
, vjust = xrotation[3])
)'
if(is.null(opt)) {
eval(parse(text = layers))
} else {
eval(parse(text = paste(layers, opt, sep = " + ")))
}
}
|
376977f5810d9471dd10730efbf543d134d6e2ec
|
51aa41cd56fa4ab3e661a4787f23cb831e833787
|
/tsEbolaplots.R
|
baad74a8647be1db82d19cb61463c90179deae4b
|
[] |
no_license
|
dushoff/Ebola_sims
|
9a01d563172a89727c04315140a0803ba6b17c5f
|
a2c33546d656d3d273bb75e19bfacf4894d6b179
|
refs/heads/master
| 2020-04-03T10:05:57.783894
| 2020-02-20T22:50:44
| 2020-02-20T22:50:44
| 63,194,488
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 337
|
r
|
tsEbolaplots.R
|
library(ggplot2)
theme_set(theme_bw())
# Time-series plots of simulation output.
# NOTE(review): relies on a data frame `sims` (with columns day, cases,
# infections, Symptomatic) created by an upstream script before this file
# is sourced — confirm against the caller.
# Daily case counts, one line per symptomatic status.
casePlot <- (
	ggplot(sims, aes(x=day, y=cases))
	+ geom_line(aes(color=Symptomatic), size=1.5)
)
# Print on log and then linear y scales.
print(casePlot+scale_y_log10())
print(casePlot)
# Same pair of plots for new infections per day.
infPlot <- (
	ggplot(sims, aes(x=day, y=infections))
	+ geom_line(aes(color=Symptomatic), size=1.5)
)
print(infPlot+scale_y_log10())
print(infPlot)
|
3c720afec3f8771884b09c7e734061101dcdaa72
|
e2d3550de157dadc159be78eb151210e8c1a7dac
|
/man/casl_tsne.Rd
|
c4a26076518a4fced10cbf40dcda83930cbcabd5
|
[] |
no_license
|
statsmaths/casl
|
b40d812127802c8f07a7c6a94d26a018e71628c0
|
196caaff495cbd60648434424a20db472b5b4eda
|
refs/heads/master
| 2020-03-28T07:03:32.596204
| 2018-11-18T13:20:59
| 2018-11-18T13:20:59
| 147,878,257
| 6
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 809
|
rd
|
casl_tsne.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch09-dim.R
\name{casl_tsne}
\alias{casl_tsne}
\title{Compute t-SNE variance values.}
\usage{
casl_tsne(X, perplexity = 30, k = 2L, iter = 1000L, rho = 100)
}
\arguments{
\item{X}{A numeric data matrix.}
\item{perplexity}{Desired perplexity score for all variables.}
\item{k}{Dimensionality of the output.}
\item{iter}{Number of iterations to perform.}
\item{rho}{A positive numeric learning rate.}
}
\value{
An nrow(X) by k matrix of t-SNE embeddings.
}
\description{
Compute t-SNE variance values.
}
\references{
Taylor Arnold, Michael Kane, and Bryan Lewis.
\emph{A Computational Approach to Statistical Learning}.
Chapman & Hall/CRC Texts in Statistical Science, 2019.
}
\author{
Taylor Arnold, Michael Kane, Bryan Lewis.
}
|
d41c1683dfcebbae9cb1785cd413974d2236f54d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/reliaR/examples/abic.logis.exp.Rd.R
|
54dc4d6737df35ab762592ef193fb8a04507bd97
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
abic.logis.exp.Rd.R
|
library(reliaR)
### Name: abic.logis.exp
### Title: Akaike information criterion (AIC) and Bayesian information
### criterion (BIC) for Logistic-Exponential(LE) distribution
### Aliases: abic.logis.exp
### Keywords: models
### ** Examples
## Load data sets
data(bearings)
## Maximum Likelihood(ML) Estimates of alpha & lambda for the data(bearings)
## Estimates of alpha & lambda using 'maxLik' package
## alpha.est = 2.36754, lambda.est = 0.01059
## Values of AIC, BIC and LogLik for the data(bearings)
abic.logis.exp(bearings, 2.36754, 0.01059)
|
e46246b6a97e8b1bd5c4869f8830ebfad1b7860a
|
9e6f54110611694c0a04fac0d37886192cc5a030
|
/R/scale_tree.R
|
1f6283954b6add79e320af84a5354f6f38386502
|
[] |
no_license
|
cran/bnpsd
|
acdeb759cdf1e725abc9a565889af4e4cbd00e5d
|
273cdd47a5e73724bd72a0c2231644a4a0dfee19
|
refs/heads/master
| 2023-07-15T13:23:18.532331
| 2021-08-25T11:50:26
| 2021-08-25T11:50:26
| 117,667,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,656
|
r
|
scale_tree.R
|
#' Scale a coancestry tree
#'
#' Scale by a scalar `factor` all the edges (`$edge.length`) of a `phylo` object from the `ape` package, including the root edge (`$root.edge`) if present, and additive edges (`$edge.length.add`, present in trees returned by [fit_tree()]).
#' Stops if any of the edges exceed 1 before or after scaling (since these edges are IBD probabilities).
#'
#' @param tree The coancestry tree to edit.
#' @param factor The scalar factor to multiply all edges.
#' Must be non-negative, and not be so large that any edge exceeds 1 after scaling.
#'
#' @return The edited tree with all edges scaled as desired.
#'
#' @examples
#' # create a random tree
#' library(ape)
#' k <- 5
#' tree <- rtree( k )
#'
#' # scale this tree
#' tree_scaled <- scale_tree( tree, 0.5 )
#'
#' @seealso
#' [ape::read.tree()] for reading `phylo` objects and their definition.
#'
#' @export
scale_tree <- function( tree, factor ) {
# require both inputs
if ( missing( tree ) )
stop( '`tree` is required!' )
if ( missing( factor ) )
stop( '`factor` is required!' )
# tree should be valid already
validate_coanc_tree( tree )
# validate factor
if ( length( factor ) != 1 )
stop( '`factor` must be scalar! Observed length: ', length( factor ) )
if ( !is.numeric( factor ) )
stop( '`factor` must be numeric! Observed value: ', factor )
if ( factor < 0 )
stop( '`factor` must be non-negative! Observed value: ', factor )
# start editing values, but will check each carefully to make sure we don't go out of bounds!
# edge lengths are the majority of the work usually:
# apply factor
tree$edge.length <- tree$edge.length * factor
# die if any edge exceeds one now
# (negatives don't occur given what we've checked already)
if ( any( tree$edge.length > 1 ) )
stop( 'At least one `tree` edge length exceeds 1 after scaling, max: ', max( tree$edge.length ) )
# edit additive edges if present
# this case doesn't need any checking (if the previous step worked, all is good)
if ( !is.null( tree$edge.length.add ) )
tree$edge.length.add <- tree$edge.length.add * factor
# edit root edge if present
if ( !is.null( tree$root.edge ) ) {
# this also gets checked
tree$root.edge <- tree$root.edge * factor
# die if any edge exceeds one now
# (negatives don't occur given what we've checked already)
if ( any( tree$root.edge > 1 ) )
stop( 'Root edge length exceeds 1 after scaling: ', tree$root.edge )
}
# return now if all was scaled and validated
return( tree )
}
|
154dc708881b0338df42bb9a5214c0456b9b259e
|
15c072f05f8670f072b7358c4d70774456fabd38
|
/man/spark_options_box.Rd
|
8b95770cf1d242cf189d67c9c263ddd1667bfaae
|
[] |
no_license
|
mrhopko/DTHelper
|
3fb0e73789d0c065f6a9780726b8271ba3c027b5
|
d5fc3a14ecadfb2cf84932c835199ef0bec87cca
|
refs/heads/master
| 2021-01-20T04:16:57.347415
| 2017-04-28T07:57:04
| 2017-04-28T07:57:04
| 89,670,626
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,041
|
rd
|
spark_options_box.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparkline_helper.R
\name{spark_options_box}
\alias{spark_options_box}
\title{Create sparkline options js to include in column call back}
\usage{
spark_options_box(raw = NULL, showOutliers = NULL, outlierIQR = NULL,
boxLineColor = NULL, boxFillColor = NULL, whiskerColor = NULL,
outlierLineColor = NULL, outlierFillColor = NULL, spotRadius = NULL,
medianColor = NULL, target = NULL, targetColor = NULL,
minValue = NULL, maxValue = NULL, ...)
}
\arguments{
\item{raw}{If set to false (default) then the values supplied are used to caculate the box data points for you. If true then you must pre-calculate the points (see below)}
\item{showOutliers}{If true (default) then outliers (values > 1.5x the IQR) are marked with circles and the whiskers are placed at Q1 and Q3 instead of the least and greatest value}
\item{outlierIQR}{Set the inter-quartile range multipler used to calculate values that qualify as an outlier - Defaults to 1.5}
\item{boxLineColor}{CSS line colour used to outline the box}
\item{boxFillColor}{CSS fill colour used for the box}
\item{whiskerColor}{CSS colour used to draw the whiskers}
\item{outlierLineColor}{CSS colour used to draw the outlier circles}
\item{outlierFillColor}{CSS colour used to fill the outlier circles}
\item{spotRadius}{Radius in pixels to draw the outlier circles}
\item{medianColor}{CSS colour used to draw the median line}
\item{target}{If set to a value, then a small crosshair is drawn at that point to represent a target value}
\item{targetColor}{CSS colour used to draw the target crosshair, if set}
\item{minValue}{If minvalue and maxvalue are set then the scale of the plot is fixed. By default minValue and maxValue are deduced from the values supplied}
\item{maxValue}{See minValue}
\item{...}{pass additional options to list}
}
\value{
javascript call back
}
\description{
Create sparkline options js to include in column call back
}
|
2736ceb344b8de8a924ae2bcbe3a929e4a6a8966
|
051c93b817d1688caa689aefbd5ccc82006e616c
|
/Plot2.R
|
f80c177da74381f1fc5b6ba3d2bb2f78411ef038
|
[] |
no_license
|
82andries/ExData_Plotting1
|
f1f8288e3462e1584f7c7d4860b3ae0b2a6ab4c8
|
2146faf5dcc229e6ef7522bb3c840f5aad9c5ee2
|
refs/heads/master
| 2020-07-30T15:02:22.520013
| 2016-11-13T16:39:09
| 2016-11-13T16:39:09
| 73,627,821
| 0
| 0
| null | 2016-11-13T16:23:48
| 2016-11-13T16:23:47
| null |
UTF-8
|
R
| false
| false
| 766
|
r
|
Plot2.R
|
#Reading in the data and subsetting for the required dates
powerdata <- read.table("household_power_consumption.txt",header = TRUE, sep=";")
subpowerdata <- subset(powerdata, powerdata$Date=="1/2/2007" | powerdata$Date=="2/2/2007")
# Changing the data from factor to numeric
subpowerdata$Global_active_power <- as.numeric(as.character(subpowerdata$Global_active_power))
#combining the date and time in one
datetime <- paste(subpowerdata$Date, subpowerdata$Time)
#converting datetime
newdatetime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
#creating png file
png("Plot2.png", width=480, height=480)
#plotting data
plot(newdatetime, subpowerdata$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
#closing device
dev.off()
|
9eacd4b0a908a8a272633662a1d16c4cf15d5ea3
|
897d154e93f8c9c45c294cfe88986e7f0b178f51
|
/집단 검정/단일 집단 평균 검정 (T-test).R
|
36cc1e4eb753de0e19893f030324a4d7fb0ac60e
|
[] |
no_license
|
freegray/R_training
|
b9a3d425673e5f201e6e7b2802950765f8c7441e
|
ed3e165931e47df003db0ec5d7d929dd08d6499e
|
refs/heads/master
| 2023-04-12T11:32:41.292096
| 2021-05-21T15:44:40
| 2021-05-21T15:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,439
|
r
|
단일 집단 평균 검정 (T-test).R
|
# 단일 집단 평균 검정(단일 표본 T 검정)
setwd("/Users/yuhayung/Desktop/coding/학원/Rtraining/dataset2")
data <- read.csv("one_sample.csv", header = T)
str(data) # 150
head(data)
x<- data$time
head(x)
summary(x)
mean(x)
mean(x,na.rm = T) # 데이터 정제
x1 <- na.omit(x) # na 데이터 (omit) 빼기
mean(x1)
# 정규분포 검정
# 귀무가설 - x의 데이터 분포는 정규분포이다.
shapiro.test(x1) # x1 에 대한 정규분포 검정
# Shapiro-Wilk normality test
#
# data: x1
# W = 0.99137, p-value = 0.7242
# p 벨류값이 유의 수준 보다 크다 즉, 정규분포를 따른다. 따라서 T 검정으로 평균 차이 검정을 수행한다.
# 정규분포 시각화
par(mfrow = c(1,2))
hist(x1)
qqnorm(x1)
qqline(x1, lty = 1, col = "blue" )
# 평균 차이 검정
# t- test (x, y = NULL, alternative = c("two.sided"/"less"/"greater"), mu = 0, paired = F, var.equal = F, conf.level = 0.95, ...)
t.test(x1, mu = 5.2) # mu 모집단의 평균값
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 0.0001417 <= p-value 유의수준 0.05 보다 작기 때문에 귀무가설 채택
# alternative hypothesis: true mean is not equal to 5.2
# 95 percent confidence interval:
# 5.377613 5.736148
# sample estimates:
# mean of x
# 5.556881
t.test(x1, mu = 5.2, alternative = "greater", conf.level = 0.95)
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 7.083e-05 <= p-value 유의수준 0.05 보다 매우 작기 때문에 채택
# alternative hypothesis: true mean is greater than 5.2
# 95 percent confidence interval:
# 5.406833 Inf
# sample estimates:
# mean of x
# 5.556881
qt(0.05, 108, lower.tail = F) # 귀무가설 임계값 확인
# [1] 1.659085
# 불만족 고객 14명을 대상으로 95% 신뢰 수준에서 양측 검정을 시행한 결과 검정 통계량 p- value 값은 0.0006735로 유의 수준 0.05 보다
# 작아 기존 불만율 20%과 차이가 있다고 볼 수 있다. 즉, 기존 2019년도 고객 불만율과 2020년도 CS 교육 후 불만율에 차이가 있다고 볼 수 있다.
#
# 하지만 양측 검정 결과에서는 기존 불만율보다 크다, 혹은 작다는 방향성은 제시되지 않는다.
# 따라서 방향성을 갖는 단측 가설 검정을 통해서 기존 집단과 비교하여 신규 집단의 불만율이 개선되었는지를 확인해야 한다.
|
5bd58cfe61e675fc40183fc393a6f9c98d17bbaf
|
498e7df01e78657277b23d81d7b07ab431def4fb
|
/east_share_season.R
|
d8a31b3e750aa19f626ad125ce22bd355081e5e8
|
[] |
no_license
|
kralljr/share_medicare
|
de5be725529fd00b42ab8aaf6edd31b91731a16e
|
17aac20ee28e70e5cc93e71d4b11ce5e3f5ec2a5
|
refs/heads/master
| 2021-01-17T07:40:19.613158
| 2016-07-15T15:53:38
| 2016-07-15T15:53:38
| 18,215,156
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,436
|
r
|
east_share_season.R
|
# File to estimate major sources in NE US
#Set directory
home.dir <- "~/Dropbox/SpatialFA/"
dir1 <- file.path(home.dir, "data")
paperdir <- file.path(home.dir, "paper_spatialfa/figs")
#Load my packages
library(share)
library(handles)
library(sharesim)
#load other packages
library(devtools)
library(RColorBrewer)
library(xtable)
library(maps)
library(dplyr)
########
# get DATA
load(file.path(dir1, "speciation_medicare.RData"))
data.rr <- lapply(datall, function(x) {
x[, -c(2)]
})
pm25 <- lapply(datall, function(x) x[, 2])
names(data.rr) <- monsKEEP[, 3]
# get data specifically for each season
# indicator function for warm (ito 2010)
iswarm <- function(dat) {
months <- as.numeric(substr(dat[, 1], 6, 7))
ifelse(between(months, 4, 9), 1, 0)
}
warm <- lapply(data.rr, iswarm)
cold.dat <- list()
warm.dat <- list()
cold.pm <- list()
warm.pm <- list()
for(i in 1 : length(data.rr)) {
print(i)
warm1 <- warm[[i]]
cold.dat[[i]] <- data.rr[[i]][warm1 == 0, ]
warm.dat[[i]] <- data.rr[[i]][warm1 == 1, ]
cold.pm[[i]] <- pm25[[i]][warm1 == 0]
warm.pm[[i]] <- pm25[[i]][warm1 == 1]
}
# restrict to monitors with more than 50 days
cold.great <- which(sapply(cold.pm, length) >= 50)
cold.pm <- cold.pm[cold.great]
cold.dat <- cold.dat[cold.great]
names(cold.dat) <- names(data.rr)[cold.great]
names(cold.pm) <- names(data.rr)[cold.great]
warm.great <- which(sapply(warm.pm, length) >= 50)
warm.pm <- warm.pm[warm.great]
warm.dat <- warm.dat[warm.great]
names(warm.dat) <- names(data.rr)[warm.great]
names(warm.pm) <- names(data.rr)[warm.great]
#get new list of monitors
names.cold <- names(data.rr)[cold.great]
names(names.cold) <- substr(names.cold, 1, 5)
un.cold <- unique(substr(names.cold, 1, 5))
cold.mons <- list()
for(i in 1 : length(un.cold)) {
cold.mons[[i]] <- names.cold[which(names(names.cold) == un.cold[i])]
}
names(cold.mons) <- un.cold
names.warm <- names(data.rr)[warm.great]
names(names.warm) <- substr(names.warm, 1, 5)
un.warm <- unique(substr(names.warm, 1, 5))
warm.mons <- list()
for(i in 1 : length(un.warm)) {
warm.mons[[i]] <- names.warm[which(names(names.warm) == un.warm[i])]
}
names(warm.mons) <- un.warm
########
# Perform SHARE and mAPCA
set.seed(10)
share.warm <- sharehealth(warm.dat, tots = warm.pm, list = warm.mons)
share.cold <- sharehealth(cold.dat, tots = cold.pm, list = cold.mons)
set.seed(10)
mapca.warm <- sharehealth(warm.dat, tots = warm.pm, list = warm.mons, method = "mapca")
mapca.cold <- sharehealth(cold.dat, tots = cold.pm, list = cold.mons, method = "mapca")
#######
# Match results between mAPCA and SHARE
#COLD
nc <- ncol(cold.dat[[1]][, -1])
mapcaload <- mapca.cold$major.sig
#use high threshold so match all sources
match1 <- matchfun(list(mapcaload), share.cold$major.sig, thres = 70 * pi/180)$match[[1]]
rownames(match1) <- paste0("mapca", seq(1, ncol(mapcaload)))
colnames(match1) <- paste0("share", seq(1, ncol(share.cold$major.sig)))
match1 <- whichCS(t(match1))
#reorder mapca
mapca.cold$major.sig <- suppressWarnings(mapca.cold$major.sig[, match1])
mapcaconc.cold <- sapply(mapca.cold$sources, function(x) {
dat <- as.matrix(x[, -1])
dat <- data.frame(x[, 1], suppressWarnings(dat[, match1]))
colnames(dat) <- c("date", paste0("source", seq(1, ncol(dat) - 1)))
dat
}, simplify = F)
names(mapcaconc.cold) <- names(cold.dat)
#WARM
nc <- ncol(warm.dat[[1]][, -1])
mapcaload <- mapca.warm$major.sig
#use high threshold so match all sources
match1 <- matchfun(list(mapcaload), share.warm$major.sig, thres = 70 * pi/180)$match[[1]]
rownames(match1) <- paste0("mapca", seq(1, ncol(mapcaload)))
colnames(match1) <- paste0("share", seq(1, ncol(share.warm$major.sig)))
match1 <- whichCS(t(match1))
#reorder mapca
mapca.warm$major.sig <- suppressWarnings(mapca.warm$major.sig[, match1])
mapcaconc.warm <- sapply(mapca.warm$sources, function(x) {
dat <- as.matrix(x[, -1])
dat <- data.frame(x[, 1], suppressWarnings(dat[, match1]))
colnames(dat) <- c("date", paste0("source", seq(1, ncol(dat) - 1)))
dat
}, simplify = F)
names(mapcaconc.warm) <- names(warm.dat)
########
# Results table
#Number of days
days <- sapply(cold.dat, nrow)
c(min(days), max(days), median(days))
days <- sapply(warm.dat, nrow)
c(min(days), max(days), median(days))
simpleCap <- function(x) {
s1 <- strsplit(x, " ")[[1]]
paste(toupper(substring(s1, 1, 1)), substring(s1, 2),
sep = "", collapse = " ")
}
names1 <- rownames(share.cold$major.sig)
apply(round(share.cold$major.sig, 3), 2, function(x) names1[order(abs(x), decreasing = T)[1 : 5]])
names.cold <- c("Traffic", "Sec. Sulfate", "Soil", "Residual oil", "Salt", "As/Se/Br", "P", "Metals")
names1 <- rownames(share.warm$major.sig)
apply(round(share.warm$major.sig, 3), 2, function(x) names1[order(abs(x), decreasing = T)[1 : 5]])
names.warm <- c("Sec. Sulfate", "Soil", "Fireworks", "Metals", "Salt", "P/V", "Residual oil", "As/Se/Br")
#### Table for cold
summ <- data.frame(names.cold, share.cold$summary[, c("monitor", "counties", "cons", "Median", "IQR")])
summ$IQR <- round(summ$IQR, 2)
temp <- sapply(as.character(summ$cons), function(x) {
x <- strsplit(x, ", ")[[1]]
for(i in 1 : length(x)) {
x1 <- x[i]
x1 <- simpleCap(as.character(x1))
x1 <- ifelse(x1 == "Elemental_carbon", "EC", x1)
x1 <- ifelse(x1 == "Sodium_ion", "Sodium ion", x1)
x1 <- ifelse(x1 == "Ammonium_ion", "Ammonium", x1)
x[i] <- x1
}
paste(x, collapse = ", ")
}, simplify = T)
summ$cons <- temp
colnames(summ) <- c("Sources", "Monitors", "Counties", "Major constituents", "Median", "IQR")
summ$Monitors <- as.integer(summ$Monitors)
summ$Counties <- as.integer(summ$Counties)
# colnames(summ)[-1] <- paste0(colnames(summ)[-1], ".cold")
summ[, 1] <- as.character(summ[, 1])
summ[which(summ[, 1] == "P"), 1] <- "P/V"
summ.cold <- summ
##### TABLE FOR WARM
summ <- data.frame(names.warm, share.warm$summary[, c("monitor", "counties", "cons", "Median", "IQR")])
summ$IQR <- round(summ$IQR, 2)
temp <- sapply(as.character(summ$cons), function(x) {
x <- strsplit(x, ", ")[[1]]
for(i in 1 : length(x)) {
x1 <- x[i]
x1 <- simpleCap(as.character(x1))
x1 <- ifelse(x1 == "Elemental_carbon", "EC", x1)
x1 <- ifelse(x1 == "Sodium_ion", "Sodium ion", x1)
x1 <- ifelse(x1 == "Ammonium_ion", "Ammonium", x1)
x[i] <- x1
}
paste(x, collapse = ", ")
}, simplify = T)
summ$cons <- temp
colnames(summ) <- c("Sources", "Monitors", "Counties", "Major constituents", "Median", "IQR")
summ$Monitors <- as.integer(summ$Monitors)
summ$Counties <- as.integer(summ$Counties)
# colnames(summ)[-1] <- paste0(colnames(summ)[-1], ".warm")
summ <- summ[match(names1, summ[, 1]), ]
summ.cold <- summ.cold[match(names1, summ.cold[, 1]), ]
print(xtable(summ.cold), include.rownames = F)
print(xtable(summ), include.rownames = F)
summ <- merge(summ, summ.cold, all = T)
########
# Results map
#set map defaults
#set size, bw
cex1 <- 2.2
cex2 <- 1.75
lwd1 <- 1
cols <- c(1, "grey50")
#set size, color
# lwd1 <- 2
# cex1 <- 3
# cex2 <- 2
cols <- brewer.pal(8, "Dark2")
#cols <- c("darkolivegreen3", "orange")
m1 <- map("state", fill = TRUE, col = "grey60",
ylim = c(36, 45), xlim = c(-90, -69),plot = F)
#rev1
cex1 <- 2.1
cex2 <- 1.2
names1 <- c("Metals", "Soil", "Sec. Sulfate", "Fireworks", "Salt", "P/V", "Residual oil", "As/Se/Br", "Traffic")
#pdf(file.path(paperdir, "map_east_sources_warm.pdf"), height = 7, width = 11)
pdf(file.path(paperdir, "map_east_sources_warm-rev1.pdf"), height = 7, width = 11)
par(mfrow = c(3, 3), mar = c(4, 2.5, 1.4, 0), oma = c(3,2,1,0))
reps <- seq(1, 9)
reps2 <- 0
for(j in c(1, 4, 7)) {
reps3 <- rep(reps[j : (j + 2)], 2)
reps2 <- c(reps2, reps3)
}
reps2 <- reps2[-1]
Sources <- sort(unique(unlist(share.warm$share)))
Sources <- Sources[!is.infinite(Sources)]
#layout(matrix(c(reps2, rep(10, 3)), 7, 3, byrow = TRUE))
l <- 1
for(k in names1) {
if(k %in% names.warm) {
i <- which(names.warm == k)
plot(m1$x, m1$y, type = "l", xlim = c(-90, -69),
ylim = c(36, 45), xlab = "", ylab = "",
cex.axis = 2, axes = F, col = "grey80")
keeps <- sapply(share.warm$share, function(x) {
ifelse(i %in% x, 1, 0)
})
wh1 <- which(keeps == 1)
mtext(names.warm[i], cex = 2)
points(monsKEEP[-wh1, c(2, 1)], col = cols[1], pch = "+", cex = cex1)
points(monsKEEP[wh1, c(2, 1)], col = cols[2], pch = 1, cex = cex2)
}else{
plot(1, 1, type = "n", axes = F, xlab = "", ylab = "")
}
if(l == 8) {
par(xpd = T)
legend(-87, 35.5, col = c(cols[2], cols[1]),
legend = c("Source found", "Source not found"),
pch = c(1, 3), cex = 1.5, border = NULL, bty = "n")
par(xpd = F)
}
l <- l + 1
}
graphics.off()
#pdf(file.path(paperdir, "map_east_sources_cold.pdf"), height = 7, width = 11)
pdf(file.path(paperdir, "map_east_sources_cold-rev1.pdf"), height = 7, width = 11)
par(mfrow = c(3, 3), mar = c(4, 2.5, 1.4, 0), oma = c(3,2,1,0))
reps <- seq(1, 9)
reps2 <- 0
for(j in c(1, 4, 7)) {
reps3 <- rep(reps[j : (j + 2)], 2)
reps2 <- c(reps2, reps3)
}
reps2 <- reps2[-1]
Sources <- sort(unique(unlist(share.cold$share)))
Sources <- Sources[!is.infinite(Sources)]
#layout(matrix(c(reps2, rep(10, 3)), 7, 3, byrow = TRUE))
l <- 1
names.cold[7] <- "P/V"
for(k in names1) {
if(k %in% names.cold) {
i <- which(names.cold == k)
plot(m1$x, m1$y, type = "l", xlim = c(-90, -69),
ylim = c(36, 45), xlab = "", ylab = "",
cex.axis = 2, axes = F, col = "grey80")
keeps <- sapply(share.cold$share, function(x) {
ifelse(i %in% x, 1, 0)
})
wh1 <- which(keeps == 1)
mtext(names.cold[i], cex = 2)
points(monsKEEP[-wh1, c(2, 1)], col = cols[1], pch = "+", cex = cex1)
points(monsKEEP[wh1, c(2, 1)], col = cols[2], pch = 1, cex = cex2)
}else{
plot(1, 1, type = "n", axes = F, xlab = "", ylab = "")
}
if(l == 8) {
par(xpd = T)
legend(-87, 35.5, col = c(cols[2], cols[1]),
legend = c("Source found", "Source not found"),
pch = c(1, 3), cex = 1.5, border = NULL, bty = "n")
par(xpd = F)
}
l <- l + 1
}
graphics.off()
|
591f8d54a53a3b6cd18263d63041ea61ca9337b5
|
33b8c52fc3fffcf88865e888ac8d3935afc80f0a
|
/datamass/Deszcz/rain_model_identification.R
|
d0e37aa610154ce0bcc3956dab7d025db8d94e70
|
[
"Apache-2.0"
] |
permissive
|
carldata/argo
|
f72082016471ad6d3df077d3625d44c9927bdb84
|
dede10a172f6eacf10d49091321f5e4f00b997c0
|
refs/heads/master
| 2022-05-14T11:09:03.182686
| 2022-04-25T11:33:48
| 2022-04-25T11:33:48
| 116,822,122
| 1
| 0
|
Apache-2.0
| 2018-02-15T09:11:36
| 2018-01-09T13:51:48
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,928
|
r
|
rain_model_identification.R
|
library("datetime")
new_probe_period<-30
load('rain_day.RData')
load('input_data.RData')
lambda <- 0.9
output_vector_length <- 2
input_vector_length <- 2
data_length <- output_vector_length+input_vector_length
### Clean model ###
params <- rep.int(0.1,data_length)
P <- diag(data_length)
### DATA ###
response_data <- rain_day;
output_vector <-rep.int(0,output_vector_length);
# -rev(response_data$flow[1:output_vector_length])
input_vector <- rev(response_data$rainfall[1:input_vector_length])
###
resp_length <- length(response_data[,1])
response_data[7] <- rep.int(0,resp_length);
response_data[8] <- rep.int(0,resp_length);
colnames(response_data)[7] <-'predicted_response'
colnames(response_data)[8] <-'prediction_error'
### RAINFALL RECALC FOR NEW PROBING PERIOD ###
for(i in 1:resp_length){
v<-response_data[i,]
filtered<-input_data[(input_data$timestamp >= v$timestamp - new_probe_period/2)
& (input_data$timestamp < v$timestamp + new_probe_period/2),]
response_data[i,]$rainfall <- sum(filtered$rainfall)
}
###
params<-c(-0.757,-0.163, 2.81,12) #2+2 1.168291 ### 30 min big rain
### PREDICTION ###
for(i in 1:resp_length){
v<-response_data[i,]
input_vector <- c(v$rainfall, head((input_vector),-1));
regression_vector <- c(output_vector,input_vector)
predicted_response <- drop(regression_vector %*% params)
prediction_error <- drop(v$flow - predicted_response)
response_data$predicted_response[i] <- predicted_response
response_data$prediction_error[i] <- prediction_error
#P
numerator <- (P %*% regression_vector %*% t(regression_vector) %*% P)
denominator<- drop(1/(lambda + t(regression_vector) %*% P %*% regression_vector))
P <- (1/lambda) * (P-numerator * denominator)
#L
L <- P %*% regression_vector
#params
# params <- params + L*prediction_error
output_vector <- c(-predicted_response,head((output_vector),-1));
}
###
### PLOT ###
plot_data <- response_data[,names(response_data) %in% c("timestamp","flow","rainfall","predicted_response","prediction_error")]
plot(plot_data$timestamp,plot_data$flow,type="l",xaxt='n',ylim=c(-5,350),xlab="", ylab="", col="blue")
lines(plot_data$timestamp,plot_data$predicted_response,type="l",xlab="", ylab="",col="orange")
# axis(2,at=plot_data$flow, las=0)
axis(1, at=plot_data$timestamp,labels=format.datetime(plot_data$timestamp,"%Y-%m-%d %H:%M"), las=0)
lines(plot_data$timestamp,plot_data$prediction_error,type="l",xaxt='n',xlab="", ylab="",col="red")
axis(4,at=plot_data$prediction_error, las=0)
par(new=TRUE)
plot(plot_data$timestamp,plot_data$rainfall,type="l",axes=FALSE,xlab="", ylab="",col="gray")
###
a_score<-mean(abs(response_data$prediction_error))
# print(a_score)
rmse_score<-RMSE(response_data$predicted_response,response_data$flow)
print(rmse_score)
# save(list=c('params','output_vector_length','input_vector_length','lambda'),file='rain_params.RData')
|
ba7a805a1ec115a65ac2fc3246e53815b7eb8e8a
|
14bc409b2f0a66e56d9b6d8a0774e187e48063d7
|
/plot1.R
|
6e18eaafbabdab9b2c4b90d42f4d82dddfd85b02
|
[] |
no_license
|
yumemi-hkamijo/exploratory_data_analysis_course_project2
|
6f8d5dbe6a22d826c461a30bed67ac781a266777
|
c91624a5a16d4bf30448ca3c240c0e3b7f488456
|
refs/heads/master
| 2021-03-02T01:05:09.576569
| 2020-03-09T14:22:55
| 2020-03-09T14:22:55
| 245,825,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 567
|
r
|
plot1.R
|
library('data.table')
NEI <- readRDS("data/exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("data/exdata_data_NEI_data/Source_Classification_Code.rds")
# get total emissions of the years 1999, 2002, 2005, and 2008.
aggregated_total_by_year <- aggregate(Emissions ~ year, NEI, sum)
png('plot1.png')
barplot(height=aggregated_total_by_year$Emissions,
names.arg=aggregated_total_by_year$year,
xlab="years",
ylab=expression('total PM'[2.5]*' emission'),
main=expression('Total PM'[2.5]*' emissions at various years'))
dev.off()
|
3031e43dbfae43f9e07695f31c52c0cf75ed55de
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/synlik/R/I_cleanHessian.R
|
a57c54b71fbfa95e8dacebd73e1350dcd357c0dc
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,320
|
r
|
I_cleanHessian.R
|
# Cleans the hessian used to get confidence intervals for the parameters
# Until the hessian is not positive definite, we take the smallest eigenvalues
# and increase them to a tolerance. We then invert this modified hessian and
# we remove the parameter with the highest variance from the hessian.
# We returned the reduced hessian and the indexes of the parameters that have
# been eliminated.
.cleanHessian <- function(hessian, tol = 1e-8)
{
badParam <- numeric()
repeat{
eDec <- eigen(hessian)
# Find small eigenvalues
lowEigen <- which(eDec$values < eDec$values[1] * tol)
# If there are none, I consider the hessian to be PD and I exit
if( length(lowEigen) == 0 ) break
# Increase the small eigenvalues to a threshold
eDec$values[lowEigen] <- eDec$values[1] * tol
# Invert the modified hessian to get the covariance
COV <- eDec$vectors%*%(t(eDec$vectors) / eDec$values)
# I identify the parameter with the highest variance, I remove the corresponding
# elements from the Hessian and I store its index
bad <- which.max( diag(COV) )
hessian <- hessian[-bad, -bad]
offset <- sum( badParam <= bad )
badParam <- c(badParam, bad + offset)
}
return( list("hessian" = hessian, "badParam" = badParam) )
}
|
867219471918a052fe39351c876c78dbd7d69c87
|
ab2ca92926a046e6cb5ef6677f0aa99d0a18c037
|
/Plot_04.R
|
82bfa2a8347c95082245d6cb7a7f563efc294e33
|
[] |
no_license
|
AlexanderSobolevV/ExData_Plotting1
|
ce645655af1d3be436b5f3cfd67a8e5f0076a178
|
d17929bf913279d178c9949ede760e47860cfcc1
|
refs/heads/master
| 2021-08-27T21:31:56.812305
| 2017-12-10T12:11:11
| 2017-12-10T12:11:11
| 112,948,295
| 0
| 0
| null | 2017-12-03T17:28:47
| 2017-12-03T17:28:47
| null |
UTF-8
|
R
| false
| false
| 1,331
|
r
|
Plot_04.R
|
# Plot 4 of the household power consumption assignment: a 2x2 panel of time
# series for 2007-02-01/02, copied to Plot4.png.
#optional area'
#setwd("/Users/i312190/Desktop/Data Science/UNIT4/WEEK1")
#optional area'
# Read the raw data; fields are ';'-separated and missing values are "?".
raw_data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';', na.strings = "?")
# Combine Date and Time into one timestamp string before parsing below.
raw_data$Time <- paste(raw_data$Date, raw_data$Time)
raw_data$Date <- as.Date(raw_data$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
new_data <- raw_data[raw_data$Date >= "2007-02-01" & raw_data$Date <= "2007-02-02" , ]
new_data$Time <- as.POSIXct(new_data$Time, format = "%d/%m/%Y %H:%M:%S")
# 2x2 grid of plots, filled row by row.
layout(matrix(c(1, 2, 3, 4), nrow=2, byrow=TRUE))
# Top-left: global active power over time.
plot(new_data$Time, new_data$Global_active_power, type = "l", ylab = "Global active power", xlab = "")
# Top-right: voltage over time.
plot(new_data$Time, new_data$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# Bottom-left: the three sub-metering series overlaid, with a legend.
plot(new_data$Time, new_data$Sub_metering_1, type = "l", xlab = "", col = "black", ylab = "Energy sub metering")
lines(new_data$Time, new_data$Sub_metering_2, type = "l", xlab = "", col = "red")
lines(new_data$Time, new_data$Sub_metering_3, type = "l", xlab = "", col = "blue")
legend(legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), x = "topright", y = "top", lty=1:1, cex = 0.8, bty = "n" )
# Bottom-right: global reactive power over time.
plot(new_data$Time, new_data$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
# Copy the on-screen plot to a 480x480 PNG and close the device.
dev.copy(png,'Plot4.png', width=480, height=480, res = 60)
dev.off()
|
a5d00b2e45b4d774bd5a87e742dc8accc80ea763
|
76db567fe36a2d907cbf083bfe9a46b907aa3922
|
/man/validate_metadata.Rd
|
3659ef906dfce5ee2b0ebb2e3b4f4de4969e5b8d
|
[] |
no_license
|
cran/sfarrow
|
2aa1d0aeac4bfc2cb3c38ee3774ef038bfc49fc4
|
12d732edbebeee9f3072a762c8bcebf2a75fb9b6
|
refs/heads/master
| 2023-09-04T06:31:38.973965
| 2021-10-27T15:30:02
| 2021-10-27T15:30:02
| 379,600,748
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 412
|
rd
|
validate_metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/st_arrow.R
\name{validate_metadata}
\alias{validate_metadata}
\title{Basic checking of key geo metadata columns}
\usage{
validate_metadata(metadata)
}
\arguments{
\item{metadata}{list for geo metadata}
}
\value{
None. Throws an error and stops execution
}
\description{
Basic checking of key geo metadata columns
}
\keyword{internal}
|
c0ea236cd073cf2145c19375098819528dddecf9
|
dded25b3cf087b2228c35a79d6bf21834455b932
|
/Visualization_Wrangling_Reporting/Data Visualization/ggplot2_geometrics.R
|
72c6fab0fe287723c4eb9856ba7dda7c26a834f6
|
[] |
no_license
|
PakistanAI/Data_Analytics_Certification
|
927c6839c6f250ddab32093b5139a61fce9aa79b
|
1a7ca14eb5d098ea13e05a3b877183c013cbc939
|
refs/heads/master
| 2020-04-18T22:02:18.649103
| 2019-04-11T15:38:48
| 2019-04-11T15:38:48
| 167,782,524
| 7
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,175
|
r
|
ggplot2_geometrics.R
|
# Worked ggplot2 exercises on the built-in mtcars data set: jittered scatter
# plots, histograms, and bar-plot position adjustments.

# Shown in the viewer:
ggplot(mtcars, aes(x = cyl, y = wt)) +
  geom_point()
# Solutions:
# 1 - With geom_jitter()
ggplot(mtcars, aes(x = cyl, y = wt)) +
  geom_jitter()
# 2 - Set width in geom_jitter()
ggplot(mtcars, aes(x = cyl, y = wt)) +
  geom_jitter(width=0.1)
# 3 - Set position = position_jitter() in geom_point() ()
ggplot(mtcars, aes(x = cyl, y = wt)) +
  geom_jitter(position = position_jitter(0.1))
# Jittering is useful when you have a discrete position, and a relatively
# small number of points that would otherwise take up as much space as a
# boxplot or a bar.
ggplot(mtcars, aes(x = cyl, y = wt)) + geom_boxplot(color="grey50") +
  geom_jitter(position = position_jitter(0.1))
#========Histograms=========
# Defaults to 30 bins
# many ways to find binwidth, use diff and range
# 1 - Make a univariate histogram
ggplot(mtcars, aes(x = mpg)) +
  geom_histogram()
# 2 - Plot 1, plus set binwidth to 1 in the geom layer
ggplot(mtcars, aes(x = mpg)) +
  geom_histogram(binwidth=1)
# 3 - Plot 2, plus MAP ..density.. to the y aesthetic (i.e. in a second aes() function)
# NOTE(review): ..density.. is the legacy dot-dot notation; newer ggplot2
# versions prefer after_stat(density).
ggplot(mtcars, aes(x = mpg)) +
  geom_histogram(binwidth=1, aes(y=..density..))
# 4 - plot 3, plus SET the fill attribute to "#377EB8"
ggplot(mtcars, aes(x = mpg)) +
  geom_histogram(binwidth=1, aes(y=..density..), fill="#377EB8")
#==========Bar plots============
# Convert cyl and am to factors so they are treated as discrete variables.
mtcars$cyl <- factor(mtcars$cyl)
mtcars$am <- factor(mtcars$am)
# Draw a bar plot of cyl, filled according to am
ggplot(mtcars, aes(x = cyl, fill = am))+
  geom_bar()
# Change the position argument to stack
ggplot(mtcars, aes(x = cyl, fill = am)) +
  geom_bar(position="stack")
# Change the position argument to fill
ggplot(mtcars, aes(x = cyl, fill = am)) +
  geom_bar(position="fill")
# Change the position argument to dodge
ggplot(mtcars, aes(x = cyl, fill = am)) +
  geom_bar(position="dodge")
# Define posn_d with position_dodge()
posn_d <- position_dodge(width=0.2)
# Change the position argument to posn_d
ggplot(mtcars, aes(x = cyl, fill = am)) +
  geom_bar(position = posn_d)
# Use posn_d as position and adjust alpha to 0.6
ggplot(mtcars, aes(x = cyl, fill = am)) +
  geom_bar(position = posn_d, alpha = 0.6)
|
0df44517eec019e7cd4b34856b1ee52e35f22fd9
|
02c27fc07ee76bf11d21c4cf59ae5d3b94194a3e
|
/man/Normal_ID.Rd
|
a15f8b3dcee2fc42c318e1fd8fa3dc6e4c0bf6c6
|
[] |
no_license
|
rnorouzian/BayesianforL2
|
cd581e5d5bba2de6d12411aa26214dab85490deb
|
e97f4d7fccf3c1b9ee1619dec496584b0d22f505
|
refs/heads/master
| 2021-01-20T16:09:22.503864
| 2017-12-02T06:49:24
| 2017-12-02T06:49:24
| 90,819,012
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,471
|
rd
|
Normal_ID.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Normal_ID.R
\name{Normal_ID}
\alias{Normal_ID}
\title{Normal Prior Distribution Identifier}
\usage{
Normal_ID(Low, High, Cover = NULL)
}
\arguments{
\item{Low}{the researcher's LOWEST value for the parameter.}
\item{High}{the researcher's HIGHEST value for the parameter.}
\item{Cover}{coverage for the Low and High values provided.}
}
\value{
Provides a graphical as well as a full textual description of a suitable Normal
distribution for researchers based on their knowledge about how High or Low
the parameter has been found in the literature. Also helps researchers
revise their prior by issuing various messages.
}
\description{
Uses the subject matter researcher's knowledge to generate
a corresponding Normal prior distribution.
}
\details{
Solves for and presents graphical and textual information about
an appropriate Normal prior distribution.
}
\examples{
# Suppose a researcher needs a Normal prior for a Cohen d effect size that in
# his/her view can't be less than -3 and more than +3. The researcher believes
# these two limit values cover 95\% of all possible values that this parameter
# can take:
Normal_ID (Low = -3, High = 3, Cover = '95\%')
# User can also use any value that is between 0 and 1 for the argument
# cover without using percentage sign:
Normal_ID (Low = -3, High = 3, Cover = 95)
}
\author{
Reza Norouzian <rnorouzian@gmail.com>
}
|
1d5e25b1bbf21a25c4e449eebfa7fb3f98889204
|
57fc9c6e10a2797b6aa852a5eb345df182a108eb
|
/tests/testthat/test-utils.R
|
34039a5c80b4e4f1fbd2edc07a1aaca608ce3472
|
[] |
no_license
|
aammd/remake
|
d36a1181ca4d0a6cca2fcce6a6231d28cbee72c7
|
9becf862b2270f146e23d3b5f54373dab67cd333
|
refs/heads/master
| 2021-01-15T21:24:21.421773
| 2015-09-02T06:18:30
| 2015-09-02T06:18:30
| 31,632,581
| 2
| 0
| null | 2015-03-04T02:01:50
| 2015-03-04T02:01:50
|
R
|
UTF-8
|
R
| false
| false
| 4,315
|
r
|
test-utils.R
|
context("Utilities")

# Modernized from the deprecated first-edition testthat API
# (expect_that / equals() / throws_error()) to the current expectations;
# the assertions are unchanged.
test_that("insert_at", {
  y <- "value"

  # Inserting into an empty container preserves the container's type.
  expect_equal(insert_at(list(), y, 1), list(y))
  expect_equal(insert_at(character(0), y, 1), y)

  # Valid positions are the integers 1..length(x)+1.
  expect_error(insert_at(list(), y, 0), "Invalid position")
  expect_error(insert_at(list(), y, 2), "Invalid position")
  expect_error(insert_at(list(), y, 1.1), "must be integer")

  # Insertion at the start, middle, and end of a named list.
  x <- list(a=1, b=2)
  expect_error(insert_at(x, y, 0), "Invalid position")
  expect_equal(insert_at(x, y, 1), c(list("value"), x))
  expect_equal(insert_at(x, y, 2), list(a=1, "value", b=2))
  expect_equal(insert_at(x, y, 3), list(a=1, b=2, "value"))
  expect_error(insert_at(x, y, 4), "Invalid position")

  # Same behaviour for a named character vector.
  x <- c(a="a", b="b")
  expect_error(insert_at(x, y, 0), "Invalid position")
  expect_equal(insert_at(x, y, 1), c("value", x))
  expect_equal(insert_at(x, y, 2), c(a="a", "value", b="b"))
  expect_equal(insert_at(x, y, 3), c(a="a", b="b", "value"))
  expect_error(insert_at(x, y, 4), "Invalid position")
})
# zip_dir() should create "<dirname>.zip" in the working directory and include
# the directory's files, both for a local directory and for one elsewhere.
test_that("zip_dir", {
  dir.create("test")
  file.copy(c("code.R", "remake.yml"), "test")
  dest <- zip_dir("test")
  expect_that(dest, equals("test.zip"))
  expect_that(file.exists("test.zip"), is_true())
  contents <- unzip("test.zip", list=TRUE)
  expected <- c("test/code.R", "test/remake.yml")
  expect_that(all(expected %in% contents$Name), is_true())
  file_remove("test.zip")

  ## Then, out of place:
  path <- file.path(tempdir(), "test")
  dir.create(path)
  file.copy(c("code.R", "remake.yml"), path)
  dest <- zip_dir(path)
  expect_that(dest, equals("test.zip"))
  # NOTE(review): `contents` is not re-read after this second zip_dir() call,
  # so the next assertion re-checks the first archive's listing; an
  # unzip(..., list=TRUE) refresh was probably intended here -- confirm.
  expect_that(all(expected %in% contents$Name), is_true())
  file_remove("test.zip")
})
# Outside any git repository, git_exists() must return FALSE without erroring
# or warning, and git_ignores() must report nothing as ignored.
test_that("git_exists", {
  ## Definitely not in a temp directory:
  owd <- setwd(tempdir())
  on.exit(setwd(owd))
  expect_that(git_exists(), not(throws_error()))
  expect_that(git_exists(), not(gives_warning()))
  expect_that(git_exists(), is_false())
  # With no repo, the empty input maps to an empty logical result.
  expect_that(git_ignores(character(0)), equals(logical(0)))
  expect_that(git_ignores("foo"), equals(FALSE))
})
# file_exists() should be strictly case-sensitive even on case-insensitive
# file systems (Mac/Windows), unlike base file.exists(); file_real_case()
# should recover the actual on-disk spelling (NA for missing files).
test_that("file_exists", {
  expect_that(file.exists("test-target.R"), is_true())
  ## This will be true on Mac and Windows; Linux and unixes will not
  ## see the file.
  expect_that(file.exists("test-target.r"), equals(is_case_insensitive()))
  expect_that(file_exists("test-target.r"), is_false())

  # Build copies of every file name with the last character lower/upper-cased.
  files <- dir(".", recursive=TRUE)
  ## There's a nicer way of doing this with sub I think.
  len <- nchar(files)
  files_lower <- paste0(substr(files, 1, len - 1),
                        tolower(substr(files, len, len)))
  files_upper <- paste0(substr(files, 1, len - 1),
                        toupper(substr(files, len, len)))

  ## Case munging sees them all:
  expect_that(file.exists(files_lower),
              equals(is_case_insensitive() | files == files_lower))
  expect_that(file.exists(files_upper),
              equals(is_case_insensitive() | files == files_upper))
  # file_exists() only matches names whose case is exactly right.
  expect_that(file_exists(files_lower),
              equals(files == files_lower))
  expect_that(file_exists(files_upper),
              equals(files == files_upper))

  ## Mix in some nonexistent things (shuffled in among the real names):
  fake <- c("test/fake.r", "fakedir/fake.r", "fake.r")
  o <- sample(length(files) + length(fake))
  files2 <- c(files, fake)[o]
  files2_lower <- c(files_lower, fake)[o]
  files2_upper <- c(files_upper, fake)[o]
  exists <- !(files2 %in% fake)
  expect_that(file.exists(files2), equals(exists))
  expect_that(file_exists(files2_lower),
              equals(exists & files2 == files2_lower))
  expect_that(file_exists(files2_upper),
              equals(exists & files2 == files2_upper))

  # file_real_case() returns the on-disk name for existing files...
  expect_that(file_real_case(files),
              equals(files))
  if (is_case_insensitive()) {
    expect_that(file_real_case(files_upper),
                equals(files))
    expect_that(file_real_case(files_lower),
                equals(files))
  }
  # ...and NA_character_ for files that do not exist.
  files2_real <- ifelse(exists, files2, NA_character_)
  expect_that(file_real_case(files2),
              equals(files2_real))
  if (is_case_insensitive()) {
    expect_that(file_real_case(files2_upper),
                equals(files2_real))
    expect_that(file_real_case(files2_lower),
                equals(files2_real))
  }
})
|
ceb645ea910e36cc0699dd3ef9059121a6201766
|
7747a3fdf0fdc57b767d8ed199b323afb4d491a2
|
/R/sim_transmit.r
|
f7caa107dd90c2f61a96a5fe1a91256cdf2e25d3
|
[] |
no_license
|
ianjonsen/simsmolt
|
dcafaad041d6caa29cd573cd543dbeab7e14868a
|
09c9a8b8132bedaa499dd71c5c2fc6e2439256eb
|
refs/heads/master
| 2022-07-28T10:03:06.683400
| 2022-07-07T14:13:08
| 2022-07-07T14:13:08
| 155,731,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,775
|
r
|
sim_transmit.r
|
#' @title Simulate telemetry transmitter signals along a path
#'
#' @description
#' `Modified from C. Holbrook by IDJ` Simulate tag signal transmission along a
#' pre-defined path (x, y coords) based on a transmitter delay range and the
#' duration of each signal. Consecutive path nodes are assumed to be 3600 s
#' apart (the original velocity argument was removed in this modification).
#'
#' @param path A data frame with at least two rows and columns
#'   \code{x} and \code{y} with coordinates that define the path. Columns
#'   \code{id} and \code{date} are also required; the first value of each is
#'   used for all transmissions.
#' @param delayRng A 2-element numeric vector with minimum and maximum delay
#'   (time in seconds from end of one coded burst to beginning of next).
#' @param burstDur A numeric scalar with duration (in seconds) of each coded
#'   burst (i.e., pulse train).
#'
#' @details
#' Delays are drawn from a uniform distribution defined by the delay range
#' (output is random; set a seed for reproducibility). Elapsed time at each
#' node in the path is fixed at 3600 s per step. Delays are simulated and
#' burst durations are added to each delay to determine the time of each
#' signal transmission. The location of each signal transmission along the
#' path is linearly interpolated.
#'
#' @return A data frame containing:
#'   \item{id}{ tag id (first value of \code{path$id}, recycled) }
#'   \item{date}{ date-time at the start of each transmission }
#'   \item{x}{ x coordinates for start of each transmission }
#'   \item{y}{ y coordinates for start of each transmission }
#'
#' @note
#' This function was written to be called before
#' \code{\link{detect_transmissions}}, which was designed to accept the result
#' as input (\code{trnsLoc}).
#'
#' @author C. Holbrook \email{cholbrook@usgs.gov}
#'
#' @examples
#' mypath <- data.frame(id = 1, date = Sys.time(),
#'                      x = seq(0, 1000, 100), y = seq(0, 1000, 100))
#' mytrns <- sim_transmit(mypath, delayRng = c(60, 180), burstDur = 5.0)
#' plot(mypath$x, mypath$y, type = "b")
#' points(mytrns$x, mytrns$y, pch = 20, col = "red")
#'
#' @importFrom stats runif approx
#' @export
sim_transmit <- function(path = NA, delayRng = c(60, 180), burstDur = 5.0) {
  # cumulative distance travelled in meters
  path$cumdistm <- c(0, cumsum(sqrt(diff(path$x)^2 + diff(path$y)^2)))
  path$etime <- c(0, cumsum(rep(3600, nrow(path) - 1))) # elapsed time in s
  # upper bound on the number of transmissions that fit in the track duration
  ntrns <- max(path$etime) / (delayRng[1] + burstDur)
  ints <- runif(ntrns, delayRng[1] + burstDur, delayRng[2] + burstDur)
  ints[1] <- runif(1, 0, ints[1]) # draw random the start time
  etime <- cumsum(ints) # elapsed time
  etime <- etime[etime <= max(path$etime)] # subset trans during track duration
  # interpolate transmit locations along track
  trns <- data.frame(id = rep(path$id[1], length(etime)),
                     date = path$date[1] + etime,
                     x = approx(path$etime, path$x, xout=etime)$y,
                     y = approx(path$etime, path$y, xout=etime)$y
                     )
  return(trns)
}
|
6f075bdd3844f100338d6e557406034ba425117a
|
07744eecf50ea11922ff44de44338e4d74604ae8
|
/cross_validation.R
|
33060d47a1337192c77faadab21488304caeac9c
|
[] |
no_license
|
mateuscgc/enem
|
21291413a6d2448ccdbe171d9b74e08adc3e26e3
|
e1ae7ecaa54d5fd1fa63cff9e3149fe00131f22e
|
refs/heads/master
| 2021-05-01T01:07:42.613914
| 2016-10-25T15:48:47
| 2016-11-28T13:10:35
| 68,045,503
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,951
|
r
|
cross_validation.R
|
# Biblioteca com implementação do Naive Bayes
library(e1071)
# TRUE when both labels fall on the same side of the 'P' status: either both
# are exactly 'P', or neither is. Intended for scalar inputs.
same_class <- function(a, b) {
  a_is_p <- a == 'P'
  b_is_p <- b == 'P'
  return((a_is_p && b_is_p) || (!a_is_p && !b_is_p))
}
# 10-fold cross-validation of a Naive Bayes classifier (e1071) predicting the
# essay status column IN_STATUS_REDACAO; prints confusion tables and draws a
# stacked bar plot per fold, then reports the mean error rate.
dados <- read.table("./processed/processed.csv", sep=",", header=TRUE)
folds <- 1:10
# Assign each row to a random fold.
dados$fold <- sample(folds, nrow(dados), replace = TRUE)
foldColNum <- grep("fold", names(dados))
erros <- c()
for(i in folds) {
  # Fold i is the test set; all other folds form the training set.
  # The fold column itself is dropped from both.
  test_set <- dados[dados$fold==i, -foldColNum]
  training_set <- dados[dados$fold %in% folds[-i], -foldColNum]
  model <- naiveBayes(IN_STATUS_REDACAO ~ ., data = training_set)
  pred <- predict(model, test_set)
  erros[i] = 0;
  cols <- c()
  rows <- c()
  corrc <- c()
  # Per-row bookkeeping: count misclassifications and build label vectors
  # for the confusion tables below.
  # NOTE(review): 1:length(pred) misbehaves when pred is empty (an empty
  # test fold); seq_along(pred) would be safer.
  for(r in 1:length(pred)) {
    if(pred[r] != test_set[r, "IN_STATUS_REDACAO"])
      erros[i] = erros[i] + 1
    # numeric_column(pred)
    if(test_set[r, "IN_STATUS_REDACAO"] == 'P')
      cols[r] = 'Ñ deu merda'
    else
      cols[r] = 'Deu merda'
    # same_class() groups statuses into 'P' vs everything else; a prediction
    # can match the class without matching the exact status.
    if(same_class(pred[r], test_set[r, "IN_STATUS_REDACAO"])) {
      if(pred[r] == test_set[r, "IN_STATUS_REDACAO"]) {
        rows[r] = 'classificou corretamente'
        corrc[r] = 'Predição correta'
      } else {
        rows[r] = 'classificou corretamente a classe'
        corrc[r] = 'Predição incorreta'
      }
    } else {
      rows[r] = 'classificou incorretamente'
      corrc[r] = 'Predição incorreta'
    }
  }
  # corrc <- merge(corrc, test_set[, "IN_STATUS_REDACAO"])
  stacks <- table(corrc, test_set[, "IN_STATUS_REDACAO"])
  # ColNums <- grep("P", names(stacks))
  print(stacks)
  barplot(stacks, main="Distribuição de predições por status e validade",
          xlab="Status da redação", col=c("white","grey"),
          legend = rownames(stacks))
  print(length(cols))
  print(length(rows))
  print(table(rows, cols))
  print(table(pred, test_set[, "IN_STATUS_REDACAO"]))
  # erros[i] = mean(pred, test_set[, "IN_STATUS_REDACAO"])
  # Convert the misclassification count into a per-fold error rate.
  erros[i] = erros[i] / length(pred)
}
print(erros)
print(mean(erros))
|
f4ef08e4986899f62782d00aa6923cc6f84d113e
|
38c9bdbe080e4ce9eac69231b6a7411bc99ae3a5
|
/server.R
|
35a5e42d192a380fd63ac26458749bf956d29dfb
|
[
"MIT"
] |
permissive
|
ctlab/shinygam
|
79c2a85971fc36abaa9ca960d67f412863c6fa21
|
a9e26e376d692ee3de2d97001a748335a1bd9e3d
|
refs/heads/master
| 2022-08-17T04:42:52.179887
| 2022-08-11T09:06:15
| 2022-08-11T09:06:15
| 29,388,290
| 10
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,986
|
r
|
server.R
|
# Shiny GAM server setup: packages, logging, KEGG network lookup table,
# module-finding solver configuration, and example DE data.
library(shiny)
library(data.table)
library(igraph)
library(GAM)
library(GAM.db)
library(GAM.networks)
library(RCurl)
library(parallel)
#library(xlsx)
library(pryr)
library(logging)
# Log DEBUG and above to stderr.
addHandler(writeToFile, file="/dev/stderr", level='DEBUG')
# Function composition operator from pryr.
"%o%" <- pryr::compose
options(shiny.error=traceback)
#options(shiny.trace=TRUE)
options(shiny.fullstacktrace=TRUE)
source("./functions.R")
#data("met.id.map")
#data("kegg.human.network")
#data("kegg.mouse.network")
#data("kegg.arabidopsis.network")
#data("kegg.yeast.network")
# Directory where files uploaded by key (URL query parameter) are stored.
uploaderPath <- "/mnt/data/upload"
# Organism code -> name of the lazily loaded KEGG network data set.
networks <- list(
    "mmu"="kegg.mouse.network",
    "hsa"="kegg.human.network",
    "ath"="kegg.arabidopsis.network",
    "sce"="kegg.yeast.network"
    )
heinz2 <- "/usr/local/lib/heinz2/heinz21"
# Three solvers with different time limits; the "description" attribute is
# shown to the user in the UI.
h.solver <- heinz.solver("/usr/local/lib/heinz/heinz.py", timeLimit=4*60)
attr(h.solver, "description") <- "Heinz (time limit = 4m)"
h2.solver <- heinz21.solver(heinz2, timeLimit=30, nthreads=detectCores())
attr(h2.solver, "description") <- "Heinz2 (time limit = 30s)"
g.solver <- gmwcs.solver("gmwcs", timeLimit=30, nthreads=detectCores())
attr(g.solver, "description") <- "gmwcs (time limit = 30s)"
example.gene.de.path <- "http://artyomovlab.wustl.edu/publications/supp_materials/GAM/Ctrl.vs.MandLPSandIFNg.gene.de.tsv"
example.met.de.path <- "http://artyomovlab.wustl.edu/publications/supp_materials/GAM/Ctrl.vs.MandLPSandIFNg.met.de.tsv"
# not sure if works
# NOTE(review): these example files are downloaded at app startup with no
# error handling -- a network failure here prevents the app from loading.
example.gene.de <- force(as.data.table(read.table(text=getURL(example.gene.de.path), stringsAsFactors=FALSE, header=1)))
attr(example.gene.de, "name") <- basename(example.gene.de.path)
example.met.de <- force(as.data.table(read.table(text=getURL(example.met.de.path), stringsAsFactors=FALSE, header=1)))
attr(example.met.de, "name") <- basename(example.met.de.path)
shinyServer(function(input, output, session) {
longProcessStart <- function() {
session$sendCustomMessage(type='showWaitMessage', list(value=T))
}
longProcessStop <- function() {
session$sendCustomMessage(type='showWaitMessage', list(value=F))
}
output$initialized <- renderJs('$("#initializing").hide()')
queryValues <- reactiveValues()
observe({
query <- parseQueryString(session$clientData$url_search)
if ("organism" %in% names(query)) {
updateSelectInput(session, "network", selected=query$organism)
#values$queryOrganism <- query$organism
}
if ("geneDE_key" %in% names(query)) {
geneDE_key <- gsub("[^a-z0-9]", "", query$geneDE_key)
loginfo("found key: %s", geneDE_key)
if (geneDE_key == "") {
geneDE_key <- NULL
}
queryValues$geneDE_key <- geneDE_key
}
})
loadExample <- reactive({
input$loadExampleGeneDE || input$loadExampleMetDE
})
getNetwork <- reactive({
net <- if (loadExample()) {
"kegg.mouse.network"
} else {
networks[[input$network]]
}
GAM:::lazyData(net)
res <- get(net)
# :ToDo: it's a hack
res$rxn2name$name <- ""
res
})
geneDEInput <- reactive({
if (loadExample()) {
if (input$loadExampleGeneDE) {
return(example.gene.de)
}
return(NULL)
}
if (!is.null(input$geneDE) && !is(input$geneDE, "data.frame")) {
# Value was reset
return(NULL)
}
if (!is.null(input$geneDE)) {
loginfo("GeneDE file:")
loginfo(capture.output(str(input$geneDE)))
loginfo("reading gene.de: %s", input$geneDE$name)
loginfo(" from file: %s", input$geneDE$datapath)
path <- input$geneDE$datapath
deName <- input$geneDE$name
} else if (!is.null(queryValues$geneDE_key)) {
# User has not uploaded a file yet but provided value by key
path <- file.path(uploaderPath, queryValues$geneDE_key)
loginfo("GeneDE from key, path: %s", path)
if (!file.exists(path)) {
return(NULL)
}
deName <- queryValues$geneDE_key
} else {
# User has not uploaded a file yet and we don't have a key
return(NULL)
}
network <- getNetwork()
gene.id.map <- network$gene.id.map
res <- read.table.smart.de.gene(path, idsList=gene.id.map)
logdebug(capture.output(str(res)))
if (!all(necessary.de.fields %in% names(res))) {
loginfo("not all fields in DE file: %s", input$geneDE$datapath)
if (grepl("xlsx?$", input$geneDE$name)) {
stop("We do not support excel files yet, please, use tab-separated files instead")
} else{
stop(paste0("Genomic differential expression data should contain at least these fields: ",
paste(necessary.de.fields, collapse=", ")))
}
}
attr(res, "name") <- deName
res
})
geneIdsType <- reactive({
data <- geneDEInput()
if (is.null(data)) {
return(NULL)
}
network <- getNetwork()
gene.id.map <- network$gene.id.map
res <- getIdType(data$ID, gene.id.map)
if (length(res) != 1) {
stop("Can't determine type of IDs for genes")
}
res
})
output$geneDESummary <- renderUI({
gene.de <- geneDEInput()
ids.type <- geneIdsType()
if (is.null(gene.de)) {
return("There is no genomic data")
}
div(
HTML(
vector2html(c(
"name" = attr(gene.de, "name"),
"length" = nrow(gene.de),
"ID type" = ids.type
))),
p("Top DE genes:"))
})
output$geneDETable <- renderTable({
data <- geneDEInput()
if (is.null(data)) {
return(NULL)
}
format(as.data.frame(head(data[order(pval)])), digits=3)
})
notMappedGenes <- reactive({
network <- getNetwork()
gene.id.map <- network$gene.id.map
geneIT <- geneIdsType()
if (is.null(geneIT)) {
return(NULL)
}
if (geneIT == network$gene.ids) {
return(NULL)
}
notMapped <- setdiff(geneDEInput()$ID, gene.id.map[[geneIT]])
notMapped
})
output$geneDENotMapped <- renderUI({
data <- geneDEInput()
if (is.null(data)) {
return(NULL)
}
notMapped <- notMappedGenes()
network <- getNetwork()
div(
p(sprintf("Not mapped to %s: %s", network$gene.ids, length(notMapped))),
if (length(notMapped) > 0) {
p("Top unmapped genes:",
a("show",
id="geneDENotMappedTableShowBtn",
href="javascript:void()",
onclick='$("#geneDENotMappedTable").show();$("#geneDENotMappedTableShowBtn").hide();$("#geneDENotMappedTableHideBtn").show()'),
a("hide",
id="geneDENotMappedTableHideBtn",
href="javascript:void()",
onclick='$("#geneDENotMappedTable").hide();$("#geneDENotMappedTableShowBtn").show();$("#geneDENotMappedTableHideBtn").hide()',
style="display:none"),
tag("script", '$("#geneDENotMappedTable").hide()')
)
} else NULL)
})
output$geneDENotMappedTable <- renderTable({
data <- geneDEInput()
if (is.null(data)) {
return(NULL)
}
notMapped <- notMappedGenes()
if (length(notMapped) == 0) {
return(NULL)
}
data <- data[order(pval)]
data <- data[ID %in% notMapped]
format(as.data.frame(head(data, n=20)), digits=3)
})
metDEInput <- reactive({
if (loadExample()) {
if (input$loadExampleMetDE) {
return(example.met.de)
}
return(NULL)
}
if (is.null(input$metDE)) {
# User has not uploaded a file yet
return(NULL)
}
if (!is(input$metDE, "data.frame")) {
# Value was reset
return(NULL)
}
loginfo("MetDE file:")
loginfo(capture.output(str(input$metDE)))
loginfo("reading met.de: %s", input$metDE$name)
loginfo(" from file: %s", input$metDE$datapath)
res <- read.table.smart.de.met(input$metDE$datapath)
logdebug(capture.output(str(res)))
if (!all(necessary.de.fields %in% names(res))) {
loginfo("not all fields in DE file: %s", input$metDE$datapath)
if (grepl("xlsx?$", input$metDE$name)) {
stop("We do not support excel files yet, please, use tab-separated files instead")
} else {
stop(paste0("Metabolic differential expression data should contain at least these fields: ",
paste(necessary.de.fields, collapse=", ")))
}
}
attr(res, "name") <- input$metDE$name
res
})
metIdsType <- reactive({
data <- metDEInput()
if (is.null(data)) {
return(NULL)
}
GAM:::lazyData("met.id.map")
res <- getIdType(data$ID, met.id.map)
if (length(res) != 1) {
stop("Can't determine type of IDs for metabolites")
}
res
})
output$metDESummary <- renderUI({
met.de <- metDEInput()
ids.type <- metIdsType()
if (is.null(met.de)) {
return("There is no metabolic data")
}
div(
HTML(
vector2html(c(
"name" = attr(met.de, "name"),
"length" = nrow(met.de),
"ID type" = ids.type
))),
p("Top DE metabolites:"))
})
output$metDETable <- renderTable({
data <- metDEInput()
if (is.null(data)) {
return(NULL)
}
format(as.data.frame(head(data[order(pval)])), digits=3)
})
notMappedMets <- reactive({
network <- getNetwork()
metIT <- metIdsType()
if (is.null(metIT)) {
return(NULL)
}
if (metIT == network$met.ids) {
return(NULL)
}
GAM:::lazyData("met.id.map")
notMapped <- setdiff(metDEInput()$ID, met.id.map[[metIT]])
})
output$metDENotMapped <- renderUI({
data <- metDEInput()
if (is.null(data)) {
return(NULL)
}
notMapped <- notMappedMets()
network <- getNetwork()
div(
p(sprintf("Not mapped to %s: %s", network$met.ids, length(notMapped))),
if (length(notMapped) > 0) {
p("Top unmapped metabolites:",
a("show",
id="metDENotMappedTableShowBtn",
href="javascript:void()",
onclick='$("#metDENotMappedTable").show();$("#metDENotMappedTableShowBtn").hide();$("#metDENotMappedTableHideBtn").show()'),
a("hide",
id="metDENotMappedTableHideBtn",
href="javascript:void()",
onclick='$("#metDENotMappedTable").hide();$("#metDENotMappedTableShowBtn").show();$("#metDENotMappedTableHideBtn").hide()',
style="display:none"),
tag("script", '$("#metDENotMappedTable").hide()')
)
} else NULL)
})
output$metDENotMappedTable <- renderTable({
data <- metDEInput()
if (is.null(data)) {
return(NULL)
}
notMapped <- notMappedMets()
if (length(notMapped) == 0) {
return(NULL)
}
data <- data[order(pval)]
data <- data[ID %in% notMapped]
format(as.data.frame(head(data, n=20)), digits=3)
})
output$updateEsParameters <- renderJs({
selected <- if (!is.null(metDEInput())) "edges" else "nodes"
return(sprintf("$('#reactionsAs')[0].selectize.setValue('%s')", selected))
})
experimentTag <- reactive({
geneData <- geneDEInput()
metData <- metDEInput()
name <- if (!is.null(geneData)) attr(geneData, "name") else attr(metData, "name")
tag <- name
tag <- gsub("\\.gz$", "", tag)
tag <- gsub("\\.([ct]sv|txt)$", "", tag)
tag
})
#
# output$reactionsAsHolder <- renderUI({
# gene.de <- geneDEInput()
#
# met.de <- metDEInput()
#
# selected <- if (!is.null(met.de)) "edges" else "nodes"
#
# selectInput("reactionsAs",
# label="Interpret reactions as",
# c("edges"="edges", "nodes"="nodes"),
# selected=selected)
# })
esInput <- reactive({
input$preprocess
loginfo("Preprocessing")
#input$runAll
network <- isolate(getNetwork())
gene.de <- isolate(geneDEInput())
gene.ids <- isolate(geneIdsType())
met.de <- isolate(metDEInput())
met.ids <- isolate(metIdsType())
tag <- isolate(experimentTag())
if (is.null(gene.de) && is.null(met.de)) {
return(NULL)
}
longProcessStart()
tryCatch({
if (!is.null(gene.de)) {
gene.de <- gene.de[which(gene.de$pval < 1),]
}
if (!is.null(met.de)) {
met.de <- met.de[which(met.de$pval < 1),]
}
reactions.as.edges = isolate(input$reactionsAs) == "edges"
collapse.reactions = isolate(input$collapseReactions)
use.rpairs = isolate(input$useRpairs)
es <- makeExperimentSet(
network=network,
met.de=met.de, gene.de=gene.de,
met.ids=met.ids, gene.ids=gene.ids,
reactions.as.edges=reactions.as.edges,
collapse.reactions=collapse.reactions,
use.rpairs=use.rpairs,
plot=F)
attr(es, "tag") <- tag
es
}, finally=longProcessStop())
})
output$networkSummary <- reactive({
es <- esInput()
net <- es$subnet
if (is.null(net)) {
return("There is no built network")
}
vector2html(c(
"number of nodes" = length(V(net)),
"number of edges" = length(E(net))
))
})
output$FDRParameters <- reactive({
res <- sprintf("%s;", input$resetFDRs)
es <- NULL
tryCatch({
es <- isolate(esInput())
}, error=function(e) {})
if (!is.null(es)) {
res <- paste0(res, generateFDRs(es))
} else {
res <- ""
}
res
})
output$networkParameters <- reactive({
es <- NULL
tryCatch({
es <- esInput()
}, error=function(e) {})
if (is.null(es)) {
return("")
}
res <- paste0(
makeJsAssignments(
network.available = TRUE,
network.hasReactionsAsNodes = !es$reactions.as.edges,
network.hasReactionsAsEdges = es$reactions.as.edges,
network.hasGenes = !is.null(es$fb.rxn),
network.hasMets = !is.null(es$fb.met),
network.usesRpairs = es$use.rpairs
)
)
if (isolate(input$autoFindModule)) {
res <- paste0(res, generateFDRs(es))
res <- paste0(res, '$("#find").trigger("click");')
}
res <- paste0(res, '$("#find").removeAttr("disabled").addClass("btn-default");')
res <- paste0(res, '$("#resetFDRs").removeAttr("disabled").addClass("btn-default");')
res
})
output$enableMakeNetwork <- renderJs({
res <- ""
canRun <- FALSE
tryCatch({
geneDE <- geneDEInput()
gIT <- geneIdsType()
metDE <- metDEInput()
mIT <- metIdsType()
canRun <- !is.null(geneDE) || !is.null(metDE)
}, error=function(e) {
# if anything happened, not running
})
if (canRun) {
res <- paste0(res, '$("#runStep1").removeAttr("disabled").addClass("btn-default");')
res <- paste0(res, '$("#runAll").removeAttr("disabled").addClass("btn-default");')
} else {
res <- paste0(res, '$("#runStep1").attr("disabled", "disabled");')
res <- paste0(res, '$("#runAll").attr("disabled", "disabled");')
}
res
})
output$showModulePanel <- renderJs({
if (!is.null(esInput())) { return("mp = $('#module-panel'); mp[0].scrollIntoView();")
}
# return("mp = $('#module-panel'); mp.hide();")
return("")
})
metFDR <- reactive({
10^input$metLogFDR
})
geneFDR <- reactive({
10^input$geneLogFDR
})
getSolver <- reactive({
es <- esInput()
if (is.null(es)) {
return(NULL)
}
if (input$solveToOptimality) {
h.solver
} else if (es$reactions.as.edges && !is.null(es$fb.rxn)) {
g.solver
} else {
h2.solver
}
})
output$solverString <- reactive({
es <- esInput()
solver <- getSolver()
if (!is.null(solver)) {
sprintf("Solver: %s", attr(solver, "description"))
} else {
""
}
})
esScoredInput <- reactive({
input$find
met.fdr <- isolate(metFDR())
gene.fdr <- isolate(geneFDR())
absent.met.score <- isolate(input$absentMetScore)
#absent.rxn.score <- isolate(input$absentRxnScore)
#absent.rxn.score <- 0
es <- isolate(esInput())
if (is.null(es)) {
return(NULL)
}
longProcessStart()
loginfo(paste0(attr(es, "tag"),".mp", # min p-value
if (es$reactions.as.edges) ".re" else ".rn",
".mf=", format(log10(met.fdr), digist=2),
".rf=", format(log10(gene.fdr), digist=2),
".ams=", absent.met.score
#, ".ars=", absent.rxn.score
))
res <- scoreNetwork(es,
met.fdr=met.fdr,
rxn.fdr=gene.fdr,
absent.met.score=absent.met.score,
#absent.rxn.score=absent.rxn.score,
met.score=-0.01,
rxn.score=-0.01)
res$description.string <- paste0(attr(es, "tag"),
if (es$reactions.as.edges) ".re" else ".rn",
".mf=", format(log10(met.fdr), digist=2),
".rf=", format(log10(gene.fdr), digist=2),
".ams=", absent.met.score
#, ".ars=", absent.rxn.score
)
res
})
rawModuleInput <- reactive({
input$find
esScored <- isolate(esScoredInput())
if (is.null(esScored)) {
return(NULL)
}
longProcessStart()
tryCatch({
solver <- isolate(getSolver())
#res <- induced.subgraph(esScored$subnet.scored, V(esScored$subnet.scored)[adj(adj(1))])
res <- findModule(esScored,
solver=solver)
if (is.null(res) || length(V(res)) == 0) {
stop("No module found")
}
res$description.string <- esScored$description.string
res
}, finally=longProcessStop())
})
# Post-process the raw module for display: strip solver scores and, depending
# on the network mode, add trans-rpair edges or common metabolites.
moduleInput <- reactive({
  module <- rawModuleInput()
  if (is.null(module)) {
    return(NULL)
  }
  # for consistency
  module <- remove.vertex.attribute(module, "score")
  module <- remove.edge.attribute(module, "score")
  es <- isolate(esInput())
  if (es$reactions.as.edges) {
    if (isolate(input$useRpairs)) {
      # NOTE(review): input$addTransPairs (and addCommonMetabolites below)
      # are read without isolate(), unlike useRpairs -- presumably so that
      # toggling them updates the displayed module immediately; confirm.
      if (input$addTransPairs) {
        module <- addTransEdges(module, es)
      }
    }
  } else {
    if (input$addCommonMetabolites) {
      module <- addMetabolitesForReactions(module, es)
      module <- removeHangingNodes(module)
    }
  }
  module$description.string <- rawModuleInput()$description.string
  module
})
# Short HTML summary (node/edge counts) of the current module for the UI.
output$moduleSummary <- reactive({
  module <- moduleInput()
  if (is.null(module)) {
    return("There is no module yet")
  }
  vector2html(c(
    "number of nodes" = length(V(module)),
    "number of edges" = length(E(module))
  ))
})
# JavaScript-side flag telling the client whether a module is available.
# Errors from moduleInput() are deliberately swallowed so the flag simply
# reads as "not available" instead of surfacing the error here.
output$moduleParameters <- reactive({
  m <- NULL
  tryCatch({
    m <- moduleInput()
  }, error=function(e) {})
  makeJsAssignments(
    module.available = !is.null(m)
  )
})
# (old graph-widget renderer, kept for reference)
#output$module <- renderGraph({
#  moduleInput()
#})
# Render the module as inline SVG with pan/zoom (svg-pan-zoom JS library).
output$module <- renderUI({
  sf <- svgFile()
  if (!is.null(sf)) {
    HTML(readLines(sf), "<script>var panZoomModule = svgPanZoom('#module svg');</script>")
  } else {
    HTML("")
  }
})
# XGMML downloads for the whole network and for the found module.
# NOTE(review): filename is supplied as a reactive; downloadHandler accepts
# any zero-argument function here and a reactive is callable, but a plain
# function() would be more conventional -- confirm before changing.
output$downloadNetwork <- downloadHandler(
  filename = reactive({ paste0("network.", tolower(esInput()$network$organism), ".xgmml") }),
  content = function(file) {
    saveModuleToXgmml(esInput()$subnet, file=file, name=tolower(esInput()$network$organism))
  })
output$downloadModule <- downloadHandler(
  filename = reactive({ paste0(moduleInput()$description.string, ".xgmml") }),
  content = function(file) {
    saveModuleToXgmml(moduleInput(), file=file, moduleInput()$description.string)
  })
# Write the module to a Graphviz .dot file in the session temp dir.
# Returns the path, or NULL while there is no module.
dotFile <- reactive({
  m <- moduleInput()
  if (is.null(m)) {
    return(NULL)
  }
  longProcessStart()
  res <- tempfile(pattern="module", fileext=".dot")
  saveModuleToDot(m, file=res, name=m$description.string)
  longProcessStop()
  res
})
# Render the .dot file to SVG via the external Graphviz "neato" binary
# (must be on PATH); stderr is discarded.
svgFile <- reactive({
  df <- dotFile()
  if (is.null(df)) {
    return(NULL)
  }
  res <- paste0(df, ".svg")
  system2("neato", c("-Tsvg",
                     "-o", res,
                     df), stderr=NULL)
  res
})
# Same as svgFile(), but renders to PDF.
pdfFile <- reactive({
  df <- dotFile()
  if (is.null(df)) {
    return(NULL)
  }
  res <- paste0(df, ".pdf")
  system2("neato", c("-Tpdf",
                     "-o", res,
                     df), stderr=NULL)
  res
})
# Export the module's metabolite and reaction tables as an Excel workbook
# with one sheet per table.
output$downloadXlsx <- downloadHandler(
  filename = reactive({ paste0(moduleInput()$description.string, ".xlsx") }),
  content = function(file) {
    module <- moduleInput()
    es <- isolate(esScoredInput())
    stopifnot(require(xlsx))  # fail loudly if the optional xlsx package is missing
    wb <- createWorkbook()
    vTable <- data.table(get.vertex.attributes(module))
    eTable <- data.table(get.edge.attributes(module, include.ends=TRUE))
    if (es$reactions.as.edges) {
      # metabolites are nodes, reactions are edges
      metTable <- vTable
      metTable[, nodeType := NULL]
      rxnTable <- eTable
    } else {
      # both metabolites and reactions are nodes, told apart by nodeType
      metTable <- vTable[nodeType == "met",]
      metTable[, nodeType := NULL]
      rxnTable <- vTable[nodeType == "rxn",]
      rxnTable[, nodeType := NULL]
    }
    metTable <- removeNAColumns(metTable)
    rxnTable <- removeNAColumns(rxnTable)
    addDataFrame(metTable, createSheet(wb, "metabolites"), row.names=FALSE)
    addDataFrame(rxnTable, createSheet(wb, "reactions"), row.names=FALSE)
    # (removed two unused locals, metInModule and geneInModule, that were
    # computed here but never referenced)
    saveWorkbook(wb, file)
  })
# Remaining download handlers (PDF/DOT renderings, Cytoscape VizMap style)
# and small static UI fragments.
output$downloadPDF <- downloadHandler(
  filename = reactive({ paste0(moduleInput()$description.string, ".pdf") }),
  content = function(file) {
    file.copy(pdfFile(), file)
  })
output$downloadDot <- downloadHandler(
  filename = reactive({ paste0(moduleInput()$description.string, ".dot") }),
  content = function(file) {
    file.copy(dotFile(), file)
  })
output$downloadVizMap <- downloadHandler(
  filename = "GAM_VizMap.xml",
  content = function(file) {
    file.copy(
      from=system.file("GAM_VizMap.xml", package="GAM"),
      to=file)
  })
# Package revision string shown in the footer.
output$GAMVersion <- renderUI({
  p(paste("GAM version:", sessionInfo()$otherPkgs$GAM$Revision))
})
# Links to bundled example differential-expression files.
output$geneDEExample <- renderUI({
  a("here", href=example.gene.de.path, target="_blank")
})
output$metDEExample <- renderUI({
  a("here", href=example.met.de.path, target="_blank")
})
# Duplicate of downloadVizMap for the link shown on the help page.
output$downloadVizMapInHelp <- downloadHandler(
  filename = "GAM_VizMap.xml",
  content = function(file) {
    file.copy(
      from=system.file("GAM_VizMap.xml", package="GAM"),
      to=file)
  })
})
|
2f6c958851c41429925102a9f3abd9ae692964f3
|
c2cd76dd228bd63faf5f4fa7864577ed8ee63085
|
/R/src/src_kymo/GetFluoTimeSeries.R
|
210ecad28e9dbd292ded6df469912f1774574309
|
[] |
no_license
|
destritux/CHUKNORRIS
|
0f575602a1276e6dc8fca15b874669448b215183
|
9002e5b835a4f0b1bb70917a32c2839eae191ef5
|
refs/heads/master
| 2021-07-02T08:46:38.223962
| 2017-09-23T20:05:37
| 2017-09-23T20:05:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
GetFluoTimeSeries.R
|
#-------------------------------------------------------------------------------
# Row-wise median fluorescence over a window of kymograph columns.
#
# kymo:      numeric matrix (rows = positions/time points, cols = frames).
# ini.ind:   first column of the averaging window.
# avg.width: the window extends avg.width columns past ini.ind, so
#            avg.width + 1 columns are used in total.
#
# Returns a numeric vector with one median per row of `kymo`.
GetFluoTimeSeries <- function(kymo, ini.ind, avg.width) {
  window.cols <- ini.ind:(ini.ind + avg.width)
  apply(kymo[, window.cols], 1, median)
}
#-------------------------------------------------------------------------------
|
0c1a63019a82f0859c3c2d94f523bd218c8707d3
|
1d71b1b06a24b54529fac458c8fe084ce7e3875b
|
/scripts/n4J_trait_DB_structure.R
|
22932214ade0e3251d796ce257e3667689da00ec
|
[] |
no_license
|
rjcmarkelz/BR_genome_DB
|
b76cf35d527ef33f07f733f5e4ce731c2913a110
|
637b4c0dd2fa5e78e8b7734f34526aeebbed967c
|
refs/heads/master
| 2016-09-05T18:01:21.416560
| 2015-12-11T23:45:09
| 2015-12-11T23:45:09
| 29,374,873
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,741
|
r
|
n4J_trait_DB_structure.R
|
# Prototype script: build a small Neo4j property graph for Brassica trait
# data with RNeo4j, run interactively against a local Neo4j instance.
install.packages("devtools")
devtools::install_github("nicolewhite/RNeo4j")
library(RNeo4j)
?startGraph
graph <- startGraph("http://localhost:7474/db/data/")
startGraph
?createNode
# First experiment with gene/genotype/qtl nodes (cleared again below).
addConstraint(graph, "gene", "name")
gene1 <- createNode(graph, "gene", name = "Bra_10001")
gene2 <- createNode(graph, "gene", name = "Bra_10002")
getConstraint(graph)
addConstraint(graph, "genotype", "name")
genotype3 <- createNode(graph, c("genotype", "environment"), name = "genotype3")
genotype2 <- createNode(graph, "genotype", name = "genotype2")
genotype1 <- createNode(graph, "genotype", name = "genotype1")
qtl3 <- createNode(graph, c("qtl", "environment"), name = "qtl3")
qtl2 <- createNode(graph, "qtl", name = "qtl2")
qtl1 <- createNode(graph, "qtl", name = "qtl1")
clear(graph)
# NOTE(review): the bare "Y" below is a transcript of the interactive
# confirmation prompt of clear(graph); as plain code it would error unless
# an object named Y exists.  This script is meant to be stepped through
# interactively, not sourced.
Y
browse(graph)
###########
###########
# TAGS
###########
###########
#species: name:
#genotype name:
?addConstraint
# Real schema: RIL genotypes with a uniqueness constraint on "genotype".
addConstraint(graph, "genotype", "genotype")
RIL_2 <- createNode(graph, c("genotype","Brassica"), genotype = "RIL_2")
RIL_1 <- createNode(graph, c("genotype","Brassica"), genotype = "RIL_1")
RIL_3 <- createNode(graph, c("genotype","Brassica"), genotype = "RIL_3")
#marker name:
# Genetic markers with chromosome and map position (cM) properties.
addConstraint(graph, "marker", "name")
mk_1 <- createNode(graph, c("marker","Brassica"), name = "A01x3323536", chr = "A01", cM = 3.323536)
mk_2 <- createNode(graph, c("marker","Brassica"), name = "A02x222925", chr = "A02", cM = 56.48621943)
mk_3 <- createNode(graph, c("marker","Brassica"), name = "A03x9953796", chr = "A03", cM = 54.25056711)
#individual
##investigation
##treatment
#individual
addConstraint(graph, "Observation", "observation")
# Observation nodes: one per measured value, with value and units.
# (fixed: observation id "2 " had a trailing space, creating an id
# inconsistent with the other plain-integer ids under the uniqueness
# constraint on "observation")
obs_1 <- createNode(graph, c("Observation"), observation = "1", value = 6, units = "cm")
obs_2 <- createNode(graph, c("Observation"), observation = "2", value = 5, units = "cm")
obs_3 <- createNode(graph, c("Observation"), observation = "3", value = 10, units = "cm")
obs_4 <- createNode(graph, c("Observation"), observation = "4", value = 20, units = "cm")
obs_5 <- createNode(graph, c("Observation"), observation = "5", value = 25, units = "cm")
obs_6 <- createNode(graph, c("Observation"), observation = "6", value = 25, units = "cm")
obs_7 <- createNode(graph, c("Observation"), observation = "7", value = 50, units = "ret time")
obs_8 <- createNode(graph, c("Observation"), observation = "8", value = 100, units = "ret time")
obs_9 <- createNode(graph, c("Observation"), observation = "9", value = 34, units = "ret time")
obs_10 <- createNode(graph, c("Observation"), observation = "10", value = 50, units = "ret time")
obs_11 <- createNode(graph, c("Observation"), observation = "11", value = 100, units = "ret time")
obs_12 <- createNode(graph, c("Observation"), observation = "12", value = 34, units = "ret time")
#time varying observations
obs_13 <- createNode(graph, c("Observation"), observation = "13", value = 50, units = "cm")
obs_14 <- createNode(graph, c("Observation"), observation = "14", value = 100, units = "cm")
obs_15 <- createNode(graph, c("Observation"), observation = "15", value = 1500, units = "cm")
obs_13
#investigations
# Context nodes: investigations (experiments), treatments, and plant
# structures, each with its own uniqueness constraint.
addConstraint(graph, "Investigation", "Investigation")
exp_1 <- createNode(graph, c("Investigation"), Investigation = "edwards")
exp_2 <- createNode(graph, c("Investigation"), Investigation = "brock")
exp_3 <- createNode(graph, c("Investigation"), Investigation = "duchaine")
#treatments
addConstraint(graph, "Treatment", "Treatment")
trt_1 <- createNode(graph, c("Treatment"), Treatment = "Uncrowded")
trt_2 <- createNode(graph, c("Treatment"), Treatment = "Crowded")
trt_3 <- createNode(graph, c("Treatment"), Treatment = "Drought")
#structures
addConstraint(graph, "Structure", "Structure")
leaf <- createNode(graph, c("Structure"), Structure = "leaf")
petiole <- createNode(graph, c("Structure"), Structure = "petiole")
root <- createNode(graph, c("Structure"), Structure = "root")
#traits
leaflength <- createNode(graph, c("leaflength","trait"), trait = "leaflength")
petiolelength <- createNode(graph, c("petiolelength","trait"),
	trait = "petiolelength")
metab <- createNode(graph, c("metabolite","trait"),
	name = "3-Hydroxypropyl", URL = "http://tinyurl.com/q8zrhsq")
# Link each trait to the anatomical structure it is measured in.
# (fixed: relationship type was misspelled "IN_STURCTURE", inconsistent
# with the "IN_STRUCTURE" relationships created for observations in this
# same script -- the typo would have produced a second, distinct
# relationship type in the graph)
createRel(leaflength, "IN_STRUCTURE", leaf)
createRel(petiolelength, "IN_STRUCTURE", petiole)
######
#RELATIONSHIPS
######
#obs 1 to 6
# Edwards experiment: leaf-length observations on petioles, three RILs,
# uncrowded (obs 1-3) vs crowded (obs 4-6).
createRel(obs_1, "IN_EXPERIMENT", exp_1)
createRel(obs_2, "IN_EXPERIMENT", exp_1)
createRel(obs_3, "IN_EXPERIMENT", exp_1)
createRel(obs_4, "IN_EXPERIMENT", exp_1)
createRel(obs_5, "IN_EXPERIMENT", exp_1)
createRel(obs_6, "IN_EXPERIMENT", exp_1)
createRel(obs_1, "IN_TREATMENT", trt_1)
createRel(obs_2, "IN_TREATMENT", trt_1)
createRel(obs_3, "IN_TREATMENT", trt_1)
createRel(obs_4, "IN_TREATMENT", trt_2)
createRel(obs_5, "IN_TREATMENT", trt_2)
createRel(obs_6, "IN_TREATMENT", trt_2)
createRel(obs_1, "IN_STRUCTURE", petiole)
createRel(obs_2, "IN_STRUCTURE", petiole)
createRel(obs_3, "IN_STRUCTURE", petiole)
createRel(obs_4, "IN_STRUCTURE", petiole)
createRel(obs_5, "IN_STRUCTURE", petiole)
createRel(obs_6, "IN_STRUCTURE", petiole)
createRel(obs_1, "TRAIT", leaflength)
createRel(obs_2, "TRAIT", leaflength)
createRel(obs_3, "TRAIT", leaflength)
createRel(obs_4, "TRAIT", leaflength)
createRel(obs_5, "TRAIT", leaflength)
createRel(obs_6, "TRAIT", leaflength)
createRel(obs_1, "IS_GENOTYPE", RIL_1)
createRel(obs_2, "IS_GENOTYPE", RIL_2)
createRel(obs_3, "IS_GENOTYPE", RIL_3)
createRel(obs_4, "IS_GENOTYPE", RIL_1)
createRel(obs_5, "IS_GENOTYPE", RIL_2)
createRel(obs_6, "IS_GENOTYPE", RIL_3)
######
#RELATIONSHIPS
######
#obs 7 to 12
# Brock experiment: metabolite observations on leaves, same RIL/treatment
# layout as above.
createRel(obs_7, "IN_EXPERIMENT", exp_2)
createRel(obs_8, "IN_EXPERIMENT", exp_2)
createRel(obs_9, "IN_EXPERIMENT", exp_2)
createRel(obs_10, "IN_EXPERIMENT", exp_2)
createRel(obs_11, "IN_EXPERIMENT", exp_2)
createRel(obs_12, "IN_EXPERIMENT", exp_2)
createRel(obs_7, "IN_TREATMENT", trt_1)
createRel(obs_8, "IN_TREATMENT", trt_1)
createRel(obs_9, "IN_TREATMENT", trt_1)
createRel(obs_10, "IN_TREATMENT", trt_2)
createRel(obs_11, "IN_TREATMENT", trt_2)
createRel(obs_12, "IN_TREATMENT", trt_2)
createRel(obs_7, "IN_STRUCTURE", leaf)
createRel(obs_8, "IN_STRUCTURE", leaf)
createRel(obs_9, "IN_STRUCTURE", leaf)
createRel(obs_10, "IN_STRUCTURE", leaf)
createRel(obs_11, "IN_STRUCTURE", leaf)
createRel(obs_12, "IN_STRUCTURE", leaf)
createRel(obs_7, "TRAIT", metab)
createRel(obs_8, "TRAIT", metab)
createRel(obs_9, "TRAIT", metab)
createRel(obs_10, "TRAIT", metab)
createRel(obs_11, "TRAIT", metab)
createRel(obs_12, "TRAIT", metab)
createRel(obs_7, "IS_GENOTYPE", RIL_1)
createRel(obs_8, "IS_GENOTYPE", RIL_2)
createRel(obs_9, "IS_GENOTYPE", RIL_3)
createRel(obs_10, "IS_GENOTYPE", RIL_1)
createRel(obs_11, "IS_GENOTYPE", RIL_2)
createRel(obs_12, "IS_GENOTYPE", RIL_3)
#####
#RELATIONSHIPS
######
#obs 13 15
# Duchaine experiment: time-varying observations, chained with TIME edges.
createRel(obs_13, "IN_EXPERIMENT", exp_3)
createRel(obs_14, "IN_EXPERIMENT", exp_3)
createRel(obs_15, "IN_EXPERIMENT", exp_3)
createRel(obs_13, "IN_TREATMENT", trt_3)
createRel(obs_14, "IN_TREATMENT", trt_3)
createRel(obs_15, "IN_TREATMENT", trt_3)
createRel(obs_13, "IN_STRUCTURE", leaf)
createRel(obs_14, "IN_STRUCTURE", leaf)
createRel(obs_15, "IN_STRUCTURE", leaf)
createRel(obs_13, "TRAIT", metab)
createRel(obs_14, "TRAIT", metab)
createRel(obs_13, "TIME", obs_14)
createRel(obs_14, "TIME", obs_15)
createRel(obs_13, "IS_GENOTYPE", RIL_1)
createRel(obs_14, "IS_GENOTYPE", RIL_2)
createRel(obs_15, "IS_GENOTYPE", RIL_3)
#genotype combo
# NOTE(review): rel_geno1_2/rel_geno2_2/rel_geno3_2 are assigned twice
# (first for RIL_2, then for RIL_3), so the RIL_2 relationship objects are
# overwritten.  Harmless here because the return values are never used
# again, but worth renaming if they ever are.
rel_geno1 <- createRel(RIL_1, "HAS_GENOTYPE", mk_1, genotype = "AA")
rel_geno2 <- createRel(RIL_1, "HAS_GENOTYPE", mk_2, genotype = "BB")
rel_geno3 <- createRel(RIL_1, "HAS_GENOTYPE", mk_3, genotype = "AA")
rel_geno1_2 <- createRel(RIL_2, "HAS_GENOTYPE", mk_1, genotype = "AA")
rel_geno2_2 <- createRel(RIL_2, "HAS_GENOTYPE", mk_2, genotype = "AA")
rel_geno3_2 <- createRel(RIL_2, "HAS_GENOTYPE", mk_3, genotype = "BB")
rel_geno1_2 <- createRel(RIL_3, "HAS_GENOTYPE", mk_1, genotype = "BB")
rel_geno2_2 <- createRel(RIL_3, "HAS_GENOTYPE", mk_2, genotype = "BB")
rel_geno3_2 <- createRel(RIL_3, "HAS_GENOTYPE", mk_3, genotype = "BB")
# QTL nodes linking traits to marker locations, with LOD and allele effects
# stored on the QTL_LOCATION relationships.
QTL_met <- createNode(graph, c("QTL"), name = "QTL_met")
QTL_leaf <- createNode(graph, c("QTL"), name = "QTL_leaf")
createRel(metab, "HAS_QTL", QTL_met)
createRel(QTL_met, "QTL_LOCATION", mk_1, lod = 4.6, R500_allele = 5, IMB_allele = 0.1)
createRel(QTL_met, "QTL_LOCATION", mk_3, lod = 10, R500_allele = 4, IMB_allele = 2)
createRel(leaflength, "HAS_QTL", QTL_leaf)
createRel(QTL_leaf, "QTL_LOCATION", mk_1, lod = 20, R500_allele = 5, IMB_allele = 0.1)
browse(graph)
#time
#leaf
#petiole
#root
#shoot
#vegetative
#reproductive
##name
##value
#qtl
#environment
#time
##datatype:light
##value:
##units:
|
aae5f9bc7e2af1ea257f62ec6cdab815d4fd910f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TreeBUGS/examples/plotFit.Rd.R
|
16d486380e11671347f3a3dbec98eed75d883fbf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
plotFit.Rd.R
|
# Auto-generated example script extracted from the TreeBUGS plotFit help
# page.  The "## Not run:" block stays commented out because it needs a
# previously fitted model object (fittedModel) that this script does not
# create.
library(TreeBUGS)
### Name: plotFit
### Title: Plot Posterior Predictive Mean Frequencies
### Aliases: plotFit
### ** Examples
## Not run:
##D # add posterior predictive samples to fitted model:
##D fittedModel$postpred$freq.pred <-
##D   posteriorPredictive(fittedModel, M=1000)
##D
##D # plot model fit
##D plotFit(fittedModel, stat = "mean")
## End(Not run)
|
155471ba8e5278808459597e7dc6e04cff3e562e
|
5781d284a1af118c558031848076df2dd090b37c
|
/man/aggreg_stratdata_in_harmonclasses.Rd
|
6bb8b7ce7a055785279970aa3e21e26876622e93
|
[
"MIT"
] |
permissive
|
TabeaSonnenschein/GenSynthPop
|
16b4a699bdd62cb691020bcc56a93b24b8b6e0bc
|
08298e6577fcb3bbc8d3a6e9c3ba293f5458a09f
|
refs/heads/main
| 2023-04-11T15:35:28.814404
| 2023-01-29T21:00:52
| 2023-01-29T21:00:52
| 594,842,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,518
|
rd
|
aggreg_stratdata_in_harmonclasses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_preparation.R
\name{aggreg_stratdata_in_harmonclasses}
\alias{aggreg_stratdata_in_harmonclasses}
\title{Aggregating a stratified dataset into the newly added harmonised classes}
\usage{
aggreg_stratdata_in_harmonclasses(
df,
harmon_var_col,
former_var_col,
marg_dist_collist
)
}
\arguments{
\item{df}{The stratified dataframe which contains old classes and a new harmonised class (corresponding to another dataframe) as well as some marginal distributions}
\item{harmon_var_col}{The name of the column that contains the new harmonised classes (e.g. output of varclass_harmonization function)}
\item{former_var_col}{The name of the old column that contains the classes that were initially in the dataframe and which we want to get rid of}
\item{marg_dist_collist}{A list of names of the columns that specify the marginal distributions (numbers/counts). These will have to be aggregated into the new harmonised classes}
}
\value{
new dataframe with only the harmonised classes, other stratified variables that were in the df, and the aggregated marginal distributions.
}
\description{
Once the new harmonised classes have been added to a stratified dataframe, the marginal distributions have to be aggregated correspondingly. This function does exactly that, resulting in a new dataframe with only the harmonised classes, the other stratified variables that were in the df, and the aggregated marginal distributions.
}
|
b6eebe689767fcea7ec7c59e090068fa7e53d2bd
|
821c5b17dc28a8504950794023c92434b964c997
|
/code/draft_scripts/merge-all.R
|
84c13989e70ffdc21f721b81315f26bda2d5f451
|
[] |
no_license
|
scmcdonald/honorsthesis
|
5a0d26b182b5790b90b1c3d8373889e62ed735fd
|
d3b26acce18c66e46970dbe5807c3b420f8ee616
|
refs/heads/master
| 2022-04-18T10:23:38.298822
| 2020-03-27T18:13:42
| 2020-03-27T18:13:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,672
|
r
|
merge-all.R
|
library(sf)
library(ggplot2)
library(maptools)
library(dplyr)
library(stringr)
# NOTE(review): plyr is attached after dplyr, so plyr masks dplyr verbs
# such as summarise/mutate for the rest of the session -- confirm this
# ordering is intentional (the script uses plyr::revalue and dplyr verbs).
library(plyr)
library(zoo)
# NOTE(review): `data` is used here before any visible assignment (it is
# only read from "data.csv" further down); this script appears to assume
# `data` already exists in the workspace when these lines run.
# Zero-pad 2-digit state FIPS codes that lost a leading zero.
data$FIPS <- ifelse(nchar(data$FIPS) != 2 ,gsub(" ", "", paste("0",data$FIPS), fixed = TRUE), data$FIPS)
#colnames(data)[colnames(data) == "CNTLAFFI"] <- "PUBPRIVATE"
# Recode IPEDS integer codes to value labels via the variables2.csv lookup:
# for each variable, merge in the label, drop the code column, and rename
# the label column back to the variable name.
var <- read.csv("variables2.csv")
sector <- var %>% filter(varname == "SECTOR")
data <- merge(data, sector[, c("codevalue","valuelabel")], by.x = "SECTOR", by.y = "codevalue", all.x = TRUE)
data <- subset(data,select=-c(SECTOR))
colnames(data)[colnames(data) == "valuelabel"] <- "SECTOR"
pubprivate <- var %>% filter(varname == "CNTLAFFI")
data <- merge(data, pubprivate[, c("codevalue", "valuelabel")], by.x = "CNTLAFFI", by.y = "codevalue", all.x = TRUE)
data <- subset(data,select=-c(CNTLAFFI))
colnames(data)[colnames(data) == "valuelabel"] <- "PUBPRIVATE"
relaff <- var %>% filter(varname == "RELAFFIL")
data <- merge(data, relaff[, c("codevalue", "valuelabel")], by.x = "RELAFFIL", by.y = "codevalue", all.x = TRUE)
data <- subset(data,select=-c(RELAFFIL))
colnames(data)[colnames(data) == "valuelabel"] <- "RELAFFIL"
deggrant <- var %>% filter(varname == "DEGGRANT")
data <- merge(data, deggrant[, c("codevalue", "valuelabel")], by.x = "DEGGRANT", by.y = "codevalue", all.x = TRUE)
data <- subset(data,select=-c(DEGGRANT))
colnames(data)[colnames(data) == "valuelabel"] <- "DEGGRANT"
carnegie <- var %>% filter(varname == "CARNEGIE")
data <- merge(data, carnegie[, c("codevalue", "valuelabel")], by.x = "CARNEGIE", by.y = "codevalue", all.x = TRUE)
data <- subset(data,select=-c(CARNEGIE))
colnames(data)[colnames(data) == "valuelabel"] <- "CARNEGIE"
# Spatial join: place each 2017 institution in a county, then attach the
# diocese counts for that county.
KML <- getKMLcoordinates(kmlfile = unzip(zipfile = "C:/Users/Sarah McDonald/Documents/honorsthesis/US_Dioceses.kmz", exdir = "~/honorsthesis/KML"), ignoreAltitude = TRUE)
cath_geo <- st_read("KML/doc.kml")
coords_17 <- data %>% filter(YEAR == 2017 & LINE == "Total") %>% select(UNITID, LONGITUD, LATITUDE)
coords_17$LONGITUD <- as.numeric(as.character(as.factor(coords_17$LONGITUD)))
coords_17$LATITUDE <- as.numeric(as.character(as.factor(coords_17$LATITUDE)))
coords_17 = st_as_sf(coords_17, coords = c("LONGITUD", "LATITUDE"), crs = st_crs(cath_geo))
counties <- st_read("tl_2017_us_county/tl_2017_us_county.shp")
counties = st_transform(counties, st_crs(cath_geo))
inst_county <- st_join(coords_17, counties)
inst_county <- inst_county[, c(1, 5)]
inst_county$GEOID <- as.character(as.factor(inst_county$GEOID))
count_dioceses <- read.csv("count_dioceses.csv")
count_dioceses$GEOID <- as.character(count_dioceses$GEOID)
count_dioceses$GEOID <- ifelse(nchar(count_dioceses$GEOID) != 5, gsub(" ", "", paste("0", count_dioceses$GEOID), fixed = TRUE), count_dioceses$GEOID)
inst_county<-st_set_geometry(inst_county, NULL)
inst_county_dioceses <- merge(inst_county, count_dioceses, by = "GEOID", all.x = TRUE)
inst_county_dioceses <- inst_county_dioceses[, -3]
data <- merge(data, inst_county_dioceses[, c(1:2, 6:7)], by = "UNITID", all = TRUE)
data <- subset(data, select = -c(FIPS))
# Attach county-level socioeconomic covariates by county GEOID and year.
socio <- read.csv("socioeconomic.csv")
socio <- socio[, -1]
socio$FIPS <- ifelse(nchar(socio$FIPS) != 5 ,gsub(" ", "", paste("0",socio$FIPS), fixed = TRUE), socio$FIPS)
data <- merge(data, socio, by.x = c("GEOID", "YEAR"), by.y = c("FIPS", "Year"), all.x = TRUE)
#write.csv(data, "data.csv")
data <- read.csv ("data.csv")
#data[levels(data$CARNEGIE)=="{Item not available}"] <- NA
# Recode the "not available" placeholder levels to NA.
data$CARNEGIE <- revalue(data$CARNEGIE, c("{Item not available}"=NA))
data$DEGGRANT <- revalue(data$DEGGRANT, c("{Not available}"=NA))
data <- data[,-c(1,2)]
# Carry institution coordinates across years: sort by UNITID/YEAR descending
# and fill gaps with last-observation-carried-forward so every year has a
# usable location for each school.
School_location <- data[,c("UNITID", "INSTNM", "LONGITUD", "LATITUDE", "YEAR")]
School_locations <- unique(School_location[order(School_location$UNITID,
                                                 School_location$YEAR,
                                                 decreasing = TRUE),])
# Latitude and longitude arrive as factors; convert to numeric via their
# character labels (paste) rather than their integer codes.
School_locations$LONGITUD <- as.numeric(paste(School_locations$LONGITUD))
School_locations$LATITUDE <- as.numeric(paste(School_locations$LATITUDE))
# Last obs. carried forward - the closest (most recent) year's info falls
# down to earlier years (e.g. 2009 to 2008 to 2007 ...).
School_locations$UNITID <- na.locf(School_locations$UNITID)
School_locations$INSTNM <- na.locf(School_locations$INSTNM)
School_locations$LONGITUD <- na.locf(School_locations$LONGITUD)
School_locations$LATITUDE <- na.locf(School_locations$LATITUDE)
School_locations <- unique(School_locations)
# Flag "problem schools": rows where the UNITID did not change but the
# rounded lat/long did, i.e. schools whose recorded location moved between
# years (kept as a diagnostic list for later outlier checks).
School_locations$ID_Diff <- c(0,diff(round(as.numeric(School_locations$UNITID)), differences = 1))
School_locations$Lat_Diff <- c(0,diff(round(as.numeric(School_locations$LATITUDE)), differences = 1))
School_locations$Long_Diff <- c(0,diff(round(as.numeric(School_locations$LONGITUD)), differences = 1))
Problem_IDS <- School_locations[School_locations$ID_Diff == 0 &
                                  (School_locations$Long_Diff != 0 &
                                     School_locations$Lat_Diff != 0),]$UNITID
Problem_Schools <- School_locations[School_locations$UNITID %in% Problem_IDS,]
Problem_Schools_in_IPEDS <- data[data$UNITID %in% Problem_IDS,]
School_locations_Final <- School_locations[,-6:-8]
# Merge the filled locations back in, replacing the original name/lat/long
# columns with the carried-forward versions.
data <- merge(data, School_locations_Final, by = c("UNITID", "YEAR"), all.x = TRUE)
data <- subset(data,select=-c(INSTNM.x, LONGITUD.x, LATITUDE.x))
colnames(data)[colnames(data) == "INSTNM.y"] <- "INSTNM"
colnames(data)[colnames(data) == "LONGITUD.y"] <- "LONGITUD"
colnames(data)[colnames(data) == "LATITUDE.y"] <- "LATITUDE"
##############other interpolations
# Same carry-forward treatment for the categorical institution variables
# (sector, control, religious affiliation, degree-granting status).
School_other <- data[,c("UNITID", "SECTOR", "PUBPRIVATE", "RELAFFIL", "DEGGRANT", "YEAR")]
School_others <- unique(School_other[order(School_other$UNITID,
                                           School_other$YEAR,
                                           decreasing = TRUE),])
# Last obs. carried forward - the closest year's info falls down to earlier
# years.  na.rm = FALSE keeps leading NAs (NAs in 2017 otherwise error).
School_others$PUBPRIVATE <- na.locf(School_others$PUBPRIVATE, na.rm = FALSE)
School_others$SECTOR <- na.locf(School_others$SECTOR)
School_others$RELAFFIL <- na.locf(School_others$RELAFFIL, na.rm = FALSE)
School_others$DEGGRANT <- na.locf(School_others$DEGGRANT, na.rm = FALSE)
School_others <- unique(School_others)
# Flag schools whose categorical values changed across years (diagnostic
# list, as for the locations above).
School_others$PUBPRIVATE_Diff <- c(0,diff(round(as.numeric(School_others$PUBPRIVATE)), differences = 1))
School_others$SECTOR_Diff <- c(0,diff(round(as.numeric(School_others$SECTOR)), differences = 1))
School_others$RELAFFIL_Diff <- c(0,diff(round(as.numeric(School_others$RELAFFIL)), differences = 1))
School_others$DEGGRANT_Diff <- c(0,diff(round(as.numeric(School_others$DEGGRANT)), differences = 1))
# NOTE(review): the last condition below omits "!= 0" (the numeric column
# is coerced to logical, so nonzero counts as TRUE) -- equivalent for
# non-NA values but inconsistent with the other conditions; confirm intent.
Problem_IDS2 <- School_others[School_others$PUBPRIVATE_Diff == 0 &
                                (School_others$SECTOR_Diff != 0 &
                                   School_others$RELAFFIL_Diff != 0 &
                                   School_others$DEGGRANT_Diff),]$UNITID
Problem_Schools2 <- School_others[School_others$UNITID %in% Problem_IDS2,]
Problem_Schools_in_IPEDS2 <- data[data$UNITID %in% Problem_IDS2,]
School_others_Final <- School_others[,-7:-10]
# Merge the filled categorical columns back in, replacing the originals.
data <- merge(data, School_others_Final, by = c("UNITID", "YEAR"), all.x = TRUE)
data <- subset(data,select=-c(SECTOR.x, PUBPRIVATE.x, RELAFFIL.x, DEGGRANT.x))
colnames(data)[colnames(data) == "SECTOR.y"] <- "SECTOR"
colnames(data)[colnames(data) == "PUBPRIVATE.y"] <- "PUBPRIVATE"
colnames(data)[colnames(data) == "RELAFFIL.y"] <- "RELAFFIL"
colnames(data)[colnames(data) == "DEGGRANT.y"] <- "DEGGRANT"
|
e9fcd77bb9786e5dd799caff81ff56587f00db8b
|
162fab589748e453ab81eca414b8153fec0c592f
|
/R/user_input.R
|
d6d4aff6e6acbced7703213ab14873d6a016daaa
|
[] |
no_license
|
TheWorkingBee/Scraper
|
770bb2e8a66a9546b48ac9e186802741d5725000
|
2f594da5472fd1c3168012856a0159fdcc21341a
|
refs/heads/main
| 2023-04-18T20:52:45.370007
| 2021-04-28T10:39:56
| 2021-04-28T10:39:56
| 362,377,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
r
|
user_input.R
|
## ---- user_input.R ----
# Date 2020-03-12
# Scraper configuration: start page for apartments listed for sale on
# blocket.se and the number of result pages to scrape.
home_url <- "https://www.blocket.se"
start_url <- "https://www.blocket.se/annonser/hela_sverige/bostad/lagenheter?cg=3020"
n_pages <- 9
# CSS selectors for the fields scraped from each listing (site-generated
# class names; these break whenever blocket.se redeploys its frontend).
table_div <- "KeyValTable__Row-ku6uw2-0 dhntKE"
price_selector <- ".EkzGO"
geo_selector <- ".dxqCwo+ .dxqCwo"
url_selector <- ".Rycta"
title_selector <- ".gjFLVU"
description_selector <- ".bYSeDO"
# Folders for cached ad/page downloads and the path of the database file.
data_file_path <- "../Data"
ad_folder_path <- str_c(data_file_path, "/", "ad")
page_folder_path <- str_c(data_file_path, "/", "page")
db_path <- str_c(data_file_path, "/", "df.db")
|
bef6d8b7f8b4148ad3a10ec63929deda5cbb5c7a
|
3393efb272d29d743658810e7358e557c3f06be7
|
/tests/mc_sim_eval_dtbs_glmm/b_run_mcsim.R
|
06a3d59ea3a57db2cbbc4add51bc74703349c82f
|
[] |
no_license
|
mikejacktzen/treeboot_double
|
5ab6752f6dc7b65724e657bde927f406ad50d059
|
88a90a04b98627a7134ea283edf68d19b49a982c
|
refs/heads/master
| 2023-02-21T23:11:15.109806
| 2021-01-15T20:44:09
| 2021-01-15T20:44:09
| 214,261,869
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,110
|
r
|
b_run_mcsim.R
|
# Data Generating Process to evaluate performance
# 1) list_param_rds
# batch submit num_mc_sim=1 iteration
# see how long it takes (hoffman will give you the .log file)
# keep track how many cores you used
# fake example: 8 cores takes 20 minutes for num_mc_sim=1
# Population
require(plyr)
require(dplyr)
require(RDStreeboot)
# generate pop ----------------------------------------------
# below two mimic dataset
# num.samp_use = c(4) # max recruits
# num.prob_use = c(0,c(1/4,1/4,1/4,1/4)) # recruit probab
# beta <- c(0,1,2) # Y~X fixed effects coefficient scenarios
# n_0_use = c(100,200,300)
# num.seeds_use = c(5,10,15)
# Simulation scenario grid.  A single scenario is used here; the commented
# vectors above show the full grid used in larger runs.
beta <- c(1) # Y~X fixed effects coefficient scenarios
n_0_use = c(100)
num.seeds_use = c(5)
list_param_rds = expand.grid(data.frame(n_use=n_0_use,num.seeds_use,beta))
rm(beta,n_0_use,num.seeds_use)
# Project helpers: data generation, estimation, and bootstrap machinery.
source("~/a_0_dtbs_helpers_all.R")
source("~/a_1_data_generating_process.R")
source("~/a_2_compute_quants.R")
args(gen_data_rds)
library(foreach)
library(doParallel)
# set number of cores to the pre-requested number of cores ---------------
num_core=16
cl <- parallel::makeCluster(num_core)
doParallel::registerDoParallel(cl)
# stopCluster(cl)
# Bootstrap sizes (B1 = B2) and Monte-Carlo replications per scenario.
B1s_pick = c(10) # B1=B2
num_mc_sim_pick=16
# purrr::pmap(list_param_rds,.f=one_popsim_eval_dtbs_b1b2)
# Monte-Carlo evaluation of the double tree-bootstrap (DTBS) confidence
# interval for one simulation scenario.
#
# n_use         sample size passed to the RDS data-generating process.
# num.seeds_use number of RDS seeds.
# beta          true Y~X fixed-effect coefficient (coverage target).
# B1_pick       number of first-level bootstrap resamples.
# B2_pick       number of second-level resamples per first-level resample.
# num_mc_sim    number of Monte-Carlo replications; each replication runs
#               on its own worker via %dopar% (a doParallel backend must
#               already be registered).
#
# Returns a one-row data.frame with the scenario parameters, the mean
# interval length, the empirical coverage of `beta`, and the number of
# replications producing a non-NA interval.
one_popsim_eval_dtbs_b1b2 <- function(n_use, num.seeds_use,
                                      beta,
                                      B1_pick, B2_pick,
                                      num_mc_sim) {
  require('dplyr')
  one_int_sim <- foreach(s = 1:num_mc_sim,
                         .packages = c('plyr', 'dplyr', 'tibble',
                                       'GLMMadaptive'),
                         .export = c('gen_data_rds',
                                     'rank_ids_tree_info',
                                     'get_ids_dtbs',
                                     'resample_id_tbs_dub',
                                     'remap_tree',
                                     'comp_est_over_ids_b_bb_onerow_serial',
                                     'form_int_dubboot',
                                     'compute_quants')) %dopar% {
    # ---- one Monte-Carlo replication ------------------------------------
    # 1) simulate an RDS sample and compute the original-sample estimate
    df_sim_one <- gen_data_rds(n_use = n_use, num.seeds_use = num.seeds_use,
                               beta = beta)
    df_4_estimate_sim <- df_sim_one$df_4_estimate
    df_est_samp_orig <- compute_quants(df_4_estimate = df_4_estimate_sim)
    # 2) draw the double-bootstrap resample ids; drop degenerate first-level
    #    resamples whose second level came back empty
    ids_resamp_b_bb <- get_ids_dtbs(B1_pick, B2_pick, df_sim_one)
    ind_skip <- unlist(purrr::map(unlist(ids_resamp_b_bb[, 2], FALSE),
                                  .f = function(xx) { is.null(unlist(xx)) }))
    sstest <- ids_resamp_b_bb[!ind_skip, ]
    # Wrap the fallible estimation steps once (loop-invariant) so a single
    # failed resample yields NA instead of killing the whole replication.
    compute_quants_possibly <- purrr::possibly(compute_quants, NA)
    comp_est_onerow_possibly <-
      purrr::possibly(comp_est_over_ids_b_bb_onerow_serial, otherwise = NA)
    # 3) serially re-estimate on every first-level resample; the Monte-Carlo
    #    level above is already parallel, so both bootstrap levels run
    #    serially inside the worker (no idle wait on group completion)
    ests_dtbs <- vector(mode = 'list', length = nrow(sstest))
    for (ii in seq_len(nrow(sstest))) {
      ests_dtbs[[ii]] <- comp_est_onerow_possibly(
        ids_b_bb_one = (sstest[ii, ]),  # row-indexing the tibble keeps list-columns intact
        df_use = df_4_estimate_sim,
        df_est_samp_orig = df_est_samp_orig,
        compute_quants = compute_quants_possibly)
    }
    # 4) pool the reference quantities and form the double-bootstrap interval
    df_ref_quants_sim_one <- do.call(rbind, ests_dtbs)
    df_ref_quants_sim_one_non_na <- na.omit(df_ref_quants_sim_one)
    int_dtbs <- form_int_dubboot(df_est_samp_orig = df_est_samp_orig,
                                 alpha = 0.05,
                                 df_ref_quants = df_ref_quants_sim_one_non_na)
    int_dtbs
  }
  # Summarize coverage and interval length over the replications.
  # (fixed: the coverage indicator used the scalar operator `&&` on the
  # vector columns ll/ul inside mutate(); on R >= 4.3 that errors on
  # length-> 1 inputs and on older R it silently used only the first
  # element.  The elementwise `&` is correct here.)
  df_int_all <- do.call(rbind, one_int_sim) %>%
    mutate(beta_inside = ifelse((ll <= beta) & (ul >= beta), 1, 0))
  mean_int_length <- mean(df_int_all$length_int)
  prop_cover <- mean(df_int_all$beta_inside)
  data.frame(n_use, num.seeds_use,
             beta,
             B1_pick, B2_pick,
             num_mc_sim,
             mean_int_length, prop_cover,
             n_sim_non_na = nrow(na.omit(df_int_all)))
}
# gen dtbs context --------------------------------------------------------
#
# don't care when B2 < B1, so repeat B2=B1
# B1s = c(100,200,300)
# B1s = c(20,30,40)
# B1s_pick = c(10) # B1=B2
# num_mc_sim_pick=16
# --- Double-bootstrap benchmark sweep ------------------------------------
# One row per (B1, B2) bootstrap-size combination; B2 is pinned to B1.
# NOTE(review): B1s_pick, num_mc_sim_pick, list_param_rds and
# one_popsim_eval_dtbs_b1b2 must be defined earlier in the script; they are
# not created here.
list_param_dtbs = data.frame(B1_pick=B1s_pick,B2_pick=B1s_pick)
# list_param_dtbs[1,]
# Pre-allocated container: one result list per (B1, B2) row.
results_all = vector(mode='list',length = nrow(list_param_dtbs))
# Time the whole sweep. NOTE(review): a `for` loop returns NULL invisibly,
# so out_timing is always NULL; the useful output is system.time() itself
# plus the side effect of filling results_all.
system.time(out_timing <- for(j in 1:nrow(list_param_dtbs)){
# list_param_dtbs[b,]
# Partially-applied runner: fixes (b1, b2), then maps the interval
# evaluation over every row of the simulation-parameter grid.
one_popsim_eval_dtbs_b1b2_fix = function(b1,b2){
eval_int_sim_one_b = purrr::pmap(list_param_rds,
# list_param_rds[c(1,10),],
.f=one_popsim_eval_dtbs_b1b2,
num_mc_sim=num_mc_sim_pick,
# num_mc_sim=8,
B1_pick=b1,B2_pick=b2)
return(eval_int_sim_one_b)
}
# j=2
results_all_oneb = one_popsim_eval_dtbs_b1b2_fix(b1=list_param_dtbs[j,1],
b2=list_param_dtbs[j,2])
results_all[[j]] = results_all_oneb
})
# Print the collected results for all (B1, B2) combinations.
results_all
# > results_all
# [[1]]
# [[1]][[1]]
# n_use num.seeds_use beta B1_pick B2_pick num_mc_sim mean_int_length prop_cover n_sim_non_na
# 1 100 5 1 50 50 8 0.7480822 0 8
# 8 cores, 8 sims , serial B1=50, serial B2=50
# user system elapsed
# 0.57 0.64 4858.78
# > 4858/60/60
# [1] 1.349444
# eg 1.3 hours for 1 sim of (2500) B1*B2 iters
# eg ~1.3 hours for 8 parallel sim of (2500) B1*B2 iters
# probably 5 hours if B1=B2=100 for 1 sim
# ~16 cores give you 64 parallel sims in 24 hours
# user system elapsed
# 4.09 0.28 1985.89
# yes perallel at top level and serial lower level faster because no wait time for group completion
# in that case, fastest approach would be to parallel at the mc sim level and serial lvl1 lvl2
# for package, par at lvl 1, since end users will not typicaly embed use in outter mc simulation
# eval summary --------------------------------------------------------------------
#
# results_all[[1]]
# > results_all
# [[1]]
# [[1]][[1]]
# n_use num.seeds_use beta B1_pick B2_pick num_mc_sim mean_int_length prop_cover n_sim_non_na
# 1 100 5 1 100 100 2 0.7474501 1 2
|
e00c41b75783a6d2f68888b56fe70790869d0539
|
bdcaa34802008ad9d28a451ac13b1f4d31c853df
|
/server.R
|
062454d44b9fcd30be0ef3f6f2a23af170eb1f01
|
[] |
no_license
|
deromed2000/presDDPcourse
|
73601da83a5d4e7a463edc6b0fab5336a7fc42bb
|
d4a42c3b7ab565ba5b929a1ee72810dc931d2f81
|
refs/heads/master
| 2021-01-10T11:56:07.696976
| 2015-09-28T07:01:53
| 2015-09-28T07:01:53
| 43,248,395
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,863
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(data.table)
library(maps)
library(rCharts)
library(reshape2)
library(markdown)
library(mapproj)
# One-time setup for the Shiny server: load the ISLR Wage data and fit a
# small random forest used for on-demand wage predictions.
library(ISLR);library(randomForest); library(caret); data(Wage)
# Wage1: data used for the charts (drops columns the charts do not show).
Wage1 <- subset(Wage,select=-c(logwage, region, sex, health))
# WageP: data used for prediction (keeps sex, which is a predictor below).
WageP <- subset(Wage,select=-c(logwage, region, health))
predVar <- c("age","sex","maritl","race","education","jobclass","health_ins")
# Deliberately small forest (15 trees) so the app starts quickly.
model.RF <- randomForest(x = WageP[ ,predVar],
y = WageP$wage,
ntree = 15)
years <- as.list(sort(unique(Wage$year)))
# <<- binds `health` in the global environment; renderUI below reads it.
health <<- as.character(sort(unique(Wage$health_ins)))
# Shiny server: reactive data prep, two rCharts bar charts (mean wage per
# year by education and by race), and a random-forest wage prediction.
# Relies on globals defined above: Wage1, WageP, health, model.RF.
shinyServer(function(input, output){
# Reactive copy of the chart data with NAs replaced by 0.
dt <- reactive({
tmp <- Wage1
tmp[is.na(tmp)] <- 0
tmp
})
# Checkbox group listing the health-insurance levels found in the data.
output$healthinsur <- renderUI({
# if(1) {
checkboxGroupInput('health', 'health insurance', health, selected=health)
# }
})
# Chart 1: mean wage per year, grouped by education, filtered by job class,
# age range and (intended) health-insurance selection.
output$WageAgebyeducation <- renderChart({
dt1 <- dt()
# NOTE(review): is.null() on an element of an atomic vector is always
# FALSE (a missing element is NA, not NULL), so these two filters can
# only fire when input$HealthInsurance itself is NULL — and in that case
# `NULL[1] == "1. Yes"` is logical(0) and if() errors. Likely is.na()
# was intended; confirm against the live app.
if(input$HealthInsurance[1] == "1. Yes" & is.null(input$HealthInsurance[2])){
dt1 <- dt1[dt1$health_ins == "1. Yes",]
}
if(input$HealthInsurance[2] == "2. No" & is.null(input$HealthInsurance[1])){
dt1 <- dt1[dt1$health_ins == "2. No",]
}
# if(input$HealthInsurance[2] == "2. No" & input$HealthInsurance[1] == "1. Yes"){
# dt1 <- dt1
# }
# Subset by job class and age range, then average wage per year*education.
if(input$JobClass == "Industrial") {
data <- dt1[dt1$jobclass == "1. Industrial" & dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year, education=data$education), mean)
} else if(input$JobClass == "Information") {
data <- dt1[dt1$jobclass == "2. Information" & dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year, education=data$education), mean)
} else {
data <- dt1[dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year, education=data$education), mean)
}
# Grouped bar chart (rCharts/NVD3) rendered into the dom id above.
p6 <- nPlot(wage ~ year, group = 'education', data = data, color = 'education',
type = 'multiBarChart', dom = 'WageAgebyeducation', width = 600)
p6$chart(margin = list(left = 100), reduceXTicks = FALSE)
p6$yAxis( axisLabel = "Wage", width = 80)
p6$xAxis( axisLabel = "Year", width = 70, staggerLabels = TRUE)
return(p6)
})
# Chart 2: mean wage per year, grouped by race.
output$WageAgebyrace <- renderChart({
dt1 <- dt()
# NOTE(review): same is.null() issue as above; additionally these two
# branches assign to `data`, which every branch below overwrites from
# dt1 — so the health-insurance filter has no effect here.
if(input$HealthInsurance[1] == "1. Yes" & is.null(input$HealthInsurance[2])){
data <- dt1[dt1$health_ins == "1. Yes",]
}
if(input$HealthInsurance[2] == "2. No" & is.null(input$HealthInsurance[1])){
data <- dt1[dt1$health_ins == "2. No",]
}
if(input$JobClass == "Industrial") {
data <- dt1[dt1$jobclass == "1. Industrial" & dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year, race = data$race), mean)
} else if(input$JobClass == "Information") {
data <- dt1[dt1$jobclass == "2. Information" & dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year, race = data$race), mean)
} else {
data <- dt1[dt1$age >= input$range[1] & dt1$age <= input$range[2],]
data <- aggregate(data.frame(wage=data$wage), by = list(year=data$year,race = data$race), mean)
}
p7 <- nPlot(wage ~ year, group = 'race', data = data, color = 'race',
type = 'multiBarChart', dom = 'WageAgebyrace', width = 600, height = 600)
p7$chart(margin = list(left = 100), reduceXTicks = FALSE)
p7$yAxis( axisLabel = "Wage", width = 80)
p7$xAxis( axisLabel = "Year", width = 70, staggerLabels = TRUE)
return(p7)
})
# Predict wage for a single user-specified profile with the pre-fit forest.
WagePrediction <<- function(df) predict(model.RF, newdata = df)
# One-row data frame built from the input widgets. The levels()<-
# assignments align factor levels with the training data.
# NOTE(review): levels()<- relabels by position, which can silently remap
# a single-level factor to the first training level — confirm intended.
dtr = reactive({tempd <- data.frame(age = input$age,
sex = input$sex,
maritl = input$maritl,
race = input$race,
education = input$education,
jobclass = input$jobclass,
health_ins = input$health_ins)
levels(tempd$sex) = levels(WageP$sex)
levels(tempd$maritl) = levels(WageP$maritl)
levels(tempd$race) = levels(WageP$race)
levels(tempd$education) = levels(WageP$education)
levels(tempd$jobclass) = levels(WageP$jobclass)
levels(tempd$health_ins) = levels(WageP$health_ins)
tempd
})
# Show the predicted wage for the current inputs.
output$valueinput <- renderPrint({ WagePrediction(dtr()) })
})
|
7592d558d63e803e2b705edf9743c8420465ddea
|
b179d78ff4cfadbdfd8ec20a38fbb9914a076b1c
|
/data.table/example_reference.R
|
25bd1c1bb8218fd3c10e2293a4b5bbc193d9cbe1
|
[] |
no_license
|
jbhender/CSCAR_Workshops
|
1dd68b83a57a39db62125bc13881989d8e72b16f
|
8fb89f09678fdc51a4b6a9d4e508ebdd01bcf199
|
refs/heads/main
| 2021-12-23T21:38:53.497557
| 2021-08-19T20:39:18
| 2021-08-19T20:39:18
| 189,452,495
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,075
|
r
|
example_reference.R
|
## Quick examples of updating by reference using data.table
##
## These examples are more abstract than practical.
##
## Updated: May 30, 2019
## Author: James Henderson, PhD (CSCAR)
# libraries: ------------------------------------------------------------------
library(tidyverse); library(data.table)
# create a data.frame and tibble/data.table copies for later user: ------------
# Build one data set three ways (data.frame, data.table, tibble) so the
# tracemem() calls below can show which operations copy and which modify
# in place.
n = 1e5
df = data.frame( id = 1:n,
x = rnorm(n),
group = sample(LETTERS, n, replace = TRUE)
)
df$group = as.character(df$group)
dt = as.data.table(df)
tbl = as_tibble(df)
# Let's investigate the structure of these data frames and their locations
# in memory.
tracemem(df); sapply(df, tracemem)
tracemem(tbl); sapply(tbl, tracemem)
tracemem(dt); sapply(dt, tracemem)
# The := operation in "j" adds columns by reference to a data.table.
dt[ , sign := {-1}^{x < 0}]
tracemem(dt); sapply(dt, tracemem)
# Can also be used as a function, useful for multiple assignments.
dt[ , `:=`( sign = {-1}^{x < 0}, positive = {x > 0}) ][]
# Use := NULL to delete a column by reference.
dt[ , positive := NULL][]
# Did location of list vector with pointers to columns change?
tracemem(dt); sapply(dt, tracemem)
# Rename group "G" to "g". What happens to location in G?
dt[ group == 'G', group := 'g' ]
tracemem(dt); sapply(dt, tracemem)
# Compare to data.frame: base modification copies.
df$sign = {-1}^{df$x < 0}
tracemem(df); sapply(df, tracemem)
df$group[ df$group == "G" ] = "g"
# Compare to tibble: mutate() also creates copies.
tracemem(tbl); sapply(tbl, tracemem)
tbl = tbl %>% mutate( sign = {-1}^{x < 0} )
tracemem(tbl); sapply(tbl, tracemem)
tbl = tbl %>% mutate( group = ifelse(group == "G", "g", group) )
tracemem(tbl); sapply(tbl, tracemem)
# data.table breaks R's copy-on-modify semantics, so be careful!
# dt2 is the SAME table as dt; := through dt2 also changes dt.
dt2 = dt
dt2[ group == "A", group := "a"]
dt2[ , .N, group][ order(group) ]
dt[ , .N, group][ order(group) ]
# Use copy to avoid affecting columns with multiple pointers.
dt3 = copy(dt)
dt3[ group == "B", group := "b"]
dt3[ , .N, group][ order(group) ]
dt[ , .N, group][ order(group) ]
|
7693f9ec9681a5ff342daebe9b6d547dba3ce61a
|
64452d8bbc144fdfc96df86c4582dc8537024608
|
/man/share_secret.Rd
|
5b5512a098c982a0443a775e99c33385665eec06
|
[] |
no_license
|
GreyZephyr/secret
|
5dc5820063cae33f69a4272acfc463e1c4bc460c
|
c308ffadde5b326ac628f023887e36b950e5e356
|
refs/heads/master
| 2020-12-28T17:24:12.117873
| 2019-10-22T07:57:26
| 2019-10-22T07:57:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,932
|
rd
|
share_secret.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/secrets.R
\name{share_secret}
\alias{share_secret}
\title{Share a secret among some users.}
\usage{
share_secret(name, users, key = local_key(), vault = NULL)
}
\arguments{
\item{name}{Name of the secret, a string that can contain alphanumeric
characters, underscores, dashes and dots.}
\item{users}{addresses of users that will have access to the secret.
(See \code{\link[=add_user]{add_user()}}).}
\item{key}{Private key that has access to the secret. (I.e. its
corresponding public key is among the vault users.)}
\item{vault}{Vault location (starting point to find the vault).
To create a vault, use \code{\link[=create_vault]{create_vault()}} or \code{\link[=create_package_vault]{create_package_vault()}}.
If this is \code{NULL}, then \code{secret} tries to find the vault automatically:
\itemize{
\item If the \code{secret.vault} option is set to path, that is used as the
starting point.
\item Otherwise, if the \code{R_SECRET_VAULT} environment variable is set to a
path, that is used as a starting point.
\item Otherwise the current working directory is used as the starting
point.
}
If the starting point is a vault, that is used. Otherwise, if the
starting point is in a package tree, the \code{inst/vault} folder is used
within the package. If no vault can be found, an error is thrown.}
}
\description{
Use this function to extend the set of users that have access to a
secret. The calling user must have access to the secret as well.
}
\seealso{
\code{\link[=unshare_secret]{unshare_secret()}}, \code{\link[=list_owners]{list_owners()}} to list users that have
access to a secret.
Other secret functions: \code{\link{add_secret}},
\code{\link{delete_secret}}, \code{\link{get_secret}},
\code{\link{list_owners}}, \code{\link{list_secrets}},
\code{\link{local_key}}, \code{\link{unshare_secret}},
\code{\link{update_secret}}
}
|
7bc1ecc37c70816b3fb61518f024b24548775af6
|
fd869b374a6a819e26262f873b2ec6b1def14398
|
/cache_matrix.R
|
50c03e94dadaa1720fcda8340a82dfd9712d2da7
|
[] |
no_license
|
magicbunny1103/R-programming
|
d3e5de9999cd073a90aa70f12bfa3d3c3a1023e3
|
9b5b868865f922c29a497ee04dbd138b804850b6
|
refs/heads/master
| 2020-06-13T16:58:04.424836
| 2019-07-01T19:06:46
| 2019-07-01T19:06:46
| 194,722,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,293
|
r
|
cache_matrix.R
|
#The first function, makeCacheMatrix creates a special "matrix", which is really a list containing a function to
#set the value of the matrix
#get the value of the matrix
#set the value of the invertible matrix
#get the value of the invertible matrix
# Create a matrix wrapper able to cache its inverse.
#
# Returns a list of four accessor closures sharing one enclosing
# environment:
#   set_matrix(y)  - replace the stored matrix and invalidate the cache
#   get_matrix()   - return the stored matrix
#   set_inverse(i) - store a computed inverse in the cache
#   get_inverse()  - return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set_matrix = function(y) {
      x <<- y                  # replace the stored matrix ...
      cached_inverse <<- NULL  # ... and drop any stale cached inverse
    },
    get_matrix = function() x,
    set_inverse = function(inverse) cached_inverse <<- inverse,
    get_inverse = function() cached_inverse
  )
}
#The following function returns a matrix that is the inverse of x.
#However, it first checks to see if the invertible matrix has been cached. If so, it gets the matrix from the cache and skips the inverse. Otherwise, it invert the matrix, set and return the invertible matrix.
# Return the inverse of the special "matrix" created by makeCacheMatrix().
#
# If an inverse is already cached it is returned directly (with a message);
# otherwise the inverse is computed with solve(), stored in the cache, and
# returned. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$get_inverse()
  if (is.null(cached)) {
    # Cache miss: compute, remember, return.
    cached <- solve(x$get_matrix(), ...)
    x$set_inverse(cached)
  } else {
    message("getting cached invertible matrix")
  }
  cached
}
|
d2630ccb6295795bef1b2aac95dadf74b30217e4
|
3f00f7c81c6ed9bb50db182fa6652e26a062a5f1
|
/man/consensus_LG_assignment.Rd
|
9c4e3ad7ba369fcf1ddd05324daf838d68dbf05a
|
[] |
no_license
|
cran/polymapR
|
c2c2130a476b2e1da85b1ac0d96cd330b4eb2b1a
|
ae247e7e8fb238f9fd8933d12e2d46ae04606b1e
|
refs/heads/master
| 2023-03-19T23:30:05.960131
| 2023-03-13T16:20:02
| 2023-03-13T16:20:02
| 113,219,924
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,401
|
rd
|
consensus_LG_assignment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exported_functions.R
\name{consensus_LG_assignment}
\alias{consensus_LG_assignment}
\title{Consensus LG assignment}
\usage{
consensus_LG_assignment(
P1_assigned,
P2_assigned,
LG_number,
ploidy,
consensus_file = NULL,
log = NULL
)
}
\arguments{
\item{P1_assigned}{A marker assignment file of the first parent. Should contain the number of linkages per LG per marker.}
\item{P2_assigned}{A marker assignment file of the second parent. Should be the same markertype as first parent and contain the number of linkages per LG per marker.}
\item{LG_number}{Number of linkage groups (chromosomes).}
\item{ploidy}{Ploidy level of plant species.}
\item{consensus_file}{Filename of consensus output. No output is written if NULL.}
\item{log}{Character string specifying the log filename to which standard output should be written. If NULL log is send to stdout.}
}
\value{
Returns a list containing the following components:
\item{P1_assigned}{
A (modified) marker assignment matrix of the first parent.
}
\item{P2_assigned}{
A (modified) marker assignment matrix of the second parent.
}
}
\description{
Assign markers to an LG based on consensus between two parents.
}
\examples{
data("P1_SxS_Assigned", "P2_SxS_Assigned_2")
SxS_Assigned_list <- consensus_LG_assignment(P1_SxS_Assigned,P2_SxS_Assigned_2,5,4)
}
|
9a67bfc7a9b781d83baa8d63aa66f500db2b3f17
|
fd365694237edb699e53eef04f1c3c0ff649f3c8
|
/man/opal.assign.Rd
|
46ef3ed25738e1a103fd106f37e2d0f97681a5b8
|
[] |
no_license
|
obiba/opalr
|
f73a0eb0280bc768b47711d6a1a08ce0eded7ce1
|
5ca4936deae7e3410db5ee6a02df7994ff5fa336
|
refs/heads/master
| 2023-08-03T06:18:07.954481
| 2023-07-21T06:58:07
| 2023-07-21T06:58:07
| 166,788,279
| 3
| 3
| null | 2021-05-13T15:50:49
| 2019-01-21T09:45:41
|
R
|
UTF-8
|
R
| false
| true
| 2,636
|
rd
|
opal.assign.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opal.assign.R
\name{opal.assign}
\alias{opal.assign}
\title{Data or expression assignment}
\usage{
opal.assign(
opal,
symbol,
value,
variables = NULL,
missings = FALSE,
identifiers = NULL,
id.name = NULL,
updated.name = NULL,
async = FALSE
)
}
\arguments{
\item{opal}{Opal object or list of opal objects.}
\item{symbol}{Name of the R symbol.}
\item{value}{The value to assign evaluated in the following order: a R expression, a function, a fully qualified name of a variable or a table in Opal or any other R object (data.frame, vector).}
\item{variables}{List of variable names or Javascript expression that selects the variables of a table (ignored if value does not refere to a table). See javascript documentation: http://wiki.obiba.org/display/OPALDOC/Variable+Methods}
\item{missings}{If TRUE, missing values will be pushed from Opal to R, default is FALSE. Ignored if value is an R expression.}
\item{identifiers}{Name of the identifiers mapping to use when assigning entities to R (from Opal 2.0).}
\item{id.name}{Add a vector with the given name representing the entity identifiers (from Opal 2.6). Default is NULL.}
\item{updated.name}{Add a vector with the given name representing the creation and last update timestamps (from Opal 2.6). Default is NULL.}
\item{async}{R script is executed asynchronously within the session (default is FALSE). If TRUE, the value returned is the ID of the command to look for (from Opal 2.1).}
}
\description{
Assign a Opal table, or a R expression or a R object to a R symbol in the current R session.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
# assign a list of variables from table CNSIM1
opal.assign(o, symbol="D", value="datashield.CNSIM1", variables=list("GENDER","LAB_TSC"))
# assign all the variables matching 'LAB' from table HOP of opal object o
opal.assign(o, symbol="D", value="datashield.CNSIM1", variables="name().matches('LAB_')")
# assign a function and call it
opal.assign.script(o, 'hello', quote(function(x) { print(paste0('Hello ', x , '!'))}))
opal.execute(o, "hello('Mr Bean')")
# push an arbitrary data frame to the R server
#opal.assign(o, "D", mtcars)
# push an arbitrary vector to the R server
#opal.assign(o, "C", mtcars$cyl)
opal.logout(o)
}
}
\seealso{
Other assignment functions:
\code{\link{opal.assign.data}()},
\code{\link{opal.assign.resource}()},
\code{\link{opal.assign.script}()},
\code{\link{opal.assign.table.tibble}()},
\code{\link{opal.assign.table}()}
}
\concept{assignment functions}
|
40ef156343bd52ba6bef17061c85620b2ba719e2
|
fa52fd0c2d9eda31ad27c0c9521bc8d0747316d4
|
/5. Basic t-procedures/DA5_t_procedures.R
|
44c7c0fd54c299a6ef031e556fa189ef5e9d1540
|
[] |
no_license
|
IvanHalim/r-statistics
|
ca0360755d1feeb524b3c1e05e13b9b494a488ff
|
166725cc3f60f7a01c1bc6b58682100dc7f3a21b
|
refs/heads/master
| 2020-05-18T15:49:56.702530
| 2019-05-02T02:36:33
| 2019-05-02T02:36:33
| 184,510,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,271
|
r
|
DA5_t_procedures.R
|
##########PART 1 START ############################################################################# # R code and explanation for data analysis 5 t procedures.
# Read in the microbeersW19.csv dataset
# Interactive load: file.choose() opens a file picker; select
# microbeersW19.csv.
microbeers = read.csv(file.choose(), header = TRUE)
# gives variable names.
names(microbeers) # gives variable names.
# Make an appropriate visual display for abv.
# Recall hist() or boxplot()
# Add a title.
# Add color and other aesthetics if you like.
# See week 3 lessons or the script from Data Analysis #3.
# Calculate the mean and standard deviation. mean() and sd()
# Again week 3 lessons or the script from Data Analysis #3.
# Perform a t test using the t.test() command.
# The format is t.test(data, mu = mu_0, alternative = "alt")
# where data is a quantitative variable, mu_0 is the hypothesized mean,
# and alt is either "less", "greater" or "two.sided" (default).
# See lesson 27 for an R code example for t.test() or week 5 module R tutorials.
##########PART 1 END #############################################################################
##########PART 2 START ###########################################################################
# This section is only for your information.
# You don't have to run this section.
# Upload combined student data set ("CombinedST314SISW19.csv")
st314data = read.csv(file.choose(), header = TRUE)
# creates a side by side boxplot with customized axes.
boxplot(st314data$SchoolWorkHours ~st314data$Course,
col = c("lightgreen", "lightblue"),
axes = FALSE, horizontal = TRUE,
main = "Comparison of time spent doing classwork between in-class and online ST314 Students", xlab = "Minutes Spent Doing School Work")
# Adds customized axes (y: the two course formats, x: minutes in steps of 10)
axis(2, at = c(1,2), c("In Class", "Online"))
axis(1, at = c(seq(0,160,10)))
# Calculate means, sd and sample size by group
aggregate(st314data$SchoolWorkHours~st314data$Course, data = st314data, mean)
aggregate(st314data$SchoolWorkHours~st314data$Course, data = st314data, sd)
aggregate(st314data$SchoolWorkHours~st314data$Course, data = st314data, length)
# two sample (Welch) t test comparing the two course formats
t.test(st314data$SchoolWorkHours~st314data$Course)
##########PART 2 END #############################################################################
|
35d7c606162313407d0f3e844eaed3449f0b8129
|
3feadcaf381e598f80367b9b94211a768c8b49b8
|
/Part_A.R
|
15ff9ec11d464692d6580b34dcfc225bf5582025
|
[] |
no_license
|
Frankiwy/Graph-Randomization
|
4e77513fb1a9909ebe02255c5200b441e8af2586
|
488c5742b17baa39ed50f340215b23ad1cf12de7
|
refs/heads/main
| 2023-01-15T15:56:19.870676
| 2020-11-22T23:22:14
| 2020-11-22T23:22:14
| 310,292,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,403
|
r
|
Part_A.R
|
library(igraph)
library(sdpt3r)
library(glue)
library(mc2d)
library(ggplot2)
library(knitr)
# Build two test graphs and plot them with the max-cut partition
# highlighted (green = nodes in the cut set, purple = the rest).
set.seed(161821)
ER_graph = erdos.renyi.game(10,p=1/3, type = 'gnp', directed=FALSE) # Erdős–Rényi
VS_graph = graph_from_literal(1--2, 1--4, 2--3, 4--3, 3--5, 5--6, 5--7) # the very small one
include_nodes <- c(2,3,5)
plot(VS_graph, vertex.size=25, vertex.frame.color="black",
vertex.label.color="black",
vertex.color=c("purple",'green')[1+(V(VS_graph)%in%include_nodes)],
vertex.label.cex=1.5, vertex.label.dist=0, edge.curved=0, edge.color=('black'),
xlim=c(-1,1), ylim=c(-1,1))
title("'Very' small one", adj = 0, line = -5)
list_nodes <- c(3,4,5,10)
# NOTE(review): this colors the ER plot using V(VS_graph) — probably a
# copy-paste slip for V(ER_graph); confirm intended highlight set.
plot(ER_graph, vertex.color=c("purple",'green')[1+(V(VS_graph)%in%list_nodes)],
vertex.size=30, vertex.frame.color="black", vertex.label.color="black",
vertex.label.cex=1, vertex.label.dist=0,
edge.curved=0, edge.color='black',
xlim=c(-1,1), ylim=c(-1,1))
title("Erdős–Rényi", adj = 0, line = -5)
# Dense adjacency matrices used by the SDP max-cut solver below.
ER_matrix = as.matrix(as_adj(ER_graph))
VS_matrix = as.matrix(as_adj(VS_graph))
print(glue('ER maxcut: {round(maxcut(ER_matrix)$pobj,2)} & VS maxcut: {round(maxcut(VS_matrix)$pobj,2)}'))
# Monte Carlo check: for each graph, estimate the expected size of a
# uniformly random cut (each vertex joins U with prob. 1/2) over 100,000
# draws, and compare it with OPT/2 from the SDP max-cut solver.
for (m in 1:2){
g_matrices <-list(ER_matrix, VS_matrix)
name_graphs <- c('ER_graph', 'VS_graph')
OPT_names <- c('OPT_sdpt3r', 'OPT_E')
g_dimension = dim(g_matrices[[m]])[1]
# NOTE(review): `sum`, `exp` and `vector` below shadow base functions of
# the same name; this works here but is fragile.
sum = 0
for (n in 1:100000){
mask = rbern(n = g_dimension, prob=.5)
vector = c(1:g_dimension)
indeces_U = vector[!!mask] # this is a little workaround to have a mask with TRUE and FALSE instead of 1 0
indeces_not_U = vector[!mask]
# Adjacency entries between U and its complement = edges crossing the cut.
num_edges = sum(g_matrices[[m]][indeces_U, indeces_not_U])
sum = sum+num_edges
}
exp = sum/100000
print(glue('{name_graphs[m]}: EXP= {exp} & {OPT_names[m]}/2= {round(-maxcut(g_matrices[[m]])$pobj/2,2)}'))
}
# Time 10,000 random-bipartition cut evaluations on a graph.
#
# Implements the Erdős randomized cut heuristic: each vertex joins set U
# with probability 1/2 and the edges crossing (U, complement) are counted;
# the average over 10,000 draws estimates the expected cut size. Only the
# elapsed wall-clock time is returned (the estimate itself is discarded),
# matching the companion maxcut_function() benchmark.
#
# Args:
#   gen_graph: an igraph graph object.
# Returns:
#   Elapsed time in seconds (numeric scalar).
Paul_Erdos_function = function(gen_graph){
  starting = proc.time()
  g_matrix = as.matrix(as_adj(gen_graph))
  # BUG FIX: dim() returns c(nrow, ncol). The original passed that whole
  # length-2 vector as rbern(n = ...); like rbinom(), a vector n means
  # "draw length(n) samples", so only 2 Bernoulli values were generated
  # per iteration instead of one per vertex (the inline version of this
  # loop above correctly uses dim(...)[1]). Use the vertex count.
  n_vertices = dim(g_matrix)[1]
  total_crossing = 0
  for (iter in 1:10000){
    # Random side assignment: 1 -> vertex in U, 0 -> vertex in complement.
    side = rbern(n = n_vertices, prob = .5)
    ids = seq_len(n_vertices)
    indeces_U = ids[side == 1]
    indeces_not_U = ids[side == 0]
    # Sum of adjacency entries between the two sides = edges in the cut.
    total_crossing = total_crossing + sum(g_matrix[indeces_U, indeces_not_U])
  }
  exp_cut = total_crossing/10000  # Monte Carlo estimate (unused, kept for parity)
  end_time = proc.time() - starting
  return(end_time[[3]])
}
# Time (in elapsed seconds) one SDP max-cut computation on the given graph.
#
# Args:
#   gen_graph: an igraph graph object.
# Returns:
#   Elapsed time in seconds (numeric scalar); the max-cut objective value
#   itself is discarded — only the timing matters for the benchmark.
maxcut_function <- function(gen_graph) {
  t0 <- proc.time()
  adjacency <- as.matrix(as_adj(gen_graph))
  maxcut_value <- maxcut(adjacency)$pobj
  elapsed <- proc.time() - t0
  elapsed[[3]]
}
# Benchmark both algorithms on Erdős–Rényi graphs of 10..200 nodes,
# averaging 5 runs per size. NOTE(review): growing vectors with c() in a
# loop is O(n^2); fine at this scale.
time_elapsed_PE <- c()
for (n in seq(from = 10, to = 200, by = 10)) {
intermediated_time = 0
for (l in 1:5){
intermediated_time = intermediated_time +
Paul_Erdos_function(erdos.renyi.game(n,p=1/3, type = 'gnp', directed=FALSE))
}
time_elapsed_PE <- c(time_elapsed_PE, round(intermediated_time/5,3))
}
time_elapsed_maxcut <- c()
for (n in seq(from = 10, to = 200, by = 10)) {
intermediated_time = 0
for (l in 1:5){
intermediated_time = intermediated_time +
maxcut_function(erdos.renyi.game(n,p=1/3, type = 'gnp', directed=FALSE))
}
time_elapsed_maxcut <- c(time_elapsed_maxcut, round(intermediated_time/5,3))
}
# Persist the timings so the plot can be re-made without re-running the
# (slow) benchmark.
PE_performances <- data.frame('n_of_nodes' = seq(from = 10, to = 200, by = 10),
'elapsed_time_PE' = time_elapsed_PE,
'elapsed_time_maxcut' = time_elapsed_maxcut)
write.csv(PE_performances,"performances.csv", row.names = FALSE)
PE_performances <- read.csv("performances.csv")
# NOTE(review): the caption's "colplexity" typo is a runtime string — fix
# at source if desired.
kable(PE_performances, caption= "Time colplexity Table")
# NOTE(review): aes(y=time_elapsed_PE) names the GLOBAL vectors, not the
# data-frame columns (elapsed_time_PE / elapsed_time_maxcut); ggplot falls
# back to the calling environment, so this works only while those vectors
# exist. Confirm and prefer the column names.
ggplot(PE_performances, aes(x=n_of_nodes))+
geom_line(aes(y=time_elapsed_PE, color="#09611D"), size=1) +
geom_point(aes(y=time_elapsed_PE), shape=21, color="#09611D", fill="#F34835", size=3) +
geom_line(aes(y=time_elapsed_maxcut, color="#F3C035"), size=1) +
geom_point(aes(y=time_elapsed_maxcut), color="#F3C035", shape=21, fill="purple", size=3) +
labs(title = "Time Complexity Plot" , x = 'number of nodes', y= 'elapsed time [s]') +
scale_color_identity(name = "Algorithms",
breaks = c("#09611D", "#F3C035"),
labels = c("Paul Erdos", "Maxcut"),
guide = "legend") +
coord_cartesian(xlim = c(0,201), ylim = c(0,20))
|
56752522db1b68914693deb8ffd1cd66879423d7
|
5c2dcf913088ef4671fa2bd07f9fbcd4ad564e71
|
/tests/testthat/test-print.R
|
dd46396677ed118e59709faaffab172aa4ad5b2f
|
[] |
no_license
|
GRSEB9S/errors
|
9c93724ddbcb2056eede1ac65f1242076afcc290
|
68d28a5dab9c69065d0d7a7f6adaa1eac7586304
|
refs/heads/master
| 2021-08-22T17:02:45.003708
| 2017-11-30T18:27:04
| 2017-11-30T18:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,439
|
r
|
test-print.R
|
# Tests for print()/format() of vectors carrying uncertainties, covering
# the default, "parenthesis" and "plus-minus" notations.
context("print")
test_that("error formatting works properly", {
# One repeated value paired with errors spanning ~15 orders of magnitude,
# so each element rounds to a different number of significant digits.
x <- set_errors(rep(11111.22222, 8),
c(12345678, 1234.5678, 12.345678, 1.2345678,
.12345678, .012345678, .000012345678, .000000012345678))
# Vector print: an "errors:" header line, then the values.
expect_equal(capture.output(print(x)), c(
"errors: 1.234568e+07 1.234568e+03 1.234568e+01 1.234568e+00 1.234568e-01 ...",
"[1] 11111.22 11111.22 11111.22 11111.22 11111.22 11111.22 11111.22 11111.22"
))
# Scalar print must agree with format() for every element.
for (i in seq_along(x))
expect_equal(capture.output(print(x[i])), format(x[i]))
# Parenthesis notation: value(uncertainty in the last shown digits).
expect_equal(format(x, notation="parenthesis"),
c("10000(10000000)", "11000(1000)", "11110(10)", "11111(1)",
"11111.2(1)", "11111.22(1)", "11111.22222(1)", "11111.22222000(1)"))
# digits= controls the significant digits shown for the uncertainty.
expect_equal(format(x, notation="parenthesis", digits=3),
c("10000(12300000)", "11110(1230)", "11111.2(123)", "11111.22(123)",
"11111.222(123)", "11111.2222(123)", "11111.2222200(123)", "11111.2222200000(123)"))
# Scientific variant: mantissa(uncertainty)e<exp>.
expect_equal(format(x, notation="parenthesis", scientific=TRUE),
c("1(1000)e4", "1.1(1)e4", "1.111(1)e4", "1.1111(1)e4", "1.11112(1)e4",
"1.111122(1)e4", "1.111122222(1)e4", "1.111122222000(1)e4"))
# Plus-minus notation: value +/- uncertainty at matching precision.
expect_equal(format(x, notation="plus-minus"),
c("10000 +/- 10000000", "11000 +/- 1000", "11110 +/- 10", "11111 +/- 1",
"11111.2 +/- 0.1", "11111.22 +/- 0.01", "11111.22222 +/- 0.00001", "11111.22222000 +/- 0.00000001"))
expect_equal(format(x, notation="plus-minus", digits=3),
c("10000 +/- 12300000", "11110 +/- 1230", "11111.2 +/- 12.3", "11111.22 +/- 1.23",
"11111.222 +/- 0.123", "11111.2222 +/- 0.0123", "11111.2222200 +/- 0.0000123", "11111.2222200000 +/- 0.0000000123"))
# Scientific plus-minus wraps the pair in parentheses before the exponent.
expect_equal(format(x, notation="plus-minus", scientific=TRUE),
c("(1 +/- 1000)e4", "(1.1 +/- 0.1)e4", "(1.111 +/- 0.001)e4", "(1.1111 +/- 0.0001)e4",
"(1.11112 +/- 0.00001)e4", "(1.111122 +/- 0.000001)e4", "(1.111122222 +/- 0.000000001)e4",
"(1.111122222000 +/- 0.000000000001)e4"))
# Differences retain the uncertainty even when the value is exactly 0.
x <- set_errors(10, 1)
expect_equal(format(x - set_errors(10)), "0(1)")
expect_equal(format(x - x), "0(1)")
# Degenerate values: zero error, NA, NaN and Inf all format cleanly.
x <- set_errors(c(0.4, NA, NaN, Inf))
expect_equal(format(x[1]), "0.4(0)")
expect_equal(format(x[2]), "NA(NA)")
expect_equal(format(x[3]), "NaN(NaN)")
expect_equal(format(x[4]), "Inf(Inf)")
})
|
c816a3a31db5d0a98a50e4b1ff1b5029dafb7d5d
|
4c70c5d35ccf53e69e97240dc35ced615fa014eb
|
/www/R/plotEvolution.R
|
a24cf38db32cf3860069bc71be94e7ef164ccfc3
|
[] |
no_license
|
shroff254/shiny_windfarm
|
d331aa0710184d3405667957ec2563d7c49d0789
|
80dbaf09465b0195392296d6d19a16a6e7dea6b4
|
refs/heads/master
| 2022-01-07T00:58:43.365500
| 2019-05-30T14:34:19
| 2019-05-30T14:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,520
|
r
|
plotEvolution.R
|
#' @title Plot the evolution of fitness values
#' @name plotEvolution
#' @description Plot the evolution of energy outputs and efficiency rates
#' over the whole generations. Plots min, mean and max values per
#' generation as points, with smoothing splines overlaid.
#' @export
#'
#' @importFrom graphics plot lines grid points par axis
#' @importFrom stats smooth.spline
#'
#' @param result The output matrix of \code{\link{genAlgo}}, which has
#' stored all relevant information. (matrix)
#' @param ask Selects which plot to draw: 1 (default) plots the park
#' efficiency per generation, 2 plots the energy yield per generation.
#' (numeric)
#' @param spar A numeric value determining how exact a spline should
#' be drawn. Default is 0.5 (numeric)
#'
#' @return NULL
#'
#' @author Sebastian Gatscha
plotEvolution <- function(result,ask=1, spar=0.5){
# result=ResG;ask=T; spar=0.5
# library(stats);
# Stack the per-generation summary rows stored in the first column of the
# genAlgo result into one data frame.
result1 <- as.data.frame(do.call("rbind", result[,1]))
x <- 1:length(result1$MaxEnergyRedu)
# ask == 1: park efficiency (%) — min (red), mean (blue), max (green).
if (ask==1){
plot(result1$minParkwirkungsg, xaxt='n', main="Wind Farm Efficiency per Generation", xlab="Generation",
ylab="Park Efficiency in %", cex=1.2,col="red", pch=20,
ylim= c(min(result1$minParkwirkungsg),max(result1$maxParkwirkungsg)))
axis(1,at = 1:nrow(result1),tick=T)
#mtext(result[,4],side=1,col=Col, at=1:length(dir1))
grid(col = "black")
points(result1$meanParkwirkungsg,ylab="MeanxParkwirkungsg", cex=1.2,col="blue", pch=20)
points(result1$maxParkwirkungsg,ylab="maxParkwirkungsg", cex=1.2,col="green", pch=20)
# Smoothing splines to show the trend of each series.
lmin <- smooth.spline(x,result1$minParkwirkungsg, spar=spar); lines(lmin, col='red', lwd=1.2)
lmea <- smooth.spline(x,result1$meanParkwirkungsg, spar=spar); lines(lmea, col='blue', lwd=1.2)
lmax <- smooth.spline(x,result1$maxParkwirkungsg, spar=spar); lines(lmax, col='green', lwd=1.2)
}
# ask == 2: energy yield (kW) — min (red), mean (blue), max (green).
if (ask==2) {
plot(result1$MeanEnergyRedu,xaxt='n',main="Wind Farm Energy Yield per Generation",xlab="Generation",ylab="Energy in kW", cex=1.2,
col="blue", pch=20, ylim= c(min(result1$MinEnergyRedu),max(result1$MaxEnergyRedu)))
axis(1,at = 1:nrow(result1),tick=T)
grid(col = "black")
points(result1$MaxEnergyRedu,ylab="maxParkwirkungsg", cex=1.2,col="green", pch=20)
points(result1$MinEnergyRedu, cex=1.2,col="red", pch=20)
emean <- smooth.spline(x,result1$MeanEnergyRedu, spar=spar); lines(emean, col='blue', lwd=1.2)
emax <- smooth.spline(x,result1$MaxEnergyRedu, spar=spar); lines(emax, col='green', lwd=1.2)
emin <- smooth.spline(x,result1$MinEnergyRedu, spar=spar); lines(emin, col='red', lwd=1.2)
}
}
|
ec7323989c493fc49eb22d3ac0439c7a48954eae
|
9730cd65b21efb77e13821b22512498d6f6f903b
|
/Code/spat21_moz_data_import.R
|
936333c961e91dec2ec4d025f883c3753c56abae
|
[] |
no_license
|
sam-k/malaria-spat21-analysis
|
d7bd7f8133b340b71b5902848c732882576956dd
|
5705fd2f424c71504e6e87b22e23fa9a3a648efa
|
refs/heads/master
| 2020-04-13T22:04:32.161882
| 2019-08-25T04:48:11
| 2019-08-25T04:48:11
| 163,471,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,738
|
r
|
spat21_moz_data_import.R
|
# ----------------------------------------- #
# Spat21 Data Set Importing #
# Mosquito Data #
# December 18, 2018 #
# K. Sumner, S. Kim #
# ----------------------------------------- #
# This script imports the raw mosquito collection and qPCR CSV files,
# reformats them, and saves the cleaned tables to an .Rdata file.
#### ------------------ load packages ------------------ ####
library(readr)
library(dplyr)
library(tidyr)
library(lubridate)
library(magrittr)
#### ---------------- set up environment --------------- ####
# All input/output paths are rooted at the project working directory.
.wd <- "~/Projects/Malaria collab/Spatial R21 projects/Spat21 cleaning, analysis/"
ALLSPECIES_FP <- paste0(.wd, "Data/Mosquito Data Sets/MOZZIECollectionSummary_June2017_July2018.csv")
ANOPHELES_FP <- paste0(.wd, "Data/Mosquito Data Sets/MOZZIEFemaleAnophele_June2017_July2018.csv")
QPCR_FP <- paste0(.wd, "Data/Mosquito Data Sets/Mozzie mosquito compiled detection results 18Dec2018.csv")
DATA_DICT_FP <- paste0(.wd, "Data/Data Dictionary/spat21_data_mosquito_dictionary.csv")
IMPORTED_FP <- paste0(.wd, "Data/Mosquito Data Sets/moz_imported_data.Rdata")
LOG_FP <- paste0(.wd, "Code/spat21_moz_data_import.log")
close(file(LOG_FP, open="w")) # clear log file
# Append each argument to the log file on its own line, followed by one
# blank separator line.
write.log <- function(...) {
  for(output in list(...)) {
    write(output, file=LOG_FP, append=TRUE)
  }
  write("", file=LOG_FP, append=TRUE)
}
.zero <- 1e-6 # threshold for zero CT value
#### ------------- read in mosquito data --------------- ####
write.log("# ------ IMPORT RAW DATA ------ #")
# Read in the mosquito descriptive data sets.
# Read in the data set with all mosquito species.
allspecies_data <- read.csv(ALLSPECIES_FP, stringsAsFactors=FALSE)
# Read in the wide data set with only anopheles mosquitoes.
anopheles_widedata <- read.csv(ANOPHELES_FP, stringsAsFactors=FALSE)
# Read in the mosquito qPCR data sets.
qpcr_data <- read.csv(QPCR_FP, stringsAsFactors=FALSE)
# Clean column names: collapse doubled periods (introduced by read.csv from
# special characters), lowercase everything, and drop trailing periods.
names(anopheles_widedata) %<>%
  { tolower(gsub("..", ".", ., fixed=TRUE)) } %>%
  { tolower(gsub("\\.$", "", .)) } # remove trailing periods
anopheles_widedata %<>% rename(form.entered.date = form.entered.on) # consistent name
# Look at summaries of all the data sets (interactive inspection only).
summary(allspecies_data)
summary(anopheles_widedata)
summary(qpcr_data)
str(allspecies_data)
str(anopheles_widedata)
str(qpcr_data)
# Record the dimensions of each raw table in the log.
write.log("allspecies_data dims:", paste(ncol(allspecies_data), "vars"), paste(nrow(allspecies_data), "obs"))
write.log("anopheles_widedata dims:", paste(ncol(anopheles_widedata), "vars (10 + 16*6 + 5)"),
          paste(nrow(anopheles_widedata), "obs (or more)"))
write.log("qpcr_data dims:", paste(ncol(qpcr_data), "vars"), paste(nrow(qpcr_data), "obs"))
# Output a CSV file of all the variable names (data dictionary skeleton).
allnames <- data.frame(c(names(allspecies_data), names(anopheles_widedata), names(qpcr_data)))
write_csv(allnames, DATA_DICT_FP)
#### ------ reformat all species descriptive data ------ ####
write.log("# ------ REFORMAT ALL SPP. DESCRIPTIVE DATA ------ #")
# Rename and reformat columns.
names(allspecies_data) <- c("household.id","repeat.instrument","repeat.instance","collection.date","collection.time","village","collection.done.by",
                            "anoph.unfed","anoph.bloodfed","anoph.halfgravid","anoph.gravid","anoph.undetermined","anoph.total","num.male.anoph",
                            "culex.unfed","culex.bloodfed","culex.halfgravid","culex.gravid","culex.undetermined","culex.total","num.male.culex",
                            "form.checked.by","form.checked.date","form.entered.by","form.entered.date","complete")
# Coerce each column group to its intended type: identifiers to factor,
# mosquito counts to integer, dates parsed as month/day/year (lubridate::mdy).
allspecies_data %<>%
  mutate_at(c("household.id","repeat.instrument","village","collection.done.by","form.checked.by","form.entered.by","complete"), factor) %>%
  mutate_at(c("repeat.instance",
              "anoph.unfed","anoph.bloodfed","anoph.halfgravid","anoph.gravid","anoph.undetermined","anoph.total","num.male.anoph",
              "culex.unfed","culex.bloodfed","culex.halfgravid","culex.gravid","culex.undetermined","culex.total","num.male.culex"), as.integer) %>%
  mutate_at(c("collection.date","form.checked.date","form.entered.date"), mdy) %>%
  mutate_at(c("collection.time"), as.logical)
write.log("Renamed columns")
#### ------------ reformat descriptive data ------------ ####
write.log("# ------ REFORMAT ANOPH. DESCRIPTIVE DATA ------ #")
# Reformat columns from wide to long.
# The wide file stores up to 16 mosquito "blocks" of 6 columns each per
# household row; the long table is preallocated with 16 rows per wide row
# (deliberately overshooting) and trimmed afterwards.
anopheles_data <- as.data.frame(matrix(nrow=16*nrow(anopheles_widedata), ncol=21), stringsAsFactors=FALSE) # long data, overshooting # of rows
names(anopheles_data) <- c("household.id","repeat.instrument","repeat.instance","collection.date","collection.time","village",
                           "collection.done.by","samples.prepared.by","species.id.done.by","total.number.of.mosquitos.in.the.household",
                           "sample.id.head","sample.id.abdomen","abdominal.status","species.type","specify.species","comment",
                           "form.checked.by","form.checked.date","form.entered.by","form.entered.date","complete")
.count <- 1
for(.i in 1:nrow(anopheles_widedata)) {
  # Columns 1-10 are per-household header fields and 107-111 are footer
  # fields; both are repeated on every long row for that household.
  .header <- anopheles_widedata[.i, 1:10]
  .footer <- anopheles_widedata[.i, 107:111]
  for(.j in 1:16) {
    # A block is present when its first column is non-empty.
    if(anopheles_widedata[[.i, 5+6*.j]] != "") { # first column of j-th "block"
      anopheles_data[.count, ] <- c(.header, anopheles_widedata[.i, (5+6*.j):(10+6*.j)], .footer)
      .count <- .count + 1
    }
  }
}
anopheles_data %<>% filter(!is.na(household.id)) # trim empty rows
anopheles_data[anopheles_data==""] <- NA
# Rename and reformat columns (same type conventions as allspecies_data).
anopheles_data %<>%
  mutate_at(c("household.id","repeat.instrument","village","collection.done.by","samples.prepared.by","species.id.done.by",
              "abdominal.status","species.type","specify.species","form.checked.by","form.entered.by","complete"), factor) %>%
  mutate_at(c("repeat.instance","total.number.of.mosquitos.in.the.household"), as.integer) %>%
  mutate_at(c("collection.date","form.checked.date","form.entered.date"), mdy) %>%
  mutate_at(c("collection.time"), as.logical)
write.log("Reformatted data from wide to long")
write.log("anopheles_data dims:", paste(ncol(anopheles_data), "vars"), paste(nrow(anopheles_data), "obs"))
# Standardize sample ID format: normalize whitespace around the "A"/"H"
# (abdomen/head) markers so IDs match across tables.
anopheles_data %<>%
  mutate_at(c("sample.id.head","sample.id.abdomen"), function(x) { gsub("\\s*A\\s*", " A", x) }) %>%
  mutate_at(c("sample.id.head","sample.id.abdomen"), function(x) { gsub("\\s*H\\s*", " H", x) })
write.log("Standardized sample ID spacing")
#### ---------------- reformat qPCR data --------------- ####
write.log("# ------ REFORMAT QPCR DATA ------ #")
# Rename and reformat columns.
names(qpcr_data)[11] <- "pfr364R^2"
# "Undetermined" in the raw export means no amplification; treat as missing.
qpcr_data[qpcr_data == "Undetermined"] <- NA
qpcr_data %<>%
  mutate_at(c("Sample.Name","Experiment.Name"), factor) %>%
  mutate_at(c("HbtubCT1","HbtubCT2","pfr364CT1","pfr364CT2","pfr364Std5a","pfr364Std5b","pfr364Std6a","pfr364Std6b"), as.numeric)
qpcr_data[is.na(qpcr_data)] <- NA # correct NaNs to NAs
# Standardize sample ID format (same A/H spacing convention as above).
qpcr_data %<>%
  mutate(Sample.Name=gsub("\\s?A\\s?", " A", Sample.Name)) %>%
  mutate(Sample.Name=gsub("\\s?H\\s?", " H", Sample.Name))
write.log("Standardized sample ID spacing")
# Process qPCR Ct values.
# Has.Hb / Has.Pf flag detection of human beta-tubulin / P. falciparum.
# Samples whose four Ct values are all "zero" (below .zero) are unusable
# and marked NA for both flags.
qpcr_data$Has.Hb <- FALSE
qpcr_data$Has.Pf <- FALSE
qpcr_data$Has.Hb[which(qpcr_data$HbtubCT1<.zero & qpcr_data$HbtubCT2<.zero & qpcr_data$pfr364CT1<.zero & qpcr_data$pfr364CT2<.zero)] <- NA
qpcr_data$Has.Pf[which(is.na(qpcr_data$Has.Hb))] <- NA
write.log("Zero Ct is defined as Ct<0.000001")
# Zero Ct/quantity values carry no information; blank them out, then flag a
# sample positive if either replicate has a real (>= .zero) Ct.
qpcr_data %<>%
  mutate_at(c("HbtubCT1","HbtubCT2","pfr364CT1","pfr364CT2","pfr364Q1","pfr364Q2"), function(x) { ifelse(x<.zero, NA, x) })
qpcr_data$Has.Hb[which(qpcr_data$HbtubCT1>=.zero | qpcr_data$HbtubCT2>=.zero)] <- TRUE
qpcr_data$Has.Pf[which(qpcr_data$pfr364CT1>=.zero | qpcr_data$pfr364CT2>=.zero)] <- TRUE
write.log("All zero Ct's marked as missing",
          "Any positive Ct marked as positive",
          "Everything else marked as negative")
# Reorder columns so Has.Hb / Has.Pf sit next to their source Ct columns.
qpcr_data %<>% .[c(names(.)[1:5], "Has.Hb", names(.)[6:9], "Has.Pf", names(.)[10:24])]
# Tabulate qPCR Ct counts for the log.
qpcr_counts <- rbind(table(qpcr_data$Has.Hb, useNA="always"), table(qpcr_data$Has.Pf, useNA="always"))
rownames(qpcr_counts) <- c("Hb","Pf")
colnames(qpcr_counts) <- c("Neg","Pos","Missing")
# Manual correction for one sample with a positive flag but no parasitemia.
qpcr_counts[["Pf","Pos"]] %<>% { . - 1 } # no parasitemia for M06 A00026
qpcr_counts[["Pf","Neg"]] %<>% { . + 1 } # no parasitemia for M06 A00026
write.table(qpcr_counts, col.names=NA, file=LOG_FP, append=TRUE, quote=FALSE, sep="\t")
write.log()
write.log("No parasitemia for M06 A00026, considered missing")
#### ------------ export reformatted data -------------- ####
# Save the three cleaned tables for downstream analysis scripts.
save(allspecies_data, anopheles_data, qpcr_data, file=IMPORTED_FP)
|
c18b462fc9ddaa0d3aa2be35bc25774aaac385f8
|
6582dfb79c42b7f8f7b8bddd77f1f9e99317e4a9
|
/source-code/analysis.r
|
50c63820faae2ed1ca3bf05f49e0bd84a69ee207
|
[] |
no_license
|
gtrdp/masters-thesis-guntur
|
d74bfdc0aeb284527bec94e5612dde9672ec1cf9
|
3432ace7684aba436797e4a0bb01936adbfee0c6
|
refs/heads/master
| 2020-12-07T11:45:23.303834
| 2017-02-06T14:34:16
| 2017-02-06T14:34:16
| 67,314,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,730
|
r
|
analysis.r
|
# Read the tab-separated signal dump; short column codes are assigned here.
all_global_data <- read.table("global-dump.txt",
                              header = TRUE,
                              sep="\t",
                              col.names=c("ap", "pr","au", "gt",
                                          "rms", "pklv", "rssi", "snr", "loc"),
                              fill=FALSE,
                              strip.white=TRUE)
global_data <- all_global_data[c("ap", "pr", "gt", "rms", "pklv", "rssi")]
# Two modelling targets, each predicted from ap, rms, pklv and rssi.
# NOTE(review): column semantics are not documented in the dump -- confirm
# what "gt" and "pr" measure with the data owner.
phone_data_gt <- all_global_data[c("gt", "ap", "rms", "pklv", "rssi")]
phone_data_pr <- all_global_data[c("pr", "ap", "rms", "pklv", "rssi")]

# Root-mean-square error between observed and predicted values.
# Factored out: this expression was previously duplicated after every model.
calc_rmse <- function(actual, predicted) {
  sqrt(mean((actual - predicted)^2))
}

# all models
# 1. Linear regression
model1.gt <- lm(gt~., phone_data_gt)
summary(model1.gt)
predictions <- predict(model1.gt, phone_data_gt)
print(calc_rmse(phone_data_gt$gt, predictions))
model1.pr <- lm(pr~., phone_data_pr)
summary(model1.pr)
predictions <- predict(model1.pr, phone_data_pr)
print(calc_rmse(phone_data_pr$pr, predictions))
# 2. Multivariate Adaptive Regression Splines
library(earth)
model2.gt <- earth(gt~., phone_data_gt)
summary(model2.gt)
evimp(model2.gt) # variable importance
predictions <- predict(model2.gt, phone_data_gt)
print(calc_rmse(phone_data_gt$gt, predictions))
model2.pr <- earth(pr~., phone_data_pr)
summary(model2.pr)
evimp(model2.pr)
predictions <- predict(model2.pr, phone_data_pr)
print(calc_rmse(phone_data_pr$pr, predictions))
# 3. SVM
library(kernlab)
model3.gt <- ksvm(gt~., phone_data_gt)
summary(model3.gt)
model3.gt
predictions <- predict(model3.gt, phone_data_gt)
print(calc_rmse(phone_data_gt$gt, predictions))
model3.pr <- ksvm(pr~., phone_data_pr)
summary(model3.pr)
model3.pr
predictions <- predict(model3.pr, phone_data_pr)
print(calc_rmse(phone_data_pr$pr, predictions))
# 4. kNN (knnreg takes a predictor matrix and a response vector)
library(caret)
model4.gt <- knnreg(phone_data_gt[,2:5], phone_data_gt[,1], k=3)
summary(model4.gt)
model4.gt
predictions <- predict(model4.gt, phone_data_gt[,2:5])
print(calc_rmse(phone_data_gt$gt, predictions))
model4.pr <- knnreg(phone_data_pr[,2:5], phone_data_pr[,1], k=1)
summary(model4.pr)
model4.pr
predictions <- predict(model4.pr, phone_data_pr[,2:5])
print(calc_rmse(phone_data_pr$pr, predictions))
# 5. Neural Network (single hidden layer, linear output)
library(nnet)
x <- phone_data_gt[,2:5]
y <- phone_data_gt[,1]
model5.gt <- nnet(gt~., phone_data_gt, size=50, maxit=1000, linout=TRUE, decay=0.1)
summary(model5.gt)
predictions <- predict(model5.gt, x, type="raw")
print(calc_rmse(y, predictions))
x <- phone_data_pr[,2:5]
y <- phone_data_pr[,1]
model5.pr <- nnet(pr~., phone_data_pr, size=50, maxit=1000, linout=TRUE, decay=0.1)
summary(model5.pr)
predictions <- predict(model5.pr, x, type="raw")
print(calc_rmse(y, predictions))
|
be42810b91072cae02893ecd5f7912dc53f2ade6
|
bc43cfc66bf4508f26682b1d9bf0bd29f219a3cc
|
/r_code/stab2.R
|
a76ea9677a258255184e824d299adc856b948a02
|
[] |
no_license
|
fernote7/thesis
|
0a7b856477de9c5beeb2fedbd98af926d10ef719
|
c79684f4a4c1443ddb79aaa89f151066b99b30cc
|
refs/heads/master
| 2021-03-23T11:35:57.911382
| 2017-12-08T18:20:29
| 2017-12-08T18:20:29
| 81,347,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
stab2.R
|
# Two schematic stability-region sketches drawn side by side: a shaded
# region with a unit circle (centred at -1) cut out, and a shaded right
# half-plane.
library(plotrix) # draw.circle() below comes from plotrix; it was never loaded
par(mfrow=c(1,2), xpd=FALSE, mar=c(2,4,2,2), oma=c(2,0,0,0))
# Left panel: plot a dummy point outside the window just to open a
# coordinate system, hatch everything, then blank out the unit circle.
z <- 10
plot(z, asp = 1, xlim = c(-2.5, 0), ylim = c(-1.5,1.5), axes = FALSE, ylab = "", xlab = "")
rect(-5,-5,2,2, density = 15, angle = 50)
draw.circle(-1, 0, 1, nv = 1000, border = NULL, lty = 1, lwd = 1, col = "white")
abline(h=0)
abline(v=0)
# Right panel: hatch the right half-plane only.
z <- 10
plot(z, asp = 1, xlim = c(-1, 1), ylim = c(-1,1), axes = FALSE, ylab = "", xlab = "")
rect(-0,-2,2,2, density = 15, angle = 50)
abline(h=0)
abline(v=0)
|
1da2e4b966ccc60bce70d28ae7ef66fdce463d69
|
efcd3c537262887632b7e9356f6ce095dc66335e
|
/R/preparingData.R
|
e00aacd0e409f87b6902f8df0e831a2cfa371296
|
[] |
no_license
|
ZarnackGroup/m6Aboost
|
e6785db5a99e1d189f4cab214d679bffa7ef3551
|
7797eff2fc60798294d93ff3b0f2178329dc79ce
|
refs/heads/master
| 2023-06-12T16:51:16.310769
| 2021-07-02T08:06:45
| 2021-07-02T08:06:45
| 371,287,281
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,266
|
r
|
preparingData.R
|
## ============================================================================
## The Methods-preparingData for the GRanges objects
## ----------------------------------------------------------------------------
#' @import GenomicRanges
#' @import methods
#' @rawNamespace import(IRanges, except = c(collapse, slice, desc))
#' @rawNamespace import(dplyr, except = c(union, intersect, setdiff))
#' @importFrom S4Vectors mcols
#' @importFrom utils globalVariables
#' @importFrom rtracklayer import.gff3
## ============================================================================
## Small functions
## ----------------------------------------------------------------------------
## Flag, for every peak in `object`, whether it overlaps each transcript
## region (3'UTR, 5'UTR, CDS), storing "YES"/"NO" metadata columns.
.assignment <- function(object, UTR3, CDS, UTR5)
{
    ## Same test for each region: does the peak overlap at least one range?
    ## Columns are added in the original order: UTR3, UTR5, CDS.
    regions <- list(UTR3 = UTR3, UTR5 = UTR5, CDS = CDS)
    for (nm in names(regions)) {
        mcols(object)[[nm]] <- ifelse(object %over% regions[[nm]],
                                      "YES", "NO")
    }
    object
}
## Suppress the "no visible binding for global variable" R CMD check NOTEs
## caused by non-standard evaluation (dplyr verbs, GRanges accessors).
utils::globalVariables(c("elementMetadata", "queryHits" , "geneid",
    "reads", "predict.boosting", "elementMetadata<-",
    "subjectHits", "strand<-", "start<-", "end<-", "gene_id"))
## - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
## The "preparingData" methods for GRanges objects.
##
#' @title preparingData for the miCILP2 data
#'
#' @description A function for calculating the relative signal strength and
#'     extract the features for running the m6Aboost.
#'
#' @author You Zhou
#'
#' @param object A GRanges object which should contain all single-
#'     nucleotide peaks from the miCLIP2 experiment.
#' @param annotation A path to the annotation file. The format of the
#'     annotation file should be a gff3 file and downloaded from
#'     https://www.gencodegenes.org/
#' @param colname_reads The name of the metadata column which contains the
#'     mean value of the truncation reads number without C to T transition
#'     reads.
#' @param colname_C2T The name of the meta data column which contains the
#'     mean value of C to T transition read counts.
#'
#' @return A GRanges object with all information that is required for running
#'     the m6Aboost model.
#'
#' @examples
#' testpath <- system.file("extdata", package = "m6Aboost")
#' test_gff3 <- file.path(testpath, "test_annotation.gff3")
#' test <- readRDS(file.path(testpath, "test.rds"))
#' test<- preparingData(test, test_gff3, colname_reads="WTmean",
#'     colname_C2T="CtoTmean")
#'
#' @export
preparingData <-
    function(object, annotation, colname_reads="", colname_C2T="")
    {
        ## NOTE(review): isS4() only checks that the object is *some* S4
        ## object; inherits(object, "GRanges") would be a stricter check.
        if(!isS4(object))
            stop("The input object should be a GRanges object.")
        meta_name <- names(elementMetadata(object))
        if(!colname_reads %in% meta_name |!colname_C2T %in% meta_name)
            stop("colname_reads and colname_C2T should refer to the respective
            column names in the elementMetadata of the input object.")
        ## Load annotation file
        anno <- rtracklayer::import.gff3(con = annotation)
        ## Sort genes by annotation level (ascending) then width (descending)
        ## so that select = "first" below prefers better-supported, larger
        ## genes when a peak overlaps several.
        anno_gene <- anno[anno$type == "gene"]
        anno_gene$level <- as.numeric(anno_gene$level)
        anno_gene <- anno_gene[order(anno_gene$level, -width(anno_gene))]
        o <- findOverlaps(object, anno_gene, select = "first")
        object$gene_id <- anno_gene$gene_id[o]
        ## Peaks outside any annotated gene get the sentinel "NO".
        object$gene_id[is.na(object$gene_id)] <- "NO"
        object <- as.data.frame(object)
        ## Calculate Relative Signal Strength (RSS): each peak's read count
        ## divided by the mean read count of all peaks on the same gene.
        object <- object %>% group_by(gene_id) %>%
            mutate(factor=mean(get(colname_reads)))
        object <- mutate(object, RSS=get(colname_reads)/factor)
        object$factor <- NULL
        object <- makeGRangesFromDataFrame(object, keep.extra.columns = TRUE)
        ## make the column for running the m6Aboost
        object$CtoT <- elementMetadata(object)[,colname_C2T]
        ## Add YES/NO transcript-region flags (UTR3/UTR5/CDS) per peak.
        UTR5 <- anno[anno$type == "five_prime_UTR"]
        UTR3 <- anno[anno$type == "three_prime_UTR"]
        CDS <- anno[anno$type == "CDS"]
        object <- .assignment(object, UTR3, CDS, UTR5)
        return(object)
    }
|
c58543eed7a288e1801a0639a46ecf42e90f326a
|
df0b491ed96c8726e2fe3930818deb23210ecb17
|
/cachematrix.R
|
d3b095db13d82ad8d0bd2549994bd30fa12708a3
|
[] |
no_license
|
cvaruns/ProgrammingAssignment2
|
e2ec59ac0dddc3bbe98c24519ef76600aaa1a90f
|
4fb28b008324830e7be5448780452b526da54b06
|
refs/heads/master
| 2021-01-21T21:29:22.774397
| 2015-06-19T18:43:34
| 2015-06-19T18:43:34
| 37,738,580
| 0
| 0
| null | 2015-06-19T18:20:31
| 2015-06-19T18:20:31
| null |
UTF-8
|
R
| false
| false
| 1,107
|
r
|
cachematrix.R
|
## The purpose of these functions is to create a cache matrix, store it and
## return the inverse of the cached matrix stored.

## makeCacheMatrix builds a special "matrix" object: a list of accessor
## functions closing over a matrix `x` and its cached inverse `i`.
## The cached inverse is invalidated whenever a new matrix is set.
## (Note: no squareness check is performed here; solve() in cacheSolve
## will error on non-invertible input.)
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL # invalidate the cache when the matrix changes
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  setinverse <- function(inv) { i <<- inv }
  getinverse <- function() { i }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix, reusing a previously cached inverse when one exists.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # No cached inverse yet: compute it, store it for next time, return it.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data.")
  cached
}
## Note: solve() requires the stored matrix to be invertible (non-zero
## determinant); otherwise it raises an error.
4ac14ed911a62c289991b76e9e4b05c82dfd77e0
|
4b49631324270db21fa4e01f7fea617ac773b791
|
/elevation/download_elevation_files.R
|
17bef57f7c1d9be742cc5eba2f4f5271e54391b3
|
[] |
no_license
|
mvanhala/geospatial_data_sources
|
c93106ddf22ad52ef3493b484138483c6ac9af40
|
f8315df51498bf197b80134d20a73e344048ddbe
|
refs/heads/master
| 2021-06-30T22:34:31.553697
| 2018-06-21T18:15:15
| 2018-06-21T18:15:15
| 137,977,202
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,516
|
r
|
download_elevation_files.R
|
library(sf)
library(dplyr)
library(purrr)
library(stringr)
library(readr)
library(parallel)
# US state boundaries (TIGER/Line), reprojected to WGS84 lon/lat.
states <- read_sf("/mnt/data/boundaries/tl_2017_us_state") %>%
  st_transform(4326)
# Configure the AWS CLI from environment credentials so `aws s3 ls` works.
system(
  glue::glue(
    "aws configure set aws_access_key_id {id}",
    id = Sys.getenv("AWS_ACCESS_KEY_ID")
  )
)
system(
  glue::glue(
    "aws configure set aws_secret_access_key {key}",
    key = Sys.getenv("AWS_SECRET_ACCESS_KEY")
  )
)
# List the staged 1-arc-second NED IMG products on the USGS bucket.
ned_files <- system("aws s3 ls s3://prd-tnm/StagedProducts/Elevation/1/IMG/", intern = TRUE)
# NOTE(review): the dot in ".zip$" is an unescaped regex metacharacter, so
# this matches any character before "zip" at end of line -- confirm intent.
zip_files <- ned_files %>%
  keep(str_detect, pattern = ".zip$")
# Each `aws s3 ls` line starts with a 19-character timestamp, followed by
# size and object name separated by whitespace.
times <- str_sub(zip_files, end = 19)
img_files <- zip_files %>%
  str_sub(start = 20) %>%
  str_trim() %>%
  str_split("[ ]+", n = 2) %>%
  transpose() %>%
  simplify_all() %>%
  setNames(c("size", "name")) %>%
  as_tibble() %>%
  mutate(updated_time = times)
# Parse the tile id (e.g. "n40w100") into lat/lon, build the download URL,
# and keep only the most recently updated file per tile.
boxes <- img_files %>%
  mutate(
    area = str_extract(name, "n[0-9]+w[0-9]+"),
    lat = parse_number(str_extract(area, "n[0-9]+")),
    lon = parse_number(str_extract(area, "w[0-9]+")),
    url = paste0("https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/1/IMG/", name)
  ) %>%
  filter(!is.na(area)) %>%
  group_by(area) %>%
  filter(row_number(desc(updated_time)) == 1) %>%
  ungroup()
# Build the 1x1 degree polygon for each tile; (lat, lon) names the tile's
# NW corner, with western longitudes negated.
boxes_poly <- st_as_sf(
  boxes,
  geom = st_sfc(
    map2(
      boxes$lon, boxes$lat,
      function(x, y) {
        st_polygon(list(cbind(c(-x, -x + 1, -x + 1, -x, -x), c(y, y, y - 1, y - 1, y))))
      }
    ),
    crs = 4326
  )
)
# Flag tiles that intersect the union of US state boundaries.
boxes_us <- boxes_poly %>%
  mutate(in_us = as.logical(st_intersects(., st_union(states), sparse = FALSE)))
# Reduce to a list of (area, url) pairs for the download workers.
boxes_download <- boxes_us %>%
  filter(in_us) %>%
  as.list() %>%
  .[c("area", "url")] %>%
  transpose()
# Download one 1x1 degree NED elevation tile.
#
# Fetches the zip at `pars$url` into <root_dir>/zip/<area>.zip, extracts it
# into a temporary directory, and copies the first .img raster found into
# <root_dir>/img/<area>.img.
#
# pars:     list with elements `url` (download URL) and `area` (tile id,
#           e.g. "n40w100")
# root_dir: directory that already contains "zip" and "img" subdirectories
download_one_box <- function(pars, root_dir = ".") {
  url <- pars$url
  area <- pars$area
  zip_path <- file.path(root_dir, "zip", paste0(area, ".zip"))
  download.file(url, zip_path, quiet = TRUE)
  tmp <- tempfile(tmpdir = root_dir)
  # Remove the extraction dir even if the copy below fails.
  on.exit(unlink(tmp, recursive = TRUE))
  unzip(zip_path, exdir = tmp)
  # Anchored, escaped pattern: the previous ".img|.IMG" regex had an
  # unescaped dot and no anchor, so it could match unintended file names.
  img_files <- list.files(tmp, pattern = "\\.img$", ignore.case = TRUE,
                          full.names = TRUE)
  if (length(img_files) > 0) {
    first_img <- img_files[[1]]
    # file.path() around a single path was redundant; copy directly.
    file.copy(
      first_img,
      file.path(root_dir, "img", paste0(area, ".img"))
    )
  }
}
# Create the output directory tree expected by download_one_box().
dir.create("/mnt/data/elevation")
dir.create("/mnt/data/elevation/zip")
dir.create("/mnt/data/elevation/img")
# Download all selected tiles in parallel across 36 worker processes.
cl <- makeCluster(36)
ned_downloads <- parLapply(
  cl, boxes_download, download_one_box, root_dir = "/mnt/data/elevation"
)
stopCluster(cl)
|
b5281a6ce87629a1dca25c2b72e82cf48f8a1ea8
|
b8058ad0e52402a536943b3007c69d0f3ce90333
|
/EDA.R
|
1753900179547f614d8203d6a9189c645b89a9ee
|
[] |
no_license
|
pecu/EDA
|
53ba8af1edd5cf5239ad6c673ab1e507d3929eff
|
4adb792195ebd625453a125e7b3615d4ff930e48
|
refs/heads/master
| 2020-06-17T14:58:34.604278
| 2017-06-13T01:48:40
| 2017-06-13T01:48:40
| 94,157,053
| 1
| 1
| null | null | null | null |
BIG5
|
R
| false
| false
| 1,787
|
r
|
EDA.R
|
library(ggplot2)
library(dplyr)
raw = read.csv("Project(Part 1).csv",
               header = TRUE, sep = ',')
# Summary statistics of the raw data.
result = summary(raw)
# Typhoon repair project codes: total reviewed budget (審議經費, NT$1000)
# per agency (機關名稱) and project code (工程代碼) for the 2008
# Sinlaku & Jangmi typhoons ("97年辛樂克及薔蜜").
ans <- filter(raw, raw$颱風 == "97年辛樂克及薔蜜") %>%
  group_by(機關名稱, 工程代碼) %>%
  summarise(price = sum(審議經費.千元.))
# Project groups whose total reviewed budget exceeds 500,000 (NT$1000 units).
highPrice <- filter(ans, price > 500000)
ans2 <- filter(raw, raw$颱風 == "97年辛樂克及薔蜜") %>%
  group_by(機關名稱, 工程代碼)
# Count of records per project code, colored by agency.
ggplot(ans2, aes(x = 工程代碼)) +
  geom_bar(position = "dodge",
           aes(fill = 機關名稱))
# Reviewed budget for project code "C1" by agency and township (鄉.鎮市.).
work <- filter(raw, raw$工程代碼 == "C1") %>%
  group_by(機關名稱, 鄉.鎮市.) %>%
  summarise(price = sum(審議經費.千元.))
# Budget per project code, dodged by agency.
ggplot(data = ans, aes(x = 工程代碼,
                       y = price,
                       group = 機關名稱)) +
  geom_bar(position = "dodge",
           stat = "identity",
           aes(fill = 機關名稱))
ggplot(data = ans, aes(x = 工程代碼,
                       group = 機關名稱)) +
  geom_bar(position = "dodge",
           aes(fill = 機關名稱))
# Projects applied for by each agency (budget stacked by project code).
ans <- filter(raw, raw$颱風 == "97年辛樂克及薔蜜") %>%
  group_by(機關名稱, 工程代碼) %>%
  summarise(price = sum(審議經費.千元.))
ggplot(data = ans, aes(x = 機關名稱,
                       y = price,
                       fill = 工程代碼)) +
  geom_bar(stat = "identity")
# Approved review budget per agency, across typhoons and project codes.
out <- group_by(raw, 機關名稱, 颱風, 工程代碼) %>%
  summarise(price = sum(審議經費.千元.))
ggplot(data = out, aes(x = 機關名稱,
                       y = price)) +
  geom_boxplot()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.