blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3433b0ac987868e12981454b8a1b3fd78ffbbaeb
|
e56c25e5c06b392f841f1b089ec36a372efaecd5
|
/first_script.R
|
46cd8eda3cf2d2486e3c7873a3787747fe92fc8e
|
[] |
no_license
|
krstout/test
|
faf6726e38677afc235819ee57bfca96fe23aa18
|
d775ea305dbea69a03548346b6e4a5ebfe4c2ed8
|
refs/heads/master
| 2020-08-04T12:59:29.818328
| 2019-10-01T16:51:53
| 2019-10-01T16:51:53
| 212,143,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8
|
r
|
first_script.R
|
d <- 2^2  # assign 2 squared (4) to d
|
31138fac068a24b8beec0ada3aa56aa76714e6d0
|
263511c73655f2f4ae96e7b988b25375bfbffc82
|
/tests/test_prioritizr_nodata.R
|
ed710fda9a91edee952d90b4fd9c66c8824c1923
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
VUEG/priocomp
|
1f05c31e20bdb39a9c51128f055bfcedd667a726
|
16b40077efb239bcefb7f2f463d3e2dcb328918d
|
refs/heads/master
| 2021-01-23T21:46:02.871833
| 2016-12-20T09:30:16
| 2016-12-20T09:30:16
| 57,879,702
| 1
| 3
| null | 2016-06-03T07:19:06
| 2016-05-02T09:51:18
|
HTML
|
UTF-8
|
R
| false
| false
| 953
|
r
|
test_prioritizr_nodata.R
|
## Smoke test: run a prioritizr maximum-cover problem on the Zonation
## tutorial species rasters, with a uniform cost layer masked to the
## species' NA cells.
library(prioritizr)
library(raster)
z_tutorial_dir <- "~/dev/git-data/zonation-tutorial"
z_tutorial_data_dir <- file.path(z_tutorial_dir, "data")
tutorial_files <- list.files(path = z_tutorial_data_dir,
pattern = "species.+\\.tif",
full.names = TRUE)
sp_rasters <- raster::stack(tutorial_files)
# For some mysterious reason, minmax isn't set automatically for sp_rasters[[1]]
# (species1)
sp_rasters[[1]] <- setMinMax(sp_rasters[[1]])
# Uniform cost layer (all cells = 1) over the species extent.
# FIX: the original piped with %>%, but neither prioritizr nor raster
# attaches magrittr/dplyr, so the script failed with
# 'could not find function "%>%"'. Call raster() directly instead.
cost <- raster(extent(sp_rasters), nrows = nrow(sp_rasters),
               ncols = ncol(sp_rasters), vals = 1)
cost[is.na(sp_rasters[[1]])] <- NA
# Budget: 10% of the non-NA cells (sum of a 1-valued layer = cell count)
b_cells <- 0.10 * raster::cellStats(cost, "sum")
mc_model <- prioritizr::maxcover_model(x = cost, features = sp_rasters,
budget = b_cells)
mc_results <- prioritizr::prioritize(mc_model, gap = 0.001)
prioritizr::plot_selection(cost, mc_results$x, title = "Maximum Cover Solution")
|
2d21f1d315489f4b3184e36d30f0fb8281e74419
|
d0d061329421401283a3db1f8e7aa016e61888d7
|
/man/tclust.Rd
|
0a61ee4b08fa577e187cb2656e1db2754c4d2942
|
[
"MIT"
] |
permissive
|
boopsboops/spider
|
87885b53570a98aece6e7ca1ce600330d9b95d25
|
e93c5b4bc7f50168b8a155a6dca7c87dfbdef134
|
refs/heads/master
| 2021-05-12T07:38:37.413486
| 2019-03-07T21:43:43
| 2019-03-07T21:43:43
| 117,250,046
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,206
|
rd
|
tclust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tclust.R
\name{tclust}
\alias{tclust}
\title{Clustering by a threshold}
\usage{
tclust(distobj, threshold = 0.01)
}
\arguments{
\item{distobj}{A distance object (usually from \code{\link{dist.dna}}).}
\item{threshold}{Distance cutoff for clustering. Default of 0.01 (1\%).}
}
\value{
A list with each element giving the index of the individuals
contained in each cluster.
}
\description{
Identifies clusters, excluding individuals greater than the threshold from
any member.
}
\details{
If two individuals are more distant than \code{threshold} from each other,
but both within \code{threshold} of a third, all three are contained in a
single cluster.
}
\examples{
data(anoteropsis)
anoSpp <- sapply(strsplit(dimnames(anoteropsis)[[1]], split="_"),
function(x) paste(x[1], x[2], sep="_"))
anoDist <- ape::dist.dna(anoteropsis)
tclust(anoDist)
#Names of individuals
anoClust <- tclust(anoDist)
lapply(anoClust, function(x) anoSpp[x])
}
\seealso{
\code{\link{dist.dna}}, \code{\link{localMinima}}.
}
\author{
Samuel Brown <s_d_j_brown@hotmail.com>
}
\keyword{Barcoding}
|
6822343bc6bcebac3ec51cc0f234d30ebac290dd
|
063c28143308d5b8029bf6111dca8ff248cdeb86
|
/cachematrix.R
|
023b244d7f3748fd08114985b2c6c46b6b649e26
|
[] |
no_license
|
jayantamukherjee1980/ProgrammingAssignment2
|
30004e242c7b3d1860ee6f152a093891c45a472c
|
623dd4fff8ac05e753eb2bfbe5bd79f405a70a64
|
refs/heads/master
| 2020-04-03T03:11:15.273734
| 2018-10-27T15:33:00
| 2018-10-27T15:33:00
| 154,978,961
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
cachematrix.R
|
## Two functions, one is to create the cache and
## the other is to populate and/or fetch the inverse from the cache
## The function below creates a structure to hold the matrix and its inverse
## Build a "cache matrix": a closure-backed container holding a matrix
## together with a cached copy of its inverse. Returns a list of four
## accessors: set/get for the matrix, set_inverse/get_inverse for the
## cached inverse. Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  mat_inv <- NULL  # cached inverse; NULL means "not computed yet"

  set <- function(y) {
    x <<- y
    mat_inv <<- NULL  # new matrix => stale inverse, drop it
  }
  get <- function() x
  set_inverse <- function(x_inverse) mat_inv <<- x_inverse
  get_inverse <- function() mat_inv

  list(set = set,
       get = get,
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}
## The function below given the matrix with cache returns the cached inverse
## when inverse is not cached it comutes the inverse and cache it for future calls
## Return the inverse of a "cache matrix" produced by makeCacheMatrix().
## On a cache hit the stored inverse is returned directly; otherwise the
## inverse is computed with solve() and written back into the cache so
## later calls can reuse it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    # cache hit: skip the (potentially expensive) solve()
    message("retrieve cached inverse")
    return(cached)
  }
  inv <- solve(x$get())  # compute the inverse
  x$set_inverse(inv)     # store it for future calls
  message("inverse added to cache")
  inv
}
|
9bfd564f942ba795e4cd4ef046dcfe308e080450
|
7a70a074f64227b1a09d9e8c6f93002d4a2de76c
|
/R/match_ordi_method.R
|
029ffd5acc9c861820f83c8bdf4cecd858583fef
|
[] |
no_license
|
krisrs1128/mvarVis
|
6df5166dfb36d400e37fc71c41ebcf517a40076c
|
f41fa56e80cb15be7875b78fde69cb3a0ce167f8
|
refs/heads/master
| 2020-12-25T16:59:16.317405
| 2019-10-13T22:02:03
| 2019-10-13T22:02:03
| 33,798,105
| 5
| 2
| null | 2017-11-20T20:03:00
| 2015-04-12T00:52:47
|
R
|
UTF-8
|
R
| false
| false
| 1,585
|
r
|
match_ordi_method.R
|
#' @title Search Vegan and ade4 for matching method
#'
#' @description Given a character string describing an ordination method
#' from ade4 or vegan, return the actual function for that method.
#'
#' @param method A string describing the function to return. The currently
#' accepted strings, along with the functions they return, are
#'   pca: dudi.pca
#'   pco: dudi.pco
#'   acm: dudi.acm
#'   coa: dudi.coa
#'   fpca: dudi.fpca
#'   hillsmith: dudi.hillsmith
#'   mix: dudi.mix
#'   nsc: dudi.nsc
#'   vegan_cca: cca, from vegan
#' and any other string that exactly matches a function in ade4 or vegan.
#'
#' @return ordi_method The function from either ade4 or vegan giving the
#' desired function.
#'
#' @importFrom ade4 dudi.pca dudi.pco dudi.acm dudi.coa dudi.fpca dudi.hillsmith
#' dudi.mix dudi.nsc dpcoa
#' @importFrom vegan cca
#' @importFrom FactoMineR PCA CA MFA
match_ordi_method <- function(method) {
# determine the function to be called
paste_dudi <- c("pco", "acm", "coa", "fpca","hillsmith", "mix", "nsc")
rename_methods <- c("vegan_cca", "ade4_pca", "factominer_pca")
if(method %in% paste_dudi) {
# Methods that need to be called as dudi."name"
ordi_method <- get(paste0("dudi.", method))
} else if(method %in% rename_methods) {
# Aliases that disambiguate same-named methods across packages
ordi_method <- switch(method,
"vegan_cca" = vegan::cca,
"ade4_pca" = ade4::dudi.pca,
"factominer_pca" = FactoMineR::PCA)
} else {
# Methods that can be called directly
# NOTE(review): get() searches the attached packages; an unknown string
# raises an error here rather than returning NULL.
ordi_method <- get(method)
}
ordi_method
}
|
1b406f9a12d901dd00e8811bdf2ca74b3dd012cf
|
c2f2c0fdaf739be514b0ac8fee7095b00eb307ea
|
/aula visualização e diagnostico I.R
|
4247b7fa2a4232b51229dc8b64a7a7324eae6e0d
|
[] |
no_license
|
wagnerLM/quanti2
|
56d864f0e7ea34bc23ad4be33203e20128d09581
|
3a753e3871847b5f3cc7cc751dbe5c57a9a00225
|
refs/heads/master
| 2021-07-03T19:10:19.817228
| 2020-11-24T16:06:32
| 2020-11-24T16:06:32
| 202,057,247
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,873
|
r
|
aula visualização e diagnostico I.R
|
## Visualization and diagnostics of variables
# install the required packages
install.packages("mgm")    # FIX: was installpackages("mgm"), which is not a function
install.packages("psych")
# attach the packages
library(mgm)
library(psych)
# example dataset (autism, shipped with mgm)
# identify the measurement level of each variable:
View(autism)
?autism_data
View(autism_names)
# article with the full study:
# http://sci-hub.tw/10.1177/1362361316660309
# visualization and diagnostics:
mean(autism$WorkHrs)
sd(autism$WorkHrs)
?summary
summary(autism)
summary(autism$WorkHrs)
?describe
describe(autism$WorkHrs)
?hist
hist(autism$WorkHrs)
hist(autism$WorkHrs, freq = FALSE)  # density scale so the density line overlays correctly
lines(density(autism$WorkHrs), col = 2)
# normality test; the null hypothesis is that the distribution is normal
shapiro.test(autism$WorkHrs)
# needed when choosing between parametric tests
# and their non-parametric counterparts
# when this kind of diagnostic matters:
# https://www.sheffield.ac.uk/polopoly_fs/1.579191!/file/stcp-karadimitriou-normalR.pdf
#
table(autism$Gen)
prop.table(table(autism$Gen))
table(autism$Gen,autism$ToH)
prop.table(table(autism$Gen,autism$ToH))
prop.table(table(autism$Gen,autism$ToH),1)
prop.table(table(autism$Gen,autism$ToH),2)
?pie
pie(table(autism$Gen,autism$ToH))
pie(table(autism$Gen,autism$ToH),labels = c("1,1","2,1","1,2","2,2"))
?barplot
barplot(table(autism$Gen,autism$ToH))
?boxplot
boxplot(autism$IQ~autism$Gen)
boxplot(autism$SatTreat~autism$Gen)
?plot
plot(autism$SatTreat,autism$NoC)
?pairs.panels
pairs.panels(autism[, c(2, 6, 7)], jiggle = TRUE, factor = 5)
# Exercise: use "rowSums" to compute the DASS-21 subscale scores,
# describe their central tendency and dispersion, and explore the
# relationships between them graphically:
# stress items     c(1,6,8,11,12,14,18)
# anxiety items    c(2,4,7,9,15,19,20)
# depression items c(3,5,10,13,16,17,21)
|
462e41f25d5022ac7e9930d5ebf4f7c54812ac83
|
62792b978ab0f64caf95ec6913f0fce8c07d2985
|
/inst/doc/part04-analyze-zoom-video-data.R
|
336c2ee20ebbaed19711d84013e11a12021ad8b1
|
[] |
no_license
|
cran/zoomGroupStats
|
206e6f3709d5261a7ee07d1330a389a4038e4c97
|
8431abd407a0da6daa95e2a6feaa22a30f7b25c1
|
refs/heads/master
| 2023-04-21T00:18:29.718618
| 2021-05-13T08:20:02
| 2021-05-13T08:20:02
| 367,098,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 963
|
r
|
part04-analyze-zoom-video-data.R
|
## NOTE(review): this file is knitr-purled output from the
## "part04-analyze-zoom-video-data" vignette; chunks marked eval=FALSE
## were extracted as commented-out code, so only opts_chunk$set() runs.
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
## ---- eval=FALSE, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'----
# batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
## ---- eval=FALSE--------------------------------------------------------------
# batchGrabVideoStills(batchInfo=batchOut$batchInfo, imageDir="~/Documents/myMeetings/videoImages", sampleWindow=60)
## ---- eval=FALSE--------------------------------------------------------------
# vidOut = batchVideoFaceAnalysis(batchInfo=batchOut$batchInfo, imageDir="~/Documents/meetingImages", sampleWindow=60, facesCollectionID="group-r")
|
8d6d73239d9f0670ec9f15aa8dc373c21abb21ee
|
a6d89b1eddfb3a32abed2abae0530acd4226ccb1
|
/PlotsCategoricalVar.R
|
27b33739bc8ff037feb22e3ca7be1e9bab3bf80c
|
[] |
no_license
|
akhilK17/Data-Science-with-R
|
2144aeb9cb51378cff2ccfba43a92944f38efd8d
|
10578dccec218885a322c4a8158d61734d6c9883
|
refs/heads/master
| 2020-03-22T15:57:49.703091
| 2018-09-10T17:32:49
| 2018-09-10T17:32:49
| 140,292,510
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,184
|
r
|
PlotsCategoricalVar.R
|
# This script is to find the distribution of the chick weight according to type of feed using bar plot and pie chart visualization
# About the data: An experiment was conducted to measure and compare the effectiveness of various feed supplements on the growth rate of chickens.
require("datasets") # attach package datasets in the working environment
?chickwts # help/information can be taken from the documents, this data set is about Chicken Weights by Feed Type from datasets package of R
data("chickwts") # load the chickwts data in working environment
str(chickwts) # dataset structure can be obtained by str function
# Barplots for categorical variable, this is chicken count vs feed barplot
plot(chickwts$feed) # plot function will plot the feed frequency
feeds <- table(chickwts$feed) # Creating a table feeds using reference ($)
barplot(feeds) # Simple barplot of feeds to show frequency of different feeds
# FIX: the original line was missing the closing parenthesis of barplot(),
# which made the whole script a syntax error.
barplot(feeds[order(feeds, decreasing = TRUE)]) # barplot in decreasing frequency of feeds
par(oma = c(1, 1, 1, 1)) # sets outside margins of barplot
par(mar = c(4, 5, 2, 1)) # Sets plots margins
# Many other arguments can be used to make visually better barplot, some are used in below barplot function
barplot(feeds[order(feeds)],
horiz = TRUE,
las = 1, # it gives the orientation of axis labels
col = c("beige", "blanchedalmond", "bisque1", "bisque2", "bisque3", "bisque4"),
border = NA, # No borders on bars
main = "Frequencies on Different Feeds\nin chickwts Dataset", # \n is line break
xlab = "Number of Chicks"
)
# Pie Charts for categorical variable, chicken count vs feed in a pie chart visualization
pie(feeds) # a simple pie plot of the feeds
?pie # "?" is to get help from documents
# modifying the pie chart to make this pie chart interesting and colourful to visualize in a better way
pie(feeds[order(feeds, decreasing = TRUE)],
init.angle = 90, # starts at 12 o'clock instead of 3
clockwise = TRUE,
col = rainbow(6),
main = "Pie Chart of Feeds from chickwts")
# Any chart can be exported in an image of format either png or jpeg by using png/jpeg function in R, example is below:
png("Barplot.png", # Creating a dummy file named Barplot
width = 888, # define the size of the image
height = 571)
# Set up the margins of image
par(oma = c(1, 1, 1, 1))
par(mar = c(4, 5, 2, 1))
# run the same barplot function with same parameters (described above) to plot the chart in the dummy file named Barplot.png
# any plot can be exported by using png/jpeg function
barplot(feeds[order(feeds)],
horiz = TRUE,
las = 1, # it gives the orientation of axis labels
col = c("beige", "blanchedalmond", "bisque1", "bisque2", "bisque3", "bisque4"),
border = NA, # No borders on bars
main = "Frequencies on Different Feeds\nin chickwts Dataset", # \n is line break
xlab = "Number of Chicks"
)
dev.off() # mandatory: closes the png device; without it the exported plot file is not completed
# NOTE(review): rm(list = ls()) in a script wipes the caller's workspace;
# kept for compatibility with the original, but consider removing it.
rm(list = ls()) # to clear out the working environment
|
568f1fc1ac51e52c999a8fc4d41bb5d36f7f80a9
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/wgrowing/ctrl.e#1.a#3.E#128.A#48.c#.w#7.s#32.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#7.s#32.asp.R
|
27f653d362ba45b523e758a29e8160a4d95c8f17
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#128.A#48.c#.w#7.s#32.asp.R
|
454fcbfc8fe2b17363896366c203c60a ctrl.e#1.a#3.E#128.A#48.c#.w#7.s#32.asp.qdimacs 7343 21502
|
7bfa22914f84921158223c930bc933d8847dac5d
|
d20145e080798319e0028a4c9d8851f619285c29
|
/Rprofile.R
|
0baa9d2a0f0ce5dfe7a0d6513192611fe5a75016
|
[] |
no_license
|
friendly/VCDR
|
da441b8306969fd6469a3bbc6a4475e028c61e29
|
818156ee0a26943a656ae915d924773c87654cec
|
refs/heads/master
| 2023-02-18T08:40:28.634897
| 2014-12-05T13:24:55
| 2014-12-05T13:24:55
| 16,708,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,369
|
r
|
Rprofile.R
|
# from http://biostat.mc.vanderbilt.edu/wiki/Main/KnitrHowto
# this from Frank Harrell -- no longer useful. Remove all uses
#
# spar(): set graphical parameters via par() with defaults tuned for
# single- vs multi-panel figures and for plots with/without axes.
# Returns the previous par() settings invisibly so callers can restore.
# NOTE(review): the default expressions reference `multi`, `bot`, `left`,
# `top`, `rt`, `axes`, which only exist once the body runs / other
# defaults are forced; this works because R evaluates defaults lazily.
# NOTE(review): `mgp=c(.75, .1, 0)` below is a stray inner assignment
# inside the default expression; it still yields c(.75, .1, 0).
spar <- function(mar=if(!axes)
c(2.25+bot-.45*multi,2+left,.5+top+.25*multi,.5+rt) else
c(3.25+bot-.45*multi,3.5+left,.5+top+.25*multi,.5+rt),
lwd = if(multi) 1 else 1.75,
# The margin line (in mex units) for the axis title, axis labels and axis line.
# Note that mgp[1] affects title whereas mgp[2:3] affect axis. The default is c(3, 1, 0).
mgp = if(!axes) mgp=c(.75, .1, 0) else
if(multi) c(1.5, .365, 0) else c(2.4-.4, 0.475, 0),
# length of tick marks
tcl = if(multi)-0.25 else -0.4,
xpd=FALSE,
bot=0, left=0, top=0, rt=0,
# point size of text
ps=if(multi) 14 else 10,
mfrow=NULL, axes=TRUE, ...) {
multi <- length(mfrow) > 0
op <- par(mar=mar, lwd=lwd, mgp=mgp, tcl=tcl, ps=ps, xpd=xpd, ...)
if(multi) op <- par(mfrow=mfrow)
invisible(op)
}
##################################################################################
# knitrSet is called once at the beginning of each chapter to set defaults
# for all chunks in that chapter
# knitrSet(): called once per chapter to configure knitr — chunk option
# defaults (figure size/placement, cache, device), a theme, warning/message
# redirection to messages.txt, inline-number rounding, optional pdfcrop,
# a chunk hook that centers figures, and an output hook supporting an
# output.lines chunk option for truncating printed output.
# Side effects: changes options(width, digits) and global knitr state.
knitrSet <- function(basename=NULL, w=4, h=3,
fig.align='center', fig.show='hold', fig.pos='!htbp',
fig.lp='fig:',
dev='pdf',
tidy=FALSE,
error=FALSE,
cache=FALSE,
width=65,
digits=5,
decinline=5,
keep.source=TRUE) {
## Specify dev=c('pdf','png') to produce two graphics files for each plot
## But: dev='CairoPNG' is preferred for png
require(knitr)
# set names of input directory and name of current input file:
# in_dir <- knitr:::knitEnv$input_dir
in_file <- knitr:::knit_concord$get("infile")
options(width=width, digits=digits)
## fills Sweavel boxes when font size is \small and svmono.cls
## is in effect (use 65 without svmono)
## How to render output? - default is render_latex()
# render_latex() # uses alltt package
# render_listings() # uses listings package, with shaded background
#knit_theme$set("default")
#knit_theme$set("print") # for only b/w, with bold highlighing
knit_theme$set("seashell") # light salmon background color
## re-direct warning messages to messages.txt
# unlink('messages.txt') # Start fresh with each run-- now in book.Rnw & chapter.Rnw
hook_log = function(x, options) cat(x, file='messages.txt', append=TRUE)
knit_hooks$set(warning = hook_log, message = hook_log)# , error = hook_lst_bf)
cat("** Chapter ", basename, " **\n", file='messages.txt', append=TRUE )
# round inline numbers (`r x`) to decinline digits via an inline hook
if(length(decinline)) {
rnd <- function(x, dec) round(x, dec)
formals(rnd) <- list(x=NULL, dec=decinline)
knit_hooks$set(inline = rnd)
}
# Allow use of crop=TRUE in figure chunks to invoke pdfcrop.
if (!Sys.which('pdfcrop')=="")
knit_hooks$set(crop=hook_pdfcrop)
# par hook is currently a no-op (spar() call disabled below)
knit_hooks$set(par=function(before, options, envir)
if(before && options$fig.show != 'none') {
# p <- c('bty','mfrow','ps','bot','top','left','rt','lwd',
# 'mgp','tcl', 'axes','xpd')
# pars <- opts_current$get(p)
# pars <- pars[!is.na(names(pars))]
# if(length(pars)) do.call('spar', pars) else spar()
})
opts_knit$set(aliases=c(h='fig.height', w='fig.width',
cap='fig.cap', scap='fig.scap'),
eval.after = c('fig.cap','fig.scap'),
error=error, keep.source=keep.source,
comment=NA, prompt=TRUE
)
# suggestion of reviewer: make R output look more 'normal'
# maybe we should also dispense with code highlighting
# opts_knit$set(comment=NA, prompt=TRUE)
opts_chunk$set(fig.path=paste0(basename, '/fig/'),
fig.align=fig.align, w=w, h=h,
fig.show=fig.show, fig.lp=fig.lp, fig.pos=fig.pos,
cache.path=paste0(basename, '/cache/'),
cache=cache,
dev=dev, par=TRUE, tidy=tidy,
out.width=NULL)
hook_chunk = knit_hooks$get('chunk')
## centering will not allow too-wide figures to go into left margin
knit_hooks$set(chunk = function(x, options) {
res = hook_chunk(x, options)
if (options$fig.align != 'center') return(res)
gsub('\\{\\\\centering (\\\\includegraphics.+)\n\n\\}',
'\\\\centerline{\\1}', res)
})
# modify knitr output hook to allow an optional output.lines option
# this follows from the knitr book, p. 118, \S 12.3.5
# but that does it globally. As well, the option should be called
# output.lines, because out.* options generally pertain to figures.
# a more general version would also allow output.lines to be a
# vector of integers, as in output.lines=3:15, selecting those numbered lines,
# as with echo=
# NB: this code has a dependency on stringr, but that is a knitr
# Depends:
# get the default output hook
# hook_output <- knit_hooks$get("output")
#
# knit_hooks$set(output = function(x, options) {
# lines <- options$output.lines
# if (is.null(lines)) {
# hook_output(x, options) # pass to default hook
# }
# else {
# x <- unlist(stringr::str_split(x, "\n"))
# if (length(x) > lines) {
# # truncate the output, but add ....
# x <- c(head(x, lines), "...\n")
# }
# # paste these lines together
# x <- paste(x, collapse = "\n")
# hook_output(x, options)
# }
# })
# knitr hook function to allow an output.lines option
# e.g.,
# output.lines=12 prints lines 1:12 ...
# output.lines=1:12 does the same
# output.lines=3:15 prints lines ... 3:15 ...
# output.lines=-(1:8) removes lines 1:8 and prints ... 9:n ...
# No allowance for anything but a consecutive range of lines
hook_output <- knit_hooks$get("output")
knit_hooks$set(output = function(x, options) {
lines <- options$output.lines
if (is.null(lines)) {
return(hook_output(x, options)) # pass to default hook
}
x <- unlist(strsplit(x, "\n"))
more <- "..."
if (length(lines)==1) { # first n lines
if (length(x) > lines) {
# truncate the output, but add ....
x <- c(head(x, lines), more)
}
} else {
x <- c(if (abs(lines[1])>1 | lines[1]<0) more else NULL,
x[lines],
if (length(x)>lines[abs(length(lines))]) more else NULL
)
}
# paste these lines together
x <- paste(c(x, ""), collapse = "\n")
hook_output(x, options)
})
# http://stackoverflow.com/questions/23349525/how-to-set-knitr-chunk-output-width-on-a-per-chunk-basis
# NO: now use chunk option R.options=list(width=) in knitr 1.6+
# knit_hooks$set(output.width=local({
# .width <- 0
# function(before, options, envir) {
# if (before) .width <<- options(width=options$width)
# else options(.width)
# }
# })
# )
}
## see http://yihui.name/knitr/options#package_options
## Use caption package options to control caption font size
|
85a3eff530ef9b6adf84871ee80a758fe4ab0147
|
982415a4fbff0d1291926e3a857f322531fa9a98
|
/man/noaaParameters.Rd
|
9824c7242ce7a8288f746575e9f10d0aba4b00cf
|
[] |
no_license
|
ScenicVerve/VulnToolkit
|
89554775667054ae0cccd3dc9e596fc02a544713
|
e18da64ea4ca633eb49519473c4df1f3a0b4eee6
|
refs/heads/master
| 2021-01-17T22:28:51.142893
| 2015-08-12T13:26:28
| 2015-08-12T13:26:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
rd
|
noaaParameters.Rd
|
\name{noaa.parameters}
\alias{noaa.parameters}
\title{Reports data available for a NOAA station}
\description{
noaa.parameters reports the parameters (meteorological and tidal) available for a specified NOAA station.
}
\usage{
noaa.parameters(stn = 8518750)
}
\arguments{
\item{stn}{NOAA station number (note that station names are not yet acceptable inputs).}
}
\value{
\item{dataset}{a dataframe with parameters and associated start and end dates. Where a parameter's availability is not continuous, multiple rows are reported. This function can be used to, for example, select meteorological parameters to include in calls to \code{\link{noaa}}}
}
\seealso{
\code{\link{noaa.stations}}
\code{\link{noaa}}
}
\examples{
# Example requires an internet connection
# noaa.parameters()
# LA.stns <- noaa.stations(state = "LA")
# noaa.parameters(LA.stns$number[1])
}
|
fbaadc743e750a158b61ddcbe46590be98d1460f
|
e57486b58f1c5218c9fdf620fbeb961df194e2a0
|
/man/q_growth_ratios.Rd
|
3ffe087450029cf4027fdf87f8d698210a74e9dd
|
[] |
no_license
|
zac-garland/eqdata
|
1f0b3b2e00967ac2765aa9c647cabc57e4d975bc
|
a258ccebc941aa91d4174f6cfbd37cc045a68416
|
refs/heads/master
| 2022-04-24T03:55:59.262519
| 2020-04-24T15:27:46
| 2020-04-24T15:27:46
| 257,068,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 266
|
rd
|
q_growth_ratios.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/q_growth_ratios.R
\name{q_growth_ratios}
\alias{q_growth_ratios}
\title{q_growth_ratios}
\usage{
q_growth_ratios(ticker, freq = c("quarterly", "annual"))
}
\description{
q_growth_ratios
}
|
6cccc9897906490cf4e67a04492534c17dad8ad8
|
8691ae0b068ca49236c1ecd238ea646952d19e83
|
/dayMid2_1.R
|
bd356f0120fc1aeef0fe85cc6d003ff815f7db25
|
[] |
no_license
|
ktabata117/classmaterial_public
|
0751fa738cf38dc1e66e06f8738bdde56ed38025
|
605c159a61b9622a1007d69b2f3ab531b66683a0
|
refs/heads/master
| 2023-05-12T21:06:01.481629
| 2021-06-04T10:42:15
| 2021-06-04T10:42:15
| 262,516,513
| 0
| 0
| null | 2020-12-02T10:20:59
| 2020-05-09T07:38:06
|
R
|
UTF-8
|
R
| false
| false
| 1,424
|
r
|
dayMid2_1.R
|
# Build a directed follow network among US senators from Twitter data,
# report the top in/out-degree senators, and plot directed vs undirected
# betweenness colored by party.
library(igraph)
(twitter <- read.csv("twitter-following.csv",
stringsAsFactors=FALSE))
# stringsAsFactors=TRUE would instead convert strings to factors
senator <- read.csv("twitter-senator.csv",
stringsAsFactors=FALSE)
n <- nrow(senator)
twitter.adj <- matrix(0, nrow=n,ncol=n)
# create an n x n matrix with all elements zero
colnames(twitter.adj) <- rownames(twitter.adj) <-
senator$screen_name
# write the senators' screen names as both row and column labels
for(i in 1:nrow(twitter)){
twitter.adj[twitter$following[i],
twitter$followed[i]] <- 1
}
twitter.adj <- graph.adjacency(twitter.adj, mode = "directed", diag = FALSE)
# plot(twitter.adj)
senator$indegree <- degree(twitter.adj, mode = "in")
senator$outdegree <- degree(twitter.adj, mode = "out")
in.order <- order(senator$indegree, decreasing = TRUE)
out.order <- order(senator$outdegree, decreasing = TRUE)
# descending order
senator[in.order[1:3],]
senator[out.order[1:3],]
n <-nrow(senator)
col <- rep("red",n) # start with all senators colored red (Republicans stay red)
col[senator$party=="D"] <- "blue"
col[senator$party=="I"] <- "blue"
# NOTE(review): Independents ("I") get the same color as Democrats but a
# distinct plotting symbol below — confirm this was intended
pch <- rep(16,n) # start with plotting symbol 16 for everyone (Republicans keep 16)
pch[senator$party=="D"] <- 17
pch[senator$party=="I"] <- 8
plot(betweenness(twitter.adj,directed =TRUE),
betweenness(twitter.adj,directed =FALSE),
pch = pch, col =col)
|
927386334d29b73ddc539aefa24ea620b8a3f600
|
67f6ca6dd3f8fb1d3104f931546c50445846083c
|
/src/main/R/multi-day-plans.R
|
e7a172809a9756130f953950e9fab9725749dcb9
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
LBNL-UCB-STI/beam
|
7e63cf28854a0b78e5f123629f5ff84966d75deb
|
ca433c85c592285cf4ff6c28620b3538fe9cc9ba
|
refs/heads/develop
| 2023-09-01T03:51:59.353627
| 2023-08-31T15:04:26
| 2023-08-31T15:04:26
| 73,118,824
| 142
| 71
|
NOASSERTION
| 2023-09-11T14:53:58
| 2016-11-07T20:38:29
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 7,956
|
r
|
multi-day-plans.R
|
# Build multi-day (3-day) MATSim plans from single-day leg/plan data for the
# SF Bay runs, then write them out as a MATSim population XML file.
# Relies on project helpers (load.libraries, do.or.load, pp, join.on, u,
# to.posix) defined elsewhere in this repository.
load.libraries(c('XML'))
# Parse the raw leg CSV: rows alternate between person-id rows, departure-time
# rows and link-level rows, distinguished by how many of V2..V4 are NA.
do.or.load('/Users/critter/Documents/beam/input/run0-201-leg-data.Rdata',function(){
  legs <- data.table(read.csv('/Users/critter/Documents/matsim/input/run0.201.leg.data.csv',stringsAsFactors=F,header=F))
  legs[V2=='',V2:=NA]
  legs[V3=='',V3:=NA]
  legs[,num.na:=is.na(V2)+is.na(V3)+is.na(V4)]
  legs[,person:=ifelse(num.na==3,as.numeric(V1),NA)]
  legs[,dep.time:=ifelse(num.na==2,(V1),NA)]
  legs[,travel.time:=ifelse(num.na==2,(V2),NA)]
  legs[,start.link:=ifelse(num.na==0,(V1),NA)]
  legs[,end.link:=ifelse(num.na==0,(V2),NA)]
  legs[,travel.time.2:=ifelse(num.na==0,(V3),NA)]
  legs[,dist:=ifelse(num.na==0,as.numeric(V4),NA)]
  # Fill each person id downward over the rows belonging to that person.
  rep.person <- function(x){
    person.inds <- which(!is.na(x))
    num.to.fill <- diff(c(person.inds,length(x)+1))
    unlist(apply(cbind(x[person.inds],num.to.fill),1,function(xx){ rep(xx[1],xx[2]) }))
  }
  legs[,person:=rep.person(person)]
  legs[num.na<3, dep.time:=rep(dep.time[!is.na(dep.time)],each=2)]
  legs[num.na<3, travel.time:=rep(travel.time[!is.na(travel.time)],each=2)]
  legs <- legs[num.na==0,list(person,dep.time,travel.time,start.link,end.link,travel.time.2,dist)]
  legs[,miles:=dist/1609.34]
  legs[,dep.time.hr:=unlist(lapply(str_split(dep.time,":"),function(ll){ as.numeric(ll[1]) + as.numeric(ll[2])/60 }))]
  legs[,travel.time.hr:=unlist(lapply(str_split(travel.time,":"),function(ll){ as.numeric(ll[1]) + as.numeric(ll[2])/60 }))]
  legs[,arr.time.hr:=dep.time.hr+travel.time.hr]
  setkey(legs,person,dep.time.hr)
  # Dwell time = gap between arriving at an activity and departing for the next.
  legs[,dwell.time:=c(tail(dep.time.hr,-1)-head(arr.time.hr,-1),0),by='person']
  legs[,dwell.time:=ifelse(dwell.time<0,0,dwell.time)]
  # assumes 5 kW charging at 0.35 kWh/mile — TODO confirm these constants
  legs[,range.replenished:=dwell.time * 5 /.35]
  # Track the running range deficit per person: miles driven minus miles of
  # range recoverable while dwelling, floored at zero.
  track.range <- function(miles,replenish){
    tracked <- c(max(0,miles[1] - replenish[1]),rep(0,length(miles)-1))
    if(length(miles)>1){
      for(i in 2:length(miles)){
        tracked[i] <- max(0,tracked[i-1] + miles[i] - replenish[i])
      }
    }
    tracked
  }
  legs[,range.tracked:=track.range(miles,range.replenished),by='person']
  list(legs=legs)
})
# Parse the plans CSV (person-id header rows followed by activity rows), join
# activity links to network node coordinates, and attach home-zip + range
# statistics per person.
do.or.load('/Users/critter/Documents/beam/input/run0-201-plans-all.Rdata',function(){
  plans <- read.csv('/Users/critter/Documents/matsim/input/run0.201.plans.thinned4.csv',header=F)
  person.id <- as.numeric(as.character(plans$V1))
  plans$id <- unlist(alply(cbind(which(is.na(plans$V2)),c(tail(which(is.na(plans$V2)),-1)-1,nrow(plans))),.(1),function(ii){ rep(person.id[ii[1]],diff(ii)+1) }))
  plans.etc <- plans[!is.na(plans$V2),]
  plans <- data.table(id=plans.etc$id,type=plans.etc$V1,link.id=plans.etc$V2,x=plans.etc$V3,y=plans.etc$V4,end=plans.etc$V5)
  load(file=pp(matsim.shared,"model-inputs/development/network_SF_Bay_detailed.Rdata"))
  link.nodes <- join.on(links,nodes,'from','id',c('x','y'),'from.')
  link.nodes <- join.on(link.nodes,nodes,'to','id',c('x','y'),'to.')
  # Use the link midpoint as the activity coordinate.
  link.nodes[,link.x:=(to.x+from.x)/2]
  link.nodes[,link.y:=(to.y+from.y)/2]
  plans <- join.on(plans,link.nodes,'link.id','id',c('link.x','link.y'))
  setkey(plans,id,type)
  homes <- u(plans[type=='Home'])
  # Spatial join of home coordinates (UTM zone 10N) to SF zip polygons.
  zip.of.homes <- over(SpatialPoints(homes[,list(link.x,link.y)],CRS("+init=epsg:26910")),sf.zips)$ZCTA5CE10
  homes[,zip:=as.numeric(as.character(zip.of.homes))]
  dists.by.person <- legs[,list(miles=sum(miles),max.trip=max(miles),min.range=max(range.tracked)),by='person']
  dists.by.person[,limiting.range:=ifelse(max.trip>min.range,max.trip,min.range)/.95]
  homes <- join.on(homes,dists.by.person,'id','person')
  return(list('plans'=plans,'homes'=homes))
})
# Normalise activity end times and align plan elements with legs by order.
plans[,act.end:=to.posix(end,'%H:%M:%S')]
plans[,act.end.hr:=hour(act.end)]
plans[,type:=factor(type)]
plans[,end:=as.character(end)]
plans[,end.dt:=to.posix(pp('1970-01-',ifelse(end=='','10 00:00:00', pp('01 ',end))),'%Y-%m-%d %H:%M:%S')]
setkey(plans,id,end.dt)
plans[,element.i:=1:length(x),by='id']
legs[,dep.dt:=to.posix(pp('1970-01-01 ',dep.time),'%Y-%m-%d %H:%M:%S')]
legs[,id:=person]
setkey(legs,id,dep.dt)
legs[,element.i:=1:length(dist),by='id']
legs <- join.on(legs,plans,c('id','element.i'),c('id','element.i'),c('end.dt','link.id','link.x','link.y'))
legs[,dep.delay:=dep.dt - end.dt]
# Now just deal with subset of plans data we're interested in
load('/Users/critter/Documents/beam/input/sf-bay-sampled-plans.Rdata')
plans <- plans[id%in%sampled.reg$smart.id]
legs <- legs[id%in%sampled.reg$smart.id]
# Group into cases to make it easy to deal with the carry-over from last plan to the next
#
# 98% of plans being and end at Home
# sum(plans[,head(type,1)=='Home' && tail(type,1)=='Home',by='id']$V1)
home.to.home <- u(plans[,head(type,1)=='Home' & tail(type,1)=='Home',by='id'][V1==T]$id)
other.types <- u(plans[,head(type,1)=='Home' & tail(type,1)=='Home',by='id'][V1==F]$id)
new.plans <- list()
# Home-to-home plans: replicate days 2 and 3 by shifting all activities
# (after the first) by 24 h and 48 h.
for(person in home.to.home){
  n.acts <- nrow(plans[id==person])
  plans[id==person,end.dt:=c(head(end.dt,-1),end.dt[1]+24*3600)]
  new.plans[[length(new.plans)+1]] <- rbindlist(list(plans[id==person,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt)],
  plans[id==person][2:n.acts,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt+24*3600)],
  plans[id==person][2:n.acts,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt+48*3600)]))
}
# All other plans: repeat the full plan on days 2 and 3.
for(person in other.types){
  n.acts <- nrow(plans[id==person])
  plans[id==person,end.dt:=c(head(end.dt,-1),to.posix('1970-01-02'))]
  new.plans[[length(new.plans)+1]] <- rbindlist(list(plans[id==person,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt)],
  plans[id==person][,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt+24*3600)],
  plans[id==person][,list(type=type,link=link.id,id=id,x=x,y=y,end_time=end.dt+48*3600)]))
}
new.plans <- rbindlist(new.plans)
new.plans[,link:=pp('sfpt',link)]
# Build dummy legs connecting consecutive activities (times/distances of 1
# are placeholders; the router fills in real values).
new.legs <- new.plans[,list(start_link=head(link,-1),end_link=tail(link,-1),trav_time=1,distance=1),by='id']
save(new.plans,new.legs,file='/Users/critter/Documents/beam/input/sf-bay-sampled-plans-multi-day.Rdata')
load(file='/Users/critter/Documents/beam/input/sf-bay-sampled-plans-multi-day.Rdata')
# Write the MATSim population XML: full file plus a 500-person subset.
outfile <- '/Users/critter/Documents/beam/input/sf-bay-sampled-plans-multi-day.xml'
outfile.500 <- '/Users/critter/Documents/beam/input/sf-bay-sampled-plans-multi-day-500.xml'
the.str <- '<?xml version="1.0" encoding="utf-8"?>\n<!DOCTYPE population SYSTEM "http://www.matsim.org/files/dtd/population_v5.dtd">\n\n<population>\n'
cat(the.str,file=outfile,append=F)
cat(the.str,file=outfile.500,append=F)
i <- 1
for(person in u(new.plans$id)){
  the.str <- pp('\t<person id="',person,'" employed="yes">\n\t\t<plan selected="yes">\n')
  # Hours beyond 24 encode day 2/3 activity end times.
  the.hr <- as.numeric(strftime(new.plans[id==person]$end_time,'%H')) + 24*(as.numeric(strftime(new.plans[id==person]$end_time,'%j'))-1)
  the.min <- as.numeric(strftime(new.plans[id==person]$end_time,'%M'))
  the.acts <- pp('\t\t\t<act end_time="',the.hr,':',formatC(the.min,width = 2, format = "d", flag = "0"),':00" link="',new.plans[id==person]$link,'" type="',new.plans[id==person]$type,'" x="',new.plans[id==person]$x,'" y="',new.plans[id==person]$y,'"/>')
  the.legs <- pp('\t\t\t<leg mode="PEV"><route type="links" start_link="',new.legs[id==person]$start_link,'" end_link="',new.legs[id==person]$end_link,'" trav_time="',new.legs[id==person]$trav_time,'" distance="',new.legs[id==person]$distance,'">',new.legs[id==person]$start_link,' ',new.legs[id==person]$end_link,'</route></leg>')
  # Interleave acts and legs (acts bookend the legs).
  the.str <- pp(the.str,pp(rbind(the.acts,c(the.legs,'')),collapse='\n'),'\t\t</plan>\n\t</person>\n')
  cat(the.str,file=outfile,append=T)
  if(i <= 500){
    cat(the.str,file=outfile.500,append=T)
    i <- i + 1
  }
}
the.str <- '\n<!-- ====================================================================== -->\n\n</population>'
cat(the.str,file=outfile,append=T)
cat(the.str,file=outfile.500,append=T)
|
5ff259ac71fa34bb70948cac113b13f8cc081c0e
|
7b386cda02570ca54536a5a352881f4ac954ce78
|
/app/app.R
|
77c4bc266bff4313544c44b8045d9fc91190e11c
|
[
"MIT"
] |
permissive
|
JacobYunker/DevOps-Data-Science-GHC-2020
|
5e72b399590fff81622d288a22dc6a0bc7147062
|
5a29a572cfc8391a5a6353eae601f7dadf51caa0
|
refs/heads/master
| 2022-12-07T13:49:55.647226
| 2020-08-31T23:08:51
| 2020-08-31T23:08:51
| 291,738,316
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,008
|
r
|
app.R
|
# Shiny app: interactive scatterplot explorer for the Boston Housing dataset.
# The user picks any two columns for the x and y axes; variable descriptions
# are listed in the sidebar.
library(shiny)
library(tidyverse)
# assumes BostonHousing.csv sits next to app.R in the app directory
housing<-read.csv("BostonHousing.csv", header=TRUE)
ui<-fluidPage(
titlePanel("Boston Housing Data Visualization"),
sidebarLayout(
sidebarPanel(
h4("Filter"),
# Axis selectors: choices mirror the CSV column names.
selectInput(inputId = "y",
label = "Y-Axis:",
choices = c("crim","zn","indus","chas","nox","rm","age","dis","rad","tax","ptratio","b","lstat","medv"),
selected = "rm"),
selectInput(inputId = "x",
label = "X-Axis:",
choices = c("crim","zn","indus","chas","nox","rm","age","dis","rad","tax","ptratio","b","lstat","medv"),
selected = "medv"),
# Variable legend shown to the user.
h6("CRIM: Per capita crime rate by town"),
h6("ZN: Proportion of residential land zoned for lots over 25,000 sq. ft"),
h6("INDUS: Proportion of non-retail business acres per town"),
h6("CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)"),
h6("NOX: Nitric oxide concentration (parts per 10 million)"),
h6("RM: Average number of rooms per dwelling"),
h6("AGE: Proportion of owner-occupied units built prior to 1940"),
h6("DIS: Weighted distances to five Boston employment centers"),
h6("RAD: Index of accessibility to radial highways"),
h6("TAX: Full-value property tax rate per $10,000"),
h6("PTRATIO: Pupil-teacher ratio by town"),
h6("B: 1000(Bk — 0.63)², where Bk is the proportion of [people of African American descent] by town"),
h6("LSTAT: Percentage of lower status of the population"),
h6("MEDV: Median value of owner-occupied homes in $1000s")
),
mainPanel(
plotOutput("scatterPlot")
)
)
)
server<-function(input,output){
# aes_string() accepts the column names as strings from the two selectInputs.
output$scatterPlot<-renderPlot({
ggplot(housing)+
geom_point(aes_string(x=input$x,y=input$y))
})
}
shinyApp(ui=ui, server=server)
|
c40d8b868e4ddff03dc73ca5c2df4cd1c112ef20
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SpATS/examples/summary.SpATS.Rd.R
|
a666afe6de405af37074d46f41fd55158eb4bf24
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 753
|
r
|
summary.SpATS.Rd.R
|
# Example usage of summary() on SpATS model objects (from the SpATS package
# documentation): fit a spatial model to the wheat trial data, then show the
# different summary views.
library(SpATS)
### Name: summary.SpATS
### Title: Summary method for 'SpATS' objects
### Aliases: summary.SpATS
### ** Examples
library(SpATS)
data(wheatdata)
summary(wheatdata)
# Create factor variable for row and columns
wheatdata$R <- as.factor(wheatdata$row)
wheatdata$C <- as.factor(wheatdata$col)
# Fit yield with a 2-D P-spline spatial trend plus fixed codes and random
# row/column effects.
m0 <- SpATS(response = "yield", spatial = ~ SAP(col, row, nseg = c(10,20), degree = 3, pord = 2),
genotype = "geno", fixed = ~ colcode + rowcode, random = ~ R + C, data = wheatdata,
control = list(tolerance = 1e-03))
# Brief summary
m0
# More information: dimensions
summary(m0) # summary(fit.m2, which = "dimensions")
# More information: variances
summary(m0, which = "variances")
# More information: all
summary(m0, which = "all")
|
36c170c069a4c68ae6656ac1047835bddbd9f2fc
|
0ce902a366b310ea7f3e40700a04abb3a7e73fa4
|
/myplot.R
|
cfe861dca83d17fbdbfda9b50245e460f24e6604
|
[] |
no_license
|
sophieberkhout/thesis-edges-parsimony
|
38de4e8b69c72c5a4cff92579e1cc7ae848dd780
|
8bb6383468a37b9bc46f577d2fc0038a7dc06a6f
|
refs/heads/master
| 2022-11-16T18:34:00.749366
| 2020-06-30T19:33:52
| 2020-06-30T19:33:52
| 257,295,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
myplot.R
|
myplot <- function(df, title = "", type = "preacher"){
  # Plot empirical CDFs of model-fit values, one curve per model.
  #
  # df: data frame with a numeric `Fit` column and a `Model` grouping column.
  # title: plot title.
  # type: "preacher" (A/B/Full models, x-axis "LL") or "mimicry"
  # (model-mimicry fit differences, x-axis delta-LL).
  # Returns a ggplot object.
  #
  # Validate `type` up front: previously an unrecognised value fell through
  # both if-blocks and failed later with "object 'cols' not found";
  # match.arg() rejects it immediately with a clear message.
  type <- match.arg(type, c("preacher", "mimicry"))
  if(type == "preacher"){
    cols <- c("blue", "red", "black")
    labs <- c("A", "B", "Full")
    xlab <- "LL"
  }
  if(type == "mimicry"){
    cols <- c("blue", "red")
    labs <- expression(A[b]-B[b], B[a]-A[a])
    xlab <- expression(Delta*LL)
  }
  ggplot(df, aes(x = Fit, colour = Model)) +
    stat_ecdf(size = 1.5) +
    scale_colour_manual(values = cols, labels = labs) +
    theme_classic() +
    ylab("") +
    xlab(xlab) +
    labs(colour = "") +
    ggtitle(title) +
    theme(text = element_text(size = 30, family = "serif"),
          axis.ticks.length = unit(-0.25, "cm"),
          axis.ticks = element_line(size = 1.25),
          axis.text.x = element_text(margin=unit(c(0.5,0.5,0.5,0.5), "cm")),
          axis.text.y = element_text(margin=unit(c(0.5,0.5,0.5,0.5), "cm")),
          axis.line = element_line(size = 1.25),
          legend.position = c(.15, .85),
          plot.margin = margin(2, 50, 10, 1),
    )
}
|
bfc985322003201d1c10dbb5cd075b0a058edb11
|
3ae85ad5599681390f59838210c0a733e39496a5
|
/01_lyrics_scraping.R
|
e5bf4a75012582be81bcf49c6dc52e3bb77d5548
|
[] |
no_license
|
jason-j-kim/melonchartlyrics
|
ec14324f5819eb6d667d10a454bb87b9d7b0ae0d
|
0790ebc02e6f1ac98523df7a37f0696d401f01f9
|
refs/heads/master
| 2021-03-27T14:41:59.457919
| 2017-11-11T23:36:43
| 2017-11-11T23:36:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,556
|
r
|
01_lyrics_scraping.R
|
# Scrape the Melon yearly K-pop charts (1974-2016) with RSelenium/rvest,
# then fetch each song's lyrics page and join lyrics onto the chart table.
# NOTE(review): installing packages at the top of a script re-installs on
# every run — consider moving these to one-time setup.
devtools::install_github("johndharrison/wdman")
install.packages("RSelenium")
install.packages("rvest")
install.packages("tidyverse")
install.packages("stringr")
install.packages("httr")
library(wdman)
library(RSelenium)
library(rvest)
library(tidyverse)
library(stringr)
library(httr)
# Start a local Selenium-driven browser session.
rD <- rsDriver(verbose = FALSE)
remDr <- rD$client
songList <- data_frame()
# One chart page per year; scrape title, artist and the numeric song id.
for (year in seq(1974, 2016, 1)) {
remDr$navigate(paste0("http://www.melon.com/chart/age/index.htm?chartType=YE&chartGenre=KPOP&chartDate=", year))
webElem <- remDr$findElement(using = "css selector", value = "form#frm")
current <- data_frame(year = year,
title = read_html(webElem$getPageSource()[[1]]) %>%
html_node("form#frm table") %>%
html_nodes("div.wrap_song_info div.rank01") %>%
html_text() %>%
str_trim("both") %>%
str_conv("UTF8"),
artist = read_html(webElem$getPageSource()[[1]]) %>%
html_node("form#frm table") %>%
html_nodes("span.checkEllipsis") %>%
html_text() %>%
str_conv("UTF8"),
# song id is embedded in the button's onclick javascript
song_id = read_html(webElem$getPageSource()[[1]]) %>%
html_node("form#frm table") %>%
html_nodes("div.wrap a.btn") %>%
html_attr("onclick") %>%
str_extract_all("[0-9]+") %>%
unlist())
songList <- dplyr::bind_rows(songList, current)
# throttle requests to be polite to the server
Sys.sleep(60)
}
lyrics <- data_frame()
# Fetch each song's detail page and strip HTML tags from the lyric div.
for (id in songList$song_id) {
newLyrics <- data_frame(id = id,
lyric = read_html(content(GET(paste0("http://www.melon.com/song/detail.htm?songId=", id)), "text")) %>%
html_node("div.lyric") %>%
gsub(pattern = "<.*?>", replacement = "\n") %>%
str_replace_all("\t", ""))
lyrics <- dplyr::bind_rows(lyrics, newLyrics)
# random 1-6 s delay between requests
Sys.sleep(sample(seq(1, 6, 1), 1))
}
# Join lyrics onto the chart rows and normalise whitespace/markup leftovers.
songListLyrics <- songList %>%
left_join(lyrics, by = c("song_id" = "id")) %>%
unique()
songListLyrics$lyric <- str_replace_all(songListLyrics$lyric, "[\t\r\n]+", " ") %>%
str_replace_all(">", " ") %>%
str_replace_all("<", " ") %>%
str_trim("both")
# write_csv(songListLyrics, path = "melon_ranking_lyrics_1964-2016.csv")
|
c5f5a9bdd99abf74ff1c4b01151b2dd7b958dce8
|
0eb29689bbfd02d2d7708c9a4c82b8dad7184387
|
/programs/bmMethods.R
|
c21f53d131691608c013e743948a30bb48e2bd36
|
[] |
no_license
|
bigdata-ufsc/ferrero-2018-movelets
|
11647807a111c0c9237f2d3c9d386b98d7101838
|
a5cd12d84b408154b0c3129815f9c5f5d9705a47
|
refs/heads/master
| 2022-06-03T06:54:53.298871
| 2019-11-07T15:09:33
| 2019-11-07T15:09:33
| 220,263,536
| 0
| 1
| null | 2022-05-20T21:15:11
| 2019-11-07T15:05:17
|
Java
|
UTF-8
|
R
| false
| false
| 9,783
|
r
|
bmMethods.R
|
require(data.table)
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
# Cost function for e1071::tune(): 1 minus the weighted F-measure, so that
# minimising this "error" maximises the weighted F1 score (see fmeasure below).
error.fun.wF <- function (true, predicted){
1 - fmeasure(predicted,true)
}
fmeasure <- function (predicted, true){
  # Class-frequency-weighted F1 score.
  #
  # predicted: predicted labels (coerced to the factor levels of `true`).
  # true: ground-truth labels as a factor.
  # Returns sum over classes of F1 * class frequency.
  predicted <- factor(predicted, levels(true))
  confusion <- as.matrix(table(Actual = true, Predicted = predicted))
  correct <- diag(confusion)           # correctly classified per class
  actual.counts <- rowSums(confusion)  # instances per class
  pred.counts <- colSums(confusion)    # predictions per class
  # Guard against division by zero when a class is never predicted / absent.
  precision <- ifelse(pred.counts == 0, 0, correct / pred.counts)
  recall <- ifelse(actual.counts == 0, 0, correct / actual.counts)
  f1 <- 2 * precision * recall / (precision + recall)
  f1[is.na(f1)] <- 0                   # 0/0 when precision == recall == 0
  sum(f1 * table(true) / length(true), na.rm = TRUE)
}
# Weighted F1 from an RWeka evaluation object: takes the per-class fMeasure
# column from `evaluation$detailsClass` and weights it by class frequency in
# `true`.
fmeasure.weka <- function( evaluation, true ){
f1 <- evaluation$detailsClass[,"fMeasure"]
sum(f1 * table(true) / length(true), na.rm = T)
}
# -------------------------------------------------------------
# BUILDING MODELS
# -------------------------------------------------------------
# Tune and evaluate a random forest on `dt` (must contain a `class` factor
# column) with 5-fold CV via e1071::tune. Returns a one-row data.table with
# the weighted F1, CV accuracy, the fitted model, and the indices of
# misclassified training instances.
randomForest.eval <- function (dt){
require(e1071)
require(randomForest)
set.seed(1)  # reproducible CV folds
tunecontrol <- tune.control(sampling = "cross", cross = 5)
rf.tunned <- tune(randomForest, class ~ . , data = dt,
proximity=TRUE, ntree = 500,
tunecontrol = tunecontrol)
best.model <- rf.tunned$best.model
best.performance <- rf.tunned$best.performance
data.table( algorithm = "randomForest",
fmeasure = fmeasure(best.model$predicted, dt$class),
Accuracy = 1-best.performance,  # tune() reports error rate
model = list(best.model),
incorrect.instances = list(instances = which( dt$class != best.model$predicted ))
)
# Variant of randomForest.eval with hard-coded hyperparameters
# (ntree = 200, mtry = 19) instead of the 500-tree default.
# NOTE(review): nearly duplicates randomForest.eval — could be merged into one
# function taking ntree/mtry as arguments.
randomForest.eval.x <- function (dt){
require(e1071)
require(randomForest)
set.seed(1)
tunecontrol <- tune.control(sampling = "cross", cross = 5)
rf.tunned <- tune(randomForest, class ~ . , data = dt,
proximity=TRUE, ntree = 200, mtry = 19,
tunecontrol = tunecontrol)
best.model <- rf.tunned$best.model
best.performance <- rf.tunned$best.performance
data.table( algorithm = "randomForest",
fmeasure = fmeasure(best.model$predicted, dt$class),
Accuracy = 1-best.performance,
model = list(best.model),
incorrect.instances = list(instances = which( dt$class != best.model$predicted ))
)
# Evaluate Weka's SMO (SVM) classifier on `dt` with 5-fold cross-validation.
# Returns a one-row data.table (weighted F1, accuracy, fitted model).
# Per-instance errors are not available from the Weka evaluation, hence NA.
weka.smo.eval <- function (dt){
require(RWeka)
require(rJava)
SMO <- make_Weka_classifier("weka/classifiers/functions/SMO")
model <- SMO (class ~ ., data = dt)
e <- evaluate_Weka_classifier(model, numFolds = 5, seed = 1, class = T)
data.table( algorithm = "weka.SMO",
fmeasure = fmeasure.weka(e, dt$class),
Accuracy = e$details["pctCorrect"] / 100,
model = list(model),
incorrect.instances = list(instances = NA)
)
# --------------------------------------------------------------------
# Evaluate Weka's J48 decision tree on `dt` with 5-fold cross-validation.
# Same return shape as weka.smo.eval.
weka.J48.eval <- function (dt){
require(RWeka)
require(rJava)
J48 <- make_Weka_classifier("weka/classifiers/trees/J48")
model <- J48 (class ~ ., data = dt)
e <- evaluate_Weka_classifier(model, numFolds = 5, seed = 1, class = T)
data.table( algorithm = "weka.J48",
fmeasure = fmeasure.weka(e, dt$class),
Accuracy = e$details["pctCorrect"] / 100,
model = list(model),
incorrect.instances = list(instances = NA)
)
# --------------------------------------------------------------------
# Evaluate Weka's Naive Bayes on `dt` with 5-fold cross-validation.
# Same return shape as weka.smo.eval.
weka.Bayes.eval <- function (dt){
require(RWeka)
require(rJava)
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
model <- NB (class ~ ., data = dt)
e <- evaluate_Weka_classifier(model, numFolds = 5, seed = 1, class = T)
data.table( algorithm = "weka.Bayes",
fmeasure = fmeasure.weka(e, dt$class),
Accuracy = e$details["pctCorrect"] / 100,
model = list(model),
incorrect.instances = list(instances = NA)
)
# --------------------------------------------------------------------
# Convert a fitted caret `train` object into the one-row data.table format
# used by the *.eval functions (weighted F1, accuracy, model, error indices).
# `name` overrides the algorithm label; defaults to caret's model label.
# Uses the held-out predictions saved by trainControl(savePredictions=...).
getCaretResult <- function( fit = NULL, name = NULL){
if ( is.null(fit) ) return()
truth <- fit$pred$obs
predicted <- fit$pred$pred
cm <- confusionMatrix( data = predicted, reference = truth, mode = "prec_recall" )
data.table( algorithm = ifelse(is.null(name), fit$modelInfo$label, name),
fmeasure = fmeasure( predicted, truth),
Accuracy = cm$overall[['Accuracy']],
model = list(fit$finalModel),
incorrect.instances = list(which( predicted != truth ))
)
# --------------------------------------------------------------------
# RANDOM FOREST
# --------------------------------------------------------------------
# Train a random forest through caret with mtry fixed at sqrt(#features)
# (the classification default) under the caller-supplied `fitcontrol`.
caret.rf.eval <- function (dt, fitcontrol){
require(caret)
set.seed(1)  # reproducible resampling
mtry <- sqrt(ncol(dt)-1)
tunegrid <- expand.grid(.mtry=mtry)
fit <- train(class ~ ., data = dt,
method = 'rf',
trControl = fitcontrol,
tuneGrid=tunegrid,
tuneLength=10,
proximity=TRUE,
verbose = FALSE)
getCaretResult(fit,"caret.rf")
}
# -------------------------------------------------------------------
# --------------------------------------------------------------------
# SVM
# --------------------------------------------------------------------
# Train an RBF-kernel SVM through caret, grid-searching sigma and cost over
# powers of two, under the caller-supplied `fitcontrol`.
caret.svm.eval <- function (dt, fitcontrol){
require(caret)
set.seed(1)
svmGrid <- expand.grid(sigma= 2^c(-25, -20, -15,-10, -5, 0), C= 2^c(0:5))
fit <- train(class ~ ., data = dt,
method = 'svmRadial',
tuneGrid = svmGrid,
trControl = fitcontrol,
proximity=TRUE,
verbose = FALSE)
getCaretResult(fit,"caret.svm")
}
# -----------------------------------------------------------------
# Tune an e1071 SVM over kernel/epsilon/cost with 5-fold CV, optimising the
# weighted F1 via error.fun.wF (so best.performance is 1 - weighted F1).
# Accuracy is NA because the custom error function replaces it.
svm.eval <- function (dt){
require(e1071)
set.seed(1)
tunecontrol <- tune.control(sampling = "cross", cross = 5, error.fun = error.fun.wF)
ranges <- list(ranges = list(kernel = c("radial","linear"), epsilon = seq(0,1,0.1), cost = 2^(1:10) ) )
tunned <- tune(svm, class ~ . , data = dt, proximity=TRUE, tunecontrol = tunecontrol, ranges = ranges)
best.model <- tunned$best.model
wF <- 1 - tunned$best.performance
data.table( algorithm = "svm",
fmeasure = wF,
Accuracy = NA,
model = list(best.model),
incorrect.instances = NA)
}
# --------------------------------------------------------------------
# Majority-class baseline ("ZeroR"): predict the most frequent class for every
# instance. A J48 tree is fit on zeroed-out features only so the returned row
# has a model object in the same shape as the other *.eval functions.
# NOTE(review): confusionMatrix() comes from caret, which is not loaded here —
# presumably the caller (models.eval) has attached it; confirm.
zero.eval <- function (dt){
require(RWeka)
truth <- dt$class
n <- length(truth)
predicted <- rep(names(which.max(table(truth))), n)
predicted <- factor(predicted, levels(truth))
dt[,-ncol(dt)] <- 0
model <- J48(class ~., dt)
cm <- confusionMatrix( data = predicted, reference = truth, mode = "prec_recall" )
data.table( algorithm = "zero",
fmeasure = fmeasure(predicted, truth),
Accuracy = cm$overall[['Accuracy']],
model = list(model),
incorrect.instances = list(which( truth != predicted ))
)
# --------------------------------------------------------------------
# Run a list of evaluation functions ("rf", "caret.svm", "zero", ...) on a
# training set, optionally scoring each fitted model on a test set, in
# parallel. Each name `x` in `alg` must correspond to a function `x.eval`
# defined in this file. Returns the row-bound data.table of their results,
# optionally with test-set F1/accuracy (or per-class F1 when per.class=TRUE).
models.eval <- function(tr = NULL, te = NULL, alg=NULL, per.class =F){
require(doParallel)
require(caret)
if (is.null(tr) | is.null(alg)){
warning("Please, provide dataset and a list of algorithms.")
return()
}
if (is.null(tr$class)){
warning("Please, provide class attribute in dataset.")
return()
}
fitControl <- trainControl(## 10-fold CV
method = "repeatedcv",
number = 5,
repeats = 10,
savePredictions = "final",
allowParallel = TRUE)
# NOTE(review): this second assignment silently discards the repeated-CV
# control built just above — confirm which trainControl is intended.
fitControl <- trainControl(savePredictions = "final", allowParallel = TRUE)
cl <- makeCluster(detectCores())
registerDoParallel(cl)
results <- rbindlist(
lapply(alg,function(x){
# Dispatch "<x>.eval" by name; eval(parse()) works but get()/match.fun()
# would be the safer idiom here.
model.eval <- eval(parse(text=paste0(x,".eval")))
if (grepl("caret",x))
result <- model.eval(tr,fitControl)
else
result <- model.eval(tr)
if (!is.null(te)){
# Score the fitted model on the held-out test set.
predicted <- predict(result$model[[1]],te)
truth <- te$class
cm <- confusionMatrix( data = predicted, reference = truth, mode = "prec_recall" )
if (!per.class)
result <- data.table(result,
fmeasureOnTest = fmeasure(predicted, truth),
AccuracyOnTest = cm$overall[['Accuracy']])
else
result <- data.table(result,
class = gsub( "Class: ", "", row.names(cm$byClass) ),
F1 = as.numeric(cm$byClass[,"F1"]) )
}
result
})
)
stopCluster(cl)
results
}
# --------------------------------------------------------------------
read.data <- function(DIR_PATH, iteration = NULL, file = "train.csv"){
  # Read one CSV per iteration subdirectory (DIR_PATH/<x>/<file>) and
  # column-bind them into a single feature table.
  #
  # DIR_PATH: base directory containing one subdirectory per iteration.
  # iteration: vector of subdirectory names/ids; NULL reads nothing and
  #   returns NULL (same as the original behaviour).
  # file: CSV file name inside each iteration directory.
  # Returns a data.frame whose last column is the factor `class` from the
  # first file read; later files contribute feature columns suffixed "_<x>",
  # prepended in reverse iteration order.
  #
  # Rewritten as a plain for loop: the original mutated `train` via `<<-`
  # from inside lapply() (a side-effect anti-pattern) and carried a dead
  # is.null(iteration) branch that could never run inside the loop.
  train <- NULL
  for (x in iteration) {
    train.i <- read.csv(paste0(DIR_PATH, "/", x, "/", file))
    if (is.null(train)) {
      # First file: keep its class column as the factor target.
      train.i$class <- as.factor(train.i$class)
      train <- train.i
    } else {
      # Later files: drop the duplicated class column (last column), suffix
      # feature names with the iteration id, and prepend the columns.
      cl.idx <- ncol(train.i)
      train.i <- subset(train.i, select = -c(cl.idx))
      names(train.i) <- paste0(names(train.i), "_", x)
      train <- cbind(train.i, train)
    }
  }
  train
}
|
0d2817a1fbf1c56457593e141c99d4949717ebf9
|
3b1f27002a1aad5f2a8acf88402ccc1c3b0cd014
|
/tf/run.fraig.all.r
|
480977daad45e28d59bcdd61b3a96d294a7e3c15
|
[] |
no_license
|
Splend1d/FRAIG
|
ea3c77636197ee9237ed8a1cc8fa61201a7aea0c
|
cd0a0319e2408775a0223f28ca20abfd0293c0e8
|
refs/heads/master
| 2020-12-18T19:05:11.837571
| 2020-04-05T10:52:19
| 2020-04-05T10:52:19
| 235,491,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
run.fraig.all.r
|
# Run the per-benchmark FRAIG test driver (./run.fraig.r) sequentially on
# test cases 01 through 15.
./run.fraig.r 01
./run.fraig.r 02
./run.fraig.r 03
./run.fraig.r 04
./run.fraig.r 05
./run.fraig.r 06
./run.fraig.r 07
./run.fraig.r 08
./run.fraig.r 09
./run.fraig.r 10
./run.fraig.r 11
./run.fraig.r 12
./run.fraig.r 13
./run.fraig.r 14
./run.fraig.r 15
|
c13c364f2e33b47a0b613f8b5b676ec3b6b3d395
|
7bcc9e2b4084aeef92947498343be7e35ef620dc
|
/man/fd_page.Rd
|
0d685399e5d697613b5039ce0130501dc27958e8
|
[] |
no_license
|
Jamamel/shinyfoundation
|
f406006cc8625725c29da85f2c34315f4235db10
|
a8018c538d34f21e7b728a924c43fbc73ae0490a
|
refs/heads/master
| 2022-03-14T01:03:21.447771
| 2019-10-15T21:08:37
| 2019-10-15T21:08:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 462
|
rd
|
fd_page.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/page.R
\name{fd_page}
\alias{fd_page}
\title{Basic Page for Foundation UI}
\usage{
fd_page(..., header = NULL, footer = NULL)
}
\arguments{
\item{...}{The contents of the document body.}
\item{header}{The contents of the document header}
\item{footer}{The contents of the document footer}
}
\description{
Creates a Shiny UI page that loads the CSS and JavaScript for Foundation.
}
|
0700cca98e8d55f72c92fded8d975bcaacd61763
|
a8223bdceb0a16fc162d6af299c9f830b01be66a
|
/TR_ORF_CRF.R
|
29cac02aba28e13d2a232be82e883862abbc3f09
|
[] |
no_license
|
xanthexu/ribosome-profiling
|
be24d8b32d2519a7aec8fb4852d06a0c6f8bf0f6
|
ab060f05bb72fe6a32812a5d9ea449cd237755a8
|
refs/heads/master
| 2020-03-21T19:53:39.116347
| 2018-06-28T06:42:45
| 2018-06-28T06:42:45
| 138,976,194
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,703
|
r
|
TR_ORF_CRF.R
|
#!/usr/bin/env Rscript
###################################################################
# This file is part of RiboWave.
# RiboWave is powerful Ribo-seq analysis tool that is able to
# denoise the Ribo-seq data and serve for multiple functions.
#
# RiboWave can be used for multiple purposes:
# 1. denoise the raw Ribo-seq data
# 2. define translated ORFs
# 3. estimate the abundance of actively elongating ribosomes
# 4. estimate translation efficiency(TE)
# 5. identify potential frameshift candidates
#
# Author: Zhiyu Xu, Long Hu
#
# Copyright (C) 2017 Zhiyu Xu
#
# RiboWave is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Contact: xanthexu18@gmail.com
#######################################################################
# Usage: TR_ORF_CRF.R <psite file> <output file> <script dir>
# Streams the per-transcript P-site file line by line, detects reading-frame
# gaps within each ORF, and writes one tab-separated row per candidate gap.
# Helper functions (my_brkPots, my_gap, my_pattern, my_position,
# my_calculation) come from the sourced function.R / CRF_function.R.
Args = commandArgs(TRUE);
inputF1 = Args[1];#the transcript psite
outputF1= Args[2];#the output;
options(scipen=1000);
options(warn=-1);
library(parallel);
source(paste(Args[3],"function.R",sep = "/"));
source(paste(Args[3],"CRF_function.R",sep = "/"));
######################################## sub functions
######## pre_stored ORFs:
INid = file(inputF1,"r");
IN = readLines(INid,n=1);
# Write the header row, truncating any existing output file.
write.table(paste('ORF','gap_start','gap_stop','pattern_after','position_after','CRF',sep='\t',collapse='\t'),outputF1,sep='\t',col.names=F,row.names=F,quote=F,append=F);
# Process one ORF record per input line until EOF.
while(length(IN) != 0){
tmp = unlist(strsplit(IN,split="\t",fixed=T));
TRID = tmp[1];
frame = as.numeric(tmp[2]);
start = as.numeric(tmp[3]);
stop = as.numeric(tmp[4]);
# comma-separated per-position signal along the transcript
high3nt = as.numeric(unlist(strsplit(tmp[5],split=",",fixed=T)));
chk_ORF = as.numeric(tmp[2:4]);
ID3nt = which(high3nt>0);
threent_start = min(ID3nt);
threent_stop = max(ID3nt);
frms = (ID3nt+2)%%3;  # reading frame of each signal position
brkPots = my_brkPots(frms);
if(brkPots>0){
# Frame breaks exist: locate gaps and keep those inside the ORF body.
gaps = my_gap(ID3nt,frms);
gap_start = gaps$gap_start;
gap_stop = gaps$gap_stop;
ID_tmp = intersect(which(gap_start > start), which(gap_start < stop));
gap_start = gap_start[ID_tmp];
gap_stop = gap_stop[ID_tmp];
if(length(gap_start)>0){
for (k in 1:length(gap_start)){
gap_start_tmp = gap_start[k];
gap_stop_tmp = gap_stop[k];
# Characterise the frame pattern downstream of the gap.
ID3nt_after = ID3nt[ID3nt>gap_stop_tmp];
frms_after = (ID3nt_after+2)%%3;
pattern_after = my_pattern(frms_after);
position_after = my_position(ID3nt_after,frms_after);
gap_out = my_calculation(high3nt,frame,gap_start_tmp,gap_stop_tmp,start,threent_stop);
if(!is.na(gap_out)){
line=paste(paste(TRID,frame,start,stop,sep="_",collapse="_"),gap_start_tmp,gap_stop_tmp,pattern_after,position_after,gap_out,sep='\t',collapse='\t');
write.table(line,outputF1,sep='\t',col.names=F,row.names=F,quote=F,append=T);
}
}
}
}
IN = readLines(INid,n=1);
}
####
close(INid);
rm(list=ls());
|
953c9c1d9227cc9f424ea4553dd0b370a9a3e9b1
|
c56511aab076a84c78e5449156480180245c0b51
|
/Wine Project.R
|
f20430da1ed8c5fed5a67ed2a946ed8625a96a49
|
[] |
no_license
|
davidkeim/Wine-Project
|
21069bc1f173458b78d851810a30169abbbcc67e
|
10af582e6b26ecd0e15ef13630dada6aaacc38e6
|
refs/heads/master
| 2020-12-01T00:56:13.084054
| 2019-12-30T02:39:33
| 2019-12-30T02:39:33
| 230,527,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,338
|
r
|
Wine Project.R
|
if (!require(dplyr)) install.packages('dplyr')
library(dplyr)
if (!require(ggplot2)) install.packages('ggplot2')
library(ggplot2)
if (!require(ggthemes)) install.packages('ggthemes')
library(ggthemes)
if (!require(caret)) install.packages('caret')
library(caret)
if (!require(e1071)) install.packages('e1071')
library(e1071)
options(digits = 3)
#Wine Data from UCI
winequality.white <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv", sep=";")
winequality.red <- read.csv("https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv", sep=";")
#Explore the Dataset's Structure
str(winequality.white)
str(winequality.red)
#Merge Datasets
winequality.white <- data.frame(winequality.white, color = "white")
winequality.red <- data.frame(winequality.red, color = "red")
wine <- rbind(winequality.white, winequality.red)
#Turn Quality into a Factor
wine$quality = as.factor(wine$quality)
str(wine)
#Split into Training and Test Sets
set.seed(1, sample.kind="Rounding")
test_index <- createDataPartition(y = wine$quality, times = 1, p = 0.1, list = FALSE)
train_set <- wine[-test_index,]
test_set <- wine[test_index,]
#Explore Training Dataset
str(train_set)
avg_rating <- mean(wine$quality)
# ---- Exploratory plots: distribution of quality, then one boxplot of each
# predictor against quality, each saved to its own PNG. ----
# NOTE(review): chaining ggsave() onto a plot with "+" relies on ggsave being
# evaluated for its side effect; on current ggplot2 "plot + <string>" may
# error. Confirm, and prefer a separate ggsave(last_plot(), ...) call.
train_set %>% ggplot(aes(quality)) + geom_bar() +
ggtitle("Quality Ratings") +
theme_economist() +
ggsave(filename = "./figure01.png")
#Visualization of Variables to spot if any are impactful
train_set %>% ggplot(aes(quality, fixed.acidity)) + geom_boxplot(aes(group=quality)) +
ggtitle("Fixed Acid Levels by Quality") + theme_economist() +
ggsave(filename = "./figure02.png")
train_set %>% ggplot(aes(quality, volatile.acidity)) + geom_boxplot(aes(group=quality)) +
ggtitle("Volatile Acids Levels by Quality") + theme_economist()+
ggsave(filename = "./figure03.png")
train_set %>% ggplot(aes(quality, citric.acid)) + geom_boxplot(aes(group=quality))+
ggtitle("Citric Acid Levels by Quality") + theme_economist()+
ggsave(filename = "./figure04.png")
train_set %>% ggplot(aes(quality, residual.sugar)) + geom_boxplot(aes(group=quality))+
ggtitle("Sugar Levels by Quality") + theme_economist()+
ggsave(filename = "./figure05.png")
train_set %>% ggplot(aes(quality, chlorides)) + geom_boxplot(aes(group=quality))+
ggtitle("Chloride Levels by Quality") + theme_economist()+
ggsave(filename = "./figure06.png")
train_set %>% ggplot(aes(quality, free.sulfur.dioxide)) + geom_boxplot(aes(group=quality))+
ggtitle("Free SO2 Levels by Quality") + theme_economist()+
ggsave(filename = "./figure07.png")
train_set %>% ggplot(aes(quality, total.sulfur.dioxide)) + geom_boxplot(aes(group=quality))+
ggtitle("Total SO2 Levels by Quality") + theme_economist()+
ggsave(filename = "./figure08.png")
train_set %>% ggplot(aes(quality, density)) + geom_boxplot(aes(group=quality))+
ggtitle("Density Levels by Quality") + theme_economist()+
ggsave(filename = "./figure09.png")
train_set %>% ggplot(aes(quality, pH)) + geom_boxplot(aes(group=quality))+
ggtitle("pH Levels by Quality") + theme_economist()+
ggsave(filename = "./figure10.png")
train_set %>% ggplot(aes(quality, sulphates)) + geom_boxplot(aes(group=quality))+
ggtitle("Sulphate Levels by Quality") + theme_economist()+
ggsave(filename = "./figure11.png")
train_set %>% ggplot(aes(quality, alcohol)) + geom_boxplot(aes(group=quality))+
ggtitle("Alcohol Levels by Quality") + theme_economist()+
ggsave(filename = "./figure12.png")
# Naive baseline: accuracy of always predicting the modal class "6".
#Simply Guess 6 for everything
mean(train_set$quality == "6")
# Try KNN (10-fold CV) to see accuracy as a first modeling baseline.
fit_knn <- train(quality ~ . , method = "knn" , data = train_set, trControl = trainControl(method = "cv", number = 10))
fit_knn
# Recursive feature elimination: CV accuracy per feature-subset size plus
# variable importance. Columns 1:11 are the predictors, column 12 is quality.
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
rfe_train <- rfe(train_set[,1:11], train_set[,12], sizes=c(1:11), rfeControl=control)
rfe_train
predictors(rfe_train)
plot(rfe_train, type=c("g", "o"))
# Try a wide panel of model families using the top 5 factors from RFE.
models <- c("lda", "treebag", "naive_bayes", "knn", "svmLinear", "gamLoess", "multinom", "rf", "lvq", "gbm", "svmRadial", "C5.0")
set.seed(1, sample.kind = "Rounding")
fits_cv <- lapply(models, function(model){
print(model)
train(quality ~ alcohol + volatile.acidity + free.sulfur.dioxide + sulphates + chlorides, method = model, data = train_set, trControl = trainControl(method = "cv", number = 10))
})
results <- resamples(fits_cv)
summary(results)
dotplot(results)
# Trim to the top 5 models, still using the top 5 factors.
models_top_5 <- c("treebag", "rf", "gbm", "svmRadial", "C5.0")
set.seed(1, sample.kind = "Rounding")
fits_top_5 <- lapply(models_top_5, function(model){
print(model)
train(quality ~ alcohol + volatile.acidity + free.sulfur.dioxide + sulphates + chlorides, method = model, data = train_set, trControl = trainControl(method = "cv", number = 10))
})
results_top_5 <- resamples(fits_top_5)
summary(results_top_5)
dotplot(results_top_5)
# Compare top-5-factor results against models trained on all predictors.
models_5_12 <- c("treebag", "rf", "gbm", "svmRadial", "C5.0")
set.seed(1, sample.kind = "Rounding")
fits_5_12 <- lapply(models_5_12, function(model){
print(model)
train(quality ~ ., method = model, data = train_set, trControl = trainControl(method = "cv", number = 10))
})
results_5_12 <- resamples(fits_5_12)
summary(results_5_12)
dotplot(results_5_12)
# See if models with good results are over-correlated with each other.
cor_5_12 <- modelCor(results_5_12)
heatmap(cor_5_12)
# Build a stacked ensemble with random forest as the meta-learner.
# BUG FIX: caretList()/caretStack() come from the caretEnsemble package,
# which was never loaded, and stackControl was used without being defined —
# both caused the error the original comment mentions.
if (!require(caretEnsemble)) install.packages('caretEnsemble')
library(caretEnsemble)
stackControl <- trainControl(method = "cv", number = 10, savePredictions = "final")
set.seed(1, sample.kind = "Rounding")
ensemble_models <- caretList(quality~ . -color, data=train_set, trControl = trainControl(method = "cv", number = 10), methodList=models_top_5)
ensemble_rf <- caretStack(ensemble_models, method="rf", metric="Accuracy", trControl=stackControl)
ensemble_rf
fit_rf <- train(quality ~ . -color, method = "rf", data = train_set, trControl = trainControl(method = "cv", number = 10))
# ---- Tune the weakest model (svmRadial). Very slow; the original wide grid
# is kept as a comment and only the winning point is refit. ----
set.seed(1, sample.kind = "Rounding")
# Intial tuning grid. Skipping on reruns. sr_grid <- expand.grid(sigma = 2^c(-25, -20, -15,-10, -5, 0), C = 2^c(6:10))
sr_grid <- expand.grid(sigma = 1, C = 128)
fit_sr <- train(quality ~ .-color, method = "svmRadial", data = train_set,
trControl = trainControl(method = "cv", number = 10),
tuneGrid = sr_grid)
#Optimal - sigma = 1 and c = 128 acc - 65% from 57%
# ---- Tune the second-weakest model (gbm); grid split in half for runtime. ----
#Tune the Second Worst Model - This takes even longer - Splitting in Half
set.seed(1, sample.kind = "Rounding")
gbm_grid <- expand.grid(interaction.depth = c(1),
n.trees = (0:21)*50,
shrinkage = seq(.0005, .05,.005),
n.minobsinnode = 5)
fit_gbm <- train(quality ~ .-color, method = "gbm", data = train_set,
trControl = trainControl(method = "cv", number = 10),
tuneGrid = gbm_grid)
#Second half of GBM tuning - This one is better than the first half
set.seed(1, sample.kind = "Rounding")
# Intial tuning grid. Skipping on reruns. - gbm_grid_2 <- expand.grid(interaction.depth = c(3), n.trees = (0:21)*50, shrinkage = seq(.0005, .05,.005), n.minobsinnode = 5)
gbm_grid_2 <- expand.grid(interaction.depth = c(3), n.trees = 1050, shrinkage = 0.0405, n.minobsinnode = 5)
fit_gbm_2 <- train(quality ~ .-color, method = "gbm", data = train_set,
trControl = trainControl(method = "cv", number = 10),
tuneGrid = gbm_grid_2)
#Optimal - interaction.depth = 3 and n.trees = 1050 and shrinkage = 0.0405 acc - 61% from 58.4%
# Helper: apply a fitted model to the held-out test set.
fit_function <- function(fit) {predict(fit, newdata = test_set)}
# Helper: majority vote — the most frequent prediction across models.
# NOTE(review): on a tie this returns whichever value appears first; the
# ensemble result therefore depends on column order — confirm that is intended.
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
# ---- Final manual voting ensembles, swapping tuned models in and out. ----
#Run the Final Ensemble the Manual Way with an improved svmRadial and improved GBM
preds_orig <- sapply(fits_5_12, fit_function)
preds_sr <- predict(fit_sr, newdata = test_set)
preds_gbm <- predict(fit_gbm_2, newdata = test_set)
preds_all <- cbind(preds_orig, as.matrix(preds_sr), as.matrix(preds_gbm))
preds_final <- preds_all[,-c(3:4)]
preds_vote_final <- apply(preds_final, 1, getmode)
mean(preds_vote_final == test_set$quality)
#Accuracy - 70.9%
#Run the Ensemble the Manual Way
preds_2 <- preds_all[,-c(6:7)]
preds_vote_2 <- apply(preds_2, 1, getmode)
mean(preds_vote_2 == test_set$quality)
#Accuracy - 70.6%
#Run the Ensemble the Manual Way with an improved svmRadial
preds_3 <- preds_all[,-c(4,7)]
preds_vote_3 <- apply(preds_3, 1, getmode)
mean(preds_vote_3 == test_set$quality)
#Accuracy 70.9%
#Run the initial 5 Model with 5 Factors
preds_1 <- predict(fits_top_5, newdata = test_set)
preds_vote_1 <- apply(preds_1, 1, getmode)
mean(preds_vote_1 == test_set$quality)
# Summary plot of accuracies (values hard-coded from the runs above).
accuracies <- data.frame(method = c("Mean", "KNN", "RF", "Ens_1","Ens_2", "Ens_3", "Ens_F"), rates = c( 43.7, 47.5, 67.7, 70.6, 70.6, 70.9, 70.9))
accuracies$method <- factor(accuracies$method, levels = accuracies$method)
accuracies %>% ggplot(aes(method, rates, label = rates)) + geom_point() +
ggtitle("Accuracy by Method") + theme_economist() + geom_text(nudge_y = 1) + ggsave(filename = "./figure16.png")
|
192018c07f480c66a7a76dcea0279e000801e4f7
|
bcaaeb83862dab0045fd83ce033c65f373ffad7d
|
/dailydata/data_scripts/filter_ops.R
|
144da9e09e2bc301d797b2565c08ecf7e3244daa
|
[] |
no_license
|
anthonymorast/lstm-lstm
|
7481df0f179740357a489a0497a29f4a25cfb55f
|
5eadfbdab32aa5143a2b63856cdcebb1b660b64a
|
refs/heads/master
| 2021-03-22T05:11:46.390172
| 2019-03-31T22:08:38
| 2019-03-31T22:08:38
| 112,763,473
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
filter_ops.R
|
##
## Below are the operations performed to read and filter the data to contain the
## same rows as the forex data frame. This example uses silver and applies to PERTH
## datasets.
##
# silver <- read.csv("commodities/PERTH-SLVR_USD_D.csv")
# silver.filter <- silver[silver$Date %in% forex$Date, ]
# plot(silver$Bid.Average)
# silver$Date <- as.Date(silver$Date, format="%Y-%m-%d")
# silver.filter <- silver.filter[, -8]
# silver.filter <- silver[silver$Date %in% forex$Date, ]
# silver.filter2 <- silver.filter
#
# after this we run the <var>_to_forex.R scripts
### PLOTTING (using gold now)
# plot(gold.filter$Bid.Average, forex$EUR.USD, col="blue", xlab="Gold Prices (Avg. Bid)",
# ylab="EUR/USD", main="Gold Prices (USD) vs. EUR/USD")
#
# cor(gold.filter$Bid.Average, forex$EUR.USD)
# gold.lm <- lm(forex$EUR.USD~gold.filter$Bid.Average)
# abline(gold.lm, col="green", lwd=2)
#
# Finally write a new CSV with the (good) filtered data
# write.csv(silver.filter, "commodities/real_silver.csv")
|
375fa2f865f6e923a49cd216860dc79506df49af
|
35d5f82fc5fa6ea9783b7a8e50c06703bee649d6
|
/processEngineeredFeatures.R
|
a2d24bb814ece8d238ac786b7ddad136099aade3
|
[] |
no_license
|
tboats/AirBNB-competition
|
cee54d9df3e2c03df535d8d614b6b2151f86cdee
|
d2f376afe7b0ccda349f4aa0b85f1b13bcf50104
|
refs/heads/master
| 2021-01-10T02:53:59.220497
| 2016-03-07T06:52:28
| 2016-03-07T06:52:28
| 53,273,582
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,389
|
r
|
processEngineeredFeatures.R
|
## Goal: apply cross validation to determine tuning parameters
###########################################################################
## load libraries
library(xgboost)
library(stringr)
library(caret)
library(plyr)
library(dplyr)
library(lubridate)
source('E:/Dropbox/R/general/timestamp.R')
# Custom xgboost evaluation metric: mean NDCG@5.
# preds is the flat multi:softprob output; it is reshaped to one column per
# observation, the five highest-scoring classes (0-based) are taken per
# observation, and each hit against the true label is discounted by rank.
ndcg5 <- function(preds, dtrain) {
  labels <- getinfo(dtrain, "label")
  n_class <- length(unique(labels))
  # One column of class probabilities per observation.
  prob_mat <- matrix(preds, nrow = n_class)
  # Top-5 predicted classes per observation, converted to 0-based ids.
  top5 <- t(apply(prob_mat, 2, function(p) order(p)[n_class:(n_class - 4)] - 1))
  # 1 where a top-5 prediction equals the true label, 0 elsewhere.
  hits <- ifelse(top5 == labels, 1, 0)
  # Discounted cumulative gain over one row of relevances.
  gain <- function(rel) sum((2^rel - 1) / log(2:(length(rel) + 1), base = 2))
  list(metric = "ndcg5", value = mean(apply(hits, 1, gain)))
}
set.seed(1)
###########################################################################
## Load the engineered features (X), labels (y) and test features, then
## attach per-destination-country metadata (distance, language) to be used
## as intermediate prediction targets.
X <- read.csv("X.csv")
y <- read.csv("y.csv")
X_test <- read.csv("X_test.csv")
## convert y to data matrix
y <- data.matrix(y)
###########################################################################
## merge training data with country data
# grab country labels from original data set
df_train <- read.csv("../data/train_users_2.csv")
labels2 <- df_train$country_destination
ydf <- data.frame(country_destination = labels2)
df <- cbind(ydf, X)
# load countries data
df_countries <- read.csv("../data/countries.csv")
# merge the countries data with training data (left join keeps every user)
df <- merge(df, df_countries, by = "country_destination", all.x = TRUE, sort = FALSE)
# Users with no booked destination get distance 0 and English as language.
df[is.na(df$distance_km),"distance_km"] <- 0
df[which(is.na(df$destination_language)),"destination_language"] <- "eng"
# save the columns of interest as intermediate labels
label_int <- df$distance_km # intermediate label: destination distance
label_lang <- df$destination_language # intermediate label: destination language
# remove all columns that came from the countries table before modeling
df <- df[, !(names(df) %in% names(df_countries))]
###########################################################################
## Boost an xgboost multi-class model that predicts the destination
## language, then append its class probabilities to X as new features.
trainFraction <- 0.8
nClasses <- length(unique(label_lang))
# split into training and validation (stratified on the distance label)
trainIndex <- createDataPartition(label_int, p = trainFraction, list = FALSE)
df_tr <- df[trainIndex,]
df_val <- df[-trainIndex,]
# xgboost wants 0-based integer class labels.
label_tr <- as.integer(as.numeric(label_lang)[trainIndex]-1)
label_val <- as.integer(as.numeric(label_lang)[-trainIndex]-1)
label_val_fact <- as.character(label_lang[-trainIndex])
# factor level names, used to name the probability columns later
lang_levels <- levels(label_lang)
# table(label_tr)
# label_tr_num <- as.numeric(label_tr)
# table(label_tr_num)
# label_tr_fact <- factor(label_tr_num)
# levels(label_tr_fact) <- lang_levels
# table(label_tr_fact)
# Column 1 of df_tr is the id/destination column and is dropped from the
# design matrix throughout.
xgb_lang <- xgboost(data = data.matrix((df_tr[,-1])), #data.matrix(X_tr), #data.matrix(X_tr[,-1]),
label = data.matrix(label_tr),
eta = 0.1,
max_depth = 6,
objective = "multi:softprob",
nthread = 3,
nrounds = 80, #originally 25
seed = 1,
eval_metric = "mlogloss", #ndcg5, #eval_metric,#ndcg5,#"mlogloss",#"merror",
num_class = nClasses
)
if (trainFraction < 1){
pred_lang <- predict(xgb_lang, data.matrix(df_val[,-1]))
# extract the N classes with highest probabilities and save to data frame
predictions <- as.data.frame(matrix(pred_lang, nrow=nClasses))
pred_val <- data.frame(t(predictions))
names(pred_val) <- lang_levels
# add in IDs
X2 <- cbind(df_val$id, pred_val)
names(X2)[1] <- "id"
# predict on full data set and write to csv
# NOTE(review): X2 built from df_val above is immediately overwritten here
# by the full-data version — confirm the validation copy is not needed.
pred_lang <- predict(xgb_lang, data.matrix(df[,-1]))
predictions <- as.data.frame(matrix(pred_lang, nrow=nClasses))
pred_val <- data.frame(t(predictions))
names(pred_val) <- lang_levels
# add in IDs
X2 <- cbind(df$id, pred_val)
names(X2)[1] <- "id"
# write to csv
X2 <- join(X, X2)
write.csv(X2, "X_lang.csv")
# pull out the top prediction for basic evaluation of model
predictions_top1 <- as.vector(apply(pred_val, 1, function(x) names(sort(x, decreasing = TRUE)[1])))
# compute accuracy
pred_acc <- sum(predictions_top1 == label_val_fact)/length(label_val_fact)
print(pred_acc)
table(label_val_fact)/length(label_val_fact)
}
####################################################################################
## Apply the language model to the test set and append its probabilities
## as features, mirroring what was done for the training data.
pred_lang_test <- predict(xgb_lang, data.matrix(X_test[,-1]))
# extract the N classes with highest probabilities and save to data frame
predictions <- as.data.frame(matrix(pred_lang_test, nrow=nClasses))
pred_test <- data.frame(t(predictions))
names(pred_test) <- lang_levels
# add in IDs
X_test2 <- cbind(X_test$id, pred_test)
names(X_test2)[1] <- "id"
# write to csv
X_test2 <- join(X_test, X_test2)
write.csv(X_test2, "X_test_lang.csv")
###########################################################################
## Gradient-boosted regression predicting distance_km as a second
## intermediate feature; evaluated informally on a validation split.
trainFraction <- 0.8
trainIndex <- createDataPartition(label_int, p = trainFraction, list = FALSE)
df_tr <- df[trainIndex,]
df_val <- df[-trainIndex,]
label_tr <- label_int[trainIndex]
label_val <- label_int[-trainIndex]
params <- list(
eta = 0.1,
max_depth = 6,
objective = "reg:linear",
nthread = 3
)
xgb_distanceReg <- xgboost(data = data.matrix(df_tr[,-1]), #data.matrix(X_tr), #data.matrix(X_tr[,-1]),
label = label_tr,
params = params,
nrounds = 80
)
## check on validation set: eyeball predicted vs. actual distances
distancePred <- predict(xgb_distanceReg, data.matrix(df_val[,-1])) #data.matrix(X_val))#data.matrix(X_val[,-1]))
dist <- data.frame(distance = label_val, predictedDistance = distancePred)
head(dist,100)
distForeign <- filter(dist, distance > 0)
tail(distForeign,25)
# Retained for reference: cross-validation run for the distance regression.
# cv <- xgb.cv(data = data.matrix(df[,-1]),#data.matrix(X_tr[,-1]),
#              label = label_int,
#              params = params,
#              nrounds = 150,
#              prediction = TRUE,
#              nfold = 5
#              )
# ts <- timestamp()
# write.csv(data.frame(cv$dt), paste("dt_distanceRegression_",ts,'.csv',sep=""))
# eta = eta,
# max_depth = max_depth, #originally 9
# nrounds = nround, #originally 25
# #subsample = subsample,#0.5,
# #colsample_bytree = colsample_bytree, #0.5
# #seed = 1,
# #eval_metric = eval_metric, #ndcg5, #eval_metric,#ndcg5,#"mlogloss",#"merror",
# objective = objective,
# lambda = lambda,
# alpha = alpha,
# gamma = gamma,
# num_class = 12,
# nthread = 3
|
a7f5e83714e1fb7deed77ec899b77385fda84f3f
|
e2b59ecade22df23781a7f6d2bd6bb559d4b01bc
|
/R/boxplot.R
|
6f211a314c720736df5351082dca032014fb4a91
|
[
"MIT"
] |
permissive
|
artedison/ensRadaptor
|
2838d1740b5add8b1f348b14a1cc576e571bb789
|
bf3911a29812120cfe3965148a9e21f98e350212
|
refs/heads/master
| 2021-08-20T08:01:24.766312
| 2020-12-07T01:09:18
| 2020-12-07T01:09:18
| 232,164,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,086
|
r
|
boxplot.R
|
#' boxplot of multiple species and different experiments
#'
#' can be parameters or intial value
#' prior range of species can be also presented
#'
#' @param list.spe list. data.list. must be provided
#' @param path string. result location. must be provided
#' @param rank array. rank of presenting the data. must be provided
#' @param xlab string. x axis. default "x"
#' @param ylab string. y axis. default "y"
#' @param list.prior.part list. the prior range. default NULL
#' @param logtrans bool. whether use log transform. default FALSE
#' @return just plot no return
#' @export
#' @seealso [boxplot_multi_exp_comb()]
#' @import ggplot2 stringr scales
boxplot_multi_exp<-function(list.spe=NULL,path=NULL,rank=NULL,xlab="x",ylab="y",list.prior.part=NULL,logtrans=FALSE){
# Validate required arguments early.
if(is.null(rank)){
stop("please provide rank of xlab")
}
if(is.null(list.spe)){
stop("please provide input data list")
}
if(is.null(path)){
stop("please provide output path")
}
# Flatten the nested list into a long matrix: value | species name | experiment.
list.tab=sapply(names(list.spe),simplify=FALSE,function(name){
temptab=list.spe[[name]]
temp=cbind(unlist(temptab),rep(names(temptab[[1]]),times=length(temptab)))
cbind(temp,rep(name,times=dim(temp)[1]))
})
tab=Reduce("rbind",list.tab)
# Strip trailing "_<digits/r>" from species names and keep rows in the order
# given by `rank`.
temp_pattern=str_replace(string=tab[,2],pattern="\\_[\\dr]+$",replacement="")
tab_list=sapply(rank,simplify=FALSE,function(x){
tab[temp_pattern==x,]
})
tab=Reduce("rbind",tab_list)
rownames(tab)=NULL
tab=as.data.frame(tab)
colnames(tab)=c("val","type","exp")
tab[,3]=as.factor(tab[,3])
tab[,3]=ordered(tab[,3],levels=unique(tab[,3]))
ecid=str_replace_all(string=tab[,2],pattern="\\_\\d+$",replacement="")
tab[,2]=factor(ecid,levels=unique(ecid))
tab[,1]=as.numeric(tab[,1])
# if(logtrans){
# tab[,1]=log10(tab[,1])
# }
# Main panel: one boxplot per species, colored by experiment.
p<-ggplot(data=tab,aes(x=type,y=val,color=exp))+
geom_boxplot(outlier.alpha=0.05)+
xlab(xlab)+
ylab(ylab)+
theme_bw()+
theme(axis.text.x=element_text(angle=90,hjust=1))
if(logtrans){
# Log10 y-axis with 8 breaks spanning the observed value range.
breaks=10^seq(log10(min(tab[,"val"])),log10(max(tab[,"val"])),length.out=8)
p=p+scale_y_log10(breaks=breaks,labels=scientific)
# p=p+scale_y_log10(breaks=round(10^seq(log10(min(tab[,"val"])),log10(max(tab[,"val"])),length.out=5),6),labels=scientific)
}
# Optional second panel showing the prior low/high bounds per species.
if(!is.null(list.prior.part)){
list.tab.add=sapply(names(list.spe),simplify=FALSE,function(name){
temptab=list.prior.part[[name]]
temp=cbind(temptab,rownames(temptab))
cbind(temp,rep(name,times=dim(temp)[1]))
})
tab.add=Reduce("rbind",list.tab.add)
# NOTE(review): the next four lines refilter `tab` (already filtered above)
# rather than `tab.add` — this looks like it was meant to subset tab.add by
# `rank`; confirm intent before changing.
temp_pattern=str_replace(string=tab[,2],pattern="\\_[\\dr]+$",replacement="")
tab_list=sapply(rank,simplify=FALSE,function(x){
tab[temp_pattern==x,]
})
tab=Reduce("rbind",tab_list)
rownames(tab.add)=NULL
tab.add=as.data.frame(tab.add)
colnames(tab.add)=c("low","high","type","exp")
tab.add[,4]=as.factor(tab.add[,4])
tab.add[,3]=as.factor(str_replace_all(string=tab.add[,3],pattern="\\_\\d+$",replacement=""))
# tab.add[,1]=log10(as.numeric(tab.add[,1]))
# tab.add[,2]=log10(as.numeric(tab.add[,2]))
tab.add[,1]=as.numeric(tab.add[,1])
tab.add[,2]=as.numeric(tab.add[,2])
p2<-ggplot(data=tab.add)+
geom_point(aes(x=type,y=low,color=exp))+
geom_point(aes(x=type,y=high,color=exp))+
xlab(xlab)+
ylab(ylab)+
theme_bw()+
theme(axis.text.x=element_text(angle=90,hjust=1))
if(logtrans){
# p2=p2+scale_y_log10(breaks=round(10^seq(log10(min(tab.add[,"low"])),log10(max(tab.add[,"high"])),length.out=5),4),labels=scientific)
breaks=10^seq(log10(min(tab[,"val"])),log10(max(tab[,"val"])),length.out=8)
p2=p2+scale_y_log10(breaks=breaks,labels=scientific)
}
# NOTE(review): plot_grid() is from cowplot, which is not in the roxygen
# @import list above — confirm cowplot is a declared package dependency.
p=plot_grid(p,p2,labels=c("result", "prior"),nrow=2)
}
# p
ggsave(plot=p,file=path,width=14,height=7)
}
#' boxplot of multiple species and different experiments
#'
#' can be parameters or intial value
#' prior range of species can be also presented
#' the differences from boxplot.multi.exp is that this funciton will plot prior and result side by side
#'
#' @param list.spe list. list of data. must be provided
#' @param path string. result location. must be provided
#' @param rank array. rank of presenting the data. must be provided
#' @param xlab string. x axis. default "x"
#' @param ylab string. y axis. default "y"
#' @param list.prior.part list. the prior range. default NULL.
#' @param logtrans bool. whether use log transform. default FALSE.
#' @return just plot no return
#' @export
#' @seealso [boxplot_multi_exp()]
#' @import ggplot2 stringr scales
boxplot_multi_exp_comb<-function(list.spe=NULL,path=NULL,rank=NULL,xlab="x",ylab="y",list.prior.part=NULL,logtrans=FALSE){
# Validate required arguments early.
if(is.null(rank)){
stop("please provide rank of xlab")
}
if(is.null(list.spe)){
stop("please provide input data list")
}
if(is.null(path)){
stop("please provide output path")
}
# Flatten the nested list into a long matrix: value | species name | experiment.
list.tab=sapply(names(list.spe),simplify=FALSE,function(name){
temptab=list.spe[[name]]
temp=cbind(unlist(temptab),rep(names(temptab[[1]]),times=length(temptab)))
cbind(temp,rep(name,times=dim(temp)[1]))
})
tab=Reduce("rbind",list.tab)
# Strip trailing "_<digits/r>" from species names and keep rows in the order
# given by `rank`.
temp_pattern=str_replace(string=tab[,2],pattern="\\_[\\dr]+$",replacement="")
tab_list=sapply(rank,simplify=FALSE,function(x){
tab[temp_pattern==x,]
})
tab=Reduce("rbind",tab_list)
rownames(tab)=NULL
tab=as.data.frame(tab)
colnames(tab)=c("val","type","exp")
tab[,3]=as.factor(tab[,3])
tab[,3]=ordered(tab[,3],levels=unique(tab[,3]))
ecid=str_replace_all(string=tab[,2],pattern="\\_\\d+$",replacement="")
tab[,2]=factor(ecid,levels=unique(ecid))
tab[,1]=as.numeric(tab[,1])
###prior
# Build the prior low/high table, restricted to the species present in the
# result data (NOTE: unlike boxplot_multi_exp, priors are subset up front here).
list.tab.add=sapply(names(list.spe),simplify=FALSE,function(name){
temptab=list.prior.part[[name]]
namessele=names(list.spe[[1]][[1]])
namessele=str_replace(string=namessele,pattern="\\_\\d+$",replacement="")
temptab=temptab[namessele,]
temp=cbind(temptab,rownames(temptab))
cbind(temp,rep(name,times=dim(temp)[1]))
})
tab.add=Reduce("rbind",list.tab.add)
rownames(tab.add)=NULL
tab.add=as.data.frame(tab.add)
colnames(tab.add)=c("low","high","type","exp")
tab.add[,4]=as.factor(tab.add[,4])
tab.add[,3]=as.factor(str_replace_all(string=tab.add[,3],pattern="\\_\\d+$",replacement=""))
tab.add[,1]=as.numeric(tab.add[,1])
tab.add[,2]=as.numeric(tab.add[,2])
# Single combined panel: result boxplots with prior bounds overlaid in red.
p<-ggplot(data=tab,aes(x=type,y=val),color="blue")+
geom_boxplot(outlier.alpha=0.05,outlier.color="grey70")+
geom_point(data=tab.add,aes(x=type,y=low),color="red")+
geom_point(data=tab.add,aes(x=type,y=high),color="red")+
xlab(xlab)+
ylab(ylab)+
theme_bw()+
theme(axis.text.x=element_text(angle=90,hjust=1))
if(logtrans){
# Log10 y-axis with 8 breaks spanning the prior range (which should
# contain the results).
breaks=10^seq(log10(min(tab.add[,"low"])),log10(max(tab.add[,"high"])),length.out=8)
p=p+scale_y_log10(breaks=breaks,labels=scientific)
}
# p
ggsave(plot=p,file=path,width=14,height=7)
}
|
0f0a7f3f517fc5407f54ec28395e3532446a6e87
|
85c772f3db8c3fa6928341bfd97b5a57c9bcc961
|
/C2/gg-area.R
|
8abb44c5f0278ff0d59360a2ec310cf309ca400f
|
[] |
no_license
|
DUanalytics/BARtaxila
|
e0efe470fd7a0eeb9a5dedc0f2254cd83efb6075
|
0c76fad59a2e6f565277f52d080b48561a09eed6
|
refs/heads/master
| 2020-07-26T17:04:30.853751
| 2020-02-16T12:02:17
| 2020-02-16T12:02:17
| 208,712,611
| 77
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
gg-area.R
|
# ggplot2 area-plot examples on mtcars.
# FIX: the script called ggplot() without ever attaching ggplot2.
library(ggplot2)
df <- mtcars
# Treat the discrete-valued columns as factors so fill/group aesthetics work.
catcols <- c('cyl', 'vs', 'am', 'gear', 'carb')
df[,catcols] <- lapply(df[,catcols], as.factor)
g1 <- ggplot(df, aes(x=mpg))
# Basic area plot (binned counts)
g1 + geom_area(stat = "bin")
# y axis as density value
g1 + geom_area(aes(y = ..density..), stat = "bin")
# Add a dashed vertical line at the mean mpg
g1 + geom_area(stat = "bin", fill = "lightblue")+ geom_vline(aes(xintercept=mean(mpg)), color="blue", linetype="dashed", size=1)
# Bar vs. area, both filled by transmission type (am)
#bar
ggplot(df, aes(x = mpg, fill = am)) + geom_bar(stat = "bin", bins=10)
# Area plot
ggplot(df, aes(x = mpg, fill = am)) + geom_area(stat = "bin", bins=10) + scale_fill_brewer(palette="Dark2")
|
471b20f6bb9c19fdd55f42f07ccd03e23648fe94
|
1230124bdac62ce04a7f62d43adca3452881edb1
|
/examples/whole_blood/generate_rnb_set_with_samplesheet_and_idatfiles.R
|
62a858c4471c545fc0bbce0307a48ac90b5d4c9e
|
[] |
no_license
|
lutsik/DecompPipeline
|
732639b94cf5766f28489ee759a79cbd7a3c7789
|
e38e3311391d8afc4fe035cfdf71760e5390e001
|
refs/heads/master
| 2020-09-16T00:22:46.675862
| 2019-10-11T13:57:26
| 2019-10-11T13:57:26
| 89,242,755
| 1
| 1
| null | 2019-04-10T14:58:46
| 2017-04-24T13:22:28
|
R
|
UTF-8
|
R
| false
| false
| 1,690
|
r
|
generate_rnb_set_with_samplesheet_and_idatfiles.R
|
# cd /home/guptad/divanshu
# mkdir exp %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# cd exp%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# mkdir temp
# mkdir datasets
# cd datasets
# wget '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
# mv '%%%%%%%%%%%%%%%%%%%%%%%%%5' &&&&&&&&&&.tar
# tar -xvf %%%%%%%.tar
# gunzip -d *.gz
# for file in `ls -1 | grep idat`; do new_file_name=`echo $file | sed -e 's/^GSM[0-9]\+_//'`; echo $new_file_name; mv $file $new_file_name; done
## scp c010-generic@c010-ngs:/home/c010-generic/Documents/divanshu/pipeline/sheet.csv /
# scp -r /home/guptad/divanshu/rnbsets c010-generic@c010-ngs:/home/c010-generic/Documents/divanshu/pipeline
# Build an RnBeads methylation set for GEO series GSE42861 from local IDAT
# files, using the GEO import only to recover the per-sample barcodes.
suppressPackageStartupMessages(library(RnBeads))
# Keep big matrices in memory; point ff temp files at scratch space.
options(fftempdir="/scratch/divanshu/newset/temp",disk.dump.big.matrices = FALSE,disk.dump.bigff = FALSE)
rnb.options(disk.dump.big.matrices = FALSE,disk.dump.bigff = FALSE)
# First import from GEO just to obtain the phenotype table.
rnb.set<-rnb.execute.import(data.source="GSE42861", data.type="GEO")
x <- pheno(rnb.set)
# Derive the Sentrix barcode from the supplementary IDAT file name:
# strip the "_Red.idat.gz" suffix, then the leading "GSM..._" accession.
barcode <- x$supplementary_file.1
barcode <- gsub("_Red.idat.gz","",barcode)
barcode <- sub(".+?_","",barcode)
x <- cbind(x,barcode)
# Re-import from the local IDAT directory using the annotated sample sheet.
rnb.set<-rnb.execute.import(data.source=list("/scratch/divanshu/newset/datasets", x), data.type="idat.dir")
saveRDS(rnb.set,"/scratch/divanshu/newset/rnb.set.rds")
save.image("rnb.setimage.RData")
# Downstream pipeline steps: filtering, then CpG subset selection.
source("/home/guptad/divanshu/pipeline/code.R")
res<-prepare_data(
RNB_SET=rnb.set,
WORK_DIR=file.path("/home/guptad/divanshu/exp2"),
DATASET="adgbbSorted",
DATA_SUBSET="frontal"
)
source("/home/guptad/divanshu/pipeline/cpgsubset.R")
cg_subsets<-prepare_CG_subsets(
res$rnb.set.filtered,
MARKER_SELECTION=c("var5k", "var10k")
)
save.image("final.RData")
|
37f98695b9cf42e0a23899f307b38851b35a5b21
|
8de7c88fd3ce03591c538694b3361f6b6c7fbf61
|
/R/phyloVar.R
|
d5a704398ba29537c874fd5ae9f3f63178efb330
|
[] |
no_license
|
ghthomas/motmot
|
b093742a4ed264076ca41bbc4fddf29d3cc00a93
|
c24372f5d5efbfbee6196c5459d0def31d547e54
|
refs/heads/master
| 2021-01-01T17:57:38.949773
| 2018-07-30T10:12:35
| 2018-07-30T10:12:35
| 10,839,257
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 967
|
r
|
phyloVar.R
|
# Phylogenetic variance estimate: the (restricted-df) ML estimate of the
# Brownian-motion rate, sigma^2 = e' V^-1 e / (n - k), where e are the
# residuals about the phylogenetic mean(s) and V is the (optionally
# lambda-transformed) expected covariance under the fitted rates.
#
# Args mirror phyloMean(): rateData (with $y, $x, $Vmat), relative rates
# `rate` (defaults to 1 per matrix), common.mean, Pagel's lambda controls,
# and meserr (measurement error present).
phyloVar <-
function(rateData, rate=NULL, common.mean=FALSE, lambda.est=TRUE, lambda=1, meserr=FALSE) {
	# Default: one relative rate of 1 per rate matrix (no-op else branch removed).
	if(is.null(rate)) { rate <- rep(1, length(rateData$Vmat)) }
	if(length(rate) != length(rateData$Vmat)){stop("The number of rates defined differs from the number of rate matrices")}
	y <- rateData$y
	x <- as.factor(rateData$x)
	# k = number of estimated means: one per group, or 1 with a common mean.
	if (common.mean==FALSE) {k <- nlevels(x)} else {k <- 1}
	V <- transformRateMatrix(rateData, rate)
	# Apply Pagel's lambda to the off-diagonal elements only (diagonal kept).
	if (lambda.est && !meserr) {
		v.temp <- V
		diag(v.temp) <- rep(0, dim(V)[1])
		V.lam <- lambda*v.temp
		diag(V.lam) <- diag(V)
		V <- V.lam
	}
	x <- make.anc(y, x)
	# With a common mean the design matrix collapses to an intercept column
	# (the original's "x <- x" branch was a no-op and is removed).
	if(common.mean) { x <- rep(1, length(x[,1])) }
	mu <- phyloMean(rateData, rate, common.mean=common.mean, lambda.est, lambda)
	e <- y - x %*% mu
	# Solve the linear system directly instead of forming the full inverse:
	# crossprod(e, solve(V, e)) == e' V^-1 e, but cheaper and more stable.
	s2 <- crossprod(e, solve(V, e))
	n <- length(y)
	phylo.var <- ( s2 / (n - k) )
	return(phylo.var)
	}
|
b0f6afd36e544c1be50862586c654fffd8ae682d
|
1dfe802713de5a2a5cc08e8be4b8e5ac47b7d7cc
|
/snpCyto.R
|
390cd8f56ce186ab2b29049adcf3d362b101d418
|
[
"BSD-3-Clause"
] |
permissive
|
KieberLab/Mutagenomics
|
91a9c1f1c2b93aaeef6f8bfd008b6b0c095738ce
|
3cf88522aad2b1b60cc93f986a937b79f857ce0b
|
refs/heads/master
| 2021-07-08T17:20:23.970280
| 2020-08-07T16:37:05
| 2020-08-07T16:37:05
| 143,743,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
snpCyto.R
|
# Build a Cytoscape network (via the RCy3 REST bridge; Cytoscape must be
# running) from a two-column mutation/gene edge list and highlight the
# source (mutation) nodes in yellow rounded rectangles.
options(stringsAsFactors=FALSE)
library(RCy3)
ck <- read.delim("output.hom/Moderate.shared.txt")
ck$from <- as.character(ck$from) #important for making networks, source and target need to be character, not integers
# Keep only columns 1 and 3 as the edge endpoints.
ckCut <- ck[c(1,3)]
names(ckCut) <- c("source","target")
ckNet <- createNetworkFromDataFrames(edges=ckCut[c(1,2)])
nodeList <- getAllNodes(ckNet)
# Nodes that appear as edge sources are the mutations to highlight.
mutList <- nodeList[nodeList %in% ckCut$source]
setNodeColorBypass(mutList,new.colors="#ffff00",network=ckNet)
setNodeShapeDefault(new.shape="ELLIPSE")
setNodeShapeBypass(mutList,new.shapes="ROUND_RECTANGLE",network=ckNet)
|
f625c79212633578dee519d287e72f63d0b4925e
|
1d1e6b4e32594a6a5b4a3f23723abee28c629f10
|
/R/check_feature_independence.R
|
1d631204c8e0df2743ffe380af1ef14814c7d219
|
[] |
no_license
|
darshanmeel/LV
|
62e7e01dd29c143cd92f443b837e4f5874644fa1
|
844ac42a4ce81538477c7443082ffb5c77249591
|
refs/heads/master
| 2021-01-19T13:49:19.533404
| 2015-03-24T08:21:08
| 2015-03-24T08:21:08
| 30,870,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,383
|
r
|
check_feature_independence.R
|
# This file will find all the columns which are correlated to each other.
# Numeric columns will be shown based on the correlation whereas factor columns will be shown based on the chi square.
# Numeric columns can be converted to factor columns before checking independence of fact columns but these might need to be discretized first.
# NOTE(review): this function depends on helpers defined elsewhere in the
# project (convertinttonum, findallnumcols, findcorrelation,
# EqualFreqDiscretization, convertnumtofact, convertchartofact,
# findallfactorcols). It is an analysis/diagnostic routine: results are
# printed, nothing is returned for programmatic use.
check_whether_features_are_related <- function(X,cor_cutoff = 0.5,alpha = 0.05)
{
#do not check the correlation or independece against class column which will be the last column.
X <- X[,-ncol(X)]
#find correlation.Convert all int columns to numeric
X <- convertinttonum(X)
# Find all num cols
numcols <- findallnumcols(X)
# Find correlation between columns
cor_cols <- findcorrelation(X)
# Consider columns with a correlation above the cutoff as related
ab<- as.matrix(cor_cols>cor_cutoff)
print(paste("numeric columns with a correlation more than",cor_cutoff))
print(ab)
print ("ignore diagonal columns as these values will be 1 always")
#Now run chi square
#convert numcols to factors
#convert the numcols to say 20 factor cols. This is just to get the idea
X[,numcols] <- EqualFreqDiscretization(X[,numcols],numofbins=20)
X <- convertnumtofact(X)
#convert char cols to fact cols
X <- convertchartofact(X)
factcols <- findallfactorcols(X)
#now run the loop. It can be done using sapply and lapply as well but that will make it more complicated although might be faster
# Here the code is for analysis so it is more about understanding and that is why you will see some verbose output as well. In future, I might test the sapply and l apply as well.
X <- X[,factcols]
# Seed row (removed below) only fixes the data.frame column types.
pvals <- as.data.frame(t(c('NULL','NULL',0.0)))
colnames(pvals) <- c('col1','col2','pvalue')
pvals$col1 <- as.character(pvals$col1)
pvals$col2 <- as.character(pvals$col2)
pvals$pvalue <- as.numeric(pvals$pvalue)
cols <- colnames(X)
nc <- ncol(X) -1
# Pairwise chi-square tests over the upper triangle of column pairs.
# NOTE(review): growing pvals with rbind inside the loop is O(n^2); fine for
# an interactive diagnostic, preallocate if column counts get large.
for (i in 1:nc){
k <- i+1
for (j in k:(nc+1)){
ak <- chisq.test(table(X[,i],X[,j]))
pdtl <- c(cols[i],cols[j],ak$p.value)
pvals <- rbind(pvals,pdtl)
}
}
#removefirst val
pvals <- pvals[-1,]
#now print
print("print the p values for column independence.These are order by p values descending to find the columsn which are independent")
pvals <- pvals[order(pvals[,3],decreasing=FALSE,na.last=TRUE),]
print(pvals)
}
|
3c6b2a0a08d0e4d157ffa149b92bb0587281ccf0
|
b69cc5de123d0f6e9de659c933a418de2129f6f0
|
/R/Spatial Tidying.R
|
ab45f9a1468688a9d73ada3556418cbd499d5852
|
[] |
no_license
|
NeilHobbs/sri_lanka_snakebite
|
c43b31c8e9ac4ec290ae46e7a1e734f2028a70ac
|
605d2c4f2c8afef459d3adf72b645560e110c493
|
refs/heads/main
| 2023-01-01T05:29:37.020122
| 2020-10-19T14:44:46
| 2020-10-19T14:44:46
| 305,411,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,191
|
r
|
Spatial Tidying.R
|
library(stringr)
library(dplyr)
library(readxl)
# Hospital names and coordinates for Sri Lanka.
hospital_coordinates <- read_excel("./Hospitals_SL_update.xlsx")
hospital_coordinates <- hospital_coordinates %>%
  rename(Hospital = Name)
# Find the hospitals with the facility-type codes before the names.
Hosp_Coords1 <- hospital_coordinates %>%
  filter(str_detect(Hospital, "BHA|BHB|DHC|DHA|CD|DH|DHB|DHC|PGH|PMCU"))
# Those that don't have the codes before their names.
Hosp_Coords2 <- anti_join(hospital_coordinates, Hosp_Coords1)
Hosp_Coords1 <- Hosp_Coords1 %>%
  mutate(Hospital = sub(".*? ", "", Hosp_Coords1$Hospital)) %>% # drop everything before the first space (the code)
  mutate(Hospital = str_to_lower(Hospital))
Hosp_Coords2 <- Hosp_Coords2 %>%
  mutate(Hospital = str_to_lower(Hospital))
hospitals_updated <- union(Hosp_Coords1, Hosp_Coords2)
# Import initial tidied yearly datasets (see Data Tidying file).
snakes2008 <- read.csv("snakes2008.csv")
snakes2009 <- read.csv("snakes2009.csv")
snakes2010 <- read.csv("snakes2010.csv")
snakes2011 <- read.csv("snakes2011.csv")
snakes2012 <- read.csv("snakes2012.csv")
snakes2013 <- read.csv("snakes2013.csv")
snakes2014 <- read.csv("snakes2014.csv")
snakes2015 <- read.csv("snakes2015.csv")
snakes2016 <- read.csv("snakes2016.csv")
snakes2017 <- read.csv("snakes2017.csv")
snakes2018 <- read.csv("snakes2018.csv")
# Bind the rows of the tidied yearly datasets.
snakes_df <- rbind(snakes2008, snakes2009, snakes2010,
                   snakes2011, snakes2012, snakes2013,
                   snakes2014, snakes2015, snakes2016,
                   snakes2017, snakes2018)
# Lower-case hospital names so they match the coordinate table's key.
snakes_df <- snakes_df %>%
  ungroup() %>%
  mutate(Hospital = str_to_lower(Hospital))
write.csv(hospitals_updated, ".//hosital_coords.csv")
write.csv(snakes_df, ".//snakebite_data_tidy.csv")
# Re-read the written files and geotag each record by hospital name.
snakes_data <- read.csv("./snakebite_data_tidy.csv")
hospital_coordinates <- read.csv("hosital_coords.csv")
geotagged_snakes <- left_join(snakes_data, hospital_coordinates, by = "Hospital")
geotagged_snakes <- geotagged_snakes %>%
  select(-"X")
write.csv(geotagged_snakes, ".//geotagged_snakes.csv")
# BUG FIX: these two summaries referenced geotagged_snakes before it was
# created; they are moved here, after the join.
# NOTE(review): a join by = "Hospital" does not produce a Hospital.y column;
# confirm which column marks un-matched records (is.na(x) below looks right).
untagged_snakes <- geotagged_snakes %>%
  filter(is.na(Hospital.y))
distinct_hospitals <- geotagged_snakes %>%
  filter(is.na(x)) %>%
  distinct(Hospital)
|
05d04c8d4000f0160f78c097f12ca9fdf5ac81ab
|
315e904bb62f434ed3ffffc7a7ede67a94df1d5c
|
/R/compute_ssd.R
|
0c16331ef9722344e58fa87107813fb9ada0d7c5
|
[] |
no_license
|
ChengLiLab/markov3d
|
8fa7ae37ea207c4cf05592c9a7e43881b2245f39
|
e26c7261f05793c4f57ad54cb64eb4530f359724
|
refs/heads/master
| 2021-01-11T09:58:36.831283
| 2017-01-03T05:54:24
| 2017-01-03T05:54:24
| 77,888,214
| 1
| 0
| null | 2017-01-03T05:56:39
| 2017-01-03T05:56:39
| null |
UTF-8
|
R
| false
| false
| 753
|
r
|
compute_ssd.R
|
#' Compute the steady-state distribution from a transition matrix
#'
#' Takes the leading left eigenvector of the transition matrix (computed via
#' \code{rARPACK::eigs} on the transpose) and normalises it to sum to one.
#' The roxygen block now precedes the function so it is actually parsed;
#' previously it sat inside the body, where roxygen2 ignores it.
#'
#' @param transition.mat Matrix, transition matrix.
#' @param iter.epsilon Float, tolerance for the iterative fallback computation
#'   of the SSD (currently disabled; parameter kept for interface stability).
#' @param iter.max Int, max number of iterations for the same fallback.
#' @return Named numeric vector (steady-state distribution), or \code{NULL}
#'   when the eigen-solver returns no vector.
compute_ssd <- function(transition.mat, iter.epsilon = 1e-8, iter.max = 1e5) {
  # Leading eigenvector of t(P); Re() drops negligible imaginary parts.
  ssd <- Re(rARPACK::eigs(t(transition.mat), 1)$vector)
  # Bail out early -- before normalising or naming -- when the solver failed.
  # (The original checked only after dividing by sum(ssd).)
  if (length(ssd) == 0) return(NULL)
  # Normalise to a probability vector; dividing by the signed sum also fixes
  # the arbitrary sign of the eigenvector.
  ssd <- ssd / sum(ssd)
  names(ssd) <- colnames(transition.mat)
  ssd
}
|
3e78eb5ccf9995d53ac26fe59e5739997db85fab
|
70000c0b41bfa6a711a953e7ea77a0a35dce425a
|
/R/run_seminribm.R
|
4f228ecea508bcdd596e2e0a9ccb55a2fb5dbec6
|
[
"MIT"
] |
permissive
|
davidchampredon/seminribm
|
a46972481fb8ae1f2bcea4a5082a890a721e0b1a
|
cd327a16a9587ac861694e7c0e54392bb276ed6f
|
refs/heads/master
| 2021-05-14T11:37:11.405019
| 2018-01-10T15:22:04
| 2018-01-10T15:22:05
| 116,387,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,159
|
r
|
run_seminribm.R
|
#' Run one simulation of a stochastic SEmInR model
#'
#' @description Stochastic, individual-based SEIR model with m chained 'E'
#' compartments and n chained 'I' compartments. Simulated with the Gillespie
#' algorithm, either exactly or with the tau-leap approximation.
#' @param horizon Numeric. Time horizon of the simulation.
#' @param popSize Integer. Population size.
#' @param R0 Numeric. Basic reproduction number.
#' @param latent_mean Numeric. Mean latent (infected but not infectious) duration.
#' @param infectious_mean Numeric. Mean infectious duration.
#' @param nE Integer. Number of 'E' compartments.
#' @param nI Integer. Number of 'I' compartments.
#' @param initInfectious Integer. Number of individuals infected at time 0.
#' @param doExact Boolean. TRUE runs the standard Gillespie algorithm; FALSE
#'   uses the tau-leap approximation.
#' @param timeStepTauLeap Numeric. Time step for the tau-leap approximation.
#'   Default = 0.1. Only used if \code{doExact=FALSE}.
#' @param rnd_seed Integer. Seed for the random number generator. Default = 1234.
#' @param calc_WIW_Re Boolean. Calculate the 'Who Infected Who' matrix.
#'   Warning: slows down the simulation. Default = FALSE.
#' @return A named list storing simulation outputs.
#' \itemize{
#'  \item \code{times}: simulated calendar times.
#'  \item \code{S}: Time series of the number of susceptible individuals.
#'  \item \code{prev}: Time series of prevalence (number of infectious individuals).
#'  \item \code{R}: Time series of the number of recovered individuals.
#'  \item \code{acq_times}: Disease acquisition times for all infected individuals.
#'  \item \code{acqTransm_times}: Acquisition times for infected individuals that
#'  transmitted to at least one other individual.
#'  \item \code{GI_bck}: Backward generation intervals for all infected individuals.
#'  \item \code{GI_fwd}: Forward generation intervals for all infected individuals;
#'  one individual may have several, hence a list of vectors.
#'  \item \code{Reff}: Effective reproduction number; secondary cases per
#'  infected individual.
#' }
#' @export
seminribm_run <- function(horizon,
                          popSize ,
                          R0 ,
                          latent_mean ,
                          infectious_mean,
                          nE ,
                          nI ,
                          initInfectious ,
                          doExact ,
                          timeStepTauLeap = 0.1,
                          rnd_seed = 1234,
                          calc_WIW_Re = FALSE) {
    # Delegate to the C++ engine. Arguments are forwarded by name because the
    # C++ entry point takes calc_WIW_Re before the tau-leap settings.
    cpp_seminribm_run(horizon         = horizon,
                      popSize         = popSize,
                      R0              = R0,
                      latent_mean     = latent_mean,
                      infectious_mean = infectious_mean,
                      nE              = nE,
                      nI              = nI,
                      initInfectious  = initInfectious,
                      calc_WIW_Re     = calc_WIW_Re,
                      doExact         = doExact,
                      timeStepTauLeap = timeStepTauLeap,
                      rnd_seed        = rnd_seed)
}
|
e620005a7e782d2dad58694bc255550ddb4b3d55
|
cebdfafd0b197990e4b78ad50b124c8da9383143
|
/data/MLPAvalidation.R
|
d313169d55bf43854ce7940e3acfde9dd7f50b6b
|
[] |
no_license
|
cran/MLPAstats
|
0e74630f5191fdf5dfec8ae7baac5f936f012fa7
|
899bf5e9737526e026fd1e7cbe13c5e3ed56b163
|
refs/heads/master
| 2021-01-23T16:41:05.075290
| 2011-02-11T00:00:00
| 2011-02-11T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,951
|
r
|
MLPAvalidation.R
|
# MLPA validation data: raw peak intensities for 20 control samples
# (NA.HMP05.*), three replicas (A/B/C) each, across 16 probes.
# Machine-generated dput() output -- do not edit the numbers by hand.
`controls` <-
structure(list(Sample = structure(c(1L, 1L, 1L, 2L, 2L, 2L, 3L,
3L, 3L, 4L, 4L, 4L, 5L, 5L, 5L, 6L, 6L, 6L, 7L, 7L, 7L, 8L, 8L,
8L, 9L, 9L, 9L, 10L, 10L, 10L, 11L, 11L, 11L, 12L, 12L, 12L,
13L, 13L, 13L, 14L, 14L, 14L, 15L, 15L, 15L, 16L, 16L, 16L, 17L,
17L, 17L, 18L, 18L, 18L, 19L, 19L, 19L, 20L, 20L, 20L), .Label = c("NA.HMP05.a2",
"NA.HMP05.a3", "NA.HMP05.b1", "NA.HMP05.b2", "NA.HMP05.b3", "NA.HMP05.c1",
"NA.HMP05.c2", "NA.HMP05.c3", "NA.HMP05.d1", "NA.HMP05.d2", "NA.HMP05.d3",
"NA.HMP05.e2", "NA.HMP05.f1", "NA.HMP05.f2", "NA.HMP05.f3", "NA.HMP05.g1",
"NA.HMP05.g2", "NA.HMP05.g3", "NA.HMP05.h2", "NA.HMP05.h3"), class = "factor"),
    Replica = structure(c(1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L,
    1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L,
    1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L,
    1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L,
    1L, 2L, 3L, 1L, 2L, 3L), .Label = c("A", "B", "C"), class = "factor"),
    RNAseP = c(13220, 13028, 13034, 12125, 11230, 7201, 14056,
    11221, 11950, 12653, 12157, 10693, 13033, 13037, 12220, 13934,
    10812, 11373, 12941, 10499, 11142, 13375, 10049, 13134, 12638,
    13177, 11269, 12807, 11614, 9995, 13830, 11398, 12022, 11527,
    12384, 12033, 13503, 13417, 12382, 12871, 13811, 11808, 13521,
    10618, 10184, 15948, 12856, 10867, 13125, 12572, 11535, 13439,
    11202, 10770, 14238, 11564, 10724, 11064, 11532, 12794),
    HIRA = c(9108, 9891, 8779, 9564, 8295, 4962, 10950, 7778,
    8173, 9933, 8832, 8062, 10210, 9266, 8944, 10120, 7870, 7949,
    9096, 7862, 7983, 9665, 7808, 9238, 9195, 9272, 7631, 10016,
    8671, 6885, 9815, 8299, 8890, 7634, 8875, 8496, 9563, 9543,
    8916, 8709, 9544, 8715, 10098, 6870, 7450, 10702, 7814, 7707,
    8175, 8763, 8366, 9380, 7730, 7946, 10384, 8421, 7273, 7960,
    8561, 9109), UBE3A = c(10644, 10673, 10756, 9945, 8695, 5593,
    10756, 8602, 9062, 9971, 9464, 8353, 10518, 9764, 9008, 10892,
    8646, 8940, 10530, 7940, 9067, 10718, 8351, 11077, 9854,
    10662, 8728, 10820, 9338, 7743, 10881, 9643, 9291, 9046,
    10040, 9644, 11579, 10909, 9581, 10557, 10695, 9457, 10326,
    7821, 8050, 12199, 9996, 8608, 10279, 9823, 8782, 10976,
    9087, 8357, 12358, 9113, 8244, 7887, 8991, 9632), ENm014 = c(10637,
    11312, 10204, 10250, 9403, 5957, 11704, 9223, 9879, 10851,
    10349, 9301, 11482, 11117, 10164, 12400, 9738, 9196, 11019,
    8622, 9101, 11333, 8606, 11984, 10290, 11260, 9899, 11489,
    10407, 8843, 11636, 10303, 9839, 9508, 10270, 10450, 12173,
    12073, 11006, 11477, 10986, 10614, 12042, 9239, 8972, 13010,
    10026, 9764, 10946, 10595, 9611, 11544, 9161, 8977, 13590,
    10305, 8605, 9315, 10446, 11492), ENm013 = c(10012, 10045,
    9419, 9986, 8778, 5546, 11135, 8670, 8745, 10179, 8606, 8589,
    10670, 9737, 9235, 10823, 8668, 8431, 9988, 7920, 8572, 10041,
    7822, 10440, 9665, 9794, 8848, 10806, 9294, 7444, 10485,
    8737, 9276, 8607, 9185, 9414, 11090, 9718, 8833, 10071, 10314,
    9353, 10468, 7421, 8048, 11639, 9605, 8456, 9245, 9426, 8685,
    10587, 8729, 8223, 11422, 9004, 8111, 8413, 9014, 9315),
    SNRPN = c(6061, 5623, 5328, 5729, 4898, 3104, 6300, 4704,
    4775, 5619, 5022, 4688, 5675, 4784, 4946, 5913, 5042, 4473,
    5554, 4260, 4620, 5947, 4585, 6140, 5350, 5867, 4891, 5879,
    5423, 4099, 5714, 5142, 4829, 5017, 5344, 4950, 6274, 5831,
    5445, 5630, 5308, 4826, 5811, 4430, 4561, 6948, 5134, 4648,
    5526, 5522, 4827, 5347, 4801, 4576, 6099, 5215, 4145, 4722,
    4555, 5627), ENm313 = c(6101, 5985, 5374, 5996, 5387, 3435,
    6296, 5067, 4801, 5838, 5703, 4911, 6013, 5558, 5140, 6109,
    5271, 5408, 5856, 4737, 5283, 6522, 4658, 6090, 6034, 6362,
    4829, 6024, 5601, 4869, 6456, 5320, 5692, 5650, 5742, 5579,
    6619, 6573, 6257, 5723, 5669, 5313, 6066, 4711, 4825, 6847,
    5366, 5275, 5582, 5679, 5083, 6100, 5355, 5344, 7238, 5335,
    4885, 4883, 4953, 6231), ZWINT = c(3161, 3238, 2948, 2850,
    2495, 1591, 3186, 2455, 2711, 2881, 2598, 2497, 3359, 2715,
    2617, 3172, 2560, 2441, 3087, 2483, 2401, 3050, 2189, 3054,
    2922, 3014, 2497, 3155, 2839, 2410, 3004, 2586, 2703, 2626,
    2722, 2808, 3386, 2947, 2994, 3263, 2898, 2639, 3237, 2395,
    2446, 3516, 2627, 2413, 2863, 2668, 2571, 3065, 2669, 2540,
    3283, 2343, 2457, 2596, 2567, 3024), ENr111 = c(2879, 2799,
    2538, 2978, 2637, 1598, 3150, 2472, 2343, 3126, 2392, 2301,
    2980, 2893, 2576, 3224, 2412, 2287, 2811, 2432, 2449, 3022,
    2197, 2866, 2852, 2785, 2447, 2681, 2568, 2145, 2800, 2394,
    2692, 2868, 2580, 2589, 3230, 2943, 3023, 2765, 3036, 2656,
    3127, 2285, 2285, 3701, 2527, 2198, 2713, 2732, 2296, 2952,
    2469, 2560, 3532, 2732, 2392, 2515, 2693, 2894), ENm323 = c(4172,
    4130, 3920, 3747, 3423, 2225, 4711, 3424, 3469, 3960, 3856,
    3543, 4387, 4251, 4007, 4486, 3461, 3442, 4062, 3187, 3279,
    3907, 3114, 4267, 3826, 4093, 3423, 4191, 3710, 2932, 4213,
    3567, 3574, 3294, 3954, 3757, 4366, 4038, 3709, 4021, 3776,
    3630, 4018, 3087, 3134, 4585, 4031, 3407, 3605, 3659, 3299,
    4202, 3517, 3367, 4368, 3455, 3348, 3224, 3442, 4242), UBEA3A = c(3325,
    2934, 2691, 2704, 2528, 1634, 3166, 2591, 2309, 2935, 2659,
    2404, 2869, 2729, 2465, 3234, 2604, 2482, 3069, 2506, 2556,
    2890, 2238, 2976, 2955, 2955, 2584, 3272, 2878, 2313, 3162,
    2692, 2986, 2737, 2981, 2879, 3396, 3271, 2858, 2880, 3025,
    2713, 3211, 2235, 2242, 3467, 2832, 2327, 2697, 2655, 2552,
    3181, 2732, 2680, 3679, 2468, 2180, 2426, 2599, 3067), ENr213 = c(1772,
    2041, 1996, 2033, 1816, 1204, 2054, 1606, 1710, 1947, 1651,
    1707, 1864, 1691, 1598, 2358, 1760, 1685, 1888, 1565, 1678,
    2014, 1552, 1951, 1876, 1906, 1660, 2126, 1788, 1612, 2044,
    1730, 1990, 1634, 1566, 1735, 2201, 2041, 1658, 1861, 1770,
    1681, 2083, 1719, 1624, 2261, 1795, 1282, 1796, 1828, 2064,
    2093, 1742, 1739, 2636, 1739, 1658, 1579, 1838, 2058), PHYLIP = c(4283,
    4420, 4051, 3833, 3257, 2316, 4576, 3308, 3354, 3856, 3567,
    3460, 3891, 3731, 3470, 4313, 3410, 3173, 4186, 3101, 3383,
    4135, 3007, 4044, 4164, 4086, 3607, 4234, 3742, 3139, 3918,
    3550, 3742, 3407, 3797, 3629, 4455, 4001, 4099, 3913, 4250,
    3421, 3949, 3134, 2911, 4606, 3487, 3260, 4079, 3571, 3108,
    4152, 3572, 3534, 4522, 3504, 3219, 3614, 3592, 3646), ENr233 = c(10983,
    11228, 10406, 9465, 9812, 6112, 11396, 8923, 9601, 10372,
    9691, 9176, 11433, 11344, 10177, 11387, 8769, 8813, 10573,
    8628, 9297, 11467, 8163, 10584, 10382, 10414, 9376, 10785,
    9397, 8620, 10938, 9451, 10043, 7239, 7536, 7874, 11126,
    10924, 10770, 10800, 11467, 9849, 11114, 8776, 8516, 12358,
    9491, 8860, 9660, 10584, 9173, 13355, 10891, 11036, 13073,
    9339, 8781, 9710, 9943, 10874), RP11 = c(3249, 3456, 3539,
    3151, 2981, 1813, 3590, 2544, 2880, 3323, 3125, 2723, 3665,
    3544, 3139, 3688, 2617, 2663, 3325, 2663, 2905, 3705, 2634,
    3429, 3219, 3331, 3126, 3270, 3234, 2454, 3585, 3072, 2948,
    2852, 3238, 3065, 3332, 3416, 3123, 3438, 3423, 3238, 3810,
    2658, 2648, 4018, 3090, 2316, 3200, 3228, 2906, 3554, 2830,
    2674, 3802, 3183, 2749, 2965, 3349, 3512), ENr222 = c(3959,
    3709, 3733, 3543, 3057, 2076, 4242, 3145, 2910, 3801, 3681,
    3002, 4376, 3545, 3306, 4003, 3341, 2954, 3748, 3003, 3161,
    3725, 2971, 3552, 3767, 3716, 3187, 3748, 3419, 2844, 3641,
    3226, 3214, 3433, 3511, 3543, 4008, 4133, 3296, 3714, 3452,
    3213, 3942, 2970, 3182, 4401, 3385, 3097, 3433, 3485, 3140,
    3790, 3221, 3111, 4200, 3389, 2973, 3159, 3087, 3694)), .Names = c("Sample",
"Replica", "RNAseP", "HIRA", "UBE3A", "ENm014", "ENm013", "SNRPN",
"ENm313", "ZWINT", "ENr111", "ENm323", "UBEA3A", "ENr213", "PHYLIP",
"ENr233", "RP11", "ENr222"), row.names = c(NA, 60L), class = "data.frame")
# Raw peak intensities for the 9 test samples (HapMap, Prader-Willi,
# DiGeorge, Autism), three replicas (A/B/C) each, over the same 16 probes
# as `controls`.  Machine-generated dput() output -- do not edit by hand.
`cases` <-
structure(list(Sample = structure(c(5L, 5L, 5L, 6L, 6L, 6L, 7L,
7L, 7L, 8L, 8L, 8L, 9L, 9L, 9L, 3L, 3L, 3L, 4L, 4L, 4L, 1L, 1L,
1L, 2L, 2L, 2L), .Label = c("Autism1", "Autism2", "DiGeor1",
"DiGeor2", "HapMap1", "HapMap2", "HapMap3", "PradWilli1", "PradWilli2"
), class = "factor"), Replica = structure(c(1L, 2L, 3L, 1L, 2L,
3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L, 1L, 2L, 3L,
1L, 2L, 3L, 1L, 2L, 3L), .Label = c("A", "B", "C"), class = "factor"),
    RNAseP = c(13873, 10180, 11569, 11917, 12315, 11956, 14951,
    12858, 11370, 15773, 10971, 13033, 7616, 6170, 6448, 11248,
    11389, 14502, 10403, 10014, 12409, 9653, 9896, 10423, 10317,
    10271, 10456), HIRA = c(10538, 7233, 7960, 8343, 8317, 8565,
    11167, 8474, 7795, 10666, 8395, 9228, 5462, 4569, 4505, 4389,
    4494, 5282, 3977, 4249, 4710, 7331, 6786, 7492, 7568, 6425,
    8375), UBE3A = c(10767, 8795, 8936, 9138, 9399, 9537, 11928,
    9375, 8686, 6749, 4666, 5896, 3068, 2639, 2527, 9495, 9281,
    11809, 9282, 8889, 9953, 7620, 7126, 7439, 7943, 7880, 7533
    ), ENm014 = c(11561, 8688, 10219, 10271, 9835, 10608, 12929,
    9982, 9380, 12982, 10022, 11568, 7320, 5820, 5783, 10329,
    9880, 12593, 9866, 9186, 11024, 8692, 8249, 8352, 9254, 8568,
    8868), ENm013 = c(10675, 7860, 8987, 9039, 8849, 9548, 11404,
    8850, 8611, 10749, 8866, 10137, 6022, 4728, 4691, 8906, 8360,
    11257, 8405, 8748, 9957, 7739, 7461, 7671, 8347, 7223, 8667
    ), SNRPN = c(6375, 4624, 4651, 4816, 5184, 5076, 5684, 4715,
    4901, 3137, 2665, 2933, 1729, 1594, 1454, 5620, 5545, 6764,
    4974, 4961, 6181, 4137, 4149, 4213, 4304, 4542, 4607), ENm313 = c(6208,
    4962, 5658, 5169, 5273, 5501, 7053, 4956, 5246, 7628, 5899,
    6650, 3447, 3445, 3122, 5748, 5849, 7240, 5478, 5378, 6424,
    4217, 4247, 4820, 5296, 4486, 4779), ZWINT = c(3283, 2548,
    2733, 2761, 2552, 2742, 3385, 2555, 2854, 3521, 2871, 3272,
    1834, 1604, 1502, 2725, 2893, 3563, 2493, 2535, 3181, 3273,
    3268, 3517, 3485, 3168, 3185), ENr111 = c(2990, 2565, 2911,
    2423, 2738, 2538, 3671, 2628, 2541, 3129, 2590, 2661, 1460,
    939, 1242, 2485, 2435, 3107, 2176, 2439, 2665, 2312, 2386,
    2546, 2493, 2131, 2281), ENm323 = c(4878, 3494, 3497, 3480,
    3573, 3864, 4655, 3524, 3330, 4809, 3321, 4178, 2570, 2036,
    1905, 3606, 3951, 4631, 3409, 3124, 4021, 2902, 3083, 3118,
    3341, 3263, 3197), UBEA3A = c(3445, 2311, 2628, 2365, 2708,
    2644, 3397, 2797, 2648, 2118, 1529, 1673, 853, 974, 618,
    2936, 2831, 3456, 2664, 2746, 2936, 2125, 2233, 2425, 2197,
    2273, 2698), ENr213 = c(1930, 1687, 1505, 1872, 2055, 2026,
    2247, 1610, 1598, 2438, 1834, 2228, 1004, 924, 1170, 2134,
    2090, 2639, 1917, 1910, 2277, 1559, 1564, 1291, 1542, 1508,
    1661), PHYLIP = c(3825, 3183, 3475, 3263, 3703, 3503, 5017,
    3730, 3783, 4809, 3549, 4328, 2452, 1968, 2017, 3837, 4085,
    4540, 3517, 3480, 4155, 4170, 4499, 4740, 4887, 4170, 4739
    ), ENr233 = c(11046, 9165, 9616, 9637, 10391, 10623, 13173,
    9634, 9320, 12057, 9498, 10942, 6555, 5203, 5259, 9662, 9519,
    11368, 9959, 8720, 10166, 8380, 8551, 8646, 9056, 8659, 8606
    ), RP11 = c(3258, 2800, 3172, 3092, 3282, 3167, 4377, 2919,
    2937, 3836, 3055, 3335, 1905, 1666, 1577, 3167, 2909, 3411,
    2576, 2690, 3215, 3864, 3746, 3606, 4174, 3549, 4071), ENr222 = c(3652,
    3006, 3327, 3317, 3406, 3589, 4767, 3244, 3356, 4082, 3282,
    4033, 2135, 1826, 1591, 3367, 3457, 3781, 3238, 3233, 3326,
    2783, 2681, 2677, 3374, 2876, 2931)), .Names = c("Sample",
"Replica", "RNAseP", "HIRA", "UBE3A", "ENm014", "ENm013", "SNRPN",
"ENm313", "ZWINT", "ENr111", "ENm323", "UBEA3A", "ENr213", "PHYLIP",
"ENr233", "RP11", "ENr222"), row.names = c(NA, 27L), class = "data.frame")
# Positions (1-16, in probe-column order) of the reference probes used
# for normalisation.
`reference.probes` <-
c(1, 4, 5, 7, 9, 10, 12, 14, 16)
# Probe product sizes, one per probe, in the same order as the 16 probe
# columns of `controls`/`cases`.
`size` <-
c(88, 91, 93, 97, 103, 109, 112, 117, 121, 124, 128, 130, 136,
141, 144, 147)
|
6cc26625b306f202d280879f65fbfef42c72ebcd
|
e0877a3b598fcdcea8f07a02e48f8aa490bf01be
|
/store.returns.R
|
8fff168963251900e08120bb47d091efdfb6df1e
|
[] |
no_license
|
lotterdata/proj_2_bootcamp
|
a1b8cffdabe864370f001d0b00c42acf8ff8011a
|
03e39dd40b4e2329c31d9f7ec82d1a2c21be15c1
|
refs/heads/master
| 2021-01-10T07:35:23.427915
| 2015-10-18T03:27:38
| 2015-10-18T03:27:38
| 43,725,182
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 380
|
r
|
store.returns.R
|
# Build and cache the monthly-return table for the efficient-frontier
# Shiny app.  portfolioReturns() is defined in eff_front_app/helpers.R.
source("eff_front_app/helpers.R")
library(stringr)
# 16 tickers: four each from pharma, banking, media and retail.
full.list <- portfolioReturns(c('PFE','NVS','MRK','LLY',
                                'GS','JPM','MS','PNC',
                                'TWX','CMCSA','DIS','DISCA',
                                'WMT','TGT','HD','COST'))
# Keep only the "YYYY-MM" part of each date string.
full.list$date <- str_sub(full.list$date, 1, 7)
# Cache for the app to read at startup.
saveRDS(full.list,"eff_front_app/full_list.rds")
|
fb2d5b6f07586c524c9ae781968ff981bde2afe6
|
d2283754044515dbef6c729a5b6ffe236f9e4bfb
|
/R/facet_plot.R
|
089ab1fb0acda75231fcffe36f2c7eb6f3a0ad47
|
[] |
no_license
|
funkhou9/editTools
|
24e311cdf4928d0c9b3048d5fc39def5816d120c
|
e8bcd8595942c98286dd94a57b0f90c6c6681e6b
|
refs/heads/master
| 2020-05-30T06:42:40.951785
| 2018-09-12T01:43:06
| 2018-09-12T01:43:06
| 39,027,824
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,269
|
r
|
facet_plot.R
|
#' Turn two or more edit_table plots into a single facet plot
#'
#' @param ... any number of data.frames resulting from
#'   \code{plot.edit_table(plot = FALSE)}
#' @param names optional character vector of facet labels, one per sample
#'   supplied in \code{...}
#' @param text_size numeric size of axis, legend and strip text
#' @param percent_size numeric size of the percentage labels drawn on the bars
#' @param group character naming the column mapped to the bar fill,
#'   or \code{"none"} for plain (unfilled-by-group) bars
#' @return a gg object
#' @export
facet_plot <- function(...,
                       names = NULL,
                       text_size = 20,
                       percent_size = 10,
                       group = "Tissue") {

  # Stack the per-sample tables; each keeps its `field` id for faceting.
  comb_dat <- list(...) %>%
    do.call(rbind, .)

  # Relabel the facets if explicit sample names were given.
  if (!is.null(names))
    levels(comb_dat$field) <- names

  if (group != "none") {
    # BUG FIX: `group` holds a column *name* (a character scalar); the old
    # aes(fill = reorder(group, Freq)) reordered the string itself rather
    # than the column it names. The .data pronoun resolves it correctly.
    g <- ggplot(comb_dat,
                aes(x = reorder(Event, -Freq),
                    y = Freq,
                    fill = reorder(.data[[group]], Freq)))
    g <- g + geom_bar(stat = 'identity')
  } else {
    g <- ggplot(comb_dat,
                aes(x = reorder(Event, Freq),
                    y = Freq))
    g <- g + geom_bar(stat = "identity",
                      fill = I("darkgreen"))
  }

  # Percentage labels drawn above each bar stack (NA labels suppressed).
  g <- g + geom_text(aes(label = Total_prop),
                     na.rm = TRUE,
                     position = "stack",
                     hjust = 0.5,
                     vjust = -0.2,
                     size = percent_size)

  g <- g + ylab("Number of events")
  g <- g + xlab("Type of mismatch")
  g <- g + theme(axis.title.x = element_text(size = text_size),
                 axis.title.y = element_text(size = text_size),
                 axis.text.x = element_text(angle = 45,
                                            hjust = 1,
                                            size = text_size),
                 axis.text.y = element_text(size = text_size),
                 legend.text = element_text(size = text_size),
                 legend.title = element_text(size = text_size),
                 legend.key.height = unit(3, "line"),
                 strip.text.x = element_text(size = text_size))
  g <- g + guides(fill = guide_legend(title = "Tissues"))

  # One row of facets; `scales` spelled out in full (was partially matched
  # as `scale`, which relied on R's partial argument matching).
  g <- g + facet_wrap(~ field,
                      nrow = 1,
                      scales = "free_x")

  return (g)
}
|
da63a2d061b82a6fb0ad51a99e005b1be0cf6a0f
|
d0295bf5fdddd589d8026b600c8132bf846629c3
|
/exercises/final_project/sdsfinal_KC.R
|
98c73d7965ce833ae7eda21dcc848cbb48160972
|
[] |
no_license
|
tsecrystal/SDS-323
|
77d08c928f4564d0743c6e6db868992df24de08a
|
732b44ad351d71cad8a7bac37d266ccaa5593031
|
refs/heads/master
| 2023-05-03T00:49:13.403568
| 2021-05-22T01:53:53
| 2021-05-22T01:53:53
| 254,649,229
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,493
|
r
|
sdsfinal_KC.R
|
# Load the full package stack used throughout this analysis script.
packs <- c("tidyverse","tidyr","corrplot","caret","cluster","mosaic","glmnet","gamlr","dplyr","lubridate",
           "dendextend","kableExtra","ggcorrplot","mosaic","psych","gridExtra","LICORS","forcats","naniar",
           "randomForest","pdp","gmodels","ROCR", "yardstick","funModeling","lime","recipes","rsample",
           "ggthemes","rpart","rpart.plot","ggpubr","ggplot2","RColorBrewer")
lapply(packs, library, character.only = TRUE)
#big bank data set
bank <- read.csv("data/bank-additional-full.csv",
                 header = TRUE, sep =";")
# Quick NA count (prints only; missingness in this file is coded "unknown").
sum(is.na.data.frame(bank))
bank <- bank %>% dplyr::rename("deposit"="y")
# Drop duplicate rows; drop duration (measured after the outcome),
# plus default and pdays -- note later code must not reference these columns.
bank <- bank[!duplicated(bank), ]
bank$duration <- NULL
bank$default <- NULL
bank$pdays <- NULL
bank$deposit <- factor(bank$deposit)
xtabs(~bank$deposit)
# Prefix months/days with numbers so factor levels sort chronologically.
month_recode = c("mar" = "(03)mar",
                 "apr" = "(04)apr",
                 "may" = "(05)may",
                 "jun" = "(06)jun",
                 "jul" = "(07)jul",
                 "aug" = "(08)aug",
                 "sep" = "(09)sep",
                 "oct" = "(10)oct",
                 "nov" = "(11)nov",
                 "dec" = "(12)dec")
bank = bank %>%
  mutate(month = recode(month, !!!month_recode))
day_recode = c("mon" = "(01)mon","tue" = "(02)tue","wed" = "(03)wed","thu" = "(04)thu","fri" = "(05)fri")
bank = bank %>%
  mutate(day_of_week = recode(day_of_week, !!!day_recode))
fxtable = function(df, var1, var2){
  # Print a two-way cross-tabulation of two columns of `df`.
  # Row proportions are shown; column/table proportions and per-cell
  # chi-square contributions are suppressed.
  # df:   data frame containing both columns.
  # var1, var2: column names (strings) to cross together; they also label
  #             the table dimensions via `dnn`.
  CrossTable(df[[var1]], df[[var2]],
             prop.r = TRUE,
             prop.c = FALSE,
             prop.t = FALSE,
             prop.chisq = FALSE,
             dnn = c(var1, var2))
}
# Bin age into three bands for the descriptive cross-tabulations below.
bank2 <- bank %>%
  mutate(age = if_else(age > 60, "high", if_else(age > 30, "mid", "low")))
#fxtables: cross-tabulate each predictor against the outcome
fxtable(bank2, "age","deposit")
fxtable(bank2, "job", "deposit")
fxtable(bank2,"marital","deposit")
fxtable(bank2,"education", "deposit")
# BUG FIX: removed fxtable(bank2,"default","deposit") -- the `default`
# column is dropped from `bank` near the top of the script, so that call
# errored on a missing column.
fxtable(bank2,"housing","deposit")
fxtable(bank2,"loan","deposit")
fxtable(bank2,"contact","deposit")
fxtable(bank2,"month","deposit")
fxtable(bank2,"day_of_week","deposit")
fxtable(bank2,"campaign","deposit")
fxtable(bank2, "previous", "deposit")
fxtable(bank2, "poutcome","deposit")
# DATA ADJUSTMENT -----
# Drop rare/uninformative levels, then treat remaining "unknown" entries
# as missing and keep complete cases only.
bank <- bank %>%
  filter(job != "unknown") %>%
  # BUG FIX: was "unkown" (typo), so the marital filter matched nothing.
  filter(marital != "unknown") %>%
  filter(education != "illiterate")
bank[bank == "unknown"] <- NA
bank <- bank[complete.cases(bank), ]

# EDA -----
# Class balance of the outcome.
tab1 <- table(bank$deposit)
prop.table(tab1)
# Job frequencies, least to most common.
ggplot(bank, aes(x = fct_rev(fct_infreq((job))))) +
  geom_bar(fill = "darkblue") +
  coord_flip() +
  theme_bw() +
  labs(x = "Job Title", y = "Count")
# Marital status split by outcome.
ggplot(bank, aes(x = fct_rev(fct_infreq(marital)), fill = deposit)) +
  geom_bar() +
  coord_flip() +
  theme_bw() +
  labs(x = "Marital Status", y = "Count")
# Euribor distribution per outcome class.
ggplot(bank, aes(x = euribor3m, fill = deposit)) +
  geom_histogram(bins = 30) +
  facet_wrap(~deposit)
# NOTE(review): column 18 is selected by position, which is fragile after
# the column drops above -- confirm it is still the intended variable.
aggregate(bank[, 18], list(bank$deposit), median)
# Correlations among the macro-economic indicators.
bank %>%
  select(emp.var.rate, cons.price.idx, cons.conf.idx, euribor3m, nr.employed) %>%
  cor() %>%
  corrplot(method = "number",
           type = "upper",
           tl.cex = 0.8,
           tl.srt = 35,
           tl.col = "black")
# LOGISTIC -----
# Copy for the logistic model: drop loan and nr.employed, recode the
# outcome factor to 0/1.
bank3 <- bank
bank3$loan <- NULL
bank3$nr.employed <- NULL
bank3$deposit <- (as.numeric(bank3$deposit) - 1)
xtabs(~bank3$deposit)
# 75/25 stratified-ish split (createDataPartition on the 0/1 outcome).
set.seed(343)
ind = createDataPartition(bank3$deposit,
                          times = 1,
                          p = 0.75,
                          list = FALSE)  # spelled out; was `list = F`
bank3_train = bank3[ind, ]
bank3_test = bank3[-ind, ]
# Collinearity check on the full data, then fit on the training split.
b_mod0 <- glm(deposit ~ ., family = "binomial", data = bank3)
car::vif(b_mod0)
b_mod <- glm(deposit ~ ., family = "binomial", data = bank3_train)
summary(b_mod)
# Predicted probabilities on the test split; classify at the 0.5 threshold.
b_modpred = predict(b_mod, bank3_test, type = "response")
bpred <- ifelse(b_modpred > 0.5, 1, 0)
table(y = bank3_test$deposit, yhat = bpred)
confusionMatrix(data = factor(bpred), reference = factor(bank3_test$deposit))
# DECISION TREE -----
# Working copy without nr.employed (near-collinear with euribor3m).
bank6 = bank
bank6 <- select(bank, -nr.employed)
bank6 = arrange(bank6, deposit)
N = nrow(bank6)
# Age bands kept for descriptive output.
bank6 <- bank6 %>%
  mutate(age_distri = cut(age, c(20, 40, 60, 80, 100)))
summary(bank6)

# 75/25 train/test split by row index.
train_frac = 0.75
N_train = floor(train_frac * N)
N_test = N - N_train
train_ind = sample.int(N, N_train, replace = FALSE) %>% sort
load_train = bank6[train_ind, ]
load_test = bank6[-train_ind, ]

# BUG FIX: the tree was fitted on the full bank6 (train + test rows), so the
# held-out evaluation below leaked training information; fit on the training
# split only.
fit.tree <- rpart(deposit ~ ., data = load_train, method = 'class')
rpart.plot(fit.tree, extra = 106)
nbig = length(unique(fit.tree$where))  # number of leaves in the fitted tree
nbig
plotcp(fit.tree)
head(fit.tree$cptable, 100)

# 1-SE pruning rule: smallest tree whose cross-validated error is within one
# standard error of the minimum. (Defined here; applied manually if desired.)
prune_1se = function(treefit) {
  errtab = treefit$cptable
  xerr = errtab[, "xerror"]
  jbest = which.min(xerr)
  err_thresh = xerr[jbest] + errtab[jbest, "xstd"]
  j1se = min(which(xerr <= err_thresh))
  cp1se = errtab[j1se, "CP"]
  prune(treefit, cp1se)
}

# Hold-out predictions: hard classes for the confusion matrix,
# probabilities for the ROC section later.
tree_pred <- predict(fit.tree, load_test, type = 'class')
tree_predict <- predict(fit.tree, load_test, type = 'prob')
# BUG FIX: the table previously crossed the outcome with the base `predict`
# *function* (no variable named `predict` was ever created); use tree_pred.
dtr_table <- table(load_test$deposit, tree_pred)
dtr_table
confusionMatrix(tree_pred, load_test$deposit, positive = 'yes')
# RANDOM FOREST -----
# Manual 75/25 split on the cleaned data.
n = nrow(bank)
n_train = floor(0.75 * n)
n_test = n - n_train
train_cases = sample.int(n, size = n_train, replace = FALSE)
y_all = bank$deposit
# BUG FIX: dropped `pdays` from the design formula -- that column is
# removed from `bank` at the top of the script, so model.matrix errored.
# (The matrix interface is built but the forest below uses the data-frame
# interface; the matrices are kept for completeness.)
x_all = model.matrix(~age+job+marital+education+housing+loan+contact+month+day_of_week
                     +campaign+previous+poutcome+emp.var.rate+cons.price.idx+cons.conf.idx
                     +euribor3m+nr.employed, data=bank)
y_train = y_all[train_cases]
x_train = x_all[train_cases,]
y_test = y_all[-train_cases]
x_test = x_all[-train_cases,]
bank_train = bank[train_cases,]
bank_test = bank[-train_cases,]
# Fit on the training rows; evaluate on the held-out rows.
forest1 <- randomForest(deposit ~ ., data = bank_train)
yhat_test = predict(forest1, bank_test)
varImpPlot(forest1)
table(yhat_test, y_test)
confusionMatrix(yhat_test, y_test, positive = "yes")
# MODELS FOR ROC -----
# Refit all three classifiers under one preprocessing recipe and one CV
# scheme so their ROC curves are directly comparable.
bank1 <- bank
set.seed(123)
train_test_split <- initial_split(bank1, prop = 0.75, strata = 'deposit')
train_test_split
train_data <- training(train_test_split)
test_data <- testing(train_test_split)

# Drop zero-variance predictors, then centre and scale numeric columns.
recipe_obj <- recipe(deposit ~ ., data = train_data) %>%
  step_zv(all_predictors()) %>%
  step_center(all_numeric()) %>%
  step_scale(all_numeric()) %>%
  prep()
train_data <- bake(recipe_obj, train_data)
test_data <- bake(recipe_obj, test_data)

# 3-fold CV, optimising ROC AUC.
train_ctr <- trainControl(method = 'cv', number = 3,
                          classProbs = TRUE,
                          summaryFunction = twoClassSummary)

Logistic_model <- train(deposit ~ ., data = train_data,
                        method = 'glm', family = 'binomial',
                        trControl = train_ctr,
                        metric = 'ROC')
rf_model <- train(deposit ~ ., data = train_data,
                  ntree = 100,
                  method = 'rf',
                  trControl = train_ctr,
                  tuneLength = 1,
                  metric = 'ROC')
dtree_model = train(deposit ~ .,
                    data = train_data,
                    method = "rpart",
                    trControl = train_ctr,
                    metric = 'ROC')

# Class-probability predictions on the held-out split, gathered in one table.
pred_logistic <- predict(Logistic_model, newdata = test_data, type = 'prob')
pred_rf <- predict(rf_model, newdata = test_data, type = 'prob')
pred_dtr <- predict(dtree_model, newdata = test_data, type = 'prob')
evaluation_tbl <- tibble(true_class = test_data$deposit,
                         logistic_dep = pred_logistic$yes,
                         rf_dep = pred_rf$yes,
                         dtr_dep = pred_dtr$yes)

# Treat the second factor level ("yes") as the event of interest.
# NOTE(review): yardstick.event_first is deprecated in newer yardstick
# releases; switch to the event_level argument if the package is upgraded.
options(yardstick.event_first = FALSE)

# PLOTTING ROC -----
# Per-model ROC curve data, tagged for the combined plot.
roc_curve_logistic <- roc_curve(evaluation_tbl, true_class, logistic_dep) %>%
  mutate(model = 'Logistic')
roc_curve_rf <- roc_curve(evaluation_tbl, true_class, rf_dep) %>%
  mutate(model = 'Random Forest')
roc_curve_dtree <- roc_curve(evaluation_tbl, true_class, dtr_dep) %>%
  mutate(model = 'Decision Tree')
logistic_auc <- roc_auc(evaluation_tbl, true_class, logistic_dep)
rf_auc <- roc_auc(evaluation_tbl, true_class, rf_dep)
dtr_auc <- roc_auc(evaluation_tbl, true_class, dtr_dep)
roc_curve_combine_tbl <- Reduce(rbind, list(roc_curve_logistic, roc_curve_rf, roc_curve_dtree))

# Combined ROC plot.
# BUG FIX: corrected label typos ("Comparision" -> "Comparison",
# "Sensitity" -> "Sensitivity").
roc_curve_combine_tbl %>%
  ggplot(aes(x = 1 - specificity, y = sensitivity, color = model)) +
  geom_line(size = 1) +
  geom_abline(linetype = 'dashed') +
  theme_bw() +
  scale_color_tableau() +
  labs(title = 'ROC curve Comparison',
       x = '1 - Specificity',
       y = 'Sensitivity')
|
ff58bfbe19c1dd38d84ba07ab9744fe9298ad1df
|
db4118bc4c3fa27bce4c2d5039facbb9072479c0
|
/coevo/raw_data.n5/raw_data.n5.R
|
ce655b4a978f0c42ad51846c9942414def954ec0
|
[] |
no_license
|
yaotli/Packaging_Type
|
166d4a4b6b8d20daab88612bc497e02d9e8fc038
|
4dba547aed7105c13f5bf4042c121f2289081ae1
|
refs/heads/master
| 2021-01-23T01:26:21.502910
| 2019-06-17T03:44:33
| 2019-06-17T03:44:33
| 85,908,055
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,368
|
r
|
raw_data.n5.R
|
# Assemble, de-duplicate and QC the N5/N6 neuraminidase sequence set from
# GISAID and NCBI downloads.  Helper functions (fastaEx, idInfo.na,
# strainSelect, seqSelect, seqDate, taxaInfo, trimtool) come from
# function.coevo.R.
source( "./function.coevo.R" )
# database download ------------
#
# GISAID -> 35 isolates ( 35 in the file)
# 1. type 'A'; 2. N '5'
# 3. host undo 'Lab derived, Unknown, NA'
# 4. location 'all'
# 5. required seqments 'HA, NA'
# 6. mim length '1000'; 7. only GISAID
# format 'Isolate name Type Collection date Isolate ID'
#
# NCBI -> 589 isolates
# 1. type 'A'; 2. host undo 'Unknown'
# 3. country 'any'; 4. segment 'HA'
# 5. H 'any'; N '6'
# 6. required segments 'HA, NA'
# 7. 'exclude' pH1N1, 'exclude' lab strains
#    'include' lineage defining strains
#    'include' FLU project,
#    'include' vaccine strains, 'exclude' mixed
# format '>{accession}_{strain}_{serotype}_|{country}|_{year}-{month}-{day}'
#
fas_n5_g <- "./raw_data.n5/sources/pN5_G_35_20190115.fasta"
csv_g <- "./raw_data.n5/sources/G_20190115.csv"
fas_n5_n <- "./raw_data.n5/sources/pN5_N_589_20190115.fasta"
fas_curated_h5 <- "./raw_data/processed/pH5_8334.fasta"
# data cleaning ------------
# GISAID
# to remove one isolate which appears twice
#
seq_n5_g <- fastaEx( fas_n5_g )$seq
id_n5_g <- fastaEx( fas_n5_g )$id
infols_n5_g <- idInfo.na( rawid = id_n5_g, datasource = "g", g.csv = csv_g, na_subtype = 5 )
# NCBI
#
seq_n5_n <- fastaEx( fas_n5_n )$seq
id_n5_n <- fastaEx( fas_n5_n )$id
infols_n5_n <- idInfo.na( rawid = id_n5_n, datasource = "n", na_subtype = 6 )
# combine and remove the duplicate isolate ------------
# Merge GISAID and NCBI info field-by-field, append the sequences as the
# last list element, then let strainSelect()/seqSelect() drop duplicates
# and short/ambiguous sequences.
seq_n5 <- c( seq_n5_g, seq_n5_n )
infols_n5 <- lapply( as.list( seq( 1, length( infols_n5_n ) ) ), function(x){ c( infols_n5_g[[x]], infols_n5_n[[x]]) } )
infols_n5[[ length(infols_n5) + 1 ]] = seq_n5
s_infols_n5 <- strainSelect( infols_n5 )
s_infols_n5[[4]] <- seqDate( s_infols_n5[[4]] )
c_infols_n5 <- seqSelect( minlth = 1200, maxamb = 1, s_infols_n5, rmdup = FALSE ) # n = 567
# examine HA-NA information ------------
# For every H5 NA record, compare against the curated HA record of the same
# strain: flag mismatching country (field 3 vs field 2) or a collection-date
# gap larger than ~0.1 year.
# NOTE(review): field positions follow the helpers in function.coevo.R --
# confirm field 3 = country and field 4 = decimal date before relying on this.
info_h5 <- taxaInfo( fas_curated_h5 )
h5.i <- grep( "H5", c_infols_n5[[2]] )
m.i <- match( c_infols_n5[[5]][h5.i], info_h5[[5]] )
dismatch = c()
for( i in 1: length( m.i ) )
{
  if( !is.na(m.i[i]) )
  {
    if( c_infols_n5[[3]][h5.i][i] != info_h5[[2]][m.i][i] ){ dismatch = c(dismatch, i) }
    if( abs( as.numeric( c_infols_n5[[4]][h5.i][i] ) - info_h5[[4]][m.i][i] ) > 0.1 ){ dismatch = c(dismatch, i) }
  }
}
# export ------------
# Write the cleaned set with a composite header: accession_strain_|country|_subtype_date
write.fasta( sequences = c_infols_n5[[6]],
             names = paste0( c_infols_n5[[1]], "_",
                             c_infols_n5[[5]], "_|",
                             c_infols_n5[[3]], "|_",
                             c_infols_n5[[2]], "_",
                             c_infols_n5[[4]]
                             ), file.out = paste0( "raw_data.n5/processed/pn5_", length( c_infols_n5[[1]] ) ,".fasta") )
# seq manipulation ------------
trimtool( propblank = 0.9, filedir = "./raw_data.n5/processed/pN5_567_align.fasta" )
# remove outliers ------------
# two outliers found by TempEst
# EPI24750_Duck_Hunnan_70_2004_|China|_H5N5_2004.497
# MH597119_ruddy_turnstone_Delaware_1016391_2003_|USA|_H9N5_2003.384
n567.seqname = fastaEx( "./raw_data.n5/processed/pN5_567_trim2.fasta" )$id
n567.seq = fastaEx( "./raw_data.n5/processed/pN5_567_trim2.fasta" )$seq
rm <- grep( "EPI24750|MH597119", n567.seqname )
write.fasta( n567.seq[-rm], n567.seqname[-rm], file.out = "./raw_data.n5/processed/pN5_565.fasta" )
|
60327298d6ef274c9ddc491bcd17307d4ddfe3dc
|
74b5687987d31caad451f13a1fa5099ba939a63f
|
/load.R
|
2aa258fb461c254d26caf4bc1e01afa7a8b38297
|
[] |
no_license
|
RemkoDuursma/eucfacelaipaper
|
a4f35bfedc15677c84f40ac91a713f0696091abf
|
f83ba8e23fed9c89c2f1be81213ccac046f8a83b
|
refs/heads/master
| 2020-05-20T05:55:51.143622
| 2017-10-19T23:20:49
| 2017-10-19T23:20:49
| 41,735,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 718
|
r
|
load.R
|
# Project bootstrap: load packages, create output directories, and source
# the analysis functions.
source("R/utils.R")
# NOTE(review): `Library` (capital L) is presumably a wrapper defined in
# R/utils.R (e.g. install-if-missing); confirm before replacing with library().
Library(dplyr) # summarize, group_by
Library(doBy) # summaryBy
Library(gplots) # plotCI
Library(mgcv) # gam
Library(stringr) # str_trim
Library(Hmisc) # approxExtrap
Library(lubridate)
Library(rmarkdown) # render
Library(broom) # tidy; glance
Library(lme4) # lmer
Library(lmerTest) # anova.merMod
Library(car) # Anova
Library(reporttools) # formatPval
# set path for HIEv
if(!dir.exists("cache"))dir.create("cache")
if(!dir.exists("output"))dir.create("output")
if(!dir.exists("output/figures"))dir.create("output/figures")
# Load all functions
source("R/data_processing.R")
source("R/data_define.R")
source("R/functions-figures.R")
source("R/LAI-functions.R")
|
95f767529878a2604e6e154780d147fdea68cd40
|
cf2abbc30840891c91221a91a370700cdc4c91d5
|
/02_r_programming/week2/corr.R
|
6f14813f069261e56f2c4caaaa7ec92087f98730
|
[] |
no_license
|
ck-unifr/coursera-data-science-specialization
|
00c73f18f1f37a720bffb74d0a33e00c4a4e7c6a
|
61b0d4b37db5f4c2d77e0b0256d86eb5c3d7c461
|
refs/heads/master
| 2021-05-05T13:12:24.783972
| 2018-01-21T11:11:22
| 2018-01-21T11:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
corr.R
|
# https://www.coursera.org/learn/r-programming/supplement/amLgW/programming-assignment-1-instructions-air-pollution
corr <- function(directory, threshold = 0) {
  ## Compute the sulfate/nitrate correlation for every monitor whose number
  ## of completely observed cases exceeds `threshold`.
  ##
  ## 'directory': character vector of length 1 indicating the location of
  ##   the per-monitor CSV files.
  ## 'threshold': numeric vector of length 1 — minimum number of completely
  ##   observed observations required before a monitor is included (default 0).
  ## Returns a numeric vector of correlations. NOTE: results are NOT rounded.

  # `complete()` is the companion assignment function (defined elsewhere in
  # this project); it reports the number of complete rows per monitor id.
  obs_counts <- complete(directory, 1:332)
  keep_ids <- as.numeric(obs_counts$id[obs_counts$nobs > threshold])

  csv_paths <- list.files(directory, full.names = TRUE)

  # One correlation per qualifying monitor, in the same order as keep_ids;
  # returns numeric(0) when no monitor passes the threshold.
  vapply(keep_ids, function(monitor_id) {
    monitor_data <- read.csv(csv_paths[monitor_id])
    cor(monitor_data$sulfate, monitor_data$nitrate, use = "complete.obs")
  }, numeric(1))
}
|
81edeba4b658d1af4b8b03389a0356594c8ed639
|
37bb83711a55e27b1893c514fe18140b3e510504
|
/R/Class_tskrrHomogenousImpute.R
|
e386c5e855f75419079969019b50228ca5180ac6
|
[] |
no_license
|
MichielStock/xnet
|
147712a796d59fb6b35237b138e93cfb0cd507fb
|
76f0cb11c4078efd15a346f31ad4a5188409a9c6
|
refs/heads/master
| 2020-07-19T20:34:03.965482
| 2019-04-10T14:46:59
| 2019-04-10T14:46:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,669
|
r
|
Class_tskrrHomogenousImpute.R
|
#' Class tskrrHomogenousImpute
#'
#' The class \code{tskrrHomogenousImpute} is a subclass of the
#' class \code{\link[xnet:tskrrHomogenous-class]{tskrrHomogenous}} and
#' \code{\link[xnet:tskrrImpute-class]{tskrrImpute}}
#' specifically for homogenous networks with imputed values. It is
#' the result of the function \code{\link{impute_tskrr}}.
#'
#' @slot y the matrix with responses
#' @slot k the eigen decomposition of the kernel matrix for the rows
#' @slot lambda.k the lambda value used for k
#' @slot pred the matrix with the predictions
#' @slot symmetry a character value that can have the possible values
#' \code{"symmetric"}, \code{"skewed"} or \code{"not"}. It indicates
#' whether the \code{y} matrix is symmetric, skewed-symmetric or not
#' symmetric.
#' @slot has.hat a logical value indicating whether the kernel hat matrices
#' are stored in the object.
#' @slot Hk the kernel hat matrix for the rows.
#' @slot labels a list with elements \code{k} and \code{g} (see
#' \code{\link{tskrr-class}}). For homogenous networks, \code{g}
#' is always \code{NA}. If \code{k} is \code{NA}, the labels used
#' are integers indicating the row resp column number.
#' @slot imputeid a vector with integer values indicating which of
#' the values in \code{y} are imputed
#' @slot niter an integer value giving the number of iterations used
#' @slot tol a numeric value with the tolerance used
#'
#' @include Class_tskrrHomogenous.R Class_tskrrImpute.R
#' @rdname tskrrHomogenousImpute-class
#' @aliases tskrrHomogenousImpute
#' @exportClass tskrrHomogenousImpute
# The class adds no slots of its own: it simply combines the slots and
# validity checks inherited from tskrrImpute and tskrrHomogenous.
setClass("tskrrHomogenousImpute",
         contains = c("tskrrImpute", "tskrrHomogenous")
         )
|
026e73296ddf5d6f84e338509df4510fcc42ec39
|
22bef0c6a526c05600d60d471c0af715c6c78f8a
|
/code_function/get.msg_format_err.R
|
4cbf8500f2d9532b32a787f8e20466cca964ff55
|
[] |
no_license
|
littlefish0331/LINE_record2datatable
|
8a68901f554998b45e36e461c82fd527fd1f9bc4
|
f8be937c8640ce9d0218d2c46647b650cf63d727
|
refs/heads/master
| 2023-01-08T20:34:54.814168
| 2020-11-11T08:20:09
| 2020-11-11T08:20:09
| 300,217,986
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
get.msg_format_err.R
|
get.msg_format_err <- function(x){
  # Return the elements of `x` that contain exactly one tab character —
  # i.e. message lines whose format looks broken — or NULL when none do.
  # `pattern.count()` is a project helper counting pattern occurrences.
  tab_counts <- pattern.count(x, pattern = "\t")
  bad_lines <- NULL
  single_tab <- tab_counts == 1
  if (any(single_tab)) {
    bad_lines <- x[which(single_tab)]
  }
  bad_lines
}
|
efd6e027b7da26133d78b6c2d7a51308d70d7166
|
57044c7c31b292a9b28c0b983b3d46804a3d2d72
|
/tests/test-all.R
|
3e29ec27c7dfbeabf00df694464e8218e85b8fdb
|
[
"Apache-2.0"
] |
permissive
|
hrbrmstr/cspy
|
d45c41b82e17e3a14610be0baecf9296a1f4f640
|
b6a81ff24878fd53c7f310e826eb64408657bbbb
|
refs/heads/master
| 2020-04-27T07:59:20.467444
| 2019-03-10T20:24:20
| 2019-03-10T20:24:20
| 174,155,395
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37
|
r
|
test-all.R
|
# Entry point for R CMD check: run the full testthat suite for the
# cspy package (tests live under tests/testthat/).
library(testthat)
test_check("cspy")
|
01a4a98fd0d582a69047ea5fc0c70dcb29a18988
|
dacd3c8c5533768580f681115607c724e2ba034b
|
/R/compute_P.R
|
89c8e3f02c7d92507475316381176990efad687a
|
[] |
no_license
|
ShunsukeMatsuno/NumbersGame
|
1bd94f6d083419e9e8e4fc8710907280fee9f7d6
|
8f6f8dac231ee6759b2bb7104ba48a22b874eb04
|
refs/heads/master
| 2023-08-16T20:32:58.482509
| 2021-09-28T22:37:42
| 2021-09-28T22:37:42
| 268,521,337
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
compute_P.R
|
compute_P <- function(theta, S = 1000, parallel = TRUE){
  # Build the 41 x 41 transition matrix P over earnings bins -20..20.
  # P[j, b] is the empirical probability, over S simulation draws, that the
  # optimal manipulation for true-earnings bin b produces report j - 21.
  #
  # theta:    model parameter, passed through to
  #           compute_optimal_manipulation_for_e() (defined elsewhere)
  # S:        number of simulation draws per bin
  # parallel: when TRUE, run one foreach task per bin on a PSOCK cluster
  #
  # Returns a 41 x 41 numeric matrix whose columns each sum to 1.

  e_vec <- -20:20  # each state (bin)

  if (parallel == TRUE) {
    # parallel computation: one task per bin, result columns cbind-ed
    library(doParallel)  # attaching doParallel also provides foreach/%dopar%
    cl <- parallel::makePSOCKcluster(parallel::detectCores(logical = FALSE))
    doParallel::registerDoParallel(cl)
    # FIX: guarantee the cluster is shut down even if a worker errors
    # (previously stopCluster() was only reached on success, leaking workers).
    on.exit(parallel::stopCluster(cl), add = TRUE)

    result <- foreach(i = seq_along(e_vec),
                      .packages = c('dplyr', 'purrr', 'NumbersGame', 'tidyr'),
                      .combine = 'cbind') %dopar% {
      compute_optimal_manipulation_for_e(e_vec[i], theta, S)$R
    }
  } else {
    # sequential computation
    result <- array(dim = c(S, length(e_vec)))  # S x 41 array
    for (i in seq_along(e_vec)) {
      result_temp <- compute_optimal_manipulation_for_e(e_vec[i], theta, S)
      result[, i] <- result_temp$R
    }
  }

  # compute P: tabulate the simulated reports into bins -20..20 per column
  P <- array(dim = c(41, 41))
  for (b in seq_len(nrow(P))) {
    for (j in seq_len(ncol(P))) {
      P[j, b] <- sum(result[, b] == j - 21) / S
    }
  }
  P
}
|
3466c8e206b13b82a1756fee0b05102e4bcd7570
|
da80cc590a53b6e3956df26fd836d3e9bc8af698
|
/R/genlogistic.R
|
3dbc67a83909a8787d211de1cd351882f7f3fcbb
|
[] |
no_license
|
cran/L0Learn
|
e1dfd1990f7dbcee3e90a7c0981da7acaff5cbe8
|
3912974251d8192bded47a655e08b9bcd8bac95b
|
refs/heads/master
| 2023-03-17T14:43:44.441721
| 2023-03-07T07:00:18
| 2023-03-07T07:00:18
| 145,894,788
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
genlogistic.R
|
#' @importFrom stats rnorm rbinom
#' @importFrom MASS mvrnorm
#' @title Generate Logistic Synthetic Data
#'
#' @description Generates a synthetic dataset as follows: 1) Draw a data
#' matrix X from a multivariate Gaussian with mean 0 and covariance
#' \code{sigma} (i.i.d. standard normal entries when \code{sigma} is NULL).
#' 2) Build a coefficient vector B with k entries set to 1 and the rest zero.
#' 3) Sample every coordinate yi of the outcome vector independently from a
#' Bernoulli distribution with success probability:
#' P(yi = 1|xi) = 1/(1 + exp(-s<xi, B>))
#' Source https://arxiv.org/pdf/2001.06471.pdf Section 5.1 Data Generation
#' @param n Number of samples
#' @param p Number of features
#' @param k Number of non-zeros in true vector of coefficients
#' @param seed The seed used for randomly generating the data
#' @param rho Sparsification threshold: entries of X with |X(i, j)| < rho are
#' set to 0
#' @param s Signal-to-noise parameter. As s -> +Inf, the data generated becomes linearly separable.
#' @param sigma Correlation matrix, defaults to I.
#' @param shuffle_B A boolean flag for whether or not to randomly shuffle the Beta vector, B.
#' If FALSE, the first k entries in B are set to 1.
#' @return A list containing:
#'     the data matrix X,
#'     the response vector y,
#'     the coefficients B.
GenSyntheticLogistic <- function(n, p, k, seed, rho=0, s=1, sigma=NULL, shuffle_B=FALSE)
{
    if (s < 0) {
        stop("s must be fall in the interval [0, +Inf)")
    }

    set.seed(seed)

    # Sample the design matrix (RNG draws happen in the same order as before).
    if (is.null(sigma)) {
        X <- matrix(rnorm(n*p), n, p)
    } else {
        if ((ncol(sigma) != p) || (nrow(sigma) != p)) {
            stop("sigma must be a semi positive definite matrix of side length p")
        }
        X <- mvrnorm(n, mu=rep(0, p), Sigma=sigma)
    }

    # Zero out small entries to induce (approximate) sparsity in X.
    X[abs(X) < rho] <- 0

    # True coefficient vector: k ones followed by p - k zeros.
    B <- rep(c(1, 0), times = c(k, p - k))
    if (shuffle_B) {
        B <- sample(B)
    }

    # Bernoulli responses with logistic success probability.
    y <- rbinom(n, 1, 1/(1 + exp(-s*X%*%B)))

    list(X=X, B=B, y=y, s=s)
}
|
53f87fcdb7c3537540912419e3f9f616d1306c61
|
18f3e432f4c82d2a6f66789393668d31a1e9a175
|
/man/signifString.Rd
|
f5143f1d51968f61791db053c31b52fd8d068fba
|
[] |
no_license
|
ksl31/pknca
|
39490c48c87a41127582706acc7d007fc8664b2a
|
5f2743cd46b33a67c2da7f78a928447e1fd0d895
|
refs/heads/master
| 2021-04-26T23:28:19.193889
| 2018-02-17T21:58:38
| 2018-02-17T21:58:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 918
|
rd
|
signifString.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/general.functions.R
\name{signifString}
\alias{signifString}
\alias{signifString.data.frame}
\alias{signifString.default}
\title{Round a value to a defined number of significant digits printing out trailing
zeros, if applicable.}
\usage{
signifString(x, digits = 6)
\method{signifString}{data.frame}(x, digits = 6)
\method{signifString}{default}(x, digits = 6)
}
\arguments{
\item{x}{The number to round}
\item{digits}{integer indicating the number of significant digits}
}
\value{
A string with the value
}
\description{
Round a value to a defined number of significant digits printing out trailing
zeros, if applicable.
}
\details{
Values that are not standard numbers like \code{Inf}, \code{NA}, and
\code{NaN} are returned as \code{"Inf"}, \code{"NA"}, and \code{"NaN"}.
}
\seealso{
\code{\link{signif}}, \code{\link{roundString}}
}
|
5825d78837ab4112dd910b3cd7dec79e664e2813
|
b9f391a847085413f6059ad88392be2b9b874ce2
|
/TCSHackathon.R
|
491d547c2c0848543b4a73c768675519a1b9c103
|
[] |
no_license
|
SupriyaShinde11/DISQHACKATHON
|
3890c8da61c411d9bfb2e09a3a92a24ffe87fbb7
|
1c39b7e12c15a3e948cd293c40216c54b15b84c6
|
refs/heads/master
| 2020-06-20T06:36:12.154387
| 2020-05-06T06:56:02
| 2020-05-06T06:56:02
| 74,876,180
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
TCSHackathon.R
|
# TCS Hackathon exploratory script: text clean-up, plotting and SQL-style
# summaries of IPL cricket data.
# FIX: this file was pasted from an interactive console — the `+`
# continuation prompts have been stripped (they made the file unparseable)
# and the cleaning helper (garbled as `postcatch.error`) is defined before
# its first use so the script runs top-down.
setwd("D:/RdataWork")
getwd()

data<- read.csv(file="matches.csv",head=TRUE,sep=",")
data
data1<- read.csv(file="Deliveries.csv",head=TRUE,sep=",")
data1

install.packages("ggplot2")
install.packages("tm")
install.packages("sentiment.tar.gz", repos=NULL, type="source")
library("sentiment")
library("Rstem")
library("NLP")
library("slam")
library("tm")
library("Rstem", lib.loc="~/R/win-library/3.2")
library("ggplot2")
library("proto")
library("plyr")
library("RColorBrewer")

# NOTE(review): `data` is a data.frame here, so gsub() coerces it to a
# character vector of column representations — confirm whether a specific
# text column was intended for this cleaning pipeline.
data= gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", data)
data= gsub("@\\w+", "", data)
data = gsub("[[:punct:]]", "",data)
data = gsub("[[:digit:]]", "", data)
data= gsub("http\\w+", "", data)
data = gsub("[ \t]{2,}", "", data)
data = gsub("^\\s+|\\s+$", "", data)

# Lower-case a value, returning NA if tolower() fails.
catch.error = function(x)
{
  # let us create a missing value for test purpose
  y = NA
  # try to catch that error (NA) we just created
  catch_error = tryCatch(tolower(x), error=function(e) e)
  # if not an error
  if (!inherits(catch_error, "error"))
    y = tolower(x)
  # check result if error exists, otherwise the function works fine.
  return(y)
}

data= sapply(data, catch.error)
data= data[!is.na(data)]
data

(wwt <- hist(women$weight,nclass = 7, plot = FALSE))
plot(wwt, labels = TRUE) # default main & xlab using wwt$xname
plot(wwt, border = "dark blue", col = "light blue",
     main = "Histogram of the Performance of the Cricket", xlab = "")

library(wordcloud)
wordcloud(data)

if (!require("cricketr")){
  install.packages("cricketr",lib = "c:/test")
}
library(cricketr)

data1 <- getPlayerData(35320,dir="D:/RdataWork",file="deliveries.csv",type="batting",homeOrAway=c(1,2),
                       result=c(1,2,4))
batsmanAvgRunsGround("./deliveries.csv","SC Ganguly")

library(plotly)
p <- plot_ly(
  x = c("season", "city", "winner"),
  y = c(20, 14, 23),
  name = "Performance",
  type = "bar"
)

library(sqldf)
# NOTE(review): these queries are missing a comma between the selected
# column and count(*) — confirm the intended aggregation before relying
# on the results.
sqldf("select batsman count(*) from data1 where extra_runs is not null group by total_runs")

library(ggplot2)
DF=sqldf("select batsman from data1 where extra_runs=0")
qplot(DF$total_runs,data1=DF, geom='histogram')
sqldf("select match_id,inning,batsman count(*) from data1 where batsman_runs is not null group by extra_runs")

# Create the data for the chart.
# NOTE(review): H has 5 values but M has 8 labels, so barplot() will stop
# with "incorrect number of names" — confirm the intended series.
H <- c(7,12,28,3,41)
M <- c(2008,2009,2010,2011,2012,2013,2014,2015)

# Give the chart file a name.
png(file = "barchart_year_revenue.png")

# Plot the bar chart.
barplot(H,names.arg = M,xlab = "year",ylab = "Revenue",col = "blue",
        main = "Revenue chart",border = "red")

# Save the file.
dev.off()
|
655eddf4c107cbedd471fd351dd90474ef87e3fc
|
c9555ae7694cf75e459a4b7026c1a003033dd83b
|
/website/agVizTool/ui.R
|
d69d6d196998fd4119ad3aefa389f0a5c5c4f071
|
[
"MIT"
] |
permissive
|
YutingYao/Ag
|
4285fa9d1c942448ed072200e798dc5f51fd9f29
|
fe73916f6e549ff6cc20bfed96a583e3919ac115
|
refs/heads/master
| 2023-06-17T21:58:14.849291
| 2021-07-20T17:28:30
| 2021-07-20T17:28:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,494
|
r
|
ui.R
|
# Shiny UI for the rangeland climate-vulnerability visualization tool:
# package attachments plus the static choice vectors used by the controls.
library(leaflet)
library(shinyBS)
library(shiny)
library(plotly)
library(shinydashboard)

# Map menu
# Choice vectors for selectInput/radioButtons (display label = server value).
overlays <-c(
"Satellite"="Satellite",
"Topographic"="Topographic",
"Basic"="Basic"
)
boundaries <- c(
"State"="State",
"County"="County",
"District"="District"
)
# Indicator labels map to the short codes the server uses to pick layers.
indicators <- c(
"Net Primary Productivity" = "npp",
"Inter-annual Forage Variability" = "nppsd",
"Vegetation Type Trajectory" = "mc2",
"Heat Stress Index" = "hsi",
"Vulnerability Index" = "vulstk"
)
# Decade labels map to integer codes (2 = 2010s, ..., 10 = 2090s).
decades <- c(
"2010-2020" = 2,
"2020-2030" = 3,
"2030-2040" = 4,
"2040-2050" = 5,
"2050-2060" = 6,
"2060-2070" = 7,
"2070-2080" = 8,
"2080-2090" = 9,
"2090-2100" = 10
)
climateModels <- c(
"A1B" = "a1b",
"A2" = "a2",
"B2" = "b2"
)
# Glossary strings displayed under the plots in the modal.
# NOTE(review): the display text contains typos ("Primary Primary",
# "Vegeation") — left untouched here because they are user-facing strings.
heatStress <- "Heat Stress - Negative physiological effects on cattle due to temperature"
netPrimaryProductivity <- " Net Primary Productivity - Carbon uptake after subtracting Plant Respiration from Gross Primary Primary Productivity"
forageVariability <- "Inter-annual Forage Variability - Standard deviation in annual average forage quantity"
VegetationType <- "Vegeation Type Trajectory - Ratio of edible to inedible vegetation"
vulnerabilityIndex <- "Vulnerability Index - Aggregate of four indicators"
vulnerabilityIndex <- "Vulnerability Index - Aggregate of four indicators"
# Top-level page layout: a navbar with a Home section (static HTML content)
# and a Map section (full-screen leaflet map + control panel + plot modal).
shinyUI(
navbarPage(title = div( "",
img(src='csanr_logo.png', style='width:100px;height:35px;')
#img(src='WSU-DAS-log.png', style='width:100px;height:35px;'),
#img(src='NW-Climate-Hub-Logo.jpg', style='width:100px;height:35px;'),
#img(src='usfslogo.png', style='width:32px;height:35px;')
), id="nav", windowTitle = "Rangelands",
tabPanel(tags$b("Home"),
# Side navigation of static pages served from home-page/*.html.
navlistPanel(
tabPanel(tags$b("About"), tags$div(style="width:950px", includeHTML("home-page/about.html"))),
tabPanel(tags$b("People"), tags$div(style="width:950px", includeHTML("home-page/people.html"))),
tabPanel(tags$b("Climate Data"), tags$div(style="width:950px", includeHTML("home-page/climate-change-projections.html"))),
tabPanel(tags$b("Rangeland Indicators"), tags$div(style = "width: 950px", includeHTML("home-page/life-cycle.html"))),
tabPanel(tags$b("Contact"), tags$div(style="width:950px", includeHTML("home-page/contact.html"))),
tabPanel(tags$b("Video"), tags$div(style="width:950px", includeHTML("home-page/take-a-tour.html"))),
widths = c(2,10)
)
),
tabPanel("Map",
fluidPage(id = "nav", inverse=FALSE, fluid=FALSE, title="Tool",
div(class="outer",
tags$head(
#include our custom CSS
includeCSS("styles.css"),
includeScript("gomap.js")
),
# Full-screen leaflet map rendered by the server (output$map).
leafletOutput("map", width="100%", height="100%"),
# Fixed control panel (top-right) driving overlay, boundary, indicator,
# decade and scenario selection; choices come from the vectors above.
absolutePanel(id="menuPanel", draggable=FALSE, width=330, height="auto",
left="auto", right=20, bottom="auto", top=60, fixed=TRUE,
inverse=TRUE, h2("Map Key"),
selectInput("tileSelect", "Map Overlay", overlays),
selectInput("boundaries", "Map Boundaries", boundaries),
selectInput("Indicators", "Indicators", indicators),
selectInput("Decades", "Time Period", decades),
selectInput("ClimateModel", "Climate Model", climateModels)
)
)
),
# Modal dialog holding the indicator plots plus the glossary paragraphs.
fluidPage(
bsModal("Graphs", trigger=NULL, title = "", size="large",
dashboardPage(
dashboardHeader(title = "Plots"),
dashboardSidebar(
radioButtons("climate", "Scenarios:", climateModels),
radioButtons("indicator", "Indicators:", indicators)),
dashboardBody(plotOutput("Plot"), p(heatStress),p(netPrimaryProductivity), p(forageVariability), p(VegetationType), p(vulnerabilityIndex))
)
)
)
)
)
)
|
d325be6f01adbcc9584914c8f511be7bd34e2058
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/apigateway_create_model.Rd
|
76500044664c68473fb34a90a51996748ad3258f
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 938
|
rd
|
apigateway_create_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigateway_operations.R
\name{apigateway_create_model}
\alias{apigateway_create_model}
\title{Adds a new Model resource to an existing RestApi resource}
\usage{
apigateway_create_model(
restApiId,
name,
description = NULL,
schema = NULL,
contentType
)
}
\arguments{
\item{restApiId}{[required] The RestApi identifier under which the Model will be created.}
\item{name}{[required] The name of the model. Must be alphanumeric.}
\item{description}{The description of the model.}
\item{schema}{The schema for the model. For \code{application/json} models, this should be
JSON schema draft 4 model.}
\item{contentType}{[required] The content-type for the model.}
}
\description{
Adds a new Model resource to an existing RestApi resource.
See \url{https://www.paws-r-sdk.com/docs/apigateway_create_model/} for full documentation.
}
\keyword{internal}
|
66dd41330f221f5d48a191819e5aedb4bdc36250
|
daeb0283ea20e5dd187d6130d39b2b4c52fe6af4
|
/R/headline-index.R
|
441a50cd91acc838eef306eb3ba6ca8b65c3b04e
|
[] |
no_license
|
davidallen02/ism-manufacturing
|
1fbabdf77b009c0b75aa7f49f03ecaf5145c8431
|
4937b20231297821198206e87c6baed03112fde4
|
refs/heads/master
| 2020-12-04T07:02:13.575445
| 2020-10-19T03:49:51
| 2020-10-19T03:49:51
| 231,667,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
headline-index.R
|
# Build and export the ISM Manufacturing PMI headline chart
# (data access and plotting via the project's pamngr package).
library(magrittr)

# Last 60 monthly observations of the headline series, reshaped to long form.
headline_data <- pamngr::get_data("napmpmi") %>%
  tidyr::pivot_longer(cols = -dates, names_to = "variable") %>%
  dplyr::slice_max(dates, n = 60)

# Styled line plot of the headline index.
headline_plot <- pamngr::lineplot(headline_data) %>%
  pamngr::pam_plot(
    plot_title = "ISM Manufacturing PMI",
    show_legend = FALSE
  )

# Write the chart to the standard output targets under the name "headline".
pamngr::all_output(headline_plot, "headline")
|
b6948b35ca0931c836c04263f6a53a512c2683e5
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051353-test.R
|
0d6f80b2df80c9a9a7bb783316079e2a6cff6026
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
1610051353-test.R
|
# Auto-generated libFuzzer/valgrind regression input for grattan::IncomeTax.
# Replays extreme/degenerate numeric inputs (NaN, denormals, huge magnitudes)
# that previously triggered a finding; do not edit the values by hand.
testlist <- list(rates = c(5.74790139408988e+199, -7.40507430175477e-171, NaN, -2.11940804072454e-289, 2.00877667922351e-139, -9.5885701929841e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, -9.77719780333789e-292, 4.95224540605997e-303, 1.21044018122494e-305, -2.05226841231125e-289, NaN, NaN, -5.48612677708849e+303, NaN, NaN, 9.70418708486102e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, 9.70418706716128e-101, -3.72626152437281e+304, NaN, 4.77830972673634e-299, 2.31058199965605e-138, -2.1196558858712e-289, -1.83593039382815e-307, 9.70418706716715e-101, 2.00872040108461e-139, 7.10464042017561e-304, 2.41252237132898e+306, -1.26823257962768e-30, 4.274105578689e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), thresholds = 2.78132115530642e-309, x = NaN)
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
a9e4dfcc3fee3ec57a0fb798a94d4928a8e4f359
|
8ccc8ab383a17439249146ae58b1d6b9808b38d4
|
/server/server_description.R
|
5494932ec005d77fc45144c689b3f128d728ad43
|
[] |
no_license
|
GillesDeLille/mlExplore
|
f475178a8e80d79b0adcc5e594241d0955c06254
|
8743a4f5d7f038c0101d50162daaeb964f671c1b
|
refs/heads/main
| 2023-02-21T03:16:21.135312
| 2020-08-27T18:17:57
| 2020-08-27T18:17:57
| 263,023,106
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
server_description.R
|
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Model picker: reads the list of implemented models from implementations.csv
# and renders a select input. (Assumes data.table::fread, shiny and %>% are
# attached by the enclosing app — TODO confirm against the app's global.R.)
output$uiModeles <- renderUI({
impl=fread('implementations.csv')
liste_modeles=impl$modele %>% unique()
selectInput('modele','Modèles disponibles', choices = liste_modeles)
})
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Description panel: renders markdown/<model>.Rmd for the selected model,
# defaulting to RandomForest before input$modele exists.
output$uiDescription <- renderUI({
modele='RandomForest' ; if(!is.null(input$modele)) modele=input$modele
list(
setShadow(class = 'box'),
column(2,br()), box(width=8, includeMarkdown(paste0('markdown/',modele,'.Rmd'))), column(2, br())
# ,column(6,uiOutput('uiModeleValidite'))
)
})
|
1df929ef7e536e52333d4039858079c8686bd369
|
9913e092ad2b819ffbb1ecf5a9d2428f7e9c3ec6
|
/Lesson_1/Factors/initLesson.R
|
1e2a2b12102dc7ae8192caee6476bd93c30432c9
|
[
"CC0-1.0",
"X11",
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] |
permissive
|
cimentadaj/Rseminars
|
4942660f132987be42d2df75927a975591f86217
|
b4fd63306f5cc16ab700b5c05ffa963bf9c8ee8f
|
refs/heads/master
| 2020-04-16T02:30:16.704785
| 2018-06-29T07:20:41
| 2018-06-29T07:20:41
| 53,986,913
| 3
| 4
| null | 2016-05-05T07:02:50
| 2016-03-15T23:53:25
|
R
|
UTF-8
|
R
| false
| false
| 273
|
r
|
initLesson.R
|
# Code placed in this file will be executed every time the
# lesson is started. Any variables created here will show up in
# the user's working directory and thus be accessible to them
# throughout the lesson.

# Simulated binary indicator (100 draws of 0/1) for the lesson's exercises.
# FIX: use TRUE rather than the reassignable shorthand T.
gender <- sample(c(1, 0), 100, replace = TRUE)
|
47abb40a5afc0ad23e3623cd302051f4acbae52e
|
da785507ed522cb8d1864f18757248739caacdf5
|
/week3/loan.R
|
12b13ed297931c6ff79180985a6fcba76b0e887c
|
[] |
no_license
|
hkreeves/AnalyticsEdgeR
|
acba4ec1cbd1b04c61beea0a539482bcd15b4516
|
24fe1c798ceacef0a49888b2ead29e5be0e8460c
|
refs/heads/master
| 2016-09-10T04:40:51.435044
| 2014-04-23T21:00:07
| 2014-04-23T21:00:07
| 17,723,750
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,605
|
r
|
loan.R
|
##
## Week3 HW: Loan
##
## Logistic regression on peer-to-peer loan data: impute missing values,
## fit default-risk models, compare AUCs, and evaluate a simple
## high-interest investment strategy.

loan <- read.csv("loans.csv")
str(loan)
summary(loan)

# share of loans not fully paid back
prop.table(table(loan$not.fully.paid))

# how many complete cases
complete <- complete.cases(loan)
table(complete)
table(loan$not.fully.paid[complete])
table(loan$not.fully.paid[!complete])

# multiple imputation of the independent variables
library(mice)
set.seed(144)
loan[,1:(ncol(loan)-1)] <- complete(mice(loan[,1:(ncol(loan)-1)]))
#vars.for.imputation = setdiff(names(loan), "not.fully.paid")
#imp = complete(mice(loan[vars.for.imputation]))
#loan[vars.for.imputation] = imp

# use the course-provided imputed file so results match the assignment
loan <- read.csv("loans_imputed.csv")

# split data (70% train / 30% test, stratified on the outcome)
library(caTools)
set.seed(144)
split <- sample.split(loan$not.fully.paid, SplitRatio=0.7)
train <- subset(loan, split)
test <- subset(loan, !split)

# logistic regression model using all independent variables
log1 <- glm(not.fully.paid ~., data=train, family="binomial")
summary(log1) # int.rate is not significant, counter-intuitively
cor(train[-2]) # indeed it is high correlated with fico (0.7)

# predict on test set
test$predict.risk <- predict(log1, newdata=test, type="response")
table(test$not.fully.paid, test$predict.risk > 0.5)

# accuracy
# FIX: was `is.numeric(...)`, which returns a single FALSE and therefore
# computed the share of fully-paid loans instead of the model accuracy.
mean(test$not.fully.paid == as.numeric(test$predict.risk > 0.5))
# baseline: predict "fully paid" for everyone
mean(test$not.fully.paid == 0)

# AUC using ROCR
library(ROCR)
roc1 <- prediction(test$predict.risk, test$not.fully.paid)
as.numeric(performance(roc1, "auc")@y.values) # 0.672

# a smarter baseline model: bivariate model with int.rate
baseline <- glm(not.fully.paid ~ int.rate, data=train, family="binomial")
summary(baseline)
pred.base <- predict(baseline, newdata=test, type="response")
sort(pred.base, decreasing = TRUE)[1:5] # highest prediction is 0.4266240
# a 0.5 threshold would result in 0 not.fully.paid prediction

# AUC of baseline (column name spelled out; was relying on partial matching)
roc.base <- prediction(pred.base, test$not.fully.paid)
as.numeric(performance(roc.base, "auc")@y.values) # 0.624

# explore the profit expectation (3-year continuously compounded return)
test$profit <- exp(test$int.rate*3)-1
test$profit[test$not.fully.paid==1] <- -1
# maximum profit on $10 investment  (FIX: this line was missing its `#`)
10*max(test$profit)
summary(test$profit) # mean is 0.2094

# investment strategy of picking 100 high interest loans with lowest default prob.
summary(test$int.rate)
highinterest <- subset(test, int.rate > 0.15)
str(highinterest)
mean(highinterest$profit) # 0.2251015
t.high <- table(highinterest$not.fully.paid)
prop.table(t.high)

# select top 100 with lowest default prob. on prediction
#cutoff <- sort(highinterest$predict.risk, dec=F)[100]
#picks2 <- subset(highinterest, predict.risk <= cutoff)
picks <- highinterest[order(highinterest$predict.risk)[1:100],]
mean(picks$profit)
prop.table(table(picks$not.fully.paid))
summary(picks$profit)
|
32902ba4a1656e4b436d20d708a7628396a9d9ca
|
f401bed3c72cb7e5ffbf4a465b0a0cd1fe38b301
|
/fig1/reddit/plot.comments.and.uniqueness.r
|
74f73fe64af5ea89660443c17baf62f986d7211e
|
[] |
no_license
|
SinaEghbal/online-diversity
|
994eaf00a5c7a3312d3d2ce3d0b5cd5c5ccbd041
|
fee8ec2faffb534a67cec4387fce3e70e94d3dda
|
refs/heads/master
| 2021-03-30T00:26:26.221316
| 2020-11-27T05:40:53
| 2020-11-27T05:40:53
| 85,516,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,397
|
r
|
plot.comments.and.uniqueness.r
|
#!/usr/bin/Rscript
# Plot reddit comment volume, link uniqueness, and a quadratic trend of the
# share of posts containing links. Reads `data/reddit-volume.dat` (object
# `reddit`: a matrix with named rows, one column per time bucket) and writes
# three PDFs into the working directory.

rm (list = ls ())   # clears the workspace; acceptable in a standalone
                    # Rscript, avoid when sourcing interactively
load ('data/reddit-volume.dat')

# --- total comments vs comments containing links --------------------------
pdf ('comments.pdf')
max_y <- max (as.numeric (reddit['total.comments',]))
plot (reddit['total.comments',], type = 'l', xaxt = 'n', xlab = rep ('', ncol (reddit)), ylab = 'comments', col = 'blue',
      ylim = c (0, max_y + 5))
par (new = TRUE)
plot (reddit ['comments.with.links', ], type = 'l', xaxt = 'n', yaxt = 'n', xlab = rep ('', ncol (reddit)), ylab = '', col = 'red', ylim = c (0, max_y + 5))
axis (1, at = seq (1,ncol (reddit), by = 5), labels = colnames (reddit)[seq (1,ncol (reddit), by = 5)], las = 2, xlab = '', cex.axis = 0.7)
legend("topleft", legend = c ('total comments', 'comments with links'), lty= c (1, 1),
       col = c ('blue', 'red'))
mtext ('# of Comments/Time - reddit')
dev.off ()

# --- link uniqueness over time --------------------------------------------
pdf ('uniqueness.pdf')
par (new = FALSE)
# FIX: `na.rm = TRUE` was passed to as.numeric() instead of max(), so an NA
# in the series would have propagated into the axis limit.
max_y <- max (as.numeric (reddit ['uniqueness.of.links', ]), na.rm = TRUE)
# min_y <- as.numeric (min (as.numeric (reddit ['uniqueness.of.links', ], na.rm = TRUE)))
plot (reddit ['uniqueness.of.links', ], type = 'l', ylab = 'Links uniqueness (%)', xlab = '', xaxt = 'n', col = 'blue', ylim = c (0, max_y + 5))
par (new = TRUE)
# plot (reddit ['posts.with.links',], type = 'l', ylim = c (0, max_y + 5), col = 'red', ylab = '', yaxt = 'n', xaxt = 'n', xlab = '')
axis (1, at = seq (1,ncol (reddit), by = 5), labels = colnames (reddit)[seq (1,ncol (reddit), by = 5)], las = 2, xlab = '', cex.axis = 0.7)
legend("topleft", legend = c ('Uniquness (%)'), lty= c (1),
       col = c ('blue'))
mtext ('Link uniqueness/Time - reddit')
par (new = FALSE)
dev.off ()

# --- quadratic trend of the share of posts with links ---------------------
x_reddit <- seq (1, length (reddit ['posts.with.links', ]))# which (!is.na (links ['reddit', ]))# which (!is.na (links ['reddit',]))
fit_linked_posts_reddit <- lm (unlist (reddit ['posts.with.links', ], use.names = FALSE)~poly(x_reddit,2,raw=TRUE))

pdf ('reddit-posts-w-links.pdf')
columns <- colnames (reddit)
plot (reddit ['posts.with.links',], type = 'l', xaxt = 'n', col = 'red', ylab = "% of posts w links", xlab = '')
points (x_reddit, predict (fit_linked_posts_reddit, data.frame (x = x_reddit)), col = 'red', pch = '.')
axis (1, at = seq (1, length (columns), 5), labels = columns [seq (1, length (columns), 5)], las = 2, cex = 0.6)
legend("topleft", legend = c ('# of posts with links', 'prediction model'), lty= c (1, 3),
       col = c ('red', 'red'))
mtext ('# of posts with links - reddit')
dev.off ()
|
40614c2aab612a3690fdfa7ffb415f5c09b793a8
|
d78076c589a0de633474ee3309fe049c8a805e07
|
/src/assignTax.R
|
995dac756f088f44c1b5765356bf2797d10a6466
|
[] |
no_license
|
NielInfante/Hawkins_18S
|
35ab2bb62f3f49c624ef454095390a358c0daea8
|
2f0976beb0fa9b6b2f60403e1b930eb19365bbf7
|
refs/heads/master
| 2020-04-09T00:51:39.870863
| 2019-05-09T00:33:36
| 2019-05-09T00:33:36
| 159,884,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,675
|
r
|
assignTax.R
|
# script to classify 28S amplicon data
# Exploratory script: tries several reference databases/classifiers
# (custom fasta, RDP LSU, SILVA SSU via DECIPHER) to assign taxonomy to a
# dada2 sequence table. Intended to be run interactively, block by block.
#library(DECIPHER)
library(dada2)

setwd('~/depot/projects/Hawkins/Metagenomics_Brzostek/MyGo/18S_fromGit/')

# NOTE(review): `seqtab` is only loaded by the readRDS() below, so this
# first assignTaxonomy() call fails if the script is executed top-down.
taxa <- assignTaxonomy(seqtab, 'Data/fromJen/taxa.fa',multithread = T)

# Try using https://zenodo.org/record/835855#.XLSFbFNKi5M
seqtab <- readRDS('Data/seqtab.rds')
taxa <- assignTaxonomy(seqtab, 'Data/RDP_LSU/fw/fungiLSU_train_012014.fa',multithread = T)

# Build dada2-formatted training fasta files from the raw RDP LSU data
# (uses dada2 internal helpers, hence the ::: access).
path<-"Data/RDP_LSU/RDPClassifier_fungiLSU_trainsetNo11_rawtrainingdata"
dada2:::makeTaxonomyFasta_RDP(file.path(path, "fungiLSU_taxid_012014.fa"),
file.path(path, "fungiLSU_taxid_012014.txt"),
"Data.RDP_LSU/RDP_LSU_fixed_train_set_v2.fa",compress=FALSE)
dada2:::makeSpeciesFasta_RDP("/media/lauren/96BA-19E6/RDPClassifierLSU/current_Fungi_unaligned.fa", "/media/lauren/96BA-19E6/Upload/rdp_species_assignment_LSU_v2.fa", compress=FALSE)
taxa <- assignTaxonomy(seqtab, "Data/RDP_LSU/current_Fungi_unaligned.fa", multithread=TRUE)

# From Dada2 pipeline
seqtab <- readRDS('Data/seqtab.rds')
# DNAStringSet/IdTaxa come from DECIPHER — note its library() call is
# commented out at the top, so attach it before running this section.
dna <- DNAStringSet(getSequences(seqtab)) # Create a DNAStringSet from the ASVs
load("Data/SILVA_SSU_r132_March2018.RData")
ids <- IdTaxa(dna, trainingSet, strand="top", processors=NULL, verbose=FALSE) # use all processors
ranks <- c("domain", "phylum", "class", "order", "family", "genus", "species") # ranks of interest
# Convert the output object of class "Taxa" to a matrix analogous to the output from assignTaxonomy
taxid <- t(sapply(ids, function(x) {
m <- match(ranks, x$rank)
taxa <- x$taxon[m]
taxa[startsWith(taxa, "unclassified_")] <- NA
taxa
}))
# NOTE(review): `seqtab.nochim` is never defined in this file (only `seqtab`
# is) — confirm which sequence table the rownames should come from.
colnames(taxid) <- ranks; rownames(taxid) <- getSequences(seqtab.nochim)
|
dfe279e30b49dcbc9f514b7ef467034767b3bbdb
|
4596714df6b860874bd4acd6db97c2623de58739
|
/man/VIMGUI.Rd
|
7d606ad94d77cccc14fda3eebf09feed871592ae
|
[] |
no_license
|
alexkowa/VIMGUI
|
e44f3524b59cfa2c4074fe5f7a661fdfb42a54d2
|
bf04b307f219a9672755b1a6d9d7c327ee0ac807
|
refs/heads/master
| 2021-01-18T23:09:25.240596
| 2016-10-18T08:42:54
| 2016-10-18T08:42:54
| 12,780,463
| 5
| 1
| null | 2016-07-13T06:39:33
| 2013-09-12T09:59:50
|
Tcl
|
UTF-8
|
R
| false
| true
| 884
|
rd
|
VIMGUI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VIMGUI.R
\name{VIMGUI}
\alias{VIMGUI}
\title{GUI for Visualization and Imputation of Missing Values}
\usage{
VIMGUI(startupObject = NULL)
}
\arguments{
\item{startupObject}{Object loaded at the start of the GUI}
}
\description{
Graphical user interface for visualization and imputation of missing values.
}
\details{
Details about handling survey objects follow soon.
}
\author{
Daniel Schopfhauser
}
\references{
M. Templ, A. Alfons, P. Filzmoser (2012) Exploring incomplete
data using visualization tools. \emph{Journal of Advances in Data Analysis
and Classification}, Online first. DOI: 10.1007/s11634-011-0102-y.
A. Kowarik, M. Templ (2016) Imputation with
R package VIM. \emph{Journal of
Statistical Software}, 74(7), 1-16
}
\keyword{hplot}
\keyword{multivariate}
|
5573e15a14f9cc235c5adfe5a3b7ee84800b77af
|
e6d1995424ead9ea3313e3a9453ace4609085657
|
/public/R/analisis_kmeans1.R
|
4bdb6dbc1bfde687dd4238527970e846d6b99135
|
[
"MIT"
] |
permissive
|
ReynaldoBP/fci-prueba
|
72cd1c4f48a04168ff667956165526de64aea4de
|
39014df3bc3e2aa193be435931dc44f301d3d9e5
|
refs/heads/master
| 2020-03-27T18:23:44.496984
| 2018-08-31T06:22:39
| 2018-08-31T06:22:39
| 146,919,655
| 0
| 0
| null | 2018-08-31T16:54:10
| 2018-08-31T16:54:10
| null |
UTF-8
|
R
| false
| false
| 999
|
r
|
analisis_kmeans1.R
|
library(DBI)
library(RPostgres)
args <- commandArgs(TRUE)
#parametros
usuario <- args[1]
cluster <- args[2]
#conexion a la base
conn=dbConnect(RPostgres :: Postgres (),host="52.38.27.79",port="5432",dbname="datos_gye",user="postgres",password="admin1234")
#Concatenar usuario
query<-"SELECT latitud,longitud FROM public.trayectoria_gye_hist where usuario like'%"
query_two<-paste(query,usuario)
query_tree<-"%'"
queryfor<-paste(query_two,query_tree)
#extraer los datos
datos_query=dbGetQuery(conn,queryfor)
#agrupar los datos
datos<-rbind(datos_query)
#generar analisis kmeans
kmeans.res<-kmeans(datos,center=cluster)
#guardar imagen
#jpeg("C:/Users/jcheverria/Desktop/Jorge Cheverria/fci/prueba/public/img/images/analisis2.jpeg", width = 800, height = 600)
jpeg("/home/kbaque/Archivos Kev/UG/Tesis/fci/public/img/images/analisis2.jpeg", width = 800, height = 600)
#realiza el grafico
plot(datos,col=kmeans.res$cluster)
#pinta el cluster
points(kmeans.res$centers,cex=2,col=11,pch=19)
dev.off()
|
41d59b048fdee794517aab5d7c2a10a417daa5b9
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/PCNF-TRACK-2018/E1/Database/cycle_sched_6_7_1.sat/cycle_sched_6_7_1.sat.R
|
68259127ff3908940d7549ac841f207747d6f2d5
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73
|
r
|
cycle_sched_6_7_1.sat.R
|
c82acbc0ee119805950ca5c55240b862 cycle_sched_6_7_1.sat.qdimacs 6733 20351
|
a69d3ca7977f0be3ae2a821905e122c382c1bd6d
|
0982fb077cd59436811077b505d4377f7c3a3311
|
/man/skcm.melgene.Rd
|
912de96c59ca0552b1b30b3718081de29e0771b5
|
[] |
no_license
|
teazrq/orthoDr
|
c87a851477b84693b81905423cc7db32659dd795
|
b009267a5b0124008711f556f1642f9fb880a5f2
|
refs/heads/master
| 2023-07-06T23:52:36.724019
| 2022-07-19T21:14:17
| 2022-07-19T21:14:17
| 106,500,687
| 7
| 8
| null | 2023-06-22T04:16:27
| 2017-10-11T03:24:50
|
C++
|
UTF-8
|
R
| false
| true
| 1,397
|
rd
|
skcm.melgene.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skcm.r
\docType{data}
\name{skcm.melgene}
\alias{skcm.melgene}
\title{Genes associated with Melanoma given by the MelGene Database}
\format{
Each row contains one subject, subject ID is indicated by row name.
Gene names in the columns. The columns are scaled.
}
\usage{
skcm.melgene
}
\description{
The expression of top 20 genes of cutaneous melanoma literature based on the
MelGene Database.
}
\references{
Chatzinasiou, Foteini, Christina M. Lill, Katerina Kypreou, Irene Stefanaki, Vasiliki Nicolaou, George Spyrou, Evangelos Evangelou et al. "Comprehensive field synopsis and systematic meta-analyses of genetic association studies in cutaneous melanoma." Journal of the National Cancer Institute 103, no. 16 (2011): 1227-1235.
Emmanouil I. Athanasiadis, Kyriaki Antonopoulou, Foteini Chatzinasiou, Christina M. Lill, Marilena M. Bourdakou, Argiris Sakellariou, Katerina Kypreou, Irene Stefanaki, Evangelos Evangelou, John P.A. Ioannidis, Lars Bertram, Alexander J. Stratigos, George M. Spyrou, A Web-based database of genetic association studies in cutaneous melanoma enhanced with network-driven data exploration tools, Database, Volume 2014, 2014, bau101, https://doi.org/10.1093/database/bau101
\url{https://www.cancer.gov/about-nci/organization/ccg/research/structural-genomics/tcga}
}
\keyword{skcm.melgene}
|
c78096ec9a33172716b66a2c4c907a195cb26d59
|
42f0456d16992dfce9f767de1ab1fccd91f8abbd
|
/topics.R
|
fe4bc97f3785ee33ff00e5001b0bd0f1f9261ba5
|
[] |
no_license
|
matsuim/packages
|
674f22340cc4ed68c43c61c7b46c853c278c5c92
|
0efca58a4b2a4b6c416aae57265e7af1c849598e
|
refs/heads/master
| 2021-01-01T03:44:04.821665
| 2016-06-09T20:03:00
| 2016-06-09T20:03:00
| 59,582,143
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,138
|
r
|
topics.R
|
## Scrape CRAN package descriptions, then fit topic models (LDA and CTM).

## Get descriptions -----------------------------------------------------------
load("index.Rdata")  # provides `index`, with a `package` column
desc <- character(100)
library(RCurl)
for (i in seq_len(100)) {
  url <- paste0('https://cran.r-project.org/package=', index$package[i])
  if (url.exists(url)) {
    cranpage <- readLines(url)
    # Skip packages whose CRAN page says they were archived/removed.
    if (!any(grepl("removed from the CRAN repository", cranpage))) {
      # The first <p>...</p> block holds the DESCRIPTION text; strip the tags.
      start <- grep('^<p>', cranpage)
      end <- grep('</p>$', cranpage)
      para <- cranpage[start[1]:end[1]]
      para <- sub('<p>', '', para)
      para <- gsub('</p>', '', para)
      desc[i] <- paste(para, collapse = " ")
    } else {
      desc[i] <- NA
    }
  } else {
    desc[i] <- NA
  }
  message(i)
}
save(desc, file = "desc.Rdata")

## Transform descriptions into a document-term matrix -------------------------
desc <- desc[!is.na(desc)]
library(tm)
desc <- Corpus(VectorSource(desc))
desc <- tm_map(desc, removePunctuation)
desc <- tm_map(desc, content_transformer(tolower))
desc <- tm_map(desc, removeWords, stopwords("english"))
desc <- tm_map(desc, stripWhitespace)
library(SnowballC)
desc <- tm_map(desc, stemDocument)
desc <- tm_map(desc, content_transformer(gsub), pattern = "colour", replacement = "color")
# Corpus-specific filler words left over after standard stopword removal.
myStopwords <- c("use", "can", "includ", "also", "will", "see", "well", "htpp", "easi", "provid", "allow", "etc")
desc <- tm_map(desc, removeWords, myStopwords)
dtm <- DocumentTermMatrix(desc)
rownames(dtm) <- head(index$package, 100)
freq <- colSums(as.matrix(dtm))
ord <- order(freq, decreasing = TRUE)
head(freq[ord], 10)

## LDA -------------------------------------------------------------------------
library(topicmodels)
lda <- LDA(dtm, 5, method = "Gibbs")
ldatopics <- as.matrix(topics(lda))
colnames(ldatopics) <- c("topic")
# BUG FIX: the original called dplyr::mutate() without loading dplyr, which
# errors at runtime; attach the package names with base R instead.
ldatopics <- as.data.frame(ldatopics)
ldatopics$package <- rownames(ldatopics)
ldaterms <- as.matrix(terms(lda, 10))
ldaprobabilities <- as.data.frame(lda@gamma)
save(ldatopics, file = "ldatopics.R")
save(ldaterms, file = "ldaterms.R")
save(ldaprobabilities, file = "ldaprobabilities.R")

## CTM -------------------------------------------------------------------------
ctm <- CTM(dtm, 5)
ctmtopics <- as.data.frame(topics(ctm))
colnames(ctmtopics) <- c("topic")
# BUG FIX: same dplyr issue as above; base-R equivalent of mutate().
ctmtopics <- as.data.frame(ctmtopics)
ctmtopics$package <- rownames(ctmtopics)
# BUG FIX: filter() without dplyr resolves to stats::filter (a time-series
# smoother); subset with base indexing instead.
ctmtopics[ctmtopics$topic == 3, ]
ctmterms <- as.matrix(terms(ctm, 10))
ctmprobabilities <- as.data.frame(ctm@gamma)

# ALSO: to inspect an element of the corpus
writeLines(as.character(desc[[1]]))
|
a0592f255e20297d1d61b2363fc368122ad0c8f7
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/CorReg/R/mixmod_adapter.R
|
6def5e4ef784e7c124f37eb4e6b1a261aa224a4b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
mixmod_adapter.R
|
# Adapt a mixmod result so it is more efficient to consume from C++.
#
# Collapses the `details` element (a list of matrices/data frames with the
# same column structure, as produced by calcul_mixmod) into one matrix by
# row-binding. All other elements are returned unchanged.
#
# @param mixmod list from calcul_mixmod
# @return the same list with `details` (if present) flattened to one matrix
mixmod_adapter <- function(mixmod) {
  res <- mixmod
  if (!is.null(res$details)) {
    # do.call(rbind, ...) binds every piece in one call instead of growing
    # the matrix inside a loop, which copied it on every iteration (O(n^2)).
    res$details <- do.call(rbind, res$details)
  }
  return(res)
}
|
ea52e1706138348ebc68f4610566349d861ddeab
|
36bace661ae4a427344d83ce6c5689f6c33c976f
|
/Experiment_2/Past_analyses/F6_BE_MS.R
|
7dfb579eeebbaf6e8bc26ff86d984f0f0160647c
|
[] |
no_license
|
rettopnivek/Gabor-priming-experiments
|
0e204dd6f6d56ed1eefa3191509067231828843a
|
38dde0c4194d00fd37d9245fa44f18f449780544
|
refs/heads/master
| 2020-04-15T09:43:43.716351
| 2019-01-09T01:17:34
| 2019-01-09T01:17:34
| 164,562,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,247
|
r
|
F6_BE_MS.R
|
#---------------------------#
# Sequential sampling model #
# using hierarchical BE     #
# Kevin Potter              #
# 05/29/2016                #
#---------------------------#

###
### Initial set-up
###

# Clear workspace
# NOTE(review): rm(list = ls()) in scripts is discouraged; kept for
# compatibility with the original workflow.
rm(list = ls())

# Lookup - 01
Pilot <- TRUE  # IDIOM: spell out TRUE/FALSE (T/F are reassignable)
source('F3_Setup.R')

# Load in useful functions
library(rstan)
library(loo)
library(seqmodels)
library(postexamine)

# When enabled, concatenate the shared Stan function definitions with the
# multi-subject model template into a single .stan file.
createYes <- FALSE
if (createYes) {
  # Create Stan script
  setwd('Stan_scripts')
  fileName <- "Wald_race_stan_functions.txt"
  wr_f <- readChar(fileName, file.info(fileName)$size)
  fileName <- "WR_multi_subject.txt"
  ms <- readChar(fileName, file.info(fileName)$size)
  model_script <- paste(wr_f, ms, sep = "")
  writeChar(model_script, "WR_MS.stan")
  # Clean up workspace
  rm(wr_f, ms, model_script)
  setwd(orig_dir)
}
###
### Subject data and covariates
###
model_structure_create = function(type,cD,Priors) {
  # Purpose:
  # A function to create the necessary input to fit a hierarchical
  # Bayesian response time model in Stan.
  # Arguments:
  # type - The desired type of model structure and inputs to
  #        generate, where...
  #        (1) Drift model
  #            - kappa
  #              Intercept only
  #            - xi
  #              Prime type (foil vs. target)
  #              Prime duration ( Assume D durations )
  #            - tau
  #              Intercept only
  #        (2) Threshold model
  #            - kappa
  #              Primed side vs. unprimed side
  #              Prime duration ( Assume D durations )
  #            - xi
  #              Prime type (foil vs. target)
  #            - tau
  #              Intercept only
  #        (3) Over-parameterized model
  #            - kappa
  #              Primed side vs. unprimed side
  #              Prime duration ( Assume D durations )
  #            - xi
  #              Prime type (foil vs. target)
  #              Prime duration ( Assume D durations )
  #            - tau
  #              Prime duration ( Assume D durations )
  # cD - A data frame with the covariates and dependent variables
  # Priors - A matrix with the desired parameter values for the priors
  # Returns:
  # A list of the inputs
  # NOTE(review): depends on external helpers createIncrement, covCreate,
  # designCoding and the global flag Pilot. The three type branches repeat
  # most of the assembly logic; a shared helper would reduce drift risk.

  # Number of observations by subject
  No = aggregate( rep(1,nrow(cD)),list(cD$Subject), sum )$x
  No_max = max( No ) # Largest number of observations
  Ns = length( No ) # Number of subjects
  if (Pilot) {
    allSubj = createIncrement( cD$Subject ) # Ensure subject indexing is incremental
  } else allSubj = cD$Subject
  subj = unique( allSubj ) # Subject identifiers

  # Select type of model to fit
  if ( type == 1) {
    # Extract number of prime durations
    D = length( unique( cD$PrimeDuration ) )
    # Coefficients
    # kappa -> 1
    # xi target -> D x 2
    # xi foil -> D x 2
    # tau -> 1
    # sigma -> 1 (Fixed)
    # k = 2 + D*4
    # Coefficients
    # k (1)
    # xi_T_FP [ all D ] xi_T_TP [ all D ] ( 2:(1 + D*2) )
    # xi_F_FP [ all D ] xi_F_TP [ all D ] ( (1 + D*2):(1 + D*4) )
    # tau (1 + D*4)
    # sigma (Fixed to 1)
    # Define the number of thresholds, drift rates, and residual latencies
    Cf = c( 1, D*4, 1 )
    kn = 1
    drn = ( max(kn) + 1 ):( max(kn) + D*4 )
    rln = max( drn ) + 1 + Cf[3]
    # Create index for linear algebra function
    Clm = c( kn, drn, rln, kn + rln, drn + rln, rln + rln )
    Rws = c( rep( 1, 1 ), # kappa (1)
             rep( 2, D*4 ), # xi (1)
             rep( 4, 1 ), # tau (1)
             rep( 5, 1 ), # kappa (0)
             rep( 6, D*4 ), # xi (0)
             rep( 8, 1 ) # tau (0)
    )
    # Create index for parameter selection
    parSel = c( 1, 2:(1+D*4), (1+D*4)+1, 1, 2:(1+D*4), (1+D*4)+1 )
    index = cbind( Rws, Clm )
    # Fixed values
    fixed = c(1,1)
    index = rbind( index,
                   c(3,rln-1), # sigma (1)
                   c(7,(rln-1)+rln) # sigma (0)
    )
    rm( Clm, Rws )
    # Dimensions
    index = rbind( index, c(8,length(parSel)) )
    # Fastest RTs by subject
    min_RT = aggregate( cD$RT, list( allSubj ), min )$x
    min_RT = array( min_RT, dim = c(Ns,1) )
    # Preallocate design (X) and response (Y) arrays, padded to No_max
    X = array( 0, dim = c( Ns, length( parSel ) + length( fixed ), No_max ) )
    Y = array( 0, dim = c( Ns, No_max, 2 ) )
    # Create a progress bar using a base R function
    pb = txtProgressBar( min = 1, max = Ns, style = 3 )
    for ( s in 1:Ns ) {
      # Extract data
      curSubj = cD[ allSubj == subj[s], ]
      Y[s, 1:No[s], ] = cbind( curSubj$RT, curSubj$Choice )
      # Extract covariates
      Dur = curSubj$PrimeDuration
      Pri = as.numeric( curSubj$Prime == curSubj$Target )
      Tar = curSubj$Target
      PriSide = as.numeric( curSubj$Prime )
      Int = rep( 1, nrow(curSubj) )
      # Create design matrix
      X_k = cbind( Int );
      cvrt = covCreate( cbind( Dur, Pri ) )
      Levels = aggregate(cvrt,list(Dur,Pri),unique)$x
      X_x = designCoding( cvrt, Levels = Levels, type = 'Intercept' )
      rm( cvrt )
      # Desired output
      # 8 x N matrix
      # Necessary input
      # 8 x row(X) -> parameter matrix
      # row(X) x N -> Design matrix
      # Design matrix
      X[ s, , 1:No[s] ] =
        t( cbind( X_k, X_x*Tar, X_x*(1-Tar), # kappa/xi (1)
                  Int, # sigma (1)
                  Int, # tau (1)
                  X_k, X_x*(1-Tar), X_x*Tar, # kappa/xi (0)
                  Int, # sigma (0)
                  Int # tau (0)
        ) )
      if ( No[s] == No_max ) {
        # Create small design matrix for later plotting etc...
        cnd = aggregate( t(X[s,,]), list(
          Dur, PriSide, Pri, Tar ), unique )
        X_small = t( as.matrix( cnd[,-(1:4)] ) )
        curCnd = cnd[,1:4]
        colnames( curCnd ) = c('Dur','PriSide','Pri','Tar')
      }
      # Update the progress bar
      setTxtProgressBar(pb,s)
    }
    close(pb)
    # Return results
    return( list(
      Ns = Ns,
      No = No,
      No_max = No_max,
      V = dim(X)[2],
      K = length( parSel ),
      U = length( fixed ),
      C = Cf,
      X = X,
      fixed = fixed,
      index = index,
      parSel = parSel,
      Y = Y,
      min_RT = min_RT,
      Priors = Priors,
      X_small = X_small,
      curCnd = curCnd ) )
  }
  if ( type == 2) {
    # Extract number of prime durations
    D = length( unique( cD$PrimeDuration ) )
    # Coefficients
    # kappa primed -> D
    # xi target -> D
    # kappa unprimed -> D
    # xi foil -> D
    # tau -> 1
    # sigma -> 1 (Fixed)
    # k = 1 + D*4
    # Coefficients
    # k_P [ all D ] k_UP [ all D ] ( 1:(D*2) )
    # xi_T [ all D ] ( (D*2 + 1 ):( D*3 ) )
    # xi_F [ all D ] ( ( D*3 + 1 ):( D*4 ) )
    # tau (1 + D*4)
    # sigma (Fixed to 1)
    # Define the number of thresholds, drift rates, and residual latencies
    Cf = c( D*2, D*2, 1 )
    kn = 1:( D*2 )
    drn = ( max(kn) + 1 ):( max(kn) + Cf[2] )
    rln = max( drn ) + 1 + Cf[3]
    # Create index for linear algebra function
    Clm = c( kn, drn, rln, kn + rln, drn + rln, rln + rln )
    Rws = c( rep( 1, D*2 ), # kappa (1)
             rep( 2, D*2 ), # xi (1)
             rep( 4, 1 ), # tau (1)
             rep( 5, D*2 ), # kappa (0)
             rep( 6, D*2 ), # xi (0)
             rep( 8, 1 ) # tau (0)
    )
    # Create index for parameter selection
    parSel = c( 1:(D*4), D*4+1, 1:(D*4), D*4+1 )
    index = cbind( Rws, Clm )
    # Fixed values
    fixed = c(1,1)
    index = rbind( index,
                   c(3,rln-1), # sigma (1)
                   c(7,(rln-1)+rln) # sigma (0)
    )
    rm( Clm, Rws )
    # Dimensions
    index = rbind( index, c(8,length(parSel)) )
    # Fastest RTs by subject
    min_RT = aggregate( cD$RT, list( allSubj ), min )$x
    min_RT = array( min_RT, dim = c(Ns,1) )
    # Preallocate design (X) and response (Y) arrays, padded to No_max
    X = array( 0, dim = c( Ns, length( parSel ) + length( fixed ), No_max ) )
    Y = array( 0, dim = c( Ns, No_max, 2 ) )
    # Create a progress bar using a base R function
    pb = txtProgressBar( min = 1, max = Ns, style = 3 )
    for ( s in 1:Ns ) {
      # Extract data
      curSubj = cD[ allSubj == subj[s], ]
      Y[s, 1:No[s], ] = cbind( curSubj$RT, curSubj$Choice )
      # Extract covariates
      Dur = curSubj$PrimeDuration
      Pri = as.numeric( curSubj$Prime == curSubj$Target )
      Tar = curSubj$Target
      PriSide = as.numeric( curSubj$Prime )
      Int = rep( 1, nrow(curSubj) )
      # Create design matrix
      Levels = sort( unique( Dur ) )
      X_k = designCoding( Dur, Levels = Levels, type = 'Intercept' )
      X_x = designCoding( Dur, Levels = Levels, type = 'Intercept' )
      # Desired output
      # 8 x N matrix
      # Necessary input
      # 8 x row(X) -> parameter matrix
      # row(X) x N -> Design matrix
      # Design matrix
      X[ s, , 1:No[s] ] =
        t( cbind( X_k*PriSide, X_k*(1-PriSide), X_x*Tar, X_x*(1-Tar), # kappa/xi (1)
                  Int, # sigma (1)
                  Int, # tau (1)
                  X_k*(1-PriSide), X_k*PriSide, X_x*(1-Tar), X_x*Tar, # kappa/xi (0)
                  Int, # sigma (0)
                  Int # tau (0)
        ) )
      if ( No[s] == No_max ) {
        # Create small design matrix for later plotting etc...
        cnd = aggregate( t(X[s,,]), list(
          Dur, PriSide, Pri, Tar ), unique )
        X_small = t( as.matrix( cnd[,-(1:4)] ) )
        curCnd = cnd[,1:4]
        colnames( curCnd ) = c('Dur','PriSide','Pri','Tar')
      }
      # Update the progress bar
      setTxtProgressBar(pb,s)
    }
    close(pb)
    # Return results
    return( list(
      Ns = Ns,
      No = No,
      No_max = No_max,
      V = dim(X)[2],
      K = length( parSel ),
      U = length( fixed ),
      C = Cf,
      X = X,
      fixed = fixed,
      index = index,
      parSel = parSel,
      Y = Y,
      min_RT = min_RT,
      Priors = Priors,
      X_small = X_small,
      curCnd = curCnd ) )
  }
  if ( type == 3) {
    # Extract number of prime durations
    D = length( unique( cD$PrimeDuration ) )
    # Coefficients
    # kappa primed -> D
    # kappa unprimed -> D
    # xi target -> D x 2
    # xi foil -> D x 2
    # tau -> 1
    # sigma -> 1 (Fixed)
    # k = 1 + D*2 + D*4
    # Coefficients
    # k_P [ all D ] k_UP [ all D ] ( 1:(D*2) )
    # xi_T_FP [ all D ] xi_T_TP [ all D ] ( (D*2 + 1):(D*4) )
    # xi_F_FP [ all D ] xi_F_TP [ all D ] ( (D*4 + 1):(D*6) )
    # tau (1 + D*6)
    # sigma (Fixed to 1)
    # Define the number of thresholds, drift rates, and residual latencies
    Cf = c( D*2, D*4, 1 )
    kn = 1:( D*2 )
    drn = ( max(kn) + 1 ):( max(kn) + Cf[2] )
    rln = max( drn ) + 1 + Cf[3]
    # Create index for linear algebra function
    Clm = c( kn, drn, rln, kn + rln, drn + rln, rln + rln )
    Rws = c( rep( 1, D*2 ), # kappa (1)
             rep( 2, D*4 ), # xi (1)
             rep( 4, 1 ), # tau (1)
             rep( 5, D*2 ), # kappa (0)
             rep( 6, D*4 ), # xi (0)
             rep( 8, 1 ) # tau (0)
    )
    # Create index for parameter selection
    parSel = c( 1:(D*6), 1 + D*6, 1:(D*6), 1 + D*6 )
    index = cbind( Rws, Clm )
    # Fixed values
    fixed = c(1,1)
    index = rbind( index,
                   c(3,rln-1), # sigma (1)
                   c(7,(rln-1)+rln) # sigma (0)
    )
    rm( Clm, Rws )
    # Dimensions
    index = rbind( index, c(8,length(parSel)) )
    # Fastest RTs by subject
    min_RT = aggregate( cD$RT, list( allSubj ), min )$x
    min_RT = array( min_RT, dim = c(Ns,1) )
    # Preallocate design (X) and response (Y) arrays, padded to No_max
    X = array( 0, dim = c( Ns, length( parSel ) + length( fixed ), No_max ) )
    Y = array( 0, dim = c( Ns, No_max, 2 ) )
    # Create a progress bar using a base R function
    pb = txtProgressBar( min = 1, max = Ns, style = 3 )
    for ( s in 1:Ns ) {
      # Extract data
      curSubj = cD[ allSubj == subj[s], ]
      Y[s, 1:No[s], ] = cbind( curSubj$RT, curSubj$Choice )
      # Extract covariates
      Dur = curSubj$PrimeDuration
      Pri = as.numeric( curSubj$Prime == curSubj$Target )
      Tar = curSubj$Target
      PriSide = as.numeric( curSubj$Prime )
      Int = rep( 1, nrow(curSubj) )
      # Create design matrix
      Levels = sort( unique( Dur ) )
      X_k = designCoding( Dur, Levels = Levels, type = 'Intercept' )
      cvrt = covCreate( cbind( Dur, Pri ) )
      Levels = aggregate(cvrt,list(Dur,Pri),unique)$x
      X_x = designCoding( cvrt, Levels = Levels, type = 'Intercept' )
      rm( cvrt )
      # Desired output
      # 8 x N matrix
      # Necessary input
      # 8 x row(X) -> parameter matrix
      # row(X) x N -> Design matrix
      # Design matrix
      X[ s, , 1:No[s] ] =
        t( cbind( X_k*PriSide, X_k*(1-PriSide), # kappa/xi (1)
                  X_x*Tar, X_x*(1-Tar),
                  Int, # sigma (1)
                  Int, # tau (1)
                  X_k*(1-PriSide), X_k*PriSide, # kappa/xi (0)
                  X_x*(1-Tar), X_x*Tar,
                  Int, # sigma (0)
                  Int # tau (0)
        ) )
      if ( No[s] == No_max ) {
        # Create small design matrix for later plotting etc...
        cnd = aggregate( t(X[s,,]), list(
          Dur, PriSide, Pri, Tar ), unique )
        X_small = t( as.matrix( cnd[,-(1:4)] ) )
        curCnd = cnd[,1:4]
        colnames( curCnd ) = c('Dur','PriSide','Pri','Tar')
      }
      # Update the progress bar
      setTxtProgressBar(pb,s)
    }
    close(pb)
    # Return results
    return( list(
      Ns = Ns,
      No = No,
      No_max = No_max,
      V = dim(X)[2],
      K = length( parSel ),
      U = length( fixed ),
      C = Cf,
      X = X,
      fixed = fixed,
      index = index,
      parSel = parSel,
      Y = Y,
      min_RT = min_RT,
      Priors = Priors,
      X_small = X_small,
      curCnd = curCnd ) )
  }
}
###
### Model estimation in Stan
###

# To run chains in parallel
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

# Define version of model to fit
# type = 1
# type = 2
type = 3

# Define priors for hierarchical parameters
# (columns: prior values for kappa / xi / theta, one specification per type)
D = length( unique( cD$PrimeDuration ) )
if ( type == 1 ) {
  Priors = cbind(
    c( c(1), # Kappa
       rep( c( 2.725, 2.725 ), each=D ), # Xi
       rep( c( 1.5, 1.5 ), each=D ),
       7 ), # Theta (Prop. of tau)
    c( rep( .5, 1 ), # Kappa
       rep( .5, D*2 ), # Xi
       rep( .5, D*2 ),
       .5 ), # Theta (Prop. of tau)
    c( rep( 5, 1 ), # Kappa
       rep( 5, D*2 ), # Xi
       rep( 5, D*2 ),
       3 ), # Theta (Prop. of tau)
    c( rep( 8, 1 ), # Kappa
       rep( 8, D*2 ), # Xi
       rep( 8, D*2 ),
       .5 ) # Theta (Prop. of tau)
  )
}
if ( type == 2 ) {
  Priors = cbind(
    c( rep( 1, D*2 ), # Kappa
       rep( c( 2.725, 1.5 ), each=D ), # Xi
       7 ), # Theta (Prop. of tau)
    c( rep( .5, D*2 ), # Kappa
       rep( .5, D*2 ), # Xi
       .5 ), # Theta (Prop. of tau)
    c( rep( 5, D*2 ), # Kappa
       rep( 5, D*2 ), # Xi
       3 ), # Theta (Prop. of tau)
    c( rep( 8, D*2 ), # Kappa
       rep( 8, D*2 ), # Xi
       .5 ) # Theta (Prop. of tau)
  )
}
if ( type == 3 ) {
  Priors = cbind(
    c( rep( 1, D*2 ), # Kappa
       rep( c( 2.725, 2.725 ), each=D ), # Xi
       rep( c( 1.5, 1.5 ), each=D ),
       7 ), # Theta (Prop. of tau)
    c( rep( .5, D*2 ), # Kappa
       rep( .5, D*4 ), # Xi
       .5 ), # Theta (Prop. of tau)
    c( rep( 5, D*2 ), # Kappa
       rep( 5, D*4 ), # Xi
       3 ), # Theta (Prop. of tau)
    c( rep( 8, D*2 ), # Kappa
       rep( 8, D*4 ), # Xi
       .5 ) # Theta (Prop. of tau)
  )
}

# Define folder location and file name to save output
# NOTE(review): hard-coded absolute Windows path; parameterize for portability.
folderName = "C:/Users/Kevin/Documents/Posteriors from Stan/Gabor_priming_2016_v2"
if (Pilot) {
  outName = paste("Pilot_Posterior_estimates_",type,".RData",sep="")
} else {
  outName = paste("Posterior_estimates_",type,".RData",sep="")
}

# TRUE -> fit the model in Stan; FALSE -> load previously saved estimates.
modelFit = F
if (modelFit) {
  # Extract data and covariates
  input = model_structure_create(type,cD,Priors)
  burn = 375 # Burn-in
  niter = 625 # Number of samples to approximate posterior
  chains = 8 # Number of chains to run
  setwd('Stan_scripts')
  if (Pilot) stan_seed = 30182 else stan_seed = 1384
  startTime = Sys.time() # To assess run-time
  fit = stan( file = 'WR_MS.stan', data = input,
              warmup = burn, iter = burn+niter,
              chains = chains,
              control = list( adapt_delta = .92 ),
              seed = stan_seed )
  post = extract(fit)
  # Report run time
  runTime = Sys.time() - startTime
  print( runTime )
  rm( startTime )
  # If desired, exclude certain chains
  # Extract rhat values (drop everything from lp__ onward)
  rhat = summary(fit)$summary[,"Rhat"]
  rhat = rhat[1:(which(names(rhat)=='lp__')-1)]
  # Save posterior estimates
  setwd( folderName )
  save( post, rhat, input, file = outName )
} else {
  # Load previously saved posterior estimates
  setwd( folderName )
  load( outName )
}
setwd( orig_dir )
|
39a8e9605d77b1c23735f14304945936c6482aab
|
2fa07606b645d93511efe906e54be8b84811d33b
|
/00_data_prep.R
|
986a869a0b67d51d16f166707841794bedd143a3
|
[] |
no_license
|
kurtzx/Google-Analytics-Customer-Revenue-Prediction
|
4b5234dd1e22559d48a78f8e7f73a04f2bc22b71
|
788635fd92d384ca785b0efeabf473f8fa4d64dd
|
refs/heads/master
| 2020-03-28T19:47:45.220650
| 2018-10-08T17:03:54
| 2018-10-08T17:03:54
| 149,010,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
r
|
00_data_prep.R
|
library(jsonlite)
library(dplyr)
library(purrr)
library(tidyr)

# Google Analytics customer-revenue data: several columns hold JSON blobs
# that must be parsed and expanded into regular columns.
raw_data <- read.csv("Raw Data/train.csv", stringsAsFactors = FALSE)

# device: parse the JSON column row by row and unnest into flat columns.
# NOTE(review): the same parse/unnest pattern is repeated four times below;
# consider factoring it into a helper to avoid drift. Also note that only
# the device result is persisted with saveRDS -- confirm whether the other
# three should be saved as well.
df_device <- raw_data %>%
  select(fullVisitorId, device)
df_device_p <- df_device %>%
  mutate(device = map(device, ~ fromJSON(.) %>% as.data.frame())) %>%
  unnest(device)
saveRDS(df_device_p, file="df_device_p.rds")

# geoNetwork
df_network <- raw_data %>%
  select(fullVisitorId, geoNetwork)
df_network_p <- df_network %>%
  mutate(geoNetwork = map(geoNetwork, ~ fromJSON(.) %>% as.data.frame())) %>%
  unnest(geoNetwork)

# totals
df_totals <- raw_data %>%
  select(fullVisitorId, totals)
df_totals_p <- df_totals %>%
  mutate(totals = map(totals, ~ fromJSON(.) %>% as.data.frame())) %>%
  unnest(totals)

# trafficSource
df_traffic <- raw_data %>%
  select(fullVisitorId, trafficSource)
df_traffic_p <- df_traffic %>%
  mutate(trafficSource = map(trafficSource, ~ fromJSON(.) %>% as.data.frame())) %>%
  unnest(trafficSource)
|
ec898edd732f5f7cf9cce8c7dcb89841a4a4ae19
|
80d053a041d0e7db3092ec636a02e0d222c661f6
|
/randomforest/RandomForest_clean.r
|
f97e28ad6c37e25fcb0fe938e5fc302277480777
|
[] |
no_license
|
abenc/MachineLearningProject
|
a2793902ed6e526f7a955f30ba03bd503513adf4
|
2960ff83699a8384b111b3b82b426f72d443b82f
|
refs/heads/master
| 2021-01-12T01:16:00.733590
| 2017-01-09T19:29:05
| 2017-01-09T19:29:05
| 78,361,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,568
|
r
|
RandomForest_clean.r
|
library(data.table)
library(caret)
library(pROC)
load("../data_processed/projetDataBase.Rda")
projetTrain = sub.projetTrain.base
projetValid = sub.projetValid.base
set.seed(30)
split = sample(nrow(projetTrain), floor(0.1*nrow(projetTrain)))
sub.train = projetTrain[split,]
sub.test = projetTrain[-split,]
control = trainControl(method="repeatedcv", number=4, repeats=2)
seed = 7
metric = "Accuracy"
mtry = floor(sqrt(ncol(sub.train)))
tunegrid = expand.grid(.mtry=mtry)
rf.model = train(target~.,
data=sub.train,
method="rf",
metric=metric,
tuneGrid=tunegrid,
trControl=control,
importance=TRUE,
localImp=TRUE,
proximity=TRUE)
rf.model
result.predicted.prob <- predict(rf.model, sub.test , type="prob") # Prediction
result.roc <- roc(sub.test$target, result.predicted.prob$OK) # Draw ROC curve.
plot(result.roc, print.thres="best", print.thres.best.method="closest.topleft")
result.coords <- coords(result.roc, "best", best.method="closest.topleft", ret=c("threshold", "accuracy"))
print(result.coords)#to get threshold and accuracy
save(rf.model, file="rf_model.Rdata")
result.predicted.prob.valid <- predict(rf.model, projetValid , type="prob") # Prediction on validation subset
projetValid$Id = as.character(projetValid$Id)
validation.results = cbind(projetValid[,"Id",with=FALSE],result.predicted.prob.valid)
write.csv(validation.results, file = "validation_results.csv")
|
e3d67d472d5ca5675156a9ef8c766d71106de6b5
|
28750d2d90cb45173a956cc8542ebda0d076e6dd
|
/Regressions.R
|
fbe4b4a9621e02ad5d462d207ea62dd75d0a0f81
|
[] |
no_license
|
DrSnowtree/MINCOME
|
994e1060e84aba0d5944b8ead720b4eae8512534
|
aa8f41cae82b995e3290e56a6487dc9c71ca04e9
|
refs/heads/master
| 2021-07-25T20:26:21.767288
| 2020-09-22T13:50:57
| 2020-09-22T13:50:57
| 220,972,475
| 0
| 0
| null | 2019-11-19T13:51:34
| 2019-11-11T12:04:15
| null |
UTF-8
|
R
| false
| false
| 1,089
|
r
|
Regressions.R
|
#Regressions
library("aod")
library("compareGroups")
library("data.table")
library("gtools")
library("haven")
library("dplyr")
library("tidyr")
library("tidyverse")
library("lubridate")
library("data.table")
library("foreign")
library("quantmod")
library("zoo")
library("plm")
library("gplots")
library("stargazer")
library("lfe")
library("Hmisc")
library("readxl")
library("naniar")
library("strex")
library(devtools)
library("fastDummies")
basepaycross_rem$if_increase <- as.factor(basepaycross_rem$if_increase)
basepaycross_rem <- fastDummies::dummy_cols(basepaycross_rem, select_columns = c("plan", "FAMSIZE"))
probitreg <- glm(if_increase ~ plan_2 + plan_1 + plan_3 +
plan_4 + plan_5 +
+ plan_7 + plan_8 +
FAGE + MAGE + DH + SH, family = binomial(link = "probit"),
data = basepaycross_rem)
probitreg <- glm(if_increase ~ control +
FAMS +
FAGE + MAGE + DH + SH, family = binomial(link = "probit"),
data = basepaycross_rem)
summary(probitreg)
|
e3ebd0c3295cf3d31ffc07caa2242c56c27ca99c
|
60dffdc12c12478b469d78cf4cfe5ee148107dd4
|
/man/expandBound.Rd
|
1c7b47c07df355bbfa0824d8ab6942ae42d09dce
|
[] |
no_license
|
cran/evian
|
dab851281636703b01c062b148327ceed16b7c28
|
e65295408da03281978691febd50c92657d972ea
|
refs/heads/master
| 2020-03-27T03:58:07.273598
| 2019-05-23T15:30:03
| 2019-05-23T15:30:03
| 145,902,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
rd
|
expandBound.Rd
|
\name{expandBound}
\alias{expandBound}
\title{A recursive function that expands the grid search for MLE.}
\description{
This is an internal function that finds the proper boundary of the grid.
}
\usage{
expandBound(data,bse,parameters,formula,m,k,family)
}
\arguments{
\item{data}{a data frame inputted from the main function.}
\item{bse}{numeric. The number of beta standard errors to utilize in constraining the beta grid limits. Passed down from argument \code{bse} in the main \code{\link{evian}} function.}
\item{parameters}{a numeric vector of length 3 providing the starting values for the search. This is obtained from the \code{\link{getGridBound}} function. The three numeric values in the vector should represent the beta estimates, s.e., and the correction factor respectively. Details can be found in \code{\link{getGridBound}}.}
\item{formula}{a formula specifying the response and possible covariates to keep in the output dataframe. This is directly obtained from \code{\link{evian}} function.}
\item{k}{numeric vector. The strength of evidence criterion k. Passed down from argument \code{kcutoff} in the main \code{\link{evian}} function.}
\item{m}{numeric. The density of the grid at which to compute the standardized likelihood function. Passed down from argument \code{m} in the main \code{\link{evian}} function.}
\item{family}{a string representing the link function for \code{ProfileLikelihood::ProfileLikelihood.glm}.}
}
\details{
Even though the initial grid bound calculated from \code{getGridBound} works for most of the data, there can be cases where \code{bse} needs to be increased in order to observe all the Likelihood Intervals (LIs) specified from the main function in the range \code{kcutoff} calculated. In this case, our approach is to check whether the current grid range includes the largest LIs. The function will expand the grid range by increasing \code{bse} by 1 if it is not included. This step will be running recursively until the largest LIs are included in the current grid range.
}
\value{
This function returns a numeric vector of length two representing the optimal lower and upper bounds for the grid on which the later functions will search for MLE.
}
\keyword{models}
|
b550858455e049e2c55f74bc70ca921fc33e8ebf
|
16f9d05fa1d0b6aadd313cc2896d7730d0e0d7c6
|
/RGtkGen/man/generate.Rd
|
bb3602e662f2bcd2a9fe10e8bb2d92f0f99e3b16
|
[] |
no_license
|
statTarget/RGtk2
|
76c4b527972777c567fb115587418f9dea61bf29
|
42c2d5bc7c8a462274ddef2bec0eb0f51dde1b53
|
refs/heads/master
| 2023-08-24T05:13:35.291365
| 2021-10-24T18:27:46
| 2021-10-24T19:24:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,671
|
rd
|
generate.Rd
|
\name{genCEnum}
\alias{genCEnum}
\alias{genFieldAccessor}
\alias{genFieldAccessorRCode}
\alias{genFieldAccessorCCode}
\alias{genFieldAccessors}
\title{Generate C code for accessors and enumeration elements}
\description{
These functions generate code S and C code to
acess slots or fields in a Gtk object from S.
}
\usage{
genCEnum(enum, name, defs = NULL, local = T, isEnum = T)
genFieldAccessor(name, type, className, defs)
genFieldAccessorCCode(name, className, croutine, type, defs)
genFieldAccessorRCode(sname, className, croutine, type, defs)
genFieldAccessors(defs)
}
\arguments{
\item{enum}{the definition of the specific enumeration, usually read from
the .defs files via \code{\link{getDefs}}.}
\item{name}{For the other functions, this is the name of the S variable under which the enumeration
constants will be stored.}
\item{defs}{the collection of definitions of classes, enumerations,
etc. read from the .defs files}
\item{local}{unused}
\item{isEnum}{a logical value indicating whether the definition is for
an enumeration or a Gtk flag.}
\item{type}{the name of the Gtk type of the field}
\item{className}{the name of the S class in which the field is
located.
This is used in the generated code to verify that the source object
is of the correct type.}
\item{croutine}{the name of the C routine which is to be called to
fetch the value of the field.}
\item{sname}{the name of the S function/variable which is to hold the field accessor.}
}
\value{
\code{genFieldAccessorRCode} returns a string giving the definition
of an S function for accessing the particular slot/field in the source
object.
\code{genFieldAccessorCCode} returns the associated C code
for accessing the field in the source that is called by the S accessor
function.
\code{genFieldAccessor} returns a list containing the S and C code
for accessing the specified field. These are identified by named elements
\code{rcode} and \code{ccode}.
\code{genFieldAccessors} returns a list with an element for each of
the classes in the collection of definitions.
Each element is itself a list whose elements correspond to the
different fields directly defined within that class (i.e. not
inherited fields). And each element in this sub-list is the
list returned from \code{genFieldAccessor} for that field.
}
\references{\url{http://www.omegahat.net/RGtk/},
\url{http://www.omegahat.net/RGtkBindingGenerator},
\url{http://www.gtk.org}
\url{http://www.pygtk.org}(?)
}
\author{Duncan Temple Lang <duncan@research.bell-labs.com>}
\seealso{
\code{\link{genCode}}
}
\examples{
}
\keyword{programming}
|
ba81b70e8daf3bd30c0304bafeec6f3693063f30
|
5f790f7eda54dce691ebdcf8ddd0d19387697537
|
/man/f.power.Rd
|
764cf940c44209107472c1ab9293657657318b09
|
[] |
no_license
|
cran/haplo.stats
|
7a86d3ee085a15cc82064b107d20a67299c14a4a
|
c62af35927ec027d06787467d53dffdb31081f14
|
refs/heads/master
| 2023-01-27T13:37:05.945272
| 2023-01-20T21:20:10
| 2023-01-20T21:20:10
| 17,696,595
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
rd
|
f.power.Rd
|
\name{f.power}
\alias{f.power}
\alias{f.power.dif}
\alias{f.sample.size}
\title{
Power and sample size for the F distribution
}
\description{
Power and sample size for the F distribution given
non-centrality, degrees of freedom, alpha, N (for f.power), and
power (for f.sample.size)
}
\usage{
f.power(n, nc, df1, alpha)
f.power.dif(n, nc, df1, alpha, power)
f.sample.size(nc, df1, alpha, power, lower=20, upper=10000)
}
\arguments{
\item{n}{
sample size
}
\item{nc}{
non-centrality parameter
}
\item{df1}{
degrees of freedom for numerator of f distribution
}
\item{alpha}{
type-I error
}
\item{power}{
desired power (for sample size)
}
\item{lower}{
lower limit for search space for sample size solution
}
\item{upper}{
upper limit for search space for sample size solution
}
}
\value{
power, the difference in power from target power, and sample size,
respectively for the three functions, assuming an F distribution for
the test statistic
}
\keyword{power}
% docclass is function
% Converted by Sd2Rd version 43268.
|
95f7d7aeec7443b402fd3816abfaa92fab25cd13
|
8f913f0030270f754295256bf22ce6639385ea90
|
/todo_cleansewithfire/singletrial_check/rangetie_rangeofcalcsd.R
|
535c86475128119710e0bf7c31f1f7e6e24e9f30
|
[] |
no_license
|
stevenlangsford/contextprefs
|
da7ef74d68ff44b182fc21060af91cfb0da22dfb
|
2b28f8fee3dd5dce91134e4270e80584398a4b22
|
refs/heads/master
| 2021-05-12T19:41:54.857330
| 2018-03-30T14:41:43
| 2018-03-30T14:41:43
| 117,043,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,313
|
r
|
rangetie_rangeofcalcsd.R
|
rm(list=ls())
library(tidyverse)
library(jsonlite) #name clash with purrr 'flatten'
library(rwebppl)
##agentparams: fix for each trial
ppnt_calcsd = 5
ppnt_tolerance_prob = .011
ppnt_tolerance_payoff = 1.1
ppnt_orderror =.15
##modelparams: passed in with each row, just to avoid having multiple model files.
useord = TRUE
usecalc = TRUE
##Some interesing specific trials:
## Trial definitions: each is a one-row data.frame describing a three-option
## gamble (options A/B/C with prob* and payoff*) plus the agent/model
## parameters fixed above. These rows are replicated and passed to the
## webppl model in the RUN section below.
## Control trial: three identical gambles (any preference is noise).
identicaltriplet.trial <- data.frame(
ppntid = 0,
probA = .5,
probB = .5,
probC = .5,
payoffA = 100,
payoffB = 100,
payoffC = 100,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 100, #prior on probs is always beta(1,1) for now, but payoffs are pretty different between wedell and howes16 rnd-stim.
payoffprior_sd= 10,
trial_id="identicaltriplets"
)
## Range-tie trial: equal expected value (prob halves as payoff doubles).
rangetie.trial <- data.frame(
ppntid = 0,
probA = .8,
probB = .8/2,
probC = .8/4,
payoffA = 70,
payoffB = 70*2,
payoffC = 70*4,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 100,
payoffprior_sd= 10,
trial_id="rangetie"
)
##Wedell text examples: Note DIFFERENT PRIORS ON PAYOFF (mean 20, sd 5,
## matching the smaller payoff scale of these stimuli).
## Wedell "R" decoy: C is dominated by B (decoy for B).
wedellR.trial <- data.frame(
ppntid = 0,
probA = .5,
probB = .4,
probC = .5,
payoffA = 20,
payoffB = 25,
payoffC = 18,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 20,
payoffprior_sd= 5,
trial_id="wedellR"
)
## Same as wedellR but with an extreme ("tiny") decoy option C.
wedellR_tinydecoy.trial <- data.frame(
ppntid = 0,
probA = .5,
probB = .4,
probC = .1,
payoffA = 20,
payoffB = 25,
payoffC = 1,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 20,
payoffprior_sd= 5,
trial_id="wedellR_tinydecoy"
)
## Wedell "F" decoy variant.
wedellF.trial <- data.frame(
ppntid = 0,
probA = .67,
probB = .5,
probC = .5,
payoffA = 15,
payoffB = 20,
payoffC = 18,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 20,
payoffprior_sd= 5,
trial_id="wedellF"
)
## Same as wedellF but with an extreme ("tiny") decoy option C.
wedellF_tinydecoy.trial <- data.frame(
ppntid = 0,
probA = .67,
probB = .5,
probC = .1,
payoffA = 15,
payoffB = 20,
payoffC = 1,
calc_sd = ppnt_calcsd,
tolerance_prob = ppnt_tolerance_prob,
tolerance_payoff = ppnt_tolerance_payoff,
p_err = ppnt_orderror,
useord = useord,
usecalc = usecalc,
payoffprior_mean= 20,
payoffprior_sd= 5,
trial_id="wedellF_tinydecoy"
)
# Build a simulated experiment crossing the wedellR and wedellR_tinydecoy
# trials with a range of calculation-noise SDs.
#
# Relies on the globals `wedellR.trial` and `wedellR_tinydecoy.trial`
# defined above in this script.
#
# Args:
#   n.per.cell:    replicate rows per trial type per SD value.
#   calcsd.vector: numeric vector of calc_sd values to sweep over.
# Returns: a data.frame with 2 * n.per.cell rows per element of calcsd.vector.
wedellR_decoyeffect_rangeofcalcsd <- function(n.per.cell, calcsd.vector){
    if (length(calcsd.vector) == 0) return(data.frame())
    # Preallocate one list slot per SD value instead of growing via rbind()
    # inside the loop (the original copied the whole frame on every append).
    chunks <- vector("list", length(calcsd.vector))
    for (j in seq_along(calcsd.vector)) {
        nextone <- rbind(as.data.frame(lapply(wedellR.trial, rep, n.per.cell)),
                         as.data.frame(lapply(wedellR_tinydecoy.trial, rep, n.per.cell)))
        nextone$calc_sd <- calcsd.vector[j]
        chunks[[j]] <- nextone
    }
    do.call(rbind, chunks)
}
##RUN STARTS HERE
## Replicate the single rangetie trial row 750 times (alternative sweep
## commented out to the right).
simexp.df <- as.data.frame(lapply(rangetie.trial,rep,250*3)) #wedellR_decoyeffect_rangeofcalcsd(50,c(1,2,3,4,5))
## Sweep six calc-noise SD levels; the length-6 vector recycles over the
## 750 rows, giving 125 rows per level.
simexp.df$calc_sd <- c(.5,1,2,4,8,16)
simexp.df$row_id <- 1:nrow(simexp.df)
##ORD OFF -- toggle: uncomment to disable ordinal observations for this run
## simexp.df$useord <- FALSE
## simexp.df$trial_id <- "rangetie_ordoff"
##WIDE v PRIOR -- this run widens the payoff prior
simexp.df$payoffprior_sd <- 100 # stupidly wide
simexp.df$trial_id <- "rangetie_widevprior"
## Run the webppl model; fit[[k]] components are positional.
## NOTE(review): the indices below depend on the output order of
## howes16.ppl -- confirm against that file before reuse.
fit <- webppl(program_file="howes16.ppl",data=simexp.df,data_var="expdf",packages="webppl-json")
simexp.df$choice <- fit[[8]] ## chosen option: the key dependent variable
##calcobs: noisy calculated values for options A-C
calcobs <- fit[[1]]
simexp.df$calcA <- calcobs[,1]
simexp.df$calcB <- calcobs[,2]
simexp.df$calcC <- calcobs[,3]
##ordobs: pairwise ordinal observations (prob and payoff comparisons)
simexp.df$ABprob <- fit[[2]]
simexp.df$ACprob <- fit[[3]]
simexp.df$BCprob <- fit[[4]]
simexp.df$ABpayoff <- fit[[5]]
simexp.df$ACpayoff <- fit[[6]]
simexp.df$BCpayoff <- fit[[7]]
## Build human-readable summary strings; note the <<- assignments write
## calcinfo/ordinfo/infostring into the global environment.
with(simexp.df, {
calcinfo <<- ifelse(usecalc, paste("calc:",signif(calcA,3),signif(calcB,3),signif(calcC,3)), "calc_off");
ordinfo <<- ifelse(useord, paste("pAB",ABprob,"pAC",ACprob,"pBC",BCprob,"vAB",ABpayoff,"vAC",ACpayoff,"vBC",BCpayoff),"ord_off");
infostring <<- paste(calcinfo,ordinfo,"choice",choice);
})
simexp.df$choice <- as.factor(simexp.df$choice)
#output: assumes simexp just has a single trial_id when naming files (and that this doesn't overwrite anything you'd want to keep...)
prefs_sdrange.plot <- ggplot(simexp.df,aes(x=trial_id,group=choice,fill=choice))+geom_bar(position="dodge")+theme_bw()+facet_wrap(~calc_sd)
write.csv(simexp.df,file=paste0(simexp.df$trial_id[1],"_rangeofcalcsd.csv"),row.names=FALSE)
ggsave(prefs_sdrange.plot,file=paste0(simexp.df$trial_id[1],"_rangeofcalcsd.png"))
print(prefs_sdrange.plot)
|
310e0a24fc829c2c1cee986a9c8e169b43bdcf58
|
f94d3f773c156ade2ff9bde4853ec1b5ccbf4567
|
/generalFunctions.R
|
811f1794f9ced7c43002496c3a7345d1b086d4a2
|
[
"MIT"
] |
permissive
|
filipok/2012ROElections
|
7dc00e3259e12381b66353586382aac6985d2151
|
a2ad7c3257df8194e7cc064fb45f45e5d9a036b9
|
refs/heads/master
| 2021-06-04T21:19:24.442975
| 2017-12-18T15:12:46
| 2017-12-18T15:12:46
| 13,176,827
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
generalFunctions.R
|
library("R2HTML")
# View an R object in the browser by rendering it to an HTML table and
# opening it in Chrome. ("de la Georgian" = "from Georgian" -- original
# author credit.)
# NOTE(review): the Chrome path and the output-file path are hard-coded for
# one Windows machine; parameterize before reusing elsewhere.
vezi <- function(x) {
	# Only remove the previous render if it exists; unconditional
	# file.remove() emitted a warning on the very first call.
	if (file.exists('test.html')) file.remove('test.html')
	HTML(x, file='test.html', row.names=TRUE, innerBorder=1)
	system(' "C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe" "C:\\Users\\Filip\\Dropbox\\R_Work\\2012AlegeriRomania\\test.html" ', wait=FALSE )
}
# Play the Windows system beep once per element of seq(n), pausing half a
# second between beeps. Returns NULL invisibly.
# Adapted from: http://stackoverflow.com/questions/3365657/is-there-a-way-to-make-r-beep-play-a-sound-at-the-end-of-a-script
beep <- function(n = 3) {
	lapply(seq(n), function(k) {
		system("rundll32 user32.dll,MessageBeep -1")
		Sys.sleep(.5)
	})
	invisible(NULL)
}
#source: (http://thebiobucket.blogspot.ro/2013/04/download-files-from-dropbox.html
# Download a publicly shared file from Dropbox into the working directory.
# (source: http://thebiobucket.blogspot.ro/2013/04/download-files-from-dropbox.html)
#
# Args:
#   x:   file name; also used as the local destination name.
#   key: the Dropbox share key embedded in the public URL.
# Side effects: writes `x` into getwd() and emits a status message.
dl_from_dropbox <- function(x, key) {
    # requireNamespace() fails loudly when RCurl is absent, unlike the
    # original require(), which just returned FALSE and let the next call
    # error confusingly.
    if (!requireNamespace("RCurl", quietly = TRUE))
        stop("dl_from_dropbox() needs the 'RCurl' package", call. = FALSE)
    # SECURITY NOTE(review): ssl.verifypeer = FALSE disables TLS certificate
    # verification; kept for backward compatibility but worth revisiting.
    bin <- RCurl::getBinaryURL(paste0("https://dl.dropboxusercontent.com/s/", key, "/", x),
                               ssl.verifypeer = FALSE)
    con <- file(x, open = "wb")
    # Guarantee the connection is closed even if writeBin() fails; the
    # original leaked `con` on error.
    on.exit(close(con), add = TRUE)
    writeBin(bin, con)
    message(noquote(paste(x, "read into", getwd())))
}
|
ef579edaa6b0ba187ac477637d215f42eaa685ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hypervolume/examples/hypervolume_threshold.Rd.R
|
ffa5d5d13e9aa64d3242534372a7aa79b54c6695
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
hypervolume_threshold.Rd.R
|
library(hypervolume)
### Name: hypervolume_threshold
### Title: Thresholds hypervolume and calculates volume quantile statistics
### (empirical cumulative distribution function)
### Aliases: hypervolume_threshold
### ** Examples
## Not run:
##D data(iris)
##D hv = hypervolume_gaussian(data=subset(iris, Species=="setosa")[,1:3],name='setosa')
##D
##D # get hypervolumes at multiple thresholds
##D hvlist = hypervolume_threshold(hv, plot=TRUE)
##D head(hvlist$Statistics)
##D plot(hvlist$HypervolumesThresholded[[c(1,5,10,15,20)]],
##D show.random=FALSE, show.data=FALSE,show.centroid=FALSE)
##D
##D # get hypervolume for a single low quantile value
##D plot(hypervolume_threshold(hv, plot=FALSE, verbose=FALSE,
##D quantile.requested=0.2,quantile.requested.type="volume")[[1]])
## End(Not run)
|
34581e963b22e9039d9ee9deb8c038a88c8083f9
|
570e591d60bf106ee716b2e45c8d1ae40b057496
|
/20180814_PersonalNetworks_cleandata1_code.R
|
9d3ed5535fdb579e691ad64d750a23b0bc2ce6f1
|
[] |
no_license
|
AmarDhand/PersonalNetworks
|
0d6e02d2bf75c6f8da0ed2fec8e2da7897e357ba
|
dd9556b06b1690b5258643711fd245bf3e8a0933
|
refs/heads/master
| 2022-01-31T00:24:55.853792
| 2020-05-12T13:31:53
| 2020-05-12T13:31:53
| 143,941,834
| 7
| 11
| null | 2021-10-13T19:27:46
| 2018-08-08T00:45:51
|
R
|
UTF-8
|
R
| false
| false
| 12,694
|
r
|
20180814_PersonalNetworks_cleandata1_code.R
|
################################################################################
# PROJECT: PersonalNetworks
# PURPOSE: Import raw data in REDCap form and convert it to R
# DIR: "~/Desktop/PersonalNetworks"
# INPUTS: Fake data created in REDCap ("20180806_PersonalNetwork_data.csv")
# Can be replaced with real data of participants.
# OUTPUTS: A temp.rda file that will become the input for part 3 of the code
# AUTHORS: Abby Halm, Nuzulul Kurniansyah, Amar Dhand
# CREATED: 08/01/18
# LATEST: 08/14/18
# PSERIES: NA
# NSERIES: 20180807_PersonalNetworks_cleandata2_code.R
# NOTES: Step 1 of 2 parts of the code.
# Code works on raw .csv outputs from Redcap, no processing required
# ##############################################################################
#Empties Global Environment cache
rm(list = ls())
#Set working directory to current file location
#To set to own working directory
# select "Session->Set Working Directory->To Source File Location"
# then copy result in console into current "setwd("")".
setwd("~/Desktop/PersonalNetworks-master")
#Importing packages. If not yet installed, packages can be installed by going to:
# Tools -> Install Packages, then enter their exact names from within each library()
library(tidyverse)
#Read in data
#Imports data and assigns it to variable "sample_data"
sample_data <- read.csv("20180807_PersonalNetwork_data.csv",
stringsAsFactors = FALSE)
#Stores "sample_data" as a table data frame for easier reading
sample_data <- tbl_df(sample_data)
##The remaining code sets variable types and assigns levels to categorical
# variables. We given a detailed annotation of this process for the variable
# "sex" below. Subsequent variables follow the same pattern.
#Demographics of Central Person (Ego)
#ego's sex, stored as variable "sex", is made into a factor
# Recode ego demographics from numeric REDCap codes to labelled factors.
# Pattern: factor() pins the expected code set, then levels<- relabels.
# Any code outside the declared set becomes NA.
sample_data$sex <- factor(sample_data$sex, levels = c("0", "1", "2"))
#assigns levels to variable "sex"
levels(sample_data$sex) <- c("Female","Male","Other")
# BUGFIX: the first education level was written as "1," (stray comma inside
# the string), so every participant who answered 1 was silently recoded NA.
sample_data$edu <- factor(sample_data$edu,
levels = c("1", "2", "3", "4", "5", "6", "88"))
levels(sample_data$edu) <- c("Some high school or less", "High school grad",
"Some college", "Associate degree", "Bachelor's degree", "Graduate degree",
"Prefer not to answer")
sample_data$employment <- factor(sample_data$employment,
levels = c("1", "2", "3", "0", "4", "5", "6", "7"))
levels(sample_data$employment) <- c("Employed for wages", "Self-employed",
"Out of work and looking for work",
"Out of work but not currently looking for work", "Student", "Retired",
"Unable to work", "Prefer not to answer")
sample_data$occupation <- factor(sample_data$occupation, levels = c("1", "2",
"3", "4", "5", "6", "7", "8", "9", "10", "77"))
# note that for participants who select "0", "4", "5", "6", or "7" for variable
# "employment", the value for "occupation" will be NA
levels(sample_data$occupation) <- c("Executive, manager",
"Sales or clerical worker", "Mechanic, electrician, skilled worker",
"Machine operator, inspector, bus/cab driver", "Service worker",
"Professional", "Business owner", "Laborer, unskilled worker", "Farming",
"Military", "Other")
sample_data$income <- factor(sample_data$income, levels = c("1", "2", "3", "4",
"5"))
levels(sample_data$income) <- c("less than $5,000", "$5,000 to $49,000",
"$50,000 to $169,000", "$170,000 to $490,000", "more than $500,000")
sample_data$married <- factor(sample_data$married, levels = c("0", "1"))
levels(sample_data$married) <- c("Not married", "Married")
sample_data$live_alone <- factor(sample_data$live_alone, levels = c("0", "1"))
levels(sample_data$live_alone) <- c("No", "Yes")
#Ego's race
#Due to multiple choice, code below organizes particpant's choices
# into race1 and race2. If the participant only chooses 1 race, the value for
# "race2" will be NA
r <- sample_data %>% select(record_id, race___1:race___88)
colnames(r) <- c("record_id", "Black", "White", "American_Indian", "Asian",
"Hawaiian", "Other", "Unknown")
#creates variable, "race1", that contains the first race a participant chooses
# if the participant selects multiple races, then "race1" variable represents
# the race that appears first in the list of choices, and does NOT denote any
# ordering assigned by the participant
race1 <- r %>% gather(race, count, -record_id) %>% filter(count == 1) %>%
arrange(record_id) %>% select(-count) %>% group_by(record_id) %>% slice(1) %>%
data.frame()
#creates variable, "race2", that contains the second race a participant chooses
# if the participant selects multiple races, then "race2" variable represents
# the race that appears second in the list of choices, and does NOT denote any
# ordering assigned by the participant
race2 <- r %>% gather(race, count, -record_id) %>% filter(count == 1) %>%
arrange(record_id) %>% select(-count) %>% group_by(record_id) %>% slice(2) %>%
data.frame()
#creates a table that combines "race1" and "race2" by record_id
race <- left_join(race1, race2, by = 'record_id')
colnames(race) <- c("record_id", "race1", "race2")
#adds "race" table onto "sample_data", thus adding variables "race1" and "race2"
# to the original data frame, containing all variables
sample_data <- left_join(sample_data, race, by = "record_id") %>%
select(-race___1:-race___88)
#Ego health habits:
#Again, setting variable types and assinging levels to categorical variables
# see code annotations for "sex" variable for a more detailed description of
# each line of code
sample_data$alcohol <- factor(sample_data$alcohol, levels = c("0", "1", "9"))
levels(sample_data$alcohol) <- c("No", "Yes", "I do not drink heavily")
sample_data$smoke <- factor(sample_data$smoke, levels = c("0", "1",
"9"))
levels(sample_data$smoke) <- c("No", "Yes", "I do not smoke")
sample_data$exercise <- factor(sample_data$exercise, levels = c("0", "1"))
levels(sample_data$exercise) <- c("No", "Yes")
sample_data$diet <- factor(sample_data$diet, levels = c("0", "1"))
levels(sample_data$diet) <- c("No", "Yes")
#Ego Health problems organized into columns
#The code below organizes the Ego's Health Problems (in which the participant
# can select multiple choices) into columns.
#same code as for "race" variable
h <- sample_data %>% select(record_id, health___1:health___0)
colnames(h) <- c("record_id", "General", "Pain", "Cognitive_MentalHealth",
"Cardiac", "NoProblems")
#creates variable, "health_prob1", that contains the first health problem a
# participant chooses if the participant selects multiple health problems,
# then "health_prob1" variable represents the health problem that appears first
# in the list of choices on REDCap, and does NOT denote any ordering
# assigned by the participant
#The same code is then used to create variables for any second, third, or fourth
# health problems the participant chooses.
health_prob1 <- h %>% gather(health_prob, count, -record_id) %>%
filter(count == 1) %>% arrange(record_id) %>% select(-count) %>%
group_by(record_id) %>% slice(1) %>% data.frame()
health_prob2 <- h %>% gather(health_prob, count, -record_id) %>%
filter(count == 1) %>% arrange(record_id) %>% select(-count) %>%
group_by(record_id) %>% slice(2) %>% data.frame()
health_prob3 <- h %>% gather(health_prob, count, -record_id) %>%
filter(count == 1) %>% arrange(record_id) %>% select(-count) %>%
group_by(record_id) %>% slice(3) %>% data.frame()
health_prob4 <- h %>% gather(health_prob, count, -record_id) %>%
filter(count == 1) %>% arrange(record_id) %>% select(-count) %>%
group_by(record_id) %>% slice(4) %>% data.frame()
health_problems <- left_join(health_prob1, health_prob2, by = 'record_id')
health_problems <- left_join(health_problems, health_prob3, by = 'record_id')
health_problems <- left_join(health_problems, health_prob4, by = 'record_id')
colnames(health_problems) <- c("record_id", "health_problem1", "health_problem2",
"health_problem3", "health_problem4")
sample_data <- left_join(sample_data, health_problems, by = "record_id") %>%
select(-health___1:-health___0)
##Calculate total network size. Defined as all unique names entered in name
#generator boxes and extra boxes provided.
sample_data.df <- data.frame(sample_data)
datalist = list()
# NOTE(review): reads the global `sample_data` rather than taking the data
# as an argument; `x` is a row index into it.
calculate_size <- function(x) {
##########
# Function: Creates a network_size variable that takes into account any names
#           written in the extra names boxes
# Inputs: x = row index of the participant in the global `sample_data`
# Outputs: network_size (integer count of unique kept names) for that ID
##########
#first select all names put in the first 15 columns
names_first_15 <- sample_data %>% select(name1, name2, name3, name4, name5,
name6, name7, name8, name9, name10, name11, name12, name13, name14, name15)
#next, select the names for id x
names_first_15 <- names_first_15[x, ]
#create data frame and transpose it to make it easier to manage
names_first_15 <- as.data.frame(t(names_first_15))
#change the column name
colnames(names_first_15) <- c("Names")
#select the keep/remove designation, stored as variables "name_1" to "name_15"
# for each of the first 15 names (1 = keep, other = remove)
keep_names <- sample_data %>% select(name_1:name_15)
keep_names <- keep_names[x, ]
#change colnames to numbers 1:15, so that it is easier to do rbind
colnames(keep_names) <- c(1:15)
#input the data into a data frame and transpose it
keep_names <- data.frame(t(keep_names))
#change the name of the column to "Value"
colnames(keep_names) = "Value"
#combine "names_first_15" (the first 15 names entered) and "keep_names" (the
# keep/remove designation for each of the first 15 names) using cbind function
names_combined <- cbind(names_first_15, keep_names)
#remove any row that contain NA in names_combined
names_combined <- names_combined[complete.cases(names_combined), ]
#split names_combined into names designated as "keep" (Value = 1) and
# names designated as "remove" (Value = 0)
names_combined_keep <- split(names_combined, names_combined$Value == 1)
# Select only the names designated as $`TRUE` ("keep")
# (if no name was kept, this is NULL and the steps below yield character(0))
names_combined_keep <- names_combined_keep$`TRUE`
#Change all characters into Uppercase
names_combined_keep <- toupper(names_combined_keep$Names)
#Remove any spaces
# NOTE(review): uppercasing + space-stripping means two genuinely distinct
# contacts whose names differ only in case/spacing collapse into one.
names_combined_keep <- gsub(" ", "", names_combined_keep)
#Make names_combined_keep into a data frame to make it easier to manage
names_combined_keep <- data.frame(names_combined_keep)
colnames(names_combined_keep) <- "Names"
#Now, take all of the names from the 3 extra names boxes
# strsplit: split names on comma-separated values after coercing to
# character.
names_box1 <- strsplit(as.character(sample_data$more_names_1)[x],
split = ",")
names_box2 <- strsplit(as.character(sample_data$more_names_2)[x],
split = ",")
names_box3 <- strsplit(as.character(sample_data$more_names_3)[x],
split = ",")
#Unlist names_box1:names_box3 and create a vector of names for each extra names
# box
names_box1 <- as.vector(unlist(names_box1, use.names = FALSE))
names_box2 <- as.vector(unlist(names_box2, use.names = FALSE))
names_box3 <- as.vector(unlist(names_box3, use.names = FALSE))
#combine the 3 extra names vectors into list so that we can combine
# names_box1:3 into one vector
names_box <- list(names_box1, names_box2, names_box3)
#make the names_box list into a vector
names_box <- Reduce(c, names_box)
#remove "NA" in names_box
names_box <- names_box[!is.na(names_box)]
#Remove Spaces in names_box
names_box <- gsub(" ", "", names_box)
#Change all character in names_box to uppercase
names_box <- toupper(names_box)
#Remove duplicates values in names_box vector
names_box <- unique(names_box)
#makes names_box into a data frame and change the column name to "Names"
# to make it easier to merge with names_combined_keep
names_box <- data.frame(names_box)
colnames(names_box) <- "Names"
# Merge unique names from boxes with unique names of first 15 and
#remove duplicates between them
# Keep this order. Placing names_combined_keep first preserves any duplicate
# names that both designated as "keep" by the participant
names_network <- merge(names_combined_keep,names_box,by = c("Names"), all = TRUE)
# convert names_network into a vector
names_network <- as.vector(t(names_network))
#calculate the total network size
total_size <- length(names_network)
return(total_size)
}
#apply 'calculate_size' function to all study IDs
network_size <- unlist(lapply(1:nrow(sample_data), calculate_size))
#merge network_size and remove other size variables to reduce confusion
sample_data <- cbind(sample_data, network_size) %>% select(-size, -first)
#create temp file of data frame with all changes made in code
save(sample_data, file = "temp.rda")
|
ac95beb6493089b73b1838d63398f71ceb34176e
|
e91cbf316b0a04cfbeada276c66fdd859ce6c565
|
/R/AllClasses.R
|
7dc7094f1af466e12dc44bcd95f1470041109d44
|
[] |
no_license
|
cran/nplr
|
4549ed6b190e4c4f83a62f1b0fb6f743644a736c
|
1cf4cda4e9bc29156f5eccd690a9488d66a45110
|
refs/heads/master
| 2021-01-13T01:35:47.637006
| 2016-12-28T15:38:22
| 2016-12-28T15:38:22
| 19,420,602
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,041
|
r
|
AllClasses.R
|
# -------------------------------------------------------
## DEFINE nplr CLASS
# -------------------------------------------------------
# S4 class holding an n-parameter logistic regression fit.
# Slots typed 'ANY' hold values whose class varies (or NULL before fitting).
setClass(
Class='nplr',
representation(
weightMethod='ANY',   # weighting scheme code: "res", "sdw", or other
x='numeric',          # input x values (possibly log-transformed)
y='numeric',          # input y values
w='numeric',          # fitting weights
useLog='logical',     # whether x was log10-transformed
npars='numeric',      # number of parameters of the logistic model
LPweight='numeric',   # weighting coefficient
yFit='numeric',       # fitted y at the input x
xCurve='numeric',     # x grid for the smooth fitted curve
yCurve='numeric',     # fitted y on that grid
inflPoint='data.frame', # inflexion point coordinates
goodness='ANY',       # goodness-of-fit statistics
stdErr='numeric',     # standard error of the fit
#        nlmErr='vector',
pars='data.frame',    # estimated model parameters
AUC='data.frame',     # area under the curve estimates
call='ANY'),          # the matched call that produced the fit
# Defaults for a freshly created, not-yet-fitted object.
prototype = prototype(
weightMethod = NULL,
useLog = TRUE,
npars = 0,
LPweight = 0,
yFit = numeric(),
xCurve = numeric(),
yCurve = numeric(),
inflPoint = data.frame(),
#        goodness = list(),
stdErr = 0,
#        nlmErr = 0,
pars = data.frame(),
AUC = data.frame(),
call = NULL
)
)
# -------------------------------------------------------
## SHOW METHOD FOR THIS CLASS
# -------------------------------------------------------
# show() method: human-readable summary of an nplr fit, printed when the
# object is autoprinted at the console.
setMethod(
f = 'show',
signature = 'nplr',
definition = function(object){
cat("Instance of class", class(object), "\n")
cat("\n")
cat("Call:\n")
print(object@call)
weightMethod <- NULL
# Accessors (getGoodness/getPar/getInflexion/getStdErr) are defined
# elsewhere in the package.
goodness <- getGoodness(object)
# Translate the internal weight-method code into a readable label.
if(object@weightMethod == "res")
weightMethod <- "residuals"
else if(object@weightMethod == "sdw")
weightMethod <- "standard weights"
else weightMethod <- "general weights"
# NOTE(review): message() writes to stderr while every other line here
# uses cat() (stdout) -- confirm this mixed-stream output is intentional.
message("weights method: ", weightMethod)
cat("\n")
cat(sprintf("%s-P logistic model\n", object@npars))
cat("Bottom asymptote:", getPar(object)$params$bottom, "\n")
cat("Top asymptote:", getPar(object)$params$top, "\n")
cat("Inflexion point at (x, y):", as.numeric(getInflexion(object)), "\n")
cat("Goodness of fit:", goodness$gof, "\n")
cat("Weighted Goodness of fit:", goodness$wgof, "\n")
cat("Standard error:", getStdErr(object), "\n")
#         cat("nlm error(s):", object@nlmErr, "\n")
cat("\n")
}
)
|
f8f0046bf0d4e12db35de4d032df35cb4c353698
|
82452e98d0c821491d36cec7c2e5d8977c0c45a2
|
/functions/benchmark.R
|
77fb9148a9f20874666f242e945dabf862ee17c4
|
[] |
no_license
|
LisaNicvert/EcoNetwork_public
|
943b04e2a40451b5b25d5e4157e1dcb149684562
|
3081018355eda1a340dab840f9189bfc93c979c6
|
refs/heads/master
| 2022-07-19T11:41:04.456837
| 2020-05-28T14:36:38
| 2020-05-28T14:36:38
| 267,587,453
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,736
|
r
|
benchmark.R
|
########################################################
# Functions for model benchmark (synthetic data)
########################################################
library(igraph)
library(MASS)
library(PLNmodels)
# Generate a sparse precision matrix whose sparsity pattern follows a
# Barabasi-Albert (preferential-attachment) graph.
#
# Args:
#   n:     node count (ignored when `gr` is supplied: the size is then taken
#          from the graph itself).
#   m:     edges attached at each step of the BA model.
#   u:     conditioning constant added to the diagonal.
#   v:     magnitude of the off-diagonal entries.
#   gr:    optional igraph object to use instead of simulating one.
#   names: optional character vector of node names for the dimnames.
# Returns: a named n x n positive-definite precision matrix.
generate_precmat <- function(n = 20, m = 3, u = .1, v = .3,
                             gr, names){
  if(missing(gr)){ # no graph supplied: simulate a BA network
    gr <- sample_pa(n = n, m = m, directed = FALSE)
  }
  # Create Omega with the same sparsity pattern as the graph.
  G <- as_adjacency_matrix(gr, sparse = FALSE)
  # BUGFIX: derive n from the actual graph so a user-supplied `gr` whose
  # order differs from the default n = 20 no longer breaks diag()/naming.
  n <- nrow(G)
  # Shift the diagonal by |min eigenvalue| + u to make Omega positive
  # definite.
  eig <- eigen(v*G, only.values = TRUE)$values
  Omega <- v*G + diag(abs(min(eig)) + u, n, n)
  if(missing(names)){
    # Default node names: a..z, then A..Z, then plain integers.
    if(n <= 26){
      nodenames <- letters[1:n]
    } else if(n <= 26*2){
      nodenames <- c(letters, LETTERS[1:(n - 26)])
    } else {
      # BUGFIX: was seq(22*2, n) -- a typo for 26*2 that produced a names
      # vector of the wrong length and made the dimnames assignment fail
      # for n > 52.
      nodenames <- c(letters, LETTERS, seq(26*2 + 1, n))
    }
  } else {
    nodenames <- names
  }
  colnames(Omega) <- nodenames
  rownames(Omega) <- nodenames
  return(Omega)
}
# Draw Poisson-lognormal observations from a latent Gaussian graphical
# model.
#
# Args:
#   nrow:  number of observations (replicates) to draw.
#   mean:  species-wise mean vector on the log scale, length nrow(Omega);
#          defaults to all zeros.
#   Omega: precision matrix of the latent Gaussian layer; its colnames
#          become the species names of the output.
# Returns: list(lambda = nrow x n matrix of Poisson intensities,
#               obs    = nrow x n matrix of Poisson counts).
generate_obs <- function(nrow = 10, mean, Omega){
  n <- nrow(Omega) # number of species
  if(missing(mean)){ # mean defaults to zero
    mean <- rep(0, n)
  }
  Sigma <- solve(Omega)
  # Draw the latent Gaussian layer
  Z <- mvrnorm(n = nrow, mu = rep(0, n), Sigma = Sigma)
  colnames(Z) <- colnames(Omega)
  rownames(Z) <- seq_len(nrow)
  # BUGFIX: the original `exp(Z + mean)` recycled the length-n mean vector
  # down the rows (column-major), so species means were misaligned whenever
  # the entries of `mean` differed. sweep() adds mean[j] to column j as
  # intended. (The bug was masked in sensi_speci(), which only passes
  # constant mean vectors.)
  lambda <- exp(sweep(Z, 2, mean, "+"))
  # Generate counts from the intensities (rpois consumes lambda
  # column-major; matrix() refills in the same order, so alignment holds).
  obs <- matrix(rpois(n = nrow*n, lambda = lambda), nrow = nrow)
  colnames(obs) <- colnames(Omega)
  rownames(obs) <- seq_len(nrow)
  return(list(lambda = lambda,
              obs = obs))
}
# Benchmark PLNnetwork support recovery: compute TP/FP/TN/FN, FDR and FOR of
# the inferred partial-correlation structure across a range of sample sizes
# or latent means.
#
# Args: (interface unchanged from the original)
#   n, m, u, v: parameters forwarded to generate_precmat().
#   gr:         optional igraph object used as the true network.
#   names:     optional node names (currently unused; kept for
#               call-compatibility).
#   mean:      latent mean (vector if `change` == "size"; matrix with one
#               row per level if `change` == "mean").
#   size:      sample sizes to test (vector if `change` == "size", scalar
#               otherwise). Must be > 1.
#   nrep:      repetitions per setting (latent layer redrawn each time).
#   change:    which factor varies between iterations: "size" or "mean".
#   new_graph: if TRUE, a fresh precision matrix is generated per repetition.
#   crit:      model-selection criterion for getBestModel ("BIC" or "StARS").
# Returns: data.frame with one row per (setting, repetition) holding the
#   varying factor, the rep index, confusion counts, FDR and FOR.
sensi_speci <- function(n = 20, m = 3, u = .1, v = .3, gr, names,
                        mean, size = seq(10, 100, by = 10), nrep = 10,
                        change = "size", new_graph = FALSE,
                        crit = "BIC"){
  # missing() only works on this function's own formals, so capture the
  # flag once for use inside the helper below.
  have_gr <- !missing(gr)
  # Helper: one precision matrix, honouring a user-supplied graph.
  # BUGFIX: the original branches were inverted -- `gr` was forwarded only
  # when it was *missing* and a user-supplied graph was silently ignored.
  make_omega <- function(){
    if(have_gr){
      generate_precmat(n = n, m = m, u = u, v = v, gr = gr)
    } else {
      generate_precmat(n = n, m = m, u = u, v = v)
    }
  }
  if(missing(mean)){
    if(change == "size"){
      mean <- rep(0, n) # constant zero mean on the latent scale
    }
    else if(change == "mean"){ # one constant mean vector per level, 0..5
      mean <- matrix(unlist(lapply(seq(0, 5), FUN = rep, n)), ncol = n,
                     byrow = TRUE)
    }
  }
  # (The original's all.sensi/all.speci and sensitivities/specificities
  # vectors were written-to-never-read; dropped.)
  if(change == "size"){
    jlim <- length(size) # iterate over sample sizes
    mean.now <- mean     # mean held constant
  }
  else if(change == "mean"){
    jlim <- nrow(mean)   # iterate over mean vectors
    nrow <- size         # sample size held constant
  }
  if(!new_graph){
    # Single true precision matrix shared by every run.
    Omega <- make_omega()
  }
  res.j <- vector("list", jlim) # preallocate instead of rbind-growing
  for(j in seq_len(jlim)){ # loop over sample size / mean level
    if(change == "size"){
      nrow <- size[j]
    }
    else if(change == "mean"){
      mean.now <- mean[j, ]
    }
    res.i <- vector("list", nrep)
    for(i in seq_len(nrep)){
      rep <- i
      if(change == "size"){
        print(paste("----------------- nrep =", i, "| nrow =", nrow, "-----------------"))
      }
      else if(change == "mean"){
        print(paste("----------------- nrep =", i, "| mean vector =", paste0("(",paste(mean[j,],collapse = ","),")"), "-----------------"))
      }
      if(new_graph){
        # Fresh true precision matrix for every repetition.
        Omega <- make_omega()
      }
      # Generate observations from the latent model
      rand <- generate_obs(nrow = nrow, mean = mean.now, Omega = Omega)
      # Infer the network with PLNnetwork (intercept-only model)
      cov <- rep(0, nrow) # dummy covariate
      d.prep <- prepare_data(counts = rand$obs, covariates = cov)
      nks <- PLNnetwork(Abundance ~ 1, data = d.prep)
      nk <- getBestModel(nks, crit = crit)
      # Estimate for Omega
      Omega.est <- nk$model_par$Omega
      # Partial correlations: -Omega_kl / sqrt(Omega_kk * Omega_ll), i.e.
      # minus the correlation-scaled precision matrix (replaces the
      # original's double loop with the equivalent cov2cor()).
      Pcor <- -cov2cor(Omega)
      Pcor.est <- -cov2cor(Omega.est)
      # Confusion counts of estimated vs true sparsity pattern
      TN <- length(Pcor.est[Pcor.est == 0 & Pcor == 0])
      FN <- length(Pcor.est[Pcor.est == 0 & Pcor != 0])
      FOR <- FN/(TN + FN) # false omission rate
      FP <- length(Pcor.est[Pcor.est != 0 & Pcor == 0])
      TP <- length(Pcor.est[Pcor.est != 0 & Pcor != 0])
      FDR <- FP/(TP + FP) # false discovery rate
      if(change == "size"){
        fac <- nrow
      }
      else if(change == "mean"){
        # BUGFIX: the original used mean(mean) -- the grand mean of the
        # whole matrix, identical for every level -- so the output "mean"
        # column never varied. Use the current level's mean instead.
        fac <- mean(mean.now)
      }
      res.i[[i]] <- cbind(fac, rep, TP, FP, TN, FN,
                          FDR, FOR)
    }
    res.j[[j]] <- do.call(rbind, res.i)
  }
  res <- as.data.frame(do.call(rbind, res.j))
  # Rename the varying-factor column to something meaningful.
  if(change == "size"){
    colnames(res)[colnames(res) == "fac"] <- "nrow"
  }
  else if(change == "mean"){
    colnames(res)[colnames(res) == "fac"] <- "mean"
  }
  return(res)
}
# Repeat the PLNnetwork simulation `nrep` times with identical settings and
# tabulate support-recovery counts against the true precision matrix.
#
# Args:
#   n:     accepted for call-compatibility but not used in the body.
#   nrep:  number of repetitions.
#   nrow:  observations drawn per repetition.
#   mean:  latent mean vector forwarded to generate_obs().
#   Omega: true precision matrix.
#   crit:  selection criterion for getBestModel.
# Returns: data.frame with one row per repetition:
#   nrow (non-empty rows actually observed), TN, FN, FP, TP, FNR.
repeat_simul <- function(n, nrep, nrow, mean, Omega, crit){
  runs <- vector("list", nrep) # preallocate instead of rbind-growing
  for(i in seq_len(nrep)){
    message(paste0("------------------------------ Repetition ", i,"/",nrep))
    # Generate observations
    rand <- generate_obs(nrow = nrow, mean = mean, Omega = Omega)
    # BUGFIX: drop = FALSE keeps a matrix even when a single non-empty row
    # survives the filter; without it the subset collapsed to a vector and
    # nrow() returned NULL, corrupting the result row.
    nrowfinal <- nrow(rand$obs[apply(rand$obs, 1, sum) != 0, , drop = FALSE])
    # Infer network (intercept-only PLN model)
    cov <- rep(0, nrow) # dummy covariate
    d.prep <- prepare_data(counts = rand$obs, covariates = cov)
    nks <- PLNnetwork(Abundance ~ 1, data = d.prep)
    nk <- getBestModel(nks, crit = crit)
    # Estimate for Omega
    Omega.est <- nk$model_par$Omega
    # Compare sparsity patterns on the strict lower triangle only
    Omega.est.tri <- Omega.est[lower.tri(Omega.est, diag = FALSE)]
    Omega.tri <- Omega[lower.tri(Omega, diag = FALSE)]
    TN <- length(Omega.est.tri[Omega.est.tri == 0 & Omega.tri == 0])
    FN <- length(Omega.est.tri[Omega.est.tri == 0 & Omega.tri != 0])
    FP <- length(Omega.est.tri[Omega.est.tri != 0 & Omega.tri == 0])
    TP <- length(Omega.est.tri[Omega.est.tri != 0 & Omega.tri != 0])
    # False negative rate
    FNR <- FN/(TP + FN)
    runs[[i]] <- c(nrowfinal, TN, FN, FP, TP, FNR)
  }
  res <- as.data.frame(do.call(rbind, runs))
  colnames(res) <- c("nrow", "TN", "FN", "FP", "TP", "FNR")
  rownames(res) <- NULL
  return(res)
}
|
64343cec751caa636166f64e57910d75255cbf83
|
fde3ff3a39c60bcf5b7537ff3adf24c40055c410
|
/mona.R
|
a9b28c4bda1967bced2ea25f2c8f4fd23990ec09
|
[] |
no_license
|
jrevenaugh/Shiny
|
cb04ebebbb86513fc7c24ad6e83d72c171aeb669
|
3e69759d6470ccd487514e788d7c89d5dc417531
|
refs/heads/master
| 2020-03-29T08:27:19.779486
| 2017-06-22T21:26:08
| 2017-06-22T21:26:08
| 94,667,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,188
|
r
|
mona.R
|
# Mona - Shiny app illustrating the use of the SVD as a 2D filter.
# Useful for demonstrating principal components.
#

# Image matrix `mona` is loaded from a hard-coded local path.
load( file = "~/R-Studio/Projects/Shiny/Mona Lisa/mona_mat" )

# library() errors immediately when a package is missing (require() only
# warns and returns FALSE); `quietly` is spelled out instead of relying on
# partial matching of the original `quiet = TRUE`.
library( tidyverse, quietly = TRUE )
library( shiny, quietly = TRUE )

theme_set( theme_bw() )
options( warn = -1 )
# 256-level grayscale palette used to render the reconstructed image.
pal <- gray.colors( 256, start = 0, end = 1 )
# UI layout: a slider selecting the band of singular values to keep and the
# spectrum plot in the sidebar; the reconstructed image in the main panel.
ui <- shinyUI(fluidPage(
  h4("Mona Lisa SVD"),
  sidebarLayout(
    sidebarPanel(
      sliderInput("sv", "Singular Values",
                  min = 1, max = 50,
                  value = c(1, 10), step = 1),
      plotOutput("SVPlot")
    ),
    mainPanel(
      plotOutput("FMona", height = "auto", width = "auto")
    )
  )
))
# Server R code
# Server: reconstructs the image from the selected band of singular values
# and highlights that band on the singular-value spectrum.
server <- shinyServer(function(input, output) {

  # Fixed output size: 8 inches at 72 dpi, width scaled by the image's
  # aspect ratio (200 x 298 pixels).
  aspect <- 200 / 298
  res <- 72
  size <- 8
  h <- size * res
  w <- size * res * aspect

  output$FMona <- renderPlot ( height = h, width = w, {
    # Truncated-SVD reconstruction: keep singular values inside the selected
    # band [sv[1], sv[2]] and zero out the rest.
    dec <- svd( mona )  # renamed: the original `svd` shadowed base::svd()
    s <- rep( 0, 200 )
    s[input$sv[1]:input$sv[2]] <- dec$d[input$sv[1]:input$sv[2]]
    D <- diag( s )
    fmona <- dec$u %*% D %*% t( dec$v )
    par( mar = c( 0, 0, 0, 0 ) )
    image( z = fmona, col = pal, axes = FALSE, xlab = NA, ylab = NA )
  })

  output$SVPlot <- renderPlot ({
    # Spectrum of the first 50 singular values; the selected band is drawn
    # in red inside a shaded rectangle.
    s <- svd( mona )
    # tibble() replaces the deprecated data_frame() alias (same behaviour).
    S <- tibble( Number = seq( 1, 50 ), SValue = s$d[1:50] )
    P <- tibble( Number = input$sv[1]:input$sv[2], SValue = s$d[input$sv[1]:input$sv[2]] )
    drng <- range( s$d[1:50] )
    ymin <- drng[1]
    ymax <- drng[2] * 1.1
    xmin <- input$sv[1] - 0.5
    xmax <- input$sv[2] + 0.5
    # (a dead `w <- input` assignment was removed here; it was never used)
    R <- tibble( xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax )
    gSV <- ggplot() +
      geom_rect( data = R,
                 aes( xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax ),
                 alpha = 0.2, color = "gray" ) +
      geom_point( data = S, inherit.aes = FALSE, aes( x = Number, y = SValue )) +
      geom_point( data = P, aes( x = Number, y = SValue ), color = "red" ) +
      labs( x = "Number", y = "Singular Value" ) +
      theme( text = element_text( size = 20 ) ) + xlim( 0.4, 50.6 ) +
      scale_y_log10()
    gSV
  })
})

# Run the application
shinyApp( ui = ui, server = server )
|
5d75d7f7fd55f03690ff0ab77a66b58705364c55
|
edd3c6adae39948e5a3820b8f8e86bd8c7df5585
|
/errorRateFromBg/numberOfTCConversionsPerStage.R
|
020e7b96717a1186c1dda7a63f5d0d700b9578bb
|
[] |
no_license
|
poojabhat1690/dr_annotation_errorcorrection
|
7317173e7d6b491147351b186462c8366b725943
|
26515f03b79de0d92ea3e017a9749c2f88b3bc63
|
refs/heads/master
| 2020-03-28T13:56:08.090229
| 2018-09-12T11:54:04
| 2018-09-12T11:54:04
| 148,443,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,097
|
r
|
numberOfTCConversionsPerStage.R
|
library(RColorBrewer)
#### finds the number of reads with TCs in the untreated samples
####### it is better to run the first part on the cluster.
# untreatedFiles = list.files(path ="////groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized/",pattern ="freqAllMutations_allTranscripts*" )
# untreatedFiles = untreatedFiles[grep("Untreated",untreatedFiles)]
# untreatedFiles_path = paste0("////groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized/",untreatedFiles)
#
# untreatedFiles_data = lapply(untreatedFiles_path,function(x) read.delim(x,stringsAsFactors = F,header = T))
# names(untreatedFiles_data) = untreatedFiles
#
# getTabulatedReads = function(untreatedSample_1){
# minusTab = untreatedSample_1 %>% dplyr::filter(strand == "-") %>%
# dplyr::select( A_C = T_G, A_G = T_C, A_T = T_A, C_A = G_T, C_G = G_C, C_T = G_A, G_A = C_T, G_C = C_G, G_T = C_A, T_A = A_T, T_C = A_G, T_G = A_C,strand,id)
# plusTab = untreatedSample_1 %>% dplyr::filter(strand == "+") %>%
# dplyr::select( A_C, A_G, A_T, C_A , C_G , C_T , G_A , G_C , G_T , T_A , T_C, T_G ,strand,id)
#
# totalReads = rbind.data.frame(plusTab,minusTab)
# totalReads_tabulated = as.data.frame(table(totalReads$T_C)) %>% mutate(frac_freq = Freq/sum(Freq))
# return(totalReads_tabulated)
# }
#
#
# tabulatedSamples = lapply(untreatedFiles_data,function(x) getTabulatedReads(x))
#
# #############################################################################################
############
#save(list = tabulatedSamples,file = "/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/errorRates_untreatedSamples_zebrafish/Frequency_perTimePoint_numberOfTs.Rdata")
# Shared ggplot2 theme for the plots in this script.
#
# Args:
#   type - one of "boxplot", "barplot", "violin". Anything else returns
#          NULL, as does "violin" (no styling defined for it yet).
# Returns: a ggplot2 theme object, or NULL.
#
# Fix: in the original the function's value was the value of the LAST `if`
# (the "barplot" branch), so theme_ameres("boxplot") silently returned NULL.
# Each branch now returns its theme explicitly. Only "barplot" is used in
# this script, so observed behaviour is unchanged.
theme_ameres <- function(type) {
  types_plot <- c("boxplot", "barplot", "violin")
  if (!(type %in% types_plot)) {
    return(NULL)
  }
  if (type == "boxplot") {
    return(theme(legend.title = element_blank(),
                 axis.text.x = element_text(margin = margin(10, 15, 10, 15, "pt"), size = 15),
                 axis.text.y = element_text(margin = margin(5, 15, 10, 5, "pt"), size = 15),
                 axis.ticks.length = unit(-0.25, "cm"),
                 legend.position = "bottom",
                 axis.line = element_line(colour = "black", lineend = "round"),
                 axis.title.x = element_text(size = 18),
                 axis.title.y = element_text(size = 18)))
  }
  if (type == "barplot") {
    return(theme(legend.title = element_blank(),
                 axis.text.x = element_text(margin = margin(10, 15, 10, 15, "pt"), size = 15, hjust = 1),
                 axis.text.y = element_text(margin = margin(5, 15, 10, 5, "pt"), size = 15),
                 axis.ticks.length = unit(-0.25, "cm"),
                 legend.position = "bottom",
                 axis.line = element_line(colour = "black", lineend = "round"),
                 axis.title.x = element_text(size = 18),
                 axis.title.y = element_text(size = 18)))
  }
  NULL  # "violin": accepted but no styling defined (matches original)
}
# Load `tabulatedSamples`: one table per sample of read counts tabulated by
# the number of T>C conversions (built on the cluster; see the commented
# generator code above).
load("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/errorRates_untreatedSamples_zebrafish/Frequency_perTimePoint_numberOfTs.Rdata")
# Rename list elements to their time point ("TP1".."TP9"), taken from the
# third "_"-separated token of the original file names.
tabulatedSamples_timepoints = unlist(lapply(strsplit(names(tabulatedSamples),"_",T) ,function(x) x[3]) )
names(tabulatedSamples) = substr(tabulatedSamples_timepoints,start = 1,3)
# Skeleton table with every possible conversion count 0..15 (Freq = 0) so
# the join below keeps all levels even if a sample is missing some.
nTC = as.data.frame(factor(c(0:15)))
colnames(nTC) = "Var1"
tabulatedSamples = tabulatedSamples[paste0("TP",c(1:9))]
nTC$Freq = 0
library(plyr)
tabulatedSamples$df = nTC
library(purrr)
# Fold all nine sample tables onto the skeleton (element 10) by joining on
# the conversion-count column.
tabulatedSamples = tabulatedSamples %>% purrr::reduce(plyr::join, by = "Var1",.init = tabulatedSamples[[10]])
colnames(tabulatedSamples) = paste(colnames(tabulatedSamples),c("ref","ref",rep(c(1:9),each = 2)),sep="_")
tabulatedSamples$Freq_ref = NULL
# One colour per time point (8 from Dark2 + 3 from Set1 = 11 colours).
colsUse = c(brewer.pal(n = 8,name = "Dark2"),brewer.pal(3,name = "Set1"))
options( scipen = 20 )
# Plot, per time point, the fraction of reads vs. number of T>C conversions
# (counts 0 and 1 excluded), plus the median curve across time points.
pdf("/Volumes//groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/errorRates_untreatedTCs/numberOfTcs_errorRate.pdf",height=4.5)
p = tabulatedSamples %>% dplyr::select(contains("frac_freq")) %>% tidyr::gather() %>% plyr::mutate(nMut = c(0:15))%>% mutate(time = paste0("TP_",rep(1:9,each=16))) %>% dplyr::group_by(nMut) %>% dplyr::mutate(medianS = median(value,na.rm = T)) %>% ungroup() %>%
  filter(nMut != 0)%>% filter(nMut != 1) %>%ggpubr::ggline(data = .,x = 'nMut',y = 'value',group='time',col='time',size=1,alpha=0.5) + scale_color_manual(values = colsUse) +
  theme_ameres(type="barplot") + scale_x_continuous(labels = c(1:15),breaks = c(1:15)) + xlab("number of T_C conversions") + ylab("log10(Fraction of TC reads)")+ scale_y_log10()
print(p)
p = tabulatedSamples %>% dplyr::select(contains("frac_freq")) %>% tidyr::gather() %>% plyr::mutate(nMut = c(0:15))%>% mutate(time = paste0("TP_",rep(1:9,each=16))) %>% dplyr::group_by(nMut) %>% dplyr::mutate(medianS = median(value,na.rm = T)) %>% ungroup() %>%
  filter(nMut != 0)%>% filter(nMut != 1) %>% filter(row_number() <= 14) %>%mutate(frac_round = round(medianS,6)) %>% ggpubr::ggline(data = .,x='nMut',y='medianS',label = 'frac_round',col='red',size=1) + scale_x_continuous(labels = c(1:15),breaks = c(1:15))+
  theme_ameres(type = "barplot") + scale_y_log10() + ylab("log10(median fraction TC)") + xlab("number of T_C conversions")
print(p)
dev.off()
######## so...I want to estimate the number of reads that will be wrong in the data based on the sequencing depth of the datasets...
# NOTE(review): the result of this pipeline is printed, not assigned — it
# appears to be an interactive inspection step.
tabulatedSamples %>% dplyr::select(contains("frac_freq")) %>% tidyr::gather() %>% plyr::mutate(nMut = c(0:15)) %>% mutate(time = paste0("TP_",rep(1:9,each=16))) %>% dplyr::group_by(nMut) %>% dplyr::mutate(medianS = median(value,na.rm = T)) %>% ungroup() %>%
  filter(nMut != 0)%>% filter(nMut != 1) %>% filter(row_number() <= 14)
#### i also want to know the number of reads that have 3 TCs..... per library....
#### i want to read all the data, of number of TCs per samples.....
Rdata_allSamples = list.files("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized/numberOfGenes_allTranscripts/",pattern ="freqAllMutations_allTranscripts*")
Rdata_samplepaths = paste0("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized/numberOfGenes_allTranscripts/",Rdata_allSamples)
falsePositiveReads = c()
totalNumberOfreadsWithTC = c()
allReads = c()
# Per sample: expected background reads = total T>C counts times a fixed
# error rate of 0.0000619; column 4 of allMutations$T_C is taken as the
# reads with 3 conversions (matches the 0..15 column layout — TODO confirm).
for(i in 1:length(Rdata_samplepaths)){
  load(Rdata_samplepaths[i])  # provides `allMutations`
  a = sum(allMutations$T_C) * 0.0000619
  falsePositiveReads = c(falsePositiveReads, a)
  totalNumberOfreadsWithTC = c(totalNumberOfreadsWithTC,apply(allMutations$T_C,2,sum)[4])
  allReads = c( allReads,sum(allMutations$T_C))
}
falsePositvies_samples = cbind.data.frame(falsePositiveReads,totalNumberOfreadsWithTC,Rdata_allSamples,allReads)
# Keep only the injected ("Inj") samples; condition / time point are carved
# out of fixed positions in the file names.
falsePositvies_samples = falsePositvies_samples %>% mutate(falsePositiveRate = falsePositiveReads/totalNumberOfreadsWithTC) %>% filter(grepl("Inj",Rdata_allSamples)) %>%
  mutate(condition = substr(Rdata_allSamples,start = 42,stop = 47)) %>% mutate(timepoint = substr(Rdata_allSamples,start = 45,stop = 47)) %>% arrange(.,timepoint) %>% mutate(index = c(1:27))
# Expected-background vs. observed ratio per sample, with a 0.5 guide line.
pdf("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/errorRates_untreatedTCs/expectedBackgroundVStrue.pdf")
p = ggpubr::ggscatter(data = falsePositvies_samples,x = 'index',y = 'falsePositiveRate',col='timepoint') + ggrepel::geom_text_repel(label=falsePositvies_samples$condition) + ylab("Expected bg reads/actual reads")
p + theme_ameres(type = "barplot") + geom_hline(aes(yintercept = 0.5),col="red",linetype="dashed",size=0.5)
dev.off()
#### i also want to calculate an average of the replicates...
# plyr is detached because its verbs mask the dplyr ones used below.
detach(package:plyr)
library(dplyr)
# Combine replicates within a time point before recomputing the ratio.
a = falsePositvies_samples %>% dplyr::group_by(timepoint) %>% mutate(sum_TotalReadsWithTC = sum(totalNumberOfreadsWithTC)) %>%
  mutate(sum_allreads = sum(allReads)) %>% mutate(totalFalsePositive = sum(falsePositiveReads) ) %>% mutate(falsePositive_combined = totalFalsePositive/sum_TotalReadsWithTC )%>% distinct(falsePositive_combined)
pdf("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/errorRates_untreatedTCs/expecterackgroundVsTrue_combinedReplicates.pdf")
p = ggpubr::ggscatter(data = a,x = 'timepoint',y = 'falsePositive_combined') + ylab("Expected bg reads/actual reads")
p + theme_ameres(type = "barplot") + geom_hline(aes(yintercept = 0.5),col="red",linetype="dashed",size=0.5)
dev.off()
#####################
###### i can also get the specific mutation rates for each dataset...
#####################
# Per-replicate background rates: fraction of reads with exactly 3 T>C
# conversions in the matched untreated sample (row where Var1_ref == 3).
backgoroundErrorPerreplicateInBg = tabulatedSamples %>% filter(Var1_ref == 3) %>% dplyr::select(contains("frac_freq"))
falsePositiveReads = c()
totalNumberOfreadsWithTC = c()
allReads = c()
for(i in 1:length(Rdata_samplepaths)){
  load(Rdata_samplepaths[i])  # provides `allMutations`
  # NOTE(review): `Rdata_allSamples[1]` uses index 1 on every iteration, so
  # the same sample number (and hence background column) is used for all
  # files — looks like it should be `Rdata_allSamples[i]`; verify intent.
  sampleNum = as.numeric(substr(Rdata_allSamples[1],47,47))
  a = sum(allMutations$T_C) * backgoroundErrorPerreplicateInBg[,sampleNum]
  falsePositiveReads = c(falsePositiveReads, a)
  totalNumberOfreadsWithTC = c(totalNumberOfreadsWithTC,apply(allMutations$T_C,2,sum)[4])
  allReads = c( allReads,sum(allMutations$T_C))
}
falsePositvies_samples = cbind.data.frame(falsePositiveReads,totalNumberOfreadsWithTC,Rdata_allSamples,allReads)
falsePositvies_samples = falsePositvies_samples %>% mutate(falsePositiveRate = falsePositiveReads/totalNumberOfreadsWithTC) %>% filter(grepl("Inj",Rdata_allSamples)) %>%
  mutate(condition = substr(Rdata_allSamples,start = 42,stop = 47)) %>% mutate(timepoint = substr(Rdata_allSamples,start = 45,stop = 47)) %>% arrange(.,timepoint) %>% mutate(index = c(1:27))
# Same replicate-combining step as above, now with per-replicate rates.
a = falsePositvies_samples %>% dplyr::group_by(timepoint) %>% mutate(sum_TotalReadsWithTC = sum(totalNumberOfreadsWithTC)) %>%
  mutate(sum_allreads = sum(allReads)) %>% mutate(totalFalsePositive = sum(falsePositiveReads) ) %>% mutate(falsePositive_combined = totalFalsePositive/sum_TotalReadsWithTC )%>% distinct(falsePositive_combined)
pdf("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/errorRates_untreatedTCs/expecterackgroundVsTrue_correctedForBg_eachTimepoint.pdf")
p = ggpubr::ggscatter(data = falsePositvies_samples,x = 'index',y = 'falsePositiveRate',col='timepoint') + ggrepel::geom_text_repel(label=falsePositvies_samples$condition) + ylab("Expected bg reads/actual reads")
p + theme_ameres(type = "barplot") + geom_hline(aes(yintercept = 0.5),col="red",linetype="dashed",size=0.5)
p = ggpubr::ggscatter(data = a,x = 'timepoint',y = 'falsePositive_combined') + ylab("Expected bg reads/actual reads")
p + theme_ameres(type = "barplot") + geom_hline(aes(yintercept = 0.5),col="red",linetype="dashed",size=0.5)
dev.off()
#### so R2_TP2 has some genes with Fp<0
# Inspect the R2_TP2 replicate specifically: entries with at least one read
# carrying 3 T>C conversions (column 4 of the T_C matrix).
r2_TP2 = Rdata_samplepaths[grep("freqAllMutations_allTranscriptsACCGTGInj_R2_TP2.txt.Rdata",Rdata_samplepaths)]
load(r2_TP2)
a = as.data.frame(allMutations$T_C[which(allMutations$T_C[,4] >=1),] ) %>% distinct(.,id_samples)
allHaving3 = allMutations$T_C[which(allMutations$T_C[,4] >=1),]
### out of these 105 reads are probably wrong....wrong
allHaving3 = as.data.frame.matrix(allHaving3)
colnames(allHaving3) = paste0("nMut",colnames(allHaving3))
allHaving3$ids = row.names(allHaving3)
# Keep entries with more than one read at both 2 and 3 conversions.
allHaving3 = allHaving3 %>% filter(nMut3 >1) %>% filter(nMut2>1)
table(allHaving3[,4])
### what are these genes
rpms_allCountingWindows = read.table("//Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/slamDunk_combinedStageSpecific/dataTables/RPMData_allCountingWindows.txt",sep="\t",stringsAsFactors = F ,header = T)
# Counting-window ID: "chrom:start+1" (BED start is 0-based).
rpms_allCountingWindows$id = paste0(rpms_allCountingWindows$V1,":",rpms_allCountingWindows$V2+1)
gnees_exp = rpms_allCountingWindows[rpms_allCountingWindows$id %in% a$id_samples,]
genes_strongExp = rpms_allCountingWindows[rpms_allCountingWindows$id %in% allHaving3$ids,]
##### now i also want to check the stochasticity of these genes...
#### i neeed to check if these genes have expression in each stage... i.e >=3TC reads in each sample....
#### readin in all the datasets of the genes that have T
library(dplyr)
library(stringr)
allRdata = list.files("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized///numberOfGenes_allTranscripts//",pattern = "*.Rdata")
allRdata_path = paste0("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/probabilityOfFindingTC/zygoticTranscripts_vectorized///numberOfGenes_allTranscripts//",allRdata)
allRdata_path = allRdata_path[grep("Inj",allRdata_path)]
# Last 20 characters of each basename, used as short sample labels.
names_samples = str_extract(allRdata_path,"([^/]+)$" ) %>% str_sub(start = -20)
rpms_allCountingWindows = read.table("//Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/outputs/slamDunk_combinedStageSpecific/dataTables/RPMData_allCountingWindows.txt",sep="\t",stringsAsFactors = F ,header = T)
rpms_allCountingWindows$id = paste0(rpms_allCountingWindows$V1,":",rpms_allCountingWindows$V2+1 )
# Collect, for one time point, the gene/window IDs that have at least one
# read with exactly 3 T>C conversions in any matching .Rdata file,
# restricted to the candidate set `genes_exp`.
#
# Args:
#   TP        - regex matched against `path` to select the time point's files
#   path      - character vector of .Rdata file paths; each file provides
#               `allMutations`, a list whose $T_C matrix has a column "3"
#   genes_exp - data frame with an `id` column of candidate IDs
# Returns: data frame with columns `id` and `present` (always 1).
#
# Fix: the original ignored its `path` and `genes_exp` arguments and read
# the globals `allRdata_path` / `gnees_exp` instead; every caller passes
# exactly those globals, so using the parameters preserves behaviour while
# removing the hidden dependency.
searchForGenes = function(TP, path, genes_exp){
  tp_files = path[grep(TP, path)]  # hoisted: was recomputed in the loop
  ids_greaterthan2 = vector("list", length(tp_files))
  for(i in seq_along(tp_files)){
    load(tp_files[i])  # provides `allMutations` in this frame
    tctemp = as.data.frame.matrix(allMutations$T_C)
    # IDs of rows with >0 reads carrying exactly 3 T>C conversions.
    ids_greaterthan2[[i]] = row.names(tctemp[which(tctemp$`3` > 0),])
  }
  ids_greaterthan2_combine = unique(do.call(c, ids_greaterthan2))
  # Keep only IDs that are in the candidate (expressed) set.
  gnees_exp_inTP = ids_greaterthan2_combine[ids_greaterthan2_combine %in% genes_exp$id]
  gnees_exp_inTP = as.data.frame(as.character(gnees_exp_inTP))
  names(gnees_exp_inTP) = "id"
  gnees_exp_inTP$present = 1
  return(gnees_exp_inTP)
}
# Presence/absence of each candidate gene (>=1 read with 3 T>C conversions)
# at every time point.
inTP1 = searchForGenes(TP = "TP1",path = allRdata_path,genes_exp = gnees_exp)
inTP2 = searchForGenes(TP = "TP2",path = allRdata_path,genes_exp = gnees_exp)
inTP3 = searchForGenes(TP = "TP3",path = allRdata_path,genes_exp = gnees_exp)
inTP4 = searchForGenes(TP = "TP4",path = allRdata_path,genes_exp = gnees_exp)
inTP5 = searchForGenes(TP = "TP5",path = allRdata_path,genes_exp = gnees_exp)
inTP6 = searchForGenes(TP = "TP6",path = allRdata_path,genes_exp = gnees_exp)
inTP7 = searchForGenes(TP = "TP7",path = allRdata_path,genes_exp = gnees_exp)
inTP8 = searchForGenes(TP = "TP8",path = allRdata_path,genes_exp = gnees_exp)
inTP9 = searchForGenes(TP = "TP9",path = allRdata_path,genes_exp = gnees_exp)
# All candidate IDs; joining the per-time-point tables onto this leaves NA
# where a gene was not detected, which is then recoded as 0.
reference = as.data.frame(as.character(gnees_exp$id))
colnames(reference) = "id"
m = list(reference,inTP1,inTP2,inTP3,inTP4,inTP5,inTP6,inTP7,inTP8,inTP9)
allStochastic = m %>% purrr::reduce(plyr::join, by = "id")
allStochastic[is.na(allStochastic) ]<-0
colnames(allStochastic) = c("id",paste0("TP",c(1:9)))
row.names(allStochastic) = allStochastic$id
allStochastic$id = NULL
# Order rows by hierarchical clustering so similar presence patterns group
# together in the heatmap.
ord <- hclust( dist((allStochastic), method = "euclidean"), method = "ward.D" )$order
allStochastic = allStochastic[ord,]
# melt() presumably comes from reshape2/data.table — TODO confirm it is
# attached at this point.
allStochastic_melt = melt(allStochastic)
# NOTE(review): 196 is hard-coded as the gene count here; this breaks if
# the candidate set changes size.
allStochastic_melt$nGene = c(1:196)
pdf("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/errorRates_untreatedTCs/stochasticExpression_sampleAboveBackgroundTC.pdf")
p = ggplot(allStochastic_melt,aes(x=variable,y = nGene,fill=value)) + geom_tile() + scale_fill_gradientn(colours = c("white", "black"), values = c(0,0.1,1))
print(p)
dev.off()
allStochastic= allStochastic %>% rownames_to_column(var = "id")
#genes_consider = rpms_allCountingWindows[rpms_allCountingWindows$id %in% row.names(allStochastic),]
# Attach RPM/annotation columns to the presence matrix.
allGenes_3TC = plyr::join(allStochastic,rpms_allCountingWindows,by="id")
########################## GO term analysis....
library(biomaRt)
library(org.Dr.eg.db)
library(topGO)
# Archived Ensembl release matching the December 2017 annotation.
ensembl = useMart(host='dec2017.archive.ensembl.org', biomart='ENSEMBL_MART_ENSEMBL',dataset = "drerio_gene_ensembl")
# Map gene symbols (column V4) to Ensembl gene IDs.
all_genes = getBM(attributes = c("external_gene_name","ensembl_gene_id"),filters = "external_gene_name",values = rpms_allCountingWindows$V4,mart = ensembl)
colnames(all_genes) = c("V4","ensembl_gene_id")
all_genes_join =plyr::join(all_genes,gnees_exp)
all_genes_join = all_genes_join[!duplicated(all_genes_join),]
head(all_genes)
# Binary gene labels for topGO: 1 = gene NOT in the 3-TC set (rows with NA
# after the join), 0 = gene in the set — TODO confirm this orientation is
# intended for the Fisher test below.
all_genes_join$category = 0
all_genes_join[!complete.cases(all_genes_join),]$category <- 1
all_genes_category = all_genes_join$category
names(all_genes_category) = all_genes_join$ensembl_gene_id
GOdata <- new("topGOdata", ontology = "BP", allGenes = all_genes_category, geneSel = function(p) p <
                0.01, description = "Test", annot = annFUN.org, mapping = "org.Dr.eg.db",
              ID = "Ensembl")
# Classic Fisher enrichment test; top 10 GO terms plotted as -log10(p).
resultFisher <- runTest(GOdata, algorithm = "classic", statistic = "fisher")
options(scipen=999)
table_goTerms= GenTable(GOdata, classicFisher = resultFisher,topNodes = 10)
pdf("/Volumes/groups/ameres/Pooja/Projects/zebrafishAnnotation/sequencingRun_december2017/analysis/annotation/stageSpecific/plots/probabilityOfTC/GoTermAnalysis_allCws_moreThan2TCreads_allCws.pdf")
p = table_goTerms %>% mutate(log20P = -log10(as.numeric(classicFisher)) ) %>% ggplot() + geom_bar(aes(x=Term,y=log20P),stat="identity",col="black",fill="black") + coord_flip() + theme_ameres(type = "barplot") + ylab("-log2(pVal)")
print(p)
dev.off()
|
f40400bea05867f6061e8e111dfd5e9451c25145
|
851e9aa210c2e711ad2813b4c145dbeeed482085
|
/tests/testthat/test_radioactivePlumes.R
|
ae121d6a9a32c761a4323a7b9c13bc6f42d83f6f
|
[] |
no_license
|
KristinaHelle/sensors4plumesTests
|
6f7b9a77ebc6e47ad9020b8929ba8fe172ef7c94
|
ca8ee8e2d34044190db769512c44f38403c7502d
|
refs/heads/master
| 2020-12-31T00:01:17.838547
| 2017-03-29T13:16:14
| 2017-03-29T13:16:14
| 86,583,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 324
|
r
|
test_radioactivePlumes.R
|
#################################################################
# test radioactivePlumes_area                                   #
# test radioactivePlumes_local                                  #
#################################################################

# Load the two example plume datasets shipped with the package; data()
# attaches them to the global environment for the tests that follow.
data(radioactivePlumes_area)
data(radioactivePlumes_local)
|
95e29836504ca43b0bcad3a479b80906b0131a2e
|
246995b2eed46fdc5fe53d6b398ac34020b1c7fe
|
/App/server.R
|
fbf520e163d014822cccbb6a1fd63a3197676a70
|
[] |
no_license
|
aspeijers/AB-testing
|
c04e2802fb72f28bafed0e8cca7666816744e3b8
|
c46c9c8d0b615ba8ca64238c8c8c67040d5096ab
|
refs/heads/master
| 2020-07-02T09:53:23.293828
| 2016-11-30T14:11:27
| 2016-11-30T14:11:27
| 74,311,738
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,177
|
r
|
server.R
|
# load packages
if (!require("ggplot2")) install.packages("ggplot2"); library(ggplot2)
# Server for the A/B-test power visualisation: draws the (approximate)
# sampling distributions of the Control (A) and Treatment (B) conversion
# rates at the required sample size, shading the alpha and beta regions.
shinyServer(function(input, output, session) {
  # Required sample size per variant, from the normal-approximation
  # two-proportion formula. Inputs `signif`, `rel`, `p1`, `p2` are
  # percentages, hence the repeated /100.
  n <- reactive({
    ( qnorm(1-input$signif/100)*sqrt(input$p1/100*(1-input$p1/100)) -
        qnorm(1-input$rel/100)*sqrt(input$p2/100*(1-input$p2/100)))^2 / (input$p2/100-input$p1/100)^2
  })
  # Simulated sampling distribution of the Control conversion rate, shifted
  # so p1 maps to 0 (the Treatment density below uses the same shift).
  x1_dens <- reactive({
    density( rnorm(n=10^6, mean=input$p1/100, sd=sqrt(n()*input$p1/100*(1-input$p1/100) )/n()) - input$p1/100)
  })
  # Simulated sampling distribution of the Treatment conversion rate.
  x2_dens <- reactive({
    density( rnorm(n=10^6, mean=input$p2/100, sd=sqrt(n()*input$p2/100*(1-input$p2/100) )/n()) - input$p1/100)
  })
  # Both densities combined into one data frame (V1/V2 = A, V3/V4 = B);
  # y-values are rescaled by 1/n().
  dist <- reactive({
    as.data.frame( cbind(x1_dens()$x, x1_dens()$y/n(), x2_dens()$x, x2_dens()$y/n()) )
  })
  # Decision boundary (critical value on the shifted axis) and the density
  # heights of A and B at that point, used for plot annotation placement.
  int_x <- reactive({ qnorm(p=(1-input$signif/100), mean=input$p1/100, sd=sqrt(n()*input$p1/100*(1-input$p1/100))/n()) - input$p1/100})
  int_y <- reactive({ dnorm(x=int_x(), mean=input$p1/100, sd=sqrt(n()*input$p1/100*(1-input$p1/100) )/n()) /n() })
  int_y_B <- reactive({ dnorm(x=int_x(), mean=input$p2/100, sd=sqrt(n()*input$p2/100*(1-input$p2/100) )/n()) /n() })
  # Polygons for the shaded areas: alpha = right tail of A beyond the
  # boundary, beta = left tail of B before it; closed at the baseline so
  # geom_polygon fills them correctly.
  shaded <- reactive({
    shade_A <- rbind( c(int_x(), 0), subset(dist()[,1:2], V1 > int_x()), c(dist()[nrow(dist()),1],0) )
    shade_B <- rbind( c(dist()[1,3],0), subset(dist()[,3:4], V3 < int_x()), c(int_x(), 0) )
    names(shade_A) <- c("V1", "V2")
    names(shade_B) <- c("V3", "V4")
    return(list(shade_A, shade_B))
  })
  # Main plot: both distributions, the boundary, the shaded alpha/beta
  # regions, and the required sample size annotation.
  output$plot1 <- renderPlot({
    ggplot(dist()) +
      geom_line(aes(x=V1, y=V2, col="version_a")) +
      geom_line(aes(x=V3, y=V4, col='version_b')) +
      geom_vline(xintercept=int_x()) +
      #geom_segment(aes(x = int_x, y = 0, xend = int_x, yend = int_y), col="black") +
      geom_polygon(data=shaded()[[1]], aes(x=V1, y=V2, fill="alpha"), alpha=0.3) +
      geom_polygon(data=shaded()[[2]], aes(x=V3, y=V4, fill="beta"), alpha=0.3) +
      labs(x="Conversion Rate (CR)", y="Density") +
      annotate("text", x=(input$p2 - input$p1)/100, y=max(int_y(), int_y_B())/3, label = paste0("significance = ", round(input$signif/100,2)), col="red") +
      annotate("text", x=0, y=max(int_y(), int_y_B())/3, label = paste0("beta = ", round(1-input$rel/100,2)), col="blue") +
      annotate("text", x=Inf, y=Inf, hjust=1.2, vjust=1.2, label=paste(round(n(),0), "samples"), size=8) +
      scale_colour_manual(name = "Distributions",
                          breaks = c("version_a", "version_b"),
                          values = c('red', 'blue'),
                          labels = c("Control (A)", "Treatment (B)")) +
      scale_fill_manual(name = "",
                        breaks = c("alpha", "beta"),
                        values = c('red', 'blue'),
                        labels = c("alpha", "beta"))
  })
})
|
52b63fc71312dcb1d282d20a055cd3fdc55980ef
|
ea27a419137fd54440b795e2a530605b62e83e70
|
/flashdrive/Workshop2015 morning/script-03__plot-six-vars-at-once.R
|
7fb97a1a007d0330bcb746901b71f32eebb2a82f
|
[] |
no_license
|
fawda123/swmp_workshop_2015
|
aec33e2865caf21e30beda0e6df93a649c89a0b6
|
7f2a08aabca0d52eb3cdaa506271092e71da6395
|
refs/heads/master
| 2021-01-10T19:16:30.024857
| 2016-11-11T01:04:41
| 2016-11-11T01:04:41
| 42,066,683
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,110
|
r
|
script-03__plot-six-vars-at-once.R
|
# CLEAR OLD GRAPHICS ... from the plot window only if the length of the device list (# of plots) is > 0
if ( length(dev.list()) > 0 ) { dev.off(dev.list()["RStudioGD"]) }

# Pick one station file (uncomment the one you want).
WQB <- read.csv("data_WQBCR.csv", header=TRUE)
#WQB <- read.csv("data_ACEBB.csv", header=TRUE)
#WQB <- read.csv("data_ELKNM.csv", header=TRUE)
#WQB <- read.csv("data_NOCRC.csv", header=TRUE)

plot.new()
par(mfrow=c(3,2)) # 3 rows x 2 columns

# The original plotted columns 2..7 with six near-identical blocks; loop
# over them instead. Panel order and colours match the original exactly
# (column-first: rows 1-3 of panel column 1, then panel column 2).
panelPos <- list(c(1, 1), c(2, 1), c(3, 1), c(1, 2), c(2, 2), c(3, 2))
panelCol <- c("red", "black", "orange", "green", "cyan", "pink")
# Dates are in column 1; parse them once instead of six times.
dates <- as.Date(WQB[[1]], "%m/%d/%Y")
for (i in seq_along(panelCol)) {
  par(mfg = panelPos[[i]])
  # The plot title is the variable's column header, as before.
  plot(dates, WQB[[i + 1]], col = panelCol[i], main = names(WQB)[i + 1])
}
|
649ee87162eb4aa1767ec95f21ee951e7bfa5843
|
093cf42c7d4bee93e30a19382f3748d1bddc5adc
|
/week07/2_1_knn.R
|
e1b793014a14538d065ce11215d30d9b1dad8f6d
|
[] |
no_license
|
Huride/radsp
|
262dfa0e1888c240a59771d3390c14188e778ee7
|
23e3713d1f9b3891cdae5f89b8021905b4feedbb
|
refs/heads/master
| 2022-12-05T16:02:11.174257
| 2020-08-19T09:58:44
| 2020-08-19T09:58:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,277
|
r
|
2_1_knn.R
|
# K-NN classifier on the Social Network Ads data, with decision-boundary
# visualisations for the training and test sets.
#
# Fix: the original file had the visualisation code FIRST and the data
# preparation / model fitting LAST, so it could not be run top to bottom
# (training_set, test_set and knn were used before they existed). The
# statements are reordered into execution order; the hard-coded accuracy
# "(59 + 30) / sum(cm)" is computed from the matrix itself.

# ---- Data preparation ----
dataset <- read.csv("Social_Network_Ads.csv")
dataset <- dataset[ , 3:5]
dataset$Purchased <- factor(dataset$Purchased, levels = 0:1)
str(dataset)

# ---- Train / test split (75/25, stratified on the outcome) ----
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Standardise the two predictors (column 3 is the outcome).
training_set[ , -3] <- scale(training_set[ , -3])
summary(training_set)
test_set[ , -3] <- scale(test_set[ , -3])

# ---- K-NN model (k = 5) ----
library(class)
y_pred <- knn(train = training_set[ , -3],
              test = test_set[ , -3],
              cl = training_set$Purchased,
              k = 5,
              prob = FALSE)
results <- data.frame(test_set, y_pred)

# Confusion matrix
cm <- table(test_set$Purchased, y_pred)
cm
# Accuracy: diagonal / total, instead of the original hard-coded
# (59 + 30) / sum(cm); same value (~0.89, i.e. 89%) on this split.
sum(diag(cm)) / sum(cm)

# ---- Visualising the Training set results ----
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = knn(train = training_set[, -3], test = grid_set, cl = training_set[, 3], k = 5)
plot(set[, -3],
     main = 'K-NN (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))

# ---- Visualising the Test set results ----
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = knn(train = training_set[, -3], test = grid_set, cl = training_set[, 3], k = 5)
plot(set[, -3],
     main = 'K-NN (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
4fb55557ae968ab48bf1e17bf841b1e69b7553a6
|
ad43c7ffa288bd28ea6fcf77a2b70cff9632a55c
|
/classifier/analyse-decision-trees-nature.R
|
76a71302bd7ffa2be800d54f862f0f692d61f264
|
[] |
no_license
|
plsm/assisi-domset-demonstrator
|
9aad5acaf8484ab261e51a8fe444258ecaf02a03
|
9a5ef63224bd29a23d0b77a328e315bad7be6c57
|
refs/heads/master
| 2021-07-05T07:24:40.304924
| 2018-12-04T12:22:04
| 2018-12-04T12:22:04
| 131,604,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,810
|
r
|
analyse-decision-trees-nature.R
|
library ("data.table")
library ("ggplot2")

# Experiment index: one row per result file (file.name) together with the
# parameters that produced it; read with data.table::fread.
analysis <- fread ("analysis.csv")
# Read one per-repeat result CSV and attach the canonical column names.
# Prints the file name as a progress indicator before reading.
read.repeat.data <- function(filename) {
  cat(sprintf("Reading %s\n", filename))
  repeat.cols <- c(
    "time",
    "run",
    "criterion",
    "max.depth",
    "min.samples.split",
    "random.chance.winscore",
    "success.rate",
    "false.positive",
    "false.negative"
  )
  fread(input = filename, col.names = repeat.cols)
}
# Read every repeat file listed in `analysis`, keyed by its experiment
# parameters; data.table binds the per-file tables row-wise.
all.data <- analysis [
  ,
  read.repeat.data (file.name),
  by = .(
    file.name,
    sampling.time,
    delta.time,
    temperature.threshold,
    decision.tree
  )
]
# Mean success rate per hyper-parameter combination, aggregated over runs.
data.to.plot <- all.data [
  ,
  .(success.rate = mean (success.rate)),
  by = .(
    criterion,
    max.depth,
    min.samples.split,
    delta.time,
    temperature.threshold
  )
]
# Render and save one success-rate-vs-delta-time figure for a single
# hyper-parameter combination. `label` is embedded in the output file name;
# a header and the label are printed as progress output. Returns 0.
make.plot <- function(data, label) {
  cat ("\n* ** MAKE PLOT ** *\n")
  print(label)
  fig <- ggplot(
    data = data,
    mapping = aes(
      x = delta.time,
      y = success.rate,
      color = factor(temperature.threshold)
    )
  ) +
    geom_line() +
    geom_point() +
    labs(
      x = "delta time (s)",
      y = "success rate",
      color = "temperature threshold (°C)"
    ) +
    # y axis: percentages, clipped to [85%, 100%].
    scale_y_continuous(
      labels = function(v) sprintf("%.0f%%", v * 100),
      limits = c(0.85, 1)
    )
  ggsave(
    filename = sprintf(
      "analysis-decision-tree_nature_%s.png",
      label
    ),
    plot = fig,
    width = 8,
    height = 6,
    units = "in"
  )
  return(0)
}
# One figure per (criterion, max.depth, min.samples.split) combination;
# the group values are baked into the output file name via the label.
data.to.plot [
  ,
  make.plot (.SD, sprintf ("%s_%d_%d", criterion, max.depth, min.samples.split)),
  by = .(criterion, max.depth, min.samples.split)
]
|
a38c3f691ebf97b0b47b555e10960a3ba8f81827
|
fe9692171a1235ee5fd37ae4cf27e4498505cfe9
|
/parFunctions.R
|
206eb0ebcfb66edaa09119113ea80bb48bdf8a43
|
[] |
no_license
|
samueldnj/SableIBM
|
d13c21fa0ae001b8c76ee4ef7aa446184b1d85d8
|
1377ca15b3fed19bfde0ed79919c2b73ea148428
|
refs/heads/master
| 2020-04-04T00:00:53.865174
| 2015-10-28T15:24:29
| 2015-10-28T15:24:29
| 33,518,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,211
|
r
|
parFunctions.R
|
#--------------------`-------------------------------------------------
#
# REM612 Project Script - parallel functions
#
# Author: Samuel Johnson
# Date: 19/1/15
#
# This script contains the functions used inside parallel processes
# for the fishing script.
#
#---------------------------------------------------------------------
require ( data.table )
# Look up the bathymetric grid cell nearest to a point.
# Args: lat   = latitude of the point
#       long  = longitude of the point
#       bathy = bathymetry list (as produced by bathymetry.R) with grid
#               line coordinates in $x (latitude) and $y (longitude)
# Rets: integer 2-ple of (x, y) grid cell indices for the point
bathyLookup <- function(lat = 0, long = 0, bathy = bathyTop) {
  # Absolute offsets from the query point to every grid line on each axis.
  latOffsets <- abs(bathy$x - lat)
  longOffsets <- abs(bathy$y - long)
  # Index of the nearest grid line on each axis. Ties (equidistant lines)
  # resolve to the largest index, matching the original convention.
  latIdx <- max(which(latOffsets == min(latOffsets)))
  longIdx <- max(which(longOffsets == min(longOffsets)))
  c(latIdx, longIdx)
}
# Convert a cartesian ordered pair into a polar ordered pair.
# Args: x = x-value
#       y = y-value
# Rets: c(r, t) = (radius, angle in radians, measured from the +x axis)
cart2pol <- function ( x = 1, y = 1 )
{
  # radius is the euclidean norm; angle comes from atan2, which is
  # exactly what Arg() of the complex number x + yi would give
  radius <- sqrt ( x * x + y * y )
  angle <- atan2 ( y, x )
  c ( radius, angle )
}
# Convert a polar ordered pair into a cartesian ordered pair.
# Args: r = radius
#       t = angle in radians
# Rets: c(x, y) = cartesian coords of the point
pol2cart <- function ( r = 1, t = 0 )
{
  # direct trigonometric form; equivalent to taking Re/Im of
  # complex(modulus = r, argument = t)
  c ( r * cos ( t ), r * sin ( t ) )
}
# Choose the next location for a boat.  If the boat is docked (or sits at
# the 0/0 origin) a fresh start point is drawn uniformly inside a crude
# polygon around the 8 stat areas; otherwise the boat jumps from its
# current position in a random direction at a random log-normal speed.
# With recursive = TRUE the draw is repeated until the new point satisfies
# the depth limits in histData and lies inside the fishing polygon.
# Args: status = fishing or docked (compared against the global `docked`)
#       lat = latitude of current position
#       long = longitude of current position
#       towing = boolean for selecting movement speed distribution
#       eTime = amount of time movement is happening for
#       bathy = bathymetry data as produced by bathymetry.R
#       histData = list holding minDepth / maxDepth bounds (among others)
#       recursive = redraw until the point is valid?
# Rets: newLoc = a vector of latitude and longitude
boatMove <- function ( status = docked, lat = 0, long = 0, towing = FALSE,
eTime = 1, bathy = bathyTop, histData = histData,
recursive = FALSE )
{
# Recover historic data from entry list.
minDepth <- histData $ minDepth
maxDepth <- histData $ maxDepth
# First, if boat is docked, generate a random starting location
if ( status == docked | ( lat == 0 & long == 0) )
{
# Uniform random draw from the fishing area - very crudely drawn from
# within a polygon around the 8 stat areas
# Choose new latitude
newLat <- runif ( n = 1, min = - 134, max = - 124 )
# Choose new longitude within polygon
# (yUB / yLB are hard-coded line equations for the polygon's edges;
# the same two lines reappear in the validity check below)
yUB <- ( - 6 * newLat - 407 ) / 7
yLB <- -2. * ( newLat + 125 ) / 3. + 47
newLong <- runif ( n = 1, min = yLB, max = yUB )
} else {
# Draw a random direction
tJump <- rnorm ( n = 1, mean = 0, sd = pi / 2 )
# Draw a random motoring speed in knots
{
if ( towing ) speed <- rlnorm ( n = 1, mean = log ( 3 ), sd = 0.3 )
else speed <- rlnorm ( n = 1, mean = log ( 6 ), sd = 0.5 )
}
# Convert speed to degrees per hour
# (1 knot = 1.852 km/h; ~111 km per degree of latitude)
degSpeed <- speed * 1.852 / 111
# Create jump distance
rJump <- (eTime - 1) * degSpeed
# Convert old location to a complex number for adding jump
oldLoc <- complex ( real = lat, imaginary = long )
# Convert jump to complex number
jump <- complex ( modulus = rJump, argument = tJump )
# Make new location
newLoc <- oldLoc + jump
# Recover new latitude and longitude
newLat <- Re ( newLoc )
newLong <- Im ( newLoc )
}
# Look up depth of new location in bathymetry table. First get gridref
gridCell <- bathyLookup ( lat = newLat, long = newLong, bathy = bathy )
xEntry <- gridCell [ 1 ]
yEntry <- gridCell [ 2 ]
# Recover depth from bathy matrix
depth <- bathy $ z [ xEntry, yEntry ]
# Combine new position into a vector for returning
outVec <- c ( newLat, newLong )
if ( recursive )
{
# Recurse function to correct for bad points: redraw while the point is
# too shallow/deep or outside the polygon.  The redraws themselves use
# recursive = FALSE so only this loop repeats, not the whole check.
while ( depth < minDepth |
depth > maxDepth |
outVec [ 2 ] > 55. |
outVec [ 1 ] > -125. |
outVec [ 2 ] > ( - 6. * outVec [ 1 ] - 407.) / 7. |
outVec [ 2 ] < -2. * ( outVec [ 1 ] + 125 ) / 3. + 47. )
{
# Generate a new location
outVec <- boatMove ( status = status, lat = lat, long = long,
towing = towing, eTime = eTime, bathy = bathy,
histData = histData, recursive = FALSE )
# Look up depth of new location in bathymetry table. First get gridref
gridCell <- bathyLookup ( lat = outVec [ 1 ], long = outVec [ 2 ],
bathy = bathy )
xEntry <- gridCell [ 1 ]
yEntry <- gridCell [ 2 ]
# Recover depth from bathy matrix
depth <- bathy $ z [ xEntry, yEntry ]
}
}
# Return location
return ( outVec )
}
# Discrete-event scheduler: given the event just processed, append the
# boat's next event (a fishing set or a docking) to its future event list.
# Arguments: time = current time (must match the event's time stamp)
#            event = current event number (row index into fel)
#            fel = future event list for the boat in question
#            ntows = number of tows completed so far on this trip
#            histData = list of historic fishing parameters (interFish,
#                       meanEventTime, layOver, meanTows)
#
# Returns: fel = future event list with new event scheduled
# NOTE(review): nT (season length) and the event-type codes `docked` and
# `fishing` are globals supplied by the driver script.
eventSched <- function ( time = t, event = eCount,
fel = FEL, ntows = nTows, histData = histData )
{
# Recover boat number, trip threshold and historic rates from arguments
boat <- fel [ event, ] $ boat.number
thresh <- fel [ event, ] $ trip.thresh
interFish <- histData $ interFish
meanEventTime <- histData $ meanEventTime
layOver <- histData $ layOver
meanTows <- histData $ meanTows
# Sanity check: the caller's clock must agree with the event's stamp
if ( time != fel [ event, ] $ time.stamp )
{
cat ( "ACK! Failure on time variable - review logic. \n
boat = ", boat, "\n
t = ", time, "\n
time.stamp = ", fel [ event, ] $ time.stamp ); return ( fel )
}
# If boat is docked, set next fishing event
if ( fel [ event, ] $ event.type == docked )
{
# Recover the layover duration stored on the docking event itself
tLay <- fel [ event, ] $ event.time
# Set next time (capped at the end of the season)
tNext <- min ( nT, time + tLay )
# Create new threshold for nTows, reset nTows
tripThresh <- rpois ( n = 1, lambda = meanTows )
ntows <- 0
# Create towDuration for next event
towTime <- towDuration ( lam = meanEventTime )
# Add event to FEL
fel [ event + 1, ] <- c ( boat, tNext, fishing, towTime, ntows, tripThresh )
}
# If boat is fishing, check threshold variable, if below threshold then
# set new fishing event, else set docking event.
if ( fel [ event, ] $ event.type == fishing )
{
# recover tow duration for next event time
towDur <- fel [ event, ] $ event.time
# Set next event time by adding towDuration
# to current time
tNext <- time + towDur - 1
# If tripLength >= thresh, dock, else fish again
if ( ntows >= thresh )
{
# Generate layover time
tLay <- layTime ( lam = layOver )
# Add to FEL
fel [ event + 1, ] <- c ( boat, tNext, docked, tLay, 0, 0 )
} else {
# Generate interfishing time
interfishTime <- rpois ( n = 1, lambda = interFish )
# Add to next event time
tNext <- tNext + interfishTime
# Generate set duration for next event
towDurNext <- towDuration ( lam = meanEventTime )
# Add next event to FEL and
# Correct for going over time - dock if basically at the end of the year
if ( tNext + towDurNext > nT )
{
# Set tNext to time limit
tNext <- nT
# update FEL to reflect end of year behaviour
fel [ event + 1, ] <- c ( boat, tNext, docked, 0, 0, 0 )
} else {
fel [ event + 1, ] <- c ( boat, tNext, fishing, towDurNext, ntows + 1,
thresh )
}
}
}
# return appended future event list
return ( fel )
}
# Process a scheduled discrete event and update the boat's state slice.
# Arguments: event = the counting index of the FEL entry being processed
#            fel = the future event list
#            nowlat, nowlong = the boat's current position
#            ntows = number of tows completed this trip
#            histData = list of historic fishing parameters
#            b = the slice of the state variable array being updated
#
# Returns: state = the updated slice of the systems state array
# NOTE(review): this function reads several globals (`interfishTime`,
# `bathyTop`, the event codes `docked`/`fishing`) and writes
# nowLat/nowLong back to the global environment with `<<-`.
eventProc <- function ( event = eCount, fel = FEL, nowlat = nowLat,
nowlong = nowLong, ntows = nTows, histData = histData,
b = B )
{
# Get time stamp from FEL for logic check
time <- fel [ event, ] $ time.stamp
# NOTE(review): this comparison re-reads the very field `time` was just
# assigned from, so it can never be TRUE; additionally `boat` and `t`
# in the message are not defined at this point.  Presumably a caller
# supplied clock was meant to be compared here, as in eventSched.
if ( time != fel [ event, ] $ time.stamp )
{
cat ( "ACK! Failure on time variable - review logic. \n
boat = ", boat, "\n
t = ", t, "\n" ); return ( fel )
}
# Recover other parameters for system state
eTime <- fel [ event, ] $ event.time
boat <- fel [ event, ] $ boat.number
status <- fel [ event, ] $ event.type
# Check event type and update state
# First, if the boat is docked
if ( fel [ event, ] $ event.type == docked )
{
# Update state: change status to docked, reset tripLength counter and set
# location to zero
state <- 0 # auto recycles to set state back to docked
}
# Next if the boat is fishing
if ( fel [ event, ] $ event.type == fishing )
{
# generate event starting location
# NOTE(review): `interfishTime` is not defined in this function -- it is
# resolved from the enclosing/global environment.  Confirm intent.
startLoc <- boatMove ( status = status, lat = nowlat, long = nowlong,
towing = FALSE, eTime = interfishTime,
bathy = bathyTop, histData = histData,
recursive = TRUE )
# generate event ending location
endLoc <- boatMove ( status = status, lat = startLoc [ 1 ],
long = startLoc [ 2 ],
towing = TRUE, eTime = eTime, bathy = bathyTop,
histData = histData, recursive = TRUE )
# generate jump sizes (per-step displacement along the tow)
if ( eTime == 1 ) jumps <- 0
else jumps <- ( endLoc - startLoc ) / ( eTime - 1 )
# Recover system state
state <- b [ , time:( time + eTime - 1 ) ]
# Fix this for small tows - not pretty
# (a single selected column drops to a vector; coerce back to a matrix)
if ( class ( state ) != "matrix" )
{
state <- as.matrix ( state )
}
# Set location - this should be made more sophisticated in future, even
# use a Markovian approach (for tripLength > 1)
for ( tStep in 0:( eTime - 1 ) )
{
state [ 1:2, tStep + 1 ] <- startLoc + tStep * jumps
}
# Set status to fishing
state [ 3, ] <- fishing
# Increment nTows and update
state [ 4, ] <- ntows
# Store ending location for next event
# (global side effect: consumed by the next scheduled event)
nowLat <<- endLoc [ 1 ]
nowLong <<- endLoc [ 2 ]
}
# Return state of boat during fishing event
return ( state )
}
# Draw a strictly positive tow duration for a fishing event.
# Args: lam = parameter for the Poisson distribution drawn from
# Rets: towTime = duration of trawl event (>= 1)
towDuration <- function ( lam = 3 )
{
  # redraw until a non-zero value appears; consumes exactly one Poisson
  # draw per attempt, as before
  repeat
  {
    towTime <- rpois ( n = 1, lambda = lam )
    if ( towTime > 0 ) break
  }
  towTime
}
# Draw a random layover time for a fishing vessel.
# Args: lam = parameter for the Poisson distribution drawn from
# Rets: duration of layover
layTime <- function ( lam = layOver )
{
  rpois ( n = 1, lambda = lam )
}
# Return the indices of the non-zero rows of a matrix.
# Arguments: mat = matrix to be searched
# Returns: ridx = vector of row indices whose entries sum to > 0
rowIdx <- function ( mat = boatFishingTimes )
{
  # rowSums() replaces the per-row apply(); which() keeps only the
  # rows with a positive total
  which ( rowSums ( mat ) > 0 )
}
# Record which fish lie within a detection radius of a boat's gear at a
# single time step.  Distance is computed as (squared) euclidean
# distance, as the radius is small enough for this to be fine.
# Arguments: b = column number of cLoc (which boat)
#            cLoc = matrix of boat lat/long, boats as columns
#            fLoc = matrix of fish locations, lat/long as columns
#            t = time step for which comparison is being made
#            rad = detection threshold (see NOTE below)
#
# Returns: detections = a data.table with fish number, boat location and
#          time stamp, one row per detected fish
bDetections <- function ( b = 1, cLoc = boatLocs, fLoc = fishLocs, t = t,
rad = 0.005 )
{
# Recover location of centrepoint from cLoc
cLoc <- cLoc [ , b ]
# Matrix of signed lat/long offsets from the boat to every fish
diff <- matrix ( 0, nrow = nrow ( fLoc ), ncol = 2 )
diff [ , 1 ] <- fLoc [ , 1 ] - cLoc [ 1 ]
diff [ , 2 ] <- fLoc [ , 2 ] - cLoc [ 2 ]
# square and sum for euclidean distance
# BUG FIX: `diff2` was previously indexed without ever being allocated,
# which raised "object 'diff2' not found" at run time.
diff2 <- diff * diff
diffDist <- diff2 [ ,1 ] + diff2 [ , 2 ]
# Look for fish with diffDist < radius
# NOTE(review): diffDist is a *squared* distance but is compared to `rad`
# directly (not rad^2), so `rad` effectively acts as a squared radius.
# Kept as-is to preserve the detection behaviour callers rely on.
closeFish <- which ( diffDist < rad )
# Get number of detections
nDetections <- length ( closeFish )
# Set up data.table to hold detections, including fish number, location
# and time
detections <- data.frame ( fish.number = closeFish,
boat.lat = rep.int ( x = cLoc [ 1 ],
times = nDetections ),
boat.lon = rep.int ( x = cLoc [ 2 ],
times = nDetections ),
time.stamp = rep.int ( x = t,
times = nDetections ) )
# convert to data.table and return
detections <- as.data.table ( detections )
return ( detections )
}
# Record all boat-fish proximity detections at one time step.  Calls
# bDetections once per boat fishing at that time and binds the results.
# Arguments: t = time step for detections
#            bArray = array of boat state variables [state, time, boat]
#            fArray = array of fish state variables [fish, state, time]
#            radius = detection threshold forwarded to bDetections
# Returns:
#   tDetections = a data.table of detections at time t with fish
#                 number, location and time
tDetect <- function ( t = 1, bArray = B, fArray = fArray,
radius = 0.005 )
{
# Get boat numbers for fishing boats at each time
# (state row 3 holds the status code; 1 == fishing)
boats <- which ( bArray [ 3, t, ] == 1 )
# Make matrix of locations - rows are lat/long, cols are boats
# (as.matrix also repairs the dimension drop when only one boat fishes)
boatLocs <- as.matrix ( bArray [ 1:2, t, boats ] )
# Get matrix of fish locations at that time
fishLocs <- fArray [ , 1:2, t ]
# Get number of boats fishing at that time
nb <- length ( boats )
# lapply bDetections to get a list of detection data.tables
# NOTE(review): if no boat is fishing, 1:nb is c(1, 0) and the lapply
# would index a non-existent column -- guard upstream if that can occur.
detList <- lapply ( X = 1:nb, FUN = bDetections, cLoc = boatLocs,
fLoc = fishLocs, t = t, rad = radius )
# Bind list of detection data.tables into one data.table and return
tDetections <- rbindlist ( detList )
return ( tDetections )
}
# Append inlet-of-origin numbers to the fish state array and to each
# detections data.table.  (Done post hoc here; the author notes this will
# be done from the beginning next time.)
# Args: fArr = array of fish state variables [fish, state, time]
#       inletLocs = matrix of inlet tagging locations, one row per inlet
#       detList = a list of data.tables of detections
# Rets: outList = a list containing
#       - list of inlet fish array indices for fArr,
#       - a list of appended detection data.tables
#       - an appended version of F
appendInlets <- function ( fArr = F, inletLocs = inlets,
detList = yearDetections )
{
# Extract the indices of fish in each inlet
# A list to hold the indices
# NOTE(review): the length is hard-coded to 4 -- keep in sync with
# nrow(inletLocs).
inletFishIdx <- vector ( mode = "list", length = 4 )
# This loop populates the list by checking those
# entries in F with a given starting position, and also
# updates the array of states with the inlet of origin.
for (i in 1:nrow ( inletLocs ) )
{
idx <- unique ( which ( fArr [ , 1:2, 1 ] == inletLocs [ i, ],
arr.ind = T ) ) [ , 1 ]
fArr [ idx, 3, ] <- i
inletFishIdx [[ i ]] <- list ( idx, i )
}
# Now, fix the dimnames
dimnames ( fArr ) [[ 2 ]] <- c ( "latitude", "longitude", "inlet" )
# Now to update the detection data.tables
# Loop over the entries in detList
for ( i in 1:length ( detList ) )
{
# Recover detection dt from detList
dt <- detList [[ i ]]
# lapply the function to inletFishIdx
dtJoinList <- lapply ( X = inletFishIdx, FUN = inletJoin, dt = dt )
# bind the result together
dt <- rbindlist ( dtJoinList )
# Add an aggregate column (constant 1; used later as a single-group id
# for whole-sample MCP areas) -- `:=` modifies dt by reference
dt [ , agg := 1 ]
detList [[ i ]] <- dt
}
# Make a list for returning
outList <- list ( )
outList $ inletFishIdx <- inletFishIdx
outList $ detList <- detList
outList $ F <- fArr
# return list of output
return ( outList )
}
# Attach the inlet-of-origin number to each detection record.
# idx is a 2-element list: [[1]] the fish numbers belonging to one inlet,
# [[2]] that inlet's number.  Returns the detection table restricted to
# those fish, with an `inlet` column appended (inner join on fish.number).
# Path-specific companion of appendInlets above.
inletJoin <- function ( idx = inletFishIdx [[ 1 ]], dt = detList [[ 1 ]] )
{
  fishNums <- idx [[ 1 ]]
  inletNum <- idx [[ 2 ]]
  # lookup table: one row per fish, tagged with its inlet number
  lookup <- as.data.table ( data.frame (
    fish.number = fishNums,
    inlet = rep.int ( x = inletNum, times = length ( fishNums ) )
  ) )
  # key the detections so the bracket join below matches on fish.number
  setkey ( dt, fish.number )
  # inner join (nomatch = 0 drops detections of fish outside this inlet)
  dt [ lookup, nomatch = 0 ]
}
# Build a data.table of the true track of one fish over the simulation,
# for comparison of detected home range to actual.
# Arguments:
#   fN = fish number (row index into states)
#   nT = length of time to look over
#   states = array of fish state variables [fish, state, time]
# Returns:
#   data.table with fish.number, fish.lat, fish.lon, time.stamp, inlet
trueLocs <- function ( fN = 1, nT = simCtl $ nT, states = F )
{
  # pre-size the columns; lat/lon/inlet are filled from the state array
  track <- data.frame ( fish.number = integer ( length = nT ),
                        fish.lat = double ( length = nT ),
                        fish.lon = double ( length = nT ),
                        time.stamp = 1:nT,
                        inlet = integer ( length = nT ) )
  # the fish number is constant down the whole track
  track [ , 1 ] <- fN
  # lat / lon / inlet come straight from the state array, transposed so
  # that time runs down the rows
  track [ , c( 2:3, 5 ) ] <- t ( states [ fN, 1:3, ] )
  # coerce to a data.table and return
  as.data.table ( track )
}
# Randomly sample fish and compare the home range inferred from
# detections to the true home range -- both for the aggregate sample and
# for each inlet separately.  Habitat areas are minimum convex polygons
# (mcp.area, from the adehabitat family of packages).
# Arguments: N = sample size per inlet
#            detections = detection data to use (based on historic
#                         fishing effort)
#            indices = list of per-inlet fish indices (from appendInlets)
#            states = array of fish state variables
# Returns: areas = matrix of detected and true habitat areas,
#          and the ratio between them, one row per group.
compareFunc <- function ( N = 20, detections = yearDetections [[ 1 ]],
indices = inletFishIdx, states = F )
{
# Count inlets
nInlets <- length ( indices )
# Take samples in each inlet
# Make matrix to hold samples
fishSamp <- matrix ( 0, nrow = nInlets, ncol = N )
# Take samples
# NOTE(review): sample(x = idx, ...) with a length-1 idx samples from
# 1:idx instead -- base R footgun; also requires length(idx) >= N since
# sampling is without replacement.
for ( i in 1:nInlets )
{
idx <- indices [[ i ]][[ 1 ]]
fishSamp [ i, ] <- sample ( x = idx, size = N )
}
# Aggregate into a vector
aggSamp <- as.vector ( fishSamp )
# Reduce state array and detections table to sample
# NOTE(review): sampF is computed but never used below.
sampF <- states [ aggSamp, , ]
setkey ( detections, fish.number )
sampDet <- detections [ J ( aggSamp ) ]
# Need to create a data.table of locations for fish in aggSamp. Use lapply
# on trueLocs to get one dt for each fish
trueList <- lapply ( X = aggSamp, FUN = trueLocs, nT = simCtl $ nT,
states = states )
# Bind them together into one dt
trueDT <- rbindlist ( trueList )
trueDT [ , agg := 1]
# And now we need to take the initial locations and add them to the detections
# Recover tagging locations
taggLocs <- trueDT [ time.stamp == 1 ]
# Bind to sampDet
sampDet <- rbind ( sampDet, taggLocs, use.names = FALSE )
# Oki doke, now we can start comparing habitat areas. Polygons are for plotting
# later, so we can keep these out of the analysis for now.
# Find area of MCP around detections of aggregate sample
mcpAggDetArea <- mcp.area ( xy = sampDet [ , c("boat.lat", "boat.lon"),
with = FALSE ],
id = sampDet [ , agg ], plotit = FALSE, percent = 100 )
# Find area of MCP of true locations of agg sample
mcpAggTrueArea <- mcp.area ( xy = trueDT [ , c("fish.lat", "fish.lon"),
with = FALSE ],
id = trueDT [ , agg ], plotit = FALSE,
percent = 100 )
# Results matrix: row 1 aggregate, then one row per inlet
# NOTE(review): row names assume exactly 4 inlets.
areas <- matrix ( 0, nrow = nInlets + 1, ncol = 3 )
dimnames ( areas ) [[ 1 ]] <- c ( "Aggregate", "Inlet1", "Inlet2",
"Inlet3", "Inlet4" )
dimnames ( areas ) [[ 2 ]] <- c ( "Detections", "True", "Ratio" )
areas [ 1, ] <- c ( as.numeric ( mcpAggDetArea ), as.numeric ( mcpAggTrueArea ),
as.numeric ( mcpAggDetArea ) /
as.numeric ( mcpAggTrueArea ) )
# Function to lapply and speed up sample bootstrapping - not working right
# now
# inletMCP <- function ( inlet = 1, samples = sampDet, true = trueDT )
# {
# # Reduce to fish from inlet i
# sampDetInlet <- samples [ inlet == inlet ]
# trueDTInlet <- true [ inlet == inlet ]
# # Find area of minimal convex polygon around detections
# mcpDetInletArea <- mcp.area ( xy = sampDetInlet [ , c("boat.lat", "boat.lon"),
# with = FALSE ],
# id = sampDetInlet [ , agg ],
# plotit = FALSE, percent = 100 )
# # Find area of MCP around true locations
# mcpTrueAreaInlet <- mcp.area ( xy = trueDTInlet [ , c( "fish.lat",
# "fish.lon" ),
# with = FALSE ],
# id = trueDTInlet [ , agg ], plotit = FALSE,
# percent = 100 )
# areas <- c ( as.numeric ( mcpDetInletArea ),
# as.numeric ( mcpTrueAreaInlet ),
# as.numeric ( mcpDetInletArea ) /
# as.numeric ( mcpTrueAreaInlet ) )
# return ( areas )
# }
# out <- lapply ( X = 1:nInlets,
# FUN = inletMCP,
# samples = sampDet,
# true = trueDT )
# Per-inlet areas: restrict both tables to one inlet and repeat the MCP
# computation
for ( i in 1:nInlets )
{
# Reduce to fish from inlet i
sampDetInlet <- sampDet [ inlet == i ]
trueDTInlet <- trueDT [ inlet == i ]
# Find area of minimal convex polygon around detections
mcpDetInletArea <- mcp.area ( xy = sampDetInlet [ , c("boat.lat", "boat.lon"),
with = FALSE ],
id = sampDetInlet [ , agg ],
plotit = FALSE, percent = 100 )
# Find area of MCP around true locations
mcpTrueAreaInlet <- mcp.area ( xy = trueDTInlet [ , c( "fish.lat",
"fish.lon" ),
with = FALSE ],
id = trueDTInlet [ , agg ], plotit = FALSE,
percent = 100 )
# Fill row of areas matrix
areas [ i + 1, ] <- c ( as.numeric ( mcpDetInletArea ),
as.numeric ( mcpTrueAreaInlet ),
as.numeric ( mcpDetInletArea ) /
as.numeric ( mcpTrueAreaInlet ) )
}
return ( areas )
}
# # The following functions are wrappers for the compareFunc function,
# # in order to allow for swift parallelisation and returning
# # of lists in a decent structure for analysis.
# sampSizeFun <- function ( sampleSize = 25, nSamp = nSamples,
# detDT = yearDetections [[ 1 ]],
# idx = inletFishIdx, states = F )
# {
# outList <- lapply ( X = 1:nSamp,
# FUN = repWrapper,
# sampSize = sampleSize, det = detDT,
# idx = idx, stateVars = states )
# return ( outList )
# }
# Wrapper suitable for parallel lapply: re-sources this file so worker
# processes have the helper functions in scope, then runs one bootstrap
# replicate of the habitat-area comparison.
# Args: times = replicate number (assigned to n but otherwise unused)
#       sampSize = fish sample size per inlet
#       det = detection data.table
#       idx = list of per-inlet fish indices
#       stateVars = fish state array
# Rets: areas = matrix from compareFunc (detected vs true habitat areas)
repWrapper <- function ( times = 1, sampSize = 25, det = detDT,
idx = idx, stateVars = states )
{
source ( "parFunctions.R" )
n <- times
areas <- compareFunc ( N = sampSize, detections = det,
indices = idx, states = stateVars )
return ( areas )
}
|
02b2f86f3a8f061a1385defa2d06bcd3fd017567
|
93de29e4e74c3858bdee8830ac5c929badfea54d
|
/inst/Bootstrap/app-orig.R
|
d35c1b07359bf13aa45089bed3d8d365273ac214
|
[] |
no_license
|
dtkaplan/LittleApp2
|
cab5fff8cbf28eb1d117fbd63edf5ae599748d11
|
5206c42537e4c54f27480fd6e954d91962ed958c
|
refs/heads/master
| 2021-06-30T03:25:26.777628
| 2020-06-26T17:58:03
| 2020-06-26T17:58:03
| 238,554,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
app-orig.R
|
library(shiny)
library(shinyWidgets)
library(shinyjs)
library(miniUI)
library(ggplot2)
library(ggformula)
library(LittleApp2)
library(dplyr)
library(triola)
library(sullystats6e)
library(mosaicData)
library(StatsUsingTechnologyData)
library(openintro)
library(Lock5Data)
options(warn = -1) # suppress warnings so log doesn't get crowded
# Assemble the Little App page from the shared panel builders exported by
# LittleApp2: top controls, data chooser (no covariate), graph,
# comparison and stats panels.
ui <- ui_main(
ui_top_controls(),
data_tab(covar = FALSE),
graph_panel(),
compare_panel(),
stats_panel() #, debug_panel()
)
# Server logic is sourced from service scripts rather than written
# inline, so the shared pieces can be reused across Little Apps.
server <- function(input, output, session) {
observe({
# These are here so that all the Little Apps can eventually be folded into
# a single URL
source(system.file("data_services.R", package="LittleApp2"), local = TRUE)
source(system.file("output_services.R", package = "LittleApp2"), local = TRUE)
source("Bootstrap_services.R", local = TRUE)
})
}
#options(shiny.reactlog = TRUE)
shinyApp(ui, server)
|
4909cffa3c0c39ef851d1bac4a68cd7213c3db0e
|
536309765aee39452c0c896ab7deb082b453b844
|
/meta-analysis-generation_new.R
|
05ae62263504af7ead05355f05bbaa633e147128
|
[] |
no_license
|
sachsmc/MultipleImputationForSurrogates
|
52c8a0b885d49f385110138b35ede6c3069b8fba
|
0ad40a50bc142744660684c04f636f1ae8a02780
|
refs/heads/master
| 2021-01-01T06:55:14.215500
| 2013-09-15T17:10:40
| 2013-09-15T17:10:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,994
|
r
|
meta-analysis-generation_new.R
|
# Simulate potential-outcome data for a binary endpoint with candidate
# surrogates.  Draws [S(0), S(1), W] from a trivariate normal with the
# given means/sds/correlations, calibrates probit intercepts via uniroot
# so the marginal incidences match inc.placebo and (1 - TE)*inc.placebo,
# then generates Bernoulli outcomes under both arms.
# Args: TE = target treatment effect (relative risk reduction)
#       sigma, mu = sds and means of (S(0), S(1), W)
#       inc.placebo = placebo-arm incidence; nnn = sample size
#       beta.* = probit slopes for each surrogate under each arm
#       rhos1W, rhos1s0, rhosoW = pairwise correlations
# Rets: list(sample = data frame of potential outcomes and surrogates,
#            output.betas = implied gamma coefficients)
samp.data.binary <- function(TE, sigma = c(1, 1, 1), mu = c(0,2,2), inc.placebo, nnn, beta.S.0.0=0, beta.S.0.1=0, beta.S.1.0, beta.S.1.1, beta.W.0=0, beta.W.1=0, rhos1W, rhos1s0, rhosoW){
require(MASS)
# Covariance of [S(0), S(1), W], built from the pairwise correlations
Sigma <- matrix(c(sigma[1]^2, rhos1s0*sigma[1]*sigma[2], rhosoW*sigma[1]*sigma[3], rhos1s0*sigma[1]*sigma[2],sigma[2]^2,
rhos1W*sigma[3]*sigma[2], rhosoW*sigma[1]*sigma[3], rhos1W*sigma[3]*sigma[2],
sigma[3]^2), nrow = 3) ## [S(0), S(1), W]
# NOTE(review): Sigma is passed positionally as mvrnorm's third
# argument (its Sigma parameter) -- correct, but easy to misread.
XXX <- mvrnorm(nnn, mu = mu, Sigma)
## determining alpha0 and alpha1
inc.vaccine <- (1 - TE)*inc.placebo
# Monte Carlo calibration sample (4000 draws) used to solve for the
# probit intercepts that hit the target incidences
find.XXX <- mvrnorm(4000, mu = mu, Sigma)
find.Y.0 <- function(alpha.0) mean(pnorm(alpha.0 + beta.S.0.0*find.XXX[,1] + beta.S.1.0*find.XXX[,2] + beta.W.0*find.XXX[,3])) - inc.placebo
find.Y.1 <- function(alpha.1) mean(pnorm(alpha.1 + beta.S.0.1*find.XXX[,1] + beta.S.1.1*find.XXX[,2] + beta.W.1*find.XXX[,3])) - inc.vaccine
alpha.0 <- uniroot(find.Y.0, interval = c(-100, 100))$root
alpha.1 <- uniroot(find.Y.1, interval = c(-100, 100))$root
# Per-subject outcome probabilities under control (0) and treatment (1)
p.Y.0 <- pnorm(alpha.0 + beta.S.0.0*XXX[,1] + beta.S.1.0*XXX[,2] + beta.W.0*XXX[,3])
p.Y.1 <- pnorm(alpha.1 + beta.S.0.1*XXX[,1] + beta.S.1.1*XXX[,2] + beta.W.1*XXX[,3])
## generate Ys
Y.0 <- unlist(lapply(p.Y.0, function(p) rbinom(1,1,p)))
Y.1 <- unlist(lapply(p.Y.1, function(p) rbinom(1,1,p)))
# Implied coefficients on the (intercept, Z, S(1), W, interactions) scale
output.betas <- c(alpha.0, alpha.1 - alpha.0, beta.S.1.0, beta.W.0, beta.S.1.1 - beta.S.1.0, beta.W.1 - beta.W.0)
names(output.betas) <- c("gamma.0.0", "gamma.1.0", "gamma.0.1", "gamma.0.2", "gamma.1.1", "gamma.1.2")
return(list(sample = data.frame(Y.0 = Y.0, Y.1 = Y.1, S.0 = XXX[,1], S.1 = XXX[,2], W = XXX[,3]), output.betas = output.betas))
}
# Turn a potential-outcomes sample into observed trial data: randomize
# treatment, reveal the arm-specific Y and S, and mask the surrogate for
# a subsample (and for all controls).
# Args: sample = data frame from samp.data.binary()$sample
#       prob.trt = probability of assignment to treatment
#       S.sampling = surrogate sampling scheme string, e.g. "simple 0.5"
#                    (simple random sampling is the only scheme
#                    implemented; the number is the sampled proportion)
# Rets: data frame with observed (S, W, Y, Z), the underlying potential
#       outcomes, and the surrogate-measured indicator R
# NOTE(review): the argument `sample` shadows base::sample; the call
# sample(1:length(SSS), ...) below still resolves to the base function
# because R's call lookup skips non-function bindings.
get.trial.data <- function(sample, prob.trt = .5, S.sampling = "simple 1"){
ZZZ <- rbinom( dim(sample)[1], 1, prob.trt)
YYY <- ifelse(ZZZ==1, sample$Y.1, sample$Y.0)
Y.0<- ifelse(ZZZ==1, NA, sample$Y.0)
Y.1<- ifelse(ZZZ==0, NA, sample$Y.1)
SSS <- ifelse(ZZZ==1, sample$S.1, sample$S.0)
## switch for sampling of S simple random is only one currently implemented
if(length(grep("simple", S.sampling)) > 0){
prop.S <- as.numeric(gsub("simple ", "", S.sampling))
missing.s <- sample(1:length(SSS), length(SSS)*(1-prop.S))
SSS[missing.s] <- NA
}
# the surrogate is never observed in the control arm
SSS[ZZZ==0] <- NA
R<-ifelse(is.na(SSS), 0,1)
return(data.frame(S = SSS, W = sample$W, Y = YYY, Z = ZZZ, S.0 = sample$S.0, S.1 = sample$S.1, Y.0=Y.0, Y.1=Y.1, R=R))
}
# Pool probit-model fits across multiply-imputed datasets with Rubin's
# rules (Qbar = mean estimate, Ubar = mean within-imputation variance,
# B = between-imputation variance).
# Args: dat = a dataset containing Y, Z and S.1
# Rets: list(beta = pooled coefficients, se.beta = pooled SEs)
# NOTE(review): the loop runs over the *global* `imputed.list` (not an
# argument) yet refits the SAME `dat` on every iteration, so all rows of
# all.fits are identical and B collapses to ~0.  Presumably each
# iteration was meant to fit imputed.list[[i]] -- confirm against the
# calling code before trusting these standard errors.
run.probit.model_data <- function(dat){
all.fits <- matrix(NA, nrow = length(imputed.list), ncol = 8)
for(i in 1:length(imputed.list)){
fit <- glm(Y ~ Z*S.1, data = dat, family = binomial(link = "probit"))
all.fits[i,] <- c(fit$coeff, (diag(vcov(fit))))
}
# Rubin's rules: columns 1:4 hold estimates, 5:8 their variances
Qbar <- colMeans(all.fits[,1:4])
Ubar <- colMeans(all.fits[,5:8])
B <- diag(cov(all.fits[,1:4]))
tot.se <- sqrt(Ubar + (1 + 1/length(imputed.list))*B)
return(list(beta = Qbar, se.beta = tot.se))
}
|
348e708e215499d0fecc067a62cdb2cac837204e
|
e5496bc9088c66ba5b9aba0b21df987f190847d8
|
/R/classify.R
|
099a19f036e7e519d2cdfdb285525d15b5d4f5bd
|
[] |
no_license
|
LeoPete83/minds_eye_forest
|
62abdd8014ab246867c1e50d98d650b2f755dd0e
|
303973f2bcf14a6f7e4d392b01e170e40d3ffa0b
|
refs/heads/master
| 2018-03-07T12:06:05.688807
| 2008-10-30T04:42:06
| 2008-10-30T04:42:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,956
|
r
|
classify.R
|
# assumptions:
# images_folder -- contains the name of folder of images we're going to classify
# forests_folder -- contains the name of the folder containing the group of forests we're going to use
# r_directory -- the base folder for the R scripts used in this script
# results_folder -- folder to store the results for the classification
# Move to the R script directory so the source() calls below resolve.
# NOTE(review): setwd() in a script is fragile, but the relative
# source() paths below depend on it; r_directory is supplied by the
# caller's environment.
setwd(r_directory)
# print(results_folder)
# print(forests_folder)
source('startup.R')
source('get_forests.R')
# Loop through all possibly trained rfs.  For each forest present in the
# workspace: source its feature-extraction function, compute features for
# the loaded images, classify them with the forest, and write the vote
# matrix (with image file names prepended) to the results folder.
possible_rfs <- possible_forests()
for (possible_rf in possible_rfs)
{
  print(possible_rf)
  # if this rf exists in our workspace
  if(exists(possible_rf))
  {
    print(paste("found forest:", possible_rf))
    # acquire the actual rf (possible_rf is just a string)
    # rf is the actual forest data structure
    rf <- get(possible_rf)
    # the feature function follows the "<forest>_features" naming
    # convention and lives in "<forest>_features.R"
    feature_function <- paste(possible_rf,"features",sep="_")
    feature_file <- paste(feature_function,"R",sep=".")
    source(feature_file)
    # Evaluate the feature function, passing in the images.
    # IMPROVED: do.call() on the function name replaces the previous
    # eval(parse(text = ...)) construction, which was fragile and unsafe.
    print(paste('getting features for:', possible_rf))
    timer <- system.time(
      features <- do.call(feature_function, list(images, gray_images))
    )
    print(timer)
    print(paste('features size:', dim(features)))
    # classify the features using the rf
    print('predicting')
    timer <- system.time(results <- predict(rf,features,type="vote", norm.votes=TRUE))
    print(timer)
    results <- cbind(image_names, results) #put filenames in
    # save results
    results_file <- paste(results_folder, "/", possible_rf,"_out.txt",sep="")
    write.table(results, file=results_file, sep=",", row.names=FALSE,quote=c(1), na="nil")
  }
}
|
8cde4672952d17965c1107ef86702932f9b8303c
|
d4d79819bd7250d08ff805eb7415db8e356f9344
|
/man/train.Rd
|
449ec7abe942694081b821c5435d1d1aeb61869f
|
[] |
no_license
|
mvaniterson/wbccPredictor
|
aba43dfa4a6e4cc354b10a317b400317de63b201
|
a4082a7b4c934917714bf0da3eca103894298fc1
|
refs/heads/master
| 2019-08-09T12:41:50.419181
| 2017-11-20T13:31:49
| 2017-11-20T13:31:49
| 111,414,168
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 903
|
rd
|
train.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/training.R
\name{train}
\alias{train}
\title{train}
\usage{
train(data, covariates, cellPercentages, model = formula(cellPercentages ~
covariates + data), ncomp = 50, keep.model = FALSE, ...)
}
\arguments{
\item{data}{matrix with gene expression(counts) or DNA methylation beta-values}
\item{covariates}{matrix of covariates to correct for in the model e.g. Age and Gender}
\item{cellPercentages}{matrix of cell percentages for which the predictor will be trained}
\item{model}{formula e.g. log10(cellPercentages+1)~covariates+data}
\item{ncomp}{total number of pls components that will be tested}
\item{keep.model}{logical default FALSE do not return full pls-model}
\item{...}{additional parameters for plsr see ?plsr}
}
\value{
prediction model plsr object
}
\description{
train predictor
}
\author{
mvaniterson
}
|
5293d046ac691bcf518af9cd65323ba9c31f7d05
|
d7629ee49c54708846411299b9efe174cffae655
|
/tests/testthat/test-get_stations_info.R
|
f5def8e1249365e2f9d9239b092e38279143998e
|
[
"MIT"
] |
permissive
|
dbandrews/noaastnr
|
9d1a12b4ca2ef710919ab7943908e86caaeff21d
|
522026593c2178a288120809cbcc55ea46c19e6e
|
refs/heads/main
| 2023-03-24T04:56:21.208947
| 2021-03-20T19:33:24
| 2021-03-20T19:33:24
| 350,523,680
| 0
| 0
|
NOASSERTION
| 2021-03-22T23:53:10
| 2021-03-22T23:53:10
| null |
UTF-8
|
R
| false
| false
| 3,094
|
r
|
test-get_stations_info.R
|
# Expected character widths of the station-metadata fields returned by
# get_stations_info(), plus the expected column count of the data frame.
usaf_len <- 6
wban_len <- 5
country_len <- 2
state_len <- 2
latitude_len <- 7
longitude_len <- 8
elevation_len <- 7
n_col <- 11
# Validate the stations data frame for both the full listing and the
# US-only subset; one random row is spot-checked for field widths.
for (col_id in c("all", "US")){
  stations_df <- get_stations_info(country=col_id)
  # random row used for the per-field format checks below
  row_id <- sample(c(1: nrow(stations_df)), 1)
  print(row_id)
  test_that("Dataframe should have 11 columns.", {
    expect_equal(ncol(stations_df), n_col)
  })
  test_that("Datetime columns should be represented as date time objects.", {
    expect_equal(class(stations_df$start)[1],"POSIXct")
    expect_equal(class(stations_df$end)[2],"POSIXt")
  })
  test_that("Character columns should be represented as character objects.", {
    expect_equal(class(stations_df$usaf),"character")
    expect_equal(class(stations_df$wban),"character")
    expect_equal(class(stations_df$station_name),"character")
    expect_equal(class(stations_df$country),"character")
    expect_equal(class(stations_df$state),"character")
    expect_equal(class(stations_df$call),"character")
    expect_equal(class(stations_df$latitude),"character")
    expect_equal(class(stations_df$longitude),"character")
    expect_equal(class(stations_df$elevation),"character")
  })
  # Field-width checks: a value passes when it is missing (NA/NULL) or has
  # exactly the documented width.
  # BUG FIX: these checks previously computed nchar(x == len) -- the
  # nchar() of a logical, which is always positive -- so they could never
  # fail.  The comparison is now nchar(x) == len.
  test_that("usaf should be of length 6", {
    check <- is.na(stations_df$usaf[row_id]) | is.null(stations_df$usaf[row_id]) | nchar(stations_df$usaf[row_id]) == usaf_len
    expect_equal(check, TRUE)
  })
  test_that("wban should be of length 5", {
    check <- is.na(stations_df$wban[row_id]) | is.null(stations_df$wban[row_id]) | nchar(stations_df$wban[row_id]) == wban_len
    expect_equal(check, TRUE)
  })
  test_that("state should be of length 2 or 'NULL' ", {
    check <- is.na(stations_df$state[row_id]) | is.null(stations_df$state[row_id]) | nchar(stations_df$state[row_id]) == state_len
    expect_equal(check, TRUE)
  })
  test_that("country should be of length 2 or 'NULL' ", {
    check <- is.na(stations_df$country[row_id]) | is.null(stations_df$country[row_id]) | nchar(stations_df$country[row_id]) == country_len
    expect_equal(check, TRUE)
  })
  test_that("latitude should be of length 7 or 'NULL' ", {
    check <- is.na(stations_df$latitude[row_id]) | is.null(stations_df$latitude[row_id]) | nchar(stations_df$latitude[row_id]) == latitude_len
    expect_equal(check, TRUE)
  })
  test_that("longitude should be of length 8 or 'NULL' ", {
    check <- is.na(stations_df$longitude[row_id]) | is.null(stations_df$longitude[row_id]) | nchar(stations_df$longitude[row_id]) == longitude_len
    expect_equal(check, TRUE)
  })
  test_that("elevation should be of length 7 or 'NULL' ", {
    check <- is.na(stations_df$elevation[row_id]) | is.null(stations_df$elevation[row_id]) | nchar(stations_df$elevation[row_id]) == elevation_len
    expect_equal(check, TRUE)
  })
}
# Input validation: get_stations_info() must reject malformed country codes.
# A non-character argument should trigger the type error message; a character
# argument of the wrong width should trigger the length error message.
test_that("Test failed for checking 'country_id' param type", {
  expect_error(get_stations_info(country_id = 123), "Country code must be entered as a string of length 2")
})
test_that("Test failed for checking 'country_id' param length", {
  expect_error(get_stations_info(country_id = "XXX"), "Country code must be of length 2")
})
|
27a5e6c18e3f23427d10a087f12c9a1be42e67c6
|
e5bddc0f7ae75dcf8f93ff3d620075077f93f00c
|
/hands on 1.R
|
23a2901e9332b693a012a5cd8c7cfd83f194add1
|
[] |
no_license
|
AlinaKosiakova/namudarbas
|
b3000bb58c858feafd0f3d9c49b64b274ccf3e5c
|
5685b5b1f981b756c8d5378c5b34505e1c212f91
|
refs/heads/master
| 2020-05-29T15:44:18.158237
| 2019-05-29T13:19:16
| 2019-05-29T13:19:16
| 189,229,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
hands on 1.R
|
# Quarterly real GDP for the Baltic states, fetched from Eurostat's national
# accounts table "namq_10_gdp" and plotted as chain-linked volume indices
# (2010 = 100).  Library attach order is preserved from the original script.
library(eurostat)
library(dplyr)
library(ggplot2)

gdp_raw <- get_eurostat("namq_10_gdp", stringsAsFactors = FALSE)

# Keep seasonally and calendar adjusted (SCA) real GDP (B1GQ, CLV_I10) for
# Lithuania, Latvia and Estonia from 2004 Q1 onwards.
baltic_gdp <- gdp_raw %>%
  filter(
    geo %in% c("LT", "LV", "EE"),
    time >= "2004-01-01",
    na_item == "B1GQ",
    s_adj == "SCA",
    unit == "CLV_I10"
  )

# One line per country, yearly x-axis ticks rotated for readability.
ggplot(baltic_gdp, aes(time, values, col = geo)) +
  geom_line() +
  scale_x_date(date_labels = "%Y", date_breaks = "1 year") +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  labs(
    title = "Real GDP in Lithuania, Latvia and Estonia, index 2010=100",
    subtitle = "Source: Eurostat (namq_10_gdp)",
    x = "Time",
    y = "Index"
  )
|
658a220c61398bf770cc12d839abb44715f39416
|
219bef690be341dd3da13958d25c7e4fda53a763
|
/tourism_russia.R
|
97d44af82a3859c8b2ae4c41320ed79f7e3fbf10
|
[] |
no_license
|
Temurgugu/tourism_transformation
|
6a169b60bc0895a9141a541c5408d524db98e0cf
|
fba73c812d7600a92e26d66acfb5e96a54d87aba
|
refs/heads/main
| 2023-08-25T11:38:07.852163
| 2021-10-02T17:43:45
| 2021-10-02T17:43:45
| 355,254,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,370
|
r
|
tourism_russia.R
|
# Setup for the Russia-trips tourism figures.
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (wipes the
# caller's workspace); kept unchanged here.
rm(list=ls()) # clear the workspace
#=== Temur Gugushvii ===
library(tidyr)
library(ggplot2)
library(tidyverse)
library(readxl)
library(hrbrthemes)
library(viridis)
library(plotly)
library(lubridate)
library(facetscales)   # facet_grid_sc(): per-panel scales
library(data.table)    # setnames()
# Facet strip labels for the four period panels (all intentionally blank).
event_names <- c(`i` = "",
                 `ii` = "",
                 `iii`= "",
                 `iv` = "")
# Per-panel y-axis scales, keyed by the same i..iv panel names.
# NOTE(review): in panel `i` the breaks run to 250000 while the limit is
# 25000 -- possibly a typo for seq(4000, 25000, 6000); confirm with author.
scales_y <- list(
  `i` = scale_y_continuous(limits = c(4000, 25000), breaks = seq(4000, 250000, 6000)),
  `ii` = scale_y_continuous(limits = c(2000, 150000), breaks = seq(2000, 150000, 40000)),
  `iii` = scale_y_continuous(limits = c(30000, 200000), breaks = seq(30000, 200000, 50000)),
  `iv` = scale_y_continuous(limits = c(10000, 280000), breaks = seq(10000, 280000, 40000))
)
# Load monthly counts of international travellers' trips (Russia) and build
# two working tables: a filtered long table for the line plot, and a
# year-over-year difference table for the bar plot.
tg_russia_trips <- readr::read_csv("data/salukvadze_fig_1_russia_trips.csv")
# Drop the years excluded from the line figure and force calendar month order
# (read_csv gives character months; fct_relevel fixes the factor ordering).
tg_russia_trips_filter <- tg_russia_trips %>%
  dplyr::filter(year != 2010, year != 2011,
                year != 2016, year != 2017) %>%
  mutate(month = fct_relevel(month, "January", "February", "March",
                             "April", "May", "June", "July", "August",
                             "September", "October", "November", "December"))
# Wide table: one column per year, then pairwise year-over-year differences.
# setnames() prefixes "y" so the columns are syntactic R names.
tg_russia_trips_wider <- tg_russia_trips %>%
  dplyr::select(year,int_travelers_trip, month) %>%
  tidyr::pivot_wider (names_from = year, values_from = int_travelers_trip) %>%
  setnames(old = c("2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"),
           new = c("y2007", "y2008", "y2009", "y2010", "y2011", "y2012", "y2013", "y2014", "y2015", "y2016", "y2017", "y2018", "y2019", "y2020")) %>%
  # Year_N = trips in year N minus trips in year N-1, per month.
  mutate(Year_2008 = y2008 - y2007,
         Year_2009 = y2009 - y2008,
         Year_2010 = y2010 - y2009,
         Year_2011 = y2011 - y2010,
         Year_2012 = y2012 - y2011,
         Year_2013 = y2013 - y2012,
         Year_2014 = y2014 - y2013,
         Year_2015 = y2015 - y2014,
         Year_2016 = y2016 - y2015,
         Year_2017 = y2017 - y2016,
         Year_2018 = y2018 - y2017,
         Year_2019 = y2019 - y2018,
         Year_2020 = y2020 - y2019) %>%
  dplyr::select(month, Year_2008 : Year_2020) %>%
  # Back to long form for faceting, with months in calendar order and the
  # unwanted difference years removed.
  tidyr::pivot_longer(!month, names_to = "index_year", values_to = "Trips") %>%
  mutate(month = fct_relevel(month, "January", "February", "March",
                             "April", "May", "June", "July", "August",
                             "September", "October", "November", "December")) %>%
  dplyr::filter(index_year != "Year_2010", index_year != "Year_2011", index_year != "Year_2012",
                index_year != "Year_2017", index_year != "Year_2020")
# Line plot: monthly trips, one colored line per year, faceted by `period`
# with per-panel y scales.
# NOTE(review): `period` is not created in the visible pipeline above --
# presumably added upstream to tg_russia_trips_filter; confirm it exists.
# NOTE(review): the labeller targets a facet variable named `time` while the
# facet is by `period`, so the blank event_names labels may never apply.
# FIXES: removed the trailing empty argument in the facet_grid_sc() call
# (`...),)` creates an empty argument, an error in R calls that forward to
# ...), and spelled out `rows =` instead of relying on partial matching of
# `row=`.
tgp_salukvadze_fig_1_russia_trips <- ggplot(tg_russia_trips_filter, aes(month, int_travelers_trip, color=as.factor(year), group=as.factor(year))) +
  geom_point()+
  geom_line()+
  theme(axis.title.x = element_text(colour="black", size=14, hjust=0.5),
        axis.title.y = element_text(colour="black", size=14, hjust=0.5),
        axis.text.x=element_text(angle = 90, hjust=0.5, size=14, colour="black"),
        axis.text.y=element_text(angle = 0, hjust=0.5, size=14, colour="black"),
        legend.position = "right",
        plot.caption = element_text(size=16, colour="black", hjust=0),
        plot.title=element_text(colour="black", size=20),
        strip.text.y = element_text( size = 14, color = "black", face = "bold.italic"),
        strip.background = element_rect(color="white", fill="white", size=0.5, linetype="solid"),
        legend.title=element_text(size=14),
        legend.text=element_text(size=14))+
  labs(title = "",
       subtitle ="",
       caption = "Source: Ministry of Internal Affairs of Georgia",
       color="Number of visitors")+
  xlab("Month")+
  ylab("")+
  facet_grid_sc(rows = vars(period),
                scales = list(y = scales_y),
                labeller = labeller(time=as_labeller(event_names)))+
  scale_color_manual(values=c('#E42411','#9F188F',"#2AE411", '#BE7A36','#EEEA0A',
                              '#9F9D18','#189F9D','#1A704E','#701A4A','#B56330',
                              '#646325','#BBB914','#32BB14','#0A41EE'))
# Save the figure at fixed physical size (mm) for publication.
ggsave("visualization/salukvadze_fig_1_russia_trips.png",
       plot = tgp_salukvadze_fig_1_russia_trips,
       units = "mm",
       width = 300,
       height = 175)
# Export the year-over-year difference table, then plot it as bar charts
# faceted by difference year with free scales.
#Export subdataset
write.csv(tg_russia_trips_wider,"data/salukvadze_fig_1_russia_trips_difference.csv", row.names = FALSE)
salukvadze_fig_1_russia_trips_difference <- ggplot(tg_russia_trips_wider, aes(month, Trips, fill = Trips))+
  geom_col()+
  theme(axis.title.x = element_text(colour="black", size=10, hjust=0.5),
        axis.title.y = element_text(colour="black", size=10, hjust=0.5),
        axis.text.x=element_text(angle = 90, hjust=0.5, size=10, colour="black"),
        axis.text.y=element_text(angle = 0, hjust=0.5, size=10, colour="black"),
        legend.position = "right",
        plot.caption = element_text(size=10, colour="black", hjust=0),
        plot.title=element_text(colour="black", size=10),
        strip.text.y = element_text(size = 10, color = "black", face = "bold.italic"),
        strip.background = element_rect(color="white", fill="white", size=0.5, linetype="solid"),
        legend.title=element_text(size=10),
        legend.text=element_text(size=10))+
  labs(title = "",
       subtitle ="",
       caption = "Source: Ministry of Internal Affairs of Georgia",
       color="")+
  xlab("Month")+
  ylab("The amount of difference")+
  scale_fill_viridis_c()+
  facet_wrap(vars(index_year), scales = "free")
# Save the figure at fixed physical size (mm).
ggsave("visualization/salukvadze_fig_1_russia_trips_difference.png",
       plot = salukvadze_fig_1_russia_trips_difference,
       units = "mm",
       width = 300,
       height = 200)
|
3321e06214438d2bc04e8eba6cf189ce9416b10c
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-r/h2o-package/R/datasets.R
|
a632c2e35dedce55841163a91afb220d8f829076
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,075
|
r
|
datasets.R
|
# Roxygen documentation stubs for the datasets shipped with this package.
# Each block documents one dataset; the trailing NULL is the conventional
# roxygen2 target for dataset-level documentation (there is no code object).
#' Australia Coastal Data
#'
#' Temperature, soil moisture, runoff, and other environmental measurements from
#' the Australia coast. The data is available from https://cs.colby.edu/courses/S11/cs251/labs/lab07/AustraliaSubset.csv.
#'
#' @docType data
#' @keywords datasets
#' @name australia
#' @format A data frame with 251 rows and 8 columns
NULL
#' United States Congressional Voting Records 1984
#'
#' This data set includes votes for each of the U.S. House of Representatives Congressmen on the 16
#' key votes identified by the CQA. The CQA lists nine different types of votes: voted for, paired for,
#' and announced for (these three simplified to yea), voted against, paired against, and announced
#' against (these three simplified to nay), voted present, voted present to avoid conflict of interest,
#' and did not vote or otherwise make a position known (these three simplified to an unknown disposition).
#'
#' @docType data
#' @keywords datasets
#' @name housevotes
#' @format A data frame with 435 rows and 17 columns
#' @source Congressional Quarterly Almanac, 98th Congress, 2nd session 1984, Volume XL: Congressional Quarterly Inc., Washington, D.C., 1985
#' @references Newman, D.J. & Hettich, S. & Blake, C.L. & Merz, C.J. (1998). UCI Repository of machine
#' learning databases [https://www.ics.uci.edu/~mlearn/MLRepository.html]. Irvine, CA: University of
#' California, Department of Information and Computer Science.
NULL
#' Edgar Anderson's Iris Data
#'
#' Measurements in centimeters of the sepal length and width and petal length and width,
#' respectively, for three species of iris flowers.
#'
#' @docType data
#' @keywords datasets
#' @name iris
#' @format A data frame with 150 rows and 5 columns
#' @source Fisher, R. A. (1936) The use of multiple measurements in taxonomic problems. Annals of Eugenics, 7, Part II, 179-188.
#'
#' The data were collected by Anderson, Edgar (1935). The irises of the Gaspe Peninsula, Bulletin of the American Iris Society, 59, 2-5.
NULL
#' Prostate Cancer Study
#'
#' Baseline exam results on prostate cancer patients from Dr. Donn Young at The Ohio State
#' University Comprehensive Cancer Center.
#'
#' @docType data
#' @keywords datasets
#' @name prostate
#' @format A data frame with 380 rows and 9 columns
#' @source Hosmer and Lemeshow (2000) Applied Logistic Regression: Second Edition.
NULL
#' Muscular Actuations for Walking Subject
#'
#' The musculoskeletal model, experimental data, settings files, and results
#' for three-dimensional, muscle-actuated simulations at walking speed as
#' described in Hamner and Delp (2013). Simulations were generated using OpenSim 2.4.
#' The data is available from \url{https://simtk.org/frs/index.php?group_id=603}.
#'
#' @docType data
#' @keywords datasets
#' @name walking
#' @format A data frame with 151 rows and 124 columns
#' @references Hamner, S.R., Delp, S.L. Muscle contributions to fore-aft and vertical body mass center accelerations over a range of running speeds. Journal of Biomechanics, vol 46, pp 780-787. (2013)
NULL
|
eb84ebf2881ddbd24dd9838a7f8adc525c63ab5a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cpk/examples/cmax.fn.Rd.R
|
eea67c834b75a0d3fdcbbbcd169b6ac6ee288566
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 192
|
r
|
cmax.fn.Rd.R
|
# Example for cpk::cmax.fn -- peak plasma concentration from an oral dose.
# (Extracted example for the cmax.fn help page.)
library(cpk)

# Inputs, passed positionally to cmax.fn(f, dpo, vd, ar, wtkg):
bioavail    <- 0.74  # f: oral bioavailability fraction
dose_po     <- 3440  # dpo: oral dose
vol_dist    <- 2.8   # vd: volume of distribution
accum_ratio <- 2.4   # ar: accumulation ratio
weight_kg   <- 86    # wtkg: body weight in kg

cmax <- cmax.fn(bioavail, dose_po, vol_dist, accum_ratio, weight_kg)
|
a59a4891063195c127b0af52220ec673aecc480d
|
4cb331b1a36fc2e9e972cd00b09669e38b10bf7e
|
/R/ROAD.R
|
1a52f3ae4073dd2877fce7fa1114495b509a1755
|
[] |
no_license
|
cran/TULIP
|
b199c02c7e00fcb0acb5f2e90c6af58beea31064
|
0b771e74a228909ea4c3d087655d56e1bb36f220
|
refs/heads/master
| 2021-06-22T04:41:48.775140
| 2021-01-04T16:10:06
| 2021-01-04T16:10:06
| 180,469,922
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
ROAD.R
|
# ROAD classifier fitted through a lasso DSDA solution path.
# x: n x p predictor matrix; y: class labels coded 1/2 (rows are indexed
# with y == 1 and y == 2 below).  Returns a list (class "ROAD") with the
# rescaled coefficient path `beta` and tuning sequence `lambda`.
ROAD <- function(x, y, standardize = FALSE, lambda = NULL, eps = 1e-07){
  # Fit DSDA with alpha = 1 (pure lasso penalty); the ROAD path is obtained
  # by rescaling this solution path.
  obj <- dsda(x, y=y, standardize=standardize, lambda=lambda, alpha=1, eps=eps)
  p <- dim(x)[2]
  n <- length(y)
  nlambda <- length(obj$lambda)
  # Drop the intercept (first row) from the coefficient path.
  beta <- obj$beta[2:(p+1),]
  lambda <- obj$lambda
  newbeta <- beta
  newlambda <- lambda
  # Class mean difference w = mu2 - mu1, used for the rescaling below.
  mu1 <- apply(x[y==1,],2,mean)
  mu2 <- apply(x[y==2,],2,mean)
  w <- mu2-mu1
  # Rescale each tuning value by 2 / (n * <beta_i, w>).
  for (i in 1:nlambda){
    newlambda[i] <- lambda[i]*2/sum(beta[,i]*w)/n
  }
  # Coerce to dense matrices (dsda may return a sparse coefficient matrix)
  # before the column-wise normalization by <beta_i, w>.
  beta <- as.matrix(beta)
  newbeta <- as.matrix(newbeta)
  newbeta <- sweep(newbeta, 2, t(beta)%*%w, FUN="/")
  outlist <- list(beta=newbeta, lambda=newlambda)
  class(outlist) <- c("ROAD")
  outlist
}
|
c4fb7f8df0d8596d328d46b5cad34d71c03fae13
|
4113f644f89d87cd5355db807a414e0fe669577b
|
/R/Sum.R
|
3eb8496f4632547a72ca0d9c3edfda433c553e3b
|
[] |
no_license
|
Aulide81/Descriptives
|
42784cc05c935675246a2764959aa4a855bd12ca
|
86ed1d9dd33fec577208cdfe15bd06860460c8ca
|
refs/heads/master
| 2021-05-15T04:19:47.743179
| 2018-02-08T13:46:11
| 2018-02-08T13:46:11
| 119,438,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 93
|
r
|
Sum.R
|
#' Sum of a numeric vector, optionally weighted.
#'
#' With only `x`, returns `sum(x)` with `NA`s removed.  With a weight vector
#' `w`, returns the weighted sum `sum(x * w)`, again removing `NA`s.
#' FIX: uses `TRUE` instead of the reassignable shorthand `T`.
#'
#' @param x A numeric vector.
#' @param w Optional numeric weights; recycled elementwise against `x` if
#'   lengths differ (standard R recycling).
#' @return A single numeric value.
Sum <- function(x, w){
  if(missing(w)){
    sum(x, na.rm = TRUE)
  }else{
    sum(x * w, na.rm = TRUE)
  }
}
|
90e100b466b6671c7520b261fb4c5d22bc3f20b0
|
4cb026d08546b07985505db16871ffb5a9e6a915
|
/man/cctcat.Rd
|
7286b34f3845f4d81be411ef13afff8c776c2726
|
[] |
no_license
|
cran/CCTpack
|
da689021e33c1cf7c37bd5cf55626ceff177ba9c
|
37e284813e8d7fa882c8767b71ed9487e150ffc7
|
refs/heads/master
| 2021-03-13T00:05:34.595876
| 2017-10-10T10:09:58
| 2017-10-10T10:09:58
| 17,678,242
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 859
|
rd
|
cctcat.Rd
|
\name{cctcat}
\alias{cctcat}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Accessor function for the model-estimated category boundaries (obtained from the model applied to the data). Applicable for the LTRM model only.
}
\description{
Outputs a table read out of the category boundary parameters of the model inference, as well as their credible intervals (posterior highest density intervals, HDIs), for each cluster.
}
\usage{
cctcat(cctfit)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{cctfit}{The \code{cctfit} object as obtained from the \code{cctapply()} function.}
}
\examples{
data(raterdata)
# cctfit <- cctapply(data = raterdata, clusters = 1, itemdiff = TRUE, samples = 10000,
# chains = 3, burnin = 2000, runchecks = FALSE)
# cctcat(cctfit)
}
|
eb4180e06234ab76e0080c9559930d3ad6fffa25
|
582ced3e41fa163afa55dc913c48ed6ed87472ac
|
/R/incomp.R
|
0bbd27920dac4975b72d8593222a69d0a1fde6c5
|
[] |
no_license
|
cran/parsec
|
70c8651b851fdf0ffbda532ee8c8b6d16571f929
|
a92f381f2df1aa8c058c26eb6da052910bc64756
|
refs/heads/master
| 2023-09-06T06:50:44.609182
| 2023-08-19T13:40:02
| 2023-08-19T14:30:52
| 37,854,653
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44
|
r
|
incomp.R
|
# S3 generic: dispatches on class(z) to incomp.<class> methods defined
# elsewhere in the package.
incomp <-
function(z) UseMethod("incomp")
|
3b61f39cff98f88e35baf0ef810b14d44d4d1f2b
|
c832ad6abdd00ded197d9661b7958cf5e7b19d05
|
/r/fatmed.R
|
37588f5915b288688e70537bf67fbd41b5787f8f
|
[] |
no_license
|
tuhulab/bfi-wholegrain
|
e0c1472f2e7ac108caedaa93e5d97b7281730dc8
|
bba047bf9a1b234a7a947352fef5cff6a73573c7
|
refs/heads/master
| 2022-03-13T15:22:52.932704
| 2019-10-01T19:43:07
| 2019-10-01T19:43:07
| 159,833,198
| 0
| 1
| null | 2019-04-25T14:25:47
| 2018-11-30T14:14:52
|
TeX
|
UTF-8
|
R
| false
| false
| 2,977
|
r
|
fatmed.R
|
# Setup for the FATMED urine metabolomics preprocessing script.
#.libPaths("C:/Program Files/R/R-3.5.3/library")
library(tidyverse)
library(xcms)     # LC-MS preprocessing (readMSData, findChromPeaks, ...)
library(readxl)   # read the MassLynx sample list
# ---- configure parallel computing (BiocParallel) ----
# registered()
# register(SerialParam())
# Serial backend chosen here; swap for SnowParam below to parallelize.
register(SerialParam())
#register(SnowParam(workers = 6))
##################################################
# ---- path to the sample list on the network (I:) drive ----
urine_sample_list_path<- "I:/SCIENCE-NEXS-NyMetabolomics/Personal folders/Tu/fatmed_cdf/urine_sample_list.xlsx"
#urine_sample_list_path <- file.path("C:","Users","czw814","projects","fatmed","data","urine_sample_list.xlsx")
# Read a MassLynx sample list exported to Excel and derive ion-mode polarity.
#
# The sheet has positional columns (filename, subject, MS file, MS tune file,
# inlet file, bottle, inject volume).  Polarity is inferred from the MS-method
# file name: rows whose name contains "pos" are positive mode, all others
# negative.  Returns a data.frame with filename, subject and polarity.
#
# FIXES: removed the unused local `polarity1`; replaced sapply() (whose
# return type depends on its input) with vapply(..., logical(1)).
read_MassLynx <- function(MassLynxSampleListPath=...){
  samplelist <- read_excel(MassLynxSampleListPath,col_names = c('filename',
                                                                'subject',
                                                                'MS_file',
                                                                'MS_Tune_file',
                                                                'Inlet_file',
                                                                'Bottle',
                                                                'Inject_volume'))
  filename <- samplelist[,1]
  subject <- samplelist[,2]
  polarity <- samplelist[,3]  # the MS-method file name encodes the polarity
  # TRUE where the MS file name of row i contains 'pos'.
  polarity_T_or_F <- vapply(seq_len(nrow(polarity)),
                            function(rownumber) grepl('pos', polarity[rownumber,]),
                            logical(1))
  polarity <- ifelse(polarity_T_or_F, 'pos', 'neg')
  pd <- data.frame(filename, subject, polarity = polarity)
  return(pd)
}
# Load the positive-mode urine samples from the portable (Toshiba) drive,
# inspect chromatograms, and run centWave peak detection.
#
# BUG FIX: the original referenced an undefined object `raw_data` in four
# places (fromFile, chromatogram, tic); only `raw_data_toshiba` is ever
# created, so those lines errored.  All uses now point at raw_data_toshiba.
urine_sample_list <- read_MassLynx(urine_sample_list_path)
## Positive-mode samples; CDF files live under D:/FATMED_urine with an "01"
## suffix appended to each MassLynx file name.
urine_pos_pd_toshiba <- urine_sample_list %>% filter(polarity=="pos") %>% mutate(path = file.path("D:","FATMED_urine",paste0(.data$filename,"01.CDF")))
# On-disk mode keeps spectra on disk and reads them lazily (memory friendly).
raw_data_toshiba <- readMSData(files = urine_pos_pd_toshiba$path,
                               mode = "onDisk",
                               msLevel. = 1,
                               centroided. = TRUE)
mzs <- mz(raw_data_toshiba)
# m/z values grouped per input file.
mzs_by_file <- split(mzs, f = fromFile(raw_data_toshiba))
# Base-peak chromatograms (max intensity per scan).
bpis <- chromatogram(raw_data_toshiba, aggregationFun = "max")
# Inspect the first file's chromatogram.
bpi_1 <- bpis[1,1]
head(rtime(bpi_1))
head(intensity(bpi_1))
length(intensity(bpi_1))
# centWave peak-picking parameters (ppm tolerance, expected peak widths, etc.)
cwp <- CentWaveParam(ppm=30,
                     peakwidth= c(2,20),
                     snthresh=4,
                     prefilter=c(2,15),
                     mzdiff=-0.001,
                     integrate = 1,
                     noise=15,mzCenterFun = "mean")
xdata <- findChromPeaks(raw_data_toshiba, param = cwp)
# Total ion current per file, as a QC overview.
tc <- split(tic(raw_data_toshiba), f = fromFile(raw_data_toshiba))
boxplot(tc)
## QC: check pooled samples' total ion current
raw_data_toshiba$phenoData
|
c84b0bd7d19aa146cd79a5ffac1dc4a4f0dfbf58
|
b81875d1dc66033329e6e82914cd08727dffc8bf
|
/R/normgcp.r
|
ec45024b1386b0c2e9509e3a8b00dac0bcf26116
|
[] |
no_license
|
cran/Bolstad
|
b4cb3d49c8edca8ebcc51fe89539a3d144e8de32
|
3dc15d83e44e4e5e120e91465ae7ca213ba4e699
|
refs/heads/master
| 2021-01-21T00:52:47.450624
| 2020-10-05T05:50:02
| 2020-10-05T05:50:02
| 17,678,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,308
|
r
|
normgcp.r
|
#' Bayesian inference on a normal mean with a general continuous prior
#'
#' Evaluates and plots the posterior density for \eqn{\mu}{mu}, the mean of a
#' normal distribution, with a general continuous prior on \eqn{\mu}{mu}
#'
#'
#' @param x a vector of observations from a normal distribution with unknown
#' mean and known std. deviation.
#' @param sigma.x the population std. deviation of the normal distribution
#' @param density distributional form of the prior density can be one of:
#' "normal", "unform", or "user".
#' @param params if density = "normal" then params must contain at least a mean
#' and possible a std. deviation. If a std. deviation is not specified then
#' sigma.x will be used as the std. deviation of the prior. If density =
#' "uniform" then params must contain a minimum and a maximum value for the
#' uniform prior. If a maximum and minimum are not specified then a
#' \eqn{U[0,1]} prior is used
#' @param n.mu the number of possible \eqn{\mu}{mu} values in the prior
#' @param mu a vector of possibilities for the probability of success in a
#' single trial. Must be set if density="user"
#' @param mu.prior the associated prior density. Must be set if density="user"
#' @param \dots additional arguments that are passed to \code{Bolstad.control}
#' @return A list will be returned with the following components:
#' \item{likelihood}{the scaled likelihood function for \eqn{\mu}{mu} given
#' \eqn{x} and \eqn{\sigma_x}{sigma.x}} \item{posterior}{the posterior
#' probability of \eqn{\mu}{mu} given \eqn{x} and \eqn{\sigma}{sigma.x}}
#' \item{mu}{the vector of possible \eqn{\mu}{mu} values used in the prior}
#' \item{mu.prior}{the associated probability mass for the values in
#' \eqn{\mu}{mu}}
#' @seealso \code{\link{normdp}} \code{\link{normnp}}
#' @keywords misc
#' @examples
#'
#' ## generate a sample of 20 observations from a N(-0.5,1) population
#' x = rnorm(20,-0.5,1)
#'
#' ## find the posterior density with a uniform U[-3,3] prior on mu
#' normgcp(x, 1, params = c(-3, 3))
#'
#' ## find the posterior density with a non-uniform prior on mu
#' mu = seq(-3, 3, by = 0.1)
#' mu.prior = rep(0, length(mu))
#' mu.prior[mu <= 0] = 1 / 3 + mu[mu <= 0] /9
#' mu.prior[mu > 0] = 1 / 3 - mu[mu > 0] / 9
#' normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior)
#'
#' ## find the CDF for the previous example and plot it
#' ## Note the syntax for sintegral has changed
#' results = normgcp(x,1,density="user",mu=mu,mu.prior=mu.prior)
#' cdf = sintegral(mu,results$posterior,n.pts=length(mu))$cdf
#' plot(cdf,type="l",xlab=expression(mu[0])
#' ,ylab=expression(Pr(mu<=mu[0])))
#'
#' ## use the CDF for the previous example to find a 95%
#' ## credible interval for mu. Thanks to John Wilkinson for this simplified code
#'
#' lcb = cdf$x[with(cdf,which.max(x[y<=0.025]))]
#' ucb = cdf$x[with(cdf,which.max(x[y<=0.975]))]
#' cat(paste("Approximate 95% credible interval : ["
#' ,round(lcb,4)," ",round(ucb,4),"]\n",sep=""))
#'
#' ## use the CDF from the previous example to find the posterior mean
#' ## and std. deviation
#' dens = mu*results$posterior
#' post.mean = sintegral(mu,dens)$value
#'
#' dens = (mu-post.mean)^2*results$posterior
#' post.var = sintegral(mu,dens)$value
#' post.sd = sqrt(post.var)
#'
#' ## use the mean and std. deviation from the previous example to find
#' ## an approximate 95% credible interval
#' lb = post.mean-qnorm(0.975)*post.sd
#' ub = post.mean+qnorm(0.975)*post.sd
#'
#'
#' cat(paste("Approximate 95% credible interval : ["
#' ,round(lb,4)," ",round(ub,4),"]\n",sep=""))
#'
#' ## repeat the last example but use the new summary functions for the posterior
#' results = normgcp(x, 1, density = "user", mu = mu, mu.prior = mu.prior)
#'
#' ## use the cdf function to get the cdf and plot it
#' postCDF = cdf(results) ## note this is a function
#' plot(results$mu, postCDF(results$mu), type="l", xlab = expression(mu[0]),
#' ylab = expression(Pr(mu <= mu[0])))
#'
#' ## use the quantile function to get a 95% credible interval
#' ci = quantile(results, c(0.025, 0.975))
#' ci
#'
#' ## use the mean and sd functions to get the posterior mean and standard deviation
#' postMean = mean(results)
#' postSD = sd(results)
#' postMean
#' postSD
#'
#' ## use the mean and std. deviation from the previous example to find
#' ## an approximate 95% credible interval
#' ciApprox = postMean + c(-1,1) * qnorm(0.975) * postSD
#' ciApprox
#'
#' @export normgcp
normgcp = function(x, sigma.x = NULL, density = c("uniform", "normal", "flat", "user"),
                   params = NULL, n.mu = 50, mu = NULL,
                   mu.prior = NULL, ...){
  ## Posterior density of a normal mean under a general continuous prior.
  ## x        - the vector of observations
  ## sigma.x  - the known population standard deviation; if NULL it is
  ##            estimated from the data (and reported unless quiet)
  ## density  - form of the prior: flat, normal, uniform, or user
  ## params   - prior parameters: mean (and optional sd) for "normal",
  ##            min and max for "uniform"
  ## n.mu     - number of grid points for mu
  ## mu, mu.prior - user-supplied grid and prior density (density = "user")
  ## Returns (invisibly) a "Bolstad" list with the grid, prior, scaled
  ## likelihood and posterior.
  mean.x = mean(x)
  if(n.mu < 3)
    stop("Number of prior values of mu must be greater than 2")
  if(is.null(sigma.x)){
    ## Note: sd(x - mean.x) is identical to sd(x); kept for clarity of intent.
    sigma.x = sd(x - mean.x)
    if(!Bolstad.control(...)$quiet){
      cat(paste("Standard deviation of the residuals :",
                signif(sigma.x,4),"\n", sep = ""))
    }
  }else{
    if(!Bolstad.control(...)$quiet){
      cat(paste("Known standard deviation :", signif(sigma.x, 4),"\n",sep=""))
    }
  }
  density = match.arg(density)
  if(density == 'flat'){
    if(is.null(mu)){
      ## BUG FIX: the original called qnorm(mean.x, sigma.x, c(0.005, 0.995)),
      ## passing the sample mean as the probability argument.  qnorm's
      ## signature is qnorm(p, mean, sd), so the grid must span the 0.5% and
      ## 99.5% quantiles of N(mean.x, sigma.x):
      bds = qnorm(c(0.005, 0.995), mean.x, sigma.x)
      mu = seq(from = bds[1], to = bds[2], length = n.mu)
    }
    ## Constant prior height, drawn at the density value 1.96 sd from the mean.
    height = dnorm(qnorm(0.975, mean.x, sigma.x), mean.x, sigma.x)
    mu.prior = rep(height, length(mu))
    likelihood = posterior = dnorm(mu, mean.x, sigma.x) ## flat prior has posterior mean equal to sample mean,
    ## and posterior variance equal to the observation variance
    ## NOTE(review): the shared plotting code at the bottom of this function
    ## also runs for the flat case, so the flat branch plots twice; kept for
    ## backward compatibility.
    if(Bolstad.control(...)$plot){
      plot(mu, posterior, ylim = c(0, 1.1 * max(posterior, mu.prior)), type = "l",
           lty = 1,col="blue",
           xlab = expression(mu), ylab = expression(Probabilty(mu)))
      abline(h = height, lty = 2, col = "red")
      legend("topleft", bty = "n", cex = 0.7,
             lty = 1:2, col = c("blue", "red"),
             legend = c("Posterior", "Prior"))
    }
  }else{
    if(density == 'normal'){
      if(is.null(params) | length(params) < 1)
        stop("You must supply a mean for a normal prior")
      mx = params[1]
      if(length(params) == 2) ## user has supplied sd as well
        s.x = params[2]
      else
        s.x = sigma.x
      ## Grid over +/- 3.5 prior sds around the prior mean.
      mu = seq(mx - 3.5 * s.x, mx + 3.5 * s.x, length = n.mu)
      mu.prior = dnorm(mu,mx,s.x)
    }else if(density == 'uniform'){
      if(is.null(params)){
        ## set params to mean+/-3.5sd by default
        params = c(mean.x - 3.5 * sigma.x, mean.x + 3.5 * sigma.x)
      }
      if(length(params)<2)
        stop("You must supply a minimum and a maximum to use a uniform prior")
      minx = params[1]
      maxx = params[2]
      if(maxx <= minx)
        stop("The maximum must be greater than the minimum for a uniform prior")
      mu = seq(minx, maxx, length = n.mu)
      mu.prior = dunif(mu, minx, maxx)
    }else{
      ## user specified prior
      if(is.null(mu) | is.null(mu.prior))
        stop("If you wish to use a non-uniform continuous prior then you must supply a mean vector, mu, and an associated density vector, mu.prior")
      if(is.function(mu.prior))
        mu.prior = mu.prior(mu)
    }
    if(any(mu.prior< 0))
      stop("Prior densities must be >=0")
    ## Normalize the prior if it does not integrate to (approximately) 1.
    crude.int = sum(diff(mu) * mu.prior[-1])
    if(round(crude.int, 3) != 1){
      warning("The prior probabilities did not sum to 1, therefore the prior has been normalized")
      mu.prior = mu.prior / crude.int
      print(crude.int)
    }
    n.mu = length(mu)
    mx = mean(x)
    nx = length(x)
    ## Likelihood of mu given the sample mean: N(mu, sigma.x^2 / n).
    snx = sigma.x^2/nx
    likelihood = exp(-0.5*(mx-mu)^2/snx)
    ## Numerically integrate the denominator
    ## First calculate the height of the function to be integrated
    f.x.mu = likelihood * mu.prior
    ## Now get a linear approximation so that we don't have to worry about
    ## the number of points specified by the user; 513 points gives an even
    ## number of panels for composite Simpson's rule below.
    ap = approx(mu,f.x.mu,n=513)
    integral = sum(ap$y[2*(1:256)-1]+4*ap$y[2*(1:256)]+ap$y[2*(1:256)+1])
    integral = (ap$x[2]-ap$x[1])*integral/3
    posterior = likelihood*mu.prior/integral
  }
  if(Bolstad.control(...)$plot){
    plot(mu, posterior, ylim = c(0, 1.1 * max(posterior, mu.prior)), type = "l",
         lty = 1,col="blue",
         xlab = expression(mu), ylab = expression(Probabilty(mu)))
    lines(mu,mu.prior,lty=2,col="red")
    legend("topleft", bty = "n", cex = 0.7,
           lty = 1:2, col = c("blue", "red"),
           legend = c("Posterior", "Prior"))
  }
  results = list(name = 'mu', param.x = mu, prior = mu.prior,
                 likelihood = likelihood, posterior = posterior,
                 mu = mu, mu.prior = mu.prior #for backwards compat. only
                 )
  class(results) = 'Bolstad'
  invisible(results)
}
|
acadc5e401e8de9314bfa82b6f6b0cad5502b424
|
6b8ca121b708901a0014cdbbd9a45d3d8a129bc3
|
/man/stk.Rd
|
16a3cb7f20876baa12b6240412d1fa26e03fade9
|
[] |
no_license
|
KVIsweco/DataBlauwgraslandRome
|
1633c6a808bbb44a3f942d2fe7456e8df16b02d1
|
f87fc6e563e39bf1f1bec12c76986d9c3d1119bc
|
refs/heads/master
| 2022-08-22T13:09:30.718995
| 2020-05-25T07:11:31
| 2020-05-25T07:11:31
| 263,053,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,060
|
rd
|
stk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{stk}
\alias{stk}
\title{stk}
\format{
An object of class \code{RasterStack} of dimension 771 x 386 x 9.
}
\usage{
stk
}
\description{
Kaarten met informatie over het gebied 'Rome' (Friesland).
}
\details{
\itemize{
\item \code{Veg}: 1 = blauwgrasland; 0 = geen blauwgrasland.
\item \code{GVG}, \code{GLG}, \code{GHG}: resp. gemiddelde voorjaarsgrondwaterstand, laagste grondwaterstand en hoogste grondwaterstand (cm-mv).
\item \code{GVGtovKl}, \code{GLGtovKl}: resp. Gemiddelde voorjaarsgrondwaterstand en laagste grondwaterstand (cm tov keileemniveau).
\item \code{Gt}: grondwatertrap: 10=Gt I; 20=Gt II; 25=Gt II-ster ; 30=Gt III; 35=Gt III-ster; 40=Gt IV; 45=Gt IV-ster; 50=Gt V;
55=Gt V-ster; 60=Gt VI; 70=Gt VII; 75=Gt VII-ster
\item \code{Bofek}: Bofek codes (\url{https://www.wur.nl/nl/show/Bodemkaart-1-50-000.htm})
\item \code{DiepteKl}: Diepte van de keileem (cm-mv)
}
}
\examples{
\dontrun{plot(stk$Veg)}
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.