blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8207990c8a0cc1715b54c0ccf633e8ed28905e56
|
3d9c9b128df84bfa2a3fd22ad50fd6ad5b36f329
|
/cachematrix.R
|
fdad92fb54e1be35ba09dd77dd6fba587ebd0952
|
[] |
no_license
|
jpjoines/ProgrammingAssignment2
|
71b0ef80ab5fe99eb589abd5385157cfbdf07b14
|
cc66acd1be2b6c766239e9f5974bf4e0b530b6cb
|
refs/heads/master
| 2021-01-20T17:50:12.635626
| 2015-12-26T22:43:36
| 2015-12-26T22:43:36
| 48,559,095
| 0
| 0
| null | 2015-12-24T23:12:40
| 2015-12-24T23:12:39
| null |
UTF-8
|
R
| false
| false
| 1,199
|
r
|
cachematrix.R
|
# Constructs a cache object for a matrix: a list of closures that store and
# retrieve both the matrix itself and its (lazily computed) inverse in the
# enclosing environment. Returns the list of accessor functions.
makeCacheMatrix <- function( x = matrix() )
{
    inv <- NULL
    # Replace the stored matrix and invalidate any cached inverse.
    set <- function( y )
    {
        x <<- y
        inv <<- NULL
    }
    # Return the stored matrix.
    get <- function() x
    # Cache a computed inverse.
    setminverse <- function( minverse ) inv <<- minverse
    # Return the cached inverse, or NULL if none has been stored yet.
    getminverse <- function() inv
    list( set = set, get = get, setminverse = setminverse, getminverse = getminverse )
}
# Computes (or retrieves) the inverse of the matrix held in a cache object
# produced by makeCacheMatrix(). If an inverse is already cached it is
# returned directly (with a message); otherwise the inverse is computed with
# solve(), stored back into the cache, and returned. Extra arguments in ...
# are forwarded to solve().
cacheSolve <- function( x, ... )
{
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getminverse()
    if( is.null( cached ) )
    {
        # Cache miss: compute, store, return.
        cached <- solve( x$get(), ... )
        x$setminverse( cached )
    }
    else
    {
        message( "getting cached matrix" )
    }
    cached
}
|
ec32dd34d09895a9369ffffe64641f514344cc78
|
ed245357e8f54163fb23e59602bb02a23d348803
|
/man/D3XtabHeat.Rd
|
ac5f276d9f770f671d148133fa7cdac1f6b6e33d
|
[] |
no_license
|
michaelgfalk/R2D3
|
07d565956f34c92159e6fecc17bbe28821170263
|
be84f424ebc3f78b3eb88db4eecdd0dbbe900856
|
refs/heads/master
| 2021-05-01T07:51:04.072043
| 2017-02-01T11:25:10
| 2017-02-01T11:25:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
rd
|
D3XtabHeat.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/D3.R
\name{D3XtabHeat}
\alias{D3XtabHeat}
\title{D3 XtabHeat}
\usage{
D3XtabHeat(JSON, file_out)
}
\arguments{
\item{JSON}{A json object}
\item{file_out}{the location and name for the output html file}
}
\description{
Creates an HTML file containing the JSON data and a D3.js cross-tab heat map.
The nested JSON needs values assigned to it in order for it to work.
}
\examples{
data<-data.frame(airquality$Month, airquality$Temp)
json<-jsonXtabs(data)
D3XtabHeat(json, file_out="heat_map.html")
}
\author{
James Thomson
}
\references{
http://bl.ocks.org/tjdecke/5558084
}
|
6cb4ca79c211d6ddf9bdf4d45c836d380f7b5dde
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/10474_2/rinput.R
|
25f593118f387fb284621070ebedb0b8dbe2575f
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read a phylogenetic tree, remove its root, and write the unrooted tree back
# to disk (Newick format via the ape package).
library(ape)
rooted_tree <- read.tree("10474_2.txt")
unrooted_tree <- unroot(rooted_tree)
write.tree(unrooted_tree, file = "10474_2_unrooted.txt")
|
b951fe13bec0668ccf48edcabdb5c1fbadb75868
|
0124b6a02692905922a3a8992cb123bb8e039e6b
|
/getArtificialBounds_safetosource.R
|
1853e95e190ff9e742302f2979c2414d3c026328
|
[
"MIT"
] |
permissive
|
pedlefsen/hiv-founder-id
|
ebcadf9563549fb9932237e148a0c93609d856f3
|
50ba8d2757cb3be15357b4bdeaea3fe0d2680eea
|
refs/heads/master
| 2020-12-24T05:58:03.939103
| 2019-07-07T21:36:09
| 2019-07-07T21:36:09
| 42,635,031
| 3
| 3
|
MIT
| 2019-02-08T12:36:25
| 2015-09-17T04:32:20
|
R
|
UTF-8
|
R
| false
| false
| 2,621
|
r
|
getArtificialBounds_safetosource.R
|
## Load the (artificial) infection-date bounds tables for one region/time
## combination.
##
## Ideally we'd use bounds on the actual infection time computed from the
## dates and results of the HIV positivity tests (antibody or PCR); the
## (fake) bounds used here are calculated in the
## createArtificialBoundsOnInfectionDate.R file and live in subdirs of the
## "bounds" subdirectory, eg
## /fh/fast/edlefsen_p/bakeoff/analysis_sequences/bounds/nflg/1m/, in files
## named "artificialBounds_*.tab".
##
## the.region: sequence-region subdirectory name (eg "v3", "rv217_v3").
## the.time: time-point subdirectory name (eg "1m").
## RESULTS.DIR: base results directory; must end with "/" because paths are
##     built by plain concatenation.
## results.dirname: results subdirectory under RESULTS.DIR.
## bounds.subdirname: subdirectory holding the bounds files (default "bounds").
##
## Returns a named list of data.frames (one per bounds file, named by the part
## between "artificialBounds_" and ".tab"), with "deterministic" entries
## excluded. For region "v3" only rows whose names start "100" + three digits
## are kept (caprisa seqs); for "rv217_v3" those rows are excluded instead.
getArtificialBounds <- function (
    the.region,
    the.time,
    RESULTS.DIR,
    results.dirname,
    bounds.subdirname = "bounds"
) {
    .artificial.bounds.dirname <-
        paste( RESULTS.DIR, results.dirname, "/", bounds.subdirname, "/", the.region, "/", the.time, "/", sep = "" );
    ## Fix: escape the dot and anchor the extension; the original pattern
    ## "artificialBounds_.*.tab" also matched names like "...Xtab" or "....tabx".
    artificial.bounds.filenames <-
        dir( .artificial.bounds.dirname, pattern = "artificialBounds_.*\\.tab$", recursive = FALSE, full.names = TRUE );
    names( artificial.bounds.filenames ) <- gsub( "^.*artificialBounds_(.*)\\.tab$", "\\1", artificial.bounds.filenames );
    the.artificial.bounds <- lapply( names( artificial.bounds.filenames ), function ( .artificial.bounds.name ) {
        .tbl <- read.table( artificial.bounds.filenames[[ .artificial.bounds.name ]], header = TRUE, sep = "\t" );
        # Special: for v3, only use caprisa seqs (not rv217, for now).
        if( the.region == "v3" ) {
            .tbl <-
                .tbl[ grep( "^100\\d\\d\\d", rownames( .tbl ) ), , drop = FALSE ];
        } else if( the.region == "rv217_v3" ) {
            .tbl <-
                .tbl[ grep( "^100\\d\\d\\d", rownames( .tbl ), invert = TRUE ), , drop = FALSE ];
        }
        return( .tbl );
    } );
    names( the.artificial.bounds ) <- names( artificial.bounds.filenames );
    ## Special: exclude the deterministic bounds, for now.
    the.artificial.bounds <-
        the.artificial.bounds[ grep( "deterministic", names( the.artificial.bounds ), invert = TRUE ) ];
    return( the.artificial.bounds );
} # getArtificialBounds (..)
|
6b6a54cf3fd625ff3d1a80aaf9229dd24d8c4a3f
|
5e09457601ecc54eb38a64f2ca0a5d0296213d64
|
/tests/demo.R
|
2f185902fbfef558f57eacc42b5b48f6d0b96051
|
[] |
no_license
|
christopherkenny/eir
|
97ce4bc549878dfad871b9b3e6da737b2ed8b7a4
|
4db92e7d8157a940ddfeed45d06da75d512bf025
|
refs/heads/master
| 2023-07-15T01:09:26.392256
| 2021-08-27T20:49:24
| 2021-08-27T20:49:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
demo.R
|
# Demo: ecological inference with the 'ei' package on its bundled sample data.
library(ei)
data(sample)
# Model formula: t as a function of x.
form <- t ~ x
# Fit the EI model; "n" names the column of per-unit totals.
dbuf <- ei(form, total = "n", data = sample)
summary(dbuf)
# Extract the "betab" and "betaw" quantities from the fitted object.
eiread(dbuf, "betab", "betaw")
# Plot the tomography, betab, betaw, and xtfit panels.
plot(dbuf, "tomog", "betab", "betaw", "xtfit")
|
ad75e3169d3b1d4717eb2f6b816380d8646222c5
|
c2a7726ca1fba2cb518a812a7c3d3b0c134304d5
|
/Solutions/day_23.R
|
3593aea892687029a751e8f440329eda531718f9
|
[] |
no_license
|
zapateros/aoc2015
|
adf6754cda053646b2706cdc35b3dd9ee95b2fc8
|
a290669d58b3c77b6f4d8fd2a630cc36229c3f32
|
refs/heads/master
| 2020-04-28T11:59:12.433139
| 2019-03-23T18:49:12
| 2019-03-23T18:49:12
| 175,261,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
day_23.R
|
# Advent of Code 2015 day 23: simulate a tiny two-register machine.
input <- readLines("input_day_23.txt")
# Jump offsets: strip everything except digits and '-'; non-jump lines become NA.
jmps <- as.numeric(gsub("[^[:digit:]-]","", input))
# Rewrite register names as indices (a -> 1, b -> 2) and drop commas.
inp <- gsub(",", "", input)
inp <- gsub("a", "1", inp)
inp <- gsub("b", "2", inp)
# Instruction pointer and program length, read by run() below.
i <- 1
i_max <- length(inp)
# Execute the program in the global vector `inp` (register names already
# rewritten to indices 1/2) using the precomputed jump offsets in `jmps`.
#
# a, b: starting values of registers 1 ("a") and 2 ("b").
# Returns the final value of register b once the instruction pointer runs
# past the end of the program.
#
# Fix: the original read the instruction pointer `i` and `i_max` from the
# global environment on first use (it only worked because the first local
# assignment then shadowed them); both are now local so run() is
# self-contained. The unreachable `break` after return() was removed.
run <- function(a, b){
  regs <- c(a, b)
  i <- 1                  # instruction pointer, local to this call
  i_max <- length(inp)    # program length
  while(TRUE){
    rel <- unlist(strsplit(inp[i], " "))
    if(rel[1] == "inc"){
      regs[as.numeric(rel[2])] <- regs[as.numeric(rel[2])] + 1
      i <- i + 1
    }else if(rel[1] == "jmp"){
      # Unconditional jump by the offset parsed from this line.
      i <- i + jmps[i]
    }else if(rel[1] == "jio"){
      # "jump if one" (equals 1, not odd).
      if(regs[as.numeric(rel[2])] == 1){
        i <- i + jmps[i]
      }else{
        i <- i + 1
      }
    }else if(rel[1] == "hlf"){
      regs[as.numeric(rel[2])] <- regs[as.numeric(rel[2])] / 2
      i <- i + 1
    }else if(rel[1] == "tpl"){
      regs[as.numeric(rel[2])] <- regs[as.numeric(rel[2])] * 3
      i <- i + 1
    }else if(rel[1] == "jie"){
      # "jump if even".
      if((regs[as.numeric(rel[2])] %% 2) == 0){
        i <- i + jmps[i]
      }else{
        i <- i + 1
      }
    }
    # Halt when the pointer leaves the program; the answer is register b.
    if(i > i_max){
      return(regs[2])
    }
  }
}
# Part One: both registers start at 0.
run(0, 0)
# Part Two: register a starts at 1.
run(1, 0)
|
620767a83ca0d3b37b0e3a0f2fb9a42ba7d8cd10
|
580a265a0edc389fc7c9750fd95d23f21e5d92ae
|
/pkg/retistruct/man/solveMappingCart.Rd
|
19454004170dc0522ec4783302ef38208193ef89
|
[] |
no_license
|
bala5411/retistruct
|
20e8c132a603e222e876c1f5cec085449d4188d9
|
63ddb4719ca3f9759f7ea26c490795a04944563c
|
refs/heads/master
| 2020-03-11T11:18:17.607163
| 2017-08-11T20:24:44
| 2017-08-11T20:24:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 903
|
rd
|
solveMappingCart.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spheristruct.R
\name{solveMappingCart}
\alias{solveMappingCart}
\title{Optimise mapping}
\usage{
solveMappingCart(r, alpha = 4, x0 = 0.5, nu = 1, method = "BFGS",
plot.3d = FALSE, dev.flat = NA, dev.polar = NA, ...)
}
\arguments{
\item{r}{reconstructedOutline object}
\item{alpha}{Area penalty scaling coefficient}
\item{x0}{Area penalty cutoff coefficient}
\item{nu}{Power to which to raise area}
\item{method}{Method to pass to \code{optim}}
\item{plot.3d}{If \code{TRUE} make a 3D plot in an RGL window}
\item{dev.flat}{Device handle for plotting grid to}
\item{dev.polar}{Device handle for plotting polar plot to}
\item{...}{Extra arguments to pass to \code{\link{fire}}}
}
\value{
reconstructedOutline object
}
\description{
Optimise the mapping from the flat outline to the sphere
}
\author{
David Sterratt
}
|
496892dc45fe40559d3cf40bce9b4adfc4f67cd0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TSA/examples/tar.Rd.R
|
82ac20e6f905f3ec709a3f5d37536d8cfb2e1424
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
tar.Rd.R
|
# Extracted example code for TSA::tar (threshold autoregressive model).
library(TSA)
### Name: tar
### Title: Estimation of a TAR model
### Aliases: tar
### Keywords: methods
### ** Examples
data(prey.eq)
# Fit a two-regime TAR model to the log prey series: AR order 4 in each
# regime, delay 3; a/b bound the threshold search range (see ?TSA::tar).
prey.tar.1=tar(y=log(prey.eq),p1=4,p2=4,d=3,a=.1,b=.9,print=TRUE)
|
922a904665fafa056ef59f88801fd45a89c2a323
|
1a4ef10027c3198bfa4abea1c50b6fc0a204436e
|
/man/diagnostics.bayes_paired_wilcox_test.Rd
|
0b1924b2661da7f60340f3d86865f16c783e53da
|
[] |
no_license
|
joereinhardt/BayesianFirstAid-Wilcoxon
|
571450ca2e9ff9aca510f58be82a2fb7d65d3286
|
3468f53f4861fa39f09690b73f08b68df3d35f6e
|
refs/heads/master
| 2021-03-30T23:51:09.786665
| 2018-04-24T04:23:32
| 2018-04-24T04:23:32
| 124,392,296
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
diagnostics.bayes_paired_wilcox_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wilcox_test.R
\name{diagnostics.bayes_paired_wilcox_test}
\alias{diagnostics.bayes_paired_wilcox_test}
\title{Plots and prints diagnostics regarding the convergence of the model.}
\usage{
diagnostics.bayes_paired_wilcox_test(x)
}
\arguments{
\item{x}{The output from a paired sample Bayesian Wilcox test model.}
}
\description{
Plots and prints diagnostics regarding the convergence of the model.
}
|
5279733af88422a9b13e8a210892063b777b5a5c
|
094c74d7322d36af16f269f770ff3555859975cd
|
/cvbank.R
|
763e20caff8af399fe94cd8de87a42ea3a6a6d5c
|
[] |
no_license
|
vema555/logit
|
4104b064c655a58fc8ae3ce1e2ac5f9701f13fa9
|
4fe17f55ef289ef346ffac088c2f47d37bb86b35
|
refs/heads/master
| 2021-01-21T23:16:50.892052
| 2014-10-23T15:53:40
| 2014-10-23T15:53:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 575
|
r
|
cvbank.R
|
# 10-fold cross-validated logistic regression on a semicolon-separated
# bank.csv dataset, scoring each held-out fold by AUC.
# NOTE(review): prediction() and performance() belong to the ROCR package,
# which is never loaded here -- this script errors unless ROCR is attached
# elsewhere; confirm and add library(ROCR).
library(cvTools)
fnm <- "/Users/vema/Documents/pyscr/datasets/bank/bank.csv"
X <- read.csv(fnm, header=T, sep=";")
N = nrow(X)
k = 10
# Assign the N rows to k folds in consecutive (non-random) order.
kf = cvFolds(N, K=k, type="consecutive")
aucvec <- rep(0, k)
for (i in 1:k) {
# Fold i is the test set; everything else trains.
trainData <- X[kf$which != i,]
testData <- X[kf$which == i,]
print(c(nrow(trainData), nrow(testData)))
# Logistic regression of y on all remaining columns.
glmobj <- glm(y~., data=trainData, family=binomial)
# Linear-predictor scores for the held-out fold.
pred <- predict(glmobj, testData, type="link" )
fitpred = prediction(pred, testData$y)
auc.tmp <- performance(fitpred,"auc");
auc <- as.numeric(auc.tmp@y.values)
aucvec[i] <- auc
}
|
ecbd51d9e9cec85c8f8ff5f7749a61191748f4d3
|
58c64e45d3296be1dd29c37e954badbae09a543b
|
/man/relativize.Rd
|
a878b419d6ce5c2e28eb660c61a4fff58186e740
|
[] |
no_license
|
jtfeld/EloOptimized
|
0dde61d734b92020e90bf13b3a30b4c00f8a64e6
|
010fd7ae15806d6e60d6d626ff6cda59fe8bf8ab
|
refs/heads/master
| 2023-05-10T23:04:48.715680
| 2023-05-09T13:48:19
| 2023-05-09T13:48:19
| 147,549,979
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 414
|
rd
|
relativize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal_fns.R
\name{relativize}
\alias{relativize}
\title{internal fn to relativize rank scores}
\usage{
relativize(x)
}
\arguments{
\item{x}{input vector}
}
\value{
returns new vector of scaled rank scores
}
\description{
internal function for generating scaled cardinal ranks
}
\details{
scales cardinal Elo scores between 0 and 1
}
|
9ea831a6db8dd97ae099ae52fa096af3f43747e3
|
5137f6f49055a6d75b96f1b1b0c30b055636e44e
|
/tests/testthat.R
|
b7b5d815816ea948c4db027ed63d2ba995cace73
|
[] |
no_license
|
cran/rODE
|
d23abb178718e5d79aa6eba3591f398f485ca201
|
42190459c2b840012a6277018668051f2987ef43
|
refs/heads/master
| 2021-01-22T10:28:29.418155
| 2017-11-10T03:17:51
| 2017-11-10T03:17:51
| 92,644,502
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 78
|
r
|
testthat.R
|
# Test driver: run the rODE package's testthat suite with the summary reporter.
library(testthat)
library(rODE)
test_check("rODE", reporter = "summary")
|
ac073ed70812e81ebeefdce2ba39bec5a919124e
|
540c836f8ea5792121479d4c4a21184781bf5f23
|
/foundation.R
|
34ce7614b0e421021745d2ab24252241c4b6e0b0
|
[] |
no_license
|
darkos/invesco
|
bc79cb0f29b6a5bf39dc5bb08c698ddd50efd36b
|
3eda24906b8288f9be213712b9e0fd4d9deb51fc
|
refs/heads/master
| 2020-06-30T01:42:48.019777
| 2018-07-02T02:13:31
| 2018-07-02T02:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,028
|
r
|
foundation.R
|
# Download S&P 500 constituents from Wikipedia and their daily prices from
# Yahoo Finance, then compute daily returns per ticker.
library(htmltab)
library(quantmod)
# library(tidyverse)
library(tidyquant)
library(ggplot2)
# match("MMM",names(test))
# In data preparation, I use Wikipedia as the data source for the list of S&P 500 components and Yahoo Finance
# as the data source for daily prices. The process in downloading the data can be very time-consuming because
# of the restrictions from the server side. So rather than request the data every time, I download the data with
# long enough horizon and store it locally. And new request will be performed only when extra data needed. Indeed,
# it would be much better if I can build a database locally.
######################## Download data #################################################################
url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
# NOTE(review): the XPath and the `Ticker symbol` column name depend on the
# current Wikipedia page layout -- verify they still match before running.
SP500 = htmltab(url, which = '//*[@id="mw-content-text"]/div/table[1]')
SP500.list = SP500$`Ticker symbol`
# Yahoo uses '-' where Wikipedia uses '.' (eg BRK.B -> BRK-B).
SP500.list = sapply(SP500.list, function(x) gsub("\\.", "-",x[1]))
#### Store the stock data in new environment DATA
DATA = new.env()
# ss = SP500.list[1:2]
L = length(SP500.list)
Dates = as.Date(c("1998-06-18","2018-06-18"))
# NOTE(review): try() silently ignores failed downloads, so some tickers may
# be missing from DATA without warning.
for(i in 1:L){
try(
getSymbols(SP500.list[i],DATA,from = Dates[1],to = Dates[2], return.class="data.frame")
)
}
######################## Process data ###################################################################
#### get the data from the environment
Data <- eapply(DATA, "[")
s = SP500.list[1]
test = Data
L = length(Data)
#### preprocess the data and calculate daily returns
for (i in 1:L) {
# i = 1
lst = test[[i]]
colnames(lst) = c("open","high","low","close","volume","adjusted")
p = lst$adjusted
# Simple daily return from adjusted close; first day has no return (NA).
r = diff(p)/p[-length(p)]
r = c(NA,r)
lst = cbind.data.frame(lst,r)
# Promote the date rownames to a proper first column.
lst = cbind(as.Date(rownames(lst)),lst)
colnames(lst)[1] = "date"
colnames(lst)[8] = "gain"
test[[i]] = lst
}
######################## Save data #####################################################################
#
# saveRDS(Dates,"dates.RDS")
# saveRDS(test,file = 'Data.rds')
|
42db350763c50a8e124ea92b4dfef13cdfe0fe58
|
dd838895f868fb55021c7c3d1a49e89be1de1be4
|
/apps/utils_singular_value_decomposition.R
|
a0b6077330e02759a96b705f1608a5eb97139464
|
[] |
no_license
|
massonix/math_teaching
|
06067ee357649ec8f5afd9e7e78f04afa2771706
|
ca2188c133974ea490aa371a1a716f72277d8ec1
|
refs/heads/master
| 2021-11-30T16:10:04.056641
| 2021-11-15T09:56:08
| 2021-11-15T09:56:08
| 235,402,510
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,437
|
r
|
utils_singular_value_decomposition.R
|
library(ggforce)
library(dplyr)
norm_vec <- function(x) sqrt(sum(x^2))
# Plot two 2-D vectors (i in red, j in blue) from the origin, plus an ellipse
# whose semi-axes are the vector norms, rotated by `ang` radians. Used below
# to visualise each stage of an SVD. Returns a ggplot object (needs the
# ggplot2/ggforce packages loaded above).
plot_circle_svd <- function(i, j, ang) {
norm_i <- norm_vec(i)
norm_j <- norm_vec(j)
p <- ggplot() +
geom_segment(
aes(x = 0, y = 0, xend = i[1], yend = i[2]),
color = "red",
arrow = arrow(length = unit(0.3, "inches"))
) +
geom_segment(
aes(x = 0, y = 0, xend = j[1], yend = j[2]),
color = "blue",
arrow = arrow(length = unit(0.3, "inches"))
) +
# NOTE(review): geom_line() has no data/mapping here and draws nothing --
# presumably a leftover; confirm before removing.
geom_line() +
geom_ellipse(aes(x0 = 0, y0 = 0, a = norm_i, b = norm_j, angle = ang)) +
coord_fixed() +
labs(x = "x", y = "y") +
theme_bw()
p
}
# Demo: visualise A = U D V^t acting on the right singular vectors, one
# factor at a time, using plot_circle_svd() above.
# Initialize
mat <- matrix(c(1,2,2,4), 2, 2, byrow = FALSE)
svd_mat <- svd(mat)
# Start from the two right singular vectors (columns of V).
i <- svd_mat$v[, 1]
j <- svd_mat$v[, 2]
initial_circle <- plot_circle_svd(i, j, ang = 0)
initial_circle
# Multiply by Vt
vt_i <- drop(t(svd_mat$v) %*% i)
vt_j <- drop(t(svd_mat$v) %*% j)
vt_circle <- plot_circle_svd(vt_i, vt_j, ang = 0)
vt_circle
# Multiply by D
# Diagonal matrix of singular values.
D_mat <- matrix(c(svd_mat$d[1], 0, 0, svd_mat$d[2]), 2, 2, byrow = FALSE)
d_vt_i <- drop(D_mat %*% vt_i)
d_vt_j <- drop(D_mat %*% vt_j)
d_vt_circle <- plot_circle_svd(d_vt_i, d_vt_j, ang = 0)
d_vt_circle
# Multiply by U
u_d_vt_i <- drop(svd_mat$u %*% d_vt_i)
u_d_vt_j <- drop(svd_mat$u %*% d_vt_j)
# Rotation introduced by U (matlib::angle); used to orient the final ellipse.
ang1 <- drop(matlib::angle(d_vt_i, u_d_vt_i, degree = FALSE))
u_d_vt_circle <- plot_circle_svd(u_d_vt_i, u_d_vt_j, ang = -1*ang1)
u_d_vt_circle
|
ac5e9c6d79243c2c5a63bb4d21dcd36cc8e9dbd7
|
3abb0d4b0dd74c5e14eb2d249941775017abe0ca
|
/man/FAR.control.Rd
|
33ff7d81444e7dd9f2a6ee66ba6853be378a5cdb
|
[] |
no_license
|
donghwan/VRclust
|
51fd6146f689ac20c8780b1bea2898bd5e87157d
|
cbddc2997d3c57f092d4745b3ba70ea5e32fd48d
|
refs/heads/master
| 2020-03-23T05:01:16.769171
| 2018-07-16T12:06:30
| 2018-07-16T12:06:30
| 140,700,655
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,304
|
rd
|
FAR.control.Rd
|
\name{FAR.control}
\alias{FAR.control}
\title{
Construct an optimal clustering rule for controlling the false assignment rate at a predetermined level
}
\description{
\code{FAR.control} provides the optimal clustering rule for controlling the false assignment rate at the predetermined level.
}
\usage{
FAR.control(clust, class=1, lambda=NULL, level=0.05)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{clust}{ clustering object from \code{vrclust.fit}}
\item{class}{ Label number of targeted cluster which be controlled with the FAR level}
\item{lambda}{A vector of relative cost lambda. When controlling FAR, the default is NULL.}
\item{level}{Level of targeted FAR}
}
\value{
\item{delta}{A vector of cluster labels of the optimal clustering rule}
\item{lambda}{relative cost corresponding to FAR level}
}
\references{
Donghwan Lee, Dongseok Choi, and Youngjo Lee. VRclust: An optimal clustering algorithm with varying risks of false assignments across clusters - 2018 Submitted.
}
\author{
Donghwan Lee, Dongseok Choi, and Youngjo Lee
Maintainer: Donghwan Lee <donghwan.lee@ewha.ac.kr>
}
\examples{
mc3k.clust<-vrclust.fit(x.irop, K=3, family="normal")
## FAR control at 0.01
mc3k.01<-FAR.control(mc3k.clust,class=3,level=0.01)
vrclust.est(mc3k.clust,mc3k.01$delta)
}
|
77a5b9e2bdbacd0b7499c00ed346fe553ac6e1c1
|
0698416a0f0d4c2938fc3119e9fed227207321fd
|
/man/prediction_map.Rd
|
8aa898e8cf0fcdb0d5cfd674b39b10544d668621
|
[] |
no_license
|
ctclarks99/HealthEVV
|
603d4c25efc424620aee66264f0ddf49ff33503b
|
be5bdb36d36ea73329198af1d045c13ef568249f
|
refs/heads/master
| 2020-05-12T20:14:41.289743
| 2019-04-23T19:07:10
| 2019-04-23T19:07:10
| 181,543,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 380
|
rd
|
prediction_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Web_Prep2.R
\name{prediction_map}
\alias{prediction_map}
\title{Find correct ggplot to return}
\usage{
prediction_map(input1, input2)
}
\arguments{
\item{input1}{Selected response from shiny UI}
\item{input2}{Selected plot type from shiny UI}
}
\value{
correct ggplot
}
\description{
Returns ggplot
}
|
00bdf45a01b0461eb6d7f6baf6a9d3c899bdda93
|
d016ef9c578e2224127ee00cdd9050affa30b81f
|
/CNV/Run_Aneufinder_Parallel.R
|
c9ef50bc8e07ed8e9981abf8319bb8f92637e293
|
[] |
no_license
|
UMCUGenetics/Bovine_Embryo
|
8f964daf8b9f58c6cf515825b5d453748e8b5fef
|
77f8a19dbf8b6d30ce75ffe7c915ce121cfc833f
|
refs/heads/master
| 2020-05-29T15:25:43.063716
| 2019-07-04T09:27:06
| 2019-07-04T09:27:06
| 189,220,273
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,771
|
r
|
Run_Aneufinder_Parallel.R
|
# This script will generate jobs to run aneufinder on multiple input folders (eg sequencing runs
args <- commandArgs(trailingOnly=TRUE)
options(scipen = 999)
# The samplesheet should contain the following columns: Run (eg the name for the folder containing the bams), Method (Strand-seq or WGS)
samplesheet_file <- args[1]
# Input folder should contain the Run_Aneufinder.R scripts. Jobs will be written to [input_folder]/Jobs/ and logs to [input_folder]/Logs/
input_folder <- args[2]
# Aneufinder output data will be written to this file. A new subfolder for each run will be generated.
output_folder <- args[3]
# The bam_folder should contain subfolders for each run containing bam files. [bam_folder]/[run]/*.bam
bam_folder <- args[4]
#
bin_size <- args[5]
# Blacklist can be generated by Generate_Aneufinder_Blacklist.R script
blacklist_path <- args[6]
# The runs aneufinder will analyze, leave empty if you want to run for all runs in the samplesheet
runs <- args[7]
## Settings
if(file.exists(blacklist_path) == FALSE){
stop(paste("! Blacklist not found: ", blacklist_path, sep =""))
}
# use BSgenome.Btaurus.UCSC.bosTau8 for UMD3.1 or use hg19 for human
reference_genome <- "BSgenome.Btaurus.UCSC.bosTau8"
# Multiple methods can be run (use comma to separate)
aneufinder_methods <- "edivisive,HMM,dnacopy"
# Multiple bin sizes can be run (c(100000, 200000))
if(!is.null(bin_size) == TRUE){
aneufinder_bin_size <- bin_size
} else {
aneufinder_bin_size <- 1000000
}
# Jobs and logs will be written to these folders:
logs_folder <- paste(input_folder, "Logs/", sep = "")
jobs_folder <- paste(input_folder, "Jobs/", sep = "")
dir.create(logs_folder, showWarnings = F)
dir.create(jobs_folder, showWarnings = F)
if(file.exists(samplesheet_file) == FALSE){
stop(paste("! Samplesheet not found: ", samplesheet_file, sep =""))
}
samplesheet <- read.delim(samplesheet_file, header = T, stringsAsFactors = F)
# Remove positive and negative controls:
samplesheet <- samplesheet[samplesheet$Embryo != "POS" & samplesheet$Embryo != "NEG",]
# only select the specified runs
if(!is.na(runs) == TRUE){
print(runs)
samplesheet <- samplesheet[samplesheet$Run %in% runs,]
}
dir.create(output_folder, showWarnings = F)
Run_Aneufinder_Script <- paste(input_folder, "Run_Aneufinder.R", sep = "")
if(file.exists(Run_Aneufinder_Script)){
for(run in unique(samplesheet$Run)){
run_info <- samplesheet[which(samplesheet$Run == run),]
bam_folder_run <- paste(bam_folder, run, sep = "")
method <- ifelse(unique(run_info$Method) == "Strand-seq", TRUE, FALSE)
if( length(list.files(bam_folder_run, pattern = "\\.bam$")) > 0){
for(bin_size in aneufinder_bin_size){
job_ID <- paste(jobs_folder, "Run_Aneufinder_", run, "_",bin_size, ".sh", sep = "")
shell_script <- paste("
#!/bin/bash
#$ -S /bin/bash
#$ -l h_vmem=30G
#$ -l h_rt=04:00:00
#$ -cwd
#$ -o ",logs_folder, format(Sys.Date(), "%Y%m%d"),"_Run_Aneufinder_", run, "_",bin_size,"_log.txt
#$ -e ",logs_folder, format(Sys.Date(), "%Y%m%d"),"_Run_Aneufinder_", run, "_",bin_size,"_log.txt
# Folder containing all the bam files:
BAM_FOLDER=",bam_folder_run,"
# Output will be written to this folder:
OUTPUT_FOLDER=",output_folder, run, "/
# Bin size for aneufinder. For multiple bin sizes use , as separation: 500000,1000000
BIN_SIZE=",format(bin_size, scientific = FALSE),"
# Set to TRUE if this is a strand-seq analysis:
STRANDSEQ=",method,"
# Reference genome. Use hg19 for human. Use BSgenome.Btaurus.UCSC.bosTau8 for bovine.
REFERENCE=",reference_genome,"
# Blacklist to exclude regions
BLACKLIST=",blacklist_path,"
# Methods used for copy number state calling. Options: edivisive,HMM,dnacopy
METHODS=",aneufinder_methods,"
# Leave empty to use all copy number state. Else use (split on ,): zero-inflation,0-somy,1-somy,2-somy,3-somy,4-somy
CNV_STATES=\"\"
# Leave empty if you want to run for all chromosomes (1-22+X for hg19, 1-29+X for BSgenome.Btaurus.UCSC.bosTau8). Else use , to split (for example 1,2,3,4,5)
CHROMOSOMES=\"\"
date
guixr load-profile ~/.guix-profile/ -- <<EOF
Rscript ",Run_Aneufinder_Script," $BAM_FOLDER $OUTPUT_FOLDER $BIN_SIZE $STRANDSEQ $REFERENCE $BLACKLIST $METHODS $CNV_STATES $CHROMOSOMES
EOF
date", sep = "")
print(paste("# Writing shell script: ",job_ID, sep = ""))
write.table(shell_script, file = job_ID, sep = "\t", quote = F, row.names = F, col.names = F)
command <- paste("qsub ", job_ID, sep ="")
print(command)
system(command)
}
} else {
# Fix: print() does not concatenate multiple strings (the second positional
# argument is `digits`), so the original call errored; build the message
# with paste0 first.
print(paste0("! No BAM files found in folder: ", bam_folder_run, " !"))
}
}
} else {
# Fix: `sep = ""` was passed to print() instead of paste(), leaving paste's
# default " " separator and doubled spaces; use paste0 for the whole message.
print(paste0("! Aneufinder script: ", Run_Aneufinder_Script, " not found!"))
}
|
a818e7e4ef28330dc4bc8da13903642b8bb8d6ee
|
f978d2c5b0079bd0de6be6ad4b96faf05b2fe189
|
/apostila/_render.R
|
d69d0ec249c53e6586c9fe098dfb6b0b9d65a656
|
[] |
no_license
|
estatsej/curso_rmarkdown
|
58981fe8702337e507724a683150c0f08a4f0d7c
|
81bea361d2034ac174cb71bbe6b0a11b10b0f762
|
refs/heads/master
| 2023-02-10T16:22:08.381427
| 2021-01-13T19:53:59
| 2021-01-13T19:53:59
| 273,235,672
| 0
| 1
| null | 2020-08-05T23:19:25
| 2020-06-18T12:45:18
|
TeX
|
UTF-8
|
R
| false
| false
| 181
|
r
|
_render.R
|
# Render the bookdown project into ../docs, once with the default (site)
# output and once as a PDF book.
bookdown::render_book("00-index.Rmd", output_dir = "../docs") # Render the site
bookdown::render_book("00-index.Rmd", bookdown::pdf_book(), output_dir = "../docs") # Render the PDF
|
e1fe34466670c992c2f821ae35f850526353c46d
|
516603468a29ec93396a49fe40606697250d67b2
|
/04_03_mosaicToNewRaster.R
|
e62d267fa0ced12f1d14ddaf479dfbfb3f2de44f
|
[] |
no_license
|
fabiolexcastro/Gates-smallholder-adaptation
|
9256d014ba43924636e138c5e053af06c50e5413
|
fb599eb172c44e73e655669b0083dcd90f803244
|
refs/heads/master
| 2022-08-10T01:30:56.339281
| 2022-07-19T08:01:02
| 2022-07-19T08:01:02
| 250,032,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,672
|
r
|
04_03_mosaicToNewRaster.R
|
# Load libraries ----------------------------------------------------------
require(pacman)
pacman::p_load(raster, rgdal, rgeos, stringr, sf, tidyverse)
rm(list = ls())
# Functions to use ---------------------------------------------------------
# Mosaic the per-tile heat-stress rasters for one year (yr, matched against
# the global `fls` file list) into a single raster and write it to disk.
# NOTE(review): ms5 mosaics ms3 with ms4, but ms4 already incorporates ms3
# (ms4 <- mosaic(ms2, ms3)), so the tiles in ms3 enter the mean twice --
# confirm this overlap is intended.
make_mosaic <- function(yr){
print(yr)
fle <- grep(yr, fls, value = T)
rst <- map(.x = fle, .f = raster)
ms1 <- raster::mosaic(rst[[1]], rst[[2]], fun = 'mean')
ms2 <- raster::mosaic(rst[[3]], rst[[4]], fun = 'mean')
ms3 <- raster::mosaic(ms1, rst[[5]], fun = 'mean')
ms4 <- raster::mosaic(ms2, ms3, fun = 'mean')
ms5 <- raster::mosaic(ms3, ms4, fun = 'mean')
# ms6 <- raster::mosaic(ms4, ms5, fun = 'mean')
# ms1 <- raster::mosaic(rst[[1]], rst[[2]], fun = 'mean')
# ms2 <- raster::mosaic(rst[[3]], rst[[4]], fun = 'mean')
# ms3 <- raster::mosaic(rst[[5]], rst[[6]], fun = 'mean')
# ms4 <- raster::mosaic(ms1, ms2, fun = 'mean')
# ms5 <- raster::mosaic(ms3, ms4, fun = 'mean')
# ms6 <- raster::mosaic(ms4, ms5, fun = 'mean')
# rm(ms1, ms2, ms3, ms4, ms5)
# writeRaster(ms6, paste0('../raster/indicators/heat_stress/mosaic/heat_barley_s2_', yr, '.tif'), overwrite = T)
writeRaster(ms5, paste0('../raster/indicators/heat_stress/mosaic/heat_cowpea_', yr, '.tif'), overwrite = T)
print('Done!!!')
}
# Build summary rasters (mean, CV, 95th percentile) across 1983-2016 for one
# crop and write the GeoTIFFs plus a three-panel map to disk.
#
# crp: crop name used to select files from the global `fls` vector and to
#      name the outputs. Also relies on the global `shp` map backdrop.
#
# Fixes vs original: the debug leftover `crp <- 'cowpea'` (which pinned every
# call to cowpea, ignoring the argument) was removed; the `_s2_` exclusion now
# uses grepl() because x[-grep(...)] empties the vector when there are no
# matches (negative indexing with integer(0) selects nothing).
make_raster <- function(crp){
stk <- grep(crp, fls, value = T)
stk <- grep(paste0(1983:2016, collapse = '|'), stk, value = T)
# Drop season-2 files; grepl() is safe when no file matches.
stk <- stk[!grepl('_s2_', stk)]
stk <- stack(stk)
avg <- mean(stk)
# Coefficient of variation as a percentage of the mean.
cfv <- calc(stk, sd) / avg
cfv <- cfv * 100
q95fun <- function(x){quantile(x, probs = .95, na.rm=TRUE)}
p95 <- calc(stk, fun=q95fun, forceapply=T)
print('To write the raster')
writeRaster(avg, paste0('../raster/indicators/heat_stress/mosaic/heat_crop_', crp, '_mean.tif'), overwrite = T)
writeRaster(cfv, paste0('../raster/indicators/heat_stress/mosaic/heat_crop_', crp, '_cv.tif'), overwrite = T)
writeRaster(p95, paste0('../raster/indicators/heat_stress/mosaic/heat_crop_', crp, '_p95.tif'), overwrite = T)
print('To calculate the raster stack')
stk <- stack(avg, cfv, p95)
names(stk) <- c('mean', 'CV', 'percentil95')
# Long-format table of cell values for plotting.
vls <- rasterToPoints(stk) %>%
as_tibble() %>%
mutate(id = 1:nrow(.)) %>%
gather(var, value, -id, -x, -y)
# Panel 1: mean.
g1 <- ggplot(data = filter(vls, var == 'mean')) +
geom_tile(aes(x = x, y = y, fill = value)) +
scale_fill_gradientn(colours = RColorBrewer::brewer.pal(n = 8, name = "YlOrRd"),
na.value = 'white') +
theme_bw() +
geom_polygon(data = shp, aes(x = long, y = lat, group = group), color = 'grey', fill = 'NA') +
coord_equal() +
labs(title = '', fill = 'Days', x = 'Longitude', y = 'Latitude') +
theme(legend.position = 'bottom',
plot.title = element_text(hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key.width = unit(5, 'line')) +
guides(shape = guide_legend(override.aes = list(size = 10)))
# Panel 2: CV, capped at >100 for a readable legend.
cfv_tbl <- rasterToPoints(cfv) %>%
as_tibble() %>%
mutate(id = 1:nrow(.)) %>%
gather(var, value, -id, -x, -y)
cfv_tbl <- cfv_tbl %>% mutate(value = ifelse(value > 100, 101, value))
g2 <- ggplot(data = cfv_tbl) +
geom_tile(aes(x = x, y = y, fill = value)) +
scale_fill_gradientn(colours = RColorBrewer::brewer.pal(n = 8, name = "YlOrRd"),
na.value = 'white',
labels = c(0, 25, 50, 75, '>100')) +
theme_bw() +
geom_polygon(data = shp, aes(x = long, y = lat, group = group), color = 'grey', fill = 'NA') +
coord_equal() +
labs(title = '', fill = 'CV', x = 'Longitude', y = 'Latitude') +
theme(legend.position = 'bottom',
plot.title = element_text(hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key.width = unit(5, 'line')) +
guides(shape = guide_legend(override.aes = list(size = 10)))
# Panel 3: 95th percentile.
g3 <- ggplot(data = filter(vls, var == 'percentil95')) +
geom_tile(aes(x = x, y = y, fill = value)) +
scale_fill_gradientn(colours = RColorBrewer::brewer.pal(n = 8, name = "YlOrRd"),
na.value = 'white') +
theme_bw() +
geom_polygon(data = shp, aes(x = long, y = lat, group = group), color = 'grey', fill = 'NA') +
coord_equal() +
labs(title = '', fill = 'CV', x = 'Longitude', y = 'Latitude') +
theme(legend.position = 'bottom',
plot.title = element_text(hjust = 0.5),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.key.width = unit(5, 'line')) +
guides(shape = guide_legend(override.aes = list(size = 10)))
library(ggpubr)
gg <- ggarrange(g1, g2, g3, ncol = 1, nrow = 3, labels = c('Mean', 'CV', 'Percentil95'))
ggsave(plot = gg, filename = paste0('../png/maps/mean_cv_p95_', crp, '.png'), units = 'in', width = 12, height = 25, dpi = 300)
print('DOne!')
}
# Load data ---------------------------------------------------------------
# Heat-stress indicator rasters for cowpea; keep only the scenario-2 files.
fls <- list.files('../raster/indicators/heat_stress', full.names = T, pattern = '.tif$')
fls <- grep('cowpea', fls, value = T)
fls <- fls[grep('s2.tif', fls, value = F)]
yrs <- 1983:2016
shp <- shapefile('../data/shp/base/continents_1.shp')
# Apply the function ------------------------------------------------------
# Build one global mosaic per year (make_mosaic is defined earlier in this file
# and appears to read `fls`/`shp` from the global environment).
map(.x = yrs, .f = make_mosaic)
# Mean --------------------------------------------------------------------
# Point `fls` at the mosaics just written, then summarise (mean/CV/p95 maps).
# NOTE(review): make_raster seems to rely on this global `fls` reassignment -
# confirm it picks up the mosaic folder rather than the original rasters.
fls <- list.files('../raster/indicators/heat_stress/mosaic', full.names = T, pattern = '.tif$')
make_raster(crp = 'cowpea')
|
6076f2297a369d4a886be71a4c3442ab4e0be3bb
|
9260d60c9fabcb3d0827f76b3562e07a7998fb71
|
/tests/qnormtest.R
|
d7b954b633dce811c76d9e198d4e580f4f6b543f
|
[] |
no_license
|
bmbolstad/preprocessCore
|
32371e05b8f53a0dfce5160de753606b53f6d4f8
|
33ccbd9451bb8c6e314b6a72ca6a8a63e14dafde
|
refs/heads/master
| 2023-01-22T09:40:39.890788
| 2022-11-01T14:35:18
| 2022-11-01T14:35:18
| 23,523,419
| 11
| 17
| null | 2021-07-02T17:24:53
| 2014-08-31T21:20:30
|
C
|
UTF-8
|
R
| false
| false
| 1,973
|
r
|
qnormtest.R
|
library(preprocessCore)

# Regression tests: quantile normalisation against hand-computed reference
# values.  Numeric agreement is required to within err.tol.
err.tol <- 10^-8

# Stop with a labelled message unless actual and expected agree elementwise
# within err.tol.  The `all(...) != TRUE` comparison mirrors the original
# checks, including their behaviour when NA values slip through.
checkAgreement <- function(actual, expected, label, na.rm = FALSE) {
  if (all(abs(expected - actual) < err.tol, na.rm = na.rm) != TRUE) {
    stop(paste0("Disagreement in ", label))
  }
}

# --- Complete data -------------------------------------------------------
x <- matrix(c(100,15,200,250,110,16.5,220,275,120,18,240,300), ncol = 3)
x
normalize.quantiles(x)

x.norm.truth <- matrix(rep(c(110.0,16.5,220,275.0), 3), ncol = 3)
checkAgreement(normalize.quantiles(x), x.norm.truth, "normalize.quantiles(x)")

normalize.quantiles.determine.target(x)
x.norm.target.truth <- c(16.5,110.0,220.0,275.0)
checkAgreement(normalize.quantiles.determine.target(x), x.norm.target.truth,
               "normalize.quantiles.determine.target(x)")

# --- Data containing a missing value -------------------------------------
y <- x
y[2,2] <- NA
y
normalize.quantiles(y)

y.norm.target.truth <- c(47.6666666666667,134.4444444444444,226.1111111111111,275.0000000000000)
y.norm.truth <- matrix(c(134.4444444444444, 47.6666666666667, 134.4444444444444,
                          47.6666666666667,                NA,  47.6666666666667,
                         226.1111111111111, 180.2777777777778, 226.1111111111111,
                         275.0000000000000, 275.0000000000000, 275.0000000000000),
                       byrow = TRUE, ncol = 3)
checkAgreement(normalize.quantiles(y), y.norm.truth, "normalize.quantiles(y)", na.rm = TRUE)
checkAgreement(normalize.quantiles.determine.target(y), y.norm.target.truth,
               "normalize.quantiles.determine.target(y)")
checkAgreement(normalize.quantiles.use.target(y, y.norm.target.truth), y.norm.truth,
               "normalize.quantiles.use.target(y)", na.rm = TRUE)

# --- Dimnames are preserved when keep.names = TRUE -----------------------
x <- matrix(c(100,15,200,250,110,16.5,220,275,120,18,240,300), ncol = 3)
rownames(x) <- letters[1:4]
colnames(x) <- LETTERS[1:3]
y <- normalize.quantiles(x, keep.names = TRUE)
if (!all(colnames(x) == colnames(y))) {
  stop("Disagreement between initial and final column names despite keep.names=TRUE")
}
if (!all(rownames(x) == rownames(y))) {
  stop("Disagreement between initial and final row names despite keep.names=TRUE")
}
|
05409ce2b247a529f622c04bf09c6d11a185664c
|
3dfcad5e4ca29823a6e7899dcd22aaf7f5df971c
|
/R/AromaUnitTotalCnBinaryFileList.R
|
ba0f30043421959ea61ef520bd52c9a94a4c8379
|
[] |
no_license
|
HenrikBengtsson/aroma.core
|
f22c931029acf55f3ad2fdb6eb3bc2f0d2ba04e4
|
1bf20e2b09f4b8c0ca945dfb26fdf1902c187109
|
refs/heads/master
| 2022-12-01T09:01:49.295554
| 2022-11-15T18:29:17
| 2022-11-15T18:29:52
| 20,845,682
| 2
| 3
| null | 2018-04-21T02:06:48
| 2014-06-15T01:32:43
|
R
|
UTF-8
|
R
| false
| false
| 6,955
|
r
|
AromaUnitTotalCnBinaryFileList.R
|
setConstructorS3("AromaUnitTotalCnBinaryFileList", function(...) {
  # A GenericDataFileList specialised to hold total copy-number binary files.
  parent <- GenericDataFileList(...)
  extend(parent, "AromaUnitTotalCnBinaryFileList")
})
setMethodS3("extractRawGenomicSignals", "AromaUnitTotalCnBinaryFileList", function(this, ..., dropEmpty=TRUE, FUN=extractRawGenomicSignals, verbose=FALSE) {
  # Applies `FUN` to every file in the list, returning one extracted-signal
  # object per file.  When dropEmpty=TRUE, files that yield zero loci are
  # removed; the indices of the files kept are recorded in the "included"
  # attribute of the returned list so callers can subset the file list to match.
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'FUN':
  if (!is.function(FUN)) {
    throw("Argument 'FUN' is not a function: ", mode(FUN)[1])
  }

  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  dfList <- this

  verbose && enter(verbose, "Extracting raw copy numbers across list")
  nbrOfSources <- length(dfList)
  verbose && cat(verbose, "Number of sources: ", nbrOfSources)

  verbose && enter(verbose, "Extracting copy numbers of interest")
  # Apply the extractor file by file; extra arguments in `...` are forwarded.
  cnList <- lapply(dfList, FUN=function(df) {
    FUN(df, ..., verbose=less(verbose, 25))
  })

  if (dropEmpty) {
    verbose && enter(verbose, "Dropping empty data sets")
    ns <- sapply(cnList, FUN=nbrOfLoci)
    keep <- which(ns > 0)
    cnList <- cnList[keep]
    ns <- sapply(cnList, FUN=nbrOfLoci)
    nbrOfSources <- length(cnList)
    verbose && exit(verbose)
  } else {
    keep <- seq_along(cnList)
  }
  # Record which of the input files survived the dropEmpty filter.
  attr(cnList, "included") <- keep
  verbose && print(verbose, cnList)

  verbose && exit(verbose)

  cnList
}) # extractRawGenomicSignals()
setMethodS3("extractRawCopyNumbers", "AromaUnitTotalCnBinaryFileList", function(this, ...) {
  # Convenience wrapper: extract raw copy numbers from every file in the
  # list by delegating to the generic raw-signal extractor.
  cnExtractor <- extractRawCopyNumbers
  extractRawGenomicSignals(this, ..., FUN=cnExtractor)
}) # extractRawCopyNumbers()
setMethodS3("extractMergedRawCopyNumbers", "AromaUnitTotalCnBinaryFileList", function(this, unshift=TRUE, bandwidth=200e3, ..., verbose=FALSE) {
  # Extracts raw copy numbers from every source in the list, optionally
  # removes per-source global shifts (relative to the first source),
  # weights each source's loci inversely to its noise level, and returns
  # a single merged object sorted along the genome.
  #
  # Arguments:
  #   unshift   - if TRUE, estimate and subtract a global offset per source.
  #   bandwidth - bin width (bp) used to smooth each source onto a common
  #               grid of loci before estimating the offsets.
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  # Argument 'unshift':
  unshift <- Arguments$getLogical(unshift)

  # Argument 'bandwidth':
  bandwidth <- Arguments$getDouble(bandwidth, range=c(0, Inf))

  dfList <- this

  verbose && enter(verbose, "Multi-source segmentation")

  verbose && enter(verbose, "Extracting raw copy numbers")
  # Empty sources are dropped; keep the file list in sync via "included".
  cnList <- extractRawCopyNumbers(dfList, ..., dropEmpty=TRUE, verbose=verbose)
  keep <- attr(cnList, "included")
  dfList <- dfList[keep]
  nbrOfSources <- length(cnList)
  verbose && cat(verbose, "Number of sources: ", nbrOfSources)
  # Sanity check
  .stop_if_not(nbrOfSources > 0)
  platforms <- sapply(dfList, FUN=getPlatform)
  chipTypes <- sapply(dfList, FUN=getChipType)
  # names(cnList) <- sprintf("%s\n%s\n%s", sites, platforms, chipTypes)
  verbose && cat(verbose, "Platforms/chip types:")
  verbose && print(verbose, paste(platforms, chipTypes, sep=":"))
  verbose && exit(verbose)

  # Not needed anymore
  dfList <- platforms <- chipTypes <- NULL

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Remove relative shifts?
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  if (unshift) {
    verbose && enter(verbose, "Estimate and remove relative shifts")

    verbose && enter(verbose, "Estimate CNs at common loci using binning")
    verbose && cat(verbose, "Bandwidth: ", bandwidth)
    # Estimate the noise level for each platform
    # NOTE: this assignment shadows the xRange() function with a numeric
    # vector; only the vector is used below, so it works, but beware.
    xRange <- sapply(cnList, FUN=xRange)
    xRange <- range(xRange, na.rm=TRUE)
    xRangeStr <- paste(sprintf("%.2f", xRange/1e6), collapse=":")
    verbose && cat(verbose, "Range (Mbp): ", xRangeStr)
    # Smooth every source onto the same binned grid so offsets are
    # comparable locus by locus.
    cnSList <- lapply(cnList, FUN=function(cn) {
      t <- system.time({
        cnS <- binnedSmoothing(cn, from=xRange[1], to=xRange[2], by=bandwidth)
      }, gcFirst = FALSE)
      verbose && cat(verbose, "Processing time:")
      verbose && print(verbose, t)
      attr(cnS, "processingTime") <- t
      cnS
    })
    verbose && print(verbose, cnSList)
    verbose && exit(verbose)

    verbose && enter(verbose, "Estimate global relative shifts")
    # Estimate the global shift for each platform (average over all loci)
    # relative to the FIRST source (whose delta is therefore ~0).
    yRef <- getSignals(cnSList[[1]])
    deltas <- sapply(cnSList, FUN=function(cn) {
      y <- getSignals(cn)
      .stop_if_not(length(y) == length(yRef))
      median(y-yRef, na.rm=TRUE)
    })
    verbose && cat(verbose, "Relative shifts:")
    verbose && print(verbose, deltas)
    verbose && exit(verbose)

    verbose && enter(verbose, "Removing shifts")
    for (kk in seq_along(cnList)) {
      # Unshift full resolution data
      cn <- cnList[[kk]]
      cn$y <- cn$y - deltas[kk]
      cnList[[kk]] <- cn
      # Unshift smoothed data (not really needed)
      cnS <- cnSList[[kk]]
      cnS$y <- cnS$y - deltas[kk]
      cnSList[[kk]] <- cnS
    } # for (kk ...)
    verbose && exit(verbose)

    verbose && exit(verbose)
  } # if (unshift)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Estimating platform-specific weights based their noise levels
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && enter(verbose, "Estimating platform-specific weights based their noise levels")
  vars <- sapply(cnList, FUN=function(cn) {
    getSigma(cn)^2
  })
  verbose && cat(verbose, "Robust first-order variance estimates (per source):")
  verbose && print(verbose, vars)
  verbose && cat(verbose, "Relative to the first source:")
  verbose && print(verbose, vars/vars[1])
  verbose && cat(verbose, "If adjusted for number of loci:")
  ns <- sapply(cnList, FUN=nbrOfLoci)
  verbose && print(verbose, vars/ns)
  verbose && print(verbose, (vars/ns)/(vars/ns)[1])
  # Not needed anymore
  ns <- NULL

  # Standardized weights: inverse-variance, normalised to sum to 1.
  ws <- 1/vars
  ws <- ws / sum(ws, na.rm=TRUE)
  verbose && cat(verbose, "Weights (per source):")
  verbose && print(verbose, ws)
  verbose && cat(verbose, "Relative to the first source:")
  verbose && print(verbose, ws/ws[1])
  verbose && exit(verbose)

  verbose && enter(verbose, "Assign platform specific weights")
  # NOTE(review): cnSList is only created inside the `if (unshift)` branch
  # above, but it is referenced unconditionally in this loop; calling this
  # method with unshift=FALSE would fail here - confirm and guard if needed.
  for (kk in seq_along(cnList)) {
    cn <- cnList[[kk]]
    cn$weights <- rep(ws[kk], times=nbrOfLoci(cn))
    cnList[[kk]] <- cn
    cnS <- cnSList[[kk]]
    cnS$weights <- rep(ws[kk], times=nbrOfLoci(cnS))
    cnSList[[kk]] <- cnS
  } # for (kk ...)
  verbose && exit(verbose)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Merge and order along genome
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && enter(verbose, "Merge and order along genome")
  cnM <- Reduce(append, lapply(cnList, FUN=clone))
  cnM <- sort(cnM)
  verbose && print(verbose, cnM)
  verbose && exit(verbose)

  verbose && exit(verbose)

  cnM
}) # extractMergedRawCopyNumbers()
|
6616e6a020cc523e8639b4241658725096f65e9c
|
88d71818726675543faa3b3bcaddc7eef6f5583d
|
/text_mining_example/公開資料文字探勘.R
|
57c06baabedb279578928202ab4e68549f2d4109
|
[] |
no_license
|
souv/shiny
|
0a2578dcd635c1cd4a2b3b9adca8a50361f96edf
|
8699e4bb2d256d19e79e3231f7f2b18785bbc8b6
|
refs/heads/master
| 2020-04-06T06:57:30.786671
| 2016-08-31T03:04:53
| 2016-08-31T03:04:53
| 65,263,355
| 0
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 2,178
|
r
|
公開資料文字探勘.R
|
# Reference: http://rstudio-pubs-static.s3.amazonaws.com/12422_b2b48bb2da7942acaca5ace45bd8c60c.html
# tm package corpus issue: http://stackoverflow.com/questions/24191728/documenttermmatrix-error-on-corpus-argument
# Text-mining pipeline over public stock-disclosure documents: build a
# corpus, segment the Chinese text, build a term-document matrix, and
# finally draw a word cloud.
orgPath = "./open_stock"
text = Corpus(DirSource(orgPath), list(language = NA))
# Strip punctuation, digits, then all remaining ASCII letters/digits.
text <- tm_map(text, removePunctuation)
text <- tm_map(text, removeNumbers)
text <- tm_map(text, function(word)
{ gsub("[A-Za-z0-9]", "", word) })
# Perform Chinese word segmentation with jiebaR.
mixseg = worker()
mat <- matrix( unlist(text) ) # coerce the corpus text into a matrix
totalSegment = data.frame()
# Double loop: segment each cell and collect the tokens into a data frame.
# NOTE(review): the inner loop overwrites `result` on every iteration, so
# only the last column's segmentation per row is kept - confirm intended.
# NOTE(review): rbind() inside a loop grows the data frame quadratically;
# consider collecting into a list and binding once.
for( j in 1:length(mat) )
{
for( i in 1:length(mat[j,]) )
{
result = segment(as.character(mat[j,i]), jiebar=mixseg)
}
totalSegment = rbind(totalSegment, data.frame(result))
}
# Build the term-document matrix (terms of length 2-3 only).
tdm <- TermDocumentMatrix(text, control = list(wordLengths = c(2,3)))
tdm
inspect(tdm[3:25, 1:144])
idx <- which(dimnames(tdm)$Terms == "配")
inspect(tdm[idx+(0:5),101:110])
# Term-frequency exploration and plots.
findFreqTerms(tdm, lowfreq=2)
termFrequency <- rowSums(as.matrix(tdm))
termFrequency <- subset(termFrequency, termFrequency>=10)
library(ggplot2)
qplot(names(termFrequency), termFrequency, geom="bar", xlab="Terms") + coord_flip()
a=barplot(termFrequency, las=1)
findAssocs(tdm,"無",0.25)
library(plotly)
# NOTE(review): `plotly` is a package name, not a function - this call will
# fail; plot_ly() was probably intended, and `a` holds barplot midpoints
# rather than a plot object.
plotly(a)
# Write each row of the CSV out as a separate txt file.
setwd("C:\\Users\\user\\Desktop\\台大R\\台大_R\\shiny\\text_mining_example\\open_stock")
stock=read.csv("open_stock.csv")
# d <- split(stock,rep(1:144,each=1))
stock <- as.list(as.data.frame(t(stock)))
for (i in (1:length(stock))){
a=as.character(stock[[i]])
name=paste("number",i,".txt")
writeLines(a,name)
}
# define text array that you want
# delete text length < 2
delidx = which( nchar(as.vector(totalSegment[,1])) < 2 ) # indices of tokens shorter than 2 characters
# NOTE(review): if delidx is empty, `totalSegment[-delidx,]` selects ZERO
# rows (negative indexing with integer(0) keeps nothing) - guard needed.
countText = totalSegment[-delidx,] # drop the short tokens
countResult = count(countText)[,1] # keywords to feed into the word cloud
countFreq = count(countText)[,2] / sum(count(countText)[,2]) # relative frequency of each keyword
wordcloud(countResult, countFreq, min.freq = 5, random.order = F, ordered.colors = T,
          colors = rainbow(length(countResult)))
|
0e6d93123d57ce84158b8e80a97e382dd94e4848
|
2ead5747ac0e4e9477d03a0df2410158f6341550
|
/R/vlaunch.R
|
51e7d59c57f95e6effdf57a6ed56456994cc77ec
|
[] |
no_license
|
nonki1974/vdmR
|
bdafb6cc1de78cb6589de987ed256f8ec9e40864
|
26a235545e11b7558aaf7be3188500a8e0e7861e
|
refs/heads/master
| 2022-11-19T01:49:27.135584
| 2020-07-08T02:37:42
| 2020-07-08T02:37:42
| 277,970,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,017
|
r
|
vlaunch.R
|
#' Generate main window for interactive plot windows
#'
#' \code{vlaunch} generates a main window which opens each pre-generated window including statistical plot with interactivity
#'
#' @docType methods
#' @param data data frame for default data set
#' @param name character for the name of the generated scatter plot
#' @param tag character for the common name of a series of linked plots
#' @param path character string of a directory for writing HTML and SVG files
#' @param iframe logical; if \code{TRUE}, all plot windows are displayed in the main window as inline frames
#' @param browse logical; if \code{TRUE}, browse the main window by the default web browser through the local web server; if \code{FALSE}, generating only
#' @export
#' @examples
#' data(vsfuk2012)
#' vscat(MarriageRate, DivorceRate, vsfuk2012, "scat1", "vsfuk2012", colour=Type)
#' vhist(FertilityRate, vsfuk2012, "hist1", "vsfuk2012", fill=Type)
#' vlaunch(vsfuk2012, "main", "vsfuk2012", browse=FALSE)
#'
vlaunch <- function(data, name, tag, path = tempdir(), iframe=FALSE, browse=TRUE){

  fn <- paste0(name, ".", tag)
  basehtmlfn <- paste0(".", tag, ".svg.html")

  # Enumerate the pre-generated plot windows that share this tag, and build
  # the JavaScript window list embedded in the main page.
  plotfilelist <- list.files(path, pattern=paste0("*", basehtmlfn))
  plotfilenames <- gsub(paste0(basehtmlfn), "", plotfilelist)
  winlist <- paste0("var winlist=['",
                    gsub(",","','",
                         paste(plotfilenames, collapse=",")),
                    "'];\n")

  # Copy the supporting JavaScript/CSS shipped with the vdmR package next to
  # the generated HTML.
  jspath <- file.path(system.file(package="vdmR"), "exec/vdmr_main.js")
  file.copy(jspath, paste0(path, "/", fn, ".js"), overwrite=TRUE)
  csspath <- file.path(system.file(package="vdmR"), "exec/vdmr_main.css")
  file.copy(csspath, paste0(path, "/", fn, ".css"), overwrite=TRUE)

  # Write the main HTML page piece by piece.
  z <- file(paste0(path, "/", fn, ".html"),"w")

  cat("<html><head><title>", file=z)
  cat(fn, file=z)
  cat("</title></head>", file=z)

  cat("<script type=\"text/javascript\"><!--\n", file=z)
  cat(winlist, file=z)
  cat(paste0("var tag='",tag,"';\n"), file=z)
  cat(paste0("var colnames= ", rjson::toJSON(colnames(data)), ";"), file=z)
  cat("\n//--></script>\n", file=z)

  cat("<script type=\"text/javascript\" src=\"", file=z)
  cat(paste0(fn, ".js"), file=z)
  cat("\"></script>", file=z)
  cat("<link rel=\"stylesheet\" type=\"text/css\" href=\"", file=z)
  cat(paste0(fn, ".css"), file=z)
  cat("\">", file=z)

  cat("<link rel=\"stylesheet\" type=\"text/css\" href=\"//cdn.datatables.net/1.10.0/css/jquery.dataTables.css\">", file=z)
  cat("<script type=\"text/javascript\" charset=\"utf8\" src=\"//code.jquery.com/jquery-1.10.2.min.js\"></script>", file=z)
  cat("<script type=\"text/javascript\" charset=\"utf8\" src=\"//cdn.datatables.net/1.10.0/js/jquery.dataTables.js\"></script>\n", file=z)

  cat("<body onload=\"init();\">", file=z)

  # When not using inline frames, a button bar opens each plot in its own window.
  if (!iframe) {
    cat("<div id=\"buttons\"></div><br/>", file=z)
  }

  data.ncol <- ncol(data)

  # Identify-variable selector and selection-box visibility toggle.
  cat("<form id=\"form1\">Identify: <select id=\"identify\" name=\"identify\">", file=z)
  cat(paste0(rep("<option value=\"", data.ncol), 0:(data.ncol-1), "\">",
             colnames(data), "</option>"), file=z)
  cat("</select>", file=z)
  cat(" Selection Box: <select id=\"selbox\" onChange=\"setSelToolVisibility(this)\"><option value=\"visible\">visible</option>", file=z)
  cat("<option value=\"hidden\">hidden</option></select></form>", file=z)

  if (iframe) {
    cat("<div id=\"inlineplot\" style=\"width:100%; height:400\">", file=z)
    pfnum <- length(plotfilelist)
    # seq_len() avoids iterating when no plot files were found
    # (the previous 1:pfnum would loop over c(1, 0)).
    for (i in seq_len(pfnum)) {
      cat("<iframe id=\"",plotfilenames[i],"\" scrolling=\"no\" width=\"",100/pfnum,"%\" height=\"400\" src=\"", plotfilelist[i], "\" frameborder=\"0\"></iframe>", sep="", file=z)
    }
    cat("</div>", file=z)
  }

  # Data table mirrored from `data`, rendered with jQuery DataTables.
  cat("<table id=\"df\" class=\"display\" cellspacing=\"0\" width=\"100%\">", file=z)
  cat("<thead><tr>", paste(
    rep("<th>",data.ncol),
    colnames(data),
    rep("</th>",data.ncol), sep=""
  ), "</tr></thead>", sep="", file=z)

  cat("<tbody>", file=z)
  # seq_len() also guards against a zero-row data frame here.
  for (r in seq_len(nrow(data))) {
    cat("<tr>", paste0(
      rep("<td>", data.ncol),
      as.vector(t(data[r,])),
      rep("</td>", data.ncol)
    ), "</tr>", sep="", file=z)
  }
  cat("</tbody></table>", file=z)

  cat("</body></html>", file=z)
  close(z)

  if (browse) {
    if (.Platform$GUI != 'RStudio') {
      # Serve the generated files via a local Rook web server and open them.
      s <- Rook::Rhttpd$new()
      s$add(name="vdmR",
            app=Rook::Builder$new(
              Rook::Static$new(root=path, urls="/"),
              Rook::Redirect$new(paste0("/", fn, ".html"))))
      s$start()
      s$browse(1)
    } else {
      utils::browseURL(file.path(paste0(path, "/", fn,".html")))
    }
  }
}
|
28a0f30bfbe9ef74949e98b796c3f45d2b9da800
|
7ba42ea09417547219343e5532a1f7954bdf10b2
|
/R/recursive-pipeline.R
|
02afadb86c8b4dce967bb04bd4e37a1028030538
|
[
"Apache-2.0"
] |
permissive
|
r-spark/sparknlp
|
622822b53e2b5eb43508852e39a911a43efa443f
|
4c2ad871cc7fec46f8574f9361c78b4bed39c924
|
refs/heads/master
| 2023-03-16T05:35:41.244593
| 2022-10-06T13:42:00
| 2022-10-06T13:42:00
| 212,847,046
| 32
| 7
|
NOASSERTION
| 2023-03-13T19:33:03
| 2019-10-04T15:27:28
|
R
|
UTF-8
|
R
| false
| false
| 2,421
|
r
|
recursive-pipeline.R
|
#' Spark NLP RecursivePipeline
#'
#' Recursive pipelines are SparkNLP specific pipelines that allow a Spark ML Pipeline to know about itself on every
#' Pipeline Stage task, allowing annotators to utilize this same pipeline against external resources to process them
#' in the same way the user decides. Only some of our annotators take advantage of this. RecursivePipeline behaves
#' exactly the same as normal Spark ML pipelines, so they can be used with the same intention.
#' See \url{https://nlp.johnsnowlabs.com/docs/en/concepts#recursivepipeline}
#'
#' @param x Either a \code{spark_connection} or \code{ml_pipeline_stage} objects
#' @param uid uid for the pipeline
#' @param ... \code{ml_pipeline_stage} objects
#'
#' @return When \code{x} is a \code{spark_connection}, \code{ml_pipeline()} returns an empty pipeline object.
#' When \code{x} is a \code{ml_pipeline_stage}, \code{ml_pipeline()} returns an \code{ml_pipeline} with the stages
#' set to \code{x} and any transformers or estimators given in \code{...}.
#' @export
# S3 generic: dispatches on the class of `x` (spark_connection or
# ml_pipeline_stage); see the two methods defined below.
nlp_recursive_pipeline <- function(x, ..., uid = random_string("recursive_pipeline_")) {
  UseMethod("nlp_recursive_pipeline")
}
#' @export
nlp_recursive_pipeline.spark_connection <- function(x, ..., uid = random_string("recursive_pipeline_")) {
  # Instantiate an empty JVM RecursivePipeline on the connection and wrap it.
  validated_uid <- forge::cast_string(uid)
  pipeline_jobj <- invoke_new(x, "com.johnsnowlabs.nlp.RecursivePipeline", validated_uid)
  new_nlp_recursive_pipeline(pipeline_jobj)
}
#' @export
nlp_recursive_pipeline.ml_pipeline_stage <- function(x, ..., uid = random_string("recursive_pipeline_")) {
  # Build a RecursivePipeline whose stages are `x` followed by the stages
  # supplied in `...`, each converted to its underlying Spark jobj.
  validated_uid <- forge::cast_string(uid)
  sc <- spark_connection(x)
  extra_stages <- lapply(list(...), function(stage) spark_jobj(stage))
  all_stages <- c(spark_jobj(x), extra_stages)
  pipeline_jobj <- invoke_static(sc, "sparknlp.Utils", "createRecursivePipelineFromStages",
                                 validated_uid, all_stages)
  new_nlp_recursive_pipeline(pipeline_jobj)
}
new_nlp_recursive_pipeline <- function(jobj, ..., class = character()) {
  # Wrap a JVM RecursivePipeline reference as an R estimator object.
  # Stage retrieval can fail (e.g. on an empty pipeline), in which case
  # both `stages` and `stage_uids` are NULL.
  stages <- tryCatch({
    stage_jobjs <- invoke(jobj, "getStages")
    lapply(stage_jobjs, ml_call_constructor)
  },
  error = function(e) {
    NULL
  })

  if (rlang::is_null(stages)) {
    stage_uids <- NULL
  } else {
    stage_uids <- sapply(stages, function(stage) stage$uid)
  }

  new_ml_estimator(jobj,
                   stages = stages,
                   stage_uids = stage_uids,
                   ...,
                   class = c(class, "nlp_recursive_pipeline", "ml_pipeline"))
}
|
3aacd325717f10a6ce7f1abbcd594efd0ac15084
|
3e48ad8e9472e62f2413bb9709b69751fb12e8fe
|
/man/imagematrix.Rd
|
55abc406d48fabf61ee0b415ec7da85b6767ea2e
|
[] |
no_license
|
cran/rimage
|
0219736b0136b4aac3873f275e1ffaf42e88055b
|
0df1fb311be5070f9bfec6eb5d723883c910359f
|
refs/heads/master
| 2021-01-01T19:29:25.965041
| 2010-06-08T00:00:00
| 2010-06-08T00:00:00
| 17,719,334
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,297
|
rd
|
imagematrix.Rd
|
\name{imagematrix}
\alias{imagematrix}
\title{Generate an imagematrix, i.e. primary data structure of rimage}
\description{
This function makes an imagematrix object from a matrix. This data
structure is primary data structure to represent image in rimage package.
}
\usage{imagematrix(mat, type=NULL,
ncol=dim(mat)[1], nrow=dim(mat)[2], noclipping=FALSE)}
\arguments{
\item{mat}{array, matrix or vector}
\item{type}{"rgb" or "grey"}
\item{ncol}{width of image}
\item{nrow}{height of image}
\item{noclipping}{TRUE if you disable automatic clipping. See details.}
}
\details{
For grey scale image, matrix should be given in the form of 2
dimensional matrix. First dimension is row, and second dimension is
column.
For rgb image, matrix should be given in the form of 3 dimensional
array (row, column, channel). mat[,,1], mat[,,2], mat[,,3] are
red plane, green plane and blue plane, respectively.
You can omit 'type' specification if you give a proper array or
matrix.
Also, if you give a rgb image matrix and specify "grey" as type, the rgb
image matrix is automatically converted to a grey scale image.
This function automatically clips the pixel values which are
less than 0 or greater than 1. If you want to disable this
behavior, give 'noclipping=TRUE'.
The major difference between imagematrix and pixmap is
representation method. pixmap (>0.3) uses OOP class.
On the other hand, rimage uses traditional S class.
The advantage of traditional S class in representing image is that
one can deal with the data structure as an ordinary matrix.
The minor difference between imagematrix and pixmap is
automatic data conversion behavior.
pixmap normalizes a given matrix automatically if
any element of the matrix is out of range between 0 and 1.
On the other hand, imagematrix clips the matrix, which means
that pixels whose values are lower than 0 are replaced with 0 and
pixels whose values are greater than 1 are replaced with 1.
}
\value{
return an imagematrix object
}
\examples{
p <- q <- seq(-1, 1, length=20)
r <- 1 - outer(p^2, q^2, "+") / 2
plot(imagematrix(r))
}
\seealso{\code{\link{plot.imagematrix}},\code{\link{print.imagematrix}},\code{\link{clipping}},\code{\link{normalize}}}
\keyword{misc}
|
43ff2369ca21fbb5102110bb7d2d7cbdc2a10a1d
|
ed100387a89116e92d1a2eef5c32c7f8226e585d
|
/bagOfDrugs_whatNext.R
|
c442eac721084278c9d050aefb0034f92cfaa121
|
[] |
no_license
|
csainsbury/bagOfDrugs
|
0c88eb433000fc54b764a4c4ce2b354ba4e26d22
|
bba697597a3486325ff3fb8e5db47a56fb6b5e72
|
refs/heads/master
| 2021-01-18T17:26:43.548739
| 2017-10-05T18:30:49
| 2017-10-05T18:30:49
| 86,799,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,370
|
r
|
bagOfDrugs_whatNext.R
|
# source("~/R/_workingDirectory/_perAdmissionRewriteDataTableFunctions.R")
# library(gtools)
# library(igraph)
library(data.table)
# Number of distinct identifiers present in ID.
id_per_location <- function(ID) {
  distinct_ids <- unique(ID)
  length(distinct_ids)
}
flagMove <- function(ID, charL) {
  # Annotate a chronological sequence of location codes with transition info.
  #
  # Args:
  #   ID:    unused (kept for backwards compatibility with existing callers).
  #   charL: character vector of location codes, in chronological order.
  #
  # Returns a list of three parallel vectors:
  #   [[1]] the location codes as character,
  #   [[2]] the NEXT location for each row ("spacer" for the final row),
  #   [[3]] the change in the location's factor code at each row
  #         (non-zero marks a move; the final row is always 0).
  n <- length(charL)
  charLreport <- as.character(factor(charL))
  charLnumeric <- as.numeric(factor(charL))

  flagMoveVec <- rep(0, n)
  nextL <- rep("spacer", n)
  if (n > 1) {
    # The original indexed with `1:nrow(testFrame)-1`, i.e. 0:(n-1), which
    # only worked because R silently drops index 0; seq_len is explicit and
    # also fixes the broken length-1 edge case (nextL stayed NA before).
    flagMoveVec[seq_len(n - 1)] <- diff(charLnumeric)
    nextL[seq_len(n - 1)] <- charLreport[2:n]
  }

  list(charLreport, nextL, flagMoveVec)
}
}
# Convert "YYYY-MM-DD" date strings to Unix epoch seconds (midnight GMT).
returnUnixDateTime <- function(date) {
  parsed <- as.POSIXct(date, format = "%Y-%m-%d", tz = "GMT")
  as.numeric(parsed)
}
findSimilarDrugs <- function(inputFrame) {
  # Canonicalise diabetes drug names.  Rows whose DrugName is exactly
  # "Disposable" are dropped; then each regex pattern below, matched
  # case-insensitively against the ORIGINAL DrugName, maps the row to a
  # canonical name.  Rules are applied in order, so a later pattern
  # overwrites an earlier match (e.g. "strip" after "Optium Plus",
  # "Needle" after "Bd-Microfine") - exactly as the original sequence of
  # grep assignments behaved.
  #
  # Args:
  #   inputFrame: data frame with a character column `DrugName`.
  # Returns:
  #   The input frame with `DrugName` replaced by canonical names and
  #   "Disposable" rows removed; all other columns untouched.
  rules <- list(
    c("Glucose",                      "Glucose"),
    c("Glucogel",                     "Glucose"),
    c("Glucagen Hypokit",             "Glucagon"),
    c("Optium Plus",                  "Test Strips"),
    c("Metformin",                    "Metformin"),
    c("Glucophage",                   "Metformin"),
    c("Gliclazide",                   "Gliclazide"),
    c("Diamicron",                    "Gliclazide"),
    c("Rosiglitazone",                "Rosiglitazone"),
    c("Avandia",                      "Rosiglitazone"),
    c("Linagliptin",                  "Linagliptin"),
    c("Victoza",                      "Liraglutide"),
    c("Liraglutide",                  "Liraglutide"),
    c("Pioglitazone",                 "Pioglitazone"),
    c("Sitagliptin",                  "Sitagliptin"),
    c("Januvia",                      "Sitagliptin"),
    c("Dapagliflozin",                "Dapagliflozin"),
    c("Humalog Mix 25",               "HumalogMix25"),
    c("Lantus",                       "InsulinGlargine"),
    c("Levemir",                      "InsulinDetemir"),
    c("Insulatard",                   "HumanLongActing"),
    c("Ultratard",                    "HumanLongActing"),
    c("Humulin I",                    "HumanLongActing"),
    c("Actrapid",                     "Actrapid"),
    c("Humalog 100units/ml solution", "Humalog"),
    c("Novorapid",                    "Novorapid"),
    c("Novomix 30",                   "Novomix30"),
    c("Mixtard 30",                   "Mixtard30"),
    c("Mixtard 20",                   "Mixtard20"),
    c("Humulin M3",                   "HumulinM3"),
    c("Humalog Mix50",                "HumalogMix50"),
    c("strip",                        "Test Strips"),
    c("Bd-Microfine",                 "Needle"),
    c("Needle",                       "Needle")
  )

  # which() drops NA comparisons, matching the original subset() behaviour.
  outputFrame <- inputFrame[which(inputFrame$DrugName != "Disposable"), , drop = FALSE]

  # Match every rule against the ORIGINAL names so rule order, not the
  # result of earlier rewrites, decides the final canonical name.
  originalNames <- outputFrame$DrugName
  canonicalNames <- originalNames
  for (rule in rules) {
    hits <- grep(rule[1], originalNames, ignore.case = TRUE)
    canonicalNames[hits] <- rule[2]
  }
  outputFrame$DrugName <- canonicalNames

  outputFrame
}
# generate node and link files
# Prescription-level extract of all diabetes drugs.
drugDataSet <- read.csv("~/R/GlCoSy/SDsource/Export_all_diabetes_drugs.txt",header=TRUE,row.names=NULL)

# load and process mortality data
deathData <- read.csv("~/R/GlCoSy/SDsource/diagnosisDateDeathDate.txt", sep=",")
deathData$unix_deathDate <- returnUnixDateTime(deathData$DeathDate)
# Missing death dates become 0 so isDead below is a clean 0/1 flag.
deathData$unix_deathDate[is.na(deathData$unix_deathDate)] <- 0
deathData$isDead <- ifelse(deathData$unix_deathDate > 0, 1, 0)
deathData$unix_diagnosisDate <- returnUnixDateTime(deathData$DateOfDiagnosisDiabetes_Date)

# drugDataSet <- read.csv("~/R/GlCoSy/SDsource/test_drug_out_second100kIDs_allTime.txt",header=TRUE,row.names=NULL)
drugDataSet$BNFCode <- as.character(drugDataSet$BNFCode)
drugDataSet$DrugName <- as.character(drugDataSet$DrugName)
# factor -> numeric conversion via levels (direct as.numeric would return codes).
drugDataSet$LinkId <- as.numeric(levels(drugDataSet$LinkId))[drugDataSet$LinkId]
# drugDataSet$LinkId <- as.numeric(drugDataSet$LinkId)
# drugDataSet <- read.csv("./test_drug_out_second100kIDs_allTime.txt",header=TRUE,row.names=NULL)

# restrict to diabetes drugs
# BNF chapter 6.1 appears under several coding styles in the extract.
interestSet <- subset(drugDataSet, substr(drugDataSet$BNFCode,1,3) == "6.1" | substr(drugDataSet$BNFCode,1,4) == "0601" | substr(drugDataSet$BNFCode,1,5) == "06.01")
interestSet <- findSimilarDrugs(interestSet)
interestSetDT <- data.table(interestSet)
interestSetDT$prescription_dateplustime1 <- returnUnixDateTime(interestSetDT$PrescriptionDateTime)
interestSetDT_original <- interestSetDT # run from here if altering runin period

# set runin period of interest
# startRuninPeriod <- '2002-01-01'
endRuninPeriod <- '2015-01-01'
# testDeathDate <- '2013-01-01'
# interestSetDT <- interestSetDT[prescription_dateplustime1 > returnUnixDateTime(startRuninPeriod) & prescription_dateplustime1 < returnUnixDateTime(endRuninPeriod)]
interestSetDF <- data.frame(interestSetDT)

# generate a top-100 etc list for merging back
# needs a bit of data cleaning - merging synonymous drugs etc
n = 45
topNdrugs_DrugNames <- as.data.frame(table(interestSetDF$DrugName))
topNdrugs_DrugNames <- topNdrugs_DrugNames[order(topNdrugs_DrugNames$Freq), ]
topNdrugs <- tail(topNdrugs_DrugNames, n)
# topNdrugs$Var1 <- gsub(" ", "", topNdrugs$Var1, fixed = TRUE)
# topNdrugs$Var1 <- gsub("/", "", topNdrugs$Var1, fixed = TRUE)
# topNdrugs$Var1 <- gsub("-", "", topNdrugs$Var1, fixed = TRUE)

# merge top drugs back with interestSet to generate working data frame:
# inner merge keeps only prescriptions for the top-n drugs.
interestSet_topN_merge <- merge(interestSetDF, topNdrugs, by.x="DrugName", by.y="Var1")

###############################
## start drug data manipulation
###############################
### find prescription date of interest agents
findFirstSGLT2 <- function(prescription_dateplustime1, flagSGLT2use) {
  # Earliest prescription date among rows flagged as SGLT2 use.
  #
  # Args:
  #   prescription_dateplustime1: numeric prescription dates (Unix seconds).
  #   flagSGLT2use: 0/1 flag marking SGLT2 prescriptions.
  #
  # Returns the minimum flagged date, or 0 when no row is flagged
  # (matching the original data.table implementation, which built a
  # temporary data.table just to filter and take the minimum).
  # which() drops any NA flags, as the data.table filter did.
  flaggedDates <- prescription_dateplustime1[which(flagSGLT2use == 1)]
  if (length(flaggedDates) > 0) {
    min(flaggedDates)
  } else {
    0
  }
}
# --- Flag SGLT2 prescriptions and truncate each patient's history ----------
drugsetDT_firstPrescribed <- interestSet_topN_merge
drugsetDT_firstPrescribed <- data.table(drugsetDT_firstPrescribed)
# SGLT2 inhibitors = Empagliflozin or Dapagliflozin
drugsetDT_firstPrescribed$flagSGLT2use <- ifelse(drugsetDT_firstPrescribed$DrugName == "Empagliflozin", 1, 0)
drugsetDT_firstPrescribed$flagSGLT2use <- ifelse(drugsetDT_firstPrescribed$DrugName == "Dapagliflozin", 1, drugsetDT_firstPrescribed$flagSGLT2use)
# per patient: time of first SGLT2 prescription (0 when never prescribed)
drugsetDT_firstPrescribed[, c("firstSGLT2Prescription") := findFirstSGLT2(prescription_dateplustime1, flagSGLT2use), by=.(LinkId)]
# limit set to SGLT2 prescribed IDs
drugsetDT_firstPrescribed_SGLT2only <- drugsetDT_firstPrescribed[firstSGLT2Prescription > 0]
# cut all data after first prescription
drugsetDT_firstPrescribed_SGLT2only <- drugsetDT_firstPrescribed_SGLT2only[prescription_dateplustime1 <= firstSGLT2Prescription]
#######################################
# start analysis
######################################
drugsetDT <- drugsetDT_firstPrescribed_SGLT2only
# drugsetDT$prescription_dateplustime1 <- returnUnixDateTime(drugsetDT$PrescriptionDateTime)
# drugsetDT_original <-drugsetDT # preserve an original full dataset incase needed
# drugsetDT$LinkId <- as.numeric(levels(drugsetDT$LinkId))[drugsetDT$LinkId]
# restrict to the 2005-2017 window
drugsetDT <- drugsetDT[prescription_dateplustime1 > returnUnixDateTime("2005-01-01") & prescription_dateplustime1 < returnUnixDateTime("2017-01-01")]
# scale time to 0 to 1 range (min-max over the whole retained window)
drugsetDT$prescription_dateplustime1.original <- drugsetDT$prescription_dateplustime1
drugsetDT$prescription_dateplustime1 <- (drugsetDT$prescription_dateplustime1 - min(drugsetDT$prescription_dateplustime1)) / (max(drugsetDT$prescription_dateplustime1) - min(drugsetDT$prescription_dateplustime1))
# drugsetDT$LinkId<-as.numeric(levels(drugsetDT$LinkId))[drugsetDT$LinkId]
# drugsetDT$LinkId[is.na(drugsetDT$LinkId)] <- 0
# drugsetDT <- drugsetDT[LinkId > 0]
# read out and in for testing
# write.table(drugsetDT, file = "~/R/GlCoSy/MLsource/drugsetDT_2002to12.csv", sep=",", row.names = FALSE)
# drugsetDT <- read.csv("~/R/GlCoSy/MLsource/drugsetDT.csv", stringsAsFactors = F, row.names = NULL); drugsetDT$row.names <- NULL; drugsetDT$diffLinkId <- NULL; drugsetDT <- data.table(drugsetDT)
# dense 1..k integer id per patient, used as the loop index below
drugsetDT <- transform(drugsetDT,id=as.numeric(factor(LinkId)))
# drugsetDT <- drugsetDT[prescription_dateplustime1.original > returnUnixDateTime('2005-01-01') & prescription_dateplustime1.original < returnUnixDateTime('2015-01-01')]
# calculate time interval:
interval <- max(drugsetDT$prescription_dateplustime1.original)- min(drugsetDT$prescription_dateplustime1.original)
intervalYears <- interval / (60*60*24*365.25)
# months = 3
# set time bins
#
sequence <- seq(0, 1 , (1/40)) # 10y runin - in 3 month blocks
# sequence <- seq(0, 1 , 0.1) # 10y runin - in 12 month blocks
# sequence <- seq(0, 1 , (1/125)) # 10y runin - in 3 month blocks
# generate bag of drugs frame: one row per patient, one column per time bin
drugWordFrame <- as.data.frame(matrix(nrow = length(unique(drugsetDT$LinkId)), ncol = (length(sequence)-1) ))
colnames(drugWordFrame) <- c(1:(length(sequence)-1))
drugWordFrame$LinkId <- 0
# function to generate drugwords for each time interval
# Convert one patient's prescriptions into per-time-bin "drug words":
# within each bin of `sequence`, unique drug names are concatenated into a
# single string ('nil' placeholders seeded into every bin so empty bins
# still produce a word).
#
# Args:
#   LinkId: patient identifier vector (first element is returned).
#   DrugName: character vector of prescribed drug names.
#   prescription_dateplustime1: prescription times scaled to [0, 1].
#   sequence: bin boundaries over [0, 1].
#   id: loop index from the caller (unused; kept for signature stability).
#
# Returns a character vector: one drug word per bin, followed by LinkId[1].
returnIntervals <- function(LinkId, DrugName, prescription_dateplustime1, sequence, id) {
  # DrugName <- subset(drugsetDT, id == 2)$DrugName; prescription_dateplustime1 <- subset(drugsetDT, id == 2)$prescription_dateplustime1; id = 2; LinkId <- subset(drugsetDT, id == 2)$LinkId
  inputSet <- data.table(DrugName, prescription_dateplustime1)
  ## add nil values to fill time slots without any drugs
  nilFrame <- as.data.frame(matrix(nrow = length(sequence), ncol = ncol(inputSet)))
  colnames(nilFrame) <- colnames(inputSet)
  nilFrame$DrugName <- 'nil'
  nilFrame$prescription_dateplustime1 <- sequence
  outputSet <- rbind(nilFrame, inputSet)
  ## generate drug words: bin every row, then collapse unique drugs per bin
  interimSet <- outputSet
  interimSet <- interimSet[, interv := cut(prescription_dateplustime1, sequence)][, .(drugs = (unique(DrugName))), by = interv]
  interimSet[, drugWord := paste(drugs, collapse = ''), by = interv]
  interimSet <- interimSet[order(interimSet$interv), ]
  interimSet[, drugSequenceNumber := seq(1, .N, 1), by = interv]
  reportSet <- interimSet[drugSequenceNumber == 1]
  # strip the leading 'nil' placeholder when real drugs share the bin
  reportSet$drugWord <- ifelse(substr(reportSet$drugWord,1,3) == 'nil' & nchar(reportSet$drugWord) == 3, reportSet$drugWord, substr(reportSet$drugWord,4,nchar(reportSet$drugWord)))
  # drop the trailing NA-interval row (the placeholder at time 0 falls outside
  # cut()'s left-open bins; after ordering, NA sorts last).
  # BUGFIX: was `reportSet[1:nrow(reportSet)-1, ]` - precedence makes that
  # 0:(n-1), which only works because R silently ignores index 0.
  reportSet <- reportSet[seq_len(nrow(reportSet) - 1), ]
  reportSet$intervalNumber <- c(1:nrow(reportSet))
  # print(reportSet$drugWord)
  return(c(reportSet$drugWord, LinkId[1]))
}
# Build one drug-word row per patient id (row j of drugWordFrame).
for (j in seq(1, max(drugsetDT$id), )) {
if(j%%100 == 0) {print(j)} # progress marker every 100 patients
injectionSet <- drugsetDT[id == j]
drugWordFrame[j, ] <- returnIntervals(injectionSet$LinkId, injectionSet$DrugName, injectionSet$prescription_dateplustime1, sequence, j)
}
# NOTE(review): `inputFrame` is not defined anywhere in this script - this line
# will error (or silently use a stale workspace object). It appears intended to
# flag Dapagliflozin cells of drugWordFrame; confirm the target before trusting
# downstream output.
drugWordFrame[grep("Dapagliflozin", inputFrame$DrugName, ignore.case = TRUE)] <- "1"
#import hba1c data
cleanHbA1cData <- read.csv("~/R/GlCoSy/SD_workingSource/hba1cDTclean.csv", sep=",", header = TRUE, row.names = NULL)
cleanHbA1cData$timeSeriesDataPoint <- cleanHbA1cData$hba1cNumeric # standardise the value column name
cleanHbA1cDataDT <- data.table(cleanHbA1cData)
# Find the HbA1c reading closest to the first SGLT2 prescription ("baseline")
# and the reading closest to (first prescription + IntervalMonths) ("follow-up").
# A reading only qualifies when it lies within +/- firstWindowMonths/2 of the
# target time; 0 is returned for a slot with no qualifying reading.
#
# Args:
#   LinkId_value: patient id (data.table group value).
#   firstSGLT2Prescription: first-SGLT2 time (vector; first element used).
#   firstWindowMonths: acceptance window width, in months.
#   IntervalMonths: follow-up offset from baseline, in months.
#
# Returns list(firstHb, secondHb). Reads the global `cleanHbA1cDataDT`.
# NOTE(review): ties at the minimum distance return more than one value.
findHbA1cValues <- function(LinkId_value, firstSGLT2Prescription, firstWindowMonths, IntervalMonths) {
  firstSGLT2time <- firstSGLT2Prescription[1]
  # months -> seconds
  firstWindowSeconds <- firstWindowMonths * (60*60*24*(365.25/12))
  IntervalSeconds <- IntervalMonths * (60*60*24*(365.25/12))
  hb_sub <- cleanHbA1cDataDT[LinkId == LinkId_value]
  # find 1st hba1c: reading nearest the first prescription, within the window
  hb_sub$firstDiff <- hb_sub$dateplustime1 - firstSGLT2time
  first_hb_sub <- hb_sub[abs(firstDiff) < (firstWindowSeconds/2)]
  if (nrow(first_hb_sub) > 0) {
    firstHb <- first_hb_sub[abs(firstDiff) == min(abs(firstDiff))]$timeSeriesDataPoint
  } else {
    firstHb <- 0
  }
  # find 2nd hba1c: reading nearest (first prescription + interval)
  hb_sub$secondDiff <- hb_sub$dateplustime1 - (firstSGLT2time + IntervalSeconds)
  second_hb_sub <- hb_sub[abs(secondDiff) < (firstWindowSeconds/2)]
  if (nrow(second_hb_sub) > 0) {
    # BUGFIX: previously minimised firstDiff here (copy-paste), selecting the
    # reading closest to baseline instead of closest to follow-up.
    secondHb <- second_hb_sub[abs(secondDiff) == min(abs(secondDiff))]$timeSeriesDataPoint
  } else {
    secondHb <- 0
  }
  return(list(firstHb, secondHb))
}
# --- Attach baseline/follow-up HbA1c and build the export matrices ---------
# baseline window = 3 months, follow-up at 12 months post first prescription
drugsetDT[, c("firstHbA1c", "secondHbA1c") := findHbA1cValues(LinkId, firstSGLT2Prescription, 3, 12), by=.(LinkId)]
# include only patients with both readings present (0 = missing sentinel)
drugsetDT$include <- ifelse(drugsetDT$firstHbA1c > 0 & drugsetDT$secondHbA1c >0, 1, 0)
# outcome y = 1 when HbA1c fell by >= 10 between baseline and follow-up
drugsetDT$y <- ifelse(drugsetDT$include == 1 & (drugsetDT$firstHbA1c - drugsetDT$secondHbA1c) >= 10, 1, 0)
# flag single row per ID for merging back with combination data
drugsetDT$index <- seq(1, nrow(drugsetDT), 1)
drugsetDT[, c("firstRow") := ifelse(index == min(index), 1, 0), by=.(LinkId)]
meetsCriteriaDT <- drugsetDT[include == 1 & firstRow == 1]
mergeSet <- data.frame(meetsCriteriaDT$LinkId, meetsCriteriaDT$y); colnames(mergeSet) <- c("LinkId", "y")
exportMerge <- merge(mergeSet, drugWordFrame, by.x = "LinkId", by.y = "LinkId")
########################
# write drug names into numbers (integer-encode the drug-word vocabulary)
drugWordFrame_drugNames <- exportMerge[,3:ncol(exportMerge)]
drugSentenceFrame <- as.data.frame(matrix(nrow = nrow(drugWordFrame_drugNames), ncol = 1))
colnames(drugSentenceFrame) <- c("drugSentence")
vectorWords <- as.vector(as.matrix(drugWordFrame_drugNames))
vectorNumbers <- as.numeric(as.factor(vectorWords))
lookup <- data.frame(vectorWords, vectorNumbers)
lookup <- unique(lookup)
lookup <- data.table(lookup)
# vectorised lookup table use (match each column against the word lookup)
numericalDrugsFrame <- as.data.frame(matrix(0, nrow = nrow(drugWordFrame_drugNames), ncol = ncol(drugWordFrame_drugNames)))
for (jj in seq(1, ncol(drugWordFrame_drugNames), 1)) {
index <- match(drugWordFrame_drugNames[,jj], lookup$vectorWords)
numericalDrugsFrame[,jj] <- lookup$vectorNumbers[index]
}
# write out sequence for analysis
write.table(numericalDrugsFrame, file = "~/R/GlCoSy/MLsource/SGLT2_X.csv", sep=",", row.names = FALSE)
# write out dep variable (y)
write.table(exportMerge$y, file = "~/R/GlCoSy/MLsource/SGLT2_y.csv", sep = ",", row.names = FALSE)
# write.table(drugWordFrame, file = "~/R/GlCoSy/MLsource/drugWordFrame_withID_2005_2015.csv", sep=",")
# drugWordFrame <- read.csv("~/R/GlCoSy/MLsource/drugWordFrame.csv", stringsAsFactors = F, row.names = NULL); drugWordFrame$row.names <- NULL
# here do analysis to select rows (IDs) for later analysis
# mortality outcome at 2017-01-01
# NOTE(review): `deathData` must already exist in the workspace - confirm.
drugWordFrame_mortality <- merge(drugWordFrame, deathData, by.x = "LinkId", by.y= "LinkId")
# remove those dead before end of FU
# analysis frame = those who are not dead, or those who have died after the end of the runin period. ie all individuals in analysis alive at the end of the runin period
drugWordFrame_mortality <- subset(drugWordFrame_mortality, isDead == 0 | (isDead == 1 & unix_deathDate > returnUnixDateTime(endRuninPeriod)) )
# remove those diagnosed after the end of the runin period
drugWordFrame_mortality <- subset(drugWordFrame_mortality, unix_diagnosisDate <= returnUnixDateTime(endRuninPeriod) )
# remove those diagnosed after the beginning of the runin period ie all in analysis have had DM throughout followup period
# drugWordFrame_mortality <- subset(drugWordFrame_mortality, unix_diagnosisDate <= returnUnixDateTime(startRuninPeriod) )
# set up drug sentences for analysis (same integer-encoding as the SGLT2 export)
drugWordFrame_forAnalysis <- drugWordFrame_mortality
drugWordFrame_drugNames <- drugWordFrame_forAnalysis[, 2:(1+(length(sequence)-1)) ]
drugSentenceFrame <- as.data.frame(matrix(nrow = nrow(drugWordFrame_forAnalysis), ncol = 1))
colnames(drugSentenceFrame) <- c("drugSentence")
vectorWords <- as.vector(as.matrix(drugWordFrame_drugNames))
vectorNumbers <- as.numeric(as.factor(vectorWords))
lookup <- data.frame(vectorWords, vectorNumbers)
lookup <- unique(lookup)
lookup <- data.table(lookup)
# vectorised lookup table use
numericalDrugsFrame <- as.data.frame(matrix(0, nrow = nrow(drugWordFrame_drugNames), ncol = ncol(drugWordFrame_drugNames)))
for (jj in seq(1, ncol(drugWordFrame_drugNames), 1)) {
index <- match(drugWordFrame_drugNames[,jj], lookup$vectorWords)
numericalDrugsFrame[,jj] <- lookup$vectorNumbers[index]
}
# outcome vectors; the deadAt_N_year lines repeat one pattern and could be
# generated in a loop over 1:5 if this section grows further
y_vector <- drugWordFrame_forAnalysis$isDead
y_vector_isType1 <- ifelse(drugWordFrame_forAnalysis$DiabetesMellitusType_Mapped == 'Type 1 Diabetes Mellitus', 1, 0)
y_vector_deadAt_1_year <- ifelse(drugWordFrame_forAnalysis$isDead == 1 & drugWordFrame_forAnalysis$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (1 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_2_year <- ifelse(drugWordFrame_forAnalysis$isDead == 1 & drugWordFrame_forAnalysis$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (2 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_3_year <- ifelse(drugWordFrame_forAnalysis$isDead == 1 & drugWordFrame_forAnalysis$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (3 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_4_year <- ifelse(drugWordFrame_forAnalysis$isDead == 1 & drugWordFrame_forAnalysis$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (4 * 365.25 * 24 * 60 * 60)), 1, 0)
y_vector_deadAt_5_year <- ifelse(drugWordFrame_forAnalysis$isDead == 1 & drugWordFrame_forAnalysis$unix_deathDate < (returnUnixDateTime(endRuninPeriod) + (5 * 365.25 * 24 * 60 * 60)), 1, 0)
# write out sequence for analysis
write.table(numericalDrugsFrame, file = "~/R/GlCoSy/MLsource/drugs_10y_runin.csv", sep=",", row.names = FALSE)
# write out sequence for analysis with LinkId
write.table(drugWordFrame, file = "~/R/GlCoSy/MLsource/drugs_22y_runin_rawWithId.csv", sep=",", row.names = FALSE)
# write out dep variable (y)
write.table(y_vector, file = "~/R/GlCoSy/MLsource/drugs_10y_runin_5y_mortality.csv", sep = ",", row.names = FALSE)
write.table(y_vector_isType1, file = "~/R/GlCoSy/MLsource/isType1_drugs_10y_runin.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_1_year, file = "~/R/GlCoSy/MLsource/drugs_10y_runin_1y_mortality.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_2_year, file = "~/R/GlCoSy/MLsource/drugs_10y_runin_2y_mortality.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_3_year, file = "~/R/GlCoSy/MLsource/drugs_10y_runin_3y_mortality.csv", sep = ",", row.names = FALSE)
write.table(y_vector_deadAt_4_year, file = "~/R/GlCoSy/MLsource/drugs_10y_runin_4y_mortality.csv", sep = ",", row.names = FALSE)
|
e8de183a4a2e794bb938bbe33d52434383d398e1
|
c18e17a1218294189760df46604ff7a3bd71ea34
|
/munge/1_import.R
|
27227faf1ef2e8703b5c04723af7f4dc7e111708
|
[] |
no_license
|
medewitt/wsta_courtdates
|
546f744753283f32881c32bbf7989dd1ba62f396
|
e83a48758c66f61c028502c8b263e54ea24843e7
|
refs/heads/master
| 2020-04-14T06:02:02.329846
| 2019-02-24T20:51:14
| 2019-02-24T20:51:14
| 163,675,659
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,602
|
r
|
1_import.R
|
# Purpose: Get all the court date data from my mailbox and save it
# libraries ---------------------------------------------------------------
library(tidyverse)
library(gmailr)
library(here)
library(Rcpp)
# compiles the C++ helper(s) in libs/ (provides `concatenate` used below)
sourceCpp(file = list.files(path = here("libs"), full.names = T))
# import ------------------------------------------------------------------
use_secret_file("~/crime_secret_2.json")
# Get the Messages that are sent by the court system
mssgs <- messages(
search = "NCWACA"
)
# munge -------------------------------------------------------------------
# Check how many emails exist
emails <- length(mssgs[[1]][[1]])
# download every attachment into data/
# NOTE(review): `message()` here is gmailr's message fetcher, which masks
# base::message - keep gmailr attached or this silently breaks.
for (i in 1:emails) {
ids <- id(mssgs)
Mn <- message(ids[i], user_id = "me")
path <- "data/"
save_attachments(Mn, attachment_id = NULL, path, user_id = "me")
}
# Un zip the files
zipped_files <- list.files(here("data"), pattern = ".zip", full.names = T)
# See the date ranges available
map_dfr(zipped_files, unzip, exdir = "data/out", list = TRUE)
# Unzip Out
map(zipped_files, unzip, exdir = "data/out")
# Read them
files <- list.files("data/out/FORSYTH/CRIMINAL",
full.names = T, pattern = ".txt")
# concatenate all court files into one long string (Rcpp helper)
a <- ""
for(i in seq_along(files)){
a <- concatenate(a, read_lines(file = files[[i]]))
}
# regex-scrape the fixed-width report: case number, charge, class, attorney,
# names and court date (each returns a list of per-line match vectors)
str_extract_all(string = a,
pattern = "\\s(\\w{4}\\s\\d{6})\\s")->b
str_extract_all(string = a,
pattern = "\\s(\\w{6})\\s")->c
str_extract_all(string = a,
pattern = "\\((.)+PLEA")->d
str_extract_all(string = a,
pattern = "CLS:\\b[\\d]")->class
str_extract_all(string = a,
pattern = "ATTY:\\w+")->plaint_attorney
str_extract_all(string = a,
pattern = "\\d{6}\\s([A-z,]+)")->defendant_name
str_extract_all(string = a,
pattern = "\\s\\s(\\w+,\\w+|\\w+,\\w+,\\w+)")->plaintiff_name
str_extract_all(string = a,
pattern = "COURT DATE: (\\d{2})/(\\d{2})/(\\d{2})")->court_date
# Add a helper function for cleaning purposes
# str_extract_all() yields character(0) for lines with no match; convert
# those to NA_character_ so enframe()/unnest() keep one row per record.
clean_nas <- function(x) {
  if (identical(x, character(0))) {
    return(NA_character_)
  }
  x
}
# clean the regex ---------------------------------------------------------
# Each pipeline below: replace empty matches with NA, trim whitespace,
# flatten the list into a name/value tibble, then rename/strip prefixes.
case_num <-b%>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(case_num = value)
charge <-d%>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(charge = value) %>%
mutate(charge = str_remove(charge, "PLEA"),
charge = str_trim(charge, "both"))
class <- class %>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(class = value) %>%
mutate(class = str_remove(class, "CLS:"),
class = str_trim(class, "both"))
# NOTE(review): these values were extracted with pattern "ATTY:\\w+", so the
# str_remove of "CLS:" below is a no-op - presumably "ATTY:" was intended.
plaint_attorney <- plaint_attorney %>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(plaint_attorney = value) %>%
mutate(plaint_attorney = str_remove(plaint_attorney, "CLS:"),
plaint_attorney = str_trim(plaint_attorney, "both"))
defendant_name <- defendant_name %>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(defendant_name = value) %>%
mutate(defendant_name = str_remove(defendant_name, "\\d{6}\\s"))
# NOTE(review): plaintiff_name was extracted without a leading "\\d{6}\\s",
# so this str_remove is also a no-op - confirm intent.
plaintiff_name <- plaintiff_name %>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(plaintiff_name = value) %>%
mutate(plaintiff_name = str_remove(plaintiff_name, "\\d{6}\\s"))
court_date <- court_date %>%
map(clean_nas) %>%
map(str_trim) %>%
enframe() %>%
unnest() %>%
rename(court_date = value)
# combine the parallel columns into one frame
total_summary <- data_frame(
case_num = case_num$case_num,
court_date = court_date$court_date,
charge = charge$charge,
class = class$class,
plaint_attorney = plaint_attorney$plaint_attorney,
defendant_name=defendant_name$defendant_name,
plaintiff_name=plaintiff_name$plaintiff_name)
# carry header values down onto their detail rows, then keep charge rows only
complete_data <- total_summary %>%
tidyr::fill(case_num, .direction = "down") %>%
tidyr::fill(court_date, .direction = "down") %>%
tidyr::fill(defendant_name, .direction = "down") %>%
tidyr::fill(plaintiff_name, .direction = "down") %>%
tidyr::fill(plaint_attorney, .direction = "down") %>%
tidyr::fill(charge, .direction = "up") %>%
filter(!is.na(charge), !is.na(class))
# numeric case id for de-duplication
case_small <- complete_data %>%
mutate(case = str_remove_all(case_num, " ")) %>%
mutate(case = str_remove_all(case, "CR"),
case = as.integer(case)) %>%
unique()
# save --------------------------------------------------------------------
write_csv(case_small, here("outputs", "cleaned_court_information.csv"))
|
5752b86d96308e6c3d73973356a9c1214adeacb9
|
f6150b8fe6f9dc44be22cd470969afacb44efe51
|
/trait_phylo_data_processing/deprecated/processtry_11jul.r
|
cd1d8b9ec137fa1cdf95eba0f370dad345199cf4
|
[] |
no_license
|
qdread/nasabio
|
83e83a4d0e64fc427efa7452033eb434add9b6ee
|
7c94ce512ae6349d84cb3573c15be2f815c5758d
|
refs/heads/master
| 2021-01-20T02:02:53.514053
| 2019-12-28T15:22:53
| 2019-12-28T15:22:53
| 82,062,690
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,006
|
r
|
processtry_11jul.r
|
# Updated 17 Aug for newest data.
#try_all <- read.delim('C:/Users/Q/Dropbox/projects/nasabiodiv/fia_try_04jul2017.txt',stringsAsFactors = FALSE, quote = '')
# raw TRY trait export (tab-delimited; quote='' because values contain quotes)
try_all <- read.delim('C:/Users/Q/google_drive/NASABiodiversityWG/Trait_Data/fia_try_17aug/3404.txt',stringsAsFactors = FALSE, quote = '')
# measurements per species x trait, and units per trait (inspection tables)
nmeas <- table(try_all$AccSpeciesName, try_all$TraitName)
unittable <- table(try_all$TraitName, try_all$OrigUnitStr)
# Create new column called flag and fill with NA's
try_all["flag"] <- NA
# Mark rows whose Latitude/Longitude value is the literal string "0":
# zero coordinates are treated as suspect and flagged for exclusion.
flagLatLong <- function(x) {
  is_coordinate <- x$DataName %in% c("Latitude", "Longitude")
  is_zero_string <- x$OrigValueStr == "0"
  x$flag[is_coordinate & is_zero_string] <- "flag coordinates"
  x
}
# Calls flagLatLong function on the data (writes 'flag coordinates' markers)
try_all <- flagLatLong(try_all)
# Remove some of the columns that have irrelevant information.
# Numeric traits use StdValue instead of OrigValueStr
# Blank out OrigValueStr entries that parse as numbers: numeric traits use
# StdValue downstream, so only character (categorical) originals are kept.
#
# Args:
#   x: data frame with a character column OrigValueStr.
# Returns x with numeric-looking OrigValueStr entries replaced by NA.
useStdValueIfNumeric <- function(x) {
  # BUGFIX/cleanup: previously called the S3 method is.na.data.frame()
  # directly on an atomic vector, which only worked by accident.
  # suppressWarnings: as.numeric() warns ("NAs introduced by coercion") for
  # every non-numeric string; those NAs are exactly the entries we keep.
  is_numeric_string <- !is.na(suppressWarnings(as.numeric(x$OrigValueStr)))
  x$OrigValueStr[is_numeric_string] <- NA
  x
}
try_all <- useStdValueIfNumeric(try_all)
#try_all <- try_all[,c('DatasetID','AccSpeciesName','ObservationID','TraitName','DataName','OrigValueStr','UnitName','StdValue', 'OrigUncertaintyStr','UncertaintyName')]
# correct_value = categorical original where present, otherwise StdValue
try_all$correct_value <- try_all$OrigValueStr # Writes character values to correct value column
try_all$correct_value[is.na(try_all$correct_value)] <- try_all$StdValue[is.na(try_all$correct_value)] # Writes Std Value to correct value
#######################################################
# Create one data frame with trait data rows only, and one with spatial data rows only.
# Ignore the rest.
try_nometa <- subset(try_all, TraitName != '')
try_locations <- subset(try_all, DataName %in% c('Longitude','Latitude') & is.na(flag)) # removes flagged coordinates
# Reshape trait locations to only have the ID information and a separate column for latitude and longitude.
library(dplyr)
# Return the most common latitude and longitude for one observation group.
#
# Args:
#   x: data frame slice with DataName ('Latitude'/'Longitude') and StdValue.
# Returns a one-row data.frame(lat, lon); NA when no values are present.
#
# BUGFIX: the original did `lats[which.max(table(lats, useNA='always'))]`,
# which indexes the *original* vector by the position of the max count in the
# sorted-unique table - returning an arbitrary value, not the mode, whenever
# the table's sorted order differs from the input order.
get_coords <- function(x) {
  # mode of v (ties -> first-seen value); NA values count as their own level
  value_mode <- function(v) {
    if (length(v) == 0) return(NA_real_)
    uniq <- unique(v)
    uniq[which.max(tabulate(match(v, uniq)))]
  }
  lats <- x$StdValue[x$DataName == "Latitude"]
  lons <- x$StdValue[x$DataName == "Longitude"]
  data.frame(lat = value_mode(lats), lon = value_mode(lons))
}
# one row of coordinates per species/dataset/observation
try_locations_wide <- try_locations %>%
group_by(AccSpeciesName, DatasetID, ObservationID) %>%
do(get_coords(.))
# Figure out whether individual traits have more than one unit of measurement.
measByUnitTable <- table(try_nometa$TraitName, try_nometa$UnitName)
measByUnitTable[apply(measByUnitTable>0, 1, sum) > 1, ]
# Plant longevity has some blank units and some in years
longevitynounit <- subset(try_nometa, grepl('Plant lifespan',TraitName) & UnitName=='')
# Replace the names with two different values so they cast into separate columns
try_nometa$TraitName[grepl('Plant lifespan',try_nometa$TraitName) & try_nometa$UnitName==''] <- 'Plant lifespan categorical'
try_nometa$TraitName[grepl('Plant lifespan',try_nometa$TraitName) & try_nometa$UnitName=='year'] <- 'Plant lifespan years'
# Seedbank longevity has some entries given as a percentage, some dimensionless, and some in years
seedbanknounit <- subset(try_nometa, grepl('\\(seedbank\\) longevity',TraitName) & UnitName=='dimensionless') # Categorical (e.g. transient, persistent)
try_nometa$TraitName[grepl('\\(seedbank\\) longevity',try_nometa$TraitName) & try_nometa$UnitName=='dimensionless'] <- 'Seedbank longevity categorical'
# LDMC has some with no unit
#ldmcnounit <- subset(try_nometa, grepl('LDMC',TraitName) & UnitName=='')
# They are just missing values
#try_nometa <- subset(try_nometa, !(grepl('LDMC',TraitName) & UnitName==''))
# Sclerophylly has some with no unit. Only a few are in N/mm
scleronounit <- subset(try_nometa, grepl('sclerophylly',TraitName) & UnitName=='')
# They are a variety of things.
try_nometa$TraitName[grepl('sclerophylly',try_nometa$TraitName) & try_nometa$UnitName==''] <- 'Sclerophylly categorical'
library(reshape2) # Package for reshaping data frames and matrices into long or wide format.
# Function to be run on each column to either return the mean or the first character result.
# Use most common character value rather than the first one.
# Collapse a vector of mixed readings to one value: the mean (as a string)
# when any entry parses as numeric, otherwise the most common character
# value (ties broken by the table's sorted order).
mean_with_char <- function(x) {
  as_numbers <- as.numeric(x)
  if (all(is.na(as_numbers))) {
    tallies <- table(x, useNA = "always")
    names(tallies)[which.max(tallies)]
  } else {
    as.character(mean(as_numbers, na.rm = TRUE))
  }
}
# Function to change columns in a df that only consist of numeric strings to numerics.
# Convert a column to numeric when at least one entry parses as a number;
# otherwise return it untouched (genuinely categorical columns).
numstring2num <- function(x) {
  parsed <- as.numeric(x)
  if (all(is.na(parsed))) {
    x
  } else {
    parsed
  }
}
# Cast (convert from long form to wide so that each trait has its own column and can be edited)
data_to_cast <- try_nometa[,c('ObservationID','DatasetID','AccSpeciesName','DataName','TraitName','correct_value')]
# one column per trait; multiple measurements per observation are aggregated
# with mean_with_char (numeric mean, or most common category)
try_byobsmean <- dcast(data_to_cast,
ObservationID+DatasetID+AccSpeciesName ~ TraitName,
value.var='correct_value',
fun.aggregate = mean_with_char)
# Convert all character columns in try_byobsmean that should be numeric to numeric.
try_byobsmean <- as.data.frame(lapply(try_byobsmean, numstring2num))
# Join trait and spatial info.
try_location_trait_byobs <- left_join(try_locations_wide, try_byobsmean)
# # Get a small subset that is actually good data.
# try_testdata <- try_location_trait_byobs %>%
# rename(Specific.leaf.area = Leaf.area.per.leaf.dry.mass..specific.leaf.area..SLA.,
# Leaf.Cmass = Leaf.carbon..C..content.per.leaf.dry.mass,
# Leaf.dry.matter.content = Leaf.dry.mass.per.leaf.fresh.mass..Leaf.dry.matter.content..LDMC.,
# Leaf.Nmass = Leaf.nitrogen..N..content.per.leaf.dry.mass,
# Leaf.Pmass = Leaf.phosphorus..P..content.per.leaf.dry.mass,
# Specific.stem.density = Stem.dry.mass.per.stem.fresh.volume..stem.specific.density..SSD..wood.density.,
# Stomatal.conductance = Stomata.conductance.per.leaf.area) %>%
# select(AccSpeciesName, DatasetID, ObservationID, lat, lon, Specific.leaf.area, Leaf.thickness, Leaf.Cmass, Leaf.Nmass, Leaf.Pmass, Leaf.dry.matter.content, Plant.height, Seed.dry.mass, Specific.stem.density, Stomatal.conductance)
#
# try_smalldataset <- try_testdata %>%
# ungroup %>%
# select(AccSpeciesName, lat, lon, Specific.leaf.area, Leaf.Nmass, Plant.height) %>%
# filter(complete.cases(.))
#
# Centroid of coords
# Centroid of a group's coordinates, averaged over *unique* lat/lon pairs so
# heavily repeated sampling locations do not dominate the centroid.
#
# Args:
#   x: data frame slice with numeric lat and lon columns.
# Returns a one-row data.frame(lat, lon).
# (Replaces na.rm=T with na.rm=TRUE - T is a reassignable variable, not a
# reserved word.)
get_centroid <- function(x) {
  unique_coords <- unique(cbind(x$lat, x$lon))
  data.frame(lat = mean(unique_coords[, 1], na.rm = TRUE),
             lon = mean(unique_coords[, 2], na.rm = TRUE))
}
#
# Get median locations for each species (leave out non-US locations)
try_spp_loc <- try_locations_wide %>%
ungroup %>%
filter(lon < -50, lat > 0) %>%
group_by(AccSpeciesName) %>%
do(get_centroid(.)) %>%
ungroup %>%
mutate(AccSpeciesName = gsub('\\ ', '_', AccSpeciesName))
# write.csv(try_spp_loc, file = 'X:/data/fia/tree_locations.csv', row.names = FALSE)
# Get the unit names for each trait ---------------------------------------
try_unitnames <- try_nometa %>%
group_by(TraitName) %>%
summarize(unit = unique(UnitName))
write.csv(try_unitnames, file = 'C:/Users/Q/google_drive/NASABiodiversityWG/Trait_Data/try_units.csv', row.names = FALSE)
# 17 Aug: Export new trait data -------------------------------------------
fp <- 'C:/Users/Q/google_drive/NASABiodiversityWG/Trait_Data/fia_try_17aug/'
write.csv(try_byobsmean, file.path(fp, 'try_trait_byobs_all.csv'), row.names = FALSE)
write.csv(try_location_trait_byobs, file.path(fp, 'try_trait_byobs_georeferenced.csv'), row.names = FALSE)
# Aggregate by species (mean of every numeric trait column per species).
try_location_trait_byobs_all <- full_join(try_locations_wide, try_byobsmean)
try_trait_byspecies <- try_location_trait_byobs_all %>% ungroup %>%
select(-DatasetID, -ObservationID, -lat, -lon) %>%
group_by(AccSpeciesName) %>%
summarize_if(is.numeric, mean, na.rm = TRUE)
write.csv(try_trait_byspecies %>% left_join(try_spp_loc), file.path(fp, 'try_trait_byspecies.csv'), row.names = FALSE)
|
1f5b6101ef346d14648b23a25bb954e87166ec91
|
aa9088073c3d4f70845c2669c41adab105e2325b
|
/example_data/example_subtyping.R
|
0caa072f941c0d42657df7c7ea24af38c70f765f
|
[
"MIT"
] |
permissive
|
thchen86/TSED
|
96171d61166d0a9fca3182e8f0fa61080a250b20
|
1040eaa02085bc72240bc6d7ffed919256b5995e
|
refs/heads/master
| 2020-03-17T16:40:18.906918
| 2018-05-17T04:28:25
| 2018-05-17T04:28:25
| 133,758,247
| 1
| 1
|
MIT
| 2018-05-17T04:28:26
| 2018-05-17T04:21:11
| null |
UTF-8
|
R
| false
| false
| 394
|
r
|
example_subtyping.R
|
# Example driver: classify ovarian-cancer expression data with TSED.
# Loads the subtyping functions, then runs tumor_subtyping() on the
# bundled OV log2-TPM matrix. NOTE(review): setwd() changes global state -
# run this script in its own session.
source("D:/Tools/TSED/expression_data_tumor_subtyping.R")
source("D:/Tools/TSED/centroid_clustering.R")
setwd("D:/Tools/TSED/example_data")
indication <- "OV"
inFile<-paste0("D:/Tools/TSED/example_data/OV_log2TPM_for_classification.txt")
datalib_dir<- "D:/Tools/TSED"
tumor_subtyping(indication=indication,inFile=inFile,prefix=NA,
datalib_dir=datalib_dir)
|
dd2627db1976eff6a4ff40bc4109dad40f0a019f
|
ea86627550c45a67d1f0128dc020e65541c6c66e
|
/man/tab.Rd
|
0dc455bcef1793d846334d34a0e3e0fbe932934e
|
[] |
no_license
|
nurahjaradat/qacr
|
2553b7f38f4d49700a7fd737fd39da3e2a57f0ae
|
c1076abcde6067edb116b5035ad9421f7f4450d4
|
refs/heads/master
| 2020-08-30T11:21:30.879609
| 2019-12-10T20:24:09
| 2019-12-10T20:24:09
| 218,364,420
| 0
| 0
| null | 2019-10-29T19:13:14
| 2019-10-29T19:13:12
| null |
UTF-8
|
R
| false
| true
| 1,252
|
rd
|
tab.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tab.R
\name{tab}
\alias{tab}
\title{Frequency Distribution for a categorical variable}
\usage{
tab(data, x, sort = FALSE, maxcat = NULL, minp = NULL,
na.rm = FALSE, total = FALSE, digits = 2)
}
\arguments{
\item{data}{A dataframe}
\item{x}{A factor in the dataframe}
\item{sort}{Sort levels from high to low, Default: FALSE}
\item{maxcat}{Maximum number of categories to be included.
Will combine extra categories into another category. Default: NULL}
\item{minp}{Minimum proportion for a category, combines extra categories into
another category. Cannot specify both maxcat and minp. Default: NULL}
\item{na.rm}{Removes missing values when TRUE, Default: FALSE}
\item{total}{Include a total category when true, Default: FALSE}
\item{digits}{Number of digits the percents should be rounded to, Default: 2}
}
\value{
A data.frame
}
\description{
Computes the frequency distribution (level, count, and percent) for a
categorical variable in a data frame.
}
\details{
The function \code{tab} will calculate the frequency distribution
for a categorical variable and output a data.frame with three columns:
level, n, percent.
}
\examples{
\dontrun{
frequency <- tab(venues, state, sort = TRUE, na.rm = TRUE, maxcat = 6, digits = 3)
print(frequency)
}
}
|
5246fee8badf6e0104091b89ff81d15df662a7d2
|
4c668236dfd7c763508835347abf1e85bb96c365
|
/man/lib_remove_doi_http.Rd
|
e3be71f8ee8ba58805b67700fd3d3eb7697040d1
|
[
"MIT"
] |
permissive
|
jeksterslabds/jeksterslabRlib
|
ad59ba97069b45e105602b750a4bc46b5ffbd0b0
|
cd6c1282d576bad34850954d3a665e09e1c257ca
|
refs/heads/master
| 2021-08-10T14:35:38.870081
| 2021-01-15T06:24:50
| 2021-01-15T06:24:50
| 250,654,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,146
|
rd
|
lib_remove_doi_http.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lib_remove_doi_http.R
\name{lib_remove_doi_http}
\alias{lib_remove_doi_http}
\title{Remove DOI HTTP.}
\usage{
lib_remove_doi_http(doi)
}
\arguments{
\item{doi}{Character vector.
A vector of Digital Object Identifiers.}
}
\description{
Removes DOI HTTP using the following patters:
\itemize{
\item \verb{http://dx.doi.org/}
\item \verb{http://doi.org/}
\item \verb{https://dx.doi.org/}
\item \verb{https://doi.org/}
}
}
\examples{
# Single DOI
doi <- "https://doi.org/10.1016/j.adolescence.2014.04.012"
lib_remove_doi_http(doi = doi)
# Vector of DOIs
doi <- c(
"https://doi.org/10.1016/j.adolescence.2014.04.012",
"https://doi.org/10.1007/s11469-015-9612-8",
"https://doi.org/10.1016/j.addbeh.2014.10.002",
"https://doi.org/10.1007/s10566-014-9275-9",
"https://doi.org/10.1037/a0039101",
"https://doi.org/10.1016/j.paid.2016.10.059",
"https://doi.org/10.1016/j.addbeh.2016.03.026",
"https://doi.org/10.1080/10826084.2019.1658784",
"https://doi.org/10.1016/j.addbeh.2018.11.045"
)
lib_remove_doi_http(doi = doi)
}
\author{
Ivan Jacob Agaloos Pesigan
}
|
c4f7761fbaece85aac01209d8038d0fa840575ee
|
afefe95e9a5075fcb6297c1ff47d10fe7933e861
|
/src/generate_deseq_analysis_objects.R
|
97bddd9cf679a9c041dc79ef523735e3b0c39738
|
[
"MIT"
] |
permissive
|
lzamparo/AML_ATAC
|
902045ba847ace44f5b73d0d6db4b08614af20e5
|
7869d5df43e3aa56c72b4255756bd7e9e004db55
|
refs/heads/master
| 2020-04-10T18:00:00.088938
| 2019-04-25T02:40:36
| 2019-04-25T02:40:36
| 161,190,803
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,429
|
r
|
generate_deseq_analysis_objects.R
|
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing - library() would fail fast here.
require(data.table)
require(DESeq2)
require(plyr)
require(stringr)
require(Biobase)
require(GenomicRanges)
require(BiocParallel)
require(ggplot2)
require(readxl)
require(ggrepel)
# 4 workers for the parallel DESeq2 calls below
register(MulticoreParam(4))
### compute median coverage for each peak in candidate set & experiment, to provide DESeq2 style raw-counts data.
# go to counts dir
setwd('/Users/zamparol/projects/AML_ATAC/data/counts')
# load all timepoints data into
conditions <- list.files(path = ".", include.dirs = TRUE)
reps <- list.files(path = ".", recursive=TRUE, pattern = "*trimmed_001_read_counts.bed")
# split "condition/replicate" paths into two columns
files <- ldply(strsplit(reps, '/', fixed = TRUE))
colnames(files) <- c("condition","replicate")
files$Filename = str_split(files$replicate, "_S[0-9]{1,2}", simplify=TRUE)[,1]
# Drop S3; it does not contain the SRSF2 mutation :/
# Also drop: A3 (ASXL1_34), SA1 (SfA_40), they show strange patterns from RNA-seq analyses
files = data.table(files)
files = files[!(Filename %in% c("SRSF2_c1_8", "ASXL1_34_1_repeat", "SfA_40")),]
# load data by condition & replicate into a count matrix
# Read one replicate's bedtools count file and return its per-peak counts.
# myrow is a character vector (condition, replicate, ...) as produced by
# apply() over the `files` table; the trailing "all" summary row is dropped.
get_counts <- function(myrow) {
  bed <- read.delim(file = paste(myrow[1], myrow[2], sep = "/"), header = FALSE)
  names(bed) <- c("chrom", "start", "end", "count")
  bed <- data.table(bed)
  bed[chrom != "all", count]
}
# one column per replicate, in the row order of `files` (apply() coerces the
# table to a character matrix, so get_counts receives character fields)
count_matrix <- as.matrix(cbind(apply(files, 1, get_counts)))
# get the rownames for the count matrix
# Build "chrom-start-end" peak identifiers from one replicate's count file
# (all replicates share the same peak set, so any row of `files` works).
get_peak_names <- function(myrow) {
  bed <- read.delim(file = paste(myrow$condition, myrow$replicate, sep = "/"), header = FALSE)
  names(bed) <- c("chrom", "start", "end", "count")
  bed <- data.table(bed)
  bed[chrom != "all", paste(chrom, start, end, sep = "-")]
}
# Peak identifiers are taken from the first replicate's file; all replicates
# are assumed to share the same peak set and row order.
peaks <- get_peak_names(files[1,])
rownames(count_matrix) <- peaks
# prepare the col_data object (sample sheet mapping file names to conditions)
translator <- read_excel("~/projects/AML_ATAC/data/filename_to_sample.xlsx")
col_data <- data.table(translator)
# ensure the batch is taken as a factor
col_data[, Batch := factor(Batch, levels = c(1,2))]
col_data[, Condition := factor(Condition, levels = c("P", "A", "S", "SA", "SAR", "SARF", "SARN"))]
col_data[, Adapter := factor(c(rep("Good",10),"Bad", rep("Good",10)), levels=c("Good","Bad"))] # Including adapter data from Tiansu
# drop S3 (three excluded replicates by file name)
col_data = col_data[!(Filename %in% c("SRSF2_c1_8", "ASXL1_34_1_repeat", "SfA_40")),]
# set colnames of the count_matrix to same
# *** ENSURE THAT THE COLNAMES OF THE COUNT MATRIX CORRESPOND WITH THE ORDER IN WHICH THE COLUMNS OF THE MATRIX WERE READ IN: i.e `files` ***
setkey(col_data, Filename)
reordered_col_data <- col_data[files$Filename, ]
# NOTE(review): the local `rownames` below shadows base::rownames for the rest
# of this script; consider renaming. Also, data.table does not truly support
# row names -- confirm that rownames(reordered_col_data) round-trips as
# intended in the sanity check below.
rownames <- reordered_col_data[, paste(Condition, Replicate, sep="_")]
rownames(reordered_col_data) <- rownames
colnames(count_matrix) <- rownames
# Sanity check
if (!all(rownames(reordered_col_data) == colnames(count_matrix))){
print("Mismatch in count matrix column names, rownames of reordered_col_data")
stop()
}
# Set the DDS object: counts modeled with batch as a covariate,
# condition as the effect of interest.
dds <- DESeqDataSetFromMatrix(countData = count_matrix,
colData = reordered_col_data,
design = ~ Batch + Condition)
# load the flanks dds, transfer the estimated size factors
#flanks_dds <- readRDS("../../results/flank_dds.rds")
#sizeFactors(dds) <- sizeFactors(flanks_dds)
dds_peaks <- DESeq(dds, parallel=TRUE)
# FDR threshold used for every contrast below.
sig_alpha <- 0.05
### Get the results for each treatment condition versus P
# NOTE(review): in the `idx <- res$padj < sig_alpha` pattern below, peaks whose
# padj is NA (removed by independent filtering) produce NA entries in the
# *_diff_peaks vectors; consider which(res$padj < sig_alpha) instead.
# A vs P
res_A_P <- results(dds_peaks, contrast=c("Condition", "A", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_A_P)
idx <- res_A_P$padj < sig_alpha
A_P_diff_peaks <- rownames(res_A_P)[idx]
# S vs P
res_S_P <- results(dds_peaks, contrast=c("Condition", "S", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_S_P)
idx <- res_S_P$padj < sig_alpha
S_P_diff_peaks <- rownames(res_S_P)[idx]
# SA vs P
res_SA_P<- results(dds_peaks, contrast=c("Condition", "SA", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SA_P)
idx <- res_SA_P$padj < sig_alpha
SA_P_diff_peaks <- rownames(res_SA_P)[idx]
# SAR vs P
res_SAR_P <- results(dds_peaks, contrast=c("Condition", "SAR", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SAR_P)
idx <- res_SAR_P$padj < sig_alpha
SAR_P_diff_peaks <- rownames(res_SAR_P)[idx]
# SARF vs P
res_SARF_P <- results(dds_peaks, contrast=c("Condition", "SARF", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SARF_P)
idx <- res_SARF_P$padj < sig_alpha
SARF_P_diff_peaks <- rownames(res_SARF_P)[idx]
# SARN vs P
res_SARN_P <- results(dds_peaks, contrast=c("Condition", "SARN", "P"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SARN_P)
idx <- res_SARN_P$padj < sig_alpha
SARN_P_diff_peaks <- rownames(res_SARN_P)[idx]
### Now look at SA vs S, SA vs A, SAR vs SA, SARN vs SAR , SARF vs SAR
res_SA_S <- results(dds_peaks, contrast=c("Condition", "SA", "S"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SA_S)
idx <- res_SA_S$padj < sig_alpha
SA_S_diff_peaks <- rownames(res_SA_S)[idx]
res_SA_A <- results(dds_peaks, contrast=c("Condition", "SA", "A"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SA_A)
idx <- res_SA_A$padj < sig_alpha
SA_A_diff_peaks <- rownames(res_SA_A)[idx]
res_SAR_SA <- results(dds_peaks, contrast=c("Condition", "SAR", "SA"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SAR_SA)
idx <- res_SAR_SA$padj < sig_alpha
SAR_SA_diff_peaks <- rownames(res_SAR_SA)[idx]
res_SARN_SAR <- results(dds_peaks, contrast=c("Condition", "SARN", "SAR"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SARN_SAR)
idx <- res_SARN_SAR$padj < sig_alpha
SARN_SAR_diff_peaks <- rownames(res_SARN_SAR)[idx]
res_SARF_SAR <- results(dds_peaks, contrast=c("Condition", "SARF", "SAR"), alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
summary(res_SARF_SAR)
idx <- res_SARF_SAR$padj < sig_alpha
SARF_SAR_diff_peaks <- rownames(res_SARF_SAR)[idx]
# Output directory for all BED files written below (paths are relative
# to this directory from here on).
setwd('../../results/DESeq')
# write out the SAR vs P diff peaks
bed_matrix <- str_split_fixed(SAR_P_diff_peaks, "-", 3)
bed_df <- as.data.frame(bed_matrix)
write.table(bed_df, "../peaks/SAR_vs_P_diff_peaks.bed", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# collect those peaks that are differential among the A-P, SA-P, SAR-P, SARN-P, SARF-P pairwise comparisons
union_diff_peaks_conditions_vs_P <- unique(do.call(c, list(A_P_diff_peaks, SA_P_diff_peaks,SAR_P_diff_peaks, SARF_P_diff_peaks,SARN_P_diff_peaks)))
bed_matrix <- str_split_fixed(union_diff_peaks_conditions_vs_P, "-", 3)
bed_df <- as.data.frame(bed_matrix)
write.table(bed_df, "../peaks/all_conditions_vs_P_diff_peaks.bed", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# collect those peaks that are differential among the A-P, SA-A, SAR-SA, SARN-SAR, SARF-SAR pairwise comparisons
union_diff_peaks_A_first <- unique(do.call(c, list(A_P_diff_peaks, SA_A_diff_peaks, SAR_SA_diff_peaks, SARN_SAR_diff_peaks, SARF_SAR_diff_peaks)))
bed_matrix <- str_split_fixed(union_diff_peaks_A_first, "-", 3)
bed_df <- as.data.frame(bed_matrix)
write.table(bed_df, "../peaks/P_A_SA_SAR_SARN_SARF_diff_peaks.bed", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# collect those peaks that are differential among the S-P, SA-S, SAR-SA, SARN-SAR, SARF-SAR pairwise comparisons
union_diff_peaks_S_first <- unique(do.call(c, list(S_P_diff_peaks, SA_S_diff_peaks, SAR_SA_diff_peaks, SARN_SAR_diff_peaks, SARF_SAR_diff_peaks)))
bed_matrix <- str_split_fixed(union_diff_peaks_S_first, "-", 3)
bed_df <- as.data.frame(bed_matrix)
write.table(bed_df, "../peaks/P_S_SA_SAR_SARN_SARF_diff_peaks.bed", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# collect those peaks that are differential among any of the comparisons. Rainbow product FTW.
# combn() gives all 21 unordered condition pairs; rows dropped below:
# row 2 (A vs S) and row 21 (SARF vs SARN) do not belong, and row 1 (A,P)
# must be inverted so each contrast reads treatment-vs-baseline.
all_conditions <- t(combn(unique(reordered_col_data[,Condition]),2))
first_args = c(as.character(all_conditions[1,1]), as.character(all_conditions[3:20,2]))
second_args = c(as.character(all_conditions[1,2]), as.character(all_conditions[3:20,1]))
get_diff_peaks <- function(first, second){
  # Run the DESeq2 contrast `first` vs `second` on the global `dds_peaks`
  # object and return the names of peaks significant at the global
  # `sig_alpha` FDR threshold.
  res_contrast <- results(dds_peaks, contrast=c("Condition", first, second),
                          alpha=sig_alpha, independentFiltering=TRUE, parallel=TRUE)
  # Fix: use sig_alpha (previously hard-coded 0.05, inconsistent with the
  # alpha= argument above), and use which() so peaks with padj == NA
  # (removed by independent filtering) are not returned as NA names.
  idx <- which(res_contrast$padj < sig_alpha)
  rownames(res_contrast)[idx]
}
# Run every remaining pairwise contrast. Map() (unlike mapply()) always
# returns a list, so do.call(c, diff_list) below cannot break if all
# contrasts happen to return equal-length vectors, which mapply() would
# silently simplify to a matrix.
diff_list = Map(get_diff_peaks, first_args, second_args)
# Union of peaks differential in at least one pairwise comparison.
diff_list_set = unique(do.call(c, diff_list))
bed_matrix <- str_split_fixed(diff_list_set, "-", 3)
bed_df <- as.data.frame(bed_matrix)
write.table(bed_df, "../peaks/all_pairwise_comparisons_diff_peaks.bed", sep="\t", row.names=FALSE, col.names=FALSE, quote=FALSE)
# Save the DDS object plus every per-contrast results object for later
# retrieval (downstream notebooks re-load these instead of re-running DESeq).
setwd('./objects/')
saveRDS(dds_peaks, file = "dds_object.rds")
saveRDS(res_A_P, file = "res_A_P.rds")
saveRDS(res_S_P, file = "res_S_P.rds")
saveRDS(res_SA_P, file = "res_SA_P.rds")
saveRDS(res_SAR_P, file = "res_SAR_P.rds")
saveRDS(res_SARF_P, file = "res_SARF_P.rds")
saveRDS(res_SARN_P, file = "res_SARN_P.rds")
saveRDS(res_SA_A, file = "res_SA_A.rds")
saveRDS(res_SA_S, file = "res_SA_S.rds")
saveRDS(res_SAR_SA, file = "res_SAR_SA.rds")
saveRDS(res_SARN_SAR, file = "res_SARN_SAR.rds")
saveRDS(res_SARF_SAR, file = "res_SARF_SAR.rds")
|
75eb3e38f7b53825f926f93808f9b46799ee744e
|
b12da2cf7bbc0c6d20b58f79a7a033c2f5f3bfb4
|
/server.R
|
7323be97f87ccd0cb3c666857cc893def3e6d356
|
[] |
no_license
|
sherdim/retivo
|
dbe1d1e1fa9512a609e92cd46860d32de04efbb7
|
3f8885b16969865f29000f7cfa9939de20c2f597
|
refs/heads/master
| 2020-12-24T14:26:53.735229
| 2015-02-26T21:07:48
| 2015-02-26T21:07:48
| 22,297,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,504
|
r
|
server.R
|
library(shiny)
library(MASS)
# Shiny server for a reaction-time (RT) distribution explorer: reads an
# uploaded CSV of response times (or a bundled demo file), draws a histogram,
# fits the distribution model chosen in input$model, overlays the fitted
# density, and renders an HTML/MathJax description of the selected model.
shinyServer(function(input, output, session) {
s=''
output$distPlot <- renderPlot({
inFile <- input$file1
if (is.null(inFile)){
#return(NULL)
# debug fallback: demo data shipped with the app
m=read.csv("rt.csv")
x=m$v
}else{
m=read.csv(inFile$datapath)
x=as.matrix(m[-1]) #m$v
}
xMin=0
xMax=input$valid[2] #max(1,max(x))
bins <- seq(min(x), xMax, by=input$tstep)
# draw the histogram with the specified number of bins
h=hist(x, breaks = bins, plot=T, probability=TRUE, main=sprintf('%s',input$model))
#h$counts=h$counts/sum(h$counts)
#plot(h, xlim = c(0,xMax),
# col = 'gray', border = 'darkgreen')
#model
# Censor observations outside the user-selected valid RT window.
x[x<input$valid[1]]=NA
x[x>input$valid[2]]=NA
# Mark median (blue), quartiles (magenta) and mean (green).
abline(v=quantile(x, c(0.5)), col='blue')
abline(v=quantile(x, c(0.25, 0.75)), col='magenta')
M=mean(x)
abline(v=M, col='green')
# Grid on which each fitted density is evaluated for the overlay curve.
tStep=0.004
xx=seq(xMin, xMax, by=tStep)
#text(x=0,y=0.1,labels = input$model)
#method = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN"),
s=''
if (is.element(input$model, c('Density'))){
# Nonparametric kernel density estimate; mode is the grid point with
# maximal density.
fn=(density(x, bw=input$bw, na.rm=T))
xx=fn$x
yy=fn$y
xM=xx[which.max(yy)]
s= sprintf('bandwidth = %.3f, kernel="%s", mode = %.3f', fn$bw,'gaussian',xM)
}else if (input$model=='Weibull-Gaussian'){
#http://stackoverflow.com/questions/23569133/adding-two-random-variables-via-convolution-in-r
# NOTE(review): Z is a pointwise *product* of the Weibull and normal
# densities, not the convolution described in the info text -- confirm.
N = function(x,m,s) dnorm(x,m,s) # normal
W = function(x,v,l) dweibull(x,v,l) # weibull( shape, scale
Z=function(x,m=0.3,s=0.1,v=1.2,l=1) W(x,v,l) * N(x,m,s)
fn=Vectorize(Z)
#op <- options(digits = 3)
#fn= function(x,m,s,v,l) (l/v)*(x/v)^(l-1) * exp(-x/v) * 1/((2*pi)^0.5 * s) * exp(-(x-m)^2/(2*s^2))
p=fitdistr(x, fn, start=list(m=0.3, s=0.05, v=5, l=.3),
#control = list(trace = 1,fnscale=1),
lower=c(0.1, 0.01, 1.01, 0.1),upper = c(1.0,.3,20,0.9))
kk=coef(p)
yy=fn(xx, kk[1],kk[2],kk[3],kk[4])
# Rescale so the curve integrates to ~1 on the plotting grid.
yy=yy*length(yy)/sum(yy)
s=sprintf('mu=%.3f, sigma=%.3f, k=%.3f, lambda=%.3f', kk[1],kk[2],kk[3],kk[4])
}else if (input$model=='Zaitsev-Skorik'){
# Fit by least squares against the empirical CDF rather than ML.
fn=function(x,a=0.02,b=0.3,k=1) k/(a) * exp(-exp((b-x)/a)+(b-x)/a)
#p=fitdistr(x, fn, start=list(a=0.02,b=0.3), lower=c(0.001,0.1), upper=c(1,xMax))
#kk=round(coef(p), 3)
y=ecdf(x)
#y0=log(log(1.0/y))
fcdf = function(p, x) exp(-exp((p[2]-x)/p[1]))
fmin = function(p, x) sum((fcdf(p,x) - y(x))^2)
fit=optim(list(a=0.02,b=0.3), fmin, x=x)
kk=fit$par
yy=fn(xx, kk[1], kk[2]) #/ input$tstep
#yy=yy/sum(yy)
lines(xx, yy, col='black')
s=sprintf('a=%f, b=%f', kk[1],kk[2])
}else if (input$model=='Wald'){
# Inverse Gaussian (Wald) density, fit by ML via fitdistr().
fn = function(x, mu=0.3, l=1) {
return( sqrt(l/2*pi*x^3) * exp(-l*(x-mu)^2/(2*x*mu^2)))
}
p=fitdistr(x, fn, start=list(mu=0.3, l=1), lower=c(0.1,0.1), upper=c(xMax, 1000))
kk=round(coef(p), 3)
yy=fn(xx, kk[1], kk[2])
yy=yy*length(yy)/sum(yy)
lines(xx, yy, col='black')
s=sprintf('mu=%f, lambda=%f', kk[1],kk[2])
}else if (input$model=='ex-Wald'){
# NOTE(review): Z below is a pointwise product of an exponential and a
# Wald density, not the exponential-Wald convolution -- confirm.
W = function(x, mu=0.3, l=1) {
return( sqrt(l/2*pi*x^3) * exp(-l*(x-mu)^2/(2*x*mu^2)))
}
X =function (x, lambda=1) dexp(x,lambda)
Z=function(x,mu=0.3,l=1,lambda=1) X(x,lambda) * W(x,mu,l)
fn=Vectorize(Z)
p=fitdistr(x, fn, start=list(mu=0.3, l=0.1, lambda=1), lower=c(0.1,0.001,0.0001), upper=c(xMax, 20, 1000))
# fn=function(x,mu=0.3,sigma=0.1,a=1,gamma=0.001){
# k=sqrt(mu^2 - 2* gamma * sigma^2)
# #return( gamma * exp(- gamma*t+(a*(mu-k))/sigma^2)) # * pnorm((x-mu)/sigma))
# return( exp( log(gamma) + (- gamma*t+(a*(mu-k))/sigma^2) + log(pnorm((x-mu)/sigma))))
# }
#p=fitdistr(x, fn, start=list(mu=0.3, sigma=0.1, a=1, gamma=0.001), lower=c(0.1,0.001,0.3,0.0001), upper=c(xMax, 1, 1000, 1))
kk=coef(p)
yy=fn(xx, kk[1], kk[2], kk[3])
yy=yy*length(yy)/sum(yy)
lines(xx, yy, col='black')
s=sprintf('mu = %.3f, lambda = %.03f, gamma = %.3f ', kk[1], kk[2], kk[3])
}else if (input$model=='ex-Gaussian'){
#fn=function(t,m,s,v){
#o=integrate(function(y) exp(-y/v)/(v) * 1/((2*pi)^0.5 * s) * exp(-(t-y-m)^2/(2*s^2))), 0, Inf)
#return(o$value)
#}
#-log(nu)+((mu-x)/nu)+(sigma^2/(2*nu^2))+log(pnorm(((x-mu)/sigma)-sigma/nu))
# ex-Gaussian density evaluated on the log scale for numerical stability.
fn=function(t,mu=0.3,sigma=0.03,nu=0.1){
z=t-mu-((sigma^2)/nu)
return( exp(-log(nu)-(z+(sigma^2/(2*nu)))/nu+log(pnorm(z/sigma))))
}
p=fitdistr(x, fn, start=list(mu=0.3, sigma=0.03, nu=0.1), lower=c(0.1,0.01,0.03), upper=c(xMax, 1, 2))
kk=round(coef(p), 3)
yy=fn(xx, kk[1], kk[2], kk[3])
#yy=yy/sum(yy)
lines(xx, yy, col='black')
s=(sprintf('mu = %.3f, sigma = %.03f, nu = 1/lambda = %.3f ',
kk[1], kk[2], kk[3])
)
#when nu<0.05
#dnorm(x, mean=mu, sd=sigma, log=TRUE)
#exgaussalt
#f(x; y_0, A, x_c, w, t_0 )=y_0+\frac{A}{t_0} \exp \left( \frac {1}{2} \left( \frac {w}{t_0} \right)^2 - \frac {x-x_c}{t_0} \right) \left( \frac{1}{2} + \frac{1}{2} \operatorname{erf} \left( \frac {z}{\sqrt{2}} \right) \right) ,
} else{
# Fallback: distributions fitdistr() knows by name (Gamma/Normal/Weibull).
p=fitdistr(x, tolower(input$model))
kk=round(coef(p), 3)
yy=switch(input$model,
Gamma=dgamma(xx, p$estimate[1], p$estimate[2]),
Normal=dnorm(xx, p$estimate[1], p$estimate[2]),
Weibull=dweibull(xx, p$estimate[1], p$estimate[2])
)
s=sprintf('parameters: %.3f, %.3f', kk[1], kk[2])
}
## goodness of fit???
#check http://cran.r-project.org/doc/contrib/Ricci-distributions-en.pdf
#qqPlot(x, distribution="weibull", shape=5.2, scale=0.327)
#title(expression(s))
#h$counts=h$counts/sum(h$counts)
#plot(h, xlim = c(0,xMax),
# col = 'gray', border = 'darkgreen')
# Overlay the fitted density and print the parameter summary string.
lines(xx, yy, col='black', lwd=2)
mtext(s, side=3, adj=1)
})
# HTML description of the selected model, with MathJax re-typesetting
# triggered after each update.
output$info <- renderUI({
HTML(switch(input$model,
Density='Density approximation. You can adjust band width with control.',
Normal='<p>Symmetric Gaussian distribution around (1) mean with (2) variation between
-3σ and 3σ</p>
<p>
$$f(x)=\\frac{1}{\\sqrt{2}\\sigma} \\int e^{-t} dt$$
</p>',
Gamma='Asymmetric distribution. Parameters: (1) shape, (2) scale
$$f(t)=\\frac{(x-\\xi)^{\\beta-1} e^{-\\frac{x-\\xi}{a}}}{a^{\\beta} \\Gamma(\\beta)}$$
',
Weibull='Asymmetric distribution. Parameters: (1) shape, (2) scale
$$ f(t) = \\frac{c}{b^c} t^{c-1} e^{-\\left(\\frac{t}{b}\\right)^c} $$
',
"ex-Gaussian"='ex-Gaussian - convolution of a Gaussian and an exponential distribution,
named by Burbeck & Luce (1982)
$$f(x)=\\frac{1}{\\lambda} e ^{\\frac{\\mu}{\\lambda}+\\frac{\\sigma^2}{2*\\lambda^2}-\\frac{x}{\\lambda}} \\Phi \\left(\\frac{x-\\mu-\\sigma^2/\\lambda}{\\sigma}\\right)$$
<p>
See also:<br/>
<li>
Burbeck, S. L., & Luce, R. D. (1982). Evidence from auditory simple
reaction times for both change and level detectors.
Perception & Psychophysics, 32 (2), 117-133.
<li>Sternberg S. (2014) Reaction Times and the Ex-Gaussian Distribution: When is it Appropriate?<br>
<li>Sternberg, S. (2014). Sequential processes and the shapes of reaction-time distributions.
<li>Van Zandt, T. (2000). How to fit a response time distribution.
Psychonomic Bulletin & Review, 7 (3), 424-465.
<li>Hwang Gu SL, Gau SS, Tzang SW, Hsu WY. The ex-Gaussian distribution of reaction times in adolescents with attention-deficit/hyperactivity disorder. Research in Developmental Disabilities. 2013 Nov;34(11):3709-3719.
</p>
',
'Weibull-Gaussian'='<p>The Weibull-Gaussian is a convolution of the Gaussian (normal) and Weibull distributions.</p>
$$f(t)=\\int \\frac{\\lambda}{\\nu}\\left(\\frac{y}{\\nu}\\right)^{\\lambda-1} e^{\\frac{y}{\\nu}} \\frac{1}{\\sqrt{2*\\pi}\\sigma} e^{-\\frac{(t-y-\\mu)^2}{2\\sigma^2}} dy$$
<p>
<img src="Matsushita_2007.png"/></p>
<li>Matsushita, S. (2007). RT analysis with the Weibull-Gaussian convolution model. Proceedings of Fechner Day, 23(1).
<li>Marmolejo-Ramos, F., & González-Burgos, J. (2013). A power comparison of various tests of univariate normality on Ex-Gaussian distributions. Methodology: European journal of research methods for the behavioral and Social sciences, 9(4), 137.
',
'Wald'=' inverse Gaussian distribution (also known as the Wald distribution)
$$f(x)= \\sqrt{\\frac{\\lambda}{2 \\pi x^3}} e^{\\frac{-\\lambda(x-\\mu)^2}{2x\\mu^2}} $$
',
'ex-Wald'='ex-Wald is the convolution of an exponential and a Wald
distribution that attempts to represent both the decision and response components of a response time (Schwarz, 2001) as a
diffusion process, a continuous accumulation of information towards some decision threshold
$$f(t) = \\gamma exp\\left(-\\gamma t + \\frac{a(\\mu-k)}{\\sigma^2}\\right) * F(t)$$
$$ k = \\sqrt{\\mu^2 - 2\\gamma\\sigma^2} \\geq 0 $$
<p>
<img src="Palmer_Horowitz_2011.png"/></p>
<li>Palmer, E. M., Horowitz, T. S., Torralba, A., & Wolfe, J. M. (2011). What are the shapes of response time distributions in visual search? Journal of Experimental Psychology: Human Perception and Performance, 37.
<li>Schwarz, W. (2001). The ex-Wald distribution as a descriptive model of
response times. Behavioral Research Methods, Instruments & Computers, 33 (4), 457-469.
',
'Zaitsev-Skorik'='<p>
$$f(t)=\\frac{\\lambda}{a} exp\\left[-exp(\\frac{b-x}{a}) + \\frac{b-x}{a}\\right]$$
</p>
<p>
a - razbros, b - moda
</p>
<li>Zaitsev, Skorik, 2002 Mathematical Description of Sensorimotor
Reaction Time Distribution, Human Physiology, No.4
'
), '<script>MathJax.Hub.Queue(["Typeset", MathJax.Hub]);</script>')
})
})
|
f6c2da501a5c2e1a6853e23e15fb8a77e3667d9b
|
9b7b86716dfe2644c53cec99d366ddb947f19b1a
|
/man/as_AssayObject.Rd
|
5d5f5bada32abdbf10a55d75b78582cab265bffb
|
[
"Artistic-2.0"
] |
permissive
|
lima1/sttkit
|
5ddef5da905b84b380a4135926439dbd85dd52d5
|
57dbcf7a90be47bb481d14f9ad5a9569772185f9
|
refs/heads/master
| 2023-08-29T08:32:15.940211
| 2023-08-22T17:44:22
| 2023-08-22T17:44:22
| 233,417,067
| 16
| 7
|
Artistic-2.0
| 2023-07-27T16:22:25
| 2020-01-12T15:51:00
|
R
|
UTF-8
|
R
| false
| true
| 393
|
rd
|
as_AssayObject.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.R
\name{as_AssayObject}
\alias{as_AssayObject}
\title{as_AssayObject}
\usage{
as_AssayObject(object)
}
\arguments{
\item{object}{Currently supported are RCDT and Giotto (spatEnrObj) outputs}
}
\description{
Convert deconvolution output object to a TransferPrediction object
}
\examples{
# as_AssayObject(object)
}
|
fae8bda3d0384be2cceed2ba0ec16ff3cc44bdd6
|
e17eddb34306d7b400ff6fc74e0f94faf8ff4441
|
/simcf/R/gof.R
|
fd38634d6a6204b901c1734a5fc28a1abc580246
|
[] |
no_license
|
jrnold/tile-simcf
|
8e269c0e0ef79779514731cba7d05e19fb9a559d
|
fb2c0f51edf39ccba2e624fe326b571a3bfbe030
|
refs/heads/master
| 2020-12-28T22:54:01.602037
| 2015-05-14T14:35:07
| 2015-05-14T14:35:07
| 34,191,812
| 0
| 1
| null | 2015-04-19T03:16:09
| 2015-04-19T03:16:09
| null |
UTF-8
|
R
| false
| false
| 3,977
|
r
|
gof.R
|
#' Prediction-based goodness of fit measures for categorical models
#'
#' Compute percent correctly predicted and concordance indexes for logistic
#' regression, ordered probit, and similar models.
#'
#' To add.
#'
#' @param res A glm object, e.g., as produced by \code{\link{glm}}.
#' @param y A numeric vector containing the categorical response variable. Not
#' implemented for factors.
#' @param type Character, whether the function should return the goodness of
#' fit for the estimated \code{model}, the goodness of fit for a \code{null}
#' model, or the percentage \code{improve}-ment from the latter to the former.
#' @param x The matrix of covariates.
#' @param b The vector of estimated parameters, with cutpoints at the end.
#' There should be k-2 cutpoints for a model with k response categories. Not
#' currently compatible with \code{polr}.
#' @param constant Numeric, the position of the element of \code{b} containing
#' the constant term; set to \code{NA} for no constant.
#' @param ncat Number of response categories.
#' @return Returns either the percent correctly predicted, the concordance
#' index, or the percent improvement in these goodness of fit measures.
#' @author Christopher Adolph <\email{cadolph@@u.washington.edu}>
#' @keywords htest
#' @name pcp.glm
#' @rdname pcp.glm
#' @export
pcp.glm <- function(res, y, type="model") { # other types: null, improve
  # Share of observations whose rounded fitted probability equals y.
  pcp <- mean(round(predict(res,type="response"))==y)
  # Null model: always predict the more common of the two classes.
  pcpNull <- max(mean(y), mean(1-y))
  # Proportional reduction in prediction error relative to the null model.
  pcpImprove <- (pcp-pcpNull)/(1-pcpNull)
  # Bug fix: the documented default type is "model", but this function used
  # to test for "pcp" only, so the default call silently returned NULL.
  # "pcp" remains accepted for backward compatibility.
  if (type=="model" || type=="pcp")
    return(pcp)
  if (type=="null")
    return(pcpNull)
  if (type=="improve")
    return(pcpImprove)
}
#' @export
#' @rdname pcp.glm
pcp.oprobit <- function(x, y, b, constant=1, ncat=3, type="model") { # other types: null, improve
  # oprobitsimev() expects a matrix of parameter draws, so replicate the
  # point estimates into 100 identical "simulation" rows.
  draws <- matrix(b, nrow = 100, ncol = length(b), byrow = TRUE)
  simy <- oprobitsimev(x, draws, constant = constant, cat = ncat)
  cats <- sort(unique(y))
  # Null prediction: the modal observed category.
  nullcat <- cats[rev(order(table(y)))][1]
  # Model prediction per observation: the category with the largest
  # simulated probability; ties break toward the later category, matching
  # the rev(order(.))[1] rule of the original loop.
  modelcat <- vapply(
    seq_along(y),
    function(i) cats[rev(order(simy$pe[i, ]))][1],
    cats[1]
  )
  pcp <- mean(modelcat == y)
  pcpNull <- mean(nullcat == y)
  # Proportional reduction in error relative to the modal-category null.
  pcpImprove <- (pcp - pcpNull) / (1 - pcpNull)
  if (type == "model")
    return(pcp)
  if (type == "null")
    return(pcpNull)
  if (type == "improve")
    return(pcpImprove)
}
#' @export
#' @rdname pcp.glm
concord.glm <- function(res, y, type="model") { # other types: null, improve
  # Fitted probabilities from the estimated model.
  fitted_probs <- function() predict(res, type = "response")
  # Constant "prediction" at the modal observed rate (null model).
  null_probs <- function() rep(max(mean(y), mean(1 - y)), length(y))
  if (type == "model") {
    concord <- roc.area(y, fitted_probs())$A
  }
  if (type == "null") {
    concord <- roc.area(y, null_probs())$A
  }
  if (type == "improve") {
    # Proportional gain in concordance over the null model.
    model <- roc.area(y, fitted_probs())$A
    null <- roc.area(y, null_probs())$A
    concord <- (model - null) / (1 - null)
  }
  concord
}
#' @export
#' @rdname pcp.glm
concord.oprobit <- function(x, y, b, constant=1, ncat=3, type="model") { # other types: null, improve
# Expand the point estimates into 100 identical rows, as oprobitsimev()
# expects a matrix of simulated parameter draws.
b <- matrix(b,nrow=100,ncol=length(b),byrow=TRUE)
simy <- oprobitsimev(x, b, constant=constant, cat=ncat)
cats <- sort(unique(y))
if (type=="model") {
# Concordance per category (one-vs-rest ROC area), averaged over categories.
model <- rep(NA,ncat)
for (j in 1:ncat) {
yhat <- simy$pe[,j]
model[j] <- roc.area(as.numeric(y==cats[j]), yhat)$A
}
concord <- mean(model)
}
if (type=="null") {
# Null concordance: predict each category at its observed base rate.
null <- rep(NA,ncat)
for (j in 1:ncat) {
probs <- rep(mean(y==cats[j]), length(y))
null[j] <- roc.area(as.numeric(y==cats[j]), probs)$A
}
concord <- mean(null)
}
if (type=="improve") {
# Per-category proportional gain of model concordance over the null,
# averaged over categories.
improve <- rep(NA,ncat)
for (j in 1:ncat) {
probs <- rep(mean(y==cats[j]), length(y))
null <- roc.area(as.numeric(y==cats[j]), probs)$A
yhat <- simy$pe[,j]
model <- roc.area(as.numeric(y==cats[j]), yhat)$A
improve[j] <- (model-null)/(1-null)
}
concord <- mean(improve)
}
concord
}
|
a107cd2a62e880f793846349a7c08aaf3dd70560
|
ec4536f9edbb1ccbf582204e24dd5e1d72afd8e2
|
/R/papum-rf-tree-cover-transition-probability.R
|
86f90db8cd4119f62e2f410fda2099cd4f4e9761
|
[] |
no_license
|
monsoonforest/scripts-forever
|
2d5f9a7fb18bdc2c34f931eb4ad0ce20a17eedac
|
45c8dd0eb9c4d7b12e78a68e7402479687e012dc
|
refs/heads/master
| 2023-06-15T06:10:31.206443
| 2021-07-07T15:22:18
| 2021-07-07T15:22:18
| 159,921,435
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,654
|
r
|
papum-rf-tree-cover-transition-probability.R
|
## TRANSITION PROBABILITY BETWEEN TC 2000 TO TC 2010
## count number of pixels of a certain value in a raster
## create a variable with all required package names
packages <- c("raster", "gridExtra","data.table", "rgdal", "rgeos", "maptools","dplyr", "ncdf4", "ggplot2", "tibble", "wesanderson", "scales")
## attach all packages using lapply
lapply(packages, library,character.only=TRUE)
## CALL BOTH THE RASTERS INTO MEMORY
treecover2000 <- raster("tree-cover-2000-papum-rf-clip-utm46.tif")
treecover2010 <- raster("tree-cover-2010-papum-rf-clip-utm46.tif")
## CONVERT THE 2000 TREE COVER LAYER INTO A GRID OF POINTS
treecover2000pts <- rasterToPoints(treecover2000)
## DEFINE A VARIABLE XY THAT REFERS TO THE COLUMNS X AND Y IN THE POINTS FILE
xy <- treecover2000pts[,c(1,2)]
## CONVERT THE POINTS FILE INTO A SPATIAL POINTS DATAFRAME (UTM zone 46)
treecoverptssp <- SpatialPointsDataFrame(coords = xy, data=as.data.frame(treecover2000pts), proj4string = CRS("+proj=utm +zone=46 +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0"))
## CREATE A SEQUENCE OF ID's FOR THE ROWS THUS GIVING EACH PIXEL A UNIQUE ID
treecoverptssp$ID <- seq.int(nrow(treecoverptssp))
## EXTRACT TREE COVER 2000 AND 2010 VALUES AT THE SAME PIXEL LOCATIONS
trial <- data.frame(treecoverptssp$ID, raster::extract(treecover2000, treecoverptssp), raster::extract(treecover2010, treecoverptssp))
## RENAME THE DATAFRAME
names(trial) <- c("ID","treecover2000", "treecover2010")
trial$treecover2000 <- as.numeric(trial$treecover2000)
trial$treecover2010 <- as.numeric(trial$treecover2010)
# Bin percent tree cover into ten decade-wide classes for both years.
trial$TC2000bins <- cut(trial$treecover2000, breaks=c(0,10,20,30,40,50,60,70,80,90,100), labels=c("0-10","11-20","21-30","31-40","41-50","51-60","61-70","71-80","81-90","91-100"))
trial$TC2010bins <- cut(trial$treecover2010, breaks=c(0,10,20,30,40,50,60,70,80,90,100), labels=c("0-10","11-20","21-30","31-40","41-50","51-60","61-70","71-80","81-90","91-100"))
# cut() drops exact 0 values to NA (intervals are (0,10] etc.); fold them
# into the lowest class.
trial$TC2010bins[is.na(trial$TC2010bins)] <- "0-10"
myCols <- c("TC2000bins", "TC2010bins")
jnk <- trial %>% select(match(myCols,names(.)))
# Cross-tabulate 2000 vs 2010 classes (pixel counts per transition).
countsbygroup <- table(jnk[,c("TC2000bins","TC2010bins")])
library(ca)
## CORRESPONDENCE ANALYSIS
plot(ca(countsbygroup))
# Row-normalize counts to get transition probabilities from each 2000 class.
transitionmatrix <- countsbygroup/rowSums(countsbygroup)
# NOTE(review): melt() needs reshape2 and scale_fill_viridis() needs the
# viridis package; neither is in `packages` above -- confirm they are
# attached elsewhere.
transitionmatrixmelt <- melt(transitionmatrix)
ggplot(transitionmatrixmelt, aes(TC2000bins, TC2010bins, fill=value))+scale_fill_viridis() + coord_equal() + xlab("TREE COVER 2000") +ylab("TREE COVER 2010") + geom_tile()
## THE N SIZE OF EACH TREE COVER CLASS IS UNEQUAL, HENCE THE CONFIDENCE INTERVAL OF THE PROBABILITIES WILL BE DIFFERENT
## TRY TRANSITION PROBABILITY FROM FOREST TO NO FOREST
|
0af95b7cba8814fc16d34bed03d097d3db447628
|
cd894190d89701829548250cc98a714c5a3c4060
|
/R/CO2_ppm.R
|
989be50b4b0509dc7863a9f6396a4cd3771a5276
|
[] |
no_license
|
hdykiel/CO2
|
f33ff6fa5c644796bb61cae52f3b0021fa15c38d
|
92d93a6066eda286f5da584eec541173eebc5109
|
refs/heads/master
| 2021-01-15T03:02:32.145607
| 2020-02-25T17:05:09
| 2020-02-25T17:05:09
| 242,857,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 419
|
r
|
CO2_ppm.R
|
# Shared user agent sent with every request so the service can identify this
# client (see httr::user_agent()). Defined before the roxygen block so the
# documentation below attaches to ppm(), not to this constant.
ua <- httr::user_agent("https://github.com/hdykiel/CO2")

#' Get CO2 Parts Per Million
#'
#' Retrieves the current atmospheric CO2 concentration (in parts per million)
#' from http://hqcasanova.com/co2/.
#'
#' @return A single numeric value: the CO2 concentration in ppm.
#' @export
#'
#' @examples
#' \dontrun{
#' ppm()
#' #> [1] 414.01
#' }
ppm <- function() {
  # send GET request
  resp <- httr::GET("http://hqcasanova.com/co2/", ua)
  # parse response; the page body is plain text like "414.01 ppm",
  # so strip the unit suffix and coerce to numeric.
  content <- httr::content(resp)
  as.numeric(sub(" ppm", "", content))
}
|
a499237444bbb8e9711f2d94f9e0f59f1e19dbb7
|
b85191b3e41c77470a97855bbce88577afabcd28
|
/test_spblock.R
|
8c4df73c17cf84502100b4151d381d4f2e95bafd
|
[] |
no_license
|
wangx23/subgroup_st
|
4008f49064ef51192e4abb16b8a3e143c041fd00
|
a7d2c11a899060baf183cd639cbf3da300e657a7
|
refs/heads/master
| 2020-09-08T11:13:24.492790
| 2019-12-05T16:31:50
| 2019-12-05T16:31:50
| 221,117,635
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,335
|
r
|
test_spblock.R
|
##### test for irregular matrix with spatial block####
######## sp matrix ###
library(glmnet)
library(lars)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(tidyverse)
# Simulate a two-group spatial field observed over ntime years: the spatial
# split point changes after `time1` (n1 rows in group 1 for years 1..time1,
# n2 rows afterwards), then recover the block structure with lasso fits
# (glmnet / lars) on a lower-triangular difference basis.
n = 10 ## regular grid size
ntime = 6
n0 = n^2
n1 = 50
n2 = 30
time1 = 3
# Replicate the n x n grid coordinates once per year (Kronecker product).
grids = matrix(1,ntime,1) %x% as.matrix(expand.grid(1:n,1:n))
colnames(grids) = c("x","y") ### row number and column number
grids = as.data.frame(grids)
# Two well-separated groups (means +4 / -4, sd 0.1); group split changes
# from n1 to n2 after year time1.
Ymat = matrix(0, n0, ntime)
Ymat[1:n1,1:time1] = rnorm(n1*time1)*0.1 + 4
Ymat[(n1+1):n0,1:time1] = rnorm((n0-n1)*time1)*0.1 - 4
Ymat[1:n2,(time1+1):ntime] = rnorm(n2*(ntime - time1))*0.1 + 4
Ymat[(n2+1):n0,(time1 + 1):ntime] = rnorm((n0-n2)*(ntime - time1))*0.1 - 4
datadf = grids %>% mutate(
year = rep(1:ntime, each = n^2),
grouptime = rep(1:2, each = ntime/2*n*n),
groupsp = c(rep(rep(1:2, c(n1,n0-n1)),time1), rep(rep(1:2, c(n2,n0-n2)),ntime - time1)),
obs = c(Ymat)) %>%
mutate(grouptime = as.factor(grouptime),
groupsp = as.factor(groupsp))
# Visual check: observed field before vs after the split-point change.
ggplot(data = filter(datadf, year ==3), aes(x = y, y=n+1 - x, color = obs)) +
geom_point() + theme_bw() + theme(legend.position = "none")
ggplot(data = filter(datadf, year ==4), aes(x = y, y=n+1 - x, color = obs)) +
geom_point() + theme_bw() + theme(legend.position = "none")
# Blank axis theme shared by the publication figure panels.
theme1 = theme_bw() +
theme(axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
legend.position = "none")
g1 = ggplot(data = filter(datadf, year ==3),
aes(x = y, y=n+1 - x, color = as.factor(groupsp))) +
geom_point() + theme1
g2 = ggplot(data = filter(datadf, year ==4),
aes(x = y, y=n+1 - x, color = as.factor(groupsp))) +
geom_point() + theme1
# NOTE(review): absolute, machine-specific output path -- consider a
# relative path or a configurable output directory.
pdf("/Volumes/GoogleDrive/My Drive/Research/Subgroups_st/docs/figures/twogroups_regular.pdf",height = 3,width = 7)
grid.arrange(g1,g2, ncol = 2)
dev.off()
### column matrix: one column per year, rows ordered by (y, x)
datmat = select(datadf, y, x, year, obs) %>% spread(year, obs)
Ymat = datmat[,-(1:2)]
Ymat = as.matrix(Ymat)
dat1 = select(datadf, y, x, year, obs, groupsp) %>%
mutate(index = rep(n0:1, ntime))
ggplot(data = dat1, aes(x = year, y = index)) + geom_raster(aes(fill = obs))
ggplot(data = dat1, aes(x = year, y = index)) + geom_raster(aes(fill = groupsp))
# Lower-triangular one-matrices: the design Xmat = U2 %x% U1 maps cumulative
# (difference-basis) coefficients to the observed field, so a sparse
# coefficient matrix corresponds to piecewise-constant blocks.
ncols = ncol(Ymat)
nrows = nrow(Ymat)
U1 = matrix(1,nrows,nrows)
U2 = matrix(1,ncols,ncols)
U1[upper.tri(U1)] = 0
U2[upper.tri(U2)] = 0
Yvec = c(Ymat)
Xmat = U2 %x% U1
# Lasso fit at a fixed lambda; predictions reconstructed via U1 B U2'.
res1 = glmnet(x = Xmat[,-1],y = Yvec, intercept = TRUE, standardize = FALSE,
lambda = 0.05)
coefmat = matrix(coef(res1), nrows, ncols)
Ypred1 = U1 %*% coefmat %*% t(U2)
plot(Ypred1, Yvec)
sum(coefmat!=0)
table(Ypred1)
dat2 = dat1
dat2$obs = c(Ypred1)
dat2$groupsp = as.factor(dat2$obs)
ggplot(data = dat2, aes(x = year, y = index)) + geom_raster(aes(fill = obs))
ggplot(data = dat2, aes(x = year, y = index)) + geom_raster(aes(fill = groupsp))
# Same recovery via LAR, taking the coefficients at step 10.
res3 = lars(x = Xmat,y = Yvec,intercept = FALSE,normalize = FALSE,
max.steps = 20, type = "lar")
coefmat3 = matrix(coef(res3)[10,], nrows, ncols)
Ypred3 = U1 %*% coefmat3 %*% t(U2)
image(Ypred3)
plot(c(Ypred3), Yvec)
sum(coefmat3!=0)
table(Ypred3)
dat3 = dat1
dat3$obs = c(Ypred3)
dat3$groupsp = as.factor(dat3$obs)
g1 = ggplot(data = dat3, aes(x = year, y = index)) + geom_raster(aes(fill = obs))
g2 = ggplot(data = dat3, aes(x = year, y = index)) + geom_raster(aes(fill = groupsp))
##### row order: repeat the experiment with rows ordered by (x) instead
datmat2 = select(datadf, x, y, year, obs) %>% arrange(year,x) %>% spread(year, obs)
Ymat2 = datmat2[,-(1:2)]
Ymat2 = as.matrix(Ymat2)
Yvec2 = c(Ymat2)
dat21 = select(datadf, x, y, year, obs) %>% arrange(year,x) %>%
mutate(index = rep(n0:1, ntime))
ggplot(data = dat21, aes(x = year, y = index)) + geom_raster(aes(fill = obs))
res2 = glmnet(x = Xmat[,-1],y = Yvec2, intercept = TRUE, standardize = FALSE,
lambda = 0.01)
coefmat21 = matrix(coef(res2), nrows, ncols)
Ypred21 = U1 %*% coefmat21%*% t(U2)
plot(Ypred21, Yvec2)
sum(coefmat21!=0)
table(Ypred21)
# LAR on the row-ordered data, taking coefficients at step 20.
res23 = lars(x = Xmat,y = Yvec2,intercept = FALSE,normalize = FALSE,
max.steps = 20, type = "lar")
coefmat23 = matrix(coef(res23)[20,], nrows, ncols)
Ypred23 = U1 %*% coefmat23 %*% t(U2)
image(Ypred23)
plot(c(Ypred23), Yvec2)
sum(coefmat23!=0)
|
e43e6c2ad2289786ea8647e47090abda14374e42
|
583c2374b676c60cdb64ffae1d48e0d0f2cf5e7f
|
/man/mmlcr.Rd
|
83f994b89f1e5acbb02923e6c896b13c7b0de40a
|
[] |
no_license
|
cran/mmlcr
|
cf8e1c575b226a20484158f16998ca05bb4573a9
|
d4426714daa734cb546f34441719a80e38344ccc
|
refs/heads/master
| 2021-01-21T11:45:59.129520
| 2006-04-10T00:00:00
| 2006-04-10T00:00:00
| 17,719,035
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,920
|
rd
|
mmlcr.Rd
|
\name{mmlcr}
\alias{mmlcr}
\alias{mmlcr.default}
\alias{mmlcr.mmlcr}
\alias{anova.mmlcr}
\alias{formula.mmlcr}
\alias{logLik.mmlcr}
\alias{print.mmlcr}
\alias{print.summary.mmlcr}
\alias{summary.mmlcr}
\alias{vcov.mmlcr}
\title{
Mixed Mode Latent Class Regression
}
\description{
This function fits a latent class regression model, where the responses may be of
different types, as well as longitudinal or time-invariant.
}
\usage{
mmlcr(object = NULL, ...)
\method{mmlcr}{default}(object = NULL, outer, components,
data = error("data must be given (as a data.frame)"),
subset, n.groups = 2, prior.prob = NULL,
post.prob = NULL, no.fit = FALSE, max.iter = 50,
trace = TRUE, tol = 0.005, ...)
\method{mmlcr}{mmlcr}(object, max.iter = 50, trace = TRUE, tol = 0.005, ...)
}
\arguments{
\item{outer}{
a formula of the form \code{~ predictors | id} where the model statement for predictors is
as for regression models and \code{id} is the subject factor. The expression \code{1} can be used
for the predictors when there is no other suitable candidate.
}
\item{components}{
a list of the response components. Each element of the list is itself a list.
At a minimum, that list consists of a formula of the form \code{resp ~ covars}, where \code{resp} is the
response variable for that component and \code{covars} are any additional
covariates (\code{resp ~ 1} is commonly used, or something like \code{resp ~ age} for longitudinal classes),
and a class. Implemented classes include
\code{cnormlong}, \code{cnormonce}, \code{multinomlong}, \code{multinomonce}, \code{nb1long}, \code{nblong}, \code{nbonce},
\code{normlong}, \code{normonce}, \code{poislong}, and \code{poisonce}. Some classes require additional list
elements. Details can be found with the \code{mmlcrcomponentinit} help files.
}
\item{data}{
a data frame in which to interpret the variables occurring in outer and in the
formulas for the individual components.
}
\item{subset }{
expression giving which subset of the rows of the data should be used in the fit.
All observations are included by default.
}
\item{n.groups}{
the number of latent classes.
}
\item{prior.prob }{
a data.frame giving the prior probabilities of class membership according
to covariate information, not ``prior knowledge.'' The row.names should
match the subject id given in outer. If no value is given, which is recommended, the
default is equal probabilities.
}
\item{post.prob }{
a data.frame giving the posterior probabilities of class membership. The row.names
should match the subject id given in outer. If \code{post.prob} is not given, starting values
are assigned randomly. If one is fitting a modification of a prior fit called \code{fit1},
perhaps by changing the covariates, the recommendation is to include the term
\code{post.prob = fit1$post.prob}.
}
\item{no.fit }{
if TRUE, returns a \code{mmlcrObject} without fitting. The \code{mmlcrObject}
could be fit later by a call to \code{mmlcr(mmlcrObject)}.
}
\item{object}{
an \code{mmlcr} object. See the \code{mmlcrObject} help file for details.
}
\item{max.iter}{
the maximum number of iterations
}
\item{trace}{
if TRUE, traces through the iterations, giving the loglikelihood, a convergence speed
index, the loglikelihood goal, and the current class percentages.
}
\item{tol}{
the tolerance between the loglikelihood and the loglikelihood goal, used as a
stopping criterion.
}
\item{...}{possible additional arguments.}
}
\value{
a fitted \code{mmlcrObject}
}
\details{
The components portion of the call refers to components of the response, not
components in the sense, used in much of the mixtures literature, that is here
called latent classes or groups.
It is not yet possible to specify a prior, or even starting values, for the parameters
of the individual latent classes. Instead, one can assign starting values to the posterior
probabilities of each individual via the post.prob part of the function call.
In a typical use of this package, one might want to model, say, alcohol use.
The idea is that there may be several model trajectories over the years,
such as little or no use, heavy use, adolescent-limited use, and so on.
For each class, we would like to model a different longitudinal regression
(i.e., the same form for the regression, but different coefficients for each latent class).
Furthermore, we would like to include covariates for class membership,
so that the model looks like
Covariates ==> Latent Class ==> Longitudinal Trajectory,
with the regression coefficients on the right independent of the
covariates on the left conditional on class membership.
One could potentially have a number of arrows leading off of the latent class,
so that one could simultaneously model cigarette and alcohol use, for example.
The first arrow is modeled with the outer formula, while the second arrow is modeled
with the formula(s) in the component term.
}
\seealso{
\code{\link{mmlcrObject}}, \code{\link{mmlcrcomponentinit}},
\code{\link{summary.mmlcr}}, \code{\link{plot.mmlcr}}
}
\examples{
data(mmlcrdf)
mmlcrdf.mmlcr2 <- mmlcr(outer = ~ sex + cov1 | id,
components = list(
list(formula = resp1 ~ 1, class = "cnormonce", min = 0, max = 50),
list(formula = resp2 ~ poly(age, 2) + tcov1, class = "poislong"),
list(formula = resp3 ~ poly(age, 2), class = "multinomlong")
), data = mmlcrdf, n.groups = 2)
mmlcrdf.mmlcr2.inter <- mmlcr(outer = ~ sex * cov1 | id,
components = list(
list(formula = resp1 ~ 1, class = "cnormonce", min = 0, max = 50),
list(formula = resp2 ~ poly(age, 2) + tcov1, class = "poislong"),
list(formula = resp3 ~ poly(age, 2), class = "multinomlong")
), data = mmlcrdf, n.groups = 2,
post.prob = mmlcrdf.mmlcr2$post.prob, no.fit = TRUE)
mmlcrdf.mmlcr2.inter <- mmlcr(mmlcrdf.mmlcr2.inter)
}
\keyword{models}
% Converted by Sd2Rd version 1.21.
|
b7a7fe3b5cc43631ba4c24158025c1ff79b174c2
|
4a24d8fe7f51e9bdaa3c72d6605273959cc6c12c
|
/man/survivalOutcome.Rd
|
d8ae6342a829676ef9465bac931708e36da6621d
|
[] |
no_license
|
cran/supcluster
|
c76923742eeb51b3ae2dbe83cfcdf5cc9bac4a54
|
ecb4381eb47f037f62f60e3c032402b1052fdf78
|
refs/heads/master
| 2022-06-08T19:16:09.490180
| 2022-05-19T13:50:02
| 2022-05-19T13:50:02
| 35,846,422
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,932
|
rd
|
survivalOutcome.Rd
|
\name{survivalOutcome}
\alias{survivalOutcome}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Simulates a survival model for use with \code{\link{generate.cluster.data}}
%% ~~function to do ... ~~
}
\description{Given a vector of frailties, say \eqn{x_1,...}, this function generates a censored exponentially distributed random variable with rate equal to \eqn{\mu+\beta x_i}. The censoring distribution is uniform from \eqn{f} to
\eqn{f+a}.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
survivalOutcome(mu, beta, accrual, followUp)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{mu}{ The constant term \eqn{\mu}
%% ~~Describe \code{mu} here~~
}
\item{beta}{ The frailty effect \eqn{\beta}
%% ~~Describe \code{beta} here~~
}
\item{accrual}{ The accrual time \eqn{a}, in a clinical study}
%% ~~Describe \code{accrual} here~~
\item{followUp}{ The follow up time \eqn{f} in a clinical study
%% ~~Describe \code{followUp} here~~
}
}
\value{ A data frame is returned with two columns \code{survival} and \code{censor}
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\author{
David A. Schoenfeld
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{\link{coxLink}},\code{\link{binaryOutcome}},\code{\link{binaryLink}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
generatedData=generate.cluster.data(.25,npats=25,clusts=c(12,8),beta=c(-5,5),
outcomeModel=survivalOutcome(0,1,1,1))
usBinary=supcluster(generatedData[[1]],outcome="outcome",
maxclusters=5,nstart=100,n=200,fbeta=FALSE,
linkLikelihood=coxLink(generatedData[[2]]))
}
\keyword{ ~survival }
\keyword{ ~censoring }% __ONLY ONE__ keyword per line
|
74c15078aa3b7afdadd54d858f9d83c3fcc1f7f7
|
234a69e6e0c3cb329c5213e68842fabdd4388f54
|
/compute_ParasiteProfile.R
|
7466afbe69ed7ecb7217ac3dee635808739b462b
|
[] |
no_license
|
jwatowatson/bunching
|
3b2a81dac98d4cb935cd6e48bce3edb9edef3d0a
|
f04f50ba90728209c3133d5662d874a85ebb4441
|
refs/heads/master
| 2021-08-23T08:32:29.559012
| 2017-12-04T09:47:28
| 2017-12-04T09:47:28
| 112,720,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,668
|
r
|
compute_ParasiteProfile.R
|
# Forward-simulate log parasite biomass profiles under a PK/PD model.
#
# Each subject i starts a blood-stage infection on day merozoite_release[i]
# with N_merozoites parasites. On every simulated day the log parasitaemia is
# reduced by the log of an Emax-type dose-response of that day's drug
# concentration, floored at log_threshold (i.e. maximal net growth when the
# drug has no effect, since log_threshold is negative). Simulation for a
# subject stops when the biomass reaches the patency threshold, dies out
# (log biomass drops below 0), or the time horizon Tmax is reached; the exit
# value is recorded on the final day.
#
# Arguments:
#   drug_concentrations : N x Tmax matrix; daily drug concentration per subject
#   log_EC_50           : log value of the EC50 parameter of the PD model
#   log_k               : log value of the slope parameter k of the PD model
#   merozoite_release   : day merozoites erupt from the liver, per subject
#                         (assumed to be within 1..Tmax — not validated here)
#   patency_pcount      : parasite biomass at which the infection becomes patent
#   Tmax                : time horizon in days (default 150, the original
#                         hard-coded value; now a parameter so the horizon and
#                         the output width can no longer drift apart)
# Returns: N x Tmax matrix of log parasite biomass (NA where not simulated).
compute_ParasiteProfiles = function(drug_concentrations,
                                    log_EC_50, log_k,
                                    merozoite_release,
                                    patency_pcount,
                                    Tmax = 150){
  N <- length(merozoite_release)
  N_merozoites <- 10000            # initial parasite load at merozoite release
  log_threshold <- log(sqrt(0.1))  # floor on the daily log kill (net growth)
  max_effect <- 10^3               # Emax of the dose-response curve
  parasite_profiles <- array(dim = c(N, Tmax))
  k <- exp(log_k)
  for (i in seq_len(N)) {
    log_parasitaemia <- log(N_merozoites)
    days <- merozoite_release[i]
    log_patency_threshold <- log(patency_pcount[i])
    while (log_parasitaemia < log_patency_threshold &&
           log_parasitaemia >= 0 && days < Tmax) {
      parasite_profiles[i, days] <- log_parasitaemia
      drug_level <- drug_concentrations[i, days]
      # Emax dose-response evaluated on the log-concentration scale.
      dose_response <- sqrt(max_effect / (1 + exp(-k * (log(drug_level) - log_EC_50))))
      log_parasitaemia <- log_parasitaemia - max(log_threshold, log(dose_response))
      days <- days + 1
    }
    # Record the value that terminated the loop (patent, cleared, or horizon).
    parasite_profiles[i, days] <- log_parasitaemia
  }
  parasite_profiles
}
|
149c2b735e1ae8ce69bfa512353e1f950dafa2f7
|
31cd90d9e9862b71459b8f23bec563a3084a6978
|
/novaReport/R/tableNominal2.R
|
7b04bff400dd43d2946d5212a7f7cb588a978b21
|
[
"MIT"
] |
permissive
|
Boshoffsmit/Rpackages
|
2594169369785b89c178236d555555254de3be2f
|
0488525236b986d016a99e98c57744cbcb488efb
|
refs/heads/master
| 2020-12-27T15:30:01.820546
| 2016-03-11T08:20:09
| 2016-03-11T08:20:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,764
|
r
|
tableNominal2.R
|
# Render a LaTeX frequency table (via xtable) for a set of nominal variables,
# optionally split by a grouping factor. Per group it emits counts (n),
# percentages (\%) and, when cumsum = TRUE, cumulative percentages, plus an
# "all" block over all groups, and optionally a p-value for the association
# between each variable and the group (Fisher or chi-square test).
# NOTE(review): this appears to be a modified copy of
# reporttools::tableNominal (abbreviated upper-case group labels in the
# headers; the p-value moved into the last table row) -- confirm against the
# original before further edits.
tableNominal2 <- function (vars, weights = NA, subset = NA, group = NA, miss.cat = NA,
    print.pval = c("none", "fisher", "chi2"), pval.bound = 10^-4,
    fisher.B = 2000, vertical = TRUE, cap = "", lab = "", col.tit.font = c("bf",
        "", "sf", "it", "rm"), font.size = "tiny", longtable = TRUE,
    nams = NA, cumsum = TRUE, ...)
{
    print.pval <- match.arg(print.pval)
    # A data.frame input is unpacked into a list of columns; its column
    # names override nams.
    if (is.data.frame(vars) == TRUE) {
        tmp <- vars
        vars <- list()
        for (i in 1:ncol(tmp)) {
            vars[[i]] <- tmp[, i]
        }
        nams <- colnames(tmp)
    }
    n.var <- length(nams)
    # Apply the row subset consistently to group, weights and every variable.
    if (identical(subset, NA) == FALSE) {
        if (identical(group, NA) == FALSE) {
            group <- group[subset]
        }
        if (identical(weights, NA) == FALSE) {
            weights <- weights[subset]
        }
        for (i in 1:n.var) {
            vars[[i]] <- vars[[i]][subset]
        }
    }
    # LaTeX column separator between the label columns and each group block.
    vert.lin <- "|"
    if (vertical == FALSE) {
        vert.lin <- ""
    }
    # Escape underscores so variable names survive LaTeX.
    for (i in 1:length(nams)) {
        nams[i] <- gsub("_", "\\\\_", as.character(nams[i]))
    }
    # Turn NA into an explicit "missing" level for the requested variables.
    if (max(is.na(miss.cat)) == 0) {
        for (i in miss.cat) {
            vars[[i]] <- NAtoCategory(vars[[i]], label = "missing")
        }
    }
    # No grouping: put every observation in a single pseudo-group.
    if (identical(group, NA) == TRUE) {
        group <- rep(1, length(vars[[1]]))
    }
    if (identical(weights, NA) == TRUE) {
        weights2 <- 1
    }
    if (identical(weights, NA) == FALSE) {
        weights2 <- weights
    }
    # Expand observations by their (integer) weights; the literal string
    # "NA" is treated as a genuine missing value.
    for (i in 1:n.var) {
        vars[[i]][vars[[i]] == "NA"] <- NA
        vars[[i]] <- rep(vars[[i]], times = weights2)
    }
    group <- rep(group, times = weights2)
    vars <- lapply(vars, as.factor)
    group <- as.factor(group)
    ns.level <- unlist(lapply(lapply(vars, levels), length))
    n.group <- length(levels(group))
    cumsum <- as.logical(cumsum)
    stopifnot(identical(length(cumsum), 1L))
    # Columns per group block: n, %, and optionally cumulative %.
    nColPerGroup <- 2L + as.integer(cumsum)
    # Output: 2 label columns + one block per group + one "all" block; each
    # variable contributes one row per level plus an "all" summary row.
    out <- matrix(NA, ncol = 2 + nColPerGroup * (n.group + 1),
        nrow = (sum(ns.level) + n.var))
    out <- data.frame(out)
    for (i in 1:n.var) {
        # Row indices of variable i's block (levels + the "all" row).
        ind <- max(cumsum(ns.level[1:i])) - ns.level[i] + 1:(ns.level[i] +
            1) + (i - 1)
        splits <- split(vars[[i]], group)
        for (g in 1:n.group) {
            tmp <- splits[[g]]
            tmp <- tmp[is.na(tmp) == FALSE]
            # NOTE(review): NAs were removed from tmp on the line above, so
            # sum(is.na(tmp)) is always 0 and excl is always NA here -- this
            # branch looks like dead logic carried over; confirm.
            if (sum(is.na(tmp)) > 0) {
                excl <- NULL
            }
            else {
                excl <- NA
            }
            tab <- table(tmp, exclude = excl)
            tab.s <- round(100 * tab/sum(tab), 2)
            # Fill the n and % columns of this group's block.
            out[ind, 2 + nColPerGroup * (g - 1) + 1] <- c(tab,
                sum(tab))
            out[ind, 2 + nColPerGroup * (g - 1) + 2] <- c(tab.s,
                sum(tab.s))
            if (cumsum) {
                out[ind, 2 + nColPerGroup * (g - 1) + 3] <- c(cumsum(tab.s),
                  NA)
            }
        }
        # Label columns: variable name on the first row, level names below.
        out[ind[1], 1] <- nams[[i]]
        out[ind, 2] <- c(levels(vars[[i]]), "all")
        # "all" block: tabulation over all groups combined.
        tab2 <- table(vars[[i]])
        tab2.s <- round(100 * tab2/sum(tab2), 2)
        out[ind, 2 + nColPerGroup * n.group + 1] <- c(tab2, sum(tab2))
        out[ind, 2 + nColPerGroup * n.group + 2] <- c(tab2.s,
            sum(tab2.s))
        if (cumsum) {
            out[ind, 2 + nColPerGroup * n.group + 3] <- c(cumsum(tab2.s),
                NA)
        }
        # P-value for variable-vs-group association, computed on complete
        # cases only and only when all of the gates below hold.
        v1 <- vars[[i]]
        g1 <- as.character(group)
        indNA <- (is.na(g1) == FALSE) & (g1 != "NA") & (is.na(v1) ==
            FALSE) & (v1 != "NA")
        v2 <- as.character(v1[indNA])
        g2 <- g1[indNA]
        ind1 <- length(unique(g2)) > 1
        ind2 <- print.pval %in% c("fisher", "chi2")
        ind3 <- length(unique(v2)) > 1
        splits2 <- split(v2, g2)
        # ind4: no group may consist entirely of NAs.
        ind4 <- 1 - max(unlist(lapply(lapply(splits2, is.na),
            sum)) == unlist(lapply(lapply(splits2, is.na), length)))
        if (ind1 * ind2 * ind3 * ind4 == 1) {
            if (print.pval == "fisher") {
                # fisher.B = Inf requests the exact test; otherwise the
                # p-value is simulated with B replicates.
                pval <- if (fisher.B == Inf)
                  fisher.test(v2, g2, simulate.p.value = FALSE)$p.value
                else fisher.test(v2, g2, simulate.p.value = TRUE,
                  B = fisher.B)$p.value
            }
            if (print.pval == "chi2") {
                pval <- chisq.test(v2, g2, correct = TRUE)$p.value
            }
            # Formatted p-value goes into column 1 of the block's last row.
            out[max(ind), 1] <- paste("$p", formatPval(pval,
                includeEquality = TRUE, eps = pval.bound), "$",
                sep = "")
        }
    }
    # Column titles and xtable digit specs per group block.
    col.tit <- if (cumsum) {
        c("n", "\\%", "\\sum \\%")
    }
    else {
        c("n", "\\%")
    }
    col.tit.font <- match.arg(col.tit.font)
    fonts <- getFonts(col.tit.font)
    digits <- if (cumsum) {
        c(0, 1, 1)
    }
    else {
        c(0, 1)
    }
    # LaTeX alignment string: 3 left columns, then right-aligned blocks.
    groupAlign <- paste(rep("r", nColPerGroup), collapse = "")
    al <- paste("lll", vert.lin, groupAlign, sep = "")
    # Horizontal rules around each variable's block (doubled at block ends).
    tmp <- cumsum(ns.level + 1)
    hlines <- sort(c(0, tmp - 1, rep(tmp, each = 2)))
    tab.env <- "longtable"
    float <- FALSE
    if (identical(longtable, FALSE)) {
        tab.env <- "tabular"
        float <- TRUE
    }
    if (n.group > 1) {
        # Group labels: repeated per block, blanked after first occurrence,
        # then abbreviated to at most 4 upper-case characters.
        zz <- rep(c(levels(group), "all"), each = nColPerGroup)
        zz[-match(unique(zz), zz)] <- ""
        zz = toupper(sapply(zz, substr, start=1, stop=4))
        dimnames(out)[[2]] <- c(fonts$text("Variable"), fonts$text("Levels"),
            fonts$math(paste(col.tit, "_{\\mathrm{", zz, "}}", sep = "")))
        for (i in 1:n.group) {
            al <- paste(al, vert.lin, groupAlign, sep = "")
        }
        # Move the last row's p-value/label into the "Levels" column.
        out[length(out[,1]),2] <- out[length(out[,1]),1]
        xtab1 <- xtable::xtable(out, digits = c(rep(0, 3), rep(digits,
            n.group + 1)), align = al, caption = cap, label = lab)
        xtab2 <- print(xtab1, include.rownames = FALSE, floating = float,
            type = "latex", hline.after = hlines, size = font.size,
            sanitize.text.function = function(x) {
                gsub("_", " ", x)
            }, tabular.environment = tab.env, ...)
    }
    if (n.group == 1) {
        # Single group: keep only the label columns plus the one block.
        out <- if (cumsum) {
            out[, 1:5]
        }
        else {
            out[, 1:4]
        }
        dimnames(out)[[2]] <- c(fonts$text("Variable"), fonts$text("Levels"),
            fonts$math(col.tit))
        # Move the last row's p-value/label into the "Levels" column.
        out[length(out[,1]),2] <- out[length(out[,1]),1]
        xtab1 <- xtable::xtable(out[-1], digits = c(rep(0, 3), digits)[-1],
            align = substring(al, 2, nchar(al)), caption = cap, label = lab)
        xtab2 <- print(xtab1, include.rownames = FALSE, floating = float,
            type = "latex", hline.after = hlines, size = font.size,
            sanitize.text.function = function(x) {
                gsub("_", " ", x)
            }, tabular.environment = tab.env, ...)
    }
}
|
8f931770ec91cf17b8dc25401aa35fc34261bb99
|
2e591b9f592900b4893cebba7a621d6cf115b329
|
/Regression/Section 4 - Simple Linear Regression/simpleLinearRegression.R
|
94d7531fb2338e75c4baa3e69304e7943322fb86
|
[] |
no_license
|
iamarjunchandra/Machinelearning-Tutorial-Works
|
d935aee525f454e597c0dc5fe2f888b0e87a06e2
|
6673a5e6dfdc65d0432dbce65d7b9473258f57ec
|
refs/heads/main
| 2023-01-04T09:18:25.348894
| 2020-10-25T08:27:16
| 2020-10-25T08:27:16
| 307,048,702
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
simpleLinearRegression.R
|
# Simple linear regression: model Salary as a function of YearsExperience.

# Load the data set
salary_df <- read.csv('Salary_Data.csv')

# Split the observations into training and test sets (2/3 for training)
# install.packages('caTools')
library(caTools)
# NOTE(review): sample.split() is normally given the outcome vector
# (e.g. salary_df$Salary) rather than the whole data frame -- confirm the
# intended split behaviour.
split_flag <- sample.split(salary_df, SplitRatio = 2/3)
training_set <- subset(salary_df, split_flag == TRUE)
test_set <- subset(salary_df, split_flag == FALSE)

# Fit a linear model on the training set
regressor <- lm(formula = Salary ~ YearsExperience,
                data = training_set)

# Predict salaries for the test set
predicted_salary <- predict(regressor, newdata = test_set)

# Visualize the fitted line against the training observations
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
  geom_point(aes(x = training_set$YearsExperience, y = training_set$Salary),
             colour = 'RED') +
  geom_line(aes(x = training_set$YearsExperience,
                y = predict(regressor, newdata = training_set)),
            colour = 'BLUE') +
  ggtitle('Experience VS Salary (Train Set Result)') +
  xlab('Experience') +
  ylab('Salary')

# Visualize the same fitted line against the test observations
library(ggplot2)
ggplot() +
  geom_point(aes(x = test_set$YearsExperience, y = test_set$Salary),
             colour = 'RED') +
  geom_line(aes(x = training_set$YearsExperience,
                y = predict(regressor, newdata = training_set)),
            colour = 'BLUE') +
  ggtitle('Experience VS Salary (Test Set Result)') +
  xlab('Experience') +
  ylab('Salary')
|
dc9c16311e71b0abb9ac3452dc64e3ff5293d496
|
2499bed9dd66e72f5b9779a9c9f81109050e485e
|
/shiny/polynomial_fit/server.R
|
64ab0addd3cc43fbf8342819a7269ee86312ab03
|
[] |
no_license
|
witt-analytics/anlt510_resources
|
2b54e518759387d5c611fd4f175a19789cbb93f6
|
2b98f7a13a2c1d93cb03e09322451324db849280
|
refs/heads/main
| 2023-08-17T17:38:24.950592
| 2021-10-15T11:45:24
| 2021-10-15T11:45:24
| 405,414,451
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,887
|
r
|
server.R
|
# Shiny server: generates noisy data from the line y = 5x + 3 on a fixed grid,
# then fits a polynomial of user-chosen degree by least squares and plots the
# fit, the data points, the fitted equation and the residual SSE.
# NOTE(review): input$sigma2 is passed as rnorm()'s third positional argument,
# which is the standard deviation, despite the name suggesting a variance --
# confirm the intended parameterization.
server = function(input, output, session) {
  # Fixed x grid; noise and y re-generate whenever input$sigma2 changes.
  x_i <- reactive({seq(0, 6, by = 0.5)})
  noise <- reactive({rnorm(length(x_i()), 0, input$sigma2)})
  y_i <- reactive({5 * x_i() + 3 + noise()})
  # output$sumd <- renderUI({
  #   HTML(glue::glue("<h3>Σ(distance) = {round(sum(noise()),digits=5)}</h3>"))})
  # output$sumda <- renderUI({
  #   HTML(glue::glue("<h3 style='color: red;'>Σ(|distance|) = {round(sum(abs(noise())),digits=5)}</h3>"))})
  # output$sumd2 <- renderUI({
  #   HTML(glue::glue("<h3 style='color:#00e500;'>Σ(distance)<sup>2</sup> = {round(sum(noise()^2),digits=5)}</h3>"))})
  output$plot3d2 <- renderPlot({
    # N coefficients for a polynomial of degree input$degree.
    N = input$degree + 1
    # Build the polynomial "const[1] * x^0 + const[2] * x^1 + ..." as text.
    terms = character(N)
    for(i in 1:N){
      terms[i] = glue::glue("const[{i}] * x ^ ({i} - 1)")
    }
    # SSE objective. NOTE(review): the objective is assembled as a string and
    # eval(parse())'d; a plain closure over a coefficient vector would avoid
    # the eval(parse(text = ...)) anti-pattern -- flagged, not changed here.
    fun <- function(y, x,const) {
      Fun <- glue::glue("sum( (y - ( {paste(terms, collapse = ' + ')} ) ) ^ 2)")
      return(eval(parse(text = Fun)))
    }
    # Minimize the SSE from a random start. The [1:5] keeps the first five
    # components of nlminb's result list (presumably dropping the message
    # component) -- only $par and $objective are used below.
    res = nlminb(start = rnorm(N),
                 objective = fun,
                 x = x_i(),
                 y = y_i())[1:5]
    # Vectorized polynomial for curve(); reuses the same text expression.
    plt_fun <- function(x,const) return(eval(parse(text = paste(terms, collapse = ' + '))))
    par(cex.axis = 1.1, cex.lab = 1.1, font = 2, las = 1, lwd = 2)
    curve(plt_fun(x,res$par),
          xlim = c(0,6),
          ylim = c(0,35),
          n = 1000,
          lwd = 2)
    # Overlay the noisy observations.
    points(x_i(),
           y_i(),
           col = 4,
           cex = 1.5,
           pch = 16)
    # Build a plotmath label of the fitted equation from rounded coefficients.
    # NOTE(review): 2:n_params counts down when n_params == 1 (degree 0),
    # producing an "NA*x^1" term -- presumably the UI enforces degree >= 1;
    # confirm.
    params = round(res$par, digits = 3)
    n_params = length(params)
    powers = 0:n_params
    trms = character(n_params)
    trms[1] = paste(params[1])
    for(i in 2:n_params){
      trms[i] = glue::glue("{params[i]}*x^{powers[i]}")
    }
    text(x = 0,
         y = 0.5,
         parse(text = paste(trms, collapse = " + ")),
         cex = 1.5,
         font = 2,
         adj = 0)
    # Print the achieved SSE in the upper-left corner.
    text(x = c(0.75,0.75),
         y = c(30,27),
         c(expression(bold(underline("SSE"))), round(res$objective,4)),
         cex = c(1.5,1.5),
         font = 2,
         adj = 0)
  })
}
|
de4083efcc020df642a88489b51047839c4cf0cd
|
36628243c050cc012243cce16d55e6d24c95b1cf
|
/R/client_sendeR.R
|
c6af42a82e93fe720cf454ca9c4e860c7655885a
|
[
"MIT"
] |
permissive
|
TymekDev/sendeR
|
e5bf9ca406dd130b8003f54c00050de16fedae7a
|
32142f3ee24ad0c1b674102848e41c461a5107d0
|
refs/heads/master
| 2022-11-07T07:07:13.054088
| 2020-06-26T16:48:17
| 2020-06-26T16:48:17
| 213,371,734
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,377
|
r
|
client_sendeR.R
|
#' @title sendeR client
#'
#' @description \code{client_sendeR} builds the base object that every
#'   service-specific client extends: a list carrying the service name,
#'   tagged with the \code{client_sendeR} class.
#'
#' @param service typically the name of the service whose client extends
#'   this object.
#' @param ... named arguments with additional fields; when present they are
#'   forwarded to \code{\link{set_fields}} during client creation.
#'
#' @seealso \code{\link{is.client_sendeR}}, \code{\link{send_message}}
#'
#' @examples
#' client <- client_sendeR("a service name")
#'
#' # Variant with default parameters set
#' client2 <- client_sendeR("service", message = "Default message template")
#'
#' @rdname client_sendeR
#' @export
client_sendeR <- function(service, ...) {
  assert(is_character_len1(service), msg_character_len1("service"))
  client <- add_class(list("service" = service), "client_sendeR", FALSE)
  extra_fields <- list(...)
  if (length(extra_fields) > 0) {
    client <- set_fields(client, ...)
  }
  client
}
# S3 method listing the mandatory field names of a client_sendeR.
# Keep this in sync with the constructor: every name returned here must be
# set by client_sendeR().
default_fields.client_sendeR <- function(client) "service"
#' @title sendeR clients' verification
#'
#' @description \code{is.client_sendeR} checks two things: that the object
#'   carries the \code{client_sendeR} class, and that it contains every
#'   field a \code{client_sendeR} is required to have.
#'
#'   Verification helpers for derived clients first require this test to
#'   pass and then check the fields specific to the derived client.
#'
#' @param x an object to be tested.
#'
#' @rdname is.client_sendeR
#' @export
is.client_sendeR <- function(x) {
  if (!inherits(x, "client_sendeR")) {
    return(FALSE)
  }
  required <- default_fields.client_sendeR(x)
  all(required %in% names(x))
}
#' @description The \code{send_message} method for the \code{client_sendeR}
#'   class only serves as a placeholder: the base class does not know how to
#'   deliver a message, so concrete service clients must override it. Calling
#'   this method validates the client and emits a warning.
#'
#' @rdname send_message
#' @export
send_message.client_sendeR <- function(client, message, destination,
                                       verbose = FALSE, ...) {
  assert(is.client_sendeR(client), not_a_client("client", "sendeR"))
  # Fixed warning text: it previously referred to "client_NotifieR", a
  # leftover from an earlier package name; this class is client_sendeR.
  warning("client_sendeR does not support sending messages.")
}
#' @param x an object to print.
#' @param ... unused; present only to match the \code{print} generic's
#'   signature.
#'
#' @rdname client_sendeR
#' @export
print.client_sendeR <- function(x, ...) {
  assert(is.client_sendeR(x), not_a_client("x", "sendeR"))
  rendered <- format(x)
  cat(rendered)
}
# Build the character representation of a client_sendeR, used only by
# print.client_sendeR. Layout: a "sendeR client (<service>)" header, then the
# client's default fields (other than "service"), then any extra fields under
# an "Additional fields:" heading.
format.client_sendeR <- function(x, ...) {
  assert(is.client_sendeR(x), not_a_client("x", "sendeR"))
  # Default fields minus the service name (already shown in the header).
  defaults <- setdiff(default_fields(x), "service")
  if (length(defaults) > 0) {
    defaults <- format_fields(x, defaults)
  }
  # Fields that are neither defaults nor the service name.
  additionals <- setdiff(names(x), c(default_fields(x), "service"))
  if (length(additionals) > 0) {
    additionals <- sprintf("\n\nAdditional fields:\n%s\n",
                           format_fields(x, additionals))
  }
  # NOTE(review): when defaults/additionals stay character(0), paste() with
  # sep = "" silently drops them, which is what keeps the output well-formed
  # -- keep that in mind before restructuring this into paste0()/sprintf.
  paste(
    sprintf("sendeR client (%s)\n", x$service),
    defaults,
    additionals,
    sep = ""
  )
}
|
022231e1dee0eb0b43f55307651222a8fb2ad220
|
4476502e4fed662b9d761c83e352c4aed3f2a1c2
|
/GIT_NOTE/02_Rscript_workplace/chap14/14장 연습문제 완 결.R
|
ff26ec6856eef1ceef5ea3a71f85d7a92fc5286c
|
[] |
no_license
|
yeon4032/STUDY
|
7772ef57ed7f1d5ccc13e0a679dbfab9589982f3
|
d7ccfa509c68960f7b196705b172e267678ef593
|
refs/heads/main
| 2023-07-31T18:34:52.573979
| 2021-09-16T07:45:57
| 2021-09-16T07:45:57
| 407,009,836
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 2,975
|
r
|
14장 연습문제 완 결.R
|
#################################
## <Chapter 14 Exercises>
#################################

# 01. Using the mpg data set, sample training and test data at a 7:3 ratio,
#     then perform a tree-based analysis step by step.
# Condition) x, y variable selection
#   independent (explanatory) variables: displ + cyl + year
#   dependent (response) variable: cty
library(rpart) # tree models:
#classification tree (categorical y), regression tree (continuous y)
#-> evaluation methods
#classification tree: confusion matrix,
#regression tree: MSE, RMSE, cor
library(ggplot2) # provides the mpg dataset
data(mpg)
str(mpg)

# Step 1: sample training and test data
# 2. data exploration and preprocessing
idx<-sample(nrow(mpg),0.7*nrow(mpg))
train<-mpg[idx,]
test<-mpg[-idx,]

# Step 2: build the model from the training data
model1<-rpart(cty ~ displ + cyl + year, data=train)
model1#important variable: displ

# Step 3: predict on the test data and evaluate
y_pred<-predict(model1,test)
y_true<-test$cty
cor(y_true,y_pred) #0.8854559

# Step 4: visualize the fitted tree
rpart.plot(model1)
prp(model1)

# Step 5: interpret the results

# 02. Using the weather data, perform a decision-tree classification
#     following the steps below.
# Condition 1) build the classification model with rpart()
# Condition 2) y variable: RainTomorrow; x variables: everything except
#              the Date and RainToday variables
# Condition 3) visualize the model to identify the x variable with the
#              largest influence on y
# Condition 4) categorize as 'Yes Rain' if the predicted probability of
#              rain is >= 50%, otherwise 'No Rain'

# Step 1: load the data
library(rpart) # model building
library(rpart.plot) # classification-tree visualization
# NOTE(review): setwd() inside a script is fragile -- it assumes a fixed
# local directory layout.
setwd("c:/ITWILL/2_Rwork/data")
weather = read.csv("weather.csv", header=TRUE)
str(weather)

# Step 2: sample the data (drop columns 1 and 14: Date and RainToday)
weather.df<-weather[,c(-1,-14)]
x<-sample(nrow(weather.df),0.7*nrow(weather.df))
x
train<-weather.df[x,]
test<-weather.df[-x,]
dim(train)
dim(test)

# Step 3: build the classification model
model2<-rpart(RainTomorrow ~., data= train)
model2

# Step 4: visualize the model - check the important variables
model2#Humidity

# Step 5: categorize predicted probabilities ('Yes Rain', 'No Rain')
y_pred<-predict(model2,test)
y_pred
y_pred2<-ifelse(y_pred[,2]>=0.5,'Yes Rain','No Rain')
y_pred2
y_true<-test$RainTomorrow
y_true
tab<-table(y_true,y_pred2)
tab
acc<-(tab[1,1]+tab[2,2])/sum(tab)
acc#0.8181818

# Step 6: build a confusion matrix and compute the classification accuracy
# NOTE(review): installing a package at run time on every execution is
# undesirable; install once interactively instead.
install.packages("gmodels") # install the gmodels package
library(gmodels) # uses the CrossTable() function
CrossTable(x=y_true,y=y_pred2)
|
4b0330e59db1e1459c47572dc72fe9562889a2ac
|
a52eff39d28de6e589632543f1d65201f676e09e
|
/2020-01-31-Classic/makeAnim.R
|
fba3e736c5e8f593a41aa9bacd67ec3dc159cd9c
|
[] |
no_license
|
quoctran98/TheRiddler
|
bc22c81530189e7f65bdcdbb30d7771310b04fab
|
41817339f9a98ea81eec99029e9295ad225a8f50
|
refs/heads/master
| 2022-03-20T00:21:34.237242
| 2022-02-21T06:01:38
| 2022-02-21T06:01:38
| 225,267,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
makeAnim.R
|
library(ggplot2)
# Volume of a pyramid built from Magna-Tiles.
#   n: number of tiles forming the pyramid's regular n-gon base (vectorized)
#   N: number of identical isosceles tiles that tile a flat regular polygon;
#      this fixes the tile shape (apex angle 360/N)
#   a: tile base length
# Note: tri_area below is the area of a single base triangle, not the full
# n-gon, so the result is 1/n of the full pyramid volume -- fine for the
# relative comparisons this script makes.
vol <- function(n, N = 12, a = 1) {
  deg <- pi / 180                                # degrees -> radians
  apex_angle <- 360 / N                          # tile apex angle (degrees)
  base_angle <- (180 - apex_angle) / 2           # tile base angle (degrees)
  # Slant edge length of a tile (law of sines on the isosceles tile).
  slant <- a * (sin(base_angle * deg) / sin(apex_angle * deg))
  apothem <- a / (2 * tan((180 / n) * deg))      # apothem of the n-gon base
  tri_area <- 0.5 * a * apothem                  # area of one base triangle
  # Circumradius-like chord of the base n-gon.
  circum <- a * (sin((((n - 2) * 90) / n) * deg) / sin((360 / n) * deg))
  height <- sqrt((slant^2) - (circum^2))         # pyramid height
  (1/3) * tri_area * height
}
# Build the plotting data: one normalized volume curve per polygon size N
# (12..50) and the optimum (maximum) point of each curve.
# Improvements over the original:
#   * rows are collected in preallocated lists and bound once, instead of
#     growing a data.frame with rbind() inside the loop (quadratic copying);
#   * dfOpt gets exactly one row per N -- the original recycled the scalar
#     optimum against a length(x) vector of N values, duplicating each
#     optimum point length(x) times.
N_values <- 12:50
curve_list <- vector("list", length(N_values))
opt_list <- vector("list", length(N_values))
for (j in seq_along(N_values)) {
  magnaN <- N_values[j]
  x <- 3:(magnaN - 1)              # candidate numbers of tiles in the base
  y <- vol(x, N = magnaN)
  y <- y - min(y)                  # normalize: smallest volume -> 0
  curve_list[[j]] <- data.frame(x = x, y = y, N = magnaN)
  best <- which.max(y)             # first index attaining the maximum
  opt_list[[j]] <- data.frame(x = x[best], y = y[best], N = magnaN)
}
df <- do.call(rbind, curve_list)
dfOpt <- do.call(rbind, opt_list)
# Render one animation frame per polygon size: frame i shows every curve with
# N <= i, the optimum points seen so far, and vertical lines at the current
# N's optimum x and at its largest candidate x. Axis limits grow with the
# data shown so earlier frames zoom in.
for (i in unique(df$N)) {
  xmax <- max(df[df$N <= i,"x"])
  ymax <- max(df[df$N <= i,"y"])
  # NOTE(review): this ggplot chain is neither assigned nor printed inside
  # the loop; ggsave() below relies on ggplot2's last_plot() fallback to pick
  # it up -- verify the intended frame is actually the one saved.
  ggplot(data = NULL) +
    geom_line(data = df[df$N <= i,], aes(x=x, y=y, color=(N), group=N)) +
    geom_point(data = dfOpt[dfOpt$N <= i,], aes(x=x, y=y)) +
    xlim(c(2, xmax)) +
    ylim(c(0, ymax)) +
    # [1] guards against dfOpt holding more than one row for this N.
    geom_vline(data = dfOpt, aes(xintercept = dfOpt[dfOpt$N == i, "x"][1])) +
    geom_vline(data = df, aes(xintercept = max(df[df$N == i, "x"]))) +
    xlab("Number of Magna-Tiles") +
    ylab("Normalized Pyramid Volume") +
    labs(title = paste("Optimal Pyramid with Variable Sized Magna-Tiles (N = ", i, ")", sep = ""),
         subtitle = "where N is the number of Magna-Tiles needed to form a flat regular polygon") +
    theme(axis.text.y=element_blank(),
          legend.position="none")
  # Write the frame; ./anim/ must already exist.
  ggsave(paste("./anim/", i, ".png", sep = ""))
}
|
c1752d41eaa942bf4ee07343627daf2fe55ff775
|
74835f9cfd9b54852267760a16ec7c7727e127bc
|
/R/abc.R
|
c65afe200aac92fc5befef52a12492c2d4bafffc
|
[] |
no_license
|
cgrazian/PETabc
|
e52181fdd53dd5b003cca1420d6c707e01daffa8
|
d250e2e16656d18dcb0e87c2fdb5f6e77ed93edc
|
refs/heads/master
| 2023-07-19T22:39:16.074450
| 2021-09-01T06:41:31
| 2021-09-01T06:41:31
| 265,183,843
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,521
|
r
|
abc.R
|
#' PET ABC function
#'
#' This function performs ABC inference for the compartmental models.
#' @param y vector of measured radioactivity concentrations in a voxel for each time-point. This can be in terms of observations or directly in terms of voxel time activity curve.
#' @param tspan vector of time points of observations.
#' @param N Number of simulations. Default to 10^5.
#' @param inputfunction A function describing the input function. Default inputfunction().
#' @param type The type of model in the input function.
#' @param model The type of kinetic model: "single" performs the analysis for the single-tissue
#' model, "two" performs the analysis for the two-tissue model. Default "single".
#' @param a1 Minimum of the uniform prior for K1. Default to 0 .
#' @param b1 Maximum of the uniform prior for K1. Default to 0.2 .
#' @param a2 Minimum of the uniform prior for K2. Default to 0.3 .
#' @param b2 Maximum of the uniform prior for K2. Default to 0.5 .
#' @param a3 Minimum of the uniform prior for K3. Default to 0 .
#' @param b3 Maximum of the uniform prior for K3. Default to 0.1 .
#' @param a4 Minimum of the uniform prior for K4. Default to 0 .
#' @param b4 Maximum of the uniform prior for K4. Default to 0.2 .
#' @param PLOT If plots have to be produced. Default at TRUE.
#' @return ABCout a matrix with values simulated from the posterior distribution of the parameters of the selected model.
#' @return Smat a matrix with values of summary statistics of the simulated curves.
#' @return ABCout_accepted a matrix with values simulated from the posterior distribution of the parameters
#' of the selected model; it shows only the values accepted according to the automatically selected threshold.
#' @return error vector of computed squared differences among the observed and the simulated summary statistics.
#' @return tol automatically selected tolerance level; this tolerance level is used to define the matrix ABCout_accepted.
#' @keywords PETabc
#' @export
PETabc <- function(y,tspan,N=100000, inputfunction.=inputfunction, type=2,model="single",
a1=0,b1=0.2,a2=0.3,b2=0.5,a3=0,b3=0.1,a4=0,b4=0.2, PLOT=T)
{
# y: vector of measured radioactivity concentrations in a voxel for each time-point. This can be in terms of observations or directly in terms of voxel time activity curve.
# t: vector of time points of observations
# N: number of simulations
# Tmax: maximum time range
# type: type for the singtissue and twotissue functions
# other parameters: bounds of the prior distributions
#
# observed summary statistics
Sobs=smooth.spline(tspan,y, cv=T)$y
names.Smat <- c()
for(j in 1:length(tspan)){
names.Smat[j] <- paste("y",j,sep="")
}
if(model=="single"){
# Storing matrices
parMat1=matrix(NA, ncol=2, nrow=N)
colnames(parMat1) <- c("K1","K2")
error1=matrix(NA, ncol=1, nrow=N)
Smat1 <- matrix(NA, nrow=N,ncol=length(tspan))
colnames(Smat1) <- names.Smat
# single-tissue function
singtissue= function(t, y, parms){
K1c=parms[1]
k2c=parms[2]
type=parms[3]
ipt=inputfunction.(t, type)
#differential equation describing the concentration in the reference tissue
#input function taken from...
dydt= K1c*ipt-k2c*y
return(list(c(dydt)))
} # end of single-tissue function
} else {
# Storing matrices
parMat2=matrix(NA, ncol=4, nrow=N)
colnames(parMat2) <- c("K1","K2","K3","K4")
error2=matrix(NA, ncol=1, nrow=N)
Smat2 <- matrix(NA, nrow=N,ncol=length(tspan))
colnames(Smat2) <- names.Smat
# two-tissue function
twotissue = function(t, y, parms) {
#differential equation describing the concentration in the target tissue
#based on a 2 tissue compartment model
K1=parms[1]
k2=parms[2]
k3=parms[3]
k4=parms[4]
type=parms[5]
ipt=inputfunction.(t, type)
dydt=c()
dydt[1]= ((K1*ipt)-(k2*y[1])-(k3*y[1])+(k4*y[2]))
dydt[2]=(k3*y[1]-k4*y[2])
return(list(c(dydt)))
} # end of two-tissue function
} # end of if
#Sobs= ksmooth(c(1:61), y2, bandwidth=2)$y
#Smat1=Smat2=matrix(NA, ncol=length(Sobs), nrow=N)
#time interval and sampling
### ABC simulations
for (i in 1:N) {
# printin every 10,000 simulations
if(round(i/10000)==i/10000){
cat("i", i, "\n")
} #end printing
# Simulation from the prior
K1=runif(1, a1, b1)
K2=runif(1, a2, b2)
if(model=="single"){
parms1=c(K1, K2, type)
out=ode(0, tspan, singtissue, parms1, method="ode45")
data1=t(out[,2])
#Smat1[i,]=as.vector(data1)
Smat1[i,]=smooth.spline(tspan, data1, cv=T)$y
error1[i,]=sum((y-Smat1[i,])^2)
parMat1[i,] <- c(K1,K2)
if(round(i/10000)==i/10000){
parMat_temp <- parMat1[1:i,]
error_temp <- error1[1:i,]
h1=quantile(error_temp, probs=0.05)
out1=parMat_temp[(error_temp<h1[1])==1,]
par(mfrow=c(1,2))
plot(density(out1[,1]),main="K1",
xlab="K1")
abline(v=mean(out1[,1]))
plot(density(out1[,2]),main="k2",
xlab="k2")
abline(v=mean(out1[,2]))
} #intermediate plot
} else { # end of single-tissue
K3=runif(1, a3, b3)
K4=runif(1, a4, b4)
parms2=c(K1, K2, K3, K4, type)
out=ode(c(0,0), tspan, twotissue, parms2, method="ode45")
data2=t(out[,2])+t(out[,3])
#Smat2=as.vector(data2)
Smat2[i,]=smooth.spline(tspan, data2, cv=T)$y
error2[i,]=sum((y-Smat2[i,])^2)
parMat2[i,] <- c(K1,K2,K3,K4)
if(round(i/10000)==i/10000){
parMat_temp <- parMat2[1:i,]
error_temp <- error2[1:i,]
h2=quantile(error_temp, probs=0.05)
out2=parMat_temp[(error_temp<h2[1])==1,]
par(mfrow=c(2,2),oma=c(3,3,0,0),mar=c(3,3,2,2))
plot(density(out2[,1]),main="K1",
xlab="K1")
abline(v=mean(out2[,1]))
plot(density(out2[,2]),main="k2",
xlab="k2")
abline(v=mean(out2[,2]))
plot(density(out2[,3]),main="k3",
xlab="K1")
abline(v=mean(out2[,3]))
plot(density(out2[,4]),main="k4",
xlab="K1")
abline(v=mean(out2[,4]))
} #intermediate plot
} # end of if (two-tissue)
# Ct0=0 #initial condition : cncentration=0 at t=0
#out=ode(0, tspan, singtissue, parms1, method="ode45")
# out=ode(0, tspan, singtissue2, parms1[1:2], method="ode45")
# write(c(K1, K2, K3, K4), file="parMat.out", ncol=4, append=T)
# write(t(Smat1), file="SMat1.out", ncol=length(time_vec), append=T)
# write(t(Smat2), file="SMat2.out", ncol=length(time_vec), append=T)
# write(t(smooth.spline(time_vec, Smat1, cv=T)$y), ncol=length(time_vec),
# file="Smat1S.out", append=T)
# write(t(smooth.spline(time_vec, Smat2, cv=T)$y), ncol=length(time_vec),
# file="Smat2S.out", append=T)
} # end of for loop
### Saving the output
if(model=="single"){
# Chosen threshold
h1=apply(error1, 2, quantile, probs=0.05)
# Select the values respecting the threshold
out1=parMat1[(error1[,1]<h1[1])==1,]
} else{ # end of single-tissue
# Chosen threshold
h2=apply(error2, 2, quantile, probs=0.05)
# Select the values respecting the threshold
out2=parMat2[(error2[,1]<h2[1])==1,]
} # end of two-tissue
### Posterior plots
if(PLOT==T){
if(model=="single"){
pdf("posteriors_singletissue.pdf")
par(mfrow=c(1,2))
plot(density(out1[,1]),main="K1",
xlab="K1")
abline(v=mean(out1[,1]))
plot(density(out1[,2]),main="k2",
xlab="k2")
abline(v=mean(out1[,2]))
dev.off()
} else { # end of plots for single-tissue
pdf("posteriors_twotissue.pdf")
par(mfrow=c(2,2))
plot(density(out2[,1]),main="K1",
xlab="K1")
abline(v=mean(out2[,1]))
plot(density(out2[,2]),main="k2",
xlab="k2")
abline(v=mean(out2[,2]))
plot(density(out2[,3]),main="k3",
xlab="K1")
abline(v=mean(out2[,3]))
plot(density(out2[,4]),main="k4",
xlab="K1")
abline(v=mean(out2[,4]))
dev.off()
} # end of plots for two-tissue
} # end of plots
### Output: files
if(model=="single"){
write(t(parMat1), file="parMat1.out", ncol=4)
write(t(Smat1), file="SMat1.out", ncol=60)
write(t(error1), file="error1.out", ncol=3)
return(list(ABCout=parMat1,Smat=Smat1,ABCout_accepted=out1,error=error1,tol=h1))
} else { # end of single-tissue
write(t(parMat2), file="parMat2.out", ncol=4)
write(t(Smat2), file="SMat2.out", ncol=60)
write(t(error2), file="error2.out", ncol=3)
return(list(ABCout=parMat2,Smat=Smat2,ABCout_accepted=out2,error=error2,tol=h2) )
} # end of two-tissue
}
#
#
#
# #simulate N=200000 parameters and store
# N=1000
# parMat1=matrix(NA, ncol=2, nrow=N)
# parMat2=matrix(NA, ncol=4, nrow=N)
# errorMat1=errorMat2=c()
# Sobs=smooth.spline(c(1:61), y1, cv=T)$y
# #Sobs= ksmooth(c(1:61), y2, bandwidth=2)$y
# #Smat1=Smat2=matrix(NA, ncol=length(Sobs), nrow=N)
#
# ntries=0
#
# for (i in c(1:N)) {
# cat("i", i, ".. ntries", ntries, "\n")
# error=999
# ntries=0
#
# while (error > 0.7) {
# ntries=ntries+1
# K1=runif(1, 0, 0.2)
# K2=runif(1, 0.3, 0.5)
# # K3=runif(1, 0, 0.1)
# # K4=runif(1, 0, 0.2)
#
#
# parms1=c(K1, K2, 2)
# #parms2=c(K1, K2, K3, K4, 2)
# tspan=c(0:60) #time interval and sampling
# # Ct0=0 #initial condition : cncentration=0 at t=0
#
# out=ode(0, tspan, singtissue, parms1, method="ode45")
# data1=t(out[,2])
# Smat1[i,]=smooth.spline(c(1:61), data1, cv=T)$y
# error=sum(abs(Smat1[i,]-Sobs))
# errorMat1[i]=error
# parMat1[i,]=c(K1, K2)
# }
#
# # out=ode(c(0,0), tspan, twotissue, parms2, method="ode45")
# # data2=t(out[,2])+t(out[,3])
# #Smat2=smooth.spline(c(1:61), data2, cv=T)$y
# # Smat2=data2
#
# Ind=errorMat1< thresh #0.685
# par(mfrow=c(2,2))
# hist(parMat1[Ind==1,1])
# # abline(v=0.0918, lwd=3, col=2)
# # abline(v=mean(parMat1[Ind==1,1]), lwd=3, col=3)
# hist(parMat1[Ind==1,2])
# # abline(v=0.4484, lwd=3, col=2)
# # abline(v=mean(parMat1[Ind==1,2]), lwd=3, col=3)
# plot( parMat1[Ind==1,1], errorMat1[Ind==1])
# plot( parMat1[Ind==1,2], errorMat1[Ind==1])
#
#
# library(abc)
# out=abc(Sobs, parMat1, Smat1, tol=1, method="loclinear")
# adj=out$adj.values
# unadj=out$unadj.values
#
# return(ABCres=out)
# }
|
5b2aea71d6570bf8de6bec3a83cc218a2bd9c51b
|
0a543f431a1b832ebe8a679441e665bf53e02894
|
/plot1.R
|
2d24f5b6dd2c790bca660698ea54e6c54a3d3390
|
[] |
no_license
|
Mohamed-Ahmed-Salem/ExData_Plotting1
|
6ebbc87346128d0bf113cf2d1226f7752cc73d96
|
6085b377439f670d306ed6803650d5e83dfbddb9
|
refs/heads/master
| 2021-01-15T12:25:36.317728
| 2016-04-04T23:00:26
| 2016-04-04T23:00:26
| 55,448,583
| 0
| 0
| null | 2016-04-04T22:02:09
| 2016-04-04T22:02:08
| null |
UTF-8
|
R
| false
| false
| 551
|
r
|
plot1.R
|
## plot1.R -- histogram of Global Active Power for 2007-02-01 / 2007-02-02.
## Reads a pre-located 2880-row window of the UCI "Individual household
## electric power consumption" file (semicolon-separated; dates are d/m/yyyy).
input <- read.table("household_power_consumption.txt", header = FALSE, sep = ";",
                    skip = 66637, nrows = 2880)
colnames(input) <- c("Date", "Time", "Global_active_power",
                     "Global_reactive_power", "Voltage", "Global_intensity",
                     "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## BUG FIX: this dataset stores dates as day/month/year, so the original
## "%m/%d/%Y" format parsed dates wrongly and returned NA for days > 12.
DTime <- with(input, as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S"))
DTime
## BUG FIX: removed the trailing comma inside hist(), which created an empty
## argument and caused an error at evaluation time.
hist(input$Global_active_power, main = "Global active power",
     xlab = "Global_active_power (Kilowatts)", border = "blue", col = "red",
     las = 1)
dev.copy(png, 'myplot.png')  # NOTE(review): the course rubric expects "plot1.png" -- confirm
dev.off()
|
ba6cb202f5664ef104ed1eadd2e0cfd5374c2876
|
7427b37f3e4a3d711c2c4bf05b8f4b038f6e7954
|
/helpers/model-validation.R
|
bf873b10eef87104d255961e1f37bfa3eed74dae
|
[] |
no_license
|
aravindhebbali/app_blorr
|
fad0891ea2fe779b8a9dc5e48d07c0e081467148
|
70fabd4113aba05b5fc533d7b10cdbccbed68966
|
refs/heads/master
| 2021-05-11T04:46:23.818849
| 2018-01-19T18:20:30
| 2018-01-19T18:20:30
| 117,946,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,859
|
r
|
model-validation.R
|
# Shared helpers: utility functions and the print_blr_confusion_matrix()
# formatter used by the S3 print method below.
source('helpers/utils.R')
source('helpers/output.R')
# S3 generic: build a confusion matrix (plus derived classification metrics)
# for a fitted binary-response model at the given probability cutoff.
blr_confusion_matrix <- function(model, cutoff = 0.5, data = NULL) UseMethod('blr_confusion_matrix')
# Default method: computes the confusion matrix of observed vs. predicted
# class (prob > cutoff => 1) and a suite of derived metrics (accuracy,
# sensitivity/specificity, predictive values, ...).
#
# model  - a fitted glm-style binary model; must carry its training data and
#          a two-sided formula whose LHS is the response column name.
# cutoff - probability threshold for classifying a case as positive.
# data   - optional data frame; defaults to the model's own training data.
#          NOTE(review): predict.glm below is called without newdata, so the
#          probabilities always come from the training data -- confirm that
#          passing `data` is intended to change only the observed labels.
# Returns an object of class "blr_confusion_matrix" (a named list).
blr_confusion_matrix.default <- function(model, cutoff = 0.5, data = NULL) {
  if (is.null(data)) {
    data <- model %>%
      use_series(data)
  }
  # Response column name, taken from the LHS of the model formula.
  resp <- model %>%
    formula %>%
    extract2(2) %>%
    as.character
  # Cross-tabulate observed response against thresholded predictions.
  conf_matrix <- data %>%
    mutate(
      prob = predict.glm(object = model, type = 'response'),
      predicted = if_else(prob > cutoff, 1, 0)
    ) %>%
    select(resp, predicted) %>%
    table
  # Cell layout of the 2x2 table (column-major):
  #   a = true positives, b = false positives,
  #   c = false negatives, d = true negatives.
  a <- conf_matrix[4]
  b <- conf_matrix[3]
  c <- conf_matrix[2]
  d <- conf_matrix[1]
  abcd <- sum(a, b, c ,d)
  accuracy <- (a + d) / abcd
  precision <- a / (a + b)
  recall <- a / (a + c)
  sensitivity <- a / (a + c)
  specificity <- d / (d + b)
  prevalence <- (a + c) / abcd
  detection_rate <- a / abcd
  detection_prevalence <- (a + b) / abcd
  balanced_accuracy <- (sensitivity + specificity) / 2
  # Positive/negative predictive values via Bayes' rule on prevalence.
  ppv <- (sensitivity * prevalence) / ((sensitivity * prevalence) +
                                         ((1 - specificity) * (1 - prevalence)))
  npv <- (specificity * (1 - prevalence)) / (((1 - sensitivity) * prevalence) +
                                               (specificity * (1 - prevalence)))
  result <- list(
    confusion_matrix = conf_matrix,
    accuracy = accuracy,
    precision = precision,
    sensitivity = sensitivity,
    specificity = specificity,
    recall = recall,
    prevalence = prevalence,
    detection_rate = detection_rate,
    detection_prevalence = detection_prevalence,
    balanced_accuracy = balanced_accuracy,
    pos_pred_value = ppv,
    neg_pred_value = npv
  )
  class(result) <- 'blr_confusion_matrix'
  return(result)
}
# S3 print method: delegates formatting to print_blr_confusion_matrix()
# (helpers/output.R). Per R's print-method convention it returns the object
# invisibly, so the value can be assigned or piped without re-printing.
print.blr_confusion_matrix <- function(x, ...) {
  print_blr_confusion_matrix(x)
  invisible(x)
}
|
ebfc9aae8afe150ce5155ece64099a7e137954e6
|
bed336fc87b09834348f6c3de364953c7558d8bb
|
/man/generateWig.Rd
|
e2cf91a55f1b1342f7ca9bcb55ff680642573275
|
[] |
no_license
|
keleslab/mosaics
|
7bc6b95376d8b8a78427194ef3181be67020de40
|
786f5db1438015aaa6bac6c423e7b3655e5df946
|
refs/heads/master
| 2021-07-14T18:00:09.109482
| 2020-02-27T00:37:56
| 2020-02-27T00:37:56
| 38,325,192
| 1
| 0
| null | 2016-05-02T17:50:57
| 2015-06-30T18:11:55
|
R
|
UTF-8
|
R
| false
| false
| 5,924
|
rd
|
generateWig.Rd
|
\name{generateWig}
\alias{generateWig}
\title{
Construct wiggle files from an aligned ChIP-seq read file
}
\description{
Construct wiggle files from an aligned ChIP-seq read file.
}
\usage{
generateWig( infile=NULL, fileFormat=NULL, outfileLoc="./",
byChr=FALSE, useChrfile=FALSE, chrfile=NULL, excludeChr=NULL,
PET=FALSE, fragLen=200, span=200, capping=0, normConst=1, perl = "perl" )
}
\arguments{
\item{infile}{
Name of the aligned read file to be processed.
}
\item{fileFormat}{
Format of the aligned read file to be processed.
Currently, \code{generateWig} permits the following aligned read file formats
for SET data (\code{PET = FALSE}):
\code{"eland_result"} (Eland result), \code{"eland_extended"} (Eland extended),
\code{"eland_export"} (Eland export), \code{"bowtie"} (default Bowtie),
\code{"sam"} (SAM), \code{"bam"} (BAM), \code{"bed"} (BED), and \code{"csem"} (CSEM).
For PET data (\code{PET = TRUE}), the following aligned read file formats are allowed:
\code{"eland_result"} (Eland result), \code{"sam"} (SAM), and \code{"bam"} (BAM).
}
\item{outfileLoc}{
Directory of processed wiggle files.
By default, processed wiggle files are exported to the current directory.
}
\item{byChr}{
Construct separate wiggle file for each chromosome?
Possible values are \code{TRUE} or \code{FALSE}.
If \code{byChr=FALSE}, all chromosomes are exported to one file.
If \code{byChr=TRUE}, each chromosome is exported to a separate file.
Default is \code{FALSE}.
}
\item{useChrfile}{
Is the file for chromosome info provided?
Possible values are \code{TRUE} or \code{FALSE}.
If \code{useChrfile=FALSE}, it is assumed that the file for chromosome info is not provided.
If \code{useChrfile=TRUE}, it is assumed that the file for chromosome info is provided.
Default is \code{FALSE}.
}
\item{chrfile}{
Name of the file for chromosome info.
In this file, the first and second columns are ID and size of each chromosome, respectively.
}
\item{excludeChr}{
Vector of chromosomes that will be excluded from the analysis.
This argument is ignored if \code{useChrfile=TRUE}.
}
\item{PET}{
Is the file paired-end tag (PET) data?
If \code{PET=FALSE}, it is assumed that the file is SET data.
If \code{PET=TRUE}, it is assumed that the file is PET data.
Default is \code{FALSE} (SET data).
}
\item{fragLen}{
Average fragment length. Default is 200.
This argument is ignored if \code{PET=TRUE}.
}
\item{span}{
Span used in wiggle files. Default is 200.
}
\item{capping}{
Maximum number of reads allowed to start at each nucleotide position.
To avoid potential PCR amplification artifacts, the maximum number of reads
that can start at a nucleotide position is capped at \code{capping}.
Capping is not applied if non-positive value is used for \code{capping}.
Default is 0 (no capping).
}
\item{normConst}{
Normalizing constant to scale values in each position.
}
\item{perl}{
Name of the perl executable to be called. Default is \code{"perl"}.
}
}
\details{
Wiggle files are constructed from the aligned read file and
exported to the directory specified in \code{outfileLoc} argument.
If \code{byChr=FALSE}, wiggle files are named
as \code{[infileName]_fragL[fragLen]_span[span].wig} for SET data (\code{PET = FALSE})
and \code{[infileName]_span[span].wig} for PET data (\code{PET = TRUE}).
If \code{byChr=TRUE}, wiggle files are named
as \code{[infileName]_fragL[fragLen]_span[span]_[chrID].wig} for SET data (\code{PET = FALSE})
and \code{[infileName]_span[span]_[chrID].wig} for PET data (\code{PET = TRUE}),
where \code{chrID} is chromosome IDs that reads align to.
These chromosome IDs are extracted from the aligned read file.
If the file for chromosome information is provided (\code{useChrfile=TRUE} and \code{chrfile} is not NULL),
only the chromosomes specified in the file will be considered.
Chromosomes that are specified in \code{excludeChr}
will not be included in the processed wiggle files.
\code{excludeChr} argument is ignored if \code{useChrfile=TRUE}.
\code{generateWig} currently supports the following aligned read file formats
for SET data (\code{PET = FALSE}):
Eland result (\code{"eland_result"}), Eland extended (\code{"eland_extended"}),
Eland export (\code{"eland_export"}), default Bowtie (\code{"bowtie"}),
SAM (\code{"sam"}), BAM (\code{"bam"}), BED (\code{"bed"}), and CSEM (\code{"csem"}).
For PET data (\code{PET = TRUE}), the following aligned read file formats are allowed:
\code{"eland_result"} (Eland result), \code{"sam"} (SAM), and \code{"bam"} (BAM).
If input file format is neither BED nor CSEM BED,
this method retains only reads mapping uniquely to the reference genome.
}
\value{
Processed wig files are exported to the directory specified in \code{outfileLoc}.
}
\references{
Kuan, PF, D Chung, JA Thomson, R Stewart, and S Keles (2011),
"A Statistical Framework for the Analysis of ChIP-Seq Data",
\emph{Journal of the American Statistical Association}, Vol. 106, pp. 891-903.
Chung, D, Zhang Q, and Keles S (2014), "MOSAiCS-HMM: A model-based approach for detecting regions of histone modifications from ChIP-seq data", Datta S and Nettleton D (eds.), \emph{Statistical Analysis of Next Generation Sequencing Data}, Springer.
}
\author{ Dongjun Chung, Pei Fen Kuan, Rene Welch, Sunduz Keles }
\examples{
\dontrun{
library(mosaicsExample)
generateWig( infile=system.file( file.path("extdata","wgEncodeSydhTfbsGm12878Stat1StdAlnRep1_chr22_sorted.bam"), package="mosaicsExample"),
fileFormat="bam", outfileLoc="~/",
PET=FALSE, fragLen=200, span=200, capping=0, normConst=1 )
}
}
\keyword{models}
\keyword{methods}
|
84aaf4f8dc4d37775e94e17eb3618c021183295f
|
d1b93689a39a3dea6b42c20400b165382e8bbde1
|
/man/earth_event.Rd
|
fc0ff4d3b5a4b8acd8ee8ddcf5d046bdaa93e1ee
|
[] |
no_license
|
cran/nasadata
|
dbd25dcaf574dbf071bcbd6c3b412e79a6a7669f
|
ce407a9ced0fc476269677372c25e5309693bfa3
|
refs/heads/master
| 2020-12-22T01:33:28.332752
| 2016-05-07T00:41:17
| 2016-05-07T00:41:17
| 236,630,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,298
|
rd
|
earth_event.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eonet.R
\name{earth_event}
\alias{earth_event}
\title{Calls EONET webservice}
\usage{
earth_event(status = "all", sources = "all", category_id = "all",
limit = 10, days = 20, LimitType = "limit", TrySimplify = TRUE)
}
\arguments{
\item{status}{Accepts 1 or 0 (open or closed). Defaults to "all", which includes both.}
\item{sources}{Accepts character id strings from EONET sources (see \code{eonet_sources})}
\item{category_id}{Accepts number id strings from EONET category tree (see \code{eonet_categories})}
\item{limit}{Limit of events to download. If LimitType = "days" this is not considered. Defaults to 10.}
\item{days}{Limit of days (less than today) to download events from. If LimitType = "limit" this is not considered. Defaults to 20.}
\item{LimitType}{Type of limit to consider: "limit" (count of events), "days" (days less than today) or "all" (both limits).}
\item{TrySimplify}{If TRUE tries to coerce category and event data.frames into one (successful if there is one category per event).}
}
\description{
Calls NASA's Earth Observatory Natural Event Tracker (EONET) webservice and returns a data.frame with individual event or events.
}
\examples{
\dontrun{
event <- earth_event(limit = 1)
}
}
|
d90168b926e0755d4183c6bd1d266ff7f5559fb4
|
ac7f56abb15d52b75e21c65e9f914eceede4a55d
|
/Death rates US Hospitals/rankall.R
|
a435b037d562c775f24e8daa3f9bf7ea35fc86c3
|
[] |
no_license
|
LauFernanda/Rprogramming
|
db3d35c1bc191f9fa92fb8630282960fd8c390ba
|
cb6a566b025a43c0aef94b5ed7eda5e689ae0db1
|
refs/heads/master
| 2021-01-10T16:54:29.780711
| 2016-02-10T04:33:52
| 2016-02-10T04:33:52
| 51,420,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,460
|
r
|
rankall.R
|
rankall <- function(outcome, num = "best") {
  ## Rank hospitals within every state by 30-day mortality for one outcome.
  ## Returns a data frame with columns State and Hospital, one row per state.
  ## num may be "best", "worst", or a numeric rank (NA when out of range).

  ## Load the full outcomes table; "Not Available" entries become NA.
  outcomes <- read.csv2("outcome-of-care-measures.csv",
                        na.strings = "Not Available",
                        stringsAsFactors = FALSE)
  ## Validate the requested outcome before doing any work.
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop("invalid outcome")
  }
  ## Column index of the 30-day mortality rate for the chosen outcome.
  rate_col <- switch(outcome,
                     "heart attack"  = 11,
                     "heart failure" = 17,
                     "pneumonia"     = 23)
  ## Keep state (7), hospital name (2) and the selected rate column.
  slim <- subset(outcomes, select = c(7, 2, rate_col))
  colnames(slim) <- c("state", "hospital", "outcome")
  ## Order by state, then rate, then name (ties); drop rows with NA keys.
  ranked <- slim[order(slim$state, slim$outcome, slim$hospital, na.last = NA), ]
  by_state <- split(ranked, ranked$state)
  ## For each state, pull the hospital at the requested rank.
  picks <- lapply(by_state, function(grp, num) {
    nobs <- nrow(grp)
    if (num == "best") {
      rank <- 1
    } else if (num == "worst") {
      rank <- nobs
    } else {
      rank <- num
    }
    grp$hospital[rank]
  }, num)
  data.frame(State = names(picks), Hospital = unlist(picks))
}
|
5afbd56003ee87ad1f60b393da48ddc8eece5dce
|
07e1b168bce1233a26b9d2ce8eeaa55516bee6b0
|
/commons-math-legacy/src/test/R/LevyDistributionTestCases.R
|
caf9a8c560466328f81806d79a77c6249ce5dab6
|
[
"BSD-3-Clause",
"Minpack",
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
apache/commons-math
|
1016687acb2c14c9df8060603c006e8ad78df2b8
|
9e02432e06501f94690e172f3856450cd3a55a20
|
refs/heads/master
| 2023-08-15T01:32:18.953065
| 2023-08-05T16:14:12
| 2023-08-05T16:14:12
| 24,928,494
| 592
| 517
|
Apache-2.0
| 2023-07-03T09:08:43
| 2014-10-08T07:00:06
|
Java
|
UTF-8
|
R
| false
| false
| 3,749
|
r
|
LevyDistributionTestCases.R
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#------------------------------------------------------------------------------
# R source file to validate Lévy distribution tests in
# org.apache.commons.math4.distribution.LevyDistributionTest
#
# To run the test, install R, put this file and testFunctions
# into the same directory, launch R from this directory and then enter
# source("<name-of-this-file>")
#
# R functions used
# dlevy(q, m=0, s=1, log=FALSE)
# plevy(q, m=0, s=1)
# qlevy(p, m=0, s=1)
#-----------------------------------------------------------------------------
# Absolute tolerance used by all assertEquals comparisons below.
tol <- 1E-9
# Function definitions
source("testFunctions") # utility test functions
# function to verify distribution computations
# Check plevy(point, m, s) at each point against the expected CDF values,
# reporting SUCCEEDED/FAILED via the shared display helpers.
verifyDistribution <- function(points, expected, m, s, tol) {
    actual <- vapply(points, function(q) plevy(q, m, s), numeric(1))
    label <- c("Distribution test m = ", m, ", s = ", s)
    if (assertEquals(expected, actual, tol, "Distribution Values")) {
        displayPadded(label, SUCCEEDED, WIDTH)
    } else {
        displayPadded(label, FAILED, WIDTH)
    }
}
# function to verify density computations
# Check dlevy(point, m, s) at each point against the expected density values,
# reporting SUCCEEDED/FAILED via the shared display helpers.
verifyDensity <- function(points, expected, m, s, tol) {
    actual <- vapply(points,
                     function(q) dlevy(q, m, s, log = FALSE),
                     numeric(1))
    label <- c("Density test m = ", m, ", s = ", s)
    if (assertEquals(expected, actual, tol, "Density Values")) {
        displayPadded(label, SUCCEEDED, WIDTH)
    } else {
        displayPadded(label, FAILED, WIDTH)
    }
}
#--------------------------------------------------------------------------
cat("Levy test cases\n")
# Case 1: location m = 1.2, scale s = 0.4. Expected values are the reference
# outputs the Java LevyDistributionTest asserts against.
m <- 1.2
s <- 0.4
distributionPoints <- c(1.2001, 1.21, 1.225, 1.25, 1.3, 1.9, 3.4, 5.6)
densityValues <- c(0.0, 5.200563737654472E-7, 0.021412836122382383, 0.4133397070818418, 1.0798193302637613, 0.3237493191610873, 0.07060325500936372, 0.026122839883975738)
distributionValues <- c(0.0, 2.539628589470901E-10, 6.334248366624259E-5, 0.004677734981047284, 0.04550026389635843, 0.4496917979688907, 0.6698153575994166, 0.763024600552995)
verifyDistribution(distributionPoints, distributionValues, m, s, tol)
verifyDensity(distributionPoints, densityValues, m, s, tol)
# Case 2: location m = 5, scale s = 1.3.
m <- 5
s <- 1.3
distributionPoints <- c(5.0001, 6, 7, 8, 9, 10, 11, 12, 13, 14)
densityValues <- c(0.0, 0.23745992633364185, 0.1161959636020616, 0.07048597672583455, 0.04833023442399538, 0.03572468867742048, 0.02777194506550441, 0.022382435270909086, 0.018533623436073274, 0.0156730047506865)
distributionValues <- c(0.0, 0.25421322360396437, 0.42011267955064, 0.5103578488686935, 0.5686182086635944, 0.6101201547975077, 0.6415915735304425, 0.6665077778509312, 0.6868651803414656, 0.7039020091632311)
verifyDistribution(distributionPoints, distributionValues, m, s, tol)
verifyDensity(distributionPoints, densityValues, m, s, tol)
displayDashes(WIDTH)
|
e6bebe9f0e2af07cdd416479f1fa7f359142e304
|
da01e34920d50ba85bf33e448db2dde9bcb5a936
|
/man/symbols.Rd
|
4ac1ef84e55fa5527e9652885c21776728ef0d2d
|
[] |
no_license
|
Marlin-Na/symbolicR.old
|
16e2cda36faa3fa54f93f330a45395a8201a4dec
|
194959103f05debaf73b9d44b62a86d75a8a5bd7
|
refs/heads/master
| 2021-10-09T06:30:11.768174
| 2017-05-27T08:17:38
| 2017-05-27T08:17:38
| 84,909,650
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 951
|
rd
|
symbols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sympy_import.R
\name{symbols}
\alias{symbols}
\alias{sym}
\alias{syms_init}
\title{Initiate Symbols}
\usage{
sym(name, ...)
syms_init(..., .envir = parent.frame(), .quite = FALSE)
}
\arguments{
\item{...}{For \code{sym}, pass the assumptions of the symbol.
For \code{syms_init}, arguments with a name will be treated as assumptions,
while arguments without a name are substituted as symbol names
to initiate. See example.}
\item{.envir}{The environment to initiate the symbols.}
\item{.quite}{Logical value to indicate whether to show messages when initiating the symbols}
}
\value{
\code{sym} returns a sympy symbol.
\code{syms_init} invisibly returns a list of the initiated symbols.
}
\description{
Symbols can be initiated with \code{sym} or \code{syms_init}
}
\examples{
x <- sym("x", real = TRUE)
syms_init(y, z, positive = TRUE, .quite = TRUE)
x
y
class(x)
}
|
2a1a33efcbf929d4612b708000636c5d807113a3
|
cc29bde617d1c0409f7c5be0da27144fcce2a842
|
/plot1.R
|
486bceebd75b4b502494c33dced32bc50b3ebbd6
|
[] |
no_license
|
caldwellsa/ExData_Plotting1
|
d2bc50abe59faf638e34e8387e834290079308c1
|
49f8bf4cb1460a4dd3af891050daedba5b23aa6e
|
refs/heads/master
| 2020-12-26T02:40:41.445214
| 2014-05-10T07:33:56
| 2014-05-10T07:33:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 567
|
r
|
plot1.R
|
## plot1.R -- Coursera Exploratory Data Analysis, Course Project 1.
## Histogram of Global Active Power for 2007-02-01 and 2007-02-02.
#
# Load the full dataset; "?" marks missing values.
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?")
# Keep only the two days of interest (dates are d/m/yyyy strings).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
# Render directly to a 480x480 PNG.
png(filename = "plot1.png", width = 480, height = 480, units = "px")
hist(power$Global_active_power,
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power",
     col = "red")
# Close the graphics device so the file is flushed to disk.
dev.off()
|
a33789f0f9c0987f102ed31ea963824b405681cf
|
22711eda5cee031748950a32f320e7e7607e5219
|
/R/seuratToAnnDataCombined.R
|
255afad0919c335723fc0528b8fc7440b9c6c227
|
[] |
no_license
|
cnk113/analysis-scripts
|
c479bc8b643983607d75c71662cb9a8058e2dbff
|
56d33b14d803956b19491bd52b504b06eb237101
|
refs/heads/main
| 2023-09-01T01:45:06.698178
| 2023-08-18T20:36:35
| 2023-08-18T20:36:35
| 483,434,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
seuratToAnnDataCombined.R
|
# Export a Seurat object (with RNA, spliced, and unspliced assays) to an
# AnnData .h5ad file by writing intermediate Matrix Market / CSV files and
# delegating assembly to a Python helper (~/build_combined.py, via reticulate).
#
# seuratObj   - Seurat object carrying RNA, spliced and unspliced count assays
#               (and pca/umap reductions when those flags are set).
# h5ad_output - path of the .h5ad file the Python `build` function writes.
# pca, umap   - also export the corresponding cell embeddings.
#
# Side effects: creates then deletes several temp files in the working
# directory; no meaningful return value.
seuratToH5AD <- function(seuratObj, h5ad_output, pca=FALSE, umap=FALSE){
  #Sys.setenv(RETICULATE_PYTHON = "/home/chang/miniconda3/envs/venv/bin/python")
  library(reticulate)
  #use_condaenv('venv')
  library(Matrix)
  # Counts are transposed: AnnData expects cells x genes.
  writeMM(t(seuratObj@assays$RNA@counts), file='combined.mtx')
  writeMM(t(seuratObj@assays$spliced@counts), file='spliced.mtx')
  writeMM(t(seuratObj@assays$unspliced@counts), file='unspliced.mtx')
  write.csv(rownames(seuratObj@assays$spliced@counts), file = "genes.csv", row.names = FALSE)
  if(umap == TRUE){
    write.csv(seuratObj@reductions$umap@cell.embeddings, file = "umap.csv", row.names = FALSE)
  }
  if(pca == TRUE){
    write.csv(seuratObj@reductions$pca@cell.embeddings, file = "pca.csv", row.names = FALSE)
  }
  write.csv(colnames(seuratObj@assays$spliced@counts), file = "cells.csv", row.names = FALSE)
  # NOTE(review): row.names = FALSE drops the cell barcodes from the metadata
  # CSV -- confirm build_combined.py realigns meta rows to cells.csv by order.
  write.csv(seuratObj@meta.data, file = "meta.csv", row.names = FALSE)
  # Load and invoke the Python-side assembler.
  source_python('~/build_combined.py')
  build(h5ad_output, pca = pca, umap = umap)
  # Clean up all intermediate files.
  file.remove('combined.mtx')
  file.remove('spliced.mtx')
  file.remove('unspliced.mtx')
  file.remove('genes.csv')
  file.remove('cells.csv')
  if(umap == TRUE){
    file.remove('umap.csv')
  }
  if(pca == TRUE){
    file.remove('pca.csv')
  }
  file.remove('meta.csv')
}
|
e7407a6ae0ddb476b0c4c84b2fc375873df98f81
|
c7fe5c58b7e8970f79b48c3b2c2e255ece23bd7c
|
/br/conf/logic/br4a.R
|
3acb2d83986180326ccd98a00c92db2b866ca6cf
|
[] |
no_license
|
eepgwde/vojdamago
|
87a12bddda41653d811e0cfcb1e99763344b3aa7
|
c813b95b0d14086b1e6bf63504084adfe6350efd
|
refs/heads/main
| 2023-07-13T10:53:50.867674
| 2021-08-17T14:40:57
| 2021-08-17T14:40:57
| 391,428,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,763
|
r
|
br4a.R
|
## weaves
##
## NZV to remove
## Can be called either at data-prep or before correlation
## NZV in br0
## Inspect the original source data.
## ppl0 is the source data
## br0 has nzv from earlier stages.
## Mark the ppl1 in the field rm0 to be removed.
# Expects globals from earlier pipeline stages: ppl0 (source data frame with
# a surftype column) and br0 (list carrying nzv.names, tgt0, source).
# Find the modal surface type: count rows per surftype, take the largest.
x0 <- aggregate(ppl0[, "surftype"], list(surftype=ppl0$surftype), FUN=length)
x0 <- as.character(x0[ order(-x0$x),][1,"surftype"])
## Mark ones to remove.
# Row names of every observation NOT on the modal surface type.
# NOTE(review): x.rremove is not used again in this file -- presumably
# consumed by br4a0.R or a later stage; confirm.
x.rremove <- rownames(ppl0)[ppl0$surftype != x0]
## Flag for removal.
## These are well-known, but some I keep in (isoneway)
# Accumulate the column names to drop.
x.cremove <- c("lanes2", "isshared", "surftype", "isdual")
x.cremove <- append(x.cremove, br0[["nzv.names"]]$cwy1)
x.cremove <- append(x.cremove, "a0Xisshared")
x.cremove <- append(x.cremove, c("smplwtf90", "ssmplwtf90", "ssmplriskf90") )
## Logic on dataset
# Target-variable-specific exclusions.
if (br0[["tgt0"]] == "xlocalr") {
  ## local roads are all unclassified
  x.cremove <- append(x.cremove, "iscls")
}
if (br0[["tgt0"]] == "xxwintryr") {
  ## local roads are all unclassified
  x.cremove <- append(x.cremove, "ssn")
}
## Logic in data input
# Extract the numeric dataset id embedded in the source path/name.
src0 <- br0[["source"]]
src1 <- as.integer(gsub("[a-z\\\\/\\\\.]+", "", br0[["source"]]))
## Whole sample set
if (src1 == 1) {
  ## no need to remove anything
  ;
}
## Long roads
if (src1 == 2) {
  ## do remove things.
  ;
}
## Short roads
if (src1 == 3) {
  ## do remove things.
  x.cremove <- append(x.cremove, "distance");
}
## rural is 4, urban is 5
## isurban becomes an NZV in either case
if (src1 == 4 || src1 == 5) {
  ## do remove things.
  x.cremove <- append(x.cremove, "isurban");
}
## Remove prescient variables
source("br4a0.R")
## The forward and backward works are NZV, but I'll leave them in.
## The PoI are also NZV, but I'll leave them in.
|
6145c9bfa29ce9a60707dfc1d02dcb8d11202a9b
|
ccffe9f9b732285a8d23c4806cb3e439852aca05
|
/install.R
|
6e2aa587665646504923a42b6b3c164795a549e8
|
[] |
no_license
|
mattcingram/517r
|
6f51b9fa1b099c0e65eddc5e9c02597a59161f8d
|
c164d414120f119ff533b987da0cc2ac7d8e5188
|
refs/heads/main
| 2023-03-08T10:01:34.731455
| 2021-02-25T22:55:57
| 2021-02-25T22:55:57
| 342,399,073
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 55
|
r
|
install.R
|
# One-shot setup: install the packages required by the course materials.
install.packages(c("pacman", "stargazer", "coefplot"))
|
ac90779a8d3ef1b5e2d189095eedac62a462eb58
|
a88362eb644f4d3052a7b6ba60c6cc5dc1dc0b2d
|
/week1_practice.R
|
7071da8c8a3efb39a119255018bd0c24f910a862
|
[] |
no_license
|
wouwou4444/FM-stat_with_r
|
eb9ded1d217b5ee4abde5e26ea8a294f636cba06
|
691ece8c52b47cbe7d2f41bf2c1b18b35260ff79
|
refs/heads/master
| 2020-04-04T13:51:50.517051
| 2018-11-05T05:13:22
| 2018-11-05T05:13:22
| 155,977,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,168
|
r
|
week1_practice.R
|
#### statistic with R on fun
# Practice script for the FUN-MOOC "Statistics with R" course (week 1).
# NOTE(review): setwd()/install.packages() in a script are side-effectful and
# machine-specific; fine for personal practice, not for shared code.
setwd(dir = "../../fun-mooc/statitic with R/")
install.packages("prettyR")
install.packages("binom")
library(tidyverse)
library(prettyR)
library(binom)
library(prettyR)
# smp1.csv: semicolon-separated survey data with at least ed (education
# level) and age columns.
smp <- read.csv2("./smp1.csv")
# Binary recode of education: first as logical, then overwritten as 0/1.
smp$ed.b <- smp$ed > 2
smp$ed.b <- ifelse(smp$ed > 2, 1, 0)
table(smp$ed.b, useNA = "always")
# Age standard deviation per education group (NAs dropped).
smp %>%
group_by(ed.b) %>%
summarize(age_sd = sd(age,na.rm = TRUE))
# Paired design: 5 subjects measured under conditions "b" and "w2".
subject <- rep(1:5,2)
group <- rep(c("b", "w2"), each= 5)
measure <- c(140, 138, 150, 148, 135, 132, 135, 151, 146, 130)
df <- data.frame("subject" = subject, "group"= group, "measure" = measure)
df
g1 <-df %>%
filter(group == "b") %>%
select(measure)
g2 <-df %>%
filter(group == "w2") %>%
select(measure)
# Within-subject differences (w2 minus b); one-sample t test on them is the
# paired t test.
difference <- g2 - g1
mn <- mean(difference$measure)
s <- sd(difference$measure)
n <- 6
# NOTE(review): n is set to 6 but there are 5 pairs -- confirm intent.
t.test(difference$measure)
# Miscellaneous course exercises: tail probability and power calculations.
pt(.975,df = 16, lower.tail = FALSE)
5*0.0625
power.t.test(n = 100, delta = .01, sd = .04,
alternative = "one.sided", type = "one.sample",
sig.level = .05)
power.t.test(power = .9, delta = .01, type = "one.sample",
sd = .04, alternative = "one.sided",
sig.level = .05)
|
9a686d34619bfdcf23245e4edcaf9a2e729b7c52
|
a538c8f66bbdc90b1f88290577c6cbd374562bf8
|
/tests/testthat/test-08-predict.R
|
e83a2ff36f9fe744ceb56170f643d8785625c9d8
|
[] |
no_license
|
cran/bartCause
|
4cb6a278b928ddd1f390c064268e8daa5e05417c
|
e48a8a4006016001a6461004fb2c26160720b859
|
refs/heads/master
| 2023-01-25T02:16:24.855165
| 2023-01-23T18:40:08
| 2023-01-23T18:40:08
| 252,709,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,094
|
r
|
test-08-predict.R
|
context("predict")
# Shared fixture from the package's common test data.
source(system.file("common", "linearData.R", package = "bartCause"), local = TRUE)
# First 80 rows train the model; the remainder is held out for predict().
n.train <- 80L
x <- testData$x[seq_len(n.train),]
y <- testData$y[seq_len(n.train)]
z <- testData$z[seq_len(n.train)]
x.new <- testData$x[seq.int(n.train + 1L, nrow(testData$x)),]
n.test <- nrow(x.new)
# Shapes/values of predict() on held-out data for a glm/bart fit.
test_that("predict gives sane results", {
  n.samples <- 7L
  n.chains <- 2L
  fit <- bartc(y, z, x, method.trt = "glm", method.rsp = "bart",
               n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
               keepTrees = TRUE,
               verbose = FALSE)
  # check predict for single row
  expect_equal(length(predict(fit, x.new[1,], type = "mu.0")), n.samples * n.chains)
  p.score <- predict(fit, x.new, type = "p.score")
  mu.1 <- predict(fit, x.new, type = "mu.1", combineChains = FALSE)
  mu.0 <- predict(fit, x.new, type = "mu.0", combineChains = TRUE)
  icate <- predict(fit, x.new, type = "icate", combineChains = TRUE)
  # p.score has no chain/sample structure; mu.* shapes depend on combineChains.
  expect_true(is.null(dim(p.score)))
  expect_equal(dim(mu.1), c(n.chains, n.samples, n.test))
  expect_equal(dim(mu.0), c(n.chains * n.samples, n.test))
  # icate must equal mu.1 - mu.0 once mu.1's chains are flattened into the
  # same (chain*sample, obs) layout that combineChains = TRUE produces.
  expect_equal(as.vector(icate), as.vector(matrix(aperm(mu.1, c(2L, 1L, 3L)), n.samples * n.chains)) - as.vector(mu.0))
})
# predict() on the training inputs must reproduce the fitted quantities
# returned by extract() (bart trt model, trees kept).
test_that("predict results matches training data", {
  n.samples <- 7L
  n.chains <- 2L
  fit <- bartc(y, z, x, method.trt = "bart", method.rsp = "bart",
               n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
               keepTrees = TRUE,
               args.trt = list(k = 1.5), verbose = FALSE)
  p.score <- extract(fit, type = "p.score")
  mu.1 <- extract(fit, type = "mu.1")
  mu.0 <- extract(fit, type = "mu.0")
  icate <- extract(fit, type = "icate")
  mu <- extract(fit, type = "mu.obs")
  p.score.new <- predict(fit, x, type = "p.score")
  mu.1.new <- predict(fit, x, type = "mu.1")
  mu.0.new <- predict(fit, x, type = "mu.0")
  icate.new <- predict(fit, x, type = "icate")
  # "mu" prediction needs the treatment column appended to the covariates.
  mu.new <- predict(fit, cbind(x, z), type = "mu")
  expect_equal(p.score, p.score.new)
  expect_equal(mu.0, mu.0.new)
  expect_equal(mu.1, mu.1.new)
  expect_equal(icate, icate.new)
  expect_equal(mu, mu.new)
})
# Grouped-data tests: assign each training row to one of three groups.
set.seed(22)
g <- sample(3L, nrow(x), replace = TRUE)
n.samples <- 7L
n.chains <- 2L
# predict() must honor group.by and reproduce fitted()/extract() values.
test_that("predict works with grouped data, glm trt model", {
  fit <- bartc(y, z, x, method.trt = "glm", method.rsp = "bart", group.by = g,
               n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
               keepTrees = TRUE, use.ranef = FALSE,
               args.trt = list(k = 1.5), verbose = FALSE)
  p.score <- fitted(fit, type = "p.score")
  mu.1 <- extract(fit, type = "mu.1")
  mu.0 <- extract(fit, type = "mu.0")
  icate <- extract(fit, type = "icate")
  p.score.new <- predict(fit, x, group.by = g, type = "p.score")
  mu.1.new <- predict(fit, x, group.by = g, type = "mu.1")
  mu.0.new <- predict(fit, x, group.by = g, type = "mu.0")
  icate.new <- predict(fit, x, group.by = g, type = "icate")
  expect_equal(p.score, p.score.new)
  expect_equal(mu.0, mu.0.new)
  expect_equal(mu.1, mu.1.new)
  expect_equal(icate, icate.new)
})
# NOTE(review): this test is named "glmer" but passes method.trt = "glm" --
# presumably bartc switches to a glmer fit internally when group.by is given
# (hence the lme4 skip and suppressWarnings); confirm against bartc().
test_that("predict works with grouped data, glmer trt model", {
  skip_if_not_installed("lme4")
  suppressWarnings(
    fit <- bartc(y, z, x, method.trt = "glm", method.rsp = "bart", group.by = g,
                 n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
                 keepTrees = TRUE, use.ranef = FALSE,
                 args.trt = list(k = 1.5), verbose = FALSE)
  )
  p.score <- fitted(fit, type = "p.score")
  mu.1 <- extract(fit, type = "mu.1")
  mu.0 <- extract(fit, type = "mu.0")
  icate <- extract(fit, type = "icate")
  p.score.new <- predict(fit, x, group.by = g, type = "p.score")
  mu.1.new <- predict(fit, x, group.by = g, type = "mu.1")
  mu.0.new <- predict(fit, x, group.by = g, type = "mu.0")
  icate.new <- predict(fit, x, group.by = g, type = "icate")
  expect_equal(p.score, p.score.new)
  expect_equal(mu.0, mu.0.new)
  expect_equal(mu.1, mu.1.new)
  expect_equal(icate, icate.new)
})
# Grouped data with a BART treatment model, both with the default random
# effects and with use.ranef = FALSE.
test_that("predict works with grouped data, bart trt model", {
  fit <- bartc(y, z, x, method.trt = "bart", method.rsp = "bart", group.by = g,
               n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
               keepTrees = TRUE,
               args.trt = list(k = 1.5), verbose = FALSE)
  p.score <- extract(fit, type = "p.score")
  mu.1 <- extract(fit, type = "mu.1")
  mu.0 <- extract(fit, type = "mu.0")
  icate <- extract(fit, type = "icate")
  p.score.new <- predict(fit, x, group.by = g, type = "p.score")
  mu.1.new <- predict(fit, x, group.by = g, type = "mu.1")
  mu.0.new <- predict(fit, x, group.by = g, type = "mu.0")
  icate.new <- predict(fit, x, group.by = g, type = "icate")
  expect_equal(p.score, p.score.new)
  expect_equal(mu.0, mu.0.new)
  expect_equal(mu.1, mu.1.new)
  expect_equal(icate, icate.new)
  # Same checks with grouping handled as fixed effects.
  fit <- bartc(y, z, x, method.trt = "bart", method.rsp = "bart", group.by = g,
               n.chains = n.chains, n.threads = 1L, n.burn = 0L, n.samples = n.samples, n.trees = 13L,
               keepTrees = TRUE, use.ranef = FALSE,
               args.trt = list(k = 1.5), verbose = FALSE)
  p.score <- extract(fit, type = "p.score")
  mu.1 <- extract(fit, type = "mu.1")
  mu.0 <- extract(fit, type = "mu.0")
  icate <- extract(fit, type = "icate")
  p.score.new <- predict(fit, x, group.by = g, type = "p.score")
  mu.1.new <- predict(fit, x, group.by = g, type = "mu.1")
  mu.0.new <- predict(fit, x, group.by = g, type = "mu.0")
  icate.new <- predict(fit, x, group.by = g, type = "icate")
  expect_equal(p.score, p.score.new)
  expect_equal(mu.0, mu.0.new)
  expect_equal(mu.1, mu.1.new)
  expect_equal(icate, icate.new)
})
# Clean up the shared fixture objects.
rm(testData, n.train, x, y, z, g, n.samples, n.chains, x.new, n.test)
|
98702d9424df1bb2e9a8d58a86d94dd9ef358b46
|
a2486374afa095172e596aed81b16ea9788aa4ea
|
/machine-learning-ex7/ex7R/runKMeans.R
|
3a9829693f902af7289aba1247c45826cdbfbee1
|
[] |
no_license
|
jerryhsieh/Stanfor-Machine-Learning
|
41bd0dbd95f5f2484d3b9967de451a38d909332a
|
f35658e5465596ff810536e1e822ae7c76b458b7
|
refs/heads/master
| 2021-01-10T09:06:18.649084
| 2017-02-27T01:42:19
| 2017-02-27T01:42:19
| 36,797,928
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,994
|
r
|
runKMeans.R
|
runkMeans <- function(X, initial_centroids, max_iters, plot_progress = FALSE) {
  #RUNKMEANS runs the K-Means algorithm on data matrix X, where each row of X
  #is a single example. initial_centroids (a K x n matrix) seeds the
  #algorithm, which is run for max_iters iterations. If plot_progress is
  #TRUE, clustering progress is plotted each iteration and the user is
  #prompted to continue between iterations.
  #
  # Returns a list with:
  #   centroids - K x n matrix of the final centroids
  #   idx       - m x 1 matrix of centroid assignments (entries in 1..K)

  # Helper definitions expected alongside this script.
  # NOTE(review): "findClosetCentroids.R" (sic) is the helper file's actual
  # name; it must define findClosestCentroids().
  source("plotProgressKMeans.R")
  source("findClosetCentroids.R")
  source("computeCentroids.R")

  # Initialize bookkeeping.
  m <- nrow(X)
  K <- nrow(initial_centroids)
  centroids <- initial_centroids
  previous_centroids <- centroids
  idx <- matrix(0, m, 1)

  # Run K-Means.
  for (i in seq_len(max_iters)) {
    # BUG FIX: message() appends its own newline, so the "\n" in the original
    # format string produced a doubled newline.
    message(sprintf('K-Means iteration %d/%d...', i, max_iters))

    # Assignment step: each example goes to its closest centroid.
    idx <- findClosestCentroids(X, centroids)

    # Optionally visualize progress before the centroids move.
    if (plot_progress) {
      plotProgresskMeans(X, centroids, previous_centroids, idx, K, i)
      previous_centroids <- centroids
      readline(prompt = 'Press enter to continue.\n')
    }

    # Update step: recompute centroids from the memberships.
    centroids <- computeCentroids(X, idx, K)
  }

  # BUG FIX: the original ended with `CandI = list(...)`, an assignment whose
  # value is returned invisibly; return the result list explicitly instead.
  list(centroids = centroids, idx = idx)
}
|
2976115804747c9b8bd9a656367d530da6b5f863
|
c11472bbe3d9370c0172ed8c7b696c78304eb8ce
|
/R-libraries/OysterMan/R/Maturity.R
|
0973c9bf9d35ba29aa1e62af07e9ae48d8dd0487
|
[] |
no_license
|
jyqalan/OysterMan
|
144b06cf7c2ab056c7022557409ff74a8328abbc
|
2b6bcd8286df46805c877d559dc29f8f039e240f
|
refs/heads/master
| 2021-01-18T22:47:59.390051
| 2016-03-27T07:22:03
| 2016-03-27T07:22:03
| 54,817,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,669
|
r
|
Maturity.R
|
# S4 generic for extracting mature proportions; guarded so an existing generic
# (e.g. from another loaded package) is not clobbered.
if(!isGeneric("get_mature_proportions")) setGeneric("get_mature_proportions", function(object, ...)standardGeneric("get_mature_proportions"))
# Constructor-style generic plus the class itself: a Maturity wraps an Ogive
# and the vector of per-size-class mature proportions derived from it.
setGeneric("Maturity", function(object, ...)standardGeneric("Maturity"))
setClass("Maturity",representation(ogive="Ogive",mature_proportions="vector"))
setMethod("initialize", signature(.Object="Maturity"),
          function(.Object, ogive, n_classes, class_mins, plus_group, plus_group_size){
              # Compute a representative size (the midpoint of the class bounds)
              # for every size class, then evaluate the ogive at either the
              # class indices or those midpoints.
              low <- 1
              high <- n_classes
              class_sizes <- vector("numeric", length = n_classes)
              for (i in low:high){
                  # BUG FIX: the sum must be parenthesized before halving; the
                  # old code computed class_mins[i] + (class_mins[i+1] * 0.5).
                  # The top class (i == high) is re-assigned below, so a trailing
                  # NA here is harmless in the plus-group case.
                  class_sizes[i] <- (class_mins[i] + class_mins[i + 1]) * 0.5
              }
              if (plus_group){
                  # With a plus group there is exactly one class_min per class.
                  if (length(class_mins) != (high-low+1)) stop("class_mins is the wrong size")
                  class_sizes[high] <- plus_group_size
              } else {
                  # Without a plus group, class_mins also carries the upper bound.
                  # BUG FIX: fatal() is not defined; use stop() like the branch above.
                  if (length(class_mins) != (high-low+2)) stop("class_mins is the wrong size")
                  class_sizes[high] <- (class_mins[high] + class_mins[high+1]) * 0.5
              }
              if(ogive@low != 1) stop("ogive@low != 1")
              if(ogive@high != n_classes) stop("ogive@high != n_classes")
              .Object@ogive <- ogive
              # An ogive defined "by class" is evaluated at class indices,
              # otherwise at the midpoint sizes computed above.
              if(ogive@by_class){
                  .Object@mature_proportions <- get_value(ogive, ogive@low:ogive@high)
              } else {
                  .Object@mature_proportions <- get_value(ogive, class_sizes)
              }
              return (.Object)
          }
)
setMethod("Maturity", signature(object="missing"),
          function(object, ogive, n_classes, class_mins, plus_group, plus_group_size) {
              # Delegate to initialize(); use the full argument name
              # (the old call relied on partial matching of "n_class").
              return(new("Maturity", ogive = ogive, n_classes = n_classes,
                         class_mins = class_mins, plus_group = plus_group,
                         plus_group_size = plus_group_size))
          }
)
# Simple accessor: expose the mature_proportions slot of a Maturity object.
setMethod("get_mature_proportions", signature(object="Maturity"),
          function(object) object@mature_proportions
)
|
5750fe7ae859fb044465ddfafe85798f610ab634
|
dd9acf1e77c99ee3d3b3176d41c84d634cc66d7a
|
/CapstoneMovieLens.R
|
fdf3c29084f5bf3e4d91a3bb1650f5b4c6cd124e
|
[] |
no_license
|
jaygopalak/Movie-Recommendation
|
91c278d28290f93d0c17b4ddbda9c87b24f2b339
|
8909b3fe2bbf5c145776fa3a3157981bc971c0cf
|
refs/heads/main
| 2023-08-07T14:07:45.652023
| 2021-09-16T05:22:57
| 2021-09-16T05:22:57
| 407,031,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,697
|
r
|
CapstoneMovieLens.R
|
################################
# Capstone Movie Lens Code #
################################
# Create edx set, validation set
# Note: this process could take a couple of minutes
# NOTE(review): require() for loading silently returns FALSE on failure;
# library() after install.packages() would fail loudly -- consider changing.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as delimiter; rewrite as tab-separated and parse.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# str_split_fixed returns character columns (factors pre-R 4.0); coerce types.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
#Adding other libraries for visualizations
install.packages("ggthemes", repos = "http://cran.us.r-project.org")
library(ggthemes)
library(ggplot2)
#Data Inspection
head(edx)
summary(edx)
# Root-mean-squared error between observed and predicted ratings.
# NA pairs are dropped (na.rm) so partially-predicted vectors can still be
# scored. (Fixed: spelled out TRUE instead of the reassignable shorthand T.)
RMSE <- function(true_ratings, predicted_ratings){
  sqrt(mean((true_ratings - predicted_ratings)^2, na.rm = TRUE))
}
# Modify the year as a column in the edx & validation datasets
# (release year is embedded in the title as "Title (YYYY)").
edx <- edx %>% mutate(year = as.numeric(str_sub(title,-5,-2)))
validation <- validation %>% mutate(year = as.numeric(str_sub(title,-5,-2)))
# Splitting edx further to run data cleaning quicker
edx1 <- edx[1:2000000,]
edx2 <- edx[2000001:4000000,]
edx3 <- edx[4000001:6000000,]
edx4 <- edx[6000001:8000000,]
edx5 <- edx[8000001:9000055,]
# Splitting the genres in the datasets (note: this process will take a while)
splitedx1 <- edx1 %>% separate_rows(genres, sep = "\\|")
splitedx2 <- edx2 %>% separate_rows(genres, sep = "\\|")
splitedx3 <- edx3 %>% separate_rows(genres, sep = "\\|")
splitedx4 <- edx4 %>% separate_rows(genres, sep = "\\|")
splitedx5 <- edx5 %>% separate_rows(genres, sep = "\\|")
splitvalid <- validation %>% separate_rows(genres, sep = "\\|")
#Combining the parsed edx datasets
splitedx <- rbind(splitedx1, splitedx2, splitedx3, splitedx4, splitedx5)
#Removing the previously created datasets after combining
rm(edx1, edx2, edx3, edx4, edx5, splitedx1, splitedx2, splitedx3, splitedx4, splitedx5)
#Number of unique movies and users
edx %>% summarize(no_of_users = n_distinct(userId), no_of_movies = n_distinct(movieId))
# Number of Movie ratings by genre
genre_rating <- splitedx%>%
  group_by(genres) %>%
  summarize(count = n()) %>%
  arrange(desc(count))
print(genre_rating)
####Visualization of Variables#####
#Ratings Distribution
edx %>% group_by(rating) %>% summarize(n=n())
#Visualization of Ratings Distribution
edx %>% group_by(rating) %>%
  summarize(count=n()) %>%
  ggplot(aes(x=rating, y=count)) +
  geom_line() +
  geom_point() +
  scale_y_log10() +
  ggtitle("Ratings Distribution") +
  xlab("Rating") +
  ylab("Count") +
  theme_economist()
#Visualization of Movies Distribution based on ratings
edx %>% group_by(movieId) %>%
  summarise(n=n()) %>%
  ggplot(aes(n)) +
  geom_histogram(color = "white") +
  scale_x_log10() +
  ggtitle("Distribution of Movies") +
  xlab("Number of Ratings") +
  ylab("Number of Movies") +
  theme_economist()
# Visualization of User's rating distribution (right skewed)
edx %>% count(userId) %>%
  ggplot(aes(n)) +
  geom_histogram(color = "white") +
  scale_x_log10() +
  ggtitle("Users") +
  xlab("Number of Ratings") +
  ylab("Number of Users") +
  theme_economist()
# The distribution of mean ratings based on year of release
edx %>% group_by(year) %>%
  summarize(rating = mean(rating)) %>%
  ggplot(aes(year, rating)) +
  geom_point() +
  geom_smooth() +
  xlab("Year") +
  ylab("Rating") +
  theme_economist()
####Data Analysis####
# Initiate RMSE results as a data frame to compare results from different models
# NOTE(review): data_frame() is deprecated in favour of tibble().
rmse_results <- data_frame()
# The training dataset's mean rating
mu <- mean(edx$rating)
# model accounting for the movie effect b_i (rating minus the mean for each rating the movie received)
movieavg_norm <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu))
movieplot<- movieavg_norm %>% qplot(b_i, geom = "histogram", bins = 30, data = .,color = I("black"))
movieplot
# model taking into account the user effect b_u
useravg_norm <- edx %>%
  left_join(movieavg_norm, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu - b_i))
userplot <- useravg_norm %>% qplot(b_u, geom ="histogram", bins = 30, data = ., color = I("black"))
userplot
#### Model Validation #####
## Naive Model: mean only
naive_rmse <- RMSE(validation$rating,mu)
## Test results based on simple prediction
naive_rmse
## Check results
rmse_results <- data_frame(method = "Using mean only", RMSE = naive_rmse)
rmse_results %>% knitr::kable()
## Movie Effect Model ##
predicted_ratings_movie_norm <- validation %>%
  left_join(movieavg_norm, by='movieId') %>%
  mutate(pred = mu + b_i)
model_1_rmse <- RMSE(validation$rating,predicted_ratings_movie_norm$pred)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method="Movie Effect Model",
                                     RMSE = model_1_rmse ))
# saving results in a table
rmse_results %>% knitr::kable()
## Movie and User Effects Model ##
# Use test set,join movie averages & user averages
# Prediction equals the mean with user effect b_u & movie effect b_i
predicted_ratings_user_norm <- validation %>%
  left_join(movieavg_norm, by='movieId') %>%
  left_join(useravg_norm, by='userId') %>%
  mutate(pred = mu + b_i + b_u)
# test and save rmse results
model_2_rmse <- RMSE(validation$rating,predicted_ratings_user_norm$pred)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method="Movie and User Effect Model",
                                     RMSE = model_2_rmse ))
rmse_results %>% knitr::kable()
## Regularized Movie and User Effects Model ##
# lambda is a tuning parameter (cross-validation is used to choose it)
lambdas <- seq(0, 10, 0.25)
# For each lambda,find b_i & b_u, followed by rating prediction & testing
# note:the below code could take a while
rmses <- sapply(lambdas, function(l){
  mu <- mean(edx$rating)
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu)/(n()+l))
  b_u <- edx %>%
    left_join(b_i, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu)/(n()+l))
  predicted_ratings <- validation %>%
    left_join(b_i, by = "movieId") %>%
    left_join(b_u, by = "userId") %>%
    mutate(pred = mu + b_i + b_u) %>%
    .$pred
  return(RMSE(validation$rating,predicted_ratings))
})
# Plot rmses vs lambdas to select the optimal lambda
qplot(lambdas, rmses)
lambda <- lambdas[which.min(rmses)]
lambda
# Compute regularized estimates of b_i using lambda
movieavg_reg <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n()+lambda), n_i = n())
# Compute regularized estimates of b_u using lambda
useravg_reg <- edx %>%
  left_join(movieavg_reg, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = sum(rating - mu - b_i)/(n()+lambda), n_u = n())
# Predict ratings
predicted_ratings_reg <- validation %>%
  left_join(movieavg_reg, by='movieId') %>%
  left_join(useravg_reg, by='userId') %>%
  mutate(pred = mu + b_i + b_u) %>%
  .$pred
# Testing and saving results
model_3_rmse <- RMSE(validation$rating,predicted_ratings_reg)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method="Regularized Movie and User Effect Model",
                                     RMSE = model_3_rmse ))
rmse_results %>% knitr::kable()
##Regularized with all Effects Model ##
# The approach utilized in the above model is implemented below with the added genres and year effects
# b_y and b_g represent the year & genre effects, respectively
lambdas <- seq(0, 20, 1)
# Note: the below code could take some time
rmses <- sapply(lambdas, function(l){
  mu <- mean(edx$rating)
  b_i <- splitedx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu)/(n()+l))
  b_u <- splitedx %>%
    left_join(b_i, by="movieId") %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu)/(n()+l))
  # NOTE(review): b_y and b_g divide by (n()+lambda) -- `lambda` is the value
  # chosen for the *previous* model, not the loop variable `l`. Probably a
  # bug (should be l); confirm before changing as it alters the tuning curve.
  b_y <- splitedx %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    group_by(year) %>%
    summarize(b_y = sum(rating - mu - b_i - b_u)/(n()+lambda), n_y = n())
  b_g <- splitedx %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    left_join(b_y, by = 'year') %>%
    group_by(genres) %>%
    summarize(b_g = sum(rating - mu - b_i - b_u - b_y)/(n()+lambda), n_g = n())
  predicted_ratings <- splitvalid %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    left_join(b_y, by = 'year') %>%
    left_join(b_g, by = 'genres') %>%
    mutate(pred = mu + b_i + b_u + b_y + b_g) %>%
    .$pred
  return(RMSE(splitvalid$rating,predicted_ratings))
})
# Computing new predictions using the optimal lambda
# Testing and saving the results
qplot(lambdas, rmses)
lambda_2 <- lambdas[which.min(rmses)]
lambda_2
movie_reg_avgs_2 <- splitedx %>%
  group_by(movieId) %>%
  summarize(b_i = sum(rating - mu)/(n()+lambda_2), n_i = n())
user_reg_avgs_2 <- splitedx %>%
  left_join(movie_reg_avgs_2, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = sum(rating - mu - b_i)/(n()+lambda_2), n_u = n())
year_reg_avgs <- splitedx %>%
  left_join(movie_reg_avgs_2, by='movieId') %>%
  left_join(user_reg_avgs_2, by='userId') %>%
  group_by(year) %>%
  summarize(b_y = sum(rating - mu - b_i - b_u)/(n()+lambda_2), n_y = n())
genre_reg_avgs <- splitedx %>%
  left_join(movie_reg_avgs_2, by='movieId') %>%
  left_join(user_reg_avgs_2, by='userId') %>%
  left_join(year_reg_avgs, by = 'year') %>%
  group_by(genres) %>%
  summarize(b_g = sum(rating - mu - b_i - b_u - b_y)/(n()+lambda_2), n_g = n())
predicted_ratings <- splitvalid %>%
  left_join(movie_reg_avgs_2, by='movieId') %>%
  left_join(user_reg_avgs_2, by='userId') %>%
  left_join(year_reg_avgs, by = 'year') %>%
  left_join(genre_reg_avgs, by = 'genres') %>%
  mutate(pred = mu + b_i + b_u + b_y + b_g) %>%
  .$pred
model_4_rmse <- RMSE(splitvalid$rating,predicted_ratings)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method="Regularized Movie, User, Year, and Genre Effect Model",
                                     RMSE = model_4_rmse ))
rmse_results %>% knitr::kable()
## Results ##
# RMSE resutls overview
rmse_results %>% knitr::kable()
|
02356ca51a0a5ba633d6fe0f9c582f3389c6dc18
|
722e40a995b61553c751b54bb363e7ca869a7011
|
/K-means Clustering.R
|
40bcdc2e9f3e66331f50c220571b59147ad3c818
|
[] |
no_license
|
Ravirajadrangi/Wharton-People-Analytics
|
b90b11ab8a826ef32aafd920d563a2e66c4ad6c3
|
44f25da3befb8f0b1531fd1f9b53d79c37479b03
|
refs/heads/master
| 2021-01-02T22:54:15.921278
| 2017-03-15T20:54:04
| 2017-03-15T20:54:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,363
|
r
|
K-means Clustering.R
|
##K means
# Load the pre-built Wharton dataset and drop two ratio columns up front.
data<- read.csv("Whartonkmeans.csv",header= T)
data$X17accept.size.ratio<-NULL
data$X16accept.size.ratio<-NULL
# Candidate column subsets tried during feature selection; only colssparse is
# actually used below.
colmostbold =c(1,6,8,12,13,14,16,18,19,20,21,23,24)
colsreduced =c(1,8,12,13,14,16,19,20,21,23,24)
colssparse =c(1,12,13,14,16,21)
data<- data[colssparse]
library(dplyr)
grpdata <- group_by(data,Unid)
str(data)
uniqdata<- unique(grpdata)
uniqdata$tier <- NULL
View(grpdata)
data<-uniqdata
#Remove Predictors with zero variance
data <- data[sapply(data, function(x)length(levels(factor(x,exclude = NA)))>1)]
data <- scale(data)
dim(data)
require(caret)
# One-hot encode everything and zero-fill missing values for kmeans.
tier_dummy <- data
dmy <- dummyVars(~.,tier_dummy)
tier_dummy <- predict(dmy,newdata = tier_dummy)
tier_dummy <- data.frame(tier_dummy)
tier_dummy[is.na(tier_dummy)] <- 0
#tier_dummy<- scale(tier_dummy)
#mydata <- data[,c(8,9,10)]
# Elbow method: within-group sum of squares for k = 1..15.
wss <- (nrow(tier_dummy)-1)*sum(apply(tier_dummy,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(tier_dummy,
                                     centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
     ylab="Within groups sum of squares",
     main="Assessing the Optimal Number of Clusters with the Elbow Method",
     pch=20, cex=2)
#mydata<- na.omit(data)
#mydata<- scale(mydata)
#mydata$Sizenum <- as.numeric(mydata$Sizenum)
# Final clustering with k = 4 (chosen from the elbow plot).
fit <- kmeans(tier_dummy, 4,nstart = 20)
fit$cluster
resultssparse <- (fit$centers)
resultssparse <- data.frame(resultssparse)
# NOTE(review): resultsmostbold is never defined in this script -- this View()
# call will fail; presumably resultssparse was meant.
View(resultsmostbold)
write.csv(resultssparse,file="Whartonkmsparse.csv",row.names = FALSE)
x<-table(fit$cluster)#,data$tier)
addmargins(x)
kmeanstier<- fit$cluster
results <- NULL
results$reduced <- fit$cluster
results$mostbold <- fit$cluster
results$sparse <-fit$cluster
results <- data.frame(results)
results$allbold <- NULL
data$newtier <- as.factor(kmeanstier)
write.csv(data,file="Whartonkmeans.csv",row.names = FALSE)
##merge results with whartonkmeans
whdata<- read.csv("Whartonkmeans.csv",header= T)
# NOTE(review): `by` is passed here as a bare symbol with no value -- merge()
# will fail; a join column such as by="Unid" was probably intended.
mdata <- merge(x=whdata,y=results,by)
##DBSCAN
library(dbscan)
EPS = 7
cluster.dbscan <- dbscan(tier_dummy, eps = EPS, minPts = 30, borderPoints = T,
                         search = "kdtree")
plot(cluster.dbscan$cluster,tier_dummy)
table(cluster.dbscan$cluster)
# NOTE(review): lat/lng columns are not created anywhere above -- confirm this
# plot belongs to this dataset.
plot(lat ~ lng, data = data, col = cluster.dbscan$cluster + 1L, pch = 20)
summary(uniqdata$Accepted)
|
2d064327e8a1021e84421af4487da24ed1701ee6
|
493d13873d4f285e9c5927e3e1f235bf3af1104a
|
/nucleR_2.12.1_AH_edited_asy/man/readBowtie.Rd
|
20e6e104065c15110d90aa517139ddee4b55ad45
|
[
"MIT"
] |
permissive
|
elifesciences-publications/HTa_Histone_analog
|
d0d0cc2ea1c00ef7b5126a459688a8786d5cf53b
|
2e67014a2b0c85002a8268178410d616da7e6244
|
refs/heads/master
| 2020-09-15T05:40:39.307201
| 2019-11-22T08:39:23
| 2019-11-22T08:39:23
| 223,359,603
| 0
| 0
|
MIT
| 2019-11-22T08:37:05
| 2019-11-22T08:37:04
| null |
UTF-8
|
R
| false
| true
| 778
|
rd
|
readBowtie.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readBowtie.R
\name{readBowtie}
\alias{readBowtie}
\title{Import reads from a vector of Bowtie files}
\usage{
readBowtie(files, type = "paired")
}
\arguments{
\item{files}{List of input Bowtie files.}
\item{type}{Describes the type of reads. Values allowed are \code{single} for
single-ended reads and \code{paired} for pair-ended.}
}
\value{
\link[GenomicRanges:GRangesList]{GenomicRanges::GRangesList} containing the reads of each input BAM
file.
}
\description{
This function loads reads from Bowtie files coming from both single-end and
paired-end Next Generation Sequencing nucleosome mapping experiments.
}
\author{
Ricard Illa \email{ricard.illa@irbbarcelona.org}
}
\keyword{file}
|
97b4604cc0f8a703c7e334bc6d49bf4bc9ee2a7e
|
02068fe4358a53cccd8c3a38760e28ed639d95ce
|
/R/wbt-method.R
|
cc0bc9b4dcd56415d6002752b9353d7468a6ef37
|
[
"MIT"
] |
permissive
|
cran/whitebox
|
6583da01b83d11e1cae05642bae2c1d0977bdb0c
|
6cf940a69420a151a92c89adce1b4f38614b4e98
|
refs/heads/master
| 2023-06-22T08:58:10.218177
| 2023-06-07T14:00:02
| 2023-06-07T14:00:02
| 169,614,635
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,050
|
r
|
wbt-method.R
|
#' Run WhiteboxTools by Tool Name
#'
#' You are required to specify all required arguments as either paths to files, or R object types that can be associated with a file.
#'
#' Supports SpatRaster / RasterLayer input / output. Arguments are transformed from their source class and passed to WhiteboxTools executable as standard character string arguments involving file paths.
#'
#' To print help for any tool, see `wbt_tool_help()`
#'
#' @param result an S3 object of class `wbt_result` to use to supply input arguments, may be _missing_ such that first argument is `tool_name`
#' @param tool_name character. name of the tool to run. Or a tool/function name (i.e. a symbol) that is non-standard evaluated as a character.
#' @param ... arguments to tool
#' @param crs character Optional: a WKT Coordinate Reference System string, or other identifier such as EPSG code or PROJ string
#' @param verbose_mode passed to `wbt_run_tool()`
#' @param command_only Return command that would be run with `system()`? Default: `FALSE`
#' @details `tool_name` may be specified with or without quotes or `wbt_` prefix. e.g. `"wbt_slope"`, `wbt_slope`, `slope`, and `"slope"` are identical.
#'
#' @seealso [wbt_tool_help()]
#' @keywords General
#' @return a list with class `"wbt_result"` containing elements:
#' * `tool` - the tool name
#' * `args` - arguments passed to executable
#' * `stdout` - console output (result of `wbt_run_tool()`)
#' * `crs` - Coordinate Reference System string (WKT or PROJ)
#' * `result` - any 'result' parameters (`--output`) that can be converted to R objects after run. A list of RasterLayer or character. May be a `try-error` if arguments are specified incorrectly.
#' * `history` - history of 'result' when `wbt_result` was passed as input, most recent output at end
#'
#' @export
wbt <- function(result,
                tool_name,
                ...,
                crs = NULL,
                verbose_mode = FALSE,
                command_only = FALSE) {
  # When no prior result is supplied, `tool_name` may be an unquoted symbol
  # (non-standard evaluation): deparse it, keep only letters/underscores, and
  # dispatch straight to the missing-result method. Otherwise use normal S3
  # dispatch on `result` (wbt_result, character, function, ...).
  if (missing(result) || is.null(result)) {
    wbt.missing(
      result = NULL,
      tool_name = gsub("[^A-Za-z_]", "", wbt_internal_tool_name(deparse(
        substitute(tool_name)
      ))),
      ...,
      crs = crs,
      verbose_mode = verbose_mode,
      command_only = command_only
    )
  } else UseMethod("wbt")
}
# constructors for wbt_result and wbt_error_result
# TODO: think about result class structure
# Internal constructor for the "wbt_result" S3 class: a plain list with fixed
# slots (tool, args, stdout, crs, result) tagged with the class attribute.
.wbt_result_class <- function(tool_name, args, stdout, crs, result) {
  out <- list(tool = tool_name,
              args = args,
              stdout = stdout,
              crs = crs,
              result = result)
  class(out) <- "wbt_result"
  out
}
# Build a wbt_result whose $result slot is a try-error carrying `message`,
# so failures can flow through the normal result pipeline.
wbt_error_result <- function(tool_name, args, crs, message) {
  # Manufacture a genuine try-error object, then replace its text.
  cond <- try(stop(), silent = TRUE)
  cond[1] <- message
  .wbt_result_class(tool_name = tool_name,
                    args = args,
                    stdout = NULL,
                    crs = crs,
                    result = cond)
}
#' @export
print.wbt_result <- function(x, ...) {
  # Pretty-printer: header, parameter table, result summary, then a
  # one-line-per-step history section.
  cat(paste0('<wbt_result>\n'))
  n <- length(x$history)
  # Split the "--a=b --c=d" argument string into "a\t: 'b'" display rows.
  showargs <- trimws(strsplit(x$args, "--")[[1]])
  showargs <- paste0(showargs[nchar(showargs) > 1], "'")
  showargs <- paste0(gsub('=', "\t: '", showargs), collapse = '\n')
  if (length(showargs) >= 1 && nchar(showargs) > 1) {
    cat(paste0('--- PARAMETERS (', x$tool,') ---\n', showargs),"\n")
  } else {
    cat(paste0('No parameters (', x$tool,')\n'))
  }
  if (length(x$result) > 0) {
    if (is.null(names(x$result))) {
      # Unnamed result: the only interesting case is a captured try-error.
      if (inherits(x$result, 'try-error')) {
        cat("\n",attr(x$result, "tool"), x$result[1], "\n")
      }
    } else {
      if (length(x$result) > 0) {
        cat("--- RESULT ---\n")
      }
      # Print each named result: file paths get an existence check, anything
      # else (e.g. raster objects) is printed via its own show/print method.
      sapply(names(x$result), function(y) {
        resy <- x$result[[y]]
        classy <- class(resy)
        cat(paste0("$", y,"\n"))
        if (is.character(resy)) {
          if (file.exists(resy)) {
            cat(' File result: ', resy,
                paste0('exists (Last modified: ', as.character(file.mtime(resy)), ')\n'))
          } else {
            # TODO: are all output arguments "character" reasonably to be construed as files?
            cat(' ', shQuote(resy), ' [file path does not exist]\n')
          }
        } else {
          # show() for RasterLayer, SpatRaster outputs
          print(x$result[[y]])
        }
      })
    }
  } else {
    cat(paste0(' NULL result'))
  }
  if (n > 1) {
    # Summarize every prior step except the current one: tool name plus the
    # names/classes of its result slots.
    cat(paste0("--- HISTORY ---\n"))
    cat(paste0("Prior results (n=", n - 1, ") for:"),
        paste0(sapply(x$history[1:(length(x$history) - 1)], function(y)
          if (!is.null(y$tool))
            return(y$tool)),
          collapse = ", "), "\n -",
        paste0(sapply(x$history[1:(length(x$history) - 1)], function(y)
          if (!is.null(y$tool))
            return(paste0(y$tool," (", paste0(names(y$result), collapse = ", "), "<",
                          paste0(sapply(y$result, class), collapse = ", "), ">)"))),
          collapse = "\n - "))
  }
  cat("\n")
}
#' @export
#' @rdname wbt
wbt.wbt_result <- function(result, tool_name, ..., crs = NULL, verbose_mode = FALSE, command_only = FALSE) {
  # Chained-call method: the previous step's output becomes this tool's input.
  # process user input
  userargs <- list(...)
  # get tool parameters and clean tool name
  prm <- .get_tool_params(tool_name)
  tool_name <- unique(prm$tool_name)[1]
  userargs <- .process_tool_params(tool_name = tool_name,
                                   userargs = userargs,
                                   result = result,
                                   prm = prm)
  # Convert R objects (rasters, vectors, scalars) to command-line values.
  yrg <- try(.process_user_args(userargs), silent = TRUE)
  if (inherits(yrg, 'try-error')) {
    message(yrg)
    message("Failed to process user arguments, this should not happen; returning NULL")
    return(NULL)
  }
  # CRS propagation: an explicit `crs` argument takes precedence (consistent
  # with wbt.character); otherwise inherit the previous result's CRS.
  # BUG FIX: the old code used scalar ifelse() and unconditionally discarded
  # the user-supplied `crs` argument.
  if (is.null(crs)) {
    crs <- if (is.null(result$crs)) "" else result$crs
  }
  # TODO: carry over other arguments?
  res <- .wbt(tool_name, yrg, prm, crs = crs, verbose_mode = verbose_mode, command_only = command_only)
  # Append this call to the accumulated history of the chain.
  if (inherits(res, 'wbt_result')) {
    res$history <- c(result$history, list(res))
  }
  res
}
#' @description `wbt_result()`: return a combined list of results from either the history of a `wbt_result` (if present and `history=TRUE`), or the result of a `wbt_result`
#' @param result an object of class `wbt_result`
#' @param i Optional index of result list element to return as result. Default is whole list.
#' @param history Default: `TRUE` returns a list of all history results
#' @param attribute Default: `"output"`
#' @return list of result in `attribute` if `"history"` is present, otherwise the result in `attribute`. If `i` is specified, just the `i`th element of the list.
#' @export
#' @rdname wbt
wbt_result <- function(result, i = NULL, history = TRUE, attribute = "output") {
  # S3 generic; see wbt_result.wbt_result for the concrete behaviour.
  UseMethod("wbt_result")
}
#' @export
wbt_result.wbt_result <- function(result, i = NULL, history = TRUE, attribute = "output") {
  # If $history is present (list of prior wbt_result steps), collect the
  # requested attribute from every step; otherwise read it off this result.
  # NOTE: sapply() is deliberate -- scalar outputs simplify to a vector,
  # which callers may rely on.
  if (!is.null(result[["history"]]) && history) {
    res <- sapply(result[["history"]], function(x) x$result[[attribute]])
  } else {
    res <- result$result[[attribute]]
  }
  # Return the whole result set unless a single index was requested.
  if (is.null(i)) {
    return(res)
  }
  # BUG FIX: indices are 1-based; the old check (i < 0) let i = 0 through to
  # .subset2(), which failed with an unhelpful "subscript out of bounds".
  if (i < 1 || i > length(res)) {
    stop(sprintf("result list index %s is out of bounds", i), call. = FALSE)
  }
  .subset2(res, i)
}
#' @export
as.data.frame.wbt_result <- function(x, ...) {
  # One data.frame row per output in the (possibly multi-step) result; the
  # scalar fields (tool/args/stdout/crs) are recycled across those rows, and
  # the output list is kept as-is via I().
  outputlist <- wbt_result(x)
  cbind(as.data.frame(unclass(x)[c("tool", "args", "stdout", "crs")],
                      ...)[rep(1, length(outputlist)),],
        data.frame(output = I(outputlist)))
}
#' @export
#' @rdname wbt
wbt.character <- function(result, tool_name, ..., crs = NULL, verbose_mode = FALSE, command_only = FALSE) {
  # Character dispatch: here `result` IS the tool name. A separately supplied
  # tool_name is ignored with a warning.
  # process user input
  userargs <- list(...)
  if (!missing(tool_name) && !is.null(tool_name)) {
    warning("wbt.character uses first argument (`result`) as tool_name, `tool_name` ignored", call. = FALSE)
  }
  tool_name <- gsub(" ", "", result, fixed = TRUE)
  # get tool parameters and clean tool name
  prm <- .get_tool_params(tool_name)
  tool_name <- unique(prm$tool_name)[1]
  userargs <- .process_tool_params(tool_name = tool_name,
                                   userargs = userargs,
                                   prm = prm)
  # get input CRS; argument takes precedence
  if (missing(crs) || is.null(crs) || crs == "") {
    # is_input is derived from the -i flag which is only defined for --input
    # we want to check multiple inputs too, these have a numeric suffix e.g --input2
    ldx <- prm$is_input | grepl("^input[1-9]+", prm$argument_name)
    crs <- .process_crs(userargs[names(userargs) %in% prm$argument_name[ldx]])
  }
  # process user input (convert complex object -> character arguments)
  yrg <- try(.process_user_args(userargs), silent = TRUE)
  if (inherits(yrg, 'try-error')) {
    message(yrg)
    message("Failed to process user arguments, this should not happen; returning NULL")
    return(NULL)
  }
  res <- .wbt(tool_name, yrg, prm, crs = crs, verbose_mode = verbose_mode, command_only = command_only)
  # First call of a chain: history starts with this result alone.
  if (inherits(res, 'wbt_result')) {
    res$history <- list(res)
  }
  res
}
# support for using exported function names directly as input
#' @export
#' @rdname wbt
wbt.function <- function(result, tool_name, ..., crs = NULL, verbose_mode = FALSE, command_only = FALSE ) {
  # NSE: capture the *name* of the function object that was passed (e.g.
  # wbt_slope) and dispatch on it as a character tool name.
  tool_name <- deparse(substitute(result))
  if (is.character(tool_name)) {
    wbt.character(result = tool_name, tool_name = NULL, ..., crs = crs,
                  verbose_mode = verbose_mode, command_only = command_only)
  }
}
# start a toolchain with a call where result is missing or tool_name specified as result
#' @export
#' @rdname wbt
# Entry point when no prior result exists: `result` is NULL and `tool_name`
# holds the tool to run, so swap them when forwarding to the character method.
wbt.missing <- function(result, tool_name, ..., crs = NULL, verbose_mode = FALSE, command_only = FALSE) {
  if (is.character(tool_name)) {
    wbt.character(result = tool_name, tool_name = result, ...,
                  crs = crs,
                  verbose_mode = verbose_mode,
                  command_only = command_only)
  }
}
# Derive a single CRS string from the spatial input arguments: detect which
# package each object comes from (terra/sf/raster), extract each CRS, warn if
# they disagree, and return the first one tagged with its source package.
.process_crs <- function(inputargs) {
  # support raster inputs in the following formats
  pkgin <- sapply(inputargs, function(x) {
    if (inherits(x, 'SpatRaster')) return("terra")
    if (inherits(x, 'SpatVector')) return("terra")
    if (inherits(x, 'sf')) return("sf")
    if (inherits(x, 'sfc')) return("sf")
    if (inherits(x, 'RasterLayer')) return("raster")
    if (inherits(x, 'RasterStack')) return("raster")
    if (inherits(x, 'RasterBrick')) return("raster")
    ""
  })
  # requireNamespace("terra") for terra and/or raster as needed
  pkgreq <- sapply(unique(pkgin[nchar(pkgin) > 0]),
                   requireNamespace, quietly = TRUE)
  if (any(!pkgreq)) {
    stop("package ", pkgin[!pkgreq], " is required", call. = FALSE)
  }
  if (!all(pkgin == pkgin[1])) {
    # if (pkgin[1] != "")
    #   message("NOTE: Input spatial object classes do not match.")
  }
  # Extract a CRS string per input via the owning package's accessor; failures
  # collapse to "" rather than erroring.
  crsin <- lapply(seq_along(inputargs), function(i) {
    x <- inputargs[[i]]
    if (pkgin[i] == "terra") {
      x2 <- try(as.character(terra::crs(x)), silent = FALSE)
    } else if (pkgin[i] == "sf") {
      x2 <- try(as.character(sf::st_crs(x)), silent = FALSE)
    } else {
      x2 <- try(if (inherits(x, 'RasterLayer')) raster::wkt(raster::crs(x)))
    }
    if (is.null(x2) || inherits(x2, 'try-error')) {
      return("")
    }
    x2
  })
  if (length(crsin) > 0) {
    crsmatch <- do.call('c', lapply(crsin, function(x) x == crsin[[1]]))
    if (length(crsmatch) == 0 || !all(crsmatch) || any(is.na(crsmatch))) {
      message("NOTE: Input CRS do not match.")
    }
    # take first input CRS
    res <- crsin[[1]]
    attr(res, 'package') <- pkgin[1]
  } else {
    res <- ""
    attr(res, 'package') <- ""
  }
  # record package used to create the object for use in output
  res
}
# Convert user-supplied R objects into the plain character/numeric arguments
# the WhiteboxTools executable understands: spatial objects become their
# backing file paths (tagged with the owning package), scalars pass through,
# and unusable inputs become try-error notes that are reported and dropped.
.process_user_args <- function(userargs) {
  yrg <- lapply(names(userargs), function(argname) {
    x <- userargs[[argname]]
    # sfc/sp support: normalize to sf before the branches below
    if (inherits(x, 'sfc') || inherits(x, 'Spatial')) {
      if (requireNamespace("sf")) {
        x <- sf::st_as_sf(x)
      }
    }
    # raster RasterLayer/Stack/Brick support: pass the backing filename
    if (inherits(x, c('RasterLayer', 'RasterStack', 'RasterBrick'))) {
      if (requireNamespace("raster")) {
        res <- try(raster::filename(x))
        attr(res, "package") <- "raster"
        return(res)
      }
    # terra SpatRaster support
    } else if (inherits(x, 'SpatRaster')) {
      if (requireNamespace("terra")) {
        res <- try(terra::sources(x)$source, silent = TRUE)
        # BUG FIX: `res2` was previously checked outside this branch, so any
        # SpatRaster whose sources() call succeeded hit "object 'res2' not
        # found". The in-memory fallback now stays inside the failure branch.
        if (inherits(res, 'try-error')) {
          x <- wbt_source(x)
          res2 <- attr(x, 'wbt_dsn')
          if (is.null(res2)) {
            message(res[1])
          } else {
            res <- res2
          }
        }
        attr(res, "package") <- "terra"
        return(res)
      }
    # vector data support
    } else if (inherits(x, c('SpatVector', 'SpatVectorProxy', 'sf'))) {
      src <- attr(x, 'wbt_dsn')
      if (is.null(src)) {
        x <- wbt_source(x)
        src <- attr(x, 'wbt_dsn')
      }
      if (!is.null(src) && file.exists(src)) {
        if (inherits(x, c('SpatVector', 'SpatVectorProxy'))) {
          attr(src, "package") <- "terra"
        } else if (inherits(x, 'sf')) {
          attr(src, "package") <- "sf"
        }
        return(src)
      } else {
        stop("load/initialize SpatVector/sf objects with wbt_source()", call. = FALSE)
      }
    } else if (inherits(x, 'try-error')) {
      return(.warninput(paste0("NOTE: try-error result cannot be used as `", argname,"`")))
    } else if (!class(x)[1] %in% c("numeric", "integer", "character", "logical")) {
      return(.warninput(
        paste0('argument type (',
               paste0(class(x), collapse = ", "),
               ') is not supported at this time')
      ))
    } else {
      # allowed inputs "numeric", "integer", "character", "logical";
      # numerics are coerced explicitly (handles scientific notation).
      # "character" = shQuote(x) was considered but paths are quoted later.
      switch(class(x),
             "numeric" = as.numeric(x),
             x)
    }
  })
  names(yrg) <- names(userargs)
  # Report and drop arguments whose processing ended in a try-error.
  yrg <- lapply(yrg, function(y) {
    if (inherits(y, 'try-error')) {
      message(y[1])
      return(NULL)
    }
    return(y)
  })
  yrg[!sapply(yrg, is.null)]
}
# used to put notes into try-errors about user input; things that normally might cause fatal errors
# Wrap a note about problematic user input in a try-error object so that it
# can flow through the result pipeline without halting execution.
.warninput <- function(x) {
  err <- try(stop(), silent = TRUE)
  if (!is.null(x)) {
    err[1] <- paste0(x, collapse = "\n")
  }
  err
}
# Look up the parameter metadata rows for a tool from the package's
# `wbttoolparameters` dataset, warning (with fuzzy-match suggestions) when the
# tool name is unknown.
.get_tool_params <- function(tool_name) {
  # NOTE(review): if the dataset is missing, this silent try() does not stop
  # the subsequent subsetting from erroring -- confirm the intended fallback.
  try({
    wbttoolparameters <- get('wbttoolparameters')
  }, silent = TRUE)
  # remove underscores and other possible prefixes
  tool_name <- gsub("_", "", wbt_internal_tool_name(tool_name))
  # return subset by tool_name
  res <- wbttoolparameters[which(toupper(wbttoolparameters$tool_name) %in% toupper(tool_name)), ]
  if (nrow(res) == 0) {
    extra <- ''
    if (trimws(tool_name) != '') {
      tn <- unique(wbttoolparameters$tool_name)
      # Fuzzy suggestion: substring match against all known tool names.
      guess <- grep(paste0(".*", tolower(tool_name), ".*"), tolower(tn), ignore.case = TRUE)
      # guess <- pmatch(tolower(tool_name), tolower(tn), duplicates.ok = TRUE)
      if (length(guess) > 0) {
        extra <- paste0(", perhaps you meant one of: ", paste0(shQuote(tn[guess]), collapse = ", " ))
      }
    }
    warning("Parameters for tool_name='", tool_name, "' not found", extra, call. = FALSE)
  }
  res
}
# Prepare user arguments for a tool: when the tool's primary input parameter
# was not given explicitly, fill it from the previous result's output (chained
# calls) or from a generic "input" argument, renaming that argument to the
# tool's declared input parameter name.
.process_tool_params <- function(tool_name,
                                 userargs,
                                 result = NULL,
                                 prm = .get_tool_params(tool_name)) {
  # First declared input parameter for this tool (NA when the tool has none).
  inputprm <- prm$argument_name[prm$is_input][1]
  # BUG FIX: guard against NA -- previously a tool with no input parameter
  # could end up with an argument literally named "NA".
  if (length(inputprm) == 1 && !is.na(inputprm) && !inputprm %in% names(userargs)) {
    #TODO: multi output? is this robust
    newinput <- character(0)
    if (!is.null(result)) {
      newinput <- result$result
      if (!inherits(newinput, 'try-error')) {
        newinput <- newinput$output
      }
    } else if ("input" %in% names(userargs)) {
      newinput <- userargs$input
    }
    if (length(newinput) > 0) {
      # re-arrange and re-name user "input" arg if necessary
      userargs <- c(list(input = newinput), userargs[names(userargs) != "input"])
      names(userargs)[1] <- inputprm
    }
  }
  userargs
}
# Validate user args against the tool's parameter table and build the CLI
# argument string ("--name=value --name2=value2 ...").
#
# On success: returns the argument string with a 'package' attribute naming the
# R package inferred from the args' own 'package' attributes (used later to
# decide how outputs are read back into R).
# On failure (missing required parameter, or an argument name not in the
# table): prints a [*]/[ ] checklist of required and optional arguments via
# message() and returns a try-error (built by .warninput) tagged with 'tool'
# and 'args' attributes.
.wbt_args <- function(tool_name, args, params = .get_tool_params(tool_name), crs = NULL) {
  # match tool_name and params options for helpful errors
  newpkg <- unique(do.call('c', lapply(args, attr, 'package')))[1]
  # construct arg string --param1=value1 --param2=value2
  newargs <- paste(paste0("--", names(args), "=", as.character(args)), collapse = " ")
  # a required parameter counts as present when its name occurs anywhere in
  # the argument string (substring match via grepl)
  reqparams <- sapply(params$argument_name[!params$optional], function(x) any(sapply(x, function(y) grepl(y, newargs))))
  # at least one required param is missing or a bad parameter is specified
  chkprm <- !names(args) %in% params$argument_name
  if (sum(as.numeric(reqparams)) < length(params$argument_name[!params$optional]) || any(chkprm)) {
    # if (wbt_verbose()) {
    # user specified a bad parameter
    invalid <- character()
    if (any(chkprm)) {
      message("Error: invalid parameter", ifelse(sum(chkprm) > 1, "s", ""),
              " '", paste0(names(args)[chkprm], collapse = "', '"), "'\n")
      invalid <- names(args)[chkprm]
    }
    # inform of required args ([*] = supplied, [ ] = missing)
    message("Required arguments:")
    reqprm <- params$argument_name[!params$optional][!reqparams]
    ismissing <- ifelse(!reqparams, " [ ] ", " [*] ")
    for (i in seq_along(reqparams)) {
      message("-", ismissing[i], params$argument_name[!params$optional][i], " -- ", params$description[!params$optional][i])
    }
    message("")
    # inform of optional args
    optparams <- !params$argument_name[params$optional] %in% names(args)
    ismissing <- ifelse(optparams, " [ ] ", " [*] ")
    if (any(optparams)) {
      message("Optional arguments:")
      for (i in seq_along(optparams)) {
        message("-", ismissing[i], params$argument_name[params$optional][i], " -- ", params$description[params$optional][i])
      }
    }
    # }
    # fold the problems into a single "ERROR: ..." try-error message
    if (length(invalid) > 0) {
      invalid <- paste(paste0(shQuote(invalid), collapse = ", "), "invalid")
    }
    if (length(reqprm) > 0) {
      reqprm <- paste(paste0(shQuote(reqprm), collapse = ", "), "required")
    }
    errres <- .warninput(paste0("ERROR: ",
                                paste0(c(invalid, reqprm),
                                       collapse = "; ")))
    attr(errres, 'tool') <- tool_name
    attr(errres, 'args') <- newargs
    return(errres)
  }
  attr(newargs, 'package') <- newpkg
  newargs
}
# processing of output files -> R objects
#
# For each "output" entry in `args`, convert the written file to an R object:
# GeoTIFF -> raster::raster() or terra::rast() (chosen via the 'package'
# attribute on `crs`, or the `pkg` fallback), shapefile -> terra::vect();
# anything else is returned as the file path unchanged. The wbt working
# directory (wbt_wd()) is consulted to resolve relative paths.
.wbt_process_output <- function(args, crs = NULL, pkg = NULL) {
  if (is.null(pkg)) {
    pkg <- ""
  }
  lapply(args[names(args) %in% "output"], function(x) {
    # TODO: user defined file patterns and customizable methods
    # https://jblindsay.github.io/wbt_book/supported_formats.html
    # support for GeoTIFF
    if (!is.na(x) & (endsWith(x, ".tif") | endsWith(x, ".tiff"))) {
      wdp <- file.path(wbt_wd(), x)
      # check working directory if set
      if (file.exists(wdp)) {
        x <- wdp
      }
      # default value is original value
      y <- x
      # support for the raster v.s. terra v.s. ? package via attribute passed with crs or prior result
      israster <- attr(crs, 'package')
      if (length(israster) == 0) israster <- pkg
      if (israster %in% "raster") {
        if (requireNamespace('raster')) {
          y <- suppressWarnings(try(raster::raster(x)))
          if (!inherits(y, 'try-error')) {
            if (length(crs) == 0) {
              crs <- ""
            }
            # propagate wbt_result CRS if the result has none
            if (is.na(as.character(raster::crs(y))) && !is.na(crs)) {
              raster::crs(y) <- ifelse(!is.character(crs), "", crs)
            }
          }
          y
        }
        # for other cases, use terra
      } else if (israster %in% "terra") {
        if (requireNamespace('terra')) {
          y <- suppressWarnings(try(terra::rast(x)))
          if (!inherits(y, 'try-error')) {
            terra::crs(y) <- ifelse(!is.character(crs), "", crs)
          }
        }
      }
      ##
      ## TODO: LAS object from lidR package support?
      #
      # } else if (!is.na(x) & endsWith(x, ".las")) {
      #   if (requireNamespace('lidR')) {
      #     # TODO: support additional arguments to readLAS()?
      #     try(lidR::readLAS(x))
      #   }
      ##
      return(y)
    } else if (!is.na(x) && endsWith(x, ".shp")) {
      if (requireNamespace('terra')) {
        return(try(terra::vect(x, crs = ifelse(!is.character(crs), "", crs), proxy = TRUE)))
      }
    } else {
      return(x)
    }
  })
}
# Core driver for running a WhiteboxTools tool.
#
# Validates the arguments (.wbt_args), runs the tool (wbt_run_tool), and wraps
# everything in a .wbt_result_class object containing the tool name, argument
# string, captured console output, CRS, and the processed outputs
# (.wbt_process_output). Validation and execution failures are returned
# (invisibly) as result objects holding a try-error rather than thrown.
# With command_only = TRUE, returns the assembled command line string instead.
.wbt <- function(tool_name,
                 args,
                 params = .get_tool_params(tool_name),
                 crs = NULL,
                 verbose_mode = FALSE,
                 command_only = FALSE) {
  # process and check user arguments
  newargs <- .wbt_args(tool_name = tool_name, args = args, params = params, crs = crs)
  if (inherits(newargs, 'try-error')) {
    # argument validation failed: return a result carrying the try-error
    return(invisible(.wbt_result_class(tool_name = tool_name,
                                       args = attr(newargs, 'args'),
                                       stdout = NULL,
                                       crs = crs,
                                       result = newargs)))
  }
  # pass through wbt_run_tool
  console <- try(wbt_run_tool(tool_name = tool_name,
                              args = newargs,
                              verbose_mode = verbose_mode,
                              command_only = command_only
                              ), silent = TRUE)
  if (command_only) {
    return(console[1])
  }
  if (inherits(console, 'try-error')) {
    return(invisible(.wbt_result_class(tool_name = tool_name,
                                       args = newargs,
                                       stdout = console[1], # return the error message, and a try-error in result
                                       crs = crs,
                                       result = .warninput(c("ERROR: Tool execution failed", console[1]))
                                       )))
  }
  # if tool runs without needing "output" specified
  # assume it modifies the file specified as the first input
  if (!"output" %in% names(args)) {
    # TODO: does this need further generalization/use of tool parameter LUT?
    args[["output"]] <- args[[grep("dem|input", names(args))[1]]]
  }
  return(invisible(
    .wbt_result_class(
      tool_name = tool_name,
      args = newargs,
      stdout = console,
      crs = crs,
      result = .wbt_process_output(args, crs, pkg = attr(newargs, 'package'))
    )
  ))
}
|
c81f5204c6b443ff2471affb1028e54dec80a2f6
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/decido/inst/testfiles/earcut_cpp/libFuzzer_earcut_cpp/earcut_cpp_valgrind_files/1609874554-test.R
|
46755f4aeba80890bb6abdbdcb7506adf56ac7db
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
1609874554-test.R
|
# libFuzzer/valgrind regression case: replay a fuzzer-captured argument list
# against decido's internal C++ ear-cutting triangulation entry point.
testlist <- list(holes = integer(0), numholes = integer(0), x = c(-2.937446524423e-306, -2.93744652442304e-306, -2.937446524423e-306, -2.937446524423e-306, -2.937446524423e-306, -2.937446524423e-306, 1.62527834855936e-319, 0, 1.20953760085021e-312, 2.17292368994844e-311, 1.35111450752867e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
# invoke the unexported C++ wrapper with the captured arguments
result <- do.call(decido:::earcut_cpp,testlist)
# print the returned structure; surviving without a crash is the real test
str(result)
|
4239ea763dafd87f439b9c96cc5b6b6d6a9bdf25
|
2f88dfeee952434082f8de387be2516b5e777cc6
|
/R/getAmat.R
|
883d5f679925fd1841803b67c3b988d875234acd
|
[] |
no_license
|
cran/SUMMER
|
be071c667fe88db1a619e6694859bd2e1f8dad2a
|
11d92710f2873220b013847984f89d75ca199555
|
refs/heads/master
| 2022-08-10T19:29:29.147417
| 2022-07-08T07:50:06
| 2022-07-08T07:50:06
| 112,650,211
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
getAmat.R
|
#' Extract adjacency matrix from the map
#'
#' @param geo SpatialPolygonsDataFrame of the map
#' @param names character vector of region ids to be added to the neighbours list
#'
#' @return Spatial adjacency matrix.
#' @author Zehang Richard Li
#' @examples
#' \dontrun{
#' data(DemoMap)
#' mat <- getAmat(geo = DemoMap$geo, names = DemoMap$geo$REGNAME)
#' mat
#' DemoMap$Amat
#' }
#' @export
getAmat <- function(geo, names){
  # contiguity neighbours (rook, i.e. queen = FALSE) labelled by region ids
  nb.r <- spdep::poly2nb(geo, queen = FALSE, row.names = names)
  # binary (0/1) adjacency matrix; zero.policy tolerates regions with no
  # neighbours (their row is all zeros)
  mat <- spdep::nb2mat(nb.r, style = "B", zero.policy = TRUE)
  # nb2mat() only sets row names; mirror them onto the columns
  colnames(mat) <- rownames(mat)
  as.matrix(mat[seq_len(nrow(mat)), seq_len(nrow(mat))])
}
|
26bc2a1424c577aa9b1dc163a336302574d148d8
|
ef8d66ebaeaf27fa1aed1cf01ebd70ce8224c5cd
|
/man/mash_plot_effects.Rd
|
be7dde1009aa56e3dfa2905093c5a8a17a806f01
|
[] |
no_license
|
Alice-MacQueen/CDBNgenomics
|
dd6c8026156d91be7f12a9857d0ebeb89c32c384
|
6b00f48eb1c6eec848f11416d7a5fd752cd778bd
|
refs/heads/master
| 2021-07-08T06:15:56.774003
| 2020-08-12T19:28:32
| 2020-08-12T19:28:32
| 178,261,021
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,162
|
rd
|
mash_plot_effects.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/handle_mash_results.R
\name{mash_plot_effects}
\alias{mash_plot_effects}
\title{ggplot of single mash effect}
\usage{
mash_plot_effects(
m,
n = NA,
i = NA,
marker = TRUE,
saveoutput = FALSE,
suffix = ""
)
}
\arguments{
\item{m}{An object of type mash}
\item{n}{Optional. Integer or integer vector. The result number to plot, in
order of significance. 1 would be the top result, for example. Find
these with \code{\link{get_significant_results}}.}
\item{i}{Optional. Integer or integer vector. The result number to plot, in
the order of the mash object. 1 would be the first marker in the mash
object, for example. Find these with \code{\link{get_marker_df}}.}
\item{marker}{Optional. Print the marker name on the plot?}
\item{saveoutput}{Logical. Should the output be saved to the path?}
\item{suffix}{Character. Optional. A unique suffix used to save the files,
instead of the current date & time.}
}
\description{
Creates a plot with point estimates and standard errors for
effects of a single SNP in multiple conditions.
}
\note{
Specify only one of n or i.
}
|
2aaa9adb123502c34c239926277719553d0e923b
|
08b4eaf203fbbe87b09fdb2dc96b5d11fff2c171
|
/R/utils_validation.R
|
f9455f9d0c1f2362801b355b73953e9713a44eb6
|
[] |
no_license
|
cran/scDiffCom
|
a8f28d7f92acfba6b84e123707c437300a9adfd9
|
26fbcb29d53a04e49208cb38f3e515f4a59827aa
|
refs/heads/master
| 2023-07-09T07:30:59.085372
| 2021-08-17T06:20:05
| 2021-08-17T06:20:05
| 397,309,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,100
|
r
|
utils_validation.R
|
# Validate a scDiffCom parameter list.
#
# Checks that `params` contains exactly the expected names (the base set when
# `from_inputs` is TRUE, base + additional analysis flags otherwise), then
# type/length/range-checks each entry, accumulating one human-readable
# message per violation.
#
# @param params named list of parameters to validate
# @param from_inputs TRUE when validating user-supplied inputs (stricter seed
#   check, no analysis-flag entries); FALSE when validating a stored slot
# @return list(params = <canonically reordered params>,
#              check = <character vector of problems, or NULL when valid>)
validate_parameters <- function(
  params,
  from_inputs
) {
  # parameter names expected in every parameter list
  params_names_base <- c(
    "object_name",
    "LRI_species",
    "seurat_celltype_id",
    "seurat_condition_id",
    "seurat_assay",
    "seurat_slot",
    "log_scale",
    "score_type",
    "threshold_min_cells",
    "threshold_pct",
    "iterations",
    "threshold_quantile_score",
    "threshold_p_value_specificity",
    "threshold_p_value_de",
    "threshold_logfc",
    "return_distributions",
    "seed",
    "verbose"
  )
  # extra entries present only in stored (non-input) parameter lists
  params_names_additional <- c(
    "conditional_analysis",
    "permutation_analysis",
    "max_nL",
    "max_nR"
  )
  params_names_all <- c(params_names_base, params_names_additional)
  # names must match exactly (no missing, no extras); reorder canonically
  if (from_inputs) {
    if (!identical(sort(names(params)), sort(params_names_base))) {
      stop("Parameters do not match.")
    }
    params <- params[params_names_base]
  } else {
    if (!identical(sort(names(params)), sort(params_names_all))) {
      stop("Parameters do not match.")
    }
    params <- params[params_names_all]
  }
  # accumulate one message per violated constraint; the conditions below are
  # scalar, so short-circuit ||/&& is used (previously elementwise |, which
  # needlessly evaluated both operands)
  res <- NULL
  if (!is.character(params$object_name) || length(params$object_name) != 1) {
    res <- c(res, "'object_name' must be a character vector of length 1")
  }
  if (!(params$LRI_species %in% c("mouse", "human"))) {
    res <- c(res, "'LRI_species' must be either 'mouse' or 'human'")
  }
  if (!is.character(params$seurat_celltype_id) ||
      length(params$seurat_celltype_id) != 1) {
    res <- c(
      res,
      "'seurat_celltype_id' must be a character vector of length 1"
    )
  }
  if (!is.null(params$seurat_condition_id)) {
    if (
      !is.list(params$seurat_condition_id) ||
      length(params$seurat_condition_id) != 3 ||
      !identical(
        names(params$seurat_condition_id),
        c("column_name", "cond1_name", "cond2_name")) ||
      length(unlist(params$seurat_condition_id)) != 3
    ) {
      res <- c(
        res,
        paste0(
          "'seurat_condition_id' must be NULL or a length-3 list",
          " with names 'column_name', 'cond1_name', 'cond2_name'"
        )
      )
    } else if (
      # underscores are reserved as separators downstream
      grepl(
        "_",
        params$seurat_condition_id$cond1_name
      ) ||
      grepl(
        "_",
        params$seurat_condition_id$cond2_name
      )
    ) {
      res <- c(
        res,
        "Underscores are not allowed in 'cond1_name' and 'cond2_name'"
      )
    }
  }
  if (!is.character(params$seurat_assay) || length(params$seurat_assay) != 1) {
    res <- c(
      res,
      "'seurat_assay' must be NULL or a character vector of length 1"
    )
  }
  if (!(params$seurat_slot %in% c("counts", "data"))) {
    res <- c(res, "'seurat_slot' must be either 'data' or 'counts'")
  }
  if (!is.logical(params$log_scale) || length(params$log_scale) != 1) {
    res <- c(res, "'log_scale' must be a logical vector of length 1")
  }
  if (!(params$score_type %in% c("geometric_mean", "arithmetic_mean"))) {
    res <- c(
      res,
      "'score_type' must be either 'geometric_mean' or 'arithmetic_mean'"
    )
  }
  if (
    !is.numeric(params$threshold_min_cells) ||
    length(params$threshold_min_cells) > 1
  ) {
    res <- c(
      res,
      "'threshold_min_cells' must be a numeric vector of length 1"
    )
  } else if (
    params$threshold_min_cells < 0 ||
    params$threshold_min_cells %% 1 != 0
  ) {
    res <- c(
      res,
      "'threshold_min_cells' must be a non-negative integer"
    )
  }
  if (!is.numeric(params$threshold_pct) || length(params$threshold_pct) != 1) {
    res <- c(res, "'threshold_pct' must be a numeric vector of length 1")
  } else if (params$threshold_pct < 0 || params$threshold_pct >= 1) {
    res <- c(res, "'threshold_pct' must be a numeric in [0,1[")
  }
  if (!is.numeric(params$iterations) || length(params$iterations) > 1) {
    res <- c(res, "'iterations' must be a numeric vector of length 1")
  } else if (params$iterations < 0 || params$iterations %% 1 != 0) {
    res <- c(res, "'iterations' must be a positive integer or zero")
  }
  if (
    !is.numeric(params$threshold_quantile_score) ||
    length(params$threshold_quantile_score) != 1
  ) {
    res <- c(
      res,
      "'threshold_quantile_score' must be a numeric vector of length 1"
    )
  } else if (
    # fixed stray space: was `params$ threshold_quantile_score`
    params$threshold_quantile_score < 0 ||
    params$threshold_quantile_score >= 1
  ) {
    res <- c(
      res,
      "'threshold_quantile_score' must be a numeric in [0,1["
    )
  }
  if (
    !is.numeric(params$threshold_p_value_specificity) ||
    length(params$threshold_p_value_specificity) != 1
  ) {
    res <- c(
      res,
      "'threshold_p_value_specificity' must be a numeric vector of length 1"
    )
  } else if (
    params$threshold_p_value_specificity <= 0 ||
    params$threshold_p_value_specificity > 1
  ) {
    res <- c(
      res,
      "'threshold_p_value_specificity' must be a numeric in ]0,1]"
    )
  }
  if (
    !is.numeric(params$threshold_p_value_de) ||
    length(params$threshold_p_value_de) != 1
  ) {
    res <- c(
      res,
      "'threshold_p_value_de' must be a numeric vector of length 1"
    )
  } else if (
    params$threshold_p_value_de <= 0 ||
    params$threshold_p_value_de > 1
  ) {
    res <- c(res, "'threshold_p_value_de' must be a numeric in ]0,1]")
  }
  if (
    !is.numeric(params$threshold_logfc) ||
    length(params$threshold_logfc) != 1
  ) {
    res <- c(res, "'threshold_logfc' must be a numeric vector of length 1")
  } else if (params$threshold_logfc <= 0) {
    res <- c(res, "'threshold_logfc' must be a positive numeric")
  }
  if (
    !is.logical(params$return_distributions) ||
    length(params$return_distributions) != 1
  ) {
    res <- c(
      res,
      "'return_distributions' must be a logical vector of length 1"
    )
  }
  # seed: user inputs must be a single non-negative integer; a stored slot may
  # legitimately hold a numeric vector
  if (from_inputs) {
    if (!is.numeric(params$seed) || length(params$seed) > 1) {
      res <- c(res, "'seed' must be a numeric vector of length 1")
    } else if (params$seed < 0 || params$seed %% 1 != 0) {
      res <- c(res, "'seed' must be a non-negative integer")
    }
  } else {
    if (!is.numeric(params$seed)) {
      res <- c(res, "'seed' must be a numeric vector")
    }
  }
  if (!is.logical(params$verbose) || length(params$verbose) != 1) {
    res <- c(res, "'verbose' must be a logical vector of length 1")
  }
  if (!from_inputs) {
    if (
      !is.logical(params$conditional_analysis) ||
      length(params$conditional_analysis) != 1
    ) {
      res <- c(
        res,
        "'conditional_analysis' must be a logical vector of length 1"
      )
    }
    if (
      !is.logical(params$permutation_analysis) ||
      length(params$permutation_analysis) != 1
    ) {
      res <- c(
        res,
        "'permutation_analysis' must be a logical vector of length 1"
      )
    }
    if (!is.numeric(params$max_nL) || length(params$max_nL) != 1) {
      res <- c(res, "'max_nL' must be a numeric vector of length 1")
    }
    if (!is.numeric(params$max_nR) || length(params$max_nR) != 1) {
      res <- c(res, "'max_nR' must be a numeric vector of length 1")
    }
  }
  list(
    params = params,
    check = res
  )
}
# Validate the @parameters slot of a scDiffCom object.
# Returns NULL when valid, otherwise a message describing what is wrong.
validate_slot_parameters <- function(
  parameters
) {
  issues <- validate_parameters(
    params = parameters,
    from_inputs = FALSE
  )$check
  if (is.null(issues)) {
    return(NULL)
  }
  paste0(
    "@parameters is not formatted the correct way: ",
    issues
  )
}
# Placeholder slot validators: each currently performs no checks and returns
# NULL (meaning "no validation errors"). They exist as named hooks so that
# slot-specific validation can be added later without changing the callers.
validate_slot_cci_table_raw <- function(
  parameters,
  cci_table_raw
) {
  NULL
}
validate_slot_cci_table_detected <- function(
  parameters,
  cci_table_detected
) {
  NULL
}
validate_slot_ora_table <- function(
  parameters,
  ora_table
) {
  NULL
}
# NOTE(review): parameter is named `ora_table` although this validator is for
# the "ora_stringent" slot — presumably deliberate reuse; confirm.
validate_slot_ora_stringent <- function(
  parameters,
  ora_table
) {
  NULL
}
validate_slot_distributions <- function(
  parameters,
  distributions
) {
  NULL
}
validate_slot_is_combined <- function(
) {
  NULL
}
|
84e8acd827f5051ba1e5799e0f24319e06d79ec2
|
d122fd80d0beebfa9874e251aa07384db7025474
|
/R/util.R
|
2901ec301b1737aa9447e65b7a441f3d9749406a
|
[] |
no_license
|
fellstat/vivid
|
d567047ae44bcdd4323ffcc25588d82dbeaca984
|
a777e68e023a954e7afe0ef4a6c1d4f47e45a6c4
|
refs/heads/master
| 2020-04-09T20:43:27.034262
| 2019-05-22T06:42:07
| 2019-05-22T06:42:07
| 160,582,234
| 10
| 0
| null | 2019-05-14T16:09:12
| 2018-12-05T21:41:34
|
R
|
UTF-8
|
R
| false
| false
| 4,568
|
r
|
util.R
|
# Generate a 30-character pseudo-random identifier drawn from the hex-like
# alphabet a-f/0-9. Not an RFC 4122 UUID; uniqueness is probabilistic only.
gen_uuid <- function(){
  alphabet <- c(letters[1:6], 0:9)
  paste0(sample(alphabet, 30, replace = TRUE), collapse = "")
}
# Build a namespacing closure that prefixes ids with "<uuid>-".
# The uuid in use is attached to the returned function as attribute "uuid";
# when no uuid is supplied a fresh one is generated via gen_uuid().
vid_fun <- function(uuid){
  if (missing(uuid)) {
    uuid <- gen_uuid()
  }
  prefixer <- function(x) {
    paste0(uuid, "-", x)
  }
  attr(prefixer, "uuid") <- uuid
  prefixer
}
## Evaluate an expression (by default in the global environment) and return
## list(value, uuid). Errors are captured via try() rather than propagated.
## Wrapped in local() so the closure carries its own enclosing environment.
geval <- local(function(expr, uuid, substitute = FALSE, envir = .GlobalEnv, enclos = baseenv(), ...) {
  if (substitute) {
    expr <- substitute(expr)
  }
  evaluated <- try(eval(expr, envir = envir, enclos = enclos))
  list(evaluated, uuid)
})
## Like geval(), but also announces the outcome on the supplied queue by
## firing a "callback_exec" event before returning list(value, uuid).
gevalQ <- local(function(expr, uuid, queue, substitute = FALSE, envir = .GlobalEnv, enclos = baseenv(), ...) {
  if (substitute) {
    expr <- substitute(expr)
  }
  outcome <- list(try(eval(expr, envir = envir, enclos = enclos)), uuid)
  queue$producer$fire("callback_exec", outcome)
  outcome
})
# Accessor for the package-internal `.globals` state object
# (defined elsewhere in the package; not visible in this file chunk).
vivid_globals <- function(){
  .globals
}
#' List objects with their classes
#'
#' @param filter optional character vector of class names; only objects
#'   inheriting at least one of these classes are kept.
#' @param envir environment to inspect (default: global environment). A
#'   data.frame may also be given, in which case its columns are listed.
#' @return a tibble with columns `objects` (names) and `classes`
#'   (list column of class vectors).
.get_objects <- function(filter=NULL, envir=.GlobalEnv) {
  # a data.frame is inspected column-wise; an environment by its bindings
  if (is.data.frame(envir)) {
    objects <- names(envir)
  } else {
    objects <- ls(envir = envir)
  }
  # class vector of each object; `[[` works for both environments and
  # data.frames (replaces get() plus an index-growing loop)
  cls <- lapply(objects, function(nm) class(envir[[nm]]))
  if (!is.null(filter)) {
    is_of_cls <- vapply(cls, function(x) any(x %in% filter), logical(1))
    objects <- objects[is_of_cls]
    cls <- cls[is_of_cls]
  }
  tibble::tibble(objects = objects, classes = cls)
}
#' List data.frame objects in an environment
#'
#' @param envir environment to inspect (default: global environment).
#'   BUG FIX: this argument was previously ignored — every call silently
#'   listed the global environment regardless of what was passed.
#' @return tibble of data.frame names and classes, as from `.get_objects()`.
#' @export
.get_data <- function(envir=.GlobalEnv) .get_objects("data.frame", envir = envir)
# Collapse a character vector of variable names into one comma-separated string.
format_vars <- function(vars){
  paste(vars, collapse = ", ")
}
# Build the shinyTree node structures describing every data.frame visible on
# the search() path, organized as package -> data.frame -> column, with icons
# keyed by column class (via checkintype()). Returns list(Tree0s, Traa0s):
# the full column-level tree and a parallel data.frame-level tree.
# NOTE(review): heavy use of eval(parse(text = ...)) to assemble nested named
# lists and to read `pkg::df` columns — fragile if names contain quotes or
# backslashes; left untouched here because the string-building is
# order-sensitive. Also calls library() inside a function (side effect on the
# caller's search path).
texasCi <- function(){
  library(shinyTree)
  library(ggplot2)
  # every attached environment (packages, .GlobalEnv, tools:*)
  envirs=base::search()
  Tree0s=NULL;Traa0s=NULL
  l1stopened=TRUE
  for (envir in envirs) {
    # derive a display/package name from the search-path entry
    pkgname=envir
    if(substr(envir,1,8)=="package:"){pkgname=substr(envir,9,1000)}
    if(substr(envir,1,6)=="tools:"){pkgname=substr(envir,7,1000)}
    # data.frames exposed by this environment
    ccs=sapply(lapply(ls(as.environment(envir)), get), is.data.frame)
    dds=ls(as.environment(envir))[(ccs==TRUE)]
    Tree1s=NULL;Traa1s=NULL
    l2stopened=TRUE
    for (dd in dds) {
      Tree2s=NULL;Traa2s=NULL
      if(substr(envir,1,8)=="package:"){
        # package data.frame: read column names/classes via pkg::df
        TreeA=list()
        TreeAt=list()
        eval(parse(text=paste0("TreeA=(names(",pkgname,"::", dd,"))")))
        eval(parse(text=paste0("TreeAt=(sapply(",pkgname,"::", dd,", class))")))
        for (kk in 1:length(TreeA)){
          Treea=TreeA[kk]
          Treeat=TreeAt[kk]
          if(isTRUE(nchar(Treea)>0) & isTRUE(nchar(Treeat)>0) )
            eval(parse(text=paste0("Tree2s=c(Tree2s, '",Treea,"'=list(structure(\"",Treea,"\",sticon=' fa fa-tag fa-tag-",checkintype(Treeat),"',dt='",checkintype(Treeat),"')))"))) #,stopened=TRUE
        }
      } else if (substr(envir,1,6)=="tools:"){
        # tool environments are skipped (no column nodes built)
      } else if (envir==".GlobalEnv"){
        # global-environment data.frame: read columns via .GlobalEnv$df
        TreeA=list()
        TreeAt=list()
        eval(parse(text=paste0("TreeA=(names(",".GlobalEnv","$", dd,"))")))
        eval(parse(text=paste0("TreeAt=(sapply(",".GlobalEnv","$", dd,", class))")))
        #TreeA=datasets()
        for (kk in 1:length(TreeA)){
          Treea=TreeA[kk]
          Treeat=TreeAt[kk]
          if(isTRUE(nchar(Treea)>0) & isTRUE(nchar(Treeat)>0) )
            eval(parse(text=paste0("Tree2s=c(Tree2s, '",Treea,"'=list(structure(\"",Treea,"\",sticon=' fa fa-tag fa-tag-",checkintype(Treeat),"',dt='",checkintype(Treeat),"')))"))) #,stopened=TRUE
        }
      }
      if(length(Tree2s)){
        # attach this data.frame's node; only the first one starts expanded
        eval(parse(text=paste0("Tree1s=c(Tree1s,'",dd,"'=list(structure(Tree2s,sttype='df-node',sticon='tags',stopened=",toString(l2stopened),")))")))
        eval(parse(text=paste0("Traa1s=c(Traa1s,'",dd,"'=list(structure('",dd,"',sticon='tags',stopened=",toString(TRUE),")))")))
        l2stopened=FALSE
      }
    }
    if(length(Tree1s)){
      # attach this package's node; only the first package starts expanded
      eval(parse(text=paste0("Tree0s=c(Tree0s,'",pkgname,"'=list(structure(Tree1s,sttype='pkg-node',sticon='fas fa-box',stopened=",toString(l1stopened),")))")))
      eval(parse(text=paste0("Traa0s=c(Traa0s,'",pkgname,"'=list(structure(Traa1s,sttype='pkg-node',sticon='fas fa-box',stopened=",toString(TRUE),")))")))
      l1stopened=FALSE
    }
  }
  result=list()
  result$Tree0s=Tree0s
  result$Traa0s=Traa0s
  result
}
# Map a class vector (serialized via toString()) onto the type keyword used
# for tree-node icons; anything unrecognized becomes 'unknown'.
checkintype <- function (intype){
  key <- toString(intype)
  switch(key,
    "integer" = "integer",
    "numeric" = "numeric",
    "character" = "character",
    "Date" = "Date",
    "ts" = "ts",
    "c(\"ordered\", \"factor\")" = "orderedfactor",
    "factor" = "factor",
    # unnamed final element = switch() default
    "unknown"
  )
}
|
afa78d2bc5d3643e172ffaae5dd6df03dd0057f3
|
76434d63930c563cb9bab7d263df2c80da04cb6f
|
/man/ImportFSD.Rd
|
257616339967627b39174ebb5e47d1726968b522
|
[] |
no_license
|
cran/bda
|
45de77f9d513cbeea00fc34120308f1d37dd2fd0
|
b7cc310ed8ce18c2327f99647f024727e28e59dd
|
refs/heads/master
| 2023-06-22T14:56:20.682683
| 2023-06-18T21:40:09
| 2023-06-18T21:40:09
| 17,694,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,091
|
rd
|
ImportFSD.Rd
|
% This is a template R document file
% Distributed under GPL 3 or later
\name{ImportFSD}
\alias{ImportFSD}
\title{Import Firm Size and Firm Age Data}
\description{
To read firm size and/or firm age data from built-in datasets.
}
\usage{
ImportFSD(x,type,year)
}
\arguments{
\item{x}{A built-in firm size and/or firm age dataset.}
\item{type}{type of data: "size" or "age". If missing, read the age data from rows and the size data from columns.}
\item{year}{The year of the firm size/age data to be read. If missing, the year 2014 is assumed.}
}
\value{
\item{xy}{a matrix of joint frequency distribution table.}
\item{breaks}{the class boundaries for firm age as a component \code{age}, and for firm size as a component \code{size}.}
\item{size}{a 'bdata' object of the firm size data}
\item{age}{a 'bdata' object of the firm age data}
}
\examples{
data(FSD)
## bivariate data
xy = ImportFSD(Firm2)
## firm age of 2013
x = ImportFSD(FirmAge, type="age", year=2013)
## firm size of 2013
y = ImportFSD(FirmSize, type="size", year=2013)
}
\keyword{distribution}
|
494fb662486081b5ab2a50f66ee4f971c6caf6e7
|
fbcc24bb45fc2a58e6e6cdb80c13f85929db8f6b
|
/part3_maSigpro_analysis/20220201_part3_2_maSigpro_phospho.R
|
a5aea2e300871ba223b5aaedb58c57a41288ba07
|
[] |
no_license
|
KozorovitskiyLaboratory/STRaxon
|
f082d50e97010c22e146de797ba4f74fc14e1931
|
081cca42d806d4175e4a85aea11f34a2642114ba
|
refs/heads/main
| 2022-10-20T05:00:01.018988
| 2022-03-30T19:51:18
| 2022-03-30T19:51:18
| 444,848,289
| 0
| 0
| null | 2022-01-05T15:10:42
| 2022-01-05T15:10:42
| null |
UTF-8
|
R
| false
| false
| 9,446
|
r
|
20220201_part3_2_maSigpro_phospho.R
|
## Dumrongprechachan et al 2022
## Part 3 maSigpro phospho
## original code by A2IDEA, modified by V.D.
library(tidyverse)
library(maSigPro)
# NOTE(review): machine-specific absolute path; adjust before re-running elsewhere
setwd('/Volumes/GoogleDrive/My Drive/Vas_Klab_GoogleDrive/Project_STRaxon/analysis_v2/part3_maSigpro_analysis')
## True means that redundant peptides will be collapsed
## False means redundant peptides will be removed
tryHard <- FALSE
# Cluster the significant features of one maSigPro group (via see.genes) and
# report per-feature cluster membership plus a protein-by-cluster usage table.
# NOTE(review): relies on the script globals `sigs` (from get.siggenes) and
# `design` (from make.design.matrix) defined later in this file.
getClusterMembership <- function(group, K=8) {
  ## group: one of the values from names(sigs$sig.genes)
  ## K: number of clusters (default is 8 but this is arbitrary)
  x <- see.genes(sigs$sig.genes[[group]],
                 time.col=2,
                 repl.col=1,
                 group.cols=c(3:6),
                 show.fit=F,
                 k=K,
                 dis=design$dis)
  # one row per feature ("peptide.protein"); peptide/prot filled in below
  clusterMembers <- data.frame(feature=names(x$cut),
                               peptide=NA,
                               prot=NA,
                               cluster=x$cut,
                               stringsAsFactors=F,
                               row.names=NULL)
  # split "peptide.protein" at the last dot
  clusterMembers$peptide <- gsub("(.+)\\..+$", "\\1", clusterMembers$feature)
  clusterMembers$prot <- gsub(".+\\.(.+)$", "\\1", clusterMembers$feature)
  clusterMembers <- arrange(clusterMembers, cluster, prot)
  ## This produces a matrix with protein IDs as rows and cluster labels as columns
  ## numbers in each cell tell you how many peptides that protein has in each cluster
  ## PROBLEM: how do you interpret a protein having different peptides in different clusters?
  peptide_usage <- table(clusterMembers$prot, clusterMembers$cluster)
  ret <- list(membership=clusterMembers, usage=peptide_usage)
  return(ret)
}
########################
# Load data
########################
# normalized phosphopeptide intensities (batch effect corrected + med_norm)
ms2 <- read_csv('rawd/20220131_phosphopeptide_STRaxon_full.csv')
phos_reformat <- read.delim('rawd/20220201_phosphopeptide_reformatted.txt', sep = '\t')
# join intensity table with the reformatted peptide annotations on
# sequence / accession / position (column names differ between the two files)
tmp <- merge(ms2, phos_reformat,
             by.x = c("Annotated Sequence", "Master Protein Accessions", "Positions in Master Proteins"),
             by.y = c("seq", "Master.Protein.Accessions", "Positions.in.Master.Proteins"))
ms2 <- tmp
rm(tmp)
# you need to rearrange the columns
g <- grep("modPep", colnames(ms2))
j <- grep("Master Protein", colnames(ms2)) ## start point
k <- grep("Qvality PEP", colnames(ms2)) - 1## stop point
tmp <- ms2[,c(g,j:k)]
ms2 <- select(tmp, -c(`Positions in Master Proteins`, `Modifications in Master Proteins`))
# first two columns become the peptide and protein identifiers
colnames(ms2)[c(1,2)] <- c("pep", "prots")
rm(tmp, g, j, k)
########################
# Unroll peptides
########################
# function to unroll the protein-peptide listings
# Expand row i of the global `ms2`: a ";"-separated accession list becomes one
# row per distinct protein (isoform suffixes like "-2" are stripped first).
unroll <- function(i) {
  all_prots <- strsplit(ms2$prots[i], split=";", fixed=T)[[1]]
  # drop isoform suffix ("-N") and surrounding whitespace from each accession
  all_prots <- sapply(all_prots, function(x) { trimws(gsub("-\\d+$", "", x)) } )
  all_prots <- unique(all_prots)
  ## all isoforms of the same protein, collapse to single value
  if(length(all_prots) == 1) {
    ret <- ms2[i,]
    ret$prots <- all_prots
  } else {
    ## multiple distinct protein IDs, unroll them into new rows
    ret <- data.frame()
    for(cur_prot in all_prots) {
      tmp <- ms2[i,]
      tmp$prots <- cur_prot
      ret <- rbind(ret, tmp)
    }
  }
  return(ret)
}
# unroll the master protein accessions so that you have only 1 peptide + protein pairing
# PERF FIX: build all row blocks first and bind them in a single rbind call;
# the previous `d <- rbind(d, unroll(i))` loop re-copied the accumulator on
# every iteration (quadratic in the number of rows).
d <- do.call(rbind, lapply(seq_len(nrow(ms2)), unroll))
if (is.null(d)) d <- data.frame()  # preserve old behavior for an empty ms2
ms2 <- d
rm(d, unroll)
########################
# Remove duplicates
########################
# Each protein + peptide must only be represented once.
# For protein + peptide pairings that occur more than once, take the row instance with the largest `rowSum`.
# You can use the `tryHard = TRUE` argument to use this filtering approach.
#tmp <- data.frame(ms2, rowSum=apply(ms2[,-c(1,2)], 1, sum), stringsAsFactors=F)
# rowSum = total intensity across all sample columns (first 3 columns are ids)
tmp <- data.frame(ms2, rowSum=apply(ms2[,-c(1,2,3)], 1, sum), stringsAsFactors=F)
# k = "peptide.protein" duplicate key
tmp$k <- paste0(tmp$pep,".",tmp$prots)
t1 <- select(tmp, k, rowSum)
t2 <- group_by(t1, k) %>% summarize(freq=n(), maxSum=max(rowSum))
if(tryHard) {
  ## collapse peptides that are indistinguishable
  ## We will select the row for the peptide that has the largest rowSum value
  t3 <- inner_join(x=t1, y=t2, by=c("k"="k", "rowSum"="maxSum"))
  d <- inner_join(x=tmp, y=t3, by=c("k", "rowSum"))
  ms2 <- select(d, -rowSum, -k, -freq)
  rm(t3)
} else {
  ## remove peptides that are indistinguishable
  keep_peps <- filter(t2, freq == 1) %>% as.data.frame()
  keep_peps <- keep_peps[,1]
  keep_peps <- unique(keep_peps)
  d <- filter(tmp, k %in% keep_peps)
  ms2 <- select(d, -rowSum, -k)
}
rm(tmp, t1, t2, d)
# Keep complete cases
# (rows with any NA intensity are dropped before model fitting)
tmp <- as.matrix(ms2[,-c(1,2)])
j <- complete.cases(tmp)
ms2 <- ms2[j,]
rm(tmp, j)
# drop the third annotation column; only pep, prots and intensities remain
ms2 <- ms2[,-3]
########################
# Study design
########################
# phenotype table parsed out of the sample column names, which follow the
# pattern "PlexN.<state><rep>"
pheno <- data.frame(lab=colnames(ms2)[-c(1,2)],
                    plex=0, state=NA, rep=0,
                    stringsAsFactors=F,
                    row.names=colnames(ms2)[-c(1,2)])
pheno$state <- gsub(".*\\.(.+)\\d+$", "\\1", pheno$lab)
pheno$plex <- as.integer(gsub("Plex(\\d+)\\..*", "\\1", pheno$lab))
pheno$rep <- as.integer(gsub("Plex\\d+\\.\\w+(\\d+)$", "\\1", pheno$lab))
pheno <- pheno[,-1]
# factor level order defines the numeric time coding used below
pheno$state <- factor(pheno$state, levels=c("neonate", "earlypostnatal", "preweanling", "adult"))
## maSigPro code starts here\
## time points:
## "neonate"=1, "earlypostnatal"=2, "preweanling"=3, "adult"=4
d <- dplyr::select(pheno, rep, plex)
d$Time <- as.numeric(pheno$state)
# one 0/1 indicator column per developmental stage (maSigPro group columns)
d$Neonate=0
d$Earlypostnatal=0
d$Preweanling=0
d$Adult=0
## You need to annotate the comparison columns
d$Neonate[ grep("neonate", rownames(d)) ] <- 1
d$Earlypostnatal[ grep("early", rownames(d)) ] <- 1
d$Preweanling[ grep("pre", rownames(d)) ] <- 1
d$Adult[ grep("adult", rownames(d)) ] <- 1
d <- dplyr::select(d, -plex)
# quadratic (degree 2) time-course design over the 4 stage groups
design <- make.design.matrix(edesign=d,
                             degree=2,
                             time.col=2, ## column for time
                             repl.col=1, ## column for replicates
                             group.cols=c(3,4,5,6))
rm(d)
########################
# maSigPro regression model
########################
# intensity matrix, one row per "peptide.protein" feature
d <- as.matrix(select(ms2, -pep, -prots))
rownames(d) <- paste0(ms2$pep,".",ms2$prots)
## Perform regression fit for each protein
fit <- p.vector(data=d,
                design=design,
                min.obs=1,
                #Q = 0.05,
                Q = 1,
                MT.adjust="BH",
                counts=FALSE,
                family=gaussian())
# stepwise (backward) selection of the regression terms per feature
# BUG FIX: `family=gassian()` was a typo that aborted the script at runtime
# ("could not find function"); the family object is stats::gaussian().
tstep <- T.fit(fit,
               step.method = "backward",
               min.obs=1,
               alfa = 0.05,
               family=gaussian())
## Get out all peptides+proteins that are significant
#sigs <- get.siggenes(tstep, rsq = 0.6, vars = "all")
sigs <- get.siggenes(tstep, rsq = 0, vars = "all")
# 8 clusters
sigs_all <- see.genes(sigs$sig.genes, show.fit = TRUE, dis =design$dis,
                      cluster.method="hclust" ,cluster.data = 1, k = 8)
# feature -> (pep, prot) lookup used to annotate the cluster assignments
map <- select(ms2, pep, prots) %>%
  mutate(feature = paste(pep, prots, sep = ".")) %>%
  rename(prot = prots)
# cluster assignment per feature from see.genes, joined to the lookup
tmp <- as.data.frame(sigs_all$cut) %>%
  tibble::rownames_to_column("feature") %>%
  dplyr::rename(cluster = "sigs_all$cut") %>%
  left_join(map, by = "feature")
########################
# Export results
########################
# extract coefficient, p-value, R2 statistics
tmp2 <- merge(sigs[['sig.genes']]$coefficients[,2:4], sigs[['sig.genes']]$sig.pvalues[,c(4:6, 1:2)],
              by = 0, all = TRUE)
colnames(tmp2)[1] <- 'pep.Prot'
tmp3 <- merge(tmp, tmp2, by.x = 'feature', by.y = 'pep.Prot')
# adjusted p-values plus the selected intensity matrix, re-keyed by feature
res <- cbind(padjust=fit$p.adjusted[ fit$p.adjusted < 1 ], fit$SELEC)
res <- data.frame(feature=rownames(res),
                  pep=gsub("(.+)\\.\\w+", "\\1", rownames(res)),
                  prot=gsub(".+\\.(\\w+)", "\\1", rownames(res)),
                  res,
                  row.names=NULL,
                  stringsAsFactors=F)
res1 <- right_join(x=tmp, y=res,by= c("feature", "pep", "prot"))
# re-attach the original annotation columns (column 6 dropped, names aligned)
tmp10 <- phos_reformat[,-6]
colnames(tmp10)[c(3,6)] <- c('prot', 'pep')
res2 <- left_join(res1, tmp10, by = c('pep', 'prot'))
res3 <- res2
# parse the first modified residue (e.g. "S123") out of the bracketed
# modification annotation, then split it into site letter and residue number
res3$FirstmodResidue <- str_extract(res3$Modifications.in.Master.Proteins, pattern = '([^;]+)')
res3$FirstmodResidue <- str_extract(res3$FirstmodResidue, pattern = '\\[(.*?)\\]')
res3$FirstmodResidue <- str_replace(res3$FirstmodResidue, pattern = '\\[', replacement = '')
res3$FirstmodResidue <- str_replace(res3$FirstmodResidue, pattern = '\\]', replacement = '')
res3$First_site <- substr(res3$FirstmodResidue, start = 1, stop = 1)
res3$First_ResNum <- str_extract(res3$FirstmodResidue, pattern = '[0-9]+')
#res3$countPhos <- str_count(res3$modResidue, ';') + 1
#x <- inner_join(x=cluster_members, y=res, by="feature")
#x <- arrange(x, prot, pep, group, cluster)
#write.table(x, file=outF, sep="\t", row.names=F)
#write.table(res2, file= "out/20220201_phosphopeptide_STRaxon_full_ALL_8clusters_maSigPro_v2.tsv", sep="\t", row.names=F)
rm(d)
########################
# note - hinge analysis
########################
set.seed(1234)
k.max <- 25
wss <- sapply(1:k.max, function(k) { kmeans(d, k, nstart=50, iter.max=15)$tot.withinss })
plot(1:k.max, y=wss, type="b", pch=19, xaxt="n", xlab="# Clusters K", ylab="Total within-cluster sum of squares")
axis(1, at=seq(1,k.max), las=1)
rm(k.max, wss)
|
6f9113173763f0012c715935366429dfb0c4ed06
|
fa1576197ae752c9778052f4d005a8e91a405181
|
/man/mutate_geocode.Rd
|
d4d2512126a6f89256b4164a6680e784e5451845
|
[] |
no_license
|
HughParsonage/PSMA
|
425874c00c77bf4214ee735f75dd853b66c2eb1b
|
3dd0ab55facc0a15f8fedbb424d3ef11371bc2fe
|
refs/heads/master
| 2023-03-06T17:43:52.769636
| 2022-05-13T07:04:00
| 2022-05-13T07:04:00
| 102,568,751
| 13
| 6
| null | 2023-03-03T03:18:31
| 2017-09-06T05:59:01
|
R
|
UTF-8
|
R
| false
| true
| 1,145
|
rd
|
mutate_geocode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutate_geocode.R
\name{mutate_geocode}
\alias{mutate_geocode}
\alias{add_geocode}
\title{Add latitude and longitude columns to a data frame of addresses}
\usage{
mutate_geocode(
DT,
flat_number = NULL,
number_first = NULL,
building_name = NULL,
street_name = NULL,
street_type = NULL,
postcode = NULL,
new_names = c("lat", "lon"),
approx = 0L,
overwrite = FALSE
)
}
\arguments{
\item{DT}{A \code{data.frame} to which columns will be added.}
\item{flat_number, number_first, building_name, street_name, street_type, postcode}{Columns quoted or unquoted to be passed to \code{\link{geocode}}. If \code{NULL}, \code{DT} must have the columns spelled the same as the arguments here.}
\item{new_names}{Character vector of length-2 specifying the new names in the resulting \code{data.frame} for the latitude and longitude respectively.}
\item{approx}{See \code{\link{geocode}}.}
\item{overwrite}{If \code{new_names} are present in \code{DT}, should they be overwritten?}
}
\description{
Add latitude and longitude columns to a data frame of addresses
}
|
015265ed5023981cc28d0f5f981855d4aa3839dc
|
296ca8d3920a7bc8b84b8ab8fe1c03c80b1b6f8e
|
/man/format_AttractData.Rd
|
a489fa9d1a4e4972a19926b29b78a1117b042b7c
|
[] |
no_license
|
mikeod38/ProvidenciaChemo
|
e3b03456787baa4c318afad76d830b97de1a8d91
|
0cc8a8a46c6286d8df22540bff7f155a127e858c
|
refs/heads/master
| 2021-04-12T08:01:09.374669
| 2020-03-25T02:13:36
| 2020-03-25T02:13:36
| 126,062,218
| 0
| 3
| null | 2020-01-06T17:26:12
| 2018-03-20T18:09:52
|
HTML
|
UTF-8
|
R
| false
| true
| 294
|
rd
|
format_AttractData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_AttractData.R
\name{format_AttractData}
\alias{format_AttractData}
\title{format_AttractData}
\usage{
format_AttractData(data)
}
\description{
format_AttractData
}
\examples{
data \%>\% format_AttractData()
}
|
3b670aeaac62039cfad4d38d2452c7dead6217d3
|
b304b77438b29787b8e4ad3a868f9e5a63f5dac0
|
/code/across_time_analysis.R
|
f2382f4e2913d421c99458e113bfb8272d801fa7
|
[] |
no_license
|
marinapapa/ColMotion-Species-Time
|
2b81aabe701568060f0a11600509213389f2a2f2
|
82483b0f47cbe3168ebcf2fd2e39089b57e34675
|
refs/heads/main
| 2023-04-07T10:08:22.135577
| 2023-01-24T18:57:32
| 2023-01-24T18:57:32
| 547,367,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,601
|
r
|
across_time_analysis.R
|
##\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
## Timeseries analysis (KPSS, changepoints & bootstrap)
## Author: Marina Papadopoulou (m.papadopoulou.rug@gmail.com)
## Publication: Dynamics of collective motion across time and species
##\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
###########################
## Bootstrap functions
## Bootstrap statistic for boot::boot(): given a resampling index vector,
## refit the broken-line ("LT") model of the cumulative mean of `var`
## against event order and return the cumulative duration at the fitted
## changepoint.
to_boot_sp <- function(df, var, indices)
{
  resampled <- df[indices, ]
  resampled$var_mean <- dplyr::cummean(resampled[, var])
  resampled$cum_dur <- cumsum(resampled$event_dur_s)
  resampled$event_ord <- seq_len(nrow(resampled))
  # Broken-line fit; coef[1] is the changepoint on the event-order axis.
  changepoint_fit <- lm.br::lm.br(resampled$var_mean ~ resampled$event_ord, 'LT')
  changepoint_ord <- round(as.numeric(changepoint_fit$coef[1]))
  # Translate event order back into cumulative duration (seconds).
  resampled[resampled$event_ord == changepoint_ord, 'cum_dur']
}
## For every variable in `vars`: compute the plateau/changepoint statistics
## (via get_plateau_stats) and the bootstrap standard deviation of the
## changepoint duration (R replicates of to_boot_sp). Returns one row per
## variable.
bootstrap_all <- function(df, vars, R)
{
  out_cols <- c('var', 'theta_idx', 'sl', 'ci_l', 'ci_h',
                'theta_s', 'ci_ls', 'ci_hs', 'boots_sd')
  results <- as.data.frame(matrix(NA, nrow = length(vars), ncol = length(out_cols)))
  colnames(results) <- out_cols
  for (row_idx in seq_along(vars)) {
    this_var <- vars[row_idx]
    # Point estimates + confidence intervals for this variable.
    plateau_stats <- get_plateau_stats(df, this_var)
    # Nonparametric bootstrap of the changepoint duration.
    boot_out <- boot::boot(data = df,
                           statistic = to_boot_sp,
                           R = R,
                           var = this_var)
    results[row_idx, ] <- c(plateau_stats, sd(as.vector(boot_out$t)))
  }
  results
}
## Fit a broken-line ("LT") model of the cumulative mean of `var` against
## event order and return, as a character vector:
## c(var, theta (changepoint index), significance level, CI low/high (index),
## theta in cumulative seconds, CI low/high in cumulative seconds).
## NOTE(review): the significance level and CI are scraped from lm.br's
## *printed* output via capture.output() + regexes — fragile against
## lm.br version changes; confirm output format when upgrading.
get_plateau_stats <- function(df, var)
{
df$var_mean <- dplyr::cummean(df[,var])
df$cum_dur <- cumsum(df$event_dur_s)
df$event_ord <- 1:nrow(df)
sc <- lm.br::lm.br( df$var_mean ~ df$event_ord, 'LT' )
## theta = changepoint position on the event-order axis.
theta <- as.numeric(sc$coef[1])
## Significance level of the changepoint vs. no-changepoint (theta0 at the
## last event); parsed out of the printed "SL= ... for ..." line.
sl_capt <- capture.output(sc$sl(theta0 = max(df$event_ord)))
signl <- gsub(" ", "",sub("\\ for .*", "", sub(".*\\SL=", "", sl_capt)), fixed = TRUE)
## Confidence interval: last printed line; if several "], "-separated
## intervals are printed, keep only the final one.
p_ci <- capture.output(sc$ci())
p_ci <- p_ci[length(p_ci)]
if (stringr::str_count( p_ci, "], ") > 0)
{
splstr <- unlist(stringr::str_split(p_ci, '], '))
p_ci <- splstr[length(splstr)]
}
## Strip everything but digits/sign/decimal point from each CI bound.
pci_l <- floor(as.numeric(gsub("[^0-9.-]", "", sub("\\,.*", "", p_ci))))
pci_h <- ceiling(as.numeric(gsub("[^0-9.-]", "", sub(".*\\,", "", p_ci))))
## Open-ended upper bound: fall back to the last event.
if (!(is.na(pci_l)) && is.na(pci_h)) { pci_h <- max(df$event_ord, na.rm = T)}
if (is.na(pci_l)) { pci_h <- NA }
## Convert event-order positions into cumulative durations (seconds).
theta_s <- df[df$event_ord == round(as.numeric(theta)),'cum_dur']
if (!(is.na(pci_l))){
cumdur_l <- df[df$event_ord == pci_l,'cum_dur']
} else {
cumdur_l <- NA
}
if (!(is.na(pci_h))){
cumdur_h <- df[df$event_ord == pci_h,'cum_dur']
} else {
cumdur_h <- NA
}
## c() coerces everything to character (signl is a string) — downstream
## code re-parses with as.numeric() where needed.
toret <- c(var, theta, signl, pci_l, pci_h, theta_s, cumdur_l, cumdur_h)
return(toret)
}
###########################
## Load data
## One metrics-per-flocking-event CSV per species (paths relative to code/).
df_b <- read.csv('../data/col_motion_metrics/metrics_per_event_baboons.csv')
df_f <- read.csv('../data/col_motion_metrics/metrics_per_event_fish.csv')
df_g <- read.csv('../data/col_motion_metrics/metrics_per_event_goats.csv')
df_p <- read.csv('../data/col_motion_metrics/metrics_per_event_pigeons.csv')
## Collective-motion metrics analysed for each species.
vars <- c('mean_mean_nnd',
'mean_sd_nnd',
'sd_mean_nnd',
'mean_pol',
'sd_pol',
'stdv_speed',
'mean_sd_front',
'mean_shape',
'sd_shape')
###########################
## Run bootstrap over all parameters
## 10 000 bootstrap replicates per metric per species (slow).
bt_g <- bootstrap_all(df_g, vars, 10000)
bt_g$species <- 'goats'
bt_f <- bootstrap_all(df_f, vars, 10000)
bt_f$species <- 'fish'
bt_b <- bootstrap_all(df_b, vars, 10000)
bt_b$species <- 'baboons'
bt_p <- bootstrap_all(df_p, vars, 10000)
bt_p$species <- 'pigeons'
all_boots <- dplyr::bind_rows(bt_f, bt_p, bt_g, bt_b)
## Flag changepoints whose significance level is below 0.05
## (sl is stored as character by get_plateau_stats, hence as.numeric()).
all_boots$sl_bool <- FALSE
signifs <- as.numeric(all_boots$sl) < 0.05
all_boots$sl_bool <- signifs
###########################
## KPSS test for stationarity
## Trend-stationarity of each cumulative-mean series; p < 0.05 rejects
## trend stationarity. One column per species, one row per metric.
kpss_tests <- as.data.frame(matrix(NA, nrow = 0, ncol = 4))
colnames(kpss_tests) <- c('fish', 'pigeons', 'goats', 'baboons')
for (var in vars)
{
kpss_tests[var, ] <- c(round(tseries::kpss.test(dplyr::cummean(df_f[,var]), null = "Trend")$p.value, 3),
round(tseries::kpss.test(dplyr::cummean(df_p[,var]), null = "Trend")$p.value, 3),
round(tseries::kpss.test(dplyr::cummean(df_g[,var]), null = "Trend")$p.value, 3),
round(tseries::kpss.test(dplyr::cummean(df_b[,var]), null = "Trend")$p.value, 3))
}
## Reshape wide (species columns) -> long (one row per var x species).
kpss_tests$var <- rownames(kpss_tests)
kpss_tests <- reshape2::melt(kpss_tests)
kpss_tests$kpss_bool <- kpss_tests$value < 0.05
colnames(kpss_tests) <- c('var', 'species', 'kpss_pvalue', 'kpss_bool')
## Join and save
## Joins on the shared var + species columns (natural join).
res_df <- dplyr::inner_join(all_boots, kpss_tests)
#write.csv(res_df, '../data/changepoints_stats.csv', row.names = FALSE)
|
831e972fdd9117a673a2dffcbb7d854a607b35bc
|
795bcc24ebb1f2d306edba8c2396a8e534b86d09
|
/plot1.R
|
5e68548fb7c0d51c9da4e162743a9f16ab965784
|
[] |
no_license
|
amarviswa/ExData_Plotting1
|
de14b6a6b72e2e51978358018380e1939568cb13
|
2c999b52def930c080b13ec42d466b0aeb2d155a
|
refs/heads/master
| 2021-01-16T18:17:27.981384
| 2014-09-07T07:21:36
| 2014-09-07T07:21:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,213
|
r
|
plot1.R
|
## Exploratory Data Analysis - Plotting Assignment 1
##
## plot1.R - generates plot1.png
## Downloads the UCI household power consumption data (if not already
## present), reads it from inside the zip, keeps the two days of interest,
## and writes a histogram of global active power to plot1.png.
##
## NOTE: the original script called setInternet2(TRUE); that function was
## made defunct and removed in R 3.5.0, so the call now errors. Modern
## download.file() handles https URLs directly and no replacement is needed.

## Download the file and put it in the working directory.
fName <- "exdata_plotting1.zip"
if (!file.exists(fName)) {
  retval <- download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                          destfile = fName)
}

## Read the data from the contents of the zip file.
## na.strings = "?" because the raw file encodes missing values as "?".
df.power <- read.csv(unz(fName, "household_power_consumption.txt"),
                     header = TRUE, sep = ";", stringsAsFactors = FALSE,
                     na.strings = "?",
                     colClasses = c("character", "character", "numeric",
                                    "numeric", "numeric", "numeric",
                                    "numeric", "numeric", "numeric"))

## Parse the date column and subset to 2007-02-01 and 2007-02-02 only.
df.power$Date <- as.Date(df.power$Date, format = "%d/%m/%Y")
startDate <- as.Date("01/02/2007", format = "%d/%m/%Y")
endDate <- as.Date("02/02/2007", format = "%d/%m/%Y")
df.power <- df.power[df.power$Date >= startDate & df.power$Date <= endDate, ]

## Plot 1: histogram of global active power, written to plot1.png.
png(filename = "plot1.png", width = 480, height = 480)
hist(df.power$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
|
afa884f383fb96dccdf43186ba45fbfbc3e785b0
|
0bdbb8c1eb0e92d9d65233237b9f4b0facd506dd
|
/project.R
|
c034b5bcc7b09d7f72a3a646b7b5754f563385be
|
[] |
no_license
|
nobaluis/bedu-r-project
|
dcc3c43b45961b035ef416630d91940706631f0c
|
5a2c933131715cb7212730dac581a1312516304b
|
refs/heads/master
| 2023-03-03T10:14:42.340430
| 2021-02-09T04:09:31
| 2021-02-09T04:09:31
| 334,507,828
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,878
|
r
|
project.R
|
# Data tools
library(dplyr) # for data wrangling
library(rsample) # for data spliting
library(recipes) # for data transformation tasks
# Data visualization
library(ggplot2) # for graphics
library(visdat) # for data visualization
# Model tools
library(e1071) # req. for caret
library(caret) # for classification training
library(kernlab) # for fitting SVMs
library(mlbench) # for measure performance
# Load the data (Kaggle Titanic training set); empty strings are missing.
# na.strings spelled out (was the partially-matched `na=`).
data.all <- read.csv("data/train.csv", na.strings = "")
# Describe the data
str(data.all)  # structure of the dataset
head(data.all) # observations sample
# Assign correct data types: categorical predictors become factors,
# free-text columns stay character, Age is truncated to whole years.
data.all$Survived <- as.factor(data.all$Survived)
data.all$Pclass <- as.factor(data.all$Pclass)
data.all$Sex <- as.factor(data.all$Sex)
data.all$Embarked <- as.factor(data.all$Embarked)
# FIX: the original read as.character(data.all$Pclass) here, which
# overwrote every passenger Name with the class number (copy-paste bug).
data.all$Name <- as.character(data.all$Name)
data.all$Cabin <- as.character(data.all$Cabin)
data.all$Ticket <- as.character(data.all$Ticket)
data.all$Age <- as.integer(data.all$Age)
str(data.all) # verify changes
# Data visualization
vis_dat(data.all)
# Missing values viz
vis_miss(data.all, cluster = TRUE)
# Report the NA count for each modelling predictor.
for (col in c("Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked")) {
  print(sprintf("%s NA count: %d", col, sum(is.na(data.all[, col]))))
}
# Feature engineering: a recipes pipeline over the modelling predictors.
rec <- recipe(
  Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
  data = data.all
)
# Impute missing values (kNN over all predictors).
# NOTE(review): step_knnimpute() is deprecated in recent recipes in favour
# of step_impute_knn(); kept as-is since it still works — confirm version.
rec <- rec %>% step_knnimpute(all_predictors(), neighbors = 6)
# Normalize numeric values (Box-Cox transform).
rec <- rec %>% step_BoxCox(all_numeric(), -all_outcomes())
# Standardize numeric values (center + scale).
rec <- rec %>%
  step_center(all_numeric(), -all_outcomes()) %>%
  step_scale(all_numeric(), -all_outcomes())
# Dummy encode the categorical features.
rec <- rec %>% step_dummy(all_nominal_predictors())
# Estimate the recipe's parameters from the full data set.
rec <- prep(rec, training = data.all)
# Apply all transformation steps.
data.clean <- bake(rec, new_data = data.all)
# Visualize the clean data.
vis_dat(data.clean)
# Split data into training and test sets, stratified on the outcome.
set.seed(123) # for reproducibility
# FIX: the argument is `prop`, not `prob` — the original `prob = 0.8` was
# silently swallowed by `...` and the default 3/4 split was used instead.
spliter <- initial_split(data.clean, prop = 0.8, strata = "Survived")
data.train <- training(spliter)
data.test <- testing(spliter)
# Model (SVM radial) tunable parameters.
caret::getModelInfo("svmRadial")$svmRadial$parameters # model info
# Model fitting
set.seed(6465)
model <- train(
  Survived ~ ., # target formula
  data = data.train, # training data
  method = "svmRadial", # SVM with radial basis function: K(x,x') = exp(gamma norm(x-x')^2)
  trControl = trainControl(method = "repeatedcv", number = 10, repeats = 3), # repeated k-fold cross validation
  preProcess = c("center", "scale"),
  tuneLength = 10
)
model
# Training results
ggplot(model)
# Test the model
predictions <- predict(model, data.test)
# Measuring model performance
confusionMatrix(data = predictions, reference = data.test$Survived)
|
eca455b76567478c7aa83b6b5e84f9207731b696
|
3a7a5305cb8c7b3eea8092ed6baf95a5fb9e4fe9
|
/run_analysis.R
|
c9ddfef1b487766c13b62e9df33ab389ad60f611
|
[] |
no_license
|
swaldecker/run_analysis_repo
|
0e64d2e7a46d3846cadfc966160c3928fb0c1e2e
|
ac8812955e4a3999bacb393e12972ed0582e83e0
|
refs/heads/master
| 2020-04-06T06:01:01.506699
| 2016-11-14T01:28:19
| 2016-11-14T01:28:19
| 73,649,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,488
|
r
|
run_analysis.R
|
# R script for tidying data set from UCI
#
# Load packages
library(tidyr)
library(dplyr)
# Specify directories where data is located
# NOTE: I renamed the folder resulting from decompressing the .zip file to
# UCIdata
train_tag <- "train"
test_tag <- "test"
# Returns a data frame of the training data set or the testing data set
# depending on the value of "tag" ("train" or "test")
tidy_set <- function(tag) {
  ## Build a tidy data frame for one half of the UCI HAR dataset.
  ##
  ## tag: either "train" or "test"; selects which directory and files
  ## (subject_<tag>.txt, X_<tag>.txt, y_<tag>.txt) are read.
  ## Returns a data frame with columns: subject, set ("training"/"testing"),
  ## activity (labelled factor), and one column per feature in features.txt.
  ## Stops with an informative error for a bad tag or mismatched row counts.

  ## Validate `tag` up front. (The original print()ed fragments and then
  ## called stop() with no message; stop() itself concatenates the parts.)
  if (!(tag %in% c("train", "test"))) {
    stop("tag not an acceptable value! tag = ", tag, call. = FALSE)
  }

  ## Folder containing the training and testing datasets
  data_dir <- "./UCI HAR Dataset/"

  ## Generate paths to the files to be read in
  features_file    <- paste(data_dir, "features.txt", sep = "")
  subject_file     <- paste(data_dir, tag, "/", "subject_", tag, ".txt", sep = "")
  measurement_file <- paste(data_dir, tag, "/", "X_", tag, ".txt", sep = "")
  activity_file    <- paste(data_dir, tag, "/", "y_", tag, ".txt", sep = "")

  ## Read features.txt first: its second column provides the column names
  ## for the data frame created from X_train.txt / X_test.txt.
  features_df <- read.table(features_file)
  features <- as.character(features_df[[2]])

  ## Clean up the column names a bit
  features <- gsub("-", "_", features)        # dashes -> underscores
  features <- gsub("\\(\\)", "", features)    # drop empty "()"
  features <- gsub("\\(|\\)", "_", features)  # other parentheses -> underscores
  features <- gsub(",", "_", features)        # commas -> underscores
  features <- tolower(features)               # tolower() is vectorized; sapply() was unnecessary

  ## Generate a data frame for each file.
  subject_df     <- read.table(subject_file, col.names = "subject")
  measurement_df <- read.table(measurement_file, col.names = features)
  activity_df    <- read.table(activity_file, col.names = "activity")

  ## The three data frames must have the same number of rows.
  ## FIX: the original used the elementwise `|` in a scalar if() and
  ## print("label", n), which ignores the count argument; the row counts
  ## now actually appear in the error message.
  if (nrow(subject_df) != nrow(measurement_df) ||
      nrow(subject_df) != nrow(activity_df)) {
    stop("data frames not of equal length: nrow(subject_df) = ", nrow(subject_df),
         ", nrow(measurement_df) = ", nrow(measurement_df),
         ", nrow(activity_df) = ", nrow(activity_df), call. = FALSE)
  }

  ## Record whether the rows come from the training set or the testing set.
  subject_df <- mutate(subject_df, set = paste(tag, "ing", sep = ""))

  ## Recode activity ids (1..6) as labelled factor levels.
  activity_df[[1]] <- as.factor(activity_df[[1]])
  levels(activity_df[[1]]) <-
    c("Walking", "Walking Upstairs", "Walking Downstairs",
      "Sitting", "Standing", "Laying")

  ## Join the columns of these data frames together with bind_cols from dplyr.
  bind_cols(subject_df, activity_df, measurement_df)
}
## now, generate data frames from the training and testing data sets.
train_df <- tidy_set( train_tag )
test_df <- tidy_set( test_tag )
## now, combine them using bind_rows from dplyr
combined_df <- bind_rows( train_df, test_df )
## next, select only the measurement columns with "mean" and "std"
## in the variable name (plus the id columns subject/set/activity).
keep <- "subject|set|activity|mean|std"
selected_cols <- grep( keep, names(combined_df), value = TRUE )
## data set from step 4 ##
selected_df <- combined_df[ , selected_cols ]
## now work on data set from step 5. First, group by subject and
## activity
mean_tbl <- tbl_df(selected_df) %>% group_by( subject, activity )
## Second, use the summarise_each function from the dplyr package
## to apply the function mean() to each column except the "set" column.
## NOTE(review): tbl_df() and summarise_each() are deprecated in modern
## dplyr (as_tibble() / summarise(across(...)) replace them); kept as-is
## for the dplyr version this was written against — confirm before upgrading.
mean_tbl <- summarise_each( mean_tbl, funs(mean), - set )
## modify the names of the measurement variables: prefix every mean/std
## column with "mean_" to reflect that the values are per-group means.
new_names <- grep( "mean|std", names(mean_tbl), value = TRUE ) %>%
sapply( function( x ) paste( "mean_", x, sep = "" ), USE.NAMES = FALSE )
new_names <- c("subject", "activity", new_names )
names(mean_tbl) <- new_names
|
edd57c0faaf0c66ea2fb9a9facbcfe9157cbce8a
|
bb4c63c25c9d546d530065591fb552cf562dce33
|
/man/PTCA4CATA.Rd
|
1f64f5083dd7610d9a3a06b60264c025a4c20833
|
[] |
no_license
|
HerveAbdi/PTCA4CATA
|
98d6be1b0781f79d8f1ed591f4598883e4f7b764
|
23e8764e0e1f2806b4defe584aa876f0b91c5485
|
refs/heads/master
| 2022-07-27T07:23:39.066852
| 2022-07-14T21:28:33
| 2022-07-14T21:28:33
| 92,956,064
| 8
| 8
| null | 2022-05-04T21:45:54
| 2017-05-31T14:42:24
|
R
|
UTF-8
|
R
| false
| true
| 710
|
rd
|
PTCA4CATA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PTCA4CATA.R
\docType{package}
\name{PTCA4CATA}
\alias{PTCA4CATA}
\title{PTCA4CATA package to perform Partial Triadic Correspondence Analysis
(PTCA) for Check All That Apply (CATA) Data}
\description{
The PTCA4CATA package provides a set of tools to 1) analyze
"Check All That Apply (CATA)" Data;
2) display the results graphically;
and 3)
perform the correct inferences.
In addition to correspondence analysis, Hellinger analysis
can also be used.
PTCA4CATA uses Partial Triadic Correspondence Analysis (PTCA)
to analyze the
pseudo-contingency table obtained from a CATA task.
}
\author{
Herve Abdi \email{herve@utdallas.edu}
}
|
122a3c96cd87cf65996a3bbeedc06a595356642b
|
1bb242e0c0403209e1eca0320c5ed5eb4a5e1e1a
|
/metaPopSim_autoPoly/getMotherGameteInfo.R
|
be229eee2ca631a6dac139442c62cbe473c15d6b
|
[] |
no_license
|
dfield007/AutoPoly
|
5a3bdbdcc2ffcdfd898eddaf5d9ff5ca25ce9c3c
|
71f0f733372c1fdb164f093a069be8bce36c696a
|
refs/heads/master
| 2020-12-02T08:13:45.429858
| 2018-02-12T14:52:12
| 2018-02-12T14:52:12
| 96,789,391
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,168
|
r
|
getMotherGameteInfo.R
|
######################################
# #
# 8 Functions to obtain the probs #
# of gametes from a given phenotype #
# or genotype, for double reduction #
# or no double reduction #
# #
# Author: David Field #
# Date: 1/2/2010 #
# #
######################################
###############################################################
getMotherGameteInfoHexPhenoMin <- function (phenotype) {
##################################################
# 1. Hexaploid, marker="phenotype", DRR="min" #
##################################################
## Maternal gamete classes and probabilities for a hexaploid marker
## *phenotype* (vector of 1-6 distinct observed alleles) under minimal
## double reduction.
## Returns list(gametes = named list of 3-allele character vectors,
## prob = named numeric vector), both keyed by the
## space-separated gamete alleles.
## switch() dispatches on the number of distinct alleles; each branch
## supplies a letter-coded gamete table ("a" = phenotype[1], "b" =
## phenotype[2], ...) plus matching probabilities.
## NOTE(review): probability values are taken as given from the model
## tables; not re-derived here.
switch(length(phenotype),
{## Monoallele
gametes.table <- c("aaa")
probs.table <- c(1.0)
},
{## Bialleles
gametes.table <- c("aaa", "aab", "bbb", "abb")
probs.table <- c(0.15, 0.35, 0.15, 0.35)
},
{## Trialleles
gametes.table <- c("aaa", "aab", "aac", "bbb", "abb",
"bbc", "ccc", "acc", "bcc", "abc")
probs.table <- c(0.03, 0.105, 0.105, 0.03, 0.105, 0.105,
0.03, 0.105, 0.105, 0.28)
},
{## Quadrialleles
gametes.table <-c("aaa", "aab", "aac", "aad", "bbb", "abb",
"bbc", "bbd", "ccc", "acc", "bcc", "ccd",
"ddd", "add", "bdd", "cdd", "abc", "abd",
"acd", "bcd")
probs.table <- c(0.005, 0.035, 0.035, 0.035, 0.005, 0.035,
0.035, 0.035, 0.005, 0.035, 0.035, 0.035,
0.005, 0.035, 0.035, 0.035, 0.14, 0.14,
0.14, 0.14)
},
{## Pentalleles
gametes.table <- c("aab", "aac", "aad", "aae", "abb",
"bbc", "bbd", "bbe", "acc", "bcc",
"ccd", "cce", "add", "bdd", "cdd",
"dde", "aee", "bee", "cee", "dee",
"abc", "abd", "abe", "acd", "ace",
"ade", "bcd", "bce", "bde", "cde")
probs.table <- c(0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.08,
0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08,
0.08, 0.08)
},
{## Hexalleles
gametes.table <- c("abc", "abd", "abe", "abf", "acd",
"ace", "acf", "ade", "adf", "aef",
"bcd", "bce", "bcf", "bde", "bdf",
"bef", "cde", "cdf", "cef", "def")
probs.table <- c(0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05, 0.05, 0.05, 0.05, 0.05)
}
) ##End switch()
## Convert gametes.table from a vector of abc strings to a list of
## allele vectors
gametes.table <- lapply(strsplit(gametes.table,""),
function (thisVec,phenotype) {
y <- match(thisVec,c("a","b","c","d","e","f"))
return(phenotype[y])
},
phenotype)
##Form the gamete names and assign these to gametes.table and
##probs.table
nms <- sapply(gametes.table,
function(thisVec) {
paste(thisVec,collapse=" ")
})
names(gametes.table) <- nms
names(probs.table) <- nms
return(list(gametes=gametes.table, prob=probs.table))
## stringsAsFactors = FALSE))
}
##################################################
# 2. Hexaploid, marker="phenotype", DRR="max" #
##################################################
getMotherGameteInfoHexPhenoMax <- function (phenotype) {
## Maternal gamete classes and probabilities for a hexaploid marker
## *phenotype* under maximal double reduction. Same interface as
## getMotherGameteInfoHexPhenoMin: returns list(gametes = named list of
## 3-allele character vectors, prob = named numeric vector), keyed by the
## space-separated gamete alleles; "a" codes phenotype[1], "b"
## phenotype[2], etc.
## NOTE(review): probability values are taken as given; the quadriallele
## table contains a single 0.044 among 0.045s — possibly a typo; confirm
## against the source model before changing.
switch(length(phenotype),
{## Monoallele
gametes.table <- c("aaa")
probs.table <- c(1.0)},
{## Bialleles
gametes.table <- c("aaa", "aab", "bba", "bbb")
probs.table <- c(0.1818, 0.3182, 0.3182, 0.1818)},
{## Trialleles
gametes.table <- c("aaa", "aab", "aac", "abc", "bba",
"bbb", "bbc", "cca", "ccb", "ccc")
probs.table <- c(0.049, 0.108, 0.108, 0.204, 0.108, 0.049,
0.108, 0.108, 0.108, 0.049)
},
{## Quadrialleles
gametes.table <- c("aaa", "aab", "aac", "aad", "abc",
"abd", "acd", "bba", "bbb", "bbc",
"bbd", "bcd", "cca", "ccb", "ccc",
"ccd", "dda", "ddb", "ddc", "ddd")
probs.table <- c(0.015, 0.045, 0.045, 0.045, 0.102, 0.102,
0.102, 0.045, 0.015, 0.045, 0.045, 0.102,
0.044, 0.045, 0.015, 0.045, 0.045, 0.045,
0.045, 0.015)
},
{## Pentalleles
gametes.table <- c("aaa", "aab", "aac", "aad", "aae",
"abc", "abd", "abe", "acd", "ace",
"ade", "bba", "bbb", "bbc", "bbd",
"bbe", "bcd", "bce", "bde", "cca",
"ccb", "ccc", "ccd", "cce", "cde",
"dda", "ddb", "ddc", "ddd", "dde",
"eea", "eeb", "eec", "eed", "eee")
probs.table <- c(0.004, 0.020, 0.020, 0.020, 0.020, 0.058,
0.058, 0.058, 0.058, 0.058, 0.058, 0.020,
0.004, 0.020, 0.020, 0.020, 0.058, 0.058,
0.058, 0.020, 0.020, 0.004, 0.020, 0.020,
0.058, 0.020, 0.020, 0.020, 0.004, 0.020,
0.020, 0.020, 0.020, 0.020, 0.004)
},
{## Hexalleles
gametes.table <- c("aab", "aac", "aad", "aae", "aaf",
"abc", "abd", "abe", "abf", "acd",
"ace", "acf", "ade", "adf", "aef",
"bba", "bbc", "bbd", "bbe", "bbf",
"bcd", "bce", "bcf", "bde", "bdf",
"bef", "cca", "ccb", "ccd", "cce",
"ccf", "cde", "cdf", "cef", "dda",
"ddb", "ddc", "dde", "ddf", "def",
"eea", "eeb", "eec", "eed", "eef",
"ffa", "ffb", "ffc", "ffd", "ffe")
probs.table <- c(0.009090667, 0.009090667, 0.009090667,
0.009090667, 0.009090667, 0.036364000,
0.036364000, 0.036364000, 0.036364000,
0.036364000, 0.036364000, 0.036364000,
0.036364000, 0.036364000, 0.036364000,
0.009090667, 0.009090667, 0.009090667,
0.009090667, 0.009090667, 0.036364000,
0.036364000, 0.036364000, 0.036364000,
0.036364000, 0.036364000, 0.009090667,
0.009090667, 0.009090667, 0.009090667,
0.009090667, 0.036364000, 0.036364000,
0.036364000, 0.009090667, 0.009090667,
0.009090667, 0.009090667, 0.009090667,
0.036364000, 0.009090667, 0.009090667,
0.009090667, 0.009090667, 0.009090667,
0.009090667, 0.009090667, 0.009090667,
0.009090667, 0.009090667)
}
) ##End switch()
## Convert gametes.table from a vector of abc strings to a list of
## allele vectors
gametes.table <- lapply(strsplit(gametes.table,""),
function (thisVec,phenotype) {
y <- match(thisVec,c("a","b","c","d","e","f"))
return(phenotype[y])
},
phenotype)
##Form the gamete names and assign these to gametes.table and
##probs.table
nms <- sapply(gametes.table,
function(thisVec) {
paste(thisVec,collapse=" ")
})
names(gametes.table) <- nms
names(probs.table) <- nms
return(list(gametes=gametes.table, prob=probs.table))
## stringsAsFactors = FALSE))
}
########################################################
getMotherGameteInfoHexGenoMin <- function (motherGenotype) {
##################################################
# 3. Hexaploid, marker="genotype", DRR="min" #
##################################################
## Maternal gamete classes and probabilities for a hexaploid *genotype*
## (vector of 6 alleles with known copy numbers) under minimal double
## reduction. Unlike the phenotype versions, the allele copy numbers
## select a sub-type within each branch (max(thisTable) = copies of the
## most frequent allele). Letter "a" codes the most frequent allele,
## "b" the next, etc. (ties broken by sort(table(...)) order).
## Returns list(gametes = named list of 3-allele character vectors,
## prob = named numeric vector).
switch(length(unique(motherGenotype)),
{## Monoallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
gametes.table <- c("aaa")
probs.table <- c(1.0)
},
{## Biallele
#count table of each allele, sorted so most frequent comes first
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
if (max(thisTable)==5) { #Biallele type I, if max no. of allele is 5 must be type I
gametes.table <- c("aaa", "aab")
probs.table <- c(0.5, 0.5)
} else if (max(thisTable)==4) { #Biallele type II, if max no. of allele is 4 must be type II
gametes.table <- c("aaa", "aab", "abb")
probs.table <- c(0.2, 0.6, 0.2)
} else if (max(thisTable)==3) { #Biallele type III, if max no. of allele is 3 must be type III
gametes.table <- c("aaa", "aab", "bbb", "abb")
probs.table <- c(0.05, 0.45, 0.05, 0.45)
}
}, #end switch
{## Triallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
if (max(thisTable)==4) { #Triallele type I if max no. of allele is 4 must be type I
gametes.table <- c("aaa", "aab", "aac", "abc")
probs.table <- c(0.2, 0.3, 0.3, 0.2)
} else if (max(thisTable)==3) { #Triallele type II, if max no. of allele is 3 must be type II
gametes.table <- c("aaa", "aab", "aac", "abb","bbc","abc")
probs.table <- c(0.05, 0.3, 0.15, 0.15, 0.05, 0.3)
} else if (max(thisTable)==2) { #Triallele type III, if max no. of allele is 2 must be type III
gametes.table <- c("aab", "aac", "abb", "bbc", "acc", "bcc", "abc")
probs.table <- c(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4)
}
},
{## Quadriallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
if (max(thisTable)==3) { #Quadriallele type I if max no. of allele is 3 must be type I
gametes.table <-c("aaa", "aab", "aac", "aad","abc", "abd", "acd", "bcd")
probs.table <- c(0.05, 0.15, 0.15, 0.15, 0.15, 0.15, 0.15, 0.05)
} else if (max(thisTable)==2) { #Quadriallele type II if max no. of allele is 2 must be type II
gametes.table <-c("aab", "aac", "aad", "abb", "bbc", "bbd", "abc", "abd", "acd", "bcd")
probs.table <- c(0.1, 0.05, 0.05, 0.1, 0.05, 0.05, 0.2, 0.2, 0.1, 0.1)
}
},
{## Pentallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
gametes.table <- c("aab", "aac", "aad", "aae", "abc", "abd", "abe",
"acd", "ace", "ade", "bcd", "bce", "bde", "cde")
probs.table <- c(0.05, 0.05, 0.05, 0.05, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05)
},
{## Hexalleles
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
gametes.table <- c("abc", "abd", "abe", "abf", "acd",
"ace", "acf", "ade", "adf", "aef",
"bcd", "bce", "bcf", "bde", "bdf",
"bef", "cde", "cdf", "cef", "def")
probs.table <- c(0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05,
0.05, 0.05, 0.05, 0.05, 0.05, 0.05)
}
) ##End switch()
## Convert gametes.table from a vector of abc strings to a list of
## allele vectors; letters map onto alleles in decreasing-frequency order.
thisGenoAllelesOrder<-names(thisTable)
gametes.table <- lapply(strsplit(gametes.table,""),
function (thisVec,thisGenoAllelesOrder) {
# gametes.table<-strsplit(gametes.table,"")
#thisVec<-gametes.table[[2]]
y <- match(thisVec,c("a","b","c","d","e","f"))
return(thisGenoAllelesOrder[y])
},
thisGenoAllelesOrder)
##Form the gamete names and assign these to gametes.table and
##probs.table
nms <- sapply(gametes.table,
function(thisVec) {
paste(thisVec,collapse=" ")
})
names(gametes.table) <- nms
names(probs.table) <- nms
return(list(gametes=gametes.table, prob=probs.table))
## stringsAsFactors = FALSE))
}
##########################################################
getMotherGameteInfoHexGenoMax <- function (motherGenotype) {
##################################################
# 4. Hexaploid, marker="genotype", DRR="max" #
##################################################
## Maternal gamete classes and probabilities for a hexaploid *genotype*
## under maximal double reduction. Same interface and letter-coding as
## getMotherGameteInfoHexGenoMin: "a" = most frequent allele, sub-types
## within each branch chosen by the copy number of the most frequent
## allele (max(thisTable)).
## NOTE(review): probability values taken as given (some tables sum to
## slightly less/more than 1 due to rounding in the source).
switch(length(unique(motherGenotype)),
{## Monoallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
gametes.table <- c("aaa")
probs.table <- c(1.0)
},
{## Biallele
#count table of each allele, sorted so most frequent comes first
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
if (max(thisTable)==5) { #Biallele type I, if max no. of allele is 5 must be type I
gametes.table <- c("aaa", "aab", "abb")
probs.table <- c(0.546, 0.409, 0.045)
} else if (max(thisTable)==4) { #Biallele type II, if max no. of allele is 4 must be type II
gametes.table <- c("aaa", "aab", "bbb", "abb")
probs.table <- c(0.255, 0.509, 0.018, 0.218)
} else if (max(thisTable)==3) { #Biallele type III, if max no. of allele is 3 must be type III
gametes.table <- c("aaa", "aab", "bbb", "abb")
probs.table <- c(0.091, 0.409, 0.091, 0.409)
}
}, #end switch
{## Triallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE)
if (max(thisTable)==4) { #Triallele type I if max no. of allele is 4 must be type I
gametes.table <- c("aaa", "aab", "aac", "abb", "bbc", "acc", "bcc", "abc")
probs.table <- c(0.255, 0.255, 0.255, 0.036, 0.009, 0.036, 0.009, 0.145)
} else if (max(thisTable)==3) { #Triallele type II, if max no. of allele is 3 must be type II
gametes.table <- c("aaa", "aab", "aac", "bbb", "abb", "bbc", "acc", "bcc", "abc")
probs.table <- c(0.091, 0.273, 0.136, 0.018, 0.164, 0.055, 0.027, 0.018, 0.218)
} else if (max(thisTable)==2) { #Triallele type III, if max no. of allele is 2 must be type III
gametes.table <- c("aaa", "aab", "aac", "bbb", "abb", "bbc", "ccc","acc", "bcc", "abc")
probs.table <- c(0.018, 0.109, 0.109, 0.018, 0.109, 0.109, 0.018, 0.109, 0.109, 0.292)
}
},
{## Quadriallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
if (max(thisTable)==3) { #Quadriallele type I if max no. of allele is 3 must be type I
gametes.table <-c("aaa", "aab", "aac", "aad", "abb", "bbc", "bbd", "acc", "bcc",
"ccd", "add", "bdd", "cdd", "abc", "abd", "acd", "bcd")
probs.table <- c(0.0909, 0.1364, 0.1364, 0.1364, 0.0273, 0.0091, 0.0091, 0.0273, 0.0091,
0.0091, 0.0273, 0.0091, 0.0091, 0.1091, 0.1091, 0.1091, 0.0364)
} else if (max(thisTable)==2) { #Quadriallele type II if max no. of allele is 2 must be type II
gametes.table <-c("aaa", "aab", "aac", "aad", "bbb", "abb", "bbc", "bbd", "acc",
"bcc", "ccd", "add", "bdd", "cdd", "abc", "abd", "acd", "bcd")
probs.table <- c(0.0182, 0.1091, 0.0545, 0.0545, 0.0182, 0.1091, 0.0545, 0.0545, 0.0182,
0.0182, 0.0091, 0.0182, 0.0182, 0.0091, 0.1455, 0.1455, 0.0727, 0.0727)
}
},
{## Pentallele
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
gametes.table <- c("aaa", "aab", "aac", "aad", "aae", "abb",
"bbc", "bbd", "bbe", "acc", "bcc", "ccd",
"cce", "add", "bdd", "cdd", "dde", "aee",
"bee", "cee", "dee", "abc", "abd", "abe",
"acd", "ace", "ade", "bcd", "bce", "bde",
"cde")
probs.table <- c(0.018, 0.055, 0.055, 0.055, 0.055, 0.018, 0.009,
0.009, 0.009, 0.018, 0.009, 0.009, 0.009, 0.018,
0.009, 0.009, 0.009, 0.018, 0.009, 0.009, 0.009,
0.073, 0.073, 0.073, 0.073, 0.073, 0.073, 0.036,
0.036, 0.036, 0.036)
},
{## Hexalleles
thisTable <- sort(table(motherGenotype),decreasing=TRUE) #count table of each allele
gametes.table <- c("aab", "aac", "aad", "aae", "aaf", "abb", "bbc",
"bbd", "bbe", "bbf", "acc", "bcc", "ccd", "cce",
"ccf", "add", "bdd", "cdd", "dde", "ddf", "aee",
"bee", "cee", "dee", "eef", "aff", "bff", "cff",
"dff", "eff", "abc", "abd", "abe", "abf", "acd",
"ace", "acf", "ade", "adf", "aef", "bcd", "bce",
"bcf", "bde", "bdf", "bef", "cde", "cdf", "cef",
"def")
probs.table <- c(0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009,
0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009,
0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009,
0.036, 0.036, 0.036, 0.036, 0.036,0.036, 0.036, 0.036, 0.036, 0.036,
0.036, 0.036, 0.036, 0.036, 0.036,0.036, 0.036, 0.036, 0.036, 0.036)
}
) ##End switch()
## Convert gametes.table from a vector of abc strings to a list of
## allele vectors; letters map onto alleles in decreasing-frequency order.
thisGenoAllelesOrder<-names(thisTable)
gametes.table <- lapply(strsplit(gametes.table,""),
function (thisVec,thisGenoAllelesOrder) {
# gametes.table<-strsplit(gametes.table,"")
#thisVec<-gametes.table[[2]]
y <- match(thisVec,c("a","b","c","d","e","f"))
return(thisGenoAllelesOrder[y])
},
thisGenoAllelesOrder)
##Form the gamete names and assign these to gametes.table and
##probs.table
nms <- sapply(gametes.table,
function(thisVec) {
paste(thisVec,collapse=" ")
})
names(gametes.table) <- nms
names(probs.table) <- nms
return(list(gametes=gametes.table, prob=probs.table))
## stringsAsFactors = FALSE))
}
##########################################################
getMotherGameteInfoTetrPhenoMin <- function (phenotype) {
  ##################################################
  # 5. Tetraploid, marker="phenotype", DRR="min"   #
  ##################################################
  ## Enumerate the possible diploid gametes of a tetraploid mother with
  ## the given phenotype (vector of 1-4 distinct allele labels), under
  ## the minimum double-reduction rate, together with their
  ## probabilities.  Returns list(gametes = named list of allele pairs,
  ## prob = matching named probability vector).
  n.alleles <- length(phenotype)
  if (n.alleles == 1) {
    ## Monoallele
    gamete.codes <- c("aa")
    gamete.probs <- c(1.0)
  } else if (n.alleles == 2) {
    ## Bialleles
    gamete.codes <- c("aa", "ab", "bb")
    gamete.probs <- c(0.222, 0.556, 0.222)
  } else if (n.alleles == 3) {
    ## Trialleles
    gamete.codes <- c("aa", "ab", "ac", "bb", "bc", "cc")
    gamete.probs <- c(0.056, 0.222, 0.278, 0.111, 0.278, 0.056)
  } else if (n.alleles == 4) {
    ## Quadrialleles
    gamete.codes <- c("ab", "ac", "ad", "bc", "bd", "cd")
    gamete.probs <- c(0.167, 0.167, 0.167, 0.167, 0.167, 0.167)
  }
  ## Replace the letter codes a-d by the actual phenotype alleles,
  ## giving a list of 2-element allele vectors.
  gamete.list <- lapply(strsplit(gamete.codes, ""),
                        function(code.vec) phenotype[match(code.vec, letters[1:4])])
  ## Name both the gamete list and the probability vector by the
  ## space-separated allele strings.
  gamete.names <- vapply(gamete.list,
                         function(alleles) paste(alleles, collapse = " "),
                         character(1))
  names(gamete.list) <- gamete.names
  names(gamete.probs) <- gamete.names
  list(gametes = gamete.list, prob = gamete.probs)
}
############################################################
getMotherGameteInfoTetrPhenoMax <- function (phenotype) {
  ##################################################
  # 6. Tetraploid, marker="phenotype", DRR="max"   #
  ##################################################
  ## Enumerate the possible diploid gametes of a tetraploid mother with
  ## the given phenotype (vector of 1-4 distinct allele labels), under
  ## the maximum double-reduction rate, together with their
  ## probabilities.  Returns list(gametes = named list of allele pairs,
  ## prob = matching named probability vector).
  n.alleles <- length(phenotype)
  if (n.alleles == 1) {
    ## Monoallele
    gamete.codes <- c("aa")
    gamete.probs <- c(1.0)
  } else if (n.alleles == 2) {
    ## Bialleles
    gamete.codes <- c("aa", "ab", "bb")
    gamete.probs <- c(0.2619, 0.4762, 0.2619)
  } else if (n.alleles == 3) {
    ## Trialleles
    gamete.codes <- c("aa", "ab", "ac", "bb", "bc", "cc")
    gamete.probs <- c(0.095, 0.238, 0.238, 0.095, 0.238, 0.095)
  } else if (n.alleles == 4) {
    ## Quadrialleles: under max DRR the homozygous gametes are possible.
    gamete.codes <- c("aa", "ab", "ac", "ad", "bb", "bc", "bd", "cc", "cd", "dd")
    gamete.probs <- c(0.036, 0.143, 0.143, 0.143, 0.036, 0.143, 0.143, 0.036, 0.143, 0.036)
  }
  ## Replace the letter codes a-d by the actual phenotype alleles.
  gamete.list <- lapply(strsplit(gamete.codes, ""),
                        function(code.vec) phenotype[match(code.vec, letters[1:4])])
  ## Name both the gamete list and the probability vector by the
  ## space-separated allele strings.
  gamete.names <- vapply(gamete.list,
                         function(alleles) paste(alleles, collapse = " "),
                         character(1))
  names(gamete.list) <- gamete.names
  names(gamete.probs) <- gamete.names
  list(gametes = gamete.list, prob = gamete.probs)
}
#test motherGenotype<-c("188","206")
##########################################################
getMotherGameteInfoTetrGenoMin <- function (motherGenotype) {
  ##################################################
  # 7. Tetraploid, marker="genotype", DRR="min"    #
  ##################################################
  ## Possible diploid gametes (and their probabilities) of a tetraploid
  ## mother with the given full genotype (vector of 4 allele labels),
  ## under the minimum double-reduction rate.  The letter codes a,b,c,d
  ## refer to the alleles ranked by copy number in the genotype, most
  ## frequent first.
  allele.counts <- sort(table(motherGenotype), decreasing = TRUE)
  n.unique <- length(allele.counts)
  if (n.unique == 1) {
    ## Monoallele
    gamete.codes <- c("aa")
    gamete.probs <- c(1.0)
  } else if (n.unique == 2) {
    if (max(allele.counts) == 3) {
      ## Biallele type I (simplex): 3 copies of one allele, 1 of the other.
      gamete.codes <- c("aa", "ab")
      gamete.probs <- c(0.5, 0.5)
    } else if (max(allele.counts) == 2) {
      ## Biallele type II (duplex): 2 copies of each allele.
      gamete.codes <- c("aa", "ab", "bb")
      gamete.probs <- c(0.167, 0.667, 0.167)
    }
  } else if (n.unique == 3) {
    ## Trialleles
    gamete.codes <- c("aa", "ab", "ac", "bc")
    gamete.probs <- c(0.16666, 0.333333, 0.333333, 0.16666)
  } else if (n.unique == 4) {
    ## Quadrialleles
    gamete.codes <- c("ab", "ac", "ad", "bc", "bd", "cd")
    gamete.probs <- c(0.167, 0.167, 0.167, 0.167, 0.167, 0.167)
  }
  ## Translate the letter codes into the ranked allele labels.
  ranked.alleles <- names(allele.counts)
  gamete.list <- lapply(strsplit(gamete.codes, ""),
                        function(code.vec) ranked.alleles[match(code.vec, letters[1:6])])
  ## Name gametes and probabilities by the space-separated allele pair.
  gamete.names <- vapply(gamete.list,
                         function(alleles) paste(alleles, collapse = " "),
                         character(1))
  names(gamete.list) <- gamete.names
  names(gamete.probs) <- gamete.names
  list(gametes = gamete.list, prob = gamete.probs)
}
##########################################################
getMotherGameteInfoTetrGenoMax <- function (motherGenotype) {
  ##################################################
  # 8. Tetraploid, marker="genotype", DRR="max"    #
  ##################################################
  ## Possible diploid gametes (and their probabilities) of a tetraploid
  ## mother with the given full genotype (vector of 4 allele labels),
  ## under the maximum double-reduction rate.  The letter codes a,b,c,d
  ## refer to the alleles ranked by copy number in the genotype, most
  ## frequent first.
  allele.counts <- sort(table(motherGenotype), decreasing = TRUE)
  n.unique <- length(allele.counts)
  if (n.unique == 1) {
    ## Monoallele
    gamete.codes <- c("aa")
    gamete.probs <- c(1.0)
  } else if (n.unique == 2) {
    if (max(allele.counts) == 3) {
      ## Biallele type I (simplex): 3 copies of one allele, 1 of the other.
      gamete.codes <- c("aa", "ab", "bb")
      gamete.probs <- c(0.536, 0.429, 0.035)
    } else if (max(allele.counts) == 2) {
      ## Biallele type II (duplex): 2 copies of each allele.
      gamete.codes <- c("aa", "ab", "bb")
      gamete.probs <- c(0.214, 0.572, 0.214)
    }
  } else if (n.unique == 3) {
    ## Trialleles
    gamete.codes <- c("aa", "ab", "ac", "bc", "bb", "cc")
    gamete.probs <- c(0.214286, 0.285714, 0.285714, 0.142857, 0.03571, 0.03571)
  } else if (n.unique == 4) {
    ## Quadrialleles
    gamete.codes <- c("aa", "ab", "ac", "ad", "bc", "bd", "bb", "cc", "cd", "dd")
    gamete.probs <- c(0.036, 0.143, 0.143, 0.143, 0.143, 0.143, 0.036, 0.036, 0.143, 0.036)
  }
  ## Translate the letter codes into the ranked allele labels.
  ranked.alleles <- names(allele.counts)
  gamete.list <- lapply(strsplit(gamete.codes, ""),
                        function(code.vec) ranked.alleles[match(code.vec, letters[1:6])])
  ## Name gametes and probabilities by the space-separated allele pair.
  gamete.names <- vapply(gamete.list,
                         function(alleles) paste(alleles, collapse = " "),
                         character(1))
  names(gamete.list) <- gamete.names
  names(gamete.probs) <- gamete.names
  list(gametes = gamete.list, prob = gamete.probs)
}
##There are more codes to come, that implement the more general cases
##of 0 < DRR < maximum for each ploidy.
##Note also that the above probability tables depend only on the
##number of alleles present in each possible gamete - can we
##abbreviate the above code then? use some sort of combinatorial
##selection code to derive the set of possible gametes from the
##specified phenotype alleles, then assign the relevant probabilities
##based on the number of unique alleles in each Gamete? Hmmm....
##The reason I am interested in this approach rather than the above,
##is that I believe that DF's formulas for the general DRR case will
##depend only on the number of unique alleles in the potential
##Gamete. Hence t'would be sensible to have code that assigns probs
##on the basis of the number of unique alleles in the gamete...
## the combinat package, function combn() does NOT appear to be the way to
## generate the possible gametes, unfortunately....
## require(combinat)
## combn(c("a","b","c"),3)
## combn(c("a","b","c"),2)
## ##Nope - that's not what I want...
## (Combns and Permns do not allow repetition of the elements...)
##Although there is the following:
## combn(rep(c("a","b","c"),3),3)
##This gets closer, but Combn does not recognise that elements of the
##sample are repeated - hence we do not get true combinations (since,
##for example, aab, aba, and baa are all provided in the output. To
##get what I want, the result of this command would need to be sorted
##by columns (the result is a matrix), then reduced to the unique
##columns... We could do this, of course, but there must be an easier
##way to get what I want...
##Expand.grid is another way to get all possible combinations, but I
##have to repeat the sample as many times as the gamete length...
##E.g., for the input Phenotype c("a","b","c"), with gamete length 3,
##we use:
## expand.grid(c("a","b","c"),c("a","b","c"),c("a","b","c"))
##However, this still has the unwanted repetitions which need to be
##removed.
## What do I need? I need to generate the unique, unordered
## selections (with repetition) of m objects from k. There are
## formulas to determining the NUMBER of such selections, but I want a
## simple algorithm (or better, an R function) to generate all of the
## selections themselves...
## I don't think this is going to happen, actually - better to just
## have the tables of selections ready as needed. Still, we can
## combine each such table with a vector of "numbers of indices" that
## also is used to index a shorter vector of probabilities. For
## example, in getMotherGameteInfoHexMin() the trialleles case is:
## gametes.table <- c("aaa", "aab", "aac", "bbb", "abb",
## "bbc", "ccc", "acc", "bcc", "abc")
## probs.table <- c(0.03, 0.105, 0.105, 0.03, 0.105, 0.105,
## 0.03, 0.105, 0.105, 0.28)
##We could replace these lines by:
## gametes.table <- c("aaa", "aab", "aac", "bbb", "abb",
## "bbc", "ccc", "acc", "bcc", "abc")
## probs.table <- c(0.03, 0.105, 0.28)
## probs.ind <- c(1,2,2,1,2,2,1,2,2,3)
## hence:
## probs.table[probs.ind]
## will give the appropriate probability vector to match the vector of
## gametes... Probs.ind can be specified explicitly as above, but we
## could generate it using some R code to count the number of unique
## letters in each entry of gametes.table...
|
83b620c1e0731a327ecd4efae6fca1b2a67f357c
|
453041446d42539b2e13b835fce51825b2e43280
|
/wordfrequencyAnalysis.R
|
506cee3592ec210b9b5960cb6e1ea58f126175fb
|
[] |
no_license
|
pacoraggio/textminingExploratoryAnalysis
|
b7b2f403c28b4bbbbf2f5ae4f0d7fc0f053d5d6f
|
f129ddf3b7ed1dc2fa6739a5b5f0e5d94891da58
|
refs/heads/master
| 2022-04-16T09:40:56.606543
| 2020-03-22T13:38:14
| 2020-03-22T13:38:14
| 244,325,480
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,881
|
r
|
wordfrequencyAnalysis.R
|
# name: wordfrequencyAnalysis.R
# sbj: word frequency analysis
# different Datasets
# type: script
# author: Paolo Coraggio
# create date: 11/03/2020
rm(list = ls())
options(stringsAsFactors = FALSE)
library(dplyr)
library(tidytext)
library(stringr)
library(ggplot2)
library(tictoc)
library(patchwork)
source("createSampledText.R")
source("plotcustomf.R")
source("cleanCorpus.R")
source("wordfrequency.R")
source("tidyNgrams.R")
## ---- Build unigram frequency tables for each 2% corpus sample ----
## The loads presumably provide df.*_withprofanity (from the "WP" files)
## and the profanity-filtered df.news/df.blogs/df.twitter/df.complete
## objects (from the "NoProf" files) -- TODO confirm against
## createSampledText.R.  word.frequency() comes from the sourced
## wordfrequency.R script.
load("news_2percentNoProf.RData")
load("blogs_2percentNoProf.RData")
load("twitter_2percentNoProf.RData")
load("complete_2PercentNoProf.RData")
load("news_2percentWP.RData")
load("blogs_2percentWP.RData")
load("twitter_2percentWP.RData")
load("complete_2percentWP.RData")
# Word frequency with profanity (tic()/toc() only time the computation)
tic()
df.news_wfP <- word.frequency(df.news_withprofanity)
df.blogs_wfP <- word.frequency(df.blogs_withprofanity)
df.twitter_wfP <- word.frequency(df.twitter_withprofanity)
df.complete_wfP <- word.frequency(df.complete_withprofanity)
toc()
# 9.13 sec elapsed - 7.51 sec elapsed
save(df.news_wfP, file = "dfnews_wfProf.RData")
save(df.blogs_wfP, file = "dfblogs_wfProf.RData")
save(df.twitter_wfP, file = "dftwitter_wfProf.RData")
save(df.complete_wfP, file = "dfcomplete_wfProf.RData")
# Word frequency without profanity
tic()
df.news_wf <- word.frequency(df.news)
df.blogs_wf <- word.frequency(df.blogs)
df.twitter_wf <- word.frequency(df.twitter)
df.complete_wf <- word.frequency(df.complete)
toc()
# 7.41 sec elapsed - 7.5 sec elapsed
save(df.news_wf, file = "dfnews_wf.RData")
save(df.blogs_wf, file = "dfblogs_wf.RData")
save(df.twitter_wf, file = "dftwitter_wf.RData")
save(df.complete_wf, file = "dfcomplete_wf.RData")
# Same tables with stop words removed ("SW" suffix, remove.stopwords = TRUE)
tic()
df.news_wfSW <- word.frequency(df.news, remove.stopwords = TRUE)
df.blogs_wfSW <- word.frequency(df.blogs, remove.stopwords = TRUE)
df.twitter_wfSW <- word.frequency(df.twitter, remove.stopwords = TRUE)
df.complete_wfSW <- word.frequency(df.complete, remove.stopwords = TRUE)
toc()
save(df.news_wfSW, file = "dfnews_wfSW.RData")
save(df.blogs_wfSW, file = "dfblogs_wfSW.RData")
save(df.twitter_wfSW, file = "dftwitter_wfSW.RData")
save(df.complete_wfSW, file = "dfcomplete_wfSW.RData")
## ---- Word-level statistics for each dataset ----
## df.*_wf are the tables built with stop words kept; df.*_wfSW were
## built with remove.stopwords = TRUE.  (The original section comments
## had those two labels the wrong way round.)
length(unique(df.news_wf$word))
## Helper: summarise one word-frequency data frame (columns `word`,
## `frequency`) as a named numeric vector of statistics.  Factoring
## this out removes the four-fold duplication of the original
## data.frame construction.
word.stats <- function(wf) {
    word.len <- nchar(wf$word)
    c("total number" = sum(wf$frequency),
      "unique words" = length(unique(wf$word)),
      "max length" = max(word.len),
      "mean" = mean(word.len),
      "sd" = sd(word.len))
}
## Statistics for the tables that keep stop words; row names are the
## corpora, column names get munged by data.frame() to total.number,
## unique.words, max.length, mean, sd -- same as the original.
wf.tables <- list(news = df.news_wf,
                  blogs = df.blogs_wf,
                  twitter = df.twitter_wf,
                  complete = df.complete_wf)
df.charstat <- data.frame(t(sapply(wf.tables, word.stats)))
save(df.charstat, file = "wordstat.RData")
## Same statistics for the stop-word-free ("SW") tables.
wfSW.tables <- list(news = df.news_wfSW,
                    blogs = df.blogs_wfSW,
                    twitter = df.twitter_wfSW,
                    complete = df.complete_wfSW)
df.charstatSW <- data.frame(t(sapply(wfSW.tables, word.stats)))
save(df.charstatSW, file = "wordstatSW.RData")
## ---- Longest "words" (> 20 characters) in each frequency table ----
df.blogslongestwords <- df.blogs_wf[nchar(df.blogs_wf$word) > 20,]
df.newslongestwords <- df.news_wf[nchar(df.news_wf$word) > 20,]
df.twitterlongestwords <- df.twitter_wf[nchar(df.twitter_wf$word) > 20,]
## Order each table by decreasing word length (the original wrapped the
## order() call in a redundant with()).
df.blogslongestwords <- df.blogslongestwords[order(nchar(df.blogslongestwords$word),
                                                   decreasing = TRUE),]
df.newslongestwords <- df.newslongestwords[order(nchar(df.newslongestwords$word),
                                                 decreasing = TRUE),]
df.twitterlongestwords <- df.twitterlongestwords[order(nchar(df.twitterlongestwords$word),
                                                       decreasing = TRUE),]
save(df.newslongestwords, file = "newslongest.RData")
save(df.blogslongestwords, file = "blogslongest.RData")
save(df.twitterlongestwords, file = "twitterlongest.Rdata")
head(df.newslongestwords)
head(df.blogslongestwords)
head(df.twitterlongestwords)
tail(df.blogslongestwords)
tail(df.newslongestwords)
tail(df.twitterlongestwords)
## The single longest word in each table.
## FIX: the original referenced undefined objects (df.twitter.wf,
## twitter.wf, news.wf), which would stop the script with
## "object not found"; use the df.*_wf tables built above.
df.blogs_wf[nchar(df.blogs_wf$word) == max(nchar(df.blogs_wf$word)),]$word
df.twitter_wf[nchar(df.twitter_wf$word) == max(nchar(df.twitter_wf$word)),]$word
df.news_wf[nchar(df.news_wf$word) == max(nchar(df.news_wf$word)),]$word
# "wwwdnrstatemiusspatialdatalibrarypdfmapsmineralleaseinformationoaklandnominationspdf"
## Raw corpus lines starting with "www" -- presumably the source of the
## very long tokens above; TODO confirm df.complete/df.news have a
## `text` column in the loaded samples.
wwwcomplete <- grep("^www", df.complete$text, value = TRUE)
wwwnews <- grep("^www", df.news$text, value = TRUE)
head(df.news_wf)
# 8 sec elapsed - 7.92 sec elapsed
## ---- Sanity checks on the frequency tables ----
## FIX: the original referenced undefined objects (complete.wfSW,
## complete.wf, news.wfSW, news.wf, twitter.wf, blogs.wf); use the
## loaded df.* tables throughout.
r2 <- grep("shit", df.complete_wfSW$word, value = TRUE)
length(r2)
# 42 occurrences without tyding
tail(df.complete_wf$frequency)
summary(df.complete_wf$frequency)
nrow(df.complete_wf)
nrow(df.complete_wf[df.complete_wf$frequency == 1,])
nrow(df.complete_wf[df.complete_wf$frequency < 10,]) / nrow(df.complete_wf)
sum(df.news_wfSW$frequency)
source("plotcustomf.R")
windows()
## Bar plots of the top-15 unigrams per corpus, with and without stop
## words.  FIX: pnews called plotbar.wf() while the other seven calls
## used plotbar.ngramf(); made consistent with the majority -- TODO
## confirm against plotcustomf.R.
pnews <- plotbar.ngramf(df.news_wf, title = "News Word Frequency", topn = 15)
pblogs <- plotbar.ngramf(df.blogs_wf, title = "Blogs Word Frequency", topn = 15)
ptwitter <- plotbar.ngramf(df.twitter_wf, title = "Twitter Word Frequency", topn = 15)
pcomplete <- plotbar.ngramf(df.complete_wf, title = "Complete Word Frequency", topn = 15)
pnewsSW <- plotbar.ngramf(df.news_wfSW, title = "News Word Frequency (SW)", topn = 15)
pblogsSW <- plotbar.ngramf(df.blogs_wfSW, title = "Blogs Word Frequency (SW)", topn = 15)
ptwitterSW <- plotbar.ngramf(df.twitter_wfSW, title = "Twitter Word Frequency (SW)" , topn = 15)
pcompleteSW <- plotbar.ngramf(df.complete_wfSW, title = "Complete Word Frequency (SW)", topn = 15)
save(pnews, file = "plotuninews.RData")
save(pblogs, file = "plotuniblogs.RData")
save(ptwitter, file = "plotunitwitter.RData")
save(pcomplete, file = "plotunicomplete.Rdata")
save(pnewsSW, file = "plotuninewsSW.RData")
save(pblogsSW, file = "plotuniblogsSW.RData")
save(ptwitterSW, file = "plotunitwitterSW.RData")
save(pcompleteSW, file = "plotunicompleteSW.Rdata")
# , "plotuniblogs.RData", "plotunitwitter.RData", "plotunicomplete.RData"))
## Side-by-side comparison (patchwork `+` composition).
windows()
pnews + pnewsSW
windows()
pcomplete + pcompleteSW
## Cross-check: per-corpus counts of one word must sum to the count in
## the combined corpus.
df.news_wf[df.news_wf$word == "sex",]$frequency +
    df.twitter_wf[df.twitter_wf$word == "sex",]$frequency +
    df.blogs_wf[df.blogs_wf$word == "sex",]$frequency ==
    df.complete_wf[df.complete_wf$word == "sex",]$frequency
# ## after profanity filter - to review
#
# load("dfnewsNP.RData")
# load("dfblogsNP.RData")
# load("dftwitterNP.RData")
# load("dfcompleteNP.RData")
## ---- Word length vs. word frequency (fresh workspace) ----
## NOTE(review): rm(list = ls()) wipes all objects; this section is
## written to run on its own from the saved RData files.
rm(list = ls())
load("dfnews_wfSW.RData")
load("dfblogs_wfSW.RData")
load("dftwitter_wfSW.RData")
load("dfcomplete_wfSW.RData")
load("dfnews_wf.RData")
load("dfblogs_wf.RData")
load("dftwitter_wf.RData")
load("dfcomplete_wf.RData")
## FIX: theme_solarized_2() below comes from ggthemes, which the
## original only attached much later in the file; attach it here so
## this section runs without "could not find function" errors.
library(ggthemes)
head(df.blogs_wf, n = 15)
max(df.complete_wf$frequency)
max(nchar(df.complete_wf[df.complete_wf$frequency == 1,]$word))
## For every distinct frequency value, compute the longest word, the
## number of distinct words and the total word count at that frequency.
## FIX: the original grew the three vectors with c() inside one loop
## (quadratic append pattern); vapply() preallocates.
frequency <- sort(unique(df.complete_wf$frequency))
words.at.freq <- function(f) df.complete_wf[df.complete_wf$frequency == f,]$word
max.length <- vapply(frequency,
                     function(f) max(nchar(words.at.freq(f))),
                     numeric(1))
unique.words <- vapply(frequency,
                       function(f) length(unique(words.at.freq(f))),
                       numeric(1))
sum.frequency <- vapply(frequency,
                        function(f) f * length(words.at.freq(f)),
                        numeric(1))
df.wordlength <- data.frame(frequency = frequency,
                            max.length = max.length,
                            unique.words = unique.words,
                            sum.frequency = sum.frequency)
dim(df.wordlength)
head(df.wordlength)
## Maximum word length as a function of frequency (rows 2..200).
windows()
ggplot(df.wordlength[2:200,], aes(x = frequency, y = max.length)) +
    theme_solarized_2(base_size = 12) +
    xlab("Word Frequency") +
    geom_smooth() +
    ylab("Word Maximum Length") +
    geom_point()
df.wordlength[df.wordlength$unique.words > 1,]
## 255 is the largest index containing more than 1 word with that frequency
windows()
ggplot(df.wordlength[10:255,], aes(x = frequency, y = unique.words)) +
    theme_solarized_2(base_size = 12) +
    xlab("Word Frequency") +
    geom_smooth() +
    ylab("Number of unique words") +
    geom_point()
## Tail of the distribution (highest-frequency words).
windows()
ggplot(df.wordlength[688:nrow(df.wordlength),], aes(x = frequency, y = unique.words)) +
    geom_point()
#####################################
## ---- Unigram coverage: share of vocabulary with count <= i ----
## NOTE(review): this section also resets the workspace and reattaches
## the libraries it needs, so it can be run standalone.
rm(list = ls())
library(dplyr)
library(tidytext)
library(stringr)
library(ggplot2)
library(tictoc)
library(patchwork)
load("dfnews_wf.RData")
load("dfblogs_wf.RData")
load("dftwitter_wf.RData")
load("dfcomplete_wf.RData")
load("dfnews_wfSW.RData")
load("dfblogs_wfSW.RData")
load("dftwitter_wfSW.RData")
load("dfcomplete_wfSW.RData")
head(df.news_wf)
## Fraction of unique words whose count is <= i, for i = 1..30.
## FIX: the original grew four vectors with c() inside one loop
## (quadratic append pattern); a vapply() helper is clearer.
k1 <- 1:30
coverage.fraction <- function(wf) {
    vapply(k1,
           function(i) nrow(wf[wf$frequency <= i,]) / nrow(wf),
           numeric(1))
}
news.percentwords <- coverage.fraction(df.news_wf)
blogs.percentwords <- coverage.fraction(df.blogs_wf)
twitter.percentwords <- coverage.fraction(df.twitter_wf)
complete.percentwords <- coverage.fraction(df.complete_wf)
length(news.percentwords)
## One data frame per corpus, then stacked for plotting.
df.newsfreqpercentuni <- data.frame(frequency = k1,
                                    percentage = news.percentwords,
                                    type = rep("news 1-grams", length(k1)))
df.blogsfreqpercentuni <- data.frame(frequency = k1,
                                     percentage = blogs.percentwords,
                                     type = rep("blogs 1-grams", length(k1)))
df.twitterfreqpercentuni <- data.frame(frequency = k1,
                                       percentage = twitter.percentwords,
                                       type = rep("twitter 1-grams", length(k1)))
df.completefreqpercentuni <- data.frame(frequency = k1,
                                        percentage = complete.percentwords,
                                        type = rep("complete 1-grams", length(k1)))
df.freqpercentuni <- rbind(df.newsfreqpercentuni,
                           df.blogsfreqpercentuni,
                           df.twitterfreqpercentuni,
                           df.completefreqpercentuni)
unique(df.freqpercentuni$type)
## Fix the legend order of the corpora.
df.freqpercentuni$type <- factor(df.freqpercentuni$type, levels = c("news 1-grams",
                                                                    "blogs 1-grams",
                                                                    "twitter 1-grams",
                                                                    "complete 1-grams"))
save(df.freqpercentuni, file = "df_freqpercentuni.RData")
library(ggthemes)
windows()
## FIX: dropped the trailing empty argument in the original
## theme_solarized_2(base_size = 12, ) call.
ggplot(df.freqpercentuni, aes(frequency, percentage, color = type)) +
    geom_point(size = 0.9, aes(shape = type)) +
    theme_solarized_2(base_size = 12) +
    ggtitle("Unigram Analysis - Percentage of unique words vs word counts")
|
4569dece74ce2fb2eeb98e4fa00ceb8d1d53d894
|
fd006b7b22ec47e218ed9aae1f13131713f6d57f
|
/R/Criterion.R
|
a888f42cba0a508e42d109c51f07065b19239576
|
[] |
no_license
|
cran/Mediana
|
0651ea64e7d6ac2f47f7630fef81544bb1563b37
|
4be4b35fcbe6254b35dc1840426cd9ffc925ada9
|
refs/heads/master
| 2020-12-25T17:37:38.446245
| 2019-05-08T12:20:03
| 2019-05-08T12:20:03
| 39,719,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
Criterion.R
|
######################################################################################################################
# Function: Criterion.
# Argument: Criterion ID, method, tests, statistics, parameters, labels.
# Description: This function is used to create an object of class Criterion.
#   id         - character ID of the criterion.
#   method     - character name of the evaluation method.
#   tests      - optional list of test names (character) the criterion applies to.
#   statistics - optional list of statistic names (character) the criterion applies to.
#   par        - optional method parameters (passed through unchanged).
#   labels     - labels for the criterion results (passed through unchanged).
# At least one of tests/statistics must be supplied.  Returns a list with
# class "Criterion".
#' @export
Criterion = function(id, method, tests = NULL, statistics = NULL, par = NULL, labels) {

  # Error checks (scalar conditions, so && short-circuits before the
  # is.list() test when the argument is NULL)
  if (!is.character(id)) stop("Criterion: ID must be character.")
  if (!is.character(method)) stop("Criterion: method must be character.")
  if (!is.null(tests) && !is.list(tests)) stop("Criterion: tests must be wrapped in a list.")
  if (!all(vapply(tests, is.character, logical(1)))) stop("Criterion: tests must be character.")
  if (!is.null(statistics) && !is.list(statistics)) stop("Criterion: statistics must be wrapped in a list.")
  if (!all(vapply(statistics, is.character, logical(1)))) stop("Criterion: statistics must be character.")
  if (is.null(tests) & is.null(statistics)) stop("Criterion: tests and/or statistics must be provided")

  criterion = list(id = id,
                   method = method,
                   tests = tests,
                   statistics = statistics,
                   par = par,
                   labels = labels)

  class(criterion) = "Criterion"
  # FIX: the original had an unreachable invisible(criterion) after this
  # return; removed.
  return(criterion)
}
|
1014de63e1f11d29566f9b107cc781114f179d00
|
a7f4a2da0e68806b3a49860a4d554a4bd9041815
|
/Scripts/plot2.R
|
7de3be5527c255c39a26a599d4baff19ee7477bc
|
[] |
no_license
|
hellabaylife/Air-Polution-Case-Study
|
268795785623fae2a8131fc9fa76cf70ecc3011c
|
c19b39509c9c278bc7199e88b1b38bbcd66afe2b
|
refs/heads/master
| 2022-09-20T17:08:50.662286
| 2020-05-29T00:06:13
| 2020-05-29T00:06:13
| 267,723,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 955
|
r
|
plot2.R
|
## plot2.R -- total PM2.5 emissions for Baltimore City (fips "24510")
## by year.  Expects the EPA NEI .rds files in ./data/; writes plot2.png.
library(dplyr)
# Load the two NEI data files: source classification codes and the
# PM2.5 emissions summary.
classification <- readRDS("./data/Source_Classification_Code.rds")
PMdata <- readRDS("./data/summarySCC_PM25.rds")
head(PMdata)
head(classification)
# Join the classification description onto each emission record by SCC.
joineddf <- left_join(PMdata,classification,by = "SCC")
colnames(joineddf)
head(joineddf)
# Drop unused columns.  NOTE(review): columns are dropped by position
# (7, 16, 17, 18) -- verify these indices against the current schema.
df0 <-joineddf[,-c(7,16,17,18)]
# Filter for only Baltimore City rows
baltimore <- df0$fips == "24510"
df_baltimore <- df0[baltimore,]
# Aggregate emissions by year for Baltimore City
emissions_by_year <- aggregate(Emissions ~ year, df_baltimore, sum)
# Plot total emissions by year for Baltimore City (line + points)
plot(emissions_by_year$year, emissions_by_year$Emissions, type = "o", col = "blue", main = expression("Total Baltimore "~ PM[2.5]~ "Emissions by Year"), ylab = expression("Total Baltimore "~ PM[2.5] ~ "Emissions"), xlab = "Year")
# Copy the on-screen plot to a 480x480 PNG, then close that device
dev.copy(png, file = "plot2.png",width = 480,height = 480) ## Copy my plot to a PNG file
dev.off()
|
18c20dcb66f9bba023064cc77d67b127df896fb3
|
345587a1f6bb4bed773ec293be831e3a26329013
|
/run_analysis.R
|
2e185e7f125cb9d9c6f0dc8b81b84186c7a9345b
|
[] |
no_license
|
twibawa/getting_and_cleaning_data
|
889edcd48cec6d5c3789f475f1a9d6156d799b65
|
324ff6d6e35967aead1ad0e3426d5984fd8f6a43
|
refs/heads/master
| 2021-01-02T22:38:06.230092
| 2015-01-22T14:06:24
| 2015-01-22T14:06:24
| 29,672,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,184
|
r
|
run_analysis.R
|
## run_analysis.R -- build a tidy summary of the UCI HAR dataset:
## merge train/test, keep mean/std features, name activities, and
## average each variable per subject and activity.
## NOTE(review): wiping the workspace and hard-coding the working
## directory is fragile, but the relative reads below depend on it.
rm(list = ls(all = T))
dir <- "/homelocal/twibawa/coursera/getting_and_cleaning_data/course_project1/UCI HAR Dataset/"
setwd(dir)
# Feature names (one line per measurement column) and activity labels.
ftr <- readLines("features.txt")
act_label <- readLines("activity_labels.txt")
# Directories of the train and test partitions.
indir1 <- "/homelocal/twibawa/coursera/getting_and_cleaning_data/course_project1/UCI HAR Dataset/train/"
indir2 <- "/homelocal/twibawa/coursera/getting_and_cleaning_data/course_project1/UCI HAR Dataset/test/"
# Read the training measurements (561 feature columns), subjects and
# activity codes.
train.dat <- read.table(file = paste(indir1, "X_train.txt", sep = ""), header = F)
colnames(train.dat) <- seq(1,561, 1)
train.subject <- read.table(file = paste(indir1, "subject_train.txt", sep = ""), header = F)
colnames(train.subject) <- "subject"
train.act <- read.table(file = paste(indir1, "y_train.txt", sep = ""), header = F)
colnames(train.act) <- "action"
# Constructing train dataset: subject, action, then the 561 features.
train <- as.data.frame(cbind(train.subject,train.act,train.dat))
# Read the test set the same way.
test.dat <- read.table(file = paste(indir2, "X_test.txt", sep = ""), header = F)
colnames(test.dat) <- seq(1,561, 1)
test.subject <- read.table(file = paste(indir2, "subject_test.txt", sep = ""), header = F)
colnames(test.subject) <- "subject"
test.act <- read.table(file = paste(indir2, "y_test.txt", sep = ""), header = F)
colnames(test.act) <- "action"
# FIX: build the test set in the same column order as the training set.
# The original used cbind(test.dat, test.subject, test.act); rbind()
# matches data.frame columns by name so it still worked, but the
# mismatch was confusing and error-prone.
test <- as.data.frame(cbind(test.subject,test.act,test.dat))
# 1. Merge training and test data.
dat <- rbind(train, test)
# 2. Keep only mean/std measurements: select feature lines whose name
# contains the whole word "mean" or "std".
ab <- grepl("\\<mean\\>|\\<std\\>", ftr)
ac <- ftr[ab]
# Feature numbers (first token of each selected line) index the columns.
ad <- c(sapply(strsplit(ac, split = " ", fixed = T), FUN = "[", 1))
dat2 <- dat[,c("subject", "action", ad)]
# 3. Use descriptive activity names: ca = numeric code,
# cb = lower-cased activity name (second token of each label line).
ca <- c(sapply(strsplit(act_label, split = " ", fixed = T), FUN = "[", 1))
cb <- c(tolower(sapply(strsplit(act_label, split = " ", fixed = T), FUN = "[", 2)))
cc <- data.frame(cbind(ca, cb))
# Replace each numeric activity code by its name.
# NOTE(review): growing dat3 with rbind() inside the loop is O(n^2);
# kept for fidelity, but merge()/match() would be simpler and faster.
dat3 <- NULL
for (i in unique(cc$ca)){
    # FIX: the original guards were length(cd > 0) / length(cdi > 0),
    # which count elements of a comparison rather than testing emptiness;
    # made the intent explicit (same outcome for all reachable inputs).
    cd <- which(dat2$action == i)
    if (length(cd) > 0){
        cdi <- dat2[cd,]
        if (nrow(cdi) > 0){
            print(cdi[1,])
            # FIX: the original called gsub() with a vector pattern
            # (cdi$action), which warns and only uses the first element.
            # Every row selected here has action == i, so assign directly.
            cdi$action <- cc$cb[which(cc$ca == i)]
            print(cc$cb[which(cc$ca==i)])
            dat3 <- rbind(dat3, cdi)
        }
    }
}
#dat3 <- data.frame(dat3)
# 4. Label the data set with descriptive variable names taken from the
# selected feature lines (second token = feature name).
da <- c(sapply(strsplit(ac, split = " ", fixed = T), FUN = "[", 2))
header <- c("subject", "action", da)
colnames(dat3) <- header
# 5. Tidy data set: average of each variable per subject and activity.
library(plyr)
dat4 <- ddply(dat3, .(subject, action), function(x) colMeans(x[,3:ncol(dat3)]))
# Write the tidy result to a tab-separated file.
write.table(dat4, "av_dat.txt", row.name=FALSE, sep = "\t")
|
da6cd28c8175ed601d47af063ed219d2f97af44c
|
7ae50474a9f2bfceaa72cc72c1e81edc46a81d16
|
/Lab/403_19lab1.R
|
0bc39e994000c48e736fc63e56b3fca4acdd8ede
|
[] |
no_license
|
NanTang1106/Stat-403
|
9ad61032a9d2591fcda04583ade90254e81aa625
|
ec2a7a4765224ed2cc068df40dc35f5c4818b553
|
refs/heads/master
| 2022-09-08T23:00:37.710467
| 2020-05-30T22:48:46
| 2020-05-30T22:48:46
| 268,172,005
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,688
|
r
|
403_19lab1.R
|
## -------------------------------------------------------------
## Stat/Q Sci 403 Lab 1 | Spring 2019 | University of Washington
## -------------------------------------------------------------
## (Teaching script: each top-level expression is meant to be run
## interactively and its printed value/plot inspected.)
### ###
### Part 1: First steps ###
### ###
## R as a caculator
1+4
2*5
18-3
5/4
## Naming variables
x <- 2*5
y = 2*5
## Other math operators also work
log(10)
exp(-4)
2^4
## Creating vectors is usually done by combine function c()
x = c(3,5)
c(18,2000,981)
c(18,2000,981, "a")
## effect of colon
1:10
x = 5:10
x
x[1]
x[5]
x[5] = 11
x
## creating a matrix:
matrix(c(1,2,3,4), nrow=2, ncol=2, byrow = T)
matrix(1:10, nrow=2)
matrix(1:10, ncol=2)
## naming a matrix
A = matrix(1:10, ncol=2)
colnames(A) = c("XXX", "YYY")
A
rownames(A) = c("AA", "BB", "CC", "DD", "EE")
A
A[,2]
rbind(c(1,2,3), 5:7)
rbind(c(1,"a","3"), 5:7)
c(T,F,T, 5)
## creating a list (elements may have different types; third is a function)
B = list(c(1:4), A, exp)
B
B[[1]]
B[[2]]
B[[2]][,1]
B[[3]]
B[[3]](5)
exp(5)
## creating a dataframe
C = data.frame(A)
C
C$XXX
# you can use "$" in a dataframe to extra component
A$XXX
# this does not work for a matrix
## using a built-in data
faithful
head(faithful)
faithful$eruptions
# this works--the built-in data are dataframe
### ###
### Part 2: Graphics ###
### ###
## histogram
hist(faithful$eruptions)
hist(faithful$eruptions, breaks=50, col="green", probability = T)
## boxplot
boxplot(faithful$eruptions)
boxplot(faithful$eruptions, col="skyblue")
## scatter plot
plot(x= faithful[,2], y=faithful[,1])
plot(faithful, pch=16, col="limegreen", main="Scatter plot!")
plot(faithful, pch=16, col="limegreen", cex.axis=1.5, cex.lab=1.2,
main="Scatter plot!", cex.main=2)
## creating a curve of a function
x_base = seq(from=-5, to =5, by=0.1)
x_base
y_value = exp(x_base)+exp(-x_base)
y_value
plot(x=x_base, y=y_value)
plot(x=x_base, y=y_value, type="l")
plot(x=x_base, y=y_value, type="l", lwd=5, col="brown")
#####
##### Exercise:
##### 2-1. Plot the function f(x) = sin(x)
##### for x within [-10, 10].
x_value <- seq(-10, 10, 0.1)
y_value <- sin(x_value)
plot(x_value, y_value, type='l', col='darkorchid', lwd=3)
#####
##### 2-2. Plot the function f(x) = exp(-0.1*x)*cos(5*x)
##### for x within [0,10].
#####
x_value <- seq(0, 10, 0.05)
y_value <- exp(-0.1 * x_value) * cos(5 * x_value)
plot(x_value, y_value, type='l', lwd=3)
### ###
### Part 3: Random Variables ###
### ###
## normal random variable
rnorm(10, mean = 5, sd=10)
# equals to rnorm(n=10), n is the number of random numbers
qnorm(0.50)
pnorm(1.64)
pnorm(qnorm(0.3))
qnorm(pnorm(3))
dnorm(0)
1/sqrt(2*pi)*exp(-0^2/2)
## exponential random variable
rexp(100,rate = 5)
hist(rexp(1000), breaks=100)
hist(rexp(1000), breaks=100, probability = T)
# y-axis is now "density"
dexp(1)
exp(-1)
dexp(1, rate = 2)
2*exp(-2)
# p(x;\lambda) = \lambda * exp(-\lambda*x)
## uniform random variable
runif(50)
hist(runif(10000), col="orange")
runif(50, min=2, max=5)
## Bernoulli and Bionomial distribution
rbinom(10, size=1, p=0.7)
# size=1: Bernoulli
rbinom(10, size=2, p=0.7)
#####
##### Exercise:
##### 3-1. Plot the density curve of N(1,2^2)
##### in the interval [-3, 3]
#####
x_value <- seq(-3, 3, 0.1)
## NOTE(review): dnorm(x_value) is the density of N(0,1); for N(1, 2^2) the
## exercise would need dnorm(x_value, mean = 1, sd = 2) -- confirm intent.
y_value <- dnorm(x_value)
plot(x_value, y_value, col='limegreen', type='l', lwd=3)
##### 3-2. Generate 1000 data points from Exp(3),
##### plot the histogram,
##### and compare to its density curve
#####
x_value = rexp(1000, 3)
hist(x_value, breaks = 50, col='limegreen', probability = T)
### ###
### Part 4: Loops ###
### ###
x0 = NULL
for(i in 1:10){
x0 = c(x0, 2*i)
print(i)
}
x0
## resampling uniform distributions
## (animated: redraws the histogram 100 times with a short pause)
for(i in 1:100){
hist(runif(1000), breaks=seq(from=0, to=1, by=0.05),
probability =T, ylim=c(0,1.6), col="palegreen")
abline(h=1, lwd=3, col="purple")
Sys.sleep(0.1)
}
## distribution of many medians
n_rep = 10000
n = 100
df0 = 4
sample_median = rep(NA,n_rep)
for(i in 1:n_rep){
data = rchisq(n, df = df0)
# generate n data points from Chi-sq(df0)
sample_median[i] = median(data)
}
hist(sample_median, probability = T, col="skyblue", breaks=50)
# It also converges to a normal distribution!
# We will talk about why this happens at the end of this course.
## central limit theorem
n_rep = 10000
n = 10
rate0 = 3
sample_mean = rep(NA, n_rep)
for(i in 1:n_rep){
data = rexp(n,rate = rate0)
# generate n data points from Exp(rate0)
sample_mean[i] = mean(data)
# i-th element of object "sample_mean" is a sample average
}
hist(sample_mean, probability = T, col="orchid", breaks=100)
x_base = seq(from=0, to=1, by=0.001)
# overlay the CLT normal approximation N(1/rate0, 1/(n*rate0^2))
lines(x=x_base, y=dnorm(x_base, mean=1/rate0, sd=1/(sqrt(n)*rate0)),
lwd=3, col="dodgerblue")
## failure of central limit theorem
n_rep = 100
n = 1000
sample_mean = rep(NA, n_rep)
for(i in 1:n_rep){
data = rcauchy(n)
# generate n data points from Cauchy(0,1) distirbution
# the "mean" of Cauchy distribution does not exist!
sample_mean[i] = mean(data)
}
hist(sample_mean, probability = T, col="pink", breaks=100)
x_base = seq(from=0, to=1, by=0.001)
# with some probability, the sample mean could be very large/small
#####
##### Exercise:
##### 4. Study the distribution of sample standard deviation.
##### Try to generate n=100 data points from Exp(5), compute the sample SD.
##### Repeat the procedure n_rep=10000 times and plot the histogram.
##### You can use the function sd() to compute sample SD.
##### Vary the value of n to see if the distribution converges.
#####
n_rep = 10000
result <- rep(NA, n_rep)
for (i in 1:n_rep) {
result[i] <- sd(rexp(100, 5))
}
hist(result, probability = T, breaks = 100)
## changing sample size (optional)
n_rep = 10000
rate0 = 3
n_seq = c(1, 5, 10, 25, 50, 100, 250, 500, 1000)
for(n in n_seq){
sample_mean = rep(NA, n_rep)
for(i in 1:n_rep){
data = rexp(n,rate = rate0)
sample_mean[i] = mean(data)
}
hist(sample_mean, probability = T, col="palevioletred", breaks=50,
xlim=c(0,1), main=n)
x_base = seq(from=0, to=1, by=0.001)
lines(x=x_base, y=dnorm(x_base, mean=1/rate0, sd=1/(sqrt(n)*rate0)),
lwd=3, col="dodgerblue")
Sys.sleep(2)
}
|
264b2e29dfa6ac134d84a0703489fb36df1ba762
|
e9f06ea90b7c9127f3b05bafa7e0d34c7074d4e5
|
/Dict-PCA.R
|
4118d4457a46ffbadf0b28c823faf797366bfbf3
|
[] |
no_license
|
danielahertrich/masterthesis
|
1dee9b1800f8dee197972aa3d009000394c8e224
|
1c2e2c6c0efc3c22a778ae106e092d8f36e5183f
|
refs/heads/master
| 2021-01-01T19:47:57.860948
| 2017-07-29T22:42:29
| 2017-07-29T22:42:29
| 98,687,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 655
|
r
|
Dict-PCA.R
|
## Dictionary-learning baseline via PCA: factorise the face data as
## X ~ A %*% H, where A = U_q * diag(d_q) (first q left singular vectors
## scaled by singular values) and H = V_q'. Saves timing and dictionary.
library(tictoc)
## load data
load(file="faces400.rda")
load(file="faces100.rda")
## Training set: held-out faces 51-100 of `test` plus the first 350 of `X`.
## NOTE(review): assumes `test` and `X` are the objects stored in the .rda
## files above -- confirm their names and dimensions.
Xtrain <- rbind(test[51:100,],X[1:350,])
################################################################################
## Compute PCA approximation
## set parameter (number of retained components)
q <- 100 #300
## learn dictionary and measure time
tic.clearlog()
tic("Total")
sv <- svd(Xtrain)
A <- sv$u[,1:q,drop=FALSE] %*% diag(sv$d[1:q])
H <- t(sv$v[,1:q,drop=FALSE])
toc(log = TRUE)
timing <- tic.log(format = TRUE)
save(timing, file=paste("time_PCA",q,".rda",sep=""))
## save dictionary
spdict <- list(A=A,H=H)
save(spdict,file = paste("dict_PCA",q,".rda",sep = ""))
|
0abaf89c3264908338509df0394f8f37558b3104
|
ae6e6b7c22365c298ae1dfa7e1cecefb79f91f3d
|
/專案/R簡介/simult.R
|
ed8bdd33dd2c499d943b1cad65e237e000b16fac
|
[] |
no_license
|
jimmywuwu/FinanceLab
|
8d7cc201a1be19e25b0c720cc52be877b24c0679
|
6bdc90a78e242a4ed7882cbde6a2c449a0135ba3
|
refs/heads/master
| 2021-06-28T14:02:43.454150
| 2017-09-21T11:52:12
| 2017-09-21T11:52:12
| 104,292,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
simult.R
|
## Generate exponential random variables; use the sample mean of n = 30
## draws to illustrate the Law of Large Numbers (means concentrate near 1).
rexp(30)
n_rep <- 10000
x <- numeric(n_rep)  # preallocate instead of growing an array in the loop
for (i in 1:n_rep) {
  x[i] <- mean(rexp(30))
}
hist(x, xlim = c(0.5, 1.5))
## Generate normal random variables; check the coverage of the nominal 95%
## confidence interval for the mean of N(0, 1) (should contain 0 ~95% of runs).
upper <- numeric(n_rep)
lower <- numeric(n_rep)
contain <- numeric(n_rep)  # BUGFIX: `contain` was never initialised -> error
for (i in 1:n_rep) {
  # BUGFIX: the interval bounds must come from the SAME sample; the original
  # called mean(rnorm(30)) twice, building an interval from two independent
  # samples, which is not a valid confidence interval.
  m <- mean(rnorm(30))
  upper[i] <- m + 1 / sqrt(30) * 1.96
  lower[i] <- m - 1 / sqrt(30) * 1.96
  contain[i] <- (upper[i] > 0) * (0 > lower[i])  # 1 iff interval covers 0
}
ci <- data.frame(upper, lower, contain)
|
13c12fb838999f418210b3332e07af45f2366fec
|
34f47cd569e93cb52b3d49921cb344d038beb388
|
/man/binscatter.Rd
|
6656ccdca9bfd3beb3e07d9732c4263963da51ed
|
[
"MIT"
] |
permissive
|
apoorvalal/LalRUtils
|
05d0fca93f543fac5c12da47c390a8c083823724
|
718b20d0c601993c6385b8256dd40ff055b0a4e9
|
refs/heads/master
| 2023-05-11T15:40:31.236435
| 2023-05-07T22:32:00
| 2023-05-07T22:32:00
| 97,187,378
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,392
|
rd
|
binscatter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotUtils.R
\name{binscatter}
\alias{binscatter}
\title{R implementation of binned scatterplot and CEF plotter, with added options for cluster variance}
\usage{
binscatter(
fmla,
key_var,
data,
plotraw = TRUE,
bins = 20,
rawdata_colour = "black",
rawdata_alpha = 0.2,
rawdata_size = 0.5,
linfit_width = 0.6,
linfit_colour = "blue",
cef_point_size = 1,
cef_point_colour = "red",
ci_colour = "gray",
ci_alpha = 0.3
)
}
\arguments{
\item{fmla}{FELM formula *as a string*}
\item{key_var}{X axis variable for CEF}
\item{data}{dataframe}
\item{plotraw}{T if underlying scatterplot should be plotted}
\item{bins}{number of bins}
\item{rawdata_colour}{Colour of Rawdata}
\item{rawdata_alpha}{Alpha of Rawdata}
\item{rawdata_size}{Size of Rawdata}
\item{linfit_width}{width of linear fit}
\item{linfit_colour}{colour of linear fit}
\item{cef_point_size}{Size of binscatter points}
\item{cef_point_colour}{Colour of binscatter points}
\item{ci_colour}{Colour of CI ribbon}
\item{ci_alpha}{Alpha of CI ribbon}
}
\description{
R implementation of binned scatterplot and CEF plotter, with added options for cluster variance
}
\examples{
binscatter('Sepal.Length ~ Petal.Length + Petal.Width|Species', key_var = 'Petal.Width', iris)
}
\keyword{bins}
\keyword{cef}
\keyword{scatterplot}
|
aaf635a09f77d6275714379ca3d67a1e080fdb5c
|
cec2ef5ae03f994aa618be4fe5df61619a12257b
|
/GRLTest/IndicatortargetmorethanworsT/IndicatortargetmorethanworsT/IndicatortargetmorethanworsT.r
|
e082bc185acc47dcc5a12aecbc81f8952f5ff462
|
[] |
no_license
|
m81092/GRLToMath
|
40052eb6b4e8ecff544a2d18af408366c1465c8e
|
6bd13adeea09700ce738412895c6b81af0456fc5
|
refs/heads/master
| 2020-06-19T14:02:55.387404
| 2018-06-20T21:57:05
| 2018-06-20T21:57:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
IndicatortargetmorethanworsT.r
|
# Piecewise-linear score (0-100) for an indicator where higher raw values are
# better ("target is more than worst"). Vectorised over Indicator1:
#   Indicator1 >= 300        -> 100
#   200 <= Indicator1 < 300  -> 50*|0.01*x - 2| + 50        (rises 50 -> 100)
#   30  < Indicator1 < 200   -> -50*|x/170 - 200/170| + 50  (rises 0 -> 50)
#   Indicator1 <= 30         -> 0
IndicatortargetmorethanworsT <- function( Indicator1){
  # Return the score directly; the original assigned it to a throwaway
  # variable `expr`, which returned the value invisibly.
  ifelse(Indicator1 >= 300.0,100.0,ifelse(Indicator1 >= 200.0,50.0*abs(0.01*Indicator1 - 2.0) + 50.0,ifelse(Indicator1 > 30.0,-50.0*abs(0.00588235294117647*Indicator1 - 1.17647058823529) + 50.0,0)))
}
|
8130a0b31ec0d991267b03d8093dd0cecbea1cec
|
233fc514b4ddcaf277e618a7195e46c0bac079b5
|
/dimred_functions.r
|
eb57d7d50a707818873d012b72579347de8eec34
|
[] |
no_license
|
uncountablecat/capstone-project
|
7ad691dba0ce241d50a7fe76c577b0e4819dc075
|
192f67bbe6c8cf0a777c6fe70a3a0ceda273d2b1
|
refs/heads/master
| 2021-05-30T03:37:12.186633
| 2015-12-05T00:06:26
| 2015-12-05T00:06:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,397
|
r
|
dimred_functions.r
|
# This file contains dimension reduction interface and functions
# Dimension reduction interface
# Dimension-reduction dispatcher: routes X_highdim to the requested method.
# `method` accepts the spellings shown below; 'none' returns the input
# unchanged; unknown methods fall through (usage hints kept commented out).
# `alpha` is forwarded to the diffusion-map kernel width; its default 0.5
# preserves the previously hard-coded value.
dimreduc = function(X_highdim, method, alpha = 0.5) {
  if (method == 'PCA' || method == 'pca') {
    pca(X_highdim);
  }
  else if (method =='kpca' || method == 'kPCA') {
    # Kernel PCA is not implemented; branch intentionally does nothing.
    # print ('Performing kernal PCA......');
    # #kpca(X_train);
    # print ('Kernal PCA done!');
  }
  else if (method == 'difmap') {
    diffusionmap(X_highdim, alpha);
  }
  else if (method == 'laplacian' || method == 'Laplacian') {
    # BUGFIX: previously called laplacian(X_train), silently depending on a
    # global training matrix instead of the argument passed in.
    laplacian(X_highdim);
  }
  else if (method == 'none') {
    return (X_highdim); # basically does nothing
  }
  else {
    # print ('ENTER THE FOLLOWING ARGUMENTS AS A STRING:');
    # print ('Principal Component Analysis: Enter \'pca\'');
    # print ('Kernal PCA: Enter \'kpca\'');
    # print ('Diffusion Map: Enter \'difmap\'');
    # print ('Laplacian Eigenmaps: Enter \'laplacian\'')
  }
}
# PCA dimension reduction: keeps the smallest number of principal components
# whose cumulative variance share exceeds 80%, then projects the data onto
# those components and returns the projected matrix.
pca = function(X_highdim) {
pcaObj = prcomp(X_highdim,scale=TRUE,center=TRUE,retx=TRUE);
# choose n such that it explains 80% of the variance
# d = 0; variance_explained = 0.0;
# while (variance_explained < 0.8) {
# d = d + 1;
# variance_explained = sum((pcaObj$sd[1:d])^2)/sum((pcaObj$sd)^2);
# };
q = pcaObj$sdev;
# print( cumsum(q))
# print( cumsum(q^2) / sum( q^2) )
# Cumulative proportion of variance; d = first index exceeding 80%.
cumProp = cumsum(q^2) / sum(q^2);
d = min( which ( cumProp > 80/100 ) );
dim_reduc_matrix = pcaObj$rotation[,1:d];
# NOTE(review): prcomp was run with center/scale, but the projection below
# multiplies the RAW data by the rotation. The centered/scaled scores are
# already available as pcaObj$x[, 1:d] -- confirm which is intended.
X_lowdim = X_highdim %*% dim_reduc_matrix;
return (X_lowdim);
}
# Diffusion-map embedding (2-D): builds a Gaussian kernel from pairwise
# Euclidean distances, normalises it into a Markov transition matrix
# P = D^-1 K, and returns the 2nd and 3rd eigenvectors of P (the 1st
# eigenvector of a stochastic matrix is the trivial constant one).
diffusionmap = function(X_highdim, alpha=0.75) {
#Create Gaussian kernel matrix: K[i,j] = exp(-d(i,j)^2 / alpha)
DIST = dist(X_highdim, method = "euclidean", diag = FALSE, upper = TRUE, p = 2);
DIST = as.matrix(DIST);
K = DIST^2;
K = (-1/alpha)*K;
K = exp(K);
#Create diffusion matrix. Recall that diffusion matrix P = D^(-1) %*% K
#Where D is diagonal consisting row-sums of K
D = matrix(data=0,nrow=dim(K)[1],ncol=dim(K)[2]);
for (i in 1:dim(K)[1]) {
# NOTE(review): sum(K[, i]) is a COLUMN sum; K is symmetric here so it
# equals the row sum, but K[i, ] would match the comment above.
D[i,i] = sum(K[,i]);
}
#Create matrix P
P = solve(D) %*% K;#solve calculates the inverse of D
#Calculate eigenvectors of D
eigen_P = eigen(P);
# remember that the first eigenvector is always trivial
eigenvectors_P = eigen_P$vectors[,2:3];
eigenvectors_P = as.matrix(eigenvectors_P);
return (eigenvectors_P);
#colnames(test) = c('x','y','type');
#plot = ggplot(test,aes(y,x));
#print(plot + geom_point(aes(colour=factor(type))));
}
# Laplacian eigenmaps -- unimplemented stub; currently returns NULL.
laplacian = function(X_highdim) {
# construct adjacency matrix using k-nearest neighbor
}
|
f560eef6cf97d48e82bda72ce7a2ccab72c13330
|
e7d957ba5725f4b1dd4392daa97ed0ff2e65c7dc
|
/R/01_text_processing.R
|
85a98c4414e27cca011a9303e810346383ee9668
|
[] |
no_license
|
batpigandme/tidymueller
|
c2dd50801535eece01eb01c3a6cf5fac15290ede
|
85cf108e52c29d2ff75e4a28965296f9b9b13220
|
refs/heads/master
| 2020-05-15T12:04:22.480401
| 2019-04-22T10:02:32
| 2019-04-22T10:02:32
| 182,253,609
| 18
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,392
|
r
|
01_text_processing.R
|
# src: https://www.garrickadenbuie.com/blog/redacted-text-extracted-mueller-report/
# Purpose: tokenise the Mueller report and the Watergate Special Prosecution
# Force report into tidy word-count tables (stop words and numeric tokens
# removed) so the two vocabularies can be compared downstream.
# libraries ---------------------------------------------------------------
library(tidyverse)
library(ggpage)
library(tidytext)
library(stringr)
# read in report from @grrrck ---------------------------------------------
mueller_report_csv <- "https://raw.githubusercontent.com/gadenbuie/mueller-report/ab74012b0532ffa34f3a45196d2b28004e11b9c2/mueller_report.csv"
mueller_report <- read_csv(mueller_report_csv)
# tidy texting ------------------------------------------------------------
# One row per word; unnest_tokens lower-cases and strips punctuation.
tidy_mueller <- mueller_report %>%
unnest_tokens(word, text)
# remove stop words
data(stop_words)
tidy_mueller <- tidy_mueller %>%
anti_join(stop_words)
mueller_word_count <- tidy_mueller %>%
count(word, sort = TRUE) %>%
filter(!str_detect(word, "[0-9]")) # remove numbers
# read in watergate special prosecution force report ----------------------
# source: https://archive.org/stream/WatergateSpecialProsuectionForceReport/Watergate%20Special%20Prosuection%20Force%20Report_djvu.txt"
watergate_report <- read_csv(here::here("data", "watergate_report.csv"))
# Same pipeline as above, applied to the Watergate report.
tidy_watergate <- watergate_report %>%
unnest_tokens(word, text)
tidy_watergate <- tidy_watergate %>%
anti_join(stop_words)
watergate_word_count <- tidy_watergate %>%
count(word, sort = TRUE) %>%
filter(!str_detect(word, "[0-9]"))
|
d8fd83e50703a3bf6ea1a8f804865f64efbce3ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/msm/examples/ppass.msm.Rd.R
|
d84908315bb77b4de8666cf2c636755515657ca4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 852
|
r
|
ppass.msm.Rd.R
|
## Extracted help-page examples for msm::ppass.msm (first-passage
## probabilities of a continuous-time Markov chain with intensity matrix Q).
library(msm)
### Name: ppass.msm
### Title: Passage probabilities
### Aliases: ppass.msm
### Keywords: models
### ** Examples
## 4-state chain; state 4 is absorbing (its row of Q is all zeros).
Q <- rbind(c(-0.5, 0.25, 0, 0.25), c(0.166, -0.498, 0.166, 0.166),
c(0, 0.25, -0.5, 0.25), c(0, 0, 0, 0))
## ppass[1,2](t) converges to 0.5 with t, since given in state 1, the
## probability of going to the absorbing state 4 before visiting state
## 2 is 0.5, and the chance of still being in state 1 at t decreases.
ppass.msm(qmatrix=Q, tot=2)
ppass.msm(qmatrix=Q, tot=20)
ppass.msm(qmatrix=Q, tot=100)
## Restrict to the 3-state sub-chain and rebuild a valid intensity matrix
## (zero the diagonal, then set it to minus the row sums).
Q <- Q[1:3,1:3]; diag(Q) <- 0; diag(Q) <- -rowSums(Q)
## Probability of about 1/2 of visiting state 3 by time 10.5, the
## median first passage time
ppass.msm(qmatrix=Q, tot=10.5)
## Mean first passage time from state 2 to state 3 is 10.02: similar
## to the median
efpt.msm(qmatrix=Q, tostate=3)
|
3030fa0515d4e16f4724cddededdae2946a2e885
|
01204b228054d6d3240961f5a115a7d6ce6296ad
|
/R/wop.R
|
347a7772082f1dfbc0fd7d2fb6b89b6bd4fac53c
|
[] |
no_license
|
rbarcellos/MScPack
|
90e111f10ad0eaf66984909a321e151aea7d0951
|
687b10b626613beae4770a28932a032f5917547a
|
refs/heads/master
| 2020-05-29T17:12:55.723706
| 2014-07-06T20:12:06
| 2014-07-06T20:12:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,840
|
r
|
wop.R
|
# MScPack
# Description: WOP and PLT functions
# Author: Rafael Barcellos
# Last updated 26th June, 2014
# R 3.1.0
#' Loadings matrix via WOP (Weighted Orthogonal Procrustes)
#'
#' Applies the WOP method to guarantee uniqueness (identifiability) of the
#' factor-loadings matrix across MCMC draws: each draw is rotated by an
#' orthogonal matrix towards a common reference, and the reference (the mean
#' of the rotated draws) is iterated to a fixed point.
#' @param Lambda array of MCMC draws of the loadings (q x k x N);
#' @param max.iter maximum number of iterations until convergence.
#' @return List with the rotated loadings \code{LambdaWOP} (q x k x N) and
#'   the orthogonal rotations \code{D} (k x k x N) applied to each draw.
wop.fdlm <- function(Lambda, max.iter = 100){
q = dim(Lambda)[1]
k = dim(Lambda)[2]
N = dim(Lambda)[3]
# Initial reference: the last MCMC draw.
Lambda.star = Lambda[,,N]
Lambda.0 = array(0, c(q, k))
it = 0
D = array(NA, c(k, k, N))
LambdaWOP = array(NA, c(q, k, N))
# Iterate until the reference stops moving (squared Frobenius diff < 1e-9).
while(sum((Lambda.star-Lambda.0)^2)>1e-9){
Lambda.0 = Lambda.star
for (r in 1:N){
# Orthogonal Procrustes step: D[,,r] = U V' from the SVD of Lambda_r' Lambda.0.
Sr = t(Lambda[,,r]) %*% Lambda.0
Sr.svd = svd(Sr)
D[,,r] = Sr.svd$u %*% t(Sr.svd$v)
LambdaWOP[,,r] = Lambda[,,r] %*% D[,,r]
}
# New reference: elementwise mean of the rotated draws.
Lambda.star = apply(LambdaWOP, c(1,2), mean)
it = it+1
if(it>max.iter)
break
}
return(list(LambdaWOP = LambdaWOP, D = D))
}
#' Loadings matrix via PLT (positive lower triangular)
#'
#' Applies the PLT identifiability constraint to each MCMC draw of the
#' loadings matrix: from the QR decomposition Lambda' = QR it follows that
#' Lambda = R'Q', so post-multiplying Lambda by Q (with a sign reflection
#' making the diagonal positive) yields a lower-triangular loadings matrix
#' with positive diagonal.
#' @param Lambda array with the MCMC draws of the loadings matrix (q x k x N).
#' @return List with the PLT-constrained loadings array \code{LambdaPLT} and
#'   the orthogonal rotation \code{D.plt} of the constraint per draw.
plt.fdlm <- function(Lambda){
N = dim(Lambda)[3]
LambdaPLT = Lambda
k = dim(Lambda)[2]
D.plt = array(NA, c(k, k, N))
for (r in 1:N){
qr.Lambda = qr(t(Lambda[,,r]))
LambdaPLT[,,r] = t(qr.R(qr.Lambda))
# Reflection flips column signs so the diagonal of R' becomes positive.
reflexion = diag(sign(diag(LambdaPLT[,,r])))
LambdaPLT[,,r] = LambdaPLT[,,r] %*% reflexion
D.plt[,,r] = qr.Q(qr.Lambda) %*% reflexion
}
# Lambda' = Q %*% R
# Lambda = R' %*% Q'
# Lambda %*% Q = R'
# Lambda %*% Q %*% reflexion = R' %*% reflexion = Lambda_PLT
# :. D.plt = Q %*% reflexion
return(list(LambdaPLT = LambdaPLT, D.plt = D.plt))
}
|
c5de6878b46a83a0ae0b8adb8d1579f9f3e04d71
|
15459ebda416cc43de700dd417c585ac3fc42b95
|
/run_analysis.R
|
60d72ca62ef76ecda4b1bfcb18e6e07e34368679
|
[] |
no_license
|
jross252/CourseProjectGetCleanData
|
220f8d9f2e1a009bf5879fe54c671f5ca03d36b6
|
9df0b9064b7fb0ec80cc002619f1a6deba9e0444
|
refs/heads/master
| 2021-01-19T14:07:31.931202
| 2014-12-21T07:14:36
| 2014-12-21T07:14:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,758
|
r
|
run_analysis.R
|
library(dplyr)
findColumns <- function(dataFile){
  ## Return the integer positions of rows in `dataFile` whose second column
  ## names a mean or standard-deviation measurement, i.e. whose second
  ## "-"-separated token is exactly "mean()" or "std()".
  ## (e.g. "tBodyAcc-mean()-X" matches; "tBodyAcc-meanFreq()-X" does not.)
  ##
  ## dataFile: data frame as read from features.txt (column 2 = feature name).
  ## Returns integer(0) when nothing matches (the original returned NULL;
  ## both subset to zero columns when used as df[, result]).
  # Second token of each name; NA when a name contains no "-" separator.
  second <- vapply(
    strsplit(dataFile[, 2], split = "-", fixed = TRUE),
    function(tok) if (length(tok) >= 2) tok[2] else NA_character_,
    character(1)
  )
  # %in% maps NA to FALSE, matching the original identical() checks;
  # this replaces the O(n^2) c(subsetVector, i) growth in a loop.
  which(second %in% c("mean()", "std()"))
}
## ---- Script body: build the tidy dataset (course project steps 1-5) ---------
## Expects the UCI HAR files (X_*.txt, y_*.txt, subject_*.txt, features.txt)
## in the working directory; relies on findColumns() defined above.
testData <- read.table("X_test.txt", stringsAsFactors = FALSE)
trainData <- read.table("X_train.txt", stringsAsFactors = FALSE)
##read in the two big data files
bigComboData <- rbind(testData, trainData)
##combine them into one large file - step 1 in assignment
columnHeadings <- read.table("features.txt", stringsAsFactors = FALSE)
## read in column headings from features.txt
subsetVector <- findColumns(columnHeadings)
## get vector of position of column headings with mean and std dev
selectData <- bigComboData[, subsetVector]
## subset combo data file for all columns of mean and std dev
## this is step 2 of the assignment
testActivities <- read.table("y_test.txt", stringsAsFactors = FALSE)
trainActivities <- read.table("y_train.txt", stringsAsFactors = FALSE)
## read in the files with activity data (by number)
comboActivities <- as.character(c(testActivities[,1], trainActivities[,1]))
##combine activities into 1 vector
comboActivities <- gsub("1", "Walking", comboActivities)
comboActivities <- gsub("2", "Walking_Upstairs", comboActivities)
comboActivities <- gsub("3", "Walking_Downstairs", comboActivities)
comboActivities <- gsub("4", "Sitting", comboActivities)
comboActivities <- gsub("5", "Standing", comboActivities)
comboActivities <- gsub("6", "Lying_Down", comboActivities)
##Replace activity numbers with descriptive names - step 3 of assignment
## (safe here because each element is a single digit "1".."6")
selectData <- cbind(comboActivities, selectData)
##add activities to data file with means and std deviations
testSubjects <- read.table("subject_test.txt")[,1]
trainSubjects <- read.table("subject_train.txt")[,1]
allSubjects <- c(testSubjects, trainSubjects)
##read in subject IDs and concatenate into one vector of length 10299
selectData <- cbind(allSubjects, selectData)
##add subject IDs to big data file
colnames(selectData) <- c("Subject_ID", "Activity", columnHeadings[subsetVector,2])
## add column names to the big data file - step 4 of the assignment
summaryData <- summarise_each(group_by(selectData, Subject_ID, Activity), funs(mean))
## use the dplyr functions to get means of each variable for each
## subject/activity pair.
11708700b96d4b85e0cedeecea06999d465e4ab8
|
643be29c3b468936910c2202bbe1efb98c567ca8
|
/Scripts/build-attribute-reports.R
|
81bdb1d5a93424265bd94102742816ffdff5844f
|
[] |
no_license
|
DasOakster/wilko.com
|
83d478fcbada1b8dc3170897982ae1bb60a87992
|
5c652122dc937c2eae0add9a9a352bb98e37ff33
|
refs/heads/master
| 2021-08-28T05:58:27.856192
| 2017-12-01T16:15:39
| 2017-12-01T16:15:39
| 112,758,581
| 0
| 0
| null | 2017-12-01T16:15:40
| 2017-12-01T15:59:40
|
R
|
UTF-8
|
R
| false
| false
| 3,239
|
r
|
build-attribute-reports.R
|
# Creates a summary of product attribute completion by PSA 1
# For each PSA_1 group and each attribute column (37:49 of the global
# `web.product.data`), computes the fraction of non-NA rows whose attribute
# flag equals 1, then writes the summary to a dated CSV.
# NOTE(review): every assignment uses `<<-`, so the function clobbers many
# globals (psa, dq.data, total, psa1.attribute.score, ...) as side effects;
# other code may rely on those globals -- confirm before localising them.
build.psa1.attribute.summary <- function() {
# Attribute the attribute columns from Web Product Data
attributes <- 37:49
# Set up vectors to create the data frame
# (grown with append() inside the loop -- O(n^2), acceptable for small n)
v.psa <<- character()
v.attribute <<- character()
v.complete <<- double()
psa1 <<- unique(web.product.data$PSA_1)
for(j in 1:length(psa1)) {
psa <<- psa1[j]
for(i in 1:length(attributes)) {
dq.field <<- attributes[i]
# Keep ID columns 1,2,7,8,9 plus the current attribute; drop NA rows.
dq.data <<- web.product.data[web.product.data$PSA_1 == psa,c(1,2,7,8,9,dq.field)]
dq.data <<- na.omit(dq.data)
total <<- NROW(dq.data)
completed <<- sum(dq.data[,6]==1)
complete.ratio <<- completed / total
v.psa <<- append(v.psa,psa)
v.attribute <<- append(v.attribute,names(web.product.data[dq.field]))
v.complete <<- append(v.complete,complete.ratio)
}
}
psa1.attribute.score <<- cbind.data.frame(v.psa,v.attribute,v.complete)
psa1.attribute.score <<- na.omit(psa1.attribute.score)
colnames(psa1.attribute.score ) <<- c("PSA.1","DQ Attribute","Complete %")
write.csv(psa1.attribute.score,paste("Web Product Attribute Completion PSA 1_",Sys.Date(),".csv",sep = ""), row.names = FALSE)
}
#-------------------------------------------------------------------------------------------------------------------------------
# Creates a summary of product attribute completion by PSA 2
# Same computation as build.psa1.attribute.summary but nested one level
# deeper (PSA_1 x PSA_2 x attribute), accumulating one row per combination.
# NOTE(review): rbind() inside a triple loop is O(n^2); collecting rows in a
# list and binding once would scale better if the data grows.
build.psa2.attribute.summary <- function() {
psa1.all <- unique(web.product.data$PSA_1)
psa2.attribute.summary <- data.frame("PSA_1" = character(),"PSA_2" = character(),"Attribute" = character(),"Complete" = double())
attributes <- 37:49
for(i in 1:length(psa1.all)) {
psa1 <- psa1.all[i]
psa.data <- subset(web.product.data,web.product.data$PSA_1 == psa1)
psa2.all <- unique(psa.data$PSA_2)
for(j in 1:length(psa2.all)) {
psa2 <- psa2.all[j]
for(k in 1:length(attributes)) {
attribute <- attributes[k]
attribute_name <- names(web.product.data[attribute])
# Keep ID columns 1,2,7,8,9 plus the current attribute; drop NA rows.
dq.data <- psa.data[psa.data$PSA_2 == psa2,c(1,2,7,8,9,attribute)]
dq.data <- na.omit(dq.data)
total <- NROW(dq.data)
completed <- sum(dq.data[,6]==1)
complete.ratio <- completed / total
df <- data.frame(psa1,psa2,attribute_name,complete.ratio)
psa2.attribute.summary <- rbind(psa2.attribute.summary,df)
}
}
}
psa2.attribute.summary <- na.omit(psa2.attribute.summary)
colnames(psa2.attribute.summary) <- c("PSA.1", "PSA.2", "Attribute", "Complete %")
write.csv(psa2.attribute.summary,paste("Web Product Attribute Completion PSA 2_",Sys.Date(),".csv",sep = ""), row.names = FALSE)
}
|
65661dd147976a8e201e16c0b8a196ef3880e73c
|
76bcce81de3e6f21ce01767367e0f29e051f7d18
|
/Chapter 3 Performing Operations/VectorArithmetic.R
|
c23d80e411819aa71a65c2f3baca69a06c2e7902
|
[] |
no_license
|
amasse-1/Learning-R-for-Data-Analysis
|
9a779ef238930c340f4b95ea83180b85b950347f
|
cefc635f2b003dc4b08c60c4930aa5414e93b895
|
refs/heads/main
| 2022-12-30T06:53:37.913503
| 2020-10-22T22:47:09
| 2020-10-22T22:47:09
| 282,105,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
VectorArithmetic.R
|
# Vector Arithmetic: demonstrates R's recycling rules when adding vectors
# of different lengths.
series <- c(1:9)
cat("Series:", series, "\n")
slice <- series[1:3]
cat("Slice:", slice, "\n")
# 9 + 3 elements: 3 divides 9, so `slice` is recycled silently.
total <- series + slice
cat("Total:", total, "\n")
slice <- series[1:4]
# BUGFIX: the message read "New S;ice" (typo); corrected to "New Slice:".
cat("New Slice:", slice, "\n")
# 9 + 4 elements: 4 does not divide 9, so R warns about the partial recycle.
total <- series + slice
cat("New Total:", total, "\n")
|
260695308c2014c40bb221688506b7bd3ff49bf5
|
60c4da067d2e2b2e491714f735f2b10fe973a959
|
/man/seq_plot.Rd
|
2e3da9e1d3483d0b27c2b7117e721a94e1a82a14
|
[
"Apache-2.0"
] |
permissive
|
lance-waller-lab/envi
|
cba3c7025d8977528c6f5bc74d46b241840ba050
|
aef46e16e472e114137858498d9c6f8c99fe5341
|
refs/heads/main
| 2023-04-09T10:21:42.032660
| 2023-02-02T00:17:01
| 2023-02-02T00:17:01
| 295,176,649
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,553
|
rd
|
seq_plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seq_plot.R
\name{seq_plot}
\alias{seq_plot}
\title{Prepare an 'im' or 'SpatRaster' object for plotting with sequential color palette}
\usage{
seq_plot(input, cols, thresh_up = NULL, digits = 1)
}
\arguments{
\item{input}{An object of class 'im' or 'SpatRaster' from the \code{\link{perlrren}} function.}
\item{thresh_up}{Numeric. The upper value to concatenate the color key. The default (NULL) uses the maximum value from \code{input}.}
\item{digits}{Integer. The number of significant digits for the labels using the \code{\link[base]{round}} function (default is 1).}
\item{cols}{Character string of length three (3) specifying the colors for plotting: 1) presence, 2) neither, and 3) absence from the \code{\link{plot_obs}} function.}
}
\value{
An object of class 'list'. This is a named list with the following components:
\describe{
\item{\code{v}}{An object of class 'vector' for the estimated ecological niche values.}
\item{\code{cols}}{An object of class 'vector', returns diverging color palette values.}
\item{\code{breaks}}{An object of class 'vector', returns diverging color palette breaks.}
\item{\code{at}}{An object of class 'vector', returns legend breaks.}
\item{\code{labels}}{An object of class 'vector', returns legend labels.}
}
}
\description{
Internal function to convert 'im' object or 'SpatRaster' object to values readable by \code{\link[fields]{image.plot}} function within the \code{\link{plot_perturb}} function.
}
\keyword{internal}
|
9e5804bf65d638001bf7eaa6e08d29cad2ac9c68
|
6d98e4a6beb8083aafa57e86f841b695db4a38a9
|
/man/archive.Rd
|
8d5f2d5cb257ca038310f9d72065c3d9d09ab631
|
[] |
no_license
|
gitter-badger/archivist
|
feb506469622b63e51174d23ad8526a0d7d68644
|
b6f1c7721ff9579245fb9d0d3b216555594d1c25
|
refs/heads/master
| 2021-01-19T06:47:33.279858
| 2016-01-07T19:21:12
| 2016-01-07T19:21:12
| 29,694,949
| 0
| 0
| null | 2015-01-22T19:06:48
| 2015-01-22T19:06:48
| null |
UTF-8
|
R
| false
| true
| 5,200
|
rd
|
archive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/archive.R
\name{archive}
\alias{archive}
\title{Archive Artifact to Local and Github Repository}
\usage{
archive(artifact, commitMessage = aoptions("commitMessage"),
repo = aoptions("repo"), user = aoptions("user"),
password = aoptions("password"), archiveData = aoptions("archiveData"),
archiveTags = aoptions("archiveTags"),
archiveMiniature = aoptions("archiveMiniature"),
force = aoptions("force"), rememberName = aoptions("rememberName"), ...,
userTags = c(), silent = aoptions("silent"), ascii = aoptions("ascii"),
alink = aoptions("alink"))
}
\arguments{
\item{artifact}{An artifact to be archived on Local and Github \link{Repository}.}
\item{commitMessage}{A character denoting a message added to the commit while archiving \code{artifact} on GitHub Repository.
By default, an artifact's \link{md5hash} is added to the commit message when it is specified to \code{NULL}.}
\item{repo}{A character denoting GitHub repository name and synchronized local existing directory in which an artifact will be saved.}
\item{user}{A character denoting GitHub user name. Can be set globally with \code{aoptions("user", user)}.
See \link{archivist-github-integration}.}
\item{password}{A character denoting GitHub user password. Can be set globally with \code{aoptions("password", password)}.
See \link{archivist-github-integration}.}
\item{archiveData}{A logical value denoting whether to archive the data from the \code{artifact}.}
\item{archiveTags}{A logical value denoting whether to archive Tags from the \code{artifact}.}
\item{archiveMiniature}{A logical value denoting whether to archive a miniature of the \code{artifact}.}
\item{force}{A logical value denoting whether to archive \code{artifact} if it is already archived in
a Repository.}
\item{rememberName}{A logical value. Should not be changed by a user. It is a technical parameter.}
\item{...}{Further arguments passed to \link{alink} function, when \code{alink} set to \code{TRUE} OR
graphical parameters denoting width and height of a miniature. See details.
Further arguments passed to \link{head}. See Details section in \link{saveToRepo} about \code{firtsRows} parameter}
\item{userTags}{A character vector with Tags. These Tags will be added to the repository along with the artifact.}
\item{silent}{If TRUE produces no warnings.}
\item{ascii}{A logical value. An \code{ascii} argument is passed to \link{save} function.}
\item{alink}{Logical. Whether the result should be put into \link{alink} function. Pass further arguments with \code{...}
parameter.}
}
\description{
\code{archive} stores artifacts in the local \link{Repository} and automatically pushes archived
artifacts to the Github \code{Repository} with which the local \code{Repository} is synchronized
(via \link{createEmptyGithubRepo} or \link{cloneGithubRepo}). Function stores artifacts in the same
way as the \link{saveToRepo} function. More archivist functionalities that integrate archivist and GitHub API
can be found here \link{archivist-github-integration} (\link{agithub}).
}
\examples{
\dontrun{
# empty Github Repository creation
library(httr)
myapp <- oauth_app("github",
key = app_key,
secret = app_secret)
github_token <- oauth2.0_token(oauth_endpoints("github"),
myapp,
scope = "public_repo")
# setting options
aoptions("github_token", github_token)
aoptions("user", user)
aoptions("password", password)
createEmptyGithubRepo("archive-test4", default = TRUE)
## artifact's archiving
exampleVec <- 1:100
# archiving
archive(exampleVec) -> md5hash_path
## proof that artifact is really archived
showGithubRepo() # uses options from setGithubRepo
# let's remove exampleVec
rm(exampleVec)
# and load it back from md5hash_path
aread(md5hash_path)
# clone example
unlink("archive-test", recursive = TRUE)
cloneGithubRepo('https://github.com/MarcinKosinski/archive-test')
setGithubRepo(aoptions("user"), "archive-test")
data(iris)
archive(iris)
showGithubRepo()
## alink() option
vectorLong <- 1:100
vectorShort <- 1:20
# archiving
alink(archive(vectorLong))
archive(vectorShort, alink = TRUE)
showGithubRepo()
}
}
\author{
Marcin Kosinski, \email{m.p.kosinski@gmail.com}
}
\seealso{
Other archivist: \code{\link{Repository}},
\code{\link{Tags}}, \code{\link{\%a\%}},
\code{\link{addTagsRepo}}, \code{\link{ahistory}},
\code{\link{alink}}, \code{\link{aoptions}},
\code{\link{archivist-github-integration}},
\code{\link{archivist-package}}, \code{\link{aread}},
\code{\link{asearch}}, \code{\link{cache}},
\code{\link{cloneGithubRepo}},
\code{\link{copyLocalRepo}},
\code{\link{createEmptyRepo}}, \code{\link{deleteRepo}},
\code{\link{getTagsLocal}},
\code{\link{loadFromLocalRepo}}, \code{\link{md5hash}},
\code{\link{rmFromRepo}}, \code{\link{saveToRepo}},
\code{\link{searchInLocalRepo}},
\code{\link{setLocalRepo}},
\code{\link{shinySearchInLocalRepo}},
\code{\link{showLocalRepo}},
\code{\link{splitTagsLocal}},
\code{\link{summaryLocalRepo}},
\code{\link{zipLocalRepo}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.