blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3fcf444ac841378b010c9cf6ffd933283a5b00a7
|
ec1ce1d118af37136e93ada8ba2be12333ed505f
|
/R/summary.parGADA.R
|
27097c16fb2643751c43a5ab5f3bfd341a024af9
|
[
"MIT"
] |
permissive
|
isglobal-brge/R-GADA
|
d80bf3a5970aaff39f038bf69468fcdeff3c0d40
|
5ec5b5eb1f7da4b946d795425cec02c196f33245
|
refs/heads/master
| 2022-01-09T15:40:17.053097
| 2019-05-04T18:13:06
| 2019-05-04T18:13:06
| 114,474,855
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,864
|
r
|
summary.parGADA.R
|
# Summarise the segmentation results of a parGADA analysis.
#
# object      Path to the parGADA project directory. The working directory is
#             changed to it as a side effect and not restored -- kept as-is
#             because downstream project code appears to rely on it
#             (NOTE(review): consider on.exit(setwd(old))).
# Samples     Sample indices to summarise; defaults to attr(object, "Samples").
# threshold   Lower/upper normal limits for the segment means; estimated with
#             findNormalLimits() when missing.
# length.base Minimum and maximum segment length in base units; when missing,
#             all segments are reported (c(0, Inf)).
# chr         Chromosomes to summarise.
# ...         Unused here; kept for S3 method compatibility.
#
# Returns an object of class "summaryParGADA": one element per chromosome,
# each a list of per-sample matrices of filtered segments, with the filter
# settings attached as attributes.
summary.parGADA <- function(object, Samples, threshold, length.base,
                            chr = c(1:22, "X", "Y"), ...)
{
  x <- object
  setwd(x)
  if (missing(length.base))
  {
    # FIX: corrected "lenght" -> "length" in the user-facing message.
    warning("All segments are reported. If you want to filter the minimum and maximum \n length of segments, adjust 'length.base' \n (e.g. length.base=c(500,10e6) in base units)")
    length.base <- c(0, Inf)
  }
  if (missing(threshold))
  {
    threshold <- findNormalLimits(x)
    if (any(is.na(threshold)))
      stop("Normal Limits cannot be estimated. Give 'threshold' argument manually")
  }
  if (missing(Samples))
    Samples <- attr(x, "Samples")
  if (length(Samples) == 1)
    Samples <- c(1, Samples)
  # Loads `res` -- presumably a list with one segment matrix per sample;
  # TODO(review): confirm against the code that writes SBL/allSegments.
  load("SBL/allSegments")
  # Keep segments on `chr` with non-zero state (col 6) whose mean (col 4) lies
  # outside [threshold[1], threshold[2]] and whose length (col 2 - col 1) is
  # within `length.base` and strictly positive.
  ff <- function(x, chr, threshold, length.base)
  {
    cond <- x[, 5] == chr & x[, 6] != 0 &
      (x[, 4] < threshold[1] | x[, 4] > threshold[2]) &
      x[, 2] - x[, 1] >= length.base[1] &
      x[, 2] - x[, 1] <= length.base[2] &
      x[, 2] - x[, 1] > 0
    return(x[cond, ])
  }
  # Flags (TRUE) the candidate segments on `chr` that do NOT pass the
  # threshold/length filter.
  # NOTE(review): uses <=/>= where ff() uses strict </>, so boundary values
  # are treated differently by the two filters -- kept as in the original.
  ff2 <- function(x, chr, threshold, length.base)
  {
    cond <- x[, 5] == chr & x[, 6] != 0 & x[, 2] - x[, 1] > 0
    xx <- x[cond, ]
    cond2 <- (xx[, 4] <= threshold[1] | xx[, 4] >= threshold[2]) &
      xx[, 2] - xx[, 1] >= length.base[1] &
      xx[, 2] - xx[, 1] <= length.base[2]
    return(!cond2)
  }
  ans <- list()
  no.cnv <- list()
  # seq_along() is safe if `chr` is ever empty (1:length(chr) would be c(1, 0)).
  for (i in seq_along(chr))
  {
    ans[[i]] <- lapply(res, FUN = ff, chr = chr[i],
                       threshold = threshold,
                       length.base = length.base)
    no.cnv[[i]] <- lapply(res, FUN = ff2, chr = chr[i],
                          threshold = threshold,
                          length.base = length.base)
  }
  attr(ans, "no.cnv") <- no.cnv
  attr(ans, "length.base") <- length.base
  attr(ans, "threshold") <- threshold
  attr(ans, "Info") <- x
  attr(ans, "Samples") <- Samples
  attr(ans, "labels.samples") <- labels(x)
  attr(ans, "chr") <- chr
  class(ans) <- "summaryParGADA"
  names(ans) <- paste("chromosome", chr)
  ans
}
|
3f3f40748ed4083a520bdd2057e8015999539249
|
eda8d141b93c154e4c680e08a8bb6564e165713f
|
/R/datesplitter.R
|
85d5669f4ffb5ce9d207a88db5f03604dcb3686d
|
[] |
no_license
|
yoavbendor1/LakeN
|
1ec810515a1c14140f80d2fdaf1dd9bfeb981a49
|
b6f378992fc33fdd3c02840aed48d0a8ed9524d3
|
refs/heads/master
| 2020-06-13T23:05:48.966498
| 2017-01-12T10:54:34
| 2017-01-12T10:54:34
| 75,534,210
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,514
|
r
|
datesplitter.R
|
#' date splitter
#'
#' this function splits the input data.frame into a list of data frames according to a specified date range
#'
#' @param input (-) input data as dataframe; must contain a \code{Date} column
#' @param max.date latest date of the range, in a form accepted by \code{as.Date}
#' @param min.date earliest date of the range, in a form accepted by \code{as.Date}
#'
#' @author Yoav BD
#' @return only the data that falls into the required date range (a list with one data frame per year window)
datesplitter <-
function(input, max.date,min.date){
  # Distinct calendar years present in the input, as "%Y" strings
  years=format(unique(as.Date(strftime(as.Date(input$Date), format = "%Y-01-01"))),'%Y')
  min.year=format(unique(as.Date(strftime(as.Date(min.date), format = "%Y-01-01"))),'%Y')
  max.year=format(unique(as.Date(strftime(as.Date(max.date), format = "%Y-01-01"))),'%Y')
  # Month and day components of the range endpoints (zero-padded strings)
  min.month=format(unique(as.Date(strftime(as.Date(min.date), format = "%Y-%m-01"))),'%m')
  max.month=format(unique(as.Date(strftime(as.Date(max.date), format = "%Y-%m-01"))),'%m')
  min.day=format(unique(as.Date(strftime(as.Date(min.date), format = "%Y-%m-%d"))),'%d')
  max.day=format(unique(as.Date(strftime(as.Date(max.date), format = "%Y-%m-%d"))),'%d')
  # Per-year copies of the range endpoints ("YYYY-mm-dd" strings)
  max.date.vec=matrix(data=NA, ncol=1, nrow=length(years))
  min.date.vec=matrix(data=NA, ncol=1, nrow=length(years))
  for (jj in 1:length(years)){
    # NOTE(review): both branches of each if below are identical, so the
    # "< 10" / ">= 10" test has no effect; min.month/max.month are character
    # strings here, so the comparison with 10 is lexicographic -- confirm intent.
    if (min.month < 10) min.format=paste(years[jj],min.month, min.day, sep="-")
    if (min.month >= 10) min.format=paste(years[jj],min.month, min.day, sep="-")
    if (max.month < 10) max.format=paste(years[jj],max.month, max.day, sep="-")
    if (max.month >= 10) max.format=paste(years[jj],max.month, max.day, sep="-")
    max.date.vec[jj,1]=format(unique(as.Date(strftime(as.Date(max.date), format = max.format))),'%Y-%m-%d')
    min.date.vec[jj,1]=format(unique(as.Date(strftime(as.Date(min.date), format = min.format))),'%Y-%m-%d')
  }
  # Drop the first/last window when the endpoints cross a year boundary
  if (min.date.vec[1,1] > max.date.vec[1,1]) max.date.vec[1,1]=NA
  if (min.date.vec[length(min.date.vec),1] > max.date.vec[length(max.date.vec),1]) min.date.vec[length(min.date.vec),1]=NA
  min.date.vec=min.date.vec[!is.na(min.date.vec)]
  max.date.vec=max.date.vec[!is.na(max.date.vec)]
  result=vector("list", length=0)
  for (jj in 1:length(max.date.vec)){
    temp.index=matrix(data=NA, ncol=1, nrow=nrow(input))
    for (ii in 1:nrow(input)){
      # 1 when the row's date falls inside [min, max] for this window, else 0.
      # NOTE(review): the inner ifelse has no `no` argument; it is only reached
      # when the outer condition is FALSE, in which case its own condition is
      # always TRUE, so the missing argument is never evaluated -- fragile.
      temp.1=ifelse (input$Date[ii] > max.date.vec[jj] | input$Date[ii] < min.date.vec[jj], 0,
      ifelse (input$Date[ii] >= min.date.vec[jj] & input$Date[ii] <= max.date.vec[jj], 1))
      temp.index[ii,1]=temp.1
    }
    row_sub = apply(temp.index, 1, function(row) all(row ==1))
    # NOTE(review): years[jj+1] skips the first year and reads one past the
    # end on the last iteration -- suspected off-by-one; confirm intent.
    result[[years[jj+1]]]=as.data.frame(input[row_sub,])
  }
  #result[,first.data.col]=input[,first.data.col]*temp.index
  return(result)
}
|
877ae1b4d97014c46253323a32f7c0163f4e9632
|
8b093e95766bf3c10cb83a115d4ea64672e6aa60
|
/gsplom.Rcheck/00_pkg_src/gsplom/R/gsplom.R
|
d66a63d80944232650a90986d8ba7ba9d20409c0
|
[
"Artistic-2.0"
] |
permissive
|
andrewdyates/gsplom.rpackage
|
8a833398809246fe2882b53f4722529ebae04208
|
50a01d0a85cc299886bf9d812c441ed71589cd0c
|
refs/heads/master
| 2021-03-12T23:26:51.662351
| 2014-04-18T23:04:14
| 2014-04-18T23:04:14
| 17,549,321
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,767
|
r
|
gsplom.R
|
##########################################################################
# Copyright (c) 2014 Andrew Yates
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##########################################################################
source("R/clsDist.R")
library("fastcluster")
# Generated using make.color.bins()
# core colors: GLYPH.COLS <- c("#ffffff", "#00b271", "#0089d9", "#3424b3", "#000000", "#a40e78", "#d82a36", "#eb5918", "#ffffff")
# N: 15
GLYPH.COLORS <- c("#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#EEF9F5","#DDF4EC","#CCEFE2","#BBEAD9","#AAE5CF","#99E0C6","#88DBBC","#77D5B3","#66D0A9","#55CBA0","#44C696","#32C18D","#21BC83","#10B77A","#00B271","#FFFFFF","#EEF7FC","#DDEFF9","#CCE7F7","#BBDFF4","#AAD7F2","#99CFEF","#88C7ED","#77C0EA","#66B8E8","#55B0E5","#44A8E3","#32A0E0","#2198DE","#1090DB","#0089D9","#FFFFFF","#F1F0F9","#E3E1F4","#D6D3EF","#C8C4EA","#BBB6E5","#ADA7E0","#A098DB","#928AD6","#857BD1","#776DCC","#6A5EC7","#5C4FC2","#4F41BD","#4132B8","#3424B3","#FFFFFF","#EEEEEE","#DDDDDD","#CCCCCC","#BBBBBB","#AAAAAA","#999999","#888888","#777777","#666666","#555555","#444444","#323232","#212121","#101010","#000000","#FFFFFF","#F8EEF6","#F2DEED","#ECCEE3","#E6BEDB","#E0AED2","#DA9EC9","#D48EC0","#CE7EB7","#C86EAE","#C25EA5","#BC4E9C","#B63E93","#B02E8A","#AA1E81","#A40E78","#FFFFFF","#FCF0F1","#F9E2E4","#F7D4D6","#F4C6C9","#F2B8BC","#EFA9AE","#EC9BA1","#EA8D93","#E77F86","#E57179","#E2626B","#DF545E","#DD4650","#DA3843","#D82A36","#FFFFFF","#FDF3EF","#FCE8E0","#FBDDD0","#F9D2C1","#F8C7B2","#F7BCA2","#F5B193","#F4A683","#F39B74","#F19065","#F08555","#EF7A46","#ED6F36","#EC6427","#EB5918","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF","#FFFFFF")
# Draw a generalized scatterplot matrix (gsplom) for M.
#
# M   numeric matrix; statistics are computed between all pairs of rows
#     (per the progress messages) -- presumably variables in rows.
# ... forwarded to gsplomCore() (and from there to gsplomPlot()).
#
# Returns the list produced by gsplomCore() with the distance-correlation
# matrix (DCOR) and logical-dependency-class matrix (CLS) attached.
gsplom <- function(M, ...) {
  message("Computing all-pairs-rows Distance Correlation...")
  # Missing values require the NA-aware variant of the dCor computation.
  if (any(is.na(M))) {
    message("Input M has missing values. Using dcorMatrixNA.")
    DCOR <- dcorMatrixNA(M)
  } else {
    DCOR <- dcorMatrix(M)
  }
  message("Computing all-pairs-rows Logical Dependency Class...")
  CLS <- logicClassMatrix(M)
  R <- gsplomCore(DCOR, CLS, ...)
  R$CLS <- CLS
  R$DCOR <- DCOR
  R
}
# Cluster by combined dCor / logic-class distance and draw the gsplom plot.
#
# DCOR     all-pairs distance-correlation matrix.
# CLS      all-pairs logical-dependency-class matrix.
# doLabels when TRUE, use rownames(DCOR) as plot labels; otherwise none (NA).
# ...      forwarded to gsplomPlot().
#
# Returns a list with the reordered dendrogram (Rhclust) and leaf order (order).
gsplomCore <- function(DCOR, CLS, doLabels=TRUE, ...) {
  R <- list()
  message("Computing dCor distance matrix...")
  DCOR.Dist <- as.matrix(dist(DCOR))
  message("Computing Logical Dependency Class distance matrix...")
  CLS.Dist <- logicClassDist(CLS)
  # Combined distance: euclidean distance between dCor profiles plus a
  # down-weighted (sqrt, halved) logic-class distance.
  DIST <- DCOR.Dist + sqrt(CLS.Dist)/2
  dCorRowMeans <- rowMeans(DCOR, na.rm=TRUE)
  # Average-linkage clustering on the combined distance.
  R$Rhclust <- as.dendrogram(hclust(as.dist(DIST), method="average"))
  # Reorder leaves by mean dCor so strongly-related rows plot together.
  R$Rhclust <- reorder(R$Rhclust, dCorRowMeans)
  R$order <- order.dendrogram(R$Rhclust)
  if (doLabels) {
    labels <- rownames(DCOR)
  } else {
    labels <- NA
  }
  gsplomPlot(DCOR, CLS, R$order, labels=labels, ...)
  R
}
# Render the gsplom heatmap with image().
#
# DCOR, CLS  matrices as produced by gsplom(); CLS carries integer class
#            codes, DCOR the strength mapped onto a color shade per class.
# order      row/column permutation (from clustering) applied before drawing.
# labels     axis labels, or NULL to suppress them.
# MIN, MAX   dCor range mapped onto the N = 15 shades per class band.
# asGlyphs   expand each cell into a 2x2 glyph via expandCls()/toGlyph().
# doDrawLabs appears unused in this body -- labels are drawn whenever
#            `labels` is non-NULL; TODO(review): confirm intent.
# cex        axis label size.
gsplomPlot <- function(DCOR, CLS, order, labels, MIN=0.2, MAX=1, asGlyphs=TRUE, doDrawLabs=TRUE, cex=0.7, ...) {
  breaks <- makeBreaks(MAX, MIN)
  offsets <- makeOffsets(breaks)
  ## Select the greatest offset less than x.
  choose.offset <- function(x) offsets[tail(which(breaks<=x),1)]
  N <- 15
  # Global break vector: the N+1 shade breaks replicated for each of the 9
  # class bands (integer part = class code, fractional part = shade).
  N.BREAKS <- c(sapply(0:8, function(x) rep(x,N+1)+breaks[1:(N+1)]), 9)
  ## Make image heatmap from matrices
  OFFSET <- apply(DCOR, c(1,2), choose.offset)
  G <- (CLS+OFFSET)[order,order]
  if (asGlyphs) G <- expandCls(G)
  w <- ncol(G); h <- nrow(G)
  # image() draws row 1 at the bottom; transpose and flip for matrix layout.
  Img <- t(G)[,seq(h,1,-1)]
  ## Get labels
  if(!is.null(labels)) {
    labels <- labels[order]
    labCol <- labels
    labRow <- rev(labels)
    if (asGlyphs) {
      # Each label spans a 2x2 glyph: interleave names with blank strings.
      labCol <- sapply(1:(length(labels)*2), function(i) expandNames(i,labCol))
      labRow <- sapply(1:(length(labels)*2), function(i) expandNames(i,labRow))
    }
  }
  image(1:w, 1:h, Img, xlab="", ylab="", col=GLYPH.COLORS, breaks=N.BREAKS, axes=FALSE, ...)
  if(!is.null(labels)) {
    axis(1, 1:w, labels = labCol, las = 2, line = -0.5, tick = 0, cex.axis = cex)
    axis(4, 1:h, labels = labRow, las = 2, line = -0.5, tick = 0, cex.axis = cex)
  }
}
# Build the breakpoint vector for one band of color bins.
#
# MAX, MIN    range of the statistic split into N equal-width shade bins.
# N           number of shade bins between MIN and MAX.
# MOST, LEAST sentinel outer breakpoints appended below/above the shade bins.
#
# Returns a numeric vector of length N + 2:
#   c(LEAST, MIN, MIN + th, ..., MIN + (N-1)*th, MOST), th = (MAX - MIN)/N.
makeBreaks <- function(MAX=1, MIN=0, N=15, MOST=1, LEAST=0) {
  th <- (MAX-MIN)/N ## bin width for above and below bin threshold
  # Vectorized form of MIN + i*th for i = 0..N-1; seq_len() is also safe for
  # N = 0 (the original sapply over 0:(N-1) would mis-iterate over c(0, -1)).
  c(LEAST, MIN + (seq_len(N) - 1) * th, MOST)
}
# Compute the representative value ("offset") of each color bin: the midpoint
# of each adjacent pair of breakpoints, plus one extra copy of the final
# midpoint so values beyond the maximum break still map to the top shade.
#
# breaks numeric vector of breakpoints (from makeBreaks()).
# N      number of shade bins.
#
# Returns a numeric vector of length N + 2.
makeOffsets <- function(breaks, N=15) {
  idx <- seq_len(N + 1)
  mids <- (breaks[idx] + breaks[idx + 1]) / 2
  c(mids, mids[N + 1])
}
# Expand a class matrix into a glyph matrix: each cell of CLS becomes a
# 2x2 block rendered by toGlyph().
#
# CLS matrix of class-plus-offset codes.
# pad NOTE(review): currently unused in this body; kept for compatibility.
# bg  background value placed in empty glyph quadrants.
#
# Returns a matrix of dimensions (2*nrow(CLS)) x (2*ncol(CLS)).
expandCls <- function(CLS, pad=FALSE, bg=NA) {
  G <- matrix(bg, nrow(CLS) * 2, ncol(CLS) * 2)
  # seq_len() (vs the original 0:(n-1)) iterates zero times for an empty
  # matrix instead of producing the bogus index pair c(0, -1).
  for (i in seq_len(nrow(CLS))) {
    for (j in seq_len(ncol(CLS))) {
      gly <- toGlyph(CLS[i, j], bg)
      G[(i * 2 - 1):(i * 2), (j * 2 - 1):(j * 2)] <- gly
    }
  }
  G
}
### Render a single CLS+offset value as a 2x2 glyph quadrant pattern.
## The integer part of z selects the dependency class; each class fills a
## distinct subset of the four quadrants with z, the rest with bg.
# class codes: 0=none, 1=HIH, 2=PC, 3=LIL, 4=UNL, 5=HIL, 6=NC, 7=LIH
toGlyph <- function(z, bg=NA) {
  if (z >= 0 && z < 8) {
    cells <- switch(floor(z) + 1,
      c(bg, bg, bg, bg),  # 0: NA (no significant dependency)
      c(z, z, z, bg),     # 1: HIH (high x implies high y)
      c(bg, z, z, bg),    # 2: PC (positive correlation)
      c(bg, z, z, z),     # 3: LIL (low x implies low y)
      c(z, z, z, z),      # 4: UNL (unspecified non-linear)
      c(z, z, bg, z),     # 5: HIL (high x implies low y)
      c(z, bg, bg, z),    # 6: NC (negative correlation)
      c(z, bg, z, z)      # 7: LIH (low x implies high y)
    )
    matrix(cells, nrow = 2)
  } else {
    # Out-of-range class code: same NaN fall-through as the original.
    NaN
  }
}
# Interleave glyph labels with blanks: odd positions i carry the label of the
# ((i+1)/2)-th glyph, even positions are empty strings (glyphs are 2 cells wide).
expandNames <- function(i, name.list) {
  if (i %% 2 == 0) {
    ""
  } else {
    name.list[(i + 1) / 2]
  }
}
|
554ac7f1010aa498c934bbcfd0f2901ddb91fc02
|
999bae91d5c15cd8f5371468a89a977c2cb36cbc
|
/man/doMitosis.Rd
|
2dc83cc8bd030f45bd510ff1bd73dec91f3694b3
|
[] |
no_license
|
ataudt/CINsim
|
1311be6c4a5e28206366a07074031b540841c097
|
4f21df879b275a7e33ce45236fd408c7635bd185
|
refs/heads/master
| 2021-01-22T01:10:45.500354
| 2017-09-03T17:41:55
| 2017-09-03T17:41:55
| 102,203,380
| 0
| 0
| null | 2017-09-02T14:49:19
| 2017-09-02T14:49:19
| null |
UTF-8
|
R
| false
| true
| 798
|
rd
|
doMitosis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doMitosis.R
\name{doMitosis}
\alias{doMitosis}
\title{Perform a round of mitosis with possible mis-segregations}
\usage{
doMitosis(karyotype, pMisseg, cellID)
}
\arguments{
\item{karyotype}{A karyotype provided as a vector on which to apply the mitosis.}
\item{pMisseg}{The probability of mis-segregation per chromosome copy number.}
\item{cellID}{The cell ID of the provided karyotype to ensure faithful clone tracking.}
}
\value{
A 2-row matrix with karyotypes from daughter cells.
}
\description{
This function will perform mitosis on a given karyotype, apply chromosome mis-segregation events if the criteria are met, and return a matrix with two new karyotypes.
}
\author{
Bjorn Bakker
}
|
f5e9656ed69b7a57974243ad9c92060285ef59ce
|
069b5c0f325197dfbcc6584288a70ae7c51c8b81
|
/RegressionModel/Quiz3.R
|
679cda37a55667d157ae941f1830e247276b0132
|
[] |
no_license
|
JohanLi1990/DataScienceCoursera
|
3db341c5a2074f9bc25121a699a0374026fdf233
|
99aa5574e530b6f9b9dd9bf208008e2c845625ae
|
refs/heads/master
| 2021-01-23T13:44:02.512726
| 2017-12-25T00:40:32
| 2017-12-25T00:40:32
| 38,518,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 725
|
r
|
Quiz3.R
|
# Regression Models (Coursera) -- Quiz 3 worked answers on mtcars.

#Question 1: cylinder effect on mpg, adjusted for weight
data("mtcars")
fit_cars <- lm(mpg ~ factor(cyl) + wt, mtcars)
summary(fit_cars)$coef
#-6.0709  (8-cyl vs 4-cyl coefficient, holding weight constant)
#Question 2: same comparison without the weight adjustment
fit_noWT <- lm(mpg ~ factor(cyl), mtcars)
summary(fit_noWT)$coef
#Question 3: does adding cylinder-by-weight interaction help?
fit1<- fit_cars
fit2<- lm(mpg ~ factor(cyl) + wt + factor(cyl)*wt, mtcars)
summary(fit1)$coef
summary(fit2)$coef
#a better way is to use model comparing method.
anova(fit1, fit2)
#p value = 0.1239 > 0.05, so we FAIL to reject the null hypothesis;
#the interaction terms are not needed and the simpler model is preferred.
#Question 4: weight rescaled by 0.5 (halves the regressor, doubling its coefficient)
fit4<-lm(mpg ~ I(wt * 0.5) + factor(cyl), data = mtcars)
summary(fit4)$coef
#Question 5: leverage of the outlying point x = 11.72
x <- c(0.586, 0.166, -0.042, -0.614, 11.72)
y <- c(0.549, -0.026, -0.127, -0.751, 1.344)
fit5<-lm(y~x)
hatvalues(fit5)
#0.9946
#Question 6: influence of each point on the slope estimate
dfbetas(fit5)
#-133.82261293
#Question 7
|
1a820c6b7ac00e783f849dffdd1c8d9139002607
|
7b8478fa05b32da12634bbbe313ef78173a4004f
|
/man/partition_group.Rd
|
6a03332ac952ca6e173287bf96311dc1dcd2cfe0
|
[] |
no_license
|
jeblundell/multiplyr
|
92d41b3679184cf1c3a637014846a92b2db5b8e2
|
079ece826fcb94425330f3bfb1edce125f7ee7d1
|
refs/heads/develop
| 2020-12-25T18:02:10.156393
| 2017-11-07T12:48:41
| 2017-11-07T12:48:41
| 58,939,162
| 4
| 1
| null | 2017-11-07T12:01:35
| 2016-05-16T14:30:38
|
R
|
UTF-8
|
R
| false
| true
| 1,087
|
rd
|
partition_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nse.R, R/ops.R
\name{partition_group}
\alias{partition_group}
\alias{partition_group_}
\title{Partition data so that each group is wholly on a node}
\usage{
partition_group(.self, ...)
partition_group_(.self, ..., .dots)
}
\arguments{
\item{.self}{Data frame}
\item{...}{Additional parameters}
\item{.dots}{Workaround for non-standard evaluation}
}
\value{
Data frame
}
\description{
Partitions data across the cluster such that each group is wholly
contained on a single node.
}
\details{
This should not typically be called explicitly; group_by achieves the same
thing. Generally speaking it would be fairly pointless to group things and
then not have each group fully accessible, but it is theoretically possible
to do so (use \code{group_by(..., auto_partition=FALSE)}).
}
\examples{
\donttest{
dat <- Multiplyr (x=1:100, G=rep(c("A", "B", "C", "D"), each=25))
dat \%>\% partition_group (G)
dat \%>\% shutdown()
}
}
\seealso{
Other cluster functions: \code{\link{partition_even}},
\code{\link{shutdown}}
}
|
c43d04a80188f1e5b01b0f69f439c517932cb162
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rredis/examples/redisRPopLPush.Rd.R
|
c9e8a783f1a415e236f93c3a68959601c251f303
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 322
|
r
|
redisRPopLPush.Rd.R
|
library(rredis)
### Name: redisRPopLPush
### Title: Remove the tail from a list, pushing to another.
### Aliases: redisRPopLPush
### ** Examples
## Not run:
##D redisConnect()
##D redisLPush('x',1)
##D redisLPush('x',2)
##D redisLPush('x',3)
##D redisRPopLPush('x','x')
##D redisRPopLPush('x','x')
## End(Not run)
|
94b8285b1d9309d4317f1ee74d941712cc2cf02e
|
15b683ee0db7721bafbe55a7bc7ec77618492957
|
/server.R
|
6ea7f9948f6daf566d1c0d1db31afdbb3241a37c
|
[] |
no_license
|
pssguy/worldPopulation
|
b9f62cc19e00f8a41acbb9e2ebe56ade5f8bcb62
|
afdd89be425e852fd0e64d89ba31fae6f2ba4cbd
|
refs/heads/master
| 2021-01-10T04:45:44.853127
| 2015-06-07T00:04:54
| 2015-06-07T00:04:54
| 36,402,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 379
|
r
|
server.R
|
# Shiny server: all reactive logic lives in the sourced files below. Each is
# evaluated with local=TRUE in the server function's scope, so the sourced
# code shares this input/output/session environment.
shinyServer(function(input, output,session) {
  source("code/charts.R", local=TRUE)
  #source("code/maps.R", local=TRUE)   # disabled -- presumably superseded by maps2015.R; confirm
  source("code/maps2015.R", local=TRUE)
  source("code/tables.R", local=TRUE)
  source("code/worldPop.R", local=TRUE)
  source("code/streamFront.R", local=TRUE)
  source("code/countryPop.R", local=TRUE)
  source("code/maxFront.R", local=TRUE)
})
|
8668157f1b8ba9e12ca29c0cd992f878c1018574
|
d4eb16fea7caf5f560d447d2c987e7a4f502ef73
|
/man/tbl_last_name.Rd
|
e616fa0412d420f5004a0d903154496a7ee31cc6
|
[] |
no_license
|
abresler/entities
|
640334c6e1c145bce1bb13058744dd98949962bd
|
bb7bc0e721032f1126dd95d0127d9e383f7f1867
|
refs/heads/master
| 2023-08-08T05:07:36.241883
| 2023-07-19T13:09:15
| 2023-07-19T13:09:15
| 196,063,861
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 490
|
rd
|
tbl_last_name.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ethnicity_parser.R
\name{tbl_last_name}
\alias{tbl_last_name}
\title{Parse name column into parts}
\usage{
tbl_last_name(
data,
name_column = "namePrincipalInvestigator",
include_name_type = F,
snake_names = F,
return_only_names = F
)
}
\arguments{
\item{return_only_names}{}
}
\description{
Parse name column into parts
}
\examples{
library(dplyr)
starwars \%>\% tbl_last_name(name_column = "name")
}
|
49cfed9ee345988b3dcfe7189c96c32ea40a425a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/formula.tools/R/parts.rhs.get.R
|
22496b35110c9848e9403accf30dc405b6714135
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
parts.rhs.get.R
|
# -----------------------------------------------------------------------------
# rhs
# extract and manipulate the right-hand side of R objects
# -----------------------------------------------------------------------------
#' @name rhs
#' @rdname formula.parts
#' @export rhs
setGeneric( 'rhs', function(x, ...) standardGeneric( 'rhs' ) )
# -------------------------------------
# SINGULAR
# -------------------------------------
#' @rdname formula.parts
#' @aliases .rhs.singular
# Extract the right-hand side of a single (non-plural) call/formula object.
# Two-sided objects (y ~ x) keep their rhs in element 3; one-sided objects
# (~ x) in element 2. If the object is neither, the if-chain falls through
# and the function returns NULL invisibly.
.rhs.singular <- function(x) {
  # x[[1]] is the operator symbol (e.g. `~`); anything else is not splittable.
  if( ! is.operator( x[[1]] ) ) stop( x[[1]], " does not appear to be an operator." )
  if( is.two.sided(x) )
    x[[3]] else
  if( is.one.sided(x) )
    x[[2]]
}
#' @rdname formula.parts
#' @aliases rhs,call-method
setMethod( 'rhs', 'call', .rhs.singular )
#' @rdname formula.parts
#' @aliases rhs,formula-method
setMethod( 'rhs', 'formula', .rhs.singular )
#' @rdname formula.parts
#' @aliases rhs,<--method
setMethod( 'rhs', '<-', function(x) x[[3]] )
# -------------------------------------
# PLURAL
# -------------------------------------
#' @rdname formula.parts
#' @aliases rhs,expression-method
# Plural case: apply rhs() to every element of an expression vector.
# Elements whose rhs is NULL are left as the default (empty) entry.
setMethod( 'rhs', 'expression',
  function(x,...) {
    # Preallocate an expression vector of the same length as the input.
    ret <- vector( 'expression', length(x) )
    # seq_along() handles empty expressions; the original 1:length(x)
    # produced c(1, 0) and indexed out of bounds when length(x) == 0.
    for( i in seq_along(x) ) {
      rh <- rhs( x[[i]] )
      if( ! is.null( rh ) ) ret[[i]] <- rh
    }
    ret
  }
)
#' @rdname formula.parts
#' @aliases rhs,list-method
setMethod( 'rhs', 'list', function(x,...) lapply( x, rhs, ... ) )
|
1731ab741b6c42f9d8aac835fed6a0278c990f6f
|
ea0d9ecd125ee8765a7073fc7ce7853711be855a
|
/old/neuro.R
|
6dda68d9dc7d7af12e6c80ceb261e742c6c44633
|
[
"MIT"
] |
permissive
|
mattocci27/TurnoverBCImain
|
c94737f77a0a581c8f0a83a973bb7ae2dddd1449
|
cc3c0317243daa6e44c46d6fbc65d81e03f7405c
|
refs/heads/master
| 2021-01-19T05:06:43.153545
| 2017-08-03T17:15:19
| 2017-08-03T17:15:19
| 46,080,402
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,427
|
r
|
neuro.R
|
# Exploratory neural-net fits for the BCI turnover data, plus two small
# sanity checks (simulated linear data and the rock dataset).
rm(list = ls()) # This clears everything from memory.
# NOTE(review): rm(list = ls()) and setwd() are fragile outside a personal
# analysis script; kept because this script depends on them.
setwd("~/Dropbox/BCI_Turnover")
load("BCI_turnover20150611.RData")  # provides D20m, trait (presumed; confirm)
source("~/Dropbox/MS/TurnoverBCI/TurnoverBCImain/source.R")
library(dplyr)
library(ggplot2)
library(nnet)
# Per-species column sums across the D20m census list
ab_data <- as.data.frame(sapply(D20m,function(x)apply(x,2,sum)))
ab_data$sp <- rownames(ab_data)
# Subset of trait columns keyed by species name
trait.temp <- data.frame(sp=rownames(trait),
                         moist=trait$Moist,
                         slope=trait$sp.slope.mean,
                         slope.sd = trait$sp.slope.sd,
                         convex=trait$sp.convex.mean,
                         convex.sd=trait$sp.convex.sd,
                         WSG=trait$WSG)
ab_t_data <- merge(ab_data,trait.temp,by="sp")
ab_t_data2 <- na.omit(ab_t_data)
ab_t_data2
# Train/test split by row position
temp1 <- ab_t_data[1:150, -1] %>% na.omit
temp2 <- ab_t_data[151:312, -1] %>% na.omit
# BUG FIX: nnet's argument is `linout` (linear output units), not `lineout`;
# the misspelled name was silently swallowed by `...`, so the model was
# actually fit with the default logistic output.
temp_nnet <- nnet(census_2010 ~ census_1982 + moist + slope + convex + WSG, size = 5, data = temp1, linout = TRUE)
temp_pred <- predict(temp_nnet, temp2)
predict(temp_nnet)
# Simulated linear data to sanity-check nnet behaviour
xx <- rnorm(100)
xx1 <- rnorm(100)
yy <- rnorm(100, 2 * xx - xx1 - 1)
dat <- data.frame(xx,xx1, yy)
temp1 <- dat[1:50,]
temp1 <- rbind(temp1, temp1)  # duplicate the training rows
temp2 <- dat[51:100,]
# BUG FIX: `lineout` -> `linout` here as well; T/F spelled out as TRUE/FALSE.
temp_nnet <- nnet(yy ~ xx + xx1, size = 2,
                  data = temp1, linout = TRUE, skip = TRUE,
                  decay=1e-3, Hess = TRUE)
predict(temp_nnet)
temp_pred <- predict(temp_nnet, temp2)
plot(temp2[,"yy"], temp_pred[,1])
# NOTE(review): `rock1` is not created anywhere above -- possibly the built-in
# `rock` dataset was intended; confirm before running.
rock.nn <- nnet(log(perm) ~ area + peri + shape, rock1,
                size=3, decay=1e-3, linout=T, skip=T, maxit=1000, Hess=T)
predict(rock.nn)
|
44584e97881ae734b36c301b04b58d8c0e2c8b34
|
139e93dc5ad1f30938195671caf4aefce99f188d
|
/man/reduce_right.Rd
|
f3478e4f640ad9405df8210dfb1f8190678c458f
|
[
"MIT"
] |
permissive
|
tidyverse/purrr
|
7b94592b1eb6f4e6db8d83fc307465ce7b65b520
|
ac4f5a9b9ff2b5b36770c4c5e064547264544fd2
|
refs/heads/main
| 2023-08-28T01:39:40.614443
| 2023-08-10T14:13:52
| 2023-08-10T14:13:52
| 27,309,729
| 901
| 265
|
NOASSERTION
| 2023-09-03T11:49:30
| 2014-11-29T17:33:40
|
R
|
UTF-8
|
R
| false
| true
| 2,266
|
rd
|
reduce_right.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reduce.R
\name{reduce_right}
\alias{reduce_right}
\alias{reduce2_right}
\alias{accumulate_right}
\title{Reduce from the right (retired)}
\usage{
reduce_right(.x, .f, ..., .init)
reduce2_right(.x, .y, .f, ..., .init)
accumulate_right(.x, .f, ..., .init)
}
\arguments{
\item{.x}{A list or atomic vector.}
\item{.f}{For \code{reduce()}, a 2-argument function. The function will be passed
the accumulated value as the first argument and the "next" value as the
second argument.
For \code{reduce2()}, a 3-argument function. The function will be passed the
accumulated value as the first argument, the next value of \code{.x} as the
second argument, and the next value of \code{.y} as the third argument.
The reduction terminates early if \code{.f} returns a value wrapped in
a \code{\link[=done]{done()}}.}
\item{...}{Additional arguments passed on to the mapped function.
We now generally recommend against using \code{...} to pass additional
(constant) arguments to \code{.f}. Instead use a shorthand anonymous function:
\if{html}{\out{<div class="sourceCode R">}}\preformatted{# Instead of
x |> map(f, 1, 2, collapse = ",")
# do:
x |> map(\\(x) f(x, 1, 2, collapse = ","))
}\if{html}{\out{</div>}}
This makes it easier to understand which arguments belong to which
function and will tend to yield better error messages.}
\item{.init}{If supplied, will be used as the first value to start
the accumulation, rather than using \code{.x[[1]]}. This is useful if
you want to ensure that \code{reduce} returns a correct value when \code{.x}
is empty. If missing, and \code{.x} is empty, will throw an error.}
\item{.y}{For \code{reduce2()} and \code{accumulate2()}, an additional
argument that is passed to \code{.f}. If \code{init} is not set, \code{.y}
should be 1 element shorter than \code{.x}.}
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}
These functions were deprecated in purrr 0.3.0. Please use the
\code{.dir} argument of \code{\link[=reduce]{reduce()}} instead, or reverse your vectors
and use a left reduction.
}
\keyword{internal}
|
cfae9b49eaa053bbf866966047fd787007ffef81
|
c6da4424c172b71477fe5c8420d0ab69d3b277cf
|
/R/Other.classifier.R
|
50117c60e9f55868edaaa6cdfaa9893c9c6cff93
|
[] |
no_license
|
cobrbra/RPEnsemble
|
68702aaf1bca8bf605407f93cabfec6ee0f1feef
|
d1161d8e9352a98eb0cbb1a224db91f420edd66f
|
refs/heads/main
| 2023-08-26T13:17:08.488723
| 2021-10-09T12:03:53
| 2021-10-09T12:03:53
| 337,134,340
| 0
| 0
| null | 2021-02-08T16:18:04
| 2021-02-08T16:18:03
| null |
UTF-8
|
R
| false
| false
| 1,026
|
r
|
Other.classifier.R
|
#' The user's favourite classifier
#'
#' @description User defined code to convert existing R code for classification to the correct format.
#'
#' @param x An n by p matrix containing the training dataset.
#' @param grouping A vector of length n containing the training data classes.
#' @param xTest An n.test by p test dataset.
#' @param CV If TRUE perform cross-validation (or otherwise) to classify training set. If FALSE, classify test set.
#' @param ... Optional arguments e.g. tuning parameters
#'
#' @details User editable code for your choice of base classifier.
#'
#' @return A vector of classes of the training or test set.
#' @export
#'
#' @examples
#' Other.classifier(NULL, NULL, NULL)
Other.classifier <- function(x,
                             grouping,
                             xTest,
                             CV = FALSE,
                             ... ) {
  # Write code for choice of base classifier that returns vector of predicted classes of the training or test set as variable class.
  # NOTE: as shipped this is a stub -- the body is empty, so the function
  # currently returns NULL (invisibly) for any input until you fill it in.
}
|
1066df7c8c1547f7ad8ca22b3ae10339d0a798a6
|
bc7120d924070eb3a482923f20e7507dcae0a751
|
/simulation/n2_02/sum2_20191002.R
|
4f494e12bb52a6c8a91ae8174c76a88962866b7c
|
[] |
no_license
|
dragontaoran/proj_two_phase_mexy
|
803ec584f2eb260ec48901126cc933032ce8edd1
|
ccbf3365b6285e0bc6869f3c907703f2a8217760
|
refs/heads/master
| 2022-12-14T01:16:51.212254
| 2020-09-12T17:06:18
| 2020-09-12T17:06:18
| 159,406,581
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,584
|
r
|
sum2_20191002.R
|
# Summarise simulation results into sum2_20191002.tab: one row per phase-two
# sample size n2, reporting Bias / SE / SEE / CP for three estimators
# (MBE = SY2011, UAEE = CC2000, SMLE) plus relative efficiencies (RE).
n2 = c(50, 100, 200, 300)  # phase-two sample sizes
hn = c(40, 30, 10, 1)      # smoothing parameter paired with each n2 (SMLE runs)
beta = 0.4                 # true parameter value
z975 = qnorm(0.975)        # 95% confidence interval multiplier
# NOTE(review): `nrow` shadows base::nrow(); calls like nrow(results) below
# still resolve to the function, but the name is best avoided.
nrow = length(n2)
fn_out = paste0("sum2_20191002.tab")
# Write the two header lines first; the numeric table is appended at the end.
sink(fn_out)
cat("\tMBE\t\t\t\t\t\tUAEE\t\t\t\t\t\tSMLE\t\t\t\n")
cat("n2\tBias\tSE\tSEE\tCP\tRE\t\tBias\tSE\tSEE\tCP\tRE\t\tBias\tSE\tSEE\tCP\n")
sink()
# 17 columns; columns 7 and 13 stay NA as blank separators between estimators.
res = matrix(NA, nrow=nrow, ncol=17)
i = 1
for (k in 1:4) {
res[i,1] = n2[k]
# --- MBE (SY2011) --- each .RData provides `results`:
# column 1 = point estimates, column 2 = estimated standard errors (presumed;
# confirm against the simulation scripts).
prefix = paste0("SY2011_n2_", n2[k])
load(paste0("results/", prefix, ".RData"))
print(nrow(results))
res[i,2] = mean(results[,1])-beta  # bias
res[i,3] = sd(results[,1])         # empirical SE
res[i,4] = mean(results[,2])       # mean estimated SE (SEE)
# coverage probability of the Wald 95% CI
res[i,5] = mean((results[,1]-z975*results[,2] <= beta) & (results[,1]+z975*results[,2] >= beta))
# --- UAEE (CC2000) ---
prefix = paste0("CC2000_n2_", n2[k])
load(paste0("results/", prefix, ".RData"))
print(nrow(results))
res[i,8] = mean(results[,1])-beta
res[i,9] = sd(results[,1])
res[i,10] = mean(results[,2])
res[i,11] = mean((results[,1]-z975*results[,2] <= beta) & (results[,1]+z975*results[,2] >= beta))
# --- SMLE ---
prefix = paste0("n2_", n2[k], "_hn", hn[k])
load(paste0("results/", prefix, ".RData"))
print(nrow(results))
res[i,14] = mean(results[,1])-beta
res[i,15] = sd(results[,1])
res[i,16] = mean(results[,2])
res[i,17] = mean((results[,1]-z975*results[,2] <= beta) & (results[,1]+z975*results[,2] >= beta))
# RE columns: squared ratio of the SMLE empirical SE to each competitor's SE.
res[i,6] = (res[i,15]/res[i,3])^2
res[i,12] = (res[i,15]/res[i,9])^2
i = i+1
}
write.table(res, file=fn_out, append=TRUE, quote=FALSE, col.names=FALSE, row.names=FALSE, sep="\t", na="")
|
fc372429081a7d428488bad765becc2f7a813249
|
032d7c101fbe2f572d34845353813d806331ba27
|
/tests/testthat/module_examples/mappingSelect/app.R
|
e0b0382c1de0a0c4e1b0169e2a90c241092063f9
|
[
"MIT"
] |
permissive
|
SafetyGraphics/safetyGraphics
|
1fb022b3e6825395ac87475a88fc87bdf6c7a842
|
f602cdc2068e7b290513c9accad7cf351775ce4e
|
refs/heads/dev
| 2023-06-21T11:37:46.817097
| 2023-05-08T15:20:57
| 2023-05-08T15:20:57
| 145,888,064
| 83
| 20
|
NOASSERTION
| 2023-06-16T17:08:28
| 2018-08-23T17:45:06
|
R
|
UTF-8
|
R
| false
| false
| 1,853
|
r
|
app.R
|
# Demo app for the mappingSelect shiny module: five UI/server pairs covering
# column selects and field selects, with and without (valid) defaults. Each
# example prints the module's reactive return value below its widget.
library(shiny)
library(safetyGraphics)
ui <- tagList(
  # Attach the test stylesheet from www/index.css
  tags$head(
    tags$link(
      rel = "stylesheet",
      type = "text/css",
      href = "index.css"
    )
  ),
  fluidPage(
    h2("Example 1: Column select - No Default"),
    mappingSelectUI("NoDefault","Subject ID", names(safetyData::adam_adae)),
    h3("Module Output"),
    verbatimTextOutput("ex1"),
    h2("Example 2: Column Select - With default"),
    mappingSelectUI("WithDefault", "Subject ID", names(safetyData::adam_adae), "USUBJID"),
    h3("Module Output"),
    verbatimTextOutput("ex2"),
    h2("Example 3: Field select - No Default"),
    mappingSelectUI("NoDefaultField","Body System - Cardiac Disorders", unique(safetyData::adam_adae$AEBODSYS)),
    h3("Module Output"),
    verbatimTextOutput("ex3"),
    h2("Example 4: Field Select - With default"),
    mappingSelectUI("WithDefaultField", "Body System - Cardiac Disorders", unique(safetyData::adam_adae$AEBODSYS), "CARDIAC DISORDERS"),
    verbatimTextOutput("ex4"),
    # Example 5 passes a default that is not among the choices ("...DISORDERZ")
    # to exercise the module's invalid-default handling.
    h2("Example 5: Field Select - With invalid default"),
    mappingSelectUI("WithInvalidDefault", "Body System - Cardiac Disorders", unique(safetyData::adam_adae$AEBODSYS), "CARDIAC DISORDERZ"),
    verbatimTextOutput("ex5")
  )
)
server <- function(input,output,session){
  # One callModule() per UI instance; each returns a reactive that is printed.
  ex1<-callModule(mappingSelect, "NoDefault")
  output$ex1<-renderPrint(ex1())
  ex2<-callModule(mappingSelect, "WithDefault")
  output$ex2<-renderPrint(ex2())
  ex3<-callModule(mappingSelect, "NoDefaultField")
  output$ex3<-renderPrint(ex3())
  ex4<-callModule(mappingSelect, "WithDefaultField")
  output$ex4<-renderPrint(ex4())
  ex5<-callModule(mappingSelect, "WithInvalidDefault")
  output$ex5<-renderPrint(ex5())
}
shinyApp(ui, server)
|
b7f114ca2e72664b9f999afe546a713b09041627
|
3aef5a679c390d1f2c7ecba35eca09864164c5a5
|
/man-roxygen/example-CohortDtstmTrans.R
|
909badc0ecdcd9233c66a6cb906edddd649d439e
|
[] |
no_license
|
jeff-m-sullivan/hesim
|
576edfd8c943c62315890528039366fe20cf7844
|
fa14d0257f0d6d4fc7d344594b2c4bf73417aaf3
|
refs/heads/master
| 2022-11-14T07:35:15.780960
| 2022-09-02T03:13:49
| 2022-09-02T03:13:49
| 140,300,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,817
|
r
|
example-CohortDtstmTrans.R
|
## Example code for CohortDtstmTrans: the same strategies/patients modeled
## two ways -- (1) via a multi-state model fit with "msm", and (2) via
## manually entered parameters in a "params_mlogit_list" object.
library("msm")
library("data.table")
set.seed(101)

# We consider two examples that have the same treatment strategies and patients.
# One model is parameterized by fitting a multi-state model with the "msm"
# package; in the second model, the parameters are entered "manually" with
# a "params_mlogit_list" object.

# MODEL SETUP
strategies <- data.table(
  strategy_id = c(1, 2, 3),
  strategy_name = c("SOC", "New 1", "New 2")
)
patients <- data.table(patient_id = 1:2)
hesim_dat <- hesim_data(
  strategies = strategies,
  patients = patients
)

# EXAMPLE #1: msm
## Fit multi-state model with panel data via msm
qinit <- rbind(
  c(0, 0.28163, 0.01239),
  c(0, 0, 0.10204),
  c(0, 0, 0)
)
fit <- msm(state_id ~ time, subject = patient_id,
           data = onc3p[patient_id %in% sample(patient_id, 100)],
           covariates = list("1-2" =~ strategy_name),
           qmatrix = qinit)

## Simulation model
transmod_data <- expand(hesim_dat)
transmod <- create_CohortDtstmTrans(fit,
                                    input_data = transmod_data,
                                    cycle_length = 1/2,
                                    fixedpars = 2,
                                    n = 2)
transmod$sim_stateprobs(n_cycles = 2)

# EXAMPLE #2: params_mlogit_list
## Input data
transmod_data[, intercept := 1]
transmod_data[, new1 := ifelse(strategy_name == "New 1", 1, 0)]
transmod_data[, new2 := ifelse(strategy_name == "New 2", 1, 0)]

## Parameters
n <- 10
transmod_params <- params_mlogit_list(
  ## Transitions from stable state (stable -> progression, stable -> death)
  stable = params_mlogit(
    coefs = list(
      progression = data.frame(
        intercept = rnorm(n, -0.65, .1),
        new1 = rnorm(n, log(.8), .02),
        # BUG FIX: was rnorm(n, log(.7, .02)) -- the SD .02 was accidentally
        # passed as the *base* of log(); it belongs to rnorm(), as for new1.
        new2 = rnorm(n, log(.7), .02)
      ),
      death = data.frame(
        intercept = rnorm(n, -3.75, .1),
        new1 = rep(0, n),
        new2 = rep(0, n)
      )
    )
  ),
  ## Transition from progression state (progression -> death)
  progression = params_mlogit(
    coefs = list(
      death = data.frame(
        intercept = rnorm(n, 2.45, .1),
        new1 = rep(0, n),
        new2 = rep(0, n)
      )
    )
  )
)
transmod_params

## Simulation model
tmat <- rbind(c(0, 1, 2),
              c(NA, 0, 1),
              c(NA, NA, NA))
transmod <- create_CohortDtstmTrans(transmod_params,
                                    input_data = transmod_data,
                                    trans_mat = tmat, cycle_length = 1)
transmod$sim_stateprobs(n_cycles = 2)
\dontshow{
pb <- expmat(coef(fit)$baseline)[, , 1]
## From stable
b1 <- log(pb[1, 2]/(1 - pb[1, 2] - pb[1, 3]))
b2 <- log(pb[1, 3]/(1 - pb[1, 2] - pb[1, 3]))
exp(b1)/(1 + exp(b1) + exp(b2))
exp(b2)/(1 + exp(b1) + exp(b2))
### From progression
b <- qlogis(pb[2, 2])
}
|
0d9d628ceae8aa6c0cd4b73a94d64e9e0d069f55
|
fd2cd35a789adc3a1e4c83cd7798c6385118c068
|
/scripts_R_genericos/positividade/exploracao_positividade.R
|
cf4e198f402ec4cdf0c01ae91391bd0f35fa23b3
|
[] |
no_license
|
covid19br/central_covid
|
1ce07ad6a086304983aa97caee9243bfc37367ee
|
74e1ca39e4307fc43a8c510d7e98825ae1816d91
|
refs/heads/master
| 2023-05-11T14:45:02.202142
| 2023-03-22T21:20:51
| 2023-03-22T21:20:51
| 263,488,734
| 12
| 1
| null | 2020-07-10T18:42:29
| 2020-05-13T00:55:54
|
HTML
|
UTF-8
|
R
| false
| false
| 5,541
|
r
|
exploracao_positividade.R
|
## Exploratory analysis of COVID-19 RT-PCR test positivity vs. weekly case
## counts for Campinas and Florianopolis.
## Side effects: reads local CSVs, draws many plots, writes two PNG files.
library(plyr)
library(dplyr)
library(ggplot2)
library(zip)
library(aweek)
source("../../nowcasting/fct/get.last.date.R")
set_week_start("Sunday")
################################################################################
## Campinas
################################################################################
dir.dados <- "../../dados/municipio_campinas/casos_totais/"
dados <- read.csv2(paste0(dir.dados,"Juncao_", get.last.date(dir.dados),".csv"))
with(dados, table(TIPO_TESTE, BANCO))
## Positivity per week, E-SUS and SIVEP
## Total number of confirmed cases per week
n.casos <-
dados %>%
filter(SEM_PRI!="" & (CLASSI_FIN == "COVID" | PCR_SARS2 == "Detect\xe1vel" | RES_TESTE == "Positivo" )) %>%
mutate(semana = as.integer(gsub("SE ", "", SEM_PRI))) %>%
group_by(semana) %>%
summarise(N.casos = n())
## Week-over-week % change in cases (NA for the first week)
n.casos$dif.casos <- c(NA, diff(n.casos$N.casos)) / lag(n.casos$N.casos)
## E-SUS positivity: share of RT-PCR tests that came back positive
positividade <-
dados %>%
filter(SEM_PRI!="" & BANCO == "ESUS VE" & TIPO_TESTE == "RT-PCR") %>%
mutate(semana = as.integer(gsub("SE ", "", SEM_PRI))) %>%
group_by(semana) %>%
summarise(testados = n(), positivos = sum(RES_TESTE == "Positivo"), positividade = positivos/testados) %>%
merge( n.casos, by = "semana")
plot(positividade ~semana, data= positividade)
plot(dif.casos ~ positividade, data = positividade, subset = semana >10 & semana <41)
## Time series
par(mfrow = c(3,1))
plot(N.casos ~ semana, data= positividade, type ="b", ylab = "Total de casos")
plot(positividade ~ semana, data= positividade, type ="b", ylab = "Positividade Rt-PCR")
plot(testados ~ semana , data = positividade, type = "b", ylab = "Testes RT-PCR")
par(mfrow = c(1,1))
## Cases vs positivity, with and without lags
plot(N.casos ~ positividade, data = positividade, subset = semana >11 & semana <41)
plot(N.casos ~ lag(positividade, 5), data = positividade, subset = semana >11 & semana <41)
plot(N.casos ~ lag(positividade, 5), data = positividade)
plot(dif.casos ~ lag(positividade, 5), data = positividade, subset = semana >11 & semana <41)
## Trajectories in the cases x positivity space
## Without lag
positividade %>%
filter(semana > 11 & semana < 41) %>%
ggplot(aes(x = positividade, y = N.casos)) +
geom_point() +
geom_path(lty =2)
## With lag
positividade %>%
filter(semana > 11 & semana < 41) %>%
ggplot(aes(y = N.casos, x = lag(positividade,5))) +
geom_point() +
geom_path(lty =2)
## Number of positives vs number tested, labelled by week number; dashed
## reference lines mark 25/50/75% positivity
plot(positivos ~ testados, data = positividade, type = "n", ylab = "Positivos RT-PCR", xlab = "Testados RT-PCR")
text(labels = as.character(positividade$semana),
y= positividade$positivos,
x= positividade$testados,
adj = c(0.5, 0.5))
abline(0,.25, lty = 2, col = "green", lwd=1.5)
abline(0,.5, lty = 2, col = "orange", lwd=1.5)
abline(0,.75, lty = 2, col = "red", lwd = 1.5)
legend("topleft", c("25%", "50%", "75%"), bty = "n", title = "Positividade", lty=2, col =c("green", "orange", "red"))
################################################################################
## Floripa
################################################################################
floripa <- read.csv("covid_florianopolis.csv") %>%
mutate(data_primeiros_sintomas = as.Date(data_primeiros_sintomas),
semana = date2week(data_primeiros_sintomas, numeric = TRUE) )
positividade.fl <-
floripa %>%
filter(data_primeiros_sintomas > "2019-12-01" & data_primeiros_sintomas < as.Date(Sys.Date())) %>%
group_by(semana) %>%
summarise(N.casos = sum(classificacao_final=="CONFIRMAÇÃO CLÍNICO EPIDEMIOLÓGICO" |
classificacao_final== "CONFIRMAÇÃO LABORATORIAL"),
testados = sum(tipo_teste == "RT-PCR"),
positivos = sum(tipo_teste == "RT-PCR" & classificacao_final== "CONFIRMAÇÃO LABORATORIAL"),
positividade = positivos/testados)
## NOTE(review): the next four plots reference `positividade` (the Campinas
## table) even though they sit in the Floripa section, and `dif.casos` does
## not exist in `positividade.fl` -- this looks like a copy-paste slip from
## the Campinas block; confirm whether `positividade.fl` was intended.
plot(N.casos ~ positividade, data = positividade, subset = semana >11 & semana <41)
plot(N.casos ~ lag(positividade, 5), data = positividade, subset = semana >11 & semana <41)
plot(N.casos ~ lag(positividade, 5), data = positividade)
plot(dif.casos ~ lag(positividade, 5), data = positividade, subset = semana >11 & semana <41)
## Time series (written to PNG)
png("series_semanais_testes_casos_floripa.png", width = 600)
par(mfrow = c(3,1))
plot(N.casos ~ semana, data= positividade.fl, type ="b", ylab = "N de casos", xlab ="", main = "Casos Totais")
plot(positividade ~ semana, data= positividade.fl, type ="b", ylab = "Positividade", xlab ="", main = "Positvidade PCR")
plot(testados ~ semana , data = positividade.fl, type = "b", ylab = "N de testados", main = "RT-PCR realizados")
par(mfrow = c(1,1))
dev.off()
## Number of positives vs number tested (written to PNG)
png("testesXpositivos_floripa.png")
plot(positivos ~ testados, data = positividade.fl, type = "n", ylab = "N de Positivos",
xlab = "N de testados", main = "Testes RT-PCR em Florianópolis")
text(labels = as.character(positividade.fl$semana),
y= positividade.fl$positivos,
x= positividade.fl$testados,
adj = c(0.5, 0.5))
abline(0,.5, lty = 2, col = "orange")
abline(0,.25, lty = 2, col = "green")
abline(0,.75, lty = 2, col = "red")
legend("topleft", c("25%", "50%", "75%"), bty = "n", title = "Positividade", lty=2, col =c("green", "orange", "red"))
dev.off()
## On log scale (title says log, but no log transform is applied here)
positividade.fl %>%
ggplot(aes(x = positividade, y = N.casos)) +
geom_point() +
xlab("Positividade RT-PCR")
#geom_path(lty =2)
|
e263b0253838d841b9ad0ed7a30c81f23639b707
|
38d76982c3844b8dd57c5f51d5691fc803728a1a
|
/graphs/rscripts/overlay.R
|
04356db6b00498c21182ba68646f5cb80eeb9797
|
[] |
no_license
|
alfa07/FixCache
|
afd6a0ed865afe47329b63fe380ade66e7f0be9b
|
d1de1b96332fef6de06a8ca51c5772e094d5af73
|
refs/heads/master
| 2020-12-25T11:31:49.207985
| 2012-03-13T22:13:31
| 2012-03-13T22:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
overlay.R
|
#!/usr/bin/env Rscript
# Overlay monthly hit-rate curves (Month vs HitRate) for four projects,
# each read from a per-project CSV in the working directory.

rates <- read.csv("apache.csv", comment.char = "#")
maxmon <- max(rates$Month)

# Base panel: Apache (default plotting symbol, pch = 1), with a custom x axis.
plot(rates$Month, rates$HitRate, type = "p", ylim = range(0, 100),
     xlim = range(1, maxmon), xaxt = "n", ylab = "HitRate", xlab = "Month")
axis(at = rates$Month, side = 1)

# Overlay the remaining projects; the three copy-pasted stanzas in the
# original are collapsed into one data-driven loop (file, plotting symbol).
overlays <- list(c("postgres.csv", 8), c("volde.csv", 3), c("v8.csv", 2))
for (ov in overlays) {
  par(new = TRUE)  # draw on top of the existing panel
  d <- read.csv(ov[1], comment.char = "#")
  plot(d$Month, d$HitRate, type = "p", pch = as.integer(ov[2]),
       ylim = range(0, 100), xlim = range(1, maxmon),
       xaxt = "n", yaxt = "n", xlab = "", ylab = "")
}

legend(120, 25, legend = c("Apache", "Postgres", "Voldemort", "V8"),
       pch = c(1, 8, 3, 2))
|
cd684edeb6dc6c87127c9cc30bf9cf6abe7e652f
|
a593d96a7f0912d8dca587d7fd54ad96764ca058
|
/man/spark_integ_test_skip.Rd
|
d88123772c445edd9c118283c55f11ab2f1e1c58
|
[
"Apache-2.0"
] |
permissive
|
sparklyr/sparklyr
|
98f3da2c0dae2a82768e321c9af4224355af8a15
|
501d5cac9c067c22ad7a9857e7411707f7ea64ba
|
refs/heads/main
| 2023-08-30T23:22:38.912488
| 2023-08-30T15:59:51
| 2023-08-30T15:59:51
| 59,305,491
| 257
| 68
|
Apache-2.0
| 2023-09-11T15:02:52
| 2016-05-20T15:28:53
|
R
|
UTF-8
|
R
| false
| true
| 587
|
rd
|
spark_integ_test_skip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{spark_integ_test_skip}
\alias{spark_integ_test_skip}
\title{It lets the package know if it should test a particular functionality or not}
\usage{
spark_integ_test_skip(sc, test_name)
}
\arguments{
\item{sc}{Spark connection}
\item{test_name}{The name of the test}
}
\description{
It lets the package know if it should test a particular functionality or not
}
\details{
It expects a boolean to be returned. If TRUE, the corresponding test will be
skipped. If FALSE the test will be conducted.
}
|
18724dd4228fe40ffd13d1218fbac25a7352c1a7
|
8bc373f5ef5e4918d3f1e2d4a3ff3194bfbdfb20
|
/USAspending FY18 Contracts Analysis.R
|
bbaf2b763825b26815c67b2d70be5d0912404738
|
[] |
no_license
|
shunley42/ShunleyLearnsGit
|
94d2e0088d23aeee1099aceda2d4e171e5b4be27
|
776d004c4cf3ce04ae239a7666be723c693f00fa
|
refs/heads/master
| 2020-07-30T00:43:12.430153
| 2019-10-13T22:57:36
| 2019-10-13T22:57:36
| 210,022,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,878
|
r
|
USAspending FY18 Contracts Analysis.R
|
## Analysis of FY2018 US federal contract awards (USAspending archive):
## loads ~7 GB of CSVs, aggregates obligations by vendor/agency/PSC/NAICS,
## and drills into McKesson and Lockheed Martin.
library(tidyverse)
library(vroom)
# Create Working Directory Short Cuts
# NOTE(review): hard-coded absolute path + setwd() makes this machine-specific.
local_dir <- "/Users/samhunley/Desktop/2018_all_Contracts_Full_20191009"
setwd(local_dir)
# Loading that data
# vroom is fun, loads multiple csvs at once
# The data are the 2018 contracts archive from https://www.usaspending.gov/#/download_center/award_data_archive
# Quick warning! This is about 7 GB of data!
files <- fs::dir_ls(glob = "2018_all_Contracts_Full_20191010_*csv")
contracts18 <- vroom(files, delim = ",", col_select = c(award_id_piid, award_description,
product_or_service_code_description, action_date,
naics_description, awarding_agency_name,
awarding_sub_agency_name, recipient_name,
recipient_country_name, recipient_state_name,
recipient_city_name,
federal_action_obligation))
######
# FOR LOCKHEED ANALYSIS LATER
#####
# Lockheed Martin contracts awarded by SSA, saved before contracts18 is
# aggregated (the summarise below drops the row-level detail).
lockSSAcontracts <- contracts18 %>%
filter(awarding_agency_name == "SOCIAL SECURITY ADMINISTRATION (SSA)" &
recipient_name == "LOCKHEED MARTIN CORPORATION")
# saving to CSV just to make it easier to browse
write_csv(lockSSAcontracts, "lockheedSSAContracts.csv")
#####
#
#####
# Compressing the data by summarizing
# NOTE(review): contracts18 is overwritten in place here; the row-level data
# is gone for the rest of the script.
contracts18 <- contracts18 %>%
group_by(recipient_name, product_or_service_code_description, naics_description, awarding_agency_name,
awarding_sub_agency_name) %>%
summarise(fundsObligated = sum(federal_action_obligation))
# Saving the smaller dataset as an RData file to save time loading later
saveRDS(contracts18, "contracts18.RData")
# seeing what we're dealing with
contracts18 <- contracts18 %>%
arrange(desc(fundsObligated))
# So, we have several companies receiving awards from multiple entries, producing multiple entries
# I think the best approach is to get a Top 10, and then use the above dataset to make sense of it
vendors <- contracts18 %>%
group_by(recipient_name) %>%
summarise(fundsObligated = sum(fundsObligated)) %>%
filter(fundsObligated > 0) %>%
arrange(desc(fundsObligated))
# Saving these data, too, because that makes sense
saveRDS(vendors, "vendors18.RData")
vendors <- readRDS("vendors18.RData")
# Vendors with more than $2B obligated (32 of them, per the file name below).
vendorsLite <- vendors %>%
filter(fundsObligated > 2000000000)
write.csv(vendorsLite, "top32_Vendors.csv", row.names = FALSE)
# loading contract data
contracts18 <- readRDS("contracts18.RData")
# NOTE(review): the 8.4e9 cutoff presumably yields exactly the top 5 vendors
# for this data snapshot -- verify; a slice_max(n = 5) would be more robust.
vSmall <- vendors %>%
filter(fundsObligated > 8400000000)
top5 <- c(vSmall$recipient_name)
# loading just the top five
# NOTE(review): destructive overwrite -- non-top-5 recipient names are
# replaced with NA and then their rows dropped.
contracts18$recipient_name <- ifelse(contracts18$recipient_name %in% top5, contracts18$recipient_name, NA)
contracts18 <- contracts18 %>%
filter(!is.na(recipient_name))
# Obligations by NAICS / PSC for the top-5 vendors only.
naics <- contracts18 %>%
group_by(naics_description) %>%
summarise(fundsObligated = sum(fundsObligated)) %>%
arrange(desc(fundsObligated))
write.csv(naics, "naics18.csv", row.names = FALSE)
psc <- contracts18 %>%
group_by(product_or_service_code_description) %>%
summarise(fundsObligated = sum(fundsObligated))%>%
arrange(desc(fundsObligated))
write.csv(psc, "psc18.csv", row.names = FALSE)
# Looking at just MCKESSON CORPORATION because the rest of the top 5 are arms dealers or aerospace
mckesson <- contracts18 %>%
filter(recipient_name == "MCKESSON CORPORATION")
# Funding agencies
mckesson %>%
group_by(awarding_agency_name) %>%
summarise(fundsObligated = sum(fundsObligated))
# PSCs
mckesson %>%
group_by(product_or_service_code_description) %>%
summarise(fundsObligated = sum(fundsObligated))
# NAICs
mckesson %>%
group_by(naics_description) %>%
summarise(fundsObligated = sum(fundsObligated))
# NAICs to PSCs
test <- mckesson %>%
group_by(naics_description, product_or_service_code_description) %>%
summarise(fundsObligated = sum(fundsObligated))
# Looking at just LOCKHEED MARTIN CORPORATION because the rest of the top 5 are arms dealers or aerospace
lockheed <- contracts18 %>%
filter(recipient_name == "LOCKHEED MARTIN CORPORATION")
# Funding agencies
agencies <- lockheed %>%
group_by(awarding_agency_name) %>%
summarise(fundsObligated = sum(fundsObligated)) %>%
filter(fundsObligated > 0) %>%
arrange(desc(fundsObligated))
# SSA & NAICs/PSCs
ssa <- lockheed %>%
filter(awarding_agency_name == "SOCIAL SECURITY ADMINISTRATION (SSA)")
# NAICs
lockheed %>%
group_by(naics_description) %>%
summarise(fundsObligated = sum(fundsObligated))
# PSCs
lockheed %>%
group_by(product_or_service_code_description) %>%
summarise(fundsObligated = sum(fundsObligated)) %>%
arrange(desc(fundsObligated))
|
6ccb6dc29b0a8242bfdf5fc46aa2b1a2bf6d02a6
|
1381b8df20007b2fdda32a61f90031ca6a161d59
|
/Intro to R/workshop examples.R
|
371eda92316be9cb5edbbcaaa20d15717ae101e4
|
[] |
no_license
|
gridclub/r-summer-workshops-2016
|
02f13fdb45f6a99a749f8353e1fbdb74fb378af5
|
0f7867cf02df991fdb14115a9fe57cea9cc2bba7
|
refs/heads/master
| 2021-01-09T20:47:39.999744
| 2016-07-19T17:51:48
| 2016-07-19T17:51:48
| 63,630,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
workshop examples.R
|
library("Hmisc")

# --- FEV example -------------------------------------------------------------
getHdata(FEV)  # fetch the FEV teaching data set via Hmisc

mean(FEV$fev)
median(FEV$fev)
summary(FEV$fev)
plot(x=FEV$height, y=FEV$fev)
class(FEV$height)
class(FEV$sex)
class(FEV$smoke)
str(FEV)

# Height converted from inches to feet, rounded to 2 decimals.
FEV$height2<-round(FEV$height/12, digits = 2)
View(FEV)
plot(x=FEV$height2, y=FEV$fev)

# --- MTCARS example ----------------------------------------------------------
# Find dataset using the command - datasets::mtcars.
datasets::mtcars
# Use the help console to see a description of the variables
?datasets::mtcars
# Assign this data set the name cardata
cardata<-datasets::mtcars
# The variable wt gives us weight in lb/1000. Convert this variable to be the weigth in lbs.
cardata$wt<-cardata$wt*1000
# Plot y=mpg vs. x=wt (in lbs not lbs/1000)
# FIX: the original also ran `p1 <- plot(...)`, which drew the identical
# figure a second time and stored NULL (base plot() returns nothing useful).
plot(x=cardata$wt, y=cardata$mpg)

# Make a new data frame named carsubset that only includes the variables mpg, cyl and hp for the first 10 cars.
# FIX: built via idiomatic row/column subsetting instead of
# data.frame(cardata$mpg, ...), which produced awkward `cardata.mpg`-style
# column names and required manually restoring row names.
carsubset<-cardata[1:10, c("mpg", "cyl", "hp")]

# Find the mean mpg for the carsubset data.
mean(carsubset$mpg)
# make a histogram for cylinders.
hist(carsubset$cyl)

#In R markdown create a pdf document with the carsubset dataframe
#displayed as a nice table (use kable function in knitr package).
|
345da27e99a350284346249027dd4dca7cea5554
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044401-test.R
|
37bc2862f7e0932a7b194592eb32569d9993c59b
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
1610044401-test.R
|
# Auto-generated libFuzzer/valgrind regression input for gjam:::tnormRcpp.
# Replays one degenerate argument list (hi < lo, lo = NaN, extreme magnitudes)
# and prints the structure of whatever the compiled routine returns.
testlist <- list(hi = -1.33360288657597e+241, lo = NaN, mu = -7.47863579530838e+240, sig = -7.47863579530838e+240)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
57e92775de8e110670fcc8a653c8f0f553e71fc2
|
8482c80d84ac0ad76c32b2bbc93d8b0c4df0e90d
|
/subsetting.R
|
d02d4519ae32f3e6272b875566be689ff1cc5a6a
|
[] |
no_license
|
errpv78/Data_science_coursera
|
52b6881b4d85bd9c4b96bf2201d1b397dbd83bce
|
eb529f975c807e144cea2e54014d038130ef1136
|
refs/heads/master
| 2022-11-04T21:49:16.963371
| 2020-06-17T18:02:47
| 2020-06-17T18:02:47
| 272,050,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,797
|
r
|
subsetting.R
|
# Personal study notes on R subsetting ([, [[, $), NA handling and vectorized
# operations. The console transcripts below are stored as bare string-literal
# expressions so the file parses; nothing in them executes -- the strings are
# reference notes only (typos inside them are part of the stored text).
# Subsetting
"[ - returns an object of same class as original, can be used to
select more than 1 element
[[ - to extract elements of a list or df, can extract only a single
element and class of returned object not necessarily be a list or df
$ - to extract elements of list or df by name, semantics same as [[
> x = c('a', 'd', 'z', 'r', 'c')
> x[1] - numeric index
[1] 'a'
> x[2:4]
[1] 'd' 'z' 'r'
> x [x > 'c'] - logical index
[1] 'd' 'z' 'r'
> u = x < 'm'
> u
[1] TRUE TRUE FALSE FALSE TRUE
> x[u]
[1] 'a' 'd' 'c'
"
# Subsetting lists
"> v = list(p = 7:8, a = c('a', 'b'), foo=78)
> v
$p
[1] 7 8
$a
[1] 'a' 'b'
$foo
[1] 78
> v[1]
$p
[1] 7 8
> v[[2]]
[1] 'a' 'b'
> v$foo
[1] 78
> v['p']
$p
[1] 7 8
> v[['v']]
NULL
> v[['a']]
[1] 'a' 'b'
> v[3]
$foo
[1] 78
> v[c(1,3)]
$p
[1] 7 8
$foo
[1] 78
# [[]] can be usd with variables equal to names of obj but $ can't
> name = 'foo'
> v[[name]]
[1] 78
> v$name
NULL
# [[]] can be used to get single element at any position
> v[[c(1,2)]]
[1] 8
> v[[2]]
[1] 'a' 'b'
> v[[c(2, 1)]]
[1] 'a'
> v[[c(1, 1)]]
[1] 7
"
# Subsetting matrices
"> v = matrix(1:6, 3,2)
> v
[,1] [,2]
[1,] 1 4
[2,] 2 5
[3,] 3 6
> v[1,2]
[1] 4
> v[2,3]
Error in v[2, 3] : subscript out of bounds
> v[3,2]
[1] 6
> v[1,]
[1] 1 4
> v[,2]
[1] 4 5 6
# By default when 1 single element of matrix is retrieved it is
returned as a vector og length 1*1 than a matrix of 1*1, this
behaviour can be switched by detting drop = False
> v = matrix(1:8, 4,2)
> v[1, 2]
[1] 5
> v[1, 2, drop=F]
[,1]
[1,] 5
> v = matrix(1:8, 4,2)
> v[1, 2]
[1] 5
> v[1, 2, drop=F]
[,1]
[1,] 5
> v = matrix(1:6, 3,2)
> v[,2]
[1] 4 5 6
> v[,2, drop=F]
[,1]
[1,] 4
[2,] 5
[3,] 6
"
# Partial matching
"Partial matching of names is allowed with [[ and $
> x = list(parikh = 2:8)
> x[['p']]
NULL
> x[['p'', exact = F]]
[1] 2 3 4 5 6 7 8
> x$p
[1] 2 3 4 5 6 7 8
"
# Removing NA Values
"> x = c(1,2,NA, 4, NaN, 6)
> miss = is.na(x)
> x[!miss]
[1] 1 2 4 6
# When there are multiple things and we want to take subset with
no missing values
> p = c(1,2,NA, 4,5)
> v = c(6,NA,NA, 9,10)
> good = complete.cases(p, v)
> good
[1] TRUE FALSE FALSE TRUE TRUE
> p[good]
[1] 1 4 5
> v[good]
[1] 6 9 10
"
# Vectorized Operations
"> p = 1:5; v = 6:10
> p+v
[1] 7 9 11 13 15
> p-v
[1] -5 -5 -5 -5 -5
> p/v
[1] 0.1666667 0.2857143 0.3750000 0.4444444 0.5000000
> p*v
[1] 6 14 24 36 50
> p>v
[1] FALSE FALSE FALSE FALSE FALSE
> p>3
[1] FALSE FALSE FALSE TRUE TRUE"
"Matrix operations
> p = matrix(23:26,2,2); v = matrix(1:4, 2, 2);
# Element wise
> p * v
[,1] [,2]
[1,] 23 75
[2,] 48 104
> p/v
[,1] [,2]
[1,] 23 8.333333
[2,] 12 6.500000
# True matrix multiplication
> p %*% v
[,1] [,2]
[1,] 73 169
[2,] 76 176
"
|
783c8d0b0b5e91e8916e22a2fdd1566bf26ca2df
|
020f5a54a70d4790de6643cd6feaf809afcf1621
|
/man/estimateIGTProjection-methods.Rd
|
c830447d30a24e37460bef24f8d32bebab8b6a50
|
[
"Apache-2.0"
] |
permissive
|
hms-dbmi/EHRtemporalVariability
|
2377d8d5ae78777ad3edb64f2cfa39d679fbbd18
|
a1e2aa12b391b268d7000d93ce67da1bfc43c199
|
refs/heads/master
| 2021-06-08T01:44:27.951209
| 2021-05-31T11:51:28
| 2021-05-31T11:51:28
| 140,324,604
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,879
|
rd
|
estimateIGTProjection-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allGenerics.R, R/allMethods.R
\name{estimateIGTProjection}
\alias{estimateIGTProjection}
\alias{estimateIGTProjection,IGTProjection-method}
\alias{estimateIGTProjection,DataTemporalMap-method}
\title{Estimates an Information Geometric Temporal plot projection}
\usage{
estimateIGTProjection(
dataTemporalMap,
dimensions = 3,
startDate = NULL,
endDate = NULL,
embeddingType = "classicalmds"
)
\S4method{estimateIGTProjection}{DataTemporalMap}(
dataTemporalMap,
dimensions = 3,
startDate = NULL,
endDate = NULL,
embeddingType = "classicalmds"
)
}
\arguments{
\item{dataTemporalMap}{of class \code{DataTemporalMap} object.}
\item{dimensions}{\code{numeric} integer value indicating the number of dimensions
for the projection.}
\item{startDate}{a Date object indicating the date at which to start the analysis,
in case of being different from the first chronological date in the date column
(the default).}
\item{endDate}{a Date object indicating the date at which to end the analysis,
in case of being different from the last chronological date in the date column
(the default).}
\item{embeddingType}{the type of embedding to apply to the dissimilarity matrix of time batches
in order to obtain the non-parametric Statistical Manifold, from "classicalmds" and "nonmetricmds",
with "classicalmds" as default. "classicalmds" uses the base R stats::cmdscale function, while "nonmetricmds"
uses the MASS:isoMDS function. The returned stress format will depend on the selected embedding type:
"classicalmds" returns 1-GOF as returned by stats::cmdscale function, "nonmetricmds" returns the final stress
in percent, as returned by the MASS::isoMDS function}
}
\value{
An \code{IGTProjection} object containing the projected coordinates of each
temporal batch in the embedded non-parametric Statistical Manifold, as well as the
embedding stress according to the embeddingType.
}
\description{
Estimates an \code{IGTProjection} object from a \code{DataTemporalMap} object.
}
\examples{
load(system.file("extdata",
"variabilityDemoNHDSdiagcode1-phewascode.RData",
package="EHRtemporalVariability"))
igtProj <- estimateIGTProjection( dataTemporalMap = probMaps$`diagcode1-phewascode`,
dimensions = 3,
startDate = "2000-01-01",
endDate = "2010-12-31")
\dontrun{
# For additional and larger examples download the following .Rdata file:
gitHubUrl <- 'http://github.com/'
gitHubPath <- 'hms-dbmi/EHRtemporalVariability-DataExamples/'
gitHubFile <- 'raw/master/variabilityDemoNHDS.RData'
inputFile <- paste0(gitHubUrl, gitHubPath, gitHubFile)
load(url(inputFile))
igtProj <- estimateIGTProjection( dataTemporalMap = probMaps[[1]],
dimensions = 3,
startDate = "2000-01-01",
endDate = "2010-12-31")
}
}
|
9647e82987cd65e65c50f9f25274d24439dda9d5
|
f3d976a4c8407b3254253a8b4ef8ff764bde241d
|
/Open science workshop/t-test messing.R
|
2a875db1c7900d1dd8e7dcef29ef4da7f0383743
|
[] |
no_license
|
charliekirkwood/demos
|
d98a0391836b577303b8e4b782f419931b4919b3
|
ec299ebfc1d74f8b6fbfddff9bf7361ef18d2147
|
refs/heads/master
| 2021-07-14T08:43:49.832928
| 2020-07-25T21:49:08
| 2020-07-25T21:49:08
| 183,048,869
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
r
|
t-test messing.R
|
library(data.table)
library(ggplot2)

# Quick two-sample t-test demo: 1000 draws per group, true means differing
# by 0.1 SD; compare with t.test() and visualise as jittered points.
sample_size <- 1000

group_a <- rnorm(n = sample_size, m = 0, sd = 1)
group_a
group_b <- rnorm(n = sample_size, m = 0.1, sd = 1)
group_b

t.test(group_a, group_b)

scores_long <- melt(data.frame(A = group_a, B = group_b))
ggplot(scores_long) +
  geom_jitter(aes(x = variable, y = value, col = variable), alpha = 0.33, shape = 16) +
  theme_classic()
|
109f16a4fe083ddcce650d4823425855a8d316e0
|
b6a030a2141f52f114844e33fefe2b7cc72df6f6
|
/Credit Data Analysis Using Tree, Random Forest,Bagging.R
|
15dc495343f239384c61056169994a2b58b6cdd9
|
[] |
no_license
|
NavneetSonak/Credit-Data-Analysis-Using-Various-ML-Alogorithms
|
639dae62fbd73a38a001b4adf48da35996228c62
|
0cb6941a563e6555d706455294cfecf336b82113
|
refs/heads/master
| 2021-05-10T14:09:38.581261
| 2018-01-22T20:04:37
| 2018-01-22T20:04:37
| 118,503,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,601
|
r
|
Credit Data Analysis Using Tree, Random Forest,Bagging.R
|
## Credit-risk modeling on credit3.xlsx: derive a binary profitability label
## from NPV, then fit and compare a classification tree (with CV pruning),
## a regression tree on NPV, bagging, and a random forest.
## NOTE(review): hard-coded absolute path + setwd() is machine-specific.
setwd('/Users/navneetsonak/Desktop/ClassesSlides/DataMining/R')
library(readxl)
library(dummies)
library(tree)
library(ISLR)
library(dplyr)
library(gbm)
library(randomForest)
## Importing Credit Data and doing data preprocessing.
credit <- read_excel('credit3.xlsx',sheet=1)
credit$profitable <- NULL
# Label: 1 if NPV >= 0 (profitable), else 0.
change <- function(x){
if (x<0)
return(0)
else
return(1)
}
credit$profitable <- sapply(credit$NPV, change)
credit$CHK_ACCT <- as.factor(credit$CHK_ACCT)
credit$SAV_ACCT <- as.factor(credit$SAV_ACCT)
credit$HISTORY <- as.factor(credit$HISTORY)
credit$JOB <- as.factor(credit$JOB)
credit$TYPE <- as.factor(credit$TYPE)
# One-hot encode all factor columns (dummies package).
credit <- dummy.data.frame(credit, all=TRUE)
credit$profitable <- as.factor(credit$profitable)
## Dividing datset into test and train
set.seed(12345)
train <- sample(nrow(credit),0.7*nrow(credit))
credit_train <- credit[train,]
credit_test <- credit[-train,]
## Running Classification tree alogorithm
# NOTE(review): columns dropped by position (-c(1,42,43)) -- presumably an ID
# column, NPV, and a leakage-prone column; fragile if the sheet layout
# changes, so confirm against credit3.xlsx. Same for -c(1,42,44) / 45 below.
tree.credit = tree(profitable~., data=credit_train[,-c(1,42,43)])
summary(tree.credit)
tree.pred1=predict(tree.credit,credit_test[,-c(1,42,43)],type="class")
table(tree.pred1,credit_test$profitable)
plot(tree.credit)
text(tree.credit,pretty=0)
## Choosing the best Pruned Classification tree size
set.seed(123)
cv.credit=cv.tree(tree.credit,K=10)
names(cv.credit)
plot(cv.credit$k,cv.credit$dev,type="b")
plot(cv.credit$size,cv.credit$dev,type="b")
##Pruned the tree
prune.credit=prune.misclass(tree.credit,best=2)
plot(prune.credit)
text(prune.credit,pretty=0)
# Test-set confusion matrix and accuracy for the pruned tree.
# (Variable name "tree_confisuion" is a typo for "confusion"; kept as-is.)
tree.pred=predict(prune.credit,credit_test[,-c(1,42,43)],type="class")
tree_confisuion<- table(tree.pred,credit_test$profitable)
accuracy_tree <- (tree_confisuion[1,1]+tree_confisuion[2,2])/sum(tree_confisuion)
accuracy_tree
##This is how the above classification tree will work for the following data:
##The student is 27 years old, domestic, has $100 in her checking account but no savings account.
##The applicant has 1 existing credits, and a credit duration of 12 months, and the credit was paid back duly.
##The applicant has been renting her current place for less than 12 months, does not own any real estate, just started graduate school (the present employment variable is set to 1 and nature of job to 2).
##The applicant has no dependents and no guarantor.
##The applicant wants to buy a used car and has requested $4,500 in credit, and therefore the Installment rate is quite high or 2.5%,
##however the applicant does not have other installment plan credits.
##Finally, the applicant has a phone in her name.
pred_data <-data.frame(AGE =27, CHK_ACCT1=1,CHK_ACCT0=0,CHK_ACCT2=0,CHK_ACCT3=0, SAV_ACCT0=1,SAV_ACCT1=0,SAV_ACCT2=0,SAV_ACCT3=0,SAV_ACCT4=0,NUM_CREDITS = 1, DURATION = 12, HISTORY2=1,HISTORY0=0,HISTORY1=0,HISTORY3=0,HISTORY4=0, PRESENT_RESIDENT=1, EMPLOYMENT=1, JOB2=1,JOB0=0,JOB1=0,JOB3=0, NUM_DEPENDENTS = 1, RENT=1, INSTALL_RATE=3, GUARANTOR=0, OTHER_INSTALL=0, OWN_RES=0, TELEPHONE=1, FOREIGN=0, REAL_ESTATE=0, TYPE2=1,TYPE1=0,TYPE0=0,TYPE3=0,TYPE4=0,TYPE5=0,TYPE6=0, AMOUNT_REQUESTED=4500)
tree.pred_data <- predict(prune.credit,pred_data,type="class")
tree.pred_data_prob <- predict(prune.credit,pred_data)
tree.pred_data_prob
## Running a regression tree algorithm
tree.credit_reg= tree(NPV~.,credit_train[,-c(1,42,44)])
summary(tree.credit_reg)
pred_credit_reg <- predict(tree.credit_reg,newdata=credit_test[,-c(1,42,44)])
length(pred_credit_reg)
plot(tree.credit_reg)
text(tree.credit_reg, pretty=0)
# Test-set mean squared error of the unpruned regression tree.
mse <- mean((pred_credit_reg-credit_test$NPV)^2)
## pruning the regression tree
set.seed(123)
cv.credit_reg=cv.tree(tree.credit_reg)
plot(cv.credit_reg$size,cv.credit_reg$dev,type='b')
## Pruned the tree
prune.credit_reg=prune.tree(tree.credit_reg,best=10)
plot(prune.credit_reg)
text(prune.credit_reg,pretty=0)
## Bagging
# Bagging = random forest with mtry equal to the number of predictors (40).
set.seed(12345)
bag.credit=randomForest(profitable~.,data=credit_train[,-c(1,42,43,45)],mtry=40,importance=TRUE)
bag.credit
bag.credit_pred <- predict(bag.credit, newdata = credit_test[,-c(1,42,43,45)])
bagging_confusion<- table(credit_test$profitable,bag.credit_pred)
accuracy_bag <- (bagging_confusion[1,1]+bagging_confusion[2,2])/sum(bagging_confusion)
accuracy_bag
test_bag <- data.frame(credit_test$NPV, bag.credit_pred)
## Random Forest
# NOTE(review): no set.seed() immediately before this fit, so the forest is
# not reproducible run-to-run (unlike the bagging fit above).
rf.credit=randomForest(profitable~.,data=credit_train[,-c(1,42,43,45)],mtry=10,importance=TRUE)
rf.credit_pred <- predict(rf.credit, newdata = credit_test[,-c(1,42,43,45)])
rf_confusion <- table(credit_test$profitable,rf.credit_pred)
accuracy_rf <- (rf_confusion[1,1]+rf_confusion[2,2])/sum(rf_confusion)
accuracy_rf
test_rf <- data.frame(credit_test$NPV, rf.credit_pred)
|
55afb7f06045a2b7ec3c38887ce639e7278fee06
|
38fe31eca00aead4f35512d0dffe84b80ca994e6
|
/R/profile.uiprobit.R
|
00fabd21240c0c7dff13f03bb73fd9f39dd1f09e
|
[] |
no_license
|
cran/ui
|
8f780729efca11df54b7a86947988fd4b6ab2345
|
c426fcbce6ae3ca676b092a45b02d67f8ee6bdd2
|
refs/heads/master
| 2020-12-22T22:58:12.606147
| 2019-11-11T12:10:02
| 2019-11-11T12:10:02
| 236,955,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,290
|
r
|
profile.uiprobit.R
|
#' Plot of UI and CI
#'
#' Plot function for objects returned from \code{\link{ui.probit}}.
#' Plots confidence intervals for different values of rho and the uncertainty interval.
#' @param fitted An object of class uiprobit
#' @param plot.all If TRUE, plots all covariates.
#' @param which Specify which variables should be plotted by either sending in their names in a vector or a vector with their numbers (1 intercept, 2 for the first covariate etc.).
#' @param intercept If TRUE, also plots the intercept.
#' @param xlab Title for x-axis, default is \code{expression(rho)}.
#' @param ylab Title for y-axis, default is the variable names.
#' @param cex.lab Size of lables.
#' @param mar Margin around panels in plot.
#' @param ... Additional arguments, use is discouraged.
#'
#'
#' @importFrom graphics par plot polygon lines
#' @export
profile.uiprobit<-function(fitted, plot.all=TRUE, which=NA, intercept=FALSE, xlab=NULL,ylab=NULL, cex.lab=2,mar=c(6,6,2,2), ...){
  # Plot, for each selected coefficient, the estimate as a function of rho
  # with a shaded confidence band and the uncertainty-interval limits.
  # Rows of fitted$coef are intercept + covariates; columns are the rho grid.
  p <- dim(fitted$coef)[1] - 1

  # Resolve 'which' to numeric row indices of fitted$coef.
  # BUG FIX: the original matched a character 'which' against NamnX, which was
  # only defined further below, so passing variable names always failed with
  # "object 'NamnX' not found". Match against rownames(fitted$coef) instead.
  if (any(is.na(which))) {
    if (plot.all == FALSE) warning('Need to specify which variable in order to not plot all.')
    plot.all = TRUE
  } else {
    plot.all = FALSE
    if (mode(which) == 'character') {
      which <- which(rownames(fitted$coef) %in% which)
    }
  }
  if (plot.all) {
    if (intercept) {which <- 1:(p + 1)} else {which <- 2:(p + 1)}
  }

  # Panel y-labels: default to the coefficient names; a scalar ylab is
  # recycled, a vector ylab must have one entry per selected coefficient.
  if (is.null(ylab)) {
    NamnX <- rownames(fitted$coef)
  } else {
    if (length(as.vector(ylab)) == 1) {
      NamnX <- rep(ylab, length(rownames(fitted$coef)))
    } else {
      if (length(as.vector(ylab)) == length(which)) {
        NamnX <- rep("", length(rownames(fitted$coef)))
        NamnX[which] = ylab
      } else {
        stop('Wrong dimensions on ylab.')
      }
    }
  }
  if (is.null(xlab)) {xlab = expression(rho)}

  # Fall back to the "rough" intervals when the exact ones contain NAs.
  if (sum(is.na(fitted$ui)) > 0) {
    ui <- fitted$uirough
    ci <- fitted$cirough
  } else {
    ui <- fitted$ui
    ci <- fitted$ci
  }

  # Near-square panel layout for the selected coefficients.
  n <- length(which)
  dim.par <- c(floor(sqrt(n)), ceiling(sqrt(n)))
  if (dim.par[1] * dim.par[2] < n) {
    dim.par[1] <- dim.par[1] + 1
  }
  par(mfrow = dim.par, mar = mar)

  # One panel per coefficient: estimate curve, shaded CI band (grey),
  # dashed CI limits, and a zero reference line.
  for (i in which) {
    plot(fitted$rho, fitted$coef[i,], type = 'l',
         ylim = c(min(c(0, ui[i,])), max(c(0, ui[i,]))),
         xlab = xlab, ylab = NamnX[i], cex.lab = cex.lab)
    polygon(c(fitted$rho, rev(fitted$rho)), c(ci[i,,2], rev(ci[i,,1])),
            col = "grey90", border = NA)
    lines(fitted$rho, fitted$coef[i,])
    lines(fitted$rho, ci[i,,1], lty = 2)
    lines(fitted$rho, ci[i,,2], lty = 2)
    lines(c(-1, 1), c(0, 0))
  }
}
|
886dc6f2228cce19043f815aa17c5e1dd582b3b8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/boot/examples/simplex.Rd.R
|
d12d2fa22d2a4c3dbd4bd2c45cfd7af5f37f95f0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
simplex.Rd.R
|
library(boot)
### Name: simplex
### Title: Simplex Method for Linear Programming Problems
### Aliases: simplex
### Keywords: optimize
### ** Examples
# Diet problem from Exercise 7.5 of Gill, Murray and Wright (1991):
# maximise total enjoyment subject to an upper bound on fat (A1 x <= b1)
# and lower bounds on three vitamin intakes (A2 x >= b2).
enjoyment <- c(200, 6000, 3000, -200)
fat_content <- c(800, 6000, 1000, 400)
vitamin_mins <- rbind(
  vitx = c(50, 3, 150, 100),
  vity = c(10, 10, 75, 100),
  vitz = c(150, 35, 75, 5)
)
simplex(a = enjoyment,
        A1 = fat_content, b1 = 13800,
        A2 = vitamin_mins, b2 = c(600, 300, 550),
        maxi = TRUE)
|
00301961ec02e1f0a8302ca827cb95de96fea809
|
f9783fa798eed1074a94a9fb882fccb9c3b21a81
|
/315B_HW2/hw2_q13.R
|
ff848e816322ff60c424f6f357c8bc84de2d2908
|
[] |
no_license
|
isaackleislemurphy/Stanford-STATS-315B
|
d7eae223d0062f543c1ed934196958356ebba3a1
|
89291693cbcb82d12901da831292d1075326030c
|
refs/heads/main
| 2023-05-14T08:52:58.505531
| 2021-06-05T03:22:30
| 2021-06-05T03:22:30
| 333,242,486
| 1
| 0
| null | 2021-02-27T01:47:33
| 2021-01-26T23:11:20
|
R
|
UTF-8
|
R
| false
| false
| 3,230
|
r
|
hw2_q13.R
|
# STATS 315B Homework 2 Question 13 and 14(RPART)
# Gradient-boosted regression of 'age' on survey features: grid search over
# interaction depth / shrinkage on a dev set, then refit and test.
# NOTE(review): the header mentions RPART but the models below are gbm fits.
library(dplyr)
library(gbm)
# NOTE(review): absolute, machine-specific path — adjust before running elsewhere.
age_df <- read.csv("/Users/kaiokada/Desktop/Stanford/Q3/STATS315B/HW1/age_stats315B.csv")
age_df <- sapply(age_df, as.numeric)
# Recode ordinal survey answers as ordered factors, nominal ones as factors.
age_df <- transform(age_df,
                    age = as.numeric(age),
                    Edu = factor(Edu, ordered=TRUE, c(1, 2, 3, 4, 5, 6)),
                    Income = factor(Income, ordered=TRUE, c(1, 2, 3, 4, 5, 6, 7, 8, 9)),
                    LiveBA = factor(LiveBA, ordered=TRUE, c(1, 2, 3, 4, 5)),
                    Persons = factor(Persons, ordered=TRUE, c(1,2,3,4,5,6,7,8,9)),
                    Under18 = factor(Under18, ordered=TRUE, c(0,1,2,3,4,5,6,7,8,9)),
                    Occup = factor(Occup),
                    TypeHome = factor(TypeHome),
                    sex = factor(sex),
                    MarStat = factor(MarStat),
                    HouseStat = factor(HouseStat),
                    DualInc = factor(DualInc),
                    Ethnic = factor(Ethnic),
                    Lang = factor(Lang))
age_df
# split the dataset: 70% train, 15% dev (validation), 15% test
set.seed(2021)
train_rows <- sample(1:nrow(age_df), size = as.integer(nrow(age_df) * 0.7)) # 70% training
eval_rows <- setdiff(1:nrow(age_df), train_rows)
val_rows <- sample(eval_rows, size = as.integer(length(eval_rows) * 0.5)) # 15% dev
test_rows <- setdiff(eval_rows, val_rows)
train_data <- age_df[train_rows,]
val_data <- age_df[val_rows,]
test_data <- age_df[test_rows,]
# Hyperparameter tuning:
tune_grid = expand.grid(
  interaction.depth=c(2, 4, 6),
  shrinkage=c(0.01, 0.05, 0.10, 0.15)
)
tune_grid2 = expand.grid(
  interaction.depth=c(6, 8, 10),
  shrinkage=c(0.0, 0.005, 0.01)
)
# NOTE(review): only tune_grid2 is searched below; tune_grid is left unused.
best_grid_row <- 0
min_error = 10000.0
# Fit the tree, find the best parameters (selected by dev-set MSE at the
# CV-chosen number of trees).
for (i in 1:nrow(tune_grid2)) {
  cat("-")  # progress tick, one per grid row
  set.seed(2021)
  fit.gbm <- gbm(age~.,
                 data=train_data,
                 train.fraction=1,
                 interaction.depth=tune_grid2$interaction.depth[i],
                 shrinkage=tune_grid2$shrinkage[i],
                 n.trees=2500,
                 bag.fraction=0.5,
                 cv.folds=5,
                 distribution="gaussian",
                 verbose=F)
  # Number of trees minimizing the 5-fold CV error.
  best.iter <- gbm.perf(fit.gbm,method="cv")
  yhat <- predict(fit.gbm, val_data, type="response", n.trees=best.iter)
  ytrue <- val_data$age
  mse <- mean((yhat - ytrue)^2)
  if (mse < min_error) {
    min_error = mse
    best_grid_row = i
  }
}
tune_grid2[best_grid_row,]
# Apply best hyperparams obtained.
set.seed(2021)
fit.gbm.best <- gbm(age~.,data=train_data,
                    train.fraction=1,
                    interaction.depth=tune_grid2$interaction.depth[best_grid_row],
                    shrinkage=tune_grid2$shrinkage[best_grid_row],
                    n.trees=2500,
                    bag.fraction=0.5,
                    cv.folds=5,
                    distribution="gaussian",
                    verbose=T)
best.iter <- gbm.perf(fit.gbm.best, method="cv")
best.iter
# Final held-out test-set MSE and variable-importance summary.
yhat_test <- predict(fit.gbm.best, test_data, type="response", n.trees=best.iter)
ytrue <- test_data$age
mse_test <- mean((yhat_test - ytrue)^2)
summary(fit.gbm.best,main="RELATIVE INFLUENCE OF ALL PREDICTORS")
mse_test
|
7640d842abf0ba413183b3a8c77c5dca02fd0acd
|
1cc809cc80bafec357d5ff1c1fa0f259d6f6ade9
|
/Ch044.R
|
8673e2becc15a8da8668084caf00c6fd14c49de9
|
[] |
no_license
|
SoonKwon/SSCRL381Summer2015
|
a82c2ace7a12600f7616ba61ec83bc0ab3052a9c
|
8447970a7cd6fcc782b0f421d9b43e7a8be23b9c
|
refs/heads/master
| 2021-01-19T00:16:13.653400
| 2015-07-25T02:59:21
| 2015-07-25T02:59:21
| 38,501,737
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
Ch044.R
|
# Introductory R tutorial script: vectors, vectorized arithmetic, help
# lookups, indexing, factors, and NA vs NULL.
# NOTE: several lines below raise errors when run; they appear to be
# deliberate classroom demonstrations and are flagged where known.
x <- c(1, 2, 3, 4, 5,6 ,7, 8, 9, 10)
x
y <- 1:10
y
z <- c(1:10)
z
# Integer sequences with ':' (can run downward and through zero).
1:10
10:1
-2:3
5: -7
# Vectorized arithmetic: each operation applies element-wise.
x*5
x+2
x - 3
x/4
x^2
sqrt(x)
mean(x)
length(x)
nchar(x)
nchar(y)
# Getting help on functions and operators.
?mean
?length
?'+'
?'=='
?'++'
?'^'
apropos("mea")
?kmeans
# Indexing (1-based).
x[1]
x[1:2]
# NOTE: x[1, 2] and x[4,6] error — a plain vector has no second dimension.
x[1, 2]
x[4]
x[4:6]
x[4,6]
x[c(4, 6)]
# Out-of-range index 60 yields NA.
x[c(4, 60)]
# Named vectors.
c(One = "a", Two = "y", Last = "r")
w <- 1:3
names(w) <- c("a", "b", "c")
w
# Element-wise arithmetic on two equal-length vectors.
x <- 1:10
y <- -5:4
x
y
length(x)
length(y)
length(x + y)
x + y
x - y
x * y
x/y
x^y
x
y
# Element-wise comparisons; any()/all() reduce logical vectors.
x <= 5
x > y
x < y
x
y
any(x < y)
all(x > y)
# Recycling: the shorter vector repeats (warning when lengths don't divide).
x + c(1, 2)
x + c(1, 2, 3)
# Factors: character vectors with a fixed set of levels.
q <- c("Hockey", "Football", "Baseball", "Curling", "Rugby", "Lacrosse", "Basketball", "Tennis", "Cricket", "Soccer")
q
nchar(q)
q2 <- c(q, "Hockey", "Lacrosse", "Hockey", "Water Polo", "Hockey", "Lacrosse")
q2
q2Factor <- as.factor(q2)
q2Factor
as.numeric(q2Factor)
# Ordered factors with explicit level ordering.
degree <- c("Masters", "Doctorate", "High School", "College")
degree
degreeLevel <- c("High School", "College", "Masters", "Doctorate")
# NOTE(review): 'degreeLevels' (plural) is never defined — presumably a typo
# for 'degreeLevel'; this line errors as written.
degreeLevels
degreeOrder <- factor(degree, levels = degreeLevel, ordered = TRUE)
degreeOrder
as.numeric(degreeOrder)
# NA for Missing and NULL for Nothing
p <- c(1, 2, NA, 8, 3, NA, 3)
p
is.na(p)
is.null(p)
# NULL elements are dropped when the vector is built, so q has length 5.
q <- c(1, 2, NULL, 8, 3, NULL, 3)
q
is.na(q)
is.null(q)
r <- NULL
r
is.null(r)
|
d33038dfff3d3392d474f54d665c2b386402b936
|
a864a155a4ac597349fdc6a0d0b7618e695f1f2e
|
/man/VisiumExperiment.Rd
|
2b61e957aa125e9a989492b59d8581a602364c9c
|
[] |
no_license
|
celsomilne/SpatialExperiment
|
eb80936e938596ced34514c5a7af4b30e18e7658
|
ae1da7d1eaeeed6bee5492b0cb08a7e05ac544f2
|
refs/heads/master
| 2022-12-02T06:22:56.199378
| 2020-07-31T00:52:29
| 2020-07-31T00:52:29
| 280,007,348
| 0
| 0
| null | 2020-07-31T00:52:30
| 2020-07-15T23:49:43
| null |
UTF-8
|
R
| false
| true
| 2,957
|
rd
|
VisiumExperiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{VisiumExperiment-class}
\alias{VisiumExperiment-class}
\alias{VisiumExperiment}
\alias{coerce,SpatialExperiment,VisiumExperiment-method}
\title{The VisiumExperiment class}
\usage{
VisiumExperiment(..., scaleFactors = list(), imagePaths = list())
}
\arguments{
\item{...}{arguments to be passed to the \code{\link{SpatialExperiment}}
constructor to fill the slots of the base class.}
\item{scaleFactors}{the 10x Visium image scale factors.}
\item{imagePaths}{the list of the paths for the 10x Visium images.}
}
\value{
none
}
\description{
The VisiumExperiment class is designed to represent 10x Visium spatial
Gene Expression data.
It inherits from the \linkS4class{SpatialExperiment} class and is used in
the same manner.
In addition, the class supports the integration with 10x Visium spatial
coordinates and its scale factors.
}
\section{Slots}{
\describe{
\item{\code{scaleFactors}}{list}
\item{\code{imagePaths}}{list}
}}
\examples{
barcodesFile <- system.file(file.path("extdata", "10x_visium",
"barcodes.tsv"),
package="SpatialExperiment")
barcodesEx <- read.csv(barcodesFile, sep="\t",
header=FALSE, col.names=c("Barcodes"))
featuresFile <- system.file(file.path("extdata", "10x_visium",
"features.tsv"), package="SpatialExperiment")
featuresEx <- read.csv(featuresFile, sep="\t",
header=FALSE, col.names=c("Barcodes", "Feature_name",
"Feature_type"))
countsFile <- system.file(file.path("extdata", "10x_visium",
"matrix.mtx"), package="SpatialExperiment")
countsEx <- Matrix::readMM(file=countsFile)
posFile <- system.file(file.path("extdata", "10x_visium",
"tissue_positions_list.tsv"),
package="SpatialExperiment")
tissPosEx <- read.csv(posFile,
sep="\t", header=FALSE,
col.names=c("Barcodes", "in_tissue",
"array_row", "array_col",
"pxl_col_in_fullres", "pxl_row_in_fullres"))
scaleFile <- system.file(file.path("extdata", "10x_visium",
"scalefactors_json.json"),
package="SpatialExperiment")
scalefactors <- rjson::fromJSON(file=scaleFile)
imagePaths <- list.files(system.file(file.path("extdata", "10x_visium",
"images"),
package="SpatialExperiment"), full.names=TRUE)
ve <- VisiumExperiment(rowData=featuresEx, colData=barcodesEx,
assays=c(counts=countsEx),
spatialCoords=tissPosEx,
scaleFactors=scalefactors,
imagePaths=imagePaths)
ve
}
\author{
Dario Righelli
}
|
9579a6bc62650a6b1f0865316fe06bd7b4ac4c28
|
200412f0bd7d414124b3ea07478e7a40d48c338e
|
/man/initpi.Rd
|
505d9dec75541951621e18103b7a77b3ba8826a4
|
[] |
no_license
|
dcgerard/stramash
|
6c4eb8cc168d6ae7800c5949885f32984d367c3d
|
7578dbad1bddc3d6a3fcb1bb0e0134cdb79dde40
|
refs/heads/master
| 2021-01-20T17:26:56.198069
| 2016-06-21T16:04:55
| 2016-06-21T16:04:55
| 61,399,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 547
|
rd
|
initpi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stramash.R
\name{initpi}
\alias{initpi}
\title{This is copied from the ashr package because I need it and it is
not exported.}
\usage{
initpi(k, n, null.comp, randomstart)
}
\arguments{
\item{k}{The size of the grid.}
\item{n}{The sample size.}
\item{null.comp}{The position of the null component.}
\item{randomstart}{A logical. Should we start at a random location?}
}
\description{
This is copied from the ashr package because I need it and it is
not exported.
}
|
110a17e4fb62f7fc50839bbf908050c12e2532e0
|
9cc64c2a53e17d943119ff2eea6004777104e005
|
/35225_Public_Health_Information_Data_Analysis/Chapter04/ch04_2.R
|
70269b0911ca503bcbd6d4871b1d1cec422daa68
|
[] |
no_license
|
devmei/KNOU
|
da8ba1d315ffc9a40605c97175d470c660750fc0
|
e6b77d83c77ab6b11e7ecddfec1fb0a2e2ca49f5
|
refs/heads/master
| 2023-09-01T00:36:12.143572
| 2021-10-19T08:24:15
| 2021-10-19T08:24:15
| 349,593,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,355
|
r
|
ch04_2.R
|
# Example of a test of homogeneity
# Vitamin effect on catching a cold (2x2 table)
vitamin_effect <- matrix(c(31, 109, 17, 122), nrow = 2, byrow = T)
# Rows (taking): control group vs. treatment group
# Columns (cold): caught a cold vs. did not catch a cold
dimnames(vitamin_effect) <- list(taking = c("Control", "Treatment"), cold = c("Catch", "NotCatch"))
vitamin_effect
## cold
## taking Catch NotCatch
## Control 31 109
## Treatment 17 122
# Contingency table with row/column margins
table_v <- addmargins((vitamin_effect))
table_v
## cold
## taking Catch NotCatch Sum
## Control 31 109 140
## Treatment 17 122 139
## Sum 48 231 279
# Chi-squared test (with Yates' continuity correction by default for 2x2)
chisq.test(vitamin_effect)
##
## Pearson's Chi-squared test with Yates' continuity correction
##
## data: vitamin_effect
## X-squared = 4.1407, df = 1, p-value = 0.04186
# Observed frequencies
chisq.test(vitamin_effect)$observed
## cold
## taking Catch NotCatch
## Control 31 109
## Treatment 17 122
# Expected frequencies under homogeneity
chisq.test(vitamin_effect)$expected
## cold
## taking Catch NotCatch
## Control 24.08602 115.914
## Treatment 23.91398 115.086
# Pearson residuals
chisq.test(vitamin_effect)$residuals
## cold
## taking Catch NotCatch
## Control 1.408787 -0.6421849
## Treatment -1.413846 0.6444908
|
12348ff11d6b3ecdb85bb581ee86346468703296
|
b7194433c8b351b730fe7b398ac8a76659ee27f2
|
/Scripts/read.tre.R
|
f53674760ba22b800003d65b37fe49ae54515156
|
[] |
no_license
|
samuelgerber/VascularNetworks
|
1a95ce87b333f9d0019f35601f40e8f01facd947
|
daf82992d38836a662d3de79532bf0ccc1a772a3
|
refs/heads/master
| 2021-09-18T20:43:40.671339
| 2018-07-19T19:27:21
| 2018-07-19T19:27:21
| 110,156,971
| 0
| 1
| null | 2017-12-09T01:06:01
| 2017-11-09T19:25:34
|
R
|
UTF-8
|
R
| false
| false
| 1,097
|
r
|
read.tre.R
|
read.tree <- function(inputFile){
  # Parse a .tre vascular-network file into one data frame.
  # The file is a sequence of branch records of the form
  #   ID = <int> / ParentID = <int> / NPoints = <n> / <header line> /
  #   <n> whitespace-separated rows with at least 4 numeric columns.
  # Returns a data frame with columns V1..V4 (first four point columns)
  # plus 'id' and 'pid' identifying each point's branch and parent branch.
  con <- file(inputFile, open = "r")
  # BUG FIX: close the connection even if read.table() fails mid-file
  # (the original leaked the connection on error).
  on.exit(close(con), add = TRUE)
  X = c()
  id = 0
  pid = 0
  while (length( line <- readLines(con, n = 1, warn = FALSE)) > 0) {
    if( startsWith( line, "ID" ) ){
      id = as.integer( strsplit(line, "=")[[1]][2] )
    }
    else if( startsWith( line, "ParentID" ) ){
      pid = as.integer( strsplit(line, "=")[[1]][2] )
    }
    else if( startsWith( line, "NPoints" ) ){
      npoints = as.integer( strsplit(line, "=")[[1]][2] )
      readLines(con, n=1)  # skip the header line preceding the point rows
      X = rbind(X, cbind( read.table(con, nrows=npoints)[,1:4], id, pid ) )
    }
  }
  X
}
read.all.trees <- function(folder){
  # Scan every subdirectory of 'folder' for a readable
  # "color-new-t1Phantom.tre" file and parse each one with read.tree().
  # Returns list(names = <directory base names>, data = <parsed trees>).
  tree.names <- list()
  tree.data <- list()
  k <- 1
  for (dir.path in list.dirs(folder)) {
    # The directory's base name identifies the tree.
    parts <- strsplit(dir.path, "/")[[1]]
    base.name <- parts[length(parts)]
    # Alternative input file kept from the original script:
    # tre.path <- paste(dir.path, "/VascularNetwork.tre", sep="")
    tre.path <- paste(dir.path, "/color-new-t1Phantom.tre", sep="")
    if (file.access(tre.path) == 0) {
      print(base.name)  # progress output
      tree.names[[k]] <- base.name
      tree.data[[k]] <- read.tree(tre.path)
      k <- k + 1
    }
  }
  list(names = tree.names, data = tree.data)
}
|
eb18b79584365654c7a7fd84a9926fbe2afd10f8
|
221befa0744f895155f744b6702bb5fa04334c1c
|
/lecture13.R
|
8607174025a495fc13ded4bd70bfa8aa40d95d80
|
[] |
no_license
|
LEESUAJE1978/Habit-Factory
|
66c651ce5100e35ef2dcff8a4965b37cf6ede4f2
|
27cea363ecfa28797b38b202f1e64b29b6e84378
|
refs/heads/master
| 2020-03-20T16:32:03.861433
| 2018-11-28T12:05:47
| 2018-11-28T12:05:47
| 137,541,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,224
|
r
|
lecture13.R
|
# Purchase-behavior analysis of the "bium" shopping data
# (cart rows in bium.cart, orders in bium.order — loaded earlier).
require(dplyr); require(stringr); require(KoNLP); require(wordcloud); require(ggplot2); require(lubridate)
str(bium.cart)
theme_set(theme_gray(base_family="AppleGothic"))
par(family='AppleGothic')# fix for Korean text rendering as boxes in plots
Sys.setlocale(category = "LC_CTYPE", locale = "ko_KR.UTF-8")
# 1. Difference in purchase amount by gender
class(bium.cart$gender)
addmargins(table(bium.cart$gender))
sum(is.na(bium.cart$gender))
qplot(bium.cart$gender)
class(bium.cart$price)
summary(bium.cart$price)
qplot(bium.cart$price)+xlim(1,20000)
sum(is.na(bium.cart$price))
# Mean cart-item price by gender.
gender.price = bium.cart %>% filter(!is.na(gender)) %>% group_by(gender) %>% summarise(avg = mean(price))
ggplot(data =gender.price, aes(x=gender, y =avg))+geom_col()
str(bium.order)
class(bium.order$total_price)
summary(bium.order$total_price)
qplot(bium.order$total_price)+xlim(1,100000)
sum(is.na(bium.order$total_price))
# Mean order total by gender.
gender.total.price = bium.order %>% filter(!is.na(gender)) %>% group_by(gender) %>% summarise(avg = mean(total_price))
ggplot(data =gender.total.price, aes(x=gender, y =avg))+geom_col()
# Conclusion 1: no noticeable difference in purchase amount by gender
# 2. Difference in purchase amount by age
class(bium.cart$birth_year)
summary(bium.cart$birth_year)
table(is.na(bium.cart$birth_year))
bium.cart$age = 2018- bium.cart$birth_year +1  # age in 2018, counted inclusively
summary(bium.cart$age)
qplot(bium.cart$birth_year)+xlim(1950, 2018)
qplot(bium.cart$age)+xlim(10, 90)
age.price = bium.cart %>% filter(!is.na(age), age<100) %>% group_by(age) %>% summarise(avg=mean(price))
ggplot(data = age.price, aes(x = age, y= avg))+geom_line()
class(bium.order$birth)
class(bium.order$birth)
bium.order$year = as.numeric(substr(bium.order$birth, 1,4))  # birth year from YYYY... string
qplot(bium.order$year)+xlim(1950,2018)
# NOTE(review): bium.order$age is plotted here but only created on the next
# line — these two statements look out of order. TODO confirm.
qplot(bium.order$age)+xlim(10,90)
bium.order$age = 2018-bium.order$year -1
table(is.na(bium.order$year))
age.total.price = bium.order %>% filter(!is.na(age), between(age, 0, 100)) %>% group_by(age) %>% summarise(avg=mean(total_price))
ggplot(data = age.total.price, aes(x=age, y=avg))+geom_line()
ggplot(data = age.total.price, aes(x=age, y=avg))+geom_col()+geom_line()
# Average spend per customer appears highest in the mid-50s age range
# Note: perception of the data can change depending on the visualization used
# 3. Difference in purchase amount by age group
# Bucket ages into decade labels ("10대미만" = under 10, "10대" = teens, ...,
# "60대이상" = over 60).
bium.order = bium.order %>% mutate(age.group = ifelse(between(age,0,10), "10대미만",
                                               ifelse(between(age,11,20),"10대",
                                               ifelse(between(age,21,30),"20대",
                                               ifelse(between(age, 31,40),"30대",
                                               ifelse(between(age,41,50),"40대",
                                               ifelse(between(age,51,60),"50대",
                                               ifelse(between(age, 61,70),"60대","60대이상"))))))))
# NOTE(review): the column above is named 'age.group'; '$agegroup' is NULL,
# so this table() call fails as written. TODO confirm intended column name.
table(bium.order$agegroup)
bium.cart = bium.cart %>% mutate(age.group = ifelse(between(age,0,10), "10대미만",
                                             ifelse(between(age,11,20),"10대",
                                             ifelse(between(age,21,30),"20대",
                                             ifelse(between(age, 31,40),"30대",
                                             ifelse(between(age,41,50),"40대",
                                             ifelse(between(age,51,60),"50대",
                                             ifelse(between(age, 61,70),"60대","60대이상"))))))))
# NOTE(review): same 'agegroup' vs 'age.group' mismatch here and in the
# filter two statements below.
table(is.na(bium.cart$agegroup))
agegroup.price = bium.cart %>% filter(!is.na(age.group)) %>% group_by(age.group) %>% summarise(avg.price = mean(price))
agegroup.total.price = bium.order %>% filter(!is.na(agegroup)) %>% group_by(age.group) %>% summarise(avg.price = mean(total_price))
ggplot(data = agegroup.price, aes(x=age.group, y=avg.price))+geom_col()
ggplot(data = agegroup.total.price, aes(x=age.group, y= avg.price))+geom_col()
# Average purchase amount ranks: 40s first, then 60s, then 50s
# 4. Purchase amount and purchase frequency by age group and gender
avg.price.freq = bium.order %>%
  filter(!is.na(age) & !is.na(gender)) %>%
  group_by(age.group, gender)%>%
  summarise(avg = mean(total_price), freq =n())
ggplot(data = avg.price.freq, aes(x=age.group, y=avg,fill=gender))+geom_col()
ggplot(data = avg.price.freq, aes(x=age.group, y=avg,fill=gender))+geom_col(position = "dodge")
ggplot(data=avg.price.freq, aes(age.group, y=freq, fill =gender))+geom_col()
ggplot(data=avg.price.freq, aes(age.group, y=freq, fill =gender))+geom_col(position = "dodge")
ggplot(data = avg.price.freq, aes(x=age.group, y=avg))+geom_line() # question: why should geom_line not be used on this data set?
# 5. Difference in purchase amount by region
str(bium.order)
# First two characters of the address give the region.
bium.order$region = substr(bium.order$address1, 1,2)
table(is.na(bium.order$region))
region.price = bium.order %>% filter(!is.na(region)) %>%
  group_by(region) %>%
  summarise(avg = mean(total_price), freq=n()) %>%
  mutate(total = avg *freq)
ggplot(data = region.price, aes(x= region, y=avg)) + geom_col()
ggplot(data = region.price, aes(x= region, y= freq)) + geom_col()
ggplot(data = region.price, aes(x= region, y= total)) + geom_col()
write.xlsx(region.price,"test1.xlsx" )
# NOTE(review): 'region' is a character column, so cor() on it fails. TODO confirm.
cor(bium.order$region, bium.order$total_price)
# NOTE(review): install.packages() inside a script re-installs on every run.
install.packages("treemap")
require(treemap)
png(filename="tree1.png",width=800, height=800)
treemap(region.price, vSize = "total", index = "region",title = "지역별 매출현황")
dev.off()
# Sales amount
options(scipen = 100)  # suppress scientific notation in output
str(bium.order)
cor.price = bium.order %>%
  filter(between(age,0,100)) %>%
  select(age, total_price)
####### Review text mining: Korean noun extraction and word clouds
useNIADic()
Sys.setlocale(category = "LC_CTYPE", locale = "ko_KR.UTF-8") # fix for broken Korean characters
theme_set(theme_gray(base_family="AppleGothic"))
par(family='AppleGothic')# fix for Korean text in plots
txt = readLines('review.txt')
head(txt)
txt = str_replace_all(txt,"\\W"," ")  # replace non-word characters with spaces
noun = extractNoun(txt)               # KoNLP noun extraction
wordcount = table(unlist(noun))
df.word = as.data.frame(wordcount, stringsAsFactors = F)
df.word = rename(df.word, word = Var1, freq =Freq)
# Keep words of 2+ characters.
# NOTE(review): 'word != c("반찬","주문")' recycles the 2-vector element-wise
# rather than excluding both words; the %in% form below is the correct exclusion.
df.word = filter(df.word, nchar(word) >=2, word !=c("반찬","주문"))
df.word %>% filter(!(word %in% c("반찬", "주문"))) %>% arrange(desc(freq))
# Regex-based exclusion of stop-ish tokens.
df.word.rev = df.word %>% filter(!(str_detect(word, "반찬|주문|해서|^ㅎ"))) %>% arrange(desc(freq))
top50 = df.word %>% arrange(desc(freq)) %>% head(50)
pal = brewer.pal(8,"Dark2")
set.seed(1234)
# Word clouds: top-50 words, all filtered words, regex-filtered words.
wordcloud(words = top50$word,
          freq = top50$freq,
          min.freq = 2,
          max.words = 200,
          random.order =F,
          rot.per = .1,
          scale = c(4,0.3),
          colors = pal )
wordcloud(words = df.word$word,
          freq = df.word$freq,
          min.freq = 2,
          max.words = 200,
          random.order =F,
          rot.per = .1,
          scale = c(4,0.3),
          colors = pal )
wordcloud(words = df.word.rev$word,
          freq = df.word.rev$freq,
          min.freq = 2,
          max.words = 200,
          random.order =F,
          rot.per = .1,
          scale = c(4,0.3),
          colors = pal )
|
c98d8b849e8e3930fa763e8d8c67013b25f2cb7e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NISTunits/examples/NISTmilePerGallonTOmeterPerCubMeter.Rd.R
|
276d9d2d4b3022b3e4192260801fcd25525f7c18
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
NISTmilePerGallonTOmeterPerCubMeter.Rd.R
|
library(NISTunits)
### Name: NISTmilePerGallonTOmeterPerCubMeter
### Title: Convert mile per gallon to meter per cubic meter
### Aliases: NISTmilePerGallonTOmeterPerCubMeter
### Keywords: programming
### ** Examples
# Convert a fuel economy of 10 miles per gallon into SI units
# (metres travelled per cubic metre of fuel).
NISTmilePerGallonTOmeterPerCubMeter(10)
|
51d3e11df9d1f94359f47032027b7c4e14d81462
|
e66d20590a2a24489b3ced4f20885d6bfcf312ed
|
/ESA5-RandJulia/Exercise1-guessingNumber.r
|
210a27c672ced09b368ccd89fb2c368202ca188f
|
[] |
no_license
|
Karina1988/DataScience2020
|
3270de13dabda54b3231d5a33abfa96784126ea3
|
975b97ec6a16cae105bc80a24edbd2ab6cfeeced
|
refs/heads/master
| 2022-11-07T05:25:21.727440
| 2020-06-28T16:17:07
| 2020-06-28T16:17:07
| 260,479,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 705
|
r
|
Exercise1-guessingNumber.r
|
startGame <- function()
{
  # Number-guessing game: draw a random integer in [0, 100], prompt the
  # player until it is guessed, then offer a restart (recursive call).
  randNumber <- round(runif(1, 0, 100))
  repeat
  {
    guess <- readInput()
    if (guess == randNumber)
    {
      cat("Jippie, die Zahl ist korrekt!\n")
      restartGame <- readRestartGame()
      if (restartGame == "y")
      {
        startGame()
      }
      else
      {
        cat("Ciao!\n")
      }
      break
    }
    else if (guess < randNumber)
    {
      cat("Deine Zahl ist zu niedrig.\n")
    }
    else
    {
      cat("Deine Zahl ist zu hoch.\n")
    }
  }
}
readInput <- function()
{
  # Prompt the player for a guess and coerce it to integer
  # (non-numeric input yields NA).
  as.integer(readline(prompt="Rate die Zahl zwischen 0 und 100: "))
}
readRestartGame <- function()
{
  # Ask the player whether to start another round; returns the raw answer
  # string ("y" triggers a restart in startGame()).
  input <- readline(prompt="Nochmal? y/n \n")
  return(input)
}
# Launch the game as soon as the script is sourced.
startGame()
|
39f74f4c8e20ae66a5d691b2727030f45e3df7ee
|
9d068471c68078d3a26d83929158dd3b51e96887
|
/R/calc_stream_voronoi_weights.R
|
923fdb9b5206e7abfe2b5d865efbd310391b4efd
|
[
"MIT"
] |
permissive
|
scantle/pbjr
|
b4077d5c19c58f6e7f5db978f6a82205cf9b2394
|
365106683e21ef9358695aceecc265346d6c4ea5
|
refs/heads/master
| 2023-04-13T05:33:18.764597
| 2021-04-15T19:28:37
| 2021-04-15T19:28:37
| 279,678,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,576
|
r
|
calc_stream_voronoi_weights.R
|
#-------------------------------------------------------------------------------------------------#
#' Calculate polyline (e.g. stream) barycentric coordinates
#'
#' These coordinates are used as "weights" in the PBJ MODFLOW-USG package to interpolate heads and
#' distribute flows.
#'
#' The function can take a while to run
#'
#' @param stream sf polyline, "exploded" into segments (see \code{\link{line_explode}})
#' @param voronoi sf polygon of voronoi tesselation (unstructured model grid). Shapefile ID field
#' will be used to determine node ID.
#' @param triangles sf polygon of delaunay triangulation corresponding to voronoi grid.
#' @param addTo (optional) existing calc_stream_voronoi_weights() output that new output should be
#' added to (NULL by default)
#' @param geometry (optional) T/F whether to include sf geometry in output dataframe (default: True)
#' @param correct_seg_order (optional) T/F to re-order the line segments after finding overlaps with
#' the triangle grid. Will crash if you have multiple seperate lines (e.g. two streams). (default:
#' True)
#' @param cutoff_value numeric, minimum barcentric coordinate value. Values below will be forced to
#' zero (1e-7 by default)
#' @param seg_min_length numeric, minimum length of segment to include in calculation (default 1e-7).
#' Generally just to weed out numerical errors.
#' @param keep_stream_cols character array, columns in stream segment dataframe to add to returned
#' dataframe.
#' @return DataFrame or sf object, if geometry = True. Each row is one segment-triangle overlap,
#' with six barycentric weights (three for segment end), the three voronoi shape IDs (model nodes)
#' connected by the triangle, and the segment length in the triangle.
#'
#' This is the expected input of \code{\link{stream_elev_from_slope}} and
#' the \code{calc_conductance*} functions (e.g. \code{\link{calc_conductance_modflow}})
#' @author Leland Scantlebury
#' @export calc_stream_voronoi_weights
#'
#' @examples
#' #-- Read in shapefiles
#' str <- read_sf(system.file("extdata", "MehlandHill2010_stream.shp", package = "pbjr"))
#' tri <- read_sf(system.file("extdata", "720_triangles.shp", package = "pbjr"))
#' vor <- read_sf(system.file("extdata", "720_voronoi.shp", package = "pbjr"))
#'
#' #-- Explode polyline
#' str <- line_explode(str)
#'
#' #-- Run the function
#' swdf <- calc_stream_voronoi_weights(stream = str, voronoi = vor, triangles = tri)
#'
#' #-- Example of addTo use (more likely run with new stream shapefile)
#' more_swdf <- calc_stream_voronoi_weights(stream = str, voronoi = vor, triangles = tri,
#' addTo = swdf)
calc_stream_voronoi_weights <- function(stream, voronoi, triangles, addTo=NULL, geometry=T,
                                        correct_seg_order=T,
                                        cutoff_value=1e-7, seg_min_length=1e-7,
                                        keep_stream_cols=NULL) {
  #-- Intersect the stream polyline with the triangle grid; each resulting
  #-- feature is one stream segment lying inside one triangle.
  st_agr(triangles) <- 'constant' # Silence useless spatial consistency error
  st_agr(stream) <- 'constant'
  st_agr(voronoi) <- 'constant'
  tri_stream <- st_intersection(triangles, stream)
  #-- Remove segments below length threshold (weeds out numerical slivers)
  tri_stream <- tri_stream[as.numeric(st_length(tri_stream)) > seg_min_length,]
  #-- st_intersection can mess up segment order - it uses the triangle ID # to determine the order
  #-- This correction won't work for multiple streams - they must be sequential
  #TODO add support for multiple separate lines (e.g., multiple streams)
  if (correct_seg_order) {
    tri_stream <- reorder_segments(stream, tri_stream)
  }
  tri_stream$Order <- 1:nrow(tri_stream)
  #-- Extract the triangle record for each segment, kept in segment order
  seg_triangles <- merge(st_drop_geometry(tri_stream[,c('Order','ID')]), triangles, by = 'ID')
  seg_triangles <- seg_triangles[order(seg_triangles$Order),]
  #-- Report
  message(paste('Calculating barycentric coords for',nrow(tri_stream),'triangle-stream segment combinations'))
  #message('May take a bit...')
  #-- Barycentric coordinates of both endpoints of every segment with respect
  #-- to its containing triangle (columns 1:3 = first endpoint, 4:6 = second)
  bary_coords <- geo_to_barycentric_coords(segments = tri_stream, seg_triangles = seg_triangles)
  #-- Zero small coordinates, re-normalize so each endpoint's weights sum to 1
  bary_coords[bary_coords < cutoff_value] <- 0.0
  bary_coords[,1:3] <- t(apply(bary_coords[,1:3], 1, function(x)(x/sum(x))))
  bary_coords[,4:6] <- t(apply(bary_coords[,4:6], 1, function(x)(x/sum(x))))
  #-- Map each triangle corner (columns 2:4 of tri_corners) to the voronoi
  #-- cell (model node) it falls in.
  #-- Would love a simpler way to get voronoi-triangle corner mapping
  tri_corners <- triangle_corners(seg_triangles)
  for (p in 2:4) {
    pgeo <- st_as_sf(tri_corners[,p])
    pgeo$tID <- tri_corners$ID
    st_agr(pgeo) <- 'constant'
    st_crs(pgeo) <- st_crs(voronoi)
    pgeo$uniqueID <- 1:nrow(pgeo)
    vor_overlap <- st_intersection(voronoi,pgeo)
    #-- Check - sometimes a triangle point (usually at a border) is attached to multiple voronoi cells.
    # POSSIBLY can be corrected for by checking if extra (duplicate) voronoi cells even intersect with relevant triangles, stream
    if (nrow(tri_corners) < nrow(vor_overlap)) {
      message('One of more triangle points intersect multiple voronoi cells. Attempting to correct...')
      vor_overlap <- point_multi_voronoi_intersect_fixer(tri_corners, vor_overlap, voronoi, triangles, stream)
    }
    #-- Move voronoi IDs over to a new Node column (Node1..Node3)
    tri_corners <- cbind(tri_corners, vor_overlap$ID)
    names(tri_corners)[length(names(tri_corners))] <- paste0('Node',p-1)
  }
  #-- Assemble output: one row per segment-triangle overlap
  # NOTE(review): 'tri' below is not an argument of this function — it relies
  # on a global of that name (as in the examples); presumably the intent was
  # the last column of tri_stream. TODO confirm.
  weights <- data.frame('Order'=tri_stream$Order,
                        'Triangle'=tri_stream$ID,
                        'Segment'=st_drop_geometry(tri_stream[,length(names(tri))]),
                        'Length'=as.numeric(st_length(tri_stream)), # Ignore "units" class, trust users
                        'Node1'=tri_corners$Node1,
                        'Node2'=tri_corners$Node2,
                        'Node3'=tri_corners$Node3,
                        'seg1.a1'=bary_coords[,1],
                        'seg1.a2'=bary_coords[,2],
                        'seg1.a3'=bary_coords[,3],
                        'seg2.a1'=bary_coords[,4],
                        'seg2.a2'=bary_coords[,5],
                        'seg2.a3'=bary_coords[,6], row.names = NULL)
  if (geometry) {
    weights$geometry <- tri_stream$geometry
  }
  if (!is.null(keep_stream_cols)) {
    weights <- cbind(weights, st_drop_geometry(tri_stream[, keep_stream_cols]))
  }
  #-- Append to a previous result if requested
  if (!is.null(addTo)) {
    weights <- rbind(addTo, weights)
    #TODO Likely could use some error handling
  }
  return(weights)
}
#-------------------------------------------------------------------------------------------------#
# Resolve cases where one triangle corner intersects more than one voronoi cell.
#
# tri_corners: triangle corner points (one row expected per corner in the end)
# vor_overlap: corner-to-voronoi intersection table (uses $uniqueID, $ID, $tID)
# voronoi:     all voronoi cells (sf, with $ID)
# triangles:   triangulation polygons (sf, with $ID)
# stream:      stream geometry, used as a fallback disambiguation test
#
# Tries three successively looser heuristics; returns vor_overlap with the
# duplicate rows removed as soon as the row count matches nrow(tri_corners),
# and stops if none of the heuristics reconciles the counts.
point_multi_voronoi_intersect_fixer <- function(tri_corners, vor_overlap, voronoi, triangles, stream) {
# Create "super" unique column
vor_overlap$superID <- 1:nrow(vor_overlap)
# Find points that came from identical point intersections
dupes <- vor_overlap[vor_overlap$uniqueID %in% vor_overlap[duplicated(vor_overlap$uniqueID),]$uniqueID,]
# Remove cells without river cells
vor_overlap_cells <- voronoi[voronoi$ID %in% vor_overlap$ID,]
st_agr(vor_overlap_cells) <- 'constant'
#-- Test one - Remove cells that do not overlap the triangles in question
vor_overlap_cells <- vor_overlap_cells[sapply(st_overlaps(vor_overlap_cells,triangles[triangles$ID %in% dupes$tID,]), length) > 0 ,]
# Remove from dupes the cells that overlap a triangle
dupes <- dupes[!dupes$ID %in% vor_overlap_cells$ID,]
# Now, remove dupes from vor_overlap that are also in this list
#test <- vor_overlap[!((vor_overlap$uniqueID %in% dupes$uniqueID)&(vor_overlap$ID %in% dupes$ID)),]
test <- vor_overlap[!(vor_overlap$superID %in% dupes$superID),]
# Did we do it??
if (nrow(tri_corners) == nrow(test)) {
# This one doesn't really need a warning
return(test)
}
# Darn. Start over:
# Find points that came from identical point intersections
dupes <- vor_overlap[vor_overlap$uniqueID %in% vor_overlap[duplicated(vor_overlap$uniqueID),]$uniqueID,]
# Remove cells without river cells
vor_overlap_cells <- voronoi[voronoi$ID %in% vor_overlap$ID,]
st_agr(vor_overlap_cells) <- 'constant'
#-- Test two - Remove cells that do not intersect any part of the stream (can remove too many!)
vor_overlap_cells <- vor_overlap_cells[sapply(st_intersects(vor_overlap_cells,stream), length) > 0 ,]
# Remove from dupes the cells that contain a stream segment
dupes <- dupes[!dupes$ID %in% vor_overlap_cells$ID,]
# Now, remove dupes from vor_overlap that are also in this list
#test <- vor_overlap[!((vor_overlap$uniqueID %in% dupes$uniqueID)&(vor_overlap$ID %in% dupes$ID)),]
test <- vor_overlap[!(vor_overlap$superID %in% dupes$superID),]
# Did we do it??
if (nrow(tri_corners) == nrow(test)) {
warning('Removed voronoi cell(s) with no stream segments that non-uniquely intersected with triangle point(s)')
return(test)
}
# Fine. We'll do this by triangle, choosing just by area
# Find points that came from identical point intersections
dupes <- vor_overlap[vor_overlap$uniqueID %in% vor_overlap[duplicated(vor_overlap$uniqueID),]$uniqueID,]
vor_overlap_cells <- voronoi[voronoi$ID %in% vor_overlap$ID,]
st_agr(vor_overlap_cells) <- 'constant'
# Heuristic three: per triangle, keep the cell with the largest intersection area.
result <- lapply(unique(dupes$tID), function(triID) {
vocs <- vor_overlap_cells[sapply(st_overlaps(vor_overlap_cells,triangles[triangles$ID==triID,]), length) > 0 ,]
if (any(!unique(dupes[dupes$tID==triID,]$ID) %in% vocs$ID)) {
# first test actually worked - but there were different answers for different triangles
res <- dupes[(dupes$tID==triID)&(!dupes$ID %in% vocs$ID),]
return(res)
}
# Find out which was cell has less intersection with the triangle
vor_intersect_cells <- st_intersection(vor_overlap_cells,triangles[triangles$ID==triID,])
vor_intersect_cells$inter_area <- st_area(vor_intersect_cells)
res <- dupes[(dupes$tID==triID)&(dupes$ID != vor_intersect_cells[which.max(vor_intersect_cells$inter_area),]$ID),]
return(res)
})
dupes <- do.call(rbind, result)
#test <- vor_overlap[!((vor_overlap$uniqueID %in% dupes$uniqueID)&(vor_overlap$ID %in% dupes$ID)),]
test <- vor_overlap[!(vor_overlap$superID %in% dupes$superID),]
# I'm pretty sure that will do it. I suppose if ANOTHER try was needed we could just (with no tests) drop duplicates.
if (nrow(tri_corners) == nrow(test)) {
warning('Non-unique triangle point to voronoi cell relationships - largest intersecting cell used when necessary')
return(test)
}
stop('Unable to rectify non-unique triangle point to voronoi relationship(s)')
}
|
6e2284c11ff10c31b34d845709d732a3abd4e2d3
|
82c8de125787a80a6fbdc190c72b83c2819275dc
|
/fft_funs_reduced.R
|
f4df93985e4f4d351ee9f3894e3011ca3bc09e3e
|
[] |
no_license
|
chxyself25/GLOBAL-SPACE-TIME-MODELS
|
929b9b6b83d3d91d8a1ff30544f188c2a1b3f48e
|
3c634f0aa0e2b120374ea7c066789113837d9688
|
refs/heads/master
| 2020-04-03T04:01:21.586460
| 2019-11-08T05:42:33
| 2019-11-08T05:42:33
| 155,001,090
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,482
|
r
|
fft_funs_reduced.R
|
# covariance function for section 5.2: single latitude
library(Matrix)
## F operator
DFT_mat <- function(N) {
  # Build the N x N unitary discrete-Fourier-transform matrix:
  # entry (j+1, k+1) is exp(-2*pi*i*j*k/N) / sqrt(N), for j, k in 0..N-1.
  idx <- seq_len(N) - 1L
  outer(idx, idx, function(row, col) exp(-2i * pi * row * col / N)) / sqrt(N)
}
# Foutier transform to Z
FFT <- function(D_mat, T.len = 15, N = 96) {
  # Column-wise blocked Fourier transform: each column of D_mat holds T.len
  # stacked blocks of length N; every block is transformed independently and
  # rescaled by 1/sqrt(N), then the transformed blocks are re-stacked.
  transform_column <- function(column) {
    blocks <- lapply(seq_len(T.len), function(t) {
      fft(column[(N * (t - 1) + 1):(t * N)]) / sqrt(N)
    })
    unlist(blocks)
  }
  apply(D_mat, 2, transform_column)
}
# covariance function for a single latitude band
# N should be fixed at 96 all through the calculation
# l is longitudinal lag, ranges from 0 to 2*pi*(N-1)/N, sysmmetric about pi
KL_l <- function(phi, alpha, nu, l, N = 96) {
  # Covariance of one latitude band at longitudinal lag l: the mean of the
  # Matern-like spectral density times cos(l * k) over wavenumbers k = 0..N-1.
  k <- 0:(N - 1)
  density <- phi / (alpha^2 + 4 * sin(k * pi / N)^2)^(nu + 0.5)
  mean(density * cos(l * k))
}
# function for getting Sigmas in temporal structure, assuming axially symmetry
# covariance matrix for one band at one time
# Eigenvalues of the circulant covariance matrix of one latitude band.
#
# The band's covariance is circulant in longitude, so its eigenvalues are the
# DFT of the first column of covariances (lags 2*pi*x/N).
#
# phi, alpha, nu: spectral-density parameters forwarded to KL_l.
# N: number of longitude points (fixed at 96 throughout this analysis).
# Returns a numeric vector of N eigenvalues.
Sigmas_fft <- function(phi, alpha, nu, N = 96) {
  # Fix: forward N to KL_l. Previously KL_l silently fell back on its own
  # default N = 96, which was only correct when this N was also 96.
  col1 <- sapply(0:(N - 1), function(x) {
    KL_l(phi = phi, alpha = alpha, nu = nu, l = (2 * pi * x) / N, N = N)
  })
  Re(fft(col1))
}
# function for calculating Sigma for a single latitude band
# landfrac: a vector of length N including all land fractions in the latitude band
# phi, alpha, and nu are parameters in spectral density
# psis = c(psi0, psi1) (water and land coefficients)
# Temporal-spectral covariance for one latitude band under an AR-style
# temporal structure with propagation coefficient(s) psis.
#
# phi, alpha, nu: spectral-density parameters (forwarded to Sigmas_fft)
# psis:           water/land temporal coefficient(s), broadcast over N
# Returns a T.len x T.len x N array: mat[i, j, ] is the covariance between
# times i and j at each of the N spatial frequencies.
Sigma_fft <- function(phi, alpha, nu, psis, T.len = 15, N = 96) {
# NOTE(review): Sigmas_fft is called with its own default N (96) regardless
# of the N given here -- confirm this is intended when N != 96.
sigmas <- Sigmas_fft(phi = phi, alpha = alpha, nu = nu)
psi <- rep(1,N)*psis
# temps[i, ] = innovation variance contributed at time i, scaled by psi^(2i-2)
temps <- matrix(NA, ncol = N, nrow = T.len)
temps[1,] <- sigmas
for (i in 2:T.len) {
temps[i,] <- sigmas*psi^(2*i-2)
}
# temps1[i, ] = variance accumulated up to time i (column-wise cumulative sum)
temps1 <- apply(temps,2,cumsum)
mat <- array(NA, dim = c(T.len, T.len, N))
# Off-diagonal blocks: cov(t_i, t_j) = accumulated var at min(i,j) * psi^|j-i|
for (i in 1:(T.len-1)){
for (j in (i+1):T.len){ # j>i
mat[i,j,] <- temps1[i,]*psi^(j-i)
mat[j,i,] <- mat[i,j,]
}
}
# Diagonal blocks: the accumulated variances themselves
for (i in 1:T.len){
mat[i,i,] <- temps1[i,]
}
return(mat)
}
## rearrange Sigma and Z_mat
rearr_Sigma <- function(Sigma) {
  # Split a T x T x N covariance array into a list of N T x T matrices,
  # one per spatial frequency (third-dimension slab).
  lapply(seq_len(dim(Sigma)[3]), function(band) Sigma[, , band])
}
rearr_Data <- function(Z_mat, T.len = 15, N = 96) {
  # Regroup rows of Z_mat by frequency: element i of the result collects the
  # T.len rows belonging to frequency i (rows i, i+N, i+2N, ...).
  lapply(seq_len(N), function(freq) Z_mat[seq(freq, T.len * N, by = N), ])
}
################ reml negative likelihood function for single band #######################
# Negative restricted log-likelihood for a single latitude band.
#
# pars:  c(phi, alpha, nu, psi) -- spectral and temporal parameters
# T.len: number of time points; N: longitude points; R: replicate runs
# D:     matrix of deviations from the ensemble mean, one column per run.
#        NOTE(review): the default D = D_mat relies on a global defined in the
#        surrounding script -- fragile outside that script.
# Returns -loglik (so it can be minimized, e.g. by constrOptim). Prints the
# current parameters and log-likelihood to the console as a side effect.
reml_neglik_reduced <- function(pars, T.len = 15, N = 96, R = 5, D = D_mat){
# Foutier transform
Z_mat <- FFT(D, T.len = T.len, N = N)
phi1=pars[1]
alpha1=pars[2]
nu1=pars[3]
psis1=pars[4]
cat("paras = ", pars, ", \t")
Sigma_mat<-Sigma_fft(phi=phi1, alpha=alpha1, nu=nu1, psis=psis1, T.len = T.len, N = N)
# rearrange columns and rows
Sigma_block <- rearr_Sigma(Sigma_mat)
Z_block <- rearr_Data(Z_mat, T.len, N)
Sigma_block_inv <- lapply(Sigma_block, solve)
eigen_sigma_block <- lapply(Sigma_block, function(x){
return(eigen(x,symmetric = TRUE)$values)
})
eigen_sigma <- unlist(eigen_sigma_block)
# A = log-determinant term over all per-frequency blocks
A <- -0.5 * (R-1) * sum( log( eigen_sigma ) )
# B = quadratic form, accumulated over runs and frequencies
B <- 0
for (i in 1:R){
for (j in 1:N){
B0 <- Re( Conj(t( Z_block[[j]][,i] )) %*% Sigma_block_inv[[j]] %*% Z_block[[j]][,i] )
B <- B + B0
}
}
loglike<- A - 0.5 * B #- 0.5*T.len*N*(R-1)*log(2*pi) - 0.5*T.len*N*log(R)
cat("loglike = ", loglike, "\n\n")
return(-loglike)
}
##### run
# Load five replicate climate runs, extract one latitude band, form the
# deviations from the ensemble mean, and fit the parameters by constrained
# optimization.
# NOTE(review): absolute, user-specific paths -- not portable.
R01 <- readRDS("/Users/apple/Desktop/ISU 2018 fall/STAT 606/final_project/data/2000_5_2100/2000_5_2100_R01.rds")
R02 <- readRDS("/Users/apple/Desktop/ISU 2018 fall/STAT 606/final_project/data/2000_5_2100/2000_5_2100_R02.rds")
R03 <- readRDS("/Users/apple/Desktop/ISU 2018 fall/STAT 606/final_project/data/2000_5_2100/2000_5_2100_R03.rds")
R04 <- readRDS("/Users/apple/Desktop/ISU 2018 fall/STAT 606/final_project/data/2000_5_2100/2000_5_2100_R04.rds")
R05 <- readRDS("/Users/apple/Desktop/ISU 2018 fall/STAT 606/final_project/data/2000_5_2100/2000_5_2100_R05.rds")
# Keep only latitude band 32 from each run, flattened to a vector
lat_id <- 32
R01=c(R01[lat_id,,])
R02=c(R02[lat_id,,])
R03=c(R03[lat_id,,])
R04=c(R04[lat_id,,])
R05=c(R05[lat_id,,])
R <- cbind(R01,R02,R03,R04,R05)
R_bar <- apply(R,1,mean)
# Deviations of each run from the ensemble mean
D1=R01-R_bar
D2=R02-R_bar
D3=R03-R_bar
D4=R04-R_bar
D5=R05-R_bar
# T=21;N=96;M=1;R=5
D_mat = cbind(D1,D2,D3,D4,D5)
################# constrOptim
# Constraints UI %*% theta >= CI, i.e. all four parameters must stay positive
UI <- diag(4)
UI
CI <- c(0,0,0,0)
CI
result <- constrOptim(theta = c(0.5, 0.001, 0.004, 0.005), f = reml_neglik_reduced, grad = NULL,
ui = UI, ci = CI)
|
5f0b431a85eaa3bb5374fcadce9a05be17fa5fd3
|
2b811017ef9ad341af3bf7fd94703da31a7a0f24
|
/common.R
|
b6c6c85a000231062d59e2d83eb939ce53c909ca
|
[] |
no_license
|
maptracker/ProgrammingAssignment3
|
c01425528bdf70b57faa2e3c4c58aec74d2257fb
|
d655490085864ecb6fc39c49de8bf141a7f14d60
|
refs/heads/master
| 2021-01-10T07:54:14.801014
| 2015-10-29T14:02:15
| 2015-10-29T14:02:15
| 45,136,728
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,554
|
r
|
common.R
|
# Coursera course assignment
# https://class.coursera.org/rprog-033/assignment/view?assignment_id=7
# These are functions common to all three sub-assignments
readOoCM <- function(file = "outcome-of-care-measures.csv") {
  # Read the outcome-of-care-measures CSV and return a data.frame whose
  # measurement columns are coerced to numeric ("Not Available" -> NA).
  #
  # Layout: 10 leading name/address columns (character) followed by
  # `numBlocks` measurement blocks of 6 columns each.
  addressCols <- rep("character", 10)
  # The blocks of data columns, six per measured outcome:
  dataSet <- c("numeric", "character",
               "numeric", "numeric", "numeric", "character")
  # number of different measurements (was hard-coded as 6 in the rep() call):
  numBlocks <- 6
  dataCols <- rep(dataSet, numBlocks)
  allCols <- c(addressCols, dataCols)
  # read.csv insists on treating quoted values as strings even when a numeric
  # colClass is requested (https://stackoverflow.com/a/6616047), so read
  # everything as character and coerce the numeric columns afterwards.
  oocm <- read.csv(file,
                   na.strings = c("Not Available"),
                   quote = "\"",
                   colClasses = "character")
  for (i in seq_along(allCols)) {
    if (allCols[i] == "numeric") {
      oocm[, i] <- as.numeric(oocm[, i])
    }
  }
  oocm
}
validateState <- function(oocm, state) {
  # Abort with "invalid state" unless `state` appears in oocm$State.
  if (!any(state %in% oocm$State)) {
    stop("invalid state")
  }
}
#validOutcomes <- c("heart attack", "heart failure", "pneumonia")
validateOutcome <- function(outcome) {
  # Translate a user-facing outcome name into the column-name fragment used
  # in the dataset; aborts with "invalid outcome" on anything unrecognized.
  lookup <- c("heart attack"  = "Heart.Attack",
              "heart failure" = "Heart.Failure",
              "pneumonia"     = "Pneumonia")
  idx <- match(outcome, names(lookup))
  if (is.na(idx)) {
    stop("invalid outcome")
  }
  unname(lookup[idx])
}
fullOutcomeColumn <- function(outcome) {
  # Compose the full 30-day mortality-rate column name for a validated
  # outcome; aborts (inside validateOutcome) on an unrecognized outcome.
  fragment <- validateOutcome(outcome)
  paste("Hospital.30.Day.Death..Mortality..Rates.from", fragment,
        sep = ".", collapse = "")
}
normalizeIndex <- function(oocmPart, num) {
  # Resolve a requested rank ("best", "worst", or a number) into a row index
  # within oocmPart. Returns NA when a numeric rank is out of bounds.
  numHosp <- nrow(oocmPart)
  if (identical(num, "best")) {
    # First position
    return(1)
  }
  if (identical(num, "worst")) {
    # Last position
    return(numHosp)
  }
  # Fix: use scalar short-circuit || instead of the vectorized | in a
  # scalar if() condition.
  if (num > numHosp || num < 1) {
    # out of bounds
    return(NA)
  }
  num
}
|
f83351d964ce2ebea663e82849dfacd83cd1340f
|
c005f838c7f71d734a1409c003b053d5fdcefb87
|
/hackthon2021/ui.R
|
17c81ee40eed685fdb77712fc48213f009b43af0
|
[] |
no_license
|
JJimLee/MountainMadness
|
199321cdc2cbc719c6364c18767cf101c997be72
|
c05917d7a0601c75711b1ef0e0fc62caaa2ddb5d
|
refs/heads/master
| 2023-03-07T04:28:31.112442
| 2021-02-14T18:30:03
| 2021-02-14T18:30:03
| 338,662,340
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# NOTE(review): getwd() result is discarded -- this call has no effect.
getwd()
library(shiny)
library(ggplot2)
library(tidyverse)
library(dplyr)
library(cowplot)
# Stock names offered in the selector.
# NOTE(review): tibble::data_frame() is deprecated upstream -- consider tibble().
name_df = data_frame("name"=c("Apple","Google","IBM","Cisco","MasterCard","Amazon","Adobe","Microsoft","Comcast_Corporation","Facebook","AT&T","Visa"))
#IBM Paypal Netflix NVDA Oracle Adobe Comcast_Corporation cisco AT&T MasterCard Visa Facebook Alphabet Amozan Apple Microsoft
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel("Time series forecasting"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
helpText("stock market forecasting"),
# Stock picker, defaulting to Apple
selectInput("var",
label = strong("Choose a stock to display"),
choices = name_df$name,
selected = "Apple"),
numericInput("num",label = "Please enter the days you want to forecast ((max=20))",1,min = 1, max =20),
br(),
submitButton("Let's forecast!")
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
))
|
c20356251cd24f1931539c83eb91a249dd1c76b7
|
58b0cfbf14d5adcf8b84bedf827d7cbde1c3e1dd
|
/data-raw/usa_fc.R
|
03133c3532a100337be67f3b6c6695b6946241f4
|
[] |
no_license
|
ipeaGIT/gtfs2emis
|
5c40fcb787eaa72a6b4e67c9199c5220a7542d10
|
d68ffe805a66229f73df76de1e9b42c39d981688
|
refs/heads/master
| 2023-07-19T16:04:46.727586
| 2023-07-13T15:31:34
| 2023-07-13T15:31:34
| 218,305,263
| 23
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,491
|
r
|
usa_fc.R
|
# ------------------------------------------------------------
# based on EMFAC2017 "Volume III – Technical Documentation V1.0.2 July 20, 2018"
# ------------------------------------------------------------
# Table 4.3-55 Speed Correction Factors for Diesel Buses
# ------------------------------------------------------------
# Diesel Bus | HD Diesel Truck | Speed Correction Curve
# Pre-2003 | Pre-2003 | SCF for Pre-2003 HDDT
# 2003-2006 | 2006-2006 | SCF for 2003-2006 HDDT
# 2007-2009 | 2007-2009 | SCF for 2007-2009 HDDT
# 2020+ | 2013+ | SCF for 2013+ HDDT
# NOTE(review): rm(list=ls()) wipes the user's workspace -- avoid in scripts.
rm(list=ls())
library(magrittr)
library(ggplot2)
# ---------------------------------------------------------
# Table 4.3-49: Coefficients for EMFAC2017 HHDDT Speed Corrections Factors
# ---------------------------------------------------------
`%nin%` = Negate(`%in%`)
# One row per (pollutant, model-year group); columns a,b drive the low-speed
# power law and c,d,e the quadratic curve used at 18.8-65 mph.
table4349 <- data.table::data.table(
pollutant = c("HC","CO","NOx","NOx","PM","CO2"),
from_year_group = c(2010,2010,2010,2013,2010,2010),
to_year_group = c(2020,2020,2012,2020,2020,2020),
a = c(0.553, 3.64, 21.7, 21.3, 0,7450),
b = c(-1.15, -1.2,-0.527,-0.702 ,0,-0.469),
c = c(3.77*10^(-2), .35, 10.2, 6.14, 7.34, 3610),
d = c(-1.33*10^(-3), -1.24*10^(-2), -3.85, -0.225, -0.297, 99.8),
e = c(1.48*10^(-5), 1.19*10^(-4), 4.28*10^(-3), 2.25*10^(-3), 7.34*10^(-3), 1.07))
# Duplicate rows per speed regime: the ">=18.8mph" copy zeroes a,b; the
# "<18.8mph" rows zero c,d,e; PM uses a single "all"-speeds curve.
table4349_above <- data.table::copy(table4349)[pollutant %nin% "PM",]
table4349_above[ , `:=`(speed = "18.8-65mph",
a = 0, b = 0)]
table4349[pollutant %in% "PM",`:=`(speed = "all",
a = 0,
b = 0)]
table4349[pollutant %nin% "PM",`:=`(speed = "<18.8mph",
c = 0,
d = 0,
e = 0)]
table4349 <- data.table::rbindlist(list(table4349,table4349_above))
# ---------------------------------------------------------
# Eq.4.3-10 SCF for all pollutants and speed below 18.8 mph
# for PM, only Eq. 4.3.11 is applied
# Eq.4.3.11 SCF for all pollutants and speed between 18.8 and 65 mph
# ---------------------------------------------------------
function_scf_diesel <- function(a, b, c, d, e, speed, temp_speed) {
  # Speed-correction factor for diesel HDDT, normalized at 18.8 mph.
  # "<18.8mph" uses the power law (EMFAC2017 Eq. 4.3-10); "18.8-65mph" and
  # "all" use the quadratic form (Eq. 4.3-11).
  power_curve <- function(v) a * v^b
  quad_curve <- function(v) c + d * v + e * v^2
  if (identical(temp_speed, "<18.8mph")) {
    power_curve(speed) / power_curve(18.8)
  } else if (temp_speed %in% c("18.8-65mph", "all")) {
    quad_curve(speed) / quad_curve(18.8)
  } else {
    # Mirror switch(): an unmatched selector yields NULL.
    invisible(NULL)
  }
}
# ---------------------------------------------------------
# CNG Buses SCF
# ---------------------------------------------------------
# Table 4.3-57: Speed Correction Factors for 2008+ Model Year CNG Buses
# ---------------------------------------------------------
function_scf_cng <- function(speed, pol) {
  # Speed-correction factor for 2008+ model-year CNG buses
  # (EMFAC2017 Table 4.3-57 regression curves, one curve per pollutant).
  if (identical(pol, "PM")) {
    # PM uses a quadratic-in-speed curve rather than a log curve.
    return(6.34 * 10^(-6) * speed^2 - 6.16 * 10^(-4) * speed + 1.74 * 10^(-2))
  }
  # The remaining pollutants share the form slope * log(speed) + intercept.
  log_curves <- list(
    "HC"  = c(-1.031, 5.906),
    "CO"  = c(-2.076, 16.22),
    "NOx" = c(-0.130, 0.727),
    "CO2" = c(-549, 3597)
  )
  curve <- log_curves[[pol]]
  if (is.null(curve)) {
    # Mirror switch(): an unknown pollutant yields NULL.
    return(invisible(NULL))
  }
  curve[1] * log(speed) + curve[2]
}
# --------------------------
# analysis
# --------------------------
# Build the full SCF table over every (pollutant, speed, year, fuel) combo.
year <- 1970:2020
# Speeds 1..104 km/h, converted to mph for the EMFAC curves
speed <- 1:104 %>% units::set_units("km/h") %>% units::set_units("mile/h") %>% as.numeric()
vector_pol <- c("HC","CO","NOx","PM","CO2")
fuel <- c("Diesel","CNG")
# --------------------------
# SCF loop
# --------------------------
# Four nested lapply's: pollutant (j) x speed (i) x year (k) x fuel (l);
# each level is row-bound back into one data.table.
dt <- lapply(1:length(vector_pol),function(j){
dt <- lapply(1:length(speed),function(i){
dt <- lapply(1:length(year),function(k){
dt <- lapply(1:length(fuel),function(l){# j = 1 | k = 39 | i = 1
# message
# message(paste0("pol = ",vector_pol[j]," | year = ",year[k]," | speed = ",round(speed[i],3)))
#message(paste0("j = ",j," ; k = ",k," ; i = ",i,"; l = ",l))
# speed
temp_speed <- ifelse(speed[i] < 18.8 ,"<18.8mph","18.8-65mph")
# Pick the coefficient row matching pollutant, speed regime and year group
temp_table <- data.table::copy(table4349)[pollutant %in% vector_pol[j] &
speed %in% temp_speed &
from_year_group <= year[k] &
to_year_group >= year[k], ]
if(vector_pol[j] == "PM"){
temp_speed <- "all"
temp_table <- data.table::copy(table4349)[pollutant %in% vector_pol[j] &
from_year_group <= year[k], ]
}
# No matching coefficient row -> no correction (factor 1)
if(nrow(temp_table) == 0){
scf <- 1
}else{
scf <- function_scf_diesel(a = temp_table$a,
b = temp_table$b,
c = temp_table$c,
d = temp_table$d,
e = temp_table$e,
temp_speed = temp_table$speed,
speed = speed[i])
}
#
# cng buses 2008+
#
# CNG buses from 2008 on use their own curves, overriding the diesel SCF
if(year[k] >= 2008 & fuel[l] == "CNG"){
scf <- function_scf_cng(speed = speed[i],pol = vector_pol[j])
}
# export dt SCF correction
export_dt <- data.table::data.table("speed" = speed[i],
"scf" = scf,
"pollutant" = vector_pol[j],
"year" = year[k],
"fuel" = fuel[l])
return(export_dt)
}) %>% data.table::rbindlist()
return(dt)
}) %>% data.table::rbindlist()
return(dt)
}) %>% data.table::rbindlist()
return(dt)
}) %>% data.table::rbindlist()
# ------------------------------------------------------------
# Table 4.3-56 Speed Correction Factors for CNG Buses
# ------------------------------------------------------------
# CNG Bus | HD Diesel Truck | Speed Correction Curve
# Pre-2003 | 2003-2006 | SCF for 2003-2006 HDDT
# 2003-2007 | 2007-2009 | SCF for 2007-2009 HDDT
# 2008+ | ---- | SCF based on CNG HDT data
# ------------------------------------------------------------
# Sanity plot: 2008 CNG SCF curves by pollutant
ggplot() +
geom_point(data = dt[year %in% 2008 & fuel %in% "CNG",],
aes(x = speed,y = scf,color = pollutant),stat = "identity") +
facet_grid(rows = vars(pollutant),scales = "free")
#
#
#
# NOTE(review): scratch lines below; scf() is not defined anywhere in this
# file (only function_scf_diesel / function_scf_cng exist), so the final
# call will error -- confirm intent or remove.
pol = "CO"
year_group = 2012
speed = 20
scf(pol = "CO2",speed = 15,year_group = 2010)
|
39159051a4bdb41b3d627794a17253a793153780
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggspectra/examples/s.e.response_label.Rd.R
|
03c4b5ffd7359c67352e2abc3783510112445a0d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
s.e.response_label.Rd.R
|
library(ggspectra)
### Name: s.e.response_label
### Title: spectral response axis labels
### Aliases: s.e.response_label s.q.response_label
### ** Examples
# NOTE(review): the examples call counts_label(), not the documented
# s.e.response_label() -- presumably extracted from a different Rd page; verify.
counts_label()
counts_label("R.expression")
counts_label("LaTeX")
|
eba947572dbe5ceafa5967576f759e2ef994af24
|
48aaf67e29ebf44aadea93c2c51cec2a5797464e
|
/code/code_macro_ggplot.R
|
f0583204f92d9f5a714ac8c26550fbc0fdd61a11
|
[] |
no_license
|
Osant11/ggplot_function
|
81fdf11156885d9cf87856c75131d1a02e1f338b
|
1ead432c16183166a0a9ca0d9c9bced6aed7f6e8
|
refs/heads/master
| 2022-11-21T05:15:09.469419
| 2020-07-30T14:20:37
| 2020-07-30T14:20:37
| 283,795,119
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,532
|
r
|
code_macro_ggplot.R
|
library(haven)
library(ggplot2)
library(plyr)
library(dplyr)
library(shiny)
library(tidyr)
library(knitr)
library(stringr)
library(DT)
library(tidyverse)
library(plyr)
library(shinyWidgets)
library(jsonlite)
library(gmodels)
library(gridExtra)
library(grid)
library(egg)
library(ggpubr)
library(foreign)
library(writexl)
library("openxlsx")
### Load dataset ###
# Count distinct subjects per treatment arm: keep one row per id_num, then
# tally by TRT. (Relies on data1_sum being loaded beforehand.)
trt_n <- data1_sum %>%
group_by(id_num) %>%
filter(!duplicated(id_num)) %>%
group_by(TRT) %>%
dplyr::summarise(n_trt = n())
### FUNCTION FOR SUMMARY STATISTICS ###
# Per-(group, VISIT, VISIT_NUM, xaxis) summary statistics of the column named
# by `var`: n, mean, sum, sd, median, quartiles, min/max, plus derived
# standard-error and confidence/sd/se band bounds used by macro_plot.
#
# dataset: data frame with VISIT, VISIT_NUM, xaxis and the `group`/`var` columns
# group:   grouping column name as a string
# var:     value column name as a string
# NOTE(review): group_by_() is deprecated in current dplyr -- consider
# group_by(across(all_of(...))) when upgrading.
quanti_stat_visit_plot <- function(dataset, group, var){
dataset %>%
group_by_(group, as.symbol("VISIT"), as.symbol("VISIT_NUM"), as.symbol("xaxis")) %>%
dplyr::summarise(n = n(),
mean = round(mean(!!sym(var), na.rm = T), 3),
sum = round(sum(!!sym(var), na.rm = T), 1),
st = round(sd(!!sym(var), na.rm = T), 3),
Median = round(median(!!sym(var), na.rm = T), 2),
Q1 = round(quantile(!!sym(var), na.rm = T, 0.25, type = 2), 2),
Q3 = round(quantile(!!sym(var), na.rm = T, 0.75, type = 2), 2),
min = round(min(!!sym(var), na.rm = T), 2),
max = round(max(!!sym(var), na.rm = T), 2)) %>%
ungroup() %>%
mutate(mean = ifelse(is.nan(mean), 0, mean),
se = round(st / sqrt(n), 2),
lower.ci = ifelse(is.na(se), 0,round(mean - qt(1 - (0.05 / 2), n - 1) * se, 2)),
upper.ci = ifelse(is.na(se), 0,round(mean + qt(1 - (0.05 / 2), n - 1) * se, 2)),
lowsd = mean - st,
uppsd = mean + st,
lowse = ifelse(is.na(se), 0, mean - se),
uppse = ifelse(is.na(se), 0, mean + se)
)
}
### GGPLOT FUNCTION ###
# Assemble a mean-over-visit line plot with error bars plus a
# number-of-subjects table underneath, stacked via ggarrange, with title and
# footnote text added by grid.arrange.
#
# dataset: output of quanti_stat_visit_plot (must contain the columns named
#          by xaxis/yaxis/ymin/ymax/nsub)
# xaxis/yaxis/group/col/nsub/ymin/ymax: column names or expressions, as strings
# grporder: y-order of the groups in the subjects table
# Remaining arguments control scales, colors, labels, titles and footnotes.
# NOTE(review): aes_string()/arrange_() are deprecated in current ggplot2/dplyr.
macro_plot <- function(dataset, xaxis, yaxis, group, col, nsub, grporder,
ymin, ymax, xlabels, position, yline, yline_alpha,
ylimits, ybreaks, color_grp, xlab, ylab,
titles, footnotes){
# -- Main panel: mean lines, error bars, optional horizontal reference line.
# The line is drawn in two pieces split at xaxis 254 so the two visit
# ranges are not connected across the gap.
p <- ggplot(dataset, aes_string(x = xaxis, y = yaxis, group = group, col = col, shape = group, linetype = group)) +
geom_errorbar(aes_string(ymin = ymin, ymax = ymax), position = position, linetype = "solid", show.legend = F) +
geom_line(data = dataset %>% filter(xaxis <= 254), position = position) +
geom_line(data = dataset %>% filter(xaxis > 254), position = position) +
geom_point(position = position) +
geom_hline(yintercept = yline, alpha = yline_alpha) +
scale_x_discrete(labels = xlabels) +
scale_y_continuous(limits = ylimits,
breaks = ybreaks) +
scale_color_manual(values = color_grp) +
scale_linetype_manual(values=c("dashed", "solid", "dotdash")) +
scale_shape_manual(values=c(0, 1, 2)) +
xlab(xlab) +
ylab(ylab) +
theme_linedraw() +
theme(plot.title = element_text(size = 10),
plot.subtitle = element_text(size = 10),
legend.title = element_blank(),
legend.text=element_text(size=9),
legend.justification = c(0.5, 0.05),
legend.position = c(0.5, 0.85),
legend.direction="horizontal",
legend.key = element_blank(),
legend.background = element_rect(fill="transparent"),
axis.text.x = element_text(angle = -45, hjust = 0),
axis.title.x = element_text(size = 9),
axis.title.y = element_text(size = 9),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
plot.margin = unit(c(0.5,0,-0.5,0), "lines")
)
# -- Text table of per-visit subject counts, one row per group.
df.table <- ggplot(dataset, aes_string(x = xaxis, y = group,
label = nsub, colour = group)) +
geom_text(size = 3) +
scale_y_discrete(limits= grporder) +
scale_color_manual(values = color_grp) +
theme_minimal() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
panel.border = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
plot.margin = unit(c(-0.3,0,0,0), "lines")
)
# -- One-off "Number of subjects" caption row placed above the counts table.
df1 <- dataset %>%
arrange_(group, "xaxis") %>%
filter(!duplicated(xaxis)) %>%
mutate(Subjects1 = ifelse(!duplicated(!!sym(group)), "Number of subjects", ""))
df.table1 <- ggplot(df1, aes(x = xaxis, y = group, label = Subjects1)) +
geom_text(size = 3.5) +
scale_color_manual(values = c("Black")) +
scale_x_discrete(expand = c(0.09,0)) +
theme_minimal() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
panel.border = element_blank(),
axis.text.x = element_blank(),
axis.ticks = element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.text.y=element_blank(),
plot.margin = unit(c(0,0.2,0,0.2), "cm")
)
# -- Stack the three panels and add title/footnote text grobs.
c <- ggpubr::ggarrange(p, df.table1, df.table, ncol = 1, nrow = 3, heights = c(17, 1.2, 2), align = "v")
grid.arrange(c, bottom = textGrob(footnotes,
x = 0.05, hjust = 0, gp = gpar(fontface = 3L, fontsize = 9)),
top = textGrob(titles,
x = 0.05, hjust = 0, gp = gpar(fontface = 3L, fontsize = 9))
)
}
###################################### PLOT VALUE ######################################
###################################### PLOT VALUE ######################################
###################################### PLOT VALUE ######################################
###################################### PLOT VALUE ######################################
# Summarise VALUE by treatment and visit, relabel visit codes, attach the
# per-arm subject counts, then draw the plot with macro_plot.
data1_sum_1 <- quanti_stat_visit_plot(data1_sum, "TRT", "VALUE") %>%
mutate(yaxis = case_when(
TRT == "Treatment A" ~ 1,
TRT == "Treatment B" ~ 2,
TRT == "Treatment C" ~ 3
),
# Map special visit codes to display labels; all others keep their number
xaxis_char = case_when(
xaxis == 5000 ~ "LOT",
xaxis == 10005 ~ "FU7",
xaxis == 10017 ~ "FU30",
xaxis == 10046 ~ "FU90",
xaxis == 15000 ~ "LFU",
TRUE ~ as.character(xaxis)
)
) %>%
arrange(TRT, xaxis) %>%
left_join(trt_n, by = "TRT") %>%
mutate(TRT = paste(TRT, " (N = ", n_trt, ")", sep = ""))
# NOTE(review): grporder hard-codes the N counts -- keep in sync with the data.
p<- macro_plot(dataset = data1_sum_1, xaxis = "as.factor(xaxis)", yaxis = "mean",
group = "TRT", col = "TRT",
nsub = "n",
grporder = c("Treatment C (N = 98)", "Treatment B (N = 103)", "Treatment A (N = 99)"),
xlabels = data1_sum_1$xaxis_char,
ymin = "lowse", ymax = "uppse", position = position_dodge(0.50),
ylimits = c(92.5, 101),
ybreaks = c(seq(92.5, 100, 2.5)),
yline = NULL, yline_alpha = NULL,
color_grp = c("Blue2", "Red2", "Green4"),
xlab = "Analysis visit (Week)",
ylab = "Mean (variable of interest)",
titles = paste("Mean (plus/minus standard error) of (variable of interest), by analysis visit",
"Analysis Set: (Analysis Set)", sep = "\n"),
footnotes = paste("Footnote 1",
"Footnote 2",
paste("Produced on:", format(Sys.time(), format = "%F %R %Z", usetz=F)),
sep = "\n")
)
|
c923ad6254ca1e79bc084f6d23ca05a4f06cbc23
|
810cbb5991fe2214217322fe08045c3c2f2ba242
|
/demo_plotly3.R
|
b39b3e175398e3fbd49228a3c524a7901b800fd5
|
[] |
no_license
|
jpcolino/R_plotly
|
0a55eb77e1bb99a3b397be20b49a53f25ca97dcd
|
edc4e4024155c37022d6b34bf64fe05ba066ea85
|
refs/heads/master
| 2020-04-06T04:00:31.354747
| 2014-08-19T13:12:43
| 2014-08-19T13:12:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 995
|
r
|
demo_plotly3.R
|
# Demo: four scatter traces on a 2x2 grid of shared/independent axes,
# uploaded to the plotly service.
library(plotly)
# Placeholder credentials -- replace with a real username/API key before running.
p <- plotly(username="****", key="****")
trace1 <- list(
x = c(1, 2, 3),
y = c(2, 3, 4),
type = "scatter"
)
# trace2 shares the y axis with trace1 but uses its own x axis (x2)
trace2 <- list(
x = c(20, 30, 40),
y = c(5, 5, 5),
xaxis = "x2",
yaxis = "y",
type = "scatter"
)
# trace3 shares the x axis with trace1 but uses its own y axis (y3)
trace3 <- list(
x = c(2, 3, 4),
y = c(600, 700, 800),
xaxis = "x",
yaxis = "y3",
type = "scatter"
)
# trace4 has fully independent axes (x4/y4)
trace4 <- list(
x = c(4000, 5000, 6000),
y = c(7000, 8000, 9000),
xaxis = "x4",
yaxis = "y4",
type = "scatter"
)
data <- list(trace1, trace2, trace3, trace4)
# Axis domains split the canvas into a 2x2 grid; anchors pair x4 with y4.
layout <- list(
xaxis = list(domain = c(0, 0.45)),
yaxis = list(domain = c(0, 0.45)),
xaxis4 = list(
domain = c(0.55, 1),
anchor = "y4"
),
xaxis2 = list(domain = c(0.55, 1)),
yaxis3 = list(domain = c(0.55, 1)),
yaxis4 = list(
domain = c(0.55, 1),
anchor = "x4"
)
)
# Upload the figure (overwriting any existing file of the same name)
response <- p$plotly(data, kwargs=list(layout=layout, filename="shared-axes-subplots", fileopt="overwrite"))
url <- response$url
filename <- response$filename
|
de2b3c7f38adda03f9adcc898a20986dff8c23d7
|
527d1b56858db7817dc4a855ae5758bfb5d6f7ec
|
/man/polish_publisher_forby.Rd
|
2168cca5597310b9a621a3cd4ae55602b2a2a48d
|
[] |
no_license
|
aa-m-sa/estc
|
8d758a743f415a5e8957505fa3493373fb58f9d9
|
eaa6dd925d67fec214f61bbf23d7ee1eaf15fcd5
|
refs/heads/master
| 2020-04-03T16:11:23.692465
| 2016-02-08T14:42:01
| 2016-02-08T14:42:01
| 51,322,179
| 1
| 0
| null | 2016-02-08T19:46:17
| 2016-02-08T19:46:15
| null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
polish_publisher_forby.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polish_publisher_forby.R
\name{polish_publisher_forby}
\alias{polish_publisher_forby}
\title{Polish publisher}
\usage{
polish_publisher_forby(x)
}
\arguments{
\item{x}{Publisher vector}
}
\value{
Polished vector
}
\description{
Polish publisher field separating for/by
}
\details{
Polish publisher field.
}
\examples{
# polish_publisher("printed and sold by R. Marchbank")
}
\author{
Leo Lahti \email{leo.lahti@iki.fi}
}
\references{
See citation("bibliographica")
}
\keyword{utilities}
|
f2fa54f0f3fe4914d235bcf06c1cd94c6d0a5a2a
|
3fe32caad93364807d28380dde65409429324649
|
/man/image.pyn.psres.Rd
|
bdbc4d84e79436280ddc9bf6c0f54516ae4dab99
|
[] |
no_license
|
serhalp/pyn
|
c890b80f0286fba3c03413a511254fa65403ce06
|
6b88b622f44df69f2ec737096bf48f9d0e491544
|
refs/heads/master
| 2021-01-20T06:57:00.226739
| 2014-05-21T15:30:26
| 2014-05-21T15:30:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,484
|
rd
|
image.pyn.psres.Rd
|
\name{image.pyn.psres}
\alias{image.pyn.psres}
\title{
Spatial distribution of probe residuals
}
\description{
Generate a heat map of probe residuals in a dataset, for visual inspection of empirical
spatial distribution.
}
\usage{
image.pyn.psres(batch, which = 1:length(batch), transfo = log2, draw.legend = TRUE,
shuffle = FALSE, ...)
}
\arguments{
\item{batch}{
an 'AffyBatch' object containing the probe intensities to analyze.
}
\item{which}{
indices of arrays in the batch to analyze; one plot will be generated per array.
}
\item{transfo}{
function to apply to each probe intensity before analysis.
}
\item{draw.legend}{
if TRUE, draw a legend for the heat map, relating colours to values; if FALSE, don't.
}
\item{shuffle}{
if TRUE, spatially shuffle the locations of the residuals before generating the
image.
}
\item{\dots}{
further arguments passed on to \code{image}.
}
}
\value{
NULL
}
\references{
Serhal, P. and Lemieux, S. (2012). Correction of spatial bias in oligonucleotide array
data. BMC Bioinformatics, submitted.
}
\author{
Philippe Serhal \email{philippe.serhal@umontreal.ca} \cr
Sébastien Lemieux \email{s.lemieux@umontreal.ca}
}
\seealso{
\link{normalize.pyn}, \link{hist.res.probeset}
}
\examples{
# Load the sample dataset and generate heat map of probe residuals.
data (gse2189, package = "pyn")
image.pyn.psres (gse2189)
}
\keyword{ ~hplot }
\keyword{ ~pyn }
|
68e4e7db3e83849f47e536f9891a01ca8db2ba22
|
88b1344aa6dd01a44141c5971c357f8ecacbd469
|
/R_Indice/MMIRIQ.R
|
46dda36eaea5aa321ac96674e05f3dea25b67e82
|
[] |
no_license
|
Meomeoowww/R
|
8a6790d1b4c5cc38dbbceab47d9112963ef8dd72
|
f3752069a11e450b09f5b61f9ceb1446f0fdaf1a
|
refs/heads/master
| 2020-12-04T22:34:13.455214
| 2020-01-05T14:36:17
| 2020-01-05T14:36:17
| 231,923,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
MMIRIQ.R
|
# Compute two distribution-shape indicators for column `v_input` of `df`:
# a robust interquartile ratio (RIQ = IQR / median) and a multimodality
# index (MMI) based on local maxima of a 10-class histogram.
# Returns c(round(RIQ, 1), round(MMI, 1)).
MMIRIQ <- function(df, v_input) {
# Evaluate and substitute the variable (v_input given unquoted)
dfvar<-eval(substitute(v_input), eval(df))
# Library
# NOTE(review): library() calls inside a function alter the search path as a
# side effect -- prefer requireNamespace()/:: when refactoring.
library(classInt)
nclass=classIntervals(dfvar, n=10, style='equal', intervalClosure='right')
# NOTE(review): Etendue (the range) is computed but never used.
Etendue<-max(dfvar)-min(dfvar)
library(modes)
# NOTE(review): this calls base mode() (storage mode) and discards the
# result -- presumably modes::modes() or similar was intended; verify.
mode(x = nclass$brks)
df$nclass=cut(dfvar, 10)
# Compute the MMI
## Create zee, zee1, vee
zee<-table(df$nclass)
zee1<-as.matrix(zee)
vee<-vector(mode="integer", length=10)
## vee[i] = count difference between class i+1 and class i (last vs. zero)
for(i in 1:10) {
if (i >= 1 & i<=9) {vee[i]<-zee1[i+1]-zee1[i] }
else {vee[i]<- 0-zee1[i]}
}
# qee3 flags classes that are local maxima (sign change + -> -)
qee<-as.vector(sign(vee))
qee3<-vector(mode="integer", length=10)
for (i in 1:10) {
if (i==1 & qee[i]==-1) {qee3[i]<-1}
else if (i==1 & qee[i]==1) {qee3[i]<-0}
else if (i >1 & i<=9 & qee[i]==1 & qee[i+1]==-1) {qee3[i+1]<-1}
#else qee3[i]<-0
}
MMIDATA<-data.frame(classe=zee, Multimode=qee3)
# MMI = (sum of peak counts)^2 / sum of squared peak counts
MMI<-sum(MMIDATA[which(MMIDATA[,3]==1),2])^2/sum(MMIDATA[which(MMIDATA[,3]==1),2]^2)
## Compute the RIQ
RIQ<-IQR(dfvar)/median(dfvar)
Result<- c(round(RIQ, 1), round(MMI,1))
return(Result)
}
|
4888af72e3b3463b9665b6e18ce23ecf9f9647fb
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nproc/examples/print.nproc.Rd.R
|
f6308792909cc8c4fad13c9ff564088ac9ee8f66
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 245
|
r
|
print.nproc.Rd.R
|
library(nproc)

### Name: print.nproc
### Title: Print the nproc object.
### Aliases: print.nproc

### ** Examples

# Simulate a two-feature logistic-regression data set, fit an nproc object
# with LDA, and exercise its print method.
n_obs <- 1000
feats <- matrix(rnorm(n_obs * 2), n_obs, 2)
lin_pred <- 1 + 3 * feats[, 1]
labels <- rbinom(n_obs, 1, 1 / (1 + exp(-lin_pred)))
fit <- nproc(feats, labels, method = "lda")
print(fit)
|
ffe5be0e399f9dfa3645db82905a6aeda7e8c5c7
|
b9cb63e0be55172f45ac7679506fd0e5d11d6571
|
/R/BivBoot.R
|
8d8b64b1b4e8b0c19828f38b6ccb2f77a88a924a
|
[] |
no_license
|
cran/survivalBIV
|
de95fe0fefb5bbe0a7b5ddf1ac08aec993ed3c37
|
b98f7c532f452931274e92c09715315685733ad4
|
refs/heads/master
| 2018-12-29T19:16:45.958191
| 2012-02-28T00:00:00
| 2012-02-28T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 743
|
r
|
BivBoot.R
|
BivBoot <- function(object, t1, t2, n.boot, conf.level, method.boot, new.class) {
  # Bootstrap confidence band for the bivariate distribution estimate at (t1, t2).
  #
  # object      list holding the data set in object$data
  # t1, t2      time points at which the distribution function is evaluated
  # n.boot      number of bootstrap replicates (assumed >= 2; the original-sample
  #             estimate occupies slot 1)
  # conf.level  confidence level of the band
  # method.boot "basic" triggers the basic (reflected) bootstrap interval;
  #             any other value yields the plain percentile interval
  # new.class   estimator class: "CKM", "IPCW1", "IPCW2", "KMPW" or "KMW"
  #
  # Returns c(point estimate, lower band, upper band).
  #
  # FIX: the replicate vector was previously allocated with
  # length = nrow(object$data) but filled at indices 1..n.boot; whenever the
  # two differed, leftover zeros (or silent extension) distorted the
  # quantile-based band. It must have exactly n.boot slots.
  vec <- vector(mode = "numeric", length = n.boot)
  # vec[2:n.boot]: estimates on data sets resampled with replacement.
  for (i in 2:n.boot) {
    x <- with(object, list("data" = data[sample.int(n = nrow(data), replace = TRUE), ]))
    class(x) <- new.class
    x <- Biv(x)
    BivSort(x)
    vec[i] <- BivDist(x, t1, t2)
  }
  # vec[1]: estimate on the original (non-resampled) data.
  x <- switch(new.class, "CKM" = Biv.CKM(object), "IPCW1" = Biv.IPCW1(object), "IPCW2" = Biv.IPCW2(object), "KMPW" = Biv.KMPW(object), "KMW" = Biv.KMW(object))
  BivSort(x)
  vec[1] <- BivDist(x, t1, t2)
  # Percentile interval; for the basic bootstrap reflect it around the
  # point estimate and swap the endpoint names accordingly.
  band <- quantile(vec, probs = c((1 - conf.level)/2, (1 + conf.level)/2))
  if (method.boot == "basic") {
    band <- 2 * vec[1] - rev(band)
    names(band) <- rev(names(band))
  }
  return(c(vec[1], band))
}
|
a17dd45aa915e051ffd35779fd939d3d40e6ef74
|
443f745e549179abb00c1e78ed1fdd4a1a26270d
|
/pareto/man/rpareto.Rd
|
b7dea6daa91ccad0f13f161120ad395c601b097c
|
[] |
no_license
|
ruc067/ucsd-291-computing
|
631817f0e0b2e163ff4c3c83cdb8e73113d74eb5
|
d17c1de4c1227fddea5a3438383a23f7586d1ef0
|
refs/heads/master
| 2020-04-10T07:38:31.028724
| 2018-12-15T18:28:57
| 2018-12-15T18:28:57
| 160,885,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
rd
|
rpareto.Rd
|
\name{rpareto}
\alias{rpareto}
\title{
Random number generator for the Pareto distribution
}
\description{
Generates random variates from the Pareto distribution.
}
\usage{
rpareto(n = 1, alpha = 2, beta = 2)
}
\arguments{
\item{n}{
number of random values to generate
}
\item{alpha}{
vector of alpha parameter values
}
\item{beta}{
vector of beta parameter values
}
}
\details{
The function generates random variates from the Pareto distribution. The RNG method used is the inverse-CDF method.
}
\references{
\href{https://en.wikipedia.org/wiki/Pareto_distribution}{Wikipedia pareto distribution}
}
\examples{
rpareto(2, alpha = 2, beta = 2)
rpareto(1, alpha = -2, beta = 2)
}
\author{
Ruifeng Chen
}
\keyword{ rpareto }% use one of RShowDoc("KEYWORDS")
|
f98c45a32a34cae96a6fe3e642ef2bc45a515c30
|
eb02c02ed9104c3d2117ee09e2e04be40510c70d
|
/man/generalize.hz.Rd
|
72a1f052a3ecb76fc8c35a8d7cbaa326fa1a7e37
|
[] |
no_license
|
rsbivand/aqp
|
e53458c345ecf3e2d86b21907d7ecce874efcbcf
|
0d940dbb433b54b9db8749642e627de38890924c
|
refs/heads/master
| 2022-12-08T20:35:37.938564
| 2020-09-11T03:17:59
| 2020-09-11T03:17:59
| 293,072,930
| 0
| 0
| null | 2020-09-05T12:37:47
| 2020-09-05T12:37:47
| null |
UTF-8
|
R
| false
| false
| 1,519
|
rd
|
generalize.hz.Rd
|
\name{generalize.hz}
\alias{generalize.hz}
\title{Generalize Horizon Names}
\description{Generalize a vector of horizon names, based on new classes, and REGEX patterns.}
\usage{generalize.hz(x, new, pat, non.matching.code, hzdepm, ...)}
\arguments{
\item{x}{a character vector of horizon names}
\item{new}{a character vector of new horizon classes}
\item{pat}{a character vector of REGEX, same length as \code{x}}
\item{non.matching.code}{text used to describe any horizon not matching any item in \code{pat}}
\item{hzdepm}{a numeric vector of horizon mid-points, must not contain NA, same length as \code{x}}
\item{\dots}{additional arguments passed to \code{grep()} such as \code{perl=TRUE} for advanced REGEX}
}
\value{factor of the same length as \code{x}}
\author{Dylan E. Beaudette}
\examples{
\dontrun{
data(sp1)
# check original distribution of hz designations
table(sp1$name)
# generalize
sp1$genhz <- generalize.hz(sp1$name,
new=c('O','A','B','C','R'),
pat=c('O', '^A','^B','C','R'))
# see how we did / what we missed
table(sp1$genhz, sp1$name)
## a more advanced example, requires perl=TRUE
# example data
x <- c('A', 'AC', 'Bt1', '^AC', 'C', 'BC', 'CB')
# new labels
n <- c('A', '^AC', 'C')
# patterns:
# "A anywhere in the name"
# "literal '^A' anywhere in the name"
# "C anywhere in name, but without preceding A"
p <- c('A', '\\^A', '(?<!A)C')
# note additional argument
res <- generalize.hz(x, new = n, pat=p, perl=TRUE)
# double-check: OK
table(res, x)
}
}
\keyword{manip}
|
a18221d5bce7945a3b38159e60be5dc80b1427a3
|
2487dfa8bb23d3e1a9000dba265c416cccb69939
|
/demo/MCMCGuide01.R
|
efce6b1bf353643b57c8c1a2707dd723ec439ef3
|
[] |
no_license
|
cran/R2MLwiN
|
f2c5694b60e3a392ad516ab63689c642f3fc72bb
|
593d94db244d3fc07538aedf83fc183859b9f5fd
|
refs/heads/master
| 2023-03-21T15:14:11.554599
| 2023-03-14T04:40:02
| 2023-03-14T04:40:02
| 17,681,793
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,066
|
r
|
MCMCGuide01.R
|
############################################################################
# MLwiN MCMC Manual
#
# 1 Introduction to MCMC Estimation and Bayesian Modelling . . . . . . . 1
#
# Browne, W.J. (2009) MCMC Estimation in MLwiN, v2.13. Centre for
# Multilevel Modelling, University of Bristol.
############################################################################
# R script to replicate all analyses using R2MLwiN
#
# Zhang, Z., Charlton, C., Parker, R, Leckie, G., and Browne, W.J.
# Centre for Multilevel Modelling, 2012
# http://www.bristol.ac.uk/cmm/software/R2MLwiN/
############################################################################
# 1.1 Bayesian modelling using Markov Chain Monte Carlo methods . . . . . .1
# 1.2 MCMC methods and Bayesian modelling . . . . . . . . . . . . . . . . .2
# 1.3 Default prior distributions . . . . . . . . . . . . . . . . . . . . .4
# 1.4 MCMC estimation . . . . . . . . . . . . . . . . . . . . . . . . . . .5
# 1.5 Gibbs sampling . . . . . . . . . . . . . . . . . . . . . . . . . . . 5
# 1.6 Metropolis Hastings sampling . . . . . . . . . . . . . . . . . . . . 8
# 1.7 Running macros to perform Gibbs sampling and Metropolis
# Hastings sampling on the simple linear regression model . . . . . . 10
library(R2MLwiN)
# Locate the MLwiN installation; keep prompting until an executable path is found.
mlwin <- getOption("MLwiN_path")
while (!file.access(mlwin, mode = 1) == 0) {
cat("Please specify the root MLwiN folder or the full path to the MLwiN executable:\n")
mlwin <- scan(what = character(0), sep = "\n")
mlwin <- gsub("\\", "/", mlwin, fixed = TRUE)
}
options(MLwiN_path = mlwin)
## save current par settings (restored at the end of the script)
mypar <- par(no.readonly = TRUE)
## Read tutorial data
data(tutorial, package = "R2MLwiN")
set.seed(1)
## Set variables and precompute the sufficient statistics of the
## simple linear regression normexam ~ standlrt
y <- tutorial$normexam
x <- tutorial$standlrt
N <- length(y)
xsq <- x^2
xy <- x * y
sumy <- sum(y)
sumx <- sum(x)
sumxsq <- sum(xsq)
sumxy <- sum(xy)
## Starting values for parameter estimates
beta0 <- 0
beta1 <- 0
sigma2e <- 1
# epsilon: shape/rate of the weakly informative gamma prior on 1/sigma2e
epsilon <- 0.001
burnin <- 500
chain <- 5000
totaliterations <- burnin + chain
thinning <- 1
# Storage for the thinned post-burn-in draws (one column per parameter)
estimates <- matrix(, nrow = floor(chain/thinning), 3)
rownames(estimates) <- which((1:chain)%%thinning == 0)
colnames(estimates) <- c("beta0", "beta1", "sigma2e")
j <- 1
# Gibbs sampler: draw each parameter from its full conditional in turn.
for (i in 1:totaliterations) {
beta0 <- rnorm(1, (sumy - beta1 * sumx)/N, sqrt(sigma2e/N))
beta1 <- rnorm(1, (sumxy - beta0 * sumx)/sumxsq, sqrt(sigma2e/sumxsq))
# Residual sum of squares feeds the inverse-gamma update of sigma2e
e2i <- (y - (beta0 + beta1 * x))^2
sume2i <- sum(e2i)
sigma2e <- 1/rgamma(1, epsilon + N/2, epsilon + sume2i/2)
# Keep only thinned draws after the burn-in period
if ((i%%thinning == 0) && (i > burnin)) {
estimates[j, ] <- round(c(beta0, beta1, sigma2e), 3)
j <- j + 1
}
}
# Posterior means and standard deviations of the retained draws
sumstat <- round(rbind(colMeans(estimates), apply(estimates, 2, sd)), 4)
rownames(sumstat) <- c("mean", "sd")
print(sumstat)
# 1.8 Dynamic traces for MCMC: one trace plot per parameter . . . . . . . 12
par(mfrow = c(3, 1), mar = c(4, 4.5, 2, 2))
plot(1:nrow(estimates), estimates[, "beta0"], xlab = "iteration", ylab = expression(paste("Est. of ", beta[0])), type = "l")
plot(1:nrow(estimates), estimates[, "beta1"], xlab = "iteration", ylab = expression(paste("Est. of ", beta[1])), type = "l")
plot(1:nrow(estimates), estimates[, "sigma2e"], xlab = "iteration", ylab = expression(paste("Est. of ", sigma[e]^2)),
type = "l")
# 1.9 Macro to run a hybrid Metropolis and Gibbs sampling method
# for a linear regression example . . . . . . . . . . . . . . . . . . 15
set.seed(1)
## Set variables and sufficient statistics (same data and model as above)
y <- tutorial$normexam
x <- tutorial$standlrt
N <- length(y)
xsq <- x^2
xy <- x * y
sumy <- sum(y)
sumx <- sum(x)
sumxsq <- sum(xsq)
sumxy <- sum(xy)
## Starting values for parameter estimates
beta0 <- 0
beta1 <- 0
sigma2e <- 1
epsilon <- 0.001
burnin <- 500
chain <- 5000
## Random-walk proposal standard deviations and acceptance counters
beta0sd <- 0.01
beta1sd <- 0.01
beta0accept <- 0
beta1accept <- 0
totaliterations <- burnin + chain
thinning <- 1
estimates <- matrix(, nrow = floor(chain/thinning), 3)
rownames(estimates) <- which((1:chain)%%thinning == 0)
colnames(estimates) <- c("beta0", "beta1", "sigma2e")
j <- 1
# Hybrid sampler: Metropolis random-walk updates for beta0 and beta1,
# Gibbs update for the residual variance sigma2e.
for (i in 1:totaliterations) {
# Update beta0: propose a new value from a normal random walk
beta0prop <- rnorm(1, beta0, beta0sd)
# Log posterior difference between proposed and current beta0
beta0logpostdiff <- -1 * (2 * (beta0 - beta0prop) * (sumy - beta1 * sumx) + N * (beta0prop^2 - beta0^2))/(2 *
sigma2e)
if (beta0logpostdiff > 0) {
# Definitely accept as higher posterior
beta0 <- beta0prop
beta0accept <- beta0accept + 1
} else {
# Only sometimes accept (Metropolis acceptance rule)
if (runif(1) < exp(beta0logpostdiff)) {
beta0 <- beta0prop
beta0accept <- beta0accept + 1
}
}
# Update beta1 in the same way
beta1prop <- rnorm(1, beta1, beta1sd)
beta1logpostdiff <- -1 * (2 * (beta1 - beta1prop) * (sumxy - beta0 * sumx) + sumxsq * (beta1prop^2 - beta1^2))/(2 *
sigma2e)
if (beta1logpostdiff > 0) {
# Definitely accept as higher posterior
beta1 <- beta1prop
beta1accept <- beta1accept + 1
} else {
# Only sometimes accept
if (runif(1) < exp(beta1logpostdiff)) {
beta1 <- beta1prop
beta1accept <- beta1accept + 1
}
}
# Gibbs update for the residual variance given the current betas
e2i <- (y - (beta0 + beta1 * x))^2
sume2i <- sum(e2i)
sigma2e <- 1/rgamma(1, epsilon + N/2, epsilon + sume2i/2)
# Keep only thinned draws after the burn-in period
if ((i%%thinning == 0) && (i > burnin)) {
estimates[j, ] <- round(c(beta0, beta1, sigma2e), 3)
j <- j + 1
}
}
# Posterior summaries and Metropolis acceptance rates
sumstat <- round(rbind(colMeans(estimates), apply(estimates, 2, sd)), 4)
rownames(sumstat) <- c("mean", "sd")
print(sumstat)
cat(paste("The acceptance rate of beta0: ", round(beta0accept/totaliterations, 3), "\n"))
cat(paste("The acceptance rate of beta1: ", round(beta1accept/totaliterations, 3), "\n"))
# 1.10 MCMC estimation of multilevel models in MLwiN . . . . . . . . . . .18
# Chapter learning outcomes . . . . . . . . . . . . . . . . . . . . . . . 19
## reinstate par settings saved at the top of the script
par(mypar)
############################################################################
|
86cddbc47f76f584ae5372a37f8059f6c85a7400
|
a52d2e7daf2471b89d559cd516d3652dbe6315f3
|
/Code/02_3c_Format GLOBI.R
|
b746e6e3430b74c5852ae70bfa990f00c1a2c7c4
|
[
"MIT"
] |
permissive
|
viralemergence/virion
|
f659713981367304f78b9368c48cf8773177e58e
|
a44e986ff40eb56121f93f6fd1715469532c99fc
|
refs/heads/main
| 2023-09-02T10:25:00.523067
| 2023-08-30T21:21:54
| 2023-08-30T21:21:54
| 319,686,363
| 26
| 7
| null | 2023-09-11T19:16:03
| 2020-12-08T15:50:12
|
R
|
UTF-8
|
R
| false
| false
| 2,222
|
r
|
02_3c_Format GLOBI.R
|
library(tidyverse)
library(magrittr)

# Read the raw GLOBI host-virus interaction records.
globi <- read_csv("Intermediate/Unformatted/GLOBIUnformatted.csv")

# Zero-row frame that pins the harmonised column set and column types;
# binding the real records onto it adds any missing columns (filled with NA).
temp <- data.frame(
  Host = character(),
  Virus = character(),
  HostTaxID = double(),
  VirusTaxID = double(),
  HostNCBIResolved = logical(),
  VirusNCBIResolved = logical(),
  HostGenus = character(),
  HostFamily = character(),
  HostOrder = character(),
  HostClass = character(),
  HostOriginal = character(),
  HostSynonyms = character(),
  VirusGenus = character(),
  VirusFamily = character(),
  VirusOrder = character(),
  VirusClass = character(),
  VirusOriginal = character(),
  HostFlagID = logical(),
  DetectionMethod = character(),
  DetectionOriginal = character(),
  Database = character(),
  DatabaseVersion = character(),
  PublicationYear = double(),
  ReferenceText = character(),
  PMID = double(),
  NCBIAccession = character(),
  ReleaseYear = double(),
  ReleaseMonth = double(),
  ReleaseDay = double(),
  CollectionYear = double(),
  CollectionMonth = double(),
  CollectionDay = double(),
  stringsAsFactors = FALSE
)

# Stamp provenance columns for this source (version = mtime of the raw file).
globi <- globi %>%
  mutate(
    DetectionOriginal = "GLOBI",
    Database = "GLOBI",
    DatabaseVersion = format(file.info("Source/GLOBI-raw.csv")$ctime,
                             format = "%b %d, %Y"),
    DetectionMethod = "Not specified"
  )

# Align the records to the template's column set.
globi <- bind_rows(temp, globi)

# Consistency step: lower-case all taxonomic name columns.
name_cols <- c("Host", "HostGenus", "HostFamily", "HostOrder", "HostClass",
               "Virus", "VirusGenus", "VirusFamily", "VirusOrder", "VirusClass")
globi <- globi %>% mutate_at(name_cols, tolower)

write_csv(globi, "Intermediate/Formatted/GLOBIFormatted.csv")
|
40dd86db06c57156ba437cba978e3bc2c16f6bdb
|
a231d4d134b59309c1b4e7f89534600e0e6054b7
|
/tests/testthat/test_search_users.R
|
7e5fa73747ea56ef5d58fb1198f582478a5203f3
|
[] |
no_license
|
cderv/rtweet
|
30403ca521ea0fbeb04d83ad4d80c915b19ab6ec
|
1bf207122b891ad6a0261b41c4151846ec892cde
|
refs/heads/master
| 2020-12-03T00:23:38.962959
| 2017-06-26T18:11:32
| 2017-06-26T18:11:32
| 96,025,023
| 1
| 0
| null | 2017-07-02T13:26:56
| 2017-07-02T13:26:56
| null |
UTF-8
|
R
| false
| false
| 604
|
r
|
test_search_users.R
|
context("search_users")

test_that("search_users returns users data", {
  skip_on_cran()

  n_users <- 3
  token <- readRDS("twitter_tokens")
  res <- search_users("twitter", n = n_users, verbose = FALSE, token = token)

  # The result is a named data frame of users carrying a user_id column.
  expect_equal(is.data.frame(res), TRUE)
  expect_named(res)
  expect_true("user_id" %in% names(res))
  expect_gt(nrow(res), 2)
  expect_gt(ncol(res), 15)

  # Matching tweets travel along as an attribute, exposed via tweets_data().
  expect_true("tweets" %in% names(attributes(res)))
  expect_true(is.data.frame(attr(res, "tweets")))
  expect_true(is.data.frame(tweets_data(res)))
  expect_gt(nrow(tweets_data(res)), 0)
  expect_gt(ncol(tweets_data(res)), 15)
  expect_named(tweets_data(res))
})
|
7adb0c5fdd97e3581ca792656fbff12800b6b594
|
0626a4ee953adb6c076c7d49e0273919d393a896
|
/DecisionTrees/trn_decTree.R
|
62ff943c401766c85400ae19f4659838a2ace7eb
|
[] |
no_license
|
jtuttle7/MachineLearning2019
|
af1366227654f5c559ef2ed430e575142e12e6b0
|
a93044fa7f65f7e661f2892ce0fd9eb311e04c47
|
refs/heads/master
| 2020-04-22T20:18:52.004782
| 2019-04-24T21:33:45
| 2019-04-24T21:33:45
| 170,634,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 83,344
|
r
|
trn_decTree.R
|
# Decision Tree Training Algorithm

# Libraries ----
# Attach dependencies, installing them first if they are missing.
# require() returns FALSE (instead of erroring) when a package is absent.
data_tree_Flag <- require(data.tree)
dplyr_Flag <- require(dplyr)
# tidyverse_Flag <- library(tidyverse)
if (!data_tree_Flag) {
  install.packages("data.tree")
  library(data.tree)
}
if (!dplyr_Flag) {
  install.packages("dplyr")
  library(dplyr)  # FIX: dplyr was previously installed but never attached
}

# Presumably provides trn_ID3 / test_ID3 etc. used below (the commented-out
# local copies in this file mirror it) - confirm against that file.
source("MachineLearning_JFT.R")
# *** ----
# # Functions ----
#
# # uq_entries <- function(data_vec){
# #
# # # if(is.factor(data_vec)){
# # # data_vec <- sapply(data_vec,as.character)
# # # }
# #
# # uq_obs <- vector(mode=class(data_vec[1]))
# #
# # uq_obs[1] <- data_vec[1]
# #
# # trk_obs <- 2
# #
# # if(length(data_vec)==1){
# # return(uq_obs)
# # }else{
# #
# # for(i in 2:length(data_vec)){
# # if(sum(data_vec[i]==uq_obs) == 0){
# # uq_obs[trk_obs] = data_vec[i]
# # trk_obs = trk_obs + 1
# # }
# # }
# # }
# #
# # return(uq_obs)
# #
# # }
#
#
#
#
# info_gain <- function(S_purity,weights,subS_purity){
#
# subS_sum <- 0
#
# for(i in 1:length(subS_purity)){
#
# subS_sum <- subS_sum + weights[i]*subS_purity[i]
#
# }
#
# gain <- S_purity - subS_sum
#
# return(round(gain,4))
#
#
# }
#
#
#
#
# entrpy <- function(probs){
#
# if(is.na(sum(probs))){
# return(0)
# }
#
# val <- 0
#
# for(i in 1:length(probs)){
#
# if(probs[i] == 0){
#
# }else{
# val <- val + probs[i]*log(probs[i],2)
# }
#
# }
#
# result <- -val
#
# return(result)
#
# }
#
#
#
#
# entrpy_col <- function(data_col,d_labels){
#
# if(is.factor(data_col)){
# data_col <- sapply(data_col,as.character)
# }
#
# if(is.factor(d_labels)){
# d_labels <- sapply(d_labels,as.character)
# }
#
# # Find Unique observations of Attribute
# uq_obs <- unique(data_col)
#
# # Find unique labels
# uq_labs <- unique(d_labels)
#
# # Combine into small matrix
# comb_d_l <- cbind(data_col,d_labels)
#
#
# # Separate data and labels into distinct attributes
#
# subsets <- vector(mode="list",length=length(uq_obs))
#
# for(i in 1:length(subsets)){
# sav <- which(data_col==uq_obs[i])
#
# subsets[[i]] = as.data.frame(comb_d_l[sav,])
# }
#
# # Initialize output
#
# weights <- vector(mode="numeric",length=length(uq_obs))
#
# entrpy_subs <- weights
#
#
# # Determine weights of each attribute
#
# for(i in 1:length(weights)){
# curr_subset <- subsets[[i]]
# weights[i] = length(curr_subset$data_col)/length(data_col)
# }
#
#
# # Compute entropy of each unique observation
#
# for(i in 1:length(entrpy_subs)){
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# curr_subset <- subsets[[i]]
#
# # Find number of each unique label within each subset and calcualte entropy
# for(j in 1:length(uq_labs)){
# num_uq_labs[j] <- sum(curr_subset$d_labels==uq_labs[j])
#
# entrpy_subs[i] <- entrpy_subs[i] - (num_uq_labs[j]/nrow(curr_subset))*log((num_uq_labs[j]/nrow(curr_subset)),2)
#
# if(is.na(entrpy_subs[i])){
# entrpy_subs[i] <- 0
# }
# }
#
# }
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# entrpy_all <- 0
#
# # Find total entropy of attribute
# for(i in 1:length(uq_labs)){
# num_uq_labs[i] <- sum(d_labels==uq_labs[i])
#
# entrpy_all <- entrpy_all - (num_uq_labs[i]/length(d_labels))*log((num_uq_labs[i]/length(d_labels)),2)
#
# if(is.na(entrpy_all)){
# entrpy_all <- 0
# }
# }
#
#
#
#
# return(list("pur_all"=entrpy_all,"weights"=weights,"pur_subs"=entrpy_subs))
#
# }
#
#
#
#
# Mj_Error <- function(data_col,d_labels){
#
# if(is.factor(data_col)){
# data_col <- sapply(data_col,as.character)
# }
#
# if(is.factor(d_labels)){
# d_labels <- sapply(d_labels,as.character)
# }
#
# # Find Unique observations of Attribute
# uq_obs <- unique(data_col)
#
# # Find unique labels
# uq_labs <- unique(d_labels)
#
# # Combine into small matrix
# comb_d_l <- cbind(data_col,d_labels)
#
#
# # Separate data and labels into distinct attributes
#
# subsets <- vector(mode="list",length=length(uq_obs))
#
# for(i in 1:length(subsets)){
# sav <- which(data_col==uq_obs[i])
#
# subsets[[i]] = as.data.frame(comb_d_l[sav,])
# }
#
# # Initialize output
#
# weights <- vector(mode="numeric",length=length(uq_obs))
#
# maj_err_subs <- weights
#
#
# # Determine weights of each attribute
#
# for(i in 1:length(weights)){
# curr_subset <- subsets[[i]]
# weights[i] = length(curr_subset$data_col)/length(data_col)
# }
#
#
# # Compute majority error of each unique observation
#
# for(i in 1:length(maj_err_subs)){
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# curr_subset <- subsets[[i]]
#
# # Find number of each unique label within each subset
# for(j in 1:length(uq_labs)){
# num_uq_labs[j] <- sum(curr_subset$d_labels==uq_labs[j])
# }
#
# # Calculate majority error
# maj_err_subs[i] <- (nrow(curr_subset) - max(num_uq_labs))/nrow(curr_subset)
#
# }
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# # Find majority error of attribute
# for(i in 1:length(uq_labs)){
# num_uq_labs[i] <- sum(d_labels==uq_labs[i])
# }
#
# maj_err_all <- (length(d_labels) - max(num_uq_labs))/length(d_labels)
#
#
#
# return(list("pur_all"=maj_err_all,"weights"=weights,"pur_subs"=maj_err_subs))
#
# }
#
#
#
#
# gini <- function(data_col,d_labels){
#
# if(is.factor(data_col)){
# data_col <- sapply(data_col,as.character)
# }
#
# if(is.factor(d_labels)){
# data_col <- sapply(d_labels,as.character)
# }
#
# # Find Unique observations of Attribute
# uq_obs <- unique(data_col)
#
# # Find unique labels
# uq_labs <- unique(d_labels)
#
# # Combine into small matrix
# comb_d_l <- cbind(data_col,d_labels)
#
#
# # Separate data and corresponding labels into distinct attributes
#
# subsets <- vector(mode="list",length=length(uq_obs))
#
# for(i in 1:length(subsets)){
# sav <- which(data_col==uq_obs[i])
#
# subsets[[i]] = as.data.frame(comb_d_l[sav,])
# }
#
# # Initialize output
#
# weights <- vector(mode="numeric",length=length(uq_obs))
#
# gini_subs <- weights
#
# # Determine weights of each attribute
#
# for(i in 1:length(weights)){
# curr_subset <- subsets[[i]]
# weights[i] = length(curr_subset[,1])/length(data_col)
# }
#
#
# # Compute GINI Index of each unique observation
#
# for(i in 1:length(gini_subs)){
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# curr_subset <- subsets[[i]]
#
# # Find number of each unique label within each subset
# for(j in 1:length(uq_labs)){
# num_uq_labs[j] <- sum(curr_subset$d_labels==uq_labs[j])
# }
#
# # Calculate GINI
#
# gini_sum <- sum((num_uq_labs/nrow(curr_subset))^2)
#
# gini_subs[i] <- 1 - gini_sum
#
# }
#
# # Find GINI of attribute
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# for(i in 1:length(uq_labs)){
# num_uq_labs[i] <- sum(d_labels==uq_labs[i])
# }
#
# gini_sum <- sum((num_uq_labs/length(d_labels))^2)
#
# gini_all <- 1 - gini_sum
#
#
#
# return(list("pur_all"=gini_all,"weights"=weights,"pur_subs"=gini_subs))
#
# }
#
#
#
#
# trn_ID3 <- function(treeVar,data,depth,PurityMethod="Entropy",missingData_ID=NA,numericColInds=0,labelInd=ncol(data),levelName="Base",firstCall=1){
#
#
# # Do some preprocessing on training data for missing data and numeric input columns
#
#
# if(firstCall){
#
# # Address missing information. Replace with majority value of that attribute
#
# treeVar$mostCom_eachAtt <- vector(mode="character",length=ncol(data)) # Store most common attribute from training set
#
# for(i in 1:ncol(data)){
#
# uq_atts <- unique(data[,i])
# num_uq_atts <- vector(mode="numeric",length=length(uq_atts))
#
# for(j in 1:length(uq_atts)){
# num_uq_atts[j] = sum(data[,i]==uq_atts[j])
# }
#
# srt <- sort(num_uq_atts,index.return=T,decreasing=T)
#
# com2not <- srt$ix
# commInd <- 1
#
# while(uq_atts[com2not[commInd]] %in% missingData_ID){
# commInd <- commInd + 1
# }
#
# data[(data[,i] %in% missingData_ID),i] = uq_atts[com2not[commInd]]
# treeVar$mostCom_eachAtt[i] <- uq_atts[com2not[commInd]]
#
# }
#
#
#
# treeVar$medianVal <- vector(mode="numeric",length=length(numericColInds)) # Store median value from training set
#
# # If numeric, separate based on being above or below the median value of column
# if(numericColInds[1]){
#
# for(i in 1:length(numericColInds)){
#
# rplCol = as.numeric(data[,numericColInds[i]])
#
# abvMd <- rplCol > median(rplCol)
# data[abvMd,numericColInds[i]] = paste(">",median(rplCol))
# data[!abvMd,numericColInds[i]] = paste("<=",median(rplCol))
# treeVar$medianVal[i] = median(rplCol)
#
# }
#
# }
# }
#
#
# # Convert data into matrix class to avoid issues with referencing factors
# # Will turn everything into character class (if any character entry anywhere in dataset), so don't try doing math on numbers now. If need to, will have to address factors differently (use data.frame somehow)
# data = as.matrix(data)
#
#
# # Transform numeric data into binary classification using median as transfer point
#
#
# # Determine which purity calculation method we're using
# if(PurityMethod=="Entropy"){
# pur_calc <- function(data_col,d_labels){
# entrpy_col(data_col,d_labels)
# }
# }else if(PurityMethod=="Majority Error"){
# pur_calc <- function(data_col,d_labels){
# Mj_Error(data_col,d_labels)
# }
# }else if(PurityMethod=="GINI"){
# pur_calc <- function(data_col,d_labels){
# gini(data_col,d_labels)
# }
# }
#
#
# # Split out data and labels
# curr_data <- data[,-labelInd]
# curr_labs <- data[,labelInd]
#
# if(is.null(ncol(curr_data))){
# last_att = 1
# }else{
# last_att = 0
# }
#
#
# # Identify unique labels at this point
# uq_labs <- unique(curr_labs)
#
#
# # Save Splitting Variable of the Current Branch
# treeVar$splitVar <- levelName
#
#
# # If only one label exists, create leaf node
# if(length(uq_labs) == 1){
# treeVar$AddChild(as.character(uq_labs[1]))
#
#
# }else if(depth==1){ # If reached maximum user-specified depth, create leaf node with majority label
#
# num_uq_labs <- vector(mode="numeric",length=length(uq_labs))
#
# for(i in 1:length(num_uq_labs)){
#
# num_uq_labs[i] <- sum(curr_labs==uq_labs[i])
#
# }
#
# maj_lab_ind <- which.max(num_uq_labs)
#
# treeVar$AddChild(as.character(uq_labs[maj_lab_ind]))
#
#
# }else{
#
# # If the last attribute to split on, create new node, and new node beneath that, one for attribute, then the last to identify label
#
# if(last_att){
#
# for(i in 1:length(curr_data)){
#
# firstChild <- treeVar$AddChild(curr_data[i])
# leaf <- firstChild$AddChild(as.character(curr_labs[i]))
#
# }
#
#
# }else{
#
#
# # Place to store info gain of each attribute for this node
# treeVar$ig <- vector(mode="numeric",length=ncol(curr_data))#col_curr_data)
#
# for(i in 1:length(treeVar$ig)){
# puritys <- pur_calc(curr_data[,i],curr_labs)
# treeVar$ig[i] <- info_gain(puritys$pur_all,puritys$weights,puritys$pur_subs)
# }
#
# # Find column of maximum info gain
# max_ig_ind <- which.max(treeVar$ig)
#
# # Identify Attributes in that column
# uq_split_atts <- unique(curr_data[,max_ig_ind])
#
#
# # Store most common observation of this attribute in case of missing data
# # This sets up re-routing variable to choose most common observation of this attribute for when prediction occurs (treeVar$mostCommAtt)
# num_uq_atts <- vector(mode="numeric",length=length(uq_split_atts))
#
# for(i in 1:length(num_uq_atts)){
#
# num_uq_atts[i] <- sum(curr_labs==uq_split_atts[i])
#
# }
#
# mostComm_ind <- which.max(num_uq_atts)
#
# treeVar$mostCommAtt <- uq_split_atts[mostComm_ind]
#
#
#
# # Split data up by those attributes and recursively call trainer to get down to leaf node or max depth
# for(i in 1:length(uq_split_atts)){
#
#
# keepInds_split <- curr_data[,max_ig_ind] == uq_split_atts[i]
#
# keepData <- curr_data[keepInds_split,-max_ig_ind]
# keepLabs <- curr_labs[keepInds_split]
#
# keepFull <- cbind(keepData,keepLabs)
#
# newdepth = depth - 1
#
# newNode <- treeVar$AddChild(uq_split_atts[i])
#
# trn_ID3(newNode,keepFull,newdepth,PurityMethod=PurityMethod,levelName=colnames(curr_data)[max_ig_ind],firstCall=0)
#
#
# }
#
# }
#
# }
#
#
# }
#
#
#
#
# predict_ID3 <- function(tree,features,numericOut=F,missingData_ID=NA){
#
# # Arrived at point where next branch gives label, return that label
#
# if(tree$children[[1]]$isLeaf){
# if(numericOut){
# return(as.numeric(tree$children[[1]]$name))
# }else{
# return(tree$children[[1]]$name)
# }
# }
#
# # Grab appropriate child as designated by the feature name (determined from what the next child was split on) and corresponding feature in input observation
# child <- tree$children[[features[[tree$children[[1]]$splitVar]]]]
#
#
# # If the child it called is null (never trained on or doesn't exist), or it matches the ID specified by the user of missing data, grab most common attribute tree
# if(is.null(child) | is.na(features[[tree$children[[1]]$splitVar]])){
# child <- tree$children[[tree$mostCommAtt]]
# }else if(length(missingData_ID)>1){
# if(features[[tree$children[[1]]$splitVar]] %in% missingData_ID){
# child <- tree$children[[tree$mostCommAtt]]
# }
# }
#
# # Recursively call until arrive at leaf node
# return(predict_ID3(child,features,numericOut))
#
# }
#
#
#
#
# test_ID3 <- function(tree,testdata,missingData_ID=NA,numericColInds=0,numericOut=F,labelInd=ncol(testdata)){
#
#
# # Preprocessing of test data to match structure of tree as determiend by training set
#
#
#
# # Address missing information. Replace with majority value of that attribute from training set
#
# for(i in 1:ncol(testdata)){
#
# testdata[(testdata[,i] %in% missingData_ID),i] = tree$mostCom_eachAtt[i]
#
# }
#
#
# # If numeric, separate based on being above or below the median value of column from training set
# if(numericColInds[1]){
#
# for(i in 1:length(numericColInds)){
#
# rplCol = as.numeric(testdata[,numericColInds[i]])
#
# abvMd <- rplCol > tree$medianVal[i]
# testdata[abvMd,numericColInds[i]] = paste(">",tree$medianVal[i])
# testdata[!abvMd,numericColInds[i]] = paste("<=",tree$medianVal[i])
#
# }
#
# }
#
#
#
# if(is.null(try(nrow(testdata)))){
# testLength = 1
# }else{
# testLength=nrow(testdata)
# }
#
# # Create a vector to store predictions in
# if(numericOut){
# ID3_Predictions <- vector(mode="numeric",length=testLength)
# }else{
# ID3_Predictions <- vector(mode="character",length=testLength)
# }
#
#
# # Move through all test data and predict label
# for(i in 1:length(ID3_Predictions)){
#
# curr_features = testdata[i,]
#
# if(is.factor(curr_features)){
# curr_features=sapply(curr_features,as.character)
# }
#
# ID3_Predictions[i]=predict_ID3(tree=tree, features=curr_features, numericOut=numericOut, missingData_ID=missingData_ID)
#
# }
#
# # Calculate Prediction Error of Model
# predError <- (length(ID3_Predictions) - sum(ID3_Predictions==testdata[,labelInd]))/length(ID3_Predictions)
#
# return(predError)
#
# }
#
#
#
#
# # *** ----
#
#
#
# # Read In Tennis Data ----
#
#
# tennis_NoHd_data <- read.csv("G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/tennis_noHeader.csv",header=F,stringsAsFactors = F)
#
# tennis_Hd_data <- read.csv("G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/tennis_Header.csv",header=T,stringsAsFactors = F)
#
#
#
# # *** ----
#
#
#
#
# # Train Tennis Tree ----
#
# TennisTree <- Node$new("Tennis")
#
# trn_ID3(treeVar=TennisTree,data=tennis_Hd_data,depth=6,PurityMethod = "Entropy")
#
# print(TennisTree,"splitVar","isLeaf")
#
#
# # *** ----
#
#
#
#
# # Test Model of Tennis Tree ----
#
# test_Tennis_Hd <- read.csv("G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/tennis_Header_test.csv",header=T,stringsAsFactors = F)
#
# test_ID3(TennisTree,test_Tennis_Hd,numericOut = F)
#
# # *** ----
#
#
#
# # *** ----
#
#
#
#
# Car Problem ----

# Read in Training & Test Data ----
car_trn_data <- read.csv("G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/car/train.csv", header = FALSE, stringsAsFactors = FALSE)
car_tst_data <- read.csv("G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/car/test.csv", header = FALSE, stringsAsFactors = FALSE)

# Initialize and train a tree for every (depth, purity-method) combination ----
# Replaces 36 copy-pasted statements with a parameterized double loop.
# Each variable car_d<depth>_<suffix> is created in the calling environment
# via assign() exactly as before, and every tree's root node keeps its
# original label (e.g. "Car_Depth3_MJerr"). data.tree Nodes have reference
# semantics, so trn_ID3() mutates the object just assigned.
purity_methods <- c(ent = "Entropy", MJe = "Majority Error", GIN = "GINI")
node_labels <- c(ent = "Ent", MJe = "MJerr", GIN = "GINI")

for (suffix in names(purity_methods)) {
  for (d in 1:6) {
    tree <- Node$new(sprintf("Car_Depth%d_%s", d, node_labels[[suffix]]))
    assign(sprintf("car_d%d_%s", d, suffix), tree)
    trn_ID3(tree, car_trn_data, depth = d, PurityMethod = purity_methods[[suffix]])
  }
}
# *** ----
# Train & Test error of each tree ----
# Evaluate every (method, depth) tree on both the training and the test set.
# Results land in globals named car_d<d>_<ent|MJe|GIN>_trnerr / _tsterr,
# matching the names the error table below expects.
for (m in c("ent", "MJe", "GIN")) {
  for (d in 1:6) {
    tree <- get(sprintf("car_d%d_%s", d, m))
    assign(sprintf("car_d%d_%s_trnerr", d, m),
           test_ID3(tree, car_trn_data, numericOut = FALSE))
    assign(sprintf("car_d%d_%s_tsterr", d, m),
           test_ID3(tree, car_tst_data, numericOut = FALSE))
  }
}
# *** ----
# Organize Error Results for Easy Viewing ----
# Gather the six per-depth error globals matching a sprintf pattern into a
# numeric vector (depth 1..6), rounded to 3 decimals.
collect_car_err <- function(fmt) {
  round(vapply(1:6, function(d) get(sprintf(fmt, d)), numeric(1)), 3)
}
# FIX: "Majority Error Train Error" was previously the only unrounded column;
# all six columns are now rounded consistently.
Car_Err_Tbl <- data.frame(
  "Entropy Train Error"        = collect_car_err("car_d%d_ent_trnerr"),
  "Entropy Test Error"         = collect_car_err("car_d%d_ent_tsterr"),
  "Majority Error Train Error" = collect_car_err("car_d%d_MJe_trnerr"),
  "Majority Error Test Error"  = collect_car_err("car_d%d_MJe_tsterr"),
  "GINI Train Error"           = collect_car_err("car_d%d_GIN_trnerr"),
  "GINI Test Error"            = collect_car_err("car_d%d_GIN_tsterr")
)
Car_Err_Tbl
# *** ----
# *** ----
#
#
#
#
# Bank Problem ----
# Read in Training & Test Data ----
# NOTE(review): absolute Windows path — consider a relative path so the
# script is portable across machines.
bnk_data_dir <- "G:/My Drive/U_of_U/19_Spring/MachineLearning/HW/HW1/bank"
bnk_trn_data <- read.csv(file.path(bnk_data_dir, "train.csv"),
                         header = FALSE, stringsAsFactors = FALSE)
bnk_tst_data <- read.csv(file.path(bnk_data_dir, "test.csv"),
                         header = FALSE, stringsAsFactors = FALSE)
# *** ----
# Initialize trees for each depth of bank model with no missing data, and IG method ----
# One data.tree Node per (impurity method, depth) pair, depths 1..16.
#   variable:   bnk_d<depth>_<ent|MJe|GIN>_nMs   ("nMs" = no missing-data handling)
#   node label: Bank_nMs_Depth<depth>_<Ent|MJerr|GINI>
bnk_node_tags <- c(ent = "Ent", MJe = "MJerr", GIN = "GINI")
for (m in names(bnk_node_tags)) {
  for (d in 1:16) {
    assign(sprintf("bnk_d%d_%s_nMs", d, m),
           Node$new(sprintf("Bank_nMs_Depth%d_%s", d, bnk_node_tags[[m]])))
  }
}
# *** ----
# Train each tree ----
# Columns 1, 6, 10, 12-15 of the bank data are passed as numeric attributes
# (the numericColInds argument used identically in every original call).
bnk_num_cols <- c(1, 6, 10, 12, 13, 14, 15)
bnk_purity <- c(ent = "Entropy", MJe = "Majority Error", GIN = "GINI")
for (m in names(bnk_purity)) {
  for (d in 1:16) {
    trn_ID3(get(sprintf("bnk_d%d_%s_nMs", d, m)), bnk_trn_data,
            depth = d, PurityMethod = bnk_purity[[m]],
            numericColInds = bnk_num_cols)
  }
}
# *** ----
# Train & Test error of each tree ----
# Evaluate each (method, depth) tree on the training and test sets; results
# land in globals bnk_d<d>_<ent|MJe|GIN>_nMs_trnerr / _tsterr, matching the
# names the error table below expects.
bnk_err_num_cols <- c(1, 6, 10, 12, 13, 14, 15)
for (m in c("ent", "MJe", "GIN")) {
  for (d in 1:16) {
    tree <- get(sprintf("bnk_d%d_%s_nMs", d, m))
    assign(sprintf("bnk_d%d_%s_nMs_trnerr", d, m),
           test_ID3(tree, bnk_trn_data, numericOut = FALSE,
                    numericColInds = bnk_err_num_cols))
    assign(sprintf("bnk_d%d_%s_nMs_tsterr", d, m),
           test_ID3(tree, bnk_tst_data, numericOut = FALSE,
                    numericColInds = bnk_err_num_cols))
  }
}
# *** ----
# Organize Error Results for Easy Viewing ----
# Gather the sixteen per-depth error globals matching a sprintf pattern into
# a numeric vector (depth 1..16), rounded to 3 decimals.
collect_bnk_err <- function(fmt) {
  round(vapply(1:16, function(d) get(sprintf(fmt, d)), numeric(1)), 3)
}
# FIX: "Majority Error Train Error" was previously the only unrounded column;
# all six columns are now rounded consistently.
Bnk_Err_Tbl_nMs <- data.frame(
  "Entropy Train Error"        = collect_bnk_err("bnk_d%d_ent_nMs_trnerr"),
  "Entropy Test Error"         = collect_bnk_err("bnk_d%d_ent_nMs_tsterr"),
  "Majority Error Train Error" = collect_bnk_err("bnk_d%d_MJe_nMs_trnerr"),
  "Majority Error Test Error"  = collect_bnk_err("bnk_d%d_MJe_nMs_tsterr"),
  "GINI Train Error"           = collect_bnk_err("bnk_d%d_GIN_nMs_trnerr"),
  "GINI Test Error"            = collect_bnk_err("bnk_d%d_GIN_nMs_tsterr")
)
row.names(Bnk_Err_Tbl_nMs) <- as.character(seq_len(16))
Bnk_Err_Tbl_nMs
# *** ----
# *** ----
# Initialize trees for each depth of bank model with missing data, and IG method ----
# One data.tree Node per (impurity method, depth) pair, depths 1..16.
#   variable:   bnk_d<depth>_<ent|MJe|GIN>_MsD   ("MsD" = missing-data handling)
#   node label: Bank_MsD_Depth<depth>_<Ent|MJerr|GINI>
bnk_msd_node_tags <- c(ent = "Ent", MJe = "MJerr", GIN = "GINI")
for (m in names(bnk_msd_node_tags)) {
  for (d in 1:16) {
    assign(sprintf("bnk_d%d_%s_MsD", d, m),
           Node$new(sprintf("Bank_MsD_Depth%d_%s", d, bnk_msd_node_tags[[m]])))
  }
}
# *** ----
# Train each tree ----
# Same as the no-missing-data run, but NA / "unknown" values are flagged as
# missing via missingData_ID.
# NOTE(review): this span reproduces exactly the calls it replaces — Entropy
# and Majority Error for depths 1-16 but only depths 1-9 for GINI; the
# remaining GINI-depth training calls follow later in the script.
bnk_msd_purity <- c(ent = "Entropy", MJe = "Majority Error", GIN = "GINI")
bnk_msd_num_cols <- c(1, 6, 10, 12, 13, 14, 15)
for (m in names(bnk_msd_purity)) {
  max_d <- if (m == "GIN") 9 else 16
  for (d in seq_len(max_d)) {
    trn_ID3(get(sprintf("bnk_d%d_%s_MsD", d, m)), bnk_trn_data,
            depth = d, PurityMethod = bnk_msd_purity[[m]],
            missingData_ID = c(NA, "unknown"),
            numericColInds = bnk_msd_num_cols)
  }
}
trn_ID3(bnk_d10_GIN_MsD,bnk_trn_data,depth=10,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d11_GIN_MsD,bnk_trn_data,depth=11,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d12_GIN_MsD,bnk_trn_data,depth=12,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d13_GIN_MsD,bnk_trn_data,depth=13,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d14_GIN_MsD,bnk_trn_data,depth=14,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d15_GIN_MsD,bnk_trn_data,depth=15,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
trn_ID3(bnk_d16_GIN_MsD,bnk_trn_data,depth=16,PurityMethod="GINI",missingData_ID=c(NA,"unknown"),numericColInds=c(1,6,10,12,13,14,15))
# *** ----
# Train & test error of each tree ----
# Evaluate every fitted tree (3 purity criteria x 16 depths) on both the
# training and the test set. The resulting scalars are stored under the exact
# variable names the rest of the script expects
# (bnk_d<depth>_<crit>_MsD_trnerr / _tsterr), built here via assign()/get()
# instead of 96 hand-typed statements.
for (crit_tag in c("ent", "MJe", "GIN")) {
  for (depth_i in 1:16) {
    tree_name <- sprintf("bnk_d%d_%s_MsD", depth_i, crit_tag)
    tree <- get(tree_name)
    # training-set error
    assign(paste0(tree_name, "_trnerr"),
           test_ID3(tree, bnk_trn_data, numericOut = FALSE,
                    missingData_ID = c(NA, "unknown"),
                    numericColInds = c(1, 6, 10, 12, 13, 14, 15)))
    # test-set error
    assign(paste0(tree_name, "_tsterr"),
           test_ID3(tree, bnk_tst_data, numericOut = FALSE,
                    missingData_ID = c(NA, "unknown"),
                    numericColInds = c(1, 6, 10, 12, 13, 14, 15)))
  }
}
rm(crit_tag, depth_i, tree_name, tree)  # tidy up loop temporaries
# *** ----
# Organize Error Results for Easy Viewing ----
# One row per tree depth (1-16), one column per (criterion x split).
# Columns are collected with mget() over the systematic variable names instead
# of 96 hand-typed references; unname() avoids the variable names leaking into
# the data.frame row names before they are set explicitly below.
# BUG FIX: the original rounded every column to 3 decimals EXCEPT
# "Majority Error Train Error"; it is now rounded like the others.
Bnk_Err_Tbl_MsD <- data.frame(
  "Entropy Train Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_ent_MsD_trnerr")))), 3),
  "Entropy Test Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_ent_MsD_tsterr")))), 3),
  "Majority Error Train Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_MJe_MsD_trnerr")))), 3),
  "Majority Error Test Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_MJe_MsD_tsterr")))), 3),
  "GINI Train Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_GIN_MsD_trnerr")))), 3),
  "GINI Test Error" = round(unname(unlist(mget(paste0("bnk_d", 1:16, "_GIN_MsD_tsterr")))), 3))
row.names(Bnk_Err_Tbl_MsD) <- as.character(1:16)  # row label = tree depth
Bnk_Err_Tbl_MsD
|
cc905476775b6beac0b0091855a81264b36cd951
|
4582853950f0b804f12bc7a135ab581f972e14d5
|
/code/RS_pheno_model_1.R
|
cd1c9b68c297bc82358e8aa64087fbb87608e49a
|
[] |
no_license
|
katjakowalski/MA
|
2eae69e2e70af51be5ef2eafaf2f773e01ccc547
|
96e43199c4041e798601471c9d61a93e8cc468e9
|
refs/heads/master
| 2020-03-28T06:39:22.670034
| 2019-05-01T13:46:05
| 2019-05-01T13:46:05
| 147,850,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,689
|
r
|
RS_pheno_model_1.R
|
#### load packages ####
library(mgcv)
#library(tidyverse)
#library(ggplot2)
#library(reshape2)
library(dplyr)
library(tidyr)
library(lubridate)
library(zoo)
#### end ####
#### Paths and input data ####
# Project SAN share (UNC paths)
path_data <- "\\\\141.20.140.91/SAN_Projects/Spring/workspace/Katja/germany/data_USB/data"
path_results <- "\\\\141.20.140.91/SAN_Projects/Spring/workspace/Katja/germany/data_USB/results"
path_code <- "\\\\141.20.140.91/SAN_Projects/Spring/workspace/Katja/germany/data_USB/code"
data <- read.csv(file.path(path_data, "data_refl_cleared.csv"), header = TRUE, sep = ",")
# Stations previously excluded here (kept for reference):
# 379, 760, 1503, 2878, 3490, 4878, 5100, 5715, 4485 - climate data missing;
# 1550, 3679, 7424 - samples missing.
# Keep only plausible index values (0 <= index < 1.1) from 2017, per index
data_evi <- subset(data, evi < 1.1 & evi >= 0 & year == 2017)
data_ndvi <- subset(data, ndvi < 1.1 & ndvi >= 0 & year == 2017)
#' Fit spring green-up phenology models (logistic NLS + GAM) per sample plot.
#'
#' For every unique plotid with at least `min_obs` observations, the series is
#' truncated to the green-up phase (up to 20 days past the seasonal index
#' maximum within DOY 75-250), the raw winter observations (DOY <= 50) are
#' replaced by 50 synthetic baseline points, and two models are fitted:
#'   1. a 4-parameter logistic curve via nls(); its inflection point b4 is the
#'      phenology metric, and
#'   2. a penalised spline (mgcv::gam); the DOY minimising the finite-
#'      difference first derivative of the fit is the metric ("sp").
#'      NOTE(review): Xp is computed as (X0 - X1) / eps, i.e. the *negative*
#'      forward difference, so which.min() selects the steepest increase —
#'      confirm this sign convention is intended.
#' Model failures are recorded as NA rows with fit_error / obs_error flags
#' instead of aborting the whole run.
#'
#' @param plotid  vector of sample/pixel identifiers.
#' @param index   vegetation index values (e.g. EVI or NDVI), same length.
#' @param doy     day-of-year of each observation.
#' @param year    observation year. NOTE(review): the subset below hard-codes
#'   "2017", so this argument is stored but effectively unused — confirm.
#' @param stat_id DWD climate-station id of each observation.
#' @param min_obs minimum number of observations required per plot (default 10).
#' @return list of two lists (one element per plot): [[1]] logistic-fit rows,
#'   [[2]] GAM rows.
pheno_model <- function(plotid,
                        index,
                        doy,
                        year,
                        stat_id,
                        min_obs = 10) {
  data <- data.frame(plotid,
                     index,
                     doy,
                     year,
                     stat_id)
  l_samples <- length(unique(data$plotid))
  nls_fit_result <- vector("list", l_samples)  # preallocate one slot per plot
  sp_fit_result <- vector("list", l_samples)
  k <- 0
  for (p in unique(data$plotid)) {
    transition <- c()
    b4_start <- c()
    # TODO(review): use the `year` argument instead of the hard-coded "2017"
    d <- subset(data, data$plotid == p & data$year == "2017")
    stat_id <- d$stat_id[1]  # station id of this plot (shadows the argument)
    k <- k + 1
    if (length(d$doy) >= min_obs) {
      # transition = DOY of the seasonal maximum (within DOY 75-250) + 20 days;
      # later observations are discarded so only green-up is modelled.
      # NOTE(review): ties in max(index) would make `transition` length > 1 —
      # confirm the data precludes exact ties.
      d_tr <- subset(d, d$doy >= 75 & d$doy <= 250)
      transition <- with(d_tr, doy[index == max(index)]) + 20
      d <- subset(d, d$doy <= transition)
      # starting value for the logistic inflection b4: mean DOY of
      # above-median index observations
      b4_start <- round(mean(d[which(d$index > median(d$index)), "doy"]), 0)
      # winter baseline: mean index before DOY 50 (fallback: series minimum)
      base_index <- mean(subset(d, d$doy <= 50)$index)
      if (is.nan(base_index)) {
        base_index <- min(d$index)
      }
      d <- d[!d$doy <= 50, ]  # drop raw winter observations (DOY <= 50)
      # replace them with 50 synthetic baseline points at DOY 1..50
      df_base <- d[0, ]                             # empty df with column names
      df_base[c(1:50), ] <- rep(NA, ncol(df_base))  # 50 NA rows
      df_base$index <- base_index
      df_base$doy <- seq(1, 50, 1)
      dat <- rbind(d, df_base)
      # --- logistic model -------------------------------------------------
      nls_fit <-
        tryCatch(nls(index ~ b1 + (b2 / (1 + exp(-b3 * (doy - b4)))),
                     start = list(
                       b1 = min(index),
                       b2 = max(index),
                       b3 = 0.2,
                       b4 = b4_start),
                     data = dat),
                 error = function(e)
                   return(NA))
      # inherits() instead of class() == "...": robust for multi-class objects
      if (inherits(nls_fit, "nls")) {
        dat$predict_nls <- predict(nls_fit)
        mse_log <- mean(abs(dat$predict_nls - dat$index)^2)
        nls_fit_result[[k]] <- as.data.frame(data.frame(t(coef(nls_fit)),
                                                        "plotid" = p,
                                                        "obs_error" = 0,
                                                        "fit_error" = 0,
                                                        "transition" = transition,
                                                        "observations" = length(d$doy),
                                                        "MSE_log" = mse_log,
                                                        "stat_id" = stat_id))
      } else {
        # nls failed: NA metrics, fit_error flag set
        nls_fit_result[[k]] <- as.data.frame(data.frame("b1" = NA,
                                                        "b2" = NA,
                                                        "b3" = NA,
                                                        "b4" = NA,
                                                        "plotid" = p,
                                                        "obs_error" = 0,
                                                        "fit_error" = 1,
                                                        "transition" = transition,
                                                        "observations" = length(d$doy),
                                                        "MSE_log" = NA,
                                                        "stat_id" = stat_id))
      }
      # --- GAM ------------------------------------------------------------
      fit_sp <- tryCatch(gam(index ~ s(doy, sp = 0.005), method = "REML", data = dat),
                         error = function(e) return(NA))
      # BUG FIX: was `class(fit_sp) == "gam"` — gam objects carry class
      # c("gam", "glm", "lm"), so the comparison yields a length-3 condition,
      # which warns on older R and is a hard error on R >= 4.2.
      if (inherits(fit_sp, "gam")) {
        dat$predict_gam <- predict(fit_sp)
        mse_gam <- mean(abs(dat$predict_gam - dat$index)^2)
        # approximation of the 1st derivative using finite differences on the
        # linear-predictor matrix
        newDF <- with(dat, data.frame(doy = seq(0, transition, 1)))
        B <- predict(fit_sp, newDF, type = "response", se.fit = TRUE)
        eps <- 1e-7
        X0 <- predict(fit_sp, newDF, type = 'lpmatrix')
        newDFeps_p <- newDF + eps
        X1 <- predict(fit_sp, newDFeps_p, type = 'lpmatrix')
        Xp <- (X0 - X1) / eps  # negative forward difference (see NOTE above)
        fd_d1 <- Xp %*% coef(fit_sp)
        sp_doy <- which.min(fd_d1)
        sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = sp_doy,
                                                       "plotid" = p,
                                                       "obs_error_sp" = 0,
                                                       "fit_error_sp" = 0,
                                                       "transition" = transition,
                                                       "observations" = length(d$doy),
                                                       "MSE_gam" = mse_gam,
                                                       "stat_id" = stat_id))
        fd_d1 <- NULL
        fit_sp <- NULL
      } else {
        # gam failed: NA metric, fit_error_sp flag set
        sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
                                                       "plotid" = p,
                                                       "obs_error_sp" = 0,
                                                       "fit_error_sp" = 1,
                                                       "transition" = transition,
                                                       "observations" = length(d$doy),
                                                       "MSE_gam" = NA,
                                                       "stat_id" = stat_id))
      }
    } else {
      # fewer than min_obs observations: flag obs_error for both models
      sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
                                                     "plotid" = p,
                                                     "obs_error_sp" = 1,
                                                     "fit_error_sp" = 0,
                                                     "transition" = 0,
                                                     "observations" = length(d$doy),
                                                     "MSE_gam" = NA,
                                                     "stat_id" = stat_id))
      nls_fit_result[[k]] <- as.data.frame(data.frame("b1" = NA,
                                                      "b2" = NA,
                                                      "b3" = NA,
                                                      "b4" = NA,
                                                      "plotid" = p,
                                                      "obs_error" = 1,
                                                      "fit_error" = 0,
                                                      "transition" = 0,
                                                      "observations" = length(d$doy),
                                                      "MSE_log" = NA,
                                                      "stat_id" = stat_id))
    }
  }
  return(list(nls_fit_result, sp_fit_result))
}
# Fit the phenology models on the EVI and NDVI data (~40 min each), timing
# each run in minutes
t0 <- proc.time()
pheno_result_evi <- pheno_model(data_evi$plotid, data_evi$evi, data_evi$doy,
                                data_evi$year, data_evi$dwd_stat)
(proc.time() - t0) / 60
t0 <- proc.time()
pheno_result_ndvi <- pheno_model(data_ndvi$plotid, data_ndvi$ndvi, data_ndvi$doy,
                                 data_ndvi$year, data_ndvi$dwd_stat)
(proc.time() - t0) / 60
# Stack the per-plot result rows and join the logistic and spline metrics
# by plot id
res_nls_evi <- data.frame(do.call(rbind, pheno_result_evi[[1]]))
res_spl_evi <- data.frame(do.call(rbind, pheno_result_evi[[2]]))
results_evi <- merge(res_spl_evi[, c(1:8)], res_nls_evi[, c(4, 5, 7, 10)], by = "plotid")
res_nls_ndvi <- data.frame(do.call(rbind, pheno_result_ndvi[[1]]))
res_spl_ndvi <- data.frame(do.call(rbind, pheno_result_ndvi[[2]]))
results_ndvi <- merge(res_spl_ndvi[, c(1:8)], res_nls_ndvi[, c(4, 5, 7, 10)], by = "plotid")
# Per-pixel model difference (spline DOY minus logistic inflection DOY),
# computed before the columns are renamed
results_evi$diff_px <- results_evi$sp - results_evi$b4
results_ndvi$diff_px <- results_ndvi$sp - results_ndvi$b4
# Rename metric columns so the vegetation index is explicit
results_evi <- rename(results_evi,
                      LOG_EVI = b4, GAM_EVI = sp,
                      MSE_GAM_EVI = MSE_gam, MSE_LOG_EVI = MSE_log)
results_ndvi <- rename(results_ndvi,
                       LOG_NDVI = b4, GAM_NDVI = sp,
                       MSE_GAM_NDVI = MSE_gam, MSE_LOG_NDVI = MSE_log)
# Persist the pixel-level results
write.csv(results_evi, file = file.path(path_results, "results_px_evi.csv"), row.names = FALSE)
write.csv(results_ndvi, file = file.path(path_results, "results_px_ndvi.csv"), row.names = FALSE)
# Combined pixel table with cross-index and cross-model differences
results_px <- merge(
  results_ndvi[, c("plotid", "LOG_NDVI", "GAM_NDVI", "observations", "stat_id",
                   "MSE_GAM_NDVI", "MSE_LOG_NDVI")],
  results_evi[, c("plotid", "LOG_EVI", "GAM_EVI", "MSE_GAM_EVI", "MSE_LOG_EVI")],
  by = "plotid")
results_px$GAM_diff <- results_px$GAM_NDVI - results_px$GAM_EVI
results_px$LOG_diff <- results_px$LOG_NDVI - results_px$LOG_EVI
results_px$NDVI_diff <- results_px$GAM_NDVI - results_px$LOG_NDVI
results_px$EVI_diff <- results_px$GAM_EVI - results_px$LOG_EVI
# MSE columns only, relabelled model_index
MSE_px <- setNames(results_px[, c("MSE_LOG_NDVI", "MSE_LOG_EVI", "MSE_GAM_NDVI", "MSE_GAM_EVI")],
                   c("LOG_NDVI", "LOG_EVI", "GAM_NDVI", "GAM_EVI"))
# Aggregation to plots (climate stations): mean of every column per stat_id.
# FIX: funs() is deprecated since dplyr 0.8 and defunct in dplyr >= 1.1;
# pass the function directly so this works on both old and current dplyr.
mean_evi <- results_evi %>%
  group_by(stat_id) %>%
  summarise_all(mean, na.rm = TRUE)
mean_ndvi <- results_ndvi %>%
  group_by(stat_id) %>%
  summarise_all(mean, na.rm = TRUE)
# model differences at station level (GAM metric minus logistic metric)
mean_evi$diff_station <- mean_evi$GAM_EVI - mean_evi$LOG_EVI
mean_ndvi$diff_station <- mean_ndvi$GAM_NDVI - mean_ndvi$LOG_NDVI
# add X and Y coordinates of the DWD climate stations
dwd_stations <- read.csv(file.path(path_data, "20190120_stations_dwd.csv"), header = TRUE)
mean_evi <- merge(mean_evi, dwd_stations[, c("X", "Y", "Stations_i")],
                  by.x = "stat_id", by.y = "Stations_i", all.x = TRUE)
mean_ndvi <- merge(mean_ndvi, dwd_stations[, c("X", "Y", "Stations_i")],
                   by.x = "stat_id", by.y = "Stations_i", all.x = TRUE)
# write station-level results to disk
write.csv(mean_evi, file = file.path(path_results, "results_plot_evi.csv"), row.names = FALSE)
write.csv(mean_ndvi, file = file.path(path_results, "results_plot_ndvi.csv"), row.names = FALSE)
|
b4a3d4e1a97b4f6149ebbbcb4a1dc2b9c5f629e1
|
0cb55c0b8e87681d1d7e372794b0c4ef3acd15ed
|
/GISforMaddie.R
|
a048f783082ae67f1e89a973509b3184efef3127
|
[] |
no_license
|
jsilevine/MpalaWater
|
2d0ac7543f2968d8916f2682183adda8355dc87c
|
f8984274b25f51bed37bd260856fff4cb4157f25
|
refs/heads/master
| 2022-11-15T12:21:48.251595
| 2020-07-08T19:07:35
| 2020-07-08T19:07:35
| 257,985,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,703
|
r
|
GISforMaddie.R
|
library(ggplot2)
library(rgdal)
library(rnaturalearth)
library(sf)
library(raster)
library(ggplot2)
library(stars)
library(maptools)
library(data.table)
library(birk)
library(plyr)
## INSTRUCTIONS:
## Start with the "Merge New Data" section further down; this section only
## loads the study-area polygon and a Kenya basemap for a context plot.
## Raw sightings were originally loaded here:
# data <- read.csv("sightings.csv")
# data <- data[data$Y > 0,]  # drop rows with invalid zero coordinates
## Mpala border polygon, buffered by 0.2 degrees
buffer <- st_read("BufferedMpalaPolygon.shp")
## Kenya basemap from Natural Earth (1:110m)
countries <- ne_countries(scale = 110)
Kenya <- countries[countries$name == "Kenya",]
## Reproject the buffered boundary to UTM 37N (EPSG:32637, metres)
buffercoords <- buffer  # shapefile already carries a CRS
bufferobs <- st_transform(st_as_sf(buffercoords), 32637)
## The sightings themselves were originally projected like this:
## coordinates() -> SpatialPoints in EPSG:21097, spTransform to the basemap
## CRS, st_as_sf, then st_transform to 32637 (now done in find_distances()).
## Sanity plot: basemap plus buffered boundary
plot(Kenya)
plot(bufferobs, add = TRUE)
## Build a regular square sampling grid over a (buffered) polygon.
##
## Arguments:
##   buffer      - sf/sfc object whose bounding box anchors the grid.
##   grid.size   - cell size in the units of buffer's CRS (metres for UTM).
##   random      - if TRUE, shift the grid origin down/left of the bounding
##                 box by |N(offset.mean, offset.sd)| so repeated grids are
##                 not identical.
##   offset.mean, offset.sd - parameters of the random origin offset;
##                 required when random = TRUE.
##   what        - passed to sf::st_make_grid ("polygons", "corners", ...).
## Returns: an sf object of grid cells.
create_grid <- function(buffer, grid.size = 100, random = FALSE,
                        offset.mean = NA, offset.sd = NA, what = "polygons") {
  ## FIX: fail fast instead of printing a message and silently returning NULL
  if (random && any(is.na(c(offset.mean, offset.sd)))) {
    stop("must supply mean and sd for random offset", call. = FALSE)
  }
  lower.left <- st_bbox(buffer)[c("xmin", "ymin")]  # grid origin anchor
  if (random) {
    ## shift origin down/left by an absolute normal deviate
    offset <- lower.left - abs(rnorm(2, offset.mean, offset.sd))
  } else {
    offset <- lower.left
  }
  grid <- st_make_grid(x = buffer,
                       cellsize = grid.size,
                       offset = offset,
                       what = what,     ## want output to be a spatial object
                       square = TRUE    ## could use hexagons via square = FALSE
                       )
  st_as_sf(grid)
}
## --------- Processing ---------- ##
## Okay now to extract the observations based on the data
## we want one row per observation so we should do a left join of the grid to the observations.
## okay so next step is probably to write this all into a function that just takes the data locations
## as an argument and spits out a data frame like this, perhaps trimmed down to the relevant points
## Also need to add in the rest of the data!
##NEXT STEP
# Function below takes in observation data and runs against 3 rasters, creating a new distances csv file.
# Merge the archived sightings with the newly exported batch.
existingdata <- read.csv("sightings.csv")
# BUG FIX: this filter previously indexed `exisitingdata` (misspelled),
# which would either error or silently pick up a stale object of that
# name from the workspace.
existingdata <- existingdata[existingdata$Y > 0,] ## get rid of funky 0s, not sure what thats about
newdata <- read.csv("/Users/Maddie/Documents/GradSchool/Spring2020/WildlifeSightings/RawData/May/2_1_2__All_live_wildlife_sightings_000010.csv")
newdata <- newdata[newdata$Y > 0,] ## get rid of funky 0s, not sure what thats about
# rbind.fill (plyr) pads any mismatched columns with NA while stacking.
mergedobs <- rbind.fill(existingdata, newdata)
# Pre-computed distance-to-feature rasters used by find_distances() below.
riverraster <- raster("/Users/Maddie/Documents/GradSchool/Spring2020/WildlifeSightings/MpalaWater/Rasters/riverraster.tif")
roadraster <- raster("/Users/Maddie/Documents/GradSchool/Spring2020/WildlifeSightings/MpalaWater/Rasters/roadraster.tif")
waterraster <- raster("/Users/Maddie/Documents/GradSchool/Spring2020/WildlifeSightings/MpalaWater/Rasters/waterraster.tif")
# 400 m polygon grid over the buffered study area; `bufferobs` is built
# earlier in this script (outside this view).
sample_grid <- create_grid(bufferobs, grid.size = 400, random = TRUE, offset.mean = 10, offset.sd = 5, what="polygons")
## compared to the (264088.2 33723.97) to dam 1, the distance is 753.8003 using the grid method and it's 638.8 using qgis
##with the simple distance measuring tool is 638.96 to dam 1. zebra is in grid 533
#grid 533 = 263891 ymin: 33409.79 xmax: 264291 ymax: 33809.79 so four points =
## corners of polygon: 8676403, 8676408, 8677268, 999.0729
#(264250.3 34342)
#Overall function to find distances between river, road, and water on grid
# Joins each sighting to its 400 m grid cell, then extracts the mean
# value of each distance raster over that cell, so every observation
# gets a grid-averaged distance to river, road and water.
# Returns a plain data.frame (one row per observation) with three new
# columns: river_distance, road_distance, water_distance.
# NOTE(review): assumes `observationdata` has numeric X/Y columns in
# EPSG:21097 and that the rasters are in the same CRS as the grid --
# neither is checked here; verify against the raster files.
find_distances <- function(observationdata, riverraster, roadraster, waterraster, sample_grid) {
observationdata <- observationdata[observationdata$Y > 0,] ## get rid of funky 0s, not sure what thats about
## define coordinates
coords <- observationdata
coordinates(coords) <- c(X ="X", Y="Y") ## convert to spatialPoints object
proj4string(coords) <- CRS("+init=epsg:21097")
obs <- st_as_sf(coords) ## convert to sf object
obs <- st_transform(obs, 32637) ## transorm to UTM so we can work in meters instead of DDs
# Re-number the grid cells 1..n so the join key is deterministic.
sample_grid$gridID = seq(1, nrow(sample_grid), by = 1)
grid_data <- as.data.frame(sample_grid)
# Spatial join: each observation point gets the gridID of the cell
# that contains it (left join keeps points that fall in no cell).
joined.data <- st_join(obs, sample_grid, join = st_within, left = TRUE)
joined.data <- as.data.frame(joined.data)
merged.data <- merge(joined.data, grid_data, by="gridID")
# Swap geometries: keep the original point geometry aside, then promote
# the grid-polygon column ("x", from the merge) to be the active
# geometry so extract() averages the raster over the whole cell.
point_names <- subset(merged.data, select=geometry)
merged.data$geometry = NULL
colnames(merged.data)[which(names(merged.data) == "x")] <- "geometry"
observation_polygons <- st_as_sf(merged.data)
observation_polygons$obs.points = point_names
# Mean raster value over each observation's grid polygon.
obsriverdistance <- extract(riverraster, observation_polygons, method='simple', fun=mean)
obsroaddistance <- extract(roadraster, observation_polygons, method='simple', fun=mean)
obswaterdistance <- extract(waterraster, observation_polygons, method='simple', fun=mean)
observation_polygons_df <- as.data.frame(observation_polygons)
#add columns to dataframe with the river/road/water distances
observation_polygons_df$river_distance=obsriverdistance
observation_polygons_df$road_distance=obsroaddistance
observation_polygons_df$water_distance=obswaterdistance
return(observation_polygons_df)
}
# Compute grid-averaged distances for all merged sightings and persist.
river_road_water_distances <- find_distances(mergedobs, riverraster, roadraster, waterraster, sample_grid) #returns as dataframe
write.csv(river_road_water_distances, "river_road_water.csv")
head(river_road_water_distances)
#Adding Dam IDs
#st_join(sample_grid_object, dam points, join=st_nearest_feature, left=TRUE)
#gives you nearest dam to every grid id. gives you all grid ids and feature of dam data set
#merge to bigger dataset with grid based on grid id
#Create a separate grid for dam IDs that allows us to get feautres of dam data set
dam_grid <- create_grid(bufferobs, grid.size = 400, random = TRUE, offset.mean = 10, offset.sd = 5, what = "centers")
dam_grid$gridID = seq(1, nrow(dam_grid), by = 1)
dam_grid <- st_transform(dam_grid, "+init=epsg:21097 +proj=utm +zone=37 +ellps=clrk80 +towgs84=-160,-6,-302,0,0,0,0 +units=m +no_defs")
water_sf <- st_as_sf(watercoords)
# Nearest dam (with all its attributes) for every grid centre.
nearest_dam <- st_join(dam_grid, water_sf, join=st_nearest_feature, left=TRUE)
#Join distances added to dataframe above with the nearest dam data by gridID
river_road_waterID <- merge(river_road_water_distances, nearest_dam, by="gridID", all.x=TRUE)
#remove all geometries so we can transform into csv
# NOTE(review): positional column drop -- 23/24/44 are assumed to be the
# geometry columns; this silently breaks if the schema shifts. Confirm
# by name before re-running.
river_road_waterID <- river_road_waterID[-c(23,24,44)]
##Adding water level info
damlevelsID <- read.csv("DamLevels_ID.csv")
# NOTE(review): this read.csv() REPLACES the merge result computed just
# above -- presumably a previously written copy of the same data;
# verify the file is up to date or the merge above is wasted work.
river_road_waterID <- read.csv("river_road_water_dataID.csv")
#convert patrol dates to actual dates in R
river_road_waterID$Patrol.End.Date <- as.Date(river_road_waterID$Patrol.End.Date, format= "%d-%h-%y")
river_road_waterID$Patrol.Start.Date <- as.Date(river_road_waterID$Patrol.Start.Date, format= "%d-%h-%y")
colnames(river_road_waterID)[which(names(river_road_waterID) == "Number")] <- "DamID"
#Cleanup / Rename damlevelsID dataframe
names(damlevelsID)[1] <- "DamID"
names(damlevelsID)[3] <- "2020-01-01"
names(damlevelsID)[4] <- "2020-02-01"
names(damlevelsID)[5] <- "2020-03-01"
#Add all water level information to dataframe
river_road_waterID_levels <- merge(river_road_waterID, damlevelsID, by="DamID", all.x=TRUE)
#create dates as a separate vector to compare against sighting dates
datevector <- as.Date(names(damlevelsID)[3:5])
#function to find water date closest to sighting date
# Given one sighting date (a Date, or the bare numeric days-since-epoch
# that sapply() produces when it strips the Date class), return the name
# of the nearest measurement date as a "YYYY-MM-DD" string.
#
# `dates` defaults to the global `datevector` built above, so the
# existing call findclosestDate(row) is unchanged; passing it explicitly
# makes the function self-contained and testable.
findclosestDate <- function(row, dates = datevector) {
  # Base-R replacement for birk::which.closest(): index of the smallest
  # absolute difference. as.numeric() on both sides makes this robust
  # whether `row` arrives as a Date or a plain number.
  index <- which.min(abs(as.numeric(dates) - as.numeric(row)))
  format(dates[index], "%Y-%m-%d")
}
# Attach the closest measurement date ("YYYY-MM-DD") to every sighting.
river_road_waterID_levels$closestDate <- sapply(river_road_waterID_levels$Patrol.Start.Date, findclosestDate)
# Column index of that date's water-level column, per row; used by
# value() below to pull the matching reading.
indexes <- match(river_road_waterID_levels$closestDate, names(river_road_waterID_levels))
#Extract the value from that index
# For row i, pull the water-level reading stored in column idx[i] (one
# of the date-named columns matched above) and return the readings as a
# vector.
#
# `idx` defaults to the global `indexes`, so the existing call
# value(river_road_waterID_levels) is unchanged.
value <- function(river_road_waterID_levels, idx = indexes) {
  # Preallocate instead of growing with c(); seq_along() also handles an
  # empty index vector safely (the original 1:length(indexes) iterates
  # over c(1, 0) when empty and would fail).
  z <- vector("list", length(idx))
  for (i in seq_along(idx)) {
    z[[i]] <- river_road_waterID_levels[i, idx[i]]
  }
  unlist(z)
}
#Add values to data frame
river_road_waterID_levels <- cbind(river_road_waterID_levels, value(river_road_waterID_levels))
#rename column of water levels (cbind names the new column after the call)
colnames(river_road_waterID_levels)[which(names(river_road_waterID_levels) == "value(river_road_waterID_levels)")] <- "closestValue"
write.csv(river_road_waterID_levels, "CompleteDataFrame-May.csv")
##Graphing Distances for Dan
## date = closestDate(patrolStartDate, three_dates)
## return row[date]
# BUG FIX: the continuation line began with a literal "+" copied from a
# console prompt, which is a syntax error inside the call; removed.
# NOTE(review): `nonsfjoined` is not defined in this file, and the
# species filter matches " Elephant" with a leading space -- confirm
# both against the source data before running.
elliedata <- subset(nonsfjoined, Species==" Elephant",
select=Patrol.ID:road.distance)
ggplot(nonsfjoined, aes(river.distance, water.distance, colour = Species)) + geom_point()
p <- ggplot(river_road_waterID_levels, aes(closestValue, water_distance, colour = Species)) + geom_point()
p + facet_wrap(vars(Species))
##Testing Distances
#
# test <- st_distance(obs, waterobs, by_element = FALSE) # should get difference between each observation and each dam
# testdf <- as.data.frame(test)
# minimums <- apply(testdf, 1, min)
# minimums <- as.data.frame(minimums)
# minimums$index <- apply(testdf, 1, which.min)
#
# grid533 <- subset(sample_grid, gridID == 533)
# grid200 <- subset(sample_grid, gridID == 200)
# grid100 <- subset(sample_grid, gridID == 100)
# three_grids <- subset(sample_grid, gridID == 533 | gridID == 200| gridID == 100)
# three_grids <- as(st_geometry(three_grids), "Spatial")
# grid533 <- as(st_geometry(grid533), "Spatial")
# grid100 <- as(st_geometry(grid100), "Spatial")
# grid200 <- as(st_geometry(grid200), "Spatial")
# grid533 <- as(grid533, "SpatialPolygonsDataFrame")
# grid100 <- as(grid100, "SpatialPolygonsDataFrame")
# grid200 <- as(grid200, "SpatialPolygonsDataFrame")
# three_grids <- as(three_grids, "SpatialPolygonsDataFrame")
#
# writeOGR(obj=grid533, dsn="tempdir", layer="grid533", driver="ESRI Shapefile") # this is in geographical projection
# writeOGR(obj=grid100, dsn="tempdir", layer="grid100", driver="ESRI Shapefile") # this is in geographical projection
# writeOGR(obj=grid200, dsn="tempdir", layer="grid200", driver="ESRI Shapefile") # this is in geographical projection
#
# gridd533distance <- extract(waterraster, grid533, method='simple', fun=mean)
# #min = 4239
# #max = 4702
# #range is around 462 meters. The distances match up with qgis woohoo!
# gridd533distance <- extract(waterraster, grid533, method='simple')
# ggridd100distance <- extract(waterraster, grid100, method='simple', fun=mean)
# gridd200distance <- extract(waterraster, grid200, method='simple', fun=mean)
# threegrid_distance <- extract(waterraster, three_grids, method='simple', fun=mean)
# threegrid_distance
#
#
# ##Water dam
|
8b4b114c0c4c7771b5f20a2eeed3e238b8033aef
|
38eb5cf2965f0b2319942cd04c0a70c59d8825bc
|
/mlr_regression/mn_regression_swc.R
|
283fafcb978e473bf95bd0daa69a9c1c7f6df267
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
lmanubens/BigNeuron
|
769f1cb5451691a820c42c2dcccee649f5d002be
|
fc9d2052589f03d45ff530f56beebb1f1b18b77a
|
refs/heads/main
| 2023-04-18T18:39:54.181296
| 2023-01-21T00:46:46
| 2023-01-21T00:46:46
| 362,476,584
| 7
| 3
| null | 2022-05-12T01:22:09
| 2021-04-28T13:26:16
|
R
|
UTF-8
|
R
| false
| false
| 5,534
|
r
|
mn_regression_swc.R
|
# require(foreign)
# require(nnet)
# require(ggplot2)
# require(reshape2)
# # https://stats.idre.ucla.edu/r/dae/multinomial-logistic-regression/
# ml_b <- read.dta("https://stats.idre.ucla.edu/stat/data/hsbdemo.dta")
require(tidyr)
# Draw a caret confusion matrix as an alluvial diagram: correct
# predictions flow in green, misclassifications in red; empty cells are
# hidden. Called for its plotting side effect.
plotCM <- function(cm) {
  freq_table <- as.data.frame(cm[["table"]])
  # First two columns are predicted vs reference class.
  is_correct <- freq_table[[1]] == freq_table[[2]]
  freq_table[["color"]] <- ifelse(is_correct, "green", "red")
  alluvial::alluvial(
    freq_table[, 1:2],
    freq = freq_table$Freq,
    col = freq_table[["color"]],
    alpha = 0.5,
    hide = freq_table$Freq == 0
  )
}
# Load the per-neuron metric subset and the reconstruction metadata.
load('subsetdata.Rdata')
data <- my_data
load('groupsdf.Rdata')
# Collapse algorithm-name variants onto canonical names. A single lookup
# table replaces the original run of 14 element-wise reassignments;
# behaviour is identical. (as.character() keeps the lookup correct even
# if `algorithm` loads as a factor.)
alg_recode <- c(
  app2new1 = "app2",
  app2new2 = "app2",
  app2new3 = "app2",
  Advantra_updated = "Advantra",
  neutube_updated = "neutube",
  pyzh_updated = "pyzh",
  LCMboost_updated = "LCMboost",
  LCMboost_3 = "LCMboost",
  fastmarching_spanningtree_updated = "fastmarching_spanningtree",
  axis_analyzer_updated = "axis_analyzer",
  NeuronChaser_updated = "NeuronChaser",
  meanshift_updated = "meanshift",
  NeuroGPSTree_updated = "NeuroGPSTree",
  ENT_updated = "EnsembleNeuronTracerBasic"
)
to_fix <- groupsdf$algorithm %in% names(alg_recode)
groupsdf$algorithm[to_fix] <- alg_recode[as.character(groupsdf$algorithm[to_fix])]
# Combine metrics with metadata and keep the columns used downstream.
ml <- cbind(data,groupsdf)
ml <- ml[,c(1:28,44:61)]
# ml <- melt(ml,id=44:46)
# Image id = 8th path component of the reconstruction file path.
ml$ids <- sapply(strsplit(as.character(groupsdf$paths),'/'), "[", 8)
ml <- ml[ml$algorithm!="Annotated",]
##
# For each image id, label all of its rows with the algorithm that
# achieved the lowest average bi-directional entire-structure distance
# on that image.
ml$bestalg <- ml$algorithm
for(i in unique(ml$ids)){
  rows_i <- ml$ids == i
  best_score <- min(ml[rows_i,]$`average.of.bi.directional.entire.structure.averages`)
  # BUG FIX: the original matched the per-id minimum against the WHOLE
  # table, so a different image that happened to share the same score
  # could supply the winning algorithm; restrict the lookup to this id.
  bestalg <- ml[rows_i & ml$`average.of.bi.directional.entire.structure.averages` == best_score,]$algorithm
  ml[rows_i,]$bestalg <- bestalg[1]
}
# ml <- ml[ml$algorithm==ml$bestalg,]
# ml$algorithm <- NULL
##
names(ml)[names(ml) == 'average.of.bi.directional.entire.structure.averages'] <- 'av_bid_ent_str_av'
# NOTE(review): `variable` is only created by the melt() call commented
# out above; this table() will error unless a `variable` object exists
# elsewhere in the session -- verify before running.
with(ml, table(variable, algorithm))
# Mean and SD of the score per algorithm.
with(ml, do.call(rbind, tapply(av_bid_ent_str_av, algorithm, function(x) c(M = mean(x), SD = sd(x)))))
# https://dataaspirant.com/2017/02/03/decision-tree-classifier-implementation-in-r/
library(caret)
library(rpart.plot)
# Normalize dataset
# ml[,1:43] <- scale(ml[,1:43])
# Drop rows with any NA. (Style note: `==T` works but TRUE is safer --
# T is reassignable; left unchanged here.)
ml <- ml[complete.cases(ml)==T,]
anyNA(ml)
# Remove identifier columns that must not leak into the classifier.
ml$group <- NULL
ml$dataset <- NULL
ml$ids <- NULL
# ml$`entire-structure-average (from neuron 1 to 2)`<- NULL
# ml$`entire-structure-average (from neuron 2 to 1)`<- NULL
# ml$av_bid_ent_str_av <- NULL
# ml$`different-structure-average`<- NULL
# ml$`percent of different-structure (from neuron 1 to 2)`<- NULL
# ml$`percent of different-structure (from neuron 2 to 1)`<- NULL
# ml$`percent of different-structure`<- NULL
# Remove the outcome-defining distance metrics so the model cannot
# trivially read off the label it is meant to predict.
ml$`entire.structure.average..from.neuron.1.to.2.`<- NULL
ml$`entire.structure.average..from.neuron.2.to.1.`<- NULL
ml$av_bid_ent_str_av <- NULL
ml$`different.structure.average`<- NULL
ml$`percent.of.different.structure..from.neuron.1.to.2.`<- NULL
ml$`percent.of.different.structure..from.neuron.2.to.1.`<- NULL
ml$`percent.of.different.structure`<- NULL
ml <- droplevels(ml)
# ml[,1:43] <- scale(ml[,1:43])
# Create training dataset
# set.seed(3033)
# NOTE(review): no seed is set (the set.seed above is commented out),
# so the 70/30 split is not reproducible between runs.
intrain <- createDataPartition(y = ml$bestalg, p= 0.7, list = FALSE)
training <- ml[intrain,]
testing <- ml[-intrain,]
training <- droplevels(training)
#check dimensions of train & test set
dim(training); dim(testing);
# Training
# 10-fold cross-validation repeated 5 times, with class probabilities so
# multiClassSummary can report probability-based metrics.
# (A 20-fold/5-repeat control object was previously assigned here and
# immediately overwritten; the dead assignment has been removed.)
trctrl <- trainControl(method = "repeatedcv",
                       number = 10,
                       repeats = 5,
                       summaryFunction = multiClassSummary,
                       classProbs = TRUE)
# preProcessInTrain<-c("center", "scale")
# metric_used<-"algorithm"
# set.seed(3333)
# Fit a CART decision tree (rpart) predicting the best algorithm per
# image; tuneLength=20 searches 20 complexity-parameter values.
dtree_fit <- train(bestalg ~., data = training, method = "rpart",
                   parms = list(split = "gini"),
                   # trControl=trainControl(method="none"),
                   trControl=trctrl,
                   # metric=metric_used,
                   # preProc = preProcessInTrain,
                   tuneLength = 20)
# dtree_fit <- train(bestalg ~., data = training, method = "treebag")
# dtree_fit <- train(bestalg ~., data = training, method = "LogitBoost")# 85% Acc
# Plot decision tree
prp(dtree_fit$finalModel, box.palette = "Blues", tweak = 1.2)
library(rattle)
fancyRpartPlot(dtree_fit$finalModel)
# Predict
predict(dtree_fit, newdata = testing[1,])
test_pred <- predict(dtree_fit, newdata = testing)
# NOTE(review): `%>%` requires magrittr/dplyr to be attached; none of
# the library() calls in this file provide it -- confirm the session.
confusionMatrix(test_pred, testing$bestalg) %>% plotCM() #check accuracy
confusionMatrix(test_pred, testing$bestalg)
|
d92b834a4c2891013ad05cd9faa622f982f3274b
|
057cb37817ffeec47fffecdabb59fc1f060884e8
|
/old/experiment_real_data_missing_data_3/import.data.pf.R
|
c9af0f520d6ed13f3e768087f9b765d14b1434ff
|
[] |
no_license
|
BIMIB-DISCo/MST
|
7e1a89e430ed0c16f42a9caecafb0e1f426189fc
|
d1db82d70c6df9c19ab86153e3b7e696d1f7588b
|
refs/heads/master
| 2021-01-13T06:56:36.494870
| 2018-12-09T22:22:08
| 2018-12-09T22:22:08
| 54,653,989
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 920
|
r
|
import.data.pf.R
|
# load the required R packages
library(ape)
library(TRONCO)
# structure to save all the results
# NOTE(review): `data_paper1` is never used in this script as shown.
data_paper1 = list()
# read the data and format them
data_1 = read.table(file=paste0(getwd(),"/alterations.txt"),header=TRUE,sep="\t",check.names=FALSE,stringsAsFactors=FALSE)
#rownames(data_1) = data_1[,1]
#data_1 = data_1[,-1]
# Cell-wise recoding: "-" marks a missing entry (-> NA) and state 2 is
# collapsed into state 1, leaving a binary presence/absence matrix.
for (i in 1:nrow(data_1)) {
for(j in 1:ncol(data_1)) {
if(data_1[i,j]=="-") {
data_1[i,j] = NA
}
else if(data_1[i,j]==2) {
data_1[i,j] = 1
}
}
}
data_1 = as.matrix(data_1)
# Columns were read as character (because of "-"); convert to numeric.
original.data.paper3 = apply(data_1, 2, as.numeric)
# NOTE(review): this first rownames assignment is immediately
# overwritten two lines below by the "s" labels.
rownames(original.data.paper3) = 1:nrow(original.data.paper3)
colnames(original.data.paper3) = paste0('c', 1:ncol(original.data.paper3))
rownames(original.data.paper3) = paste0('s', 1:nrow(original.data.paper3))
# Neighbour-joining tree from pairwise gene distances (ape).
stree = nj(dist.gene(original.data.paper3))
plot(stree)
dev.copy2pdf(file = 'data_3.pdf')
|
82c40864f911f784e34d5c4e6962b119d4f8df3b
|
1757fb5db6b90cfb4a09e73cf1178fdbe851bf3d
|
/script_prep_recursive.R
|
0357e5d2d8a530754aa89d522c8ec3571c3df873
|
[] |
no_license
|
gvschweinitz/BLS_19_Does-Machine-Learning-Help-Us-Predict-Banking-Crises
|
b03a2ea494926033fed547d1b0e5a11f80696e8f
|
578a204b6f4fade8bf05793535607b68a463ea3a
|
refs/heads/master
| 2023-01-21T10:03:37.193918
| 2020-12-01T09:28:38
| 2020-12-01T09:28:38
| 313,629,287
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
script_prep_recursive.R
|
####################################
# Setting some names
# Object-name strings consumed by the sibling scripts (script_tables etc.).
datasets.eval <- "data.db.recursive.eval"
resnames <- "res.recursive"
posvecs.eval <- "pos.eval.recursive" # posvecs.eval is used to select from resnames (in script_tables)
resnames.bs <- "res.bs.recursive"
##################################
# Bootstrap settings
do.bootstrap <- TRUE
R.BS <- 500 # number of bootstrap draws (500)
blocklength <- 12 # blocklength for autocorrelation
sample.countries <- FALSE # TRUE if countries are to be randomly excluded, FALSE otherwise (our preference: FALSE)
min_cr <- 5 # minimum number of pre-crisis periods in the bootstrap
alphavec <- c(0.05,0.95) # quantiles to be plotted as confidence bands c(0.05,0.95) c(0.1,0.9)
##################################
# parameter settings
# NOTE(review): `parameters` and `paramtable_file` are expected to exist
# in the calling environment; this script is sourced, not standalone.
parameters$optimizationMode <- FALSE
parameters$paramtable <- read.csv(paramtable_file,row.names="method")
# Columns read as factors/characters: *_val columns become numeric,
# *_name columns become plain character.
for (col in grep("_val",colnames(parameters$paramtable))){
parameters$paramtable[,col] <- as.numeric(as.character(parameters$paramtable[,col]))
}
for (col in grep("_name",colnames(parameters$paramtable))){
parameters$paramtable[,col] <- as.character(parameters$paramtable[,col])
}
|
28717ab29152a359909941bc0067fc0a34f0ebca
|
7f61b86168c2cf799a8bb34bb0e889909f72d302
|
/onlyr.r
|
68f74738025cf1430014d7dee765c37924d2db22
|
[] |
no_license
|
alex-mccall/NER
|
b4debcc66971a94a5872e3bfa38f097d15bc5b2a
|
fcc089e7f5869e811087d4d40911e7caf2bea7c8
|
refs/heads/master
| 2023-08-03T21:57:20.677670
| 2021-09-22T09:50:38
| 2021-09-22T10:27:39
| 376,233,380
| 0
| 0
| null | 2021-08-16T01:15:05
| 2021-06-12T07:53:26
|
R
|
UTF-8
|
R
| false
| false
| 2,013
|
r
|
onlyr.r
|
library(crfsuite)
# CoNLL-2002 Dutch NER data; build windowed POS/token features per
# document for the CRF tagger below.
x <- ner_download_modeldata("conll2002-nl")
subset(x, doc_id == 100)
library(data.table)
x <- as.data.table(x)
# Previous/next POS tag and token within each document (NA at edges).
x <- x[, pos_previous := shift(pos, n = 1, type = "lag"), by = list(doc_id)]
x <- x[, pos_next := shift(pos, n = 1, type = "lead"), by = list(doc_id)]
x <- x[, token_previous := shift(token, n = 1, type = "lag"), by = list(doc_id)]
x <- x[, token_next := shift(token, n = 1, type = "lead"), by = list(doc_id)]
# Prefix each feature with its window position, CRFsuite-style.
x <- x[, pos_previous := txt_sprintf("pos[w-1]=%s", pos_previous), by = list(doc_id)]
x <- x[, pos_next := txt_sprintf("pos[w+1]=%s", pos_next), by = list(doc_id)]
x <- x[, token_previous := txt_sprintf("token[w-1]=%s", token_previous), by = list(doc_id)]
# BUG FIX: token_next was labelled "token[w-1]" (copy-paste from the
# previous line), so the CRF saw two indistinguishable w-1 token
# features and no w+1 token feature.
x <- x[, token_next := txt_sprintf("token[w+1]=%s", token_next), by = list(doc_id)]
subset(x, doc_id == 100, select = c("doc_id", "token", "token_previous", "token_next"))
x <- as.data.frame(x)
# Standard CoNLL-2002 split: ned.train for fitting, testa for evaluation.
crf_train <- subset(x, data == "ned.train")
crf_test <- subset(x, data == "testa")
# Linear-chain CRF over the six window features, grouped by document.
model <- crf(y = crf_train$label,
             x = crf_train[, c("pos", "pos_previous", "pos_next",
                               "token", "token_previous", "token_next")],
             group = crf_train$doc_id,
             method = "lbfgs", file = "tagger.crfsuite",
             options = list(max_iterations = 25, feature.minfreq = 5, c1 = 0, c2 = 1))
model
stats <- summary(model)
# Training-loss trace over L-BFGS iterations.
plot(stats$iterations$loss, pch = 20, type = "b",
     main = "Loss evolution", xlab = "Iteration", ylab = "Loss")
scores <- predict(model,
                  newdata = crf_test[, c("pos", "pos_previous", "pos_next",
                                         "token", "token_previous", "token_next")],
                  group = crf_test$doc_id)
crf_test$entity <- scores$label
# Predicted vs gold label contingency table.
table(crf_test$entity, crf_test$label)
# NOTE(review): install.packages() calls do not belong in an analysis
# script -- they re-install on every run; move to a setup step.
install.packages("shiny")
install.packages("flexdashboard")
install.packages("DT")
install.packages("writexl")
rmarkdown::run(file = system.file(package = "crfsuite", "app", "annotation.Rmd"))
|
49326bd26389f2985b09546ff5ae4a8b07eebfb6
|
81d75f0a026326a6a504c9acee33ca49029765d7
|
/R/MultiscaleAnalysis/man/trim.trailing.Rd
|
de8d33985097961047719104b67889459462d85b
|
[] |
no_license
|
jamiepg1/multiscale-galactose
|
669b3068850ebdf324a90af3c35a01874dcfea16
|
05b0a7b3a41e5870c23fc1c9711ec6d46126d6a2
|
refs/heads/master
| 2018-05-30T08:02:58.651778
| 2014-08-06T15:26:09
| 2014-08-06T15:26:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 303
|
rd
|
trim.trailing.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{trim.trailing}
\alias{trim.trailing}
\title{Returns string w/o trailing whitespace}
\usage{
trim.trailing(x)
}
\arguments{
\item{x}{string to strip whitespace from}
}
\value{
stripped string
}
\description{
Returns string w/o trailing whitespace
}
|
640452439366ef8a815c23260df98373b0de4710
|
dbf026742bfa0213d8fde73647e24e6a940d4ba4
|
/week2.R
|
072eadf9f2f5f40810bcd7c14d0da2640a767f66
|
[] |
no_license
|
cjf4/Summer-Bridge-Assignments
|
549cb07d73620b29c6518026ad40a4f231d19a75
|
0fb5657b8bfc8f477f399eb49e61940f6c24cfe8
|
refs/heads/master
| 2021-03-12T20:06:23.046012
| 2015-09-01T01:45:00
| 2015-09-01T01:45:00
| 38,578,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
week2.R
|
# Factorial of a non-negative whole number. Invalid input (negative or
# fractional) returns the explanatory string rather than erroring,
# matching the original contract.
fenton_factorial <- function(x) {
  if (x < 0 || round(x) != x) {
    return("Factorials must be non negative integers")
  }
  # prod() over the empty sequence is 1, which also covers x == 0.
  prod(seq_len(x))
}

# Binomial coefficient "n choose r", computed from factorials.
fenton_choose <- function(n, r) {
  numerator <- fenton_factorial(n)
  denominator <- fenton_factorial(n - r) * fenton_factorial(r)
  numerator / denominator
}
|
f1aa60bbb839ef6f0c0b2c66efd998c6be3eacf2
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/Rankcluster/man/unfrequence.Rd
|
465cfe3b90dc8362a477a51e18085a10eca1bf93
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 660
|
rd
|
unfrequence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RankFunctions.R
\name{unfrequence}
\alias{unfrequence}
\title{Convert data}
\usage{
unfrequence(data)
}
\arguments{
\item{data}{a matrix containing rankings and observation frequency.}
}
\value{
a matrix containing all the rankings.
}
\description{
This function takes as input a matrix in which the first m columns are the different observed ranks and the last column contains the observation frequency,
and returns a matrix containing all the ranks (ranks with frequency>1 are repeated).
}
\examples{
data(quiz)
Y <- unfrequence(quiz$frequency)
Y
}
\seealso{
\link{frequence}
}
|
9add266af52bf6fcf3c0488ac55068e753f58aa6
|
7635df9f67bc61d1ead8da6b8d6a876f4a79340d
|
/old_scripts/05_plotting/counting.R
|
3d89bd5fdd605de5f9df3f86961b4f16ae5c890e
|
[] |
no_license
|
Tubbz-alt/RNAseqMuscle
|
ea66d4196889aed912a1dcc94f972e1f9c8e0817
|
d56b152e2db30e778a14cee7830a86545f1ae489
|
refs/heads/master
| 2022-02-14T23:22:34.280891
| 2019-07-11T23:10:42
| 2019-07-11T23:10:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,725
|
r
|
counting.R
|
# Need to load the jaccard.R and logseq.R functions before running this function.
# --- Change these conditions ---
data_file = 'C:/Users/sarah/OneDrive/Documents/2018/04_2018_Fall/RNAseq_analysis/2018_12_12/soleus-data-combined.txt'
# directory to save results - no trailing forward slash!
save_path = 'C:/Users/sarah/OneDrive/Documents/2018/04_2018_Fall/RNAseq_analysis/2018_12_12/'
# column names; these are basically arbitrary and do not include the column of gene names
col_names <- c('wt 1','wt 2','wt 3','wt 4','wt 5','wt 6','mut 1','mut 2','mut 3','mut 4','mut 5')
# set up the two groups (in this case, I have 6 wt replicates followed by 5 mut replicates)
groups <- factor(c(1,1,1,1,1,1,2,2,2,2,2))
# condition vector - tells DESeq what contrast to do (I called wt = untreated and wt = treated here)
condition <- c(rep("untreated",6),rep("treated",5))
# threshold for significance
adjp <- 0.05
# drop genes with very low total counts? (recommended)
drop_low = TRUE
# Run both differential-expression pipelines (custom wrappers, defined
# in separate files per the header note).
deres <- DESeq2_DE(data_file, col_names, condition, adjp, drop_low)
deedg <- edgeR_DE(data_file, groups, adjp, drop_low)
# --------------------------------------------------------------------------------------
# Log-spaced grid of 1000 p-value cutoffs from 0.1 down to 1e-5.
s = logseq(0.1, 10^-5, 1000)
# Empty accumulator frames, filled row-by-row in the loop below.
table1 <- data.frame("pvalue"=integer(),"condition"= integer(),"N"=integer(), "MedianLogFold" = integer())
table2 <- data.frame("pvalue"=integer(),"condition"= integer(),"N"=integer(), "MedianLogFold" = integer())
table4 <- data.frame("pvalue"=integer(), "JaccardIndex" = integer())
# --- Creating a data.frame of the DESeq and edgeR data ---
# Walk the p-value grid `s`, recording for each cutoff: the number of
# significant genes per method, the median |log-fold change|, and the
# Jaccard overlap between the DESeq2 and edgeR gene lists.
count= 0
for(i in s){
# --- Creating the subset of data results ---
DESE <- deres$table[which(deres$table$padj< i),]
EDR <- deedg$table[which(deedg$table$PValue< i),]
# --- Number of genes at specific p-values for each method---
a <- nrow(DESE)
d <-c("DESeq")
b <- nrow(EDR)
e <-c("edgeR")
# --- Calculating the median of the log-fold change for each method ---
deseq <- median(abs(DESE$log2FoldChange))
edge <- median(abs(EDR$logFC))
# --- Isolate the gene symbols for each method---
xrow <- row.names(DESE)
yrow <- row.names(EDR)
# --- Filling in the tables ---
# NOTE: c(i, d, a, deseq) mixes strings and numbers, so every value is
# coerced to character; the columns are converted back to numeric below.
table1[nrow(table1)+1,] <- c(i,d,a,deseq)
table2[nrow(table2)+1,] <- c(i,e,b,edge)
# --- Jaccard Index value at specific p-values ---
f <- jaccard(xrow,yrow)
table4[nrow(table4)+1,] <- c(i,f)
count = count + 1
}
# --- Removing the last (possibly all-NA) row of the tables ---
# FIX: the original `table1[1:count-1,]` parses as (1:count)-1, i.e.
# 0:(count-1); it only kept rows 1..count-1 because the zero index is
# silently dropped. seq_len() states that intent directly.
table1 <- table1[seq_len(count - 1),]
table2 <- table2[seq_len(count - 1),]
table4 <- table4[seq_len(count - 1),]
# --- Combine DESeq and edgeR tables ---
table3 <- rbind(table1,table2)
# --- Converting characters into numericals ---
# Needed because row-wise filling above coerced everything to character.
table3$pvalue <-as.numeric(table3$pvalue)
table3$MedianLogFold <- as.numeric(table3$MedianLogFold)
table3$N <- as.numeric(table3$N)
table4$pvalue <-as.numeric(table4$pvalue)
table4$JaccardIndex <- as.numeric(table4$JaccardIndex)
# --- Data Visualization ---
library("ggplot2")
library("gridExtra")
library(ggpubr)
library(RColorBrewer)
# Three panels vs -log10(p): gene counts, Jaccard overlap, and median
# |log-fold change|; vertical line marks p = 0.01.
p1 <- ggplot(table3, aes(x= -log10(pvalue), y=N, color=condition)) + geom_point() +scale_x_log10()+theme(legend.position="none", axis.title.x = element_blank())+scale_color_manual(values = c("red", "blue")) + geom_vline(xintercept = -log10(0.01))
p2 <- ggplot(table3, aes(x= -log10(pvalue), y=MedianLogFold, color=condition)) + geom_point() + scale_x_log10()+theme(legend.position="none") + scale_color_manual(values = c("red", "blue")) +geom_vline(xintercept = -log10(0.01))
p3 <- ggplot(table4, aes(x= -log10(pvalue), y=JaccardIndex)) + geom_point() + scale_x_log10()+theme(axis.title.x = element_blank()) + geom_vline(xintercept = -log10(0.01))
ggarrange(p1, p3, p2, ncol=1, nrow = 3, common.legend = TRUE, legend = "bottom")
|
315caf360846ce6154b5f558b68aec06d0c5f3d3
|
fb1140dd0146e7c42ec2352ea8492be7a492d062
|
/code/tdc_ed.R
|
16212e5a9f27135b28efc74fd8d72165c9e7ec7c
|
[] |
no_license
|
rowlandseymour/PBLA
|
4231f545c6d2456ed0402b1957eba42e08755933
|
d9d9e72d53bf53dfb861a19febcf1ef981c384dd
|
refs/heads/master
| 2022-03-16T16:50:49.545215
| 2019-11-06T00:22:13
| 2019-11-06T00:22:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,087
|
r
|
tdc_ed.R
|
##################################################
### Code to generate Eichner Dietz (ED) method ###
### results for the Tristan da Cunha data ###
##################################################
# Install any missing and required packages
list.of.packages <- c("ggplot2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library("ggplot2")
##################################################
## Read in the data (files provided in data folder of github repository)
# NOTE(review): `data` and `c` shadow base-R names here. Calls to the
# function c() still resolve (R skips non-functions in call position),
# but this is fragile -- consider renaming.
data<-read.table("../data/TristanDaCunha/tdc_jitteredtimes.txt", header=FALSE)
c<-read.table("../data/TristanDaCunha/tdc_agegroups.txt", header=FALSE)
data <- cbind(data,c)
##################################################
## Set up required variables and functions
datar<-data[1:40,1]
N<-254 # population size
nI <-40 # number infected
g<-0.371 # gamma (removal rate)
beta <- c(0.00451, 0.00181, 0.00131) # beta (infection rate)
theta <- c(0.00451, 0.00181, 0.00131, g) # set of parameters to be estimated
# Trapezium rule: numerically integrate y over the (possibly unevenly
# spaced) grid x. Requires length(x) >= 2 with matching y.
trapz <- function(x, y) {
  # Each interval contributes width * mean of the endpoint heights.
  widths <- diff(x)
  endpoint_sums <- y[-length(y)] + y[-1]
  sum(widths * endpoint_sums) / 2
}
# ED log likelihood (to be minimised in NLM, so made negative)
# Eichner-Dietz approximate likelihood for the SIR model with three
# age-group infection rates theta[1:3] and removal rate theta[4].
# theta: parameter vector; N/nI: population and infected counts;
# data: removal times (col 1) and age groups (col 2); K: quadrature
# points; init.value: lower integration limit.
# Returns the NEGATIVE log likelihood (large constant for invalid theta).
likelihood_o_min<-function(theta, N, nI, data, K = 1000, init.value = -10){
# all parameters must be non-negative
if (any(theta<0)){
# Penalty value steers nlm() away from the invalid region.
return(100000.00)
}
# set up
# NOTE(review): the hard-coded 1:40 rows and the m1/m2/m3 counts below
# are specific to the Tristan da Cunha dataset (40 infecteds) -- this
# function is not generic despite taking N/nI as arguments.
r<-data[1:40,1] # removal times
type<-data[,2] # age groups
g<-theta[4] # gamma removal rate
# number never infected in each group
m1 <- 16
m2 <- 30
m3 <- 168
#contribution from infectives
lhc <- 0
for (k in 2:length(r)){
# which beta, from age groupings
if (type[k]==1){b = theta[1]
}else if (type[k]==2){b = theta[2]
}else{b = theta[3]}
# time to integrate over
x <- seq(init.value, max(r), length.out=K)
# now calculate the contribution...
# sumA[i]: sum over all other infecteds of exp(-g * time since x[i]),
# i.e. the expected infectious pressure at grid point x[i].
sumA <- rep(0,K)
for (i in 1:length(x)){
sumA[i] <- sum(exp(-g*(r-pmin(x[i],r)))[-k])
}
y <- rep(0,K)
for (j in 1:length(r)){
if (j!=k){
choose.index <- which(x < min(r[k], r[j])) # integral limits
y[choose.index ]<- y[choose.index] + exp(-g*(r[j]+r[k]) + 2*g*x[choose.index] - (b/g)*sumA[choose.index])
}
}
# Integrate the summed kernel with the trapezium rule defined above.
lhc <- lhc + log(b) + log(g) + log(trapz(x,y))
}
#contribution from non-infectives
nilhc <- -(nI/g)*(theta[1]*m1 + theta[2]*m2 + theta[3]*m3)
#overall negative log likelihood
loglh <- lhc + nilhc
return(-loglh)
}
# Since we would like MAPs, we also need to include priors
# Objective passed to nlm() below (minimised).
# NOTE(review): three concerns to confirm against the intended model:
# (1) pgamma() is the gamma CDF -- a log-prior for a MAP would normally
#     use the density dgamma(); with these tiny shape parameters the
#     CDF is ~1 so the term is ~0, which may be why results look sane;
# (2) the prior terms are ADDED to a NEGATIVE log-likelihood -- for a
#     minimised negative log-posterior they should be subtracted;
# (3) the K and init.value arguments are accepted but ignored: the
#     inner call hard-codes K = 1000 and init.value = -10.
post<-function(theta, N, nI, data, K = 1000, init.value = -10){
likelihood_o_min(theta, N, nI, data, K = 1000, init.value = -10) +
log(pgamma(theta[1], shape=0.0000001, scale = 1/0.00001)) +
log(pgamma(theta[2], shape=0.0000001, scale = 1/0.00001)) +
log(pgamma(theta[3], shape=0.0000001, scale = 1/0.00001)) +
log(pgamma(theta[4], shape=0.0001, scale = 1/0.001))
}
##################################################
## Now, perform the optimisation
# note: warning 'NA/Inf replaced by maximum positive value' from NLM may occur - unacceptable parameter values just being tested
# Minimise the (negative) posterior objective from a rough start point;
# typsize scales the step sizes per parameter.
opt3<-nlm(post, c(0.1,0.1,0.1,0.01), N=N, nI=nI, data=data, typsize = c(0.001, 0.001, 0.001, 0.1),
fscale = -225, print.level=2 )
# append = TRUE: repeated runs accumulate in the results file.
write(opt3$estimate, file ="ed_tdc_opt.txt", sep = ",", ncolumns=1, append = TRUE)
#$estimate
# [1] 0.01040.00408 0.00289 0.879
##################################################
## Create plots comparing PBLA, DA-MCMC and ED
# Read in the results from PBLA:
beta1<-read.table("beta1.txt")
beta2<-read.table("beta2.txt")
beta3<-read.table("beta3.txt")
gamma<-read.table("gamma.txt")
# and for ED (in case we ran the optimiser in another session):
opt3$estimate <- t(read.table("ed_tdc_opt.txt"))
# We add the Hayakawa et al DA-MCMC results in manually
# Create figures:
# Posterior histogram for beta1; solid vline = DA-MCMC estimate,
# dashed vline = ED (MAP) estimate. Same pattern for each parameter.
pl<-ggplot(data=data.frame(beta1[,1]), aes(beta1[,1])) +
geom_histogram(col="gray2", fill="gray2", alpha = .2) +
ggtitle("") + theme_light() +
theme(plot.title = element_text(hjust = 0.5, size = 30), axis.text=element_text(size=18,colour="black"),
axis.title=element_text(size=25), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
labs(x=expression(beta[1]), y="Count") +
xlim(c(0,0.03))
pl + geom_vline(xintercept = 0.00451, linetype="solid",
color = "black", size=1) + geom_vline(xintercept = opt3$estimate[1], linetype="dashed",
color = "black", size=1)
# beta2: uses scale_x_continuous with explicit breaks instead of xlim.
pl<-ggplot(data=data.frame(beta2[,1]), aes(beta2[,1])) +
geom_histogram(col="gray2", fill="gray2", alpha = .2) +
ggtitle("") + theme_light() +
theme(plot.title = element_text(hjust = 0.5, size = 30), axis.text=element_text(size=18,colour="black"),
axis.title=element_text(size=25), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
labs(y="Count") +
scale_x_continuous(name=expression(beta[2]),breaks=c(0.0,0.005,0.01), labels=c(0.0,0.005,0.01), limits=c(0,0.01))
pl + geom_vline(xintercept = 0.00181, linetype="solid",
color = "black", size=1) + geom_vline(xintercept = opt3$estimate[2], linetype="dashed",
color = "black", size=1)
# beta3 histogram.
pl<-ggplot(data=data.frame(beta3[,1]), aes(beta3[,1])) +
geom_histogram(col="gray2", fill="gray2", alpha = .2) +
ggtitle("") + theme_light() +
theme(plot.title = element_text(hjust = 0.5, size = 30), axis.text=element_text(size=18,colour="black"),
axis.title=element_text(size=25), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
labs(x=expression(beta[3]), y="Count") +
xlim(c(0,0.006))
pl + geom_vline(xintercept = 0.00131, linetype="solid",
color = "black", size=1) + geom_vline(xintercept = opt3$estimate[3], linetype="dashed",
color = "black", size=1)
# gamma (removal rate) histogram.
pl<-ggplot(data=data.frame(gamma[,1]), aes(gamma[,1])) +
geom_histogram(col="gray2", fill="gray2", alpha = .2) +
ggtitle("") + theme_light() +
theme(plot.title = element_text(hjust = 0.5, size = 30), axis.text=element_text(size=18,colour="black"),
axis.title=element_text(size=25), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
labs(x=expression(gamma), y="Count") +
xlim(c(0,2))
pl + geom_vline(xintercept = 0.371, linetype="solid",
color = "black", size=1) + geom_vline(xintercept = opt3$estimate[4], linetype="dashed",
color = "black", size=1)
## R0
# Basic reproduction number: group sizes 25/36/193 weight the three
# betas, divided by the removal rate.
R0_ed<- (opt3$estimate[1]*25 + opt3$estimate[2]*36 + opt3$estimate[3]*193)/opt3$estimate[4]
R0<- (beta1*25 + beta2*36 + beta3*193)/gamma
pl<-ggplot(data=data.frame(R0[,1]), aes(R0[,1])) +
geom_histogram(col="gray2", fill="gray2", alpha = .2) +
ggtitle("") + theme_light() +
theme(plot.title = element_text(hjust = 0.5, size = 30), axis.text=element_text(size=18,colour="black"),
axis.title=element_text(size=25), panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black")) +
labs(x=expression(R[0]), y="Count") +
xlim(c(0,2.5))
pl + geom_vline(xintercept = 1.2, linetype="solid",
color = "black", size=1) + geom_vline(xintercept = R0_ed, linetype="dashed",
color = "black", size=1)
|
83f0654f36fa8ba87eb092ae9052271894f03543
|
d1844855b2aff3b43af5094f1a214b0a84b75242
|
/2.Tables.R
|
96ed190e663a0602a3bee5bcf756aaf96e3d0305
|
[] |
no_license
|
AlistairMcNairSenior/BCAAs_Meta_Analysis
|
829b9879ae4eb032c35064809aa6d7a7e6427b01
|
8d19585625e09ad60b3c01ba7f95f42ee24598a5
|
refs/heads/main
| 2023-05-11T20:57:54.284148
| 2021-06-09T02:58:57
| 2021-06-09T02:58:57
| 375,200,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,245
|
r
|
2.Tables.R
|
# Clean up the R environment
# NOTE(review): rm(list=ls()) and hard-coded setwd() are interactive-
# session conventions; they make the script hostile to source()-ing
# from other code. Kept as-is to match the project's script style.
rm(list=ls())
# Set the WD
wd<-"/Users/alistairsenior/Dropbox (Sydney Uni)/2018 BCAA Meta-analysis/Analysis" # Work iMac
#wd<-"/Users/asenior/Dropbox (Sydney Uni)/2018 BCAA Meta-analysis/Analysis" # Work Macbook
#wd<-"/Users/asenior/Dropbox (Sydney Uni)/2018 BCAA Meta-analysis/Analysis" # Home iMac
setwd(paste0(wd, "/Analyses"))
library(metafor)
library(plyr)
library(corpcor)
library(ape)
library(splines)
source("0.Headers.R")
# Load the results files, and format in to 2 part lists to make the tables
# Pattern repeated below: each analysis object was saved in two parts
# (pt1 and pts2.3); load each, stash under a temporary name, and pair
# them into a two-element list for the table-building loop that follows.
load("traits_pt1.Rdata")
traits_1<-traits
load("traits_pts2.3.Rdata")
traits_2<-traits
traits<-list(traits_1, traits_2)
load("data_list_pt1.Rdata")
data_1<-data_list
load("data_list_pts2.3.Rdata")
data_2<-data_list
data_list<-list(data_1, data_2)
load("TSV_list_pt1.Rdata")
TSV_1<-TSV_list
load("TSV_list_pts2.3.Rdata")
TSV_2<-TSV_list
TSV_list<-list(TSV_1, TSV_2)
load("MA_pool_list_pt1.Rdata")
MA_1<-MA_pool_list
load("MA_pool_list_pts2.3.Rdata")
MA_2<-MA_pool_list
MA_pool_list<-list(MA_1, MA_2)
load("ER_pool_list_pt1.Rdata")
ER_1<-ER_pool_list
load("ER_pool_list_pts2.3.Rdata")
ER_2<-ER_pool_list
ER_pool_list<-list(ER_1, ER_2)
load("TF_pool_list_pt1.Rdata")
TF_1<-TF_pool_list
load("TF_pool_list_pts2.3.Rdata")
TF_2<-TF_pool_list
TF_pool_list<-list(TF_1, TF_2)
load("MR_nutri_list_pt1.Rdata")
MR_nut_1<-MR_nutri_pool_list
load("MR_nutri_list_pts2.3.Rdata")
MR_nut_2<-MR_nutri_pool_list
MR_nutri_pool_list<-list(MR_nut_1, MR_nut_2)
load("MR_other_pool_list_pt1.Rdata")
MR_other_1<-MR_other_pool_list
load("MR_other_pool_list_pts2.3.Rdata")
MR_other_2<-MR_other_pool_list
MR_other_pool_list<-list(MR_other_1, MR_other_2)
# Change the WD to the tables folder
setwd(paste0(wd, "/tables"))
# Summary Table
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Grab trait t from part p
data<-data_list[[p]][[t]][[1]]
# Generate a summary
summary_t<-data.frame(trait=traits[[p]][[t]], n_articles=length(unique(data$Art_ID)), n_experiments=length(unique(data$Experimental_Unit)), n_diets=length(unique(c(data$Group, data$exp_Group))), n_lnRR=dim(data)[1], prop_mouse=mean(data$Species == "M"))
# Combine the the results
if(p == 1 & t==1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
}
}
# Save the summary table
write.table(summary, file="summary_table.csv", sep=",", row.names=F, col.names=names(summary))
# Meta-Analysis Results
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Grab trait t from part p
model<-MA_pool_list[[p]][[t]][[1]]
TSV<-TSV_list[[p]][[t]][[1]]
tau2<-sum(model[[2]][,c(1,2)])
I2_total<-tau2/(tau2+TSV) * 100
I2_Exp<-model[[2]][,1]/(tau2+TSV) * 100
# Generate a summary
summary_t<-data.frame(trait=traits[[p]][[t]], Est.=model[[1]][,1], LCL=model[[1]][,1]-1.96*sqrt(model[[1]][,2]), UCL=model[[1]][,1]+1.96*sqrt(model[[1]][,2]), sigma2_Exp=model[[2]][,1], sigma2_Resid=model[[2]][,2], I2_total=I2_total, I2_Exp=I2_Exp)
# Combine the the results
if(p == 1 & t==1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
}
}
# Save the summary table
write.table(summary, file="MA_table.csv", sep=",", row.names=F, col.names=names(summary))
# PB Results
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Grab trait t from part p
ER_model<-ER_pool_list[[p]][[t]][[1]]
TF_model<-TF_pool_list[[p]][[t]][[1]]
# Results table
summary_t<-data.frame(trait=traits[[p]][[t]], ER_coef=ER_model[[1]][2,1], ER_LCL=ER_model[[1]][2,1] - 1.96*sqrt(ER_model[[1]][2,3]), ER_UCL=ER_model[[1]][2,1] + 1.96*sqrt(ER_model[[1]][2,3]), n_missing=TF_model[[3]][1], prop_right=TF_model[[3]][2], TF_coef=TF_model[[1]][1], TF_LCL=TF_model[[1]][1] - 1.96*sqrt(TF_model[[1]][2]), TF_UCL=TF_model[[1]][1] + 1.96*sqrt(TF_model[[1]][2]))
# Combine the the results
if(p == 1 & t==1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
}
}
# Save the summary table
write.table(summary, file="PB_table.csv", sep=",", row.names=F, col.names=names(summary))
# 'Other' Meta-Regression Results
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Grab trait t from part p
models<-MR_other_pool_list[[p]][[t]][[1]]
# Results table
summary_t<-data.frame(trait=NA, Coef=NA, Est.=NA, LCL=NA, UCL=NA, m.models=NA)
# Go through each model and pull out the results
for(r in 1:length(models)){
# pull out the rth_model
model_r<-models[[r]]
# Get the numebr of coefficients
n_coef<-length(row.names(model_r[[1]]))
# Check out that the model actually fitted
if(is.na(model_r[[1]][1,1]) == F){
# Names of the coeffs
names<-row.names(model_r[[1]])
names[2]<-paste0(names[2], " - ", names[1])
# Get the coefficients as a difference
coeffs<-model_r[[1]][,1]
coeffs[2]<-coeffs[2] - coeffs[1]
# Get the SEs of the diff
se<-sqrt(diag(model_r[[1]][,-1]))
se[2]<-sqrt(sum(se^2) - 2*model_r[[1]][1,3])
# combine the results
summary_t_r<-data.frame(trait=c(traits[[p]][[t]], rep("", n_coef-1)), Coef=names, Est.=coeffs, LCL=coeffs-1.96*se, UCL=coeffs+1.96*se, m.models=model_r[[4]])
# Add to the other results for trait t
summary_t<-rbind(summary_t, summary_t_r)
}
}
summary_t<-summary_t[-1,]
# Combine the the results
if(p == 1 & t==1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
}
}
# Save the summary table
write.table(summary, file="MR_table.csv", sep=",", row.names=F, col.names=names(summary))
# AIC of nutritional models
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Get the AIC of the meta-analysis
summary_t<-data.frame(trait=traits[[p]][[t]])
summary_t$Model<-0
summary_t$Model_name<-"MA"
summary_t$df<-dim(MA_pool_list[[p]][[t]][[1]][[1]])[1]
summary_t$AIC<-MA_pool_list[[p]][[t]][[1]][[3]]
# Get AIC from MRs
for(r in 1:length(MR_nutri_pool_list[[p]][[t]][[1]])){
model_r<-MR_nutri_pool_list[[p]][[t]][[1]][[r]]
if(row.names(model_r[[1]])[1] != "NoModel"){
summary_t<-rbind(summary_t, data.frame(trait="", Model=r, Model_name=names(MR_nutri_pool_list[[p]][[t]][[1]])[r], df=length(row.names(model_r[[1]])), AIC=model_r[[3]]))
}
}
# Sort on AIC and pick within 2 points the model with lowest DF and then AIC
sort_summary_t<-summary_t[order(summary_t$AIC),]
summary_t[,-1]<-sort_summary_t[,-1]
summary_t$delta_AIC<-summary_t$AIC - summary_t$AIC[1]
summary_t$selection<-0
options<-summary_t[which(summary_t$delta_AIC < 2),]
choice<-options$Model[which(options$df == min(options$df))[1]]
summary_t$selection[which(summary_t$Model == choice)]<-1
# Add all the traits together
if(p == 1 & t == 1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
}
}
write.table(summary, file="AIC_table.csv", sep=",", row.names=F, col.names=names(summary))
# AIC-favoured Nutritional Meta-Regression Results
AIC<-summary
choices<-AIC$Model[which(AIC$selection == 1)]
aic_models<-list()
counter<-1
for(p in 1:length(traits)){
for(t in 1:length(traits[[p]])){
# Grab trait t from part p
trait<-traits[[p]][[t]]
# Get the model, and also save it
if(choices[counter] != 0){
model<-MR_nutri_pool_list[[p]][[t]][[1]][[choices[counter]]]
}else{
model<-MA_pool_list[[p]][[t]][[1]]
}
aic_models[[counter]]<-model
names(aic_models)[counter]<-trait
# Results table
Ests<-model[[1]][,1]
SE<-sqrt(diag(as.matrix(model[[1]][,-1])))
summary_t<-data.frame(trait=c(trait, rep("", length(Ests)-1)), Coef=row.names(model[[1]]), Est.=Ests, LCL=Ests-1.96*SE, UCL=Ests+1.96*SE, Q_Mod=c(model[[2]][3], rep("", length(Ests)-1)), Q_p=c(model[[2]][4], rep("", length(Ests)-1)), m.models=c(model[[4]], rep("", length(Ests)-1)))
# Combine the the results
if(p == 1 & t==1){
summary<-summary_t
}else{
summary<-rbind(summary, summary_t)
}
#Advan ce the counter
counter<-counter+1
}
}
# Save the summary table
write.table(summary, file="MR_nutri_table.csv", sep=",", row.names=F, col.names=names(summary))
# Save the AIC favoured models
setwd(paste0(wd, "/Analyses"))
save(aic_models, file="AIC_models.Rdata")
|
c579d00caed67f48016f3bc92d5fbdb49f18e6b8
|
8f14b26387e0a19bc51cc4b40c67ccd225accb9b
|
/man/calc_uncertainty.Rd
|
d1fd078d6b64ddcafe44e14060a7736bf5a2e83b
|
[] |
no_license
|
mikkelkrogsholm/pollofpolls
|
fcfc3b98f165e13e4bd4580411cdfd3af014a12c
|
5e9c845658daac93fc20d6ac510cfa1e6b93c069
|
refs/heads/master
| 2020-03-28T01:05:16.176564
| 2018-10-01T10:33:57
| 2018-10-01T10:33:57
| 147,475,979
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 429
|
rd
|
calc_uncertainty.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{calc_uncertainty}
\alias{calc_uncertainty}
\title{Calculate poll uncertainty}
\usage{
calc_uncertainty(p, n, z = 1.96)
}
\arguments{
\item{p}{share of poll}
\item{n}{number of respondents}
\item{z}{the z-score}
}
\value{
a number
}
\description{
Uses a formula to calculate poll uncertainty
}
\examples{
calc_uncertainty(.2, 1000)
}
|
cd8650937fde246cf427d72971c7fc5560f0ecf1
|
dd8c07ac7e734bc1ecacedd5cab0909bbb96b345
|
/Projekt2_kody_AnnaKoziol.r
|
30fb83a04db9f03ca405f6147f22ac76ec68919a
|
[] |
no_license
|
AniaKoziol/DataMining
|
84df6ee9e405c7dd17839bfea0df89019a0ab055
|
34db2073f5caf6d9be15c313913db41d66cb8eee
|
refs/heads/main
| 2023-02-23T18:16:19.495502
| 2021-01-29T21:51:59
| 2021-01-29T21:51:59
| 334,271,005
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 11,206
|
r
|
Projekt2_kody_AnnaKoziol.r
|
## Projekt 2 -- data preparation and feature selection.
## Loads the training data, splits it into train/test, screens variables
## (variance, duplicates, influential observations), selects features
## (Boruta, CMIM, random-forest importance) and inspects correlations/VIF
## of the selected feature set.
# NOTE(review): install.packages() calls in a script re-install on every
# run; normally these would be executed once interactively.
install.packages("praznik")
install.packages("verification")
install.packages("infotheo")
install.packages("AUC")
install.packages("randomForest")
install.packages("class")
install.packages("Boruta")
install.packages("factoextra")
install.packages("ROCR")
install.packages("adabag")
install.packages("caret")
# NOTE(review): several packages are attached more than once (pROC, caret,
# class, DMwR, e1071); harmless but redundant.
library(pROC)
library(caret)
library(adabag)
library(ROCR)
library(factoextra)
library(DMwR)
library(class)
library(randomForest)
library(rpart)
library(e1071)
library(verification)
library(praznik)
library(car)
library(pROC)
library(caret)
library(corrplot)
library(Boruta)
library(pROC)
library(class)
library(DMwR)
library(e1071)
dane_train <- read.csv("train_projekt2.csv")
# ---- Split into training and test sets (shuffle, then 400 test rows) ----
# NOTE(review): no set.seed() before sample(), so the split is not
# reproducible between runs.
df <- dane_train[sample(1:nrow(dane_train)),]
df_testowy <- df[1:400,]
df_treningowy <- data.frame(df[401:nrow(df),])
# ---- Check for missing data ----
#
sum(apply(dane_train , 2, function(x){ sum(is.na(x))})) # no missing data
# ---- Column variances ----
wariancje <- as.data.frame(as.table(apply(df_treningowy[,-ncol(df_treningowy)], 2, var)))
head(wariancje)
wariancja_zero <- subset(as.data.frame(as.table(apply(df_treningowy[,-ncol(df_treningowy)], 2, var))), Freq < 0.01) # none (no near-zero-variance columns)
# ---- Duplicated rows ----
#
sum(duplicated(df_treningowy)) # none
# ---- Influential observations (Cook's distance on a logistic model) ----
cooksd <- cooks.distance(glm(Y ~ .,
family = "binomial",
data = dane_train))
plot(cooksd,
pch="*",
cex=2,
main="Influential Obs by Cooks distance")
abline(h = 4*mean(cooksd, na.rm=T), col="red")
# Flag rows with Cook's distance above 4x the mean
outliers <- rownames(dane_train[cooksd > 4*mean(cooksd, na.rm=T), ])
length(outliers) # 30 outliers -- not that many
print(outliers)
# ---- Feature selection: Boruta ----
boruta.fs <- Boruta(Y~., data = df_treningowy, doTrace=2)
uznane <- which(boruta.fs$finalDecision== "Confirmed")
boruta.fs$finalDecision
# ---- Feature selection: CMIM (top 20 by conditional mutual information) ----
uznane2<- CMIM(df_treningowy[,-501],df_treningowy$Y,20) # mostly overlaps with the Boruta set
uznane2$selection
# ---- Feature selection: random-forest variable importance ----
#
las <- randomForest(as.factor(Y)~., data=df_treningowy, importance=TRUE)
varImpPlot(las) # based on the plot, these overlap with the Boruta selection
# ---- New data sets restricted to the Boruta-confirmed features (+ Y, col 501) ----
nowe_dane <- df_treningowy[,c(uznane,501)]
nowe_dane <- as.data.frame(nowe_dane)
nowy_test <- df_testowy[,c(uznane,501)]
# NOTE(review): hard-coded absolute Windows path; only works on the
# author's machine.
write.csv(nowe_dane, file = "C://Users//Ania//Desktop//Studia-SMAD//Data Mining//projekt 2")
# Pairwise correlations of the selected features
korelacje <- (round(cor(nowe_dane[,-ncol(nowe_dane)]),2))
corrplot(korelacje, type = "upper", order = "hclust", tl.col = "darkblue", tl.srt = 45)
korelacje_df <- as.data.frame(as.table(korelacje))
head(korelacje_df) # "df" meaning data frame
skorelowane_zmienne <- subset(korelacje_df, (abs(Freq) > 0.5 & abs(Freq) <1))
length(subset(korelacje_df, (abs(Freq) > 0.8 & abs(Freq) <1))$Freq)/2 # 12 pairs with |r| > 0.8
# Selected variables: V29 + V49 + V65 + V106 + V129 +V154 +V242+ V282+V319 +V337+ V339+ V379+
# V434+ V443+ V452+ V454+ V456+ V473+ V476+ V494
# ---- VIF (multicollinearity check) ----
vif <-as.data.frame(as.table(vif(nowe_dane[,-ncol(nowe_dane)])))
duzy_vif <- subset(vif, (abs(Freq) > 10))
vif(lm(Y ~., data=nowe_dane))
# ---- Correlations (repeated block, identical to the one above) ----
korelacje <- (round(cor(nowe_dane[,-ncol(nowe_dane)]),2))
corrplot(korelacje, type = "upper", order = "hclust", tl.col = "darkblue", tl.srt = 45)
korelacje_df <- as.data.frame(as.table(korelacje))
head(korelacje_df) # "df" meaning data frame
skorelowane_zmienne <- subset(korelacje_df, (abs(Freq) > 0.8 & abs(Freq) <1))
## ---- Classification methods ----------------------------------------------
## Fits and evaluates several classifiers on the Boruta-selected features:
## decision tree, SVM (three kernels), random forest, k-means, and kNN.
## `nowe_dane` = training set, `nowy_test` = held-out test set (built above).

# True test-set labels, reused for every ROC curve below
y <- nowy_test$Y

# 1) Decision tree ----------------------------------------------------------
tree <- rpart(factor(Y)~., data = nowe_dane, minsplit=5, cp= 0.001)
pred_tree <- predict(tree, nowy_test, type="prob")
pred_tree <- as.data.frame(pred_tree)
plot(tree)
text(tree, pretty = 0)
pred_tree2 <- predict(tree, nowy_test, type='class')
# BUGFIX: accuracy must compare test-set labels against test-set predictions.
# The original compared `nowe_dane$Y` (training labels) with `pred_tree2`
# (test predictions), silently recycling vectors of different lengths.
dokl_drzewo <- sum(nowy_test$Y == pred_tree2)/nrow(nowy_test)
par(pty = "s")
roc(y,pred_tree[,2], plot = T , legacy.axes = T, print.auc = T)
# ---- Cross-validated AUC for the tree: mean over `ile_prob` random 75/25 splits
ile_prob <- 10
temp <- 0
set.seed(123)
for(i in 1:ile_prob){
  index <- createDataPartition(nowe_dane$Y, p = 3/4, list = FALSE)
  dane_treinigowe <- nowe_dane[index,]
  dane_validacyjne <- nowe_dane[-index,]
  model <- rpart(as.factor(Y)~.
                 ,data=dane_treinigowe)
  wynik = predict(model,dane_validacyjne, type="prob")
  wynik <- as.data.frame(wynik)[,2]
  labels = dane_validacyjne$Y
  area <- auc(labels, wynik)
  temp <- temp + area
}
area <- temp/ile_prob*100
area # ~84.2
# ---------------------------------------------------------------------------
# 2) SVM --------------------------------------------------------------------
x <- subset(nowe_dane, select=-Y)
y <- nowy_test$Y
# Radial kernel (the e1071 default)
svm_model <- svm(Y ~ ., data=nowe_dane)
summary(svm_model)
pred <- predict(svm_model,nowy_test)
par(pty = "s")
roc(y,pred, plot = T , legacy.axes = T, print.auc = T) # AUC ~0.9443
par(pty = "s")
# Sigmoid kernel
svm_model2 <- svm(Y ~ ., data=nowe_dane, kernel = "sigmoid")
summary(svm_model2)
pred2 <- predict(svm_model2,nowy_test)
par(pty = "s")
roc(y,pred2, plot = T , legacy.axes = T, print.auc = T) # AUC ~0.5637
# Polynomial kernel
svm_model3 <- svm(Y ~ ., data=nowe_dane, kernel = "polynomial")
summary(svm_model3)
pred3 <- predict(svm_model3,nowy_test)
par(pty = "s")
# BUGFIX: the original plotted `pred2` (sigmoid) here again; the polynomial
# kernel's predictions are `pred3` (comment claimed AUC ~0.7128).
roc(y,pred3, plot = T , legacy.axes = T, print.auc = T) # AUC ~0.7128
par(pty = "s")
# All three kernels on one ROC plot
roc(y,pred, plot = T , legacy.axes = T, print.auc = T) # radial, AUC ~0.9443
plot.roc(y,pred2, col= "#4daf4a", print.auc=T, add = T, print.auc.y= 0.42 )
plot.roc(y,pred3, col= "red", print.auc=T, add = T, print.auc.y= 0.35 )
legend("bottomright", legend = c("radial","sigmoid", "polynomial"), col = c("black", "#4daf4a", "red"), lwd=2)
# ---------------------------------------------------------------------------
# 3) Random forest ----------------------------------------------------------
las2 <- randomForest(factor(Y)~., data=nowe_dane, importance=TRUE)
las3 <- randomForest(factor(Y)~., data=nowe_dane, importance=TRUE, ntree =1000)
varImpPlot(las2)
las2$importance
print(las2) # OOB error ~11.7%
pred_rf <- predict(las2, nowy_test, type = 'prob')
pred_rf3 <- predict(las3, nowy_test, type = 'prob')
pred_rf<- as.data.frame(pred_rf)
# BUGFIX: confusionMatrix() needs factor class predictions, not the data
# frame of class probabilities the original passed (which errors at runtime).
pred_rf_class <- predict(las2, nowy_test, type = 'response')
confusionMatrix(pred_rf_class, factor(y)) # accuracy ~0.8975
roc(y,pred_rf[,2], plot = T , legacy.axes = T, print.auc = T)
roc(y,pred_rf3[,2], plot = T , legacy.axes = T, print.auc = T)
# ---- k-means clustering (k = 2) on the scaled features ---------------------
# NOTE(review): despite the variable name, this is k-means, not kNN; the
# cluster plot visualises the first two principal components.
KNN <- kmeans(scale(nowe_dane[, -ncol(nowe_dane)]),2 , 100)
table(KNN$cluster)
fviz_cluster(object = KNN, data = scale(nowe_dane[, -ncol(nowe_dane)]) ,
             stand = F)
# ---- kNN classification via caret, k tuned over 20 values ------------------
# NOTE(review): class probabilities from train() require a factor outcome;
# `nowe_dane$Y` is only converted to a factor further down -- confirm the
# intended execution order.
knnFit <- train(Y ~ ., data = nowe_dane, method = "knn", preProcess = c("center","scale"),
                tuneLength = 20)
plot(knnFit)
knnPredict <- predict(knnFit,newdata = nowy_test , type="prob")
knnROC <- roc(nowy_test$Y,knnPredict[,"0"])
knnROC
plot(knnROC, type="S", las=1, print.auc=T, color="blue", main ="Klasyfikacja KNN", legacy.axes = T)
#-----------------------------KNN na skaladowych -------------------------------
knnFit2 <- train(Y ~ ., data = scores, method = "knn", preProcess = c("center","scale"),
tuneLength = 20)
plot(knnFit2)
knnPredict2 <- predict(knnFit2,newdata = test.scores , type="prob")
knnROC2 <- roc(test.scores$Y,knnPredict2[,"0"])
knnROC2
plot(knnROC2, type="S", las=1, print.auc=T, color="blue", main ="Klasyfikacja KNN", legacy.axes = T)
#slabiej
#--------------------------- PCA --------------------------------
#standaryzowanie danych
scale <- scale(nowe_dane[, -ncol(nowe_dane)])
scale <- as.data.frame(scale)
pca <- princomp(~. , cor = T, data= scale)
print(summary(pca))
par(lwd = 2)
plot(pca, las = 1 , col = "pink") #5 skladowych g??wnych wyjasnia prawie 100% zmienno?ci
biplot(pca)
pca$loadings
#--------- DATA FRAME Z PCA -----------------------------
scores <- data.frame(Y = nowe_dane$Y,
pca$scores[, 1:5])
test.scores <- data.frame(Y = nowy_test$Y,
predict(pca, newdata =nowy_test[, -ncol(nowy_test)]))
test.scores <- as.data.frame(test.scores[, 1:6])
#--------------------------------------------------------------
#-------------- WSZYSKIE ROC na jednym rysunku -------------------
par(pty = "s")
roc(y,pred, plot = T , legacy.axes = T, print.auc = T) #0.9443
plot.roc(y,pred_tree[,2], col= "#4daf4a", print.auc=T, add = T, print.auc.y= 0.42 )
plot.roc(y,pred_rf[,2], col= "red", print.auc=T, add = T, print.auc.y= 0.35 )
roc(y,pred.ada1000$prob[,2], plot = T , legacy.axes = T, print.auc = T, col = "orange", add = 0.2, print.auc.y= 0.35)
legend("bottomright", legend = c("SVM", "Decision Tree", "RForest"), col = c("black", "#4daf4a", "red"), lwd=2)
#--------------- DRZEWO NA SKLADOWYCH --------------------------
tree_pca <- rpart(factor(Y)~., data = scores)
pred_tree_pca <- predict(tree_pca, test.scores, type='prob')
pred_tree_pca <- as.data.frame(pred_tree)
plot(tree)
text(tree, pretty = 0)
par(pty = "s")
roc(y,pred_tree_pca[,2], plot = T , legacy.axes = T, print.auc = T)
#--------- ADA BOOST -----------------------------
nowe_dane$Y <- as.factor(nowe_dane$Y)
ada1000 <- boosting(Y~., data=nowe_dane, mfinal=1000)
pred.ada1000 <- predict(ada1000, nowy_test)
1 - pred.ada$error
100*mean(pred.ada1000$class!=nowy_test$Y ) # b?ad 13.5
dokl_ada <- sum(nowy_test$Y == pred.ada1000$class)/nrow(nowy_test) #0.88
par(pty = "s")
roc(y,pred.ada1000$prob[,2], plot = T , legacy.axes = T, print.auc = T, col = "black", main = "Boosting", add = T)
#------------------------------------ADABoost na skladowych -------------------------
scores$Y <- as.factor(scores$Y)
ada100_pca <- boosting(Y~., data=scores, mfinal=100)
pred.ada100_pca <- predict(ada100_pca, test.scores)
1 - pred.ada$error
100*mean(pred.ada$class!=nowy_test$Y ) # b?ad 13.5
dokl_ada <- sum(nowy_test$Y == pred.ada1000$class)/nrow(nowy_test) #0.88
par(pty = "s")
roc(y,pred.ada100_pca$prob[,2], plot = T , legacy.axes = T, print.auc = T, col = "black", main = "ada", add = T)
# AUC 0.52.
#--------------KLASYFIKACJA TESTOWEGO--------------------------
dane_test <- read.csv("test_projekt2.csv")
knnFit <- train(Y ~ ., data = nowe_dane, method = "knn", preProcess = c("center","scale"),
tuneLength = 20)
plot(knnFit)
knnPredict <- predict(knnFit,newdata = dane_test , type="prob")
wynniki_klasyfikacji <- knnPredict[,2]
write.csv2(wynniki_klasyfikacji, file = "AKO.csv")
|
cd8eec4f6fb014a2c450c4bdc4a318d9613c3860
|
a73f297916588af6cbfa7779b236fa26891c5478
|
/run_analysis.R
|
1b7b0b5b39fd23835da020881b76722fc312e24b
|
[] |
no_license
|
arildnor/GettingAndCleaningData
|
13aa1782a74bf86fa168dafd0ed72e59d3d1830b
|
e5c65f61e00d6092344fb8f411a2ff9dbe2b5a04
|
refs/heads/master
| 2021-01-10T18:30:53.365445
| 2015-02-22T10:09:35
| 2015-02-22T10:09:35
| 31,122,952
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,401
|
r
|
run_analysis.R
|
################################################################################
##
## Getting and Cleaning Data
## Programming assignment
##
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for
## each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set
## with the average of each variable for each activity and each subject.
##
## The dataset includes the following files:
##
##- 'README.txt'
##- 'features_info.txt': Shows information about the variables used on
## the feature vector.
##- 'features.txt': List of all features.
##- 'activity_labels.txt': Links the class labels with their activity name.
##
################################################################################
#
# Starting R in the main directory. Data is expected to be downloaded and
# unpacked into the "UCI HAR Dataset" folder under the working directory.
#
currentDirectory <- getwd()
datasetHomeFolder <- "UCI HAR Dataset"
#
# features_info.txt describes the variables. It is a free-text file and is
# not used in the subsequent workflow.
#
feautures_info_name <- "features_info.txt"
features_info_file <- paste(currentDirectory,"/",datasetHomeFolder,"/",feautures_info_name,sep="")
features_info <- read.table(features_info_file, sep="\t", header=FALSE)
features_info
#
# features.txt lists the 561 variable names used for the X_ datasets.
# NOTE(review): the file is space-delimited but is read with sep="\t", so
# each row becomes a single string that still includes the leading index
# (e.g. "1 tBodyAcc-mean()-X"). The author keeps this deliberately -- see
# the comment at step 4 below.
#
features_name <- "features.txt"
features_file <- paste(currentDirectory,"/",datasetHomeFolder,"/",features_name,sep="")
features <- read.table(features_file, sep="\t", header=FALSE)
features
#
# activity_labels.txt: the activity labels linked to the y_ datasets
#
activity_labels_name <- "activity_labels.txt"
activity_labels_file <- paste(currentDirectory,"/",datasetHomeFolder,"/",activity_labels_name,sep="")
activity_labels <- read.table(activity_labels_file, sep="\t", header=FALSE)
activity_labels
#
# Build the paths to the two data folders: "test" and "train" contain
# equivalently-structured datasets.
#
testsetHomeFolder <- "test"
trainsetHomeFolder <- "train"
#
testDirectory <- paste(currentDirectory,"/",datasetHomeFolder,"/",testsetHomeFolder,sep="")
trainDirectory <- paste(currentDirectory,"/",datasetHomeFolder,"/",trainsetHomeFolder,sep="")
print(testDirectory)
print(trainDirectory)
#
# 'train/subject_train.txt' and 'test/subject_test.txt':
# each row identifies the subject who performed the activity for
# each window sample. Its range is from 1 to 30.
#
thisFiletest <- "subject_test.txt"
thisFiletrain <- "subject_train.txt"
testFileName <- paste(testDirectory,"/",thisFiletest,sep="")
trainFileName <- paste(trainDirectory,"/",thisFiletrain,sep="")
print(testFileName)
print(trainFileName)
subject_test <- read.table(testFileName, header=FALSE)
subject_train <- read.table(trainFileName, header=FALSE)
str(subject_test)
str(subject_train)
#
# 'train/y_train.txt' and 'test/y_test.txt':
# each row contains the activity code (1-6) linked to the corresponding
# row of the X_ tables; codes are described in the activity_labels file.
#
thisFiletest <- "y_test.txt"
thisFiletrain <- "y_train.txt"
testFileName <- paste(testDirectory,"/",thisFiletest,sep="")
trainFileName <- paste(trainDirectory,"/",thisFiletrain,sep="")
print(testFileName)
print(trainFileName)
y_test <- read.table(testFileName, header=FALSE)
y_train <- read.table(trainFileName, header=FALSE)
str(y_test)
str(y_train)
#
# 'train/X_train.txt' and 'test/X_test.txt':
# each row contains the summary measurements for the 561 variables.
# Rows are linked to the subject_ and y_ files by position.
#
thisFiletest <- "X_test.txt"
thisFiletrain <- "X_train.txt"
testFileName <- paste(testDirectory,"/",thisFiletest,sep="")
trainFileName <- paste(trainDirectory,"/",thisFiletrain,sep="")
print(testFileName)
print(trainFileName)
X_test <- read.table(testFileName, header=FALSE)
X_train <- read.table(trainFileName, header=FALSE)
str(X_test)
str(X_train)
dim(X_test)
dim(X_train)
##
## Raw inertial signals are in the 'Inertial Signals' subfolders
## (total_acc_*, body_acc_*, body_gyro_*); they are not used for this
## assignment.
##
#
# All data are read into R. Check dimensions before merging:
# all train sets must have the same number of rows, likewise the test sets.
#
dim(subject_train)
dim(subject_test)
dim(y_train)
dim(y_test)
dim(X_train)
dim(X_test)
################################################################################
#
# 1. Merges the training and the test sets to create one data set.
#
# Train and test have the same columns, so rows are stacked (train first).
#
#
# Subject datasets
#
merged_subject <- rbind(subject_train,subject_test)
dim(merged_subject)
#
# Set column name
#
names(merged_subject)[1] <- paste("subject")
#
# y_ datasets
#
merged_Y <- rbind(y_train,y_test)
dim(merged_Y)
#
# set column name
#
names(merged_Y)[1] <- paste("activity")
#
# X_ datasets (specific columns are extracted later, in step 2)
#
merged_X <- rbind(X_train,X_test)
dim(merged_X)
#
# Set the variable names in the X_ set using
# the column names from the features set.
#
names(merged_X) <- features$V1
#
# Can check all column names
#
str(merged_subject)
str(merged_Y)
str(merged_X)
################################################################################
#
# 2. Extracts only the measurements on the mean and standard deviation for
# each measurement.
# Keep columns whose name contains "-mean()" or "-std()"; this reduces the
# number of variables from 561 to 66.
#
#
substring="-mean\\(\\)|-std\\(\\)"
final_merged_x <- merged_X[grepl(substring,names(merged_X))]
str(final_merged_x)
dim(final_merged_x)
head(final_merged_x)
################################################################################
#
# 3. Uses descriptive activity names to name the activities in the data set.
# A row-number column is added to activity_labels so activity codes can be
# matched back to their labels after the summarise step in part 5.
#
str(merged_Y)
str(activity_labels)
activity_labels$rownumber <- row(activity_labels, as.factor = FALSE)
str(activity_labels)
print(activity_labels)
#
# Bind the 3 datasets column-wise into the combined dataset.
# This now contains subject, activity and the selected X_ columns.
#
combined_set <- cbind(merged_subject,merged_Y,final_merged_x)
str(combined_set)
head(combined_set)
################################################################################
#
# 4. Appropriately labels the data set with descriptive variable names.
#
# Looking at the output from str() I decide to keep the labels as is now.
# An option would be to remove the number that is the first part of the
# 66 mean and std column names, but keeping it shows the position each
# variable came from in the original dataset.
#
str(combined_set)
################################################################################
#
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject.
#
# Using the dplyr package for the last part of the project.
# NOTE(review): require() is used for loading; library() would fail loudly
# if the packages were missing. tbl_df()/summarise_each()/funs() are
# deprecated in current dplyr but worked with the versions used here.
#
require(dplyr)
require(tidyr)
#
# Convert to a tibble (tbl_df) to speed up printing/inspection
#
tidy_set <- tbl_df(combined_set)
str(tidy_set)
dim(tidy_set)
#
# Group by activity then subject and take the mean of every column.
# Activity is put first: it is then easy to compare the subjects within a
# given activity in the output.
#
grouped_tidy <- tidy_set %>%
group_by(activity,subject) %>%
summarise_each(funs(mean)) %>%
print
#
# The text label for activity became a number in the summarise step, so
# map the codes back to their descriptive labels via the rownumber column
# added in step 3.
#
grouped_tidy$activity <-
activity_labels$V1[match(grouped_tidy$activity,activity_labels$rownumber)]
str(grouped_tidy)
#
# Check dimensions and heads of the combined and aggregated sets.
# There are 30 subjects and 6 activities, so the summarised set has a
# maximum of 180 rows.
#
#table(merged_subject)
#table(activity_labels)
#
dim(combined_set)
dim(tidy_set)
dim(grouped_tidy)
head(combined_set,n=50)
head(grouped_tidy,n=50)
#
# Write the tidy dataset
#
write.table(grouped_tidy, file="tidy.txt", row.names=FALSE)
#
# Done
#
################################################################################
|
cf519c98505060a185fe554a4fc11ded8a0ce7a9
|
1aa7f59efe886e200ebfa4b1a1eb7b038e2239fa
|
/PenDrive/JR/R/ex03_exploratoria.R
|
3c21493ace1b5c66db627cfc06cb3ca2259dec23
|
[] |
no_license
|
jayme-jr/Backups
|
72b2de6d99ed9061eeafa3b98136a1c4edb3246f
|
29f7f0a2dea038726d93d0c3c2241e6781c8cc25
|
refs/heads/master
| 2021-03-02T17:18:09.013473
| 2020-03-08T21:49:30
| 2020-03-08T21:49:30
| 245,887,640
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,287
|
r
|
ex03_exploratoria.R
|
##======================================================================
## Exercícios - Análise Exploratória de Dados
## (Reviewed: fixed na.rm placement in diff(range()), the coefficient of
## variation formula, the hard-coded rnorm() length, and the pairwise
## handling of NAs in cor().)
##======================================================================
##======================================================================
## Importação e conferência dos dados
## Baixar os dados em:
## http://www.leg.ufpr.br/~walmes/data/aval_carros_nota.txt
## Importe os dados gerados
dados <- read.table("http://www.leg.ufpr.br/~walmes/data/aval_carros_nota.txt",
                    header = TRUE, sep = "\t", dec = ".")
## Verifique a estrutura dos dados importados
str(dados)
## Faça um sumário estatístico do objeto de dados
summary(dados)
## Classifique todas as variáveis do conjunto de dados
## Escreva aqui em forma de comentário, uma linha para cada variável
## Exemplo:
## var1: qualitativa ordinal
#var1carro = qualitativa nominal
#var2dono = quantitativa discreta
#var3item = qualitativa nominal
#var4nota = quantitativa contínua
##======================================================================
## Variável carro
## Frequência absoluta
table(dados$carro)
## Frequência relativa
prop.table(table(dados$carro))
## Gráfico de barras
barplot(table(dados$carro))
## Qual o carro que ocorre com maior frequência? Crie um comando para
## retornar apenas o nome do carro, ou seja, calcule a moda.
## Dica: use as funções table() e which.max()
names(table(dados$carro))[which.max(table(dados$carro))]
##======================================================================
## Variável item
## Frequência absoluta
table(dados$item)
## Frequência relativa
prop.table(table(dados$item))
## Gráfico de barras
barplot(table(dados$item))
##======================================================================
## Cruzamento entre as variáveis item e carro
## Faça as tabelas com as variáveis nessa ordem: item e carro
## Frequência absoluta
table(dados$item, dados$carro)
## Frequência relativa
prop.table(table(dados$item, dados$carro))
## Adicione as margens da tabela de frequência absoluta (somas de linhas
## e colunas)
addmargins(table(dados$item, dados$carro))
## Frequência relativa em relação ao total de carros
prop.table(table(dados$item, dados$carro), margin = 2)
## Gráfico de barras da frequência absoluta, com as barras lado-a-lado
barplot(table(dados$item, dados$carro), legend = TRUE, beside = TRUE)
##======================================================================
## Variável nota
## Máximo e mínimo
range(dados$nota, na.rm = TRUE)
## Fazendo uma tabela de frequência:
# 1) Número de classes estimado, com base no critério de Sturges
# Dica: ?nclass.Sturges
nclass.Sturges(dados$nota)
# 2) Com base no número de classes definido acima, crie as classes com a
# função cut(), e armazene em um objeto chamado "classes".
# As classes obrigatoriamente devem ser entre o valor mínimo e o máximo
# da variável nota (não podem ser criadas classes com valores inferiores
# ao mínimo ou superiores ao máximo), e devem ser *exatamente* o número
# de classes retornado acima
# NOTE(review): the breaks below (13 classes over [1, 10]) do not match
# the Sturges count computed above nor the observed min/max -- confirm
# against the exercise's expected answer.
classes <- cut(dados$nota, breaks = seq(1, 10, length = 14),
               include.lowest = TRUE)
# 3) Tabela com as frequências absolutas por classe
table(classes)
# 4) Tabela com as frequências relativas por classe
prop.table(table(classes))
## Faça um histograma
hist(dados$nota)
## Faça um histograma com as mesmas classes que você criou acima na
## função cut()
hist(dados$nota, breaks = seq(0, 10, length = 14))
## Boxplot
boxplot(dados$nota)
## Mediana
median(dados$nota, na.rm = TRUE)
## Média
mean(dados$nota, na.rm = TRUE)
## Quartis
quantile(dados$nota, na.rm = TRUE)
## Amplitude
## FIX: na.rm belongs to range(); diff() has no na.rm argument, so the
## original diff(range(x), na.rm = TRUE) silently ignored it and could
## return NA.
diff(range(dados$nota, na.rm = TRUE))
## Variância
var(dados$nota, na.rm = TRUE)
## Desvio-padrão
sd(dados$nota, na.rm = TRUE)
## Coeficiente de variação
## FIX: CV = sd/mean (the original divided by the sample size, which is
## not the coefficient of variation).
sd(dados$nota, na.rm = TRUE) / mean(dados$nota, na.rm = TRUE)
##======================================================================
## Cruzamento entra as variáveis nota e carro
## Faça as tabelas com as variáveis nessa ordem: nota e carro
## Com o objeto "classes" criado acima, que define as classes para a
## variável nota, obtenha uma tabela de frequência relativa entre as
## marcas de carro e as classes de nota. As frequências devem ser
## calculadas relativas ao total das linhas, ou seja das classes de
## notas. Acrescente também os totais marginais dessa tabela.
## FIX: row-relative frequencies require prop.table(..., margin = 1);
## each row then sums to 1, so the marginal total is the row-sum column.
addmargins(prop.table(table(classes, dados$carro), margin = 1), margin = 2)
## Calcule a média de nota para cada marca de carro
tapply(dados$nota, dados$carro, mean, na.rm = TRUE)
## Faça um boxplot das notas para cada carro (em um mesmo gráfico)
boxplot(nota ~ carro, data = dados)
##======================================================================
## Crie uma nova variável no seu objeto de dados com o seguinte cálculo
## 10 + nota * 5 + rnorm(100, 0, 10)
length(dados$nota)
## FIX: generate exactly one noise term per observation instead of the
## hard-coded count 124980, so the script works for any dataset size.
dados$nota2 <- 10 + dados$nota * 5 + rnorm(length(dados$nota), 0, 10)
str(dados)
## Faça um gráfico de dispersão entre a variável nota e esta nova
## variável que você criou acima
plot(nota2 ~ nota, data = dados)
## Calcule o coeficiente de correlação de Pearson entre estas duas
## variáveis
## FIX: filtering each vector independently breaks the pairing between
## observations; let cor() drop incomplete pairs instead.
cor(dados$nota, dados$nota2, use = "complete.obs")
|
6c8636d3d943666298e66ea630d988ef07c3ed7f
|
309d9decb79e9853a10d17e4822367dc7b75d831
|
/plot2.R
|
6d4b2f8e5e042c5c4a3e4ebd0189ae82e7832300
|
[] |
no_license
|
cgangwar/ExData_Plotting1
|
5d6a398cb9a8210700ce32e491f32891e58ddafc
|
9f240df610261922db7d23370dbc3030598a5940
|
refs/heads/master
| 2021-01-24T21:35:54.020072
| 2015-10-11T07:38:13
| 2015-10-11T07:38:13
| 44,043,168
| 0
| 0
| null | 2015-10-11T07:28:29
| 2015-10-11T07:28:29
| null |
UTF-8
|
R
| false
| false
| 728
|
r
|
plot2.R
|
# Plot 2: Global Active Power (kilowatts) over time for 2007-02-01 and
# 2007-02-02, written to plot2.png (480x480).
# Load the raw data from the current directory; "?" marks missing values.
col_types <- c("character", "character", rep("numeric", 7))
power <- read.table("./household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?", colClasses = col_types)
# Parse the date, build a combined date-time string, then convert the
# Time column to a full date-time.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$timetemp <- paste(power$Date, power$Time)
power$Time <- strptime(power$timetemp, format = "%Y-%m-%d %H:%M:%S")
# Keep only the two target days.
feb_days <- subset(power,
                   Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Render the line plot to the PNG device.
png(filename = "plot2.png", width = 480, height = 480)
with(feb_days, plot(Time, Global_active_power, type = "l",
                    ylab = "Global Active Power (kilowatts)", xlab = ""))
dev.off()
|
8b7661acaa6613a4779045a7209fd2cee0bbe113
|
7c0f1ca5f591fbfa8be8d94f0bc422bc7c9f39b7
|
/man/GenomicAnnotationsForPREDA2GenomicAnnotations.Rd
|
fc41560f8f61e0428b3d8ce6c12f2afaa43ae1a5
|
[] |
no_license
|
bernatgel/PREDAsam
|
2779a93879cf8b0d641cc2dec7857024f4e63513
|
fe90fd71652cc52a99e9946ee78bcb3152c00270
|
refs/heads/master
| 2020-05-21T07:34:39.489048
| 2019-05-10T10:11:31
| 2019-05-10T10:11:31
| 185,964,143
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
rd
|
GenomicAnnotationsForPREDA2GenomicAnnotations.Rd
|
\name{GenomicAnnotationsForPREDA2GenomicAnnotations}
\alias{GenomicAnnotationsForPREDA2GenomicAnnotations}
\alias{GenomicAnnotationsForPREDA2GenomicAnnotations,GenomicAnnotationsForPREDA-method}
\title{
extract the GenomicAnnotations object from the GenomicAnnotationsForPREDA object
}
\description{
extract the GenomicAnnotations object from the GenomicAnnotationsForPREDA object
}
\usage{
GenomicAnnotationsForPREDA2GenomicAnnotations(.Object)
}
\arguments{
\item{.Object}{
an object of class GenomicAnnotationsForPREDA
}
}
|
0552ded4b1d6c3cb6ae541c35efd505535f4779a
|
c48f2a584ba20fab90aaeab28b575e180a276928
|
/man/gforce.confint2test.Rd
|
f9997af6ca9eb89f17114681ce40a7aeceb8f0bd
|
[] |
no_license
|
cran/GFORCE
|
3a2fab8e3cf3d07c18427332985d0901c00c17d7
|
1c89579c5ecce1e8aef93bd7d2854986b835454a
|
refs/heads/master
| 2020-03-20T11:15:14.741926
| 2019-04-07T08:42:42
| 2019-04-07T08:42:42
| 137,397,785
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 628
|
rd
|
gforce.confint2test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDR_control.R
\name{gforce.confint2test}
\alias{gforce.confint2test}
\title{Convert confidence intervals to equivalent test statistics.}
\usage{
gforce.confint2test(conf_ints, alpha)
}
\arguments{
\item{conf_ints}{\eqn{d x d x 3} array. Each \eqn{d x d x 1} slice is a symmetric matrix.}
\item{alpha}{confidence level level of the confidence intervals.}
}
\value{
a \eqn{d x d} symmetric matrix of test statistics.
}
\description{
Can convert a 3-dimensional (\eqn{d x d x 3}) array encoding the confidence
intervals for a precision matrix to standard normal test-statistics.
}
|
9e0d6c16f7b82d670b47e571195a6993b3686a2a
|
1b73390902afc90781b72e7cf49f08878ddd4a08
|
/R/shiny_chunk.r
|
74a927d5ffc08c45a7d832a859bedec0a8d7c1ae
|
[] |
no_license
|
skranz/RTutor2
|
cf018a114a724a666672399e73b8da38c58bd522
|
cb19b69a6d11d2f957dad72df56a52680c2ff353
|
refs/heads/master
| 2021-01-17T13:24:57.440277
| 2017-10-07T01:51:22
| 2017-10-07T01:51:22
| 51,748,016
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,270
|
r
|
shiny_chunk.r
|
# Rename elements of x: wherever a name of x also appears as a name of
# nali, replace it with the corresponding value stored in nali.
# Names of x without a match in nali are left untouched.
set.nali.names = function(x, nali) {
  restore.point("set.nali.names")
  pos = match(names(x), names(nali))
  hit = !is.na(pos)
  names(x)[hit] = unlist(nali[pos[hit]])
  x
}
# update a chunk.ui to the specified mode
# Rebuilds the chunk's UI for the given mode ("input", "output" or
# "inactive") and pushes the result into the chunk's UI placeholder.
# uk   : mutable chunk state object (environment-like); ck = uk$ck holds
#        static chunk data, including nali, the list of shiny ids.
# dset : kept for backward compatibility; the dsetUI branch is disabled.
update.chunk.ui = function(uk, mode=uk$mode,app=getApp(),dset=TRUE) {
  restore.point("update.chunk.ui")
  ck = uk$ck
  # record the new mode on the state object before rendering
  uk$mode = mode
  ui = get.chunk.ui(uk)
  #cat(as.character(ui))
  #if (dset)
  #  dsetUI(ck$nali$chunkUI, ui)
  setUI(ck$nali$chunkUI, ui)
}
# Build the ui for a chunk based on its current mode.
# "input"    -> editable ace editor + buttons
# "output"   -> rendered (knitted) output + buttons
# "inactive" -> hint that earlier chunks must be solved first
# any other  -> placeholder text
get.chunk.ui = function(uk) {
  restore.point("get.chunk.ui")
  switch(uk$mode,
    input    = make.chunk.input.ui(uk = uk),
    output   = make.chunk.output.ui(uk = uk),
    inactive = HTML("You must first solve the earlier chunks..."),
    HTML("Not shown")
  )
}
# Build the "input" (editable) UI for a chunk: a row of action buttons,
# an alert placeholder, an ace code editor preloaded with the student's
# code, and an ace console for output/hints.
# Editor/console heights are derived from the number of code lines and
# the font size unless given explicitly. Which buttons appear depends on
# opts (show.solution.btn, show.data.exp, show.save.btn, noeval).
make.chunk.input.ui = function(uk, theme="textmate", height=NULL, code.lines=NULL, fontSize=12, console.height=height, opts=rt.opts()) {
  restore.point("make.chunk.input.ui")
  ck = uk$ck
  nali = ck$nali
  code = merge.lines(uk$stud.code)
  # size the editor for the larger of student code and sample solution
  if (is.null(code.lines))
    code.lines = max(length(sep.lines(code)), length(sep.lines(ck$sol.txt)))+1
  if (is.null(height)) {
    # 1.25 * fontSize approximates the rendered line height in pixels
    height = max((fontSize * 1.25) * code.lines,30)+35
  }
  if (is.null(console.height)) {
    # cap the console at 10 visible lines
    console.code.lines = min(code.lines,10)
    console.height = (fontSize * 1.25) * console.code.lines + 50
  }
  # NOTE: label is computed but not currently attached to any widget
  if (uk$solved) {
    label = "was already solved"
  } else {
    label = "not yet solved"
  }
  solutionBtn = NULL
  if (isTRUE(opts$show.solution.btn)) {
    solutionBtn=bsButton(nali$solutionBtn, "solution",size="extra-small")
  } else {
    solutionBtn = NULL
  }
  if (isTRUE(opts$show.data.exp)) {
    dataBtn = bsButton(nali$dataBtn, "data", size="extra-small")
  } else {
    dataBtn = NULL
  }
  if (isTRUE(opts$show.save.btn)) {
    saveBtn = bsButton(nali$saveBtn, "save",size="extra-small")
  } else {
    saveBtn = NULL
  }
  # with evaluation enabled: full button row plus run/help hotkeys;
  # noeval mode: only check/hint buttons and hotkeys
  if (!opts$noeval) {
    button.row = tagList(
      bsButton(nali$checkBtn, "check",size="extra-small"),
      bsButton(nali$hintBtn, "hint", size="extra-small"),
      bsButton(nali$runBtn, "run chunk",size="extra-small"),
      dataBtn,
      saveBtn,
      solutionBtn
    )
    keys = list(runLineKey="Ctrl-Enter", helpKey="F1", runKey="Ctrl-R|Ctrl-Shift-Enter", hintKey="Ctrl-H", checkKey = "Ctrl-Alt-R|Ctrl-T")
  } else {
    button.row = tagList(
      bsButton(nali$checkBtn, "check",size="extra-small"),
      bsButton(nali$hintBtn, "hint", size="extra-small"),
      solutionBtn
    )
    keys = list(hintKey="Ctrl-H", checkKey = "Ctrl-Alt-R|Ctrl-T")
  }
  # translate generic hotkey ids into this chunk's namespaced ids
  keys = set.nali.names(keys, nali)
  edit.row = tagList(
    aceEditor(nali$editor, code, mode="r",theme=theme, height=height, fontSize=13,hotkeys = keys, wordWrap=TRUE, debounce=1, showLineNumbers=isTRUE(opts$show.line.numbers)),
    aceEditor(nali$console, "", mode="r",theme="clouds", height=console.height, fontSize=13,hotkeys = NULL, wordWrap=TRUE, debounce=1, showLineNumbers=FALSE,highlightActiveLine=FALSE)
  )
  #aceAutocomplete(nali$editor)
  tagList(
    button.row,
    bsAlert(nali$alertOut),
    edit.row
  )
}
# Build the "output" (read-only) UI for a chunk: an edit/data/save button
# row, an alert placeholder, and the knitted HTML of either the solved
# chunk or the originally shown code.
# Which HTML is used depends on whether the chunk is solved, whether a
# pre-knitted version exists (opts$preknit / ck$shown.html) and whether
# the chunk declares a special "output" argument.
make.chunk.output.ui = function(uk, opts = rt.opts()) {
  restore.point("make.chunk.output.ui")
  ck = uk$ck; nali = ck$nali
  code = uk$stud.code
  if (isTRUE(opts$show.save.btn)) {
    saveBtn = bsButton(nali$saveBtn, "save", size="extra-small")
  } else {
    saveBtn = NULL
  }
  if (isTRUE(opts$show.data.exp)) {
    dataBtn = bsButton(nali$dataBtn, "data", size="extra-small")
  } else {
    dataBtn = NULL
  }
  # in noeval mode only the edit button is offered
  if (!opts$noeval) {
    button.row = tagList(
      bsButton(nali$editBtn, "edit",size="extra-small"),
      dataBtn,
      saveBtn
    )
  } else {
    button.row = tagList(
      bsButton(nali$editBtn, "edit",size="extra-small")
    )
  }
  solved = uk$solved
  mode = uk$mode
  if (solved) {
    code = code
    args = ck$args
    preknit =
      # noeval will always be preknit
      opts$noeval |
      # don't preknit special output or if chunk option replace.sol=FALSE
      (opts$preknit & !(!is.null(args[["output"]]) | is.false(args$replace.sol)))
    if (preknit) {
      if (!is.null(args[["output"]])) {
        # NOTE(review): placeholder -- special output is not pre-knitted yet
        html = HTML("<p> SPECIAL OUTPUT HERE <p>")
      } else {
        html = HTML(ck$sol.html)
      }
    } else {
      # not preknitted (default): knit the student's solution now
      if (!is.null(args[["output"]])) {
        html = chunk.special.output(uk=uk)
      } else {
        html = chunk.to.html(uk=uk)
        html = HTML(html)
      }
    }
  } else {
    # unsolved: show the originally shown code, knitted without evaluation
    if ((identical(code, ck$shown.txt) | isTRUE(opts$noeval)) & !is.null(ck$shown.html)) {
      # just show precompiled show
      html = ck$shown.html
    } else {
      # compile no solution again
      if (opts$noeval) {
        uk$stud.code = ck$shown.txt
        html = chunk.to.html(uk=uk, eval=FALSE)
      } else {
        html = chunk.to.html(uk=uk, eval=FALSE)
      }
    }
    html = HTML(html)
  }
  restore.point("make.chunk.output.ui.2")
  tagList(
    button.row,
    bsAlert(nali$alertOut),
    html
  )
}
# Register the app-wide hotkey handlers for chunk editors.
# check/hint hotkeys are always active; run/run-line/help hotkeys are
# registered only when evaluation is allowed (opts$noeval is FALSE).
make.global.chunk.hotkey.handlers = function(opts=rt.opts()) {
  key.ids = c("checkKey", "hintKey")
  if (!opts$noeval) {
    key.ids = c(key.ids, "runKey", "runLineKey", "helpKey")
  }
  for (key.id in key.ids) {
    aceHotkeyHandler(key.id, shiny.chunk.hotkey)
  }
}
# Central dispatcher for all editor hotkeys. Decodes which chunk the
# event came from and forwards to the matching action function.
# keyId     : which hotkey fired (checkKey, hintKey, runKey, ...)
# editorId  : the ace editor's shiny id; the task index is embedded
#             between double underscores, e.g. "editor__12__..."
# selection/cursor/text : current editor state passed by shinyAce
shiny.chunk.hotkey = function(keyId,editorId,selection,cursor,text,...,app=getApp(),opts=rt.opts()) {
  args = list(...)
  restore.point("shiny.chunk.hotkey")
  # recover the task index encoded in the editor id
  bi = as.numeric(str.between(editorId,"__","__"))
  uk = get.ts(bi=bi)
  if (is.null(uk)) {
    restore.point("shiny.chunk.hotkey.null.uk")
    warning("shiny.chunk.hotkey: uk is null")
  }
  # run/run-line/help are ignored when evaluation is disabled
  noeval = opts$noeval
  if (keyId=="checkKey") {
    check.shiny.chunk(uk=uk)
  } else if (keyId=="hintKey") {
    hint.shiny.chunk(uk = uk,code=text)
  } else if (keyId=="runKey" & !noeval) {
    run.shiny.chunk(uk=uk,code=text)
  } else if (keyId=="runLineKey" & !noeval) {
    run.line.shiny.chunk(uk=uk, cursor=cursor, selection=selection, code=text)
  } else if (keyId=="helpKey" & !noeval) {
    help.shiny.chunk(uk=uk, cursor=cursor, selection=selection, code=text)
  }
}
# Wire up the button click handlers for a single chunk's UI.
# run/solution handlers are only registered when the corresponding
# feature is enabled in opts; check/hint/edit are always available.
make.chunk.handlers = function(uk, nali= uk$ck$nali, opts=rt.opts()) {
  restore.point("make.chunk.handlers")
  buttonHandler(nali$checkBtn, check.shiny.chunk, uk=uk)
  buttonHandler(nali$hintBtn, hint.shiny.chunk, uk=uk)
  if (!opts$noeval) {
    buttonHandler(nali$runBtn, run.shiny.chunk, uk=uk)
  }
  if (isTRUE(opts$show.solution.btn))
    buttonHandler(nali$solutionBtn, solution.shiny.chunk, uk=uk)
  buttonHandler(nali$editBtn, edit.shiny.chunk, uk=uk)
}
# "run chunk" button/hotkey: evaluate the chunk's current editor code in
# the chunk's task environment, routing output either to the real R
# console (opts$in.R.console) or to the chunk's ace console widget.
run.shiny.chunk = function(uk, envir = get.task.env(ts=uk), code=getInputValue(uk$ck$nali$editor), opts=rt.opts(),...) {
  restore.point("run.shiny.chunk")
  # editor input can be NULL before first render; treat as empty code
  if (is.null(code)) code = ""
  chunk.is.selected(uk)
  ck = uk$ck
  if (opts$in.R.console) {
    eval.in.console(code, envir=envir)
  } else {
    eval.in.ace.console(code, envir=envir, consoleId=ck$nali$console)
  }
}
# Ctrl-Enter: evaluate either the current selection or, when nothing is
# selected, the single line the cursor is on. Output goes to the real R
# console or the chunk's ace console depending on opts$in.R.console.
run.line.shiny.chunk = function(uk, envir=get.task.env(ts=uk), cursor=NULL, selection=NULL,code=getInputValue(uk$ck$nali$editor),..., app=getApp(), opts=rt.opts()) {
  restore.point("run.line.shiny.chunk")
  if (is.null(code)) code = ""
  # keep the state object in sync with what is currently in the editor
  uk$stud.code = code
  chunk.is.selected(uk)
  if (selection == "") {
    # no selection: take the line under the cursor (ace rows are 0-based)
    txt = sep.lines(code)
    txt = txt[cursor$row+1]
  } else {
    txt = selection
  }
  if (opts$in.R.console) {
    eval.in.console(txt, envir=envir)
  } else {
    eval.in.ace.console(txt, envir=envir, consoleId=uk$ck$nali$console)
  }
}
# "check" button/hotkey: run the chunk's test battery against the code
# currently in the editor and report success/failure in the console.
# internal=TRUE suppresses all UI feedback (used when checking is part of
# a larger operation). max.lines caps how much captured chunk output is
# echoed on success. Returns TRUE on a passing check, FALSE otherwise.
check.shiny.chunk = function(uk, internal=FALSE, max.lines=300, store.output=FALSE, opts=rt.opts(), app=getApp(),...) {
  uk$stud.code = getInputValue(uk$ck$nali$editor)
  if (is.null(uk$stud.code)) uk$stud.code = ""
  chunk.is.selected(uk)
  # always check in a fresh environment so stale objects can't mask errors
  uk$task.env = make.fresh.task.env(ts=uk)
  restore.point("check.shiny.chunk")
  ck = uk$ck
  # unless error catching is explicitly disabled, convert evaluation
  # errors into a FALSE result with the message stored in uk$log
  if (!is.false(opts$catch.errors)) {
    ret = tryCatch(check.chunk(uk=uk, store.output=store.output, use.secure.eval=opts$use.secure.eval), error = function(e) {uk$log$failure.message <- as.character(e);return(FALSE)})
  } else {
    ret = check.chunk(uk=uk,store.output=store.output, use.secure.eval=opts$use.secure.eval)
  }
  # Don't yet know how we deal with this
  # ps$prev.check.chunk.ind = chunk.ind
  if (!internal) {
    if (!ret) {
      # failure: show the failure message and a hint reminder
      txt = merge.lines(c(uk$log$success, uk$log$failure.message,"Press Ctrl-H to get a hint."))
      updateAceEditor(app$session, ck$nali$console, value=txt, mode="text")
      uk$solved = FALSE
    } else {
      #restore.point("success test shiny chunk")
      # success: echo the chunk's captured output, truncated at max.lines
      if (NROW(uk$log$chunk.console.out)>max.lines) {
        txt = merge.lines(
          c("You successfully solved the chunk!",
            uk$log$chunk.console.out[1:max.lines],
            paste0("\n...", NROW(uk$log$chunk.console.out)-max.lines," lines ommited...")))
      } else {
        txt = merge.lines(c("You successfully solved the chunk!",
          uk$log$chunk.console.out))
      }
      updateAceEditor(app$session, ck$nali$console, value=txt,mode="r")
      proceed.with.successfuly.checked.chunk(uk)
    }
  }
  #cat("\nend check.shiny.chunk.ui\n")
  return(ret)
}
# After a successful check: mark the chunk solved, optionally replace the
# student's code by the sample solution, and switch the UI to output mode.
# (Name retains the original spelling "successfuly" -- callers depend on it.)
proceed.with.successfuly.checked.chunk = function(uk,opts=rt.opts()) {
  restore.point("proceed.with.successfuly.checked.chunk")
  ck = uk$ck
  uk$solved = TRUE
  # If we have precomp=TRUE, it is often sensible to replace
  # user solution with sample solution
  # A replace.sol chunk option takes precedence over global problem set option
  if (!is.null(ck$args[["replace.sol"]])) {
    replace.sol = ck$args[["replace.sol"]]
  } else {
    replace.sol = isTRUE(opts$replace.sol)
  }
  if (isTRUE(replace.sol)) {
    uk$stud.code = ck$sol.txt
  }
  #  if (is.last.chunk.of.ex(chunk.ind)) {
  #    ex.ind = ps$cdt$ex.ind[chunk.ind]
  #    if (!isTRUE(ps$precomp))
  #      ps$edt$ex.final.env[[ex.ind]] = copy.task.env(ps$task.env)
  #  }
  # switch this chunk from editor view to rendered output view
  uk$mode = "output"
  update.chunk.ui(uk)
  #   # set the next chunk to edit mode
  #   if (chunk.ind < NROW(ps$cdt)) {
  #     if (ps$cdt$ex.ind[chunk.ind] == ps$cdt$ex.ind[chunk.ind+1] &
  #         !ps$cdt$is.solved[chunk.ind+1]) {
  #
  #       #cat("update next chunk...")
  #       ps$cdt$mode[chunk.ind+1] = "input"
  #       update.chunk.ui(chunk.ind+1)
  #     }
  #   }
}
# "hint" button/hotkey: (re-)check the current code if needed, then run
# the chunk's hint generator and show its captured output in the console.
# Errors from the hint code are caught and displayed as text.
hint.shiny.chunk = function(uk, code=getInputValue(uk$ck$nali$editor), ...,opts=rt.opts(),app=getApp()) {
  restore.point("hint.shiny.chunk")
  if (is.null(code)) code = ""
  uk$stud.code = code
  chunk.is.selected(uk)
  # hints usually need fresh check results; skip re-checking only when the
  # code is unchanged since the last check or hint.noeval is set
  if (!isTRUE(opts$hint.noeval)) {
    if (!identical(uk$stud.code,uk$last.check.code))
      check.chunk(uk,opts=opts)
  }
  # capture everything the hint prints; turn errors into displayable text
  txt = tryCatch(merge.lines(
      capture.output(run.chunk.hint(uk=uk, opts=opts))
    ),
    error = function(e) {merge.lines(as.character(e))}
  )
  txt = paste0("Hint: ", txt)
  updateAceEditor(app$session, uk$ck$nali$console, value=txt, mode="text")
  # record in the user problem set stats that a hint was shown
  update.ups.hint.shown(uk)
}
# F1 hotkey: show R help for the selected text, or for the word under the
# cursor when nothing is selected, inside the chunk's ace console.
help.shiny.chunk = function(uk, cursor=NULL, selection="",..., app=getApp()) {
  chunk.is.selected(uk)
  envir=get.task.env(ts=uk); in.R.console=is.null(uk$nali$console)
  restore.point("help.shiny.chunk")
  if (selection == "") {
    # NOTE(review): `ps` is not defined in this function -- this relies on
    # a global `ps` existing (cf. get.ps() elsewhere). Other functions use
    # the editor text directly (e.g. uk$stud.code); confirm intent.
    txt = sep.lines(ps$code)
    txt = txt[cursor$row+1]
    # extract the word at the cursor column (ace columns are 0-based)
    txt = word.at.pos(txt, pos=cursor$column+1)
  } else {
    txt = selection
  }
  if (is.null(txt) | isTRUE(nchar(txt)==0)) {
    updateAceEditor(app$session, uk$ck$nali$console, value="No R command selected to show help for.", mode="text")
    return()
  }
  help = get.help.txt(txt)
  # To do: replace special characters in a better manner
  help = iconv(help, to='ASCII//TRANSLIT')
  #Encoding(help) = "UTF8"
  updateAceEditor(app$session, uk$ck$nali$console, value=help, mode="text")
  return()
}
# Reset the chunk's editor to the originally shown code and mark the
# chunk as unsolved; confirm the reset in the chunk's console.
restore.shiny.chunk = function(uk,...,app=getApp()) {
  restore.point("restore.shiny.chunk")
  uk$stud.code = uk$ck$shown.txt
  uk$solved = FALSE
  updateAceEditor(app$session, uk$ck$nali$editor, value=uk$stud.code, mode="r")
  # FIX: the console's shiny id lives under ck$nali (was uk$ck$console,
  # which is not set anywhere -- every other function in this file
  # addresses the console as ck$nali$console).
  updateAceEditor(app$session, uk$ck$nali$console, value="restored originally shown code...", mode="text")
}
# "solution" button: load the sample solution into the editor and note
# in the console that the sample solution is being shown.
solution.shiny.chunk = function(uk,...,app=getApp()) {
  restore.point("solution.shiny.chunk")
  nali = uk$ck$nali
  uk$stud.code = uk$ck$sol.txt
  updateAceEditor(app$session, nali$editor, value = uk$stud.code, mode="r")
  updateAceEditor(app$session, nali$console, value = "Sample solution shown", mode="text")
}
# edit button is pressed
# Switch the chunk from output view back to the editable input view.
# The can.chunk.be.edited() gate is currently disabled (if (TRUE)); the
# dead else branch shows the alert that would be used if re-enabled.
edit.shiny.chunk = function(uk, opts = rt.opts(),...) {
  restore.point("edit.shiny.chunk")
  ck = uk$ck
  #browser()
  #if (can.chunk.be.edited(ck)) {
  if (TRUE) {
    update.chunk.ui(uk=uk, mode="input")
  } else {
    nali = ck$nali
    rtutorAlert(session,nali$alertOut,
      title = "Cannot edit chunk",
      message= uk$log$failure.message,
      type = "info", append=FALSE
    )
  }
}
# Knit the chunk's current code to an HTML fragment.
# Prepends a status comment line (solved / not yet solved, with points),
# wraps the code in an rmarkdown chunk header built from the chunk's
# options, knits it (optionally inside a secure sandbox), and appends a
# highlight.js activation script for the chunk's UI container.
# eval=FALSE knits without executing; success.message controls the
# "Great, solved correctly!" banner.
chunk.to.html = function(uk,txt = uk$stud.code, opts=rt.opts(), envir=get.task.env(ts=uk), eval=TRUE, success.message=isTRUE(uk$solved), echo=TRUE, nali=NULL, quiet=TRUE) {
  restore.point("chunk.to.html")
  #if (is.null(txt))
  #  return("")
  if (is.null(txt)) txt = ""
  ck = uk$ck
  # Adapt output text
  if (paste0(txt,collapse="\n") == "")
    txt = "# Press 'edit' to enter your code."
  # chunks with tests (num.e > 0) get a status line prepended
  if (ck$num.e>0) {
    if (success.message) {
      add = c("# Great, solved correctly!")
      if (opts$show.points) {
        points = ck$max.points
        if (points==1) {
          add = paste0(add, " (1 point)")
        } else if (points>0) {
          add = paste0(add, " (",points, " points)")
        }
      }
      txt = c(add,txt)
    } else {
      txt = c("# Not yet solved...",txt)
      echo = TRUE
    }
  }
  # Get output arguments: global defaults overridden by chunk args
  args = opts$chunk.out.args
  if (length(ck$args)>0) {
    args[names(ck$args)] = ck$args
  }
  args$eval = eval
  args$echo = echo
  header = paste0("```{r '",ck$id,"'",chunk.opt.list.to.string(args,TRUE),"}")
  library(knitr)
  library(markdown)
  txt = c(header,sep.lines(txt),"```")
  #all.parent.env(task.env)
  # default in case knitting fails without raising a condition
  html ="Evaluation error!"
  if (opts$use.secure.eval) {
    # knit inside the sandboxed evaluator to contain untrusted code
    html = try(
      RTutor::rtutor.eval.secure(quote(
        knitr::knit2html(text=txt, envir=envir,fragment.only = TRUE,quiet = quiet)
      ), envir=environment())
    )
  } else {
    html = try(
      knitr::knit2html(text=txt, envir=envir,fragment.only = TRUE,quiet = quiet)
    )
  }
  if (is(html, "try-error")) {
    html = as.character(html)
  }
  restore.point("chunk.to.html.knit2html")
  nali = ck$nali
  # Add syntax highlightning
  if (!is.null(nali$chunkUI)) {
    html = paste0(paste0(html,collapse="\n"),"\n",
      "<script>$('#",nali$chunkUI," pre code').each(function(i, e) {hljs.highlightBlock(e)});</script>")
  }
  html
}
# Default knitr chunk output options; any option passed via ... overrides
# or extends the defaults. Returns the merged list.
default.chunk.out.args = function(...) {
  defaults = list(fig.width=6.5, fig.height=4.5, fig.align='center', "warning"=FALSE, cache=FALSE, collapse=TRUE, comment=NA)
  overrides = list(...)
  for (nm in names(overrides)) {
    defaults[[nm]] = overrides[[nm]]
  }
  defaults
}
# Record this chunk's task as the currently selected task on the problem
# set state and propagate the selection to the task-level handler.
chunk.is.selected = function(uk, ps = get.ps()) {
  ps$task.ind = uk$task.ind
  task.is.selected(uk)
}
|
79c8e0819a5bfe546e4745a91ddf0159b9fe931d
|
f990b8616b9ae5b343bec0faa3721ec1610aa2e1
|
/scripts/combine_snvs_pindel.R
|
8350c27d089df4cde1ab01d5da8e7aa3e68fad27
|
[] |
no_license
|
UCSF-Costello-Lab/LG3_Pipeline
|
7f46f266d8b94825d786b5c4f74f136cd06c0230
|
4a9751d742ed21a604e2dfe6257410bcc7c7183f
|
refs/heads/master
| 2022-10-21T04:59:20.822171
| 2021-10-12T02:55:34
| 2021-10-12T02:55:34
| 138,243,933
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
combine_snvs_pindel.R
|
# Stop with an informative error -- quoting the given path, its normalized
# form, and the current working directory -- unless `pathname` refers to
# an existing regular file. Returns the pathname invisibly on success.
assertFile <- function(pathname) {
  if (!utils::file_test("-f", pathname)) {
    fullpath <- normalizePath(pathname, mustWork = FALSE)
    msg <- sprintf("File not found: %s => %s (current working directory is %s)",
                   sQuote(pathname), sQuote(fullpath), sQuote(getwd()))
    stop(msg)
  }
  invisible(pathname)
}
# Command-line driver: merge an SNV table with a pindel indel table.
#   args[1] = SNV file (tab-delimited, with header)
#   args[2] = indel file (tab-delimited, with header)
#   args[3] = output path for the merged, tab-delimited table
args <- commandArgs(trailingOnly = TRUE)
assertFile(args[1])
assertFile(args[2])
snvs <- read.delim(args[1], as.is = TRUE)
indels <- read.delim(args[2], as.is = TRUE)
# full outer join on all shared columns: keep rows unique to either input
both <- merge(snvs, indels, all = TRUE)
# restore the leading '#' on the first column header (read.delim strips it)
names(both)[1] <- "#gene"
write.table(both, file = args[3], quote = FALSE, sep = "\t", row.names = FALSE, col.names = TRUE)
|
a05d6889f4e18dbb89541b03dcd2fd11e3b57f9f
|
e7133561b40db4b5bfa6a09248a2d7b48aee0144
|
/collection_I_need_to_sort/missing_data_exercise.R
|
673fc01b6cf252233e1eaf9b60535d34ed9d6734
|
[
"MIT"
] |
permissive
|
jokedurnez/RandomNotes
|
25f6f8d557740d69e00963995ce3c3e15202cfc4
|
12b5c95a2f038e80159dd97e84e863e6a5b11541
|
refs/heads/master
| 2021-01-20T17:54:44.271016
| 2018-02-02T21:12:21
| 2018-02-02T21:12:21
| 60,736,193
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
missing_data_exercise.R
|
# Simulation: how replacing missing observations by a constant (`alt`)
# affects the rejection rate of a one-sample t-test.
# For each imputation constant alt (panels), each true effect eff (line
# color), and each number of imputed values `null` (x positions 1..3),
# run 10000 t-tests and plot the proportion of p-values below 0.05.
null = 10
total = 30
# one panel per imputation constant
par(mfrow=c(1,3))
for (alt in c(0,0.2,0.5)){
# empty frame: x = 1..3 bins of `null`, y in [0, 1] for rejection rates
plot(1:3,c(0,0,1),col="white")
l = 0
for (eff in c(0,0.2)){
# l indexes the line color for this effect size
l = l+1
mnT = c()
vaT = c()
typeI = power = c()
k = 0
# number of imputed ("missing") observations: 0, 10, 20
for (null in seq(0,29,10)){
k = k+1
print(null)
p = c()
t = c()
for (i in 1:10000){
# sample: (total - null) real draws from N(eff, 1) plus `null` copies of alt
y = c(rnorm(total-null,eff,1),rep(alt,null))
test = t.test(y)
p[i] = test$p.value
t[i] = test$statistic
}
# summary of the t-statistic distribution and the rejection rate
mnT[k]=mean(t)
vaT[k]=var(t)
typeI[k] = mean(p<0.05)
}
# rejection rate vs. number of imputed values, colored by effect size
lines(typeI,col=l)
}
}
|
86512be64a1200fda6716843ffb5a32846c187ed
|
d5173f5299f3e4b556caf219f4dc1913f2c050db
|
/plot2.R
|
1d9540e8003c6163b5fa61144aad42b260c6dc78
|
[] |
no_license
|
mhrds/ExData_Plotting1
|
68cefb2daa6e90e91302e40e56b786bb599f5071
|
ac43812baceee0137af7a268bc05fdce6d72bb6b
|
refs/heads/master
| 2021-01-16T19:27:27.413110
| 2017-08-13T11:20:50
| 2017-08-13T11:20:50
| 100,170,037
| 0
| 0
| null | 2017-08-13T10:44:48
| 2017-08-13T10:44:48
| null |
UTF-8
|
R
| false
| false
| 477
|
r
|
plot2.R
|
# Plot 2: Global Active Power (kilowatts) over time, written to plot2.png.
# Data loading is delegated to getdata(), defined in getdata.R.
source("getdata.R")
power_data <- getdata()
# single-panel layout with room for axis labels
png(file = "plot2.png", width = 480, height = 480)
par(mfrow = c(1, 1), mar = c(5, 5, 1, 1))
plot(power_data$datetime, power_data$global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
7ac0b3c4dd7b0eaf2c1ce976c9ad989ea195988f
|
f6954adefc687611fdabe5fe9729b85e39880137
|
/man/plot_spliced_isoforms.Rd
|
7aa273edbb17c9dce6c2e082c9d46c75744116d8
|
[] |
no_license
|
GiuseppeLeuzzi/iSTOP
|
0d4e5e00a77483ef1d593000f2825fbcf4101251
|
86c6f98695cb3a76b78b496185f1931013db641a
|
refs/heads/master
| 2023-04-19T16:56:42.520003
| 2021-05-06T19:45:48
| 2021-05-06T19:45:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 609
|
rd
|
plot_spliced_isoforms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-isoforms.R
\name{plot_spliced_isoforms}
\alias{plot_spliced_isoforms}
\title{Plot spliced isoforms}
\usage{
plot_spliced_isoforms(gene, coords, colors = NULL, ...)
}
\arguments{
\item{gene}{A single gene name}
\item{coords}{A dataframe of \link[=CDS]{CDS coordinates} (will be filtered for \code{gene})}
\item{colors}{A character vector of colors to be applied to each track}
\item{...}{Tracks. Each track must be a dataframe with two columns - \code{gene} and \code{genome_coord}.}
}
\description{
Plot spliced isoforms
}
|
52fe9777ff9d2ea61abb56edc50112d78b480a70
|
924cf645f797e404689ead13ea4f2b398b3c0feb
|
/R/saeeb.R
|
fab91fed27781bc972a3399eefb2ae224e6acfe9
|
[] |
no_license
|
cran/saeeb
|
b8f56e7c643bae215b9e52a0377c63ed03a9935c
|
1e368f6cbb1e17afa20181dbd265add280be142a
|
refs/heads/master
| 2022-04-27T20:29:27.990958
| 2020-04-28T08:50:03
| 2020-04-28T08:50:03
| 259,620,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,751
|
r
|
saeeb.R
|
#' Small Area Estimation for Count Data
#'
#' This package provides functions for small area estimation using the Empirical Bayes (EB) Poisson-Gamma model. This model only accommodates count data and gives the option of whether or not to use covariates in the estimation. Each function returns EB estimators and mean squared error (MSE) estimators for each area. The EB estimators are obtained using the model proposed by Wakefield (2006) and refined by Kismiantini (2007), and the MSE estimators are obtained using the Jackknife method of Jiang et al. (2002).
#'
#' @section Functions:
#' \describe{
#' \item{\code{\link{ebcov}}}{Gives the EB Poisson-Gamma with covariates and the Jackknife MSE estimators.}
#' \item{\code{\link{ebnocov}}}{Gives the EB Poisson-Gamma without covariates and the Jackknife MSE estimators.}
#' }
#'
#' @docType package
#' @name saeeb
#' @author Rizki Ananda Fauziah, Ika Yuni Wulansari
#' @references Clayton, David & Kaldor, John. (1987). Empirical Bayes Estimates of Age-Standardized Relative Risks for Use in Disease Mapping. Biometrics, 43, 671-681. doi:10.2307/2532003.
#' @references Jiang, J., Lahiri, P., & Wan, S. M. (2002). A Unified Jackknife Theory for Empirical Best Prediction with M-Estimation. The Annals of Statistics, 30, 6, 1782-1810. doi:10.1214/aos/1043351257.
#' @references Kismiantini. (2007). Pendugaan Statistik Area Kecil Berbasis Model Poisson-Gamma [Tesis]. Bogor: Institut Pertanian Bogor.
#' @references Rao, J. N. K. & Molina, Isabel. (2015). Small Area Estimation (2nd ed.). New Jersey: John Wiley & Sons, Inc.
#' @references Wakefield, Jon. (2006). Disease Mapping and Spatial Regression with Count Data. Biostatistics, 8, 2, 158–183. doi:10.1093/biostatistics/kxl008.
NULL
|
2af3592961b315cf1c4eb8d7faa36aab1ae638c2
|
a0f45dc0295161eb4c0345a7ce44626146972e0f
|
/dropdown_shiny_module.R
|
53f0aa26496f0ea9640af8a1b70c69b927b969cb
|
[] |
no_license
|
imsashii/appsilon_task
|
bdcaed08d777c8bd488219b6c637381a5c35ae18
|
47abef472ddcbb3956a117b34b3331e96b6351df
|
refs/heads/main
| 2023-02-21T14:01:58.949432
| 2021-01-24T17:38:28
| 2021-01-24T17:38:28
| 332,344,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
dropdown_shiny_module.R
|
# UI half of the vessel-picker shiny module: a vessel-type dropdown plus
# a placeholder that the server side fills with the vessel-name dropdown
# for the chosen type. `id` namespaces all input/output ids.
dropdown_ui <- function(id) {
  type_choices <- str_sort(unique(ship_type_and_name$ship_type_name))
  tagList(
    # vessel type selector
    selectInput(NS(id, "vessel_type"),
                label = "Select Vessel Type:",
                choices = type_choices,
                selected = "Tanker",
                width = "90%"),
    tags$br(),
    # placeholder for the server-rendered vessel-name selector
    uiOutput(NS(id, "select_vessel_output"))
  )
}
# Server half of the vessel-picker module: renders the vessel-name
# dropdown for the currently selected vessel type and returns the
# selected vessel type as a reactive.
dropdown_server <- function(id) {
  moduleServer(id,function(input,output,session) {
    # vessel names belonging to the currently selected type
    vessels <- reactive(ship_type_and_name$ship_name[ship_type_and_name$ship_type_name
                                                     == input$vessel_type])
    output$select_vessel_output <- renderUI({
      selectInput(
        # NOTE(review): this id is not namespaced (no session$ns()), so
        # the rendered input is delivered as top-level input$vessel, not
        # input$vessel inside this module. If the app reads it at top
        # level this "works"; otherwise it should be session$ns("vessel").
        # Confirm before changing -- fixing it would move the id.
        inputId = "vessel",
        label = "Select Vessel:",
        choices = str_sort(vessels()),
        #selected = "ARK FORWARDER",
        width = "90%")
    })
    return(vessel_type = reactive({input$vessel_type}))
  })
}
|
47dee265f73f7a90f4605eac72bed3dbd09a405b
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/metacoder/inst/testfiles/centroid/AFL_centroid/centroid_valgrind_files/1615765267-test.R
|
abe7b8d4be79c39330120ad82800ce5b6a3611a9
|
[
"MIT"
] |
permissive
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
1615765267-test.R
|
# Auto-generated valgrind/fuzz regression input for metacoder's internal
# centroid(): call it with a fixed numeric vector (mostly denormal/zero
# values) and print the structure of the result.
testlist <- list(b = c(1.79303608770963e-248, 8.27286244416292e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(metacoder:::centroid,testlist)
str(result)
|
f99fa17df81e402fac64bf806dbadac8372d17ee
|
55e31f67fae4ca26ac652784bbf1a99f820465c2
|
/ui.R
|
e62fd278f8ca8d7f6cad3d2a1df698072bac5fa8
|
[] |
no_license
|
justmytwospence/StochastiWalk
|
b5323a965f64ba0c8662cc81321b197790843a40
|
1d9d8f545bf007798a6009d77b30b17af4b3f742
|
refs/heads/master
| 2021-01-01T05:36:05.522548
| 2013-11-25T01:02:18
| 2013-11-25T01:02:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,558
|
r
|
ui.R
|
library(shiny)
# Override shiny's sidebarPanel so the sidebar spans 3 Bootstrap columns
# ("span3") instead of shiny's default width, wrapping the contents in
# the usual well-styled form.
sidebarPanel <- function(...) {
  panel_form <- tags$form(class = "well", ...)
  div(class = "span3", panel_form)
}
# Page layout: sidebar with ARIMA(p, d, q) controls, main panel with the
# simulated series plus tabs for unit roots, autocorrelation, and about.
# The gsub() wrappers rewrite radio-button markup so choices render
# inline; conditionalPanel()s reveal AR/MA coefficient sliders only for
# the selected model orders.
shinyUI(pageWithSidebar(
  headerPanel("StochastiWalk"),
  sidebarPanel(
    checkboxInput('showmodel',
                  'Show model',
                  value = FALSE),
    checkboxInput('showforecast',
                  'Show forecast',
                  value = FALSE),
    checkboxInput('shock',
                  'Administer shock',
                  value = FALSE),
    br(),
    sliderInput('trend',
                'Trend',
                min = -.1,
                max = .1,
                step = .001,
                value = 0),
    # render radio choices inline instead of stacked
    gsub("label class=\"radio\"", "label class=\"radio inline\"",
         radioButtons('d',
                      'Order of Integration',
                      choices = list(
                        'None' = 0,
                        '1' = 1,
                        '2' = 2),
                      selected = 'None')),
    gsub("label class=\"radio\"", "label class=\"radio inline\"",
         radioButtons('p',
                      'Autoregressive Order',
                      choices = list(
                        'None' = 0,
                        '1' = 1,
                        '2' = 2,
                        '3' = 3),
                      selected = 'None')),
    # AR coefficient sliders appear only up to the selected AR order
    conditionalPanel('input.p > 0',
                     sliderInput('ar1',
                                 'AR1',
                                 min = -1,
                                 max = 1,
                                 step = .05,
                                 value = 0)),
    conditionalPanel('input.p > 1',
                     sliderInput('ar2',
                                 'AR2',
                                 min = -1,
                                 max = 1,
                                 step = .05,
                                 value = 0)),
    conditionalPanel('input.p > 2',
                     sliderInput('ar3',
                                 'AR3',
                                 min = -1,
                                 max = 1,
                                 step = .05,
                                 value = 0)),
    gsub("label class=\"radio\"", "label class=\"radio inline\"",
         radioButtons('q',
                      'Moving Average Order',
                      choices = list(
                        'None' = 0,
                        '1' = 1,
                        '2' = 2,
                        '3' = 3),
                      selected = 'None')),
    # MA coefficient sliders appear only up to the selected MA order
    conditionalPanel('input.q > 0',
                     sliderInput('ma1',
                                 'MA1',
                                 min = -2,
                                 max = 2,
                                 step = .1,
                                 value = 0)),
    conditionalPanel('input.q > 1',
                     sliderInput('ma2',
                                 'MA1',
                                 min = -2,
                                 max = 2,
                                 step = .1,
                                 value = 0)),
    conditionalPanel('input.q > 2',
                     sliderInput('ma3',
                                 'MA3',
                                 min = -2,
                                 max = 2,
                                 step = .1,
                                 value = 0))
  ),
  mainPanel(
    # the simulated time series, always visible above the tabs
    plotOutput('ts', height = 275),
    #gsub("class=\"tabbable\"", "class=\"tabbable tabs-left\"",
    tabsetPanel(
      tabPanel('Unit Roots',
               div(class = 'row-fluid',
                   div(class = 'span6',
                       plotOutput('ur.ar', height = 375, width = 375)
                   ),
                   div(class = 'span6',
                       plotOutput('ur.ma', height = 375, width = 375)
                   )
               )),
      tabPanel('Autocorrelation',
               div(class = 'row-fluid',
                   div(class = 'span6',
                       plotOutput('acf.raw')),
                   div(class = 'span6',
                       plotOutput('acf.compare'))
               )),
      tabPanel("About",
               p(
                 div(tags$p('Created by Spencer Boucher, MS candidate in data analytics.')),
                 div(tags$a(href = 'http://spencerboucher.com', 'spencerboucher.com')),
                 div(tags$a(href = 'https://github.com/justmytwospence/StochastiWalk', 'Find the source code on GitHub.'))))
    )#)
  )
))
|
47f955d4ceb91dd633d459d0ec77f66602df2234
|
94fb0b0151efcb821ce317032f24b6029605a98c
|
/R/models_plots.r
|
9116b47c68a87e1301ff8ab86afc910a9ca0e082
|
[
"MIT"
] |
permissive
|
Seneketh/StanDDM
|
bf00b318ef90796abe16fd7206d5aeecd4bfaac6
|
f70ad34e9f760512dc62cab0c7297737c3f87586
|
refs/heads/master
| 2023-07-20T07:18:50.904778
| 2023-07-16T12:55:49
| 2023-07-16T12:55:49
| 188,408,350
| 38
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,654
|
r
|
models_plots.r
|
#' Data Simulation Plots
#'
#' Plots simulated and experimental data. The created plots are: Reaction times densities for
#' correct and incorrect responses and the proportion of correct answeres for each one of
#' six reaction time bins. A folder with the model name is created and the plots saved
#' automatically in the current working directory.
#' @export
#' @param experim_dat Experimental data in the form produced by \code{\link{experimental_data_processing}}.
#' @param experim_dat Simulated data in the form produced by \code{\link{simulDat}}.
#' @param model_name Optional name for the model to distinguish to which data/model the
#' function was applied to. Default is 'NAME_UNDEFINED'.
#' @return Generates and saves plots in a directory with name of \code{model_name}.
models_plots <- function(experim_dat, simul_dat, model_name='NAME_UNDEFINED'){

    ggplot2::theme_set(ggplot2::theme_bw())

    # Fall back to the placeholder name when no usable name was supplied.
    # `||` short-circuits, so nchar() is never applied to NULL (the original
    # vectorized `|` made the condition error for model_name = NULL).
    if(is.null(model_name) || nchar(model_name) < 4 || model_name=='NAME_UNDEFINED'){
        model_name <- "NAME_UNDEFINED"
    }
    # The output directory must exist for *every* model name; the original
    # only created it for the fallback name, so ggsave() failed otherwise.
    dir.create(model_name, showWarnings = FALSE)

    # Experimental and simulated trials stacked; `cond` flags the origin.
    the_combine <- rbind(data.frame(experim_dat, 'cond'='Data'), simul_dat$data)

    # Plot 1: simulated RT densities, correct vs. incorrect responses ----
    plot1 <- ggplot() +
        geom_density(aes(x=rt, group=cor, fill=cor), data=simul_dat$data, alpha=.3) +
        theme(legend.position="bottom") +
        labs(title = paste(model_name),
             subtitle = "Comparison of Simulated Correct vs. Incorrect RTs",
             x = "Reaction Times",
             y = 'Density',
             fill='Correct') +
        scale_colour_hue(name="Data Types:")

    ggsave(plot1, filename = paste(model_name, '/', model_name, '_simRTcorvsncorr.pdf', sep = ''),
           device = 'pdf',
           scale = 1, width = 12, height = 8, units = "in")

    # Plot 2: data vs. simulated RT densities per subject ----
    plot2tmp <- the_combine
    # Flip incorrect-trial RTs onto the negative axis so both outcomes
    # share one density panel.
    plot2tmp$rt[plot2tmp$cor==0] <- plot2tmp$rt[plot2tmp$cor==0] * -1

    plot2 <- ggplot() +
        geom_density(aes(x=rt, y=..density.., color=cond), data=plot2tmp) +
        facet_wrap(~suj) +  # was facet_wrap(~plot2tmp$suj); the column form is the supported idiom
        labs(title = paste(model_name),
             subtitle = "Comparison of Data and Simulated RTs",
             x = "Reaction Times",
             y = 'Density',
             caption = 'Note: Negative/Positive RTs belong to incorrect/correct trials respectively') +
        theme(legend.position="bottom") +
        scale_colour_hue(name="Data Types:")

    ggsave(plot2, filename = paste(model_name, '/', model_name, '_simRTcomparison.pdf', sep = ''), device = 'pdf',
           scale = 1, width = 12, height = 8, units = c("in"))

    # Plot 3: proportion correct per RT quantile bin, data vs. simulation ----
    nbins <- 6
    # Widen the last break slightly so cut(..., right=FALSE) keeps the max RT.
    dat_cuants <- quantile(experim_dat$rt, probs = seq(0, 1, length.out = nbins+1))
    dat_cuants[length(dat_cuants)] <- tail(dat_cuants, n=1) + 0.1
    sim_cuants <- quantile(simul_dat$data$rt, probs = seq(0, 1, length.out = nbins+1))
    sim_cuants[length(sim_cuants)] <- tail(sim_cuants, n=1) + 0.1

    # Assign each trial to its RT quantile bin, per subject.
    # (cut() has no na.rm argument; it is silently absorbed by `...` here.)
    the_combine$cuants[the_combine$cond=='Data'] <- experim_dat %>%
        split(experim_dat$suj) %>% map(c('rt')) %>%
        map(function(x) {cut(x, breaks = dat_cuants, right=FALSE, na.rm = TRUE)}) %>%
        as_tibble() %>% tidyr::pivot_longer(cols=everything(), names_to = 'variable', values_to = 'value') %>% .$value

    the_combine$cuants[the_combine$cond=='Sim'] <- simul_dat$data %>%
        split(simul_dat$data$suj) %>% map(c('rt')) %>%
        map(function(x) {cut(x, breaks = sim_cuants, right=FALSE, na.rm = TRUE)}) %>%
        as_tibble() %>% tidyr::pivot_longer(cols=everything(), names_to = 'variable', values_to = 'value') %>% .$value

    the_combine$cuants <- unlist(the_combine$cuants) %>% as.factor()
    # Factor -> numeric via its levels (a plain as.numeric would return codes).
    the_combine$cor <- as.numeric(levels(the_combine$cor))[the_combine$cor]

    smrzd <- ddply(the_combine, c("cond","cuants"), summarise,
                   N    = length(cor),
                   mean = mean(cor),
                   sd   = sd(cor),
                   se   = sd / sqrt(N))

    pd <- position_dodge(0.1)  # dodge so overlapping error bars stay readable

    plot3 <- ggplot(smrzd, aes(x=cuants, y=mean, colour=cond, group=cond)) +
        geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.1, position=pd) +
        geom_line(position=pd) +
        geom_point(position=pd) +
        labs(title = paste(model_name),
             subtitle = "Comparison of Response correctness per RT Quantiles",
             x = "RT Quantile",
             y = 'Proportion Correct Answers',
             caption = 'Note: Bars represent standard errors') +
        theme(legend.position="bottom") +
        scale_colour_hue(name="Data Types:") + ylim(0, 1) +
        theme(legend.position="bottom")

    ggsave(plot3, filename = paste(model_name, '/', model_name, '_cuan_comp.pdf', sep = ''),
           device = 'pdf',
           scale = 1, width = 12, height = 8, units = "in")

    # Plot 4: same as plot 3, faceted per subject ----
    smrzd <- ddply(the_combine, c("suj","cond","cuants"), summarise,
                   N    = length(cor),
                   mean = mean(cor),
                   sd   = sd(cor),
                   se   = sd / sqrt(N))

    pd <- position_dodge(0.1)

    plot4 <- ggplot(smrzd, aes(x=cuants, y=mean, colour=cond, group=cond)) +
        geom_errorbar(aes(ymin=mean-se, ymax=mean+se), width=.1, position=pd) +
        geom_line(position=pd) +
        geom_point(position=pd) +
        labs(title = paste(model_name),
             subtitle = "Comparison of Response correctness per RT Quantiles and Subject",
             x = "RT Quantile",
             y = 'Proportion Correct Answers',
             caption = 'Note: Bars represent standard errors') +
        theme(legend.position="bottom") +
        scale_colour_hue(name="Data Types:") + ylim(0, 1) +
        theme(legend.position="bottom") + facet_wrap(~suj)

    ggsave(plot4, filename = paste(model_name, '/', model_name, '_cuan_comp_suj.pdf', sep = ''),
           device = 'pdf',
           scale = 1, width = 12, height = 8, units = "in")
}
|
1b578218f5ba5174858f2967312e901366ee6a5a
|
32c9487479a074f78b2e69ea400112373613fd7f
|
/man/create_Lines.Rd
|
1a82fda25c2e4cd4a19df49f6550acb2c6dee4a7
|
[] |
no_license
|
ccamlr/CCAMLRGIS
|
0be131efab80a140e4974db299b4562b136e181b
|
96e25f5261cfdeac716d33fcd29bed8dc894d81c
|
refs/heads/master
| 2023-08-31T04:24:56.468626
| 2023-08-29T00:34:22
| 2023-08-29T00:34:22
| 211,758,459
| 8
| 2
| null | 2023-08-29T00:34:24
| 2019-09-30T02:21:14
|
R
|
UTF-8
|
R
| false
| true
| 2,478
|
rd
|
create_Lines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.R
\name{create_Lines}
\alias{create_Lines}
\title{Create Lines}
\usage{
create_Lines(
Input,
NamesIn = NULL,
Buffer = 0,
Densify = FALSE,
Clip = FALSE,
SeparateBuf = TRUE
)
}
\arguments{
\item{Input}{input dataframe.
If \code{NamesIn} is not provided, the columns in the \code{Input} must be in the following order:
Line name, Latitude, Longitude.
If a given line is made of more than two points, the locations of points
must be given in order, from one end of the line to the other.}
\item{NamesIn}{character vector of length 3 specifying the column names of line identifier, Latitude
and Longitude fields in the \code{Input}.
Names must be given in that order, e.g.:
\code{NamesIn=c('Line ID','Line Latitudes','Line Longitudes')}.}
\item{Buffer}{numeric, distance in nautical miles by which to expand the lines. Can be specified for
each line (as a numeric vector).}
\item{Densify}{logical, if set to TRUE, additional points between extremities of lines spanning more
than 0.1 degree longitude are added at every 0.1 degree of longitude prior to projection (see examples).}
\item{Clip}{logical, if set to TRUE, polygon parts (from buffered lines) that fall on land are removed (see \link{Clip2Coast}).}
\item{SeparateBuf}{logical, if set to FALSE when adding a \code{Buffer},
all spatial objects are merged, resulting in a single spatial object.}
}
\value{
Spatial object in your environment.
Data within the resulting spatial object contains the data provided in the \code{Input} plus
additional "LengthKm" and "LengthNm" columns which corresponds to the lines lengths,
in kilometers and nautical miles respectively. If additional data was included in the \code{Input},
any numerical values are summarized for each line (min, max, mean, median, sum, count and sd).
To see the data contained in your spatial object, type: \code{View(MyLines)}.
}
\description{
Create lines to display, for example, fishing line locations or tagging data.
}
\examples{
\donttest{
# For more examples, see:
# https://github.com/ccamlr/CCAMLRGIS#create-lines
#Densified lines (note the curvature of the lines)
MyLines=create_Lines(Input=LineData,Densify=TRUE)
plot(st_geometry(MyLines),lwd=2,col=rainbow(nrow(MyLines)))
}
}
\seealso{
\code{\link{create_Points}}, \code{\link{create_Polys}}, \code{\link{create_PolyGrids}},
\code{\link{create_Stations}}, \code{\link{create_Pies}}.
}
|
89f332c16b707179bf03b5ac1ac469c4ec71c09b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/glmgraph/examples/coef.glmgraph.Rd.R
|
3985b0d4a4e6ee8d2e7ffd43f3231de810abdb13
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
r
|
coef.glmgraph.Rd.R
|
# Runnable example for coef.glmgraph: simulate a two-block design, build the
# graph Laplacian from its adjacency matrix, fit a gaussian glmgraph model,
# and extract coefficients at several penalty settings.
library(glmgraph)
### Name: coef.glmgraph
### Title: Retrieve coefficients from a fitted "glmgraph" object.
### Aliases: coef.glmgraph
### Keywords: models regression

### ** Examples

set.seed(1234)
library(glmgraph)
# n observations; p1 informative predictors, p2 noise predictors
n <- 100
p1 <- 10
p2 <- 90
p <- p1+p2
X <- matrix(rnorm(n*p), n,p)
magnitude <- 1
## construct laplacian matrix from adjacency matrix
# Block-diagonal adjacency: predictors are connected within each block only.
A <- matrix(rep(0,p*p),p,p)
A[1:p1,1:p1] <- 1
A[(p1+1):p,(p1+1):p] <- 1
diag(A) <- 0
# True coefficients: only the first p1 predictors have an effect.
btrue <- c(rep(magnitude,p1),rep(0,p2))
intercept <- 0
eta <- intercept+X%*%btrue
# Laplacian L = D - A, with D the diagonal of row degrees.
diagL <- apply(A,1,sum)
L <- -A
diag(L) <- diagL
### gaussian
Y <- eta+rnorm(n)
obj <- glmgraph(X,Y,L)
# Coefficients for the full path, then restricted by lambda2 and/or lambda1.
coefs <- coef(obj)
coefs <- coef(obj,lambda2=0.01)
coefs <- coef(obj,lambda1=c(0.11,0.12))
coefs <- coef(obj,lambda1=c(0.11,0.12),lambda2=0.01)
|
17f155b0e789f2d3c39157e032fe93d9f4e89faf
|
4c3ff90922b2fa72e82e7ab9a3ba8e7c8ad51113
|
/code/rnaseq_code/is_elements_assemblies.R
|
975186549c304b0131836e16f63ff3e278c50751
|
[
"MIT"
] |
permissive
|
felixgrunberger/pyrococcus_reannotation
|
cdb6390aa160c879599ddcc92986ed8491ae3af2
|
d3fb45473bb0c92a2edf99ab770ac8f32e69e55c
|
refs/heads/master
| 2020-04-29T18:04:05.504125
| 2019-10-22T08:18:24
| 2019-10-22T08:18:24
| 176,313,200
| 0
| 0
|
MIT
| 2019-10-22T08:18:02
| 2019-03-18T15:16:47
|
R
|
UTF-8
|
R
| false
| false
| 3,480
|
r
|
is_elements_assemblies.R
|
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# Number of iselements in pyrococcus assemblies
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# load libraries
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
library(tidyverse)
library(here)
library(data.table)
library(viridis)
library(ape)
library(ggthemes)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# theme for plotting
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# Publication-quality ggplot2 theme built on ggthemes::theme_foundation.
#
# base_size: base font size in points (default 14).
# Returns a ggplot2 theme object; add to any plot with `+ theme_Publication()`.
theme_Publication <- function(base_size=14) {
# The whole expression is parenthesized so the `+` continuations parse as
# one theme composition.
(theme_foundation(base_size=base_size, base_family="Helvetica")
+ theme(plot.title = element_text(face = "bold",
size = rel(1.2), hjust = 0.5),
text = element_text(),
panel.background = element_rect(colour = NA),
plot.background = element_rect(colour = NA),
panel.border = element_rect(colour = NA),
axis.title = element_text(face = "bold",size = rel(1)),
axis.title.y = element_text(angle=90,vjust =2),
axis.title.x = element_text(vjust = -0.2),
axis.text = element_text(),
axis.line = element_line(colour="black"),
axis.ticks = element_line(),
# light grey major grid, no minor grid
panel.grid.major = element_line(colour="#f0f0f0"),
panel.grid.minor = element_blank(),
legend.key = element_rect(colour = NA),
legend.position = "bottom",
legend.direction = "horizontal",
legend.key.size= unit(0.2, "cm"),
legend.spacing = unit(0, "cm"),
legend.title = element_text(face="italic"),
plot.margin=unit(c(10,5,5,5),"mm"),
strip.background=element_rect(colour="#f0f0f0",fill="#f0f0f0"),
strip.text = element_text(face="bold")
))
}
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# load ISEScan 1.6 data
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# Count insertion sequences per assembly from ISEScan 1.6 GFF output:
# keep only "insertion_sequence" features and record the count with an
# assembly label, then stack the three assemblies into one table.
# >> COM1
com1_ises <- read.gff(here("data/ises_data/pfu_com1_ises.fasta.gff")) %>%
dplyr::filter(type == "insertion_sequence") %>%
summarise(counts = n(),
group = "COM1")

# >> DSM3638
dsmz_old <- read.gff(here("data/ises_data/pfu_dsm3638_ises.fasta.gff")) %>%
dplyr::filter(type == "insertion_sequence") %>%
summarise(counts = n(),
group = "DSM 3638 \n(2001)")

# >> DSMZ NEW ASSEMBLY
dsmz_new_assembly <- read.gff(here("data/ises_data/pfu_dsmz_assembly_ises.fasta.gff")) %>%
dplyr::filter(type == "insertion_sequence") %>%
summarise(counts = n(),
group = "DSM 3638 \n(2016)")

# >> combine groups
ises_table <- rbind(com1_ises, dsmz_old, dsmz_new_assembly)
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# plot data
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> #
# Bar chart of IS-element counts per assembly/strain, written straight to PDF.
pdf(here("figures/rnaseq_figures/iselements_pyrococcus_strains.pdf"),
    width = 5, height = 3.5, paper = "special", onefile = FALSE)
# print() is required: a ggplot object is only auto-printed at an interactive
# top level, so under source() the open PDF device would stay blank.
print(
  ggplot(data = ises_table, aes(y = counts, x = group, fill = group)) +
    geom_bar(stat = "identity") +
    theme_Publication() +
    guides(fill = FALSE) +  # was `F` -- use the unmaskable constant
    scale_fill_viridis(alpha = 1, discrete = TRUE, option = "viridis", begin = 0.9, end = 0.4) +
    geom_text(aes(label = counts), vjust = 1.6, color = "white", size = 3.5) +
    ylab("number of IS elements") +
    xlab("assembly/strain")
)
dev.off()
|
871d2fd26449eb22a3db5d3fdb654f3048038b39
|
ed3509c1c488234d378f008ed7e76b2325e3ac3f
|
/scripts/unsc_2020_script.R
|
136e08bed2a33f0c7b9a80dbe9f49fdc18cc7a70
|
[] |
no_license
|
antonio-henrique-pires/unsc_2020
|
0a19b415c56b7dfdad3166274c66107aeb049680
|
2bcad093b1d692ae34257c52c39161e7807aab28
|
refs/heads/main
| 2023-06-18T20:49:41.459444
| 2021-07-03T03:12:46
| 2021-07-03T03:12:46
| 375,398,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,795
|
r
|
unsc_2020_script.R
|
### unsc extract - live meetings
library(tidyverse)
library(magick)
library(tesseract)
library(lubridate)
library(quanteda)
library(stringi)
# Extract one UNSC meeting record (S/PV.8724) from PDF via OCR, split the
# transcript into per-speaker speeches, attach meeting metadata (date, topic,
# speaker, country, participant type), clean headers/footers, write one text
# file per speech, and append the rows to the cumulative unsc_2020 table.
# NOTE(review): the speaker lists and country assignments below are
# hand-curated for this specific meeting -- they must be revised per meeting.
eng <- tesseract(language = "eng", options = list(tessedit_pageseg_mode = 1))
readpdf <- image_read_pdf("speeches_pdf/S_PV.8724_E.pdf")
txt <- ocr(readpdf, engine = eng)
# Collapse pages into one string, drop runs of blanks and newlines, and
# transliterate to plain ASCII so the name regexes match reliably.
readpdf2 <- str_replace_all(paste(txt, collapse= " "), "[:blank:]{2,}","")
readpdf3 <- str_replace_all(readpdf2, "\\\n"," ")
readpdf3 <- stri_trans_general(readpdf3, "Latin-ASCII")
# vector speech
# Lookahead alternation: split right before each known speaker introduction
# (named officials, "The President:", "Mr./Mrs./Ms. Name (Country)" etc.).
separate_unsc <- str_split(readpdf3,
"(?=\\b(Dato[ ]Lim[ ]Jock[ ]Hoi[:]))|(?=\\b(Monsignor[ ]Hansen|The[ ]President|President[ ]Abbas|Aboul[ ]Gheit|King[ ]Philippe|The Secretary-General|Hadiza)[ ][(])|(?=\\b(The President|The Secretary-General)\\:)|(?=\\b(Mr|Ms|Mrs)[.][ ](Lowcock|Griffiths|Chambas|Robinson|Skoog|Mardini|Ruiz[ ]Massieu|Lacroix|Rajasingham|DiCarlo|Mueller|Raz|Matar|Salame|Licharz|Nakamitsu|Spleeters|Rama|Pedersen|Voronkov|Coninsx|Freij|Mladenov|Becker|Bachelet|De[ ]Roux|Sooka|Sori[-]Coulibaly|Costa[ ]Filho|Grau|Cevik|La[ ]Lime|Swan|Madeira|Smith|Zlauvinen|Mueller|Fore|Hennis[-]Plasschaert|Shearer|Sunday|Mohammed|Dieye|Alsaidy|Almasabi|De[ ]Almeida[ ]Filho|Waly|Tsolakis|Annadif|Vervaeke|Onanga[-]Anyanga)[:])|(?=\\b(Mr|Mrs|Ms)[.][ ]([A-Z]*[a-z]*|[A-Z]*[a-z]*[ ][A-Z]*[a-z]*|[A-Z]*[a-z]*[ ][A-Z]*[a-z]*[ ][A-Z]*[a-z]*|[A-Z]*[a-z]*[-][A-Z]*[a-z]*|[A-Z]*[a-z]*[A-Z]*[a-z]*|[A-Z]*[a-z]*.[A-Z]*[a-z]*|Pinto[ ]Lopes[ ]D.Alva)[ ][(])") %>% unlist()
# tibble speech
df <- tibble(content = separate_unsc)
# collapse false positives
#df$content[26] <- paste(df$content[26], df$content[27], sep = " ")
#df <- df %>% slice(-27)
# date
df$date <- str_extract(df$content[1], "[0-9]{1,2}[ ][A-Z]*[a-z]*[ ][0-9]{4}")
# spv
df$spv <- sub("S[/]PV[.]", "", str_extract(df$content[1], "S[/]PV[.][0-9]{4}"))
# spv if Resumption
#df$spv <- paste(df$spv, "Resumption1", sep = "")
# topic part 1
df$topic <- str_extract(df$content[1], "(?=S[/]PV[.][0-9]{4}[ ]*)(.*)(?=[ ]*[0-9]{2}[/][0-9]{2}/[0-9]{4})")
df$topic <- sub("S[/]PV[.][0-9]{4}[ ]*", "", df$topic)
# topic if resumption
#df$topic <- sub("S[/]PV[.][0-9]{4}[ ]*[ ][(]Resumption 1[)]", "", df$topic)
#df$topic <- sub("[(]Resumption 1[)]", "", df$topic)
# topic part 2
df$topic <- str_trim(df$topic)
# speaker part 1
df$speaker <- str_extract(df$content, "[^([(]|[:])]+")
df$speaker <- sub("[ ][(]", "", df$speaker)
# participant type part 1
df$participanttype <- str_extract(df$content, "(Monsignor[ ]Hansen|Dato[ ]Lim[ ]Jock[ ]Hoi|President[ ]Abbas|Aboul[ ]Gheit|King[ ]Philippe|Hadiza)|(The President|The Secretary-General)|((Mr|Ms|Mrs)[.][ ](Lowcock|Griffiths|Chambas|Robinson|Skoog|Mardini|Ruiz[ ]Massieu|Lacroix|Rajasingham|DiCarlo|Mueller|Raz|Matar|Salame|Licharz|Nakamitsu|Spleeters|Rama|Pedersen|Voronkov|Coninsx|Freij|Mladenov|Becker|Bachelet|De[ ]Roux|Sooka|Sori[-]Coulibaly|Costa[ ]Filho|Grau|Cevik|La[ ]Lime|Swan|Madeira|Smith|Zlauvinen|Mueller|Fore|Hennis[-]Plasschaert|Shearer|Sunday|Mohammed|Dieye|Alsaidy|Almasabi|De[ ]Almeida[ ]Filho|Waly|Tsolakis|Annadif|Vervaeke|Onanga[-]Anyanga))|((Mr|Mrs|Ms)[.][ ]([A-Z]*[a-z]*|[A-Z]*[a-z]*[ ][A-Z]*[a-z]*|[A-Z]*[a-z]*[ ][A-Z]*[a-z]*[ ][A-Z]*[a-z]*|[A-Z]*[a-z]*[-][A-Z]*[a-z]*|[A-Z]*[a-z]*[A-Z]*[a-z]*|[A-Z]*[a-z]*.[A-Z]*[a-z]*|Pinto[ ]Lopes[ ]D.Alva)[ ][()])")
df$participanttype <- sub("[ ][(]", "", df$participanttype)
# speaker part 2
df$speaker <- str_trim(df$speaker)
# The sitting President of the Council for this meeting (Belgium).
df$speaker[df$speaker %in% c("The President", "Mr. President")] <- 'Mr. Pecsteen de Buytswerve'
# speech number
df$speech <- 0:(nrow(df)-1)
# country
# Country is the parenthesized text after the speaker name; non-state
# speakers are then re-assigned by hand below.
df$country <- str_extract(df$content, "(?<=[(])[^)]+")
df$country[df$participanttype == "The President"] <- 'Belgium'
df$country[df$speaker %in% c("Mr. Chambas", "Mrs. Robinson", "The Secretary-General", "Mr. Ruiz Massieu", "Mr. Lacroix", "Mr. Griffiths",
"Mr. Rajasingham", "Ms. DiCarlo", "Ms. Mueller", "Mr. Lowcock", "Ms. Matar", "Mr. Salame", "Mr. Licharz",
"Mrs. Nakamitsu", "Mr. Spleeters", "Mr. Pedersen", "Mr. Voronkov", "Ms. Coninsx",
"Mr. Mladenov", "Ms. Bachelet", "Ms. Sori-Coulibaly", "Mr. Ndiaye", "Mr. Hilale", "Ms. La Lime",
"Mr. Swan", "Mr. Zlauvinen", "Ms. Mueller", "Ms. Fore", "Ms. Hennis-Plasschaert", "Mr. Shearer", "Mr. Dieye",
"Ms. Waly", "Ms. Gamba de Potgieter", "Mr. Annadif", "Mr. Huang Xia", "Mr. Onanga-Anyanga",
"Ms. Zerrougui")] <- 'UN'
# Maybe also representative
df$country[df$speaker %in% c("Mr. Costa Filho", "Mr. De Almeida Filho")] <- 'UN'
#
df$country[df$speaker %in% c("Mr. Mardini")] <- 'International Committee of the Red Cross'
df$country[df$speaker %in% c("Mrs. Raz")] <- 'Committee on the Exercise of the Inalienable Rights of the Palestinian People'
df$country[df$speaker %in% c("Monsignor Hansen")] <- 'Holy See'
df$country[df$speaker %in% c("Mr. Abdelaziz", "Mr. Aboul Gheit")] <- 'League of the Arab States'
df$country[df$speaker %in% c("Dato Lim Jock Hoi")] <- 'ASEAN'
df$country[df$speaker %in% c("Mr. Rama", "Ms. Grau", "Mr. Cevik")] <- 'OSCE'
df$country[df$speaker %in% c("Ms. Freij")] <- 'Individual'
df$country[df$speaker %in% c("President Abbas")] <- 'Palestine'
df$country[df$speaker %in% c("Ms. Becker")] <- 'Watchlist on Children and Armed Conflict'
df$country[df$speaker %in% c("Mr. Chergui", "Mr. Matondo", "Mr. Madeira", "Ms. Mohammed")] <- 'African Union'
df$country[df$speaker %in% c("King Philippe")] <- 'Belgium'
df$country[df$speaker %in% c("Mr. De Roux")] <- 'Commission for the Clarification of Truth, Coexistence and Non-Repetition of Colombia'
df$country[df$speaker %in% c("Ms. Sooka")] <- 'Foundation for Human Rights in South Africa'
df$country[df$speaker %in% c("Ms. Gilles")] <- 'Fondasyon Je Klere (FJKL)'
df$country[df$speaker %in% c("Mr. Smith")] <- 'Stockholm International Peace Research Institute'
df$country[df$speaker %in% c("Ms. Sunday")] <- "Women's Monthly Forum on Peace and Political Processes in South Sudan"
df$country[df$speaker %in% c("Ms. Carabali Rodallega")] <- 'Municipal Association of Women'
df$country[df$speaker %in% c("Ms. Alsaidy")] <- 'Médecins du Monde'
df$country[df$speaker %in% c("Ms. Almasabi")] <- 'Arab Human Rights Foundation'
df$country[df$speaker %in% c("Ms. Tsolakis")] <- 'Global Coalition to Protect Education from Attacks'
df$country[df$speaker %in% c("Hadiza", "Ms. Mayaki")] <- 'Youth Parliament of the Niger'
# participant type part 2
# Council members (2020 composition) are "Mentioned"; everyone else "Guest".
df$participanttype[df$speaker %in% c("Mr. President")] <- 'The President'
df$participanttype[df$country %in% c("Viet Nam",
"South Africa",
"Dominican Republic",
"Estonia",
"France",
"China",
"Germany",
"Indonesia",
"Niger",
"Russian Federation",
"Saint Vincent and the Grenadines",
"Tunisia",
"United Kingdom",
"United States of America")] <- 'Mentioned'
df$participanttype[!df$country %in% c("Viet Nam",
"Belgium",
"China",
"Dominican Republic",
"Estonia",
"France",
"Germany",
"Indonesia",
"Niger",
"Russian Federation",
"Saint Vincent and the Grenadines",
"South Africa",
"Tunisia",
"United Kingdom",
"United States of America")] <- 'Guest'
# day, month, year
df$date2 <- dmy(df$date)
df$year <- year(df$date2)
df$month <- month(df$date2)
df$day <- day(df$date2)
df <- df %>% select(-date2)
# filename
df$filename <- paste("UNSC_",df$year,"_SPV.",df$spv,"_spch",formatC(0:(length(df$content)-1), digits = 2, flag = "0"),".txt", sep = "")
# basename
df$basename <- sub("_spch[0-9]{3}.txt", "", df$filename)
# role in un
df$role_in_un <- ""
# types, tokens, sentences
df$types <- ntype(df$content)
df$tokens <- ntoken(df$content)
df$sentences <- nsentence(df$content)
# Remove headers and footers
df$content <- gsub("[ ][ ][0-9]{1,2}[/][0-9]{1,2}[ ][ ]", " ", df$content)
df$content <- gsub("[ ][ ]20-03982[ ]", " ", df$content)
#df$content <- gsub("[(]Resumption[ ]1[)][ ]", "", df$content)
df$content <- gsub("S[/]PV[.]8724 The situation in Guinea-Bissau 14[/]02[/]2020", "", df$content)
df$content <- gsub("14[/]02[/]2020 The situation in Guinea-Bissau S[/]PV[.]8724", "", df$content)
# footer test (exploratory; left commented out)
x <- str_extract_all(df$content, "[ ][ ][A-Z]*[a-z]*[0-9]*[ ][ ]")
x <- str_extract_all(df$content, "TA")
#df$content <- gsub("[ ]TAS[ ]", "", df$content)
#
df$content <- str_trim(df$content)
df$content <- gsub("[ ]{1,}", " ", df$content)
# remove speech 0
df <- df %>% slice(-1)
# export
for(i in 1:length(df$content)){write(df$content[i], file = df$filename[i])}
# update base
#load("speeches_meta/unsc_2020.RData")
unsc_2020 <- rbind(unsc_2020, df)
save(unsc_2020, file = "speeches_meta/unsc_2020.RData")
# possible regex for countries: (?<=[(])[^)]+
# check the speakers' names
|
3e60d722b4a396facfd9216ad2606fb0809c3a22
|
42289239f68be6a947a917bb88cd77443cc4e762
|
/functions/plot.min.elec.ambTemp.R
|
129cd4a5252bdf090e85c3d44676c31d59354fbc
|
[] |
no_license
|
Ness2/ecogenie
|
7802e8f8b2227ea5ccffbe56b25a1aefdf6c031d
|
004bf5d61294e82d0d8c8d3bd590f26e5ff3dadf
|
refs/heads/master
| 2021-04-06T08:21:09.876443
| 2018-05-01T13:56:50
| 2018-05-01T13:56:50
| 125,255,976
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,829
|
r
|
plot.min.elec.ambTemp.R
|
#####################################################################################
#
# ecogenie: plot by minute, electricity and ambient temperature versus time
#
#####################################################################################
# EcoGenie Plot
# ecogenie: per-minute electricity consumption and (scaled) ambient
# temperature versus time, one ggplot per energy-year.
#
# yearStart, yearEnd: starting years (as strings) of the first and last
#   energy-years to plot; an energy-year "Y_<start>_<end>" runs into the
#   following calendar year.
# Returns a named list of ggplot objects, one per energy-year.
#
# NOTE(review): relies on globals `clean.data`, `missing.data`,
# `EcoGenieTheme`, and the colour values `aquamarine`, `darkblue`, `darkred`
# (unquoted, so they must be objects defined elsewhere) -- confirm before reuse.
plot.min.elec.ambTemp <- function(yearStart = "2012", yearEnd = "2017") {
  yearIndex <- as.numeric(yearStart):as.numeric(yearEnd)  # energy-year starting years
  # Preallocate both containers.  The original used `list(length = n)`,
  # which builds a one-element list named "length" and then grows it
  # implicitly on assignment.
  tmp.list.1 <- vector("list", length(yearIndex))  # energy-year names
  tmp.list.2 <- vector("list", length(yearIndex))  # one plot per energy-year
  for (i in seq_along(yearIndex)) {
    yearIndexEnd <- yearIndex[[i]] + 1  # an energy-year ends the year after it starts
    tmp.list.1[[i]] <- paste("Y", yearIndex[[i]], yearIndexEnd, sep = "_")
    # NOTE: clean.data should have the same years as this plot function
    tmp.list.2[[i]] <- ggplot(clean.data[[i]], aes(x = iteration_stamp)) +
      geom_line(mapping = aes(y = electricity),
                colour = aquamarine,
                size = 0.8,
                group = 1,
                alpha = 0.43
      ) +
      geom_point(mapping = aes(y = electricity),
                 colour = darkblue,
                 size = 1.33,
                 group = 1,
                 alpha = 0.37
      ) +
      # Temperature is drawn on the electricity axis after dividing by 333;
      # the secondary axis below multiplies by the same factor so its labels
      # read in degrees Celsius.  The 333 is a representative scaling factor,
      # not a physical constant.
      geom_point(mapping = aes(y = ambient_temperature/333),
                 colour = darkred,
                 size = 0.1,
                 group = 1,
                 alpha = 0.008
      ) +
      # NOTE: missing.data should have the same years as this plot function.
      # Yellow rectangles flag spans of missing data.
      geom_rect(data = missing.data[[i]],
                xmin = missing.data[[i]]$iteration_stamp,  # where the rectangle starts
                xmax = missing.data[[i]]$end,              # gives the rectangle an area
                ymin = -Inf,
                ymax = +Inf,
                fill = "yellow",
                alpha = 0.8,
                inherit.aes = FALSE
      ) +
      scale_y_continuous(sec.axis = sec_axis(~.*333, name = "Ambient Temperature (deg Celsius)")
      ) +
      labs(title = "Correlation between ambient temperature and electricity consumption",
           x = "Time (min)",
           y = "Electricity Consumption (kWh)",
           caption = "EcoGenie"
      ) +
      EcoGenieTheme
  }
  names(tmp.list.2) <- tmp.list.1  # name each plot after its energy-year
  tmp.list.2
}
|
93fd68b2543b5b475d98faea2f560b1f041202dc
|
bcf7553b47d6d1c444d7310fab129a5ddd13c818
|
/zindi.R
|
b26d55043844b1e8066d4b1207e9fcb540ce5f2a
|
[] |
no_license
|
mousaa32/zindi_competition
|
f6c1a8bc0e9a1704f5e39ffc46b53a53038f4143
|
721a0d309c3904123cc11cc7dcabb55c53f7ed7b
|
refs/heads/master
| 2020-12-20T17:15:45.789013
| 2020-01-25T09:48:58
| 2020-01-25T09:48:58
| 236,151,505
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 749
|
r
|
zindi.R
|
# Exploratory script for the Zindi delivery-time competition: load the
# training CSV, drop columns, inspect missingness, impute Temperature, and
# examine correlations.
# NOTE(review): depends on interactive file.choose() and on attach(), so it
# is meant to be run by hand, not source()d.
train=read.csv(file.choose(),header = TRUE,sep = ",",quote="\"")
train
train1=train[,-3]
train1
# NOTE(review): this summary() call precedes attach(train1); the column is
# only visible if it was attached in an earlier session -- confirm ordering.
summary(Time.from.Pickup.to.Arrival)
attach(train1)
# number of temperature and precipitation values left unrecorded (NA)
sum(is.na(Temperature))
sum(is.na(Precipitation.in.millimeters))
# replace the NAs with the mean
library(dplyr)
# 23.26 is presumably the precomputed column mean -- confirm against summary()
train1=train1 %>%
dplyr::mutate (Temperature = ifelse(is.na(Temperature),23.26, Temperature))
# alternative: impute with the median instead (kept commented out)
#train1=train1 %>%
#dplyr::mutate (Temperature = ifelse(is.na(Temperature),median(Temperature), Temperature))
summary(train1)
a=median(Temperature)
View(train1)
train2=train1[,-22]
View(train2)
str(train2)
# keep numeric columns only so cor() below works
train3=train2[,-c(1,2,4,7,10,13,16,19,26)]
attach(train3)
cor(train3)
hist(Temperature)
|
feb14f3db93dc4073595a70f299b9e7dce48d0f1
|
970095a9f7a3c2c2d683b63fd6708e35b9be99b3
|
/man/is_ggthemr.Rd
|
3ec9db8ee0ada0b4a0424bc9c5ae4ca31cd62719
|
[] |
no_license
|
fxcebx/ggthemr
|
dbdca484f213d9c4a7798d9950611344420cb751
|
8f9ec298c3d890942e77b38d7fd090e41d55002a
|
refs/heads/master
| 2021-01-22T14:55:51.447304
| 2014-09-13T10:53:18
| 2014-09-13T10:53:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
rd
|
is_ggthemr.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{is_ggthemr}
\alias{is_ggthemr}
\title{Is a ggthemr object}
\usage{
is_ggthemr(x)
}
\arguments{
\item{x}{object to test.}
}
\description{
Test an object to determine if it is of class ggthemr.
}
\examples{
themr <- ggthemr('sea')
is_ggthemr(themr)
}
\author{
Ciaran Tobin
}
|
65806049b150e3e00f65d005947102783725451e
|
9191588c3b7796015ee122631d4abf34b6f95c57
|
/r_code/hapmap_asw_results.R
|
d44252ef3594e0cad730630ca798542fec577f8b
|
[] |
no_license
|
mchughc/hapmap_analysis
|
7b3f72e73d856c78da9097a1e1e4bcd460fe3d8c
|
e3b22e36e025b62032a02db16177c5605a9b59d0
|
refs/heads/master
| 2021-01-25T12:01:44.956468
| 2015-11-16T22:23:29
| 2015-11-16T22:23:29
| 15,750,864
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,487
|
r
|
hapmap_asw_results.R
|
# hapmap ASW ancestry results
## load in all data
asw <- get(load("hapmap_aswOnly_estimates.RData"))
dim(asw) # 87 76
# check proportion native american --
png("../ancestry_differences/plots/asw_chrall_chrX_hgdp.png")
plot(asw[,"chrAll.hgdp"],asw[,"chrX.hgdp"],xlab="All Autosomal SNPs",ylab="X Chromosome",
type="n",main="Proportion of HGDP Americas Ancestry")
points(asw[asw$unrelated,"chrAll.hgdp"],asw[asw$unrelated,"chrX.hgdp"],col="red")
points(asw[!asw$unrelated,"chrAll.hgdp"],asw[!asw$unrelated,"chrX.hgdp"],col="black")
legend("bottomright",c("Unrelated"),col="red",pch=1)
abline(0,1,lty=2,col="grey")
dev.off()
# make a barplot of the x chr results
png("../ancestry_differences/plots/asw_chrx_frappe.png")
xchr <- asw[,c("chrX.ceu","chrX.yri","chrX.hgdp")]
xchr <- xchr[order(xchr$chrX.ceu,xchr$chrX.hgdp,xchr$chrX.yri),]
tt <- t(xchr)
colnames(tt) <- rep("",ncol(tt))
barplot(tt,col=c("blue","red","green"),xlab="Individual",main="HapMap ASW Estimated Ancestry\nX Chromosome")
dev.off()
# ---- ASW: X-chromosome vs autosomal ancestry scatter plots and t-tests ----
# NOTE(review): `asw` is built earlier in this script (outside this section);
# it appears to hold one row per HapMap ASW subject with per-chromosome
# ancestry-proportion columns and a logical `unrelated` flag -- confirm.
# Regress X-chr Native American (HGDP) ancestry on genome-wide autosomal
# ancestry among the 45 unrelated subjects.
l <- lm(asw[asw$unrelated,"chrX.hgdp"]~asw[asw$unrelated,"chrAll.hgdp"])
png("../ancestry_differences/plots/asw_chrall_chrX_hgdp_unrel.png")
plot(asw[,"chrAll.hgdp"],asw[,"chrX.hgdp"],xlab="All Autosomal SNPs",ylab="X Chromosome",
type="n",main="Proportion of HGDP Americas Ancestry\nFor 45 Unrelated HapMap ASW")
points(asw[asw$unrelated,"chrAll.hgdp"],asw[asw$unrelated,"chrX.hgdp"],col="black")
#points(mxl[!mxl$unrelated,"chrAll.hgdp"],mxl[!mxl$unrelated,"chrX.hgdp"],col="black")
#legend("bottomright",c("Unrelated"),col="red",pch=1)
# dashed grey: identity line; solid: fitted regression line
abline(0,1,lty=2,col="grey")
abline(summary(l)$coef[1,1],summary(l)$coef[2,1])
dev.off()
# Screen-only plot of the absolute autosome/X-chr difference (no file device).
plot(abs(asw[,"chrAll.hgdp"]-asw[,"chrX.hgdp"]),
type="n",main="Proportion of HGDP Americas Ancestry\nFor 45 Unrelated HapMap ASW")
points(abs(asw[asw$unrelated,"chrAll.hgdp"]-asw[asw$unrelated,"chrX.hgdp"]),col="black")
# chr15 vs chr8 HGDP ancestry; related subjects over-plotted in black.
png("../ancestry_differences/plots/asw_chr15_chr8_hgdp.png")
plot(asw[,"chr15.hgdp"],asw[,"chr8.hgdp"],xlab="Chromosome 15",ylab="Chromosome 8",
type="n",main="Proportion of HGDP Americas Ancestry")
points(asw[asw$unrelated,"chr15.hgdp"],asw[asw$unrelated,"chr8.hgdp"],col="red")
points(asw[!asw$unrelated,"chr15.hgdp"],asw[!asw$unrelated,"chr8.hgdp"],col="black")
legend("bottomright",c("Unrelated"),col="red",pch=1)
abline(0,1)
dev.off()
# chr15 vs chrX HGDP ancestry, same layout as above.
png("../ancestry_differences/plots/asw_chr15_chrX_hgdp.png")
plot(asw[,"chr15.hgdp"],asw[,"chrX.hgdp"],xlab="Chromosome 15",ylab="X Chromosome",
type="n",main="Proportion of HGDP Americas Ancestry")
points(asw[asw$unrelated,"chr15.hgdp"],asw[asw$unrelated,"chrX.hgdp"],col="red")
points(asw[!asw$unrelated,"chr15.hgdp"],asw[!asw$unrelated,"chrX.hgdp"],col="black")
legend("bottomright",c("Unrelated"),col="red",pch=1)
abline(0,1)
dev.off()
# Welch two-sample t-tests (X-chr vs autosomal, chr8 vs chr15); the
# observed console output was recorded inline below by the original author.
var(asw$chrX.hgdp[asw$unrelated]) # 0.00128
var(asw$chrAll.hgdp[asw$unrelated]) # 0.001000
t.test(asw$chrX.hgdp[asw$unrelated],asw$chrAll.hgdp[asw$unrelated])
#t = -0.0931, df = 86.733, p-value = 0.9261
#alternative hypothesis: true difference in means is not equal to 0
#95 percent confidence interval:
# -0.01479572 0.01347215
#sample estimates:
# mean of x mean of y
#0.02187775 0.02253954
var(asw$chr8.hgdp[asw$unrelated]) # 0.002596
var(asw$chr15.hgdp[asw$unrelated]) # 0.0031869
t.test(asw$chr8.hgdp[asw$unrelated],asw$chr15.hgdp[asw$unrelated])
#t = -0.3246, df = 87.091, p-value = 0.7463
#alternative hypothesis: true difference in means is not equal to 0
#95 percent confidence interval:
# -0.02621073 0.01885227
#sample estimates:
# mean of x mean of y
#0.02298864 0.02666786
##################
# do analysis comparing each chr to the pool of all other chrs
#
# Helper: paired t-test of each chromosome's ancestry proportion against
# the row-wise mean of all other chromosomes, among unrelated subjects.
#   dat         per-subject ancestry data frame with a logical `unrelated`
#               column; ancestry columns are interleaved in steps of 3
#               (euro = 4, african = 5, native american = 6)
#   first_col   column index of chromosome 1 for the ancestry of interest
#   ncol_offset number of trailing non-chromosome columns to exclude
#   n_chr       number of chromosomes tested (22 autosomes + X)
# Returns a data frame (chr, pvalue, bonf_pvalue); the Bonferroni column is
# pvalue * n_chr, deliberately left uncapped here (capped before tabulating).
chr_vs_pool_ttests <- function(dat, first_col, ncol_offset = 4, n_chr = 23) {
  out <- data.frame(matrix(NA, nrow = n_chr, ncol = 3))
  names(out) <- c("chr", "pvalue", "bonf_pvalue")
  sm <- dat[dat$unrelated, seq(from = first_col, to = ncol(dat) - ncol_offset, by = 3)]
  for (i in seq_len(n_chr)) {
    pool <- apply(sm[, -i], 1, mean)
    tt <- t.test(sm[, i], pool, paired = TRUE)
    out[i, ] <- c(i, tt$p.value, tt$p.value * n_chr)
  }
  out
}
res <- chr_vs_pool_ttests(asw, first_col = 6)      # Native American (HGDP)
res # all bonf p-vals are >1!
resEuro <- chr_vs_pool_ttests(asw, first_col = 4)  # European (CEU)
resEuro # still nothing
resAfr <- chr_vs_pool_ttests(asw, first_col = 5)   # African (YRI)
resAfr # nothing!
library(xtable)
# cap the Bonferroni-adjusted p-values at 1 before tabulating
resAfr$bonf_pvalue[resAfr$bonf_pvalue>1]<-1
resEuro$bonf_pvalue[resEuro$bonf_pvalue>1]<-1
res$bonf_pvalue[res$bonf_pvalue>1]<-1
xtable(cbind(res[,2:3],resEuro[,2:3],resAfr[,2:3]),digits=3)
# ---- Plots of the per-chromosome paired t-test p-values (ASW) ----
# Raw p-values, one point per chromosome and ancestry component.
#png("paired_ttest_pools.png")
pdf("../ancestry_differences/plots/asw_paired_ttest_pools.pdf")
plot(res$chr,-log10(res$pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,
main="P-values from Paired T-Tests")
points(resEuro$chr,-log10(resEuro$pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$pvalue),pch=3,col="red")
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.8)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
legend("topleft",c("European","African","Native American"),pch=c(2,3,19),
col=c("blue","red","green"))
dev.off()
# Same plot with Bonferroni-corrected p-values.
#png("paired_ttest_bonfCorr_pools.png")
pdf("../ancestry_differences/plots/asw_paired_ttest_bonfCorr_pools.pdf")
plot(res$chr,-log10(res$bonf_pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,
main="Bonferroni Corrected P-values from Paired T-Tests")
points(resEuro$chr,-log10(resEuro$bonf_pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$bonf_pvalue),pch=3,col="red")
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.8)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
legend("topleft",c("European","African","Native American"),pch=c(2,3,19),
col=c("blue","red","green"))
dev.off()
# Raw and corrected p-values stacked in a two-panel figure; grey dashed
# line marks the 0.05 significance threshold.
#png("paired_ttest_bonfAndNot.png")
pdf("../ancestry_differences/plots/asw_paired_ttest_bonfAndNot.pdf")
par(mfrow=c(2,1))
plot(res$chr,-log10(res$pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,
main="P-values from Paired T-Tests")
points(resEuro$chr,-log10(resEuro$pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$pvalue),pch=3,col="red")
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
legend("topleft",c("European","African","Native American"),pch=c(2,3,19),
col=c("blue","red","green"),cex=0.8)
plot(res$chr,-log10(res$bonf_pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,
main="Bonferroni Corrected P-values from Paired T-Tests")
points(resEuro$chr,-log10(resEuro$bonf_pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$bonf_pvalue),pch=3,col="red")
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
dev.off()
#### make boxplots of ancestry proportions for each ancestry subpop
# Columns 70-72 are X-chr (euro, afr, native am) and 73-75 are autosomal
# (euro, afr, native am) proportions; filled boxes = autosomes, outlined
# boxes = X chromosome.
pdf("../ancestry_differences/plots/asw_boxplot_ancestryProp.pdf",width=10)
boxplot(asw[asw$unrelated,c(73,70,74,71,75,72)],names=c("Euro, Auto","Euro, X Chr","Afr, Auto","Afr, X Chr",
"Native Am, Auto","Native Am, X Chr"),
ylab="Proportion Ancestry",col=c("purple",NA,"cyan",NA,"orange",NA),
border=c("black","purple","black","cyan","black","orange"),cex.lab=0.85,
main="Proportion Ancestry in 45 Unrelated ASW Subjects\nFor Autosomes and X Chr Separately")
dev.off()
## make boxplots of both ASW and MXL samples together
# make sure x-axis labels all show up
pdf("../ancestry_differences/plots/asw_mxl_boxplot_ancestryProp.pdf",width=11,height=10)
par( mai=c(0.5, 0.65, 0.4, 0.15), mgp=c(2, 0.5, 0), tck=-0.03 ,mfrow=c(2,1))
#par(mfrow=c(2,1))
boxplot(asw[asw$unrelated,c(73,70,74,71,75,72)],names=c("Euro, Auto","Euro, X Chr","Afr, Auto","Afr, X Chr",
"Native Am, Auto","Native Am, X Chr"),
ylab="Proportion Ancestry",col=c("purple",NA,"cyan",NA,"orange",NA),
border=c("black","purple","black","cyan","black","orange"))
mtext("A", side=3, line=0.75,adj=0,cex=1.3)
# Load the matching MXL estimates for panel B (same column layout assumed).
mxl <- get(load("hapmap_mxlOnly_estimates.RData"))
dim(mxl) # 86 78
boxplot(mxl[mxl$unrelated,c(73,70,74,71,75,72)],names=c("Euro, Auto","Euro, X Chr","Afr, Auto","Afr, X Chr",
"Native Am, Auto","Native Am, X Chr"),
ylab="Proportion Ancestry",col=c("purple",NA,"cyan",NA,"orange",NA),
border=c("black","purple","black","cyan","black","orange"))
mtext("B", side=3, line=0.75,adj=0,cex=1.3)
dev.off()
#### make "manhattan" plot of admixture res by chromosome
# Column index vectors for each ancestry component across chromosomes.
euro <- seq(from=4,to=71,by=3)
names(asw[euro]) # good!
afr <- euro+1
nam <- afr+1
names(asw[afr]); names(asw[nam])
# so in order from chr 1-22, then x
# will have different order of samples depending on ancestry for each chr
# only plot the 45 unrelated
## do just autosomes and x chr next to eachother
# take results for chrAll and Xchr
# cols 73-75, 70-72 are xchr
pdf("../asw_frappe_auto_xChr.pdf",width=14)
par(mfrow=c(1,2))
# Sort subjects by autosomal European ancestry, then stack the three
# components per individual (frappe-style barplot).
toPlOrd <- order(asw[asw$unrelated,73])
toPl1 <- asw[asw$unrelated,73][toPlOrd]
toPl2 <- asw[asw$unrelated,74][toPlOrd]
toPl3 <- asw[asw$unrelated,75][toPlOrd]
# autosomes
barplot(height=rbind(toPl1,toPl2,toPl3),col=c("blue","red","green"),space=0,axes=F,border=T,xlab="Individual",
main="HapMap ASW Autosomal Ancestry")
legend("left",c("European","African","Native American"),lty=1,col=c("blue","red","green"),lwd=6,bg="white")
axis(2)
#xchr
toPlOrd <- order(asw[asw$unrelated,70])
toPl1 <- asw[asw$unrelated,70][toPlOrd]
toPl2 <- asw[asw$unrelated,71][toPlOrd]
toPl3 <- asw[asw$unrelated,72][toPlOrd]
barplot(height=rbind(toPl1,toPl2,toPl3),col=c("blue","red","green"),space=0,axes=F,border=T,xlab="Individual",
main="HapMap ASW X Chromsome Ancestry")
axis(2)
dev.off()
# x chr summary statistics
# (observed console output recorded inline by the original author)
range(asw[asw$unrelated,70]); sd(asw[asw$unrelated,70]) # euro
#[1] 1.11722e-12 6.65364e-01
#[1] 0.1435466
range(asw[asw$unrelated,71]); sd(asw[asw$unrelated,71]) # afr
#[1] 0.257434 0.999999
#[1] 0.1528913
range(asw[asw$unrelated,72]); sd(asw[asw$unrelated,72]) # nAm
#[1] 2.32179e-26 1.89480e-01
#[1] 0.03570911
# autosomal summary statistics
range(asw[asw$unrelated,73]); sd(asw[asw$unrelated,73]) # euro
# 0.06313174 0.39098417
# [1] 0.07488461
range(asw[asw$unrelated,74]); sd(asw[asw$unrelated,74]) # afr
# 0.5849860 0.9193735
# [1] 0.08082176
range(asw[asw$unrelated,75]); sd(asw[asw$unrelated,75]) # nAm
# 0.006093686 0.216595391
# [1] 0.03162537
mean(asw[asw$unrelated,72]) # 0.0219
mean(asw[asw$unrelated,71]) # 0.8238
####### try simple t-test
# pool all autosomal ancestries together, compare w all x chr ancestries
# a simple t-test on the ancestries together, NOT paired
# Native American
x <- asw[asw$unrelated,"chrAll.hgdp"]
y <- asw[asw$unrelated,"chrX.hgdp"]
t <- t.test(x,y)
t$p.value # 0.9260633
# African
x <- asw[asw$unrelated,"chrAll.yri"]
y <- asw[asw$unrelated,"chrX.yri"]
t <- t.test(x,y)
t$p.value # 0.2376916
# European
x <- asw[asw$unrelated,"chrAll.ceu"]
y <- asw[asw$unrelated,"chrX.ceu"]
t <- t.test(x,y)
t$p.value # 0.2174325
# NOTE(review): clears the whole workspace on purpose before the second
# analysis pass below; generally discouraged in scripts.
rm(list=ls())
#####
# Remake ttest results without plot titles, in one panel
# Reload a clean copy of the ASW estimates (workspace was cleared above).
asw <- get(load("hapmap_aswOnly_estimates.RData"))
dim(asw) # 87 76
# do analysis comparing each chr to the pool of all other chrs
#
# Helper: paired t-test of each chromosome's ancestry proportion against
# the row-wise mean of all other chromosomes, among unrelated subjects.
#   dat         per-subject ancestry data frame with a logical `unrelated`
#               column; ancestry columns are interleaved in steps of 3
#               (euro = 4, african = 5, native american = 6)
#   first_col   column index of chromosome 1 for the ancestry of interest
#   ncol_offset number of trailing non-chromosome columns to exclude
#   n_chr       number of chromosomes tested (22 autosomes + X)
# Returns a data frame (chr, pvalue, bonf_pvalue); the Bonferroni column is
# pvalue * n_chr, uncapped here (capped below before plotting).
chr_vs_pool_ttests <- function(dat, first_col, ncol_offset = 4, n_chr = 23) {
  out <- data.frame(matrix(NA, nrow = n_chr, ncol = 3))
  names(out) <- c("chr", "pvalue", "bonf_pvalue")
  sm <- dat[dat$unrelated, seq(from = first_col, to = ncol(dat) - ncol_offset, by = 3)]
  for (i in seq_len(n_chr)) {
    pool <- apply(sm[, -i], 1, mean)
    tt <- t.test(sm[, i], pool, paired = TRUE)
    out[i, ] <- c(i, tt$p.value, tt$p.value * n_chr)
  }
  out
}
res <- chr_vs_pool_ttests(asw, first_col = 6)      # Native American (HGDP)
res # all bonf p-vals are >1!
resEuro <- chr_vs_pool_ttests(asw, first_col = 4)  # European (CEU)
resEuro # still nothing
resAfr <- chr_vs_pool_ttests(asw, first_col = 5)   # African (YRI)
resAfr # nothing!
# cap the Bonferroni-adjusted p-values at 1 before plotting
res$bonf_pvalue[res$bonf_pvalue>1] <- 1
resAfr$bonf_pvalue[resAfr$bonf_pvalue>1] <- 1
resEuro$bonf_pvalue[resEuro$bonf_pvalue>1] <- 1
# ---- Four-panel figure: ASW (A, B) then MXL (C, D below) ----
# Panels A/B: ASW raw and Bonferroni-corrected p-values, no plot titles.
pdf("../ancestry_differences/plots/asw_paired_ttest_bonfAndNot_bothPops.pdf")
par( mai=c(0.5, 0.65, 0.4, 0.15), mgp=c(2, 0.5, 0), tck=-0.03 ,mfrow=c(2,2))
plot(res$chr,-log10(res$pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,ylim=c(0,1.5))
points(resEuro$chr,-log10(resEuro$pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$pvalue),pch=3,col="red")
# grey dashed line: 0.05 significance threshold
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
mtext("A", side=3, line=0.75,adj=0,cex=1.3)
plot(res$chr,-log10(res$bonf_pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,ylim=c(0,1.5))
points(resEuro$chr,-log10(resEuro$bonf_pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$bonf_pvalue),pch=3,col="red")
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:5,labels=c(1:5),cex.axis=0.8)
mtext("B", side=3, line=0.75,adj=0,cex=1.3)
### now add in MXL results
mxl <- get(load("hapmap_mxlOnly_estimates.RData"))
dim(mxl) # 86 78
# Same per-chromosome paired t-tests for MXL. Note mxl has 2 (not 4)
# trailing non-chromosome columns, hence ncol_offset = 2.
#
# Helper: paired t-test of each chromosome's ancestry proportion against
# the row-wise mean of all other chromosomes, among unrelated subjects.
# (Defined locally so this section is self-contained.)
chr_vs_pool_ttests <- function(dat, first_col, ncol_offset = 4, n_chr = 23) {
  out <- data.frame(matrix(NA, nrow = n_chr, ncol = 3))
  names(out) <- c("chr", "pvalue", "bonf_pvalue")
  sm <- dat[dat$unrelated, seq(from = first_col, to = ncol(dat) - ncol_offset, by = 3)]
  for (i in seq_len(n_chr)) {
    pool <- apply(sm[, -i], 1, mean)
    tt <- t.test(sm[, i], pool, paired = TRUE)
    out[i, ] <- c(i, tt$p.value, tt$p.value * n_chr)
  }
  out
}
res <- chr_vs_pool_ttests(mxl, first_col = 6, ncol_offset = 2)      # Native American
res # xchr bonf pvalue: 7.400872e-05
resEuro <- chr_vs_pool_ttests(mxl, first_col = 4, ncol_offset = 2)  # European
resEuro # xchr bonf pvalue: 7.248653e-05
resAfr <- chr_vs_pool_ttests(mxl, first_col = 5, ncol_offset = 2)   # African
resAfr # nothing!
# cap the Bonferroni-adjusted p-values at 1 before plotting
res$bonf_pvalue[res$bonf_pvalue>1] <- 1
resAfr$bonf_pvalue[resAfr$bonf_pvalue>1] <- 1
resEuro$bonf_pvalue[resEuro$bonf_pvalue>1] <- 1
# Panels C/D: MXL raw and Bonferroni-corrected p-values (wider y-range
# because the X chromosome is strongly significant in MXL).
plot(res$chr,-log10(res$pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,ylim=c(0,7))
points(resEuro$chr,-log10(resEuro$pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$pvalue),pch=3,col="red")
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:7,labels=c(1:7),cex.axis=0.8)
legend("topleft",c("European","African","Native American"),pch=c(2,3,19),
col=c("blue","red","green"),cex=0.8)
mtext("C", side=3, line=0.75,adj=0,cex=1.3)
plot(res$chr,-log10(res$bonf_pvalue),pch=19,col="green",xlab="Chromosome",axes=FALSE,
ylab=expression(paste(-log[10],"(p-value)",sep="")),frame.plot=TRUE,ylim=c(-0.01,5.8))
points(resEuro$chr,-log10(resEuro$bonf_pvalue),pch=2,col="blue")
points(resAfr$chr,-log10(resAfr$bonf_pvalue),pch=3,col="red")
abline(h=-log10(0.05),col="gray",lty=2)
axis(1,at=1:23,labels=c(1:22,"X"),cex.axis=0.75)
axis(2,at=1:6,labels=c(1:6),cex.axis=0.8)
mtext("D", side=3, line=0.75,adj=0,cex=1.3)
dev.off()
|
6cd7fe4f832d751efc37c1e60b730fff711f9878
|
cc2bd8bb7a92aad4b7b4186c58a3ce0a00aa9f97
|
/R/run-code.R
|
8a3c538ea1f1055ec2fd4a0f32b6461e05bc407e
|
[
"MIT"
] |
permissive
|
d-edison/RSOQuestions
|
419bb486bd9579a49513e0a8e5db60939df18c8f
|
32d1e72d8e99d2c80d2f00101025f49abaf2d791
|
refs/heads/master
| 2020-05-07T15:19:35.266164
| 2019-04-10T17:47:04
| 2019-04-10T17:47:04
| 180,631,894
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 598
|
r
|
run-code.R
|
#' Run A Question's Code
#'
#' This function takes all formatted code blocks and attempts to run them in the
#' global environment.
#'
#' @details S3 generic; dispatch is on \code{class(q)}. See
#'   \code{run_code.SOQuestion} for the default implementation.
#'
#' @param q An object of class \code{SOQuestion}
#'
#' @return results and errors, invisibly
#' @export
#'
#' @examples
#' q <- get_question(54028838)
#' run_code(q)
run_code <- function(q){
UseMethod("run_code")
}
#' @export
run_code.SOQuestion <- function(q){
  # Evaluate each extracted code block in the global environment; safely()
  # captures errors as list(result =, error =) instead of aborting.
  res <- purrr::map(q$code, purrr::safely(eval), envir = .GlobalEnv)
  # Attach the originating expression to each result for context.
  for (i in seq_along(res)){
    res[[i]]$call <- q$code[[i]]
  }
  # Drop NULL entries (e.g. a NULL error on success). Call purrr directly
  # rather than via the magrittr %>% pipe, which is not guaranteed to be
  # attached in a package context.
  res <- purrr::map(res, purrr::compact)
  invisible(res)
}
|
e23f4f97243f8d1df4c2dc147b1f16787f738081
|
66ad2ca9bc3b25ac120c00346eb4f0ff391ad7e9
|
/workflow/scripts/report_maker.r
|
1a6989606cc07eb1635290875c542f156d19e621
|
[] |
no_license
|
functional-dark-side/agnostos-wf
|
258681b3794c3f44cdc0f72e99dd9dba15c1c857
|
96ead910150ec1f3d2ffa1ac1e23d0e18e41ee09
|
refs/heads/master
| 2023-08-19T10:36:36.914637
| 2023-06-27T09:13:19
| 2023-06-27T09:13:19
| 251,011,742
| 41
| 16
| null | 2022-06-16T11:09:05
| 2020-03-29T11:04:00
|
R
|
UTF-8
|
R
| false
| false
| 1,774
|
r
|
report_maker.r
|
#!/usr/bin/env Rscript
# Render the workflow report (.Rmd) for a given input/output configuration.
# All command-line options are required; missing ones abort with the help text.
library(tidyverse)
library(knitr)
library(rmarkdown)
library(optparse)
# Script command line options ---------------------------------------------
option_list <- list(
  make_option(c("-b", "--basedir"), type = "character", default = NULL,
              help = "Main workflow folder path", metavar = "character"),
  make_option(c("-r", "--outdir"), type = "character", default = NULL,
              help = "Result folder path", metavar = "character"),
  make_option(c("-n", "--name"), type = "character", default = NULL,
              help = "Workflow input data name", metavar = "character"),
  make_option(c("-i", "--input"), type = "character", default = NULL,
              help = "Workflow input data path", metavar = "character"),
  make_option(c("-s", "--stage"), type = "character", default = NULL,
              help = "data stage", metavar = "character"),
  make_option(c("-w", "--wf_report"), type = "character", default = NULL,
              help = "Workflow ireport .Rmd file", metavar = "character"),
  # default is NULL (was 1) so the required-argument check below can fire
  make_option(c("-o", "--output"), type = "character", default = NULL,
              help = "Workflow report output path", metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
# Scalar conditions: use short-circuiting || rather than element-wise |.
if (is.null(opt$basedir) || is.null(opt$outdir) || is.null(opt$wf_report) ||
    is.null(opt$input) || is.null(opt$name) || is.null(opt$stage) || is.null(opt$output)){
  print_help(opt_parser)
  stop("You need to provide the path to the main input and output folders\n", call.=FALSE)
}
# Render the report with the supplied parameters.
rmarkdown::render(opt$wf_report,
                  params = list(directory = opt$basedir, name_data = opt$name,
                                input_data=opt$input, stage=opt$stage),
                  output_file = opt$output,
                  output_dir = opt$outdir
)
|
333b536edb03d13ea312bffc935c3b36c8b67980
|
fab27c3ae407b98c6f0d4a5bb5640232ddc8322f
|
/PS5/cTree.R
|
173599afdf590d66d591a056d45806cdfdf6bd3a
|
[] |
no_license
|
denipanova/adcomp
|
7af9049a62d72fef97f7e732d942760e5933afea
|
e4246815b8be5eb219846fb2f088dd7a91b9a48e
|
refs/heads/master
| 2021-01-10T11:58:04.475738
| 2016-03-08T23:25:50
| 2016-03-08T23:25:50
| 49,706,592
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,361
|
r
|
cTree.R
|
#Define Treshold function for multiple classes and multiple
# variables
# findThreshold: within the axis-aligned box described by `boundary`
# (a 2-row matrix of per-variable min/max, columns named like X_data),
# find the single best split of the contained points minimizing the summed
# impurity (costFnc) of the two resulting buckets.
#   boundary  2 x p matrix: row 1 = lower bounds, row 2 = upper bounds
#   X_data    data frame of predictors (columns match colnames(boundary))
#   Y_data    class labels, parallel to rows of X_data
#   depth     accepted for interface compatibility -- unused here
#   minPoints minimum number of points required before splitting a region
#   costFnc   impurity function of a class-proportion vector (e.g. Gini)
# Returns list(left boundary, right boundary, left purity, right purity);
# if the region holds <= minPoints points it is returned unsplit.
findThreshold <- function(boundary, X_data, Y_data, depth, minPoints, costFnc) {
  # select rows of X_data falling inside the boundary box on every variable
  indices <- rep(TRUE, nrow(X_data))
  varNames <- colnames(X_data)
  for (var in varNames) {
    ind <- (X_data[, var] >= boundary[1, var] & X_data[, var] <= boundary[2, var])
    indices <- indices & ind
  }
  # drop = FALSE keeps a one-predictor data.frame from collapsing to a vector
  X <- X_data[indices, , drop = FALSE]
  Y <- Y_data[indices]
  if (length(Y) > minPoints) {
    # best cut and its error per dimension; preallocated with names so the
    # character-indexed assignments below fill existing slots instead of
    # silently appending new named elements to an unnamed vector
    thresholdDim <- setNames(rep(NA_real_, length(varNames)), varNames)
    errorDim <- setNames(rep(NA_real_, length(varNames)), varNames)
    # loop through all variables
    for (var in varNames) {
      x <- sort(unique(X[, var]))
      nIter <- length(x) - 1
      if (nIter < 1) next  # variable is constant in this region: no split
      errors <- rep(NA, nIter)
      thresholds <- rep(NA, nIter)
      # evaluate every candidate cut (midpoint between adjacent values)
      for (i in seq_len(nIter)) {
        potThres <- mean(x[i:(i + 1)])
        left <- (X[, var] <= potThres)
        right <- (X[, var] >= potThres)
        purity_left <- costFnc(as.vector(table(Y[left])) / length(Y[left]))
        purity_right <- costFnc(as.vector(table(Y[right])) / length(Y[right]))
        errors[i] <- purity_left + purity_right
        thresholds[i] <- potThres
      }
      # best possible cut for this variable
      errorDim[var] <- min(errors)
      thresholdDim[var] <- thresholds[which.min(errors)]
    }
    # overall winning variable and threshold (which.min skips NA entries)
    bestThres <- thresholdDim[which.min(errorDim)]
    bestVar <- names(bestThres)
    # impurity of the two buckets induced by the winning split
    left_best <- (X[, bestVar] <= bestThres)
    right_best <- (X[, bestVar] >= bestThres)
    purity_left_best <- costFnc(as.vector(table(Y[left_best])) / length(Y[left_best]))
    purity_right_best <- costFnc(as.vector(table(Y[right_best])) / length(Y[right_best]))
    # split the box into two children along bestVar at bestThres
    boundary1 <- boundary
    boundary1[2, bestVar] <- bestThres
    boundary2 <- boundary
    boundary2[1, bestVar] <- bestThres
    return(list(boundary1, boundary2, purity_left_best, purity_right_best))
  } else {
    # too few points: return the region unchanged with its current purity
    purity_old <- costFnc(as.vector(table(Y)) / length(Y))
    return(list(boundary, boundary, purity_old, purity_old))
  }
}
#Define entropy functions
#NOTE: they are DIFFERENT than
#the ones provided in class
# Misclassification-error impurity: one minus the largest class proportion.
ME <- function(prob) {
  1 - max(prob)
}
# Gini impurity: sum over classes of p * (1 - p).
Gini <- function(prob) {
  sum(prob * (1 - prob))
}
# Cross-entropy impurity: -sum(p * log(p)); finite only for p > 0.
Entropy <- function(prob) {
  -sum(prob * log(prob))
}
### Here is the cTree function
# cTree: grow a classification tree with `depth` terminal regions by
# repeatedly splitting the region whose split yields the largest purity
# decrease (greedy, via findThreshold).
#   formula   class ~ predictors
#   data      data frame holding all variables in the formula
#   depth     number of terminal regions to grow (NOT tree height)
#   minPoints minimum points in a region before it may be split
#   costFnc   impurity function (ME, Gini, or Entropy)
# Returns list(prob, predLabels, boundaries) in the original row order.
# NOTE(review): installs/attaches packages at call time via require() +
# install.packages() -- a side effect best moved out of the function.
# NOTE(review): the print() calls below are debugging output.
cTree <- function(formula, data, depth, minPoints, costFnc=Entropy) {
#Packages
if (!require("assertthat")) install.packages("assertthat"); library(assertthat)
if (!require("formula.tools")) install.packages("formula.tools"); library(formula.tools)
if (!require("plyr")) install.packages("plyr"); library(plyr)
#check inputs
assert_that(class(formula)=="formula")
not_empty(names(data))
assert_that(is.data.frame(data))
# redefine data
# rhs/lhs + get.vars come from formula.tools
X.names <- get.vars(rhs(formula))
Y.names <- get.vars(lhs(formula))
dim <- length(X.names)
X_data <- data[,X.names]
Y_data <- data[,Y.names] #number of elements in list = number of nodes
#define the first purity and boundary on the whole data set
purity<-costFnc(as.vector(table(Y_data))/length(Y_data))
#purity <- lapply(Y_data, FUN=function(x) costFnc(as.vector(table(x))/length(x)) )
boundaries <- list(sapply(X_data, FUN=function(x) c( min(x), max(x) ) ) ) #creates a list of one element (a matrix)
# define recursive function
# recursive: at each call, try splitting every current region, keep the
# single split with the largest purity decrease, and recurse until the
# number of regions reaches `depth`.
recursive <- function(purity, boundaries, X_data,Y_data,depth, minPoints, costFnc) {
#define current debth length = current number of nodes
BoundariesList <- lapply(boundaries, FUN= function(x) findThreshold(x,X_data,Y_data,depth, minPoints, costFnc))
print(BoundariesList)
#define purity decrease measure
purity_decrease<-rep(NA,length(boundaries))
for (i in 1:length(boundaries)){
#compute the decrease for each node
purity_decrease[i]<- purity[i] - ( BoundariesList[[i]][[3]] + BoundariesList[[i]][[4]] )
}
#compute the max decrease change
best_node<- which.max(purity_decrease)
#take the relevant boundaries and purities
new_boundaries<-BoundariesList[[best_node]][1:2]
new_purity<-unlist(BoundariesList[[best_node]][3:4])
print("new_boundaries")
print(new_boundaries)
print("new_purity")
print(new_purity)
#replace the unnecessary features with the ones from the new nodes
# the chosen region is replaced by its two children
if (length(boundaries)==1){
boundaries<-(new_boundaries)
purity<-(new_purity)
}else{
boundaries<-c(new_boundaries,boundaries[-best_node])
purity<-c(new_purity, purity[-best_node])
}
print("B")
print(boundaries)
print("P")
print(purity)
#check the new number of nodes
no_nodes<-length(boundaries)
#define termination stage
if ( no_nodes==depth ) return(list(boundaries=boundaries, purity=purity))
#recursive stage if the is-statement is false
print(".")
# NOTE(review): local variable named T shadows the TRUE alias -- rename.
T<- recursive(purity,boundaries, X_data, Y_data,depth, minPoints, costFnc)
return(T)
}
results <- recursive(purity, boundaries, X_data, Y_data,depth, minPoints, costFnc)
final_boundaries <- results[[1]]
# initialise final dataframe to be returned
#add id to the dataset to keep track of the order
data$id <- seq(1,nrow(data))
data_final <- data[FALSE,]
data_final$predLabel <- integer()
data_final$prob <- double()
#fill up final df, bucket at a time
# NOTE(review): rbind inside a loop copies the frame each iteration;
# acceptable for small `depth`, but consider collecting and binding once.
for (element in 1:length(final_boundaries)) {
indices_final<-rep(TRUE,nrow(X_data))
boundary<-final_boundaries[[element]]
for (var in X.names){
ind<-(data[,var]>= boundary[1,var] & data[,var]<=boundary[2,var])
indices_final<- indices_final & ind
}
new_data <-data[indices_final,]
print(head(new_data,2))
# add predicted label and prob to X
Y_new_data<-unlist(new_data[,Y.names])
print(head(Y_new_data))
table_prob<-(table(Y_new_data))/length(Y_new_data)
new_data$prob <- max(as.vector(table_prob))
# majority label; NOTE(review): on ties this yields >1 name and recycles
new_data$predLabel <- as.integer(names(table_prob[table_prob==max(as.vector(table_prob))]))
data_final <- rbind(data_final, new_data)
}
# restore the original row order before returning
data_final<-data_final[order(data_final$id),]
return(list(prob=data_final$prob,predLabels=data_final$predLabel,boundaries=final_boundaries))
}
|
f7aa3168fd1519b7c8073dbfe6367a710203aee3
|
cba90745ccc82ec6528b8887faa3c2cbcd51d169
|
/npl-20180815/npl.R
|
2a36cfaa9ab272ecf8b3fca2ba3f67294820914d
|
[] |
no_license
|
sifat2207/finhacks2018
|
4885e8793ed362afa8d2564072fd825ec4a39c24
|
ee2c800d1e5bfe7568920ef06cbaef552291afcd
|
refs/heads/master
| 2020-04-24T08:07:48.266908
| 2018-09-22T16:25:56
| 2018-09-22T16:25:56
| 171,820,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,149
|
r
|
npl.R
|
#library-----
library(dplyr)
library(pROC)
library(caret)
library(xgboost)
library(smbinning)
library(e1071)
# data------
# Drop the exported row-index column X from both files.
data_train=read.csv("npl_train.csv")%>%
select(-X)
data_test=read.csv("npl_test.csv")%>%
select(-X)
#data preparation
# check for missing data
summary(data_train)
summary(data_test)
# no missing data
#features-------
vardep="flag_kredit_macet"
var_nominal=c("kode_cabang","skor_delikuensi")
# Coerce the nominal columns to factors (via character to drop old levels).
data_train=data_train%>%
mutate_at(vars(one_of(var_nominal)),funs(as.character))%>%
mutate_at(vars(one_of(var_nominal)),funs(as.factor))
data_test=data_test%>%
mutate_at(vars(one_of(var_nominal)),funs(as.character))%>%
mutate_at(vars(one_of(var_nominal)),funs(as.factor))
# NOTE(review): `levels(x) <- ...` relabels levels POSITIONALLY; if the
# test set is missing a level or ordered differently this silently
# mislabels categories. Consider factor(x, levels = levels(train)) instead.
levels(data_test$kode_cabang)=levels(data_train$kode_cabang)
levels(data_test$skor_delikuensi)=levels(data_train$skor_delikuensi)
formu=as.formula(paste(vardep,"~."))
#model xgboost-----
# Classification threshold applied to predicted probabilities below.
threshold <- 0.5
# convert categorical factor into one-hot encoding
data_numerik=data_train%>%
select(-kode_cabang,-skor_delikuensi,-flag_kredit_macet)
region <- model.matrix(~kode_cabang-1,data_train)
delikuensi <- model.matrix(~skor_delikuensi-1,data_train)
data_numerik=cbind(data_numerik,delikuensi,region)
data_matrix <- data.matrix(data_numerik)
train_label=data.matrix(data_train%>%select_(.dots=c(vardep)))
dtrain <- xgb.DMatrix(data = data_matrix, label=train_label )
model_xgboost <- xgboost(data = dtrain, # the data
nround = 100, # max number of boosting iterations
objective = "binary:logistic") # the objective function
# validity check
# In-sample predicted probabilities on the training DMatrix.
data_train$pred <- predict(model_xgboost, dtrain)
# Calculate the area under the ROC curve
# (exact duplicate recomputation of roc.curve removed)
roc.curve <- roc(data_train$flag_kredit_macet, data_train$pred, ci=T)
# Calculates a cross-tabulation of observed and predicted classes
# with associated statistics
con=confusionMatrix(factor(data_train$pred>threshold), factor(data_train$flag_kredit_macet==1), positive="TRUE")
#####################################################################
#Xgboost with SMOTE-------
# Rebalance the training data with SMOTE, then refit the same model.
source('smote function.R')
set.seed(1234)
data_smote=data_train %>% select(-pred)
data_smote$flag_kredit_macet=as.character(data_smote$flag_kredit_macet)
data_smote$flag_kredit_macet=as.factor(data_smote$flag_kredit_macet)
data_smote <- SMOTE(flag_kredit_macet ~ .,
data=data_smote,
perc.over = 200, perc.under=300)
data_smote$flag_kredit_macet=as.character(data_smote$flag_kredit_macet)
data_smote$flag_kredit_macet=as.numeric(data_smote$flag_kredit_macet)
# convert categorical factor into one-hot encoding
region_smote <- model.matrix(~kode_cabang-1,data_smote)
delikuensi_smote <- model.matrix(~skor_delikuensi-1,data_smote)
data_numerik_smote=data_smote%>%select(-kode_cabang,-skor_delikuensi,-flag_kredit_macet)
data_numerik_smote=cbind(data_numerik_smote,delikuensi_smote,region_smote)
data_matrix_smote <- data.matrix(data_numerik_smote)
train_label_smote=data.matrix(data_smote%>%select_(.dots=c(vardep)))
dtrain_smote <- xgb.DMatrix(data = data_matrix_smote, label=train_label_smote )
model_xgboost_smote <- xgboost(data = dtrain_smote, # the data
nround = 100, # max number of boosting iterations
objective = "binary:logistic") # the objective function
# validity check on the SMOTE-resampled data itself
data_smote$pred <- predict(model_xgboost_smote, dtrain_smote)
# Calculate the area under the ROC curve
roc.curve_smote <- roc(data_smote$flag_kredit_macet, data_smote$pred, ci=T)
# Calculates a cross-tabulation of observed and predicted classes
# with associated statistics
con_smote=confusionMatrix(factor(data_smote$pred>threshold), factor(data_smote$flag_kredit_macet==1), positive="TRUE")
#####
# Evaluate the SMOTE model on the ORIGINAL (unresampled) training data.
data_train$pred_smote <- predict(model_xgboost_smote, dtrain)
roc.curve_train_smote <- roc(data_train$flag_kredit_macet, data_train$pred_smote, ci=T)
# Calculates a cross-tabulation of observed and predicted classes
# with associated statistics
con_train_smote=confusionMatrix(factor(data_train$pred_smote>threshold), factor(data_train$flag_kredit_macet==1), positive="TRUE")
#importance variable
all_=xgb.importance(colnames(dtrain),model=model_xgboost)
smote=xgb.importance(colnames(dtrain),model=model_xgboost_smote)
# predict on test data------
# convert categorical factor into one-hot encoding
data_numerik_test=data_test%>%
select(-kode_cabang,-skor_delikuensi)
region_test <- model.matrix(~kode_cabang-1,data_test)
delikuensi_test <- model.matrix(~skor_delikuensi-1,data_test)
data_numerik_test=cbind(data_numerik_test,delikuensi_test,region_test)
data_matrix_test <- data.matrix(data_numerik_test)
#train_label=data.matrix(data_train%>%select_(.dots=c(vardep)))
dtest <- xgb.DMatrix(data = data_matrix_test )
data_test$pred <- predict(model_xgboost, dtest)
data_test$pred_smote <- predict(model_xgboost_smote, dtest)
# stability of predictions-----
# Bin scores into 20 classes at the training-set 5%-percentile cutpoints.
#all data training
batas_percentile=round(quantile(data_train$pred, probs = seq(0.05,0.95,0.05)),8)
data_train$klas=cut(data_train$pred,breaks = c(0,batas_percentile,1), labels = c(1:20))
#data_train%>%group_by(klas)%>% summarise(min=min(pred), max=max(pred),bad=sum(flag_kredit_macet),n= n())
#smote data training
batas_percentile_smote=round(quantile(data_train$pred_smote, probs = seq(0.05,0.95,0.05)),8)
data_train$klas_smote=cut(data_train$pred_smote,breaks = c(0,batas_percentile_smote,1), labels = c(1:20))
#PSI----
# Population Stability Index: compare score-class distributions between
# training (type 1) and test (type 2).
library(smbinning)
data_test$klas=cut(data_test$pred,breaks = c(0,batas_percentile,1), labels = c(1:20))
data_test$klas_smote=cut(data_test$pred_smote,breaks = c(0,batas_percentile_smote,1), labels = c(1:20))
data_psi=data_train%>%
select(klas, klas_smote)%>%
mutate(type="1")%>%
rbind(data_test%>%
select(klas, klas_smote)%>%
mutate(type="2"))
smbinning.psi(data_psi, y="type",x="klas") #PSI=0.07432464
smbinning.psi(data_psi, y="type",x="klas_smote") #PSi=0.03056048
|
50eda883558111a4314517dd0410b76467e389e9
|
65b1646d9f20dda9f11409eb83415f4c186294a0
|
/man/owtemps.Rd
|
602ac64c8efc60864676d8a32ad79459dec7c08b
|
[] |
no_license
|
paulnorthrop/chandwich
|
a1ac1a7504ee4e291834ac47b9548b04e1e93af5
|
3a58e1fc2347db976434ae61385b404be44495b4
|
refs/heads/master
| 2023-04-07T00:54:04.576945
| 2022-04-02T01:21:27
| 2022-04-02T01:21:27
| 105,452,669
| 4
| 2
| null | 2023-04-04T19:28:18
| 2017-10-01T15:38:38
|
R
|
UTF-8
|
R
| false
| true
| 762
|
rd
|
owtemps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chandwich.R
\docType{data}
\name{owtemps}
\alias{owtemps}
\title{Oxford and Worthing annual maximum temperatures}
\format{
A dataframe with 80 rows and 2 columns, named Oxford and Worthing.
}
\source{
Tabony, R. C. (1983) Extreme value analysis in meteorology.
\emph{The Meteorological Magazine}, \strong{112}, 77-98.
}
\usage{
owtemps
}
\description{
Annual maximum temperatures, in degrees Fahrenheit, at Oxford and
Worthing (England), for the period 1901 to 1980.
}
\references{
Chandler, R. E. and Bate, S. (2007). Inference for clustered
data using the independence loglikelihood. \emph{Biometrika},
\strong{94}(1), 167-183. \doi{10.1093/biomet/asm015}
}
\keyword{datasets}
|
01621556adf2519f17f15b20100d7e0d20473d91
|
9de3b2b8b28f89cfb13723b6be99f157fc13a313
|
/2_Functions/2_Analysis/Function_process_covariates_include_longmatch.R
|
ffea53be35d49af17a46cc1e7bcc24b6ffe560af
|
[] |
no_license
|
WWF-ConsEvidence/MPAMystery
|
0e730dd4d0e39e6c44b36d5f9244a0bfa0ba319b
|
6201c07950206a4eb92531ff5ebb9a30c4ec2de9
|
refs/heads/master
| 2023-06-22T04:39:12.209784
| 2021-07-20T17:53:51
| 2021-07-20T19:34:34
| 84,862,221
| 8
| 1
| null | 2019-07-24T08:21:16
| 2017-03-13T18:43:30
|
R
|
UTF-8
|
R
| false
| false
| 6,685
|
r
|
Function_process_covariates_include_longmatch.R
|
#---- Preprocess matching covariates function ----
# author: Louise Glew, louise.glew@gmail.com
# modified: --
#
# process_covariates
# Purpose: assemble one row of matching covariates per household by combining
# the household table (HH.data) and the individual-level demographic table
# (DE.data): household-head age bin, gender, education, years-resident bin,
# dominant-ethnicity flag, number of children, market travel time (with
# settlement-mean imputation of 990 "blind" codes), MPA/settlement/treatment
# identifiers, and longitudinal t0-t2 / t0-t4 pair ids.
#
# Arguments:
#   HH.data -- household-level data frame; this code reads HouseholdID,
#              MonitoringYear, SettlementID, YearsResident, PaternalEthnicity,
#              TimeMarket, MPAID.
#   DE.data -- individual-level data frame; this code reads HouseholdID,
#              DemographicCode, IndividualAge, IndividualGender,
#              IndividualEducation.
#   t0.t2.pairs, t0.t4.pairs -- NOTE(review): accepted but never used -- both
#              are reassigned below from the global objects t0.t2.matched /
#              t0.t4.matched before first read. Confirm whether the arguments
#              should feed the pair-building step instead.
#
# Returns: data frame `match.covariate`, one row per household, with 99999 as
#   a sentinel for households lacking a longitudinal pair.
#
# External dependencies not passed as arguments (must exist when called):
#   SE.data (settlement table), t0.t2.matched, t0.t4.matched, dplyr/tidyr
#   verbs, reshape2::melt, tm (inside str_clean), and two lookup files read
#   from paths relative to the current working directory.
process_covariates <-
function(HH.data, DE.data,t0.t2.pairs, t0.t4.pairs) {
#---- Import look up tables ----
# NOTE(review): paths are relative to getwd() at call time; the function
# fails if the working directory is not the project root.
ethnic.lkp<- read.delim("x_Flat_data_files/1_Social/Inputs/BHS/eth_output_kc_2017_1217.txt")
education.lkp <- read.delim("x_Flat_data_files/1_Social/Inputs/BHS/education_lkp.txt")
# ---Create functions
# Function to remove all white space in string variables
trim <- function(x) gsub("^\\s+|\\s+$","",x)
# Function to clean string variables (lower case, remove punctuation)
str_clean <- function(strings) {
require(dplyr)
require(tm)
strings %>% tolower() %>% removePunctuation(preserve_intra_word_dashes = FALSE) %>% stripWhitespace() %>%
trim()
}
#----Define function
# Age
# Age of household head, binned by decade (upper bin 70+; 990 = blind code).
# t2/t4 ages are shifted back 2/4 years so bins reflect age at baseline (t0).
# NOTE(review): assumes DemographicCode == 1 marks the household head --
# confirm against the survey codebook.
age.bin<-c(0,20,30,40,50,60,70,990)
HH.age<-subset(DE.data, select=c("HouseholdID","DemographicCode","IndividualAge"), DemographicCode==1)
HH.monitoring.year <-subset(HH.data, select=c("HouseholdID","MonitoringYear"))
HH.age <-left_join(HH.age,(subset(HH.data, select=c("HouseholdID","MonitoringYear"))),by="HouseholdID")
HH.age$IndividualAge[HH.age$IndividualAge >= 990] <- 990 #recode blind values
t0.age <- subset(HH.age, MonitoringYear=="t0")
t2.age <- subset(HH.age, MonitoringYear=="t2")
t4.age <- subset(HH.age, MonitoringYear=="t4")
t0.age$IndividualAge <- .bincode(t0.age$IndividualAge,age.bin,TRUE,TRUE)
t2.age$IndividualAge <- .bincode((t2.age$IndividualAge-2),age.bin,TRUE,TRUE)
t4.age$IndividualAge <- .bincode((t4.age$IndividualAge-4),age.bin,TRUE,TRUE)
HH.age <- rbind(t0.age, t2.age, t4.age)
HH.age <-unique(subset(HH.age, select=c("HouseholdID","IndividualAge")))
rm(t0.age,t2.age,t4.age,HH.monitoring.year,age.bin)
# Gender of Household Head
gender.HHH <- unique(subset(DE.data, select=c("HouseholdID","IndividualGender"), DemographicCode==1))
# Residency
# Years resident, binned by decade; t2/t4 values are shifted back to baseline,
# and households resident fewer years than the monitoring offset fall in bin 1.
resident.bin<-c(0,10,20,30,40,50,60,990)
HH.residency<-subset(HH.data,select=c("HouseholdID","YearsResident", "MonitoringYear"))
HH.residency$YearsResident[HH.residency$YearsResident >= 990] <- 990
t0.residency <- subset(HH.residency, MonitoringYear=="t0")
t2.residency <- subset(HH.residency, MonitoringYear=="t2")
t4.residency <- subset(HH.residency, MonitoringYear=="t4")
t0.residency$YearsResident <- .bincode(t0.residency$YearsResident,resident.bin,TRUE,TRUE)
t2.residency$YearsResident <- ifelse(t2.residency$YearsResident>2,(.bincode((t2.residency$YearsResident-2),resident.bin,TRUE,TRUE)),1)
t4.residency$YearsResident <- ifelse(t4.residency$YearsResident>4,(.bincode((t4.residency$YearsResident-4),resident.bin,TRUE,TRUE)),1)
HH.residency <-rbind(t0.residency,t2.residency,t4.residency)
HH.residency <- na.omit(HH.residency)
HH.residency$MonitoringYear<-NULL
rm(resident.bin, t0.residency, t2.residency,t4.residency)
# Dominant ethnicity
# Flag dom.eth = 1 when the household's (standardized) paternal ethnicity
# matches the modal ethnicity of its settlement in that monitoring year.
# top_n keeps all rows tied for the maximum, so a settlement with tied modal
# ethnicities yields duplicate household rows -- deduplicated below.
HH.eth <- subset(HH.data, select=c("HouseholdID","PaternalEthnicity", "MonitoringYear", "SettlementID"))
HH.eth$PaternalEthnicity <-str_clean(HH.eth$PaternalEthnicity)
HH.eth<- left_join(HH.eth,ethnic.lkp, by=c("PaternalEthnicity"="std.eth.str"))
max.eth <- HH.eth %>%
group_by(MonitoringYear,SettlementID,eth.iso)%>%
summarise(freq.eth=n()) %>%
top_n(1, freq.eth)
HH.eth <-left_join(HH.eth,max.eth, by=c("SettlementID" = "SettlementID", "MonitoringYear"="MonitoringYear"))
HH.eth$dom.eth <- ifelse(HH.eth$eth.iso.x==HH.eth$eth.iso.y,1,0)
HH.eth <-subset(HH.eth, select=c("HouseholdID","dom.eth"))
x <-HH.eth %>% #quick bodge to get rid of duplicates where ethnicities tied.
group_by(HouseholdID)%>%
top_n(1,dom.eth)
HH.eth <-unique(x)
rm(max.eth,x)
# Education level of household head
HH.ed <- subset(DE.data, select=c("HouseholdID","IndividualEducation"), DemographicCode==1)
HH.ed <- left_join(HH.ed, education.lkp, by=c("IndividualEducation"))
HH.ed$IndividualEducation <-NULL
# Children in Household
# Count of household members aged under 19.
# NOTE(review): rows with IndividualAge == NA make Child NA and propagate to
# n.child (no na.rm in sum) -- confirm this is intended.
DE.data$Child<-ifelse(DE.data$IndividualAge<19,1,0) # create new variable, child/adult
N.Child<-DE.data%>%
group_by(HouseholdID) %>%
summarise(n.child=sum(Child))
# Market distance
# Travel time to market; 990 blind codes are replaced by the settlement/year
# mean, falling back to the settlement mean across years when a whole
# settlement-year is blind.
market.distance<-subset(HH.data,select=c("HouseholdID","TimeMarket", "MonitoringYear","SettlementID"))
market.distance$TimeMarket[market.distance$TimeMarket >=990] <- 990
market.mean <-market.distance %>%
group_by(SettlementID,MonitoringYear)%>%
summarise (mean=mean(TimeMarket[TimeMarket!=990])) # subsequent rows handle blind codes, and missing data
market.mean$mean[is.na(market.mean$mean)]<- ave(market.mean$mean,
market.mean$SettlementID,
FUN=function(x)mean(x,na.rm = T))[is.na(market.mean$mean)]
impute.market <- filter(market.distance,TimeMarket==990)
impute.market <-inner_join(subset(impute.market, select=c("HouseholdID","MonitoringYear", "SettlementID")),market.mean, by=c("MonitoringYear", "SettlementID"))
colnames(impute.market) <-c("HouseholdID","MonitoringYear", "SettlementID", "TimeMarket")
market.distance <-rbind((subset(market.distance, TimeMarket!=990)),impute.market)
rm(market.mean, impute.market)
# Site and treatment
# NOTE(review): SE.data is a global, not an argument -- must exist in the
# calling environment.
MPA <-left_join((subset (HH.data, select=c("HouseholdID", "MPAID","SettlementID"))),(subset(SE.data, select=c("SettlementID","Treatment"))),by="SettlementID")
# Longitudinal pairs
# NOTE(review): the t0.t2.pairs / t0.t4.pairs arguments are overwritten here
# from the globals t0.t2.matched / t0.t4.matched (see header note).
t0.t2.pairs <-melt(t0.t2.matched, id.vars="match.id",measure.vars=c("HouseholdID.t0","HouseholdID.t2"))
t0.t2.pairs<-subset(t0.t2.pairs,select=c("match.id","value"))
colnames(t0.t2.pairs) <-c("t0.t2.pair","HouseholdID")
t0.t4.pairs <-melt(t0.t4.matched, id.vars="match.id",measure.vars=c("HouseholdID.t0","HouseholdID.t4"))
t0.t4.pairs<-subset(t0.t4.pairs,select=c("match.id","value"))
colnames(t0.t4.pairs) <-c("t0.t4.pair","HouseholdID")
#Compile match covariate
# Left-join all covariate tables onto the MPA frame by HouseholdID, then
# replace missing longitudinal pair ids with the 99999 sentinel.
match.covariate <-list(MPA,market.distance,N.Child,HH.ed, HH.eth,HH.residency,gender.HHH, HH.age,t0.t2.pairs,t0.t4.pairs) %>%
Reduce(function(dtf1,dtf2) left_join(dtf1,dtf2,by="HouseholdID"), .)
match.covariate$t0.t2.pair[is.na(match.covariate$t0.t2.pair)]<-99999
match.covariate$t0.t4.pair[is.na(match.covariate$t0.t4.pair)]<-99999
rm(MPA,market.distance,N.Child,HH.ed, HH.eth,HH.residency,gender.HHH, HH.age)
return (match.covariate)
}
#rm()
|
ef1669f86ee112e7d21ad32168376438f5d478ad
|
94aed35f1f7cca636419b88a53799f34e5c5dfee
|
/man/makeSingleCellExperiment.Rd
|
7e9f1d3a118485e5c1d92cc15b0f4e4085cf6dca
|
[
"MIT"
] |
permissive
|
trichelab/basejump
|
a4a3b9e58016449faeb9b3d77cf1c09d4eafe4c7
|
6724b10dbf42dd075c7db5854a13d9509fe9fb72
|
refs/heads/master
| 2020-12-12T11:54:17.660956
| 2020-01-08T13:24:07
| 2020-01-08T13:24:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,340
|
rd
|
makeSingleCellExperiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R,
% R/makeSingleCellExperiment-methods.R
\name{makeSingleCellExperiment}
\alias{makeSingleCellExperiment}
\alias{makeSingleCellExperiment,SimpleList-method}
\alias{makeSingleCellExperiment,list-method}
\title{Make a SingleCellExperiment object}
\usage{
makeSingleCellExperiment(assays, ...)
\S4method{makeSingleCellExperiment}{SimpleList}(
assays,
rowRanges = GRangesList(),
colData = DataFrame(),
metadata = list(),
reducedDims = SimpleList(),
transgeneNames = NULL,
spikeNames = NULL
)
\S4method{makeSingleCellExperiment}{list}(
assays,
rowRanges = GRangesList(),
colData = DataFrame(),
metadata = list(),
reducedDims = SimpleList(),
transgeneNames = NULL,
spikeNames = NULL
)
}
\arguments{
\item{assays}{\code{SimpleList}.
Count matrices, which must have matching dimensions. Counts can be passed
in as either a dense matrix (\code{matrix}) or sparse matrix (\code{sparseMatrix}).}
\item{...}{Additional arguments.}
\item{rowRanges}{\code{GRanges} or \code{GRangesList}.
Genomic ranges (e.g. genome annotations). Metadata describing the assay
rows.}
\item{colData}{\code{DataFrame}.
Metadata describing the assay columns. For bulk RNA-seq, this data
describes the samples. For single-cell RNA-seq, this data describes the
cells.}
\item{metadata}{\code{list}.
Metadata.}
\item{reducedDims}{\code{SimpleList}.
List containing matrices of cell coordinates in reduced space.}
\item{transgeneNames}{\code{character}.
Vector indicating which assay rows denote transgenes (e.g. EGFP, TDTOMATO).}
\item{spikeNames}{\code{character}.
Vector indicating which assay rows denote spike-in sequences (e.g. ERCCs).}
}
\value{
\code{SingleCellExperiment}.
}
\description{
This function is a utility wrapper for \code{SummarizedExperiment} that provides
automatic subsetting for row and column data, as well as automatic handling
of transgenes and spike-ins.
}
\note{
Updated 2019-08-27.
}
\section{Session information}{
This function improves upon the standard constructor by slotting useful
session information into the \code{metadata} slot by default:
\itemize{
\item \code{date}: Today's date, returned from \code{Sys.Date}.
\item \code{wd}: Working directory, returned from \code{getwd}.
\item \code{sessionInfo}: \code{\link[sessioninfo:session_info]{sessioninfo::session_info()}} return.
}
This behavior can be disabled by setting \code{sessionInfo = FALSE}.
}
\examples{
data(SingleCellExperiment, package = "acidtest")
## SimpleList ====
object <- SingleCellExperiment
assays <- assays(object)
rowRanges <- rowRanges(object)
colData <- colData(object)
metadata <- metadata(object)
reducedDims <- reducedDims(object)
x <- makeSingleCellExperiment(
assays = assays,
rowRanges = rowRanges,
colData = colData,
metadata = metadata,
reducedDims = reducedDims
)
print(x)
}
\seealso{
\itemize{
\item \code{\link[SummarizedExperiment:SummarizedExperiment]{SummarizedExperiment()}}.
\item \code{\link[SingleCellExperiment:SingleCellExperiment]{SingleCellExperiment()}}.
\item \code{help("RangedSummarizedExperiment-class", "SummarizedExperiment")}.
\item \code{help("SummarizedExperiment-class", "SummarizedExperiment")}.
\item \code{help("SingleCellExperiment-class", "SingleCellExperiment")}.
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.